diff --git a/CREDITS b/CREDITS index 832436e1dd91c491fdb81efbda723e071c7084f6..550bb2b9fe8bbbbb9be54aae3d6ab0ebbaf648af 100644 --- a/CREDITS +++ b/CREDITS @@ -665,6 +665,11 @@ D: Minor updates to SCSI types, added /proc/pid/maps protection S: (ask for current address) S: USA +N: Robin Cornelius +E: robincornelius@users.sourceforge.net +D: Ralink rt2x00 WLAN driver +S: Cornwall, U.K. + N: Mark Corner E: mcorner@umich.edu W: http://www.eecs.umich.edu/~mcorner/ @@ -679,6 +684,11 @@ D: Kernel module SMART utilities S: Santa Cruz, California S: USA +N: Luis Correia +E: lfcorreia@users.sf.net +D: Ralink rt2x00 WLAN driver +S: Belas, Portugal + N: Alan Cox W: http://www.linux.org.uk/diary/ D: Linux Networking (0.99.10->2.0.29) @@ -833,6 +843,12 @@ S: Lancs S: PR4 6AX S: United Kingdom +N: Ivo van Doorn +E: IvDoorn@gmail.com +W: http://www.mendiosus.nl +D: Ralink rt2x00 WLAN driver +S: Haarlem, The Netherlands + N: John G Dorsey E: john+@cs.cmu.edu D: ARM Linux ports to Assabet/Neponset, Spot @@ -3517,6 +3533,12 @@ S: Maastrichterweg 63 S: 5554 GG Valkenswaard S: The Netherlands +N: Mark Wallis +E: mwallis@serialmonkey.com +W: http://mark.serialmonkey.com +D: Ralink rt2x00 WLAN driver +S: Newcastle, Australia + N: Peter Shaobo Wang E: pwang@mmdcorp.com W: http://www.mmdcorp.com/pw/linux @@ -3651,6 +3673,15 @@ S: Alte Regensburger Str. 11a S: 93149 Nittenau S: Germany +N: Gertjan van Wingerde +E: gwingerde@home.nl +D: Ralink rt2x00 WLAN driver +D: Minix V2 file-system +D: Misc fixes +S: Geessinkweg 177 +S: 7544 TX Enschede +S: The Netherlands + N: Lars Wirzenius E: liw@iki.fi D: Linux System Administrator's Guide, author, former maintainer diff --git a/Documentation/DocBook/kernel-api.tmpl b/Documentation/DocBook/kernel-api.tmpl index b886f52a9aac8f48212a50ff3079f382e4735f68..e5da4f2b7c22cab386261b3139c7ba032a3de159 100644 --- a/Documentation/DocBook/kernel-api.tmpl +++ b/Documentation/DocBook/kernel-api.tmpl @@ -240,17 +240,23 @@ X!Ilib/string.c Driver Support !Enet/core/dev.c !Enet/ethernet/eth.c +!Enet/sched/sch_generic.c !Iinclude/linux/etherdevice.h +!Iinclude/linux/netdevice.h + + PHY Support !Edrivers/net/phy/phy.c !Idrivers/net/phy/phy.c !Edrivers/net/phy/phy_device.c !Idrivers/net/phy/phy_device.c !Edrivers/net/phy/mdio_bus.c !Idrivers/net/phy/mdio_bus.c + +--> Synchronous PPP !Edrivers/net/wan/syncppp.c diff --git a/Documentation/feature-removal-schedule.txt b/Documentation/feature-removal-schedule.txt index 675f75601ae632a694fe06a0e8658b5039bf586f..63df2262d41a2625da0816a0383aaaece00963bc 100644 --- a/Documentation/feature-removal-schedule.txt +++ b/Documentation/feature-removal-schedule.txt @@ -314,3 +314,16 @@ Why: The i386/x86_64 merge provides a symlink to the old bzImage location so not yet updated user space tools, e.g. package scripts, do not break. Who: Thomas Gleixner + +--------------------------- + +What: shaper network driver +When: January 2008 +Files: drivers/net/shaper.c, include/linux/if_shaper.h +Why: This driver has been marked obsolete for many years. + It was only designed to work on lower speed links and has design + flaws that lead to machine crashes. The qdisc infrastructure in + 2.4 or later kernels, provides richer features and is more robust. 
+Who: Stephen Hemminger + +--------------------------- diff --git a/Documentation/networking/NAPI_HOWTO.txt b/Documentation/networking/NAPI_HOWTO.txt deleted file mode 100644 index 7907435a661c3ed463a2718021f7b5fe4d394f31..0000000000000000000000000000000000000000 --- a/Documentation/networking/NAPI_HOWTO.txt +++ /dev/null @@ -1,766 +0,0 @@ -HISTORY: -February 16/2002 -- revision 0.2.1: -COR typo corrected -February 10/2002 -- revision 0.2: -some spell checking ;-> -January 12/2002 -- revision 0.1 -This is still work in progress so may change. -To keep up to date please watch this space. - -Introduction to NAPI -==================== - -NAPI is a proven (www.cyberus.ca/~hadi/usenix-paper.tgz) technique -to improve network performance on Linux. For more details please -read that paper. -NAPI provides a "inherent mitigation" which is bound by system capacity -as can be seen from the following data collected by Robert on Gigabit -ethernet (e1000): - - Psize Ipps Tput Rxint Txint Done Ndone - --------------------------------------------------------------- - 60 890000 409362 17 27622 7 6823 - 128 758150 464364 21 9301 10 7738 - 256 445632 774646 42 15507 21 12906 - 512 232666 994445 241292 19147 241192 1062 - 1024 119061 1000003 872519 19258 872511 0 - 1440 85193 1000003 946576 19505 946569 0 - - -Legend: -"Ipps" stands for input packets per second. -"Tput" == packets out of total 1M that made it out. -"txint" == transmit completion interrupts seen -"Done" == The number of times that the poll() managed to pull all -packets out of the rx ring. Note from this that the lower the -load the more we could clean up the rxring -"Ndone" == is the converse of "Done". Note again, that the higher -the load the more times we couldn't clean up the rxring. - -Observe that: -when the NIC receives 890Kpackets/sec only 17 rx interrupts are generated. -The system cant handle the processing at 1 interrupt/packet at that load level. -At lower rates on the other hand, rx interrupts go up and therefore the -interrupt/packet ratio goes up (as observable from that table). So there is -possibility that under low enough input, you get one poll call for each -input packet caused by a single interrupt each time. And if the system -cant handle interrupt per packet ratio of 1, then it will just have to -chug along .... - - -0) Prerequisites: -================== -A driver MAY continue using the old 2.4 technique for interfacing -to the network stack and not benefit from the NAPI changes. -NAPI additions to the kernel do not break backward compatibility. -NAPI, however, requires the following features to be available: - -A) DMA ring or enough RAM to store packets in software devices. - -B) Ability to turn off interrupts or maybe events that send packets up -the stack. - -NAPI processes packet events in what is known as dev->poll() method. -Typically, only packet receive events are processed in dev->poll(). -The rest of the events MAY be processed by the regular interrupt handler -to reduce processing latency (justified also because there are not that -many of them). -Note, however, NAPI does not enforce that dev->poll() only processes -receive events. -Tests with the tulip driver indicated slightly increased latency if -all of the interrupt handler is moved to dev->poll(). Also MII handling -gets a little trickier. -The example used in this document is to move the receive processing only -to dev->poll(); this is shown with the patch for the tulip driver. 
-For an example of code that moves all the interrupt driver to -dev->poll() look at the ported e1000 code. - -There are caveats that might force you to go with moving everything to -dev->poll(). Different NICs work differently depending on their status/event -acknowledgement setup. -There are two types of event register ACK mechanisms. - I) what is known as Clear-on-read (COR). - when you read the status/event register, it clears everything! - The natsemi and sunbmac NICs are known to do this. - In this case your only choice is to move all to dev->poll() - - II) Clear-on-write (COW) - i) you clear the status by writing a 1 in the bit-location you want. - These are the majority of the NICs and work the best with NAPI. - Put only receive events in dev->poll(); leave the rest in - the old interrupt handler. - ii) whatever you write in the status register clears every thing ;-> - Cant seem to find any supported by Linux which do this. If - someone knows such a chip email us please. - Move all to dev->poll() - -C) Ability to detect new work correctly. -NAPI works by shutting down event interrupts when there's work and -turning them on when there's none. -New packets might show up in the small window while interrupts were being -re-enabled (refer to appendix 2). A packet might sneak in during the period -we are enabling interrupts. We only get to know about such a packet when the -next new packet arrives and generates an interrupt. -Essentially, there is a small window of opportunity for a race condition -which for clarity we'll refer to as the "rotting packet". - -This is a very important topic and appendix 2 is dedicated for more -discussion. - -Locking rules and environmental guarantees -========================================== - --Guarantee: Only one CPU at any time can call dev->poll(); this is because -only one CPU can pick the initial interrupt and hence the initial -netif_rx_schedule(dev); -- The core layer invokes devices to send packets in a round robin format. -This implies receive is totally lockless because of the guarantee that only -one CPU is executing it. -- contention can only be the result of some other CPU accessing the rx -ring. This happens only in close() and suspend() (when these methods -try to clean the rx ring); -****guarantee: driver authors need not worry about this; synchronization -is taken care for them by the top net layer. --local interrupts are enabled (if you dont move all to dev->poll()). For -example link/MII and txcomplete continue functioning just same old way. -This improves the latency of processing these events. It is also assumed that -the receive interrupt is the largest cause of noise. Note this might not -always be true. -[according to Manfred Spraul, the winbond insists on sending one -txmitcomplete interrupt for each packet (although this can be mitigated)]. -For these broken drivers, move all to dev->poll(). - -For the rest of this text, we'll assume that dev->poll() only -processes receive events. - -new methods introduce by NAPI -============================= - -a) netif_rx_schedule(dev) -Called by an IRQ handler to schedule a poll for device - -b) netif_rx_schedule_prep(dev) -puts the device in a state which allows for it to be added to the -CPU polling list if it is up and running. You can look at this as -the first half of netif_rx_schedule(dev) above; the second half -being c) below. - -c) __netif_rx_schedule(dev) -Add device to the poll list for this CPU; assuming that _prep above -has already been called and returned 1. 
- -d) netif_rx_reschedule(dev, undo) -Called to reschedule polling for device specifically for some -deficient hardware. Read Appendix 2 for more details. - -e) netif_rx_complete(dev) - -Remove interface from the CPU poll list: it must be in the poll list -on current cpu. This primitive is called by dev->poll(), when -it completes its work. The device cannot be out of poll list at this -call, if it is then clearly it is a BUG(). You'll know ;-> - -All of the above methods are used below, so keep reading for clarity. - -Device driver changes to be made when porting NAPI -================================================== - -Below we describe what kind of changes are required for NAPI to work. - -1) introduction of dev->poll() method -===================================== - -This is the method that is invoked by the network core when it requests -for new packets from the driver. A driver is allowed to send upto -dev->quota packets by the current CPU before yielding to the network -subsystem (so other devices can also get opportunity to send to the stack). - -dev->poll() prototype looks as follows: -int my_poll(struct net_device *dev, int *budget) - -budget is the remaining number of packets the network subsystem on the -current CPU can send up the stack before yielding to other system tasks. -*Each driver is responsible for decrementing budget by the total number of -packets sent. - Total number of packets cannot exceed dev->quota. - -dev->poll() method is invoked by the top layer, the driver just sends if it -can to the stack the packet quantity requested. - -more on dev->poll() below after the interrupt changes are explained. - -2) registering dev->poll() method -=================================== - -dev->poll should be set in the dev->probe() method. -e.g: -dev->open = my_open; -. -. -/* two new additions */ -/* first register my poll method */ -dev->poll = my_poll; -/* next register my weight/quanta; can be overridden in /proc */ -dev->weight = 16; -. -. -dev->stop = my_close; - - - -3) scheduling dev->poll() -============================= -This involves modifying the interrupt handler and the code -path which takes the packet off the NIC and sends them to the -stack. 
- -it's important at this point to introduce the classical D Becker -interrupt processor: - ------------------- -static irqreturn_t -netdevice_interrupt(int irq, void *dev_id, struct pt_regs *regs) -{ - - struct net_device *dev = (struct net_device *)dev_instance; - struct my_private *tp = (struct my_private *)dev->priv; - - int work_count = my_work_count; - status = read_interrupt_status_reg(); - if (status == 0) - return IRQ_NONE; /* Shared IRQ: not us */ - if (status == 0xffff) - return IRQ_HANDLED; /* Hot unplug */ - if (status & error) - do_some_error_handling() - - do { - acknowledge_ints_ASAP(); - - if (status & link_interrupt) { - spin_lock(&tp->link_lock); - do_some_link_stat_stuff(); - spin_lock(&tp->link_lock); - } - - if (status & rx_interrupt) { - receive_packets(dev); - } - - if (status & rx_nobufs) { - make_rx_buffs_avail(); - } - - if (status & tx_related) { - spin_lock(&tp->lock); - tx_ring_free(dev); - if (tx_died) - restart_tx(); - spin_unlock(&tp->lock); - } - - status = read_interrupt_status_reg(); - - } while (!(status & error) || more_work_to_be_done); - return IRQ_HANDLED; -} - ----------------------------------------------------------------------- - -We now change this to what is shown below to NAPI-enable it: - ----------------------------------------------------------------------- -static irqreturn_t -netdevice_interrupt(int irq, void *dev_id, struct pt_regs *regs) -{ - struct net_device *dev = (struct net_device *)dev_instance; - struct my_private *tp = (struct my_private *)dev->priv; - - status = read_interrupt_status_reg(); - if (status == 0) - return IRQ_NONE; /* Shared IRQ: not us */ - if (status == 0xffff) - return IRQ_HANDLED; /* Hot unplug */ - if (status & error) - do_some_error_handling(); - - do { -/************************ start note *********************************/ - acknowledge_ints_ASAP(); // dont ack rx and rxnobuff here -/************************ end note *********************************/ - - if (status & link_interrupt) { - spin_lock(&tp->link_lock); - do_some_link_stat_stuff(); - spin_unlock(&tp->link_lock); - } -/************************ start note *********************************/ - if (status & rx_interrupt || (status & rx_nobuffs)) { - if (netif_rx_schedule_prep(dev)) { - - /* disable interrupts caused - * by arriving packets */ - disable_rx_and_rxnobuff_ints(); - /* tell system we have work to be done. */ - __netif_rx_schedule(dev); - } else { - printk("driver bug! interrupt while in poll\n"); - /* FIX by disabling interrupts */ - disable_rx_and_rxnobuff_ints(); - } - } -/************************ end note note *********************************/ - - if (status & tx_related) { - spin_lock(&tp->lock); - tx_ring_free(dev); - - if (tx_died) - restart_tx(); - spin_unlock(&tp->lock); - } - - status = read_interrupt_status_reg(); - -/************************ start note *********************************/ - } while (!(status & error) || more_work_to_be_done(status)); -/************************ end note note *********************************/ - return IRQ_HANDLED; -} - ---------------------------------------------------------------------- - - -We note several things from above: - -I) Any interrupt source which is caused by arriving packets is now -turned off when it occurs. Depending on the hardware, there could be -several reasons that arriving packets would cause interrupts; these are the -interrupt sources we wish to avoid. 
The two common ones are a) a packet -arriving (rxint) b) a packet arriving and finding no DMA buffers available -(rxnobuff) . -This means also acknowledge_ints_ASAP() will not clear the status -register for those two items above; clearing is done in the place where -proper work is done within NAPI; at the poll() and refill_rx_ring() -discussed further below. -netif_rx_schedule_prep() returns 1 if device is in running state and -gets successfully added to the core poll list. If we get a zero value -we can _almost_ assume are already added to the list (instead of not running. -Logic based on the fact that you shouldn't get interrupt if not running) -We rectify this by disabling rx and rxnobuf interrupts. - -II) that receive_packets(dev) and make_rx_buffs_avail() may have disappeared. -These functionalities are still around actually...... - -infact, receive_packets(dev) is very close to my_poll() and -make_rx_buffs_avail() is invoked from my_poll() - -4) converting receive_packets() to dev->poll() -=============================================== - -We need to convert the classical D Becker receive_packets(dev) to my_poll() - -First the typical receive_packets() below: -------------------------------------------------------------------- - -/* this is called by interrupt handler */ -static void receive_packets (struct net_device *dev) -{ - - struct my_private *tp = (struct my_private *)dev->priv; - rx_ring = tp->rx_ring; - cur_rx = tp->cur_rx; - int entry = cur_rx % RX_RING_SIZE; - int received = 0; - int rx_work_limit = tp->dirty_rx + RX_RING_SIZE - tp->cur_rx; - - while (rx_ring_not_empty) { - u32 rx_status; - unsigned int rx_size; - unsigned int pkt_size; - struct sk_buff *skb; - /* read size+status of next frame from DMA ring buffer */ - /* the number 16 and 4 are just examples */ - rx_status = le32_to_cpu (*(u32 *) (rx_ring + ring_offset)); - rx_size = rx_status >> 16; - pkt_size = rx_size - 4; - - /* process errors */ - if ((rx_size > (MAX_ETH_FRAME_SIZE+4)) || - (!(rx_status & RxStatusOK))) { - netdrv_rx_err (rx_status, dev, tp, ioaddr); - return; - } - - if (--rx_work_limit < 0) - break; - - /* grab a skb */ - skb = dev_alloc_skb (pkt_size + 2); - if (skb) { - . - . - netif_rx (skb); - . - . - } else { /* OOM */ - /*seems very driver specific ... some just pass - whatever is on the ring already. */ - } - - /* move to the next skb on the ring */ - entry = (++tp->cur_rx) % RX_RING_SIZE; - received++ ; - - } - - /* store current ring pointer state */ - tp->cur_rx = cur_rx; - - /* Refill the Rx ring buffers if they are needed */ - refill_rx_ring(); - . - . - -} -------------------------------------------------------------------- -We change it to a new one below; note the additional parameter in -the call. 
- -------------------------------------------------------------------- - -/* this is called by the network core */ -static int my_poll (struct net_device *dev, int *budget) -{ - - struct my_private *tp = (struct my_private *)dev->priv; - rx_ring = tp->rx_ring; - cur_rx = tp->cur_rx; - int entry = cur_rx % RX_BUF_LEN; - /* maximum packets to send to the stack */ -/************************ note note *********************************/ - int rx_work_limit = dev->quota; - -/************************ end note note *********************************/ - do { // outer beginning loop starts here - - clear_rx_status_register_bit(); - - while (rx_ring_not_empty) { - u32 rx_status; - unsigned int rx_size; - unsigned int pkt_size; - struct sk_buff *skb; - /* read size+status of next frame from DMA ring buffer */ - /* the number 16 and 4 are just examples */ - rx_status = le32_to_cpu (*(u32 *) (rx_ring + ring_offset)); - rx_size = rx_status >> 16; - pkt_size = rx_size - 4; - - /* process errors */ - if ((rx_size > (MAX_ETH_FRAME_SIZE+4)) || - (!(rx_status & RxStatusOK))) { - netdrv_rx_err (rx_status, dev, tp, ioaddr); - return 1; - } - -/************************ note note *********************************/ - if (--rx_work_limit < 0) { /* we got packets, but no quota */ - /* store current ring pointer state */ - tp->cur_rx = cur_rx; - - /* Refill the Rx ring buffers if they are needed */ - refill_rx_ring(dev); - goto not_done; - } -/********************** end note **********************************/ - - /* grab a skb */ - skb = dev_alloc_skb (pkt_size + 2); - if (skb) { - . - . -/************************ note note *********************************/ - netif_receive_skb (skb); -/********************** end note **********************************/ - . - . - } else { /* OOM */ - /*seems very driver specific ... common is just pass - whatever is on the ring already. */ - } - - /* move to the next skb on the ring */ - entry = (++tp->cur_rx) % RX_RING_SIZE; - received++ ; - - } - - /* store current ring pointer state */ - tp->cur_rx = cur_rx; - - /* Refill the Rx ring buffers if they are needed */ - refill_rx_ring(dev); - - /* no packets on ring; but new ones can arrive since we last - checked */ - status = read_interrupt_status_reg(); - if (rx status is not set) { - /* If something arrives in this narrow window, - an interrupt will be generated */ - goto done; - } - /* done! at least that's what it looks like ;-> - if new packets came in after our last check on status bits - they'll be caught by the while check and we go back and clear them - since we havent exceeded our quota */ - } while (rx_status_is_set); - -done: - -/************************ note note *********************************/ - dev->quota -= received; - *budget -= received; - - /* If RX ring is not full we are out of memory. */ - if (tp->rx_buffers[tp->dirty_rx % RX_RING_SIZE].skb == NULL) - goto oom; - - /* we are happy/done, no more packets on ring; put us back - to where we can start processing interrupts again */ - netif_rx_complete(dev); - enable_rx_and_rxnobuf_ints(); - - /* The last op happens after poll completion. Which means the following: - * 1. it can race with disabling irqs in irq handler (which are done to - * schedule polls) - * 2. it can race with dis/enabling irqs in other poll threads - * 3. if an irq raised after the beginning of the outer beginning - * loop (marked in the code above), it will be immediately - * triggered here. 
- * - * Summarizing: the logic may result in some redundant irqs both - * due to races in masking and due to too late acking of already - * processed irqs. The good news: no events are ever lost. - */ - - return 0; /* done */ - -not_done: - if (tp->cur_rx - tp->dirty_rx > RX_RING_SIZE/2 || - tp->rx_buffers[tp->dirty_rx % RX_RING_SIZE].skb == NULL) - refill_rx_ring(dev); - - if (!received) { - printk("received==0\n"); - received = 1; - } - dev->quota -= received; - *budget -= received; - return 1; /* not_done */ - -oom: - /* Start timer, stop polling, but do not enable rx interrupts. */ - start_poll_timer(dev); - return 0; /* we'll take it from here so tell core "done"*/ - -/************************ End note note *********************************/ -} -------------------------------------------------------------------- - -From above we note that: -0) rx_work_limit = dev->quota -1) refill_rx_ring() is in charge of clearing the bit for rxnobuff when -it does the work. -2) We have a done and not_done state. -3) instead of netif_rx() we call netif_receive_skb() to pass the skb. -4) we have a new way of handling oom condition -5) A new outer for (;;) loop has been added. This serves the purpose of -ensuring that if a new packet has come in, after we are all set and done, -and we have not exceeded our quota that we continue sending packets up. - - ------------------------------------------------------------ -Poll timer code will need to do the following: - -a) - - if (tp->cur_rx - tp->dirty_rx > RX_RING_SIZE/2 || - tp->rx_buffers[tp->dirty_rx % RX_RING_SIZE].skb == NULL) - refill_rx_ring(dev); - - /* If RX ring is not full we are still out of memory. - Restart the timer again. Else we re-add ourselves - to the master poll list. - */ - - if (tp->rx_buffers[tp->dirty_rx % RX_RING_SIZE].skb == NULL) - restart_timer(); - - else netif_rx_schedule(dev); /* we are back on the poll list */ - -5) dev->close() and dev->suspend() issues -========================================== -The driver writer needn't worry about this; the top net layer takes -care of it. - -6) Adding new Stats to /proc -============================= -In order to debug some of the new features, we introduce new stats -that need to be collected. -TODO: Fill this later. - -APPENDIX 1: discussion on using ethernet HW FC -============================================== -Most chips with FC only send a pause packet when they run out of Rx buffers. -Since packets are pulled off the DMA ring by a softirq in NAPI, -if the system is slow in grabbing them and we have a high input -rate (faster than the system's capacity to remove packets), then theoretically -there will only be one rx interrupt for all packets during a given packetstorm. -Under low load, we might have a single interrupt per packet. -FC should be programmed to apply in the case when the system cant pull out -packets fast enough i.e send a pause only when you run out of rx buffers. -Note FC in itself is a good solution but we have found it to not be -much of a commodity feature (both in NICs and switches) and hence falls -under the same category as using NIC based mitigation. Also, experiments -indicate that it's much harder to resolve the resource allocation -issue (aka lazy receiving that NAPI offers) and hence quantify its usefulness -proved harder. In any case, FC works even better with NAPI but is not -necessary. 
- - -APPENDIX 2: the "rotting packet" race-window avoidance scheme -============================================================= - -There are two types of associations seen here - -1) status/int which honors level triggered IRQ - -If a status bit for receive or rxnobuff is set and the corresponding -interrupt-enable bit is not on, then no interrupts will be generated. However, -as soon as the "interrupt-enable" bit is unmasked, an immediate interrupt is -generated. [assuming the status bit was not turned off]. -Generally the concept of level triggered IRQs in association with a status and -interrupt-enable CSR register set is used to avoid the race. - -If we take the example of the tulip: -"pending work" is indicated by the status bit(CSR5 in tulip). -the corresponding interrupt bit (CSR7 in tulip) might be turned off (but -the CSR5 will continue to be turned on with new packet arrivals even if -we clear it the first time) -Very important is the fact that if we turn on the interrupt bit on when -status is set that an immediate irq is triggered. - -If we cleared the rx ring and proclaimed there was "no more work -to be done" and then went on to do a few other things; then when we enable -interrupts, there is a possibility that a new packet might sneak in during -this phase. It helps to look at the pseudo code for the tulip poll -routine: - --------------------------- - do { - ACK; - while (ring_is_not_empty()) { - work-work-work - if quota is exceeded: exit, no touching irq status/mask - } - /* No packets, but new can arrive while we are doing this*/ - CSR5 := read - if (CSR5 is not set) { - /* If something arrives in this narrow window here, - * where the comments are ;-> irq will be generated */ - unmask irqs; - exit poll; - } - } while (rx_status_is_set); ------------------------- - -CSR5 bit of interest is only the rx status. -If you look at the last if statement: -you just finished grabbing all the packets from the rx ring .. you check if -status bit says there are more packets just in ... it says none; you then -enable rx interrupts again; if a new packet just came in during this check, -we are counting that CSR5 will be set in that small window of opportunity -and that by re-enabling interrupts, we would actually trigger an interrupt -to register the new packet for processing. - -[The above description nay be very verbose, if you have better wording -that will make this more understandable, please suggest it.] - -2) non-capable hardware - -These do not generally respect level triggered IRQs. Normally, -irqs may be lost while being masked and the only way to leave poll is to do -a double check for new input after netif_rx_complete() is invoked -and re-enable polling (after seeing this new input). - -Sample code: - ---------- - . - . -restart_poll: - while (ring_is_not_empty()) { - work-work-work - if quota is exceeded: exit, not touching irq status/mask - } - . - . - . - enable_rx_interrupts() - netif_rx_complete(dev); - if (ring_has_new_packet() && netif_rx_reschedule(dev, received)) { - disable_rx_and_rxnobufs() - goto restart_poll - } while (rx_status_is_set); ---------- - -Basically netif_rx_complete() removes us from the poll list, but because a -new packet which will never be caught due to the possibility of a race -might come in, we attempt to re-add ourselves to the poll list. - - - - -APPENDIX 3: Scheduling issues. -============================== -As seen NAPI moves processing to softirq level. 
Linux uses the ksoftirqd as the -general solution to schedule softirq's to run before next interrupt and by putting -them under scheduler control. Also this prevents consecutive softirq's from -monopolize the CPU. This also have the effect that the priority of ksoftirq needs -to be considered when running very CPU-intensive applications and networking to -get the proper balance of softirq/user balance. Increasing ksoftirq priority to 0 -(eventually more) is reported cure problems with low network performance at high -CPU load. - -Most used processes in a GIGE router: -USER PID %CPU %MEM SIZE RSS TTY STAT START TIME COMMAND -root 3 0.2 0.0 0 0 ? RWN Aug 15 602:00 (ksoftirqd_CPU0) -root 232 0.0 7.9 41400 40884 ? S Aug 15 74:12 gated - --------------------------------------------------------------------- - -relevant sites: -================== -ftp://robur.slu.se/pub/Linux/net-development/NAPI/ - - --------------------------------------------------------------------- -TODO: Write net-skeleton.c driver. -------------------------------------------------------------- - -Authors: -======== -Alexey Kuznetsov -Jamal Hadi Salim -Robert Olsson - -Acknowledgements: -================ -People who made this document better: - -Lennert Buytenhek -Andrew Morton -Manfred Spraul -Donald Becker -Jeff Garzik diff --git a/Documentation/networking/dccp.txt b/Documentation/networking/dccp.txt index 4504cc59e40571433c3120a18620aeec5668a4e4..afb66f9a8aff5aa3f32ea32ea65784ac80d6131e 100644 --- a/Documentation/networking/dccp.txt +++ b/Documentation/networking/dccp.txt @@ -38,8 +38,13 @@ Socket options DCCP_SOCKOPT_SERVICE sets the service. The specification mandates use of service codes (RFC 4340, sec. 8.1.2); if this socket option is not set, the socket will fall back to 0 (which means that no meaningful service code -is present). Connecting sockets set at most one service option; for -listening sockets, multiple service codes can be specified. +is present). On active sockets this is set before connect(); specifying more +than one code has no effect (all subsequent service codes are ignored). The +case is different for passive sockets, where multiple service codes (up to 32) +can be set before calling bind(). + +DCCP_SOCKOPT_GET_CUR_MPS is read-only and retrieves the current maximum packet +size (application payload size) in bytes, see RFC 4340, section 14. DCCP_SOCKOPT_SEND_CSCOV and DCCP_SOCKOPT_RECV_CSCOV are used for setting the partial checksum coverage (RFC 4340, sec. 9.2). The default is that checksums @@ -50,12 +55,13 @@ be enabled at the receiver, too with suitable choice of CsCov. DCCP_SOCKOPT_SEND_CSCOV sets the sender checksum coverage. Values in the range 0..15 are acceptable. The default setting is 0 (full coverage), values between 1..15 indicate partial coverage. -DCCP_SOCKOPT_SEND_CSCOV is for the receiver and has a different meaning: it +DCCP_SOCKOPT_RECV_CSCOV is for the receiver and has a different meaning: it sets a threshold, where again values 0..15 are acceptable. The default of 0 means that all packets with a partial coverage will be discarded. Values in the range 1..15 indicate that packets with minimally such a coverage value are also acceptable. The higher the number, the more - restrictive this setting (see [RFC 4340, sec. 9.2.1]). + restrictive this setting (see [RFC 4340, sec. 9.2.1]). Partial coverage + settings are inherited to the child socket after accept(). The following two options apply to CCID 3 exclusively and are getsockopt()-only. 
In either case, a TFRC info struct (defined in ) is returned. @@ -112,9 +118,14 @@ tx_qlen = 5 The size of the transmit buffer in packets. A value of 0 corresponds to an unbounded transmit buffer. +sync_ratelimit = 125 ms + The timeout between subsequent DCCP-Sync packets sent in response to + sequence-invalid packets on the same socket (RFC 4340, 7.5.4). The unit + of this parameter is milliseconds; a value of 0 disables rate-limiting. + Notes ===== DCCP does not travel through NAT successfully at present on many boxes. This is -because the checksum covers the psuedo-header as per TCP and UDP. Linux NAT +because the checksum covers the pseudo-header as per TCP and UDP. Linux NAT support for DCCP has been added. diff --git a/Documentation/networking/dgrs.txt b/Documentation/networking/dgrs.txt deleted file mode 100644 index 1aa1bb3f94abd292ef56311463c47f142f7e5e27..0000000000000000000000000000000000000000 --- a/Documentation/networking/dgrs.txt +++ /dev/null @@ -1,52 +0,0 @@ - The Digi International RightSwitch SE-X (dgrs) Device Driver - -This is a Linux driver for the Digi International RightSwitch SE-X -EISA and PCI boards. These are 4 (EISA) or 6 (PCI) port Ethernet -switches and a NIC combined into a single board. This driver can -be compiled into the kernel statically or as a loadable module. - -There is also a companion management tool, called "xrightswitch". -The management tool lets you watch the performance graphically, -as well as set the SNMP agent IP and IPX addresses, IEEE Spanning -Tree, and Aging time. These can also be set from the command line -when the driver is loaded. The driver command line options are: - - debug=NNN Debug printing level - dma=0/1 Disable/Enable DMA on PCI card - spantree=0/1 Disable/Enable IEEE spanning tree - hashexpire=NNN Change address aging time (default 300 seconds) - ipaddr=A,B,C,D Set SNMP agent IP address i.e. 199,86,8,221 - iptrap=A,B,C,D Set SNMP agent IP trap address i.e. 199,86,8,221 - ipxnet=NNN Set SNMP agent IPX network number - nicmode=0/1 Disable/Enable multiple NIC mode - -There is also a tool for setting up input and output packet filters -on each port, called "dgrsfilt". - -Both the management tool and the filtering tool are available -separately from the following FTP site: - - ftp://ftp.dgii.com/drivers/rightswitch/linux/ - -When nicmode=1, the board and driver operate as 4 or 6 individual -NIC ports (eth0...eth5) instead of as a switch. All switching -functions are disabled. In the future, the board firmware may include -a routing cache when in this mode. - -Copyright 1995-1996 Digi International Inc. - -This software may be used and distributed according to the terms -of the GNU General Public License, incorporated herein by reference. - -For information on purchasing a RightSwitch SE-4 or SE-6 -board, please contact Digi's sales department at 1-612-912-3444 -or 1-800-DIGIBRD. Outside the U.S., please check our Web page at: - - http://www.dgii.com - -for sales offices worldwide. Tech support is also available through -the channels listed on the Web site, although as long as I am -employed on networking products at Digi I will be happy to provide -any bug fixes that may be needed. 
- --Rick Richardson, rick@dgii.com diff --git a/Documentation/networking/ip-sysctl.txt b/Documentation/networking/ip-sysctl.txt index 32c2e9da5f3a3dd0f2c522799ffa92575f64a01e..6ae2feff3087edfc13f58fb1ea9d9ddfc6f06a97 100644 --- a/Documentation/networking/ip-sysctl.txt +++ b/Documentation/networking/ip-sysctl.txt @@ -180,13 +180,20 @@ tcp_fin_timeout - INTEGER to live longer. Cf. tcp_max_orphans. tcp_frto - INTEGER - Enables F-RTO, an enhanced recovery algorithm for TCP retransmission + Enables Forward RTO-Recovery (F-RTO) defined in RFC4138. + F-RTO is an enhanced recovery algorithm for TCP retransmission timeouts. It is particularly beneficial in wireless environments where packet loss is typically due to random radio interference - rather than intermediate router congestion. If set to 1, basic - version is enabled. 2 enables SACK enhanced F-RTO, which is - EXPERIMENTAL. The basic version can be used also when SACK is - enabled for a flow through tcp_sack sysctl. + rather than intermediate router congestion. F-RTO is a sender-side + only modification. Therefore it does not require any support from + the peer; in a typical case, however, where the wireless link is + the local access link and most of the data flows downlink, the + faraway servers should have F-RTO enabled to take advantage of it. + If set to 1, the basic version is enabled. 2 enables SACK-enhanced + F-RTO if the flow uses SACK. The basic version can also be used when + SACK is in use, though scenarios exist where F-RTO + interacts badly with the packet counting of the SACK-enabled TCP + flow. tcp_frto_response - INTEGER When F-RTO has detected that a TCP retransmission timeout was diff --git a/Documentation/networking/mac80211-injection.txt b/Documentation/networking/mac80211-injection.txt index 53ef7a06f49cb0f40567066f62e42ec2deff9def..84906ef3ed6e969404a9fac550f9973254f0039d 100644 --- a/Documentation/networking/mac80211-injection.txt +++ b/Documentation/networking/mac80211-injection.txt @@ -13,15 +13,35 @@ The radiotap format is discussed in ./Documentation/networking/radiotap-headers.txt. Despite 13 radiotap argument types are currently defined, most only make sense -to appear on received packets. Currently three kinds of argument are used by -the injection code, although it knows to skip any other arguments that are -present (facilitating replay of captured radiotap headers directly): +to appear on received packets. The following information is parsed from the +radiotap headers and used to control injection: - - IEEE80211_RADIOTAP_RATE - u8 arg in 500kbps units (0x02 --> 1Mbps) + * IEEE80211_RADIOTAP_RATE - - IEEE80211_RADIOTAP_ANTENNA - u8 arg, 0x00 = ant1, 0x01 = ant2 + rate in 500kbps units, automatic if invalid or not present - - IEEE80211_RADIOTAP_DBM_TX_POWER - u8 arg, dBm + + * IEEE80211_RADIOTAP_ANTENNA + + antenna to use, automatic if not present + + + * IEEE80211_RADIOTAP_DBM_TX_POWER + + transmit power in dBm, automatic if not present + + + * IEEE80211_RADIOTAP_FLAGS + + IEEE80211_RADIOTAP_F_FCS: FCS will be removed and recalculated + IEEE80211_RADIOTAP_F_WEP: frame will be encrypted if key available + IEEE80211_RADIOTAP_F_FRAG: frame will be fragmented if longer than the + current fragmentation threshold. (Note that + this flag is only reliable when software + fragmentation is enabled) + +The injection code can also skip all other currently defined radiotap fields, +facilitating replay of captured radiotap headers directly. 
Here is an example valid radiotap header defining these three parameters diff --git a/Documentation/networking/netconsole.txt b/Documentation/networking/netconsole.txt index 1caa6c734691bc8bc2d14493065ae6f08613f5e6..3c2f2b3286385337ce5ec24afebd4699dd1e6e0a 100644 --- a/Documentation/networking/netconsole.txt +++ b/Documentation/networking/netconsole.txt @@ -3,6 +3,10 @@ started by Ingo Molnar , 2001.09.17 2.6 port and netpoll api by Matt Mackall , Sep 9 2003 Please send bug reports to Matt Mackall +and Satyam Sharma + +Introduction: +============= This module logs kernel printk messages over UDP allowing debugging of problem where disk logging fails and serial consoles are impractical. @@ -13,6 +17,9 @@ the specified interface as soon as possible. While this doesn't allow capture of early kernel panics, it does capture most of the boot process. +Sender and receiver configuration: +================================== + It takes a string configuration parameter "netconsole" in the following format: @@ -34,21 +41,113 @@ Examples: insmod netconsole netconsole=@/,@10.0.0.2/ +It also supports logging to multiple remote agents by specifying +parameters for the multiple agents separated by semicolons and the +complete string enclosed in "quotes", thusly: + + modprobe netconsole netconsole="@/,@10.0.0.2/;@/eth1,6892@10.0.0.3/" + Built-in netconsole starts immediately after the TCP stack is initialized and attempts to bring up the supplied dev at the supplied address. The remote host can run either 'netcat -u -l -p ' or syslogd. +Dynamic reconfiguration: +======================== + +Dynamic reconfigurability is a useful addition to netconsole that enables +remote logging targets to be dynamically added, removed, or have their +parameters reconfigured at runtime from a configfs-based userspace interface. +[ Note that the parameters of netconsole targets that were specified/created +from the boot/module option are not exposed via this interface, and hence +cannot be modified dynamically. ] + +To include this feature, select CONFIG_NETCONSOLE_DYNAMIC when building the +netconsole module (or kernel, if netconsole is built-in). + +Some examples follow (where configfs is mounted at the /sys/kernel/config +mountpoint). + +To add a remote logging target (target names can be arbitrary): + + cd /sys/kernel/config/netconsole/ + mkdir target1 + +Note that newly created targets have default parameter values (as mentioned +above) and are disabled by default -- they must first be enabled by writing +"1" to the "enabled" attribute (usually after setting parameters accordingly) +as described below. + +To remove a target: + + rmdir /sys/kernel/config/netconsole/othertarget/ + +The interface exposes these parameters of a netconsole target to userspace: + + enabled Is this target currently enabled? (read-write) + dev_name Local network interface name (read-write) + local_port Source UDP port to use (read-write) + remote_port Remote agent's UDP port (read-write) + local_ip Source IP address to use (read-write) + remote_ip Remote agent's IP address (read-write) + local_mac Local interface's MAC address (read-only) + remote_mac Remote agent's MAC address (read-write) + +The "enabled" attribute is also used to control whether the parameters of +a target can be updated or not -- you can modify the parameters of only +disabled targets (i.e. if "enabled" is 0). 
+To update a target's parameters: + + cat enabled # check if enabled is 1 + echo 0 > enabled # disable the target (if required) + echo eth2 > dev_name # set local interface + echo 10.0.0.4 > remote_ip # update some parameter + echo cb:a9:87:65:43:21 > remote_mac # update more parameters + echo 1 > enabled # enable target again + +You can also update the local interface dynamically. This is especially +useful if you want to use interfaces that have newly come up (and may not +have existed when netconsole was loaded / initialized). + +Miscellaneous notes: +==================== + WARNING: the default target ethernet setting uses the broadcast ethernet address to send packets, which can cause increased load on other systems on the same ethernet segment. +TIP: some LAN switches may be configured to suppress ethernet broadcasts, +so it is advised to explicitly specify the remote agents' MAC addresses +from the config parameters passed to netconsole. + +TIP: to find out the MAC address of, say, 10.0.0.2, you may try using: + + ping -c 1 10.0.0.2 ; /sbin/arp -n | grep 10.0.0.2 + +TIP: in case the remote logging agent is on a separate LAN subnet from +the sender, it is suggested to try specifying the MAC address of the +default gateway (you may use /sbin/route -n to find it out) as the +remote MAC address instead. + NOTE: the network device (eth1 in the above case) can run any kind of other network traffic, netconsole is not intrusive. Netconsole might cause slight delays in other traffic if the volume of kernel messages is high, but should have no other impact. +NOTE: if you find that the remote logging agent is not receiving or +printing all messages from the sender, it is likely that you have set +the "console_loglevel" parameter (on the sender) to only send high +priority messages to the console. You can change this at runtime using: + + dmesg -n 8 + +or by specifying "debug" on the kernel command line at boot, to send +all kernel messages to the console. A specific value for this parameter +can also be set using the "loglevel" kernel boot option. See the +dmesg(8) man page and Documentation/kernel-parameters.txt for details. + Netconsole was designed to be as instantaneous as possible, to enable the logging of even the most critical kernel bugs. It works from IRQ contexts as well, and does not enable interrupts while diff --git a/Documentation/networking/netdevices.txt b/Documentation/networking/netdevices.txt index 37869295fc7092e659c0ab08a36209b253522dc0..d0f71fc7f7829b1dfb9dfc689afe0978bec53c76 100644 --- a/Documentation/networking/netdevices.txt +++ b/Documentation/networking/netdevices.txt @@ -73,7 +73,8 @@ dev->hard_start_xmit: has to lock by itself when needed. It is recommended to use a try lock for this and return NETDEV_TX_LOCKED when the spin lock fails. The locking there should also properly protect against - set_multicast_list. + set_multicast_list. Note that the use of NETIF_F_LLTX is deprecated. + Don't use it for new drivers. Context: Process with BHs disabled or BH (timer), will be called with interrupts disabled by netconsole. @@ -95,9 +96,13 @@ dev->set_multicast_list: Synchronization: netif_tx_lock spinlock. Context: BHs disabled -dev->poll: - Synchronization: __LINK_STATE_RX_SCHED bit in dev->state. See - dev_close code and comments in net/core/dev.c for more info. +struct napi_struct synchronization rules +======================================== +napi->poll: + Synchronization: NAPI_STATE_SCHED bit in napi->state. 
Device + driver's dev->close method will invoke napi_disable() on + all NAPI instances which will do a sleeping poll on the + NAPI_STATE_SCHED napi->state bit, waiting for all pending + NAPI activity to cease. Context: softirq will be called with interrupts disabled by netconsole. - diff --git a/Documentation/powerpc/booting-without-of.txt b/Documentation/powerpc/booting-without-of.txt index 76733a3962f05ae30aec247db6a08bbf353df13c..838fd323e79762f6d910a630a29970713c371fbe 100644 --- a/Documentation/powerpc/booting-without-of.txt +++ b/Documentation/powerpc/booting-without-of.txt @@ -1824,6 +1824,162 @@ platforms are moved over to use the flattened-device-tree model. fsl,has-rstcr; }; + + h) 4xx/Axon EMAC ethernet nodes + + The EMAC ethernet controller in IBM and AMCC 4xx chips, and also + the Axon bridge. To operate, this needs to interact with the + special McMAL DMA controller, and sometimes an RGMII or ZMII + interface. In addition to the nodes and properties described + below, the node for the OPB bus on which the EMAC sits must have a + correct clock-frequency property. + + i) The EMAC node itself + + Required properties: + - device_type : "network" + + - compatible : compatible list, contains 2 entries, first is + "ibm,emac-CHIP" where CHIP is the host ASIC (440gx, + 405gp, Axon) and second is either "ibm,emac" or + "ibm,emac4". For Axon, thus, we have: "ibm,emac-axon", + "ibm,emac4" + - interrupts : + - interrupt-parent : optional, if needed for interrupt mapping + - reg : + - local-mac-address : 6 bytes, MAC address + - mal-device : phandle of the associated McMAL node + - mal-tx-channel : 1 cell, index of the tx channel on McMAL associated + with this EMAC + - mal-rx-channel : 1 cell, index of the rx channel on McMAL associated + with this EMAC + - cell-index : 1 cell, hardware index of the EMAC cell on a given + ASIC (typically 0x0 and 0x1 for EMAC0 and EMAC1 on + each Axon chip) + - max-frame-size : 1 cell, maximum frame size supported in bytes + - rx-fifo-size : 1 cell, Rx fifo size in bytes for 10 and 100 Mb/sec + operations. + For Axon, 2048 + - tx-fifo-size : 1 cell, Tx fifo size in bytes for 10 and 100 Mb/sec + operations. + For Axon, 2048. + - fifo-entry-size : 1 cell, size of a fifo entry (used to calculate + thresholds). + For Axon, 0x00000010 + - mal-burst-size : 1 cell, MAL burst size (used to calculate thresholds) + in bytes. + For Axon, 0x00000100 (I think ...) + - phy-mode : string, mode of operation of the PHY interface. + Supported values are: "mii", "rmii", "smii", "rgmii", + "tbi", "gmii", "rtbi", "sgmii". + For Axon on CAB, it is "rgmii" + - mdio-device : 1 cell, required iff using shared MDIO registers + (440EP). phandle of the EMAC to use to drive the + MDIO lines for the PHY used by this EMAC. + - zmii-device : 1 cell, required iff connected to a ZMII. phandle of + the ZMII device node + - zmii-channel : 1 cell, required iff connected to a ZMII. Which ZMII + channel or 0xffffffff if ZMII is only used for MDIO. + - rgmii-device : 1 cell, required iff connected to an RGMII. phandle + of the RGMII device node. + For Axon: phandle of plb5/plb4/opb/rgmii + - rgmii-channel : 1 cell, required iff connected to an RGMII. Which + RGMII channel is used by this EMAC. + For Axon: present, whatever value is appropriate for each + EMAC, that is the content of the current (bogus) "phy-port" + property. + + Recommended properties: + - linux,network-index : This is the intended "index" of this + network device. 
This is used by the bootwrapper to interpret + MAC addresses passed by the firmware when no information other + than indices is available to associate an address with a device. + + Optional properties: + - phy-address : 1 cell, optional, MDIO address of the PHY. If absent, + a search is performed. + - phy-map : 1 cell, optional, bitmap of addresses to probe the PHY + for, used if phy-address is absent. bit 0x00000001 is + MDIO address 0. + For Axon it can be absent, though my current driver + doesn't handle phy-address yet, so for now, keep + 0x00ffffff in it. + - rx-fifo-size-gige : 1 cell, Rx fifo size in bytes for 1000 Mb/sec + operations (if absent the value is the same as + rx-fifo-size). For Axon, either absent or 2048. + - tx-fifo-size-gige : 1 cell, Tx fifo size in bytes for 1000 Mb/sec + operations (if absent the value is the same as + tx-fifo-size). For Axon, either absent or 2048. + - tah-device : 1 cell, optional. If connected to a TAH engine for + offload, phandle of the TAH device node. + - tah-channel : 1 cell, optional. If appropriate, channel used on the + TAH engine. + + Example: + + EMAC0: ethernet@40000800 { + linux,network-index = <0>; + device_type = "network"; + compatible = "ibm,emac-440gp", "ibm,emac"; + interrupt-parent = <&UIC1>; + interrupts = <1c 4 1d 4>; + reg = <40000800 70>; + local-mac-address = [00 04 AC E3 1B 1E]; + mal-device = <&MAL0>; + mal-tx-channel = <0 1>; + mal-rx-channel = <0>; + cell-index = <0>; + max-frame-size = <5dc>; + rx-fifo-size = <1000>; + tx-fifo-size = <800>; + phy-mode = "rmii"; + phy-map = <00000001>; + zmii-device = <&ZMII0>; + zmii-channel = <0>; + }; + + ii) McMAL node + + Required properties: + - device_type : "dma-controller" + - compatible : compatible list, containing 2 entries, first is + "ibm,mcmal-CHIP" where CHIP is the host ASIC (like + emac) and the second is either "ibm,mcmal" or + "ibm,mcmal2". + For Axon, "ibm,mcmal-axon","ibm,mcmal2" + - interrupts : . + For Axon: This is _different_ from the current + firmware. We use the "delayed" interrupts for txeob + and rxeob. Thus we end up mapping those 5 MPIC + interrupts, all level positive sensitive: 10, 11, 32, + 33, 34 (in decimal) + - dcr-reg : < DCR registers range > + - dcr-parent : if needed for dcr-reg + - num-tx-chans : 1 cell, number of Tx channels + - num-rx-chans : 1 cell, number of Rx channels + + iii) ZMII node + + Required properties: + - compatible : compatible list, containing 2 entries, first is + "ibm,zmii-CHIP" where CHIP is the host ASIC (like + EMAC) and the second is "ibm,zmii". + For Axon, there is no ZMII node. + - reg : + + iv) RGMII node + + Required properties: + - compatible : compatible list, containing 2 entries, first is + "ibm,rgmii-CHIP" where CHIP is the host ASIC (like + EMAC) and the second is "ibm,rgmii". + For Axon, "ibm,rgmii-axon","ibm,rgmii" + - reg : + - revision : as provided by the RGMII new version register if + available. + For Axon: 0x0000012a + More devices will be defined as this spec matures. 
VII - Specifying interrupt information for devices diff --git a/Documentation/rfkill.txt b/Documentation/rfkill.txt new file mode 100644 index 0000000000000000000000000000000000000000..a83ff23cd68cea84d4476a43df7b9e9e91f10cf8 --- /dev/null +++ b/Documentation/rfkill.txt @@ -0,0 +1,89 @@ +rfkill - RF switch subsystem support +==================================== + +1 Implementation details +2 Driver support +3 Userspace support + +=============================================================================== +1: Implementation details + +The rfkill switch subsystem offers support for keys often found on laptops +to enable wireless devices like WiFi and Bluetooth. + +This is done by providing the user 3 possibilities: + 1 - The rfkill system handles all events; userspace is not aware of events. + 2 - The rfkill system handles all events; userspace is informed about the events. + 3 - The rfkill system does not handle events; userspace handles all events. + +The buttons to enable and disable the wireless radios are important in +situations where the user is for example using his laptop in a location where +wireless radios _must_ be disabled (e.g. airplanes). +Because of this requirement, userspace support for the keys should not be +made mandatory. Because userspace might want to perform some additional smarter +tasks when the key is pressed, rfkill still provides userspace the possibility +to take over the task of handling the key events. + +The system inside the kernel has been split into 2 separate sections: + 1 - RFKILL + 2 - RFKILL_INPUT + +The first option enables rfkill support and will make sure userspace will +be notified of any events through the input device. It also creates several +sysfs entries which can be used by userspace. See section "Userspace support". + +The second option provides an rfkill input handler. This handler will +listen to all rfkill key events and will toggle the radio accordingly. +With this option enabled, userspace could either do nothing or simply +perform monitoring tasks. + +==================================== +2: Driver support + +To build a driver with rfkill subsystem support, the driver should +depend on the Kconfig symbol RFKILL; it should _not_ depend on +RFKILL_INPUT. + +Unless key events trigger an interrupt to which the driver listens, polling +will be required to determine the key state changes. For this, the input +layer provides the input-polldev handler. + +A driver should implement a few steps to correctly make use of the +rfkill subsystem. First, for non-polling drivers: + + - rfkill_allocate() + - input_allocate_device() + - rfkill_register() + - input_register_device() + +For polling drivers: + + - rfkill_allocate() + - input_allocate_polled_device() + - rfkill_register() + - input_register_polled_device() + +When a key event has been detected, the correct event should be +sent over the input device which has been registered by the driver. + +==================================== +3: Userspace support + +For each key an input device will be created which will send out the correct +key event when the rfkill key has been pressed. + +The following sysfs entries will be created: + + name: Name assigned by driver to this key (interface or driver name). + type: Name of the key type ("wlan", "bluetooth", etc). + state: Current state of the key. 1: On, 0: Off. + claim: 1: Userspace handles events, 0: Kernel handles events + +Both the "state" and "claim" entries are also writable. 
For the "state" entry +this means that when 1 or 0 is written all radios, not yet in the requested +state, will be will be toggled accordingly. +For the "claim" entry writing 1 to it means that the kernel no longer handles +key events even though RFKILL_INPUT input was enabled. When "claim" has been +set to 0, userspace should make sure that it listens for the input events or +check the sysfs "state" entry regularly to correctly perform the required +tasks when the rkfill key is pressed. diff --git a/MAINTAINERS b/MAINTAINERS index 60162706716fff315fe667aaf3d8394b7f361ca3..8a1360045c2d65024b5cc5ff14db6ca214148bbe 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -284,6 +284,14 @@ M: corentin.labbe@geomatys.fr L: lm-sensors@lm-sensors.org S: Maintained +ADM8211 WIRELESS DRIVER +P: Michael Wu +M: flamingice@sourmilk.net +L: linux-wireless@vger.kernel.org +W: http://linuxwireless.org/ +T: git kernel.org:/pub/scm/linux/kernel/git/mwu/mac80211-drivers.git +S: Maintained + ADT746X FAN DRIVER P: Colin Leroy M: colin@colino.net @@ -761,6 +769,22 @@ L: linux-hams@vger.kernel.org W: http://www.baycom.org/~tom/ham/ham.html S: Maintained +B43 WIRELESS DRIVER +P: Michael Buesch +M: mb@bu3sch.de +P: Stefano Brivio +M: st3@riseup.net +L: linux-wireless@vger.kernel.org +W: http://bcm43xx.berlios.de/ +S: Maintained + +B43LEGACY WIRELESS DRIVER +P: Larry Finger +M: Larry.Finger@lwfinger.net +L: linux-wireless@vger.kernel.org +W: http://bcm43xx.berlios.de/ +S: Maintained + BCM43XX WIRELESS DRIVER (SOFTMAC BASED VERSION) P: Larry Finger M: Larry.Finger@lwfinger.net @@ -1224,12 +1248,6 @@ L: Eng.Linux@digi.com W: http://www.digi.com S: Orphaned -DIGI RIGHTSWITCH NETWORK DRIVER -P: Rick Richardson -L: netdev@vger.kernel.org -W: http://www.digi.com -S: Orphaned - DIRECTORY NOTIFICATION P: Stephen Rothwell M: sfr@canb.auug.org.au @@ -1996,16 +2014,14 @@ W: http://sourceforge.net/projects/e1000/ S: Supported INTEL PRO/10GbE SUPPORT -P: Jeff Kirsher -M: jeffrey.t.kirsher@intel.com P: Ayyappan Veeraiyan M: ayyappan.veeraiyan@intel.com -P: John Ronciak -M: john.ronciak@intel.com -P: Jesse Brandeburg -M: jesse.brandeburg@intel.com P: Auke Kok M: auke-jan.h.kok@intel.com +P: Jesse Brandeburg +M: jesse.brandeburg@intel.com +P: John Ronciak +M: john.ronciak@intel.com L: e1000-devel@lists.sourceforge.net W: http://sourceforge.net/projects/e1000/ S: Supported @@ -2032,6 +2048,15 @@ L: http://lists.sourceforge.net/mailman/listinfo/ipw2100-devel W: http://ipw2200.sourceforge.net S: Supported +INTEL WIRELESS WIFI LINK (iwlwifi) +P: Zhu Yi +M: yi.zhu@intel.com +L: linux-wireless@vger.kernel.org +L: ipw3945-devel@lists.sourceforge.net +W: http://intellinuxwireless.org +T: git git://intellinuxwireless.org/repos/iwlwifi +S: Supported + IOC3 ETHERNET DRIVER P: Ralf Baechle M: ralf@linux-mips.org @@ -2049,6 +2074,16 @@ P: Juanjo Ciarlante M: jjciarla@raiz.uncu.edu.ar S: Maintained +IP1000A 10/100/1000 GIGABIT ETHERNET DRIVER +P: Francois Romieu +M: romieu@fr.zoreil.com +P: Sorbica Shieh +M: sorbica@icplus.com.tw +P: Jesse Huang +M: jesse@icplus.com.tw +L: netdev@vger.kernel.org +S: Maintained + IPATH DRIVER: P: Arthur Jones M: infinipath@qlogic.com @@ -2999,6 +3034,14 @@ L: kpreempt-tech@lists.sourceforge.net W: ftp://ftp.kernel.org/pub/linux/kernel/people/rml/preempt-kernel S: Supported +P54 WIRELESS DRIVER +P: Michael Wu +M: flamingice@sourmilk.net +L: linux-wireless@vger.kernel.org +W: http://prism54.org +T: git kernel.org:/pub/scm/linux/kernel/git/mwu/mac80211-drivers.git +S: Maintained + PRISM54 WIRELESS DRIVER P: Luis R. 
Rodriguez M: mcgrof@gmail.com @@ -3086,6 +3129,14 @@ M: corey@world.std.com L: linux-wireless@vger.kernel.org S: Maintained +RALINK RT2X00 WLAN DRIVER +P: rt2x00 project +L: linux-wireless@vger.kernel.org +L: rt2400-devel@lists.sourceforge.net +W: http://rt2x00.serialmonkey.com/ +S: Maintained +F: drivers/net/wireless/rt2x00/ + RANDOM NUMBER DRIVER P: Matt Mackall M: mpm@selenic.com @@ -3171,8 +3222,8 @@ W: http://www.ibm.com/developerworks/linux/linux390/ S: Supported S390 NETWORK DRIVERS -P: Frank Pavlic -M: fpavlic@de.ibm.com +P: Ursula Braun +M: ubraun@linux.vnet.ibm.com M: linux390@de.ibm.com L: linux-s390@vger.kernel.org W: http://www.ibm.com/developerworks/linux/linux390/ @@ -3186,6 +3237,14 @@ L: linux-s390@vger.kernel.org W: http://www.ibm.com/developerworks/linux/linux390/ S: Supported +S390 IUCV NETWORK LAYER +P: Ursula Braun +M: ubraun@linux.vnet.ibm.com +M: linux390@de.ibm.com +L: linux-s390@vger.kernel.org +W: http://www.ibm.com/developerworks/linux/linux390/ +S: Supported + SAA7146 VIDEO4LINUX-2 DRIVER P: Michael Hunold M: michael@mihu.de @@ -3410,6 +3469,12 @@ M: tsbogend@alpha.franken.de L: netdev@vger.kernel.org S: Maintained +SONICS SILICON BACKPLANE DRIVER (SSB) +P: Michael Buesch +M: mb@bu3sch.de +L: netdev@vger.kernel.org +S: Maintained + SONY VAIO CONTROL DEVICE DRIVER P: Mattia Dongili M: malattia@linux.it @@ -3594,6 +3659,14 @@ M: hlhung3i@gmail.com W: http://tcp-lp-mod.sourceforge.net/ S: Maintained +TEHUTI ETHERNET DRIVER +P: Alexander Indenbaum +M: baum@tehutinetworks.net +P: Andy Gospodarek +M: andy@greyhouse.net +L: netdev@vger.kernel.org +S: Supported + TI FLASH MEDIA INTERFACE DRIVER P: Alex Dubov M: oakad@yahoo.com diff --git a/arch/ia64/hp/sim/simeth.c b/arch/ia64/hp/sim/simeth.c index 4017696ada637d0633998daf750a668336b5189c..08b117e2c54bdade231aa96c29cac4f4ce1bc728 100644 --- a/arch/ia64/hp/sim/simeth.c +++ b/arch/ia64/hp/sim/simeth.c @@ -294,6 +294,9 @@ simeth_device_event(struct notifier_block *this,unsigned long event, void *ptr) return NOTIFY_DONE; } + if (dev->nd_net != &init_net) + return NOTIFY_DONE; + if ( event != NETDEV_UP && event != NETDEV_DOWN ) return NOTIFY_DONE; /* diff --git a/arch/mips/configs/bigsur_defconfig b/arch/mips/configs/bigsur_defconfig index 30f3e9a2466ff11cff7596a40730aeafe8dff41d..80b0c99c2cfb9af113a3e79518a33825a6e249d2 100644 --- a/arch/mips/configs/bigsur_defconfig +++ b/arch/mips/configs/bigsur_defconfig @@ -573,7 +573,7 @@ CONFIG_MII=y # CONFIG_HAMACHI is not set # CONFIG_YELLOWFIN is not set # CONFIG_R8169 is not set -CONFIG_NET_SB1250_MAC=y +CONFIG_SB1250_MAC=y # CONFIG_SIS190 is not set # CONFIG_SKGE is not set # CONFIG_SKY2 is not set diff --git a/arch/mips/configs/sb1250-swarm_defconfig b/arch/mips/configs/sb1250-swarm_defconfig index 3d1b6281d887b0ee4b28269b49a3995c089042d7..3ed991ae0ebe180d5942f42bab47f5573594620b 100644 --- a/arch/mips/configs/sb1250-swarm_defconfig +++ b/arch/mips/configs/sb1250-swarm_defconfig @@ -565,7 +565,7 @@ CONFIG_MII=y # CONFIG_HAMACHI is not set # CONFIG_YELLOWFIN is not set # CONFIG_R8169 is not set -CONFIG_NET_SB1250_MAC=y +CONFIG_SB1250_MAC=y # CONFIG_SIS190 is not set # CONFIG_SKGE is not set # CONFIG_SKY2 is not set diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c index 24bea97c736c75ca59f3ceed5e48443dd6d14daf..9bf63d5256dbeec42f0fc1a610bdac16e0fcdc12 100644 --- a/arch/powerpc/kernel/irq.c +++ b/arch/powerpc/kernel/irq.c @@ -395,7 +395,6 @@ void do_softirq(void) local_irq_restore(flags); } -EXPORT_SYMBOL(do_softirq); /* diff --git 
a/arch/powerpc/platforms/44x/Kconfig b/arch/powerpc/platforms/44x/Kconfig index 1b3e008fd1481474f26bcbccef24c731f194d31e..8e66949e7c67c0b391b41e1beeafa57f5112de35 100644 --- a/arch/powerpc/platforms/44x/Kconfig +++ b/arch/powerpc/platforms/44x/Kconfig @@ -38,8 +38,7 @@ config 440EP config 440GP bool -# Disabled until the new EMAC Driver is merged. -# select IBM_NEW_EMAC_ZMII + select IBM_NEW_EMAC_ZMII config 440GX bool diff --git a/arch/powerpc/platforms/cell/Kconfig b/arch/powerpc/platforms/cell/Kconfig index ac8032034fb8725026ebdf39f7e93790c8fbae38..e1e2f6a43019d9618f5da854c07a07bc982de49a 100644 --- a/arch/powerpc/platforms/cell/Kconfig +++ b/arch/powerpc/platforms/cell/Kconfig @@ -10,6 +10,10 @@ config PPC_CELL_NATIVE select PPC_INDIRECT_IO select PPC_NATIVE select MPIC + select IBM_NEW_EMAC_EMAC4 + select IBM_NEW_EMAC_RGMII + select IBM_NEW_EMAC_ZMII #test only + select IBM_NEW_EMAC_TAH #test only default n config PPC_IBM_CELL_BLADE diff --git a/arch/powerpc/platforms/pasemi/Kconfig b/arch/powerpc/platforms/pasemi/Kconfig index 95cd90fd81c74f3ed38dd61766787afb0987965b..e95261ef6f98e6d0c4edd28e6cc7fb2eab7f05a8 100644 --- a/arch/powerpc/platforms/pasemi/Kconfig +++ b/arch/powerpc/platforms/pasemi/Kconfig @@ -18,6 +18,16 @@ config PPC_PASEMI_IOMMU help IOMMU support for PA6T-1682M +config PPC_PASEMI_IOMMU_DMA_FORCE + bool "Force DMA engine to use IOMMU" + depends on PPC_PASEMI_IOMMU + help + This option forces the use of the IOMMU also for the + DMA engine. Otherwise the kernel will use it only when + running under a hypervisor. + + If in doubt, say "N". + config PPC_PASEMI_MDIO depends on PHYLIB tristate "MDIO support via GPIO" diff --git a/arch/powerpc/platforms/pasemi/iommu.c b/arch/powerpc/platforms/pasemi/iommu.c index 9014d55c717bec00e97bac2b38fcceb88f3aeb27..a1111b5c6cb467fbdabdd03c3f57ed52b9fe9347 100644 --- a/arch/powerpc/platforms/pasemi/iommu.c +++ b/arch/powerpc/platforms/pasemi/iommu.c @@ -25,6 +25,7 @@ #include #include #include +#include #define IOBMAP_PAGE_SHIFT 12 @@ -175,13 +176,17 @@ static void pci_dma_dev_setup_pasemi(struct pci_dev *dev) { pr_debug("pci_dma_dev_setup, dev %p (%s)\n", dev, pci_name(dev)); - /* DMA device is untranslated, but all other PCI-e goes through - * the IOMMU +#if !defined(CONFIG_PPC_PASEMI_IOMMU_DMA_FORCE) + /* For non-LPAR environment, don't translate anything for the DMA + * engine. The exception to this is if the user has enabled + * CONFIG_PPC_PASEMI_IOMMU_DMA_FORCE at build time. 
*/ - if (dev->vendor == 0x1959 && dev->device == 0xa007) + if (dev->vendor == 0x1959 && dev->device == 0xa007 && + !firmware_has_feature(FW_FEATURE_LPAR)) dev->dev.archdata.dma_ops = &dma_direct_ops; - else - dev->dev.archdata.dma_data = &iommu_table_iobmap; +#endif + + dev->dev.archdata.dma_data = &iommu_table_iobmap; } static void pci_dma_bus_setup_null(struct pci_bus *b) { } diff --git a/arch/s390/appldata/appldata_net_sum.c b/arch/s390/appldata/appldata_net_sum.c index 2180ac105b05f5ba8f7b0f08328b615530d72304..6c1815a47714389e0f6664722e7c3563907a930a 100644 --- a/arch/s390/appldata/appldata_net_sum.c +++ b/arch/s390/appldata/appldata_net_sum.c @@ -16,6 +16,7 @@ #include #include #include +#include #include "appldata.h" @@ -107,7 +108,7 @@ static void appldata_get_net_sum_data(void *data) tx_dropped = 0; collisions = 0; read_lock(&dev_base_lock); - for_each_netdev(dev) { + for_each_netdev(&init_net, dev) { stats = dev->get_stats(dev); rx_packets += stats->rx_packets; tx_packets += stats->tx_packets; diff --git a/arch/s390/crypto/aes_s390.c b/arch/s390/crypto/aes_s390.c index 3660ca6a330694a470d5ca2a999709c5173042cc..512669691ad01b9513ce5c283d5d9db6e75e5aed 100644 --- a/arch/s390/crypto/aes_s390.c +++ b/arch/s390/crypto/aes_s390.c @@ -7,7 +7,7 @@ * Copyright IBM Corp. 2005,2007 * Author(s): Jan Glauber (jang@de.ibm.com) * - * Derived from "crypto/aes.c" + * Derived from "crypto/aes_generic.c" * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the Free diff --git a/arch/s390/crypto/sha1_s390.c b/arch/s390/crypto/sha1_s390.c index af4460ec381f62aede34475b3e9e47fd172329ec..5a834f6578ab85827c6a1416d788280c72322c47 100644 --- a/arch/s390/crypto/sha1_s390.c +++ b/arch/s390/crypto/sha1_s390.c @@ -12,7 +12,7 @@ * Author(s): Thomas Spatzier * Jan Glauber (jan.glauber@de.ibm.com) * - * Derived from "crypto/sha1.c" + * Derived from "crypto/sha1_generic.c" * Copyright (c) Alan Smithee. * Copyright (c) Andrew McDonald * Copyright (c) Jean-Francois Dive @@ -26,12 +26,10 @@ #include #include #include +#include #include "crypt_s390.h" -#define SHA1_DIGEST_SIZE 20 -#define SHA1_BLOCK_SIZE 64 - struct s390_sha1_ctx { u64 count; /* message length */ u32 state[5]; @@ -42,11 +40,11 @@ static void sha1_init(struct crypto_tfm *tfm) { struct s390_sha1_ctx *sctx = crypto_tfm_ctx(tfm); - sctx->state[0] = 0x67452301; - sctx->state[1] = 0xEFCDAB89; - sctx->state[2] = 0x98BADCFE; - sctx->state[3] = 0x10325476; - sctx->state[4] = 0xC3D2E1F0; + sctx->state[0] = SHA1_H0; + sctx->state[1] = SHA1_H1; + sctx->state[2] = SHA1_H2; + sctx->state[3] = SHA1_H3; + sctx->state[4] = SHA1_H4; sctx->count = 0; } diff --git a/arch/s390/crypto/sha256_s390.c b/arch/s390/crypto/sha256_s390.c index 2ced3330bce0b52bf85a5d5ab3bc13e2bd9b6901..ccf8633c4f6580f509cff71c1bce08179037a65c 100644 --- a/arch/s390/crypto/sha256_s390.c +++ b/arch/s390/crypto/sha256_s390.c @@ -7,7 +7,7 @@ * Copyright IBM Corp. 
2005,2007 * Author(s): Jan Glauber (jang@de.ibm.com) * - * Derived from "crypto/sha256.c" + * Derived from "crypto/sha256_generic.c" * and "arch/s390/crypto/sha1_s390.c" * * This program is free software; you can redistribute it and/or modify it @@ -19,12 +19,10 @@ #include #include #include +#include #include "crypt_s390.h" -#define SHA256_DIGEST_SIZE 32 -#define SHA256_BLOCK_SIZE 64 - struct s390_sha256_ctx { u64 count; /* message length */ u32 state[8]; @@ -35,14 +33,14 @@ static void sha256_init(struct crypto_tfm *tfm) { struct s390_sha256_ctx *sctx = crypto_tfm_ctx(tfm); - sctx->state[0] = 0x6a09e667; - sctx->state[1] = 0xbb67ae85; - sctx->state[2] = 0x3c6ef372; - sctx->state[3] = 0xa54ff53a; - sctx->state[4] = 0x510e527f; - sctx->state[5] = 0x9b05688c; - sctx->state[6] = 0x1f83d9ab; - sctx->state[7] = 0x5be0cd19; + sctx->state[0] = SHA256_H0; + sctx->state[1] = SHA256_H1; + sctx->state[2] = SHA256_H2; + sctx->state[3] = SHA256_H3; + sctx->state[4] = SHA256_H4; + sctx->state[5] = SHA256_H5; + sctx->state[6] = SHA256_H6; + sctx->state[7] = SHA256_H7; sctx->count = 0; } diff --git a/arch/s390/kernel/irq.c b/arch/s390/kernel/irq.c index 8f0cbca3120318a497a268dca6e4ad946c461e3b..c36d8123ca14bd464b2cfb59b2c3c9eb0498ca6d 100644 --- a/arch/s390/kernel/irq.c +++ b/arch/s390/kernel/irq.c @@ -95,7 +95,6 @@ asmlinkage void do_softirq(void) local_irq_restore(flags); } -EXPORT_SYMBOL(do_softirq); void init_irq_proc(void) { diff --git a/arch/sh/kernel/irq.c b/arch/sh/kernel/irq.c index 03404987528dd083970977dfde5379c2bca67d8c..4b49d03ffbd2edd6b1598f35c58e5ab7a37d18d5 100644 --- a/arch/sh/kernel/irq.c +++ b/arch/sh/kernel/irq.c @@ -245,7 +245,6 @@ asmlinkage void do_softirq(void) local_irq_restore(flags); } -EXPORT_SYMBOL(do_softirq); #endif void __init init_IRQ(void) diff --git a/arch/sparc64/solaris/ioctl.c b/arch/sparc64/solaris/ioctl.c index 18352a49862842f6e6f432b1799b5370139e3358..8ad10a6d993bacc5821f3c15e308202ca82514c8 100644 --- a/arch/sparc64/solaris/ioctl.c +++ b/arch/sparc64/solaris/ioctl.c @@ -28,6 +28,7 @@ #include #include +#include #include #include @@ -686,7 +687,7 @@ static inline int solaris_i(unsigned int fd, unsigned int cmd, u32 arg) int i = 0; read_lock_bh(&dev_base_lock); - for_each_netdev(d) + for_each_netdev(&init_net, d) i++; read_unlock_bh(&dev_base_lock); diff --git a/arch/x86/kernel/irq_32.c b/arch/x86/kernel/irq_32.c index dd2b97fc00b2ad8c76fe2f7bce0a2164e4866ae6..4f681bcdb1fc3b1afac4676359ff1116dc3454bf 100644 --- a/arch/x86/kernel/irq_32.c +++ b/arch/x86/kernel/irq_32.c @@ -231,8 +231,6 @@ asmlinkage void do_softirq(void) local_irq_restore(flags); } - -EXPORT_SYMBOL(do_softirq); #endif /* diff --git a/arch/x86/kernel/irq_64.c b/arch/x86/kernel/irq_64.c index 39cb3fa83ebbe3e68af877a0f43074189b1c027a..bd11e42b22bfa6b66ebf2dcf897845a1baaa25f9 100644 --- a/arch/x86/kernel/irq_64.c +++ b/arch/x86/kernel/irq_64.c @@ -210,4 +210,3 @@ asmlinkage void do_softirq(void) } local_irq_restore(flags); } -EXPORT_SYMBOL(do_softirq); diff --git a/crypto/Kconfig b/crypto/Kconfig index 3d1a1e27944f059e6cca382cf02636e5d308df23..083d2e1dfc21640e67f6e6f85671323b2f954a97 100644 --- a/crypto/Kconfig +++ b/crypto/Kconfig @@ -28,6 +28,10 @@ config CRYPTO_ABLKCIPHER tristate select CRYPTO_BLKCIPHER +config CRYPTO_AEAD + tristate + select CRYPTO_ALGAPI + config CRYPTO_BLKCIPHER tristate select CRYPTO_ALGAPI @@ -146,7 +150,6 @@ config CRYPTO_ECB tristate "ECB support" select CRYPTO_BLKCIPHER select CRYPTO_MANAGER - default m help ECB: Electronic CodeBook mode This is the simplest block cipher 
algorithm. It simply encrypts @@ -156,7 +159,6 @@ config CRYPTO_CBC tristate "CBC support" select CRYPTO_BLKCIPHER select CRYPTO_MANAGER - default m help CBC: Cipher Block Chaining mode This block cipher algorithm is required for IPSec. @@ -165,7 +167,6 @@ config CRYPTO_PCBC tristate "PCBC support" select CRYPTO_BLKCIPHER select CRYPTO_MANAGER - default m help PCBC: Propagating Cipher Block Chaining mode This block cipher algorithm is required for RxRPC. @@ -183,6 +184,17 @@ config CRYPTO_LRW The first 128, 192 or 256 bits in the key are used for AES and the rest is used to tie each cipher block to its logical position. +config CRYPTO_XTS + tristate "XTS support (EXPERIMENTAL)" + depends on EXPERIMENTAL + select CRYPTO_BLKCIPHER + select CRYPTO_MANAGER + select CRYPTO_GF128MUL + help + XTS: IEEE1619/D16 narrow block cipher use with aes-xts-plain, + key size 256, 384 or 512 bits. This implementation currently + can't handle a sectorsize which is not a multiple of 16 bytes. + config CRYPTO_CRYPTD tristate "Software async crypto daemon" select CRYPTO_ABLKCIPHER @@ -415,6 +427,20 @@ config CRYPTO_ANUBIS +config CRYPTO_SEED + tristate "SEED cipher algorithm" + select CRYPTO_ALGAPI + help + SEED cipher algorithm (RFC4269). + + SEED is a 128-bit symmetric key block cipher that has been + developed by KISA (Korea Information Security Agency) as a + national standard encryption algorithm of the Republic of Korea. + It is a 16 round block cipher with the key size of 128 bit. + + See also: + + config CRYPTO_DEFLATE tristate "Deflate compression algorithm" @@ -468,6 +494,14 @@ config CRYPTO_TEST help Quick & dirty crypto test module. +config CRYPTO_AUTHENC + tristate "Authenc support" + select CRYPTO_AEAD + select CRYPTO_MANAGER + help + Authenc: Combined mode wrapper for IPsec. + This is required for IPSec. 
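As an editorial aside (not part of the patch): the new "authenc" template is instantiated by name, with the authentication tag size and cipher key length encoded as numeric parameters, following the authenc(%s,%u,%s,%u) format used by crypto_authenc_alloc() further below. A minimal kernel-side usage sketch, assuming the crypto_alloc_aead()/crypto_aead_setkey() helpers that accompany the new AEAD type, and noting that this version of the template takes the authentication key and cipher key simply concatenated:

#include <linux/crypto.h>
#include <linux/err.h>
#include <linux/string.h>

/* Hypothetical helper: HMAC-SHA1 (20-byte tag) around CBC-AES with a
 * 16-byte cipher key.  The "16" in the instance name fixes the cipher
 * key length, so setkey() receives authkey || enckey. */
static int example_authenc_setup(const u8 *authkey, unsigned int authkeylen,
                                 const u8 *enckey)
{
        struct crypto_aead *tfm;
        u8 key[64];
        int err;

        if (authkeylen + 16 > sizeof(key))
                return -EINVAL;

        tfm = crypto_alloc_aead("authenc(hmac(sha1),20,cbc(aes),16)", 0, 0);
        if (IS_ERR(tfm))
                return PTR_ERR(tfm);

        memcpy(key, authkey, authkeylen);
        memcpy(key + authkeylen, enckey, 16);
        err = crypto_aead_setkey(tfm, key, authkeylen + 16);

        /* A real caller would now build an aead_request, attach the
         * associated data, source/destination scatterlists and IV, and
         * call crypto_aead_encrypt(); the 20-byte digest is written
         * right after the ciphertext in the destination, as done by
         * crypto_authenc_hash() in crypto/authenc.c. */

        crypto_free_aead(tfm);
        return err;
}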
+ source "drivers/crypto/Kconfig" endif # if CRYPTO diff --git a/crypto/Makefile b/crypto/Makefile index 0cf17f1ea1513a17ee828d53bea6bd09b56e27d0..43c2a0dc99365faf07d960dfe6ffbf4cc1c816b1 100644 --- a/crypto/Makefile +++ b/crypto/Makefile @@ -2,13 +2,14 @@ # Cryptographic API # -obj-$(CONFIG_CRYPTO) += api.o scatterwalk.o cipher.o digest.o compress.o +obj-$(CONFIG_CRYPTO) += api.o cipher.o digest.o compress.o crypto_algapi-$(CONFIG_PROC_FS) += proc.o -crypto_algapi-objs := algapi.o $(crypto_algapi-y) +crypto_algapi-objs := algapi.o scatterwalk.o $(crypto_algapi-y) obj-$(CONFIG_CRYPTO_ALGAPI) += crypto_algapi.o obj-$(CONFIG_CRYPTO_ABLKCIPHER) += ablkcipher.o +obj-$(CONFIG_CRYPTO_AEAD) += aead.o obj-$(CONFIG_CRYPTO_BLKCIPHER) += blkcipher.o crypto_hash-objs := hash.o @@ -20,8 +21,8 @@ obj-$(CONFIG_CRYPTO_XCBC) += xcbc.o obj-$(CONFIG_CRYPTO_NULL) += crypto_null.o obj-$(CONFIG_CRYPTO_MD4) += md4.o obj-$(CONFIG_CRYPTO_MD5) += md5.o -obj-$(CONFIG_CRYPTO_SHA1) += sha1.o -obj-$(CONFIG_CRYPTO_SHA256) += sha256.o +obj-$(CONFIG_CRYPTO_SHA1) += sha1_generic.o +obj-$(CONFIG_CRYPTO_SHA256) += sha256_generic.o obj-$(CONFIG_CRYPTO_SHA512) += sha512.o obj-$(CONFIG_CRYPTO_WP512) += wp512.o obj-$(CONFIG_CRYPTO_TGR192) += tgr192.o @@ -30,14 +31,15 @@ obj-$(CONFIG_CRYPTO_ECB) += ecb.o obj-$(CONFIG_CRYPTO_CBC) += cbc.o obj-$(CONFIG_CRYPTO_PCBC) += pcbc.o obj-$(CONFIG_CRYPTO_LRW) += lrw.o +obj-$(CONFIG_CRYPTO_XTS) += xts.o obj-$(CONFIG_CRYPTO_CRYPTD) += cryptd.o -obj-$(CONFIG_CRYPTO_DES) += des.o +obj-$(CONFIG_CRYPTO_DES) += des_generic.o obj-$(CONFIG_CRYPTO_FCRYPT) += fcrypt.o obj-$(CONFIG_CRYPTO_BLOWFISH) += blowfish.o obj-$(CONFIG_CRYPTO_TWOFISH) += twofish.o obj-$(CONFIG_CRYPTO_TWOFISH_COMMON) += twofish_common.o obj-$(CONFIG_CRYPTO_SERPENT) += serpent.o -obj-$(CONFIG_CRYPTO_AES) += aes.o +obj-$(CONFIG_CRYPTO_AES) += aes_generic.o obj-$(CONFIG_CRYPTO_CAMELLIA) += camellia.o obj-$(CONFIG_CRYPTO_CAST5) += cast5.o obj-$(CONFIG_CRYPTO_CAST6) += cast6.o @@ -45,9 +47,11 @@ obj-$(CONFIG_CRYPTO_ARC4) += arc4.o obj-$(CONFIG_CRYPTO_TEA) += tea.o obj-$(CONFIG_CRYPTO_KHAZAD) += khazad.o obj-$(CONFIG_CRYPTO_ANUBIS) += anubis.o +obj-$(CONFIG_CRYPTO_SEED) += seed.o obj-$(CONFIG_CRYPTO_DEFLATE) += deflate.o obj-$(CONFIG_CRYPTO_MICHAEL_MIC) += michael_mic.o obj-$(CONFIG_CRYPTO_CRC32C) += crc32c.o +obj-$(CONFIG_CRYPTO_AUTHENC) += authenc.o obj-$(CONFIG_CRYPTO_TEST) += tcrypt.o diff --git a/crypto/ablkcipher.c b/crypto/ablkcipher.c index 3dbb1cc6eab52f5eeba3767f52a6e8225c8d4173..2731acb86e7d53755bbfd3d7cdc98926844ca887 100644 --- a/crypto/ablkcipher.c +++ b/crypto/ablkcipher.c @@ -16,10 +16,13 @@ #include #include #include +#include #include +#include #include -static int setkey_unaligned(struct crypto_ablkcipher *tfm, const u8 *key, unsigned int keylen) +static int setkey_unaligned(struct crypto_ablkcipher *tfm, const u8 *key, + unsigned int keylen) { struct ablkcipher_alg *cipher = crypto_ablkcipher_alg(tfm); unsigned long alignmask = crypto_ablkcipher_alignmask(tfm); @@ -91,10 +94,6 @@ static void crypto_ablkcipher_show(struct seq_file *m, struct crypto_alg *alg) seq_printf(m, "min keysize : %u\n", ablkcipher->min_keysize); seq_printf(m, "max keysize : %u\n", ablkcipher->max_keysize); seq_printf(m, "ivsize : %u\n", ablkcipher->ivsize); - if (ablkcipher->queue) { - seq_printf(m, "qlen : %u\n", ablkcipher->queue->qlen); - seq_printf(m, "max qlen : %u\n", ablkcipher->queue->max_qlen); - } } const struct crypto_type crypto_ablkcipher_type = { diff --git a/crypto/aead.c b/crypto/aead.c new file mode 100644 index 
0000000000000000000000000000000000000000..84a3501fb478bf8bb9d12c2586e925c59eb46135 --- /dev/null +++ b/crypto/aead.c @@ -0,0 +1,101 @@ +/* + * AEAD: Authenticated Encryption with Associated Data + * + * This file provides API support for AEAD algorithms. + * + * Copyright (c) 2007 Herbert Xu + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. + * + */ + +#include +#include +#include +#include +#include +#include +#include + +static int setkey_unaligned(struct crypto_aead *tfm, const u8 *key, + unsigned int keylen) +{ + struct aead_alg *aead = crypto_aead_alg(tfm); + unsigned long alignmask = crypto_aead_alignmask(tfm); + int ret; + u8 *buffer, *alignbuffer; + unsigned long absize; + + absize = keylen + alignmask; + buffer = kmalloc(absize, GFP_ATOMIC); + if (!buffer) + return -ENOMEM; + + alignbuffer = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1); + memcpy(alignbuffer, key, keylen); + ret = aead->setkey(tfm, alignbuffer, keylen); + memset(alignbuffer, 0, keylen); + kfree(buffer); + return ret; +} + +static int setkey(struct crypto_aead *tfm, const u8 *key, unsigned int keylen) +{ + struct aead_alg *aead = crypto_aead_alg(tfm); + unsigned long alignmask = crypto_aead_alignmask(tfm); + + if ((unsigned long)key & alignmask) + return setkey_unaligned(tfm, key, keylen); + + return aead->setkey(tfm, key, keylen); +} + +static unsigned int crypto_aead_ctxsize(struct crypto_alg *alg, u32 type, + u32 mask) +{ + return alg->cra_ctxsize; +} + +static int crypto_init_aead_ops(struct crypto_tfm *tfm, u32 type, u32 mask) +{ + struct aead_alg *alg = &tfm->__crt_alg->cra_aead; + struct aead_tfm *crt = &tfm->crt_aead; + + if (max(alg->authsize, alg->ivsize) > PAGE_SIZE / 8) + return -EINVAL; + + crt->setkey = setkey; + crt->encrypt = alg->encrypt; + crt->decrypt = alg->decrypt; + crt->ivsize = alg->ivsize; + crt->authsize = alg->authsize; + + return 0; +} + +static void crypto_aead_show(struct seq_file *m, struct crypto_alg *alg) + __attribute__ ((unused)); +static void crypto_aead_show(struct seq_file *m, struct crypto_alg *alg) +{ + struct aead_alg *aead = &alg->cra_aead; + + seq_printf(m, "type : aead\n"); + seq_printf(m, "blocksize : %u\n", alg->cra_blocksize); + seq_printf(m, "ivsize : %u\n", aead->ivsize); + seq_printf(m, "authsize : %u\n", aead->authsize); +} + +const struct crypto_type crypto_aead_type = { + .ctxsize = crypto_aead_ctxsize, + .init = crypto_init_aead_ops, +#ifdef CONFIG_PROC_FS + .show = crypto_aead_show, +#endif +}; +EXPORT_SYMBOL_GPL(crypto_aead_type); + +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("Authenticated Encryption with Associated Data (AEAD)"); diff --git a/crypto/aes.c b/crypto/aes_generic.c similarity index 99% rename from crypto/aes.c rename to crypto/aes_generic.c index e2440773878cc960ff5b22b5240fd735a048e8f8..9401dca85e87c9a5c3908ef6cfbd78fa9a20db89 100644 --- a/crypto/aes.c +++ b/crypto/aes_generic.c @@ -453,4 +453,4 @@ module_exit(aes_fini); MODULE_DESCRIPTION("Rijndael (AES) Cipher Algorithm"); MODULE_LICENSE("Dual BSD/GPL"); - +MODULE_ALIAS("aes"); diff --git a/crypto/algapi.c b/crypto/algapi.c index 38aa9e994703829a40bd6a09542f0cef97b63fef..8ff8c2656d9c492902a5608436c145965f52723f 100644 --- a/crypto/algapi.c +++ b/crypto/algapi.c @@ -63,9 +63,6 @@ static int crypto_check_alg(struct crypto_alg *alg) if (alg->cra_alignmask & (alg->cra_alignmask + 1)) 
return -EINVAL; - if (alg->cra_alignmask & alg->cra_blocksize) - return -EINVAL; - if (alg->cra_blocksize > PAGE_SIZE / 8) return -EINVAL; @@ -152,6 +149,11 @@ static int __crypto_register_alg(struct crypto_alg *alg, if (crypto_is_larval(q)) { struct crypto_larval *larval = (void *)q; + /* + * Check to see if either our generic name or + * specific name can satisfy the name requested + * by the larval entry q. + */ if (strcmp(alg->cra_name, q->cra_name) && strcmp(alg->cra_driver_name, q->cra_name)) continue; @@ -439,13 +441,15 @@ EXPORT_SYMBOL_GPL(crypto_unregister_notifier); struct crypto_attr_type *crypto_get_attr_type(struct rtattr **tb) { - struct rtattr *rta = tb[CRYPTOA_TYPE - 1]; + struct rtattr *rta = tb[0]; struct crypto_attr_type *algt; if (!rta) return ERR_PTR(-ENOENT); if (RTA_PAYLOAD(rta) < sizeof(*algt)) return ERR_PTR(-EINVAL); + if (rta->rta_type != CRYPTOA_TYPE) + return ERR_PTR(-EINVAL); algt = RTA_DATA(rta); @@ -468,22 +472,41 @@ int crypto_check_attr_type(struct rtattr **tb, u32 type) } EXPORT_SYMBOL_GPL(crypto_check_attr_type); -struct crypto_alg *crypto_get_attr_alg(struct rtattr **tb, u32 type, u32 mask) +struct crypto_alg *crypto_attr_alg(struct rtattr *rta, u32 type, u32 mask) { - struct rtattr *rta = tb[CRYPTOA_ALG - 1]; struct crypto_attr_alg *alga; if (!rta) return ERR_PTR(-ENOENT); if (RTA_PAYLOAD(rta) < sizeof(*alga)) return ERR_PTR(-EINVAL); + if (rta->rta_type != CRYPTOA_ALG) + return ERR_PTR(-EINVAL); alga = RTA_DATA(rta); alga->name[CRYPTO_MAX_ALG_NAME - 1] = 0; return crypto_alg_mod_lookup(alga->name, type, mask); } -EXPORT_SYMBOL_GPL(crypto_get_attr_alg); +EXPORT_SYMBOL_GPL(crypto_attr_alg); + +int crypto_attr_u32(struct rtattr *rta, u32 *num) +{ + struct crypto_attr_u32 *nu32; + + if (!rta) + return -ENOENT; + if (RTA_PAYLOAD(rta) < sizeof(*nu32)) + return -EINVAL; + if (rta->rta_type != CRYPTOA_U32) + return -EINVAL; + + nu32 = RTA_DATA(rta); + *num = nu32->num; + + return 0; +} +EXPORT_SYMBOL_GPL(crypto_attr_u32); struct crypto_instance *crypto_alloc_instance(const char *name, struct crypto_alg *alg) diff --git a/crypto/authenc.c b/crypto/authenc.c new file mode 100644 index 0000000000000000000000000000000000000000..0b29a6ae673d257baba2eb5b1a17460e28204f4a --- /dev/null +++ b/crypto/authenc.c @@ -0,0 +1,400 @@ +/* + * Authenc: Simple AEAD wrapper for IPsec + * + * Copyright (c) 2007 Herbert Xu + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. 
+ * + */ + +#include +#include +#include +#include +#include +#include +#include + +#include "scatterwalk.h" + +struct authenc_instance_ctx { + struct crypto_spawn auth; + struct crypto_spawn enc; + + unsigned int authsize; + unsigned int enckeylen; +}; + +struct crypto_authenc_ctx { + spinlock_t auth_lock; + struct crypto_hash *auth; + struct crypto_ablkcipher *enc; +}; + +static int crypto_authenc_setkey(struct crypto_aead *authenc, const u8 *key, + unsigned int keylen) +{ + struct authenc_instance_ctx *ictx = + crypto_instance_ctx(crypto_aead_alg_instance(authenc)); + unsigned int enckeylen = ictx->enckeylen; + unsigned int authkeylen; + struct crypto_authenc_ctx *ctx = crypto_aead_ctx(authenc); + struct crypto_hash *auth = ctx->auth; + struct crypto_ablkcipher *enc = ctx->enc; + int err = -EINVAL; + + if (keylen < enckeylen) { + crypto_aead_set_flags(authenc, CRYPTO_TFM_RES_BAD_KEY_LEN); + goto out; + } + authkeylen = keylen - enckeylen; + + crypto_hash_clear_flags(auth, CRYPTO_TFM_REQ_MASK); + crypto_hash_set_flags(auth, crypto_aead_get_flags(authenc) & + CRYPTO_TFM_REQ_MASK); + err = crypto_hash_setkey(auth, key, authkeylen); + crypto_aead_set_flags(authenc, crypto_hash_get_flags(auth) & + CRYPTO_TFM_RES_MASK); + + if (err) + goto out; + + crypto_ablkcipher_clear_flags(enc, CRYPTO_TFM_REQ_MASK); + crypto_ablkcipher_set_flags(enc, crypto_aead_get_flags(authenc) & + CRYPTO_TFM_REQ_MASK); + err = crypto_ablkcipher_setkey(enc, key + authkeylen, enckeylen); + crypto_aead_set_flags(authenc, crypto_ablkcipher_get_flags(enc) & + CRYPTO_TFM_RES_MASK); + +out: + return err; +} + +static int crypto_authenc_hash(struct aead_request *req) +{ + struct crypto_aead *authenc = crypto_aead_reqtfm(req); + struct authenc_instance_ctx *ictx = + crypto_instance_ctx(crypto_aead_alg_instance(authenc)); + struct crypto_authenc_ctx *ctx = crypto_aead_ctx(authenc); + struct crypto_hash *auth = ctx->auth; + struct hash_desc desc = { + .tfm = auth, + }; + u8 *hash = aead_request_ctx(req); + struct scatterlist *dst; + unsigned int cryptlen; + int err; + + hash = (u8 *)ALIGN((unsigned long)hash + crypto_hash_alignmask(auth), + crypto_hash_alignmask(auth) + 1); + + spin_lock_bh(&ctx->auth_lock); + err = crypto_hash_init(&desc); + if (err) + goto auth_unlock; + + err = crypto_hash_update(&desc, req->assoc, req->assoclen); + if (err) + goto auth_unlock; + + cryptlen = req->cryptlen; + dst = req->dst; + err = crypto_hash_update(&desc, dst, cryptlen); + if (err) + goto auth_unlock; + + err = crypto_hash_final(&desc, hash); +auth_unlock: + spin_unlock_bh(&ctx->auth_lock); + + if (err) + return err; + + scatterwalk_map_and_copy(hash, dst, cryptlen, ictx->authsize, 1); + return 0; +} + +static void crypto_authenc_encrypt_done(struct crypto_async_request *req, + int err) +{ + if (!err) + err = crypto_authenc_hash(req->data); + + aead_request_complete(req->data, err); +} + +static int crypto_authenc_encrypt(struct aead_request *req) +{ + struct crypto_aead *authenc = crypto_aead_reqtfm(req); + struct crypto_authenc_ctx *ctx = crypto_aead_ctx(authenc); + struct ablkcipher_request *abreq = aead_request_ctx(req); + int err; + + ablkcipher_request_set_tfm(abreq, ctx->enc); + ablkcipher_request_set_callback(abreq, aead_request_flags(req), + crypto_authenc_encrypt_done, req); + ablkcipher_request_set_crypt(abreq, req->src, req->dst, req->cryptlen, + req->iv); + + err = crypto_ablkcipher_encrypt(abreq); + if (err) + return err; + + return crypto_authenc_hash(req); +} + +static int crypto_authenc_verify(struct aead_request *req) +{ 
+ struct crypto_aead *authenc = crypto_aead_reqtfm(req); + struct authenc_instance_ctx *ictx = + crypto_instance_ctx(crypto_aead_alg_instance(authenc)); + struct crypto_authenc_ctx *ctx = crypto_aead_ctx(authenc); + struct crypto_hash *auth = ctx->auth; + struct hash_desc desc = { + .tfm = auth, + .flags = aead_request_flags(req), + }; + u8 *ohash = aead_request_ctx(req); + u8 *ihash; + struct scatterlist *src; + unsigned int cryptlen; + unsigned int authsize; + int err; + + ohash = (u8 *)ALIGN((unsigned long)ohash + crypto_hash_alignmask(auth), + crypto_hash_alignmask(auth) + 1); + ihash = ohash + crypto_hash_digestsize(auth); + + spin_lock_bh(&ctx->auth_lock); + err = crypto_hash_init(&desc); + if (err) + goto auth_unlock; + + err = crypto_hash_update(&desc, req->assoc, req->assoclen); + if (err) + goto auth_unlock; + + cryptlen = req->cryptlen; + src = req->src; + err = crypto_hash_update(&desc, src, cryptlen); + if (err) + goto auth_unlock; + + err = crypto_hash_final(&desc, ohash); +auth_unlock: + spin_unlock_bh(&ctx->auth_lock); + + if (err) + return err; + + authsize = ictx->authsize; + scatterwalk_map_and_copy(ihash, src, cryptlen, authsize, 0); + return memcmp(ihash, ohash, authsize) ? -EINVAL : 0; +} + +static void crypto_authenc_decrypt_done(struct crypto_async_request *req, + int err) +{ + aead_request_complete(req->data, err); +} + +static int crypto_authenc_decrypt(struct aead_request *req) +{ + struct crypto_aead *authenc = crypto_aead_reqtfm(req); + struct crypto_authenc_ctx *ctx = crypto_aead_ctx(authenc); + struct ablkcipher_request *abreq = aead_request_ctx(req); + int err; + + err = crypto_authenc_verify(req); + if (err) + return err; + + ablkcipher_request_set_tfm(abreq, ctx->enc); + ablkcipher_request_set_callback(abreq, aead_request_flags(req), + crypto_authenc_decrypt_done, req); + ablkcipher_request_set_crypt(abreq, req->src, req->dst, req->cryptlen, + req->iv); + + return crypto_ablkcipher_decrypt(abreq); +} + +static int crypto_authenc_init_tfm(struct crypto_tfm *tfm) +{ + struct crypto_instance *inst = (void *)tfm->__crt_alg; + struct authenc_instance_ctx *ictx = crypto_instance_ctx(inst); + struct crypto_authenc_ctx *ctx = crypto_tfm_ctx(tfm); + struct crypto_hash *auth; + struct crypto_ablkcipher *enc; + unsigned int digestsize; + int err; + + auth = crypto_spawn_hash(&ictx->auth); + if (IS_ERR(auth)) + return PTR_ERR(auth); + + err = -EINVAL; + digestsize = crypto_hash_digestsize(auth); + if (ictx->authsize > digestsize) + goto err_free_hash; + + enc = crypto_spawn_ablkcipher(&ictx->enc); + err = PTR_ERR(enc); + if (IS_ERR(enc)) + goto err_free_hash; + + ctx->auth = auth; + ctx->enc = enc; + tfm->crt_aead.reqsize = max_t(unsigned int, + (crypto_hash_alignmask(auth) & + ~(crypto_tfm_ctx_alignment() - 1)) + + digestsize * 2, + sizeof(struct ablkcipher_request) + + crypto_ablkcipher_reqsize(enc)); + + spin_lock_init(&ctx->auth_lock); + + return 0; + +err_free_hash: + crypto_free_hash(auth); + return err; +} + +static void crypto_authenc_exit_tfm(struct crypto_tfm *tfm) +{ + struct crypto_authenc_ctx *ctx = crypto_tfm_ctx(tfm); + + crypto_free_hash(ctx->auth); + crypto_free_ablkcipher(ctx->enc); +} + +static struct crypto_instance *crypto_authenc_alloc(struct rtattr **tb) +{ + struct crypto_instance *inst; + struct crypto_alg *auth; + struct crypto_alg *enc; + struct authenc_instance_ctx *ctx; + unsigned int authsize; + unsigned int enckeylen; + int err; + + err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_AEAD); + if (err) + return ERR_PTR(err); + + auth = 
crypto_attr_alg(tb[1], CRYPTO_ALG_TYPE_HASH, + CRYPTO_ALG_TYPE_HASH_MASK); + if (IS_ERR(auth)) + return ERR_PTR(PTR_ERR(auth)); + + err = crypto_attr_u32(tb[2], &authsize); + inst = ERR_PTR(err); + if (err) + goto out_put_auth; + + enc = crypto_attr_alg(tb[3], CRYPTO_ALG_TYPE_BLKCIPHER, + CRYPTO_ALG_TYPE_MASK); + inst = ERR_PTR(PTR_ERR(enc)); + if (IS_ERR(enc)) + goto out_put_auth; + + err = crypto_attr_u32(tb[4], &enckeylen); + if (err) + goto out_put_enc; + + inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL); + err = -ENOMEM; + if (!inst) + goto out_put_enc; + + err = -ENAMETOOLONG; + if (snprintf(inst->alg.cra_name, CRYPTO_MAX_ALG_NAME, + "authenc(%s,%u,%s,%u)", auth->cra_name, authsize, + enc->cra_name, enckeylen) >= CRYPTO_MAX_ALG_NAME) + goto err_free_inst; + + if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME, + "authenc(%s,%u,%s,%u)", auth->cra_driver_name, + authsize, enc->cra_driver_name, enckeylen) >= + CRYPTO_MAX_ALG_NAME) + goto err_free_inst; + + ctx = crypto_instance_ctx(inst); + ctx->authsize = authsize; + ctx->enckeylen = enckeylen; + + err = crypto_init_spawn(&ctx->auth, auth, inst, CRYPTO_ALG_TYPE_MASK); + if (err) + goto err_free_inst; + + err = crypto_init_spawn(&ctx->enc, enc, inst, CRYPTO_ALG_TYPE_MASK); + if (err) + goto err_drop_auth; + + inst->alg.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC; + inst->alg.cra_priority = enc->cra_priority * 10 + auth->cra_priority; + inst->alg.cra_blocksize = enc->cra_blocksize; + inst->alg.cra_alignmask = max(auth->cra_alignmask, enc->cra_alignmask); + inst->alg.cra_type = &crypto_aead_type; + + inst->alg.cra_aead.ivsize = enc->cra_blkcipher.ivsize; + inst->alg.cra_aead.authsize = authsize; + + inst->alg.cra_ctxsize = sizeof(struct crypto_authenc_ctx); + + inst->alg.cra_init = crypto_authenc_init_tfm; + inst->alg.cra_exit = crypto_authenc_exit_tfm; + + inst->alg.cra_aead.setkey = crypto_authenc_setkey; + inst->alg.cra_aead.encrypt = crypto_authenc_encrypt; + inst->alg.cra_aead.decrypt = crypto_authenc_decrypt; + +out: + crypto_mod_put(enc); +out_put_auth: + crypto_mod_put(auth); + return inst; + +err_drop_auth: + crypto_drop_spawn(&ctx->auth); +err_free_inst: + kfree(inst); +out_put_enc: + inst = ERR_PTR(err); + goto out; +} + +static void crypto_authenc_free(struct crypto_instance *inst) +{ + struct authenc_instance_ctx *ctx = crypto_instance_ctx(inst); + + crypto_drop_spawn(&ctx->enc); + crypto_drop_spawn(&ctx->auth); + kfree(inst); +} + +static struct crypto_template crypto_authenc_tmpl = { + .name = "authenc", + .alloc = crypto_authenc_alloc, + .free = crypto_authenc_free, + .module = THIS_MODULE, +}; + +static int __init crypto_authenc_module_init(void) +{ + return crypto_register_template(&crypto_authenc_tmpl); +} + +static void __exit crypto_authenc_module_exit(void) +{ + crypto_unregister_template(&crypto_authenc_tmpl); +} + +module_init(crypto_authenc_module_init); +module_exit(crypto_authenc_module_exit); + +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("Simple AEAD wrapper for IPsec"); diff --git a/crypto/blkcipher.c b/crypto/blkcipher.c index d8f8ec320213a4fc4656ad9340b30ddc904328a0..f6c67f9d4e5c2f9085558991a6fc87893e269670 100644 --- a/crypto/blkcipher.c +++ b/crypto/blkcipher.c @@ -65,7 +65,7 @@ static inline void blkcipher_unmap_dst(struct blkcipher_walk *walk) static inline u8 *blkcipher_get_spot(u8 *start, unsigned int len) { u8 *end_page = (u8 *)(((unsigned long)(start + len - 1)) & PAGE_MASK); - return start > end_page ? 
start : end_page; + return max(start, end_page); } static inline unsigned int blkcipher_done_slow(struct crypto_blkcipher *tfm, @@ -84,8 +84,6 @@ static inline unsigned int blkcipher_done_slow(struct crypto_blkcipher *tfm, static inline unsigned int blkcipher_done_fast(struct blkcipher_walk *walk, unsigned int n) { - n = walk->nbytes - n; - if (walk->flags & BLKCIPHER_WALK_COPY) { blkcipher_map_dst(walk); memcpy(walk->dst.virt.addr, walk->page, n); @@ -109,13 +107,15 @@ int blkcipher_walk_done(struct blkcipher_desc *desc, unsigned int nbytes = 0; if (likely(err >= 0)) { - unsigned int bsize = crypto_blkcipher_blocksize(tfm); - unsigned int n; + unsigned int n = walk->nbytes - err; if (likely(!(walk->flags & BLKCIPHER_WALK_SLOW))) - n = blkcipher_done_fast(walk, err); - else - n = blkcipher_done_slow(tfm, walk, bsize); + n = blkcipher_done_fast(walk, n); + else if (WARN_ON(err)) { + err = -EINVAL; + goto err; + } else + n = blkcipher_done_slow(tfm, walk, n); nbytes = walk->total - n; err = 0; @@ -132,6 +132,7 @@ int blkcipher_walk_done(struct blkcipher_desc *desc, return blkcipher_walk_next(desc, walk); } +err: if (walk->iv != desc->info) memcpy(desc->info, walk->iv, crypto_blkcipher_ivsize(tfm)); if (walk->buffer != walk->page) @@ -149,6 +150,7 @@ static inline int blkcipher_next_slow(struct blkcipher_desc *desc, unsigned int alignmask) { unsigned int n; + unsigned aligned_bsize = ALIGN(bsize, alignmask + 1); if (walk->buffer) goto ok; @@ -157,7 +159,7 @@ static inline int blkcipher_next_slow(struct blkcipher_desc *desc, if (walk->buffer) goto ok; - n = bsize * 3 - (alignmask + 1) + + n = aligned_bsize * 3 - (alignmask + 1) + (alignmask & ~(crypto_tfm_ctx_alignment() - 1)); walk->buffer = kmalloc(n, GFP_ATOMIC); if (!walk->buffer) @@ -167,8 +169,8 @@ static inline int blkcipher_next_slow(struct blkcipher_desc *desc, walk->dst.virt.addr = (u8 *)ALIGN((unsigned long)walk->buffer, alignmask + 1); walk->dst.virt.addr = blkcipher_get_spot(walk->dst.virt.addr, bsize); - walk->src.virt.addr = blkcipher_get_spot(walk->dst.virt.addr + bsize, - bsize); + walk->src.virt.addr = blkcipher_get_spot(walk->dst.virt.addr + + aligned_bsize, bsize); scatterwalk_copychunks(walk->src.virt.addr, &walk->in, bsize, 0); @@ -224,12 +226,12 @@ static int blkcipher_walk_next(struct blkcipher_desc *desc, { struct crypto_blkcipher *tfm = desc->tfm; unsigned int alignmask = crypto_blkcipher_alignmask(tfm); - unsigned int bsize = crypto_blkcipher_blocksize(tfm); + unsigned int bsize; unsigned int n; int err; n = walk->total; - if (unlikely(n < bsize)) { + if (unlikely(n < crypto_blkcipher_blocksize(tfm))) { desc->flags |= CRYPTO_TFM_RES_BAD_BLOCK_LEN; return blkcipher_walk_done(desc, walk, -EINVAL); } @@ -246,6 +248,7 @@ static int blkcipher_walk_next(struct blkcipher_desc *desc, } } + bsize = min(walk->blocksize, n); n = scatterwalk_clamp(&walk->in, n); n = scatterwalk_clamp(&walk->out, n); @@ -276,9 +279,11 @@ static inline int blkcipher_copy_iv(struct blkcipher_walk *walk, struct crypto_blkcipher *tfm, unsigned int alignmask) { - unsigned bs = crypto_blkcipher_blocksize(tfm); + unsigned bs = walk->blocksize; unsigned int ivsize = crypto_blkcipher_ivsize(tfm); - unsigned int size = bs * 2 + ivsize + max(bs, ivsize) - (alignmask + 1); + unsigned aligned_bs = ALIGN(bs, alignmask + 1); + unsigned int size = aligned_bs * 2 + ivsize + max(aligned_bs, ivsize) - + (alignmask + 1); u8 *iv; size += alignmask & ~(crypto_tfm_ctx_alignment() - 1); @@ -287,8 +292,8 @@ static inline int blkcipher_copy_iv(struct blkcipher_walk *walk, 
return -ENOMEM; iv = (u8 *)ALIGN((unsigned long)walk->buffer, alignmask + 1); - iv = blkcipher_get_spot(iv, bs) + bs; - iv = blkcipher_get_spot(iv, bs) + bs; + iv = blkcipher_get_spot(iv, bs) + aligned_bs; + iv = blkcipher_get_spot(iv, bs) + aligned_bs; iv = blkcipher_get_spot(iv, ivsize); walk->iv = memcpy(iv, walk->iv, ivsize); @@ -299,6 +304,7 @@ int blkcipher_walk_virt(struct blkcipher_desc *desc, struct blkcipher_walk *walk) { walk->flags &= ~BLKCIPHER_WALK_PHYS; + walk->blocksize = crypto_blkcipher_blocksize(desc->tfm); return blkcipher_walk_first(desc, walk); } EXPORT_SYMBOL_GPL(blkcipher_walk_virt); @@ -307,6 +313,7 @@ int blkcipher_walk_phys(struct blkcipher_desc *desc, struct blkcipher_walk *walk) { walk->flags |= BLKCIPHER_WALK_PHYS; + walk->blocksize = crypto_blkcipher_blocksize(desc->tfm); return blkcipher_walk_first(desc, walk); } EXPORT_SYMBOL_GPL(blkcipher_walk_phys); @@ -339,7 +346,18 @@ static int blkcipher_walk_first(struct blkcipher_desc *desc, return blkcipher_walk_next(desc, walk); } -static int setkey_unaligned(struct crypto_tfm *tfm, const u8 *key, unsigned int keylen) +int blkcipher_walk_virt_block(struct blkcipher_desc *desc, + struct blkcipher_walk *walk, + unsigned int blocksize) +{ + walk->flags &= ~BLKCIPHER_WALK_PHYS; + walk->blocksize = blocksize; + return blkcipher_walk_first(desc, walk); +} +EXPORT_SYMBOL_GPL(blkcipher_walk_virt_block); + +static int setkey_unaligned(struct crypto_tfm *tfm, const u8 *key, + unsigned int keylen) { struct blkcipher_alg *cipher = &tfm->__crt_alg->cra_blkcipher; unsigned long alignmask = crypto_tfm_alg_alignmask(tfm); @@ -360,8 +378,7 @@ static int setkey_unaligned(struct crypto_tfm *tfm, const u8 *key, unsigned int return ret; } -static int setkey(struct crypto_tfm *tfm, const u8 *key, - unsigned int keylen) +static int setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int keylen) { struct blkcipher_alg *cipher = &tfm->__crt_alg->cra_blkcipher; unsigned long alignmask = crypto_tfm_alg_alignmask(tfm); diff --git a/crypto/cipher.c b/crypto/cipher.c index fc6b46f2a9b0d495724f32113e18652498cf1035..9a1a7316eeacf6f71befe1b149283a4350db0c56 100644 --- a/crypto/cipher.c +++ b/crypto/cipher.c @@ -16,11 +16,12 @@ #include #include #include -#include +#include #include #include "internal.h" -static int setkey_unaligned(struct crypto_tfm *tfm, const u8 *key, unsigned int keylen) +static int setkey_unaligned(struct crypto_tfm *tfm, const u8 *key, + unsigned int keylen) { struct cipher_alg *cia = &tfm->__crt_alg->cra_cipher; unsigned long alignmask = crypto_tfm_alg_alignmask(tfm); diff --git a/crypto/cryptd.c b/crypto/cryptd.c index ac6dce2e7596c4518287bdc355c088eb1a1e5a37..8bf2da835f7beae4513e164b59622d56e0387a76 100644 --- a/crypto/cryptd.c +++ b/crypto/cryptd.c @@ -131,7 +131,7 @@ static int cryptd_blkcipher_enqueue(struct ablkcipher_request *req, req->base.complete = complete; spin_lock_bh(&state->lock); - err = ablkcipher_enqueue_request(crypto_ablkcipher_alg(tfm), req); + err = ablkcipher_enqueue_request(&state->queue, req); spin_unlock_bh(&state->lock); wake_up_process(state->task); @@ -173,7 +173,8 @@ static void cryptd_blkcipher_exit_tfm(struct crypto_tfm *tfm) int active; mutex_lock(&state->mutex); - active = ablkcipher_tfm_in_queue(__crypto_ablkcipher_cast(tfm)); + active = ablkcipher_tfm_in_queue(&state->queue, + __crypto_ablkcipher_cast(tfm)); mutex_unlock(&state->mutex); BUG_ON(active); @@ -251,8 +252,6 @@ static struct crypto_instance *cryptd_alloc_blkcipher( inst->alg.cra_ablkcipher.encrypt = 
cryptd_blkcipher_encrypt_enqueue; inst->alg.cra_ablkcipher.decrypt = cryptd_blkcipher_decrypt_enqueue; - inst->alg.cra_ablkcipher.queue = &state->queue; - out_put_alg: crypto_mod_put(alg); return inst; diff --git a/crypto/cryptomgr.c b/crypto/cryptomgr.c index e5fb7cca5107d742ccd999bc87eaaf024a204f07..e5e3cf848d425ad0c3fa8584e323b51d9135221b 100644 --- a/crypto/cryptomgr.c +++ b/crypto/cryptomgr.c @@ -24,22 +24,26 @@ #include "internal.h" struct cryptomgr_param { - struct rtattr *tb[CRYPTOA_MAX]; + struct rtattr *tb[CRYPTO_MAX_ATTRS + 2]; struct { struct rtattr attr; struct crypto_attr_type data; } type; - struct { + union { struct rtattr attr; - struct crypto_attr_alg data; - } alg; - - struct { - char name[CRYPTO_MAX_ALG_NAME]; - } larval; - + struct { + struct rtattr attr; + struct crypto_attr_alg data; + } alg; + struct { + struct rtattr attr; + struct crypto_attr_u32 data; + } nu32; + } attrs[CRYPTO_MAX_ATTRS]; + + char larval[CRYPTO_MAX_ALG_NAME]; char template[CRYPTO_MAX_ALG_NAME]; }; @@ -72,7 +76,7 @@ static int cryptomgr_probe(void *data) module_put_and_exit(0); err: - crypto_larval_error(param->larval.name, param->type.data.type, + crypto_larval_error(param->larval, param->type.data.type, param->type.data.mask); goto out; } @@ -84,6 +88,7 @@ static int cryptomgr_schedule_probe(struct crypto_larval *larval) const char *name = larval->alg.cra_name; const char *p; unsigned int len; + int i; if (!try_module_get(THIS_MODULE)) goto err; @@ -101,33 +106,74 @@ static int cryptomgr_schedule_probe(struct crypto_larval *larval) memcpy(param->template, name, len); - name = p + 1; - len = 0; - for (p = name; *p; p++) { - for (; isalnum(*p) || *p == '-' || *p == '_' || *p == '('; p++) - ; + i = 0; + for (;;) { + int notnum = 0; - if (*p != ')') - goto err_free_param; + name = ++p; + len = 0; + + for (; isalnum(*p) || *p == '-' || *p == '_'; p++) + notnum |= !isdigit(*p); + + if (*p == '(') { + int recursion = 0; + + for (;;) { + if (!*++p) + goto err_free_param; + if (*p == '(') + recursion++; + else if (*p == ')' && !recursion--) + break; + } + + notnum = 1; + p++; + } len = p - name; + if (!len) + goto err_free_param; + + if (notnum) { + param->attrs[i].alg.attr.rta_len = + sizeof(param->attrs[i].alg); + param->attrs[i].alg.attr.rta_type = CRYPTOA_ALG; + memcpy(param->attrs[i].alg.data.name, name, len); + } else { + param->attrs[i].nu32.attr.rta_len = + sizeof(param->attrs[i].nu32); + param->attrs[i].nu32.attr.rta_type = CRYPTOA_U32; + param->attrs[i].nu32.data.num = + simple_strtol(name, NULL, 0); + } + + param->tb[i + 1] = ¶m->attrs[i].attr; + i++; + + if (i >= CRYPTO_MAX_ATTRS) + goto err_free_param; + + if (*p == ')') + break; + + if (*p != ',') + goto err_free_param; } - if (!len || name[len + 1]) + if (!i) goto err_free_param; + param->tb[i + 1] = NULL; + param->type.attr.rta_len = sizeof(param->type); param->type.attr.rta_type = CRYPTOA_TYPE; param->type.data.type = larval->alg.cra_flags; param->type.data.mask = larval->mask; - param->tb[CRYPTOA_TYPE - 1] = ¶m->type.attr; - - param->alg.attr.rta_len = sizeof(param->alg); - param->alg.attr.rta_type = CRYPTOA_ALG; - memcpy(param->alg.data.name, name, len); - param->tb[CRYPTOA_ALG - 1] = ¶m->alg.attr; + param->tb[0] = ¶m->type.attr; - memcpy(param->larval.name, larval->alg.cra_name, CRYPTO_MAX_ALG_NAME); + memcpy(param->larval, larval->alg.cra_name, CRYPTO_MAX_ALG_NAME); thread = kthread_run(cryptomgr_probe, param, "cryptomgr"); if (IS_ERR(thread)) diff --git a/crypto/des.c b/crypto/des_generic.c similarity index 99% rename from 
crypto/des.c rename to crypto/des_generic.c index 1df3a714fa47fa5e0b3362281556b0b14a94ed99..59966d14b8e0dbb672bfff2ebc4ee8de2b752742 100644 --- a/crypto/des.c +++ b/crypto/des_generic.c @@ -1009,3 +1009,4 @@ module_exit(fini); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("DES & Triple DES EDE Cipher Algorithms"); MODULE_AUTHOR("Dag Arne Osvik "); +MODULE_ALIAS("des"); diff --git a/crypto/gf128mul.c b/crypto/gf128mul.c index 0a2aadfa1d850378cc2e2ec6884f8b89c2676318..ecbeaa1f17e1eae7f17b04c05e922283b0472dd1 100644 --- a/crypto/gf128mul.c +++ b/crypto/gf128mul.c @@ -142,6 +142,17 @@ static void gf128mul_x_bbe(be128 *r, const be128 *x) r->b = cpu_to_be64((b << 1) ^ _tt); } +void gf128mul_x_ble(be128 *r, const be128 *x) +{ + u64 a = le64_to_cpu(x->a); + u64 b = le64_to_cpu(x->b); + u64 _tt = gf128mul_table_bbe[b >> 63]; + + r->a = cpu_to_le64((a << 1) ^ _tt); + r->b = cpu_to_le64((b << 1) | (a >> 63)); +} +EXPORT_SYMBOL(gf128mul_x_ble); + static void gf128mul_x8_lle(be128 *x) { u64 a = be64_to_cpu(x->a); diff --git a/crypto/hash.c b/crypto/hash.c index 4fd470bd729bef90bd9f606dbf43b9367f374165..7dcff671c19b6d91a6127467b57c4db91a23fff4 100644 --- a/crypto/hash.c +++ b/crypto/hash.c @@ -12,6 +12,7 @@ #include #include #include +#include #include #include "internal.h" @@ -46,7 +47,7 @@ static int hash_setkey_unaligned(struct crypto_hash *crt, const u8 *key, } static int hash_setkey(struct crypto_hash *crt, const u8 *key, - unsigned int keylen) + unsigned int keylen) { struct crypto_tfm *tfm = crypto_hash_tfm(crt); struct hash_alg *alg = &tfm->__crt_alg->cra_hash; diff --git a/crypto/internal.h b/crypto/internal.h index 60acad9788c50104b7e627aafbb3cd4c0ab57998..abb01f71f817a80192a449439ed0ecd0565d60c6 100644 --- a/crypto/internal.h +++ b/crypto/internal.h @@ -50,11 +50,16 @@ extern struct list_head crypto_alg_list; extern struct rw_semaphore crypto_alg_sem; extern struct blocking_notifier_head crypto_chain; -extern enum km_type crypto_km_types[]; - static inline enum km_type crypto_kmap_type(int out) { - return crypto_km_types[(in_softirq() ? 2 : 0) + out]; + enum km_type type; + + if (in_softirq()) + type = out * (KM_SOFTIRQ1 - KM_SOFTIRQ0) + KM_SOFTIRQ0; + else + type = out * (KM_USER1 - KM_USER0) + KM_USER0; + + return type; } static inline void *crypto_kmap(struct page *page, int out) diff --git a/crypto/scatterwalk.c b/crypto/scatterwalk.c index 81afd1790a1d1c28fc8a7e2569940401a668d5ac..3052f6507f53f3118de2adfc818da7d56e78b3f4 100644 --- a/crypto/scatterwalk.c +++ b/crypto/scatterwalk.c @@ -23,14 +23,6 @@ #include "internal.h" #include "scatterwalk.h" -enum km_type crypto_km_types[] = { - KM_USER0, - KM_USER1, - KM_SOFTIRQ0, - KM_SOFTIRQ1, -}; -EXPORT_SYMBOL_GPL(crypto_km_types); - static inline void memcpy_dir(void *buf, void *sgdata, size_t nbytes, int out) { void *src = out ? 
buf : sgdata; @@ -107,3 +99,25 @@ void scatterwalk_copychunks(void *buf, struct scatter_walk *walk, } } EXPORT_SYMBOL_GPL(scatterwalk_copychunks); + +void scatterwalk_map_and_copy(void *buf, struct scatterlist *sg, + unsigned int start, unsigned int nbytes, int out) +{ + struct scatter_walk walk; + unsigned int offset = 0; + + for (;;) { + scatterwalk_start(&walk, sg); + + if (start < offset + sg->length) + break; + + offset += sg->length; + sg = sg_next(sg); + } + + scatterwalk_advance(&walk, start - offset); + scatterwalk_copychunks(buf, &walk, nbytes, out); + scatterwalk_done(&walk, out, 0); +} +EXPORT_SYMBOL_GPL(scatterwalk_map_and_copy); diff --git a/crypto/scatterwalk.h b/crypto/scatterwalk.h index f1592cc2d0f42bb76132667fe69cf3f5e61bb8cb..500a220ad908cbad83bc8f1a285a03c169d71961 100644 --- a/crypto/scatterwalk.h +++ b/crypto/scatterwalk.h @@ -74,4 +74,7 @@ void scatterwalk_copychunks(void *buf, struct scatter_walk *walk, void *scatterwalk_map(struct scatter_walk *walk, int out); void scatterwalk_done(struct scatter_walk *walk, int out, int more); +void scatterwalk_map_and_copy(void *buf, struct scatterlist *sg, + unsigned int start, unsigned int nbytes, int out); + #endif /* _CRYPTO_SCATTERWALK_H */ diff --git a/crypto/seed.c b/crypto/seed.c new file mode 100644 index 0000000000000000000000000000000000000000..d3e422f6055621aea9a6f6c78a08738466ace49d --- /dev/null +++ b/crypto/seed.c @@ -0,0 +1,479 @@ +/* + * Cryptographic API. + * + * SEED Cipher Algorithm. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * Documentation of SEED can be found in RFC 4269. + * Copyright (C) 2007 Korea Information Security Agency (KISA). 
+ */ + +#include +#include +#include +#include +#include +#include + +#define SEED_NUM_KCONSTANTS 16 +#define SEED_KEY_SIZE 16 +#define SEED_BLOCK_SIZE 16 +#define SEED_KEYSCHED_LEN 32 + +/* + * #define byte(x, nr) ((unsigned char)((x) >> (nr*8))) + */ +static inline u8 +byte(const u32 x, const unsigned n) +{ + return x >> (n << 3); +} + +struct seed_ctx { + u32 keysched[SEED_KEYSCHED_LEN]; +}; + +static const u32 SS0[256] = { + 0x2989a1a8, 0x05858184, 0x16c6d2d4, 0x13c3d3d0, + 0x14445054, 0x1d0d111c, 0x2c8ca0ac, 0x25052124, + 0x1d4d515c, 0x03434340, 0x18081018, 0x1e0e121c, + 0x11415150, 0x3cccf0fc, 0x0acac2c8, 0x23436360, + 0x28082028, 0x04444044, 0x20002020, 0x1d8d919c, + 0x20c0e0e0, 0x22c2e2e0, 0x08c8c0c8, 0x17071314, + 0x2585a1a4, 0x0f8f838c, 0x03030300, 0x3b4b7378, + 0x3b8bb3b8, 0x13031310, 0x12c2d2d0, 0x2ecee2ec, + 0x30407070, 0x0c8c808c, 0x3f0f333c, 0x2888a0a8, + 0x32023230, 0x1dcdd1dc, 0x36c6f2f4, 0x34447074, + 0x2ccce0ec, 0x15859194, 0x0b0b0308, 0x17475354, + 0x1c4c505c, 0x1b4b5358, 0x3d8db1bc, 0x01010100, + 0x24042024, 0x1c0c101c, 0x33437370, 0x18889098, + 0x10001010, 0x0cccc0cc, 0x32c2f2f0, 0x19c9d1d8, + 0x2c0c202c, 0x27c7e3e4, 0x32427270, 0x03838380, + 0x1b8b9398, 0x11c1d1d0, 0x06868284, 0x09c9c1c8, + 0x20406060, 0x10405050, 0x2383a3a0, 0x2bcbe3e8, + 0x0d0d010c, 0x3686b2b4, 0x1e8e929c, 0x0f4f434c, + 0x3787b3b4, 0x1a4a5258, 0x06c6c2c4, 0x38487078, + 0x2686a2a4, 0x12021210, 0x2f8fa3ac, 0x15c5d1d4, + 0x21416160, 0x03c3c3c0, 0x3484b0b4, 0x01414140, + 0x12425250, 0x3d4d717c, 0x0d8d818c, 0x08080008, + 0x1f0f131c, 0x19899198, 0x00000000, 0x19091118, + 0x04040004, 0x13435350, 0x37c7f3f4, 0x21c1e1e0, + 0x3dcdf1fc, 0x36467274, 0x2f0f232c, 0x27072324, + 0x3080b0b0, 0x0b8b8388, 0x0e0e020c, 0x2b8ba3a8, + 0x2282a2a0, 0x2e4e626c, 0x13839390, 0x0d4d414c, + 0x29496168, 0x3c4c707c, 0x09090108, 0x0a0a0208, + 0x3f8fb3bc, 0x2fcfe3ec, 0x33c3f3f0, 0x05c5c1c4, + 0x07878384, 0x14041014, 0x3ecef2fc, 0x24446064, + 0x1eced2dc, 0x2e0e222c, 0x0b4b4348, 0x1a0a1218, + 0x06060204, 0x21012120, 0x2b4b6368, 0x26466264, + 0x02020200, 0x35c5f1f4, 0x12829290, 0x0a8a8288, + 0x0c0c000c, 0x3383b3b0, 0x3e4e727c, 0x10c0d0d0, + 0x3a4a7278, 0x07474344, 0x16869294, 0x25c5e1e4, + 0x26062224, 0x00808080, 0x2d8da1ac, 0x1fcfd3dc, + 0x2181a1a0, 0x30003030, 0x37073334, 0x2e8ea2ac, + 0x36063234, 0x15051114, 0x22022220, 0x38083038, + 0x34c4f0f4, 0x2787a3a4, 0x05454144, 0x0c4c404c, + 0x01818180, 0x29c9e1e8, 0x04848084, 0x17879394, + 0x35053134, 0x0bcbc3c8, 0x0ecec2cc, 0x3c0c303c, + 0x31417170, 0x11011110, 0x07c7c3c4, 0x09898188, + 0x35457174, 0x3bcbf3f8, 0x1acad2d8, 0x38c8f0f8, + 0x14849094, 0x19495158, 0x02828280, 0x04c4c0c4, + 0x3fcff3fc, 0x09494148, 0x39093138, 0x27476364, + 0x00c0c0c0, 0x0fcfc3cc, 0x17c7d3d4, 0x3888b0b8, + 0x0f0f030c, 0x0e8e828c, 0x02424240, 0x23032320, + 0x11819190, 0x2c4c606c, 0x1bcbd3d8, 0x2484a0a4, + 0x34043034, 0x31c1f1f0, 0x08484048, 0x02c2c2c0, + 0x2f4f636c, 0x3d0d313c, 0x2d0d212c, 0x00404040, + 0x3e8eb2bc, 0x3e0e323c, 0x3c8cb0bc, 0x01c1c1c0, + 0x2a8aa2a8, 0x3a8ab2b8, 0x0e4e424c, 0x15455154, + 0x3b0b3338, 0x1cccd0dc, 0x28486068, 0x3f4f737c, + 0x1c8c909c, 0x18c8d0d8, 0x0a4a4248, 0x16465254, + 0x37477374, 0x2080a0a0, 0x2dcde1ec, 0x06464244, + 0x3585b1b4, 0x2b0b2328, 0x25456164, 0x3acaf2f8, + 0x23c3e3e0, 0x3989b1b8, 0x3181b1b0, 0x1f8f939c, + 0x1e4e525c, 0x39c9f1f8, 0x26c6e2e4, 0x3282b2b0, + 0x31013130, 0x2acae2e8, 0x2d4d616c, 0x1f4f535c, + 0x24c4e0e4, 0x30c0f0f0, 0x0dcdc1cc, 0x08888088, + 0x16061214, 0x3a0a3238, 0x18485058, 0x14c4d0d4, + 0x22426260, 0x29092128, 0x07070304, 0x33033330, + 0x28c8e0e8, 
0x1b0b1318, 0x05050104, 0x39497178, + 0x10809090, 0x2a4a6268, 0x2a0a2228, 0x1a8a9298, +}; + +static const u32 SS1[256] = { + 0x38380830, 0xe828c8e0, 0x2c2d0d21, 0xa42686a2, + 0xcc0fcfc3, 0xdc1eced2, 0xb03383b3, 0xb83888b0, + 0xac2f8fa3, 0x60204060, 0x54154551, 0xc407c7c3, + 0x44044440, 0x6c2f4f63, 0x682b4b63, 0x581b4b53, + 0xc003c3c3, 0x60224262, 0x30330333, 0xb43585b1, + 0x28290921, 0xa02080a0, 0xe022c2e2, 0xa42787a3, + 0xd013c3d3, 0x90118191, 0x10110111, 0x04060602, + 0x1c1c0c10, 0xbc3c8cb0, 0x34360632, 0x480b4b43, + 0xec2fcfe3, 0x88088880, 0x6c2c4c60, 0xa82888a0, + 0x14170713, 0xc404c4c0, 0x14160612, 0xf434c4f0, + 0xc002c2c2, 0x44054541, 0xe021c1e1, 0xd416c6d2, + 0x3c3f0f33, 0x3c3d0d31, 0x8c0e8e82, 0x98188890, + 0x28280820, 0x4c0e4e42, 0xf436c6f2, 0x3c3e0e32, + 0xa42585a1, 0xf839c9f1, 0x0c0d0d01, 0xdc1fcfd3, + 0xd818c8d0, 0x282b0b23, 0x64264662, 0x783a4a72, + 0x24270723, 0x2c2f0f23, 0xf031c1f1, 0x70324272, + 0x40024242, 0xd414c4d0, 0x40014141, 0xc000c0c0, + 0x70334373, 0x64274763, 0xac2c8ca0, 0x880b8b83, + 0xf437c7f3, 0xac2d8da1, 0x80008080, 0x1c1f0f13, + 0xc80acac2, 0x2c2c0c20, 0xa82a8aa2, 0x34340430, + 0xd012c2d2, 0x080b0b03, 0xec2ecee2, 0xe829c9e1, + 0x5c1d4d51, 0x94148490, 0x18180810, 0xf838c8f0, + 0x54174753, 0xac2e8ea2, 0x08080800, 0xc405c5c1, + 0x10130313, 0xcc0dcdc1, 0x84068682, 0xb83989b1, + 0xfc3fcff3, 0x7c3d4d71, 0xc001c1c1, 0x30310131, + 0xf435c5f1, 0x880a8a82, 0x682a4a62, 0xb03181b1, + 0xd011c1d1, 0x20200020, 0xd417c7d3, 0x00020202, + 0x20220222, 0x04040400, 0x68284860, 0x70314171, + 0x04070703, 0xd81bcbd3, 0x9c1d8d91, 0x98198991, + 0x60214161, 0xbc3e8eb2, 0xe426c6e2, 0x58194951, + 0xdc1dcdd1, 0x50114151, 0x90108090, 0xdc1cccd0, + 0x981a8a92, 0xa02383a3, 0xa82b8ba3, 0xd010c0d0, + 0x80018181, 0x0c0f0f03, 0x44074743, 0x181a0a12, + 0xe023c3e3, 0xec2ccce0, 0x8c0d8d81, 0xbc3f8fb3, + 0x94168692, 0x783b4b73, 0x5c1c4c50, 0xa02282a2, + 0xa02181a1, 0x60234363, 0x20230323, 0x4c0d4d41, + 0xc808c8c0, 0x9c1e8e92, 0x9c1c8c90, 0x383a0a32, + 0x0c0c0c00, 0x2c2e0e22, 0xb83a8ab2, 0x6c2e4e62, + 0x9c1f8f93, 0x581a4a52, 0xf032c2f2, 0x90128292, + 0xf033c3f3, 0x48094941, 0x78384870, 0xcc0cccc0, + 0x14150511, 0xf83bcbf3, 0x70304070, 0x74354571, + 0x7c3f4f73, 0x34350531, 0x10100010, 0x00030303, + 0x64244460, 0x6c2d4d61, 0xc406c6c2, 0x74344470, + 0xd415c5d1, 0xb43484b0, 0xe82acae2, 0x08090901, + 0x74364672, 0x18190911, 0xfc3ecef2, 0x40004040, + 0x10120212, 0xe020c0e0, 0xbc3d8db1, 0x04050501, + 0xf83acaf2, 0x00010101, 0xf030c0f0, 0x282a0a22, + 0x5c1e4e52, 0xa82989a1, 0x54164652, 0x40034343, + 0x84058581, 0x14140410, 0x88098981, 0x981b8b93, + 0xb03080b0, 0xe425c5e1, 0x48084840, 0x78394971, + 0x94178793, 0xfc3cccf0, 0x1c1e0e12, 0x80028282, + 0x20210121, 0x8c0c8c80, 0x181b0b13, 0x5c1f4f53, + 0x74374773, 0x54144450, 0xb03282b2, 0x1c1d0d11, + 0x24250521, 0x4c0f4f43, 0x00000000, 0x44064642, + 0xec2dcde1, 0x58184850, 0x50124252, 0xe82bcbe3, + 0x7c3e4e72, 0xd81acad2, 0xc809c9c1, 0xfc3dcdf1, + 0x30300030, 0x94158591, 0x64254561, 0x3c3c0c30, + 0xb43686b2, 0xe424c4e0, 0xb83b8bb3, 0x7c3c4c70, + 0x0c0e0e02, 0x50104050, 0x38390931, 0x24260622, + 0x30320232, 0x84048480, 0x68294961, 0x90138393, + 0x34370733, 0xe427c7e3, 0x24240420, 0xa42484a0, + 0xc80bcbc3, 0x50134353, 0x080a0a02, 0x84078783, + 0xd819c9d1, 0x4c0c4c40, 0x80038383, 0x8c0f8f83, + 0xcc0ecec2, 0x383b0b33, 0x480a4a42, 0xb43787b3, +}; + +static const u32 SS2[256] = { + 0xa1a82989, 0x81840585, 0xd2d416c6, 0xd3d013c3, + 0x50541444, 0x111c1d0d, 0xa0ac2c8c, 0x21242505, + 0x515c1d4d, 0x43400343, 0x10181808, 0x121c1e0e, + 0x51501141, 0xf0fc3ccc, 0xc2c80aca, 
0x63602343, + 0x20282808, 0x40440444, 0x20202000, 0x919c1d8d, + 0xe0e020c0, 0xe2e022c2, 0xc0c808c8, 0x13141707, + 0xa1a42585, 0x838c0f8f, 0x03000303, 0x73783b4b, + 0xb3b83b8b, 0x13101303, 0xd2d012c2, 0xe2ec2ece, + 0x70703040, 0x808c0c8c, 0x333c3f0f, 0xa0a82888, + 0x32303202, 0xd1dc1dcd, 0xf2f436c6, 0x70743444, + 0xe0ec2ccc, 0x91941585, 0x03080b0b, 0x53541747, + 0x505c1c4c, 0x53581b4b, 0xb1bc3d8d, 0x01000101, + 0x20242404, 0x101c1c0c, 0x73703343, 0x90981888, + 0x10101000, 0xc0cc0ccc, 0xf2f032c2, 0xd1d819c9, + 0x202c2c0c, 0xe3e427c7, 0x72703242, 0x83800383, + 0x93981b8b, 0xd1d011c1, 0x82840686, 0xc1c809c9, + 0x60602040, 0x50501040, 0xa3a02383, 0xe3e82bcb, + 0x010c0d0d, 0xb2b43686, 0x929c1e8e, 0x434c0f4f, + 0xb3b43787, 0x52581a4a, 0xc2c406c6, 0x70783848, + 0xa2a42686, 0x12101202, 0xa3ac2f8f, 0xd1d415c5, + 0x61602141, 0xc3c003c3, 0xb0b43484, 0x41400141, + 0x52501242, 0x717c3d4d, 0x818c0d8d, 0x00080808, + 0x131c1f0f, 0x91981989, 0x00000000, 0x11181909, + 0x00040404, 0x53501343, 0xf3f437c7, 0xe1e021c1, + 0xf1fc3dcd, 0x72743646, 0x232c2f0f, 0x23242707, + 0xb0b03080, 0x83880b8b, 0x020c0e0e, 0xa3a82b8b, + 0xa2a02282, 0x626c2e4e, 0x93901383, 0x414c0d4d, + 0x61682949, 0x707c3c4c, 0x01080909, 0x02080a0a, + 0xb3bc3f8f, 0xe3ec2fcf, 0xf3f033c3, 0xc1c405c5, + 0x83840787, 0x10141404, 0xf2fc3ece, 0x60642444, + 0xd2dc1ece, 0x222c2e0e, 0x43480b4b, 0x12181a0a, + 0x02040606, 0x21202101, 0x63682b4b, 0x62642646, + 0x02000202, 0xf1f435c5, 0x92901282, 0x82880a8a, + 0x000c0c0c, 0xb3b03383, 0x727c3e4e, 0xd0d010c0, + 0x72783a4a, 0x43440747, 0x92941686, 0xe1e425c5, + 0x22242606, 0x80800080, 0xa1ac2d8d, 0xd3dc1fcf, + 0xa1a02181, 0x30303000, 0x33343707, 0xa2ac2e8e, + 0x32343606, 0x11141505, 0x22202202, 0x30383808, + 0xf0f434c4, 0xa3a42787, 0x41440545, 0x404c0c4c, + 0x81800181, 0xe1e829c9, 0x80840484, 0x93941787, + 0x31343505, 0xc3c80bcb, 0xc2cc0ece, 0x303c3c0c, + 0x71703141, 0x11101101, 0xc3c407c7, 0x81880989, + 0x71743545, 0xf3f83bcb, 0xd2d81aca, 0xf0f838c8, + 0x90941484, 0x51581949, 0x82800282, 0xc0c404c4, + 0xf3fc3fcf, 0x41480949, 0x31383909, 0x63642747, + 0xc0c000c0, 0xc3cc0fcf, 0xd3d417c7, 0xb0b83888, + 0x030c0f0f, 0x828c0e8e, 0x42400242, 0x23202303, + 0x91901181, 0x606c2c4c, 0xd3d81bcb, 0xa0a42484, + 0x30343404, 0xf1f031c1, 0x40480848, 0xc2c002c2, + 0x636c2f4f, 0x313c3d0d, 0x212c2d0d, 0x40400040, + 0xb2bc3e8e, 0x323c3e0e, 0xb0bc3c8c, 0xc1c001c1, + 0xa2a82a8a, 0xb2b83a8a, 0x424c0e4e, 0x51541545, + 0x33383b0b, 0xd0dc1ccc, 0x60682848, 0x737c3f4f, + 0x909c1c8c, 0xd0d818c8, 0x42480a4a, 0x52541646, + 0x73743747, 0xa0a02080, 0xe1ec2dcd, 0x42440646, + 0xb1b43585, 0x23282b0b, 0x61642545, 0xf2f83aca, + 0xe3e023c3, 0xb1b83989, 0xb1b03181, 0x939c1f8f, + 0x525c1e4e, 0xf1f839c9, 0xe2e426c6, 0xb2b03282, + 0x31303101, 0xe2e82aca, 0x616c2d4d, 0x535c1f4f, + 0xe0e424c4, 0xf0f030c0, 0xc1cc0dcd, 0x80880888, + 0x12141606, 0x32383a0a, 0x50581848, 0xd0d414c4, + 0x62602242, 0x21282909, 0x03040707, 0x33303303, + 0xe0e828c8, 0x13181b0b, 0x01040505, 0x71783949, + 0x90901080, 0x62682a4a, 0x22282a0a, 0x92981a8a, +}; + +static const u32 SS3[256] = { + 0x08303838, 0xc8e0e828, 0x0d212c2d, 0x86a2a426, + 0xcfc3cc0f, 0xced2dc1e, 0x83b3b033, 0x88b0b838, + 0x8fa3ac2f, 0x40606020, 0x45515415, 0xc7c3c407, + 0x44404404, 0x4f636c2f, 0x4b63682b, 0x4b53581b, + 0xc3c3c003, 0x42626022, 0x03333033, 0x85b1b435, + 0x09212829, 0x80a0a020, 0xc2e2e022, 0x87a3a427, + 0xc3d3d013, 0x81919011, 0x01111011, 0x06020406, + 0x0c101c1c, 0x8cb0bc3c, 0x06323436, 0x4b43480b, + 0xcfe3ec2f, 0x88808808, 0x4c606c2c, 0x88a0a828, + 0x07131417, 0xc4c0c404, 0x06121416, 0xc4f0f434, + 
0xc2c2c002, 0x45414405, 0xc1e1e021, 0xc6d2d416, + 0x0f333c3f, 0x0d313c3d, 0x8e828c0e, 0x88909818, + 0x08202828, 0x4e424c0e, 0xc6f2f436, 0x0e323c3e, + 0x85a1a425, 0xc9f1f839, 0x0d010c0d, 0xcfd3dc1f, + 0xc8d0d818, 0x0b23282b, 0x46626426, 0x4a72783a, + 0x07232427, 0x0f232c2f, 0xc1f1f031, 0x42727032, + 0x42424002, 0xc4d0d414, 0x41414001, 0xc0c0c000, + 0x43737033, 0x47636427, 0x8ca0ac2c, 0x8b83880b, + 0xc7f3f437, 0x8da1ac2d, 0x80808000, 0x0f131c1f, + 0xcac2c80a, 0x0c202c2c, 0x8aa2a82a, 0x04303434, + 0xc2d2d012, 0x0b03080b, 0xcee2ec2e, 0xc9e1e829, + 0x4d515c1d, 0x84909414, 0x08101818, 0xc8f0f838, + 0x47535417, 0x8ea2ac2e, 0x08000808, 0xc5c1c405, + 0x03131013, 0xcdc1cc0d, 0x86828406, 0x89b1b839, + 0xcff3fc3f, 0x4d717c3d, 0xc1c1c001, 0x01313031, + 0xc5f1f435, 0x8a82880a, 0x4a62682a, 0x81b1b031, + 0xc1d1d011, 0x00202020, 0xc7d3d417, 0x02020002, + 0x02222022, 0x04000404, 0x48606828, 0x41717031, + 0x07030407, 0xcbd3d81b, 0x8d919c1d, 0x89919819, + 0x41616021, 0x8eb2bc3e, 0xc6e2e426, 0x49515819, + 0xcdd1dc1d, 0x41515011, 0x80909010, 0xccd0dc1c, + 0x8a92981a, 0x83a3a023, 0x8ba3a82b, 0xc0d0d010, + 0x81818001, 0x0f030c0f, 0x47434407, 0x0a12181a, + 0xc3e3e023, 0xcce0ec2c, 0x8d818c0d, 0x8fb3bc3f, + 0x86929416, 0x4b73783b, 0x4c505c1c, 0x82a2a022, + 0x81a1a021, 0x43636023, 0x03232023, 0x4d414c0d, + 0xc8c0c808, 0x8e929c1e, 0x8c909c1c, 0x0a32383a, + 0x0c000c0c, 0x0e222c2e, 0x8ab2b83a, 0x4e626c2e, + 0x8f939c1f, 0x4a52581a, 0xc2f2f032, 0x82929012, + 0xc3f3f033, 0x49414809, 0x48707838, 0xccc0cc0c, + 0x05111415, 0xcbf3f83b, 0x40707030, 0x45717435, + 0x4f737c3f, 0x05313435, 0x00101010, 0x03030003, + 0x44606424, 0x4d616c2d, 0xc6c2c406, 0x44707434, + 0xc5d1d415, 0x84b0b434, 0xcae2e82a, 0x09010809, + 0x46727436, 0x09111819, 0xcef2fc3e, 0x40404000, + 0x02121012, 0xc0e0e020, 0x8db1bc3d, 0x05010405, + 0xcaf2f83a, 0x01010001, 0xc0f0f030, 0x0a22282a, + 0x4e525c1e, 0x89a1a829, 0x46525416, 0x43434003, + 0x85818405, 0x04101414, 0x89818809, 0x8b93981b, + 0x80b0b030, 0xc5e1e425, 0x48404808, 0x49717839, + 0x87939417, 0xccf0fc3c, 0x0e121c1e, 0x82828002, + 0x01212021, 0x8c808c0c, 0x0b13181b, 0x4f535c1f, + 0x47737437, 0x44505414, 0x82b2b032, 0x0d111c1d, + 0x05212425, 0x4f434c0f, 0x00000000, 0x46424406, + 0xcde1ec2d, 0x48505818, 0x42525012, 0xcbe3e82b, + 0x4e727c3e, 0xcad2d81a, 0xc9c1c809, 0xcdf1fc3d, + 0x00303030, 0x85919415, 0x45616425, 0x0c303c3c, + 0x86b2b436, 0xc4e0e424, 0x8bb3b83b, 0x4c707c3c, + 0x0e020c0e, 0x40505010, 0x09313839, 0x06222426, + 0x02323032, 0x84808404, 0x49616829, 0x83939013, + 0x07333437, 0xc7e3e427, 0x04202424, 0x84a0a424, + 0xcbc3c80b, 0x43535013, 0x0a02080a, 0x87838407, + 0xc9d1d819, 0x4c404c0c, 0x83838003, 0x8f838c0f, + 0xcec2cc0e, 0x0b33383b, 0x4a42480a, 0x87b3b437, +}; + +static const u32 KC[SEED_NUM_KCONSTANTS] = { + 0x9e3779b9, 0x3c6ef373, 0x78dde6e6, 0xf1bbcdcc, + 0xe3779b99, 0xc6ef3733, 0x8dde6e67, 0x1bbcdccf, + 0x3779b99e, 0x6ef3733c, 0xdde6e678, 0xbbcdccf1, + 0x779b99e3, 0xef3733c6, 0xde6e678d, 0xbcdccf1b, +}; + +#define OP(X1, X2, X3, X4, rbase) \ + t0 = X3 ^ ks[rbase]; \ + t1 = X4 ^ ks[rbase+1]; \ + t1 ^= t0; \ + t1 = SS0[byte(t1, 0)] ^ SS1[byte(t1, 1)] ^ \ + SS2[byte(t1, 2)] ^ SS3[byte(t1, 3)]; \ + t0 += t1; \ + t0 = SS0[byte(t0, 0)] ^ SS1[byte(t0, 1)] ^ \ + SS2[byte(t0, 2)] ^ SS3[byte(t0, 3)]; \ + t1 += t0; \ + t1 = SS0[byte(t1, 0)] ^ SS1[byte(t1, 1)] ^ \ + SS2[byte(t1, 2)] ^ SS3[byte(t1, 3)]; \ + t0 += t1; \ + X1 ^= t0; \ + X2 ^= t1; + +static int seed_set_key(struct crypto_tfm *tfm, const u8 *in_key, + unsigned int key_len) +{ + struct seed_ctx *ctx = crypto_tfm_ctx(tfm); + u32 *keyout = 
ctx->keysched; + const __be32 *key = (const __be32 *)in_key; + u32 i, t0, t1, x1, x2, x3, x4; + + x1 = be32_to_cpu(key[0]); + x2 = be32_to_cpu(key[1]); + x3 = be32_to_cpu(key[2]); + x4 = be32_to_cpu(key[3]); + + for (i = 0; i < SEED_NUM_KCONSTANTS; i++) { + t0 = x1 + x3 - KC[i]; + t1 = x2 + KC[i] - x4; + *(keyout++) = SS0[byte(t0, 0)] ^ SS1[byte(t0, 1)] ^ + SS2[byte(t0, 2)] ^ SS3[byte(t0, 3)]; + *(keyout++) = SS0[byte(t1, 0)] ^ SS1[byte(t1, 1)] ^ + SS2[byte(t1, 2)] ^ SS3[byte(t1, 3)]; + + if (i % 2 == 0) { + t0 = x1; + x1 = (x1 >> 8) ^ (x2 << 24); + x2 = (x2 >> 8) ^ (t0 << 24); + } else { + t0 = x3; + x3 = (x3 << 8) ^ (x4 >> 24); + x4 = (x4 << 8) ^ (t0 >> 24); + } + } + + return 0; +} + +/* encrypt a block of text */ + +static void seed_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in) +{ + const struct seed_ctx *ctx = crypto_tfm_ctx(tfm); + const __be32 *src = (const __be32 *)in; + __be32 *dst = (__be32 *)out; + u32 x1, x2, x3, x4, t0, t1; + const u32 *ks = ctx->keysched; + + x1 = be32_to_cpu(src[0]); + x2 = be32_to_cpu(src[1]); + x3 = be32_to_cpu(src[2]); + x4 = be32_to_cpu(src[3]); + + OP(x1, x2, x3, x4, 0); + OP(x3, x4, x1, x2, 2); + OP(x1, x2, x3, x4, 4); + OP(x3, x4, x1, x2, 6); + OP(x1, x2, x3, x4, 8); + OP(x3, x4, x1, x2, 10); + OP(x1, x2, x3, x4, 12); + OP(x3, x4, x1, x2, 14); + OP(x1, x2, x3, x4, 16); + OP(x3, x4, x1, x2, 18); + OP(x1, x2, x3, x4, 20); + OP(x3, x4, x1, x2, 22); + OP(x1, x2, x3, x4, 24); + OP(x3, x4, x1, x2, 26); + OP(x1, x2, x3, x4, 28); + OP(x3, x4, x1, x2, 30); + + dst[0] = cpu_to_be32(x3); + dst[1] = cpu_to_be32(x4); + dst[2] = cpu_to_be32(x1); + dst[3] = cpu_to_be32(x2); +} + +/* decrypt a block of text */ + +static void seed_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in) +{ + const struct seed_ctx *ctx = crypto_tfm_ctx(tfm); + const __be32 *src = (const __be32 *)in; + __be32 *dst = (__be32 *)out; + u32 x1, x2, x3, x4, t0, t1; + const u32 *ks = ctx->keysched; + + x1 = be32_to_cpu(src[0]); + x2 = be32_to_cpu(src[1]); + x3 = be32_to_cpu(src[2]); + x4 = be32_to_cpu(src[3]); + + OP(x1, x2, x3, x4, 30); + OP(x3, x4, x1, x2, 28); + OP(x1, x2, x3, x4, 26); + OP(x3, x4, x1, x2, 24); + OP(x1, x2, x3, x4, 22); + OP(x3, x4, x1, x2, 20); + OP(x1, x2, x3, x4, 18); + OP(x3, x4, x1, x2, 16); + OP(x1, x2, x3, x4, 14); + OP(x3, x4, x1, x2, 12); + OP(x1, x2, x3, x4, 10); + OP(x3, x4, x1, x2, 8); + OP(x1, x2, x3, x4, 6); + OP(x3, x4, x1, x2, 4); + OP(x1, x2, x3, x4, 2); + OP(x3, x4, x1, x2, 0); + + dst[0] = cpu_to_be32(x3); + dst[1] = cpu_to_be32(x4); + dst[2] = cpu_to_be32(x1); + dst[3] = cpu_to_be32(x2); +} + + +static struct crypto_alg seed_alg = { + .cra_name = "seed", + .cra_driver_name = "seed-generic", + .cra_priority = 100, + .cra_flags = CRYPTO_ALG_TYPE_CIPHER, + .cra_blocksize = SEED_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct seed_ctx), + .cra_alignmask = 3, + .cra_module = THIS_MODULE, + .cra_list = LIST_HEAD_INIT(seed_alg.cra_list), + .cra_u = { + .cipher = { + .cia_min_keysize = SEED_KEY_SIZE, + .cia_max_keysize = SEED_KEY_SIZE, + .cia_setkey = seed_set_key, + .cia_encrypt = seed_encrypt, + .cia_decrypt = seed_decrypt + } + } +}; + +static int __init seed_init(void) +{ + return crypto_register_alg(&seed_alg); +} + +static void __exit seed_fini(void) +{ + crypto_unregister_alg(&seed_alg); +} + +module_init(seed_init); +module_exit(seed_fini); + +MODULE_DESCRIPTION("SEED Cipher Algorithm"); +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Hye-Shik Chang , Kim Hyun "); diff --git a/crypto/sha1.c b/crypto/sha1_generic.c similarity index 93% rename from 
crypto/sha1.c rename to crypto/sha1_generic.c index 1bba551e5b456371ba3224200d74726a420cd7c7..48a3c3e0bf5f8b4fb974c2e8aeddabb80fca8043 100644 --- a/crypto/sha1.c +++ b/crypto/sha1_generic.c @@ -22,12 +22,10 @@ #include #include #include +#include #include #include -#define SHA1_DIGEST_SIZE 20 -#define SHA1_HMAC_BLOCK_SIZE 64 - struct sha1_ctx { u64 count; u32 state[5]; @@ -39,7 +37,7 @@ static void sha1_init(struct crypto_tfm *tfm) struct sha1_ctx *sctx = crypto_tfm_ctx(tfm); static const struct sha1_ctx initstate = { 0, - { 0x67452301, 0xEFCDAB89, 0x98BADCFE, 0x10325476, 0xC3D2E1F0 }, + { SHA1_H0, SHA1_H1, SHA1_H2, SHA1_H3, SHA1_H4 }, { 0, } }; @@ -111,7 +109,7 @@ static struct crypto_alg alg = { .cra_name = "sha1", .cra_driver_name= "sha1-generic", .cra_flags = CRYPTO_ALG_TYPE_DIGEST, - .cra_blocksize = SHA1_HMAC_BLOCK_SIZE, + .cra_blocksize = SHA1_BLOCK_SIZE, .cra_ctxsize = sizeof(struct sha1_ctx), .cra_module = THIS_MODULE, .cra_alignmask = 3, @@ -139,4 +137,4 @@ module_exit(fini); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("SHA1 Secure Hash Algorithm"); -MODULE_ALIAS("sha1-generic"); +MODULE_ALIAS("sha1"); diff --git a/crypto/sha256.c b/crypto/sha256_generic.c similarity index 95% rename from crypto/sha256.c rename to crypto/sha256_generic.c index 716195bb54f247b936dd1e6ef6d655eec3dca79d..5f4332edcf6ba64b4e18d43406322c200f4fb63d 100644 --- a/crypto/sha256.c +++ b/crypto/sha256_generic.c @@ -21,12 +21,10 @@ #include #include #include +#include #include #include -#define SHA256_DIGEST_SIZE 32 -#define SHA256_HMAC_BLOCK_SIZE 64 - struct sha256_ctx { u32 count[2]; u32 state[8]; @@ -48,15 +46,6 @@ static inline u32 Maj(u32 x, u32 y, u32 z) #define s0(x) (ror32(x, 7) ^ ror32(x,18) ^ (x >> 3)) #define s1(x) (ror32(x,17) ^ ror32(x,19) ^ (x >> 10)) -#define H0 0x6a09e667 -#define H1 0xbb67ae85 -#define H2 0x3c6ef372 -#define H3 0xa54ff53a -#define H4 0x510e527f -#define H5 0x9b05688c -#define H6 0x1f83d9ab -#define H7 0x5be0cd19 - static inline void LOAD_OP(int I, u32 *W, const u8 *input) { W[I] = __be32_to_cpu( ((__be32*)(input))[I] ); @@ -233,14 +222,14 @@ static void sha256_transform(u32 *state, const u8 *input) static void sha256_init(struct crypto_tfm *tfm) { struct sha256_ctx *sctx = crypto_tfm_ctx(tfm); - sctx->state[0] = H0; - sctx->state[1] = H1; - sctx->state[2] = H2; - sctx->state[3] = H3; - sctx->state[4] = H4; - sctx->state[5] = H5; - sctx->state[6] = H6; - sctx->state[7] = H7; + sctx->state[0] = SHA256_H0; + sctx->state[1] = SHA256_H1; + sctx->state[2] = SHA256_H2; + sctx->state[3] = SHA256_H3; + sctx->state[4] = SHA256_H4; + sctx->state[5] = SHA256_H5; + sctx->state[6] = SHA256_H6; + sctx->state[7] = SHA256_H7; sctx->count[0] = sctx->count[1] = 0; } @@ -311,7 +300,7 @@ static struct crypto_alg alg = { .cra_name = "sha256", .cra_driver_name= "sha256-generic", .cra_flags = CRYPTO_ALG_TYPE_DIGEST, - .cra_blocksize = SHA256_HMAC_BLOCK_SIZE, + .cra_blocksize = SHA256_BLOCK_SIZE, .cra_ctxsize = sizeof(struct sha256_ctx), .cra_module = THIS_MODULE, .cra_alignmask = 3, @@ -339,4 +328,4 @@ module_exit(fini); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("SHA256 Secure Hash Algorithm"); -MODULE_ALIAS("sha256-generic"); +MODULE_ALIAS("sha256"); diff --git a/crypto/sha512.c b/crypto/sha512.c index 15eab9db9be410dfe12d990b4bdc2b417e798050..e736596ca5741a86adf19f44129b0a4399eecdd6 100644 --- a/crypto/sha512.c +++ b/crypto/sha512.c @@ -13,20 +13,15 @@ #include #include - #include #include #include #include +#include #include #include -#define SHA384_DIGEST_SIZE 48 -#define SHA512_DIGEST_SIZE 64 
-#define SHA384_HMAC_BLOCK_SIZE 128 -#define SHA512_HMAC_BLOCK_SIZE 128 - struct sha512_ctx { u64 state[8]; u32 count[4]; @@ -84,26 +79,6 @@ static const u64 sha512_K[80] = { #define s0(x) (RORu64(x, 1) ^ RORu64(x, 8) ^ (x >> 7)) #define s1(x) (RORu64(x,19) ^ RORu64(x,61) ^ (x >> 6)) -/* H* initial state for SHA-512 */ -#define H0 0x6a09e667f3bcc908ULL -#define H1 0xbb67ae8584caa73bULL -#define H2 0x3c6ef372fe94f82bULL -#define H3 0xa54ff53a5f1d36f1ULL -#define H4 0x510e527fade682d1ULL -#define H5 0x9b05688c2b3e6c1fULL -#define H6 0x1f83d9abfb41bd6bULL -#define H7 0x5be0cd19137e2179ULL - -/* H'* initial state for SHA-384 */ -#define HP0 0xcbbb9d5dc1059ed8ULL -#define HP1 0x629a292a367cd507ULL -#define HP2 0x9159015a3070dd17ULL -#define HP3 0x152fecd8f70e5939ULL -#define HP4 0x67332667ffc00b31ULL -#define HP5 0x8eb44a8768581511ULL -#define HP6 0xdb0c2e0d64f98fa7ULL -#define HP7 0x47b5481dbefa4fa4ULL - static inline void LOAD_OP(int I, u64 *W, const u8 *input) { W[I] = __be64_to_cpu( ((__be64*)(input))[I] ); @@ -164,14 +139,14 @@ static void sha512_init(struct crypto_tfm *tfm) { struct sha512_ctx *sctx = crypto_tfm_ctx(tfm); - sctx->state[0] = H0; - sctx->state[1] = H1; - sctx->state[2] = H2; - sctx->state[3] = H3; - sctx->state[4] = H4; - sctx->state[5] = H5; - sctx->state[6] = H6; - sctx->state[7] = H7; + sctx->state[0] = SHA512_H0; + sctx->state[1] = SHA512_H1; + sctx->state[2] = SHA512_H2; + sctx->state[3] = SHA512_H3; + sctx->state[4] = SHA512_H4; + sctx->state[5] = SHA512_H5; + sctx->state[6] = SHA512_H6; + sctx->state[7] = SHA512_H7; sctx->count[0] = sctx->count[1] = sctx->count[2] = sctx->count[3] = 0; } @@ -179,14 +154,14 @@ static void sha384_init(struct crypto_tfm *tfm) { struct sha512_ctx *sctx = crypto_tfm_ctx(tfm); - sctx->state[0] = HP0; - sctx->state[1] = HP1; - sctx->state[2] = HP2; - sctx->state[3] = HP3; - sctx->state[4] = HP4; - sctx->state[5] = HP5; - sctx->state[6] = HP6; - sctx->state[7] = HP7; + sctx->state[0] = SHA384_H0; + sctx->state[1] = SHA384_H1; + sctx->state[2] = SHA384_H2; + sctx->state[3] = SHA384_H3; + sctx->state[4] = SHA384_H4; + sctx->state[5] = SHA384_H5; + sctx->state[6] = SHA384_H6; + sctx->state[7] = SHA384_H7; sctx->count[0] = sctx->count[1] = sctx->count[2] = sctx->count[3] = 0; } @@ -275,7 +250,7 @@ static void sha384_final(struct crypto_tfm *tfm, u8 *hash) static struct crypto_alg sha512 = { .cra_name = "sha512", .cra_flags = CRYPTO_ALG_TYPE_DIGEST, - .cra_blocksize = SHA512_HMAC_BLOCK_SIZE, + .cra_blocksize = SHA512_BLOCK_SIZE, .cra_ctxsize = sizeof(struct sha512_ctx), .cra_module = THIS_MODULE, .cra_alignmask = 3, @@ -291,7 +266,7 @@ static struct crypto_alg sha512 = { static struct crypto_alg sha384 = { .cra_name = "sha384", .cra_flags = CRYPTO_ALG_TYPE_DIGEST, - .cra_blocksize = SHA384_HMAC_BLOCK_SIZE, + .cra_blocksize = SHA384_BLOCK_SIZE, .cra_ctxsize = sizeof(struct sha512_ctx), .cra_alignmask = 3, .cra_module = THIS_MODULE, diff --git a/crypto/tcrypt.c b/crypto/tcrypt.c index 11f9359538166c37a5b152b515f13f2262622072..18d489c8b935f3834fdf72743d1e1ef0e2acd457 100644 --- a/crypto/tcrypt.c +++ b/crypto/tcrypt.c @@ -78,7 +78,7 @@ static char *check[] = { "twofish", "serpent", "sha384", "sha512", "md4", "aes", "cast6", "arc4", "michael_mic", "deflate", "crc32c", "tea", "xtea", "khazad", "wp512", "wp384", "wp256", "tnepres", "xeta", "fcrypt", - "camellia", NULL + "camellia", "seed", NULL }; static void hexdump(unsigned char *buf, unsigned int len) @@ -955,6 +955,10 @@ static void do_test(void) AES_LRW_ENC_TEST_VECTORS); test_cipher("lrw(aes)", 
DECRYPT, aes_lrw_dec_tv_template, AES_LRW_DEC_TEST_VECTORS); + test_cipher("xts(aes)", ENCRYPT, aes_xts_enc_tv_template, + AES_XTS_ENC_TEST_VECTORS); + test_cipher("xts(aes)", DECRYPT, aes_xts_dec_tv_template, + AES_XTS_DEC_TEST_VECTORS); //CAST5 test_cipher("ecb(cast5)", ENCRYPT, cast5_enc_tv_template, @@ -1029,6 +1033,12 @@ static void do_test(void) camellia_cbc_dec_tv_template, CAMELLIA_CBC_DEC_TEST_VECTORS); + //SEED + test_cipher("ecb(seed)", ENCRYPT, seed_enc_tv_template, + SEED_ENC_TEST_VECTORS); + test_cipher("ecb(seed)", DECRYPT, seed_dec_tv_template, + SEED_DEC_TEST_VECTORS); + test_hash("sha384", sha384_tv_template, SHA384_TEST_VECTORS); test_hash("sha512", sha512_tv_template, SHA512_TEST_VECTORS); test_hash("wp512", wp512_tv_template, WP512_TEST_VECTORS); @@ -1132,6 +1142,10 @@ static void do_test(void) AES_LRW_ENC_TEST_VECTORS); test_cipher("lrw(aes)", DECRYPT, aes_lrw_dec_tv_template, AES_LRW_DEC_TEST_VECTORS); + test_cipher("xts(aes)", ENCRYPT, aes_xts_enc_tv_template, + AES_XTS_ENC_TEST_VECTORS); + test_cipher("xts(aes)", DECRYPT, aes_xts_dec_tv_template, + AES_XTS_DEC_TEST_VECTORS); break; case 11: @@ -1307,6 +1321,10 @@ static void do_test(void) aes_lrw_speed_template); test_cipher_speed("lrw(aes)", DECRYPT, sec, NULL, 0, aes_lrw_speed_template); + test_cipher_speed("xts(aes)", ENCRYPT, sec, NULL, 0, + aes_xts_speed_template); + test_cipher_speed("xts(aes)", DECRYPT, sec, NULL, 0, + aes_xts_speed_template); break; case 201: diff --git a/crypto/tcrypt.h b/crypto/tcrypt.h index 887527bd5bc6613260075f3bc9e3b850953ddcbf..ec861388d9a0ac7a42e1bdd142a8aaeecc8fe61e 100644 --- a/crypto/tcrypt.h +++ b/crypto/tcrypt.h @@ -2144,6 +2144,8 @@ static struct cipher_testvec cast6_dec_tv_template[] = { #define AES_CBC_DEC_TEST_VECTORS 2 #define AES_LRW_ENC_TEST_VECTORS 8 #define AES_LRW_DEC_TEST_VECTORS 8 +#define AES_XTS_ENC_TEST_VECTORS 4 +#define AES_XTS_DEC_TEST_VECTORS 4 static struct cipher_testvec aes_enc_tv_template[] = { { /* From FIPS-197 */ @@ -2784,6 +2786,400 @@ static struct cipher_testvec aes_lrw_dec_tv_template[] = { } }; +static struct cipher_testvec aes_xts_enc_tv_template[] = { + /* http://grouper.ieee.org/groups/1619/email/pdf00086.pdf */ + { /* XTS-AES 1 */ + .key = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }, + .klen = 32, + .iv = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }, + .input = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }, + .ilen = 32, + .result = { 0x91, 0x7c, 0xf6, 0x9e, 0xbd, 0x68, 0xb2, 0xec, + 0x9b, 0x9f, 0xe9, 0xa3, 0xea, 0xdd, 0xa6, 0x92, + 0xcd, 0x43, 0xd2, 0xf5, 0x95, 0x98, 0xed, 0x85, + 0x8c, 0x02, 0xc2, 0x65, 0x2f, 0xbf, 0x92, 0x2e }, + .rlen = 32, + }, { /* XTS-AES 2 */ + .key = { 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, + 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, + 0x22, 0x22, 0x22, 0x22, 0x22, 0x22, 0x22, 0x22, + 0x22, 0x22, 0x22, 0x22, 0x22, 0x22, 0x22, 0x22 }, + .klen = 32, + .iv = { 0x33, 0x33, 0x33, 0x33, 0x33, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }, + .input = { 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, + 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, + 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, + 0x44, 0x44, 0x44, 0x44, 
0x44, 0x44, 0x44, 0x44 }, + .ilen = 32, + .result = { 0xc4, 0x54, 0x18, 0x5e, 0x6a, 0x16, 0x93, 0x6e, + 0x39, 0x33, 0x40, 0x38, 0xac, 0xef, 0x83, 0x8b, + 0xfb, 0x18, 0x6f, 0xff, 0x74, 0x80, 0xad, 0xc4, + 0x28, 0x93, 0x82, 0xec, 0xd6, 0xd3, 0x94, 0xf0 }, + .rlen = 32, + }, { /* XTS-AES 3 */ + .key = { 0xff, 0xfe, 0xfd, 0xfc, 0xfb, 0xfa, 0xf9, 0xf8, + 0xf7, 0xf6, 0xf5, 0xf4, 0xf3, 0xf2, 0xf1, 0xf0, + 0x22, 0x22, 0x22, 0x22, 0x22, 0x22, 0x22, 0x22, + 0x22, 0x22, 0x22, 0x22, 0x22, 0x22, 0x22, 0x22 }, + .klen = 32, + .iv = { 0x33, 0x33, 0x33, 0x33, 0x33, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }, + .input = { 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, + 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, + 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, + 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44 }, + .ilen = 32, + .result = { 0xaf, 0x85, 0x33, 0x6b, 0x59, 0x7a, 0xfc, 0x1a, + 0x90, 0x0b, 0x2e, 0xb2, 0x1e, 0xc9, 0x49, 0xd2, + 0x92, 0xdf, 0x4c, 0x04, 0x7e, 0x0b, 0x21, 0x53, + 0x21, 0x86, 0xa5, 0x97, 0x1a, 0x22, 0x7a, 0x89 }, + .rlen = 32, + }, { /* XTS-AES 4 */ + .key = { 0x27, 0x18, 0x28, 0x18, 0x28, 0x45, 0x90, 0x45, + 0x23, 0x53, 0x60, 0x28, 0x74, 0x71, 0x35, 0x26, + 0x31, 0x41, 0x59, 0x26, 0x53, 0x58, 0x97, 0x93, + 0x23, 0x84, 0x62, 0x64, 0x33, 0x83, 0x27, 0x95 }, + .klen = 32, + .iv = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }, + .input = { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, + 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, + 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, + 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, + 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, + 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, + 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, + 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, + 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, + 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, + 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, + 0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, + 0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, + 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, + 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, + 0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, + 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, + 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, + 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, + 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f, + 0xa0, 0xa1, 0xa2, 0xa3, 0xa4, 0xa5, 0xa6, 0xa7, + 0xa8, 0xa9, 0xaa, 0xab, 0xac, 0xad, 0xae, 0xaf, + 0xb0, 0xb1, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6, 0xb7, + 0xb8, 0xb9, 0xba, 0xbb, 0xbc, 0xbd, 0xbe, 0xbf, + 0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, + 0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf, + 0xd0, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7, + 0xd8, 0xd9, 0xda, 0xdb, 0xdc, 0xdd, 0xde, 0xdf, + 0xe0, 0xe1, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, + 0xe8, 0xe9, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xef, + 0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, + 0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xff, + 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, + 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, + 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, + 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, + 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, + 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, + 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, + 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, + 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, + 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, + 
0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, + 0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, + 0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, + 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, + 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, + 0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, + 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, + 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, + 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, + 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f, + 0xa0, 0xa1, 0xa2, 0xa3, 0xa4, 0xa5, 0xa6, 0xa7, + 0xa8, 0xa9, 0xaa, 0xab, 0xac, 0xad, 0xae, 0xaf, + 0xb0, 0xb1, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6, 0xb7, + 0xb8, 0xb9, 0xba, 0xbb, 0xbc, 0xbd, 0xbe, 0xbf, + 0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, + 0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf, + 0xd0, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7, + 0xd8, 0xd9, 0xda, 0xdb, 0xdc, 0xdd, 0xde, 0xdf, + 0xe0, 0xe1, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, + 0xe8, 0xe9, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xef, + 0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, + 0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xff }, + .ilen = 512, + .result = { 0x27, 0xa7, 0x47, 0x9b, 0xef, 0xa1, 0xd4, 0x76, + 0x48, 0x9f, 0x30, 0x8c, 0xd4, 0xcf, 0xa6, 0xe2, + 0xa9, 0x6e, 0x4b, 0xbe, 0x32, 0x08, 0xff, 0x25, + 0x28, 0x7d, 0xd3, 0x81, 0x96, 0x16, 0xe8, 0x9c, + 0xc7, 0x8c, 0xf7, 0xf5, 0xe5, 0x43, 0x44, 0x5f, + 0x83, 0x33, 0xd8, 0xfa, 0x7f, 0x56, 0x00, 0x00, + 0x05, 0x27, 0x9f, 0xa5, 0xd8, 0xb5, 0xe4, 0xad, + 0x40, 0xe7, 0x36, 0xdd, 0xb4, 0xd3, 0x54, 0x12, + 0x32, 0x80, 0x63, 0xfd, 0x2a, 0xab, 0x53, 0xe5, + 0xea, 0x1e, 0x0a, 0x9f, 0x33, 0x25, 0x00, 0xa5, + 0xdf, 0x94, 0x87, 0xd0, 0x7a, 0x5c, 0x92, 0xcc, + 0x51, 0x2c, 0x88, 0x66, 0xc7, 0xe8, 0x60, 0xce, + 0x93, 0xfd, 0xf1, 0x66, 0xa2, 0x49, 0x12, 0xb4, + 0x22, 0x97, 0x61, 0x46, 0xae, 0x20, 0xce, 0x84, + 0x6b, 0xb7, 0xdc, 0x9b, 0xa9, 0x4a, 0x76, 0x7a, + 0xae, 0xf2, 0x0c, 0x0d, 0x61, 0xad, 0x02, 0x65, + 0x5e, 0xa9, 0x2d, 0xc4, 0xc4, 0xe4, 0x1a, 0x89, + 0x52, 0xc6, 0x51, 0xd3, 0x31, 0x74, 0xbe, 0x51, + 0xa1, 0x0c, 0x42, 0x11, 0x10, 0xe6, 0xd8, 0x15, + 0x88, 0xed, 0xe8, 0x21, 0x03, 0xa2, 0x52, 0xd8, + 0xa7, 0x50, 0xe8, 0x76, 0x8d, 0xef, 0xff, 0xed, + 0x91, 0x22, 0x81, 0x0a, 0xae, 0xb9, 0x9f, 0x91, + 0x72, 0xaf, 0x82, 0xb6, 0x04, 0xdc, 0x4b, 0x8e, + 0x51, 0xbc, 0xb0, 0x82, 0x35, 0xa6, 0xf4, 0x34, + 0x13, 0x32, 0xe4, 0xca, 0x60, 0x48, 0x2a, 0x4b, + 0xa1, 0xa0, 0x3b, 0x3e, 0x65, 0x00, 0x8f, 0xc5, + 0xda, 0x76, 0xb7, 0x0b, 0xf1, 0x69, 0x0d, 0xb4, + 0xea, 0xe2, 0x9c, 0x5f, 0x1b, 0xad, 0xd0, 0x3c, + 0x5c, 0xcf, 0x2a, 0x55, 0xd7, 0x05, 0xdd, 0xcd, + 0x86, 0xd4, 0x49, 0x51, 0x1c, 0xeb, 0x7e, 0xc3, + 0x0b, 0xf1, 0x2b, 0x1f, 0xa3, 0x5b, 0x91, 0x3f, + 0x9f, 0x74, 0x7a, 0x8a, 0xfd, 0x1b, 0x13, 0x0e, + 0x94, 0xbf, 0xf9, 0x4e, 0xff, 0xd0, 0x1a, 0x91, + 0x73, 0x5c, 0xa1, 0x72, 0x6a, 0xcd, 0x0b, 0x19, + 0x7c, 0x4e, 0x5b, 0x03, 0x39, 0x36, 0x97, 0xe1, + 0x26, 0x82, 0x6f, 0xb6, 0xbb, 0xde, 0x8e, 0xcc, + 0x1e, 0x08, 0x29, 0x85, 0x16, 0xe2, 0xc9, 0xed, + 0x03, 0xff, 0x3c, 0x1b, 0x78, 0x60, 0xf6, 0xde, + 0x76, 0xd4, 0xce, 0xcd, 0x94, 0xc8, 0x11, 0x98, + 0x55, 0xef, 0x52, 0x97, 0xca, 0x67, 0xe9, 0xf3, + 0xe7, 0xff, 0x72, 0xb1, 0xe9, 0x97, 0x85, 0xca, + 0x0a, 0x7e, 0x77, 0x20, 0xc5, 0xb3, 0x6d, 0xc6, + 0xd7, 0x2c, 0xac, 0x95, 0x74, 0xc8, 0xcb, 0xbc, + 0x2f, 0x80, 0x1e, 0x23, 0xe5, 0x6f, 0xd3, 0x44, + 0xb0, 0x7f, 0x22, 0x15, 0x4b, 0xeb, 0xa0, 0xf0, + 0x8c, 0xe8, 0x89, 0x1e, 0x64, 0x3e, 0xd9, 0x95, + 0xc9, 0x4d, 0x9a, 0x69, 0xc9, 0xf1, 0xb5, 0xf4, + 0x99, 0x02, 0x7a, 0x78, 0x57, 0x2a, 0xee, 0xbd, + 0x74, 0xd2, 0x0c, 0xc3, 
0x98, 0x81, 0xc2, 0x13, + 0xee, 0x77, 0x0b, 0x10, 0x10, 0xe4, 0xbe, 0xa7, + 0x18, 0x84, 0x69, 0x77, 0xae, 0x11, 0x9f, 0x7a, + 0x02, 0x3a, 0xb5, 0x8c, 0xca, 0x0a, 0xd7, 0x52, + 0xaf, 0xe6, 0x56, 0xbb, 0x3c, 0x17, 0x25, 0x6a, + 0x9f, 0x6e, 0x9b, 0xf1, 0x9f, 0xdd, 0x5a, 0x38, + 0xfc, 0x82, 0xbb, 0xe8, 0x72, 0xc5, 0x53, 0x9e, + 0xdb, 0x60, 0x9e, 0xf4, 0xf7, 0x9c, 0x20, 0x3e, + 0xbb, 0x14, 0x0f, 0x2e, 0x58, 0x3c, 0xb2, 0xad, + 0x15, 0xb4, 0xaa, 0x5b, 0x65, 0x50, 0x16, 0xa8, + 0x44, 0x92, 0x77, 0xdb, 0xd4, 0x77, 0xef, 0x2c, + 0x8d, 0x6c, 0x01, 0x7d, 0xb7, 0x38, 0xb1, 0x8d, + 0xeb, 0x4a, 0x42, 0x7d, 0x19, 0x23, 0xce, 0x3f, + 0xf2, 0x62, 0x73, 0x57, 0x79, 0xa4, 0x18, 0xf2, + 0x0a, 0x28, 0x2d, 0xf9, 0x20, 0x14, 0x7b, 0xea, + 0xbe, 0x42, 0x1e, 0xe5, 0x31, 0x9d, 0x05, 0x68 }, + .rlen = 512, + } +}; + +static struct cipher_testvec aes_xts_dec_tv_template[] = { + /* http://grouper.ieee.org/groups/1619/email/pdf00086.pdf */ + { /* XTS-AES 1 */ + .key = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }, + .klen = 32, + .iv = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }, + .input = { 0x91, 0x7c, 0xf6, 0x9e, 0xbd, 0x68, 0xb2, 0xec, + 0x9b, 0x9f, 0xe9, 0xa3, 0xea, 0xdd, 0xa6, 0x92, + 0xcd, 0x43, 0xd2, 0xf5, 0x95, 0x98, 0xed, 0x85, + 0x8c, 0x02, 0xc2, 0x65, 0x2f, 0xbf, 0x92, 0x2e }, + .ilen = 32, + .result = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }, + .rlen = 32, + }, { /* XTS-AES 2 */ + .key = { 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, + 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, + 0x22, 0x22, 0x22, 0x22, 0x22, 0x22, 0x22, 0x22, + 0x22, 0x22, 0x22, 0x22, 0x22, 0x22, 0x22, 0x22 }, + .klen = 32, + .iv = { 0x33, 0x33, 0x33, 0x33, 0x33, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }, + .input = { 0xc4, 0x54, 0x18, 0x5e, 0x6a, 0x16, 0x93, 0x6e, + 0x39, 0x33, 0x40, 0x38, 0xac, 0xef, 0x83, 0x8b, + 0xfb, 0x18, 0x6f, 0xff, 0x74, 0x80, 0xad, 0xc4, + 0x28, 0x93, 0x82, 0xec, 0xd6, 0xd3, 0x94, 0xf0 }, + .ilen = 32, + .result = { 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, + 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, + 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, + 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44 }, + .rlen = 32, + }, { /* XTS-AES 3 */ + .key = { 0xff, 0xfe, 0xfd, 0xfc, 0xfb, 0xfa, 0xf9, 0xf8, + 0xf7, 0xf6, 0xf5, 0xf4, 0xf3, 0xf2, 0xf1, 0xf0, + 0x22, 0x22, 0x22, 0x22, 0x22, 0x22, 0x22, 0x22, + 0x22, 0x22, 0x22, 0x22, 0x22, 0x22, 0x22, 0x22 }, + .klen = 32, + .iv = { 0x33, 0x33, 0x33, 0x33, 0x33, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }, + .input = { 0xaf, 0x85, 0x33, 0x6b, 0x59, 0x7a, 0xfc, 0x1a, + 0x90, 0x0b, 0x2e, 0xb2, 0x1e, 0xc9, 0x49, 0xd2, + 0x92, 0xdf, 0x4c, 0x04, 0x7e, 0x0b, 0x21, 0x53, + 0x21, 0x86, 0xa5, 0x97, 0x1a, 0x22, 0x7a, 0x89 }, + .ilen = 32, + .result = { 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, + 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, + 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, + 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44 }, + .rlen = 32, + }, { /* XTS-AES 4 */ + .key = { 0x27, 0x18, 0x28, 0x18, 0x28, 0x45, 0x90, 0x45, + 0x23, 0x53, 0x60, 0x28, 0x74, 0x71, 0x35, 0x26, + 0x31, 0x41, 0x59, 0x26, 0x53, 0x58, 0x97, 0x93, + 0x23, 
0x84, 0x62, 0x64, 0x33, 0x83, 0x27, 0x95 }, + .klen = 32, + .iv = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }, + .input = { 0x27, 0xa7, 0x47, 0x9b, 0xef, 0xa1, 0xd4, 0x76, + 0x48, 0x9f, 0x30, 0x8c, 0xd4, 0xcf, 0xa6, 0xe2, + 0xa9, 0x6e, 0x4b, 0xbe, 0x32, 0x08, 0xff, 0x25, + 0x28, 0x7d, 0xd3, 0x81, 0x96, 0x16, 0xe8, 0x9c, + 0xc7, 0x8c, 0xf7, 0xf5, 0xe5, 0x43, 0x44, 0x5f, + 0x83, 0x33, 0xd8, 0xfa, 0x7f, 0x56, 0x00, 0x00, + 0x05, 0x27, 0x9f, 0xa5, 0xd8, 0xb5, 0xe4, 0xad, + 0x40, 0xe7, 0x36, 0xdd, 0xb4, 0xd3, 0x54, 0x12, + 0x32, 0x80, 0x63, 0xfd, 0x2a, 0xab, 0x53, 0xe5, + 0xea, 0x1e, 0x0a, 0x9f, 0x33, 0x25, 0x00, 0xa5, + 0xdf, 0x94, 0x87, 0xd0, 0x7a, 0x5c, 0x92, 0xcc, + 0x51, 0x2c, 0x88, 0x66, 0xc7, 0xe8, 0x60, 0xce, + 0x93, 0xfd, 0xf1, 0x66, 0xa2, 0x49, 0x12, 0xb4, + 0x22, 0x97, 0x61, 0x46, 0xae, 0x20, 0xce, 0x84, + 0x6b, 0xb7, 0xdc, 0x9b, 0xa9, 0x4a, 0x76, 0x7a, + 0xae, 0xf2, 0x0c, 0x0d, 0x61, 0xad, 0x02, 0x65, + 0x5e, 0xa9, 0x2d, 0xc4, 0xc4, 0xe4, 0x1a, 0x89, + 0x52, 0xc6, 0x51, 0xd3, 0x31, 0x74, 0xbe, 0x51, + 0xa1, 0x0c, 0x42, 0x11, 0x10, 0xe6, 0xd8, 0x15, + 0x88, 0xed, 0xe8, 0x21, 0x03, 0xa2, 0x52, 0xd8, + 0xa7, 0x50, 0xe8, 0x76, 0x8d, 0xef, 0xff, 0xed, + 0x91, 0x22, 0x81, 0x0a, 0xae, 0xb9, 0x9f, 0x91, + 0x72, 0xaf, 0x82, 0xb6, 0x04, 0xdc, 0x4b, 0x8e, + 0x51, 0xbc, 0xb0, 0x82, 0x35, 0xa6, 0xf4, 0x34, + 0x13, 0x32, 0xe4, 0xca, 0x60, 0x48, 0x2a, 0x4b, + 0xa1, 0xa0, 0x3b, 0x3e, 0x65, 0x00, 0x8f, 0xc5, + 0xda, 0x76, 0xb7, 0x0b, 0xf1, 0x69, 0x0d, 0xb4, + 0xea, 0xe2, 0x9c, 0x5f, 0x1b, 0xad, 0xd0, 0x3c, + 0x5c, 0xcf, 0x2a, 0x55, 0xd7, 0x05, 0xdd, 0xcd, + 0x86, 0xd4, 0x49, 0x51, 0x1c, 0xeb, 0x7e, 0xc3, + 0x0b, 0xf1, 0x2b, 0x1f, 0xa3, 0x5b, 0x91, 0x3f, + 0x9f, 0x74, 0x7a, 0x8a, 0xfd, 0x1b, 0x13, 0x0e, + 0x94, 0xbf, 0xf9, 0x4e, 0xff, 0xd0, 0x1a, 0x91, + 0x73, 0x5c, 0xa1, 0x72, 0x6a, 0xcd, 0x0b, 0x19, + 0x7c, 0x4e, 0x5b, 0x03, 0x39, 0x36, 0x97, 0xe1, + 0x26, 0x82, 0x6f, 0xb6, 0xbb, 0xde, 0x8e, 0xcc, + 0x1e, 0x08, 0x29, 0x85, 0x16, 0xe2, 0xc9, 0xed, + 0x03, 0xff, 0x3c, 0x1b, 0x78, 0x60, 0xf6, 0xde, + 0x76, 0xd4, 0xce, 0xcd, 0x94, 0xc8, 0x11, 0x98, + 0x55, 0xef, 0x52, 0x97, 0xca, 0x67, 0xe9, 0xf3, + 0xe7, 0xff, 0x72, 0xb1, 0xe9, 0x97, 0x85, 0xca, + 0x0a, 0x7e, 0x77, 0x20, 0xc5, 0xb3, 0x6d, 0xc6, + 0xd7, 0x2c, 0xac, 0x95, 0x74, 0xc8, 0xcb, 0xbc, + 0x2f, 0x80, 0x1e, 0x23, 0xe5, 0x6f, 0xd3, 0x44, + 0xb0, 0x7f, 0x22, 0x15, 0x4b, 0xeb, 0xa0, 0xf0, + 0x8c, 0xe8, 0x89, 0x1e, 0x64, 0x3e, 0xd9, 0x95, + 0xc9, 0x4d, 0x9a, 0x69, 0xc9, 0xf1, 0xb5, 0xf4, + 0x99, 0x02, 0x7a, 0x78, 0x57, 0x2a, 0xee, 0xbd, + 0x74, 0xd2, 0x0c, 0xc3, 0x98, 0x81, 0xc2, 0x13, + 0xee, 0x77, 0x0b, 0x10, 0x10, 0xe4, 0xbe, 0xa7, + 0x18, 0x84, 0x69, 0x77, 0xae, 0x11, 0x9f, 0x7a, + 0x02, 0x3a, 0xb5, 0x8c, 0xca, 0x0a, 0xd7, 0x52, + 0xaf, 0xe6, 0x56, 0xbb, 0x3c, 0x17, 0x25, 0x6a, + 0x9f, 0x6e, 0x9b, 0xf1, 0x9f, 0xdd, 0x5a, 0x38, + 0xfc, 0x82, 0xbb, 0xe8, 0x72, 0xc5, 0x53, 0x9e, + 0xdb, 0x60, 0x9e, 0xf4, 0xf7, 0x9c, 0x20, 0x3e, + 0xbb, 0x14, 0x0f, 0x2e, 0x58, 0x3c, 0xb2, 0xad, + 0x15, 0xb4, 0xaa, 0x5b, 0x65, 0x50, 0x16, 0xa8, + 0x44, 0x92, 0x77, 0xdb, 0xd4, 0x77, 0xef, 0x2c, + 0x8d, 0x6c, 0x01, 0x7d, 0xb7, 0x38, 0xb1, 0x8d, + 0xeb, 0x4a, 0x42, 0x7d, 0x19, 0x23, 0xce, 0x3f, + 0xf2, 0x62, 0x73, 0x57, 0x79, 0xa4, 0x18, 0xf2, + 0x0a, 0x28, 0x2d, 0xf9, 0x20, 0x14, 0x7b, 0xea, + 0xbe, 0x42, 0x1e, 0xe5, 0x31, 0x9d, 0x05, 0x68 }, + .ilen = 512, + .result = { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, + 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, + 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 
0x17, + 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, + 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, + 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, + 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, + 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, + 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, + 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, + 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, + 0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, + 0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, + 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, + 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, + 0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, + 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, + 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, + 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, + 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f, + 0xa0, 0xa1, 0xa2, 0xa3, 0xa4, 0xa5, 0xa6, 0xa7, + 0xa8, 0xa9, 0xaa, 0xab, 0xac, 0xad, 0xae, 0xaf, + 0xb0, 0xb1, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6, 0xb7, + 0xb8, 0xb9, 0xba, 0xbb, 0xbc, 0xbd, 0xbe, 0xbf, + 0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, + 0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf, + 0xd0, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7, + 0xd8, 0xd9, 0xda, 0xdb, 0xdc, 0xdd, 0xde, 0xdf, + 0xe0, 0xe1, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, + 0xe8, 0xe9, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xef, + 0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, + 0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xff, + 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, + 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, + 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, + 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, + 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, + 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, + 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, + 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, + 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, + 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, + 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, + 0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, + 0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, + 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, + 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, + 0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, + 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, + 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, + 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, + 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f, + 0xa0, 0xa1, 0xa2, 0xa3, 0xa4, 0xa5, 0xa6, 0xa7, + 0xa8, 0xa9, 0xaa, 0xab, 0xac, 0xad, 0xae, 0xaf, + 0xb0, 0xb1, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6, 0xb7, + 0xb8, 0xb9, 0xba, 0xbb, 0xbc, 0xbd, 0xbe, 0xbf, + 0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, + 0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf, + 0xd0, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7, + 0xd8, 0xd9, 0xda, 0xdb, 0xdc, 0xdd, 0xde, 0xdf, + 0xe0, 0xe1, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, + 0xe8, 0xe9, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xef, + 0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, + 0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xff }, + .rlen = 512, + } +}; + /* Cast5 test vectors from RFC 2144 */ #define CAST5_ENC_TEST_VECTORS 3 #define CAST5_DEC_TEST_VECTORS 3 @@ -3831,6 +4227,96 @@ static struct cipher_testvec camellia_cbc_dec_tv_template[] = { }, }; +/* + * SEED test vectors + */ +#define SEED_ENC_TEST_VECTORS 4 +#define SEED_DEC_TEST_VECTORS 4 + +static struct cipher_testvec seed_enc_tv_template[] = { + { + .key = { [0 ... 
15] = 0x00 }, + .klen = 16, + .input = { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, + 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f }, + .ilen = 16, + .result = { 0x5e, 0xba, 0xc6, 0xe0, 0x05, 0x4e, 0x16, 0x68, + 0x19, 0xaf, 0xf1, 0xcc, 0x6d, 0x34, 0x6c, 0xdb }, + .rlen = 16, + }, { + .key = { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, + 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f }, + .klen = 16, + .input = { [0 ... 15] = 0x00 }, + .ilen = 16, + .result = { 0xc1, 0x1f, 0x22, 0xf2, 0x01, 0x40, 0x50, 0x50, + 0x84, 0x48, 0x35, 0x97, 0xe4, 0x37, 0x0f, 0x43 }, + .rlen = 16, + }, { + .key = { 0x47, 0x06, 0x48, 0x08, 0x51, 0xe6, 0x1b, 0xe8, + 0x5d, 0x74, 0xbf, 0xb3, 0xfd, 0x95, 0x61, 0x85 }, + .klen = 16, + .input = { 0x83, 0xa2, 0xf8, 0xa2, 0x88, 0x64, 0x1f, 0xb9, + 0xa4, 0xe9, 0xa5, 0xcc, 0x2f, 0x13, 0x1c, 0x7d }, + .ilen = 16, + .result = { 0xee, 0x54, 0xd1, 0x3e, 0xbc, 0xae, 0x70, 0x6d, + 0x22, 0x6b, 0xc3, 0x14, 0x2c, 0xd4, 0x0d, 0x4a }, + .rlen = 16, + }, { + .key = { 0x28, 0xdb, 0xc3, 0xbc, 0x49, 0xff, 0xd8, 0x7d, + 0xcf, 0xa5, 0x09, 0xb1, 0x1d, 0x42, 0x2b, 0xe7 }, + .klen = 16, + .input = { 0xb4, 0x1e, 0x6b, 0xe2, 0xeb, 0xa8, 0x4a, 0x14, + 0x8e, 0x2e, 0xed, 0x84, 0x59, 0x3c, 0x5e, 0xc7 }, + .ilen = 16, + .result = { 0x9b, 0x9b, 0x7b, 0xfc, 0xd1, 0x81, 0x3c, 0xb9, + 0x5d, 0x0b, 0x36, 0x18, 0xf4, 0x0f, 0x51, 0x22 }, + .rlen = 16, + } +}; + +static struct cipher_testvec seed_dec_tv_template[] = { + { + .key = { [0 ... 15] = 0x00 }, + .klen = 16, + .input = { 0x5e, 0xba, 0xc6, 0xe0, 0x05, 0x4e, 0x16, 0x68, + 0x19, 0xaf, 0xf1, 0xcc, 0x6d, 0x34, 0x6c, 0xdb }, + .ilen = 16, + .result = { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, + 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f }, + .rlen = 16, + }, { + .key = { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, + 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f }, + .klen = 16, + .input = { 0xc1, 0x1f, 0x22, 0xf2, 0x01, 0x40, 0x50, 0x50, + 0x84, 0x48, 0x35, 0x97, 0xe4, 0x37, 0x0f, 0x43 }, + .ilen = 16, + .result = { [0 ... 15] = 0x00 }, + .rlen = 16, + }, { + .key = { 0x47, 0x06, 0x48, 0x08, 0x51, 0xe6, 0x1b, 0xe8, + 0x5d, 0x74, 0xbf, 0xb3, 0xfd, 0x95, 0x61, 0x85 }, + .klen = 16, + .input = { 0xee, 0x54, 0xd1, 0x3e, 0xbc, 0xae, 0x70, 0x6d, + 0x22, 0x6b, 0xc3, 0x14, 0x2c, 0xd4, 0x0d, 0x4a }, + .ilen = 16, + .result = { 0x83, 0xa2, 0xf8, 0xa2, 0x88, 0x64, 0x1f, 0xb9, + 0xa4, 0xe9, 0xa5, 0xcc, 0x2f, 0x13, 0x1c, 0x7d }, + .rlen = 16, + }, { + .key = { 0x28, 0xdb, 0xc3, 0xbc, 0x49, 0xff, 0xd8, 0x7d, + 0xcf, 0xa5, 0x09, 0xb1, 0x1d, 0x42, 0x2b, 0xe7 }, + .klen = 16, + .input = { 0x9b, 0x9b, 0x7b, 0xfc, 0xd1, 0x81, 0x3c, 0xb9, + 0x5d, 0x0b, 0x36, 0x18, 0xf4, 0x0f, 0x51, 0x22 }, + .ilen = 16, + .result = { 0xb4, 0x1e, 0x6b, 0xe2, 0xeb, 0xa8, 0x4a, 0x14, + 0x8e, 0x2e, 0xed, 0x84, 0x59, 0x3c, 0x5e, 0xc7 }, + .rlen = 16, + } +}; + /* * Compression stuff. 
*/ @@ -4193,6 +4679,27 @@ static struct cipher_speed aes_lrw_speed_template[] = { { .klen = 0, .blen = 0, } }; +static struct cipher_speed aes_xts_speed_template[] = { + { .klen = 32, .blen = 16, }, + { .klen = 32, .blen = 64, }, + { .klen = 32, .blen = 256, }, + { .klen = 32, .blen = 1024, }, + { .klen = 32, .blen = 8192, }, + { .klen = 48, .blen = 16, }, + { .klen = 48, .blen = 64, }, + { .klen = 48, .blen = 256, }, + { .klen = 48, .blen = 1024, }, + { .klen = 48, .blen = 8192, }, + { .klen = 64, .blen = 16, }, + { .klen = 64, .blen = 64, }, + { .klen = 64, .blen = 256, }, + { .klen = 64, .blen = 1024, }, + { .klen = 64, .blen = 8192, }, + + /* End marker */ + { .klen = 0, .blen = 0, } +}; + static struct cipher_speed des3_ede_speed_template[] = { { .klen = 24, .blen = 16, }, { .klen = 24, .blen = 64, }, diff --git a/crypto/xts.c b/crypto/xts.c new file mode 100644 index 0000000000000000000000000000000000000000..8eb08bfaf7c099cc5b4dd0e06cbd48eb4f381c26 --- /dev/null +++ b/crypto/xts.c @@ -0,0 +1,292 @@ +/* XTS: as defined in IEEE1619/D16 + * http://grouper.ieee.org/groups/1619/email/pdf00086.pdf + * (sector sizes which are not a multiple of 16 bytes are, + * however currently unsupported) + * + * Copyright (c) 2007 Rik Snel + * + * Based om ecb.c + * Copyright (c) 2006 Herbert Xu + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. + */ +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +struct priv { + struct crypto_cipher *child; + struct crypto_cipher *tweak; +}; + +static int setkey(struct crypto_tfm *parent, const u8 *key, + unsigned int keylen) +{ + struct priv *ctx = crypto_tfm_ctx(parent); + struct crypto_cipher *child = ctx->tweak; + u32 *flags = &parent->crt_flags; + int err; + + /* key consists of keys of equal size concatenated, therefore + * the length must be even */ + if (keylen % 2) { + /* tell the user why there was an error */ + *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN; + return -EINVAL; + } + + /* we need two cipher instances: one to compute the inital 'tweak' + * by encrypting the IV (usually the 'plain' iv) and the other + * one to encrypt and decrypt the data */ + + /* tweak cipher, uses Key2 i.e. the second half of *key */ + crypto_cipher_clear_flags(child, CRYPTO_TFM_REQ_MASK); + crypto_cipher_set_flags(child, crypto_tfm_get_flags(parent) & + CRYPTO_TFM_REQ_MASK); + err = crypto_cipher_setkey(child, key + keylen/2, keylen/2); + if (err) + return err; + + crypto_tfm_set_flags(parent, crypto_cipher_get_flags(child) & + CRYPTO_TFM_RES_MASK); + + child = ctx->child; + + /* data cipher, uses Key1 i.e. 
the first half of *key */ + crypto_cipher_clear_flags(child, CRYPTO_TFM_REQ_MASK); + crypto_cipher_set_flags(child, crypto_tfm_get_flags(parent) & + CRYPTO_TFM_REQ_MASK); + err = crypto_cipher_setkey(child, key, keylen/2); + if (err) + return err; + + crypto_tfm_set_flags(parent, crypto_cipher_get_flags(child) & + CRYPTO_TFM_RES_MASK); + + return 0; +} + +struct sinfo { + be128 t; + struct crypto_tfm *tfm; + void (*fn)(struct crypto_tfm *, u8 *, const u8 *); +}; + +static inline void xts_round(struct sinfo *s, void *dst, const void *src) +{ + be128_xor(dst, &s->t, src); /* PP <- T xor P */ + s->fn(s->tfm, dst, dst); /* CC <- E(Key1,PP) */ + be128_xor(dst, dst, &s->t); /* C <- T xor CC */ +} + +static int crypt(struct blkcipher_desc *d, + struct blkcipher_walk *w, struct priv *ctx, + void (*tw)(struct crypto_tfm *, u8 *, const u8 *), + void (*fn)(struct crypto_tfm *, u8 *, const u8 *)) +{ + int err; + unsigned int avail; + const int bs = crypto_cipher_blocksize(ctx->child); + struct sinfo s = { + .tfm = crypto_cipher_tfm(ctx->child), + .fn = fn + }; + be128 *iv; + u8 *wsrc; + u8 *wdst; + + err = blkcipher_walk_virt(d, w); + if (!w->nbytes) + return err; + + avail = w->nbytes; + + wsrc = w->src.virt.addr; + wdst = w->dst.virt.addr; + + /* calculate first value of T */ + iv = (be128 *)w->iv; + tw(crypto_cipher_tfm(ctx->tweak), (void *)&s.t, w->iv); + + goto first; + + for (;;) { + do { + gf128mul_x_ble(&s.t, &s.t); + +first: + xts_round(&s, wdst, wsrc); + + wsrc += bs; + wdst += bs; + } while ((avail -= bs) >= bs); + + err = blkcipher_walk_done(d, w, avail); + if (!w->nbytes) + break; + + avail = w->nbytes; + + wsrc = w->src.virt.addr; + wdst = w->dst.virt.addr; + } + + return err; +} + +static int encrypt(struct blkcipher_desc *desc, struct scatterlist *dst, + struct scatterlist *src, unsigned int nbytes) +{ + struct priv *ctx = crypto_blkcipher_ctx(desc->tfm); + struct blkcipher_walk w; + + blkcipher_walk_init(&w, dst, src, nbytes); + return crypt(desc, &w, ctx, crypto_cipher_alg(ctx->tweak)->cia_encrypt, + crypto_cipher_alg(ctx->child)->cia_encrypt); +} + +static int decrypt(struct blkcipher_desc *desc, struct scatterlist *dst, + struct scatterlist *src, unsigned int nbytes) +{ + struct priv *ctx = crypto_blkcipher_ctx(desc->tfm); + struct blkcipher_walk w; + + blkcipher_walk_init(&w, dst, src, nbytes); + return crypt(desc, &w, ctx, crypto_cipher_alg(ctx->tweak)->cia_encrypt, + crypto_cipher_alg(ctx->child)->cia_decrypt); +} + +static int init_tfm(struct crypto_tfm *tfm) +{ + struct crypto_cipher *cipher; + struct crypto_instance *inst = (void *)tfm->__crt_alg; + struct crypto_spawn *spawn = crypto_instance_ctx(inst); + struct priv *ctx = crypto_tfm_ctx(tfm); + u32 *flags = &tfm->crt_flags; + + cipher = crypto_spawn_cipher(spawn); + if (IS_ERR(cipher)) + return PTR_ERR(cipher); + + if (crypto_cipher_blocksize(cipher) != 16) { + *flags |= CRYPTO_TFM_RES_BAD_BLOCK_LEN; + crypto_free_cipher(cipher); + return -EINVAL; + } + + ctx->child = cipher; + + cipher = crypto_spawn_cipher(spawn); + if (IS_ERR(cipher)) { + crypto_free_cipher(ctx->child); + return PTR_ERR(cipher); + } + + /* this check isn't really needed, leave it here just in case */ + if (crypto_cipher_blocksize(cipher) != 16) { + crypto_free_cipher(cipher); + crypto_free_cipher(ctx->child); + *flags |= CRYPTO_TFM_RES_BAD_BLOCK_LEN; + return -EINVAL; + } + + ctx->tweak = cipher; + + return 0; +} + +static void exit_tfm(struct crypto_tfm *tfm) +{ + struct priv *ctx = crypto_tfm_ctx(tfm); + crypto_free_cipher(ctx->child); + 
crypto_free_cipher(ctx->tweak); +} + +static struct crypto_instance *alloc(struct rtattr **tb) +{ + struct crypto_instance *inst; + struct crypto_alg *alg; + int err; + + err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_BLKCIPHER); + if (err) + return ERR_PTR(err); + + alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_CIPHER, + CRYPTO_ALG_TYPE_MASK); + if (IS_ERR(alg)) + return ERR_PTR(PTR_ERR(alg)); + + inst = crypto_alloc_instance("xts", alg); + if (IS_ERR(inst)) + goto out_put_alg; + + inst->alg.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER; + inst->alg.cra_priority = alg->cra_priority; + inst->alg.cra_blocksize = alg->cra_blocksize; + + if (alg->cra_alignmask < 7) + inst->alg.cra_alignmask = 7; + else + inst->alg.cra_alignmask = alg->cra_alignmask; + + inst->alg.cra_type = &crypto_blkcipher_type; + + inst->alg.cra_blkcipher.ivsize = alg->cra_blocksize; + inst->alg.cra_blkcipher.min_keysize = + 2 * alg->cra_cipher.cia_min_keysize; + inst->alg.cra_blkcipher.max_keysize = + 2 * alg->cra_cipher.cia_max_keysize; + + inst->alg.cra_ctxsize = sizeof(struct priv); + + inst->alg.cra_init = init_tfm; + inst->alg.cra_exit = exit_tfm; + + inst->alg.cra_blkcipher.setkey = setkey; + inst->alg.cra_blkcipher.encrypt = encrypt; + inst->alg.cra_blkcipher.decrypt = decrypt; + +out_put_alg: + crypto_mod_put(alg); + return inst; +} + +static void free(struct crypto_instance *inst) +{ + crypto_drop_spawn(crypto_instance_ctx(inst)); + kfree(inst); +} + +static struct crypto_template crypto_tmpl = { + .name = "xts", + .alloc = alloc, + .free = free, + .module = THIS_MODULE, +}; + +static int __init crypto_module_init(void) +{ + return crypto_register_template(&crypto_tmpl); +} + +static void __exit crypto_module_exit(void) +{ + crypto_unregister_template(&crypto_tmpl); +} + +module_init(crypto_module_init); +module_exit(crypto_module_exit); + +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("XTS block cipher mode"); diff --git a/drivers/Kconfig b/drivers/Kconfig index 3e1c442deff9b7a4f67fada30120a162288b8f87..7bdae47d6b91cea8848d2ce48e401afe9067c921 100644 --- a/drivers/Kconfig +++ b/drivers/Kconfig @@ -58,6 +58,8 @@ source "drivers/power/Kconfig" source "drivers/hwmon/Kconfig" +source "drivers/ssb/Kconfig" + source "drivers/mfd/Kconfig" source "drivers/media/Kconfig" diff --git a/drivers/Makefile b/drivers/Makefile index f0878b2ec55ea47d5a87f5069890cca4e5de7ca3..a168eacdcd9c1d57333e9da931d26a712ffb01c2 100644 --- a/drivers/Makefile +++ b/drivers/Makefile @@ -88,3 +88,4 @@ obj-$(CONFIG_DMA_ENGINE) += dma/ obj-$(CONFIG_HID) += hid/ obj-$(CONFIG_PPC_PS3) += ps3/ obj-$(CONFIG_OF) += of/ +obj-$(CONFIG_SSB) += ssb/ diff --git a/drivers/atm/idt77252.c b/drivers/atm/idt77252.c index f8b1700f4c16df01d40401d83df1df811dd10889..eee54c0cde6819ad064ada9cb619bdb3954f0052 100644 --- a/drivers/atm/idt77252.c +++ b/drivers/atm/idt77252.c @@ -3576,7 +3576,7 @@ init_card(struct atm_dev *dev) * XXX: */ sprintf(tname, "eth%d", card->index); - tmp = dev_get_by_name(tname); /* jhs: was "tmp = dev_get(tname);" */ + tmp = dev_get_by_name(&init_net, tname); /* jhs: was "tmp = dev_get(tname);" */ if (tmp) { memcpy(card->atmdev->esi, tmp->dev_addr, 6); diff --git a/drivers/block/aoe/aoecmd.c b/drivers/block/aoe/aoecmd.c index 5abae34ad65baad472fcfee487bf354953c23dd4..99672017ca56f5212229db8fb9312e7c81842e7b 100644 --- a/drivers/block/aoe/aoecmd.c +++ b/drivers/block/aoe/aoecmd.c @@ -9,6 +9,7 @@ #include #include #include +#include #include #include "aoe.h" @@ -194,7 +195,7 @@ aoecmd_cfg_pkts(ushort aoemajor, unsigned char aoeminor, struct sk_buff **tail) 
sl = sl_tail = NULL; read_lock(&dev_base_lock); - for_each_netdev(ifp) { + for_each_netdev(&init_net, ifp) { dev_hold(ifp); if (!is_aoe_netif(ifp)) goto cont; diff --git a/drivers/block/aoe/aoenet.c b/drivers/block/aoe/aoenet.c index f9ddfda4d9cb5945e3800e4a6865691e6069765f..4dc0fb7da94b50dacbdb0084dc7f7f037f0db435 100644 --- a/drivers/block/aoe/aoenet.c +++ b/drivers/block/aoe/aoenet.c @@ -8,6 +8,7 @@ #include #include #include +#include #include #include "aoe.h" @@ -114,6 +115,9 @@ aoenet_rcv(struct sk_buff *skb, struct net_device *ifp, struct packet_type *pt, struct aoe_hdr *h; u32 n; + if (ifp->nd_net != &init_net) + goto exit; + skb = skb_share_check(skb, GFP_ATOMIC); if (skb == NULL) return 0; diff --git a/drivers/connector/connector.c b/drivers/connector/connector.c index a7b9e9bb3e8d54017480857d2b7cc6dbceedcc6f..0e328d387af4cbecdb326238b12d904dfe4988b1 100644 --- a/drivers/connector/connector.c +++ b/drivers/connector/connector.c @@ -234,18 +234,6 @@ static void cn_rx_skb(struct sk_buff *__skb) kfree_skb(__skb); } -/* - * Netlink socket input callback - dequeues the skbs and calls the - * main netlink receiving function. - */ -static void cn_input(struct sock *sk, int len) -{ - struct sk_buff *skb; - - while ((skb = skb_dequeue(&sk->sk_receive_queue)) != NULL) - cn_rx_skb(skb); -} - /* * Notification routing. * @@ -442,11 +430,11 @@ static int __devinit cn_init(void) struct cn_dev *dev = &cdev; int err; - dev->input = cn_input; + dev->input = cn_rx_skb; dev->id.idx = cn_idx; dev->id.val = cn_val; - dev->nls = netlink_kernel_create(NETLINK_CONNECTOR, + dev->nls = netlink_kernel_create(&init_net, NETLINK_CONNECTOR, CN_NETLINK_USERS + 0xf, dev->input, NULL, THIS_MODULE); if (!dev->nls) diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig index c0fc4aeb8596071ff67f73852e8d9f475cb30ee4..5fd6688a444a3690c6340aa61fdf1652939def63 100644 --- a/drivers/crypto/Kconfig +++ b/drivers/crypto/Kconfig @@ -14,7 +14,6 @@ config CRYPTO_DEV_PADLOCK tristate "Support for VIA PadLock ACE" depends on X86_32 select CRYPTO_ALGAPI - default m help Some VIA processors come with an integrated crypto engine (so called VIA PadLock ACE, Advanced Cryptography Engine) @@ -28,7 +27,6 @@ config CRYPTO_DEV_PADLOCK_AES tristate "PadLock driver for AES algorithm" depends on CRYPTO_DEV_PADLOCK select CRYPTO_BLKCIPHER - default m help Use VIA PadLock for AES algorithm. @@ -42,7 +40,6 @@ config CRYPTO_DEV_PADLOCK_SHA depends on CRYPTO_DEV_PADLOCK select CRYPTO_SHA1 select CRYPTO_SHA256 - default m help Use VIA PadLock for SHA1/SHA256 algorithms. @@ -58,7 +55,6 @@ config CRYPTO_DEV_GEODE depends on X86_32 && PCI select CRYPTO_ALGAPI select CRYPTO_BLKCIPHER - default m help Say 'Y' here to use the AMD Geode LX processor on-board AES engine for the CryptoAPI AES algorithm. 
@@ -70,7 +66,6 @@ config ZCRYPT tristate "Support for PCI-attached cryptographic adapters" depends on S390 select ZCRYPT_MONOLITHIC if ZCRYPT="y" - default "m" help Select this option if you want to use a PCI-attached cryptographic adapter like: diff --git a/drivers/crypto/geode-aes.c b/drivers/crypto/geode-aes.c index 6a86958b577f974bc423fbc22d48131c2ee79ecd..f9a34abbf4faed143e8597f2769f114534acaa64 100644 --- a/drivers/crypto/geode-aes.c +++ b/drivers/crypto/geode-aes.c @@ -473,6 +473,7 @@ geode_aes_exit(void) MODULE_AUTHOR("Advanced Micro Devices, Inc."); MODULE_DESCRIPTION("Geode LX Hardware AES driver"); MODULE_LICENSE("GPL"); +MODULE_ALIAS("aes"); module_init(geode_aes_init); module_exit(geode_aes_exit); diff --git a/drivers/crypto/padlock-aes.c b/drivers/crypto/padlock-aes.c index d4501dc7e65026d1fe87f5f1f751c8acff9e3810..abbcff0762b1d1f4bb7b77ff34f5d1c728f2aabc 100644 --- a/drivers/crypto/padlock-aes.c +++ b/drivers/crypto/padlock-aes.c @@ -5,7 +5,7 @@ * * Copyright (c) 2004 Michal Ludvig * - * Key expansion routine taken from crypto/aes.c + * Key expansion routine taken from crypto/aes_generic.c * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -660,4 +660,4 @@ MODULE_DESCRIPTION("VIA PadLock AES algorithm support"); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Michal Ludvig"); -MODULE_ALIAS("aes-padlock"); +MODULE_ALIAS("aes"); diff --git a/drivers/crypto/padlock-sha.c b/drivers/crypto/padlock-sha.c index a781fd23b607d805038f72078c4a56a010f9cfd5..4e8de162fc12e8506bd932794f94ad383d3513b1 100644 --- a/drivers/crypto/padlock-sha.c +++ b/drivers/crypto/padlock-sha.c @@ -13,6 +13,7 @@ */ #include +#include #include #include #include @@ -24,12 +25,7 @@ #include "padlock.h" #define SHA1_DEFAULT_FALLBACK "sha1-generic" -#define SHA1_DIGEST_SIZE 20 -#define SHA1_HMAC_BLOCK_SIZE 64 - #define SHA256_DEFAULT_FALLBACK "sha256-generic" -#define SHA256_DIGEST_SIZE 32 -#define SHA256_HMAC_BLOCK_SIZE 64 struct padlock_sha_ctx { char *data; @@ -107,11 +103,11 @@ static void padlock_do_sha1(const char *in, char *out, int count) char buf[128+16]; char *result = NEAREST_ALIGNED(buf); - ((uint32_t *)result)[0] = 0x67452301; - ((uint32_t *)result)[1] = 0xEFCDAB89; - ((uint32_t *)result)[2] = 0x98BADCFE; - ((uint32_t *)result)[3] = 0x10325476; - ((uint32_t *)result)[4] = 0xC3D2E1F0; + ((uint32_t *)result)[0] = SHA1_H0; + ((uint32_t *)result)[1] = SHA1_H1; + ((uint32_t *)result)[2] = SHA1_H2; + ((uint32_t *)result)[3] = SHA1_H3; + ((uint32_t *)result)[4] = SHA1_H4; asm volatile (".byte 0xf3,0x0f,0xa6,0xc8" /* rep xsha1 */ : "+S"(in), "+D"(result) @@ -128,14 +124,14 @@ static void padlock_do_sha256(const char *in, char *out, int count) char buf[128+16]; char *result = NEAREST_ALIGNED(buf); - ((uint32_t *)result)[0] = 0x6A09E667; - ((uint32_t *)result)[1] = 0xBB67AE85; - ((uint32_t *)result)[2] = 0x3C6EF372; - ((uint32_t *)result)[3] = 0xA54FF53A; - ((uint32_t *)result)[4] = 0x510E527F; - ((uint32_t *)result)[5] = 0x9B05688C; - ((uint32_t *)result)[6] = 0x1F83D9AB; - ((uint32_t *)result)[7] = 0x5BE0CD19; + ((uint32_t *)result)[0] = SHA256_H0; + ((uint32_t *)result)[1] = SHA256_H1; + ((uint32_t *)result)[2] = SHA256_H2; + ((uint32_t *)result)[3] = SHA256_H3; + ((uint32_t *)result)[4] = SHA256_H4; + ((uint32_t *)result)[5] = SHA256_H5; + ((uint32_t *)result)[6] = SHA256_H6; + ((uint32_t *)result)[7] = SHA256_H7; asm volatile (".byte 0xf3,0x0f,0xa6,0xd0" /* rep xsha256 */ : "+S"(in), "+D"(result) @@ -215,7 +211,7 @@ 
static struct crypto_alg sha1_alg = { .cra_priority = PADLOCK_CRA_PRIORITY, .cra_flags = CRYPTO_ALG_TYPE_DIGEST | CRYPTO_ALG_NEED_FALLBACK, - .cra_blocksize = SHA1_HMAC_BLOCK_SIZE, + .cra_blocksize = SHA1_BLOCK_SIZE, .cra_ctxsize = sizeof(struct padlock_sha_ctx), .cra_module = THIS_MODULE, .cra_list = LIST_HEAD_INIT(sha1_alg.cra_list), @@ -237,7 +233,7 @@ static struct crypto_alg sha256_alg = { .cra_priority = PADLOCK_CRA_PRIORITY, .cra_flags = CRYPTO_ALG_TYPE_DIGEST | CRYPTO_ALG_NEED_FALLBACK, - .cra_blocksize = SHA256_HMAC_BLOCK_SIZE, + .cra_blocksize = SHA256_BLOCK_SIZE, .cra_ctxsize = sizeof(struct padlock_sha_ctx), .cra_module = THIS_MODULE, .cra_list = LIST_HEAD_INIT(sha256_alg.cra_list), @@ -253,19 +249,6 @@ static struct crypto_alg sha256_alg = { } }; -static void __init padlock_sha_check_fallbacks(void) -{ - if (!crypto_has_hash("sha1", 0, CRYPTO_ALG_ASYNC | - CRYPTO_ALG_NEED_FALLBACK)) - printk(KERN_WARNING PFX - "Couldn't load fallback module for sha1.\n"); - - if (!crypto_has_hash("sha256", 0, CRYPTO_ALG_ASYNC | - CRYPTO_ALG_NEED_FALLBACK)) - printk(KERN_WARNING PFX - "Couldn't load fallback module for sha256.\n"); -} - static int __init padlock_init(void) { int rc = -ENODEV; @@ -280,8 +263,6 @@ static int __init padlock_init(void) return -ENODEV; } - padlock_sha_check_fallbacks(); - rc = crypto_register_alg(&sha1_alg); if (rc) goto out; @@ -314,5 +295,7 @@ MODULE_DESCRIPTION("VIA PadLock SHA1/SHA256 algorithms support."); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Michal Ludvig"); +MODULE_ALIAS("sha1"); +MODULE_ALIAS("sha256"); MODULE_ALIAS("sha1-padlock"); MODULE_ALIAS("sha256-padlock"); diff --git a/drivers/ieee1394/eth1394.c b/drivers/ieee1394/eth1394.c index 3a9d7e2d4de6f229aaad1a9e4b334c9a573f71c1..dc9dce22f6a8e2e0248edd20e9910285699db754 100644 --- a/drivers/ieee1394/eth1394.c +++ b/drivers/ieee1394/eth1394.c @@ -159,14 +159,16 @@ MODULE_PARM_DESC(max_partial_datagrams, static int ether1394_header(struct sk_buff *skb, struct net_device *dev, - unsigned short type, void *daddr, void *saddr, - unsigned len); + unsigned short type, const void *daddr, + const void *saddr, unsigned len); static int ether1394_rebuild_header(struct sk_buff *skb); -static int ether1394_header_parse(struct sk_buff *skb, unsigned char *haddr); -static int ether1394_header_cache(struct neighbour *neigh, struct hh_cache *hh); +static int ether1394_header_parse(const struct sk_buff *skb, + unsigned char *haddr); +static int ether1394_header_cache(const struct neighbour *neigh, + struct hh_cache *hh); static void ether1394_header_cache_update(struct hh_cache *hh, - struct net_device *dev, - unsigned char *haddr); + const struct net_device *dev, + const unsigned char *haddr); static int ether1394_tx(struct sk_buff *skb, struct net_device *dev); static void ether1394_iso(struct hpsb_iso *iso); @@ -506,6 +508,14 @@ static void ether1394_reset_priv(struct net_device *dev, int set_mtu) spin_unlock_irqrestore(&priv->lock, flags); } +static const struct header_ops ether1394_header_ops = { + .create = ether1394_header, + .rebuild = ether1394_rebuild_header, + .cache = ether1394_header_cache, + .cache_update = ether1394_header_cache_update, + .parse = ether1394_header_parse, +}; + static void ether1394_init_dev(struct net_device *dev) { dev->open = ether1394_open; @@ -515,11 +525,7 @@ static void ether1394_init_dev(struct net_device *dev) dev->tx_timeout = ether1394_tx_timeout; dev->change_mtu = ether1394_change_mtu; - dev->hard_header = ether1394_header; - dev->rebuild_header = ether1394_rebuild_header; - 
dev->hard_header_cache = ether1394_header_cache; - dev->header_cache_update= ether1394_header_cache_update; - dev->hard_header_parse = ether1394_header_parse; + dev->header_ops = &ether1394_header_ops; SET_ETHTOOL_OPS(dev, &ethtool_ops); @@ -598,7 +604,6 @@ static void ether1394_add_host(struct hpsb_host *host) goto out; } - SET_MODULE_OWNER(dev); SET_NETDEV_DEV(dev, &host->device); priv = netdev_priv(dev); @@ -711,8 +716,8 @@ static void ether1394_host_reset(struct hpsb_host *host) * saddr=NULL means use device source address * daddr=NULL means leave destination address (eg unresolved arp). */ static int ether1394_header(struct sk_buff *skb, struct net_device *dev, - unsigned short type, void *daddr, void *saddr, - unsigned len) + unsigned short type, const void *daddr, + const void *saddr, unsigned len) { struct eth1394hdr *eth = (struct eth1394hdr *)skb_push(skb, ETH1394_HLEN); @@ -752,15 +757,15 @@ static int ether1394_rebuild_header(struct sk_buff *skb) return 0; } -static int ether1394_header_parse(struct sk_buff *skb, unsigned char *haddr) +static int ether1394_header_parse(const struct sk_buff *skb, + unsigned char *haddr) { - struct net_device *dev = skb->dev; - - memcpy(haddr, dev->dev_addr, ETH1394_ALEN); + memcpy(haddr, skb->dev->dev_addr, ETH1394_ALEN); return ETH1394_ALEN; } -static int ether1394_header_cache(struct neighbour *neigh, struct hh_cache *hh) +static int ether1394_header_cache(const struct neighbour *neigh, + struct hh_cache *hh) { unsigned short type = hh->hh_type; struct net_device *dev = neigh->dev; @@ -779,8 +784,8 @@ static int ether1394_header_cache(struct neighbour *neigh, struct hh_cache *hh) /* Called by Address Resolution module to notify changes in address. */ static void ether1394_header_cache_update(struct hh_cache *hh, - struct net_device *dev, - unsigned char * haddr) + const struct net_device *dev, + const unsigned char * haddr) { memcpy((u8 *)hh->hh_data + 16 - ETH1394_HLEN, haddr, dev->addr_len); } @@ -900,8 +905,8 @@ static u16 ether1394_parse_encap(struct sk_buff *skb, struct net_device *dev, } /* Now add the ethernet header. 
*/ - if (dev->hard_header(skb, dev, ntohs(ether_type), &dest_hw, NULL, - skb->len) >= 0) + if (dev_hard_header(skb, dev, ntohs(ether_type), &dest_hw, NULL, + skb->len) >= 0) ret = ether1394_type_trans(skb, dev); return ret; diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c index 9ffb9987450a8ef2ddf294ab32dd300b9621c2cf..2e641b255db48b197ab51bcb49e2dcde94e1aae3 100644 --- a/drivers/infiniband/core/cma.c +++ b/drivers/infiniband/core/cma.c @@ -1866,13 +1866,14 @@ static int cma_alloc_port(struct idr *ps, struct rdma_id_private *id_priv, static int cma_alloc_any_port(struct idr *ps, struct rdma_id_private *id_priv) { struct rdma_bind_list *bind_list; - int port, ret; + int port, ret, low, high; bind_list = kzalloc(sizeof *bind_list, GFP_KERNEL); if (!bind_list) return -ENOMEM; retry: + /* FIXME: add proper port randomization per like inet_csk_get_port */ do { ret = idr_get_new_above(ps, bind_list, next_port, &port); } while ((ret == -EAGAIN) && idr_pre_get(ps, GFP_KERNEL)); @@ -1880,18 +1881,19 @@ static int cma_alloc_any_port(struct idr *ps, struct rdma_id_private *id_priv) if (ret) goto err1; - if (port > sysctl_local_port_range[1]) { - if (next_port != sysctl_local_port_range[0]) { + inet_get_local_port_range(&low, &high); + if (port > high) { + if (next_port != low) { idr_remove(ps, port); - next_port = sysctl_local_port_range[0]; + next_port = low; goto retry; } ret = -EADDRNOTAVAIL; goto err2; } - if (port == sysctl_local_port_range[1]) - next_port = sysctl_local_port_range[0]; + if (port == high) + next_port = low; else next_port = port + 1; @@ -2769,12 +2771,12 @@ static void cma_remove_one(struct ib_device *device) static int cma_init(void) { - int ret; + int ret, low, high; get_random_bytes(&next_port, sizeof next_port); - next_port = ((unsigned int) next_port % - (sysctl_local_port_range[1] - sysctl_local_port_range[0])) + - sysctl_local_port_range[0]; + inet_get_local_port_range(&low, &high); + next_port = ((unsigned int) next_port % (high - low)) + low; + cma_wq = create_singlethread_workqueue("rdma_cm"); if (!cma_wq) return -ENOMEM; diff --git a/drivers/infiniband/hw/amso1100/c2.c b/drivers/infiniband/hw/amso1100/c2.c index 0aecea67f3e69cb6339dfe5d6f29679eb36285e7..f283a9f0c23b198e616f041b1aa1e4f5713c60fb 100644 --- a/drivers/infiniband/hw/amso1100/c2.c +++ b/drivers/infiniband/hw/amso1100/c2.c @@ -886,7 +886,6 @@ static struct net_device *c2_devinit(struct c2_dev *c2dev, return NULL; } - SET_MODULE_OWNER(netdev); SET_NETDEV_DEV(netdev, &c2dev->pcidev->dev); netdev->open = c2_up; diff --git a/drivers/infiniband/hw/amso1100/c2_provider.c b/drivers/infiniband/hw/amso1100/c2_provider.c index 997cf1530762458f80e285cd8f6297a5d8eeb94e..7a6cece6ea9d1ba0060ee630b3ab7a5931f8f638 100644 --- a/drivers/infiniband/hw/amso1100/c2_provider.c +++ b/drivers/infiniband/hw/amso1100/c2_provider.c @@ -715,7 +715,6 @@ static int c2_pseudo_change_mtu(struct net_device *netdev, int new_mtu) static void setup(struct net_device *netdev) { - SET_MODULE_OWNER(netdev); netdev->open = c2_pseudo_up; netdev->stop = c2_pseudo_down; netdev->hard_start_xmit = c2_pseudo_xmit_frame; diff --git a/drivers/infiniband/hw/cxgb3/cxio_hal.c b/drivers/infiniband/hw/cxgb3/cxio_hal.c index beb2a381467f75abf2ae04711c810bb224f80888..eec6a30840ca0a16763105b67702c00f3e7c6836 100644 --- a/drivers/infiniband/hw/cxgb3/cxio_hal.c +++ b/drivers/infiniband/hw/cxgb3/cxio_hal.c @@ -37,6 +37,7 @@ #include #include #include +#include #include "cxio_resource.h" #include "cxio_hal.h" @@ -894,7 +895,7 @@ int 
cxio_rdev_open(struct cxio_rdev *rdev_p) if (cxio_hal_find_rdev_by_name(rdev_p->dev_name)) { return -EBUSY; } - netdev_p = dev_get_by_name(rdev_p->dev_name); + netdev_p = dev_get_by_name(&init_net, rdev_p->dev_name); if (!netdev_p) { return -EINVAL; } diff --git a/drivers/infiniband/ulp/ipoib/ipoib.h b/drivers/infiniband/ulp/ipoib/ipoib.h index 285c143115ccda751d8f544621eaad9ec2287bef..34c6128d2a34836902307260ec6d158eea24e30b 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib.h +++ b/drivers/infiniband/ulp/ipoib/ipoib.h @@ -228,6 +228,8 @@ struct ipoib_dev_priv { struct net_device *dev; + struct napi_struct napi; + unsigned long flags; struct mutex mcast_mutex; @@ -278,8 +280,6 @@ struct ipoib_dev_priv { struct ib_event_handler event_handler; - struct net_device_stats stats; - struct net_device *parent; struct list_head child_intfs; struct list_head list; @@ -351,7 +351,7 @@ extern struct workqueue_struct *ipoib_workqueue; /* functions */ -int ipoib_poll(struct net_device *dev, int *budget); +int ipoib_poll(struct napi_struct *napi, int budget); void ipoib_ib_completion(struct ib_cq *cq, void *dev_ptr); struct ipoib_ah *ipoib_create_ah(struct net_device *dev, diff --git a/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/drivers/infiniband/ulp/ipoib/ipoib_cm.c index 08b4676a38201ad94fc5d165ce2a27ed139a36a1..1afd93cdd6bbb759bafd3878cbb962a0798efce3 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c @@ -430,7 +430,7 @@ void ipoib_cm_handle_rx_wc(struct net_device *dev, struct ib_wc *wc) ipoib_dbg(priv, "cm recv error " "(status=%d, wrid=%d vend_err %x)\n", wc->status, wr_id, wc->vendor_err); - ++priv->stats.rx_dropped; + ++dev->stats.rx_dropped; goto repost; } @@ -457,7 +457,7 @@ void ipoib_cm_handle_rx_wc(struct net_device *dev, struct ib_wc *wc) * this packet and reuse the old buffer. 
*/ ipoib_dbg(priv, "failed to allocate receive buffer %d\n", wr_id); - ++priv->stats.rx_dropped; + ++dev->stats.rx_dropped; goto repost; } @@ -474,8 +474,8 @@ void ipoib_cm_handle_rx_wc(struct net_device *dev, struct ib_wc *wc) skb_pull(skb, IPOIB_ENCAP_LEN); dev->last_rx = jiffies; - ++priv->stats.rx_packets; - priv->stats.rx_bytes += skb->len; + ++dev->stats.rx_packets; + dev->stats.rx_bytes += skb->len; skb->dev = dev; /* XXX get correct PACKET_ type here */ @@ -512,8 +512,8 @@ void ipoib_cm_send(struct net_device *dev, struct sk_buff *skb, struct ipoib_cm_ if (unlikely(skb->len > tx->mtu)) { ipoib_warn(priv, "packet len %d (> %d) too long to send, dropping\n", skb->len, tx->mtu); - ++priv->stats.tx_dropped; - ++priv->stats.tx_errors; + ++dev->stats.tx_dropped; + ++dev->stats.tx_errors; ipoib_cm_skb_too_long(dev, skb, tx->mtu - IPOIB_ENCAP_LEN); return; } @@ -532,7 +532,7 @@ void ipoib_cm_send(struct net_device *dev, struct sk_buff *skb, struct ipoib_cm_ tx_req->skb = skb; addr = ib_dma_map_single(priv->ca, skb->data, skb->len, DMA_TO_DEVICE); if (unlikely(ib_dma_mapping_error(priv->ca, addr))) { - ++priv->stats.tx_errors; + ++dev->stats.tx_errors; dev_kfree_skb_any(skb); return; } @@ -542,7 +542,7 @@ void ipoib_cm_send(struct net_device *dev, struct sk_buff *skb, struct ipoib_cm_ if (unlikely(post_send(priv, tx, tx->tx_head & (ipoib_sendq_size - 1), addr, skb->len))) { ipoib_warn(priv, "post_send failed\n"); - ++priv->stats.tx_errors; + ++dev->stats.tx_errors; ib_dma_unmap_single(priv->ca, addr, skb->len, DMA_TO_DEVICE); dev_kfree_skb_any(skb); } else { @@ -580,8 +580,8 @@ static void ipoib_cm_handle_tx_wc(struct net_device *dev, struct ipoib_cm_tx *tx ib_dma_unmap_single(priv->ca, tx_req->mapping, tx_req->skb->len, DMA_TO_DEVICE); /* FIXME: is this right? Shouldn't we only increment on success? */ - ++priv->stats.tx_packets; - priv->stats.tx_bytes += tx_req->skb->len; + ++dev->stats.tx_packets; + dev->stats.tx_bytes += tx_req->skb->len; dev_kfree_skb_any(tx_req->skb); diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c index 10944888cffd79de3c8b4751ceac2929a0296141..0ec28c302fbf154c999541f4f14f477668530b3e 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c @@ -208,7 +208,7 @@ static void ipoib_ib_handle_rx_wc(struct net_device *dev, struct ib_wc *wc) * this packet and reuse the old buffer. 
*/ if (unlikely(ipoib_alloc_rx_skb(dev, wr_id))) { - ++priv->stats.rx_dropped; + ++dev->stats.rx_dropped; goto repost; } @@ -225,8 +225,8 @@ static void ipoib_ib_handle_rx_wc(struct net_device *dev, struct ib_wc *wc) skb_pull(skb, IPOIB_ENCAP_LEN); dev->last_rx = jiffies; - ++priv->stats.rx_packets; - priv->stats.rx_bytes += skb->len; + ++dev->stats.rx_packets; + dev->stats.rx_bytes += skb->len; skb->dev = dev; /* XXX get correct PACKET_ type here */ @@ -260,8 +260,8 @@ static void ipoib_ib_handle_tx_wc(struct net_device *dev, struct ib_wc *wc) ib_dma_unmap_single(priv->ca, tx_req->mapping, tx_req->skb->len, DMA_TO_DEVICE); - ++priv->stats.tx_packets; - priv->stats.tx_bytes += tx_req->skb->len; + ++dev->stats.tx_packets; + dev->stats.tx_bytes += tx_req->skb->len; dev_kfree_skb_any(tx_req->skb); @@ -281,63 +281,58 @@ static void ipoib_ib_handle_tx_wc(struct net_device *dev, struct ib_wc *wc) wc->status, wr_id, wc->vendor_err); } -int ipoib_poll(struct net_device *dev, int *budget) +int ipoib_poll(struct napi_struct *napi, int budget) { - struct ipoib_dev_priv *priv = netdev_priv(dev); - int max = min(*budget, dev->quota); + struct ipoib_dev_priv *priv = container_of(napi, struct ipoib_dev_priv, napi); + struct net_device *dev = priv->dev; int done; int t; - int empty; int n, i; done = 0; - empty = 0; - while (max) { +poll_more: + while (done < budget) { + int max = (budget - done); + t = min(IPOIB_NUM_WC, max); n = ib_poll_cq(priv->cq, t, priv->ibwc); - for (i = 0; i < n; ++i) { + for (i = 0; i < n; i++) { struct ib_wc *wc = priv->ibwc + i; if (wc->wr_id & IPOIB_CM_OP_SRQ) { ++done; - --max; ipoib_cm_handle_rx_wc(dev, wc); } else if (wc->wr_id & IPOIB_OP_RECV) { ++done; - --max; ipoib_ib_handle_rx_wc(dev, wc); } else ipoib_ib_handle_tx_wc(dev, wc); } - if (n != t) { - empty = 1; + if (n != t) break; - } } - dev->quota -= done; - *budget -= done; - - if (empty) { - netif_rx_complete(dev); + if (done < budget) { + netif_rx_complete(dev, napi); if (unlikely(ib_req_notify_cq(priv->cq, IB_CQ_NEXT_COMP | IB_CQ_REPORT_MISSED_EVENTS)) && - netif_rx_reschedule(dev, 0)) - return 1; - - return 0; + netif_rx_reschedule(dev, napi)) + goto poll_more; } - return 1; + return done; } void ipoib_ib_completion(struct ib_cq *cq, void *dev_ptr) { - netif_rx_schedule(dev_ptr); + struct net_device *dev = dev_ptr; + struct ipoib_dev_priv *priv = netdev_priv(dev); + + netif_rx_schedule(dev, &priv->napi); } static inline int post_send(struct ipoib_dev_priv *priv, @@ -367,8 +362,8 @@ void ipoib_send(struct net_device *dev, struct sk_buff *skb, if (unlikely(skb->len > priv->mcast_mtu + IPOIB_ENCAP_LEN)) { ipoib_warn(priv, "packet len %d (> %d) too long to send, dropping\n", skb->len, priv->mcast_mtu + IPOIB_ENCAP_LEN); - ++priv->stats.tx_dropped; - ++priv->stats.tx_errors; + ++dev->stats.tx_dropped; + ++dev->stats.tx_errors; ipoib_cm_skb_too_long(dev, skb, priv->mcast_mtu); return; } @@ -388,7 +383,7 @@ void ipoib_send(struct net_device *dev, struct sk_buff *skb, addr = ib_dma_map_single(priv->ca, skb->data, skb->len, DMA_TO_DEVICE); if (unlikely(ib_dma_mapping_error(priv->ca, addr))) { - ++priv->stats.tx_errors; + ++dev->stats.tx_errors; dev_kfree_skb_any(skb); return; } @@ -397,7 +392,7 @@ void ipoib_send(struct net_device *dev, struct sk_buff *skb, if (unlikely(post_send(priv, priv->tx_head & (ipoib_sendq_size - 1), address->ah, qpn, addr, skb->len))) { ipoib_warn(priv, "post_send failed\n"); - ++priv->stats.tx_errors; + ++dev->stats.tx_errors; ib_dma_unmap_single(priv->ca, addr, skb->len, DMA_TO_DEVICE); 
dev_kfree_skb_any(skb); } else { @@ -577,7 +572,6 @@ int ipoib_ib_dev_stop(struct net_device *dev, int flush) int i; clear_bit(IPOIB_FLAG_INITIALIZED, &priv->flags); - netif_poll_disable(dev); ipoib_cm_dev_stop(dev); @@ -660,7 +654,6 @@ int ipoib_ib_dev_stop(struct net_device *dev, int flush) msleep(1); } - netif_poll_enable(dev); ib_req_notify_cq(priv->cq, IB_CQ_NEXT_COMP); return 0; diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c index 894b1dcdf3eb31ff0e326aaf9a0b9b793303b8ff..855c9deca8b716569d7014a0117ad220d00b6c73 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_main.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c @@ -98,16 +98,20 @@ int ipoib_open(struct net_device *dev) ipoib_dbg(priv, "bringing up interface\n"); + napi_enable(&priv->napi); set_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags); if (ipoib_pkey_dev_delay_open(dev)) return 0; - if (ipoib_ib_dev_open(dev)) + if (ipoib_ib_dev_open(dev)) { + napi_disable(&priv->napi); return -EINVAL; + } if (ipoib_ib_dev_up(dev)) { ipoib_ib_dev_stop(dev, 1); + napi_disable(&priv->napi); return -EINVAL; } @@ -140,6 +144,7 @@ static int ipoib_stop(struct net_device *dev) ipoib_dbg(priv, "stopping interface\n"); clear_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags); + napi_disable(&priv->napi); netif_stop_queue(dev); @@ -512,7 +517,7 @@ static void neigh_add_path(struct sk_buff *skb, struct net_device *dev) neigh = ipoib_neigh_alloc(skb->dst->neighbour); if (!neigh) { - ++priv->stats.tx_dropped; + ++dev->stats.tx_dropped; dev_kfree_skb_any(skb); return; } @@ -577,7 +582,7 @@ static void neigh_add_path(struct sk_buff *skb, struct net_device *dev) err_path: ipoib_neigh_free(dev, neigh); err_drop: - ++priv->stats.tx_dropped; + ++dev->stats.tx_dropped; dev_kfree_skb_any(skb); spin_unlock(&priv->lock); @@ -626,7 +631,7 @@ static void unicast_arp_send(struct sk_buff *skb, struct net_device *dev, } else __path_add(dev, path); } else { - ++priv->stats.tx_dropped; + ++dev->stats.tx_dropped; dev_kfree_skb_any(skb); } @@ -645,7 +650,7 @@ static void unicast_arp_send(struct sk_buff *skb, struct net_device *dev, skb_push(skb, sizeof *phdr); __skb_queue_tail(&path->queue, skb); } else { - ++priv->stats.tx_dropped; + ++dev->stats.tx_dropped; dev_kfree_skb_any(skb); } @@ -713,7 +718,7 @@ static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev) __skb_queue_tail(&neigh->queue, skb); spin_unlock(&priv->lock); } else { - ++priv->stats.tx_dropped; + ++dev->stats.tx_dropped; dev_kfree_skb_any(skb); } } else { @@ -739,7 +744,7 @@ static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev) IPOIB_QPN(phdr->hwaddr), IPOIB_GID_RAW_ARG(phdr->hwaddr + 4)); dev_kfree_skb_any(skb); - ++priv->stats.tx_dropped; + ++dev->stats.tx_dropped; goto out; } @@ -753,13 +758,6 @@ static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev) return NETDEV_TX_OK; } -static struct net_device_stats *ipoib_get_stats(struct net_device *dev) -{ - struct ipoib_dev_priv *priv = netdev_priv(dev); - - return &priv->stats; -} - static void ipoib_timeout(struct net_device *dev) { struct ipoib_dev_priv *priv = netdev_priv(dev); @@ -775,7 +773,7 @@ static void ipoib_timeout(struct net_device *dev) static int ipoib_hard_header(struct sk_buff *skb, struct net_device *dev, unsigned short type, - void *daddr, void *saddr, unsigned len) + const void *daddr, const void *saddr, unsigned len) { struct ipoib_header *header; @@ -856,11 +854,10 @@ struct ipoib_neigh *ipoib_neigh_alloc(struct neighbour *neighbour) void 
ipoib_neigh_free(struct net_device *dev, struct ipoib_neigh *neigh) { - struct ipoib_dev_priv *priv = netdev_priv(dev); struct sk_buff *skb; *to_ipoib_neigh(neigh->neighbour) = NULL; while ((skb = __skb_dequeue(&neigh->queue))) { - ++priv->stats.tx_dropped; + ++dev->stats.tx_dropped; dev_kfree_skb_any(skb); } if (ipoib_cm_get(neigh)) @@ -935,6 +932,10 @@ void ipoib_dev_cleanup(struct net_device *dev) priv->tx_ring = NULL; } +static const struct header_ops ipoib_header_ops = { + .create = ipoib_hard_header, +}; + static void ipoib_setup(struct net_device *dev) { struct ipoib_dev_priv *priv = netdev_priv(dev); @@ -943,13 +944,12 @@ static void ipoib_setup(struct net_device *dev) dev->stop = ipoib_stop; dev->change_mtu = ipoib_change_mtu; dev->hard_start_xmit = ipoib_start_xmit; - dev->get_stats = ipoib_get_stats; dev->tx_timeout = ipoib_timeout; - dev->hard_header = ipoib_hard_header; + dev->header_ops = &ipoib_header_ops; dev->set_multicast_list = ipoib_set_mcast_list; dev->neigh_setup = ipoib_neigh_setup_dev; - dev->poll = ipoib_poll; - dev->weight = 100; + + netif_napi_add(dev, &priv->napi, ipoib_poll, 100); dev->watchdog_timeo = HZ; @@ -973,8 +973,6 @@ static void ipoib_setup(struct net_device *dev) netif_carrier_off(dev); - SET_MODULE_OWNER(dev); - priv->dev = dev; spin_lock_init(&priv->lock); diff --git a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c index aae367057a569d9f0889c3f2e3dfb16894b40fae..98e904a7f3e8f3226f6119a98eb8a22cdf7bd77f 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c @@ -125,7 +125,7 @@ static void ipoib_mcast_free(struct ipoib_mcast *mcast) } spin_lock_irqsave(&priv->tx_lock, flags); - priv->stats.tx_dropped += tx_dropped; + dev->stats.tx_dropped += tx_dropped; spin_unlock_irqrestore(&priv->tx_lock, flags); kfree(mcast); @@ -320,7 +320,7 @@ ipoib_mcast_sendonly_join_complete(int status, /* Flush out any queued packets */ spin_lock_irq(&priv->tx_lock); while (!skb_queue_empty(&mcast->pkt_queue)) { - ++priv->stats.tx_dropped; + ++dev->stats.tx_dropped; dev_kfree_skb_any(skb_dequeue(&mcast->pkt_queue)); } spin_unlock_irq(&priv->tx_lock); @@ -675,7 +675,7 @@ void ipoib_mcast_send(struct net_device *dev, void *mgid, struct sk_buff *skb) if (!test_bit(IPOIB_MCAST_STARTED, &priv->flags) || !priv->broadcast || !test_bit(IPOIB_MCAST_FLAG_ATTACHED, &priv->broadcast->flags)) { - ++priv->stats.tx_dropped; + ++dev->stats.tx_dropped; dev_kfree_skb_any(skb); goto unlock; } @@ -690,7 +690,7 @@ void ipoib_mcast_send(struct net_device *dev, void *mgid, struct sk_buff *skb) if (!mcast) { ipoib_warn(priv, "unable to allocate memory for " "multicast structure\n"); - ++priv->stats.tx_dropped; + ++dev->stats.tx_dropped; dev_kfree_skb_any(skb); goto out; } @@ -705,7 +705,7 @@ void ipoib_mcast_send(struct net_device *dev, void *mgid, struct sk_buff *skb) if (skb_queue_len(&mcast->pkt_queue) < IPOIB_MAX_MCAST_QUEUE) skb_queue_tail(&mcast->pkt_queue, skb); else { - ++priv->stats.tx_dropped; + ++dev->stats.tx_dropped; dev_kfree_skb_any(skb); } diff --git a/drivers/isdn/divert/divert_procfs.c b/drivers/isdn/divert/divert_procfs.c index 559a0d0244cf38b59d6aa809bddee183089f8c44..4fd4c46892e3306ef3a18e181a4dc6e0735f4a9a 100644 --- a/drivers/isdn/divert/divert_procfs.c +++ b/drivers/isdn/divert/divert_procfs.c @@ -17,6 +17,7 @@ #include #endif #include +#include #include "isdn_divert.h" @@ -284,12 +285,12 @@ divert_dev_init(void) init_waitqueue_head(&rd_queue); #ifdef CONFIG_PROC_FS - 
isdn_proc_entry = proc_mkdir("net/isdn", NULL); + isdn_proc_entry = proc_mkdir("isdn", init_net.proc_net); if (!isdn_proc_entry) return (-1); isdn_divert_entry = create_proc_entry("divert", S_IFREG | S_IRUGO, isdn_proc_entry); if (!isdn_divert_entry) { - remove_proc_entry("net/isdn", NULL); + remove_proc_entry("isdn", init_net.proc_net); return (-1); } isdn_divert_entry->proc_fops = &isdn_fops; @@ -309,7 +310,7 @@ divert_dev_deinit(void) #ifdef CONFIG_PROC_FS remove_proc_entry("divert", isdn_proc_entry); - remove_proc_entry("net/isdn", NULL); + remove_proc_entry("isdn", init_net.proc_net); #endif /* CONFIG_PROC_FS */ return (0); diff --git a/drivers/isdn/hardware/eicon/diva_didd.c b/drivers/isdn/hardware/eicon/diva_didd.c index d755d904e62c693fec2d5444e09e4c677eb5ff40..993b14cf1778bb2ba85b2a43cbd0a7242f2a7370 100644 --- a/drivers/isdn/hardware/eicon/diva_didd.c +++ b/drivers/isdn/hardware/eicon/diva_didd.c @@ -15,6 +15,7 @@ #include #include #include +#include #include "platform.h" #include "di_defs.h" @@ -86,7 +87,7 @@ proc_read(char *page, char **start, off_t off, int count, int *eof, static int DIVA_INIT_FUNCTION create_proc(void) { - proc_net_eicon = proc_mkdir("net/eicon", NULL); + proc_net_eicon = proc_mkdir("eicon", init_net.proc_net); if (proc_net_eicon) { if ((proc_didd = @@ -102,7 +103,7 @@ static int DIVA_INIT_FUNCTION create_proc(void) static void remove_proc(void) { remove_proc_entry(DRIVERLNAME, proc_net_eicon); - remove_proc_entry("net/eicon", NULL); + remove_proc_entry("eicon", init_net.proc_net); } static int DIVA_INIT_FUNCTION divadidd_init(void) diff --git a/drivers/isdn/hysdn/hysdn_procconf.c b/drivers/isdn/hysdn/hysdn_procconf.c index dc477e0aab0ef15c4cf9df56e96405a166f6b8aa..27d890b48f88a6d7d9fab0e78c6a596870391411 100644 --- a/drivers/isdn/hysdn/hysdn_procconf.c +++ b/drivers/isdn/hysdn/hysdn_procconf.c @@ -16,6 +16,7 @@ #include #include #include +#include #include "hysdn_defs.h" @@ -392,7 +393,7 @@ hysdn_procconf_init(void) hysdn_card *card; unsigned char conf_name[20]; - hysdn_proc_entry = proc_mkdir(PROC_SUBDIR_NAME, proc_net); + hysdn_proc_entry = proc_mkdir(PROC_SUBDIR_NAME, init_net.proc_net); if (!hysdn_proc_entry) { printk(KERN_ERR "HYSDN: unable to create hysdn subdir\n"); return (-1); @@ -437,5 +438,5 @@ hysdn_procconf_release(void) card = card->next; /* point to next card */ } - remove_proc_entry(PROC_SUBDIR_NAME, proc_net); + remove_proc_entry(PROC_SUBDIR_NAME, init_net.proc_net); } diff --git a/drivers/isdn/i4l/isdn_net.c b/drivers/isdn/i4l/isdn_net.c index aa83277aba74c78c011e3b071ba5df7c067b165d..7c9cb7e19f2e1c4fefd5db59a3d321f622ff522d 100644 --- a/drivers/isdn/i4l/isdn_net.c +++ b/drivers/isdn/i4l/isdn_net.c @@ -77,7 +77,7 @@ static __inline__ int isdn_net_device_started(isdn_net_dev *n) if (lp->master) dev = lp->master; else - dev = &n->dev; + dev = n->dev; return netif_running(dev); } @@ -90,7 +90,7 @@ static __inline__ void isdn_net_device_wake_queue(isdn_net_local *lp) if (lp->master) netif_wake_queue(lp->master); else - netif_wake_queue(&lp->netdev->dev); + netif_wake_queue(lp->netdev->dev); } /* @@ -102,7 +102,7 @@ static __inline__ void isdn_net_device_stop_queue(isdn_net_local *lp) if (lp->master) netif_stop_queue(lp->master); else - netif_stop_queue(&lp->netdev->dev); + netif_stop_queue(lp->netdev->dev); } /* @@ -287,7 +287,7 @@ isdn_net_unbind_channel(isdn_net_local * lp) BEWARE! This chunk of code cannot be called from hardware interrupt handler. I hope it is true. 
--ANK */ - qdisc_reset(lp->netdev->dev.qdisc); + qdisc_reset(lp->netdev->dev->qdisc); } lp->dialstate = 0; dev->rx_netdev[isdn_dc2minor(lp->isdn_device, lp->isdn_channel)] = NULL; @@ -345,27 +345,27 @@ isdn_net_autohup(void) l->chargetime += l->chargeint; if (time_after(jiffies, l->chargetime + l->chargeint - 2 * HZ)) if (l->outgoing || l->hupflags & ISDN_INHUP) - isdn_net_hangup(&p->dev); + isdn_net_hangup(p->dev); } else if (l->outgoing) { if (l->hupflags & ISDN_CHARGEHUP) { if (l->hupflags & ISDN_WAITCHARGE) { printk(KERN_DEBUG "isdn_net: Hupflags of %s are %X\n", l->name, l->hupflags); - isdn_net_hangup(&p->dev); + isdn_net_hangup(p->dev); } else if (time_after(jiffies, l->chargetime + l->chargeint)) { printk(KERN_DEBUG "isdn_net: %s: chtime = %lu, chint = %d\n", l->name, l->chargetime, l->chargeint); - isdn_net_hangup(&p->dev); + isdn_net_hangup(p->dev); } } else - isdn_net_hangup(&p->dev); + isdn_net_hangup(p->dev); } else if (l->hupflags & ISDN_INHUP) - isdn_net_hangup(&p->dev); + isdn_net_hangup(p->dev); } if(dev->global_flags & ISDN_GLOBAL_STOPPED || (ISDN_NET_DIALMODE(*l) == ISDN_NET_DM_OFF)) { - isdn_net_hangup(&p->dev); + isdn_net_hangup(p->dev); break; } } @@ -579,7 +579,7 @@ isdn_net_dial(void) if (!lp->dial) { printk(KERN_WARNING "%s: phone number deleted?\n", lp->name); - isdn_net_hangup(&p->dev); + isdn_net_hangup(p->dev); break; } anymore = 1; @@ -616,8 +616,8 @@ isdn_net_dial(void) s = "dial suppressed: isdn system stopped"; else s = "dial suppressed: dialmode `off'"; - isdn_net_unreachable(&p->dev, NULL, s); - isdn_net_hangup(&p->dev); + isdn_net_unreachable(p->dev, NULL, s); + isdn_net_hangup(p->dev); break; } cmd.driver = lp->isdn_device; @@ -633,7 +633,7 @@ isdn_net_dial(void) if (!lp->dial) { printk(KERN_WARNING "%s: phone number deleted?\n", lp->name); - isdn_net_hangup(&p->dev); + isdn_net_hangup(p->dev); break; } if (!strncmp(lp->dial->num, "LEASED", strlen("LEASED"))) { @@ -644,8 +644,8 @@ isdn_net_dial(void) if (time_after(jiffies, lp->dialstarted + lp->dialtimeout)) { lp->dialwait_timer = jiffies + lp->dialwait; lp->dialstarted = 0; - isdn_net_unreachable(&p->dev, NULL, "dial: timed out"); - isdn_net_hangup(&p->dev); + isdn_net_unreachable(p->dev, NULL, "dial: timed out"); + isdn_net_hangup(p->dev); break; } @@ -674,9 +674,9 @@ isdn_net_dial(void) if (lp->dialtimeout == 0) { lp->dialwait_timer = jiffies + lp->dialwait; lp->dialstarted = 0; - isdn_net_unreachable(&p->dev, NULL, "dial: tried all numbers dialmax times"); + isdn_net_unreachable(p->dev, NULL, "dial: tried all numbers dialmax times"); } - isdn_net_hangup(&p->dev); + isdn_net_hangup(p->dev); break; } } @@ -758,7 +758,7 @@ isdn_net_dial(void) cmd.arg = lp->isdn_channel + (lp->l3_proto << 8); isdn_command(&cmd); if (lp->dtimer++ > ISDN_TIMER_DTIMEOUT15) - isdn_net_hangup(&p->dev); + isdn_net_hangup(p->dev); else { anymore = 1; lp->dialstate++; @@ -781,7 +781,7 @@ isdn_net_dial(void) printk(KERN_DEBUG "dialtimer4: %d\n", lp->dtimer); #endif if (lp->dtimer++ > ISDN_TIMER_DTIMEOUT10) - isdn_net_hangup(&p->dev); + isdn_net_hangup(p->dev); else anymore = 1; break; @@ -1618,7 +1618,7 @@ isdn_net_ciscohdlck_slarp_send_reply(isdn_net_local *lp) __be32 addr = 0; /* local ipv4 address */ __be32 mask = 0; /* local netmask */ - if ((in_dev = lp->netdev->dev.ip_ptr) != NULL) { + if ((in_dev = lp->netdev->dev->ip_ptr) != NULL) { /* take primary(first) address of interface */ struct in_ifaddr *ifa = in_dev->ifa_list; if (ifa != NULL) { @@ -1866,61 +1866,21 @@ isdn_net_rcv_skb(int idx, struct sk_buff *skb) 
isdn_net_local *lp = p->local; if ((lp->flags & ISDN_NET_CONNECTED) && (!lp->dialstate)) { - isdn_net_receive(&p->dev, skb); + isdn_net_receive(p->dev, skb); return 1; } } return 0; } -static int -my_eth_header(struct sk_buff *skb, struct net_device *dev, unsigned short type, - void *daddr, void *saddr, unsigned len) -{ - struct ethhdr *eth = (struct ethhdr *) skb_push(skb, ETH_HLEN); - - /* - * Set the protocol type. For a packet of type ETH_P_802_3 we - * put the length here instead. It is up to the 802.2 layer to - * carry protocol information. - */ - - if (type != ETH_P_802_3) - eth->h_proto = htons(type); - else - eth->h_proto = htons(len); - - /* - * Set the source hardware address. - */ - if (saddr) - memcpy(eth->h_source, saddr, dev->addr_len); - else - memcpy(eth->h_source, dev->dev_addr, dev->addr_len); - - /* - * Anyway, the loopback-device should never use this function... - */ - - if (dev->flags & (IFF_LOOPBACK | IFF_NOARP)) { - memset(eth->h_dest, 0, dev->addr_len); - return ETH_HLEN /*(dev->hard_header_len)*/; - } - if (daddr) { - memcpy(eth->h_dest, daddr, dev->addr_len); - return ETH_HLEN /*dev->hard_header_len*/; - } - return -ETH_HLEN /*dev->hard_header_len*/; -} - /* * build an header * depends on encaps that is being used. */ -static int -isdn_net_header(struct sk_buff *skb, struct net_device *dev, unsigned short type, - void *daddr, void *saddr, unsigned plen) +static int isdn_net_header(struct sk_buff *skb, struct net_device *dev, + unsigned short type, + const void *daddr, const void *saddr, unsigned plen) { isdn_net_local *lp = dev->priv; unsigned char *p; @@ -1928,7 +1888,7 @@ isdn_net_header(struct sk_buff *skb, struct net_device *dev, unsigned short type switch (lp->p_encap) { case ISDN_NET_ENCAP_ETHER: - len = my_eth_header(skb, dev, type, daddr, saddr, plen); + len = eth_header(skb, dev, type, daddr, saddr, plen); break; #ifdef CONFIG_ISDN_PPP case ISDN_NET_ENCAP_SYNCPPP: @@ -2005,6 +1965,32 @@ isdn_net_rebuild_header(struct sk_buff *skb) return ret; } +static int isdn_header_cache(const struct neighbour *neigh, struct hh_cache *hh) +{ + const struct net_device *dev = neigh->dev; + isdn_net_local *lp = dev->priv; + + if (lp->p_encap == ISDN_NET_ENCAP_ETHER) + return eth_header_cache(neigh, hh); + return -1; +} + +static void isdn_header_cache_update(struct hh_cache *hh, + const struct net_device *dev, + const unsigned char *haddr) +{ + isdn_net_local *lp = dev->priv; + if (lp->p_encap == ISDN_NET_ENCAP_ETHER) + return eth_header_cache_update(hh, dev, haddr); +} + +static const struct header_ops isdn_header_ops = { + .create = isdn_net_header, + .rebuild = isdn_net_rebuild_header, + .cache = isdn_header_cache, + .cache_update = isdn_header_cache_update, +}; + /* * Interface-setup. 
(just after registering a new interface) */ @@ -2012,18 +1998,12 @@ static int isdn_net_init(struct net_device *ndev) { ushort max_hlhdr_len = 0; - isdn_net_local *lp = (isdn_net_local *) ndev->priv; - int drvidx, i; + int drvidx; ether_setup(ndev); - lp->org_hhc = ndev->hard_header_cache; - lp->org_hcu = ndev->header_cache_update; + ndev->header_ops = NULL; /* Setup the generic properties */ - - ndev->hard_header = NULL; - ndev->hard_header_cache = NULL; - ndev->header_cache_update = NULL; ndev->mtu = 1500; ndev->flags = IFF_NOARP|IFF_POINTOPOINT; ndev->type = ARPHRD_ETHER; @@ -2032,9 +2012,6 @@ isdn_net_init(struct net_device *ndev) /* for clients with MPPP maybe higher values better */ ndev->tx_queue_len = 30; - for (i = 0; i < ETH_ALEN; i++) - ndev->broadcast[i] = 0xff; - /* The ISDN-specific entries in the device structure. */ ndev->open = &isdn_net_open; ndev->hard_start_xmit = &isdn_net_start_xmit; @@ -2052,7 +2029,6 @@ isdn_net_init(struct net_device *ndev) ndev->hard_header_len = ETH_HLEN + max_hlhdr_len; ndev->stop = &isdn_net_close; ndev->get_stats = &isdn_net_get_stats; - ndev->rebuild_header = &isdn_net_rebuild_header; ndev->do_ioctl = NULL; return 0; } @@ -2530,6 +2506,42 @@ isdn_net_force_dial(char *name) return (isdn_net_force_dial_lp(p->local)); } +/* + * Helper for alloc_netdev() + */ +static void _isdn_setup(struct net_device *dev) +{ + isdn_net_local *lp = dev->priv; + + dev->flags = IFF_NOARP | IFF_POINTOPOINT; + lp->p_encap = ISDN_NET_ENCAP_RAWIP; + lp->magic = ISDN_NET_MAGIC; + lp->last = lp; + lp->next = lp; + lp->isdn_device = -1; + lp->isdn_channel = -1; + lp->pre_device = -1; + lp->pre_channel = -1; + lp->exclusive = -1; + lp->ppp_slot = -1; + lp->pppbind = -1; + skb_queue_head_init(&lp->super_tx_queue); + lp->l2_proto = ISDN_PROTO_L2_X75I; + lp->l3_proto = ISDN_PROTO_L3_TRANS; + lp->triggercps = 6000; + lp->slavedelay = 10 * HZ; + lp->hupflags = ISDN_INHUP; /* Do hangup even on incoming calls */ + lp->onhtime = 10; /* Default hangup-time for saving costs */ + lp->dialmax = 1; + /* Hangup before Callback, manual dial */ + lp->flags = ISDN_NET_CBHUP | ISDN_NET_DM_MANUAL; + lp->cbdelay = 25; /* Wait 5 secs before Callback */ + lp->dialtimeout = -1; /* Infinite Dial-Timeout */ + lp->dialwait = 5 * HZ; /* Wait 5 sec. after failed dial */ + lp->dialstarted = 0; /* Jiffies of last dial-start */ + lp->dialwait_timer = 0; /* Jiffies of earliest next dial-start */ +} + /* * Allocate a new network-interface and initialize its data structures. 
*/ @@ -2543,23 +2555,21 @@ isdn_net_new(char *name, struct net_device *master) printk(KERN_WARNING "isdn_net: interface %s already exists\n", name); return NULL; } + if (name == NULL) + name = " "; if (!(netdev = kzalloc(sizeof(isdn_net_dev), GFP_KERNEL))) { printk(KERN_WARNING "isdn_net: Could not allocate net-device\n"); return NULL; } - if (!(netdev->local = kzalloc(sizeof(isdn_net_local), GFP_KERNEL))) { - printk(KERN_WARNING "isdn_net: Could not allocate device locals\n"); + netdev->dev = alloc_netdev(sizeof(isdn_net_local), name, _isdn_setup); + if (!netdev->dev) { + printk(KERN_WARNING "isdn_net: Could not allocate network device\n"); kfree(netdev); return NULL; } - if (name == NULL) - strcpy(netdev->local->name, " "); - else - strcpy(netdev->local->name, name); - strcpy(netdev->dev.name, netdev->local->name); - netdev->dev.priv = netdev->local; - netdev->dev.init = isdn_net_init; - netdev->local->p_encap = ISDN_NET_ENCAP_RAWIP; + netdev->local = netdev->dev->priv; + strcpy(netdev->local->name, netdev->dev->name); + netdev->dev->init = isdn_net_init; if (master) { /* Device shall be a slave */ struct net_device *p = (((isdn_net_local *) master->priv)->slave); @@ -2571,60 +2581,33 @@ isdn_net_new(char *name, struct net_device *master) q = p; p = (((isdn_net_local *) p->priv)->slave); } - ((isdn_net_local *) q->priv)->slave = &(netdev->dev); + ((isdn_net_local *) q->priv)->slave = netdev->dev; } else { /* Device shall be a master */ /* * Watchdog timer (currently) for master only. */ - netdev->dev.tx_timeout = isdn_net_tx_timeout; - netdev->dev.watchdog_timeo = ISDN_NET_TX_TIMEOUT; - if (register_netdev(&netdev->dev) != 0) { + netdev->dev->tx_timeout = isdn_net_tx_timeout; + netdev->dev->watchdog_timeo = ISDN_NET_TX_TIMEOUT; + if (register_netdev(netdev->dev) != 0) { printk(KERN_WARNING "isdn_net: Could not register net-device\n"); - kfree(netdev->local); + free_netdev(netdev->dev); kfree(netdev); return NULL; } } - netdev->local->magic = ISDN_NET_MAGIC; - netdev->queue = netdev->local; spin_lock_init(&netdev->queue_lock); - netdev->local->last = netdev->local; netdev->local->netdev = netdev; - netdev->local->next = netdev->local; INIT_WORK(&netdev->local->tqueue, isdn_net_softint); spin_lock_init(&netdev->local->xmit_lock); - netdev->local->isdn_device = -1; - netdev->local->isdn_channel = -1; - netdev->local->pre_device = -1; - netdev->local->pre_channel = -1; - netdev->local->exclusive = -1; - netdev->local->ppp_slot = -1; - netdev->local->pppbind = -1; - skb_queue_head_init(&netdev->local->super_tx_queue); - netdev->local->l2_proto = ISDN_PROTO_L2_X75I; - netdev->local->l3_proto = ISDN_PROTO_L3_TRANS; - netdev->local->triggercps = 6000; - netdev->local->slavedelay = 10 * HZ; - netdev->local->hupflags = ISDN_INHUP; /* Do hangup even on incoming calls */ - netdev->local->onhtime = 10; /* Default hangup-time for saving costs - of those who forget configuring this */ - netdev->local->dialmax = 1; - netdev->local->flags = ISDN_NET_CBHUP | ISDN_NET_DM_MANUAL; /* Hangup before Callback, manual dial */ - netdev->local->cbdelay = 25; /* Wait 5 secs before Callback */ - netdev->local->dialtimeout = -1; /* Infinite Dial-Timeout */ - netdev->local->dialwait = 5 * HZ; /* Wait 5 sec. 
after failed dial */ - netdev->local->dialstarted = 0; /* Jiffies of last dial-start */ - netdev->local->dialwait_timer = 0; /* Jiffies of earliest next dial-start */ - /* Put into to netdev-chain */ netdev->next = (void *) dev->netdev; dev->netdev = netdev; - return netdev->dev.name; + return netdev->dev->name; } char * @@ -2649,7 +2632,7 @@ isdn_net_newslave(char *parm) /* Master must not be started yet */ if (isdn_net_device_started(n)) return NULL; - return (isdn_net_new(newname, &(n->dev))); + return (isdn_net_new(newname, n->dev)); } return NULL; } @@ -2718,9 +2701,9 @@ isdn_net_setcfg(isdn_net_ioctl_cfg * cfg) lp->name); return -EINVAL; #else - p->dev.type = ARPHRD_PPP; /* change ARP type */ - p->dev.addr_len = 0; - p->dev.do_ioctl = isdn_ppp_dev_ioctl; + p->dev->type = ARPHRD_PPP; /* change ARP type */ + p->dev->addr_len = 0; + p->dev->do_ioctl = isdn_ppp_dev_ioctl; #endif break; case ISDN_NET_ENCAP_X25IFACE: @@ -2729,12 +2712,12 @@ isdn_net_setcfg(isdn_net_ioctl_cfg * cfg) p->local->name); return -EINVAL; #else - p->dev.type = ARPHRD_X25; /* change ARP type */ - p->dev.addr_len = 0; + p->dev->type = ARPHRD_X25; /* change ARP type */ + p->dev->addr_len = 0; #endif break; case ISDN_NET_ENCAP_CISCOHDLCK: - p->dev.do_ioctl = isdn_ciscohdlck_dev_ioctl; + p->dev->do_ioctl = isdn_ciscohdlck_dev_ioctl; break; default: if( cfg->p_encap >= 0 && @@ -2861,21 +2844,14 @@ isdn_net_setcfg(isdn_net_ioctl_cfg * cfg) } if (cfg->p_encap != lp->p_encap) { if (cfg->p_encap == ISDN_NET_ENCAP_RAWIP) { - p->dev.hard_header = NULL; - p->dev.hard_header_cache = NULL; - p->dev.header_cache_update = NULL; - p->dev.flags = IFF_NOARP|IFF_POINTOPOINT; + p->dev->header_ops = NULL; + p->dev->flags = IFF_NOARP|IFF_POINTOPOINT; } else { - p->dev.hard_header = isdn_net_header; - if (cfg->p_encap == ISDN_NET_ENCAP_ETHER) { - p->dev.hard_header_cache = lp->org_hhc; - p->dev.header_cache_update = lp->org_hcu; - p->dev.flags = IFF_BROADCAST | IFF_MULTICAST; - } else { - p->dev.hard_header_cache = NULL; - p->dev.header_cache_update = NULL; - p->dev.flags = IFF_NOARP|IFF_POINTOPOINT; - } + p->dev->header_ops = &isdn_header_ops; + if (cfg->p_encap == ISDN_NET_ENCAP_ETHER) + p->dev->flags = IFF_BROADCAST | IFF_MULTICAST; + else + p->dev->flags = IFF_NOARP|IFF_POINTOPOINT; } } lp->p_encap = cfg->p_encap; @@ -3095,7 +3071,7 @@ isdn_net_force_hangup(char *name) isdn_net_hangup(q); q = (((isdn_net_local *) q->priv)->slave); } - isdn_net_hangup(&p->dev); + isdn_net_hangup(p->dev); return 0; } return -ENODEV; @@ -3123,13 +3099,11 @@ isdn_net_realrm(isdn_net_dev * p, isdn_net_dev * q) isdn_unexclusive_channel(p->local->pre_device, p->local->pre_channel); if (p->local->master) { /* It's a slave-device, so update master's slave-pointer if necessary */ - if (((isdn_net_local *) (p->local->master->priv))->slave == &p->dev) + if (((isdn_net_local *) (p->local->master->priv))->slave == p->dev) ((isdn_net_local *) (p->local->master->priv))->slave = p->local->slave; } else { /* Unregister only if it's a master-device */ - p->dev.hard_header_cache = p->local->org_hhc; - p->dev.header_cache_update = p->local->org_hcu; - unregister_netdev(&p->dev); + unregister_netdev(p->dev); } /* Unlink device from chain */ spin_lock_irqsave(&dev->lock, flags); @@ -3157,7 +3131,7 @@ isdn_net_realrm(isdn_net_dev * p, isdn_net_dev * q) /* If no more net-devices remain, disable auto-hangup timer */ if (dev->netdev == NULL) isdn_timer_ctrl(ISDN_TIMER_NETHANGUP, 0); - kfree(p->local); + free_netdev(p->dev); kfree(p); return 0; diff --git 
a/drivers/isdn/i4l/isdn_ppp.c b/drivers/isdn/i4l/isdn_ppp.c index 387392cb3d6804fabbf28b713f2a4ac009e81b97..0e5e59f843440364d6ab3dfa4f51145dfa7aa51a 100644 --- a/drivers/isdn/i4l/isdn_ppp.c +++ b/drivers/isdn/i4l/isdn_ppp.c @@ -360,7 +360,7 @@ isdn_ppp_release(int min, struct file *file) * isdn_ppp_free() sets is->lp to NULL and lp->ppp_slot to -1 * removing the IPPP_CONNECT flag omits calling of isdn_ppp_wakeup_daemon() */ - isdn_net_hangup(&p->dev); + isdn_net_hangup(p->dev); } for (i = 0; i < NUM_RCV_BUFFS; i++) { kfree(is->rq[i].buf); @@ -531,7 +531,7 @@ isdn_ppp_ioctl(int min, struct file *file, unsigned int cmd, unsigned long arg) if (lp) { /* OK .. we are ready to send buffers */ is->pppcfg = val; /* isdn_ppp_xmit test for SC_ENABLE_IP !!! */ - netif_wake_queue(&lp->netdev->dev); + netif_wake_queue(lp->netdev->dev); break; } } @@ -1023,7 +1023,7 @@ void isdn_ppp_receive(isdn_net_dev * net_dev, isdn_net_local * lp, struct sk_buf static void isdn_ppp_push_higher(isdn_net_dev * net_dev, isdn_net_local * lp, struct sk_buff *skb, int proto) { - struct net_device *dev = &net_dev->dev; + struct net_device *dev = net_dev->dev; struct ippp_struct *is, *mis; isdn_net_local *mlp = NULL; int slot; diff --git a/drivers/media/dvb/dvb-core/dvb_net.c b/drivers/media/dvb/dvb-core/dvb_net.c index 2117377c141d206cce3d2710a1deeb93916efdb1..a33eb5988c42e8aa1fcf64c1959084617c3d8e8e 100644 --- a/drivers/media/dvb/dvb-core/dvb_net.c +++ b/drivers/media/dvb/dvb-core/dvb_net.c @@ -1220,10 +1220,17 @@ static struct net_device_stats * dvb_net_get_stats(struct net_device *dev) return &((struct dvb_net_priv*) dev->priv)->stats; } +static const struct header_ops dvb_header_ops = { + .create = eth_header, + .parse = eth_header_parse, + .rebuild = eth_rebuild_header, +}; + static void dvb_net_setup(struct net_device *dev) { ether_setup(dev); + dev->header_ops = &dvb_header_ops; dev->open = dvb_net_open; dev->stop = dvb_net_stop; dev->hard_start_xmit = dvb_net_tx; @@ -1232,7 +1239,7 @@ static void dvb_net_setup(struct net_device *dev) dev->set_mac_address = dvb_net_set_mac; dev->mtu = 4096; dev->mc_count = 0; - dev->hard_header_cache = NULL; + dev->flags |= IFF_NOARP; } diff --git a/drivers/message/fusion/mptlan.c b/drivers/message/fusion/mptlan.c index 01fc397fdd979f7783bcd90d6c8b5069d1bd0c9e..3da4c37846ecc91f5bbc0f7b709e272fe014c7c6 100644 --- a/drivers/message/fusion/mptlan.c +++ b/drivers/message/fusion/mptlan.c @@ -1427,8 +1427,6 @@ mpt_register_lan_device (MPT_ADAPTER *mpt_dev, int pnum) dlprintk((KERN_INFO MYNAM ": Finished registering dev " "and setting initial values\n")); - SET_MODULE_OWNER(dev); - if (register_netdev(dev) != 0) { free_netdev(dev); dev = NULL; diff --git a/drivers/net/3c501.c b/drivers/net/3c501.c index 4bee99ba7dbb7a6a35f62929e995d5c75c53e047..be71868d15132d06fe28e062798ae19e0f255f17 100644 --- a/drivers/net/3c501.c +++ b/drivers/net/3c501.c @@ -174,8 +174,6 @@ struct net_device * __init el1_probe(int unit) mem_start = dev->mem_start & 7; } - SET_MODULE_OWNER(dev); - if (io > 0x1ff) { /* Check a single specified location. 
*/ err = el1_probe1(dev, io); } else if (io != 0) { @@ -317,7 +315,6 @@ static int __init el1_probe1(struct net_device *dev, int ioaddr) dev->tx_timeout = &el_timeout; dev->watchdog_timeo = HZ; dev->stop = &el1_close; - dev->get_stats = &el1_get_stats; dev->set_multicast_list = &set_multicast_list; dev->ethtool_ops = &netdev_ethtool_ops; return 0; @@ -376,7 +373,7 @@ static void el_timeout(struct net_device *dev) if (el_debug) printk (KERN_DEBUG "%s: transmit timed out, txsr %#2x axsr=%02x rxsr=%02x.\n", dev->name, inb(TX_STATUS), inb(AX_STATUS), inb(RX_STATUS)); - lp->stats.tx_errors++; + dev->stats.tx_errors++; outb(TX_NORM, TX_CMD); outb(RX_NORM, RX_CMD); outb(AX_OFF, AX_CMD); /* Just trigger a false interrupt. */ @@ -443,7 +440,7 @@ static int el_start_xmit(struct sk_buff *skb, struct net_device *dev) lp->tx_pkt_start = gp_start; lp->collisions = 0; - lp->stats.tx_bytes += skb->len; + dev->stats.tx_bytes += skb->len; /* * Command mode with status cleared should [in theory] @@ -590,7 +587,7 @@ static irqreturn_t el_interrupt(int irq, void *dev_id) printk (KERN_DEBUG "%s: Transmit failed 16 times, Ethernet jammed?\n",dev->name); outb(AX_SYS, AX_CMD); lp->txing = 0; - lp->stats.tx_aborted_errors++; + dev->stats.tx_aborted_errors++; netif_wake_queue(dev); } else if (txsr & TX_COLLISION) @@ -608,7 +605,7 @@ static irqreturn_t el_interrupt(int irq, void *dev_id) outb(AX_SYS, AX_CMD); outw(lp->tx_pkt_start, GP_LOW); outb(AX_XMIT, AX_CMD); - lp->stats.collisions++; + dev->stats.collisions++; spin_unlock(&lp->lock); goto out; } @@ -617,7 +614,7 @@ static irqreturn_t el_interrupt(int irq, void *dev_id) /* * It worked.. we will now fall through and receive */ - lp->stats.tx_packets++; + dev->stats.tx_packets++; if (el_debug > 6) printk(KERN_DEBUG " Tx succeeded %s\n", (txsr & TX_RDY) ? "." : "but tx is busy!"); @@ -642,10 +639,10 @@ static irqreturn_t el_interrupt(int irq, void *dev_id) * Just reading rx_status fixes most errors. */ if (rxsr & RX_MISSED) - lp->stats.rx_missed_errors++; + dev->stats.rx_missed_errors++; else if (rxsr & RX_RUNT) { /* Handled to avoid board lock-up. */ - lp->stats.rx_length_errors++; + dev->stats.rx_length_errors++; if (el_debug > 5) printk(KERN_DEBUG " runt.\n"); } @@ -696,7 +693,6 @@ static irqreturn_t el_interrupt(int irq, void *dev_id) static void el_receive(struct net_device *dev) { - struct net_local *lp = netdev_priv(dev); int ioaddr = dev->base_addr; int pkt_len; struct sk_buff *skb; @@ -710,7 +706,7 @@ static void el_receive(struct net_device *dev) { if (el_debug) printk(KERN_DEBUG "%s: bogus packet, length=%d\n", dev->name, pkt_len); - lp->stats.rx_over_errors++; + dev->stats.rx_over_errors++; return; } @@ -729,7 +725,7 @@ static void el_receive(struct net_device *dev) if (skb == NULL) { printk(KERN_INFO "%s: Memory squeeze, dropping packet.\n", dev->name); - lp->stats.rx_dropped++; + dev->stats.rx_dropped++; return; } else @@ -744,8 +740,8 @@ static void el_receive(struct net_device *dev) skb->protocol=eth_type_trans(skb,dev); netif_rx(skb); dev->last_rx = jiffies; - lp->stats.rx_packets++; - lp->stats.rx_bytes+=pkt_len; + dev->stats.rx_packets++; + dev->stats.rx_bytes+=pkt_len; } return; } @@ -812,23 +808,6 @@ static int el1_close(struct net_device *dev) return 0; } -/** - * el1_get_stats: - * @dev: The card to get the statistics for - * - * In smarter devices this function is needed to pull statistics off the - * board itself. The 3c501 has no hardware statistics. We maintain them all - * so they are by definition always up to date. 
- * - * Returns the statistics for the card from the card private data - */ - -static struct net_device_stats *el1_get_stats(struct net_device *dev) -{ - struct net_local *lp = netdev_priv(dev); - return &lp->stats; -} - /** * set_multicast_list: * @dev: The device to adjust diff --git a/drivers/net/3c501.h b/drivers/net/3c501.h index c56a2c62f7deaf094cf1b7e65dcb6bb8ee719bf8..cfec64efff78017b5db4ee3dd6d24ac42a983e32 100644 --- a/drivers/net/3c501.h +++ b/drivers/net/3c501.h @@ -11,7 +11,6 @@ static irqreturn_t el_interrupt(int irq, void *dev_id); static void el_receive(struct net_device *dev); static void el_reset(struct net_device *dev); static int el1_close(struct net_device *dev); -static struct net_device_stats *el1_get_stats(struct net_device *dev); static void set_multicast_list(struct net_device *dev); static const struct ethtool_ops netdev_ethtool_ops; @@ -29,7 +28,6 @@ static int el_debug = EL_DEBUG; struct net_local { - struct net_device_stats stats; int tx_pkt_start; /* The length of the current Tx packet. */ int collisions; /* Tx collisions this packet */ int loading; /* Spot buffer load collisions */ diff --git a/drivers/net/3c503.c b/drivers/net/3c503.c index bc7e906571d38ec85ccad529f5858e403f80cbd1..9c23336750e240410e33cdd7b6fbf177100802eb 100644 --- a/drivers/net/3c503.c +++ b/drivers/net/3c503.c @@ -95,8 +95,6 @@ static int __init do_el2_probe(struct net_device *dev) int base_addr = dev->base_addr; int irq = dev->irq; - SET_MODULE_OWNER(dev); - if (base_addr > 0x1ff) /* Check a single specified location. */ return el2_probe1(dev, base_addr); else if (base_addr != 0) /* Don't probe at all. */ @@ -179,6 +177,7 @@ el2_probe1(struct net_device *dev, int ioaddr) int i, iobase_reg, membase_reg, saved_406, wordlength, retval; static unsigned version_printed; unsigned long vendor_id; + DECLARE_MAC_BUF(mac); if (!request_region(ioaddr, EL2_IO_EXTENT, DRV_NAME)) return -EBUSY; @@ -228,7 +227,8 @@ el2_probe1(struct net_device *dev, int ioaddr) /* Retrieve and print the ethernet address. */ for (i = 0; i < 6; i++) - printk(" %2.2x", dev->dev_addr[i] = inb(ioaddr + i)); + dev->dev_addr[i] = inb(ioaddr + i); + printk("%s", print_mac(mac, dev->dev_addr)); /* Map the 8390 back into the window. 
*/ outb(ECNTRL_THIN, ioaddr + 0x406); diff --git a/drivers/net/3c505.c b/drivers/net/3c505.c index e985a85a56237b6bccb13ac689a92e061ff418d1..9c6573419f5a3174d67b0ec92462983c3abcb315 100644 --- a/drivers/net/3c505.c +++ b/drivers/net/3c505.c @@ -167,21 +167,6 @@ static int elp_debug; * 3 = messages when interrupts received */ -/***************************************************************** - * - * useful macros - * - *****************************************************************/ - -#ifndef TRUE -#define TRUE 1 -#endif - -#ifndef FALSE -#define FALSE 0 -#endif - - /***************************************************************** * * List of I/O-addresses we try to auto-sense @@ -270,7 +255,7 @@ static inline void set_hsf(struct net_device *dev, int hsf) spin_unlock_irqrestore(&adapter->lock, flags); } -static int start_receive(struct net_device *, pcb_struct *); +static bool start_receive(struct net_device *, pcb_struct *); static inline void adapter_reset(struct net_device *dev) { @@ -328,28 +313,28 @@ static inline void check_3c505_dma(struct net_device *dev) } /* Primitive functions used by send_pcb() */ -static inline unsigned int send_pcb_slow(unsigned int base_addr, unsigned char byte) +static inline bool send_pcb_slow(unsigned int base_addr, unsigned char byte) { unsigned long timeout; outb_command(byte, base_addr); for (timeout = jiffies + 5*HZ/100; time_before(jiffies, timeout);) { if (inb_status(base_addr) & HCRE) - return FALSE; + return false; } printk(KERN_WARNING "3c505: send_pcb_slow timed out\n"); - return TRUE; + return true; } -static inline unsigned int send_pcb_fast(unsigned int base_addr, unsigned char byte) +static inline bool send_pcb_fast(unsigned int base_addr, unsigned char byte) { unsigned int timeout; outb_command(byte, base_addr); for (timeout = 0; timeout < 40000; timeout++) { if (inb_status(base_addr) & HCRE) - return FALSE; + return false; } printk(KERN_WARNING "3c505: send_pcb_fast timed out\n"); - return TRUE; + return true; } /* Check to see if the receiver needs restarting, and kick it if so */ @@ -386,7 +371,7 @@ static inline void prime_rx(struct net_device *dev) * timeout is reduced to 500us). 
*/ -static int send_pcb(struct net_device *dev, pcb_struct * pcb) +static bool send_pcb(struct net_device *dev, pcb_struct * pcb) { int i; unsigned long timeout; @@ -396,14 +381,14 @@ static int send_pcb(struct net_device *dev, pcb_struct * pcb) check_3c505_dma(dev); if (adapter->dmaing && adapter->current_dma.direction == 0) - return FALSE; + return false; /* Avoid contention */ if (test_and_set_bit(1, &adapter->send_pcb_semaphore)) { if (elp_debug >= 3) { printk(KERN_DEBUG "%s: send_pcb entered while threaded\n", dev->name); } - return FALSE; + return false; } /* * load each byte into the command register and @@ -435,7 +420,7 @@ static int send_pcb(struct net_device *dev, pcb_struct * pcb) switch (GET_ASF(dev->base_addr)) { case ASF_PCB_ACK: adapter->send_pcb_semaphore = 0; - return TRUE; + return true; case ASF_PCB_NAK: #ifdef ELP_DEBUG @@ -453,7 +438,7 @@ static int send_pcb(struct net_device *dev, pcb_struct * pcb) spin_unlock_irqrestore(&adapter->lock, flags); abort: adapter->send_pcb_semaphore = 0; - return FALSE; + return false; } @@ -470,7 +455,7 @@ static int send_pcb(struct net_device *dev, pcb_struct * pcb) * *****************************************************************/ -static int receive_pcb(struct net_device *dev, pcb_struct * pcb) +static bool receive_pcb(struct net_device *dev, pcb_struct * pcb) { int i, j; int total_length; @@ -487,7 +472,7 @@ static int receive_pcb(struct net_device *dev, pcb_struct * pcb) while (((stat = get_status(dev->base_addr)) & ACRF) == 0 && time_before(jiffies, timeout)); if (time_after_eq(jiffies, timeout)) { TIMEOUT_MSG(__LINE__); - return FALSE; + return false; } pcb->command = inb_command(dev->base_addr); @@ -497,14 +482,14 @@ static int receive_pcb(struct net_device *dev, pcb_struct * pcb) if (time_after_eq(jiffies, timeout)) { TIMEOUT_MSG(__LINE__); printk(KERN_INFO "%s: status %02x\n", dev->name, stat); - return FALSE; + return false; } pcb->length = inb_command(dev->base_addr); if (pcb->length > MAX_PCB_DATA) { INVALID_PCB_MSG(pcb->length); adapter_reset(dev); - return FALSE; + return false; } /* read the data */ spin_lock_irqsave(&adapter->lock, flags); @@ -519,7 +504,7 @@ static int receive_pcb(struct net_device *dev, pcb_struct * pcb) spin_unlock_irqrestore(&adapter->lock, flags); if (j >= 20000) { TIMEOUT_MSG(__LINE__); - return FALSE; + return false; } /* woops, the last "data" byte was really the length! 
*/ total_length = pcb->data.raw[--i]; @@ -529,7 +514,7 @@ static int receive_pcb(struct net_device *dev, pcb_struct * pcb) if (elp_debug >= 2) printk(KERN_WARNING "%s: mangled PCB received\n", dev->name); set_hsf(dev, HSF_PCB_NAK); - return FALSE; + return false; } if (pcb->command == CMD_RECEIVE_PACKET_COMPLETE) { @@ -538,14 +523,14 @@ static int receive_pcb(struct net_device *dev, pcb_struct * pcb) set_hsf(dev, HSF_PCB_NAK); printk(KERN_WARNING "%s: PCB rejected, transfer in progress and backlog full\n", dev->name); pcb->command = 0; - return TRUE; + return true; } else { pcb->command = 0xff; } } } set_hsf(dev, HSF_PCB_ACK); - return TRUE; + return true; } /****************************************************** @@ -555,9 +540,9 @@ static int receive_pcb(struct net_device *dev, pcb_struct * pcb) * ******************************************************/ -static int start_receive(struct net_device *dev, pcb_struct * tx_pcb) +static bool start_receive(struct net_device *dev, pcb_struct * tx_pcb) { - int status; + bool status; elp_device *adapter = dev->priv; if (elp_debug >= 3) @@ -984,7 +969,7 @@ static int elp_open(struct net_device *dev) * ******************************************************/ -static int send_packet(struct net_device *dev, struct sk_buff *skb) +static bool send_packet(struct net_device *dev, struct sk_buff *skb) { elp_device *adapter = dev->priv; unsigned long target; @@ -998,7 +983,7 @@ static int send_packet(struct net_device *dev, struct sk_buff *skb) if (test_and_set_bit(0, (void *) &adapter->busy)) { if (elp_debug >= 2) printk(KERN_DEBUG "%s: transmit blocked\n", dev->name); - return FALSE; + return false; } adapter->stats.tx_bytes += nlen; @@ -1015,7 +1000,7 @@ static int send_packet(struct net_device *dev, struct sk_buff *skb) if (!send_pcb(dev, &adapter->tx_pcb)) { adapter->busy = 0; - return FALSE; + return false; } /* if this happens, we die */ if (test_and_set_bit(0, (void *) &adapter->dmaing)) @@ -1047,7 +1032,7 @@ static int send_packet(struct net_device *dev, struct sk_buff *skb) if (elp_debug >= 3) printk(KERN_DEBUG "%s: DMA transfer started\n", dev->name); - return TRUE; + return true; } /* @@ -1401,8 +1386,7 @@ static int __init elplus_setup(struct net_device *dev) unsigned long timeout; unsigned long cookie = 0; int err = -ENODEV; - - SET_MODULE_OWNER(dev); + DECLARE_MAC_BUF(mac); /* * setup adapter structure @@ -1538,11 +1522,10 @@ static int __init elplus_setup(struct net_device *dev) /* * print remainder of startup message */ - printk(KERN_INFO "%s: 3c505 at %#lx, irq %d, dma %d, ", - dev->name, dev->base_addr, dev->irq, dev->dma); - printk("addr %02x:%02x:%02x:%02x:%02x:%02x, ", - dev->dev_addr[0], dev->dev_addr[1], dev->dev_addr[2], - dev->dev_addr[3], dev->dev_addr[4], dev->dev_addr[5]); + printk(KERN_INFO "%s: 3c505 at %#lx, irq %d, dma %d, " + "addr %s, ", + dev->name, dev->base_addr, dev->irq, dev->dma, + print_mac(mac, dev->dev_addr)); /* * read more information from the adapter diff --git a/drivers/net/3c507.c b/drivers/net/3c507.c index eed4299dc42642eda3b70c1f6cc8ec91a8a65e12..964d31ac9449a2b90e1bc5921e774468d4e23a1c 100644 --- a/drivers/net/3c507.c +++ b/drivers/net/3c507.c @@ -118,7 +118,6 @@ enum commands { /* Information that need to be kept for each board. 
*/ struct net_local { - struct net_device_stats stats; int last_restart; ushort rx_head; ushort rx_tail; @@ -289,7 +288,6 @@ static int el16_send_packet(struct sk_buff *skb, struct net_device *dev); static irqreturn_t el16_interrupt(int irq, void *dev_id); static void el16_rx(struct net_device *dev); static int el16_close(struct net_device *dev); -static struct net_device_stats *el16_get_stats(struct net_device *dev); static void el16_tx_timeout (struct net_device *dev); static void hardware_send_packet(struct net_device *dev, void *buf, short length, short pad); @@ -327,8 +325,6 @@ struct net_device * __init el16_probe(int unit) mem_start = dev->mem_start & 15; } - SET_MODULE_OWNER(dev); - if (io > 0x1ff) /* Check a single specified location. */ err = el16_probe1(dev, io); else if (io != 0) @@ -361,6 +357,7 @@ static int __init el16_probe1(struct net_device *dev, int ioaddr) static unsigned char init_ID_done, version_printed; int i, irq, irqval, retval; struct net_local *lp; + DECLARE_MAC_BUF(mac); if (init_ID_done == 0) { ushort lrs_state = 0xff; @@ -406,10 +403,9 @@ static int __init el16_probe1(struct net_device *dev, int ioaddr) dev->base_addr = ioaddr; outb(0x01, ioaddr + MISC_CTRL); - for (i = 0; i < 6; i++) { + for (i = 0; i < 6; i++) dev->dev_addr[i] = inb(ioaddr + i); - printk(" %02x", dev->dev_addr[i]); - } + printk(" %s", print_mac(mac, dev->dev_addr)); if (mem_start) net_debug = mem_start & 7; @@ -457,7 +453,6 @@ static int __init el16_probe1(struct net_device *dev, int ioaddr) dev->open = el16_open; dev->stop = el16_close; dev->hard_start_xmit = el16_send_packet; - dev->get_stats = el16_get_stats; dev->tx_timeout = el16_tx_timeout; dev->watchdog_timeo = TX_TIMEOUT; dev->ethtool_ops = &netdev_ethtool_ops; @@ -491,7 +486,7 @@ static void el16_tx_timeout (struct net_device *dev) readw(shmem + iSCB_STATUS) & 0x8000 ? "IRQ conflict" : "network cable problem"); /* Try to restart the adaptor. */ - if (lp->last_restart == lp->stats.tx_packets) { + if (lp->last_restart == dev->stats.tx_packets) { if (net_debug > 1) printk ("Resetting board.\n"); /* Completely reset the adaptor. */ @@ -503,7 +498,7 @@ static void el16_tx_timeout (struct net_device *dev) printk ("Kicking board.\n"); writew(0xf000 | CUC_START | RX_START, shmem + iSCB_CMD); outb (0, ioaddr + SIGNAL_CA); /* Issue channel-attn. */ - lp->last_restart = lp->stats.tx_packets; + lp->last_restart = dev->stats.tx_packets; } dev->trans_start = jiffies; netif_wake_queue (dev); @@ -522,7 +517,7 @@ static int el16_send_packet (struct sk_buff *skb, struct net_device *dev) spin_lock_irqsave (&lp->lock, flags); - lp->stats.tx_bytes += length; + dev->stats.tx_bytes += length; /* Disable the 82586's input to the interrupt line. */ outb (0x80, ioaddr + MISC_CTRL); @@ -581,14 +576,14 @@ static irqreturn_t el16_interrupt(int irq, void *dev_id) } /* Tx unsuccessful or some interesting status bit set. 
*/ if (!(tx_status & 0x2000) || (tx_status & 0x0f3f)) { - lp->stats.tx_errors++; - if (tx_status & 0x0600) lp->stats.tx_carrier_errors++; - if (tx_status & 0x0100) lp->stats.tx_fifo_errors++; - if (!(tx_status & 0x0040)) lp->stats.tx_heartbeat_errors++; - if (tx_status & 0x0020) lp->stats.tx_aborted_errors++; - lp->stats.collisions += tx_status & 0xf; + dev->stats.tx_errors++; + if (tx_status & 0x0600) dev->stats.tx_carrier_errors++; + if (tx_status & 0x0100) dev->stats.tx_fifo_errors++; + if (!(tx_status & 0x0040)) dev->stats.tx_heartbeat_errors++; + if (tx_status & 0x0020) dev->stats.tx_aborted_errors++; + dev->stats.collisions += tx_status & 0xf; } - lp->stats.tx_packets++; + dev->stats.tx_packets++; if (net_debug > 5) printk("Reaped %x, Tx status %04x.\n" , lp->tx_reap, tx_status); lp->tx_reap += TX_BUF_SIZE; @@ -667,17 +662,6 @@ static int el16_close(struct net_device *dev) return 0; } -/* Get the current statistics. This may be called with the card open or - closed. */ -static struct net_device_stats *el16_get_stats(struct net_device *dev) -{ - struct net_local *lp = netdev_priv(dev); - - /* ToDo: decide if there are any useful statistics from the SCB. */ - - return &lp->stats; -} - /* Initialize the Rx-block list. */ static void init_rx_bufs(struct net_device *dev) { @@ -854,12 +838,12 @@ static void el16_rx(struct net_device *dev) pkt_len); } else if ((frame_status & 0x2000) == 0) { /* Frame Rxed, but with error. */ - lp->stats.rx_errors++; - if (frame_status & 0x0800) lp->stats.rx_crc_errors++; - if (frame_status & 0x0400) lp->stats.rx_frame_errors++; - if (frame_status & 0x0200) lp->stats.rx_fifo_errors++; - if (frame_status & 0x0100) lp->stats.rx_over_errors++; - if (frame_status & 0x0080) lp->stats.rx_length_errors++; + dev->stats.rx_errors++; + if (frame_status & 0x0800) dev->stats.rx_crc_errors++; + if (frame_status & 0x0400) dev->stats.rx_frame_errors++; + if (frame_status & 0x0200) dev->stats.rx_fifo_errors++; + if (frame_status & 0x0100) dev->stats.rx_over_errors++; + if (frame_status & 0x0080) dev->stats.rx_length_errors++; } else { /* Malloc up new buffer. */ struct sk_buff *skb; @@ -868,7 +852,7 @@ static void el16_rx(struct net_device *dev) skb = dev_alloc_skb(pkt_len+2); if (skb == NULL) { printk("%s: Memory squeeze, dropping packet.\n", dev->name); - lp->stats.rx_dropped++; + dev->stats.rx_dropped++; break; } @@ -880,8 +864,8 @@ static void el16_rx(struct net_device *dev) skb->protocol=eth_type_trans(skb,dev); netif_rx(skb); dev->last_rx = jiffies; - lp->stats.rx_packets++; - lp->stats.rx_bytes += pkt_len; + dev->stats.rx_packets++; + dev->stats.rx_bytes += pkt_len; } /* Clear the status word and set End-of-List on the rx frame. 
*/ diff --git a/drivers/net/3c509.c b/drivers/net/3c509.c index 127f60841b10db37878c081896fcab17a57351cd..edda6e10ebe50144304316737f2e9884197c16bf 100644 --- a/drivers/net/3c509.c +++ b/drivers/net/3c509.c @@ -299,7 +299,7 @@ static struct isapnp_device_id el3_isapnp_adapters[] __initdata = { { } /* terminate list */ }; -static u16 el3_isapnp_phys_addr[8][3]; +static __be16 el3_isapnp_phys_addr[8][3]; static int nopnp; #endif /* __ISAPNP__ */ @@ -313,8 +313,9 @@ static int nopnp; static int __init el3_common_init(struct net_device *dev) { struct el3_private *lp = netdev_priv(dev); - short i; int err; + DECLARE_MAC_BUF(mac); + const char *if_names[] = {"10baseT", "AUI", "undefined", "BNC"}; spin_lock_init(&lp->lock); @@ -346,17 +347,10 @@ static int __init el3_common_init(struct net_device *dev) return err; } - { - const char *if_names[] = {"10baseT", "AUI", "undefined", "BNC"}; - printk("%s: 3c5x9 found at %#3.3lx, %s port, address ", - dev->name, dev->base_addr, - if_names[(dev->if_port & 0x03)]); - } - - /* Read in the station address. */ - for (i = 0; i < 6; i++) - printk(" %2.2x", dev->dev_addr[i]); - printk(", IRQ %d.\n", dev->irq); + printk(KERN_INFO "%s: 3c5x9 found at %#3.3lx, %s port, " + "address %s, IRQ %d.\n", + dev->name, dev->base_addr, if_names[(dev->if_port & 0x03)], + print_mac(mac, dev->dev_addr), dev->irq); if (el3_debug > 0) printk(KERN_INFO "%s", version); @@ -385,7 +379,7 @@ static int __init el3_probe(int card_idx) struct el3_private *lp; short lrs_state = 0xff, i; int ioaddr, irq, if_port; - u16 phys_addr[3]; + __be16 phys_addr[3]; static int current_tag; int err = -ENODEV; #if defined(__ISAPNP__) @@ -432,7 +426,6 @@ static int __init el3_probe(int card_idx) return -ENOMEM; } - SET_MODULE_OWNER(dev); SET_NETDEV_DEV(dev, &idev->dev); pnp_cards++; @@ -524,8 +517,6 @@ static int __init el3_probe(int card_idx) if (!dev) return -ENOMEM; - SET_MODULE_OWNER(dev); - netdev_boot_setup_check(dev); /* Set passed-in IRQ or I/O Addr. */ @@ -644,7 +635,6 @@ static int __init el3_mca_probe(struct device *device) return -ENOMEM; } - SET_MODULE_OWNER(dev); netdev_boot_setup_check(dev); memcpy(dev->dev_addr, phys_addr, sizeof(phys_addr)); @@ -704,8 +694,6 @@ static int __init el3_eisa_probe (struct device *device) return -ENOMEM; } - SET_MODULE_OWNER(dev); - netdev_boot_setup_check(dev); memcpy(dev->dev_addr, phys_addr, sizeof(phys_addr)); diff --git a/drivers/net/3c515.c b/drivers/net/3c515.c index 290166d5e7d1c7929fb238aa5718e74e3bb5771a..275e7510ebaf64a67e2835f1adbdfc978cadc1a5 100644 --- a/drivers/net/3c515.c +++ b/drivers/net/3c515.c @@ -501,8 +501,6 @@ static struct net_device *corkscrew_scan(int unit) netdev_boot_setup_check(dev); } - SET_MODULE_OWNER(dev); - #ifdef __ISAPNP__ if(nopnp == 1) goto no_pnp; @@ -571,6 +569,7 @@ static int corkscrew_setup(struct net_device *dev, int ioaddr, unsigned int eeprom[0x40], checksum = 0; /* EEPROM contents */ int i; int irq; + DECLARE_MAC_BUF(mac); if (idev) { irq = pnp_irq(idev, 0); @@ -632,8 +631,7 @@ static int corkscrew_setup(struct net_device *dev, int ioaddr, checksum = (checksum ^ (checksum >> 8)) & 0xff; if (checksum != 0x00) printk(" ***INVALID CHECKSUM %4.4x*** ", checksum); - for (i = 0; i < 6; i++) - printk("%c%2.2x", i ? 
':' : ' ', dev->dev_addr[i]); + printk(" %s", print_mac(mac, dev->dev_addr)); if (eeprom[16] == 0x11c7) { /* Corkscrew */ if (request_dma(dev->dma, "3c515")) { printk(", DMA %d allocation failed", dev->dma); diff --git a/drivers/net/3c523.c b/drivers/net/3c523.c index ab18343e58ef844068dce2716b55a2c474795d5f..239fc42fb8df6c97ffe535da49d9b0d2df3e5387 100644 --- a/drivers/net/3c523.c +++ b/drivers/net/3c523.c @@ -383,8 +383,8 @@ void alloc586(struct net_device *dev) static int elmc_getinfo(char *buf, int slot, void *d) { int len = 0; - struct net_device *dev = (struct net_device *) d; - int i; + struct net_device *dev = d; + DECLARE_MAC_BUF(mac); if (dev == NULL) return len; @@ -399,12 +399,8 @@ static int elmc_getinfo(char *buf, int slot, void *d) len += sprintf(buf + len, "Transceiver: %s\n", dev->if_port ? "External" : "Internal"); len += sprintf(buf + len, "Device: %s\n", dev->name); - len += sprintf(buf + len, "Hardware Address:"); - for (i = 0; i < 6; i++) { - len += sprintf(buf + len, " %02x", dev->dev_addr[i]); - } - buf[len++] = '\n'; - buf[len] = 0; + len += sprintf(buf + len, "Hardware Address: %s\n", + print_mac(mac, dev->dev_addr)); return len; } /* elmc_getinfo() */ @@ -422,8 +418,8 @@ static int __init do_elmc_probe(struct net_device *dev) unsigned int size = 0; int retval; struct priv *pr = dev->priv; + DECLARE_MAC_BUF(mac); - SET_MODULE_OWNER(dev); if (MCA_bus == 0) { return -ENODEV; } @@ -545,12 +541,11 @@ static int __init do_elmc_probe(struct net_device *dev) /* The hardware address for the 3c523 is stored in the first six bytes of the IO address. */ - printk(KERN_INFO "%s: hardware address ", dev->name); - for (i = 0; i < 6; i++) { + for (i = 0; i < 6; i++) dev->dev_addr[i] = inb(dev->base_addr + i); - printk(" %02x", dev->dev_addr[i]); - } - printk("\n"); + + printk(KERN_INFO "%s: hardware address %s\n", + dev->name, print_mac(mac, dev->dev_addr)); dev->open = &elmc_open; dev->stop = &elmc_close; diff --git a/drivers/net/3c527.c b/drivers/net/3c527.c index c7b571be20e0693a7ba1dadc4f60da2b305bdbff..b72b89d53ec8347061de4b4feb215bc633ed2edb 100644 --- a/drivers/net/3c527.c +++ b/drivers/net/3c527.c @@ -257,8 +257,6 @@ struct net_device *__init mc32_probe(int unit) if (unit >= 0) sprintf(dev->name, "eth%d", unit); - SET_MODULE_OWNER(dev); - /* Do not check any supplied i/o locations. POS registers usually don't fail :) */ @@ -338,6 +336,7 @@ static int __init mc32_probe1(struct net_device *dev, int slot) "82586 initialisation failure", "Adapter list configuration error" }; + DECLARE_MAC_BUF(mac); /* Time to play MCA games */ @@ -398,17 +397,17 @@ static int __init mc32_probe1(struct net_device *dev, int slot) * Go PROM browsing */ - printk("%s: Address ", dev->name); - /* Retrieve and print the ethernet address. */ for (i = 0; i < 6; i++) { mca_write_pos(slot, 6, i+12); mca_write_pos(slot, 7, 0); - printk(" %2.2x", dev->dev_addr[i] = mca_read_pos(slot,3)); + dev->dev_addr[i] = mca_read_pos(slot,3); } + printk("%s: Address %s", dev->name, print_mac(mac, dev->dev_addr)); + mca_write_pos(slot, 6, 0); mca_write_pos(slot, 7, 0); diff --git a/drivers/net/3c59x.c b/drivers/net/3c59x.c index a8c0f436cdd2b3088e4cdeb165841f243e2823d0..8d3893da06f539da701c45b29cb158f2705543d5 100644 --- a/drivers/net/3c59x.c +++ b/drivers/net/3c59x.c @@ -538,10 +538,10 @@ enum MasterCtrl { #define LAST_FRAG 0x80000000 /* Last Addr/Len pair in descriptor. */ #define DN_COMPLETE 0x00010000 /* This packet has been downloaded */ struct boom_rx_desc { - u32 next; /* Last entry points to 0. 
*/ - s32 status; - u32 addr; /* Up to 63 addr/len pairs possible. */ - s32 length; /* Set LAST_FRAG to indicate last pair. */ + __le32 next; /* Last entry points to 0. */ + __le32 status; + __le32 addr; /* Up to 63 addr/len pairs possible. */ + __le32 length; /* Set LAST_FRAG to indicate last pair. */ }; /* Values for the Rx status entry. */ enum rx_desc_status { @@ -558,16 +558,16 @@ enum rx_desc_status { #endif struct boom_tx_desc { - u32 next; /* Last entry points to 0. */ - s32 status; /* bits 0:12 length, others see below. */ + __le32 next; /* Last entry points to 0. */ + __le32 status; /* bits 0:12 length, others see below. */ #if DO_ZEROCOPY struct { - u32 addr; - s32 length; + __le32 addr; + __le32 length; } frag[1+MAX_SKB_FRAGS]; #else - u32 addr; - s32 length; + __le32 addr; + __le32 length; #endif }; @@ -705,7 +705,7 @@ static struct { static int vortex_probe1(struct device *gendev, void __iomem *ioaddr, int irq, int chip_idx, int card_idx); -static void vortex_up(struct net_device *dev); +static int vortex_up(struct net_device *dev); static void vortex_down(struct net_device *dev, int final); static int vortex_open(struct net_device *dev); static void mdio_sync(void __iomem *ioaddr, int bits); @@ -841,8 +841,11 @@ static int vortex_resume(struct pci_dev *pdev) return -EBUSY; } if (netif_running(dev)) { - vortex_up(dev); - netif_device_attach(dev); + err = vortex_up(dev); + if (err) + return err; + else + netif_device_attach(dev); } } return 0; @@ -1011,6 +1014,7 @@ static int __devinit vortex_probe1(struct device *gendev, char *print_name = "3c59x"; struct pci_dev *pdev = NULL; struct eisa_device *edev = NULL; + DECLARE_MAC_BUF(mac); if (!printed_version) { printk (version); @@ -1033,7 +1037,6 @@ static int __devinit vortex_probe1(struct device *gendev, printk (KERN_ERR PFX "unable to allocate etherdev, aborting\n"); goto out; } - SET_MODULE_OWNER(dev); SET_NETDEV_DEV(dev, gendev); vp = netdev_priv(dev); @@ -1128,7 +1131,7 @@ static int __devinit vortex_probe1(struct device *gendev, + sizeof(struct boom_tx_desc) * TX_RING_SIZE, &vp->rx_ring_dma); retval = -ENOMEM; - if (vp->rx_ring == 0) + if (!vp->rx_ring) goto free_region; vp->tx_ring = (struct boom_tx_desc *)(vp->rx_ring + RX_RING_SIZE); @@ -1201,12 +1204,10 @@ static int __devinit vortex_probe1(struct device *gendev, if ((checksum != 0x00) && !(vci->drv_flags & IS_TORNADO)) printk(" ***INVALID CHECKSUM %4.4x*** ", checksum); for (i = 0; i < 3; i++) - ((u16 *)dev->dev_addr)[i] = htons(eeprom[i + 10]); + ((__be16 *)dev->dev_addr)[i] = htons(eeprom[i + 10]); memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len); - if (print_info) { - for (i = 0; i < 6; i++) - printk("%c%2.2x", i ? ':' : ' ', dev->dev_addr[i]); - } + if (print_info) + printk(" %s", print_mac(mac, dev->dev_addr)); /* Unfortunately an all zero eeprom passes the checksum and this gets found in the wild in failure cases. 
Crypto is hard 8) */ if (!is_valid_ether_addr(dev->dev_addr)) { @@ -1484,19 +1485,24 @@ static void vortex_check_media(struct net_device *dev, unsigned int init) } } -static void +static int vortex_up(struct net_device *dev) { struct vortex_private *vp = netdev_priv(dev); void __iomem *ioaddr = vp->ioaddr; unsigned int config; - int i, mii_reg1, mii_reg5; + int i, mii_reg1, mii_reg5, err; if (VORTEX_PCI(vp)) { pci_set_power_state(VORTEX_PCI(vp), PCI_D0); /* Go active */ if (vp->pm_state_valid) pci_restore_state(VORTEX_PCI(vp)); - pci_enable_device(VORTEX_PCI(vp)); + err = pci_enable_device(VORTEX_PCI(vp)); + if (err) { + printk(KERN_WARNING "%s: Could not enable device \n", + dev->name); + goto err_out; + } } /* Before initializing select the active media port. */ @@ -1661,6 +1667,8 @@ vortex_up(struct net_device *dev) if (vp->cb_fn_base) /* The PCMCIA people are idiots. */ iowrite32(0x8000, vp->cb_fn_base + 4); netif_start_queue (dev); +err_out: + return err; } static int @@ -1674,7 +1682,7 @@ vortex_open(struct net_device *dev) if ((retval = request_irq(dev->irq, vp->full_bus_master_rx ? &boomerang_interrupt : &vortex_interrupt, IRQF_SHARED, dev->name, dev))) { printk(KERN_ERR "%s: Could not reserve IRQ %d\n", dev->name, dev->irq); - goto out; + goto err; } if (vp->full_bus_master_rx) { /* Boomerang bus master. */ @@ -1703,20 +1711,22 @@ vortex_open(struct net_device *dev) } } retval = -ENOMEM; - goto out_free_irq; + goto err_free_irq; } /* Wrap the ring. */ vp->rx_ring[i-1].next = cpu_to_le32(vp->rx_ring_dma); } - vortex_up(dev); - return 0; + retval = vortex_up(dev); + if (!retval) + goto out; -out_free_irq: +err_free_irq: free_irq(dev->irq, dev); -out: +err: if (vortex_debug > 1) printk(KERN_ERR "%s: vortex_open() fails: returning %d\n", dev->name, retval); +out: return retval; } @@ -2490,7 +2500,7 @@ boomerang_rx(struct net_device *dev) /* Check if the packet is long enough to just accept without copying to a properly sized skbuff. */ - if (pkt_len < rx_copybreak && (skb = dev_alloc_skb(pkt_len + 2)) != 0) { + if (pkt_len < rx_copybreak && (skb = dev_alloc_skb(pkt_len + 2)) != NULL) { skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */ pci_dma_sync_single_for_cpu(VORTEX_PCI(vp), dma, PKT_BUF_SZ, PCI_DMA_FROMDEVICE); /* 'skb_put()' points to the start of sk_buff data area. 
*/ @@ -2823,9 +2833,14 @@ static void vortex_set_msglevel(struct net_device *dev, u32 dbg) vortex_debug = dbg; } -static int vortex_get_stats_count(struct net_device *dev) +static int vortex_get_sset_count(struct net_device *dev, int sset) { - return VORTEX_NUM_STATS; + switch (sset) { + case ETH_SS_STATS: + return VORTEX_NUM_STATS; + default: + return -EOPNOTSUPP; + } } static void vortex_get_ethtool_stats(struct net_device *dev, @@ -2882,7 +2897,7 @@ static const struct ethtool_ops vortex_ethtool_ops = { .get_msglevel = vortex_get_msglevel, .set_msglevel = vortex_set_msglevel, .get_ethtool_stats = vortex_get_ethtool_stats, - .get_stats_count = vortex_get_stats_count, + .get_sset_count = vortex_get_sset_count, .get_settings = vortex_get_settings, .set_settings = vortex_set_settings, .get_link = ethtool_op_get_link, @@ -2899,7 +2914,7 @@ static int vortex_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) struct vortex_private *vp = netdev_priv(dev); void __iomem *ioaddr = vp->ioaddr; unsigned long flags; - int state = 0; + pci_power_t state = 0; if(VORTEX_PCI(vp)) state = VORTEX_PCI(vp)->current_state; diff --git a/drivers/net/7990.c b/drivers/net/7990.c index e89ace109a5d149b3c62b3628c72a9a722512596..224e0bff1ae085ab777e1699e80fbd982bd7601a 100644 --- a/drivers/net/7990.c +++ b/drivers/net/7990.c @@ -305,18 +305,18 @@ static int lance_rx (struct net_device *dev) /* We got an incomplete frame? */ if ((bits & LE_R1_POK) != LE_R1_POK) { - lp->stats.rx_over_errors++; - lp->stats.rx_errors++; + dev->stats.rx_over_errors++; + dev->stats.rx_errors++; continue; } else if (bits & LE_R1_ERR) { /* Count only the end frame as a rx error, * not the beginning */ - if (bits & LE_R1_BUF) lp->stats.rx_fifo_errors++; - if (bits & LE_R1_CRC) lp->stats.rx_crc_errors++; - if (bits & LE_R1_OFL) lp->stats.rx_over_errors++; - if (bits & LE_R1_FRA) lp->stats.rx_frame_errors++; - if (bits & LE_R1_EOP) lp->stats.rx_errors++; + if (bits & LE_R1_BUF) dev->stats.rx_fifo_errors++; + if (bits & LE_R1_CRC) dev->stats.rx_crc_errors++; + if (bits & LE_R1_OFL) dev->stats.rx_over_errors++; + if (bits & LE_R1_FRA) dev->stats.rx_frame_errors++; + if (bits & LE_R1_EOP) dev->stats.rx_errors++; } else { len = (rd->mblength & 0xfff) - 4; skb = dev_alloc_skb (len+2); @@ -324,7 +324,7 @@ static int lance_rx (struct net_device *dev) if (skb == 0) { printk ("%s: Memory squeeze, deferring packet.\n", dev->name); - lp->stats.rx_dropped++; + dev->stats.rx_dropped++; rd->mblength = 0; rd->rmd1_bits = LE_R1_OWN; lp->rx_new = (lp->rx_new + 1) & lp->rx_ring_mod_mask; @@ -339,8 +339,8 @@ static int lance_rx (struct net_device *dev) skb->protocol = eth_type_trans (skb, dev); netif_rx (skb); dev->last_rx = jiffies; - lp->stats.rx_packets++; - lp->stats.rx_bytes += len; + dev->stats.rx_packets++; + dev->stats.rx_bytes += len; } /* Return the packet to the pool */ @@ -377,12 +377,12 @@ static int lance_tx (struct net_device *dev) if (td->tmd1_bits & LE_T1_ERR) { status = td->misc; - lp->stats.tx_errors++; - if (status & LE_T3_RTY) lp->stats.tx_aborted_errors++; - if (status & LE_T3_LCOL) lp->stats.tx_window_errors++; + dev->stats.tx_errors++; + if (status & LE_T3_RTY) dev->stats.tx_aborted_errors++; + if (status & LE_T3_LCOL) dev->stats.tx_window_errors++; if (status & LE_T3_CLOS) { - lp->stats.tx_carrier_errors++; + dev->stats.tx_carrier_errors++; if (lp->auto_select) { lp->tpe = 1 - lp->tpe; printk("%s: Carrier Lost, trying %s\n", @@ -400,7 +400,7 @@ static int lance_tx (struct net_device *dev) /* buffer errors and underflows turn off 
the transmitter */ /* Restart the adapter */ if (status & (LE_T3_BUF|LE_T3_UFL)) { - lp->stats.tx_fifo_errors++; + dev->stats.tx_fifo_errors++; printk ("%s: Tx: ERR_BUF|ERR_UFL, restarting\n", dev->name); @@ -420,13 +420,13 @@ static int lance_tx (struct net_device *dev) /* One collision before packet was sent. */ if (td->tmd1_bits & LE_T1_EONE) - lp->stats.collisions++; + dev->stats.collisions++; /* More than one collision, be optimistic. */ if (td->tmd1_bits & LE_T1_EMORE) - lp->stats.collisions += 2; + dev->stats.collisions += 2; - lp->stats.tx_packets++; + dev->stats.tx_packets++; } j = (j + 1) & lp->tx_ring_mod_mask; @@ -471,9 +471,9 @@ lance_interrupt (int irq, void *dev_id) /* Log misc errors. */ if (csr0 & LE_C0_BABL) - lp->stats.tx_errors++; /* Tx babble. */ + dev->stats.tx_errors++; /* Tx babble. */ if (csr0 & LE_C0_MISS) - lp->stats.rx_errors++; /* Missed a Rx frame. */ + dev->stats.rx_errors++; /* Missed a Rx frame. */ if (csr0 & LE_C0_MERR) { printk("%s: Bus master arbitration failure, status %4.4x.\n", dev->name, csr0); @@ -589,13 +589,6 @@ int lance_start_xmit (struct sk_buff *skb, struct net_device *dev) return 0; } -struct net_device_stats *lance_get_stats (struct net_device *dev) -{ - struct lance_private *lp = netdev_priv(dev); - - return &lp->stats; -} - /* taken from the depca driver via a2065.c */ static void lance_load_multicast (struct net_device *dev) { diff --git a/drivers/net/7990.h b/drivers/net/7990.h index b1212b5ed92f04e68b281679591491328e4f5fdb..0a5837b9642194799bedaf26abf3721bafb94579 100644 --- a/drivers/net/7990.h +++ b/drivers/net/7990.h @@ -111,7 +111,6 @@ struct lance_private int lance_log_rx_bufs, lance_log_tx_bufs; int rx_ring_mod_mask, tx_ring_mod_mask; - struct net_device_stats stats; int tpe; /* TPE is selected */ int auto_select; /* cable-selection is by carrier */ unsigned short busmaster_regval; @@ -246,7 +245,6 @@ struct lance_private extern int lance_open(struct net_device *dev); extern int lance_close (struct net_device *dev); extern int lance_start_xmit (struct sk_buff *skb, struct net_device *dev); -extern struct net_device_stats *lance_get_stats (struct net_device *dev); extern void lance_set_multicast (struct net_device *dev); extern void lance_tx_timeout(struct net_device *dev); #ifdef CONFIG_NET_POLL_CONTROLLER diff --git a/drivers/net/8139cp.c b/drivers/net/8139cp.c index a79f28c7a1009aac9cc023807b27d903a52807a4..a453eda834d52e3584755367169ada0ff22e4bc6 100644 --- a/drivers/net/8139cp.c +++ b/drivers/net/8139cp.c @@ -78,7 +78,7 @@ #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE) #define CP_VLAN_TAG_USED 1 #define CP_VLAN_TX_TAG(tx_desc,vlan_tag_value) \ - do { (tx_desc)->opts2 = (vlan_tag_value); } while (0) + do { (tx_desc)->opts2 = cpu_to_le32(vlan_tag_value); } while (0) #else #define CP_VLAN_TAG_USED 0 #define CP_VLAN_TX_TAG(tx_desc,vlan_tag_value) \ @@ -303,25 +303,25 @@ static const unsigned int cp_rx_config = (RX_DMA_BURST << RxCfgDMAShift); struct cp_desc { - u32 opts1; - u32 opts2; - u64 addr; + __le32 opts1; + __le32 opts2; + __le64 addr; }; struct cp_dma_stats { - u64 tx_ok; - u64 rx_ok; - u64 tx_err; - u32 rx_err; - u16 rx_fifo; - u16 frame_align; - u32 tx_ok_1col; - u32 tx_ok_mcol; - u64 rx_ok_phys; - u64 rx_ok_bcast; - u32 rx_ok_mcast; - u16 tx_abort; - u16 tx_underrun; + __le64 tx_ok; + __le64 rx_ok; + __le64 tx_err; + __le32 rx_err; + __le16 rx_fifo; + __le16 frame_align; + __le32 tx_ok_1col; + __le32 tx_ok_mcol; + __le64 rx_ok_phys; + __le64 rx_ok_bcast; + __le32 rx_ok_mcast; + __le16 tx_abort; + 
__le16 tx_underrun; } __attribute__((packed)); struct cp_extra_stats { @@ -334,6 +334,8 @@ struct cp_private { spinlock_t lock; u32 msg_enable; + struct napi_struct napi; + struct pci_dev *pdev; u32 rx_config; u16 cpcmd; @@ -460,9 +462,9 @@ static inline void cp_rx_skb (struct cp_private *cp, struct sk_buff *skb, cp->dev->last_rx = jiffies; #if CP_VLAN_TAG_USED - if (cp->vlgrp && (desc->opts2 & RxVlanTagged)) { + if (cp->vlgrp && (desc->opts2 & cpu_to_le32(RxVlanTagged))) { vlan_hwaccel_receive_skb(skb, cp->vlgrp, - be16_to_cpu(desc->opts2 & 0xffff)); + swab16(le32_to_cpu(desc->opts2) & 0xffff)); } else #endif netif_receive_skb(skb); @@ -501,12 +503,12 @@ static inline unsigned int cp_rx_csum_ok (u32 status) return 0; } -static int cp_rx_poll (struct net_device *dev, int *budget) +static int cp_rx_poll(struct napi_struct *napi, int budget) { - struct cp_private *cp = netdev_priv(dev); - unsigned rx_tail = cp->rx_tail; - unsigned rx_work = dev->quota; - unsigned rx; + struct cp_private *cp = container_of(napi, struct cp_private, napi); + struct net_device *dev = cp->dev; + unsigned int rx_tail = cp->rx_tail; + int rx; rx_status_loop: rx = 0; @@ -560,7 +562,7 @@ static int cp_rx_poll (struct net_device *dev, int *budget) skb_reserve(new_skb, RX_OFFSET); - pci_unmap_single(cp->pdev, mapping, + dma_unmap_single(&cp->pdev->dev, mapping, buflen, PCI_DMA_FROMDEVICE); /* Handle checksum offloading for incoming packets. */ @@ -571,7 +573,7 @@ static int cp_rx_poll (struct net_device *dev, int *budget) skb_put(skb, len); - mapping = pci_map_single(cp->pdev, new_skb->data, buflen, + mapping = dma_map_single(&cp->pdev->dev, new_skb->data, buflen, PCI_DMA_FROMDEVICE); cp->rx_skb[rx_tail] = new_skb; @@ -588,33 +590,28 @@ static int cp_rx_poll (struct net_device *dev, int *budget) desc->opts1 = cpu_to_le32(DescOwn | cp->rx_buf_sz); rx_tail = NEXT_RX(rx_tail); - if (!rx_work--) + if (rx >= budget) break; } cp->rx_tail = rx_tail; - dev->quota -= rx; - *budget -= rx; - /* if we did not reach work limit, then we're done with * this round of polling */ - if (rx_work) { + if (rx < budget) { unsigned long flags; if (cpr16(IntrStatus) & cp_rx_intr_mask) goto rx_status_loop; - local_irq_save(flags); + spin_lock_irqsave(&cp->lock, flags); cpw16_f(IntrMask, cp_intr_mask); - __netif_rx_complete(dev); - local_irq_restore(flags); - - return 0; /* done */ + __netif_rx_complete(dev, napi); + spin_unlock_irqrestore(&cp->lock, flags); } - return 1; /* not done */ + return rx; } static irqreturn_t cp_interrupt (int irq, void *dev_instance) @@ -647,9 +644,9 @@ static irqreturn_t cp_interrupt (int irq, void *dev_instance) } if (status & (RxOK | RxErr | RxEmpty | RxFIFOOvr)) - if (netif_rx_schedule_prep(dev)) { + if (netif_rx_schedule_prep(dev, &cp->napi)) { cpw16_f(IntrMask, cp_norx_intr_mask); - __netif_rx_schedule(dev); + __netif_rx_schedule(dev, &cp->napi); } if (status & (TxOK | TxErr | TxEmpty | SWInt)) @@ -704,7 +701,7 @@ static void cp_tx (struct cp_private *cp) skb = cp->tx_skb[tx_tail]; BUG_ON(!skb); - pci_unmap_single(cp->pdev, le64_to_cpu(txd->addr), + dma_unmap_single(&cp->pdev->dev, le64_to_cpu(txd->addr), le32_to_cpu(txd->opts1) & 0xffff, PCI_DMA_TODEVICE); @@ -768,7 +765,7 @@ static int cp_start_xmit (struct sk_buff *skb, struct net_device *dev) #if CP_VLAN_TAG_USED if (cp->vlgrp && vlan_tx_tag_present(skb)) - vlan_tag = TxVlanTag | cpu_to_be16(vlan_tx_tag_get(skb)); + vlan_tag = TxVlanTag | swab16(vlan_tx_tag_get(skb)); #endif entry = cp->tx_head; @@ -782,7 +779,7 @@ static int cp_start_xmit (struct sk_buff 
*skb, struct net_device *dev) dma_addr_t mapping; len = skb->len; - mapping = pci_map_single(cp->pdev, skb->data, len, PCI_DMA_TODEVICE); + mapping = dma_map_single(&cp->pdev->dev, skb->data, len, PCI_DMA_TODEVICE); CP_VLAN_TX_TAG(txd, vlan_tag); txd->addr = cpu_to_le64(mapping); wmb(); @@ -818,7 +815,7 @@ static int cp_start_xmit (struct sk_buff *skb, struct net_device *dev) */ first_eor = eor; first_len = skb_headlen(skb); - first_mapping = pci_map_single(cp->pdev, skb->data, + first_mapping = dma_map_single(&cp->pdev->dev, skb->data, first_len, PCI_DMA_TODEVICE); cp->tx_skb[entry] = skb; entry = NEXT_TX(entry); @@ -830,7 +827,7 @@ static int cp_start_xmit (struct sk_buff *skb, struct net_device *dev) dma_addr_t mapping; len = this_frag->size; - mapping = pci_map_single(cp->pdev, + mapping = dma_map_single(&cp->pdev->dev, ((void *) page_address(this_frag->page) + this_frag->page_offset), len, PCI_DMA_TODEVICE); @@ -1021,8 +1018,8 @@ static void cp_init_hw (struct cp_private *cp) cpw8_f (Cfg9346, Cfg9346_Unlock); /* Restore our idea of the MAC address. */ - cpw32_f (MAC0 + 0, cpu_to_le32 (*(u32 *) (dev->dev_addr + 0))); - cpw32_f (MAC0 + 4, cpu_to_le32 (*(u32 *) (dev->dev_addr + 4))); + cpw32_f (MAC0 + 0, le32_to_cpu (*(__le32 *) (dev->dev_addr + 0))); + cpw32_f (MAC0 + 4, le32_to_cpu (*(__le32 *) (dev->dev_addr + 4))); cp_start_hw(cp); cpw8(TxThresh, 0x06); /* XXX convert magic num to a constant */ @@ -1069,8 +1066,8 @@ static int cp_refill_rx (struct cp_private *cp) skb_reserve(skb, RX_OFFSET); - mapping = pci_map_single(cp->pdev, skb->data, cp->rx_buf_sz, - PCI_DMA_FROMDEVICE); + mapping = dma_map_single(&cp->pdev->dev, skb->data, + cp->rx_buf_sz, PCI_DMA_FROMDEVICE); cp->rx_skb[i] = skb; cp->rx_ring[i].opts2 = 0; @@ -1110,7 +1107,8 @@ static int cp_alloc_rings (struct cp_private *cp) { void *mem; - mem = pci_alloc_consistent(cp->pdev, CP_RING_BYTES, &cp->ring_dma); + mem = dma_alloc_coherent(&cp->pdev->dev, CP_RING_BYTES, + &cp->ring_dma, GFP_KERNEL); if (!mem) return -ENOMEM; @@ -1128,7 +1126,7 @@ static void cp_clean_rings (struct cp_private *cp) for (i = 0; i < CP_RX_RING_SIZE; i++) { if (cp->rx_skb[i]) { desc = cp->rx_ring + i; - pci_unmap_single(cp->pdev, le64_to_cpu(desc->addr), + dma_unmap_single(&cp->pdev->dev,le64_to_cpu(desc->addr), cp->rx_buf_sz, PCI_DMA_FROMDEVICE); dev_kfree_skb(cp->rx_skb[i]); } @@ -1139,7 +1137,7 @@ static void cp_clean_rings (struct cp_private *cp) struct sk_buff *skb = cp->tx_skb[i]; desc = cp->tx_ring + i; - pci_unmap_single(cp->pdev, le64_to_cpu(desc->addr), + dma_unmap_single(&cp->pdev->dev,le64_to_cpu(desc->addr), le32_to_cpu(desc->opts1) & 0xffff, PCI_DMA_TODEVICE); if (le32_to_cpu(desc->opts1) & LastFrag) @@ -1158,7 +1156,8 @@ static void cp_clean_rings (struct cp_private *cp) static void cp_free_rings (struct cp_private *cp) { cp_clean_rings(cp); - pci_free_consistent(cp->pdev, CP_RING_BYTES, cp->rx_ring, cp->ring_dma); + dma_free_coherent(&cp->pdev->dev, CP_RING_BYTES, cp->rx_ring, + cp->ring_dma); cp->rx_ring = NULL; cp->tx_ring = NULL; } @@ -1175,6 +1174,8 @@ static int cp_open (struct net_device *dev) if (rc) return rc; + napi_enable(&cp->napi); + cp_init_hw(cp); rc = request_irq(dev->irq, cp_interrupt, IRQF_SHARED, dev->name, dev); @@ -1188,6 +1189,7 @@ static int cp_open (struct net_device *dev) return 0; err_out_hw: + napi_disable(&cp->napi); cp_stop_hw(cp); cp_free_rings(cp); return rc; @@ -1198,6 +1200,8 @@ static int cp_close (struct net_device *dev) struct cp_private *cp = netdev_priv(dev); unsigned long flags; + 
napi_disable(&cp->napi); + if (netif_msg_ifdown(cp)) printk(KERN_DEBUG "%s: disabling interface\n", dev->name); @@ -1379,9 +1383,14 @@ static int cp_get_regs_len(struct net_device *dev) return CP_REGS_SIZE; } -static int cp_get_stats_count (struct net_device *dev) +static int cp_get_sset_count (struct net_device *dev, int sset) { - return CP_NUM_STATS; + switch (sset) { + case ETH_SS_STATS: + return CP_NUM_STATS; + default: + return -EOPNOTSUPP; + } } static int cp_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) @@ -1517,7 +1526,8 @@ static void cp_get_ethtool_stats (struct net_device *dev, dma_addr_t dma; int i; - nic_stats = pci_alloc_consistent(cp->pdev, sizeof(*nic_stats), &dma); + nic_stats = dma_alloc_coherent(&cp->pdev->dev, sizeof(*nic_stats), + &dma, GFP_KERNEL); if (!nic_stats) return; @@ -1552,13 +1562,13 @@ static void cp_get_ethtool_stats (struct net_device *dev, tmp_stats[i++] = cp->cp_stats.rx_frags; BUG_ON(i != CP_NUM_STATS); - pci_free_consistent(cp->pdev, sizeof(*nic_stats), nic_stats, dma); + dma_free_coherent(&cp->pdev->dev, sizeof(*nic_stats), nic_stats, dma); } static const struct ethtool_ops cp_ethtool_ops = { .get_drvinfo = cp_get_drvinfo, .get_regs_len = cp_get_regs_len, - .get_stats_count = cp_get_stats_count, + .get_sset_count = cp_get_sset_count, .get_settings = cp_get_settings, .set_settings = cp_set_settings, .nway_reset = cp_nway_reset, @@ -1567,11 +1577,8 @@ static const struct ethtool_ops cp_ethtool_ops = { .set_msglevel = cp_set_msglevel, .get_rx_csum = cp_get_rx_csum, .set_rx_csum = cp_set_rx_csum, - .get_tx_csum = ethtool_op_get_tx_csum, .set_tx_csum = ethtool_op_set_tx_csum, /* local! */ - .get_sg = ethtool_op_get_sg, .set_sg = ethtool_op_set_sg, - .get_tso = ethtool_op_get_tso, .set_tso = ethtool_op_set_tso, .get_regs = cp_get_regs, .get_wol = cp_get_wol, @@ -1821,6 +1828,7 @@ static int cp_init_one (struct pci_dev *pdev, const struct pci_device_id *ent) void __iomem *regs; resource_size_t pciaddr; unsigned int addr_len, i, pci_using_dac; + DECLARE_MAC_BUF(mac); #ifndef MODULE static int version_printed; @@ -1840,7 +1848,6 @@ static int cp_init_one (struct pci_dev *pdev, const struct pci_device_id *ent) dev = alloc_etherdev(sizeof(struct cp_private)); if (!dev) return -ENOMEM; - SET_MODULE_OWNER(dev); SET_NETDEV_DEV(dev, &pdev->dev); cp = netdev_priv(dev); @@ -1923,8 +1930,8 @@ static int cp_init_one (struct pci_dev *pdev, const struct pci_device_id *ent) /* read MAC address from EEPROM */ addr_len = read_eeprom (regs, 0, 8) == 0x8129 ? 8 : 6; for (i = 0; i < 3; i++) - ((u16 *) (dev->dev_addr))[i] = - le16_to_cpu (read_eeprom (regs, i + 7, addr_len)); + ((__le16 *) (dev->dev_addr))[i] = + cpu_to_le16(read_eeprom (regs, i + 7, addr_len)); memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len); dev->open = cp_open; @@ -1933,11 +1940,10 @@ static int cp_init_one (struct pci_dev *pdev, const struct pci_device_id *ent) dev->hard_start_xmit = cp_start_xmit; dev->get_stats = cp_get_stats; dev->do_ioctl = cp_ioctl; - dev->poll = cp_rx_poll; #ifdef CONFIG_NET_POLL_CONTROLLER dev->poll_controller = cp_poll_controller; #endif - dev->weight = 16; /* arbitrary? from NAPI_HOWTO.txt. 
*/ + netif_napi_add(dev, &cp->napi, cp_rx_poll, 16); #ifdef BROKEN dev->change_mtu = cp_change_mtu; #endif @@ -1964,13 +1970,10 @@ static int cp_init_one (struct pci_dev *pdev, const struct pci_device_id *ent) goto err_out_iomap; printk (KERN_INFO "%s: RTL-8139C+ at 0x%lx, " - "%02x:%02x:%02x:%02x:%02x:%02x, " - "IRQ %d\n", + "%s, IRQ %d\n", dev->name, dev->base_addr, - dev->dev_addr[0], dev->dev_addr[1], - dev->dev_addr[2], dev->dev_addr[3], - dev->dev_addr[4], dev->dev_addr[5], + print_mac(mac, dev->dev_addr), dev->irq); pci_set_drvdata(pdev, dev); diff --git a/drivers/net/8139too.c b/drivers/net/8139too.c index f4e4298d24b96ef40ab305fb3270855b388360d4..973b684c11e34f5055bf2fde0b86711043ddb5f1 100644 --- a/drivers/net/8139too.c +++ b/drivers/net/8139too.c @@ -291,198 +291,197 @@ static struct { /* Symbolic offsets to registers. */ enum RTL8139_registers { - MAC0 = 0, /* Ethernet hardware address. */ - MAR0 = 8, /* Multicast filter. */ - TxStatus0 = 0x10, /* Transmit status (Four 32bit registers). */ - TxAddr0 = 0x20, /* Tx descriptors (also four 32bit). */ - RxBuf = 0x30, - ChipCmd = 0x37, - RxBufPtr = 0x38, - RxBufAddr = 0x3A, - IntrMask = 0x3C, - IntrStatus = 0x3E, - TxConfig = 0x40, - RxConfig = 0x44, - Timer = 0x48, /* A general-purpose counter. */ - RxMissed = 0x4C, /* 24 bits valid, write clears. */ - Cfg9346 = 0x50, - Config0 = 0x51, - Config1 = 0x52, - FlashReg = 0x54, - MediaStatus = 0x58, - Config3 = 0x59, - Config4 = 0x5A, /* absent on RTL-8139A */ - HltClk = 0x5B, - MultiIntr = 0x5C, - TxSummary = 0x60, - BasicModeCtrl = 0x62, - BasicModeStatus = 0x64, - NWayAdvert = 0x66, - NWayLPAR = 0x68, - NWayExpansion = 0x6A, + MAC0 = 0, /* Ethernet hardware address. */ + MAR0 = 8, /* Multicast filter. */ + TxStatus0 = 0x10, /* Transmit status (Four 32bit registers). */ + TxAddr0 = 0x20, /* Tx descriptors (also four 32bit). */ + RxBuf = 0x30, + ChipCmd = 0x37, + RxBufPtr = 0x38, + RxBufAddr = 0x3A, + IntrMask = 0x3C, + IntrStatus = 0x3E, + TxConfig = 0x40, + RxConfig = 0x44, + Timer = 0x48, /* A general-purpose counter. */ + RxMissed = 0x4C, /* 24 bits valid, write clears. */ + Cfg9346 = 0x50, + Config0 = 0x51, + Config1 = 0x52, + FlashReg = 0x54, + MediaStatus = 0x58, + Config3 = 0x59, + Config4 = 0x5A, /* absent on RTL-8139A */ + HltClk = 0x5B, + MultiIntr = 0x5C, + TxSummary = 0x60, + BasicModeCtrl = 0x62, + BasicModeStatus = 0x64, + NWayAdvert = 0x66, + NWayLPAR = 0x68, + NWayExpansion = 0x6A, /* Undocumented registers, but required for proper operation. */ - FIFOTMS = 0x70, /* FIFO Control and test. */ - CSCR = 0x74, /* Chip Status and Configuration Register. */ - PARA78 = 0x78, - PARA7c = 0x7c, /* Magic transceiver parameter register. */ - Config5 = 0xD8, /* absent on RTL-8139A */ + FIFOTMS = 0x70, /* FIFO Control and test. */ + CSCR = 0x74, /* Chip Status and Configuration Register. */ + PARA78 = 0x78, + PARA7c = 0x7c, /* Magic transceiver parameter register. */ + Config5 = 0xD8, /* absent on RTL-8139A */ }; enum ClearBitMasks { - MultiIntrClear = 0xF000, - ChipCmdClear = 0xE2, - Config1Clear = (1<<7)|(1<<6)|(1<<3)|(1<<2)|(1<<1), + MultiIntrClear = 0xF000, + ChipCmdClear = 0xE2, + Config1Clear = (1<<7)|(1<<6)|(1<<3)|(1<<2)|(1<<1), }; enum ChipCmdBits { - CmdReset = 0x10, - CmdRxEnb = 0x08, - CmdTxEnb = 0x04, - RxBufEmpty = 0x01, + CmdReset = 0x10, + CmdRxEnb = 0x08, + CmdTxEnb = 0x04, + RxBufEmpty = 0x01, }; /* Interrupt register bits, using my own meaningful names. 
*/ enum IntrStatusBits { - PCIErr = 0x8000, - PCSTimeout = 0x4000, - RxFIFOOver = 0x40, - RxUnderrun = 0x20, - RxOverflow = 0x10, - TxErr = 0x08, - TxOK = 0x04, - RxErr = 0x02, - RxOK = 0x01, - - RxAckBits = RxFIFOOver | RxOverflow | RxOK, + PCIErr = 0x8000, + PCSTimeout = 0x4000, + RxFIFOOver = 0x40, + RxUnderrun = 0x20, + RxOverflow = 0x10, + TxErr = 0x08, + TxOK = 0x04, + RxErr = 0x02, + RxOK = 0x01, + + RxAckBits = RxFIFOOver | RxOverflow | RxOK, }; enum TxStatusBits { - TxHostOwns = 0x2000, - TxUnderrun = 0x4000, - TxStatOK = 0x8000, - TxOutOfWindow = 0x20000000, - TxAborted = 0x40000000, - TxCarrierLost = 0x80000000, + TxHostOwns = 0x2000, + TxUnderrun = 0x4000, + TxStatOK = 0x8000, + TxOutOfWindow = 0x20000000, + TxAborted = 0x40000000, + TxCarrierLost = 0x80000000, }; enum RxStatusBits { - RxMulticast = 0x8000, - RxPhysical = 0x4000, - RxBroadcast = 0x2000, - RxBadSymbol = 0x0020, - RxRunt = 0x0010, - RxTooLong = 0x0008, - RxCRCErr = 0x0004, - RxBadAlign = 0x0002, - RxStatusOK = 0x0001, + RxMulticast = 0x8000, + RxPhysical = 0x4000, + RxBroadcast = 0x2000, + RxBadSymbol = 0x0020, + RxRunt = 0x0010, + RxTooLong = 0x0008, + RxCRCErr = 0x0004, + RxBadAlign = 0x0002, + RxStatusOK = 0x0001, }; /* Bits in RxConfig. */ enum rx_mode_bits { - AcceptErr = 0x20, - AcceptRunt = 0x10, - AcceptBroadcast = 0x08, - AcceptMulticast = 0x04, - AcceptMyPhys = 0x02, - AcceptAllPhys = 0x01, + AcceptErr = 0x20, + AcceptRunt = 0x10, + AcceptBroadcast = 0x08, + AcceptMulticast = 0x04, + AcceptMyPhys = 0x02, + AcceptAllPhys = 0x01, }; /* Bits in TxConfig. */ enum tx_config_bits { - /* Interframe Gap Time. Only TxIFG96 doesn't violate IEEE 802.3 */ - TxIFGShift = 24, - TxIFG84 = (0 << TxIFGShift), /* 8.4us / 840ns (10 / 100Mbps) */ - TxIFG88 = (1 << TxIFGShift), /* 8.8us / 880ns (10 / 100Mbps) */ - TxIFG92 = (2 << TxIFGShift), /* 9.2us / 920ns (10 / 100Mbps) */ - TxIFG96 = (3 << TxIFGShift), /* 9.6us / 960ns (10 / 100Mbps) */ - - TxLoopBack = (1 << 18) | (1 << 17), /* enable loopback test mode */ - TxCRC = (1 << 16), /* DISABLE appending CRC to end of Tx packets */ - TxClearAbt = (1 << 0), /* Clear abort (WO) */ - TxDMAShift = 8, /* DMA burst value (0-7) is shifted this many bits */ - TxRetryShift = 4, /* TXRR value (0-15) is shifted this many bits */ - - TxVersionMask = 0x7C800000, /* mask out version bits 30-26, 23 */ + TxIFGShift = 24, + TxIFG84 = (0 << TxIFGShift), /* 8.4us / 840ns (10 / 100Mbps) */ + TxIFG88 = (1 << TxIFGShift), /* 8.8us / 880ns (10 / 100Mbps) */ + TxIFG92 = (2 << TxIFGShift), /* 9.2us / 920ns (10 / 100Mbps) */ + TxIFG96 = (3 << TxIFGShift), /* 9.6us / 960ns (10 / 100Mbps) */ + + TxLoopBack = (1 << 18) | (1 << 17), /* enable loopback test mode */ + TxCRC = (1 << 16), /* DISABLE Tx pkt CRC append */ + TxClearAbt = (1 << 0), /* Clear abort (WO) */ + TxDMAShift = 8, /* DMA burst value (0-7) is shifted X many bits */ + TxRetryShift = 4, /* TXRR value (0-15) is shifted X many bits */ + + TxVersionMask = 0x7C800000, /* mask out version bits 30-26, 23 */ }; /* Bits in Config1 */ enum Config1Bits { - Cfg1_PM_Enable = 0x01, - Cfg1_VPD_Enable = 0x02, - Cfg1_PIO = 0x04, - Cfg1_MMIO = 0x08, - LWAKE = 0x10, /* not on 8139, 8139A */ + Cfg1_PM_Enable = 0x01, + Cfg1_VPD_Enable = 0x02, + Cfg1_PIO = 0x04, + Cfg1_MMIO = 0x08, + LWAKE = 0x10, /* not on 8139, 8139A */ Cfg1_Driver_Load = 0x20, - Cfg1_LED0 = 0x40, - Cfg1_LED1 = 0x80, - SLEEP = (1 << 1), /* only on 8139, 8139A */ - PWRDN = (1 << 0), /* only on 8139, 8139A */ + Cfg1_LED0 = 0x40, + Cfg1_LED1 = 0x80, + SLEEP = (1 << 1), /* only on 8139, 8139A */ 
+ PWRDN = (1 << 0), /* only on 8139, 8139A */ }; /* Bits in Config3 */ enum Config3Bits { - Cfg3_FBtBEn = (1 << 0), /* 1 = Fast Back to Back */ - Cfg3_FuncRegEn = (1 << 1), /* 1 = enable CardBus Function registers */ - Cfg3_CLKRUN_En = (1 << 2), /* 1 = enable CLKRUN */ - Cfg3_CardB_En = (1 << 3), /* 1 = enable CardBus registers */ - Cfg3_LinkUp = (1 << 4), /* 1 = wake up on link up */ - Cfg3_Magic = (1 << 5), /* 1 = wake up on Magic Packet (tm) */ - Cfg3_PARM_En = (1 << 6), /* 0 = software can set twister parameters */ - Cfg3_GNTSel = (1 << 7), /* 1 = delay 1 clock from PCI GNT signal */ + Cfg3_FBtBEn = (1 << 0), /* 1 = Fast Back to Back */ + Cfg3_FuncRegEn = (1 << 1), /* 1 = enable CardBus Function registers */ + Cfg3_CLKRUN_En = (1 << 2), /* 1 = enable CLKRUN */ + Cfg3_CardB_En = (1 << 3), /* 1 = enable CardBus registers */ + Cfg3_LinkUp = (1 << 4), /* 1 = wake up on link up */ + Cfg3_Magic = (1 << 5), /* 1 = wake up on Magic Packet (tm) */ + Cfg3_PARM_En = (1 << 6), /* 0 = software can set twister parameters */ + Cfg3_GNTSel = (1 << 7), /* 1 = delay 1 clock from PCI GNT signal */ }; /* Bits in Config4 */ enum Config4Bits { - LWPTN = (1 << 2), /* not on 8139, 8139A */ + LWPTN = (1 << 2), /* not on 8139, 8139A */ }; /* Bits in Config5 */ enum Config5Bits { - Cfg5_PME_STS = (1 << 0), /* 1 = PCI reset resets PME_Status */ - Cfg5_LANWake = (1 << 1), /* 1 = enable LANWake signal */ - Cfg5_LDPS = (1 << 2), /* 0 = save power when link is down */ - Cfg5_FIFOAddrPtr = (1 << 3), /* Realtek internal SRAM testing */ - Cfg5_UWF = (1 << 4), /* 1 = accept unicast wakeup frame */ - Cfg5_MWF = (1 << 5), /* 1 = accept multicast wakeup frame */ - Cfg5_BWF = (1 << 6), /* 1 = accept broadcast wakeup frame */ + Cfg5_PME_STS = (1 << 0), /* 1 = PCI reset resets PME_Status */ + Cfg5_LANWake = (1 << 1), /* 1 = enable LANWake signal */ + Cfg5_LDPS = (1 << 2), /* 0 = save power when link is down */ + Cfg5_FIFOAddrPtr= (1 << 3), /* Realtek internal SRAM testing */ + Cfg5_UWF = (1 << 4), /* 1 = accept unicast wakeup frame */ + Cfg5_MWF = (1 << 5), /* 1 = accept multicast wakeup frame */ + Cfg5_BWF = (1 << 6), /* 1 = accept broadcast wakeup frame */ }; enum RxConfigBits { /* rx fifo threshold */ - RxCfgFIFOShift = 13, - RxCfgFIFONone = (7 << RxCfgFIFOShift), + RxCfgFIFOShift = 13, + RxCfgFIFONone = (7 << RxCfgFIFOShift), /* Max DMA burst */ - RxCfgDMAShift = 8, + RxCfgDMAShift = 8, RxCfgDMAUnlimited = (7 << RxCfgDMAShift), /* rx ring buffer length */ - RxCfgRcv8K = 0, - RxCfgRcv16K = (1 << 11), - RxCfgRcv32K = (1 << 12), - RxCfgRcv64K = (1 << 11) | (1 << 12), + RxCfgRcv8K = 0, + RxCfgRcv16K = (1 << 11), + RxCfgRcv32K = (1 << 12), + RxCfgRcv64K = (1 << 11) | (1 << 12), /* Disable packet wrap at end of Rx buffer. (not possible with 64k) */ - RxNoWrap = (1 << 7), + RxNoWrap = (1 << 7), }; /* Twister tuning parameters from RealTek. Completely undocumented, but required to tune bad links on some boards. 
*/ enum CSCRBits { - CSCR_LinkOKBit = 0x0400, - CSCR_LinkChangeBit = 0x0800, - CSCR_LinkStatusBits = 0x0f000, - CSCR_LinkDownOffCmd = 0x003c0, - CSCR_LinkDownCmd = 0x0f3c0, + CSCR_LinkOKBit = 0x0400, + CSCR_LinkChangeBit = 0x0800, + CSCR_LinkStatusBits = 0x0f000, + CSCR_LinkDownOffCmd = 0x003c0, + CSCR_LinkDownCmd = 0x0f3c0, }; enum Cfg9346Bits { - Cfg9346_Lock = 0x00, - Cfg9346_Unlock = 0xC0, + Cfg9346_Lock = 0x00, + Cfg9346_Unlock = 0xC0, }; typedef enum { - CH_8139 = 0, + CH_8139 = 0, CH_8139_K, CH_8139A, CH_8139A_G, @@ -495,8 +494,8 @@ typedef enum { } chip_t; enum chip_flags { - HasHltClk = (1 << 0), - HasLWake = (1 << 1), + HasHltClk = (1 << 0), + HasLWake = (1 << 1), }; #define HW_REVID(b30, b29, b28, b27, b26, b23, b22) \ @@ -569,36 +568,46 @@ struct rtl_extra_stats { }; struct rtl8139_private { - void __iomem *mmio_addr; - int drv_flags; - struct pci_dev *pci_dev; - u32 msg_enable; - struct net_device_stats stats; - unsigned char *rx_ring; - unsigned int cur_rx; /* Index into the Rx buffer of next Rx pkt. */ - unsigned int tx_flag; - unsigned long cur_tx; - unsigned long dirty_tx; - unsigned char *tx_buf[NUM_TX_DESC]; /* Tx bounce buffers */ - unsigned char *tx_bufs; /* Tx bounce buffer region. */ - dma_addr_t rx_ring_dma; - dma_addr_t tx_bufs_dma; - signed char phys[4]; /* MII device addresses. */ - char twistie, twist_row, twist_col; /* Twister tune state. */ - unsigned int watchdog_fired : 1; - unsigned int default_port : 4; /* Last dev->if_port value. */ - unsigned int have_thread : 1; - spinlock_t lock; - spinlock_t rx_lock; - chip_t chipset; - u32 rx_config; - struct rtl_extra_stats xstats; - - struct delayed_work thread; - - struct mii_if_info mii; - unsigned int regs_len; - unsigned long fifo_copy_timeout; + void __iomem *mmio_addr; + int drv_flags; + struct pci_dev *pci_dev; + u32 msg_enable; + struct napi_struct napi; + struct net_device *dev; + struct net_device_stats stats; + + unsigned char *rx_ring; + unsigned int cur_rx; /* RX buf index of next pkt */ + dma_addr_t rx_ring_dma; + + unsigned int tx_flag; + unsigned long cur_tx; + unsigned long dirty_tx; + unsigned char *tx_buf[NUM_TX_DESC]; /* Tx bounce buffers */ + unsigned char *tx_bufs; /* Tx bounce buffer region. */ + dma_addr_t tx_bufs_dma; + + signed char phys[4]; /* MII device addresses. */ + + /* Twister tune state. */ + char twistie, twist_row, twist_col; + + unsigned int watchdog_fired : 1; + unsigned int default_port : 4; /* Last dev->if_port value. 
*/ + unsigned int have_thread : 1; + + spinlock_t lock; + spinlock_t rx_lock; + + chip_t chipset; + u32 rx_config; + struct rtl_extra_stats xstats; + + struct delayed_work thread; + + struct mii_if_info mii; + unsigned int regs_len; + unsigned long fifo_copy_timeout; }; MODULE_AUTHOR ("Jeff Garzik "); @@ -625,10 +634,10 @@ static void rtl8139_tx_timeout (struct net_device *dev); static void rtl8139_init_ring (struct net_device *dev); static int rtl8139_start_xmit (struct sk_buff *skb, struct net_device *dev); -static int rtl8139_poll(struct net_device *dev, int *budget); #ifdef CONFIG_NET_POLL_CONTROLLER static void rtl8139_poll_controller(struct net_device *dev); #endif +static int rtl8139_poll(struct napi_struct *napi, int budget); static irqreturn_t rtl8139_interrupt (int irq, void *dev_instance); static int rtl8139_close (struct net_device *dev); static int netdev_ioctl (struct net_device *dev, struct ifreq *rq, int cmd); @@ -646,24 +655,11 @@ static const struct ethtool_ops rtl8139_ethtool_ops; #define RTL_W16_F(reg, val16) do { iowrite16 ((val16), ioaddr + (reg)); ioread16 (ioaddr + (reg)); } while (0) #define RTL_W32_F(reg, val32) do { iowrite32 ((val32), ioaddr + (reg)); ioread32 (ioaddr + (reg)); } while (0) - -#define MMIO_FLUSH_AUDIT_COMPLETE 1 -#if MMIO_FLUSH_AUDIT_COMPLETE - /* write MMIO register */ #define RTL_W8(reg, val8) iowrite8 ((val8), ioaddr + (reg)) #define RTL_W16(reg, val16) iowrite16 ((val16), ioaddr + (reg)) #define RTL_W32(reg, val32) iowrite32 ((val32), ioaddr + (reg)) -#else - -/* write MMIO register, then flush */ -#define RTL_W8 RTL_W8_F -#define RTL_W16 RTL_W16_F -#define RTL_W32 RTL_W32_F - -#endif /* MMIO_FLUSH_AUDIT_COMPLETE */ - /* read MMIO register */ #define RTL_R8(reg) ioread8 (ioaddr + (reg)) #define RTL_R16(reg) ioread16 (ioaddr + (reg)) @@ -770,7 +766,6 @@ static int __devinit rtl8139_init_board (struct pci_dev *pdev, dev_err(&pdev->dev, "Unable to alloc new net device\n"); return -ENOMEM; } - SET_MODULE_OWNER(dev); SET_NETDEV_DEV(dev, &pdev->dev); tp = netdev_priv(dev); @@ -931,6 +926,7 @@ static int __devinit rtl8139_init_one (struct pci_dev *pdev, int i, addr_len, option; void __iomem *ioaddr; static int board_idx = -1; + DECLARE_MAC_BUF(mac); assert (pdev != NULL); assert (ent != NULL); @@ -963,6 +959,7 @@ static int __devinit rtl8139_init_one (struct pci_dev *pdev, assert (dev != NULL); tp = netdev_priv(dev); + tp->dev = dev; ioaddr = tp->mmio_addr; assert (ioaddr != NULL); @@ -976,8 +973,7 @@ static int __devinit rtl8139_init_one (struct pci_dev *pdev, /* The Rtl8139-specific entries in the device structure. 
*/ dev->open = rtl8139_open; dev->hard_start_xmit = rtl8139_start_xmit; - dev->poll = rtl8139_poll; - dev->weight = 64; + netif_napi_add(dev, &tp->napi, rtl8139_poll, 64); dev->stop = rtl8139_close; dev->get_stats = rtl8139_get_stats; dev->set_multicast_list = rtl8139_set_rx_mode; @@ -1022,14 +1018,11 @@ static int __devinit rtl8139_init_one (struct pci_dev *pdev, pci_set_drvdata (pdev, dev); printk (KERN_INFO "%s: %s at 0x%lx, " - "%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x, " - "IRQ %d\n", + "%s, IRQ %d\n", dev->name, board_info[ent->driver_data].name, dev->base_addr, - dev->dev_addr[0], dev->dev_addr[1], - dev->dev_addr[2], dev->dev_addr[3], - dev->dev_addr[4], dev->dev_addr[5], + print_mac(mac, dev->dev_addr), dev->irq); printk (KERN_DEBUG "%s: Identified 8139 chip type '%s'\n", @@ -1314,24 +1307,26 @@ static int rtl8139_open (struct net_device *dev) if (retval) return retval; - tp->tx_bufs = pci_alloc_consistent(tp->pci_dev, TX_BUF_TOT_LEN, - &tp->tx_bufs_dma); - tp->rx_ring = pci_alloc_consistent(tp->pci_dev, RX_BUF_TOT_LEN, - &tp->rx_ring_dma); + tp->tx_bufs = dma_alloc_coherent(&tp->pci_dev->dev, TX_BUF_TOT_LEN, + &tp->tx_bufs_dma, GFP_KERNEL); + tp->rx_ring = dma_alloc_coherent(&tp->pci_dev->dev, RX_BUF_TOT_LEN, + &tp->rx_ring_dma, GFP_KERNEL); if (tp->tx_bufs == NULL || tp->rx_ring == NULL) { free_irq(dev->irq, dev); if (tp->tx_bufs) - pci_free_consistent(tp->pci_dev, TX_BUF_TOT_LEN, + dma_free_coherent(&tp->pci_dev->dev, TX_BUF_TOT_LEN, tp->tx_bufs, tp->tx_bufs_dma); if (tp->rx_ring) - pci_free_consistent(tp->pci_dev, RX_BUF_TOT_LEN, + dma_free_coherent(&tp->pci_dev->dev, RX_BUF_TOT_LEN, tp->rx_ring, tp->rx_ring_dma); return -ENOMEM; } + napi_enable(&tp->napi); + tp->mii.full_duplex = tp->mii.force_media; tp->tx_flag = (TX_FIFO_THRESH << 11) & 0x003f0000; @@ -2103,39 +2098,32 @@ static void rtl8139_weird_interrupt (struct net_device *dev, } } -static int rtl8139_poll(struct net_device *dev, int *budget) +static int rtl8139_poll(struct napi_struct *napi, int budget) { - struct rtl8139_private *tp = netdev_priv(dev); + struct rtl8139_private *tp = container_of(napi, struct rtl8139_private, napi); + struct net_device *dev = tp->dev; void __iomem *ioaddr = tp->mmio_addr; - int orig_budget = min(*budget, dev->quota); - int done = 1; + int work_done; spin_lock(&tp->rx_lock); - if (likely(RTL_R16(IntrStatus) & RxAckBits)) { - int work_done; - - work_done = rtl8139_rx(dev, tp, orig_budget); - if (likely(work_done > 0)) { - *budget -= work_done; - dev->quota -= work_done; - done = (work_done < orig_budget); - } - } + work_done = 0; + if (likely(RTL_R16(IntrStatus) & RxAckBits)) + work_done += rtl8139_rx(dev, tp, budget); - if (done) { + if (work_done < budget) { unsigned long flags; /* * Order is important since data can get interrupted * again when we think we are done. */ - local_irq_save(flags); + spin_lock_irqsave(&tp->lock, flags); RTL_W16_F(IntrMask, rtl8139_intr_mask); - __netif_rx_complete(dev); - local_irq_restore(flags); + __netif_rx_complete(dev, napi); + spin_unlock_irqrestore(&tp->lock, flags); } spin_unlock(&tp->rx_lock); - return !done; + return work_done; } /* The interrupt handler does all of the Rx thread work and cleans up @@ -2180,9 +2168,9 @@ static irqreturn_t rtl8139_interrupt (int irq, void *dev_instance) /* Receive packets are processed by poll routine. If not running start it now. 
*/ if (status & RxAckBits){ - if (netif_rx_schedule_prep(dev)) { + if (netif_rx_schedule_prep(dev, &tp->napi)) { RTL_W16_F (IntrMask, rtl8139_norx_intr_mask); - __netif_rx_schedule (dev); + __netif_rx_schedule(dev, &tp->napi); } } @@ -2223,7 +2211,8 @@ static int rtl8139_close (struct net_device *dev) void __iomem *ioaddr = tp->mmio_addr; unsigned long flags; - netif_stop_queue (dev); + netif_stop_queue(dev); + napi_disable(&tp->napi); if (netif_msg_ifdown(tp)) printk(KERN_DEBUG "%s: Shutting down ethercard, status was 0x%4.4x.\n", @@ -2248,10 +2237,10 @@ static int rtl8139_close (struct net_device *dev) rtl8139_tx_clear (tp); - pci_free_consistent(tp->pci_dev, RX_BUF_TOT_LEN, - tp->rx_ring, tp->rx_ring_dma); - pci_free_consistent(tp->pci_dev, TX_BUF_TOT_LEN, - tp->tx_bufs, tp->tx_bufs_dma); + dma_free_coherent(&tp->pci_dev->dev, RX_BUF_TOT_LEN, + tp->rx_ring, tp->rx_ring_dma); + dma_free_coherent(&tp->pci_dev->dev, TX_BUF_TOT_LEN, + tp->tx_bufs, tp->tx_bufs_dma); tp->rx_ring = NULL; tp->tx_bufs = NULL; @@ -2417,9 +2406,14 @@ static void rtl8139_get_regs(struct net_device *dev, struct ethtool_regs *regs, } #endif /* CONFIG_8139TOO_MMIO */ -static int rtl8139_get_stats_count(struct net_device *dev) +static int rtl8139_get_sset_count(struct net_device *dev, int sset) { - return RTL_NUM_STATS; + switch (sset) { + case ETH_SS_STATS: + return RTL_NUM_STATS; + default: + return -EOPNOTSUPP; + } } static void rtl8139_get_ethtool_stats(struct net_device *dev, struct ethtool_stats *stats, u64 *data) @@ -2450,7 +2444,7 @@ static const struct ethtool_ops rtl8139_ethtool_ops = { .get_wol = rtl8139_get_wol, .set_wol = rtl8139_set_wol, .get_strings = rtl8139_get_strings, - .get_stats_count = rtl8139_get_stats_count, + .get_sset_count = rtl8139_get_sset_count, .get_ethtool_stats = rtl8139_get_ethtool_stats, }; diff --git a/drivers/net/82596.c b/drivers/net/82596.c index d915837193cce251f4cae93f9789e5d7545e93b7..bb30d5be7824c0f51ece10e49b3c5ee9e2ecf807 100644 --- a/drivers/net/82596.c +++ b/drivers/net/82596.c @@ -326,7 +326,6 @@ struct i596_private { struct i596_cmd *cmd_head; int cmd_backlog; unsigned long last_cmd; - struct net_device_stats stats; struct i596_rfd rfds[RX_RING_SIZE]; struct i596_rbd rbds[RX_RING_SIZE]; struct tx_cmd tx_cmds[TX_RING_SIZE]; @@ -360,7 +359,6 @@ static int i596_open(struct net_device *dev); static int i596_start_xmit(struct sk_buff *skb, struct net_device *dev); static irqreturn_t i596_interrupt(int irq, void *dev_id); static int i596_close(struct net_device *dev); -static struct net_device_stats *i596_get_stats(struct net_device *dev); static void i596_add_cmd(struct net_device *dev, struct i596_cmd *cmd); static void i596_tx_timeout (struct net_device *dev); static void print_eth(unsigned char *buf, char *str); @@ -828,7 +826,7 @@ static inline int i596_rx(struct net_device *dev) if (skb == NULL) { /* XXX tulip.c can defer packets here!! 
*/ printk(KERN_WARNING "%s: i596_rx Memory squeeze, dropping packet.\n", dev->name); - lp->stats.rx_dropped++; + dev->stats.rx_dropped++; } else { if (!rx_in_place) { @@ -844,28 +842,28 @@ static inline int i596_rx(struct net_device *dev) #endif netif_rx(skb); dev->last_rx = jiffies; - lp->stats.rx_packets++; - lp->stats.rx_bytes+=pkt_len; + dev->stats.rx_packets++; + dev->stats.rx_bytes+=pkt_len; } } else { DEB(DEB_ERRORS, printk(KERN_DEBUG "%s: Error, rfd.stat = 0x%04x\n", dev->name, rfd->stat)); - lp->stats.rx_errors++; + dev->stats.rx_errors++; if ((rfd->stat) & 0x0001) - lp->stats.collisions++; + dev->stats.collisions++; if ((rfd->stat) & 0x0080) - lp->stats.rx_length_errors++; + dev->stats.rx_length_errors++; if ((rfd->stat) & 0x0100) - lp->stats.rx_over_errors++; + dev->stats.rx_over_errors++; if ((rfd->stat) & 0x0200) - lp->stats.rx_fifo_errors++; + dev->stats.rx_fifo_errors++; if ((rfd->stat) & 0x0400) - lp->stats.rx_frame_errors++; + dev->stats.rx_frame_errors++; if ((rfd->stat) & 0x0800) - lp->stats.rx_crc_errors++; + dev->stats.rx_crc_errors++; if ((rfd->stat) & 0x1000) - lp->stats.rx_length_errors++; + dev->stats.rx_length_errors++; } /* Clear the buffer descriptor count and EOF + F flags */ @@ -916,8 +914,8 @@ static void i596_cleanup_cmd(struct net_device *dev, struct i596_private *lp) dev_kfree_skb(skb); - lp->stats.tx_errors++; - lp->stats.tx_aborted_errors++; + dev->stats.tx_errors++; + dev->stats.tx_aborted_errors++; ptr->v_next = ptr->b_next = I596_NULL; tx_cmd->cmd.command = 0; /* Mark as free */ @@ -1038,10 +1036,10 @@ static void i596_tx_timeout (struct net_device *dev) DEB(DEB_ERRORS,printk(KERN_ERR "%s: transmit timed out, status resetting.\n", dev->name)); - lp->stats.tx_errors++; + dev->stats.tx_errors++; /* Try to restart the adaptor */ - if (lp->last_restart == lp->stats.tx_packets) { + if (lp->last_restart == dev->stats.tx_packets) { DEB(DEB_ERRORS,printk(KERN_ERR "Resetting board.\n")); /* Shutdown and restart */ i596_reset (dev, lp, ioaddr); @@ -1050,7 +1048,7 @@ static void i596_tx_timeout (struct net_device *dev) DEB(DEB_ERRORS,printk(KERN_ERR "Kicking board.\n")); lp->scb.command = CUC_START | RX_START; CA (dev); - lp->last_restart = lp->stats.tx_packets; + lp->last_restart = dev->stats.tx_packets; } dev->trans_start = jiffies; @@ -1082,7 +1080,7 @@ static int i596_start_xmit(struct sk_buff *skb, struct net_device *dev) if (tx_cmd->cmd.command) { printk(KERN_NOTICE "%s: xmit ring full, dropping packet.\n", dev->name); - lp->stats.tx_dropped++; + dev->stats.tx_dropped++; dev_kfree_skb(skb); } else { @@ -1107,8 +1105,8 @@ static int i596_start_xmit(struct sk_buff *skb, struct net_device *dev) DEB(DEB_TXADDR,print_eth(skb->data, "tx-queued")); i596_add_cmd(dev, &tx_cmd->cmd); - lp->stats.tx_packets++; - lp->stats.tx_bytes += length; + dev->stats.tx_packets++; + dev->stats.tx_bytes += length; } netif_start_queue(dev); @@ -1118,15 +1116,12 @@ static int i596_start_xmit(struct sk_buff *skb, struct net_device *dev) static void print_eth(unsigned char *add, char *str) { - int i; + DECLARE_MAC_BUF(mac); + DECLARE_MAC_BUF(mac2); - printk(KERN_DEBUG "i596 0x%p, ", add); - for (i = 0; i < 6; i++) - printk(" %02X", add[i + 6]); - printk(" -->"); - for (i = 0; i < 6; i++) - printk(" %02X", add[i]); - printk(" %02X%02X, %s\n", add[12], add[13], str); + printk(KERN_DEBUG "i596 0x%p, %s --> %s %02X%02X, %s\n", + add, print_mac(mac, add + 6), print_mac(mac2, add), + add[12], add[13], str); } static int io = 0x300; @@ -1234,11 +1229,9 @@ struct net_device * __init 
i82596_probe(int unit) DEB(DEB_PROBE,printk(KERN_INFO "%s", version)); /* The 82596-specific entries in the device structure. */ - SET_MODULE_OWNER(dev); dev->open = i596_open; dev->stop = i596_close; dev->hard_start_xmit = i596_start_xmit; - dev->get_stats = i596_get_stats; dev->set_multicast_list = set_multicast_list; dev->tx_timeout = i596_tx_timeout; dev->watchdog_timeo = TX_TIMEOUT; @@ -1344,17 +1337,17 @@ static irqreturn_t i596_interrupt(int irq, void *dev_id) if ((ptr->status) & STAT_OK) { DEB(DEB_TXADDR,print_eth(skb->data, "tx-done")); } else { - lp->stats.tx_errors++; + dev->stats.tx_errors++; if ((ptr->status) & 0x0020) - lp->stats.collisions++; + dev->stats.collisions++; if (!((ptr->status) & 0x0040)) - lp->stats.tx_heartbeat_errors++; + dev->stats.tx_heartbeat_errors++; if ((ptr->status) & 0x0400) - lp->stats.tx_carrier_errors++; + dev->stats.tx_carrier_errors++; if ((ptr->status) & 0x0800) - lp->stats.collisions++; + dev->stats.collisions++; if ((ptr->status) & 0x1000) - lp->stats.tx_aborted_errors++; + dev->stats.tx_aborted_errors++; } dev_kfree_skb_irq(skb); @@ -1409,8 +1402,8 @@ static irqreturn_t i596_interrupt(int irq, void *dev_id) if (netif_running(dev)) { DEB(DEB_ERRORS,printk(KERN_ERR "%s: i596 interrupt receive unit inactive, status 0x%x\n", dev->name, status)); ack_cmd |= RX_START; - lp->stats.rx_errors++; - lp->stats.rx_fifo_errors++; + dev->stats.rx_errors++; + dev->stats.rx_fifo_errors++; rebuild_rx_bufs(dev); } } @@ -1493,14 +1486,6 @@ static int i596_close(struct net_device *dev) return 0; } -static struct net_device_stats * - i596_get_stats(struct net_device *dev) -{ - struct i596_private *lp = dev->priv; - - return &lp->stats; -} - /* * Set or clear the multicast filter for this adaptor. */ @@ -1551,6 +1536,7 @@ static void set_multicast_list(struct net_device *dev) struct dev_mc_list *dmi; unsigned char *cp; struct mc_cmd *cmd; + DECLARE_MAC_BUF(mac); if (wait_cfg(dev, &lp->mc_cmd.cmd, 1000, "multicast list change request timed out")) return; @@ -1561,8 +1547,8 @@ static void set_multicast_list(struct net_device *dev) for (dmi = dev->mc_list; cnt && dmi != NULL; dmi = dmi->next, cnt--, cp += 6) { memcpy(cp, dmi->dmi_addr, 6); if (i596_debug > 1) - DEB(DEB_MULTI,printk(KERN_INFO "%s: Adding address %02x:%02x:%02x:%02x:%02x:%02x\n", - dev->name, cp[0],cp[1],cp[2],cp[3],cp[4],cp[5])); + DEB(DEB_MULTI,printk(KERN_INFO "%s: Adding address %s\n", + dev->name, print_mac(mac, cp))); } i596_add_cmd(dev, &cmd->cmd); } diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig index c5519250efd978235a9a4ba73ed1f80f641978cf..9c635a237a9d4c937bd97bc724ec9714389c2f4b 100644 --- a/drivers/net/Kconfig +++ b/drivers/net/Kconfig @@ -135,6 +135,12 @@ config TUN If you don't know what to use this for, you don't need it. +config VETH + tristate "Virtual ethernet device" + ---help--- + The device is an ethernet tunnel. Devices are created in pairs. When + one end receives the packet it appears on its pair and vice versa. + config NET_SB1000 tristate "General Instruments Surfboard 1000" depends on PNP @@ -159,6 +165,15 @@ config NET_SB1000 If you don't have this card, of course say N. +config IP1000 + tristate "IP1000 Gigabit Ethernet support" + depends on PCI && EXPERIMENTAL + ---help--- + This driver supports IP1000 gigabit Ethernet cards. + + To compile this driver as a module, choose M here: the module + will be called ipg. This is recommended. 
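A recurring conversion in the driver hunks above (82596 here, and a2065, at1700 and others further down) is dropping the driver-private struct net_device_stats copy together with the trivial get_stats method, and bumping the counters embedded in struct net_device instead; the conversions rely on the networking core's default get_stats() handing back &dev->stats. A minimal sketch of the pattern, using a hypothetical foo driver rather than code from this patch:

#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Receive-path bookkeeping without a private stats copy: no get_stats
 * method is installed, so the core reports &dev->stats directly. */
static void foo_count_rx(struct net_device *dev, struct sk_buff *skb,
                         unsigned int len)
{
    if (!skb) {
        dev->stats.rx_dropped++;    /* was lp->stats.rx_dropped++ */
        return;
    }
    dev->stats.rx_packets++;
    dev->stats.rx_bytes += len;
    netif_rx(skb);
}

The same substitution applies on the transmit and error paths, which is why the per-driver net_device_stats members and the *_get_stats() accessors disappear throughout this series.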
+ source "drivers/net/arcnet/Kconfig" source "drivers/net/phy/Kconfig" @@ -225,6 +240,13 @@ config AX88796 AX88796 driver, using platform bus to provide chip detection and resources +config AX88796_93CX6 + bool "ASIX AX88796 external 93CX6 eeprom support" + depends on AX88796 + select EEPROM_93CX6 + help + Select this if your platform comes with an external 93CX6 eeprom. + config MACE tristate "MACE (Power Mac ethernet) support" depends on PPC_PMAC && PPC32 @@ -466,10 +488,6 @@ config MIPS_AU1X00_ENET If you have an Alchemy Semi AU1X00 based system say Y. Otherwise, say N. -config NET_SB1250_MAC - tristate "SB1250 Ethernet support" - depends on SIBYTE_SB1xxx_SOC - config SGI_IOC3_ETH bool "SGI IOC3 Ethernet" depends on PCI && SGI_IP27 @@ -480,26 +498,6 @@ config SGI_IOC3_ETH the Ethernet-HOWTO, available from . -config SGI_IOC3_ETH_HW_RX_CSUM - bool "Receive hardware checksums" - depends on SGI_IOC3_ETH && INET - default y - help - The SGI IOC3 network adapter supports TCP and UDP checksums in - hardware to offload processing of these checksums from the CPU. At - the moment only acceleration of IPv4 is supported. This option - enables offloading for checksums on receive. If unsure, say Y. - -config SGI_IOC3_ETH_HW_TX_CSUM - bool "Transmit hardware checksums" - depends on SGI_IOC3_ETH && INET - default y - help - The SGI IOC3 network adapter supports TCP and UDP checksums in - hardware to offload processing of these checksums from the CPU. At - the moment only acceleration of IPv4 is supported. This option - enables offloading for checksums on transmit. If unsure, say Y. - config MIPS_SIM_NET tristate "MIPS simulator Network device" depends on MIPS_SIM @@ -843,6 +841,8 @@ config BFIN_MAC tristate "Blackfin 536/537 on-chip mac support" depends on NET_ETHERNET && (BF537 || BF536) && (!BF537_PORT_H) select CRC32 + select MII + select PHYLIB select BFIN_MAC_USE_L1 if DMA_UNCACHED_NONE help This is the driver for blackfin on-chip mac device. Say Y if you want it @@ -944,7 +944,7 @@ config SMC911X tristate "SMSC LAN911[5678] support" select CRC32 select MII - depends on ARCH_PXA + depends on ARCH_PXA || SUPERH help This is a driver for SMSC's LAN911x series of Ethernet chipsets including the new LAN9115, LAN9116, LAN9117, and LAN9118. @@ -1250,75 +1250,8 @@ config IBMVETH . The module will be called ibmveth. -config IBM_EMAC - tristate "PowerPC 4xx on-chip Ethernet support" - depends on 4xx && !PPC_MERGE - help - This driver supports the PowerPC 4xx EMAC family of on-chip - Ethernet controllers. - -config IBM_EMAC_RXB - int "Number of receive buffers" - depends on IBM_EMAC - default "128" - -config IBM_EMAC_TXB - int "Number of transmit buffers" - depends on IBM_EMAC - default "64" - -config IBM_EMAC_POLL_WEIGHT - int "MAL NAPI polling weight" - depends on IBM_EMAC - default "32" - -config IBM_EMAC_RX_COPY_THRESHOLD - int "RX skb copy threshold (bytes)" - depends on IBM_EMAC - default "256" - -config IBM_EMAC_RX_SKB_HEADROOM - int "Additional RX skb headroom (bytes)" - depends on IBM_EMAC - default "0" - help - Additional receive skb headroom. Note, that driver - will always reserve at least 2 bytes to make IP header - aligned, so usually there is no need to add any additional - headroom. - - If unsure, set to 0. 
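The AX88796_93CX6 entry added above selects EEPROM_93CX6, the generic 93C46/93C56 serial EEPROM helper. A rough sketch of how a driver typically hooks into it; the foo_* names are placeholders, the callback bodies are intentionally stubbed, and the field layout reflects my reading of the helper rather than code from this patch:

#include <linux/types.h>
#include <linux/eeprom_93cx6.h>

/* The helper bit-bangs the EEPROM through these two callbacks: on read
 * the driver samples its EEPROM control register into eeprom->reg_*,
 * on write it pushes eeprom->reg_* back out to the hardware. */
static void foo_eeprom_register_read(struct eeprom_93cx6 *eeprom)
{
    /* read the chip's EEPROM control register and fill in
     * eeprom->reg_data_out, reg_data_clock and reg_chip_select */
}

static void foo_eeprom_register_write(struct eeprom_93cx6 *eeprom)
{
    /* write eeprom->reg_data_in, reg_data_clock and reg_chip_select
     * back to the chip's EEPROM control register */
}

static u16 foo_read_eeprom_word(void *priv, u8 word)
{
    struct eeprom_93cx6 eeprom = {
        .data           = priv,
        .register_read  = foo_eeprom_register_read,
        .register_write = foo_eeprom_register_write,
        .width          = PCI_EEPROM_WIDTH_93C46,
    };
    u16 val;

    eeprom_93cx6_read(&eeprom, word, &val);
    return val;
}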
- -config IBM_EMAC_PHY_RX_CLK_FIX - bool "PHY Rx clock workaround" - depends on IBM_EMAC && (405EP || 440GX || 440EP || 440GR) - help - Enable this if EMAC attached to a PHY which doesn't generate - RX clock if there is no link, if this is the case, you will - see "TX disable timeout" or "RX disable timeout" in the system - log. - - If unsure, say N. - -config IBM_EMAC_DEBUG - bool "Debugging" - depends on IBM_EMAC - default n - -config IBM_EMAC_ZMII - bool - depends on IBM_EMAC && (NP405H || NP405L || 44x) - default y - -config IBM_EMAC_RGMII - bool - depends on IBM_EMAC && 440GX - default y - -config IBM_EMAC_TAH - bool - depends on IBM_EMAC && 440GX - default y +source "drivers/net/ibm_emac/Kconfig" +source "drivers/net/ibm_newemac/Kconfig" config NET_PCI bool "EISA, VLB, PCI and on board controllers" @@ -1453,18 +1386,38 @@ config APRICOT called apricot. config B44 - tristate "Broadcom 4400 ethernet support" - depends on NET_PCI && PCI + tristate "Broadcom 440x/47xx ethernet support" + depends on SSB_POSSIBLE + select SSB select MII help - If you have a network (Ethernet) controller of this type, say Y and - read the Ethernet-HOWTO, available from + If you have a network (Ethernet) controller of this type, say Y + or M and read the Ethernet-HOWTO, available from . To compile this driver as a module, choose M here and read . The module will be called b44. +# Auto-select SSB PCI-HOST support, if possible +config B44_PCI_AUTOSELECT + bool + depends on B44 && SSB_PCIHOST_POSSIBLE + select SSB_PCIHOST + default y + +# Auto-select SSB PCICORE driver, if possible +config B44_PCICORE_AUTOSELECT + bool + depends on B44 && SSB_DRIVER_PCICORE_POSSIBLE + select SSB_DRIVER_PCICORE + default y + +config B44_PCI + bool + depends on B44_PCI_AUTOSELECT && B44_PCICORE_AUTOSELECT + default y + config FORCEDETH tristate "nForce Ethernet support" depends on NET_PCI && PCI @@ -1513,21 +1466,6 @@ config TC35815 depends on NET_PCI && PCI && MIPS select MII -config DGRS - tristate "Digi Intl. RightSwitch SE-X support" - depends on NET_PCI && (PCI || EISA) - ---help--- - This is support for the Digi International RightSwitch series of - PCI/EISA Ethernet switch cards. These include the SE-4 and the SE-6 - models. If you have a network card of this type, say Y and read the - Ethernet-HOWTO, available from - . More specific - information is contained in . - - To compile this driver as a module, choose M here and read - . The module - will be called dgrs. - config EEPRO100 tristate "EtherExpressPro/100 support (eepro100, original Becker driver)" depends on NET_PCI && PCI @@ -2009,14 +1947,15 @@ config ACENIC_OMIT_TIGON_I The safe and default value for this is N. config DL2K - tristate "D-Link DL2000-based Gigabit Ethernet support" + tristate "DL2000/TC902x-based Gigabit Ethernet support" depends on PCI select CRC32 help - This driver supports D-Link 2000-based gigabit ethernet cards, which - includes + This driver supports DL2000/TC902x-based Gigabit ethernet cards, + which includes D-Link DGE-550T Gigabit Ethernet Adapter. D-Link DL2000-based Gigabit Ethernet Adapter. + Sundance/Tamarack TC902x Gigabit Ethernet Adapter. To compile this driver as a module, choose M here: the module will be called dl2k. @@ -2069,6 +2008,29 @@ config E1000_DISABLE_PACKET_SPLIT If in doubt, say N. +config E1000E + tristate "Intel(R) PRO/1000 PCI-Express Gigabit Ethernet support" + depends on PCI + ---help--- + This driver supports the PCI-Express Intel(R) PRO/1000 gigabit + ethernet family of adapters. 
For PCI or PCI-X e1000 adapters, + use the regular e1000 driver For more information on how to + identify your adapter, go to the Adapter & Driver ID Guide at: + + + + For general information and support, go to the Intel support + website at: + + + + More specific information on configuring the driver is in + . + + To compile this driver as a module, choose M here and read + . The module + will be called e1000e. + source "drivers/net/ixp2000/Kconfig" config MYRI_SBUS @@ -2153,6 +2115,19 @@ config R8169_VLAN If in doubt, say Y. +config SB1250_MAC + tristate "SB1250 Gigabit Ethernet support" + depends on SIBYTE_SB1xxx_SOC + select PHYLIB + ---help--- + This driver supports Gigabit Ethernet interfaces based on the + Broadcom SiByte family of System-On-a-Chip parts. They include + the BCM1120, BCM1125, BCM1125H, BCM1250, BCM1255, BCM1280, BCM1455 + and BCM1480 chips. + + To compile this driver as a module, choose M here: the module + will be called sb1250-mac. + config SIS190 tristate "SiS190/SiS191 gigabit ethernet support" depends on PCI @@ -2501,18 +2476,42 @@ config CHELSIO_T3 config EHEA tristate "eHEA Ethernet support" depends on IBMEBUS + select INET_LRO ---help--- This driver supports the IBM pSeries eHEA ethernet adapter. To compile the driver as a module, choose M here. The module will be called ehea. +config IXGBE + tristate "Intel(R) 10GbE PCI Express adapters support" + depends on PCI + ---help--- + This driver supports Intel(R) 10GbE PCI Express family of + adapters. For more information on how to identify your adapter, go + to the Adapter & Driver ID Guide at: + + + + For general information and support, go to the Intel support + website at: + + + + More specific information on configuring the driver is in + . + + To compile this driver as a module, choose M here and read + . The module + will be called ixgbe. + config IXGB tristate "Intel(R) PRO/10GbE support" depends on PCI ---help--- - This driver supports Intel(R) PRO/10GbE family of - adapters. For more information on how to identify your adapter, go + This driver supports Intel(R) PRO/10GbE family of adapters for + PCI-X type cards. For PCI-E type cards, use the "ixgbe" driver + instead. For more information on how to identify your adapter, go to the Adapter & Driver ID Guide at: @@ -2576,6 +2575,7 @@ config MYRI10GE depends on PCI select FW_LOADER select CRC32 + select INET_LRO ---help--- This driver supports Myricom Myri-10G Dual Protocol interface in Ethernet mode. If the eeprom on your board is not recent enough, @@ -2594,6 +2594,13 @@ config NETXEN_NIC help This enables the support for NetXen's Gigabit Ethernet card. +config NIU + tristate "Sun Neptune 10Gbit Ethernet support" + depends on PCI + help + This enables support for cards based upon Sun's + Neptune chipset. + config PASEMI_MAC tristate "PA Semi 1/10Gbit MAC" depends on PPC64 && PCI @@ -2617,6 +2624,12 @@ config MLX4_DEBUG debug_level module parameter (which can also be set after the driver is loaded through sysfs). +config TEHUTI + tristate "Tehuti Networks 10G Ethernet" + depends on PCI + help + Tehuti Networks 10G Ethernet NIC + endif # NETDEV_10000 source "drivers/net/tokenring/Kconfig" @@ -3074,6 +3087,16 @@ config NETCONSOLE If you want to log kernel messages over the network, enable this. See for details. 
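Several drivers later in this patch (amd8111e and ep93xx_eth in particular) are converted from the old dev->poll/dev->weight interface to the struct napi_struct API. A condensed sketch of that conversion for a hypothetical foo driver; the NAPI calls mirror the ones visible in those hunks, while the foo_* names, the interrupt masking comments and the weight of 64 are illustrative assumptions:

#include <linux/netdevice.h>
#include <linux/interrupt.h>

struct foo_priv {
    struct net_device *dev;
    struct napi_struct napi;
};

/* poll() now receives the napi context plus a budget and returns the
 * number of packets processed, instead of the old 0/1 "more work"
 * result driven through dev->quota. */
static int foo_poll(struct napi_struct *napi, int budget)
{
    struct foo_priv *priv = container_of(napi, struct foo_priv, napi);
    struct net_device *dev = priv->dev;
    int work = 0;

    /* ... process up to 'budget' rx descriptors, counting them in work ... */

    if (work < budget) {
        netif_rx_complete(dev, napi);
        /* re-enable rx interrupts here */
    }
    return work;
}

static irqreturn_t foo_interrupt(int irq, void *dev_id)
{
    struct net_device *dev = dev_id;
    struct foo_priv *priv = netdev_priv(dev);

    if (netif_rx_schedule_prep(dev, &priv->napi)) {
        /* mask rx interrupts here */
        __netif_rx_schedule(dev, &priv->napi);
    }
    return IRQ_HANDLED;
}

The remaining pieces are the ones the hunks below add explicitly: netif_napi_add(dev, &priv->napi, foo_poll, 64) at probe time, napi_enable() in open, and napi_disable() in close and on the open error path.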
+config NETCONSOLE_DYNAMIC + bool "Dynamic reconfiguration of logging targets (EXPERIMENTAL)" + depends on NETCONSOLE && SYSFS && EXPERIMENTAL + select CONFIGFS_FS + help + This option enables the ability to dynamically reconfigure target + parameters (interface, IP addresses, port numbers, MAC addresses) + at runtime through a userspace interface exported using configfs. + See for details. + config NETPOLL def_bool NETCONSOLE diff --git a/drivers/net/Makefile b/drivers/net/Makefile index 9c928a84584188716f7cd4d8d9a143eb1e012007..d2e0f35da42e0e6f9f2b184876d2a5cd45f03c13 100644 --- a/drivers/net/Makefile +++ b/drivers/net/Makefile @@ -3,14 +3,19 @@ # obj-$(CONFIG_E1000) += e1000/ +obj-$(CONFIG_E1000E) += e1000e/ obj-$(CONFIG_IBM_EMAC) += ibm_emac/ +obj-$(CONFIG_IBM_NEW_EMAC) += ibm_newemac/ +obj-$(CONFIG_IXGBE) += ixgbe/ obj-$(CONFIG_IXGB) += ixgb/ +obj-$(CONFIG_IP1000) += ipg.o obj-$(CONFIG_CHELSIO_T1) += chelsio/ obj-$(CONFIG_CHELSIO_T3) += cxgb3/ obj-$(CONFIG_EHEA) += ehea/ obj-$(CONFIG_BONDING) += bonding/ obj-$(CONFIG_ATL1) += atl1/ obj-$(CONFIG_GIANFAR) += gianfar_driver.o +obj-$(CONFIG_TEHUTI) += tehuti.o gianfar_driver-objs := gianfar.o \ gianfar_ethtool.o \ @@ -39,7 +44,6 @@ obj-$(CONFIG_SUNVNET) += sunvnet.o obj-$(CONFIG_MACE) += mace.o obj-$(CONFIG_BMAC) += bmac.o -obj-$(CONFIG_DGRS) += dgrs.o obj-$(CONFIG_VORTEX) += 3c59x.o obj-$(CONFIG_TYPHOON) += typhoon.o obj-$(CONFIG_NE2K_PCI) += ne2k-pci.o 8390.o @@ -106,7 +110,7 @@ obj-$(CONFIG_E2100) += e2100.o 8390.o obj-$(CONFIG_ES3210) += es3210.o 8390.o obj-$(CONFIG_LNE390) += lne390.o 8390.o obj-$(CONFIG_NE3210) += ne3210.o 8390.o -obj-$(CONFIG_NET_SB1250_MAC) += sb1250-mac.o +obj-$(CONFIG_SB1250_MAC) += sb1250-mac.o obj-$(CONFIG_B44) += b44.o obj-$(CONFIG_FORCEDETH) += forcedeth.o obj-$(CONFIG_NE_H8300) += ne-h8300.o @@ -193,6 +197,7 @@ obj-$(CONFIG_MACSONIC) += macsonic.o obj-$(CONFIG_MACMACE) += macmace.o obj-$(CONFIG_MAC89x0) += mac89x0.o obj-$(CONFIG_TUN) += tun.o +obj-$(CONFIG_VETH) += veth.o obj-$(CONFIG_NET_NETX) += netx-eth.o obj-$(CONFIG_DL2K) += dl2k.o obj-$(CONFIG_R8169) += r8169.o @@ -236,3 +241,4 @@ obj-$(CONFIG_NETCONSOLE) += netconsole.o obj-$(CONFIG_FS_ENET) += fs_enet/ obj-$(CONFIG_NETXEN_NIC) += netxen/ +obj-$(CONFIG_NIU) += niu.o diff --git a/drivers/net/a2065.c b/drivers/net/a2065.c index a45de6975bfe74735feacd88e71df684b6904017..18f7f815f66e2166cf4d12af0927cc85a7ec6cd3 100644 --- a/drivers/net/a2065.c +++ b/drivers/net/a2065.c @@ -119,7 +119,6 @@ struct lance_private { int lance_log_rx_bufs, lance_log_tx_bufs; int rx_ring_mod_mask, tx_ring_mod_mask; - struct net_device_stats stats; int tpe; /* cable-selection is TPE */ int auto_select; /* cable-selection by carrier */ unsigned short busmaster_regval; @@ -294,18 +293,18 @@ static int lance_rx (struct net_device *dev) /* We got an incomplete frame? 
*/ if ((bits & LE_R1_POK) != LE_R1_POK) { - lp->stats.rx_over_errors++; - lp->stats.rx_errors++; + dev->stats.rx_over_errors++; + dev->stats.rx_errors++; continue; } else if (bits & LE_R1_ERR) { /* Count only the end frame as a rx error, * not the beginning */ - if (bits & LE_R1_BUF) lp->stats.rx_fifo_errors++; - if (bits & LE_R1_CRC) lp->stats.rx_crc_errors++; - if (bits & LE_R1_OFL) lp->stats.rx_over_errors++; - if (bits & LE_R1_FRA) lp->stats.rx_frame_errors++; - if (bits & LE_R1_EOP) lp->stats.rx_errors++; + if (bits & LE_R1_BUF) dev->stats.rx_fifo_errors++; + if (bits & LE_R1_CRC) dev->stats.rx_crc_errors++; + if (bits & LE_R1_OFL) dev->stats.rx_over_errors++; + if (bits & LE_R1_FRA) dev->stats.rx_frame_errors++; + if (bits & LE_R1_EOP) dev->stats.rx_errors++; } else { len = (rd->mblength & 0xfff) - 4; skb = dev_alloc_skb (len+2); @@ -313,7 +312,7 @@ static int lance_rx (struct net_device *dev) if (skb == 0) { printk(KERN_WARNING "%s: Memory squeeze, " "deferring packet.\n", dev->name); - lp->stats.rx_dropped++; + dev->stats.rx_dropped++; rd->mblength = 0; rd->rmd1_bits = LE_R1_OWN; lp->rx_new = (lp->rx_new + 1) & lp->rx_ring_mod_mask; @@ -328,8 +327,8 @@ static int lance_rx (struct net_device *dev) skb->protocol = eth_type_trans (skb, dev); netif_rx (skb); dev->last_rx = jiffies; - lp->stats.rx_packets++; - lp->stats.rx_bytes += len; + dev->stats.rx_packets++; + dev->stats.rx_bytes += len; } /* Return the packet to the pool */ @@ -364,12 +363,12 @@ static int lance_tx (struct net_device *dev) if (td->tmd1_bits & LE_T1_ERR) { status = td->misc; - lp->stats.tx_errors++; - if (status & LE_T3_RTY) lp->stats.tx_aborted_errors++; - if (status & LE_T3_LCOL) lp->stats.tx_window_errors++; + dev->stats.tx_errors++; + if (status & LE_T3_RTY) dev->stats.tx_aborted_errors++; + if (status & LE_T3_LCOL) dev->stats.tx_window_errors++; if (status & LE_T3_CLOS) { - lp->stats.tx_carrier_errors++; + dev->stats.tx_carrier_errors++; if (lp->auto_select) { lp->tpe = 1 - lp->tpe; printk(KERN_ERR "%s: Carrier Lost, " @@ -388,7 +387,7 @@ static int lance_tx (struct net_device *dev) /* buffer errors and underflows turn off the transmitter */ /* Restart the adapter */ if (status & (LE_T3_BUF|LE_T3_UFL)) { - lp->stats.tx_fifo_errors++; + dev->stats.tx_fifo_errors++; printk(KERN_ERR "%s: Tx: ERR_BUF|ERR_UFL, " "restarting\n", dev->name); @@ -408,13 +407,13 @@ static int lance_tx (struct net_device *dev) /* One collision before packet was sent. */ if (td->tmd1_bits & LE_T1_EONE) - lp->stats.collisions++; + dev->stats.collisions++; /* More than one collision, be optimistic. */ if (td->tmd1_bits & LE_T1_EMORE) - lp->stats.collisions += 2; + dev->stats.collisions += 2; - lp->stats.tx_packets++; + dev->stats.tx_packets++; } j = (j + 1) & lp->tx_ring_mod_mask; @@ -459,9 +458,9 @@ static irqreturn_t lance_interrupt (int irq, void *dev_id) /* Log misc errors. */ if (csr0 & LE_C0_BABL) - lp->stats.tx_errors++; /* Tx babble. */ + dev->stats.tx_errors++; /* Tx babble. */ if (csr0 & LE_C0_MISS) - lp->stats.rx_errors++; /* Missed a Rx frame. */ + dev->stats.rx_errors++; /* Missed a Rx frame. 
*/ if (csr0 & LE_C0_MERR) { printk(KERN_ERR "%s: Bus master arbitration failure, status " "%4.4x.\n", dev->name, csr0); @@ -606,7 +605,7 @@ static int lance_start_xmit (struct sk_buff *skb, struct net_device *dev) /* Now, give the packet to the lance */ ib->btx_ring [entry].tmd1_bits = (LE_T1_POK|LE_T1_OWN); lp->tx_new = (lp->tx_new+1) & lp->tx_ring_mod_mask; - lp->stats.tx_bytes += skblen; + dev->stats.tx_bytes += skblen; if (TX_BUFFS_AVAIL <= 0) netif_stop_queue(dev); @@ -621,13 +620,6 @@ static int lance_start_xmit (struct sk_buff *skb, struct net_device *dev) return status; } -static struct net_device_stats *lance_get_stats (struct net_device *dev) -{ - struct lance_private *lp = netdev_priv(dev); - - return &lp->stats; -} - /* taken from the depca driver */ static void lance_load_multicast (struct net_device *dev) { @@ -724,6 +716,7 @@ static int __devinit a2065_init_one(struct zorro_dev *z, unsigned long board, base_addr, mem_start; struct resource *r1, *r2; int err; + DECLARE_MAC_BUF(mac); board = z->resource.start; base_addr = board+A2065_LANCE; @@ -746,7 +739,6 @@ static int __devinit a2065_init_one(struct zorro_dev *z, return -ENOMEM; } - SET_MODULE_OWNER(dev); priv = netdev_priv(dev); r1->name = dev->name; @@ -783,7 +775,6 @@ static int __devinit a2065_init_one(struct zorro_dev *z, dev->hard_start_xmit = &lance_start_xmit; dev->tx_timeout = &lance_tx_timeout; dev->watchdog_timeo = 5*HZ; - dev->get_stats = &lance_get_stats; dev->set_multicast_list = &lance_set_multicast; dev->dma = 0; @@ -802,9 +793,8 @@ static int __devinit a2065_init_one(struct zorro_dev *z, zorro_set_drvdata(z, dev); printk(KERN_INFO "%s: A2065 at 0x%08lx, Ethernet Address " - "%02x:%02x:%02x:%02x:%02x:%02x\n", dev->name, board, - dev->dev_addr[0], dev->dev_addr[1], dev->dev_addr[2], - dev->dev_addr[3], dev->dev_addr[4], dev->dev_addr[5]); + "%s\n", dev->name, board, + print_mac(mac, dev->dev_addr)); return 0; } diff --git a/drivers/net/ac3200.c b/drivers/net/ac3200.c index 644c408515df9b643823557ce7459b985164d2a7..5136d94923aa3910e0007982dd7893c493e2fd62 100644 --- a/drivers/net/ac3200.c +++ b/drivers/net/ac3200.c @@ -103,8 +103,6 @@ static int __init do_ac3200_probe(struct net_device *dev) int irq = dev->irq; int mem_start = dev->mem_start; - SET_MODULE_OWNER(dev); - if (ioaddr > 0x1ff) /* Check a single specified location. */ return ac_probe1(ioaddr, dev); else if (ioaddr > 0) /* Don't probe at all. */ @@ -148,6 +146,7 @@ struct net_device * __init ac3200_probe(int unit) static int __init ac_probe1(int ioaddr, struct net_device *dev) { int i, retval; + DECLARE_MAC_BUF(mac); if (!request_region(ioaddr, AC_IO_EXTENT, DRV_NAME)) return -EBUSY; @@ -169,10 +168,11 @@ static int __init ac_probe1(int ioaddr, struct net_device *dev) inb(ioaddr + AC_ID_PORT + 2), inb(ioaddr + AC_ID_PORT + 3)); #endif - printk("AC3200 in EISA slot %d, node", ioaddr/0x1000); - for(i = 0; i < 6; i++) - printk(" %02x", dev->dev_addr[i] = inb(ioaddr + AC_SA_PROM + i)); + for (i = 0; i < 6; i++) + dev->dev_addr[i] = inb(ioaddr + AC_SA_PROM + i); + printk(KERN_DEBUG "AC3200 in EISA slot %d, node %s", + ioaddr/0x1000, print_mac(mac, dev->dev_addr)); #if 0 /* Check the vendor ID/prefix. 
Redundant after checking the EISA ID */ if (inb(ioaddr + AC_SA_PROM + 0) != AC_ADDR0 diff --git a/drivers/net/acenic.c b/drivers/net/acenic.c index 62e660a79387e92fbaabd7edd8d84b508ee841db..6c192650d34943451317142cdf8300052c8a43c2 100644 --- a/drivers/net/acenic.c +++ b/drivers/net/acenic.c @@ -406,7 +406,7 @@ MODULE_DEVICE_TABLE(pci, acenic_pci_tbl); #define DEF_STAT (2 * TICKS_PER_SEC) -static int link[ACE_MAX_MOD_PARMS]; +static int link_state[ACE_MAX_MOD_PARMS]; static int trace[ACE_MAX_MOD_PARMS]; static int tx_coal_tick[ACE_MAX_MOD_PARMS]; static int rx_coal_tick[ACE_MAX_MOD_PARMS]; @@ -419,7 +419,7 @@ MODULE_AUTHOR("Jes Sorensen "); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("AceNIC/3C985/GA620 Gigabit Ethernet driver"); -module_param_array(link, int, NULL, 0); +module_param_array_named(link, link_state, int, NULL, 0); module_param_array(trace, int, NULL, 0); module_param_array(tx_coal_tick, int, NULL, 0); module_param_array(max_tx_desc, int, NULL, 0); @@ -465,7 +465,6 @@ static int __devinit acenic_probe_one(struct pci_dev *pdev, return -ENOMEM; } - SET_MODULE_OWNER(dev); SET_NETDEV_DEV(dev, &pdev->dev); ap = dev->priv; @@ -894,6 +893,7 @@ static int __devinit ace_init(struct net_device *dev) int board_idx, ecode = 0; short i; unsigned char cache_size; + DECLARE_MAC_BUF(mac); ap = netdev_priv(dev); regs = ap->regs; @@ -987,36 +987,32 @@ static int __devinit ace_init(struct net_device *dev) mac1 = 0; for(i = 0; i < 4; i++) { - int tmp; + int t; mac1 = mac1 << 8; - tmp = read_eeprom_byte(dev, 0x8c+i); - if (tmp < 0) { + t = read_eeprom_byte(dev, 0x8c+i); + if (t < 0) { ecode = -EIO; goto init_error; } else - mac1 |= (tmp & 0xff); + mac1 |= (t & 0xff); } mac2 = 0; for(i = 4; i < 8; i++) { - int tmp; + int t; mac2 = mac2 << 8; - tmp = read_eeprom_byte(dev, 0x8c+i); - if (tmp < 0) { + t = read_eeprom_byte(dev, 0x8c+i); + if (t < 0) { ecode = -EIO; goto init_error; } else - mac2 |= (tmp & 0xff); + mac2 |= (t & 0xff); } writel(mac1, &regs->MacAddrHi); writel(mac2, &regs->MacAddrLo); - printk("MAC: %02x:%02x:%02x:%02x:%02x:%02x\n", - (mac1 >> 8) & 0xff, mac1 & 0xff, (mac2 >> 24) &0xff, - (mac2 >> 16) & 0xff, (mac2 >> 8) & 0xff, mac2 & 0xff); - dev->dev_addr[0] = (mac1 >> 8) & 0xff; dev->dev_addr[1] = mac1 & 0xff; dev->dev_addr[2] = (mac2 >> 24) & 0xff; @@ -1024,6 +1020,8 @@ static int __devinit ace_init(struct net_device *dev) dev->dev_addr[4] = (mac2 >> 8) & 0xff; dev->dev_addr[5] = mac2 & 0xff; + printk("MAC: %s\n", print_mac(mac, dev->dev_addr)); + /* * Looks like this is necessary to deal with on all architectures, * even this %$#%$# N440BX Intel based thing doesn't get it right.
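The acenic hunk above is one of many in this patch that replace open-coded "%02x:%02x:..." MAC address printks with DECLARE_MAC_BUF()/print_mac(). The idiom, shown for a hypothetical driver rather than taken from the patch:

#include <linux/kernel.h>
#include <linux/if_ether.h>
#include <linux/netdevice.h>

static void foo_announce(struct net_device *dev)
{
    DECLARE_MAC_BUF(mac);    /* stack buffer sized for "xx:xx:xx:xx:xx:xx" */

    /* print_mac() formats the address into the buffer and returns the
     * buffer pointer, so it can be passed straight to a %s conversion. */
    printk(KERN_INFO "%s: ethernet address %s\n",
           dev->name, print_mac(mac, dev->dev_addr));
}

Because print_mac() returns its buffer argument, printing two addresses in one message simply takes two DECLARE_MAC_BUF() buffers, as the print_eth() rewrite in 82596.c earlier in this patch does.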
@@ -1307,10 +1305,10 @@ static int __devinit ace_init(struct net_device *dev) writel(TX_RING_BASE, &regs->WinBase); if (ACE_IS_TIGON_I(ap)) { - ap->tx_ring = (struct tx_desc *) regs->Window; + ap->tx_ring = (__force struct tx_desc *) regs->Window; for (i = 0; i < (TIGON_I_TX_RING_ENTRIES * sizeof(struct tx_desc)) / sizeof(u32); i++) - writel(0, (void __iomem *)ap->tx_ring + i * 4); + writel(0, (__force void __iomem *)ap->tx_ring + i * 4); set_aceaddr(&info->tx_ctrl.rngptr, TX_RING_BASE); } else { @@ -1396,8 +1394,8 @@ static int __devinit ace_init(struct net_device *dev) /* * Override link default parameters */ - if ((board_idx >= 0) && link[board_idx]) { - int option = link[board_idx]; + if ((board_idx >= 0) && link_state[board_idx]) { + int option = link_state[board_idx]; tmp = LNK_ENABLE; @@ -2385,8 +2383,9 @@ static int ace_close(struct net_device *dev) if (mapping) { if (ACE_IS_TIGON_I(ap)) { - struct tx_desc __iomem *tx - = (struct tx_desc __iomem *) &ap->tx_ring[i]; + /* NB: TIGON_1 is special, tx_ring is in io space */ + struct tx_desc __iomem *tx; + tx = (__force struct tx_desc __iomem *) &ap->tx_ring[i]; writel(0, &tx->addr.addrhi); writel(0, &tx->addr.addrlo); writel(0, &tx->flagsize); @@ -2446,7 +2445,7 @@ ace_load_tx_bd(struct ace_private *ap, struct tx_desc *desc, u64 addr, #endif if (ACE_IS_TIGON_I(ap)) { - struct tx_desc __iomem *io = (struct tx_desc __iomem *) desc; + struct tx_desc __iomem *io = (__force struct tx_desc __iomem *) desc; writel(addr >> 32, &io->addr.addrhi); writel(addr & 0xffffffff, &io->addr.addrlo); writel(flagsize, &io->flagsize); @@ -2938,7 +2937,7 @@ static void __devinit ace_clear(struct ace_regs __iomem *regs, u32 dest, int siz * This operation requires the NIC to be halted and is performed with * interrupts disabled and with the spinlock hold.
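The __force casts added in the acenic hunks above are sparse address-space annotations: as the new comment notes, the Tigon I keeps its tx ring in device (I/O) memory, so the plain ap->tx_ring pointer has to be converted back to an __iomem pointer before it is handed to writel(). A stripped-down illustration of the idea; foo_desc and foo_clear_ring are hypothetical, not the acenic structures:

#include <linux/types.h>
#include <linux/io.h>

struct foo_desc {
    u32 addr;
    u32 len;
};

/* The ring pointer is stored as an ordinary kernel pointer so that the
 * host-memory and device-memory cases can share most of the driver,
 * but every access to a ring that lives in device memory must go
 * through an __iomem pointer and writel().  The __force cast tells
 * sparse that this address-space conversion is intentional rather
 * than a bug. */
static void foo_clear_ring(struct foo_desc *ring, int entries)
{
    struct foo_desc __iomem *io = (__force struct foo_desc __iomem *)ring;
    int i;

    for (i = 0; i < entries; i++) {
        writel(0, &io[i].addr);
        writel(0, &io[i].len);
    }
}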
*/ -int __devinit ace_load_firmware(struct net_device *dev) +static int __devinit ace_load_firmware(struct net_device *dev) { struct ace_private *ap = netdev_priv(dev); struct ace_regs __iomem *regs = ap->regs; diff --git a/drivers/net/amd8111e.c b/drivers/net/amd8111e.c index a61b2f89fc336c3f7bc75a9f2dece28c37124586..1cc74ec88a5860e00a895bd05b75882dac7fe9b2 100644 --- a/drivers/net/amd8111e.c +++ b/drivers/net/amd8111e.c @@ -709,7 +709,8 @@ static int amd8111e_tx(struct net_device *dev) lp->tx_complete_idx++; /*COAL update tx coalescing parameters */ lp->coal_conf.tx_packets++; - lp->coal_conf.tx_bytes += lp->tx_ring[tx_index].buff_count; + lp->coal_conf.tx_bytes += + le16_to_cpu(lp->tx_ring[tx_index].buff_count); if (netif_queue_stopped(dev) && lp->tx_complete_idx > lp->tx_idx - NUM_TX_BUFFERS +2){ @@ -723,9 +724,10 @@ static int amd8111e_tx(struct net_device *dev) #ifdef CONFIG_AMD8111E_NAPI /* This function handles the driver receive operation in polling mode */ -static int amd8111e_rx_poll(struct net_device *dev, int * budget) +static int amd8111e_rx_poll(struct napi_struct *napi, int budget) { - struct amd8111e_priv *lp = netdev_priv(dev); + struct amd8111e_priv *lp = container_of(napi, struct amd8111e_priv, napi); + struct net_device *dev = lp->amd8111e_net_dev; int rx_index = lp->rx_idx & RX_RING_DR_MOD_MASK; void __iomem *mmio = lp->mmio; struct sk_buff *skb,*new_skb; @@ -737,7 +739,7 @@ static int amd8111e_rx_poll(struct net_device *dev, int * budget) #if AMD8111E_VLAN_TAG_USED short vtag; #endif - int rx_pkt_limit = dev->quota; + int rx_pkt_limit = budget; unsigned long flags; do{ @@ -838,21 +840,14 @@ static int amd8111e_rx_poll(struct net_device *dev, int * budget) } while(intr0 & RINT0); /* Receive descriptor is empty now */ - dev->quota -= num_rx_pkt; - *budget -= num_rx_pkt; - spin_lock_irqsave(&lp->lock, flags); - netif_rx_complete(dev); + __netif_rx_complete(dev, napi); writel(VAL0|RINTEN0, mmio + INTEN0); writel(VAL2 | RDMD0, mmio + CMD0); spin_unlock_irqrestore(&lp->lock, flags); - return 0; rx_not_empty: - /* Do not call a netif_rx_complete */ - dev->quota -= num_rx_pkt; - *budget -= num_rx_pkt; - return 1; + return num_rx_pkt; } #else @@ -1287,11 +1282,11 @@ static irqreturn_t amd8111e_interrupt(int irq, void *dev_id) /* Check if Receive Interrupt has occurred. */ #ifdef CONFIG_AMD8111E_NAPI if(intr0 & RINT0){ - if(netif_rx_schedule_prep(dev)){ + if(netif_rx_schedule_prep(dev, &lp->napi)){ /* Disable receive interupts */ writel(RINTEN0, mmio + INTEN0); /* Schedule a polling routine */ - __netif_rx_schedule(dev); + __netif_rx_schedule(dev, &lp->napi); } else if (intren0 & RINTEN0) { printk("************Driver bug! 
\ @@ -1345,6 +1340,8 @@ static int amd8111e_close(struct net_device * dev) struct amd8111e_priv *lp = netdev_priv(dev); netif_stop_queue(dev); + napi_disable(&lp->napi); + spin_lock_irq(&lp->lock); amd8111e_disable_interrupt(lp); @@ -1375,12 +1372,15 @@ static int amd8111e_open(struct net_device * dev ) dev->name, dev)) return -EAGAIN; + napi_enable(&lp->napi); + spin_lock_irq(&lp->lock); amd8111e_init_hw_default(lp); if(amd8111e_restart(dev)){ spin_unlock_irq(&lp->lock); + napi_disable(&lp->napi); if (dev->irq) free_irq(dev->irq, dev); return -ENOMEM; @@ -1405,7 +1405,7 @@ This function checks if there is any transmit descriptors available to queue mo static int amd8111e_tx_queue_avail(struct amd8111e_priv* lp ) { int tx_index = lp->tx_idx & TX_BUFF_MOD_MASK; - if(lp->tx_skbuff[tx_index] != 0) + if (lp->tx_skbuff[tx_index]) return -1; else return 0; @@ -1442,7 +1442,7 @@ static int amd8111e_start_xmit(struct sk_buff *skb, struct net_device * dev) lp->tx_dma_addr[tx_index] = pci_map_single(lp->pci_dev, skb->data, skb->len, PCI_DMA_TODEVICE); lp->tx_ring[tx_index].buff_phy_addr = - (u32) cpu_to_le32(lp->tx_dma_addr[tx_index]); + cpu_to_le32(lp->tx_dma_addr[tx_index]); /* Set FCS and LTINT bits */ wmb(); @@ -1935,6 +1935,7 @@ static int __devinit amd8111e_probe_one(struct pci_dev *pdev, unsigned long reg_addr,reg_len; struct amd8111e_priv* lp; struct net_device* dev; + DECLARE_MAC_BUF(mac); err = pci_enable_device(pdev); if(err){ @@ -1983,7 +1984,6 @@ static int __devinit amd8111e_probe_one(struct pci_dev *pdev, goto err_free_reg; } - SET_MODULE_OWNER(dev); SET_NETDEV_DEV(dev, &pdev->dev); #if AMD8111E_VLAN_TAG_USED @@ -1999,7 +1999,7 @@ static int __devinit amd8111e_probe_one(struct pci_dev *pdev, spin_lock_init(&lp->lock); lp->mmio = ioremap(reg_addr, reg_len); - if (lp->mmio == 0) { + if (!lp->mmio) { printk(KERN_ERR "amd8111e: Cannot map device registers, " "exiting\n"); err = -ENOMEM; @@ -2008,7 +2008,7 @@ static int __devinit amd8111e_probe_one(struct pci_dev *pdev, /* Initializing MAC address */ for(i = 0; i < ETH_ADDR_LEN; i++) - dev->dev_addr[i] =readb(lp->mmio + PADR + i); + dev->dev_addr[i] = readb(lp->mmio + PADR + i); /* Setting user defined parametrs */ lp->ext_phy_option = speed_duplex[card_idx]; @@ -2031,8 +2031,7 @@ static int __devinit amd8111e_probe_one(struct pci_dev *pdev, dev->tx_timeout = amd8111e_tx_timeout; dev->watchdog_timeo = AMD8111E_TX_TIMEOUT; #ifdef CONFIG_AMD8111E_NAPI - dev->poll = amd8111e_rx_poll; - dev->weight = 32; + netif_napi_add(dev, &lp->napi, amd8111e_rx_poll, 32); #endif #ifdef CONFIG_NET_POLL_CONTROLLER dev->poll_controller = amd8111e_poll; @@ -2078,11 +2077,10 @@ static int __devinit amd8111e_probe_one(struct pci_dev *pdev, /* display driver and device information */ chip_version = (readl(lp->mmio + CHIPID) & 0xf0000000)>>28; - printk(KERN_INFO "%s: AMD-8111e Driver Version: %s\n", dev->name,MODULE_VERS); - printk(KERN_INFO "%s: [ Rev %x ] PCI 10/100BaseT Ethernet ", dev->name, chip_version); - for (i = 0; i < 6; i++) - printk("%2.2x%c",dev->dev_addr[i],i == 5 ? 
' ' : ':'); - printk( "\n"); + printk(KERN_INFO "%s: AMD-8111e Driver Version: %s\n", + dev->name,MODULE_VERS); + printk(KERN_INFO "%s: [ Rev %x ] PCI 10/100BaseT Ethernet %s\n", + dev->name, chip_version, print_mac(mac, dev->dev_addr)); if (lp->ext_phy_id) printk(KERN_INFO "%s: Found MII PHY ID 0x%08x at address 0x%02x\n", dev->name, lp->ext_phy_id, lp->ext_phy_addr); diff --git a/drivers/net/amd8111e.h b/drivers/net/amd8111e.h index e65080a5994a62010ec66fa79075997bb507e104..28c60a71ed509e87bb19a41dfd556ad344b78875 100644 --- a/drivers/net/amd8111e.h +++ b/drivers/net/amd8111e.h @@ -655,32 +655,32 @@ typedef enum { struct amd8111e_tx_dr{ - u16 buff_count; /* Size of the buffer pointed by this descriptor */ + __le16 buff_count; /* Size of the buffer pointed by this descriptor */ - u16 tx_flags; + __le16 tx_flags; - u16 tag_ctrl_info; + __le16 tag_ctrl_info; - u16 tag_ctrl_cmd; + __le16 tag_ctrl_cmd; - u32 buff_phy_addr; + __le32 buff_phy_addr; - u32 reserved; + __le32 reserved; }; struct amd8111e_rx_dr{ - u32 reserved; + __le32 reserved; - u16 msg_count; /* Received message len */ + __le16 msg_count; /* Received message len */ - u16 tag_ctrl_info; + __le16 tag_ctrl_info; - u16 buff_count; /* Len of the buffer pointed by descriptor. */ + __le16 buff_count; /* Len of the buffer pointed by descriptor. */ - u16 rx_flags; + __le16 rx_flags; - u32 buff_phy_addr; + __le32 buff_phy_addr; }; struct amd8111e_link_config{ @@ -763,6 +763,8 @@ struct amd8111e_priv{ /* Reg memory mapped address */ void __iomem *mmio; + struct napi_struct napi; + spinlock_t lock; /* Guard lock */ unsigned long rx_idx, tx_idx; /* The next free ring entry */ unsigned long tx_complete_idx; diff --git a/drivers/net/apne.c b/drivers/net/apne.c index 954191119d35ceef89331927950e08465cf2b42a..c12cbdf368b1edbe37b3077e9debdcf4f93cc847 100644 --- a/drivers/net/apne.c +++ b/drivers/net/apne.c @@ -148,7 +148,6 @@ struct net_device * __init apne_probe(int unit) sprintf(dev->name, "eth%d", unit); netdev_boot_setup_check(dev); } - SET_MODULE_OWNER(dev); /* disable pcmcia irq for readtuple */ pcmcia_disable_irq(); @@ -205,6 +204,7 @@ static int __init apne_probe1(struct net_device *dev, int ioaddr) int neX000, ctron; #endif static unsigned version_printed; + DECLARE_MAC_BUF(mac); if (ei_debug && version_printed++ == 0) printk(version); @@ -247,7 +247,7 @@ static int __init apne_probe1(struct net_device *dev, int ioaddr) {0x00, NE_EN0_RSARHI}, {E8390_RREAD+E8390_START, NE_CMD}, }; - for (i = 0; i < sizeof(program_seq)/sizeof(program_seq[0]); i++) { + for (i = 0; i < ARRAY_SIZE(program_seq); i++) { outb(program_seq[i].value, ioaddr + program_seq[i].offset); } @@ -317,12 +317,12 @@ static int __init apne_probe1(struct net_device *dev, int ioaddr) i = request_irq(dev->irq, apne_interrupt, IRQF_SHARED, DRV_NAME, dev); if (i) return i; - for(i = 0; i < ETHER_ADDR_LEN; i++) { - printk(" %2.2x", SA_prom[i]); + for(i = 0; i < ETHER_ADDR_LEN; i++) dev->dev_addr[i] = SA_prom[i]; - } - printk("\n%s: %s found.\n", dev->name, name); + printk(" %s\n", print_mac(mac, dev->dev_addr)); + + printk("%s: %s found.\n", dev->name, name); ei_status.name = name; ei_status.tx_start_page = start_page; diff --git a/drivers/net/appletalk/cops.c b/drivers/net/appletalk/cops.c index da6ffa8cd81e35c9960c9d9d61cd8677842e4c84..92c3a4cf0bb18d15c83a3ce5f7206a34172514d1 100644 --- a/drivers/net/appletalk/cops.c +++ b/drivers/net/appletalk/cops.c @@ -194,10 +194,6 @@ static void cops_timeout(struct net_device *dev); static void cops_rx (struct net_device *dev); static int 
cops_send_packet (struct sk_buff *skb, struct net_device *dev); static void set_multicast_list (struct net_device *dev); -static int cops_hard_header (struct sk_buff *skb, struct net_device *dev, - unsigned short type, void *daddr, void *saddr, - unsigned len); - static int cops_ioctl (struct net_device *dev, struct ifreq *rq, int cmd); static int cops_close (struct net_device *dev); static struct net_device_stats *cops_get_stats (struct net_device *dev); @@ -235,8 +231,6 @@ struct net_device * __init cops_probe(int unit) base_addr = dev->base_addr = io; } - SET_MODULE_OWNER(dev); - if (base_addr > 0x1ff) { /* Check a single specified location. */ err = cops_probe1(dev, base_addr); } else if (base_addr != 0) { /* Don't probe at all. */ @@ -333,7 +327,6 @@ static int __init cops_probe1(struct net_device *dev, int ioaddr) dev->base_addr = ioaddr; lp = netdev_priv(dev); - memset(lp, 0, sizeof(struct cops_local)); spin_lock_init(&lp->lock); /* Copy local board variable to lp struct. */ @@ -342,7 +335,7 @@ static int __init cops_probe1(struct net_device *dev, int ioaddr) dev->hard_start_xmit = cops_send_packet; dev->tx_timeout = cops_timeout; dev->watchdog_timeo = HZ * 2; - dev->hard_header = cops_hard_header; + dev->get_stats = cops_get_stats; dev->open = cops_open; dev->stop = cops_close; @@ -946,19 +939,6 @@ static void set_multicast_list(struct net_device *dev) printk("%s: set_multicast_list executed\n", dev->name); } -/* - * Another Dummy function to keep the Appletalk layer happy. - */ - -static int cops_hard_header(struct sk_buff *skb, struct net_device *dev, - unsigned short type, void *daddr, void *saddr, - unsigned len) -{ - if(cops_debug >= 3) - printk("%s: cops_hard_header executed. Wow!\n", dev->name); - return 0; -} - /* * System ioctls for the COPS LocalTalk card. */ diff --git a/drivers/net/appletalk/ipddp.c b/drivers/net/appletalk/ipddp.c index f22e46dfd770681c2772b1b9d904811e55e33475..1071144edd66fbdf0f594ec0104c065fdee06e82 100644 --- a/drivers/net/appletalk/ipddp.c +++ b/drivers/net/appletalk/ipddp.c @@ -65,7 +65,6 @@ static struct net_device * __init ipddp_init(void) if (!dev) return ERR_PTR(-ENOMEM); - SET_MODULE_OWNER(dev); strcpy(dev->name, "ipddp%d"); if (version_printed++ == 0) @@ -117,7 +116,7 @@ static struct net_device_stats *ipddp_get_stats(struct net_device *dev) */ static int ipddp_xmit(struct sk_buff *skb, struct net_device *dev) { - u32 paddr = ((struct rtable*)skb->dst)->rt_gateway; + __be32 paddr = ((struct rtable*)skb->dst)->rt_gateway; struct ddpehdr *ddp; struct ipddp_route *rt; struct atalk_addr *our_addr; diff --git a/drivers/net/appletalk/ipddp.h b/drivers/net/appletalk/ipddp.h index 52072fb0c610da46bc0e2491e15f5dcc0265b086..531519da99a324c618f8aaf898c0fee0cdf3ba43 100644 --- a/drivers/net/appletalk/ipddp.h +++ b/drivers/net/appletalk/ipddp.h @@ -14,7 +14,7 @@ struct ipddp_route { struct net_device *dev; /* Carrier device */ - __u32 ip; /* IP address */ + __be32 ip; /* IP address */ struct atalk_addr at; /* Gateway appletalk address */ int flags; struct ipddp_route *next; diff --git a/drivers/net/appletalk/ltpc.c b/drivers/net/appletalk/ltpc.c index 6a6cbd331a16f92e2696b7483f79a0a8aaf55598..6ab2c2d4d673de32baf44307a32a0d5baf00183b 100644 --- a/drivers/net/appletalk/ltpc.c +++ b/drivers/net/appletalk/ltpc.c @@ -870,15 +870,6 @@ static void set_multicast_list(struct net_device *dev) /* Actually netatalk needs fixing! 
*/ } -static int ltpc_hard_header (struct sk_buff *skb, struct net_device *dev, - unsigned short type, void *daddr, void *saddr, unsigned len) -{ - if(debug & DEBUG_VERBOSE) - printk("ltpc_hard_header called for device %s\n", - dev->name); - return 0; -} - static int ltpc_poll_counter; static void ltpc_poll(unsigned long l) @@ -1046,8 +1037,6 @@ struct net_device * __init ltpc_probe(void) if (!dev) goto out; - SET_MODULE_OWNER(dev); - /* probe for the I/O port address */ if (io != 0x240 && request_region(0x220,8,"ltpc")) { @@ -1143,7 +1132,6 @@ struct net_device * __init ltpc_probe(void) /* Fill in the fields of the device structure with ethernet-generic values. */ dev->hard_start_xmit = ltpc_xmit; - dev->hard_header = ltpc_hard_header; dev->get_stats = ltpc_get_stats; /* add the ltpc-specific things */ diff --git a/drivers/net/arcnet/arcnet.c b/drivers/net/arcnet/arcnet.c index 681e20b8466f7cc9e45bf549817817cf6dcfd4ce..c59c8067de991989498bceb5b9f0dbc9a60c2e03 100644 --- a/drivers/net/arcnet/arcnet.c +++ b/drivers/net/arcnet/arcnet.c @@ -102,8 +102,8 @@ static int arcnet_close(struct net_device *dev); static int arcnet_send_packet(struct sk_buff *skb, struct net_device *dev); static void arcnet_timeout(struct net_device *dev); static int arcnet_header(struct sk_buff *skb, struct net_device *dev, - unsigned short type, void *daddr, void *saddr, - unsigned len); + unsigned short type, const void *daddr, + const void *saddr, unsigned len); static int arcnet_rebuild_header(struct sk_buff *skb); static struct net_device_stats *arcnet_get_stats(struct net_device *dev); static int go_tx(struct net_device *dev); @@ -317,11 +317,17 @@ static int choose_mtu(void) return mtu == 65535 ? XMTU : mtu; } +static const struct header_ops arcnet_header_ops = { + .create = arcnet_header, + .rebuild = arcnet_rebuild_header, +}; + /* Setup a struct device for ARCnet. 
*/ static void arcdev_setup(struct net_device *dev) { dev->type = ARPHRD_ARCNET; + dev->header_ops = &arcnet_header_ops; dev->hard_header_len = sizeof(struct archdr); dev->mtu = choose_mtu(); @@ -342,8 +348,6 @@ static void arcdev_setup(struct net_device *dev) dev->hard_start_xmit = arcnet_send_packet; dev->tx_timeout = arcnet_timeout; dev->get_stats = arcnet_get_stats; - dev->hard_header = arcnet_header; - dev->rebuild_header = arcnet_rebuild_header; } struct net_device *alloc_arcdev(char *name) @@ -488,10 +492,10 @@ static int arcnet_close(struct net_device *dev) static int arcnet_header(struct sk_buff *skb, struct net_device *dev, - unsigned short type, void *daddr, void *saddr, - unsigned len) + unsigned short type, const void *daddr, + const void *saddr, unsigned len) { - struct arcnet_local *lp = dev->priv; + const struct arcnet_local *lp = netdev_priv(dev); uint8_t _daddr, proto_num; struct ArcProto *proto; diff --git a/drivers/net/arcnet/com90io.c b/drivers/net/arcnet/com90io.c index 1f03027354167cd29ba1c82467192f4d6892eecb..6599f1046c7b844aec858e5456196122e67ced64 100644 --- a/drivers/net/arcnet/com90io.c +++ b/drivers/net/arcnet/com90io.c @@ -398,8 +398,6 @@ static int __init com90io_init(void) if (!dev) return -ENOMEM; - SET_MODULE_OWNER(dev); - dev->base_addr = io; dev->irq = irq; if (dev->irq == 2) diff --git a/drivers/net/arcnet/rfc1051.c b/drivers/net/arcnet/rfc1051.c index 2de8877ece292df4e84c85e2c6ba01c58366a74b..dab185bc51f1a1bc05b3c689ed7fac5834b06c96 100644 --- a/drivers/net/arcnet/rfc1051.c +++ b/drivers/net/arcnet/rfc1051.c @@ -34,7 +34,7 @@ #define VERSION "arcnet: RFC1051 \"simple standard\" (`s') encapsulation support loaded.\n" -static unsigned short type_trans(struct sk_buff *skb, struct net_device *dev); +static __be16 type_trans(struct sk_buff *skb, struct net_device *dev); static void rx(struct net_device *dev, int bufnum, struct archdr *pkthdr, int length); static int build_header(struct sk_buff *skb, struct net_device *dev, @@ -86,7 +86,7 @@ MODULE_LICENSE("GPL"); * * With ARCnet we have to convert everything to Ethernet-style stuff. */ -static unsigned short type_trans(struct sk_buff *skb, struct net_device *dev) +static __be16 type_trans(struct sk_buff *skb, struct net_device *dev) { struct arcnet_local *lp = dev->priv; struct archdr *pkt = (struct archdr *) skb->data; diff --git a/drivers/net/arcnet/rfc1201.c b/drivers/net/arcnet/rfc1201.c index 460a095000c22a756ae24274d8aeaa96db9fb07f..6d6d95cc4404b7a727a20f049dc8240db595351d 100644 --- a/drivers/net/arcnet/rfc1201.c +++ b/drivers/net/arcnet/rfc1201.c @@ -34,7 +34,7 @@ MODULE_LICENSE("GPL"); #define VERSION "arcnet: RFC1201 \"standard\" (`a') encapsulation support loaded.\n" -static unsigned short type_trans(struct sk_buff *skb, struct net_device *dev); +static __be16 type_trans(struct sk_buff *skb, struct net_device *dev); static void rx(struct net_device *dev, int bufnum, struct archdr *pkthdr, int length); static int build_header(struct sk_buff *skb, struct net_device *dev, @@ -88,7 +88,7 @@ module_exit(arcnet_rfc1201_exit); * * With ARCnet we have to convert everything to Ethernet-style stuff. 
*/ -static unsigned short type_trans(struct sk_buff *skb, struct net_device *dev) +static __be16 type_trans(struct sk_buff *skb, struct net_device *dev) { struct archdr *pkt = (struct archdr *) skb->data; struct arc_rfc1201 *soft = &pkt->soft.rfc1201; @@ -456,7 +456,7 @@ static void load_pkt(struct net_device *dev, struct arc_hardware *hard, excsoft.proto = soft->proto; excsoft.split_flag = 0xff; - excsoft.sequence = 0xffff; + excsoft.sequence = htons(0xffff); hard->offset[0] = 0; ofs = 512 - softlen; diff --git a/drivers/net/ariadne.c b/drivers/net/ariadne.c index bc5a38a6705f06df72d8452e555b0de6f008ade0..3fa3bccd1adb26194314986764e6031efb5a8b41 100644 --- a/drivers/net/ariadne.c +++ b/drivers/net/ariadne.c @@ -166,6 +166,7 @@ static int __devinit ariadne_init_one(struct zorro_dev *z, struct net_device *dev; struct ariadne_private *priv; int err; + DECLARE_MAC_BUF(mac); r1 = request_mem_region(base_addr, sizeof(struct Am79C960), "Am79C960"); if (!r1) @@ -183,7 +184,6 @@ static int __devinit ariadne_init_one(struct zorro_dev *z, return -ENOMEM; } - SET_MODULE_OWNER(dev); priv = netdev_priv(dev); r1->name = dev->name; @@ -217,9 +217,8 @@ static int __devinit ariadne_init_one(struct zorro_dev *z, zorro_set_drvdata(z, dev); printk(KERN_INFO "%s: Ariadne at 0x%08lx, Ethernet Address " - "%02x:%02x:%02x:%02x:%02x:%02x\n", dev->name, board, - dev->dev_addr[0], dev->dev_addr[1], dev->dev_addr[2], - dev->dev_addr[3], dev->dev_addr[4], dev->dev_addr[5]); + "%s\n", dev->name, board, + print_mac(mac, dev->dev_addr)); return 0; } @@ -615,21 +614,17 @@ static int ariadne_start_xmit(struct sk_buff *skb, struct net_device *dev) /* Fill in a Tx ring entry */ #if 0 - printk(KERN_DEBUG "TX pkt type 0x%04x from ", ((u_short *)skb->data)[6]); - { - int i; - u_char *ptr = &((u_char *)skb->data)[6]; - for (i = 0; i < 6; i++) - printk("%02x", ptr[i]); - } - printk(" to "); - { - int i; - u_char *ptr = (u_char *)skb->data; - for (i = 0; i < 6; i++) - printk("%02x", ptr[i]); - } - printk(" data 0x%08x len %d\n", (int)skb->data, (int)skb->len); +{ + DECLARE_MAC_BUF(mac); + DECLARE_MAC_BUF(mac2); + + printk(KERN_DEBUG "TX pkt type 0x%04x from %s to %s " + " data 0x%08x len %d\n", + ((u_short *)skb->data)[6], + print_mac(mac, ((const u8 *)skb->data)+6), + print_mac(mac, (const u8 *)skb->data), + (int)skb->data, (int)skb->len); +} #endif local_irq_save(flags); @@ -749,22 +744,22 @@ static int ariadne_rx(struct net_device *dev) skb_copy_to_linear_data(skb, (char *)priv->rx_buff[entry], pkt_len); skb->protocol=eth_type_trans(skb,dev); #if 0 +{ + DECLARE_MAC_BUF(mac); + printk(KERN_DEBUG "RX pkt type 0x%04x from ", ((u_short *)skb->data)[6]); { - int i; u_char *ptr = &((u_char *)skb->data)[6]; - for (i = 0; i < 6; i++) - printk("%02x", ptr[i]); + printk("%s", print_mac(mac, ptr)); } printk(" to "); { - int i; u_char *ptr = (u_char *)skb->data; - for (i = 0; i < 6; i++) - printk("%02x", ptr[i]); + printk("%s", print_mac(mac, ptr)); } printk(" data 0x%08x len %d\n", (int)skb->data, (int)skb->len); +} #endif netif_rx(skb); diff --git a/drivers/net/arm/am79c961a.c b/drivers/net/arm/am79c961a.c index 2143eeb7a2b0fdf34143d8c14bfe0a623a5c7894..ba6bd03a015ff6afad60ae65d8c614b35a6671d4 100644 --- a/drivers/net/arm/am79c961a.c +++ b/drivers/net/arm/am79c961a.c @@ -414,7 +414,7 @@ static void am79c961_setmulticastlist (struct net_device *dev) /* * Update the multicast hash table */ - for (i = 0; i < sizeof(multi_hash) / sizeof(multi_hash[0]); i++) + for (i = 0; i < ARRAY_SIZE(multi_hash); i++) write_rreg(dev->base_addr, i + LADRL, 
multi_hash[i]); /* @@ -741,12 +741,10 @@ static int __init am79c961_probe(struct platform_device *pdev) ret = register_netdev(dev); if (ret == 0) { - printk(KERN_INFO "%s: ether address ", dev->name); - - /* Retrive and print the ethernet address. */ - for (i = 0; i < 6; i++) - printk (i == 5 ? "%02x\n" : "%02x:", dev->dev_addr[i]); + DECLARE_MAC_BUF(mac); + printk(KERN_INFO "%s: ether address %s\n", + dev->name, print_mac(mac, dev->dev_addr)); return 0; } diff --git a/drivers/net/arm/at91_ether.c b/drivers/net/arm/at91_ether.c index ef2cc80256a36bc0692ba7fe2857e06fc1045eaf..25b114a4e2b16f57373481da83b6066090518543 100644 --- a/drivers/net/arm/at91_ether.c +++ b/drivers/net/arm/at91_ether.c @@ -485,6 +485,7 @@ static void update_mac_address(struct net_device *dev) static int set_mac_address(struct net_device *dev, void* addr) { struct sockaddr *address = addr; + DECLARE_MAC_BUF(mac); if (!is_valid_ether_addr(address->sa_data)) return -EADDRNOTAVAIL; @@ -492,9 +493,8 @@ static int set_mac_address(struct net_device *dev, void* addr) memcpy(dev->dev_addr, address->sa_data, dev->addr_len); update_mac_address(dev); - printk("%s: Setting MAC address to %02x:%02x:%02x:%02x:%02x:%02x\n", dev->name, - dev->dev_addr[0], dev->dev_addr[1], dev->dev_addr[2], - dev->dev_addr[3], dev->dev_addr[4], dev->dev_addr[5]); + printk("%s: Setting MAC address to %s\n", dev->name, + print_mac(mac, dev->dev_addr)); return 0; } @@ -979,6 +979,7 @@ static int __init at91ether_setup(unsigned long phy_type, unsigned short phy_add struct at91_private *lp; unsigned int val; int res; + DECLARE_MAC_BUF(mac); dev = alloc_etherdev(sizeof(struct at91_private)); if (!dev) @@ -986,7 +987,6 @@ static int __init at91ether_setup(unsigned long phy_type, unsigned short phy_add dev->base_addr = AT91_VA_BASE_EMAC; dev->irq = AT91RM9200_ID_EMAC; - SET_MODULE_OWNER(dev); /* Install the interrupt handler */ if (request_irq(dev->irq, at91ether_interrupt, 0, dev->name, dev)) { @@ -1082,12 +1082,11 @@ static int __init at91ether_setup(unsigned long phy_type, unsigned short phy_add } /* Display ethernet banner */ - printk(KERN_INFO "%s: AT91 ethernet at 0x%08x int=%d %s%s (%02x:%02x:%02x:%02x:%02x:%02x)\n", - dev->name, (uint) dev->base_addr, dev->irq, - at91_emac_read(AT91_EMAC_CFG) & AT91_EMAC_SPD ? "100-" : "10-", - at91_emac_read(AT91_EMAC_CFG) & AT91_EMAC_FD ? "FullDuplex" : "HalfDuplex", - dev->dev_addr[0], dev->dev_addr[1], dev->dev_addr[2], - dev->dev_addr[3], dev->dev_addr[4], dev->dev_addr[5]); + printk(KERN_INFO "%s: AT91 ethernet at 0x%08x int=%d %s%s (%s)\n", + dev->name, (uint) dev->base_addr, dev->irq, + at91_emac_read(AT91_EMAC_CFG) & AT91_EMAC_SPD ? "100-" : "10-", + at91_emac_read(AT91_EMAC_CFG) & AT91_EMAC_FD ? "FullDuplex" : "HalfDuplex", + print_mac(mac, dev->dev_addr)); if ((phy_type == MII_DM9161_ID) || (lp->phy_type == MII_DM9161A_ID)) printk(KERN_INFO "%s: Davicom 9161 PHY %s\n", dev->name, (lp->phy_media == PORT_FIBRE) ? 
"(Fiber)" : "(Copper)"); else if (phy_type == MII_LXT971A_ID) diff --git a/drivers/net/arm/ep93xx_eth.c b/drivers/net/arm/ep93xx_eth.c index f6ece1d43f6e81494d6fd1ca6f1d45112882b15e..7f016f3d5bf0ea3cd4f7675d50e0dd1c24d8f5f9 100644 --- a/drivers/net/arm/ep93xx_eth.c +++ b/drivers/net/arm/ep93xx_eth.c @@ -169,6 +169,9 @@ struct ep93xx_priv spinlock_t tx_pending_lock; unsigned int tx_pending; + struct net_device *dev; + struct napi_struct napi; + struct net_device_stats stats; struct mii_if_info mii; @@ -190,15 +193,11 @@ static struct net_device_stats *ep93xx_get_stats(struct net_device *dev) return &(ep->stats); } -static int ep93xx_rx(struct net_device *dev, int *budget) +static int ep93xx_rx(struct net_device *dev, int processed, int budget) { struct ep93xx_priv *ep = netdev_priv(dev); - int rx_done; - int processed; - rx_done = 0; - processed = 0; - while (*budget > 0) { + while (processed < budget) { int entry; struct ep93xx_rstat *rstat; u32 rstat0; @@ -211,10 +210,8 @@ static int ep93xx_rx(struct net_device *dev, int *budget) rstat0 = rstat->rstat0; rstat1 = rstat->rstat1; - if (!(rstat0 & RSTAT0_RFP) || !(rstat1 & RSTAT1_RFP)) { - rx_done = 1; + if (!(rstat0 & RSTAT0_RFP) || !(rstat1 & RSTAT1_RFP)) break; - } rstat->rstat0 = 0; rstat->rstat1 = 0; @@ -275,8 +272,6 @@ static int ep93xx_rx(struct net_device *dev, int *budget) err: ep->rx_pointer = (entry + 1) & (RX_QUEUE_ENTRIES - 1); processed++; - dev->quota--; - (*budget)--; } if (processed) { @@ -284,7 +279,7 @@ static int ep93xx_rx(struct net_device *dev, int *budget) wrw(ep, REG_RXSTSENQ, processed); } - return !rx_done; + return processed; } static int ep93xx_have_more_rx(struct ep93xx_priv *ep) @@ -293,36 +288,32 @@ static int ep93xx_have_more_rx(struct ep93xx_priv *ep) return !!((rstat->rstat0 & RSTAT0_RFP) && (rstat->rstat1 & RSTAT1_RFP)); } -static int ep93xx_poll(struct net_device *dev, int *budget) +static int ep93xx_poll(struct napi_struct *napi, int budget) { - struct ep93xx_priv *ep = netdev_priv(dev); - - /* - * @@@ Have to stop polling if device is downed while we - * are polling. 
- */ + struct ep93xx_priv *ep = container_of(napi, struct ep93xx_priv, napi); + struct net_device *dev = ep->dev; + int rx = 0; poll_some_more: - if (ep93xx_rx(dev, budget)) - return 1; - - netif_rx_complete(dev); - - spin_lock_irq(&ep->rx_lock); - wrl(ep, REG_INTEN, REG_INTEN_TX | REG_INTEN_RX); - if (ep93xx_have_more_rx(ep)) { - wrl(ep, REG_INTEN, REG_INTEN_TX); - wrl(ep, REG_INTSTSP, REG_INTSTS_RX); + rx = ep93xx_rx(dev, rx, budget); + if (rx < budget) { + int more = 0; + + spin_lock_irq(&ep->rx_lock); + __netif_rx_complete(dev, napi); + wrl(ep, REG_INTEN, REG_INTEN_TX | REG_INTEN_RX); + if (ep93xx_have_more_rx(ep)) { + wrl(ep, REG_INTEN, REG_INTEN_TX); + wrl(ep, REG_INTSTSP, REG_INTSTS_RX); + more = 1; + } spin_unlock_irq(&ep->rx_lock); - if (netif_rx_reschedule(dev, 0)) + if (more && netif_rx_reschedule(dev, napi)) goto poll_some_more; - - return 0; } - spin_unlock_irq(&ep->rx_lock); - return 0; + return rx; } static int ep93xx_xmit(struct sk_buff *skb, struct net_device *dev) @@ -426,9 +417,9 @@ static irqreturn_t ep93xx_irq(int irq, void *dev_id) if (status & REG_INTSTS_RX) { spin_lock(&ep->rx_lock); - if (likely(__netif_rx_schedule_prep(dev))) { + if (likely(__netif_rx_schedule_prep(dev, &ep->napi))) { wrl(ep, REG_INTEN, REG_INTEN_TX); - __netif_rx_schedule(dev); + __netif_rx_schedule(dev, &ep->napi); } spin_unlock(&ep->rx_lock); } @@ -648,7 +639,10 @@ static int ep93xx_open(struct net_device *dev) dev->dev_addr[4], dev->dev_addr[5]); } + napi_enable(&ep->napi); + if (ep93xx_start_hw(dev)) { + napi_disable(&ep->napi); ep93xx_free_buffers(ep); return -EIO; } @@ -662,6 +656,7 @@ static int ep93xx_open(struct net_device *dev) err = request_irq(ep->irq, ep93xx_irq, IRQF_SHARED, dev->name, dev); if (err) { + napi_disable(&ep->napi); ep93xx_stop_hw(dev); ep93xx_free_buffers(ep); return err; @@ -678,6 +673,7 @@ static int ep93xx_close(struct net_device *dev) { struct ep93xx_priv *ep = netdev_priv(dev); + napi_disable(&ep->napi); netif_stop_queue(dev); wrl(ep, REG_GIINTMSK, 0); @@ -788,14 +784,12 @@ struct net_device *ep93xx_dev_alloc(struct ep93xx_eth_data *data) dev->get_stats = ep93xx_get_stats; dev->ethtool_ops = &ep93xx_ethtool_ops; - dev->poll = ep93xx_poll; dev->hard_start_xmit = ep93xx_xmit; dev->open = ep93xx_open; dev->stop = ep93xx_close; dev->do_ioctl = ep93xx_ioctl; dev->features |= NETIF_F_SG | NETIF_F_HW_CSUM; - dev->weight = 64; return dev; } @@ -847,6 +841,8 @@ static int ep93xx_eth_probe(struct platform_device *pdev) goto err_out; } ep = netdev_priv(dev); + ep->dev = dev; + netif_napi_add(dev, &ep->napi, ep93xx_poll, 64); platform_set_drvdata(pdev, dev); diff --git a/drivers/net/arm/ether1.c b/drivers/net/arm/ether1.c index 80f33b6d57138145b6cc41a2a9654803cc0db045..3bb9e293e2efea6ba066b6ad61960002f89d1675 100644 --- a/drivers/net/arm/ether1.c +++ b/drivers/net/arm/ether1.c @@ -996,6 +996,7 @@ ether1_probe(struct expansion_card *ec, const struct ecard_id *id) { struct net_device *dev; int i, ret = 0; + DECLARE_MAC_BUF(mac); ether1_banner(); @@ -1009,7 +1010,6 @@ ether1_probe(struct expansion_card *ec, const struct ecard_id *id) goto release; } - SET_MODULE_OWNER(dev); SET_NETDEV_DEV(dev, &ec->dev); dev->irq = ec->irq; @@ -1044,12 +1044,9 @@ ether1_probe(struct expansion_card *ec, const struct ecard_id *id) if (ret) goto free; - printk(KERN_INFO "%s: ether1 in slot %d, ", - dev->name, ec->slot_no); + printk(KERN_INFO "%s: ether1 in slot %d, %s\n", + dev->name, ec->slot_no, print_mac(mac, dev->dev_addr)); - for (i = 0; i < 6; i++) - printk ("%2.2x%c", dev->dev_addr[i], i == 
5 ? '\n' : ':'); - ecard_set_drvdata(ec, dev); return 0; diff --git a/drivers/net/arm/ether3.c b/drivers/net/arm/ether3.c index 3805506a3ab8475f0bf84629b687a2b2da8d7ac1..67e96ae85035638ee009f2aadac0e5a7a538269b 100644 --- a/drivers/net/arm/ether3.c +++ b/drivers/net/arm/ether3.c @@ -775,7 +775,8 @@ ether3_probe(struct expansion_card *ec, const struct ecard_id *id) { const struct ether3_data *data = id->data; struct net_device *dev; - int i, bus_type, ret; + int bus_type, ret; + DECLARE_MAC_BUF(mac); ether3_banner(); @@ -789,7 +790,6 @@ ether3_probe(struct expansion_card *ec, const struct ecard_id *id) goto release; } - SET_MODULE_OWNER(dev); SET_NETDEV_DEV(dev, &ec->dev); priv(dev)->base = ecardm_iomap(ec, ECARD_RES_MEMC, 0, 0); @@ -859,9 +859,8 @@ ether3_probe(struct expansion_card *ec, const struct ecard_id *id) if (ret) goto free; - printk("%s: %s in slot %d, ", dev->name, data->name, ec->slot_no); - for (i = 0; i < 6; i++) - printk("%2.2x%c", dev->dev_addr[i], i == 5 ? '\n' : ':'); + printk("%s: %s in slot %d, %s\n", + dev->name, data->name, ec->slot_no, print_mac(mac, dev->dev_addr)); ecard_set_drvdata(ec, dev); return 0; diff --git a/drivers/net/arm/etherh.c b/drivers/net/arm/etherh.c index 0d37d9d1fd787e2631c53b6982137fe38c6496d2..00081d2b9cd5f1f87b4eee443919c42523d2f585 100644 --- a/drivers/net/arm/etherh.c +++ b/drivers/net/arm/etherh.c @@ -648,6 +648,7 @@ etherh_probe(struct expansion_card *ec, const struct ecard_id *id) struct net_device *dev; struct etherh_priv *eh; int i, ret; + DECLARE_MAC_BUF(mac); etherh_banner(); @@ -661,7 +662,6 @@ etherh_probe(struct expansion_card *ec, const struct ecard_id *id) goto release; } - SET_MODULE_OWNER(dev); SET_NETDEV_DEV(dev, &ec->dev); dev->open = etherh_open; @@ -746,11 +746,8 @@ etherh_probe(struct expansion_card *ec, const struct ecard_id *id) if (ret) goto free; - printk(KERN_INFO "%s: %s in slot %d, ", - dev->name, data->name, ec->slot_no); - - for (i = 0; i < 6; i++) - printk("%2.2x%c", dev->dev_addr[i], i == 5 ? '\n' : ':'); + printk(KERN_INFO "%s: %s in slot %d, %s\n", + dev->name, data->name, ec->slot_no, print_mac(mac, dev->dev_addr)); ecard_set_drvdata(ec, dev); diff --git a/drivers/net/at1700.c b/drivers/net/at1700.c index bed8e0ebaf1979b6b1c2200e06b30ca10ba4f4ec..b032c1bf492fc06e03b067a0fe23ae2c24c36bfd 100644 --- a/drivers/net/at1700.c +++ b/drivers/net/at1700.c @@ -109,7 +109,6 @@ typedef unsigned char uchar; /* Information that need to be kept for each board. */ struct net_local { - struct net_device_stats stats; spinlock_t lock; unsigned char mc_filter[8]; uint jumpered:1; /* Set iff the board has jumper config. */ @@ -164,7 +163,6 @@ static int net_send_packet(struct sk_buff *skb, struct net_device *dev); static irqreturn_t net_interrupt(int irq, void *dev_id); static void net_rx(struct net_device *dev); static int net_close(struct net_device *dev); -static struct net_device_stats *net_get_stats(struct net_device *dev); static void set_rx_mode(struct net_device *dev); static void net_tx_timeout (struct net_device *dev); @@ -225,8 +223,6 @@ struct net_device * __init at1700_probe(int unit) dev->irq = irq; } - SET_MODULE_OWNER(dev); - if (io > 0x1ff) { /* Check a single specified location. */ err = at1700_probe1(dev, io); } else if (io != 0) { /* Don't probe at all. 
*/ @@ -269,6 +265,7 @@ static int __init at1700_probe1(struct net_device *dev, int ioaddr) unsigned int i, irq, is_fmv18x = 0, is_at1700 = 0; int slot, ret = -ENODEV; struct net_local *lp = netdev_priv(dev); + DECLARE_MAC_BUF(mac); if (!request_region(ioaddr, AT1700_IO_EXTENT, DRV_NAME)) return -EBUSY; @@ -392,16 +389,15 @@ static int __init at1700_probe1(struct net_device *dev, int ioaddr) if (is_at1700) { for(i = 0; i < 3; i++) { unsigned short eeprom_val = read_eeprom(ioaddr, 4+i); - printk("%04x", eeprom_val); ((unsigned short *)dev->dev_addr)[i] = ntohs(eeprom_val); } } else { for(i = 0; i < 6; i++) { unsigned char val = inb(ioaddr + SAPROM + i); - printk("%02x", val); dev->dev_addr[i] = val; } } + printk("%s", print_mac(mac, dev->dev_addr)); /* The EEPROM word 12 bit 0x0400 means use regular 100 ohm 10baseT signals, rather than 150 ohm shielded twisted pair compensation. @@ -458,7 +454,6 @@ static int __init at1700_probe1(struct net_device *dev, int ioaddr) dev->open = net_open; dev->stop = net_close; dev->hard_start_xmit = net_send_packet; - dev->get_stats = net_get_stats; dev->set_multicast_list = &set_rx_mode; dev->tx_timeout = net_tx_timeout; dev->watchdog_timeo = TX_TIMEOUT; @@ -573,7 +568,7 @@ static void net_tx_timeout (struct net_device *dev) dev->name, inw(ioaddr + TX_STATUS), inw(ioaddr + TX_INTR), inw(ioaddr + TX_MODE), inw(ioaddr + CONFIG_0), inw(ioaddr + DATAPORT), inw(ioaddr + TX_START), inw(ioaddr + MODE13 - 1), inw(ioaddr + RX_CTRL)); - lp->stats.tx_errors++; + dev->stats.tx_errors++; /* ToDo: We should try to restart the adaptor... */ outw(0xffff, ioaddr + MODE24); outw (0xffff, ioaddr + TX_STATUS); @@ -693,10 +688,10 @@ static irqreturn_t net_interrupt(int irq, void *dev_id) printk("%s: 16 Collision occur during Txing.\n", dev->name); /* Cancel sending a packet. */ outb(0x03, ioaddr + COL16CNTL); - lp->stats.collisions++; + dev->stats.collisions++; } if (status & 0x82) { - lp->stats.tx_packets++; + dev->stats.tx_packets++; /* The Tx queue has any packets and is not being transferred a packet from the host, start transmitting. */ @@ -721,7 +716,6 @@ static irqreturn_t net_interrupt(int irq, void *dev_id) static void net_rx(struct net_device *dev) { - struct net_local *lp = netdev_priv(dev); int ioaddr = dev->base_addr; int boguscount = 5; @@ -740,11 +734,11 @@ net_rx(struct net_device *dev) #endif if ((status & 0xF0) != 0x20) { /* There was an error. */ - lp->stats.rx_errors++; - if (status & 0x08) lp->stats.rx_length_errors++; - if (status & 0x04) lp->stats.rx_frame_errors++; - if (status & 0x02) lp->stats.rx_crc_errors++; - if (status & 0x01) lp->stats.rx_over_errors++; + dev->stats.rx_errors++; + if (status & 0x08) dev->stats.rx_length_errors++; + if (status & 0x04) dev->stats.rx_frame_errors++; + if (status & 0x02) dev->stats.rx_crc_errors++; + if (status & 0x01) dev->stats.rx_over_errors++; } else { /* Malloc up new buffer. */ struct sk_buff *skb; @@ -755,7 +749,7 @@ net_rx(struct net_device *dev) /* Prime the FIFO and then flush the packet. */ inw(ioaddr + DATAPORT); inw(ioaddr + DATAPORT); outb(0x05, ioaddr + RX_CTRL); - lp->stats.rx_errors++; + dev->stats.rx_errors++; break; } skb = dev_alloc_skb(pkt_len+3); @@ -765,7 +759,7 @@ net_rx(struct net_device *dev) /* Prime the FIFO and then flush the packet. 
*/ inw(ioaddr + DATAPORT); inw(ioaddr + DATAPORT); outb(0x05, ioaddr + RX_CTRL); - lp->stats.rx_dropped++; + dev->stats.rx_dropped++; break; } skb_reserve(skb,2); @@ -774,8 +768,8 @@ net_rx(struct net_device *dev) skb->protocol=eth_type_trans(skb, dev); netif_rx(skb); dev->last_rx = jiffies; - lp->stats.rx_packets++; - lp->stats.rx_bytes += pkt_len; + dev->stats.rx_packets++; + dev->stats.rx_bytes += pkt_len; } if (--boguscount <= 0) break; @@ -824,17 +818,6 @@ static int net_close(struct net_device *dev) return 0; } -/* Get the current statistics. - This may be called with the card open or closed. - There are no on-chip counters, so this function is trivial. -*/ -static struct net_device_stats * -net_get_stats(struct net_device *dev) -{ - struct net_local *lp = netdev_priv(dev); - return &lp->stats; -} - /* Set the multicast/promiscuous mode for this adaptor. */ diff --git a/drivers/net/atarilance.c b/drivers/net/atarilance.c index dfa8b9ba4c8017d928f69e768f88afc762f82d33..ebf1a3a88e156aeed5491da8105d55ba9f21fba8 100644 --- a/drivers/net/atarilance.c +++ b/drivers/net/atarilance.c @@ -224,7 +224,6 @@ struct lance_private { int dirty_tx; /* Ring entries to be freed. */ /* copy function */ void *(*memcpy_f)( void *, const void *, size_t ); - struct net_device_stats stats; /* This must be long for set_bit() */ long tx_full; spinlock_t devlock; @@ -263,7 +262,7 @@ struct lance_addr { (highest byte stripped) */ }; -#define N_LANCE_ADDR (sizeof(lance_addr_list)/sizeof(*lance_addr_list)) +#define N_LANCE_ADDR ARRAY_SIZE(lance_addr_list) /* Definitions for the Lance */ @@ -347,7 +346,6 @@ static int lance_start_xmit( struct sk_buff *skb, struct net_device *dev ); static irqreturn_t lance_interrupt( int irq, void *dev_id ); static int lance_rx( struct net_device *dev ); static int lance_close( struct net_device *dev ); -static struct net_device_stats *lance_get_stats( struct net_device *dev ); static void set_multicast_list( struct net_device *dev ); static int lance_set_mac_address( struct net_device *dev, void *addr ); static void lance_tx_timeout (struct net_device *dev); @@ -390,7 +388,6 @@ struct net_device * __init atarilance_probe(int unit) sprintf(dev->name, "eth%d", unit); netdev_boot_setup_check(dev); } - SET_MODULE_OWNER(dev); for( i = 0; i < N_LANCE_ADDR; ++i ) { if (lance_probe1( dev, &lance_addr_list[i] )) { @@ -470,6 +467,7 @@ static unsigned long __init lance_probe1( struct net_device *dev, int i; static int did_version; unsigned short save1, save2; + DECLARE_MAC_BUF(mac); PROBE_PRINT(( "Probing for Lance card at mem %#lx io %#lx\n", (long)memaddr, (long)ioaddr )); @@ -598,8 +596,7 @@ static unsigned long __init lance_probe1( struct net_device *dev, i = IO->mem; break; } - for( i = 0; i < 6; ++i ) - printk( "%02x%s", dev->dev_addr[i], (i < 5) ? 
":" : "\n" ); + printk("%s\n", print_mac(mac, dev->dev_addr)); if (lp->cardtype == OLD_RIEBL) { printk( "%s: Warning: This is a default ethernet address!\n", dev->name ); @@ -632,7 +629,6 @@ static unsigned long __init lance_probe1( struct net_device *dev, dev->open = &lance_open; dev->hard_start_xmit = &lance_start_xmit; dev->stop = &lance_close; - dev->get_stats = &lance_get_stats; dev->set_multicast_list = &set_multicast_list; dev->set_mac_address = &lance_set_mac_address; @@ -640,13 +636,6 @@ static unsigned long __init lance_probe1( struct net_device *dev, dev->tx_timeout = lance_tx_timeout; dev->watchdog_timeo = TX_TIMEOUT; - -#if 0 - dev->start = 0; -#endif - - memset( &lp->stats, 0, sizeof(lp->stats) ); - return( 1 ); } @@ -754,7 +743,7 @@ static void lance_tx_timeout (struct net_device *dev) * little endian mode. */ REGA( CSR3 ) = CSR3_BSWP | (lp->cardtype == PAM_CARD ? CSR3_ACON : 0); - lp->stats.tx_errors++; + dev->stats.tx_errors++; #ifndef final_version { int i; DPRINTK( 2, ( "Ring data: dirty_tx %d cur_tx %d%s cur_rx %d\n", @@ -790,6 +779,8 @@ static int lance_start_xmit( struct sk_buff *skb, struct net_device *dev ) int entry, len; struct lance_tx_head *head; unsigned long flags; + DECLARE_MAC_BUF(mac); + DECLARE_MAC_BUF(mac2); DPRINTK( 2, ( "%s: lance_start_xmit() called, csr0 %4.4x.\n", dev->name, DREG )); @@ -812,17 +803,13 @@ static int lance_start_xmit( struct sk_buff *skb, struct net_device *dev ) /* Fill in a Tx ring entry */ if (lance_debug >= 3) { - u_char *p; - int i; - printk( "%s: TX pkt type 0x%04x from ", dev->name, - ((u_short *)skb->data)[6]); - for( p = &((u_char *)skb->data)[6], i = 0; i < 6; i++ ) - printk("%02x%s", *p++, i != 5 ? ":" : "" ); - printk(" to "); - for( p = (u_char *)skb->data, i = 0; i < 6; i++ ) - printk("%02x%s", *p++, i != 5 ? ":" : "" ); - printk(" data at 0x%08x len %d\n", (int)skb->data, - (int)skb->len ); + printk( "%s: TX pkt type 0x%04x from " + "%s to %s" + " data at 0x%08x len %d\n", + dev->name, ((u_short *)skb->data)[6], + print_mac(mac, &skb->data[6]), + print_mac(mac2, skb->data), + (int)skb->data, (int)skb->len ); } /* We're not prepared for the int until the last flags are set/reset. And @@ -842,7 +829,7 @@ static int lance_start_xmit( struct sk_buff *skb, struct net_device *dev ) head->misc = 0; lp->memcpy_f( PKTBUF_ADDR(head), (void *)skb->data, skb->len ); head->flag = TMD1_OWN_CHIP | TMD1_ENP | TMD1_STP; - lp->stats.tx_bytes += skb->len; + dev->stats.tx_bytes += skb->len; dev_kfree_skb( skb ); lp->cur_tx++; while( lp->cur_tx >= TX_RING_SIZE && lp->dirty_tx >= TX_RING_SIZE ) { @@ -913,13 +900,13 @@ static irqreturn_t lance_interrupt( int irq, void *dev_id ) if (status & TMD1_ERR) { /* There was an major error, log it. */ int err_status = MEM->tx_head[entry].misc; - lp->stats.tx_errors++; - if (err_status & TMD3_RTRY) lp->stats.tx_aborted_errors++; - if (err_status & TMD3_LCAR) lp->stats.tx_carrier_errors++; - if (err_status & TMD3_LCOL) lp->stats.tx_window_errors++; + dev->stats.tx_errors++; + if (err_status & TMD3_RTRY) dev->stats.tx_aborted_errors++; + if (err_status & TMD3_LCAR) dev->stats.tx_carrier_errors++; + if (err_status & TMD3_LCOL) dev->stats.tx_window_errors++; if (err_status & TMD3_UFLO) { /* Ackk! On FIFO errors the Tx unit is turned off! */ - lp->stats.tx_fifo_errors++; + dev->stats.tx_fifo_errors++; /* Remove this verbosity later! */ DPRINTK( 1, ( "%s: Tx FIFO error! 
Status %04x\n", dev->name, csr0 )); @@ -928,8 +915,8 @@ static irqreturn_t lance_interrupt( int irq, void *dev_id ) } } else { if (status & (TMD1_MORE | TMD1_ONE | TMD1_DEF)) - lp->stats.collisions++; - lp->stats.tx_packets++; + dev->stats.collisions++; + dev->stats.tx_packets++; } /* XXX MSch: free skb?? */ @@ -956,8 +943,8 @@ static irqreturn_t lance_interrupt( int irq, void *dev_id ) } /* Log misc errors. */ - if (csr0 & CSR0_BABL) lp->stats.tx_errors++; /* Tx babble. */ - if (csr0 & CSR0_MISS) lp->stats.rx_errors++; /* Missed a Rx frame. */ + if (csr0 & CSR0_BABL) dev->stats.tx_errors++; /* Tx babble. */ + if (csr0 & CSR0_MISS) dev->stats.rx_errors++; /* Missed a Rx frame. */ if (csr0 & CSR0_MERR) { DPRINTK( 1, ( "%s: Bus master arbitration failure (?!?), " "status %04x.\n", dev->name, csr0 )); @@ -998,11 +985,11 @@ static int lance_rx( struct net_device *dev ) buffers it's possible for a jabber packet to use two buffers, with only the last correctly noting the error. */ if (status & RMD1_ENP) /* Only count a general error at the */ - lp->stats.rx_errors++; /* end of a packet.*/ - if (status & RMD1_FRAM) lp->stats.rx_frame_errors++; - if (status & RMD1_OFLO) lp->stats.rx_over_errors++; - if (status & RMD1_CRC) lp->stats.rx_crc_errors++; - if (status & RMD1_BUFF) lp->stats.rx_fifo_errors++; + dev->stats.rx_errors++; /* end of a packet.*/ + if (status & RMD1_FRAM) dev->stats.rx_frame_errors++; + if (status & RMD1_OFLO) dev->stats.rx_over_errors++; + if (status & RMD1_CRC) dev->stats.rx_crc_errors++; + if (status & RMD1_BUFF) dev->stats.rx_fifo_errors++; head->flag &= (RMD1_ENP|RMD1_STP); } else { /* Malloc up new buffer, compatible with net-3. */ @@ -1011,7 +998,7 @@ static int lance_rx( struct net_device *dev ) if (pkt_len < 60) { printk( "%s: Runt packet!\n", dev->name ); - lp->stats.rx_errors++; + dev->stats.rx_errors++; } else { skb = dev_alloc_skb( pkt_len+2 ); @@ -1024,7 +1011,7 @@ static int lance_rx( struct net_device *dev ) break; if (i > RX_RING_SIZE - 2) { - lp->stats.rx_dropped++; + dev->stats.rx_dropped++; head->flag |= RMD1_OWN_CHIP; lp->cur_rx++; } @@ -1032,19 +1019,18 @@ static int lance_rx( struct net_device *dev ) } if (lance_debug >= 3) { - u_char *data = PKTBUF_ADDR(head), *p; - printk( "%s: RX pkt type 0x%04x from ", dev->name, - ((u_short *)data)[6]); - for( p = &data[6], i = 0; i < 6; i++ ) - printk("%02x%s", *p++, i != 5 ? ":" : "" ); - printk(" to "); - for( p = data, i = 0; i < 6; i++ ) - printk("%02x%s", *p++, i != 5 ? 
":" : "" ); - printk(" data %02x %02x %02x %02x %02x %02x %02x %02x " + u_char *data = PKTBUF_ADDR(head); + DECLARE_MAC_BUF(mac); + DECLARE_MAC_BUF(mac2); + + printk(KERN_DEBUG "%s: RX pkt type 0x%04x from %s to %s ", + "data %02x %02x %02x %02x %02x %02x %02x %02x " "len %d\n", + dev->name, ((u_short *)data)[6], + print_mac(mac, &data[6]), print_mac(mac2, data), data[15], data[16], data[17], data[18], data[19], data[20], data[21], data[22], - pkt_len ); + pkt_len); } skb_reserve( skb, 2 ); /* 16 byte align */ @@ -1053,8 +1039,8 @@ static int lance_rx( struct net_device *dev ) skb->protocol = eth_type_trans( skb, dev ); netif_rx( skb ); dev->last_rx = jiffies; - lp->stats.rx_packets++; - lp->stats.rx_bytes += pkt_len; + dev->stats.rx_packets++; + dev->stats.rx_bytes += pkt_len; } } @@ -1091,14 +1077,6 @@ static int lance_close( struct net_device *dev ) } -static struct net_device_stats *lance_get_stats( struct net_device *dev ) - -{ struct lance_private *lp = (struct lance_private *)dev->priv; - - return &lp->stats; -} - - /* Set or clear the multicast filter for this adaptor. num_addrs == -1 Promiscuous mode, receive all packets num_addrs == 0 Normal mode, clear multicast list diff --git a/drivers/net/atl1/atl1_ethtool.c b/drivers/net/atl1/atl1_ethtool.c index 1f616c5c14739437d0e8efffa856c6df7824825c..68a83be843ab5c232baece1655ff6f9b74aefae2 100644 --- a/drivers/net/atl1/atl1_ethtool.c +++ b/drivers/net/atl1/atl1_ethtool.c @@ -88,9 +88,14 @@ static void atl1_get_ethtool_stats(struct net_device *netdev, } -static int atl1_get_stats_count(struct net_device *netdev) +static int atl1_get_sset_count(struct net_device *netdev, int sset) { - return ARRAY_SIZE(atl1_gstrings_stats); + switch (sset) { + case ETH_SS_STATS: + return ARRAY_SIZE(atl1_gstrings_stats); + default: + return -EOPNOTSUPP; + } } static int atl1_get_settings(struct net_device *netdev, @@ -489,15 +494,12 @@ const struct ethtool_ops atl1_ethtool_ops = { .get_pauseparam = atl1_get_pauseparam, .set_pauseparam = atl1_set_pauseparam, .get_rx_csum = atl1_get_rx_csum, - .get_tx_csum = ethtool_op_get_tx_csum, .set_tx_csum = ethtool_op_set_tx_hw_csum, .get_link = ethtool_op_get_link, - .get_sg = ethtool_op_get_sg, .set_sg = ethtool_op_set_sg, .get_strings = atl1_get_strings, .nway_reset = atl1_nway_reset, .get_ethtool_stats = atl1_get_ethtool_stats, - .get_stats_count = atl1_get_stats_count, - .get_tso = ethtool_op_get_tso, + .get_sset_count = atl1_get_sset_count, .set_tso = ethtool_op_set_tso, }; diff --git a/drivers/net/atl1/atl1_hw.c b/drivers/net/atl1/atl1_hw.c index ef886bdeac13d5ed689c61bc6da41e8209051704..9d3bd22e3a82f96f289a462b60a9503954cb49a8 100644 --- a/drivers/net/atl1/atl1_hw.c +++ b/drivers/net/atl1/atl1_hw.c @@ -603,7 +603,7 @@ static struct atl1_spi_flash_dev flash_table[] = { static void atl1_init_flash_opcode(struct atl1_hw *hw) { - if (hw->flash_vendor >= sizeof(flash_table) / sizeof(flash_table[0])) + if (hw->flash_vendor >= ARRAY_SIZE(flash_table)) hw->flash_vendor = 0; /* ATMEL */ /* Init OP table */ diff --git a/drivers/net/atl1/atl1_main.c b/drivers/net/atl1/atl1_main.c index f23e13c8f9a6e75ce0d9614bcb68d85c287ccd49..35b0a7dd4ef465cca27f5619d1f98965f99b83bc 100644 --- a/drivers/net/atl1/atl1_main.c +++ b/drivers/net/atl1/atl1_main.c @@ -76,7 +76,6 @@ #include #include #include -#include #include #include @@ -1368,7 +1367,6 @@ static void atl1_intr_rx(struct atl1_adapter *adapter) if (count) { u32 tpd_next_to_use; u32 rfd_next_to_use; - u32 rrd_next_to_clean; spin_lock(&adapter->mb_lock); @@ -1513,7 +1511,7 @@ 
static void atl1_tx_map(struct atl1_adapter *adapter, struct sk_buff *skb, unsigned int f; u16 tpd_next_to_use; u16 proto_hdr_len; - u16 i, m, len12; + u16 len12; first_buf_len -= skb->data_len; nr_frags = skb_shinfo(skb)->nr_frags; @@ -1537,6 +1535,8 @@ static void atl1_tx_map(struct atl1_adapter *adapter, struct sk_buff *skb, tpd_next_to_use = 0; if (first_buf_len > proto_hdr_len) { + int i, m; + len12 = first_buf_len - proto_hdr_len; m = (len12 + ATL1_MAX_TX_BUF_LEN - 1) / ATL1_MAX_TX_BUF_LEN; @@ -2210,8 +2210,14 @@ static int __devinit atl1_probe(struct pci_dev *pdev, return err; /* - * 64-bit DMA currently has data corruption problems, so let's just - * use 32-bit DMA for now. This is a big hack that is probably wrong. + * The atl1 chip can DMA to 64-bit addresses, but it uses a single + * shared register for the high 32 bits, so only a single, aligned, + * 4 GB physical address range can be used at a time. + * + * Supporting 64-bit DMA on this hardware is more trouble than it's + * worth. It is far easier to limit to 32-bit DMA than update + * various kernel subsystems to support the mechanics required by a + * fixed-high-32-bit system. */ err = pci_set_dma_mask(pdev, DMA_32BIT_MASK); if (err) { @@ -2235,7 +2241,6 @@ static int __devinit atl1_probe(struct pci_dev *pdev, err = -ENOMEM; goto err_alloc_etherdev; } - SET_MODULE_OWNER(netdev); SET_NETDEV_DEV(netdev, &pdev->dev); pci_set_drvdata(pdev, netdev); diff --git a/drivers/net/atp.c b/drivers/net/atp.c index 82d78ff8399bbeea1046b1efa241e3bf2fd181bd..62f09e59d9c47cbcd29e84c55af5185fb4c7ee23 100644 --- a/drivers/net/atp.c +++ b/drivers/net/atp.c @@ -171,7 +171,6 @@ static char mux_8012[] = { 0xff, 0xf7, 0xff, 0xfb, 0xf3, 0xfb, 0xff, 0xf7,}; struct net_local { spinlock_t lock; struct net_device *next_module; - struct net_device_stats stats; struct timer_list timer; /* Media selection timer. */ long last_rx_time; /* Last Rx, in jiffies, to handle Rx hang. */ int saved_tx_size; @@ -205,7 +204,6 @@ static irqreturn_t atp_interrupt(int irq, void *dev_id); static void net_rx(struct net_device *dev); static void read_block(long ioaddr, int length, unsigned char *buffer, int data_mode); static int net_close(struct net_device *dev); -static struct net_device_stats *net_get_stats(struct net_device *dev); static void set_rx_mode_8002(struct net_device *dev); static void set_rx_mode_8012(struct net_device *dev); static void tx_timeout(struct net_device *dev); @@ -250,6 +248,7 @@ static int __init atp_probe1(long ioaddr) struct net_local *lp; int saved_ctrl_reg, status, i; int res; + DECLARE_MAC_BUF(mac); outb(0xff, ioaddr + PAR_DATA); /* Save the original value of the Control register, in case we guessed @@ -299,7 +298,6 @@ static int __init atp_probe1(long ioaddr) dev = alloc_etherdev(sizeof(struct net_local)); if (!dev) return -ENOMEM; - SET_MODULE_OWNER(dev); /* Find the IRQ used by triggering an interrupt. */ write_reg_byte(ioaddr, CMR2, 0x01); /* No accept mode, IRQ out. 
*/ @@ -325,10 +323,9 @@ static int __init atp_probe1(long ioaddr) printk(KERN_INFO "%s", version); #endif - printk(KERN_NOTICE "%s: Pocket adapter found at %#3lx, IRQ %d, SAPROM " - "%02X:%02X:%02X:%02X:%02X:%02X.\n", dev->name, dev->base_addr, - dev->irq, dev->dev_addr[0], dev->dev_addr[1], dev->dev_addr[2], - dev->dev_addr[3], dev->dev_addr[4], dev->dev_addr[5]); + printk(KERN_NOTICE "%s: Pocket adapter found at %#3lx, IRQ %d, " + "SAPROM %s.\n", + dev->name, dev->base_addr, dev->irq, print_mac(mac, dev->dev_addr)); /* Reset the ethernet hardware and activate the printer pass-through. */ write_reg_high(ioaddr, CMR1, CMR1h_RESET | CMR1h_MUX); @@ -349,7 +346,6 @@ static int __init atp_probe1(long ioaddr) dev->open = net_open; dev->stop = net_close; dev->hard_start_xmit = atp_send_packet; - dev->get_stats = net_get_stats; dev->set_multicast_list = lp->chip_type == RTL8002 ? &set_rx_mode_8002 : &set_rx_mode_8012; dev->tx_timeout = tx_timeout; @@ -539,18 +535,17 @@ static void write_packet(long ioaddr, int length, unsigned char *packet, int pad static void tx_timeout(struct net_device *dev) { - struct net_local *np = netdev_priv(dev); long ioaddr = dev->base_addr; printk(KERN_WARNING "%s: Transmit timed out, %s?\n", dev->name, inb(ioaddr + PAR_CONTROL) & 0x10 ? "network cable problem" : "IRQ conflict"); - np->stats.tx_errors++; + dev->stats.tx_errors++; /* Try to restart the adapter. */ hardware_init(dev); dev->trans_start = jiffies; netif_wake_queue(dev); - np->stats.tx_errors++; + dev->stats.tx_errors++; } static int atp_send_packet(struct sk_buff *skb, struct net_device *dev) @@ -630,7 +625,7 @@ static irqreturn_t atp_interrupt(int irq, void *dev_instance) /* We acknowledged the normal Rx interrupt, so if the interrupt is still outstanding we must have a Rx error. */ if (read_status & (CMR1_IRQ << 3)) { /* Overrun. */ - lp->stats.rx_over_errors++; + dev->stats.rx_over_errors++; /* Set to no-accept mode long enough to remove a packet. */ write_reg_high(ioaddr, CMR2, CMR2h_OFF); net_rx(dev); @@ -650,9 +645,9 @@ static irqreturn_t atp_interrupt(int irq, void *dev_instance) and reinitialize the adapter. */ write_reg(ioaddr, ISR, ISR_TxErr + ISR_TxOK); if (status & (ISR_TxErr<<3)) { - lp->stats.collisions++; + dev->stats.collisions++; if (++lp->re_tx > 15) { - lp->stats.tx_aborted_errors++; + dev->stats.tx_aborted_errors++; hardware_init(dev); break; } @@ -661,7 +656,7 @@ static irqreturn_t atp_interrupt(int irq, void *dev_instance) write_reg(ioaddr, CMR1, CMR1_ReXmit + CMR1_Xmit); } else { /* Finish up the transmit. 
*/ - lp->stats.tx_packets++; + dev->stats.tx_packets++; lp->pac_cnt_in_tx_buf--; if ( lp->saved_tx_size) { trigger_send(ioaddr, lp->saved_tx_size); @@ -679,7 +674,7 @@ static irqreturn_t atp_interrupt(int irq, void *dev_instance) "%ld jiffies status %02x CMR1 %02x.\n", dev->name, num_tx_since_rx, jiffies - dev->last_rx, status, (read_nibble(ioaddr, CMR1) >> 3) & 15); - lp->stats.rx_missed_errors++; + dev->stats.rx_missed_errors++; hardware_init(dev); num_tx_since_rx = 0; break; @@ -736,13 +731,13 @@ static void atp_timed_checker(unsigned long data) struct net_local *lp = netdev_priv(atp_timed_dev); write_reg_byte(ioaddr, PAR0 + i, atp_timed_dev->dev_addr[i]); if (i == 2) - lp->stats.tx_errors++; + dev->stats.tx_errors++; else if (i == 3) - lp->stats.tx_dropped++; + dev->stats.tx_dropped++; else if (i == 4) - lp->stats.collisions++; + dev->stats.collisions++; else - lp->stats.rx_errors++; + dev->stats.rx_errors++; } #endif } @@ -766,14 +761,14 @@ static void net_rx(struct net_device *dev) printk(KERN_DEBUG " rx_count %04x %04x %04x %04x..", rx_head.pad, rx_head.rx_count, rx_head.rx_status, rx_head.cur_addr); if ((rx_head.rx_status & 0x77) != 0x01) { - lp->stats.rx_errors++; - if (rx_head.rx_status & 0x0004) lp->stats.rx_frame_errors++; - else if (rx_head.rx_status & 0x0002) lp->stats.rx_crc_errors++; + dev->stats.rx_errors++; + if (rx_head.rx_status & 0x0004) dev->stats.rx_frame_errors++; + else if (rx_head.rx_status & 0x0002) dev->stats.rx_crc_errors++; if (net_debug > 3) printk(KERN_DEBUG "%s: Unknown ATP Rx error %04x.\n", dev->name, rx_head.rx_status); if (rx_head.rx_status & 0x0020) { - lp->stats.rx_fifo_errors++; + dev->stats.rx_fifo_errors++; write_reg_high(ioaddr, CMR1, CMR1h_TxENABLE); write_reg_high(ioaddr, CMR1, CMR1h_RxENABLE | CMR1h_TxENABLE); } else if (rx_head.rx_status & 0x0050) @@ -788,7 +783,7 @@ static void net_rx(struct net_device *dev) if (skb == NULL) { printk(KERN_ERR "%s: Memory squeeze, dropping packet.\n", dev->name); - lp->stats.rx_dropped++; + dev->stats.rx_dropped++; goto done; } @@ -797,8 +792,8 @@ static void net_rx(struct net_device *dev) skb->protocol = eth_type_trans(skb, dev); netif_rx(skb); dev->last_rx = jiffies; - lp->stats.rx_packets++; - lp->stats.rx_bytes += pkt_len; + dev->stats.rx_packets++; + dev->stats.rx_bytes += pkt_len; } done: write_reg(ioaddr, CMR1, CMR1_NextPkt); @@ -850,15 +845,6 @@ net_close(struct net_device *dev) return 0; } -/* Get the current statistics. This may be called with the card open or - closed. */ -static struct net_device_stats * -net_get_stats(struct net_device *dev) -{ - struct net_local *lp = netdev_priv(dev); - return &lp->stats; -} - /* * Set or clear the multicast filter for this adapter. 
*/ diff --git a/drivers/net/au1000_eth.c b/drivers/net/au1000_eth.c index e86b3691765b7721eff0947e91d92d5d396c1c57..b46c5d8a77bdca845398d26a90d97d02f8ee60d6 100644 --- a/drivers/net/au1000_eth.c +++ b/drivers/net/au1000_eth.c @@ -90,7 +90,6 @@ static int au1000_rx(struct net_device *); static irqreturn_t au1000_interrupt(int, void *); static void au1000_tx_timeout(struct net_device *); static void set_rx_mode(struct net_device *); -static struct net_device_stats *au1000_get_stats(struct net_device *); static int au1000_ioctl(struct net_device *, struct ifreq *, int); static int mdio_read(struct net_device *, int, int); static void mdio_write(struct net_device *, int, int, u16); @@ -772,7 +771,6 @@ static struct net_device * au1000_probe(int port_num) dev->open = au1000_open; dev->hard_start_xmit = au1000_tx; dev->stop = au1000_close; - dev->get_stats = au1000_get_stats; dev->set_multicast_list = &set_rx_mode; dev->do_ioctl = &au1000_ioctl; SET_ETHTOOL_OPS(dev, &au1000_ethtool_ops); @@ -1038,7 +1036,7 @@ static void __exit au1000_cleanup_module(void) static void update_tx_stats(struct net_device *dev, u32 status) { struct au1000_private *aup = (struct au1000_private *) dev->priv; - struct net_device_stats *ps = &aup->stats; + struct net_device_stats *ps = &dev->stats; if (status & TX_FRAME_ABORTED) { if (!aup->phy_dev || (DUPLEX_FULL == aup->phy_dev->duplex)) { @@ -1094,7 +1092,7 @@ static void au1000_tx_ack(struct net_device *dev) static int au1000_tx(struct sk_buff *skb, struct net_device *dev) { struct au1000_private *aup = (struct au1000_private *) dev->priv; - struct net_device_stats *ps = &aup->stats; + struct net_device_stats *ps = &dev->stats; volatile tx_dma_t *ptxd; u32 buff_stat; db_dest_t *pDB; @@ -1148,7 +1146,7 @@ static int au1000_tx(struct sk_buff *skb, struct net_device *dev) static inline void update_rx_stats(struct net_device *dev, u32 status) { struct au1000_private *aup = (struct au1000_private *) dev->priv; - struct net_device_stats *ps = &aup->stats; + struct net_device_stats *ps = &dev->stats; ps->rx_packets++; if (status & RX_MCAST_FRAME) @@ -1201,7 +1199,7 @@ static int au1000_rx(struct net_device *dev) printk(KERN_ERR "%s: Memory squeeze, dropping packet.\n", dev->name); - aup->stats.rx_dropped++; + dev->stats.rx_dropped++; continue; } skb_reserve(skb, 2); /* 16 byte IP header align */ @@ -1324,18 +1322,5 @@ static int au1000_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) return phy_mii_ioctl(aup->phy_dev, if_mii(rq), cmd); } -static struct net_device_stats *au1000_get_stats(struct net_device *dev) -{ - struct au1000_private *aup = (struct au1000_private *) dev->priv; - - if (au1000_debug > 4) - printk("%s: au1000_get_stats: dev=%p\n", dev->name, dev); - - if (netif_device_present(dev)) { - return &aup->stats; - } - return 0; -} - module_init(au1000_init_module); module_exit(au1000_cleanup_module); diff --git a/drivers/net/au1000_eth.h b/drivers/net/au1000_eth.h index 52fe00dd6d24d0402a878d87a70c0b6417b0f4a3..f3baeaa128543af59451e98c0d9d4e5bb7d3ee23 100644 --- a/drivers/net/au1000_eth.h +++ b/drivers/net/au1000_eth.h @@ -115,6 +115,5 @@ struct au1000_private { u32 vaddr; /* virtual address of rx/tx buffers */ dma_addr_t dma_addr; /* dma address of rx/tx buffers */ - struct net_device_stats stats; spinlock_t lock; /* Serialise access to device */ }; diff --git a/drivers/net/ax88796.c b/drivers/net/ax88796.c index 90e0734e60375abe0325cc5cfd0c7a49e0b9c43f..9fe0517cf89319e70340d952fd189e8365400d4b 100644 --- a/drivers/net/ax88796.c +++ 
b/drivers/net/ax88796.c @@ -24,6 +24,7 @@ #include #include #include +#include #include @@ -582,6 +583,37 @@ static const struct ethtool_ops ax_ethtool_ops = { .get_link = ax_get_link, }; +#ifdef CONFIG_AX88796_93CX6 +static void ax_eeprom_register_read(struct eeprom_93cx6 *eeprom) +{ + struct ei_device *ei_local = eeprom->data; + u8 reg = ei_inb(ei_local->mem + AX_MEMR); + + eeprom->reg_data_in = reg & AX_MEMR_EEI; + eeprom->reg_data_out = reg & AX_MEMR_EEO; /* Input pin */ + eeprom->reg_data_clock = reg & AX_MEMR_EECLK; + eeprom->reg_chip_select = reg & AX_MEMR_EECS; +} + +static void ax_eeprom_register_write(struct eeprom_93cx6 *eeprom) +{ + struct ei_device *ei_local = eeprom->data; + u8 reg = ei_inb(ei_local->mem + AX_MEMR); + + reg &= ~(AX_MEMR_EEI | AX_MEMR_EECLK | AX_MEMR_EECS); + + if (eeprom->reg_data_in) + reg |= AX_MEMR_EEI; + if (eeprom->reg_data_clock) + reg |= AX_MEMR_EECLK; + if (eeprom->reg_chip_select) + reg |= AX_MEMR_EECS; + + ei_outb(reg, ei_local->mem + AX_MEMR); + udelay(10); +} +#endif + /* setup code */ static void ax_initial_setup(struct net_device *dev, struct ei_device *ei_local) @@ -640,6 +672,23 @@ static int ax_init_dev(struct net_device *dev, int first_init) memcpy(dev->dev_addr, SA_prom, 6); } +#ifdef CONFIG_AX88796_93CX6 + if (first_init && ax->plat->flags & AXFLG_HAS_93CX6) { + unsigned char mac_addr[6]; + struct eeprom_93cx6 eeprom; + + eeprom.data = ei_local; + eeprom.register_read = ax_eeprom_register_read; + eeprom.register_write = ax_eeprom_register_write; + eeprom.width = PCI_EEPROM_WIDTH_93C56; + + eeprom_93cx6_multiread(&eeprom, 0, + (__le16 __force *)mac_addr, + sizeof(mac_addr) >> 1); + + memcpy(dev->dev_addr, mac_addr, 6); + } +#endif if (ax->plat->wordlength == 2) { /* We must set the 8390 for word mode. */ ei_outb(ax->plat->dcr_val, ei_local->mem + EN0_DCFG); diff --git a/drivers/net/b44.c b/drivers/net/b44.c index 0795df2354928e7caec1cda7b39d3743c6a2603d..3d247f3f4a3c375d285e10caf025fa6fc16f6d3b 100644 --- a/drivers/net/b44.c +++ b/drivers/net/b44.c @@ -1,8 +1,11 @@ -/* b44.c: Broadcom 4400 device driver. +/* b44.c: Broadcom 44xx/47xx Fast Ethernet device driver. * * Copyright (C) 2002 David S. Miller (davem@redhat.com) - * Fixed by Pekka Pietikainen (pp@ee.oulu.fi) + * Copyright (C) 2004 Pekka Pietikainen (pp@ee.oulu.fi) + * Copyright (C) 2004 Florian Schirmer (jolt@tuxbox.org) + * Copyright (C) 2006 Felix Fietkau (nbd@openwrt.org) * Copyright (C) 2006 Broadcom Corporation. + * Copyright (C) 2007 Michael Buesch * * Distribute under GPL. */ @@ -21,17 +24,18 @@ #include #include #include +#include #include #include #include + #include "b44.h" #define DRV_MODULE_NAME "b44" #define PFX DRV_MODULE_NAME ": " -#define DRV_MODULE_VERSION "1.01" -#define DRV_MODULE_RELDATE "Jun 16, 2006" +#define DRV_MODULE_VERSION "2.0" #define B44_DEF_MSG_ENABLE \ (NETIF_MSG_DRV | \ @@ -85,10 +89,10 @@ #define B44_ETHIPV4UDP_HLEN 42 static char version[] __devinitdata = - DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n"; + DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION "\n"; -MODULE_AUTHOR("Florian Schirmer, Pekka Pietikainen, David S. Miller"); -MODULE_DESCRIPTION("Broadcom 4400 10/100 PCI ethernet driver"); +MODULE_AUTHOR("Felix Fietkau, Florian Schirmer, Pekka Pietikainen, David S. 
Miller"); +MODULE_DESCRIPTION("Broadcom 44xx/47xx 10/100 PCI ethernet driver"); MODULE_LICENSE("GPL"); MODULE_VERSION(DRV_MODULE_VERSION); @@ -96,18 +100,28 @@ static int b44_debug = -1; /* -1 == use B44_DEF_MSG_ENABLE as value */ module_param(b44_debug, int, 0); MODULE_PARM_DESC(b44_debug, "B44 bitmapped debugging message enable value"); -static struct pci_device_id b44_pci_tbl[] = { - { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_BCM4401, - PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, - { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_BCM4401B0, - PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, - { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_BCM4401B1, - PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, - { } /* terminate list with empty entry */ -}; +#ifdef CONFIG_B44_PCI +static const struct pci_device_id b44_pci_tbl[] = { + { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_BCM4401) }, + { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_BCM4401B0) }, + { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_BCM4401B1) }, + { 0 } /* terminate list with empty entry */ +}; MODULE_DEVICE_TABLE(pci, b44_pci_tbl); +static struct pci_driver b44_pci_driver = { + .name = DRV_MODULE_NAME, + .id_table = b44_pci_tbl, +}; +#endif /* CONFIG_B44_PCI */ + +static const struct ssb_device_id b44_ssb_tbl[] = { + SSB_DEVICE(SSB_VENDOR_BROADCOM, SSB_DEV_ETHERNET, SSB_ANY_REV), + SSB_DEVTABLE_END +}; +MODULE_DEVICE_TABLE(ssb, b44_ssb_tbl); + static void b44_halt(struct b44 *); static void b44_init_rings(struct b44 *); @@ -119,6 +133,7 @@ static void b44_init_hw(struct b44 *, int); static int dma_desc_align_mask; static int dma_desc_sync_size; +static int instance; static const char b44_gstrings[][ETH_GSTRING_LEN] = { #define _B44(x...) # x, @@ -126,35 +141,35 @@ B44_STAT_REG_DECLARE #undef _B44 }; -static inline void b44_sync_dma_desc_for_device(struct pci_dev *pdev, - dma_addr_t dma_base, - unsigned long offset, - enum dma_data_direction dir) +static inline void b44_sync_dma_desc_for_device(struct ssb_device *sdev, + dma_addr_t dma_base, + unsigned long offset, + enum dma_data_direction dir) { - dma_sync_single_range_for_device(&pdev->dev, dma_base, - offset & dma_desc_align_mask, - dma_desc_sync_size, dir); + dma_sync_single_range_for_device(sdev->dev, dma_base, + offset & dma_desc_align_mask, + dma_desc_sync_size, dir); } -static inline void b44_sync_dma_desc_for_cpu(struct pci_dev *pdev, - dma_addr_t dma_base, - unsigned long offset, - enum dma_data_direction dir) +static inline void b44_sync_dma_desc_for_cpu(struct ssb_device *sdev, + dma_addr_t dma_base, + unsigned long offset, + enum dma_data_direction dir) { - dma_sync_single_range_for_cpu(&pdev->dev, dma_base, - offset & dma_desc_align_mask, - dma_desc_sync_size, dir); + dma_sync_single_range_for_cpu(sdev->dev, dma_base, + offset & dma_desc_align_mask, + dma_desc_sync_size, dir); } static inline unsigned long br32(const struct b44 *bp, unsigned long reg) { - return readl(bp->regs + reg); + return ssb_read32(bp->sdev, reg); } static inline void bw32(const struct b44 *bp, unsigned long reg, unsigned long val) { - writel(val, bp->regs + reg); + ssb_write32(bp->sdev, reg, val); } static int b44_wait_bit(struct b44 *bp, unsigned long reg, @@ -182,117 +197,29 @@ static int b44_wait_bit(struct b44 *bp, unsigned long reg, return 0; } -/* Sonics SiliconBackplane support routines. ROFL, you should see all the - * buzz words used on this company's website :-) - * - * All of these routines must be invoked with bp->lock held and - * interrupts disabled. 
- */ - -#define SB_PCI_DMA 0x40000000 /* Client Mode PCI memory access space (1 GB) */ -#define BCM4400_PCI_CORE_ADDR 0x18002000 /* Address of PCI core on BCM4400 cards */ - -static u32 ssb_get_core_rev(struct b44 *bp) -{ - return (br32(bp, B44_SBIDHIGH) & SBIDHIGH_RC_MASK); -} - -static u32 ssb_pci_setup(struct b44 *bp, u32 cores) -{ - u32 bar_orig, pci_rev, val; - - pci_read_config_dword(bp->pdev, SSB_BAR0_WIN, &bar_orig); - pci_write_config_dword(bp->pdev, SSB_BAR0_WIN, BCM4400_PCI_CORE_ADDR); - pci_rev = ssb_get_core_rev(bp); - - val = br32(bp, B44_SBINTVEC); - val |= cores; - bw32(bp, B44_SBINTVEC, val); - - val = br32(bp, SSB_PCI_TRANS_2); - val |= SSB_PCI_PREF | SSB_PCI_BURST; - bw32(bp, SSB_PCI_TRANS_2, val); - - pci_write_config_dword(bp->pdev, SSB_BAR0_WIN, bar_orig); - - return pci_rev; -} - -static void ssb_core_disable(struct b44 *bp) -{ - if (br32(bp, B44_SBTMSLOW) & SBTMSLOW_RESET) - return; - - bw32(bp, B44_SBTMSLOW, (SBTMSLOW_REJECT | SBTMSLOW_CLOCK)); - b44_wait_bit(bp, B44_SBTMSLOW, SBTMSLOW_REJECT, 100000, 0); - b44_wait_bit(bp, B44_SBTMSHIGH, SBTMSHIGH_BUSY, 100000, 1); - bw32(bp, B44_SBTMSLOW, (SBTMSLOW_FGC | SBTMSLOW_CLOCK | - SBTMSLOW_REJECT | SBTMSLOW_RESET)); - br32(bp, B44_SBTMSLOW); - udelay(1); - bw32(bp, B44_SBTMSLOW, (SBTMSLOW_REJECT | SBTMSLOW_RESET)); - br32(bp, B44_SBTMSLOW); - udelay(1); -} - -static void ssb_core_reset(struct b44 *bp) +static inline void __b44_cam_read(struct b44 *bp, unsigned char *data, int index) { u32 val; - ssb_core_disable(bp); - bw32(bp, B44_SBTMSLOW, (SBTMSLOW_RESET | SBTMSLOW_CLOCK | SBTMSLOW_FGC)); - br32(bp, B44_SBTMSLOW); - udelay(1); - - /* Clear SERR if set, this is a hw bug workaround. */ - if (br32(bp, B44_SBTMSHIGH) & SBTMSHIGH_SERR) - bw32(bp, B44_SBTMSHIGH, 0); - - val = br32(bp, B44_SBIMSTATE); - if (val & (SBIMSTATE_IBE | SBIMSTATE_TO)) - bw32(bp, B44_SBIMSTATE, val & ~(SBIMSTATE_IBE | SBIMSTATE_TO)); - - bw32(bp, B44_SBTMSLOW, (SBTMSLOW_CLOCK | SBTMSLOW_FGC)); - br32(bp, B44_SBTMSLOW); - udelay(1); - - bw32(bp, B44_SBTMSLOW, (SBTMSLOW_CLOCK)); - br32(bp, B44_SBTMSLOW); - udelay(1); -} + bw32(bp, B44_CAM_CTRL, (CAM_CTRL_READ | + (index << CAM_CTRL_INDEX_SHIFT))); -static int ssb_core_unit(struct b44 *bp) -{ -#if 0 - u32 val = br32(bp, B44_SBADMATCH0); - u32 base; + b44_wait_bit(bp, B44_CAM_CTRL, CAM_CTRL_BUSY, 100, 1); - type = val & SBADMATCH0_TYPE_MASK; - switch (type) { - case 0: - base = val & SBADMATCH0_BS0_MASK; - break; + val = br32(bp, B44_CAM_DATA_LO); - case 1: - base = val & SBADMATCH0_BS1_MASK; - break; + data[2] = (val >> 24) & 0xFF; + data[3] = (val >> 16) & 0xFF; + data[4] = (val >> 8) & 0xFF; + data[5] = (val >> 0) & 0xFF; - case 2: - default: - base = val & SBADMATCH0_BS2_MASK; - break; - }; -#endif - return 0; -} + val = br32(bp, B44_CAM_DATA_HI); -static int ssb_is_core_up(struct b44 *bp) -{ - return ((br32(bp, B44_SBTMSLOW) & (SBTMSLOW_RESET | SBTMSLOW_REJECT | SBTMSLOW_CLOCK)) - == SBTMSLOW_CLOCK); + data[0] = (val >> 8) & 0xFF; + data[1] = (val >> 0) & 0xFF; } -static void __b44_cam_write(struct b44 *bp, unsigned char *data, int index) +static inline void __b44_cam_write(struct b44 *bp, unsigned char *data, int index) { u32 val; @@ -328,14 +255,14 @@ static void b44_enable_ints(struct b44 *bp) bw32(bp, B44_IMASK, bp->imask); } -static int b44_readphy(struct b44 *bp, int reg, u32 *val) +static int __b44_readphy(struct b44 *bp, int phy_addr, int reg, u32 *val) { int err; bw32(bp, B44_EMAC_ISTAT, EMAC_INT_MII); bw32(bp, B44_MDIO_DATA, (MDIO_DATA_SB_START | (MDIO_OP_READ << MDIO_DATA_OP_SHIFT) | - 
(bp->phy_addr << MDIO_DATA_PMD_SHIFT) | + (phy_addr << MDIO_DATA_PMD_SHIFT) | (reg << MDIO_DATA_RA_SHIFT) | (MDIO_TA_VALID << MDIO_DATA_TA_SHIFT))); err = b44_wait_bit(bp, B44_EMAC_ISTAT, EMAC_INT_MII, 100, 0); @@ -344,29 +271,40 @@ static int b44_readphy(struct b44 *bp, int reg, u32 *val) return err; } -static int b44_writephy(struct b44 *bp, int reg, u32 val) +static int __b44_writephy(struct b44 *bp, int phy_addr, int reg, u32 val) { bw32(bp, B44_EMAC_ISTAT, EMAC_INT_MII); bw32(bp, B44_MDIO_DATA, (MDIO_DATA_SB_START | (MDIO_OP_WRITE << MDIO_DATA_OP_SHIFT) | - (bp->phy_addr << MDIO_DATA_PMD_SHIFT) | + (phy_addr << MDIO_DATA_PMD_SHIFT) | (reg << MDIO_DATA_RA_SHIFT) | (MDIO_TA_VALID << MDIO_DATA_TA_SHIFT) | (val & MDIO_DATA_DATA))); return b44_wait_bit(bp, B44_EMAC_ISTAT, EMAC_INT_MII, 100, 0); } +static inline int b44_readphy(struct b44 *bp, int reg, u32 *val) +{ + if (bp->phy_addr == B44_PHY_ADDR_NO_PHY) + return 0; + + return __b44_readphy(bp, bp->phy_addr, reg, val); +} + +static inline int b44_writephy(struct b44 *bp, int reg, u32 val) +{ + if (bp->phy_addr == B44_PHY_ADDR_NO_PHY) + return 0; + + return __b44_writephy(bp, bp->phy_addr, reg, val); +} + /* miilib interface */ -/* FIXME FIXME: phy_id is ignored, bp->phy_addr use is unconditional - * due to code existing before miilib use was added to this driver. - * Someone should remove this artificial driver limitation in - * b44_{read,write}phy. bp->phy_addr itself is fine (and needed). - */ static int b44_mii_read(struct net_device *dev, int phy_id, int location) { u32 val; struct b44 *bp = netdev_priv(dev); - int rc = b44_readphy(bp, location, &val); + int rc = __b44_readphy(bp, phy_id, location, &val); if (rc) return 0xffffffff; return val; @@ -376,7 +314,7 @@ static void b44_mii_write(struct net_device *dev, int phy_id, int location, int val) { struct b44 *bp = netdev_priv(dev); - b44_writephy(bp, location, val); + __b44_writephy(bp, phy_id, location, val); } static int b44_phy_reset(struct b44 *bp) @@ -384,6 +322,8 @@ static int b44_phy_reset(struct b44 *bp) u32 val; int err; + if (bp->phy_addr == B44_PHY_ADDR_NO_PHY) + return 0; err = b44_writephy(bp, MII_BMCR, BMCR_RESET); if (err) return err; @@ -442,11 +382,52 @@ static void b44_set_flow_ctrl(struct b44 *bp, u32 local, u32 remote) __b44_set_flow_ctrl(bp, pause_enab); } +#ifdef SSB_DRIVER_MIPS +extern char *nvram_get(char *name); +static void b44_wap54g10_workaround(struct b44 *bp) +{ + const char *str; + u32 val; + int err; + + /* + * workaround for bad hardware design in Linksys WAP54G v1.0 + * see https://dev.openwrt.org/ticket/146 + * check and reset bit "isolate" + */ + str = nvram_get("boardnum"); + if (!str) + return; + if (simple_strtoul(str, NULL, 0) == 2) { + err = __b44_readphy(bp, 0, MII_BMCR, &val); + if (err) + goto error; + if (!(val & BMCR_ISOLATE)) + return; + val &= ~BMCR_ISOLATE; + err = __b44_writephy(bp, 0, MII_BMCR, val); + if (err) + goto error; + } + return; +error: + printk(KERN_WARNING PFX "PHY: cannot reset MII transceiver isolate bit.\n"); +} +#else +static inline void b44_wap54g10_workaround(struct b44 *bp) +{ +} +#endif + static int b44_setup_phy(struct b44 *bp) { u32 val; int err; + b44_wap54g10_workaround(bp); + + if (bp->phy_addr == B44_PHY_ADDR_NO_PHY) + return 0; if ((err = b44_readphy(bp, B44_MII_ALEDCTRL, &val)) != 0) goto out; if ((err = b44_writephy(bp, B44_MII_ALEDCTRL, @@ -542,6 +523,19 @@ static void b44_check_phy(struct b44 *bp) { u32 bmsr, aux; + if (bp->phy_addr == B44_PHY_ADDR_NO_PHY) { + bp->flags |= B44_FLAG_100_BASE_T; + 
bp->flags |= B44_FLAG_FULL_DUPLEX; + if (!netif_carrier_ok(bp->dev)) { + u32 val = br32(bp, B44_TX_CTRL); + val |= TX_CTRL_DUPLEX; + bw32(bp, B44_TX_CTRL, val); + netif_carrier_on(bp->dev); + b44_link_report(bp); + } + return; + } + if (!b44_readphy(bp, MII_BMSR, &bmsr) && !b44_readphy(bp, B44_MII_AUXCTRL, &aux) && (bmsr != 0xffff)) { @@ -617,10 +611,10 @@ static void b44_tx(struct b44 *bp) BUG_ON(skb == NULL); - pci_unmap_single(bp->pdev, - pci_unmap_addr(rp, mapping), + dma_unmap_single(bp->sdev->dev, + rp->mapping, skb->len, - PCI_DMA_TODEVICE); + DMA_TO_DEVICE); rp->skb = NULL; dev_kfree_skb_irq(skb); } @@ -657,9 +651,9 @@ static int b44_alloc_rx_skb(struct b44 *bp, int src_idx, u32 dest_idx_unmasked) if (skb == NULL) return -ENOMEM; - mapping = pci_map_single(bp->pdev, skb->data, + mapping = dma_map_single(bp->sdev->dev, skb->data, RX_PKT_BUF_SZ, - PCI_DMA_FROMDEVICE); + DMA_FROM_DEVICE); /* Hardware bug work-around, the chip is unable to do PCI DMA to/from anything above 1GB :-( */ @@ -667,18 +661,19 @@ static int b44_alloc_rx_skb(struct b44 *bp, int src_idx, u32 dest_idx_unmasked) mapping + RX_PKT_BUF_SZ > DMA_30BIT_MASK) { /* Sigh... */ if (!dma_mapping_error(mapping)) - pci_unmap_single(bp->pdev, mapping, RX_PKT_BUF_SZ,PCI_DMA_FROMDEVICE); + dma_unmap_single(bp->sdev->dev, mapping, + RX_PKT_BUF_SZ, DMA_FROM_DEVICE); dev_kfree_skb_any(skb); skb = __netdev_alloc_skb(bp->dev, RX_PKT_BUF_SZ, GFP_ATOMIC|GFP_DMA); if (skb == NULL) return -ENOMEM; - mapping = pci_map_single(bp->pdev, skb->data, + mapping = dma_map_single(bp->sdev->dev, skb->data, RX_PKT_BUF_SZ, - PCI_DMA_FROMDEVICE); + DMA_FROM_DEVICE); if (dma_mapping_error(mapping) || mapping + RX_PKT_BUF_SZ > DMA_30BIT_MASK) { if (!dma_mapping_error(mapping)) - pci_unmap_single(bp->pdev, mapping, RX_PKT_BUF_SZ,PCI_DMA_FROMDEVICE); + dma_unmap_single(bp->sdev->dev, mapping, RX_PKT_BUF_SZ,DMA_FROM_DEVICE); dev_kfree_skb_any(skb); return -ENOMEM; } @@ -691,7 +686,7 @@ static int b44_alloc_rx_skb(struct b44 *bp, int src_idx, u32 dest_idx_unmasked) rh->flags = 0; map->skb = skb; - pci_unmap_addr_set(map, mapping, mapping); + map->mapping = mapping; if (src_map != NULL) src_map->skb = NULL; @@ -705,9 +700,9 @@ static int b44_alloc_rx_skb(struct b44 *bp, int src_idx, u32 dest_idx_unmasked) dp->addr = cpu_to_le32((u32) mapping + RX_PKT_OFFSET + bp->dma_offset); if (bp->flags & B44_FLAG_RX_RING_HACK) - b44_sync_dma_desc_for_device(bp->pdev, bp->rx_ring_dma, - dest_idx * sizeof(dp), - DMA_BIDIRECTIONAL); + b44_sync_dma_desc_for_device(bp->sdev, bp->rx_ring_dma, + dest_idx * sizeof(dp), + DMA_BIDIRECTIONAL); return RX_PKT_BUF_SZ; } @@ -730,13 +725,12 @@ static void b44_recycle_rx(struct b44 *bp, int src_idx, u32 dest_idx_unmasked) rh = (struct rx_header *) src_map->skb->data; rh->len = 0; rh->flags = 0; - pci_unmap_addr_set(dest_map, mapping, - pci_unmap_addr(src_map, mapping)); + dest_map->mapping = src_map->mapping; if (bp->flags & B44_FLAG_RX_RING_HACK) - b44_sync_dma_desc_for_cpu(bp->pdev, bp->rx_ring_dma, - src_idx * sizeof(src_desc), - DMA_BIDIRECTIONAL); + b44_sync_dma_desc_for_cpu(bp->sdev, bp->rx_ring_dma, + src_idx * sizeof(src_desc), + DMA_BIDIRECTIONAL); ctrl = src_desc->ctrl; if (dest_idx == (B44_RX_RING_SIZE - 1)) @@ -750,13 +744,13 @@ static void b44_recycle_rx(struct b44 *bp, int src_idx, u32 dest_idx_unmasked) src_map->skb = NULL; if (bp->flags & B44_FLAG_RX_RING_HACK) - b44_sync_dma_desc_for_device(bp->pdev, bp->rx_ring_dma, - dest_idx * sizeof(dest_desc), - DMA_BIDIRECTIONAL); + b44_sync_dma_desc_for_device(bp->sdev, 
bp->rx_ring_dma, + dest_idx * sizeof(dest_desc), + DMA_BIDIRECTIONAL); - pci_dma_sync_single_for_device(bp->pdev, le32_to_cpu(src_desc->addr), - RX_PKT_BUF_SZ, - PCI_DMA_FROMDEVICE); + dma_sync_single_for_device(bp->sdev->dev, le32_to_cpu(src_desc->addr), + RX_PKT_BUF_SZ, + DMA_FROM_DEVICE); } static int b44_rx(struct b44 *bp, int budget) @@ -772,13 +766,13 @@ static int b44_rx(struct b44 *bp, int budget) while (cons != prod && budget > 0) { struct ring_info *rp = &bp->rx_buffers[cons]; struct sk_buff *skb = rp->skb; - dma_addr_t map = pci_unmap_addr(rp, mapping); + dma_addr_t map = rp->mapping; struct rx_header *rh; u16 len; - pci_dma_sync_single_for_cpu(bp->pdev, map, + dma_sync_single_for_cpu(bp->sdev->dev, map, RX_PKT_BUF_SZ, - PCI_DMA_FROMDEVICE); + DMA_FROM_DEVICE); rh = (struct rx_header *) skb->data; len = le16_to_cpu(rh->len); if ((len > (RX_PKT_BUF_SZ - RX_PKT_OFFSET)) || @@ -810,8 +804,8 @@ static int b44_rx(struct b44 *bp, int budget) skb_size = b44_alloc_rx_skb(bp, cons, bp->rx_prod); if (skb_size < 0) goto drop_it; - pci_unmap_single(bp->pdev, map, - skb_size, PCI_DMA_FROMDEVICE); + dma_unmap_single(bp->sdev->dev, map, + skb_size, DMA_FROM_DEVICE); /* Leave out rx_header */ skb_put(skb, len + RX_PKT_OFFSET); skb_pull(skb, RX_PKT_OFFSET); @@ -848,10 +842,11 @@ static int b44_rx(struct b44 *bp, int budget) return received; } -static int b44_poll(struct net_device *netdev, int *budget) +static int b44_poll(struct napi_struct *napi, int budget) { - struct b44 *bp = netdev_priv(netdev); - int done; + struct b44 *bp = container_of(napi, struct b44, napi); + struct net_device *netdev = bp->dev; + int work_done; spin_lock_irq(&bp->lock); @@ -862,22 +857,9 @@ static int b44_poll(struct net_device *netdev, int *budget) } spin_unlock_irq(&bp->lock); - done = 1; - if (bp->istat & ISTAT_RX) { - int orig_budget = *budget; - int work_done; - - if (orig_budget > netdev->quota) - orig_budget = netdev->quota; - - work_done = b44_rx(bp, orig_budget); - - *budget -= work_done; - netdev->quota -= work_done; - - if (work_done >= orig_budget) - done = 0; - } + work_done = 0; + if (bp->istat & ISTAT_RX) + work_done += b44_rx(bp, budget); if (bp->istat & ISTAT_ERRORS) { unsigned long flags; @@ -888,15 +870,15 @@ static int b44_poll(struct net_device *netdev, int *budget) b44_init_hw(bp, B44_FULL_RESET_SKIP_PHY); netif_wake_queue(bp->dev); spin_unlock_irqrestore(&bp->lock, flags); - done = 1; + work_done = 0; } - if (done) { - netif_rx_complete(netdev); + if (work_done < budget) { + netif_rx_complete(netdev, napi); b44_enable_ints(bp); } - return (done ? 0 : 1); + return work_done; } static irqreturn_t b44_interrupt(int irq, void *dev_id) @@ -924,13 +906,13 @@ static irqreturn_t b44_interrupt(int irq, void *dev_id) goto irq_ack; } - if (netif_rx_schedule_prep(dev)) { + if (netif_rx_schedule_prep(dev, &bp->napi)) { /* NOTE: These writes are posted by the readback of * the ISTAT register below. 
*/ bp->istat = istat; __b44_disable_ints(bp); - __netif_rx_schedule(dev); + __netif_rx_schedule(dev, &bp->napi); } else { printk(KERN_ERR PFX "%s: Error, poll already scheduled\n", dev->name); @@ -982,24 +964,25 @@ static int b44_start_xmit(struct sk_buff *skb, struct net_device *dev) goto err_out; } - mapping = pci_map_single(bp->pdev, skb->data, len, PCI_DMA_TODEVICE); + mapping = dma_map_single(bp->sdev->dev, skb->data, len, DMA_TO_DEVICE); if (dma_mapping_error(mapping) || mapping + len > DMA_30BIT_MASK) { struct sk_buff *bounce_skb; /* Chip can't handle DMA to/from >1GB, use bounce buffer */ if (!dma_mapping_error(mapping)) - pci_unmap_single(bp->pdev, mapping, len, PCI_DMA_TODEVICE); + dma_unmap_single(bp->sdev->dev, mapping, len, + DMA_TO_DEVICE); bounce_skb = __dev_alloc_skb(len, GFP_ATOMIC | GFP_DMA); if (!bounce_skb) goto err_out; - mapping = pci_map_single(bp->pdev, bounce_skb->data, - len, PCI_DMA_TODEVICE); + mapping = dma_map_single(bp->sdev->dev, bounce_skb->data, + len, DMA_TO_DEVICE); if (dma_mapping_error(mapping) || mapping + len > DMA_30BIT_MASK) { if (!dma_mapping_error(mapping)) - pci_unmap_single(bp->pdev, mapping, - len, PCI_DMA_TODEVICE); + dma_unmap_single(bp->sdev->dev, mapping, + len, DMA_TO_DEVICE); dev_kfree_skb_any(bounce_skb); goto err_out; } @@ -1011,7 +994,7 @@ static int b44_start_xmit(struct sk_buff *skb, struct net_device *dev) entry = bp->tx_prod; bp->tx_buffers[entry].skb = skb; - pci_unmap_addr_set(&bp->tx_buffers[entry], mapping, mapping); + bp->tx_buffers[entry].mapping = mapping; ctrl = (len & DESC_CTRL_LEN); ctrl |= DESC_CTRL_IOC | DESC_CTRL_SOF | DESC_CTRL_EOF; @@ -1022,9 +1005,9 @@ static int b44_start_xmit(struct sk_buff *skb, struct net_device *dev) bp->tx_ring[entry].addr = cpu_to_le32((u32) mapping+bp->dma_offset); if (bp->flags & B44_FLAG_TX_RING_HACK) - b44_sync_dma_desc_for_device(bp->pdev, bp->tx_ring_dma, - entry * sizeof(bp->tx_ring[0]), - DMA_TO_DEVICE); + b44_sync_dma_desc_for_device(bp->sdev, bp->tx_ring_dma, + entry * sizeof(bp->tx_ring[0]), + DMA_TO_DEVICE); entry = NEXT_TX(entry); @@ -1097,10 +1080,8 @@ static void b44_free_rings(struct b44 *bp) if (rp->skb == NULL) continue; - pci_unmap_single(bp->pdev, - pci_unmap_addr(rp, mapping), - RX_PKT_BUF_SZ, - PCI_DMA_FROMDEVICE); + dma_unmap_single(bp->sdev->dev, rp->mapping, RX_PKT_BUF_SZ, + DMA_FROM_DEVICE); dev_kfree_skb_any(rp->skb); rp->skb = NULL; } @@ -1111,10 +1092,8 @@ static void b44_free_rings(struct b44 *bp) if (rp->skb == NULL) continue; - pci_unmap_single(bp->pdev, - pci_unmap_addr(rp, mapping), - rp->skb->len, - PCI_DMA_TODEVICE); + dma_unmap_single(bp->sdev->dev, rp->mapping, rp->skb->len, + DMA_TO_DEVICE); dev_kfree_skb_any(rp->skb); rp->skb = NULL; } @@ -1136,14 +1115,14 @@ static void b44_init_rings(struct b44 *bp) memset(bp->tx_ring, 0, B44_TX_RING_BYTES); if (bp->flags & B44_FLAG_RX_RING_HACK) - dma_sync_single_for_device(&bp->pdev->dev, bp->rx_ring_dma, - DMA_TABLE_BYTES, - PCI_DMA_BIDIRECTIONAL); + dma_sync_single_for_device(bp->sdev->dev, bp->rx_ring_dma, + DMA_TABLE_BYTES, + DMA_BIDIRECTIONAL); if (bp->flags & B44_FLAG_TX_RING_HACK) - dma_sync_single_for_device(&bp->pdev->dev, bp->tx_ring_dma, - DMA_TABLE_BYTES, - PCI_DMA_TODEVICE); + dma_sync_single_for_device(bp->sdev->dev, bp->tx_ring_dma, + DMA_TABLE_BYTES, + DMA_TO_DEVICE); for (i = 0; i < bp->rx_pending; i++) { if (b44_alloc_rx_skb(bp, -1, i) < 0) @@ -1163,24 +1142,24 @@ static void b44_free_consistent(struct b44 *bp) bp->tx_buffers = NULL; if (bp->rx_ring) { if (bp->flags & B44_FLAG_RX_RING_HACK) { - 
dma_unmap_single(&bp->pdev->dev, bp->rx_ring_dma, - DMA_TABLE_BYTES, - DMA_BIDIRECTIONAL); + dma_unmap_single(bp->sdev->dev, bp->rx_ring_dma, + DMA_TABLE_BYTES, + DMA_BIDIRECTIONAL); kfree(bp->rx_ring); } else - pci_free_consistent(bp->pdev, DMA_TABLE_BYTES, + dma_free_coherent(bp->sdev->dev, DMA_TABLE_BYTES, bp->rx_ring, bp->rx_ring_dma); bp->rx_ring = NULL; bp->flags &= ~B44_FLAG_RX_RING_HACK; } if (bp->tx_ring) { if (bp->flags & B44_FLAG_TX_RING_HACK) { - dma_unmap_single(&bp->pdev->dev, bp->tx_ring_dma, - DMA_TABLE_BYTES, - DMA_TO_DEVICE); + dma_unmap_single(bp->sdev->dev, bp->tx_ring_dma, + DMA_TABLE_BYTES, + DMA_TO_DEVICE); kfree(bp->tx_ring); } else - pci_free_consistent(bp->pdev, DMA_TABLE_BYTES, + dma_free_coherent(bp->sdev->dev, DMA_TABLE_BYTES, bp->tx_ring, bp->tx_ring_dma); bp->tx_ring = NULL; bp->flags &= ~B44_FLAG_TX_RING_HACK; @@ -1191,22 +1170,22 @@ static void b44_free_consistent(struct b44 *bp) * Must not be invoked with interrupt sources disabled and * the hardware shutdown down. Can sleep. */ -static int b44_alloc_consistent(struct b44 *bp) +static int b44_alloc_consistent(struct b44 *bp, gfp_t gfp) { int size; size = B44_RX_RING_SIZE * sizeof(struct ring_info); - bp->rx_buffers = kzalloc(size, GFP_KERNEL); + bp->rx_buffers = kzalloc(size, gfp); if (!bp->rx_buffers) goto out_err; size = B44_TX_RING_SIZE * sizeof(struct ring_info); - bp->tx_buffers = kzalloc(size, GFP_KERNEL); + bp->tx_buffers = kzalloc(size, gfp); if (!bp->tx_buffers) goto out_err; size = DMA_TABLE_BYTES; - bp->rx_ring = pci_alloc_consistent(bp->pdev, size, &bp->rx_ring_dma); + bp->rx_ring = dma_alloc_coherent(bp->sdev->dev, size, &bp->rx_ring_dma, gfp); if (!bp->rx_ring) { /* Allocation may have failed due to pci_alloc_consistent insisting on use of GFP_DMA, which is more restrictive @@ -1214,13 +1193,13 @@ static int b44_alloc_consistent(struct b44 *bp) struct dma_desc *rx_ring; dma_addr_t rx_ring_dma; - rx_ring = kzalloc(size, GFP_KERNEL); + rx_ring = kzalloc(size, gfp); if (!rx_ring) goto out_err; - rx_ring_dma = dma_map_single(&bp->pdev->dev, rx_ring, - DMA_TABLE_BYTES, - DMA_BIDIRECTIONAL); + rx_ring_dma = dma_map_single(bp->sdev->dev, rx_ring, + DMA_TABLE_BYTES, + DMA_BIDIRECTIONAL); if (dma_mapping_error(rx_ring_dma) || rx_ring_dma + size > DMA_30BIT_MASK) { @@ -1233,21 +1212,21 @@ static int b44_alloc_consistent(struct b44 *bp) bp->flags |= B44_FLAG_RX_RING_HACK; } - bp->tx_ring = pci_alloc_consistent(bp->pdev, size, &bp->tx_ring_dma); + bp->tx_ring = dma_alloc_coherent(bp->sdev->dev, size, &bp->tx_ring_dma, gfp); if (!bp->tx_ring) { - /* Allocation may have failed due to pci_alloc_consistent + /* Allocation may have failed due to dma_alloc_coherent insisting on use of GFP_DMA, which is more restrictive than necessary... */ struct dma_desc *tx_ring; dma_addr_t tx_ring_dma; - tx_ring = kzalloc(size, GFP_KERNEL); + tx_ring = kzalloc(size, gfp); if (!tx_ring) goto out_err; - tx_ring_dma = dma_map_single(&bp->pdev->dev, tx_ring, - DMA_TABLE_BYTES, - DMA_TO_DEVICE); + tx_ring_dma = dma_map_single(bp->sdev->dev, tx_ring, + DMA_TABLE_BYTES, + DMA_TO_DEVICE); if (dma_mapping_error(tx_ring_dma) || tx_ring_dma + size > DMA_30BIT_MASK) { @@ -1282,7 +1261,9 @@ static void b44_clear_stats(struct b44 *bp) /* bp->lock is held. 
*/ static void b44_chip_reset(struct b44 *bp) { - if (ssb_is_core_up(bp)) { + struct ssb_device *sdev = bp->sdev; + + if (ssb_device_is_enabled(bp->sdev)) { bw32(bp, B44_RCV_LAZY, 0); bw32(bp, B44_ENET_CTRL, ENET_CTRL_DISABLE); b44_wait_bit(bp, B44_ENET_CTRL, ENET_CTRL_DISABLE, 200, 1); @@ -1294,19 +1275,25 @@ static void b44_chip_reset(struct b44 *bp) } bw32(bp, B44_DMARX_CTRL, 0); bp->rx_prod = bp->rx_cons = 0; - } else { - ssb_pci_setup(bp, (bp->core_unit == 0 ? - SBINTVEC_ENET0 : - SBINTVEC_ENET1)); - } - - ssb_core_reset(bp); + } else + ssb_pcicore_dev_irqvecs_enable(&sdev->bus->pcicore, sdev); + ssb_device_enable(bp->sdev, 0); b44_clear_stats(bp); - /* Make PHY accessible. */ - bw32(bp, B44_MDIO_CTRL, (MDIO_CTRL_PREAMBLE | - (0x0d & MDIO_CTRL_MAXF_MASK))); + switch (sdev->bus->bustype) { + case SSB_BUSTYPE_SSB: + bw32(bp, B44_MDIO_CTRL, (MDIO_CTRL_PREAMBLE | + (((ssb_clockspeed(sdev->bus) + (B44_MDC_RATIO / 2)) / B44_MDC_RATIO) + & MDIO_CTRL_MAXF_MASK))); + break; + case SSB_BUSTYPE_PCI: + case SSB_BUSTYPE_PCMCIA: + bw32(bp, B44_MDIO_CTRL, (MDIO_CTRL_PREAMBLE | + (0x0d & MDIO_CTRL_MAXF_MASK))); + break; + } + br32(bp, B44_MDIO_CTRL); if (!(br32(bp, B44_DEVCTRL) & DEVCTRL_IPP)) { @@ -1349,6 +1336,7 @@ static int b44_set_mac_addr(struct net_device *dev, void *p) { struct b44 *bp = netdev_priv(dev); struct sockaddr *addr = p; + u32 val; if (netif_running(dev)) return -EBUSY; @@ -1359,7 +1347,11 @@ static int b44_set_mac_addr(struct net_device *dev, void *p) memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); spin_lock_irq(&bp->lock); - __b44_set_mac_addr(bp); + + val = br32(bp, B44_RXCONFIG); + if (!(val & RXCONFIG_CAM_ABSENT)) + __b44_set_mac_addr(bp); + spin_unlock_irq(&bp->lock); return 0; @@ -1416,10 +1408,12 @@ static int b44_open(struct net_device *dev) struct b44 *bp = netdev_priv(dev); int err; - err = b44_alloc_consistent(bp); + err = b44_alloc_consistent(bp, GFP_KERNEL); if (err) goto out; + napi_enable(&bp->napi); + b44_init_rings(bp); b44_init_hw(bp, B44_FULL_RESET); @@ -1427,6 +1421,7 @@ static int b44_open(struct net_device *dev) err = request_irq(dev->irq, b44_interrupt, IRQF_SHARED, dev->name, dev); if (unlikely(err < 0)) { + napi_disable(&bp->napi); b44_chip_reset(bp); b44_free_rings(bp); b44_free_consistent(bp); @@ -1445,18 +1440,6 @@ static int b44_open(struct net_device *dev) return err; } -#if 0 -/*static*/ void b44_dump_state(struct b44 *bp) -{ - u32 val32, val32_2, val32_3, val32_4, val32_5; - u16 val16; - - pci_read_config_word(bp->pdev, PCI_STATUS, &val16); - printk("DEBUG: PCI status [%04x] \n", val16); - -} -#endif - #ifdef CONFIG_NET_POLL_CONTROLLER /* * Polling receive - used by netconsole and other diagnostic tools @@ -1567,10 +1550,24 @@ static void b44_setup_pseudo_magicp(struct b44 *bp) } +#ifdef CONFIG_B44_PCI +static void b44_setup_wol_pci(struct b44 *bp) +{ + u16 val; + + if (bp->sdev->bus->bustype != SSB_BUSTYPE_SSB) { + bw32(bp, SSB_TMSLOW, br32(bp, SSB_TMSLOW) | SSB_TMSLOW_PE); + pci_read_config_word(bp->sdev->bus->host_pci, SSB_PMCSR, &val); + pci_write_config_word(bp->sdev->bus->host_pci, SSB_PMCSR, val | SSB_PE); + } +} +#else +static inline void b44_setup_wol_pci(struct b44 *bp) { } +#endif /* CONFIG_B44_PCI */ + static void b44_setup_wol(struct b44 *bp) { u32 val; - u16 pmval; bw32(bp, B44_RXCONFIG, RXCONFIG_ALLMULTI); @@ -1594,13 +1591,7 @@ static void b44_setup_wol(struct b44 *bp) } else { b44_setup_pseudo_magicp(bp); } - - val = br32(bp, B44_SBTMSLOW); - bw32(bp, B44_SBTMSLOW, val | SBTMSLOW_PE); - - pci_read_config_word(bp->pdev, 
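/*
 * Editor's sketch (illustrative, not from the patch): the b44_open() hunk
 * above shows the napi_struct discipline that replaces the old
 * netif_poll_{enable,disable}() calls -- napi_enable() before the device can
 * raise interrupts, napi_disable() on the error path and again on close.
 * The foo_* names and foo_interrupt() handler are hypothetical.
 */
#include <linux/netdevice.h>
#include <linux/interrupt.h>

struct foo_priv {
	struct napi_struct napi;
};

static irqreturn_t foo_interrupt(int irq, void *dev_id);	/* driver's ISR */

static int foo_open(struct net_device *dev)
{
	struct foo_priv *fp = netdev_priv(dev);
	int err;

	napi_enable(&fp->napi);
	err = request_irq(dev->irq, foo_interrupt, IRQF_SHARED,
			  dev->name, dev);
	if (err) {
		napi_disable(&fp->napi);	/* undo before bailing out */
		return err;
	}
	netif_start_queue(dev);
	return 0;
}

static int foo_close(struct net_device *dev)
{
	struct foo_priv *fp = netdev_priv(dev);

	netif_stop_queue(dev);
	napi_disable(&fp->napi);		/* was netif_poll_disable(dev) */
	free_irq(dev->irq, dev);
	return 0;
}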
SSB_PMCSR, &pmval); - pci_write_config_word(bp->pdev, SSB_PMCSR, pmval | SSB_PE); - + b44_setup_wol_pci(bp); } static int b44_close(struct net_device *dev) @@ -1609,15 +1600,12 @@ static int b44_close(struct net_device *dev) netif_stop_queue(dev); - netif_poll_disable(dev); + napi_disable(&bp->napi); del_timer_sync(&bp->timer); spin_lock_irq(&bp->lock); -#if 0 - b44_dump_state(bp); -#endif b44_halt(bp); b44_free_rings(bp); netif_carrier_off(dev); @@ -1626,8 +1614,6 @@ static int b44_close(struct net_device *dev) free_irq(dev->irq, dev); - netif_poll_enable(dev); - if (bp->flags & B44_FLAG_WOL_ENABLE) { b44_init_hw(bp, B44_PARTIAL_RESET); b44_setup_wol(bp); @@ -1700,7 +1686,7 @@ static void __b44_set_rx_mode(struct net_device *dev) val = br32(bp, B44_RXCONFIG); val &= ~(RXCONFIG_PROMISC | RXCONFIG_ALLMULTI); - if (dev->flags & IFF_PROMISC) { + if ((dev->flags & IFF_PROMISC) || (val & RXCONFIG_CAM_ABSENT)) { val |= RXCONFIG_PROMISC; bw32(bp, B44_RXCONFIG, val); } else { @@ -1748,11 +1734,19 @@ static void b44_set_msglevel(struct net_device *dev, u32 value) static void b44_get_drvinfo (struct net_device *dev, struct ethtool_drvinfo *info) { struct b44 *bp = netdev_priv(dev); - struct pci_dev *pci_dev = bp->pdev; + struct ssb_bus *bus = bp->sdev->bus; - strcpy (info->driver, DRV_MODULE_NAME); - strcpy (info->version, DRV_MODULE_VERSION); - strcpy (info->bus_info, pci_name(pci_dev)); + strncpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver)); + strncpy(info->version, DRV_MODULE_VERSION, sizeof(info->driver)); + switch (bus->bustype) { + case SSB_BUSTYPE_PCI: + strncpy(info->bus_info, pci_name(bus->host_pci), sizeof(info->bus_info)); + break; + case SSB_BUSTYPE_PCMCIA: + case SSB_BUSTYPE_SSB: + strncpy(info->bus_info, "SSB", sizeof(info->bus_info)); + break; + } } static int b44_nway_reset(struct net_device *dev) @@ -1968,9 +1962,14 @@ static void b44_get_strings(struct net_device *dev, u32 stringset, u8 *data) } } -static int b44_get_stats_count(struct net_device *dev) +static int b44_get_sset_count(struct net_device *dev, int sset) { - return ARRAY_SIZE(b44_gstrings); + switch (sset) { + case ETH_SS_STATS: + return ARRAY_SIZE(b44_gstrings); + default: + return -EOPNOTSUPP; + } } static void b44_get_ethtool_stats(struct net_device *dev, @@ -2031,7 +2030,7 @@ static const struct ethtool_ops b44_ethtool_ops = { .get_msglevel = b44_get_msglevel, .set_msglevel = b44_set_msglevel, .get_strings = b44_get_strings, - .get_stats_count = b44_get_stats_count, + .get_sset_count = b44_get_sset_count, .get_ethtool_stats = b44_get_ethtool_stats, }; @@ -2051,33 +2050,23 @@ static int b44_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) return err; } -/* Read 128-bytes of EEPROM. 
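/*
 * Editor's sketch: the ethtool hunk above is the standard conversion from the
 * old per-type callbacks (.get_stats_count, .self_test_count) to a single
 * .get_sset_count that switches on the string-set id.  The names below
 * ("foo_gstrings", foo_get_sset_count) are placeholders, not driver symbols.
 */
#include <linux/ethtool.h>
#include <linux/netdevice.h>
#include <linux/kernel.h>
#include <linux/errno.h>

static const char foo_gstrings[][ETH_GSTRING_LEN] = {
	"rx_packets", "tx_packets",
};

static int foo_get_sset_count(struct net_device *dev, int sset)
{
	switch (sset) {
	case ETH_SS_STATS:
		return ARRAY_SIZE(foo_gstrings);
	default:
		return -EOPNOTSUPP;
	}
}

/* wired up as:  .get_sset_count = foo_get_sset_count,  in the ethtool_ops */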
*/ -static int b44_read_eeprom(struct b44 *bp, u8 *data) -{ - long i; - __le16 *ptr = (__le16 *) data; - - for (i = 0; i < 128; i += 2) - ptr[i / 2] = cpu_to_le16(readw(bp->regs + 4096 + i)); - - return 0; -} - static int __devinit b44_get_invariants(struct b44 *bp) { - u8 eeprom[128]; - int err; + struct ssb_device *sdev = bp->sdev; + int err = 0; + u8 *addr; - err = b44_read_eeprom(bp, &eeprom[0]); - if (err) - goto out; + bp->dma_offset = ssb_dma_translation(sdev); - bp->dev->dev_addr[0] = eeprom[79]; - bp->dev->dev_addr[1] = eeprom[78]; - bp->dev->dev_addr[2] = eeprom[81]; - bp->dev->dev_addr[3] = eeprom[80]; - bp->dev->dev_addr[4] = eeprom[83]; - bp->dev->dev_addr[5] = eeprom[82]; + if (sdev->bus->bustype == SSB_BUSTYPE_SSB && + instance > 1) { + addr = sdev->bus->sprom.r1.et1mac; + bp->phy_addr = sdev->bus->sprom.r1.et1phyaddr; + } else { + addr = sdev->bus->sprom.r1.et0mac; + bp->phy_addr = sdev->bus->sprom.r1.et0phyaddr; + } + memcpy(bp->dev->dev_addr, addr, 6); if (!is_valid_ether_addr(&bp->dev->dev_addr[0])){ printk(KERN_ERR PFX "Invalid MAC address found in EEPROM\n"); @@ -2086,103 +2075,53 @@ static int __devinit b44_get_invariants(struct b44 *bp) memcpy(bp->dev->perm_addr, bp->dev->dev_addr, bp->dev->addr_len); - bp->phy_addr = eeprom[90] & 0x1f; - bp->imask = IMASK_DEF; - bp->core_unit = ssb_core_unit(bp); - bp->dma_offset = SB_PCI_DMA; - /* XXX - really required? bp->flags |= B44_FLAG_BUGGY_TXPTR; - */ + */ - if (ssb_get_core_rev(bp) >= 7) - bp->flags |= B44_FLAG_B0_ANDLATER; + if (bp->sdev->id.revision >= 7) + bp->flags |= B44_FLAG_B0_ANDLATER; -out: return err; } -static int __devinit b44_init_one(struct pci_dev *pdev, - const struct pci_device_id *ent) +static int __devinit b44_init_one(struct ssb_device *sdev, + const struct ssb_device_id *ent) { static int b44_version_printed = 0; - unsigned long b44reg_base, b44reg_len; struct net_device *dev; struct b44 *bp; - int err, i; + int err; + DECLARE_MAC_BUF(mac); + + instance++; if (b44_version_printed++ == 0) printk(KERN_INFO "%s", version); - err = pci_enable_device(pdev); - if (err) { - dev_err(&pdev->dev, "Cannot enable PCI device, " - "aborting.\n"); - return err; - } - - if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) { - dev_err(&pdev->dev, - "Cannot find proper PCI device " - "base address, aborting.\n"); - err = -ENODEV; - goto err_out_disable_pdev; - } - - err = pci_request_regions(pdev, DRV_MODULE_NAME); - if (err) { - dev_err(&pdev->dev, - "Cannot obtain PCI resources, aborting.\n"); - goto err_out_disable_pdev; - } - - pci_set_master(pdev); - - err = pci_set_dma_mask(pdev, (u64) DMA_30BIT_MASK); - if (err) { - dev_err(&pdev->dev, "No usable DMA configuration, aborting.\n"); - goto err_out_free_res; - } - - err = pci_set_consistent_dma_mask(pdev, (u64) DMA_30BIT_MASK); - if (err) { - dev_err(&pdev->dev, "No usable DMA configuration, aborting.\n"); - goto err_out_free_res; - } - - b44reg_base = pci_resource_start(pdev, 0); - b44reg_len = pci_resource_len(pdev, 0); dev = alloc_etherdev(sizeof(*bp)); if (!dev) { - dev_err(&pdev->dev, "Etherdev alloc failed, aborting.\n"); + dev_err(sdev->dev, "Etherdev alloc failed, aborting.\n"); err = -ENOMEM; - goto err_out_free_res; + goto out; } - SET_MODULE_OWNER(dev); - SET_NETDEV_DEV(dev,&pdev->dev); + SET_NETDEV_DEV(dev, sdev->dev); /* No interesting netdevice features in this card... 
*/ dev->features |= 0; bp = netdev_priv(dev); - bp->pdev = pdev; + bp->sdev = sdev; bp->dev = dev; bp->msg_enable = netif_msg_init(b44_debug, B44_DEF_MSG_ENABLE); spin_lock_init(&bp->lock); - bp->regs = ioremap(b44reg_base, b44reg_len); - if (bp->regs == 0UL) { - dev_err(&pdev->dev, "Cannot map device registers, aborting.\n"); - err = -ENOMEM; - goto err_out_free_dev; - } - bp->rx_pending = B44_DEF_RX_RING_PENDING; bp->tx_pending = B44_DEF_TX_RING_PENDING; @@ -2194,23 +2133,34 @@ static int __devinit b44_init_one(struct pci_dev *pdev, dev->set_mac_address = b44_set_mac_addr; dev->do_ioctl = b44_ioctl; dev->tx_timeout = b44_tx_timeout; - dev->poll = b44_poll; - dev->weight = 64; + netif_napi_add(dev, &bp->napi, b44_poll, 64); dev->watchdog_timeo = B44_TX_TIMEOUT; #ifdef CONFIG_NET_POLL_CONTROLLER dev->poll_controller = b44_poll_controller; #endif dev->change_mtu = b44_change_mtu; - dev->irq = pdev->irq; + dev->irq = sdev->irq; SET_ETHTOOL_OPS(dev, &b44_ethtool_ops); netif_carrier_off(dev); + err = ssb_bus_powerup(sdev->bus, 0); + if (err) { + dev_err(sdev->dev, + "Failed to powerup the bus\n"); + goto err_out_free_dev; + } + err = ssb_dma_set_mask(sdev, DMA_30BIT_MASK); + if (err) { + dev_err(sdev->dev, + "Required 30BIT DMA mask unsupported by the system.\n"); + goto err_out_powerdown; + } err = b44_get_invariants(bp); if (err) { - dev_err(&pdev->dev, + dev_err(sdev->dev, "Problem fetching invariants of chip, aborting.\n"); - goto err_out_iounmap; + goto err_out_powerdown; } bp->mii_if.dev = dev; @@ -2229,61 +2179,49 @@ static int __devinit b44_init_one(struct pci_dev *pdev, err = register_netdev(dev); if (err) { - dev_err(&pdev->dev, "Cannot register net device, aborting.\n"); - goto err_out_iounmap; + dev_err(sdev->dev, "Cannot register net device, aborting.\n"); + goto err_out_powerdown; } - pci_set_drvdata(pdev, dev); - - pci_save_state(bp->pdev); + ssb_set_drvdata(sdev, dev); /* Chip reset provides power to the b44 MAC & PCI cores, which * is necessary for MAC register access. */ b44_chip_reset(bp); - printk(KERN_INFO "%s: Broadcom 4400 10/100BaseT Ethernet ", dev->name); - for (i = 0; i < 6; i++) - printk("%2.2x%c", dev->dev_addr[i], - i == 5 ? 
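/*
 * Editor's sketch: in probe, the per-device poll hook moves from the
 * net_device itself (dev->poll / dev->weight) into an embedded napi_struct
 * via netif_napi_add(), exactly as the b44_init_one() hunk above does.
 * The foo_* symbols are placeholders.
 */
#include <linux/netdevice.h>

struct foo_napi_priv {
	struct napi_struct napi;
};

static int foo_poll(struct napi_struct *napi, int budget);	/* new-style poll */

static void foo_setup_napi(struct net_device *dev)
{
	struct foo_napi_priv *fp = netdev_priv(dev);

	/* was:  dev->poll = foo_poll;  dev->weight = 64; */
	netif_napi_add(dev, &fp->napi, foo_poll, 64);
}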
'\n' : ':'); + printk(KERN_INFO "%s: Broadcom 44xx/47xx 10/100BaseT Ethernet %s\n", + dev->name, print_mac(mac, dev->dev_addr)); return 0; -err_out_iounmap: - iounmap(bp->regs); +err_out_powerdown: + ssb_bus_may_powerdown(sdev->bus); err_out_free_dev: free_netdev(dev); -err_out_free_res: - pci_release_regions(pdev); - -err_out_disable_pdev: - pci_disable_device(pdev); - pci_set_drvdata(pdev, NULL); +out: return err; } -static void __devexit b44_remove_one(struct pci_dev *pdev) +static void __devexit b44_remove_one(struct ssb_device *sdev) { - struct net_device *dev = pci_get_drvdata(pdev); - struct b44 *bp = netdev_priv(dev); + struct net_device *dev = ssb_get_drvdata(sdev); unregister_netdev(dev); - iounmap(bp->regs); + ssb_bus_may_powerdown(sdev->bus); free_netdev(dev); - pci_release_regions(pdev); - pci_disable_device(pdev); - pci_set_drvdata(pdev, NULL); + ssb_set_drvdata(sdev, NULL); } -static int b44_suspend(struct pci_dev *pdev, pm_message_t state) +static int b44_suspend(struct ssb_device *sdev, pm_message_t state) { - struct net_device *dev = pci_get_drvdata(pdev); + struct net_device *dev = ssb_get_drvdata(sdev); struct b44 *bp = netdev_priv(dev); - if (!netif_running(dev)) - return 0; + if (!netif_running(dev)) + return 0; del_timer_sync(&bp->timer); @@ -2301,33 +2239,29 @@ static int b44_suspend(struct pci_dev *pdev, pm_message_t state) b44_init_hw(bp, B44_PARTIAL_RESET); b44_setup_wol(bp); } - pci_disable_device(pdev); + return 0; } -static int b44_resume(struct pci_dev *pdev) +static int b44_resume(struct ssb_device *sdev) { - struct net_device *dev = pci_get_drvdata(pdev); + struct net_device *dev = ssb_get_drvdata(sdev); struct b44 *bp = netdev_priv(dev); int rc = 0; - pci_restore_state(pdev); - rc = pci_enable_device(pdev); + rc = ssb_bus_powerup(sdev->bus, 0); if (rc) { - printk(KERN_ERR PFX "%s: pci_enable_device failed\n", - dev->name); + dev_err(sdev->dev, + "Failed to powerup the bus\n"); return rc; } - pci_set_master(pdev); - if (!netif_running(dev)) return 0; rc = request_irq(dev->irq, b44_interrupt, IRQF_SHARED, dev->name, dev); if (rc) { printk(KERN_ERR PFX "%s: request_irq failed\n", dev->name); - pci_disable_device(pdev); return rc; } @@ -2346,29 +2280,53 @@ static int b44_resume(struct pci_dev *pdev) return 0; } -static struct pci_driver b44_driver = { +static struct ssb_driver b44_ssb_driver = { .name = DRV_MODULE_NAME, - .id_table = b44_pci_tbl, + .id_table = b44_ssb_tbl, .probe = b44_init_one, .remove = __devexit_p(b44_remove_one), - .suspend = b44_suspend, - .resume = b44_resume, + .suspend = b44_suspend, + .resume = b44_resume, }; +static inline int b44_pci_init(void) +{ + int err = 0; +#ifdef CONFIG_B44_PCI + err = ssb_pcihost_register(&b44_pci_driver); +#endif + return err; +} + +static inline void b44_pci_exit(void) +{ +#ifdef CONFIG_B44_PCI + ssb_pcihost_unregister(&b44_pci_driver); +#endif +} + static int __init b44_init(void) { unsigned int dma_desc_align_size = dma_get_cache_alignment(); + int err; /* Setup paramaters for syncing RX/TX DMA descriptors */ dma_desc_align_mask = ~(dma_desc_align_size - 1); dma_desc_sync_size = max_t(unsigned int, dma_desc_align_size, sizeof(struct dma_desc)); - return pci_register_driver(&b44_driver); + err = b44_pci_init(); + if (err) + return err; + err = ssb_driver_register(&b44_ssb_driver); + if (err) + b44_pci_exit(); + return err; } static void __exit b44_cleanup(void) { - pci_unregister_driver(&b44_driver); + ssb_driver_unregister(&b44_ssb_driver); + b44_pci_exit(); } module_init(b44_init); diff --git 
a/drivers/net/b44.h b/drivers/net/b44.h index e537e63f292e6e948dfea4567cacfe99a0ef1a0b..7db0c84a795096451b92253576d38028a277eacd 100644 --- a/drivers/net/b44.h +++ b/drivers/net/b44.h @@ -129,6 +129,7 @@ #define RXCONFIG_FLOW 0x00000020 /* Flow Control Enable */ #define RXCONFIG_FLOW_ACCEPT 0x00000040 /* Accept Unicast Flow Control Frame */ #define RXCONFIG_RFILT 0x00000080 /* Reject Filter */ +#define RXCONFIG_CAM_ABSENT 0x00000100 /* CAM Absent */ #define B44_RXMAXLEN 0x0404UL /* EMAC RX Max Packet Length */ #define B44_TXMAXLEN 0x0408UL /* EMAC TX Max Packet Length */ #define B44_MDIO_CTRL 0x0410UL /* EMAC MDIO Control */ @@ -227,76 +228,6 @@ #define B44_RX_PAUSE 0x05D4UL /* MIB RX Pause Packets */ #define B44_RX_NPAUSE 0x05D8UL /* MIB RX Non-Pause Packets */ -/* Silicon backplane register definitions */ -#define B44_SBIMSTATE 0x0F90UL /* SB Initiator Agent State */ -#define SBIMSTATE_PC 0x0000000f /* Pipe Count */ -#define SBIMSTATE_AP_MASK 0x00000030 /* Arbitration Priority */ -#define SBIMSTATE_AP_BOTH 0x00000000 /* Use both timeslices and token */ -#define SBIMSTATE_AP_TS 0x00000010 /* Use timeslices only */ -#define SBIMSTATE_AP_TK 0x00000020 /* Use token only */ -#define SBIMSTATE_AP_RSV 0x00000030 /* Reserved */ -#define SBIMSTATE_IBE 0x00020000 /* In Band Error */ -#define SBIMSTATE_TO 0x00040000 /* Timeout */ -#define B44_SBINTVEC 0x0F94UL /* SB Interrupt Mask */ -#define SBINTVEC_PCI 0x00000001 /* Enable interrupts for PCI */ -#define SBINTVEC_ENET0 0x00000002 /* Enable interrupts for enet 0 */ -#define SBINTVEC_ILINE20 0x00000004 /* Enable interrupts for iline20 */ -#define SBINTVEC_CODEC 0x00000008 /* Enable interrupts for v90 codec */ -#define SBINTVEC_USB 0x00000010 /* Enable interrupts for usb */ -#define SBINTVEC_EXTIF 0x00000020 /* Enable interrupts for external i/f */ -#define SBINTVEC_ENET1 0x00000040 /* Enable interrupts for enet 1 */ -#define B44_SBTMSLOW 0x0F98UL /* SB Target State Low */ -#define SBTMSLOW_RESET 0x00000001 /* Reset */ -#define SBTMSLOW_REJECT 0x00000002 /* Reject */ -#define SBTMSLOW_CLOCK 0x00010000 /* Clock Enable */ -#define SBTMSLOW_FGC 0x00020000 /* Force Gated Clocks On */ -#define SBTMSLOW_PE 0x40000000 /* Power Management Enable */ -#define SBTMSLOW_BE 0x80000000 /* BIST Enable */ -#define B44_SBTMSHIGH 0x0F9CUL /* SB Target State High */ -#define SBTMSHIGH_SERR 0x00000001 /* S-error */ -#define SBTMSHIGH_INT 0x00000002 /* Interrupt */ -#define SBTMSHIGH_BUSY 0x00000004 /* Busy */ -#define SBTMSHIGH_GCR 0x20000000 /* Gated Clock Request */ -#define SBTMSHIGH_BISTF 0x40000000 /* BIST Failed */ -#define SBTMSHIGH_BISTD 0x80000000 /* BIST Done */ -#define B44_SBIDHIGH 0x0FFCUL /* SB Identification High */ -#define SBIDHIGH_RC_MASK 0x0000000f /* Revision Code */ -#define SBIDHIGH_CC_MASK 0x0000fff0 /* Core Code */ -#define SBIDHIGH_CC_SHIFT 4 -#define SBIDHIGH_VC_MASK 0xffff0000 /* Vendor Code */ -#define SBIDHIGH_VC_SHIFT 16 - -/* SSB PCI config space registers. */ -#define SSB_PMCSR 0x44 -#define SSB_PE 0x100 -#define SSB_BAR0_WIN 0x80 -#define SSB_BAR1_WIN 0x84 -#define SSB_SPROM_CONTROL 0x88 -#define SSB_BAR1_CONTROL 0x8c - -/* SSB core and host control registers. 
*/ -#define SSB_CONTROL 0x0000UL -#define SSB_ARBCONTROL 0x0010UL -#define SSB_ISTAT 0x0020UL -#define SSB_IMASK 0x0024UL -#define SSB_MBOX 0x0028UL -#define SSB_BCAST_ADDR 0x0050UL -#define SSB_BCAST_DATA 0x0054UL -#define SSB_PCI_TRANS_0 0x0100UL -#define SSB_PCI_TRANS_1 0x0104UL -#define SSB_PCI_TRANS_2 0x0108UL -#define SSB_SPROM 0x0800UL - -#define SSB_PCI_MEM 0x00000000 -#define SSB_PCI_IO 0x00000001 -#define SSB_PCI_CFG0 0x00000002 -#define SSB_PCI_CFG1 0x00000003 -#define SSB_PCI_PREF 0x00000004 -#define SSB_PCI_BURST 0x00000008 -#define SSB_PCI_MASK0 0xfc000000 -#define SSB_PCI_MASK1 0xfc000000 -#define SSB_PCI_MASK2 0xc0000000 - /* 4400 PHY registers */ #define B44_MII_AUXCTRL 24 /* Auxiliary Control */ #define MII_AUXCTRL_DUPLEX 0x0001 /* Full Duplex */ @@ -346,10 +277,12 @@ struct rx_header { struct ring_info { struct sk_buff *skb; - DECLARE_PCI_UNMAP_ADDR(mapping); + dma_addr_t mapping; }; #define B44_MCAST_TABLE_SIZE 32 +#define B44_PHY_ADDR_NO_PHY 30 +#define B44_MDC_RATIO 5000000 #define B44_STAT_REG_DECLARE \ _B44(tx_good_octets) \ @@ -410,6 +343,8 @@ B44_STAT_REG_DECLARE #undef _B44 }; +struct ssb_device; + struct b44 { spinlock_t lock; @@ -423,6 +358,8 @@ struct b44 { struct ring_info *rx_buffers; struct ring_info *tx_buffers; + struct napi_struct napi; + u32 dma_offset; u32 flags; #define B44_FLAG_B0_ANDLATER 0x00000001 @@ -450,8 +387,7 @@ struct b44 { struct net_device_stats stats; struct b44_hw_stats hw_stats; - void __iomem *regs; - struct pci_dev *pdev; + struct ssb_device *sdev; struct net_device *dev; dma_addr_t rx_ring_dma, tx_ring_dma; @@ -459,7 +395,6 @@ struct b44 { u32 rx_pending; u32 tx_pending; u8 phy_addr; - u8 core_unit; struct mii_if_info mii_if; }; diff --git a/drivers/net/bfin_mac.c b/drivers/net/bfin_mac.c index 2bb97d464689e9bcbe90db886a08a7f06ef15243..53fe7ded5d509275b0827891f4551d376e6b749d 100644 --- a/drivers/net/bfin_mac.c +++ b/drivers/net/bfin_mac.c @@ -47,15 +47,11 @@ #include #include #include - +#include #include #include #include - #include -#include -#include -#include #include #include @@ -99,6 +95,9 @@ static struct net_dma_desc_tx *current_tx_ptr; static struct net_dma_desc_tx *tx_desc; static struct net_dma_desc_rx *rx_desc; +static void bf537mac_disable(void); +static void bf537mac_enable(void); + static void desc_list_free(void) { struct net_dma_desc_rx *r; @@ -287,8 +286,11 @@ static int setup_pin_mux(int action) return 0; } +/* + * MII operations + */ /* Wait until the previous MDC/MDIO transaction has completed */ -static void poll_mdc_done(void) +static void mdio_poll(void) { int timeout_cnt = MAX_TIMEOUT_CNT; @@ -304,154 +306,201 @@ static void poll_mdc_done(void) } /* Read an off-chip register in a PHY through the MDC/MDIO port */ -static u16 read_phy_reg(u16 PHYAddr, u16 RegAddr) +static int mdiobus_read(struct mii_bus *bus, int phy_addr, int regnum) { - poll_mdc_done(); + mdio_poll(); + /* read mode */ - bfin_write_EMAC_STAADD(SET_PHYAD(PHYAddr) | - SET_REGAD(RegAddr) | + bfin_write_EMAC_STAADD(SET_PHYAD((u16) phy_addr) | + SET_REGAD((u16) regnum) | STABUSY); - poll_mdc_done(); - return (u16) bfin_read_EMAC_STADAT(); + mdio_poll(); + + return (int) bfin_read_EMAC_STADAT(); } /* Write an off-chip register in a PHY through the MDC/MDIO port */ -static void raw_write_phy_reg(u16 PHYAddr, u16 RegAddr, u32 Data) +static int mdiobus_write(struct mii_bus *bus, int phy_addr, int regnum, + u16 value) { - bfin_write_EMAC_STADAT(Data); + mdio_poll(); + + bfin_write_EMAC_STADAT((u32) value); /* write mode */ - 
bfin_write_EMAC_STAADD(SET_PHYAD(PHYAddr) | - SET_REGAD(RegAddr) | + bfin_write_EMAC_STAADD(SET_PHYAD((u16) phy_addr) | + SET_REGAD((u16) regnum) | STAOP | STABUSY); - poll_mdc_done(); + mdio_poll(); + + return 0; } -static void write_phy_reg(u16 PHYAddr, u16 RegAddr, u32 Data) +static int mdiobus_reset(struct mii_bus *bus) { - poll_mdc_done(); - raw_write_phy_reg(PHYAddr, RegAddr, Data); + return 0; } -/* set up the phy */ -static void bf537mac_setphy(struct net_device *dev) +static void bf537_adjust_link(struct net_device *dev) { - u16 phydat; struct bf537mac_local *lp = netdev_priv(dev); + struct phy_device *phydev = lp->phydev; + unsigned long flags; + int new_state = 0; + + spin_lock_irqsave(&lp->lock, flags); + if (phydev->link) { + /* Now we make sure that we can be in full duplex mode. + * If not, we operate in half-duplex mode. */ + if (phydev->duplex != lp->old_duplex) { + u32 opmode = bfin_read_EMAC_OPMODE(); + new_state = 1; + + if (phydev->duplex) + opmode |= FDMODE; + else + opmode &= ~(FDMODE); + + bfin_write_EMAC_OPMODE(opmode); + lp->old_duplex = phydev->duplex; + } - /* Program PHY registers */ - pr_debug("start setting up phy\n"); - - /* issue a reset */ - raw_write_phy_reg(lp->PhyAddr, PHYREG_MODECTL, 0x8000); - - /* wait half a second */ - msleep(500); - - phydat = read_phy_reg(lp->PhyAddr, PHYREG_MODECTL); - - /* advertise flow control supported */ - phydat = read_phy_reg(lp->PhyAddr, PHYREG_ANAR); - phydat |= (1 << 10); - write_phy_reg(lp->PhyAddr, PHYREG_ANAR, phydat); + if (phydev->speed != lp->old_speed) { +#if defined(CONFIG_BFIN_MAC_RMII) + u32 opmode = bfin_read_EMAC_OPMODE(); + bf537mac_disable(); + switch (phydev->speed) { + case 10: + opmode |= RMII_10; + break; + case 100: + opmode &= ~(RMII_10); + break; + default: + printk(KERN_WARNING + "%s: Ack! 
Speed (%d) is not 10/100!\n", + DRV_NAME, phydev->speed); + break; + } + bfin_write_EMAC_OPMODE(opmode); + bf537mac_enable(); +#endif - phydat = 0; - if (lp->Negotiate) - phydat |= 0x1000; /* enable auto negotiation */ - else { - if (lp->FullDuplex) - phydat |= (1 << 8); /* full duplex */ - else - phydat &= (~(1 << 8)); /* half duplex */ + new_state = 1; + lp->old_speed = phydev->speed; + } - if (!lp->Port10) - phydat |= (1 << 13); /* 100 Mbps */ - else - phydat &= (~(1 << 13)); /* 10 Mbps */ + if (!lp->old_link) { + new_state = 1; + lp->old_link = 1; + netif_schedule(dev); + } + } else if (lp->old_link) { + new_state = 1; + lp->old_link = 0; + lp->old_speed = 0; + lp->old_duplex = -1; } - if (lp->Loopback) - phydat |= (1 << 14); /* enable TX->RX loopback */ - - write_phy_reg(lp->PhyAddr, PHYREG_MODECTL, phydat); - msleep(500); - - phydat = read_phy_reg(lp->PhyAddr, PHYREG_MODECTL); - /* check for SMSC PHY */ - if ((read_phy_reg(lp->PhyAddr, PHYREG_PHYID1) == 0x7) && - ((read_phy_reg(lp->PhyAddr, PHYREG_PHYID2) & 0xfff0) == 0xC0A0)) { - /* - * we have SMSC PHY so reqest interrupt - * on link down condition - */ - - /* enable interrupts */ - write_phy_reg(lp->PhyAddr, 30, 0x0ff); + if (new_state) { + u32 opmode = bfin_read_EMAC_OPMODE(); + phy_print_status(phydev); + pr_debug("EMAC_OPMODE = 0x%08x\n", opmode); } + + spin_unlock_irqrestore(&lp->lock, flags); } -/**************************************************************************/ -void setup_system_regs(struct net_device *dev) +static int mii_probe(struct net_device *dev) { - int phyaddr; - unsigned short sysctl, phydat; - u32 opmode; struct bf537mac_local *lp = netdev_priv(dev); - int count = 0; - - phyaddr = lp->PhyAddr; + struct phy_device *phydev = NULL; + unsigned short sysctl; + int i; - /* Enable PHY output */ + /* Enable PHY output early */ if (!(bfin_read_VR_CTL() & PHYCLKOE)) bfin_write_VR_CTL(bfin_read_VR_CTL() | PHYCLKOE); /* MDC = 2.5 MHz */ - sysctl = SET_MDCDIV(24); - /* Odd word alignment for Receive Frame DMA word */ - /* Configure checksum support and rcve frame word alignment */ -#if defined(BFIN_MAC_CSUM_OFFLOAD) - sysctl |= RXDWA | RXCKS; -#else - sysctl |= RXDWA; -#endif + sysctl = bfin_read_EMAC_SYSCTL(); + sysctl |= SET_MDCDIV(24); bfin_write_EMAC_SYSCTL(sysctl); - /* auto negotiation on */ - /* full duplex */ - /* 100 Mbps */ - phydat = PHY_ANEG_EN | PHY_DUPLEX | PHY_SPD_SET; - write_phy_reg(phyaddr, PHYREG_MODECTL, phydat); - /* test if full duplex supported */ - do { - msleep(100); - phydat = read_phy_reg(phyaddr, PHYREG_MODESTAT); - if (count > 30) { - printk(KERN_NOTICE DRV_NAME ": Link is down\n"); - printk(KERN_NOTICE DRV_NAME - "please check your network connection\n"); - break; - } - count++; - } while (!(phydat & 0x0004)); + /* search for connect PHY device */ + for (i = 0; i < PHY_MAX_ADDR; i++) { + struct phy_device *const tmp_phydev = lp->mii_bus.phy_map[i]; - phydat = read_phy_reg(phyaddr, PHYREG_ANLPAR); + if (!tmp_phydev) + continue; /* no PHY here... */ - if ((phydat & 0x0100) || (phydat & 0x0040)) { - opmode = FDMODE; - } else { - opmode = 0; - printk(KERN_INFO DRV_NAME - ": Network is set to half duplex\n"); + phydev = tmp_phydev; + break; /* found it */ + } + + /* now we are supposed to have a proper phydev, to attach to... 
*/ + if (!phydev) { + printk(KERN_INFO "%s: Don't found any phy device at all\n", + dev->name); + return -ENODEV; } #if defined(CONFIG_BFIN_MAC_RMII) - opmode |= RMII; /* For Now only 100MBit are supported */ + phydev = phy_connect(dev, phydev->dev.bus_id, &bf537_adjust_link, 0, + PHY_INTERFACE_MODE_RMII); +#else + phydev = phy_connect(dev, phydev->dev.bus_id, &bf537_adjust_link, 0, + PHY_INTERFACE_MODE_MII); #endif - bfin_write_EMAC_OPMODE(opmode); + if (IS_ERR(phydev)) { + printk(KERN_ERR "%s: Could not attach to PHY\n", dev->name); + return PTR_ERR(phydev); + } + + /* mask with MAC supported features */ + phydev->supported &= (SUPPORTED_10baseT_Half + | SUPPORTED_10baseT_Full + | SUPPORTED_100baseT_Half + | SUPPORTED_100baseT_Full + | SUPPORTED_Autoneg + | SUPPORTED_Pause | SUPPORTED_Asym_Pause + | SUPPORTED_MII + | SUPPORTED_TP); + + phydev->advertising = phydev->supported; + + lp->old_link = 0; + lp->old_speed = 0; + lp->old_duplex = -1; + lp->phydev = phydev; + + printk(KERN_INFO "%s: attached PHY driver [%s] " + "(mii_bus:phy_addr=%s, irq=%d)\n", + DRV_NAME, phydev->drv->name, phydev->dev.bus_id, phydev->irq); + + return 0; +} + +/**************************************************************************/ +void setup_system_regs(struct net_device *dev) +{ + unsigned short sysctl; + + /* + * Odd word alignment for Receive Frame DMA word + * Configure checksum support and rcve frame word alignment + */ + sysctl = bfin_read_EMAC_SYSCTL(); +#if defined(BFIN_MAC_CSUM_OFFLOAD) + sysctl |= RXDWA | RXCKS; +#else + sysctl |= RXDWA; +#endif + bfin_write_EMAC_SYSCTL(sysctl); bfin_write_EMAC_MMC_CTL(RSTC | CROLL); @@ -468,7 +517,7 @@ void setup_system_regs(struct net_device *dev) bfin_write_DMA1_Y_MODIFY(0); } -void setup_mac_addr(u8 * mac_addr) +static void setup_mac_addr(u8 *mac_addr) { u32 addr_low = le32_to_cpu(*(__le32 *) & mac_addr[0]); u16 addr_hi = le16_to_cpu(*(__le16 *) & mac_addr[4]); @@ -478,6 +527,16 @@ void setup_mac_addr(u8 * mac_addr) bfin_write_EMAC_ADDRHI(addr_hi); } +static int bf537mac_set_mac_address(struct net_device *dev, void *p) +{ + struct sockaddr *addr = p; + if (netif_running(dev)) + return -EBUSY; + memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); + setup_mac_addr(dev->dev_addr); + return 0; +} + static void adjust_tx_list(void) { int timeout_cnt = MAX_TIMEOUT_CNT; @@ -584,8 +643,8 @@ static int bf537mac_hard_start_xmit(struct sk_buff *skb, adjust_tx_list(); current_tx_ptr = current_tx_ptr->next; dev->trans_start = jiffies; - lp->stats.tx_packets++; - lp->stats.tx_bytes += (skb->len); + dev->stats.tx_packets++; + dev->stats.tx_bytes += (skb->len); return 0; } @@ -601,7 +660,7 @@ static void bf537mac_rx(struct net_device *dev) if (!new_skb) { printk(KERN_NOTICE DRV_NAME ": rx: low on mem - packet dropped\n"); - lp->stats.rx_dropped++; + dev->stats.rx_dropped++; goto out; } /* reserve 2 bytes for RXDWA padding */ @@ -623,8 +682,8 @@ static void bf537mac_rx(struct net_device *dev) #endif netif_rx(skb); - lp->stats.rx_packets++; - lp->stats.rx_bytes += len; + dev->stats.rx_packets++; + dev->stats.rx_bytes += len; current_rx_ptr->status.status_word = 0x00000000; current_rx_ptr = current_rx_ptr->next; @@ -667,7 +726,7 @@ static void bf537mac_poll(struct net_device *dev) } #endif /* CONFIG_NET_POLL_CONTROLLER */ -static void bf537mac_reset(void) +static void bf537mac_disable(void) { unsigned int opmode; @@ -681,18 +740,18 @@ static void bf537mac_reset(void) /* * Enable Interrupts, Receive, and Transmit */ -static int bf537mac_enable(struct net_device *dev) +static 
void bf537mac_enable(void) { u32 opmode; - pr_debug("%s: %s\n", dev->name, __FUNCTION__); + pr_debug("%s: %s\n", DRV_NAME, __FUNCTION__); /* Set RX DMA */ bfin_write_DMA1_NEXT_DESC_PTR(&(rx_list_head->desc_a)); bfin_write_DMA1_CONFIG(rx_list_head->desc_a.config); /* Wait MII done */ - poll_mdc_done(); + mdio_poll(); /* We enable only RX here */ /* ASTP : Enable Automatic Pad Stripping @@ -716,8 +775,6 @@ static int bf537mac_enable(struct net_device *dev) #endif /* Turn on the EMAC rx */ bfin_write_EMAC_OPMODE(opmode); - - return 0; } /* Our watchdog timed out. Called by the networking layer */ @@ -725,32 +782,18 @@ static void bf537mac_timeout(struct net_device *dev) { pr_debug("%s: %s\n", dev->name, __FUNCTION__); - bf537mac_reset(); + bf537mac_disable(); /* reset tx queue */ tx_list_tail = tx_list_head->next; - bf537mac_enable(dev); + bf537mac_enable(); /* We can accept TX packets again */ dev->trans_start = jiffies; netif_wake_queue(dev); } -/* - * Get the current statistics. - * This may be called with the card open or closed. - */ -static struct net_device_stats *bf537mac_query_statistics(struct net_device - *dev) -{ - struct bf537mac_local *lp = netdev_priv(dev); - - pr_debug("%s: %s\n", dev->name, __FUNCTION__); - - return &lp->stats; -} - /* * This routine will, depending on the values passed to it, * either make it accept multicast packets, go into @@ -798,6 +841,7 @@ static void bf537mac_shutdown(struct net_device *dev) */ static int bf537mac_open(struct net_device *dev) { + struct bf537mac_local *lp = netdev_priv(dev); int retval; pr_debug("%s: %s\n", dev->name, __FUNCTION__); @@ -817,10 +861,10 @@ static int bf537mac_open(struct net_device *dev) if (retval) return retval; - bf537mac_setphy(dev); + phy_start(lp->phydev); setup_system_regs(dev); - bf537mac_reset(); - bf537mac_enable(dev); + bf537mac_disable(); + bf537mac_enable(); pr_debug("hardware init finished\n"); netif_start_queue(dev); @@ -837,11 +881,14 @@ static int bf537mac_open(struct net_device *dev) */ static int bf537mac_close(struct net_device *dev) { + struct bf537mac_local *lp = netdev_priv(dev); pr_debug("%s: %s\n", dev->name, __FUNCTION__); netif_stop_queue(dev); netif_carrier_off(dev); + phy_stop(lp->phydev); + /* clear everything */ bf537mac_shutdown(dev); @@ -855,6 +902,7 @@ static int __init bf537mac_probe(struct net_device *dev) { struct bf537mac_local *lp = netdev_priv(dev); int retval; + int i; /* Grab the MAC address in the MAC */ *(__le32 *) (&(dev->dev_addr[0])) = cpu_to_le32(bfin_read_EMAC_ADDRLO()); @@ -871,7 +919,6 @@ static int __init bf537mac_probe(struct net_device *dev) /* set the GPIO pins to Ethernet mode */ retval = setup_pin_mux(1); - if (retval) return retval; @@ -889,26 +936,36 @@ static int __init bf537mac_probe(struct net_device *dev) setup_mac_addr(dev->dev_addr); + /* MDIO bus initial */ + lp->mii_bus.priv = dev; + lp->mii_bus.read = mdiobus_read; + lp->mii_bus.write = mdiobus_write; + lp->mii_bus.reset = mdiobus_reset; + lp->mii_bus.name = "bfin_mac_mdio"; + lp->mii_bus.id = 0; + lp->mii_bus.irq = kmalloc(sizeof(int)*PHY_MAX_ADDR, GFP_KERNEL); + for (i = 0; i < PHY_MAX_ADDR; ++i) + lp->mii_bus.irq[i] = PHY_POLL; + + mdiobus_register(&lp->mii_bus); + + retval = mii_probe(dev); + if (retval) + return retval; + /* Fill in the fields of the device structure with ethernet values. 
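/*
 * Editor's sketch of the phylib pattern the bfin_mac conversion above
 * follows: register an mii_bus whose read/write ops wrap the MAC's MDIO
 * registers, then attach with phy_connect() and let the adjust_link
 * callback react to speed/duplex changes.  All foo_* names are illustrative,
 * not driver symbols; the mii_bus fields and the five-argument phy_connect()
 * match the API used in this tree.
 */
#include <linux/phy.h>
#include <linux/netdevice.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/errno.h>

struct foo_mac {
	struct mii_bus mii_bus;
	struct phy_device *phydev;
};

static int foo_mdio_read(struct mii_bus *bus, int phy_addr, int regnum);
static int foo_mdio_write(struct mii_bus *bus, int phy_addr, int regnum,
			  u16 value);
static void foo_adjust_link(struct net_device *dev);	/* link change hook */

static int foo_mii_init(struct net_device *dev)
{
	struct foo_mac *lp = netdev_priv(dev);
	struct phy_device *phydev;
	int i;

	lp->mii_bus.priv = dev;
	lp->mii_bus.read = foo_mdio_read;
	lp->mii_bus.write = foo_mdio_write;
	lp->mii_bus.name = "foo_mdio";
	lp->mii_bus.id = 0;
	lp->mii_bus.irq = kmalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL);
	if (!lp->mii_bus.irq)
		return -ENOMEM;
	for (i = 0; i < PHY_MAX_ADDR; i++)
		lp->mii_bus.irq[i] = PHY_POLL;	/* no PHY interrupt line */

	mdiobus_register(&lp->mii_bus);

	/* pick the first PHY the bus scan found */
	for (i = 0; i < PHY_MAX_ADDR; i++)
		if (lp->mii_bus.phy_map[i])
			break;
	if (i == PHY_MAX_ADDR)
		return -ENODEV;

	phydev = phy_connect(dev, lp->mii_bus.phy_map[i]->dev.bus_id,
			     &foo_adjust_link, 0, PHY_INTERFACE_MODE_MII);
	if (IS_ERR(phydev))
		return PTR_ERR(phydev);

	lp->phydev = phydev;		/* phy_start()/phy_stop() in open/close */
	return 0;
}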
*/ ether_setup(dev); dev->open = bf537mac_open; dev->stop = bf537mac_close; dev->hard_start_xmit = bf537mac_hard_start_xmit; + dev->set_mac_address = bf537mac_set_mac_address; dev->tx_timeout = bf537mac_timeout; - dev->get_stats = bf537mac_query_statistics; dev->set_multicast_list = bf537mac_set_multicast_list; #ifdef CONFIG_NET_POLL_CONTROLLER dev->poll_controller = bf537mac_poll; #endif - /* fill in some of the fields */ - lp->version = 1; - lp->PhyAddr = 0x01; - lp->CLKIN = 25; - lp->FullDuplex = 0; - lp->Negotiate = 1; - lp->FlowControl = 0; spin_lock_init(&lp->lock); /* now, enable interrupts */ @@ -921,9 +978,6 @@ static int __init bf537mac_probe(struct net_device *dev) return -EBUSY; } - /* Enable PHY output early */ - if (!(bfin_read_VR_CTL() & PHYCLKOE)) - bfin_write_VR_CTL(bfin_read_VR_CTL() | PHYCLKOE); retval = register_netdev(dev); if (retval == 0) { @@ -946,7 +1000,6 @@ static int bfin_mac_probe(struct platform_device *pdev) return -ENOMEM; } - SET_MODULE_OWNER(ndev); SET_NETDEV_DEV(ndev, &pdev->dev); platform_set_drvdata(pdev, ndev); @@ -978,15 +1031,30 @@ static int bfin_mac_remove(struct platform_device *pdev) return 0; } -static int bfin_mac_suspend(struct platform_device *pdev, pm_message_t state) +#ifdef CONFIG_PM +static int bfin_mac_suspend(struct platform_device *pdev, pm_message_t mesg) { + struct net_device *net_dev = platform_get_drvdata(pdev); + + if (netif_running(net_dev)) + bf537mac_close(net_dev); + return 0; } static int bfin_mac_resume(struct platform_device *pdev) { + struct net_device *net_dev = platform_get_drvdata(pdev); + + if (netif_running(net_dev)) + bf537mac_open(net_dev); + return 0; } +#else +#define bfin_mac_suspend NULL +#define bfin_mac_resume NULL +#endif /* CONFIG_PM */ static struct platform_driver bfin_mac_driver = { .probe = bfin_mac_probe, diff --git a/drivers/net/bfin_mac.h b/drivers/net/bfin_mac.h index af87189b85fafc6bab3f8c2b6a0e58d9a3707fa0..3a107ad75381ecd7ac18305952b0194f8f8844c0 100644 --- a/drivers/net/bfin_mac.h +++ b/drivers/net/bfin_mac.h @@ -31,32 +31,6 @@ * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
*/ -/* - * PHY REGISTER NAMES - */ -#define PHYREG_MODECTL 0x0000 -#define PHYREG_MODESTAT 0x0001 -#define PHYREG_PHYID1 0x0002 -#define PHYREG_PHYID2 0x0003 -#define PHYREG_ANAR 0x0004 -#define PHYREG_ANLPAR 0x0005 -#define PHYREG_ANER 0x0006 -#define PHYREG_NSR 0x0010 -#define PHYREG_LBREMR 0x0011 -#define PHYREG_REC 0x0012 -#define PHYREG_10CFG 0x0013 -#define PHYREG_PHY1_1 0x0014 -#define PHYREG_PHY1_2 0x0015 -#define PHYREG_PHY2 0x0016 -#define PHYREG_TW_1 0x0017 -#define PHYREG_TW_2 0x0018 -#define PHYREG_TEST 0x0019 - -#define PHY_RESET 0x8000 -#define PHY_ANEG_EN 0x1000 -#define PHY_DUPLEX 0x0100 -#define PHY_SPD_SET 0x2000 - #define BFIN_MAC_CSUM_OFFLOAD struct dma_descriptor { @@ -106,27 +80,16 @@ struct bf537mac_local { */ struct net_device_stats stats; - int version; - - int FlowEnabled; /* record if data flow is active */ - int EtherIntIVG; /* IVG for the ethernet interrupt */ - int RXIVG; /* IVG for the RX completion */ - int TXIVG; /* IVG for the TX completion */ - int PhyAddr; /* PHY address */ - int OpMode; /* set these bits n the OPMODE regs */ - int Port10; /* set port speed to 10 Mbit/s */ - int GenChksums; /* IP checksums to be calculated */ - int NoRcveLnth; /* dont insert recv length at start of buffer */ - int StripPads; /* remove trailing pad bytes */ - int FullDuplex; /* set full duplex mode */ - int Negotiate; /* enable auto negotiation */ - int Loopback; /* loopback at the PHY */ - int Cache; /* Buffers may be cached */ - int FlowControl; /* flow control active */ - int CLKIN; /* clock in value in MHZ */ - unsigned short IntMask; /* interrupt mask */ unsigned char Mac[6]; /* MAC address of the board */ spinlock_t lock; + + /* MII and PHY stuffs */ + int old_link; /* used by bf537_adjust_link */ + int old_speed; + int old_duplex; + + struct phy_device *phydev; + struct mii_bus mii_bus; }; extern void get_bf537_ether_addr(char *addr); diff --git a/drivers/net/bmac.c b/drivers/net/bmac.c index 9b8d7d9dbe86cc99908842d54beb3930dad5f748..a42bd19646d371e0ce75245280e446215495258e 100644 --- a/drivers/net/bmac.c +++ b/drivers/net/bmac.c @@ -19,6 +19,7 @@ #include #include #include +#include #include #include #include @@ -74,7 +75,6 @@ struct bmac_data { int tx_fill; int tx_empty; unsigned char tx_fullup; - struct net_device_stats stats; struct timer_list tx_timeout; int timeout_active; int sleeping; @@ -144,7 +144,6 @@ static unsigned char *bmac_emergency_rxbuf; static int bmac_open(struct net_device *dev); static int bmac_close(struct net_device *dev); static int bmac_transmit_packet(struct sk_buff *skb, struct net_device *dev); -static struct net_device_stats *bmac_stats(struct net_device *dev); static void bmac_set_multicast(struct net_device *dev); static void bmac_reset_and_enable(struct net_device *dev); static void bmac_start_chip(struct net_device *dev); @@ -667,7 +666,7 @@ static int bmac_transmit_packet(struct sk_buff *skb, struct net_device *dev) bp->tx_bufs[bp->tx_fill] = skb; bp->tx_fill = i; - bp->stats.tx_bytes += skb->len; + dev->stats.tx_bytes += skb->len; dbdma_continue(td); @@ -706,8 +705,8 @@ static irqreturn_t bmac_rxdma_intr(int irq, void *dev_id) nb = RX_BUFLEN - residual - 2; if (nb < (ETHERMINPACKET - ETHERCRC)) { skb = NULL; - bp->stats.rx_length_errors++; - bp->stats.rx_errors++; + dev->stats.rx_length_errors++; + dev->stats.rx_errors++; } else { skb = bp->rx_bufs[i]; bp->rx_bufs[i] = NULL; @@ -718,10 +717,10 @@ static irqreturn_t bmac_rxdma_intr(int irq, void *dev_id) skb->protocol = eth_type_trans(skb, dev); netif_rx(skb); dev->last_rx = 
jiffies; - ++bp->stats.rx_packets; - bp->stats.rx_bytes += nb; + ++dev->stats.rx_packets; + dev->stats.rx_bytes += nb; } else { - ++bp->stats.rx_dropped; + ++dev->stats.rx_dropped; } dev->last_rx = jiffies; if ((skb = bp->rx_bufs[i]) == NULL) { @@ -784,7 +783,7 @@ static irqreturn_t bmac_txdma_intr(int irq, void *dev_id) } if (bp->tx_bufs[bp->tx_empty]) { - ++bp->stats.tx_packets; + ++dev->stats.tx_packets; dev_kfree_skb_irq(bp->tx_bufs[bp->tx_empty]); } bp->tx_bufs[bp->tx_empty] = NULL; @@ -806,13 +805,6 @@ static irqreturn_t bmac_txdma_intr(int irq, void *dev_id) return IRQ_HANDLED; } -static struct net_device_stats *bmac_stats(struct net_device *dev) -{ - struct bmac_data *p = netdev_priv(dev); - - return &p->stats; -} - #ifndef SUNHME_MULTICAST /* Real fast bit-reversal algorithm, 6-bit values */ static int reverse6[64] = { @@ -1079,17 +1071,17 @@ static irqreturn_t bmac_misc_intr(int irq, void *dev_id) } /* XXDEBUG(("bmac_misc_intr, status=%#08x\n", status)); */ /* bmac_txdma_intr_inner(irq, dev_id); */ - /* if (status & FrameReceived) bp->stats.rx_dropped++; */ - if (status & RxErrorMask) bp->stats.rx_errors++; - if (status & RxCRCCntExp) bp->stats.rx_crc_errors++; - if (status & RxLenCntExp) bp->stats.rx_length_errors++; - if (status & RxOverFlow) bp->stats.rx_over_errors++; - if (status & RxAlignCntExp) bp->stats.rx_frame_errors++; - - /* if (status & FrameSent) bp->stats.tx_dropped++; */ - if (status & TxErrorMask) bp->stats.tx_errors++; - if (status & TxUnderrun) bp->stats.tx_fifo_errors++; - if (status & TxNormalCollExp) bp->stats.collisions++; + /* if (status & FrameReceived) dev->stats.rx_dropped++; */ + if (status & RxErrorMask) dev->stats.rx_errors++; + if (status & RxCRCCntExp) dev->stats.rx_crc_errors++; + if (status & RxLenCntExp) dev->stats.rx_length_errors++; + if (status & RxOverFlow) dev->stats.rx_over_errors++; + if (status & RxAlignCntExp) dev->stats.rx_frame_errors++; + + /* if (status & FrameSent) dev->stats.tx_dropped++; */ + if (status & TxErrorMask) dev->stats.tx_errors++; + if (status & TxUnderrun) dev->stats.tx_fifo_errors++; + if (status & TxNormalCollExp) dev->stats.collisions++; return IRQ_HANDLED; } @@ -1246,6 +1238,17 @@ static void bmac_reset_and_enable(struct net_device *dev) } spin_unlock_irqrestore(&bp->lock, flags); } +static void bmac_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) +{ + struct bmac_data *bp = netdev_priv(dev); + strcpy(info->driver, "bmac"); + strcpy(info->bus_info, bp->mdev->ofdev.dev.bus_id); +} + +static const struct ethtool_ops bmac_ethtool_ops = { + .get_drvinfo = bmac_get_drvinfo, + .get_link = ethtool_op_get_link, +}; static int __devinit bmac_probe(struct macio_dev *mdev, const struct of_device_id *match) { @@ -1255,6 +1258,7 @@ static int __devinit bmac_probe(struct macio_dev *mdev, const struct of_device_i unsigned char addr[6]; struct net_device *dev; int is_bmac_plus = ((int)match->data) != 0; + DECLARE_MAC_BUF(mac); if (macio_resource_count(mdev) != 3 || macio_irq_count(mdev) != 3) { printk(KERN_ERR "BMAC: can't use, need 3 addrs and 3 intrs\n"); @@ -1279,7 +1283,6 @@ static int __devinit bmac_probe(struct macio_dev *mdev, const struct of_device_i } bp = netdev_priv(dev); - SET_MODULE_OWNER(dev); SET_NETDEV_DEV(dev, &mdev->ofdev.dev); macio_set_drvdata(mdev, dev); @@ -1311,8 +1314,8 @@ static int __devinit bmac_probe(struct macio_dev *mdev, const struct of_device_i dev->open = bmac_open; dev->stop = bmac_close; + dev->ethtool_ops = &bmac_ethtool_ops; dev->hard_start_xmit = bmac_output; - 
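/*
 * Editor's sketch: the bmac hunks above switch from a private
 * net_device_stats copy to the statistics block embedded in struct
 * net_device (dev->stats), which lets the driver drop its get_stats
 * method -- the default in this tree already returns &dev->stats.
 * foo_* names are placeholders.
 */
#include <linux/netdevice.h>

static void foo_count_rx(struct net_device *dev, unsigned int len, bool ok)
{
	if (ok) {
		dev->stats.rx_packets++;	/* was priv->stats.rx_packets++ */
		dev->stats.rx_bytes += len;
	} else {
		dev->stats.rx_errors++;
	}
}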
dev->get_stats = bmac_stats; dev->set_multicast_list = bmac_set_multicast; dev->set_mac_address = bmac_set_address; @@ -1365,9 +1368,8 @@ static int __devinit bmac_probe(struct macio_dev *mdev, const struct of_device_i goto err_out_irq2; } - printk(KERN_INFO "%s: BMAC%s at", dev->name, (is_bmac_plus? "+": "")); - for (j = 0; j < 6; ++j) - printk("%c%.2x", (j? ':': ' '), dev->dev_addr[j]); + printk(KERN_INFO "%s: BMAC%s at %s", + dev->name, (is_bmac_plus ? "+" : ""), print_mac(mac, dev->dev_addr)); XXDEBUG((", base_addr=%#0lx", dev->base_addr)); printk("\n"); @@ -1530,7 +1532,7 @@ static void bmac_tx_timeout(unsigned long data) XXDEBUG((KERN_DEBUG "bmac: tx empty=%d fill=%d fullup=%d\n", bp->tx_empty, bp->tx_fill, bp->tx_fullup)); i = bp->tx_empty; - ++bp->stats.tx_errors; + ++dev->stats.tx_errors; if (i != bp->tx_fill) { dev_kfree_skb(bp->tx_bufs[i]); bp->tx_bufs[i] = NULL; diff --git a/drivers/net/bnx2.c b/drivers/net/bnx2.c index 66eed22cbd21963e70b7edddc9929ef192dae186..d68accea380bb3adcf40aaeece00d607d006d4bc 100644 --- a/drivers/net/bnx2.c +++ b/drivers/net/bnx2.c @@ -52,10 +52,12 @@ #include "bnx2_fw.h" #include "bnx2_fw2.h" +#define FW_BUF_SIZE 0x8000 + #define DRV_MODULE_NAME "bnx2" #define PFX DRV_MODULE_NAME ": " -#define DRV_MODULE_VERSION "1.6.5" -#define DRV_MODULE_RELDATE "September 20, 2007" +#define DRV_MODULE_VERSION "1.6.7" +#define DRV_MODULE_RELDATE "October 10, 2007" #define RUN_AT(x) (jiffies + (x)) @@ -428,7 +430,7 @@ bnx2_netif_stop(struct bnx2 *bp) { bnx2_disable_int_sync(bp); if (netif_running(bp->dev)) { - netif_poll_disable(bp->dev); + napi_disable(&bp->napi); netif_tx_disable(bp->dev); bp->dev->trans_start = jiffies; /* prevent tx timeout */ } @@ -440,7 +442,7 @@ bnx2_netif_start(struct bnx2 *bp) if (atomic_dec_and_test(&bp->intr_sem)) { if (netif_running(bp->dev)) { netif_wake_queue(bp->dev); - netif_poll_enable(bp->dev); + napi_enable(&bp->napi); bnx2_enable_int(bp); } } @@ -2551,7 +2553,7 @@ bnx2_msi(int irq, void *dev_instance) if (unlikely(atomic_read(&bp->intr_sem) != 0)) return IRQ_HANDLED; - netif_rx_schedule(dev); + netif_rx_schedule(dev, &bp->napi); return IRQ_HANDLED; } @@ -2568,7 +2570,7 @@ bnx2_msi_1shot(int irq, void *dev_instance) if (unlikely(atomic_read(&bp->intr_sem) != 0)) return IRQ_HANDLED; - netif_rx_schedule(dev); + netif_rx_schedule(dev, &bp->napi); return IRQ_HANDLED; } @@ -2604,9 +2606,9 @@ bnx2_interrupt(int irq, void *dev_instance) if (unlikely(atomic_read(&bp->intr_sem) != 0)) return IRQ_HANDLED; - if (netif_rx_schedule_prep(dev)) { + if (netif_rx_schedule_prep(dev, &bp->napi)) { bp->last_status_idx = sblk->status_idx; - __netif_rx_schedule(dev); + __netif_rx_schedule(dev, &bp->napi); } return IRQ_HANDLED; @@ -2631,10 +2633,8 @@ bnx2_has_work(struct bnx2 *bp) return 0; } -static int -bnx2_poll(struct net_device *dev, int *budget) +static int bnx2_poll_work(struct bnx2 *bp, int work_done, int budget) { - struct bnx2 *bp = netdev_priv(dev); struct status_block *sblk = bp->status_blk; u32 status_attn_bits = sblk->status_attn_bits; u32 status_attn_bits_ack = sblk->status_attn_bits_ack; @@ -2655,41 +2655,47 @@ bnx2_poll(struct net_device *dev, int *budget) if (bp->status_blk->status_tx_quick_consumer_index0 != bp->hw_tx_cons) bnx2_tx_int(bp); - if (bp->status_blk->status_rx_quick_consumer_index0 != bp->hw_rx_cons) { - int orig_budget = *budget; - int work_done; + if (bp->status_blk->status_rx_quick_consumer_index0 != bp->hw_rx_cons) + work_done += bnx2_rx_int(bp, budget - work_done); - if (orig_budget > dev->quota) - orig_budget = 
dev->quota; + return work_done; +} - work_done = bnx2_rx_int(bp, orig_budget); - *budget -= work_done; - dev->quota -= work_done; - } +static int bnx2_poll(struct napi_struct *napi, int budget) +{ + struct bnx2 *bp = container_of(napi, struct bnx2, napi); + int work_done = 0; - bp->last_status_idx = bp->status_blk->status_idx; - rmb(); + while (1) { + work_done = bnx2_poll_work(bp, work_done, budget); - if (!bnx2_has_work(bp)) { - netif_rx_complete(dev); - if (likely(bp->flags & USING_MSI_FLAG)) { + if (unlikely(work_done >= budget)) + break; + + if (likely(!bnx2_has_work(bp))) { + bp->last_status_idx = bp->status_blk->status_idx; + rmb(); + + netif_rx_complete(bp->dev, napi); + if (likely(bp->flags & USING_MSI_FLAG)) { + REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, + BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | + bp->last_status_idx); + return 0; + } REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | + BNX2_PCICFG_INT_ACK_CMD_MASK_INT | bp->last_status_idx); - return 0; - } - REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, - BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | - BNX2_PCICFG_INT_ACK_CMD_MASK_INT | - bp->last_status_idx); - REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, - BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | - bp->last_status_idx); - return 0; + REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, + BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | + bp->last_status_idx); + break; + } } - return 1; + return work_done; } /* Called with rtnl_lock from vlan functions and also netif_tx_lock @@ -2767,92 +2773,6 @@ bnx2_set_rx_mode(struct net_device *dev) spin_unlock_bh(&bp->phy_lock); } -#define FW_BUF_SIZE 0x8000 - -static int -bnx2_gunzip_init(struct bnx2 *bp) -{ - if ((bp->gunzip_buf = vmalloc(FW_BUF_SIZE)) == NULL) - goto gunzip_nomem1; - - if ((bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL)) == NULL) - goto gunzip_nomem2; - - bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(), GFP_KERNEL); - if (bp->strm->workspace == NULL) - goto gunzip_nomem3; - - return 0; - -gunzip_nomem3: - kfree(bp->strm); - bp->strm = NULL; - -gunzip_nomem2: - vfree(bp->gunzip_buf); - bp->gunzip_buf = NULL; - -gunzip_nomem1: - printk(KERN_ERR PFX "%s: Cannot allocate firmware buffer for " - "uncompression.\n", bp->dev->name); - return -ENOMEM; -} - -static void -bnx2_gunzip_end(struct bnx2 *bp) -{ - kfree(bp->strm->workspace); - - kfree(bp->strm); - bp->strm = NULL; - - if (bp->gunzip_buf) { - vfree(bp->gunzip_buf); - bp->gunzip_buf = NULL; - } -} - -static int -bnx2_gunzip(struct bnx2 *bp, u8 *zbuf, int len, void **outbuf, int *outlen) -{ - int n, rc; - - /* check gzip header */ - if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED)) - return -EINVAL; - - n = 10; - -#define FNAME 0x8 - if (zbuf[3] & FNAME) - while ((zbuf[n++] != 0) && (n < len)); - - bp->strm->next_in = zbuf + n; - bp->strm->avail_in = len - n; - bp->strm->next_out = bp->gunzip_buf; - bp->strm->avail_out = FW_BUF_SIZE; - - rc = zlib_inflateInit2(bp->strm, -MAX_WBITS); - if (rc != Z_OK) - return rc; - - rc = zlib_inflate(bp->strm, Z_FINISH); - - *outlen = FW_BUF_SIZE - bp->strm->avail_out; - *outbuf = bp->gunzip_buf; - - if ((rc != Z_OK) && (rc != Z_STREAM_END)) - printk(KERN_ERR PFX "%s: Firmware decompression error: %s\n", - bp->dev->name, bp->strm->msg); - - zlib_inflateEnd(bp->strm); - - if (rc == Z_STREAM_END) - return 0; - - return rc; -} - static void load_rv2p_fw(struct bnx2 *bp, u32 *rv2p_code, u32 rv2p_code_len, u32 rv2p_proc) @@ -2902,19 +2822,13 @@ load_cpu_fw(struct bnx2 *bp, struct cpu_reg *cpu_reg, struct fw_info *fw) /* Load the Text area. 
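/*
 * Editor's sketch of the poll signature the bnx2 conversion above adopts:
 * poll() now receives the napi_struct and a budget, returns the amount of
 * work done, and only calls netif_rx_complete() (then re-enables interrupts)
 * once it has polled below budget with no work left.  foo_* symbols are
 * placeholders for the driver's own RX/IRQ helpers.
 */
#include <linux/netdevice.h>

struct foo_dev {
	struct napi_struct napi;
	struct net_device *dev;
};

static int foo_rx(struct foo_dev *fp, int limit);	/* returns packets handled */
static int foo_has_work(struct foo_dev *fp);
static void foo_enable_irq(struct foo_dev *fp);

static int foo_poll(struct napi_struct *napi, int budget)
{
	struct foo_dev *fp = container_of(napi, struct foo_dev, napi);
	int work_done;

	work_done = foo_rx(fp, budget);
	if (work_done < budget && !foo_has_work(fp)) {
		netif_rx_complete(fp->dev, napi);	/* leave polled mode */
		foo_enable_irq(fp);
	}
	return work_done;	/* old style returned 0/1 and adjusted *budget */
}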
*/ offset = cpu_reg->spad_base + (fw->text_addr - cpu_reg->mips_view_base); if (fw->gz_text) { - u32 text_len; - void *text; + int j; - rc = bnx2_gunzip(bp, fw->gz_text, fw->gz_text_len, &text, - &text_len); - if (rc) + rc = zlib_inflate_blob(fw->text, FW_BUF_SIZE, fw->gz_text, + fw->gz_text_len); + if (rc < 0) return rc; - fw->text = text; - } - if (fw->gz_text) { - int j; - for (j = 0; j < (fw->text_len / 4); j++, offset += 4) { REG_WR_IND(bp, offset, cpu_to_le32(fw->text[j])); } @@ -2932,21 +2846,21 @@ load_cpu_fw(struct bnx2 *bp, struct cpu_reg *cpu_reg, struct fw_info *fw) /* Load the SBSS area. */ offset = cpu_reg->spad_base + (fw->sbss_addr - cpu_reg->mips_view_base); - if (fw->sbss) { + if (fw->sbss_len) { int j; for (j = 0; j < (fw->sbss_len / 4); j++, offset += 4) { - REG_WR_IND(bp, offset, fw->sbss[j]); + REG_WR_IND(bp, offset, 0); } } /* Load the BSS area. */ offset = cpu_reg->spad_base + (fw->bss_addr - cpu_reg->mips_view_base); - if (fw->bss) { + if (fw->bss_len) { int j; for (j = 0; j < (fw->bss_len/4); j++, offset += 4) { - REG_WR_IND(bp, offset, fw->bss[j]); + REG_WR_IND(bp, offset, 0); } } @@ -2979,27 +2893,24 @@ bnx2_init_cpus(struct bnx2 *bp) { struct cpu_reg cpu_reg; struct fw_info *fw; - int rc = 0; + int rc; void *text; - u32 text_len; - - if ((rc = bnx2_gunzip_init(bp)) != 0) - return rc; /* Initialize the RV2P processor. */ - rc = bnx2_gunzip(bp, bnx2_rv2p_proc1, sizeof(bnx2_rv2p_proc1), &text, - &text_len); - if (rc) + text = vmalloc(FW_BUF_SIZE); + if (!text) + return -ENOMEM; + rc = zlib_inflate_blob(text, FW_BUF_SIZE, bnx2_rv2p_proc1, sizeof(bnx2_rv2p_proc1)); + if (rc < 0) goto init_cpu_err; - load_rv2p_fw(bp, text, text_len, RV2P_PROC1); + load_rv2p_fw(bp, text, rc /* == len */, RV2P_PROC1); - rc = bnx2_gunzip(bp, bnx2_rv2p_proc2, sizeof(bnx2_rv2p_proc2), &text, - &text_len); - if (rc) + rc = zlib_inflate_blob(text, FW_BUF_SIZE, bnx2_rv2p_proc2, sizeof(bnx2_rv2p_proc2)); + if (rc < 0) goto init_cpu_err; - load_rv2p_fw(bp, text, text_len, RV2P_PROC2); + load_rv2p_fw(bp, text, rc /* == len */, RV2P_PROC2); /* Initialize the RX Processor. */ cpu_reg.mode = BNX2_RXP_CPU_MODE; @@ -3020,6 +2931,7 @@ bnx2_init_cpus(struct bnx2 *bp) else fw = &bnx2_rxp_fw_06; + fw->text = text; rc = load_cpu_fw(bp, &cpu_reg, fw); if (rc) goto init_cpu_err; @@ -3043,6 +2955,7 @@ bnx2_init_cpus(struct bnx2 *bp) else fw = &bnx2_txp_fw_06; + fw->text = text; rc = load_cpu_fw(bp, &cpu_reg, fw); if (rc) goto init_cpu_err; @@ -3066,6 +2979,7 @@ bnx2_init_cpus(struct bnx2 *bp) else fw = &bnx2_tpat_fw_06; + fw->text = text; rc = load_cpu_fw(bp, &cpu_reg, fw); if (rc) goto init_cpu_err; @@ -3089,6 +3003,7 @@ bnx2_init_cpus(struct bnx2 *bp) else fw = &bnx2_com_fw_06; + fw->text = text; rc = load_cpu_fw(bp, &cpu_reg, fw); if (rc) goto init_cpu_err; @@ -3110,12 +3025,13 @@ bnx2_init_cpus(struct bnx2 *bp) if (CHIP_NUM(bp) == CHIP_NUM_5709) { fw = &bnx2_cp_fw_09; + fw->text = text; rc = load_cpu_fw(bp, &cpu_reg, fw); if (rc) goto init_cpu_err; } init_cpu_err: - bnx2_gunzip_end(bp); + vfree(text); return rc; } @@ -3500,7 +3416,7 @@ bnx2_init_nvram(struct bnx2 *bp) /* Determine the selected interface. 
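/*
 * Editor's sketch: the bnx2 hunks above drop the driver-private gunzip
 * helpers in favour of the library routine zlib_inflate_blob(), which
 * inflates a gzip blob into a caller-supplied buffer and returns the
 * decompressed length (negative on error).  The buffer size mirrors
 * FW_BUF_SIZE from the patch; the foo_* wrapper is illustrative.
 */
#include <linux/zlib.h>
#include <linux/vmalloc.h>
#include <linux/errno.h>

#define FOO_FW_BUF_SIZE 0x8000		/* same size the patch uses */

static int foo_load_blob(void *gz, unsigned int gz_len)
{
	void *buf;
	int len;

	buf = vmalloc(FOO_FW_BUF_SIZE);
	if (!buf)
		return -ENOMEM;

	len = zlib_inflate_blob(buf, FOO_FW_BUF_SIZE, gz, gz_len);
	if (len < 0) {			/* inflate failed */
		vfree(buf);
		return len;
	}

	/* ... hand buf/len to the device here ... */

	vfree(buf);
	return 0;
}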
*/ val = REG_RD(bp, BNX2_NVM_CFG1); - entry_count = sizeof(flash_table) / sizeof(struct flash_spec); + entry_count = ARRAY_SIZE(flash_table); if (val & 0x40000000) { @@ -3872,12 +3788,6 @@ bnx2_init_remote_phy(struct bnx2 *bp) return; if (val & BNX2_FW_CAP_REMOTE_PHY_CAPABLE) { - if (netif_running(bp->dev)) { - val = BNX2_DRV_ACK_CAP_SIGNATURE | - BNX2_FW_CAP_REMOTE_PHY_CAPABLE; - REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_ACK_CAP_MB, - val); - } bp->phy_flags |= REMOTE_PHY_CAP_FLAG; val = REG_RD_IND(bp, bp->shmem_base + BNX2_LINK_STATUS); @@ -3885,6 +3795,22 @@ bnx2_init_remote_phy(struct bnx2 *bp) bp->phy_port = PORT_FIBRE; else bp->phy_port = PORT_TP; + + if (netif_running(bp->dev)) { + u32 sig; + + if (val & BNX2_LINK_STATUS_LINK_UP) { + bp->link_up = 1; + netif_carrier_on(bp->dev); + } else { + bp->link_up = 0; + netif_carrier_off(bp->dev); + } + sig = BNX2_DRV_ACK_CAP_SIGNATURE | + BNX2_FW_CAP_REMOTE_PHY_CAPABLE; + REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_ACK_CAP_MB, + sig); + } } } @@ -3893,6 +3819,7 @@ bnx2_reset_chip(struct bnx2 *bp, u32 reset_code) { u32 val; int i, rc = 0; + u8 old_port; /* Wait for the current PCI transaction to complete before * issuing a reset. */ @@ -3971,8 +3898,9 @@ bnx2_reset_chip(struct bnx2 *bp, u32 reset_code) return rc; spin_lock_bh(&bp->phy_lock); + old_port = bp->phy_port; bnx2_init_remote_phy(bp); - if (bp->phy_flags & REMOTE_PHY_CAP_FLAG) + if ((bp->phy_flags & REMOTE_PHY_CAP_FLAG) && old_port != bp->phy_port) bnx2_set_default_remote_link(bp); spin_unlock_bh(&bp->phy_lock); @@ -4661,6 +4589,9 @@ bnx2_run_loopback(struct bnx2 *bp, int loopback_mode) bnx2_set_mac_loopback(bp); } else if (loopback_mode == BNX2_PHY_LOOPBACK) { + if (bp->phy_flags & REMOTE_PHY_CAP_FLAG) + return 0; + bp->loopback = PHY_LOOPBACK; bnx2_set_phy_loopback(bp); } @@ -4829,6 +4760,11 @@ bnx2_test_link(struct bnx2 *bp) { u32 bmsr; + if (bp->phy_flags & REMOTE_PHY_CAP_FLAG) { + if (bp->link_up) + return 0; + return -ENODEV; + } spin_lock_bh(&bp->phy_lock); bnx2_enable_bmsr1(bp); bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr); @@ -5039,6 +4975,8 @@ bnx2_open(struct net_device *dev) if (rc) return rc; + napi_enable(&bp->napi); + if ((bp->flags & MSI_CAP_FLAG) && !disable_msi) { if (pci_enable_msi(bp->pdev) == 0) { bp->flags |= USING_MSI_FLAG; @@ -5049,6 +4987,7 @@ bnx2_open(struct net_device *dev) rc = bnx2_request_irq(bp); if (rc) { + napi_disable(&bp->napi); bnx2_free_mem(bp); return rc; } @@ -5056,6 +4995,7 @@ bnx2_open(struct net_device *dev) rc = bnx2_init_nic(bp); if (rc) { + napi_disable(&bp->napi); bnx2_free_irq(bp); bnx2_free_skbs(bp); bnx2_free_mem(bp); @@ -5088,6 +5028,7 @@ bnx2_open(struct net_device *dev) rc = bnx2_request_irq(bp); if (rc) { + napi_disable(&bp->napi); bnx2_free_skbs(bp); bnx2_free_mem(bp); del_timer_sync(&bp->timer); @@ -5301,7 +5242,8 @@ bnx2_close(struct net_device *dev) while (bp->in_reset_task) msleep(1); - bnx2_netif_stop(bp); + bnx2_disable_int_sync(bp); + napi_disable(&bp->napi); del_timer_sync(&bp->timer); if (bp->flags & NO_WOL_FLAG) reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN; @@ -6070,9 +6012,16 @@ static struct { }; static int -bnx2_self_test_count(struct net_device *dev) +bnx2_get_sset_count(struct net_device *dev, int sset) { - return BNX2_NUM_TESTS; + switch (sset) { + case ETH_SS_TEST: + return BNX2_NUM_TESTS; + case ETH_SS_STATS: + return BNX2_NUM_STATS; + default: + return -EOPNOTSUPP; + } } static void @@ -6146,12 +6095,6 @@ bnx2_get_strings(struct net_device *dev, u32 stringset, u8 *buf) } } -static int -bnx2_get_stats_count(struct 
net_device *dev) -{ - return BNX2_NUM_STATS; -} - static void bnx2_get_ethtool_stats(struct net_device *dev, struct ethtool_stats *stats, u64 *buf) @@ -6259,18 +6202,14 @@ static const struct ethtool_ops bnx2_ethtool_ops = { .set_pauseparam = bnx2_set_pauseparam, .get_rx_csum = bnx2_get_rx_csum, .set_rx_csum = bnx2_set_rx_csum, - .get_tx_csum = ethtool_op_get_tx_csum, .set_tx_csum = bnx2_set_tx_csum, - .get_sg = ethtool_op_get_sg, .set_sg = ethtool_op_set_sg, - .get_tso = ethtool_op_get_tso, .set_tso = bnx2_set_tso, - .self_test_count = bnx2_self_test_count, .self_test = bnx2_self_test, .get_strings = bnx2_get_strings, .phys_id = bnx2_phys_id, - .get_stats_count = bnx2_get_stats_count, .get_ethtool_stats = bnx2_get_ethtool_stats, + .get_sset_count = bnx2_get_sset_count, }; /* Called with rtnl_lock */ @@ -6476,7 +6415,6 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev) u32 reg; u64 dma_mask, persist_dma_mask; - SET_MODULE_OWNER(dev); SET_NETDEV_DEV(dev, &pdev->dev); bp = netdev_priv(dev); @@ -6643,8 +6581,11 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev) if (i != 2) bp->fw_version[j++] = '.'; } - if (REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_FEATURE) & - BNX2_PORT_FEATURE_ASF_ENABLED) { + reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_FEATURE); + if (reg & BNX2_PORT_FEATURE_WOL_ENABLED) + bp->wol = 1; + + if (reg & BNX2_PORT_FEATURE_ASF_ENABLED) { bp->flags |= ASF_ENABLE_FLAG; for (i = 0; i < 30; i++) { @@ -6714,11 +6655,14 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev) bp->phy_port = PORT_TP; if (bp->phy_flags & PHY_SERDES_FLAG) { bp->phy_port = PORT_FIBRE; - bp->flags |= NO_WOL_FLAG; + reg = REG_RD_IND(bp, bp->shmem_base + + BNX2_SHARED_HW_CFG_CONFIG); + if (!(reg & BNX2_SHARED_HW_CFG_GIG_LINK_ON_VAUX)) { + bp->flags |= NO_WOL_FLAG; + bp->wol = 0; + } if (CHIP_NUM(bp) != CHIP_NUM_5706) { bp->phy_addr = 2; - reg = REG_RD_IND(bp, bp->shmem_base + - BNX2_SHARED_HW_CFG_CONFIG); if (reg & BNX2_SHARED_HW_CFG_PHY_2_5G) bp->phy_flags |= PHY_2_5G_CAPABLE_FLAG; } @@ -6733,8 +6677,10 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev) if ((CHIP_ID(bp) == CHIP_ID_5708_A0) || (CHIP_ID(bp) == CHIP_ID_5708_B0) || - (CHIP_ID(bp) == CHIP_ID_5708_B1)) + (CHIP_ID(bp) == CHIP_ID_5708_B1)) { bp->flags |= NO_WOL_FLAG; + bp->wol = 0; + } if (CHIP_ID(bp) == CHIP_ID_5706_A0) { bp->tx_quick_cons_trip_int = @@ -6827,8 +6773,9 @@ bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) static int version_printed = 0; struct net_device *dev = NULL; struct bnx2 *bp; - int rc, i; + int rc; char str[40]; + DECLARE_MAC_BUF(mac); if (version_printed++ == 0) printk(KERN_INFO "%s", version); @@ -6858,11 +6805,10 @@ bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) #ifdef BCM_VLAN dev->vlan_rx_register = bnx2_vlan_rx_register; #endif - dev->poll = bnx2_poll; dev->ethtool_ops = &bnx2_ethtool_ops; - dev->weight = 64; bp = netdev_priv(dev); + netif_napi_add(dev, &bp->napi, bnx2_poll, 64); #if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER) dev->poll_controller = poll_bnx2; @@ -6897,19 +6843,14 @@ bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) } printk(KERN_INFO "%s: %s (%c%d) %s found at mem %lx, " - "IRQ %d, ", + "IRQ %d, node addr %s\n", dev->name, bp->name, ((CHIP_ID(bp) & 0xf000) >> 12) + 'A', ((CHIP_ID(bp) & 0x0ff0) >> 4), bnx2_bus_string(bp, str), dev->base_addr, - bp->pdev->irq); - - printk("node addr "); - for (i = 0; i < 6; i++) - printk("%2.2x", dev->dev_addr[i]); - printk("\n"); + 
bp->pdev->irq, print_mac(mac, dev->dev_addr)); return 0; } diff --git a/drivers/net/bnx2.h b/drivers/net/bnx2.h index 102adfe1e92327cba3e392a9b6dfe199ee67f5ae..1dce0d1a258167139e591381fcdff21681e20421 100644 --- a/drivers/net/bnx2.h +++ b/drivers/net/bnx2.h @@ -6473,6 +6473,8 @@ struct bnx2 { struct net_device *dev; struct pci_dev *pdev; + struct napi_struct napi; + atomic_t intr_sem; struct status_block *status_blk; @@ -6679,9 +6681,6 @@ struct bnx2 { u32 flash_size; int status_stats_size; - - struct z_stream_s *strm; - void *gunzip_buf; }; static u32 bnx2_reg_rd_ind(struct bnx2 *bp, u32 offset); @@ -6753,13 +6752,11 @@ struct fw_info { const u32 sbss_addr; const u32 sbss_len; const u32 sbss_index; - const u32 *sbss; /* BSS section. */ const u32 bss_addr; const u32 bss_len; const u32 bss_index; - const u32 *bss; /* Read-only section. */ const u32 rodata_addr; @@ -6911,6 +6908,7 @@ struct fw_info { #define BNX2_SHARED_HW_CFG_LED_MODE_MAC 0 #define BNX2_SHARED_HW_CFG_LED_MODE_GPHY1 0x100 #define BNX2_SHARED_HW_CFG_LED_MODE_GPHY2 0x200 +#define BNX2_SHARED_HW_CFG_GIG_LINK_ON_VAUX 0x8000 #define BNX2_SHARED_HW_CFG_CONFIG2 0x00000040 #define BNX2_SHARED_HW_CFG2_NVM_SIZE_MASK 0x00fff000 diff --git a/drivers/net/bnx2_fw.h b/drivers/net/bnx2_fw.h index b49f439e0f672b451c0f31cf6004e36f36fec834..a6d78243163b1b54ff6170f72609b1e1458639a7 100644 --- a/drivers/net/bnx2_fw.h +++ b/drivers/net/bnx2_fw.h @@ -15,7 +15,8 @@ */ static u8 bnx2_COM_b06FwText[] = { - 0x1f, 0x8b, 0x08, 0x00, 0x45, 0x30, 0xe7, 0x45, 0x00, 0x03, 0xdc, 0x5a, +/* 0x1f, 0x8b, 0x08, 0x00, 0x45, 0x30, 0xe7, 0x45, 0x00, 0x03, */ + 0xdc, 0x5a, 0x6b, 0x6c, 0x1c, 0xd7, 0x75, 0x3e, 0x33, 0x3b, 0x4b, 0xae, 0xc8, 0x15, 0x35, 0xa2, 0xc6, 0xf4, 0x5a, 0xa2, 0xed, 0x5d, 0x72, 0x28, 0x12, 0x96, 0xec, 0x6e, 0x68, 0xda, 0x62, 0x8c, 0x8d, 0xb4, 0xd9, 0xa5, 0x0c, 0xa1, @@ -1047,8 +1048,6 @@ static const u32 bnx2_COM_b06FwRodata[(0x88/4) + 1] = { 0x08002bd8, 0x08002c08, 0x08002c38, 0x00000000, 0x080060cc, 0x080060cc, 0x080060cc, 0x080060cc, 0x080060cc, 0x08006100, 0x08006100, 0x08006140, 0x0800614c, 0x0800614c, 0x080060cc, 0x00000000, 0x00000000 }; -static const u32 bnx2_COM_b06FwBss[(0x88/4) + 1] = { 0x0 }; -static const u32 bnx2_COM_b06FwSbss[(0x60/4) + 1] = { 0x0 }; static struct fw_info bnx2_com_fw_06 = { .ver_major = 0x3, @@ -1071,12 +1070,10 @@ static struct fw_info bnx2_com_fw_06 = { .sbss_addr = 0x08007e00, .sbss_len = 0x60, .sbss_index = 0x0, - .sbss = bnx2_COM_b06FwSbss, .bss_addr = 0x08007e60, .bss_len = 0x88, .bss_index = 0x0, - .bss = bnx2_COM_b06FwBss, .rodata_addr = 0x08007d58, .rodata_len = 0x88, @@ -1085,8 +1082,9 @@ static struct fw_info bnx2_com_fw_06 = { }; static u8 bnx2_RXP_b06FwText[] = { - 0x1f, 0x8b, 0x08, 0x08, 0xcb, 0xa3, 0x46, 0x45, 0x00, 0x03, 0x74, 0x65, - 0x73, 0x74, 0x31, 0x2e, 0x62, 0x69, 0x6e, 0x00, 0xec, 0x5c, 0x6f, 0x6c, +/* 0x1f, 0x8b, 0x08, 0x08, 0xcb, 0xa3, 0x46, 0x45, 0x00, 0x03, 0x74, 0x65, + 0x73, 0x74, 0x31, 0x2e, 0x62, 0x69, 0x6e, 0x00, */ + 0xec, 0x5c, 0x6f, 0x6c, 0x1c, 0xc7, 0x75, 0x7f, 0x3b, 0xbb, 0xa4, 0x4e, 0xd4, 0x91, 0x5c, 0x1e, 0x4f, 0xf4, 0x49, 0x66, 0x94, 0x5d, 0x71, 0x25, 0x5e, 0x2d, 0xc6, 0x5d, 0x31, 0x57, 0x9b, 0x08, 0xce, 0xf1, 0x79, 0xef, 0x64, 0xb1, 0x86, 0x0a, @@ -1760,9 +1758,6 @@ static u32 bnx2_RXP_b06FwRodata[(0x278/4) + 1] = { 0x08006030, 0x08006030, 0x08006018, 0x08006030, 0x08006030, 0x08006030, 0x08006024, 0x00000000, 0x00000000 }; -static u32 bnx2_RXP_b06FwBss[(0x13dc/4) + 1] = { 0x0 }; -static u32 bnx2_RXP_b06FwSbss[(0x2c/4) + 1] = { 0x0 }; - static struct fw_info bnx2_rxp_fw_06 = { 
.ver_major = 0x2, .ver_minor = 0x8, @@ -1784,12 +1779,10 @@ static struct fw_info bnx2_rxp_fw_06 = { .sbss_addr = 0x080069c0, .sbss_len = 0x2c, .sbss_index = 0x0, - .sbss = bnx2_RXP_b06FwSbss, .bss_addr = 0x080069f0, .bss_len = 0x13dc, .bss_index = 0x0, - .bss = bnx2_RXP_b06FwBss, .rodata_addr = 0x08006728, .rodata_len = 0x278, @@ -1798,8 +1791,9 @@ static struct fw_info bnx2_rxp_fw_06 = { }; static u8 bnx2_rv2p_proc1[] = { - 0x1f, 0x8b, 0x08, 0x08, 0x5e, 0xd0, 0x41, 0x44, 0x00, 0x03, 0x74, 0x65, - 0x73, 0x74, 0x31, 0x2e, 0x62, 0x69, 0x6e, 0x00, 0xc5, 0x56, 0xcf, 0x6b, +/* 0x1f, 0x8b, 0x08, 0x08, 0x5e, 0xd0, 0x41, 0x44, 0x00, 0x03, 0x74, 0x65, + 0x73, 0x74, 0x31, 0x2e, 0x62, 0x69, 0x6e, 0x00, */ + 0xc5, 0x56, 0xcf, 0x6b, 0x13, 0x51, 0x10, 0x9e, 0xec, 0x6e, 0xb2, 0xdb, 0x74, 0xbb, 0x1b, 0x2b, 0xda, 0xa0, 0xb1, 0x8d, 0x51, 0x6a, 0x7f, 0xa4, 0xb4, 0x11, 0x0f, 0x82, 0x42, 0x25, 0x3d, 0x04, 0x54, 0x44, 0x7a, 0x28, 0x22, 0x82, 0x36, 0x8a, @@ -1877,8 +1871,9 @@ static u8 bnx2_rv2p_proc1[] = { 0x12, 0x3d, 0x80, 0x0b, 0x00, 0x00, 0x00 }; static u8 bnx2_rv2p_proc2[] = { - 0x1f, 0x8b, 0x08, 0x08, 0x7e, 0xd1, 0x41, 0x44, 0x00, 0x03, 0x74, 0x65, - 0x73, 0x74, 0x31, 0x2e, 0x62, 0x69, 0x6e, 0x00, 0xcd, 0x58, 0x5b, 0x6c, +/* 0x1f, 0x8b, 0x08, 0x08, 0x7e, 0xd1, 0x41, 0x44, 0x00, 0x03, 0x74, 0x65, + 0x73, 0x74, 0x31, 0x2e, 0x62, 0x69, 0x6e, 0x00, */ + 0xcd, 0x58, 0x5b, 0x6c, 0x54, 0x55, 0x14, 0x3d, 0xf3, 0xe8, 0xcc, 0x9d, 0xe9, 0xed, 0x9d, 0xf2, 0xb2, 0x03, 0xad, 0x08, 0xe5, 0xd1, 0x56, 0x29, 0xe8, 0x54, 0xab, 0x18, 0x15, 0x2c, 0x5a, 0x8c, 0x26, 0x68, 0xf0, 0xf9, 0x63, 0x14, 0x04, 0xda, @@ -2057,8 +2052,9 @@ static u8 bnx2_rv2p_proc2[] = { 0x17, 0x00, 0x00, 0x00 }; static u8 bnx2_TPAT_b06FwText[] = { - 0x1f, 0x8b, 0x08, 0x08, 0x47, 0xd2, 0x41, 0x44, 0x00, 0x03, 0x74, 0x65, - 0x73, 0x74, 0x31, 0x2e, 0x62, 0x69, 0x6e, 0x00, 0xc5, 0x57, 0x4d, 0x68, +/* 0x1f, 0x8b, 0x08, 0x08, 0x47, 0xd2, 0x41, 0x44, 0x00, 0x03, 0x74, 0x65, + 0x73, 0x74, 0x31, 0x2e, 0x62, 0x69, 0x6e, 0x00, */ + 0xc5, 0x57, 0x4d, 0x68, 0x1c, 0xe7, 0x19, 0x7e, 0xe7, 0x77, 0x47, 0x62, 0x25, 0x8d, 0x93, 0x3d, 0xac, 0x5d, 0xa5, 0x99, 0x91, 0x46, 0x3f, 0x54, 0x26, 0x9e, 0x84, 0xa5, 0x56, 0x61, 0x20, 0xe3, 0x99, 0x95, 0x2c, 0x0c, 0x05, 0x07, 0x42, 0x08, @@ -2252,8 +2248,6 @@ static u8 bnx2_TPAT_b06FwText[] = { static u32 bnx2_TPAT_b06FwData[(0x0/4) + 1] = { 0x0 }; static u32 bnx2_TPAT_b06FwRodata[(0x0/4) + 1] = { 0x0 }; -static u32 bnx2_TPAT_b06FwBss[(0x250/4) + 1] = { 0x0 }; -static u32 bnx2_TPAT_b06FwSbss[(0x34/4) + 1] = { 0x0 }; static struct fw_info bnx2_tpat_fw_06 = { .ver_major = 0x1, @@ -2276,12 +2270,10 @@ static struct fw_info bnx2_tpat_fw_06 = { .sbss_addr = 0x08001a60, .sbss_len = 0x34, .sbss_index = 0x0, - .sbss = bnx2_TPAT_b06FwSbss, .bss_addr = 0x08001aa0, .bss_len = 0x250, .bss_index = 0x0, - .bss = bnx2_TPAT_b06FwBss, .rodata_addr = 0x00000000, .rodata_len = 0x0, @@ -2290,8 +2282,9 @@ static struct fw_info bnx2_tpat_fw_06 = { }; static u8 bnx2_TXP_b06FwText[] = { - 0x1f, 0x8b, 0x08, 0x08, 0x21, 0xd3, 0x41, 0x44, 0x00, 0x03, 0x74, 0x65, - 0x73, 0x74, 0x31, 0x2e, 0x62, 0x69, 0x6e, 0x00, 0xed, 0x5c, 0x6d, 0x6c, +/* 0x1f, 0x8b, 0x08, 0x08, 0x21, 0xd3, 0x41, 0x44, 0x00, 0x03, 0x74, 0x65, + 0x73, 0x74, 0x31, 0x2e, 0x62, 0x69, 0x6e, 0x00, */ + 0xed, 0x5c, 0x6d, 0x6c, 0x1b, 0xf7, 0x79, 0x7f, 0xee, 0x85, 0xd2, 0x51, 0x96, 0xe9, 0x93, 0xc2, 0x78, 0x6c, 0xc0, 0xa6, 0x77, 0xd6, 0x51, 0x66, 0x20, 0xb5, 0xa0, 0x05, 0x36, 0x55, 0x87, 0x43, 0x73, 0x3e, 0x52, 0x2f, 0x4e, 0x5c, 0x57, 0x71, @@ -2708,8 +2701,6 @@ static u8 bnx2_TXP_b06FwText[] = { static 
u32 bnx2_TXP_b06FwData[(0x0/4) + 1] = { 0x0 }; static u32 bnx2_TXP_b06FwRodata[(0x0/4) + 1] = { 0x0 }; -static u32 bnx2_TXP_b06FwBss[(0x1c4/4) + 1] = { 0x0 }; -static u32 bnx2_TXP_b06FwSbss[(0x38/4) + 1] = { 0x0 }; static struct fw_info bnx2_txp_fw_06 = { .ver_major = 0x1, @@ -2732,12 +2723,10 @@ static struct fw_info bnx2_txp_fw_06 = { .sbss_addr = 0x08005760, .sbss_len = 0x38, .sbss_index = 0x0, - .sbss = bnx2_TXP_b06FwSbss, .bss_addr = 0x080057a0, .bss_len = 0x1c4, .bss_index = 0x0, - .bss = bnx2_TXP_b06FwBss, .rodata_addr = 0x00000000, .rodata_len = 0x0, diff --git a/drivers/net/bnx2_fw2.h b/drivers/net/bnx2_fw2.h index 2c067531f0310961e96b934adf01965a2be79c86..5bd52bead9bf38b860b29412cfc6cfdda51c430d 100644 --- a/drivers/net/bnx2_fw2.h +++ b/drivers/net/bnx2_fw2.h @@ -15,7 +15,8 @@ */ static u8 bnx2_COM_b09FwText[] = { - 0x1f, 0x8b, 0x08, 0x00, 0x0e, 0x34, 0xe7, 0x45, 0x00, 0x03, 0xdc, 0x5b, +/* 0x1f, 0x8b, 0x08, 0x00, 0x0e, 0x34, 0xe7, 0x45, 0x00, 0x03, */ + 0xdc, 0x5b, 0x6d, 0x70, 0x5c, 0xd5, 0x79, 0x7e, 0xef, 0xd9, 0xbb, 0xf2, 0x5a, 0x92, 0xe5, 0x6b, 0x79, 0x23, 0x16, 0x4b, 0xc0, 0xae, 0x75, 0x6d, 0x69, 0xb0, 0x43, 0x16, 0xa1, 0x80, 0x9a, 0xd9, 0xc0, 0xb2, 0x2b, 0x33, 0x9e, 0x0c, @@ -1045,8 +1046,6 @@ static const u32 bnx2_COM_b09FwRodata[(0x88/4) + 1] = { 0x08002b3c, 0x08002b6c, 0x08002b9c, 0x00000000, 0x0800604c, 0x0800604c, 0x0800604c, 0x0800604c, 0x0800604c, 0x08006078, 0x08006078, 0x080060b8, 0x080060c4, 0x080060c4, 0x0800604c, 0x00000000, 0x00000000 }; -static const u32 bnx2_COM_b09FwBss[(0x88/4) + 1] = { 0x0 }; -static const u32 bnx2_COM_b09FwSbss[(0x60/4) + 1] = { 0x0 }; static struct fw_info bnx2_com_fw_09 = { .ver_major = 0x3, @@ -1069,12 +1068,10 @@ static struct fw_info bnx2_com_fw_09 = { .sbss_addr = 0x08007e60, .sbss_len = 0x60, .sbss_index = 0x0, - .sbss = bnx2_COM_b09FwSbss, .bss_addr = 0x08007ec0, .bss_len = 0x88, .bss_index = 0x0, - .bss = bnx2_COM_b09FwBss, .rodata_addr = 0x08007dc0, .rodata_len = 0x88, @@ -1083,7 +1080,8 @@ static struct fw_info bnx2_com_fw_09 = { }; static u8 bnx2_CP_b09FwText[] = { - 0x1f, 0x8b, 0x08, 0x00, 0x0f, 0x34, 0xe7, 0x45, 0x00, 0x03, 0xbd, 0x7d, +/* 0x1f, 0x8b, 0x08, 0x00, 0x0f, 0x34, 0xe7, 0x45, 0x00, 0x03, */ + 0xbd, 0x7d, 0x0d, 0x74, 0x5c, 0x57, 0x7d, 0xe7, 0xff, 0xdd, 0x19, 0x49, 0x63, 0x59, 0x96, 0x9f, 0xe5, 0x89, 0x32, 0x51, 0x84, 0x3d, 0x23, 0x3d, 0xd9, 0x22, 0x12, 0xe1, 0xc5, 0x11, 0xac, 0xda, 0x2a, 0xe9, 0x30, 0x92, 0x3f, 0x12, @@ -2241,8 +2239,6 @@ static const u32 bnx2_CP_b09FwRodata[(0x118/4) + 1] = { 0x080032e8, 0x08003300, 0x08003320, 0x08003358, 0x08003338, 0x08003338, 0x080050d4, 0x080050d4, 0x080050d4, 0x080050d4, 0x080050d4, 0x080050fc, 0x080050fc, 0x08005124, 0x08005174, 0x08005144, 0x00000000 }; -static const u32 bnx2_CP_b09FwBss[(0x3b0/4) + 1] = { 0x0 }; -static const u32 bnx2_CP_b09FwSbss[(0xa1/4) + 1] = { 0x0 }; static struct fw_info bnx2_cp_fw_09 = { .ver_major = 0x3, @@ -2265,12 +2261,10 @@ static struct fw_info bnx2_cp_fw_09 = { .sbss_addr = 0x08007024, .sbss_len = 0xa1, .sbss_index = 0x0, - .sbss = bnx2_CP_b09FwSbss, .bss_addr = 0x080070d0, .bss_len = 0x3b0, .bss_index = 0x0, - .bss = bnx2_CP_b09FwBss, .rodata_addr = 0x08006ee8, .rodata_len = 0x118, @@ -2279,7 +2273,8 @@ static struct fw_info bnx2_cp_fw_09 = { }; static u8 bnx2_RXP_b09FwText[] = { - 0x1f, 0x8b, 0x08, 0x00, 0x0e, 0x34, 0xe7, 0x45, 0x00, 0x03, 0xec, 0x5c, +/* 0x1f, 0x8b, 0x08, 0x00, 0x0e, 0x34, 0xe7, 0x45, 0x00, 0x03, */ + 0xec, 0x5c, 0x5d, 0x6c, 0x1c, 0xd7, 0x75, 0x3e, 0xf3, 0x43, 0x6a, 0x49, 0xf1, 0x67, 0xb8, 0x5c, 0xb1, 0x2b, 0x99, 0x96, 
0x77, 0xc9, 0x91, 0xc8, 0x58, 0x8a, 0x31, 0xa2, 0x09, 0x5b, 0x48, 0x17, 0xf6, 0x76, 0x76, 0x25, 0xb1, 0xb1, @@ -2950,8 +2945,6 @@ static const u32 bnx2_RXP_b09FwRodata[(0x278/4) + 1] = { 0x08006058, 0x08006070, 0x08006070, 0x08006070, 0x08006058, 0x08006070, 0x08006070, 0x08006070, 0x08006058, 0x08006070, 0x08006070, 0x08006070, 0x08006064, 0x00000000, 0x00000000 }; -static const u32 bnx2_RXP_b09FwBss[(0x13dc/4) + 1] = { 0x0 }; -static const u32 bnx2_RXP_b09FwSbss[(0x20/4) + 1] = { 0x0 }; static struct fw_info bnx2_rxp_fw_09 = { .ver_major = 0x3, @@ -2974,12 +2967,10 @@ static struct fw_info bnx2_rxp_fw_09 = { .sbss_addr = 0x08006a00, .sbss_len = 0x20, .sbss_index = 0x0, - .sbss = bnx2_RXP_b09FwSbss, .bss_addr = 0x08006a20, .bss_len = 0x13dc, .bss_index = 0x0, - .bss = bnx2_RXP_b09FwBss, .rodata_addr = 0x08006768, .rodata_len = 0x278, @@ -2988,7 +2979,8 @@ static struct fw_info bnx2_rxp_fw_09 = { }; static u8 bnx2_TPAT_b09FwText[] = { - 0x1f, 0x8b, 0x08, 0x00, 0x0e, 0x34, 0xe7, 0x45, 0x00, 0x03, 0xcd, 0x58, +/* 0x1f, 0x8b, 0x08, 0x00, 0x0e, 0x34, 0xe7, 0x45, 0x00, 0x03, */ + 0xcd, 0x58, 0x5d, 0x68, 0x1c, 0xd7, 0x15, 0x3e, 0xf3, 0xb7, 0x3b, 0x52, 0x24, 0xeb, 0x5a, 0xd9, 0xa6, 0xeb, 0xa0, 0x34, 0x33, 0xda, 0x91, 0xac, 0x22, 0x13, 0x4f, 0x9d, 0x25, 0x16, 0x65, 0x21, 0x93, 0xd9, 0x91, 0xac, 0x98, 0x3c, @@ -3241,8 +3233,6 @@ static u8 bnx2_TPAT_b09FwText[] = { static const u32 bnx2_TPAT_b09FwData[(0x0/4) + 1] = { 0x0 }; static const u32 bnx2_TPAT_b09FwRodata[(0x0/4) + 1] = { 0x0 }; -static const u32 bnx2_TPAT_b09FwBss[(0x850/4) + 1] = { 0x0 }; -static const u32 bnx2_TPAT_b09FwSbss[(0x2c/4) + 1] = { 0x0 }; static struct fw_info bnx2_tpat_fw_09 = { .ver_major = 0x3, @@ -3265,12 +3255,10 @@ static struct fw_info bnx2_tpat_fw_09 = { .sbss_addr = 0x08002088, .sbss_len = 0x2c, .sbss_index = 0x0, - .sbss = bnx2_TPAT_b09FwSbss, .bss_addr = 0x080020c0, .bss_len = 0x850, .bss_index = 0x0, - .bss = bnx2_TPAT_b09FwBss, .rodata_addr = 0x00000000, .rodata_len = 0x0, @@ -3279,7 +3267,8 @@ static struct fw_info bnx2_tpat_fw_09 = { }; static u8 bnx2_TXP_b09FwText[] = { - 0x1f, 0x8b, 0x08, 0x00, 0x0e, 0x34, 0xe7, 0x45, 0x00, 0x03, 0xcd, 0x7c, +/* 0x1f, 0x8b, 0x08, 0x00, 0x0e, 0x34, 0xe7, 0x45, 0x00, 0x03, */ + 0xcd, 0x7c, 0x6f, 0x70, 0x5b, 0xd7, 0x95, 0xdf, 0x79, 0xef, 0x81, 0x24, 0x48, 0xd1, 0xd4, 0x13, 0x17, 0x56, 0x60, 0x87, 0x71, 0x00, 0xf1, 0x81, 0x66, 0x42, 0xae, 0x04, 0x2b, 0x4c, 0xc2, 0x6d, 0xd1, 0xf8, 0x05, 0x00, 0x29, 0x48, @@ -4055,8 +4044,6 @@ static const u32 bnx2_TXP_b09FwRodata[(0x30/4) + 1] = { 0x08004060, 0x0800408c, 0x080040d4, 0x080040d4, 0x08003f60, 0x08003f8c, 0x08003f8c, 0x080040d4, 0x080040d4, 0x080040d4, 0x08003ff4, 0x00000000, 0x00000000 }; -static const u32 bnx2_TXP_b09FwBss[(0xa20/4) + 1] = { 0x0 }; -static const u32 bnx2_TXP_b09FwSbss[(0x8c/4) + 1] = { 0x0 }; static struct fw_info bnx2_txp_fw_09 = { .ver_major = 0x3, @@ -4079,12 +4066,10 @@ static struct fw_info bnx2_txp_fw_09 = { .sbss_addr = 0x08004750, .sbss_len = 0x8c, .sbss_index = 0x0, - .sbss = bnx2_TXP_b09FwSbss, .bss_addr = 0x080047e0, .bss_len = 0xa20, .bss_index = 0x0, - .bss = bnx2_TXP_b09FwBss, .rodata_addr = 0x08004638, .rodata_len = 0x30, diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c index f829e4ad8b4970aacd1cc893e0c65a7855f751bd..7a045a37056e0de7a93e6d44d296e1801f48db6b 100644 --- a/drivers/net/bonding/bond_3ad.c +++ b/drivers/net/bonding/bond_3ad.c @@ -29,6 +29,7 @@ #include #include #include +#include #include "bonding.h" #include "bond_3ad.h" @@ -100,7 +101,6 @@ static u16 
__get_link_speed(struct port *port); static u8 __get_duplex(struct port *port); static inline void __initialize_port_locks(struct port *port); //conversions -static void __htons_lacpdu(struct lacpdu *lacpdu); static u16 __ad_timer_to_ticks(u16 timer_type, u16 Par); @@ -419,26 +419,6 @@ static inline void __initialize_port_locks(struct port *port) } //conversions -/** - * __htons_lacpdu - convert the contents of a LACPDU to network byte order - * @lacpdu: the speicifed lacpdu - * - * For each multi-byte field in the lacpdu, convert its content - */ -static void __htons_lacpdu(struct lacpdu *lacpdu) -{ - if (lacpdu) { - lacpdu->actor_system_priority = htons(lacpdu->actor_system_priority); - lacpdu->actor_key = htons(lacpdu->actor_key); - lacpdu->actor_port_priority = htons(lacpdu->actor_port_priority); - lacpdu->actor_port = htons(lacpdu->actor_port); - lacpdu->partner_system_priority = htons(lacpdu->partner_system_priority); - lacpdu->partner_key = htons(lacpdu->partner_key); - lacpdu->partner_port_priority = htons(lacpdu->partner_port_priority); - lacpdu->partner_port = htons(lacpdu->partner_port); - lacpdu->collector_max_delay = htons(lacpdu->collector_max_delay); - } -} /** * __ad_timer_to_ticks - convert a given timer type to AD module ticks @@ -826,11 +806,11 @@ static inline void __update_lacpdu_from_port(struct port *port) * lacpdu->actor_information_length initialized */ - lacpdu->actor_system_priority = port->actor_system_priority; + lacpdu->actor_system_priority = htons(port->actor_system_priority); lacpdu->actor_system = port->actor_system; - lacpdu->actor_key = port->actor_oper_port_key; - lacpdu->actor_port_priority = port->actor_port_priority; - lacpdu->actor_port = port->actor_port_number; + lacpdu->actor_key = htons(port->actor_oper_port_key); + lacpdu->actor_port_priority = htons(port->actor_port_priority); + lacpdu->actor_port = htons(port->actor_port_number); lacpdu->actor_state = port->actor_oper_port_state; /* lacpdu->reserved_3_1 initialized @@ -838,11 +818,11 @@ static inline void __update_lacpdu_from_port(struct port *port) * lacpdu->partner_information_length initialized */ - lacpdu->partner_system_priority = port->partner_oper_system_priority; + lacpdu->partner_system_priority = htons(port->partner_oper_system_priority); lacpdu->partner_system = port->partner_oper_system; - lacpdu->partner_key = port->partner_oper_key; - lacpdu->partner_port_priority = port->partner_oper_port_priority; - lacpdu->partner_port = port->partner_oper_port_number; + lacpdu->partner_key = htons(port->partner_oper_key); + lacpdu->partner_port_priority = htons(port->partner_oper_port_priority); + lacpdu->partner_port = htons(port->partner_oper_port_number); lacpdu->partner_state = port->partner_oper_port_state; /* lacpdu->reserved_3_2 initialized @@ -854,9 +834,6 @@ static inline void __update_lacpdu_from_port(struct port *port) * terminator_length initialized * reserved_50[50] initialized */ - - /* Convert all non u8 parameters to Big Endian for transmit */ - __htons_lacpdu(lacpdu); } ////////////////////////////////////////////////////////////////////////////////////// @@ -1833,7 +1810,7 @@ static void ad_initialize_lacpdu(struct lacpdu *lacpdu) } lacpdu->tlv_type_collector_info = 0x03; lacpdu->collector_information_length= 0x10; - lacpdu->collector_max_delay = AD_COLLECTOR_MAX_DELAY; + lacpdu->collector_max_delay = htons(AD_COLLECTOR_MAX_DELAY); for (index=0; index<=11; index++) { lacpdu->reserved_12[index]=0; } @@ -2448,6 +2425,9 @@ int bond_3ad_lacpdu_recv(struct sk_buff *skb, struct 
net_device *dev, struct pac struct slave *slave = NULL; int ret = NET_RX_DROP; + if (dev->nd_net != &init_net) + goto out; + if (!(dev->flags & IFF_MASTER)) goto out; diff --git a/drivers/net/bonding/bond_3ad.h b/drivers/net/bonding/bond_3ad.h index 6ad5ad6e65d50536aabc652d7ada6e2135c5156b..862952fa6fd99e84962d2187699c2c1255f1684d 100644 --- a/drivers/net/bonding/bond_3ad.h +++ b/drivers/net/bonding/bond_3ad.h @@ -108,7 +108,7 @@ typedef enum { typedef struct ad_header { struct mac_addr destination_address; struct mac_addr source_address; - u16 length_type; + __be16 length_type; } ad_header_t; // Link Aggregation Control Protocol(LACP) data unit structure(43.4.2.2 in the 802.3ad standard) @@ -117,25 +117,25 @@ typedef struct lacpdu { u8 version_number; u8 tlv_type_actor_info; // = actor information(type/length/value) u8 actor_information_length; // = 20 - u16 actor_system_priority; + __be16 actor_system_priority; struct mac_addr actor_system; - u16 actor_key; - u16 actor_port_priority; - u16 actor_port; + __be16 actor_key; + __be16 actor_port_priority; + __be16 actor_port; u8 actor_state; u8 reserved_3_1[3]; // = 0 u8 tlv_type_partner_info; // = partner information u8 partner_information_length; // = 20 - u16 partner_system_priority; + __be16 partner_system_priority; struct mac_addr partner_system; - u16 partner_key; - u16 partner_port_priority; - u16 partner_port; + __be16 partner_key; + __be16 partner_port_priority; + __be16 partner_port; u8 partner_state; u8 reserved_3_2[3]; // = 0 u8 tlv_type_collector_info; // = collector information u8 collector_information_length; // = 16 - u16 collector_max_delay; + __be16 collector_max_delay; u8 reserved_12[12]; u8 tlv_type_terminator; // = terminator u8 terminator_length; // = 0 diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c index 92c3b6f6a8e7977779ca5c013745bd37ea95da32..aea2217c56ebeb327f1dd81dbbcdb45a716eeedf 100644 --- a/drivers/net/bonding/bond_alb.c +++ b/drivers/net/bonding/bond_alb.c @@ -87,20 +87,20 @@ static const int alb_delta_in_ticks = HZ / ALB_TIMER_TICKS_PER_SEC; struct learning_pkt { u8 mac_dst[ETH_ALEN]; u8 mac_src[ETH_ALEN]; - u16 type; + __be16 type; u8 padding[ETH_ZLEN - ETH_HLEN]; }; struct arp_pkt { - u16 hw_addr_space; - u16 prot_addr_space; + __be16 hw_addr_space; + __be16 prot_addr_space; u8 hw_addr_len; u8 prot_addr_len; - u16 op_code; + __be16 op_code; u8 mac_src[ETH_ALEN]; /* sender hardware address */ - u32 ip_src; /* sender IP address */ + __be32 ip_src; /* sender IP address */ u8 mac_dst[ETH_ALEN]; /* target hardware address */ - u32 ip_dst; /* target IP address */ + __be32 ip_dst; /* target IP address */ }; #pragma pack() @@ -345,6 +345,9 @@ static int rlb_arp_recv(struct sk_buff *skb, struct net_device *bond_dev, struct struct arp_pkt *arp = (struct arp_pkt *)skb->data; int res = NET_RX_DROP; + if (bond_dev->nd_net != &init_net) + goto out; + if (!(bond_dev->flags & IFF_MASTER)) goto out; @@ -579,7 +582,7 @@ static void rlb_req_update_slave_clients(struct bonding *bond, struct slave *sla } /* mark all clients using src_ip to be updated */ -static void rlb_req_update_subnet_clients(struct bonding *bond, u32 src_ip) +static void rlb_req_update_subnet_clients(struct bonding *bond, __be32 src_ip) { struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond)); struct rlb_client_info *client_info; @@ -1264,7 +1267,7 @@ int bond_alb_xmit(struct sk_buff *skb, struct net_device *bond_dev) struct ethhdr *eth_data; struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond)); struct slave *tx_slave = 
NULL; - static const u32 ip_bcast = 0xffffffff; + static const __be32 ip_bcast = htonl(0xffffffff); int hash_size = 0; int do_tx_balance = 1; u32 hash_index = 0; @@ -1308,8 +1311,7 @@ int bond_alb_xmit(struct sk_buff *skb, struct net_device *bond_dev) hash_size = sizeof(ipv6_hdr(skb)->daddr); break; case ETH_P_IPX: - if (ipx_hdr(skb)->ipx_checksum != - __constant_htons(IPX_NO_CHECKSUM)) { + if (ipx_hdr(skb)->ipx_checksum != IPX_NO_CHECKSUM) { /* something is wrong with this packet */ do_tx_balance = 0; break; diff --git a/drivers/net/bonding/bond_alb.h b/drivers/net/bonding/bond_alb.h index 28f2a2fd1b5a956cb896561d2646690ed49bf230..fd87264298901f47afe92b96cef0b7aff6dcbddf 100644 --- a/drivers/net/bonding/bond_alb.h +++ b/drivers/net/bonding/bond_alb.h @@ -60,8 +60,8 @@ struct tlb_client_info { * ------------------------------------------------------------------------- */ struct rlb_client_info { - u32 ip_src; /* the server IP address */ - u32 ip_dst; /* the client IP address */ + __be32 ip_src; /* the server IP address */ + __be32 ip_dst; /* the client IP address */ u8 mac_dst[ETH_ALEN]; /* the client MAC address */ u32 next; /* The next Hash table entry index */ u32 prev; /* The previous Hash table entry index */ diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index 1afda3230defc01c1d38db13f9e17cf8b80397cd..64bfec32e2a6e99ffd5c8225ba1992e2625d0dc2 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -75,6 +75,7 @@ #include #include #include +#include #include "bonding.h" #include "bond_3ad.h" #include "bond_alb.h" @@ -143,7 +144,7 @@ static struct proc_dir_entry *bond_proc_dir = NULL; #endif extern struct rw_semaphore bonding_rwsem; -static u32 arp_target[BOND_MAX_ARP_TARGETS] = { 0, } ; +static __be32 arp_target[BOND_MAX_ARP_TARGETS] = { 0, } ; static int arp_ip_count = 0; static int bond_mode = BOND_MODE_ROUNDROBIN; static int xmit_hashtype= BOND_XMIT_POLICY_LAYER2; @@ -1603,6 +1604,7 @@ int bond_release(struct net_device *bond_dev, struct net_device *slave_dev) struct slave *slave, *oldcurrent; struct sockaddr addr; int mac_addr_differ; + DECLARE_MAC_BUF(mac); /* slave is not a slave or master is not master of this slave */ if (!(slave_dev->flags & IFF_SLAVE) || @@ -1630,19 +1632,13 @@ int bond_release(struct net_device *bond_dev, struct net_device *slave_dev) ETH_ALEN); if (!mac_addr_differ && (bond->slave_cnt > 1)) { printk(KERN_WARNING DRV_NAME - ": %s: Warning: the permanent HWaddr of %s " - "- %02X:%02X:%02X:%02X:%02X:%02X - is " - "still in use by %s. Set the HWaddr of " - "%s to a different address to avoid " - "conflicts.\n", + ": %s: Warning: the permanent HWaddr of %s - " + "%s - is still in use by %s. 
" + "Set the HWaddr of %s to a different address " + "to avoid conflicts.\n", bond_dev->name, slave_dev->name, - slave->perm_hwaddr[0], - slave->perm_hwaddr[1], - slave->perm_hwaddr[2], - slave->perm_hwaddr[3], - slave->perm_hwaddr[4], - slave->perm_hwaddr[5], + print_mac(mac, slave->perm_hwaddr), bond_dev->name, slave_dev->name); } @@ -2230,7 +2226,7 @@ void bond_mii_monitor(struct net_device *bond_dev) } -static u32 bond_glean_dev_ip(struct net_device *dev) +static __be32 bond_glean_dev_ip(struct net_device *dev) { struct in_device *idev; struct in_ifaddr *ifa; @@ -2273,7 +2269,7 @@ static int bond_has_ip(struct bonding *bond) return 0; } -static int bond_has_this_ip(struct bonding *bond, u32 ip) +static int bond_has_this_ip(struct bonding *bond, __be32 ip) { struct vlan_entry *vlan, *vlan_next; @@ -2297,7 +2293,7 @@ static int bond_has_this_ip(struct bonding *bond, u32 ip) * switches in VLAN mode (especially if ports are configured as * "native" to a VLAN) might not pass non-tagged frames. */ -static void bond_arp_send(struct net_device *slave_dev, int arp_op, u32 dest_ip, u32 src_ip, unsigned short vlan_id) +static void bond_arp_send(struct net_device *slave_dev, int arp_op, __be32 dest_ip, __be32 src_ip, unsigned short vlan_id) { struct sk_buff *skb; @@ -2325,7 +2321,7 @@ static void bond_arp_send(struct net_device *slave_dev, int arp_op, u32 dest_ip, static void bond_arp_send_all(struct bonding *bond, struct slave *slave) { int i, vlan_id, rv; - u32 *targets = bond->params.arp_targets; + __be32 *targets = bond->params.arp_targets; struct vlan_entry *vlan, *vlan_next; struct net_device *vlan_dev; struct flowi fl; @@ -2430,10 +2426,10 @@ static void bond_send_gratuitous_arp(struct bonding *bond) } } -static void bond_validate_arp(struct bonding *bond, struct slave *slave, u32 sip, u32 tip) +static void bond_validate_arp(struct bonding *bond, struct slave *slave, __be32 sip, __be32 tip) { int i; - u32 *targets = bond->params.arp_targets; + __be32 *targets = bond->params.arp_targets; targets = bond->params.arp_targets; for (i = 0; (i < BOND_MAX_ARP_TARGETS) && targets[i]; i++) { @@ -2455,7 +2451,10 @@ static int bond_arp_rcv(struct sk_buff *skb, struct net_device *dev, struct pack struct slave *slave; struct bonding *bond; unsigned char *arp_ptr; - u32 sip, tip; + __be32 sip, tip; + + if (dev->nd_net != &init_net) + goto out; if (!(dev->priv_flags & IFF_BONDING) || !(dev->flags & IFF_MASTER)) goto out; @@ -3002,6 +3001,7 @@ static void bond_info_show_master(struct seq_file *seq) if (bond->params.mode == BOND_MODE_8023AD) { struct ad_info ad_info; + DECLARE_MAC_BUF(mac); seq_puts(seq, "\n802.3ad info\n"); seq_printf(seq, "LACP rate: %s\n", @@ -3021,13 +3021,8 @@ static void bond_info_show_master(struct seq_file *seq) ad_info.actor_key); seq_printf(seq, "\tPartner Key: %d\n", ad_info.partner_key); - seq_printf(seq, "\tPartner Mac Address: %02x:%02x:%02x:%02x:%02x:%02x\n", - ad_info.partner_system[0], - ad_info.partner_system[1], - ad_info.partner_system[2], - ad_info.partner_system[3], - ad_info.partner_system[4], - ad_info.partner_system[5]); + seq_printf(seq, "\tPartner Mac Address: %s\n", + print_mac(mac, ad_info.partner_system)); } } } @@ -3035,6 +3030,7 @@ static void bond_info_show_master(struct seq_file *seq) static void bond_info_show_slave(struct seq_file *seq, const struct slave *slave) { struct bonding *bond = seq->private; + DECLARE_MAC_BUF(mac); seq_printf(seq, "\nSlave Interface: %s\n", slave->dev->name); seq_printf(seq, "MII Status: %s\n", @@ -3043,10 +3039,8 @@ static void 
bond_info_show_slave(struct seq_file *seq, const struct slave *slave slave->link_failure_count); seq_printf(seq, - "Permanent HW addr: %02x:%02x:%02x:%02x:%02x:%02x\n", - slave->perm_hwaddr[0], slave->perm_hwaddr[1], - slave->perm_hwaddr[2], slave->perm_hwaddr[3], - slave->perm_hwaddr[4], slave->perm_hwaddr[5]); + "Permanent HW addr: %s\n", + print_mac(mac, slave->perm_hwaddr)); if (bond->params.mode == BOND_MODE_8023AD) { const struct aggregator *agg @@ -3144,7 +3138,7 @@ static void bond_create_proc_dir(void) { int len = strlen(DRV_NAME); - for (bond_proc_dir = proc_net->subdir; bond_proc_dir; + for (bond_proc_dir = init_net.proc_net->subdir; bond_proc_dir; bond_proc_dir = bond_proc_dir->next) { if ((bond_proc_dir->namelen == len) && !memcmp(bond_proc_dir->name, DRV_NAME, len)) { @@ -3153,7 +3147,7 @@ static void bond_create_proc_dir(void) } if (!bond_proc_dir) { - bond_proc_dir = proc_mkdir(DRV_NAME, proc_net); + bond_proc_dir = proc_mkdir(DRV_NAME, init_net.proc_net); if (bond_proc_dir) { bond_proc_dir->owner = THIS_MODULE; } else { @@ -3188,7 +3182,7 @@ static void bond_destroy_proc_dir(void) bond_proc_dir->owner = NULL; } } else { - remove_proc_entry(DRV_NAME, proc_net); + remove_proc_entry(DRV_NAME, init_net.proc_net); bond_proc_dir = NULL; } } @@ -3295,6 +3289,9 @@ static int bond_netdev_event(struct notifier_block *this, unsigned long event, v { struct net_device *event_dev = (struct net_device *)ptr; + if (event_dev->nd_net != &init_net) + return NOTIFY_DONE; + dprintk("event_dev: %s, event: %lx\n", (event_dev ? event_dev->name : "None"), event); @@ -3430,14 +3427,14 @@ static int bond_xmit_hash_policy_l34(struct sk_buff *skb, { struct ethhdr *data = (struct ethhdr *)skb->data; struct iphdr *iph = ip_hdr(skb); - u16 *layer4hdr = (u16 *)((u32 *)iph + iph->ihl); + __be16 *layer4hdr = (__be16 *)((u32 *)iph + iph->ihl); int layer4_xor = 0; if (skb->protocol == __constant_htons(ETH_P_IP)) { if (!(iph->frag_off & __constant_htons(IP_MF|IP_OFFSET)) && (iph->protocol == IPPROTO_TCP || iph->protocol == IPPROTO_UDP)) { - layer4_xor = htons((*layer4hdr ^ *(layer4hdr + 1))); + layer4_xor = ntohs((*layer4hdr ^ *(layer4hdr + 1))); } return (layer4_xor ^ ((ntohl(iph->saddr ^ iph->daddr)) & 0xffff)) % count; @@ -3712,7 +3709,7 @@ static int bond_do_ioctl(struct net_device *bond_dev, struct ifreq *ifr, int cmd } down_write(&(bonding_rwsem)); - slave_dev = dev_get_by_name(ifr->ifr_slave); + slave_dev = dev_get_by_name(&init_net, ifr->ifr_slave); dprintk("slave_dev=%p: \n", slave_dev); @@ -4195,10 +4192,6 @@ static void bond_ethtool_get_drvinfo(struct net_device *bond_dev, } static const struct ethtool_ops bond_ethtool_ops = { - .get_tx_csum = ethtool_op_get_tx_csum, - .get_tso = ethtool_op_get_tso, - .get_ufo = ethtool_op_get_ufo, - .get_sg = ethtool_op_get_sg, .get_drvinfo = bond_ethtool_get_drvinfo, }; @@ -4528,7 +4521,7 @@ static int bond_check_params(struct bond_params *params) arp_ip_target[arp_ip_count]); arp_interval = 0; } else { - u32 ip = in_aton(arp_ip_target[arp_ip_count]); + __be32 ip = in_aton(arp_ip_target[arp_ip_count]); arp_target[arp_ip_count] = ip; } } @@ -4667,8 +4660,6 @@ int bond_create(char *name, struct bond_params *params, struct bonding **newbond goto out_netdev; } - SET_MODULE_OWNER(bond_dev); - res = register_netdevice(bond_dev); if (res < 0) { goto out_bond; diff --git a/drivers/net/bonding/bond_sysfs.c b/drivers/net/bonding/bond_sysfs.c index 60cccf2aa9594e5c98eefd4e93622fdf5257bec9..6f49ca7e9b6686aa9841fe78e61bf3e56c841bb5 100644 --- 
a/drivers/net/bonding/bond_sysfs.c +++ b/drivers/net/bonding/bond_sysfs.c @@ -31,10 +31,10 @@ #include #include #include -#include #include #include #include +#include /* #define BONDING_DEBUG 1 */ #include "bonding.h" @@ -299,7 +299,7 @@ static ssize_t bonding_store_slaves(struct device *d, read_unlock_bh(&bond->lock); printk(KERN_INFO DRV_NAME ": %s: Adding slave %s.\n", bond->dev->name, ifname); - dev = dev_get_by_name(ifname); + dev = dev_get_by_name(&init_net, ifname); if (!dev) { printk(KERN_INFO DRV_NAME ": %s: Interface %s does not exist!\n", @@ -682,16 +682,16 @@ static ssize_t bonding_store_arp_targets(struct device *d, struct device_attribute *attr, const char *buf, size_t count) { - u32 newtarget; + __be32 newtarget; int i = 0, done = 0, ret = count; struct bonding *bond = to_bond(d); - u32 *targets; + __be32 *targets; targets = bond->params.arp_targets; newtarget = in_aton(buf + 1); /* look for adds */ if (buf[0] == '+') { - if ((newtarget == 0) || (newtarget == INADDR_BROADCAST)) { + if ((newtarget == 0) || (newtarget == htonl(INADDR_BROADCAST))) { printk(KERN_ERR DRV_NAME ": %s: invalid ARP target %u.%u.%u.%u specified for addition\n", bond->dev->name, NIPQUAD(newtarget)); @@ -727,7 +727,7 @@ static ssize_t bonding_store_arp_targets(struct device *d, } else if (buf[0] == '-') { - if ((newtarget == 0) || (newtarget == INADDR_BROADCAST)) { + if ((newtarget == 0) || (newtarget == htonl(INADDR_BROADCAST))) { printk(KERN_ERR DRV_NAME ": %s: invalid ARP target %d.%d.%d.%d specified for removal\n", bond->dev->name, NIPQUAD(newtarget)); @@ -1361,17 +1361,14 @@ static ssize_t bonding_show_ad_partner_mac(struct device *d, { int count = 0; struct bonding *bond = to_bond(d); + DECLARE_MAC_BUF(mac); if (bond->params.mode == BOND_MODE_8023AD) { struct ad_info ad_info; if (!bond_3ad_get_active_agg_info(bond, &ad_info)) { - count = sprintf(buf,"%02x:%02x:%02x:%02x:%02x:%02x\n", - ad_info.partner_system[0], - ad_info.partner_system[1], - ad_info.partner_system[2], - ad_info.partner_system[3], - ad_info.partner_system[4], - ad_info.partner_system[5]) + 1; + count = sprintf(buf,"%s\n", + print_mac(mac, ad_info.partner_system)) + + 1; } } else diff --git a/drivers/net/bonding/bonding.h b/drivers/net/bonding/bonding.h index 6dcbd25e3ef03671dce73fd1b5aa2d9399898b08..2a6af7d23728357c1d60f091b6a6864bb26593be 100644 --- a/drivers/net/bonding/bonding.h +++ b/drivers/net/bonding/bonding.h @@ -132,7 +132,7 @@ struct bond_params { int downdelay; int lacp_fast; char primary[IFNAMSIZ]; - u32 arp_targets[BOND_MAX_ARP_TARGETS]; + __be32 arp_targets[BOND_MAX_ARP_TARGETS]; }; struct bond_parm_tbl { @@ -142,7 +142,7 @@ struct bond_parm_tbl { struct vlan_entry { struct list_head vlan_list; - u32 vlan_ip; + __be32 vlan_ip; unsigned short vlan_id; }; @@ -193,7 +193,7 @@ struct bonding { struct list_head bond_list; struct dev_mc_list *mc_list; int (*xmit_hash_policy)(struct sk_buff *, struct net_device *, int); - u32 master_ip; + __be32 master_ip; u16 flags; struct ad_bond_info ad_info; struct alb_bond_info alb_info; diff --git a/drivers/net/cassini.c b/drivers/net/cassini.c index f6e4030c73d118c043ca7daebfdc051ef78320f3..563bf5f6fa2aa40c324129291ed9bacf6efde8a3 100644 --- a/drivers/net/cassini.c +++ b/drivers/net/cassini.c @@ -2485,7 +2485,7 @@ static irqreturn_t cas_interruptN(int irq, void *dev_id) if (status & INTR_RX_DONE_ALT) { /* handle rx separately */ #ifdef USE_NAPI cas_mask_intr(cp); - netif_rx_schedule(dev); + netif_rx_schedule(dev, &cp->napi); #else cas_rx_ringN(cp, ring, 0); #endif @@ -2536,7 +2536,7 
@@ static irqreturn_t cas_interrupt1(int irq, void *dev_id) if (status & INTR_RX_DONE_ALT) { /* handle rx separately */ #ifdef USE_NAPI cas_mask_intr(cp); - netif_rx_schedule(dev); + netif_rx_schedule(dev, &cp->napi); #else cas_rx_ringN(cp, 1, 0); #endif @@ -2592,7 +2592,7 @@ static irqreturn_t cas_interrupt(int irq, void *dev_id) if (status & INTR_RX_DONE) { #ifdef USE_NAPI cas_mask_intr(cp); - netif_rx_schedule(dev); + netif_rx_schedule(dev, &cp->napi); #else cas_rx_ringN(cp, 0, 0); #endif @@ -2607,9 +2607,10 @@ static irqreturn_t cas_interrupt(int irq, void *dev_id) #ifdef USE_NAPI -static int cas_poll(struct net_device *dev, int *budget) +static int cas_poll(struct napi_struct *napi, int budget) { - struct cas *cp = netdev_priv(dev); + struct cas *cp = container_of(napi, struct cas, napi); + struct net_device *dev = cp->dev; int i, enable_intr, todo, credits; u32 status = readl(cp->regs + REG_INTR_STATUS); unsigned long flags; @@ -2620,20 +2621,18 @@ static int cas_poll(struct net_device *dev, int *budget) /* NAPI rx packets. we spread the credits across all of the * rxc rings - */ - todo = min(*budget, dev->quota); - - /* to make sure we're fair with the work we loop through each + * + * to make sure we're fair with the work we loop through each * ring N_RX_COMP_RING times with a request of - * todo / N_RX_COMP_RINGS + * budget / N_RX_COMP_RINGS */ enable_intr = 1; credits = 0; for (i = 0; i < N_RX_COMP_RINGS; i++) { int j; for (j = 0; j < N_RX_COMP_RINGS; j++) { - credits += cas_rx_ringN(cp, j, todo / N_RX_COMP_RINGS); - if (credits >= todo) { + credits += cas_rx_ringN(cp, j, budget / N_RX_COMP_RINGS); + if (credits >= budget) { enable_intr = 0; goto rx_comp; } @@ -2641,9 +2640,6 @@ static int cas_poll(struct net_device *dev, int *budget) } rx_comp: - *budget -= credits; - dev->quota -= credits; - /* final rx completion */ spin_lock_irqsave(&cp->lock, flags); if (status) @@ -2674,11 +2670,10 @@ static int cas_poll(struct net_device *dev, int *budget) #endif spin_unlock_irqrestore(&cp->lock, flags); if (enable_intr) { - netif_rx_complete(dev); + netif_rx_complete(dev, napi); cas_unmask_intr(cp); - return 0; } - return 1; + return credits; } #endif @@ -4351,6 +4346,9 @@ static int cas_open(struct net_device *dev) goto err_spare; } +#ifdef USE_NAPI + napi_enable(&cp->napi); +#endif /* init hw */ cas_lock_all_save(cp, flags); cas_clean_rings(cp); @@ -4376,6 +4374,9 @@ static int cas_close(struct net_device *dev) unsigned long flags; struct cas *cp = netdev_priv(dev); +#ifdef USE_NAPI + napi_disable(&cp->napi); +#endif /* Make sure we don't get distracted by suspend/resume */ mutex_lock(&cp->pm_mutex); @@ -4771,9 +4772,14 @@ static void cas_get_regs(struct net_device *dev, struct ethtool_regs *regs, cas_read_regs(cp, p, regs->len / sizeof(u32)); } -static int cas_get_stats_count(struct net_device *dev) +static int cas_get_sset_count(struct net_device *dev, int sset) { - return CAS_NUM_STAT_KEYS; + switch (sset) { + case ETH_SS_STATS: + return CAS_NUM_STAT_KEYS; + default: + return -EOPNOTSUPP; + } } static void cas_get_strings(struct net_device *dev, u32 stringset, u8 *data) @@ -4817,7 +4823,7 @@ static const struct ethtool_ops cas_ethtool_ops = { .set_msglevel = cas_set_msglevel, .get_regs_len = cas_get_regs_len, .get_regs = cas_get_regs, - .get_stats_count = cas_get_stats_count, + .get_sset_count = cas_get_sset_count, .get_strings = cas_get_strings, .get_ethtool_stats = cas_get_ethtool_stats, }; @@ -4876,6 +4882,7 @@ static int __devinit cas_init_one(struct pci_dev *pdev, int i, err,
pci_using_dac; u16 pci_cmd; u8 orig_cacheline_size = 0, cas_cacheline_size = 0; + DECLARE_MAC_BUF(mac); if (cas_version_printed++ == 0) printk(KERN_INFO "%s", version); @@ -4899,7 +4906,6 @@ static int __devinit cas_init_one(struct pci_dev *pdev, err = -ENOMEM; goto err_out_disable_pdev; } - SET_MODULE_OWNER(dev); SET_NETDEV_DEV(dev, &pdev->dev); err = pci_request_regions(pdev, dev->name); @@ -5062,8 +5068,7 @@ static int __devinit cas_init_one(struct pci_dev *pdev, dev->watchdog_timeo = CAS_TX_TIMEOUT; dev->change_mtu = cas_change_mtu; #ifdef USE_NAPI - dev->poll = cas_poll; - dev->weight = 64; + netif_napi_add(dev, &cp->napi, cas_poll, 64); #endif #ifdef CONFIG_NET_POLL_CONTROLLER dev->poll_controller = cas_netpoll; @@ -5085,16 +5090,12 @@ static int __devinit cas_init_one(struct pci_dev *pdev, i = readl(cp->regs + REG_BIM_CFG); printk(KERN_INFO "%s: Sun Cassini%s (%sbit/%sMHz PCI/%s) " - "Ethernet[%d] ", dev->name, + "Ethernet[%d] %s\n", dev->name, (cp->cas_flags & CAS_FLAG_REG_PLUS) ? "+" : "", (i & BIM_CFG_32BIT) ? "32" : "64", (i & BIM_CFG_66MHZ) ? "66" : "33", - (cp->phy_type == CAS_PHY_SERDES) ? "Fi" : "Cu", pdev->irq); - - for (i = 0; i < 6; i++) - printk("%2.2x%c", dev->dev_addr[i], - i == 5 ? ' ' : ':'); - printk("\n"); + (cp->phy_type == CAS_PHY_SERDES) ? "Fi" : "Cu", pdev->irq, + print_mac(mac, dev->dev_addr)); pci_set_drvdata(pdev, dev); cp->hw_running = 1; diff --git a/drivers/net/cassini.h b/drivers/net/cassini.h index a970804487c7b0136736e575487d95fc1082915f..2f93f83342d2575e146f8999ff37bafb55170dd6 100644 --- a/drivers/net/cassini.h +++ b/drivers/net/cassini.h @@ -4280,6 +4280,8 @@ struct cas { int rx_cur[N_RX_COMP_RINGS], rx_new[N_RX_COMP_RINGS]; int rx_last[N_RX_DESC_RINGS]; + struct napi_struct napi; + /* Set when chip is actually in operational state * (ie. 
not power managed) */ int hw_running; diff --git a/drivers/net/chelsio/Makefile b/drivers/net/chelsio/Makefile index 743ad8b41b5edce3eeb833cb3aed185904126038..57a4b262fd3fcf088f5092c210a9d22be788ad74 100644 --- a/drivers/net/chelsio/Makefile +++ b/drivers/net/chelsio/Makefile @@ -4,6 +4,6 @@ obj-$(CONFIG_CHELSIO_T1) += cxgb.o -cxgb-$(CONFIG_CHELSIO_T1_1G) += mac.o mv88e1xxx.o vsc7326.o +cxgb-$(CONFIG_CHELSIO_T1_1G) += mv88e1xxx.o vsc7326.o cxgb-objs := cxgb2.o espi.o tp.o pm3393.o sge.o subr.o \ mv88x201x.o my3126.o $(cxgb-y) diff --git a/drivers/net/chelsio/common.h b/drivers/net/chelsio/common.h index 8ba702c8b560c8b9cab8816e9f60f7e07a0c7bc0..846ca5383d3c875364bac967772d3290efdfd9f6 100644 --- a/drivers/net/chelsio/common.h +++ b/drivers/net/chelsio/common.h @@ -278,6 +278,7 @@ struct adapter { struct peespi *espi; struct petp *tp; + struct napi_struct napi; struct port_info port[MAX_NPORTS]; struct delayed_work stats_update_task; struct timer_list stats_update_timer; @@ -371,6 +372,7 @@ extern void t1_interrupts_enable(adapter_t *adapter); extern void t1_interrupts_disable(adapter_t *adapter); extern void t1_interrupts_clear(adapter_t *adapter); extern int t1_elmer0_ext_intr_handler(adapter_t *adapter); +extern void t1_elmer0_ext_intr(adapter_t *adapter); extern int t1_slow_intr_handler(adapter_t *adapter); extern int t1_link_start(struct cphy *phy, struct cmac *mac, struct link_config *lc); diff --git a/drivers/net/chelsio/cxgb2.c b/drivers/net/chelsio/cxgb2.c index 231ce43b97cfeb68817eabce4148feeaf8285449..2dbf8dc116c66acab2c5fa805442d81dcfd18edd 100644 --- a/drivers/net/chelsio/cxgb2.c +++ b/drivers/net/chelsio/cxgb2.c @@ -255,8 +255,11 @@ static int cxgb_open(struct net_device *dev) struct adapter *adapter = dev->priv; int other_ports = adapter->open_device_map & PORT_MASK; - if (!adapter->open_device_map && (err = cxgb_up(adapter)) < 0) + napi_enable(&adapter->napi); + if (!adapter->open_device_map && (err = cxgb_up(adapter)) < 0) { + napi_disable(&adapter->napi); return err; + } __set_bit(dev->if_port, &adapter->open_device_map); link_start(&adapter->port[dev->if_port]); @@ -274,6 +277,7 @@ static int cxgb_close(struct net_device *dev) struct cmac *mac = p->mac; netif_stop_queue(dev); + napi_disable(&adapter->napi); mac->ops->disable(mac, MAC_DIRECTION_TX | MAC_DIRECTION_RX); netif_carrier_off(dev); @@ -435,9 +439,14 @@ static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) strcpy(info->bus_info, pci_name(adapter->pdev)); } -static int get_stats_count(struct net_device *dev) +static int get_sset_count(struct net_device *dev, int sset) { - return ARRAY_SIZE(stats_strings); + switch (sset) { + case ETH_SS_STATS: + return ARRAY_SIZE(stats_strings); + default: + return -EOPNOTSUPP; + } } static void get_strings(struct net_device *dev, u32 stringset, u8 *data) @@ -790,17 +799,14 @@ static const struct ethtool_ops t1_ethtool_ops = { .set_pauseparam = set_pauseparam, .get_rx_csum = get_rx_csum, .set_rx_csum = set_rx_csum, - .get_tx_csum = ethtool_op_get_tx_csum, .set_tx_csum = ethtool_op_set_tx_csum, - .get_sg = ethtool_op_get_sg, .set_sg = ethtool_op_set_sg, .get_link = ethtool_op_get_link, .get_strings = get_strings, - .get_stats_count = get_stats_count, + .get_sset_count = get_sset_count, .get_ethtool_stats = get_stats, .get_regs_len = get_regs_len, .get_regs = get_regs, - .get_tso = ethtool_op_get_tso, .set_tso = set_tso, }; @@ -1032,7 +1038,6 @@ static int __devinit init_one(struct pci_dev *pdev, goto out_free_dev; } - SET_MODULE_OWNER(netdev); 
SET_NETDEV_DEV(netdev, &pdev->dev); if (!adapter) { @@ -1113,8 +1118,7 @@ static int __devinit init_one(struct pci_dev *pdev, netdev->poll_controller = t1_netpoll; #endif #ifdef CONFIG_CHELSIO_T1_NAPI - netdev->weight = 64; - netdev->poll = t1_poll; + netif_napi_add(netdev, &adapter->napi, t1_poll, 64); #endif SET_ETHTOOL_OPS(netdev, &t1_ethtool_ops); diff --git a/drivers/net/chelsio/mac.c b/drivers/net/chelsio/mac.c deleted file mode 100644 index 1d972825eac3dfca9f6c94968056ca08b1a3ce22..0000000000000000000000000000000000000000 --- a/drivers/net/chelsio/mac.c +++ /dev/null @@ -1,368 +0,0 @@ -/* $Date: 2005/10/22 00:42:59 $ $RCSfile: mac.c,v $ $Revision: 1.32 $ */ -#include "gmac.h" -#include "regs.h" -#include "fpga_defs.h" - -#define MAC_CSR_INTERFACE_GMII 0x0 -#define MAC_CSR_INTERFACE_TBI 0x1 -#define MAC_CSR_INTERFACE_MII 0x2 -#define MAC_CSR_INTERFACE_RMII 0x3 - -/* Chelsio's MAC statistics. */ -struct mac_statistics { - - /* Transmit */ - u32 TxFramesTransmittedOK; - u32 TxReserved1; - u32 TxReserved2; - u32 TxOctetsTransmittedOK; - u32 TxFramesWithDeferredXmissions; - u32 TxLateCollisions; - u32 TxFramesAbortedDueToXSCollisions; - u32 TxFramesLostDueToIntMACXmitError; - u32 TxReserved3; - u32 TxMulticastFrameXmittedOK; - u32 TxBroadcastFramesXmittedOK; - u32 TxFramesWithExcessiveDeferral; - u32 TxPAUSEMACCtrlFramesTransmitted; - - /* Receive */ - u32 RxFramesReceivedOK; - u32 RxFrameCheckSequenceErrors; - u32 RxAlignmentErrors; - u32 RxOctetsReceivedOK; - u32 RxFramesLostDueToIntMACRcvError; - u32 RxMulticastFramesReceivedOK; - u32 RxBroadcastFramesReceivedOK; - u32 RxInRangeLengthErrors; - u32 RxTxOutOfRangeLengthField; - u32 RxFrameTooLongErrors; - u32 RxPAUSEMACCtrlFramesReceived; -}; - -static int static_aPorts[] = { - FPGA_GMAC_INTERRUPT_PORT0, - FPGA_GMAC_INTERRUPT_PORT1, - FPGA_GMAC_INTERRUPT_PORT2, - FPGA_GMAC_INTERRUPT_PORT3 -}; - -struct _cmac_instance { - u32 index; -}; - -static int mac_intr_enable(struct cmac *mac) -{ - u32 mac_intr; - - if (t1_is_asic(mac->adapter)) { - /* ASIC */ - - /* We don't use the on chip MAC for ASIC products. */ - } else { - /* FPGA */ - - /* Set parent gmac interrupt. */ - mac_intr = readl(mac->adapter->regs + A_PL_ENABLE); - mac_intr |= FPGA_PCIX_INTERRUPT_GMAC; - writel(mac_intr, mac->adapter->regs + A_PL_ENABLE); - - mac_intr = readl(mac->adapter->regs + FPGA_GMAC_ADDR_INTERRUPT_ENABLE); - mac_intr |= static_aPorts[mac->instance->index]; - writel(mac_intr, - mac->adapter->regs + FPGA_GMAC_ADDR_INTERRUPT_ENABLE); - } - - return 0; -} - -static int mac_intr_disable(struct cmac *mac) -{ - u32 mac_intr; - - if (t1_is_asic(mac->adapter)) { - /* ASIC */ - - /* We don't use the on chip MAC for ASIC products. */ - } else { - /* FPGA */ - - /* Set parent gmac interrupt. */ - mac_intr = readl(mac->adapter->regs + A_PL_ENABLE); - mac_intr &= ~FPGA_PCIX_INTERRUPT_GMAC; - writel(mac_intr, mac->adapter->regs + A_PL_ENABLE); - - mac_intr = readl(mac->adapter->regs + FPGA_GMAC_ADDR_INTERRUPT_ENABLE); - mac_intr &= ~(static_aPorts[mac->instance->index]); - writel(mac_intr, - mac->adapter->regs + FPGA_GMAC_ADDR_INTERRUPT_ENABLE); - } - - return 0; -} - -static int mac_intr_clear(struct cmac *mac) -{ - u32 mac_intr; - - if (t1_is_asic(mac->adapter)) { - /* ASIC */ - - /* We don't use the on chip MAC for ASIC products. */ - } else { - /* FPGA */ - - /* Set parent gmac interrupt. 
*/ - writel(FPGA_PCIX_INTERRUPT_GMAC, - mac->adapter->regs + A_PL_CAUSE); - mac_intr = readl(mac->adapter->regs + FPGA_GMAC_ADDR_INTERRUPT_CAUSE); - mac_intr |= (static_aPorts[mac->instance->index]); - writel(mac_intr, - mac->adapter->regs + FPGA_GMAC_ADDR_INTERRUPT_CAUSE); - } - - return 0; -} - -static int mac_get_address(struct cmac *mac, u8 addr[6]) -{ - u32 data32_lo, data32_hi; - - data32_lo = readl(mac->adapter->regs - + MAC_REG_IDLO(mac->instance->index)); - data32_hi = readl(mac->adapter->regs - + MAC_REG_IDHI(mac->instance->index)); - - addr[0] = (u8) ((data32_hi >> 8) & 0xFF); - addr[1] = (u8) ((data32_hi) & 0xFF); - addr[2] = (u8) ((data32_lo >> 24) & 0xFF); - addr[3] = (u8) ((data32_lo >> 16) & 0xFF); - addr[4] = (u8) ((data32_lo >> 8) & 0xFF); - addr[5] = (u8) ((data32_lo) & 0xFF); - return 0; -} - -static int mac_reset(struct cmac *mac) -{ - u32 data32; - int mac_in_reset, time_out = 100; - int idx = mac->instance->index; - - data32 = readl(mac->adapter->regs + MAC_REG_CSR(idx)); - writel(data32 | F_MAC_RESET, - mac->adapter->regs + MAC_REG_CSR(idx)); - - do { - data32 = readl(mac->adapter->regs + MAC_REG_CSR(idx)); - - mac_in_reset = data32 & F_MAC_RESET; - if (mac_in_reset) - udelay(1); - } while (mac_in_reset && --time_out); - - if (mac_in_reset) { - CH_ERR("%s: MAC %d reset timed out\n", - mac->adapter->name, idx); - return 2; - } - - return 0; -} - -static int mac_set_rx_mode(struct cmac *mac, struct t1_rx_mode *rm) -{ - u32 val; - - val = readl(mac->adapter->regs - + MAC_REG_CSR(mac->instance->index)); - val &= ~(F_MAC_PROMISC | F_MAC_MC_ENABLE); - val |= V_MAC_PROMISC(t1_rx_mode_promisc(rm) != 0); - val |= V_MAC_MC_ENABLE(t1_rx_mode_allmulti(rm) != 0); - writel(val, - mac->adapter->regs + MAC_REG_CSR(mac->instance->index)); - - return 0; -} - -static int mac_set_speed_duplex_fc(struct cmac *mac, int speed, int duplex, - int fc) -{ - u32 data32; - - data32 = readl(mac->adapter->regs - + MAC_REG_CSR(mac->instance->index)); - data32 &= ~(F_MAC_HALF_DUPLEX | V_MAC_SPEED(M_MAC_SPEED) | - V_INTERFACE(M_INTERFACE) | F_MAC_TX_PAUSE_ENABLE | - F_MAC_RX_PAUSE_ENABLE); - - switch (speed) { - case SPEED_10: - case SPEED_100: - data32 |= V_INTERFACE(MAC_CSR_INTERFACE_MII); - data32 |= V_MAC_SPEED(speed == SPEED_10 ? 
0 : 1); - break; - case SPEED_1000: - data32 |= V_INTERFACE(MAC_CSR_INTERFACE_GMII); - data32 |= V_MAC_SPEED(2); - break; - } - - if (duplex >= 0) - data32 |= V_MAC_HALF_DUPLEX(duplex == DUPLEX_HALF); - - if (fc >= 0) { - data32 |= V_MAC_RX_PAUSE_ENABLE((fc & PAUSE_RX) != 0); - data32 |= V_MAC_TX_PAUSE_ENABLE((fc & PAUSE_TX) != 0); - } - - writel(data32, - mac->adapter->regs + MAC_REG_CSR(mac->instance->index)); - return 0; -} - -static int mac_enable(struct cmac *mac, int which) -{ - u32 val; - - val = readl(mac->adapter->regs - + MAC_REG_CSR(mac->instance->index)); - if (which & MAC_DIRECTION_RX) - val |= F_MAC_RX_ENABLE; - if (which & MAC_DIRECTION_TX) - val |= F_MAC_TX_ENABLE; - writel(val, - mac->adapter->regs + MAC_REG_CSR(mac->instance->index)); - return 0; -} - -static int mac_disable(struct cmac *mac, int which) -{ - u32 val; - - val = readl(mac->adapter->regs - + MAC_REG_CSR(mac->instance->index)); - if (which & MAC_DIRECTION_RX) - val &= ~F_MAC_RX_ENABLE; - if (which & MAC_DIRECTION_TX) - val &= ~F_MAC_TX_ENABLE; - writel(val, - mac->adapter->regs + MAC_REG_CSR(mac->instance->index)); - return 0; -} - -#if 0 -static int mac_set_ifs(struct cmac *mac, u32 mode) -{ - t1_write_reg_4(mac->adapter, - MAC_REG_IFS(mac->instance->index), - mode); - return 0; -} - -static int mac_enable_isl(struct cmac *mac) -{ - u32 data32 = readl(mac->adapter->regs - + MAC_REG_CSR(mac->instance->index)); - data32 |= F_MAC_RX_ENABLE | F_MAC_TX_ENABLE; - t1_write_reg_4(mac->adapter, - MAC_REG_CSR(mac->instance->index), - data32); - return 0; -} -#endif - -static int mac_set_mtu(struct cmac *mac, int mtu) -{ - if (mtu > 9600) - return -EINVAL; - writel(mtu + ETH_HLEN + VLAN_HLEN, - mac->adapter->regs + MAC_REG_LARGEFRAMELENGTH(mac->instance->index)); - - return 0; -} - -static const struct cmac_statistics *mac_update_statistics(struct cmac *mac, - int flag) -{ - struct mac_statistics st; - u32 *p = (u32 *) & st, i; - - writel(0, - mac->adapter->regs + MAC_REG_RMCNT(mac->instance->index)); - - for (i = 0; i < sizeof(st) / sizeof(u32); i++) - *p++ = readl(mac->adapter->regs - + MAC_REG_RMDATA(mac->instance->index)); - - /* XXX convert stats */ - return &mac->stats; -} - -static void mac_destroy(struct cmac *mac) -{ - kfree(mac); -} - -static struct cmac_ops chelsio_mac_ops = { - .destroy = mac_destroy, - .reset = mac_reset, - .interrupt_enable = mac_intr_enable, - .interrupt_disable = mac_intr_disable, - .interrupt_clear = mac_intr_clear, - .enable = mac_enable, - .disable = mac_disable, - .set_mtu = mac_set_mtu, - .set_rx_mode = mac_set_rx_mode, - .set_speed_duplex_fc = mac_set_speed_duplex_fc, - .macaddress_get = mac_get_address, - .statistics_update = mac_update_statistics, -}; - -static struct cmac *mac_create(adapter_t *adapter, int index) -{ - struct cmac *mac; - u32 data32; - - if (index >= 4) - return NULL; - - mac = kzalloc(sizeof(*mac) + sizeof(cmac_instance), GFP_KERNEL); - if (!mac) - return NULL; - - mac->ops = &chelsio_mac_ops; - mac->instance = (cmac_instance *) (mac + 1); - - mac->instance->index = index; - mac->adapter = adapter; - - data32 = readl(adapter->regs + MAC_REG_CSR(mac->instance->index)); - data32 &= ~(F_MAC_RESET | F_MAC_PROMISC | F_MAC_PROMISC | - F_MAC_LB_ENABLE | F_MAC_RX_ENABLE | F_MAC_TX_ENABLE); - data32 |= F_MAC_JUMBO_ENABLE; - writel(data32, adapter->regs + MAC_REG_CSR(mac->instance->index)); - - /* Initialize the random backoff seed. 
*/ - data32 = 0x55aa + (3 * index); - writel(data32, - adapter->regs + MAC_REG_GMRANDBACKOFFSEED(mac->instance->index)); - - /* Check to see if the mac address needs to be set manually. */ - data32 = readl(adapter->regs + MAC_REG_IDLO(mac->instance->index)); - if (data32 == 0 || data32 == 0xffffffff) { - /* - * Add a default MAC address if we can't read one. - */ - writel(0x43FFFFFF - index, - adapter->regs + MAC_REG_IDLO(mac->instance->index)); - writel(0x0007, - adapter->regs + MAC_REG_IDHI(mac->instance->index)); - } - - (void) mac_set_mtu(mac, 1500); - return mac; -} - -const struct gmac t1_chelsio_mac_ops = { - .create = mac_create -}; diff --git a/drivers/net/chelsio/sge.c b/drivers/net/chelsio/sge.c index e4f874a70fe5d562e3f95e2687838f06bb899ca8..ffa7e649a6ef37514333cd225ab07c6e1fe61d12 100644 --- a/drivers/net/chelsio/sge.c +++ b/drivers/net/chelsio/sge.c @@ -1620,23 +1620,20 @@ static int process_pure_responses(struct adapter *adapter) * or protection from interrupts as data interrupts are off at this point and * other adapter interrupts do not interfere. */ -int t1_poll(struct net_device *dev, int *budget) +int t1_poll(struct napi_struct *napi, int budget) { - struct adapter *adapter = dev->priv; + struct adapter *adapter = container_of(napi, struct adapter, napi); + struct net_device *dev = adapter->port[0].dev; int work_done; - work_done = process_responses(adapter, min(*budget, dev->quota)); - *budget -= work_done; - dev->quota -= work_done; - - if (unlikely(responses_pending(adapter))) - return 1; - - netif_rx_complete(dev); - writel(adapter->sge->respQ.cidx, adapter->regs + A_SG_SLEEPING); - - return 0; + work_done = process_responses(adapter, budget); + if (likely(!responses_pending(adapter))) { + netif_rx_complete(dev, napi); + writel(adapter->sge->respQ.cidx, + adapter->regs + A_SG_SLEEPING); + } + return work_done; } /* @@ -1653,13 +1650,13 @@ irqreturn_t t1_interrupt(int irq, void *data) writel(F_PL_INTR_SGE_DATA, adapter->regs + A_PL_CAUSE); - if (__netif_rx_schedule_prep(dev)) { + if (napi_schedule_prep(&adapter->napi)) { if (process_pure_responses(adapter)) - __netif_rx_schedule(dev); + __netif_rx_schedule(dev, &adapter->napi); else { /* no data, no NAPI needed */ writel(sge->respQ.cidx, adapter->regs + A_SG_SLEEPING); - netif_poll_enable(dev); /* undo schedule_prep */ + napi_enable(&adapter->napi); /* undo schedule_prep */ } } return IRQ_HANDLED; diff --git a/drivers/net/chelsio/sge.h b/drivers/net/chelsio/sge.h index d132a0ef2a2204361ec3f7a16635d9058bf25c29..713d9c55f24dd6be1b57c62460a23f108677ac73 100644 --- a/drivers/net/chelsio/sge.h +++ b/drivers/net/chelsio/sge.h @@ -77,7 +77,7 @@ int t1_sge_configure(struct sge *, struct sge_params *); int t1_sge_set_coalesce_params(struct sge *, struct sge_params *); void t1_sge_destroy(struct sge *); irqreturn_t t1_interrupt(int irq, void *cookie); -int t1_poll(struct net_device *, int *); +int t1_poll(struct napi_struct *, int); int t1_start_xmit(struct sk_buff *skb, struct net_device *dev); void t1_set_vlan_accel(struct adapter *adapter, int on_off); diff --git a/drivers/net/chelsio/subr.c b/drivers/net/chelsio/subr.c index 7de9a611e1f7dac5d4582f70a968119797d00198..dc50151bed811c01610a1bbe632c678812b53c70 100644 --- a/drivers/net/chelsio/subr.c +++ b/drivers/net/chelsio/subr.c @@ -884,7 +884,7 @@ static int asic_slow_intr(adapter_t *adapter) if (cause & F_PL_INTR_PCIX) t1_pci_intr_handler(adapter); if (cause & F_PL_INTR_EXT) - t1_elmer0_ext_intr_handler(adapter); + t1_elmer0_ext_intr(adapter); /* Clear the interrupts just 
processed. */ writel(cause, adapter->regs + A_PL_CAUSE); diff --git a/drivers/net/cris/eth_v10.c b/drivers/net/cris/eth_v10.c index 5bdf5ca85a65b8629446e1e0b243c22717ea59bd..314b2f68f78fc14f6140aa743aca4910c1da7a93 100644 --- a/drivers/net/cris/eth_v10.c +++ b/drivers/net/cris/eth_v10.c @@ -618,12 +618,8 @@ e100_set_mac_address(struct net_device *dev, void *p) /* show it in the log as well */ - printk(KERN_INFO "%s: changed MAC to ", dev->name); - - for (i = 0; i < 5; i++) - printk("%02X:", dev->dev_addr[i]); - - printk("%02X\n", dev->dev_addr[i]); + printk(KERN_INFO "%s: changed MAC to %s\n", + dev->name, print_mac(mac, dev->dev_addr)); spin_unlock(&np->lock); diff --git a/drivers/net/cs89x0.c b/drivers/net/cs89x0.c index 9774bb1b3e80a34ef486e9357ace4ba72eecdb6c..571750975137a3adb5ef7d034e42f72ad3341461 100644 --- a/drivers/net/cs89x0.c +++ b/drivers/net/cs89x0.c @@ -516,8 +516,8 @@ cs89x0_probe1(struct net_device *dev, int ioaddr, int modular) unsigned rev_type = 0; int eeprom_buff[CHKSUM_LEN]; int retval; + DECLARE_MAC_BUF(mac); - SET_MODULE_OWNER(dev); /* Initialize the device structure. */ if (!modular) { memset(lp, 0, sizeof(*lp)); @@ -806,7 +806,7 @@ cs89x0_probe1(struct net_device *dev, int ioaddr, int modular) i = cs8900_irq_map[0]; #else /* Translate the IRQ using the IRQ mapping table. */ - if (i >= sizeof(cs8900_irq_map)/sizeof(cs8900_irq_map[0])) + if (i >= ARRAY_SIZE(cs8900_irq_map)) printk("\ncs89x0: invalid ISA interrupt number %d\n", i); else i = cs8900_irq_map[i]; @@ -841,11 +841,7 @@ cs89x0_probe1(struct net_device *dev, int ioaddr, int modular) } /* print the ethernet address. */ - printk(", MAC"); - for (i = 0; i < ETH_ALEN; i++) - { - printk("%c%02x", i ? ':' : ' ', dev->dev_addr[i]); - } + printk(", MAC %s", print_mac(mac, dev->dev_addr)); dev->open = net_open; dev->stop = net_close; @@ -1247,11 +1243,11 @@ write_irq(struct net_device *dev, int chip_type, int irq) if (chip_type == CS8900) { /* Search the mapping table for the corresponding IRQ pin. 
*/ - for (i = 0; i != sizeof(cs8900_irq_map)/sizeof(cs8900_irq_map[0]); i++) + for (i = 0; i != ARRAY_SIZE(cs8900_irq_map); i++) if (cs8900_irq_map[i] == irq) break; /* Not found */ - if (i == sizeof(cs8900_irq_map)/sizeof(cs8900_irq_map[0])) + if (i == ARRAY_SIZE(cs8900_irq_map)) i = 3; writereg(dev, PP_CS8900_ISAINT, i); } else { @@ -1807,17 +1803,15 @@ static int set_mac_address(struct net_device *dev, void *p) int i; struct sockaddr *addr = p; - if (netif_running(dev)) return -EBUSY; memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); if (net_debug) { - printk("%s: Setting MAC address to ", dev->name); - for (i = 0; i < dev->addr_len; i++) - printk(" %2.2x", dev->dev_addr[i]); - printk(".\n"); + DECLARE_MAC_BUF(mac); + printk("%s: Setting MAC address to %s.\n", + dev->name, print_mac(mac, dev->dev_addr)); } /* set the Ethernet address */ for (i=0; i < ETH_ALEN/2; i++) diff --git a/drivers/net/cxgb3/adapter.h b/drivers/net/cxgb3/adapter.h index 20e887de2545cb2951baa398402e2ae3c20a966f..044261703381a86439adcdab0f4808834cd1aa09 100644 --- a/drivers/net/cxgb3/adapter.h +++ b/drivers/net/cxgb3/adapter.h @@ -49,11 +49,13 @@ typedef irqreturn_t(*intr_handler_t) (int, void *); struct vlan_group; - struct adapter; +struct sge_qset; + struct port_info { struct adapter *adapter; struct vlan_group *vlan_grp; + struct sge_qset *qs; const struct port_type_info *port_type; u8 port_id; u8 rx_csum_offload; @@ -173,10 +175,12 @@ enum { /* per port SGE statistics */ }; struct sge_qset { /* an SGE queue set */ + struct adapter *adap; + struct napi_struct napi; struct sge_rspq rspq; struct sge_fl fl[SGE_RXQ_PER_SET]; struct sge_txq txq[SGE_TXQ_PER_SET]; - struct net_device *netdev; /* associated net device */ + struct net_device *netdev; unsigned long txq_stopped; /* which Tx queues are stopped */ struct timer_list tx_reclaim_timer; /* reclaims TX buffers */ unsigned long port_stats[SGE_PSTAT_MAX]; @@ -221,12 +225,6 @@ struct adapter { struct delayed_work adap_check_task; struct work_struct ext_intr_handler_task; - /* - * Dummy netdevices are needed when using multiple receive queues with - * NAPI as each netdevice can service only one queue. - */ - struct net_device *dummy_netdev[SGE_QSETS - 1]; - struct dentry *debugfs_root; struct mutex mdio_lock; @@ -253,12 +251,6 @@ static inline struct port_info *adap2pinfo(struct adapter *adap, int idx) return netdev_priv(adap->port[idx]); } -/* - * We use the spare atalk_ptr to map a net device to its SGE queue set. - * This is a macro so it can be used as l-value. 
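Note: the cs89x0 and cris hunks above replace hand-rolled per-byte MAC printing with DECLARE_MAC_BUF()/print_mac(), and open-coded sizeof(a)/sizeof(a[0]) with ARRAY_SIZE(). A small example of both helpers; my_irq_map and the function names are made up for illustration.

#include <linux/if_ether.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>

static void my_report_mac(struct net_device *dev)
{
	/* DECLARE_MAC_BUF() puts an 18-byte scratch buffer on the stack;
	 * print_mac() formats dev_addr as "xx:xx:xx:xx:xx:xx" into it and
	 * returns the buffer, so one printk covers the whole address. */
	DECLARE_MAC_BUF(mac);

	printk(KERN_INFO "%s: MAC %s\n", dev->name,
	       print_mac(mac, dev->dev_addr));
}

/* ARRAY_SIZE() is the kernel's spelling of sizeof(a)/sizeof((a)[0]) */
static const int my_irq_map[] = { 10, 11, 12, 5 };

static int my_map_irq(unsigned int i)
{
	return i < ARRAY_SIZE(my_irq_map) ? my_irq_map[i] : -1;
}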
- */ -#define dev2qset(netdev) ((netdev)->atalk_ptr) - #define OFFLOAD_DEVMAP_BIT 15 #define tdev2adap(d) container_of(d, struct adapter, tdev) @@ -284,7 +276,7 @@ int t3_mgmt_tx(struct adapter *adap, struct sk_buff *skb); void t3_update_qset_coalesce(struct sge_qset *qs, const struct qset_params *p); int t3_sge_alloc_qset(struct adapter *adapter, unsigned int id, int nports, int irq_vec_idx, const struct qset_params *p, - int ntxq, struct net_device *netdev); + int ntxq, struct net_device *dev); int t3_get_desc(const struct sge_qset *qs, unsigned int qnum, unsigned int idx, unsigned char *data); irqreturn_t t3_sge_intr_msix(int irq, void *cookie); diff --git a/drivers/net/cxgb3/common.h b/drivers/net/cxgb3/common.h index 2129210a67c11803aedefd49f06b7f15e72d1a54..99c75d30f67a8538ddd921646666d39d53901295 100644 --- a/drivers/net/cxgb3/common.h +++ b/drivers/net/cxgb3/common.h @@ -97,6 +97,7 @@ enum { MAX_NPORTS = 2, /* max # of ports */ MAX_FRAME_SIZE = 10240, /* max MAC frame size, including header + FCS */ EEPROMSIZE = 8192, /* Serial EEPROM size */ + SERNUM_LEN = 16, /* Serial # length */ RSS_TABLE_SIZE = 64, /* size of RSS lookup and mapping tables */ TCB_SIZE = 128, /* TCB size */ NMTUS = 16, /* size of MTU table */ @@ -104,7 +105,7 @@ enum { PROTO_SRAM_LINES = 128, /* size of TP sram */ }; -#define MAX_RX_COALESCING_LEN 16224U +#define MAX_RX_COALESCING_LEN 12288U enum { PAUSE_RX = 1 << 0, @@ -126,8 +127,8 @@ enum { /* adapter interrupt-maintained statistics */ enum { TP_VERSION_MAJOR = 1, - TP_VERSION_MINOR = 0, - TP_VERSION_MICRO = 44 + TP_VERSION_MINOR = 1, + TP_VERSION_MICRO = 0 }; #define S_TP_VERSION_MAJOR 16 @@ -167,8 +168,8 @@ enum { }; struct sg_ent { /* SGE scatter/gather entry */ - u32 len[2]; - u64 addr[2]; + __be32 len[2]; + __be64 addr[2]; }; #ifndef SGE_NUM_GENBITS @@ -391,6 +392,7 @@ struct vpd_params { unsigned int uclk; unsigned int mdc; unsigned int mem_timing; + u8 sn[SERNUM_LEN + 1]; u8 eth_base[6]; u8 port_type[MAX_NPORTS]; unsigned short xauicfg[2]; @@ -436,6 +438,7 @@ enum { /* chip revisions */ T3_REV_A = 0, T3_REV_B = 2, T3_REV_B2 = 3, + T3_REV_C = 4, }; struct trace_params { @@ -507,9 +510,11 @@ struct cmac { unsigned int tx_xcnt; u64 tx_mcnt; unsigned int rx_xcnt; + unsigned int rx_ocnt; u64 rx_mcnt; unsigned int toggle_cnt; unsigned int txen; + u64 rx_pause; struct mac_stats stats; }; @@ -687,7 +692,7 @@ int t3_read_flash(struct adapter *adapter, unsigned int addr, unsigned int nwords, u32 *data, int byte_oriented); int t3_load_fw(struct adapter *adapter, const u8 * fw_data, unsigned int size); int t3_get_fw_version(struct adapter *adapter, u32 *vers); -int t3_check_fw_version(struct adapter *adapter); +int t3_check_fw_version(struct adapter *adapter, int *must_load); int t3_init_hw(struct adapter *adapter, u32 fw_params); void mac_prep(struct cmac *mac, struct adapter *adapter, int index); void early_hw_init(struct adapter *adapter, const struct adapter_info *ai); diff --git a/drivers/net/cxgb3/cxgb3_ctl_defs.h b/drivers/net/cxgb3/cxgb3_ctl_defs.h index 2095ddacff786902b1b0b6d8fb1389cb4231d5f0..6c4f32066919b89271678151d763d9278eed7368 100644 --- a/drivers/net/cxgb3/cxgb3_ctl_defs.h +++ b/drivers/net/cxgb3/cxgb3_ctl_defs.h @@ -33,27 +33,29 @@ #define _CXGB3_OFFLOAD_CTL_DEFS_H enum { - GET_MAX_OUTSTANDING_WR, - GET_TX_MAX_CHUNK, - GET_TID_RANGE, - GET_STID_RANGE, - GET_RTBL_RANGE, - GET_L2T_CAPACITY, - GET_MTUS, - GET_WR_LEN, - GET_IFF_FROM_MAC, - GET_DDP_PARAMS, - GET_PORTS, - - ULP_ISCSI_GET_PARAMS, - ULP_ISCSI_SET_PARAMS, - - RDMA_GET_PARAMS, - 
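Note: the struct sg_ent change above, from u32/u64 to __be32/__be64, marks fields the hardware reads in big-endian byte order so that sparse can flag any access that forgets a cpu_to_be*()/be*_to_cpu() conversion. Roughly, with my_sg_ent as an invented stand-in:

#include <asm/byteorder.h>
#include <linux/types.h>

struct my_sg_ent {		/* descriptor laid out as the NIC expects it */
	__be32 len;
	__be64 addr;
};

static void my_fill_sg(struct my_sg_ent *e, u64 dma_addr, u32 len)
{
	/* explicit conversions keep the endianness checker quiet and make
	 * the CPU-to-device byte swap visible at the assignment */
	e->len  = cpu_to_be32(len);
	e->addr = cpu_to_be64(dma_addr);
}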
RDMA_CQ_OP, - RDMA_CQ_SETUP, - RDMA_CQ_DISABLE, - RDMA_CTRL_QP_SETUP, - RDMA_GET_MEM, + GET_MAX_OUTSTANDING_WR = 0, + GET_TX_MAX_CHUNK = 1, + GET_TID_RANGE = 2, + GET_STID_RANGE = 3, + GET_RTBL_RANGE = 4, + GET_L2T_CAPACITY = 5, + GET_MTUS = 6, + GET_WR_LEN = 7, + GET_IFF_FROM_MAC = 8, + GET_DDP_PARAMS = 9, + GET_PORTS = 10, + + ULP_ISCSI_GET_PARAMS = 11, + ULP_ISCSI_SET_PARAMS = 12, + + RDMA_GET_PARAMS = 13, + RDMA_CQ_OP = 14, + RDMA_CQ_SETUP = 15, + RDMA_CQ_DISABLE = 16, + RDMA_CTRL_QP_SETUP = 17, + RDMA_GET_MEM = 18, + + GET_RX_PAGE_INFO = 50, }; /* @@ -161,4 +163,12 @@ struct rdma_ctrlqp_setup { unsigned long long base_addr; unsigned int size; }; + +/* + * Offload TX/RX page information. + */ +struct ofld_page_info { + unsigned int page_size; /* Page size, should be a power of 2 */ + unsigned int num; /* Number of pages */ +}; #endif /* _CXGB3_OFFLOAD_CTL_DEFS_H */ diff --git a/drivers/net/cxgb3/cxgb3_defs.h b/drivers/net/cxgb3/cxgb3_defs.h index 483a594210a725d065227737b392adec46e99e11..45e92164c26047765ed0aacb6cdb76025042c9b2 100644 --- a/drivers/net/cxgb3/cxgb3_defs.h +++ b/drivers/net/cxgb3/cxgb3_defs.h @@ -79,9 +79,17 @@ static inline struct t3c_tid_entry *lookup_tid(const struct tid_info *t, static inline struct t3c_tid_entry *lookup_stid(const struct tid_info *t, unsigned int tid) { + union listen_entry *e; + if (tid < t->stid_base || tid >= t->stid_base + t->nstids) return NULL; - return &(stid2entry(t, tid)->t3c_tid); + + e = stid2entry(t, tid); + if ((void *)e->next >= (void *)t->tid_tab && + (void *)e->next < (void *)&t->atid_tab[t->natids]) + return NULL; + + return &e->t3c_tid; } /* @@ -90,9 +98,17 @@ static inline struct t3c_tid_entry *lookup_stid(const struct tid_info *t, static inline struct t3c_tid_entry *lookup_atid(const struct tid_info *t, unsigned int tid) { + union active_open_entry *e; + if (tid < t->atid_base || tid >= t->atid_base + t->natids) return NULL; - return &(atid2entry(t, tid)->t3c_tid); + + e = atid2entry(t, tid); + if ((void *)e->next >= (void *)t->tid_tab && + (void *)e->next < (void *)&t->atid_tab[t->natids]) + return NULL; + + return &e->t3c_tid; } int process_rx(struct t3cdev *dev, struct sk_buff **skbs, int n); diff --git a/drivers/net/cxgb3/cxgb3_main.c b/drivers/net/cxgb3/cxgb3_main.c index 5ab319cfe5de8dc0000376ee9291c70474b50ed3..61ffc925eae777b065c37dea73c9b938e49ccd8e 100644 --- a/drivers/net/cxgb3/cxgb3_main.c +++ b/drivers/net/cxgb3/cxgb3_main.c @@ -339,49 +339,17 @@ static void setup_rss(struct adapter *adap) V_RRCPLCPUSIZE(6), cpus, rspq_map); } -/* - * If we have multiple receive queues per port serviced by NAPI we need one - * netdevice per queue as NAPI operates on netdevices. We already have one - * netdevice, namely the one associated with the interface, so we use dummy - * ones for any additional queues. Note that these netdevices exist purely - * so that NAPI has something to work with, they do not represent network - * ports and are not registered. 
- */ -static int init_dummy_netdevs(struct adapter *adap) +static void init_napi(struct adapter *adap) { - int i, j, dummy_idx = 0; - struct net_device *nd; - - for_each_port(adap, i) { - struct net_device *dev = adap->port[i]; - const struct port_info *pi = netdev_priv(dev); - - for (j = 0; j < pi->nqsets - 1; j++) { - if (!adap->dummy_netdev[dummy_idx]) { - struct port_info *p; - - nd = alloc_netdev(sizeof(*p), "", ether_setup); - if (!nd) - goto free_all; + int i; - p = netdev_priv(nd); - p->adapter = adap; - nd->weight = 64; - set_bit(__LINK_STATE_START, &nd->state); - adap->dummy_netdev[dummy_idx] = nd; - } - strcpy(adap->dummy_netdev[dummy_idx]->name, dev->name); - dummy_idx++; - } - } - return 0; + for (i = 0; i < SGE_QSETS; i++) { + struct sge_qset *qs = &adap->sge.qs[i]; -free_all: - while (--dummy_idx >= 0) { - free_netdev(adap->dummy_netdev[dummy_idx]); - adap->dummy_netdev[dummy_idx] = NULL; + if (qs->adap) + netif_napi_add(qs->netdev, &qs->napi, qs->napi.poll, + 64); } - return -ENOMEM; } /* @@ -392,20 +360,18 @@ static int init_dummy_netdevs(struct adapter *adap) static void quiesce_rx(struct adapter *adap) { int i; - struct net_device *dev; - for_each_port(adap, i) { - dev = adap->port[i]; - while (test_bit(__LINK_STATE_RX_SCHED, &dev->state)) - msleep(1); - } + for (i = 0; i < SGE_QSETS; i++) + if (adap->sge.qs[i].adap) + napi_disable(&adap->sge.qs[i].napi); +} - for (i = 0; i < ARRAY_SIZE(adap->dummy_netdev); i++) { - dev = adap->dummy_netdev[i]; - if (dev) - while (test_bit(__LINK_STATE_RX_SCHED, &dev->state)) - msleep(1); - } +static void enable_all_napi(struct adapter *adap) +{ + int i; + for (i = 0; i < SGE_QSETS; i++) + if (adap->sge.qs[i].adap) + napi_enable(&adap->sge.qs[i].napi); } /** @@ -418,7 +384,7 @@ static void quiesce_rx(struct adapter *adap) */ static int setup_sge_qsets(struct adapter *adap) { - int i, j, err, irq_idx = 0, qset_idx = 0, dummy_dev_idx = 0; + int i, j, err, irq_idx = 0, qset_idx = 0; unsigned int ntxq = SGE_TXQ_PER_SET; if (adap->params.rev > 0 && !(adap->flags & USING_MSI)) @@ -426,15 +392,14 @@ static int setup_sge_qsets(struct adapter *adap) for_each_port(adap, i) { struct net_device *dev = adap->port[i]; - const struct port_info *pi = netdev_priv(dev); + struct port_info *pi = netdev_priv(dev); + pi->qs = &adap->sge.qs[pi->first_qset]; for (j = 0; j < pi->nqsets; ++j, ++qset_idx) { err = t3_sge_alloc_qset(adap, qset_idx, 1, (adap->flags & USING_MSIX) ? qset_idx + 1 : irq_idx, - &adap->params.sge.qset[qset_idx], ntxq, - j == 0 ? 
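Note: init_napi(), enable_all_napi() and the new quiesce_rx() above rely on one net_device being able to host several napi_struct instances, one per queue set, which is what makes the dummy netdevices unnecessary. A rough sketch of that registration and enable/disable sequence; the my_* names are illustrative.

#include <linux/netdevice.h>

#define MY_NR_QSETS	4
#define MY_NAPI_WEIGHT	64

struct my_qset {
	struct napi_struct napi;
	/* per-queue rings, free lists, stats ... */
};

static struct my_qset my_qsets[MY_NR_QSETS];

static int my_poll(struct napi_struct *napi, int budget)
{
	return 0;	/* per-queue RX processing, as in the poll sketch above */
}

static void my_register_napi(struct net_device *dev)
{
	int i;

	/* every queue set gets its own NAPI context on the same netdev */
	for (i = 0; i < MY_NR_QSETS; i++)
		netif_napi_add(dev, &my_qsets[i].napi, my_poll, MY_NAPI_WEIGHT);
}

static void my_rx_enable(void)
{
	int i;

	for (i = 0; i < MY_NR_QSETS; i++)
		napi_enable(&my_qsets[i].napi);
}

static void my_rx_quiesce(void)
{
	int i;

	/* napi_disable() waits for any poll already running to finish */
	for (i = 0; i < MY_NR_QSETS; i++)
		napi_disable(&my_qsets[i].napi);
}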
dev : - adap-> dummy_netdev[dummy_dev_idx++]); + &adap->params.sge.qset[qset_idx], ntxq, dev); if (err) { t3_free_sge_resources(adap); return err; @@ -768,11 +733,14 @@ static inline char t3rev2char(struct adapter *adapter) case T3_REV_B2: rev = 'b'; break; + case T3_REV_C: + rev = 'c'; + break; } return rev; } -int update_tpsram(struct adapter *adap) +static int update_tpsram(struct adapter *adap) { const struct firmware *tpsram; char buf[64]; @@ -828,15 +796,16 @@ int update_tpsram(struct adapter *adap) */ static int cxgb_up(struct adapter *adap) { - int err = 0; + int err; int must_load; if (!(adap->flags & FULL_INIT_DONE)) { - err = t3_check_fw_version(adap); - if (err == -EINVAL) + err = t3_check_fw_version(adap, &must_load); + if (err == -EINVAL) { err = upgrade_fw(adap); - if (err) - goto out; + if (err && must_load) + goto out; + } err = t3_check_tpsram_version(adap, &must_load); if (err == -EINVAL) { @@ -845,21 +814,18 @@ static int cxgb_up(struct adapter *adap) goto out; } - err = init_dummy_netdevs(adap); - if (err) - goto out; - err = t3_init_hw(adap, 0); if (err) goto out; t3_write_reg(adap, A_ULPRX_TDDP_PSZ, V_HPZ0(PAGE_SHIFT - 12)); - + err = setup_sge_qsets(adap); if (err) goto out; setup_rss(adap); + init_napi(adap); adap->flags |= FULL_INIT_DONE; } @@ -886,6 +852,7 @@ static int cxgb_up(struct adapter *adap) adap->name, adap))) goto irq_err; + enable_all_napi(adap); t3_sge_start(adap); t3_intr_enable(adap); @@ -944,7 +911,7 @@ static int offload_open(struct net_device *dev) struct adapter *adapter = pi->adapter; struct t3cdev *tdev = dev2t3cdev(dev); int adap_up = adapter->open_device_map & PORT_MASK; - int err = 0; + int err; if (test_and_set_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map)) return 0; @@ -1012,8 +979,10 @@ static int cxgb_open(struct net_device *dev) int other_ports = adapter->open_device_map & PORT_MASK; int err; - if (!adapter->open_device_map && (err = cxgb_up(adapter)) < 0) + if (!adapter->open_device_map && (err = cxgb_up(adapter)) < 0) { + quiesce_rx(adapter); return err; + } set_bit(pi->port_id, &adapter->open_device_map); if (is_offload(adapter) && !ofld_disable) { @@ -1162,9 +1131,14 @@ static char stats_strings[][ETH_GSTRING_LEN] = { }; -static int get_stats_count(struct net_device *dev) +static int get_sset_count(struct net_device *dev, int sset) { - return ARRAY_SIZE(stats_strings); + switch (sset) { + case ETH_SS_STATS: + return ARRAY_SIZE(stats_strings); + default: + return -EOPNOTSUPP; + } } #define T3_REGMAP_SIZE (3 * 1024) @@ -1601,7 +1575,7 @@ static int set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, struct adapter *adapter = pi->adapter; u32 aligned_offset, aligned_len, *p; u8 *buf; - int err = 0; + int err; if (eeprom->magic != EEPROM_MAGIC) return -EINVAL; @@ -1665,20 +1639,17 @@ static const struct ethtool_ops cxgb_ethtool_ops = { .set_pauseparam = set_pauseparam, .get_rx_csum = get_rx_csum, .set_rx_csum = set_rx_csum, - .get_tx_csum = ethtool_op_get_tx_csum, .set_tx_csum = ethtool_op_set_tx_csum, - .get_sg = ethtool_op_get_sg, .set_sg = ethtool_op_set_sg, .get_link = ethtool_op_get_link, .get_strings = get_strings, .phys_id = cxgb3_phys_id, .nway_reset = restart_autoneg, - .get_stats_count = get_stats_count, + .get_sset_count = get_sset_count, .get_ethtool_stats = get_stats, .get_regs_len = get_regs_len, .get_regs = get_regs, .get_wol = get_wol, - .get_tso = ethtool_op_get_tso, .set_tso = ethtool_op_set_tso, }; @@ -1798,7 +1769,6 @@ static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr) 
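Note: the ethtool changes above replace get_stats_count() with get_sset_count(), a single callback that reports the size of whichever string set (statistics, self-test names, ...) the core asks about. A minimal sketch of the new callback shape; my_stats_strings and the ops name are invented for the example.

#include <linux/errno.h>
#include <linux/ethtool.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>

static const char my_stats_strings[][ETH_GSTRING_LEN] = {
	"rx_packets", "tx_packets", "rx_errors",
};

static int my_get_sset_count(struct net_device *dev, int sset)
{
	switch (sset) {
	case ETH_SS_STATS:
		return ARRAY_SIZE(my_stats_strings);
	default:
		return -EOPNOTSUPP;	/* a string set we do not provide */
	}
}

static const struct ethtool_ops my_ethtool_ops = {
	.get_sset_count		= my_get_sset_count,
	/* .get_strings and .get_ethtool_stats serve the same table */
};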
} case CHELSIO_SET_QSET_NUM:{ struct ch_reg edata; - struct port_info *pi = netdev_priv(dev); unsigned int i, first_qset = 0, other_qsets = 0; if (!capable(CAP_NET_ADMIN)) @@ -1830,7 +1800,6 @@ static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr) } case CHELSIO_GET_QSET_NUM:{ struct ch_reg edata; - struct port_info *pi = netdev_priv(dev); edata.cmd = CHELSIO_GET_QSET_NUM; edata.val = pi->nqsets; @@ -2332,6 +2301,10 @@ void t3_fatal_err(struct adapter *adapter) if (adapter->flags & FULL_INIT_DONE) { t3_sge_stop(adapter); + t3_write_reg(adapter, A_XGM_TX_CTRL, 0); + t3_write_reg(adapter, A_XGM_RX_CTRL, 0); + t3_write_reg(adapter, XGM_REG(A_XGM_TX_CTRL, 1), 0); + t3_write_reg(adapter, XGM_REG(A_XGM_RX_CTRL, 1), 0); t3_intr_disable(adapter); } CH_ALERT(adapter, "encountered fatal error, operation suspended\n"); @@ -2391,10 +2364,12 @@ static void __devinit print_port_info(struct adapter *adap, (adap->flags & USING_MSIX) ? " MSI-X" : (adap->flags & USING_MSI) ? " MSI" : ""); if (adap->name == dev->name && adap->params.vpd.mclk) - printk(KERN_INFO "%s: %uMB CM, %uMB PMTX, %uMB PMRX\n", + printk(KERN_INFO + "%s: %uMB CM, %uMB PMTX, %uMB PMRX, S/N: %s\n", adap->name, t3_mc7_size(&adap->cm) >> 20, t3_mc7_size(&adap->pmtx) >> 20, - t3_mc7_size(&adap->pmrx) >> 20); + t3_mc7_size(&adap->pmrx) >> 20, + adap->params.vpd.sn); } } @@ -2490,7 +2465,6 @@ static int __devinit init_one(struct pci_dev *pdev, goto out_free_dev; } - SET_MODULE_OWNER(netdev); SET_NETDEV_DEV(netdev, &pdev->dev); adapter->port[i] = netdev; @@ -2524,7 +2498,6 @@ static int __devinit init_one(struct pci_dev *pdev, #ifdef CONFIG_NET_POLL_CONTROLLER netdev->poll_controller = cxgb_netpoll; #endif - netdev->weight = 64; SET_ETHTOOL_OPS(netdev, &cxgb_ethtool_ops); } @@ -2625,12 +2598,6 @@ static void __devexit remove_one(struct pci_dev *pdev) t3_free_sge_resources(adapter); cxgb_disable_msi(adapter); - for (i = 0; i < ARRAY_SIZE(adapter->dummy_netdev); i++) - if (adapter->dummy_netdev[i]) { - free_netdev(adapter->dummy_netdev[i]); - adapter->dummy_netdev[i] = NULL; - } - for_each_port(adapter, i) if (adapter->port[i]) free_netdev(adapter->port[i]); diff --git a/drivers/net/cxgb3/cxgb3_offload.c b/drivers/net/cxgb3/cxgb3_offload.c index bdff7baeb59da4d5683270d50a1cea9da5fcb845..bd25421bc12a67302eeeb6dcf81296c3c1d71ef2 100644 --- a/drivers/net/cxgb3/cxgb3_offload.c +++ b/drivers/net/cxgb3/cxgb3_offload.c @@ -57,7 +57,7 @@ static DEFINE_RWLOCK(adapter_list_lock); static LIST_HEAD(adapter_list); static const unsigned int MAX_ATIDS = 64 * 1024; -static const unsigned int ATID_BASE = 0x100000; +static const unsigned int ATID_BASE = 0x10000; static inline int offload_activated(struct t3cdev *tdev) { @@ -222,32 +222,32 @@ static int cxgb_rdma_ctl(struct adapter *adapter, unsigned int req, void *data) int ret = 0; switch (req) { - case RDMA_GET_PARAMS:{ - struct rdma_info *req = data; + case RDMA_GET_PARAMS: { + struct rdma_info *rdma = data; struct pci_dev *pdev = adapter->pdev; - req->udbell_physbase = pci_resource_start(pdev, 2); - req->udbell_len = pci_resource_len(pdev, 2); - req->tpt_base = + rdma->udbell_physbase = pci_resource_start(pdev, 2); + rdma->udbell_len = pci_resource_len(pdev, 2); + rdma->tpt_base = t3_read_reg(adapter, A_ULPTX_TPT_LLIMIT); - req->tpt_top = t3_read_reg(adapter, A_ULPTX_TPT_ULIMIT); - req->pbl_base = + rdma->tpt_top = t3_read_reg(adapter, A_ULPTX_TPT_ULIMIT); + rdma->pbl_base = t3_read_reg(adapter, A_ULPTX_PBL_LLIMIT); - req->pbl_top = t3_read_reg(adapter, A_ULPTX_PBL_ULIMIT); - 
req->rqt_base = t3_read_reg(adapter, A_ULPRX_RQ_LLIMIT); - req->rqt_top = t3_read_reg(adapter, A_ULPRX_RQ_ULIMIT); - req->kdb_addr = adapter->regs + A_SG_KDOORBELL; - req->pdev = pdev; + rdma->pbl_top = t3_read_reg(adapter, A_ULPTX_PBL_ULIMIT); + rdma->rqt_base = t3_read_reg(adapter, A_ULPRX_RQ_LLIMIT); + rdma->rqt_top = t3_read_reg(adapter, A_ULPRX_RQ_ULIMIT); + rdma->kdb_addr = adapter->regs + A_SG_KDOORBELL; + rdma->pdev = pdev; break; } case RDMA_CQ_OP:{ unsigned long flags; - struct rdma_cq_op *req = data; + struct rdma_cq_op *rdma = data; /* may be called in any context */ spin_lock_irqsave(&adapter->sge.reg_lock, flags); - ret = t3_sge_cqcntxt_op(adapter, req->id, req->op, - req->credits); + ret = t3_sge_cqcntxt_op(adapter, rdma->id, rdma->op, + rdma->credits); spin_unlock_irqrestore(&adapter->sge.reg_lock, flags); break; } @@ -274,15 +274,15 @@ static int cxgb_rdma_ctl(struct adapter *adapter, unsigned int req, void *data) break; } case RDMA_CQ_SETUP:{ - struct rdma_cq_setup *req = data; + struct rdma_cq_setup *rdma = data; spin_lock_irq(&adapter->sge.reg_lock); ret = - t3_sge_init_cqcntxt(adapter, req->id, - req->base_addr, req->size, + t3_sge_init_cqcntxt(adapter, rdma->id, + rdma->base_addr, rdma->size, ASYNC_NOTIF_RSPQ, - req->ovfl_mode, req->credits, - req->credit_thres); + rdma->ovfl_mode, rdma->credits, + rdma->credit_thres); spin_unlock_irq(&adapter->sge.reg_lock); break; } @@ -292,13 +292,13 @@ static int cxgb_rdma_ctl(struct adapter *adapter, unsigned int req, void *data) spin_unlock_irq(&adapter->sge.reg_lock); break; case RDMA_CTRL_QP_SETUP:{ - struct rdma_ctrlqp_setup *req = data; + struct rdma_ctrlqp_setup *rdma = data; spin_lock_irq(&adapter->sge.reg_lock); ret = t3_sge_init_ecntxt(adapter, FW_RI_SGEEC_START, 0, SGE_CNTXT_RDMA, ASYNC_NOTIF_RSPQ, - req->base_addr, req->size, + rdma->base_addr, rdma->size, FW_RI_TID_START, 1, 0); spin_unlock_irq(&adapter->sge.reg_lock); break; @@ -317,6 +317,8 @@ static int cxgb_offload_ctl(struct t3cdev *tdev, unsigned int req, void *data) struct iff_mac *iffmacp; struct ddp_params *ddpp; struct adap_ports *ports; + struct ofld_page_info *rx_page_info; + struct tp_params *tp = &adapter->params.tp; int i; switch (req) { @@ -382,6 +384,11 @@ static int cxgb_offload_ctl(struct t3cdev *tdev, unsigned int req, void *data) if (!offload_running(adapter)) return -EAGAIN; return cxgb_rdma_ctl(adapter, req, data); + case GET_RX_PAGE_INFO: + rx_page_info = data; + rx_page_info->page_size = tp->rx_pg_size; + rx_page_info->num = tp->rx_num_pgs; + break; default: return -EOPNOTSUPP; } @@ -687,10 +694,19 @@ static int do_cr(struct t3cdev *dev, struct sk_buff *skb) { struct cpl_pass_accept_req *req = cplhdr(skb); unsigned int stid = G_PASS_OPEN_TID(ntohl(req->tos_tid)); + struct tid_info *t = &(T3C_DATA(dev))->tid_maps; struct t3c_tid_entry *t3c_tid; + unsigned int tid = GET_TID(req); - t3c_tid = lookup_stid(&(T3C_DATA(dev))->tid_maps, stid); - if (t3c_tid->ctx && t3c_tid->client->handlers && + if (unlikely(tid >= t->ntids)) { + printk("%s: passive open TID %u too large\n", + dev->name, tid); + t3_fatal_err(tdev2adap(dev)); + return CPL_RET_BUF_DONE; + } + + t3c_tid = lookup_stid(t, stid); + if (t3c_tid && t3c_tid->ctx && t3c_tid->client->handlers && t3c_tid->client->handlers[CPL_PASS_ACCEPT_REQ]) { return t3c_tid->client->handlers[CPL_PASS_ACCEPT_REQ] (dev, skb, t3c_tid->ctx); @@ -772,16 +788,25 @@ static int do_act_establish(struct t3cdev *dev, struct sk_buff *skb) { struct cpl_act_establish *req = cplhdr(skb); unsigned int atid = 
G_PASS_OPEN_TID(ntohl(req->tos_tid)); + struct tid_info *t = &(T3C_DATA(dev))->tid_maps; struct t3c_tid_entry *t3c_tid; + unsigned int tid = GET_TID(req); - t3c_tid = lookup_atid(&(T3C_DATA(dev))->tid_maps, atid); + if (unlikely(tid >= t->ntids)) { + printk("%s: active establish TID %u too large\n", + dev->name, tid); + t3_fatal_err(tdev2adap(dev)); + return CPL_RET_BUF_DONE; + } + + t3c_tid = lookup_atid(t, atid); if (t3c_tid && t3c_tid->ctx && t3c_tid->client->handlers && t3c_tid->client->handlers[CPL_ACT_ESTABLISH]) { return t3c_tid->client->handlers[CPL_ACT_ESTABLISH] (dev, skb, t3c_tid->ctx); } else { printk(KERN_ERR "%s: received clientless CPL command 0x%x\n", - dev->name, CPL_PASS_ACCEPT_REQ); + dev->name, CPL_ACT_ESTABLISH); return CPL_RET_BUF_DONE | CPL_RET_BAD_MSG; } } diff --git a/drivers/net/cxgb3/regs.h b/drivers/net/cxgb3/regs.h index aa80313c922ef3913e5f85796c51921111505a45..5e1bc0dec5f185a0c146a283e112a6389a29693e 100644 --- a/drivers/net/cxgb3/regs.h +++ b/drivers/net/cxgb3/regs.h @@ -172,6 +172,14 @@ #define A_SG_INT_CAUSE 0x5c +#define S_HIPIODRBDROPERR 11 +#define V_HIPIODRBDROPERR(x) ((x) << S_HIPIODRBDROPERR) +#define F_HIPIODRBDROPERR V_HIPIODRBDROPERR(1U) + +#define S_LOPIODRBDROPERR 10 +#define V_LOPIODRBDROPERR(x) ((x) << S_LOPIODRBDROPERR) +#define F_LOPIODRBDROPERR V_LOPIODRBDROPERR(1U) + #define S_RSPQDISABLED 3 #define V_RSPQDISABLED(x) ((x) << S_RSPQDISABLED) #define F_RSPQDISABLED V_RSPQDISABLED(1U) @@ -1318,6 +1326,7 @@ #define V_D0_WEIGHT(x) ((x) << S_D0_WEIGHT) #define A_PM1_RX_CFG 0x5c0 +#define A_PM1_RX_MODE 0x5c4 #define A_PM1_RX_INT_ENABLE 0x5d8 @@ -1386,6 +1395,7 @@ #define A_PM1_RX_INT_CAUSE 0x5dc #define A_PM1_TX_CFG 0x5e0 +#define A_PM1_TX_MODE 0x5e4 #define A_PM1_TX_INT_ENABLE 0x5f8 diff --git a/drivers/net/cxgb3/sge.c b/drivers/net/cxgb3/sge.c index 58a5f60521ed87194db262b5a2577e7df90c02d7..994b5d6404df3568113b462b70d6d044b03b740e 100644 --- a/drivers/net/cxgb3/sge.c +++ b/drivers/net/cxgb3/sge.c @@ -79,7 +79,7 @@ enum { }; struct tx_desc { - u64 flit[TX_DESC_FLITS]; + __be64 flit[TX_DESC_FLITS]; }; struct rx_desc { @@ -544,7 +544,7 @@ static void *alloc_ring(struct pci_dev *pdev, size_t nelem, size_t elem_size, * as HW contexts, packet buffers, and descriptor rings. Traffic to the * queue set must be quiesced prior to calling this. 
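Note: both do_cr() and do_act_establish() above gain the same defensive step: a TID delivered by the hardware is range-checked against the table size before it is used as an index, and lookup_stid()/lookup_atid() additionally verify that the slot holds an in-use entry rather than a free-list link. A generic sketch of validating a device-supplied index; my_table and my_lookup are illustrative names.

#include <linux/kernel.h>

struct my_entry {
	void *ctx;			/* NULL while the slot is unused */
};

struct my_table {
	struct my_entry *entries;
	unsigned int nentries;
};

/* returns NULL when the hardware-supplied index cannot be trusted,
 * so the caller can escalate (log, drop, or declare a fatal error) */
static struct my_entry *my_lookup(struct my_table *t, unsigned int idx)
{
	struct my_entry *e;

	if (unlikely(idx >= t->nentries)) {
		printk(KERN_ERR "index %u out of range (table size %u)\n",
		       idx, t->nentries);
		return NULL;
	}

	e = &t->entries[idx];
	return e->ctx ? e : NULL;	/* reject slots that are not in use */
}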
*/ -void t3_free_qset(struct adapter *adapter, struct sge_qset *q) +static void t3_free_qset(struct adapter *adapter, struct sge_qset *q) { int i; struct pci_dev *pdev = adapter->pdev; @@ -591,9 +591,6 @@ void t3_free_qset(struct adapter *adapter, struct sge_qset *q) q->rspq.desc, q->rspq.phys_addr); } - if (q->netdev) - q->netdev->atalk_ptr = NULL; - memset(q, 0, sizeof(*q)); } @@ -907,8 +904,8 @@ static void write_wr_hdr_sgl(unsigned int ndesc, struct sk_buff *skb, const struct sge_txq *q, const struct sg_ent *sgl, unsigned int flits, unsigned int sgl_flits, - unsigned int gen, unsigned int wr_hi, - unsigned int wr_lo) + unsigned int gen, __be32 wr_hi, + __be32 wr_lo) { struct work_request_hdr *wrp = (struct work_request_hdr *)d; struct tx_sw_desc *sd = &q->sdesc[pidx]; @@ -1074,7 +1071,7 @@ int t3_eth_xmit(struct sk_buff *skb, struct net_device *dev) unsigned int ndesc, pidx, credits, gen, compl; const struct port_info *pi = netdev_priv(dev); struct adapter *adap = pi->adapter; - struct sge_qset *qs = dev2qset(dev); + struct sge_qset *qs = pi->qs; struct sge_txq *q = &qs->txq[TXQ_ETH]; /* @@ -1182,8 +1179,8 @@ int t3_eth_xmit(struct sk_buff *skb, struct net_device *dev) * * Writes a packet as immediate data into a Tx descriptor. The packet * contains a work request at its beginning. We must write the packet - * carefully so the SGE doesn't read accidentally before it's written in - * its entirety. + * carefully so the SGE doesn't read it accidentally before it's written + * in its entirety. */ static inline void write_imm(struct tx_desc *d, struct sk_buff *skb, unsigned int len, unsigned int gen) @@ -1191,7 +1188,11 @@ static inline void write_imm(struct tx_desc *d, struct sk_buff *skb, struct work_request_hdr *from = (struct work_request_hdr *)skb->data; struct work_request_hdr *to = (struct work_request_hdr *)d; - memcpy(&to[1], &from[1], len - sizeof(*from)); + if (likely(!skb->data_len)) + memcpy(&to[1], &from[1], len - sizeof(*from)); + else + skb_copy_bits(skb, sizeof(*from), &to[1], len - sizeof(*from)); + to->wr_hi = from->wr_hi | htonl(F_WR_SOP | F_WR_EOP | V_WR_BCNTLFLT(len & 7)); wmb(); @@ -1261,7 +1262,7 @@ static inline void reclaim_completed_tx_imm(struct sge_txq *q) static inline int immediate(const struct sk_buff *skb) { - return skb->len <= WR_LEN && !skb->data_len; + return skb->len <= WR_LEN; } /** @@ -1326,13 +1327,12 @@ static void restart_ctrlq(unsigned long data) struct sk_buff *skb; struct sge_qset *qs = (struct sge_qset *)data; struct sge_txq *q = &qs->txq[TXQ_CTRL]; - const struct port_info *pi = netdev_priv(qs->netdev); - struct adapter *adap = pi->adapter; spin_lock(&q->lock); again:reclaim_completed_tx_imm(q); - while (q->in_use < q->size && (skb = __skb_dequeue(&q->sendq)) != NULL) { + while (q->in_use < q->size && + (skb = __skb_dequeue(&q->sendq)) != NULL) { write_imm(&q->desc[q->pidx], skb, skb->len, q->gen); @@ -1354,7 +1354,7 @@ static void restart_ctrlq(unsigned long data) } spin_unlock(&q->lock); - t3_write_reg(adap, A_SG_KDOORBELL, + t3_write_reg(qs->adap, A_SG_KDOORBELL, F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id)); } @@ -1468,12 +1468,13 @@ static void write_ofld_wr(struct adapter *adap, struct sk_buff *skb, */ static inline unsigned int calc_tx_descs_ofld(const struct sk_buff *skb) { - unsigned int flits, cnt = skb_shinfo(skb)->nr_frags; + unsigned int flits, cnt; - if (skb->len <= WR_LEN && cnt == 0) + if (skb->len <= WR_LEN) return 1; /* packet fits as immediate data */ flits = skb_transport_offset(skb) / 8; /* headers */ + cnt = 
skb_shinfo(skb)->nr_frags; if (skb->tail != skb->transport_header) cnt++; return flits_to_desc(flits + sgl_len(cnt)); @@ -1638,8 +1639,7 @@ static inline void offload_enqueue(struct sge_rspq *q, struct sk_buff *skb) else { struct sge_qset *qs = rspq_to_qset(q); - if (__netif_rx_schedule_prep(qs->netdev)) - __netif_rx_schedule(qs->netdev); + napi_schedule(&qs->napi); q->rx_head = skb; } q->rx_tail = skb; @@ -1675,34 +1675,30 @@ static inline void deliver_partial_bundle(struct t3cdev *tdev, * receive handler. Batches need to be of modest size as we do prefetches * on the packets in each. */ -static int ofld_poll(struct net_device *dev, int *budget) +static int ofld_poll(struct napi_struct *napi, int budget) { - const struct port_info *pi = netdev_priv(dev); - struct adapter *adapter = pi->adapter; - struct sge_qset *qs = dev2qset(dev); + struct sge_qset *qs = container_of(napi, struct sge_qset, napi); struct sge_rspq *q = &qs->rspq; - int work_done, limit = min(*budget, dev->quota), avail = limit; + struct adapter *adapter = qs->adap; + int work_done = 0; - while (avail) { + while (work_done < budget) { struct sk_buff *head, *tail, *skbs[RX_BUNDLE_SIZE]; int ngathered; spin_lock_irq(&q->lock); head = q->rx_head; if (!head) { - work_done = limit - avail; - *budget -= work_done; - dev->quota -= work_done; - __netif_rx_complete(dev); + napi_complete(napi); spin_unlock_irq(&q->lock); - return 0; + return work_done; } tail = q->rx_tail; q->rx_head = q->rx_tail = NULL; spin_unlock_irq(&q->lock); - for (ngathered = 0; avail && head; avail--) { + for (ngathered = 0; work_done < budget && head; work_done++) { prefetch(head->data); skbs[ngathered] = head; head = head->next; @@ -1724,10 +1720,8 @@ static int ofld_poll(struct net_device *dev, int *budget) } deliver_partial_bundle(&adapter->tdev, q, skbs, ngathered); } - work_done = limit - avail; - *budget -= work_done; - dev->quota -= work_done; - return 1; + + return work_done; } /** @@ -2071,50 +2065,47 @@ static inline int is_pure_response(const struct rsp_desc *r) /** * napi_rx_handler - the NAPI handler for Rx processing - * @dev: the net device + * @napi: the napi instance * @budget: how many packets we can process in this round * * Handler for new data events when using NAPI. */ -static int napi_rx_handler(struct net_device *dev, int *budget) +static int napi_rx_handler(struct napi_struct *napi, int budget) { - const struct port_info *pi = netdev_priv(dev); - struct adapter *adap = pi->adapter; - struct sge_qset *qs = dev2qset(dev); - int effective_budget = min(*budget, dev->quota); + struct sge_qset *qs = container_of(napi, struct sge_qset, napi); + struct adapter *adap = qs->adap; + int work_done = process_responses(adap, qs, budget); - int work_done = process_responses(adap, qs, effective_budget); - *budget -= work_done; - dev->quota -= work_done; + if (likely(work_done < budget)) { + napi_complete(napi); - if (work_done >= effective_budget) - return 1; - - netif_rx_complete(dev); - - /* - * Because we don't atomically flush the following write it is - * possible that in very rare cases it can reach the device in a way - * that races with a new response being written plus an error interrupt - * causing the NAPI interrupt handler below to return unhandled status - * to the OS. To protect against this would require flushing the write - * and doing both the write and the flush with interrupts off. Way too - * expensive and unjustifiable given the rarity of the race. - * - * The race cannot happen at all with MSI-X. 
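Note: the write_imm() change above copes with immediate data coming from a non-linear skb: plain memcpy() only works when all the bytes sit in the linear header, while skb_copy_bits() also walks the paged fragments. A small illustration of that choice; my_copy_imm is not a function from the patch.

#include <linux/skbuff.h>
#include <linux/string.h>

/* copy 'len' payload bytes starting at 'offset' out of an skb that may
 * or may not carry part of its data in page fragments */
static int my_copy_imm(void *dst, const struct sk_buff *skb,
		       unsigned int offset, unsigned int len)
{
	if (likely(!skb->data_len)) {
		/* fully linear: the bytes are contiguous behind skb->data */
		memcpy(dst, skb->data + offset, len);
		return 0;
	}
	/* non-linear: let the helper gather from the fragments too */
	return skb_copy_bits(skb, offset, dst, len);
}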
- */ - t3_write_reg(adap, A_SG_GTS, V_RSPQ(qs->rspq.cntxt_id) | - V_NEWTIMER(qs->rspq.next_holdoff) | - V_NEWINDEX(qs->rspq.cidx)); - return 0; + /* + * Because we don't atomically flush the following + * write it is possible that in very rare cases it can + * reach the device in a way that races with a new + * response being written plus an error interrupt + * causing the NAPI interrupt handler below to return + * unhandled status to the OS. To protect against + * this would require flushing the write and doing + * both the write and the flush with interrupts off. + * Way too expensive and unjustifiable given the + * rarity of the race. + * + * The race cannot happen at all with MSI-X. + */ + t3_write_reg(adap, A_SG_GTS, V_RSPQ(qs->rspq.cntxt_id) | + V_NEWTIMER(qs->rspq.next_holdoff) | + V_NEWINDEX(qs->rspq.cidx)); + } + return work_done; } /* * Returns true if the device is already scheduled for polling. */ -static inline int napi_is_scheduled(struct net_device *dev) +static inline int napi_is_scheduled(struct napi_struct *napi) { - return test_bit(__LINK_STATE_RX_SCHED, &dev->state); + return test_bit(NAPI_STATE_SCHED, &napi->state); } /** @@ -2197,8 +2188,7 @@ static inline int handle_responses(struct adapter *adap, struct sge_rspq *q) V_NEWTIMER(q->holdoff_tmr) | V_NEWINDEX(q->cidx)); return 0; } - if (likely(__netif_rx_schedule_prep(qs->netdev))) - __netif_rx_schedule(qs->netdev); + napi_schedule(&qs->napi); return 1; } @@ -2209,8 +2199,7 @@ static inline int handle_responses(struct adapter *adap, struct sge_rspq *q) irqreturn_t t3_sge_intr_msix(int irq, void *cookie) { struct sge_qset *qs = cookie; - const struct port_info *pi = netdev_priv(qs->netdev); - struct adapter *adap = pi->adapter; + struct adapter *adap = qs->adap; struct sge_rspq *q = &qs->rspq; spin_lock(&q->lock); @@ -2226,16 +2215,14 @@ irqreturn_t t3_sge_intr_msix(int irq, void *cookie) * The MSI-X interrupt handler for an SGE response queue for the NAPI case * (i.e., response queue serviced by NAPI polling). */ -irqreturn_t t3_sge_intr_msix_napi(int irq, void *cookie) +static irqreturn_t t3_sge_intr_msix_napi(int irq, void *cookie) { struct sge_qset *qs = cookie; - const struct port_info *pi = netdev_priv(qs->netdev); - struct adapter *adap = pi->adapter; struct sge_rspq *q = &qs->rspq; spin_lock(&q->lock); - if (handle_responses(adap, q) < 0) + if (handle_responses(qs->adap, q) < 0) q->unhandled_irqs++; spin_unlock(&q->lock); return IRQ_HANDLED; @@ -2278,11 +2265,13 @@ static irqreturn_t t3_intr_msi(int irq, void *cookie) return IRQ_HANDLED; } -static int rspq_check_napi(struct net_device *dev, struct sge_rspq *q) +static int rspq_check_napi(struct sge_qset *qs) { - if (!napi_is_scheduled(dev) && is_new_response(&q->desc[q->cidx], q)) { - if (likely(__netif_rx_schedule_prep(dev))) - __netif_rx_schedule(dev); + struct sge_rspq *q = &qs->rspq; + + if (!napi_is_scheduled(&qs->napi) && + is_new_response(&q->desc[q->cidx], q)) { + napi_schedule(&qs->napi); return 1; } return 0; @@ -2295,7 +2284,7 @@ static int rspq_check_napi(struct net_device *dev, struct sge_rspq *q) * one SGE response queue per port in this mode and protect all response * queues with queue 0's lock. 
*/ -irqreturn_t t3_intr_msi_napi(int irq, void *cookie) +static irqreturn_t t3_intr_msi_napi(int irq, void *cookie) { int new_packets; struct adapter *adap = cookie; @@ -2303,10 +2292,9 @@ irqreturn_t t3_intr_msi_napi(int irq, void *cookie) spin_lock(&q->lock); - new_packets = rspq_check_napi(adap->sge.qs[0].netdev, q); + new_packets = rspq_check_napi(&adap->sge.qs[0]); if (adap->params.nports == 2) - new_packets += rspq_check_napi(adap->sge.qs[1].netdev, - &adap->sge.qs[1].rspq); + new_packets += rspq_check_napi(&adap->sge.qs[1]); if (!new_packets && t3_slow_intr_handler(adap) == 0) q->unhandled_irqs++; @@ -2409,9 +2397,9 @@ static irqreturn_t t3b_intr(int irq, void *cookie) static irqreturn_t t3b_intr_napi(int irq, void *cookie) { u32 map; - struct net_device *dev; struct adapter *adap = cookie; - struct sge_rspq *q0 = &adap->sge.qs[0].rspq; + struct sge_qset *qs0 = &adap->sge.qs[0]; + struct sge_rspq *q0 = &qs0->rspq; t3_write_reg(adap, A_PL_CLI, 0); map = t3_read_reg(adap, A_SG_DATA_INTR); @@ -2424,18 +2412,11 @@ static irqreturn_t t3b_intr_napi(int irq, void *cookie) if (unlikely(map & F_ERRINTR)) t3_slow_intr_handler(adap); - if (likely(map & 1)) { - dev = adap->sge.qs[0].netdev; - - if (likely(__netif_rx_schedule_prep(dev))) - __netif_rx_schedule(dev); - } - if (map & 2) { - dev = adap->sge.qs[1].netdev; + if (likely(map & 1)) + napi_schedule(&qs0->napi); - if (likely(__netif_rx_schedule_prep(dev))) - __netif_rx_schedule(dev); - } + if (map & 2) + napi_schedule(&adap->sge.qs[1].napi); spin_unlock(&q0->lock); return IRQ_HANDLED; @@ -2482,6 +2463,10 @@ void t3_sge_err_intr_handler(struct adapter *adapter) "(0x%x)\n", (v >> S_RSPQ0DISABLED) & 0xff); } + if (status & (F_HIPIODRBDROPERR | F_LOPIODRBDROPERR)) + CH_ALERT(adapter, "SGE dropped %s priority doorbell\n", + status & F_HIPIODRBDROPERR ? "high" : "lo"); + t3_write_reg(adapter, A_SG_INT_CAUSE, status); if (status & (F_RSPQCREDITOVERFOW | F_RSPQDISABLED)) t3_fatal_err(adapter); @@ -2514,8 +2499,7 @@ static void sge_timer_cb(unsigned long data) { spinlock_t *lock; struct sge_qset *qs = (struct sge_qset *)data; - const struct port_info *pi = netdev_priv(qs->netdev); - struct adapter *adap = pi->adapter; + struct adapter *adap = qs->adap; if (spin_trylock(&qs->txq[TXQ_ETH].lock)) { reclaim_completed_tx(adap, &qs->txq[TXQ_ETH]); @@ -2526,9 +2510,9 @@ static void sge_timer_cb(unsigned long data) spin_unlock(&qs->txq[TXQ_OFLD].lock); } lock = (adap->flags & USING_MSIX) ? &qs->rspq.lock : - &adap->sge.qs[0].rspq.lock; + &adap->sge.qs[0].rspq.lock; if (spin_trylock_irq(lock)) { - if (!napi_is_scheduled(qs->netdev)) { + if (!napi_is_scheduled(&qs->napi)) { u32 status = t3_read_reg(adap, A_SG_RSPQ_FL_STATUS); if (qs->fl[0].credits < qs->fl[0].size) @@ -2562,12 +2546,9 @@ static void sge_timer_cb(unsigned long data) */ void t3_update_qset_coalesce(struct sge_qset *qs, const struct qset_params *p) { - if (!qs->netdev) - return; - qs->rspq.holdoff_tmr = max(p->coalesce_usecs * 10, 1U);/* can't be 0 */ qs->rspq.polling = p->polling; - qs->netdev->poll = p->polling ? napi_rx_handler : ofld_poll; + qs->napi.poll = p->polling ? 
napi_rx_handler : ofld_poll; } /** @@ -2587,7 +2568,7 @@ void t3_update_qset_coalesce(struct sge_qset *qs, const struct qset_params *p) */ int t3_sge_alloc_qset(struct adapter *adapter, unsigned int id, int nports, int irq_vec_idx, const struct qset_params *p, - int ntxq, struct net_device *netdev) + int ntxq, struct net_device *dev) { int i, ret = -ENOMEM; struct sge_qset *q = &adapter->sge.qs[id]; @@ -2708,16 +2689,10 @@ int t3_sge_alloc_qset(struct adapter *adapter, unsigned int id, int nports, } spin_unlock(&adapter->sge.reg_lock); - q->netdev = netdev; - t3_update_qset_coalesce(q, p); - /* - * We use atalk_ptr as a backpointer to a qset. In case a device is - * associated with multiple queue sets only the first one sets - * atalk_ptr. - */ - if (netdev->atalk_ptr == NULL) - netdev->atalk_ptr = q; + q->adap = adapter; + q->netdev = dev; + t3_update_qset_coalesce(q, p); refill_fl(adapter, &q->fl[0], q->fl[0].size, GFP_KERNEL); refill_fl(adapter, &q->fl[1], q->fl[1].size, GFP_KERNEL); diff --git a/drivers/net/cxgb3/sge_defs.h b/drivers/net/cxgb3/sge_defs.h index 514869e26a767542233d897d0dd52c5477f3d8ab..29b6c800b23840732756986bf12e92c71c979ee2 100644 --- a/drivers/net/cxgb3/sge_defs.h +++ b/drivers/net/cxgb3/sge_defs.h @@ -106,6 +106,10 @@ #define V_CQ_GEN(x) ((x) << S_CQ_GEN) #define F_CQ_GEN V_CQ_GEN(1U) +#define S_CQ_ERR 30 +#define V_CQ_ERR(x) ((x) << S_CQ_ERR) +#define F_CQ_ERR V_CQ_ERR(1U) + #define S_CQ_OVERFLOW_MODE 31 #define V_CQ_OVERFLOW_MODE(x) ((x) << S_CQ_OVERFLOW_MODE) #define F_CQ_OVERFLOW_MODE V_CQ_OVERFLOW_MODE(1U) diff --git a/drivers/net/cxgb3/t3_hw.c b/drivers/net/cxgb3/t3_hw.c index b02d15daf5d90f56fac24a6b531fbf40ac2aa8f0..d4ee00d32219c5a045bf0affad6d08f4853e23b2 100644 --- a/drivers/net/cxgb3/t3_hw.c +++ b/drivers/net/cxgb3/t3_hw.c @@ -119,9 +119,9 @@ void t3_set_reg_field(struct adapter *adapter, unsigned int addr, u32 mask, * Reads registers that are accessed indirectly through an address/data * register pair. */ -void t3_read_indirect(struct adapter *adap, unsigned int addr_reg, - unsigned int data_reg, u32 *vals, unsigned int nregs, - unsigned int start_idx) +static void t3_read_indirect(struct adapter *adap, unsigned int addr_reg, + unsigned int data_reg, u32 *vals, + unsigned int nregs, unsigned int start_idx) { while (nregs--) { t3_write_reg(adap, addr_reg, start_idx); @@ -505,7 +505,7 @@ struct t3_vpd { u8 vpdr_len[2]; VPD_ENTRY(pn, 16); /* part number */ VPD_ENTRY(ec, 16); /* EC level */ - VPD_ENTRY(sn, 16); /* serial number */ + VPD_ENTRY(sn, SERNUM_LEN); /* serial number */ VPD_ENTRY(na, 12); /* MAC address base */ VPD_ENTRY(cclk, 6); /* core clock */ VPD_ENTRY(mclk, 6); /* mem clock */ @@ -648,6 +648,7 @@ static int get_vpd_params(struct adapter *adapter, struct vpd_params *p) p->uclk = simple_strtoul(vpd.uclk_data, NULL, 10); p->mdc = simple_strtoul(vpd.mdc_data, NULL, 10); p->mem_timing = simple_strtoul(vpd.mt_data, NULL, 10); + memcpy(p->sn, vpd.sn_data, SERNUM_LEN); /* Old eeproms didn't have port information */ if (adapter->params.rev == 0 && !vpd.port0_data[0]) { @@ -959,16 +960,18 @@ int t3_get_fw_version(struct adapter *adapter, u32 *vers) /** * t3_check_fw_version - check if the FW is compatible with this driver * @adapter: the adapter - * + * @must_load: set to 1 if loading a new FW image is required + * Checks if an adapter's FW is compatible with the driver. Returns 0 * if the versions are compatible, a negative error otherwise. 
*/ -int t3_check_fw_version(struct adapter *adapter) +int t3_check_fw_version(struct adapter *adapter, int *must_load) { int ret; u32 vers; unsigned int type, major, minor; + *must_load = 1; ret = t3_get_fw_version(adapter, &vers); if (ret) return ret; @@ -981,9 +984,17 @@ int t3_check_fw_version(struct adapter *adapter) minor == FW_VERSION_MINOR) return 0; - CH_ERR(adapter, "found wrong FW version(%u.%u), " - "driver needs version %u.%u\n", major, minor, - FW_VERSION_MAJOR, FW_VERSION_MINOR); + if (major != FW_VERSION_MAJOR) + CH_ERR(adapter, "found wrong FW version(%u.%u), " + "driver needs version %u.%u\n", major, minor, + FW_VERSION_MAJOR, FW_VERSION_MINOR); + else { + *must_load = 0; + CH_WARN(adapter, "found wrong FW minor version(%u.%u), " + "driver compiled for version %u.%u\n", major, minor, + FW_VERSION_MAJOR, FW_VERSION_MINOR); + } + return -EINVAL; } @@ -1347,6 +1358,10 @@ static void pcie_intr_handler(struct adapter *adapter) {0} }; + if (t3_read_reg(adapter, A_PCIE_INT_CAUSE) & F_PEXERR) + CH_ALERT(adapter, "PEX error code 0x%x\n", + t3_read_reg(adapter, A_PCIE_PEX_ERR)); + if (t3_handle_intr_status(adapter, A_PCIE_INT_CAUSE, PCIE_INTR_MASK, pcie_intr_info, adapter->irq_stats)) t3_fatal_err(adapter); @@ -1798,6 +1813,8 @@ void t3_intr_clear(struct adapter *adapter) for (i = 0; i < ARRAY_SIZE(cause_reg_addr); ++i) t3_write_reg(adapter, cause_reg_addr[i], 0xffffffff); + if (is_pcie(adapter)) + t3_write_reg(adapter, A_PCIE_PEX_ERR, 0xffffffff); t3_write_reg(adapter, A_PL_INT_CAUSE0, 0xffffffff); t3_read_reg(adapter, A_PL_INT_CAUSE0); /* flush */ } @@ -1853,6 +1870,8 @@ void t3_port_intr_clear(struct adapter *adapter, int idx) phy->ops->intr_clear(phy); } +#define SG_CONTEXT_CMD_ATTEMPTS 100 + /** * t3_sge_write_context - write an SGE context * @adapter: the adapter @@ -1872,7 +1891,7 @@ static int t3_sge_write_context(struct adapter *adapter, unsigned int id, t3_write_reg(adapter, A_SG_CONTEXT_CMD, V_CONTEXT_CMD_OPCODE(1) | type | V_CONTEXT(id)); return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY, - 0, 5, 1); + 0, SG_CONTEXT_CMD_ATTEMPTS, 1); } /** @@ -2029,7 +2048,8 @@ int t3_sge_init_cqcntxt(struct adapter *adapter, unsigned int id, u64 base_addr, base_addr >>= 32; t3_write_reg(adapter, A_SG_CONTEXT_DATA2, V_CQ_BASE_HI((u32) base_addr) | V_CQ_RSPQ(rspq) | - V_CQ_GEN(1) | V_CQ_OVERFLOW_MODE(ovfl_mode)); + V_CQ_GEN(1) | V_CQ_OVERFLOW_MODE(ovfl_mode) | + V_CQ_ERR(ovfl_mode)); t3_write_reg(adapter, A_SG_CONTEXT_DATA3, V_CQ_CREDITS(credits) | V_CQ_CREDIT_THRES(credit_thres)); return t3_sge_write_context(adapter, id, F_CQ); @@ -2057,7 +2077,7 @@ int t3_sge_enable_ecntxt(struct adapter *adapter, unsigned int id, int enable) t3_write_reg(adapter, A_SG_CONTEXT_CMD, V_CONTEXT_CMD_OPCODE(1) | F_EGRESS | V_CONTEXT(id)); return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY, - 0, 5, 1); + 0, SG_CONTEXT_CMD_ATTEMPTS, 1); } /** @@ -2081,7 +2101,7 @@ int t3_sge_disable_fl(struct adapter *adapter, unsigned int id) t3_write_reg(adapter, A_SG_CONTEXT_CMD, V_CONTEXT_CMD_OPCODE(1) | F_FREELIST | V_CONTEXT(id)); return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY, - 0, 5, 1); + 0, SG_CONTEXT_CMD_ATTEMPTS, 1); } /** @@ -2105,7 +2125,7 @@ int t3_sge_disable_rspcntxt(struct adapter *adapter, unsigned int id) t3_write_reg(adapter, A_SG_CONTEXT_CMD, V_CONTEXT_CMD_OPCODE(1) | F_RESPONSEQ | V_CONTEXT(id)); return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY, - 0, 5, 1); + 0, SG_CONTEXT_CMD_ATTEMPTS, 1); } /** @@ -2129,7 +2149,7 @@ int 
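Note: t3_check_fw_version() above now reports two things: whether the flashed firmware matches what the driver was built for, and, via *must_load, whether a mismatch is fatal (different major version) or only worth a warning (same major, different minor). A simplified sketch of that contract; the my_* names are invented.

#include <linux/errno.h>

/*
 * Returns 0 when the firmware is an exact match, -EINVAL otherwise.
 * On a mismatch, *must_load tells the caller whether it may keep running
 * with the firmware already on the card (0) or must load a new image (1).
 */
static int my_check_fw(unsigned int major, unsigned int minor,
		       unsigned int want_major, unsigned int want_minor,
		       int *must_load)
{
	*must_load = 1;

	if (major == want_major && minor == want_minor)
		return 0;

	if (major == want_major)
		*must_load = 0;		/* interface still compatible enough */

	return -EINVAL;
}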
t3_sge_disable_cqcntxt(struct adapter *adapter, unsigned int id) t3_write_reg(adapter, A_SG_CONTEXT_CMD, V_CONTEXT_CMD_OPCODE(1) | F_CQ | V_CONTEXT(id)); return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY, - 0, 5, 1); + 0, SG_CONTEXT_CMD_ATTEMPTS, 1); } /** @@ -2154,7 +2174,7 @@ int t3_sge_cqcntxt_op(struct adapter *adapter, unsigned int id, unsigned int op, t3_write_reg(adapter, A_SG_CONTEXT_CMD, V_CONTEXT_CMD_OPCODE(op) | V_CONTEXT(id) | F_CQ); if (t3_wait_op_done_val(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY, - 0, 5, 1, &val)) + 0, SG_CONTEXT_CMD_ATTEMPTS, 1, &val)) return -EIO; if (op >= 2 && op < 7) { @@ -2164,7 +2184,8 @@ int t3_sge_cqcntxt_op(struct adapter *adapter, unsigned int id, unsigned int op, t3_write_reg(adapter, A_SG_CONTEXT_CMD, V_CONTEXT_CMD_OPCODE(0) | F_CQ | V_CONTEXT(id)); if (t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, - F_CONTEXT_CMD_BUSY, 0, 5, 1)) + F_CONTEXT_CMD_BUSY, 0, + SG_CONTEXT_CMD_ATTEMPTS, 1)) return -EIO; return G_CQ_INDEX(t3_read_reg(adapter, A_SG_CONTEXT_DATA0)); } @@ -2190,7 +2211,7 @@ static int t3_sge_read_context(unsigned int type, struct adapter *adapter, t3_write_reg(adapter, A_SG_CONTEXT_CMD, V_CONTEXT_CMD_OPCODE(0) | type | V_CONTEXT(id)); if (t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY, 0, - 5, 1)) + SG_CONTEXT_CMD_ATTEMPTS, 1)) return -EIO; data[0] = t3_read_reg(adapter, A_SG_CONTEXT_DATA0); data[1] = t3_read_reg(adapter, A_SG_CONTEXT_DATA1); @@ -3222,6 +3243,8 @@ int t3_init_hw(struct adapter *adapter, u32 fw_params) t3_set_reg_field(adapter, A_PCIX_CFG, 0, F_CLIDECEN); t3_write_reg(adapter, A_PM1_RX_CFG, 0xffffffff); + t3_write_reg(adapter, A_PM1_RX_MODE, 0); + t3_write_reg(adapter, A_PM1_TX_MODE, 0); init_hw_for_avail_ports(adapter, adapter->params.nports); t3_sge_init(adapter, &adapter->params.sge); @@ -3384,7 +3407,7 @@ void early_hw_init(struct adapter *adapter, const struct adapter_info *ai) * Older PCIe cards lose their config space during reset, PCI-X * ones don't. 
*/ -int t3_reset_adapter(struct adapter *adapter) +static int t3_reset_adapter(struct adapter *adapter) { int i, save_and_restore_pcie = adapter->params.rev < T3_REV_B2 && is_pcie(adapter); diff --git a/drivers/net/cxgb3/version.h b/drivers/net/cxgb3/version.h index eb508bf8022a0595bd228b2ede3e94040e39f782..ef1c6339c80607437dee90ce103257c5114df7c8 100644 --- a/drivers/net/cxgb3/version.h +++ b/drivers/net/cxgb3/version.h @@ -39,6 +39,6 @@ /* Firmware version */ #define FW_VERSION_MAJOR 4 -#define FW_VERSION_MINOR 3 +#define FW_VERSION_MINOR 6 #define FW_VERSION_MICRO 0 #endif /* __CHELSIO_VERSION_H */ diff --git a/drivers/net/cxgb3/xgmac.c b/drivers/net/cxgb3/xgmac.c index c302b1a30cba3beb7be123446bb8c7cb31e0221b..eeb766aeced951ea72fb4a20837cc909cad54555 100644 --- a/drivers/net/cxgb3/xgmac.c +++ b/drivers/net/cxgb3/xgmac.c @@ -142,7 +142,7 @@ int t3_mac_reset(struct cmac *mac) return 0; } -int t3b2_mac_reset(struct cmac *mac) +static int t3b2_mac_reset(struct cmac *mac) { struct adapter *adap = mac->adapter; unsigned int oft = mac->offset; @@ -437,12 +437,13 @@ int t3_mac_enable(struct cmac *mac, int which) struct mac_stats *s = &mac->stats; if (which & MAC_DIRECTION_TX) { - t3_write_reg(adap, A_XGM_TX_CTRL + oft, F_TXEN); t3_write_reg(adap, A_TP_PIO_ADDR, A_TP_TX_DROP_CFG_CH0 + idx); t3_write_reg(adap, A_TP_PIO_DATA, 0xc0ede401); t3_write_reg(adap, A_TP_PIO_ADDR, A_TP_TX_DROP_MODE); t3_set_reg_field(adap, A_TP_PIO_DATA, 1 << idx, 1 << idx); + t3_write_reg(adap, A_XGM_TX_CTRL + oft, F_TXEN); + t3_write_reg(adap, A_TP_PIO_ADDR, A_TP_TX_DROP_CNT_CH0 + idx); mac->tx_mcnt = s->tx_frames; mac->tx_tcnt = (G_TXDROPCNTCH0RCVD(t3_read_reg(adap, @@ -451,9 +452,11 @@ int t3_mac_enable(struct cmac *mac, int which) A_XGM_TX_SPI4_SOP_EOP_CNT + oft))); mac->rx_mcnt = s->rx_frames; + mac->rx_pause = s->rx_pause; mac->rx_xcnt = (G_TXSPI4SOPCNT(t3_read_reg(adap, A_XGM_RX_SPI4_SOP_EOP_CNT + oft))); + mac->rx_ocnt = s->rx_fifo_ovfl; mac->txen = F_TXEN; mac->toggle_cnt = 0; } @@ -464,24 +467,19 @@ int t3_mac_enable(struct cmac *mac, int which) int t3_mac_disable(struct cmac *mac, int which) { - int idx = macidx(mac); struct adapter *adap = mac->adapter; - int val; if (which & MAC_DIRECTION_TX) { t3_write_reg(adap, A_XGM_TX_CTRL + mac->offset, 0); - t3_write_reg(adap, A_TP_PIO_ADDR, A_TP_TX_DROP_CFG_CH0 + idx); - t3_write_reg(adap, A_TP_PIO_DATA, 0xc000001f); - t3_write_reg(adap, A_TP_PIO_ADDR, A_TP_TX_DROP_MODE); - t3_set_reg_field(adap, A_TP_PIO_DATA, 1 << idx, 1 << idx); mac->txen = 0; } if (which & MAC_DIRECTION_RX) { + int val = F_MAC_RESET_; + t3_set_reg_field(mac->adapter, A_XGM_RESET_CTRL + mac->offset, F_PCS_RESET_, 0); msleep(100); t3_write_reg(adap, A_XGM_RX_CTRL + mac->offset, 0); - val = F_MAC_RESET_; if (is_10G(adap)) val |= F_PCS_RESET_; else if (uses_xaui(adap)) @@ -507,7 +505,7 @@ int t3b2_mac_watchdog_task(struct cmac *mac) tx_xcnt = 1; /* By default tx_xcnt is making progress */ tx_tcnt = mac->tx_tcnt; /* If tx_mcnt is progressing ignore tx_tcnt */ rx_xcnt = 1; /* By default rx_xcnt is making progress */ - if (tx_mcnt == mac->tx_mcnt) { + if (tx_mcnt == mac->tx_mcnt && mac->rx_pause == s->rx_pause) { tx_xcnt = (G_TXSPI4SOPCNT(t3_read_reg(adap, A_XGM_TX_SPI4_SOP_EOP_CNT + mac->offset))); @@ -524,10 +522,7 @@ int t3b2_mac_watchdog_task(struct cmac *mac) goto rxcheck; } - if (((tx_tcnt != mac->tx_tcnt) && - (tx_xcnt == 0) && (mac->tx_xcnt == 0)) || - ((mac->tx_mcnt == tx_mcnt) && - (tx_xcnt != 0) && (mac->tx_xcnt != 0))) { + if ((tx_tcnt != mac->tx_tcnt) && (mac->tx_xcnt == 0)) { if 
(mac->toggle_cnt > 4) { status = 2; goto out; @@ -541,11 +536,14 @@ int t3b2_mac_watchdog_task(struct cmac *mac) } rxcheck: - if (rx_mcnt != mac->rx_mcnt) + if (rx_mcnt != mac->rx_mcnt) { rx_xcnt = (G_TXSPI4SOPCNT(t3_read_reg(adap, A_XGM_RX_SPI4_SOP_EOP_CNT + - mac->offset))); - else + mac->offset))) + + (s->rx_fifo_ovfl - + mac->rx_ocnt); + mac->rx_ocnt = s->rx_fifo_ovfl; + } else goto out; if (mac->rx_mcnt != s->rx_frames && rx_xcnt == 0 && @@ -560,6 +558,7 @@ int t3b2_mac_watchdog_task(struct cmac *mac) mac->tx_mcnt = s->tx_frames; mac->rx_xcnt = rx_xcnt; mac->rx_mcnt = s->rx_frames; + mac->rx_pause = s->rx_pause; if (status == 1) { t3_write_reg(adap, A_XGM_TX_CTRL + mac->offset, 0); t3_read_reg(adap, A_XGM_TX_CTRL + mac->offset); /* flush */ diff --git a/drivers/net/de600.c b/drivers/net/de600.c index dae97b860daaf92b1a6f77e8d30a5ae530471480..cb849b091f980c9521e45ed0ac227420fa835c30 100644 --- a/drivers/net/de600.c +++ b/drivers/net/de600.c @@ -154,11 +154,6 @@ static int de600_close(struct net_device *dev) return 0; } -static struct net_device_stats *get_stats(struct net_device *dev) -{ - return (struct net_device_stats *)(dev->priv); -} - static inline void trigger_interrupt(struct net_device *dev) { de600_put_command(FLIP_IRQ); @@ -308,7 +303,7 @@ static int de600_tx_intr(struct net_device *dev, int irq_status) if (!(irq_status & TX_FAILED16)) { tx_fifo_out = (tx_fifo_out + 1) % TX_PAGES; ++free_tx_pages; - ((struct net_device_stats *)(dev->priv))->tx_packets++; + dev->stats.tx_packets++; netif_wake_queue(dev); } @@ -375,8 +370,8 @@ static void de600_rx_intr(struct net_device *dev) /* update stats */ dev->last_rx = jiffies; - ((struct net_device_stats *)(dev->priv))->rx_packets++; /* count all receives */ - ((struct net_device_stats *)(dev->priv))->rx_bytes += size; /* count all received bytes */ + dev->stats.rx_packets++; /* count all receives */ + dev->stats.rx_bytes += size; /* count all received bytes */ /* * If any worth-while packets have been received, netif_rx() @@ -389,12 +384,12 @@ static struct net_device * __init de600_probe(void) int i; struct net_device *dev; int err; + DECLARE_MAC_BUF(mac); - dev = alloc_etherdev(sizeof(struct net_device_stats)); + dev = alloc_etherdev(0); if (!dev) return ERR_PTR(-ENOMEM); - SET_MODULE_OWNER(dev); if (!request_region(DE600_IO, 3, "de600")) { printk(KERN_WARNING "DE600: port 0x%x busy\n", DE600_IO); @@ -444,12 +439,7 @@ static struct net_device * __init de600_probe(void) goto out1; } - printk(", Ethernet Address: %02X", dev->dev_addr[0]); - for (i = 1; i < ETH_ALEN; i++) - printk(":%02X",dev->dev_addr[i]); - printk("\n"); - - dev->get_stats = get_stats; + printk(", Ethernet Address: %s\n", print_mac(mac, dev->dev_addr)); dev->open = de600_open; dev->stop = de600_close; diff --git a/drivers/net/de600.h b/drivers/net/de600.h index 1288e48ba70411aab8a05ac21d099bb29a530818..e80ecbabcf4e32d4c6a0538c96a2efbce77ad544 100644 --- a/drivers/net/de600.h +++ b/drivers/net/de600.h @@ -121,7 +121,6 @@ static u8 de600_read_byte(unsigned char type, struct net_device *dev); /* Put in the device structure. */ static int de600_open(struct net_device *dev); static int de600_close(struct net_device *dev); -static struct net_device_stats *get_stats(struct net_device *dev); static int de600_start_xmit(struct sk_buff *skb, struct net_device *dev); /* Dispatch from interrupts. 
*/ diff --git a/drivers/net/de620.c b/drivers/net/de620.c index dc48924261740fa1fcfd730e85ec4afa0b9d4557..3f5190c654cf9285e4b9587fcdc53db53ddac952 100644 --- a/drivers/net/de620.c +++ b/drivers/net/de620.c @@ -216,7 +216,6 @@ MODULE_PARM_DESC(de620_debug, "DE-620 debug level (0-2)"); /* Put in the device structure. */ static int de620_open(struct net_device *); static int de620_close(struct net_device *); -static struct net_device_stats *get_stats(struct net_device *); static void de620_set_multicast_list(struct net_device *); static int de620_start_xmit(struct sk_buff *, struct net_device *); @@ -478,16 +477,6 @@ static int de620_close(struct net_device *dev) return 0; } -/********************************************* - * - * Return current statistics - * - */ -static struct net_device_stats *get_stats(struct net_device *dev) -{ - return (struct net_device_stats *)(dev->priv); -} - /********************************************* * * Set or clear the multicast filter for this adaptor. @@ -579,7 +568,7 @@ static int de620_start_xmit(struct sk_buff *skb, struct net_device *dev) if(!(using_txbuf == (TXBF0 | TXBF1))) netif_wake_queue(dev); - ((struct net_device_stats *)(dev->priv))->tx_packets++; + dev->stats.tx_packets++; spin_unlock_irqrestore(&de620_lock, flags); dev_kfree_skb (skb); return 0; @@ -660,7 +649,7 @@ static int de620_rx_intr(struct net_device *dev) /* You win some, you lose some. And sometimes plenty... */ adapter_init(dev); netif_wake_queue(dev); - ((struct net_device_stats *)(dev->priv))->rx_over_errors++; + dev->stats.rx_over_errors++; return 0; } @@ -680,7 +669,7 @@ static int de620_rx_intr(struct net_device *dev) next_rx_page = header_buf.Rx_NextPage; /* at least a try... */ de620_send_command(dev, W_DUMMY); de620_set_register(dev, W_NPRF, next_rx_page); - ((struct net_device_stats *)(dev->priv))->rx_over_errors++; + dev->stats.rx_over_errors++; return 0; } next_rx_page = pagelink; @@ -693,7 +682,7 @@ static int de620_rx_intr(struct net_device *dev) skb = dev_alloc_skb(size+2); if (skb == NULL) { /* Yeah, but no place to put it... */ printk(KERN_WARNING "%s: Couldn't allocate a sk_buff of size %d.\n", dev->name, size); - ((struct net_device_stats *)(dev->priv))->rx_dropped++; + dev->stats.rx_dropped++; } else { /* Yep! Go get it! */ skb_reserve(skb,2); /* Align */ @@ -706,8 +695,8 @@ static int de620_rx_intr(struct net_device *dev) netif_rx(skb); /* deliver it "upstairs" */ dev->last_rx = jiffies; /* count all receives */ - ((struct net_device_stats *)(dev->priv))->rx_packets++; - ((struct net_device_stats *)(dev->priv))->rx_bytes += size; + dev->stats.rx_packets++; + dev->stats.rx_bytes += size; } } @@ -818,13 +807,12 @@ struct net_device * __init de620_probe(int unit) struct net_device *dev; int err = -ENOMEM; int i; + DECLARE_MAC_BUF(mac); - dev = alloc_etherdev(sizeof(struct net_device_stats)); + dev = alloc_etherdev(0); if (!dev) goto out; - SET_MODULE_OWNER(dev); - spin_lock_init(&de620_lock); /* @@ -866,13 +854,14 @@ struct net_device * __init de620_probe(int unit) } /* else, got it! */ - printk(", Ethernet Address: %2.2X", - dev->dev_addr[0] = nic_data.NodeID[0]); + dev->dev_addr[0] = nic_data.NodeID[0]; for (i = 1; i < ETH_ALEN; i++) { - printk(":%2.2X", dev->dev_addr[i] = nic_data.NodeID[i]); + dev->dev_addr[i] = nic_data.NodeID[i]; dev->broadcast[i] = 0xff; } + printk(", Ethernet Address: %s", print_mac(mac, dev->dev_addr)); + printk(" (%dk RAM,", (nic_data.RAM_Size) ? 
(nic_data.RAM_Size >> 2) : 64); @@ -881,7 +870,6 @@ struct net_device * __init de620_probe(int unit) else printk(" UTP)\n"); - dev->get_stats = get_stats; dev->open = de620_open; dev->stop = de620_close; dev->hard_start_xmit = de620_start_xmit; diff --git a/drivers/net/declance.c b/drivers/net/declance.c index b2577f40124e3917e40c094f40a559eb8b5965d8..00e0194bfef005acea333ed70dd6db7aacc36d37 100644 --- a/drivers/net/declance.c +++ b/drivers/net/declance.c @@ -258,8 +258,6 @@ struct lance_private { int rx_new, tx_new; int rx_old, tx_old; - struct net_device_stats stats; - unsigned short busmaster_regval; struct timer_list multicast_timer; @@ -583,22 +581,22 @@ static int lance_rx(struct net_device *dev) /* We got an incomplete frame? */ if ((bits & LE_R1_POK) != LE_R1_POK) { - lp->stats.rx_over_errors++; - lp->stats.rx_errors++; + dev->stats.rx_over_errors++; + dev->stats.rx_errors++; } else if (bits & LE_R1_ERR) { /* Count only the end frame as a rx error, * not the beginning */ if (bits & LE_R1_BUF) - lp->stats.rx_fifo_errors++; + dev->stats.rx_fifo_errors++; if (bits & LE_R1_CRC) - lp->stats.rx_crc_errors++; + dev->stats.rx_crc_errors++; if (bits & LE_R1_OFL) - lp->stats.rx_over_errors++; + dev->stats.rx_over_errors++; if (bits & LE_R1_FRA) - lp->stats.rx_frame_errors++; + dev->stats.rx_frame_errors++; if (bits & LE_R1_EOP) - lp->stats.rx_errors++; + dev->stats.rx_errors++; } else { len = (*rds_ptr(rd, mblength, lp->type) & 0xfff) - 4; skb = dev_alloc_skb(len + 2); @@ -606,7 +604,7 @@ static int lance_rx(struct net_device *dev) if (skb == 0) { printk("%s: Memory squeeze, deferring packet.\n", dev->name); - lp->stats.rx_dropped++; + dev->stats.rx_dropped++; *rds_ptr(rd, mblength, lp->type) = 0; *rds_ptr(rd, rmd1, lp->type) = ((lp->rx_buf_ptr_lnc[entry] >> 16) & @@ -614,7 +612,7 @@ static int lance_rx(struct net_device *dev) lp->rx_new = (entry + 1) & RX_RING_MOD_MASK; return 0; } - lp->stats.rx_bytes += len; + dev->stats.rx_bytes += len; skb_reserve(skb, 2); /* 16 byte align */ skb_put(skb, len); /* make room */ @@ -625,7 +623,7 @@ static int lance_rx(struct net_device *dev) skb->protocol = eth_type_trans(skb, dev); netif_rx(skb); dev->last_rx = jiffies; - lp->stats.rx_packets++; + dev->stats.rx_packets++; } /* Return the packet to the pool */ @@ -660,14 +658,14 @@ static void lance_tx(struct net_device *dev) if (*tds_ptr(td, tmd1, lp->type) & LE_T1_ERR) { status = *tds_ptr(td, misc, lp->type); - lp->stats.tx_errors++; + dev->stats.tx_errors++; if (status & LE_T3_RTY) - lp->stats.tx_aborted_errors++; + dev->stats.tx_aborted_errors++; if (status & LE_T3_LCOL) - lp->stats.tx_window_errors++; + dev->stats.tx_window_errors++; if (status & LE_T3_CLOS) { - lp->stats.tx_carrier_errors++; + dev->stats.tx_carrier_errors++; printk("%s: Carrier Lost\n", dev->name); /* Stop the lance */ writereg(&ll->rap, LE_CSR0); @@ -681,7 +679,7 @@ static void lance_tx(struct net_device *dev) * transmitter, restart the adapter. */ if (status & (LE_T3_BUF | LE_T3_UFL)) { - lp->stats.tx_fifo_errors++; + dev->stats.tx_fifo_errors++; printk("%s: Tx: ERR_BUF|ERR_UFL, restarting\n", dev->name); @@ -702,13 +700,13 @@ static void lance_tx(struct net_device *dev) /* One collision before packet was sent. */ if (*tds_ptr(td, tmd1, lp->type) & LE_T1_EONE) - lp->stats.collisions++; + dev->stats.collisions++; /* More than one collision, be optimistic. 
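The declance changes continue the same cleanup: the shadow struct net_device_stats is removed from lance_private, the rx/tx paths bump dev->stats directly, and lance_get_stats() becomes unnecessary. A toy stand-alone model of the before/after shape, with invented structure names standing in for the kernel's:

#include <stdio.h>

struct stats {                       /* stand-in for struct net_device_stats */
	unsigned long rx_packets;
	unsigned long rx_errors;
};

struct device {                      /* stand-in for struct net_device */
	struct stats stats;          /* core-owned counters (the new scheme) */
	void *priv;
};

struct lance_priv {                  /* driver-private state */
	int rx_new;
	/* the old scheme kept its own 'struct stats stats;' here and needed
	 * a get_stats() callback just to return a pointer to it */
};

static void rx_one(struct device *dev, int ok)
{
	/* new scheme: update the device-owned counters directly */
	if (ok)
		dev->stats.rx_packets++;
	else
		dev->stats.rx_errors++;
}

int main(void)
{
	struct lance_priv lp = { 0 };
	struct device dev = { { 0, 0 }, &lp };

	rx_one(&dev, 1);
	rx_one(&dev, 0);
	printf("rx_packets=%lu rx_errors=%lu\n",
	       dev.stats.rx_packets, dev.stats.rx_errors);
	return 0;
}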
*/ if (*tds_ptr(td, tmd1, lp->type) & LE_T1_EMORE) - lp->stats.collisions += 2; + dev->stats.collisions += 2; - lp->stats.tx_packets++; + dev->stats.tx_packets++; } j = (j + 1) & TX_RING_MOD_MASK; } @@ -754,10 +752,10 @@ static irqreturn_t lance_interrupt(const int irq, void *dev_id) lance_tx(dev); if (csr0 & LE_C0_BABL) - lp->stats.tx_errors++; + dev->stats.tx_errors++; if (csr0 & LE_C0_MISS) - lp->stats.rx_errors++; + dev->stats.rx_errors++; if (csr0 & LE_C0_MERR) { printk("%s: Memory error, status %04x\n", dev->name, csr0); @@ -912,7 +910,7 @@ static int lance_start_xmit(struct sk_buff *skb, struct net_device *dev) len = ETH_ZLEN; } - lp->stats.tx_bytes += len; + dev->stats.tx_bytes += len; entry = lp->tx_new; *lib_ptr(ib, btx_ring[entry].length, lp->type) = (-len); @@ -938,13 +936,6 @@ static int lance_start_xmit(struct sk_buff *skb, struct net_device *dev) return 0; } -static struct net_device_stats *lance_get_stats(struct net_device *dev) -{ - struct lance_private *lp = netdev_priv(dev); - - return &lp->stats; -} - static void lance_load_multicast(struct net_device *dev) { struct lance_private *lp = netdev_priv(dev); @@ -1036,6 +1027,7 @@ static int __init dec_lance_probe(struct device *bdev, const int type) int i, ret; unsigned long esar_base; unsigned char *esar; + DECLARE_MAC_BUF(mac); if (dec_lance_debug && version_printed++ == 0) printk(version); @@ -1223,28 +1215,26 @@ static int __init dec_lance_probe(struct device *bdev, const int type) */ switch (type) { case ASIC_LANCE: - printk("%s: IOASIC onboard LANCE, addr = ", name); + printk("%s: IOASIC onboard LANCE", name); break; case PMAD_LANCE: - printk("%s: PMAD-AA, addr = ", name); + printk("%s: PMAD-AA", name); break; case PMAX_LANCE: - printk("%s: PMAX onboard LANCE, addr = ", name); + printk("%s: PMAX onboard LANCE", name); break; } - for (i = 0; i < 6; i++) { + for (i = 0; i < 6; i++) dev->dev_addr[i] = esar[i * 4]; - printk("%2.2x%c", dev->dev_addr[i], i == 5 ? ',' : ':'); - } - printk(" irq = %d\n", dev->irq); + printk(", addr = %s, irq = %d\n", + print_mac(mac, dev->dev_addr), dev->irq); dev->open = &lance_open; dev->stop = &lance_close; dev->hard_start_xmit = &lance_start_xmit; dev->tx_timeout = &lance_tx_timeout; dev->watchdog_timeo = 5*HZ; - dev->get_stats = &lance_get_stats; dev->set_multicast_list = &lance_set_multicast; /* lp->ll is the location of the registers for lance card */ diff --git a/drivers/net/defxx.c b/drivers/net/defxx.c index 9c8e3f9f5e58aa9080c896727d9f60c27a45a2e9..b07613e61f53048f5dd7aa6ea47606e0f2cb22c9 100644 --- a/drivers/net/defxx.c +++ b/drivers/net/defxx.c @@ -539,7 +539,6 @@ static int __devinit dfx_register(struct device *bdev) goto err_out; } - SET_MODULE_OWNER(dev); SET_NETDEV_DEV(dev, bdev); bp = netdev_priv(dev); diff --git a/drivers/net/depca.c b/drivers/net/depca.c index 183497020bfcd3c461eaee74b2c808bb6f30cd0b..ace39ec0a367c121be87001b4e44bde930c0633f 100644 --- a/drivers/net/depca.c +++ b/drivers/net/depca.c @@ -485,7 +485,6 @@ struct depca_private { /* Kernel-only (not device) fields */ int rx_new, tx_new; /* The next free ring entry */ int rx_old, tx_old; /* The ring entries to be free()ed. 
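Both declance and depca advance their ring cursors with a power-of-two mask, e.g. lp->rx_new = (entry + 1) & RX_RING_MOD_MASK, which wraps the index without a division. A minimal stand-alone sketch of that indexing, using an invented ring size:

#include <stdio.h>

#define RING_SIZE      8                 /* must be a power of two */
#define RING_MOD_MASK  (RING_SIZE - 1)

int main(void)
{
	int entry = 6;
	int i;

	/* advancing the cursor wraps from 7 back to 0 without a modulo */
	for (i = 0; i < 4; i++) {
		printf("entry=%d\n", entry);
		entry = (entry + 1) & RING_MOD_MASK;
	}
	return 0;
}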
*/ - struct net_device_stats stats; spinlock_t lock; struct { /* Private stats counters */ u32 bins[DEPCA_PKT_STAT_SZ]; @@ -522,7 +521,6 @@ static irqreturn_t depca_interrupt(int irq, void *dev_id); static int depca_close(struct net_device *dev); static int depca_ioctl(struct net_device *dev, struct ifreq *rq, int cmd); static void depca_tx_timeout(struct net_device *dev); -static struct net_device_stats *depca_get_stats(struct net_device *dev); static void set_multicast_list(struct net_device *dev); /* @@ -575,6 +573,7 @@ static int __init depca_hw_init (struct net_device *dev, struct device *device) s16 nicsr; u_long ioaddr; u_long mem_start; + DECLARE_MAC_BUF(mac); /* * We are now supposed to enter this function with the @@ -634,14 +633,11 @@ static int __init depca_hw_init (struct net_device *dev, struct device *device) printk(", h/w address "); status = get_hw_addr(dev); + printk("%s", print_mac(mac, dev->dev_addr)); if (status != 0) { printk(" which has an Ethernet PROM CRC error.\n"); return -ENXIO; } - for (i = 0; i < ETH_ALEN - 1; i++) { /* get the ethernet address */ - printk("%2.2x:", dev->dev_addr[i]); - } - printk("%2.2x", dev->dev_addr[i]); /* Set up the maximum amount of network RAM(kB) */ netRAM = ((lp->adapter != DEPCA) ? 64 : 48); @@ -801,7 +797,6 @@ static int __init depca_hw_init (struct net_device *dev, struct device *device) dev->open = &depca_open; dev->hard_start_xmit = &depca_start_xmit; dev->stop = &depca_close; - dev->get_stats = &depca_get_stats; dev->set_multicast_list = &set_multicast_list; dev->do_ioctl = &depca_ioctl; dev->tx_timeout = depca_tx_timeout; @@ -1026,15 +1021,15 @@ static int depca_rx(struct net_device *dev) } if (status & R_ENP) { /* Valid frame status */ if (status & R_ERR) { /* There was an error. */ - lp->stats.rx_errors++; /* Update the error stats. */ + dev->stats.rx_errors++; /* Update the error stats. */ if (status & R_FRAM) - lp->stats.rx_frame_errors++; + dev->stats.rx_frame_errors++; if (status & R_OFLO) - lp->stats.rx_over_errors++; + dev->stats.rx_over_errors++; if (status & R_CRC) - lp->stats.rx_crc_errors++; + dev->stats.rx_crc_errors++; if (status & R_BUFF) - lp->stats.rx_fifo_errors++; + dev->stats.rx_fifo_errors++; } else { short len, pkt_len = readw(&lp->rx_ring[entry].msg_length) - 4; struct sk_buff *skb; @@ -1063,8 +1058,8 @@ static int depca_rx(struct net_device *dev) ** Update stats */ dev->last_rx = jiffies; - lp->stats.rx_packets++; - lp->stats.rx_bytes += pkt_len; + dev->stats.rx_packets++; + dev->stats.rx_bytes += pkt_len; for (i = 1; i < DEPCA_PKT_STAT_SZ - 1; i++) { if (pkt_len < (i * DEPCA_PKT_BIN_SZ)) { lp->pktStats.bins[i]++; @@ -1087,7 +1082,7 @@ static int depca_rx(struct net_device *dev) } } else { printk("%s: Memory squeeze, deferring packet.\n", dev->name); - lp->stats.rx_dropped++; /* Really, deferred. */ + dev->stats.rx_dropped++; /* Really, deferred. */ break; } } @@ -1125,24 +1120,24 @@ static int depca_tx(struct net_device *dev) break; } else if (status & T_ERR) { /* An error occurred. */ status = readl(&lp->tx_ring[entry].misc); - lp->stats.tx_errors++; + dev->stats.tx_errors++; if (status & TMD3_RTRY) - lp->stats.tx_aborted_errors++; + dev->stats.tx_aborted_errors++; if (status & TMD3_LCAR) - lp->stats.tx_carrier_errors++; + dev->stats.tx_carrier_errors++; if (status & TMD3_LCOL) - lp->stats.tx_window_errors++; + dev->stats.tx_window_errors++; if (status & TMD3_UFLO) - lp->stats.tx_fifo_errors++; + dev->stats.tx_fifo_errors++; if (status & (TMD3_BUFF | TMD3_UFLO)) { /* Trigger an immediate send demand. 
*/ outw(CSR0, DEPCA_ADDR); outw(INEA | TDMD, DEPCA_DATA); } } else if (status & (T_MORE | T_ONE)) { - lp->stats.collisions++; + dev->stats.collisions++; } else { - lp->stats.tx_packets++; + dev->stats.tx_packets++; } /* Update all the pointers */ @@ -1234,15 +1229,6 @@ static int InitRestartDepca(struct net_device *dev) return status; } -static struct net_device_stats *depca_get_stats(struct net_device *dev) -{ - struct depca_private *lp = (struct depca_private *) dev->priv; - - /* Null body since there is no framing error counter */ - - return &lp->stats; -} - /* ** Set or clear the multicast filter for this adaptor. */ @@ -1855,6 +1841,7 @@ static void depca_dbg_open(struct net_device *dev) u_long ioaddr = dev->base_addr; struct depca_init *p = &lp->init_block; int i; + DECLARE_MAC_BUF(mac); if (depca_debug > 1) { /* Do not copy the shadow init block into shared memory */ @@ -1893,11 +1880,7 @@ static void depca_dbg_open(struct net_device *dev) printk("...0x%8.8x\n", readl(&lp->tx_ring[i].base)); printk("Initialisation block at 0x%8.8lx(Phys)\n", lp->mem_start); printk(" mode: 0x%4.4x\n", p->mode); - printk(" physical address: "); - for (i = 0; i < ETH_ALEN - 1; i++) { - printk("%2.2x:", p->phys_addr[i]); - } - printk("%2.2x\n", p->phys_addr[i]); + printk(" physical address: %s\n", print_mac(mac, p->phys_addr)); printk(" multicast hash table: "); for (i = 0; i < (HASH_TABLE_LEN >> 3) - 1; i++) { printk("%2.2x:", p->mcast_table[i]); diff --git a/drivers/net/dgrs.c b/drivers/net/dgrs.c deleted file mode 100644 index df62c0232f369e431876a2da16b50fafbb2d7d43..0000000000000000000000000000000000000000 --- a/drivers/net/dgrs.c +++ /dev/null @@ -1,1615 +0,0 @@ -/* - * Digi RightSwitch SE-X loadable device driver for Linux - * - * The RightSwitch is a 4 (EISA) or 6 (PCI) port etherswitch and - * a NIC on an internal board. - * - * Author: Rick Richardson, rick@remotepoint.com - * Derived from the SVR4.2 (UnixWare) driver for the same card. - * - * Copyright 1995-1996 Digi International Inc. - * - * This software may be used and distributed according to the terms - * of the GNU General Public License, incorporated herein by reference. - * - * For information on purchasing a RightSwitch SE-4 or SE-6 - * board, please contact Digi's sales department at 1-612-912-3444 - * or 1-800-DIGIBRD. Outside the U.S., please check our Web page - * at http://www.dgii.com for sales offices worldwide. - * - * OPERATION: - * When compiled as a loadable module, this driver can operate - * the board as either a 4/6 port switch with a 5th or 7th port - * that is a conventional NIC interface as far as the host is - * concerned, OR as 4/6 independent NICs. To select multi-NIC - * mode, add "nicmode=1" on the insmod load line for the driver. - * - * This driver uses the "dev" common ethernet device structure - * and a private "priv" (dev->priv) structure that contains - * mostly DGRS-specific information and statistics. To keep - * the code for both the switch mode and the multi-NIC mode - * as similar as possible, I have introduced the concept of - * "dev0"/"priv0" and "devN"/"privN" pointer pairs in subroutines - * where needed. The first pair of pointers points to the - * "dev" and "priv" structures of the zeroth (0th) device - * interface associated with a board. The second pair of - * pointers points to the current (Nth) device interface - * for the board: the one for which we are processing data. - * - * In switch mode, the pairs of pointers are always the same, - * that is, dev0 == devN and priv0 == privN. 
This is just - * like previous releases of this driver which did not support - * NIC mode. - * - * In multi-NIC mode, the pairs of pointers may be different. - * We use the devN and privN pointers to reference just the - * name, port number, and statistics for the current interface. - * We use the dev0 and priv0 pointers to access the variables - * that control access to the board, such as board address - * and simulated 82596 variables. This is because there is - * only one "fake" 82596 that serves as the interface to - * the board. We do not want to try to keep the variables - * associated with this 82596 in sync across all devices. - * - * This scheme works well. As you will see, except for - * initialization, there is very little difference between - * the two modes as far as this driver is concerned. On the - * receive side in NIC mode, the interrupt *always* comes in on - * the 0th interface (dev0/priv0). We then figure out which - * real 82596 port it came in on from looking at the "chan" - * member that the board firmware adds at the end of each - * RBD (a.k.a. TBD). We get the channel number like this: - * int chan = ((I596_RBD *) S2H(cbp->xmit.tbdp))->chan; - * - * On the transmit side in multi-NIC mode, we specify the - * output 82596 port by setting the new "dstchan" structure - * member that is at the end of the RFD, like this: - * priv0->rfdp->dstchan = privN->chan; - * - * TODO: - * - Multi-NIC mode is not yet supported when the driver is linked - * into the kernel. - * - Better handling of multicast addresses. - * - * Fixes: - * Arnaldo Carvalho de Melo - 11/01/2001 - * - fix dgrs_found_device wrt checking kmalloc return and - * rollbacking the partial steps of the whole process when - * one of the devices can't be allocated. Fix SET_MODULE_OWNER - * on the loop to use devN instead of repeated calls to dev. - * - * davej - 9/2/2001 - * - Enable PCI device before reading ioaddr/irq - * - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include - -static char version[] __initdata = - "$Id: dgrs.c,v 1.13 2000/06/06 04:07:00 rick Exp $"; - -/* - * DGRS include files - */ -typedef unsigned char uchar; -#define vol volatile - -#include "dgrs.h" -#include "dgrs_es4h.h" -#include "dgrs_plx9060.h" -#include "dgrs_i82596.h" -#include "dgrs_ether.h" -#include "dgrs_asstruct.h" -#include "dgrs_bcomm.h" - -#ifdef CONFIG_PCI -static struct pci_device_id dgrs_pci_tbl[] = { - { SE6_PCI_VENDOR_ID, SE6_PCI_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID, }, - { } /* Terminating entry */ -}; -MODULE_DEVICE_TABLE(pci, dgrs_pci_tbl); -#endif - -#ifdef CONFIG_EISA -static struct eisa_device_id dgrs_eisa_tbl[] = { - { "DBI0A01" }, - { } -}; -MODULE_DEVICE_TABLE(eisa, dgrs_eisa_tbl); -#endif - -MODULE_LICENSE("GPL"); - - -/* - * Firmware. Compiled separately for local compilation, - * but #included for Linux distribution. 
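The removed dgrs driver's multi-NIC mode, described in the comment above, rests on one trick: every receive interrupt arrives on the 0th interface, and the firmware appends a channel number to each buffer descriptor so the handler can look up the Nth device in a small table. A stand-alone sketch of that demultiplexing step, with invented structure and port names:

#include <stdio.h>

struct rbd {                       /* stand-in for the firmware's RBD */
	int len;
	int chan;                  /* 1-based port number added by firmware */
};

struct port {
	const char *name;
	unsigned long rx_packets;
};

static struct port ports[6] = {
	{ "eth0", 0 }, { "eth1", 0 }, { "eth2", 0 },
	{ "eth3", 0 }, { "eth4", 0 }, { "eth5", 0 },
};

static void rcv_frame(const struct rbd *rbd)
{
	struct port *p = &ports[rbd->chan - 1];   /* demux to the Nth port */

	p->rx_packets++;
	printf("%s: rcv len=%d\n", p->name, rbd->len);
}

int main(void)
{
	struct rbd r = { 64, 3 };

	rcv_frame(&r);      /* counted against eth2 */
	return 0;
}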
- */ -#ifndef NOFW - #include "dgrs_firmware.c" -#else - extern int dgrs_firmnum; - extern char dgrs_firmver[]; - extern char dgrs_firmdate[]; - extern uchar dgrs_code[]; - extern int dgrs_ncode; -#endif - -/* - * Linux out*() is backwards from all other operating systems - */ -#define OUTB(ADDR, VAL) outb(VAL, ADDR) -#define OUTW(ADDR, VAL) outw(VAL, ADDR) -#define OUTL(ADDR, VAL) outl(VAL, ADDR) - -/* - * Macros to convert switch to host and host to switch addresses - * (assumes a local variable priv points to board dependent struct) - */ -#define S2H(A) ( ((unsigned long)(A)&0x00ffffff) + priv0->vmem ) -#define S2HN(A) ( ((unsigned long)(A)&0x00ffffff) + privN->vmem ) -#define H2S(A) ( ((char *) (A) - priv0->vmem) + 0xA3000000 ) - -/* - * Convert a switch address to a "safe" address for use with the - * PLX 9060 DMA registers and the associated HW kludge that allows - * for host access of the DMA registers. - */ -#define S2DMA(A) ( (unsigned long)(A) & 0x00ffffff) - -/* - * "Space.c" variables, now settable from module interface - * Use the name below, minus the "dgrs_" prefix. See init_module(). - */ -static int dgrs_debug = 1; -static int dgrs_dma = 1; -static int dgrs_spantree = -1; -static int dgrs_hashexpire = -1; -static uchar dgrs_ipaddr[4] = { 0xff, 0xff, 0xff, 0xff}; -static uchar dgrs_iptrap[4] = { 0xff, 0xff, 0xff, 0xff}; -static __u32 dgrs_ipxnet = -1; -static int dgrs_nicmode; - -/* - * Private per-board data structure (dev->priv) - */ -typedef struct -{ - /* - * Stuff for generic ethercard I/F - */ - struct net_device_stats stats; - - /* - * DGRS specific data - */ - char *vmem; - - struct bios_comm *bcomm; /* Firmware BIOS comm structure */ - PORT *port; /* Ptr to PORT[0] struct in VM */ - I596_SCB *scbp; /* Ptr to SCB struct in VM */ - I596_RFD *rfdp; /* Current RFD list */ - I596_RBD *rbdp; /* Current RBD list */ - - volatile int intrcnt; /* Count of interrupts */ - - /* - * SE-4 (EISA) board variables - */ - uchar is_reg; /* EISA: Value for ES4H_IS reg */ - - /* - * SE-6 (PCI) board variables - * - * The PLX "expansion rom" space is used for DMA register - * access from the host on the SE-6. These are the physical - * and virtual addresses of that space. - */ - ulong plxreg; /* Phys address of PLX chip */ - char *vplxreg; /* Virtual address of PLX chip */ - ulong plxdma; /* Phys addr of PLX "expansion rom" */ - ulong volatile *vplxdma; /* Virtual addr of "expansion rom" */ - int use_dma; /* Flag: use DMA */ - DMACHAIN *dmadesc_s; /* area for DMA chains (SW addr.) */ - DMACHAIN *dmadesc_h; /* area for DMA chains (Host Virtual) */ - - /* - * Multi-NIC mode variables - * - * All entries of the devtbl[] array are valid for the 0th - * device (i.e. eth0, but not eth1...eth5). devtbl[0] is - * valid for all devices (i.e. eth0, eth1, ..., eth5). - */ - int nports; /* Number of physical ports (4 or 6) */ - int chan; /* Channel # (1-6) for this device */ - struct net_device *devtbl[6]; /* Ptrs to N device structs */ - -} DGRS_PRIV; - - -/* - * reset or un-reset the IDT processor - */ -static void -proc_reset(struct net_device *dev0, int reset) -{ - DGRS_PRIV *priv0 = (DGRS_PRIV *) dev0->priv; - - if (priv0->plxreg) - { - ulong val; - val = inl(dev0->base_addr + PLX_MISC_CSR); - if (reset) - val |= SE6_RESET; - else - val &= ~SE6_RESET; - OUTL(dev0->base_addr + PLX_MISC_CSR, val); - } - else - { - OUTB(dev0->base_addr + ES4H_PC, reset ? 
ES4H_PC_RESET : 0); - } -} - -/* - * See if the board supports bus master DMA - */ -static int -check_board_dma(struct net_device *dev0) -{ - DGRS_PRIV *priv0 = (DGRS_PRIV *) dev0->priv; - ulong x; - - /* - * If Space.c says not to use DMA, or if it's not a PLX based - * PCI board, or if the expansion ROM space is not PCI - * configured, then return false. - */ - if (!dgrs_dma || !priv0->plxreg || !priv0->plxdma) - return (0); - - /* - * Set the local address remap register of the "expansion rom" - * area to 0x80000000 so that we can use it to access the DMA - * registers from the host side. - */ - OUTL(dev0->base_addr + PLX_ROM_BASE_ADDR, 0x80000000); - - /* - * Set the PCI region descriptor to: - * Space 0: - * disable read-prefetch - * enable READY - * enable BURST - * 0 internal wait states - * Expansion ROM: (used for host DMA register access) - * disable read-prefetch - * enable READY - * disable BURST - * 0 internal wait states - */ - OUTL(dev0->base_addr + PLX_BUS_REGION, 0x49430343); - - /* - * Now map the DMA registers into our virtual space - */ - priv0->vplxdma = (ulong *) ioremap (priv0->plxdma, 256); - if (!priv0->vplxdma) - { - printk("%s: can't *remap() the DMA regs\n", dev0->name); - return (0); - } - - /* - * Now test to see if we can access the DMA registers - * If we write -1 and get back 1FFF, then we accessed the - * DMA register. Otherwise, we probably have an old board - * and wrote into regular RAM. - */ - priv0->vplxdma[PLX_DMA0_MODE/4] = 0xFFFFFFFF; - x = priv0->vplxdma[PLX_DMA0_MODE/4]; - if (x != 0x00001FFF) { - iounmap((void *)priv0->vplxdma); - return (0); - } - - return (1); -} - -/* - * Initiate DMA using PLX part on PCI board. Spin the - * processor until completed. All addresses are physical! - * - * If pciaddr is NULL, then it's a chaining DMA, and lcladdr is - * the address of the first DMA descriptor in the chain. - * - * If pciaddr is not NULL, then it's a single DMA. - * - * In either case, "lcladdr" must have been fixed up to make - * sure the MSB isn't set using the S2DMA macro before passing - * the address to this routine. - */ -static int -do_plx_dma( - struct net_device *dev, - ulong pciaddr, - ulong lcladdr, - int len, - int to_host -) -{ - int i; - ulong csr = 0; - DGRS_PRIV *priv = (DGRS_PRIV *) dev->priv; - - if (pciaddr) - { - /* - * Do a single, non-chain DMA - */ - priv->vplxdma[PLX_DMA0_PCI_ADDR/4] = pciaddr; - priv->vplxdma[PLX_DMA0_LCL_ADDR/4] = lcladdr; - priv->vplxdma[PLX_DMA0_SIZE/4] = len; - priv->vplxdma[PLX_DMA0_DESCRIPTOR/4] = to_host - ? PLX_DMA_DESC_TO_HOST - : PLX_DMA_DESC_TO_BOARD; - priv->vplxdma[PLX_DMA0_MODE/4] = - PLX_DMA_MODE_WIDTH32 - | PLX_DMA_MODE_WAITSTATES(0) - | PLX_DMA_MODE_READY - | PLX_DMA_MODE_NOBTERM - | PLX_DMA_MODE_BURST - | PLX_DMA_MODE_NOCHAIN; - } - else - { - /* - * Do a chaining DMA - */ - priv->vplxdma[PLX_DMA0_MODE/4] = - PLX_DMA_MODE_WIDTH32 - | PLX_DMA_MODE_WAITSTATES(0) - | PLX_DMA_MODE_READY - | PLX_DMA_MODE_NOBTERM - | PLX_DMA_MODE_BURST - | PLX_DMA_MODE_CHAIN; - priv->vplxdma[PLX_DMA0_DESCRIPTOR/4] = lcladdr; - } - - priv->vplxdma[PLX_DMA_CSR/4] = - PLX_DMA_CSR_0_ENABLE | PLX_DMA_CSR_0_START; - - /* - * Wait for DMA to complete - */ - for (i = 0; i < 1000000; ++i) - { - /* - * Spin the host CPU for 1 usec, so we don't thrash - * the PCI bus while the PLX 9060 is doing DMA. - */ - udelay(1); - - csr = (volatile unsigned long) priv->vplxdma[PLX_DMA_CSR/4]; - - if (csr & PLX_DMA_CSR_0_DONE) - break; - } - - if ( ! (csr & PLX_DMA_CSR_0_DONE) ) - { - printk("%s: DMA done never occurred. 
DMA disabled.\n", - dev->name); - priv->use_dma = 0; - return 1; - } - return 0; -} - -/* - * dgrs_rcv_frame() - * - * Process a received frame. This is called from the interrupt - * routine, and works for both switch mode and multi-NIC mode. - * - * Note that when in multi-NIC mode, we want to always access the - * hardware using the dev and priv structures of the first port, - * so that we are using only one set of variables to maintain - * the board interface status, but we want to use the Nth port - * dev and priv structures to maintain statistics and to pass - * the packet up. - * - * Only the first device structure is attached to the interrupt. - * We use the special "chan" variable at the end of the first RBD - * to select the Nth device in multi-NIC mode. - * - * We currently do chained DMA on a per-packet basis when the - * packet is "long", and we spin the CPU a short time polling - * for DMA completion. This avoids a second interrupt overhead, - * and gives the best performance for light traffic to the host. - * - * However, a better scheme that could be implemented would be - * to see how many packets are outstanding for the host, and if - * the number is "large", create a long chain to DMA several - * packets into the host in one go. In this case, we would set - * up some state variables to let the host CPU continue doing - * other things until a DMA completion interrupt comes along. - */ -static void -dgrs_rcv_frame( - struct net_device *dev0, - DGRS_PRIV *priv0, - I596_CB *cbp -) -{ - int len; - I596_TBD *tbdp; - struct sk_buff *skb; - uchar *putp; - uchar *p; - struct net_device *devN; - DGRS_PRIV *privN; - - /* - * Determine Nth priv and dev structure pointers - */ - if (dgrs_nicmode) - { /* Multi-NIC mode */ - int chan = ((I596_RBD *) S2H(cbp->xmit.tbdp))->chan; - - devN = priv0->devtbl[chan-1]; - /* - * If devN is null, we got an interrupt before the I/F - * has been initialized. Pitch the packet. - */ - if (devN == NULL) - goto out; - privN = (DGRS_PRIV *) devN->priv; - } - else - { /* Switch mode */ - devN = dev0; - privN = priv0; - } - - if (0) printk("%s: rcv len=%ld\n", devN->name, cbp->xmit.count); - - /* - * Allocate a message block big enough to hold the whole frame - */ - len = cbp->xmit.count; - if ((skb = dev_alloc_skb(len+5)) == NULL) - { - printk("%s: dev_alloc_skb failed for rcv buffer\n", devN->name); - ++privN->stats.rx_dropped; - /* discarding the frame */ - goto out; - } - skb_reserve(skb, 2); /* Align IP header */ - -again: - putp = p = skb_put(skb, len); - - /* - * There are three modes here for doing the packet copy. - * If we have DMA, and the packet is "long", we use the - * chaining mode of DMA. If it's shorter, we use single - * DMA's. Otherwise, we use memcpy(). - */ - if (priv0->use_dma && priv0->dmadesc_h && len > 64) - { - /* - * If we can use DMA and it's a long frame, copy it using - * DMA chaining. - */ - DMACHAIN *ddp_h; /* Host virtual DMA desc. pointer */ - DMACHAIN *ddp_s; /* Switch physical DMA desc. pointer */ - uchar *phys_p; - - /* - * Get the physical address of the STREAMS buffer. - * NOTE: allocb() guarantees that the whole buffer - * is in a single page if the length < 4096. 
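dgrs_rcv_frame() in the removed code picks one of three copy strategies per frame: a chained DMA when bus-master DMA is usable and the frame is longer than 64 bytes, single DMA transfers for shorter frames, and plain memcpy() when DMA is unavailable. A stripped-down stand-alone sketch of that selection; the 64-byte threshold comes from the removed code, everything else is reduced to stubs:

#include <stdio.h>

enum copy_mode { COPY_MEMCPY, COPY_SINGLE_DMA, COPY_CHAINED_DMA };

static enum copy_mode pick_copy_mode(int use_dma, int have_chain_area, int len)
{
	if (use_dma && have_chain_area && len > 64)
		return COPY_CHAINED_DMA;     /* long frame: one descriptor chain */
	if (use_dma)
		return COPY_SINGLE_DMA;      /* short frame: per-buffer DMA */
	return COPY_MEMCPY;                  /* no bus-master DMA at all */
}

int main(void)
{
	printf("%d\n", pick_copy_mode(1, 1, 1500));  /* chained DMA */
	printf("%d\n", pick_copy_mode(1, 1, 60));    /* single DMA */
	printf("%d\n", pick_copy_mode(0, 0, 1500));  /* memcpy fallback */
	return 0;
}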
- */ - phys_p = (uchar *) virt_to_phys(putp); - - ddp_h = priv0->dmadesc_h; - ddp_s = priv0->dmadesc_s; - tbdp = (I596_TBD *) S2H(cbp->xmit.tbdp); - for (;;) - { - int count; - int amt; - - count = tbdp->count; - amt = count & 0x3fff; - if (amt == 0) - break; /* For safety */ - if ( (p-putp) >= len) - { - printk("%s: cbp = %lx\n", devN->name, (long) H2S(cbp)); - proc_reset(dev0, 1); /* Freeze IDT */ - break; /* For Safety */ - } - - ddp_h->pciaddr = (ulong) phys_p; - ddp_h->lcladdr = S2DMA(tbdp->buf); - ddp_h->len = amt; - - phys_p += amt; - p += amt; - - if (count & I596_TBD_EOF) - { - ddp_h->next = PLX_DMA_DESC_TO_HOST - | PLX_DMA_DESC_EOC; - ++ddp_h; - break; - } - else - { - ++ddp_s; - ddp_h->next = PLX_DMA_DESC_TO_HOST - | (ulong) ddp_s; - tbdp = (I596_TBD *) S2H(tbdp->next); - ++ddp_h; - } - } - if (ddp_h - priv0->dmadesc_h) - { - int rc; - - rc = do_plx_dma(dev0, - 0, (ulong) priv0->dmadesc_s, len, 0); - if (rc) - { - printk("%s: Chained DMA failure\n", devN->name); - goto again; - } - } - } - else if (priv0->use_dma) - { - /* - * If we can use DMA and it's a shorter frame, copy it - * using single DMA transfers. - */ - uchar *phys_p; - - /* - * Get the physical address of the STREAMS buffer. - * NOTE: allocb() guarantees that the whole buffer - * is in a single page if the length < 4096. - */ - phys_p = (uchar *) virt_to_phys(putp); - - tbdp = (I596_TBD *) S2H(cbp->xmit.tbdp); - for (;;) - { - int count; - int amt; - int rc; - - count = tbdp->count; - amt = count & 0x3fff; - if (amt == 0) - break; /* For safety */ - if ( (p-putp) >= len) - { - printk("%s: cbp = %lx\n", devN->name, (long) H2S(cbp)); - proc_reset(dev0, 1); /* Freeze IDT */ - break; /* For Safety */ - } - rc = do_plx_dma(dev0, (ulong) phys_p, - S2DMA(tbdp->buf), amt, 1); - if (rc) - { - memcpy(p, S2H(tbdp->buf), amt); - printk("%s: Single DMA failed\n", devN->name); - } - phys_p += amt; - p += amt; - if (count & I596_TBD_EOF) - break; - tbdp = (I596_TBD *) S2H(tbdp->next); - } - } - else - { - /* - * Otherwise, copy it piece by piece using memcpy() - */ - tbdp = (I596_TBD *) S2H(cbp->xmit.tbdp); - for (;;) - { - int count; - int amt; - - count = tbdp->count; - amt = count & 0x3fff; - if (amt == 0) - break; /* For safety */ - if ( (p-putp) >= len) - { - printk("%s: cbp = %lx\n", devN->name, (long) H2S(cbp)); - proc_reset(dev0, 1); /* Freeze IDT */ - break; /* For Safety */ - } - memcpy(p, S2H(tbdp->buf), amt); - p += amt; - if (count & I596_TBD_EOF) - break; - tbdp = (I596_TBD *) S2H(tbdp->next); - } - } - - /* - * Pass the frame to upper half - */ - skb->protocol = eth_type_trans(skb, devN); - netif_rx(skb); - devN->last_rx = jiffies; - ++privN->stats.rx_packets; - privN->stats.rx_bytes += len; - -out: - cbp->xmit.status = I596_CB_STATUS_C | I596_CB_STATUS_OK; -} - -/* - * Start transmission of a frame - * - * The interface to the board is simple: we pretend that we are - * a fifth 82596 ethernet controller 'receiving' data, and copy the - * data into the same structures that a real 82596 would. This way, - * the board firmware handles the host 'port' the same as any other. - * - * NOTE: we do not use Bus master DMA for this routine. Turns out - * that it is not needed. Slave writes over the PCI bus are about - * as fast as DMA, due to the fact that the PLX part can do burst - * writes. The same is not true for data being read from the board. - * - * For multi-NIC mode, we tell the firmware the desired 82596 - * output port by setting the special "dstchan" member at the - * end of the traditional 82596 RFD structure. 
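The transmit path that follows copies the skb into the simulated 82596's receive buffers chunk by chunk, limiting each copy to the space left in the current RBD and flagging the last buffer as end-of-frame (runts are padded to 60 bytes). A small stand-alone model of the chunking loop, with an invented buffer size and the bookkeeping reduced to counters:

#include <stdio.h>

#define RBD_SIZE  512                 /* illustrative buffer size */
#define MIN(a, b) ((a) < (b) ? (a) : (b))

int main(void)
{
	int len = 1400;                /* frame length to transmit */
	int copied = 0;
	int nbufs = 0;

	while (len > 0) {
		int amt = MIN(len, RBD_SIZE);   /* space left in this buffer */

		copied += amt;
		len -= amt;
		nbufs++;
		/* the real driver marks the final buffer end-of-frame here */
	}
	printf("copied %d bytes into %d buffers\n", copied, nbufs);
	return 0;
}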
- */ - -static int dgrs_start_xmit(struct sk_buff *skb, struct net_device *devN) -{ - DGRS_PRIV *privN = (DGRS_PRIV *) devN->priv; - struct net_device *dev0; - DGRS_PRIV *priv0; - I596_RBD *rbdp; - int count; - int i, len, amt; - - /* - * Determine 0th priv and dev structure pointers - */ - if (dgrs_nicmode) - { - dev0 = privN->devtbl[0]; - priv0 = (DGRS_PRIV *) dev0->priv; - } - else - { - dev0 = devN; - priv0 = privN; - } - - if (dgrs_debug > 1) - printk("%s: xmit len=%d\n", devN->name, (int) skb->len); - - devN->trans_start = jiffies; - netif_start_queue(devN); - - if (priv0->rfdp->cmd & I596_RFD_EL) - { /* Out of RFD's */ - if (0) printk("%s: NO RFD's\n", devN->name); - goto no_resources; - } - - rbdp = priv0->rbdp; - count = 0; - priv0->rfdp->rbdp = (I596_RBD *) H2S(rbdp); - - i = 0; len = skb->len; - for (;;) - { - if (rbdp->size & I596_RBD_EL) - { /* Out of RBD's */ - if (0) printk("%s: NO RBD's\n", devN->name); - goto no_resources; - } - - amt = min_t(unsigned int, len, rbdp->size - count); - skb_copy_from_linear_data_offset(skb, i, S2H(rbdp->buf) + count, amt); - i += amt; - count += amt; - len -= amt; - if (len == 0) - { - if (skb->len < 60) - rbdp->count = 60 | I596_RBD_EOF; - else - rbdp->count = count | I596_RBD_EOF; - rbdp = (I596_RBD *) S2H(rbdp->next); - goto frame_done; - } - else if (count < 32) - { - /* More data to come, but we used less than 32 - * bytes of this RBD. Keep filling this RBD. - */ - {} /* Yes, we do nothing here */ - } - else - { - rbdp->count = count; - rbdp = (I596_RBD *) S2H(rbdp->next); - count = 0; - } - } - -frame_done: - priv0->rbdp = rbdp; - if (dgrs_nicmode) - priv0->rfdp->dstchan = privN->chan; - priv0->rfdp->status = I596_RFD_C | I596_RFD_OK; - priv0->rfdp = (I596_RFD *) S2H(priv0->rfdp->next); - - ++privN->stats.tx_packets; - - dev_kfree_skb (skb); - return (0); - -no_resources: - priv0->scbp->status |= I596_SCB_RNR; /* simulate I82596 */ - return (-EAGAIN); -} - -/* - * Open the interface - */ -static int -dgrs_open( struct net_device *dev ) -{ - netif_start_queue(dev); - return (0); -} - -/* - * Close the interface - */ -static int dgrs_close( struct net_device *dev ) -{ - netif_stop_queue(dev); - return (0); -} - -/* - * Get statistics - */ -static struct net_device_stats *dgrs_get_stats( struct net_device *dev ) -{ - DGRS_PRIV *priv = (DGRS_PRIV *) dev->priv; - - return (&priv->stats); -} - -/* - * Set multicast list and/or promiscuous mode - */ - -static void dgrs_set_multicast_list( struct net_device *dev) -{ - DGRS_PRIV *priv = (DGRS_PRIV *) dev->priv; - - priv->port->is_promisc = (dev->flags & IFF_PROMISC) ? 
1 : 0; -} - -/* - * Unique ioctl's - */ -static int dgrs_ioctl(struct net_device *devN, struct ifreq *ifr, int cmd) -{ - DGRS_PRIV *privN = (DGRS_PRIV *) devN->priv; - DGRS_IOCTL ioc; - int i; - - if (cmd != DGRSIOCTL) - return -EINVAL; - - if(copy_from_user(&ioc, ifr->ifr_data, sizeof(DGRS_IOCTL))) - return -EFAULT; - - switch (ioc.cmd) - { - case DGRS_GETMEM: - if (ioc.len != sizeof(ulong)) - return -EINVAL; - if(copy_to_user(ioc.data, &devN->mem_start, ioc.len)) - return -EFAULT; - return (0); - case DGRS_SETFILTER: - if (!capable(CAP_NET_ADMIN)) - return -EPERM; - if (ioc.port > privN->bcomm->bc_nports) - return -EINVAL; - if (ioc.filter >= NFILTERS) - return -EINVAL; - if (ioc.len > privN->bcomm->bc_filter_area_len) - return -EINVAL; - - /* Wait for old command to finish */ - for (i = 0; i < 1000; ++i) - { - if ( (volatile long) privN->bcomm->bc_filter_cmd <= 0 ) - break; - udelay(1); - } - if (i >= 1000) - return -EIO; - - privN->bcomm->bc_filter_port = ioc.port; - privN->bcomm->bc_filter_num = ioc.filter; - privN->bcomm->bc_filter_len = ioc.len; - - if (ioc.len) - { - if(copy_from_user(S2HN(privN->bcomm->bc_filter_area), - ioc.data, ioc.len)) - return -EFAULT; - privN->bcomm->bc_filter_cmd = BC_FILTER_SET; - } - else - privN->bcomm->bc_filter_cmd = BC_FILTER_CLR; - return(0); - default: - return -EOPNOTSUPP; - } -} - -/* - * Process interrupts - * - * dev, priv will always refer to the 0th device in Multi-NIC mode. - */ - -static irqreturn_t dgrs_intr(int irq, void *dev_id) -{ - struct net_device *dev0 = dev_id; - DGRS_PRIV *priv0 = dev0->priv; - I596_CB *cbp; - int cmd; - int i; - - ++priv0->intrcnt; - if (1) ++priv0->bcomm->bc_cnt[4]; - if (0) - { - static int cnt = 100; - if (--cnt > 0) - printk("%s: interrupt: irq %d\n", dev0->name, irq); - } - - /* - * Get 596 command - */ - cmd = priv0->scbp->cmd; - - /* - * See if RU has been restarted - */ - if ( (cmd & I596_SCB_RUC) == I596_SCB_RUC_START) - { - if (0) printk("%s: RUC start\n", dev0->name); - priv0->rfdp = (I596_RFD *) S2H(priv0->scbp->rfdp); - priv0->rbdp = (I596_RBD *) S2H(priv0->rfdp->rbdp); - priv0->scbp->status &= ~(I596_SCB_RNR|I596_SCB_RUS); - /* - * Tell upper half (halves) - */ - if (dgrs_nicmode) - { - for (i = 0; i < priv0->nports; ++i) - netif_wake_queue (priv0->devtbl[i]); - } - else - netif_wake_queue (dev0); - /* if (bd->flags & TX_QUEUED) - DL_sched(bd, bdd); */ - } - - /* - * See if any CU commands to process - */ - if ( (cmd & I596_SCB_CUC) != I596_SCB_CUC_START) - { - priv0->scbp->cmd = 0; /* Ignore all other commands */ - goto ack_intr; - } - priv0->scbp->status &= ~(I596_SCB_CNA|I596_SCB_CUS); - - /* - * Process a command - */ - cbp = (I596_CB *) S2H(priv0->scbp->cbp); - priv0->scbp->cmd = 0; /* Safe to clear the command */ - for (;;) - { - switch (cbp->nop.cmd & I596_CB_CMD) - { - case I596_CB_CMD_XMIT: - dgrs_rcv_frame(dev0, priv0, cbp); - break; - default: - cbp->nop.status = I596_CB_STATUS_C | I596_CB_STATUS_OK; - break; - } - if (cbp->nop.cmd & I596_CB_CMD_EL) - break; - cbp = (I596_CB *) S2H(cbp->nop.next); - } - priv0->scbp->status |= I596_SCB_CNA; - - /* - * Ack the interrupt - */ -ack_intr: - if (priv0->plxreg) - OUTL(dev0->base_addr + PLX_LCL2PCI_DOORBELL, 1); - - return IRQ_HANDLED; -} - -/* - * Download the board firmware - */ -static int __init -dgrs_download(struct net_device *dev0) -{ - DGRS_PRIV *priv0 = (DGRS_PRIV *) dev0->priv; - int is; - unsigned long i; - - static const int iv2is[16] = { - 0, 0, 0, ES4H_IS_INT3, - 0, ES4H_IS_INT5, 0, ES4H_IS_INT7, - 0, 0, ES4H_IS_INT10, 
ES4H_IS_INT11, - ES4H_IS_INT12, 0, 0, ES4H_IS_INT15 }; - - /* - * Map in the dual port memory - */ - priv0->vmem = ioremap(dev0->mem_start, 2048*1024); - if (!priv0->vmem) - { - printk("%s: cannot map in board memory\n", dev0->name); - return -ENXIO; - } - - /* - * Hold the processor and configure the board addresses - */ - if (priv0->plxreg) - { /* PCI bus */ - proc_reset(dev0, 1); - } - else - { /* EISA bus */ - is = iv2is[dev0->irq & 0x0f]; - if (!is) - { - printk("%s: Illegal IRQ %d\n", dev0->name, dev0->irq); - iounmap(priv0->vmem); - priv0->vmem = NULL; - return -ENXIO; - } - OUTB(dev0->base_addr + ES4H_AS_31_24, - (uchar) (dev0->mem_start >> 24) ); - OUTB(dev0->base_addr + ES4H_AS_23_16, - (uchar) (dev0->mem_start >> 16) ); - priv0->is_reg = ES4H_IS_LINEAR | is | - ((uchar) (dev0->mem_start >> 8) & ES4H_IS_AS15); - OUTB(dev0->base_addr + ES4H_IS, priv0->is_reg); - OUTB(dev0->base_addr + ES4H_EC, ES4H_EC_ENABLE); - OUTB(dev0->base_addr + ES4H_PC, ES4H_PC_RESET); - OUTB(dev0->base_addr + ES4H_MW, ES4H_MW_ENABLE | 0x00); - } - - /* - * See if we can do DMA on the SE-6 - */ - priv0->use_dma = check_board_dma(dev0); - if (priv0->use_dma) - printk("%s: Bus Master DMA is enabled.\n", dev0->name); - - /* - * Load and verify the code at the desired address - */ - memcpy(priv0->vmem, dgrs_code, dgrs_ncode); /* Load code */ - if (memcmp(priv0->vmem, dgrs_code, dgrs_ncode)) - { - iounmap(priv0->vmem); - priv0->vmem = NULL; - printk("%s: download compare failed\n", dev0->name); - return -ENXIO; - } - - /* - * Configurables - */ - priv0->bcomm = (struct bios_comm *) (priv0->vmem + 0x0100); - priv0->bcomm->bc_nowait = 1; /* Tell board to make printf not wait */ - priv0->bcomm->bc_squelch = 0; /* Flag from Space.c */ - priv0->bcomm->bc_150ohm = 0; /* Flag from Space.c */ - - priv0->bcomm->bc_spew = 0; /* Debug flag from Space.c */ - priv0->bcomm->bc_maxrfd = 0; /* Debug flag from Space.c */ - priv0->bcomm->bc_maxrbd = 0; /* Debug flag from Space.c */ - - /* - * Tell board we are operating in switch mode (1) or in - * multi-NIC mode (2). - */ - priv0->bcomm->bc_host = dgrs_nicmode ? BC_MULTINIC : BC_SWITCH; - - /* - * Request memory space on board for DMA chains - */ - if (priv0->use_dma) - priv0->bcomm->bc_hostarea_len = (2048/64) * 16; - - /* - * NVRAM configurables from Space.c - */ - priv0->bcomm->bc_spantree = dgrs_spantree; - priv0->bcomm->bc_hashexpire = dgrs_hashexpire; - memcpy(priv0->bcomm->bc_ipaddr, dgrs_ipaddr, 4); - memcpy(priv0->bcomm->bc_iptrap, dgrs_iptrap, 4); - memcpy(priv0->bcomm->bc_ipxnet, &dgrs_ipxnet, 4); - - /* - * Release processor, wait 8 seconds for board to initialize - */ - proc_reset(dev0, 0); - - for (i = jiffies + 8 * HZ; time_after(i, jiffies); ) - { - barrier(); /* Gcc 2.95 needs this */ - if (priv0->bcomm->bc_status >= BC_RUN) - break; - } - - if (priv0->bcomm->bc_status < BC_RUN) - { - printk("%s: board not operating\n", dev0->name); - iounmap(priv0->vmem); - priv0->vmem = NULL; - return -ENXIO; - } - - priv0->port = (PORT *) S2H(priv0->bcomm->bc_port); - priv0->scbp = (I596_SCB *) S2H(priv0->port->scbp); - priv0->rfdp = (I596_RFD *) S2H(priv0->scbp->rfdp); - priv0->rbdp = (I596_RBD *) S2H(priv0->rfdp->rbdp); - - priv0->scbp->status = I596_SCB_CNA; /* CU is idle */ - - /* - * Get switch physical and host virtual pointers to DMA - * chaining area. NOTE: the MSB of the switch physical - * address *must* be turned off. Otherwise, the HW kludge - * that allows host access of the PLX DMA registers will - * erroneously select the PLX registers. 
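The firmware boot wait in the removed dgrs_download() spins for up to eight seconds using time_after(), which compares jiffies deadlines in a wraparound-safe way by inspecting the sign of the difference. A stand-alone illustration of the idea (not the kernel macro itself), using fixed-width types so the wraparound is reproducible:

#include <stdio.h>
#include <stdint.h>

/* wraparound-safe "is a later than b?" check, same idea as time_after() */
static int after(uint32_t a, uint32_t b)
{
	return (int32_t)(b - a) < 0;
}

int main(void)
{
	uint32_t jiffies = 0xfffffff0u;          /* counter about to wrap */
	uint32_t deadline = jiffies + 32;        /* wraps past zero */

	printf("%d\n", after(deadline, jiffies)); /* 1: deadline still ahead */
	jiffies += 64;                            /* time passes, counter wrapped */
	printf("%d\n", after(deadline, jiffies)); /* 0: deadline has expired */
	return 0;
}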
- */ - priv0->dmadesc_s = (DMACHAIN *) S2DMA(priv0->bcomm->bc_hostarea); - if (priv0->dmadesc_s) - priv0->dmadesc_h = (DMACHAIN *) S2H(priv0->dmadesc_s); - else - priv0->dmadesc_h = NULL; - - /* - * Enable board interrupts - */ - if (priv0->plxreg) - { /* PCI bus */ - OUTL(dev0->base_addr + PLX_INT_CSR, - inl(dev0->base_addr + PLX_INT_CSR) - | PLX_PCI_DOORBELL_IE); /* Enable intr to host */ - OUTL(dev0->base_addr + PLX_LCL2PCI_DOORBELL, 1); - } - else - { /* EISA bus */ - } - - return (0); -} - -/* - * Probe (init) a board - */ -static int __init -dgrs_probe1(struct net_device *dev) -{ - DGRS_PRIV *priv = (DGRS_PRIV *) dev->priv; - unsigned long i; - int rc; - - printk("%s: Digi RightSwitch io=%lx mem=%lx irq=%d plx=%lx dma=%lx\n", - dev->name, dev->base_addr, dev->mem_start, dev->irq, - priv->plxreg, priv->plxdma); - - /* - * Download the firmware and light the processor - */ - rc = dgrs_download(dev); - if (rc) - goto err_out; - - /* - * Get ether address of board - */ - printk("%s: Ethernet address", dev->name); - memcpy(dev->dev_addr, priv->port->ethaddr, 6); - for (i = 0; i < 6; ++i) - printk("%c%2.2x", i ? ':' : ' ', dev->dev_addr[i]); - printk("\n"); - - if (dev->dev_addr[0] & 1) - { - printk("%s: Illegal Ethernet Address\n", dev->name); - rc = -ENXIO; - goto err_out; - } - - /* - * ACK outstanding interrupts, hook the interrupt, - * and verify that we are getting interrupts from the board. - */ - if (priv->plxreg) - OUTL(dev->base_addr + PLX_LCL2PCI_DOORBELL, 1); - - rc = request_irq(dev->irq, &dgrs_intr, IRQF_SHARED, "RightSwitch", dev); - if (rc) - goto err_out; - - priv->intrcnt = 0; - for (i = jiffies + 2*HZ + HZ/2; time_after(i, jiffies); ) - { - cpu_relax(); - if (priv->intrcnt >= 2) - break; - } - if (priv->intrcnt < 2) - { - printk(KERN_ERR "%s: Not interrupting on IRQ %d (%d)\n", - dev->name, dev->irq, priv->intrcnt); - rc = -ENXIO; - goto err_free_irq; - } - - /* - * Entry points... - */ - dev->open = &dgrs_open; - dev->stop = &dgrs_close; - dev->get_stats = &dgrs_get_stats; - dev->hard_start_xmit = &dgrs_start_xmit; - dev->set_multicast_list = &dgrs_set_multicast_list; - dev->do_ioctl = &dgrs_ioctl; - - return rc; - -err_free_irq: - free_irq(dev->irq, dev); -err_out: - return rc; -} - -static int __init -dgrs_initclone(struct net_device *dev) -{ - DGRS_PRIV *priv = (DGRS_PRIV *) dev->priv; - int i; - - printk("%s: Digi RightSwitch port %d ", - dev->name, priv->chan); - for (i = 0; i < 6; ++i) - printk("%c%2.2x", i ? 
':' : ' ', dev->dev_addr[i]); - printk("\n"); - - return (0); -} - -static struct net_device * __init -dgrs_found_device( - int io, - ulong mem, - int irq, - ulong plxreg, - ulong plxdma, - struct device *pdev -) -{ - DGRS_PRIV *priv; - struct net_device *dev; - int i, ret = -ENOMEM; - - dev = alloc_etherdev(sizeof(DGRS_PRIV)); - if (!dev) - goto err0; - - priv = (DGRS_PRIV *)dev->priv; - - dev->base_addr = io; - dev->mem_start = mem; - dev->mem_end = mem + 2048 * 1024 - 1; - dev->irq = irq; - priv->plxreg = plxreg; - priv->plxdma = plxdma; - priv->vplxdma = NULL; - - priv->chan = 1; - priv->devtbl[0] = dev; - - SET_MODULE_OWNER(dev); - SET_NETDEV_DEV(dev, pdev); - - ret = dgrs_probe1(dev); - if (ret) - goto err1; - - ret = register_netdev(dev); - if (ret) - goto err2; - - if ( !dgrs_nicmode ) - return dev; /* Switch mode, we are done */ - - /* - * Operating card as N separate NICs - */ - - priv->nports = priv->bcomm->bc_nports; - - for (i = 1; i < priv->nports; ++i) - { - struct net_device *devN; - DGRS_PRIV *privN; - /* Allocate new dev and priv structures */ - devN = alloc_etherdev(sizeof(DGRS_PRIV)); - ret = -ENOMEM; - if (!devN) - goto fail; - - /* Don't copy the network device structure! */ - - /* copy the priv structure of dev[0] */ - privN = (DGRS_PRIV *)devN->priv; - *privN = *priv; - - /* ... and zero out VM areas */ - privN->vmem = NULL; - privN->vplxdma = NULL; - /* ... and zero out IRQ */ - devN->irq = 0; - /* ... and base MAC address off address of 1st port */ - devN->dev_addr[5] += i; - - ret = dgrs_initclone(devN); - if (ret) - goto fail; - - SET_MODULE_OWNER(devN); - SET_NETDEV_DEV(dev, pdev); - - ret = register_netdev(devN); - if (ret) { - free_netdev(devN); - goto fail; - } - privN->chan = i+1; - priv->devtbl[i] = devN; - } - return dev; - - fail: - while (i >= 0) { - struct net_device *d = priv->devtbl[i--]; - unregister_netdev(d); - free_netdev(d); - } - - err2: - free_irq(dev->irq, dev); - err1: - free_netdev(dev); - err0: - return ERR_PTR(ret); -} - -static void __devexit dgrs_remove(struct net_device *dev) -{ - DGRS_PRIV *priv = dev->priv; - int i; - - unregister_netdev(dev); - - for (i = 1; i < priv->nports; ++i) { - struct net_device *d = priv->devtbl[i]; - if (d) { - unregister_netdev(d); - free_netdev(d); - } - } - - proc_reset(priv->devtbl[0], 1); - - if (priv->vmem) - iounmap(priv->vmem); - if (priv->vplxdma) - iounmap((uchar *) priv->vplxdma); - - if (dev->irq) - free_irq(dev->irq, dev); - - for (i = 1; i < priv->nports; ++i) { - if (priv->devtbl[i]) - unregister_netdev(priv->devtbl[i]); - } -} - -#ifdef CONFIG_PCI -static int __init dgrs_pci_probe(struct pci_dev *pdev, - const struct pci_device_id *ent) -{ - struct net_device *dev; - int err; - uint io; - uint mem; - uint irq; - uint plxreg; - uint plxdma; - - /* - * Get and check the bus-master and latency values. - * Some PCI BIOSes fail to set the master-enable bit, - * and the latency timer must be set to the maximum - * value to avoid data corruption that occurs when the - * timer expires during a transfer. Yes, it's a bug. - */ - err = pci_enable_device(pdev); - if (err) - return err; - err = pci_request_regions(pdev, "RightSwitch"); - if (err) - return err; - - pci_set_master(pdev); - - plxreg = pci_resource_start (pdev, 0); - io = pci_resource_start (pdev, 1); - mem = pci_resource_start (pdev, 2); - pci_read_config_dword(pdev, 0x30, &plxdma); - irq = pdev->irq; - plxdma &= ~15; - - /* - * On some BIOSES, the PLX "expansion rom" (used for DMA) - * address comes up as "0". 
This is probably because - * the BIOS doesn't see a valid 55 AA ROM signature at - * the "ROM" start and zeroes the address. To get - * around this problem the SE-6 is configured to ask - * for 4 MB of space for the dual port memory. We then - * must set its range back to 2 MB, and use the upper - * half for DMA register access - */ - OUTL(io + PLX_SPACE0_RANGE, 0xFFE00000L); - if (plxdma == 0) - plxdma = mem + (2048L * 1024L); - pci_write_config_dword(pdev, 0x30, plxdma + 1); - pci_read_config_dword(pdev, 0x30, &plxdma); - plxdma &= ~15; - - dev = dgrs_found_device(io, mem, irq, plxreg, plxdma, &pdev->dev); - if (IS_ERR(dev)) { - pci_release_regions(pdev); - return PTR_ERR(dev); - } - - pci_set_drvdata(pdev, dev); - return 0; -} - -static void __devexit dgrs_pci_remove(struct pci_dev *pdev) -{ - struct net_device *dev = pci_get_drvdata(pdev); - - dgrs_remove(dev); - pci_release_regions(pdev); - free_netdev(dev); -} - -static struct pci_driver dgrs_pci_driver = { - .name = "dgrs", - .id_table = dgrs_pci_tbl, - .probe = dgrs_pci_probe, - .remove = __devexit_p(dgrs_pci_remove), -}; -#else -static struct pci_driver dgrs_pci_driver = {}; -#endif - - -#ifdef CONFIG_EISA -static int is2iv[8] __initdata = { 0, 3, 5, 7, 10, 11, 12, 15 }; - -static int __init dgrs_eisa_probe (struct device *gendev) -{ - struct net_device *dev; - struct eisa_device *edev = to_eisa_device(gendev); - uint io = edev->base_addr; - uint mem; - uint irq; - int rc = -ENODEV; /* Not EISA configured */ - - if (!request_region(io, 256, "RightSwitch")) { - printk(KERN_ERR "dgrs: eisa io 0x%x, which is busy.\n", io); - return -EBUSY; - } - - if ( ! (inb(io+ES4H_EC) & ES4H_EC_ENABLE) ) - goto err_out; - - mem = (inb(io+ES4H_AS_31_24) << 24) - + (inb(io+ES4H_AS_23_16) << 16); - - irq = is2iv[ inb(io+ES4H_IS) & ES4H_IS_INTMASK ]; - - dev = dgrs_found_device(io, mem, irq, 0L, 0L, gendev); - if (IS_ERR(dev)) { - rc = PTR_ERR(dev); - goto err_out; - } - - gendev->driver_data = dev; - return 0; - err_out: - release_region(io, 256); - return rc; -} - -static int __devexit dgrs_eisa_remove(struct device *gendev) -{ - struct net_device *dev = gendev->driver_data; - - dgrs_remove(dev); - - release_region(dev->base_addr, 256); - - free_netdev(dev); - return 0; -} - - -static struct eisa_driver dgrs_eisa_driver = { - .id_table = dgrs_eisa_tbl, - .driver = { - .name = "dgrs", - .probe = dgrs_eisa_probe, - .remove = __devexit_p(dgrs_eisa_remove), - } -}; -#endif - -/* - * Variables that can be overriden from module command line - */ -static int debug = -1; -static int dma = -1; -static int hashexpire = -1; -static int spantree = -1; -static int ipaddr[4] = { -1 }; -static int iptrap[4] = { -1 }; -static __u32 ipxnet = -1; -static int nicmode = -1; - -module_param(debug, int, 0); -module_param(dma, int, 0); -module_param(hashexpire, int, 0); -module_param(spantree, int, 0); -module_param_array(ipaddr, int, NULL, 0); -module_param_array(iptrap, int, NULL, 0); -module_param(ipxnet, int, 0); -module_param(nicmode, int, 0); -MODULE_PARM_DESC(debug, "Digi RightSwitch enable debugging (0-1)"); -MODULE_PARM_DESC(dma, "Digi RightSwitch enable BM DMA (0-1)"); -MODULE_PARM_DESC(nicmode, "Digi RightSwitch operating mode (1: switch, 2: multi-NIC)"); - -static int __init dgrs_init_module (void) -{ - int i; - int err; - - /* - * Command line variable overrides - * debug=NNN - * dma=0/1 - * spantree=0/1 - * hashexpire=NNN - * ipaddr=A,B,C,D - * iptrap=A,B,C,D - * ipxnet=NNN - * nicmode=NNN - */ - if (debug >= 0) - dgrs_debug = debug; - if (dma >= 0) - 
dgrs_dma = dma; - if (nicmode >= 0) - dgrs_nicmode = nicmode; - if (hashexpire >= 0) - dgrs_hashexpire = hashexpire; - if (spantree >= 0) - dgrs_spantree = spantree; - if (ipaddr[0] != -1) - for (i = 0; i < 4; ++i) - dgrs_ipaddr[i] = ipaddr[i]; - if (iptrap[0] != -1) - for (i = 0; i < 4; ++i) - dgrs_iptrap[i] = iptrap[i]; - if (ipxnet != -1) - dgrs_ipxnet = htonl( ipxnet ); - - if (dgrs_debug) - { - printk(KERN_INFO "dgrs: SW=%s FW=Build %d %s\nFW Version=%s\n", - version, dgrs_firmnum, dgrs_firmdate, dgrs_firmver); - } - - /* - * Find and configure all the cards - */ -#ifdef CONFIG_EISA - err = eisa_driver_register(&dgrs_eisa_driver); - if (err) - return err; -#endif - err = pci_register_driver(&dgrs_pci_driver); - if (err) - return err; - return 0; -} - -static void __exit dgrs_cleanup_module (void) -{ -#ifdef CONFIG_EISA - eisa_driver_unregister (&dgrs_eisa_driver); -#endif -#ifdef CONFIG_PCI - pci_unregister_driver (&dgrs_pci_driver); -#endif -} - -module_init(dgrs_init_module); -module_exit(dgrs_cleanup_module); diff --git a/drivers/net/dgrs.h b/drivers/net/dgrs.h deleted file mode 100644 index 6058d5301cb6c7a8309a848c476e1219223d0505..0000000000000000000000000000000000000000 --- a/drivers/net/dgrs.h +++ /dev/null @@ -1,38 +0,0 @@ -/* - * ioctl's for the Digi Intl. RightSwitch - * - * These network driver ioctl's are a bit obtuse compared to the usual - * ioctl's for a "normal" device driver. Hey, I didn't invent it. - * - * Typical use: - * - * struct ifreq ifr; - * DGRS_IOCTL ioc; - * int x; - * - * strcpy(ifr.ifr_name, "eth1"); - * ifr.ifr_data = (caddr_t) &ioc; - * ioc.cmd = DGRS_GETMEM; - * ioc.len = sizeof(x); - * ioc.data = (caddr_t) &x; - * rc = ioctl(fd, DGRSIOCTL, &ifr); - * printf("rc=%d mem=%x\n", rc, x); - * - */ -#include - -#define DGRSIOCTL SIOCDEVPRIVATE - -typedef struct dgrs_ioctl { - unsigned short cmd; /* Command to run */ - unsigned short len; /* Length of the data buffer */ - unsigned char __user *data; /* Pointer to the data buffer */ - unsigned short port; /* port number for command, if needed */ - unsigned short filter; /* filter number for command, if needed */ -} DGRS_IOCTL; - -/* - * Commands for the driver - */ -#define DGRS_GETMEM 0x01 /* Get the dual port memory address */ -#define DGRS_SETFILTER 0x02 /* Set a filter */ diff --git a/drivers/net/dgrs_asstruct.h b/drivers/net/dgrs_asstruct.h deleted file mode 100644 index f0e2121770f1613fe81c3454dd4116cd7d63d571..0000000000000000000000000000000000000000 --- a/drivers/net/dgrs_asstruct.h +++ /dev/null @@ -1,37 +0,0 @@ -/* - * For declaring structures shared with assembly routines - * - * $Id: asstruct.h,v 1.1.1.1 1994/10/23 05:08:32 rick Exp $ - */ - -#ifdef ASSEMBLER - -# define MO(t,a) (a) -# define VMO(t,a) (a) - -# define BEGIN_STRUCT(x) _Off=0 -# define S1A(t,x,n) _Off=(_Off+0)&~0; x=_Off; _Off=_Off+(1*n) -# define S2A(t,x,n) _Off=(_Off+1)&~1; x=_Off; _Off=_Off+(2*n) -# define S4A(t,x,n) _Off=(_Off+3)&~3; x=_Off; _Off=_Off+(4*n) -# define WORD(x) _Off=(_Off+3)&~3; x=_Off; _Off=_Off+4 -# define WORDA(x,n) _Off=(_Off+3)&~3; x=_Off; _Off=_Off+(4*n) -# define VWORD(x) _Off=(_Off+3)&~3; x=_Off; _Off=_Off+4 -# define S1(t,x) _Off=(_Off+0)&~0; x=_Off; _Off=_Off+1 -# define S2(t,x) _Off=(_Off+1)&~1; x=_Off; _Off=_Off+2 -# define S4(t,x) _Off=(_Off+3)&~3; x=_Off; _Off=_Off+4 -# define END_STRUCT(x) _Off=(_Off+3)&~3; x=_Off - -#else /* C */ - -#define VMO(t,a) (*(volatile t *)(a)) - -# define BEGIN_STRUCT(x) struct x { -# define S1(t,x) t x ; -# define S1A(t,x,n) t x[n] ; -# define S2(t,x) t x ; -# define 
S2A(t,x,n) t x[n] ; -# define S4(t,x) t x ; -# define S4A(t,x,n) t x[n] ; -# define END_STRUCT(x) } ; - -#endif diff --git a/drivers/net/dgrs_bcomm.h b/drivers/net/dgrs_bcomm.h deleted file mode 100644 index 5e9c252739816a3b6e7ae679ed7dd3ffe5acee9f..0000000000000000000000000000000000000000 --- a/drivers/net/dgrs_bcomm.h +++ /dev/null @@ -1,148 +0,0 @@ -/* - * The bios low-memory structure - * - * Some of the variables in here can be used to set parameters that - * are stored in NVRAM and will retain their old values the next time - * the card is brought up. To use the values stored in NVRAM, the - * parameter should be set to "all ones". This tells the firmware to - * use the NVRAM value or a suitable default. The value that is used - * will be stored back into this structure by the firmware. If the - * value of the variable is not "all ones", then that value will be - * used and will be stored into NVRAM if it isn't already there. - * The variables this applies to are the following: - * Variable Set to: Gets default of: - * bc_hashexpire -1 300 (5 minutes) - * bc_spantree -1 1 (spanning tree on) - * bc_ipaddr FF:FF:FF:FF 0 (no SNMP IP address) - * bc_ipxnet FF:FF:FF:FF 0 (no SNMP IPX net) - * bc_iptrap FF:FF:FF:FF 0 (no SNMP IP trap address) - * - * Some variables MUST have their value set after the firmware - * is loaded onto the board, but before the processor is released. - * These are: - * bc_host 0 means no host "port", run as standalone switch. - * 1 means run as a switch, with a host port. (normal) - * 2 means run as multiple NICs, not as a switch. - * -1 means run in diagnostics mode. - * bc_nowait - * bc_hostarea_len - * bc_filter_len - * - */ -BEGIN_STRUCT(bios_comm) - S4(ulong, bc_intflag) /* Count of all interrupts */ - S4(ulong, bc_lbolt) /* Count of timer interrupts */ - S4(ulong, bc_maincnt) /* Count of main loops */ - S4(ulong, bc_hashcnt) /* Count of entries in hash table */ - S4A(ulong, bc_cnt, 8) /* Misc counters, for debugging */ - S4A(ulong, bc_flag, 8) /* Misc flags, for debugging */ - S4(ulong, bc_memsize) /* Size of memory */ - S4(ulong, bc_dcache) /* Size of working dcache */ - S4(ulong, bc_icache) /* Size of working icache */ - S4(long, bc_status) /* Firmware status */ - S1A(char, bc_file, 8) /* File name of assertion failure */ - S4(ulong, bc_line) /* Line # of assertion failure */ - S4(uchar *, bc_ramstart) - S4(uchar *, bc_ramend) - S4(uchar *, bc_heapstart) /* Start of heap (end of loaded memory) */ - S4(uchar *, bc_heapend) /* End of heap */ - - /* Configurable Parameters */ - S4(long, bc_host) /* 1=Host Port, 0=No Host Port, -1=Test Mode */ - S4(long, bc_nowait) /* Don't wait for 2host circ buffer to empty*/ - S4(long, bc_150ohm) /* 0 == 100 ohm UTP, 1 == 150 ohm STP */ - S4(long, bc_squelch) /* 0 == normal squelch, 1 == reduced squelch */ - S4(ulong, bc_hashexpire) /* Expiry time in seconds for hash table */ - S4(long, bc_spantree) /* 1 == enable IEEE spanning tree */ - - S2A(ushort, bc_eaddr, 3) /* New ether address */ - S2(ushort, bc_dummy1) /* padding for DOS compilers */ - - /* Various debugging aids */ - S4(long, bc_debug) /* Debugging is turned on */ - S4(long, bc_spew) /* Spew data on port 4 for bs_spew seconds */ - S4(long, bc_spewlen) /* Length of spewed data packets */ - S4(long, bc_maxrfd) /* If != 0, max number of RFD's to allocate */ - S4(long, bc_maxrbd) /* If != 0, max number of RBD's to allocate */ - - /* Circular buffers for messages to/from host */ - S4(ulong, bc_2host_head) - S4(ulong, bc_2host_tail) - S4(ulong, bc_2host_mask) - 
S1A(char, bc_2host, 0x200) /* Circ buff to host */ - - S4(ulong, bc_2idt_head) - S4(ulong, bc_2idt_tail) - S4(ulong, bc_2idt_mask) - S1A(char, bc_2idt, 0x200) /* Circ buff to idt */ - - /* Pointers to structures for driver access */ - S4(uchar *, bc_port) /* pointer to Port[] structures */ - S4(long, bc_nports) /* Number of ports */ - S4(long, bc_portlen) /* sizeof(PORT) */ - S4(uchar *, bc_hash) /* Pointer to hash table */ - S4(long, bc_hashlen) /* sizeof(Table) */ - - /* SNMP agent addresses */ - S1A(uchar, bc_ipaddr, 4) /* IP address for SNMP */ - S1A(uchar, bc_ipxnet, 4) /* IPX net address for SNMP */ - - S4(long, bc_nohostintr) /* Do not cause periodic host interrupts */ - - S4(uchar *, bc_dmaaddr) /* Physical addr of host DMA buf for diags */ - S4(ulong, bc_dmalen) /* Length of DMA buffer 0..2048 */ - - /* - * Board memory allocated on startup for use by host, usually - * for the purposes of creating DMA chain descriptors. The - * "len" must be set before the processor is released. The - * address of the area is returned in bc_hostarea. The area - * is guaranteed to be aligned on a 16 byte boundary. - */ - S4(ulong, bc_hostarea_len) /* RW: Number of bytes to allocate */ - S4(uchar *, bc_hostarea) /* RO: Address of allocated memory */ - - /* - * Variables for communicating filters into the board - */ - S4(ulong *, bc_filter_area) /* RO: Space to put filter into */ - S4(ulong, bc_filter_area_len) /* RO: Length of area, in bytes */ - S4(long, bc_filter_cmd) /* RW: Filter command, see below */ - S4(ulong, bc_filter_len) /* RW: Actual length of filter */ - S4(ulong, bc_filter_port) /* RW: Port # for filter 0..6 */ - S4(ulong, bc_filter_num) /* RW: Filter #, 0=input, 1=output */ - - /* more SNMP agent addresses */ - S1A(uchar, bc_iptrap, 4) /* IP address for SNMP */ - - S4A(long, bc_spare, 2) /* spares */ -END_STRUCT(bios_comm) - -#define bc VMO(struct bios_comm, 0xa3000100) - -/* - * bc_status values - */ -#define BC_INIT 0 -#define BC_RUN 100 - -/* - * bc_host values - */ -#define BC_DIAGS -1 -#define BC_SASWITCH 0 -#define BC_SWITCH 1 -#define BC_MULTINIC 2 - -/* - * Values for spew (debugging) - */ -#define BC_SPEW_ENABLE 0x80000000 - -/* - * filter commands - */ -#define BC_FILTER_ERR -1 -#define BC_FILTER_OK 0 -#define BC_FILTER_SET 1 -#define BC_FILTER_CLR 2 diff --git a/drivers/net/dgrs_es4h.h b/drivers/net/dgrs_es4h.h deleted file mode 100644 index 5518fba46b2c9514517020463820fbee6bc252b7..0000000000000000000000000000000000000000 --- a/drivers/net/dgrs_es4h.h +++ /dev/null @@ -1,183 +0,0 @@ -/************************************************************************/ -/* */ -/* es4h.h: Hardware definition of the ES/4h Ethernet Switch, from */ -/* both the host and the 3051's point of view. */ -/* NOTE: this name is a misnomer now that there is a PCI */ -/* board. Everything that says "es4h" should really be */ -/* "se4". But we'll keep the old name for now. */ -/* */ -/* $Id: es4h.h,v 1.10 1996/08/22 17:16:53 rick Exp $ */ -/* */ -/************************************************************************/ - -/************************************************************************/ -/* */ -/* EISA I/O Registers. These are located at 0x1000 * slot-number */ -/* plus the indicated address. I.E. 0x4000-0x4009 for slot 4. 
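The ES/4h register comment above follows the usual EISA convention: a board's I/O registers sit at 0x1000 times its slot number plus the register offset, so slot 4 answers at 0x4000 through 0x4009. A short stand-alone check of that arithmetic, with the macro name invented for the example:

#include <stdio.h>

#define ES4H_IO_BASE(slot)  ((slot) * 0x1000)   /* EISA slot -> I/O base */

int main(void)
{
	int slot = 4;

	/* slot 4: register offsets 0x00..0x09 map to 0x4000..0x4009 */
	printf("base=0x%04x last=0x%04x\n",
	       ES4H_IO_BASE(slot) + 0x00, ES4H_IO_BASE(slot) + 0x09);
	return 0;
}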
*/ -/* */ -/************************************************************************/ - -#define ES4H_MANUFmsb 0x00 /* Read-only */ -#define ES4H_MANUFlsb 0x01 /* Read-only */ -# define ES4H_MANUF_CODE 0x1049 /* = "DBI" */ - -#define ES4H_PRODUCT 0x02 /* Read-only */ -# define ES4H_PRODUCT_CODE 0x0A -# define EPC_PRODUCT_CODE 0x03 - -#define ES4H_REVISION 0x03 /* Read-only */ -# define ES4H_REVISION_CODE 0x01 - -#define ES4H_EC 0x04 /* EISA Control */ -# define ES4H_EC_RESET 0x04 /* WO, EISA reset */ -# define ES4H_EC_ENABLE 0x01 /* RW, EISA enable - set to */ - /* 1 before memory enable */ -#define ES4H_PC 0x05 /* Processor Control */ -# define ES4H_PC_RESET 0x04 /* RW, 3051 reset */ -# define ES4H_PC_INT 0x08 /* WO, assert 3051 intr. 3 */ - -#define ES4H_MW 0x06 /* Memory Window select and enable */ -# define ES4H_MW_ENABLE 0x80 /* WO, enable memory */ -# define ES4H_MW_SELECT_MASK 0x1f /* WO, 32k window selected */ - -#define ES4H_IS 0x07 /* Interrupt, addr select */ -# define ES4H_IS_INTMASK 0x07 /* WO, interrupt select */ -# define ES4H_IS_INTOFF 0x00 /* No IRQ */ -# define ES4H_IS_INT3 0x03 /* IRQ 3 */ -# define ES4H_IS_INT5 0x02 /* IRQ 5 */ -# define ES4H_IS_INT7 0x01 /* IRQ 7 */ -# define ES4H_IS_INT10 0x04 /* IRQ 10 */ -# define ES4H_IS_INT11 0x05 /* IRQ 11 */ -# define ES4H_IS_INT12 0x06 /* IRQ 12 */ -# define ES4H_IS_INT15 0x07 /* IRQ 15 */ -# define ES4H_IS_INTACK 0x10 /* WO, interrupt ack */ -# define ES4H_IS_INTPEND 0x10 /* RO, interrupt pending */ -# define ES4H_IS_LINEAR 0x40 /* WO, no memory windowing */ -# define ES4H_IS_AS15 0x80 /* RW, address select bit 15 */ - -#define ES4H_AS_23_16 0x08 /* Address select bits 23-16 */ -#define ES4H_AS_31_24 0x09 /* Address select bits 31-24 */ - -#define ES4H_IO_MAX 0x09 /* Size of I/O space */ - -/* - * PCI - */ -#define SE6_RESET PLX_USEROUT - -/************************************************************************/ -/* */ -/* 3051 Memory Map */ -/* */ -/* Note: 3051 has 4K I-cache, 2K D-cache. 1 cycle is 50 nsec. */ -/* */ -/************************************************************************/ -#define SE4_NPORTS 4 /* # of ethernet ports */ -#define SE6_NPORTS 6 /* # of ethernet ports */ -#define SE_NPORTS 6 /* Max # of ethernet ports */ - -#define ES4H_RAM_BASE 0x83000000 /* Base address of RAM */ -#define ES4H_RAM_SIZE 0x00200000 /* Size of RAM (2MB) */ -#define ES4H_RAM_INTBASE 0x83800000 /* Base of int-on-write RAM */ - /* a.k.a. PKT RAM */ - - /* Ethernet controllers */ - /* See: i82596.h */ -#define ES4H_ETHER0_PORT 0xA2000000 -#define ES4H_ETHER0_CMD 0xA2000100 -#define ES4H_ETHER1_PORT 0xA2000200 -#define ES4H_ETHER1_CMD 0xA2000300 -#define ES4H_ETHER2_PORT 0xA2000400 -#define ES4H_ETHER2_CMD 0xA2000500 -#define ES4H_ETHER3_PORT 0xA2000600 -#define ES4H_ETHER3_CMD 0xA2000700 -#define ES4H_ETHER4_PORT 0xA2000800 /* RS SE-6 only */ -#define ES4H_ETHER4_CMD 0xA2000900 /* RS SE-6 only */ -#define ES4H_ETHER5_PORT 0xA2000A00 /* RS SE-6 only */ -#define ES4H_ETHER5_CMD 0xA2000B00 /* RS SE-6 only */ - -#define ES4H_I8254 0xA2040000 /* 82C54 timers */ - /* See: i8254.h */ - -#define SE4_I8254_HZ (23000000/4) /* EISA clock input freq. */ -#define SE4_IDT_HZ (46000000) /* EISA CPU freq. */ -#define SE6_I8254_HZ (20000000/4) /* PCI clock input freq. */ -#define SE6_IDT_HZ (50000000) /* PCI CPU freq. */ -#define ES4H_I8254_HZ (23000000/4) /* EISA clock input freq. 
*/ - -#define ES4H_GPP 0xA2050000 /* General purpose port */ - /* - * SE-4 (EISA) GPP bits - */ -# define ES4H_GPP_C0_100 0x0001 /* WO, Chan 0: 100 ohm TP */ -# define ES4H_GPP_C0_SQE 0x0002 /* WO, Chan 0: normal squelch */ -# define ES4H_GPP_C1_100 0x0004 /* WO, Chan 1: 100 ohm TP */ -# define ES4H_GPP_C1_SQE 0x0008 /* WO, Chan 1: normal squelch */ -# define ES4H_GPP_C2_100 0x0010 /* WO, Chan 2: 100 ohm TP */ -# define ES4H_GPP_C2_SQE 0x0020 /* WO, Chan 2: normal squelch */ -# define ES4H_GPP_C3_100 0x0040 /* WO, Chan 3: 100 ohm TP */ -# define ES4H_GPP_C3_SQE 0x0080 /* WO, Chan 3: normal squelch */ -# define ES4H_GPP_SQE 0x00AA /* WO, All: normal squelch */ -# define ES4H_GPP_100 0x0055 /* WO, All: 100 ohm TP */ -# define ES4H_GPP_HOSTINT 0x0100 /* RO, cause intr. to host */ - /* Hold high > 250 nsec */ -# define SE4_GPP_EED 0x0200 /* RW, EEPROM data bit */ -# define SE4_GPP_EECS 0x0400 /* RW, EEPROM chip select */ -# define SE4_GPP_EECK 0x0800 /* RW, EEPROM clock */ - - /* - * SE-6 (PCI) GPP bits - */ -# define SE6_GPP_EED 0x0001 /* RW, EEPROM data bit */ -# define SE6_GPP_EECS 0x0002 /* RW, EEPROM chip select */ -# define SE6_GPP_EECK 0x0004 /* RW, EEPROM clock */ -# define SE6_GPP_LINK 0x00fc /* R, Link status LEDs */ - -#define ES4H_INTVEC 0xA2060000 /* RO: Interrupt Vector */ -# define ES4H_IV_DMA0 0x01 /* Chan 0 DMA interrupt */ -# define ES4H_IV_PKT0 0x02 /* Chan 0 PKT interrupt */ -# define ES4H_IV_DMA1 0x04 /* Chan 1 DMA interrupt */ -# define ES4H_IV_PKT1 0x08 /* Chan 1 PKT interrupt */ -# define ES4H_IV_DMA2 0x10 /* Chan 2 DMA interrupt */ -# define ES4H_IV_PKT2 0x20 /* Chan 2 PKT interrupt */ -# define ES4H_IV_DMA3 0x40 /* Chan 3 DMA interrupt */ -# define ES4H_IV_PKT3 0x80 /* Chan 3 PKT interrupt */ - -#define ES4H_INTACK 0xA2060000 /* WO: Interrupt Ack */ -# define ES4H_INTACK_8254 0x01 /* Real Time Clock (int 0) */ -# define ES4H_INTACK_HOST 0x02 /* Host (int 1) */ -# define ES4H_INTACK_PKT0 0x04 /* Chan 0 Pkt (int 2) */ -# define ES4H_INTACK_PKT1 0x08 /* Chan 1 Pkt (int 3) */ -# define ES4H_INTACK_PKT2 0x10 /* Chan 2 Pkt (int 4) */ -# define ES4H_INTACK_PKT3 0x20 /* Chan 3 Pkt (int 5) */ - -#define SE6_PLX 0xA2070000 /* PLX 9060, SE-6 (PCI) only */ - /* see plx9060.h */ - -#define SE6_PCI_VENDOR_ID 0x114F /* Digi PCI vendor ID */ -#define SE6_PCI_DEVICE_ID 0x0003 /* RS SE-6 device ID */ -#define SE6_PCI_ID ((SE6_PCI_DEVICE_ID<<16) | SE6_PCI_VENDOR_ID) - -/* - * IDT Interrupts - */ -#define ES4H_INT_8254 IDT_INT0 -#define ES4H_INT_HOST IDT_INT1 -#define ES4H_INT_ETHER0 IDT_INT2 -#define ES4H_INT_ETHER1 IDT_INT3 -#define ES4H_INT_ETHER2 IDT_INT4 -#define ES4H_INT_ETHER3 IDT_INT5 - -/* - * Because there are differences between the SE-4 and the SE-6, - * we assume that the following globals will be set up at init - * time in main.c to containt the appropriate constants from above - */ -extern ushort Gpp; /* Softcopy of GPP register */ -extern ushort EEck; /* Clock bit */ -extern ushort EEcs; /* CS bit */ -extern ushort EEd; /* Data bit */ -extern ulong I8254_Hz; /* i8254 input frequency */ -extern ulong IDT_Hz; /* IDT CPU frequency */ -extern int Nports; /* Number of ethernet controllers */ -extern int Nchan; /* Nports+1 */ diff --git a/drivers/net/dgrs_ether.h b/drivers/net/dgrs_ether.h deleted file mode 100644 index 7539b596bff8e2bf4d1045e2ad41b05fd50c5323..0000000000000000000000000000000000000000 --- a/drivers/net/dgrs_ether.h +++ /dev/null @@ -1,135 +0,0 @@ -/* - * A filtering function. There are two filters/port. 
Filter "0" - * is the input filter, and filter "1" is the output filter. - */ -typedef int (FILTER_FUNC)(uchar *pktp, int pktlen, ulong *scratch, int port); -#define NFILTERS 2 - -/* - * The per port structure - */ -typedef struct -{ - int chan; /* Channel number (0-3) */ - ulong portaddr; /* address of 596 port register */ - volatile ulong *ca; /* address of 596 chan attention */ - ulong intmask; /* Interrupt mask for this port */ - ulong intack; /* Ack bit for this port */ - - uchar ethaddr[6]; /* Ethernet address of this port */ - int is_promisc; /* Port is promiscuous */ - - int debug; /* Debugging turned on */ - - I596_ISCP *iscpp; /* Uncached ISCP pointer */ - I596_SCP *scpp; /* Uncached SCP pointer */ - I596_SCB *scbp; /* Uncached SCB pointer */ - - I596_ISCP iscp; - I596_SCB scb; - - /* Command Queue */ - I596_CB *cb0; - I596_CB *cbN; - I596_CB *cb_head; - I596_CB *cb_tail; - - /* Receive Queue */ - I596_RFD *rfd0; - I596_RFD *rfdN; - I596_RFD *rfd_head; - I596_RFD *rfd_tail; - - /* Receive Buffers */ - I596_RBD *rbd0; - I596_RBD *rbdN; - I596_RBD *rbd_head; - I596_RBD *rbd_tail; - int buf_size; /* Size of an RBD buffer */ - int buf_cnt; /* Total RBD's allocated */ - - /* Rx Statistics */ - ulong cnt_rx_cnt; /* Total packets rcvd, good and bad */ - ulong cnt_rx_good; /* Total good packets rcvd */ - ulong cnt_rx_bad; /* Total of all bad packets rcvd */ - /* Subtotals can be gotten from SCB */ - ulong cnt_rx_nores; /* No resources */ - ulong cnt_rx_bytes; /* Total bytes rcvd */ - - /* Tx Statistics */ - ulong cnt_tx_queued; - ulong cnt_tx_done; - ulong cnt_tx_freed; - ulong cnt_tx_nores; /* No resources */ - - ulong cnt_tx_bad; - ulong cnt_tx_err_late; - ulong cnt_tx_err_nocrs; - ulong cnt_tx_err_nocts; - ulong cnt_tx_err_under; - ulong cnt_tx_err_maxcol; - ulong cnt_tx_collisions; - - /* Special stuff for host */ -# define rfd_freed cnt_rx_cnt - ulong rbd_freed; - int host_timer; - - /* Added after first beta */ - ulong cnt_tx_races; /* Counts race conditions */ - int spanstate; - ulong cnt_st_tx; /* send span tree pkts */ - ulong cnt_st_fail_tx; /* Failures to send span tree pkts */ - ulong cnt_st_fail_rbd;/* Failures to send span tree pkts */ - ulong cnt_st_rx; /* rcv span tree pkts */ - ulong cnt_st_rx_bad; /* bogus st packets rcvd */ - ulong cnt_rx_fwd; /* Rcvd packets that were forwarded */ - - ulong cnt_rx_mcast; /* Multicast pkts received */ - ulong cnt_tx_mcast; /* Multicast pkts transmitted */ - ulong cnt_tx_bytes; /* Bytes transmitted */ - - /* - * Packet filtering - * Filter 0: input filter - * Filter 1: output filter - */ - - ulong *filter_space[NFILTERS]; - FILTER_FUNC *filter_func[NFILTERS]; - ulong filter_cnt[NFILTERS]; - ulong filter_len[NFILTERS]; - - ulong pad[ (512-300) / 4]; -} PORT; - -/* - * Port[0] is host interface - * Port[1..SE_NPORTS] are external 10 Base T ports. Fewer may be in - * use, depending on whether this is an SE-4 or - * an SE-6. - * Port[SE_NPORTS] Pseudo-port for Spanning tree and SNMP - */ -extern PORT Port[1+SE_NPORTS+1]; - -extern int Nports; /* Number of genuine ethernet controllers */ -extern int Nchan; /* ... 
plus one for host interface */ - -extern int FirstChan; /* 0 or 1, depedning on whether host is used */ -extern int NumChan; /* 4 or 5 */ - -/* - * A few globals - */ -extern int IsPromisc; -extern int MultiNicMode; - -/* - * Functions - */ -extern void eth_xmit_spew_on(PORT *p, int cnt); -extern void eth_xmit_spew_off(PORT *p); - -extern I596_RBD *alloc_rbds(PORT *p, int num); - -extern I596_CB * eth_cb_alloc(PORT *p); diff --git a/drivers/net/dgrs_firmware.c b/drivers/net/dgrs_firmware.c deleted file mode 100644 index 8c20d4c999371e95119fd7d0da807dcf7db6a81d..0000000000000000000000000000000000000000 --- a/drivers/net/dgrs_firmware.c +++ /dev/null @@ -1,9966 +0,0 @@ -static const int dgrs_firmnum = 550; -static char dgrs_firmver[] = "$Version$"; -static char dgrs_firmdate[] = "11/16/96 03:45:15"; -static unsigned char dgrs_code[] __initdata = { - 213,5,192,8,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,64,40,35,41, - 101,115,52,104,46,98,105,110,32,32,32,32, - 32,32,49,46,48,32,48,48,47,48,48,47, - 57,52,0,64,40,35,41,67,111,112,121,114, - 105,103,104,116,32,49,57,57,53,44,32,68, - 105,103,105,32,73,110,116,101,114,110,97,116, - 105,111,110,97,108,46,32,32,65,108,108,32, - 82,105,103,104,116,115,32,82,101,115,101,114, - 118,101,100,46,0,0,0,0,97,5,192,8, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 
0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 
0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 
0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,255,255,0,16,0,0,0,0, - 0,0,0,0,8,0,224,3,0,0,0,0, - 148,255,189,39,16,0,161,175,20,0,162,175, - 24,0,163,175,28,0,164,175,32,0,165,175, - 36,0,166,175,40,0,167,175,44,0,168,175, - 48,0,169,175,52,0,170,175,56,0,171,175, - 60,0,172,175,64,0,173,175,68,0,174,175, - 72,0,175,175,76,0,184,175,80,0,185,175, - 88,0,190,175,92,0,191,175,0,112,8,64, - 18,72,0,0,16,80,0,0,0,96,11,64, - 84,0,168,175,96,0,169,175,100,0,170,175, - 104,0,171,175,33,56,0,1,0,131,24,60, - 0,1,24,39,0,0,8,143,0,0,0,0, - 1,0,8,33,0,0,8,175,0,104,5,64, - 0,96,6,64,124,0,168,48,212,255,0,21, - 0,0,0,0,36,64,166,0,0,255,8,49, - 27,0,0,17,0,0,0,0,130,65,8,0, - 2,131,9,60,33,72,40,1,0,220,41,141, - 66,64,8,0,2,131,10,60,33,80,72,1, - 0,224,74,141,0,0,0,0,38,80,70,1, - 1,255,74,49,33,40,192,0,38,48,202,0, - 0,96,134,64,66,64,8,0,2,131,4,60, - 
33,32,136,0,0,226,132,144,9,248,32,1, - 0,0,0,0,104,0,166,143,0,0,0,0, - 0,96,134,64,0,104,5,64,227,255,0,16, - 0,0,0,0,104,0,168,143,96,0,169,143, - 100,0,170,143,0,0,0,0,0,96,136,64, - 19,0,32,1,17,0,64,1,20,0,162,143, - 24,0,163,143,28,0,164,143,32,0,165,143, - 36,0,166,143,40,0,167,143,44,0,168,143, - 48,0,169,143,52,0,170,143,56,0,171,143, - 60,0,172,143,64,0,173,143,68,0,174,143, - 72,0,175,143,76,0,184,143,80,0,185,143, - 88,0,190,143,92,0,191,143,0,0,0,0, - 84,0,186,143,16,0,161,143,108,0,189,39, - 8,0,64,3,16,0,0,66,0,96,26,64, - 0,0,0,0,255,255,27,60,254,0,123,55, - 0,0,0,0,36,208,91,3,0,0,0,0, - 0,96,154,64,0,0,0,0,0,112,26,64, - 0,0,0,0,16,0,0,66,0,0,0,0, - 8,0,64,3,0,0,0,0,255,255,8,36, - 133,255,0,17,0,0,0,0,1,0,8,37, - 130,255,0,21,0,0,0,0,255,255,8,36, - 33,8,0,1,126,255,40,20,0,0,0,0, - 1,0,33,36,123,255,32,20,0,0,0,0, - 255,255,2,36,120,255,72,20,0,0,0,0, - 1,0,66,36,117,255,64,20,0,0,0,0, - 255,255,3,36,114,255,104,20,0,0,0,0, - 1,0,99,36,111,255,96,20,0,0,0,0, - 255,255,4,36,108,255,136,20,0,0,0,0, - 1,0,132,36,105,255,128,20,0,0,0,0, - 255,255,5,36,102,255,168,20,0,0,0,0, - 1,0,165,36,99,255,160,20,0,0,0,0, - 255,255,6,36,96,255,200,20,0,0,0,0, - 1,0,198,36,93,255,192,20,0,0,0,0, - 255,255,7,36,90,255,232,20,0,0,0,0, - 1,0,231,36,87,255,224,20,0,0,0,0, - 255,255,9,36,84,255,40,21,0,0,0,0, - 1,0,41,37,81,255,32,21,0,0,0,0, - 255,255,10,36,78,255,72,21,0,0,0,0, - 1,0,74,37,75,255,64,21,0,0,0,0, - 255,255,11,36,72,255,104,21,0,0,0,0, - 1,0,107,37,69,255,96,21,0,0,0,0, - 255,255,12,36,66,255,136,21,0,0,0,0, - 1,0,140,37,63,255,128,21,0,0,0,0, - 255,255,13,36,60,255,168,21,0,0,0,0, - 1,0,173,37,57,255,160,21,0,0,0,0, - 255,255,14,36,54,255,200,21,0,0,0,0, - 1,0,206,37,51,255,192,21,0,0,0,0, - 255,255,15,36,48,255,232,21,0,0,0,0, - 1,0,239,37,45,255,224,21,0,0,0,0, - 255,255,24,36,42,255,8,23,0,0,0,0, - 1,0,24,39,39,255,0,23,0,0,0,0, - 255,255,16,36,36,255,8,22,0,0,0,0, - 1,0,16,38,33,255,0,22,0,0,0,0, - 255,255,17,36,30,255,40,22,0,0,0,0, - 1,0,49,38,27,255,32,22,0,0,0,0, - 255,255,18,36,24,255,72,22,0,0,0,0, - 1,0,82,38,21,255,64,22,0,0,0,0, - 255,255,19,36,18,255,104,22,0,0,0,0, - 1,0,115,38,15,255,96,22,0,0,0,0, - 255,255,20,36,12,255,136,22,0,0,0,0, - 1,0,148,38,9,255,128,22,0,0,0,0, - 255,255,21,36,6,255,168,22,0,0,0,0, - 1,0,181,38,3,255,160,22,0,0,0,0, - 255,255,22,36,0,255,200,22,0,0,0,0, - 1,0,214,38,253,254,192,22,0,0,0,0, - 255,255,23,36,250,254,232,22,0,0,0,0, - 1,0,247,38,247,254,224,22,0,0,0,0, - 255,255,26,36,244,254,72,23,0,0,0,0, - 1,0,90,39,241,254,64,23,0,0,0,0, - 255,255,27,36,238,254,104,23,0,0,0,0, - 1,0,123,39,235,254,96,23,0,0,0,0, - 255,255,28,36,232,254,136,23,0,0,0,0, - 1,0,156,39,229,254,128,23,0,0,0,0, - 255,255,29,36,226,254,168,23,0,0,0,0, - 1,0,189,39,223,254,160,23,0,0,0,0, - 255,255,30,36,220,254,200,23,0,0,0,0, - 1,0,222,39,217,254,192,23,0,0,0,0, - 255,255,31,36,214,254,232,23,0,0,0,0, - 1,0,255,39,211,254,224,23,0,0,0,0, - 0,131,24,60,0,1,24,39,0,32,1,60, - 37,192,1,3,0,96,8,64,0,0,0,0, - 1,0,1,60,37,64,1,1,0,96,136,64, - 33,16,0,0,165,165,3,60,165,165,99,52, - 0,128,1,60,0,0,35,172,0,128,9,60, - 0,0,41,141,0,0,0,0,0,96,10,64, - 0,0,0,0,8,0,1,60,36,80,65,1, - 29,0,64,21,0,0,0,0,27,0,105,20, - 0,0,0,0,0,1,2,36,0,128,1,60, - 33,8,34,0,0,0,32,172,64,16,2,0, - 1,0,1,60,1,0,33,52,43,8,65,0, - 248,255,32,20,0,0,0,0,255,255,3,36, - 0,128,1,60,0,0,35,172,0,1,2,36, - 0,128,3,60,33,24,98,0,0,0,99,140, - 0,0,0,0,7,0,96,20,0,0,0,0, - 64,16,2,0,1,0,1,60,1,0,33,52, - 43,8,65,0,245,255,32,20,0,0,0,0, - 0,96,128,64,0,0,0,0,84,0,2,175, - 
0,96,8,64,0,0,0,0,3,0,1,60, - 37,64,1,1,0,96,136,64,33,16,0,0, - 165,165,3,60,165,165,99,52,0,128,1,60, - 0,0,35,172,0,128,9,60,0,0,41,141, - 0,0,0,0,0,96,10,64,0,0,0,0, - 8,0,1,60,36,80,65,1,29,0,64,21, - 0,0,0,0,27,0,105,20,0,0,0,0, - 0,1,2,36,0,128,1,60,33,8,34,0, - 0,0,32,172,64,16,2,0,1,0,1,60, - 1,0,33,52,43,8,65,0,248,255,32,20, - 0,0,0,0,255,255,3,36,0,128,1,60, - 0,0,35,172,0,1,2,36,0,128,3,60, - 33,24,98,0,0,0,99,140,0,0,0,0, - 7,0,96,20,0,0,0,0,64,16,2,0, - 1,0,1,60,1,0,33,52,43,8,65,0, - 245,255,32,20,0,0,0,0,0,96,128,64, - 0,0,0,0,88,0,2,175,88,0,9,143, - 0,0,0,0,17,0,32,17,0,0,0,0, - 0,0,0,0,3,0,2,60,0,0,0,0, - 0,96,130,64,0,128,8,60,37,72,40,1, - 0,0,0,161,4,0,0,161,8,0,0,161, - 12,0,0,161,16,0,0,161,20,0,0,161, - 24,0,0,161,32,0,8,37,247,255,9,21, - 252,255,0,161,84,0,9,143,0,0,0,0, - 17,0,32,17,0,0,0,0,0,0,0,0, - 1,0,2,60,0,0,0,0,0,96,130,64, - 0,128,8,60,37,72,40,1,0,0,0,161, - 4,0,0,161,8,0,0,161,12,0,0,161, - 16,0,0,161,20,0,0,161,24,0,0,161, - 32,0,8,37,247,255,9,21,252,255,0,161, - 32,0,8,60,0,96,136,64,0,104,128,64, - 0,131,2,60,152,28,66,36,255,31,9,60, - 255,255,41,53,36,16,73,0,0,128,9,60, - 37,16,73,0,8,0,64,0,0,0,0,0, - 2,131,8,60,224,210,8,37,252,255,1,36, - 36,64,1,1,3,131,9,60,124,18,41,37, - 252,255,1,36,36,72,33,1,0,0,10,36, - 0,0,10,173,254,255,9,21,4,0,8,37, - 3,131,8,60,128,18,8,37,252,255,1,36, - 36,64,1,1,31,131,9,60,252,255,41,53, - 252,255,1,36,36,72,33,1,237,254,10,60, - 175,222,74,53,0,0,10,173,254,255,9,21, - 4,0,8,37,2,131,8,60,0,212,8,37, - 252,255,1,36,36,64,1,1,2,131,9,60, - 252,219,41,37,252,255,1,36,36,72,33,1, - 173,222,10,60,239,190,74,53,0,0,10,173, - 254,255,9,21,4,0,8,37,0,4,8,60, - 0,0,0,0,0,24,136,64,0,0,0,0, - 2,131,29,60,0,220,189,39,0,0,30,36, - 2,131,28,60,51,8,192,12,16,78,156,39, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 232,255,189,39,16,0,191,175,8,128,132,39, - 15,63,192,12,0,0,0,0,16,0,191,143, - 24,0,189,39,8,0,224,3,0,0,0,0, - 232,255,189,39,16,0,191,175,12,128,132,39, - 15,63,192,12,0,0,0,0,16,0,191,143, - 24,0,189,39,8,0,224,3,0,0,0,0, - 232,255,189,39,16,0,191,175,16,128,132,39, - 15,63,192,12,0,0,0,0,16,0,191,143, - 24,0,189,39,8,0,224,3,0,0,0,0, - 232,255,189,39,16,0,191,175,20,128,132,39, - 15,63,192,12,0,0,0,0,16,0,191,143, - 24,0,189,39,8,0,224,3,0,0,0,0, - 232,255,189,39,24,133,131,143,6,0,2,36, - 20,0,191,175,6,0,98,20,16,0,176,175, - 7,162,3,60,228,0,99,52,1,0,2,36, - 184,7,192,8,0,0,98,172,0,128,130,151, - 5,162,16,60,0,1,66,52,120,63,192,12, - 0,0,2,166,0,128,130,151,0,0,0,0, - 255,254,66,48,0,0,2,166,20,0,191,143, - 16,0,176,143,8,0,224,3,24,0,189,39, - 232,255,189,39,33,16,128,0,3,0,64,4, - 16,0,191,175,254,255,2,60,192,29,66,52, - 0,163,4,60,96,1,132,52,0,163,1,60, - 92,1,34,172,0,163,1,60,104,1,38,172, - 204,63,192,12,8,0,6,36,228,63,192,12, - 255,255,4,36,204,7,192,8,0,0,0,0, - 16,0,191,143,24,0,189,39,8,0,224,3, - 0,0,0,0,216,255,189,39,1,0,6,36, - 3,131,2,60,143,18,66,36,240,255,3,36, - 36,16,67,0,0,163,1,60,120,1,34,172, - 0,163,2,60,120,1,66,140,33,56,0,0, - 32,0,191,175,28,0,177,175,24,0,176,175, - 16,0,160,175,0,163,1,60,116,1,34,172, - 0,163,3,60,112,1,99,140,0,163,2,60, - 116,1,66,140,0,163,4,60,116,1,132,140, - 35,136,98,0,84,64,192,12,33,40,32,2, - 
13,0,64,16,0,0,0,0,1,131,4,60, - 96,127,132,36,24,128,144,39,33,40,0,2, - 1,131,7,60,128,127,231,36,15,63,192,12, - 148,0,6,36,1,0,4,36,33,40,0,2, - 188,7,192,12,148,0,6,36,2,0,33,6, - 33,16,32,2,3,0,34,38,131,136,2,0, - 0,163,2,60,116,1,66,140,0,0,0,0, - 6,0,32,18,237,254,3,60,175,222,99,52, - 0,0,67,172,255,255,49,38,253,255,32,22, - 4,0,66,36,32,0,191,143,28,0,177,143, - 24,0,176,143,8,0,224,3,40,0,189,39, - 224,255,189,39,15,0,132,36,240,255,3,36, - 20,0,177,175,0,163,17,60,120,1,49,142, - 0,163,2,60,120,1,66,140,36,32,131,0, - 33,16,68,0,0,163,1,60,120,1,34,172, - 0,163,3,60,120,1,99,140,0,163,2,60, - 112,1,66,140,24,0,191,175,43,16,67,0, - 13,0,64,16,16,0,176,175,1,131,4,60, - 96,127,132,36,24,128,144,39,33,40,0,2, - 1,131,7,60,176,127,231,36,15,63,192,12, - 171,0,6,36,1,0,4,36,33,40,0,2, - 188,7,192,12,171,0,6,36,33,16,32,2, - 24,0,191,143,20,0,177,143,16,0,176,143, - 8,0,224,3,32,0,189,39,216,255,189,39, - 3,0,2,60,7,162,3,60,36,0,191,175, - 32,0,176,175,0,163,1,60,92,1,32,172, - 0,0,99,140,79,17,66,52,32,0,98,20, - 87,0,4,60,76,0,8,60,64,75,8,53, - 250,2,7,60,128,240,231,52,7,162,6,60, - 152,0,198,52,67,73,3,60,67,3,99,52, - 7,162,4,60,48,1,132,52,7,162,5,60, - 0,1,165,52,6,0,2,36,24,133,130,175, - 0,163,1,60,204,5,34,172,4,0,2,36, - 50,133,130,167,2,0,2,36,48,133,130,167, - 1,0,2,36,20,133,130,167,119,119,2,36, - 28,133,136,175,16,133,135,175,0,0,195,172, - 0,0,130,172,67,1,2,36,0,0,162,172, - 109,8,192,8,31,131,4,60,240,188,132,52, - 189,2,3,60,128,231,99,52,4,0,2,36, - 24,133,130,175,0,163,1,60,204,5,34,172, - 0,8,2,36,50,133,130,167,0,4,2,36, - 48,133,130,167,0,2,2,36,20,133,130,167, - 28,133,132,175,16,133,131,175,31,131,4,60, - 0,240,132,52,24,133,131,143,0,131,2,60, - 0,163,1,60,108,1,34,172,0,163,1,60, - 112,1,36,172,1,0,99,36,32,133,131,175, - 210,7,192,12,0,0,0,0,0,163,2,60, - 132,1,66,140,0,128,128,167,2,0,64,20, - 85,0,2,36,0,128,130,167,0,163,2,60, - 136,1,66,140,0,0,0,0,5,0,64,20, - 0,0,0,0,0,128,130,151,0,0,0,0, - 170,0,66,52,0,128,130,167,0,128,131,151, - 5,162,2,60,0,0,67,164,188,64,192,12, - 1,0,16,36,2,131,4,60,0,220,132,36, - 2,131,5,60,0,224,165,36,2,131,6,60, - 0,226,198,36,8,0,7,36,2,131,2,60, - 112,154,66,36,16,0,162,175,2,131,2,60, - 144,154,66,36,20,0,162,175,2,131,2,60, - 176,154,66,36,24,0,162,175,0,131,2,60, - 36,30,66,36,0,163,1,60,92,1,48,172, - 240,64,192,12,28,0,162,175,0,163,3,60, - 124,1,99,140,40,133,128,175,2,0,98,40, - 7,0,64,16,2,0,2,36,18,0,97,4, - 255,255,2,36,7,0,98,16,0,0,0,0, - 196,8,192,8,0,0,0,0,16,0,98,16, - 0,0,0,0,196,8,192,8,0,0,0,0, - 24,133,133,143,1,131,4,60,15,63,192,12, - 208,127,132,36,0,163,1,60,112,25,192,12, - 124,1,32,172,207,8,192,8,0,0,0,0, - 211,8,192,12,0,0,0,0,207,8,192,8, - 0,0,0,0,40,133,144,175,31,10,192,12, - 0,0,0,0,207,8,192,8,0,0,0,0, - 1,131,4,60,96,127,132,36,24,128,144,39, - 33,40,0,2,32,128,135,39,15,63,192,12, - 58,1,6,36,1,0,4,36,33,40,0,2, - 188,7,192,12,58,1,6,36,36,0,191,143, - 32,0,176,143,8,0,224,3,40,0,189,39, - 192,255,189,39,56,0,191,175,52,0,181,175, - 48,0,180,175,44,0,179,175,40,0,178,175, - 36,0,177,175,180,10,192,12,32,0,176,175, - 33,32,0,0,2,0,2,36,0,163,1,60, - 244,57,192,12,92,1,34,172,3,0,2,36, - 0,163,1,60,0,12,192,12,92,1,34,172, - 1,0,4,36,4,0,2,36,0,163,1,60, - 34,11,192,12,92,1,34,172,5,0,2,36, - 0,163,1,60,92,1,34,172,0,163,19,60, - 124,1,115,142,0,163,3,60,160,1,99,140, - 1,0,98,46,80,133,130,175,0,128,2,60, - 5,0,98,20,0,0,0,0,32,133,130,143, - 0,0,0,0,252,8,192,8,255,255,66,36, - 32,133,130,143,0,0,0,0,84,133,130,175, - 130,11,192,12,0,0,0,0,33,32,64,0, - 
2,0,5,36,232,3,6,36,0,131,7,60, - 196,37,231,36,156,11,192,12,16,0,160,175, - 35,35,192,12,0,0,0,0,6,0,2,36, - 0,163,1,60,84,35,192,12,92,1,34,172, - 7,0,2,36,0,163,1,60,141,47,192,12, - 92,1,34,172,8,0,2,36,0,163,1,60, - 120,50,192,12,92,1,34,172,9,0,2,36, - 0,163,1,60,92,1,34,172,0,163,2,60, - 240,5,66,140,0,0,0,0,8,0,64,16, - 10,0,2,36,0,163,4,60,240,5,132,140, - 13,8,192,12,0,0,0,0,0,163,1,60, - 244,5,34,172,10,0,2,36,0,163,1,60, - 92,1,34,172,157,15,192,12,1,0,21,36, - 2,131,2,60,192,246,66,36,33,160,64,0, - 80,133,131,143,11,0,2,36,0,163,1,60, - 92,1,34,172,100,0,2,36,0,163,1,60, - 92,1,34,172,84,133,130,143,64,26,3,0, - 33,136,116,0,64,18,2,0,33,144,84,0, - 0,163,2,60,8,1,66,140,0,0,0,0, - 1,0,66,36,0,163,1,60,8,1,34,172, - 0,163,2,60,8,1,66,140,0,163,2,60, - 124,1,66,140,0,0,0,0,14,0,83,16, - 0,0,0,0,4,0,64,16,33,152,64,0, - 80,133,128,175,76,9,192,8,0,0,0,0, - 80,133,149,175,2,131,4,60,163,23,192,12, - 192,246,132,36,80,133,130,143,0,0,0,0, - 64,18,2,0,33,136,84,0,0,163,2,60, - 0,6,66,140,0,0,0,0,3,0,64,24, - 33,128,32,2,239,15,192,12,0,0,0,0, - 43,16,18,2,11,0,64,16,33,32,0,2, - 151,18,192,12,10,0,5,36,27,22,192,12, - 33,32,0,2,142,22,192,12,33,32,0,2, - 0,2,16,38,43,16,18,2,247,255,64,20, - 33,32,0,2,184,11,192,12,0,0,0,0, - 54,9,192,8,0,0,0,0,56,0,191,143, - 52,0,181,143,48,0,180,143,44,0,179,143, - 40,0,178,143,36,0,177,143,32,0,176,143, - 8,0,224,3,64,0,189,39,4,128,130,143, - 232,255,189,39,20,0,191,175,16,0,176,175, - 1,0,67,36,4,128,131,175,255,255,3,36, - 4,0,67,20,255,31,4,60,0,163,1,60, - 8,1,32,172,255,31,4,60,255,255,132,52, - 0,163,16,60,0,163,2,60,8,1,66,140, - 208,132,131,143,220,5,16,54,35,16,67,0, - 0,163,1,60,16,1,34,172,2,131,2,60, - 192,246,66,36,36,16,68,0,0,160,3,60, - 37,16,67,0,0,163,3,60,8,1,99,140, - 28,0,68,140,0,0,5,142,3,131,2,60, - 20,18,66,140,208,132,131,175,36,133,132,175, - 18,0,162,16,0,163,4,60,99,59,192,12, - 220,5,132,52,255,0,5,60,255,0,165,52, - 0,255,6,60,0,0,4,142,0,255,198,52, - 0,20,4,0,2,28,4,0,37,16,67,0, - 2,26,2,0,36,24,101,0,0,18,2,0, - 36,16,70,0,37,24,98,0,176,133,132,175, - 184,133,131,175,0,163,16,60,16,6,16,54, - 0,0,3,142,3,131,2,60,68,18,66,140, - 0,0,0,0,18,0,98,16,0,163,4,60, - 119,59,192,12,16,6,132,52,255,0,5,60, - 255,0,165,52,0,255,6,60,0,0,4,142, - 0,255,198,52,0,20,4,0,2,28,4,0, - 37,16,67,0,2,26,2,0,36,24,101,0, - 0,18,2,0,36,16,70,0,37,24,98,0, - 196,133,132,175,192,133,131,175,0,163,16,60, - 224,5,16,54,0,0,3,142,3,131,2,60, - 24,18,66,140,0,0,0,0,18,0,98,16, - 0,163,4,60,139,59,192,12,224,5,132,52, - 255,0,5,60,255,0,165,52,0,255,6,60, - 0,0,4,142,0,255,198,52,0,20,4,0, - 2,28,4,0,37,16,67,0,2,26,2,0, - 36,24,101,0,0,18,2,0,36,16,70,0, - 37,24,98,0,188,133,132,175,180,133,131,175, - 44,133,131,143,0,163,2,60,144,1,66,140, - 0,0,0,0,5,0,98,16,0,0,0,0, - 0,163,4,60,144,1,132,140,159,59,192,12, - 0,0,0,0,0,163,3,60,140,1,99,140, - 3,131,2,60,64,18,66,140,0,0,0,0, - 5,0,98,16,0,0,0,0,0,163,4,60, - 140,1,132,140,51,60,192,12,0,0,0,0, - 44,133,130,143,0,0,0,0,3,0,64,16, - 0,0,0,0,116,38,192,12,0,0,0,0, - 164,7,192,12,0,0,0,0,36,128,130,143, - 0,0,0,0,1,0,66,36,36,128,130,175, - 60,0,66,40,8,0,64,20,0,0,0,0, - 3,131,2,60,24,18,66,140,36,128,128,175, - 3,0,64,16,0,0,0,0,222,48,192,12, - 0,0,0,0,0,163,2,60,48,1,66,140, - 0,0,0,0,20,0,64,16,0,0,0,0, - 0,163,1,60,48,1,32,172,0,163,1,60, - 16,1,32,172,0,163,1,60,20,1,32,172, - 0,163,1,60,24,1,32,172,0,163,1,60, - 28,1,32,172,0,163,1,60,32,1,32,172, - 0,163,1,60,36,1,32,172,0,163,1,60, - 40,1,32,172,0,163,1,60,201,13,192,12, - 44,1,32,172,20,0,191,143,16,0,176,143, - 
8,0,224,3,24,0,189,39,216,255,189,39, - 36,0,191,175,32,0,178,175,28,0,177,175, - 180,10,192,12,24,0,176,175,33,32,0,0, - 2,0,2,36,0,163,1,60,244,57,192,12, - 92,1,34,172,3,0,2,36,0,163,1,60, - 0,12,192,12,92,1,34,172,1,0,4,36, - 4,0,2,36,0,163,1,60,34,11,192,12, - 92,1,34,172,32,133,131,143,5,0,2,36, - 0,163,1,60,92,1,34,172,80,133,128,175, - 84,133,131,175,130,11,192,12,0,0,0,0, - 33,32,64,0,2,0,5,36,232,3,6,36, - 0,131,7,60,196,37,231,36,156,11,192,12, - 16,0,160,175,0,163,2,60,240,5,66,140, - 0,0,0,0,8,0,64,16,10,0,2,36, - 0,163,4,60,240,5,132,140,13,8,192,12, - 0,0,0,0,0,163,1,60,244,5,34,172, - 10,0,2,36,0,163,1,60,92,1,34,172, - 100,0,2,36,80,133,131,143,2,131,4,60, - 192,246,132,36,0,163,1,60,92,1,34,172, - 84,133,130,143,64,26,3,0,33,144,100,0, - 64,18,2,0,33,136,68,0,0,163,2,60, - 8,1,66,140,33,128,64,2,1,0,66,36, - 0,163,1,60,8,1,34,172,0,163,2,60, - 8,1,66,140,43,16,17,2,11,0,64,16, - 33,32,0,2,151,18,192,12,10,0,5,36, - 27,22,192,12,33,32,0,2,142,22,192,12, - 33,32,0,2,0,2,16,38,43,16,17,2, - 247,255,64,20,33,32,0,2,184,11,192,12, - 0,0,0,0,91,10,192,8,0,0,0,0, - 36,0,191,143,32,0,178,143,28,0,177,143, - 24,0,176,143,8,0,224,3,40,0,189,39, - 4,128,130,143,232,255,189,39,16,0,191,175, - 1,0,67,36,4,128,131,175,255,255,3,36, - 4,0,67,20,255,31,4,60,0,163,1,60, - 8,1,32,172,255,31,4,60,0,163,2,60, - 8,1,66,140,212,132,131,143,255,255,132,52, - 35,16,67,0,0,163,1,60,16,1,34,172, - 2,131,2,60,192,246,66,36,36,16,68,0, - 0,160,3,60,37,16,67,0,0,163,3,60, - 8,1,99,140,28,0,66,140,212,132,131,175, - 36,133,130,175,164,7,192,12,0,0,0,0, - 0,163,2,60,48,1,66,140,0,0,0,0, - 20,0,64,16,0,0,0,0,0,163,1,60, - 48,1,32,172,0,163,1,60,16,1,32,172, - 0,163,1,60,20,1,32,172,0,163,1,60, - 24,1,32,172,0,163,1,60,28,1,32,172, - 0,163,1,60,32,1,32,172,0,163,1,60, - 36,1,32,172,0,163,1,60,40,1,32,172, - 0,163,1,60,201,13,192,12,44,1,32,172, - 16,0,191,143,24,0,189,39,8,0,224,3, - 0,0,0,0,224,255,189,39,24,0,191,175, - 20,0,177,175,120,63,192,12,16,0,176,175, - 52,0,2,36,4,162,1,60,12,0,34,160, - 120,63,192,12,232,3,16,36,28,133,130,143, - 0,0,0,0,27,0,80,0,2,0,0,22, - 0,0,0,0,13,0,7,0,18,16,0,0, - 4,162,17,60,120,63,192,12,0,0,34,162, - 28,133,130,143,0,0,0,0,27,0,80,0, - 2,0,0,22,0,0,0,0,13,0,7,0, - 18,16,0,0,33,40,0,0,33,32,0,0, - 6,162,3,60,2,18,2,0,0,0,34,162, - 1,0,2,36,0,163,1,60,4,1,32,172, - 0,0,98,172,2,131,1,60,33,8,36,0, - 8,245,32,172,1,0,165,36,22,0,162,44, - 250,255,64,20,20,0,132,36,31,131,4,60, - 0,240,132,52,52,128,131,143,1,0,2,36, - 68,133,128,175,48,128,130,175,64,133,128,175, - 32,131,1,60,252,239,36,172,8,0,96,16, - 31,131,5,60,252,239,165,52,31,131,6,60, - 1,131,4,60,224,127,132,36,15,63,192,12, - 0,240,198,52,52,128,128,175,24,0,191,143, - 20,0,177,143,16,0,176,143,8,0,224,3, - 32,0,189,39,232,255,189,39,16,0,176,175, - 116,0,2,36,20,0,191,175,4,162,1,60, - 12,0,34,160,130,63,192,12,33,128,128,0, - 4,162,1,60,4,0,48,160,130,63,192,12, - 3,130,16,0,4,162,1,60,130,63,192,12, - 4,0,48,160,20,0,191,143,16,0,176,143, - 8,0,224,3,24,0,189,39,224,255,189,39, - 64,0,2,36,24,0,191,175,20,0,177,175, - 16,0,176,175,4,162,1,60,130,63,192,12, - 12,0,34,160,4,162,17,60,4,0,49,146, - 0,0,0,0,130,63,192,12,255,0,49,50, - 4,162,16,60,4,0,16,146,0,0,0,0, - 130,63,192,12,255,0,16,50,0,130,16,0, - 37,16,17,2,24,0,191,143,20,0,177,143, - 16,0,176,143,8,0,224,3,32,0,189,39, - 48,128,130,143,232,255,189,39,16,0,176,175, - 33,128,128,0,3,0,64,20,20,0,191,175, - 180,10,192,12,0,0,0,0,5,0,0,18, - 0,0,0,0,236,63,192,12,1,4,4,36, - 50,11,192,8,0,0,0,0,228,63,192,12, - 0,4,4,36,20,0,191,143,16,0,176,143, 
- 8,0,224,3,24,0,189,39,216,255,189,39, - 6,162,3,60,1,0,2,36,32,0,191,175, - 28,0,177,175,24,0,176,175,0,0,98,172, - 0,163,2,60,4,1,66,140,33,136,224,0, - 1,0,66,36,0,163,1,60,4,1,34,172, - 56,128,130,143,0,163,3,60,4,1,99,140, - 1,0,66,36,56,128,130,175,232,3,66,40, - 21,0,64,20,255,127,3,60,68,133,130,143, - 254,255,99,52,56,128,128,175,1,0,66,36, - 43,24,98,0,68,133,130,175,13,0,96,16, - 0,0,0,0,2,131,4,60,28,128,132,36, - 60,128,144,39,33,40,0,2,2,131,7,60, - 60,128,231,36,15,63,192,12,144,0,6,36, - 1,0,4,36,33,40,0,2,188,7,192,12, - 144,0,6,36,64,133,134,143,0,0,0,0, - 14,0,192,24,33,24,0,0,2,131,5,60, - 0,245,165,36,33,32,0,0,2,131,2,60, - 33,16,68,0,0,245,66,140,20,0,132,36, - 1,0,99,36,255,255,66,36,0,0,162,172, - 42,16,102,0,247,255,64,20,20,0,165,36, - 31,131,4,60,252,239,132,52,31,131,2,60, - 0,0,131,140,255,255,66,52,0,0,113,172, - 4,0,99,36,43,16,67,0,3,0,64,16, - 0,0,0,0,31,131,3,60,0,240,99,52, - 0,0,131,172,32,0,191,143,28,0,177,143, - 24,0,176,143,8,0,224,3,40,0,189,39, - 64,133,130,143,232,255,189,39,20,0,191,175, - 22,0,66,40,13,0,64,20,16,0,176,175, - 2,131,4,60,28,128,132,36,60,128,144,39, - 33,40,0,2,2,131,7,60,84,128,231,36, - 15,63,192,12,173,0,6,36,1,0,4,36, - 33,40,0,2,188,7,192,12,173,0,6,36, - 64,133,130,143,0,0,0,0,1,0,67,36, - 64,133,131,175,20,0,191,143,16,0,176,143, - 8,0,224,3,24,0,189,39,128,16,4,0, - 33,16,68,0,16,0,163,143,128,16,2,0, - 2,131,1,60,33,8,34,0,4,245,38,172, - 2,131,1,60,33,8,34,0,12,245,39,172, - 2,131,1,60,33,8,34,0,0,245,38,172, - 2,131,1,60,33,8,34,0,8,245,37,172, - 2,131,1,60,33,8,34,0,16,245,35,172, - 8,0,224,3,33,16,0,1,128,16,4,0, - 33,16,68,0,128,16,2,0,2,131,1,60, - 33,8,34,0,8,0,224,3,8,245,32,172, - 64,133,130,143,192,255,189,39,40,0,180,175, - 33,160,0,0,56,0,191,175,52,0,183,175, - 48,0,182,175,44,0,181,175,36,0,179,175, - 32,0,178,175,28,0,177,175,48,0,64,24, - 24,0,176,175,1,0,23,36,2,0,22,36, - 2,131,16,60,12,245,16,38,4,0,19,38, - 244,255,17,38,252,255,18,38,33,168,0,0, - 0,0,67,142,0,0,0,0,7,0,119,16, - 2,0,98,40,25,0,64,20,0,0,0,0, - 9,0,118,16,0,0,0,0,236,11,192,8, - 20,0,16,38,0,0,34,142,0,0,0,0, - 17,0,64,28,0,0,0,0,230,11,192,8, - 0,0,64,174,0,0,34,142,0,0,0,0, - 11,0,64,28,0,0,0,0,2,131,2,60, - 33,16,85,0,4,245,66,140,0,0,0,0, - 0,0,34,174,0,0,100,142,0,0,2,142, - 0,0,0,0,9,248,64,0,0,0,0,0, - 20,0,16,38,20,0,115,38,20,0,49,38, - 20,0,82,38,64,133,130,143,1,0,148,38, - 42,16,130,2,218,255,64,20,20,0,181,38, - 56,0,191,143,52,0,183,143,48,0,182,143, - 44,0,181,143,40,0,180,143,36,0,179,143, - 32,0,178,143,28,0,177,143,24,0,176,143, - 8,0,224,3,64,0,189,39,0,0,0,0, - 2,131,3,60,192,246,99,36,0,2,2,36, - 0,163,1,60,200,5,35,172,0,163,1,60, - 208,5,34,172,0,163,2,60,124,1,66,140, - 216,255,189,39,16,0,176,175,33,128,0,0, - 28,0,179,175,255,255,19,36,24,0,178,175, - 21,0,114,36,20,0,177,175,32,0,191,175, - 1,0,66,44,80,133,130,175,139,14,192,12, - 20,0,113,36,184,24,192,12,0,0,0,0, - 27,67,192,12,33,32,0,2,6,0,83,20, - 1,0,16,38,2,131,4,60,15,63,192,12, - 112,128,132,36,126,12,192,8,1,0,2,36, - 0,0,34,162,3,18,2,0,0,0,66,162, - 2,0,82,38,3,0,2,42,241,255,64,20, - 2,0,49,38,2,131,17,60,212,246,49,38, - 33,32,32,2,33,40,0,0,255,127,6,60, - 247,24,192,12,255,255,198,52,255,31,3,60, - 255,255,99,52,236,255,48,38,36,0,34,38, - 36,16,67,0,0,160,3,60,37,16,67,0, - 0,32,3,36,236,255,32,174,2,131,1,60, - 220,246,32,172,2,131,1,60,204,246,32,172, - 2,131,1,60,236,246,34,172,0,0,67,164, - 222,21,192,12,33,32,0,2,122,15,192,12, - 33,32,0,2,242,21,192,12,33,32,0,2, - 32,133,130,143,1,0,16,36,42,16,2,2, - 
12,0,64,16,255,31,3,60,236,1,49,38, - 133,12,192,12,33,32,0,2,242,21,192,12, - 33,32,32,2,32,133,130,143,1,0,16,38, - 42,16,2,2,248,255,64,20,0,2,49,38, - 255,31,3,60,255,255,99,52,2,131,16,60, - 192,4,16,38,7,0,2,36,0,0,2,174, - 56,0,2,38,36,16,67,0,0,160,3,60, - 37,16,67,0,0,32,3,36,2,131,1,60, - 220,4,32,172,2,131,1,60,204,4,32,172, - 2,131,1,60,236,4,34,172,0,0,67,164, - 2,131,2,60,212,246,66,140,2,131,3,60, - 216,246,99,132,20,0,2,174,24,0,3,166, - 2,131,2,60,217,4,66,144,0,0,0,0, - 7,0,66,36,2,131,1,60,217,4,34,160, - 112,15,192,12,33,32,0,2,33,32,0,2, - 19,15,192,12,32,0,5,36,20,0,16,38, - 33,32,0,2,7,0,5,36,255,127,6,60, - 247,24,192,12,255,255,198,52,33,16,0,0, - 32,0,191,143,28,0,179,143,24,0,178,143, - 20,0,177,143,16,0,176,143,8,0,224,3, - 40,0,189,39,200,255,189,39,48,0,180,175, - 33,160,128,0,255,31,6,60,255,255,198,52, - 64,26,20,0,2,131,2,60,192,246,66,36, - 40,0,178,175,33,144,98,0,255,255,132,38, - 64,18,4,0,0,162,3,60,33,16,67,0, - 52,0,191,175,44,0,179,175,36,0,177,175, - 32,0,176,175,4,0,66,174,0,1,66,36, - 8,0,66,174,0,16,2,36,4,16,130,0, - 12,0,66,174,4,0,2,36,4,16,130,0, - 0,160,5,60,16,0,66,174,48,0,66,38, - 36,16,70,0,37,16,69,0,36,0,66,174, - 64,16,4,0,33,16,68,0,128,16,2,0, - 2,131,3,60,240,231,99,36,33,16,67,0, - 36,16,70,0,37,16,69,0,40,0,66,174, - 56,0,66,38,36,16,70,0,37,16,69,0, - 0,0,84,174,44,0,66,174,32,0,64,174, - 2,131,2,60,212,246,66,140,2,131,3,60, - 216,246,99,132,20,0,66,174,24,0,67,166, - 25,0,66,146,0,0,0,0,33,32,84,0, - 2,131,2,60,0,227,66,36,36,16,70,0, - 37,128,69,0,2,131,2,60,32,227,66,36, - 36,16,70,0,25,0,68,162,40,133,131,143, - 0,0,0,0,3,0,96,16,37,136,69,0, - 255,255,130,36,25,0,66,162,12,0,68,142, - 28,0,64,174,228,63,192,12,1,0,132,52, - 4,0,68,142,0,0,0,0,76,67,192,12, - 33,40,0,0,76,63,192,12,0,0,0,0, - 76,63,192,12,0,0,0,0,255,255,2,36, - 4,0,2,174,4,0,2,142,0,0,0,0, - 0,0,2,174,4,0,68,142,0,0,0,0, - 76,67,192,12,1,0,5,54,4,0,4,38, - 33,40,0,0,255,255,6,36,211,67,192,12, - 208,7,7,36,8,0,64,20,255,255,2,52, - 2,131,4,60,184,128,132,36,4,0,6,142, - 0,0,0,0,15,63,192,12,33,40,128,2, - 255,255,2,52,48,1,34,174,4,0,68,142, - 0,0,0,0,76,67,192,12,3,0,37,54, - 48,1,36,38,33,40,0,0,255,255,6,52, - 211,67,192,12,208,7,7,36,7,0,64,20, - 0,0,0,0,2,131,4,60,8,129,132,36, - 48,1,38,142,0,0,0,0,15,63,192,12, - 33,40,128,2,143,63,192,12,0,0,0,0, - 40,0,69,142,4,0,68,142,0,0,0,0, - 76,67,192,12,2,0,165,52,44,0,81,142, - 84,128,131,143,80,128,132,143,100,0,2,36, - 0,0,32,166,2,0,32,166,4,0,32,174, - 8,0,32,174,12,0,32,174,16,0,32,174, - 24,0,32,174,20,0,32,174,28,0,32,174, - 32,0,32,174,36,0,34,166,38,0,34,166, - 36,0,35,166,38,0,36,166,36,0,83,142, - 1,0,2,36,0,0,98,174,44,0,66,142, - 0,0,0,0,4,0,98,174,40,0,67,142, - 116,0,2,60,0,0,98,172,40,0,67,142, - 36,0,66,142,0,0,0,0,8,0,98,172, - 8,0,66,142,0,0,0,0,0,0,64,172, - 0,0,98,142,0,0,0,0,10,0,64,16, - 33,128,0,0,208,7,2,42,7,0,64,16, - 0,0,0,0,143,63,192,12,0,0,0,0, - 0,0,98,142,0,0,0,0,248,255,64,20, - 1,0,16,38,0,0,98,142,0,0,0,0, - 6,0,64,16,33,32,32,2,2,131,4,60, - 76,129,132,36,15,63,192,12,33,40,128,2, - 33,32,32,2,8,0,5,36,0,0,34,150, - 8,0,6,36,0,240,66,48,0,6,66,52, - 2,0,34,166,8,0,66,142,208,7,7,36, - 129,67,192,12,0,0,64,172,6,0,64,20, - 2,0,36,38,2,131,4,60,160,129,132,36, - 15,63,192,12,33,40,128,2,2,0,36,38, - 33,40,0,0,0,0,34,150,33,48,0,0, - 0,240,66,48,2,0,34,166,8,0,66,142, - 208,7,7,36,129,67,192,12,0,0,64,172, - 4,0,64,20,0,0,0,0,2,131,4,60, - 15,63,192,12,248,129,132,36,143,63,192,12, - 0,0,0,0,108,0,80,142,0,128,2,52, - 0,0,0,166,2,0,2,166,44,0,66,142, - 
0,32,5,36,4,0,80,172,44,0,67,142, - 0,241,2,52,2,0,98,164,8,0,66,142, - 0,32,6,36,0,0,64,172,44,0,68,142, - 0,0,0,0,129,67,192,12,208,7,7,36, - 12,0,64,20,0,0,0,0,44,0,66,142, - 0,0,0,0,0,0,69,148,2,131,4,60, - 15,63,192,12,16,130,132,36,254,255,4,36, - 2,131,5,60,44,130,165,36,188,7,192,12, - 1,1,6,36,108,0,80,142,2,128,2,52, - 0,0,0,166,2,0,2,166,14,0,2,36, - 8,0,2,162,200,0,2,36,9,0,2,162, - 65,0,2,36,10,0,2,162,46,0,2,36, - 11,0,2,162,87,0,2,36,12,0,0,162, - 13,0,2,162,242,0,2,36,14,0,0,162, - 15,0,2,162,1,0,2,36,16,0,2,162, - 8,0,2,36,17,0,2,162,88,128,130,143, - 0,0,0,0,6,0,64,16,64,0,2,36, - 2,131,4,60,15,63,192,12,56,130,132,36, - 88,128,128,175,64,0,2,36,18,0,2,162, - 255,0,2,36,19,0,2,162,63,0,2,36, - 20,0,0,162,21,0,2,162,44,0,66,142, - 0,32,5,36,4,0,80,172,44,0,67,142, - 0,33,2,36,2,0,98,164,8,0,66,142, - 0,32,6,36,0,0,64,172,44,0,68,142, - 0,0,0,0,129,67,192,12,208,7,7,36, - 12,0,64,20,0,0,0,0,44,0,66,142, - 0,0,0,0,0,0,69,148,2,131,4,60, - 15,63,192,12,16,130,132,36,253,255,4,36, - 2,131,5,60,44,130,165,36,188,7,192,12, - 85,1,6,36,222,21,192,12,33,32,64,2, - 122,15,192,12,33,32,64,2,52,0,191,143, - 48,0,180,143,44,0,179,143,40,0,178,143, - 36,0,177,143,32,0,176,143,8,0,224,3, - 56,0,189,39,248,255,189,39,32,133,133,143, - 0,0,0,0,50,0,160,24,33,32,0,0, - 2,131,3,60,192,246,99,36,44,0,98,140, - 152,0,96,172,156,0,96,172,160,0,96,172, - 164,0,96,172,168,0,96,172,172,0,96,172, - 176,0,96,172,180,0,96,172,184,0,96,172, - 188,0,96,172,192,0,96,172,196,0,96,172, - 200,0,96,172,204,0,96,172,208,0,96,172, - 212,0,96,172,216,0,96,172,224,0,96,172, - 232,0,96,172,236,0,96,172,240,0,96,172, - 244,0,96,172,248,0,96,172,252,0,96,172, - 0,1,96,172,4,1,96,172,8,1,96,172, - 12,0,64,172,44,0,98,140,0,0,0,0, - 16,0,64,172,44,0,98,140,0,0,0,0, - 24,0,64,172,44,0,98,140,0,0,0,0, - 20,0,64,172,44,0,98,140,1,0,132,36, - 28,0,64,172,44,0,98,140,0,2,99,36, - 32,0,64,172,42,16,133,0,210,255,64,20, - 0,0,0,0,33,32,0,0,0,163,3,60, - 0,1,99,52,32,0,5,36,33,16,131,0, - 188,0,69,160,1,0,132,36,0,2,130,44, - 251,255,64,20,0,0,0,0,8,0,224,3, - 8,0,189,39,0,0,0,0,124,133,130,143, - 232,255,189,39,20,0,191,175,17,0,64,20, - 16,0,176,175,208,7,16,36,7,0,0,26, - 0,0,0,0,143,63,192,12,255,255,16,38, - 124,133,130,143,0,0,0,0,249,255,64,16, - 0,0,0,0,6,0,0,22,0,0,0,0, - 2,131,4,60,15,63,192,12,80,130,132,36, - 45,14,192,8,33,16,0,0,220,63,192,12, - 33,32,0,0,33,32,64,0,124,133,144,143, - 128,133,130,143,4,0,3,142,255,255,66,36, - 128,133,130,175,124,133,131,175,220,63,192,12, - 0,0,0,0,33,16,0,2,20,0,191,143, - 16,0,176,143,8,0,224,3,24,0,189,39, - 232,255,189,39,96,133,130,143,33,40,128,0, - 43,16,162,0,6,0,64,20,16,0,191,175, - 100,133,130,143,0,0,0,0,43,16,162,0, - 6,0,64,20,0,0,0,0,2,131,4,60, - 15,63,192,12,116,130,132,36,71,14,192,8, - 0,0,0,0,124,133,131,143,128,133,130,143, - 124,133,133,175,1,0,66,36,4,0,163,172, - 128,133,130,175,16,0,191,143,24,0,189,39, - 8,0,224,3,0,0,0,0,108,133,130,143, - 232,255,189,39,20,0,191,175,17,0,64,20, - 16,0,176,175,208,7,16,36,7,0,0,26, - 0,0,0,0,143,63,192,12,255,255,16,38, - 108,133,130,143,0,0,0,0,249,255,64,16, - 0,0,0,0,6,0,0,22,0,0,0,0, - 2,131,4,60,15,63,192,12,148,130,132,36, - 108,14,192,8,33,16,0,0,220,63,192,12, - 33,32,0,0,33,32,64,0,108,133,144,143, - 120,133,130,143,0,0,3,142,255,255,66,36, - 120,133,130,175,108,133,131,175,220,63,192,12, - 0,0,0,0,33,16,0,2,20,0,191,143, - 16,0,176,143,8,0,224,3,24,0,189,39, - 232,255,189,39,104,133,130,143,33,40,128,0, - 43,16,162,0,6,0,64,20,16,0,191,175, - 112,133,130,143,0,0,0,0,43,16,162,0, - 
6,0,64,20,0,0,0,0,2,131,4,60, - 15,63,192,12,184,130,132,36,135,14,192,8, - 0,0,0,0,108,133,130,143,0,0,0,0, - 0,0,162,172,120,133,130,143,108,133,133,175, - 1,0,66,36,120,133,130,175,16,0,191,143, - 24,0,189,39,8,0,224,3,0,0,0,0, - 232,255,189,39,20,0,191,175,16,0,176,175, - 124,133,128,175,13,8,192,12,0,32,4,36, - 255,31,3,60,255,255,99,52,255,1,16,36, - 36,16,67,0,0,160,3,60,37,16,67,0, - 96,133,130,175,0,32,66,36,100,133,130,175, - 0,17,16,0,96,133,132,143,255,255,16,38, - 49,14,192,12,33,32,130,0,251,255,1,6, - 0,17,16,0,0,2,2,36,132,133,130,175, - 108,133,128,175,13,8,192,12,18,0,4,60, - 255,31,3,60,255,255,99,52,255,17,16,36, - 36,16,67,0,0,160,3,60,37,16,67,0, - 18,0,3,60,104,133,130,175,33,16,67,0, - 112,133,130,175,0,18,16,0,104,133,132,143, - 255,255,16,38,112,14,192,12,33,32,130,0, - 251,255,1,6,0,18,16,0,0,18,2,36, - 116,133,130,175,20,0,191,143,16,0,176,143, - 8,0,224,3,24,0,189,39,0,0,0,0, - 0,0,0,0,0,0,0,0,0,163,2,60, - 168,1,66,140,216,255,189,39,28,0,177,175, - 33,136,128,0,32,0,178,175,33,144,160,0, - 36,0,191,175,17,0,64,16,24,0,176,175, - 0,163,2,60,168,1,66,140,0,0,0,0, - 42,16,82,0,12,0,64,16,128,128,18,0, - 0,0,34,142,0,163,18,60,168,1,82,142, - 0,0,0,0,6,0,64,20,128,128,18,0, - 2,131,4,60,224,130,132,36,15,63,192,12, - 33,40,64,2,128,128,18,0,33,128,18,2, - 128,128,16,0,13,8,192,12,33,32,0,2, - 255,31,3,60,255,255,99,52,33,32,0,0, - 36,16,67,0,0,160,3,60,37,16,67,0, - 112,0,34,174,112,0,35,142,33,16,80,0, - 15,0,64,26,116,0,34,174,8,0,5,36, - 1,0,132,36,20,0,98,36,4,0,98,172, - 2,0,101,164,0,0,96,164,8,0,96,172, - 14,0,96,164,12,0,96,164,33,24,64,0, - 42,16,146,0,246,255,64,20,1,0,132,36, - 255,255,132,36,116,0,35,142,112,0,34,142, - 0,0,0,0,240,255,98,172,116,0,35,142, - 0,0,0,0,218,255,98,148,0,0,0,0, - 0,128,66,52,218,255,98,164,116,0,35,142, - 0,0,0,0,238,255,98,148,0,0,0,0, - 0,128,66,52,238,255,98,164,116,0,34,142, - 112,0,35,142,216,255,66,36,120,0,35,174, - 124,0,34,174,36,0,191,143,32,0,178,143, - 28,0,177,143,24,0,176,143,8,0,224,3, - 40,0,189,39,200,255,189,39,32,0,178,175, - 33,144,128,0,0,1,2,36,48,0,191,175, - 44,0,181,175,40,0,180,175,36,0,179,175, - 28,0,177,175,24,0,176,175,144,0,66,174, - 0,163,2,60,172,1,66,140,0,0,0,0, - 17,0,64,16,33,160,160,0,0,163,2,60, - 172,1,66,140,0,0,0,0,42,16,84,0, - 12,0,64,16,128,128,20,0,0,0,66,142, - 0,163,20,60,172,1,148,142,0,0,0,0, - 6,0,64,20,128,128,20,0,2,131,4,60, - 236,130,132,36,15,63,192,12,33,40,128,2, - 128,128,20,0,33,128,20,2,128,128,16,0, - 33,32,0,2,13,8,192,12,148,0,84,174, - 255,31,3,60,255,255,99,52,33,152,0,0, - 36,16,67,0,0,160,3,60,37,16,67,0, - 128,0,66,174,128,0,81,142,33,16,80,0, - 15,0,128,26,132,0,66,174,0,1,21,36, - 20,0,48,38,4,0,48,174,75,14,192,12, - 0,0,32,174,8,0,34,174,12,0,53,174, - 0,0,66,142,1,0,115,38,16,0,34,162, - 17,0,32,162,42,16,116,2,244,255,64,20, - 33,136,0,2,132,0,67,142,128,0,66,142, - 0,0,0,0,240,255,98,172,132,0,67,142, - 0,0,0,0,228,255,98,140,0,0,0,0, - 0,128,66,52,228,255,98,172,132,0,67,142, - 0,0,0,0,248,255,98,140,0,0,0,0, - 0,128,66,52,248,255,98,172,132,0,66,142, - 128,0,67,142,216,255,66,36,136,0,67,174, - 140,0,66,174,48,0,191,143,44,0,181,143, - 40,0,180,143,36,0,179,143,32,0,178,143, - 28,0,177,143,24,0,176,143,8,0,224,3, - 56,0,189,39,152,0,128,172,156,0,128,172, - 160,0,128,172,164,0,128,172,168,0,128,172, - 252,0,128,172,0,1,128,172,152,0,128,172, - 8,0,224,3,216,0,128,172,232,255,189,39, - 16,0,176,175,20,0,191,175,112,15,192,12, - 33,128,128,0,33,32,0,2,192,14,192,12, - 0,4,5,36,33,32,0,2,19,15,192,12, - 128,2,5,36,120,0,3,142,136,0,2,142, - 
0,0,0,0,8,0,98,172,44,0,3,142, - 120,0,2,142,0,0,0,0,8,0,98,172, - 0,0,2,142,0,0,0,0,255,255,66,36, - 6,0,66,44,7,0,64,16,16,0,3,36, - 44,0,2,142,0,0,0,0,2,0,67,164, - 8,0,2,142,0,0,0,0,0,0,64,172, - 20,0,191,143,16,0,176,143,8,0,224,3, - 24,0,189,39,184,255,189,39,0,32,6,36, - 68,0,191,175,64,0,190,175,60,0,183,175, - 56,0,182,175,52,0,181,175,48,0,180,175, - 44,0,179,175,40,0,178,175,36,0,177,175, - 32,0,176,175,0,163,1,60,252,5,38,172, - 13,8,192,12,0,32,4,36,255,31,4,60, - 255,255,132,52,33,168,0,0,255,31,6,60, - 255,255,198,52,2,131,3,60,212,247,99,36, - 16,0,101,36,8,0,126,36,248,255,119,36, - 33,176,96,0,36,16,68,0,0,160,3,60, - 37,16,67,0,16,0,166,175,0,163,1,60, - 248,5,34,172,0,163,1,60,0,6,32,172, - 33,160,0,0,33,128,224,2,33,152,160,0, - 33,144,192,3,33,136,192,2,32,133,130,143, - 0,0,32,174,0,0,0,174,0,0,64,174, - 42,16,162,2,10,0,64,16,0,0,96,174, - 0,32,4,36,13,8,192,12,24,0,165,175, - 16,0,166,143,0,128,3,60,36,16,70,0, - 37,16,67,0,0,0,2,174,24,0,165,143, - 4,0,16,38,4,0,115,38,4,0,82,38, - 1,0,148,38,2,0,130,42,234,255,64,20, - 4,0,49,38,0,2,165,36,0,2,222,39, - 0,2,247,38,1,0,181,38,7,0,162,42, - 222,255,64,20,0,2,214,38,68,0,191,143, - 64,0,190,143,60,0,183,143,56,0,182,143, - 52,0,181,143,48,0,180,143,44,0,179,143, - 40,0,178,143,36,0,177,143,32,0,176,143, - 8,0,224,3,72,0,189,39,0,163,4,60, - 0,6,132,140,0,163,3,60,8,6,99,140, - 32,133,130,143,224,255,189,39,16,0,176,175, - 0,163,16,60,12,6,16,142,20,0,177,175, - 0,163,17,60,4,6,49,142,43,16,98,0, - 42,0,64,16,24,0,191,175,2,0,2,46, - 40,0,64,16,255,255,2,36,0,163,2,60, - 252,5,66,140,0,0,0,0,43,16,81,0, - 34,0,64,20,255,255,2,36,64,18,3,0, - 2,131,3,60,192,246,99,36,33,24,67,0, - 1,0,2,36,5,0,130,16,2,0,2,36, - 18,0,130,16,128,16,16,0,36,16,192,8, - 0,0,0,0,128,128,16,0,33,128,3,2, - 12,1,4,142,0,163,5,60,248,5,165,140, - 33,48,32,2,80,68,192,12,36,1,17,174, - 12,1,4,142,12,1,2,142,33,40,32,2, - 114,68,192,12,20,1,2,174,36,16,192,8, - 0,0,0,0,33,16,67,0,20,1,64,172, - 36,1,64,172,0,163,1,60,42,16,192,8, - 0,6,32,172,255,255,2,36,0,163,1,60, - 0,6,34,172,24,0,191,143,20,0,177,143, - 16,0,176,143,8,0,224,3,32,0,189,39, - 176,133,136,143,188,133,137,143,232,255,189,39, - 3,0,0,21,16,0,191,175,124,0,32,17, - 0,0,0,0,12,0,194,148,0,0,0,0, - 0,26,2,0,2,18,2,0,37,56,98,0, - 255,255,227,48,221,5,98,44,36,0,64,20, - 170,170,2,52,0,8,2,36,23,0,98,20, - 6,8,2,36,21,0,0,17,0,0,0,0, - 32,0,194,148,30,0,195,148,0,20,2,0, - 37,56,67,0,36,0,195,148,0,161,2,52, - 5,0,98,16,8,0,2,36,34,0,195,148, - 0,0,0,0,98,0,98,20,0,0,0,0, - 3,0,232,16,255,255,2,36,94,0,226,20, - 0,0,0,0,226,46,192,12,14,0,6,36, - 177,16,192,8,0,0,0,0,7,0,98,20, - 255,255,227,48,71,0,0,17,55,129,2,52, - 108,43,192,12,14,0,6,36,177,16,192,8, - 0,0,0,0,162,16,192,8,55,129,2,52, - 14,0,195,148,0,0,0,0,61,0,98,20, - 255,255,2,52,16,0,195,144,3,0,2,36, - 55,0,98,20,255,255,2,52,20,0,194,148, - 0,0,0,0,0,26,2,0,2,18,2,0, - 37,56,98,0,255,255,227,48,0,8,2,36, - 23,0,98,20,6,8,2,36,21,0,0,17, - 0,0,0,0,40,0,194,148,38,0,195,148, - 0,20,2,0,37,56,67,0,44,0,195,148, - 0,161,2,52,5,0,98,16,8,0,2,36, - 42,0,195,148,0,0,0,0,49,0,98,20, - 0,0,0,0,3,0,232,16,255,255,2,36, - 45,0,226,20,0,0,0,0,226,46,192,12, - 22,0,6,36,177,16,192,8,0,0,0,0, - 7,0,98,20,255,255,227,48,6,0,0,17, - 55,129,2,52,108,43,192,12,22,0,6,36, - 177,16,192,8,0,0,0,0,55,129,2,52, - 30,0,98,20,0,0,0,0,28,0,32,17, - 144,15,3,36,38,0,194,148,28,0,198,140, - 24,0,67,20,0,0,0,0,3,0,201,16, - 0,0,0,0,20,0,192,20,0,0,0,0, - 175,16,192,8,22,0,6,36,14,0,195,148, - 0,0,0,0,14,0,98,20,0,0,0,0, - 
12,0,32,17,144,15,3,36,30,0,194,148, - 20,0,198,140,8,0,67,20,0,0,0,0, - 3,0,201,16,0,0,0,0,4,0,192,20, - 0,0,0,0,14,0,6,36,126,49,192,12, - 0,0,0,0,16,0,191,143,24,0,189,39, - 8,0,224,3,0,0,0,0,128,255,189,39, - 116,0,183,175,33,184,128,0,3,0,3,36, - 124,0,191,175,120,0,190,175,112,0,182,175, - 108,0,181,175,104,0,180,175,100,0,179,175, - 96,0,178,175,92,0,177,175,88,0,176,175, - 0,0,245,142,8,0,178,140,192,17,21,0, - 3,131,4,60,33,32,130,0,20,13,132,140, - 8,0,84,142,0,0,0,0,59,0,131,16, - 5,0,130,44,57,0,64,16,128,16,4,0, - 2,131,1,60,33,8,34,0,104,131,34,140, - 0,0,0,0,8,0,64,0,0,0,0,0, - 44,133,130,143,0,0,0,0,48,0,64,16, - 6,0,132,38,4,0,131,150,2,131,2,60, - 68,207,66,148,0,0,0,0,6,0,98,20, - 33,32,0,0,0,0,130,142,48,129,131,143, - 0,0,0,0,38,16,67,0,1,0,68,44, - 72,1,128,16,33,32,160,2,114,42,192,12, - 33,40,128,2,45,18,192,8,33,32,64,2, - 44,133,130,143,0,0,0,0,27,0,64,16, - 6,0,132,38,4,0,131,150,2,131,2,60, - 68,207,66,148,0,0,0,0,6,0,98,20, - 33,32,0,0,0,0,130,142,48,129,131,143, - 0,0,0,0,38,16,67,0,1,0,68,44, - 5,0,128,16,33,32,160,2,114,42,192,12, - 33,40,128,2,45,18,192,8,33,32,64,2, - 6,0,132,38,0,163,6,60,140,1,198,140, - 0,0,0,0,247,24,192,12,33,40,160,2, - 45,18,192,8,33,32,64,2,6,0,132,38, - 0,163,6,60,140,1,198,140,0,0,0,0, - 247,24,192,12,33,40,160,2,203,24,192,12, - 33,32,128,2,20,1,227,142,0,0,0,0, - 14,0,96,16,33,240,64,0,33,32,128,2, - 16,0,166,39,18,0,69,150,0,0,0,0, - 9,248,96,0,33,56,192,3,6,0,64,16, - 33,32,64,2,28,1,226,142,0,0,0,0, - 1,0,66,36,45,18,192,8,28,1,226,174, - 132,0,193,7,7,0,2,36,4,0,131,150, - 2,131,2,60,68,207,66,148,0,0,0,0, - 6,0,98,20,33,32,0,0,0,0,130,142, - 48,129,131,143,0,0,0,0,38,16,67,0, - 1,0,68,44,9,0,128,16,255,255,2,36, - 44,133,130,143,0,0,0,0,251,0,64,16, - 33,32,160,2,114,42,192,12,33,40,128,2, - 45,18,192,8,33,32,64,2,10,0,194,23, - 0,0,0,0,8,0,160,18,0,0,0,0, - 36,133,130,143,0,0,0,0,8,0,64,16, - 1,0,19,36,80,133,147,143,69,17,192,8, - 0,0,0,0,0,1,226,142,80,133,147,143, - 1,0,66,36,0,1,226,174,84,133,130,143, - 0,0,0,0,35,16,83,0,255,255,66,36, - 17,0,66,162,84,133,130,143,33,128,96,2, - 42,16,2,2,15,0,64,16,64,18,16,0, - 2,131,3,60,192,246,99,36,33,136,67,0, - 5,0,21,18,0,0,0,0,247,22,192,12, - 33,32,32,2,217,0,64,16,33,16,0,0, - 84,133,130,143,1,0,16,38,42,16,2,2, - 246,255,64,20,0,2,49,38,84,133,130,143, - 33,128,96,2,42,16,2,2,55,0,64,16, - 64,18,16,0,2,131,3,60,192,246,99,36, - 33,152,67,0,33,136,64,0,192,177,16,0, - 41,0,21,18,0,0,0,0,2,131,2,60, - 33,16,81,0,216,247,66,140,0,0,0,0, - 15,0,64,16,33,32,128,2,16,0,166,39, - 18,0,69,150,0,0,0,0,9,248,64,0, - 33,56,160,2,8,0,64,16,0,0,0,0, - 2,131,2,60,33,16,81,0,224,247,66,140, - 0,0,0,0,1,0,66,36,140,17,192,8, - 32,1,98,174,44,133,130,143,0,0,0,0, - 7,0,64,16,3,0,8,36,3,131,2,60, - 33,16,86,0,20,13,66,140,0,0,0,0, - 6,0,72,20,0,0,0,0,33,32,96,2, - 6,23,192,12,33,40,64,2,146,17,192,8, - 0,2,115,38,17,0,66,146,0,0,0,0, - 255,255,66,36,17,0,66,162,17,0,66,146, - 0,2,115,38,0,2,49,38,84,133,130,143, - 1,0,16,38,42,16,2,2,208,255,64,20, - 128,0,214,38,254,255,2,36,4,0,194,23, - 33,32,224,2,33,40,64,2,47,16,192,12, - 33,48,128,2,17,0,66,146,0,0,0,0, - 140,0,64,16,33,32,64,2,36,18,192,8, - 0,0,0,0,26,0,194,23,0,0,0,0, - 36,133,130,143,0,0,0,0,11,0,64,16, - 33,32,224,2,9,0,160,18,1,0,2,36, - 17,0,66,162,2,131,4,60,192,246,132,36, - 6,23,192,12,33,40,64,2,126,0,64,16, - 33,16,0,0,33,32,224,2,33,40,64,2, - 47,16,192,12,33,48,128,2,36,133,130,143, - 0,0,0,0,115,0,64,16,33,32,64,2, - 116,0,160,22,1,0,2,36,45,18,192,8, - 0,0,0,0,87,0,213,19,64,130,30,0, - 
2,131,2,60,33,16,80,0,216,247,66,140, - 0,0,0,0,18,0,64,16,33,32,128,2, - 16,0,166,39,18,0,69,150,0,0,0,0, - 9,248,64,0,33,56,160,2,11,0,64,16, - 33,32,64,2,2,131,2,60,33,16,80,0, - 224,247,66,140,0,0,0,0,1,0,66,36, - 2,131,1,60,33,8,48,0,224,247,34,172, - 45,18,192,8,17,0,128,160,36,133,130,143, - 0,0,0,0,43,0,64,16,0,0,0,0, - 41,0,192,19,0,0,0,0,39,0,160,18, - 64,18,30,0,2,131,16,60,192,246,16,38, - 33,136,80,0,247,22,192,12,33,32,32,2, - 74,0,64,16,33,16,0,0,247,22,192,12, - 33,32,0,2,63,0,64,16,2,0,2,36, - 17,0,66,162,44,133,130,143,0,0,0,0, - 7,0,64,16,192,17,30,0,3,131,3,60, - 33,24,98,0,20,13,99,140,3,0,2,36, - 6,0,98,20,0,0,0,0,33,32,32,2, - 6,23,192,12,33,40,64,2,0,18,192,8, - 0,0,0,0,17,0,66,146,0,0,0,0, - 255,255,66,36,17,0,66,162,17,0,66,146, - 2,131,4,60,192,246,132,36,6,23,192,12, - 33,40,64,2,36,18,192,8,0,0,0,0, - 44,133,130,143,0,0,0,0,7,0,64,16, - 192,17,30,0,3,131,3,60,33,24,98,0, - 20,13,99,140,3,0,2,36,28,0,98,20, - 0,0,0,0,1,0,2,36,17,0,66,162, - 64,18,30,0,2,131,4,60,192,246,132,36, - 32,18,192,8,33,32,68,0,36,133,130,143, - 0,0,0,0,17,0,64,16,0,0,0,0, - 15,0,192,19,1,0,2,36,17,0,66,162, - 2,131,4,60,192,246,132,36,6,23,192,12, - 33,40,64,2,13,0,64,16,33,16,0,0, - 252,0,226,142,0,0,0,0,1,0,66,36, - 47,18,192,8,252,0,226,174,48,18,192,8, - 33,16,0,0,17,0,64,162,33,32,64,2, - 152,21,192,12,0,0,0,0,1,0,2,36, - 124,0,191,143,120,0,190,143,116,0,183,143, - 112,0,182,143,108,0,181,143,104,0,180,143, - 100,0,179,143,96,0,178,143,92,0,177,143, - 88,0,176,143,8,0,224,3,128,0,189,39, - 216,255,189,39,24,0,178,175,33,144,128,0, - 32,0,191,175,28,0,179,175,20,0,177,175, - 16,0,176,175,8,0,177,140,0,0,66,142, - 8,0,38,142,36,0,64,16,0,0,0,0, - 28,0,66,142,0,0,0,0,18,0,64,20, - 1,0,2,36,0,0,194,144,0,0,0,0, - 1,0,66,48,13,0,64,20,1,0,2,36, - 4,0,195,148,24,0,66,150,0,0,0,0, - 6,0,98,20,33,32,0,0,0,0,194,140, - 20,0,67,142,0,0,0,0,38,16,67,0, - 1,0,68,44,10,0,128,16,1,0,2,36, - 17,0,34,162,2,131,4,60,192,246,132,36, - 6,23,192,12,33,40,32,2,45,0,64,16, - 33,16,0,0,139,18,192,8,0,0,0,0, - 17,0,32,162,152,21,192,12,33,32,32,2, - 144,18,192,8,1,0,2,36,16,0,179,140, - 0,0,0,0,6,0,96,26,0,0,0,0, - 32,133,130,143,0,0,0,0,42,16,98,2, - 15,0,64,20,1,0,2,36,2,131,4,60, - 248,130,132,36,2,131,16,60,24,131,16,38, - 33,40,0,2,2,131,7,60,36,131,231,36, - 15,63,192,12,188,2,6,36,1,0,4,36, - 33,40,0,2,188,7,192,12,188,2,6,36, - 1,0,2,36,17,0,34,162,64,18,19,0, - 2,131,4,60,192,246,132,36,33,32,68,0, - 6,23,192,12,33,40,32,2,6,0,64,16, - 33,16,0,0,252,0,66,142,0,0,0,0, - 1,0,66,36,252,0,66,174,1,0,2,36, - 32,0,191,143,28,0,179,143,24,0,178,143, - 20,0,177,143,16,0,176,143,8,0,224,3, - 40,0,189,39,72,255,189,39,164,0,181,175, - 33,168,128,0,180,0,191,175,176,0,190,175, - 172,0,183,175,168,0,182,175,160,0,180,175, - 156,0,179,175,152,0,178,175,148,0,177,175, - 144,0,176,175,88,0,165,175,120,0,160,175, - 120,0,168,142,0,0,0,0,96,0,168,175, - 124,0,169,142,0,0,0,0,15,2,160,24, - 104,0,169,175,96,0,168,143,0,0,0,0, - 0,0,4,149,0,0,0,0,0,128,130,48, - 9,2,64,16,0,0,0,0,128,0,160,175, - 8,0,2,141,136,0,169,142,255,255,8,36, - 18,0,72,16,112,0,169,175,112,0,169,143, - 0,0,0,0,0,0,35,141,4,0,40,141, - 128,0,169,143,255,63,98,48,33,72,34,1, - 0,128,99,48,112,0,168,175,246,255,96,16, - 128,0,169,175,96,0,168,143,0,0,0,0, - 8,0,2,141,128,0,169,151,0,0,0,0, - 18,0,73,164,0,32,130,48,200,1,64,16, - 0,0,0,0,40,133,130,143,0,0,0,0, - 75,0,64,16,3,0,8,36,96,0,168,143, - 0,0,0,0,8,0,16,141,0,0,162,142, - 8,0,5,142,30,0,64,16,0,0,0,0, - 28,0,162,142,0,0,0,0,18,0,64,20, - 1,0,9,36,0,0,162,144,0,0,0,0, - 
1,0,66,48,13,0,64,20,0,0,0,0, - 4,0,163,148,24,0,162,150,0,0,0,0, - 6,0,98,20,33,32,0,0,0,0,162,140, - 20,0,163,142,0,0,0,0,38,16,67,0, - 1,0,68,44,6,0,128,16,1,0,9,36, - 17,0,9,162,2,131,4,60,192,246,132,36, - 18,19,192,8,33,40,0,2,17,0,0,162, - 130,20,192,8,33,32,0,2,16,0,17,141, - 0,0,0,0,6,0,32,26,0,0,0,0, - 32,133,130,143,0,0,0,0,42,16,34,2, - 15,0,64,20,1,0,9,36,2,131,4,60, - 248,130,132,36,2,131,5,60,24,131,165,36, - 2,131,7,60,36,131,231,36,15,63,192,12, - 188,2,6,36,1,0,4,36,2,131,5,60, - 24,131,165,36,188,7,192,12,188,2,6,36, - 1,0,9,36,17,0,9,162,64,34,17,0, - 2,131,8,60,192,246,8,37,33,32,136,0, - 33,40,0,2,6,23,192,12,0,0,0,0, - 112,1,64,16,33,16,0,0,252,0,162,142, - 0,0,0,0,1,0,66,36,132,20,192,8, - 252,0,162,174,0,0,182,142,96,0,169,143, - 192,17,22,0,8,0,50,141,3,131,3,60, - 33,24,98,0,20,13,99,140,8,0,84,142, - 0,0,0,0,59,0,104,16,5,0,98,44, - 57,0,64,16,128,16,3,0,2,131,1,60, - 33,8,34,0,128,131,34,140,0,0,0,0, - 8,0,64,0,0,0,0,0,44,133,130,143, - 0,0,0,0,48,0,64,16,6,0,132,38, - 4,0,131,150,2,131,2,60,68,207,66,148, - 0,0,0,0,6,0,98,20,33,32,0,0, - 0,0,130,142,48,129,131,143,0,0,0,0, - 38,16,67,0,1,0,68,44,67,1,128,16, - 33,32,192,2,114,42,192,12,33,40,128,2, - 130,20,192,8,33,32,64,2,44,133,130,143, - 0,0,0,0,27,0,64,16,6,0,132,38, - 4,0,131,150,2,131,2,60,68,207,66,148, - 0,0,0,0,6,0,98,20,33,32,0,0, - 0,0,130,142,48,129,131,143,0,0,0,0, - 38,16,67,0,1,0,68,44,5,0,128,16, - 33,32,192,2,114,42,192,12,33,40,128,2, - 130,20,192,8,33,32,64,2,6,0,132,38, - 0,163,6,60,140,1,198,140,0,0,0,0, - 247,24,192,12,33,40,192,2,130,20,192,8, - 33,32,64,2,6,0,132,38,0,163,6,60, - 140,1,198,140,0,0,0,0,247,24,192,12, - 33,40,192,2,203,24,192,12,33,32,128,2, - 20,1,163,142,0,0,0,0,14,0,96,16, - 33,240,64,0,33,32,128,2,16,0,166,39, - 18,0,69,150,0,0,0,0,9,248,96,0, - 33,56,192,3,6,0,64,16,33,32,64,2, - 28,1,162,142,0,0,0,0,1,0,66,36, - 130,20,192,8,28,1,162,174,132,0,193,7, - 7,0,2,36,4,0,131,150,2,131,2,60, - 68,207,66,148,0,0,0,0,6,0,98,20, - 33,32,0,0,0,0,130,142,48,129,131,143, - 0,0,0,0,38,16,67,0,1,0,68,44, - 9,0,128,16,255,255,9,36,44,133,130,143, - 0,0,0,0,246,0,64,16,33,32,192,2, - 114,42,192,12,33,40,128,2,130,20,192,8, - 33,32,64,2,10,0,201,23,0,0,0,0, - 8,0,192,18,0,0,0,0,36,133,130,143, - 0,0,0,0,8,0,64,16,1,0,19,36, - 80,133,147,143,159,19,192,8,0,0,0,0, - 0,1,162,142,80,133,147,143,1,0,66,36, - 0,1,162,174,84,133,130,143,0,0,0,0, - 35,16,83,0,255,255,66,36,17,0,66,162, - 84,133,130,143,33,136,96,2,42,16,34,2, - 15,0,64,16,64,18,17,0,2,131,8,60, - 192,246,8,37,33,128,72,0,5,0,54,18, - 0,0,0,0,247,22,192,12,33,32,0,2, - 212,0,64,16,33,16,0,0,84,133,130,143, - 1,0,49,38,42,16,34,2,246,255,64,20, - 0,2,16,38,84,133,130,143,33,136,96,2, - 42,16,34,2,55,0,64,16,64,18,17,0, - 2,131,9,60,192,246,41,37,33,152,73,0, - 33,128,64,0,192,185,17,0,41,0,54,18, - 0,0,0,0,2,131,2,60,33,16,80,0, - 216,247,66,140,0,0,0,0,15,0,64,16, - 33,32,128,2,16,0,166,39,18,0,69,150, - 0,0,0,0,9,248,64,0,33,56,192,2, - 8,0,64,16,0,0,0,0,2,131,2,60, - 33,16,80,0,224,247,66,140,0,0,0,0, - 1,0,66,36,230,19,192,8,32,1,98,174, - 44,133,130,143,0,0,0,0,7,0,64,16, - 3,0,8,36,3,131,2,60,33,16,87,0, - 20,13,66,140,0,0,0,0,6,0,72,20, - 0,0,0,0,33,32,96,2,6,23,192,12, - 33,40,64,2,236,19,192,8,0,2,115,38, - 17,0,66,146,0,0,0,0,255,255,66,36, - 17,0,66,162,17,0,66,146,0,2,115,38, - 0,2,16,38,84,133,130,143,1,0,49,38, - 42,16,34,2,208,255,64,20,128,0,247,38, - 254,255,2,36,4,0,194,23,33,32,160,2, - 33,40,64,2,47,16,192,12,33,48,128,2, - 17,0,66,146,0,0,0,0,135,0,64,16, - 33,32,64,2,22,19,192,8,0,0,0,0, - 
26,0,194,23,0,0,0,0,36,133,130,143, - 0,0,0,0,11,0,64,16,33,32,160,2, - 9,0,192,18,1,0,9,36,17,0,73,162, - 2,131,4,60,192,246,132,36,6,23,192,12, - 33,40,64,2,121,0,64,16,33,16,0,0, - 33,32,160,2,33,40,64,2,47,16,192,12, - 33,48,128,2,36,133,130,143,0,0,0,0, - 110,0,64,16,33,32,64,2,111,0,192,22, - 1,0,2,36,130,20,192,8,0,0,0,0, - 89,0,214,19,64,130,30,0,2,131,2,60, - 33,16,80,0,216,247,66,140,0,0,0,0, - 18,0,64,16,33,32,128,2,16,0,166,39, - 18,0,69,150,0,0,0,0,9,248,64,0, - 33,56,192,2,11,0,64,16,33,32,64,2, - 2,131,8,60,192,246,8,37,2,131,2,60, - 33,16,80,0,224,247,66,140,33,24,8,2, - 1,0,66,36,32,1,98,172,130,20,192,8, - 17,0,128,160,36,133,130,143,0,0,0,0, - 44,0,64,16,0,0,0,0,42,0,192,19, - 0,0,0,0,40,0,192,18,64,18,30,0, - 2,131,9,60,192,246,41,37,33,128,73,0, - 247,22,192,12,33,32,0,2,69,0,64,16, - 33,16,0,0,2,131,4,60,247,22,192,12, - 192,246,132,36,57,0,64,16,2,0,2,36, - 17,0,66,162,44,133,130,143,0,0,0,0, - 7,0,64,16,192,17,30,0,3,131,1,60, - 33,8,34,0,20,13,34,140,3,0,8,36, - 6,0,72,20,0,0,0,0,33,32,0,2, - 6,23,192,12,33,40,64,2,91,20,192,8, - 0,0,0,0,17,0,66,146,0,0,0,0, - 255,255,66,36,17,0,66,162,17,0,66,146, - 2,131,4,60,192,246,132,36,6,23,192,12, - 33,40,64,2,22,19,192,8,0,0,0,0, - 44,133,130,143,0,0,0,0,7,0,64,16, - 192,17,30,0,3,131,1,60,33,8,34,0, - 20,13,34,140,3,0,9,36,22,0,73,20, - 0,0,0,0,1,0,8,36,17,0,72,162, - 64,34,30,0,2,131,9,60,192,246,41,37, - 33,32,137,0,18,19,192,8,33,40,64,2, - 36,133,130,143,0,0,0,0,10,0,64,16, - 0,0,0,0,8,0,192,19,1,0,8,36, - 17,0,72,162,2,131,4,60,192,246,132,36, - 18,19,192,8,33,40,64,2,133,20,192,8, - 33,16,0,0,17,0,64,162,33,32,64,2, - 152,21,192,12,0,0,0,0,1,0,2,36, - 52,0,64,16,0,0,0,0,152,0,162,142, - 0,0,0,0,1,0,66,36,152,0,162,174, - 156,0,162,142,168,0,163,142,1,0,66,36, - 156,0,162,174,128,0,169,143,0,0,0,0, - 33,24,105,0,163,20,192,8,168,0,163,174, - 152,0,162,142,160,0,163,142,1,0,66,36, - 1,0,99,36,152,0,162,174,160,0,163,174, - 96,0,168,143,0,0,0,0,8,0,2,141, - 255,255,9,36,4,0,73,16,0,0,0,0, - 8,0,4,141,152,21,192,12,0,0,0,0, - 120,0,168,143,112,0,169,143,1,0,8,37, - 120,0,168,175,136,0,169,174,96,0,168,143, - 8,128,2,52,0,0,0,165,2,0,2,165, - 104,0,169,143,8,0,2,36,2,0,34,165, - 4,0,40,141,96,0,169,143,104,0,168,175, - 4,0,41,141,120,0,168,143,96,0,169,175, - 88,0,169,143,0,0,0,0,42,16,9,1, - 243,253,64,20,0,0,0,0,96,0,168,143, - 44,0,163,142,120,0,168,174,104,0,169,143, - 0,0,0,0,124,0,169,174,0,0,98,148, - 0,0,0,0,0,16,66,48,43,0,64,16, - 0,0,0,0,2,0,98,148,0,0,0,0, - 39,0,64,20,0,0,0,0,0,0,2,149, - 0,0,0,0,35,0,64,20,0,0,0,0, - 2,0,2,149,8,0,3,36,255,255,66,48, - 30,0,67,20,0,0,0,0,136,0,162,142, - 0,0,0,0,12,0,66,140,0,0,0,0, - 0,128,66,48,23,0,64,20,0,0,0,0, - 164,0,162,142,44,0,163,142,1,0,66,36, - 164,0,162,174,8,0,104,172,136,0,162,142, - 0,0,0,0,8,0,2,173,44,0,163,142, - 16,16,2,36,2,0,98,164,0,0,162,142, - 0,0,0,0,5,0,64,20,0,0,0,0, - 164,7,192,12,0,0,0,0,239,20,192,8, - 0,0,0,0,8,0,162,142,0,0,0,0, - 0,0,64,172,180,0,191,143,176,0,190,143, - 172,0,183,143,168,0,182,143,164,0,181,143, - 160,0,180,143,156,0,179,143,152,0,178,143, - 148,0,177,143,144,0,176,143,8,0,224,3, - 184,0,189,39,216,255,189,39,28,0,177,175, - 33,136,128,0,32,0,178,175,33,144,160,0, - 96,128,132,39,6,0,37,38,24,0,176,175, - 104,128,144,39,36,0,191,175,31,21,192,12, - 33,48,0,2,108,128,132,39,33,40,32,2, - 31,21,192,12,33,48,0,2,10,0,64,26, - 33,128,0,0,116,128,132,39,33,16,17,2, - 12,0,69,144,0,0,0,0,15,63,192,12, - 1,0,16,38,42,16,18,2,248,255,64,20, - 0,0,0,0,124,128,132,39,15,63,192,12, - 0,0,0,0,36,0,191,143,32,0,178,143, - 
28,0,177,143,24,0,176,143,8,0,224,3, - 40,0,189,39,208,255,189,39,40,0,191,175, - 2,0,162,144,0,0,163,144,1,0,167,144, - 16,0,162,175,3,0,162,144,33,64,128,0, - 20,0,162,175,4,0,162,144,2,131,4,60, - 68,131,132,36,24,0,162,175,5,0,162,144, - 33,40,0,1,32,0,166,175,33,48,96,0, - 15,63,192,12,28,0,162,175,40,0,191,143, - 48,0,189,39,8,0,224,3,0,0,0,0, - 248,255,189,39,136,0,135,140,255,255,163,36, - 12,0,160,16,33,48,224,0,255,255,5,36, - 12,0,194,140,0,0,0,0,0,128,66,48, - 8,0,64,20,33,16,0,0,255,255,99,36, - 0,0,192,172,4,0,198,140,247,255,101,20, - 0,0,0,0,136,0,134,172,33,16,224,0, - 8,0,224,3,8,0,189,39,224,255,189,39, - 16,0,176,175,33,128,160,0,28,0,191,175, - 24,0,178,175,33,0,128,20,20,0,177,175, - 84,133,130,143,80,133,131,143,0,0,0,0, - 35,16,67,0,17,0,2,162,80,133,145,143, - 84,133,130,143,0,0,0,0,42,16,34,2, - 19,0,64,16,64,18,17,0,2,131,3,60, - 192,246,99,36,33,144,67,0,33,32,64,2, - 6,23,192,12,33,40,0,2,6,0,64,20, - 0,0,0,0,17,0,2,146,0,0,0,0, - 255,255,66,36,17,0,2,162,17,0,2,146, - 84,133,130,143,1,0,49,38,42,16,34,2, - 242,255,64,20,0,2,82,38,17,0,2,146, - 144,21,192,8,0,0,0,0,36,133,130,143, - 0,0,0,0,25,0,64,16,1,0,2,36, - 0,0,130,140,0,0,0,0,20,0,64,16, - 2,0,2,36,17,0,2,162,6,23,192,12, - 33,40,0,2,19,0,64,16,33,16,0,0, - 2,131,4,60,192,246,132,36,6,23,192,12, - 33,40,0,2,7,0,64,20,0,0,0,0, - 17,0,2,146,0,0,0,0,255,255,66,36, - 17,0,2,162,17,0,2,146,0,0,0,0, - 144,21,192,8,1,0,2,36,1,0,2,36, - 17,0,2,162,6,23,192,12,33,40,0,2, - 28,0,191,143,24,0,178,143,20,0,177,143, - 16,0,176,143,8,0,224,3,32,0,189,39, - 0,0,0,0,0,0,0,0,0,129,9,52, - 16,0,130,144,2,131,3,60,192,246,99,36, - 64,18,2,0,33,56,67,0,140,0,230,140, - 0,1,8,36,4,0,197,140,0,0,131,140, - 0,0,128,172,12,0,137,172,4,0,164,172, - 12,0,200,172,33,48,160,0,216,0,226,140, - 33,40,128,0,1,0,66,36,0,128,99,48, - 4,0,96,20,216,0,226,172,4,0,132,140, - 161,21,192,8,0,0,0,0,8,0,224,3, - 140,0,230,172,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,8,0,224,3,0,0,0,0, - 172,0,128,172,176,0,128,172,180,0,128,172, - 184,0,128,172,188,0,128,172,192,0,128,172, - 196,0,128,172,200,0,128,172,204,0,128,172, - 208,0,128,172,212,0,128,172,224,0,128,172, - 8,1,128,172,4,1,128,172,236,0,128,172, - 240,0,128,172,232,0,128,172,244,0,128,172, - 8,0,224,3,248,0,128,172,224,255,189,39, - 16,0,176,175,33,128,128,0,20,0,177,175, - 0,2,17,36,24,0,191,175,13,8,192,12, - 0,48,4,36,255,31,3,60,255,255,99,52, - 33,32,0,0,36,16,67,0,0,128,3,60, - 37,40,67,0,33,24,160,0,0,128,6,52, - 1,0,132,36,24,0,98,36,0,0,96,164, - 2,0,102,164,4,0,98,172,33,24,64,0, - 42,16,145,0,249,255,64,20,1,0,132,36, - 255,255,132,36,64,16,17,0,33,16,81,0, - 192,16,2,0,33,16,69,0,48,0,163,36, - 236,255,69,172,108,0,3,174,104,0,3,174, - 96,0,5,174,100,0,2,174,24,0,191,143, - 20,0,177,143,16,0,176,143,8,0,224,3, - 32,0,189,39,248,255,189,39,0,32,14,60, - 4,0,177,175,7,0,17,60,0,0,176,175, - 4,0,16,60,2,131,25,60,192,246,57,39, - 0,1,15,36,108,0,152,140,104,0,137,140, - 176,0,140,140,180,0,141,140,94,0,56,17, - 0,0,0,0,4,0,43,141,0,0,0,0, - 37,16,110,1,0,0,66,148,0,0,0,0, - 0,128,66,48,86,0,64,16,37,16,46,1, - 0,0,67,140,0,0,0,0,36,16,113,0, - 76,0,80,20,0,32,98,48,41,0,64,20, - 15,0,98,48,188,0,130,140,0,0,0,0, - 1,0,66,36,188,0,130,172,0,8,98,48, 
- 6,0,64,16,0,4,98,48,192,0,130,140, - 0,0,0,0,1,0,66,36,192,0,130,172, - 0,4,98,48,6,0,64,16,0,2,98,48, - 196,0,130,140,0,0,0,0,1,0,66,36, - 196,0,130,172,0,2,98,48,6,0,64,16, - 0,1,98,48,200,0,130,140,0,0,0,0, - 1,0,66,36,200,0,130,172,0,1,98,48, - 6,0,64,16,32,0,98,48,204,0,130,140, - 0,0,0,0,1,0,66,36,204,0,130,172, - 32,0,98,48,6,0,64,16,15,0,98,48, - 208,0,130,140,0,0,0,0,1,0,66,36, - 208,0,130,172,15,0,98,48,212,0,131,140, - 8,0,37,141,33,24,98,0,212,0,131,172, - 17,0,162,144,1,0,140,37,255,255,66,36, - 17,0,162,160,25,0,64,20,37,24,46,1, - 16,0,162,144,1,0,173,37,64,18,2,0, - 33,64,89,0,140,0,7,141,0,129,10,52, - 4,0,230,140,0,0,163,140,0,0,160,172, - 12,0,170,172,4,0,197,172,12,0,239,172, - 33,56,192,0,216,0,2,141,33,48,160,0, - 1,0,66,36,0,128,99,48,4,0,96,20, - 216,0,2,173,4,0,165,140,114,22,192,8, - 0,0,0,0,140,0,7,173,37,24,46,1, - 0,128,2,60,0,0,98,172,40,22,192,8, - 33,72,96,1,104,0,137,172,176,0,140,172, - 180,0,141,172,4,0,177,143,0,0,176,143, - 8,0,224,3,8,0,189,39,224,255,189,39, - 16,0,176,175,33,128,128,0,24,0,191,175, - 20,0,177,175,44,0,17,142,0,0,0,0, - 0,0,34,150,0,0,0,0,0,32,66,48, - 89,0,64,16,0,0,0,0,2,0,34,150, - 0,0,0,0,0,1,66,48,84,0,64,20, - 0,0,0,0,27,22,192,12,0,0,0,0, - 104,0,4,142,0,0,0,0,2,0,130,148, - 0,128,3,52,255,255,66,48,75,0,67,16, - 0,0,0,0,224,0,2,142,0,0,0,0, - 1,0,66,36,224,0,2,174,4,0,36,174, - 0,0,128,164,4,0,130,140,0,0,0,0, - 0,0,64,164,0,0,2,142,0,0,0,0, - 51,0,64,16,0,33,2,36,2,0,34,150, - 0,0,0,0,47,0,64,16,0,33,2,36, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 2,0,34,150,0,0,0,0,211,255,64,20, - 0,33,2,36,2,0,34,166,0,0,2,142, - 0,0,0,0,5,0,64,16,0,0,0,0, - 8,0,2,142,0,0,0,0,242,22,192,8, - 0,0,64,172,164,7,192,12,0,0,0,0, - 24,0,191,143,20,0,177,143,16,0,176,143, - 8,0,224,3,32,0,189,39,108,0,133,140, - 0,0,0,0,4,0,162,140,0,0,0,0, - 4,0,67,140,104,0,130,140,0,0,0,0, - 5,0,98,20,33,16,160,0,184,0,131,140, - 33,16,0,0,1,0,99,36,184,0,131,172, - 8,0,224,3,0,0,0,0,224,255,189,39, - 16,0,176,175,33,128,128,0,28,0,191,175, - 24,0,178,175,20,0,177,175,108,0,18,142, - 8,1,6,142,44,0,17,142,4,0,66,142, - 104,0,7,142,4,0,66,140,18,0,163,148, - 172,0,4,142,0,0,0,0,6,0,71,20, - 255,255,99,48,184,0,3,142,33,16,0,0, - 1,0,99,36,157,23,192,8,184,0,3,174, - 33,48,195,0,1,0,130,36,172,0,2,174, - 8,1,6,174,8,0,162,140,4,1,3,142, - 0,0,70,144,33,32,64,2,8,0,69,174, - 12,0,64,174,1,0,194,48,2,0,64,16, - 1,0,98,36,4,1,2,174,0,0,2,142, - 0,0,0,0,35,0,64,20,0,0,0,0, - 18,0,162,148,0,0,0,0,255,255,66,48, - 12,0,66,174,0,0,34,150,0,0,0,0, - 0,32,66,48,24,0,64,16,12,0,2,36, - 2,0,34,150,0,0,0,0,0,1,66,48, - 19,0,64,20,12,0,2,36,4,0,242,16, - 0,0,0,0,27,22,192,12,33,32,0,2, - 12,0,2,36,2,0,66,166,104,0,4,142, - 0,0,0,0,0,0,128,164,4,0,130,140, - 0,0,0,0,0,0,64,164,0,33,2,36, - 4,0,36,174,164,7,192,12,2,0,34,166, - 154,23,192,8,0,0,0,0,154,23,192,8, - 2,0,130,164,0,0,34,150,0,0,0,0, - 0,32,66,48,69,0,64,16,12,0,2,36, - 4,0,242,16,0,0,0,0,27,22,192,12, - 33,32,0,2,12,0,2,36,2,0,66,166, - 104,0,4,142,0,0,0,0,0,0,128,164, - 4,0,130,140,0,0,0,0,0,0,64,164, - 4,0,36,174,2,0,34,150,0,0,0,0, - 47,0,64,16,0,33,2,36,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 
0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,2,0,34,150, - 0,0,0,0,211,255,64,20,0,33,2,36, - 2,0,34,166,8,0,2,142,0,0,0,0, - 154,23,192,8,0,0,64,172,2,0,66,166, - 4,0,67,142,1,0,2,36,108,0,3,174, - 28,0,191,143,24,0,178,143,20,0,177,143, - 16,0,176,143,8,0,224,3,32,0,189,39, - 216,255,189,39,20,0,177,175,33,136,128,0, - 24,0,178,175,2,131,18,60,192,131,82,38, - 28,0,179,175,0,1,19,36,32,0,191,175, - 16,0,176,175,104,0,48,142,108,0,34,142, - 0,0,0,0,4,0,2,22,0,0,0,0, - 2,0,2,150,245,23,192,8,0,0,0,0, - 2,0,2,150,0,0,0,0,7,0,66,48, - 11,0,64,20,33,40,64,2,2,131,4,60, - 160,131,132,36,2,131,7,60,252,131,231,36, - 15,63,192,12,0,2,6,36,1,0,4,36, - 33,40,64,2,188,7,192,12,0,2,6,36, - 2,0,2,150,4,0,3,36,7,0,66,48, - 40,0,67,20,0,128,2,52,8,0,3,142, - 0,0,0,0,17,0,98,144,0,0,0,0, - 255,255,66,36,17,0,98,160,17,0,98,144, - 0,0,0,0,30,0,64,20,0,128,2,52, - 180,0,34,142,33,32,96,0,1,0,66,36, - 180,0,34,174,16,0,130,144,2,131,3,60, - 192,246,99,36,64,18,2,0,33,56,67,0, - 140,0,230,140,0,129,8,52,4,0,197,140, - 0,0,131,140,0,0,128,172,12,0,136,172, - 4,0,164,172,12,0,211,172,33,48,160,0, - 216,0,226,140,33,40,128,0,1,0,66,36, - 0,128,99,48,4,0,96,20,216,0,226,172, - 4,0,132,140,223,23,192,8,0,0,0,0, - 140,0,230,172,0,128,2,52,2,0,2,166, - 0,0,0,166,4,0,16,142,174,23,192,8, - 0,0,0,0,44,0,35,142,104,0,48,174, - 0,0,98,148,0,0,0,0,0,32,66,52, - 0,0,98,164,32,0,191,143,28,0,179,143, - 24,0,178,143,20,0,177,143,16,0,176,143, - 8,0,224,3,40,0,189,39,0,163,2,60, - 0,1,66,52,0,0,66,140,2,131,3,60, - 192,6,99,36,255,3,66,48,60,0,66,36, - 0,128,66,52,8,0,224,3,0,0,98,172, - 208,255,189,39,28,0,177,175,33,136,128,0, - 32,0,178,175,33,144,160,0,24,0,176,175, - 0,163,16,60,0,163,2,60,164,1,66,140, - 0,1,16,54,44,0,191,175,40,0,180,175, - 4,0,64,20,36,0,179,175,60,0,2,36, - 0,163,1,60,164,1,34,172,0,163,2,60, - 164,1,66,140,0,0,0,0,221,5,66,40, - 3,0,64,20,220,5,2,36,0,163,1,60, - 164,1,34,172,0,163,3,60,164,1,99,140, - 255,255,2,36,21,0,98,20,0,0,0,0, - 128,128,130,143,0,0,0,0,5,0,67,20, - 2,0,5,36,130,11,192,12,0,0,0,0, - 128,128,130,175,2,0,5,36,10,0,6,36, - 128,128,132,143,0,131,7,60,8,96,231,36, - 156,11,192,12,16,0,160,175,0,0,2,142, - 2,131,3,60,192,6,99,36,255,3,66,48, - 66,24,192,8,64,0,66,36,0,163,2,60, - 164,1,66,140,2,131,3,60,192,6,99,36, - 0,128,66,52,0,0,98,172,255,31,4,60, - 255,255,132,52,2,131,2,60,208,6,66,36, - 36,16,68,0,0,160,5,60,37,16,69,0, - 2,131,3,60,176,12,99,36,2,131,1,60, - 196,6,32,172,2,131,1,60,200,6,34,172, - 12,0,2,36,0,0,96,164,2,131,1,60, - 178,12,34,164,6,0,65,6,36,16,100,0, - 37,16,69,0,2,131,1,60,180,12,34,172, - 99,24,192,8,255,31,18,60,2,131,2,60, - 178,12,66,148,0,0,0,0,0,128,66,52, - 2,131,1,60,178,12,34,164,255,31,18,60, - 255,255,82,54,2,131,2,60,192,6,66,36, - 36,16,82,0,0,160,20,60,37,16,84,0, - 2,131,1,60,184,12,34,172,2,131,1,60, - 188,12,32,172,44,0,34,142,0,0,0,0, - 0,0,66,148,2,131,19,60,176,12,115,38, - 0,32,66,48,15,0,64,20,33,40,0,0, - 2,131,4,60,160,131,132,36,2,131,16,60, - 192,131,16,38,33,40,0,2,2,131,7,60, - 24,132,231,36,15,63,192,12,71,2,6,36, - 1,0,4,36,33,40,0,2,188,7,192,12, - 71,2,6,36,33,40,0,0,33,48,0,0, - 36,16,114,2,44,0,35,142,37,16,84,0, - 4,0,98,172,44,0,36,142,208,7,7,36, - 129,67,192,12,2,0,132,36,12,0,64,20, - 0,0,0,0,44,0,34,142,0,0,0,0, - 2,0,69,148,2,131,4,60,15,63,192,12, - 56,132,132,36,255,255,4,36,2,131,5,60, - 
192,131,165,36,188,7,192,12,79,2,6,36, - 44,0,34,142,0,33,3,36,2,0,67,164, - 8,0,34,142,0,0,0,0,0,0,64,172, - 44,0,191,143,40,0,180,143,36,0,179,143, - 32,0,178,143,28,0,177,143,24,0,176,143, - 8,0,224,3,48,0,189,39,232,255,189,39, - 128,128,132,143,0,128,2,52,16,0,191,175, - 2,131,1,60,3,0,128,4,178,12,34,164, - 177,11,192,12,0,0,0,0,16,0,191,143, - 24,0,189,39,8,0,224,3,0,0,0,0, - 8,0,224,3,0,0,0,0,0,0,0,0, - 0,0,0,0,240,255,2,52,2,131,1,60, - 33,8,34,0,208,12,32,172,240,255,66,36, - 251,255,65,4,0,0,0,0,2,131,2,60, - 208,12,66,36,0,163,1,60,12,1,32,172, - 0,163,1,60,212,5,34,172,1,0,2,60, - 148,133,128,175,144,133,128,175,0,163,1,60, - 8,0,224,3,216,5,34,172,0,0,136,148, - 4,0,138,148,1,0,2,49,34,0,64,20, - 2,0,137,148,0,25,10,0,38,24,106,0, - 38,24,105,0,240,255,99,48,2,131,15,60, - 208,12,239,37,33,40,111,0,68,133,142,143, - 128,0,3,36,0,0,162,140,4,0,171,148, - 23,0,64,16,43,16,194,1,9,0,64,16, - 6,0,172,148,7,0,11,21,8,0,173,148, - 5,0,44,21,0,0,0,0,3,0,77,21, - 10,0,162,148,8,0,224,3,0,0,0,0, - 255,255,99,36,10,0,96,16,240,255,165,36, - 43,16,175,0,238,255,64,16,0,0,162,140, - 248,127,229,37,248,127,165,36,218,24,192,8, - 0,0,162,140,8,0,224,3,254,255,2,36, - 8,0,224,3,255,255,2,36,8,0,224,3, - 0,0,0,0,0,0,136,148,68,133,142,143, - 1,0,2,49,53,0,64,20,2,0,137,148, - 0,131,2,60,4,0,138,148,12,1,89,140, - 0,25,10,0,38,24,106,0,38,24,105,0, - 240,255,99,48,2,131,15,60,208,12,239,37, - 33,56,111,0,128,0,3,36,0,0,248,140, - 4,0,235,148,43,16,216,1,14,0,64,16, - 6,0,236,148,24,0,11,21,8,0,237,148, - 22,0,44,21,255,127,2,60,20,0,77,21, - 255,255,66,52,43,16,2,3,2,0,64,16, - 33,16,198,1,0,0,226,172,10,0,229,164, - 8,0,224,3,0,0,2,36,3,0,0,23, - 1,0,57,35,0,131,2,60,12,1,89,172, - 33,16,198,1,0,0,226,172,10,0,229,164, - 4,0,232,164,6,0,233,164,8,0,234,164, - 8,0,224,3,1,0,2,36,255,255,99,36, - 11,0,96,16,0,0,0,0,240,255,231,36, - 43,16,239,0,221,255,64,16,0,0,248,140, - 248,127,231,37,248,127,231,36,8,25,192,8, - 0,0,248,140,8,0,224,3,0,0,2,36, - 144,133,130,143,0,0,0,0,1,0,66,32, - 144,133,130,175,8,0,224,3,255,255,2,36, - 8,0,224,3,0,0,0,0,164,128,130,143, - 0,0,0,0,7,0,130,20,232,255,189,39, - 160,128,130,143,2,131,3,60,208,12,99,36, - 0,17,2,0,108,25,192,8,33,16,67,0, - 42,16,130,0,3,0,64,16,255,255,2,36, - 164,128,128,175,160,128,130,175,164,128,130,143, - 160,128,131,143,35,48,130,0,1,0,101,36, - 0,16,162,40,25,0,64,16,0,25,5,0, - 68,133,135,143,2,131,2,60,33,16,67,0, - 208,12,66,140,0,0,0,0,43,16,226,0, - 4,0,64,16,0,0,0,0,255,255,198,36, - 6,0,192,16,0,16,162,40,1,0,165,36, - 0,16,162,40,243,255,64,20,16,0,99,36, - 0,16,162,40,7,0,64,16,0,25,5,0, - 2,131,2,60,208,12,66,36,160,128,133,175, - 164,128,132,175,108,25,192,8,33,16,98,0, - 33,16,0,0,255,255,3,36,164,128,128,175, - 160,128,131,175,8,0,224,3,24,0,189,39, - 0,0,0,0,0,0,0,0,24,255,189,39, - 228,0,191,175,224,0,190,175,220,0,183,175, - 216,0,182,175,212,0,181,175,208,0,180,175, - 204,0,179,175,200,0,178,175,196,0,177,175, - 192,0,176,175,44,28,192,12,0,0,0,0, - 176,128,132,39,15,63,192,12,1,0,17,36, - 24,0,176,39,164,68,192,12,33,32,0,2, - 24,0,162,131,0,0,0,0,137,25,192,8, - 32,0,8,36,0,0,2,130,32,0,8,36, - 253,255,72,16,1,0,16,38,255,255,16,38, - 9,0,8,36,249,255,72,16,1,0,16,38, - 255,255,16,38,0,0,2,146,0,0,0,0, - 208,255,66,36,10,0,66,44,27,0,64,16, - 33,32,0,2,33,40,0,0,212,68,192,12, - 33,48,0,0,0,0,3,146,0,0,0,0, - 208,255,99,36,10,0,99,44,9,0,96,16, - 33,136,64,0,1,0,16,38,0,0,2,146, - 0,0,0,0,208,255,66,36,10,0,66,44, - 251,255,64,20,1,0,16,38,255,255,16,38, - 0,0,2,130,32,0,8,36,253,255,72,16, - 
1,0,16,38,255,255,16,38,9,0,8,36, - 249,255,72,16,1,0,16,38,255,255,16,38, - 0,0,2,130,0,0,3,146,0,0,0,0, - 22,0,64,16,104,0,180,39,32,0,8,36, - 19,0,72,16,9,0,8,36,17,0,72,16, - 32,0,5,36,9,0,4,36,208,255,98,36, - 10,0,66,44,12,0,64,20,0,0,0,0, - 1,0,16,38,0,0,131,162,0,0,2,130, - 0,0,3,146,0,0,0,0,5,0,64,16, - 1,0,148,38,3,0,69,16,0,0,0,0, - 243,255,68,20,208,255,98,36,0,0,128,162, - 104,0,180,39,0,0,2,130,32,0,8,36, - 253,255,72,16,1,0,16,38,255,255,16,38, - 9,0,8,36,249,255,72,16,1,0,16,38, - 255,255,16,38,33,240,0,2,0,0,196,131, - 0,0,0,0,32,69,192,12,144,0,190,175, - 11,0,64,16,33,32,192,3,33,40,0,0, - 212,68,192,12,33,48,0,0,33,152,64,0, - 33,32,192,3,33,40,0,0,44,69,192,12, - 16,0,6,36,232,25,192,8,33,144,64,0, - 255,255,18,36,255,255,19,36,0,0,3,130, - 0,0,2,146,0,0,0,0,17,0,96,16, - 32,0,8,36,15,0,104,16,1,0,16,38, - 255,255,16,38,32,0,4,36,0,22,2,0, - 3,22,2,0,9,0,8,36,8,0,72,16, - 0,0,0,0,1,0,16,38,0,0,3,130, - 0,0,2,146,3,0,96,16,0,0,0,0, - 246,255,100,20,0,22,2,0,0,0,2,130, - 32,0,8,36,253,255,72,16,1,0,16,38, - 255,255,16,38,9,0,8,36,249,255,72,16, - 1,0,16,38,255,255,16,38,33,184,0,2, - 33,32,224,2,33,40,0,0,212,68,192,12, - 33,48,0,0,33,32,224,2,33,40,0,0, - 16,0,6,36,44,69,192,12,33,176,64,0, - 0,0,227,130,0,0,0,0,15,0,96,16, - 33,168,64,0,32,0,8,36,12,0,104,16, - 32,0,3,36,0,0,2,130,9,0,8,36, - 8,0,72,16,0,0,0,0,1,0,16,38, - 0,0,2,130,0,0,0,0,3,0,64,16, - 0,0,0,0,248,255,67,20,0,0,0,0, - 0,0,131,130,0,0,0,0,121,0,98,44, - 244,1,64,16,128,16,3,0,2,131,1,60, - 33,8,34,0,160,138,34,140,0,0,0,0, - 8,0,64,0,0,0,0,0,1,0,131,130, - 104,0,2,36,26,0,98,16,105,0,98,40, - 7,0,64,16,116,0,2,36,34,0,96,16, - 98,0,2,36,11,0,98,16,0,0,0,0, - 26,28,192,8,0,0,0,0,5,0,98,16, - 119,0,8,36,27,0,104,16,33,16,32,2, - 26,28,192,8,0,0,0,0,4,162,2,60, - 33,144,66,2,2,0,130,130,0,0,0,0, - 214,1,64,20,0,0,0,0,2,131,4,60, - 108,132,132,36,0,0,70,146,82,26,192,8, - 0,0,0,0,2,0,130,130,0,0,0,0, - 205,1,64,20,0,0,0,0,2,131,4,60, - 120,132,132,36,0,0,70,150,0,0,0,0, - 15,63,192,12,33,40,64,2,125,25,192,8, - 0,0,0,0,33,16,32,2,37,255,64,16, - 255,255,49,38,0,0,80,142,2,131,4,60, - 132,132,132,36,33,40,64,2,4,0,82,38, - 15,63,192,12,33,48,0,2,33,16,32,2, - 247,255,64,20,255,255,49,38,125,25,192,8, - 0,0,0,0,1,0,131,130,104,0,2,36, - 23,0,98,16,105,0,98,40,7,0,64,16, - 116,0,2,36,25,0,96,16,98,0,2,36, - 11,0,98,16,0,0,0,0,26,28,192,8, - 0,0,0,0,5,0,98,16,119,0,8,36, - 17,0,104,16,0,0,0,0,26,28,192,8, - 0,0,0,0,4,162,2,60,33,144,66,2, - 2,0,130,130,0,0,0,0,158,1,64,20, - 0,0,0,0,125,25,192,8,0,0,85,162, - 2,0,130,130,0,0,0,0,152,1,64,20, - 0,0,0,0,125,25,192,8,0,0,85,166, - 125,25,192,8,0,0,85,174,0,163,16,60, - 31,163,17,60,255,255,49,54,0,0,2,142, - 0,0,0,0,4,0,82,20,0,0,0,0, - 180,128,132,39,15,63,192,12,33,40,0,2, - 4,0,16,38,43,16,48,2,246,255,64,16, - 0,0,0,0,125,25,192,8,0,0,0,0, - 33,16,32,2,228,254,64,16,255,255,49,38, - 33,32,96,2,164,32,192,12,33,40,192,2, - 33,16,32,2,251,255,64,20,255,255,49,38, - 125,25,192,8,0,0,0,0,1,0,130,130, - 0,0,0,0,117,1,64,20,33,32,32,2, - 133,29,192,12,33,40,96,2,125,25,192,8, - 0,0,0,0,33,32,96,2,33,40,32,2, - 234,31,192,12,33,48,192,2,125,25,192,8, - 0,0,0,0,1,0,130,130,0,0,0,0, - 103,1,64,20,33,16,32,2,200,254,64,16, - 255,255,49,38,33,32,96,2,33,40,192,2, - 182,29,192,12,33,48,0,2,33,16,32,2, - 250,255,64,20,255,255,49,38,125,25,192,8, - 0,0,0,0,33,32,32,2,33,40,96,2, - 33,48,192,2,38,30,192,12,33,56,0,2, - 125,25,192,8,0,0,0,0,5,162,2,60, - 0,0,69,144,2,131,4,60,15,63,192,12, - 144,132,132,36,125,25,192,8,0,0,0,0, - 0,163,1,60,20,1,32,172,14,0,32,18, - 
33,128,0,0,164,7,192,12,1,0,16,38, - 143,63,192,12,0,0,0,0,143,63,192,12, - 0,0,0,0,143,63,192,12,0,0,0,0, - 143,63,192,12,0,0,0,0,43,16,17,2, - 244,255,64,20,0,0,0,0,184,63,192,12, - 0,0,0,0,0,163,16,60,20,1,16,142, - 0,0,0,0,7,0,17,22,33,40,32,2, - 2,131,4,60,164,132,132,36,15,63,192,12, - 33,40,32,2,125,25,192,8,0,0,0,0, - 2,131,4,60,188,132,132,36,15,63,192,12, - 33,48,0,2,125,25,192,8,0,0,0,0, - 0,0,226,130,7,162,8,60,16,0,64,16, - 33,144,72,2,33,16,32,2,134,254,64,16, - 255,255,49,38,0,0,85,174,2,131,4,60, - 132,132,132,36,33,40,64,2,15,63,192,12, - 33,48,160,2,4,0,82,38,33,16,32,2, - 247,255,64,20,255,255,49,38,125,25,192,8, - 0,0,0,0,33,16,32,2,119,254,64,16, - 255,255,49,38,0,0,80,142,2,131,4,60, - 132,132,132,36,33,40,64,2,4,0,82,38, - 15,63,192,12,33,48,0,2,33,16,32,2, - 247,255,64,20,255,255,49,38,125,25,192,8, - 0,0,0,0,7,162,16,60,64,0,17,38, - 2,131,4,60,228,132,132,36,33,40,0,2, - 0,0,6,142,0,0,0,0,15,63,192,12, - 4,0,16,38,42,16,17,2,247,255,64,20, - 7,162,8,60,128,0,16,37,176,0,17,37, - 2,131,4,60,228,132,132,36,33,40,0,2, - 0,0,6,142,0,0,0,0,15,63,192,12, - 4,0,16,38,42,16,17,2,247,255,64,20, - 7,162,8,60,192,0,16,37,240,0,17,37, - 2,131,4,60,228,132,132,36,33,40,0,2, - 0,0,6,142,0,0,0,0,15,63,192,12, - 4,0,16,38,42,16,17,2,247,255,64,20, - 0,0,0,0,125,25,192,8,0,0,0,0, - 1,0,130,130,0,0,0,0,222,0,64,20, - 33,16,32,2,63,254,64,16,255,255,49,38, - 33,32,96,2,213,29,192,12,33,40,160,2, - 33,16,32,2,251,255,64,20,255,255,49,38, - 125,25,192,8,0,0,0,0,1,0,130,130, - 0,0,0,0,208,0,64,20,33,16,32,2, - 49,254,64,16,255,255,49,38,33,32,96,2, - 161,31,192,12,33,40,192,2,33,16,32,2, - 251,255,64,20,255,255,49,38,125,25,192,8, - 0,0,0,0,33,16,32,2,38,254,64,16, - 255,255,49,38,208,32,192,12,33,32,0,0, - 33,32,0,0,164,32,192,12,33,40,0,0, - 40,29,192,12,0,0,0,0,133,29,192,12, - 255,255,4,36,33,16,32,2,245,255,64,20, - 255,255,49,38,125,25,192,8,0,0,0,0, - 1,0,131,130,87,0,2,36,27,0,98,16, - 88,0,98,40,7,0,64,16,114,0,2,36, - 37,0,96,16,82,0,2,36,9,0,98,16, - 0,0,0,0,125,25,192,8,0,0,0,0, - 5,0,98,16,119,0,8,36,15,0,104,16, - 0,0,0,0,125,25,192,8,0,0,0,0, - 2,0,130,130,0,0,0,0,159,0,64,20, - 0,0,0,0,60,65,192,12,33,32,96,2, - 184,128,132,39,33,40,96,2,15,63,192,12, - 33,48,64,0,125,25,192,8,0,0,0,0, - 2,0,130,130,0,0,0,0,147,0,64,20, - 33,32,96,2,162,65,192,12,33,40,160,2, - 242,253,64,20,33,40,160,2,2,131,4,60, - 8,133,132,36,15,63,192,12,33,48,96,2, - 125,25,192,8,0,0,0,0,33,16,32,2, - 233,253,64,16,255,255,49,38,40,29,192,12, - 0,0,0,0,33,16,32,2,252,255,64,20, - 255,255,49,38,125,25,192,8,0,0,0,0, - 1,0,133,130,87,0,2,36,29,0,162,16, - 88,0,162,40,5,0,64,16,82,0,2,36, - 11,0,162,16,33,16,32,2,125,25,192,8, - 0,0,0,0,114,0,2,36,5,0,162,16, - 119,0,8,36,19,0,168,16,33,32,64,2, - 125,25,192,8,0,0,0,0,33,16,32,2, - 206,253,64,16,255,255,49,38,168,69,192,12, - 33,32,64,2,184,128,132,39,33,40,64,2, - 15,63,192,12,33,48,64,0,1,0,82,38, - 33,16,32,2,247,255,64,20,255,255,49,38, - 125,25,192,8,0,0,0,0,33,32,64,2, - 29,70,192,12,33,40,160,2,189,253,64,20, - 33,40,160,2,2,131,4,60,40,133,132,36, - 15,63,192,12,33,48,64,2,125,25,192,8, - 0,0,0,0,144,0,164,143,122,28,192,12, - 0,0,0,0,125,25,192,8,0,0,0,0, - 33,16,96,2,175,253,64,16,255,255,115,38, - 143,63,192,12,0,0,0,0,33,16,96,2, - 252,255,64,20,255,255,115,38,125,25,192,8, - 0,0,0,0,33,16,32,2,165,253,64,16, - 255,255,49,38,33,32,96,2,208,32,192,12, - 33,40,192,2,33,16,32,2,251,255,64,20, - 255,255,49,38,125,25,192,8,0,0,0,0, - 1,0,130,146,0,0,0,0,159,255,66,36, - 0,22,2,0,3,30,2,0,24,0,98,44, - 27,0,64,16,128,16,3,0,2,131,1,60, - 
33,8,34,0,136,140,34,140,0,0,0,0, - 8,0,64,0,0,0,0,0,12,33,192,12, - 33,32,64,2,125,25,192,8,0,0,0,0, - 15,33,192,12,33,32,96,2,125,25,192,8, - 0,0,0,0,18,33,192,12,33,32,96,2, - 125,25,192,8,0,0,0,0,22,33,192,12, - 33,32,96,2,125,25,192,8,0,0,0,0, - 25,33,192,12,33,32,64,2,125,25,192,8, - 0,0,0,0,33,32,64,2,7,33,192,12, - 33,40,192,2,125,25,192,8,0,0,0,0, - 16,0,182,175,33,32,32,2,33,40,192,3, - 33,48,224,2,161,33,192,12,33,56,160,2, - 125,25,192,8,0,0,0,0,33,136,0,0, - 2,131,4,60,72,133,132,36,15,63,192,12, - 1,0,49,38,32,0,34,46,250,255,64,20, - 0,0,0,0,125,25,192,8,0,0,0,0, - 2,131,4,60,92,133,132,36,15,63,192,12, - 33,40,128,2,123,25,192,8,0,0,0,0, - 228,0,191,143,224,0,190,143,220,0,183,143, - 216,0,182,143,212,0,181,143,208,0,180,143, - 204,0,179,143,200,0,178,143,196,0,177,143, - 192,0,176,143,8,0,224,3,232,0,189,39, - 232,255,189,39,2,131,5,60,192,154,165,36, - 20,0,191,175,16,0,176,175,0,0,162,140, - 0,0,0,0,9,0,64,16,33,128,160,0, - 0,0,5,142,192,128,132,39,15,63,192,12, - 4,0,16,38,0,0,2,142,0,0,0,0, - 249,255,64,20,0,0,0,0,20,0,191,143, - 16,0,176,143,8,0,224,3,24,0,189,39, - 0,0,132,144,0,0,0,0,208,255,130,36, - 10,0,66,44,4,0,64,16,0,22,4,0, - 3,22,2,0,89,28,192,8,208,255,66,36, - 159,255,130,36,6,0,66,44,4,0,64,16, - 0,22,4,0,3,22,2,0,89,28,192,8, - 169,255,66,36,191,255,130,36,6,0,66,44, - 3,0,64,20,0,22,4,0,89,28,192,8, - 255,255,2,36,3,22,2,0,201,255,66,36, - 8,0,224,3,0,0,0,0,216,255,189,39, - 24,0,178,175,33,144,128,0,32,0,191,175, - 28,0,179,175,20,0,177,175,16,0,176,175, - 0,0,81,142,0,0,0,0,65,28,192,12, - 33,32,32,2,33,24,64,0,255,255,19,36, - 9,0,115,16,0,129,3,0,65,28,192,12, - 1,0,36,38,33,24,64,0,4,0,115,16, - 2,0,34,38,0,0,66,174,115,28,192,8, - 37,16,3,2,255,255,2,36,32,0,191,143, - 28,0,179,143,24,0,178,143,20,0,177,143, - 16,0,176,143,8,0,224,3,40,0,189,39, - 176,255,189,39,64,0,180,175,33,160,128,0, - 72,0,191,175,68,0,181,175,60,0,179,175, - 56,0,178,175,52,0,177,175,48,0,176,175, - 0,0,130,130,0,0,0,0,53,0,64,20, - 33,128,0,0,27,67,192,12,33,32,0,0, - 1,0,4,36,27,67,192,12,33,128,64,0, - 2,0,4,36,27,67,192,12,33,136,64,0, - 33,24,64,0,255,255,2,36,5,0,2,18, - 0,0,0,0,3,0,34,18,0,0,0,0, - 6,0,98,20,255,255,2,52,2,131,4,60, - 15,63,192,12,60,137,132,36,29,29,192,8, - 0,0,0,0,5,0,2,18,0,0,0,0, - 3,0,34,18,0,0,0,0,6,0,98,20, - 1,0,2,50,2,131,4,60,15,63,192,12, - 104,137,132,36,29,29,192,8,0,0,0,0, - 6,0,64,16,255,0,5,50,2,131,4,60, - 15,63,192,12,132,137,132,36,29,29,192,8, - 0,0,0,0,2,131,4,60,176,137,132,36, - 3,50,16,0,3,18,17,0,16,0,162,175, - 255,0,98,48,20,0,162,175,3,18,3,0, - 255,0,39,50,15,63,192,12,24,0,162,175, - 29,29,192,8,0,0,0,0,40,0,180,175, - 58,0,21,36,32,0,19,36,255,255,18,36, - 32,0,177,39,40,0,162,143,0,0,0,0, - 0,0,67,128,0,0,0,0,3,0,117,16, - 0,0,0,0,3,0,115,20,0,0,0,0, - 1,0,66,36,40,0,162,175,91,28,192,12, - 40,0,164,39,33,24,64,0,75,0,114,16, - 0,0,0,0,40,0,162,143,0,0,35,166, - 0,0,67,128,0,0,0,0,3,0,117,16, - 0,0,0,0,3,0,115,20,0,0,0,0, - 1,0,66,36,40,0,162,175,91,28,192,12, - 40,0,164,39,33,24,64,0,60,0,114,16, - 1,0,16,38,0,0,34,150,0,26,3,0, - 37,16,67,0,0,0,34,166,3,0,2,42, - 220,255,64,20,2,0,49,38,32,0,165,151, - 0,0,0,0,1,0,162,48,7,0,64,16, - 0,0,0,0,2,131,4,60,208,137,132,36, - 15,63,192,12,255,0,165,48,25,29,192,8, - 0,0,0,0,36,0,162,151,0,0,0,0, - 0,7,66,48,6,0,64,16,0,0,0,0, - 2,131,4,60,15,63,192,12,0,138,132,36, - 25,29,192,8,0,0,0,0,255,66,192,12, - 33,32,0,0,1,0,4,36,34,0,165,151, - 0,0,0,0,255,66,192,12,33,128,0,0, - 36,0,165,151,0,0,0,0,255,66,192,12, - 2,0,4,36,2,131,4,60,15,63,192,12, - 
32,138,132,36,2,131,4,60,80,138,132,36, - 15,63,192,12,33,40,0,2,196,128,132,39, - 200,128,134,39,31,21,192,12,32,0,165,39, - 36,0,162,151,1,0,16,38,0,1,66,36, - 36,0,162,167,8,0,2,42,7,0,64,16, - 0,0,0,0,8,29,192,8,0,0,0,0, - 2,131,4,60,116,138,132,36,15,63,192,12, - 33,40,128,2,72,0,191,143,68,0,181,143, - 64,0,180,143,60,0,179,143,56,0,178,143, - 52,0,177,143,48,0,176,143,8,0,224,3, - 80,0,189,39,0,0,0,0,0,0,0,0, - 224,255,189,39,16,0,176,175,33,128,0,0, - 20,0,177,175,33,136,0,0,24,0,191,175, - 33,32,0,2,162,65,192,12,33,40,0,0, - 43,0,64,16,0,0,0,0,1,0,16,38, - 64,0,2,42,249,255,64,20,33,32,0,2, - 33,128,0,0,85,85,17,36,33,32,0,2, - 162,65,192,12,85,85,5,36,32,0,64,16, - 0,0,0,0,1,0,16,38,64,0,2,42, - 249,255,64,20,33,32,0,2,33,128,0,0, - 170,170,17,52,33,32,0,2,162,65,192,12, - 170,170,5,52,21,0,64,16,0,0,0,0, - 1,0,16,38,64,0,2,42,249,255,64,20, - 33,32,0,2,33,128,0,0,255,255,17,52, - 33,32,0,2,162,65,192,12,255,255,5,52, - 10,0,64,16,0,0,0,0,1,0,16,38, - 64,0,2,42,249,255,64,20,33,32,0,2, - 2,131,4,60,15,63,192,12,240,140,132,36, - 101,29,192,8,0,0,0,0,60,65,192,12, - 33,32,0,2,2,131,4,60,4,141,132,36, - 33,40,32,2,33,48,0,2,15,63,192,12, - 33,56,64,0,24,0,191,143,20,0,177,143, - 16,0,176,143,8,0,224,3,32,0,189,39, - 0,0,0,0,0,0,0,0,232,255,189,39, - 16,0,191,175,210,7,192,12,0,0,0,0, - 139,14,192,12,0,0,0,0,180,10,192,12, - 0,0,0,0,32,133,132,143,1,0,2,36, - 42,16,68,0,9,0,64,16,0,2,3,36, - 64,34,4,0,2,131,1,60,33,8,35,0, - 196,246,32,172,0,2,99,36,42,16,100,0, - 250,255,64,20,0,0,0,0,16,0,191,143, - 24,0,189,39,8,0,224,3,0,0,0,0, - 208,255,189,39,24,0,178,175,33,144,128,0, - 32,0,180,175,33,160,160,0,44,0,191,175, - 40,0,182,175,36,0,181,175,28,0,179,175, - 20,0,177,175,3,0,128,26,16,0,176,175, - 149,29,192,8,1,0,147,38,1,0,20,36, - 32,133,147,143,255,255,82,38,255,255,2,36, - 20,0,66,18,255,255,21,36,2,131,22,60, - 192,246,214,38,108,29,192,12,33,128,128,2, - 42,16,19,2,10,0,64,16,64,18,16,0, - 33,136,86,0,242,21,192,12,33,32,32,2, - 133,12,192,12,33,32,0,2,1,0,16,38, - 42,16,19,2,249,255,64,20,0,2,49,38, - 255,255,82,38,240,255,85,22,0,0,0,0, - 44,0,191,143,40,0,182,143,36,0,181,143, - 32,0,180,143,28,0,179,143,24,0,178,143, - 20,0,177,143,16,0,176,143,8,0,224,3, - 48,0,189,39,216,255,189,39,24,0,178,175, - 33,144,160,0,28,0,179,175,33,152,192,0, - 32,0,191,175,20,0,177,175,3,0,128,24, - 16,0,176,175,195,29,192,8,1,0,145,36, - 1,0,4,36,32,133,145,143,33,128,128,0, - 42,16,17,2,8,0,64,16,33,32,0,2, - 33,40,64,2,250,29,192,12,33,48,96,2, - 1,0,16,38,42,16,17,2,250,255,64,20, - 33,32,0,2,32,0,191,143,28,0,179,143, - 24,0,178,143,20,0,177,143,16,0,176,143, - 8,0,224,3,40,0,189,39,224,255,189,39, - 24,0,191,175,20,0,177,175,3,0,128,24, - 16,0,176,175,222,29,192,8,1,0,145,36, - 1,0,4,36,32,133,145,143,33,128,128,0, - 42,16,17,2,7,0,64,16,0,0,0,0, - 237,29,192,12,33,32,0,2,1,0,16,38, - 42,16,17,2,251,255,64,20,0,0,0,0, - 24,0,191,143,20,0,177,143,16,0,176,143, - 8,0,224,3,32,0,189,39,64,34,4,0, - 2,131,2,60,192,246,66,36,33,32,130,0, - 44,0,131,140,1,0,2,36,32,0,130,172, - 16,0,2,36,2,0,98,164,8,0,130,140, - 0,0,0,0,8,0,224,3,0,0,64,172, - 208,255,189,39,33,48,128,0,64,18,6,0, - 2,131,3,60,192,246,99,36,36,0,177,175, - 33,136,67,0,40,0,191,175,32,0,176,175, - 4,0,34,142,0,0,0,0,4,0,64,20, - 33,128,160,0,1,0,4,36,133,29,192,12, - 33,40,192,0,3,0,0,30,221,5,2,42, - 17,30,192,8,1,0,16,36,3,0,64,20, - 33,32,32,2,220,5,16,36,33,32,32,2, - 208,7,5,36,108,0,131,140,12,0,2,36, - 2,0,98,164,16,0,162,39,8,0,98,172, - 0,128,2,54,12,0,96,172,16,0,162,175, - 255,255,2,36,20,0,162,175,2,131,2,60, - 
0,155,66,36,98,31,192,12,24,0,162,175, - 40,0,191,143,36,0,177,143,32,0,176,143, - 8,0,224,3,48,0,189,39,56,254,189,39, - 160,1,176,175,33,128,192,0,48,1,164,175, - 33,32,224,0,64,18,5,0,2,131,3,60, - 192,246,99,36,33,16,67,0,56,1,162,175, - 64,18,16,0,33,16,67,0,40,0,168,39, - 196,1,191,175,192,1,190,175,188,1,183,175, - 184,1,182,175,180,1,181,175,176,1,180,175, - 172,1,179,175,168,1,178,175,164,1,177,175, - 64,1,162,175,12,0,160,24,96,1,168,175, - 32,133,131,143,0,0,0,0,42,16,163,0, - 19,1,64,16,1,0,2,36,5,0,0,26, - 42,16,3,2,15,1,64,16,1,0,2,36, - 3,0,176,20,33,40,0,0,86,31,192,8, - 1,0,2,36,212,68,192,12,33,48,0,0, - 6,0,65,4,104,1,162,175,33,72,64,0, - 35,72,9,0,104,1,169,175,87,30,192,8, - 112,1,160,175,1,0,8,36,112,1,168,175, - 1,0,4,36,133,29,192,12,33,40,0,0, - 237,29,192,12,33,32,0,2,24,0,169,39, - 56,1,168,143,255,0,2,36,80,1,169,175, - 108,0,8,141,43,1,163,39,72,1,168,175, - 0,0,98,160,255,255,66,36,253,255,65,4, - 255,255,99,36,64,1,169,143,0,0,0,0, - 120,0,41,141,64,1,168,143,128,1,169,175, - 124,0,8,141,64,1,169,143,136,1,168,175, - 44,0,34,141,0,0,0,0,12,0,64,172, - 44,0,34,141,0,0,0,0,16,0,64,172, - 44,0,34,141,120,1,160,175,32,0,64,172, - 44,0,34,141,88,1,160,175,24,0,64,172, - 48,1,168,143,0,0,0,0,168,0,0,25, - 40,0,169,39,144,1,169,175,88,1,168,143, - 0,0,0,0,255,0,2,49,4,0,86,36, - 60,0,194,42,2,0,64,16,0,0,0,0, - 60,0,22,36,104,1,169,143,0,0,0,0, - 2,0,32,17,0,0,0,0,104,1,182,143, - 56,1,164,143,72,1,168,143,12,0,2,36, - 2,0,2,165,80,1,169,143,0,128,194,54, - 8,0,9,173,12,0,0,173,0,0,34,173, - 255,255,8,36,4,0,40,173,144,1,168,143, - 0,0,0,0,8,0,40,173,88,1,168,143, - 96,1,169,143,208,7,5,36,98,31,192,12, - 0,0,40,173,0,128,5,52,0,128,6,52, - 128,1,164,143,0,0,0,0,129,67,192,12, - 2,0,7,36,13,0,64,20,0,0,0,0, - 88,1,165,143,2,131,4,60,15,63,192,12, - 64,141,132,36,120,1,169,143,0,0,0,0, - 1,0,41,37,20,0,34,41,117,0,64,16, - 120,1,169,175,32,31,192,8,0,0,0,0, - 128,1,168,143,64,1,169,143,8,0,2,141, - 255,255,8,36,136,0,53,141,0,0,0,0, - 50,0,72,16,33,184,0,0,1,0,4,36, - 4,0,18,36,4,0,3,36,0,0,190,142, - 8,0,166,142,112,1,169,143,255,63,212,51, - 30,0,32,17,33,184,244,2,42,16,116,0, - 27,0,64,16,33,152,96,0,144,1,168,143, - 0,0,0,0,33,136,72,2,33,128,102,0, - 15,0,128,16,0,0,0,0,0,0,2,146, - 0,0,35,146,0,0,0,0,10,0,67,16, - 33,48,192,2,2,131,4,60,92,141,132,36, - 88,1,165,143,16,0,163,175,0,0,2,146, - 33,56,64,2,15,63,192,12,20,0,162,175, - 33,32,0,0,1,0,115,38,1,0,16,38, - 1,0,49,38,42,16,116,2,235,255,64,20, - 1,0,82,38,33,24,0,0,4,0,181,142, - 0,128,194,51,217,255,64,16,0,0,0,0, - 128,1,169,143,0,0,0,0,8,0,34,141, - 0,0,0,0,25,0,128,16,18,0,87,164, - 9,0,246,18,33,48,192,2,2,131,4,60, - 140,141,132,36,88,1,165,143,0,0,0,0, - 15,63,192,12,33,56,224,2,5,31,192,8, - 0,0,0,0,64,1,168,143,0,0,0,0, - 136,0,2,141,96,1,169,143,8,0,70,140, - 0,0,34,141,0,0,198,140,0,0,0,0, - 7,0,194,16,0,0,0,0,88,1,165,143, - 2,131,4,60,15,63,192,12,184,141,132,36, - 64,1,168,143,0,0,0,0,136,0,4,141, - 152,21,192,12,0,0,0,0,64,1,169,143, - 0,0,0,0,136,0,53,173,128,1,168,143, - 8,128,2,52,0,0,0,165,2,0,2,165, - 8,0,0,173,12,0,0,165,136,1,169,143, - 8,0,2,36,2,0,34,165,4,0,40,141, - 128,1,169,143,136,1,168,175,4,0,41,141, - 64,1,168,143,128,1,169,175,120,0,9,173, - 136,1,169,143,0,0,0,0,124,0,9,173, - 88,1,168,143,48,1,169,143,1,0,8,37, - 42,16,9,1,91,255,64,20,88,1,168,175, - 64,1,168,143,0,0,0,0,44,0,3,141, - 0,0,0,0,12,0,98,140,0,0,0,0, - 5,0,64,16,0,0,0,0,12,0,101,140, - 2,131,4,60,15,63,192,12,212,141,132,36, - 64,1,169,143,0,0,0,0,44,0,35,141, - 0,0,0,0,16,0,98,140,0,0,0,0, - 
5,0,64,16,0,0,0,0,16,0,101,140, - 2,131,4,60,15,63,192,12,240,141,132,36, - 64,1,168,143,0,0,0,0,44,0,3,141, - 0,0,0,0,32,0,98,140,0,0,0,0, - 5,0,64,16,0,0,0,0,32,0,101,140, - 2,131,4,60,15,63,192,12,16,142,132,36, - 64,1,169,143,0,0,0,0,44,0,35,141, - 0,0,0,0,24,0,98,140,0,0,0,0, - 5,0,64,16,0,0,0,0,24,0,101,140, - 2,131,4,60,15,63,192,12,48,142,132,36, - 196,1,191,143,192,1,190,143,188,1,183,143, - 184,1,182,143,180,1,181,143,176,1,180,143, - 172,1,179,143,168,1,178,143,164,1,177,143, - 160,1,176,143,8,0,224,3,200,1,189,39, - 224,255,189,39,16,0,176,175,33,128,128,0, - 24,0,191,175,20,0,177,175,44,0,4,142, - 0,0,0,0,0,0,130,148,0,0,0,0, - 0,32,66,48,7,0,64,20,33,136,160,0, - 0,0,133,148,2,131,4,60,15,63,192,12, - 80,142,132,36,156,31,192,8,3,0,2,36, - 2,0,132,36,33,40,0,0,33,48,0,0, - 129,67,192,12,33,56,32,2,13,0,64,16, - 33,40,0,0,44,0,3,142,0,33,2,36, - 2,0,98,164,8,0,2,142,33,48,0,0, - 0,0,64,172,44,0,4,142,33,56,32,2, - 129,67,192,12,2,0,132,36,9,0,64,20, - 0,32,5,36,44,0,2,142,0,0,0,0, - 2,0,69,148,2,131,4,60,15,63,192,12, - 108,142,132,36,156,31,192,8,1,0,2,36, - 44,0,4,142,0,32,6,36,129,67,192,12, - 33,56,32,2,8,0,64,20,33,16,0,0, - 44,0,2,142,0,0,0,0,0,0,69,148, - 2,131,4,60,15,63,192,12,132,142,132,36, - 2,0,2,36,24,0,191,143,20,0,177,143, - 16,0,176,143,8,0,224,3,32,0,189,39, - 224,255,189,39,24,0,178,175,33,144,160,0, - 28,0,191,175,20,0,177,175,7,0,128,4, - 16,0,176,175,24,133,130,143,0,0,0,0, - 255,255,66,36,42,16,68,0,4,0,64,16, - 33,136,128,0,24,133,130,143,33,32,0,0, - 255,255,81,36,33,128,128,0,42,16,48,2, - 7,0,64,20,33,32,0,2,193,31,192,12, - 33,40,64,2,1,0,16,38,42,16,48,2, - 251,255,64,16,33,32,0,2,28,0,191,143, - 24,0,178,143,20,0,177,143,16,0,176,143, - 8,0,224,3,32,0,189,39,8,0,224,3, - 0,0,0,0,232,255,189,39,16,0,191,175, - 236,63,192,12,1,16,4,36,85,0,2,36, - 131,131,1,60,128,18,34,160,16,0,191,143, - 24,0,189,39,8,0,224,3,0,0,0,0, - 216,255,189,39,28,0,177,175,33,136,128,0, - 32,0,178,175,33,144,160,0,212,128,132,39, - 36,0,191,175,15,63,192,12,24,0,176,175, - 9,0,64,26,33,128,0,0,0,0,37,146, - 1,0,49,38,220,128,132,39,15,63,192,12, - 1,0,16,38,42,16,18,2,249,255,64,20, - 0,0,0,0,228,128,132,39,15,63,192,12, - 0,0,0,0,36,0,191,143,32,0,178,143, - 28,0,177,143,24,0,176,143,8,0,224,3, - 40,0,189,39,48,255,189,39,33,56,128,0, - 192,0,178,175,33,144,160,0,200,0,180,175, - 33,160,192,0,255,255,226,36,6,0,66,44, - 204,0,191,175,196,0,179,175,188,0,177,175, - 2,0,64,20,184,0,176,175,1,0,7,36, - 2,0,64,30,0,0,0,0,1,0,18,36, - 2,0,128,30,64,18,7,0,60,0,20,36, - 2,131,3,60,192,246,99,36,33,136,67,0, - 4,0,34,142,0,0,0,0,4,0,64,20, - 33,152,64,2,1,0,4,36,133,29,192,12, - 33,40,224,0,255,31,4,60,255,255,132,52, - 0,128,133,54,120,0,162,39,36,16,68,0, - 0,160,3,60,37,16,67,0,104,0,165,175, - 108,0,160,175,112,0,162,175,12,0,2,36, - 80,0,160,167,82,0,162,167,80,0,162,39, - 36,16,68,0,37,128,67,0,104,0,162,39, - 36,16,68,0,232,128,132,143,37,16,67,0, - 84,0,176,175,88,0,162,175,92,0,160,175, - 5,0,128,16,4,0,2,36,82,0,162,167, - 255,255,2,36,92,0,165,175,88,0,162,175, - 44,0,34,142,0,0,0,0,0,0,66,148, - 0,0,0,0,0,32,66,48,7,0,64,20, - 33,40,0,0,255,255,4,36,2,131,5,60, - 184,142,165,36,188,7,192,12,208,1,6,36, - 33,40,0,0,44,0,34,142,33,48,0,0, - 4,0,80,172,44,0,36,142,208,7,7,36, - 129,67,192,12,2,0,132,36,12,0,64,20, - 0,0,0,0,44,0,34,142,0,0,0,0, - 2,0,69,148,2,131,4,60,15,63,192,12, - 108,142,132,36,255,255,4,36,2,131,5,60, - 184,142,165,36,188,7,192,12,216,1,6,36, - 34,11,192,12,1,0,4,36,0,163,16,60, - 4,1,16,142,0,163,2,60,4,1,66,140, - 0,0,0,0,252,255,2,18,0,33,3,36, 
- 44,0,34,142,0,0,0,0,2,0,67,164, - 8,0,34,142,0,0,0,0,0,0,64,172, - 0,163,16,60,4,1,16,142,44,0,36,142, - 0,0,0,0,4,0,130,140,0,0,0,0, - 0,0,66,148,0,0,0,0,0,128,66,48, - 10,0,64,20,0,0,0,0,44,0,35,142, - 0,0,0,0,4,0,98,140,0,0,0,0, - 0,0,66,148,0,0,0,0,0,128,66,48, - 250,255,64,16,0,0,0,0,255,255,115,38, - 19,0,96,18,33,40,64,2,44,0,35,142, - 0,0,0,0,4,0,98,140,0,0,0,0, - 0,0,66,148,0,0,0,0,0,128,66,48, - 229,255,64,16,0,0,0,0,4,0,98,140, - 0,0,0,0,0,0,66,148,0,0,0,0, - 0,128,66,48,250,255,64,20,0,0,0,0, - 89,32,192,8,0,0,0,0,2,131,4,60, - 200,142,132,36,33,48,128,2,0,163,3,60, - 4,1,99,140,0,128,2,52,82,0,162,167, - 35,128,112,0,15,63,192,12,33,56,0,2, - 19,0,0,18,64,41,18,0,35,40,178,0, - 128,40,5,0,33,40,178,0,192,40,5,0, - 26,0,176,0,2,0,0,22,0,0,0,0, - 13,0,7,0,255,255,1,36,4,0,1,22, - 0,128,1,60,2,0,161,20,0,0,0,0, - 13,0,6,0,18,40,0,0,236,128,132,39, - 15,63,192,12,0,0,0,0,204,0,191,143, - 200,0,180,143,196,0,179,143,192,0,178,143, - 188,0,177,143,184,0,176,143,8,0,224,3, - 208,0,189,39,224,255,189,39,20,0,177,175, - 33,136,128,0,24,0,191,175,180,10,192,12, - 16,0,176,175,34,11,192,12,1,0,4,36, - 16,133,132,143,0,163,16,60,4,1,16,142, - 193,63,192,12,0,0,0,0,0,163,2,60, - 4,1,66,140,0,0,0,0,35,40,80,0, - 73,252,162,36,99,0,66,44,4,0,64,16, - 0,0,0,0,2,131,4,60,196,32,192,8, - 0,143,132,36,5,0,160,20,0,0,0,0, - 2,131,4,60,36,143,132,36,196,32,192,8, - 33,40,0,0,2,131,4,60,76,143,132,36, - 15,63,192,12,1,0,16,36,3,0,48,18, - 0,0,0,0,34,11,192,12,33,32,0,0, - 0,129,144,175,24,0,191,143,20,0,177,143, - 16,0,176,143,8,0,224,3,32,0,189,39, - 200,255,189,39,32,0,178,175,33,144,128,0, - 33,48,64,2,44,0,181,175,1,131,21,60, - 60,252,181,38,33,56,160,2,40,0,180,175, - 2,131,20,60,144,143,148,38,36,0,179,175, - 0,163,19,60,120,1,115,142,0,163,3,60, - 120,1,99,140,32,131,2,60,48,0,191,175, - 28,0,177,175,24,0,176,175,16,0,180,175, - 33,32,96,2,35,136,67,0,84,64,192,12, - 33,40,32,2,3,0,64,18,33,128,64,0, - 10,0,0,22,0,0,0,0,16,0,180,175, - 33,32,96,2,33,40,32,2,33,48,64,2, - 244,63,192,12,33,56,160,2,33,128,2,2, - 5,0,0,18,33,40,96,2,2,131,4,60, - 168,143,132,36,252,32,192,8,33,40,96,2, - 2,131,4,60,204,143,132,36,15,63,192,12, - 33,48,177,0,48,0,191,143,44,0,181,143, - 40,0,180,143,36,0,179,143,32,0,178,143, - 28,0,177,143,24,0,176,143,8,0,224,3, - 56,0,189,39,0,163,1,60,232,5,36,172, - 0,163,1,60,8,0,224,3,236,5,37,172, - 28,129,132,175,8,0,224,3,0,0,0,0, - 16,129,132,175,8,0,224,3,0,0,0,0, - 15,0,132,48,20,129,132,175,8,0,224,3, - 0,0,0,0,24,129,132,175,8,0,224,3, - 0,0,0,0,32,129,132,175,8,0,224,3, - 0,0,0,0,33,72,128,0,33,80,160,0, - 33,88,192,0,7,162,4,60,48,1,132,52, - 7,162,8,60,0,1,8,53,20,129,130,143, - 24,129,131,143,128,48,2,0,28,129,130,143, - 3,0,197,52,2,0,96,16,0,0,130,172, - 67,0,197,52,16,129,130,143,0,0,0,0, - 2,0,64,16,33,24,160,0,0,1,99,52, - 36,129,130,143,0,0,0,0,2,0,64,16, - 0,0,0,0,0,4,99,52,32,129,130,143, - 0,0,3,173,3,0,64,16,7,162,5,60, - 0,0,2,173,7,162,5,60,4,1,165,52, - 7,162,6,60,8,1,198,52,255,0,2,60, - 255,255,66,52,7,162,3,60,12,1,99,52, - 7,162,4,60,16,1,132,52,36,16,66,1, - 0,0,169,172,0,0,194,172,43,16,7,0, - 192,16,2,0,0,0,107,172,8,0,224,3, - 0,0,130,172,7,162,3,60,40,1,99,52, - 3,0,2,36,0,163,1,60,20,1,32,172, - 8,0,224,3,0,0,98,172,232,255,189,39, - 16,0,191,175,33,24,0,0,7,162,6,60, - 40,1,198,52,15,0,4,60,63,66,132,52, - 0,0,197,140,0,0,0,0,16,0,162,48, - 7,0,64,20,1,0,99,36,42,16,131,0, - 249,255,64,16,0,0,0,0,2,131,4,60, - 122,33,192,8,240,143,132,36,36,129,130,143, - 0,0,0,0,3,0,64,20,33,24,0,0, - 125,33,192,8,33,16,0,0,1,0,5,36, - 
15,0,4,60,63,66,132,52,0,163,2,60, - 20,1,66,140,0,0,0,0,247,255,69,16, - 1,0,99,36,42,16,131,0,249,255,64,16, - 0,0,0,0,0,163,5,60,20,1,165,140, - 2,131,4,60,24,144,132,36,15,63,192,12, - 0,0,0,0,1,0,2,36,16,0,191,143, - 24,0,189,39,8,0,224,3,0,0,0,0, - 224,255,189,39,24,0,191,175,33,72,192,0, - 255,31,3,60,255,255,99,52,33,64,0,0, - 36,32,131,0,0,160,2,60,37,32,130,0, - 36,40,163,0,16,0,32,25,37,40,162,0, - 0,0,134,144,0,0,167,144,0,0,0,0, - 7,0,199,16,1,0,165,36,2,131,4,60, - 72,144,132,36,15,63,192,12,33,40,0,1, - 157,33,192,8,1,0,2,36,1,0,8,37, - 42,16,9,1,242,255,64,20,1,0,132,36, - 33,16,0,0,24,0,191,143,32,0,189,39, - 8,0,224,3,0,0,0,0,0,163,2,60, - 232,5,66,140,152,255,189,39,80,0,180,175, - 120,0,180,143,64,0,176,175,33,128,160,0, - 68,0,177,175,33,136,192,0,72,0,178,175, - 33,144,224,0,100,0,191,175,96,0,190,175, - 92,0,183,175,88,0,182,175,84,0,181,175, - 76,0,179,175,12,0,64,16,16,0,164,175, - 0,163,2,60,236,5,66,140,0,0,0,0, - 7,0,64,16,0,0,0,0,0,163,2,60, - 236,5,66,140,0,0,0,0,1,8,66,44, - 10,0,64,20,16,0,2,60,0,163,5,60, - 232,5,165,140,0,163,6,60,236,5,198,140, - 2,131,4,60,15,63,192,12,124,144,132,36, - 7,35,192,8,0,0,0,0,16,0,168,143, - 0,0,0,0,43,16,72,0,6,0,64,16, - 0,0,0,0,2,131,4,60,15,63,192,12, - 172,144,132,36,7,35,192,8,0,0,0,0, - 224,132,130,143,0,0,0,0,11,0,64,20, - 0,0,0,0,0,163,4,60,236,5,132,140, - 13,8,192,12,0,0,0,0,255,31,3,60, - 255,255,99,52,36,16,67,0,0,160,3,60, - 37,16,67,0,224,132,130,175,228,132,130,143, - 0,0,0,0,11,0,64,20,0,0,0,0, - 0,163,4,60,236,5,132,140,13,8,192,12, - 0,0,0,0,255,31,3,60,255,255,99,52, - 36,16,67,0,0,160,3,60,37,16,67,0, - 228,132,130,175,224,132,133,143,0,163,6,60, - 232,5,198,140,228,132,135,143,2,131,4,60, - 15,63,192,12,208,144,132,36,16,129,133,143, - 20,129,134,143,2,131,4,60,15,63,192,12, - 8,145,132,36,7,162,2,60,232,0,66,52, - 0,0,83,140,1,0,3,130,105,0,2,36, - 7,0,98,20,251,255,2,60,1,0,2,36, - 36,129,130,175,4,0,2,60,0,8,66,52, - 10,34,192,8,37,152,98,2,36,129,128,175, - 255,247,66,52,36,152,98,2,7,162,2,60, - 232,0,66,52,0,0,83,172,0,0,5,130, - 114,0,2,36,3,0,162,16,82,0,2,36, - 3,0,162,20,119,0,2,36,42,34,192,8, - 33,176,0,0,3,0,162,16,87,0,2,36, - 3,0,162,20,108,0,2,36,42,34,192,8, - 1,0,22,36,3,0,162,16,76,0,2,36, - 3,0,162,20,116,0,2,36,42,34,192,8, - 2,0,22,36,118,0,162,16,84,0,2,36, - 116,0,162,16,0,0,0,0,2,131,4,60, - 15,63,192,12,52,145,132,36,7,35,192,8, - 0,0,0,0,0,0,38,130,0,0,0,0, - 12,0,192,16,99,0,2,36,3,0,194,16, - 67,0,2,36,4,0,194,20,33,152,0,0, - 5,0,19,36,61,34,192,8,5,0,21,36, - 2,131,1,60,80,155,50,160,61,34,192,8, - 33,168,0,0,33,168,0,0,5,0,19,36, - 2,131,1,60,80,155,32,160,16,0,168,143, - 0,163,18,60,236,5,82,142,0,0,0,0, - 197,0,0,17,255,255,20,37,255,255,194,38, - 2,0,87,44,2,0,30,36,33,128,160,2, - 42,16,112,2,73,0,64,20,0,0,0,0, - 42,0,224,18,5,0,2,36,13,0,2,22, - 0,0,0,0,25,0,64,26,33,136,0,0, - 224,132,130,143,0,0,0,0,33,16,81,0, - 0,0,81,160,1,0,49,38,42,16,50,2, - 249,255,64,20,33,48,64,2,105,34,192,8, - 0,0,0,0,2,131,3,60,33,24,112,0, - 80,155,99,144,0,0,0,0,9,0,64,26, - 33,136,0,0,224,132,130,143,0,0,0,0, - 33,16,81,0,1,0,49,38,0,0,67,160, - 42,16,50,2,249,255,64,20,0,0,0,0, - 33,48,64,2,0,163,4,60,232,5,132,140, - 224,132,133,143,0,0,0,0,28,33,192,12, - 1,0,7,36,76,33,192,12,0,0,0,0, - 83,33,192,12,0,0,0,0,147,0,64,20, - 0,0,0,0,3,0,192,18,33,48,64,2, - 22,0,222,22,0,0,0,0,0,163,4,60, - 232,5,132,140,228,132,133,143,0,0,0,0, - 28,33,192,12,33,56,0,0,76,33,192,12, - 0,0,0,0,83,33,192,12,0,0,0,0, - 131,0,64,20,0,0,0,0,8,0,222,22, - 0,0,0,0,224,132,132,143,228,132,133,143, - 
0,0,0,0,129,33,192,12,33,48,64,2, - 122,0,64,20,0,0,0,0,1,0,16,38, - 42,16,112,2,185,255,64,16,0,0,0,0, - 255,255,148,38,255,255,2,36,178,255,130,22, - 33,128,160,2,7,35,192,8,0,0,0,0, - 180,10,192,12,0,0,0,0,34,11,192,12, - 1,0,4,36,0,0,34,130,0,0,0,0, - 6,0,64,16,33,184,0,0,24,0,160,175, - 2,131,1,60,88,155,52,164,171,34,192,8, - 33,176,0,0,6,0,23,36,4,0,2,36, - 24,0,160,175,2,131,1,60,88,155,34,164, - 33,176,0,0,0,8,30,36,24,0,177,143, - 0,0,0,0,42,16,241,2,83,0,64,20, - 64,16,17,0,2,131,8,60,88,155,8,37, - 33,168,72,0,0,0,178,150,0,0,0,0, - 26,0,210,3,2,0,64,22,0,0,0,0, - 13,0,7,0,255,255,1,36,4,0,65,22, - 0,128,1,60,2,0,193,23,0,0,0,0, - 13,0,6,0,18,16,0,0,16,0,168,143, - 0,0,0,0,24,0,72,0,33,56,192,2, - 33,128,0,0,0,163,19,60,4,1,115,142, - 0,163,4,60,232,5,132,140,224,132,133,143, - 18,160,0,0,0,0,0,0,0,0,0,0, - 28,33,192,12,33,48,64,2,10,0,128,26, - 0,0,0,0,76,33,192,12,0,0,0,0, - 83,33,192,12,0,0,0,0,48,0,64,20, - 1,0,16,38,42,16,20,2,248,255,64,20, - 0,0,0,0,2,131,5,60,140,145,165,36, - 0,163,16,60,4,1,16,142,3,0,192,18, - 0,0,0,0,2,131,5,60,128,145,165,36, - 2,131,4,60,96,145,132,36,15,63,192,12, - 33,48,64,2,19,0,19,18,24,0,146,2, - 18,24,0,0,35,16,19,2,0,0,0,0, - 27,0,98,0,2,0,64,20,0,0,0,0, - 13,0,7,0,18,16,0,0,2,131,4,60, - 152,145,132,36,64,41,2,0,35,40,162,0, - 128,40,5,0,33,40,162,0,15,63,192,12, - 192,40,5,0,255,34,192,8,2,0,181,38, - 2,131,4,60,168,145,132,36,15,63,192,12, - 2,0,181,38,1,0,49,38,42,16,241,2, - 178,255,64,16,0,0,0,0,1,0,214,38, - 2,0,194,42,166,255,64,20,0,0,0,0, - 100,0,191,143,96,0,190,143,92,0,183,143, - 88,0,182,143,84,0,181,143,80,0,180,143, - 76,0,179,143,72,0,178,143,68,0,177,143, - 64,0,176,143,8,0,224,3,104,0,189,39, - 0,0,0,0,43,16,134,0,0,0,164,175, - 4,0,165,175,8,0,166,175,7,0,64,20, - 12,0,167,175,43,16,196,0,5,0,64,20, - 1,0,2,36,43,16,167,0,2,0,64,16, - 43,16,229,0,255,255,2,36,8,0,224,3, - 0,0,0,0,232,255,189,39,3,131,4,60, - 208,12,132,36,170,0,5,36,16,0,191,175, - 144,71,192,12,60,0,6,36,2,131,6,60, - 112,155,198,36,2,131,2,60,212,246,66,140, - 2,131,3,60,216,246,99,132,0,0,194,172, - 4,0,195,164,2,131,2,60,138,155,66,148, - 2,131,3,60,132,155,99,148,2,131,4,60, - 134,155,132,148,2,131,5,60,136,155,165,148, - 3,131,1,60,216,12,34,164,2,131,2,60, - 130,155,66,148,3,131,10,60,218,12,74,37, - 3,0,199,136,0,0,199,152,4,0,200,128, - 5,0,201,128,3,0,71,169,0,0,71,185, - 4,0,72,161,5,0,73,161,3,131,1,60, - 238,12,35,164,3,131,1,60,242,12,36,164, - 3,131,1,60,246,12,37,164,3,131,1,60, - 240,12,34,164,16,0,191,143,24,0,189,39, - 8,0,224,3,0,0,0,0,3,131,2,60, - 216,12,66,140,3,131,3,60,220,12,99,140, - 3,131,1,60,208,12,34,172,3,131,1,60, - 212,12,35,172,3,131,2,60,238,12,66,148, - 3,131,3,60,240,12,99,148,3,131,4,60, - 242,12,132,148,232,255,189,39,16,0,176,175, - 3,131,1,60,234,12,35,164,24,133,131,143, - 20,0,191,175,3,131,1,60,224,12,32,172, - 3,131,1,60,228,12,32,172,3,131,1,60, - 248,12,32,172,3,131,1,60,252,12,32,172, - 3,131,1,60,8,13,32,164,3,131,1,60, - 4,13,32,164,3,131,1,60,232,12,34,164, - 33,16,68,0,3,131,1,60,236,12,36,164, - 3,131,1,60,244,12,34,164,8,0,96,24, - 1,0,16,36,150,35,192,12,33,32,0,2, - 24,133,130,143,1,0,16,38,42,16,80,0, - 250,255,64,16,0,0,0,0,206,35,192,12, - 0,0,0,0,52,36,192,12,0,0,0,0, - 1,0,2,36,3,131,1,60,0,13,34,164, - 3,0,2,36,3,131,1,60,2,13,32,164, - 3,131,1,60,20,13,34,172,2,131,1,60, - 164,247,34,172,20,0,191,143,16,0,176,143, - 8,0,224,3,24,0,189,39,224,255,189,39, - 20,0,177,175,33,136,128,0,16,0,176,175, - 192,129,17,0,3,131,4,60,16,13,132,36, - 33,32,4,2,187,0,5,36,24,0,191,175, - 
144,71,192,12,128,0,6,36,2,131,2,60, - 140,155,66,148,100,0,3,36,3,131,1,60, - 33,8,48,0,24,13,35,172,0,18,2,0, - 37,16,34,2,3,131,1,60,33,8,48,0, - 16,13,34,164,22,36,192,12,33,32,32,2, - 4,0,2,36,64,138,17,0,3,131,1,60, - 33,8,48,0,20,13,34,172,2,131,1,60, - 33,8,49,0,164,247,34,172,3,131,1,60, - 33,8,48,0,52,13,32,172,3,131,1,60, - 33,8,48,0,56,13,32,172,3,131,1,60, - 33,8,48,0,106,13,32,164,3,131,1,60, - 33,8,48,0,110,13,32,164,3,131,1,60, - 33,8,48,0,114,13,32,164,3,131,1,60, - 33,8,48,0,120,13,32,172,24,0,191,143, - 20,0,177,143,16,0,176,143,8,0,224,3, - 32,0,189,39,24,133,130,143,216,255,189,39, - 20,0,177,175,1,0,17,36,36,0,191,175, - 32,0,180,175,28,0,179,175,24,0,178,175, - 55,0,64,24,16,0,176,175,3,131,20,60, - 228,12,148,38,3,131,2,60,56,13,66,36, - 128,0,83,36,124,0,82,36,128,0,16,36, - 0,0,130,142,0,0,0,0,6,0,34,22, - 33,32,32,2,0,0,64,174,101,36,192,12, - 0,0,96,174,8,36,192,8,128,0,115,38, - 3,131,4,60,33,32,144,0,40,13,132,140, - 3,131,5,60,33,40,176,0,44,13,165,140, - 244,255,134,142,248,255,135,142,20,35,192,12, - 0,0,0,0,17,0,64,20,33,32,32,2, - 3,131,3,60,33,24,112,0,48,13,99,148, - 3,131,2,60,33,16,80,0,16,13,66,148, - 0,0,0,0,8,0,98,20,0,0,0,0, - 3,131,1,60,33,8,48,0,106,13,32,164, - 101,36,192,12,33,32,32,2,8,36,192,8, - 128,0,115,38,0,0,64,174,125,36,192,12, - 0,0,96,174,128,0,115,38,128,0,82,38, - 24,133,130,143,1,0,49,38,42,16,81,0, - 210,255,64,16,128,0,16,38,36,0,191,143, - 32,0,180,143,28,0,179,143,24,0,178,143, - 20,0,177,143,16,0,176,143,8,0,224,3, - 40,0,189,39,192,33,4,0,3,131,2,60, - 28,13,66,36,33,24,130,0,3,131,5,60, - 208,12,165,140,3,131,6,60,212,12,198,140, - 0,0,101,172,4,0,102,172,12,0,66,36, - 3,131,3,60,224,12,99,140,33,16,130,0, - 3,131,1,60,33,8,36,0,36,13,35,172, - 3,131,3,60,216,12,99,140,3,131,5,60, - 220,12,165,140,0,0,67,172,4,0,69,172, - 3,131,2,60,33,16,68,0,16,13,66,148, - 3,131,1,60,33,8,36,0,8,0,224,3, - 48,13,34,164,24,133,130,143,224,255,189,39, - 20,0,177,175,1,0,17,36,24,0,191,175, - 38,0,64,24,16,0,176,175,128,0,16,36, - 3,131,4,60,33,32,144,0,40,13,132,140, - 3,131,5,60,33,40,176,0,44,13,165,140, - 3,131,6,60,216,12,198,140,3,131,7,60, - 220,12,231,140,20,35,192,12,0,0,0,0, - 18,0,64,20,0,0,0,0,3,131,3,60, - 33,24,112,0,48,13,99,148,3,131,2,60, - 33,16,80,0,16,13,66,148,0,0,0,0, - 9,0,98,20,0,0,0,0,3,131,2,60, - 33,16,80,0,20,13,66,140,0,0,0,0, - 3,0,64,16,0,0,0,0,203,36,192,12, - 33,32,32,2,24,133,130,143,1,0,49,38, - 42,16,81,0,221,255,64,16,128,0,16,38, - 24,0,191,143,20,0,177,143,16,0,176,143, - 8,0,224,3,32,0,189,39,192,41,4,0, - 3,131,3,60,33,24,101,0,20,13,99,140, - 4,0,2,36,16,0,98,20,240,255,189,39, - 1,0,2,36,64,26,4,0,3,131,1,60, - 33,8,37,0,20,13,34,172,2,131,1,60, - 33,8,35,0,164,247,34,172,1,0,2,36, - 3,131,1,60,33,8,37,0,110,13,34,164, - 3,131,1,60,33,8,37,0,112,13,32,164, - 8,0,224,3,16,0,189,39,224,255,189,39, - 24,0,178,175,33,144,128,0,16,0,176,175, - 192,129,18,0,28,0,191,175,20,0,177,175, - 3,131,2,60,33,16,80,0,20,13,66,140, - 0,0,0,0,18,0,64,16,4,0,17,36, - 16,0,81,16,254,255,66,36,2,0,66,44, - 4,0,64,16,64,18,18,0,161,36,192,12, - 0,0,0,0,64,18,18,0,3,131,1,60, - 33,8,48,0,20,13,49,172,2,131,1,60, - 33,8,34,0,164,247,49,172,3,131,1,60, - 33,8,48,0,110,13,32,164,28,0,191,143, - 24,0,178,143,20,0,177,143,16,0,176,143, - 8,0,224,3,32,0,189,39,3,131,4,60, - 208,12,132,140,3,131,5,60,212,12,165,140, - 3,131,6,60,216,12,198,140,3,131,7,60, - 220,12,231,140,232,255,189,39,16,0,191,175, - 20,35,192,12,0,0,0,0,10,0,64,20, - 1,0,2,36,3,131,1,60,252,12,34,172, - 1,0,2,36,3,131,1,60,4,13,34,164, - 
3,131,1,60,6,13,32,164,197,36,192,8, - 1,0,2,36,3,131,2,60,248,12,66,140, - 0,0,0,0,9,0,64,20,1,0,2,36, - 72,37,192,12,0,0,0,0,1,0,2,36, - 3,131,1,60,8,13,34,164,3,131,1,60, - 10,13,32,164,1,0,2,36,3,131,1,60, - 248,12,34,172,16,0,191,143,24,0,189,39, - 8,0,224,3,0,0,0,0,224,255,189,39, - 20,0,177,175,33,136,128,0,16,0,176,175, - 192,129,17,0,24,0,191,175,3,131,2,60, - 33,16,80,0,114,13,66,132,0,0,0,0, - 5,0,64,16,1,0,2,36,3,131,1,60, - 33,8,48,0,67,37,192,8,56,13,34,172, - 3,131,4,60,208,12,132,36,3,131,2,60, - 64,13,66,36,33,24,2,2,3,131,1,60, - 33,8,48,0,60,13,32,164,0,0,133,140, - 4,0,134,140,0,0,101,172,4,0,102,172, - 12,0,66,36,3,131,3,60,224,12,99,140, - 33,16,2,2,3,131,1,60,33,8,48,0, - 72,13,35,172,3,131,3,60,216,12,99,140, - 3,131,5,60,220,12,165,140,0,0,67,172, - 4,0,69,172,3,131,2,60,33,16,80,0, - 16,13,66,148,3,131,1,60,33,8,48,0, - 84,13,34,164,0,0,132,140,3,131,5,60, - 212,12,165,140,3,131,6,60,216,12,198,140, - 3,131,7,60,220,12,231,140,20,35,192,12, - 0,0,0,0,5,0,64,20,0,0,0,0, - 3,131,1,60,33,8,48,0,22,37,192,8, - 86,13,32,164,3,131,2,60,228,12,66,140, - 2,131,3,60,128,155,99,148,192,17,2,0, - 3,131,1,60,33,8,34,0,108,13,34,148, - 0,0,0,0,33,16,67,0,3,131,1,60, - 33,8,48,0,86,13,34,164,3,131,2,60, - 232,12,66,148,192,129,17,0,3,131,1,60, - 33,8,48,0,88,13,34,164,3,131,2,60, - 234,12,66,148,33,32,32,2,3,131,1,60, - 33,8,48,0,90,13,34,164,3,131,3,60, - 236,12,99,148,3,131,2,60,33,16,80,0, - 52,13,66,140,3,131,5,60,60,13,165,36, - 3,131,1,60,33,8,48,0,52,13,32,172, - 3,131,1,60,33,8,48,0,96,13,34,172, - 3,131,1,60,33,8,48,0,92,13,35,164, - 3,131,2,60,252,12,66,140,3,131,1,60, - 33,8,48,0,100,13,34,172,80,40,192,12, - 33,40,5,2,1,0,2,36,3,131,1,60, - 33,8,48,0,56,13,32,172,3,131,1,60, - 33,8,48,0,114,13,34,164,3,131,1,60, - 33,8,48,0,116,13,32,164,24,0,191,143, - 20,0,177,143,16,0,176,143,8,0,224,3, - 32,0,189,39,232,255,189,39,128,0,2,36, - 3,131,4,60,228,12,132,140,3,131,5,60, - 104,13,165,36,16,0,191,175,192,25,4,0, - 3,131,1,60,33,8,35,0,104,13,34,164, - 151,40,192,12,33,40,101,0,2,131,4,60, - 15,63,192,12,12,146,132,36,16,0,191,143, - 24,0,189,39,8,0,224,3,0,0,0,0, - 232,255,189,39,16,0,191,175,102,37,192,12, - 0,0,0,0,13,38,192,12,0,0,0,0, - 16,0,191,143,24,0,189,39,8,0,224,3, - 0,0,0,0,24,133,130,143,208,255,189,39, - 24,0,178,175,33,144,0,0,28,0,179,175, - 1,0,19,36,44,0,191,175,40,0,182,175, - 36,0,181,175,32,0,180,175,20,0,177,175, - 110,0,64,24,16,0,176,175,3,131,21,60, - 216,12,181,38,3,131,22,60,16,13,214,38, - 128,0,208,38,128,0,20,36,192,17,18,0, - 3,131,4,60,33,32,148,0,40,13,132,140, - 3,131,5,60,33,40,180,0,44,13,165,140, - 0,0,166,142,3,131,7,60,220,12,231,140, - 0,0,0,0,20,35,192,12,33,136,86,0, - 10,0,64,20,0,0,0,0,3,131,3,60, - 33,24,116,0,48,13,99,148,3,131,2,60, - 33,16,84,0,16,13,66,148,0,0,0,0, - 74,0,98,16,0,0,0,0,4,0,2,142, - 0,0,0,0,70,0,64,16,0,0,0,0, - 12,0,4,142,16,0,5,142,0,0,166,142, - 3,131,7,60,220,12,231,140,20,35,192,12, - 0,0,0,0,61,0,65,4,0,0,0,0, - 58,0,64,18,0,0,0,0,12,0,4,142, - 16,0,5,142,12,0,38,142,16,0,39,142, - 20,35,192,12,0,0,0,0,50,0,64,4, - 0,0,0,0,12,0,4,142,16,0,5,142, - 12,0,38,142,16,0,39,142,20,35,192,12, - 0,0,0,0,43,0,64,20,0,0,0,0, - 20,0,5,142,8,0,3,142,20,0,36,142, - 8,0,34,142,33,40,163,0,33,32,130,0, - 43,16,164,0,33,0,64,20,0,0,0,0, - 32,0,164,20,0,0,0,0,24,0,4,142, - 28,0,5,142,24,0,38,142,28,0,39,142, - 20,35,192,12,0,0,0,0,23,0,64,4, - 0,0,0,0,24,0,4,142,28,0,5,142, - 24,0,38,142,28,0,39,142,20,35,192,12, - 0,0,0,0,16,0,64,20,0,0,0,0, - 32,0,4,150,32,0,35,150,0,0,0,0, - 43,16,131,0,9,0,64,20,0,0,0,0, - 
8,0,131,20,0,0,0,0,0,0,2,150, - 0,0,35,150,0,0,0,0,43,16,67,0, - 2,0,64,16,0,0,0,0,33,144,96,2, - 128,0,16,38,24,133,130,143,1,0,115,38, - 42,16,83,0,154,255,64,16,128,0,148,38, - 3,131,1,60,228,12,50,172,12,0,64,22, - 192,17,18,0,3,131,2,60,216,12,66,140, - 3,131,3,60,220,12,99,140,3,131,1,60, - 208,12,34,172,3,131,1,60,212,12,35,172, - 3,131,1,60,3,38,192,8,224,12,32,172, - 3,131,3,60,33,24,98,0,28,13,99,140, - 3,131,4,60,33,32,130,0,32,13,132,140, - 3,131,1,60,208,12,35,172,3,131,1,60, - 212,12,36,172,3,131,3,60,33,24,98,0, - 36,13,99,140,3,131,1,60,33,8,34,0, - 24,13,34,140,0,0,0,0,33,24,98,0, - 3,131,1,60,224,12,35,172,44,0,191,143, - 40,0,182,143,36,0,181,143,32,0,180,143, - 28,0,179,143,24,0,178,143,20,0,177,143, - 16,0,176,143,8,0,224,3,48,0,189,39, - 24,133,130,143,208,255,189,39,36,0,181,175, - 1,0,21,36,44,0,191,175,40,0,182,175, - 32,0,180,175,28,0,179,175,24,0,178,175, - 20,0,177,175,82,0,64,24,16,0,176,175, - 3,131,22,60,216,12,214,38,3,131,2,60, - 48,13,66,36,128,0,84,36,96,0,83,36, - 124,0,82,36,120,0,81,36,128,0,16,36, - 0,0,36,142,0,0,69,142,0,0,198,142, - 3,131,7,60,220,12,231,140,20,35,192,12, - 0,0,0,0,55,0,64,20,0,0,0,0, - 0,0,131,150,0,0,98,150,0,0,0,0, - 50,0,98,20,0,0,0,0,3,131,4,60, - 33,32,144,0,28,13,132,140,3,131,5,60, - 33,40,176,0,32,13,165,140,248,255,198,142, - 3,131,7,60,212,12,231,140,20,35,192,12, - 0,0,0,0,37,0,64,16,0,0,0,0, - 8,0,196,142,3,131,3,60,33,24,112,0, - 36,13,99,140,0,0,0,0,43,16,131,0, - 29,0,64,20,0,0,0,0,27,0,131,20, - 0,0,0,0,0,0,196,142,3,131,5,60, - 220,12,165,140,0,0,38,142,0,0,71,142, - 20,35,192,12,0,0,0,0,16,0,64,4, - 0,0,0,0,0,0,196,142,3,131,5,60, - 220,12,165,140,0,0,38,142,0,0,71,142, - 20,35,192,12,0,0,0,0,9,0,64,20, - 0,0,0,0,0,0,99,150,0,0,130,150, - 0,0,0,0,43,16,67,0,3,0,64,20, - 0,0,0,0,22,36,192,12,33,32,160,2, - 128,0,148,38,128,0,115,38,128,0,82,38, - 128,0,49,38,24,133,130,143,1,0,181,38, - 42,16,85,0,185,255,64,16,128,0,16,38, - 44,0,191,143,40,0,182,143,36,0,181,143, - 32,0,180,143,28,0,179,143,24,0,178,143, - 20,0,177,143,16,0,176,143,8,0,224,3, - 48,0,189,39,216,255,189,39,3,131,4,60, - 0,13,132,36,32,0,191,175,28,0,179,175, - 24,0,178,175,20,0,177,175,16,0,176,175, - 0,0,130,132,0,0,0,0,14,0,64,16, - 0,0,0,0,3,131,2,60,2,13,66,148, - 3,131,3,60,234,12,99,148,1,0,66,36, - 3,131,1,60,2,13,34,164,255,255,66,48, - 43,16,67,0,3,0,64,20,0,0,0,0, - 11,39,192,12,0,0,128,164,3,131,4,60, - 8,13,132,36,0,0,130,132,0,0,0,0, - 14,0,64,16,0,0,0,0,3,131,2,60, - 10,13,66,148,3,131,3,60,234,12,99,148, - 1,0,66,36,3,131,1,60,10,13,34,164, - 255,255,66,48,43,16,67,0,3,0,64,20, - 0,0,0,0,24,39,192,12,0,0,128,164, - 3,131,4,60,4,13,132,36,0,0,130,132, - 0,0,0,0,14,0,64,16,0,0,0,0, - 3,131,2,60,6,13,66,148,3,131,3,60, - 244,12,99,148,1,0,66,36,3,131,1,60, - 6,13,34,164,255,255,66,48,43,16,67,0, - 3,0,64,20,0,0,0,0,37,39,192,12, - 0,0,128,164,24,133,130,143,0,0,0,0, - 78,0,64,24,1,0,17,36,3,131,2,60, - 16,13,66,36,226,0,83,36,128,0,82,36, - 128,0,16,36,3,131,2,60,33,16,80,0, - 110,13,66,132,0,0,0,0,18,0,64,16, - 0,0,0,0,3,131,2,60,33,16,80,0, - 112,13,66,148,0,0,0,0,1,0,66,36, - 96,0,66,166,3,131,3,60,236,12,99,148, - 255,255,66,48,43,16,67,0,6,0,64,20, - 0,0,0,0,3,131,1,60,33,8,48,0, - 110,13,32,164,79,39,192,12,33,32,32,2, - 3,131,2,60,33,16,80,0,106,13,66,132, - 0,0,0,0,18,0,64,16,0,0,0,0, - 3,131,2,60,33,16,80,0,108,13,66,148, - 0,0,0,0,1,0,66,36,92,0,66,166, - 3,131,3,60,232,12,99,148,255,255,66,48, - 43,16,67,0,6,0,64,20,0,0,0,0, - 3,131,1,60,33,8,48,0,106,13,32,164, - 129,39,192,12,33,32,32,2,0,0,98,134, - 
0,0,0,0,16,0,64,16,0,0,0,0, - 3,131,2,60,33,16,80,0,116,13,66,148, - 0,0,0,0,1,0,66,36,100,0,66,166, - 3,131,3,60,246,12,99,148,255,255,66,48, - 43,16,67,0,4,0,64,20,0,0,0,0, - 0,0,96,166,191,39,192,12,33,32,32,2, - 128,0,115,38,128,0,82,38,24,133,130,143, - 1,0,49,38,42,16,81,0,185,255,64,16, - 128,0,16,38,32,0,191,143,28,0,179,143, - 24,0,178,143,20,0,177,143,16,0,176,143, - 8,0,224,3,40,0,189,39,232,255,189,39, - 16,0,191,175,52,36,192,12,0,0,0,0, - 1,0,2,36,3,131,1,60,0,13,34,164, - 3,131,1,60,2,13,32,164,16,0,191,143, - 24,0,189,39,8,0,224,3,0,0,0,0, - 232,255,189,39,16,0,191,175,72,37,192,12, - 0,0,0,0,1,0,2,36,3,131,1,60, - 8,13,34,164,3,131,1,60,10,13,32,164, - 16,0,191,143,24,0,189,39,8,0,224,3, - 0,0,0,0,240,255,189,39,3,131,1,60, - 248,12,32,172,3,131,1,60,252,12,32,172, - 8,0,224,3,16,0,189,39,24,133,130,143, - 224,255,189,39,20,0,177,175,1,0,17,36, - 24,0,191,175,23,0,64,24,16,0,176,175, - 128,0,16,36,3,131,4,60,33,32,144,0, - 40,13,132,140,3,131,5,60,33,40,176,0, - 44,13,165,140,3,131,6,60,216,12,198,140, - 3,131,7,60,220,12,231,140,20,35,192,12, - 0,0,0,0,3,0,64,20,1,0,49,38, - 74,39,192,8,1,0,2,36,24,133,130,143, - 0,0,0,0,42,16,81,0,236,255,64,16, - 128,0,16,38,33,16,0,0,24,0,191,143, - 20,0,177,143,16,0,176,143,8,0,224,3, - 32,0,189,39,232,255,189,39,192,41,4,0, - 16,0,191,175,3,131,3,60,33,24,101,0, - 20,13,99,140,1,0,2,36,16,0,98,20, - 2,0,2,36,64,26,4,0,3,131,1,60, - 33,8,37,0,20,13,34,172,2,131,1,60, - 33,8,35,0,164,247,34,172,1,0,2,36, - 3,131,1,60,33,8,37,0,110,13,34,164, - 3,131,1,60,33,8,37,0,125,39,192,8, - 112,13,32,164,21,0,98,20,3,0,3,36, - 64,18,4,0,3,131,1,60,33,8,37,0, - 20,13,35,172,2,131,1,60,33,8,34,0, - 164,247,35,172,3,131,2,60,33,16,69,0, - 120,13,66,140,0,0,0,0,1,0,66,36, - 3,131,1,60,33,8,37,0,44,39,192,12, - 120,13,34,172,3,0,64,16,0,0,0,0, - 161,36,192,12,0,0,0,0,16,0,191,143, - 24,0,189,39,8,0,224,3,0,0,0,0, - 224,255,189,39,16,0,176,175,33,128,128,0, - 20,0,177,175,3,131,17,60,208,12,49,38, - 24,0,191,175,0,0,36,142,3,131,5,60, - 212,12,165,140,3,131,6,60,216,12,198,140, - 3,131,7,60,220,12,231,140,20,35,192,12, - 0,0,0,0,33,32,0,2,22,36,192,12, - 1,0,80,44,92,37,192,12,0,0,0,0, - 206,35,192,12,0,0,0,0,33,0,0,22, - 0,0,0,0,0,0,36,142,3,131,5,60, - 212,12,165,140,3,131,6,60,216,12,198,140, - 3,131,7,60,220,12,231,140,20,35,192,12, - 0,0,0,0,22,0,64,20,0,0,0,0, - 3,131,2,60,238,12,66,148,3,131,3,60, - 240,12,99,148,3,131,4,60,242,12,132,148, - 3,131,1,60,232,12,34,164,3,131,1,60, - 234,12,35,164,3,131,1,60,161,36,192,12, - 236,12,36,164,3,131,1,60,52,36,192,12, - 8,13,32,164,1,0,2,36,3,131,1,60, - 0,13,34,164,3,131,1,60,2,13,32,164, - 24,0,191,143,20,0,177,143,16,0,176,143, - 8,0,224,3,32,0,189,39,232,255,189,39, - 192,17,4,0,16,0,191,175,3,131,1,60, - 33,8,34,0,56,13,34,140,0,0,0,0, - 3,0,64,16,0,0,0,0,203,36,192,12, - 0,0,0,0,16,0,191,143,24,0,189,39, - 8,0,224,3,0,0,0,0,3,131,1,60, - 248,12,32,172,3,131,1,60,8,0,224,3, - 8,13,32,164,232,255,189,39,192,25,4,0, - 1,0,2,36,16,0,191,175,3,131,1,60, - 33,8,35,0,203,36,192,12,52,13,34,172, - 16,0,191,143,24,0,189,39,8,0,224,3, - 0,0,0,0,192,33,4,0,3,131,2,60, - 28,13,66,36,33,24,130,0,4,0,166,140, - 8,0,167,140,0,0,102,172,4,0,103,172, - 12,0,66,36,12,0,163,140,33,16,130,0, - 3,131,1,60,33,8,36,0,36,13,35,172, - 16,0,163,140,20,0,166,140,0,0,67,172, - 4,0,70,172,24,0,163,148,1,0,2,36, - 3,131,1,60,33,8,36,0,106,13,34,164, - 3,131,1,60,33,8,36,0,48,13,35,164, - 26,0,162,148,3,131,1,60,33,8,36,0, - 8,0,224,3,108,13,34,164,28,0,130,148, - 3,131,1,60,232,12,34,164,30,0,130,148, - 
3,131,1,60,234,12,34,164,32,0,130,148, - 3,131,1,60,236,12,34,164,40,0,130,140, - 3,131,1,60,8,0,224,3,252,12,34,172, - 224,255,189,39,16,0,176,175,33,128,160,0, - 192,25,4,0,3,131,2,60,16,13,66,36, - 20,0,177,175,33,136,98,0,24,0,191,175, - 4,0,4,142,8,0,5,142,12,0,38,142, - 16,0,39,142,20,35,192,12,0,0,0,0, - 48,0,64,4,1,0,2,36,4,0,4,142, - 8,0,5,142,12,0,38,142,16,0,39,142, - 20,35,192,12,0,0,0,0,40,0,64,20, - 33,16,0,0,12,0,4,142,20,0,35,142, - 0,0,0,0,43,16,131,0,34,0,64,20, - 1,0,2,36,32,0,131,20,33,16,0,0, - 16,0,4,142,20,0,5,142,24,0,38,142, - 28,0,39,142,20,35,192,12,0,0,0,0, - 24,0,64,4,1,0,2,36,16,0,4,142, - 20,0,5,142,24,0,38,142,28,0,39,142, - 20,35,192,12,0,0,0,0,16,0,64,20, - 33,16,0,0,16,0,4,142,20,0,5,142, - 3,131,6,60,216,12,198,140,3,131,7,60, - 220,12,231,140,20,35,192,12,0,0,0,0, - 6,0,64,20,1,0,2,36,24,0,3,150, - 32,0,34,150,0,0,0,0,43,16,67,0, - 1,0,66,56,24,0,191,143,20,0,177,143, - 16,0,176,143,8,0,224,3,32,0,189,39, - 44,133,130,143,216,255,189,39,20,0,177,175, - 33,136,128,0,32,0,180,175,33,160,160,0, - 36,0,191,175,28,0,179,175,24,0,178,175, - 53,0,64,16,16,0,176,175,2,131,19,60, - 192,4,115,38,33,32,96,2,54,21,192,12, - 1,0,5,36,33,128,64,0,8,0,0,22, - 64,26,17,0,2,131,2,60,33,16,67,0, - 176,247,66,140,33,24,99,2,1,0,66,36, - 143,40,192,8,240,242,98,172,8,0,4,142, - 64,146,17,0,20,242,101,38,33,40,69,2, - 172,41,192,12,33,48,128,2,33,24,64,0, - 60,0,98,40,2,0,64,16,0,242,98,38, - 60,0,3,36,33,136,66,2,33,32,32,2, - 33,40,0,2,1,0,2,36,17,0,2,162, - 0,128,98,52,0,0,2,174,6,23,192,12, - 18,0,3,166,10,0,64,20,33,32,0,2, - 2,131,2,60,33,16,82,0,172,247,66,140, - 0,0,0,0,1,0,66,36,152,21,192,12, - 236,0,34,174,143,40,192,8,0,0,0,0, - 2,131,2,60,33,16,82,0,168,247,66,140, - 0,0,0,0,1,0,66,36,232,0,34,174, - 36,0,191,143,32,0,180,143,28,0,179,143, - 24,0,178,143,20,0,177,143,16,0,176,143, - 8,0,224,3,40,0,189,39,44,133,130,143, - 216,255,189,39,20,0,177,175,33,136,128,0, - 32,0,180,175,33,160,160,0,36,0,191,175, - 28,0,179,175,24,0,178,175,53,0,64,16, - 16,0,176,175,2,131,19,60,192,4,115,38, - 33,32,96,2,54,21,192,12,1,0,5,36, - 33,128,64,0,8,0,0,22,64,26,17,0, - 2,131,2,60,33,16,67,0,176,247,66,140, - 33,24,99,2,1,0,66,36,214,40,192,8, - 240,242,98,172,8,0,4,142,64,146,17,0, - 20,242,101,38,33,40,69,2,74,42,192,12, - 33,48,128,2,33,24,64,0,60,0,98,40, - 2,0,64,16,0,242,98,38,60,0,3,36, - 33,136,66,2,33,32,32,2,33,40,0,2, - 1,0,2,36,17,0,2,162,0,128,98,52, - 0,0,2,174,6,23,192,12,18,0,3,166, - 10,0,64,20,33,32,0,2,2,131,2,60, - 33,16,82,0,172,247,66,140,0,0,0,0, - 1,0,66,36,152,21,192,12,236,0,34,174, - 214,40,192,8,0,0,0,0,2,131,2,60, - 33,16,82,0,168,247,66,140,0,0,0,0, - 1,0,66,36,232,0,34,174,36,0,191,143, - 32,0,180,143,28,0,179,143,24,0,178,143, - 20,0,177,143,16,0,176,143,8,0,224,3, - 40,0,189,39,216,255,189,39,24,0,178,175, - 33,144,128,0,20,0,177,175,33,136,160,0, - 16,0,176,175,33,128,192,0,36,0,191,175, - 32,0,180,175,28,0,179,175,4,0,36,142, - 8,0,37,142,160,133,134,143,2,131,7,60, - 180,211,231,140,20,35,192,12,0,0,0,0, - 11,0,64,20,0,0,0,0,2,131,4,60, - 144,146,132,36,15,63,192,12,33,40,0,2, - 100,129,132,39,108,129,134,39,31,21,192,12, - 6,0,5,38,92,41,192,8,0,0,0,0, - 3,131,20,60,208,12,148,38,0,0,132,142, - 3,131,5,60,212,12,165,140,3,131,6,60, - 216,12,198,140,3,131,7,60,220,12,231,140, - 0,0,0,0,20,35,192,12,192,129,18,0, - 3,131,3,60,33,24,112,0,20,13,99,140, - 0,0,0,0,80,0,96,16,1,0,83,44, - 33,32,64,2,11,40,192,12,33,40,32,2, - 50,0,64,16,33,32,64,2,223,39,192,12, - 33,40,32,2,92,37,192,12,0,0,0,0, - 206,35,192,12,0,0,0,0,0,0,132,142, - 
3,131,5,60,212,12,165,140,3,131,6,60, - 216,12,198,140,3,131,7,60,220,12,231,140, - 20,35,192,12,0,0,0,0,16,0,64,16, - 0,0,0,0,14,0,96,18,0,0,0,0, - 3,131,2,60,248,12,66,140,3,131,1,60, - 9,0,64,16,0,13,32,164,3,131,1,60, - 72,37,192,12,4,13,32,164,1,0,2,36, - 3,131,1,60,8,13,34,164,3,131,1,60, - 10,13,32,164,3,131,2,60,228,12,66,140, - 0,0,0,0,38,0,66,22,0,0,0,0, - 254,39,192,12,33,32,32,2,52,36,192,12, - 0,0,0,0,36,0,34,142,0,0,0,0, - 30,0,64,16,0,0,0,0,206,39,192,12, - 0,0,0,0,92,41,192,8,0,0,0,0, - 3,131,4,60,33,32,144,0,40,13,132,140, - 3,131,5,60,33,40,176,0,44,13,165,140, - 3,131,6,60,216,12,198,140,3,131,7,60, - 220,12,231,140,20,35,192,12,0,0,0,0, - 12,0,64,20,0,0,0,0,3,131,3,60, - 33,24,112,0,48,13,99,148,3,131,2,60, - 33,16,80,0,16,13,66,148,0,0,0,0, - 3,0,98,20,0,0,0,0,203,36,192,12, - 33,32,64,2,36,0,191,143,32,0,180,143, - 28,0,179,143,24,0,178,143,20,0,177,143, - 16,0,176,143,8,0,224,3,40,0,189,39, - 224,255,189,39,20,0,177,175,33,136,128,0, - 16,0,176,175,192,129,17,0,24,0,191,175, - 3,131,2,60,33,16,80,0,20,13,66,140, - 0,0,0,0,28,0,64,16,0,0,0,0, - 3,131,4,60,33,32,144,0,40,13,132,140, - 3,131,5,60,33,40,176,0,44,13,165,140, - 3,131,6,60,216,12,198,140,3,131,7,60, - 220,12,231,140,20,35,192,12,0,0,0,0, - 14,0,64,20,0,0,0,0,3,131,3,60, - 33,24,112,0,48,13,99,148,3,131,2,60, - 33,16,80,0,16,13,66,148,0,0,0,0, - 5,0,98,20,0,0,0,0,161,36,192,12, - 0,0,0,0,211,39,192,12,33,32,32,2, - 24,0,191,143,20,0,177,143,16,0,176,143, - 8,0,224,3,32,0,189,39,2,18,5,0, - 0,0,130,160,8,0,224,3,1,0,133,160, - 2,22,5,0,0,0,130,160,2,20,5,0, - 1,0,130,160,2,18,5,0,2,0,130,160, - 8,0,224,3,3,0,133,160,0,0,130,144, - 1,0,131,144,0,18,2,0,8,0,224,3, - 37,16,98,0,0,0,130,144,1,0,131,144, - 2,0,133,144,0,22,2,0,0,28,3,0, - 33,16,67,0,0,42,5,0,3,0,131,144, - 33,16,69,0,8,0,224,3,37,16,67,0, - 224,255,189,39,16,0,176,175,33,128,128,0, - 24,0,191,175,20,0,177,175,48,129,135,39, - 3,0,226,136,0,0,226,152,4,0,227,128, - 5,0,228,128,3,0,2,170,0,0,2,186, - 4,0,3,162,5,0,4,162,3,0,162,136, - 0,0,162,152,4,0,163,128,5,0,164,128, - 9,0,2,170,6,0,2,186,10,0,3,162, - 11,0,4,162,12,0,4,38,38,0,5,36, - 144,41,192,12,33,136,192,0,14,0,4,38, - 144,41,192,12,66,66,5,36,17,0,4,38, - 33,40,0,0,3,0,2,36,144,41,192,12, - 16,0,2,162,19,0,0,162,20,0,0,162, - 40,0,34,142,0,0,0,0,43,32,2,0, - 36,0,34,142,0,0,0,0,3,0,64,16, - 33,24,128,0,218,41,192,8,128,0,130,52, - 33,16,96,0,21,0,2,162,4,0,37,150, - 0,0,0,0,144,41,192,12,22,0,4,38, - 9,0,34,138,6,0,34,154,10,0,35,130, - 11,0,36,130,27,0,2,170,24,0,2,186, - 28,0,3,162,29,0,4,162,12,0,37,142, - 0,0,0,0,148,41,192,12,30,0,4,38, - 16,0,37,150,0,0,0,0,144,41,192,12, - 34,0,4,38,21,0,34,138,18,0,34,154, - 22,0,35,130,23,0,36,130,39,0,2,170, - 36,0,2,186,40,0,3,162,41,0,4,162, - 24,0,37,150,0,0,0,0,144,41,192,12, - 42,0,4,38,26,0,37,150,0,0,0,0, - 144,41,192,12,44,0,4,38,28,0,37,150, - 0,0,0,0,144,41,192,12,46,0,4,38, - 30,0,37,150,0,0,0,0,144,41,192,12, - 48,0,4,38,32,0,37,150,0,0,0,0, - 144,41,192,12,50,0,4,38,52,0,2,36, - 24,0,191,143,20,0,177,143,16,0,176,143, - 8,0,224,3,32,0,189,39,224,255,189,39, - 16,0,176,175,33,128,160,0,24,0,191,175, - 20,0,177,175,21,0,2,146,33,136,128,0, - 1,0,66,48,40,0,34,174,22,0,2,146, - 22,0,4,38,128,0,66,48,156,41,192,12, - 36,0,34,174,4,0,34,166,27,0,2,138, - 24,0,2,154,28,0,3,130,29,0,4,130, - 9,0,34,170,6,0,34,186,10,0,35,162, - 11,0,36,162,161,41,192,12,30,0,4,38, - 34,0,4,38,156,41,192,12,12,0,34,174, - 16,0,34,166,39,0,2,138,36,0,2,154, - 40,0,3,130,41,0,4,130,21,0,34,170, - 18,0,34,186,22,0,35,162,23,0,36,162, - 
156,41,192,12,42,0,4,38,44,0,4,38, - 156,41,192,12,24,0,34,166,46,0,4,38, - 156,41,192,12,26,0,34,166,48,0,4,38, - 156,41,192,12,28,0,34,166,50,0,4,38, - 156,41,192,12,30,0,34,166,32,0,34,166, - 24,0,191,143,20,0,177,143,16,0,176,143, - 8,0,224,3,32,0,189,39,232,255,189,39, - 16,0,176,175,33,128,128,0,20,0,191,175, - 48,129,134,39,3,0,194,136,0,0,194,152, - 4,0,195,128,5,0,196,128,3,0,2,170, - 0,0,2,186,4,0,3,162,5,0,4,162, - 3,0,162,136,0,0,162,152,4,0,163,128, - 5,0,164,128,9,0,2,170,6,0,2,186, - 10,0,3,162,11,0,4,162,12,0,4,38, - 144,41,192,12,7,0,5,36,14,0,4,38, - 144,41,192,12,66,66,5,36,17,0,4,38, - 33,40,0,0,3,0,2,36,144,41,192,12, - 16,0,2,162,21,0,2,36,128,0,3,36, - 19,0,0,162,20,0,3,162,20,0,191,143, - 16,0,176,143,8,0,224,3,24,0,189,39, - 176,255,189,39,68,0,177,175,64,0,176,175, - 33,128,160,0,72,0,191,175,14,0,3,146, - 66,0,2,36,9,0,98,20,33,136,128,0, - 15,0,2,146,0,0,0,0,6,0,67,20, - 64,26,17,0,16,0,3,146,3,0,2,36, - 11,0,98,16,0,0,0,0,64,26,17,0, - 2,131,2,60,33,16,67,0,184,247,66,140, - 0,0,0,0,1,0,66,36,2,131,1,60, - 33,8,35,0,182,42,192,8,184,247,34,172, - 20,0,3,146,0,0,0,0,5,0,96,16, - 128,0,2,36,21,0,98,16,64,26,17,0, - 179,42,192,8,0,0,0,0,16,0,164,39, - 64,26,17,0,2,131,2,60,33,16,67,0, - 180,247,66,140,0,0,0,0,1,0,66,36, - 2,131,1,60,33,8,35,0,180,247,34,172, - 17,42,192,12,33,40,0,2,33,32,32,2, - 16,0,165,39,222,40,192,12,33,48,0,2, - 182,42,192,8,0,0,0,0,2,131,2,60, - 33,16,67,0,180,247,66,140,0,0,0,0, - 1,0,66,36,2,131,1,60,33,8,35,0, - 180,247,34,172,100,41,192,12,33,32,32,2, - 182,42,192,8,0,0,0,0,112,129,132,39, - 15,63,192,12,0,0,0,0,72,0,191,143, - 68,0,177,143,64,0,176,143,8,0,224,3, - 80,0,189,39,8,0,224,3,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 1,0,162,48,8,0,64,16,255,255,198,48, - 67,40,5,0,64,16,5,0,33,16,68,0, - 0,0,66,144,0,0,0,0,203,42,192,8, - 33,48,194,0,67,40,5,0,255,255,165,36, - 255,255,2,36,6,0,162,16,255,255,3,36, - 0,0,130,148,2,0,132,36,255,255,165,36, - 252,255,163,20,33,48,194,0,255,255,195,48, - 2,20,6,0,33,48,98,0,255,255,195,48, - 2,20,6,0,33,48,98,0,8,0,224,3, - 255,255,194,48,208,255,189,39,16,0,176,175, - 33,128,128,0,28,0,179,175,33,152,160,0, - 24,0,178,175,33,144,192,0,36,0,181,175, - 33,168,0,2,32,0,180,175,33,160,0,0, - 40,0,191,175,20,0,177,175,12,0,3,142, - 0,0,2,142,0,0,0,0,35,24,98,0, - 42,16,114,0,2,0,64,16,33,136,64,2, - 33,136,96,0,13,0,32,18,33,40,96,2, - 35,144,81,2,8,0,2,142,0,0,4,142, - 33,48,32,2,80,68,192,12,33,32,68,0, - 8,0,2,142,0,0,2,142,0,0,2,142, - 33,152,113,2,33,16,81,0,0,0,2,174, - 0,0,2,142,0,0,0,0,4,0,64,18, - 33,160,130,2,4,0,16,142,233,42,192,8, - 0,0,0,0,18,0,180,166,33,16,0,2, - 40,0,191,143,36,0,181,143,32,0,180,143, - 28,0,179,143,24,0,178,143,20,0,177,143, - 16,0,176,143,8,0,224,3,48,0,189,39, - 224,255,189,39,24,0,178,175,33,144,128,0, - 2,131,4,60,192,4,132,36,36,0,165,175, - 1,0,5,36,28,0,191,175,20,0,177,175, - 54,21,192,12,16,0,176,175,33,136,64,0, - 8,0,32,22,33,40,0,0,2,131,2,60, - 176,5,66,140,0,0,0,0,1,0,66,36, - 2,131,1,60,102,43,192,8,176,5,34,172, - 0,1,3,36,8,0,48,142,8,0,2,36, - 16,0,2,166,6,0,2,36,18,0,2,162, - 4,0,2,36,14,0,3,166,19,0,2,162, - 20,0,3,166,2,131,6,60,212,4,198,36, - 3,0,194,136,0,0,194,152,4,0,195,132, - 25,0,2,170,22,0,2,186,26,0,3,166, - 2,131,1,60,195,211,34,136,176,133,130,155, - 0,0,0,0,31,0,2,170,28,0,2,186, - 32,0,4,38,144,71,192,12,6,0,6,36, - 39,0,162,139,36,0,162,155,0,0,0,0, - 41,0,2,170,38,0,2,186,132,129,133,39, - 3,0,162,136,0,0,162,152,4,0,163,128, - 5,0,164,128,3,0,2,170,0,0,2,186, - 4,0,3,162,5,0,4,162,2,131,5,60, - 
212,4,165,36,3,0,162,136,0,0,162,152, - 4,0,163,128,5,0,164,128,9,0,2,170, - 6,0,2,186,10,0,3,162,11,0,4,162, - 33,32,64,2,8,6,2,36,12,0,2,166, - 60,128,2,52,0,0,34,174,60,0,2,36, - 18,0,34,166,74,21,192,12,33,40,32,2, - 3,0,64,20,0,0,0,0,152,21,192,12, - 33,32,32,2,28,0,191,143,24,0,178,143, - 20,0,177,143,16,0,176,143,8,0,224,3, - 32,0,189,39,216,255,189,39,32,0,180,175, - 33,160,128,0,16,0,176,175,33,128,160,0, - 36,0,191,175,28,0,179,175,24,0,178,175, - 20,0,177,175,8,0,2,142,33,152,192,0, - 33,136,83,0,6,0,35,150,0,1,4,36, - 5,0,100,16,0,2,2,36,113,0,98,16, - 0,0,0,0,15,44,192,8,0,0,0,0, - 24,0,35,150,176,133,130,151,0,0,0,0, - 139,0,98,20,0,0,0,0,26,0,35,150, - 2,131,2,60,194,211,66,148,0,0,0,0, - 133,0,98,20,0,0,0,0,0,0,34,150, - 0,0,0,0,129,0,68,20,8,0,2,36, - 2,0,35,150,0,0,0,0,125,0,98,20, - 6,4,2,36,4,0,35,150,0,0,0,0, - 121,0,98,20,0,0,0,0,2,131,4,60, - 192,4,132,36,54,21,192,12,1,0,5,36, - 33,144,64,0,8,0,64,22,0,0,0,0, - 2,131,2,60,176,5,66,140,0,0,0,0, - 1,0,66,36,2,131,1,60,15,44,192,8, - 176,5,34,172,8,0,5,142,8,0,80,142, - 9,0,162,136,6,0,162,152,10,0,163,128, - 11,0,164,128,3,0,2,170,0,0,2,186, - 4,0,3,162,5,0,4,162,2,131,6,60, - 212,4,198,36,3,0,194,136,0,0,194,152, - 4,0,195,128,5,0,196,128,9,0,2,170, - 6,0,2,186,10,0,3,162,11,0,4,162, - 12,0,4,38,12,0,165,36,33,128,19,2, - 80,68,192,12,244,255,102,38,0,1,2,36, - 0,0,2,166,8,0,2,36,2,0,2,166, - 6,0,2,36,4,0,2,162,4,0,2,36, - 5,0,2,162,0,2,2,36,6,0,2,166, - 2,131,5,60,212,4,165,36,3,0,162,136, - 0,0,162,152,4,0,163,132,11,0,2,170, - 8,0,2,186,12,0,3,166,2,131,1,60, - 195,211,34,136,176,133,130,155,0,0,0,0, - 17,0,2,170,14,0,2,186,11,0,34,138, - 8,0,34,154,12,0,35,134,21,0,2,170, - 18,0,2,186,22,0,3,166,17,0,34,138, - 14,0,34,154,0,0,0,0,27,0,2,170, - 24,0,2,186,33,32,128,2,33,40,64,2, - 60,128,2,52,0,0,66,174,60,0,2,36, - 74,21,192,12,18,0,66,166,38,0,64,20, - 0,0,0,0,152,21,192,12,33,32,64,2, - 15,44,192,8,0,0,0,0,14,0,35,150, - 196,133,130,151,0,0,0,0,29,0,98,20, - 0,0,0,0,16,0,35,150,2,131,2,60, - 214,211,66,148,0,0,0,0,23,0,98,20, - 0,0,0,0,0,0,34,150,0,0,0,0, - 19,0,68,20,8,0,2,36,2,0,35,150, - 0,0,0,0,15,0,98,20,6,4,2,36, - 4,0,35,150,0,0,0,0,11,0,98,20, - 0,0,0,0,68,133,130,143,140,129,134,39, - 11,0,35,138,8,0,35,154,12,0,36,134, - 3,0,195,168,0,0,195,184,4,0,196,164, - 20,0,66,36,152,129,130,175,36,0,191,143, - 32,0,180,143,28,0,179,143,24,0,178,143, - 20,0,177,143,16,0,176,143,8,0,224,3, - 40,0,189,39,192,255,189,39,80,0,169,143, - 84,0,168,143,56,0,180,175,33,160,128,0, - 44,0,177,175,88,0,177,143,16,0,164,39, - 52,0,179,175,92,0,179,143,3,131,3,60, - 32,17,99,36,40,0,176,175,33,128,192,0, - 60,0,191,175,48,0,178,175,0,0,98,140, - 8,0,50,142,1,0,66,36,0,0,98,172, - 3,0,162,136,0,0,162,152,4,0,163,128, - 5,0,170,128,3,0,66,170,0,0,66,186, - 4,0,67,162,5,0,74,162,2,131,10,60, - 212,4,74,37,3,0,66,137,0,0,66,153, - 4,0,67,129,5,0,69,129,9,0,66,170, - 6,0,66,186,10,0,67,162,11,0,69,162, - 69,0,2,36,16,0,162,163,17,0,168,163, - 18,0,34,150,240,132,131,143,33,48,0,0, - 25,0,169,163,20,0,66,36,0,66,2,0, - 255,255,66,48,2,18,2,0,37,64,2,1, - 0,74,3,0,255,255,98,48,2,18,2,0, - 37,72,34,1,3,131,2,60,0,17,66,140, - 22,0,160,167,26,0,160,167,18,0,168,167, - 176,133,136,143,1,0,99,36,240,132,131,175, - 20,0,169,167,24,0,162,163,28,0,168,175, - 3,0,226,136,0,0,226,152,0,0,0,0, - 35,0,162,171,32,0,162,187,192,42,192,12, - 20,0,5,36,39,16,2,0,26,0,162,167, - 14,0,2,36,44,0,2,22,8,0,2,36, - 12,0,66,166,19,0,162,139,16,0,162,155, - 23,0,163,139,20,0,163,155,27,0,164,139, - 24,0,164,155,31,0,165,139,28,0,165,155, - 
17,0,66,170,14,0,66,186,21,0,67,170, - 18,0,67,186,25,0,68,170,22,0,68,186, - 29,0,69,170,26,0,69,186,35,0,162,139, - 32,0,162,155,0,0,0,0,33,0,66,170, - 30,0,66,186,34,0,2,36,0,0,34,174, - 0,0,35,142,18,0,34,150,0,0,0,0, - 33,16,67,0,18,0,34,166,18,0,34,150, - 0,0,0,0,60,0,66,44,68,0,64,16, - 33,32,128,2,0,0,98,142,18,0,35,150, - 60,0,66,36,35,16,67,0,0,0,98,174, - 60,0,2,36,18,0,34,166,201,44,192,8, - 33,32,128,2,164,129,133,39,3,0,162,136, - 0,0,162,152,4,0,163,128,5,0,164,128, - 17,0,66,170,14,0,66,186,18,0,67,162, - 19,0,68,162,8,0,2,36,20,0,66,166, - 19,0,162,139,16,0,162,155,23,0,163,139, - 20,0,163,155,27,0,164,139,24,0,164,155, - 31,0,165,139,28,0,165,155,25,0,66,170, - 22,0,66,186,29,0,67,170,26,0,67,186, - 33,0,68,170,30,0,68,186,37,0,69,170, - 34,0,69,186,35,0,162,139,32,0,162,155, - 0,0,0,0,41,0,66,170,38,0,66,186, - 42,0,2,36,0,0,34,174,0,0,35,142, - 18,0,34,150,0,0,0,0,33,16,67,0, - 18,0,34,166,18,0,34,150,0,0,0,0, - 60,0,66,44,8,0,64,16,0,0,0,0, - 0,0,98,142,18,0,35,150,60,0,66,36, - 35,16,67,0,0,0,98,174,60,0,2,36, - 18,0,34,166,18,0,34,150,0,0,0,0, - 0,26,2,0,2,18,2,0,37,24,98,0, - 12,0,67,166,33,32,128,2,74,21,192,12, - 33,40,32,2,8,0,64,20,33,32,32,2, - 3,131,3,60,36,17,99,36,0,0,98,140, - 0,0,0,0,1,0,66,36,152,21,192,12, - 0,0,98,172,60,0,191,143,56,0,180,143, - 52,0,179,143,48,0,178,143,44,0,177,143, - 40,0,176,143,8,0,224,3,64,0,189,39, - 176,255,189,39,56,0,180,175,112,0,180,143, - 48,0,178,175,100,0,178,143,52,0,179,175, - 104,0,179,143,64,0,182,175,33,176,128,0, - 72,0,190,175,33,240,160,0,60,0,181,175, - 33,168,224,0,68,0,183,175,108,0,183,143, - 2,131,4,60,192,4,132,36,76,0,191,175, - 44,0,177,175,40,0,176,175,32,0,166,175, - 7,2,130,38,2,130,2,0,54,21,192,12, - 33,40,0,2,33,136,64,0,8,0,32,22, - 0,74,18,0,2,131,4,60,232,146,132,36, - 33,40,128,2,15,63,192,12,33,48,0,2, - 74,45,192,8,0,0,0,0,255,255,66,50, - 2,18,2,0,37,72,34,1,0,66,19,0, - 255,255,98,50,2,18,2,0,37,64,2,1, - 8,0,130,38,0,58,2,0,255,255,66,48, - 2,18,2,0,37,56,226,0,0,163,4,60, - 220,5,132,52,4,0,5,36,4,0,34,142, - 0,17,6,36,8,0,80,140,4,0,35,142, - 8,0,2,36,0,0,98,172,0,0,9,166, - 2,0,8,166,6,0,0,166,192,42,192,12, - 4,0,7,166,33,32,160,2,4,0,5,36, - 192,42,192,12,255,255,70,48,4,0,4,38, - 2,0,5,36,192,42,192,12,255,255,70,48, - 33,32,0,2,8,0,5,36,192,42,192,12, - 255,255,70,48,33,32,224,2,33,40,128,2, - 192,42,192,12,255,255,70,48,39,24,2,0, - 255,255,98,48,2,0,64,20,33,40,224,2, - 255,255,3,52,6,0,3,166,4,0,36,142, - 0,0,0,0,220,42,192,12,33,48,128,2, - 33,32,192,2,0,0,67,140,33,40,192,3, - 0,128,99,52,0,0,67,172,4,0,35,142, - 32,0,166,143,18,0,99,148,33,56,160,2, - 18,0,35,166,96,0,170,143,17,0,3,36, - 16,0,163,175,24,0,177,175,28,0,162,175, - 23,44,192,12,20,0,170,175,3,131,3,60, - 124,17,99,36,0,0,98,140,0,0,0,0, - 1,0,66,36,0,0,98,172,76,0,191,143, - 72,0,190,143,68,0,183,143,64,0,182,143, - 60,0,181,143,56,0,180,143,52,0,179,143, - 48,0,178,143,44,0,177,143,40,0,176,143, - 8,0,224,3,80,0,189,39,128,255,189,39, - 116,0,183,175,33,184,128,0,112,0,182,175, - 33,176,160,0,104,0,180,175,33,160,192,0, - 108,0,181,175,33,168,224,0,40,0,164,39, - 96,0,178,175,144,0,178,143,33,40,0,0, - 100,0,179,175,148,0,179,143,16,0,6,36, - 120,0,191,175,92,0,177,175,144,71,192,12, - 88,0,176,175,56,0,177,39,33,32,32,2, - 33,40,0,0,2,0,16,36,40,0,176,167, - 2,0,162,150,0,0,0,0,42,0,162,167, - 19,0,130,138,16,0,130,154,0,0,0,0, - 47,0,162,171,44,0,162,187,144,71,192,12, - 16,0,6,36,33,32,64,2,33,40,96,2, - 40,0,166,39,33,56,32,2,56,0,176,167, - 0,0,162,150,2,131,16,60,8,239,16,38, - 
58,0,162,167,15,0,130,138,12,0,130,154, - 0,0,0,0,63,0,162,171,60,0,162,187, - 242,5,2,36,84,0,162,167,72,0,162,39, - 72,0,160,167,76,0,176,175,80,0,176,175, - 247,71,192,12,16,0,162,175,255,255,3,36, - 22,0,67,16,12,0,145,38,33,32,224,2, - 6,0,197,38,35,48,150,2,0,0,163,150, - 4,0,2,36,16,0,162,175,161,0,2,36, - 20,0,162,175,28,0,176,175,0,18,3,0, - 2,26,3,0,37,16,67,0,255,255,66,48, - 24,0,162,175,80,0,162,143,76,0,163,143, - 33,56,32,2,35,16,67,0,255,255,66,48, - 220,44,192,12,32,0,162,175,120,0,191,143, - 116,0,183,143,112,0,182,143,108,0,181,143, - 104,0,180,143,100,0,179,143,96,0,178,143, - 92,0,177,143,88,0,176,143,8,0,224,3, - 128,0,189,39,196,133,130,143,184,255,189,39, - 64,0,191,175,60,0,177,175,109,0,64,16, - 56,0,176,175,255,255,3,36,106,0,67,16, - 0,0,0,0,176,133,130,143,176,133,145,39, - 102,0,64,16,0,0,0,0,100,0,67,16, - 0,0,0,0,2,131,2,60,8,239,66,36, - 44,0,162,175,48,0,162,175,242,5,2,36, - 40,0,160,167,6,0,128,16,52,0,162,167, - 1,0,2,36,23,0,130,16,0,0,0,0, - 36,46,192,8,0,0,0,0,2,131,16,60, - 160,204,16,38,156,71,192,12,33,32,0,2, - 0,163,4,60,4,1,132,140,204,204,3,60, - 205,204,99,52,25,0,131,0,33,40,32,2, - 33,48,0,2,33,56,64,0,40,0,164,39, - 16,64,0,0,194,16,8,0,0,0,0,0, - 104,56,192,12,16,0,162,175,251,45,192,8, - 33,24,64,0,3,131,2,60,28,18,66,148, - 0,0,0,0,2,0,66,48,61,0,64,16, - 0,0,0,0,2,131,16,60,160,204,16,38, - 156,71,192,12,33,32,0,2,0,163,4,60, - 4,1,132,140,204,204,3,60,205,204,99,52, - 25,0,131,0,33,40,32,2,33,48,0,2, - 33,56,64,0,40,0,164,39,16,64,0,0, - 194,16,8,0,0,0,0,0,105,57,192,12, - 16,0,162,175,33,24,64,0,255,255,2,36, - 39,0,98,16,0,0,0,0,140,129,130,147, - 0,0,0,0,1,0,66,48,7,0,64,20, - 0,0,0,0,68,133,131,143,152,129,130,143, - 0,0,0,0,43,16,67,0,11,0,64,16, - 33,32,0,0,68,133,131,143,148,129,130,143, - 0,0,0,0,6,0,98,16,33,32,0,0, - 196,133,133,143,148,129,131,175,17,43,192,12, - 33,32,0,0,33,32,0,0,140,129,133,39, - 14,0,6,36,4,0,2,36,16,0,162,175, - 162,0,2,36,20,0,162,175,24,0,162,175, - 2,131,2,60,8,239,66,36,28,0,162,175, - 48,0,162,143,44,0,163,143,196,133,135,39, - 35,16,67,0,255,255,66,48,220,44,192,12, - 32,0,162,175,64,0,191,143,60,0,177,143, - 56,0,176,143,8,0,224,3,72,0,189,39, - 208,255,189,39,36,0,179,175,33,152,128,0, - 40,0,180,175,33,160,160,0,32,0,178,175, - 24,0,176,175,33,128,224,0,44,0,191,175, - 28,0,177,175,6,0,2,150,64,0,177,143, - 0,0,0,0,20,0,64,16,33,144,192,0, - 12,0,68,38,8,0,5,36,192,42,192,12, - 0,17,6,36,4,0,4,38,2,0,5,36, - 192,42,192,12,255,255,70,48,33,32,0,2, - 33,40,32,2,192,42,192,12,255,255,70,48, - 255,255,66,48,255,255,3,52,4,0,67,16, - 0,0,0,0,3,131,3,60,111,46,192,8, - 120,17,99,36,4,0,2,150,0,0,0,0, - 0,26,2,0,2,18,2,0,37,24,98,0, - 255,255,99,48,4,0,113,16,8,0,7,38, - 3,131,3,60,111,46,192,8,120,17,99,36, - 2,0,2,150,0,0,0,0,0,26,2,0, - 2,18,2,0,37,24,98,0,255,255,99,48, - 161,0,2,36,15,0,98,20,248,255,40,38, - 33,32,96,2,33,40,128,2,3,131,3,60, - 112,17,99,36,0,0,98,140,33,48,64,2, - 16,0,167,175,33,56,0,2,20,0,168,175, - 1,0,66,36,86,45,192,12,0,0,98,172, - 115,46,192,8,0,0,0,0,3,131,3,60, - 116,17,99,36,0,0,98,140,0,0,0,0, - 1,0,66,36,0,0,98,172,44,0,191,143, - 40,0,180,143,36,0,179,143,32,0,178,143, - 28,0,177,143,24,0,176,143,8,0,224,3, - 48,0,189,39,192,255,189,39,52,0,181,175, - 33,168,128,0,44,0,179,175,33,152,160,0, - 48,0,180,175,33,160,192,0,32,0,176,175, - 33,128,224,0,33,32,0,2,33,48,0,0, - 40,0,178,175,80,0,178,143,3,131,3,60, - 144,16,99,36,56,0,191,175,36,0,177,175, - 0,0,98,140,33,40,64,2,1,0,66,36, - 192,42,192,12,0,0,98,172,255,255,66,48, - 255,255,3,52,8,0,67,16,8,0,2,36, - 
3,131,2,60,148,16,66,140,0,0,0,0, - 1,0,66,36,3,131,1,60,217,46,192,8, - 148,16,34,172,0,0,3,150,0,0,0,0, - 58,0,98,20,255,1,69,38,2,131,4,60, - 192,4,132,36,3,131,2,60,172,16,66,140, - 3,131,3,60,196,16,99,140,1,0,66,36, - 1,0,99,36,3,131,1,60,172,16,34,172, - 3,131,1,60,196,16,35,172,54,21,192,12, - 2,42,5,0,33,136,64,0,8,0,32,22, - 33,32,0,2,3,131,2,60,200,16,66,140, - 0,0,0,0,1,0,66,36,3,131,1,60, - 217,46,192,8,200,16,34,172,33,40,64,2, - 33,48,0,0,0,0,0,162,192,42,192,12, - 2,0,0,166,33,40,0,2,39,16,2,0, - 2,0,162,164,4,0,36,142,0,0,0,0, - 220,42,192,12,33,48,64,2,33,32,160,2, - 6,0,101,38,35,48,147,2,0,0,67,140, - 12,0,135,38,0,128,99,52,0,0,67,172, - 1,0,3,36,18,0,50,166,16,0,163,175, - 4,0,3,36,20,0,163,175,24,0,177,175, - 23,44,192,12,28,0,162,175,3,131,2,60, - 228,16,66,140,0,0,0,0,1,0,66,36, - 3,131,1,60,228,16,34,172,56,0,191,143, - 52,0,181,143,48,0,180,143,44,0,179,143, - 40,0,178,143,36,0,177,143,32,0,176,143, - 8,0,224,3,64,0,189,39,200,255,189,39, - 44,0,181,175,33,168,128,0,3,131,3,60, - 4,17,99,36,48,0,191,175,40,0,180,175, - 36,0,179,175,32,0,178,175,28,0,177,175, - 24,0,176,175,0,0,98,140,33,136,160,0, - 1,0,66,36,0,0,98,172,18,0,34,150, - 0,0,0,0,255,255,84,48,243,5,130,46, - 8,0,64,20,33,152,192,0,3,131,2,60, - 8,17,66,140,0,0,0,0,1,0,66,36, - 3,131,1,60,132,47,192,8,8,17,34,172, - 2,131,18,60,18,233,82,38,33,32,64,2, - 0,0,48,142,8,0,37,142,255,63,16,50, - 80,68,192,12,33,48,0,2,0,0,34,142, - 0,0,0,0,0,128,66,48,5,0,64,20, - 33,144,80,2,4,0,49,142,0,0,0,0, - 1,47,192,8,33,32,64,2,2,131,18,60, - 18,233,82,38,33,128,114,2,16,0,17,38, - 33,32,32,2,176,133,133,39,168,71,192,12, - 4,0,6,36,9,0,64,16,33,32,32,2, - 128,129,133,39,168,71,192,12,4,0,6,36, - 4,0,64,16,0,0,0,0,3,131,3,60, - 128,47,192,8,12,17,99,36,0,0,4,146, - 64,0,2,36,240,0,131,48,4,0,98,16, - 15,0,130,48,3,131,3,60,128,47,192,8, - 8,17,99,36,128,136,2,0,20,0,34,42, - 4,0,64,16,33,32,0,2,3,131,3,60, - 128,47,192,8,8,17,99,36,33,40,32,2, - 192,42,192,12,33,48,0,0,255,255,66,48, - 255,255,3,52,4,0,67,16,0,0,0,0, - 3,131,3,60,128,47,192,8,8,17,99,36, - 6,0,2,150,0,0,0,0,63,255,66,48, - 18,0,64,16,33,56,17,2,3,131,3,60, - 48,17,99,36,0,0,98,140,0,0,0,0, - 1,0,66,36,0,0,98,172,3,131,2,60, - 56,17,66,140,3,131,3,60,24,17,99,140, - 1,0,66,36,1,0,99,36,3,131,1,60, - 56,17,34,172,3,131,1,60,132,47,192,8, - 24,17,35,172,2,0,2,150,0,0,0,0, - 0,26,2,0,2,18,2,0,37,24,98,0, - 255,255,99,48,35,64,113,0,35,16,242,0, - 35,16,130,2,42,16,72,0,4,0,64,16, - 1,0,2,36,3,131,3,60,128,47,192,8, - 24,17,99,36,9,0,3,146,0,0,0,0, - 5,0,98,16,17,0,2,36,15,0,98,16, - 33,32,160,2,126,47,192,8,0,0,0,0, - 33,32,160,2,33,40,64,2,3,131,3,60, - 28,17,99,36,0,0,98,140,33,48,0,2, - 16,0,168,175,1,0,66,36,123,46,192,12, - 0,0,98,172,132,47,192,8,0,0,0,0, - 33,40,64,2,3,131,3,60,28,17,99,36, - 0,0,98,140,33,48,0,2,16,0,168,175, - 1,0,66,36,41,46,192,12,0,0,98,172, - 132,47,192,8,0,0,0,0,3,131,3,60, - 20,17,99,36,0,0,98,140,0,0,0,0, - 1,0,66,36,0,0,98,172,48,0,191,143, - 44,0,181,143,40,0,180,143,36,0,179,143, - 32,0,178,143,28,0,177,143,24,0,176,143, - 8,0,224,3,56,0,189,39,232,255,189,39, - 255,0,12,60,255,0,140,53,0,255,13,60, - 0,255,173,53,16,0,176,175,3,131,16,60, - 0,17,16,38,33,32,0,2,33,40,0,0, - 0,163,9,60,220,5,41,141,0,163,10,60, - 16,6,74,141,0,163,11,60,224,5,107,141, - 20,0,191,175,0,28,9,0,2,20,9,0, - 37,24,98,0,0,60,10,0,2,20,10,0, - 37,56,226,0,0,68,11,0,2,20,11,0, - 37,64,2,1,2,18,3,0,36,16,76,0, - 0,26,3,0,36,24,109,0,37,16,67,0, - 184,133,130,175,2,18,7,0,36,16,76,0, - 0,58,7,0,36,56,237,0,37,16,71,0, - 
192,133,130,175,2,18,8,0,36,16,76,0, - 0,66,8,0,36,64,13,1,37,16,72,0, - 176,133,137,175,196,133,138,175,188,133,139,175, - 180,133,130,175,144,71,192,12,76,0,6,36, - 3,131,4,60,144,16,132,36,33,40,0,0, - 32,0,2,36,0,0,2,174,10,0,2,36, - 3,131,1,60,44,17,34,172,144,71,192,12, - 104,0,6,36,3,131,4,60,112,17,132,36, - 33,40,0,0,144,71,192,12,16,0,6,36, - 3,131,4,60,80,17,132,36,33,40,0,0, - 144,71,192,12,32,0,6,36,20,0,191,143, - 16,0,176,143,8,0,224,3,24,0,189,39, - 176,255,189,39,100,0,162,143,96,0,169,143, - 72,0,182,175,33,176,128,0,48,0,176,175, - 104,0,176,143,34,0,164,39,60,0,179,175, - 108,0,179,143,3,131,3,60,104,17,99,36, - 56,0,178,175,2,131,18,60,212,4,82,38, - 52,0,177,175,33,136,192,0,68,0,181,175, - 112,0,181,143,4,0,6,36,76,0,191,175, - 64,0,180,175,0,66,2,0,255,255,66,48, - 2,18,2,0,37,64,2,1,0,0,98,140, - 8,0,116,142,1,0,66,36,0,0,98,172, - 3,0,162,136,0,0,162,152,4,0,163,128, - 5,0,170,128,3,0,130,170,0,0,130,186, - 4,0,131,162,5,0,138,162,3,0,66,138, - 0,0,66,154,4,0,67,130,5,0,69,130, - 9,0,130,170,6,0,130,186,10,0,131,162, - 11,0,133,162,255,255,2,52,16,0,162,167, - 18,0,98,150,33,40,0,0,20,0,160,163, - 21,0,160,163,30,0,66,36,0,26,2,0, - 255,255,66,48,2,18,2,0,37,24,98,0, - 18,0,163,167,3,0,226,136,0,0,226,152, - 0,0,0,0,25,0,162,171,22,0,162,187, - 3,0,34,137,0,0,34,153,4,0,35,129, - 5,0,39,129,29,0,162,171,26,0,162,187, - 30,0,163,163,31,0,167,163,144,71,192,12, - 32,0,168,167,3,0,66,138,0,0,66,154, - 4,0,67,134,41,0,162,171,38,0,162,187, - 42,0,163,167,33,16,0,2,0,130,16,0, - 255,255,66,48,2,18,2,0,37,128,2,2, - 14,0,2,36,58,0,34,22,44,0,176,167, - 18,0,162,151,0,0,0,0,12,0,130,166, - 19,0,162,139,16,0,162,155,23,0,163,139, - 20,0,163,155,27,0,164,139,24,0,164,155, - 31,0,165,139,28,0,165,155,17,0,130,170, - 14,0,130,186,21,0,131,170,18,0,131,186, - 25,0,132,170,22,0,132,186,29,0,133,170, - 26,0,133,186,35,0,162,139,32,0,162,155, - 39,0,163,139,36,0,163,155,43,0,164,139, - 40,0,164,155,44,0,165,131,33,0,130,170, - 30,0,130,186,37,0,131,170,34,0,131,186, - 41,0,132,170,38,0,132,186,42,0,133,162, - 45,0,162,131,0,0,0,0,43,0,130,162, - 44,0,2,36,0,0,98,174,0,0,99,142, - 18,0,98,150,0,0,0,0,33,16,67,0, - 18,0,98,166,18,0,98,150,0,0,0,0, - 60,0,66,44,80,0,64,16,33,32,192,2, - 0,0,162,142,18,0,99,150,60,0,66,36, - 35,16,67,0,0,0,162,174,60,0,2,36, - 18,0,98,166,172,48,192,8,33,32,192,2, - 208,129,133,39,3,0,162,136,0,0,162,152, - 4,0,163,128,5,0,164,128,17,0,130,170, - 14,0,130,186,18,0,131,162,19,0,132,162, - 129,55,2,36,20,0,130,166,19,0,162,139, - 16,0,162,155,23,0,163,139,20,0,163,155, - 27,0,164,139,24,0,164,155,31,0,165,139, - 28,0,165,155,25,0,130,170,22,0,130,186, - 29,0,131,170,26,0,131,186,33,0,132,170, - 30,0,132,186,37,0,133,170,34,0,133,186, - 35,0,162,139,32,0,162,155,39,0,163,139, - 36,0,163,155,43,0,164,139,40,0,164,155, - 44,0,165,131,41,0,130,170,38,0,130,186, - 45,0,131,170,42,0,131,186,49,0,132,170, - 46,0,132,186,50,0,133,162,45,0,162,131, - 0,0,0,0,51,0,130,162,52,0,2,36, - 0,0,98,174,0,0,99,142,18,0,98,150, - 0,0,0,0,33,16,67,0,18,0,98,166, - 18,0,98,150,0,0,0,0,60,0,66,44, - 8,0,64,16,0,0,0,0,0,0,162,142, - 18,0,99,150,60,0,66,36,35,16,67,0, - 0,0,162,174,60,0,2,36,18,0,98,166, - 18,0,98,150,0,0,0,0,0,26,2,0, - 2,18,2,0,37,24,98,0,12,0,131,166, - 33,32,192,2,74,21,192,12,33,40,96,2, - 8,0,64,20,33,32,96,2,3,131,3,60, - 108,17,99,36,0,0,98,140,0,0,0,0, - 1,0,66,36,152,21,192,12,0,0,98,172, - 76,0,191,143,72,0,182,143,68,0,181,143, - 64,0,180,143,60,0,179,143,56,0,178,143, - 52,0,177,143,48,0,176,143,8,0,224,3, - 80,0,189,39,33,24,0,0,5,0,7,36, - 
58,0,6,36,0,0,162,144,0,0,0,0, - 2,17,2,0,2,131,1,60,33,8,34,0, - 176,155,34,144,0,0,0,0,0,0,130,160, - 0,0,162,144,1,0,132,36,15,0,66,48, - 2,131,1,60,33,8,34,0,176,155,34,144, - 1,0,165,36,0,0,130,160,3,0,103,16, - 1,0,132,36,0,0,134,160,1,0,132,36, - 1,0,99,36,6,0,98,40,233,255,64,20, - 0,0,0,0,8,0,224,3,0,0,0,0, - 128,255,189,39,2,101,2,36,0,2,3,36, - 112,0,176,175,44,0,176,39,33,32,0,2, - 33,40,0,0,48,0,6,36,120,0,191,175, - 116,0,177,175,40,0,162,167,144,71,192,12, - 42,0,163,167,3,131,17,60,96,18,49,38, - 2,131,5,60,224,147,165,36,188,71,192,12, - 33,32,32,2,18,0,64,20,33,32,0,2, - 2,131,5,60,236,147,165,36,0,0,162,140, - 4,0,163,140,8,0,164,140,44,0,162,175, - 48,0,163,175,52,0,164,175,12,0,162,128, - 0,0,0,0,56,0,162,163,2,131,5,60, - 212,4,165,36,193,48,192,12,56,0,164,39, - 8,49,192,8,92,0,177,39,33,40,32,2, - 204,63,192,12,48,0,6,36,92,0,177,39, - 33,32,32,2,33,40,0,0,144,71,192,12, - 4,0,6,36,2,131,4,60,212,4,132,36, - 0,0,130,140,4,0,131,132,96,0,162,175, - 100,0,163,167,4,82,2,36,0,1,3,36, - 236,255,132,36,2,0,5,36,102,0,162,167, - 54,21,192,12,104,0,163,167,33,128,64,0, - 22,0,0,18,40,0,165,39,4,0,4,142, - 0,0,0,0,220,42,192,12,66,0,6,36, - 33,32,0,0,0,0,67,140,132,129,133,39, - 0,128,99,52,0,0,67,172,4,0,3,142, - 14,0,6,36,18,0,99,148,33,56,32,2, - 18,0,3,166,82,4,3,36,16,0,165,175, - 20,0,163,175,24,0,163,175,28,0,176,175, - 214,47,192,12,32,0,162,175,120,0,191,143, - 116,0,177,143,112,0,176,143,8,0,224,3, - 128,0,189,39,144,255,189,39,104,0,180,175, - 33,160,128,0,100,0,179,175,33,152,160,0, - 92,0,177,175,33,136,192,0,33,32,224,0, - 40,0,166,39,56,0,167,39,96,0,178,175, - 2,131,18,60,8,239,82,38,88,0,176,175, - 128,0,176,143,242,5,2,36,84,0,162,167, - 72,0,162,39,108,0,191,175,72,0,160,167, - 76,0,178,175,80,0,178,175,16,0,162,175, - 247,71,192,12,33,40,0,2,255,255,3,36, - 37,0,67,16,255,1,5,38,2,131,4,60, - 192,4,132,36,54,21,192,12,2,42,5,0, - 33,128,64,0,30,0,0,18,33,40,64,2, - 80,0,166,143,76,0,162,143,4,0,4,142, - 35,48,194,0,220,42,192,12,255,255,198,48, - 33,32,128,2,0,0,67,140,6,0,101,38, - 0,128,99,52,0,0,67,172,4,0,3,142, - 35,48,51,2,18,0,99,148,18,0,39,38, - 18,0,3,166,28,0,40,150,22,0,35,38, - 16,0,163,175,15,144,3,52,24,0,163,175, - 28,0,176,175,32,0,162,175,0,18,8,0, - 2,66,8,0,37,16,72,0,255,255,66,48, - 214,47,192,12,20,0,162,175,108,0,191,143, - 104,0,180,143,100,0,179,143,96,0,178,143, - 92,0,177,143,88,0,176,143,8,0,224,3, - 112,0,189,39,200,255,189,39,44,0,181,175, - 33,168,128,0,28,0,177,175,33,136,160,0, - 48,0,191,175,40,0,180,175,36,0,179,175, - 32,0,178,175,24,0,176,175,18,0,34,150, - 0,0,0,0,255,255,84,48,243,5,130,46, - 4,0,64,20,33,152,192,0,3,131,3,60, - 241,49,192,8,84,17,99,36,2,131,18,60, - 16,233,82,38,33,32,64,2,0,0,48,142, - 8,0,37,142,255,63,16,50,80,68,192,12, - 33,48,0,2,0,0,34,142,0,0,0,0, - 0,128,66,48,5,0,64,20,33,144,80,2, - 4,0,49,142,0,0,0,0,148,49,192,8, - 33,32,64,2,2,131,2,60,16,233,66,36, - 33,128,98,2,6,0,17,38,33,32,32,2, - 0,163,5,60,224,5,165,52,168,71,192,12, - 4,0,6,36,9,0,64,16,33,32,32,2, - 224,129,133,39,168,71,192,12,4,0,6,36, - 5,0,64,16,10,0,17,38,3,131,3,60, - 241,49,192,8,88,17,99,36,10,0,17,38, - 33,32,32,2,2,131,5,60,212,4,165,36, - 168,71,192,12,6,0,6,36,9,0,64,16, - 33,32,32,2,228,129,133,39,168,71,192,12, - 6,0,6,36,4,0,64,16,0,0,0,0, - 3,131,3,60,241,49,192,8,88,17,99,36, - 0,0,3,150,255,255,2,52,4,0,98,16, - 30,0,7,38,3,131,3,60,241,49,192,8, - 88,17,99,36,2,0,2,150,2,131,5,60, - 16,233,165,36,0,26,2,0,2,18,2,0, - 37,24,98,0,255,255,99,48,226,255,104,36, - 35,16,229,0,35,16,130,2,42,16,72,0, - 
4,0,64,16,0,0,0,0,3,131,3,60, - 241,49,192,8,96,17,99,36,16,0,2,150, - 0,0,0,0,0,26,2,0,2,18,2,0, - 37,24,98,0,255,255,99,48,15,144,2,52, - 11,0,98,20,33,32,160,2,3,131,3,60, - 100,17,99,36,0,0,98,140,33,48,0,2, - 16,0,168,175,1,0,66,36,54,49,192,12, - 0,0,98,172,245,49,192,8,0,0,0,0, - 3,131,3,60,92,17,99,36,0,0,98,140, - 0,0,0,0,1,0,66,36,0,0,98,172, - 48,0,191,143,44,0,181,143,40,0,180,143, - 36,0,179,143,32,0,178,143,28,0,177,143, - 24,0,176,143,8,0,224,3,56,0,189,39, - 0,0,0,0,0,0,0,0,232,255,189,39, - 16,0,191,175,13,8,192,12,0,8,4,36, - 8,133,130,175,16,0,191,143,24,0,189,39, - 8,0,224,3,0,0,0,0,232,255,189,39, - 45,0,128,16,16,0,191,175,240,129,133,143, - 7,0,130,36,194,16,2,0,10,0,160,20, - 1,0,70,36,8,133,133,143,0,133,130,39, - 0,133,133,175,0,0,162,172,0,8,2,36, - 240,129,133,175,2,131,1,60,20,211,32,172, - 4,0,162,172,0,0,164,140,0,0,0,0, - 4,0,131,140,0,0,0,0,43,16,102,0, - 14,0,64,20,0,0,0,0,5,0,102,20, - 35,16,102,0,0,0,130,140,0,0,0,0, - 43,50,192,8,0,0,162,172,4,0,130,172, - 192,16,2,0,33,32,130,0,4,0,134,172, - 240,129,133,175,57,50,192,8,8,0,130,36, - 240,129,130,143,0,0,0,0,4,0,130,16, - 33,40,128,0,0,0,132,140,28,50,192,8, - 0,0,0,0,2,131,4,60,15,63,192,12, - 64,148,132,36,33,16,0,0,16,0,191,143, - 24,0,189,39,8,0,224,3,0,0,0,0, - 56,0,128,16,248,255,132,36,240,129,133,143, - 0,0,0,0,78,50,192,8,43,16,164,0, - 0,0,163,140,0,0,0,0,43,16,163,0, - 5,0,64,20,43,16,164,0,12,0,64,20, - 43,16,131,0,10,0,64,20,0,0,0,0, - 33,40,96,0,43,16,164,0,244,255,64,16, - 0,0,0,0,0,0,162,140,0,0,0,0, - 43,16,130,0,239,255,64,16,0,0,0,0, - 4,0,134,140,0,0,163,140,192,16,6,0, - 33,16,130,0,11,0,67,20,0,0,0,0, - 4,0,98,140,0,0,0,0,33,16,194,0, - 4,0,130,172,0,0,162,140,0,0,0,0, - 0,0,66,140,0,0,0,0,102,50,192,8, - 0,0,130,172,0,0,131,172,4,0,163,140, - 0,0,0,0,192,16,3,0,33,16,162,0, - 9,0,68,20,0,0,0,0,4,0,130,140, - 0,0,0,0,33,16,98,0,4,0,162,172, - 0,0,130,140,0,0,0,0,117,50,192,8, - 0,0,162,172,0,0,164,172,240,129,133,175, - 8,0,224,3,0,0,0,0,232,255,189,39, - 16,0,191,175,0,50,192,12,0,0,0,0, - 178,45,192,12,33,32,0,0,16,0,191,143, - 24,0,189,39,8,0,224,3,0,0,0,0, - 1,0,3,36,5,0,195,20,255,255,2,36, - 0,0,226,140,0,0,0,0,43,16,2,0, - 35,16,2,0,8,0,224,3,0,0,0,0, - 224,255,189,39,16,0,176,175,33,128,224,0, - 20,0,177,175,48,0,177,143,1,0,2,36, - 5,0,162,20,24,0,191,175,0,0,194,140, - 0,0,0,0,8,0,64,16,0,0,0,0, - 11,0,2,36,33,32,0,2,33,40,32,2, - 48,72,192,12,96,0,2,174,1,0,66,36, - 100,0,2,174,17,0,34,146,0,0,0,0, - 1,0,66,52,17,0,34,162,24,0,191,143, - 20,0,177,143,16,0,176,143,8,0,224,3, - 32,0,189,39,8,0,224,3,0,0,0,0, - 16,0,163,143,0,0,0,0,17,0,98,144, - 0,0,0,0,2,0,66,52,8,0,224,3, - 17,0,98,160,8,0,224,3,0,0,0,0, - 224,255,189,39,16,0,176,175,33,128,128,0, - 244,129,131,151,255,0,2,36,28,0,191,175, - 24,0,178,175,20,0,177,175,4,0,2,174, - 60,0,0,174,1,0,98,36,244,129,130,167, - 10,0,3,166,3,0,162,136,0,0,162,152, - 7,0,163,136,4,0,163,152,11,0,164,136, - 8,0,164,152,15,0,167,136,12,0,167,152, - 15,0,2,170,12,0,2,186,19,0,3,170, - 16,0,3,186,23,0,4,170,20,0,4,186, - 27,0,7,170,24,0,7,186,3,0,194,136, - 0,0,194,152,7,0,195,136,4,0,195,152, - 11,0,196,136,8,0,196,152,15,0,197,136, - 12,0,197,152,31,0,2,170,28,0,2,186, - 35,0,3,170,32,0,3,186,39,0,4,170, - 36,0,4,186,43,0,5,170,40,0,5,186, - 80,0,2,142,76,0,3,142,0,0,0,0, - 35,16,67,0,255,255,81,48,88,0,3,150, - 3,0,2,36,13,0,98,16,0,0,0,0, - 2,131,18,60,160,204,82,38,156,71,192,12, - 33,32,64,2,7,0,81,20,33,32,64,2, - 76,0,5,142,0,0,0,0,168,71,192,12, - 33,48,32,2,21,0,64,16,33,16,0,0, - 2,131,18,60,192,204,82,38,156,71,192,12, - 
33,32,64,2,7,0,81,20,33,32,64,2, - 76,0,5,142,0,0,0,0,168,71,192,12, - 33,48,32,2,9,0,64,16,33,16,0,0, - 3,131,3,60,132,17,99,36,0,0,98,140, - 1,0,4,36,1,0,66,36,178,45,192,12, - 0,0,98,172,1,0,2,36,28,0,191,143, - 24,0,178,143,20,0,177,143,16,0,176,143, - 8,0,224,3,32,0,189,39,0,0,0,0, - 0,0,0,0,224,255,189,39,20,0,177,175, - 33,136,224,0,16,0,176,175,48,0,176,143, - 24,0,191,175,156,71,192,12,33,32,32,2, - 0,0,2,174,33,16,32,2,24,0,191,143, - 20,0,177,143,16,0,176,143,8,0,224,3, - 32,0,189,39,8,0,224,3,33,16,224,0, - 0,0,227,140,204,204,2,60,205,204,66,52, - 25,0,98,0,16,32,0,0,0,0,0,0, - 0,0,0,0,8,0,224,3,194,16,4,0, - 224,255,189,39,16,0,176,175,33,128,224,0, - 33,32,0,2,33,40,0,0,20,0,177,175, - 48,0,177,143,24,0,191,175,208,71,192,12, - 16,0,6,36,2,0,64,20,35,16,80,0, - 16,0,2,36,0,0,34,174,33,16,0,2, - 24,0,191,143,20,0,177,143,16,0,176,143, - 8,0,224,3,32,0,189,39,232,255,189,39, - 40,0,164,143,44,0,165,143,16,0,191,175, - 205,59,192,12,0,0,0,0,16,0,191,143, - 24,0,189,39,8,0,224,3,0,0,0,0, - 232,255,189,39,40,0,164,143,44,0,165,143, - 16,0,191,175,239,59,192,12,0,0,0,0, - 16,0,191,143,24,0,189,39,8,0,224,3, - 0,0,0,0,232,255,189,39,40,0,164,143, - 44,0,165,143,16,0,191,175,17,60,192,12, - 0,0,0,0,16,0,191,143,24,0,189,39, - 8,0,224,3,0,0,0,0,8,0,224,3, - 33,16,224,0,0,0,226,140,8,0,224,3, - 0,0,0,0,216,255,189,39,24,0,176,175, - 56,0,176,143,32,0,191,175,28,0,177,175, - 36,0,2,142,1,0,3,36,20,0,81,140, - 187,0,163,20,0,0,0,0,0,0,195,140, - 0,0,0,0,183,0,96,16,0,0,0,0, - 32,133,130,143,0,0,0,0,43,16,67,0, - 178,0,64,20,255,255,104,36,64,18,8,0, - 2,131,3,60,192,246,99,36,33,40,67,0, - 255,255,132,36,22,0,130,44,170,0,64,16, - 128,16,4,0,2,131,1,60,33,8,34,0, - 144,148,34,140,0,0,0,0,8,0,64,0, - 0,0,0,0,2,0,2,36,16,0,2,162, - 17,0,2,146,0,0,195,140,0,0,0,0, - 15,52,192,8,2,0,66,52,33,32,32,2, - 17,0,3,146,4,0,2,36,16,0,2,162, - 40,0,0,166,44,0,17,174,2,0,99,52, - 156,71,192,12,17,0,3,162,255,255,66,48, - 33,16,34,2,48,0,2,174,40,52,192,8, - 52,0,0,166,17,0,3,146,2,0,2,36, - 16,0,2,162,243,51,192,8,40,0,17,174, - 17,0,3,146,2,0,2,36,16,0,2,162, - 243,51,192,8,40,0,17,174,66,0,2,36, - 13,0,0,21,16,0,2,162,24,133,132,143, - 0,0,0,0,64,25,4,0,35,24,100,0, - 128,17,3,0,35,16,67,0,192,16,2,0, - 33,16,68,0,128,24,2,0,33,16,67,0, - 178,51,192,8,192,17,2,0,152,0,2,60, - 128,150,66,52,40,0,2,174,17,0,2,146, - 0,0,0,0,199,51,192,8,2,0,66,52, - 17,0,3,146,4,0,2,36,16,0,2,162, - 20,0,162,36,44,0,2,174,26,0,162,36, - 40,0,0,166,48,0,2,174,243,51,192,8, - 52,0,0,166,2,0,2,36,16,0,2,162, - 17,0,2,146,1,0,3,36,40,0,3,174, - 2,0,66,52,40,52,192,8,17,0,2,162, - 17,0,3,146,0,0,0,0,241,51,192,8, - 67,0,2,36,65,0,2,36,16,0,2,162, - 17,0,2,146,168,0,163,140,0,0,0,0, - 15,52,192,8,2,0,66,52,65,0,2,36, - 16,0,2,162,156,0,162,140,0,1,164,140, - 22,52,192,8,0,0,0,0,65,0,2,36, - 16,0,2,162,17,0,2,146,0,1,163,140, - 0,0,0,0,15,52,192,8,2,0,66,52, - 65,0,2,36,16,0,2,162,17,0,2,146, - 164,0,163,140,0,0,0,0,15,52,192,8, - 2,0,66,52,65,0,2,36,16,0,2,162, - 17,0,2,146,160,0,163,140,0,0,0,0, - 15,52,192,8,2,0,66,52,17,0,3,146, - 65,0,2,36,16,0,2,162,40,0,0,174, - 2,0,99,52,40,52,192,8,17,0,3,162, - 65,0,2,36,16,0,2,162,172,0,162,140, - 4,1,164,140,22,52,192,8,0,0,0,0, - 65,0,2,36,16,0,2,162,17,0,2,146, - 4,1,163,140,0,0,0,0,15,52,192,8, - 2,0,66,52,65,0,2,36,16,0,2,162, - 17,0,2,146,184,0,163,140,0,0,0,0, - 15,52,192,8,2,0,66,52,65,0,2,36, - 16,0,2,162,17,0,2,146,188,0,163,140, - 2,0,66,52,40,0,3,174,40,52,192,8, - 17,0,2,162,66,0,2,36,16,0,2,162, - 172,0,162,140,176,0,164,140,17,0,3,146, - 
35,16,68,0,2,0,99,52,40,0,2,174, - 40,52,192,8,17,0,3,162,16,0,160,175, - 33,32,224,0,33,40,0,2,2,131,7,60, - 96,204,231,36,226,76,192,12,2,0,6,36, - 40,52,192,8,0,0,0,0,33,32,224,0, - 200,76,192,12,33,40,0,2,32,0,191,143, - 28,0,177,143,24,0,176,143,8,0,224,3, - 40,0,189,39,224,255,189,39,16,0,176,175, - 33,128,224,0,20,0,177,175,48,0,177,143, - 1,0,2,36,10,0,162,20,24,0,191,175, - 0,0,198,140,0,0,0,0,6,0,192,16, - 0,0,0,0,32,133,130,143,0,0,0,0, - 43,16,70,0,5,0,64,16,7,0,2,36, - 33,32,0,2,33,40,32,2,70,52,192,8, - 11,0,2,36,7,0,130,16,33,32,0,2, - 33,40,32,2,17,0,2,36,48,72,192,12, - 96,0,2,174,1,0,66,36,100,0,2,174, - 17,0,34,146,0,0,0,0,1,0,66,52, - 17,0,34,162,24,0,191,143,20,0,177,143, - 16,0,176,143,8,0,224,3,32,0,189,39, - 208,255,189,39,32,0,176,175,64,0,176,143, - 36,0,177,175,33,136,224,0,4,0,160,20, - 40,0,191,175,1,0,2,36,106,52,192,8, - 24,0,162,175,0,0,198,140,32,133,130,143, - 0,0,0,0,43,16,194,0,3,0,64,16, - 1,0,194,36,106,52,192,8,24,0,162,175, - 17,0,2,146,0,0,0,0,18,0,66,52, - 116,52,192,8,17,0,2,162,16,0,176,175, - 1,0,5,36,24,0,166,39,97,51,192,12, - 33,56,32,2,33,32,32,2,33,40,0,2, - 1,0,6,36,253,76,192,12,24,0,167,39, - 40,0,191,143,36,0,177,143,32,0,176,143, - 8,0,224,3,48,0,189,39,16,0,163,143, - 1,0,2,36,13,0,162,20,14,0,2,36, - 0,0,198,140,0,0,0,0,9,0,192,16, - 0,0,0,0,32,133,130,143,0,0,0,0, - 43,16,70,0,4,0,64,20,14,0,2,36, - 7,0,2,36,2,0,130,16,14,0,2,36, - 96,0,226,172,17,0,98,144,0,0,0,0, - 2,0,66,52,8,0,224,3,17,0,98,160, - 16,0,162,143,0,0,0,0,8,0,224,3, - 0,0,226,172,0,0,226,140,8,0,224,3, - 0,0,0,0,232,255,189,39,40,0,168,143, - 1,0,2,36,61,0,162,20,16,0,191,175, - 0,0,197,140,0,0,0,0,57,0,160,16, - 0,0,0,0,32,133,130,143,0,0,0,0, - 43,16,69,0,52,0,64,20,255,255,132,36, - 5,0,130,44,49,0,64,16,128,16,4,0, - 2,131,1,60,33,8,34,0,232,148,34,140, - 0,0,0,0,8,0,64,0,0,0,0,0, - 64,0,2,36,16,0,2,161,0,163,5,60, - 220,5,165,52,3,0,162,136,0,0,162,152, - 0,0,0,0,43,0,2,169,40,0,2,185, - 17,0,2,145,0,0,0,0,213,52,192,8, - 2,0,66,52,2,0,2,36,16,0,2,161, - 17,0,2,145,0,0,195,140,0,0,0,0, - 198,52,192,8,2,0,66,52,64,0,2,36, - 16,0,2,161,17,0,2,145,128,132,131,143, - 2,0,66,52,40,0,3,173,218,52,192,8, - 17,0,2,161,2,0,2,36,16,0,2,161, - 17,0,2,145,0,0,0,0,211,52,192,8, - 1,0,3,36,2,0,2,36,16,0,2,161, - 17,0,2,145,220,5,3,36,40,0,3,173, - 2,0,66,52,218,52,192,8,17,0,2,161, - 33,32,224,0,200,76,192,12,33,40,0,1, - 16,0,191,143,24,0,189,39,8,0,224,3, - 0,0,0,0,208,255,189,39,32,0,176,175, - 64,0,176,143,36,0,177,175,33,136,224,0, - 4,0,160,20,40,0,191,175,1,0,2,36, - 245,52,192,8,24,0,162,175,0,0,198,140, - 32,133,130,143,0,0,0,0,43,16,194,0, - 3,0,64,16,1,0,194,36,245,52,192,8, - 24,0,162,175,17,0,2,146,0,0,0,0, - 18,0,66,52,255,52,192,8,17,0,2,162, - 16,0,176,175,1,0,5,36,24,0,166,39, - 150,52,192,12,33,56,32,2,33,32,32,2, - 33,40,0,2,1,0,6,36,253,76,192,12, - 24,0,167,39,40,0,191,143,36,0,177,143, - 32,0,176,143,8,0,224,3,48,0,189,39, - 232,255,189,39,40,0,165,143,16,0,191,175, - 200,76,192,12,33,32,224,0,16,0,191,143, - 24,0,189,39,8,0,224,3,0,0,0,0, - 16,0,163,143,14,0,2,36,96,0,226,172, - 17,0,98,144,0,0,0,0,2,0,66,52, - 8,0,224,3,17,0,98,160,224,255,189,39, - 16,0,176,175,33,128,224,0,17,0,2,36, - 24,0,191,175,20,0,177,175,96,0,2,174, - 48,0,177,143,33,32,0,2,48,72,192,12, - 33,40,32,2,1,0,66,36,100,0,2,174, - 17,0,34,146,0,0,0,0,1,0,66,52, - 17,0,34,162,24,0,191,143,20,0,177,143, - 16,0,176,143,8,0,224,3,32,0,189,39, - 16,0,163,143,0,0,0,0,17,0,98,144, - 0,0,0,0,18,0,66,52,8,0,224,3, - 17,0,98,160,8,0,224,3,33,16,224,0, - 224,255,189,39,48,0,168,143,1,0,2,36, 
- 114,0,162,20,24,0,191,175,0,0,195,140, - 0,0,0,0,110,0,96,16,0,0,0,0, - 32,133,130,143,0,0,0,0,43,16,67,0, - 105,0,64,20,255,255,98,36,64,18,2,0, - 2,131,3,60,192,246,99,36,33,24,67,0, - 255,255,132,36,17,0,130,44,97,0,64,16, - 128,16,4,0,2,131,1,60,33,8,34,0, - 0,149,34,140,0,0,0,0,8,0,64,0, - 0,0,0,0,2,0,2,36,16,0,2,161, - 17,0,2,145,0,0,195,140,0,0,0,0, - 140,53,192,8,2,0,66,52,2,0,2,36, - 16,0,2,161,44,0,99,140,17,0,2,145, - 16,0,99,140,0,0,0,0,101,53,192,8, - 2,0,66,52,2,0,2,36,16,0,2,161, - 44,0,99,140,17,0,2,145,12,0,99,140, - 2,0,66,52,17,0,2,161,173,53,192,8, - 40,0,3,173,2,0,2,36,16,0,2,161, - 17,0,2,145,212,0,99,140,0,0,0,0, - 140,53,192,8,2,0,66,52,2,0,2,36, - 16,0,2,161,17,0,2,145,192,0,99,140, - 0,0,0,0,140,53,192,8,2,0,66,52, - 2,0,2,36,16,0,2,161,17,0,2,145, - 208,0,99,140,0,0,0,0,140,53,192,8, - 2,0,66,52,2,0,2,36,16,0,2,161, - 204,0,98,140,184,0,100,140,17,0,3,145, - 33,16,68,0,2,0,99,52,40,0,2,173, - 173,53,192,8,17,0,3,161,2,0,2,36, - 16,0,2,161,17,0,2,145,196,0,99,140, - 2,0,66,52,40,0,3,173,173,53,192,8, - 17,0,2,161,17,0,3,145,2,0,2,36, - 16,0,2,161,40,0,0,173,2,0,99,52, - 173,53,192,8,17,0,3,161,2,0,2,36, - 16,0,2,161,44,0,100,140,17,0,2,145, - 20,0,131,140,24,0,132,140,2,0,66,52, - 17,0,2,161,33,24,100,0,173,53,192,8, - 40,0,3,173,16,0,160,175,33,32,224,0, - 33,40,0,1,2,131,7,60,104,204,231,36, - 226,76,192,12,11,0,6,36,173,53,192,8, - 0,0,0,0,33,32,224,0,200,76,192,12, - 33,40,0,1,24,0,191,143,32,0,189,39, - 8,0,224,3,0,0,0,0,208,255,189,39, - 32,0,176,175,64,0,176,143,36,0,177,175, - 33,136,224,0,4,0,160,20,40,0,191,175, - 1,0,2,36,200,53,192,8,24,0,162,175, - 0,0,198,140,32,133,130,143,0,0,0,0, - 43,16,194,0,3,0,64,16,1,0,194,36, - 200,53,192,8,24,0,162,175,17,0,2,146, - 0,0,0,0,18,0,66,52,210,53,192,8, - 17,0,2,162,16,0,176,175,1,0,5,36, - 24,0,166,39,52,53,192,12,33,56,32,2, - 33,32,32,2,33,40,0,2,1,0,6,36, - 253,76,192,12,24,0,167,39,40,0,191,143, - 36,0,177,143,32,0,176,143,8,0,224,3, - 48,0,189,39,0,0,226,140,8,0,224,3, - 0,0,0,0,3,131,2,60,28,18,66,148, - 0,0,0,0,2,0,66,48,2,0,64,16, - 2,0,3,36,1,0,3,36,8,0,224,3, - 33,16,96,0,232,255,189,39,40,0,164,143, - 16,0,191,175,1,0,132,56,186,59,192,12, - 1,0,132,44,16,0,191,143,24,0,189,39, - 8,0,224,3,0,0,0,0,16,0,163,143, - 6,0,2,36,0,0,98,172,8,0,224,3, - 33,16,224,0,224,255,189,39,48,0,168,143, - 1,0,2,36,52,0,162,20,24,0,191,175, - 0,0,197,140,0,0,0,0,48,0,160,16, - 0,0,0,0,24,133,130,143,0,0,0,0, - 43,16,69,0,43,0,64,20,255,255,132,36, - 5,0,130,44,40,0,64,16,128,16,4,0, - 2,131,1,60,33,8,34,0,72,149,34,140, - 0,0,0,0,8,0,64,0,0,0,0,0, - 2,0,2,36,16,0,2,161,17,0,2,145, - 0,0,195,140,2,0,66,52,40,0,3,173, - 45,54,192,8,17,0,2,161,2,0,2,36, - 16,0,2,161,0,0,194,140,17,0,3,145, - 1,0,66,36,2,0,99,52,40,0,2,173, - 45,54,192,8,17,0,3,161,16,0,160,175, - 33,32,224,0,33,40,0,1,2,131,7,60, - 148,204,231,36,226,76,192,12,2,0,6,36, - 45,54,192,8,0,0,0,0,17,0,3,145, - 2,0,2,36,16,0,2,161,40,0,0,173, - 2,0,99,52,45,54,192,8,17,0,3,161, - 33,32,224,0,200,76,192,12,33,40,0,1, - 24,0,191,143,32,0,189,39,8,0,224,3, - 0,0,0,0,208,255,189,39,32,0,176,175, - 64,0,176,143,36,0,177,175,33,136,224,0, - 4,0,160,20,40,0,191,175,1,0,2,36, - 72,54,192,8,24,0,162,175,0,0,198,140, - 24,133,130,143,0,0,0,0,43,16,194,0, - 3,0,64,16,1,0,194,36,72,54,192,8, - 24,0,162,175,17,0,2,146,0,0,0,0, - 18,0,66,52,82,54,192,8,17,0,2,162, - 16,0,176,175,1,0,5,36,24,0,166,39, - 242,53,192,12,33,56,32,2,33,32,32,2, - 33,40,0,2,1,0,6,36,253,76,192,12, - 24,0,167,39,40,0,191,143,36,0,177,143, - 32,0,176,143,8,0,224,3,48,0,189,39, - 
0,0,226,148,8,0,224,3,0,0,0,0, - 8,0,224,3,33,16,224,0,16,0,163,143, - 8,0,2,36,0,0,98,172,8,0,224,3, - 33,16,224,0,224,255,189,39,16,0,176,175, - 48,0,176,143,1,0,2,36,24,0,191,175, - 126,0,162,20,20,0,177,175,0,0,198,140, - 0,0,0,0,122,0,192,16,0,0,0,0, - 24,133,130,143,0,0,0,0,43,16,70,0, - 117,0,64,20,192,17,6,0,3,131,3,60, - 16,13,99,36,33,136,67,0,255,255,132,36, - 10,0,130,44,110,0,64,16,128,16,4,0, - 2,131,1,60,33,8,34,0,96,149,34,140, - 0,0,0,0,8,0,64,0,0,0,0,0, - 17,0,3,146,2,0,2,36,16,0,2,162, - 211,54,192,8,40,0,6,174,2,0,2,36, - 16,0,2,162,0,0,34,150,17,0,3,146, - 0,0,0,0,143,54,192,8,2,18,2,0, - 2,0,2,36,16,0,2,162,4,0,34,142, - 17,0,3,146,1,0,66,36,2,0,99,52, - 40,0,2,174,232,54,192,8,17,0,3,162, - 2,0,2,36,16,0,2,162,4,0,34,142, - 0,0,0,0,2,0,64,16,2,0,3,36, - 1,0,3,36,17,0,2,146,40,0,3,174, - 2,0,66,52,232,54,192,8,17,0,2,162, - 2,0,2,36,16,0,2,162,17,0,2,146, - 8,0,35,142,0,0,0,0,226,54,192,8, - 2,0,66,52,9,50,192,12,8,0,4,36, - 33,48,64,0,15,0,34,138,12,0,34,154, - 19,0,35,138,16,0,35,154,3,0,194,168, - 0,0,194,184,7,0,195,168,196,54,192,8, - 4,0,195,184,2,0,2,36,16,0,2,162, - 17,0,2,146,20,0,35,142,0,0,0,0, - 226,54,192,8,2,0,66,52,9,50,192,12, - 8,0,4,36,33,48,64,0,27,0,34,138, - 24,0,34,154,31,0,35,138,28,0,35,154, - 3,0,194,168,0,0,194,184,7,0,195,168, - 4,0,195,184,0,0,194,148,0,0,0,0, - 0,26,2,0,2,18,2,0,37,24,98,0, - 0,0,195,164,17,0,3,146,4,0,2,36, - 16,0,2,162,1,0,2,36,40,0,2,166, - 8,0,194,36,44,0,6,174,48,0,2,174, - 52,0,0,166,2,0,99,52,232,54,192,8, - 17,0,3,162,2,0,2,36,16,0,2,162, - 17,0,2,146,32,0,35,150,0,0,0,0, - 226,54,192,8,2,0,66,52,2,0,2,36, - 16,0,2,162,17,0,2,146,104,0,35,142, - 2,0,66,52,40,0,3,174,232,54,192,8, - 17,0,2,162,33,32,224,0,200,76,192,12, - 33,40,0,2,24,0,191,143,20,0,177,143, - 16,0,176,143,8,0,224,3,32,0,189,39, - 224,255,189,39,16,0,176,175,33,128,224,0, - 20,0,177,175,48,0,177,143,1,0,2,36, - 10,0,162,20,24,0,191,175,0,0,198,140, - 0,0,0,0,6,0,192,16,0,0,0,0, - 24,133,130,143,0,0,0,0,43,16,70,0, - 5,0,64,16,2,0,2,36,33,32,0,2, - 33,40,32,2,13,55,192,8,11,0,2,36, - 14,0,130,16,2,0,130,44,5,0,64,20, - 6,0,130,44,3,0,64,16,4,0,130,44, - 8,0,64,16,0,0,0,0,33,32,0,2, - 33,40,32,2,17,0,2,36,48,72,192,12, - 96,0,2,174,1,0,66,36,100,0,2,174, - 17,0,34,146,0,0,0,0,1,0,66,52, - 17,0,34,162,24,0,191,143,20,0,177,143, - 16,0,176,143,8,0,224,3,32,0,189,39, - 208,255,189,39,32,0,176,175,64,0,176,143, - 36,0,177,175,33,136,224,0,4,0,160,20, - 40,0,191,175,1,0,2,36,49,55,192,8, - 24,0,162,175,0,0,198,140,24,133,130,143, - 0,0,0,0,43,16,194,0,3,0,64,16, - 1,0,194,36,49,55,192,8,24,0,162,175, - 17,0,2,146,0,0,0,0,18,0,66,52, - 59,55,192,8,17,0,2,162,16,0,176,175, - 1,0,5,36,24,0,166,39,97,54,192,12, - 33,56,32,2,33,32,32,2,33,40,0,2, - 1,0,6,36,253,76,192,12,24,0,167,39, - 40,0,191,143,36,0,177,143,32,0,176,143, - 8,0,224,3,48,0,189,39,232,255,189,39, - 33,64,128,0,16,0,176,175,40,0,176,143, - 1,0,2,36,57,0,162,20,20,0,191,175, - 0,0,196,140,0,0,0,0,54,0,128,16, - 14,0,2,36,24,133,130,143,0,0,0,0, - 43,16,68,0,49,0,64,20,14,0,2,36, - 192,17,4,0,3,131,3,60,16,13,99,36, - 33,48,67,0,4,0,2,36,21,0,2,17, - 5,0,2,45,5,0,64,16,2,0,2,36, - 8,0,2,17,14,0,2,36,129,55,192,8, - 96,0,226,172,5,0,2,36,28,0,2,17, - 14,0,2,36,129,55,192,8,96,0,226,172, - 0,0,195,144,0,0,0,0,0,0,195,164, - 40,0,2,142,0,0,0,0,0,18,2,0, - 37,24,98,0,129,55,192,8,0,0,195,164, - 40,0,3,142,0,0,0,0,5,0,101,16, - 2,0,2,36,7,0,98,16,14,0,2,36, - 129,55,192,8,96,0,226,172,187,42,192,12, - 1,0,5,36,129,55,192,8,0,0,0,0, - 187,42,192,12,33,40,0,0,129,55,192,8, - 0,0,0,0,40,0,2,142,0,0,0,0, 
- 129,55,192,8,8,0,194,172,14,0,2,36, - 96,0,226,172,17,0,2,146,0,0,0,0, - 2,0,66,52,17,0,2,162,20,0,191,143, - 16,0,176,143,8,0,224,3,24,0,189,39, - 216,255,189,39,20,0,177,175,33,136,128,0, - 28,0,179,175,33,152,160,0,24,0,178,175, - 33,144,224,0,16,0,176,175,56,0,176,143, - 1,0,2,36,46,0,98,22,32,0,191,175, - 0,0,196,140,0,0,0,0,42,0,128,16, - 0,0,0,0,58,25,192,12,0,0,0,0, - 33,32,64,0,37,0,128,16,2,0,2,36, - 11,0,34,18,3,0,34,46,5,0,64,16, - 3,0,2,36,15,0,51,18,4,0,2,36, - 195,55,192,8,33,32,64,2,20,0,34,18, - 33,32,64,2,195,55,192,8,0,0,0,0, - 2,0,2,36,16,0,2,162,17,0,2,146, - 10,0,131,132,2,0,66,52,40,0,3,174, - 197,55,192,8,17,0,2,162,17,0,3,146, - 16,0,2,162,4,0,130,36,44,0,2,174, - 10,0,130,36,40,0,0,166,48,0,2,174, - 191,55,192,8,52,0,0,166,17,0,3,146, - 2,0,2,36,16,0,2,162,40,0,17,174, - 2,0,99,52,197,55,192,8,17,0,3,162, - 33,32,64,2,200,76,192,12,33,40,0,2, - 32,0,191,143,28,0,179,143,24,0,178,143, - 20,0,177,143,16,0,176,143,8,0,224,3, - 40,0,189,39,208,255,189,39,32,0,176,175, - 64,0,176,143,40,0,178,175,33,144,128,0, - 36,0,177,175,33,136,224,0,3,0,160,20, - 44,0,191,175,218,55,192,8,1,0,2,36, - 0,0,194,140,0,0,0,0,1,0,66,36, - 24,0,162,175,24,0,164,143,58,25,192,12, - 0,0,0,0,6,0,64,20,33,32,64,2, - 17,0,2,146,0,0,0,0,18,0,66,52, - 239,55,192,8,17,0,2,162,16,0,176,175, - 1,0,5,36,24,0,166,39,137,55,192,12, - 33,56,32,2,33,32,32,2,33,40,0,2, - 1,0,6,36,253,76,192,12,24,0,167,39, - 44,0,191,143,40,0,178,143,36,0,177,143, - 32,0,176,143,8,0,224,3,48,0,189,39, - 232,255,189,39,40,0,168,143,1,0,2,36, - 63,0,162,20,16,0,191,175,0,0,195,140, - 0,0,0,0,59,0,96,16,0,0,0,0, - 24,133,130,143,0,0,0,0,43,16,67,0, - 54,0,64,20,64,18,3,0,2,131,3,60, - 192,246,99,36,33,24,67,0,255,255,132,36, - 5,0,130,44,47,0,64,16,128,16,4,0, - 2,131,1,60,33,8,34,0,136,149,34,140, - 0,0,0,0,8,0,64,0,0,0,0,0, - 2,0,2,36,16,0,2,161,17,0,2,145, - 0,0,195,140,0,0,0,0,43,56,192,8, - 2,0,66,52,2,0,2,36,16,0,2,161, - 17,0,2,145,220,5,3,36,40,0,3,173, - 2,0,66,52,59,56,192,8,17,0,2,161, - 65,0,2,36,16,0,2,161,17,0,2,145, - 156,0,99,140,0,0,0,0,43,56,192,8, - 2,0,66,52,65,0,2,36,16,0,2,161, - 17,0,2,145,172,0,99,140,2,0,66,52, - 40,0,3,173,59,56,192,8,17,0,2,161, - 65,0,2,36,16,0,2,161,156,0,98,140, - 252,0,100,140,17,0,3,145,35,16,68,0, - 2,0,99,52,40,0,2,173,59,56,192,8, - 17,0,3,161,33,32,224,0,200,76,192,12, - 33,40,0,1,16,0,191,143,24,0,189,39, - 8,0,224,3,0,0,0,0,208,255,189,39, - 32,0,176,175,64,0,176,143,36,0,177,175, - 33,136,224,0,4,0,160,20,40,0,191,175, - 1,0,2,36,86,56,192,8,24,0,162,175, - 0,0,198,140,24,133,130,143,0,0,0,0, - 43,16,194,0,3,0,64,16,1,0,194,36, - 86,56,192,8,24,0,162,175,17,0,2,146, - 0,0,0,0,18,0,66,52,96,56,192,8, - 17,0,2,162,16,0,176,175,1,0,5,36, - 24,0,166,39,245,55,192,12,33,56,32,2, - 33,32,32,2,33,40,0,2,1,0,6,36, - 253,76,192,12,24,0,167,39,40,0,191,143, - 36,0,177,143,32,0,176,143,8,0,224,3, - 48,0,189,39,0,0,0,0,0,0,0,0, - 0,0,0,0,200,255,189,39,72,0,163,143, - 44,0,177,175,33,136,128,0,20,0,165,175, - 33,40,224,0,2,131,2,60,172,210,66,140, - 152,132,135,143,33,32,0,0,48,0,191,175, - 40,0,176,175,24,0,160,175,28,0,160,175, - 36,0,160,175,16,0,162,175,104,77,192,12, - 32,0,163,175,33,128,64,0,3,0,0,22, - 33,32,0,2,143,56,192,8,33,16,0,0, - 197,80,192,12,33,40,32,2,255,255,3,36, - 5,0,67,20,0,0,0,0,167,83,192,12, - 33,32,0,2,143,56,192,8,33,16,0,0, - 167,83,192,12,33,32,0,2,8,0,34,142, - 4,0,35,142,0,0,0,0,35,16,67,0, - 255,255,66,48,48,0,191,143,44,0,177,143, - 40,0,176,143,8,0,224,3,56,0,189,39, - 200,255,189,39,44,0,177,175,33,136,128,0, - 
72,0,168,143,33,32,0,0,20,0,165,175, - 33,40,224,0,2,131,3,60,172,210,99,140, - 152,132,135,143,1,0,2,36,48,0,191,175, - 40,0,176,175,24,0,162,175,28,0,160,175, - 36,0,160,175,16,0,163,175,104,77,192,12, - 32,0,168,175,33,128,64,0,3,0,0,22, - 33,32,0,2,188,56,192,8,33,16,0,0, - 197,80,192,12,33,40,32,2,255,255,3,36, - 5,0,67,20,0,0,0,0,167,83,192,12, - 33,32,0,2,188,56,192,8,33,16,0,0, - 167,83,192,12,33,32,0,2,8,0,34,142, - 4,0,35,142,0,0,0,0,35,16,67,0, - 255,255,66,48,48,0,191,143,44,0,177,143, - 40,0,176,143,8,0,224,3,56,0,189,39, - 176,255,189,39,44,0,177,175,108,0,177,143, - 68,0,183,175,96,0,183,143,72,0,190,175, - 100,0,190,143,48,0,178,175,33,144,128,0, - 56,0,180,175,33,160,160,0,52,0,179,175, - 33,152,192,0,40,0,176,175,33,128,224,0, - 60,0,181,175,1,0,21,36,76,0,191,175, - 3,0,53,18,64,0,182,175,9,57,192,8, - 255,255,2,36,4,0,6,36,2,131,22,60, - 48,205,214,38,160,132,132,143,104,0,165,143, - 128,32,4,0,80,68,192,12,33,32,150,0, - 33,32,0,0,33,40,0,2,152,132,135,143, - 2,0,2,36,24,0,162,175,160,132,130,143, - 2,131,3,60,172,210,99,140,33,48,96,2, - 20,0,180,175,28,0,160,175,32,0,183,175, - 36,0,181,175,1,0,81,36,104,77,192,12, - 16,0,163,175,33,128,64,0,23,0,0,18, - 33,40,0,0,16,0,190,175,33,32,0,2, - 33,48,32,2,108,84,192,12,33,56,192,2, - 255,255,17,36,13,0,81,16,33,32,0,2, - 197,80,192,12,33,40,64,2,9,0,81,16, - 0,0,0,0,167,83,192,12,33,32,0,2, - 8,0,66,142,4,0,67,142,0,0,0,0, - 35,16,67,0,9,57,192,8,255,255,66,48, - 167,83,192,12,33,32,0,2,33,16,0,0, - 76,0,191,143,72,0,190,143,68,0,183,143, - 64,0,182,143,60,0,181,143,56,0,180,143, - 52,0,179,143,48,0,178,143,44,0,177,143, - 40,0,176,143,8,0,224,3,80,0,189,39, - 176,255,189,39,44,0,177,175,108,0,177,143, - 68,0,183,175,96,0,183,143,72,0,190,175, - 100,0,190,143,48,0,178,175,33,144,128,0, - 56,0,180,175,33,160,160,0,52,0,179,175, - 33,152,192,0,40,0,176,175,33,128,224,0, - 60,0,181,175,1,0,21,36,76,0,191,175, - 3,0,53,18,64,0,182,175,93,57,192,8, - 255,255,2,36,4,0,6,36,2,131,22,60, - 48,205,214,38,160,132,132,143,104,0,165,143, - 128,32,4,0,80,68,192,12,33,32,150,0, - 33,32,0,0,33,40,0,2,152,132,135,143, - 3,0,2,36,24,0,162,175,160,132,130,143, - 2,131,3,60,172,210,99,140,33,48,96,2, - 20,0,180,175,28,0,160,175,32,0,183,175, - 36,0,181,175,1,0,81,36,104,77,192,12, - 16,0,163,175,33,128,64,0,23,0,0,18, - 33,40,0,0,16,0,190,175,33,32,0,2, - 33,48,32,2,108,84,192,12,33,56,192,2, - 255,255,17,36,13,0,81,16,33,32,0,2, - 197,80,192,12,33,40,64,2,9,0,81,16, - 0,0,0,0,167,83,192,12,33,32,0,2, - 8,0,66,142,4,0,67,142,0,0,0,0, - 35,16,67,0,93,57,192,8,255,255,66,48, - 167,83,192,12,33,32,0,2,33,16,0,0, - 76,0,191,143,72,0,190,143,68,0,183,143, - 64,0,182,143,60,0,181,143,56,0,180,143, - 52,0,179,143,48,0,178,143,44,0,177,143, - 40,0,176,143,8,0,224,3,80,0,189,39, - 200,255,189,39,44,0,177,175,33,136,128,0, - 72,0,168,143,33,32,0,0,20,0,165,175, - 33,40,224,0,2,131,3,60,172,210,99,140, - 152,132,135,143,4,0,2,36,48,0,191,175, - 40,0,176,175,24,0,162,175,28,0,160,175, - 36,0,160,175,16,0,163,175,104,77,192,12, - 32,0,168,175,33,128,64,0,3,0,0,22, - 33,32,0,2,145,57,192,8,33,16,0,0, - 197,80,192,12,33,40,32,2,255,255,3,36, - 5,0,67,20,0,0,0,0,167,83,192,12, - 33,32,0,2,145,57,192,8,33,16,0,0, - 167,83,192,12,33,32,0,2,8,0,34,142, - 4,0,35,142,0,0,0,0,35,16,67,0, - 255,255,66,48,48,0,191,143,44,0,177,143, - 40,0,176,143,8,0,224,3,56,0,189,39, - 200,255,189,39,44,0,177,175,33,136,128,0, - 72,0,163,143,33,32,0,0,20,0,165,175, - 33,40,224,0,164,132,135,143,2,131,2,60, - 92,205,66,36,16,0,162,175,6,0,2,36, - 
24,0,162,175,1,0,2,36,48,0,191,175, - 40,0,176,175,28,0,162,175,36,0,160,175, - 104,77,192,12,32,0,163,175,33,128,64,0, - 3,0,0,22,33,32,0,2,191,57,192,8, - 33,16,0,0,197,80,192,12,33,40,32,2, - 255,255,3,36,5,0,67,20,0,0,0,0, - 167,83,192,12,33,32,0,2,191,57,192,8, - 33,16,0,0,167,83,192,12,33,32,0,2, - 8,0,34,142,4,0,35,142,0,0,0,0, - 35,16,67,0,255,255,66,48,48,0,191,143, - 44,0,177,143,40,0,176,143,8,0,224,3, - 56,0,189,39,200,255,189,39,44,0,177,175, - 33,136,128,0,72,0,163,143,33,32,0,0, - 20,0,165,175,33,40,224,0,164,132,135,143, - 2,131,2,60,92,205,66,36,16,0,162,175, - 6,0,2,36,24,0,162,175,2,0,2,36, - 48,0,191,175,40,0,176,175,28,0,162,175, - 36,0,160,175,104,77,192,12,32,0,163,175, - 33,128,64,0,3,0,0,22,33,32,0,2, - 237,57,192,8,33,16,0,0,197,80,192,12, - 33,40,32,2,255,255,3,36,5,0,67,20, - 0,0,0,0,167,83,192,12,33,32,0,2, - 237,57,192,8,33,16,0,0,167,83,192,12, - 33,32,0,2,8,0,34,142,4,0,35,142, - 0,0,0,0,35,16,67,0,255,255,66,48, - 48,0,191,143,44,0,177,143,40,0,176,143, - 8,0,224,3,56,0,189,39,0,0,0,0, - 0,0,0,0,224,255,189,39,24,0,178,175, - 33,144,128,0,20,0,177,175,3,131,17,60, - 0,18,49,38,16,0,176,175,33,128,0,0, - 28,0,191,175,208,133,128,175,60,65,192,12, - 33,32,0,2,0,0,34,166,1,0,16,38, - 64,0,2,42,250,255,64,20,2,0,49,38, - 3,131,3,60,18,18,99,144,255,0,2,36, - 3,0,98,16,0,0,0,0,6,0,64,18, - 0,163,4,60,75,59,192,12,32,0,4,36, - 87,59,192,12,255,0,4,36,0,163,4,60, - 220,5,132,52,176,132,133,39,168,71,192,12, - 4,0,6,36,25,0,64,20,0,163,4,60, - 3,131,16,60,20,18,16,38,33,32,0,2, - 176,132,133,39,168,71,192,12,4,0,6,36, - 3,0,64,16,0,163,4,60,7,0,64,18, - 0,0,0,0,220,5,132,52,33,40,0,0, - 144,71,192,12,4,0,6,36,47,58,192,8, - 0,163,4,60,0,163,5,60,220,5,165,52, - 3,0,2,138,0,0,2,154,0,0,0,0, - 3,0,162,168,0,0,162,184,0,163,4,60, - 99,59,192,12,220,5,132,52,0,163,4,60, - 16,6,132,52,176,132,133,39,168,71,192,12, - 4,0,6,36,25,0,64,20,0,163,4,60, - 3,131,16,60,68,18,16,38,33,32,0,2, - 176,132,133,39,168,71,192,12,4,0,6,36, - 3,0,64,16,0,163,4,60,7,0,64,18, - 0,0,0,0,16,6,132,52,33,40,0,0, - 144,71,192,12,4,0,6,36,80,58,192,8, - 0,163,4,60,0,163,5,60,16,6,165,52, - 3,0,2,138,0,0,2,154,0,0,0,0, - 3,0,162,168,0,0,162,184,0,163,4,60, - 119,59,192,12,16,6,132,52,0,163,4,60, - 224,5,132,52,176,132,133,39,168,71,192,12, - 4,0,6,36,25,0,64,20,0,163,4,60, - 3,131,16,60,24,18,16,38,33,32,0,2, - 176,132,133,39,168,71,192,12,4,0,6,36, - 3,0,64,16,0,163,4,60,7,0,64,18, - 0,0,0,0,224,5,132,52,33,40,0,0, - 144,71,192,12,4,0,6,36,113,58,192,8, - 0,163,4,60,0,163,5,60,224,5,165,52, - 3,0,2,138,0,0,2,154,0,0,0,0, - 3,0,162,168,0,0,162,184,0,163,4,60, - 139,59,192,12,224,5,132,52,3,131,3,60, - 28,18,99,36,0,0,98,148,0,0,0,0, - 0,128,66,48,3,0,64,20,1,0,2,36, - 2,0,64,18,0,0,0,0,0,0,98,164, - 0,163,2,60,144,1,66,140,0,0,0,0, - 7,0,64,20,0,0,0,0,3,131,3,60, - 28,18,99,36,0,0,98,148,0,0,0,0, - 146,58,192,8,254,255,66,48,0,163,2,60, - 144,1,66,140,0,0,0,0,7,0,64,24, - 0,0,0,0,3,131,3,60,28,18,99,36, - 0,0,98,148,0,0,0,0,1,0,66,52, - 0,0,98,164,3,131,4,60,28,18,132,148, - 0,0,0,0,159,59,192,12,1,0,132,48, - 3,131,3,60,80,18,99,144,255,0,2,36, - 3,0,98,16,0,0,0,0,5,0,64,18, - 0,0,0,0,2,131,4,60,160,149,132,36, - 205,59,192,12,14,0,5,36,3,131,3,60, - 96,18,99,144,255,0,2,36,3,0,98,16, - 0,0,0,0,5,0,64,18,0,0,0,0, - 2,131,4,60,176,149,132,36,239,59,192,12, - 11,0,5,36,3,131,3,60,112,18,99,144, - 255,0,2,36,3,0,98,16,0,0,0,0, - 5,0,64,18,0,0,0,0,2,131,4,60, - 188,149,132,36,17,60,192,12,15,0,5,36, - 0,163,2,60,140,1,66,140,0,0,0,0, - 7,0,64,16,15,0,2,60,0,163,3,60, - 
140,1,99,140,64,66,66,52,43,16,67,0, - 26,0,64,16,0,0,0,0,3,131,3,60, - 64,18,99,140,255,255,2,36,3,0,98,16, - 44,1,2,36,4,0,64,18,0,0,0,0, - 0,163,1,60,221,58,192,8,140,1,34,172, - 5,0,96,20,15,0,4,60,1,0,2,36, - 0,163,1,60,221,58,192,8,140,1,34,172, - 64,66,132,52,43,16,131,0,4,0,64,16, - 0,0,0,0,0,163,1,60,221,58,192,8, - 140,1,36,172,0,163,1,60,140,1,35,172, - 0,163,4,60,140,1,132,140,51,60,192,12, - 0,0,0,0,28,0,191,143,24,0,178,143, - 20,0,177,143,16,0,176,143,8,0,224,3, - 32,0,189,39,208,255,189,39,20,0,177,175, - 33,136,128,0,36,0,181,175,33,168,160,0, - 28,0,179,175,33,152,192,0,44,0,191,175, - 40,0,182,175,32,0,180,175,24,0,178,175, - 168,71,192,12,16,0,176,175,76,0,64,16, - 0,0,0,0,3,131,22,60,0,18,214,38, - 35,16,54,2,194,31,2,0,33,16,67,0, - 67,144,2,0,1,0,98,38,194,31,2,0, - 33,16,67,0,67,128,2,0,255,255,20,38, - 64,0,130,46,14,0,64,20,64,0,66,46, - 2,131,4,60,204,149,132,36,180,132,144,39, - 33,40,0,2,2,131,7,60,236,149,231,36, - 15,63,192,12,143,0,6,36,1,0,4,36, - 33,40,0,2,188,7,192,12,143,0,6,36, - 64,0,66,46,14,0,64,20,33,32,32,2, - 2,131,4,60,204,149,132,36,180,132,144,39, - 33,40,0,2,2,131,7,60,20,150,231,36, - 15,63,192,12,144,0,6,36,1,0,4,36, - 33,40,0,2,188,7,192,12,144,0,6,36, - 33,32,32,2,33,40,160,2,80,68,192,12, - 33,48,96,2,64,16,18,0,33,136,86,0, - 33,128,128,2,255,255,2,36,25,0,2,18, - 255,255,20,36,180,132,147,39,33,32,64,2, - 208,133,130,143,1,0,82,38,1,0,66,36, - 208,133,130,175,0,0,37,150,0,0,0,0, - 162,65,192,12,2,0,49,38,10,0,64,20, - 33,40,96,2,2,131,4,60,204,149,132,36, - 188,132,135,39,15,63,192,12,159,0,6,36, - 1,0,4,36,33,40,96,2,188,7,192,12, - 159,0,6,36,255,255,16,38,235,255,20,22, - 33,32,64,2,44,0,191,143,40,0,182,143, - 36,0,181,143,32,0,180,143,28,0,179,143, - 24,0,178,143,20,0,177,143,16,0,176,143, - 8,0,224,3,48,0,189,39,224,255,189,39, - 16,0,164,163,3,131,4,60,18,18,132,36, - 16,0,165,39,24,0,191,175,231,58,192,12, - 1,0,6,36,24,0,191,143,32,0,189,39, - 8,0,224,3,0,0,0,0,224,255,189,39, - 16,0,164,163,3,131,4,60,19,18,132,36, - 16,0,165,39,24,0,191,175,231,58,192,12, - 1,0,6,36,24,0,191,143,32,0,189,39, - 8,0,224,3,0,0,0,0,232,255,189,39, - 33,40,128,0,16,0,176,175,3,131,16,60, - 20,18,16,38,33,32,0,2,20,0,191,175, - 231,58,192,12,4,0,6,36,0,163,5,60, - 220,5,165,52,3,0,2,138,0,0,2,154, - 0,0,0,0,3,0,162,168,0,0,162,184, - 20,0,191,143,16,0,176,143,8,0,224,3, - 24,0,189,39,232,255,189,39,33,40,128,0, - 16,0,176,175,3,131,16,60,68,18,16,38, - 33,32,0,2,20,0,191,175,231,58,192,12, - 4,0,6,36,0,163,5,60,16,6,165,52, - 3,0,2,138,0,0,2,154,0,0,0,0, - 3,0,162,168,0,0,162,184,20,0,191,143, - 16,0,176,143,8,0,224,3,24,0,189,39, - 232,255,189,39,33,40,128,0,16,0,176,175, - 3,131,16,60,24,18,16,38,33,32,0,2, - 20,0,191,175,231,58,192,12,4,0,6,36, - 0,163,5,60,224,5,165,52,3,0,2,138, - 0,0,2,154,0,0,0,0,3,0,162,168, - 0,0,162,184,20,0,191,143,16,0,176,143, - 8,0,224,3,24,0,189,39,3,131,2,60, - 28,18,66,148,224,255,189,39,24,0,191,175, - 8,0,128,16,16,0,162,167,1,0,66,52, - 16,0,162,167,1,0,2,36,44,133,130,175, - 0,163,1,60,177,59,192,8,144,1,34,172, - 254,255,66,48,16,0,162,167,44,133,128,175, - 0,163,1,60,144,1,32,172,3,131,4,60, - 28,18,132,36,16,0,165,39,231,58,192,12, - 2,0,6,36,24,0,191,143,32,0,189,39, - 8,0,224,3,0,0,0,0,3,131,2,60, - 28,18,66,148,224,255,189,39,24,0,191,175, - 3,0,128,16,16,0,162,167,195,59,192,8, - 2,0,66,52,253,255,66,48,16,0,162,167, - 3,131,4,60,28,18,132,36,16,0,165,39, - 231,58,192,12,2,0,6,36,24,0,191,143, - 32,0,189,39,8,0,224,3,0,0,0,0, - 216,255,189,39,32,0,191,175,33,56,128,0, - 
33,48,160,0,3,0,226,136,0,0,226,152, - 7,0,227,136,4,0,227,152,11,0,228,136, - 8,0,228,152,15,0,229,136,12,0,229,152, - 19,0,162,171,16,0,162,187,23,0,163,171, - 20,0,163,187,27,0,164,171,24,0,164,187, - 31,0,165,171,28,0,165,187,16,0,194,44, - 3,0,64,16,16,0,163,39,33,16,102,0, - 0,0,64,160,3,131,4,60,80,18,132,36, - 33,40,224,0,231,58,192,12,16,0,6,36, - 32,0,191,143,40,0,189,39,8,0,224,3, - 0,0,0,0,216,255,189,39,32,0,191,175, - 33,56,128,0,33,48,160,0,3,0,226,136, - 0,0,226,152,7,0,227,136,4,0,227,152, - 11,0,228,136,8,0,228,152,15,0,229,136, - 12,0,229,152,19,0,162,171,16,0,162,187, - 23,0,163,171,20,0,163,187,27,0,164,171, - 24,0,164,187,31,0,165,171,28,0,165,187, - 16,0,194,44,3,0,64,16,16,0,163,39, - 33,16,102,0,0,0,64,160,3,131,4,60, - 96,18,132,36,33,40,224,0,231,58,192,12, - 16,0,6,36,32,0,191,143,40,0,189,39, - 8,0,224,3,0,0,0,0,216,255,189,39, - 32,0,191,175,33,56,128,0,33,48,160,0, - 3,0,226,136,0,0,226,152,7,0,227,136, - 4,0,227,152,11,0,228,136,8,0,228,152, - 15,0,229,136,12,0,229,152,19,0,162,171, - 16,0,162,187,23,0,163,171,20,0,163,187, - 27,0,164,171,24,0,164,187,31,0,165,171, - 28,0,165,187,16,0,194,44,3,0,64,16, - 16,0,163,39,33,16,102,0,0,0,64,160, - 3,131,4,60,112,18,132,36,33,40,224,0, - 231,58,192,12,16,0,6,36,32,0,191,143, - 40,0,189,39,8,0,224,3,0,0,0,0, - 232,255,189,39,15,0,2,60,54,66,66,52, - 24,0,164,175,246,255,132,36,43,16,68,0, - 3,0,64,16,16,0,191,175,44,1,2,36, - 24,0,162,175,3,131,4,60,64,18,132,36, - 24,0,165,39,231,58,192,12,4,0,6,36, - 24,0,162,143,0,163,1,60,140,1,34,172, - 16,0,191,143,24,0,189,39,8,0,224,3, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,232,255,189,39,16,0,191,175, - 0,38,4,0,196,64,192,12,3,38,4,0, - 16,0,191,143,24,0,189,39,8,0,224,3, - 0,0,0,0,232,255,189,39,16,0,191,175, - 0,38,4,0,196,64,192,12,3,38,4,0, - 16,0,191,143,24,0,189,39,8,0,224,3, - 0,0,0,0,160,255,189,39,112,0,162,143, - 72,0,176,175,33,128,224,0,88,0,180,175, - 33,160,0,0,84,0,179,175,33,152,192,0, - 92,0,191,175,80,0,178,175,7,0,160,16, - 76,0,177,175,6,0,65,4,51,0,177,39, - 45,0,20,36,3,0,0,18,35,16,2,0, - 255,255,16,38,51,0,177,39,51,0,160,163, - 27,0,68,0,2,0,128,20,0,0,0,0, - 13,0,7,0,18,24,0,0,16,16,0,0, - 2,131,1,60,33,8,34,0,128,205,34,144, - 255,255,49,38,2,0,0,18,0,0,34,162, - 255,255,16,38,33,16,96,0,241,255,64,20, - 1,0,3,36,0,22,19,0,3,22,2,0, - 11,0,67,20,33,32,128,2,255,255,16,38, - 255,255,2,36,7,0,2,18,0,0,0,0, - 255,255,18,36,196,64,192,12,32,0,4,36, - 255,255,16,38,252,255,18,22,33,32,128,2, - 4,0,128,16,0,22,19,0,196,64,192,12, - 0,0,0,0,0,22,19,0,3,22,2,0, - 2,0,3,36,14,0,67,20,255,255,2,36, - 255,255,16,38,11,0,2,18,255,255,18,36, - 196,64,192,12,48,0,4,36,255,255,16,38, - 6,0,18,18,0,0,0,0,156,60,192,8, - 0,0,0,0,0,38,4,0,196,64,192,12, - 3,38,4,0,0,0,34,130,0,0,36,146, - 0,0,0,0,249,255,64,20,1,0,49,38, - 255,255,49,38,0,22,19,0,3,22,2,0, - 3,0,3,36,9,0,67,20,255,255,16,38, - 255,255,2,36,6,0,2,18,255,255,17,36, - 196,64,192,12,32,0,4,36,255,255,16,38, - 252,255,17,22,0,0,0,0,92,0,191,143, - 88,0,180,143,84,0,179,143,80,0,178,143, - 76,0,177,143,72,0,176,143,8,0,224,3, - 96,0,189,39,200,255,189,39,40,0,178,175, - 33,144,128,0,32,0,176,175,33,128,160,0, - 36,0,177,175,33,136,192,0,33,32,32,2, - 48,0,191,175,156,71,192,12,44,0,179,175, - 33,32,0,0,33,24,64,0,42,16,112,0, - 2,0,64,16,33,152,64,2,35,32,3,2, - 0,22,18,0,3,22,2,0,1,0,3,36, - 11,0,67,20,33,128,128,0,255,255,16,38, - 255,255,2,36,8,0,2,18,0,22,19,0, - 255,255,18,36,196,64,192,12,32,0,4,36, - 255,255,16,38,252,255,18,22,0,0,0,0, - 0,22,19,0,3,22,2,0,2,0,3,36, - 
14,0,67,20,255,255,2,36,255,255,16,38, - 11,0,2,18,255,255,18,36,196,64,192,12, - 48,0,4,36,255,255,16,38,6,0,18,18, - 0,0,0,0,233,60,192,8,0,0,0,0, - 0,38,4,0,196,64,192,12,3,38,4,0, - 0,0,34,130,0,0,36,146,0,0,0,0, - 249,255,64,20,1,0,49,38,255,255,49,38, - 0,22,19,0,3,22,2,0,3,0,3,36, - 9,0,67,20,255,255,16,38,255,255,2,36, - 6,0,2,18,255,255,17,36,196,64,192,12, - 32,0,4,36,255,255,16,38,252,255,17,22, - 0,0,0,0,48,0,191,143,44,0,179,143, - 40,0,178,143,36,0,177,143,32,0,176,143, - 8,0,224,3,56,0,189,39,32,255,189,39, - 192,0,178,175,33,144,160,0,196,0,179,175, - 33,152,0,0,220,0,191,175,216,0,190,175, - 212,0,183,175,208,0,182,175,204,0,181,175, - 200,0,180,175,188,0,177,175,184,0,176,175, - 1,0,130,128,0,0,0,0,229,1,64,16, - 33,136,0,0,255,255,22,36,1,0,23,36, - 2,0,30,36,1,0,149,36,0,0,162,146, - 0,0,0,0,219,255,66,36,0,22,2,0, - 3,30,2,0,84,0,98,44,217,1,64,16, - 128,16,3,0,2,131,1,60,33,8,34,0, - 80,150,34,140,0,0,0,0,8,0,64,0, - 0,0,0,0,209,61,192,8,37,0,4,36, - 2,131,16,60,64,150,16,38,156,71,192,12, - 33,32,0,2,59,61,192,8,0,0,0,0, - 0,38,4,0,196,64,192,12,3,38,4,0, - 0,0,2,130,0,0,4,146,0,0,0,0, - 249,255,64,20,1,0,16,38,3,63,192,8, - 1,0,162,38,0,38,18,0,209,61,192,8, - 3,38,4,0,2,0,3,36,33,128,32,2, - 33,40,64,2,33,160,0,0,51,0,177,39, - 51,0,160,163,27,0,163,0,2,0,96,20, - 0,0,0,0,13,0,7,0,18,40,0,0, - 16,16,0,0,2,131,1,60,33,8,34,0, - 128,205,34,144,255,255,49,38,2,0,0,18, - 0,0,34,162,255,255,16,38,242,255,160,20, - 0,0,0,0,10,0,119,22,0,22,20,0, - 255,255,16,38,8,0,22,18,3,38,2,0, - 255,255,18,36,196,64,192,12,32,0,4,36, - 255,255,16,38,252,255,18,22,0,22,20,0, - 3,38,2,0,3,0,128,16,0,0,0,0, - 196,64,192,12,0,0,0,0,14,0,126,22, - 0,0,0,0,255,255,16,38,11,0,22,18, - 255,255,18,36,196,64,192,12,48,0,4,36, - 255,255,16,38,6,0,18,18,0,0,0,0, - 111,61,192,8,0,0,0,0,0,38,4,0, - 196,64,192,12,3,38,4,0,0,0,34,130, - 0,0,36,146,0,0,0,0,249,255,64,20, - 1,0,49,38,255,255,49,38,3,0,6,36, - 80,0,102,22,66,0,4,36,255,255,16,38, - 77,0,22,18,255,255,17,36,196,64,192,12, - 32,0,4,36,255,255,16,38,252,255,17,22, - 66,0,4,36,209,61,192,8,0,0,0,0, - 8,0,3,36,33,128,32,2,33,40,64,2, - 33,160,0,0,51,0,177,39,51,0,160,163, - 27,0,163,0,2,0,96,20,0,0,0,0, - 13,0,7,0,18,40,0,0,16,16,0,0, - 2,131,1,60,33,8,34,0,128,205,34,144, - 255,255,49,38,2,0,0,18,0,0,34,162, - 255,255,16,38,242,255,160,20,0,0,0,0, - 10,0,119,22,0,22,20,0,255,255,16,38, - 8,0,22,18,3,38,2,0,255,255,18,36, - 196,64,192,12,32,0,4,36,255,255,16,38, - 252,255,18,22,0,22,20,0,3,38,2,0, - 3,0,128,16,0,0,0,0,196,64,192,12, - 0,0,0,0,14,0,126,22,0,0,0,0, - 255,255,16,38,11,0,22,18,255,255,18,36, - 196,64,192,12,48,0,4,36,255,255,16,38, - 6,0,18,18,0,0,0,0,182,61,192,8, - 0,0,0,0,0,38,4,0,196,64,192,12, - 3,38,4,0,0,0,34,130,0,0,36,146, - 0,0,0,0,249,255,64,20,1,0,49,38, - 255,255,49,38,3,0,6,36,9,0,102,22, - 81,0,4,36,255,255,16,38,6,0,22,18, - 255,255,17,36,196,64,192,12,32,0,4,36, - 255,255,16,38,252,255,17,22,81,0,4,36, - 196,64,192,12,0,0,0,0,3,63,192,8, - 1,0,162,38,33,128,32,2,33,16,64,2, - 33,160,0,0,5,0,65,6,10,0,4,36, - 45,0,20,36,2,0,32,18,35,16,18,0, - 255,255,48,38,51,0,177,39,51,0,160,163, - 27,0,68,0,2,0,128,20,0,0,0,0, - 13,0,7,0,18,24,0,0,16,16,0,0, - 2,131,1,60,33,8,34,0,128,205,34,144, - 255,255,49,38,2,0,0,18,0,0,34,162, - 255,255,16,38,33,16,96,0,241,255,64,20, - 0,0,0,0,10,0,119,22,33,32,128,2, - 255,255,16,38,7,0,22,18,0,0,0,0, - 255,255,18,36,196,64,192,12,32,0,4,36, - 255,255,16,38,252,255,18,22,33,32,128,2, - 3,0,128,16,0,0,0,0,196,64,192,12, - 0,0,0,0,14,0,126,22,0,0,0,0, - 
255,255,16,38,11,0,22,18,255,255,18,36, - 196,64,192,12,48,0,4,36,255,255,16,38, - 6,0,18,18,0,0,0,0,4,62,192,8, - 0,0,0,0,0,38,4,0,196,64,192,12, - 3,38,4,0,0,0,34,130,0,0,36,146, - 0,0,0,0,249,255,64,20,1,0,49,38, - 255,255,49,38,3,0,6,36,237,0,102,22, - 1,0,162,38,255,255,16,38,234,0,22,18, - 255,255,17,36,196,64,192,12,32,0,4,36, - 255,255,16,38,252,255,17,22,1,0,162,38, - 3,63,192,8,0,0,0,0,10,0,3,36, - 33,128,32,2,33,40,64,2,33,160,0,0, - 51,0,177,39,51,0,160,163,27,0,163,0, - 2,0,96,20,0,0,0,0,13,0,7,0, - 18,40,0,0,16,16,0,0,2,131,1,60, - 33,8,34,0,128,205,34,144,255,255,49,38, - 2,0,0,18,0,0,34,162,255,255,16,38, - 242,255,160,20,0,0,0,0,10,0,119,22, - 0,22,20,0,255,255,16,38,8,0,22,18, - 3,38,2,0,255,255,18,36,196,64,192,12, - 32,0,4,36,255,255,16,38,252,255,18,22, - 0,22,20,0,3,38,2,0,3,0,128,16, - 0,0,0,0,196,64,192,12,0,0,0,0, - 14,0,126,22,0,0,0,0,255,255,16,38, - 11,0,22,18,255,255,18,36,196,64,192,12, - 48,0,4,36,255,255,16,38,6,0,18,18, - 0,0,0,0,75,62,192,8,0,0,0,0, - 0,38,4,0,196,64,192,12,3,38,4,0, - 0,0,34,130,0,0,36,146,0,0,0,0, - 249,255,64,20,1,0,49,38,255,255,49,38, - 3,0,6,36,166,0,102,22,1,0,162,38, - 255,255,16,38,163,0,22,18,255,255,17,36, - 196,64,192,12,32,0,4,36,255,255,16,38, - 252,255,17,22,1,0,162,38,3,63,192,8, - 0,0,0,0,192,132,144,39,156,71,192,12, - 33,32,0,2,112,62,192,8,0,0,0,0, - 0,38,4,0,196,64,192,12,3,38,4,0, - 0,0,2,130,0,0,4,146,0,0,0,0, - 249,255,64,20,1,0,16,38,16,0,3,36, - 33,128,32,2,33,40,64,2,33,160,0,0, - 51,0,177,39,51,0,160,163,27,0,163,0, - 2,0,96,20,0,0,0,0,13,0,7,0, - 18,40,0,0,16,16,0,0,2,131,1,60, - 33,8,34,0,128,205,34,144,255,255,49,38, - 2,0,0,18,0,0,34,162,255,255,16,38, - 242,255,160,20,0,0,0,0,10,0,119,22, - 0,22,20,0,255,255,16,38,8,0,22,18, - 3,38,2,0,255,255,18,36,196,64,192,12, - 32,0,4,36,255,255,16,38,252,255,18,22, - 0,22,20,0,3,38,2,0,3,0,128,16, - 0,0,0,0,196,64,192,12,0,0,0,0, - 14,0,126,22,0,0,0,0,255,255,16,38, - 11,0,22,18,255,255,18,36,196,64,192,12, - 48,0,4,36,255,255,16,38,6,0,18,18, - 0,0,0,0,159,62,192,8,0,0,0,0, - 0,38,4,0,196,64,192,12,3,38,4,0, - 0,0,34,130,0,0,36,146,0,0,0,0, - 249,255,64,20,1,0,49,38,255,255,49,38, - 3,0,6,36,82,0,102,22,1,0,162,38, - 255,255,16,38,79,0,22,18,255,255,17,36, - 196,64,192,12,32,0,4,36,255,255,16,38, - 252,255,17,22,1,0,162,38,3,63,192,8, - 0,0,0,0,156,71,192,12,33,32,64,2, - 33,24,64,0,42,16,113,0,2,0,64,16, - 33,32,0,0,35,32,35,2,10,0,119,22, - 33,128,128,0,255,255,16,38,7,0,22,18, - 0,0,0,0,255,255,17,36,196,64,192,12, - 32,0,4,36,255,255,16,38,252,255,17,22, - 0,0,0,0,14,0,126,22,0,0,0,0, - 255,255,16,38,11,0,22,18,255,255,17,36, - 196,64,192,12,48,0,4,36,255,255,16,38, - 6,0,17,18,0,0,0,0,211,62,192,8, - 0,0,0,0,0,38,4,0,196,64,192,12, - 3,38,4,0,0,0,66,130,0,0,68,146, - 0,0,0,0,249,255,64,20,1,0,82,38, - 255,255,82,38,3,0,6,36,30,0,102,22, - 1,0,162,38,255,255,16,38,27,0,22,18, - 255,255,17,36,196,64,192,12,32,0,4,36, - 255,255,16,38,252,255,17,22,1,0,162,38, - 3,63,192,8,0,0,0,0,253,62,192,8, - 3,0,19,36,3,0,96,22,128,16,17,0, - 2,0,19,36,128,16,17,0,33,16,81,0, - 64,16,2,0,0,0,163,130,208,255,66,36, - 2,0,96,22,33,136,67,0,1,0,19,36, - 1,0,181,38,0,0,162,130,0,0,0,0, - 33,254,64,20,0,0,0,0,1,0,130,36, - 220,0,191,143,216,0,190,143,212,0,183,143, - 208,0,182,143,204,0,181,143,200,0,180,143, - 196,0,179,143,192,0,178,143,188,0,177,143, - 184,0,176,143,8,0,224,3,224,0,189,39, - 0,0,164,175,4,0,165,175,8,0,166,175, - 12,0,167,175,200,255,189,39,59,0,162,39, - 252,255,3,36,36,16,67,0,52,0,191,175, - 48,0,180,175,44,0,179,175,40,0,178,175, - 
36,0,177,175,32,0,176,175,56,0,164,175, - 0,0,80,140,4,0,81,36,0,0,2,130, - 0,0,3,146,0,0,0,0,31,0,64,16, - 37,0,20,36,69,0,19,36,252,255,18,36, - 0,22,3,0,3,38,2,0,18,0,148,20, - 0,0,0,0,1,0,3,130,0,0,0,0, - 4,0,100,16,33,32,0,2,4,0,115,20, - 3,0,34,38,33,32,0,2,56,63,192,8, - 33,40,0,0,36,16,82,0,4,0,81,36, - 0,0,69,140,33,32,0,2,13,61,192,12, - 0,0,0,0,62,63,192,8,33,128,64,0, - 196,64,192,12,1,0,16,38,0,0,2,130, - 0,0,3,146,0,0,0,0,230,255,64,20, - 0,22,3,0,52,0,191,143,48,0,180,143, - 44,0,179,143,40,0,178,143,36,0,177,143, - 32,0,176,143,8,0,224,3,56,0,189,39, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,8,0,224,3,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,8,0,224,3, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 8,0,224,3,0,0,0,0,33,72,224,3, - 170,3,8,36,76,63,192,12,0,0,0,0, - 255,255,8,33,252,255,0,21,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,8,0,32,1, - 0,0,0,0,33,88,224,3,232,3,10,36, - 143,63,192,12,0,0,0,0,255,255,74,33, - 252,255,64,21,0,0,0,0,8,0,96,1, - 0,0,0,0,250,255,132,32,130,32,4,0, - 255,255,132,32,0,0,0,0,253,255,128,20, - 0,0,0,0,8,0,224,3,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 248,255,189,39,255,255,195,36,10,0,192,16, - 33,56,160,0,255,255,6,36,0,0,162,144, - 1,0,165,36,0,0,130,160,4,0,64,16, - 1,0,132,36,255,255,99,36,249,255,102,20, - 0,0,0,0,33,16,224,0,8,0,224,3, - 8,0,189,39,0,96,2,64,0,0,0,0, - 38,64,68,0,1,255,8,49,38,64,2,1, - 0,96,136,64,8,0,224,3,1,255,66,48, - 0,96,2,64,0,0,0,0,1,255,132,48, - 39,32,128,0,36,64,68,0,0,96,136,64, - 8,0,224,3,1,255,66,48,0,96,2,64, - 0,0,0,0,0,255,132,48,37,64,68,0, - 1,0,8,53,0,96,136,64,8,0,224,3, - 1,255,66,48,176,255,189,39,64,0,182,175, - 33,176,128,0,52,0,179,175,33,152,160,0, - 72,0,190,175,33,240,192,0,68,0,183,175, - 33,184,224,0,60,0,181,175,33,168,0,0, - 56,0,180,175,33,160,0,0,76,0,191,175, - 48,0,178,175,44,0,177,175,40,0,176,175, - 2,131,6,60,33,48,212,0,160,205,198,144, - 0,0,0,0,28,0,96,26,33,128,0,0, - 33,24,192,2,33,32,118,2,0,0,102,160, - 1,0,99,36,42,16,100,0,252,255,64,20, - 0,0,0,0,19,0,96,26,33,128,0,0, - 255,0,210,48,33,136,192,2,0,0,39,146, - 0,0,0,0,9,0,242,16,0,0,0,0, - 5,0,224,18,33,40,0,2,96,0,164,143, - 0,0,0,0,9,248,224,2,33,48,64,2, - 32,0,192,23,1,0,181,38,1,0,16,38, - 42,16,19,2,241,255,64,20,1,0,49,38, - 1,0,148,38,4,0,130,46,220,255,64,20, - 0,0,0,0,7,0,96,26,33,128,0,0, - 33,24,192,2,0,0,112,160,1,0,16,38, - 42,16,19,2,252,255,64,20,1,0,99,36, - 20,0,96,26,33,128,0,0,33,136,192,2, - 0,0,39,146,255,0,6,50,11,0,230,16, - 0,0,0,0,5,0,224,18,0,0,0,0, - 96,0,164,143,0,0,0,0,9,248,224,2, - 33,40,0,2,3,0,192,19,1,0,181,38, - 72,64,192,8,1,0,2,36,1,0,16,38, - 42,16,19,2,239,255,64,20,1,0,49,38, - 33,16,160,2,76,0,191,143,72,0,190,143, - 68,0,183,143,64,0,182,143,60,0,181,143, - 56,0,180,143,52,0,179,143,48,0,178,143, - 44,0,177,143,40,0,176,143,8,0,224,3, - 80,0,189,39,160,255,189,39,88,0,190,175, - 
33,240,128,0,68,0,179,175,33,152,160,0, - 76,0,181,175,33,168,224,0,72,0,180,175, - 33,160,0,0,33,16,96,2,92,0,191,175, - 84,0,183,175,80,0,182,175,64,0,178,175, - 60,0,177,175,56,0,176,175,2,0,97,6, - 16,0,166,175,3,0,98,38,131,152,2,0, - 33,184,0,0,2,131,22,60,164,205,214,38, - 0,0,210,142,0,0,0,0,7,0,96,18, - 33,128,0,0,33,24,192,3,0,0,114,172, - 1,0,16,38,43,16,19,2,252,255,64,20, - 4,0,99,36,20,0,96,18,33,128,0,0, - 33,136,192,3,0,0,39,142,0,0,0,0, - 11,0,242,16,128,40,16,0,5,0,160,18, - 0,0,0,0,112,0,164,143,0,0,0,0, - 9,248,160,2,33,48,64,2,16,0,168,143, - 0,0,0,0,34,0,0,21,1,0,148,38, - 1,0,16,38,43,16,19,2,239,255,64,20, - 4,0,49,38,1,0,247,38,4,0,226,46, - 222,255,64,20,4,0,214,38,7,0,96,18, - 33,128,0,0,33,24,192,3,0,0,112,172, - 1,0,16,38,43,16,19,2,252,255,64,20, - 4,0,99,36,22,0,96,18,33,128,0,0, - 33,136,192,3,0,0,39,142,0,0,0,0, - 13,0,240,16,128,40,16,0,5,0,160,18, - 0,0,0,0,112,0,164,143,0,0,0,0, - 9,248,160,2,33,48,0,2,16,0,168,143, - 0,0,0,0,3,0,0,17,1,0,148,38, - 174,64,192,8,1,0,2,36,1,0,16,38, - 43,16,19,2,237,255,64,20,4,0,49,38, - 33,16,128,2,92,0,191,143,88,0,190,143, - 84,0,183,143,80,0,182,143,76,0,181,143, - 72,0,180,143,68,0,179,143,64,0,178,143, - 60,0,177,143,56,0,176,143,8,0,224,3, - 96,0,189,39,0,0,0,0,0,0,0,0, - 255,1,2,36,0,163,1,60,176,1,32,172, - 0,163,1,60,180,1,32,172,0,163,1,60, - 8,0,224,3,184,1,34,172,232,255,189,39, - 16,0,176,175,33,128,128,0,20,0,191,175, - 220,63,192,12,33,32,0,0,33,40,64,0, - 0,163,3,60,180,1,99,140,0,163,2,60, - 184,1,66,140,0,163,4,60,128,1,132,140, - 1,0,99,36,11,0,128,20,36,24,98,0, - 0,163,2,60,176,1,66,140,0,0,0,0, - 6,0,98,20,0,0,0,0,0,163,2,60, - 128,1,66,140,0,0,0,0,247,255,64,16, - 0,0,0,0,0,163,2,60,180,1,66,140, - 33,32,160,0,0,163,1,60,33,8,34,0, - 188,1,48,160,0,163,1,60,220,63,192,12, - 180,1,35,172,20,0,191,143,16,0,176,143, - 8,0,224,3,24,0,189,39,8,0,224,3, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,192,255,189,39,33,80,0,0, - 80,0,185,143,84,0,184,143,88,0,175,143, - 1,0,2,36,60,0,177,175,92,0,177,143, - 4,112,226,0,12,0,224,24,56,0,176,175, - 1,0,9,36,33,64,160,3,33,24,32,3, - 0,0,98,140,4,0,99,36,1,0,74,37, - 4,16,73,0,0,0,2,173,42,16,71,1, - 249,255,64,20,4,0,8,37,46,0,192,25, - 33,80,0,0,255,0,16,36,33,72,0,0, - 0,0,145,172,0,0,160,164,34,0,224,24, - 0,0,208,160,33,88,128,0,33,96,160,0, - 33,104,192,0,33,64,32,3,33,24,160,3, - 0,0,98,140,0,0,0,0,36,16,66,1, - 19,0,64,16,0,0,0,0,0,0,2,141, - 0,0,0,0,128,16,2,0,33,16,88,0, - 0,0,66,140,0,0,0,0,0,0,98,173, - 0,0,2,141,0,0,0,0,64,16,2,0, - 33,16,79,0,0,0,66,148,0,0,0,0, - 0,0,130,165,0,0,2,141,0,0,0,0, - 47,65,192,8,0,0,162,161,4,0,8,37, - 1,0,41,37,42,16,39,1,229,255,64,20, - 4,0,99,36,1,0,198,36,2,0,165,36, - 1,0,74,37,42,16,78,1,213,255,64,20, - 4,0,132,36,60,0,177,143,56,0,176,143, - 8,0,224,3,64,0,189,39,0,0,0,0, - 0,0,0,0,0,0,0,0,216,255,189,39, - 63,0,132,48,28,0,179,175,128,1,147,52, - 50,133,130,151,48,133,132,151,0,128,131,151, - 20,0,177,175,16,0,176,175,5,162,16,60, - 32,0,191,175,24,0,178,175,39,16,68,0, - 36,24,98,0,0,128,131,167,0,0,3,166, - 76,63,192,12,0,1,17,36,0,128,130,151, - 48,133,131,151,5,162,18,60,37,16,67,0, - 0,128,130,167,0,0,2,166,36,16,51,2, - 6,0,64,16,0,0,0,0,0,128,131,151, - 20,133,130,151,0,0,0,0,96,65,192,8, - 37,24,98,0,20,133,130,151,0,128,131,151, - 39,16,2,0,36,24,98,0,0,128,131,167, - 0,0,67,166,76,63,192,12,66,136,17,0, - 0,128,130,151,50,133,131,151,0,0,0,0, - 37,16,67,0,0,128,130,167,76,63,192,12, - 0,0,66,166,50,133,130,151,0,128,131,151, - 39,16,2,0,36,24,98,0,255,255,34,50, - 0,128,131,167,0,0,67,166,226,255,64,20, - 
36,16,51,2,33,136,0,0,16,0,16,36, - 5,162,18,60,255,255,19,36,76,63,192,12, - 0,0,0,0,0,128,131,151,50,133,130,151, - 0,0,0,0,37,24,98,0,0,128,131,167, - 76,63,192,12,0,0,67,166,8,0,0,18, - 0,0,0,0,0,0,67,150,20,133,130,151, - 0,0,0,0,36,16,67,0,2,0,64,16, - 64,136,17,0,1,0,49,54,76,63,192,12, - 255,255,16,38,50,133,130,151,0,128,131,151, - 39,16,2,0,36,24,98,0,0,128,131,167, - 230,255,19,22,0,0,67,166,48,133,130,151, - 0,128,131,151,39,16,2,0,36,24,98,0, - 5,162,2,60,0,128,131,167,0,0,67,164, - 255,255,34,50,32,0,191,143,28,0,179,143, - 24,0,178,143,20,0,177,143,16,0,176,143, - 8,0,224,3,40,0,189,39,208,255,189,39, - 36,0,181,175,33,168,160,0,32,0,180,175, - 63,0,148,48,33,32,128,2,44,0,191,175, - 40,0,182,175,28,0,179,175,24,0,178,175, - 20,0,177,175,60,65,192,12,16,0,176,175, - 33,152,64,0,255,255,163,50,255,255,98,50, - 3,0,98,20,48,1,22,36,245,66,192,8, - 1,0,2,36,5,162,16,60,50,133,130,151, - 48,133,132,151,0,128,131,151,39,16,68,0, - 36,24,98,0,0,128,131,167,0,0,3,166, - 76,63,192,12,0,1,17,36,0,128,130,151, - 48,133,131,151,5,162,18,60,37,16,67,0, - 0,128,130,167,0,0,2,166,36,16,54,2, - 6,0,64,16,0,0,0,0,0,128,131,151, - 20,133,130,151,0,0,0,0,210,65,192,8, - 37,24,98,0,20,133,130,151,0,128,131,151, - 39,16,2,0,36,24,98,0,0,128,131,167, - 0,0,67,166,76,63,192,12,66,136,17,0, - 0,128,130,151,50,133,131,151,0,0,0,0, - 37,16,67,0,0,128,130,167,76,63,192,12, - 0,0,66,166,50,133,130,151,0,128,131,151, - 39,16,2,0,36,24,98,0,255,255,34,50, - 0,128,131,167,0,0,67,166,226,255,64,20, - 36,16,54,2,255,255,163,50,255,255,98,50, - 39,16,2,0,36,24,98,0,84,0,96,16, - 192,1,147,54,5,162,16,60,50,133,130,151, - 48,133,132,151,0,128,131,151,39,16,68,0, - 36,24,98,0,0,128,131,167,0,0,3,166, - 76,63,192,12,0,1,17,36,0,128,130,151, - 48,133,131,151,5,162,18,60,37,16,67,0, - 0,128,130,167,0,0,2,166,36,16,51,2, - 6,0,64,16,0,0,0,0,0,128,131,151, - 20,133,130,151,0,0,0,0,8,66,192,8, - 37,24,98,0,20,133,130,151,0,128,131,151, - 39,16,2,0,36,24,98,0,0,128,131,167, - 0,0,67,166,76,63,192,12,66,136,17,0, - 0,128,130,151,50,133,131,151,0,0,0,0, - 37,16,67,0,0,128,130,167,76,63,192,12, - 0,0,66,166,50,133,130,151,0,128,131,151, - 39,16,2,0,36,24,98,0,255,255,34,50, - 0,128,131,167,0,0,67,166,226,255,64,20, - 36,16,51,2,5,162,16,60,50,133,130,151, - 48,133,132,151,0,128,131,151,39,16,68,0, - 36,24,98,0,0,128,131,167,76,63,192,12, - 0,0,3,166,0,128,131,151,48,133,130,151, - 0,0,0,0,37,24,98,0,0,0,3,166, - 0,0,4,150,20,133,130,151,0,128,131,167, - 36,16,68,0,9,0,64,20,0,0,0,0, - 76,63,192,12,0,0,0,0,0,0,3,150, - 20,133,130,151,0,0,0,0,36,16,67,0, - 249,255,64,16,0,0,0,0,48,133,130,151, - 0,128,131,151,39,16,2,0,36,24,98,0, - 5,162,2,60,0,128,131,167,0,0,67,164, - 255,255,163,50,255,255,2,52,125,0,98,16, - 64,1,147,54,5,162,16,60,50,133,130,151, - 48,133,132,151,0,128,131,151,39,16,68,0, - 36,24,98,0,0,128,131,167,0,0,3,166, - 76,63,192,12,0,1,17,36,0,128,130,151, - 48,133,131,151,5,162,18,60,37,16,67,0, - 0,128,130,167,0,0,2,166,36,16,51,2, - 6,0,64,16,0,0,0,0,0,128,131,151, - 20,133,130,151,0,0,0,0,95,66,192,8, - 37,24,98,0,20,133,130,151,0,128,131,151, - 39,16,2,0,36,24,98,0,0,128,131,167, - 0,0,67,166,76,63,192,12,66,136,17,0, - 0,128,130,151,50,133,131,151,0,0,0,0, - 37,16,67,0,0,128,130,167,76,63,192,12, - 0,0,66,166,50,133,130,151,0,128,131,151, - 39,16,2,0,36,24,98,0,255,255,34,50, - 0,128,131,167,0,0,67,166,226,255,64,20, - 36,16,51,2,33,144,160,2,0,128,16,52, - 0,128,130,151,48,133,131,151,5,162,17,60, - 37,16,67,0,5,162,3,60,0,128,130,167, - 0,0,98,164,36,16,18,2,6,0,64,16, - 
0,0,0,0,0,128,131,151,20,133,130,151, - 0,0,0,0,136,66,192,8,37,24,98,0, - 20,133,130,151,0,128,131,151,39,16,2,0, - 36,24,98,0,0,128,131,167,0,0,35,166, - 76,63,192,12,66,128,16,0,0,128,130,151, - 50,133,131,151,0,0,0,0,37,16,67,0, - 0,128,130,167,76,63,192,12,0,0,34,166, - 50,133,130,151,0,128,131,151,39,16,2,0, - 36,24,98,0,255,255,2,50,0,128,131,167, - 0,0,35,166,226,255,64,20,36,16,18,2, - 5,162,16,60,50,133,130,151,48,133,132,151, - 0,128,131,151,39,16,68,0,36,24,98,0, - 0,128,131,167,76,63,192,12,0,0,3,166, - 0,128,131,151,48,133,130,151,0,0,0,0, - 37,24,98,0,0,0,3,166,0,0,4,150, - 20,133,130,151,0,128,131,167,36,16,68,0, - 9,0,64,20,0,0,0,0,76,63,192,12, - 0,0,0,0,0,0,3,150,20,133,130,151, - 0,0,0,0,36,16,67,0,249,255,64,16, - 0,0,0,0,48,133,130,151,0,128,131,151, - 39,16,2,0,36,24,98,0,5,162,2,60, - 0,128,131,167,0,0,67,164,0,1,19,36, - 5,162,16,60,50,133,130,151,48,133,132,151, - 0,128,131,151,39,16,68,0,36,24,98,0, - 0,128,131,167,0,0,3,166,76,63,192,12, - 0,1,17,36,0,128,130,151,48,133,131,151, - 5,162,18,60,37,16,67,0,0,128,130,167, - 0,0,2,166,36,16,113,2,6,0,64,16, - 0,0,0,0,0,128,131,151,20,133,130,151, - 0,0,0,0,220,66,192,8,37,24,98,0, - 20,133,130,151,0,128,131,151,39,16,2,0, - 36,24,98,0,0,128,131,167,0,0,67,166, - 76,63,192,12,66,136,17,0,0,128,130,151, - 50,133,131,151,0,0,0,0,37,16,67,0, - 0,128,130,167,76,63,192,12,0,0,66,166, - 50,133,130,151,0,128,131,151,39,16,2,0, - 36,24,98,0,255,255,34,50,0,128,131,167, - 0,0,67,166,226,255,64,20,36,16,113,2, - 60,65,192,12,33,32,128,2,38,16,162,2, - 255,255,66,48,1,0,66,44,44,0,191,143, - 40,0,182,143,36,0,181,143,32,0,180,143, - 28,0,179,143,24,0,178,143,20,0,177,143, - 16,0,176,143,8,0,224,3,48,0,189,39, - 224,255,189,39,24,0,178,175,33,144,0,0, - 64,16,4,0,16,0,176,175,33,128,68,0, - 33,32,0,2,20,0,177,175,255,255,177,48, - 28,0,191,175,162,65,192,12,33,40,32,2, - 8,0,64,16,1,0,4,38,162,65,192,12, - 33,40,32,2,4,0,64,16,2,0,4,38, - 162,65,192,12,33,40,32,2,43,144,2,0, - 33,16,64,2,28,0,191,143,24,0,178,143, - 20,0,177,143,16,0,176,143,8,0,224,3, - 32,0,189,39,216,255,189,39,64,16,4,0, - 24,0,178,175,33,144,68,0,33,32,64,2, - 36,0,191,175,32,0,180,175,28,0,179,175, - 20,0,177,175,60,65,192,12,16,0,176,175, - 1,0,84,38,33,32,128,2,60,65,192,12, - 33,136,64,0,2,0,83,38,33,32,96,2, - 60,65,192,12,33,128,64,0,255,255,35,50, - 255,255,17,50,8,0,113,20,0,0,0,0, - 255,255,66,48,3,0,34,18,33,32,96,2, - 162,65,192,12,33,40,32,2,66,67,192,8, - 33,16,32,2,255,255,80,48,4,0,112,16, - 33,32,128,2,5,0,48,22,255,255,2,36, - 33,32,64,2,162,65,192,12,33,40,0,2, - 33,16,0,2,36,0,191,143,32,0,180,143, - 28,0,179,143,24,0,178,143,20,0,177,143, - 16,0,176,143,8,0,224,3,40,0,189,39, - 0,0,0,0,0,0,0,0,0,96,8,64, - 0,0,0,0,254,255,1,36,36,72,1,1, - 0,96,137,64,0,0,133,164,2,0,132,32, - 2,44,5,0,0,0,133,164,0,96,136,64, - 8,0,224,3,0,0,0,0,208,255,189,39, - 32,0,178,175,33,144,128,0,28,0,177,175, - 33,136,160,0,36,0,179,175,33,152,192,0, - 40,0,180,175,33,160,224,0,44,0,191,175, - 2,0,32,22,24,0,176,175,255,255,17,36, - 0,0,66,142,0,0,0,0,36,16,81,0, - 3,0,83,20,0,0,0,0,121,67,192,8, - 1,0,2,36,11,0,128,26,33,128,0,0, - 143,63,192,12,0,0,0,0,0,0,66,142, - 0,0,0,0,36,16,81,0,246,255,83,16, - 1,0,16,38,42,16,20,2,247,255,64,20, - 0,0,0,0,33,16,0,0,44,0,191,143, - 40,0,180,143,36,0,179,143,32,0,178,143, - 28,0,177,143,24,0,176,143,8,0,224,3, - 48,0,189,39,208,255,189,39,32,0,178,175, - 33,144,128,0,28,0,177,175,33,136,160,0, - 36,0,179,175,33,152,192,0,40,0,180,175, - 33,160,224,0,44,0,191,175,2,0,32,22, - 24,0,176,175,255,255,17,52,0,0,66,150, - 
0,0,0,0,36,16,81,0,3,0,83,20, - 0,0,0,0,162,67,192,8,1,0,2,36, - 11,0,128,26,33,128,0,0,143,63,192,12, - 0,0,0,0,0,0,66,150,0,0,0,0, - 36,16,81,0,246,255,83,16,1,0,16,38, - 42,16,20,2,247,255,64,20,0,0,0,0, - 33,16,0,0,44,0,191,143,40,0,180,143, - 36,0,179,143,32,0,178,143,28,0,177,143, - 24,0,176,143,8,0,224,3,48,0,189,39, - 208,255,189,39,32,0,178,175,33,144,128,0, - 28,0,177,175,33,136,160,0,36,0,179,175, - 33,152,192,0,40,0,180,175,33,160,224,0, - 44,0,191,175,2,0,32,22,24,0,176,175, - 255,0,17,36,0,0,66,146,0,0,0,0, - 36,16,81,0,3,0,83,20,0,0,0,0, - 203,67,192,8,1,0,2,36,11,0,128,26, - 33,128,0,0,143,63,192,12,0,0,0,0, - 0,0,66,146,0,0,0,0,36,16,81,0, - 246,255,83,16,1,0,16,38,42,16,20,2, - 247,255,64,20,0,0,0,0,33,16,0,0, - 44,0,191,143,40,0,180,143,36,0,179,143, - 32,0,178,143,28,0,177,143,24,0,176,143, - 8,0,224,3,48,0,189,39,208,255,189,39, - 32,0,178,175,33,144,128,0,28,0,177,175, - 33,136,160,0,36,0,179,175,33,152,192,0, - 40,0,180,175,33,160,224,0,44,0,191,175, - 2,0,32,22,24,0,176,175,255,255,17,36, - 0,0,66,142,0,0,0,0,36,16,81,0, - 3,0,83,20,0,0,0,0,244,67,192,8, - 1,0,2,36,11,0,128,26,33,128,0,0, - 143,63,192,12,0,0,0,0,0,0,66,142, - 0,0,0,0,36,16,81,0,246,255,83,20, - 1,0,16,38,42,16,20,2,247,255,64,20, - 0,0,0,0,33,16,0,0,44,0,191,143, - 40,0,180,143,36,0,179,143,32,0,178,143, - 28,0,177,143,24,0,176,143,8,0,224,3, - 48,0,189,39,208,255,189,39,32,0,178,175, - 33,144,128,0,28,0,177,175,33,136,160,0, - 36,0,179,175,33,152,192,0,40,0,180,175, - 33,160,224,0,44,0,191,175,2,0,32,22, - 24,0,176,175,255,255,17,52,0,0,66,150, - 0,0,0,0,36,16,81,0,3,0,83,20, - 0,0,0,0,29,68,192,8,1,0,2,36, - 11,0,128,26,33,128,0,0,143,63,192,12, - 0,0,0,0,0,0,66,150,0,0,0,0, - 36,16,81,0,246,255,83,20,1,0,16,38, - 42,16,20,2,247,255,64,20,0,0,0,0, - 33,16,0,0,44,0,191,143,40,0,180,143, - 36,0,179,143,32,0,178,143,28,0,177,143, - 24,0,176,143,8,0,224,3,48,0,189,39, - 208,255,189,39,32,0,178,175,33,144,128,0, - 28,0,177,175,33,136,160,0,36,0,179,175, - 33,152,192,0,40,0,180,175,33,160,224,0, - 44,0,191,175,2,0,32,22,24,0,176,175, - 255,0,17,36,0,0,66,146,0,0,0,0, - 36,16,81,0,3,0,83,20,0,0,0,0, - 70,68,192,8,1,0,2,36,11,0,128,26, - 33,128,0,0,143,63,192,12,0,0,0,0, - 0,0,66,146,0,0,0,0,36,16,81,0, - 246,255,83,20,1,0,16,38,42,16,20,2, - 247,255,64,20,0,0,0,0,33,16,0,0, - 44,0,191,143,40,0,180,143,36,0,179,143, - 32,0,178,143,28,0,177,143,24,0,176,143, - 8,0,224,3,48,0,189,39,0,0,0,0, - 0,0,0,0,248,255,189,39,255,255,195,36, - 8,0,192,16,33,56,128,0,255,255,6,36, - 0,0,162,144,1,0,165,36,255,255,99,36, - 0,0,130,160,251,255,102,20,1,0,132,36, - 33,16,224,0,8,0,224,3,8,0,189,39, - 0,0,0,0,0,0,0,0,0,96,8,64, - 1,0,9,60,0,96,137,64,15,0,138,48, - 33,40,170,0,192,255,165,36,0,0,128,160, - 16,0,128,160,32,0,128,160,251,255,160,28, - 48,0,128,160,64,0,132,36,0,96,136,64, - 0,0,0,0,0,0,0,0,8,0,224,3, - 0,0,0,0,0,0,0,0,1,131,2,60, - 224,17,66,36,0,32,9,60,37,16,73,0, - 8,0,64,0,0,0,0,0,0,96,8,64, - 3,0,9,60,0,96,137,64,15,0,138,48, - 33,40,170,0,192,255,165,36,0,0,128,160, - 16,0,128,160,32,0,128,160,251,255,160,28, - 48,0,128,160,64,0,132,36,0,96,136,64, - 0,0,0,0,0,0,0,0,8,0,224,3, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,163,3,60,192,3,99,140, - 0,163,2,60,188,3,66,140,0,0,0,0, - 16,0,98,16,255,255,2,36,0,163,2,60, - 188,3,66,140,0,163,1,60,33,8,34,0, - 200,3,34,144,0,163,3,60,188,3,99,140, - 0,163,4,60,196,3,132,140,0,22,2,0, - 3,22,2,0,1,0,99,36,36,24,100,0, - 0,163,1,60,188,3,35,172,8,0,224,3, - 0,0,0,0,0,163,6,60,0,1,198,52, - 10,0,9,36,255,255,8,36,13,0,7,36, - 0,163,3,60,192,3,99,140,0,163,2,60, - 
188,3,66,140,0,0,0,0,17,0,98,16, - 255,255,5,36,0,163,2,60,188,3,66,140, - 0,0,0,0,33,16,70,0,200,2,66,144, - 0,0,0,0,0,22,2,0,3,46,2,0, - 0,163,2,60,188,3,66,140,0,163,3,60, - 196,3,99,140,1,0,66,36,36,16,67,0, - 0,163,1,60,188,3,34,172,11,0,169,16, - 11,0,162,40,5,0,64,16,0,0,0,0, - 228,255,168,16,0,0,0,0,206,68,192,8, - 0,0,133,160,224,255,167,16,0,0,0,0, - 206,68,192,8,0,0,133,160,208,68,192,8, - 0,0,128,160,169,68,192,8,1,0,132,36, - 8,0,224,3,0,0,0,0,0,0,0,0, - 0,0,0,0,208,255,189,39,24,0,176,175, - 33,128,128,0,36,0,179,175,33,152,160,0, - 28,0,177,175,33,136,0,0,32,0,178,175, - 33,144,0,2,5,0,64,22,40,0,191,175, - 54,0,96,22,33,16,0,0,22,69,192,8, - 0,0,32,174,0,0,67,130,0,0,0,0, - 233,68,192,8,32,0,2,36,0,0,3,130, - 32,0,2,36,253,255,98,16,1,0,16,38, - 255,255,16,38,9,0,2,36,249,255,98,16, - 1,0,16,38,255,255,16,38,0,0,3,130, - 45,0,2,36,4,0,98,20,43,0,2,36, - 1,0,16,38,250,68,192,8,1,0,17,36, - 3,0,98,20,33,32,0,2,1,0,16,38, - 33,32,0,2,44,69,192,12,16,0,165,39, - 7,0,96,18,33,24,64,0,16,0,162,143, - 0,0,0,0,2,0,80,20,0,0,0,0, - 33,16,64,2,0,0,98,174,6,0,32,18, - 0,128,2,60,43,16,67,0,5,0,64,20, - 255,127,2,60,19,69,192,8,33,16,96,0, - 5,0,97,4,255,127,2,60,7,0,32,18, - 255,255,66,52,22,69,192,8,0,128,2,60, - 33,16,96,0,2,0,32,18,0,0,0,0, - 35,16,2,0,40,0,191,143,36,0,179,143, - 32,0,178,143,28,0,177,143,24,0,176,143, - 8,0,224,3,48,0,189,39,0,0,0,0, - 0,0,0,0,0,0,0,0,208,255,130,36, - 10,0,66,44,7,0,64,20,1,0,2,36, - 191,255,130,36,6,0,66,44,3,0,64,20, - 1,0,2,36,159,255,130,36,6,0,66,44, - 8,0,224,3,0,0,0,0,248,255,189,39, - 0,0,176,175,33,56,0,0,33,72,0,0, - 33,80,0,0,33,112,0,0,5,0,128,20, - 33,200,128,0,110,0,160,20,33,16,0,0, - 163,69,192,8,0,0,192,173,0,0,131,128, - 32,0,2,36,253,255,98,16,1,0,132,36, - 255,255,132,36,9,0,2,36,249,255,98,16, - 1,0,132,36,255,255,132,36,0,0,131,128, - 43,0,2,36,3,0,98,20,45,0,2,36, - 75,69,192,8,1,0,132,36,3,0,98,20, - 0,0,0,0,1,0,132,36,1,0,14,36, - 3,0,192,16,16,0,2,36,16,0,194,20, - 0,0,0,0,0,0,131,128,48,0,2,36, - 9,0,98,20,10,0,2,36,1,0,131,128, - 88,0,2,36,3,0,98,16,120,0,2,36, - 3,0,98,20,8,0,2,36,16,0,2,36, - 2,0,132,36,2,0,192,20,0,0,0,0, - 33,48,64,0,0,0,131,128,255,255,2,36, - 27,0,70,0,2,0,192,20,0,0,0,0, - 13,0,7,0,18,64,0,0,16,192,0,0, - 0,0,0,0,0,0,0,0,42,0,96,16, - 48,0,98,44,48,0,207,36,11,0,205,40, - 87,0,204,36,55,0,203,36,5,0,64,20, - 43,16,111,0,3,0,64,16,0,0,0,0, - 130,69,192,8,208,255,99,36,30,0,160,21, - 97,0,98,44,6,0,64,20,65,0,98,44, - 43,16,108,0,3,0,64,16,65,0,98,44, - 130,69,192,8,169,255,99,36,21,0,64,20, - 43,16,107,0,19,0,64,16,0,0,0,0, - 201,255,99,36,43,16,7,1,6,0,64,20, - 1,0,9,36,6,0,232,20,24,0,230,0, - 43,16,3,3,3,0,64,16,0,0,0,0, - 1,0,10,36,24,0,230,0,1,0,132,36, - 18,128,0,0,33,56,3,2,0,0,131,128, - 0,0,0,0,220,255,96,20,48,0,98,44, - 5,0,64,17,0,0,0,0,13,0,160,16, - 255,255,2,36,163,69,192,8,0,0,164,172, - 6,0,160,16,0,0,0,0,3,0,32,21, - 0,0,0,0,160,69,192,8,0,0,185,172, - 0,0,164,172,2,0,192,17,33,16,224,0, - 35,16,2,0,0,0,176,143,8,0,224,3, - 8,0,189,39,0,0,0,0,0,0,0,0, - 200,255,189,39,16,0,176,175,7,162,16,60, - 236,0,16,54,255,240,3,60,255,255,99,52, - 63,0,132,48,36,0,181,175,128,1,149,52, - 24,0,178,175,0,1,18,36,20,0,177,175, - 7,162,17,60,236,0,49,54,44,0,183,175, - 0,4,23,60,32,0,180,175,255,251,20,60, - 255,255,148,54,40,0,182,175,0,1,22,60, - 28,0,179,175,255,254,19,60,48,0,191,175, - 0,0,2,142,0,0,0,0,36,16,67,0, - 224,133,130,175,0,0,2,174,76,63,192,12, - 255,255,115,54,224,133,130,143,0,2,3,60, - 37,16,67,0,224,133,130,175,0,0,2,174, - 36,16,85,2,5,0,64,16,0,0,0,0, - 
224,133,130,143,0,0,0,0,214,69,192,8, - 37,16,87,0,224,133,130,143,0,0,0,0, - 36,16,84,0,224,133,130,175,76,63,192,12, - 0,0,34,174,224,133,130,143,0,0,0,0, - 37,16,86,0,224,133,130,175,0,0,34,174, - 76,63,192,12,66,144,18,0,224,133,130,143, - 0,0,0,0,36,16,83,0,224,133,130,175, - 0,0,34,174,255,255,66,50,230,255,64,20, - 36,16,85,2,33,136,0,0,16,0,16,36, - 7,162,18,60,236,0,82,54,0,1,22,60, - 0,8,21,60,255,254,19,60,255,255,115,54, - 255,255,20,36,76,63,192,12,0,0,0,0, - 224,133,130,143,0,0,0,0,37,16,86,0, - 224,133,130,175,76,63,192,12,0,0,66,174, - 7,0,0,18,0,0,0,0,0,0,66,142, - 0,0,0,0,36,16,85,0,2,0,64,16, - 64,136,17,0,1,0,49,54,76,63,192,12, - 255,255,16,38,224,133,130,143,0,0,0,0, - 36,16,83,0,224,133,130,175,0,0,66,174, - 232,255,20,22,7,162,4,60,236,0,132,52, - 255,253,3,60,224,133,130,143,255,255,99,52, - 36,16,67,0,224,133,130,175,0,0,130,172, - 255,255,34,50,48,0,191,143,44,0,183,143, - 40,0,182,143,36,0,181,143,32,0,180,143, - 28,0,179,143,24,0,178,143,20,0,177,143, - 16,0,176,143,8,0,224,3,56,0,189,39, - 200,255,189,39,48,0,190,175,33,240,160,0, - 40,0,182,175,63,0,150,48,33,32,192,2, - 52,0,191,175,44,0,183,175,36,0,181,175, - 32,0,180,175,28,0,179,175,24,0,178,175, - 20,0,177,175,168,69,192,12,16,0,176,175, - 33,152,64,0,255,255,195,51,255,255,98,50, - 3,0,98,20,7,162,16,60,131,71,192,8, - 1,0,2,36,236,0,16,54,255,252,3,60, - 255,255,99,52,0,1,18,36,7,162,17,60, - 236,0,49,54,255,251,21,60,255,255,181,54, - 0,1,23,60,255,254,20,60,224,133,130,143, - 0,0,0,0,36,16,67,0,224,133,130,175, - 0,0,2,174,76,63,192,12,255,255,148,54, - 224,133,130,143,0,2,3,60,37,16,67,0, - 224,133,130,175,0,0,2,174,48,1,66,50, - 5,0,64,16,0,4,6,60,224,133,130,143, - 0,0,0,0,83,70,192,8,37,16,70,0, - 224,133,130,143,0,0,0,0,36,16,85,0, - 224,133,130,175,76,63,192,12,0,0,34,174, - 224,133,130,143,0,0,0,0,37,16,87,0, - 224,133,130,175,0,0,34,174,76,63,192,12, - 66,144,18,0,224,133,130,143,0,0,0,0, - 36,16,84,0,224,133,130,175,0,0,34,174, - 255,255,66,50,230,255,64,20,48,1,66,50, - 255,255,195,51,255,255,98,50,39,16,2,0, - 36,24,98,0,88,0,96,16,192,1,213,54, - 7,162,16,60,236,0,16,54,255,252,3,60, - 255,255,99,52,0,1,18,36,7,162,17,60, - 236,0,49,54,255,251,20,60,255,255,148,54, - 0,1,23,60,255,254,19,60,224,133,130,143, - 0,0,0,0,36,16,67,0,224,133,130,175, - 0,0,2,174,76,63,192,12,255,255,115,54, - 224,133,130,143,0,2,3,60,37,16,67,0, - 224,133,130,175,0,0,2,174,36,16,85,2, - 5,0,64,16,0,4,6,60,224,133,130,143, - 0,0,0,0,140,70,192,8,37,16,70,0, - 224,133,130,143,0,0,0,0,36,16,84,0, - 224,133,130,175,76,63,192,12,0,0,34,174, - 224,133,130,143,0,0,0,0,37,16,87,0, - 224,133,130,175,0,0,34,174,76,63,192,12, - 66,144,18,0,224,133,130,143,0,0,0,0, - 36,16,83,0,224,133,130,175,0,0,34,174, - 255,255,66,50,230,255,64,20,36,16,85,2, - 7,162,16,60,236,0,16,54,255,252,3,60, - 224,133,130,143,255,255,99,52,36,16,67,0, - 224,133,130,175,76,63,192,12,0,0,2,174, - 224,133,130,143,0,2,3,60,37,16,67,0, - 224,133,130,175,0,0,2,174,0,0,2,142, - 0,8,3,60,36,16,67,0,11,0,64,20, - 7,162,4,60,7,162,16,60,236,0,16,54, - 0,8,17,60,76,63,192,12,0,0,0,0, - 0,0,2,142,0,0,0,0,36,16,81,0, - 250,255,64,16,7,162,4,60,236,0,132,52, - 255,253,3,60,224,133,130,143,255,255,99,52, - 36,16,67,0,224,133,130,175,0,0,130,172, - 255,255,195,51,255,255,2,52,133,0,98,16, - 64,1,213,54,7,162,16,60,236,0,16,54, - 255,252,3,60,255,255,99,52,0,1,18,36, - 7,162,17,60,236,0,49,54,255,251,20,60, - 255,255,148,54,0,1,23,60,255,254,19,60, - 224,133,130,143,0,0,0,0,36,16,67,0, - 224,133,130,175,0,0,2,174,76,63,192,12, - 
255,255,115,54,224,133,130,143,0,2,3,60, - 37,16,67,0,224,133,130,175,0,0,2,174, - 36,16,85,2,5,0,64,16,0,4,6,60, - 224,133,130,143,0,0,0,0,231,70,192,8, - 37,16,70,0,224,133,130,143,0,0,0,0, - 36,16,84,0,224,133,130,175,76,63,192,12, - 0,0,34,174,224,133,130,143,0,0,0,0, - 37,16,87,0,224,133,130,175,0,0,34,174, - 76,63,192,12,66,144,18,0,224,133,130,143, - 0,0,0,0,36,16,83,0,224,133,130,175, - 0,0,34,174,255,255,66,50,230,255,64,20, - 36,16,85,2,33,160,192,3,7,162,2,60, - 236,0,66,52,0,128,17,52,7,162,16,60, - 236,0,16,54,0,4,23,60,255,251,19,60, - 255,255,115,54,0,1,21,60,255,254,18,60, - 255,255,82,54,224,133,131,143,0,2,4,60, - 37,24,100,0,224,133,131,175,0,0,67,172, - 36,16,52,2,5,0,64,16,0,0,0,0, - 224,133,130,143,0,0,0,0,20,71,192,8, - 37,16,87,0,224,133,130,143,0,0,0,0, - 36,16,83,0,224,133,130,175,76,63,192,12, - 0,0,2,174,224,133,130,143,0,0,0,0, - 37,16,85,0,224,133,130,175,0,0,2,174, - 76,63,192,12,66,136,17,0,224,133,130,143, - 0,0,0,0,36,16,82,0,224,133,130,175, - 0,0,2,174,255,255,34,50,230,255,64,20, - 36,16,52,2,7,162,16,60,236,0,16,54, - 255,252,3,60,224,133,130,143,255,255,99,52, - 36,16,67,0,224,133,130,175,76,63,192,12, - 0,0,2,174,224,133,130,143,0,2,3,60, - 37,16,67,0,224,133,130,175,0,0,2,174, - 0,0,2,142,0,8,3,60,36,16,67,0, - 11,0,64,20,7,162,4,60,7,162,16,60, - 236,0,16,54,0,8,17,60,76,63,192,12, - 0,0,0,0,0,0,2,142,0,0,0,0, - 36,16,81,0,250,255,64,16,7,162,4,60, - 236,0,132,52,255,253,3,60,224,133,130,143, - 255,255,99,52,36,16,67,0,224,133,130,175, - 0,0,130,172,7,162,16,60,236,0,16,54, - 255,252,3,60,255,255,99,52,0,1,18,36, - 7,162,17,60,236,0,49,54,0,4,23,60, - 255,251,20,60,255,255,148,54,0,1,21,60, - 255,254,19,60,224,133,130,143,0,0,0,0, - 36,16,67,0,224,133,130,175,0,0,2,174, - 76,63,192,12,255,255,115,54,224,133,130,143, - 0,2,3,60,37,16,67,0,224,133,130,175, - 0,0,2,174,0,1,66,50,5,0,64,16, - 0,0,0,0,224,133,130,143,0,0,0,0, - 108,71,192,8,37,16,87,0,224,133,130,143, - 0,0,0,0,36,16,84,0,224,133,130,175, - 76,63,192,12,0,0,34,174,224,133,130,143, - 0,0,0,0,37,16,85,0,224,133,130,175, - 0,0,34,174,76,63,192,12,66,144,18,0, - 224,133,130,143,0,0,0,0,36,16,83,0, - 224,133,130,175,0,0,34,174,255,255,66,50, - 230,255,64,20,0,1,66,50,168,69,192,12, - 33,32,192,2,38,16,194,3,255,255,66,48, - 1,0,66,44,52,0,191,143,48,0,190,143, - 44,0,183,143,40,0,182,143,36,0,181,143, - 32,0,180,143,28,0,179,143,24,0,178,143, - 20,0,177,143,16,0,176,143,8,0,224,3, - 56,0,189,39,0,0,0,0,248,255,189,39, - 255,255,195,36,6,0,192,16,33,16,128,0, - 255,255,6,36,0,0,133,160,255,255,99,36, - 253,255,102,20,1,0,132,36,8,0,189,39, - 8,0,224,3,0,0,0,0,159,71,192,8, - 33,24,0,0,1,0,99,36,0,0,130,128, - 0,0,0,0,252,255,64,20,1,0,132,36, - 8,0,224,3,33,16,96,0,0,0,0,0, - 0,0,0,0,0,0,0,0,255,255,198,36, - 10,0,192,16,0,0,0,0,0,0,131,128, - 0,0,162,128,0,0,0,0,5,0,98,20, - 0,0,0,0,1,0,132,36,255,255,198,36, - 248,255,192,20,1,0,165,36,0,0,131,144, - 0,0,162,144,0,0,0,0,8,0,224,3, - 35,16,98,0,0,0,0,0,0,0,0,0, - 0,0,0,0,196,71,192,8,240,255,189,39, - 0,0,163,128,3,22,2,0,8,0,67,20, - 0,0,0,0,1,0,132,36,1,0,165,36, - 0,0,130,128,0,0,131,144,0,0,0,0, - 246,255,64,20,0,22,3,0,0,0,131,144, - 0,0,162,144,0,0,0,0,35,16,98,0, - 8,0,224,3,16,0,189,39,0,0,0,0, - 255,255,198,36,9,0,192,4,33,16,0,0, - 0,0,130,144,0,0,0,0,5,0,69,16, - 33,16,128,0,255,255,198,36,250,255,193,4, - 1,0,132,36,33,16,0,0,8,0,224,3, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,2,0,192,20,255,255,2,36, - 1,0,2,36,8,0,224,3,0,0,226,172, - 232,255,189,39,20,0,191,175,16,0,176,175, - 40,0,176,143,33,32,192,0,4,0,5,142, - 
0,0,0,0,15,86,192,12,255,255,230,48, - 3,0,64,16,255,255,2,36,243,71,192,8, - 0,0,2,174,0,0,0,174,20,0,191,143, - 16,0,176,143,8,0,224,3,24,0,189,39, - 208,255,189,39,40,0,191,175,33,24,128,0, - 64,0,162,143,32,0,160,175,36,0,162,175, - 12,0,66,148,0,0,0,0,2,0,64,20, - 33,32,160,0,120,5,2,36,255,255,66,48, - 16,0,162,175,1,131,2,60,148,31,66,36, - 20,0,162,175,1,131,2,60,128,31,66,36, - 24,0,162,175,32,0,162,39,28,0,162,175, - 166,85,192,12,33,40,96,0,32,0,162,143, - 40,0,191,143,48,0,189,39,8,0,224,3, - 0,0,0,0,0,0,0,0,88,0,131,148, - 4,0,2,36,9,0,98,20,0,0,0,0, - 33,72,192,8,116,0,132,36,33,16,69,0, - 128,16,2,0,8,0,131,140,0,0,0,0, - 46,72,192,8,33,16,67,0,104,0,132,36, - 12,0,128,16,33,16,0,0,4,0,130,140, - 0,0,0,0,42,16,162,0,243,255,64,20, - 0,17,5,0,4,0,130,140,12,0,132,140, - 0,0,0,0,247,255,128,20,35,40,162,0, - 33,16,0,0,8,0,224,3,0,0,0,0, - 88,0,131,148,4,0,2,36,3,0,98,20, - 33,48,0,0,55,72,192,8,116,0,132,36, - 104,0,132,36,8,0,130,140,0,0,0,0, - 43,16,162,0,14,0,64,16,255,255,2,36, - 92,72,192,8,0,0,0,0,35,24,163,0, - 0,17,3,0,35,16,67,0,0,26,2,0, - 33,16,67,0,0,28,2,0,33,16,67,0, - 35,16,2,0,131,16,2,0,92,72,192,8, - 33,16,194,0,18,0,128,16,0,0,0,0, - 4,0,131,140,0,0,0,0,0,17,3,0, - 33,16,67,0,128,16,2,0,8,0,131,140, - 0,0,0,0,33,16,67,0,43,16,162,0, - 233,255,64,20,0,0,0,0,4,0,130,140, - 12,0,132,140,0,0,0,0,241,255,128,20, - 33,48,194,0,255,255,2,36,8,0,224,3, - 0,0,0,0,0,0,0,0,0,0,0,0, - 8,0,130,36,144,0,163,140,120,132,135,39, - 2,0,96,16,20,0,137,36,33,56,96,0, - 0,0,72,140,4,0,68,140,132,72,192,8, - 0,0,0,0,30,0,0,25,0,0,0,0, - 4,0,227,140,0,0,0,0,4,0,98,140, - 0,0,0,0,55,0,64,16,255,255,2,36, - 0,0,135,140,0,0,98,140,0,0,0,0, - 8,0,71,16,0,0,0,0,8,0,99,36, - 4,0,98,140,0,0,0,0,248,255,64,20, - 255,255,2,36,168,72,192,8,0,0,0,0, - 0,0,130,140,0,0,0,0,4,0,34,173, - 4,0,103,140,255,255,8,37,4,0,132,36, - 0,0,226,148,0,0,0,0,1,0,66,48, - 226,255,64,16,0,0,0,0,0,0,226,148, - 0,0,0,0,64,0,66,48,27,0,64,20, - 255,255,2,36,0,0,226,148,0,0,0,0, - 1,0,66,48,17,0,64,16,0,0,0,0, - 13,0,192,16,1,0,2,36,64,0,162,140, - 0,0,0,0,9,0,64,20,1,0,2,36, - 28,0,226,140,4,0,163,140,0,0,0,0, - 36,16,67,0,3,0,64,20,1,0,2,36, - 168,72,192,8,255,255,2,36,164,72,192,8, - 0,0,34,165,0,0,32,165,8,0,40,173, - 12,0,36,173,16,0,39,173,33,16,0,0, - 8,0,224,3,0,0,0,0,200,255,189,39, - 48,0,191,175,44,0,179,175,40,0,178,175, - 36,0,177,175,32,0,176,175,33,152,128,0, - 33,128,160,0,33,144,192,0,0,0,4,142, - 0,0,0,0,48,0,130,40,2,0,64,16, - 8,0,113,38,48,0,4,36,0,0,36,174, - 9,50,192,12,128,32,4,0,3,0,64,20, - 4,0,34,174,214,72,192,8,255,255,2,36, - 144,0,66,142,120,132,132,39,2,0,64,16, - 0,0,0,0,33,32,64,0,16,0,160,175, - 20,0,179,175,24,0,178,175,0,0,5,142, - 4,0,6,142,0,0,0,0,221,72,192,12, - 33,56,32,2,33,128,64,0,4,0,0,26, - 0,0,0,0,0,0,34,142,214,72,192,8, - 0,0,0,0,110,86,192,12,33,32,32,2, - 33,16,0,2,48,0,191,143,44,0,179,143, - 40,0,178,143,36,0,177,143,32,0,176,143, - 8,0,224,3,56,0,189,39,184,255,189,39, - 68,0,191,175,64,0,190,175,60,0,183,175, - 56,0,182,175,52,0,181,175,48,0,180,175, - 44,0,179,175,40,0,178,175,36,0,177,175, - 32,0,176,175,33,144,128,0,33,176,160,0, - 33,136,224,0,88,0,183,143,92,0,179,143, - 96,0,190,143,32,0,226,38,0,0,36,142, - 0,0,0,0,42,16,130,0,22,0,64,16, - 33,160,192,0,4,0,132,36,9,50,192,12, - 128,32,4,0,33,128,64,0,3,0,0,22, - 33,32,0,2,153,73,192,8,255,255,2,36, - 0,0,38,142,4,0,37,142,0,0,0,0, - 80,68,192,12,128,48,6,0,4,0,36,142, - 61,50,192,12,0,0,0,0,4,0,48,174, - 0,0,34,142,0,0,0,0,4,0,66,36, - 0,0,34,174,0,0,66,150,0,0,0,0, - 1,0,66,48,96,0,64,20,0,0,0,0, - 
33,0,192,30,0,0,0,0,4,0,80,142, - 0,0,0,0,4,0,2,142,0,0,0,0, - 108,0,64,16,128,160,23,0,1,0,242,38, - 4,0,34,142,0,0,0,0,33,16,130,2, - 0,0,3,142,0,0,0,0,0,0,67,172, - 0,0,2,142,0,0,0,0,24,0,98,174, - 16,0,178,175,20,0,179,175,24,0,190,175, - 4,0,4,142,33,40,0,0,33,48,0,0, - 221,72,192,12,33,56,32,2,112,0,64,20, - 8,0,16,38,4,0,2,142,0,0,0,0, - 234,255,64,20,33,16,0,0,153,73,192,8, - 0,0,0,0,4,0,80,142,0,0,0,0, - 4,0,2,142,0,0,0,0,76,0,64,16, - 128,168,23,0,1,0,242,38,0,0,3,142, - 0,0,132,142,0,0,0,0,43,16,100,0, - 42,0,64,20,0,0,0,0,19,0,100,20, - 255,255,197,38,4,0,34,142,0,0,0,0, - 33,16,162,2,0,0,67,172,0,0,2,142, - 0,0,0,0,24,0,98,174,16,0,178,175, - 20,0,179,175,24,0,190,175,4,0,4,142, - 4,0,134,38,221,72,192,12,33,56,32,2, - 25,0,64,16,8,0,16,38,153,73,192,8, - 0,0,0,0,0,0,130,142,0,0,0,0, - 43,16,67,0,17,0,64,16,33,40,0,0, - 4,0,34,142,0,0,0,0,33,16,162,2, - 0,0,67,172,0,0,2,142,0,0,0,0, - 24,0,98,174,16,0,178,175,20,0,179,175, - 24,0,190,175,4,0,4,142,33,48,0,0, - 221,72,192,12,33,56,32,2,52,0,64,20, - 0,0,0,0,8,0,16,38,4,0,2,142, - 0,0,0,0,205,255,64,20,33,16,0,0, - 153,73,192,8,0,0,0,0,0,0,66,150, - 0,0,0,0,64,0,66,48,40,0,64,20, - 33,16,0,0,3,0,66,146,0,0,0,0, - 1,0,66,48,11,0,64,16,33,24,64,2, - 64,0,194,143,0,0,0,0,9,0,64,20, - 0,0,0,0,28,0,98,140,4,0,195,143, - 0,0,0,0,36,16,67,0,3,0,64,20, - 0,0,0,0,153,73,192,8,33,16,0,0, - 14,0,192,26,33,16,246,2,0,0,34,174, - 4,0,36,142,128,128,23,0,33,32,4,2, - 33,40,128,2,80,68,192,12,128,48,22,0, - 28,0,118,174,4,0,34,142,0,0,0,0, - 33,128,2,2,149,73,192,8,32,0,112,174, - 0,0,55,174,28,0,96,174,32,0,96,174, - 36,0,114,174,1,0,2,36,20,0,98,166, - 1,0,2,36,68,0,191,143,64,0,190,143, - 60,0,183,143,56,0,182,143,52,0,181,143, - 48,0,180,143,44,0,179,143,40,0,178,143, - 36,0,177,143,32,0,176,143,8,0,224,3, - 72,0,189,39,3,0,160,28,33,16,0,0, - 0,0,224,172,1,0,2,36,8,0,224,3, - 0,0,0,0,208,255,189,39,44,0,191,175, - 40,0,178,175,36,0,177,175,32,0,176,175, - 33,144,128,0,33,136,224,0,64,0,176,143, - 0,0,0,0,6,0,160,24,24,0,160,175, - 17,0,2,146,0,0,0,0,18,0,66,52, - 200,73,192,8,17,0,2,162,33,32,32,2, - 33,40,0,2,1,0,6,36,253,76,192,12, - 24,0,167,39,36,0,2,142,16,0,176,175, - 8,0,66,140,33,32,64,2,1,0,5,36, - 24,0,166,39,9,248,64,0,33,56,32,2, - 44,0,191,143,40,0,178,143,36,0,177,143, - 32,0,176,143,8,0,224,3,48,0,189,39, - 224,255,189,39,28,0,191,175,24,0,178,175, - 20,0,177,175,33,144,128,0,33,136,160,0, - 31,0,81,18,16,0,176,175,4,0,80,142, - 0,0,0,0,4,0,2,142,0,0,0,0, - 10,0,64,16,0,0,0,0,4,0,4,142, - 0,0,0,0,206,73,192,12,33,40,32,2, - 8,0,16,38,4,0,2,142,0,0,0,0, - 248,255,64,20,0,0,0,0,0,0,66,150, - 0,0,0,0,32,0,66,48,4,0,64,16, - 0,0,0,0,4,0,68,142,61,50,192,12, - 0,0,0,0,0,0,66,150,0,0,0,0, - 16,0,66,48,3,0,64,16,0,0,0,0, - 61,50,192,12,33,32,64,2,28,0,191,143, - 24,0,178,143,20,0,177,143,16,0,176,143, - 8,0,224,3,32,0,189,39,120,132,131,39, - 2,0,128,16,0,0,0,0,33,24,128,0, - 0,0,167,140,4,0,165,140,26,74,192,8, - 0,0,0,0,28,0,224,24,0,0,0,0, - 4,0,99,140,0,0,0,0,4,0,98,140, - 0,0,0,0,11,0,64,16,0,0,0,0, - 0,0,164,140,0,0,98,140,0,0,0,0, - 9,0,68,16,0,0,0,0,8,0,99,36, - 4,0,98,140,0,0,0,0,248,255,64,20, - 0,0,0,0,0,0,192,172,33,74,192,8, - 2,0,2,36,4,0,99,140,255,255,231,36, - 4,0,165,36,0,0,98,148,0,0,0,0, - 1,0,66,48,228,255,64,16,0,0,0,0, - 0,0,195,172,42,16,7,0,8,0,224,3, - 0,0,0,0,208,255,189,39,44,0,191,175, - 40,0,182,175,36,0,181,175,32,0,180,175, - 28,0,179,175,24,0,178,175,20,0,177,175, - 16,0,176,175,33,168,192,0,0,0,162,140, - 0,0,0,0,3,0,64,28,33,48,0,0, - 220,74,192,8,5,0,2,36,120,132,147,39, - 
2,0,128,16,0,0,224,172,33,152,128,0, - 0,0,177,140,4,0,180,140,84,74,192,8, - 0,0,0,0,29,0,32,26,0,0,0,0, - 4,0,102,142,0,0,0,0,4,0,194,140, - 0,0,0,0,23,0,64,16,0,0,0,0, - 0,0,131,142,0,0,194,140,0,0,0,0, - 6,0,67,16,0,0,0,0,8,0,198,36, - 4,0,194,140,0,0,0,0,248,255,64,20, - 0,0,0,0,4,0,194,140,0,0,0,0, - 9,0,64,16,0,0,0,0,255,255,49,38, - 4,0,148,38,33,152,64,0,0,0,98,150, - 0,0,0,0,1,0,66,48,227,255,64,16, - 0,0,0,0,29,0,32,22,0,0,0,0, - 0,0,99,150,0,0,0,0,1,0,98,48, - 11,0,64,16,4,0,98,48,123,0,64,16, - 3,0,2,36,0,0,162,150,0,0,0,0, - 1,0,66,48,118,0,64,16,4,0,2,36, - 0,0,243,172,219,74,192,8,4,0,213,172, - 0,0,98,150,0,0,0,0,4,0,66,48, - 110,0,64,16,3,0,2,36,0,0,162,150, - 0,0,0,0,1,0,66,48,105,0,64,20, - 4,0,2,36,0,0,243,172,219,74,192,8, - 4,0,213,172,0,0,98,150,0,0,0,0, - 1,0,66,48,97,0,64,20,2,0,2,36, - 2,0,34,42,23,0,64,20,33,144,160,2, - 56,0,22,36,9,50,192,12,16,0,4,36, - 33,128,64,0,40,0,0,18,128,16,17,0, - 33,16,84,0,252,255,66,140,0,0,0,0, - 0,0,2,174,4,0,18,174,8,0,0,174, - 12,0,0,174,9,50,192,12,8,0,4,36, - 33,144,64,0,24,0,64,18,255,255,49,38, - 0,0,86,166,2,0,34,42,236,255,64,16, - 4,0,80,174,4,0,102,142,0,0,0,0, - 4,0,194,140,0,0,0,0,6,0,64,16, - 1,0,17,36,8,0,198,36,4,0,194,140, - 0,0,0,0,252,255,64,20,1,0,49,38, - 1,0,36,38,9,50,192,12,192,32,4,0, - 33,128,64,0,12,0,0,22,33,32,64,2, - 173,74,192,8,0,0,0,0,0,0,18,142, - 0,0,0,0,61,50,192,12,33,32,0,2, - 33,32,64,2,206,73,192,12,33,40,160,2, - 220,74,192,8,1,0,2,36,4,0,102,142, - 0,0,0,0,192,74,192,8,33,168,0,2, - 4,0,194,140,0,0,0,0,14,0,64,16, - 0,0,0,0,0,0,194,140,4,0,195,140, - 0,0,2,174,4,0,3,174,8,0,198,36, - 8,0,16,38,255,255,49,38,0,0,194,140, - 0,0,131,142,0,0,0,0,43,16,67,0, - 240,255,64,20,0,0,0,0,0,0,130,142, - 0,0,0,0,0,0,2,174,4,0,18,174, - 8,0,4,38,33,40,192,0,80,68,192,12, - 192,48,17,0,4,0,102,142,4,0,117,174, - 0,0,98,150,0,0,0,0,32,0,66,48, - 3,0,64,16,0,0,0,0,61,50,192,12, - 33,32,192,0,0,0,98,150,0,0,0,0, - 32,0,66,52,0,0,98,166,33,16,0,0, - 44,0,191,143,40,0,182,143,36,0,181,143, - 32,0,180,143,28,0,179,143,24,0,178,143, - 20,0,177,143,16,0,176,143,8,0,224,3, - 48,0,189,39,232,255,189,39,20,0,191,175, - 16,0,176,175,0,0,162,140,0,0,0,0, - 85,0,64,24,33,16,0,0,120,132,144,39, - 2,0,128,16,0,0,0,0,33,128,128,0, - 0,0,164,140,4,0,165,140,0,0,0,0, - 0,0,168,140,0,0,2,150,0,0,0,0, - 33,24,64,0,1,0,66,48,34,0,64,20, - 33,56,0,2,32,0,128,24,8,0,98,48, - 4,0,6,142,5,0,64,16,0,0,0,0, - 12,0,194,140,0,0,0,0,3,0,64,16, - 0,0,0,0,33,56,0,2,0,0,168,140, - 0,0,195,140,0,0,162,140,0,0,0,0, - 10,0,98,16,0,0,0,0,0,0,163,140, - 4,0,194,140,0,0,0,0,20,0,64,16, - 8,0,198,36,0,0,194,140,0,0,0,0, - 249,255,67,20,0,0,0,0,255,255,132,36, - 4,0,208,140,0,0,0,0,0,0,3,150, - 0,0,0,0,1,0,98,48,224,255,64,16, - 4,0,165,36,36,0,128,20,33,16,0,0, - 0,0,2,150,0,0,0,0,2,0,66,48, - 3,0,64,20,0,0,0,0,65,75,192,8, - 33,16,0,0,4,0,230,140,0,0,0,0, - 0,0,194,140,0,0,0,0,7,0,72,16, - 0,0,0,0,8,0,198,36,0,0,194,140, - 0,0,0,0,253,255,72,20,8,0,198,36, - 248,255,198,36,4,0,199,140,0,0,0,0, - 10,0,224,16,33,32,224,0,8,0,194,140, - 12,0,195,140,0,0,194,172,4,0,195,172, - 8,0,198,36,4,0,194,140,0,0,0,0, - 248,255,64,20,33,32,224,0,206,73,192,12, - 33,40,0,2,33,16,0,2,20,0,191,143, - 16,0,176,143,8,0,224,3,24,0,189,39, - 0,0,0,0,0,0,0,0,0,0,0,0, - 192,255,189,39,56,0,191,175,52,0,181,175, - 48,0,180,175,44,0,179,175,40,0,178,175, - 36,0,177,175,32,0,176,175,33,136,128,0, - 33,144,160,0,33,152,192,0,33,160,224,0, - 80,0,181,143,0,0,0,0,36,0,162,142, - 0,0,0,0,20,0,80,140,33,32,128,2, - 48,72,192,12,33,40,160,2,16,0,3,142, - 
0,0,0,0,16,0,163,175,20,0,180,175, - 24,0,162,175,0,0,2,142,1,0,4,36, - 33,40,32,2,33,48,64,2,9,248,64,0, - 33,56,96,2,6,0,64,20,33,32,128,2, - 17,0,162,146,0,0,0,0,1,0,66,52, - 113,75,192,8,17,0,162,162,33,40,160,2, - 59,77,192,12,33,48,64,0,56,0,191,143, - 52,0,181,143,48,0,180,143,44,0,179,143, - 40,0,178,143,36,0,177,143,32,0,176,143, - 8,0,224,3,64,0,189,39,192,255,189,39, - 60,0,191,175,56,0,178,175,52,0,177,175, - 48,0,176,175,33,136,224,0,80,0,178,143, - 36,0,160,175,36,0,66,142,0,0,0,0, - 20,0,67,140,2,0,80,144,0,0,0,0, - 255,0,2,50,254,255,71,36,70,0,226,44, - 110,0,64,16,128,16,7,0,2,131,1,60, - 33,8,34,0,160,151,34,140,0,0,0,0, - 8,0,64,0,0,0,0,0,16,0,177,175, - 8,0,98,140,16,0,103,140,9,248,64,0, - 0,0,0,0,2,0,3,36,16,0,67,162, - 251,75,192,8,40,0,66,174,16,0,177,175, - 8,0,98,140,16,0,103,140,9,248,64,0, - 0,0,0,0,40,0,162,175,16,0,80,162, - 40,0,162,143,0,0,0,0,251,75,192,8, - 40,0,66,174,32,0,162,39,16,0,162,175, - 20,0,177,175,36,0,162,39,24,0,162,175, - 8,0,98,140,16,0,103,140,9,248,64,0, - 0,0,0,0,33,48,64,0,16,0,80,162, - 17,0,66,146,0,0,0,0,2,0,66,52, - 17,0,66,162,36,0,162,143,0,0,0,0, - 43,16,2,0,40,0,66,166,44,0,70,174, - 32,0,162,151,0,0,0,0,33,16,194,0, - 48,0,66,174,255,75,192,8,52,0,64,166, - 16,0,177,175,36,0,162,39,20,0,162,175, - 8,0,98,140,16,0,103,140,9,248,64,0, - 0,0,0,0,33,128,64,0,8,0,0,22, - 33,32,32,2,16,0,160,175,33,40,64,2, - 33,48,0,0,226,76,192,12,33,56,0,0, - 255,75,192,8,0,0,0,0,36,0,162,143, - 0,0,0,0,16,0,162,175,0,0,6,142, - 4,0,7,142,0,0,0,0,226,76,192,12, - 33,40,64,2,36,0,162,143,0,0,0,0, - 35,0,64,16,0,0,0,0,61,50,192,12, - 33,32,0,2,255,75,192,8,0,0,0,0, - 5,0,2,36,251,75,192,8,16,0,66,162, - 16,0,177,175,40,0,162,39,20,0,162,175, - 8,0,98,140,16,0,103,140,9,248,64,0, - 0,0,0,0,33,48,64,0,7,0,192,16, - 64,0,2,36,3,0,194,136,0,0,194,152, - 0,0,0,0,43,0,162,171,40,0,162,187, - 64,0,2,36,16,0,66,162,40,0,162,143, - 0,0,0,0,251,75,192,8,40,0,66,174, - 5,0,2,36,96,0,34,174,17,0,66,146, - 0,0,0,0,2,0,66,52,17,0,66,162, - 60,0,191,143,56,0,178,143,52,0,177,143, - 48,0,176,143,8,0,224,3,64,0,189,39, - 80,255,189,39,168,0,191,175,164,0,179,175, - 160,0,178,175,156,0,177,175,152,0,176,175, - 33,152,128,0,33,136,224,0,192,0,178,143, - 0,0,0,0,36,0,66,142,0,0,0,0, - 20,0,67,140,0,0,0,0,16,0,98,140, - 0,0,0,0,16,0,162,175,20,0,177,175, - 4,0,98,140,0,0,0,0,9,248,64,0, - 24,0,167,39,33,128,64,0,6,0,0,22, - 33,32,32,2,17,0,66,146,0,0,0,0, - 18,0,66,52,45,76,192,8,17,0,66,162, - 33,40,64,2,33,48,0,2,253,76,192,12, - 24,0,167,39,16,0,178,175,33,32,96,2, - 33,40,0,2,24,0,166,39,122,75,192,12, - 33,56,32,2,168,0,191,143,164,0,179,143, - 160,0,178,143,156,0,177,143,152,0,176,143, - 8,0,224,3,176,0,189,39,192,255,189,39, - 56,0,191,175,52,0,181,175,48,0,180,175, - 44,0,179,175,40,0,178,175,36,0,177,175, - 32,0,176,175,33,152,128,0,33,160,160,0, - 33,168,192,0,33,136,224,0,80,0,178,143, - 0,0,0,0,36,0,66,142,0,0,0,0, - 20,0,80,140,33,32,32,2,48,72,192,12, - 33,40,64,2,16,0,3,142,0,0,0,0, - 16,0,163,175,20,0,177,175,24,0,162,175, - 0,0,2,142,33,32,0,0,33,40,96,2, - 33,48,128,2,9,248,64,0,33,56,160,2, - 5,0,64,16,33,32,32,2,200,76,192,12, - 33,40,64,2,95,76,192,8,0,0,0,0, - 16,0,178,175,33,32,96,2,33,40,128,2, - 33,48,160,2,122,75,192,12,33,56,32,2, - 56,0,191,143,52,0,181,143,48,0,180,143, - 44,0,179,143,40,0,178,143,36,0,177,143, - 32,0,176,143,8,0,224,3,64,0,189,39, - 192,255,189,39,56,0,191,175,52,0,181,175, - 48,0,180,175,44,0,179,175,40,0,178,175, - 36,0,177,175,32,0,176,175,33,152,128,0, - 33,160,160,0,33,168,192,0,80,0,177,143, - 
0,0,0,0,36,0,34,142,0,0,0,0, - 20,0,82,140,2,0,66,144,0,0,0,0, - 254,255,67,36,70,0,98,44,57,0,64,16, - 33,128,224,0,128,16,3,0,2,131,1,60, - 33,8,34,0,184,152,34,140,0,0,0,0, - 8,0,64,0,0,0,0,0,33,32,0,2, - 48,72,192,12,33,40,32,2,40,0,35,142, - 0,0,0,0,16,0,163,175,20,0,176,175, - 173,76,192,8,24,0,162,175,33,32,0,2, - 48,72,192,12,33,40,32,2,44,0,35,142, - 0,0,0,0,16,0,163,175,48,0,35,142, - 44,0,36,142,0,0,0,0,35,24,100,0, - 170,76,192,8,255,255,99,48,33,32,0,2, - 48,72,192,12,33,40,32,2,40,0,35,142, - 0,0,0,0,16,0,163,175,44,0,35,142, - 0,0,0,0,171,76,192,8,20,0,163,175, - 33,32,0,2,48,72,192,12,33,40,32,2, - 40,0,35,38,16,0,163,175,4,0,3,36, - 20,0,163,175,24,0,176,175,28,0,162,175, - 12,0,66,142,33,32,96,2,33,40,128,2, - 16,0,71,142,0,0,0,0,9,248,64,0, - 33,48,160,2,184,76,192,8,0,0,0,0, - 5,0,2,36,96,0,2,174,17,0,34,146, - 0,0,0,0,2,0,66,52,17,0,34,162, - 56,0,191,143,52,0,181,143,48,0,180,143, - 44,0,179,143,40,0,178,143,36,0,177,143, - 32,0,176,143,8,0,224,3,64,0,189,39, - 0,0,0,0,0,0,0,0,0,0,0,0, - 224,255,189,39,24,0,191,175,20,0,177,175, - 16,0,176,175,33,128,128,0,64,0,2,142, - 0,0,0,0,7,0,64,20,33,136,160,0, - 2,0,2,36,48,72,192,12,96,0,2,174, - 1,0,66,36,217,76,192,8,100,0,2,174, - 129,0,2,36,16,0,34,162,17,0,34,146, - 0,0,0,0,2,0,66,52,17,0,34,162, - 24,0,191,143,20,0,177,143,16,0,176,143, - 8,0,224,3,32,0,189,39,232,255,189,39, - 20,0,191,175,16,0,176,175,33,128,128,0, - 33,64,160,0,33,32,192,0,33,40,224,0, - 40,0,162,143,17,0,3,145,0,0,0,0, - 2,0,99,52,17,0,3,161,6,0,3,36, - 4,0,64,16,16,0,3,161,40,0,4,173, - 249,76,192,8,44,0,5,173,80,86,192,12, - 40,0,6,37,2,0,64,16,5,0,2,36, - 96,0,2,174,20,0,191,143,16,0,176,143, - 8,0,224,3,24,0,189,39,200,255,189,39, - 48,0,191,175,44,0,183,175,40,0,182,175, - 36,0,181,175,32,0,180,175,28,0,179,175, - 24,0,178,175,20,0,177,175,16,0,176,175, - 33,176,128,0,33,144,160,0,33,152,192,0, - 37,0,96,18,33,184,224,0,28,0,84,38, - 8,0,67,142,28,0,66,142,0,0,0,0, - 35,128,98,0,42,16,83,0,23,0,64,16, - 33,168,19,2,9,50,192,12,128,32,21,0, - 33,136,64,0,8,0,32,22,128,128,16,0, - 5,0,2,36,96,0,194,174,17,0,66,146, - 0,0,0,0,2,0,66,52,48,77,192,8, - 17,0,66,162,33,32,32,2,12,0,69,142, - 0,0,0,0,80,68,192,12,33,48,0,2, - 110,86,192,12,8,0,68,38,12,0,81,174, - 33,128,48,2,4,0,144,174,4,0,132,142, - 33,40,224,2,80,68,192,12,128,48,19,0, - 0,0,147,174,8,0,85,174,48,0,191,143, - 44,0,183,143,40,0,182,143,36,0,181,143, - 32,0,180,143,28,0,179,143,24,0,178,143, - 20,0,177,143,16,0,176,143,8,0,224,3, - 56,0,189,39,224,255,189,39,24,0,191,175, - 20,0,177,175,16,0,176,175,33,136,128,0, - 5,0,192,24,33,128,160,0,3,0,2,36, - 96,0,34,174,95,77,192,8,100,0,38,174, - 19,0,195,36,19,0,98,44,9,0,64,16, - 128,16,3,0,2,131,1,60,33,8,34,0, - 208,153,34,140,0,0,0,0,8,0,64,0, - 0,0,0,0,89,77,192,8,2,0,6,36, - 89,77,192,8,5,0,6,36,89,77,192,8, - 3,0,6,36,89,77,192,8,1,0,6,36, - 35,48,6,0,96,0,38,174,33,32,32,2, - 48,72,192,12,33,40,0,2,1,0,66,36, - 100,0,34,174,17,0,2,146,0,0,0,0, - 1,0,66,52,17,0,2,162,24,0,191,143, - 20,0,177,143,16,0,176,143,8,0,224,3, - 32,0,189,39,200,255,189,39,52,0,191,175, - 48,0,190,175,44,0,183,175,40,0,182,175, - 36,0,181,175,32,0,180,175,28,0,179,175, - 24,0,178,175,20,0,177,175,16,0,176,175, - 33,152,128,0,33,168,160,0,33,160,192,0, - 76,0,182,143,80,0,183,143,84,0,190,143, - 92,0,177,143,0,0,0,0,209,83,192,12, - 33,144,224,0,33,128,64,0,3,0,0,22, - 4,0,2,36,168,77,192,8,33,16,0,0, - 88,0,2,166,64,0,19,174,33,32,64,2, - 72,0,165,143,0,0,0,0,80,86,192,12, - 92,0,6,38,255,255,3,36,23,0,67,16, - 0,0,0,0,3,0,194,138,0,0,194,154, - 
0,0,0,0,103,0,2,170,100,0,2,186, - 104,0,23,174,108,0,30,174,88,0,168,143, - 0,0,0,0,112,0,8,174,72,0,0,166, - 76,0,20,174,255,255,162,50,33,16,130,2, - 80,0,2,174,84,0,0,166,9,0,32,18, - 120,0,17,174,224,83,192,12,33,32,32,2, - 6,0,64,20,124,0,2,174,167,83,192,12, - 33,32,0,2,168,77,192,8,33,16,0,0, - 124,0,0,174,33,16,0,2,52,0,191,143, - 48,0,190,143,44,0,183,143,40,0,182,143, - 36,0,181,143,32,0,180,143,28,0,179,143, - 24,0,178,143,20,0,177,143,16,0,176,143, - 8,0,224,3,56,0,189,39,224,255,189,39, - 28,0,191,175,24,0,178,175,20,0,177,175, - 16,0,176,175,33,144,128,0,92,0,68,142, - 128,86,192,12,0,0,0,0,96,0,68,142, - 0,0,0,0,128,86,192,12,33,128,64,0, - 100,0,68,142,0,0,0,0,128,86,192,12, - 33,136,64,0,33,128,17,2,6,0,16,38, - 33,16,80,0,90,0,66,166,191,79,192,12, - 104,0,68,38,255,255,67,48,90,0,68,150, - 128,0,98,44,5,0,64,20,2,0,130,36, - 0,1,98,44,2,0,64,20,3,0,130,36, - 4,0,130,36,33,16,67,0,90,0,66,166, - 80,0,66,142,76,0,67,142,90,0,80,150, - 64,0,68,142,0,0,0,0,128,86,192,12, - 35,136,67,0,255,255,67,48,90,0,68,150, - 0,0,0,0,128,0,130,44,9,0,64,20, - 0,1,130,44,4,0,64,16,0,0,0,0, - 33,24,112,0,237,77,192,8,6,0,99,36, - 33,24,112,0,237,77,192,8,7,0,99,36, - 33,24,112,0,5,0,99,36,255,255,36,50, - 128,0,130,44,5,0,64,20,1,0,130,36, - 0,1,130,44,2,0,64,20,2,0,130,36, - 3,0,130,36,33,16,98,0,2,0,66,166, - 2,0,67,150,2,0,68,150,0,0,0,0, - 128,0,130,44,6,0,64,20,1,0,99,36, - 0,1,130,44,4,0,64,20,2,0,98,36, - 3,78,192,8,3,0,98,36,1,0,98,36, - 0,0,66,166,0,0,66,150,28,0,191,143, - 24,0,178,143,20,0,177,143,16,0,176,143, - 8,0,224,3,32,0,189,39,216,255,189,39, - 32,0,191,175,28,0,179,175,24,0,178,175, - 20,0,177,175,16,0,176,175,33,152,128,0, - 171,86,192,12,92,0,100,38,104,0,100,142, - 0,0,0,0,128,86,192,12,33,144,64,0, - 108,0,100,142,0,0,0,0,128,86,192,12, - 33,136,64,0,112,0,100,142,0,0,0,0, - 153,86,192,12,33,128,64,0,10,0,82,38, - 33,136,50,2,33,128,17,2,4,0,16,38, - 33,16,80,0,90,0,98,166,191,79,192,12, - 116,0,100,38,255,255,67,48,90,0,100,150, - 128,0,98,44,5,0,64,20,2,0,130,36, - 0,1,98,44,2,0,64,20,3,0,130,36, - 4,0,130,36,33,16,67,0,90,0,98,166, - 80,0,98,142,76,0,99,142,90,0,113,150, - 64,0,100,142,0,0,0,0,128,86,192,12, - 35,128,67,0,255,255,67,48,90,0,100,150, - 0,0,0,0,128,0,130,44,9,0,64,20, - 0,1,130,44,4,0,64,16,0,0,0,0, - 33,24,113,0,74,78,192,8,6,0,99,36, - 33,24,113,0,74,78,192,8,7,0,99,36, - 33,24,113,0,5,0,99,36,255,255,4,50, - 128,0,130,44,5,0,64,20,1,0,130,36, - 0,1,130,44,2,0,64,20,2,0,130,36, - 3,0,130,36,33,16,98,0,2,0,98,166, - 2,0,99,150,2,0,100,150,0,0,0,0, - 128,0,130,44,6,0,64,20,1,0,99,36, - 0,1,130,44,4,0,64,20,2,0,112,36, - 96,78,192,8,3,0,112,36,1,0,112,36, - 255,255,2,50,32,0,191,143,28,0,179,143, - 24,0,178,143,20,0,177,143,16,0,176,143, - 8,0,224,3,40,0,189,39,208,255,189,39, - 40,0,191,175,36,0,179,175,32,0,178,175, - 28,0,177,175,24,0,176,175,33,128,128,0, - 33,152,192,0,33,144,224,0,33,136,160,0, - 16,0,4,36,32,0,5,36,1,131,6,60, - 56,97,198,36,242,86,192,12,33,56,0,2, - 255,255,36,50,1,131,5,60,56,97,165,36, - 44,87,192,12,33,48,0,2,16,0,176,175, - 2,0,4,36,33,40,0,0,1,131,7,60, - 56,97,231,36,94,87,192,12,33,48,96,2, - 8,0,71,142,4,0,66,142,0,0,0,0, - 35,56,226,0,1,131,2,60,56,97,66,36, - 16,0,162,175,20,0,176,175,4,0,4,36, - 33,40,0,0,4,0,70,142,0,0,0,0, - 194,87,192,12,255,255,231,48,40,0,191,143, - 36,0,179,143,32,0,178,143,28,0,177,143, - 24,0,176,143,8,0,224,3,48,0,189,39, - 216,255,189,39,36,0,191,175,32,0,178,175, - 28,0,177,175,24,0,176,175,33,128,128,0, - 33,136,160,0,88,0,4,150,160,0,5,36, - 1,131,6,60,56,97,198,36,242,86,192,12, - 
33,56,32,2,90,0,4,150,1,131,5,60, - 56,97,165,36,44,87,192,12,33,48,32,2, - 16,0,177,175,6,0,4,36,33,40,0,0, - 1,131,7,60,56,97,231,36,15,88,192,12, - 92,0,6,38,1,131,18,60,56,97,82,38, - 16,0,178,175,20,0,177,175,33,32,0,0, - 64,0,5,36,100,0,6,38,194,87,192,12, - 4,0,7,36,16,0,177,175,2,0,4,36, - 33,40,0,0,104,0,6,142,0,0,0,0, - 94,87,192,12,33,56,64,2,16,0,177,175, - 2,0,4,36,33,40,0,0,108,0,6,142, - 0,0,0,0,94,87,192,12,33,56,64,2, - 16,0,177,175,3,0,4,36,64,0,5,36, - 112,0,6,142,0,0,0,0,143,87,192,12, - 33,56,64,2,116,0,4,38,7,79,192,12, - 33,40,32,2,36,0,191,143,32,0,178,143, - 28,0,177,143,24,0,176,143,8,0,224,3, - 40,0,189,39,216,255,189,39,32,0,191,175, - 28,0,177,175,24,0,176,175,33,128,128,0, - 33,136,160,0,88,0,4,150,160,0,5,36, - 1,131,6,60,56,97,198,36,242,86,192,12, - 33,56,32,2,90,0,4,150,1,131,5,60, - 56,97,165,36,44,87,192,12,33,48,32,2, - 16,0,177,175,2,0,4,36,92,0,6,142, - 1,131,7,60,56,97,231,36,94,87,192,12, - 33,40,0,0,16,0,177,175,2,0,4,36, - 96,0,6,142,1,131,7,60,56,97,231,36, - 94,87,192,12,33,40,0,0,16,0,177,175, - 2,0,4,36,100,0,6,142,1,131,7,60, - 56,97,231,36,94,87,192,12,33,40,0,0, - 104,0,4,38,7,79,192,12,33,40,32,2, - 32,0,191,143,28,0,177,143,24,0,176,143, - 8,0,224,3,40,0,189,39,200,255,189,39, - 52,0,191,175,48,0,180,175,44,0,179,175, - 40,0,178,175,36,0,177,175,32,0,176,175, - 33,144,128,0,33,136,160,0,16,0,4,36, - 32,0,5,36,1,131,6,60,56,97,198,36, - 242,86,192,12,33,56,32,2,0,0,68,150, - 1,131,5,60,56,97,165,36,44,87,192,12, - 33,48,32,2,155,0,64,18,0,0,0,0, - 1,131,20,60,56,97,148,38,8,0,80,142, - 0,0,0,0,145,0,0,18,0,0,0,0, - 4,0,66,142,0,0,0,0,141,0,64,24, - 33,152,0,0,16,0,4,36,32,0,5,36, - 33,48,128,2,242,86,192,12,33,56,32,2, - 4,0,4,150,33,40,128,2,44,87,192,12, - 33,48,32,2,16,0,177,175,6,0,4,36, - 33,40,0,0,8,0,6,38,15,88,192,12, - 33,56,128,2,16,0,3,146,65,0,2,36, - 47,0,98,16,66,0,98,40,18,0,64,16, - 5,0,2,36,88,0,98,16,6,0,98,40, - 7,0,64,16,2,0,2,36,30,0,98,16, - 4,0,2,36,51,0,98,16,4,0,4,36, - 174,79,192,8,1,0,115,38,6,0,2,36, - 68,0,98,16,64,0,2,36,78,0,98,16, - 33,32,0,0,174,79,192,8,1,0,115,38, - 68,0,2,36,47,0,98,16,69,0,98,40, - 7,0,64,16,66,0,2,36,24,0,98,16, - 67,0,2,36,25,0,98,16,3,0,4,36, - 174,79,192,8,1,0,115,38,131,0,98,40, - 83,0,64,16,128,0,98,40,68,0,64,16, - 0,0,0,0,174,79,192,8,1,0,115,38, - 16,0,177,175,2,0,4,36,40,0,6,142, - 1,131,7,60,56,97,231,36,94,87,192,12, - 33,40,0,0,174,79,192,8,1,0,115,38, - 16,0,177,175,111,79,192,8,1,0,4,36, - 16,0,177,175,111,79,192,8,2,0,4,36, - 16,0,177,175,40,0,6,142,1,131,7,60, - 56,97,231,36,143,87,192,12,64,0,5,36, - 174,79,192,8,1,0,115,38,48,0,7,142, - 44,0,2,142,0,0,0,0,35,56,226,0, - 16,0,180,175,20,0,177,175,134,79,192,8, - 33,40,0,0,48,0,7,142,44,0,2,142, - 0,0,0,0,35,56,226,0,16,0,180,175, - 20,0,177,175,4,0,4,36,64,0,5,36, - 44,0,6,142,0,0,0,0,194,87,192,12, - 255,255,231,48,174,79,192,8,1,0,115,38, - 16,0,177,175,6,0,4,36,33,40,0,0, - 1,131,7,60,56,97,231,36,15,88,192,12, - 40,0,6,38,174,79,192,8,1,0,115,38, - 5,0,4,36,164,79,192,8,33,40,0,0, - 16,0,180,175,20,0,177,175,64,0,5,36, - 40,0,6,38,194,87,192,12,4,0,7,36, - 174,79,192,8,1,0,115,38,16,0,4,146, - 16,0,5,146,31,0,132,48,224,0,165,48, - 1,131,6,60,56,97,198,36,242,86,192,12, - 33,56,32,2,33,32,0,0,1,131,5,60, - 56,97,165,36,44,87,192,12,33,48,32,2, - 1,0,115,38,4,0,66,142,0,0,0,0, - 42,16,98,2,117,255,64,20,68,0,16,38, - 12,0,82,142,0,0,0,0,105,255,64,22, - 0,0,0,0,52,0,191,143,48,0,180,143, - 44,0,179,143,40,0,178,143,36,0,177,143, - 32,0,176,143,8,0,224,3,56,0,189,39, - 200,255,189,39,52,0,191,175,48,0,182,175, - 
44,0,181,175,40,0,180,175,36,0,179,175, - 32,0,178,175,28,0,177,175,24,0,176,175, - 33,168,128,0,33,152,160,2,113,0,160,18, - 33,144,0,0,4,0,22,36,8,0,112,142, - 0,0,0,0,104,0,0,18,0,0,0,0, - 4,0,98,142,0,0,0,0,100,0,64,24, - 33,160,0,0,171,86,192,12,8,0,4,38, - 255,255,67,48,128,0,98,44,5,0,64,20, - 2,0,113,36,0,1,98,44,2,0,64,20, - 3,0,113,36,4,0,113,36,16,0,3,146, - 64,0,2,36,50,0,98,16,65,0,98,40, - 16,0,64,16,68,0,2,36,34,0,118,16, - 5,0,98,40,5,0,64,16,2,0,2,36, - 20,0,98,16,255,255,36,50,22,80,192,8, - 0,0,0,0,5,0,2,36,35,0,98,16, - 6,0,2,36,29,0,98,16,255,255,36,50, - 22,80,192,8,0,0,0,0,19,0,98,16, - 68,0,98,40,12,0,64,20,131,0,98,40, - 28,0,64,16,128,0,98,40,27,0,64,20, - 255,255,36,50,22,80,192,8,18,0,0,166, - 40,0,4,142,128,86,192,12,0,0,0,0, - 21,80,192,8,18,0,2,166,40,0,4,142, - 153,86,192,12,0,0,0,0,21,80,192,8, - 18,0,2,166,48,0,2,142,44,0,3,142, - 0,0,0,0,35,16,67,0,21,80,192,8, - 18,0,2,166,171,86,192,12,40,0,4,38, - 21,80,192,8,18,0,2,166,21,80,192,8, - 18,0,0,166,18,0,22,166,255,255,36,50, - 18,0,3,150,0,0,0,0,128,0,98,44, - 6,0,64,20,1,0,132,36,0,1,98,44, - 4,0,64,20,2,0,98,36,33,80,192,8, - 3,0,98,36,1,0,98,36,33,16,130,0, - 4,0,2,166,4,0,4,150,0,0,0,0, - 1,0,132,36,4,0,3,150,0,0,0,0, - 128,0,98,44,6,0,64,20,255,255,69,50, - 0,1,98,44,4,0,64,20,2,0,162,36, - 49,80,192,8,3,0,162,36,1,0,162,36, - 33,144,68,0,1,0,148,38,4,0,98,142, - 0,0,0,0,42,16,130,2,158,255,64,20, - 68,0,16,38,12,0,115,142,0,0,0,0, - 146,255,96,22,0,0,0,0,0,0,178,166, - 255,255,66,50,52,0,191,143,48,0,182,143, - 44,0,181,143,40,0,180,143,36,0,179,143, - 32,0,178,143,28,0,177,143,24,0,176,143, - 8,0,224,3,56,0,189,39,224,255,189,39, - 24,0,191,175,20,0,177,175,16,0,176,175, - 33,128,128,0,171,86,192,12,8,0,4,38, - 255,255,67,48,128,0,98,44,5,0,64,20, - 2,0,113,36,0,1,98,44,2,0,64,20, - 3,0,113,36,4,0,113,36,16,0,3,146, - 64,0,2,36,52,0,98,16,65,0,98,40, - 17,0,64,16,4,0,2,36,37,0,98,16, - 0,0,0,0,5,0,98,40,5,0,64,16, - 2,0,2,36,22,0,98,16,255,255,36,50, - 145,80,192,8,0,0,0,0,5,0,2,36, - 36,0,98,16,6,0,2,36,30,0,98,16, - 255,255,36,50,145,80,192,8,0,0,0,0, - 68,0,2,36,20,0,98,16,0,0,0,0, - 68,0,98,40,12,0,64,20,131,0,98,40, - 28,0,64,16,128,0,98,40,27,0,64,20, - 255,255,36,50,145,80,192,8,18,0,0,166, - 40,0,4,142,128,86,192,12,0,0,0,0, - 144,80,192,8,18,0,2,166,40,0,4,142, - 153,86,192,12,0,0,0,0,144,80,192,8, - 18,0,2,166,48,0,2,142,44,0,3,142, - 0,0,0,0,143,80,192,8,35,16,67,0, - 171,86,192,12,40,0,4,38,144,80,192,8, - 18,0,2,166,144,80,192,8,18,0,0,166, - 4,0,2,36,18,0,2,166,255,255,36,50, - 18,0,3,150,0,0,0,0,128,0,98,44, - 6,0,64,20,1,0,132,36,0,1,98,44, - 4,0,64,20,2,0,98,36,156,80,192,8, - 3,0,98,36,1,0,98,36,33,16,130,0, - 4,0,2,166,4,0,3,150,4,0,4,150, - 0,0,0,0,128,0,130,44,6,0,64,20, - 1,0,99,36,0,1,130,44,4,0,64,20, - 2,0,98,36,170,80,192,8,3,0,98,36, - 1,0,98,36,255,255,66,48,24,0,191,143, - 20,0,177,143,16,0,176,143,8,0,224,3, - 32,0,189,39,232,255,189,39,16,0,191,175, - 64,0,130,140,0,0,0,0,12,0,64,20, - 33,16,0,0,88,0,131,148,4,0,2,36, - 5,0,98,16,0,0,0,0,180,77,192,12, - 0,0,0,0,193,80,192,8,255,255,66,48, - 11,78,192,12,0,0,0,0,255,255,66,48, - 16,0,191,143,24,0,189,39,8,0,224,3, - 0,0,0,0,224,255,189,39,24,0,191,175, - 20,0,177,175,16,0,176,175,33,128,128,0, - 176,80,192,12,33,136,160,0,33,32,0,2, - 33,40,32,2,213,80,192,12,255,255,70,48, - 24,0,191,143,20,0,177,143,16,0,176,143, - 8,0,224,3,32,0,189,39,224,255,189,39, - 28,0,191,175,24,0,178,175,20,0,177,175, - 16,0,176,175,33,144,128,0,33,136,192,0, - 255,255,34,50,41,0,64,16,33,128,160,0, - 4,0,2,142,0,0,0,0,11,0,64,20, - 
255,255,35,50,9,50,192,12,255,255,36,50, - 33,24,64,0,32,0,96,16,1,0,2,36, - 0,0,2,166,4,0,3,174,8,0,3,174, - 242,80,192,8,12,0,17,166,12,0,2,150, - 0,0,0,0,43,16,67,0,23,0,64,20, - 255,255,2,36,64,0,66,142,0,0,0,0, - 19,0,64,20,255,255,2,36,33,32,0,2, - 2,0,69,150,33,48,0,0,104,78,192,12, - 72,0,71,38,88,0,67,150,4,0,2,36, - 5,0,98,16,33,32,64,2,217,78,192,12, - 33,40,0,2,8,81,192,8,33,16,0,0, - 153,78,192,12,33,40,0,2,8,81,192,8, - 33,16,0,0,255,255,2,36,28,0,191,143, - 24,0,178,143,20,0,177,143,16,0,176,143, - 8,0,224,3,32,0,189,39,0,0,0,0, - 0,0,0,0,168,255,189,39,80,0,191,175, - 76,0,183,175,72,0,182,175,68,0,181,175, - 64,0,180,175,60,0,179,175,56,0,178,175, - 52,0,177,175,48,0,176,175,33,24,128,0, - 33,152,160,0,33,176,192,0,33,184,224,0, - 104,0,162,143,0,0,0,0,2,0,64,20, - 44,0,160,175,40,0,162,39,0,0,64,172, - 24,0,164,39,33,40,96,0,156,88,192,12, - 33,48,96,2,33,136,64,0,82,0,32,18, - 33,16,0,0,209,83,192,12,0,0,0,0, - 33,144,64,0,5,0,64,22,1,0,2,36, - 183,88,192,12,33,32,32,2,124,81,192,8, - 33,16,0,0,148,0,66,162,196,88,192,12, - 33,32,32,2,224,0,85,48,44,0,176,39, - 33,32,32,2,124,89,192,12,33,40,0,2, - 33,160,64,0,33,32,32,2,198,89,192,12, - 33,40,0,2,2,0,66,166,44,0,162,143, - 0,0,0,0,14,0,64,20,0,0,0,0, - 8,0,35,142,4,0,34,142,0,0,0,0, - 35,128,98,0,2,0,66,150,0,0,0,0, - 33,128,2,2,42,16,19,2,16,0,64,20, - 33,32,32,2,42,16,112,2,16,0,64,16, - 0,0,0,0,167,83,192,12,33,32,64,2, - 183,88,192,12,33,32,32,2,3,131,3,60, - 140,17,99,36,0,0,98,140,0,0,0,0, - 1,0,66,36,0,0,98,172,124,81,192,8, - 33,16,0,0,33,40,0,2,42,89,192,12, - 33,48,0,0,255,0,162,50,255,255,131,50, - 37,16,67,0,48,0,3,36,8,0,67,20, - 33,32,64,2,16,0,176,175,33,40,32,2, - 33,48,192,2,135,81,192,12,33,56,224,2, - 117,81,192,8,33,128,64,0,3,131,3,60, - 140,17,99,36,0,0,98,140,0,0,0,0, - 1,0,66,36,0,0,98,172,33,128,0,0, - 3,0,0,22,0,0,0,0,167,83,192,12, - 33,32,64,2,183,88,192,12,33,32,32,2, - 33,16,0,2,80,0,191,143,76,0,183,143, - 72,0,182,143,68,0,181,143,64,0,180,143, - 60,0,179,143,56,0,178,143,52,0,177,143, - 48,0,176,143,8,0,224,3,88,0,189,39, - 176,255,189,39,76,0,191,175,72,0,182,175, - 68,0,181,175,64,0,180,175,60,0,179,175, - 56,0,178,175,52,0,177,175,48,0,176,175, - 33,128,128,0,33,136,160,0,33,160,192,0, - 33,168,224,0,96,0,182,143,24,0,160,175, - 33,32,32,2,24,0,165,39,2,0,6,36, - 239,90,192,12,33,56,0,0,64,0,2,174, - 24,0,162,143,0,0,0,0,155,0,64,20, - 0,0,0,0,64,0,2,142,0,0,0,0, - 4,0,64,16,33,32,32,2,3,131,3,60, - 60,82,192,8,148,17,99,36,16,0,160,175, - 72,0,5,38,24,0,166,39,110,90,192,12, - 4,0,7,36,24,0,162,143,0,0,0,0, - 139,0,64,20,0,0,0,0,196,88,192,12, - 33,32,32,2,224,0,66,48,160,0,3,36, - 133,0,67,20,33,32,32,2,124,89,192,12, - 24,0,165,39,33,144,64,0,33,32,32,2, - 198,89,192,12,24,0,165,39,33,152,64,0, - 24,0,162,143,0,0,0,0,122,0,64,20, - 0,0,0,0,255,255,66,50,5,0,66,44, - 118,0,64,16,0,0,0,0,8,0,34,142, - 4,0,35,142,0,0,0,0,35,16,67,0, - 255,255,99,50,33,16,67,0,110,0,194,22, - 33,32,0,2,88,0,18,166,90,0,19,166, - 33,40,128,2,178,50,192,12,33,48,160,2, - 118,0,64,20,33,16,0,0,255,255,67,50, - 4,0,2,36,24,0,98,16,33,32,32,2, - 24,0,165,39,2,0,6,36,239,90,192,12, - 33,56,0,0,92,0,2,174,33,32,32,2, - 24,0,165,39,2,0,6,36,239,90,192,12, - 33,56,0,0,96,0,2,174,33,32,32,2, - 24,0,165,39,2,0,6,36,239,90,192,12, - 33,56,0,0,100,0,2,174,24,0,162,143, - 0,0,0,0,78,0,64,20,33,32,32,2, - 67,82,192,8,104,0,5,38,4,0,2,36, - 88,0,2,166,90,0,19,166,124,0,0,174, - 16,0,160,175,92,0,5,38,24,0,166,39, - 186,91,192,12,6,0,7,36,24,0,162,143, - 0,0,0,0,63,0,64,20,100,0,4,38, - 
33,40,0,0,144,71,192,12,4,0,6,36, - 32,0,160,167,40,0,160,175,36,0,160,175, - 44,0,160,167,32,0,178,39,64,0,2,36, - 16,0,162,175,33,32,32,2,33,40,64,2, - 24,0,166,39,110,90,192,12,33,56,0,0, - 24,0,162,143,0,0,0,0,5,0,64,16, - 0,0,0,0,24,92,192,12,33,32,64,2, - 58,82,192,8,0,0,0,0,40,0,162,143, - 36,0,163,143,0,0,0,0,35,16,67,0, - 255,255,70,48,5,0,194,44,2,0,64,20, - 0,0,0,0,4,0,6,36,8,0,192,16, - 33,32,32,2,36,0,165,143,0,0,0,0, - 80,68,192,12,100,0,4,38,24,92,192,12, - 32,0,164,39,33,32,32,2,24,0,165,39, - 2,0,6,36,239,90,192,12,33,56,0,0, - 104,0,2,174,33,32,32,2,24,0,165,39, - 2,0,6,36,239,90,192,12,33,56,0,0, - 108,0,2,174,33,32,32,2,24,0,165,39, - 3,0,6,36,239,90,192,12,64,0,7,36, - 112,0,2,174,24,0,162,143,0,0,0,0, - 9,0,64,16,33,32,32,2,3,131,3,60, - 140,17,99,36,0,0,98,140,0,0,0,0, - 1,0,66,36,0,0,98,172,73,82,192,8, - 33,16,0,0,116,0,5,38,33,48,192,2, - 163,82,192,12,33,56,0,2,255,255,3,36, - 248,255,67,16,33,16,0,2,76,0,191,143, - 72,0,182,143,68,0,181,143,64,0,180,143, - 60,0,179,143,56,0,178,143,52,0,177,143, - 48,0,176,143,8,0,224,3,80,0,189,39, - 184,255,189,39,64,0,191,175,60,0,183,175, - 56,0,182,175,52,0,181,175,48,0,180,175, - 44,0,179,175,40,0,178,175,36,0,177,175, - 32,0,176,175,33,128,128,0,16,0,160,175, - 8,0,2,142,4,0,3,142,0,0,0,0, - 35,184,67,0,33,144,0,0,255,255,162,48, - 45,0,64,16,33,136,0,0,3,131,19,60, - 140,17,115,38,255,255,22,36,255,255,181,48, - 8,0,3,142,4,0,2,142,0,0,0,0, - 35,160,98,0,0,0,2,146,0,0,0,0, - 128,0,66,48,32,0,64,20,33,32,0,2, - 124,89,192,12,16,0,165,39,33,32,0,2, - 198,89,192,12,16,0,165,39,33,40,64,0, - 16,0,162,143,0,0,0,0,6,0,64,20, - 33,32,0,2,255,255,165,48,251,88,192,12, - 1,0,6,36,7,0,86,20,0,0,0,0, - 0,0,98,142,0,0,0,0,1,0,66,36, - 0,0,98,174,147,82,192,8,255,255,17,36, - 8,0,2,142,4,0,3,142,0,0,0,0, - 35,16,67,0,33,16,66,2,35,144,84,0, - 255,255,66,50,43,16,85,0,217,255,64,20, - 1,0,49,38,33,32,0,2,33,40,224,2, - 251,88,192,12,33,48,0,0,33,16,32,2, - 64,0,191,143,60,0,183,143,56,0,182,143, - 52,0,181,143,48,0,180,143,44,0,179,143, - 40,0,178,143,36,0,177,143,32,0,176,143, - 8,0,224,3,72,0,189,39,192,255,189,39, - 56,0,191,175,52,0,181,175,48,0,180,175, - 44,0,179,175,40,0,178,175,36,0,177,175, - 32,0,176,175,33,144,128,0,33,152,160,0, - 33,168,224,0,16,0,160,175,124,89,192,12, - 16,0,165,39,33,32,64,2,198,89,192,12, - 16,0,165,39,0,0,98,166,16,0,162,143, - 0,0,0,0,28,0,64,20,0,0,0,0, - 12,0,66,142,8,0,67,142,0,0,0,0, - 35,16,67,0,0,0,99,150,255,255,66,48, - 20,0,98,20,0,0,0,0,4,0,96,174, - 0,0,101,150,0,0,0,0,83,82,192,12, - 33,32,64,2,33,32,64,0,255,255,2,36, - 46,0,130,16,0,0,0,0,3,0,128,20, - 0,0,0,0,246,82,192,8,8,0,96,174, - 224,83,192,12,4,0,100,174,10,0,64,20, - 8,0,98,174,247,82,192,8,255,255,2,36, - 3,131,3,60,140,17,99,36,0,0,98,140, - 0,0,0,0,1,0,66,36,210,82,192,8, - 0,0,98,172,8,0,112,142,4,0,98,142, - 0,0,0,0,23,0,64,24,33,136,0,0, - 255,255,20,36,33,32,64,2,124,89,192,12, - 16,0,165,39,33,32,64,2,198,89,192,12, - 16,0,165,39,4,0,2,166,16,0,162,143, - 0,0,0,0,233,255,64,20,33,32,64,2, - 33,40,0,2,0,83,192,12,33,48,160,2, - 226,255,84,16,1,0,49,38,4,0,98,142, - 0,0,0,0,42,16,34,2,236,255,64,20, - 68,0,16,38,33,16,0,0,56,0,191,143, - 52,0,181,143,48,0,180,143,44,0,179,143, - 40,0,178,143,36,0,177,143,32,0,176,143, - 8,0,224,3,64,0,189,39,184,255,189,39, - 68,0,191,175,64,0,180,175,60,0,179,175, - 56,0,178,175,52,0,177,175,48,0,176,175, - 33,128,128,0,33,144,160,0,24,0,160,175, - 16,0,160,175,8,0,69,38,24,0,166,39, - 186,91,192,12,6,0,7,36,24,0,162,143, - 0,0,0,0,103,0,64,20,0,0,0,0, - 
196,88,192,12,33,32,0,2,224,0,84,48, - 33,32,0,2,124,89,192,12,24,0,165,39, - 33,152,64,0,33,32,0,2,198,89,192,12, - 24,0,165,39,33,136,64,0,24,0,162,143, - 0,0,0,0,88,0,64,20,37,16,116,2, - 18,0,81,166,16,0,66,162,16,0,67,146, - 64,0,2,36,48,0,98,16,65,0,98,40, - 16,0,64,16,4,0,2,36,31,0,98,16, - 5,0,98,40,5,0,64,16,2,0,2,36, - 22,0,98,16,33,32,0,2,121,83,192,8, - 0,0,0,0,5,0,2,36,65,0,98,16, - 6,0,2,36,27,0,98,16,33,32,0,2, - 121,83,192,8,0,0,0,0,68,0,2,36, - 15,0,98,16,68,0,98,40,8,0,64,20, - 33,32,0,2,131,0,98,40,57,0,64,16, - 128,0,98,40,51,0,64,16,0,0,0,0, - 121,83,192,8,0,0,0,0,255,255,37,50, - 164,90,192,12,24,0,166,39,117,83,192,8, - 40,0,66,174,33,32,0,2,255,255,37,50, - 40,0,70,38,19,90,192,12,24,0,167,39, - 117,83,192,8,0,0,0,0,255,255,37,50, - 40,0,70,38,24,91,192,12,24,0,167,39, - 117,83,192,8,0,0,0,0,40,0,68,38, - 33,40,0,0,144,71,192,12,4,0,6,36, - 32,0,160,167,40,0,160,175,36,0,160,175, - 44,0,160,167,33,32,0,2,255,255,37,50, - 32,0,166,39,19,90,192,12,24,0,167,39, - 40,0,162,143,36,0,163,143,0,0,0,0, - 35,16,67,0,255,255,70,48,5,0,194,44, - 2,0,64,20,0,0,0,0,4,0,6,36, - 7,0,192,16,0,0,0,0,36,0,165,143, - 0,0,0,0,80,68,192,12,40,0,68,38, - 24,92,192,12,32,0,164,39,24,0,162,143, - 0,0,0,0,8,0,64,16,33,16,0,0, - 3,131,3,60,140,17,99,36,0,0,98,140, - 0,0,0,0,1,0,66,36,0,0,98,172, - 255,255,2,36,68,0,191,143,64,0,180,143, - 60,0,179,143,56,0,178,143,52,0,177,143, - 48,0,176,143,8,0,224,3,72,0,189,39, - 232,255,189,39,20,0,191,175,16,0,176,175, - 33,128,128,0,76,0,2,142,0,0,0,0, - 3,0,64,16,0,0,0,0,24,92,192,12, - 72,0,4,38,88,0,3,150,4,0,2,36, - 5,0,98,20,0,0,0,0,110,86,192,12, - 92,0,4,38,157,83,192,8,116,0,4,38, - 13,84,192,12,104,0,4,38,120,0,4,38, - 13,84,192,12,0,0,0,0,148,0,3,146, - 0,0,0,0,248,83,192,12,33,32,0,2, - 20,0,191,143,16,0,176,143,8,0,224,3, - 24,0,189,39,232,255,189,39,20,0,191,175, - 16,0,176,175,33,128,128,0,5,0,0,18, - 0,0,0,0,136,83,192,12,0,0,0,0, - 61,50,192,12,33,32,0,2,20,0,191,143, - 16,0,176,143,8,0,224,3,24,0,189,39, - 224,255,189,39,24,0,191,175,20,0,177,175, - 16,0,176,175,33,136,128,0,9,50,192,12, - 16,0,4,36,33,128,64,0,11,0,0,18, - 33,32,0,2,33,40,0,0,144,71,192,12, - 16,0,6,36,224,83,192,12,33,32,32,2, - 4,0,64,16,8,0,2,174,4,0,17,174, - 204,83,192,8,33,16,0,2,61,50,192,12, - 33,32,0,2,33,16,0,0,24,0,191,143, - 20,0,177,143,16,0,176,143,8,0,224,3, - 32,0,189,39,232,255,189,39,20,0,191,175, - 16,0,176,175,9,50,192,12,152,0,4,36, - 33,128,64,0,4,0,0,18,33,16,0,0, - 248,83,192,12,33,32,0,2,33,16,0,2, - 20,0,191,143,16,0,176,143,8,0,224,3, - 24,0,189,39,224,255,189,39,24,0,191,175, - 20,0,177,175,0,17,4,0,33,16,68,0, - 128,136,2,0,11,0,32,18,16,0,176,175, - 9,50,192,12,33,32,32,2,33,128,64,0, - 4,0,0,18,33,32,0,2,33,40,0,0, - 144,71,192,12,33,48,32,2,243,83,192,8, - 33,16,0,2,33,16,0,0,24,0,191,143, - 20,0,177,143,16,0,176,143,8,0,224,3, - 32,0,189,39,232,255,189,39,20,0,191,175, - 16,0,176,175,33,128,128,0,33,40,0,0, - 144,71,192,12,152,0,6,36,255,0,2,36, - 88,0,2,166,120,5,2,36,58,0,2,166, - 72,0,0,166,80,0,0,174,76,0,0,174, - 84,0,0,166,148,0,0,162,149,0,0,162, - 20,0,191,143,16,0,176,143,8,0,224,3, - 24,0,189,39,208,255,189,39,40,0,191,175, - 36,0,179,175,32,0,178,175,28,0,177,175, - 24,0,176,175,33,128,128,0,31,0,0,18, - 1,0,19,36,8,0,18,142,0,0,0,0, - 16,0,64,18,0,0,0,0,4,0,2,142, - 0,0,0,0,9,0,64,24,33,136,0,0, - 59,84,192,12,33,32,64,2,1,0,49,38, - 4,0,2,142,0,0,0,0,42,16,34,2, - 249,255,64,20,68,0,82,38,8,0,4,142, - 61,50,192,12,0,0,0,0,12,0,17,142, - 4,0,96,18,0,0,0,0,33,152,0,0, - 49,84,192,8,4,0,0,174,61,50,192,12, - 
33,32,0,2,33,128,32,2,227,255,0,22, - 0,0,0,0,40,0,191,143,36,0,179,143, - 32,0,178,143,28,0,177,143,24,0,176,143, - 8,0,224,3,48,0,189,39,232,255,189,39, - 20,0,191,175,16,0,176,175,33,128,128,0, - 60,0,2,142,0,0,0,0,4,0,64,16, - 0,0,0,0,9,248,64,0,0,0,0,0, - 60,0,0,174,110,86,192,12,8,0,4,38, - 78,84,192,12,33,32,0,2,20,0,191,143, - 16,0,176,143,8,0,224,3,24,0,189,39, - 232,255,189,39,16,0,191,175,16,0,131,144, - 6,0,2,36,18,0,98,16,7,0,98,40, - 5,0,64,16,4,0,2,36,6,0,98,16, - 0,0,0,0,103,84,192,8,0,0,0,0, - 68,0,2,36,11,0,98,20,0,0,0,0, - 44,0,130,140,0,0,0,0,7,0,64,16, - 0,0,0,0,24,92,192,12,40,0,132,36, - 103,84,192,8,0,0,0,0,110,86,192,12, - 40,0,132,36,16,0,191,143,24,0,189,39, - 8,0,224,3,0,0,0,0,0,0,0,0, - 216,255,189,39,32,0,191,175,28,0,179,175, - 24,0,178,175,20,0,177,175,16,0,176,175, - 33,136,192,0,56,0,179,143,0,0,0,0, - 20,72,192,12,33,144,224,0,33,128,64,0, - 11,0,0,18,33,32,32,2,33,40,64,2, - 80,86,192,12,8,0,6,38,255,255,3,36, - 5,0,67,16,2,0,2,36,16,0,2,162, - 40,0,19,174,133,84,192,8,33,16,0,0, - 255,255,2,36,32,0,191,143,28,0,179,143, - 24,0,178,143,20,0,177,143,16,0,176,143, - 8,0,224,3,40,0,189,39,216,255,189,39, - 32,0,191,175,28,0,177,175,24,0,176,175, - 33,128,128,0,96,0,5,174,100,0,6,174, - 128,0,2,142,0,0,0,0,11,0,64,16, - 0,0,0,0,13,84,192,12,104,0,4,38, - 124,0,2,142,0,0,0,0,108,0,2,174, - 128,0,2,142,0,0,0,0,112,0,2,174, - 128,0,0,174,124,0,0,174,88,0,17,150, - 2,0,2,36,88,0,2,166,176,80,192,12, - 33,32,0,2,33,56,64,0,64,0,2,142, - 0,0,0,0,35,0,64,20,255,255,35,50, - 3,0,2,36,14,0,98,16,255,255,227,48, - 58,0,2,150,0,0,0,0,43,16,67,0, - 9,0,64,16,12,0,4,38,48,0,2,142, - 28,0,5,38,52,0,7,142,0,0,0,0, - 9,248,64,0,1,0,6,36,214,84,192,8, - 0,0,0,0,96,0,2,142,0,0,0,0, - 250,255,67,36,13,0,98,44,13,0,64,16, - 128,16,3,0,2,131,1,60,33,8,34,0, - 32,154,34,140,0,0,0,0,8,0,64,0, - 0,0,0,0,204,84,192,8,2,0,2,36, - 204,84,192,8,3,0,2,36,5,0,2,36, - 96,0,2,174,52,0,2,142,0,0,0,0, - 16,0,162,175,44,0,2,142,12,0,4,38, - 28,0,5,38,33,48,0,2,9,248,64,0, - 255,255,231,48,32,0,191,143,28,0,177,143, - 24,0,176,143,8,0,224,3,40,0,189,39, - 224,255,189,39,28,0,191,175,24,0,176,175, - 33,128,128,0,96,0,5,142,0,0,0,0, - 6,0,160,16,0,0,0,0,100,0,6,142, - 140,84,192,12,0,0,0,0,56,85,192,8, - 0,0,0,0,176,80,192,12,33,32,0,2, - 33,56,64,0,88,0,2,150,0,0,0,0, - 2,0,66,44,11,0,64,16,255,255,227,48, - 58,0,2,150,0,0,0,0,43,16,67,0, - 6,0,64,16,33,32,0,2,1,0,5,36, - 140,84,192,12,33,48,0,0,56,85,192,8, - 0,0,0,0,96,0,2,142,0,0,0,0, - 49,0,64,20,2,0,2,36,88,0,3,150, - 3,0,2,36,33,0,98,16,4,0,98,40, - 7,0,64,16,2,0,98,40,41,0,64,16, - 2,0,2,36,39,0,96,4,0,0,0,0, - 13,85,192,8,0,0,0,0,5,0,2,36, - 34,0,98,20,2,0,2,36,3,131,3,60, - 236,17,99,36,0,0,98,140,0,0,0,0, - 1,0,66,36,104,0,4,38,25,0,128,16, - 0,0,98,172,3,131,5,60,176,17,165,36, - 0,0,162,140,108,0,3,142,0,0,0,0, - 33,16,67,0,0,0,162,172,12,0,132,140, - 0,0,0,0,248,255,128,20,2,0,2,36, - 47,85,192,8,88,0,2,166,3,131,4,60, - 236,17,132,36,0,0,130,140,0,0,0,0, - 1,0,66,36,0,0,130,172,200,255,130,140, - 108,0,3,142,0,0,0,0,33,16,67,0, - 200,255,130,172,2,0,2,36,88,0,2,166, - 52,0,2,142,0,0,0,0,16,0,162,175, - 44,0,2,142,12,0,4,38,28,0,5,38, - 33,48,0,2,9,248,64,0,255,255,231,48, - 28,0,191,143,24,0,176,143,8,0,224,3, - 32,0,189,39,232,255,189,39,20,0,191,175, - 16,0,176,175,33,128,128,0,88,0,3,150, - 1,0,2,36,25,0,98,16,2,0,98,40, - 5,0,64,16,3,0,2,36,7,0,96,16, - 0,0,0,0,116,85,192,8,0,0,0,0, - 37,0,98,16,0,0,0,0,116,85,192,8, - 0,0,0,0,112,0,4,142,108,0,3,142, - 0,0,0,0,34,0,96,16,0,0,0,0, - 17,0,130,144,0,0,0,0,2,0,66,48, - 
33,0,64,16,255,255,99,36,250,255,96,20, - 68,0,132,36,116,85,192,8,0,0,0,0, - 112,0,4,142,108,0,3,142,0,0,0,0, - 8,0,96,16,0,0,0,0,17,0,130,144, - 0,0,0,0,2,0,66,48,19,0,64,16, - 255,255,99,36,250,255,96,20,68,0,132,36, - 118,93,192,12,33,32,0,2,241,255,64,28, - 255,255,66,40,7,0,64,16,0,0,0,0, - 92,85,192,8,0,0,0,0,120,94,192,12, - 33,32,0,2,5,0,64,20,0,0,0,0, - 219,84,192,12,33,32,0,2,167,83,192,12, - 33,32,0,2,20,0,191,143,16,0,176,143, - 8,0,224,3,24,0,189,39,232,255,189,39, - 20,0,191,175,16,0,176,175,33,128,128,0, - 88,0,3,150,1,0,2,36,17,0,98,16, - 2,0,98,40,5,0,64,16,3,0,2,36, - 9,0,96,16,0,0,0,0,156,85,192,8, - 0,0,0,0,13,0,98,16,5,0,2,36, - 7,0,98,16,0,0,0,0,156,85,192,8, - 0,0,0,0,60,95,192,12,33,32,0,2, - 154,85,192,8,0,0,0,0,0,93,192,12, - 33,32,0,2,154,85,192,8,0,0,0,0, - 252,93,192,12,33,32,0,2,5,0,64,20, - 0,0,0,0,60,85,192,12,33,32,0,2, - 162,85,192,8,0,0,0,0,167,83,192,12, - 33,32,0,2,20,0,191,143,16,0,176,143, - 8,0,224,3,24,0,189,39,192,255,189,39, - 60,0,191,175,56,0,182,175,52,0,181,175, - 48,0,180,175,44,0,179,175,40,0,178,175, - 36,0,177,175,32,0,176,175,33,64,128,0, - 33,136,192,0,33,152,224,0,84,0,182,143, - 88,0,181,143,92,0,180,143,80,0,178,151, - 3,131,3,60,128,17,99,36,0,0,98,140, - 0,0,0,0,1,0,66,36,0,0,98,172, - 24,0,162,39,16,0,162,175,33,32,160,0, - 16,81,192,12,33,40,0,1,33,128,64,0, - 57,0,0,18,255,255,66,50,58,0,3,150, - 0,0,0,0,43,16,67,0,2,0,64,16, - 0,0,0,0,58,0,18,166,44,0,22,174, - 48,0,21,174,52,0,20,174,24,0,162,143, - 0,0,0,0,7,0,64,16,1,0,2,36, - 219,84,192,12,33,32,0,2,167,83,192,12, - 33,32,0,2,5,86,192,8,0,0,0,0, - 88,0,3,150,0,0,0,0,16,0,98,16, - 2,0,98,40,5,0,64,16,3,0,2,36, - 9,0,96,16,0,0,0,0,244,85,192,8, - 0,0,0,0,11,0,98,16,5,0,2,36, - 31,0,98,16,0,0,0,0,244,85,192,8, - 0,0,0,0,3,131,3,60,239,85,192,8, - 184,17,99,36,3,131,3,60,239,85,192,8, - 188,17,99,36,3,131,3,60,192,17,99,36, - 0,0,98,140,0,0,0,0,1,0,66,36, - 3,86,192,8,0,0,98,172,3,131,3,60, - 140,17,99,36,0,0,98,140,0,0,0,0, - 1,0,66,36,0,0,98,172,167,83,192,12, - 33,32,0,2,33,32,32,2,33,40,96,2, - 1,0,6,36,9,248,160,2,33,56,128,2, - 5,86,192,8,0,0,0,0,124,85,192,12, - 33,32,0,2,60,0,191,143,56,0,182,143, - 52,0,181,143,48,0,180,143,44,0,179,143, - 40,0,178,143,36,0,177,143,32,0,176,143, - 8,0,224,3,64,0,189,39,232,255,189,39, - 20,0,191,175,16,0,176,175,33,128,128,0, - 213,80,192,12,255,255,198,48,53,0,64,20, - 255,255,2,36,3,131,3,60,144,17,99,36, - 0,0,98,140,0,0,0,0,1,0,66,36, - 0,0,98,172,96,0,2,142,0,0,0,0, - 32,0,64,16,4,0,2,36,92,0,98,140, - 0,0,0,0,1,0,66,36,92,0,98,172, - 96,0,2,142,0,0,0,0,255,255,67,36, - 5,0,98,44,32,0,64,16,128,16,3,0, - 2,131,1,60,33,8,34,0,88,154,34,140, - 0,0,0,0,8,0,64,0,0,0,0,0, - 3,131,3,60,70,86,192,8,204,17,99,36, - 3,131,3,60,70,86,192,8,212,17,99,36, - 3,131,3,60,70,86,192,8,216,17,99,36, - 3,131,3,60,70,86,192,8,208,17,99,36, - 3,131,3,60,70,86,192,8,220,17,99,36, - 88,0,3,150,0,0,0,0,8,0,98,20, - 33,16,0,0,3,131,3,60,240,17,99,36, - 0,0,98,140,0,0,0,0,1,0,66,36, - 0,0,98,172,33,16,0,0,20,0,191,143, - 16,0,176,143,8,0,224,3,24,0,189,39, - 0,0,0,0,224,255,189,39,28,0,191,175, - 24,0,178,175,20,0,177,175,16,0,176,175, - 33,144,160,0,33,128,192,0,4,0,0,174, - 14,0,128,16,0,0,4,174,128,136,4,0, - 9,50,192,12,33,32,32,2,3,0,64,20, - 4,0,2,174,104,86,192,8,255,255,2,36, - 5,0,32,18,33,40,64,2,4,0,4,142, - 0,0,0,0,80,68,192,12,33,48,32,2, - 33,16,0,0,28,0,191,143,24,0,178,143, - 20,0,177,143,16,0,176,143,8,0,224,3, - 32,0,189,39,232,255,189,39,20,0,191,175, - 16,0,176,175,33,128,128,0,4,0,4,142, - 0,0,0,0,4,0,128,16,0,0,0,0, - 
61,50,192,12,0,0,0,0,4,0,0,174, - 0,0,0,174,20,0,191,143,16,0,176,143, - 8,0,224,3,24,0,189,39,0,0,0,0, - 0,0,0,0,11,0,128,4,128,0,130,40, - 20,0,64,20,1,0,3,36,255,127,2,36, - 42,16,68,0,16,0,64,16,2,0,3,36, - 127,0,2,60,255,255,66,52,148,86,192,8, - 42,16,68,0,128,255,130,40,9,0,64,16, - 1,0,3,36,0,128,130,40,6,0,64,16, - 2,0,3,36,128,255,2,60,42,16,130,0, - 2,0,64,20,4,0,3,36,3,0,3,36, - 8,0,224,3,33,16,96,0,128,0,130,44, - 14,0,64,20,1,0,2,36,255,127,2,36, - 43,16,68,0,9,0,64,16,127,0,2,60, - 255,255,66,52,43,16,68,0,6,0,64,16, - 3,0,2,36,4,0,128,4,5,0,2,36, - 169,86,192,8,4,0,2,36,2,0,2,36, - 8,0,224,3,0,0,0,0,4,0,135,140, - 0,0,130,140,0,0,0,0,65,0,64,16, - 33,16,0,0,0,0,227,140,4,0,231,36, - 128,16,3,0,33,16,67,0,192,16,2,0, - 0,0,227,140,0,0,0,0,33,24,67,0, - 128,0,98,44,17,0,64,20,4,0,231,36, - 0,64,98,44,15,0,64,20,2,0,5,36, - 31,0,2,60,255,255,66,52,43,16,67,0, - 7,0,64,16,255,15,2,60,255,255,66,52, - 43,16,67,0,6,0,64,20,5,0,5,36, - 204,86,192,8,4,0,5,36,204,86,192,8, - 3,0,5,36,1,0,5,36,2,0,6,36, - 0,0,130,140,0,0,0,0,42,16,194,0, - 31,0,64,16,255,255,162,48,31,0,9,60, - 255,255,41,53,255,15,8,60,255,255,8,53, - 0,0,132,140,0,0,227,140,4,0,231,36, - 128,0,98,44,16,0,64,20,255,255,165,48, - 0,64,98,44,11,0,64,20,43,16,35,1, - 7,0,64,16,43,16,3,1,3,0,64,20, - 0,0,0,0,236,86,192,8,4,0,165,36, - 236,86,192,8,5,0,165,36,236,86,192,8, - 3,0,165,36,236,86,192,8,2,0,165,36, - 1,0,165,36,1,0,198,36,42,16,196,0, - 232,255,64,20,255,255,162,48,8,0,224,3, - 0,0,0,0,208,255,189,39,40,0,191,175, - 33,72,192,0,224,0,165,48,255,255,130,48, - 31,0,66,44,7,0,64,16,33,48,160,0, - 37,16,133,0,16,0,162,163,33,32,224,0, - 16,0,165,39,38,87,192,8,1,0,6,36, - 32,0,163,39,33,40,0,0,31,0,194,52, - 24,0,162,163,255,255,130,48,8,0,64,16, - 25,0,168,39,127,0,130,48,0,0,98,160, - 1,0,99,36,255,255,130,48,194,33,2,0, - 250,255,128,20,1,0,165,36,1,0,166,36, - 33,16,160,0,255,255,66,48,2,0,66,44, - 13,0,64,20,255,255,165,36,255,255,4,52, - 255,255,99,36,0,0,98,144,0,0,0,0, - 128,0,66,52,0,0,2,161,1,0,8,37, - 33,16,160,0,255,255,66,48,2,0,66,44, - 246,255,64,16,33,40,164,0,255,255,98,144, - 0,0,0,0,0,0,2,161,33,32,224,0, - 24,0,165,39,255,255,198,48,9,248,32,1, - 0,0,0,0,40,0,191,143,48,0,189,39, - 8,0,224,3,0,0,0,0,208,255,189,39, - 40,0,191,175,33,72,160,0,255,255,130,48, - 128,0,66,44,6,0,64,16,33,64,192,0, - 16,0,164,163,33,32,0,1,16,0,165,39, - 88,87,192,8,1,0,6,36,24,0,167,39, - 32,0,165,39,255,255,130,48,7,0,64,16, - 33,24,0,0,0,0,164,160,1,0,165,36, - 255,255,130,48,2,34,2,0,251,255,128,20, - 1,0,99,36,128,0,98,52,0,0,226,160, - 1,0,231,36,1,0,102,36,33,16,96,0, - 255,255,66,48,11,0,64,16,255,255,99,36, - 255,255,4,52,255,255,165,36,0,0,162,144, - 0,0,0,0,0,0,226,160,1,0,231,36, - 33,16,96,0,255,255,66,48,248,255,64,20, - 33,24,100,0,33,32,0,1,24,0,165,39, - 255,255,198,48,9,248,32,1,0,0,0,0, - 40,0,191,143,48,0,189,39,8,0,224,3, - 0,0,0,0,200,255,189,39,48,0,191,175, - 44,0,181,175,40,0,180,175,36,0,179,175, - 32,0,178,175,28,0,177,175,24,0,176,175, - 33,136,160,0,33,144,192,0,33,152,224,0, - 72,0,180,143,33,128,128,0,128,86,192,12, - 33,32,64,2,33,168,64,0,255,255,4,50, - 192,0,37,50,33,48,96,2,242,86,192,12, - 33,56,128,2,255,255,176,50,33,32,0,2, - 33,40,96,2,44,87,192,12,33,48,128,2, - 16,0,162,39,33,24,80,0,255,255,99,36, - 6,0,98,16,0,0,114,160,16,0,162,39, - 3,146,18,0,255,255,99,36,253,255,98,20, - 0,0,114,160,33,32,128,2,16,0,165,39, - 9,248,96,2,255,255,166,50,48,0,191,143, - 44,0,181,143,40,0,180,143,36,0,179,143, - 32,0,178,143,28,0,177,143,24,0,176,143, - 
8,0,224,3,56,0,189,39,200,255,189,39, - 48,0,191,175,44,0,181,175,40,0,180,175, - 36,0,179,175,32,0,178,175,28,0,177,175, - 24,0,176,175,33,136,160,0,33,144,192,0, - 33,160,224,0,72,0,181,143,33,128,128,0, - 153,86,192,12,33,32,64,2,33,152,64,0, - 255,255,4,50,192,0,37,50,33,48,128,2, - 242,86,192,12,33,56,160,2,255,255,112,50, - 33,32,0,2,33,40,128,2,44,87,192,12, - 33,48,160,2,16,0,162,39,33,32,80,0, - 9,0,0,18,255,255,99,38,255,255,5,52, - 255,255,132,36,0,0,146,160,2,146,18,0, - 33,16,96,0,255,255,66,48,250,255,64,20, - 33,24,101,0,33,32,160,2,16,0,165,39, - 9,248,128,2,255,255,102,50,48,0,191,143, - 44,0,181,143,40,0,180,143,36,0,179,143, - 32,0,178,143,28,0,177,143,24,0,176,143, - 8,0,224,3,56,0,189,39,216,255,189,39, - 32,0,191,175,28,0,179,175,24,0,178,175, - 20,0,177,175,16,0,176,175,33,152,192,0, - 56,0,178,143,60,0,177,143,33,128,224,0, - 255,255,132,48,192,0,165,48,33,48,64,2, - 242,86,192,12,33,56,32,2,255,255,16,50, - 33,32,0,2,33,40,64,2,44,87,192,12, - 33,48,32,2,4,0,0,18,33,32,32,2, - 33,40,96,2,9,248,64,2,33,48,0,2, - 32,0,191,143,28,0,179,143,24,0,178,143, - 20,0,177,143,16,0,176,143,8,0,224,3, - 40,0,189,39,224,255,189,39,24,0,191,175, - 33,72,160,0,128,0,130,44,18,0,64,20, - 33,64,192,0,0,64,130,44,13,0,64,20, - 31,0,2,60,255,255,66,52,43,16,68,0, - 7,0,64,16,255,15,2,60,255,255,66,52, - 43,16,68,0,8,0,64,20,5,0,6,36, - 250,87,192,8,4,0,6,36,250,87,192,8, - 3,0,6,36,250,87,192,8,2,0,6,36, - 1,0,6,36,255,255,194,48,16,0,163,39, - 33,40,98,0,9,0,163,16,33,56,0,0, - 16,0,163,39,255,255,165,36,127,0,130,48, - 37,16,226,0,0,0,162,160,194,33,4,0, - 250,255,163,20,128,0,7,36,33,32,0,1, - 16,0,165,39,9,248,32,1,255,255,198,48, - 24,0,191,143,32,0,189,39,8,0,224,3, - 0,0,0,0,208,255,189,39,44,0,191,175, - 40,0,182,175,36,0,181,175,32,0,180,175, - 28,0,179,175,24,0,178,175,20,0,177,175, - 16,0,176,175,33,144,160,0,33,168,192,0, - 33,160,224,0,64,0,182,143,33,136,128,0, - 4,0,179,142,0,0,0,0,171,86,192,12, - 33,32,160,2,33,128,64,0,255,255,36,50, - 192,0,69,50,33,48,128,2,242,86,192,12, - 33,56,192,2,255,255,16,50,33,32,0,2, - 33,40,128,2,44,87,192,12,33,48,192,2, - 23,0,0,18,33,40,128,2,0,0,98,142, - 4,0,115,38,128,32,2,0,33,32,130,0, - 192,32,4,0,0,0,98,142,4,0,115,38, - 33,32,130,0,226,87,192,12,33,48,192,2, - 63,88,192,8,2,0,16,36,0,0,100,142, - 4,0,115,38,226,87,192,12,33,48,192,2, - 1,0,16,38,0,0,162,142,0,0,0,0, - 42,16,2,2,247,255,64,20,33,40,128,2, - 44,0,191,143,40,0,182,143,36,0,181,143, - 32,0,180,143,28,0,179,143,24,0,178,143, - 20,0,177,143,16,0,176,143,8,0,224,3, - 48,0,189,39,224,255,189,39,28,0,191,175, - 24,0,178,175,20,0,177,175,16,0,176,175, - 33,136,128,0,33,144,192,0,255,255,67,50, - 12,0,34,150,0,0,0,0,43,16,67,0, - 4,0,64,16,1,0,2,36,12,0,50,150, - 0,0,0,0,255,255,67,50,11,0,98,16, - 2,0,98,40,5,0,64,16,2,0,2,36, - 50,0,96,16,255,255,80,50,137,88,192,8, - 0,0,0,0,15,0,98,16,255,255,80,50, - 137,88,192,8,0,0,0,0,8,0,35,142, - 0,0,0,0,1,0,98,36,8,0,34,174, - 0,0,162,144,0,0,0,0,0,0,98,160, - 12,0,34,150,0,0,0,0,255,255,66,36, - 149,88,192,8,12,0,34,166,8,0,35,142, - 0,0,0,0,1,0,98,36,8,0,34,174, - 0,0,162,144,0,0,0,0,0,0,98,160, - 8,0,35,142,0,0,0,0,1,0,98,36, - 8,0,34,174,1,0,162,144,0,0,0,0, - 0,0,98,160,12,0,34,150,0,0,0,0, - 254,255,66,36,149,88,192,8,12,0,34,166, - 8,0,36,142,0,0,0,0,80,68,192,12, - 33,48,0,2,12,0,34,150,0,0,0,0, - 35,16,82,0,12,0,34,166,8,0,34,142, - 0,0,0,0,33,128,2,2,8,0,48,174, - 255,255,66,50,28,0,191,143,24,0,178,143, - 20,0,177,143,16,0,176,143,8,0,224,3, - 32,0,189,39,224,255,189,39,24,0,191,175, - 
20,0,177,175,16,0,176,175,33,128,160,0, - 10,0,128,20,33,136,192,0,9,50,192,12, - 16,0,4,36,33,32,64,0,3,0,128,20, - 1,0,2,36,178,88,192,8,33,16,0,0, - 173,88,192,8,0,0,130,160,0,0,128,160, - 4,0,144,172,8,0,144,172,33,16,17,2, - 12,0,130,172,33,16,128,0,24,0,191,143, - 20,0,177,143,16,0,176,143,8,0,224,3, - 32,0,189,39,232,255,189,39,16,0,191,175, - 0,0,130,144,0,0,0,0,1,0,66,48, - 3,0,64,16,0,0,0,0,61,50,192,12, - 0,0,0,0,16,0,191,143,24,0,189,39, - 8,0,224,3,0,0,0,0,0,0,130,144, - 0,0,0,0,128,0,66,48,16,0,64,20, - 255,255,2,36,8,0,130,140,12,0,131,140, - 0,0,0,0,43,16,67,0,7,0,64,20, - 0,0,0,0,0,0,130,144,0,0,0,0, - 128,0,66,52,0,0,130,160,216,88,192,8, - 255,255,2,36,8,0,130,140,0,0,0,0, - 0,0,66,144,8,0,224,3,0,0,0,0, - 30,0,192,24,33,56,192,0,0,0,130,144, - 0,0,0,0,128,0,66,48,16,0,64,20, - 255,0,3,36,8,0,131,140,12,0,130,140, - 0,0,0,0,43,16,98,0,5,0,64,16, - 1,0,98,36,8,0,130,172,0,0,99,144, - 240,88,192,8,0,0,0,0,0,0,130,144, - 0,0,0,0,128,0,66,52,0,0,130,160, - 255,0,3,36,0,0,130,144,0,0,0,0, - 128,0,66,48,5,0,64,20,0,0,0,0, - 0,0,163,160,255,255,198,36,228,255,192,28, - 1,0,165,36,8,0,224,3,35,16,230,0, - 1,0,2,36,15,0,194,16,2,0,194,40, - 5,0,64,16,2,0,2,36,7,0,192,16, - 255,255,2,36,40,89,192,8,0,0,0,0, - 11,0,194,16,255,255,2,36,40,89,192,8, - 0,0,0,0,4,0,130,140,0,0,0,0, - 21,89,192,8,33,40,162,0,8,0,130,140, - 0,0,0,0,19,89,192,8,33,40,162,0, - 12,0,130,140,0,0,0,0,35,40,69,0, - 4,0,130,140,0,0,0,0,43,16,162,0, - 17,0,64,20,255,255,2,36,12,0,130,140, - 0,0,0,0,43,16,69,0,12,0,64,20, - 255,255,2,36,12,0,130,140,0,0,0,0, - 43,16,162,0,5,0,64,16,0,0,0,0, - 0,0,130,144,0,0,0,0,127,0,66,48, - 0,0,130,160,8,0,133,172,33,16,0,0, - 8,0,224,3,0,0,0,0,12,0,130,140, - 4,0,131,140,0,0,0,0,35,56,67,0, - 1,0,2,36,15,0,194,16,2,0,194,40, - 5,0,64,16,2,0,2,36,7,0,192,16, - 255,255,2,36,87,89,192,8,0,0,0,0, - 12,0,194,16,255,255,2,36,87,89,192,8, - 0,0,0,0,4,0,130,140,0,0,0,0, - 66,89,192,8,33,16,162,0,8,0,130,140, - 0,0,0,0,33,16,162,0,72,89,192,8, - 12,0,130,172,12,0,130,140,0,0,0,0, - 35,16,69,0,12,0,130,172,8,0,130,140, - 12,0,131,140,0,0,0,0,43,16,67,0, - 5,0,64,16,0,0,0,0,0,0,130,144, - 0,0,0,0,85,89,192,8,127,0,66,48, - 0,0,130,144,0,0,0,0,128,0,66,52, - 0,0,130,160,33,16,224,0,8,0,224,3, - 0,0,0,0,232,255,189,39,20,0,191,175, - 16,0,176,175,12,0,128,20,33,128,160,0, - 9,50,192,12,16,0,4,36,33,32,64,0, - 3,0,128,20,0,0,0,0,119,89,192,8, - 33,16,0,0,0,0,2,146,0,0,0,0, - 108,89,192,8,1,0,66,52,0,0,2,146, - 0,0,0,0,254,0,66,48,0,0,130,160, - 4,0,2,142,0,0,0,0,4,0,130,172, - 8,0,2,142,0,0,0,0,8,0,130,172, - 12,0,2,142,0,0,0,0,12,0,130,172, - 33,16,128,0,20,0,191,143,16,0,176,143, - 8,0,224,3,24,0,189,39,0,0,0,0, - 0,0,130,144,0,0,0,0,128,0,66,48, - 17,0,64,20,31,0,3,36,8,0,131,140, - 12,0,130,140,0,0,0,0,43,16,98,0, - 6,0,64,16,1,0,98,36,8,0,130,172, - 0,0,98,144,0,0,0,0,145,89,192,8, - 31,0,67,48,0,0,130,144,0,0,0,0, - 128,0,66,52,0,0,130,160,31,0,3,36, - 0,0,130,144,0,0,0,0,128,0,66,48, - 4,0,64,16,1,0,2,36,0,0,162,172, - 196,89,192,8,33,16,0,0,255,0,99,48, - 31,0,2,36,6,0,98,16,33,16,96,0, - 196,89,192,8,0,0,0,0,1,0,2,36, - 195,89,192,8,0,0,162,172,33,48,0,0, - 0,0,130,144,0,0,0,0,128,0,66,48, - 16,0,64,20,255,0,3,36,8,0,131,140, - 12,0,130,140,0,0,0,0,43,16,98,0, - 5,0,64,16,1,0,98,36,8,0,130,172, - 0,0,99,144,183,89,192,8,0,0,0,0, - 0,0,130,144,0,0,0,0,128,0,66,52, - 0,0,130,160,255,0,3,36,0,0,130,144, - 0,0,0,0,128,0,66,48,228,255,64,20, - 128,0,98,48,4,0,64,16,127,0,98,48, - 37,16,194,0,163,89,192,8,192,49,2,0, - 255,0,98,48,37,48,70,0,255,255,194,48, - 
8,0,224,3,0,0,0,0,0,0,130,144, - 0,0,0,0,128,0,66,48,16,0,64,20, - 255,0,6,36,8,0,131,140,12,0,130,140, - 0,0,0,0,43,16,98,0,5,0,64,16, - 1,0,98,36,8,0,130,172,0,0,102,144, - 218,89,192,8,0,0,0,0,0,0,130,144, - 0,0,0,0,128,0,66,52,0,0,130,160, - 255,0,6,36,0,0,130,144,0,0,0,0, - 128,0,66,48,13,0,64,20,1,0,2,36, - 255,0,195,48,128,0,2,36,4,0,98,20, - 2,0,2,36,0,0,162,172,17,90,192,8, - 255,255,2,52,128,0,194,48,6,0,64,20, - 33,24,0,0,17,90,192,8,255,0,194,48, - 0,0,162,172,17,90,192,8,33,16,0,0, - 127,0,194,48,32,0,64,16,255,255,71,36, - 0,26,3,0,0,0,130,144,0,0,0,0, - 128,0,66,48,16,0,64,20,255,255,102,48, - 8,0,131,140,12,0,130,140,0,0,0,0, - 43,16,98,0,6,0,64,16,1,0,98,36, - 8,0,130,172,0,0,98,144,0,0,0,0, - 7,90,192,8,37,24,194,0,0,0,130,144, - 0,0,0,0,128,0,66,52,0,0,130,160, - 255,0,195,52,0,0,130,144,0,0,0,0, - 128,0,66,48,224,255,64,20,1,0,2,36, - 33,16,224,0,255,0,66,48,226,255,64,20, - 255,255,231,36,255,255,98,48,8,0,224,3, - 0,0,0,0,216,255,189,39,36,0,191,175, - 32,0,180,175,28,0,179,175,24,0,178,175, - 20,0,177,175,16,0,176,175,33,152,128,0, - 33,128,192,0,33,144,160,0,255,255,81,50, - 33,0,32,18,33,160,224,0,255,255,2,52, - 30,0,34,18,0,0,0,0,9,50,192,12, - 33,32,32,2,33,24,64,0,29,0,96,16, - 1,0,2,36,0,0,2,166,4,0,3,174, - 8,0,3,174,12,0,18,166,33,32,96,2, - 8,0,5,142,0,0,0,0,218,88,192,12, - 33,48,32,2,33,24,64,0,255,255,100,48, - 10,0,145,20,1,0,2,36,12,0,2,150, - 0,0,0,0,35,16,67,0,12,0,2,166, - 8,0,2,142,0,0,0,0,33,16,130,0, - 68,90,192,8,8,0,2,174,68,90,192,8, - 0,0,130,174,0,0,0,166,4,0,0,174, - 8,0,0,174,12,0,0,166,36,0,191,143, - 32,0,180,143,28,0,179,143,24,0,178,143, - 20,0,177,143,16,0,176,143,8,0,224,3, - 40,0,189,39,224,255,189,39,28,0,191,175, - 24,0,178,175,20,0,177,175,16,0,176,175, - 33,144,128,0,33,136,160,0,33,128,192,0, - 124,89,192,12,33,40,0,2,33,32,64,2, - 198,89,192,12,33,40,0,2,33,40,64,0, - 0,0,2,142,0,0,0,0,7,0,64,20, - 33,32,64,2,255,255,165,48,33,48,32,2, - 19,90,192,12,33,56,0,2,104,90,192,8, - 0,0,0,0,0,0,32,166,4,0,32,174, - 8,0,32,174,12,0,32,166,28,0,191,143, - 24,0,178,143,20,0,177,143,16,0,176,143, - 8,0,224,3,32,0,189,39,216,255,189,39, - 36,0,191,175,32,0,180,175,28,0,179,175, - 24,0,178,175,20,0,177,175,16,0,176,175, - 33,152,128,0,33,136,160,0,33,144,192,0, - 56,0,176,147,0,0,0,0,196,88,192,12, - 33,160,224,0,224,0,66,48,7,0,80,20, - 33,32,96,2,124,89,192,12,33,40,64,2, - 255,255,66,48,255,255,131,50,7,0,67,16, - 33,32,96,2,0,0,66,142,0,0,0,0, - 16,0,64,20,4,0,2,36,152,90,192,8, - 0,0,66,174,198,89,192,12,33,40,64,2, - 33,40,64,0,0,0,66,142,0,0,0,0, - 7,0,64,20,33,32,96,2,255,255,165,48, - 33,48,32,2,19,90,192,12,33,56,64,2, - 156,90,192,8,0,0,0,0,0,0,32,166, - 4,0,32,174,8,0,32,174,12,0,32,166, - 36,0,191,143,32,0,180,143,28,0,179,143, - 24,0,178,143,20,0,177,143,16,0,176,143, - 8,0,224,3,40,0,189,39,33,56,0,0, - 255,255,168,36,255,255,165,48,50,0,160,16, - 1,0,9,36,1,0,12,36,4,0,10,36, - 3,0,11,36,255,255,5,52,0,0,130,144, - 0,0,0,0,128,0,66,48,16,0,64,20, - 255,0,3,36,8,0,131,140,12,0,130,140, - 0,0,0,0,43,16,98,0,5,0,64,16, - 1,0,98,36,8,0,130,172,0,0,99,144, - 193,90,192,8,0,0,0,0,0,0,130,144, - 0,0,0,0,128,0,66,52,0,0,130,160, - 255,0,3,36,0,0,130,144,0,0,0,0, - 128,0,66,48,3,0,64,16,0,0,0,0, - 218,90,192,8,0,0,204,172,11,0,32,17, - 255,255,2,49,5,0,74,20,33,72,0,0, - 3,0,96,16,0,0,0,0,218,90,192,8, - 0,0,203,172,128,0,98,48,3,0,64,16, - 0,18,7,0,255,255,7,36,0,18,7,0, - 37,56,67,0,33,16,0,1,255,255,66,48, - 212,255,64,20,33,64,5,1,8,0,224,3, - 33,16,224,0,224,255,189,39,24,0,191,175, - 20,0,177,175,16,0,176,175,33,128,128,0, - 
124,89,192,12,33,136,160,0,33,32,0,2, - 198,89,192,12,33,40,32,2,33,32,0,2, - 255,255,69,48,164,90,192,12,33,48,32,2, - 24,0,191,143,20,0,177,143,16,0,176,143, - 8,0,224,3,32,0,189,39,216,255,189,39, - 32,0,191,175,28,0,179,175,24,0,178,175, - 20,0,177,175,16,0,176,175,33,144,128,0, - 33,136,160,0,33,152,192,0,196,88,192,12, - 33,128,224,0,224,0,66,48,255,0,16,50, - 7,0,80,20,33,32,64,2,124,89,192,12, - 33,40,32,2,255,255,66,48,255,255,99,50, - 8,0,67,16,33,32,64,2,0,0,34,142, - 0,0,0,0,2,0,64,20,4,0,2,36, - 0,0,34,174,17,91,192,8,33,16,0,0, - 198,89,192,12,33,40,32,2,33,32,64,2, - 255,255,69,48,164,90,192,12,33,48,32,2, - 32,0,191,143,28,0,179,143,24,0,178,143, - 20,0,177,143,16,0,176,143,8,0,224,3, - 40,0,189,39,208,255,189,39,44,0,191,175, - 40,0,180,175,36,0,179,175,32,0,178,175, - 28,0,177,175,24,0,176,175,33,128,128,0, - 33,152,192,0,33,160,224,0,0,0,96,174, - 4,0,96,174,8,0,3,142,4,0,2,142, - 0,0,0,0,35,24,98,0,255,255,165,48, - 35,0,160,24,33,144,0,0,1,0,6,36, - 0,0,2,146,0,0,0,0,128,0,66,48, - 16,0,64,20,255,0,4,36,8,0,4,142, - 12,0,2,142,0,0,0,0,43,16,130,0, - 5,0,64,16,1,0,130,36,8,0,2,174, - 0,0,132,144,64,91,192,8,0,0,0,0, - 0,0,2,146,0,0,0,0,128,0,66,52, - 0,0,2,162,255,0,4,36,0,0,2,146, - 0,0,0,0,128,0,66,48,3,0,64,16, - 128,0,130,48,150,91,192,8,0,0,134,174, - 2,0,64,20,0,0,0,0,1,0,82,38, - 255,255,165,36,224,255,160,28,0,0,0,0, - 33,32,0,2,33,40,96,0,251,88,192,12, - 33,48,0,0,68,0,64,18,1,0,81,38, - 9,50,192,12,128,32,17,0,33,40,64,0, - 63,0,160,16,0,0,0,0,0,0,113,174, - 4,0,101,174,59,0,64,26,33,56,0,0, - 1,0,8,36,33,48,0,0,0,0,2,146, - 0,0,0,0,128,0,66,48,16,0,64,20, - 255,0,4,36,8,0,3,142,12,0,2,142, - 0,0,0,0,43,16,98,0,5,0,64,16, - 1,0,98,36,8,0,2,174,0,0,100,144, - 114,91,192,8,0,0,0,0,0,0,2,146, - 0,0,0,0,128,0,66,52,0,0,2,162, - 255,0,4,36,0,0,2,146,0,0,0,0, - 128,0,66,48,3,0,64,16,192,49,6,0, - 150,91,192,8,0,0,136,174,127,0,130,48, - 37,48,194,0,128,0,130,48,225,255,64,20, - 0,0,0,0,18,0,224,20,40,0,194,44, - 4,0,64,16,80,0,194,44,0,0,160,172, - 145,91,192,8,4,0,165,36,5,0,64,16, - 216,255,194,36,0,0,168,172,4,0,165,36, - 146,91,192,8,0,0,162,172,2,0,2,36, - 0,0,162,172,4,0,165,36,176,255,194,36, - 146,91,192,8,0,0,162,172,0,0,166,172, - 1,0,231,36,42,16,242,0,200,255,64,20, - 4,0,165,36,44,0,191,143,40,0,180,143, - 36,0,179,143,32,0,178,143,28,0,177,143, - 24,0,176,143,8,0,224,3,48,0,189,39, - 224,255,189,39,28,0,191,175,24,0,178,175, - 20,0,177,175,16,0,176,175,33,136,128,0, - 33,144,160,0,33,128,192,0,124,89,192,12, - 33,40,0,2,33,32,32,2,198,89,192,12, - 33,40,0,2,33,40,64,0,0,0,2,142, - 0,0,0,0,5,0,64,20,33,32,32,2, - 255,255,165,48,33,48,64,2,24,91,192,12, - 33,56,0,2,28,0,191,143,24,0,178,143, - 20,0,177,143,16,0,176,143,8,0,224,3, - 32,0,189,39,216,255,189,39,36,0,191,175, - 32,0,180,175,28,0,179,175,24,0,178,175, - 20,0,177,175,16,0,176,175,33,144,128,0, - 33,160,160,0,33,136,192,0,56,0,176,147, - 0,0,0,0,196,88,192,12,33,152,224,0, - 224,0,66,48,7,0,80,20,33,32,64,2, - 124,89,192,12,33,40,32,2,255,255,66,48, - 255,255,99,50,7,0,67,16,33,32,64,2, - 0,0,34,142,0,0,0,0,14,0,64,20, - 4,0,2,36,226,91,192,8,0,0,34,174, - 198,89,192,12,33,40,32,2,33,40,64,0, - 0,0,34,142,0,0,0,0,5,0,64,20, - 33,32,64,2,255,255,165,48,33,48,128,2, - 24,91,192,12,33,56,32,2,36,0,191,143, - 32,0,180,143,28,0,179,143,24,0,178,143, - 20,0,177,143,16,0,176,143,8,0,224,3, - 40,0,189,39,0,0,0,0,0,0,0,0, - 216,255,189,39,32,0,191,175,28,0,179,175, - 24,0,178,175,20,0,177,175,16,0,176,175, - 33,152,128,0,8,0,99,142,4,0,98,142, - 0,0,0,0,35,128,98,0,255,255,4,50, - 
19,0,128,16,33,136,160,0,9,50,192,12, - 0,0,0,0,33,144,64,0,3,0,64,22, - 255,255,16,50,17,92,192,8,255,255,2,36, - 33,32,64,2,4,0,101,142,0,0,0,0, - 80,68,192,12,33,48,0,2,1,0,2,36, - 0,0,34,166,4,0,50,174,33,128,80,2, - 15,92,192,8,8,0,48,174,0,0,32,166, - 4,0,32,174,8,0,32,174,12,0,32,166, - 33,16,0,0,32,0,191,143,28,0,179,143, - 24,0,178,143,20,0,177,143,16,0,176,143, - 8,0,224,3,40,0,189,39,232,255,189,39, - 20,0,191,175,16,0,176,175,33,128,128,0, - 0,0,2,150,0,0,0,0,1,0,66,48, - 7,0,64,16,0,0,0,0,4,0,4,142, - 0,0,0,0,3,0,128,16,0,0,0,0, - 61,50,192,12,0,0,0,0,0,0,0,166, - 8,0,0,174,4,0,0,174,12,0,0,166, - 20,0,191,143,16,0,176,143,8,0,224,3, - 24,0,189,39,224,255,189,39,24,0,191,175, - 20,0,177,175,16,0,176,175,33,128,128,0, - 8,0,163,140,4,0,162,140,0,0,0,0, - 35,136,98,0,255,255,35,50,12,0,2,150, - 0,0,0,0,43,16,67,0,4,0,64,16, - 255,255,38,50,12,0,17,150,0,0,0,0, - 255,255,38,50,6,0,192,16,255,255,34,50, - 8,0,4,142,4,0,165,140,80,68,192,12, - 0,0,0,0,255,255,34,50,8,0,3,142, - 0,0,0,0,33,16,67,0,8,0,2,174, - 12,0,2,150,0,0,0,0,35,16,81,0, - 12,0,2,166,24,0,191,143,20,0,177,143, - 16,0,176,143,8,0,224,3,32,0,189,39, - 1,0,2,36,23,0,194,16,2,0,194,40, - 5,0,64,16,2,0,2,36,7,0,192,16, - 255,255,2,36,132,92,192,8,0,0,0,0, - 23,0,194,16,255,255,2,36,132,92,192,8, - 0,0,0,0,255,255,162,48,4,0,131,140, - 0,0,0,0,33,48,67,0,8,0,130,140, - 0,0,0,0,35,16,67,0,12,0,131,148, - 0,0,0,0,33,16,67,0,124,92,192,8, - 35,40,69,0,255,255,162,48,8,0,131,140, - 0,0,0,0,33,48,67,0,12,0,130,148, - 0,0,0,0,124,92,192,8,35,40,69,0, - 12,0,130,148,8,0,131,140,0,0,0,0, - 33,48,67,0,255,255,162,48,35,48,194,0, - 4,0,130,140,0,0,0,0,43,16,194,0, - 4,0,64,20,255,255,2,36,8,0,134,172, - 12,0,133,164,33,16,0,0,8,0,224,3, - 0,0,0,0,216,255,189,39,32,0,191,175, - 28,0,179,175,24,0,178,175,20,0,177,175, - 16,0,176,175,33,128,128,0,33,152,160,0, - 8,0,3,142,4,0,2,142,0,0,0,0, - 35,144,98,0,255,255,66,50,12,0,3,150, - 0,0,0,0,33,16,67,0,255,255,99,50, - 42,16,67,0,35,0,64,16,1,0,2,36, - 0,0,3,150,0,0,0,0,32,0,98,20, - 255,255,2,36,9,50,192,12,255,255,100,50, - 33,136,64,0,3,0,32,22,255,255,70,50, - 189,92,192,8,255,255,2,36,5,0,192,16, - 0,0,0,0,4,0,5,142,0,0,0,0, - 80,68,192,12,33,32,32,2,0,0,2,150, - 0,0,0,0,1,0,66,48,7,0,64,16, - 0,0,0,0,4,0,4,142,0,0,0,0, - 3,0,128,16,0,0,0,0,61,50,192,12, - 0,0,0,0,4,0,17,174,255,255,66,50, - 33,16,34,2,8,0,2,174,35,16,114,2, - 12,0,2,166,33,16,0,0,32,0,191,143, - 28,0,179,143,24,0,178,143,20,0,177,143, - 16,0,176,143,8,0,224,3,40,0,189,39, - 216,255,189,39,32,0,191,175,28,0,179,175, - 24,0,178,175,20,0,177,175,16,0,176,175, - 33,128,128,0,33,136,192,0,255,255,36,50, - 35,0,128,16,33,152,160,0,8,0,2,142, - 4,0,3,142,0,0,0,0,35,16,67,0, - 255,255,66,48,12,0,3,150,0,0,0,0, - 33,16,67,0,42,16,68,0,20,0,64,16, - 0,0,0,0,9,50,192,12,0,0,0,0, - 33,144,64,0,24,0,64,18,255,255,2,36, - 0,0,2,150,0,0,0,0,1,0,66,48, - 8,0,64,16,1,0,2,36,4,0,4,142, - 0,0,0,0,4,0,128,16,0,0,0,0, - 61,50,192,12,0,0,0,0,1,0,2,36, - 0,0,2,166,4,0,18,174,4,0,4,142, - 33,40,96,2,80,68,192,12,255,255,38,50, - 33,32,0,2,255,255,37,50,85,92,192,12, - 33,48,0,0,33,16,0,0,32,0,191,143, - 28,0,179,143,24,0,178,143,20,0,177,143, - 16,0,176,143,8,0,224,3,40,0,189,39, - 0,0,0,0,0,0,0,0,0,0,0,0, - 184,255,189,39,64,0,191,175,60,0,183,175, - 56,0,182,175,52,0,181,175,48,0,180,175, - 44,0,179,175,40,0,178,175,36,0,177,175, - 32,0,176,175,33,144,128,0,112,0,84,142, - 0,0,0,0,92,0,128,18,1,0,2,36, - 108,0,81,142,0,0,0,0,88,0,32,18, - 0,0,0,0,96,0,87,38,136,0,66,174, - 140,0,64,174,96,0,64,174,100,0,64,174, - 
224,83,192,12,33,32,32,2,33,168,64,0, - 25,0,160,18,33,128,160,2,108,0,66,142, - 0,0,0,0,124,0,66,174,128,0,84,174, - 108,0,81,174,112,0,85,174,29,0,32,26, - 33,152,0,0,255,255,22,36,33,32,0,2, - 8,0,133,38,33,48,64,2,170,72,192,12, - 1,0,7,36,10,0,86,16,33,32,64,2, - 14,0,64,20,0,0,0,0,64,0,66,142, - 0,0,0,0,10,0,64,20,2,0,5,36, - 56,93,192,8,1,0,102,38,33,32,64,2, - 5,0,5,36,33,48,0,0,140,84,192,12, - 0,0,0,0,107,93,192,8,1,0,2,36, - 1,0,115,38,68,0,16,38,42,16,113,2, - 230,255,64,20,68,0,148,38,40,0,32,18, - 33,128,160,2,17,0,2,146,0,0,0,0, - 34,0,66,48,32,0,64,20,0,0,0,0, - 36,0,2,142,16,0,176,175,16,0,66,140, - 24,0,4,142,28,0,5,142,32,0,6,142, - 0,0,0,0,9,248,64,0,33,56,64,2, - 17,0,2,146,0,0,0,0,32,0,66,52, - 17,0,2,162,0,0,226,142,0,0,0,0, - 15,0,64,16,0,0,0,0,255,255,49,38, - 15,0,32,18,68,0,16,38,17,0,3,146, - 0,0,0,0,32,0,98,48,2,0,64,20, - 34,0,98,52,17,0,2,162,255,255,49,38, - 248,255,32,22,68,0,16,38,107,93,192,8, - 33,16,0,0,255,255,49,38,218,255,32,22, - 68,0,16,38,33,16,0,0,64,0,191,143, - 60,0,183,143,56,0,182,143,52,0,181,143, - 48,0,180,143,44,0,179,143,40,0,178,143, - 36,0,177,143,32,0,176,143,8,0,224,3, - 72,0,189,39,168,255,189,39,80,0,191,175, - 76,0,183,175,72,0,182,175,68,0,181,175, - 64,0,180,175,60,0,179,175,56,0,178,175, - 52,0,177,175,48,0,176,175,33,144,128,0, - 96,0,66,142,0,0,0,0,3,0,64,16, - 33,32,0,0,240,93,192,8,255,255,2,36, - 116,0,66,142,0,0,0,0,7,0,64,16, - 104,0,67,38,12,0,99,140,0,0,0,0, - 12,0,98,140,0,0,0,0,251,255,64,20, - 0,0,0,0,8,0,116,140,0,0,0,0, - 92,0,128,18,33,16,0,0,4,0,115,140, - 0,0,0,0,88,0,96,18,0,0,0,0, - 64,0,66,142,0,0,0,0,55,0,64,20, - 0,0,0,0,33,128,128,2,52,0,96,26, - 33,136,0,0,255,255,23,36,2,0,22,36, - 5,0,21,36,17,0,2,146,0,0,0,0, - 16,0,66,48,40,0,64,16,24,0,165,39, - 8,0,3,142,28,0,2,142,0,0,0,0, - 35,24,98,0,24,0,163,175,12,0,2,142, - 0,0,0,0,28,0,162,175,128,24,3,0, - 33,24,98,0,252,255,98,140,0,0,0,0, - 1,0,66,36,252,255,98,172,12,0,0,174, - 8,0,0,174,33,32,0,2,33,48,64,2, - 170,72,192,12,1,0,7,36,6,0,87,16, - 0,0,0,0,9,0,64,20,1,0,34,38, - 96,0,86,174,196,93,192,8,100,0,66,174, - 96,0,85,174,110,86,192,12,24,0,164,39, - 240,93,192,8,255,255,2,36,110,86,192,12, - 24,0,164,39,17,0,2,146,0,0,0,0, - 12,0,66,48,17,0,2,162,1,0,4,36, - 1,0,49,38,42,16,51,2,209,255,64,20, - 68,0,16,38,27,0,128,16,33,128,128,2, - 23,0,96,26,33,136,0,0,17,0,2,146, - 0,0,0,0,34,0,66,48,14,0,64,20, - 0,0,0,0,36,0,2,142,16,0,176,175, - 16,0,66,140,24,0,4,142,28,0,5,142, - 32,0,6,142,0,0,0,0,9,248,64,0, - 33,56,64,2,17,0,2,146,0,0,0,0, - 32,0,66,52,17,0,2,162,1,0,49,38, - 42,16,51,2,235,255,64,20,68,0,16,38, - 240,93,192,8,1,0,2,36,33,16,0,0, - 80,0,191,143,76,0,183,143,72,0,182,143, - 68,0,181,143,64,0,180,143,60,0,179,143, - 56,0,178,143,52,0,177,143,48,0,176,143, - 8,0,224,3,88,0,189,39,0,0,0,0, - 200,255,189,39,48,0,191,175,44,0,179,175, - 40,0,178,175,36,0,177,175,32,0,176,175, - 33,144,128,0,96,0,64,174,100,0,64,174, - 112,0,80,142,0,0,0,0,105,0,0,18, - 33,16,0,0,108,0,81,142,0,0,0,0, - 101,0,32,18,0,0,0,0,64,0,66,142, - 0,0,0,0,43,0,64,20,33,32,64,2, - 50,0,32,26,33,152,0,0,33,32,0,2, - 33,40,64,2,96,72,192,12,1,0,6,36, - 13,0,64,20,33,32,64,2,20,0,2,150, - 0,0,0,0,1,0,66,48,34,0,64,16, - 2,0,5,36,36,0,2,142,0,0,0,0, - 3,0,66,144,0,0,0,0,2,0,66,48, - 3,0,64,20,0,0,0,0,63,94,192,8, - 2,0,5,36,36,0,2,142,0,0,0,0, - 32,0,66,140,4,0,67,142,0,0,0,0, - 36,16,67,0,16,0,64,16,33,32,64,2, - 36,0,2,142,16,0,3,146,2,0,66,144, - 0,0,0,0,11,0,98,20,3,0,5,36, - 1,0,115,38,42,16,113,2,219,255,64,20, - 68,0,16,38,69,94,192,8,96,0,83,38, - 
5,0,5,36,64,94,192,8,33,48,0,0, - 2,0,5,36,1,0,102,38,140,84,192,12, - 0,0,0,0,113,94,192,8,1,0,2,36, - 96,0,83,38,112,0,80,142,0,0,0,0, - 41,0,32,18,33,16,0,0,17,0,2,146, - 0,0,0,0,17,0,66,48,32,0,64,20, - 0,0,0,0,36,0,2,142,16,0,176,175, - 4,0,66,140,24,0,4,142,28,0,5,142, - 32,0,6,142,0,0,0,0,9,248,64,0, - 33,56,64,2,17,0,2,146,0,0,0,0, - 16,0,66,52,17,0,2,162,0,0,98,142, - 0,0,0,0,15,0,64,16,0,0,0,0, - 255,255,49,38,15,0,32,18,68,0,16,38, - 17,0,3,146,0,0,0,0,16,0,98,48, - 2,0,64,20,17,0,98,52,17,0,2,162, - 255,255,49,38,248,255,32,22,68,0,16,38, - 113,94,192,8,33,16,0,0,255,255,49,38, - 218,255,32,22,68,0,16,38,33,16,0,0, - 48,0,191,143,44,0,179,143,40,0,178,143, - 36,0,177,143,32,0,176,143,8,0,224,3, - 56,0,189,39,192,255,189,39,56,0,191,175, - 52,0,183,175,48,0,182,175,44,0,181,175, - 40,0,180,175,36,0,179,175,32,0,178,175, - 28,0,177,175,24,0,176,175,33,144,128,0, - 112,0,84,142,0,0,0,0,171,0,128,18, - 33,16,0,0,108,0,85,142,0,0,0,0, - 166,0,160,18,32,0,2,36,56,0,67,146, - 0,0,0,0,75,0,98,16,96,0,83,38, - 33,0,98,40,5,0,64,16,64,0,2,36, - 9,0,96,16,33,16,0,0,49,95,192,8, - 0,0,0,0,85,0,98,16,128,0,2,36, - 137,0,98,16,33,16,0,0,49,95,192,8, - 0,0,0,0,33,136,160,2,8,0,32,18, - 33,128,128,2,17,0,2,146,0,0,0,0, - 1,0,66,48,139,0,64,16,255,255,49,38, - 250,255,32,22,68,0,16,38,0,0,98,142, - 0,0,0,0,136,0,64,20,33,16,0,0, - 33,136,160,2,43,0,32,18,33,128,128,2, - 14,0,22,36,64,0,23,36,17,0,2,146, - 0,0,0,0,34,0,66,48,33,0,64,20, - 0,0,0,0,36,0,2,142,16,0,176,175, - 12,0,66,140,24,0,4,142,28,0,5,142, - 32,0,6,142,0,0,0,0,9,248,64,0, - 33,56,64,2,17,0,2,146,0,0,0,0, - 32,0,66,52,17,0,2,162,0,0,98,142, - 0,0,0,0,16,0,64,16,0,0,0,0, - 255,255,49,38,10,0,32,18,68,0,16,38, - 17,0,3,146,0,0,0,0,32,0,98,48, - 2,0,64,20,192,0,98,52,17,0,2,162, - 255,255,49,38,248,255,32,22,68,0,16,38, - 0,0,118,174,236,94,192,8,56,0,87,162, - 255,255,49,38,217,255,32,22,68,0,16,38, - 32,0,2,36,56,0,66,162,0,0,98,142, - 0,0,0,0,13,0,64,20,64,0,2,36, - 33,136,160,2,81,0,32,18,33,128,128,2, - 17,0,2,146,0,0,0,0,2,0,66,48, - 74,0,64,16,255,255,49,38,250,255,32,22, - 68,0,16,38,49,95,192,8,33,16,0,0, - 56,0,66,162,14,0,2,36,0,0,98,174, - 33,136,160,2,53,0,32,18,33,128,128,2, - 2,0,23,36,15,0,22,36,17,0,2,146, - 0,0,0,0,194,0,66,48,5,0,64,16, - 0,0,0,0,19,0,87,16,0,0,0,0, - 32,95,192,8,255,255,49,38,64,0,2,142, - 0,0,0,0,34,0,64,16,0,0,0,0, - 16,0,176,175,64,0,2,142,24,0,4,142, - 28,0,5,142,32,0,6,142,0,0,0,0, - 9,248,64,0,33,56,64,2,17,0,2,146, - 0,0,0,0,30,95,192,8,64,0,66,52, - 64,0,2,142,0,0,0,0,13,0,64,16, - 0,0,0,0,16,0,176,175,64,0,2,142, - 24,0,4,142,28,0,5,142,32,0,6,142, - 0,0,0,0,9,248,64,0,33,56,64,2, - 17,0,2,146,0,0,0,0,30,95,192,8, - 64,0,66,52,0,0,118,174,17,0,2,146, - 0,0,0,0,128,0,66,52,17,0,2,162, - 255,255,49,38,208,255,32,22,68,0,16,38, - 33,136,160,2,12,0,32,18,33,128,128,2, - 17,0,2,146,0,0,0,0,128,0,66,48, - 5,0,64,16,255,255,49,38,250,255,32,22, - 68,0,16,38,49,95,192,8,33,16,0,0, - 49,95,192,8,1,0,2,36,33,16,0,0, - 56,0,191,143,52,0,183,143,48,0,182,143, - 44,0,181,143,40,0,180,143,36,0,179,143, - 32,0,178,143,28,0,177,143,24,0,176,143, - 8,0,224,3,64,0,189,39,184,255,189,39, - 64,0,191,175,60,0,183,175,56,0,182,175, - 52,0,181,175,48,0,180,175,44,0,179,175, - 40,0,178,175,36,0,177,175,32,0,176,175, - 33,160,128,0,112,0,147,142,0,0,0,0, - 129,0,96,18,33,16,0,0,108,0,146,142, - 0,0,0,0,125,0,64,18,96,0,151,38, - 96,0,128,174,100,0,128,174,224,83,192,12, - 33,32,64,2,33,168,64,0,5,0,160,22, - 33,136,64,2,33,32,128,2,5,0,5,36, - 123,95,192,8,33,48,0,0,36,0,64,18, - 
33,128,160,2,5,0,22,36,16,0,22,162, - 8,0,100,142,12,0,101,142,0,0,0,0, - 80,86,192,12,8,0,6,38,5,0,64,20, - 0,0,0,0,255,255,49,38,68,0,115,38, - 245,255,32,22,68,0,16,38,21,0,32,18, - 42,16,50,2,7,0,64,16,33,128,160,2, - 59,84,192,12,33,32,0,2,1,0,49,38, - 42,16,50,2,251,255,64,20,68,0,16,38, - 61,50,192,12,33,32,160,2,33,32,128,2, - 5,0,5,36,123,95,192,8,33,48,0,0, - 2,0,5,36,1,0,38,38,140,84,192,12, - 0,0,0,0,203,95,192,8,1,0,2,36, - 124,0,146,174,112,0,130,142,0,0,0,0, - 128,0,130,174,112,0,149,174,33,128,160,2, - 27,0,64,26,33,136,0,0,33,32,0,2, - 33,40,128,2,96,72,192,12,1,0,6,36, - 13,0,64,20,0,0,0,0,20,0,2,150, - 0,0,0,0,1,0,66,48,8,0,64,16, - 0,0,0,0,36,0,2,142,0,0,0,0, - 3,0,66,144,0,0,0,0,1,0,66,48, - 5,0,64,20,0,0,0,0,64,0,130,142, - 0,0,0,0,221,255,64,16,33,32,128,2, - 1,0,49,38,42,16,50,2,231,255,64,20, - 68,0,16,38,40,0,64,18,33,128,160,2, - 17,0,2,146,0,0,0,0,34,0,66,48, - 32,0,64,20,0,0,0,0,36,0,2,142, - 16,0,176,175,8,0,66,140,24,0,4,142, - 28,0,5,142,32,0,6,142,0,0,0,0, - 9,248,64,0,33,56,128,2,17,0,2,146, - 0,0,0,0,32,0,66,52,17,0,2,162, - 0,0,226,142,0,0,0,0,15,0,64,16, - 0,0,0,0,255,255,82,38,15,0,64,18, - 68,0,16,38,17,0,3,146,0,0,0,0, - 32,0,98,48,2,0,64,20,34,0,98,52, - 17,0,2,162,255,255,82,38,248,255,64,22, - 68,0,16,38,203,95,192,8,33,16,0,0, - 255,255,82,38,218,255,64,22,68,0,16,38, - 33,16,0,0,64,0,191,143,60,0,183,143, - 56,0,182,143,52,0,181,143,48,0,180,143, - 44,0,179,143,40,0,178,143,36,0,177,143, - 32,0,176,143,8,0,224,3,72,0,189,39, - 0,0,0,0,0,0,0,0,37,115,58,37, - 100,58,32,102,97,105,108,101,100,32,97,115, - 115,101,114,116,105,111,110,32,96,37,115,39, - 10,0,0,0,114,97,109,116,101,115,116,100, - 119,40,98,99,46,98,99,95,104,101,97,112, - 115,116,97,114,116,44,32,108,101,110,44,32, - 49,44,32,48,44,32,48,41,32,61,61,32, - 48,0,0,0,98,99,46,98,99,95,104,101, - 97,112,101,110,100,32,60,61,32,98,99,46, - 98,99,95,114,97,109,101,110,100,0,0,0, - 35,32,112,111,114,116,115,58,32,37,100,10, - 0,0,0,0,42,42,42,80,114,111,102,105, - 108,105,110,103,32,64,32,37,120,44,32,37, - 120,10,0,0,103,111,116,32,104,101,114,101, - 32,99,97,117,115,101,61,37,120,32,115,116, - 97,116,117,115,61,37,120,32,118,101,99,61, - 37,120,10,0,37,115,58,37,100,58,32,102, - 97,105,108,101,100,32,97,115,115,101,114,116, - 105,111,110,32,96,37,115,39,10,0,0,0, - 83,101,99,111,110,100,115,32,60,32,48,120, - 55,70,70,70,102,102,102,102,0,0,0,0, - 84,105,109,101,114,115,85,115,101,100,32,60, - 32,78,84,73,77,69,82,83,0,0,0,0, - 0,0,0,0,69,69,80,82,79,77,32,105, - 115,32,98,97,100,10,0,0,80,111,114,116, - 32,37,100,32,101,116,104,101,114,32,97,100, - 100,114,101,115,115,58,32,37,48,50,88,58, - 37,48,50,88,58,37,48,50,88,58,37,48, - 50,88,58,37,48,50,88,58,37,48,50,88, - 10,0,0,0,35,35,35,32,56,50,53,57, - 54,32,67,104,97,110,32,37,100,58,32,115, - 101,108,102,116,101,115,116,32,102,97,105,108, - 101,100,32,40,37,120,41,10,0,0,0,0, - 42,42,42,32,56,50,53,57,54,32,80,111, - 114,116,32,37,100,58,32,115,101,108,102,116, - 101,115,116,32,112,97,115,115,101,100,10,0, - 56,50,53,57,54,32,80,111,114,116,32,37, - 100,58,32,100,117,109,112,32,102,97,105,108, - 101,100,32,40,37,120,41,10,0,0,0,0, - 42,42,42,32,56,50,53,57,54,32,80,111, - 114,116,32,37,100,58,32,100,117,109,112,32, - 112,97,115,115,101,100,10,0,35,35,35,32, - 56,50,53,57,54,32,80,111,114,116,32,37, - 100,58,32,83,67,80,32,102,101,116,99,104, - 32,102,97,105,108,101,100,10,0,0,0,0, - 42,42,42,32,56,50,53,57,54,32,80,111, - 114,116,32,37,100,58,32,83,67,80,32,102, - 101,116,99,104,32,112,97,115,115,101,100,32, - 
37,120,32,10,0,0,0,0,35,35,35,32, - 56,50,53,57,54,32,80,111,114,116,32,37, - 100,58,32,66,85,83,84,73,77,69,82,83, - 32,108,111,97,100,32,102,97,105,108,101,100, - 10,0,0,0,42,42,42,32,56,50,53,57, - 54,32,80,111,114,116,32,37,100,58,32,66, - 85,83,84,73,77,69,82,83,32,108,111,97, - 100,32,112,97,115,115,101,100,10,0,0,0, - 35,35,35,32,65,67,75,32,100,105,100,32, - 110,111,116,32,111,99,99,117,114,10,0,0, - 35,35,35,32,115,116,97,116,117,115,32,115, - 116,105,108,108,32,98,117,115,121,58,32,37, - 120,10,0,0,101,116,104,95,105,110,105,116, - 46,99,0,0,42,42,42,76,49,87,65,10, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,35,35,35,32,84,66,68,32, - 98,108,111,99,107,115,32,97,114,101,110,39, - 116,32,98,101,105,110,103,32,102,114,101,101, - 100,10,0,0,65,116,116,101,109,112,116,32, - 116,111,32,102,114,101,101,32,98,111,103,117, - 115,32,84,66,68,32,37,120,10,0,0,0, - 35,35,35,32,66,85,70,32,98,108,111,99, - 107,115,32,97,114,101,110,39,116,32,98,101, - 105,110,103,32,102,114,101,101,100,10,0,0, - 65,116,116,101,109,112,116,32,116,111,32,102, - 114,101,101,32,98,111,103,117,115,32,66,85, - 70,32,37,120,10,0,0,0,0,0,0,0, - 0,0,0,0,82,70,68,115,32,37,100,32, - 10,0,0,0,82,66,68,115,32,37,100,32, - 10,0,0,0,37,115,58,37,100,58,32,102, - 97,105,108,101,100,32,97,115,115,101,114,116, - 105,111,110,32,96,37,115,39,10,0,0,0, - 101,116,104,95,114,99,118,46,99,0,0,0, - 100,115,116,99,104,97,110,32,62,61,32,49, - 32,38,38,32,100,115,116,99,104,97,110,32, - 60,32,78,99,104,97,110,0,37,115,37,48, - 50,88,58,37,48,50,88,58,37,48,50,88, - 58,37,48,50,88,58,37,48,50,88,58,37, - 48,50,88,37,115,0,0,0,176,72,0,131, - 80,67,0,131,164,67,0,131,24,68,0,131, - 80,67,0,131,0,0,0,0,4,82,0,131, - 184,76,0,131,12,77,0,131,128,77,0,131, - 184,76,0,131,0,0,0,0,0,0,0,0, - 0,0,0,0,37,115,58,37,100,58,32,102, - 97,105,108,101,100,32,97,115,115,101,114,116, - 105,111,110,32,96,37,115,39,10,0,0,0, - 101,116,104,95,120,109,105,116,46,99,0,0, - 99,98,112,45,62,110,111,112,46,99,109,100, - 32,61,61,32,73,53,57,54,95,67,66,95, - 67,77,68,95,78,79,80,124,73,53,57,54, - 95,67,66,95,67,77,68,95,69,76,0,0, - 99,98,112,45,62,110,111,112,46,99,109,100, - 32,38,32,73,53,57,54,95,67,66,95,67, - 77,68,0,0,112,45,62,115,99,98,112,45, - 62,115,116,97,116,117,115,32,38,32,73,53, - 57,54,95,83,67,66,95,67,78,65,0,0, - 35,35,35,32,99,109,100,32,115,116,105,108, - 108,32,98,117,115,121,58,32,37,120,10,0, - 37,100,61,37,100,44,37,120,44,37,100,10, - 0,0,0,0,39,37,115,39,32,37,120,32, - 37,120,10,0,37,48,56,120,58,32,37,48, - 50,120,10,0,37,48,56,120,58,32,37,48, - 52,120,10,0,37,48,56,120,58,32,37,48, - 56,120,10,0,108,105,110,107,32,115,116,97, - 116,101,32,37,48,50,120,10,0,0,0,0, - 42,42,42,32,103,111,116,32,37,100,32,105, - 110,116,101,114,114,117,112,116,115,10,0,0, - 35,35,35,32,69,120,112,101,99,116,101,100, - 32,37,100,32,98,117,116,32,103,111,116,32, - 37,100,32,105,110,116,101,114,114,117,112,116, - 115,10,0,0,80,76,88,57,48,54,48,32, - 65,100,100,114,101,115,115,32,61,32,37,88, - 32,68,65,84,65,32,61,32,37,88,32,10, - 0,0,0,0,42,42,42,32,87,114,105,116, - 101,32,111,102,32,37,120,32,116,111,32,37, - 100,32,102,97,105,108,101,100,10,0,0,0, - 42,42,42,32,87,114,105,116,101,32,111,102, - 32,37,120,32,116,111,32,37,120,32,102,97, - 105,108,101,100,10,0,0,0,42,42,42,42, - 42,42,42,42,42,42,42,42,42,42,42,42, - 0,0,0,0,42,42,42,32,73,108,108,101, - 103,97,108,32,99,111,109,109,97,110,100,32, - 39,37,115,39,10,0,0,0,45,45,45,32, - 99,111,109,109,97,110,100,115,32,109,44,116, - 44,101,44,69,44,97,44,120,44,108,44,115, - 
44,112,32,99,97,110,32,98,101,32,112,114, - 101,102,105,120,101,100,32,119,105,116,104,32, - 97,32,114,101,112,101,97,116,32,99,111,117, - 110,116,0,0,108,32,120,112,111,114,116,32, - 114,112,111,114,116,32,91,108,101,110,93,32, - 32,32,32,32,32,32,32,32,32,76,111,111, - 112,98,97,99,107,32,116,101,115,116,32,102, - 114,111,109,32,120,112,111,114,116,32,40,49, - 45,54,41,32,116,111,32,114,112,111,114,116, - 32,40,49,45,54,41,0,0,105,32,32,32, - 32,32,32,32,32,32,73,110,116,101,114,114, - 117,112,116,32,72,111,115,116,32,32,124,32, - 32,115,32,91,112,111,114,116,93,32,91,108, - 101,110,93,32,66,97,99,107,50,98,97,99, - 107,32,120,109,105,116,32,99,110,116,32,112, - 97,99,107,101,116,115,0,0,80,32,32,32, - 32,32,32,32,32,32,84,101,115,116,32,80, - 76,88,32,57,48,54,48,32,32,32,124,32, - 32,100,32,91,114,124,119,124,108,124,116,93, - 32,91,118,97,108,124,39,99,39,93,32,32, - 82,101,97,100,47,87,114,105,116,101,47,76, - 111,111,112,47,84,105,109,101,32,68,77,65, - 0,0,0,0,76,32,32,32,32,32,32,32, - 32,32,82,101,97,100,32,76,105,110,107,32, - 76,69,68,115,32,32,124,32,32,112,32,114, - 101,103,110,111,32,91,118,97,108,93,32,32, - 82,101,97,100,47,91,119,114,105,116,101,93, - 32,80,76,88,32,114,101,103,105,115,116,101, - 114,0,0,0,65,32,97,100,100,114,32,32, - 32,32,83,101,116,32,101,116,104,101,114,32, - 97,100,100,114,32,32,124,32,32,36,32,115, - 99,114,105,112,116,32,32,32,32,32,32,32, - 82,101,97,100,32,99,109,100,115,32,102,114, - 111,109,32,102,105,108,101,32,39,115,99,114, - 105,112,116,39,0,0,0,0,120,32,91,112, - 111,114,116,93,32,32,84,120,32,101,116,104, - 101,114,32,32,32,32,32,32,32,32,124,32, - 32,82,32,91,112,111,114,116,93,32,32,32, - 32,32,32,32,82,120,32,101,116,104,101,114, - 32,91,111,110,32,112,111,114,116,32,49,45, - 54,93,0,0,72,32,32,32,32,32,32,32, - 32,32,84,111,103,103,108,101,32,70,67,67, - 32,116,101,115,116,32,124,32,32,113,44,94, - 68,44,94,90,32,32,32,32,32,32,32,32, - 81,117,105,116,0,0,0,0,97,32,32,32, - 32,32,32,32,32,32,84,101,115,116,32,97, - 108,108,32,32,32,32,32,32,32,32,124,32, - 32,90,32,109,115,101,99,115,32,32,32,32, - 32,32,32,32,80,97,117,115,101,32,102,111, - 114,32,97,32,119,104,105,108,101,0,0,0, - 69,32,32,32,32,32,32,32,32,32,84,101, - 115,116,32,69,69,80,82,79,77,32,32,32, - 32,32,124,32,32,83,114,32,97,100,100,114, - 59,32,83,119,32,97,100,100,114,32,118,97, - 108,59,32,32,82,101,97,100,47,87,114,105, - 116,101,32,80,76,88,32,69,50,32,114,101, - 103,0,0,0,101,32,91,112,111,114,116,93, - 32,32,84,101,115,116,32,101,116,104,101,114, - 110,101,116,32,32,32,124,32,32,69,114,32, - 97,100,100,114,59,32,69,119,32,97,100,100, - 114,32,118,97,108,59,32,32,82,101,97,100, - 47,87,114,105,116,101,32,69,69,80,82,79, - 77,32,114,101,103,0,0,0,116,32,32,32, - 32,32,32,32,32,32,84,101,115,116,32,116, - 105,109,101,114,115,32,32,32,32,32,124,32, - 32,119,91,42,93,32,97,100,100,114,32,118, - 97,108,32,32,87,114,105,116,101,32,109,101, - 109,111,114,121,58,32,119,98,32,119,104,44, - 32,119,119,44,32,119,116,0,109,32,32,32, - 32,32,32,32,32,32,84,101,115,116,32,109, - 101,109,111,114,121,32,32,32,32,32,124,32, - 32,114,91,42,93,32,97,100,100,114,32,32, - 32,32,32,32,82,101,97,100,32,109,101,109, - 111,114,121,58,32,114,98,32,114,104,44,32, - 114,119,44,32,114,116,0,0,42,42,42,32, - 82,105,103,104,116,83,119,105,116,99,104,32, - 68,105,97,103,110,111,115,116,105,99,115,32, - 109,101,110,117,32,42,42,42,0,0,0,0, - 45,45,45,32,84,104,114,101,101,32,99,111, - 112,105,101,115,32,111,102,32,97,100,100,114, - 101,115,115,32,100,111,32,110,111,116,32,97, - 
103,114,101,101,33,10,0,0,45,45,45,32, - 69,116,104,101,114,32,65,100,100,114,101,115, - 115,32,78,111,116,32,83,101,116,33,10,0, - 45,45,45,32,69,116,104,101,114,32,65,100, - 100,114,101,115,115,32,105,115,32,97,32,109, - 117,108,116,105,99,97,115,116,32,97,100,100, - 114,101,115,115,33,10,0,0,42,42,42,32, - 37,48,50,88,37,48,50,88,37,48,50,88, - 58,37,48,50,88,58,37,48,50,88,37,48, - 50,88,10,0,45,45,45,32,70,105,114,115, - 116,32,98,121,116,101,32,40,37,48,50,88, - 41,32,105,115,32,97,32,98,114,111,97,100, - 99,97,115,116,32,97,100,100,114,101,115,115, - 10,0,0,0,45,45,45,32,76,97,115,116, - 32,100,105,103,105,116,32,109,117,115,116,32, - 98,101,32,48,32,111,114,32,56,10,0,0, - 42,42,42,32,69,105,103,104,116,32,101,116, - 104,101,114,110,101,116,32,97,100,100,114,101, - 115,115,101,115,32,104,97,118,101,32,98,101, - 101,110,32,117,115,101,100,58,10,0,0,0, - 42,42,42,32,80,111,114,116,32,37,100,32, - 101,116,104,101,114,110,101,116,32,97,100,100, - 114,101,115,115,32,105,115,32,0,0,0,0, - 45,45,45,32,66,97,100,32,101,116,104,101, - 114,32,97,100,100,114,101,115,115,32,39,37, - 115,39,32,115,112,101,99,105,102,105,101,100, - 10,0,0,0,0,0,0,0,244,101,0,131, - 104,112,0,131,104,112,0,131,104,112,0,131, - 104,112,0,131,104,112,0,131,104,112,0,131, - 104,112,0,131,104,112,0,131,104,112,0,131, - 104,112,0,131,104,112,0,131,104,112,0,131, - 104,112,0,131,104,112,0,131,104,112,0,131, - 104,112,0,131,104,112,0,131,104,112,0,131, - 104,112,0,131,104,112,0,131,104,112,0,131, - 104,112,0,131,104,112,0,131,104,112,0,131, - 104,112,0,131,104,112,0,131,104,112,0,131, - 104,112,0,131,104,112,0,131,104,112,0,131, - 104,112,0,131,104,112,0,131,104,112,0,131, - 104,112,0,131,244,101,0,131,104,112,0,131, - 104,112,0,131,104,112,0,131,104,112,0,131, - 104,112,0,131,104,112,0,131,64,112,0,131, - 104,112,0,131,104,112,0,131,104,112,0,131, - 104,112,0,131,104,112,0,131,104,112,0,131, - 104,112,0,131,104,112,0,131,104,112,0,131, - 104,112,0,131,104,112,0,131,104,112,0,131, - 104,112,0,131,104,112,0,131,104,112,0,131, - 104,112,0,131,104,112,0,131,104,112,0,131, - 104,112,0,131,104,112,0,131,104,112,0,131, - 104,112,0,131,28,111,0,131,104,112,0,131, - 104,112,0,131,132,111,0,131,152,109,0,131, - 104,112,0,131,104,112,0,131,244,101,0,131, - 104,112,0,131,104,112,0,131,104,112,0,131, - 24,107,0,131,104,112,0,131,104,112,0,131, - 104,112,0,131,76,108,0,131,104,112,0,131, - 228,108,0,131,112,110,0,131,104,112,0,131, - 104,112,0,131,104,112,0,131,104,112,0,131, - 28,109,0,131,104,112,0,131,48,111,0,131, - 104,112,0,131,104,112,0,131,104,112,0,131, - 104,112,0,131,104,112,0,131,104,112,0,131, - 84,109,0,131,104,112,0,131,104,112,0,131, - 32,112,0,131,136,106,0,131,28,106,0,131, - 104,112,0,131,104,112,0,131,52,107,0,131, - 104,112,0,131,104,112,0,131,252,106,0,131, - 88,111,0,131,104,112,0,131,104,112,0,131, - 196,107,0,131,104,112,0,131,180,104,0,131, - 168,106,0,131,92,106,0,131,104,112,0,131, - 104,112,0,131,148,105,0,131,192,106,0,131, - 0,0,0,0,188,111,0,131,204,111,0,131, - 12,112,0,131,12,112,0,131,12,112,0,131, - 12,112,0,131,12,112,0,131,12,112,0,131, - 12,112,0,131,12,112,0,131,12,112,0,131, - 12,112,0,131,252,111,0,131,12,112,0,131, - 12,112,0,131,12,112,0,131,12,112,0,131, - 236,111,0,131,12,112,0,131,12,112,0,131, - 12,112,0,131,12,112,0,131,220,111,0,131, - 252,111,0,131,0,0,0,0,0,0,0,0, - 42,42,42,32,69,69,80,82,79,77,32,80, - 97,115,115,101,100,10,0,0,33,33,33,32, - 69,69,80,82,79,77,32,70,97,105,108,117, - 114,101,58,32,87,114,111,116,101,32,37,48, - 52,120,32,97,116,32,37,100,44,32,103,111, - 
116,32,37,48,52,120,10,0,0,0,0,0, - 0,0,0,0,0,0,0,0,35,35,35,70, - 114,97,109,101,32,37,100,32,100,105,100,32, - 110,111,116,32,97,114,114,105,118,101,10,0, - 35,35,35,32,70,114,97,109,101,32,37,100, - 44,32,108,101,110,32,37,100,44,32,98,121, - 116,101,32,37,100,44,32,119,97,110,116,32, - 37,120,32,103,111,116,32,37,120,10,0,0, - 35,35,35,70,114,97,109,101,32,37,100,32, - 119,114,111,110,103,32,108,101,110,103,116,104, - 32,40,119,97,110,116,32,37,100,32,103,111, - 116,32,37,100,41,10,0,0,35,35,35,70, - 114,97,109,101,32,37,100,58,32,103,111,116, - 32,115,101,113,32,37,100,10,0,0,0,0, - 35,35,35,32,37,100,32,67,82,67,32,101, - 114,114,111,114,115,32,111,99,99,117,114,101, - 100,10,0,0,35,35,35,32,37,100,32,65, - 108,105,103,110,32,101,114,114,111,114,115,32, - 111,99,99,117,114,101,100,10,0,0,0,0, - 35,35,35,32,37,100,32,83,104,111,114,116, - 32,101,114,114,111,114,115,32,111,99,99,117, - 114,101,100,10,0,0,0,0,35,35,35,32, - 37,100,32,79,118,101,114,114,117,110,32,101, - 114,114,111,114,115,32,111,99,99,117,114,101, - 100,10,0,0,35,35,35,32,67,85,32,115, - 116,105,108,108,32,114,117,110,110,105,110,103, - 58,32,37,120,10,0,0,0,35,35,35,32, - 99,109,100,32,115,116,105,108,108,32,98,117, - 115,121,58,32,37,120,10,0,35,35,35,32, - 115,116,97,116,117,115,32,115,116,105,108,108, - 32,98,117,115,121,58,32,37,120,10,0,0, - 67,66,61,37,120,44,32,84,66,68,61,37, - 120,44,32,66,85,70,61,37,120,10,0,0, - 116,101,115,116,95,101,116,104,101,114,46,99, - 0,0,0,0,37,100,32,102,114,97,109,101, - 115,32,111,102,32,108,101,110,103,116,104,32, - 37,100,32,115,101,110,116,32,105,110,32,37, - 100,32,109,115,101,99,115,10,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 42,42,42,32,56,50,53,52,32,84,105,109, - 101,114,32,48,32,79,75,44,32,99,111,117, - 110,116,32,119,97,115,32,37,100,10,0,0, - 42,42,42,32,56,50,53,52,32,84,105,109, - 101,114,32,48,32,110,111,116,32,105,110,116, - 101,114,114,117,112,116,105,110,103,32,37,100, - 10,0,0,0,42,42,42,32,56,50,53,52, - 32,84,105,109,101,114,32,48,32,115,112,101, - 101,100,32,119,114,111,110,103,44,32,103,111, - 116,32,37,100,32,115,104,111,117,108,100,32, - 98,101,32,49,48,48,48,10,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 9,37,120,58,32,119,97,110,116,32,37,120, - 32,103,111,116,32,37,120,10,0,0,0,0, - 45,45,45,32,82,65,77,32,84,101,115,116, - 32,111,102,32,37,120,32,116,111,32,37,120, - 32,102,97,105,108,101,100,10,0,0,0,0, - 42,42,42,32,82,65,77,32,84,101,115,116, - 32,111,102,32,37,120,32,116,111,32,37,120, - 32,112,97,115,115,101,100,10,0,0,0,0, - 35,35,35,32,68,77,65,32,68,79,78,69, - 32,110,101,118,101,114,32,111,99,99,117,114, - 114,101,100,46,32,32,99,115,114,32,61,32, - 37,120,10,0,35,35,35,32,72,111,115,116, - 32,110,101,118,101,114,32,103,111,116,32,68, - 77,65,32,105,110,116,101,114,114,117,112,116, - 46,32,98,99,95,99,110,116,32,61,32,37, - 100,10,0,0,35,35,35,32,68,77,65,32, - 101,114,114,111,114,32,97,116,32,105,110,100, - 101,120,32,37,100,58,32,119,97,110,116,101, - 100,32,37,48,50,120,32,103,111,116,32,37, - 48,50,120,10,0,0,0,0,35,35,35,32, - 73,108,108,101,103,97,108,32,72,111,115,116, - 32,97,100,100,114,32,40,61,37,120,41,32, - 111,114,32,108,101,110,103,116,104,32,40,61, - 37,100,41,10,0,0,0,0,35,35,35,32, - 67,111,117,110,116,32,99,97,110,110,111,116, - 32,98,101,32,62,32,49,48,50,52,42,49, - 48,50,52,10,0,0,0,0,42,42,42,32, - 108,99,108,46,66,117,102,49,32,61,32,37, - 120,32,45,45,62,32,104,111,115,116,46,66, - 117,102,32,61,32,37,120,32,45,45,62,32, - 108,99,108,46,66,117,102,50,32,61,32,37, - 120,10,0,0,42,42,42,32,62,32,68,98, - 
32,37,100,32,40,98,117,114,115,116,41,59, - 32,62,32,68,119,32,37,100,32,40,119,97, - 105,116,115,116,97,116,101,115,41,10,0,0, - 35,35,35,32,83,101,99,111,110,100,32,97, - 114,103,32,109,117,115,116,32,98,101,32,39, - 114,39,32,111,114,32,39,119,39,32,111,114, - 32,39,108,39,10,0,0,0,42,42,42,32, - 68,77,65,32,37,115,32,105,110,32,37,52, - 100,32,98,121,116,101,32,99,104,117,110,107, - 115,58,32,0,32,32,116,111,32,104,111,115, - 116,0,0,0,102,114,111,109,32,104,111,115, - 116,0,0,0,37,56,100,32,98,121,116,101, - 115,47,115,101,99,46,10,0,116,105,109,101, - 32,116,111,111,32,115,104,111,114,116,32,116, - 111,32,109,101,97,115,117,114,101,10,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 80,37,100,45,62,37,115,32,10,0,0,0, - 116,114,97,110,115,109,105,116,32,112,101,110, - 100,105,110,103,32,111,110,32,37,100,10,0, - 116,114,97,110,115,109,105,116,32,99,111,110, - 102,105,103,32,111,110,32,37,100,10,0,0, - 116,114,97,110,115,109,105,116,32,116,99,110, - 10,0,0,0,116,99,110,32,101,120,112,10, - 0,0,0,0,102,111,114,119,97,114,100,95, - 100,101,108,97,121,32,101,120,112,32,37,100, - 10,0,0,0,109,101,115,115,97,103,101,95, - 97,103,101,32,101,120,112,32,37,100,10,0, - 104,111,108,100,32,101,120,112,32,37,100,10, - 0,0,0,0,84,120,67,79,78,70,73,71, - 37,100,10,0,84,120,84,67,78,37,100,10, - 0,0,0,0,114,99,118,32,99,111,110,102, - 105,103,32,111,110,32,37,100,10,0,0,0, - 90,69,82,79,32,114,111,111,116,33,32,97, - 116,32,37,120,32,0,0,0,115,117,112,101, - 114,99,101,100,101,115,32,37,100,10,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 65,82,80,82,69,81,32,37,120,33,10,0, - 65,82,80,82,69,80,32,37,120,33,10,0, - 83,101,110,100,32,85,68,80,32,37,100,10, - 0,0,0,0,78,111,32,82,66,68,39,115, - 32,105,110,32,85,68,80,32,40,37,100,32, - 37,100,41,10,0,0,0,0,83,101,110,116, - 32,85,68,80,32,37,100,10,0,0,0,0, - 83,78,77,80,32,39,37,99,39,32,108,101, - 110,32,37,100,10,0,0,0,69,110,118,111, - 121,32,114,99,61,37,100,10,0,0,0,0, - 71,101,110,32,116,114,97,112,32,37,100,32, - 114,99,61,37,100,10,0,0,66,97,100,32, - 85,68,80,32,99,104,101,99,107,115,117,109, - 32,37,120,32,108,101,110,32,37,100,10,0, - 66,97,100,32,85,68,80,32,108,101,110,103, - 116,104,32,119,97,110,116,32,37,100,32,103, - 111,116,32,37,100,10,0,0,66,97,100,32, - 73,67,77,80,32,99,104,101,99,107,115,117, - 109,10,0,0,78,111,32,82,66,68,39,115, - 32,105,110,32,73,67,77,80,10,0,0,0, - 66,97,100,32,73,80,32,99,104,101,99,107, - 115,117,109,10,0,0,0,0,84,114,117,110, - 99,97,116,101,100,32,73,80,10,0,0,0, - 83,69,78,84,32,73,80,88,33,10,0,0, - 110,111,32,115,121,115,78,97,109,101,0,0, - 114,105,103,104,116,115,119,105,116,99,104,45, - 0,0,0,0,78,111,32,82,66,68,39,115, - 32,105,110,32,115,101,110,100,95,115,97,112, - 10,0,0,0,78,111,32,82,66,68,39,115, - 32,105,110,32,73,80,88,10,0,0,0,0, - 84,114,117,110,99,97,116,101,100,32,73,80, - 88,10,0,0,0,0,0,0,0,0,0,0, - 77,97,108,108,111,99,32,114,101,116,117,114, - 110,115,32,78,85,76,76,33,0,0,0,0, - 0,0,0,0,0,0,0,0,68,105,103,105, - 32,73,110,116,108,46,32,82,105,103,104,116, - 83,119,105,116,99,104,32,83,69,45,88,0, - 73,110,116,101,108,32,56,50,53,57,54,0, - 0,0,0,0,0,0,0,0,8,206,0,131, - 36,206,0,131,92,206,0,131,112,206,0,131, - 132,206,0,131,220,206,0,131,4,207,0,131, - 4,207,0,131,36,207,0,131,52,207,0,131, - 80,207,0,131,104,207,0,131,132,207,0,131, - 160,207,0,131,188,207,0,131,188,207,0,131, - 216,207,0,131,240,207,0,131,12,208,0,131, - 40,208,0,131,72,208,0,131,112,208,0,131, - 180,210,0,131,232,210,0,131,4,211,0,131, - 36,211,0,131,60,211,0,131,0,0,0,0, - 64,213,0,131,92,213,0,131,124,213,0,131, - 
160,213,0,131,60,214,0,131,60,214,0,131, - 60,214,0,131,188,213,0,131,216,213,0,131, - 244,213,0,131,28,214,0,131,168,214,0,131, - 60,214,0,131,168,214,0,131,168,214,0,131, - 88,214,0,131,132,214,0,131,0,0,0,0, - 36,216,0,131,68,216,0,131,104,216,0,131, - 140,216,0,131,140,216,0,131,0,0,0,0, - 248,217,0,131,12,218,0,131,40,218,0,131, - 76,218,0,131,124,218,0,131,152,218,0,131, - 200,218,0,131,228,218,0,131,88,219,0,131, - 116,219,0,131,64,224,0,131,92,224,0,131, - 124,224,0,131,152,224,0,131,184,224,0,131, - 0,0,0,0,110,111,32,115,121,115,67,111, - 110,116,97,99,116,0,0,0,110,111,32,115, - 121,115,78,97,109,101,0,0,110,111,32,115, - 121,115,76,111,99,97,116,105,111,110,0,0, - 37,115,58,37,100,58,32,102,97,105,108,101, - 100,32,97,115,115,101,114,116,105,111,110,32, - 96,37,115,39,10,0,0,0,110,117,109,114, - 101,103,115,32,60,61,32,78,86,82,65,77, - 95,78,82,69,71,83,32,38,38,32,110,117, - 109,114,101,103,115,32,62,32,48,0,0,0, - 102,105,114,115,116,114,101,103,32,60,32,78, - 86,82,65,77,95,78,82,69,71,83,32,38, - 38,32,102,105,114,115,116,114,101,103,32,62, - 61,32,48,0,0,0,0,0,10,13,69,82, - 82,79,82,32,45,0,0,0,0,0,0,0, - 192,244,0,131,8,252,0,131,8,252,0,131, - 8,252,0,131,8,252,0,131,8,252,0,131, - 8,252,0,131,8,252,0,131,192,251,0,131, - 8,252,0,131,8,252,0,131,200,251,0,131, - 212,251,0,131,212,251,0,131,212,251,0,131, - 212,251,0,131,212,251,0,131,212,251,0,131, - 212,251,0,131,212,251,0,131,212,251,0,131, - 8,252,0,131,8,252,0,131,8,252,0,131, - 8,252,0,131,8,252,0,131,8,252,0,131, - 8,252,0,131,8,252,0,131,8,252,0,131, - 8,252,0,131,8,252,0,131,200,244,0,131, - 8,252,0,131,8,252,0,131,8,252,0,131, - 8,252,0,131,8,252,0,131,8,252,0,131, - 8,252,0,131,8,252,0,131,8,252,0,131, - 8,252,0,131,8,252,0,131,8,252,0,131, - 8,252,0,131,8,252,0,131,8,252,0,131, - 8,252,0,131,8,252,0,131,8,252,0,131, - 212,249,0,131,8,252,0,131,8,252,0,131, - 8,252,0,131,8,252,0,131,8,252,0,131, - 8,252,0,131,8,252,0,131,8,252,0,131, - 8,252,0,131,20,245,0,131,8,245,0,131, - 84,247,0,131,8,252,0,131,8,252,0,131, - 8,252,0,131,8,252,0,131,8,252,0,131, - 8,252,0,131,8,252,0,131,244,251,0,131, - 8,252,0,131,8,252,0,131,48,246,0,131, - 8,252,0,131,8,252,0,131,8,252,0,131, - 240,250,0,131,8,252,0,131,132,248,0,131, - 8,252,0,131,8,252,0,131,160,249,0,131, - 72,46,1,131,228,47,1,131,152,46,1,131, - 132,47,1,131,0,47,1,131,228,47,1,131, - 228,47,1,131,228,47,1,131,228,47,1,131, - 228,47,1,131,228,47,1,131,228,47,1,131, - 228,47,1,131,228,47,1,131,228,47,1,131, - 228,47,1,131,228,47,1,131,228,47,1,131, - 228,47,1,131,228,47,1,131,228,47,1,131, - 228,47,1,131,228,47,1,131,228,47,1,131, - 228,47,1,131,228,47,1,131,228,47,1,131, - 228,47,1,131,228,47,1,131,228,47,1,131, - 228,47,1,131,228,47,1,131,228,47,1,131, - 228,47,1,131,228,47,1,131,228,47,1,131, - 228,47,1,131,228,47,1,131,228,47,1,131, - 228,47,1,131,228,47,1,131,228,47,1,131, - 228,47,1,131,228,47,1,131,228,47,1,131, - 228,47,1,131,228,47,1,131,228,47,1,131, - 228,47,1,131,228,47,1,131,228,47,1,131, - 228,47,1,131,228,47,1,131,228,47,1,131, - 228,47,1,131,228,47,1,131,228,47,1,131, - 228,47,1,131,228,47,1,131,228,47,1,131, - 228,47,1,131,228,47,1,131,144,47,1,131, - 108,46,1,131,108,46,1,131,108,46,1,131, - 152,46,1,131,152,46,1,131,228,47,1,131, - 108,46,1,131,20,50,1,131,216,50,1,131, - 56,50,1,131,224,50,1,131,104,50,1,131, - 216,50,1,131,216,50,1,131,216,50,1,131, - 216,50,1,131,216,50,1,131,216,50,1,131, - 216,50,1,131,216,50,1,131,216,50,1,131, - 216,50,1,131,216,50,1,131,216,50,1,131, - 216,50,1,131,216,50,1,131,216,50,1,131, - 
216,50,1,131,216,50,1,131,216,50,1,131, - 216,50,1,131,216,50,1,131,216,50,1,131, - 216,50,1,131,216,50,1,131,216,50,1,131, - 216,50,1,131,216,50,1,131,216,50,1,131, - 216,50,1,131,216,50,1,131,216,50,1,131, - 216,50,1,131,216,50,1,131,216,50,1,131, - 216,50,1,131,216,50,1,131,216,50,1,131, - 216,50,1,131,216,50,1,131,216,50,1,131, - 216,50,1,131,216,50,1,131,216,50,1,131, - 216,50,1,131,216,50,1,131,216,50,1,131, - 216,50,1,131,216,50,1,131,216,50,1,131, - 216,50,1,131,216,50,1,131,216,50,1,131, - 216,50,1,131,216,50,1,131,216,50,1,131, - 216,50,1,131,216,50,1,131,216,50,1,131, - 144,50,1,131,20,50,1,131,20,50,1,131, - 20,50,1,131,56,50,1,131,56,50,1,131, - 216,50,1,131,20,50,1,131,124,53,1,131, - 96,53,1,131,96,53,1,131,96,53,1,131, - 96,53,1,131,96,53,1,131,96,53,1,131, - 96,53,1,131,96,53,1,131,96,53,1,131, - 96,53,1,131,96,53,1,131,96,53,1,131, - 96,53,1,131,88,53,1,131,64,53,1,131, - 80,53,1,131,72,53,1,131,64,53,1,131, - 0,0,0,0,28,83,1,131,36,83,1,131, - 36,83,1,131,36,83,1,131,36,83,1,131, - 28,83,1,131,44,83,1,131,44,83,1,131, - 44,83,1,131,44,83,1,131,44,83,1,131, - 28,83,1,131,44,83,1,131,0,0,0,0, - 196,88,1,131,232,88,1,131,208,88,1,131, - 220,88,1,131,244,88,1,131,0,0,0,0, - 4,0,0,0,5,0,0,0,6,0,0,0, - 7,0,0,0,2,0,0,0,3,0,0,0, - 0,0,0,0,1,0,0,0,72,30,0,131, - 72,30,0,131,216,44,0,131,0,30,0,131, - 108,30,0,131,108,30,0,131,108,30,0,131, - 108,30,0,131,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,20,137,1,131, - 204,136,1,131,132,136,1,131,56,136,1,131, - 236,135,1,131,172,135,1,131,120,135,1,131, - 52,135,1,131,232,134,1,131,160,134,1,131, - 80,134,1,131,8,134,1,131,188,133,1,131, - 120,133,1,131,0,0,0,0,0,0,0,0, - 72,72,72,72,72,72,72,72,72,72,72,72, - 72,72,72,72,72,72,72,72,72,72,72,72, - 72,72,72,72,72,72,72,72,72,72,72,72, - 72,72,72,72,72,72,72,72,72,72,72,72, - 72,72,72,72,72,72,72,72,72,72,72,72, - 72,72,72,72,72,72,72,72,72,72,72,72, - 72,72,72,72,72,0,0,0,0,255,85,170, - 0,0,0,0,4,0,8,0,16,0,32,0, - 64,0,0,1,0,8,0,0,0,0,0,0, - 0,0,0,0,0,4,3,2,1,0,0,0, - 7,0,0,0,1,0,1,0,1,0,2,0, - 20,0,15,0,1,0,0,128,128,0,0,0, - 100,0,0,0,96,207,1,131,92,207,1,131, - 88,207,1,131,84,207,1,131,80,207,1,131, - 0,0,0,0,0,0,0,0,48,49,50,51, - 52,53,54,55,56,57,65,66,67,68,69,70, - 0,0,0,0,0,0,0,0,8,202,0,131, - 148,38,1,131,64,204,0,131,156,202,0,131, - 96,148,1,131,1,0,4,1,32,45,1,131, - 208,48,1,131,160,49,1,131,20,48,1,131, - 200,155,1,131,0,0,0,0,255,0,0,0, - 255,0,0,0,0,0,0,0,8,202,0,131, - 148,38,1,131,124,204,0,131,156,202,0,131, - 168,210,1,131,1,0,6,1,32,45,1,131, - 208,48,1,131,160,49,1,131,20,48,1,131, - 4,156,1,131,0,0,0,0,255,0,0,0, - 255,0,0,0,0,0,0,0,8,202,0,131, - 148,38,1,131,132,204,0,131,156,202,0,131, - 4,1,0,163,1,0,67,1,32,45,1,131, - 208,48,1,131,160,49,1,131,20,48,1,131, - 64,156,1,131,0,0,0,0,255,0,0,0, - 255,0,0,0,0,0,0,0,8,202,0,131, - 148,38,1,131,168,204,0,131,248,204,0,131, - 80,18,3,131,1,0,4,3,32,45,1,131, - 208,48,1,131,160,49,1,131,20,48,1,131, - 124,156,1,131,0,0,0,0,255,0,0,0, - 255,0,0,0,0,0,0,0,8,202,0,131, - 148,38,1,131,168,204,0,131,32,205,0,131, - 96,18,3,131,1,0,4,3,32,45,1,131, - 208,48,1,131,160,49,1,131,20,48,1,131, - 184,156,1,131,0,0,0,0,255,0,0,0, - 255,0,0,0,0,0,0,0,8,202,0,131, - 148,38,1,131,168,204,0,131,72,205,0,131, - 112,18,3,131,1,0,4,3,32,45,1,131, - 208,48,1,131,160,49,1,131,20,48,1,131, - 244,156,1,131,0,0,0,0,255,0,0,0, - 255,0,0,0,0,0,0,0,8,202,0,131, - 148,38,1,131,112,205,0,131,156,202,0,131, - 2,0,0,0,1,0,2,1,32,45,1,131, - 208,48,1,131,160,49,1,131,20,48,1,131, - 48,157,1,131,0,0,0,0,255,0,0,0, - 255,0,0,0,0,0,0,0,1,0,0,0, - 
220,155,1,131,2,0,0,0,24,156,1,131, - 3,0,0,0,84,156,1,131,4,0,0,0, - 144,156,1,131,5,0,0,0,204,156,1,131, - 6,0,0,0,8,157,1,131,7,0,0,0, - 68,157,1,131,0,0,0,0,0,0,0,0, - 8,202,0,131,148,38,1,131,120,205,0,131, - 156,202,0,131,48,211,1,131,1,0,2,1, - 32,45,1,131,208,48,1,131,160,49,1,131, - 20,48,1,131,172,157,1,131,0,0,0,0, - 255,0,0,0,255,0,0,0,0,0,0,0, - 1,0,2,1,180,208,0,131,132,205,0,131, - 164,202,0,131,76,209,0,131,0,0,0,0, - 0,0,0,0,255,0,0,0,255,0,0,0, - 0,0,0,0,1,0,4,1,180,208,0,131, - 132,205,0,131,164,202,0,131,76,209,0,131, - 124,148,1,131,0,0,0,0,255,0,0,0, - 255,0,0,0,0,0,0,0,1,0,2,1, - 180,208,0,131,132,205,0,131,164,202,0,131, - 76,209,0,131,6,0,0,0,0,0,0,0, - 255,0,0,0,255,0,0,0,0,0,0,0, - 1,0,2,1,180,208,0,131,132,205,0,131, - 164,202,0,131,76,209,0,131,220,5,0,0, - 0,0,0,0,255,0,0,0,255,0,0,0, - 0,0,0,0,1,0,66,1,180,208,0,131, - 132,205,0,131,164,202,0,131,76,209,0,131, - 0,0,0,0,0,0,0,0,255,0,0,0, - 255,0,0,0,0,0,0,0,1,0,4,1, - 180,208,0,131,132,205,0,131,164,202,0,131, - 76,209,0,131,0,0,0,0,0,0,0,0, - 255,0,0,0,255,0,0,0,0,0,0,0, - 1,0,2,3,180,208,0,131,132,205,0,131, - 228,209,0,131,76,209,0,131,0,0,0,0, - 0,0,0,0,255,0,0,0,255,0,0,0, - 0,0,0,0,1,0,2,1,180,208,0,131, - 132,205,0,131,164,202,0,131,76,209,0,131, - 0,0,0,0,0,0,0,0,255,0,0,0, - 255,0,0,0,0,0,0,0,1,0,67,1, - 180,208,0,131,132,205,0,131,164,202,0,131, - 76,209,0,131,0,0,0,0,0,0,0,0, - 255,0,0,0,255,0,0,0,0,0,0,0, - 1,0,65,1,180,208,0,131,132,205,0,131, - 164,202,0,131,76,209,0,131,0,0,0,0, - 0,0,0,0,255,0,0,0,255,0,0,0, - 0,0,0,0,1,0,65,1,180,208,0,131, - 132,205,0,131,164,202,0,131,76,209,0,131, - 0,0,0,0,0,0,0,0,255,0,0,0, - 255,0,0,0,0,0,0,0,1,0,65,1, - 180,208,0,131,132,205,0,131,164,202,0,131, - 76,209,0,131,0,0,0,0,0,0,0,0, - 255,0,0,0,255,0,0,0,0,0,0,0, - 1,0,65,1,180,208,0,131,132,205,0,131, - 164,202,0,131,76,209,0,131,0,0,0,0, - 0,0,0,0,255,0,0,0,255,0,0,0, - 0,0,0,0,1,0,65,1,180,208,0,131, - 132,205,0,131,164,202,0,131,76,209,0,131, - 0,0,0,0,0,0,0,0,255,0,0,0, - 255,0,0,0,0,0,0,0,1,0,65,1, - 180,208,0,131,132,205,0,131,164,202,0,131, - 76,209,0,131,0,0,0,0,0,0,0,0, - 255,0,0,0,255,0,0,0,0,0,0,0, - 1,0,65,1,180,208,0,131,132,205,0,131, - 164,202,0,131,76,209,0,131,0,0,0,0, - 0,0,0,0,255,0,0,0,255,0,0,0, - 0,0,0,0,1,0,65,1,180,208,0,131, - 132,205,0,131,164,202,0,131,76,209,0,131, - 0,0,0,0,0,0,0,0,255,0,0,0, - 255,0,0,0,0,0,0,0,1,0,65,1, - 180,208,0,131,132,205,0,131,164,202,0,131, - 76,209,0,131,0,0,0,0,0,0,0,0, - 255,0,0,0,255,0,0,0,0,0,0,0, - 1,0,65,1,180,208,0,131,132,205,0,131, - 164,202,0,131,76,209,0,131,0,0,0,0, - 0,0,0,0,255,0,0,0,255,0,0,0, - 0,0,0,0,1,0,65,1,180,208,0,131, - 132,205,0,131,164,202,0,131,76,209,0,131, - 0,0,0,0,0,0,0,0,255,0,0,0, - 255,0,0,0,0,0,0,0,1,0,66,1, - 180,208,0,131,132,205,0,131,164,202,0,131, - 76,209,0,131,0,0,0,0,0,0,0,0, - 255,0,0,0,255,0,0,0,0,0,0,0, - 1,0,6,1,180,208,0,131,132,205,0,131, - 164,202,0,131,76,209,0,131,0,0,0,0, - 0,0,0,0,255,0,0,0,255,0,0,0, - 0,0,0,0,1,0,0,0,232,157,1,131, - 2,0,0,0,16,158,1,131,3,0,0,0, - 56,158,1,131,4,0,0,0,96,158,1,131, - 5,0,0,0,136,158,1,131,6,0,0,0, - 176,158,1,131,7,0,0,0,216,158,1,131, - 8,0,0,0,0,159,1,131,9,0,0,0, - 40,159,1,131,10,0,0,0,80,159,1,131, - 11,0,0,0,120,159,1,131,12,0,0,0, - 160,159,1,131,13,0,0,0,200,159,1,131, - 14,0,0,0,240,159,1,131,15,0,0,0, - 24,160,1,131,16,0,0,0,64,160,1,131, - 17,0,0,0,104,160,1,131,18,0,0,0, - 144,160,1,131,19,0,0,0,184,160,1,131, - 20,0,0,0,224,160,1,131,21,0,0,0, - 8,161,1,131,22,0,0,0,48,161,1,131, - 0,0,0,0,0,0,0,0,1,0,0,0, - 32,208,1,131,0,0,0,0,0,0,0,0, - 
1,0,0,0,192,157,1,131,2,0,0,0, - 40,208,1,131,0,0,0,0,0,0,0,0, - 8,202,0,131,148,38,1,131,112,205,0,131, - 60,210,0,131,2,0,0,0,1,0,2,3, - 32,45,1,131,208,48,1,131,160,49,1,131, - 20,48,1,131,56,162,1,131,0,0,0,0, - 255,0,0,0,255,0,0,0,0,0,0,0, - 8,202,0,131,148,38,1,131,120,205,0,131, - 60,210,0,131,0,17,3,131,1,0,2,3, - 32,45,1,131,208,48,1,131,160,49,1,131, - 20,48,1,131,116,162,1,131,0,0,0,0, - 255,0,0,0,255,0,0,0,0,0,0,0, - 8,202,0,131,148,38,1,131,76,210,0,131, - 156,202,0,131,4,17,3,131,1,0,65,1, - 32,45,1,131,208,48,1,131,160,49,1,131, - 20,48,1,131,176,162,1,131,0,0,0,0, - 255,0,0,0,255,0,0,0,0,0,0,0, - 8,202,0,131,148,38,1,131,76,210,0,131, - 156,202,0,131,8,17,3,131,1,0,65,1, - 32,45,1,131,208,48,1,131,160,49,1,131, - 20,48,1,131,236,162,1,131,0,0,0,0, - 255,0,0,0,255,0,0,0,0,0,0,0, - 8,202,0,131,148,38,1,131,76,210,0,131, - 156,202,0,131,12,17,3,131,1,0,65,1, - 32,45,1,131,208,48,1,131,160,49,1,131, - 20,48,1,131,40,163,1,131,0,0,0,0, - 255,0,0,0,255,0,0,0,0,0,0,0, - 8,202,0,131,148,38,1,131,76,210,0,131, - 156,202,0,131,16,17,3,131,1,0,65,1, - 32,45,1,131,208,48,1,131,160,49,1,131, - 20,48,1,131,100,163,1,131,0,0,0,0, - 255,0,0,0,255,0,0,0,0,0,0,0, - 8,202,0,131,148,38,1,131,76,210,0,131, - 156,202,0,131,20,17,3,131,1,0,65,1, - 32,45,1,131,208,48,1,131,160,49,1,131, - 20,48,1,131,160,163,1,131,0,0,0,0, - 255,0,0,0,255,0,0,0,0,0,0,0, - 8,202,0,131,148,38,1,131,76,210,0,131, - 156,202,0,131,24,17,3,131,1,0,65,1, - 32,45,1,131,208,48,1,131,160,49,1,131, - 20,48,1,131,220,163,1,131,0,0,0,0, - 255,0,0,0,255,0,0,0,0,0,0,0, - 8,202,0,131,148,38,1,131,76,210,0,131, - 156,202,0,131,28,17,3,131,1,0,65,1, - 32,45,1,131,208,48,1,131,160,49,1,131, - 20,48,1,131,24,164,1,131,0,0,0,0, - 255,0,0,0,255,0,0,0,0,0,0,0, - 8,202,0,131,148,38,1,131,76,210,0,131, - 156,202,0,131,32,17,3,131,1,0,65,1, - 32,45,1,131,208,48,1,131,160,49,1,131, - 20,48,1,131,84,164,1,131,0,0,0,0, - 255,0,0,0,255,0,0,0,0,0,0,0, - 8,202,0,131,148,38,1,131,76,210,0,131, - 156,202,0,131,36,17,3,131,1,0,65,1, - 32,45,1,131,208,48,1,131,160,49,1,131, - 20,48,1,131,144,164,1,131,0,0,0,0, - 255,0,0,0,255,0,0,0,0,0,0,0, - 8,202,0,131,148,38,1,131,76,210,0,131, - 156,202,0,131,40,17,3,131,1,0,65,1, - 32,45,1,131,208,48,1,131,160,49,1,131, - 20,48,1,131,204,164,1,131,0,0,0,0, - 255,0,0,0,255,0,0,0,0,0,0,0, - 8,202,0,131,148,38,1,131,120,205,0,131, - 156,202,0,131,44,17,3,131,1,0,2,1, - 32,45,1,131,208,48,1,131,160,49,1,131, - 20,48,1,131,8,165,1,131,0,0,0,0, - 255,0,0,0,255,0,0,0,0,0,0,0, - 8,202,0,131,148,38,1,131,76,210,0,131, - 156,202,0,131,48,17,3,131,1,0,65,1, - 32,45,1,131,208,48,1,131,160,49,1,131, - 20,48,1,131,68,165,1,131,0,0,0,0, - 255,0,0,0,255,0,0,0,0,0,0,0, - 8,202,0,131,148,38,1,131,76,210,0,131, - 156,202,0,131,52,17,3,131,1,0,65,1, - 32,45,1,131,208,48,1,131,160,49,1,131, - 20,48,1,131,128,165,1,131,0,0,0,0, - 255,0,0,0,255,0,0,0,0,0,0,0, - 8,202,0,131,148,38,1,131,76,210,0,131, - 156,202,0,131,56,17,3,131,1,0,65,1, - 32,45,1,131,208,48,1,131,160,49,1,131, - 20,48,1,131,188,165,1,131,0,0,0,0, - 255,0,0,0,255,0,0,0,0,0,0,0, - 8,202,0,131,148,38,1,131,76,210,0,131, - 156,202,0,131,60,17,3,131,1,0,65,1, - 32,45,1,131,208,48,1,131,160,49,1,131, - 20,48,1,131,248,165,1,131,0,0,0,0, - 255,0,0,0,255,0,0,0,0,0,0,0, - 8,202,0,131,148,38,1,131,76,210,0,131, - 156,202,0,131,64,17,3,131,1,0,65,1, - 32,45,1,131,208,48,1,131,160,49,1,131, - 20,48,1,131,52,166,1,131,0,0,0,0, - 255,0,0,0,255,0,0,0,0,0,0,0, - 8,202,0,131,148,38,1,131,76,210,0,131, - 156,202,0,131,68,17,3,131,1,0,65,1, - 32,45,1,131,208,48,1,131,160,49,1,131, - 
20,48,1,131,112,166,1,131,0,0,0,0, - 255,0,0,0,255,0,0,0,0,0,0,0, - 1,0,64,1,44,202,0,131,88,210,0,131, - 164,202,0,131,120,211,0,131,0,0,0,0, - 0,0,0,0,255,0,0,0,255,0,0,0, - 0,0,0,0,1,0,2,1,44,202,0,131, - 88,210,0,131,164,202,0,131,120,211,0,131, - 0,0,0,0,0,0,0,0,255,0,0,0, - 255,0,0,0,0,0,0,0,1,0,64,1, - 44,202,0,131,88,210,0,131,164,202,0,131, - 120,211,0,131,0,0,0,0,0,0,0,0, - 255,0,0,0,255,0,0,0,0,0,0,0, - 1,0,2,1,44,202,0,131,88,210,0,131, - 164,202,0,131,120,211,0,131,0,0,0,0, - 0,0,0,0,255,0,0,0,255,0,0,0, - 0,0,0,0,1,0,2,1,44,202,0,131, - 88,210,0,131,164,202,0,131,120,211,0,131, - 0,0,0,0,0,0,0,0,255,0,0,0, - 255,0,0,0,0,0,0,0,1,0,0,0, - 172,166,1,131,2,0,0,0,212,166,1,131, - 3,0,0,0,252,166,1,131,4,0,0,0, - 36,167,1,131,5,0,0,0,76,167,1,131, - 0,0,0,0,0,0,0,0,1,0,0,0, - 56,208,1,131,0,0,0,0,0,0,0,0, - 1,0,64,3,84,212,0,131,16,212,0,131, - 52,212,0,131,172,212,0,131,0,0,0,0, - 0,0,0,0,255,0,0,0,255,0,0,0, - 0,0,0,0,1,0,2,3,84,212,0,131, - 16,212,0,131,52,212,0,131,172,212,0,131, - 0,0,0,0,0,0,0,0,255,0,0,0, - 255,0,0,0,0,0,0,0,1,0,2,3, - 84,212,0,131,16,212,0,131,52,212,0,131, - 172,212,0,131,0,0,0,0,0,0,0,0, - 255,0,0,0,255,0,0,0,0,0,0,0, - 1,0,2,3,84,212,0,131,16,212,0,131, - 52,212,0,131,172,212,0,131,0,0,0,0, - 0,0,0,0,255,0,0,0,255,0,0,0, - 0,0,0,0,1,0,2,3,84,212,0,131, - 16,212,0,131,52,212,0,131,172,212,0,131, - 0,0,0,0,0,0,0,0,255,0,0,0, - 255,0,0,0,0,0,0,0,1,0,2,3, - 84,212,0,131,16,212,0,131,52,212,0,131, - 172,212,0,131,0,0,0,0,0,0,0,0, - 255,0,0,0,255,0,0,0,0,0,0,0, - 1,0,64,3,84,212,0,131,16,212,0,131, - 52,212,0,131,172,212,0,131,0,0,0,0, - 0,0,0,0,255,0,0,0,255,0,0,0, - 0,0,0,0,1,0,2,3,84,212,0,131, - 16,212,0,131,52,212,0,131,172,212,0,131, - 0,0,0,0,0,0,0,0,255,0,0,0, - 255,0,0,0,0,0,0,0,1,0,2,1, - 84,212,0,131,16,212,0,131,164,202,0,131, - 172,212,0,131,0,0,0,0,0,0,0,0, - 255,0,0,0,255,0,0,0,0,0,0,0, - 1,0,2,3,84,212,0,131,16,212,0,131, - 52,212,0,131,172,212,0,131,0,0,0,0, - 0,0,0,0,255,0,0,0,255,0,0,0, - 0,0,0,0,1,0,64,3,84,212,0,131, - 16,212,0,131,52,212,0,131,172,212,0,131, - 0,0,0,0,0,0,0,0,255,0,0,0, - 255,0,0,0,0,0,0,0,1,0,2,3, - 84,212,0,131,16,212,0,131,52,212,0,131, - 172,212,0,131,0,0,0,0,0,0,0,0, - 255,0,0,0,255,0,0,0,0,0,0,0, - 1,0,6,1,84,212,0,131,16,212,0,131, - 164,202,0,131,172,212,0,131,0,0,0,0, - 0,0,0,0,255,0,0,0,255,0,0,0, - 0,0,0,0,1,0,0,0,180,167,1,131, - 2,0,0,0,220,167,1,131,3,0,0,0, - 4,168,1,131,4,0,0,0,44,168,1,131, - 5,0,0,0,84,168,1,131,6,0,0,0, - 124,168,1,131,7,0,0,0,164,168,1,131, - 8,0,0,0,204,168,1,131,9,0,0,0, - 244,168,1,131,10,0,0,0,28,169,1,131, - 11,0,0,0,68,169,1,131,12,0,0,0, - 108,169,1,131,13,0,0,0,148,169,1,131, - 0,0,0,0,0,0,0,0,1,0,0,0, - 72,208,1,131,0,0,0,0,0,0,0,0, - 1,0,2,3,84,212,0,131,16,212,0,131, - 52,212,0,131,172,212,0,131,0,0,0,0, - 0,0,0,0,255,0,0,0,255,0,0,0, - 0,0,0,0,1,0,4,3,84,212,0,131, - 16,212,0,131,52,212,0,131,172,212,0,131, - 0,0,0,0,0,0,0,0,255,0,0,0, - 255,0,0,0,0,0,0,0,1,0,64,3, - 84,212,0,131,16,212,0,131,52,212,0,131, - 172,212,0,131,0,0,0,0,0,0,0,0, - 255,0,0,0,255,0,0,0,0,0,0,0, - 1,0,2,3,84,212,0,131,16,212,0,131, - 52,212,0,131,172,212,0,131,0,0,0,0, - 0,0,0,0,255,0,0,0,255,0,0,0, - 0,0,0,0,1,0,0,0,60,170,1,131, - 2,0,0,0,100,170,1,131,3,0,0,0, - 140,170,1,131,4,0,0,0,180,170,1,131, - 0,0,0,0,0,0,0,0,1,0,0,0, - 88,208,1,131,0,0,0,0,0,0,0,0, - 8,202,0,131,148,38,1,131,76,210,0,131, - 156,202,0,131,72,17,3,131,1,0,65,1, - 32,45,1,131,208,48,1,131,160,49,1,131, - 20,48,1,131,20,171,1,131,0,0,0,0, - 255,0,0,0,255,0,0,0,0,0,0,0, - 1,0,0,0,76,162,1,131,2,0,0,0, - 
136,162,1,131,3,0,0,0,196,162,1,131, - 4,0,0,0,0,163,1,131,5,0,0,0, - 60,163,1,131,6,0,0,0,120,163,1,131, - 7,0,0,0,180,163,1,131,8,0,0,0, - 240,163,1,131,9,0,0,0,44,164,1,131, - 10,0,0,0,104,164,1,131,11,0,0,0, - 164,164,1,131,12,0,0,0,224,164,1,131, - 13,0,0,0,28,165,1,131,14,0,0,0, - 88,165,1,131,15,0,0,0,148,165,1,131, - 16,0,0,0,208,165,1,131,17,0,0,0, - 12,166,1,131,18,0,0,0,72,166,1,131, - 19,0,0,0,132,166,1,131,20,0,0,0, - 64,208,1,131,21,0,0,0,80,208,1,131, - 22,0,0,0,96,208,1,131,23,0,0,0, - 40,171,1,131,0,0,0,0,0,0,0,0, - 8,202,0,131,148,38,1,131,76,210,0,131, - 156,202,0,131,144,16,3,131,1,0,65,1, - 32,45,1,131,208,48,1,131,160,49,1,131, - 20,48,1,131,16,172,1,131,0,0,0,0, - 255,0,0,0,255,0,0,0,0,0,0,0, - 8,202,0,131,148,38,1,131,76,210,0,131, - 156,202,0,131,148,16,3,131,1,0,65,1, - 32,45,1,131,208,48,1,131,160,49,1,131, - 20,48,1,131,76,172,1,131,0,0,0,0, - 255,0,0,0,255,0,0,0,0,0,0,0, - 8,202,0,131,148,38,1,131,76,210,0,131, - 156,202,0,131,152,16,3,131,1,0,65,1, - 32,45,1,131,208,48,1,131,160,49,1,131, - 20,48,1,131,136,172,1,131,0,0,0,0, - 255,0,0,0,255,0,0,0,0,0,0,0, - 8,202,0,131,148,38,1,131,76,210,0,131, - 156,202,0,131,156,16,3,131,1,0,65,1, - 32,45,1,131,208,48,1,131,160,49,1,131, - 20,48,1,131,196,172,1,131,0,0,0,0, - 255,0,0,0,255,0,0,0,0,0,0,0, - 8,202,0,131,148,38,1,131,76,210,0,131, - 156,202,0,131,160,16,3,131,1,0,65,1, - 32,45,1,131,208,48,1,131,160,49,1,131, - 20,48,1,131,0,173,1,131,0,0,0,0, - 255,0,0,0,255,0,0,0,0,0,0,0, - 8,202,0,131,148,38,1,131,76,210,0,131, - 156,202,0,131,164,16,3,131,1,0,65,1, - 32,45,1,131,208,48,1,131,160,49,1,131, - 20,48,1,131,60,173,1,131,0,0,0,0, - 255,0,0,0,255,0,0,0,0,0,0,0, - 8,202,0,131,148,38,1,131,76,210,0,131, - 156,202,0,131,168,16,3,131,1,0,65,1, - 32,45,1,131,208,48,1,131,160,49,1,131, - 20,48,1,131,120,173,1,131,0,0,0,0, - 255,0,0,0,255,0,0,0,0,0,0,0, - 8,202,0,131,148,38,1,131,76,210,0,131, - 156,202,0,131,172,16,3,131,1,0,65,1, - 32,45,1,131,208,48,1,131,160,49,1,131, - 20,48,1,131,180,173,1,131,0,0,0,0, - 255,0,0,0,255,0,0,0,0,0,0,0, - 8,202,0,131,148,38,1,131,76,210,0,131, - 156,202,0,131,176,16,3,131,1,0,65,1, - 32,45,1,131,208,48,1,131,160,49,1,131, - 20,48,1,131,240,173,1,131,0,0,0,0, - 255,0,0,0,255,0,0,0,0,0,0,0, - 8,202,0,131,148,38,1,131,76,210,0,131, - 156,202,0,131,180,16,3,131,1,0,65,1, - 32,45,1,131,208,48,1,131,160,49,1,131, - 20,48,1,131,44,174,1,131,0,0,0,0, - 255,0,0,0,255,0,0,0,0,0,0,0, - 8,202,0,131,148,38,1,131,76,210,0,131, - 156,202,0,131,184,16,3,131,1,0,65,1, - 32,45,1,131,208,48,1,131,160,49,1,131, - 20,48,1,131,104,174,1,131,0,0,0,0, - 255,0,0,0,255,0,0,0,0,0,0,0, - 8,202,0,131,148,38,1,131,76,210,0,131, - 156,202,0,131,188,16,3,131,1,0,65,1, - 32,45,1,131,208,48,1,131,160,49,1,131, - 20,48,1,131,164,174,1,131,0,0,0,0, - 255,0,0,0,255,0,0,0,0,0,0,0, - 8,202,0,131,148,38,1,131,76,210,0,131, - 156,202,0,131,192,16,3,131,1,0,65,1, - 32,45,1,131,208,48,1,131,160,49,1,131, - 20,48,1,131,224,174,1,131,0,0,0,0, - 255,0,0,0,255,0,0,0,0,0,0,0, - 8,202,0,131,148,38,1,131,76,210,0,131, - 156,202,0,131,196,16,3,131,1,0,65,1, - 32,45,1,131,208,48,1,131,160,49,1,131, - 20,48,1,131,28,175,1,131,0,0,0,0, - 255,0,0,0,255,0,0,0,0,0,0,0, - 8,202,0,131,148,38,1,131,76,210,0,131, - 156,202,0,131,200,16,3,131,1,0,65,1, - 32,45,1,131,208,48,1,131,160,49,1,131, - 20,48,1,131,88,175,1,131,0,0,0,0, - 255,0,0,0,255,0,0,0,0,0,0,0, - 8,202,0,131,148,38,1,131,76,210,0,131, - 156,202,0,131,204,16,3,131,1,0,65,1, - 32,45,1,131,208,48,1,131,160,49,1,131, - 20,48,1,131,148,175,1,131,0,0,0,0, - 255,0,0,0,255,0,0,0,0,0,0,0, - 
8,202,0,131,148,38,1,131,76,210,0,131, - 156,202,0,131,208,16,3,131,1,0,65,1, - 32,45,1,131,208,48,1,131,160,49,1,131, - 20,48,1,131,208,175,1,131,0,0,0,0, - 255,0,0,0,255,0,0,0,0,0,0,0, - 8,202,0,131,148,38,1,131,76,210,0,131, - 156,202,0,131,212,16,3,131,1,0,65,1, - 32,45,1,131,208,48,1,131,160,49,1,131, - 20,48,1,131,12,176,1,131,0,0,0,0, - 255,0,0,0,255,0,0,0,0,0,0,0, - 8,202,0,131,148,38,1,131,76,210,0,131, - 156,202,0,131,216,16,3,131,1,0,65,1, - 32,45,1,131,208,48,1,131,160,49,1,131, - 20,48,1,131,72,176,1,131,0,0,0,0, - 255,0,0,0,255,0,0,0,0,0,0,0, - 8,202,0,131,148,38,1,131,76,210,0,131, - 156,202,0,131,220,16,3,131,1,0,65,1, - 32,45,1,131,208,48,1,131,160,49,1,131, - 20,48,1,131,132,176,1,131,0,0,0,0, - 255,0,0,0,255,0,0,0,0,0,0,0, - 8,202,0,131,148,38,1,131,76,210,0,131, - 156,202,0,131,224,16,3,131,1,0,65,1, - 32,45,1,131,208,48,1,131,160,49,1,131, - 20,48,1,131,192,176,1,131,0,0,0,0, - 255,0,0,0,255,0,0,0,0,0,0,0, - 8,202,0,131,148,38,1,131,76,210,0,131, - 156,202,0,131,228,16,3,131,1,0,65,1, - 32,45,1,131,208,48,1,131,160,49,1,131, - 20,48,1,131,252,176,1,131,0,0,0,0, - 255,0,0,0,255,0,0,0,0,0,0,0, - 8,202,0,131,148,38,1,131,76,210,0,131, - 156,202,0,131,232,16,3,131,1,0,65,1, - 32,45,1,131,208,48,1,131,160,49,1,131, - 20,48,1,131,56,177,1,131,0,0,0,0, - 255,0,0,0,255,0,0,0,0,0,0,0, - 8,202,0,131,148,38,1,131,76,210,0,131, - 156,202,0,131,236,16,3,131,1,0,65,1, - 32,45,1,131,208,48,1,131,160,49,1,131, - 20,48,1,131,116,177,1,131,0,0,0,0, - 255,0,0,0,255,0,0,0,0,0,0,0, - 8,202,0,131,148,38,1,131,76,210,0,131, - 156,202,0,131,240,16,3,131,1,0,65,1, - 32,45,1,131,208,48,1,131,160,49,1,131, - 20,48,1,131,176,177,1,131,0,0,0,0, - 255,0,0,0,255,0,0,0,0,0,0,0, - 8,202,0,131,148,38,1,131,76,210,0,131, - 156,202,0,131,244,16,3,131,1,0,65,1, - 32,45,1,131,208,48,1,131,160,49,1,131, - 20,48,1,131,236,177,1,131,0,0,0,0, - 255,0,0,0,255,0,0,0,0,0,0,0, - 1,0,0,0,36,172,1,131,2,0,0,0, - 96,172,1,131,3,0,0,0,156,172,1,131, - 4,0,0,0,216,172,1,131,5,0,0,0, - 20,173,1,131,6,0,0,0,80,173,1,131, - 7,0,0,0,140,173,1,131,8,0,0,0, - 200,173,1,131,9,0,0,0,4,174,1,131, - 10,0,0,0,64,174,1,131,11,0,0,0, - 124,174,1,131,12,0,0,0,184,174,1,131, - 13,0,0,0,244,174,1,131,14,0,0,0, - 48,175,1,131,15,0,0,0,108,175,1,131, - 16,0,0,0,168,175,1,131,17,0,0,0, - 228,175,1,131,18,0,0,0,32,176,1,131, - 19,0,0,0,92,176,1,131,20,0,0,0, - 152,176,1,131,21,0,0,0,212,176,1,131, - 22,0,0,0,16,177,1,131,23,0,0,0, - 76,177,1,131,24,0,0,0,136,177,1,131, - 25,0,0,0,196,177,1,131,26,0,0,0, - 0,178,1,131,0,0,0,0,0,0,0,0, - 8,202,0,131,148,38,1,131,76,210,0,131, - 156,202,0,131,112,17,3,131,1,0,65,1, - 32,45,1,131,208,48,1,131,160,49,1,131, - 20,48,1,131,0,179,1,131,0,0,0,0, - 255,0,0,0,255,0,0,0,0,0,0,0, - 8,202,0,131,148,38,1,131,76,210,0,131, - 156,202,0,131,116,17,3,131,1,0,65,1, - 32,45,1,131,208,48,1,131,160,49,1,131, - 20,48,1,131,60,179,1,131,0,0,0,0, - 255,0,0,0,255,0,0,0,0,0,0,0, - 8,202,0,131,148,38,1,131,76,210,0,131, - 156,202,0,131,120,17,3,131,1,0,65,1, - 32,45,1,131,208,48,1,131,160,49,1,131, - 20,48,1,131,120,179,1,131,0,0,0,0, - 255,0,0,0,255,0,0,0,0,0,0,0, - 8,202,0,131,148,38,1,131,76,210,0,131, - 156,202,0,131,124,17,3,131,1,0,65,1, - 32,45,1,131,208,48,1,131,160,49,1,131, - 20,48,1,131,180,179,1,131,0,0,0,0, - 255,0,0,0,255,0,0,0,0,0,0,0, - 8,202,0,131,148,38,1,131,200,212,0,131, - 156,202,0,131,220,5,0,163,1,0,64,1, - 32,45,1,131,208,48,1,131,160,49,1,131, - 20,48,1,131,240,179,1,131,0,0,0,0, - 255,0,0,0,255,0,0,0,0,0,0,0, - 8,202,0,131,148,38,1,131,112,205,0,131, - 156,202,0,131,161,0,0,0,1,0,2,1, - 
32,45,1,131,208,48,1,131,160,49,1,131, - 20,48,1,131,44,180,1,131,0,0,0,0, - 255,0,0,0,255,0,0,0,0,0,0,0, - 1,0,0,0,4,180,1,131,2,0,0,0, - 64,180,1,131,0,0,0,0,0,0,0,0, - 1,0,0,0,120,208,1,131,0,0,0,0, - 0,0,0,0,1,0,0,0,20,179,1,131, - 2,0,0,0,80,179,1,131,3,0,0,0, - 140,179,1,131,4,0,0,0,200,179,1,131, - 5,0,0,0,128,208,1,131,0,0,0,0, - 0,0,0,0,1,0,2,1,44,202,0,131, - 208,212,0,131,164,202,0,131,196,214,0,131, - 0,0,0,0,0,0,0,0,255,0,0,0, - 255,0,0,0,0,0,0,0,1,0,65,1, - 44,202,0,131,208,212,0,131,164,202,0,131, - 196,214,0,131,0,0,0,0,0,0,0,0, - 255,0,0,0,255,0,0,0,0,0,0,0, - 1,0,65,1,44,202,0,131,208,212,0,131, - 164,202,0,131,196,214,0,131,0,0,0,0, - 0,0,0,0,255,0,0,0,255,0,0,0, - 0,0,0,0,1,0,65,1,44,202,0,131, - 208,212,0,131,164,202,0,131,196,214,0,131, - 0,0,0,0,0,0,0,0,255,0,0,0, - 255,0,0,0,0,0,0,0,1,0,65,1, - 44,202,0,131,208,212,0,131,164,202,0,131, - 196,214,0,131,0,0,0,0,0,0,0,0, - 255,0,0,0,255,0,0,0,0,0,0,0, - 1,0,65,1,44,202,0,131,208,212,0,131, - 164,202,0,131,196,214,0,131,0,0,0,0, - 0,0,0,0,255,0,0,0,255,0,0,0, - 0,0,0,0,1,0,65,1,44,202,0,131, - 208,212,0,131,164,202,0,131,196,214,0,131, - 0,0,0,0,0,0,0,0,255,0,0,0, - 255,0,0,0,0,0,0,0,1,0,65,1, - 44,202,0,131,208,212,0,131,164,202,0,131, - 196,214,0,131,0,0,0,0,0,0,0,0, - 255,0,0,0,255,0,0,0,0,0,0,0, - 1,0,65,1,44,202,0,131,208,212,0,131, - 164,202,0,131,196,214,0,131,0,0,0,0, - 0,0,0,0,255,0,0,0,255,0,0,0, - 0,0,0,0,1,0,65,1,44,202,0,131, - 208,212,0,131,164,202,0,131,196,214,0,131, - 0,0,0,0,0,0,0,0,255,0,0,0, - 255,0,0,0,0,0,0,0,1,0,65,1, - 44,202,0,131,208,212,0,131,164,202,0,131, - 196,214,0,131,0,0,0,0,0,0,0,0, - 255,0,0,0,255,0,0,0,0,0,0,0, - 1,0,65,1,44,202,0,131,208,212,0,131, - 164,202,0,131,196,214,0,131,0,0,0,0, - 0,0,0,0,255,0,0,0,255,0,0,0, - 0,0,0,0,1,0,65,1,44,202,0,131, - 208,212,0,131,164,202,0,131,196,214,0,131, - 0,0,0,0,0,0,0,0,255,0,0,0, - 255,0,0,0,0,0,0,0,1,0,6,1, - 44,202,0,131,208,212,0,131,164,202,0,131, - 196,214,0,131,0,0,0,0,0,0,0,0, - 255,0,0,0,255,0,0,0,0,0,0,0, - 1,0,0,0,192,180,1,131,2,0,0,0, - 232,180,1,131,3,0,0,0,16,181,1,131, - 4,0,0,0,56,181,1,131,5,0,0,0, - 96,181,1,131,6,0,0,0,136,181,1,131, - 7,0,0,0,176,181,1,131,8,0,0,0, - 216,181,1,131,9,0,0,0,0,182,1,131, - 10,0,0,0,40,182,1,131,11,0,0,0, - 80,182,1,131,13,0,0,0,120,182,1,131, - 16,0,0,0,160,182,1,131,17,0,0,0, - 200,182,1,131,0,0,0,0,0,0,0,0, - 1,0,0,0,144,208,1,131,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,1,0,0,0, - 160,208,1,131,2,0,0,0,168,208,1,131, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 1,0,0,0,184,208,1,131,2,0,0,0, - 192,208,1,131,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 1,0,0,0,208,208,1,131,2,0,0,0, - 216,208,1,131,3,0,0,0,224,208,1,131, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 1,0,0,0,240,208,1,131,2,0,0,0, - 248,208,1,131,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,1,0,0,0, - 8,209,1,131,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,1,0,0,0,24,209,1,131, - 2,0,0,0,32,209,1,131,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 1,0,0,0,48,209,1,131,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 1,0,0,0,64,209,1,131,0,0,0,0, - 0,0,0,0,1,0,0,0,232,208,1,131, - 2,0,0,0,0,209,1,131,3,0,0,0, - 16,209,1,131,4,0,0,0,40,209,1,131, - 5,0,0,0,56,209,1,131,6,0,0,0, - 72,209,1,131,0,0,0,0,0,0,0,0, - 2,0,0,0,152,208,1,131,6,0,0,0, - 176,208,1,131,7,0,0,0,200,208,1,131, - 8,0,0,0,80,209,1,131,0,0,0,0, - 0,0,0,0,7,0,0,0,88,209,1,131, - 0,0,0,0,0,0,0,0,8,202,0,131, - 148,38,1,131,92,215,0,131,156,202,0,131, - 128,17,3,131,1,0,65,1,32,45,1,131, - 
208,48,1,131,160,49,1,131,20,48,1,131, - 8,185,1,131,0,0,0,0,255,0,0,0, - 255,0,0,0,0,0,0,0,8,202,0,131, - 148,38,1,131,92,215,0,131,156,202,0,131, - 144,17,3,131,1,0,65,1,32,45,1,131, - 208,48,1,131,160,49,1,131,20,48,1,131, - 68,185,1,131,0,0,0,0,255,0,0,0, - 255,0,0,0,0,0,0,0,8,202,0,131, - 148,38,1,131,92,215,0,131,156,202,0,131, - 148,17,3,131,1,0,65,1,32,45,1,131, - 208,48,1,131,160,49,1,131,20,48,1,131, - 128,185,1,131,0,0,0,0,255,0,0,0, - 255,0,0,0,0,0,0,0,8,202,0,131, - 148,38,1,131,92,215,0,131,156,202,0,131, - 132,17,3,131,1,0,65,1,32,45,1,131, - 208,48,1,131,160,49,1,131,20,48,1,131, - 188,185,1,131,0,0,0,0,255,0,0,0, - 255,0,0,0,0,0,0,0,8,202,0,131, - 148,38,1,131,92,215,0,131,156,202,0,131, - 136,17,3,131,1,0,65,1,32,45,1,131, - 208,48,1,131,160,49,1,131,20,48,1,131, - 248,185,1,131,0,0,0,0,255,0,0,0, - 255,0,0,0,0,0,0,0,8,202,0,131, - 148,38,1,131,92,215,0,131,156,202,0,131, - 140,17,3,131,1,0,65,1,32,45,1,131, - 208,48,1,131,160,49,1,131,20,48,1,131, - 52,186,1,131,0,0,0,0,255,0,0,0, - 255,0,0,0,0,0,0,0,8,202,0,131, - 148,38,1,131,92,215,0,131,156,202,0,131, - 156,17,3,131,1,0,65,1,32,45,1,131, - 208,48,1,131,160,49,1,131,20,48,1,131, - 112,186,1,131,0,0,0,0,255,0,0,0, - 255,0,0,0,0,0,0,0,8,202,0,131, - 148,38,1,131,92,215,0,131,156,202,0,131, - 160,17,3,131,1,0,65,1,32,45,1,131, - 208,48,1,131,160,49,1,131,20,48,1,131, - 172,186,1,131,0,0,0,0,255,0,0,0, - 255,0,0,0,0,0,0,0,8,202,0,131, - 148,38,1,131,92,215,0,131,156,202,0,131, - 164,17,3,131,1,0,65,1,32,45,1,131, - 208,48,1,131,160,49,1,131,20,48,1,131, - 232,186,1,131,0,0,0,0,255,0,0,0, - 255,0,0,0,0,0,0,0,8,202,0,131, - 148,38,1,131,92,215,0,131,156,202,0,131, - 168,17,3,131,1,0,65,1,32,45,1,131, - 208,48,1,131,160,49,1,131,20,48,1,131, - 36,187,1,131,0,0,0,0,255,0,0,0, - 255,0,0,0,0,0,0,0,8,202,0,131, - 148,38,1,131,92,215,0,131,156,202,0,131, - 172,17,3,131,1,0,65,1,32,45,1,131, - 208,48,1,131,160,49,1,131,20,48,1,131, - 96,187,1,131,0,0,0,0,255,0,0,0, - 255,0,0,0,0,0,0,0,8,202,0,131, - 148,38,1,131,92,215,0,131,156,202,0,131, - 176,17,3,131,1,0,65,1,32,45,1,131, - 208,48,1,131,160,49,1,131,20,48,1,131, - 156,187,1,131,0,0,0,0,255,0,0,0, - 255,0,0,0,0,0,0,0,8,202,0,131, - 148,38,1,131,92,215,0,131,156,202,0,131, - 180,17,3,131,1,0,65,1,32,45,1,131, - 208,48,1,131,160,49,1,131,20,48,1,131, - 216,187,1,131,0,0,0,0,255,0,0,0, - 255,0,0,0,0,0,0,0,8,202,0,131, - 148,38,1,131,92,215,0,131,156,202,0,131, - 184,17,3,131,1,0,65,1,32,45,1,131, - 208,48,1,131,160,49,1,131,20,48,1,131, - 20,188,1,131,0,0,0,0,255,0,0,0, - 255,0,0,0,0,0,0,0,8,202,0,131, - 148,38,1,131,92,215,0,131,156,202,0,131, - 188,17,3,131,1,0,65,1,32,45,1,131, - 208,48,1,131,160,49,1,131,20,48,1,131, - 80,188,1,131,0,0,0,0,255,0,0,0, - 255,0,0,0,0,0,0,0,8,202,0,131, - 148,38,1,131,92,215,0,131,156,202,0,131, - 192,17,3,131,1,0,65,1,32,45,1,131, - 208,48,1,131,160,49,1,131,20,48,1,131, - 140,188,1,131,0,0,0,0,255,0,0,0, - 255,0,0,0,0,0,0,0,8,202,0,131, - 148,38,1,131,92,215,0,131,156,202,0,131, - 196,17,3,131,1,0,65,1,32,45,1,131, - 208,48,1,131,160,49,1,131,20,48,1,131, - 200,188,1,131,0,0,0,0,255,0,0,0, - 255,0,0,0,0,0,0,0,8,202,0,131, - 148,38,1,131,92,215,0,131,156,202,0,131, - 200,17,3,131,1,0,65,1,32,45,1,131, - 208,48,1,131,160,49,1,131,20,48,1,131, - 4,189,1,131,0,0,0,0,255,0,0,0, - 255,0,0,0,0,0,0,0,8,202,0,131, - 148,38,1,131,92,215,0,131,156,202,0,131, - 204,17,3,131,1,0,65,1,32,45,1,131, - 208,48,1,131,160,49,1,131,20,48,1,131, - 64,189,1,131,0,0,0,0,255,0,0,0, - 255,0,0,0,0,0,0,0,8,202,0,131, - 148,38,1,131,92,215,0,131,156,202,0,131, - 
208,17,3,131,1,0,65,1,32,45,1,131, - 208,48,1,131,160,49,1,131,20,48,1,131, - 124,189,1,131,0,0,0,0,255,0,0,0, - 255,0,0,0,0,0,0,0,8,202,0,131, - 148,38,1,131,92,215,0,131,156,202,0,131, - 212,17,3,131,1,0,65,1,32,45,1,131, - 208,48,1,131,160,49,1,131,20,48,1,131, - 184,189,1,131,0,0,0,0,255,0,0,0, - 255,0,0,0,0,0,0,0,8,202,0,131, - 148,38,1,131,92,215,0,131,156,202,0,131, - 220,17,3,131,1,0,65,1,32,45,1,131, - 208,48,1,131,160,49,1,131,20,48,1,131, - 244,189,1,131,0,0,0,0,255,0,0,0, - 255,0,0,0,0,0,0,0,8,202,0,131, - 148,38,1,131,92,215,0,131,156,202,0,131, - 224,17,3,131,1,0,65,1,32,45,1,131, - 208,48,1,131,160,49,1,131,20,48,1,131, - 48,190,1,131,0,0,0,0,255,0,0,0, - 255,0,0,0,0,0,0,0,8,202,0,131, - 148,38,1,131,92,215,0,131,156,202,0,131, - 228,17,3,131,1,0,65,1,32,45,1,131, - 208,48,1,131,160,49,1,131,20,48,1,131, - 108,190,1,131,0,0,0,0,255,0,0,0, - 255,0,0,0,0,0,0,0,8,202,0,131, - 148,38,1,131,92,215,0,131,156,202,0,131, - 232,17,3,131,1,0,65,1,32,45,1,131, - 208,48,1,131,160,49,1,131,20,48,1,131, - 168,190,1,131,0,0,0,0,255,0,0,0, - 255,0,0,0,0,0,0,0,8,202,0,131, - 148,38,1,131,92,215,0,131,156,202,0,131, - 236,17,3,131,1,0,65,1,32,45,1,131, - 208,48,1,131,160,49,1,131,20,48,1,131, - 228,190,1,131,0,0,0,0,255,0,0,0, - 255,0,0,0,0,0,0,0,8,202,0,131, - 148,38,1,131,92,215,0,131,156,202,0,131, - 240,17,3,131,1,0,65,1,32,45,1,131, - 208,48,1,131,160,49,1,131,20,48,1,131, - 32,191,1,131,0,0,0,0,255,0,0,0, - 255,0,0,0,0,0,0,0,8,202,0,131, - 148,38,1,131,104,215,0,131,140,215,0,131, - 0,0,0,0,1,0,2,3,32,45,1,131, - 208,48,1,131,160,49,1,131,20,48,1,131, - 92,191,1,131,0,0,0,0,255,0,0,0, - 255,0,0,0,0,0,0,0,1,0,0,0, - 28,185,1,131,2,0,0,0,88,185,1,131, - 3,0,0,0,148,185,1,131,4,0,0,0, - 208,185,1,131,5,0,0,0,12,186,1,131, - 6,0,0,0,72,186,1,131,8,0,0,0, - 132,186,1,131,9,0,0,0,192,186,1,131, - 10,0,0,0,252,186,1,131,11,0,0,0, - 56,187,1,131,12,0,0,0,116,187,1,131, - 13,0,0,0,176,187,1,131,14,0,0,0, - 236,187,1,131,15,0,0,0,40,188,1,131, - 16,0,0,0,100,188,1,131,17,0,0,0, - 160,188,1,131,18,0,0,0,220,188,1,131, - 19,0,0,0,24,189,1,131,20,0,0,0, - 84,189,1,131,21,0,0,0,144,189,1,131, - 22,0,0,0,204,189,1,131,24,0,0,0, - 8,190,1,131,25,0,0,0,68,190,1,131, - 26,0,0,0,128,190,1,131,27,0,0,0, - 188,190,1,131,28,0,0,0,248,190,1,131, - 29,0,0,0,52,191,1,131,30,0,0,0, - 112,191,1,131,0,0,0,0,0,0,0,0, - 8,202,0,131,148,38,1,131,180,215,0,131, - 156,202,0,131,218,12,3,131,1,0,4,1, - 32,45,1,131,208,48,1,131,160,49,1,131, - 20,48,1,131,128,192,1,131,0,0,0,0, - 255,0,0,0,255,0,0,0,0,0,0,0, - 8,202,0,131,148,38,1,131,120,205,0,131, - 156,202,0,131,40,211,1,131,1,0,2,1, - 32,45,1,131,208,48,1,131,160,49,1,131, - 20,48,1,131,188,192,1,131,0,0,0,0, - 255,0,0,0,255,0,0,0,0,0,0,0, - 8,202,0,131,148,38,1,131,112,205,0,131, - 156,202,0,131,2,0,0,0,1,0,2,1, - 32,45,1,131,208,48,1,131,160,49,1,131, - 20,48,1,131,248,192,1,131,0,0,0,0, - 255,0,0,0,255,0,0,0,0,0,0,0, - 1,0,2,1,44,202,0,131,200,215,0,131, - 164,202,0,131,196,216,0,131,0,0,0,0, - 0,0,0,0,255,0,0,0,255,0,0,0, - 0,0,0,0,1,0,2,1,44,202,0,131, - 200,215,0,131,164,202,0,131,196,216,0,131, - 0,0,0,0,0,0,0,0,255,0,0,0, - 255,0,0,0,0,0,0,0,1,0,6,1, - 44,202,0,131,200,215,0,131,164,202,0,131, - 196,216,0,131,0,0,0,0,0,0,0,0, - 255,0,0,0,255,0,0,0,0,0,0,0, - 1,0,65,1,44,202,0,131,200,215,0,131, - 164,202,0,131,196,216,0,131,0,0,0,0, - 0,0,0,0,255,0,0,0,255,0,0,0, - 0,0,0,0,1,0,65,1,44,202,0,131, - 200,215,0,131,164,202,0,131,196,216,0,131, - 0,0,0,0,0,0,0,0,255,0,0,0, - 255,0,0,0,0,0,0,0,1,0,0,0, - 52,193,1,131,2,0,0,0,92,193,1,131, - 
3,0,0,0,132,193,1,131,4,0,0,0, - 172,193,1,131,5,0,0,0,212,193,1,131, - 0,0,0,0,0,0,0,0,1,0,0,0, - 112,209,1,131,0,0,0,0,0,0,0,0, - 1,0,0,0,148,192,1,131,2,0,0,0, - 208,192,1,131,3,0,0,0,12,193,1,131, - 4,0,0,0,120,209,1,131,0,0,0,0, - 0,0,0,0,8,202,0,131,148,38,1,131, - 112,205,0,131,156,202,0,131,3,0,0,0, - 1,0,2,1,32,45,1,131,208,48,1,131, - 160,49,1,131,20,48,1,131,100,194,1,131, - 0,0,0,0,255,0,0,0,255,0,0,0, - 0,0,0,0,8,202,0,131,148,38,1,131, - 92,217,0,131,60,210,0,131,216,12,3,131, - 1,0,2,3,32,45,1,131,208,48,1,131, - 160,49,1,131,20,48,1,131,160,194,1,131, - 0,0,0,0,255,0,0,0,255,0,0,0, - 0,0,0,0,8,202,0,131,148,38,1,131, - 104,217,0,131,156,202,0,131,0,0,0,0, - 1,0,67,1,32,45,1,131,208,48,1,131, - 160,49,1,131,20,48,1,131,220,194,1,131, - 0,0,0,0,255,0,0,0,255,0,0,0, - 0,0,0,0,8,202,0,131,148,38,1,131, - 104,217,0,131,156,202,0,131,0,0,0,0, - 1,0,65,1,32,45,1,131,208,48,1,131, - 160,49,1,131,20,48,1,131,24,195,1,131, - 0,0,0,0,255,0,0,0,255,0,0,0, - 0,0,0,0,8,202,0,131,148,38,1,131, - 112,217,0,131,156,202,0,131,216,12,3,131, - 1,0,4,1,32,45,1,131,208,48,1,131, - 160,49,1,131,20,48,1,131,84,195,1,131, - 0,0,0,0,255,0,0,0,255,0,0,0, - 0,0,0,0,8,202,0,131,148,38,1,131, - 120,205,0,131,156,202,0,131,224,12,3,131, - 1,0,2,1,32,45,1,131,208,48,1,131, - 160,49,1,131,20,48,1,131,144,195,1,131, - 0,0,0,0,255,0,0,0,255,0,0,0, - 0,0,0,0,8,202,0,131,148,38,1,131, - 120,205,0,131,156,202,0,131,228,12,3,131, - 1,0,2,1,32,45,1,131,208,48,1,131, - 160,49,1,131,20,48,1,131,204,195,1,131, - 0,0,0,0,255,0,0,0,255,0,0,0, - 0,0,0,0,8,202,0,131,148,38,1,131, - 92,217,0,131,156,202,0,131,232,12,3,131, - 1,0,2,1,32,45,1,131,208,48,1,131, - 160,49,1,131,20,48,1,131,8,196,1,131, - 0,0,0,0,255,0,0,0,255,0,0,0, - 0,0,0,0,8,202,0,131,148,38,1,131, - 92,217,0,131,156,202,0,131,234,12,3,131, - 1,0,2,1,32,45,1,131,208,48,1,131, - 160,49,1,131,20,48,1,131,68,196,1,131, - 0,0,0,0,255,0,0,0,255,0,0,0, - 0,0,0,0,8,202,0,131,148,38,1,131, - 92,217,0,131,156,202,0,131,246,12,3,131, - 1,0,2,1,32,45,1,131,208,48,1,131, - 160,49,1,131,20,48,1,131,128,196,1,131, - 0,0,0,0,255,0,0,0,255,0,0,0, - 0,0,0,0,8,202,0,131,148,38,1,131, - 92,217,0,131,156,202,0,131,236,12,3,131, - 1,0,2,1,32,45,1,131,208,48,1,131, - 160,49,1,131,20,48,1,131,188,196,1,131, - 0,0,0,0,255,0,0,0,255,0,0,0, - 0,0,0,0,8,202,0,131,148,38,1,131, - 92,217,0,131,60,210,0,131,238,12,3,131, - 1,0,2,3,32,45,1,131,208,48,1,131, - 160,49,1,131,20,48,1,131,248,196,1,131, - 0,0,0,0,255,0,0,0,255,0,0,0, - 0,0,0,0,8,202,0,131,148,38,1,131, - 92,217,0,131,60,210,0,131,240,12,3,131, - 1,0,2,3,32,45,1,131,208,48,1,131, - 160,49,1,131,20,48,1,131,52,197,1,131, - 0,0,0,0,255,0,0,0,255,0,0,0, - 0,0,0,0,8,202,0,131,148,38,1,131, - 92,217,0,131,60,210,0,131,242,12,3,131, - 1,0,2,3,32,45,1,131,208,48,1,131, - 160,49,1,131,20,48,1,131,112,197,1,131, - 0,0,0,0,255,0,0,0,255,0,0,0, - 0,0,0,0,1,0,2,1,180,219,0,131, - 132,217,0,131,164,202,0,131,104,220,0,131, - 0,0,0,0,0,0,0,0,255,0,0,0, - 255,0,0,0,0,0,0,0,1,0,2,3, - 180,219,0,131,132,217,0,131,0,221,0,131, - 104,220,0,131,0,0,0,0,0,0,0,0, - 255,0,0,0,255,0,0,0,0,0,0,0, - 1,0,2,1,180,219,0,131,132,217,0,131, - 164,202,0,131,104,220,0,131,0,0,0,0, - 0,0,0,0,255,0,0,0,255,0,0,0, - 0,0,0,0,1,0,2,3,180,219,0,131, - 132,217,0,131,0,221,0,131,104,220,0,131, - 0,0,0,0,0,0,0,0,255,0,0,0, - 255,0,0,0,0,0,0,0,1,0,2,3, - 180,219,0,131,132,217,0,131,0,221,0,131, - 104,220,0,131,0,0,0,0,0,0,0,0, - 255,0,0,0,255,0,0,0,0,0,0,0, - 1,0,4,1,180,219,0,131,132,217,0,131, - 164,202,0,131,104,220,0,131,0,0,0,0, - 0,0,0,0,255,0,0,0,255,0,0,0, - 
0,0,0,0,1,0,2,1,180,219,0,131, - 132,217,0,131,164,202,0,131,104,220,0,131, - 0,0,0,0,0,0,0,0,255,0,0,0, - 255,0,0,0,0,0,0,0,1,0,4,1, - 180,219,0,131,132,217,0,131,164,202,0,131, - 104,220,0,131,0,0,0,0,0,0,0,0, - 255,0,0,0,255,0,0,0,0,0,0,0, - 1,0,4,1,180,219,0,131,132,217,0,131, - 164,202,0,131,104,220,0,131,0,0,0,0, - 0,0,0,0,255,0,0,0,255,0,0,0, - 0,0,0,0,1,0,65,1,180,219,0,131, - 132,217,0,131,164,202,0,131,104,220,0,131, - 0,0,0,0,0,0,0,0,255,0,0,0, - 255,0,0,0,0,0,0,0,1,0,0,0, - 172,197,1,131,2,0,0,0,212,197,1,131, - 3,0,0,0,252,197,1,131,4,0,0,0, - 36,198,1,131,5,0,0,0,76,198,1,131, - 6,0,0,0,116,198,1,131,7,0,0,0, - 156,198,1,131,8,0,0,0,196,198,1,131, - 9,0,0,0,236,198,1,131,10,0,0,0, - 20,199,1,131,0,0,0,0,0,0,0,0, - 1,0,0,0,136,209,1,131,0,0,0,0, - 0,0,0,0,1,0,0,0,120,194,1,131, - 2,0,0,0,180,194,1,131,3,0,0,0, - 240,194,1,131,4,0,0,0,44,195,1,131, - 5,0,0,0,104,195,1,131,6,0,0,0, - 164,195,1,131,7,0,0,0,224,195,1,131, - 8,0,0,0,28,196,1,131,9,0,0,0, - 88,196,1,131,10,0,0,0,148,196,1,131, - 11,0,0,0,208,196,1,131,12,0,0,0, - 12,197,1,131,13,0,0,0,72,197,1,131, - 14,0,0,0,132,197,1,131,15,0,0,0, - 144,209,1,131,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,8,202,0,131, - 148,38,1,131,76,210,0,131,156,202,0,131, - 160,211,1,131,1,0,65,1,32,45,1,131, - 208,48,1,131,160,49,1,131,20,48,1,131, - 44,200,1,131,0,0,0,0,255,0,0,0, - 255,0,0,0,0,0,0,0,8,202,0,131, - 148,38,1,131,120,205,0,131,60,210,0,131, - 140,1,0,163,1,0,2,3,32,45,1,131, - 208,48,1,131,160,49,1,131,20,48,1,131, - 104,200,1,131,0,0,0,0,255,0,0,0, - 255,0,0,0,0,0,0,0,1,0,4,1, - 44,202,0,131,36,222,0,131,164,202,0,131, - 48,223,0,131,0,0,0,0,0,0,0,0, - 255,0,0,0,255,0,0,0,0,0,0,0, - 1,0,2,1,44,202,0,131,36,222,0,131, - 164,202,0,131,48,223,0,131,0,0,0,0, - 0,0,0,0,255,0,0,0,255,0,0,0, - 0,0,0,0,1,0,2,1,44,202,0,131, - 36,222,0,131,164,202,0,131,48,223,0,131, - 0,0,0,0,0,0,0,0,255,0,0,0, - 255,0,0,0,0,0,0,0,1,0,0,0, - 164,200,1,131,2,0,0,0,204,200,1,131, - 3,0,0,0,244,200,1,131,0,0,0,0, - 0,0,0,0,1,0,0,0,168,209,1,131, - 0,0,0,0,0,0,0,0,1,0,2,1, - 44,202,0,131,212,223,0,131,164,202,0,131, - 252,224,0,131,0,0,0,0,0,0,0,0, - 255,0,0,0,255,0,0,0,0,0,0,0, - 1,0,2,1,44,202,0,131,212,223,0,131, - 164,202,0,131,252,224,0,131,0,0,0,0, - 0,0,0,0,255,0,0,0,255,0,0,0, - 0,0,0,0,1,0,65,1,44,202,0,131, - 212,223,0,131,164,202,0,131,252,224,0,131, - 0,0,0,0,0,0,0,0,255,0,0,0, - 255,0,0,0,0,0,0,0,1,0,65,1, - 44,202,0,131,212,223,0,131,164,202,0,131, - 252,224,0,131,0,0,0,0,0,0,0,0, - 255,0,0,0,255,0,0,0,0,0,0,0, - 1,0,65,1,44,202,0,131,212,223,0,131, - 164,202,0,131,252,224,0,131,0,0,0,0, - 0,0,0,0,255,0,0,0,255,0,0,0, - 0,0,0,0,1,0,0,0,76,201,1,131, - 2,0,0,0,116,201,1,131,3,0,0,0, - 156,201,1,131,4,0,0,0,196,201,1,131, - 5,0,0,0,236,201,1,131,0,0,0,0, - 0,0,0,0,1,0,0,0,184,209,1,131, - 0,0,0,0,0,0,0,0,1,0,0,0, - 64,200,1,131,2,0,0,0,124,200,1,131, - 3,0,0,0,176,209,1,131,4,0,0,0, - 192,209,1,131,0,0,0,0,0,0,0,0, - 1,0,0,0,128,209,1,131,2,0,0,0, - 152,209,1,131,3,0,0,0,160,209,1,131, - 4,0,0,0,200,209,1,131,0,0,0,0, - 0,0,0,0,1,0,0,0,24,208,1,131, - 2,0,0,0,48,208,1,131,4,0,0,0, - 104,208,1,131,5,0,0,0,112,208,1,131, - 7,0,0,0,136,208,1,131,10,0,0,0, - 96,209,1,131,11,0,0,0,104,209,1,131, - 17,0,0,0,208,209,1,131,0,0,0,0, - 0,0,0,0,1,0,0,0,216,209,1,131, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 1,0,0,0,240,209,1,131,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,1,0,0,0,8,210,1,131, - 2,0,0,0,16,210,1,131,3,0,0,0, - 24,210,1,131,4,0,0,0,32,210,1,131, - 
5,0,0,0,40,210,1,131,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,1,0,0,0, - 56,210,1,131,2,0,0,0,64,210,1,131, - 0,0,0,0,0,0,0,0,1,0,0,0, - 72,210,1,131,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,1,0,0,0, - 48,210,1,131,2,0,0,0,80,210,1,131, - 3,0,0,0,88,210,1,131,0,0,0,0, - 0,0,0,0,1,0,0,0,16,208,1,131, - 2,0,0,0,224,209,1,131,3,0,0,0, - 232,209,1,131,4,0,0,0,248,209,1,131, - 5,0,0,0,0,210,1,131,6,0,0,0, - 96,210,1,131,0,0,0,0,0,0,0,0, - 1,0,0,0,104,210,1,131,0,0,0,0, - 0,0,0,0,6,0,0,0,112,210,1,131, - 0,0,0,0,0,0,0,0,3,0,0,0, - 120,210,1,131,0,0,0,0,0,0,0,0, - 1,0,0,0,128,210,1,131,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 1,0,0,0,3,0,0,0,6,0,0,0, - 1,0,0,0,2,0,0,0,1,0,0,0, - 10,0,0,0,7,0,0,0,8,0,0,0, - 2,0,0,0,2,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,112,117,98,108, - 105,99,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,112,114,105,118,97,116,101,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 83,78,77,80,95,116,114,97,112,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,1,0,0,0, - 3,0,0,0,6,0,0,0,1,0,0,0, - 4,0,0,0,1,0,0,0,76,1,0,0, - 5,0,0,0,1,0,0,0,1,0,0,0, - 0,0,0,0,0,0,0,0,1,0,0,0, - 3,0,0,0,6,0,0,0,1,0,0,0, - 2,0,0,0,1,0,0,0,2,0,0,0, - 2,0,0,0,1,0,0,0,1,0,0,0, - 0,0,0,0,1,0,0,0,3,0,0,0, - 6,0,0,0,1,0,0,0,2,0,0,0, - 1,0,0,0,17,0,0,0,0,0,0,0, - 0,0,0,0,48,49,50,51,52,53,54,55, - 56,57,65,66,67,68,69,70,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 255,85,170,0,255,255,255,255,85,85,85,85, - 170,170,170,170,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,64,40,35,41, - 32,67,111,112,121,114,105,103,104,116,32,40, - 99,41,32,49,57,56,54,32,45,32,49,57, - 57,53,32,32,69,112,105,108,111,103,117,101, - 32,84,101,99,104,110,111,108,111,103,121,32, - 67,111,114,112,111,114,97,116,105,111,110,10, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,255,255,255,255, - 72,10,0,0,78,10,0,0,83,10,0,0, - 69,10,0,0,109,97,105,110,46,99,0,0, - 48,0,0,0,55,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,1,0,0,0, - 0,0,0,0,116,105,109,101,114,46,99,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 100,0,0,0,100,0,0,0,1,0,0,0, - 0,0,0,0,115,114,99,32,0,0,0,0, - 32,0,0,0,100,115,116,32,0,0,0,0, - 32,37,48,50,88,0,0,0,10,0,0,0, - 255,255,255,255,48,48,48,48,48,48,0,0, - 48,48,48,48,48,49,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,255,255,255,255, - 0,0,0,0,0,0,0,0,0,0,0,0, - 62,32,0,0,37,120,10,0,37,120,58,9, - 37,120,10,0,37,115,10,0,0,0,0,0, - 10,0,0,0,0,0,0,0,0,0,0,0, - 68,85,77,80,10,0,0,0,37,48,50,120, - 32,0,0,0,10,0,0,0,0,0,0,0, - 37,100,32,112,112,115,10,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 1,0,0,0,0,0,0,0,1,0,0,0, - 119,119,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,1,128,194,0, - 0,0,0,0,1,128,194,0,0,16,0,0, - 66,76,75,0,70,87,68,0,76,82,78,0, - 76,73,83,0,68,73,83,0,72,69,76,76, - 79,10,0,0,116,99,32,101,120,112,10,0, - 102,114,111,109,32,0,0,0,10,0,0,0, - 87,101,105,114,100,0,0,0,0,0,0,0, - 0,0,0,0,255,255,255,255,255,255,255,255, - 255,255,0,0,255,255,255,255,255,255,0,0, - 0,0,0,0,0,0,0,0,80,65,68,37, - 100,10,0,0,170,170,3,0,0,0,0,0, - 83,69,78,84,33,10,0,0,85,68,80,10, - 0,0,0,0,73,67,77,80,10,0,0,0, - 69,67,72,79,10,0,0,0,73,80,10,0, - 170,170,3,0,0,0,0,0,73,80,88,33, - 10,0,0,0,0,0,0,0,255,255,255,255, - 255,255,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,192,155,1,131,0,0,0,0, - 108,157,1,131,0,0,0,0,88,161,1,131, - 0,0,0,0,16,162,1,131,0,0,0,0, - 32,162,1,131,0,0,0,0,116,167,1,131, - 0,0,0,0,164,167,1,131,0,0,0,0, - 188,169,1,131,0,0,0,0,44,170,1,131, - 0,0,0,0,220,170,1,131,0,0,0,0, - 4,171,1,131,0,0,0,0,80,171,1,131, - 0,0,0,0,40,178,1,131,0,0,0,0, - 
104,180,1,131,0,0,0,0,128,180,1,131, - 0,0,0,0,144,180,1,131,0,0,0,0, - 240,182,1,131,0,0,0,0,104,183,1,131, - 0,0,0,0,120,183,1,131,0,0,0,0, - 128,183,1,131,0,0,0,0,136,183,1,131, - 0,0,0,0,160,183,1,131,0,0,0,0, - 168,183,1,131,0,0,0,0,176,183,1,131, - 0,0,0,0,200,183,1,131,0,0,0,0, - 208,183,1,131,0,0,0,0,216,183,1,131, - 0,0,0,0,224,183,1,131,0,0,0,0, - 0,184,1,131,0,0,0,0,8,184,1,131, - 0,0,0,0,16,184,1,131,0,0,0,0, - 40,184,1,131,0,0,0,0,48,184,1,131, - 0,0,0,0,64,184,1,131,0,0,0,0, - 72,184,1,131,0,0,0,0,80,184,1,131, - 0,0,0,0,104,184,1,131,0,0,0,0, - 112,184,1,131,0,0,0,0,128,184,1,131, - 0,0,0,0,136,184,1,131,0,0,0,0, - 152,184,1,131,0,0,0,0,208,184,1,131, - 0,0,0,0,248,184,1,131,0,0,0,0, - 152,191,1,131,0,0,0,0,252,193,1,131, - 0,0,0,0,44,194,1,131,0,0,0,0, - 60,194,1,131,0,0,0,0,60,199,1,131, - 0,0,0,0,148,199,1,131,0,0,0,0, - 164,199,1,131,0,0,0,0,36,200,1,131, - 0,0,0,0,28,201,1,131,0,0,0,0, - 60,201,1,131,0,0,0,0,20,202,1,131, - 0,0,0,0,68,202,1,131,0,0,0,0, - 84,202,1,131,0,0,0,0,124,202,1,131, - 0,0,0,0,164,202,1,131,0,0,0,0, - 236,202,1,131,0,0,0,0,252,202,1,131, - 0,0,0,0,4,203,1,131,0,0,0,0, - 12,203,1,131,0,0,0,0,28,203,1,131, - 0,0,0,0,36,203,1,131,0,0,0,0, - 44,203,1,131,0,0,0,0,52,203,1,131, - 0,0,0,0,60,203,1,131,0,0,0,0, - 68,203,1,131,0,0,0,0,76,203,1,131, - 0,0,0,0,124,203,1,131,0,0,0,0, - 132,203,1,131,0,0,0,0,140,203,1,131, - 0,0,0,0,164,203,1,131,0,0,0,0, - 180,203,1,131,0,0,0,0,188,203,1,131, - 0,0,0,0,220,203,1,131,0,0,0,0, - 20,204,1,131,0,0,0,0,36,204,1,131, - 0,0,0,0,52,204,1,131,0,0,0,0, - 68,204,1,131,255,255,255,0,0,0,0,0, - 0,0,0,0,0,0,0,0,1,0,0,0, - 10,0,0,0,10,0,0,0,0,205,1,131, - 10,0,0,0,7,0,0,0,0,0,0,0, - 0,0,0,0,255,255,255,255,110,118,114,97, - 109,46,99,0,114,99,0,0,48,120,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0 - } ; -static const int dgrs_ncode = 119520 ; diff --git a/drivers/net/dgrs_i82596.h b/drivers/net/dgrs_i82596.h deleted file mode 100644 index ac9217ad213816ec45a56e1a28f5259533c3c8e8..0000000000000000000000000000000000000000 --- a/drivers/net/dgrs_i82596.h +++ /dev/null @@ -1,473 +0,0 @@ -/* - * i82596 ethernet controller bits and structures (little endian) - * - * $Id: i82596.h,v 1.8 1996/09/03 11:19:03 rick Exp $ - */ - -/************************************************************************/ -/* */ -/* PORT commands (p. 4-20). The least significant nibble is one */ -/* of these commands, the rest of the command is a memory address */ -/* aligned on a 16 byte boundary. Note that port commands must */ -/* be written to the PORT address and the PORT address+2 with two */ -/* halfword writes. Write the LSH first to PORT, then the MSH to */ -/* PORT+2. Blame Intel. */ -/* */ -/************************************************************************/ -#define I596_PORT_RESET 0x0 /* Reset. Wait 5 SysClks & 10 TxClks */ -#define I596_PORT_SELFTEST 0x1 /* Do a selftest */ -#define I596_PORT_SCP_ADDR 0x2 /* Set new SCP address */ -#define I596_PORT_DUMP 0x3 /* Dump internal data structures */ - -/* - * I596_ST: Selftest results (p. 
4-21) - */ -typedef volatile struct -{ - ulong signature; /* ROM checksum */ - ulong result; /* Selftest results: non-zero is a failure */ -} I596_ST; - -#define I596_ST_SELFTEST_FAIL 0x1000 /* Selftest Failed */ -#define I596_ST_DIAGNOSE_FAIL 0x0020 /* Diagnose Failed */ -#define I596_ST_BUSTIMER_FAIL 0x0010 /* Bus Timer Failed */ -#define I596_ST_REGISTER_FAIL 0x0008 /* Register Failed */ -#define I596_ST_ROM_FAIL 0x0004 /* ROM Failed */ - -/* - * I596_DUMP: Dump results - */ -typedef volatile struct -{ - ulong dump[77]; -} I596_DUMP; - -/************************************************************************/ -/* */ -/* I596_TBD: Transmit Buffer Descriptor (p. 4-59) */ -/* */ -/************************************************************************/ -typedef volatile struct _I596_TBD -{ - ulong count; - vol struct _I596_TBD *next; - uchar *buf; - ushort unused1; - ushort unused2; -} I596_TBD; - -#define I596_TBD_NOLINK ((I596_TBD *) 0xffffffff) -#define I596_TBD_EOF 0x8000 -#define I596_TBD_COUNT_MASK 0x3fff - -/************************************************************************/ -/* */ -/* I596_TFD: Transmit Frame Descriptor (p. 4-56) */ -/* a.k.a. I596_CB_XMIT */ -/* */ -/************************************************************************/ -typedef volatile struct -{ - ushort status; - ushort cmd; - union _I596_CB *next; - I596_TBD *tbdp; - ulong count; /* for speed */ - - /* Application defined data follows structure... */ - -#if 0 /* We don't use these intel defined ones */ - uchar addr[6]; - ushort len; - uchar data[1]; -#else - ulong dstchan;/* Used by multi-NIC mode */ -#endif -} I596_TFD; - -#define I596_TFD_NOCRC 0x0010 /* cmd: No CRC insertion */ -#define I596_TFD_FLEX 0x0008 /* cmd: Flexible mode */ - -/************************************************************************/ -/* */ -/* I596_RBD: Receive Buffer Descriptor (p. 4-84) */ -/* */ -/************************************************************************/ -typedef volatile struct _I596_RBD -{ -#ifdef INTEL_RETENTIVE - ushort count; /* Length of data in buf */ - ushort offset; -#else - ulong count; /* Length of data in buf */ -#endif - vol struct _I596_RBD *next; /* Next buffer descriptor in list */ - uchar *buf; /* Data buffer */ -#ifdef INTEL_RETENTIVE - ushort size; /* Size of buf (constant) */ - ushort zero; -#else - ulong size; /* Size of buf (constant) */ -#endif - - /* Application defined data follows structure... */ - - uchar chan; - uchar refcnt; - ushort len; -} I596_RBD; - -#define I596_RBD_NOLINK ((I596_RBD *) 0xffffffff) -#define I596_RBD_EOF 0x8000 /* This is last buffer in a frame */ -#define I596_RBD_F 0x4000 /* The actual count is valid */ - -#define I596_RBD_EL 0x8000 /* Last buffer in list */ - -/************************************************************************/ -/* */ -/* I596_RFD: Receive Frame Descriptor (p. 4-79) */ -/* */ -/************************************************************************/ -typedef volatile struct _I596_RFD -{ - ushort status; - ushort cmd; - vol struct _I596_RFD *next; - vol struct _I596_RBD *rbdp; - ushort count; /* Len of data in RFD: always 0 */ - ushort size; /* Size of RFD buffer: always 0 */ - - /* Application defined data follows structure... 
*/ - -# if 0 /* We don't use these intel defined ones */ - uchar addr[6]; - ushort len; - uchar data[1]; -# else - ulong dstchan;/* Used by multi-nic mode */ -# endif -} I596_RFD; - -#define I596_RFD_C 0x8000 /* status: frame complete */ -#define I596_RFD_B 0x4000 /* status: frame busy or waiting */ -#define I596_RFD_OK 0x2000 /* status: frame OK */ -#define I596_RFD_ERR_LENGTH 0x1000 /* status: length error */ -#define I596_RFD_ERR_CRC 0x0800 /* status: CRC error */ -#define I596_RFD_ERR_ALIGN 0x0400 /* status: alignment error */ -#define I596_RFD_ERR_NOBUFS 0x0200 /* status: resource error */ -#define I596_RFD_ERR_DMA 0x0100 /* status: DMA error */ -#define I596_RFD_ERR_SHORT 0x0080 /* status: too short error */ -#define I596_RFD_NOMATCH 0x0002 /* status: IA was not matched */ -#define I596_RFD_COLLISION 0x0001 /* status: collision during receive */ - -#define I596_RFD_EL 0x8000 /* cmd: end of RFD list */ -#define I596_RFD_FLEX 0x0008 /* cmd: Flexible mode */ -#define I596_RFD_EOF 0x8000 /* count: last buffer in the frame */ -#define I596_RFD_F 0x4000 /* count: The actual count is valid */ - -/************************************************************************/ -/* */ -/* Commands */ -/* */ -/************************************************************************/ - - /* values for cmd halfword in all the structs below */ -#define I596_CB_CMD 0x07 /* CB COMMANDS */ -#define I596_CB_CMD_NOP 0 -#define I596_CB_CMD_IA 1 -#define I596_CB_CMD_CONF 2 -#define I596_CB_CMD_MCAST 3 -#define I596_CB_CMD_XMIT 4 -#define I596_CB_CMD_TDR 5 -#define I596_CB_CMD_DUMP 6 -#define I596_CB_CMD_DIAG 7 - -#define I596_CB_CMD_EL 0x8000 /* CB is last in linked list */ -#define I596_CB_CMD_S 0x4000 /* Suspend after execution */ -#define I596_CB_CMD_I 0x2000 /* cause interrupt */ - - /* values for the status halfword in all the struct below */ -#define I596_CB_STATUS 0xF000 /* All four status bits */ -#define I596_CB_STATUS_C 0x8000 /* Command complete */ -#define I596_CB_STATUS_B 0x4000 /* Command busy executing */ -#define I596_CB_STATUS_C_OR_B 0xC000 /* Command complete or busy */ -#define I596_CB_STATUS_OK 0x2000 /* Command complete, no errors */ -#define I596_CB_STATUS_A 0x1000 /* Command busy executing */ - -#define I596_CB_NOLINK ((I596_CB *) 0xffffffff) - -/* - * I596_CB_NOP: NOP Command (p. 4-34) - */ -typedef volatile struct -{ - ushort status; - ushort cmd; - union _I596_CB *next; -} I596_CB_NOP; - -/* - * Same as above, but command and status in one ulong for speed - */ -typedef volatile struct -{ - ulong csr; - union _I596_CB *next; -} I596_CB_FAST; -#define FASTs(X) (X) -#define FASTc(X) ((X)<<16) - -/* - * I596_CB_IA: Individual (MAC) Address Command (p. 4-35) - */ -typedef volatile struct -{ - ushort status; - ushort cmd; - union _I596_CB *next; - uchar addr[6]; -} I596_CB_IA; - -/* - * I596_CB_CONF: Configure Command (p. 
4-37) - */ -typedef volatile struct -{ - ushort status; - ushort cmd; - union _I596_CB *next; - uchar conf[14]; -} I596_CB_CONF; - -#define I596_CONF0_P 0x80 /* Enable RBD Prefetch Bit */ -#define I596_CONF0_COUNT 14 /* Count of configuration bytes */ - -#define I596_CONF1_MON_OFF 0xC0 /* Monitor mode: Monitor off */ -#define I596_CONF1_MON_ON 0x80 /* Monitor mode: Monitor on */ -#define I596_CONF1_TxFIFO(W) (W) /* TxFIFO trigger, in words */ - -#define I596_CONF2_SAVEBF 0x80 /* Save bad frames */ - -#define I596_CONF3_ADDRLEN(B) (B) /* Address length */ -#define I596_CONF3_NOSRCINSERT 0x08 /* Do not insert source address */ -#define I596_CONF3_PREAMBLE8 0x20 /* 8 byte preamble */ -#define I596_CONF3_LOOPOFF 0x00 /* Loopback: Off */ -#define I596_CONF3_LOOPINT 0x40 /* Loopback: internal */ -#define I596_CONF3_LOOPEXT 0xC0 /* Loopback: external */ - -#define I596_CONF4_LINPRI(ST) (ST) /* Linear priority: slot times */ -#define I596_CONF4_EXPPRI(ST) (ST) /* Exponential priority: slot times */ -#define I596_CONF4_IEEE_BOM 0 /* IEEE 802.3 backoff method */ - -#define I596_CONF5_IFS(X) (X) /* Interframe spacing in clocks */ - -#define I596_CONF6_ST_LOW(X) (X&255) /* Slot time, low byte */ - -#define I596_CONF7_ST_HI(X) (X>>8) /* Slot time, high bits */ -#define I596_CONF7_RETRY(X) (X<<4) /* Max retry number */ - -#define I596_CONF8_PROMISC 0x01 /* Rcv all frames */ -#define I596_CONF8_NOBROAD 0x02 -#define I596_CONF8_MANCHESTER 0x04 -#define I596_CONF8_TxNOCRS 0x08 -#define I596_CONF8_NOCRC 0x10 -#define I596_CONF8_CRC_CCITT 0x20 -#define I596_CONF8_BITSTUFFING 0x40 -#define I596_CONF8_PADDING 0x80 - -#define I596_CONF9_CSFILTER(X) (X) -#define I596_CONF9_CSINT(X) 0x08 -#define I596_CONF9_CDFILTER(X) (X<<4) -#define I596_CONF9_CDINT(X) 0x80 - -#define I596_CONF10_MINLEN(X) (X) /* Minimum frame length */ - -#define I596_CONF11_PRECRS_ 0x01 /* Preamble before carrier sense */ -#define I596_CONF11_LNGFLD_ 0x02 /* Padding in End of Carrier */ -#define I596_CONF11_CRCINM_ 0x04 /* CRC in memory */ -#define I596_CONF11_AUTOTX 0x08 /* Auto retransmit */ -#define I596_CONF11_CSBSAC_ 0x10 /* Collision detect by src addr cmp. */ -#define I596_CONF11_MCALL_ 0x20 /* Multicast all */ - -#define I596_CONF13_RESERVED 0x3f /* Reserved: must be ones */ -#define I596_CONF13_MULTIA 0x40 /* Enable multiple addr. reception */ -#define I596_CONF13_DISBOF 0x80 /* Disable backoff algorithm */ -/* - * I596_CB_MCAST: Multicast-Setup Command (p. 4-54) - */ -typedef volatile struct -{ - ushort status; - ushort cmd; - union _I596_CB *next; - ushort count; /* Number of 6-byte addrs that follow */ - uchar addr[6][1]; -} I596_CB_MCAST; - -/* - * I596_CB_XMIT: Transmit Command (p. 4-56) - */ -typedef I596_TFD I596_CB_XMIT; - -#define I596_CB_XMIT_NOCRC 0x0010 /* cmd: No CRC insertion */ -#define I596_CB_XMIT_FLEX 0x0008 /* cmd: Flexible memory mode */ - -#define I596_CB_XMIT_ERR_LATE 0x0800 /* status: error: late collision */ -#define I596_CB_XMIT_ERR_NOCRS 0x0400 /* status: error: no carriers sense */ -#define I596_CB_XMIT_ERR_NOCTS 0x0200 /* status: error: loss of CTS */ -#define I596_CB_XMIT_ERR_UNDER 0x0100 /* status: error: DMA underrun */ -#define I596_CB_XMIT_ERR_MAXCOL 0x0020 /* status: error: maximum collisions */ -#define I596_CB_XMIT_COLLISIONS 0x000f /* status: number of collisions */ - -/* - * I596_CB_TDR: Time Domain Reflectometry Command (p. 4-63) - */ -typedef volatile struct -{ - ushort status; - ushort cmd; - union _I596_CB *next; - ushort time; -} I596_CB_TDR; - -/* - * I596_CB_DUMP: Dump Command (p. 
4-65) - */ -typedef volatile struct -{ - ushort status; - ushort cmd; - union _I596_CB *next; - uchar *buf; -} I596_CB_DUMP; - -/* - * I596_CB_DIAG: Diagnose Command (p. 4-77) - */ -typedef volatile struct -{ - ushort status; - ushort cmd; - union _I596_CB *next; -} I596_CB_DIAG; - -/* - * I596_CB: Command Block - */ -typedef union _I596_CB -{ - I596_CB_NOP nop; - I596_CB_IA ia; - I596_CB_CONF conf; - I596_CB_MCAST mcast; - I596_CB_XMIT xmit; - I596_CB_TDR tdr; - I596_CB_DUMP dump; - I596_CB_DIAG diag; - - /* command and status in one ulong for speed... */ - I596_CB_FAST fast; -} I596_CB; - -/************************************************************************/ -/* */ -/* I596_SCB: System Configuration Block (p. 4-26) */ -/* */ -/************************************************************************/ -typedef volatile struct -{ - volatile ushort status; /* Status word */ - volatile ushort cmd; /* Command word */ - I596_CB *cbp; - I596_RFD *rfdp; - ulong crc_errs; - ulong align_errs; - ulong resource_errs; - ulong overrun_errs; - ulong rcvcdt_errs; - ulong short_errs; - ushort toff; - ushort ton; -} I596_SCB; - - /* cmd halfword values */ -#define I596_SCB_ACK 0xF000 /* ACKNOWLEDGMENTS */ -#define I596_SCB_ACK_CX 0x8000 /* Ack command completion */ -#define I596_SCB_ACK_FR 0x4000 /* Ack received frame */ -#define I596_SCB_ACK_CNA 0x2000 /* Ack command unit not active */ -#define I596_SCB_ACK_RNR 0x1000 /* Ack rcv unit not ready */ -#define I596_SCB_ACK_ALL 0xF000 /* Ack everything */ - -#define I596_SCB_CUC 0x0700 /* COMMAND UNIT COMMANDS */ -#define I596_SCB_CUC_NOP 0x0000 /* No operation */ -#define I596_SCB_CUC_START 0x0100 /* Start execution of first CB */ -#define I596_SCB_CUC_RESUME 0x0200 /* Resume execution */ -#define I596_SCB_CUC_SUSPEND 0x0300 /* Suspend after current CB */ -#define I596_SCB_CUC_ABORT 0x0400 /* Abort current CB immediately */ -#define I596_SCB_CUC_LOAD 0x0500 /* Load Bus throttle timers */ -#define I596_SCB_CUC_LOADIMM 0x0600 /* Load Bus throttle timers, now */ - -#define I596_SCB_RUC 0x0070 /* RECEIVE UNIT COMMANDS */ -#define I596_SCB_RUC_NOP 0x0000 /* No operation */ -#define I596_SCB_RUC_START 0x0010 /* Start reception */ -#define I596_SCB_RUC_RESUME 0x0020 /* Resume reception */ -#define I596_SCB_RUC_SUSPEND 0x0030 /* Suspend reception */ -#define I596_SCB_RUC_ABORT 0x0040 /* Abort reception */ - -#define I596_SCB_RESET 0x0080 /* Hard reset chip */ - - /* status halfword values */ -#define I596_SCB_STAT 0xF000 /* STATUS */ -#define I596_SCB_CX 0x8000 /* command completion */ -#define I596_SCB_FR 0x4000 /* received frame */ -#define I596_SCB_CNA 0x2000 /* command unit not active */ -#define I596_SCB_RNR 0x1000 /* rcv unit not ready */ - -#define I596_SCB_CUS 0x0700 /* COMMAND UNIT STATUS */ -#define I596_SCB_CUS_IDLE 0x0000 /* Idle */ -#define I596_SCB_CUS_SUSPENDED 0x0100 /* Suspended */ -#define I596_SCB_CUS_ACTIVE 0x0200 /* Active */ - -#define I596_SCB_RUS 0x00F0 /* RECEIVE UNIT STATUS */ -#define I596_SCB_RUS_IDLE 0x0000 /* Idle */ -#define I596_SCB_RUS_SUSPENDED 0x0010 /* Suspended */ -#define I596_SCB_RUS_NORES 0x0020 /* No Resources */ -#define I596_SCB_RUS_READY 0x0040 /* Ready */ -#define I596_SCB_RUS_NORBDS 0x0080 /* No more RBDs modifier */ - -#define I596_SCB_LOADED 0x0008 /* Bus timers loaded */ - -/************************************************************************/ -/* */ -/* I596_ISCP: Intermediate System Configuration Ptr (p 4-26) */ -/* */ -/************************************************************************/ -typedef 
volatile struct -{ - ulong busy; /* Set to 1; I596 clears it when scbp is read */ - I596_SCB *scbp; -} I596_ISCP; - -/************************************************************************/ -/* */ -/* I596_SCP: System Configuration Pointer (p. 4-23) */ -/* */ -/************************************************************************/ -typedef volatile struct -{ - ulong sysbus; - ulong dummy; - I596_ISCP *iscpp; -} I596_SCP; - - /* .sysbus values */ -#define I596_SCP_RESERVED 0x400000 /* Reserved bits must be set */ -#define I596_SCP_INTLOW 0x200000 /* Intr. Polarity active low */ -#define I596_SCP_INTHIGH 0 /* Intr. Polarity active high */ -#define I596_SCP_LOCKDIS 0x100000 /* Lock Function disabled */ -#define I596_SCP_LOCKEN 0 /* Lock Function enabled */ -#define I596_SCP_ETHROTTLE 0x080000 /* External Bus Throttle */ -#define I596_SCP_ITHROTTLE 0 /* Internal Bus Throttle */ -#define I596_SCP_LINEAR 0x040000 /* Linear Mode */ -#define I596_SCP_SEGMENTED 0x020000 /* Segmented Mode */ -#define I596_SCP_82586 0x000000 /* 82586 Mode */ diff --git a/drivers/net/dgrs_plx9060.h b/drivers/net/dgrs_plx9060.h deleted file mode 100644 index 6888ae0d0ce0ae2eb06ccfa29b61c808a721595c..0000000000000000000000000000000000000000 --- a/drivers/net/dgrs_plx9060.h +++ /dev/null @@ -1,175 +0,0 @@ -/* - * PLX 9060 PCI Interface chip - */ - -/* - * PCI configuration registers, same offset on local and PCI sides, - * but on PCI side, must use PCI BIOS calls to read/write. - */ -#define PCI_PLXREGS_BASE_ADDR 0x10 - -#define PCI_PLXREGS_IO_ADDR 0x14 - -#define PCI_SPACE0_BASE_ADDR 0x18 - -#define PCI_ROM_BASE_ADDR 0x30 -# define PCI_ROM_ENABLED 0x00000001 - -#define PCI_INT_LINE 0x3C - -/* - * Registers accessible directly from PCI and local side. - * Offset is from PCI side. Add PLX_LCL_OFFSET for local address. 
- */ -#define PLX_LCL_OFFSET 0x80 /* Offset of regs from local side */ - -/* - * Local Configuration Registers - */ -#define PLX_SPACE0_RANGE 0x00 /* Range for PCI to Lcl Addr Space 0 */ -#define PLX_SPACE0_BASE_ADDR 0x04 /* Lcl Base address remap */ - -#define PLX_ROM_RANGE 0x10 /* Range for expansion ROM (DMA) */ -#define PLX_ROM_BASE_ADDR 0x14 /* Lcl base address remap for ROM */ - -#define PLX_BUS_REGION 0x18 /* Bus Region Descriptors */ - -/* - * Shared Run Time Registers - */ -#define PLX_MBOX0 0x40 -#define PLX_MBOX1 0x44 -#define PLX_MBOX2 0x48 -#define PLX_MBOX3 0x4C -#define PLX_MBOX4 0x50 -#define PLX_MBOX5 0x54 -#define PLX_MBOX6 0x58 -#define PLX_MBOX7 0x5C - -#define PLX_PCI2LCL_DOORBELL 0x60 - -#define PLX_LCL2PCI_DOORBELL 0x64 - -#define PLX_INT_CSR 0x68 /* Interrupt Control/Status */ -# define PLX_LSERR_ENABLE 0x00000001 -# define PLX_LSERR_PE 0x00000002 -# define PLX_SERR 0x00000004 -# undef PLX_UNUSED /* 0x00000008 */ -# undef PLX_UNUSED /* 0x00000010 */ -# undef PLX_UNUSED /* 0x00000020 */ -# undef PLX_UNUSED /* 0x00000040 */ -# undef PLX_UNUSED /* 0x00000080 */ -# define PLX_PCI_IE 0x00000100 -# define PLX_PCI_DOORBELL_IE 0x00000200 -# define PLX_PCI_ABORT_IE 0x00000400 -# define PLX_PCI_LOCAL_IE 0x00000800 -# define PLX_RETRY_ABORT_ENABLE 0x00001000 -# define PLX_PCI_DOORBELL_INT 0x00002000 -# define PLX_PCI_ABORT_INT 0x00004000 -# define PLX_PCI_LOCAL_INT 0x00008000 -# define PLX_LCL_IE 0x00010000 -# define PLX_LCL_DOORBELL_IE 0x00020000 -# define PLX_LCL_DMA0_IE 0x00040000 -# define PLX_LCL_DMA1_IE 0x00080000 -# define PLX_LCL_DOORBELL_INT 0x00100000 -# define PLX_LCL_DMA0_INT 0x00200000 -# define PLX_LCL_DMA1_INT 0x00400000 -# define PLX_LCL_BIST_INT 0x00800000 -# define PLX_BM_DIRECT_ 0x01000000 -# define PLX_BM_DMA0_ 0x02000000 -# define PLX_BM_DMA1_ 0x04000000 -# define PLX_BM_ABORT_ 0x08000000 -# undef PLX_UNUSED /* 0x10000000 */ -# undef PLX_UNUSED /* 0x20000000 */ -# undef PLX_UNUSED /* 0x40000000 */ -# undef PLX_UNUSED /* 0x80000000 */ - -#define PLX_MISC_CSR 0x6c /* EEPROM,PCI,User,Init Control/Status*/ -# define PLX_USEROUT 0x00010000 -# define PLX_USERIN 0x00020000 -# define PLX_EECK 0x01000000 -# define PLX_EECS 0x02000000 -# define PLX_EEWD 0x04000000 -# define PLX_EERD 0x08000000 - -/* - * DMA registers. 
Offset is from local side - */ -#define PLX_DMA0_MODE 0x100 -# define PLX_DMA_MODE_WIDTH32 0x00000003 -# define PLX_DMA_MODE_WAITSTATES(X) ((X)<<2) -# define PLX_DMA_MODE_NOREADY 0x00000000 -# define PLX_DMA_MODE_READY 0x00000040 -# define PLX_DMA_MODE_NOBTERM 0x00000000 -# define PLX_DMA_MODE_BTERM 0x00000080 -# define PLX_DMA_MODE_NOBURST 0x00000000 -# define PLX_DMA_MODE_BURST 0x00000100 -# define PLX_DMA_MODE_NOCHAIN 0x00000000 -# define PLX_DMA_MODE_CHAIN 0x00000200 -# define PLX_DMA_MODE_DONE_IE 0x00000400 -# define PLX_DMA_MODE_ADDR_HOLD 0x00000800 - -#define PLX_DMA0_PCI_ADDR 0x104 - /* non-chaining mode PCI address */ - -#define PLX_DMA0_LCL_ADDR 0x108 - /* non-chaining mode local address */ - -#define PLX_DMA0_SIZE 0x10C - /* non-chaining mode length */ - -#define PLX_DMA0_DESCRIPTOR 0x110 -# define PLX_DMA_DESC_EOC 0x00000002 -# define PLX_DMA_DESC_TC_IE 0x00000004 -# define PLX_DMA_DESC_TO_HOST 0x00000008 -# define PLX_DMA_DESC_TO_BOARD 0x00000000 -# define PLX_DMA_DESC_NEXTADDR 0xFFFFfff0 - -#define PLX_DMA1_MODE 0x114 -#define PLX_DMA1_PCI_ADDR 0x118 -#define PLX_DMA1_LCL_ADDR 0x11C -#define PLX_DMA1_SIZE 0x110 -#define PLX_DMA1_DESCRIPTOR 0x124 - -#define PLX_DMA_CSR 0x128 -# define PLX_DMA_CSR_0_ENABLE 0x00000001 -# define PLX_DMA_CSR_0_START 0x00000002 -# define PLX_DMA_CSR_0_ABORT 0x00000004 -# define PLX_DMA_CSR_0_CLR_INTR 0x00000008 -# define PLX_DMA_CSR_0_DONE 0x00000010 -# define PLX_DMA_CSR_1_ENABLE 0x00000100 -# define PLX_DMA_CSR_1_START 0x00000200 -# define PLX_DMA_CSR_1_ABORT 0x00000400 -# define PLX_DMA_CSR_1_CLR_INTR 0x00000800 -# define PLX_DMA_CSR_1_DONE 0x00001000 - -#define PLX_DMA_ARB0 0x12C -# define PLX_DMA_ARB0_LATENCY_T 0x000000FF -# define PLX_DMA_ARB0_PAUSE_T 0x0000FF00 -# define PLX_DMA_ARB0_LATENCY_EN 0x00010000 -# define PLX_DMA_ARB0_PAUSE_EN 0x00020000 -# define PLX_DMA_ARB0_BREQ_EN 0x00040000 -# define PLX_DMA_ARB0_PRI 0x00180000 -# define PLX_DMA_ARB0_PRI_ROUND 0x00000000 -# define PLX_DMA_ARB0_PRI_0 0x00080000 -# define PLX_DMA_ARB0_PRI_1 0x00100000 - -#define PLX_DMA_ARB1 0x130 - /* Chan 0: FIFO DEPTH=16 */ -# define PLX_DMA_ARB1_0_P2L_LW_TRIG(X) ( ((X)&15) << 0 ) -# define PLX_DMA_ARB1_0_L2P_LR_TRIG(X) ( ((X)&15) << 4 ) -# define PLX_DMA_ARB1_0_L2P_PW_TRIG(X) ( ((X)&15) << 8 ) -# define PLX_DMA_ARB1_0_P2L_PR_TRIG(X) ( ((X)&15) << 12 ) - /* Chan 1: FIFO DEPTH=8 */ -# define PLX_DMA_ARB1_1_P2L_LW_TRIG(X) ( ((X)& 7) << 16 ) -# define PLX_DMA_ARB1_1_L2P_LR_TRIG(X) ( ((X)& 7) << 20 ) -# define PLX_DMA_ARB1_1_L2P_PW_TRIG(X) ( ((X)& 7) << 24 ) -# define PLX_DMA_ARB1_1_P2L_PR_TRIG(X) ( ((X)& 7) << 28 ) - -typedef struct _dmachain -{ - ulong pciaddr; - ulong lcladdr; - ulong len; - ulong next; -} DMACHAIN; diff --git a/drivers/net/dl2k.c b/drivers/net/dl2k.c index 04e3710c908269e3510a9492296ee041e75e717b..5066beb2e7bc43e3e4e6b6e30a47e198dc647467 100644 --- a/drivers/net/dl2k.c +++ b/drivers/net/dl2k.c @@ -10,9 +10,9 @@ (at your option) any later version. 
*/ -#define DRV_NAME "D-Link DL2000-based linux driver" -#define DRV_VERSION "v1.18" -#define DRV_RELDATE "2006/06/27" +#define DRV_NAME "DL2000/TC902x-based linux driver" +#define DRV_VERSION "v1.19" +#define DRV_RELDATE "2007/08/12" #include "dl2k.h" #include @@ -97,6 +97,7 @@ rio_probe1 (struct pci_dev *pdev, const struct pci_device_id *ent) static int version_printed; void *ring_space; dma_addr_t ring_dma; + DECLARE_MAC_BUF(mac); if (!version_printed++) printk ("%s", version); @@ -116,7 +117,6 @@ rio_probe1 (struct pci_dev *pdev, const struct pci_device_id *ent) err = -ENOMEM; goto err_out_res; } - SET_MODULE_OWNER (dev); SET_NETDEV_DEV(dev, &pdev->dev); #ifdef MEM_MAPPING @@ -257,10 +257,8 @@ rio_probe1 (struct pci_dev *pdev, const struct pci_device_id *ent) card_idx++; - printk (KERN_INFO "%s: %s, %02x:%02x:%02x:%02x:%02x:%02x, IRQ %d\n", - dev->name, np->name, - dev->dev_addr[0], dev->dev_addr[1], dev->dev_addr[2], - dev->dev_addr[3], dev->dev_addr[4], dev->dev_addr[5], irq); + printk (KERN_INFO "%s: %s, %s, IRQ %d\n", + dev->name, np->name, print_mac(mac, dev->dev_addr), irq); if (tx_coalesce > 1) printk(KERN_INFO "tx_coalesce:\t%d packets\n", tx_coalesce); @@ -292,7 +290,7 @@ rio_probe1 (struct pci_dev *pdev, const struct pci_device_id *ent) return err; } -int +static int find_miiphy (struct net_device *dev) { int i, phy_found = 0; @@ -316,7 +314,7 @@ find_miiphy (struct net_device *dev) return 0; } -int +static int parse_eeprom (struct net_device *dev) { int i, j; @@ -339,17 +337,24 @@ parse_eeprom (struct net_device *dev) #ifdef MEM_MAPPING ioaddr = dev->base_addr; #endif - /* Check CRC */ - crc = ~ether_crc_le (256 - 4, sromdata); - if (psrom->crc != crc) { - printk (KERN_ERR "%s: EEPROM data CRC error.\n", dev->name); - return -1; + if (np->pdev->vendor == PCI_VENDOR_ID_DLINK) { /* D-Link Only */ + /* Check CRC */ + crc = ~ether_crc_le (256 - 4, sromdata); + if (psrom->crc != crc) { + printk (KERN_ERR "%s: EEPROM data CRC error.\n", + dev->name); + return -1; + } } /* Set MAC address */ for (i = 0; i < 6; i++) dev->dev_addr[i] = psrom->mac_addr[i]; + if (np->pdev->vendor != PCI_VENDOR_ID_DLINK) { + return 0; + } + /* Parse Software Information Block */ i = 0x30; psib = (u8 *) sromdata; @@ -1091,7 +1096,7 @@ clear_stats (struct net_device *dev) } -int +static int change_mtu (struct net_device *dev, int new_mtu) { struct netdev_private *np = netdev_priv(dev); @@ -1326,7 +1331,7 @@ rio_ioctl (struct net_device *dev, struct ifreq *rq, int cmd) #define EEP_BUSY 0x8000 /* Read the EEPROM word */ /* We use I/O instruction to read/write eeprom to avoid fail on some machines */ -int +static int read_eeprom (long ioaddr, int eep_addr) { int i = 1000; diff --git a/drivers/net/dl2k.h b/drivers/net/dl2k.h index e443065a452e690600b647250cedb2ababf1e8ab..5b801775f42dea3806b814a3424ae9880015a8a7 100644 --- a/drivers/net/dl2k.h +++ b/drivers/net/dl2k.h @@ -692,6 +692,7 @@ struct netdev_private { static const struct pci_device_id rio_pci_tbl[] = { {0x1186, 0x4000, PCI_ANY_ID, PCI_ANY_ID, }, + {0x13f0, 0x1021, PCI_ANY_ID, PCI_ANY_ID, }, { } }; MODULE_DEVICE_TABLE (pci, rio_pci_tbl); diff --git a/drivers/net/dm9000.c b/drivers/net/dm9000.c index 738aa59065149a185414e25946658cd5ef58667b..27ac010900abd7ffce88ee26973569161606f567 100644 --- a/drivers/net/dm9000.c +++ b/drivers/net/dm9000.c @@ -148,7 +148,6 @@ typedef struct board_info { struct resource *irq_res; struct timer_list timer; - struct net_device_stats stats; unsigned char srom[128]; spinlock_t lock; @@ -166,8 +165,6 @@ static int 
dm9000_stop(struct net_device *); static void dm9000_timer(unsigned long); static void dm9000_init_dm9000(struct net_device *); -static struct net_device_stats *dm9000_get_stats(struct net_device *); - static irqreturn_t dm9000_interrupt(int, void *); static int dm9000_phy_read(struct net_device *dev, int phyaddr_unsused, int reg); @@ -416,7 +413,6 @@ dm9000_probe(struct platform_device *pdev) return -ENOMEM; } - SET_MODULE_OWNER(ndev); SET_NETDEV_DEV(ndev, &pdev->dev); PRINTK2("dm9000_probe()"); @@ -559,7 +555,6 @@ dm9000_probe(struct platform_device *pdev) ndev->tx_timeout = &dm9000_timeout; ndev->watchdog_timeo = msecs_to_jiffies(watchdog); ndev->stop = &dm9000_stop; - ndev->get_stats = &dm9000_get_stats; ndev->set_multicast_list = &dm9000_hash_table; #ifdef CONFIG_NET_POLL_CONTROLLER ndev->poll_controller = &dm9000_poll_controller; @@ -600,11 +595,10 @@ dm9000_probe(struct platform_device *pdev) ret = register_netdev(ndev); if (ret == 0) { - printk("%s: dm9000 at %p,%p IRQ %d MAC: ", - ndev->name, db->io_addr, db->io_data, ndev->irq); - for (i = 0; i < 5; i++) - printk("%02x:", ndev->dev_addr[i]); - printk("%02x\n", ndev->dev_addr[5]); + DECLARE_MAC_BUF(mac); + printk("%s: dm9000 at %p,%p IRQ %d MAC: %s\n", + ndev->name, db->io_addr, db->io_data, ndev->irq, + print_mac(mac, ndev->dev_addr)); } return 0; @@ -714,7 +708,7 @@ dm9000_start_xmit(struct sk_buff *skb, struct net_device *dev) writeb(DM9000_MWCMD, db->io_addr); (db->outblk)(db->io_data, skb->data, skb->len); - db->stats.tx_bytes += skb->len; + dev->stats.tx_bytes += skb->len; db->tx_pkt_cnt++; /* TX control: First packet immediately send, second packet queue */ @@ -791,7 +785,7 @@ dm9000_tx_done(struct net_device *dev, board_info_t * db) if (tx_status & (NSR_TX2END | NSR_TX1END)) { /* One packet sent complete */ db->tx_pkt_cnt--; - db->stats.tx_packets++; + dev->stats.tx_packets++; /* Queue packet check & send */ if (db->tx_pkt_cnt > 0) { @@ -852,17 +846,6 @@ dm9000_interrupt(int irq, void *dev_id) return IRQ_HANDLED; } -/* - * Get statistics from driver. - */ -static struct net_device_stats * -dm9000_get_stats(struct net_device *dev) -{ - board_info_t *db = (board_info_t *) dev->priv; - return &db->stats; -} - - /* * A periodic timer routine * Dynamic media sense, allocated Rx buffer... 
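The dm9000 hunks above, like the e100 and e1000 changes later in this patch, drop the driver-private struct net_device_stats and the ->get_stats hook in favour of the counters embedded in struct net_device itself (dev->stats), which the networking core returns by default when a driver installs no accessor of its own. A minimal sketch of the resulting pattern, assuming a hypothetical foo driver (foo_rx_complete is illustrative and not taken from this patch):

/*
 * Sketch only: counters are bumped directly in dev->stats, so the
 * driver keeps no private net_device_stats copy and registers no
 * ->get_stats method.
 */
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>

static void foo_rx_complete(struct net_device *dev, struct sk_buff *skb,
			    int crc_ok)
{
	if (!crc_ok) {
		dev->stats.rx_crc_errors++;	/* was priv->stats.rx_crc_errors++ */
		dev_kfree_skb(skb);
		return;
	}

	dev->stats.rx_bytes += skb->len;
	dev->stats.rx_packets++;

	skb->protocol = eth_type_trans(skb, dev);
	netif_rx(skb);
}
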
@@ -940,15 +923,15 @@ dm9000_rx(struct net_device *dev) GoodPacket = false; if (rxhdr.RxStatus & 0x100) { PRINTK1("fifo error\n"); - db->stats.rx_fifo_errors++; + dev->stats.rx_fifo_errors++; } if (rxhdr.RxStatus & 0x200) { PRINTK1("crc error\n"); - db->stats.rx_crc_errors++; + dev->stats.rx_crc_errors++; } if (rxhdr.RxStatus & 0x8000) { PRINTK1("length error\n"); - db->stats.rx_length_errors++; + dev->stats.rx_length_errors++; } } @@ -961,12 +944,12 @@ dm9000_rx(struct net_device *dev) /* Read received packet from RX SRAM */ (db->inblk)(db->io_data, rdptr, RxLen); - db->stats.rx_bytes += RxLen; + dev->stats.rx_bytes += RxLen; /* Pass to upper layer */ skb->protocol = eth_type_trans(skb, dev); netif_rx(skb); - db->stats.rx_packets++; + dev->stats.rx_packets++; } else { /* need to dump the packet's data */ diff --git a/drivers/net/dummy.c b/drivers/net/dummy.c index 756a6bcb038d4e053593735be5ced5f8c3c678a3..84e14f397d9ae1f5322dd0614855b6a53b5e3980 100644 --- a/drivers/net/dummy.c +++ b/drivers/net/dummy.c @@ -71,7 +71,6 @@ static void dummy_setup(struct net_device *dev) dev->change_mtu = NULL; dev->flags |= IFF_NOARP; dev->flags &= ~IFF_MULTICAST; - SET_MODULE_OWNER(dev); random_ether_addr(dev->dev_addr); } diff --git a/drivers/net/e100.c b/drivers/net/e100.c index 280313b9b069cfa146b8d2dd4cf93f821678df5a..64f35e20fd48f60a9a31b70e34b1df69e25d64c1 100644 --- a/drivers/net/e100.c +++ b/drivers/net/e100.c @@ -539,6 +539,7 @@ struct nic { struct csr __iomem *csr; enum scb_cmd_lo cuc_cmd; unsigned int cbs_avail; + struct napi_struct napi; struct cb *cbs; struct cb *cb_to_use; struct cb *cb_to_send; @@ -557,7 +558,6 @@ struct nic { enum mac mac; enum phy phy; struct params params; - struct net_device_stats net_stats; struct timer_list watchdog; struct timer_list blink_timer; struct mii_if_info mii; @@ -1482,7 +1482,8 @@ static void e100_set_multicast_list(struct net_device *netdev) static void e100_update_stats(struct nic *nic) { - struct net_device_stats *ns = &nic->net_stats; + struct net_device *dev = nic->netdev; + struct net_device_stats *ns = &dev->stats; struct stats *s = &nic->mem->stats; u32 *complete = (nic->mac < mac_82558_D101_A4) ? &s->fc_xmt_pause : (nic->mac < mac_82559_D101M) ? 
(u32 *)&s->xmt_tco_frames : @@ -1604,7 +1605,8 @@ static void e100_watchdog(unsigned long data) else nic->flags &= ~ich_10h_workaround; - mod_timer(&nic->watchdog, jiffies + E100_WATCHDOG_PERIOD); + mod_timer(&nic->watchdog, + round_jiffies(jiffies + E100_WATCHDOG_PERIOD)); } static void e100_xmit_prepare(struct nic *nic, struct cb *cb, @@ -1659,6 +1661,7 @@ static int e100_xmit_frame(struct sk_buff *skb, struct net_device *netdev) static int e100_tx_clean(struct nic *nic) { + struct net_device *dev = nic->netdev; struct cb *cb; int tx_cleaned = 0; @@ -1673,8 +1676,8 @@ static int e100_tx_clean(struct nic *nic) cb->status); if(likely(cb->skb != NULL)) { - nic->net_stats.tx_packets++; - nic->net_stats.tx_bytes += cb->skb->len; + dev->stats.tx_packets++; + dev->stats.tx_bytes += cb->skb->len; pci_unmap_single(nic->pdev, le32_to_cpu(cb->u.tcb.tbd.buf_addr), @@ -1805,6 +1808,7 @@ static int e100_rx_alloc_skb(struct nic *nic, struct rx *rx) static int e100_rx_indicate(struct nic *nic, struct rx *rx, unsigned int *work_done, unsigned int work_to_do) { + struct net_device *dev = nic->netdev; struct sk_buff *skb = rx->skb; struct rfd *rfd = (struct rfd *)skb->data; u16 rfd_status, actual_size; @@ -1849,8 +1853,8 @@ static int e100_rx_indicate(struct nic *nic, struct rx *rx, nic->rx_over_length_errors++; dev_kfree_skb_any(skb); } else { - nic->net_stats.rx_packets++; - nic->net_stats.rx_bytes += actual_size; + dev->stats.rx_packets++; + dev->stats.rx_bytes += actual_size; nic->netdev->last_rx = jiffies; netif_receive_skb(skb); if(work_done) @@ -1974,35 +1978,31 @@ static irqreturn_t e100_intr(int irq, void *dev_id) if(stat_ack & stat_ack_rnr) nic->ru_running = RU_SUSPENDED; - if(likely(netif_rx_schedule_prep(netdev))) { + if(likely(netif_rx_schedule_prep(netdev, &nic->napi))) { e100_disable_irq(nic); - __netif_rx_schedule(netdev); + __netif_rx_schedule(netdev, &nic->napi); } return IRQ_HANDLED; } -static int e100_poll(struct net_device *netdev, int *budget) +static int e100_poll(struct napi_struct *napi, int budget) { - struct nic *nic = netdev_priv(netdev); - unsigned int work_to_do = min(netdev->quota, *budget); + struct nic *nic = container_of(napi, struct nic, napi); + struct net_device *netdev = nic->netdev; unsigned int work_done = 0; int tx_cleaned; - e100_rx_clean(nic, &work_done, work_to_do); + e100_rx_clean(nic, &work_done, budget); tx_cleaned = e100_tx_clean(nic); /* If no Rx and Tx cleanup work was done, exit polling mode. 
*/ if((!tx_cleaned && (work_done == 0)) || !netif_running(netdev)) { - netif_rx_complete(netdev); + netif_rx_complete(netdev, napi); e100_enable_irq(nic); - return 0; } - *budget -= work_done; - netdev->quota -= work_done; - - return 1; + return work_done; } #ifdef CONFIG_NET_POLL_CONTROLLER @@ -2017,12 +2017,6 @@ static void e100_netpoll(struct net_device *netdev) } #endif -static struct net_device_stats *e100_get_stats(struct net_device *netdev) -{ - struct nic *nic = netdev_priv(netdev); - return &nic->net_stats; -} - static int e100_set_mac_address(struct net_device *netdev, void *p) { struct nic *nic = netdev_priv(netdev); @@ -2071,7 +2065,7 @@ static int e100_up(struct nic *nic) nic->netdev->name, nic->netdev))) goto err_no_irq; netif_wake_queue(nic->netdev); - netif_poll_enable(nic->netdev); + napi_enable(&nic->napi); /* enable ints _after_ enabling poll, preventing a race between * disable ints+schedule */ e100_enable_irq(nic); @@ -2089,7 +2083,7 @@ static int e100_up(struct nic *nic) static void e100_down(struct nic *nic) { /* wait here for poll to complete */ - netif_poll_disable(nic->netdev); + napi_disable(&nic->napi); netif_stop_queue(nic->netdev); e100_hw_reset(nic); free_irq(nic->pdev->irq, nic->netdev); @@ -2380,11 +2374,6 @@ static const char e100_gstrings_test[][ETH_GSTRING_LEN] = { }; #define E100_TEST_LEN sizeof(e100_gstrings_test) / ETH_GSTRING_LEN -static int e100_diag_test_count(struct net_device *netdev) -{ - return E100_TEST_LEN; -} - static void e100_diag_test(struct net_device *netdev, struct ethtool_test *test, u64 *data) { @@ -2447,9 +2436,16 @@ static const char e100_gstrings_stats[][ETH_GSTRING_LEN] = { #define E100_NET_STATS_LEN 21 #define E100_STATS_LEN sizeof(e100_gstrings_stats) / ETH_GSTRING_LEN -static int e100_get_stats_count(struct net_device *netdev) +static int e100_get_sset_count(struct net_device *netdev, int sset) { - return E100_STATS_LEN; + switch (sset) { + case ETH_SS_TEST: + return E100_TEST_LEN; + case ETH_SS_STATS: + return E100_STATS_LEN; + default: + return -EOPNOTSUPP; + } } static void e100_get_ethtool_stats(struct net_device *netdev, @@ -2459,7 +2455,7 @@ static void e100_get_ethtool_stats(struct net_device *netdev, int i; for(i = 0; i < E100_NET_STATS_LEN; i++) - data[i] = ((unsigned long *)&nic->net_stats)[i]; + data[i] = ((unsigned long *)&netdev->stats)[i]; data[i++] = nic->tx_deferred; data[i++] = nic->tx_single_collisions; @@ -2500,12 +2496,11 @@ static const struct ethtool_ops e100_ethtool_ops = { .set_eeprom = e100_set_eeprom, .get_ringparam = e100_get_ringparam, .set_ringparam = e100_set_ringparam, - .self_test_count = e100_diag_test_count, .self_test = e100_diag_test, .get_strings = e100_get_strings, .phys_id = e100_phys_id, - .get_stats_count = e100_get_stats_count, .get_ethtool_stats = e100_get_ethtool_stats, + .get_sset_count = e100_get_sset_count, }; static int e100_do_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) @@ -2554,6 +2549,7 @@ static int __devinit e100_probe(struct pci_dev *pdev, struct net_device *netdev; struct nic *nic; int err; + DECLARE_MAC_BUF(mac); if(!(netdev = alloc_etherdev(sizeof(struct nic)))) { if(((1 << debug) - 1) & NETIF_MSG_PROBE) @@ -2564,7 +2560,6 @@ static int __devinit e100_probe(struct pci_dev *pdev, netdev->open = e100_open; netdev->stop = e100_close; netdev->hard_start_xmit = e100_xmit_frame; - netdev->get_stats = e100_get_stats; netdev->set_multicast_list = e100_set_multicast_list; netdev->set_mac_address = e100_set_mac_address; netdev->change_mtu = e100_change_mtu; @@ 
-2572,14 +2567,13 @@ static int __devinit e100_probe(struct pci_dev *pdev, SET_ETHTOOL_OPS(netdev, &e100_ethtool_ops); netdev->tx_timeout = e100_tx_timeout; netdev->watchdog_timeo = E100_WATCHDOG_PERIOD; - netdev->poll = e100_poll; - netdev->weight = E100_NAPI_WEIGHT; #ifdef CONFIG_NET_POLL_CONTROLLER netdev->poll_controller = e100_netpoll; #endif strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1); nic = netdev_priv(netdev); + netif_napi_add(netdev, &nic->napi, e100_poll, E100_NAPI_WEIGHT); nic->netdev = netdev; nic->pdev = pdev; nic->msg_enable = (1 << debug) - 1; @@ -2607,7 +2601,6 @@ static int __devinit e100_probe(struct pci_dev *pdev, goto err_out_free_res; } - SET_MODULE_OWNER(netdev); SET_NETDEV_DEV(netdev, &pdev->dev); if (use_io) @@ -2688,11 +2681,9 @@ static int __devinit e100_probe(struct pci_dev *pdev, goto err_out_free; } - DPRINTK(PROBE, INFO, "addr 0x%llx, irq %d, " - "MAC addr %02X:%02X:%02X:%02X:%02X:%02X\n", - (unsigned long long)pci_resource_start(pdev, use_io ? 1 : 0), pdev->irq, - netdev->dev_addr[0], netdev->dev_addr[1], netdev->dev_addr[2], - netdev->dev_addr[3], netdev->dev_addr[4], netdev->dev_addr[5]); + DPRINTK(PROBE, INFO, "addr 0x%llx, irq %d, MAC addr %s\n", + (unsigned long long)pci_resource_start(pdev, use_io ? 1 : 0), + pdev->irq, print_mac(mac, netdev->dev_addr)); return 0; @@ -2733,7 +2724,7 @@ static int e100_suspend(struct pci_dev *pdev, pm_message_t state) struct nic *nic = netdev_priv(netdev); if (netif_running(netdev)) - netif_poll_disable(nic->netdev); + napi_disable(&nic->napi); del_timer_sync(&nic->watchdog); netif_carrier_off(nic->netdev); netif_device_detach(netdev); @@ -2779,7 +2770,7 @@ static void e100_shutdown(struct pci_dev *pdev) struct nic *nic = netdev_priv(netdev); if (netif_running(netdev)) - netif_poll_disable(nic->netdev); + napi_disable(&nic->napi); del_timer_sync(&nic->watchdog); netif_carrier_off(nic->netdev); @@ -2804,12 +2795,13 @@ static void e100_shutdown(struct pci_dev *pdev) static pci_ers_result_t e100_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state) { struct net_device *netdev = pci_get_drvdata(pdev); + struct nic *nic = netdev_priv(netdev); /* Similar to calling e100_down(), but avoids adpater I/O. */ netdev->stop(netdev); /* Detach; put netif into state similar to hotplug unplug. 
*/ - netif_poll_enable(netdev); + napi_enable(&nic->napi); netif_device_detach(netdev); pci_disable_device(pdev); diff --git a/drivers/net/e1000/e1000.h b/drivers/net/e1000/e1000.h index 16a6edfeba41d38e9d4603038b3406aefedd84f4..781ed99684894b2ba72cfdfb301c21a79b01f62a 100644 --- a/drivers/net/e1000/e1000.h +++ b/drivers/net/e1000/e1000.h @@ -300,6 +300,7 @@ struct e1000_adapter { int cleaned_count); struct e1000_rx_ring *rx_ring; /* One per active queue */ #ifdef CONFIG_E1000_NAPI + struct napi_struct napi; struct net_device *polling_netdev; /* One per active queue */ #endif int num_tx_queues; diff --git a/drivers/net/e1000/e1000_ethtool.c b/drivers/net/e1000/e1000_ethtool.c index 9ecc3adcf6c189117e038426e0fc0550f08f60bd..6c9a643426f59eba5019393b5c6cf69705a69ab7 100644 --- a/drivers/net/e1000/e1000_ethtool.c +++ b/drivers/net/e1000/e1000_ethtool.c @@ -106,8 +106,7 @@ static const struct e1000_stats e1000_gstrings_stats[] = { }; #define E1000_QUEUE_STATS_LEN 0 -#define E1000_GLOBAL_STATS_LEN \ - sizeof(e1000_gstrings_stats) / sizeof(struct e1000_stats) +#define E1000_GLOBAL_STATS_LEN ARRAY_SIZE(e1000_gstrings_stats) #define E1000_STATS_LEN (E1000_GLOBAL_STATS_LEN + E1000_QUEUE_STATS_LEN) static const char e1000_gstrings_test[][ETH_GSTRING_LEN] = { "Register test (offline)", "Eeprom test (offline)", @@ -619,8 +618,6 @@ e1000_get_drvinfo(struct net_device *netdev, strncpy(drvinfo->fw_version, firmware_version, 32); strncpy(drvinfo->bus_info, pci_name(adapter->pdev), 32); - drvinfo->n_stats = E1000_STATS_LEN; - drvinfo->testinfo_len = E1000_TEST_LEN; drvinfo->regdump_len = e1000_get_regs_len(netdev); drvinfo->eedump_len = e1000_get_eeprom_len(netdev); } @@ -1612,9 +1609,16 @@ e1000_link_test(struct e1000_adapter *adapter, uint64_t *data) } static int -e1000_diag_test_count(struct net_device *netdev) +e1000_get_sset_count(struct net_device *netdev, int sset) { - return E1000_TEST_LEN; + switch (sset) { + case ETH_SS_TEST: + return E1000_TEST_LEN; + case ETH_SS_STATS: + return E1000_STATS_LEN; + default: + return -EOPNOTSUPP; + } } extern void e1000_power_up_phy(struct e1000_adapter *); @@ -1899,12 +1903,6 @@ e1000_nway_reset(struct net_device *netdev) return 0; } -static int -e1000_get_stats_count(struct net_device *netdev) -{ - return E1000_STATS_LEN; -} - static void e1000_get_ethtool_stats(struct net_device *netdev, struct ethtool_stats *stats, uint64_t *data) @@ -1966,16 +1964,13 @@ static const struct ethtool_ops e1000_ethtool_ops = { .set_rx_csum = e1000_set_rx_csum, .get_tx_csum = e1000_get_tx_csum, .set_tx_csum = e1000_set_tx_csum, - .get_sg = ethtool_op_get_sg, .set_sg = ethtool_op_set_sg, - .get_tso = ethtool_op_get_tso, .set_tso = e1000_set_tso, - .self_test_count = e1000_diag_test_count, .self_test = e1000_diag_test, .get_strings = e1000_get_strings, .phys_id = e1000_phys_id, - .get_stats_count = e1000_get_stats_count, .get_ethtool_stats = e1000_get_ethtool_stats, + .get_sset_count = e1000_get_sset_count, }; void e1000_set_ethtool_ops(struct net_device *netdev) diff --git a/drivers/net/e1000/e1000_hw.c b/drivers/net/e1000/e1000_hw.c index 8604adbe351cc07dc2b004896af8c4cd338d9331..8fa0fe4009d56872fa1e30425251275f9be2d77b 100644 --- a/drivers/net/e1000/e1000_hw.c +++ b/drivers/net/e1000/e1000_hw.c @@ -871,10 +871,6 @@ e1000_init_hw(struct e1000_hw *hw) uint32_t ctrl; uint32_t i; int32_t ret_val; - uint16_t pcix_cmd_word; - uint16_t pcix_stat_hi_word; - uint16_t cmd_mmrbc; - uint16_t stat_mmrbc; uint32_t mta_size; uint32_t reg_data; uint32_t ctrl_ext; @@ -964,24 +960,9 @@ 
e1000_init_hw(struct e1000_hw *hw) break; default: /* Workaround for PCI-X problem when BIOS sets MMRBC incorrectly. */ - if (hw->bus_type == e1000_bus_type_pcix) { - e1000_read_pci_cfg(hw, PCIX_COMMAND_REGISTER, &pcix_cmd_word); - e1000_read_pci_cfg(hw, PCIX_STATUS_REGISTER_HI, - &pcix_stat_hi_word); - cmd_mmrbc = (pcix_cmd_word & PCIX_COMMAND_MMRBC_MASK) >> - PCIX_COMMAND_MMRBC_SHIFT; - stat_mmrbc = (pcix_stat_hi_word & PCIX_STATUS_HI_MMRBC_MASK) >> - PCIX_STATUS_HI_MMRBC_SHIFT; - if (stat_mmrbc == PCIX_STATUS_HI_MMRBC_4K) - stat_mmrbc = PCIX_STATUS_HI_MMRBC_2K; - if (cmd_mmrbc > stat_mmrbc) { - pcix_cmd_word &= ~PCIX_COMMAND_MMRBC_MASK; - pcix_cmd_word |= stat_mmrbc << PCIX_COMMAND_MMRBC_SHIFT; - e1000_write_pci_cfg(hw, PCIX_COMMAND_REGISTER, - &pcix_cmd_word); - } - } - break; + if (hw->bus_type == e1000_bus_type_pcix && e1000_pcix_get_mmrbc(hw) > 2048) + e1000_pcix_set_mmrbc(hw, 2048); + break; } /* More time needed for PHY to initialize */ diff --git a/drivers/net/e1000/e1000_hw.h b/drivers/net/e1000/e1000_hw.h index 07f0ea73676e90e4ad00555447108c03b2f4ef60..a2a86c54a75cae76eb83f3c7e13c2f18e5523d68 100644 --- a/drivers/net/e1000/e1000_hw.h +++ b/drivers/net/e1000/e1000_hw.h @@ -424,6 +424,8 @@ void e1000_pci_clear_mwi(struct e1000_hw *hw); void e1000_read_pci_cfg(struct e1000_hw *hw, uint32_t reg, uint16_t * value); void e1000_write_pci_cfg(struct e1000_hw *hw, uint32_t reg, uint16_t * value); int32_t e1000_read_pcie_cap_reg(struct e1000_hw *hw, uint32_t reg, uint16_t *value); +void e1000_pcix_set_mmrbc(struct e1000_hw *hw, int mmrbc); +int e1000_pcix_get_mmrbc(struct e1000_hw *hw); /* Port I/O is only supported on 82544 and newer */ void e1000_io_write(struct e1000_hw *hw, unsigned long port, uint32_t value); int32_t e1000_disable_pciex_master(struct e1000_hw *hw); diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c index e7c8951f47fa94d3e0fae7551f5e6d84ef16676a..047263830e6a36c5c73921f25da4d23deb0253a5 100644 --- a/drivers/net/e1000/e1000_main.c +++ b/drivers/net/e1000/e1000_main.c @@ -166,7 +166,7 @@ static irqreturn_t e1000_intr_msi(int irq, void *data); static boolean_t e1000_clean_tx_irq(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring); #ifdef CONFIG_E1000_NAPI -static int e1000_clean(struct net_device *poll_dev, int *budget); +static int e1000_clean(struct napi_struct *napi, int budget); static boolean_t e1000_clean_rx_irq(struct e1000_adapter *adapter, struct e1000_rx_ring *rx_ring, int *work_done, int work_to_do); @@ -545,7 +545,7 @@ int e1000_up(struct e1000_adapter *adapter) clear_bit(__E1000_DOWN, &adapter->flags); #ifdef CONFIG_E1000_NAPI - netif_poll_enable(adapter->netdev); + napi_enable(&adapter->napi); #endif e1000_irq_enable(adapter); @@ -634,7 +634,7 @@ e1000_down(struct e1000_adapter *adapter) set_bit(__E1000_DOWN, &adapter->flags); #ifdef CONFIG_E1000_NAPI - netif_poll_disable(netdev); + napi_disable(&adapter->napi); #endif e1000_irq_disable(adapter); @@ -872,6 +872,8 @@ e1000_probe(struct pci_dev *pdev, int i, err, pci_using_dac; uint16_t eeprom_data = 0; uint16_t eeprom_apme_mask = E1000_EEPROM_APME; + DECLARE_MAC_BUF(mac); + if ((err = pci_enable_device(pdev))) return err; @@ -897,7 +899,6 @@ e1000_probe(struct pci_dev *pdev, if (!netdev) goto err_alloc_etherdev; - SET_MODULE_OWNER(netdev); SET_NETDEV_DEV(netdev, &pdev->dev); pci_set_drvdata(pdev, netdev); @@ -936,8 +937,7 @@ e1000_probe(struct pci_dev *pdev, netdev->tx_timeout = &e1000_tx_timeout; netdev->watchdog_timeo = 5 * HZ; #ifdef CONFIG_E1000_NAPI - netdev->poll = 
&e1000_clean; - netdev->weight = 64; + netif_napi_add(netdev, &adapter->napi, e1000_clean, 64); #endif netdev->vlan_rx_register = e1000_vlan_rx_register; netdev->vlan_rx_add_vid = e1000_vlan_rx_add_vid; @@ -1134,8 +1134,7 @@ e1000_probe(struct pci_dev *pdev, "32-bit")); } - for (i = 0; i < 6; i++) - printk("%2.2x%c", netdev->dev_addr[i], i == 5 ? '\n' : ':'); + printk("%s\n", print_mac(mac, netdev->dev_addr)); /* reset the hardware with the new settings */ e1000_reset(adapter); @@ -1151,9 +1150,6 @@ e1000_probe(struct pci_dev *pdev, /* tell the stack to leave us alone until e1000_open() is called */ netif_carrier_off(netdev); netif_stop_queue(netdev); -#ifdef CONFIG_E1000_NAPI - netif_poll_disable(netdev); -#endif strcpy(netdev->name, "eth%d"); if ((err = register_netdev(netdev))) @@ -1222,12 +1218,13 @@ e1000_remove(struct pci_dev *pdev) * would have already happened in close and is redundant. */ e1000_release_hw_control(adapter); - unregister_netdev(netdev); #ifdef CONFIG_E1000_NAPI for (i = 0; i < adapter->num_rx_queues; i++) dev_put(&adapter->polling_netdev[i]); #endif + unregister_netdev(netdev); + if (!e1000_check_phy_reset_block(&adapter->hw)) e1000_phy_hw_reset(&adapter->hw); @@ -1325,8 +1322,6 @@ e1000_sw_init(struct e1000_adapter *adapter) #ifdef CONFIG_E1000_NAPI for (i = 0; i < adapter->num_rx_queues; i++) { adapter->polling_netdev[i].priv = adapter; - adapter->polling_netdev[i].poll = &e1000_clean; - adapter->polling_netdev[i].weight = 64; dev_hold(&adapter->polling_netdev[i]); set_bit(__LINK_STATE_START, &adapter->polling_netdev[i].state); } @@ -1443,7 +1438,7 @@ e1000_open(struct net_device *netdev) clear_bit(__E1000_DOWN, &adapter->flags); #ifdef CONFIG_E1000_NAPI - netif_poll_enable(netdev); + napi_enable(&adapter->napi); #endif e1000_irq_enable(adapter); @@ -3266,14 +3261,13 @@ e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev) unsigned int first, max_per_txd = E1000_MAX_DATA_PER_TXD; unsigned int max_txd_pwr = E1000_MAX_TXD_PWR; unsigned int tx_flags = 0; - unsigned int len = skb->len; + unsigned int len = skb->len - skb->data_len; unsigned long flags; - unsigned int nr_frags = 0; - unsigned int mss = 0; + unsigned int nr_frags; + unsigned int mss; int count = 0; int tso; unsigned int f; - len -= skb->data_len; /* This goes back to the question of how to logically map a tx queue * to a flow. 
Right now, performance is impacted slightly negatively @@ -3307,7 +3301,7 @@ e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev) * points to just header, pull a few bytes of payload from * frags into skb->data */ hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); - if (skb->data_len && (hdr_len == (skb->len - skb->data_len))) { + if (skb->data_len && hdr_len == len) { switch (adapter->hw.mac_type) { unsigned int pull_size; case e1000_82544: @@ -3786,12 +3780,12 @@ e1000_intr_msi(int irq, void *data) } #ifdef CONFIG_E1000_NAPI - if (likely(netif_rx_schedule_prep(netdev))) { + if (likely(netif_rx_schedule_prep(netdev, &adapter->napi))) { adapter->total_tx_bytes = 0; adapter->total_tx_packets = 0; adapter->total_rx_bytes = 0; adapter->total_rx_packets = 0; - __netif_rx_schedule(netdev); + __netif_rx_schedule(netdev, &adapter->napi); } else e1000_irq_enable(adapter); #else @@ -3871,12 +3865,12 @@ e1000_intr(int irq, void *data) E1000_WRITE_REG(hw, IMC, ~0); E1000_WRITE_FLUSH(hw); } - if (likely(netif_rx_schedule_prep(netdev))) { + if (likely(netif_rx_schedule_prep(netdev, &adapter->napi))) { adapter->total_tx_bytes = 0; adapter->total_tx_packets = 0; adapter->total_rx_bytes = 0; adapter->total_rx_packets = 0; - __netif_rx_schedule(netdev); + __netif_rx_schedule(netdev, &adapter->napi); } else /* this really should not happen! if it does it is basically a * bug, but not a hard error, so enable ints and continue */ @@ -3924,10 +3918,10 @@ e1000_intr(int irq, void *data) **/ static int -e1000_clean(struct net_device *poll_dev, int *budget) +e1000_clean(struct napi_struct *napi, int budget) { - struct e1000_adapter *adapter; - int work_to_do = min(*budget, poll_dev->quota); + struct e1000_adapter *adapter = container_of(napi, struct e1000_adapter, napi); + struct net_device *poll_dev = adapter->netdev; int tx_cleaned = 0, work_done = 0; /* Must NOT use netdev_priv macro here. */ @@ -3948,23 +3942,19 @@ e1000_clean(struct net_device *poll_dev, int *budget) } adapter->clean_rx(adapter, &adapter->rx_ring[0], - &work_done, work_to_do); - - *budget -= work_done; - poll_dev->quota -= work_done; + &work_done, budget); /* If no Tx and not enough Rx work done, exit the polling mode */ - if ((!tx_cleaned && (work_done == 0)) || + if ((!tx_cleaned && (work_done < budget)) || !netif_running(poll_dev)) { quit_polling: if (likely(adapter->itr_setting & 3)) e1000_set_itr(adapter); - netif_rx_complete(poll_dev); + netif_rx_complete(poll_dev, napi); e1000_irq_enable(adapter); - return 0; } - return 1; + return work_done; } #endif @@ -4912,6 +4902,20 @@ e1000_write_pci_cfg(struct e1000_hw *hw, uint32_t reg, uint16_t *value) pci_write_config_word(adapter->pdev, reg, *value); } +int +e1000_pcix_get_mmrbc(struct e1000_hw *hw) +{ + struct e1000_adapter *adapter = hw->back; + return pcix_get_mmrbc(adapter->pdev); +} + +void +e1000_pcix_set_mmrbc(struct e1000_hw *hw, int mmrbc) +{ + struct e1000_adapter *adapter = hw->back; + pcix_set_mmrbc(adapter->pdev, mmrbc); +} + int32_t e1000_read_pcie_cap_reg(struct e1000_hw *hw, uint32_t reg, uint16_t *value) { diff --git a/drivers/net/e1000e/82571.c b/drivers/net/e1000e/82571.c new file mode 100644 index 0000000000000000000000000000000000000000..cf70522fc85160d9900dc5558c732511b3b453eb --- /dev/null +++ b/drivers/net/e1000e/82571.c @@ -0,0 +1,1351 @@ +/******************************************************************************* + + Intel PRO/1000 Linux driver + Copyright(c) 1999 - 2007 Intel Corporation. 
+ + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + Linux NICS + e1000-devel Mailing List + Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +/* + * 82571EB Gigabit Ethernet Controller + * 82571EB Gigabit Ethernet Controller (Fiber) + * 82572EI Gigabit Ethernet Controller (Copper) + * 82572EI Gigabit Ethernet Controller (Fiber) + * 82572EI Gigabit Ethernet Controller + * 82573V Gigabit Ethernet Controller (Copper) + * 82573E Gigabit Ethernet Controller (Copper) + * 82573L Gigabit Ethernet Controller + */ + +#include +#include +#include + +#include "e1000.h" + +#define ID_LED_RESERVED_F746 0xF746 +#define ID_LED_DEFAULT_82573 ((ID_LED_DEF1_DEF2 << 12) | \ + (ID_LED_OFF1_ON2 << 8) | \ + (ID_LED_DEF1_DEF2 << 4) | \ + (ID_LED_DEF1_DEF2)) + +#define E1000_GCR_L1_ACT_WITHOUT_L0S_RX 0x08000000 + +static s32 e1000_get_phy_id_82571(struct e1000_hw *hw); +static s32 e1000_setup_copper_link_82571(struct e1000_hw *hw); +static s32 e1000_setup_fiber_serdes_link_82571(struct e1000_hw *hw); +static s32 e1000_write_nvm_eewr_82571(struct e1000_hw *hw, u16 offset, + u16 words, u16 *data); +static s32 e1000_fix_nvm_checksum_82571(struct e1000_hw *hw); +static void e1000_initialize_hw_bits_82571(struct e1000_hw *hw); +static s32 e1000_setup_link_82571(struct e1000_hw *hw); +static void e1000_clear_hw_cntrs_82571(struct e1000_hw *hw); + +/** + * e1000_init_phy_params_82571 - Init PHY func ptrs. + * @hw: pointer to the HW structure + * + * This is a function pointer entry point called by the api module. + **/ +static s32 e1000_init_phy_params_82571(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + + if (hw->media_type != e1000_media_type_copper) { + phy->type = e1000_phy_none; + return 0; + } + + phy->addr = 1; + phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT; + phy->reset_delay_us = 100; + + switch (hw->mac.type) { + case e1000_82571: + case e1000_82572: + phy->type = e1000_phy_igp_2; + break; + case e1000_82573: + phy->type = e1000_phy_m88; + break; + default: + return -E1000_ERR_PHY; + break; + } + + /* This can only be done after all function pointers are setup. */ + ret_val = e1000_get_phy_id_82571(hw); + + /* Verify phy id */ + switch (hw->mac.type) { + case e1000_82571: + case e1000_82572: + if (phy->id != IGP01E1000_I_PHY_ID) + return -E1000_ERR_PHY; + break; + case e1000_82573: + if (phy->id != M88E1111_I_PHY_ID) + return -E1000_ERR_PHY; + break; + default: + return -E1000_ERR_PHY; + break; + } + + return 0; +} + +/** + * e1000_init_nvm_params_82571 - Init NVM func ptrs. + * @hw: pointer to the HW structure + * + * This is a function pointer entry point called by the api module. 
+ **/ +static s32 e1000_init_nvm_params_82571(struct e1000_hw *hw) +{ + struct e1000_nvm_info *nvm = &hw->nvm; + u32 eecd = er32(EECD); + u16 size; + + nvm->opcode_bits = 8; + nvm->delay_usec = 1; + switch (nvm->override) { + case e1000_nvm_override_spi_large: + nvm->page_size = 32; + nvm->address_bits = 16; + break; + case e1000_nvm_override_spi_small: + nvm->page_size = 8; + nvm->address_bits = 8; + break; + default: + nvm->page_size = eecd & E1000_EECD_ADDR_BITS ? 32 : 8; + nvm->address_bits = eecd & E1000_EECD_ADDR_BITS ? 16 : 8; + break; + } + + switch (hw->mac.type) { + case e1000_82573: + if (((eecd >> 15) & 0x3) == 0x3) { + nvm->type = e1000_nvm_flash_hw; + nvm->word_size = 2048; + /* Autonomous Flash update bit must be cleared due + * to Flash update issue. + */ + eecd &= ~E1000_EECD_AUPDEN; + ew32(EECD, eecd); + break; + } + /* Fall Through */ + default: + nvm->type = e1000_nvm_eeprom_spi; + size = (u16)((eecd & E1000_EECD_SIZE_EX_MASK) >> + E1000_EECD_SIZE_EX_SHIFT); + /* Added to a constant, "size" becomes the left-shift value + * for setting word_size. + */ + size += NVM_WORD_SIZE_BASE_SHIFT; + nvm->word_size = 1 << size; + break; + } + + return 0; +} + +/** + * e1000_init_mac_params_82571 - Init MAC func ptrs. + * @hw: pointer to the HW structure + * + * This is a function pointer entry point called by the api module. + **/ +static s32 e1000_init_mac_params_82571(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + struct e1000_mac_info *mac = &hw->mac; + struct e1000_mac_operations *func = &mac->ops; + + /* Set media type */ + switch (adapter->pdev->device) { + case E1000_DEV_ID_82571EB_FIBER: + case E1000_DEV_ID_82572EI_FIBER: + case E1000_DEV_ID_82571EB_QUAD_FIBER: + hw->media_type = e1000_media_type_fiber; + break; + case E1000_DEV_ID_82571EB_SERDES: + case E1000_DEV_ID_82572EI_SERDES: + hw->media_type = e1000_media_type_internal_serdes; + break; + default: + hw->media_type = e1000_media_type_copper; + break; + } + + /* Set mta register count */ + mac->mta_reg_count = 128; + /* Set rar entry count */ + mac->rar_entry_count = E1000_RAR_ENTRIES; + /* Set if manageability features are enabled. */ + mac->arc_subsystem_valid = + (er32(FWSM) & E1000_FWSM_MODE_MASK) ? 
1 : 0; + + /* check for link */ + switch (hw->media_type) { + case e1000_media_type_copper: + func->setup_physical_interface = e1000_setup_copper_link_82571; + func->check_for_link = e1000e_check_for_copper_link; + func->get_link_up_info = e1000e_get_speed_and_duplex_copper; + break; + case e1000_media_type_fiber: + func->setup_physical_interface = e1000_setup_fiber_serdes_link_82571; + func->check_for_link = e1000e_check_for_fiber_link; + func->get_link_up_info = e1000e_get_speed_and_duplex_fiber_serdes; + break; + case e1000_media_type_internal_serdes: + func->setup_physical_interface = e1000_setup_fiber_serdes_link_82571; + func->check_for_link = e1000e_check_for_serdes_link; + func->get_link_up_info = e1000e_get_speed_and_duplex_fiber_serdes; + break; + default: + return -E1000_ERR_CONFIG; + break; + } + + return 0; +} + +static s32 e1000_get_invariants_82571(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + static int global_quad_port_a; /* global port a indication */ + struct pci_dev *pdev = adapter->pdev; + u16 eeprom_data = 0; + int is_port_b = er32(STATUS) & E1000_STATUS_FUNC_1; + s32 rc; + + rc = e1000_init_mac_params_82571(adapter); + if (rc) + return rc; + + rc = e1000_init_nvm_params_82571(hw); + if (rc) + return rc; + + rc = e1000_init_phy_params_82571(hw); + if (rc) + return rc; + + /* tag quad port adapters first, it's used below */ + switch (pdev->device) { + case E1000_DEV_ID_82571EB_QUAD_COPPER: + case E1000_DEV_ID_82571EB_QUAD_FIBER: + case E1000_DEV_ID_82571EB_QUAD_COPPER_LP: + adapter->flags |= FLAG_IS_QUAD_PORT; + /* mark the first port */ + if (global_quad_port_a == 0) + adapter->flags |= FLAG_IS_QUAD_PORT_A; + /* Reset for multiple quad port adapters */ + global_quad_port_a++; + if (global_quad_port_a == 4) + global_quad_port_a = 0; + break; + default: + break; + } + + switch (adapter->hw.mac.type) { + case e1000_82571: + /* these dual ports don't have WoL on port B at all */ + if (((pdev->device == E1000_DEV_ID_82571EB_FIBER) || + (pdev->device == E1000_DEV_ID_82571EB_SERDES) || + (pdev->device == E1000_DEV_ID_82571EB_COPPER)) && + (is_port_b)) + adapter->flags &= ~FLAG_HAS_WOL; + /* quad ports only support WoL on port A */ + if (adapter->flags & FLAG_IS_QUAD_PORT && + (!adapter->flags & FLAG_IS_QUAD_PORT_A)) + adapter->flags &= ~FLAG_HAS_WOL; + break; + + case e1000_82573: + if (pdev->device == E1000_DEV_ID_82573L) { + e1000_read_nvm(&adapter->hw, NVM_INIT_3GIO_3, 1, + &eeprom_data); + if (eeprom_data & NVM_WORD1A_ASPM_MASK) + adapter->flags &= ~FLAG_HAS_JUMBO_FRAMES; + } + break; + default: + break; + } + + return 0; +} + +/** + * e1000_get_phy_id_82571 - Retrieve the PHY ID and revision + * @hw: pointer to the HW structure + * + * Reads the PHY registers and stores the PHY ID and possibly the PHY + * revision in the hardware structure. + **/ +static s32 e1000_get_phy_id_82571(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + + switch (hw->mac.type) { + case e1000_82571: + case e1000_82572: + /* The 82571 firmware may still be configuring the PHY. + * In this case, we cannot access the PHY until the + * configuration is done. So we explicitly set the + * PHY ID. 
*/ + phy->id = IGP01E1000_I_PHY_ID; + break; + case e1000_82573: + return e1000e_get_phy_id(hw); + break; + default: + return -E1000_ERR_PHY; + break; + } + + return 0; +} + +/** + * e1000_get_hw_semaphore_82571 - Acquire hardware semaphore + * @hw: pointer to the HW structure + * + * Acquire the HW semaphore to access the PHY or NVM + **/ +static s32 e1000_get_hw_semaphore_82571(struct e1000_hw *hw) +{ + u32 swsm; + s32 timeout = hw->nvm.word_size + 1; + s32 i = 0; + + /* Get the FW semaphore. */ + for (i = 0; i < timeout; i++) { + swsm = er32(SWSM); + ew32(SWSM, swsm | E1000_SWSM_SWESMBI); + + /* Semaphore acquired if bit latched */ + if (er32(SWSM) & E1000_SWSM_SWESMBI) + break; + + udelay(50); + } + + if (i == timeout) { + /* Release semaphores */ + e1000e_put_hw_semaphore(hw); + hw_dbg(hw, "Driver can't access the NVM\n"); + return -E1000_ERR_NVM; + } + + return 0; +} + +/** + * e1000_put_hw_semaphore_82571 - Release hardware semaphore + * @hw: pointer to the HW structure + * + * Release hardware semaphore used to access the PHY or NVM + **/ +static void e1000_put_hw_semaphore_82571(struct e1000_hw *hw) +{ + u32 swsm; + + swsm = er32(SWSM); + + swsm &= ~E1000_SWSM_SWESMBI; + + ew32(SWSM, swsm); +} + +/** + * e1000_acquire_nvm_82571 - Request for access to the EEPROM + * @hw: pointer to the HW structure + * + * To gain access to the EEPROM, first we must obtain a hardware semaphore. + * Then for non-82573 hardware, set the EEPROM access request bit and wait + * for EEPROM access grant bit. If the access grant bit is not set, release + * hardware semaphore. + **/ +static s32 e1000_acquire_nvm_82571(struct e1000_hw *hw) +{ + s32 ret_val; + + ret_val = e1000_get_hw_semaphore_82571(hw); + if (ret_val) + return ret_val; + + if (hw->mac.type != e1000_82573) + ret_val = e1000e_acquire_nvm(hw); + + if (ret_val) + e1000_put_hw_semaphore_82571(hw); + + return ret_val; +} + +/** + * e1000_release_nvm_82571 - Release exclusive access to EEPROM + * @hw: pointer to the HW structure + * + * Stop any current commands to the EEPROM and clear the EEPROM request bit. + **/ +static void e1000_release_nvm_82571(struct e1000_hw *hw) +{ + e1000e_release_nvm(hw); + e1000_put_hw_semaphore_82571(hw); +} + +/** + * e1000_write_nvm_82571 - Write to EEPROM using appropriate interface + * @hw: pointer to the HW structure + * @offset: offset within the EEPROM to be written to + * @words: number of words to write + * @data: 16 bit word(s) to be written to the EEPROM + * + * For non-82573 silicon, write data to EEPROM at offset using SPI interface. + * + * If e1000e_update_nvm_checksum is not called after this function, the + * EEPROM will most likley contain an invalid checksum. + **/ +static s32 e1000_write_nvm_82571(struct e1000_hw *hw, u16 offset, u16 words, + u16 *data) +{ + s32 ret_val; + + switch (hw->mac.type) { + case e1000_82573: + ret_val = e1000_write_nvm_eewr_82571(hw, offset, words, data); + break; + case e1000_82571: + case e1000_82572: + ret_val = e1000e_write_nvm_spi(hw, offset, words, data); + break; + default: + ret_val = -E1000_ERR_NVM; + break; + } + + return ret_val; +} + +/** + * e1000_update_nvm_checksum_82571 - Update EEPROM checksum + * @hw: pointer to the HW structure + * + * Updates the EEPROM checksum by reading/adding each word of the EEPROM + * up to the checksum. Then calculates the EEPROM checksum and writes the + * value to the EEPROM. 
+ **/ +static s32 e1000_update_nvm_checksum_82571(struct e1000_hw *hw) +{ + u32 eecd; + s32 ret_val; + u16 i; + + ret_val = e1000e_update_nvm_checksum_generic(hw); + if (ret_val) + return ret_val; + + /* If our nvm is an EEPROM, then we're done + * otherwise, commit the checksum to the flash NVM. */ + if (hw->nvm.type != e1000_nvm_flash_hw) + return ret_val; + + /* Check for pending operations. */ + for (i = 0; i < E1000_FLASH_UPDATES; i++) { + msleep(1); + if ((er32(EECD) & E1000_EECD_FLUPD) == 0) + break; + } + + if (i == E1000_FLASH_UPDATES) + return -E1000_ERR_NVM; + + /* Reset the firmware if using STM opcode. */ + if ((er32(FLOP) & 0xFF00) == E1000_STM_OPCODE) { + /* The enabling of and the actual reset must be done + * in two write cycles. + */ + ew32(HICR, E1000_HICR_FW_RESET_ENABLE); + e1e_flush(); + ew32(HICR, E1000_HICR_FW_RESET); + } + + /* Commit the write to flash */ + eecd = er32(EECD) | E1000_EECD_FLUPD; + ew32(EECD, eecd); + + for (i = 0; i < E1000_FLASH_UPDATES; i++) { + msleep(1); + if ((er32(EECD) & E1000_EECD_FLUPD) == 0) + break; + } + + if (i == E1000_FLASH_UPDATES) + return -E1000_ERR_NVM; + + return 0; +} + +/** + * e1000_validate_nvm_checksum_82571 - Validate EEPROM checksum + * @hw: pointer to the HW structure + * + * Calculates the EEPROM checksum by reading/adding each word of the EEPROM + * and then verifies that the sum of the EEPROM is equal to 0xBABA. + **/ +static s32 e1000_validate_nvm_checksum_82571(struct e1000_hw *hw) +{ + if (hw->nvm.type == e1000_nvm_flash_hw) + e1000_fix_nvm_checksum_82571(hw); + + return e1000e_validate_nvm_checksum_generic(hw); +} + +/** + * e1000_write_nvm_eewr_82571 - Write to EEPROM for 82573 silicon + * @hw: pointer to the HW structure + * @offset: offset within the EEPROM to be written to + * @words: number of words to write + * @data: 16 bit word(s) to be written to the EEPROM + * + * After checking for invalid values, poll the EEPROM to ensure the previous + * command has completed before trying to write the next word. After write + * poll for completion. + * + * If e1000e_update_nvm_checksum is not called after this function, the + * EEPROM will most likley contain an invalid checksum. + **/ +static s32 e1000_write_nvm_eewr_82571(struct e1000_hw *hw, u16 offset, + u16 words, u16 *data) +{ + struct e1000_nvm_info *nvm = &hw->nvm; + u32 i; + u32 eewr = 0; + s32 ret_val = 0; + + /* A check for invalid values: offset too large, too many words, + * and not enough words. */ + if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) || + (words == 0)) { + hw_dbg(hw, "nvm parameter(s) out of bounds\n"); + return -E1000_ERR_NVM; + } + + for (i = 0; i < words; i++) { + eewr = (data[i] << E1000_NVM_RW_REG_DATA) | + ((offset+i) << E1000_NVM_RW_ADDR_SHIFT) | + E1000_NVM_RW_REG_START; + + ret_val = e1000e_poll_eerd_eewr_done(hw, E1000_NVM_POLL_WRITE); + if (ret_val) + break; + + ew32(EEWR, eewr); + + ret_val = e1000e_poll_eerd_eewr_done(hw, E1000_NVM_POLL_WRITE); + if (ret_val) + break; + } + + return ret_val; +} + +/** + * e1000_get_cfg_done_82571 - Poll for configuration done + * @hw: pointer to the HW structure + * + * Reads the management control register for the config done bit to be set. 
+ **/ +static s32 e1000_get_cfg_done_82571(struct e1000_hw *hw) +{ + s32 timeout = PHY_CFG_TIMEOUT; + + while (timeout) { + if (er32(EEMNGCTL) & + E1000_NVM_CFG_DONE_PORT_0) + break; + msleep(1); + timeout--; + } + if (!timeout) { + hw_dbg(hw, "MNG configuration cycle has not completed.\n"); + return -E1000_ERR_RESET; + } + + return 0; +} + +/** + * e1000_set_d0_lplu_state_82571 - Set Low Power Linkup D0 state + * @hw: pointer to the HW structure + * @active: TRUE to enable LPLU, FALSE to disable + * + * Sets the LPLU D0 state according to the active flag. When activating LPLU + * this function also disables smart speed and vice versa. LPLU will not be + * activated unless the device autonegotiation advertisement meets standards + * of either 10 or 10/100 or 10/100/1000 at all duplexes. This is a function + * pointer entry point only called by PHY setup routines. + **/ +static s32 e1000_set_d0_lplu_state_82571(struct e1000_hw *hw, bool active) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 data; + + ret_val = e1e_rphy(hw, IGP02E1000_PHY_POWER_MGMT, &data); + if (ret_val) + return ret_val; + + if (active) { + data |= IGP02E1000_PM_D0_LPLU; + ret_val = e1e_wphy(hw, IGP02E1000_PHY_POWER_MGMT, data); + if (ret_val) + return ret_val; + + /* When LPLU is enabled, we should disable SmartSpeed */ + ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG, &data); + data &= ~IGP01E1000_PSCFR_SMART_SPEED; + ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG, data); + if (ret_val) + return ret_val; + } else { + data &= ~IGP02E1000_PM_D0_LPLU; + ret_val = e1e_wphy(hw, IGP02E1000_PHY_POWER_MGMT, data); + /* LPLU and SmartSpeed are mutually exclusive. LPLU is used + * during Dx states where the power conservation is most + * important. During driver activity we should enable + * SmartSpeed, so performance is maintained. */ + if (phy->smart_speed == e1000_smart_speed_on) { + ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG, + &data); + if (ret_val) + return ret_val; + + data |= IGP01E1000_PSCFR_SMART_SPEED; + ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG, + data); + if (ret_val) + return ret_val; + } else if (phy->smart_speed == e1000_smart_speed_off) { + ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG, + &data); + if (ret_val) + return ret_val; + + data &= ~IGP01E1000_PSCFR_SMART_SPEED; + ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG, + data); + if (ret_val) + return ret_val; + } + } + + return 0; +} + +/** + * e1000_reset_hw_82571 - Reset hardware + * @hw: pointer to the HW structure + * + * This resets the hardware into a known state. This is a + * function pointer entry point called by the api module. + **/ +static s32 e1000_reset_hw_82571(struct e1000_hw *hw) +{ + u32 ctrl; + u32 extcnf_ctrl; + u32 ctrl_ext; + u32 icr; + s32 ret_val; + u16 i = 0; + + /* Prevent the PCI-E bus from sticking if there is no TLP connection + * on the last TLP read/write transaction when MAC is reset. + */ + ret_val = e1000e_disable_pcie_master(hw); + if (ret_val) + hw_dbg(hw, "PCI-E Master disable polling has failed.\n"); + + hw_dbg(hw, "Masking off all interrupts\n"); + ew32(IMC, 0xffffffff); + + ew32(RCTL, 0); + ew32(TCTL, E1000_TCTL_PSP); + e1e_flush(); + + msleep(10); + + /* Must acquire the MDIO ownership before MAC reset. + * Ownership defaults to firmware after a reset. 
*/ + if (hw->mac.type == e1000_82573) { + extcnf_ctrl = er32(EXTCNF_CTRL); + extcnf_ctrl |= E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP; + + do { + ew32(EXTCNF_CTRL, extcnf_ctrl); + extcnf_ctrl = er32(EXTCNF_CTRL); + + if (extcnf_ctrl & E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP) + break; + + extcnf_ctrl |= E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP; + + msleep(2); + i++; + } while (i < MDIO_OWNERSHIP_TIMEOUT); + } + + ctrl = er32(CTRL); + + hw_dbg(hw, "Issuing a global reset to MAC\n"); + ew32(CTRL, ctrl | E1000_CTRL_RST); + + if (hw->nvm.type == e1000_nvm_flash_hw) { + udelay(10); + ctrl_ext = er32(CTRL_EXT); + ctrl_ext |= E1000_CTRL_EXT_EE_RST; + ew32(CTRL_EXT, ctrl_ext); + e1e_flush(); + } + + ret_val = e1000e_get_auto_rd_done(hw); + if (ret_val) + /* We don't want to continue accessing MAC registers. */ + return ret_val; + + /* Phy configuration from NVM just starts after EECD_AUTO_RD is set. + * Need to wait for Phy configuration completion before accessing + * NVM and Phy. + */ + if (hw->mac.type == e1000_82573) + msleep(25); + + /* Clear any pending interrupt events. */ + ew32(IMC, 0xffffffff); + icr = er32(ICR); + + return 0; +} + +/** + * e1000_init_hw_82571 - Initialize hardware + * @hw: pointer to the HW structure + * + * This inits the hardware readying it for operation. + **/ +static s32 e1000_init_hw_82571(struct e1000_hw *hw) +{ + struct e1000_mac_info *mac = &hw->mac; + u32 reg_data; + s32 ret_val; + u16 i; + u16 rar_count = mac->rar_entry_count; + + e1000_initialize_hw_bits_82571(hw); + + /* Initialize identification LED */ + ret_val = e1000e_id_led_init(hw); + if (ret_val) { + hw_dbg(hw, "Error initializing identification LED\n"); + return ret_val; + } + + /* Disabling VLAN filtering */ + hw_dbg(hw, "Initializing the IEEE VLAN\n"); + e1000e_clear_vfta(hw); + + /* Setup the receive address. */ + /* If, however, a locally administered address was assigned to the + * 82571, we must reserve a RAR for it to work around an issue where + * resetting one port will reload the MAC on the other port. + */ + if (e1000e_get_laa_state_82571(hw)) + rar_count--; + e1000e_init_rx_addrs(hw, rar_count); + + /* Zero out the Multicast HASH table */ + hw_dbg(hw, "Zeroing the MTA\n"); + for (i = 0; i < mac->mta_reg_count; i++) + E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, 0); + + /* Setup link and flow control */ + ret_val = e1000_setup_link_82571(hw); + + /* Set the transmit descriptor write-back policy */ + reg_data = er32(TXDCTL); + reg_data = (reg_data & ~E1000_TXDCTL_WTHRESH) | + E1000_TXDCTL_FULL_TX_DESC_WB | + E1000_TXDCTL_COUNT_DESC; + ew32(TXDCTL, reg_data); + + /* ...for both queues. */ + if (mac->type != e1000_82573) { + reg_data = er32(TXDCTL1); + reg_data = (reg_data & ~E1000_TXDCTL_WTHRESH) | + E1000_TXDCTL_FULL_TX_DESC_WB | + E1000_TXDCTL_COUNT_DESC; + ew32(TXDCTL1, reg_data); + } else { + e1000e_enable_tx_pkt_filtering(hw); + reg_data = er32(GCR); + reg_data |= E1000_GCR_L1_ACT_WITHOUT_L0S_RX; + ew32(GCR, reg_data); + } + + /* Clear all of the statistics registers (clear on read). It is + * important that we do this after we have tried to establish link + * because the symbol error count will increment wildly if there + * is no link. + */ + e1000_clear_hw_cntrs_82571(hw); + + return ret_val; +} + +/** + * e1000_initialize_hw_bits_82571 - Initialize hardware-dependent bits + * @hw: pointer to the HW structure + * + * Initializes required hardware-dependent bits needed for normal operation. 
+ **/ +static void e1000_initialize_hw_bits_82571(struct e1000_hw *hw) +{ + u32 reg; + + /* Transmit Descriptor Control 0 */ + reg = er32(TXDCTL); + reg |= (1 << 22); + ew32(TXDCTL, reg); + + /* Transmit Descriptor Control 1 */ + reg = er32(TXDCTL1); + reg |= (1 << 22); + ew32(TXDCTL1, reg); + + /* Transmit Arbitration Control 0 */ + reg = er32(TARC0); + reg &= ~(0xF << 27); /* 30:27 */ + switch (hw->mac.type) { + case e1000_82571: + case e1000_82572: + reg |= (1 << 23) | (1 << 24) | (1 << 25) | (1 << 26); + break; + default: + break; + } + ew32(TARC0, reg); + + /* Transmit Arbitration Control 1 */ + reg = er32(TARC1); + switch (hw->mac.type) { + case e1000_82571: + case e1000_82572: + reg &= ~((1 << 29) | (1 << 30)); + reg |= (1 << 22) | (1 << 24) | (1 << 25) | (1 << 26); + if (er32(TCTL) & E1000_TCTL_MULR) + reg &= ~(1 << 28); + else + reg |= (1 << 28); + ew32(TARC1, reg); + break; + default: + break; + } + + /* Device Control */ + if (hw->mac.type == e1000_82573) { + reg = er32(CTRL); + reg &= ~(1 << 29); + ew32(CTRL, reg); + } + + /* Extended Device Control */ + if (hw->mac.type == e1000_82573) { + reg = er32(CTRL_EXT); + reg &= ~(1 << 23); + reg |= (1 << 22); + ew32(CTRL_EXT, reg); + } +} + +/** + * e1000e_clear_vfta - Clear VLAN filter table + * @hw: pointer to the HW structure + * + * Clears the register array which contains the VLAN filter table by + * setting all the values to 0. + **/ +void e1000e_clear_vfta(struct e1000_hw *hw) +{ + u32 offset; + u32 vfta_value = 0; + u32 vfta_offset = 0; + u32 vfta_bit_in_reg = 0; + + if (hw->mac.type == e1000_82573) { + if (hw->mng_cookie.vlan_id != 0) { + /* The VFTA is a 4096b bit-field, each identifying + * a single VLAN ID. The following operations + * determine which 32b entry (i.e. offset) into the + * array we want to set the VLAN ID (i.e. bit) of + * the manageability unit. + */ + vfta_offset = (hw->mng_cookie.vlan_id >> + E1000_VFTA_ENTRY_SHIFT) & + E1000_VFTA_ENTRY_MASK; + vfta_bit_in_reg = 1 << (hw->mng_cookie.vlan_id & + E1000_VFTA_ENTRY_BIT_SHIFT_MASK); + } + } + for (offset = 0; offset < E1000_VLAN_FILTER_TBL_SIZE; offset++) { + /* If the offset we want to clear is the same offset of the + * manageability VLAN ID, then clear all bits except that of + * the manageability unit. + */ + vfta_value = (offset == vfta_offset) ? vfta_bit_in_reg : 0; + E1000_WRITE_REG_ARRAY(hw, E1000_VFTA, offset, vfta_value); + e1e_flush(); + } +} + +/** + * e1000_mc_addr_list_update_82571 - Update Multicast addresses + * @hw: pointer to the HW structure + * @mc_addr_list: array of multicast addresses to program + * @mc_addr_count: number of multicast addresses to program + * @rar_used_count: the first RAR register free to program + * @rar_count: total number of supported Receive Address Registers + * + * Updates the Receive Address Registers and Multicast Table Array. + * The caller must have a packed mc_addr_list of multicast addresses. + * The parameter rar_count will usually be hw->mac.rar_entry_count + * unless there are workarounds that change this. 
+ **/ +static void e1000_mc_addr_list_update_82571(struct e1000_hw *hw, + u8 *mc_addr_list, + u32 mc_addr_count, + u32 rar_used_count, + u32 rar_count) +{ + if (e1000e_get_laa_state_82571(hw)) + rar_count--; + + e1000e_mc_addr_list_update_generic(hw, mc_addr_list, mc_addr_count, + rar_used_count, rar_count); +} + +/** + * e1000_setup_link_82571 - Setup flow control and link settings + * @hw: pointer to the HW structure + * + * Determines which flow control settings to use, then configures flow + * control. Calls the appropriate media-specific link configuration + * function. Assuming the adapter has a valid link partner, a valid link + * should be established. Assumes the hardware has previously been reset + * and the transmitter and receiver are not enabled. + **/ +static s32 e1000_setup_link_82571(struct e1000_hw *hw) +{ + /* 82573 does not have a word in the NVM to determine + * the default flow control setting, so we explicitly + * set it to full. + */ + if (hw->mac.type == e1000_82573) + hw->mac.fc = e1000_fc_full; + + return e1000e_setup_link(hw); +} + +/** + * e1000_setup_copper_link_82571 - Configure copper link settings + * @hw: pointer to the HW structure + * + * Configures the link for auto-neg or forced speed and duplex. Then we check + * for link, once link is established calls to configure collision distance + * and flow control are called. + **/ +static s32 e1000_setup_copper_link_82571(struct e1000_hw *hw) +{ + u32 ctrl; + u32 led_ctrl; + s32 ret_val; + + ctrl = er32(CTRL); + ctrl |= E1000_CTRL_SLU; + ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX); + ew32(CTRL, ctrl); + + switch (hw->phy.type) { + case e1000_phy_m88: + ret_val = e1000e_copper_link_setup_m88(hw); + break; + case e1000_phy_igp_2: + ret_val = e1000e_copper_link_setup_igp(hw); + /* Setup activity LED */ + led_ctrl = er32(LEDCTL); + led_ctrl &= IGP_ACTIVITY_LED_MASK; + led_ctrl |= (IGP_ACTIVITY_LED_ENABLE | IGP_LED3_MODE); + ew32(LEDCTL, led_ctrl); + break; + default: + return -E1000_ERR_PHY; + break; + } + + if (ret_val) + return ret_val; + + ret_val = e1000e_setup_copper_link(hw); + + return ret_val; +} + +/** + * e1000_setup_fiber_serdes_link_82571 - Setup link for fiber/serdes + * @hw: pointer to the HW structure + * + * Configures collision distance and flow control for fiber and serdes links. + * Upon successful setup, poll for link. + **/ +static s32 e1000_setup_fiber_serdes_link_82571(struct e1000_hw *hw) +{ + switch (hw->mac.type) { + case e1000_82571: + case e1000_82572: + /* If SerDes loopback mode is entered, there is no form + * of reset to take the adapter out of that mode. So we + * have to explicitly take the adapter out of loopback + * mode. This prevents drivers from twidling their thumbs + * if another tool failed to take it out of loopback mode. + */ + ew32(SCTL, + E1000_SCTL_DISABLE_SERDES_LOOPBACK); + break; + default: + break; + } + + return e1000e_setup_fiber_serdes_link(hw); +} + +/** + * e1000_valid_led_default_82571 - Verify a valid default LED config + * @hw: pointer to the HW structure + * @data: pointer to the NVM (EEPROM) + * + * Read the EEPROM for the current default LED configuration. If the + * LED configuration is not valid, set to a valid LED configuration. 
+ **/ +static s32 e1000_valid_led_default_82571(struct e1000_hw *hw, u16 *data) +{ + s32 ret_val; + + ret_val = e1000_read_nvm(hw, NVM_ID_LED_SETTINGS, 1, data); + if (ret_val) { + hw_dbg(hw, "NVM Read Error\n"); + return ret_val; + } + + if (hw->mac.type == e1000_82573 && + *data == ID_LED_RESERVED_F746) + *data = ID_LED_DEFAULT_82573; + else if (*data == ID_LED_RESERVED_0000 || + *data == ID_LED_RESERVED_FFFF) + *data = ID_LED_DEFAULT; + + return 0; +} + +/** + * e1000e_get_laa_state_82571 - Get locally administered address state + * @hw: pointer to the HW structure + * + * Retrieve and return the current locally administered address state. + **/ +bool e1000e_get_laa_state_82571(struct e1000_hw *hw) +{ + if (hw->mac.type != e1000_82571) + return 0; + + return hw->dev_spec.e82571.laa_is_present; +} + +/** + * e1000e_set_laa_state_82571 - Set locally administered address state + * @hw: pointer to the HW structure + * @state: enable/disable locally administered address + * + * Enable/Disable the current locally administered address state. + **/ +void e1000e_set_laa_state_82571(struct e1000_hw *hw, bool state) +{ + if (hw->mac.type != e1000_82571) + return; + + hw->dev_spec.e82571.laa_is_present = state; + + /* If workaround is activated... */ + if (state) + /* Hold a copy of the LAA in RAR[14] This is done so that + * between the time RAR[0] gets clobbered and the time it + * gets fixed, the actual LAA is in one of the RARs and no + * incoming packets directed to this port are dropped. + * Eventually the LAA will be in RAR[0] and RAR[14]. + */ + e1000e_rar_set(hw, hw->mac.addr, hw->mac.rar_entry_count - 1); +} + +/** + * e1000_fix_nvm_checksum_82571 - Fix EEPROM checksum + * @hw: pointer to the HW structure + * + * Verifies that the EEPROM has completed the update. After updating the + * EEPROM, we need to check bit 15 in word 0x23 for the checksum fix. If + * the checksum fix is not implemented, we need to set the bit and update + * the checksum. Otherwise, if bit 15 is set and the checksum is incorrect, + * we need to return bad checksum. + **/ +static s32 e1000_fix_nvm_checksum_82571(struct e1000_hw *hw) +{ + struct e1000_nvm_info *nvm = &hw->nvm; + s32 ret_val; + u16 data; + + if (nvm->type != e1000_nvm_flash_hw) + return 0; + + /* Check bit 4 of word 10h. If it is 0, firmware is done updating + * 10h-12h. Checksum may need to be fixed. + */ + ret_val = e1000_read_nvm(hw, 0x10, 1, &data); + if (ret_val) + return ret_val; + + if (!(data & 0x10)) { + /* Read 0x23 and check bit 15. This bit is a 1 + * when the checksum has already been fixed. If + * the checksum is still wrong and this bit is a + * 1, we need to return bad checksum. Otherwise, + * we need to set this bit to a 1 and update the + * checksum. + */ + ret_val = e1000_read_nvm(hw, 0x23, 1, &data); + if (ret_val) + return ret_val; + + if (!(data & 0x8000)) { + data |= 0x8000; + ret_val = e1000_write_nvm(hw, 0x23, 1, &data); + if (ret_val) + return ret_val; + ret_val = e1000e_update_nvm_checksum(hw); + } + } + + return 0; +} + +/** + * e1000_clear_hw_cntrs_82571 - Clear device specific hardware counters + * @hw: pointer to the HW structure + * + * Clears the hardware counters by reading the counter registers.
+ **/ +static void e1000_clear_hw_cntrs_82571(struct e1000_hw *hw) +{ + u32 temp; + + e1000e_clear_hw_cntrs_base(hw); + + temp = er32(PRC64); + temp = er32(PRC127); + temp = er32(PRC255); + temp = er32(PRC511); + temp = er32(PRC1023); + temp = er32(PRC1522); + temp = er32(PTC64); + temp = er32(PTC127); + temp = er32(PTC255); + temp = er32(PTC511); + temp = er32(PTC1023); + temp = er32(PTC1522); + + temp = er32(ALGNERRC); + temp = er32(RXERRC); + temp = er32(TNCRS); + temp = er32(CEXTERR); + temp = er32(TSCTC); + temp = er32(TSCTFC); + + temp = er32(MGTPRC); + temp = er32(MGTPDC); + temp = er32(MGTPTC); + + temp = er32(IAC); + temp = er32(ICRXOC); + + temp = er32(ICRXPTC); + temp = er32(ICRXATC); + temp = er32(ICTXPTC); + temp = er32(ICTXATC); + temp = er32(ICTXQEC); + temp = er32(ICTXQMTC); + temp = er32(ICRXDMTC); +} + +static struct e1000_mac_operations e82571_mac_ops = { + .mng_mode_enab = E1000_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT, + /* .check_for_link: media type dependent */ + .cleanup_led = e1000e_cleanup_led_generic, + .clear_hw_cntrs = e1000_clear_hw_cntrs_82571, + .get_bus_info = e1000e_get_bus_info_pcie, + /* .get_link_up_info: media type dependent */ + .led_on = e1000e_led_on_generic, + .led_off = e1000e_led_off_generic, + .mc_addr_list_update = e1000_mc_addr_list_update_82571, + .reset_hw = e1000_reset_hw_82571, + .init_hw = e1000_init_hw_82571, + .setup_link = e1000_setup_link_82571, + /* .setup_physical_interface: media type dependent */ +}; + +static struct e1000_phy_operations e82_phy_ops_igp = { + .acquire_phy = e1000_get_hw_semaphore_82571, + .check_reset_block = e1000e_check_reset_block_generic, + .commit_phy = NULL, + .force_speed_duplex = e1000e_phy_force_speed_duplex_igp, + .get_cfg_done = e1000_get_cfg_done_82571, + .get_cable_length = e1000e_get_cable_length_igp_2, + .get_phy_info = e1000e_get_phy_info_igp, + .read_phy_reg = e1000e_read_phy_reg_igp, + .release_phy = e1000_put_hw_semaphore_82571, + .reset_phy = e1000e_phy_hw_reset_generic, + .set_d0_lplu_state = e1000_set_d0_lplu_state_82571, + .set_d3_lplu_state = e1000e_set_d3_lplu_state, + .write_phy_reg = e1000e_write_phy_reg_igp, +}; + +static struct e1000_phy_operations e82_phy_ops_m88 = { + .acquire_phy = e1000_get_hw_semaphore_82571, + .check_reset_block = e1000e_check_reset_block_generic, + .commit_phy = e1000e_phy_sw_reset, + .force_speed_duplex = e1000e_phy_force_speed_duplex_m88, + .get_cfg_done = e1000e_get_cfg_done, + .get_cable_length = e1000e_get_cable_length_m88, + .get_phy_info = e1000e_get_phy_info_m88, + .read_phy_reg = e1000e_read_phy_reg_m88, + .release_phy = e1000_put_hw_semaphore_82571, + .reset_phy = e1000e_phy_hw_reset_generic, + .set_d0_lplu_state = e1000_set_d0_lplu_state_82571, + .set_d3_lplu_state = e1000e_set_d3_lplu_state, + .write_phy_reg = e1000e_write_phy_reg_m88, +}; + +static struct e1000_nvm_operations e82571_nvm_ops = { + .acquire_nvm = e1000_acquire_nvm_82571, + .read_nvm = e1000e_read_nvm_spi, + .release_nvm = e1000_release_nvm_82571, + .update_nvm = e1000_update_nvm_checksum_82571, + .valid_led_default = e1000_valid_led_default_82571, + .validate_nvm = e1000_validate_nvm_checksum_82571, + .write_nvm = e1000_write_nvm_82571, +}; + +static struct e1000_nvm_operations e82573_nvm_ops = { + .acquire_nvm = e1000_acquire_nvm_82571, + .read_nvm = e1000e_read_nvm_eerd, + .release_nvm = e1000_release_nvm_82571, + .update_nvm = e1000_update_nvm_checksum_82571, + .valid_led_default = e1000_valid_led_default_82571, + .validate_nvm = e1000_validate_nvm_checksum_82571, + .write_nvm = 
e1000_write_nvm_82571, +}; + +struct e1000_info e1000_82571_info = { + .mac = e1000_82571, + .flags = FLAG_HAS_HW_VLAN_FILTER + | FLAG_HAS_JUMBO_FRAMES + | FLAG_HAS_STATS_PTC_PRC + | FLAG_HAS_WOL + | FLAG_APME_IN_CTRL3 + | FLAG_RX_CSUM_ENABLED + | FLAG_HAS_CTRLEXT_ON_LOAD + | FLAG_HAS_STATS_ICR_ICT + | FLAG_HAS_SMART_POWER_DOWN + | FLAG_RESET_OVERWRITES_LAA /* errata */ + | FLAG_TARC_SPEED_MODE_BIT /* errata */ + | FLAG_APME_CHECK_PORT_B, + .pba = 38, + .get_invariants = e1000_get_invariants_82571, + .mac_ops = &e82571_mac_ops, + .phy_ops = &e82_phy_ops_igp, + .nvm_ops = &e82571_nvm_ops, +}; + +struct e1000_info e1000_82572_info = { + .mac = e1000_82572, + .flags = FLAG_HAS_HW_VLAN_FILTER + | FLAG_HAS_JUMBO_FRAMES + | FLAG_HAS_STATS_PTC_PRC + | FLAG_HAS_WOL + | FLAG_APME_IN_CTRL3 + | FLAG_RX_CSUM_ENABLED + | FLAG_HAS_CTRLEXT_ON_LOAD + | FLAG_HAS_STATS_ICR_ICT + | FLAG_TARC_SPEED_MODE_BIT, /* errata */ + .pba = 38, + .get_invariants = e1000_get_invariants_82571, + .mac_ops = &e82571_mac_ops, + .phy_ops = &e82_phy_ops_igp, + .nvm_ops = &e82571_nvm_ops, +}; + +struct e1000_info e1000_82573_info = { + .mac = e1000_82573, + .flags = FLAG_HAS_HW_VLAN_FILTER + | FLAG_HAS_JUMBO_FRAMES + | FLAG_HAS_STATS_PTC_PRC + | FLAG_HAS_WOL + | FLAG_APME_IN_CTRL3 + | FLAG_RX_CSUM_ENABLED + | FLAG_HAS_STATS_ICR_ICT + | FLAG_HAS_SMART_POWER_DOWN + | FLAG_HAS_AMT + | FLAG_HAS_ASPM + | FLAG_HAS_ERT + | FLAG_HAS_SWSM_ON_LOAD, + .pba = 20, + .get_invariants = e1000_get_invariants_82571, + .mac_ops = &e82571_mac_ops, + .phy_ops = &e82_phy_ops_m88, + .nvm_ops = &e82573_nvm_ops, +}; + diff --git a/drivers/net/e1000e/Makefile b/drivers/net/e1000e/Makefile new file mode 100644 index 0000000000000000000000000000000000000000..650f866e7ac2582d1d4916bdae2405fc8a04854f --- /dev/null +++ b/drivers/net/e1000e/Makefile @@ -0,0 +1,37 @@ +################################################################################ +# +# Intel PRO/1000 Linux driver +# Copyright(c) 1999 - 2007 Intel Corporation. +# +# This program is free software; you can redistribute it and/or modify it +# under the terms and conditions of the GNU General Public License, +# version 2, as published by the Free Software Foundation. +# +# This program is distributed in the hope it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for +# more details. +# +# You should have received a copy of the GNU General Public License along with +# this program; if not, write to the Free Software Foundation, Inc., +# 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. +# +# The full GNU General Public License is included in this distribution in +# the file called "COPYING". +# +# Contact Information: +# Linux NICS +# e1000-devel Mailing List +# Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 +# +################################################################################ + +# +# Makefile for the Intel(R) PRO/1000 ethernet driver +# + +obj-$(CONFIG_E1000E) += e1000e.o + +e1000e-objs := 82571.o ich8lan.o es2lan.o \ + lib.o phy.o param.o ethtool.o netdev.o + diff --git a/drivers/net/e1000e/defines.h b/drivers/net/e1000e/defines.h new file mode 100644 index 0000000000000000000000000000000000000000..b32ed45b4b34cef6b8d73fd373bca61ea78672b8 --- /dev/null +++ b/drivers/net/e1000e/defines.h @@ -0,0 +1,739 @@ +/******************************************************************************* + + Intel PRO/1000 Linux driver + Copyright(c) 1999 - 2007 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + Linux NICS + e1000-devel Mailing List + Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +#ifndef _E1000_DEFINES_H_ +#define _E1000_DEFINES_H_ + +#define E1000_TXD_POPTS_IXSM 0x01 /* Insert IP checksum */ +#define E1000_TXD_POPTS_TXSM 0x02 /* Insert TCP/UDP checksum */ +#define E1000_TXD_CMD_EOP 0x01000000 /* End of Packet */ +#define E1000_TXD_CMD_IFCS 0x02000000 /* Insert FCS (Ethernet CRC) */ +#define E1000_TXD_CMD_IC 0x04000000 /* Insert Checksum */ +#define E1000_TXD_CMD_RS 0x08000000 /* Report Status */ +#define E1000_TXD_CMD_RPS 0x10000000 /* Report Packet Sent */ +#define E1000_TXD_CMD_DEXT 0x20000000 /* Descriptor extension (0 = legacy) */ +#define E1000_TXD_CMD_VLE 0x40000000 /* Add VLAN tag */ +#define E1000_TXD_CMD_IDE 0x80000000 /* Enable Tidv register */ +#define E1000_TXD_STAT_DD 0x00000001 /* Descriptor Done */ +#define E1000_TXD_STAT_EC 0x00000002 /* Excess Collisions */ +#define E1000_TXD_STAT_LC 0x00000004 /* Late Collisions */ +#define E1000_TXD_STAT_TU 0x00000008 /* Transmit underrun */ +#define E1000_TXD_CMD_TCP 0x01000000 /* TCP packet */ +#define E1000_TXD_CMD_IP 0x02000000 /* IP packet */ +#define E1000_TXD_CMD_TSE 0x04000000 /* TCP Seg enable */ +#define E1000_TXD_STAT_TC 0x00000004 /* Tx Underrun */ + +/* Number of Transmit and Receive Descriptors must be a multiple of 8 */ +#define REQ_TX_DESCRIPTOR_MULTIPLE 8 +#define REQ_RX_DESCRIPTOR_MULTIPLE 8 + +/* Definitions for power management and wakeup registers */ +/* Wake Up Control */ +#define E1000_WUC_APME 0x00000001 /* APM Enable */ +#define E1000_WUC_PME_EN 0x00000002 /* PME Enable */ + +/* Wake Up Filter Control */ +#define E1000_WUFC_LNKC 0x00000001 /* Link Status Change Wakeup Enable */ +#define E1000_WUFC_MAG 0x00000002 /* Magic Packet Wakeup Enable */ +#define E1000_WUFC_EX 0x00000004 /* Directed Exact Wakeup Enable */ +#define E1000_WUFC_MC 0x00000008 /* Directed Multicast Wakeup Enable */ +#define E1000_WUFC_BC 
0x00000010 /* Broadcast Wakeup Enable */ + +/* Extended Device Control */ +#define E1000_CTRL_EXT_SDP7_DATA 0x00000080 /* Value of SW Definable Pin 7 */ +#define E1000_CTRL_EXT_EE_RST 0x00002000 /* Reinitialize from EEPROM */ +#define E1000_CTRL_EXT_RO_DIS 0x00020000 /* Relaxed Ordering disable */ +#define E1000_CTRL_EXT_LINK_MODE_MASK 0x00C00000 +#define E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES 0x00C00000 +#define E1000_CTRL_EXT_DRV_LOAD 0x10000000 /* Driver loaded bit for FW */ +#define E1000_CTRL_EXT_IAME 0x08000000 /* Interrupt acknowledge Auto-mask */ +#define E1000_CTRL_EXT_INT_TIMER_CLR 0x20000000 /* Clear Interrupt timers after IMS clear */ + +/* Receive Descriptor bit definitions */ +#define E1000_RXD_STAT_DD 0x01 /* Descriptor Done */ +#define E1000_RXD_STAT_EOP 0x02 /* End of Packet */ +#define E1000_RXD_STAT_IXSM 0x04 /* Ignore checksum */ +#define E1000_RXD_STAT_VP 0x08 /* IEEE VLAN Packet */ +#define E1000_RXD_STAT_UDPCS 0x10 /* UDP xsum calculated */ +#define E1000_RXD_STAT_TCPCS 0x20 /* TCP xsum calculated */ +#define E1000_RXD_ERR_CE 0x01 /* CRC Error */ +#define E1000_RXD_ERR_SE 0x02 /* Symbol Error */ +#define E1000_RXD_ERR_SEQ 0x04 /* Sequence Error */ +#define E1000_RXD_ERR_CXE 0x10 /* Carrier Extension Error */ +#define E1000_RXD_ERR_TCPE 0x20 /* TCP/UDP Checksum Error */ +#define E1000_RXD_ERR_RXE 0x80 /* Rx Data Error */ +#define E1000_RXD_SPC_VLAN_MASK 0x0FFF /* VLAN ID is in lower 12 bits */ + +#define E1000_RXDEXT_STATERR_CE 0x01000000 +#define E1000_RXDEXT_STATERR_SE 0x02000000 +#define E1000_RXDEXT_STATERR_SEQ 0x04000000 +#define E1000_RXDEXT_STATERR_CXE 0x10000000 +#define E1000_RXDEXT_STATERR_RXE 0x80000000 + +/* mask to determine if packets should be dropped due to frame errors */ +#define E1000_RXD_ERR_FRAME_ERR_MASK ( \ + E1000_RXD_ERR_CE | \ + E1000_RXD_ERR_SE | \ + E1000_RXD_ERR_SEQ | \ + E1000_RXD_ERR_CXE | \ + E1000_RXD_ERR_RXE) + +/* Same mask, but for extended and packet split descriptors */ +#define E1000_RXDEXT_ERR_FRAME_ERR_MASK ( \ + E1000_RXDEXT_STATERR_CE | \ + E1000_RXDEXT_STATERR_SE | \ + E1000_RXDEXT_STATERR_SEQ | \ + E1000_RXDEXT_STATERR_CXE | \ + E1000_RXDEXT_STATERR_RXE) + +#define E1000_RXDPS_HDRSTAT_HDRSP 0x00008000 + +/* Management Control */ +#define E1000_MANC_SMBUS_EN 0x00000001 /* SMBus Enabled - RO */ +#define E1000_MANC_ASF_EN 0x00000002 /* ASF Enabled - RO */ +#define E1000_MANC_ARP_EN 0x00002000 /* Enable ARP Request Filtering */ +#define E1000_MANC_RCV_TCO_EN 0x00020000 /* Receive TCO Packets Enabled */ +#define E1000_MANC_BLK_PHY_RST_ON_IDE 0x00040000 /* Block phy resets */ +#define E1000_MANC_EN_MAC_ADDR_FILTER 0x00100000 /* Enable MAC address + * filtering */ +#define E1000_MANC_EN_MNG2HOST 0x00200000 /* Enable MNG packets to host + * memory */ + +/* Receive Control */ +#define E1000_RCTL_EN 0x00000002 /* enable */ +#define E1000_RCTL_SBP 0x00000004 /* store bad packet */ +#define E1000_RCTL_UPE 0x00000008 /* unicast promiscuous enable */ +#define E1000_RCTL_MPE 0x00000010 /* multicast promiscuous enab */ +#define E1000_RCTL_LPE 0x00000020 /* long packet enable */ +#define E1000_RCTL_LBM_NO 0x00000000 /* no loopback mode */ +#define E1000_RCTL_LBM_MAC 0x00000040 /* MAC loopback mode */ +#define E1000_RCTL_LBM_TCVR 0x000000C0 /* tcvr loopback mode */ +#define E1000_RCTL_DTYP_PS 0x00000400 /* Packet Split descriptor */ +#define E1000_RCTL_RDMTS_HALF 0x00000000 /* rx desc min threshold size */ +#define E1000_RCTL_MO_SHIFT 12 /* multicast offset shift */ +#define E1000_RCTL_BAM 0x00008000 /* broadcast enable */ +/* these buffer
sizes are valid if E1000_RCTL_BSEX is 0 */ +#define E1000_RCTL_SZ_2048 0x00000000 /* rx buffer size 2048 */ +#define E1000_RCTL_SZ_1024 0x00010000 /* rx buffer size 1024 */ +#define E1000_RCTL_SZ_512 0x00020000 /* rx buffer size 512 */ +#define E1000_RCTL_SZ_256 0x00030000 /* rx buffer size 256 */ +/* these buffer sizes are valid if E1000_RCTL_BSEX is 1 */ +#define E1000_RCTL_SZ_16384 0x00010000 /* rx buffer size 16384 */ +#define E1000_RCTL_SZ_8192 0x00020000 /* rx buffer size 8192 */ +#define E1000_RCTL_SZ_4096 0x00030000 /* rx buffer size 4096 */ +#define E1000_RCTL_VFE 0x00040000 /* vlan filter enable */ +#define E1000_RCTL_CFIEN 0x00080000 /* canonical form enable */ +#define E1000_RCTL_CFI 0x00100000 /* canonical form indicator */ +#define E1000_RCTL_BSEX 0x02000000 /* Buffer size extension */ +#define E1000_RCTL_SECRC 0x04000000 /* Strip Ethernet CRC */ + +/* Use byte values for the following shift parameters + * Usage: + * psrctl |= (((ROUNDUP(value0, 128) >> E1000_PSRCTL_BSIZE0_SHIFT) & + * E1000_PSRCTL_BSIZE0_MASK) | + * ((ROUNDUP(value1, 1024) >> E1000_PSRCTL_BSIZE1_SHIFT) & + * E1000_PSRCTL_BSIZE1_MASK) | + * ((ROUNDUP(value2, 1024) << E1000_PSRCTL_BSIZE2_SHIFT) & + * E1000_PSRCTL_BSIZE2_MASK) | + * ((ROUNDUP(value3, 1024) << E1000_PSRCTL_BSIZE3_SHIFT) |; + * E1000_PSRCTL_BSIZE3_MASK)) + * where value0 = [128..16256], default=256 + * value1 = [1024..64512], default=4096 + * value2 = [0..64512], default=4096 + * value3 = [0..64512], default=0 + */ + +#define E1000_PSRCTL_BSIZE0_MASK 0x0000007F +#define E1000_PSRCTL_BSIZE1_MASK 0x00003F00 +#define E1000_PSRCTL_BSIZE2_MASK 0x003F0000 +#define E1000_PSRCTL_BSIZE3_MASK 0x3F000000 + +#define E1000_PSRCTL_BSIZE0_SHIFT 7 /* Shift _right_ 7 */ +#define E1000_PSRCTL_BSIZE1_SHIFT 2 /* Shift _right_ 2 */ +#define E1000_PSRCTL_BSIZE2_SHIFT 6 /* Shift _left_ 6 */ +#define E1000_PSRCTL_BSIZE3_SHIFT 14 /* Shift _left_ 14 */ + +/* SWFW_SYNC Definitions */ +#define E1000_SWFW_EEP_SM 0x1 +#define E1000_SWFW_PHY0_SM 0x2 +#define E1000_SWFW_PHY1_SM 0x4 + +/* Device Control */ +#define E1000_CTRL_FD 0x00000001 /* Full duplex.0=half; 1=full */ +#define E1000_CTRL_GIO_MASTER_DISABLE 0x00000004 /*Blocks new Master requests */ +#define E1000_CTRL_LRST 0x00000008 /* Link reset. 0=normal,1=reset */ +#define E1000_CTRL_ASDE 0x00000020 /* Auto-speed detect enable */ +#define E1000_CTRL_SLU 0x00000040 /* Set link up (Force Link) */ +#define E1000_CTRL_ILOS 0x00000080 /* Invert Loss-Of Signal */ +#define E1000_CTRL_SPD_SEL 0x00000300 /* Speed Select Mask */ +#define E1000_CTRL_SPD_10 0x00000000 /* Force 10Mb */ +#define E1000_CTRL_SPD_100 0x00000100 /* Force 100Mb */ +#define E1000_CTRL_SPD_1000 0x00000200 /* Force 1Gb */ +#define E1000_CTRL_FRCSPD 0x00000800 /* Force Speed */ +#define E1000_CTRL_FRCDPX 0x00001000 /* Force Duplex */ +#define E1000_CTRL_SWDPIN0 0x00040000 /* SWDPIN 0 value */ +#define E1000_CTRL_SWDPIN1 0x00080000 /* SWDPIN 1 value */ +#define E1000_CTRL_SWDPIO0 0x00400000 /* SWDPIN 0 Input or output */ +#define E1000_CTRL_RST 0x04000000 /* Global reset */ +#define E1000_CTRL_RFCE 0x08000000 /* Receive Flow Control enable */ +#define E1000_CTRL_TFCE 0x10000000 /* Transmit flow control enable */ +#define E1000_CTRL_VME 0x40000000 /* IEEE VLAN mode enable */ +#define E1000_CTRL_PHY_RST 0x80000000 /* PHY Reset */ + +/* Bit definitions for the Management Data IO (MDIO) and Management Data + * Clock (MDC) pins in the Device Control Register. 
+ */ + +/* Device Status */ +#define E1000_STATUS_FD 0x00000001 /* Full duplex.0=half,1=full */ +#define E1000_STATUS_LU 0x00000002 /* Link up.0=no,1=link */ +#define E1000_STATUS_FUNC_MASK 0x0000000C /* PCI Function Mask */ +#define E1000_STATUS_FUNC_SHIFT 2 +#define E1000_STATUS_FUNC_1 0x00000004 /* Function 1 */ +#define E1000_STATUS_TXOFF 0x00000010 /* transmission paused */ +#define E1000_STATUS_SPEED_10 0x00000000 /* Speed 10Mb/s */ +#define E1000_STATUS_SPEED_100 0x00000040 /* Speed 100Mb/s */ +#define E1000_STATUS_SPEED_1000 0x00000080 /* Speed 1000Mb/s */ +#define E1000_STATUS_LAN_INIT_DONE 0x00000200 /* Lan Init Completion by NVM */ +#define E1000_STATUS_GIO_MASTER_ENABLE 0x00080000 /* Status of Master requests. */ + +/* Constants used to intrepret the masked PCI-X bus speed. */ + +#define HALF_DUPLEX 1 +#define FULL_DUPLEX 2 + + +#define ADVERTISE_10_HALF 0x0001 +#define ADVERTISE_10_FULL 0x0002 +#define ADVERTISE_100_HALF 0x0004 +#define ADVERTISE_100_FULL 0x0008 +#define ADVERTISE_1000_HALF 0x0010 /* Not used, just FYI */ +#define ADVERTISE_1000_FULL 0x0020 + +/* 1000/H is not supported, nor spec-compliant. */ +#define E1000_ALL_SPEED_DUPLEX ( ADVERTISE_10_HALF | ADVERTISE_10_FULL | \ + ADVERTISE_100_HALF | ADVERTISE_100_FULL | \ + ADVERTISE_1000_FULL) +#define E1000_ALL_NOT_GIG ( ADVERTISE_10_HALF | ADVERTISE_10_FULL | \ + ADVERTISE_100_HALF | ADVERTISE_100_FULL) +#define E1000_ALL_100_SPEED (ADVERTISE_100_HALF | ADVERTISE_100_FULL) +#define E1000_ALL_10_SPEED (ADVERTISE_10_HALF | ADVERTISE_10_FULL) +#define E1000_ALL_HALF_DUPLEX (ADVERTISE_10_HALF | ADVERTISE_100_HALF) + +#define AUTONEG_ADVERTISE_SPEED_DEFAULT E1000_ALL_SPEED_DUPLEX + +/* LED Control */ +#define E1000_LEDCTL_LED0_MODE_MASK 0x0000000F +#define E1000_LEDCTL_LED0_MODE_SHIFT 0 +#define E1000_LEDCTL_LED0_IVRT 0x00000040 +#define E1000_LEDCTL_LED0_BLINK 0x00000080 + +#define E1000_LEDCTL_MODE_LED_ON 0xE +#define E1000_LEDCTL_MODE_LED_OFF 0xF + +/* Transmit Descriptor bit definitions */ +#define E1000_TXD_DTYP_D 0x00100000 /* Data Descriptor */ +#define E1000_TXD_POPTS_IXSM 0x01 /* Insert IP checksum */ +#define E1000_TXD_POPTS_TXSM 0x02 /* Insert TCP/UDP checksum */ +#define E1000_TXD_CMD_EOP 0x01000000 /* End of Packet */ +#define E1000_TXD_CMD_IFCS 0x02000000 /* Insert FCS (Ethernet CRC) */ +#define E1000_TXD_CMD_IC 0x04000000 /* Insert Checksum */ +#define E1000_TXD_CMD_RS 0x08000000 /* Report Status */ +#define E1000_TXD_CMD_RPS 0x10000000 /* Report Packet Sent */ +#define E1000_TXD_CMD_DEXT 0x20000000 /* Descriptor extension (0 = legacy) */ +#define E1000_TXD_CMD_VLE 0x40000000 /* Add VLAN tag */ +#define E1000_TXD_CMD_IDE 0x80000000 /* Enable Tidv register */ +#define E1000_TXD_STAT_DD 0x00000001 /* Descriptor Done */ +#define E1000_TXD_STAT_EC 0x00000002 /* Excess Collisions */ +#define E1000_TXD_STAT_LC 0x00000004 /* Late Collisions */ +#define E1000_TXD_STAT_TU 0x00000008 /* Transmit underrun */ +#define E1000_TXD_CMD_TCP 0x01000000 /* TCP packet */ +#define E1000_TXD_CMD_IP 0x02000000 /* IP packet */ +#define E1000_TXD_CMD_TSE 0x04000000 /* TCP Seg enable */ +#define E1000_TXD_STAT_TC 0x00000004 /* Tx Underrun */ + +/* Transmit Control */ +#define E1000_TCTL_EN 0x00000002 /* enable tx */ +#define E1000_TCTL_PSP 0x00000008 /* pad short packets */ +#define E1000_TCTL_CT 0x00000ff0 /* collision threshold */ +#define E1000_TCTL_COLD 0x003ff000 /* collision distance */ +#define E1000_TCTL_RTLC 0x01000000 /* Re-transmit on late collision */ +#define E1000_TCTL_MULR 0x10000000 /* Multiple request support */ + 
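+/*
+ * Illustrative sketch only (editor's note, not part of the hardware
+ * definition): a driver typically composes the Transmit Control register
+ * from the TCTL fields above together with the collision constants defined
+ * a little further below, along the lines of
+ *
+ *	u32 tctl = er32(TCTL);
+ *	tctl &= ~E1000_TCTL_CT;
+ *	tctl |= E1000_TCTL_PSP | E1000_TCTL_RTLC |
+ *		(E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT);
+ *	tctl |= E1000_TCTL_EN;
+ *	ew32(TCTL, tctl);
+ *
+ * where er32()/ew32() are the register accessors used elsewhere in this
+ * driver.
+ */
+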
+/* Transmit Arbitration Count */ + +/* SerDes Control */ +#define E1000_SCTL_DISABLE_SERDES_LOOPBACK 0x0400 + +/* Receive Checksum Control */ +#define E1000_RXCSUM_TUOFL 0x00000200 /* TCP / UDP checksum offload */ +#define E1000_RXCSUM_IPPCSE 0x00001000 /* IP payload checksum enable */ + +/* Header split receive */ +#define E1000_RFCTL_EXTEN 0x00008000 +#define E1000_RFCTL_IPV6_EX_DIS 0x00010000 +#define E1000_RFCTL_NEW_IPV6_EXT_DIS 0x00020000 + +/* Collision related configuration parameters */ +#define E1000_COLLISION_THRESHOLD 15 +#define E1000_CT_SHIFT 4 +#define E1000_COLLISION_DISTANCE 63 +#define E1000_COLD_SHIFT 12 + +/* Default values for the transmit IPG register */ +#define DEFAULT_82543_TIPG_IPGT_COPPER 8 + +#define E1000_TIPG_IPGT_MASK 0x000003FF + +#define DEFAULT_82543_TIPG_IPGR1 8 +#define E1000_TIPG_IPGR1_SHIFT 10 + +#define DEFAULT_82543_TIPG_IPGR2 6 +#define DEFAULT_80003ES2LAN_TIPG_IPGR2 7 +#define E1000_TIPG_IPGR2_SHIFT 20 + +#define MAX_JUMBO_FRAME_SIZE 0x3F00 + +/* Extended Configuration Control and Size */ +#define E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP 0x00000020 +#define E1000_EXTCNF_CTRL_LCD_WRITE_ENABLE 0x00000001 +#define E1000_EXTCNF_CTRL_SWFLAG 0x00000020 +#define E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_MASK 0x00FF0000 +#define E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_SHIFT 16 +#define E1000_EXTCNF_CTRL_EXT_CNF_POINTER_MASK 0x0FFF0000 +#define E1000_EXTCNF_CTRL_EXT_CNF_POINTER_SHIFT 16 + +#define E1000_PHY_CTRL_D0A_LPLU 0x00000002 +#define E1000_PHY_CTRL_NOND0A_LPLU 0x00000004 +#define E1000_PHY_CTRL_NOND0A_GBE_DISABLE 0x00000008 +#define E1000_PHY_CTRL_GBE_DISABLE 0x00000040 + +#define E1000_KABGTXD_BGSQLBIAS 0x00050000 + +/* PBA constants */ +#define E1000_PBA_8K 0x0008 /* 8KB, default Rx allocation */ +#define E1000_PBA_16K 0x0010 /* 16KB, default TX allocation */ + +#define E1000_PBS_16K E1000_PBA_16K + +#define IFS_MAX 80 +#define IFS_MIN 40 +#define IFS_RATIO 4 +#define IFS_STEP 10 +#define MIN_NUM_XMITS 1000 + +/* SW Semaphore Register */ +#define E1000_SWSM_SMBI 0x00000001 /* Driver Semaphore bit */ +#define E1000_SWSM_SWESMBI 0x00000002 /* FW Semaphore bit */ +#define E1000_SWSM_DRV_LOAD 0x00000008 /* Driver Loaded Bit */ + +/* Interrupt Cause Read */ +#define E1000_ICR_TXDW 0x00000001 /* Transmit desc written back */ +#define E1000_ICR_LSC 0x00000004 /* Link Status Change */ +#define E1000_ICR_RXSEQ 0x00000008 /* rx sequence error */ +#define E1000_ICR_RXDMT0 0x00000010 /* rx desc min. threshold (0) */ +#define E1000_ICR_RXT0 0x00000080 /* rx timer intr (ring 0) */ +#define E1000_ICR_INT_ASSERTED 0x80000000 /* If this bit asserted, the driver should claim the interrupt */ + +/* This defines the bits that are set in the Interrupt Mask + * Set/Read Register. Each bit is documented below: + * o RXT0 = Receiver Timer Interrupt (ring 0) + * o TXDW = Transmit Descriptor Written Back + * o RXDMT0 = Receive Descriptor Minimum Threshold hit (ring 0) + * o RXSEQ = Receive Sequence Error + * o LSC = Link Status Change + */ +#define IMS_ENABLE_MASK ( \ + E1000_IMS_RXT0 | \ + E1000_IMS_TXDW | \ + E1000_IMS_RXDMT0 | \ + E1000_IMS_RXSEQ | \ + E1000_IMS_LSC) + +/* Interrupt Mask Set */ +#define E1000_IMS_TXDW E1000_ICR_TXDW /* Transmit desc written back */ +#define E1000_IMS_LSC E1000_ICR_LSC /* Link Status Change */ +#define E1000_IMS_RXSEQ E1000_ICR_RXSEQ /* rx sequence error */ +#define E1000_IMS_RXDMT0 E1000_ICR_RXDMT0 /* rx desc min. 
threshold */ +#define E1000_IMS_RXT0 E1000_ICR_RXT0 /* rx timer intr */ + +/* Interrupt Cause Set */ +#define E1000_ICS_LSC E1000_ICR_LSC /* Link Status Change */ +#define E1000_ICS_RXDMT0 E1000_ICR_RXDMT0 /* rx desc min. threshold */ + +/* Transmit Descriptor Control */ +#define E1000_TXDCTL_PTHRESH 0x0000003F /* TXDCTL Prefetch Threshold */ +#define E1000_TXDCTL_WTHRESH 0x003F0000 /* TXDCTL Writeback Threshold */ +#define E1000_TXDCTL_FULL_TX_DESC_WB 0x01010000 /* GRAN=1, WTHRESH=1 */ +#define E1000_TXDCTL_MAX_TX_DESC_PREFETCH 0x0100001F /* GRAN=1, PTHRESH=31 */ +#define E1000_TXDCTL_COUNT_DESC 0x00400000 /* Enable the counting of desc. + still to be processed. */ + +/* Flow Control Constants */ +#define FLOW_CONTROL_ADDRESS_LOW 0x00C28001 +#define FLOW_CONTROL_ADDRESS_HIGH 0x00000100 +#define FLOW_CONTROL_TYPE 0x8808 + +/* 802.1q VLAN Packet Size */ +#define E1000_VLAN_FILTER_TBL_SIZE 128 /* VLAN Filter Table (4096 bits) */ + +/* Receive Address */ +/* Number of high/low register pairs in the RAR. The RAR (Receive Address + * Registers) holds the directed and multicast addresses that we monitor. + * Technically, we have 16 spots. However, we reserve one of these spots + * (RAR[15]) for our directed address used by controllers with + * manageability enabled, allowing us room for 15 multicast addresses. + */ +#define E1000_RAR_ENTRIES 15 +#define E1000_RAH_AV 0x80000000 /* Receive descriptor valid */ + +/* Error Codes */ +#define E1000_ERR_NVM 1 +#define E1000_ERR_PHY 2 +#define E1000_ERR_CONFIG 3 +#define E1000_ERR_PARAM 4 +#define E1000_ERR_MAC_INIT 5 +#define E1000_ERR_PHY_TYPE 6 +#define E1000_ERR_RESET 9 +#define E1000_ERR_MASTER_REQUESTS_PENDING 10 +#define E1000_ERR_HOST_INTERFACE_COMMAND 11 +#define E1000_BLK_PHY_RESET 12 +#define E1000_ERR_SWFW_SYNC 13 +#define E1000_NOT_IMPLEMENTED 14 + +/* Loop limit on how long we wait for auto-negotiation to complete */ +#define FIBER_LINK_UP_LIMIT 50 +#define COPPER_LINK_UP_LIMIT 10 +#define PHY_AUTO_NEG_LIMIT 45 +#define PHY_FORCE_LIMIT 20 +/* Number of 100 microseconds we wait for PCI Express master disable */ +#define MASTER_DISABLE_TIMEOUT 800 +/* Number of milliseconds we wait for PHY configuration done after MAC reset */ +#define PHY_CFG_TIMEOUT 100 +/* Number of 2 milliseconds we wait for acquiring MDIO ownership. */ +#define MDIO_OWNERSHIP_TIMEOUT 10 +/* Number of milliseconds for NVM auto read done after MAC reset. 
*/ +#define AUTO_READ_DONE_TIMEOUT 10 + +/* Flow Control */ +#define E1000_FCRTL_XONE 0x80000000 /* Enable XON frame transmission */ + +/* Transmit Configuration Word */ +#define E1000_TXCW_FD 0x00000020 /* TXCW full duplex */ +#define E1000_TXCW_PAUSE 0x00000080 /* TXCW sym pause request */ +#define E1000_TXCW_ASM_DIR 0x00000100 /* TXCW astm pause direction */ +#define E1000_TXCW_PAUSE_MASK 0x00000180 /* TXCW pause request mask */ +#define E1000_TXCW_ANE 0x80000000 /* Auto-neg enable */ + +/* Receive Configuration Word */ +#define E1000_RXCW_IV 0x08000000 /* Receive config invalid */ +#define E1000_RXCW_C 0x20000000 /* Receive config */ +#define E1000_RXCW_SYNCH 0x40000000 /* Receive config synch */ + +/* PCI Express Control */ +#define E1000_GCR_RXD_NO_SNOOP 0x00000001 +#define E1000_GCR_RXDSCW_NO_SNOOP 0x00000002 +#define E1000_GCR_RXDSCR_NO_SNOOP 0x00000004 +#define E1000_GCR_TXD_NO_SNOOP 0x00000008 +#define E1000_GCR_TXDSCW_NO_SNOOP 0x00000010 +#define E1000_GCR_TXDSCR_NO_SNOOP 0x00000020 + +#define PCIE_NO_SNOOP_ALL (E1000_GCR_RXD_NO_SNOOP | \ + E1000_GCR_RXDSCW_NO_SNOOP | \ + E1000_GCR_RXDSCR_NO_SNOOP | \ + E1000_GCR_TXD_NO_SNOOP | \ + E1000_GCR_TXDSCW_NO_SNOOP | \ + E1000_GCR_TXDSCR_NO_SNOOP) + +/* PHY Control Register */ +#define MII_CR_FULL_DUPLEX 0x0100 /* FDX =1, half duplex =0 */ +#define MII_CR_RESTART_AUTO_NEG 0x0200 /* Restart auto negotiation */ +#define MII_CR_POWER_DOWN 0x0800 /* Power down */ +#define MII_CR_AUTO_NEG_EN 0x1000 /* Auto Neg Enable */ +#define MII_CR_LOOPBACK 0x4000 /* 0 = normal, 1 = loopback */ +#define MII_CR_RESET 0x8000 /* 0 = normal, 1 = PHY reset */ +#define MII_CR_SPEED_1000 0x0040 +#define MII_CR_SPEED_100 0x2000 +#define MII_CR_SPEED_10 0x0000 + +/* PHY Status Register */ +#define MII_SR_LINK_STATUS 0x0004 /* Link Status 1 = link */ +#define MII_SR_AUTONEG_COMPLETE 0x0020 /* Auto Neg Complete */ + +/* Autoneg Advertisement Register */ +#define NWAY_AR_10T_HD_CAPS 0x0020 /* 10T Half Duplex Capable */ +#define NWAY_AR_10T_FD_CAPS 0x0040 /* 10T Full Duplex Capable */ +#define NWAY_AR_100TX_HD_CAPS 0x0080 /* 100TX Half Duplex Capable */ +#define NWAY_AR_100TX_FD_CAPS 0x0100 /* 100TX Full Duplex Capable */ +#define NWAY_AR_PAUSE 0x0400 /* Pause operation desired */ +#define NWAY_AR_ASM_DIR 0x0800 /* Asymmetric Pause Direction bit */ + +/* Link Partner Ability Register (Base Page) */ +#define NWAY_LPAR_PAUSE 0x0400 /* LP Pause operation desired */ +#define NWAY_LPAR_ASM_DIR 0x0800 /* LP Asymmetric Pause Direction bit */ + +/* Autoneg Expansion Register */ + +/* 1000BASE-T Control Register */ +#define CR_1000T_HD_CAPS 0x0100 /* Advertise 1000T HD capability */ +#define CR_1000T_FD_CAPS 0x0200 /* Advertise 1000T FD capability */ + /* 0=DTE device */ +#define CR_1000T_MS_VALUE 0x0800 /* 1=Configure PHY as Master */ + /* 0=Configure PHY as Slave */ +#define CR_1000T_MS_ENABLE 0x1000 /* 1=Master/Slave manual config value */ + /* 0=Automatic Master/Slave config */ + +/* 1000BASE-T Status Register */ +#define SR_1000T_REMOTE_RX_STATUS 0x1000 /* Remote receiver OK */ +#define SR_1000T_LOCAL_RX_STATUS 0x2000 /* Local receiver OK */ + + +/* PHY 1000 MII Register/Bit Definitions */ +/* PHY Registers defined by IEEE */ +#define PHY_CONTROL 0x00 /* Control Register */ +#define PHY_STATUS 0x01 /* Status Regiser */ +#define PHY_ID1 0x02 /* Phy Id Reg (word 1) */ +#define PHY_ID2 0x03 /* Phy Id Reg (word 2) */ +#define PHY_AUTONEG_ADV 0x04 /* Autoneg Advertisement */ +#define PHY_LP_ABILITY 0x05 /* Link Partner Ability (Base Page) */ +#define PHY_1000T_CTRL 0x09 /* 
1000Base-T Control Reg */ +#define PHY_1000T_STATUS 0x0A /* 1000Base-T Status Reg */ + +/* NVM Control */ +#define E1000_EECD_SK 0x00000001 /* NVM Clock */ +#define E1000_EECD_CS 0x00000002 /* NVM Chip Select */ +#define E1000_EECD_DI 0x00000004 /* NVM Data In */ +#define E1000_EECD_DO 0x00000008 /* NVM Data Out */ +#define E1000_EECD_REQ 0x00000040 /* NVM Access Request */ +#define E1000_EECD_GNT 0x00000080 /* NVM Access Grant */ +#define E1000_EECD_SIZE 0x00000200 /* NVM Size (0=64 word 1=256 word) */ +#define E1000_EECD_ADDR_BITS 0x00000400 /* NVM Addressing bits based on type + * (0-small, 1-large) */ +#define E1000_NVM_GRANT_ATTEMPTS 1000 /* NVM # attempts to gain grant */ +#define E1000_EECD_AUTO_RD 0x00000200 /* NVM Auto Read done */ +#define E1000_EECD_SIZE_EX_MASK 0x00007800 /* NVM Size */ +#define E1000_EECD_SIZE_EX_SHIFT 11 +#define E1000_EECD_FLUPD 0x00080000 /* Update FLASH */ +#define E1000_EECD_AUPDEN 0x00100000 /* Enable Autonomous FLASH update */ +#define E1000_EECD_SEC1VAL 0x00400000 /* Sector One Valid */ + +#define E1000_NVM_RW_REG_DATA 16 /* Offset to data in NVM read/write registers */ +#define E1000_NVM_RW_REG_DONE 2 /* Offset to READ/WRITE done bit */ +#define E1000_NVM_RW_REG_START 1 /* Start operation */ +#define E1000_NVM_RW_ADDR_SHIFT 2 /* Shift to the address bits */ +#define E1000_NVM_POLL_WRITE 1 /* Flag for polling for write complete */ +#define E1000_NVM_POLL_READ 0 /* Flag for polling for read complete */ +#define E1000_FLASH_UPDATES 2000 + +/* NVM Word Offsets */ +#define NVM_ID_LED_SETTINGS 0x0004 +#define NVM_INIT_CONTROL2_REG 0x000F +#define NVM_INIT_CONTROL3_PORT_B 0x0014 +#define NVM_INIT_3GIO_3 0x001A +#define NVM_INIT_CONTROL3_PORT_A 0x0024 +#define NVM_CFG 0x0012 +#define NVM_CHECKSUM_REG 0x003F + +#define E1000_NVM_CFG_DONE_PORT_0 0x40000 /* MNG config cycle done */ +#define E1000_NVM_CFG_DONE_PORT_1 0x80000 /* ...for second port */ + +/* Mask bits for fields in Word 0x0f of the NVM */ +#define NVM_WORD0F_PAUSE_MASK 0x3000 +#define NVM_WORD0F_PAUSE 0x1000 +#define NVM_WORD0F_ASM_DIR 0x2000 + +/* Mask bits for fields in Word 0x1a of the NVM */ +#define NVM_WORD1A_ASPM_MASK 0x000C + +/* For checksumming, the sum of all words in the NVM should equal 0xBABA. 
*/ +#define NVM_SUM 0xBABA + +/* PBA (printed board assembly) number words */ +#define NVM_PBA_OFFSET_0 8 +#define NVM_PBA_OFFSET_1 9 + +#define NVM_WORD_SIZE_BASE_SHIFT 6 + +/* NVM Commands - SPI */ +#define NVM_MAX_RETRY_SPI 5000 /* Max wait of 5ms, for RDY signal */ +#define NVM_READ_OPCODE_SPI 0x03 /* NVM read opcode */ +#define NVM_WRITE_OPCODE_SPI 0x02 /* NVM write opcode */ +#define NVM_A8_OPCODE_SPI 0x08 /* opcode bit-3 = address bit-8 */ +#define NVM_WREN_OPCODE_SPI 0x06 /* NVM set Write Enable latch */ +#define NVM_RDSR_OPCODE_SPI 0x05 /* NVM read Status register */ + +/* SPI NVM Status Register */ +#define NVM_STATUS_RDY_SPI 0x01 + +/* Word definitions for ID LED Settings */ +#define ID_LED_RESERVED_0000 0x0000 +#define ID_LED_RESERVED_FFFF 0xFFFF +#define ID_LED_DEFAULT ((ID_LED_OFF1_ON2 << 12) | \ + (ID_LED_OFF1_OFF2 << 8) | \ + (ID_LED_DEF1_DEF2 << 4) | \ + (ID_LED_DEF1_DEF2)) +#define ID_LED_DEF1_DEF2 0x1 +#define ID_LED_DEF1_ON2 0x2 +#define ID_LED_DEF1_OFF2 0x3 +#define ID_LED_ON1_DEF2 0x4 +#define ID_LED_ON1_ON2 0x5 +#define ID_LED_ON1_OFF2 0x6 +#define ID_LED_OFF1_DEF2 0x7 +#define ID_LED_OFF1_ON2 0x8 +#define ID_LED_OFF1_OFF2 0x9 + +#define IGP_ACTIVITY_LED_MASK 0xFFFFF0FF +#define IGP_ACTIVITY_LED_ENABLE 0x0300 +#define IGP_LED3_MODE 0x07000000 + +/* PCI/PCI-X/PCI-EX Config space */ +#define PCI_HEADER_TYPE_REGISTER 0x0E +#define PCIE_LINK_STATUS 0x12 + +#define PCI_HEADER_TYPE_MULTIFUNC 0x80 +#define PCIE_LINK_WIDTH_MASK 0x3F0 +#define PCIE_LINK_WIDTH_SHIFT 4 + +#define PHY_REVISION_MASK 0xFFFFFFF0 +#define MAX_PHY_REG_ADDRESS 0x1F /* 5 bit address bus (0-0x1F) */ +#define MAX_PHY_MULTI_PAGE_REG 0xF + +/* Bit definitions for valid PHY IDs. */ +/* I = Integrated + * E = External + */ +#define M88E1000_E_PHY_ID 0x01410C50 +#define M88E1000_I_PHY_ID 0x01410C30 +#define M88E1011_I_PHY_ID 0x01410C20 +#define IGP01E1000_I_PHY_ID 0x02A80380 +#define M88E1111_I_PHY_ID 0x01410CC0 +#define GG82563_E_PHY_ID 0x01410CA0 +#define IGP03E1000_E_PHY_ID 0x02A80390 +#define IFE_E_PHY_ID 0x02A80330 +#define IFE_PLUS_E_PHY_ID 0x02A80320 +#define IFE_C_E_PHY_ID 0x02A80310 + +/* M88E1000 Specific Registers */ +#define M88E1000_PHY_SPEC_CTRL 0x10 /* PHY Specific Control Register */ +#define M88E1000_PHY_SPEC_STATUS 0x11 /* PHY Specific Status Register */ +#define M88E1000_EXT_PHY_SPEC_CTRL 0x14 /* Extended PHY Specific Control */ + +#define M88E1000_PHY_PAGE_SELECT 0x1D /* Reg 29 for page number setting */ +#define M88E1000_PHY_GEN_CONTROL 0x1E /* Its meaning depends on reg 29 */ + +/* M88E1000 PHY Specific Control Register */ +#define M88E1000_PSCR_POLARITY_REVERSAL 0x0002 /* 1=Polarity Reversal enabled */ +#define M88E1000_PSCR_MDI_MANUAL_MODE 0x0000 /* MDI Crossover Mode bits 6:5 */ + /* Manual MDI configuration */ +#define M88E1000_PSCR_MDIX_MANUAL_MODE 0x0020 /* Manual MDIX configuration */ +#define M88E1000_PSCR_AUTO_X_1000T 0x0040 /* 1000BASE-T: Auto crossover, + * 100BASE-TX/10BASE-T: + * MDI Mode + */ +#define M88E1000_PSCR_AUTO_X_MODE 0x0060 /* Auto crossover enabled + * all speeds. 
+ */ + /* 1=Enable Extended 10BASE-T distance + * (Lower 10BASE-T RX Threshold) + * 0=Normal 10BASE-T RX Threshold */ + /* 1=5-Bit interface in 100BASE-TX + * 0=MII interface in 100BASE-TX */ +#define M88E1000_PSCR_ASSERT_CRS_ON_TX 0x0800 /* 1=Assert CRS on Transmit */ + +/* M88E1000 PHY Specific Status Register */ +#define M88E1000_PSSR_REV_POLARITY 0x0002 /* 1=Polarity reversed */ +#define M88E1000_PSSR_DOWNSHIFT 0x0020 /* 1=Downshifted */ +#define M88E1000_PSSR_MDIX 0x0040 /* 1=MDIX; 0=MDI */ +#define M88E1000_PSSR_CABLE_LENGTH 0x0380 /* 0=<50M;1=50-80M;2=80-110M; + * 3=110-140M;4=>140M */ +#define M88E1000_PSSR_SPEED 0xC000 /* Speed, bits 14:15 */ +#define M88E1000_PSSR_1000MBS 0x8000 /* 10=1000Mbs */ + +#define M88E1000_PSSR_CABLE_LENGTH_SHIFT 7 + +/* Number of times we will attempt to autonegotiate before downshifting if we + * are the master */ +#define M88E1000_EPSCR_MASTER_DOWNSHIFT_MASK 0x0C00 +#define M88E1000_EPSCR_MASTER_DOWNSHIFT_1X 0x0000 +/* Number of times we will attempt to autonegotiate before downshifting if we + * are the slave */ +#define M88E1000_EPSCR_SLAVE_DOWNSHIFT_MASK 0x0300 +#define M88E1000_EPSCR_SLAVE_DOWNSHIFT_1X 0x0100 +#define M88E1000_EPSCR_TX_CLK_25 0x0070 /* 25 MHz TX_CLK */ + +/* M88EC018 Rev 2 specific DownShift settings */ +#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_MASK 0x0E00 +#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_5X 0x0800 + +/* Bits... + * 15-5: page + * 4-0: register offset + */ +#define GG82563_PAGE_SHIFT 5 +#define GG82563_REG(page, reg) \ + (((page) << GG82563_PAGE_SHIFT) | ((reg) & MAX_PHY_REG_ADDRESS)) +#define GG82563_MIN_ALT_REG 30 + +/* GG82563 Specific Registers */ +#define GG82563_PHY_SPEC_CTRL \ + GG82563_REG(0, 16) /* PHY Specific Control */ +#define GG82563_PHY_PAGE_SELECT \ + GG82563_REG(0, 22) /* Page Select */ +#define GG82563_PHY_SPEC_CTRL_2 \ + GG82563_REG(0, 26) /* PHY Specific Control 2 */ +#define GG82563_PHY_PAGE_SELECT_ALT \ + GG82563_REG(0, 29) /* Alternate Page Select */ + +#define GG82563_PHY_MAC_SPEC_CTRL \ + GG82563_REG(2, 21) /* MAC Specific Control Register */ + +#define GG82563_PHY_DSP_DISTANCE \ + GG82563_REG(5, 26) /* DSP Distance */ + +/* Page 193 - Port Control Registers */ +#define GG82563_PHY_KMRN_MODE_CTRL \ + GG82563_REG(193, 16) /* Kumeran Mode Control */ +#define GG82563_PHY_PWR_MGMT_CTRL \ + GG82563_REG(193, 20) /* Power Management Control */ + +/* Page 194 - KMRN Registers */ +#define GG82563_PHY_INBAND_CTRL \ + GG82563_REG(194, 18) /* Inband Control */ + +/* MDI Control */ +#define E1000_MDIC_REG_SHIFT 16 +#define E1000_MDIC_PHY_SHIFT 21 +#define E1000_MDIC_OP_WRITE 0x04000000 +#define E1000_MDIC_OP_READ 0x08000000 +#define E1000_MDIC_READY 0x10000000 +#define E1000_MDIC_ERROR 0x40000000 + +/* SerDes Control */ +#define E1000_GEN_POLL_TIMEOUT 640 + +#endif /* _E1000_DEFINES_H_ */ diff --git a/drivers/net/e1000e/e1000.h b/drivers/net/e1000e/e1000.h new file mode 100644 index 0000000000000000000000000000000000000000..d2499bb07c13c5d4f17e489b519453b2e53909e4 --- /dev/null +++ b/drivers/net/e1000e/e1000.h @@ -0,0 +1,514 @@ +/******************************************************************************* + + Intel PRO/1000 Linux driver + Copyright(c) 1999 - 2007 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. 
+ + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + Linux NICS + e1000-devel Mailing List + Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +/* Linux PRO/1000 Ethernet Driver main header file */ + +#ifndef _E1000_H_ +#define _E1000_H_ + +#include +#include +#include +#include +#include + +#include "hw.h" + +struct e1000_info; + +#define ndev_printk(level, netdev, format, arg...) \ + printk(level "%s: %s: " format, (netdev)->dev.parent->bus_id, \ + (netdev)->name, ## arg) + +#ifdef DEBUG +#define ndev_dbg(netdev, format, arg...) \ + ndev_printk(KERN_DEBUG , netdev, format, ## arg) +#else +#define ndev_dbg(netdev, format, arg...) do { (void)(netdev); } while (0) +#endif + +#define ndev_err(netdev, format, arg...) \ + ndev_printk(KERN_ERR , netdev, format, ## arg) +#define ndev_info(netdev, format, arg...) \ + ndev_printk(KERN_INFO , netdev, format, ## arg) +#define ndev_warn(netdev, format, arg...) \ + ndev_printk(KERN_WARNING , netdev, format, ## arg) +#define ndev_notice(netdev, format, arg...) \ + ndev_printk(KERN_NOTICE , netdev, format, ## arg) + + +/* TX/RX descriptor defines */ +#define E1000_DEFAULT_TXD 256 +#define E1000_MAX_TXD 4096 +#define E1000_MIN_TXD 80 + +#define E1000_DEFAULT_RXD 256 +#define E1000_MAX_RXD 4096 +#define E1000_MIN_RXD 80 + +/* Early Receive defines */ +#define E1000_ERT_2048 0x100 + +#define E1000_FC_PAUSE_TIME 0x0680 /* 858 usec */ + +/* How many Tx Descriptors do we need to call netif_wake_queue ? */ +/* How many Rx Buffers do we bundle into one write to the hardware ? */ +#define E1000_RX_BUFFER_WRITE 16 /* Must be power of 2 */ + +#define AUTO_ALL_MODES 0 +#define E1000_EEPROM_APME 0x0400 + +#define E1000_MNG_VLAN_NONE (-1) + +/* Number of packet split data buffers (not including the header buffer) */ +#define PS_PAGE_BUFFERS (MAX_PS_BUFFERS - 1) + +enum e1000_boards { + board_82571, + board_82572, + board_82573, + board_80003es2lan, + board_ich8lan, + board_ich9lan, +}; + +struct e1000_queue_stats { + u64 packets; + u64 bytes; +}; + +struct e1000_ps_page { + struct page *page; + u64 dma; /* must be u64 - written to hw */ +}; + +/* + * wrappers around a pointer to a socket buffer, + * so a DMA handle can be stored along with the buffer + */ +struct e1000_buffer { + dma_addr_t dma; + struct sk_buff *skb; + union { + /* TX */ + struct { + unsigned long time_stamp; + u16 length; + u16 next_to_watch; + }; + /* RX */ + struct page *page; + }; + +}; + +struct e1000_ring { + void *desc; /* pointer to ring memory */ + dma_addr_t dma; /* phys address of ring */ + unsigned int size; /* length of ring in bytes */ + unsigned int count; /* number of desc. 
in ring */ + + u16 next_to_use; + u16 next_to_clean; + + u16 head; + u16 tail; + + /* array of buffer information structs */ + struct e1000_buffer *buffer_info; + + /* arrays of page information for packet split */ + struct e1000_ps_page *ps_pages; + struct sk_buff *rx_skb_top; + + struct e1000_queue_stats stats; +}; + +/* board specific private data structure */ +struct e1000_adapter { + struct timer_list watchdog_timer; + struct timer_list phy_info_timer; + struct timer_list blink_timer; + + struct work_struct reset_task; + struct work_struct watchdog_task; + + const struct e1000_info *ei; + + struct vlan_group *vlgrp; + u32 bd_number; + u32 rx_buffer_len; + u16 mng_vlan_id; + u16 link_speed; + u16 link_duplex; + + spinlock_t tx_queue_lock; /* prevent concurrent tail updates */ + + /* this is still needed for 82571 and above */ + atomic_t irq_sem; + + /* track device up/down/testing state */ + unsigned long state; + + /* Interrupt Throttle Rate */ + u32 itr; + u32 itr_setting; + u16 tx_itr; + u16 rx_itr; + + /* + * TX + */ + struct e1000_ring *tx_ring /* One per active queue */ + ____cacheline_aligned_in_smp; + + struct napi_struct napi; + + unsigned long tx_queue_len; + unsigned int restart_queue; + u32 txd_cmd; + + bool detect_tx_hung; + u8 tx_timeout_factor; + + u32 tx_int_delay; + u32 tx_abs_int_delay; + + unsigned int total_tx_bytes; + unsigned int total_tx_packets; + unsigned int total_rx_bytes; + unsigned int total_rx_packets; + + /* TX stats */ + u64 tpt_old; + u64 colc_old; + u64 gotcl_old; + u32 gotcl; + u32 tx_timeout_count; + u32 tx_fifo_head; + u32 tx_head_addr; + u32 tx_fifo_size; + u32 tx_dma_failed; + + /* + * RX + */ + bool (*clean_rx) (struct e1000_adapter *adapter, + int *work_done, int work_to_do) + ____cacheline_aligned_in_smp; + void (*alloc_rx_buf) (struct e1000_adapter *adapter, + int cleaned_count); + struct e1000_ring *rx_ring; + + u32 rx_int_delay; + u32 rx_abs_int_delay; + + /* RX stats */ + u64 hw_csum_err; + u64 hw_csum_good; + u64 rx_hdr_split; + u64 gorcl_old; + u32 gorcl; + u32 alloc_rx_buff_failed; + u32 rx_dma_failed; + + unsigned int rx_ps_pages; + u16 rx_ps_bsize0; + + /* OS defined structs */ + struct net_device *netdev; + struct pci_dev *pdev; + struct net_device_stats net_stats; + spinlock_t stats_lock; /* prevent concurrent stats updates */ + + /* structs defined in e1000_hw.h */ + struct e1000_hw hw; + + struct e1000_hw_stats stats; + struct e1000_phy_info phy_info; + struct e1000_phy_stats phy_stats; + + struct e1000_ring test_tx_ring; + struct e1000_ring test_rx_ring; + u32 test_icr; + + u32 msg_enable; + + u32 eeprom_wol; + u32 wol; + u32 pba; + + u8 fc_autoneg; + + unsigned long led_status; + + unsigned int flags; +}; + +struct e1000_info { + enum e1000_mac_type mac; + unsigned int flags; + u32 pba; + s32 (*get_invariants)(struct e1000_adapter *); + struct e1000_mac_operations *mac_ops; + struct e1000_phy_operations *phy_ops; + struct e1000_nvm_operations *nvm_ops; +}; + +/* hardware capability, feature, and workaround flags */ +#define FLAG_HAS_AMT (1 << 0) +#define FLAG_HAS_FLASH (1 << 1) +#define FLAG_HAS_HW_VLAN_FILTER (1 << 2) +#define FLAG_HAS_WOL (1 << 3) +#define FLAG_HAS_ERT (1 << 4) +#define FLAG_HAS_CTRLEXT_ON_LOAD (1 << 5) +#define FLAG_HAS_SWSM_ON_LOAD (1 << 6) +#define FLAG_HAS_JUMBO_FRAMES (1 << 7) +#define FLAG_HAS_ASPM (1 << 8) +#define FLAG_HAS_STATS_ICR_ICT (1 << 9) +#define FLAG_HAS_STATS_PTC_PRC (1 << 10) +#define FLAG_HAS_SMART_POWER_DOWN (1 << 11) +#define FLAG_IS_QUAD_PORT_A (1 << 12) +#define FLAG_IS_QUAD_PORT (1 << 
13) +#define FLAG_TIPG_MEDIUM_FOR_80003ESLAN (1 << 14) +#define FLAG_APME_IN_WUC (1 << 15) +#define FLAG_APME_IN_CTRL3 (1 << 16) +#define FLAG_APME_CHECK_PORT_B (1 << 17) +#define FLAG_DISABLE_FC_PAUSE_TIME (1 << 18) +#define FLAG_NO_WAKE_UCAST (1 << 19) +#define FLAG_MNG_PT_ENABLED (1 << 20) +#define FLAG_RESET_OVERWRITES_LAA (1 << 21) +#define FLAG_TARC_SPEED_MODE_BIT (1 << 22) +#define FLAG_TARC_SET_BIT_ZERO (1 << 23) +#define FLAG_RX_NEEDS_RESTART (1 << 24) +#define FLAG_LSC_GIG_SPEED_DROP (1 << 25) +#define FLAG_SMART_POWER_DOWN (1 << 26) +#define FLAG_MSI_ENABLED (1 << 27) +#define FLAG_RX_CSUM_ENABLED (1 << 28) +#define FLAG_TSO_FORCE (1 << 29) + +#define E1000_RX_DESC_PS(R, i) \ + (&(((union e1000_rx_desc_packet_split *)((R).desc))[i])) +#define E1000_GET_DESC(R, i, type) (&(((struct type *)((R).desc))[i])) +#define E1000_RX_DESC(R, i) E1000_GET_DESC(R, i, e1000_rx_desc) +#define E1000_TX_DESC(R, i) E1000_GET_DESC(R, i, e1000_tx_desc) +#define E1000_CONTEXT_DESC(R, i) E1000_GET_DESC(R, i, e1000_context_desc) + +enum e1000_state_t { + __E1000_TESTING, + __E1000_RESETTING, + __E1000_DOWN +}; + +enum latency_range { + lowest_latency = 0, + low_latency = 1, + bulk_latency = 2, + latency_invalid = 255 +}; + +extern char e1000e_driver_name[]; +extern const char e1000e_driver_version[]; + +extern void e1000e_check_options(struct e1000_adapter *adapter); +extern void e1000e_set_ethtool_ops(struct net_device *netdev); + +extern int e1000e_up(struct e1000_adapter *adapter); +extern void e1000e_down(struct e1000_adapter *adapter); +extern void e1000e_reinit_locked(struct e1000_adapter *adapter); +extern void e1000e_reset(struct e1000_adapter *adapter); +extern void e1000e_power_up_phy(struct e1000_adapter *adapter); +extern int e1000e_setup_rx_resources(struct e1000_adapter *adapter); +extern int e1000e_setup_tx_resources(struct e1000_adapter *adapter); +extern void e1000e_free_rx_resources(struct e1000_adapter *adapter); +extern void e1000e_free_tx_resources(struct e1000_adapter *adapter); +extern void e1000e_update_stats(struct e1000_adapter *adapter); + +extern unsigned int copybreak; + +extern char *e1000e_get_hw_dev_name(struct e1000_hw *hw); + +extern struct e1000_info e1000_82571_info; +extern struct e1000_info e1000_82572_info; +extern struct e1000_info e1000_82573_info; +extern struct e1000_info e1000_ich8_info; +extern struct e1000_info e1000_ich9_info; +extern struct e1000_info e1000_es2_info; + +extern s32 e1000e_read_part_num(struct e1000_hw *hw, u32 *part_num); + +extern s32 e1000e_commit_phy(struct e1000_hw *hw); + +extern bool e1000e_enable_mng_pass_thru(struct e1000_hw *hw); + +extern bool e1000e_get_laa_state_82571(struct e1000_hw *hw); +extern void e1000e_set_laa_state_82571(struct e1000_hw *hw, bool state); + +extern void e1000e_set_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw, + bool state); +extern void e1000e_igp3_phy_powerdown_workaround_ich8lan(struct e1000_hw *hw); +extern void e1000e_gig_downshift_workaround_ich8lan(struct e1000_hw *hw); + +extern s32 e1000e_check_for_copper_link(struct e1000_hw *hw); +extern s32 e1000e_check_for_fiber_link(struct e1000_hw *hw); +extern s32 e1000e_check_for_serdes_link(struct e1000_hw *hw); +extern s32 e1000e_cleanup_led_generic(struct e1000_hw *hw); +extern s32 e1000e_led_on_generic(struct e1000_hw *hw); +extern s32 e1000e_led_off_generic(struct e1000_hw *hw); +extern s32 e1000e_get_bus_info_pcie(struct e1000_hw *hw); +extern s32 e1000e_get_speed_and_duplex_copper(struct e1000_hw *hw, u16 *speed, u16 *duplex); +extern s32 
e1000e_get_speed_and_duplex_fiber_serdes(struct e1000_hw *hw, u16 *speed, u16 *duplex); +extern s32 e1000e_disable_pcie_master(struct e1000_hw *hw); +extern s32 e1000e_get_auto_rd_done(struct e1000_hw *hw); +extern s32 e1000e_id_led_init(struct e1000_hw *hw); +extern void e1000e_clear_hw_cntrs_base(struct e1000_hw *hw); +extern s32 e1000e_setup_fiber_serdes_link(struct e1000_hw *hw); +extern s32 e1000e_copper_link_setup_m88(struct e1000_hw *hw); +extern s32 e1000e_copper_link_setup_igp(struct e1000_hw *hw); +extern s32 e1000e_setup_link(struct e1000_hw *hw); +extern void e1000e_clear_vfta(struct e1000_hw *hw); +extern void e1000e_init_rx_addrs(struct e1000_hw *hw, u16 rar_count); +extern void e1000e_mc_addr_list_update_generic(struct e1000_hw *hw, + u8 *mc_addr_list, u32 mc_addr_count, + u32 rar_used_count, u32 rar_count); +extern void e1000e_rar_set(struct e1000_hw *hw, u8 *addr, u32 index); +extern s32 e1000e_set_fc_watermarks(struct e1000_hw *hw); +extern void e1000e_set_pcie_no_snoop(struct e1000_hw *hw, u32 no_snoop); +extern s32 e1000e_get_hw_semaphore(struct e1000_hw *hw); +extern s32 e1000e_valid_led_default(struct e1000_hw *hw, u16 *data); +extern void e1000e_config_collision_dist(struct e1000_hw *hw); +extern s32 e1000e_config_fc_after_link_up(struct e1000_hw *hw); +extern s32 e1000e_force_mac_fc(struct e1000_hw *hw); +extern s32 e1000e_blink_led(struct e1000_hw *hw); +extern void e1000e_write_vfta(struct e1000_hw *hw, u32 offset, u32 value); +extern void e1000e_reset_adaptive(struct e1000_hw *hw); +extern void e1000e_update_adaptive(struct e1000_hw *hw); + +extern s32 e1000e_setup_copper_link(struct e1000_hw *hw); +extern s32 e1000e_get_phy_id(struct e1000_hw *hw); +extern void e1000e_put_hw_semaphore(struct e1000_hw *hw); +extern s32 e1000e_check_reset_block_generic(struct e1000_hw *hw); +extern s32 e1000e_phy_force_speed_duplex_igp(struct e1000_hw *hw); +extern s32 e1000e_get_cable_length_igp_2(struct e1000_hw *hw); +extern s32 e1000e_get_phy_info_igp(struct e1000_hw *hw); +extern s32 e1000e_read_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 *data); +extern s32 e1000e_phy_hw_reset_generic(struct e1000_hw *hw); +extern s32 e1000e_set_d3_lplu_state(struct e1000_hw *hw, bool active); +extern s32 e1000e_write_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 data); +extern s32 e1000e_phy_sw_reset(struct e1000_hw *hw); +extern s32 e1000e_phy_force_speed_duplex_m88(struct e1000_hw *hw); +extern s32 e1000e_get_cfg_done(struct e1000_hw *hw); +extern s32 e1000e_get_cable_length_m88(struct e1000_hw *hw); +extern s32 e1000e_get_phy_info_m88(struct e1000_hw *hw); +extern s32 e1000e_read_phy_reg_m88(struct e1000_hw *hw, u32 offset, u16 *data); +extern s32 e1000e_write_phy_reg_m88(struct e1000_hw *hw, u32 offset, u16 data); +extern enum e1000_phy_type e1000e_get_phy_type_from_id(u32 phy_id); +extern void e1000e_phy_force_speed_duplex_setup(struct e1000_hw *hw, u16 *phy_ctrl); +extern s32 e1000e_write_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 data); +extern s32 e1000e_read_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 *data); +extern s32 e1000e_phy_has_link_generic(struct e1000_hw *hw, u32 iterations, + u32 usec_interval, bool *success); +extern s32 e1000e_phy_reset_dsp(struct e1000_hw *hw); +extern s32 e1000e_check_downshift(struct e1000_hw *hw); + +static inline s32 e1000_phy_hw_reset(struct e1000_hw *hw) +{ + return hw->phy.ops.reset_phy(hw); +} + +static inline s32 e1000_check_reset_block(struct e1000_hw *hw) +{ + return hw->phy.ops.check_reset_block(hw); +} + +static inline s32 
e1e_rphy(struct e1000_hw *hw, u32 offset, u16 *data) +{ + return hw->phy.ops.read_phy_reg(hw, offset, data); +} + +static inline s32 e1e_wphy(struct e1000_hw *hw, u32 offset, u16 data) +{ + return hw->phy.ops.write_phy_reg(hw, offset, data); +} + +static inline s32 e1000_get_cable_length(struct e1000_hw *hw) +{ + return hw->phy.ops.get_cable_length(hw); +} + +extern s32 e1000e_acquire_nvm(struct e1000_hw *hw); +extern s32 e1000e_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data); +extern s32 e1000e_update_nvm_checksum_generic(struct e1000_hw *hw); +extern s32 e1000e_poll_eerd_eewr_done(struct e1000_hw *hw, int ee_reg); +extern s32 e1000e_read_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data); +extern s32 e1000e_read_nvm_eerd(struct e1000_hw *hw, u16 offset, u16 words, u16 *data); +extern s32 e1000e_validate_nvm_checksum_generic(struct e1000_hw *hw); +extern void e1000e_release_nvm(struct e1000_hw *hw); +extern void e1000e_reload_nvm(struct e1000_hw *hw); +extern s32 e1000e_read_mac_addr(struct e1000_hw *hw); + +static inline s32 e1000_validate_nvm_checksum(struct e1000_hw *hw) +{ + return hw->nvm.ops.validate_nvm(hw); +} + +static inline s32 e1000e_update_nvm_checksum(struct e1000_hw *hw) +{ + return hw->nvm.ops.update_nvm(hw); +} + +static inline s32 e1000_read_nvm(struct e1000_hw *hw, u16 offset, u16 words, u16 *data) +{ + return hw->nvm.ops.read_nvm(hw, offset, words, data); +} + +static inline s32 e1000_write_nvm(struct e1000_hw *hw, u16 offset, u16 words, u16 *data) +{ + return hw->nvm.ops.write_nvm(hw, offset, words, data); +} + +static inline s32 e1000_get_phy_info(struct e1000_hw *hw) +{ + return hw->phy.ops.get_phy_info(hw); +} + +extern bool e1000e_check_mng_mode(struct e1000_hw *hw); +extern bool e1000e_enable_tx_pkt_filtering(struct e1000_hw *hw); +extern s32 e1000e_mng_write_dhcp_info(struct e1000_hw *hw, u8 *buffer, u16 length); + +static inline u32 __er32(struct e1000_hw *hw, unsigned long reg) +{ + return readl(hw->hw_addr + reg); +} + +static inline void __ew32(struct e1000_hw *hw, unsigned long reg, u32 val) +{ + writel(val, hw->hw_addr + reg); +} + +#endif /* _E1000_H_ */ diff --git a/drivers/net/e1000e/es2lan.c b/drivers/net/e1000e/es2lan.c new file mode 100644 index 0000000000000000000000000000000000000000..88657adf965f29ac11386dea0d9da95a19088bd0 --- /dev/null +++ b/drivers/net/e1000e/es2lan.c @@ -0,0 +1,1232 @@ +/******************************************************************************* + + Intel PRO/1000 Linux driver + Copyright(c) 1999 - 2007 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + Linux NICS + e1000-devel Mailing List + Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +/* + * 80003ES2LAN Gigabit Ethernet Controller (Copper) + * 80003ES2LAN Gigabit Ethernet Controller (Serdes) + */ + +#include +#include +#include +#include + +#include "e1000.h" + +#define E1000_KMRNCTRLSTA_OFFSET_FIFO_CTRL 0x00 +#define E1000_KMRNCTRLSTA_OFFSET_INB_CTRL 0x02 +#define E1000_KMRNCTRLSTA_OFFSET_HD_CTRL 0x10 + +#define E1000_KMRNCTRLSTA_FIFO_CTRL_RX_BYPASS 0x0008 +#define E1000_KMRNCTRLSTA_FIFO_CTRL_TX_BYPASS 0x0800 +#define E1000_KMRNCTRLSTA_INB_CTRL_DIS_PADDING 0x0010 + +#define E1000_KMRNCTRLSTA_HD_CTRL_10_100_DEFAULT 0x0004 +#define E1000_KMRNCTRLSTA_HD_CTRL_1000_DEFAULT 0x0000 + +#define E1000_TCTL_EXT_GCEX_MASK 0x000FFC00 /* Gigabit Carry Extend Padding */ +#define DEFAULT_TCTL_EXT_GCEX_80003ES2LAN 0x00010000 + +#define DEFAULT_TIPG_IPGT_1000_80003ES2LAN 0x8 +#define DEFAULT_TIPG_IPGT_10_100_80003ES2LAN 0x9 + +/* GG82563 PHY Specific Status Register (Page 0, Register 16 */ +#define GG82563_PSCR_POLARITY_REVERSAL_DISABLE 0x0002 /* 1=Reversal Disab. */ +#define GG82563_PSCR_CROSSOVER_MODE_MASK 0x0060 +#define GG82563_PSCR_CROSSOVER_MODE_MDI 0x0000 /* 00=Manual MDI */ +#define GG82563_PSCR_CROSSOVER_MODE_MDIX 0x0020 /* 01=Manual MDIX */ +#define GG82563_PSCR_CROSSOVER_MODE_AUTO 0x0060 /* 11=Auto crossover */ + +/* PHY Specific Control Register 2 (Page 0, Register 26) */ +#define GG82563_PSCR2_REVERSE_AUTO_NEG 0x2000 + /* 1=Reverse Auto-Negotiation */ + +/* MAC Specific Control Register (Page 2, Register 21) */ +/* Tx clock speed for Link Down and 1000BASE-T for the following speeds */ +#define GG82563_MSCR_TX_CLK_MASK 0x0007 +#define GG82563_MSCR_TX_CLK_10MBPS_2_5 0x0004 +#define GG82563_MSCR_TX_CLK_100MBPS_25 0x0005 +#define GG82563_MSCR_TX_CLK_1000MBPS_25 0x0007 + +#define GG82563_MSCR_ASSERT_CRS_ON_TX 0x0010 /* 1=Assert */ + +/* DSP Distance Register (Page 5, Register 26) */ +#define GG82563_DSPD_CABLE_LENGTH 0x0007 /* 0 = <50M + 1 = 50-80M + 2 = 80-110M + 3 = 110-140M + 4 = >140M */ + +/* Kumeran Mode Control Register (Page 193, Register 16) */ +#define GG82563_KMCR_PASS_FALSE_CARRIER 0x0800 + +/* Power Management Control Register (Page 193, Register 20) */ +#define GG82563_PMCR_ENABLE_ELECTRICAL_IDLE 0x0001 + /* 1=Enable SERDES Electrical Idle */ + +/* In-Band Control Register (Page 194, Register 18) */ +#define GG82563_ICR_DIS_PADDING 0x0010 /* Disable Padding */ + +/* A table for the GG82563 cable length where the range is defined + * with a lower bound at "index" and the upper bound at + * "index + 5". + */ +static const u16 e1000_gg82563_cable_length_table[] = + { 0, 60, 115, 150, 150, 60, 115, 150, 180, 180, 0xFF }; + +static s32 e1000_setup_copper_link_80003es2lan(struct e1000_hw *hw); +static s32 e1000_acquire_swfw_sync_80003es2lan(struct e1000_hw *hw, u16 mask); +static void e1000_release_swfw_sync_80003es2lan(struct e1000_hw *hw, u16 mask); +static void e1000_initialize_hw_bits_80003es2lan(struct e1000_hw *hw); +static void e1000_clear_hw_cntrs_80003es2lan(struct e1000_hw *hw); +static s32 e1000_cfg_kmrn_1000_80003es2lan(struct e1000_hw *hw); +static s32 e1000_cfg_kmrn_10_100_80003es2lan(struct e1000_hw *hw, u16 duplex); + +/** + * e1000_init_phy_params_80003es2lan - Init ESB2 PHY func ptrs. + * @hw: pointer to the HW structure + * + * This is a function pointer entry point called by the api module. 
+ **/ +static s32 e1000_init_phy_params_80003es2lan(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + + if (hw->media_type != e1000_media_type_copper) { + phy->type = e1000_phy_none; + return 0; + } + + phy->addr = 1; + phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT; + phy->reset_delay_us = 100; + phy->type = e1000_phy_gg82563; + + /* This can only be done after all function pointers are setup. */ + ret_val = e1000e_get_phy_id(hw); + + /* Verify phy id */ + if (phy->id != GG82563_E_PHY_ID) + return -E1000_ERR_PHY; + + return ret_val; +} + +/** + * e1000_init_nvm_params_80003es2lan - Init ESB2 NVM func ptrs. + * @hw: pointer to the HW structure + * + * This is a function pointer entry point called by the api module. + **/ +static s32 e1000_init_nvm_params_80003es2lan(struct e1000_hw *hw) +{ + struct e1000_nvm_info *nvm = &hw->nvm; + u32 eecd = er32(EECD); + u16 size; + + nvm->opcode_bits = 8; + nvm->delay_usec = 1; + switch (nvm->override) { + case e1000_nvm_override_spi_large: + nvm->page_size = 32; + nvm->address_bits = 16; + break; + case e1000_nvm_override_spi_small: + nvm->page_size = 8; + nvm->address_bits = 8; + break; + default: + nvm->page_size = eecd & E1000_EECD_ADDR_BITS ? 32 : 8; + nvm->address_bits = eecd & E1000_EECD_ADDR_BITS ? 16 : 8; + break; + } + + nvm->type = e1000_nvm_eeprom_spi; + + size = (u16)((eecd & E1000_EECD_SIZE_EX_MASK) >> + E1000_EECD_SIZE_EX_SHIFT); + + /* Added to a constant, "size" becomes the left-shift value + * for setting word_size. + */ + size += NVM_WORD_SIZE_BASE_SHIFT; + nvm->word_size = 1 << size; + + return 0; +} + +/** + * e1000_init_mac_params_80003es2lan - Init ESB2 MAC func ptrs. + * @hw: pointer to the HW structure + * + * This is a function pointer entry point called by the api module. + **/ +static s32 e1000_init_mac_params_80003es2lan(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + struct e1000_mac_info *mac = &hw->mac; + struct e1000_mac_operations *func = &mac->ops; + + /* Set media type */ + switch (adapter->pdev->device) { + case E1000_DEV_ID_80003ES2LAN_SERDES_DPT: + hw->media_type = e1000_media_type_internal_serdes; + break; + default: + hw->media_type = e1000_media_type_copper; + break; + } + + /* Set mta register count */ + mac->mta_reg_count = 128; + /* Set rar entry count */ + mac->rar_entry_count = E1000_RAR_ENTRIES; + /* Set if manageability features are enabled. */ + mac->arc_subsystem_valid = + (er32(FWSM) & E1000_FWSM_MODE_MASK) ? 
1 : 0; + + /* check for link */ + switch (hw->media_type) { + case e1000_media_type_copper: + func->setup_physical_interface = e1000_setup_copper_link_80003es2lan; + func->check_for_link = e1000e_check_for_copper_link; + break; + case e1000_media_type_fiber: + func->setup_physical_interface = e1000e_setup_fiber_serdes_link; + func->check_for_link = e1000e_check_for_fiber_link; + break; + case e1000_media_type_internal_serdes: + func->setup_physical_interface = e1000e_setup_fiber_serdes_link; + func->check_for_link = e1000e_check_for_serdes_link; + break; + default: + return -E1000_ERR_CONFIG; + break; + } + + return 0; +} + +static s32 e1000_get_invariants_80003es2lan(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + s32 rc; + + rc = e1000_init_mac_params_80003es2lan(adapter); + if (rc) + return rc; + + rc = e1000_init_nvm_params_80003es2lan(hw); + if (rc) + return rc; + + rc = e1000_init_phy_params_80003es2lan(hw); + if (rc) + return rc; + + return 0; +} + +/** + * e1000_acquire_phy_80003es2lan - Acquire rights to access PHY + * @hw: pointer to the HW structure + * + * A wrapper to acquire access rights to the correct PHY. This is a + * function pointer entry point called by the api module. + **/ +static s32 e1000_acquire_phy_80003es2lan(struct e1000_hw *hw) +{ + u16 mask; + + mask = hw->bus.func ? E1000_SWFW_PHY1_SM : E1000_SWFW_PHY0_SM; + + return e1000_acquire_swfw_sync_80003es2lan(hw, mask); +} + +/** + * e1000_release_phy_80003es2lan - Release rights to access PHY + * @hw: pointer to the HW structure + * + * A wrapper to release access rights to the correct PHY. This is a + * function pointer entry point called by the api module. + **/ +static void e1000_release_phy_80003es2lan(struct e1000_hw *hw) +{ + u16 mask; + + mask = hw->bus.func ? E1000_SWFW_PHY1_SM : E1000_SWFW_PHY0_SM; + e1000_release_swfw_sync_80003es2lan(hw, mask); +} + +/** + * e1000_acquire_nvm_80003es2lan - Acquire rights to access NVM + * @hw: pointer to the HW structure + * + * Acquire the semaphore to access the EEPROM. This is a function + * pointer entry point called by the api module. + **/ +static s32 e1000_acquire_nvm_80003es2lan(struct e1000_hw *hw) +{ + s32 ret_val; + + ret_val = e1000_acquire_swfw_sync_80003es2lan(hw, E1000_SWFW_EEP_SM); + if (ret_val) + return ret_val; + + ret_val = e1000e_acquire_nvm(hw); + + if (ret_val) + e1000_release_swfw_sync_80003es2lan(hw, E1000_SWFW_EEP_SM); + + return ret_val; +} + +/** + * e1000_release_nvm_80003es2lan - Relinquish rights to access NVM + * @hw: pointer to the HW structure + * + * Release the semaphore used to access the EEPROM. This is a + * function pointer entry point called by the api module. + **/ +static void e1000_release_nvm_80003es2lan(struct e1000_hw *hw) +{ + e1000e_release_nvm(hw); + e1000_release_swfw_sync_80003es2lan(hw, E1000_SWFW_EEP_SM); +} + +/** + * e1000_acquire_swfw_sync_80003es2lan - Acquire SW/FW semaphore + * @hw: pointer to the HW structure + * @mask: specifies which semaphore to acquire + * + * Acquire the SW/FW semaphore to access the PHY or NVM. The mask + * will also specify which port we're acquiring the lock for. 
+ **/ +static s32 e1000_acquire_swfw_sync_80003es2lan(struct e1000_hw *hw, u16 mask) +{ + u32 swfw_sync; + u32 swmask = mask; + u32 fwmask = mask << 16; + s32 i = 0; + s32 timeout = 200; + + while (i < timeout) { + if (e1000e_get_hw_semaphore(hw)) + return -E1000_ERR_SWFW_SYNC; + + swfw_sync = er32(SW_FW_SYNC); + if (!(swfw_sync & (fwmask | swmask))) + break; + + /* Firmware currently using resource (fwmask) + * or other software thread using resource (swmask) */ + e1000e_put_hw_semaphore(hw); + mdelay(5); + i++; + } + + if (i == timeout) { + hw_dbg(hw, + "Driver can't access resource, SW_FW_SYNC timeout.\n"); + return -E1000_ERR_SWFW_SYNC; + } + + swfw_sync |= swmask; + ew32(SW_FW_SYNC, swfw_sync); + + e1000e_put_hw_semaphore(hw); + + return 0; +} + +/** + * e1000_release_swfw_sync_80003es2lan - Release SW/FW semaphore + * @hw: pointer to the HW structure + * @mask: specifies which semaphore to acquire + * + * Release the SW/FW semaphore used to access the PHY or NVM. The mask + * will also specify which port we're releasing the lock for. + **/ +static void e1000_release_swfw_sync_80003es2lan(struct e1000_hw *hw, u16 mask) +{ + u32 swfw_sync; + + while (e1000e_get_hw_semaphore(hw) != 0); + /* Empty */ + + swfw_sync = er32(SW_FW_SYNC); + swfw_sync &= ~mask; + ew32(SW_FW_SYNC, swfw_sync); + + e1000e_put_hw_semaphore(hw); +} + +/** + * e1000_read_phy_reg_gg82563_80003es2lan - Read GG82563 PHY register + * @hw: pointer to the HW structure + * @offset: offset of the register to read + * @data: pointer to the data returned from the operation + * + * Read the GG82563 PHY register. This is a function pointer entry + * point called by the api module. + **/ +static s32 e1000_read_phy_reg_gg82563_80003es2lan(struct e1000_hw *hw, + u32 offset, u16 *data) +{ + s32 ret_val; + u32 page_select; + u16 temp; + + /* Select Configuration Page */ + if ((offset & MAX_PHY_REG_ADDRESS) < GG82563_MIN_ALT_REG) + page_select = GG82563_PHY_PAGE_SELECT; + else + /* Use Alternative Page Select register to access + * registers 30 and 31 + */ + page_select = GG82563_PHY_PAGE_SELECT_ALT; + + temp = (u16)((u16)offset >> GG82563_PAGE_SHIFT); + ret_val = e1000e_write_phy_reg_m88(hw, page_select, temp); + if (ret_val) + return ret_val; + + /* The "ready" bit in the MDIC register may be incorrectly set + * before the device has completed the "Page Select" MDI + * transaction. So we wait 200us after each MDI command... + */ + udelay(200); + + /* ...and verify the command was successful. */ + ret_val = e1000e_read_phy_reg_m88(hw, page_select, &temp); + + if (((u16)offset >> GG82563_PAGE_SHIFT) != temp) { + ret_val = -E1000_ERR_PHY; + return ret_val; + } + + udelay(200); + + ret_val = e1000e_read_phy_reg_m88(hw, + MAX_PHY_REG_ADDRESS & offset, + data); + + udelay(200); + + return ret_val; +} + +/** + * e1000_write_phy_reg_gg82563_80003es2lan - Write GG82563 PHY register + * @hw: pointer to the HW structure + * @offset: offset of the register to read + * @data: value to write to the register + * + * Write to the GG82563 PHY register. This is a function pointer entry + * point called by the api module. 
+ **/ +static s32 e1000_write_phy_reg_gg82563_80003es2lan(struct e1000_hw *hw, + u32 offset, u16 data) +{ + s32 ret_val; + u32 page_select; + u16 temp; + + /* Select Configuration Page */ + if ((offset & MAX_PHY_REG_ADDRESS) < GG82563_MIN_ALT_REG) + page_select = GG82563_PHY_PAGE_SELECT; + else + /* Use Alternative Page Select register to access + * registers 30 and 31 + */ + page_select = GG82563_PHY_PAGE_SELECT_ALT; + + temp = (u16)((u16)offset >> GG82563_PAGE_SHIFT); + ret_val = e1000e_write_phy_reg_m88(hw, page_select, temp); + if (ret_val) + return ret_val; + + + /* The "ready" bit in the MDIC register may be incorrectly set + * before the device has completed the "Page Select" MDI + * transaction. So we wait 200us after each MDI command... + */ + udelay(200); + + /* ...and verify the command was successful. */ + ret_val = e1000e_read_phy_reg_m88(hw, page_select, &temp); + + if (((u16)offset >> GG82563_PAGE_SHIFT) != temp) + return -E1000_ERR_PHY; + + udelay(200); + + ret_val = e1000e_write_phy_reg_m88(hw, + MAX_PHY_REG_ADDRESS & offset, + data); + + udelay(200); + + return ret_val; +} + +/** + * e1000_write_nvm_80003es2lan - Write to ESB2 NVM + * @hw: pointer to the HW structure + * @offset: offset of the register to read + * @words: number of words to write + * @data: buffer of data to write to the NVM + * + * Write "words" of data to the ESB2 NVM. This is a function + * pointer entry point called by the api module. + **/ +static s32 e1000_write_nvm_80003es2lan(struct e1000_hw *hw, u16 offset, + u16 words, u16 *data) +{ + return e1000e_write_nvm_spi(hw, offset, words, data); +} + +/** + * e1000_get_cfg_done_80003es2lan - Wait for configuration to complete + * @hw: pointer to the HW structure + * + * Wait a specific amount of time for manageability processes to complete. + * This is a function pointer entry point called by the phy module. + **/ +static s32 e1000_get_cfg_done_80003es2lan(struct e1000_hw *hw) +{ + s32 timeout = PHY_CFG_TIMEOUT; + u32 mask = E1000_NVM_CFG_DONE_PORT_0; + + if (hw->bus.func == 1) + mask = E1000_NVM_CFG_DONE_PORT_1; + + while (timeout) { + if (er32(EEMNGCTL) & mask) + break; + msleep(1); + timeout--; + } + if (!timeout) { + hw_dbg(hw, "MNG configuration cycle has not completed.\n"); + return -E1000_ERR_RESET; + } + + return 0; +} + +/** + * e1000_phy_force_speed_duplex_80003es2lan - Force PHY speed and duplex + * @hw: pointer to the HW structure + * + * Force the speed and duplex settings onto the PHY. This is a + * function pointer entry point called by the phy module. + **/ +static s32 e1000_phy_force_speed_duplex_80003es2lan(struct e1000_hw *hw) +{ + s32 ret_val; + u16 phy_data; + bool link; + + /* Clear Auto-Crossover to force MDI manually. M88E1000 requires MDI + * forced whenever speed and duplex are forced. + */ + ret_val = e1e_rphy(hw, M88E1000_PHY_SPEC_CTRL, &phy_data); + if (ret_val) + return ret_val; + + phy_data &= ~GG82563_PSCR_CROSSOVER_MODE_AUTO; + ret_val = e1e_wphy(hw, GG82563_PHY_SPEC_CTRL, phy_data); + if (ret_val) + return ret_val; + + hw_dbg(hw, "GG82563 PSCR: %X\n", phy_data); + + ret_val = e1e_rphy(hw, PHY_CONTROL, &phy_data); + if (ret_val) + return ret_val; + + e1000e_phy_force_speed_duplex_setup(hw, &phy_data); + + /* Reset the phy to commit changes. 
*/ + phy_data |= MII_CR_RESET; + + ret_val = e1e_wphy(hw, PHY_CONTROL, phy_data); + if (ret_val) + return ret_val; + + udelay(1); + + if (hw->phy.wait_for_link) { + hw_dbg(hw, "Waiting for forced speed/duplex link " + "on GG82563 phy.\n"); + + ret_val = e1000e_phy_has_link_generic(hw, PHY_FORCE_LIMIT, + 100000, &link); + if (ret_val) + return ret_val; + + if (!link) { + /* We didn't get link. + * Reset the DSP and cross our fingers. + */ + ret_val = e1000e_phy_reset_dsp(hw); + if (ret_val) + return ret_val; + } + + /* Try once more */ + ret_val = e1000e_phy_has_link_generic(hw, PHY_FORCE_LIMIT, + 100000, &link); + if (ret_val) + return ret_val; + } + + ret_val = e1e_rphy(hw, GG82563_PHY_MAC_SPEC_CTRL, &phy_data); + if (ret_val) + return ret_val; + + /* Resetting the phy means we need to verify the TX_CLK corresponds + * to the link speed. 10Mbps -> 2.5MHz, else 25MHz. + */ + phy_data &= ~GG82563_MSCR_TX_CLK_MASK; + if (hw->mac.forced_speed_duplex & E1000_ALL_10_SPEED) + phy_data |= GG82563_MSCR_TX_CLK_10MBPS_2_5; + else + phy_data |= GG82563_MSCR_TX_CLK_100MBPS_25; + + /* In addition, we must re-enable CRS on Tx for both half and full + * duplex. + */ + phy_data |= GG82563_MSCR_ASSERT_CRS_ON_TX; + ret_val = e1e_wphy(hw, GG82563_PHY_MAC_SPEC_CTRL, phy_data); + + return ret_val; +} + +/** + * e1000_get_cable_length_80003es2lan - Set approximate cable length + * @hw: pointer to the HW structure + * + * Find the approximate cable length as measured by the GG82563 PHY. + * This is a function pointer entry point called by the phy module. + **/ +static s32 e1000_get_cable_length_80003es2lan(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 phy_data; + u16 index; + + ret_val = e1e_rphy(hw, GG82563_PHY_DSP_DISTANCE, &phy_data); + if (ret_val) + return ret_val; + + index = phy_data & GG82563_DSPD_CABLE_LENGTH; + phy->min_cable_length = e1000_gg82563_cable_length_table[index]; + phy->max_cable_length = e1000_gg82563_cable_length_table[index+5]; + + phy->cable_length = (phy->min_cable_length + phy->max_cable_length) / 2; + + return 0; +} + +/** + * e1000_get_link_up_info_80003es2lan - Report speed and duplex + * @hw: pointer to the HW structure + * @speed: pointer to speed buffer + * @duplex: pointer to duplex buffer + * + * Retrieve the current speed and duplex configuration. + * This is a function pointer entry point called by the api module. + **/ +static s32 e1000_get_link_up_info_80003es2lan(struct e1000_hw *hw, u16 *speed, + u16 *duplex) +{ + s32 ret_val; + + if (hw->media_type == e1000_media_type_copper) { + ret_val = e1000e_get_speed_and_duplex_copper(hw, + speed, + duplex); + if (ret_val) + return ret_val; + if (*speed == SPEED_1000) + ret_val = e1000_cfg_kmrn_1000_80003es2lan(hw); + else + ret_val = e1000_cfg_kmrn_10_100_80003es2lan(hw, + *duplex); + } else { + ret_val = e1000e_get_speed_and_duplex_fiber_serdes(hw, + speed, + duplex); + } + + return ret_val; +} + +/** + * e1000_reset_hw_80003es2lan - Reset the ESB2 controller + * @hw: pointer to the HW structure + * + * Perform a global reset to the ESB2 controller. + * This is a function pointer entry point called by the api module. + **/ +static s32 e1000_reset_hw_80003es2lan(struct e1000_hw *hw) +{ + u32 ctrl; + u32 icr; + s32 ret_val; + + /* Prevent the PCI-E bus from sticking if there is no TLP connection + * on the last TLP read/write transaction when MAC is reset. 
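e1000_get_cable_length_80003es2lan above turns the PHY's DSP distance code into a bracketed estimate: table[index] gives the minimum, table[index + 5] the maximum, and the midpoint is reported. The table below is invented for illustration; the real e1000_gg82563_cable_length_table and GG82563_DSPD_CABLE_LENGTH are defined elsewhere in es2lan.c.

#include <stdio.h>
#include <stdint.h>

/* Hypothetical table, sized so that index + 5 stays in range for any
 * 3-bit distance code. */
static const uint16_t cable_table[] = { 0, 60, 115, 150, 150, 60, 115,
                                        150, 180, 180, 180, 180, 180 };
#define DSPD_CABLE_LENGTH_MASK 0x0007u   /* assumed distance-code mask */

int main(void)
{
        uint16_t phy_data = 0x0002;      /* pretend DSP distance readout */
        uint16_t index = phy_data & DSPD_CABLE_LENGTH_MASK;
        uint16_t min_len = cable_table[index];
        uint16_t max_len = cable_table[index + 5];

        /* The driver reports the midpoint of the bracketed range. */
        printf("cable length ~%u m (%u..%u)\n",
               (min_len + max_len) / 2, min_len, max_len);
        return 0;
}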
+ */ + ret_val = e1000e_disable_pcie_master(hw); + if (ret_val) + hw_dbg(hw, "PCI-E Master disable polling has failed.\n"); + + hw_dbg(hw, "Masking off all interrupts\n"); + ew32(IMC, 0xffffffff); + + ew32(RCTL, 0); + ew32(TCTL, E1000_TCTL_PSP); + e1e_flush(); + + msleep(10); + + ctrl = er32(CTRL); + + hw_dbg(hw, "Issuing a global reset to MAC\n"); + ew32(CTRL, ctrl | E1000_CTRL_RST); + + ret_val = e1000e_get_auto_rd_done(hw); + if (ret_val) + /* We don't want to continue accessing MAC registers. */ + return ret_val; + + /* Clear any pending interrupt events. */ + ew32(IMC, 0xffffffff); + icr = er32(ICR); + + return 0; +} + +/** + * e1000_init_hw_80003es2lan - Initialize the ESB2 controller + * @hw: pointer to the HW structure + * + * Initialize the hw bits, LED, VFTA, MTA, link and hw counters. + * This is a function pointer entry point called by the api module. + **/ +static s32 e1000_init_hw_80003es2lan(struct e1000_hw *hw) +{ + struct e1000_mac_info *mac = &hw->mac; + u32 reg_data; + s32 ret_val; + u16 i; + + e1000_initialize_hw_bits_80003es2lan(hw); + + /* Initialize identification LED */ + ret_val = e1000e_id_led_init(hw); + if (ret_val) { + hw_dbg(hw, "Error initializing identification LED\n"); + return ret_val; + } + + /* Disabling VLAN filtering */ + hw_dbg(hw, "Initializing the IEEE VLAN\n"); + e1000e_clear_vfta(hw); + + /* Setup the receive address. */ + e1000e_init_rx_addrs(hw, mac->rar_entry_count); + + /* Zero out the Multicast HASH table */ + hw_dbg(hw, "Zeroing the MTA\n"); + for (i = 0; i < mac->mta_reg_count; i++) + E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, 0); + + /* Setup link and flow control */ + ret_val = e1000e_setup_link(hw); + + /* Set the transmit descriptor write-back policy */ + reg_data = er32(TXDCTL); + reg_data = (reg_data & ~E1000_TXDCTL_WTHRESH) | + E1000_TXDCTL_FULL_TX_DESC_WB | E1000_TXDCTL_COUNT_DESC; + ew32(TXDCTL, reg_data); + + /* ...for both queues. */ + reg_data = er32(TXDCTL1); + reg_data = (reg_data & ~E1000_TXDCTL_WTHRESH) | + E1000_TXDCTL_FULL_TX_DESC_WB | E1000_TXDCTL_COUNT_DESC; + ew32(TXDCTL1, reg_data); + + /* Enable retransmit on late collisions */ + reg_data = er32(TCTL); + reg_data |= E1000_TCTL_RTLC; + ew32(TCTL, reg_data); + + /* Configure Gigabit Carry Extend Padding */ + reg_data = er32(TCTL_EXT); + reg_data &= ~E1000_TCTL_EXT_GCEX_MASK; + reg_data |= DEFAULT_TCTL_EXT_GCEX_80003ES2LAN; + ew32(TCTL_EXT, reg_data); + + /* Configure Transmit Inter-Packet Gap */ + reg_data = er32(TIPG); + reg_data &= ~E1000_TIPG_IPGT_MASK; + reg_data |= DEFAULT_TIPG_IPGT_1000_80003ES2LAN; + ew32(TIPG, reg_data); + + reg_data = E1000_READ_REG_ARRAY(hw, E1000_FFLT, 0x0001); + reg_data &= ~0x00100000; + E1000_WRITE_REG_ARRAY(hw, E1000_FFLT, 0x0001, reg_data); + + /* Clear all of the statistics registers (clear on read). It is + * important that we do this after we have tried to establish link + * because the symbol error count will increment wildly if there + * is no link. + */ + e1000_clear_hw_cntrs_80003es2lan(hw); + + return ret_val; +} + +/** + * e1000_initialize_hw_bits_80003es2lan - Init hw bits of ESB2 + * @hw: pointer to the HW structure + * + * Initializes required hardware-dependent bits needed for normal operation. 
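e1000_init_hw_80003es2lan above applies the same masked read-modify-write idiom to several registers (TXDCTL write-back policy, TCTL, TCTL_EXT, TIPG): clear the field, then OR in the new value so every other bit is preserved. A minimal sketch of that idiom, with the mask and value constants invented for illustration rather than taken from the driver's headers:

#include <stdio.h>
#include <stdint.h>

#define FIELD_MASK   0x000003FFu   /* invented stand-in for a field mask such as E1000_TIPG_IPGT_MASK */
#define FIELD_VALUE  0x00000008u   /* invented default value for that field */

/* Clear the field, then OR in the new value; untouched bits survive. */
static uint32_t set_field(uint32_t reg, uint32_t mask, uint32_t val)
{
        return (reg & ~mask) | (val & mask);
}

int main(void)
{
        uint32_t tipg = 0xDEAD0C03u;   /* pretend register readback */

        tipg = set_field(tipg, FIELD_MASK, FIELD_VALUE);
        printf("register after update: 0x%08X\n", tipg);  /* high bits preserved */
        return 0;
}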
+ **/ +static void e1000_initialize_hw_bits_80003es2lan(struct e1000_hw *hw) +{ + u32 reg; + + /* Transmit Descriptor Control 0 */ + reg = er32(TXDCTL); + reg |= (1 << 22); + ew32(TXDCTL, reg); + + /* Transmit Descriptor Control 1 */ + reg = er32(TXDCTL1); + reg |= (1 << 22); + ew32(TXDCTL1, reg); + + /* Transmit Arbitration Control 0 */ + reg = er32(TARC0); + reg &= ~(0xF << 27); /* 30:27 */ + if (hw->media_type != e1000_media_type_copper) + reg &= ~(1 << 20); + ew32(TARC0, reg); + + /* Transmit Arbitration Control 1 */ + reg = er32(TARC1); + if (er32(TCTL) & E1000_TCTL_MULR) + reg &= ~(1 << 28); + else + reg |= (1 << 28); + ew32(TARC1, reg); +} + +/** + * e1000_copper_link_setup_gg82563_80003es2lan - Configure GG82563 Link + * @hw: pointer to the HW structure + * + * Setup some GG82563 PHY registers for obtaining link + **/ +static s32 e1000_copper_link_setup_gg82563_80003es2lan(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u32 ctrl_ext; + u16 data; + + ret_val = e1e_rphy(hw, GG82563_PHY_MAC_SPEC_CTRL, + &data); + if (ret_val) + return ret_val; + + data |= GG82563_MSCR_ASSERT_CRS_ON_TX; + /* Use 25MHz for both link down and 1000Base-T for Tx clock. */ + data |= GG82563_MSCR_TX_CLK_1000MBPS_25; + + ret_val = e1e_wphy(hw, GG82563_PHY_MAC_SPEC_CTRL, + data); + if (ret_val) + return ret_val; + + /* Options: + * MDI/MDI-X = 0 (default) + * 0 - Auto for all speeds + * 1 - MDI mode + * 2 - MDI-X mode + * 3 - Auto for 1000Base-T only (MDI-X for 10/100Base-T modes) + */ + ret_val = e1e_rphy(hw, GG82563_PHY_SPEC_CTRL, &data); + if (ret_val) + return ret_val; + + data &= ~GG82563_PSCR_CROSSOVER_MODE_MASK; + + switch (phy->mdix) { + case 1: + data |= GG82563_PSCR_CROSSOVER_MODE_MDI; + break; + case 2: + data |= GG82563_PSCR_CROSSOVER_MODE_MDIX; + break; + case 0: + default: + data |= GG82563_PSCR_CROSSOVER_MODE_AUTO; + break; + } + + /* Options: + * disable_polarity_correction = 0 (default) + * Automatic Correction for Reversed Cable Polarity + * 0 - Disabled + * 1 - Enabled + */ + data &= ~GG82563_PSCR_POLARITY_REVERSAL_DISABLE; + if (phy->disable_polarity_correction) + data |= GG82563_PSCR_POLARITY_REVERSAL_DISABLE; + + ret_val = e1e_wphy(hw, GG82563_PHY_SPEC_CTRL, data); + if (ret_val) + return ret_val; + + /* SW Reset the PHY so all changes take effect */ + ret_val = e1000e_commit_phy(hw); + if (ret_val) { + hw_dbg(hw, "Error Resetting the PHY\n"); + return ret_val; + } + + /* Bypass RX and TX FIFO's */ + ret_val = e1000e_write_kmrn_reg(hw, + E1000_KMRNCTRLSTA_OFFSET_FIFO_CTRL, + E1000_KMRNCTRLSTA_FIFO_CTRL_RX_BYPASS | + E1000_KMRNCTRLSTA_FIFO_CTRL_TX_BYPASS); + if (ret_val) + return ret_val; + + ret_val = e1e_rphy(hw, GG82563_PHY_SPEC_CTRL_2, &data); + if (ret_val) + return ret_val; + + data &= ~GG82563_PSCR2_REVERSE_AUTO_NEG; + ret_val = e1e_wphy(hw, GG82563_PHY_SPEC_CTRL_2, data); + if (ret_val) + return ret_val; + + ctrl_ext = er32(CTRL_EXT); + ctrl_ext &= ~(E1000_CTRL_EXT_LINK_MODE_MASK); + ew32(CTRL_EXT, ctrl_ext); + + ret_val = e1e_rphy(hw, GG82563_PHY_PWR_MGMT_CTRL, &data); + if (ret_val) + return ret_val; + + /* Do not init these registers when the HW is in IAMT mode, since the + * firmware will have already initialized them. We only initialize + * them if the HW is not in IAMT mode. 
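The MDI/MDI-X handling in e1000_copper_link_setup_gg82563_80003es2lan above maps a small policy value onto the PHY's crossover-mode field: 1 forces MDI, 2 forces MDI-X, anything else selects automatic crossover. A compact sketch of that mapping, with the bit values used here as stand-ins for the GG82563_PSCR_CROSSOVER_MODE_* defines:

#include <stdio.h>
#include <stdint.h>

#define CROSSOVER_MASK  0x0060u   /* stand-ins for GG82563_PSCR_CROSSOVER_MODE_* */
#define CROSSOVER_MDI   0x0000u
#define CROSSOVER_MDIX  0x0020u
#define CROSSOVER_AUTO  0x0060u

static uint16_t apply_mdix_policy(uint16_t pscr, unsigned int mdix)
{
        pscr &= ~CROSSOVER_MASK;
        switch (mdix) {
        case 1:
                pscr |= CROSSOVER_MDI;
                break;
        case 2:
                pscr |= CROSSOVER_MDIX;
                break;
        default:                     /* 0 and everything else: auto */
                pscr |= CROSSOVER_AUTO;
                break;
        }
        return pscr;
}

int main(void)
{
        printf("PSCR = 0x%04X\n", apply_mdix_policy(0, 2));  /* forced MDI-X */
        return 0;
}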
+ */ + if (!e1000e_check_mng_mode(hw)) { + /* Enable Electrical Idle on the PHY */ + data |= GG82563_PMCR_ENABLE_ELECTRICAL_IDLE; + ret_val = e1e_wphy(hw, GG82563_PHY_PWR_MGMT_CTRL, data); + if (ret_val) + return ret_val; + + ret_val = e1e_rphy(hw, GG82563_PHY_KMRN_MODE_CTRL, &data); + if (ret_val) + return ret_val; + + data &= ~GG82563_KMCR_PASS_FALSE_CARRIER; + ret_val = e1e_wphy(hw, GG82563_PHY_KMRN_MODE_CTRL, data); + if (ret_val) + return ret_val; + } + + /* Workaround: Disable padding in Kumeran interface in the MAC + * and in the PHY to avoid CRC errors. + */ + ret_val = e1e_rphy(hw, GG82563_PHY_INBAND_CTRL, &data); + if (ret_val) + return ret_val; + + data |= GG82563_ICR_DIS_PADDING; + ret_val = e1e_wphy(hw, GG82563_PHY_INBAND_CTRL, data); + if (ret_val) + return ret_val; + + return 0; +} + +/** + * e1000_setup_copper_link_80003es2lan - Setup Copper Link for ESB2 + * @hw: pointer to the HW structure + * + * Essentially a wrapper for setting up all things "copper" related. + * This is a function pointer entry point called by the mac module. + **/ +static s32 e1000_setup_copper_link_80003es2lan(struct e1000_hw *hw) +{ + u32 ctrl; + s32 ret_val; + u16 reg_data; + + ctrl = er32(CTRL); + ctrl |= E1000_CTRL_SLU; + ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX); + ew32(CTRL, ctrl); + + /* Set the mac to wait the maximum time between each + * iteration and increase the max iterations when + * polling the phy; this fixes erroneous timeouts at 10Mbps. */ + ret_val = e1000e_write_kmrn_reg(hw, GG82563_REG(0x34, 4), 0xFFFF); + if (ret_val) + return ret_val; + ret_val = e1000e_read_kmrn_reg(hw, GG82563_REG(0x34, 9), ®_data); + if (ret_val) + return ret_val; + reg_data |= 0x3F; + ret_val = e1000e_write_kmrn_reg(hw, GG82563_REG(0x34, 9), reg_data); + if (ret_val) + return ret_val; + ret_val = e1000e_read_kmrn_reg(hw, + E1000_KMRNCTRLSTA_OFFSET_INB_CTRL, + ®_data); + if (ret_val) + return ret_val; + reg_data |= E1000_KMRNCTRLSTA_INB_CTRL_DIS_PADDING; + ret_val = e1000e_write_kmrn_reg(hw, + E1000_KMRNCTRLSTA_OFFSET_INB_CTRL, + reg_data); + if (ret_val) + return ret_val; + + ret_val = e1000_copper_link_setup_gg82563_80003es2lan(hw); + if (ret_val) + return ret_val; + + ret_val = e1000e_setup_copper_link(hw); + + return 0; +} + +/** + * e1000_cfg_kmrn_10_100_80003es2lan - Apply "quirks" for 10/100 operation + * @hw: pointer to the HW structure + * @duplex: current duplex setting + * + * Configure the KMRN interface by applying last minute quirks for + * 10/100 operation. + **/ +static s32 e1000_cfg_kmrn_10_100_80003es2lan(struct e1000_hw *hw, u16 duplex) +{ + s32 ret_val; + u32 tipg; + u16 reg_data; + + reg_data = E1000_KMRNCTRLSTA_HD_CTRL_10_100_DEFAULT; + ret_val = e1000e_write_kmrn_reg(hw, + E1000_KMRNCTRLSTA_OFFSET_HD_CTRL, + reg_data); + if (ret_val) + return ret_val; + + /* Configure Transmit Inter-Packet Gap */ + tipg = er32(TIPG); + tipg &= ~E1000_TIPG_IPGT_MASK; + tipg |= DEFAULT_TIPG_IPGT_10_100_80003ES2LAN; + ew32(TIPG, tipg); + + ret_val = e1e_rphy(hw, GG82563_PHY_KMRN_MODE_CTRL, ®_data); + if (ret_val) + return ret_val; + + if (duplex == HALF_DUPLEX) + reg_data |= GG82563_KMCR_PASS_FALSE_CARRIER; + else + reg_data &= ~GG82563_KMCR_PASS_FALSE_CARRIER; + + ret_val = e1e_wphy(hw, GG82563_PHY_KMRN_MODE_CTRL, reg_data); + + return 0; +} + +/** + * e1000_cfg_kmrn_1000_80003es2lan - Apply "quirks" for gigabit operation + * @hw: pointer to the HW structure + * + * Configure the KMRN interface by applying last minute quirks for + * gigabit operation. 
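e1000_cfg_kmrn_10_100_80003es2lan above and the gigabit variant that follows differ in two things: the inter-packet gap programmed into TIPG and whether the PHY's "pass false carrier" bit is set (only for 10/100 half duplex). The sketch below captures just that decision; the numeric defaults and the bit value are invented stand-ins for the driver's defines.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define IPGT_10_100_DEFAULT      0x00000009u   /* invented stand-in */
#define IPGT_1000_DEFAULT        0x00000008u   /* invented stand-in */
#define KMCR_PASS_FALSE_CARRIER  0x0800u       /* invented stand-in */

/* Pick the inter-packet gap and false-carrier handling from the
 * negotiated speed and duplex, as the two cfg_kmrn helpers do. */
static void cfg_kmrn(uint16_t speed, bool half_duplex,
                     uint32_t *tipg, uint16_t *kmcr)
{
        *tipg = (speed == 1000) ? IPGT_1000_DEFAULT : IPGT_10_100_DEFAULT;

        if (speed != 1000 && half_duplex)
                *kmcr |= KMCR_PASS_FALSE_CARRIER;
        else
                *kmcr &= ~KMCR_PASS_FALSE_CARRIER;
}

int main(void)
{
        uint32_t tipg = 0;
        uint16_t kmcr = 0;

        cfg_kmrn(100, true, &tipg, &kmcr);
        printf("tipg=0x%X kmcr=0x%X\n", tipg, kmcr);
        return 0;
}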
+ **/ +static s32 e1000_cfg_kmrn_1000_80003es2lan(struct e1000_hw *hw) +{ + s32 ret_val; + u16 reg_data; + u32 tipg; + + reg_data = E1000_KMRNCTRLSTA_HD_CTRL_1000_DEFAULT; + ret_val = e1000e_write_kmrn_reg(hw, + E1000_KMRNCTRLSTA_OFFSET_HD_CTRL, + reg_data); + if (ret_val) + return ret_val; + + /* Configure Transmit Inter-Packet Gap */ + tipg = er32(TIPG); + tipg &= ~E1000_TIPG_IPGT_MASK; + tipg |= DEFAULT_TIPG_IPGT_1000_80003ES2LAN; + ew32(TIPG, tipg); + + ret_val = e1e_rphy(hw, GG82563_PHY_KMRN_MODE_CTRL, ®_data); + if (ret_val) + return ret_val; + + reg_data &= ~GG82563_KMCR_PASS_FALSE_CARRIER; + ret_val = e1e_wphy(hw, GG82563_PHY_KMRN_MODE_CTRL, reg_data); + + return ret_val; +} + +/** + * e1000_clear_hw_cntrs_80003es2lan - Clear device specific hardware counters + * @hw: pointer to the HW structure + * + * Clears the hardware counters by reading the counter registers. + **/ +static void e1000_clear_hw_cntrs_80003es2lan(struct e1000_hw *hw) +{ + u32 temp; + + e1000e_clear_hw_cntrs_base(hw); + + temp = er32(PRC64); + temp = er32(PRC127); + temp = er32(PRC255); + temp = er32(PRC511); + temp = er32(PRC1023); + temp = er32(PRC1522); + temp = er32(PTC64); + temp = er32(PTC127); + temp = er32(PTC255); + temp = er32(PTC511); + temp = er32(PTC1023); + temp = er32(PTC1522); + + temp = er32(ALGNERRC); + temp = er32(RXERRC); + temp = er32(TNCRS); + temp = er32(CEXTERR); + temp = er32(TSCTC); + temp = er32(TSCTFC); + + temp = er32(MGTPRC); + temp = er32(MGTPDC); + temp = er32(MGTPTC); + + temp = er32(IAC); + temp = er32(ICRXOC); + + temp = er32(ICRXPTC); + temp = er32(ICRXATC); + temp = er32(ICTXPTC); + temp = er32(ICTXATC); + temp = er32(ICTXQEC); + temp = er32(ICTXQMTC); + temp = er32(ICRXDMTC); +} + +static struct e1000_mac_operations es2_mac_ops = { + .mng_mode_enab = E1000_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT, + /* check_for_link dependent on media type */ + .cleanup_led = e1000e_cleanup_led_generic, + .clear_hw_cntrs = e1000_clear_hw_cntrs_80003es2lan, + .get_bus_info = e1000e_get_bus_info_pcie, + .get_link_up_info = e1000_get_link_up_info_80003es2lan, + .led_on = e1000e_led_on_generic, + .led_off = e1000e_led_off_generic, + .mc_addr_list_update = e1000e_mc_addr_list_update_generic, + .reset_hw = e1000_reset_hw_80003es2lan, + .init_hw = e1000_init_hw_80003es2lan, + .setup_link = e1000e_setup_link, + /* setup_physical_interface dependent on media type */ +}; + +static struct e1000_phy_operations es2_phy_ops = { + .acquire_phy = e1000_acquire_phy_80003es2lan, + .check_reset_block = e1000e_check_reset_block_generic, + .commit_phy = e1000e_phy_sw_reset, + .force_speed_duplex = e1000_phy_force_speed_duplex_80003es2lan, + .get_cfg_done = e1000_get_cfg_done_80003es2lan, + .get_cable_length = e1000_get_cable_length_80003es2lan, + .get_phy_info = e1000e_get_phy_info_m88, + .read_phy_reg = e1000_read_phy_reg_gg82563_80003es2lan, + .release_phy = e1000_release_phy_80003es2lan, + .reset_phy = e1000e_phy_hw_reset_generic, + .set_d0_lplu_state = NULL, + .set_d3_lplu_state = e1000e_set_d3_lplu_state, + .write_phy_reg = e1000_write_phy_reg_gg82563_80003es2lan, +}; + +static struct e1000_nvm_operations es2_nvm_ops = { + .acquire_nvm = e1000_acquire_nvm_80003es2lan, + .read_nvm = e1000e_read_nvm_eerd, + .release_nvm = e1000_release_nvm_80003es2lan, + .update_nvm = e1000e_update_nvm_checksum_generic, + .valid_led_default = e1000e_valid_led_default, + .validate_nvm = e1000e_validate_nvm_checksum_generic, + .write_nvm = e1000_write_nvm_80003es2lan, +}; + +struct e1000_info e1000_es2_info = { + .mac = 
e1000_80003es2lan, + .flags = FLAG_HAS_HW_VLAN_FILTER + | FLAG_HAS_JUMBO_FRAMES + | FLAG_HAS_STATS_PTC_PRC + | FLAG_HAS_WOL + | FLAG_APME_IN_CTRL3 + | FLAG_RX_CSUM_ENABLED + | FLAG_HAS_CTRLEXT_ON_LOAD + | FLAG_HAS_STATS_ICR_ICT + | FLAG_RX_NEEDS_RESTART /* errata */ + | FLAG_TARC_SET_BIT_ZERO /* errata */ + | FLAG_APME_CHECK_PORT_B + | FLAG_DISABLE_FC_PAUSE_TIME /* errata */ + | FLAG_TIPG_MEDIUM_FOR_80003ESLAN, + .pba = 38, + .get_invariants = e1000_get_invariants_80003es2lan, + .mac_ops = &es2_mac_ops, + .phy_ops = &es2_phy_ops, + .nvm_ops = &es2_nvm_ops, +}; + diff --git a/drivers/net/e1000e/ethtool.c b/drivers/net/e1000e/ethtool.c new file mode 100644 index 0000000000000000000000000000000000000000..b7a7e2ae5e139c6f37bb5021220d943cb0f5573a --- /dev/null +++ b/drivers/net/e1000e/ethtool.c @@ -0,0 +1,1780 @@ +/******************************************************************************* + + Intel PRO/1000 Linux driver + Copyright(c) 1999 - 2007 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + Linux NICS + e1000-devel Mailing List + Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +/* ethtool support for e1000 */ + +#include +#include +#include +#include + +#include "e1000.h" + +struct e1000_stats { + char stat_string[ETH_GSTRING_LEN]; + int sizeof_stat; + int stat_offset; +}; + +#define E1000_STAT(m) sizeof(((struct e1000_adapter *)0)->m), \ + offsetof(struct e1000_adapter, m) +static const struct e1000_stats e1000_gstrings_stats[] = { + { "rx_packets", E1000_STAT(stats.gprc) }, + { "tx_packets", E1000_STAT(stats.gptc) }, + { "rx_bytes", E1000_STAT(stats.gorcl) }, + { "tx_bytes", E1000_STAT(stats.gotcl) }, + { "rx_broadcast", E1000_STAT(stats.bprc) }, + { "tx_broadcast", E1000_STAT(stats.bptc) }, + { "rx_multicast", E1000_STAT(stats.mprc) }, + { "tx_multicast", E1000_STAT(stats.mptc) }, + { "rx_errors", E1000_STAT(net_stats.rx_errors) }, + { "tx_errors", E1000_STAT(net_stats.tx_errors) }, + { "tx_dropped", E1000_STAT(net_stats.tx_dropped) }, + { "multicast", E1000_STAT(stats.mprc) }, + { "collisions", E1000_STAT(stats.colc) }, + { "rx_length_errors", E1000_STAT(net_stats.rx_length_errors) }, + { "rx_over_errors", E1000_STAT(net_stats.rx_over_errors) }, + { "rx_crc_errors", E1000_STAT(stats.crcerrs) }, + { "rx_frame_errors", E1000_STAT(net_stats.rx_frame_errors) }, + { "rx_no_buffer_count", E1000_STAT(stats.rnbc) }, + { "rx_missed_errors", E1000_STAT(stats.mpc) }, + { "tx_aborted_errors", E1000_STAT(stats.ecol) }, + { "tx_carrier_errors", E1000_STAT(stats.tncrs) }, + { "tx_fifo_errors", E1000_STAT(net_stats.tx_fifo_errors) }, + { "tx_heartbeat_errors", E1000_STAT(net_stats.tx_heartbeat_errors) }, + { "tx_window_errors", E1000_STAT(stats.latecol) }, + { "tx_abort_late_coll", E1000_STAT(stats.latecol) }, + { "tx_deferred_ok", E1000_STAT(stats.dc) }, + { "tx_single_coll_ok", E1000_STAT(stats.scc) }, + { "tx_multi_coll_ok", E1000_STAT(stats.mcc) }, + { "tx_timeout_count", E1000_STAT(tx_timeout_count) }, + { "tx_restart_queue", E1000_STAT(restart_queue) }, + { "rx_long_length_errors", E1000_STAT(stats.roc) }, + { "rx_short_length_errors", E1000_STAT(stats.ruc) }, + { "rx_align_errors", E1000_STAT(stats.algnerrc) }, + { "tx_tcp_seg_good", E1000_STAT(stats.tsctc) }, + { "tx_tcp_seg_failed", E1000_STAT(stats.tsctfc) }, + { "rx_flow_control_xon", E1000_STAT(stats.xonrxc) }, + { "rx_flow_control_xoff", E1000_STAT(stats.xoffrxc) }, + { "tx_flow_control_xon", E1000_STAT(stats.xontxc) }, + { "tx_flow_control_xoff", E1000_STAT(stats.xofftxc) }, + { "rx_long_byte_count", E1000_STAT(stats.gorcl) }, + { "rx_csum_offload_good", E1000_STAT(hw_csum_good) }, + { "rx_csum_offload_errors", E1000_STAT(hw_csum_err) }, + { "rx_header_split", E1000_STAT(rx_hdr_split) }, + { "alloc_rx_buff_failed", E1000_STAT(alloc_rx_buff_failed) }, + { "tx_smbus", E1000_STAT(stats.mgptc) }, + { "rx_smbus", E1000_STAT(stats.mgprc) }, + { "dropped_smbus", E1000_STAT(stats.mgpdc) }, + { "rx_dma_failed", E1000_STAT(rx_dma_failed) }, + { "tx_dma_failed", E1000_STAT(tx_dma_failed) }, +}; + +#define E1000_GLOBAL_STATS_LEN \ + sizeof(e1000_gstrings_stats) / sizeof(struct e1000_stats) +#define E1000_STATS_LEN (E1000_GLOBAL_STATS_LEN) +static const char e1000_gstrings_test[][ETH_GSTRING_LEN] = { + "Register test (offline)", "Eeprom test (offline)", + "Interrupt test (offline)", "Loopback test (offline)", + "Link test (on/offline)" +}; +#define E1000_TEST_LEN sizeof(e1000_gstrings_test) / ETH_GSTRING_LEN + +static int e1000_get_settings(struct net_device *netdev, + struct ethtool_cmd *ecmd) 
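The e1000_gstrings_stats[] table above relies on the E1000_STAT() trick of recording sizeof() and offsetof() for each member, so generic ethtool code can walk the table and copy values out of the adapter structure without knowing its layout. A reduced, self-contained model of the same idea; "demo_adapter" and its members are invented for this sketch.

#include <stdio.h>
#include <stddef.h>
#include <string.h>

struct demo_adapter {
        unsigned long rx_packets;
        unsigned long tx_packets;
        unsigned long restart_queue;
};

struct demo_stat {
        char name[32];
        size_t size;
        size_t offset;
};

/* Same shape as E1000_STAT(): expands to "size, offset" for one member. */
#define DEMO_STAT(m) sizeof(((struct demo_adapter *)0)->m), \
                     offsetof(struct demo_adapter, m)

static const struct demo_stat demo_stats[] = {
        { "rx_packets",    DEMO_STAT(rx_packets) },
        { "tx_packets",    DEMO_STAT(tx_packets) },
        { "restart_queue", DEMO_STAT(restart_queue) },
};

int main(void)
{
        struct demo_adapter a = { 10, 20, 1 };
        size_t i;

        for (i = 0; i < sizeof(demo_stats) / sizeof(demo_stats[0]); i++) {
                unsigned long v = 0;

                /* Copy the member out by offset, as a stats dump would. */
                memcpy(&v, (char *)&a + demo_stats[i].offset,
                       demo_stats[i].size);
                printf("%s: %lu\n", demo_stats[i].name, v);
        }
        return 0;
}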
+{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + + if (hw->media_type == e1000_media_type_copper) { + + ecmd->supported = (SUPPORTED_10baseT_Half | + SUPPORTED_10baseT_Full | + SUPPORTED_100baseT_Half | + SUPPORTED_100baseT_Full | + SUPPORTED_1000baseT_Full | + SUPPORTED_Autoneg | + SUPPORTED_TP); + if (hw->phy.type == e1000_phy_ife) + ecmd->supported &= ~SUPPORTED_1000baseT_Full; + ecmd->advertising = ADVERTISED_TP; + + if (hw->mac.autoneg == 1) { + ecmd->advertising |= ADVERTISED_Autoneg; + /* the e1000 autoneg seems to match ethtool nicely */ + ecmd->advertising |= hw->phy.autoneg_advertised; + } + + ecmd->port = PORT_TP; + ecmd->phy_address = hw->phy.addr; + ecmd->transceiver = XCVR_INTERNAL; + + } else { + ecmd->supported = (SUPPORTED_1000baseT_Full | + SUPPORTED_FIBRE | + SUPPORTED_Autoneg); + + ecmd->advertising = (ADVERTISED_1000baseT_Full | + ADVERTISED_FIBRE | + ADVERTISED_Autoneg); + + ecmd->port = PORT_FIBRE; + ecmd->transceiver = XCVR_EXTERNAL; + } + + if (er32(STATUS) & E1000_STATUS_LU) { + + adapter->hw.mac.ops.get_link_up_info(hw, &adapter->link_speed, + &adapter->link_duplex); + ecmd->speed = adapter->link_speed; + + /* unfortunately FULL_DUPLEX != DUPLEX_FULL + * and HALF_DUPLEX != DUPLEX_HALF */ + + if (adapter->link_duplex == FULL_DUPLEX) + ecmd->duplex = DUPLEX_FULL; + else + ecmd->duplex = DUPLEX_HALF; + } else { + ecmd->speed = -1; + ecmd->duplex = -1; + } + + ecmd->autoneg = ((hw->media_type == e1000_media_type_fiber) || + hw->mac.autoneg) ? AUTONEG_ENABLE : AUTONEG_DISABLE; + return 0; +} + +static int e1000_set_spd_dplx(struct e1000_adapter *adapter, u16 spddplx) +{ + struct e1000_mac_info *mac = &adapter->hw.mac; + + mac->autoneg = 0; + + /* Fiber NICs only allow 1000 gbps Full duplex */ + if ((adapter->hw.media_type == e1000_media_type_fiber) && + spddplx != (SPEED_1000 + DUPLEX_FULL)) { + ndev_err(adapter->netdev, "Unsupported Speed/Duplex " + "configuration\n"); + return -EINVAL; + } + + switch (spddplx) { + case SPEED_10 + DUPLEX_HALF: + mac->forced_speed_duplex = ADVERTISE_10_HALF; + break; + case SPEED_10 + DUPLEX_FULL: + mac->forced_speed_duplex = ADVERTISE_10_FULL; + break; + case SPEED_100 + DUPLEX_HALF: + mac->forced_speed_duplex = ADVERTISE_100_HALF; + break; + case SPEED_100 + DUPLEX_FULL: + mac->forced_speed_duplex = ADVERTISE_100_FULL; + break; + case SPEED_1000 + DUPLEX_FULL: + mac->autoneg = 1; + adapter->hw.phy.autoneg_advertised = ADVERTISE_1000_FULL; + break; + case SPEED_1000 + DUPLEX_HALF: /* not supported */ + default: + ndev_err(adapter->netdev, "Unsupported Speed/Duplex " + "configuration\n"); + return -EINVAL; + } + return 0; +} + +static int e1000_set_settings(struct net_device *netdev, + struct ethtool_cmd *ecmd) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + + /* When SoL/IDER sessions are active, autoneg/speed/duplex + * cannot be changed */ + if (e1000_check_reset_block(hw)) { + ndev_err(netdev, "Cannot change link " + "characteristics when SoL/IDER is active.\n"); + return -EINVAL; + } + + while (test_and_set_bit(__E1000_RESETTING, &adapter->state)) + msleep(1); + + if (ecmd->autoneg == AUTONEG_ENABLE) { + hw->mac.autoneg = 1; + if (hw->media_type == e1000_media_type_fiber) + hw->phy.autoneg_advertised = ADVERTISED_1000baseT_Full | + ADVERTISED_FIBRE | + ADVERTISED_Autoneg; + else + hw->phy.autoneg_advertised = ecmd->advertising | + ADVERTISED_TP | + ADVERTISED_Autoneg; + ecmd->advertising = hw->phy.autoneg_advertised; + } else { + if 
(e1000_set_spd_dplx(adapter, ecmd->speed + ecmd->duplex)) { + clear_bit(__E1000_RESETTING, &adapter->state); + return -EINVAL; + } + } + + /* reset the link */ + + if (netif_running(adapter->netdev)) { + e1000e_down(adapter); + e1000e_up(adapter); + } else { + e1000e_reset(adapter); + } + + clear_bit(__E1000_RESETTING, &adapter->state); + return 0; +} + +static void e1000_get_pauseparam(struct net_device *netdev, + struct ethtool_pauseparam *pause) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + + pause->autoneg = + (adapter->fc_autoneg ? AUTONEG_ENABLE : AUTONEG_DISABLE); + + if (hw->mac.fc == e1000_fc_rx_pause) { + pause->rx_pause = 1; + } else if (hw->mac.fc == e1000_fc_tx_pause) { + pause->tx_pause = 1; + } else if (hw->mac.fc == e1000_fc_full) { + pause->rx_pause = 1; + pause->tx_pause = 1; + } +} + +static int e1000_set_pauseparam(struct net_device *netdev, + struct ethtool_pauseparam *pause) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + int retval = 0; + + adapter->fc_autoneg = pause->autoneg; + + while (test_and_set_bit(__E1000_RESETTING, &adapter->state)) + msleep(1); + + if (pause->rx_pause && pause->tx_pause) + hw->mac.fc = e1000_fc_full; + else if (pause->rx_pause && !pause->tx_pause) + hw->mac.fc = e1000_fc_rx_pause; + else if (!pause->rx_pause && pause->tx_pause) + hw->mac.fc = e1000_fc_tx_pause; + else if (!pause->rx_pause && !pause->tx_pause) + hw->mac.fc = e1000_fc_none; + + hw->mac.original_fc = hw->mac.fc; + + if (adapter->fc_autoneg == AUTONEG_ENABLE) { + hw->mac.fc = e1000_fc_default; + if (netif_running(adapter->netdev)) { + e1000e_down(adapter); + e1000e_up(adapter); + } else { + e1000e_reset(adapter); + } + } else { + retval = ((hw->media_type == e1000_media_type_fiber) ? + hw->mac.ops.setup_link(hw) : e1000e_force_mac_fc(hw)); + } + + clear_bit(__E1000_RESETTING, &adapter->state); + return retval; +} + +static u32 e1000_get_rx_csum(struct net_device *netdev) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + return (adapter->flags & FLAG_RX_CSUM_ENABLED); +} + +static int e1000_set_rx_csum(struct net_device *netdev, u32 data) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + + if (data) + adapter->flags |= FLAG_RX_CSUM_ENABLED; + else + adapter->flags &= ~FLAG_RX_CSUM_ENABLED; + + if (netif_running(netdev)) + e1000e_reinit_locked(adapter); + else + e1000e_reset(adapter); + return 0; +} + +static u32 e1000_get_tx_csum(struct net_device *netdev) +{ + return ((netdev->features & NETIF_F_HW_CSUM) != 0); +} + +static int e1000_set_tx_csum(struct net_device *netdev, u32 data) +{ + if (data) + netdev->features |= NETIF_F_HW_CSUM; + else + netdev->features &= ~NETIF_F_HW_CSUM; + + return 0; +} + +static int e1000_set_tso(struct net_device *netdev, u32 data) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + + if (data) { + netdev->features |= NETIF_F_TSO; + netdev->features |= NETIF_F_TSO6; + } else { + netdev->features &= ~NETIF_F_TSO; + netdev->features &= ~NETIF_F_TSO6; + } + + ndev_info(netdev, "TSO is %s\n", + data ? 
"Enabled" : "Disabled"); + adapter->flags |= FLAG_TSO_FORCE; + return 0; +} + +static u32 e1000_get_msglevel(struct net_device *netdev) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + return adapter->msg_enable; +} + +static void e1000_set_msglevel(struct net_device *netdev, u32 data) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + adapter->msg_enable = data; +} + +static int e1000_get_regs_len(struct net_device *netdev) +{ +#define E1000_REGS_LEN 32 /* overestimate */ + return E1000_REGS_LEN * sizeof(u32); +} + +static void e1000_get_regs(struct net_device *netdev, + struct ethtool_regs *regs, void *p) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + u32 *regs_buff = p; + u16 phy_data; + u8 revision_id; + + memset(p, 0, E1000_REGS_LEN * sizeof(u32)); + + pci_read_config_byte(adapter->pdev, PCI_REVISION_ID, &revision_id); + + regs->version = (1 << 24) | (revision_id << 16) | adapter->pdev->device; + + regs_buff[0] = er32(CTRL); + regs_buff[1] = er32(STATUS); + + regs_buff[2] = er32(RCTL); + regs_buff[3] = er32(RDLEN); + regs_buff[4] = er32(RDH); + regs_buff[5] = er32(RDT); + regs_buff[6] = er32(RDTR); + + regs_buff[7] = er32(TCTL); + regs_buff[8] = er32(TDLEN); + regs_buff[9] = er32(TDH); + regs_buff[10] = er32(TDT); + regs_buff[11] = er32(TIDV); + + regs_buff[12] = adapter->hw.phy.type; /* PHY type (IGP=1, M88=0) */ + if (hw->phy.type == e1000_phy_m88) { + e1e_rphy(hw, M88E1000_PHY_SPEC_STATUS, &phy_data); + regs_buff[13] = (u32)phy_data; /* cable length */ + regs_buff[14] = 0; /* Dummy (to align w/ IGP phy reg dump) */ + regs_buff[15] = 0; /* Dummy (to align w/ IGP phy reg dump) */ + regs_buff[16] = 0; /* Dummy (to align w/ IGP phy reg dump) */ + e1e_rphy(hw, M88E1000_PHY_SPEC_CTRL, &phy_data); + regs_buff[17] = (u32)phy_data; /* extended 10bt distance */ + regs_buff[18] = regs_buff[13]; /* cable polarity */ + regs_buff[19] = 0; /* Dummy (to align w/ IGP phy reg dump) */ + regs_buff[20] = regs_buff[17]; /* polarity correction */ + /* phy receive errors */ + regs_buff[22] = adapter->phy_stats.receive_errors; + regs_buff[23] = regs_buff[13]; /* mdix mode */ + } + regs_buff[21] = adapter->phy_stats.idle_errors; /* phy idle errors */ + e1e_rphy(hw, PHY_1000T_STATUS, &phy_data); + regs_buff[24] = (u32)phy_data; /* phy local receiver status */ + regs_buff[25] = regs_buff[24]; /* phy remote receiver status */ +} + +static int e1000_get_eeprom_len(struct net_device *netdev) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + return adapter->hw.nvm.word_size * 2; +} + +static int e1000_get_eeprom(struct net_device *netdev, + struct ethtool_eeprom *eeprom, u8 *bytes) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + u16 *eeprom_buff; + int first_word; + int last_word; + int ret_val = 0; + u16 i; + + if (eeprom->len == 0) + return -EINVAL; + + eeprom->magic = adapter->pdev->vendor | (adapter->pdev->device << 16); + + first_word = eeprom->offset >> 1; + last_word = (eeprom->offset + eeprom->len - 1) >> 1; + + eeprom_buff = kmalloc(sizeof(u16) * + (last_word - first_word + 1), GFP_KERNEL); + if (!eeprom_buff) + return -ENOMEM; + + if (hw->nvm.type == e1000_nvm_eeprom_spi) { + ret_val = e1000_read_nvm(hw, first_word, + last_word - first_word + 1, + eeprom_buff); + } else { + for (i = 0; i < last_word - first_word + 1; i++) { + ret_val = e1000_read_nvm(hw, first_word + i, 1, + &eeprom_buff[i]); + if (ret_val) + break; + } + } + + /* Device's eeprom is always little-endian, 
word addressable */ + for (i = 0; i < last_word - first_word + 1; i++) + le16_to_cpus(&eeprom_buff[i]); + + memcpy(bytes, (u8 *)eeprom_buff + (eeprom->offset & 1), eeprom->len); + kfree(eeprom_buff); + + return ret_val; +} + +static int e1000_set_eeprom(struct net_device *netdev, + struct ethtool_eeprom *eeprom, u8 *bytes) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + u16 *eeprom_buff; + void *ptr; + int max_len; + int first_word; + int last_word; + int ret_val = 0; + u16 i; + + if (eeprom->len == 0) + return -EOPNOTSUPP; + + if (eeprom->magic != (adapter->pdev->vendor | (adapter->pdev->device << 16))) + return -EFAULT; + + max_len = hw->nvm.word_size * 2; + + first_word = eeprom->offset >> 1; + last_word = (eeprom->offset + eeprom->len - 1) >> 1; + eeprom_buff = kmalloc(max_len, GFP_KERNEL); + if (!eeprom_buff) + return -ENOMEM; + + ptr = (void *)eeprom_buff; + + if (eeprom->offset & 1) { + /* need read/modify/write of first changed EEPROM word */ + /* only the second byte of the word is being modified */ + ret_val = e1000_read_nvm(hw, first_word, 1, &eeprom_buff[0]); + ptr++; + } + if (((eeprom->offset + eeprom->len) & 1) && (ret_val == 0)) + /* need read/modify/write of last changed EEPROM word */ + /* only the first byte of the word is being modified */ + ret_val = e1000_read_nvm(hw, last_word, 1, + &eeprom_buff[last_word - first_word]); + + /* Device's eeprom is always little-endian, word addressable */ + for (i = 0; i < last_word - first_word + 1; i++) + le16_to_cpus(&eeprom_buff[i]); + + memcpy(ptr, bytes, eeprom->len); + + for (i = 0; i < last_word - first_word + 1; i++) + eeprom_buff[i] = cpu_to_le16(eeprom_buff[i]); + + ret_val = e1000_write_nvm(hw, first_word, + last_word - first_word + 1, eeprom_buff); + + /* Update the checksum over the first part of the EEPROM if needed + * and flush shadow RAM for 82573 controllers */ + if ((ret_val == 0) && ((first_word <= NVM_CHECKSUM_REG) || + (hw->mac.type == e1000_82573))) + e1000e_update_nvm_checksum(hw); + + kfree(eeprom_buff); + return ret_val; +} + +static void e1000_get_drvinfo(struct net_device *netdev, + struct ethtool_drvinfo *drvinfo) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + char firmware_version[32]; + u16 eeprom_data; + + strncpy(drvinfo->driver, e1000e_driver_name, 32); + strncpy(drvinfo->version, e1000e_driver_version, 32); + + /* EEPROM image version # is reported as firmware version # for + * PCI-E controllers */ + e1000_read_nvm(&adapter->hw, 5, 1, &eeprom_data); + sprintf(firmware_version, "%d.%d-%d", + (eeprom_data & 0xF000) >> 12, + (eeprom_data & 0x0FF0) >> 4, + eeprom_data & 0x000F); + + strncpy(drvinfo->fw_version, firmware_version, 32); + strncpy(drvinfo->bus_info, pci_name(adapter->pdev), 32); + drvinfo->regdump_len = e1000_get_regs_len(netdev); + drvinfo->eedump_len = e1000_get_eeprom_len(netdev); +} + +static void e1000_get_ringparam(struct net_device *netdev, + struct ethtool_ringparam *ring) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_ring *tx_ring = adapter->tx_ring; + struct e1000_ring *rx_ring = adapter->rx_ring; + + ring->rx_max_pending = E1000_MAX_RXD; + ring->tx_max_pending = E1000_MAX_TXD; + ring->rx_mini_max_pending = 0; + ring->rx_jumbo_max_pending = 0; + ring->rx_pending = rx_ring->count; + ring->tx_pending = tx_ring->count; + ring->rx_mini_pending = 0; + ring->rx_jumbo_pending = 0; +} + +static int e1000_set_ringparam(struct net_device *netdev, + struct ethtool_ringparam *ring) +{ + struct 
e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_ring *tx_ring, *tx_old; + struct e1000_ring *rx_ring, *rx_old; + int err; + + if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending)) + return -EINVAL; + + while (test_and_set_bit(__E1000_RESETTING, &adapter->state)) + msleep(1); + + if (netif_running(adapter->netdev)) + e1000e_down(adapter); + + tx_old = adapter->tx_ring; + rx_old = adapter->rx_ring; + + err = -ENOMEM; + tx_ring = kzalloc(sizeof(struct e1000_ring), GFP_KERNEL); + if (!tx_ring) + goto err_alloc_tx; + + rx_ring = kzalloc(sizeof(struct e1000_ring), GFP_KERNEL); + if (!rx_ring) + goto err_alloc_rx; + + adapter->tx_ring = tx_ring; + adapter->rx_ring = rx_ring; + + rx_ring->count = max(ring->rx_pending, (u32)E1000_MIN_RXD); + rx_ring->count = min(rx_ring->count, (u32)(E1000_MAX_RXD)); + rx_ring->count = ALIGN(rx_ring->count, REQ_RX_DESCRIPTOR_MULTIPLE); + + tx_ring->count = max(ring->tx_pending, (u32)E1000_MIN_TXD); + tx_ring->count = min(tx_ring->count, (u32)(E1000_MAX_TXD)); + tx_ring->count = ALIGN(tx_ring->count, REQ_TX_DESCRIPTOR_MULTIPLE); + + if (netif_running(adapter->netdev)) { + /* Try to get new resources before deleting old */ + err = e1000e_setup_rx_resources(adapter); + if (err) + goto err_setup_rx; + err = e1000e_setup_tx_resources(adapter); + if (err) + goto err_setup_tx; + + /* save the new, restore the old in order to free it, + * then restore the new back again */ + adapter->rx_ring = rx_old; + adapter->tx_ring = tx_old; + e1000e_free_rx_resources(adapter); + e1000e_free_tx_resources(adapter); + kfree(tx_old); + kfree(rx_old); + adapter->rx_ring = rx_ring; + adapter->tx_ring = tx_ring; + err = e1000e_up(adapter); + if (err) + goto err_setup; + } + + clear_bit(__E1000_RESETTING, &adapter->state); + return 0; +err_setup_tx: + e1000e_free_rx_resources(adapter); +err_setup_rx: + adapter->rx_ring = rx_old; + adapter->tx_ring = tx_old; + kfree(rx_ring); +err_alloc_rx: + kfree(tx_ring); +err_alloc_tx: + e1000e_up(adapter); +err_setup: + clear_bit(__E1000_RESETTING, &adapter->state); + return err; +} + +#define REG_PATTERN_TEST(R, M, W) REG_PATTERN_TEST_ARRAY(R, 0, M, W) +#define REG_PATTERN_TEST_ARRAY(reg, offset, mask, writeable) \ +{ \ + u32 _pat; \ + u32 _value; \ + u32 _test[] = {0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF}; \ + for (_pat = 0; _pat < ARRAY_SIZE(_test); _pat++) { \ + E1000_WRITE_REG_ARRAY(hw, reg, offset, \ + (_test[_pat] & writeable)); \ + _value = E1000_READ_REG_ARRAY(hw, reg, offset); \ + if (_value != (_test[_pat] & writeable & mask)) { \ + ndev_err(netdev, "pattern test reg %04X " \ + "failed: got 0x%08X expected 0x%08X\n", \ + reg + offset, \ + value, (_test[_pat] & writeable & mask)); \ + *data = reg; \ + return 1; \ + } \ + } \ +} + +#define REG_SET_AND_CHECK(R, M, W) \ +{ \ + u32 _value; \ + __ew32(hw, R, W & M); \ + _value = __er32(hw, R); \ + if ((W & M) != (_value & M)) { \ + ndev_err(netdev, "set/check reg %04X test failed: " \ + "got 0x%08X expected 0x%08X\n", R, (_value & M), \ + (W & M)); \ + *data = R; \ + return 1; \ + } \ +} + +static int e1000_reg_test(struct e1000_adapter *adapter, u64 *data) +{ + struct e1000_hw *hw = &adapter->hw; + struct e1000_mac_info *mac = &adapter->hw.mac; + struct net_device *netdev = adapter->netdev; + u32 value; + u32 before; + u32 after; + u32 i; + u32 toggle; + + /* The status register is Read Only, so a write should fail. + * Some bits that get toggled are ignored. 
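The REG_PATTERN_TEST macro above implements the register self-test: write a set of patterns restricted to the writable bits, read back, and require the compared bits to match. The sketch below is a simplified userspace model of that check (the "register" is an ordinary variable, and the readback is masked before comparison), not a copy of the macro itself.

#include <stdio.h>
#include <stdint.h>

static uint32_t fake_reg;                       /* stands in for MMIO */

static void reg_write(uint32_t v) { fake_reg = v; }
static uint32_t reg_read(void)   { return fake_reg; }

/* Returns 0 if every pattern reads back correctly in the compared bits. */
static int pattern_test(uint32_t mask, uint32_t writable)
{
        static const uint32_t pat[] = { 0x5A5A5A5A, 0xA5A5A5A5,
                                        0x00000000, 0xFFFFFFFF };
        unsigned int i;

        for (i = 0; i < sizeof(pat) / sizeof(pat[0]); i++) {
                reg_write(pat[i] & writable);
                if ((reg_read() & mask) != (pat[i] & writable & mask)) {
                        printf("pattern 0x%08X failed\n", pat[i]);
                        return 1;
                }
        }
        return 0;
}

int main(void)
{
        /* e.g. a register whose low 16 bits are meaningful and writable */
        return pattern_test(0x0000FFFF, 0xFFFFFFFF);
}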
+ */ + switch (mac->type) { + /* there are several bits on newer hardware that are r/w */ + case e1000_82571: + case e1000_82572: + case e1000_80003es2lan: + toggle = 0x7FFFF3FF; + break; + case e1000_82573: + case e1000_ich8lan: + case e1000_ich9lan: + toggle = 0x7FFFF033; + break; + default: + toggle = 0xFFFFF833; + break; + } + + before = er32(STATUS); + value = (er32(STATUS) & toggle); + ew32(STATUS, toggle); + after = er32(STATUS) & toggle; + if (value != after) { + ndev_err(netdev, "failed STATUS register test got: " + "0x%08X expected: 0x%08X\n", after, value); + *data = 1; + return 1; + } + /* restore previous status */ + ew32(STATUS, before); + + if ((mac->type != e1000_ich8lan) && + (mac->type != e1000_ich9lan)) { + REG_PATTERN_TEST(E1000_FCAL, 0xFFFFFFFF, 0xFFFFFFFF); + REG_PATTERN_TEST(E1000_FCAH, 0x0000FFFF, 0xFFFFFFFF); + REG_PATTERN_TEST(E1000_FCT, 0x0000FFFF, 0xFFFFFFFF); + REG_PATTERN_TEST(E1000_VET, 0x0000FFFF, 0xFFFFFFFF); + } + + REG_PATTERN_TEST(E1000_RDTR, 0x0000FFFF, 0xFFFFFFFF); + REG_PATTERN_TEST(E1000_RDBAH, 0xFFFFFFFF, 0xFFFFFFFF); + REG_PATTERN_TEST(E1000_RDLEN, 0x000FFF80, 0x000FFFFF); + REG_PATTERN_TEST(E1000_RDH, 0x0000FFFF, 0x0000FFFF); + REG_PATTERN_TEST(E1000_RDT, 0x0000FFFF, 0x0000FFFF); + REG_PATTERN_TEST(E1000_FCRTH, 0x0000FFF8, 0x0000FFF8); + REG_PATTERN_TEST(E1000_FCTTV, 0x0000FFFF, 0x0000FFFF); + REG_PATTERN_TEST(E1000_TIPG, 0x3FFFFFFF, 0x3FFFFFFF); + REG_PATTERN_TEST(E1000_TDBAH, 0xFFFFFFFF, 0xFFFFFFFF); + REG_PATTERN_TEST(E1000_TDLEN, 0x000FFF80, 0x000FFFFF); + + REG_SET_AND_CHECK(E1000_RCTL, 0xFFFFFFFF, 0x00000000); + + before = (((mac->type == e1000_ich8lan) || + (mac->type == e1000_ich9lan)) ? 0x06C3B33E : 0x06DFB3FE); + REG_SET_AND_CHECK(E1000_RCTL, before, 0x003FFFFB); + REG_SET_AND_CHECK(E1000_TCTL, 0xFFFFFFFF, 0x00000000); + + REG_SET_AND_CHECK(E1000_RCTL, before, 0xFFFFFFFF); + REG_PATTERN_TEST(E1000_RDBAL, 0xFFFFFFF0, 0xFFFFFFFF); + if ((mac->type != e1000_ich8lan) && + (mac->type != e1000_ich9lan)) + REG_PATTERN_TEST(E1000_TXCW, 0xC000FFFF, 0x0000FFFF); + REG_PATTERN_TEST(E1000_TDBAL, 0xFFFFFFF0, 0xFFFFFFFF); + REG_PATTERN_TEST(E1000_TIDV, 0x0000FFFF, 0x0000FFFF); + for (i = 0; i < mac->rar_entry_count; i++) + REG_PATTERN_TEST_ARRAY(E1000_RA, ((i << 1) + 1), + 0x8003FFFF, 0xFFFFFFFF); + + for (i = 0; i < mac->mta_reg_count; i++) + REG_PATTERN_TEST_ARRAY(E1000_MTA, i, 0xFFFFFFFF, 0xFFFFFFFF); + + *data = 0; + return 0; +} + +static int e1000_eeprom_test(struct e1000_adapter *adapter, u64 *data) +{ + u16 temp; + u16 checksum = 0; + u16 i; + + *data = 0; + /* Read and add up the contents of the EEPROM */ + for (i = 0; i < (NVM_CHECKSUM_REG + 1); i++) { + if ((e1000_read_nvm(&adapter->hw, i, 1, &temp)) < 0) { + *data = 1; + break; + } + checksum += temp; + } + + /* If Checksum is not Correct return error else test passed */ + if ((checksum != (u16) NVM_SUM) && !(*data)) + *data = 2; + + return *data; +} + +static irqreturn_t e1000_test_intr(int irq, void *data) +{ + struct net_device *netdev = (struct net_device *) data; + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + + adapter->test_icr |= er32(ICR); + + return IRQ_HANDLED; +} + +static int e1000_intr_test(struct e1000_adapter *adapter, u64 *data) +{ + struct net_device *netdev = adapter->netdev; + struct e1000_hw *hw = &adapter->hw; + u32 mask; + u32 shared_int = 1; + u32 irq = adapter->pdev->irq; + int i; + + *data = 0; + + /* NOTE: we don't test MSI interrupts here, yet */ + /* Hook up test interrupt handler just for this test */ + if 
(!request_irq(irq, &e1000_test_intr, IRQF_PROBE_SHARED, netdev->name, + netdev)) { + shared_int = 0; + } else if (request_irq(irq, &e1000_test_intr, IRQF_SHARED, + netdev->name, netdev)) { + *data = 1; + return -1; + } + ndev_info(netdev, "testing %s interrupt\n", + (shared_int ? "shared" : "unshared")); + + /* Disable all the interrupts */ + ew32(IMC, 0xFFFFFFFF); + msleep(10); + + /* Test each interrupt */ + for (i = 0; i < 10; i++) { + + if (((adapter->hw.mac.type == e1000_ich8lan) || + (adapter->hw.mac.type == e1000_ich9lan)) && i == 8) + continue; + + /* Interrupt to test */ + mask = 1 << i; + + if (!shared_int) { + /* Disable the interrupt to be reported in + * the cause register and then force the same + * interrupt and see if one gets posted. If + * an interrupt was posted to the bus, the + * test failed. + */ + adapter->test_icr = 0; + ew32(IMC, mask); + ew32(ICS, mask); + msleep(10); + + if (adapter->test_icr & mask) { + *data = 3; + break; + } + } + + /* Enable the interrupt to be reported in + * the cause register and then force the same + * interrupt and see if one gets posted. If + * an interrupt was not posted to the bus, the + * test failed. + */ + adapter->test_icr = 0; + ew32(IMS, mask); + ew32(ICS, mask); + msleep(10); + + if (!(adapter->test_icr & mask)) { + *data = 4; + break; + } + + if (!shared_int) { + /* Disable the other interrupts to be reported in + * the cause register and then force the other + * interrupts and see if any get posted. If + * an interrupt was posted to the bus, the + * test failed. + */ + adapter->test_icr = 0; + ew32(IMC, ~mask & 0x00007FFF); + ew32(ICS, ~mask & 0x00007FFF); + msleep(10); + + if (adapter->test_icr) { + *data = 5; + break; + } + } + } + + /* Disable all the interrupts */ + ew32(IMC, 0xFFFFFFFF); + msleep(10); + + /* Unhook test interrupt handler */ + free_irq(irq, netdev); + + return *data; +} + +static void e1000_free_desc_rings(struct e1000_adapter *adapter) +{ + struct e1000_ring *tx_ring = &adapter->test_tx_ring; + struct e1000_ring *rx_ring = &adapter->test_rx_ring; + struct pci_dev *pdev = adapter->pdev; + int i; + + if (tx_ring->desc && tx_ring->buffer_info) { + for (i = 0; i < tx_ring->count; i++) { + if (tx_ring->buffer_info[i].dma) + pci_unmap_single(pdev, + tx_ring->buffer_info[i].dma, + tx_ring->buffer_info[i].length, + PCI_DMA_TODEVICE); + if (tx_ring->buffer_info[i].skb) + dev_kfree_skb(tx_ring->buffer_info[i].skb); + } + } + + if (rx_ring->desc && rx_ring->buffer_info) { + for (i = 0; i < rx_ring->count; i++) { + if (rx_ring->buffer_info[i].dma) + pci_unmap_single(pdev, + rx_ring->buffer_info[i].dma, + 2048, PCI_DMA_FROMDEVICE); + if (rx_ring->buffer_info[i].skb) + dev_kfree_skb(rx_ring->buffer_info[i].skb); + } + } + + if (tx_ring->desc) { + dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc, + tx_ring->dma); + tx_ring->desc = NULL; + } + if (rx_ring->desc) { + dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc, + rx_ring->dma); + rx_ring->desc = NULL; + } + + kfree(tx_ring->buffer_info); + tx_ring->buffer_info = NULL; + kfree(rx_ring->buffer_info); + rx_ring->buffer_info = NULL; +} + +static int e1000_setup_desc_rings(struct e1000_adapter *adapter) +{ + struct e1000_ring *tx_ring = &adapter->test_tx_ring; + struct e1000_ring *rx_ring = &adapter->test_rx_ring; + struct pci_dev *pdev = adapter->pdev; + struct e1000_hw *hw = &adapter->hw; + u32 rctl; + int size; + int i; + int ret_val; + + /* Setup Tx descriptor ring and Tx buffers */ + + if (!tx_ring->count) + tx_ring->count = E1000_DEFAULT_TXD; + 
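e1000_intr_test above exercises each interrupt cause twice: once masked (nothing must reach the handler) and once unmasked (the cause must be seen). The model below reduces that to pure logic, with ims/test_icr as stand-ins for the real IMS register and the ICR value accumulated by the test ISR; the error codes 3 and 4 echo the ones the driver reports.

#include <stdio.h>
#include <stdint.h>

static uint32_t ims;        /* enabled (unmasked) causes */
static uint32_t test_icr;   /* causes observed by the test handler */

/* Forcing a cause only reaches the handler when it is unmasked. */
static void force_cause(uint32_t mask) { test_icr |= (mask & ims); }

int main(void)
{
        uint32_t mask = 1u << 2;   /* one interrupt cause under test */
        int failed = 0;

        /* masked: must NOT be reported */
        ims = 0;
        test_icr = 0;
        force_cause(mask);
        if (test_icr & mask)
                failed = 3;

        /* unmasked: must be reported */
        ims = mask;
        test_icr = 0;
        force_cause(mask);
        if (!(test_icr & mask))
                failed = 4;

        printf("interrupt test %s\n", failed ? "failed" : "passed");
        return failed;
}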
+ size = tx_ring->count * sizeof(struct e1000_buffer); + tx_ring->buffer_info = kmalloc(size, GFP_KERNEL); + if (!tx_ring->buffer_info) { + ret_val = 1; + goto err_nomem; + } + memset(tx_ring->buffer_info, 0, size); + + tx_ring->size = tx_ring->count * sizeof(struct e1000_tx_desc); + tx_ring->size = ALIGN(tx_ring->size, 4096); + tx_ring->desc = dma_alloc_coherent(&pdev->dev, tx_ring->size, + &tx_ring->dma, GFP_KERNEL); + if (!tx_ring->desc) { + ret_val = 2; + goto err_nomem; + } + memset(tx_ring->desc, 0, tx_ring->size); + tx_ring->next_to_use = 0; + tx_ring->next_to_clean = 0; + + ew32(TDBAL, + ((u64) tx_ring->dma & 0x00000000FFFFFFFF)); + ew32(TDBAH, ((u64) tx_ring->dma >> 32)); + ew32(TDLEN, + tx_ring->count * sizeof(struct e1000_tx_desc)); + ew32(TDH, 0); + ew32(TDT, 0); + ew32(TCTL, + E1000_TCTL_PSP | E1000_TCTL_EN | + E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT | + E1000_COLLISION_DISTANCE << E1000_COLD_SHIFT); + + for (i = 0; i < tx_ring->count; i++) { + struct e1000_tx_desc *tx_desc = E1000_TX_DESC(*tx_ring, i); + struct sk_buff *skb; + unsigned int skb_size = 1024; + + skb = alloc_skb(skb_size, GFP_KERNEL); + if (!skb) { + ret_val = 3; + goto err_nomem; + } + skb_put(skb, skb_size); + tx_ring->buffer_info[i].skb = skb; + tx_ring->buffer_info[i].length = skb->len; + tx_ring->buffer_info[i].dma = + pci_map_single(pdev, skb->data, skb->len, + PCI_DMA_TODEVICE); + if (pci_dma_mapping_error(tx_ring->buffer_info[i].dma)) { + ret_val = 4; + goto err_nomem; + } + tx_desc->buffer_addr = cpu_to_le64( + tx_ring->buffer_info[i].dma); + tx_desc->lower.data = cpu_to_le32(skb->len); + tx_desc->lower.data |= cpu_to_le32(E1000_TXD_CMD_EOP | + E1000_TXD_CMD_IFCS | + E1000_TXD_CMD_RPS); + tx_desc->upper.data = 0; + } + + /* Setup Rx descriptor ring and Rx buffers */ + + if (!rx_ring->count) + rx_ring->count = E1000_DEFAULT_RXD; + + size = rx_ring->count * sizeof(struct e1000_buffer); + rx_ring->buffer_info = kmalloc(size, GFP_KERNEL); + if (!rx_ring->buffer_info) { + ret_val = 5; + goto err_nomem; + } + memset(rx_ring->buffer_info, 0, size); + + rx_ring->size = rx_ring->count * sizeof(struct e1000_rx_desc); + rx_ring->desc = dma_alloc_coherent(&pdev->dev, rx_ring->size, + &rx_ring->dma, GFP_KERNEL); + if (!rx_ring->desc) { + ret_val = 6; + goto err_nomem; + } + memset(rx_ring->desc, 0, rx_ring->size); + rx_ring->next_to_use = 0; + rx_ring->next_to_clean = 0; + + rctl = er32(RCTL); + ew32(RCTL, rctl & ~E1000_RCTL_EN); + ew32(RDBAL, ((u64) rx_ring->dma & 0xFFFFFFFF)); + ew32(RDBAH, ((u64) rx_ring->dma >> 32)); + ew32(RDLEN, rx_ring->size); + ew32(RDH, 0); + ew32(RDT, 0); + rctl = E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_SZ_2048 | + E1000_RCTL_LBM_NO | E1000_RCTL_RDMTS_HALF | + (adapter->hw.mac.mc_filter_type << E1000_RCTL_MO_SHIFT); + ew32(RCTL, rctl); + + for (i = 0; i < rx_ring->count; i++) { + struct e1000_rx_desc *rx_desc = E1000_RX_DESC(*rx_ring, i); + struct sk_buff *skb; + + skb = alloc_skb(2048 + NET_IP_ALIGN, GFP_KERNEL); + if (!skb) { + ret_val = 7; + goto err_nomem; + } + skb_reserve(skb, NET_IP_ALIGN); + rx_ring->buffer_info[i].skb = skb; + rx_ring->buffer_info[i].dma = + pci_map_single(pdev, skb->data, 2048, + PCI_DMA_FROMDEVICE); + if (pci_dma_mapping_error(rx_ring->buffer_info[i].dma)) { + ret_val = 8; + goto err_nomem; + } + rx_desc->buffer_addr = + cpu_to_le64(rx_ring->buffer_info[i].dma); + memset(skb->data, 0x00, skb->len); + } + + return 0; + +err_nomem: + e1000_free_desc_rings(adapter); + return ret_val; +} + +static void e1000_phy_disable_receiver(struct e1000_adapter 
*adapter) +{ + /* Write out to PHY registers 29 and 30 to disable the Receiver. */ + e1e_wphy(&adapter->hw, 29, 0x001F); + e1e_wphy(&adapter->hw, 30, 0x8FFC); + e1e_wphy(&adapter->hw, 29, 0x001A); + e1e_wphy(&adapter->hw, 30, 0x8FF0); +} + +static int e1000_integrated_phy_loopback(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u32 ctrl_reg = 0; + u32 stat_reg = 0; + + adapter->hw.mac.autoneg = 0; + + if (adapter->hw.phy.type == e1000_phy_m88) { + /* Auto-MDI/MDIX Off */ + e1e_wphy(hw, M88E1000_PHY_SPEC_CTRL, 0x0808); + /* reset to update Auto-MDI/MDIX */ + e1e_wphy(hw, PHY_CONTROL, 0x9140); + /* autoneg off */ + e1e_wphy(hw, PHY_CONTROL, 0x8140); + } else if (adapter->hw.phy.type == e1000_phy_gg82563) + e1e_wphy(hw, GG82563_PHY_KMRN_MODE_CTRL, 0x1CC); + + ctrl_reg = er32(CTRL); + + if (adapter->hw.phy.type == e1000_phy_ife) { + /* force 100, set loopback */ + e1e_wphy(hw, PHY_CONTROL, 0x6100); + + /* Now set up the MAC to the same speed/duplex as the PHY. */ + ctrl_reg &= ~E1000_CTRL_SPD_SEL; /* Clear the speed sel bits */ + ctrl_reg |= (E1000_CTRL_FRCSPD | /* Set the Force Speed Bit */ + E1000_CTRL_FRCDPX | /* Set the Force Duplex Bit */ + E1000_CTRL_SPD_100 |/* Force Speed to 100 */ + E1000_CTRL_FD); /* Force Duplex to FULL */ + } else { + /* force 1000, set loopback */ + e1e_wphy(hw, PHY_CONTROL, 0x4140); + + /* Now set up the MAC to the same speed/duplex as the PHY. */ + ctrl_reg = er32(CTRL); + ctrl_reg &= ~E1000_CTRL_SPD_SEL; /* Clear the speed sel bits */ + ctrl_reg |= (E1000_CTRL_FRCSPD | /* Set the Force Speed Bit */ + E1000_CTRL_FRCDPX | /* Set the Force Duplex Bit */ + E1000_CTRL_SPD_1000 |/* Force Speed to 1000 */ + E1000_CTRL_FD); /* Force Duplex to FULL */ + } + + if (adapter->hw.media_type == e1000_media_type_copper && + adapter->hw.phy.type == e1000_phy_m88) { + ctrl_reg |= E1000_CTRL_ILOS; /* Invert Loss of Signal */ + } else { + /* Set the ILOS bit on the fiber Nic if half duplex link is + * detected. */ + stat_reg = er32(STATUS); + if ((stat_reg & E1000_STATUS_FD) == 0) + ctrl_reg |= (E1000_CTRL_ILOS | E1000_CTRL_SLU); + } + + ew32(CTRL, ctrl_reg); + + /* Disable the receiver on the PHY so when a cable is plugged in, the + * PHY does not begin to autoneg when a cable is reconnected to the NIC. 
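Once e1000_integrated_phy_loopback above forces the PHY to a fixed speed/duplex, the MAC's CTRL register has to be forced to match it. The sketch below shows only that bit manipulation; the CTRL_* values are illustrative stand-ins for the driver's E1000_CTRL_* defines.

#include <stdio.h>
#include <stdint.h>

#define CTRL_FD        (1u << 0)    /* stand-ins for E1000_CTRL_* bits */
#define CTRL_SPD_SEL   (3u << 8)
#define CTRL_SPD_1000  (2u << 8)
#define CTRL_FRCSPD    (1u << 11)
#define CTRL_FRCDPX    (1u << 12)

/* Force the MAC to 1000 Mb/s full duplex to match a PHY forced the same way. */
static uint32_t force_mac_1000fd(uint32_t ctrl)
{
        ctrl &= ~CTRL_SPD_SEL;                      /* clear speed select */
        ctrl |= CTRL_FRCSPD | CTRL_FRCDPX |         /* force speed and duplex */
                CTRL_SPD_1000 | CTRL_FD;            /* 1000 Mb/s, full duplex */
        return ctrl;
}

int main(void)
{
        printf("CTRL = 0x%08X\n", force_mac_1000fd(0));
        return 0;
}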
+ */ + if (adapter->hw.phy.type == e1000_phy_m88) + e1000_phy_disable_receiver(adapter); + + udelay(500); + + return 0; +} + +static int e1000_set_82571_fiber_loopback(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u32 ctrl = er32(CTRL); + int link = 0; + + /* special requirements for 82571/82572 fiber adapters */ + + /* jump through hoops to make sure link is up because serdes + * link is hardwired up */ + ctrl |= E1000_CTRL_SLU; + ew32(CTRL, ctrl); + + /* disable autoneg */ + ctrl = er32(TXCW); + ctrl &= ~(1 << 31); + ew32(TXCW, ctrl); + + link = (er32(STATUS) & E1000_STATUS_LU); + + if (!link) { + /* set invert loss of signal */ + ctrl = er32(CTRL); + ctrl |= E1000_CTRL_ILOS; + ew32(CTRL, ctrl); + } + + /* special write to serdes control register to enable SerDes analog + * loopback */ +#define E1000_SERDES_LB_ON 0x410 + ew32(SCTL, E1000_SERDES_LB_ON); + msleep(10); + + return 0; +} + +/* only call this for fiber/serdes connections to es2lan */ +static int e1000_set_es2lan_mac_loopback(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u32 ctrlext = er32(CTRL_EXT); + u32 ctrl = er32(CTRL); + + /* save CTRL_EXT to restore later, reuse an empty variable (unused + on mac_type 80003es2lan) */ + adapter->tx_fifo_head = ctrlext; + + /* clear the serdes mode bits, putting the device into mac loopback */ + ctrlext &= ~E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES; + ew32(CTRL_EXT, ctrlext); + + /* force speed to 1000/FD, link up */ + ctrl &= ~(E1000_CTRL_SPD_1000 | E1000_CTRL_SPD_100); + ctrl |= (E1000_CTRL_SLU | E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX | + E1000_CTRL_SPD_1000 | E1000_CTRL_FD); + ew32(CTRL, ctrl); + + /* set mac loopback */ + ctrl = er32(RCTL); + ctrl |= E1000_RCTL_LBM_MAC; + ew32(RCTL, ctrl); + + /* set testing mode parameters (no need to reset later) */ +#define KMRNCTRLSTA_OPMODE (0x1F << 16) +#define KMRNCTRLSTA_OPMODE_1GB_FD_GMII 0x0582 + ew32(KMRNCTRLSTA, + (KMRNCTRLSTA_OPMODE | KMRNCTRLSTA_OPMODE_1GB_FD_GMII)); + + return 0; +} + +static int e1000_setup_loopback_test(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u32 rctl; + + if (hw->media_type == e1000_media_type_fiber || + hw->media_type == e1000_media_type_internal_serdes) { + switch (hw->mac.type) { + case e1000_80003es2lan: + return e1000_set_es2lan_mac_loopback(adapter); + break; + case e1000_82571: + case e1000_82572: + return e1000_set_82571_fiber_loopback(adapter); + break; + default: + rctl = er32(RCTL); + rctl |= E1000_RCTL_LBM_TCVR; + ew32(RCTL, rctl); + return 0; + } + } else if (hw->media_type == e1000_media_type_copper) { + return e1000_integrated_phy_loopback(adapter); + } + + return 7; +} + +static void e1000_loopback_cleanup(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u32 rctl; + u16 phy_reg; + + rctl = er32(RCTL); + rctl &= ~(E1000_RCTL_LBM_TCVR | E1000_RCTL_LBM_MAC); + ew32(RCTL, rctl); + + switch (hw->mac.type) { + case e1000_80003es2lan: + if (hw->media_type == e1000_media_type_fiber || + hw->media_type == e1000_media_type_internal_serdes) { + /* restore CTRL_EXT, stealing space from tx_fifo_head */ + ew32(CTRL_EXT, + adapter->tx_fifo_head); + adapter->tx_fifo_head = 0; + } + /* fall through */ + case e1000_82571: + case e1000_82572: + if (hw->media_type == e1000_media_type_fiber || + hw->media_type == e1000_media_type_internal_serdes) { +#define E1000_SERDES_LB_OFF 0x400 + ew32(SCTL, E1000_SERDES_LB_OFF); + msleep(10); + break; + } + /* Fall Through */ + default: + hw->mac.autoneg = 1; + if (hw->phy.type 
== e1000_phy_gg82563) + e1e_wphy(hw, GG82563_PHY_KMRN_MODE_CTRL, 0x180); + e1e_rphy(hw, PHY_CONTROL, &phy_reg); + if (phy_reg & MII_CR_LOOPBACK) { + phy_reg &= ~MII_CR_LOOPBACK; + e1e_wphy(hw, PHY_CONTROL, phy_reg); + e1000e_commit_phy(hw); + } + break; + } +} + +static void e1000_create_lbtest_frame(struct sk_buff *skb, + unsigned int frame_size) +{ + memset(skb->data, 0xFF, frame_size); + frame_size &= ~1; + memset(&skb->data[frame_size / 2], 0xAA, frame_size / 2 - 1); + memset(&skb->data[frame_size / 2 + 10], 0xBE, 1); + memset(&skb->data[frame_size / 2 + 12], 0xAF, 1); +} + +static int e1000_check_lbtest_frame(struct sk_buff *skb, + unsigned int frame_size) +{ + frame_size &= ~1; + if (*(skb->data + 3) == 0xFF) + if ((*(skb->data + frame_size / 2 + 10) == 0xBE) && + (*(skb->data + frame_size / 2 + 12) == 0xAF)) + return 0; + return 13; +} + +static int e1000_run_loopback_test(struct e1000_adapter *adapter) +{ + struct e1000_ring *tx_ring = &adapter->test_tx_ring; + struct e1000_ring *rx_ring = &adapter->test_rx_ring; + struct pci_dev *pdev = adapter->pdev; + struct e1000_hw *hw = &adapter->hw; + int i, j, k, l; + int lc; + int good_cnt; + int ret_val = 0; + unsigned long time; + + ew32(RDT, rx_ring->count - 1); + + /* Calculate the loop count based on the largest descriptor ring + * The idea is to wrap the largest ring a number of times using 64 + * send/receive pairs during each loop + */ + + if (rx_ring->count <= tx_ring->count) + lc = ((tx_ring->count / 64) * 2) + 1; + else + lc = ((rx_ring->count / 64) * 2) + 1; + + k = 0; + l = 0; + for (j = 0; j <= lc; j++) { /* loop count loop */ + for (i = 0; i < 64; i++) { /* send the packets */ + e1000_create_lbtest_frame( + tx_ring->buffer_info[i].skb, 1024); + pci_dma_sync_single_for_device(pdev, + tx_ring->buffer_info[k].dma, + tx_ring->buffer_info[k].length, + PCI_DMA_TODEVICE); + k++; + if (k == tx_ring->count) + k = 0; + } + ew32(TDT, k); + msleep(200); + time = jiffies; /* set the start time for the receive */ + good_cnt = 0; + do { /* receive the sent packets */ + pci_dma_sync_single_for_cpu(pdev, + rx_ring->buffer_info[l].dma, 2048, + PCI_DMA_FROMDEVICE); + + ret_val = e1000_check_lbtest_frame( + rx_ring->buffer_info[l].skb, 1024); + if (!ret_val) + good_cnt++; + l++; + if (l == rx_ring->count) + l = 0; + /* time + 20 msecs (200 msecs on 2.4) is more than + * enough time to complete the receives, if it's + * exceeded, break and error off + */ + } while ((good_cnt < 64) && !time_after(jiffies, time + 20)); + if (good_cnt != 64) { + ret_val = 13; /* ret_val is the same as mis-compare */ + break; + } + if (jiffies >= (time + 2)) { + ret_val = 14; /* error code for time out error */ + break; + } + } /* end loop count loop */ + return ret_val; +} + +static int e1000_loopback_test(struct e1000_adapter *adapter, u64 *data) +{ + /* PHY loopback cannot be performed if SoL/IDER + * sessions are active */ + if (e1000_check_reset_block(&adapter->hw)) { + ndev_err(adapter->netdev, "Cannot do PHY loopback test " + "when SoL/IDER is active.\n"); + *data = 0; + goto out; + } + + *data = e1000_setup_desc_rings(adapter); + if (data) + goto out; + + *data = e1000_setup_loopback_test(adapter); + if (data) + goto err_loopback; + + *data = e1000_run_loopback_test(adapter); + e1000_loopback_cleanup(adapter); + +err_loopback: + e1000_free_desc_rings(adapter); +out: + return *data; +} + +static int e1000_link_test(struct e1000_adapter *adapter, u64 *data) +{ + struct e1000_hw *hw = &adapter->hw; + + *data = 0; + if (hw->media_type == 
e1000_media_type_internal_serdes) { + int i = 0; + hw->mac.serdes_has_link = 0; + + /* On some blade server designs, link establishment + * could take as long as 2-3 minutes */ + do { + hw->mac.ops.check_for_link(hw); + if (hw->mac.serdes_has_link) + return *data; + msleep(20); + } while (i++ < 3750); + + *data = 1; + } else { + hw->mac.ops.check_for_link(hw); + if (hw->mac.autoneg) + msleep(4000); + + if (!(er32(STATUS) & + E1000_STATUS_LU)) + *data = 1; + } + return *data; +} + +static int e1000e_get_sset_count(struct net_device *netdev, int sset) +{ + switch (sset) { + case ETH_SS_TEST: + return E1000_TEST_LEN; + case ETH_SS_STATS: + return E1000_STATS_LEN; + default: + return -EOPNOTSUPP; + } +} + +static void e1000_diag_test(struct net_device *netdev, + struct ethtool_test *eth_test, u64 *data) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + u16 autoneg_advertised; + u8 forced_speed_duplex; + u8 autoneg; + bool if_running = netif_running(netdev); + + set_bit(__E1000_TESTING, &adapter->state); + if (eth_test->flags == ETH_TEST_FL_OFFLINE) { + /* Offline tests */ + + /* save speed, duplex, autoneg settings */ + autoneg_advertised = adapter->hw.phy.autoneg_advertised; + forced_speed_duplex = adapter->hw.mac.forced_speed_duplex; + autoneg = adapter->hw.mac.autoneg; + + ndev_info(netdev, "offline testing starting\n"); + + /* Link test performed before hardware reset so autoneg doesn't + * interfere with test result */ + if (e1000_link_test(adapter, &data[4])) + eth_test->flags |= ETH_TEST_FL_FAILED; + + if (if_running) + /* indicate we're in test mode */ + dev_close(netdev); + else + e1000e_reset(adapter); + + if (e1000_reg_test(adapter, &data[0])) + eth_test->flags |= ETH_TEST_FL_FAILED; + + e1000e_reset(adapter); + if (e1000_eeprom_test(adapter, &data[1])) + eth_test->flags |= ETH_TEST_FL_FAILED; + + e1000e_reset(adapter); + if (e1000_intr_test(adapter, &data[2])) + eth_test->flags |= ETH_TEST_FL_FAILED; + + e1000e_reset(adapter); + /* make sure the phy is powered up */ + e1000e_power_up_phy(adapter); + if (e1000_loopback_test(adapter, &data[3])) + eth_test->flags |= ETH_TEST_FL_FAILED; + + /* restore speed, duplex, autoneg settings */ + adapter->hw.phy.autoneg_advertised = autoneg_advertised; + adapter->hw.mac.forced_speed_duplex = forced_speed_duplex; + adapter->hw.mac.autoneg = autoneg; + + /* force this routine to wait until autoneg complete/timeout */ + adapter->hw.phy.wait_for_link = 1; + e1000e_reset(adapter); + adapter->hw.phy.wait_for_link = 0; + + clear_bit(__E1000_TESTING, &adapter->state); + if (if_running) + dev_open(netdev); + } else { + ndev_info(netdev, "online testing starting\n"); + /* Online tests */ + if (e1000_link_test(adapter, &data[4])) + eth_test->flags |= ETH_TEST_FL_FAILED; + + /* Online tests aren't run; pass by default */ + data[0] = 0; + data[1] = 0; + data[2] = 0; + data[3] = 0; + + clear_bit(__E1000_TESTING, &adapter->state); + } + msleep_interruptible(4 * 1000); +} + +static void e1000_get_wol(struct net_device *netdev, + struct ethtool_wolinfo *wol) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + + wol->supported = 0; + wol->wolopts = 0; + + if (!(adapter->flags & FLAG_HAS_WOL)) + return; + + wol->supported = WAKE_UCAST | WAKE_MCAST | + WAKE_BCAST | WAKE_MAGIC; + + /* apply any specific unsupported masks here */ + if (adapter->flags & FLAG_NO_WAKE_UCAST) { + wol->supported &= ~WAKE_UCAST; + + if (adapter->wol & E1000_WUFC_EX) + ndev_err(netdev, "Interface does not support " + "directed (unicast) frame wake-up packets\n"); + } + + 
if (adapter->wol & E1000_WUFC_EX) + wol->wolopts |= WAKE_UCAST; + if (adapter->wol & E1000_WUFC_MC) + wol->wolopts |= WAKE_MCAST; + if (adapter->wol & E1000_WUFC_BC) + wol->wolopts |= WAKE_BCAST; + if (adapter->wol & E1000_WUFC_MAG) + wol->wolopts |= WAKE_MAGIC; +} + +static int e1000_set_wol(struct net_device *netdev, + struct ethtool_wolinfo *wol) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + + if (wol->wolopts & (WAKE_PHY | WAKE_ARP | WAKE_MAGICSECURE)) + return -EOPNOTSUPP; + + if (!(adapter->flags & FLAG_HAS_WOL)) + return wol->wolopts ? -EOPNOTSUPP : 0; + + /* these settings will always override what we currently have */ + adapter->wol = 0; + + if (wol->wolopts & WAKE_UCAST) + adapter->wol |= E1000_WUFC_EX; + if (wol->wolopts & WAKE_MCAST) + adapter->wol |= E1000_WUFC_MC; + if (wol->wolopts & WAKE_BCAST) + adapter->wol |= E1000_WUFC_BC; + if (wol->wolopts & WAKE_MAGIC) + adapter->wol |= E1000_WUFC_MAG; + + return 0; +} + +/* toggle LED 4 times per second = 2 "blinks" per second */ +#define E1000_ID_INTERVAL (HZ/4) + +/* bit defines for adapter->led_status */ +#define E1000_LED_ON 0 + +static void e1000_led_blink_callback(unsigned long data) +{ + struct e1000_adapter *adapter = (struct e1000_adapter *) data; + + if (test_and_change_bit(E1000_LED_ON, &adapter->led_status)) + adapter->hw.mac.ops.led_off(&adapter->hw); + else + adapter->hw.mac.ops.led_on(&adapter->hw); + + mod_timer(&adapter->blink_timer, jiffies + E1000_ID_INTERVAL); +} + +static int e1000_phys_id(struct net_device *netdev, u32 data) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + + if (!data || data > (u32)(MAX_SCHEDULE_TIMEOUT / HZ)) + data = (u32)(MAX_SCHEDULE_TIMEOUT / HZ); + + if (adapter->hw.phy.type == e1000_phy_ife) { + if (!adapter->blink_timer.function) { + init_timer(&adapter->blink_timer); + adapter->blink_timer.function = + e1000_led_blink_callback; + adapter->blink_timer.data = (unsigned long) adapter; + } + mod_timer(&adapter->blink_timer, jiffies); + msleep_interruptible(data * 1000); + del_timer_sync(&adapter->blink_timer); + e1e_wphy(&adapter->hw, + IFE_PHY_SPECIAL_CONTROL_LED, 0); + } else { + e1000e_blink_led(&adapter->hw); + msleep_interruptible(data * 1000); + } + + adapter->hw.mac.ops.led_off(&adapter->hw); + clear_bit(E1000_LED_ON, &adapter->led_status); + adapter->hw.mac.ops.cleanup_led(&adapter->hw); + + return 0; +} + +static int e1000_nway_reset(struct net_device *netdev) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + if (netif_running(netdev)) + e1000e_reinit_locked(adapter); + return 0; +} + +static void e1000_get_ethtool_stats(struct net_device *netdev, + struct ethtool_stats *stats, + u64 *data) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + int i; + + e1000e_update_stats(adapter); + for (i = 0; i < E1000_GLOBAL_STATS_LEN; i++) { + char *p = (char *)adapter+e1000_gstrings_stats[i].stat_offset; + data[i] = (e1000_gstrings_stats[i].sizeof_stat == + sizeof(u64)) ? 
*(u64 *)p : *(u32 *)p; + } +} + +static void e1000_get_strings(struct net_device *netdev, u32 stringset, + u8 *data) +{ + u8 *p = data; + int i; + + switch (stringset) { + case ETH_SS_TEST: + memcpy(data, *e1000_gstrings_test, + E1000_TEST_LEN*ETH_GSTRING_LEN); + break; + case ETH_SS_STATS: + for (i = 0; i < E1000_GLOBAL_STATS_LEN; i++) { + memcpy(p, e1000_gstrings_stats[i].stat_string, + ETH_GSTRING_LEN); + p += ETH_GSTRING_LEN; + } + break; + } +} + +static const struct ethtool_ops e1000_ethtool_ops = { + .get_settings = e1000_get_settings, + .set_settings = e1000_set_settings, + .get_drvinfo = e1000_get_drvinfo, + .get_regs_len = e1000_get_regs_len, + .get_regs = e1000_get_regs, + .get_wol = e1000_get_wol, + .set_wol = e1000_set_wol, + .get_msglevel = e1000_get_msglevel, + .set_msglevel = e1000_set_msglevel, + .nway_reset = e1000_nway_reset, + .get_link = ethtool_op_get_link, + .get_eeprom_len = e1000_get_eeprom_len, + .get_eeprom = e1000_get_eeprom, + .set_eeprom = e1000_set_eeprom, + .get_ringparam = e1000_get_ringparam, + .set_ringparam = e1000_set_ringparam, + .get_pauseparam = e1000_get_pauseparam, + .set_pauseparam = e1000_set_pauseparam, + .get_rx_csum = e1000_get_rx_csum, + .set_rx_csum = e1000_set_rx_csum, + .get_tx_csum = e1000_get_tx_csum, + .set_tx_csum = e1000_set_tx_csum, + .get_sg = ethtool_op_get_sg, + .set_sg = ethtool_op_set_sg, + .get_tso = ethtool_op_get_tso, + .set_tso = e1000_set_tso, + .self_test = e1000_diag_test, + .get_strings = e1000_get_strings, + .phys_id = e1000_phys_id, + .get_ethtool_stats = e1000_get_ethtool_stats, + .get_sset_count = e1000e_get_sset_count, +}; + +void e1000e_set_ethtool_ops(struct net_device *netdev) +{ + SET_ETHTOOL_OPS(netdev, &e1000_ethtool_ops); +} diff --git a/drivers/net/e1000e/hw.h b/drivers/net/e1000e/hw.h new file mode 100644 index 0000000000000000000000000000000000000000..aa82f1afb7fb7d8fc0dc9b36c24ffa1e1e21681a --- /dev/null +++ b/drivers/net/e1000e/hw.h @@ -0,0 +1,864 @@ +/******************************************************************************* + + Intel PRO/1000 Linux driver + Copyright(c) 1999 - 2007 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + Linux NICS + e1000-devel Mailing List + Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +#ifndef _E1000_HW_H_ +#define _E1000_HW_H_ + +#include + +struct e1000_hw; +struct e1000_adapter; + +#include "defines.h" + +#define er32(reg) __er32(hw, E1000_##reg) +#define ew32(reg,val) __ew32(hw, E1000_##reg, (val)) +#define e1e_flush() er32(STATUS) + +#define E1000_WRITE_REG_ARRAY(a, reg, offset, value) \ + (writel((value), ((a)->hw_addr + reg + ((offset) << 2)))) + +#define E1000_READ_REG_ARRAY(a, reg, offset) \ + (readl((a)->hw_addr + reg + ((offset) << 2))) + +enum e1e_registers { + E1000_CTRL = 0x00000, /* Device Control - RW */ + E1000_STATUS = 0x00008, /* Device Status - RO */ + E1000_EECD = 0x00010, /* EEPROM/Flash Control - RW */ + E1000_EERD = 0x00014, /* EEPROM Read - RW */ + E1000_CTRL_EXT = 0x00018, /* Extended Device Control - RW */ + E1000_FLA = 0x0001C, /* Flash Access - RW */ + E1000_MDIC = 0x00020, /* MDI Control - RW */ + E1000_SCTL = 0x00024, /* SerDes Control - RW */ + E1000_FCAL = 0x00028, /* Flow Control Address Low - RW */ + E1000_FCAH = 0x0002C, /* Flow Control Address High -RW */ + E1000_FEXTNVM = 0x00028, /* Future Extended NVM - RW */ + E1000_FCT = 0x00030, /* Flow Control Type - RW */ + E1000_VET = 0x00038, /* VLAN Ether Type - RW */ + E1000_ICR = 0x000C0, /* Interrupt Cause Read - R/clr */ + E1000_ITR = 0x000C4, /* Interrupt Throttling Rate - RW */ + E1000_ICS = 0x000C8, /* Interrupt Cause Set - WO */ + E1000_IMS = 0x000D0, /* Interrupt Mask Set - RW */ + E1000_IMC = 0x000D8, /* Interrupt Mask Clear - WO */ + E1000_IAM = 0x000E0, /* Interrupt Acknowledge Auto Mask */ + E1000_RCTL = 0x00100, /* RX Control - RW */ + E1000_FCTTV = 0x00170, /* Flow Control Transmit Timer Value - RW */ + E1000_TXCW = 0x00178, /* TX Configuration Word - RW */ + E1000_RXCW = 0x00180, /* RX Configuration Word - RO */ + E1000_TCTL = 0x00400, /* TX Control - RW */ + E1000_TCTL_EXT = 0x00404, /* Extended TX Control - RW */ + E1000_TIPG = 0x00410, /* TX Inter-packet gap -RW */ + E1000_AIT = 0x00458, /* Adaptive Interframe Spacing Throttle - RW */ + E1000_LEDCTL = 0x00E00, /* LED Control - RW */ + E1000_EXTCNF_CTRL = 0x00F00, /* Extended Configuration Control */ + E1000_EXTCNF_SIZE = 0x00F08, /* Extended Configuration Size */ + E1000_PHY_CTRL = 0x00F10, /* PHY Control Register in CSR */ + E1000_PBA = 0x01000, /* Packet Buffer Allocation - RW */ + E1000_PBS = 0x01008, /* Packet Buffer Size */ + E1000_EEMNGCTL = 0x01010, /* MNG EEprom Control */ + E1000_EEWR = 0x0102C, /* EEPROM Write Register - RW */ + E1000_FLOP = 0x0103C, /* FLASH Opcode Register */ + E1000_ERT = 0x02008, /* Early Rx Threshold - RW */ + E1000_FCRTL = 0x02160, /* Flow Control Receive Threshold Low - RW */ + E1000_FCRTH = 0x02168, /* Flow Control Receive Threshold High - RW */ + E1000_PSRCTL = 0x02170, /* Packet Split Receive Control - RW */ + E1000_RDBAL = 0x02800, /* RX Descriptor Base Address Low - RW */ + E1000_RDBAH = 0x02804, /* RX Descriptor Base Address High - RW */ + E1000_RDLEN = 0x02808, /* RX Descriptor Length - RW */ + E1000_RDH = 0x02810, /* RX Descriptor Head - RW */ + E1000_RDT = 0x02818, /* RX Descriptor Tail - RW */ + E1000_RDTR = 0x02820, /* RX Delay Timer - RW */ + E1000_RADV = 0x0282C, /* RX Interrupt Absolute Delay Timer - RW */ + +/* Convenience macros + * + * Note: "_n" is the queue number of the register to be written to. 
+ * + * Example usage: + * E1000_RDBAL_REG(current_rx_queue) + * + */ +#define E1000_RDBAL_REG(_n) (E1000_RDBAL + (_n << 8)) + E1000_KABGTXD = 0x03004, /* AFE Band Gap Transmit Ref Data */ + E1000_TDBAL = 0x03800, /* TX Descriptor Base Address Low - RW */ + E1000_TDBAH = 0x03804, /* TX Descriptor Base Address High - RW */ + E1000_TDLEN = 0x03808, /* TX Descriptor Length - RW */ + E1000_TDH = 0x03810, /* TX Descriptor Head - RW */ + E1000_TDT = 0x03818, /* TX Descriptor Tail - RW */ + E1000_TIDV = 0x03820, /* TX Interrupt Delay Value - RW */ + E1000_TXDCTL = 0x03828, /* TX Descriptor Control - RW */ + E1000_TADV = 0x0382C, /* TX Interrupt Absolute Delay Val - RW */ + E1000_TARC0 = 0x03840, /* TX Arbitration Count (0) */ + E1000_TXDCTL1 = 0x03928, /* TX Descriptor Control (1) - RW */ + E1000_TARC1 = 0x03940, /* TX Arbitration Count (1) */ + E1000_CRCERRS = 0x04000, /* CRC Error Count - R/clr */ + E1000_ALGNERRC = 0x04004, /* Alignment Error Count - R/clr */ + E1000_SYMERRS = 0x04008, /* Symbol Error Count - R/clr */ + E1000_RXERRC = 0x0400C, /* Receive Error Count - R/clr */ + E1000_MPC = 0x04010, /* Missed Packet Count - R/clr */ + E1000_SCC = 0x04014, /* Single Collision Count - R/clr */ + E1000_ECOL = 0x04018, /* Excessive Collision Count - R/clr */ + E1000_MCC = 0x0401C, /* Multiple Collision Count - R/clr */ + E1000_LATECOL = 0x04020, /* Late Collision Count - R/clr */ + E1000_COLC = 0x04028, /* Collision Count - R/clr */ + E1000_DC = 0x04030, /* Defer Count - R/clr */ + E1000_TNCRS = 0x04034, /* TX-No CRS - R/clr */ + E1000_SEC = 0x04038, /* Sequence Error Count - R/clr */ + E1000_CEXTERR = 0x0403C, /* Carrier Extension Error Count - R/clr */ + E1000_RLEC = 0x04040, /* Receive Length Error Count - R/clr */ + E1000_XONRXC = 0x04048, /* XON RX Count - R/clr */ + E1000_XONTXC = 0x0404C, /* XON TX Count - R/clr */ + E1000_XOFFRXC = 0x04050, /* XOFF RX Count - R/clr */ + E1000_XOFFTXC = 0x04054, /* XOFF TX Count - R/clr */ + E1000_FCRUC = 0x04058, /* Flow Control RX Unsupported Count- R/clr */ + E1000_PRC64 = 0x0405C, /* Packets RX (64 bytes) - R/clr */ + E1000_PRC127 = 0x04060, /* Packets RX (65-127 bytes) - R/clr */ + E1000_PRC255 = 0x04064, /* Packets RX (128-255 bytes) - R/clr */ + E1000_PRC511 = 0x04068, /* Packets RX (255-511 bytes) - R/clr */ + E1000_PRC1023 = 0x0406C, /* Packets RX (512-1023 bytes) - R/clr */ + E1000_PRC1522 = 0x04070, /* Packets RX (1024-1522 bytes) - R/clr */ + E1000_GPRC = 0x04074, /* Good Packets RX Count - R/clr */ + E1000_BPRC = 0x04078, /* Broadcast Packets RX Count - R/clr */ + E1000_MPRC = 0x0407C, /* Multicast Packets RX Count - R/clr */ + E1000_GPTC = 0x04080, /* Good Packets TX Count - R/clr */ + E1000_GORCL = 0x04088, /* Good Octets RX Count Low - R/clr */ + E1000_GORCH = 0x0408C, /* Good Octets RX Count High - R/clr */ + E1000_GOTCL = 0x04090, /* Good Octets TX Count Low - R/clr */ + E1000_GOTCH = 0x04094, /* Good Octets TX Count High - R/clr */ + E1000_RNBC = 0x040A0, /* RX No Buffers Count - R/clr */ + E1000_RUC = 0x040A4, /* RX Undersize Count - R/clr */ + E1000_RFC = 0x040A8, /* RX Fragment Count - R/clr */ + E1000_ROC = 0x040AC, /* RX Oversize Count - R/clr */ + E1000_RJC = 0x040B0, /* RX Jabber Count - R/clr */ + E1000_MGTPRC = 0x040B4, /* Management Packets RX Count - R/clr */ + E1000_MGTPDC = 0x040B8, /* Management Packets Dropped Count - R/clr */ + E1000_MGTPTC = 0x040BC, /* Management Packets TX Count - R/clr */ + E1000_TORL = 0x040C0, /* Total Octets RX Low - R/clr */ + E1000_TORH = 0x040C4, /* Total Octets RX High - R/clr */ + E1000_TOTL = 
0x040C8, /* Total Octets TX Low - R/clr */ + E1000_TOTH = 0x040CC, /* Total Octets TX High - R/clr */ + E1000_TPR = 0x040D0, /* Total Packets RX - R/clr */ + E1000_TPT = 0x040D4, /* Total Packets TX - R/clr */ + E1000_PTC64 = 0x040D8, /* Packets TX (64 bytes) - R/clr */ + E1000_PTC127 = 0x040DC, /* Packets TX (65-127 bytes) - R/clr */ + E1000_PTC255 = 0x040E0, /* Packets TX (128-255 bytes) - R/clr */ + E1000_PTC511 = 0x040E4, /* Packets TX (256-511 bytes) - R/clr */ + E1000_PTC1023 = 0x040E8, /* Packets TX (512-1023 bytes) - R/clr */ + E1000_PTC1522 = 0x040EC, /* Packets TX (1024-1522 Bytes) - R/clr */ + E1000_MPTC = 0x040F0, /* Multicast Packets TX Count - R/clr */ + E1000_BPTC = 0x040F4, /* Broadcast Packets TX Count - R/clr */ + E1000_TSCTC = 0x040F8, /* TCP Segmentation Context TX - R/clr */ + E1000_TSCTFC = 0x040FC, /* TCP Segmentation Context TX Fail - R/clr */ + E1000_IAC = 0x04100, /* Interrupt Assertion Count */ + E1000_ICRXPTC = 0x04104, /* Irq Cause Rx Packet Timer Expire Count */ + E1000_ICRXATC = 0x04108, /* Irq Cause Rx Abs Timer Expire Count */ + E1000_ICTXPTC = 0x0410C, /* Irq Cause Tx Packet Timer Expire Count */ + E1000_ICTXATC = 0x04110, /* Irq Cause Tx Abs Timer Expire Count */ + E1000_ICTXQEC = 0x04118, /* Irq Cause Tx Queue Empty Count */ + E1000_ICTXQMTC = 0x0411C, /* Irq Cause Tx Queue MinThreshold Count */ + E1000_ICRXDMTC = 0x04120, /* Irq Cause Rx Desc MinThreshold Count */ + E1000_ICRXOC = 0x04124, /* Irq Cause Receiver Overrun Count */ + E1000_RXCSUM = 0x05000, /* RX Checksum Control - RW */ + E1000_RFCTL = 0x05008, /* Receive Filter Control*/ + E1000_MTA = 0x05200, /* Multicast Table Array - RW Array */ + E1000_RA = 0x05400, /* Receive Address - RW Array */ + E1000_VFTA = 0x05600, /* VLAN Filter Table Array - RW Array */ + E1000_WUC = 0x05800, /* Wakeup Control - RW */ + E1000_WUFC = 0x05808, /* Wakeup Filter Control - RW */ + E1000_WUS = 0x05810, /* Wakeup Status - RO */ + E1000_MANC = 0x05820, /* Management Control - RW */ + E1000_FFLT = 0x05F00, /* Flexible Filter Length Table - RW Array */ + E1000_HOST_IF = 0x08800, /* Host Interface */ + + E1000_KMRNCTRLSTA = 0x00034, /* MAC-PHY interface - RW */ + E1000_MANC2H = 0x05860, /* Management Control To Host - RW */ + E1000_SW_FW_SYNC = 0x05B5C, /* Software-Firmware Synchronization - RW */ + E1000_GCR = 0x05B00, /* PCI-Ex Control */ + E1000_FACTPS = 0x05B30, /* Function Active and Power State to MNG */ + E1000_SWSM = 0x05B50, /* SW Semaphore */ + E1000_FWSM = 0x05B54, /* FW Semaphore */ + E1000_HICR = 0x08F00, /* Host Inteface Control */ +}; + +/* RSS registers */ + +/* IGP01E1000 Specific Registers */ +#define IGP01E1000_PHY_PORT_CONFIG 0x10 /* Port Config */ +#define IGP01E1000_PHY_PORT_STATUS 0x11 /* Status */ +#define IGP01E1000_PHY_PORT_CTRL 0x12 /* Control */ +#define IGP01E1000_PHY_LINK_HEALTH 0x13 /* PHY Link Health */ +#define IGP02E1000_PHY_POWER_MGMT 0x19 /* Power Management */ +#define IGP01E1000_PHY_PAGE_SELECT 0x1F /* Page Select */ + +#define IGP01E1000_PHY_PCS_INIT_REG 0x00B4 +#define IGP01E1000_PHY_POLARITY_MASK 0x0078 + +#define IGP01E1000_PSCR_AUTO_MDIX 0x1000 +#define IGP01E1000_PSCR_FORCE_MDI_MDIX 0x2000 /* 0=MDI, 1=MDIX */ + +#define IGP01E1000_PSCFR_SMART_SPEED 0x0080 + +#define IGP02E1000_PM_SPD 0x0001 /* Smart Power Down */ +#define IGP02E1000_PM_D0_LPLU 0x0002 /* For D0a states */ +#define IGP02E1000_PM_D3_LPLU 0x0004 /* For all other states */ + +#define IGP01E1000_PLHR_SS_DOWNGRADE 0x8000 + +#define IGP01E1000_PSSR_POLARITY_REVERSED 0x0002 +#define IGP01E1000_PSSR_MDIX 0x0008 +#define 
IGP01E1000_PSSR_SPEED_MASK 0xC000 +#define IGP01E1000_PSSR_SPEED_1000MBPS 0xC000 + +#define IGP02E1000_PHY_CHANNEL_NUM 4 +#define IGP02E1000_PHY_AGC_A 0x11B1 +#define IGP02E1000_PHY_AGC_B 0x12B1 +#define IGP02E1000_PHY_AGC_C 0x14B1 +#define IGP02E1000_PHY_AGC_D 0x18B1 + +#define IGP02E1000_AGC_LENGTH_SHIFT 9 /* Course - 15:13, Fine - 12:9 */ +#define IGP02E1000_AGC_LENGTH_MASK 0x7F +#define IGP02E1000_AGC_RANGE 15 + +/* manage.c */ +#define E1000_VFTA_ENTRY_SHIFT 5 +#define E1000_VFTA_ENTRY_MASK 0x7F +#define E1000_VFTA_ENTRY_BIT_SHIFT_MASK 0x1F + +#define E1000_HICR_EN 0x01 /* Enable bit - RO */ +#define E1000_HICR_C 0x02 /* Driver sets this bit when done + * to put command in RAM */ +#define E1000_HICR_FW_RESET_ENABLE 0x40 +#define E1000_HICR_FW_RESET 0x80 + +#define E1000_FWSM_MODE_MASK 0xE +#define E1000_FWSM_MODE_SHIFT 1 + +#define E1000_MNG_IAMT_MODE 0x3 +#define E1000_MNG_DHCP_COOKIE_LENGTH 0x10 +#define E1000_MNG_DHCP_COOKIE_OFFSET 0x6F0 +#define E1000_MNG_DHCP_COMMAND_TIMEOUT 10 +#define E1000_MNG_DHCP_TX_PAYLOAD_CMD 64 +#define E1000_MNG_DHCP_COOKIE_STATUS_PARSING 0x1 +#define E1000_MNG_DHCP_COOKIE_STATUS_VLAN 0x2 + +/* nvm.c */ +#define E1000_STM_OPCODE 0xDB00 + +#define E1000_KMRNCTRLSTA_OFFSET 0x001F0000 +#define E1000_KMRNCTRLSTA_OFFSET_SHIFT 16 +#define E1000_KMRNCTRLSTA_REN 0x00200000 +#define E1000_KMRNCTRLSTA_DIAG_OFFSET 0x3 /* Kumeran Diagnostic */ +#define E1000_KMRNCTRLSTA_DIAG_NELPBK 0x1000 /* Nearend Loopback mode */ + +#define IFE_PHY_EXTENDED_STATUS_CONTROL 0x10 +#define IFE_PHY_SPECIAL_CONTROL 0x11 /* 100BaseTx PHY Special Control */ +#define IFE_PHY_SPECIAL_CONTROL_LED 0x1B /* PHY Special and LED Control */ +#define IFE_PHY_MDIX_CONTROL 0x1C /* MDI/MDI-X Control */ + +/* IFE PHY Extended Status Control */ +#define IFE_PESC_POLARITY_REVERSED 0x0100 + +/* IFE PHY Special Control */ +#define IFE_PSC_AUTO_POLARITY_DISABLE 0x0010 +#define IFE_PSC_FORCE_POLARITY 0x0020 + +/* IFE PHY Special Control and LED Control */ +#define IFE_PSCL_PROBE_MODE 0x0020 +#define IFE_PSCL_PROBE_LEDS_OFF 0x0006 /* Force LEDs 0 and 2 off */ +#define IFE_PSCL_PROBE_LEDS_ON 0x0007 /* Force LEDs 0 and 2 on */ + +/* IFE PHY MDIX Control */ +#define IFE_PMC_MDIX_STATUS 0x0020 /* 1=MDI-X, 0=MDI */ +#define IFE_PMC_FORCE_MDIX 0x0040 /* 1=force MDI-X, 0=force MDI */ +#define IFE_PMC_AUTO_MDIX 0x0080 /* 1=enable auto MDI/MDI-X, 0=disable */ + +#define E1000_CABLE_LENGTH_UNDEFINED 0xFF + +#define E1000_DEV_ID_82571EB_COPPER 0x105E +#define E1000_DEV_ID_82571EB_FIBER 0x105F +#define E1000_DEV_ID_82571EB_SERDES 0x1060 +#define E1000_DEV_ID_82571EB_QUAD_COPPER 0x10A4 +#define E1000_DEV_ID_82571EB_QUAD_FIBER 0x10A5 +#define E1000_DEV_ID_82571EB_QUAD_COPPER_LP 0x10BC +#define E1000_DEV_ID_82572EI_COPPER 0x107D +#define E1000_DEV_ID_82572EI_FIBER 0x107E +#define E1000_DEV_ID_82572EI_SERDES 0x107F +#define E1000_DEV_ID_82572EI 0x10B9 +#define E1000_DEV_ID_82573E 0x108B +#define E1000_DEV_ID_82573E_IAMT 0x108C +#define E1000_DEV_ID_82573L 0x109A + +#define E1000_DEV_ID_80003ES2LAN_COPPER_DPT 0x1096 +#define E1000_DEV_ID_80003ES2LAN_SERDES_DPT 0x1098 +#define E1000_DEV_ID_80003ES2LAN_COPPER_SPT 0x10BA +#define E1000_DEV_ID_80003ES2LAN_SERDES_SPT 0x10BB + +#define E1000_DEV_ID_ICH8_IGP_M_AMT 0x1049 +#define E1000_DEV_ID_ICH8_IGP_AMT 0x104A +#define E1000_DEV_ID_ICH8_IGP_C 0x104B +#define E1000_DEV_ID_ICH8_IFE 0x104C +#define E1000_DEV_ID_ICH8_IFE_GT 0x10C4 +#define E1000_DEV_ID_ICH8_IFE_G 0x10C5 +#define E1000_DEV_ID_ICH8_IGP_M 0x104D +#define E1000_DEV_ID_ICH9_IGP_AMT 0x10BD +#define E1000_DEV_ID_ICH9_IGP_C 
0x294C +#define E1000_DEV_ID_ICH9_IFE 0x10C0 +#define E1000_DEV_ID_ICH9_IFE_GT 0x10C3 +#define E1000_DEV_ID_ICH9_IFE_G 0x10C2 + +#define E1000_FUNC_1 1 + +enum e1000_mac_type { + e1000_82571, + e1000_82572, + e1000_82573, + e1000_80003es2lan, + e1000_ich8lan, + e1000_ich9lan, +}; + +enum e1000_media_type { + e1000_media_type_unknown = 0, + e1000_media_type_copper = 1, + e1000_media_type_fiber = 2, + e1000_media_type_internal_serdes = 3, + e1000_num_media_types +}; + +enum e1000_nvm_type { + e1000_nvm_unknown = 0, + e1000_nvm_none, + e1000_nvm_eeprom_spi, + e1000_nvm_flash_hw, + e1000_nvm_flash_sw +}; + +enum e1000_nvm_override { + e1000_nvm_override_none = 0, + e1000_nvm_override_spi_small, + e1000_nvm_override_spi_large +}; + +enum e1000_phy_type { + e1000_phy_unknown = 0, + e1000_phy_none, + e1000_phy_m88, + e1000_phy_igp, + e1000_phy_igp_2, + e1000_phy_gg82563, + e1000_phy_igp_3, + e1000_phy_ife, +}; + +enum e1000_bus_width { + e1000_bus_width_unknown = 0, + e1000_bus_width_pcie_x1, + e1000_bus_width_pcie_x2, + e1000_bus_width_pcie_x4 = 4, + e1000_bus_width_32, + e1000_bus_width_64, + e1000_bus_width_reserved +}; + +enum e1000_1000t_rx_status { + e1000_1000t_rx_status_not_ok = 0, + e1000_1000t_rx_status_ok, + e1000_1000t_rx_status_undefined = 0xFF +}; + +enum e1000_rev_polarity{ + e1000_rev_polarity_normal = 0, + e1000_rev_polarity_reversed, + e1000_rev_polarity_undefined = 0xFF +}; + +enum e1000_fc_mode { + e1000_fc_none = 0, + e1000_fc_rx_pause, + e1000_fc_tx_pause, + e1000_fc_full, + e1000_fc_default = 0xFF +}; + +enum e1000_ms_type { + e1000_ms_hw_default = 0, + e1000_ms_force_master, + e1000_ms_force_slave, + e1000_ms_auto +}; + +enum e1000_smart_speed { + e1000_smart_speed_default = 0, + e1000_smart_speed_on, + e1000_smart_speed_off +}; + +/* Receive Descriptor */ +struct e1000_rx_desc { + u64 buffer_addr; /* Address of the descriptor's data buffer */ + u16 length; /* Length of data DMAed into data buffer */ + u16 csum; /* Packet checksum */ + u8 status; /* Descriptor status */ + u8 errors; /* Descriptor Errors */ + u16 special; +}; + +/* Receive Descriptor - Extended */ +union e1000_rx_desc_extended { + struct { + u64 buffer_addr; + u64 reserved; + } read; + struct { + struct { + u32 mrq; /* Multiple Rx Queues */ + union { + u32 rss; /* RSS Hash */ + struct { + u16 ip_id; /* IP id */ + u16 csum; /* Packet Checksum */ + } csum_ip; + } hi_dword; + } lower; + struct { + u32 status_error; /* ext status/error */ + u16 length; + u16 vlan; /* VLAN tag */ + } upper; + } wb; /* writeback */ +}; + +#define MAX_PS_BUFFERS 4 +/* Receive Descriptor - Packet Split */ +union e1000_rx_desc_packet_split { + struct { + /* one buffer for protocol header(s), three data buffers */ + u64 buffer_addr[MAX_PS_BUFFERS]; + } read; + struct { + struct { + u32 mrq; /* Multiple Rx Queues */ + union { + u32 rss; /* RSS Hash */ + struct { + u16 ip_id; /* IP id */ + u16 csum; /* Packet Checksum */ + } csum_ip; + } hi_dword; + } lower; + struct { + u32 status_error; /* ext status/error */ + u16 length0; /* length of buffer 0 */ + u16 vlan; /* VLAN tag */ + } middle; + struct { + u16 header_status; + u16 length[3]; /* length of buffers 1-3 */ + } upper; + u64 reserved; + } wb; /* writeback */ +}; + +/* Transmit Descriptor */ +struct e1000_tx_desc { + u64 buffer_addr; /* Address of the descriptor's data buffer */ + union { + u32 data; + struct { + u16 length; /* Data buffer length */ + u8 cso; /* Checksum offset */ + u8 cmd; /* Descriptor control */ + } flags; + } lower; + union { + u32 data; + struct { + u8 
status; /* Descriptor status */ + u8 css; /* Checksum start */ + u16 special; + } fields; + } upper; +}; + +/* Offload Context Descriptor */ +struct e1000_context_desc { + union { + u32 ip_config; + struct { + u8 ipcss; /* IP checksum start */ + u8 ipcso; /* IP checksum offset */ + u16 ipcse; /* IP checksum end */ + } ip_fields; + } lower_setup; + union { + u32 tcp_config; + struct { + u8 tucss; /* TCP checksum start */ + u8 tucso; /* TCP checksum offset */ + u16 tucse; /* TCP checksum end */ + } tcp_fields; + } upper_setup; + u32 cmd_and_length; + union { + u32 data; + struct { + u8 status; /* Descriptor status */ + u8 hdr_len; /* Header length */ + u16 mss; /* Maximum segment size */ + } fields; + } tcp_seg_setup; +}; + +/* Offload data descriptor */ +struct e1000_data_desc { + u64 buffer_addr; /* Address of the descriptor's buffer address */ + union { + u32 data; + struct { + u16 length; /* Data buffer length */ + u8 typ_len_ext; + u8 cmd; + } flags; + } lower; + union { + u32 data; + struct { + u8 status; /* Descriptor status */ + u8 popts; /* Packet Options */ + u16 special; /* */ + } fields; + } upper; +}; + +/* Statistics counters collected by the MAC */ +struct e1000_hw_stats { + u64 crcerrs; + u64 algnerrc; + u64 symerrs; + u64 rxerrc; + u64 mpc; + u64 scc; + u64 ecol; + u64 mcc; + u64 latecol; + u64 colc; + u64 dc; + u64 tncrs; + u64 sec; + u64 cexterr; + u64 rlec; + u64 xonrxc; + u64 xontxc; + u64 xoffrxc; + u64 xofftxc; + u64 fcruc; + u64 prc64; + u64 prc127; + u64 prc255; + u64 prc511; + u64 prc1023; + u64 prc1522; + u64 gprc; + u64 bprc; + u64 mprc; + u64 gptc; + u64 gorcl; + u64 gorch; + u64 gotcl; + u64 gotch; + u64 rnbc; + u64 ruc; + u64 rfc; + u64 roc; + u64 rjc; + u64 mgprc; + u64 mgpdc; + u64 mgptc; + u64 torl; + u64 torh; + u64 totl; + u64 toth; + u64 tpr; + u64 tpt; + u64 ptc64; + u64 ptc127; + u64 ptc255; + u64 ptc511; + u64 ptc1023; + u64 ptc1522; + u64 mptc; + u64 bptc; + u64 tsctc; + u64 tsctfc; + u64 iac; + u64 icrxptc; + u64 icrxatc; + u64 ictxptc; + u64 ictxatc; + u64 ictxqec; + u64 ictxqmtc; + u64 icrxdmtc; + u64 icrxoc; +}; + +struct e1000_phy_stats { + u32 idle_errors; + u32 receive_errors; +}; + +struct e1000_host_mng_dhcp_cookie { + u32 signature; + u8 status; + u8 reserved0; + u16 vlan_id; + u32 reserved1; + u16 reserved2; + u8 reserved3; + u8 checksum; +}; + +/* Host Interface "Rev 1" */ +struct e1000_host_command_header { + u8 command_id; + u8 command_length; + u8 command_options; + u8 checksum; +}; + +#define E1000_HI_MAX_DATA_LENGTH 252 +struct e1000_host_command_info { + struct e1000_host_command_header command_header; + u8 command_data[E1000_HI_MAX_DATA_LENGTH]; +}; + +/* Host Interface "Rev 2" */ +struct e1000_host_mng_command_header { + u8 command_id; + u8 checksum; + u16 reserved1; + u16 reserved2; + u16 command_length; +}; + +#define E1000_HI_MAX_MNG_DATA_LENGTH 0x6F8 +struct e1000_host_mng_command_info { + struct e1000_host_mng_command_header command_header; + u8 command_data[E1000_HI_MAX_MNG_DATA_LENGTH]; +}; + +/* Function pointers and static data for the MAC. 
*/ +struct e1000_mac_operations { + u32 mng_mode_enab; + + s32 (*check_for_link)(struct e1000_hw *); + s32 (*cleanup_led)(struct e1000_hw *); + void (*clear_hw_cntrs)(struct e1000_hw *); + s32 (*get_bus_info)(struct e1000_hw *); + s32 (*get_link_up_info)(struct e1000_hw *, u16 *, u16 *); + s32 (*led_on)(struct e1000_hw *); + s32 (*led_off)(struct e1000_hw *); + void (*mc_addr_list_update)(struct e1000_hw *, u8 *, u32, u32, + u32); + s32 (*reset_hw)(struct e1000_hw *); + s32 (*init_hw)(struct e1000_hw *); + s32 (*setup_link)(struct e1000_hw *); + s32 (*setup_physical_interface)(struct e1000_hw *); +}; + +/* Function pointers for the PHY. */ +struct e1000_phy_operations { + s32 (*acquire_phy)(struct e1000_hw *); + s32 (*check_reset_block)(struct e1000_hw *); + s32 (*commit_phy)(struct e1000_hw *); + s32 (*force_speed_duplex)(struct e1000_hw *); + s32 (*get_cfg_done)(struct e1000_hw *hw); + s32 (*get_cable_length)(struct e1000_hw *); + s32 (*get_phy_info)(struct e1000_hw *); + s32 (*read_phy_reg)(struct e1000_hw *, u32, u16 *); + void (*release_phy)(struct e1000_hw *); + s32 (*reset_phy)(struct e1000_hw *); + s32 (*set_d0_lplu_state)(struct e1000_hw *, bool); + s32 (*set_d3_lplu_state)(struct e1000_hw *, bool); + s32 (*write_phy_reg)(struct e1000_hw *, u32, u16); +}; + +/* Function pointers for the NVM. */ +struct e1000_nvm_operations { + s32 (*acquire_nvm)(struct e1000_hw *); + s32 (*read_nvm)(struct e1000_hw *, u16, u16, u16 *); + void (*release_nvm)(struct e1000_hw *); + s32 (*update_nvm)(struct e1000_hw *); + s32 (*valid_led_default)(struct e1000_hw *, u16 *); + s32 (*validate_nvm)(struct e1000_hw *); + s32 (*write_nvm)(struct e1000_hw *, u16, u16, u16 *); +}; + +struct e1000_mac_info { + struct e1000_mac_operations ops; + + u8 addr[6]; + u8 perm_addr[6]; + + enum e1000_mac_type type; + enum e1000_fc_mode fc; + enum e1000_fc_mode original_fc; + + u32 collision_delta; + u32 ledctl_default; + u32 ledctl_mode1; + u32 ledctl_mode2; + u32 max_frame_size; + u32 mc_filter_type; + u32 min_frame_size; + u32 tx_packet_delta; + u32 txcw; + + u16 current_ifs_val; + u16 ifs_max_val; + u16 ifs_min_val; + u16 ifs_ratio; + u16 ifs_step_size; + u16 mta_reg_count; + u16 rar_entry_count; + u16 fc_high_water; + u16 fc_low_water; + u16 fc_pause_time; + + u8 forced_speed_duplex; + + bool arc_subsystem_valid; + bool autoneg; + bool autoneg_failed; + bool get_link_status; + bool in_ifs_mode; + bool serdes_has_link; + bool tx_pkt_filtering; +}; + +struct e1000_phy_info { + struct e1000_phy_operations ops; + + enum e1000_phy_type type; + + enum e1000_1000t_rx_status local_rx; + enum e1000_1000t_rx_status remote_rx; + enum e1000_ms_type ms_type; + enum e1000_ms_type original_ms_type; + enum e1000_rev_polarity cable_polarity; + enum e1000_smart_speed smart_speed; + + u32 addr; + u32 id; + u32 reset_delay_us; /* in usec */ + u32 revision; + + u16 autoneg_advertised; + u16 autoneg_mask; + u16 cable_length; + u16 max_cable_length; + u16 min_cable_length; + + u8 mdix; + + bool disable_polarity_correction; + bool is_mdix; + bool polarity_correction; + bool speed_downgraded; + bool wait_for_link; +}; + +struct e1000_nvm_info { + struct e1000_nvm_operations ops; + + enum e1000_nvm_type type; + enum e1000_nvm_override override; + + u32 flash_bank_size; + u32 flash_base_addr; + + u16 word_size; + u16 delay_usec; + u16 address_bits; + u16 opcode_bits; + u16 page_size; +}; + +struct e1000_bus_info { + enum e1000_bus_width width; + + u16 func; +}; + +struct e1000_dev_spec_82571 { + bool laa_is_present; +}; + +struct 
e1000_shadow_ram { + u16 value; + bool modified; +}; + +#define E1000_ICH8_SHADOW_RAM_WORDS 2048 + +struct e1000_dev_spec_ich8lan { + bool kmrn_lock_loss_workaround_enabled; + struct e1000_shadow_ram shadow_ram[E1000_ICH8_SHADOW_RAM_WORDS]; +}; + +struct e1000_hw { + struct e1000_adapter *adapter; + + u8 __iomem *hw_addr; + u8 __iomem *flash_address; + + struct e1000_mac_info mac; + struct e1000_phy_info phy; + struct e1000_nvm_info nvm; + struct e1000_bus_info bus; + struct e1000_host_mng_dhcp_cookie mng_cookie; + + union { + struct e1000_dev_spec_82571 e82571; + struct e1000_dev_spec_ich8lan ich8lan; + } dev_spec; + + enum e1000_media_type media_type; +}; + +#ifdef DEBUG +#define hw_dbg(hw, format, arg...) \ + printk(KERN_DEBUG, "%s: " format, e1000e_get_hw_dev_name(hw), ##arg); +#else +static inline int __attribute__ ((format (printf, 2, 3))) +hw_dbg(struct e1000_hw *hw, const char *format, ...) +{ + return 0; +} +#endif + +#endif diff --git a/drivers/net/e1000e/ich8lan.c b/drivers/net/e1000e/ich8lan.c new file mode 100644 index 0000000000000000000000000000000000000000..8f8139de1f4841fd7cdb350d14d2e12c6c9d16cc --- /dev/null +++ b/drivers/net/e1000e/ich8lan.c @@ -0,0 +1,2225 @@ +/******************************************************************************* + + Intel PRO/1000 Linux driver + Copyright(c) 1999 - 2007 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + Linux NICS + e1000-devel Mailing List + Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +/* + * 82562G-2 10/100 Network Connection + * 82562GT 10/100 Network Connection + * 82562GT-2 10/100 Network Connection + * 82562V 10/100 Network Connection + * 82562V-2 10/100 Network Connection + * 82566DC-2 Gigabit Network Connection + * 82566DC Gigabit Network Connection + * 82566DM-2 Gigabit Network Connection + * 82566DM Gigabit Network Connection + * 82566MC Gigabit Network Connection + * 82566MM Gigabit Network Connection + */ + +#include +#include +#include +#include + +#include "e1000.h" + +#define ICH_FLASH_GFPREG 0x0000 +#define ICH_FLASH_HSFSTS 0x0004 +#define ICH_FLASH_HSFCTL 0x0006 +#define ICH_FLASH_FADDR 0x0008 +#define ICH_FLASH_FDATA0 0x0010 + +#define ICH_FLASH_READ_COMMAND_TIMEOUT 500 +#define ICH_FLASH_WRITE_COMMAND_TIMEOUT 500 +#define ICH_FLASH_ERASE_COMMAND_TIMEOUT 3000000 +#define ICH_FLASH_LINEAR_ADDR_MASK 0x00FFFFFF +#define ICH_FLASH_CYCLE_REPEAT_COUNT 10 + +#define ICH_CYCLE_READ 0 +#define ICH_CYCLE_WRITE 2 +#define ICH_CYCLE_ERASE 3 + +#define FLASH_GFPREG_BASE_MASK 0x1FFF +#define FLASH_SECTOR_ADDR_SHIFT 12 + +#define ICH_FLASH_SEG_SIZE_256 256 +#define ICH_FLASH_SEG_SIZE_4K 4096 +#define ICH_FLASH_SEG_SIZE_8K 8192 +#define ICH_FLASH_SEG_SIZE_64K 65536 + + +#define E1000_ICH_FWSM_RSPCIPHY 0x00000040 /* Reset PHY on PCI Reset */ + +#define E1000_ICH_MNG_IAMT_MODE 0x2 + +#define ID_LED_DEFAULT_ICH8LAN ((ID_LED_DEF1_DEF2 << 12) | \ + (ID_LED_DEF1_OFF2 << 8) | \ + (ID_LED_DEF1_ON2 << 4) | \ + (ID_LED_DEF1_DEF2)) + +#define E1000_ICH_NVM_SIG_WORD 0x13 +#define E1000_ICH_NVM_SIG_MASK 0xC000 + +#define E1000_ICH8_LAN_INIT_TIMEOUT 1500 + +#define E1000_FEXTNVM_SW_CONFIG 1 +#define E1000_FEXTNVM_SW_CONFIG_ICH8M (1 << 27) /* Bit redefined for ICH8M :/ */ + +#define PCIE_ICH8_SNOOP_ALL PCIE_NO_SNOOP_ALL + +#define E1000_ICH_RAR_ENTRIES 7 + +#define PHY_PAGE_SHIFT 5 +#define PHY_REG(page, reg) (((page) << PHY_PAGE_SHIFT) | \ + ((reg) & MAX_PHY_REG_ADDRESS)) +#define IGP3_KMRN_DIAG PHY_REG(770, 19) /* KMRN Diagnostic */ +#define IGP3_VR_CTRL PHY_REG(776, 18) /* Voltage Regulator Control */ + +#define IGP3_KMRN_DIAG_PCS_LOCK_LOSS 0x0002 +#define IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK 0x0300 +#define IGP3_VR_CTRL_MODE_SHUTDOWN 0x0200 + +/* ICH GbE Flash Hardware Sequencing Flash Status Register bit breakdown */ +/* Offset 04h HSFSTS */ +union ich8_hws_flash_status { + struct ich8_hsfsts { + u16 flcdone :1; /* bit 0 Flash Cycle Done */ + u16 flcerr :1; /* bit 1 Flash Cycle Error */ + u16 dael :1; /* bit 2 Direct Access error Log */ + u16 berasesz :2; /* bit 4:3 Sector Erase Size */ + u16 flcinprog :1; /* bit 5 flash cycle in Progress */ + u16 reserved1 :2; /* bit 13:6 Reserved */ + u16 reserved2 :6; /* bit 13:6 Reserved */ + u16 fldesvalid :1; /* bit 14 Flash Descriptor Valid */ + u16 flockdn :1; /* bit 15 Flash Config Lock-Down */ + } hsf_status; + u16 regval; +}; + +/* ICH GbE Flash Hardware Sequencing Flash control Register bit breakdown */ +/* Offset 06h FLCTL */ +union ich8_hws_flash_ctrl { + struct ich8_hsflctl { + u16 flcgo :1; /* 0 Flash Cycle Go */ + u16 flcycle :2; /* 2:1 Flash Cycle */ + u16 reserved :5; /* 7:3 Reserved */ + u16 fldbcount :2; /* 9:8 Flash Data Byte Count */ + u16 flockdn :6; /* 15:10 Reserved */ + } hsf_ctrl; + u16 regval; +}; + +/* ICH Flash Region Access Permissions */ +union ich8_hws_flash_regacc { + struct ich8_flracc { + u32 grra :8; /* 0:7 GbE region Read Access */ + u32 grwa :8; /* 8:15 GbE region Write Access 
*/ + u32 gmrag :8; /* 23:16 GbE Master Read Access Grant */ + u32 gmwag :8; /* 31:24 GbE Master Write Access Grant */ + } hsf_flregacc; + u16 regval; +}; + +static s32 e1000_setup_link_ich8lan(struct e1000_hw *hw); +static void e1000_clear_hw_cntrs_ich8lan(struct e1000_hw *hw); +static void e1000_initialize_hw_bits_ich8lan(struct e1000_hw *hw); +static s32 e1000_check_polarity_ife_ich8lan(struct e1000_hw *hw); +static s32 e1000_erase_flash_bank_ich8lan(struct e1000_hw *hw, u32 bank); +static s32 e1000_retry_write_flash_byte_ich8lan(struct e1000_hw *hw, + u32 offset, u8 byte); +static s32 e1000_read_flash_word_ich8lan(struct e1000_hw *hw, u32 offset, + u16 *data); +static s32 e1000_read_flash_data_ich8lan(struct e1000_hw *hw, u32 offset, + u8 size, u16 *data); +static s32 e1000_setup_copper_link_ich8lan(struct e1000_hw *hw); +static s32 e1000_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw); + +static inline u16 __er16flash(struct e1000_hw *hw, unsigned long reg) +{ + return readw(hw->flash_address + reg); +} + +static inline u32 __er32flash(struct e1000_hw *hw, unsigned long reg) +{ + return readl(hw->flash_address + reg); +} + +static inline void __ew16flash(struct e1000_hw *hw, unsigned long reg, u16 val) +{ + writew(val, hw->flash_address + reg); +} + +static inline void __ew32flash(struct e1000_hw *hw, unsigned long reg, u32 val) +{ + writel(val, hw->flash_address + reg); +} + +#define er16flash(reg) __er16flash(hw, (reg)) +#define er32flash(reg) __er32flash(hw, (reg)) +#define ew16flash(reg,val) __ew16flash(hw, (reg), (val)) +#define ew32flash(reg,val) __ew32flash(hw, (reg), (val)) + +/** + * e1000_init_phy_params_ich8lan - Initialize PHY function pointers + * @hw: pointer to the HW structure + * + * Initialize family-specific PHY parameters and function pointers. + **/ +static s32 e1000_init_phy_params_ich8lan(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 i = 0; + + phy->addr = 1; + phy->reset_delay_us = 100; + + phy->id = 0; + while ((e1000_phy_unknown == e1000e_get_phy_type_from_id(phy->id)) && + (i++ < 100)) { + msleep(1); + ret_val = e1000e_get_phy_id(hw); + if (ret_val) + return ret_val; + } + + /* Verify phy id */ + switch (phy->id) { + case IGP03E1000_E_PHY_ID: + phy->type = e1000_phy_igp_3; + phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT; + break; + case IFE_E_PHY_ID: + case IFE_PLUS_E_PHY_ID: + case IFE_C_E_PHY_ID: + phy->type = e1000_phy_ife; + phy->autoneg_mask = E1000_ALL_NOT_GIG; + break; + default: + return -E1000_ERR_PHY; + break; + } + + return 0; +} + +/** + * e1000_init_nvm_params_ich8lan - Initialize NVM function pointers + * @hw: pointer to the HW structure + * + * Initialize family-specific NVM parameters and function + * pointers. + **/ +static s32 e1000_init_nvm_params_ich8lan(struct e1000_hw *hw) +{ + struct e1000_nvm_info *nvm = &hw->nvm; + struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan; + u32 gfpreg; + u32 sector_base_addr; + u32 sector_end_addr; + u16 i; + + /* Can't read flash registers if the register set isn't mapped. + */ + if (!hw->flash_address) { + hw_dbg(hw, "ERROR: Flash registers not mapped\n"); + return -E1000_ERR_CONFIG; + } + + nvm->type = e1000_nvm_flash_sw; + + gfpreg = er32flash(ICH_FLASH_GFPREG); + + /* sector_X_addr is a "sector"-aligned address (4096 bytes) + * Add 1 to sector_end_addr since this sector is included in + * the overall size. 
*/ + sector_base_addr = gfpreg & FLASH_GFPREG_BASE_MASK; + sector_end_addr = ((gfpreg >> 16) & FLASH_GFPREG_BASE_MASK) + 1; + + /* flash_base_addr is byte-aligned */ + nvm->flash_base_addr = sector_base_addr << FLASH_SECTOR_ADDR_SHIFT; + + /* find total size of the NVM, then cut in half since the total + * size represents two separate NVM banks. */ + nvm->flash_bank_size = (sector_end_addr - sector_base_addr) + << FLASH_SECTOR_ADDR_SHIFT; + nvm->flash_bank_size /= 2; + /* Adjust to word count */ + nvm->flash_bank_size /= sizeof(u16); + + nvm->word_size = E1000_ICH8_SHADOW_RAM_WORDS; + + /* Clear shadow ram */ + for (i = 0; i < nvm->word_size; i++) { + dev_spec->shadow_ram[i].modified = 0; + dev_spec->shadow_ram[i].value = 0xFFFF; + } + + return 0; +} + +/** + * e1000_init_mac_params_ich8lan - Initialize MAC function pointers + * @hw: pointer to the HW structure + * + * Initialize family-specific MAC parameters and function + * pointers. + **/ +static s32 e1000_init_mac_params_ich8lan(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + struct e1000_mac_info *mac = &hw->mac; + + /* Set media type function pointer */ + hw->media_type = e1000_media_type_copper; + + /* Set mta register count */ + mac->mta_reg_count = 32; + /* Set rar entry count */ + mac->rar_entry_count = E1000_ICH_RAR_ENTRIES; + if (mac->type == e1000_ich8lan) + mac->rar_entry_count--; + /* Set if manageability features are enabled. */ + mac->arc_subsystem_valid = 1; + + /* Enable PCS Lock-loss workaround for ICH8 */ + if (mac->type == e1000_ich8lan) + e1000e_set_kmrn_lock_loss_workaround_ich8lan(hw, 1); + + return 0; +} + +static s32 e1000_get_invariants_ich8lan(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + s32 rc; + + rc = e1000_init_mac_params_ich8lan(adapter); + if (rc) + return rc; + + rc = e1000_init_nvm_params_ich8lan(hw); + if (rc) + return rc; + + rc = e1000_init_phy_params_ich8lan(hw); + if (rc) + return rc; + + if ((adapter->hw.mac.type == e1000_ich8lan) && + (adapter->hw.phy.type == e1000_phy_igp_3)) + adapter->flags |= FLAG_LSC_GIG_SPEED_DROP; + + return 0; +} + +/** + * e1000_acquire_swflag_ich8lan - Acquire software control flag + * @hw: pointer to the HW structure + * + * Acquires the software control flag for performing NVM and PHY + * operations. This is a function pointer entry point only called by + * read/write routines for the PHY and NVM parts. + **/ +static s32 e1000_acquire_swflag_ich8lan(struct e1000_hw *hw) +{ + u32 extcnf_ctrl; + u32 timeout = PHY_CFG_TIMEOUT; + + while (timeout) { + extcnf_ctrl = er32(EXTCNF_CTRL); + extcnf_ctrl |= E1000_EXTCNF_CTRL_SWFLAG; + ew32(EXTCNF_CTRL, extcnf_ctrl); + + extcnf_ctrl = er32(EXTCNF_CTRL); + if (extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG) + break; + mdelay(1); + timeout--; + } + + if (!timeout) { + hw_dbg(hw, "FW or HW has locked the resource for too long.\n"); + return -E1000_ERR_CONFIG; + } + + return 0; +} + +/** + * e1000_release_swflag_ich8lan - Release software control flag + * @hw: pointer to the HW structure + * + * Releases the software control flag for performing NVM and PHY operations. + * This is a function pointer entry point only called by read/write + * routines for the PHY and NVM parts. 
+ **/ +static void e1000_release_swflag_ich8lan(struct e1000_hw *hw) +{ + u32 extcnf_ctrl; + + extcnf_ctrl = er32(EXTCNF_CTRL); + extcnf_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG; + ew32(EXTCNF_CTRL, extcnf_ctrl); +} + +/** + * e1000_check_reset_block_ich8lan - Check if PHY reset is blocked + * @hw: pointer to the HW structure + * + * Checks if firmware is blocking the reset of the PHY. + * This is a function pointer entry point only called by + * reset routines. + **/ +static s32 e1000_check_reset_block_ich8lan(struct e1000_hw *hw) +{ + u32 fwsm; + + fwsm = er32(FWSM); + + return (fwsm & E1000_ICH_FWSM_RSPCIPHY) ? 0 : E1000_BLK_PHY_RESET; +} + +/** + * e1000_phy_force_speed_duplex_ich8lan - Force PHY speed & duplex + * @hw: pointer to the HW structure + * + * Forces the speed and duplex settings of the PHY. + * This is a function pointer entry point only called by + * PHY setup routines. + **/ +static s32 e1000_phy_force_speed_duplex_ich8lan(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 data; + bool link; + + if (phy->type != e1000_phy_ife) { + ret_val = e1000e_phy_force_speed_duplex_igp(hw); + return ret_val; + } + + ret_val = e1e_rphy(hw, PHY_CONTROL, &data); + if (ret_val) + return ret_val; + + e1000e_phy_force_speed_duplex_setup(hw, &data); + + ret_val = e1e_wphy(hw, PHY_CONTROL, data); + if (ret_val) + return ret_val; + + /* Disable MDI-X support for 10/100 */ + ret_val = e1e_rphy(hw, IFE_PHY_MDIX_CONTROL, &data); + if (ret_val) + return ret_val; + + data &= ~IFE_PMC_AUTO_MDIX; + data &= ~IFE_PMC_FORCE_MDIX; + + ret_val = e1e_wphy(hw, IFE_PHY_MDIX_CONTROL, data); + if (ret_val) + return ret_val; + + hw_dbg(hw, "IFE PMC: %X\n", data); + + udelay(1); + + if (phy->wait_for_link) { + hw_dbg(hw, "Waiting for forced speed/duplex link on IFE phy.\n"); + + ret_val = e1000e_phy_has_link_generic(hw, + PHY_FORCE_LIMIT, + 100000, + &link); + if (ret_val) + return ret_val; + + if (!link) + hw_dbg(hw, "Link taking longer than expected.\n"); + + /* Try once more */ + ret_val = e1000e_phy_has_link_generic(hw, + PHY_FORCE_LIMIT, + 100000, + &link); + if (ret_val) + return ret_val; + } + + return 0; +} + +/** + * e1000_phy_hw_reset_ich8lan - Performs a PHY reset + * @hw: pointer to the HW structure + * + * Resets the PHY + * This is a function pointer entry point called by drivers + * or other shared routines. + **/ +static s32 e1000_phy_hw_reset_ich8lan(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + u32 i; + u32 data, cnf_size, cnf_base_addr, sw_cfg_mask; + s32 ret_val; + u16 loop = E1000_ICH8_LAN_INIT_TIMEOUT; + u16 word_addr, reg_data, reg_addr, phy_page = 0; + + ret_val = e1000e_phy_hw_reset_generic(hw); + if (ret_val) + return ret_val; + + /* Initialize the PHY from the NVM on ICH platforms. This + * is needed due to an issue where the NVM configuration is + * not properly autoloaded after power transitions. + * Therefore, after each PHY reset, we will load the + * configuration data out of the NVM manually. 
+ */ + if (hw->mac.type == e1000_ich8lan && phy->type == e1000_phy_igp_3) { + struct e1000_adapter *adapter = hw->adapter; + + /* Check if SW needs configure the PHY */ + if ((adapter->pdev->device == E1000_DEV_ID_ICH8_IGP_M_AMT) || + (adapter->pdev->device == E1000_DEV_ID_ICH8_IGP_M)) + sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG_ICH8M; + else + sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG; + + data = er32(FEXTNVM); + if (!(data & sw_cfg_mask)) + return 0; + + /* Wait for basic configuration completes before proceeding*/ + do { + data = er32(STATUS); + data &= E1000_STATUS_LAN_INIT_DONE; + udelay(100); + } while ((!data) && --loop); + + /* If basic configuration is incomplete before the above loop + * count reaches 0, loading the configuration from NVM will + * leave the PHY in a bad state possibly resulting in no link. + */ + if (loop == 0) { + hw_dbg(hw, "LAN_INIT_DONE not set, increase timeout\n"); + } + + /* Clear the Init Done bit for the next init event */ + data = er32(STATUS); + data &= ~E1000_STATUS_LAN_INIT_DONE; + ew32(STATUS, data); + + /* Make sure HW does not configure LCD from PHY + * extended configuration before SW configuration */ + data = er32(EXTCNF_CTRL); + if (data & E1000_EXTCNF_CTRL_LCD_WRITE_ENABLE) + return 0; + + cnf_size = er32(EXTCNF_SIZE); + cnf_size &= E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_MASK; + cnf_size >>= E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_SHIFT; + if (!cnf_size) + return 0; + + cnf_base_addr = data & E1000_EXTCNF_CTRL_EXT_CNF_POINTER_MASK; + cnf_base_addr >>= E1000_EXTCNF_CTRL_EXT_CNF_POINTER_SHIFT; + + /* Configure LCD from extended configuration + * region. */ + + /* cnf_base_addr is in DWORD */ + word_addr = (u16)(cnf_base_addr << 1); + + for (i = 0; i < cnf_size; i++) { + ret_val = e1000_read_nvm(hw, + (word_addr + i * 2), + 1, + ®_data); + if (ret_val) + return ret_val; + + ret_val = e1000_read_nvm(hw, + (word_addr + i * 2 + 1), + 1, + ®_addr); + if (ret_val) + return ret_val; + + /* Save off the PHY page for future writes. */ + if (reg_addr == IGP01E1000_PHY_PAGE_SELECT) { + phy_page = reg_data; + continue; + } + + reg_addr |= phy_page; + + ret_val = e1e_wphy(hw, (u32)reg_addr, reg_data); + if (ret_val) + return ret_val; + } + } + + return 0; +} + +/** + * e1000_get_phy_info_ife_ich8lan - Retrieves various IFE PHY states + * @hw: pointer to the HW structure + * + * Populates "phy" structure with various feature states. + * This function is only called by other family-specific + * routines. + **/ +static s32 e1000_get_phy_info_ife_ich8lan(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 data; + bool link; + + ret_val = e1000e_phy_has_link_generic(hw, 1, 0, &link); + if (ret_val) + return ret_val; + + if (!link) { + hw_dbg(hw, "Phy info is only valid if link is up\n"); + return -E1000_ERR_CONFIG; + } + + ret_val = e1e_rphy(hw, IFE_PHY_SPECIAL_CONTROL, &data); + if (ret_val) + return ret_val; + phy->polarity_correction = (!(data & IFE_PSC_AUTO_POLARITY_DISABLE)); + + if (phy->polarity_correction) { + ret_val = e1000_check_polarity_ife_ich8lan(hw); + if (ret_val) + return ret_val; + } else { + /* Polarity is forced */ + phy->cable_polarity = (data & IFE_PSC_FORCE_POLARITY) + ? e1000_rev_polarity_reversed + : e1000_rev_polarity_normal; + } + + ret_val = e1e_rphy(hw, IFE_PHY_MDIX_CONTROL, &data); + if (ret_val) + return ret_val; + + phy->is_mdix = (data & IFE_PMC_MDIX_STATUS); + + /* The following parameters are undefined for 10/100 operation. 
*/ + phy->cable_length = E1000_CABLE_LENGTH_UNDEFINED; + phy->local_rx = e1000_1000t_rx_status_undefined; + phy->remote_rx = e1000_1000t_rx_status_undefined; + + return 0; +} + +/** + * e1000_get_phy_info_ich8lan - Calls appropriate PHY type get_phy_info + * @hw: pointer to the HW structure + * + * Wrapper for calling the get_phy_info routines for the appropriate phy type. + * This is a function pointer entry point called by drivers + * or other shared routines. + **/ +static s32 e1000_get_phy_info_ich8lan(struct e1000_hw *hw) +{ + switch (hw->phy.type) { + case e1000_phy_ife: + return e1000_get_phy_info_ife_ich8lan(hw); + break; + case e1000_phy_igp_3: + return e1000e_get_phy_info_igp(hw); + break; + default: + break; + } + + return -E1000_ERR_PHY_TYPE; +} + +/** + * e1000_check_polarity_ife_ich8lan - Check cable polarity for IFE PHY + * @hw: pointer to the HW structure + * + * Polarity is determined on the polarity reveral feature being enabled. + * This function is only called by other family-specific + * routines. + **/ +static s32 e1000_check_polarity_ife_ich8lan(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 phy_data, offset, mask; + + /* Polarity is determined based on the reversal feature + * being enabled. + */ + if (phy->polarity_correction) { + offset = IFE_PHY_EXTENDED_STATUS_CONTROL; + mask = IFE_PESC_POLARITY_REVERSED; + } else { + offset = IFE_PHY_SPECIAL_CONTROL; + mask = IFE_PSC_FORCE_POLARITY; + } + + ret_val = e1e_rphy(hw, offset, &phy_data); + + if (!ret_val) + phy->cable_polarity = (phy_data & mask) + ? e1000_rev_polarity_reversed + : e1000_rev_polarity_normal; + + return ret_val; +} + +/** + * e1000_set_d0_lplu_state_ich8lan - Set Low Power Linkup D0 state + * @hw: pointer to the HW structure + * @active: TRUE to enable LPLU, FALSE to disable + * + * Sets the LPLU D0 state according to the active flag. When + * activating LPLU this function also disables smart speed + * and vice versa. LPLU will not be activated unless the + * device autonegotiation advertisement meets standards of + * either 10 or 10/100 or 10/100/1000 at all duplexes. + * This is a function pointer entry point only called by + * PHY setup routines. + **/ +static s32 e1000_set_d0_lplu_state_ich8lan(struct e1000_hw *hw, bool active) +{ + struct e1000_phy_info *phy = &hw->phy; + u32 phy_ctrl; + s32 ret_val = 0; + u16 data; + + if (phy->type != e1000_phy_igp_3) + return ret_val; + + phy_ctrl = er32(PHY_CTRL); + + if (active) { + phy_ctrl |= E1000_PHY_CTRL_D0A_LPLU; + ew32(PHY_CTRL, phy_ctrl); + + /* Call gig speed drop workaround on LPLU before accessing + * any PHY registers */ + if ((hw->mac.type == e1000_ich8lan) && + (hw->phy.type == e1000_phy_igp_3)) + e1000e_gig_downshift_workaround_ich8lan(hw); + + /* When LPLU is enabled, we should disable SmartSpeed */ + ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG, &data); + data &= ~IGP01E1000_PSCFR_SMART_SPEED; + ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG, data); + if (ret_val) + return ret_val; + } else { + phy_ctrl &= ~E1000_PHY_CTRL_D0A_LPLU; + ew32(PHY_CTRL, phy_ctrl); + + /* LPLU and SmartSpeed are mutually exclusive. LPLU is used + * during Dx states where the power conservation is most + * important. During driver activity we should enable + * SmartSpeed, so performance is maintained. 
*/ + if (phy->smart_speed == e1000_smart_speed_on) { + ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG, + &data); + if (ret_val) + return ret_val; + + data |= IGP01E1000_PSCFR_SMART_SPEED; + ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG, + data); + if (ret_val) + return ret_val; + } else if (phy->smart_speed == e1000_smart_speed_off) { + ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG, + &data); + if (ret_val) + return ret_val; + + data &= ~IGP01E1000_PSCFR_SMART_SPEED; + ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG, + data); + if (ret_val) + return ret_val; + } + } + + return 0; +} + +/** + * e1000_set_d3_lplu_state_ich8lan - Set Low Power Linkup D3 state + * @hw: pointer to the HW structure + * @active: TRUE to enable LPLU, FALSE to disable + * + * Sets the LPLU D3 state according to the active flag. When + * activating LPLU this function also disables smart speed + * and vice versa. LPLU will not be activated unless the + * device autonegotiation advertisement meets standards of + * either 10 or 10/100 or 10/100/1000 at all duplexes. + * This is a function pointer entry point only called by + * PHY setup routines. + **/ +static s32 e1000_set_d3_lplu_state_ich8lan(struct e1000_hw *hw, bool active) +{ + struct e1000_phy_info *phy = &hw->phy; + u32 phy_ctrl; + s32 ret_val; + u16 data; + + phy_ctrl = er32(PHY_CTRL); + + if (!active) { + phy_ctrl &= ~E1000_PHY_CTRL_NOND0A_LPLU; + ew32(PHY_CTRL, phy_ctrl); + /* LPLU and SmartSpeed are mutually exclusive. LPLU is used + * during Dx states where the power conservation is most + * important. During driver activity we should enable + * SmartSpeed, so performance is maintained. */ + if (phy->smart_speed == e1000_smart_speed_on) { + ret_val = e1e_rphy(hw, + IGP01E1000_PHY_PORT_CONFIG, + &data); + if (ret_val) + return ret_val; + + data |= IGP01E1000_PSCFR_SMART_SPEED; + ret_val = e1e_wphy(hw, + IGP01E1000_PHY_PORT_CONFIG, + data); + if (ret_val) + return ret_val; + } else if (phy->smart_speed == e1000_smart_speed_off) { + ret_val = e1e_rphy(hw, + IGP01E1000_PHY_PORT_CONFIG, + &data); + if (ret_val) + return ret_val; + + data &= ~IGP01E1000_PSCFR_SMART_SPEED; + ret_val = e1e_wphy(hw, + IGP01E1000_PHY_PORT_CONFIG, + data); + if (ret_val) + return ret_val; + } + } else if ((phy->autoneg_advertised == E1000_ALL_SPEED_DUPLEX) || + (phy->autoneg_advertised == E1000_ALL_NOT_GIG) || + (phy->autoneg_advertised == E1000_ALL_10_SPEED)) { + phy_ctrl |= E1000_PHY_CTRL_NOND0A_LPLU; + ew32(PHY_CTRL, phy_ctrl); + + /* Call gig speed drop workaround on LPLU before accessing + * any PHY registers */ + if ((hw->mac.type == e1000_ich8lan) && + (hw->phy.type == e1000_phy_igp_3)) + e1000e_gig_downshift_workaround_ich8lan(hw); + + /* When LPLU is enabled, we should disable SmartSpeed */ + ret_val = e1e_rphy(hw, + IGP01E1000_PHY_PORT_CONFIG, + &data); + if (ret_val) + return ret_val; + + data &= ~IGP01E1000_PSCFR_SMART_SPEED; + ret_val = e1e_wphy(hw, + IGP01E1000_PHY_PORT_CONFIG, + data); + } + + return 0; +} + +/** + * e1000_read_nvm_ich8lan - Read word(s) from the NVM + * @hw: pointer to the HW structure + * @offset: The offset (in bytes) of the word(s) to read. + * @words: Size of data to read in words + * @data: Pointer to the word(s) to read at offset. + * + * Reads a word(s) from the NVM using the flash access registers. 
+ **/ +static s32 e1000_read_nvm_ich8lan(struct e1000_hw *hw, u16 offset, u16 words, + u16 *data) +{ + struct e1000_nvm_info *nvm = &hw->nvm; + struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan; + u32 act_offset; + s32 ret_val; + u16 i, word; + + if ((offset >= nvm->word_size) || (words > nvm->word_size - offset) || + (words == 0)) { + hw_dbg(hw, "nvm parameter(s) out of bounds\n"); + return -E1000_ERR_NVM; + } + + ret_val = e1000_acquire_swflag_ich8lan(hw); + if (ret_val) + return ret_val; + + /* Start with the bank offset, then add the relative offset. */ + act_offset = (er32(EECD) & E1000_EECD_SEC1VAL) + ? nvm->flash_bank_size + : 0; + act_offset += offset; + + for (i = 0; i < words; i++) { + if ((dev_spec->shadow_ram) && + (dev_spec->shadow_ram[offset+i].modified)) { + data[i] = dev_spec->shadow_ram[offset+i].value; + } else { + ret_val = e1000_read_flash_word_ich8lan(hw, + act_offset + i, + &word); + if (ret_val) + break; + data[i] = word; + } + } + + e1000_release_swflag_ich8lan(hw); + + return ret_val; +} + +/** + * e1000_flash_cycle_init_ich8lan - Initialize flash + * @hw: pointer to the HW structure + * + * This function does initial flash setup so that a new read/write/erase cycle + * can be started. + **/ +static s32 e1000_flash_cycle_init_ich8lan(struct e1000_hw *hw) +{ + union ich8_hws_flash_status hsfsts; + s32 ret_val = -E1000_ERR_NVM; + s32 i = 0; + + hsfsts.regval = er16flash(ICH_FLASH_HSFSTS); + + /* Check if the flash descriptor is valid */ + if (hsfsts.hsf_status.fldesvalid == 0) { + hw_dbg(hw, "Flash descriptor invalid. " + "SW Sequencing must be used."); + return -E1000_ERR_NVM; + } + + /* Clear FCERR and DAEL in hw status by writing 1 */ + hsfsts.hsf_status.flcerr = 1; + hsfsts.hsf_status.dael = 1; + + ew16flash(ICH_FLASH_HSFSTS, hsfsts.regval); + + /* Either we should have a hardware SPI cycle in progress + * bit to check against, in order to start a new cycle or + * FDONE bit should be changed in the hardware so that it + * is 1 after harware reset, which can then be used as an + * indication whether a cycle is in progress or has been + * completed. + */ + + if (hsfsts.hsf_status.flcinprog == 0) { + /* There is no cycle running at present, + * so we can start a cycle */ + /* Begin by setting Flash Cycle Done. */ + hsfsts.hsf_status.flcdone = 1; + ew16flash(ICH_FLASH_HSFSTS, hsfsts.regval); + ret_val = 0; + } else { + /* otherwise poll for sometime so the current + * cycle has a chance to end before giving up. */ + for (i = 0; i < ICH_FLASH_READ_COMMAND_TIMEOUT; i++) { + hsfsts.regval = __er16flash(hw, ICH_FLASH_HSFSTS); + if (hsfsts.hsf_status.flcinprog == 0) { + ret_val = 0; + break; + } + udelay(1); + } + if (ret_val == 0) { + /* Successful in waiting for previous cycle to timeout, + * now set the Flash Cycle Done. */ + hsfsts.hsf_status.flcdone = 1; + ew16flash(ICH_FLASH_HSFSTS, hsfsts.regval); + } else { + hw_dbg(hw, "Flash controller busy, cannot get access"); + } + } + + return ret_val; +} + +/** + * e1000_flash_cycle_ich8lan - Starts flash cycle (read/write/erase) + * @hw: pointer to the HW structure + * @timeout: maximum time to wait for completion + * + * This function starts a flash cycle and waits for its completion. 
+ **/ +static s32 e1000_flash_cycle_ich8lan(struct e1000_hw *hw, u32 timeout) +{ + union ich8_hws_flash_ctrl hsflctl; + union ich8_hws_flash_status hsfsts; + s32 ret_val = -E1000_ERR_NVM; + u32 i = 0; + + /* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */ + hsflctl.regval = er16flash(ICH_FLASH_HSFCTL); + hsflctl.hsf_ctrl.flcgo = 1; + ew16flash(ICH_FLASH_HSFCTL, hsflctl.regval); + + /* wait till FDONE bit is set to 1 */ + do { + hsfsts.regval = er16flash(ICH_FLASH_HSFSTS); + if (hsfsts.hsf_status.flcdone == 1) + break; + udelay(1); + } while (i++ < timeout); + + if (hsfsts.hsf_status.flcdone == 1 && hsfsts.hsf_status.flcerr == 0) + return 0; + + return ret_val; +} + +/** + * e1000_read_flash_word_ich8lan - Read word from flash + * @hw: pointer to the HW structure + * @offset: offset to data location + * @data: pointer to the location for storing the data + * + * Reads the flash word at offset into data. Offset is converted + * to bytes before read. + **/ +static s32 e1000_read_flash_word_ich8lan(struct e1000_hw *hw, u32 offset, + u16 *data) +{ + /* Must convert offset into bytes. */ + offset <<= 1; + + return e1000_read_flash_data_ich8lan(hw, offset, 2, data); +} + +/** + * e1000_read_flash_data_ich8lan - Read byte or word from NVM + * @hw: pointer to the HW structure + * @offset: The offset (in bytes) of the byte or word to read. + * @size: Size of data to read, 1=byte 2=word + * @data: Pointer to the word to store the value read. + * + * Reads a byte or word from the NVM using the flash access registers. + **/ +static s32 e1000_read_flash_data_ich8lan(struct e1000_hw *hw, u32 offset, + u8 size, u16 *data) +{ + union ich8_hws_flash_status hsfsts; + union ich8_hws_flash_ctrl hsflctl; + u32 flash_linear_addr; + u32 flash_data = 0; + s32 ret_val = -E1000_ERR_NVM; + u8 count = 0; + + if (size < 1 || size > 2 || offset > ICH_FLASH_LINEAR_ADDR_MASK) + return -E1000_ERR_NVM; + + flash_linear_addr = (ICH_FLASH_LINEAR_ADDR_MASK & offset) + + hw->nvm.flash_base_addr; + + do { + udelay(1); + /* Steps */ + ret_val = e1000_flash_cycle_init_ich8lan(hw); + if (ret_val != 0) + break; + + hsflctl.regval = er16flash(ICH_FLASH_HSFCTL); + /* 0b/1b corresponds to 1 or 2 byte size, respectively. */ + hsflctl.hsf_ctrl.fldbcount = size - 1; + hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_READ; + ew16flash(ICH_FLASH_HSFCTL, hsflctl.regval); + + ew32flash(ICH_FLASH_FADDR, flash_linear_addr); + + ret_val = e1000_flash_cycle_ich8lan(hw, + ICH_FLASH_READ_COMMAND_TIMEOUT); + + /* Check if FCERR is set to 1, if set to 1, clear it + * and try the whole sequence a few more times, else + * read in (shift in) the Flash Data0, the order is + * least significant byte first msb to lsb */ + if (ret_val == 0) { + flash_data = er32flash(ICH_FLASH_FDATA0); + if (size == 1) { + *data = (u8)(flash_data & 0x000000FF); + } else if (size == 2) { + *data = (u16)(flash_data & 0x0000FFFF); + } + break; + } else { + /* If we've gotten here, then things are probably + * completely hosed, but if the error condition is + * detected, it won't hurt to give it another try... + * ICH_FLASH_CYCLE_REPEAT_COUNT times. + */ + hsfsts.regval = er16flash(ICH_FLASH_HSFSTS); + if (hsfsts.hsf_status.flcerr == 1) { + /* Repeat for some time before giving up. 
*/ + continue; + } else if (hsfsts.hsf_status.flcdone == 0) { + hw_dbg(hw, "Timeout error - flash cycle " + "did not complete."); + break; + } + } + } while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT); + + return ret_val; +} + +/** + * e1000_write_nvm_ich8lan - Write word(s) to the NVM + * @hw: pointer to the HW structure + * @offset: The offset (in bytes) of the word(s) to write. + * @words: Size of data to write in words + * @data: Pointer to the word(s) to write at offset. + * + * Writes a byte or word to the NVM using the flash access registers. + **/ +static s32 e1000_write_nvm_ich8lan(struct e1000_hw *hw, u16 offset, u16 words, + u16 *data) +{ + struct e1000_nvm_info *nvm = &hw->nvm; + struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan; + s32 ret_val; + u16 i; + + if ((offset >= nvm->word_size) || (words > nvm->word_size - offset) || + (words == 0)) { + hw_dbg(hw, "nvm parameter(s) out of bounds\n"); + return -E1000_ERR_NVM; + } + + ret_val = e1000_acquire_swflag_ich8lan(hw); + if (ret_val) + return ret_val; + + for (i = 0; i < words; i++) { + dev_spec->shadow_ram[offset+i].modified = 1; + dev_spec->shadow_ram[offset+i].value = data[i]; + } + + e1000_release_swflag_ich8lan(hw); + + return 0; +} + +/** + * e1000_update_nvm_checksum_ich8lan - Update the checksum for NVM + * @hw: pointer to the HW structure + * + * The NVM checksum is updated by calling the generic update_nvm_checksum, + * which writes the checksum to the shadow ram. The changes in the shadow + * ram are then committed to the EEPROM by processing each bank at a time + * checking for the modified bit and writing only the pending changes. + * After a succesful commit, the shadow ram is cleared and is ready for + * future writes. + **/ +static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw) +{ + struct e1000_nvm_info *nvm = &hw->nvm; + struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan; + u32 i, act_offset, new_bank_offset, old_bank_offset; + s32 ret_val; + u16 data; + + ret_val = e1000e_update_nvm_checksum_generic(hw); + if (ret_val) + return ret_val;; + + if (nvm->type != e1000_nvm_flash_sw) + return ret_val;; + + ret_val = e1000_acquire_swflag_ich8lan(hw); + if (ret_val) + return ret_val;; + + /* We're writing to the opposite bank so if we're on bank 1, + * write to bank 0 etc. We also need to erase the segment that + * is going to be written */ + if (!(er32(EECD) & E1000_EECD_SEC1VAL)) { + new_bank_offset = nvm->flash_bank_size; + old_bank_offset = 0; + e1000_erase_flash_bank_ich8lan(hw, 1); + } else { + old_bank_offset = nvm->flash_bank_size; + new_bank_offset = 0; + e1000_erase_flash_bank_ich8lan(hw, 0); + } + + for (i = 0; i < E1000_ICH8_SHADOW_RAM_WORDS; i++) { + /* Determine whether to write the value stored + * in the other NVM bank or a modified value stored + * in the shadow RAM */ + if (dev_spec->shadow_ram[i].modified) { + data = dev_spec->shadow_ram[i].value; + } else { + e1000_read_flash_word_ich8lan(hw, + i + old_bank_offset, + &data); + } + + /* If the word is 0x13, then make sure the signature bits + * (15:14) are 11b until the commit has completed. + * This will allow us to write 10b which indicates the + * signature is valid. We want to do this after the write + * has completed so that we don't mark the segment valid + * while the write is still in progress */ + if (i == E1000_ICH_NVM_SIG_WORD) + data |= E1000_ICH_NVM_SIG_MASK; + + /* Convert offset to bytes. */ + act_offset = (i + new_bank_offset) << 1; + + udelay(100); + /* Write the bytes to the new bank. 
*/ + ret_val = e1000_retry_write_flash_byte_ich8lan(hw, + act_offset, + (u8)data); + if (ret_val) + break; + + udelay(100); + ret_val = e1000_retry_write_flash_byte_ich8lan(hw, + act_offset + 1, + (u8)(data >> 8)); + if (ret_val) + break; + } + + /* Don't bother writing the segment valid bits if sector + * programming failed. */ + if (ret_val) { + hw_dbg(hw, "Flash commit failed.\n"); + e1000_release_swflag_ich8lan(hw); + return ret_val; + } + + /* Finally validate the new segment by setting bit 15:14 + * to 10b in word 0x13 , this can be done without an + * erase as well since these bits are 11 to start with + * and we need to change bit 14 to 0b */ + act_offset = new_bank_offset + E1000_ICH_NVM_SIG_WORD; + e1000_read_flash_word_ich8lan(hw, act_offset, &data); + data &= 0xBFFF; + ret_val = e1000_retry_write_flash_byte_ich8lan(hw, + act_offset * 2 + 1, + (u8)(data >> 8)); + if (ret_val) { + e1000_release_swflag_ich8lan(hw); + return ret_val; + } + + /* And invalidate the previously valid segment by setting + * its signature word (0x13) high_byte to 0b. This can be + * done without an erase because flash erase sets all bits + * to 1's. We can write 1's to 0's without an erase */ + act_offset = (old_bank_offset + E1000_ICH_NVM_SIG_WORD) * 2 + 1; + ret_val = e1000_retry_write_flash_byte_ich8lan(hw, act_offset, 0); + if (ret_val) { + e1000_release_swflag_ich8lan(hw); + return ret_val; + } + + /* Great! Everything worked, we can now clear the cached entries. */ + for (i = 0; i < E1000_ICH8_SHADOW_RAM_WORDS; i++) { + dev_spec->shadow_ram[i].modified = 0; + dev_spec->shadow_ram[i].value = 0xFFFF; + } + + e1000_release_swflag_ich8lan(hw); + + /* Reload the EEPROM, or else modifications will not appear + * until after the next adapter reset. + */ + e1000e_reload_nvm(hw); + msleep(10); + + return ret_val; +} + +/** + * e1000_validate_nvm_checksum_ich8lan - Validate EEPROM checksum + * @hw: pointer to the HW structure + * + * Check to see if checksum needs to be fixed by reading bit 6 in word 0x19. + * If the bit is 0, that the EEPROM had been modified, but the checksum was not + * calculated, in which case we need to calculate the checksum and set bit 6. + **/ +static s32 e1000_validate_nvm_checksum_ich8lan(struct e1000_hw *hw) +{ + s32 ret_val; + u16 data; + + /* Read 0x19 and check bit 6. If this bit is 0, the checksum + * needs to be fixed. This bit is an indication that the NVM + * was prepared by OEM software and did not calculate the + * checksum...a likely scenario. + */ + ret_val = e1000_read_nvm(hw, 0x19, 1, &data); + if (ret_val) + return ret_val; + + if ((data & 0x40) == 0) { + data |= 0x40; + ret_val = e1000_write_nvm(hw, 0x19, 1, &data); + if (ret_val) + return ret_val; + ret_val = e1000e_update_nvm_checksum(hw); + if (ret_val) + return ret_val; + } + + return e1000e_validate_nvm_checksum_generic(hw); +} + +/** + * e1000_write_flash_data_ich8lan - Writes bytes to the NVM + * @hw: pointer to the HW structure + * @offset: The offset (in bytes) of the byte/word to read. + * @size: Size of data to read, 1=byte 2=word + * @data: The byte(s) to write to the NVM. + * + * Writes one/two bytes to the NVM using the flash access registers. 
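As the NVM routines above spell out, ICH8/9 writes only stage words in the shadow RAM; nothing reaches the flash until the checksum update erases and rewrites the inactive bank. A rough sketch of that two-step flow, reusing the e1000_write_nvm/e1000e_update_nvm_checksum wrappers already called in this file (word 0x19 is just an example offset):

        /* Sketch: stage one word in the shadow RAM, then commit the whole bank. */
        static s32 e1000_nvm_stage_and_commit_sketch(struct e1000_hw *hw, u16 word)
        {
                s32 ret_val;

                /* Marks shadow_ram[0x19] as modified; the flash is untouched here. */
                ret_val = e1000_write_nvm(hw, 0x19, 1, &word);
                if (ret_val)
                        return ret_val;

                /* Erases the inactive bank, merges old and modified words into it,
                 * flips the signature words and reloads the NVM. */
                return e1000e_update_nvm_checksum(hw);
        }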
+ **/ +static s32 e1000_write_flash_data_ich8lan(struct e1000_hw *hw, u32 offset, + u8 size, u16 data) +{ + union ich8_hws_flash_status hsfsts; + union ich8_hws_flash_ctrl hsflctl; + u32 flash_linear_addr; + u32 flash_data = 0; + s32 ret_val; + u8 count = 0; + + if (size < 1 || size > 2 || data > size * 0xff || + offset > ICH_FLASH_LINEAR_ADDR_MASK) + return -E1000_ERR_NVM; + + flash_linear_addr = (ICH_FLASH_LINEAR_ADDR_MASK & offset) + + hw->nvm.flash_base_addr; + + do { + udelay(1); + /* Steps */ + ret_val = e1000_flash_cycle_init_ich8lan(hw); + if (ret_val) + break; + + hsflctl.regval = er16flash(ICH_FLASH_HSFCTL); + /* 0b/1b corresponds to 1 or 2 byte size, respectively. */ + hsflctl.hsf_ctrl.fldbcount = size -1; + hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_WRITE; + ew16flash(ICH_FLASH_HSFCTL, hsflctl.regval); + + ew32flash(ICH_FLASH_FADDR, flash_linear_addr); + + if (size == 1) + flash_data = (u32)data & 0x00FF; + else + flash_data = (u32)data; + + ew32flash(ICH_FLASH_FDATA0, flash_data); + + /* check if FCERR is set to 1 , if set to 1, clear it + * and try the whole sequence a few more times else done */ + ret_val = e1000_flash_cycle_ich8lan(hw, + ICH_FLASH_WRITE_COMMAND_TIMEOUT); + if (!ret_val) + break; + + /* If we're here, then things are most likely + * completely hosed, but if the error condition + * is detected, it won't hurt to give it another + * try...ICH_FLASH_CYCLE_REPEAT_COUNT times. + */ + hsfsts.regval = er16flash(ICH_FLASH_HSFSTS); + if (hsfsts.hsf_status.flcerr == 1) + /* Repeat for some time before giving up. */ + continue; + if (hsfsts.hsf_status.flcdone == 0) { + hw_dbg(hw, "Timeout error - flash cycle " + "did not complete."); + break; + } + } while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT); + + return ret_val; +} + +/** + * e1000_write_flash_byte_ich8lan - Write a single byte to NVM + * @hw: pointer to the HW structure + * @offset: The index of the byte to read. + * @data: The byte to write to the NVM. + * + * Writes a single byte to the NVM using the flash access registers. + **/ +static s32 e1000_write_flash_byte_ich8lan(struct e1000_hw *hw, u32 offset, + u8 data) +{ + u16 word = (u16)data; + + return e1000_write_flash_data_ich8lan(hw, offset, 1, word); +} + +/** + * e1000_retry_write_flash_byte_ich8lan - Writes a single byte to NVM + * @hw: pointer to the HW structure + * @offset: The offset of the byte to write. + * @byte: The byte to write to the NVM. + * + * Writes a single byte to the NVM using the flash access registers. + * Goes through a retry algorithm before giving up. + **/ +static s32 e1000_retry_write_flash_byte_ich8lan(struct e1000_hw *hw, + u32 offset, u8 byte) +{ + s32 ret_val; + u16 program_retries; + + ret_val = e1000_write_flash_byte_ich8lan(hw, offset, byte); + if (!ret_val) + return ret_val; + + for (program_retries = 0; program_retries < 100; program_retries++) { + hw_dbg(hw, "Retrying Byte %2.2X at offset %u\n", byte, offset); + udelay(100); + ret_val = e1000_write_flash_byte_ich8lan(hw, offset, byte); + if (!ret_val) + break; + } + if (program_retries == 100) + return -E1000_ERR_NVM; + + return 0; +} + +/** + * e1000_erase_flash_bank_ich8lan - Erase a bank (4k) from NVM + * @hw: pointer to the HW structure + * @bank: 0 for first bank, 1 for second bank, etc. + * + * Erases the bank specified. Each bank is a 4k block. Banks are 0 based. + * bank N is 4096 * N + flash_reg_addr. 
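For orientation, the bank addressing the erase routine below relies on (bank N starts at flash_base_addr plus N bank-sizes, with the bank size kept in 16-bit words) reduces to the following sketch:

        /* Sketch of the bank-to-byte addressing described above: flash_bank_size
         * is stored in 16-bit words, so it is doubled to get the byte span. */
        static u32 e1000_flash_bank_base_sketch(struct e1000_hw *hw, u32 bank)
        {
                u32 bank_size_bytes = hw->nvm.flash_bank_size * 2;

                return hw->nvm.flash_base_addr + bank * bank_size_bytes;
        }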
+ **/ +static s32 e1000_erase_flash_bank_ich8lan(struct e1000_hw *hw, u32 bank) +{ + struct e1000_nvm_info *nvm = &hw->nvm; + union ich8_hws_flash_status hsfsts; + union ich8_hws_flash_ctrl hsflctl; + u32 flash_linear_addr; + /* bank size is in 16bit words - adjust to bytes */ + u32 flash_bank_size = nvm->flash_bank_size * 2; + s32 ret_val; + s32 count = 0; + s32 iteration; + s32 sector_size; + s32 j; + + hsfsts.regval = er16flash(ICH_FLASH_HSFSTS); + + /* Determine HW Sector size: Read BERASE bits of hw flash status + * register */ + /* 00: The Hw sector is 256 bytes, hence we need to erase 16 + * consecutive sectors. The start index for the nth Hw sector + * can be calculated as = bank * 4096 + n * 256 + * 01: The Hw sector is 4K bytes, hence we need to erase 1 sector. + * The start index for the nth Hw sector can be calculated + * as = bank * 4096 + * 10: The Hw sector is 8K bytes, nth sector = bank * 8192 + * (ich9 only, otherwise error condition) + * 11: The Hw sector is 64K bytes, nth sector = bank * 65536 + */ + switch (hsfsts.hsf_status.berasesz) { + case 0: + /* Hw sector size 256 */ + sector_size = ICH_FLASH_SEG_SIZE_256; + iteration = flash_bank_size / ICH_FLASH_SEG_SIZE_256; + break; + case 1: + sector_size = ICH_FLASH_SEG_SIZE_4K; + iteration = flash_bank_size / ICH_FLASH_SEG_SIZE_4K; + break; + case 2: + if (hw->mac.type == e1000_ich9lan) { + sector_size = ICH_FLASH_SEG_SIZE_8K; + iteration = flash_bank_size / ICH_FLASH_SEG_SIZE_8K; + } else { + return -E1000_ERR_NVM; + } + break; + case 3: + sector_size = ICH_FLASH_SEG_SIZE_64K; + iteration = flash_bank_size / ICH_FLASH_SEG_SIZE_64K; + break; + default: + return -E1000_ERR_NVM; + } + + /* Start with the base address, then add the sector offset. */ + flash_linear_addr = hw->nvm.flash_base_addr; + flash_linear_addr += (bank) ? (sector_size * iteration) : 0; + + for (j = 0; j < iteration ; j++) { + do { + /* Steps */ + ret_val = e1000_flash_cycle_init_ich8lan(hw); + if (ret_val) + return ret_val; + + /* Write a value 11 (block Erase) in Flash + * Cycle field in hw flash control */ + hsflctl.regval = er16flash(ICH_FLASH_HSFCTL); + hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_ERASE; + ew16flash(ICH_FLASH_HSFCTL, hsflctl.regval); + + /* Write the last 24 bits of an index within the + * block into Flash Linear address field in Flash + * Address. + */ + flash_linear_addr += (j * sector_size); + ew32flash(ICH_FLASH_FADDR, flash_linear_addr); + + ret_val = e1000_flash_cycle_ich8lan(hw, + ICH_FLASH_ERASE_COMMAND_TIMEOUT); + if (ret_val == 0) + break; + + /* Check if FCERR is set to 1. If 1, + * clear it and try the whole sequence + * a few more times else Done */ + hsfsts.regval = er16flash(ICH_FLASH_HSFSTS); + if (hsfsts.hsf_status.flcerr == 1) + /* repeat for some time before + * giving up */ + continue; + else if (hsfsts.hsf_status.flcdone == 0) + return ret_val; + } while (++count < ICH_FLASH_CYCLE_REPEAT_COUNT); + } + + return 0; +} + +/** + * e1000_valid_led_default_ich8lan - Set the default LED settings + * @hw: pointer to the HW structure + * @data: Pointer to the LED settings + * + * Reads the LED default settings from the NVM to data. If the NVM LED + * settings is all 0's or F's, set the LED default to a valid LED default + * setting. 
+ **/ +static s32 e1000_valid_led_default_ich8lan(struct e1000_hw *hw, u16 *data) +{ + s32 ret_val; + + ret_val = e1000_read_nvm(hw, NVM_ID_LED_SETTINGS, 1, data); + if (ret_val) { + hw_dbg(hw, "NVM Read Error\n"); + return ret_val; + } + + if (*data == ID_LED_RESERVED_0000 || + *data == ID_LED_RESERVED_FFFF) + *data = ID_LED_DEFAULT_ICH8LAN; + + return 0; +} + +/** + * e1000_get_bus_info_ich8lan - Get/Set the bus type and width + * @hw: pointer to the HW structure + * + * ICH8 use the PCI Express bus, but does not contain a PCI Express Capability + * register, so the the bus width is hard coded. + **/ +static s32 e1000_get_bus_info_ich8lan(struct e1000_hw *hw) +{ + struct e1000_bus_info *bus = &hw->bus; + s32 ret_val; + + ret_val = e1000e_get_bus_info_pcie(hw); + + /* ICH devices are "PCI Express"-ish. They have + * a configuration space, but do not contain + * PCI Express Capability registers, so bus width + * must be hardcoded. + */ + if (bus->width == e1000_bus_width_unknown) + bus->width = e1000_bus_width_pcie_x1; + + return ret_val; +} + +/** + * e1000_reset_hw_ich8lan - Reset the hardware + * @hw: pointer to the HW structure + * + * Does a full reset of the hardware which includes a reset of the PHY and + * MAC. + **/ +static s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw) +{ + u32 ctrl, icr, kab; + s32 ret_val; + + /* Prevent the PCI-E bus from sticking if there is no TLP connection + * on the last TLP read/write transaction when MAC is reset. + */ + ret_val = e1000e_disable_pcie_master(hw); + if (ret_val) { + hw_dbg(hw, "PCI-E Master disable polling has failed.\n"); + } + + hw_dbg(hw, "Masking off all interrupts\n"); + ew32(IMC, 0xffffffff); + + /* Disable the Transmit and Receive units. Then delay to allow + * any pending transactions to complete before we hit the MAC + * with the global reset. + */ + ew32(RCTL, 0); + ew32(TCTL, E1000_TCTL_PSP); + e1e_flush(); + + msleep(10); + + /* Workaround for ICH8 bit corruption issue in FIFO memory */ + if (hw->mac.type == e1000_ich8lan) { + /* Set Tx and Rx buffer allocation to 8k apiece. */ + ew32(PBA, E1000_PBA_8K); + /* Set Packet Buffer Size to 16k. */ + ew32(PBS, E1000_PBS_16K); + } + + ctrl = er32(CTRL); + + if (!e1000_check_reset_block(hw)) { + /* PHY HW reset requires MAC CORE reset at the same + * time to make sure the interface between MAC and the + * external PHY is reset. + */ + ctrl |= E1000_CTRL_PHY_RST; + } + ret_val = e1000_acquire_swflag_ich8lan(hw); + hw_dbg(hw, "Issuing a global reset to ich8lan"); + ew32(CTRL, (ctrl | E1000_CTRL_RST)); + msleep(20); + + ret_val = e1000e_get_auto_rd_done(hw); + if (ret_val) { + /* + * When auto config read does not complete, do not + * return with an error. This can happen in situations + * where there is no eeprom and prevents getting link. 
+ */ + hw_dbg(hw, "Auto Read Done did not complete\n"); + } + + ew32(IMC, 0xffffffff); + icr = er32(ICR); + + kab = er32(KABGTXD); + kab |= E1000_KABGTXD_BGSQLBIAS; + ew32(KABGTXD, kab); + + return ret_val; +} + +/** + * e1000_init_hw_ich8lan - Initialize the hardware + * @hw: pointer to the HW structure + * + * Prepares the hardware for transmit and receive by doing the following: + * - initialize hardware bits + * - initialize LED identification + * - setup receive address registers + * - setup flow control + * - setup transmit discriptors + * - clear statistics + **/ +static s32 e1000_init_hw_ich8lan(struct e1000_hw *hw) +{ + struct e1000_mac_info *mac = &hw->mac; + u32 ctrl_ext, txdctl, snoop; + s32 ret_val; + u16 i; + + e1000_initialize_hw_bits_ich8lan(hw); + + /* Initialize identification LED */ + ret_val = e1000e_id_led_init(hw); + if (ret_val) { + hw_dbg(hw, "Error initializing identification LED\n"); + return ret_val; + } + + /* Setup the receive address. */ + e1000e_init_rx_addrs(hw, mac->rar_entry_count); + + /* Zero out the Multicast HASH table */ + hw_dbg(hw, "Zeroing the MTA\n"); + for (i = 0; i < mac->mta_reg_count; i++) + E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, 0); + + /* Setup link and flow control */ + ret_val = e1000_setup_link_ich8lan(hw); + + /* Set the transmit descriptor write-back policy for both queues */ + txdctl = er32(TXDCTL); + txdctl = (txdctl & ~E1000_TXDCTL_WTHRESH) | + E1000_TXDCTL_FULL_TX_DESC_WB; + txdctl = (txdctl & ~E1000_TXDCTL_PTHRESH) | + E1000_TXDCTL_MAX_TX_DESC_PREFETCH; + ew32(TXDCTL, txdctl); + txdctl = er32(TXDCTL1); + txdctl = (txdctl & ~E1000_TXDCTL_WTHRESH) | + E1000_TXDCTL_FULL_TX_DESC_WB; + txdctl = (txdctl & ~E1000_TXDCTL_PTHRESH) | + E1000_TXDCTL_MAX_TX_DESC_PREFETCH; + ew32(TXDCTL1, txdctl); + + /* ICH8 has opposite polarity of no_snoop bits. + * By default, we should use snoop behavior. */ + if (mac->type == e1000_ich8lan) + snoop = PCIE_ICH8_SNOOP_ALL; + else + snoop = (u32) ~(PCIE_NO_SNOOP_ALL); + e1000e_set_pcie_no_snoop(hw, snoop); + + ctrl_ext = er32(CTRL_EXT); + ctrl_ext |= E1000_CTRL_EXT_RO_DIS; + ew32(CTRL_EXT, ctrl_ext); + + /* Clear all of the statistics registers (clear on read). It is + * important that we do this after we have tried to establish link + * because the symbol error count will increment wildly if there + * is no link. + */ + e1000_clear_hw_cntrs_ich8lan(hw); + + return 0; +} +/** + * e1000_initialize_hw_bits_ich8lan - Initialize required hardware bits + * @hw: pointer to the HW structure + * + * Sets/Clears required hardware bits necessary for correctly setting up the + * hardware for transmit and receive. 
+ **/ +static void e1000_initialize_hw_bits_ich8lan(struct e1000_hw *hw) +{ + u32 reg; + + /* Extended Device Control */ + reg = er32(CTRL_EXT); + reg |= (1 << 22); + ew32(CTRL_EXT, reg); + + /* Transmit Descriptor Control 0 */ + reg = er32(TXDCTL); + reg |= (1 << 22); + ew32(TXDCTL, reg); + + /* Transmit Descriptor Control 1 */ + reg = er32(TXDCTL1); + reg |= (1 << 22); + ew32(TXDCTL1, reg); + + /* Transmit Arbitration Control 0 */ + reg = er32(TARC0); + if (hw->mac.type == e1000_ich8lan) + reg |= (1 << 28) | (1 << 29); + reg |= (1 << 23) | (1 << 24) | (1 << 26) | (1 << 27); + ew32(TARC0, reg); + + /* Transmit Arbitration Control 1 */ + reg = er32(TARC1); + if (er32(TCTL) & E1000_TCTL_MULR) + reg &= ~(1 << 28); + else + reg |= (1 << 28); + reg |= (1 << 24) | (1 << 26) | (1 << 30); + ew32(TARC1, reg); + + /* Device Status */ + if (hw->mac.type == e1000_ich8lan) { + reg = er32(STATUS); + reg &= ~(1 << 31); + ew32(STATUS, reg); + } +} + +/** + * e1000_setup_link_ich8lan - Setup flow control and link settings + * @hw: pointer to the HW structure + * + * Determines which flow control settings to use, then configures flow + * control. Calls the appropriate media-specific link configuration + * function. Assuming the adapter has a valid link partner, a valid link + * should be established. Assumes the hardware has previously been reset + * and the transmitter and receiver are not enabled. + **/ +static s32 e1000_setup_link_ich8lan(struct e1000_hw *hw) +{ + struct e1000_mac_info *mac = &hw->mac; + s32 ret_val; + + if (e1000_check_reset_block(hw)) + return 0; + + /* ICH parts do not have a word in the NVM to determine + * the default flow control setting, so we explicitly + * set it to full. + */ + if (mac->fc == e1000_fc_default) + mac->fc = e1000_fc_full; + + mac->original_fc = mac->fc; + + hw_dbg(hw, "After fix-ups FlowControl is now = %x\n", mac->fc); + + /* Continue to configure the copper link. */ + ret_val = e1000_setup_copper_link_ich8lan(hw); + if (ret_val) + return ret_val; + + ew32(FCTTV, mac->fc_pause_time); + + return e1000e_set_fc_watermarks(hw); +} + +/** + * e1000_setup_copper_link_ich8lan - Configure MAC/PHY interface + * @hw: pointer to the HW structure + * + * Configures the kumeran interface to the PHY to wait the appropriate time + * when polling the PHY, then call the generic setup_copper_link to finish + * configuring the copper link. + **/ +static s32 e1000_setup_copper_link_ich8lan(struct e1000_hw *hw) +{ + u32 ctrl; + s32 ret_val; + u16 reg_data; + + ctrl = er32(CTRL); + ctrl |= E1000_CTRL_SLU; + ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX); + ew32(CTRL, ctrl); + + /* Set the mac to wait the maximum time between each iteration + * and increase the max iterations when polling the phy; + * this fixes erroneous timeouts at 10Mbps. 
*/ + ret_val = e1000e_write_kmrn_reg(hw, GG82563_REG(0x34, 4), 0xFFFF); + if (ret_val) + return ret_val; + ret_val = e1000e_read_kmrn_reg(hw, GG82563_REG(0x34, 9), &reg_data); + if (ret_val) + return ret_val; + reg_data |= 0x3F; + ret_val = e1000e_write_kmrn_reg(hw, GG82563_REG(0x34, 9), reg_data); + if (ret_val) + return ret_val; + + if (hw->phy.type == e1000_phy_igp_3) { + ret_val = e1000e_copper_link_setup_igp(hw); + if (ret_val) + return ret_val; + } + + return e1000e_setup_copper_link(hw); +} + +/** + * e1000_get_link_up_info_ich8lan - Get current link speed and duplex + * @hw: pointer to the HW structure + * @speed: pointer to store current link speed + * @duplex: pointer to store the current link duplex + * + * Calls the generic get_speed_and_duplex to retrieve the current link + * information and then calls the Kumeran lock loss workaround for links at + * gigabit speeds. + **/ +static s32 e1000_get_link_up_info_ich8lan(struct e1000_hw *hw, u16 *speed, + u16 *duplex) +{ + s32 ret_val; + + ret_val = e1000e_get_speed_and_duplex_copper(hw, speed, duplex); + if (ret_val) + return ret_val; + + if ((hw->mac.type == e1000_ich8lan) && + (hw->phy.type == e1000_phy_igp_3) && + (*speed == SPEED_1000)) { + ret_val = e1000_kmrn_lock_loss_workaround_ich8lan(hw); + } + + return ret_val; +} + +/** + * e1000_kmrn_lock_loss_workaround_ich8lan - Kumeran workaround + * @hw: pointer to the HW structure + * + * Work-around for 82566 Kumeran PCS lock loss: + * On link status change (i.e. PCI reset, speed change) and link is up and + * speed is gigabit- + * 0) if workaround is optionally disabled do nothing + * 1) wait 1ms for Kumeran link to come up + * 2) check Kumeran Diagnostic register PCS lock loss bit + * 3) if not set the link is locked (all is good), otherwise... + * 4) reset the PHY + * 5) repeat up to 10 times + * Note: this is only called for IGP3 copper when speed is 1gb. + **/ +static s32 e1000_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw) +{ + struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan; + u32 phy_ctrl; + s32 ret_val; + u16 i, data; + bool link; + + if (!dev_spec->kmrn_lock_loss_workaround_enabled) + return 0; + + /* Make sure link is up before proceeding. If not just return. + * Attempting this while link is negotiating fouled up link + * stability */ + ret_val = e1000e_phy_has_link_generic(hw, 1, 0, &link); + if (!link) + return 0; + + for (i = 0; i < 10; i++) { + /* read once to clear */ + ret_val = e1e_rphy(hw, IGP3_KMRN_DIAG, &data); + if (ret_val) + return ret_val; + /* and again to get new status */ + ret_val = e1e_rphy(hw, IGP3_KMRN_DIAG, &data); + if (ret_val) + return ret_val; + + /* check for PCS lock */ + if (!(data & IGP3_KMRN_DIAG_PCS_LOCK_LOSS)) + return 0; + + /* Issue PHY reset */ + e1000_phy_hw_reset(hw); + mdelay(5); + } + /* Disable GigE link negotiation */ + phy_ctrl = er32(PHY_CTRL); + phy_ctrl |= (E1000_PHY_CTRL_GBE_DISABLE | + E1000_PHY_CTRL_NOND0A_GBE_DISABLE); + ew32(PHY_CTRL, phy_ctrl); + + /* Call gig speed drop workaround on Giga disable before accessing + * any PHY registers */ + e1000e_gig_downshift_workaround_ich8lan(hw); + + /* unable to acquire PCS lock */ + return -E1000_ERR_PHY; +} + +/** + * e1000e_set_kmrn_lock_loss_workaround_ich8lan - Set Kumeran workaround state + * @hw: pointer to the HW structure + * @state: boolean value used to set the current Kumeran workaround state + * + * If ICH8, set the current Kumeran workaround state (enabled - TRUE + * /disabled - FALSE).
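Since the lock-loss handling above is gated by a per-device flag, callers can switch it off and back on through the exported setter described here; the call site below is purely illustrative and not part of the patch:

        /* Illustrative only: temporarily disable the PCS lock-loss workaround
         * (ICH8 only; the setter rejects other MAC types), run a step that must
         * not trigger the extra PHY resets, then re-enable it. */
        static void e1000_kmrn_workaround_toggle_sketch(struct e1000_hw *hw)
        {
                e1000e_set_kmrn_lock_loss_workaround_ich8lan(hw, false);

                /* ... caller-specific work that resets or reconfigures the PHY ... */

                e1000e_set_kmrn_lock_loss_workaround_ich8lan(hw, true);
        }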
+ **/ +void e1000e_set_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw, + bool state) +{ + struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan; + + if (hw->mac.type != e1000_ich8lan) { + hw_dbg(hw, "Workaround applies to ICH8 only.\n"); + return; + } + + dev_spec->kmrn_lock_loss_workaround_enabled = state; +} + +/** + * e1000e_igp3_phy_powerdown_workaround_ich8lan - Power down workaround on D3 + * @hw: pointer to the HW structure + * + * Workaround for 82566 power-down on D3 entry: + * 1) disable gigabit link + * 2) write VR power-down enable + * 3) read it back + * Continue if successful, else issue LCD reset and repeat + **/ +void e1000e_igp3_phy_powerdown_workaround_ich8lan(struct e1000_hw *hw) +{ + u32 reg; + u16 data; + u8 retry = 0; + + if (hw->phy.type != e1000_phy_igp_3) + return; + + /* Try the workaround twice (if needed) */ + do { + /* Disable link */ + reg = er32(PHY_CTRL); + reg |= (E1000_PHY_CTRL_GBE_DISABLE | + E1000_PHY_CTRL_NOND0A_GBE_DISABLE); + ew32(PHY_CTRL, reg); + + /* Call gig speed drop workaround on Giga disable before + * accessing any PHY registers */ + if (hw->mac.type == e1000_ich8lan) + e1000e_gig_downshift_workaround_ich8lan(hw); + + /* Write VR power-down enable */ + e1e_rphy(hw, IGP3_VR_CTRL, &data); + data &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK; + e1e_wphy(hw, IGP3_VR_CTRL, data | IGP3_VR_CTRL_MODE_SHUTDOWN); + + /* Read it back and test */ + e1e_rphy(hw, IGP3_VR_CTRL, &data); + data &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK; + if ((data == IGP3_VR_CTRL_MODE_SHUTDOWN) || retry) + break; + + /* Issue PHY reset and repeat at most one more time */ + reg = er32(CTRL); + ew32(CTRL, reg | E1000_CTRL_PHY_RST); + retry++; + } while (retry); +} + +/** + * e1000e_gig_downshift_workaround_ich8lan - WoL from S5 stops working + * @hw: pointer to the HW structure + * + * Steps to take when dropping from 1Gb/s (e.g. link cable removal (LSC), + * LPLU, Giga disable, MDIC PHY reset): + * 1) Set Kumeran Near-end loopback + * 2) Clear Kumeran Near-end loopback + * Should only be called for ICH8[m] devices with IGP_3 Phy. + **/ +void e1000e_gig_downshift_workaround_ich8lan(struct e1000_hw *hw) +{ + s32 ret_val; + u16 reg_data; + + if ((hw->mac.type != e1000_ich8lan) || + (hw->phy.type != e1000_phy_igp_3)) + return; + + ret_val = e1000e_read_kmrn_reg(hw, E1000_KMRNCTRLSTA_DIAG_OFFSET, + &reg_data); + if (ret_val) + return; + reg_data |= E1000_KMRNCTRLSTA_DIAG_NELPBK; + ret_val = e1000e_write_kmrn_reg(hw, E1000_KMRNCTRLSTA_DIAG_OFFSET, + reg_data); + if (ret_val) + return; + reg_data &= ~E1000_KMRNCTRLSTA_DIAG_NELPBK; + ret_val = e1000e_write_kmrn_reg(hw, E1000_KMRNCTRLSTA_DIAG_OFFSET, + reg_data); +} + +/** + * e1000_cleanup_led_ich8lan - Restore the default LED operation + * @hw: pointer to the HW structure + * + * Return the LED back to the default configuration. + **/ +static s32 e1000_cleanup_led_ich8lan(struct e1000_hw *hw) +{ + if (hw->phy.type == e1000_phy_ife) + return e1e_wphy(hw, IFE_PHY_SPECIAL_CONTROL_LED, 0); + + ew32(LEDCTL, hw->mac.ledctl_default); + return 0; +} + +/** + * e1000_led_on_ich8lan - Turn LEDs on + * @hw: pointer to the HW structure + * + * Turn on the LEDs. + **/ +static s32 e1000_led_on_ich8lan(struct e1000_hw *hw) +{ + if (hw->phy.type == e1000_phy_ife) + return e1e_wphy(hw, IFE_PHY_SPECIAL_CONTROL_LED, + (IFE_PSCL_PROBE_MODE | IFE_PSCL_PROBE_LEDS_ON)); + + ew32(LEDCTL, hw->mac.ledctl_mode2); + return 0; +} + +/** + * e1000_led_off_ich8lan - Turn LEDs off + * @hw: pointer to the HW structure + * + * Turn off the LEDs.
+ **/ +static s32 e1000_led_off_ich8lan(struct e1000_hw *hw) +{ + if (hw->phy.type == e1000_phy_ife) + return e1e_wphy(hw, IFE_PHY_SPECIAL_CONTROL_LED, + (IFE_PSCL_PROBE_MODE | IFE_PSCL_PROBE_LEDS_OFF)); + + ew32(LEDCTL, hw->mac.ledctl_mode1); + return 0; +} + +/** + * e1000_clear_hw_cntrs_ich8lan - Clear statistical counters + * @hw: pointer to the HW structure + * + * Clears hardware counters specific to the silicon family and calls + * clear_hw_cntrs_generic to clear all general purpose counters. + **/ +static void e1000_clear_hw_cntrs_ich8lan(struct e1000_hw *hw) +{ + u32 temp; + + e1000e_clear_hw_cntrs_base(hw); + + temp = er32(ALGNERRC); + temp = er32(RXERRC); + temp = er32(TNCRS); + temp = er32(CEXTERR); + temp = er32(TSCTC); + temp = er32(TSCTFC); + + temp = er32(MGTPRC); + temp = er32(MGTPDC); + temp = er32(MGTPTC); + + temp = er32(IAC); + temp = er32(ICRXOC); + +} + +static struct e1000_mac_operations ich8_mac_ops = { + .mng_mode_enab = E1000_ICH_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT, + .check_for_link = e1000e_check_for_copper_link, + .cleanup_led = e1000_cleanup_led_ich8lan, + .clear_hw_cntrs = e1000_clear_hw_cntrs_ich8lan, + .get_bus_info = e1000_get_bus_info_ich8lan, + .get_link_up_info = e1000_get_link_up_info_ich8lan, + .led_on = e1000_led_on_ich8lan, + .led_off = e1000_led_off_ich8lan, + .mc_addr_list_update = e1000e_mc_addr_list_update_generic, + .reset_hw = e1000_reset_hw_ich8lan, + .init_hw = e1000_init_hw_ich8lan, + .setup_link = e1000_setup_link_ich8lan, + .setup_physical_interface= e1000_setup_copper_link_ich8lan, +}; + +static struct e1000_phy_operations ich8_phy_ops = { + .acquire_phy = e1000_acquire_swflag_ich8lan, + .check_reset_block = e1000_check_reset_block_ich8lan, + .commit_phy = NULL, + .force_speed_duplex = e1000_phy_force_speed_duplex_ich8lan, + .get_cfg_done = e1000e_get_cfg_done, + .get_cable_length = e1000e_get_cable_length_igp_2, + .get_phy_info = e1000_get_phy_info_ich8lan, + .read_phy_reg = e1000e_read_phy_reg_igp, + .release_phy = e1000_release_swflag_ich8lan, + .reset_phy = e1000_phy_hw_reset_ich8lan, + .set_d0_lplu_state = e1000_set_d0_lplu_state_ich8lan, + .set_d3_lplu_state = e1000_set_d3_lplu_state_ich8lan, + .write_phy_reg = e1000e_write_phy_reg_igp, +}; + +static struct e1000_nvm_operations ich8_nvm_ops = { + .acquire_nvm = e1000_acquire_swflag_ich8lan, + .read_nvm = e1000_read_nvm_ich8lan, + .release_nvm = e1000_release_swflag_ich8lan, + .update_nvm = e1000_update_nvm_checksum_ich8lan, + .valid_led_default = e1000_valid_led_default_ich8lan, + .validate_nvm = e1000_validate_nvm_checksum_ich8lan, + .write_nvm = e1000_write_nvm_ich8lan, +}; + +struct e1000_info e1000_ich8_info = { + .mac = e1000_ich8lan, + .flags = FLAG_HAS_WOL + | FLAG_RX_CSUM_ENABLED + | FLAG_HAS_CTRLEXT_ON_LOAD + | FLAG_HAS_AMT + | FLAG_HAS_FLASH + | FLAG_APME_IN_WUC, + .pba = 8, + .get_invariants = e1000_get_invariants_ich8lan, + .mac_ops = &ich8_mac_ops, + .phy_ops = &ich8_phy_ops, + .nvm_ops = &ich8_nvm_ops, +}; + +struct e1000_info e1000_ich9_info = { + .mac = e1000_ich9lan, + .flags = FLAG_HAS_JUMBO_FRAMES + | FLAG_HAS_WOL + | FLAG_RX_CSUM_ENABLED + | FLAG_HAS_CTRLEXT_ON_LOAD + | FLAG_HAS_AMT + | FLAG_HAS_ERT + | FLAG_HAS_FLASH + | FLAG_APME_IN_WUC, + .pba = 10, + .get_invariants = e1000_get_invariants_ich8lan, + .mac_ops = &ich8_mac_ops, + .phy_ops = &ich8_phy_ops, + .nvm_ops = &ich8_nvm_ops, +}; + diff --git a/drivers/net/e1000e/lib.c b/drivers/net/e1000e/lib.c new file mode 100644 index 
0000000000000000000000000000000000000000..0bdeca30c75fd3348f5b56fe49e8c03ed581d600 --- /dev/null +++ b/drivers/net/e1000e/lib.c @@ -0,0 +1,2493 @@ +/******************************************************************************* + + Intel PRO/1000 Linux driver + Copyright(c) 1999 - 2007 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + Linux NICS + e1000-devel Mailing List + Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +#include +#include +#include +#include + +#include "e1000.h" + +enum e1000_mng_mode { + e1000_mng_mode_none = 0, + e1000_mng_mode_asf, + e1000_mng_mode_pt, + e1000_mng_mode_ipmi, + e1000_mng_mode_host_if_only +}; + +#define E1000_FACTPS_MNGCG 0x20000000 + +#define E1000_IAMT_SIGNATURE 0x544D4149 /* Intel(R) Active Management + * Technology signature */ + +/** + * e1000e_get_bus_info_pcie - Get PCIe bus information + * @hw: pointer to the HW structure + * + * Determines and stores the system bus information for a particular + * network interface. The following bus information is determined and stored: + * bus speed, bus width, type (PCIe), and PCIe function. + **/ +s32 e1000e_get_bus_info_pcie(struct e1000_hw *hw) +{ + struct e1000_bus_info *bus = &hw->bus; + struct e1000_adapter *adapter = hw->adapter; + u32 status; + u16 pcie_link_status, pci_header_type, cap_offset; + + cap_offset = pci_find_capability(adapter->pdev, PCI_CAP_ID_EXP); + if (!cap_offset) { + bus->width = e1000_bus_width_unknown; + } else { + pci_read_config_word(adapter->pdev, + cap_offset + PCIE_LINK_STATUS, + &pcie_link_status); + bus->width = (enum e1000_bus_width)((pcie_link_status & + PCIE_LINK_WIDTH_MASK) >> + PCIE_LINK_WIDTH_SHIFT); + } + + pci_read_config_word(adapter->pdev, PCI_HEADER_TYPE_REGISTER, + &pci_header_type); + if (pci_header_type & PCI_HEADER_TYPE_MULTIFUNC) { + status = er32(STATUS); + bus->func = (status & E1000_STATUS_FUNC_MASK) + >> E1000_STATUS_FUNC_SHIFT; + } else { + bus->func = 0; + } + + return 0; +} + +/** + * e1000e_write_vfta - Write value to VLAN filter table + * @hw: pointer to the HW structure + * @offset: register offset in VLAN filter table + * @value: register value written to VLAN filter table + * + * Writes value at the given offset in the register array which stores + * the VLAN filter table. 
+ **/ +void e1000e_write_vfta(struct e1000_hw *hw, u32 offset, u32 value) +{ + E1000_WRITE_REG_ARRAY(hw, E1000_VFTA, offset, value); + e1e_flush(); +} + +/** + * e1000e_init_rx_addrs - Initialize receive address's + * @hw: pointer to the HW structure + * @rar_count: receive address registers + * + * Setups the receive address registers by setting the base receive address + * register to the devices MAC address and clearing all the other receive + * address registers to 0. + **/ +void e1000e_init_rx_addrs(struct e1000_hw *hw, u16 rar_count) +{ + u32 i; + + /* Setup the receive address */ + hw_dbg(hw, "Programming MAC Address into RAR[0]\n"); + + e1000e_rar_set(hw, hw->mac.addr, 0); + + /* Zero out the other (rar_entry_count - 1) receive addresses */ + hw_dbg(hw, "Clearing RAR[1-%u]\n", rar_count-1); + for (i = 1; i < rar_count; i++) { + E1000_WRITE_REG_ARRAY(hw, E1000_RA, (i << 1), 0); + e1e_flush(); + E1000_WRITE_REG_ARRAY(hw, E1000_RA, ((i << 1) + 1), 0); + e1e_flush(); + } +} + +/** + * e1000e_rar_set - Set receive address register + * @hw: pointer to the HW structure + * @addr: pointer to the receive address + * @index: receive address array register + * + * Sets the receive address array register at index to the address passed + * in by addr. + **/ +void e1000e_rar_set(struct e1000_hw *hw, u8 *addr, u32 index) +{ + u32 rar_low, rar_high; + + /* HW expects these in little endian so we reverse the byte order + * from network order (big endian) to little endian + */ + rar_low = ((u32) addr[0] | + ((u32) addr[1] << 8) | + ((u32) addr[2] << 16) | ((u32) addr[3] << 24)); + + rar_high = ((u32) addr[4] | ((u32) addr[5] << 8)); + + rar_high |= E1000_RAH_AV; + + E1000_WRITE_REG_ARRAY(hw, E1000_RA, (index << 1), rar_low); + E1000_WRITE_REG_ARRAY(hw, E1000_RA, ((index << 1) + 1), rar_high); +} + +/** + * e1000_mta_set - Set multicast filter table address + * @hw: pointer to the HW structure + * @hash_value: determines the MTA register and bit to set + * + * The multicast table address is a register array of 32-bit registers. + * The hash_value is used to determine what register the bit is in, the + * current value is read, the new bit is OR'd in and the new value is + * written back into the register. + **/ +static void e1000_mta_set(struct e1000_hw *hw, u32 hash_value) +{ + u32 hash_bit, hash_reg, mta; + + /* The MTA is a register array of 32-bit registers. It is + * treated like an array of (32*mta_reg_count) bits. We want to + * set bit BitArray[hash_value]. So we figure out what register + * the bit is in, read it, OR in the new bit, then write + * back the new value. The (hw->mac.mta_reg_count - 1) serves as a + * mask to bits 31:5 of the hash value which gives us the + * register we're modifying. The hash bit within that register + * is determined by the lower 5 bits of the hash value. + */ + hash_reg = (hash_value >> 5) & (hw->mac.mta_reg_count - 1); + hash_bit = hash_value & 0x1F; + + mta = E1000_READ_REG_ARRAY(hw, E1000_MTA, hash_reg); + + mta |= (1 << hash_bit); + + E1000_WRITE_REG_ARRAY(hw, E1000_MTA, hash_reg, mta); + e1e_flush(); +} + +/** + * e1000_hash_mc_addr - Generate a multicast hash value + * @hw: pointer to the HW structure + * @mc_addr: pointer to a multicast address + * + * Generates a multicast address hash value which is used to determine + * the multicast filter table array address and new table value. 
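To make the byte ordering in e1000e_rar_set above concrete, here is the packing worked through for an arbitrary example address 00:11:22:33:44:55 (chosen only for illustration):

        /* Worked example of the RAR packing above: the first four bytes form the
         * low dword, the last two the high dword, which also carries the
         * Address Valid bit before being written to RA[2*index + 1]. */
        static void e1000_rar_pack_example(void)
        {
                u8 addr[6] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };
                u32 rar_low, rar_high;

                rar_low = (u32)addr[0] | ((u32)addr[1] << 8) |
                          ((u32)addr[2] << 16) | ((u32)addr[3] << 24); /* 0x33221100 */
                rar_high = (u32)addr[4] | ((u32)addr[5] << 8);         /* 0x00005544 */
                rar_high |= E1000_RAH_AV;       /* mark the entry as valid */

                (void)rar_low;
                (void)rar_high;
        }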
See + * e1000_mta_set_generic() + **/ +static u32 e1000_hash_mc_addr(struct e1000_hw *hw, u8 *mc_addr) +{ + u32 hash_value, hash_mask; + u8 bit_shift = 0; + + /* Register count multiplied by bits per register */ + hash_mask = (hw->mac.mta_reg_count * 32) - 1; + + /* For a mc_filter_type of 0, bit_shift is the number of left-shifts + * where 0xFF would still fall within the hash mask. */ + while (hash_mask >> bit_shift != 0xFF) + bit_shift++; + + /* The portion of the address that is used for the hash table + * is determined by the mc_filter_type setting. + * The algorithm is such that there is a total of 8 bits of shifting. + * The bit_shift for a mc_filter_type of 0 represents the number of + * left-shifts where the MSB of mc_addr[5] would still fall within + * the hash_mask. Case 0 does this exactly. Since there are a total + * of 8 bits of shifting, then mc_addr[4] will shift right the + * remaining number of bits. Thus 8 - bit_shift. The rest of the + * cases are a variation of this algorithm...essentially raising the + * number of bits to shift mc_addr[5] left, while still keeping the + * 8-bit shifting total. + */ + /* For example, given the following Destination MAC Address and an + * mta register count of 128 (thus a 4096-bit vector and 0xFFF mask), + * we can see that the bit_shift for case 0 is 4. These are the hash + * values resulting from each mc_filter_type... + * [0] [1] [2] [3] [4] [5] + * 01 AA 00 12 34 56 + * LSB MSB + * + * case 0: hash_value = ((0x34 >> 4) | (0x56 << 4)) & 0xFFF = 0x563 + * case 1: hash_value = ((0x34 >> 3) | (0x56 << 5)) & 0xFFF = 0xAC6 + * case 2: hash_value = ((0x34 >> 2) | (0x56 << 6)) & 0xFFF = 0x163 + * case 3: hash_value = ((0x34 >> 0) | (0x56 << 8)) & 0xFFF = 0x634 + */ + switch (hw->mac.mc_filter_type) { + default: + case 0: + break; + case 1: + bit_shift += 1; + break; + case 2: + bit_shift += 2; + break; + case 3: + bit_shift += 4; + break; + } + + hash_value = hash_mask & (((mc_addr[4] >> (8 - bit_shift)) | + (((u16) mc_addr[5]) << bit_shift))); + + return hash_value; +} + +/** + * e1000e_mc_addr_list_update_generic - Update Multicast addresses + * @hw: pointer to the HW structure + * @mc_addr_list: array of multicast addresses to program + * @mc_addr_count: number of multicast addresses to program + * @rar_used_count: the first RAR register free to program + * @rar_count: total number of supported Receive Address Registers + * + * Updates the Receive Address Registers and Multicast Table Array. + * The caller must have a packed mc_addr_list of multicast addresses. + * The parameter rar_count will usually be hw->mac.rar_entry_count + * unless there are workarounds that change this. + **/ +void e1000e_mc_addr_list_update_generic(struct e1000_hw *hw, + u8 *mc_addr_list, u32 mc_addr_count, + u32 rar_used_count, u32 rar_count) +{ + u32 hash_value; + u32 i; + + /* Load the first set of multicast addresses into the exact + * filters (RAR). If there are not enough to fill the RAR + * array, clear the filters. 
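Following the case-0 example in the comment above, a small self-check of the default hash and of the register/bit split that e1000_mta_set applies to it (assuming the 128-register MTA from that example):

        /* Reproduces the case-0 value above for 01:AA:00:12:34:56 with a
         * 128-register MTA (4096-bit vector, hash mask 0xFFF). */
        static void e1000_mc_hash_example(void)
        {
                u8 mc_addr[6] = { 0x01, 0xAA, 0x00, 0x12, 0x34, 0x56 };
                u32 hash_mask = (128 * 32) - 1;         /* 0xFFF */
                u8 bit_shift = 0;
                u32 hash_value, hash_reg, hash_bit;

                while (hash_mask >> bit_shift != 0xFF)
                        bit_shift++;                    /* ends at 4 */

                /* mc_filter_type 0: no additional shift */
                hash_value = hash_mask & ((mc_addr[4] >> (8 - bit_shift)) |
                                          ((u16)mc_addr[5] << bit_shift));
                /* hash_value == 0x563, matching the comment above */

                hash_reg = (hash_value >> 5) & (128 - 1);       /* MTA register 43 */
                hash_bit = hash_value & 0x1F;                   /* bit 3 of that register */

                (void)hash_reg;
                (void)hash_bit;
        }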
+ */ + for (i = rar_used_count; i < rar_count; i++) { + if (mc_addr_count) { + e1000e_rar_set(hw, mc_addr_list, i); + mc_addr_count--; + mc_addr_list += ETH_ALEN; + } else { + E1000_WRITE_REG_ARRAY(hw, E1000_RA, i << 1, 0); + e1e_flush(); + E1000_WRITE_REG_ARRAY(hw, E1000_RA, (i << 1) + 1, 0); + e1e_flush(); + } + } + + /* Clear the old settings from the MTA */ + hw_dbg(hw, "Clearing MTA\n"); + for (i = 0; i < hw->mac.mta_reg_count; i++) { + E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, 0); + e1e_flush(); + } + + /* Load any remaining multicast addresses into the hash table. */ + for (; mc_addr_count > 0; mc_addr_count--) { + hash_value = e1000_hash_mc_addr(hw, mc_addr_list); + hw_dbg(hw, "Hash value = 0x%03X\n", hash_value); + e1000_mta_set(hw, hash_value); + mc_addr_list += ETH_ALEN; + } +} + +/** + * e1000e_clear_hw_cntrs_base - Clear base hardware counters + * @hw: pointer to the HW structure + * + * Clears the base hardware counters by reading the counter registers. + **/ +void e1000e_clear_hw_cntrs_base(struct e1000_hw *hw) +{ + u32 temp; + + temp = er32(CRCERRS); + temp = er32(SYMERRS); + temp = er32(MPC); + temp = er32(SCC); + temp = er32(ECOL); + temp = er32(MCC); + temp = er32(LATECOL); + temp = er32(COLC); + temp = er32(DC); + temp = er32(SEC); + temp = er32(RLEC); + temp = er32(XONRXC); + temp = er32(XONTXC); + temp = er32(XOFFRXC); + temp = er32(XOFFTXC); + temp = er32(FCRUC); + temp = er32(GPRC); + temp = er32(BPRC); + temp = er32(MPRC); + temp = er32(GPTC); + temp = er32(GORCL); + temp = er32(GORCH); + temp = er32(GOTCL); + temp = er32(GOTCH); + temp = er32(RNBC); + temp = er32(RUC); + temp = er32(RFC); + temp = er32(ROC); + temp = er32(RJC); + temp = er32(TORL); + temp = er32(TORH); + temp = er32(TOTL); + temp = er32(TOTH); + temp = er32(TPR); + temp = er32(TPT); + temp = er32(MPTC); + temp = er32(BPTC); +} + +/** + * e1000e_check_for_copper_link - Check for link (Copper) + * @hw: pointer to the HW structure + * + * Checks to see of the link status of the hardware has changed. If a + * change in link status has been detected, then we read the PHY registers + * to get the current speed/duplex if link exists. + **/ +s32 e1000e_check_for_copper_link(struct e1000_hw *hw) +{ + struct e1000_mac_info *mac = &hw->mac; + s32 ret_val; + bool link; + + /* We only want to go out to the PHY registers to see if Auto-Neg + * has completed and/or if our link status has changed. The + * get_link_status flag is set upon receiving a Link Status + * Change or Rx Sequence Error interrupt. + */ + if (!mac->get_link_status) + return 0; + + /* First we want to see if the MII Status Register reports + * link. If so, then we want to get the current speed/duplex + * of the PHY. + */ + ret_val = e1000e_phy_has_link_generic(hw, 1, 0, &link); + if (ret_val) + return ret_val; + + if (!link) + return ret_val; /* No link detected */ + + mac->get_link_status = 0; + + /* Check if there was DownShift, must be checked + * immediately after link-up */ + e1000e_check_downshift(hw); + + /* If we are forcing speed/duplex, then we simply return since + * we have already determined whether we have link or not. + */ + if (!mac->autoneg) { + ret_val = -E1000_ERR_CONFIG; + return ret_val; + } + + /* Auto-Neg is enabled. Auto Speed Detection takes care + * of MAC speed/duplex configuration. So we only need to + * configure Collision Distance in the MAC. + */ + e1000e_config_collision_dist(hw); + + /* Configure Flow Control now that Auto-Neg has completed. 
+ * First, we need to restore the desired flow control + * settings because we may have had to re-autoneg with a + * different link partner. + */ + ret_val = e1000e_config_fc_after_link_up(hw); + if (ret_val) { + hw_dbg(hw, "Error configuring flow control\n"); + } + + return ret_val; +} + +/** + * e1000e_check_for_fiber_link - Check for link (Fiber) + * @hw: pointer to the HW structure + * + * Checks for link up on the hardware. If link is not up and we have + * a signal, then we need to force link up. + **/ +s32 e1000e_check_for_fiber_link(struct e1000_hw *hw) +{ + struct e1000_mac_info *mac = &hw->mac; + u32 rxcw; + u32 ctrl; + u32 status; + s32 ret_val; + + ctrl = er32(CTRL); + status = er32(STATUS); + rxcw = er32(RXCW); + + /* If we don't have link (auto-negotiation failed or link partner + * cannot auto-negotiate), the cable is plugged in (we have signal), + * and our link partner is not trying to auto-negotiate with us (we + * are receiving idles or data), we need to force link up. We also + * need to give auto-negotiation time to complete, in case the cable + * was just plugged in. The autoneg_failed flag does this. + */ + /* (ctrl & E1000_CTRL_SWDPIN1) == 1 == have signal */ + if ((ctrl & E1000_CTRL_SWDPIN1) && (!(status & E1000_STATUS_LU)) && + (!(rxcw & E1000_RXCW_C))) { + if (mac->autoneg_failed == 0) { + mac->autoneg_failed = 1; + return 0; + } + hw_dbg(hw, "NOT RXing /C/, disable AutoNeg and force link.\n"); + + /* Disable auto-negotiation in the TXCW register */ + ew32(TXCW, (mac->txcw & ~E1000_TXCW_ANE)); + + /* Force link-up and also force full-duplex. */ + ctrl = er32(CTRL); + ctrl |= (E1000_CTRL_SLU | E1000_CTRL_FD); + ew32(CTRL, ctrl); + + /* Configure Flow Control after forcing link up. */ + ret_val = e1000e_config_fc_after_link_up(hw); + if (ret_val) { + hw_dbg(hw, "Error configuring flow control\n"); + return ret_val; + } + } else if ((ctrl & E1000_CTRL_SLU) && (rxcw & E1000_RXCW_C)) { + /* If we are forcing link and we are receiving /C/ ordered + * sets, re-enable auto-negotiation in the TXCW register + * and disable forced link in the Device Control register + * in an attempt to auto-negotiate with our link partner. + */ + hw_dbg(hw, "RXing /C/, enable AutoNeg and stop forcing link.\n"); + ew32(TXCW, mac->txcw); + ew32(CTRL, (ctrl & ~E1000_CTRL_SLU)); + + mac->serdes_has_link = 1; + } + + return 0; +} + +/** + * e1000e_check_for_serdes_link - Check for link (Serdes) + * @hw: pointer to the HW structure + * + * Checks for link up on the hardware. If link is not up and we have + * a signal, then we need to force link up. + **/ +s32 e1000e_check_for_serdes_link(struct e1000_hw *hw) +{ + struct e1000_mac_info *mac = &hw->mac; + u32 rxcw; + u32 ctrl; + u32 status; + s32 ret_val; + + ctrl = er32(CTRL); + status = er32(STATUS); + rxcw = er32(RXCW); + + /* If we don't have link (auto-negotiation failed or link partner + * cannot auto-negotiate), and our link partner is not trying to + * auto-negotiate with us (we are receiving idles or data), + * we need to force link up. We also need to give auto-negotiation + * time to complete. + */ + /* (ctrl & E1000_CTRL_SWDPIN1) == 1 == have signal */ + if ((!(status & E1000_STATUS_LU)) && (!(rxcw & E1000_RXCW_C))) { + if (mac->autoneg_failed == 0) { + mac->autoneg_failed = 1; + return 0; + } + hw_dbg(hw, "NOT RXing /C/, disable AutoNeg and force link.\n"); + + /* Disable auto-negotiation in the TXCW register */ + ew32(TXCW, (mac->txcw & ~E1000_TXCW_ANE)); + + /* Force link-up and also force full-duplex. 
*/ + ctrl = er32(CTRL); + ctrl |= (E1000_CTRL_SLU | E1000_CTRL_FD); + ew32(CTRL, ctrl); + + /* Configure Flow Control after forcing link up. */ + ret_val = e1000e_config_fc_after_link_up(hw); + if (ret_val) { + hw_dbg(hw, "Error configuring flow control\n"); + return ret_val; + } + } else if ((ctrl & E1000_CTRL_SLU) && (rxcw & E1000_RXCW_C)) { + /* If we are forcing link and we are receiving /C/ ordered + * sets, re-enable auto-negotiation in the TXCW register + * and disable forced link in the Device Control register + * in an attempt to auto-negotiate with our link partner. + */ + hw_dbg(hw, "RXing /C/, enable AutoNeg and stop forcing link.\n"); + ew32(TXCW, mac->txcw); + ew32(CTRL, (ctrl & ~E1000_CTRL_SLU)); + + mac->serdes_has_link = 1; + } else if (!(E1000_TXCW_ANE & er32(TXCW))) { + /* If we force link for non-auto-negotiation switch, check + * link status based on MAC synchronization for internal + * serdes media type. + */ + /* SYNCH bit and IV bit are sticky. */ + udelay(10); + if (E1000_RXCW_SYNCH & er32(RXCW)) { + if (!(rxcw & E1000_RXCW_IV)) { + mac->serdes_has_link = 1; + hw_dbg(hw, "SERDES: Link is up.\n"); + } + } else { + mac->serdes_has_link = 0; + hw_dbg(hw, "SERDES: Link is down.\n"); + } + } + + if (E1000_TXCW_ANE & er32(TXCW)) { + status = er32(STATUS); + mac->serdes_has_link = (status & E1000_STATUS_LU); + } + + return 0; +} + +/** + * e1000_set_default_fc_generic - Set flow control default values + * @hw: pointer to the HW structure + * + * Read the EEPROM for the default values for flow control and store the + * values. + **/ +static s32 e1000_set_default_fc_generic(struct e1000_hw *hw) +{ + struct e1000_mac_info *mac = &hw->mac; + s32 ret_val; + u16 nvm_data; + + if (mac->fc != e1000_fc_default) + return 0; + + /* Read and store word 0x0F of the EEPROM. This word contains bits + * that determine the hardware's default PAUSE (flow control) mode, + * a bit that determines whether the HW defaults to enabling or + * disabling auto-negotiation, and the direction of the + * SW defined pins. If there is no SW over-ride of the flow + * control setting, then the variable hw->fc will + * be initialized based on a value in the EEPROM. + */ + ret_val = e1000_read_nvm(hw, NVM_INIT_CONTROL2_REG, 1, &nvm_data); + + if (ret_val) { + hw_dbg(hw, "NVM Read Error\n"); + return ret_val; + } + + if ((nvm_data & NVM_WORD0F_PAUSE_MASK) == 0) + mac->fc = e1000_fc_none; + else if ((nvm_data & NVM_WORD0F_PAUSE_MASK) == + NVM_WORD0F_ASM_DIR) + mac->fc = e1000_fc_tx_pause; + else + mac->fc = e1000_fc_full; + + return 0; +} + +/** + * e1000e_setup_link - Setup flow control and link settings + * @hw: pointer to the HW structure + * + * Determines which flow control settings to use, then configures flow + * control. Calls the appropriate media-specific link configuration + * function. Assuming the adapter has a valid link partner, a valid link + * should be established. Assumes the hardware has previously been reset + * and the transmitter and receiver are not enabled. + **/ +s32 e1000e_setup_link(struct e1000_hw *hw) +{ + struct e1000_mac_info *mac = &hw->mac; + s32 ret_val; + + /* In the case of the phy reset being blocked, we already have a link. + * We do not need to set it up again. + */ + if (e1000_check_reset_block(hw)) + return 0; + + /* + * If flow control is set to default, set flow control based on + * the EEPROM flow control settings. 
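The NVM decode in e1000_set_default_fc_generic above boils down to a three-way mapping of the word 0x0F PAUSE bits; restated compactly with the same constants and assignments as the code above:

        /* Condensed restatement of the default flow-control decode above. */
        static void e1000_fc_from_nvm_sketch(struct e1000_mac_info *mac, u16 nvm_data)
        {
                if ((nvm_data & NVM_WORD0F_PAUSE_MASK) == 0)
                        mac->fc = e1000_fc_none;        /* no PAUSE advertised */
                else if ((nvm_data & NVM_WORD0F_PAUSE_MASK) == NVM_WORD0F_ASM_DIR)
                        mac->fc = e1000_fc_tx_pause;    /* asymmetric, Tx only */
                else
                        mac->fc = e1000_fc_full;        /* symmetric Rx/Tx */
        }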
+ */ + if (mac->fc == e1000_fc_default) { + ret_val = e1000_set_default_fc_generic(hw); + if (ret_val) + return ret_val; + } + + /* We want to save off the original Flow Control configuration just + * in case we get disconnected and then reconnected into a different + * hub or switch with different Flow Control capabilities. + */ + mac->original_fc = mac->fc; + + hw_dbg(hw, "After fix-ups FlowControl is now = %x\n", mac->fc); + + /* Call the necessary media_type subroutine to configure the link. */ + ret_val = mac->ops.setup_physical_interface(hw); + if (ret_val) + return ret_val; + + /* Initialize the flow control address, type, and PAUSE timer + * registers to their default values. This is done even if flow + * control is disabled, because it does not hurt anything to + * initialize these registers. + */ + hw_dbg(hw, "Initializing the Flow Control address, type and timer regs\n"); + ew32(FCT, FLOW_CONTROL_TYPE); + ew32(FCAH, FLOW_CONTROL_ADDRESS_HIGH); + ew32(FCAL, FLOW_CONTROL_ADDRESS_LOW); + + ew32(FCTTV, mac->fc_pause_time); + + return e1000e_set_fc_watermarks(hw); +} + +/** + * e1000_commit_fc_settings_generic - Configure flow control + * @hw: pointer to the HW structure + * + * Write the flow control settings to the Transmit Config Word Register (TXCW) + * base on the flow control settings in e1000_mac_info. + **/ +static s32 e1000_commit_fc_settings_generic(struct e1000_hw *hw) +{ + struct e1000_mac_info *mac = &hw->mac; + u32 txcw; + + /* Check for a software override of the flow control settings, and + * setup the device accordingly. If auto-negotiation is enabled, then + * software will have to set the "PAUSE" bits to the correct value in + * the Transmit Config Word Register (TXCW) and re-start auto- + * negotiation. However, if auto-negotiation is disabled, then + * software will have to manually configure the two flow control enable + * bits in the CTRL register. + * + * The possible values of the "fc" parameter are: + * 0: Flow control is completely disabled + * 1: Rx flow control is enabled (we can receive pause frames, + * but not send pause frames). + * 2: Tx flow control is enabled (we can send pause frames but we + * do not support receiving pause frames). + * 3: Both Rx and TX flow control (symmetric) are enabled. + */ + switch (mac->fc) { + case e1000_fc_none: + /* Flow control completely disabled by a software over-ride. */ + txcw = (E1000_TXCW_ANE | E1000_TXCW_FD); + break; + case e1000_fc_rx_pause: + /* RX Flow control is enabled and TX Flow control is disabled + * by a software over-ride. Since there really isn't a way to + * advertise that we are capable of RX Pause ONLY, we will + * advertise that we support both symmetric and asymmetric RX + * PAUSE. Later, we will disable the adapter's ability to send + * PAUSE frames. + */ + txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_PAUSE_MASK); + break; + case e1000_fc_tx_pause: + /* TX Flow control is enabled, and RX Flow control is disabled, + * by a software over-ride. + */ + txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_ASM_DIR); + break; + case e1000_fc_full: + /* Flow control (both RX and TX) is enabled by a software + * over-ride. 
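+ * As in the Rx-pause-only case above, the full TXCW PAUSE mask is + * advertised; the mode actually negotiated is resolved later by + * e1000e_config_fc_after_link_up().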
+ */ + txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_PAUSE_MASK); + break; + default: + hw_dbg(hw, "Flow control param set incorrectly\n"); + return -E1000_ERR_CONFIG; + break; + } + + ew32(TXCW, txcw); + mac->txcw = txcw; + + return 0; +} + +/** + * e1000_poll_fiber_serdes_link_generic - Poll for link up + * @hw: pointer to the HW structure + * + * Polls for link up by reading the status register, if link fails to come + * up with auto-negotiation, then the link is forced if a signal is detected. + **/ +static s32 e1000_poll_fiber_serdes_link_generic(struct e1000_hw *hw) +{ + struct e1000_mac_info *mac = &hw->mac; + u32 i, status; + s32 ret_val; + + /* If we have a signal (the cable is plugged in, or assumed true for + * serdes media) then poll for a "Link-Up" indication in the Device + * Status Register. Time-out if a link isn't seen in 500 milliseconds + * seconds (Auto-negotiation should complete in less than 500 + * milliseconds even if the other end is doing it in SW). + */ + for (i = 0; i < FIBER_LINK_UP_LIMIT; i++) { + msleep(10); + status = er32(STATUS); + if (status & E1000_STATUS_LU) + break; + } + if (i == FIBER_LINK_UP_LIMIT) { + hw_dbg(hw, "Never got a valid link from auto-neg!!!\n"); + mac->autoneg_failed = 1; + /* AutoNeg failed to achieve a link, so we'll call + * mac->check_for_link. This routine will force the + * link up if we detect a signal. This will allow us to + * communicate with non-autonegotiating link partners. + */ + ret_val = mac->ops.check_for_link(hw); + if (ret_val) { + hw_dbg(hw, "Error while checking for link\n"); + return ret_val; + } + mac->autoneg_failed = 0; + } else { + mac->autoneg_failed = 0; + hw_dbg(hw, "Valid Link Found\n"); + } + + return 0; +} + +/** + * e1000e_setup_fiber_serdes_link - Setup link for fiber/serdes + * @hw: pointer to the HW structure + * + * Configures collision distance and flow control for fiber and serdes + * links. Upon successful setup, poll for link. + **/ +s32 e1000e_setup_fiber_serdes_link(struct e1000_hw *hw) +{ + u32 ctrl; + s32 ret_val; + + ctrl = er32(CTRL); + + /* Take the link out of reset */ + ctrl &= ~E1000_CTRL_LRST; + + e1000e_config_collision_dist(hw); + + ret_val = e1000_commit_fc_settings_generic(hw); + if (ret_val) + return ret_val; + + /* Since auto-negotiation is enabled, take the link out of reset (the + * link will be in reset, because we previously reset the chip). This + * will restart auto-negotiation. If auto-negotiation is successful + * then the link-up status bit will be set and the flow control enable + * bits (RFCE and TFCE) will be set according to their negotiated value. + */ + hw_dbg(hw, "Auto-negotiation enabled\n"); + + ew32(CTRL, ctrl); + e1e_flush(); + msleep(1); + + /* For these adapters, the SW defineable pin 1 is set when the optics + * detect a signal. If we have a signal, then poll for a "Link-Up" + * indication. + */ + if (hw->media_type == e1000_media_type_internal_serdes || + (er32(CTRL) & E1000_CTRL_SWDPIN1)) { + ret_val = e1000_poll_fiber_serdes_link_generic(hw); + } else { + hw_dbg(hw, "No signal detected\n"); + } + + return 0; +} + +/** + * e1000e_config_collision_dist - Configure collision distance + * @hw: pointer to the HW structure + * + * Configures the collision distance to the default value and is used + * during link setup. Currently no func pointer exists and all + * implementations are handled in the generic version of this function. 
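+ * The collision distance is programmed into the COLD field of the + * TCTL register.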
+ **/ +void e1000e_config_collision_dist(struct e1000_hw *hw) +{ + u32 tctl; + + tctl = er32(TCTL); + + tctl &= ~E1000_TCTL_COLD; + tctl |= E1000_COLLISION_DISTANCE << E1000_COLD_SHIFT; + + ew32(TCTL, tctl); + e1e_flush(); +} + +/** + * e1000e_set_fc_watermarks - Set flow control high/low watermarks + * @hw: pointer to the HW structure + * + * Sets the flow control high/low threshold (watermark) registers. If + * flow control XON frame transmission is enabled, then set XON frame + * transmission as well. + **/ +s32 e1000e_set_fc_watermarks(struct e1000_hw *hw) +{ + struct e1000_mac_info *mac = &hw->mac; + u32 fcrtl = 0, fcrth = 0; + + /* Set the flow control receive threshold registers. Normally, + * these registers will be set to a default threshold that may be + * adjusted later by the driver's runtime code. However, if the + * ability to transmit pause frames is not enabled, then these + * registers will be set to 0. + */ + if (mac->fc & e1000_fc_tx_pause) { + /* We need to set up the Receive Threshold high and low water + * marks as well as (optionally) enabling the transmission of + * XON frames. + */ + fcrtl = mac->fc_low_water; + fcrtl |= E1000_FCRTL_XONE; + fcrth = mac->fc_high_water; + } + ew32(FCRTL, fcrtl); + ew32(FCRTH, fcrth); + + return 0; +} + +/** + * e1000e_force_mac_fc - Force the MAC's flow control settings + * @hw: pointer to the HW structure + * + * Force the MAC's flow control settings. Sets the TFCE and RFCE bits in the + * device control register to reflect the adapter settings. TFCE and RFCE + * need to be explicitly set by software when a copper PHY is used because + * autonegotiation is managed by the PHY rather than the MAC. Software must + * also configure these bits when link is forced on a fiber connection. + **/ +s32 e1000e_force_mac_fc(struct e1000_hw *hw) +{ + struct e1000_mac_info *mac = &hw->mac; + u32 ctrl; + + ctrl = er32(CTRL); + + /* Because we didn't get link via the internal auto-negotiation + * mechanism (we either forced link or we got link via PHY + * auto-neg), we have to manually enable/disable transmit and + * receive flow control. + * + * The "Case" statement below enables/disables flow control + * according to the "mac->fc" parameter. + * + * The possible values of the "fc" parameter are: + * 0: Flow control is completely disabled + * 1: Rx flow control is enabled (we can receive pause + * frames but not send pause frames). + * 2: Tx flow control is enabled (we can send pause frames + * but we do not receive pause frames). + * 3: Both Rx and TX flow control (symmetric) are enabled. + * other: No other values should be possible at this point. + */ + hw_dbg(hw, "mac->fc = %u\n", mac->fc); + + switch (mac->fc) { + case e1000_fc_none: + ctrl &= (~(E1000_CTRL_TFCE | E1000_CTRL_RFCE)); + break; + case e1000_fc_rx_pause: + ctrl &= (~E1000_CTRL_TFCE); + ctrl |= E1000_CTRL_RFCE; + break; + case e1000_fc_tx_pause: + ctrl &= (~E1000_CTRL_RFCE); + ctrl |= E1000_CTRL_TFCE; + break; + case e1000_fc_full: + ctrl |= (E1000_CTRL_TFCE | E1000_CTRL_RFCE); + break; + default: + hw_dbg(hw, "Flow control param set incorrectly\n"); + return -E1000_ERR_CONFIG; + } + + ew32(CTRL, ctrl); + + return 0; +} + +/** + * e1000e_config_fc_after_link_up - Configures flow control after link + * @hw: pointer to the HW structure + * + * Checks the status of auto-negotiation after link up to ensure that the + * speed and duplex were not forced. If the link needed to be forced, then + * flow control needs to be forced also.
If auto-negotiation is enabled + * and did not fail, then we configure flow control based on our link + * partner. + **/ +s32 e1000e_config_fc_after_link_up(struct e1000_hw *hw) +{ + struct e1000_mac_info *mac = &hw->mac; + s32 ret_val = 0; + u16 mii_status_reg, mii_nway_adv_reg, mii_nway_lp_ability_reg; + u16 speed, duplex; + + /* Check for the case where we have fiber media and auto-neg failed + * so we had to force link. In this case, we need to force the + * configuration of the MAC to match the "fc" parameter. + */ + if (mac->autoneg_failed) { + if (hw->media_type == e1000_media_type_fiber || + hw->media_type == e1000_media_type_internal_serdes) + ret_val = e1000e_force_mac_fc(hw); + } else { + if (hw->media_type == e1000_media_type_copper) + ret_val = e1000e_force_mac_fc(hw); + } + + if (ret_val) { + hw_dbg(hw, "Error forcing flow control settings\n"); + return ret_val; + } + + /* Check for the case where we have copper media and auto-neg is + * enabled. In this case, we need to check and see if Auto-Neg + * has completed, and if so, how the PHY and link partner has + * flow control configured. + */ + if ((hw->media_type == e1000_media_type_copper) && mac->autoneg) { + /* Read the MII Status Register and check to see if AutoNeg + * has completed. We read this twice because this reg has + * some "sticky" (latched) bits. + */ + ret_val = e1e_rphy(hw, PHY_STATUS, &mii_status_reg); + if (ret_val) + return ret_val; + ret_val = e1e_rphy(hw, PHY_STATUS, &mii_status_reg); + if (ret_val) + return ret_val; + + if (!(mii_status_reg & MII_SR_AUTONEG_COMPLETE)) { + hw_dbg(hw, "Copper PHY and Auto Neg " + "has not completed.\n"); + return ret_val; + } + + /* The AutoNeg process has completed, so we now need to + * read both the Auto Negotiation Advertisement + * Register (Address 4) and the Auto_Negotiation Base + * Page Ability Register (Address 5) to determine how + * flow control was negotiated. + */ + ret_val = e1e_rphy(hw, PHY_AUTONEG_ADV, &mii_nway_adv_reg); + if (ret_val) + return ret_val; + ret_val = e1e_rphy(hw, PHY_LP_ABILITY, &mii_nway_lp_ability_reg); + if (ret_val) + return ret_val; + + /* Two bits in the Auto Negotiation Advertisement Register + * (Address 4) and two bits in the Auto Negotiation Base + * Page Ability Register (Address 5) determine flow control + * for both the PHY and the link partner. The following + * table, taken out of the IEEE 802.3ab/D6.0 dated March 25, + * 1999, describes these PAUSE resolution bits and how flow + * control is determined based upon these settings. + * NOTE: DC = Don't Care + * + * LOCAL DEVICE | LINK PARTNER + * PAUSE | ASM_DIR | PAUSE | ASM_DIR | NIC Resolution + *-------|---------|-------|---------|-------------------- + * 0 | 0 | DC | DC | e1000_fc_none + * 0 | 1 | 0 | DC | e1000_fc_none + * 0 | 1 | 1 | 0 | e1000_fc_none + * 0 | 1 | 1 | 1 | e1000_fc_tx_pause + * 1 | 0 | 0 | DC | e1000_fc_none + * 1 | DC | 1 | DC | e1000_fc_full + * 1 | 1 | 0 | 0 | e1000_fc_none + * 1 | 1 | 0 | 1 | e1000_fc_rx_pause + * + */ + /* Are both PAUSE bits set to 1? If so, this implies + * Symmetric Flow Control is enabled at both ends. The + * ASM_DIR bits are irrelevant per the spec. 
+ * + * For Symmetric Flow Control: + * + * LOCAL DEVICE | LINK PARTNER + * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result + *-------|---------|-------|---------|-------------------- + * 1 | DC | 1 | DC | E1000_fc_full + * + */ + if ((mii_nway_adv_reg & NWAY_AR_PAUSE) && + (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE)) { + /* Now we need to check if the user selected RX ONLY + * of pause frames. In this case, we had to advertise + * FULL flow control because we could not advertise RX + * ONLY. Hence, we must now check to see if we need to + * turn OFF the TRANSMISSION of PAUSE frames. + */ + if (mac->original_fc == e1000_fc_full) { + mac->fc = e1000_fc_full; + hw_dbg(hw, "Flow Control = FULL.\r\n"); + } else { + mac->fc = e1000_fc_rx_pause; + hw_dbg(hw, "Flow Control = " + "RX PAUSE frames only.\r\n"); + } + } + /* For receiving PAUSE frames ONLY. + * + * LOCAL DEVICE | LINK PARTNER + * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result + *-------|---------|-------|---------|-------------------- + * 0 | 1 | 1 | 1 | e1000_fc_tx_pause + * + */ + else if (!(mii_nway_adv_reg & NWAY_AR_PAUSE) && + (mii_nway_adv_reg & NWAY_AR_ASM_DIR) && + (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) && + (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) { + mac->fc = e1000_fc_tx_pause; + hw_dbg(hw, "Flow Control = TX PAUSE frames only.\r\n"); + } + /* For transmitting PAUSE frames ONLY. + * + * LOCAL DEVICE | LINK PARTNER + * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result + *-------|---------|-------|---------|-------------------- + * 1 | 1 | 0 | 1 | e1000_fc_rx_pause + * + */ + else if ((mii_nway_adv_reg & NWAY_AR_PAUSE) && + (mii_nway_adv_reg & NWAY_AR_ASM_DIR) && + !(mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) && + (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) { + mac->fc = e1000_fc_rx_pause; + hw_dbg(hw, "Flow Control = RX PAUSE frames only.\r\n"); + } + /* Per the IEEE spec, at this point flow control should be + * disabled. However, we want to consider that we could + * be connected to a legacy switch that doesn't advertise + * desired flow control, but can be forced on the link + * partner. So if we advertised no flow control, that is + * what we will resolve to. If we advertised some kind of + * receive capability (Rx Pause Only or Full Flow Control) + * and the link partner advertised none, we will configure + * ourselves to enable Rx Flow Control only. We can do + * this safely for two reasons: If the link partner really + * didn't want flow control enabled, and we enable Rx, no + * harm done since we won't be receiving any PAUSE frames + * anyway. If the intent on the link partner was to have + * flow control enabled, then by us enabling RX only, we + * can at least receive pause frames and process them. + * This is a good idea because in most cases, since we are + * predominantly a server NIC, more times than not we will + * be asked to delay transmission of packets than asking + * our link partner to pause transmission of frames. + */ + else if ((mac->original_fc == e1000_fc_none) || + (mac->original_fc == e1000_fc_tx_pause)) { + mac->fc = e1000_fc_none; + hw_dbg(hw, "Flow Control = NONE.\r\n"); + } else { + mac->fc = e1000_fc_rx_pause; + hw_dbg(hw, "Flow Control = RX PAUSE frames only.\r\n"); + } + + /* Now we need to do one last check... If we auto- + * negotiated to HALF DUPLEX, flow control should not be + * enabled per IEEE 802.3 spec. 
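+ * PAUSE operation is only defined for full-duplex links, so a + * half-duplex result falls back to e1000_fc_none below.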
+ */ + ret_val = mac->ops.get_link_up_info(hw, &speed, &duplex); + if (ret_val) { + hw_dbg(hw, "Error getting link speed and duplex\n"); + return ret_val; + } + + if (duplex == HALF_DUPLEX) + mac->fc = e1000_fc_none; + + /* Now we call a subroutine to actually force the MAC + * controller to use the correct flow control settings. + */ + ret_val = e1000e_force_mac_fc(hw); + if (ret_val) { + hw_dbg(hw, "Error forcing flow control settings\n"); + return ret_val; + } + } + + return 0; +} + +/** + * e1000e_get_speed_and_duplex_copper - Retreive current speed/duplex + * @hw: pointer to the HW structure + * @speed: stores the current speed + * @duplex: stores the current duplex + * + * Read the status register for the current speed/duplex and store the current + * speed and duplex for copper connections. + **/ +s32 e1000e_get_speed_and_duplex_copper(struct e1000_hw *hw, u16 *speed, u16 *duplex) +{ + u32 status; + + status = er32(STATUS); + if (status & E1000_STATUS_SPEED_1000) { + *speed = SPEED_1000; + hw_dbg(hw, "1000 Mbs, "); + } else if (status & E1000_STATUS_SPEED_100) { + *speed = SPEED_100; + hw_dbg(hw, "100 Mbs, "); + } else { + *speed = SPEED_10; + hw_dbg(hw, "10 Mbs, "); + } + + if (status & E1000_STATUS_FD) { + *duplex = FULL_DUPLEX; + hw_dbg(hw, "Full Duplex\n"); + } else { + *duplex = HALF_DUPLEX; + hw_dbg(hw, "Half Duplex\n"); + } + + return 0; +} + +/** + * e1000e_get_speed_and_duplex_fiber_serdes - Retreive current speed/duplex + * @hw: pointer to the HW structure + * @speed: stores the current speed + * @duplex: stores the current duplex + * + * Sets the speed and duplex to gigabit full duplex (the only possible option) + * for fiber/serdes links. + **/ +s32 e1000e_get_speed_and_duplex_fiber_serdes(struct e1000_hw *hw, u16 *speed, u16 *duplex) +{ + *speed = SPEED_1000; + *duplex = FULL_DUPLEX; + + return 0; +} + +/** + * e1000e_get_hw_semaphore - Acquire hardware semaphore + * @hw: pointer to the HW structure + * + * Acquire the HW semaphore to access the PHY or NVM + **/ +s32 e1000e_get_hw_semaphore(struct e1000_hw *hw) +{ + u32 swsm; + s32 timeout = hw->nvm.word_size + 1; + s32 i = 0; + + /* Get the SW semaphore */ + while (i < timeout) { + swsm = er32(SWSM); + if (!(swsm & E1000_SWSM_SMBI)) + break; + + udelay(50); + i++; + } + + if (i == timeout) { + hw_dbg(hw, "Driver can't access device - SMBI bit is set.\n"); + return -E1000_ERR_NVM; + } + + /* Get the FW semaphore. */ + for (i = 0; i < timeout; i++) { + swsm = er32(SWSM); + ew32(SWSM, swsm | E1000_SWSM_SWESMBI); + + /* Semaphore acquired if bit latched */ + if (er32(SWSM) & E1000_SWSM_SWESMBI) + break; + + udelay(50); + } + + if (i == timeout) { + /* Release semaphores */ + e1000e_put_hw_semaphore(hw); + hw_dbg(hw, "Driver can't access the NVM\n"); + return -E1000_ERR_NVM; + } + + return 0; +} + +/** + * e1000e_put_hw_semaphore - Release hardware semaphore + * @hw: pointer to the HW structure + * + * Release hardware semaphore used to access the PHY or NVM + **/ +void e1000e_put_hw_semaphore(struct e1000_hw *hw) +{ + u32 swsm; + + swsm = er32(SWSM); + swsm &= ~(E1000_SWSM_SMBI | E1000_SWSM_SWESMBI); + ew32(SWSM, swsm); +} + +/** + * e1000e_get_auto_rd_done - Check for auto read completion + * @hw: pointer to the HW structure + * + * Check EEPROM for Auto Read done bit. 
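+ * The Auto Read Done bit in EECD is polled once per millisecond for up + * to AUTO_READ_DONE_TIMEOUT attempts; -E1000_ERR_RESET is returned on + * timeout.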
+ **/ +s32 e1000e_get_auto_rd_done(struct e1000_hw *hw) +{ + s32 i = 0; + + while (i < AUTO_READ_DONE_TIMEOUT) { + if (er32(EECD) & E1000_EECD_AUTO_RD) + break; + msleep(1); + i++; + } + + if (i == AUTO_READ_DONE_TIMEOUT) { + hw_dbg(hw, "Auto read by HW from NVM has not completed.\n"); + return -E1000_ERR_RESET; + } + + return 0; +} + +/** + * e1000e_valid_led_default - Verify a valid default LED config + * @hw: pointer to the HW structure + * @data: pointer to the NVM (EEPROM) + * + * Read the EEPROM for the current default LED configuration. If the + * LED configuration is not valid, set to a valid LED configuration. + **/ +s32 e1000e_valid_led_default(struct e1000_hw *hw, u16 *data) +{ + s32 ret_val; + + ret_val = e1000_read_nvm(hw, NVM_ID_LED_SETTINGS, 1, data); + if (ret_val) { + hw_dbg(hw, "NVM Read Error\n"); + return ret_val; + } + + if (*data == ID_LED_RESERVED_0000 || *data == ID_LED_RESERVED_FFFF) + *data = ID_LED_DEFAULT; + + return 0; +} + +/** + * e1000e_id_led_init - + * @hw: pointer to the HW structure + * + **/ +s32 e1000e_id_led_init(struct e1000_hw *hw) +{ + struct e1000_mac_info *mac = &hw->mac; + s32 ret_val; + const u32 ledctl_mask = 0x000000FF; + const u32 ledctl_on = E1000_LEDCTL_MODE_LED_ON; + const u32 ledctl_off = E1000_LEDCTL_MODE_LED_OFF; + u16 data, i, temp; + const u16 led_mask = 0x0F; + + ret_val = hw->nvm.ops.valid_led_default(hw, &data); + if (ret_val) + return ret_val; + + mac->ledctl_default = er32(LEDCTL); + mac->ledctl_mode1 = mac->ledctl_default; + mac->ledctl_mode2 = mac->ledctl_default; + + for (i = 0; i < 4; i++) { + temp = (data >> (i << 2)) & led_mask; + switch (temp) { + case ID_LED_ON1_DEF2: + case ID_LED_ON1_ON2: + case ID_LED_ON1_OFF2: + mac->ledctl_mode1 &= ~(ledctl_mask << (i << 3)); + mac->ledctl_mode1 |= ledctl_on << (i << 3); + break; + case ID_LED_OFF1_DEF2: + case ID_LED_OFF1_ON2: + case ID_LED_OFF1_OFF2: + mac->ledctl_mode1 &= ~(ledctl_mask << (i << 3)); + mac->ledctl_mode1 |= ledctl_off << (i << 3); + break; + default: + /* Do nothing */ + break; + } + switch (temp) { + case ID_LED_DEF1_ON2: + case ID_LED_ON1_ON2: + case ID_LED_OFF1_ON2: + mac->ledctl_mode2 &= ~(ledctl_mask << (i << 3)); + mac->ledctl_mode2 |= ledctl_on << (i << 3); + break; + case ID_LED_DEF1_OFF2: + case ID_LED_ON1_OFF2: + case ID_LED_OFF1_OFF2: + mac->ledctl_mode2 &= ~(ledctl_mask << (i << 3)); + mac->ledctl_mode2 |= ledctl_off << (i << 3); + break; + default: + /* Do nothing */ + break; + } + } + + return 0; +} + +/** + * e1000e_cleanup_led_generic - Set LED config to default operation + * @hw: pointer to the HW structure + * + * Remove the current LED configuration and set the LED configuration + * to the default value, saved from the EEPROM. + **/ +s32 e1000e_cleanup_led_generic(struct e1000_hw *hw) +{ + ew32(LEDCTL, hw->mac.ledctl_default); + return 0; +} + +/** + * e1000e_blink_led - Blink LED + * @hw: pointer to the HW structure + * + * Blink the led's which are set to be on. 
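+ * For fiber media LED0 is always blinked; otherwise the blink bit is + * set for every LED whose mode in ledctl_mode2 is + * E1000_LEDCTL_MODE_LED_ON.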
+ **/ +s32 e1000e_blink_led(struct e1000_hw *hw) +{ + u32 ledctl_blink = 0; + u32 i; + + if (hw->media_type == e1000_media_type_fiber) { + /* always blink LED0 for PCI-E fiber */ + ledctl_blink = E1000_LEDCTL_LED0_BLINK | + (E1000_LEDCTL_MODE_LED_ON << E1000_LEDCTL_LED0_MODE_SHIFT); + } else { + /* set the blink bit for each LED that's "on" (0x0E) + * in ledctl_mode2 */ + ledctl_blink = hw->mac.ledctl_mode2; + for (i = 0; i < 4; i++) + if (((hw->mac.ledctl_mode2 >> (i * 8)) & 0xFF) == + E1000_LEDCTL_MODE_LED_ON) + ledctl_blink |= (E1000_LEDCTL_LED0_BLINK << + (i * 8)); + } + + ew32(LEDCTL, ledctl_blink); + + return 0; +} + +/** + * e1000e_led_on_generic - Turn LED on + * @hw: pointer to the HW structure + * + * Turn LED on. + **/ +s32 e1000e_led_on_generic(struct e1000_hw *hw) +{ + u32 ctrl; + + switch (hw->media_type) { + case e1000_media_type_fiber: + ctrl = er32(CTRL); + ctrl &= ~E1000_CTRL_SWDPIN0; + ctrl |= E1000_CTRL_SWDPIO0; + ew32(CTRL, ctrl); + break; + case e1000_media_type_copper: + ew32(LEDCTL, hw->mac.ledctl_mode2); + break; + default: + break; + } + + return 0; +} + +/** + * e1000e_led_off_generic - Turn LED off + * @hw: pointer to the HW structure + * + * Turn LED off. + **/ +s32 e1000e_led_off_generic(struct e1000_hw *hw) +{ + u32 ctrl; + + switch (hw->media_type) { + case e1000_media_type_fiber: + ctrl = er32(CTRL); + ctrl |= E1000_CTRL_SWDPIN0; + ctrl |= E1000_CTRL_SWDPIO0; + ew32(CTRL, ctrl); + break; + case e1000_media_type_copper: + ew32(LEDCTL, hw->mac.ledctl_mode1); + break; + default: + break; + } + + return 0; +} + +/** + * e1000e_set_pcie_no_snoop - Set PCI-express capabilities + * @hw: pointer to the HW structure + * @no_snoop: bitmap of snoop events + * + * Set the PCI-express register to snoop for events enabled in 'no_snoop'. + **/ +void e1000e_set_pcie_no_snoop(struct e1000_hw *hw, u32 no_snoop) +{ + u32 gcr; + + if (no_snoop) { + gcr = er32(GCR); + gcr &= ~(PCIE_NO_SNOOP_ALL); + gcr |= no_snoop; + ew32(GCR, gcr); + } +} + +/** + * e1000e_disable_pcie_master - Disables PCI-express master access + * @hw: pointer to the HW structure + * + * Returns 0 if successful, else returns -10 + * (-E1000_ERR_MASTER_REQUESTS_PENDING) if master disable bit has not casued + * the master requests to be disabled. + * + * Disables PCI-Express master access and verifies there are no pending + * requests. + **/ +s32 e1000e_disable_pcie_master(struct e1000_hw *hw) +{ + u32 ctrl; + s32 timeout = MASTER_DISABLE_TIMEOUT; + + ctrl = er32(CTRL); + ctrl |= E1000_CTRL_GIO_MASTER_DISABLE; + ew32(CTRL, ctrl); + + while (timeout) { + if (!(er32(STATUS) & + E1000_STATUS_GIO_MASTER_ENABLE)) + break; + udelay(100); + timeout--; + } + + if (!timeout) { + hw_dbg(hw, "Master requests are pending.\n"); + return -E1000_ERR_MASTER_REQUESTS_PENDING; + } + + return 0; +} + +/** + * e1000e_reset_adaptive - Reset Adaptive Interframe Spacing + * @hw: pointer to the HW structure + * + * Reset the Adaptive Interframe Spacing throttle to default values. + **/ +void e1000e_reset_adaptive(struct e1000_hw *hw) +{ + struct e1000_mac_info *mac = &hw->mac; + + mac->current_ifs_val = 0; + mac->ifs_min_val = IFS_MIN; + mac->ifs_max_val = IFS_MAX; + mac->ifs_step_size = IFS_STEP; + mac->ifs_ratio = IFS_RATIO; + + mac->in_ifs_mode = 0; + ew32(AIT, 0); +} + +/** + * e1000e_update_adaptive - Update Adaptive Interframe Spacing + * @hw: pointer to the HW structure + * + * Update the Adaptive Interframe Spacing Throttle value based on the + * time between transmitted packets and time between collisions. 
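+ * The throttle value is written to the AIT register; it is increased + * in ifs_step_size increments while the collision rate stays high and + * is cleared again once transmits drop to MIN_NUM_XMITS or fewer.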
+ **/ +void e1000e_update_adaptive(struct e1000_hw *hw) +{ + struct e1000_mac_info *mac = &hw->mac; + + if ((mac->collision_delta * mac->ifs_ratio) > mac->tx_packet_delta) { + if (mac->tx_packet_delta > MIN_NUM_XMITS) { + mac->in_ifs_mode = 1; + if (mac->current_ifs_val < mac->ifs_max_val) { + if (!mac->current_ifs_val) + mac->current_ifs_val = mac->ifs_min_val; + else + mac->current_ifs_val += + mac->ifs_step_size; + ew32(AIT, + mac->current_ifs_val); + } + } + } else { + if (mac->in_ifs_mode && + (mac->tx_packet_delta <= MIN_NUM_XMITS)) { + mac->current_ifs_val = 0; + mac->in_ifs_mode = 0; + ew32(AIT, 0); + } + } +} + +/** + * e1000_raise_eec_clk - Raise EEPROM clock + * @hw: pointer to the HW structure + * @eecd: pointer to the EEPROM + * + * Enable/Raise the EEPROM clock bit. + **/ +static void e1000_raise_eec_clk(struct e1000_hw *hw, u32 *eecd) +{ + *eecd = *eecd | E1000_EECD_SK; + ew32(EECD, *eecd); + e1e_flush(); + udelay(hw->nvm.delay_usec); +} + +/** + * e1000_lower_eec_clk - Lower EEPROM clock + * @hw: pointer to the HW structure + * @eecd: pointer to the EEPROM + * + * Clear/Lower the EEPROM clock bit. + **/ +static void e1000_lower_eec_clk(struct e1000_hw *hw, u32 *eecd) +{ + *eecd = *eecd & ~E1000_EECD_SK; + ew32(EECD, *eecd); + e1e_flush(); + udelay(hw->nvm.delay_usec); +} + +/** + * e1000_shift_out_eec_bits - Shift data bits our to the EEPROM + * @hw: pointer to the HW structure + * @data: data to send to the EEPROM + * @count: number of bits to shift out + * + * We need to shift 'count' bits out to the EEPROM. So, the value in the + * "data" parameter will be shifted out to the EEPROM one bit at a time. + * In order to do this, "data" must be broken down into bits. + **/ +static void e1000_shift_out_eec_bits(struct e1000_hw *hw, u16 data, u16 count) +{ + struct e1000_nvm_info *nvm = &hw->nvm; + u32 eecd = er32(EECD); + u32 mask; + + mask = 0x01 << (count - 1); + if (nvm->type == e1000_nvm_eeprom_spi) + eecd |= E1000_EECD_DO; + + do { + eecd &= ~E1000_EECD_DI; + + if (data & mask) + eecd |= E1000_EECD_DI; + + ew32(EECD, eecd); + e1e_flush(); + + udelay(nvm->delay_usec); + + e1000_raise_eec_clk(hw, &eecd); + e1000_lower_eec_clk(hw, &eecd); + + mask >>= 1; + } while (mask); + + eecd &= ~E1000_EECD_DI; + ew32(EECD, eecd); +} + +/** + * e1000_shift_in_eec_bits - Shift data bits in from the EEPROM + * @hw: pointer to the HW structure + * @count: number of bits to shift in + * + * In order to read a register from the EEPROM, we need to shift 'count' bits + * in from the EEPROM. Bits are "shifted in" by raising the clock input to + * the EEPROM (setting the SK bit), and then reading the value of the data out + * "DO" bit. During this "shifting in" process the data in "DI" bit should + * always be clear. + **/ +static u16 e1000_shift_in_eec_bits(struct e1000_hw *hw, u16 count) +{ + u32 eecd; + u32 i; + u16 data; + + eecd = er32(EECD); + + eecd &= ~(E1000_EECD_DO | E1000_EECD_DI); + data = 0; + + for (i = 0; i < count; i++) { + data <<= 1; + e1000_raise_eec_clk(hw, &eecd); + + eecd = er32(EECD); + + eecd &= ~E1000_EECD_DI; + if (eecd & E1000_EECD_DO) + data |= 1; + + e1000_lower_eec_clk(hw, &eecd); + } + + return data; +} + +/** + * e1000e_poll_eerd_eewr_done - Poll for EEPROM read/write completion + * @hw: pointer to the HW structure + * @ee_reg: EEPROM flag for polling + * + * Polls the EEPROM status bit for either read or write completion based + * upon the value of 'ee_reg'. 
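+ * The DONE bit is sampled up to 100000 times with a 5 usec delay + * between reads; -E1000_ERR_NVM is returned if it never asserts.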
+ **/ +s32 e1000e_poll_eerd_eewr_done(struct e1000_hw *hw, int ee_reg) +{ + u32 attempts = 100000; + u32 i, reg = 0; + + for (i = 0; i < attempts; i++) { + if (ee_reg == E1000_NVM_POLL_READ) + reg = er32(EERD); + else + reg = er32(EEWR); + + if (reg & E1000_NVM_RW_REG_DONE) + return 0; + + udelay(5); + } + + return -E1000_ERR_NVM; +} + +/** + * e1000e_acquire_nvm - Generic request for access to EEPROM + * @hw: pointer to the HW structure + * + * Set the EEPROM access request bit and wait for EEPROM access grant bit. + * Return successful if access grant bit set, else clear the request for + * EEPROM access and return -E1000_ERR_NVM (-1). + **/ +s32 e1000e_acquire_nvm(struct e1000_hw *hw) +{ + u32 eecd = er32(EECD); + s32 timeout = E1000_NVM_GRANT_ATTEMPTS; + + ew32(EECD, eecd | E1000_EECD_REQ); + eecd = er32(EECD); + + while (timeout) { + if (eecd & E1000_EECD_GNT) + break; + udelay(5); + eecd = er32(EECD); + timeout--; + } + + if (!timeout) { + eecd &= ~E1000_EECD_REQ; + ew32(EECD, eecd); + hw_dbg(hw, "Could not acquire NVM grant\n"); + return -E1000_ERR_NVM; + } + + return 0; +} + +/** + * e1000_standby_nvm - Return EEPROM to standby state + * @hw: pointer to the HW structure + * + * Return the EEPROM to a standby state. + **/ +static void e1000_standby_nvm(struct e1000_hw *hw) +{ + struct e1000_nvm_info *nvm = &hw->nvm; + u32 eecd = er32(EECD); + + if (nvm->type == e1000_nvm_eeprom_spi) { + /* Toggle CS to flush commands */ + eecd |= E1000_EECD_CS; + ew32(EECD, eecd); + e1e_flush(); + udelay(nvm->delay_usec); + eecd &= ~E1000_EECD_CS; + ew32(EECD, eecd); + e1e_flush(); + udelay(nvm->delay_usec); + } +} + +/** + * e1000_stop_nvm - Terminate EEPROM command + * @hw: pointer to the HW structure + * + * Terminates the current command by inverting the EEPROM's chip select pin. + **/ +static void e1000_stop_nvm(struct e1000_hw *hw) +{ + u32 eecd; + + eecd = er32(EECD); + if (hw->nvm.type == e1000_nvm_eeprom_spi) { + /* Pull CS high */ + eecd |= E1000_EECD_CS; + e1000_lower_eec_clk(hw, &eecd); + } +} + +/** + * e1000e_release_nvm - Release exclusive access to EEPROM + * @hw: pointer to the HW structure + * + * Stop any current commands to the EEPROM and clear the EEPROM request bit. + **/ +void e1000e_release_nvm(struct e1000_hw *hw) +{ + u32 eecd; + + e1000_stop_nvm(hw); + + eecd = er32(EECD); + eecd &= ~E1000_EECD_REQ; + ew32(EECD, eecd); +} + +/** + * e1000_ready_nvm_eeprom - Prepares EEPROM for read/write + * @hw: pointer to the HW structure + * + * Setups the EEPROM for reading and writing. + **/ +static s32 e1000_ready_nvm_eeprom(struct e1000_hw *hw) +{ + struct e1000_nvm_info *nvm = &hw->nvm; + u32 eecd = er32(EECD); + u16 timeout = 0; + u8 spi_stat_reg; + + if (nvm->type == e1000_nvm_eeprom_spi) { + /* Clear SK and CS */ + eecd &= ~(E1000_EECD_CS | E1000_EECD_SK); + ew32(EECD, eecd); + udelay(1); + timeout = NVM_MAX_RETRY_SPI; + + /* Read "Status Register" repeatedly until the LSB is cleared. + * The EEPROM will signal that the command has been completed + * by clearing bit 0 of the internal status register. If it's + * not cleared within 'timeout', then error out. 
*/ + while (timeout) { + e1000_shift_out_eec_bits(hw, NVM_RDSR_OPCODE_SPI, + hw->nvm.opcode_bits); + spi_stat_reg = (u8)e1000_shift_in_eec_bits(hw, 8); + if (!(spi_stat_reg & NVM_STATUS_RDY_SPI)) + break; + + udelay(5); + e1000_standby_nvm(hw); + timeout--; + } + + if (!timeout) { + hw_dbg(hw, "SPI NVM Status error\n"); + return -E1000_ERR_NVM; + } + } + + return 0; +} + +/** + * e1000e_read_nvm_spi - Read EEPROM's using SPI + * @hw: pointer to the HW structure + * @offset: offset of word in the EEPROM to read + * @words: number of words to read + * @data: word read from the EEPROM + * + * Reads a 16 bit word from the EEPROM. + **/ +s32 e1000e_read_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data) +{ + struct e1000_nvm_info *nvm = &hw->nvm; + u32 i = 0; + s32 ret_val; + u16 word_in; + u8 read_opcode = NVM_READ_OPCODE_SPI; + + /* A check for invalid values: offset too large, too many words, + * and not enough words. */ + if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) || + (words == 0)) { + hw_dbg(hw, "nvm parameter(s) out of bounds\n"); + return -E1000_ERR_NVM; + } + + ret_val = nvm->ops.acquire_nvm(hw); + if (ret_val) + return ret_val; + + ret_val = e1000_ready_nvm_eeprom(hw); + if (ret_val) { + nvm->ops.release_nvm(hw); + return ret_val; + } + + e1000_standby_nvm(hw); + + if ((nvm->address_bits == 8) && (offset >= 128)) + read_opcode |= NVM_A8_OPCODE_SPI; + + /* Send the READ command (opcode + addr) */ + e1000_shift_out_eec_bits(hw, read_opcode, nvm->opcode_bits); + e1000_shift_out_eec_bits(hw, (u16)(offset*2), nvm->address_bits); + + /* Read the data. SPI NVMs increment the address with each byte + * read and will roll over if reading beyond the end. This allows + * us to read the whole NVM from any offset */ + for (i = 0; i < words; i++) { + word_in = e1000_shift_in_eec_bits(hw, 16); + data[i] = (word_in >> 8) | (word_in << 8); + } + + nvm->ops.release_nvm(hw); + return 0; +} + +/** + * e1000e_read_nvm_eerd - Reads EEPROM using EERD register + * @hw: pointer to the HW structure + * @offset: offset of word in the EEPROM to read + * @words: number of words to read + * @data: word read from the EEPROM + * + * Reads a 16 bit word from the EEPROM using the EERD register. + **/ +s32 e1000e_read_nvm_eerd(struct e1000_hw *hw, u16 offset, u16 words, u16 *data) +{ + struct e1000_nvm_info *nvm = &hw->nvm; + u32 i, eerd = 0; + s32 ret_val = 0; + + /* A check for invalid values: offset too large, too many words, + * and not enough words. */ + if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) || + (words == 0)) { + hw_dbg(hw, "nvm parameter(s) out of bounds\n"); + return -E1000_ERR_NVM; + } + + for (i = 0; i < words; i++) { + eerd = ((offset+i) << E1000_NVM_RW_ADDR_SHIFT) + + E1000_NVM_RW_REG_START; + + ew32(EERD, eerd); + ret_val = e1000e_poll_eerd_eewr_done(hw, E1000_NVM_POLL_READ); + if (ret_val) + break; + + data[i] = (er32(EERD) >> + E1000_NVM_RW_REG_DATA); + } + + return ret_val; +} + +/** + * e1000e_write_nvm_spi - Write to EEPROM using SPI + * @hw: pointer to the HW structure + * @offset: offset within the EEPROM to be written to + * @words: number of words to write + * @data: 16 bit word(s) to be written to the EEPROM + * + * Writes data to EEPROM at offset using SPI interface. + * + * If e1000e_update_nvm_checksum is not called after this function , the + * EEPROM will most likley contain an invalid checksum. 
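+ * Each pass of the outer loop below issues a WREN, then a WRITE opcode + * and address, and streams out words until a page boundary is reached.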
+ **/ +s32 e1000e_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data) +{ + struct e1000_nvm_info *nvm = &hw->nvm; + s32 ret_val; + u16 widx = 0; + + /* A check for invalid values: offset too large, too many words, + * and not enough words. */ + if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) || + (words == 0)) { + hw_dbg(hw, "nvm parameter(s) out of bounds\n"); + return -E1000_ERR_NVM; + } + + ret_val = nvm->ops.acquire_nvm(hw); + if (ret_val) + return ret_val; + + msleep(10); + + while (widx < words) { + u8 write_opcode = NVM_WRITE_OPCODE_SPI; + + ret_val = e1000_ready_nvm_eeprom(hw); + if (ret_val) { + nvm->ops.release_nvm(hw); + return ret_val; + } + + e1000_standby_nvm(hw); + + /* Send the WRITE ENABLE command (8 bit opcode) */ + e1000_shift_out_eec_bits(hw, NVM_WREN_OPCODE_SPI, + nvm->opcode_bits); + + e1000_standby_nvm(hw); + + /* Some SPI eeproms use the 8th address bit embedded in the + * opcode */ + if ((nvm->address_bits == 8) && (offset >= 128)) + write_opcode |= NVM_A8_OPCODE_SPI; + + /* Send the Write command (8-bit opcode + addr) */ + e1000_shift_out_eec_bits(hw, write_opcode, nvm->opcode_bits); + e1000_shift_out_eec_bits(hw, (u16)((offset + widx) * 2), + nvm->address_bits); + + /* Loop to allow for up to whole page write of eeprom */ + while (widx < words) { + u16 word_out = data[widx]; + word_out = (word_out >> 8) | (word_out << 8); + e1000_shift_out_eec_bits(hw, word_out, 16); + widx++; + + if ((((offset + widx) * 2) % nvm->page_size) == 0) { + e1000_standby_nvm(hw); + break; + } + } + } + + msleep(10); + return 0; +} + +/** + * e1000e_read_mac_addr - Read device MAC address + * @hw: pointer to the HW structure + * + * Reads the device MAC address from the EEPROM and stores the value. + * Since devices with two ports use the same EEPROM, we increment the + * last bit in the MAC address for the second port. + **/ +s32 e1000e_read_mac_addr(struct e1000_hw *hw) +{ + s32 ret_val; + u16 offset, nvm_data, i; + + for (i = 0; i < ETH_ALEN; i += 2) { + offset = i >> 1; + ret_val = e1000_read_nvm(hw, offset, 1, &nvm_data); + if (ret_val) { + hw_dbg(hw, "NVM Read Error\n"); + return ret_val; + } + hw->mac.perm_addr[i] = (u8)(nvm_data & 0xFF); + hw->mac.perm_addr[i+1] = (u8)(nvm_data >> 8); + } + + /* Flip last bit of mac address if we're on second port */ + if (hw->bus.func == E1000_FUNC_1) + hw->mac.perm_addr[5] ^= 1; + + for (i = 0; i < ETH_ALEN; i++) + hw->mac.addr[i] = hw->mac.perm_addr[i]; + + return 0; +} + +/** + * e1000e_validate_nvm_checksum_generic - Validate EEPROM checksum + * @hw: pointer to the HW structure + * + * Calculates the EEPROM checksum by reading/adding each word of the EEPROM + * and then verifies that the sum of the EEPROM is equal to 0xBABA. + **/ +s32 e1000e_validate_nvm_checksum_generic(struct e1000_hw *hw) +{ + s32 ret_val; + u16 checksum = 0; + u16 i, nvm_data; + + for (i = 0; i < (NVM_CHECKSUM_REG + 1); i++) { + ret_val = e1000_read_nvm(hw, i, 1, &nvm_data); + if (ret_val) { + hw_dbg(hw, "NVM Read Error\n"); + return ret_val; + } + checksum += nvm_data; + } + + if (checksum != (u16) NVM_SUM) { + hw_dbg(hw, "NVM Checksum Invalid\n"); + return -E1000_ERR_NVM; + } + + return 0; +} + +/** + * e1000e_update_nvm_checksum_generic - Update EEPROM checksum + * @hw: pointer to the HW structure + * + * Updates the EEPROM checksum by reading/adding each word of the EEPROM + * up to the checksum. Then calculates the EEPROM checksum and writes the + * value to the EEPROM. 
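+ * The word written is NVM_SUM (0xBABA) minus the running sum, so that + * the words up to and including NVM_CHECKSUM_REG again total NVM_SUM.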
+ **/ +s32 e1000e_update_nvm_checksum_generic(struct e1000_hw *hw) +{ + s32 ret_val; + u16 checksum = 0; + u16 i, nvm_data; + + for (i = 0; i < NVM_CHECKSUM_REG; i++) { + ret_val = e1000_read_nvm(hw, i, 1, &nvm_data); + if (ret_val) { + hw_dbg(hw, "NVM Read Error while updating checksum.\n"); + return ret_val; + } + checksum += nvm_data; + } + checksum = (u16) NVM_SUM - checksum; + ret_val = e1000_write_nvm(hw, NVM_CHECKSUM_REG, 1, &checksum); + if (ret_val) + hw_dbg(hw, "NVM Write Error while updating checksum.\n"); + + return ret_val; +} + +/** + * e1000e_reload_nvm - Reloads EEPROM + * @hw: pointer to the HW structure + * + * Reloads the EEPROM by setting the "Reinitialize from EEPROM" bit in the + * extended control register. + **/ +void e1000e_reload_nvm(struct e1000_hw *hw) +{ + u32 ctrl_ext; + + udelay(10); + ctrl_ext = er32(CTRL_EXT); + ctrl_ext |= E1000_CTRL_EXT_EE_RST; + ew32(CTRL_EXT, ctrl_ext); + e1e_flush(); +} + +/** + * e1000_calculate_checksum - Calculate checksum for buffer + * @buffer: pointer to EEPROM + * @length: size of EEPROM to calculate a checksum for + * + * Calculates the checksum of a buffer over the specified length. The + * calculated checksum is returned. + **/ +static u8 e1000_calculate_checksum(u8 *buffer, u32 length) +{ + u32 i; + u8 sum = 0; + + if (!buffer) + return 0; + + for (i = 0; i < length; i++) + sum += buffer[i]; + + return (u8) (0 - sum); +} + +/** + * e1000_mng_enable_host_if - Checks host interface is enabled + * @hw: pointer to the HW structure + * + * Returns 0 upon success, else -E1000_ERR_HOST_INTERFACE_COMMAND + * + * This function checks whether the HOST IF is enabled for command operation + * and also checks whether the previous command is completed. It busy-waits + * in case the previous command is not yet completed. + **/ +static s32 e1000_mng_enable_host_if(struct e1000_hw *hw) +{ + u32 hicr; + u8 i; + + /* Check that the host interface is enabled. */ + hicr = er32(HICR); + if ((hicr & E1000_HICR_EN) == 0) { + hw_dbg(hw, "E1000_HOST_EN bit disabled.\n"); + return -E1000_ERR_HOST_INTERFACE_COMMAND; + } + /* check the previous command is completed */ + for (i = 0; i < E1000_MNG_DHCP_COMMAND_TIMEOUT; i++) { + hicr = er32(HICR); + if (!(hicr & E1000_HICR_C)) + break; + mdelay(1); + } + + if (i == E1000_MNG_DHCP_COMMAND_TIMEOUT) { + hw_dbg(hw, "Previous command timeout failed.\n"); + return -E1000_ERR_HOST_INTERFACE_COMMAND; + } + + return 0; +} + +/** + * e1000e_check_mng_mode - check management mode + * @hw: pointer to the HW structure + * + * Reads the firmware semaphore register and returns true (>0) if + * manageability is enabled, else false (0). + **/ +bool e1000e_check_mng_mode(struct e1000_hw *hw) +{ + u32 fwsm = er32(FWSM); + + return (fwsm & E1000_FWSM_MODE_MASK) == hw->mac.ops.mng_mode_enab; +} + +/** + * e1000e_enable_tx_pkt_filtering - Enable packet filtering on TX + * @hw: pointer to the HW structure + * + * Enables packet filtering on transmit packets if manageability is enabled + * and host interface is enabled. + **/ +bool e1000e_enable_tx_pkt_filtering(struct e1000_hw *hw) +{ + struct e1000_host_mng_dhcp_cookie *hdr = &hw->mng_cookie; + u32 *buffer = (u32 *)&hw->mng_cookie; + u32 offset; + s32 ret_val, hdr_csum, csum; + u8 i, len; + + /* No manageability, no filtering */ + if (!e1000e_check_mng_mode(hw)) { + hw->mac.tx_pkt_filtering = 0; + return 0; + } + + /* If we can't read from the host interface for whatever + * reason, disable filtering.
+ */ + ret_val = e1000_mng_enable_host_if(hw); + if (ret_val != 0) { + hw->mac.tx_pkt_filtering = 0; + return ret_val; + } + + /* Read in the header. Length and offset are in dwords. */ + len = E1000_MNG_DHCP_COOKIE_LENGTH >> 2; + offset = E1000_MNG_DHCP_COOKIE_OFFSET >> 2; + for (i = 0; i < len; i++) + *(buffer + i) = E1000_READ_REG_ARRAY(hw, E1000_HOST_IF, offset + i); + hdr_csum = hdr->checksum; + hdr->checksum = 0; + csum = e1000_calculate_checksum((u8 *)hdr, + E1000_MNG_DHCP_COOKIE_LENGTH); + /* If either the checksums or signature don't match, then + * the cookie area isn't considered valid, in which case we + * take the safe route of assuming Tx filtering is enabled. + */ + if ((hdr_csum != csum) || (hdr->signature != E1000_IAMT_SIGNATURE)) { + hw->mac.tx_pkt_filtering = 1; + return 1; + } + + /* Cookie area is valid, make the final check for filtering. */ + if (!(hdr->status & E1000_MNG_DHCP_COOKIE_STATUS_PARSING)) { + hw->mac.tx_pkt_filtering = 0; + return 0; + } + + hw->mac.tx_pkt_filtering = 1; + return 1; +} + +/** + * e1000_mng_write_cmd_header - Writes manageability command header + * @hw: pointer to the HW structure + * @hdr: pointer to the host interface command header + * + * Writes the command header after does the checksum calculation. + **/ +static s32 e1000_mng_write_cmd_header(struct e1000_hw *hw, + struct e1000_host_mng_command_header *hdr) +{ + u16 i, length = sizeof(struct e1000_host_mng_command_header); + + /* Write the whole command header structure with new checksum. */ + + hdr->checksum = e1000_calculate_checksum((u8 *)hdr, length); + + length >>= 2; + /* Write the relevant command block into the ram area. */ + for (i = 0; i < length; i++) { + E1000_WRITE_REG_ARRAY(hw, E1000_HOST_IF, i, + *((u32 *) hdr + i)); + e1e_flush(); + } + + return 0; +} + +/** + * e1000_mng_host_if_write - Writes to the manageability host interface + * @hw: pointer to the HW structure + * @buffer: pointer to the host interface buffer + * @length: size of the buffer + * @offset: location in the buffer to write to + * @sum: sum of the data (not checksum) + * + * This function writes the buffer content at the offset given on the host if. + * It also does alignment considerations to do the writes in most efficient + * way. Also fills up the sum of the buffer in *buffer parameter. + **/ +static s32 e1000_mng_host_if_write(struct e1000_hw *hw, u8 *buffer, + u16 length, u16 offset, u8 *sum) +{ + u8 *tmp; + u8 *bufptr = buffer; + u32 data = 0; + u16 remaining, i, j, prev_bytes; + + /* sum = only sum of the data and it is not checksum */ + + if (length == 0 || offset + length > E1000_HI_MAX_MNG_DATA_LENGTH) + return -E1000_ERR_PARAM; + + tmp = (u8 *)&data; + prev_bytes = offset & 0x3; + offset >>= 2; + + if (prev_bytes) { + data = E1000_READ_REG_ARRAY(hw, E1000_HOST_IF, offset); + for (j = prev_bytes; j < sizeof(u32); j++) { + *(tmp + j) = *bufptr++; + *sum += *(tmp + j); + } + E1000_WRITE_REG_ARRAY(hw, E1000_HOST_IF, offset, data); + length -= j - prev_bytes; + offset++; + } + + remaining = length & 0x3; + length -= remaining; + + /* Calculate length in DWORDs */ + length >>= 2; + + /* The device driver writes the relevant command block into the + * ram area. 
*/ + for (i = 0; i < length; i++) { + for (j = 0; j < sizeof(u32); j++) { + *(tmp + j) = *bufptr++; + *sum += *(tmp + j); + } + + E1000_WRITE_REG_ARRAY(hw, E1000_HOST_IF, offset + i, data); + } + if (remaining) { + for (j = 0; j < sizeof(u32); j++) { + if (j < remaining) + *(tmp + j) = *bufptr++; + else + *(tmp + j) = 0; + + *sum += *(tmp + j); + } + E1000_WRITE_REG_ARRAY(hw, E1000_HOST_IF, offset + i, data); + } + + return 0; +} + +/** + * e1000e_mng_write_dhcp_info - Writes DHCP info to host interface + * @hw: pointer to the HW structure + * @buffer: pointer to the host interface + * @length: size of the buffer + * + * Writes the DHCP information to the host interface. + **/ +s32 e1000e_mng_write_dhcp_info(struct e1000_hw *hw, u8 *buffer, u16 length) +{ + struct e1000_host_mng_command_header hdr; + s32 ret_val; + u32 hicr; + + hdr.command_id = E1000_MNG_DHCP_TX_PAYLOAD_CMD; + hdr.command_length = length; + hdr.reserved1 = 0; + hdr.reserved2 = 0; + hdr.checksum = 0; + + /* Enable the host interface */ + ret_val = e1000_mng_enable_host_if(hw); + if (ret_val) + return ret_val; + + /* Populate the host interface with the contents of "buffer". */ + ret_val = e1000_mng_host_if_write(hw, buffer, length, + sizeof(hdr), &(hdr.checksum)); + if (ret_val) + return ret_val; + + /* Write the manageability command header */ + ret_val = e1000_mng_write_cmd_header(hw, &hdr); + if (ret_val) + return ret_val; + + /* Tell the ARC a new command is pending. */ + hicr = er32(HICR); + ew32(HICR, hicr | E1000_HICR_C); + + return 0; +} + +/** + * e1000e_enable_mng_pass_thru - Enable processing of ARP's + * @hw: pointer to the HW structure + * + * Verifies the hardware needs to allow ARPs to be processed by the host. + **/ +bool e1000e_enable_mng_pass_thru(struct e1000_hw *hw) +{ + u32 manc; + u32 fwsm, factps; + bool ret_val = 0; + + manc = er32(MANC); + + if (!(manc & E1000_MANC_RCV_TCO_EN) || + !(manc & E1000_MANC_EN_MAC_ADDR_FILTER)) + return ret_val; + + if (hw->mac.arc_subsystem_valid) { + fwsm = er32(FWSM); + factps = er32(FACTPS); + + if (!(factps & E1000_FACTPS_MNGCG) && + ((fwsm & E1000_FWSM_MODE_MASK) == + (e1000_mng_mode_pt << E1000_FWSM_MODE_SHIFT))) { + ret_val = 1; + return ret_val; + } + } else { + if ((manc & E1000_MANC_SMBUS_EN) && + !(manc & E1000_MANC_ASF_EN)) { + ret_val = 1; + return ret_val; + } + } + + return ret_val; +} + +s32 e1000e_read_part_num(struct e1000_hw *hw, u32 *part_num) +{ + s32 ret_val; + u16 nvm_data; + + ret_val = e1000_read_nvm(hw, NVM_PBA_OFFSET_0, 1, &nvm_data); + if (ret_val) { + hw_dbg(hw, "NVM Read Error\n"); + return ret_val; + } + *part_num = (u32)(nvm_data << 16); + + ret_val = e1000_read_nvm(hw, NVM_PBA_OFFSET_1, 1, &nvm_data); + if (ret_val) { + hw_dbg(hw, "NVM Read Error\n"); + return ret_val; + } + *part_num |= nvm_data; + + return 0; +} diff --git a/drivers/net/e1000e/netdev.c b/drivers/net/e1000e/netdev.c new file mode 100644 index 0000000000000000000000000000000000000000..033e124d1c1f5424c70e633f726d92f4c68d4c7d --- /dev/null +++ b/drivers/net/e1000e/netdev.c @@ -0,0 +1,4438 @@ +/******************************************************************************* + + Intel PRO/1000 Linux driver + Copyright(c) 1999 - 2007 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. 
+ + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + Linux NICS + e1000-devel Mailing List + Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "e1000.h" + +#define DRV_VERSION "0.2.0" +char e1000e_driver_name[] = "e1000e"; +const char e1000e_driver_version[] = DRV_VERSION; + +static const struct e1000_info *e1000_info_tbl[] = { + [board_82571] = &e1000_82571_info, + [board_82572] = &e1000_82572_info, + [board_82573] = &e1000_82573_info, + [board_80003es2lan] = &e1000_es2_info, + [board_ich8lan] = &e1000_ich8_info, + [board_ich9lan] = &e1000_ich9_info, +}; + +#ifdef DEBUG +/** + * e1000_get_hw_dev_name - return device name string + * used by hardware layer to print debugging information + **/ +char *e1000e_get_hw_dev_name(struct e1000_hw *hw) +{ + return hw->adapter->netdev->name; +} +#endif + +/** + * e1000_desc_unused - calculate if we have unused descriptors + **/ +static int e1000_desc_unused(struct e1000_ring *ring) +{ + if (ring->next_to_clean > ring->next_to_use) + return ring->next_to_clean - ring->next_to_use - 1; + + return ring->count + ring->next_to_clean - ring->next_to_use - 1; +} + +/** + * e1000_receive_skb - helper function to handle rx indications + * @adapter: board private structure + * @status: descriptor status field as written by hardware + * @vlan: descriptor vlan field as written by hardware (no le/be conversion) + * @skb: pointer to sk_buff to be indicated to stack + **/ +static void e1000_receive_skb(struct e1000_adapter *adapter, + struct net_device *netdev, + struct sk_buff *skb, + u8 status, u16 vlan) +{ + skb->protocol = eth_type_trans(skb, netdev); + + if (adapter->vlgrp && (status & E1000_RXD_STAT_VP)) + vlan_hwaccel_receive_skb(skb, adapter->vlgrp, + le16_to_cpu(vlan) & + E1000_RXD_SPC_VLAN_MASK); + else + netif_receive_skb(skb); + + netdev->last_rx = jiffies; +} + +/** + * e1000_rx_checksum - Receive Checksum Offload for 82543 + * @adapter: board private structure + * @status_err: receive descriptor status and error fields + * @csum: receive descriptor csum field + * @sk_buff: socket buffer with received data + **/ +static void e1000_rx_checksum(struct e1000_adapter *adapter, u32 status_err, + u32 csum, struct sk_buff *skb) +{ + u16 status = (u16)status_err; + u8 errors = (u8)(status_err >> 24); + skb->ip_summed = CHECKSUM_NONE; + + /* Ignore Checksum bit is set */ + if (status & E1000_RXD_STAT_IXSM) + return; + /* TCP/UDP checksum error bit is set */ + if (errors & E1000_RXD_ERR_TCPE) { + /* let the stack verify checksum errors */ + adapter->hw_csum_err++; + return; + } + + /* TCP/UDP Checksum has not been calculated */ + if (!(status & (E1000_RXD_STAT_TCPCS | E1000_RXD_STAT_UDPCS))) + return; + + /* It must be a TCP or UDP packet with a valid checksum 
*/ + if (status & E1000_RXD_STAT_TCPCS) { + /* TCP checksum is good */ + skb->ip_summed = CHECKSUM_UNNECESSARY; + } else { + /* IP fragment with UDP payload */ + /* Hardware complements the payload checksum, so we undo it + * and then put the value in host order for further stack use. + */ + csum = ntohl(csum ^ 0xFFFF); + skb->csum = csum; + skb->ip_summed = CHECKSUM_COMPLETE; + } + adapter->hw_csum_good++; +} + +/** + * e1000_alloc_rx_buffers - Replace used receive buffers; legacy & extended + * @adapter: address of board private structure + **/ +static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter, + int cleaned_count) +{ + struct net_device *netdev = adapter->netdev; + struct pci_dev *pdev = adapter->pdev; + struct e1000_ring *rx_ring = adapter->rx_ring; + struct e1000_rx_desc *rx_desc; + struct e1000_buffer *buffer_info; + struct sk_buff *skb; + unsigned int i; + unsigned int bufsz = adapter->rx_buffer_len + NET_IP_ALIGN; + + i = rx_ring->next_to_use; + buffer_info = &rx_ring->buffer_info[i]; + + while (cleaned_count--) { + skb = buffer_info->skb; + if (skb) { + skb_trim(skb, 0); + goto map_skb; + } + + skb = netdev_alloc_skb(netdev, bufsz); + if (!skb) { + /* Better luck next round */ + adapter->alloc_rx_buff_failed++; + break; + } + + /* Make buffer alignment 2 beyond a 16 byte boundary + * this will result in a 16 byte aligned IP header after + * the 14 byte MAC header is removed + */ + skb_reserve(skb, NET_IP_ALIGN); + + buffer_info->skb = skb; +map_skb: + buffer_info->dma = pci_map_single(pdev, skb->data, + adapter->rx_buffer_len, + PCI_DMA_FROMDEVICE); + if (pci_dma_mapping_error(buffer_info->dma)) { + dev_err(&pdev->dev, "RX DMA map failed\n"); + adapter->rx_dma_failed++; + break; + } + + rx_desc = E1000_RX_DESC(*rx_ring, i); + rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma); + + i++; + if (i == rx_ring->count) + i = 0; + buffer_info = &rx_ring->buffer_info[i]; + } + + if (rx_ring->next_to_use != i) { + rx_ring->next_to_use = i; + if (i-- == 0) + i = (rx_ring->count - 1); + + /* Force memory writes to complete before letting h/w + * know there are new descriptors to fetch. (Only + * applicable for weak-ordered memory model archs, + * such as IA-64). 
*/ + wmb(); + writel(i, adapter->hw.hw_addr + rx_ring->tail); + } +} + +/** + * e1000_alloc_rx_buffers_ps - Replace used receive buffers; packet split + * @adapter: address of board private structure + **/ +static void e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter, + int cleaned_count) +{ + struct net_device *netdev = adapter->netdev; + struct pci_dev *pdev = adapter->pdev; + union e1000_rx_desc_packet_split *rx_desc; + struct e1000_ring *rx_ring = adapter->rx_ring; + struct e1000_buffer *buffer_info; + struct e1000_ps_page *ps_page; + struct sk_buff *skb; + unsigned int i, j; + + i = rx_ring->next_to_use; + buffer_info = &rx_ring->buffer_info[i]; + + while (cleaned_count--) { + rx_desc = E1000_RX_DESC_PS(*rx_ring, i); + + for (j = 0; j < PS_PAGE_BUFFERS; j++) { + ps_page = &rx_ring->ps_pages[(i * PS_PAGE_BUFFERS) + + j]; + if (j < adapter->rx_ps_pages) { + if (!ps_page->page) { + ps_page->page = alloc_page(GFP_ATOMIC); + if (!ps_page->page) { + adapter->alloc_rx_buff_failed++; + goto no_buffers; + } + ps_page->dma = pci_map_page(pdev, + ps_page->page, + 0, PAGE_SIZE, + PCI_DMA_FROMDEVICE); + if (pci_dma_mapping_error( + ps_page->dma)) { + dev_err(&adapter->pdev->dev, + "RX DMA page map failed\n"); + adapter->rx_dma_failed++; + goto no_buffers; + } + } + /* + * Refresh the desc even if buffer_addrs + * didn't change because each write-back + * erases this info. + */ + rx_desc->read.buffer_addr[j+1] = + cpu_to_le64(ps_page->dma); + } else { + rx_desc->read.buffer_addr[j+1] = ~0; + } + } + + skb = netdev_alloc_skb(netdev, + adapter->rx_ps_bsize0 + NET_IP_ALIGN); + + if (!skb) { + adapter->alloc_rx_buff_failed++; + break; + } + + /* Make buffer alignment 2 beyond a 16 byte boundary + * this will result in a 16 byte aligned IP header after + * the 14 byte MAC header is removed + */ + skb_reserve(skb, NET_IP_ALIGN); + + buffer_info->skb = skb; + buffer_info->dma = pci_map_single(pdev, skb->data, + adapter->rx_ps_bsize0, + PCI_DMA_FROMDEVICE); + if (pci_dma_mapping_error(buffer_info->dma)) { + dev_err(&pdev->dev, "RX DMA map failed\n"); + adapter->rx_dma_failed++; + /* cleanup skb */ + dev_kfree_skb_any(skb); + buffer_info->skb = NULL; + break; + } + + rx_desc->read.buffer_addr[0] = cpu_to_le64(buffer_info->dma); + + i++; + if (i == rx_ring->count) + i = 0; + buffer_info = &rx_ring->buffer_info[i]; + } + +no_buffers: + if (rx_ring->next_to_use != i) { + rx_ring->next_to_use = i; + + if (!(i--)) + i = (rx_ring->count - 1); + + /* Force memory writes to complete before letting h/w + * know there are new descriptors to fetch. (Only + * applicable for weak-ordered memory model archs, + * such as IA-64). */ + wmb(); + /* Hardware increments by 16 bytes, but packet split + * descriptors are 32 bytes...so we increment tail + * twice as much. 
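+ * (hence the 'i << 1' in the writel() below).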
+ */ + writel(i<<1, adapter->hw.hw_addr + rx_ring->tail); + } +} + +/** + * e1000_alloc_rx_buffers_jumbo - Replace used jumbo receive buffers + * + * @adapter: address of board private structure + * @cleaned_count: number of buffers to allocate this pass + **/ +static void e1000_alloc_rx_buffers_jumbo(struct e1000_adapter *adapter, + int cleaned_count) +{ + struct net_device *netdev = adapter->netdev; + struct pci_dev *pdev = adapter->pdev; + struct e1000_ring *rx_ring = adapter->rx_ring; + struct e1000_rx_desc *rx_desc; + struct e1000_buffer *buffer_info; + struct sk_buff *skb; + unsigned int i; + unsigned int bufsz = 256 - + 16 /*for skb_reserve */ - + NET_IP_ALIGN; + + i = rx_ring->next_to_use; + buffer_info = &rx_ring->buffer_info[i]; + + while (cleaned_count--) { + skb = buffer_info->skb; + if (skb) { + skb_trim(skb, 0); + goto check_page; + } + + skb = netdev_alloc_skb(netdev, bufsz); + if (!skb) { + /* Better luck next round */ + adapter->alloc_rx_buff_failed++; + break; + } + + /* Make buffer alignment 2 beyond a 16 byte boundary + * this will result in a 16 byte aligned IP header after + * the 14 byte MAC header is removed + */ + skb_reserve(skb, NET_IP_ALIGN); + + buffer_info->skb = skb; +check_page: + /* allocate a new page if necessary */ + if (!buffer_info->page) { + buffer_info->page = alloc_page(GFP_ATOMIC); + if (!buffer_info->page) { + adapter->alloc_rx_buff_failed++; + break; + } + } + + if (!buffer_info->dma) + buffer_info->dma = pci_map_page(pdev, + buffer_info->page, 0, + PAGE_SIZE, + PCI_DMA_FROMDEVICE); + if (pci_dma_mapping_error(buffer_info->dma)) { + dev_err(&adapter->pdev->dev, "RX DMA page map failed\n"); + adapter->rx_dma_failed++; + break; + } + + rx_desc = E1000_RX_DESC(*rx_ring, i); + rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma); + + i++; + if (i == rx_ring->count) + i = 0; + buffer_info = &rx_ring->buffer_info[i]; + } + + if (rx_ring->next_to_use != i) { + rx_ring->next_to_use = i; + if (i-- == 0) + i = (rx_ring->count - 1); + + /* Force memory writes to complete before letting h/w + * know there are new descriptors to fetch. (Only + * applicable for weak-ordered memory model archs, + * such as IA-64). 
*/ + wmb(); + writel(i, adapter->hw.hw_addr + rx_ring->tail); + } +} + +/** + * e1000_clean_rx_irq - Send received data up the network stack; legacy + * @adapter: board private structure + * + * the return value indicates whether actual cleaning was done, there + * is no guarantee that everything was cleaned + **/ +static bool e1000_clean_rx_irq(struct e1000_adapter *adapter, + int *work_done, int work_to_do) +{ + struct net_device *netdev = adapter->netdev; + struct pci_dev *pdev = adapter->pdev; + struct e1000_ring *rx_ring = adapter->rx_ring; + struct e1000_rx_desc *rx_desc, *next_rxd; + struct e1000_buffer *buffer_info, *next_buffer; + u32 length; + unsigned int i; + int cleaned_count = 0; + bool cleaned = 0; + unsigned int total_rx_bytes = 0, total_rx_packets = 0; + + i = rx_ring->next_to_clean; + rx_desc = E1000_RX_DESC(*rx_ring, i); + buffer_info = &rx_ring->buffer_info[i]; + + while (rx_desc->status & E1000_RXD_STAT_DD) { + struct sk_buff *skb; + u8 status; + + if (*work_done >= work_to_do) + break; + (*work_done)++; + + status = rx_desc->status; + skb = buffer_info->skb; + buffer_info->skb = NULL; + + prefetch(skb->data - NET_IP_ALIGN); + + i++; + if (i == rx_ring->count) + i = 0; + next_rxd = E1000_RX_DESC(*rx_ring, i); + prefetch(next_rxd); + + next_buffer = &rx_ring->buffer_info[i]; + + cleaned = 1; + cleaned_count++; + pci_unmap_single(pdev, + buffer_info->dma, + adapter->rx_buffer_len, + PCI_DMA_FROMDEVICE); + buffer_info->dma = 0; + + length = le16_to_cpu(rx_desc->length); + + /* !EOP means multiple descriptors were used to store a single + * packet, also make sure the frame isn't just CRC only */ + if (!(status & E1000_RXD_STAT_EOP) || (length <= 4)) { + /* All receives must fit into a single buffer */ + ndev_dbg(netdev, "%s: Receive packet consumed " + "multiple buffers\n", netdev->name); + /* recycle */ + buffer_info->skb = skb; + goto next_desc; + } + + if (rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK) { + /* recycle */ + buffer_info->skb = skb; + goto next_desc; + } + + /* adjust length to remove Ethernet CRC */ + length -= 4; + + /* probably a little skewed due to removing CRC */ + total_rx_bytes += length; + total_rx_packets++; + + /* code added for copybreak, this should improve + * performance for small packets with large amounts + * of reassembly being done in the stack */ + if (length < copybreak) { + struct sk_buff *new_skb = + netdev_alloc_skb(netdev, length + NET_IP_ALIGN); + if (new_skb) { + skb_reserve(new_skb, NET_IP_ALIGN); + memcpy(new_skb->data - NET_IP_ALIGN, + skb->data - NET_IP_ALIGN, + length + NET_IP_ALIGN); + /* save the skb in buffer_info as good */ + buffer_info->skb = skb; + skb = new_skb; + } + /* else just continue with the old one */ + } + /* end copybreak code */ + skb_put(skb, length); + + /* Receive Checksum Offload */ + e1000_rx_checksum(adapter, + (u32)(status) | + ((u32)(rx_desc->errors) << 24), + le16_to_cpu(rx_desc->csum), skb); + + e1000_receive_skb(adapter, netdev, skb,status,rx_desc->special); + +next_desc: + rx_desc->status = 0; + + /* return some buffers to hardware, one at a time is too slow */ + if (cleaned_count >= E1000_RX_BUFFER_WRITE) { + adapter->alloc_rx_buf(adapter, cleaned_count); + cleaned_count = 0; + } + + /* use prefetched values */ + rx_desc = next_rxd; + buffer_info = next_buffer; + } + rx_ring->next_to_clean = i; + + cleaned_count = e1000_desc_unused(rx_ring); + if (cleaned_count) + adapter->alloc_rx_buf(adapter, cleaned_count); + + adapter->total_rx_packets += total_rx_packets; + adapter->total_rx_bytes += 
total_rx_bytes; + return cleaned; +} + +static void e1000_consume_page(struct e1000_buffer *bi, struct sk_buff *skb, + u16 length) +{ + bi->page = NULL; + skb->len += length; + skb->data_len += length; + skb->truesize += length; +} + +static void e1000_put_txbuf(struct e1000_adapter *adapter, + struct e1000_buffer *buffer_info) +{ + if (buffer_info->dma) { + pci_unmap_page(adapter->pdev, buffer_info->dma, + buffer_info->length, PCI_DMA_TODEVICE); + buffer_info->dma = 0; + } + if (buffer_info->skb) { + dev_kfree_skb_any(buffer_info->skb); + buffer_info->skb = NULL; + } +} + +static void e1000_print_tx_hang(struct e1000_adapter *adapter) +{ + struct e1000_ring *tx_ring = adapter->tx_ring; + unsigned int i = tx_ring->next_to_clean; + unsigned int eop = tx_ring->buffer_info[i].next_to_watch; + struct e1000_tx_desc *eop_desc = E1000_TX_DESC(*tx_ring, eop); + struct net_device *netdev = adapter->netdev; + + /* detected Tx unit hang */ + ndev_err(netdev, + "Detected Tx Unit Hang:\n" + " TDH <%x>\n" + " TDT <%x>\n" + " next_to_use <%x>\n" + " next_to_clean <%x>\n" + "buffer_info[next_to_clean]:\n" + " time_stamp <%lx>\n" + " next_to_watch <%x>\n" + " jiffies <%lx>\n" + " next_to_watch.status <%x>\n", + readl(adapter->hw.hw_addr + tx_ring->head), + readl(adapter->hw.hw_addr + tx_ring->tail), + tx_ring->next_to_use, + tx_ring->next_to_clean, + tx_ring->buffer_info[eop].time_stamp, + eop, + jiffies, + eop_desc->upper.fields.status); +} + +/** + * e1000_clean_tx_irq - Reclaim resources after transmit completes + * @adapter: board private structure + * + * the return value indicates whether actual cleaning was done, there + * is no guarantee that everything was cleaned + **/ +static bool e1000_clean_tx_irq(struct e1000_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + struct e1000_hw *hw = &adapter->hw; + struct e1000_ring *tx_ring = adapter->tx_ring; + struct e1000_tx_desc *tx_desc, *eop_desc; + struct e1000_buffer *buffer_info; + unsigned int i, eop; + unsigned int count = 0; + bool cleaned = 0; + unsigned int total_tx_bytes = 0, total_tx_packets = 0; + + i = tx_ring->next_to_clean; + eop = tx_ring->buffer_info[i].next_to_watch; + eop_desc = E1000_TX_DESC(*tx_ring, eop); + + while (eop_desc->upper.data & cpu_to_le32(E1000_TXD_STAT_DD)) { + for (cleaned = 0; !cleaned; ) { + tx_desc = E1000_TX_DESC(*tx_ring, i); + buffer_info = &tx_ring->buffer_info[i]; + cleaned = (i == eop); + + if (cleaned) { + struct sk_buff *skb = buffer_info->skb; + unsigned int segs, bytecount; + segs = skb_shinfo(skb)->gso_segs ?: 1; + /* multiply data chunks by size of headers */ + bytecount = ((segs - 1) * skb_headlen(skb)) + + skb->len; + total_tx_packets += segs; + total_tx_bytes += bytecount; + } + + e1000_put_txbuf(adapter, buffer_info); + tx_desc->upper.data = 0; + + i++; + if (i == tx_ring->count) + i = 0; + } + + eop = tx_ring->buffer_info[i].next_to_watch; + eop_desc = E1000_TX_DESC(*tx_ring, eop); +#define E1000_TX_WEIGHT 64 + /* weight of a sort for tx, to avoid endless transmit cleanup */ + if (count++ == E1000_TX_WEIGHT) + break; + } + + tx_ring->next_to_clean = i; + +#define TX_WAKE_THRESHOLD 32 + if (cleaned && netif_carrier_ok(netdev) && + e1000_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD) { + /* Make sure that anybody stopping the queue after this + * sees the new next_to_clean. 
+ */ + smp_mb(); + + if (netif_queue_stopped(netdev) && + !(test_bit(__E1000_DOWN, &adapter->state))) { + netif_wake_queue(netdev); + ++adapter->restart_queue; + } + } + + if (adapter->detect_tx_hung) { + /* Detect a transmit hang in hardware, this serializes the + * check with the clearing of time_stamp and movement of i */ + adapter->detect_tx_hung = 0; + if (tx_ring->buffer_info[eop].dma && + time_after(jiffies, tx_ring->buffer_info[eop].time_stamp + + (adapter->tx_timeout_factor * HZ)) + && !(er32(STATUS) & + E1000_STATUS_TXOFF)) { + e1000_print_tx_hang(adapter); + netif_stop_queue(netdev); + } + } + adapter->total_tx_bytes += total_tx_bytes; + adapter->total_tx_packets += total_tx_packets; + return cleaned; +} + +/** + * e1000_clean_rx_irq_jumbo - Send received data up the network stack; legacy + * @adapter: board private structure + * + * the return value indicates whether actual cleaning was done, there + * is no guarantee that everything was cleaned + **/ +static bool e1000_clean_rx_irq_jumbo(struct e1000_adapter *adapter, + int *work_done, int work_to_do) +{ + struct net_device *netdev = adapter->netdev; + struct pci_dev *pdev = adapter->pdev; + struct e1000_ring *rx_ring = adapter->rx_ring; + struct e1000_rx_desc *rx_desc, *next_rxd; + struct e1000_buffer *buffer_info, *next_buffer; + u32 length; + unsigned int i; + int cleaned_count = 0; + bool cleaned = 0; + unsigned int total_rx_bytes = 0, total_rx_packets = 0; + + i = rx_ring->next_to_clean; + rx_desc = E1000_RX_DESC(*rx_ring, i); + buffer_info = &rx_ring->buffer_info[i]; + + while (rx_desc->status & E1000_RXD_STAT_DD) { + struct sk_buff *skb; + u8 status; + + if (*work_done >= work_to_do) + break; + (*work_done)++; + + status = rx_desc->status; + skb = buffer_info->skb; + buffer_info->skb = NULL; + + i++; + if (i == rx_ring->count) + i = 0; + next_rxd = E1000_RX_DESC(*rx_ring, i); + prefetch(next_rxd); + + next_buffer = &rx_ring->buffer_info[i]; + + cleaned = 1; + cleaned_count++; + pci_unmap_page(pdev, + buffer_info->dma, + PAGE_SIZE, + PCI_DMA_FROMDEVICE); + buffer_info->dma = 0; + + length = le16_to_cpu(rx_desc->length); + + /* errors is only valid for DD + EOP descriptors */ + if ((status & E1000_RXD_STAT_EOP) && + (rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK)) { + /* recycle both page and skb */ + buffer_info->skb = skb; + /* an error means any chain goes out the window too */ + if (rx_ring->rx_skb_top) + dev_kfree_skb(rx_ring->rx_skb_top); + rx_ring->rx_skb_top = NULL; + goto next_desc; + } + +#define rxtop rx_ring->rx_skb_top + if (!(status & E1000_RXD_STAT_EOP)) { + /* this descriptor is only the beginning (or middle) */ + if (!rxtop) { + /* this is the beginning of a chain */ + rxtop = skb; + skb_fill_page_desc(rxtop, 0, buffer_info->page, + 0, length); + } else { + /* this is the middle of a chain */ + skb_fill_page_desc(rxtop, + skb_shinfo(rxtop)->nr_frags, + buffer_info->page, 0, + length); + /* re-use the skb, only consumed the page */ + buffer_info->skb = skb; + } + e1000_consume_page(buffer_info, rxtop, length); + goto next_desc; + } else { + if (rxtop) { + /* end of the chain */ + skb_fill_page_desc(rxtop, + skb_shinfo(rxtop)->nr_frags, + buffer_info->page, 0, length); + /* re-use the current skb, we only consumed the + * page */ + buffer_info->skb = skb; + skb = rxtop; + rxtop = NULL; + e1000_consume_page(buffer_info, skb, length); + } else { + /* no chain, got EOP, this buf is the packet + * copybreak to save the put_page/alloc_page */ + if (length <= copybreak && + skb_tailroom(skb) >= length) { + u8 
*vaddr; + vaddr = kmap_atomic(buffer_info->page, + KM_SKB_DATA_SOFTIRQ); + memcpy(skb_tail_pointer(skb), + vaddr, length); + kunmap_atomic(vaddr, + KM_SKB_DATA_SOFTIRQ); + /* re-use the page, so don't erase + * buffer_info->page */ + skb_put(skb, length); + } else { + skb_fill_page_desc(skb, 0, + buffer_info->page, 0, + length); + e1000_consume_page(buffer_info, skb, + length); + } + } + } + + /* Receive Checksum Offload XXX recompute due to CRC strip? */ + e1000_rx_checksum(adapter, + (u32)(status) | + ((u32)(rx_desc->errors) << 24), + le16_to_cpu(rx_desc->csum), skb); + + pskb_trim(skb, skb->len - 4); + + /* probably a little skewed due to removing CRC */ + total_rx_bytes += skb->len; + total_rx_packets++; + + /* eth type trans needs skb->data to point to something */ + if (!pskb_may_pull(skb, ETH_HLEN)) { + ndev_err(netdev, "__pskb_pull_tail failed.\n"); + dev_kfree_skb(skb); + goto next_desc; + } + + e1000_receive_skb(adapter, netdev, skb,status,rx_desc->special); + +next_desc: + rx_desc->status = 0; + + /* return some buffers to hardware, one at a time is too slow */ + if (cleaned_count >= E1000_RX_BUFFER_WRITE) { + adapter->alloc_rx_buf(adapter, cleaned_count); + cleaned_count = 0; + } + + /* use prefetched values */ + rx_desc = next_rxd; + buffer_info = next_buffer; + } + rx_ring->next_to_clean = i; + + cleaned_count = e1000_desc_unused(rx_ring); + if (cleaned_count) + adapter->alloc_rx_buf(adapter, cleaned_count); + + adapter->total_rx_packets += total_rx_packets; + adapter->total_rx_bytes += total_rx_bytes; + return cleaned; +} + +/** + * e1000_clean_rx_irq_ps - Send received data up the network stack; packet split + * @adapter: board private structure + * + * the return value indicates whether actual cleaning was done, there + * is no guarantee that everything was cleaned + **/ +static bool e1000_clean_rx_irq_ps(struct e1000_adapter *adapter, + int *work_done, int work_to_do) +{ + union e1000_rx_desc_packet_split *rx_desc, *next_rxd; + struct net_device *netdev = adapter->netdev; + struct pci_dev *pdev = adapter->pdev; + struct e1000_ring *rx_ring = adapter->rx_ring; + struct e1000_buffer *buffer_info, *next_buffer; + struct e1000_ps_page *ps_page; + struct sk_buff *skb; + unsigned int i, j; + u32 length, staterr; + int cleaned_count = 0; + bool cleaned = 0; + unsigned int total_rx_bytes = 0, total_rx_packets = 0; + + i = rx_ring->next_to_clean; + rx_desc = E1000_RX_DESC_PS(*rx_ring, i); + staterr = le32_to_cpu(rx_desc->wb.middle.status_error); + buffer_info = &rx_ring->buffer_info[i]; + + while (staterr & E1000_RXD_STAT_DD) { + if (*work_done >= work_to_do) + break; + (*work_done)++; + skb = buffer_info->skb; + + /* in the packet split case this is header only */ + prefetch(skb->data - NET_IP_ALIGN); + + i++; + if (i == rx_ring->count) + i = 0; + next_rxd = E1000_RX_DESC_PS(*rx_ring, i); + prefetch(next_rxd); + + next_buffer = &rx_ring->buffer_info[i]; + + cleaned = 1; + cleaned_count++; + pci_unmap_single(pdev, buffer_info->dma, + adapter->rx_ps_bsize0, + PCI_DMA_FROMDEVICE); + buffer_info->dma = 0; + + if (!(staterr & E1000_RXD_STAT_EOP)) { + ndev_dbg(netdev, "%s: Packet Split buffers didn't pick " + "up the full packet\n", netdev->name); + dev_kfree_skb_irq(skb); + goto next_desc; + } + + if (staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK) { + dev_kfree_skb_irq(skb); + goto next_desc; + } + + length = le16_to_cpu(rx_desc->wb.middle.length0); + + if (!length) { + ndev_dbg(netdev, "%s: Last part of the packet spanning" + " multiple descriptors\n", netdev->name); + 
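/* the header buffer for this descriptor is empty, so just free the skb and move on */ +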
dev_kfree_skb_irq(skb); + goto next_desc; + } + + /* Good Receive */ + skb_put(skb, length); + + { + /* this looks ugly, but it seems compiler issues make it + more efficient than reusing j */ + int l1 = le16_to_cpu(rx_desc->wb.upper.length[0]); + + /* page alloc/put takes too long and effects small packet + * throughput, so unsplit small packets and save the alloc/put*/ + if (l1 && (l1 <= copybreak) && + ((length + l1) <= adapter->rx_ps_bsize0)) { + u8 *vaddr; + + ps_page = &rx_ring->ps_pages[i * PS_PAGE_BUFFERS]; + + /* there is no documentation about how to call + * kmap_atomic, so we can't hold the mapping + * very long */ + pci_dma_sync_single_for_cpu(pdev, ps_page->dma, + PAGE_SIZE, PCI_DMA_FROMDEVICE); + vaddr = kmap_atomic(ps_page->page, KM_SKB_DATA_SOFTIRQ); + memcpy(skb_tail_pointer(skb), vaddr, l1); + kunmap_atomic(vaddr, KM_SKB_DATA_SOFTIRQ); + pci_dma_sync_single_for_device(pdev, ps_page->dma, + PAGE_SIZE, PCI_DMA_FROMDEVICE); + /* remove the CRC */ + l1 -= 4; + skb_put(skb, l1); + goto copydone; + } /* if */ + } + + for (j = 0; j < PS_PAGE_BUFFERS; j++) { + length = le16_to_cpu(rx_desc->wb.upper.length[j]); + if (!length) + break; + + ps_page = &rx_ring->ps_pages[(i * PS_PAGE_BUFFERS) + j]; + pci_unmap_page(pdev, ps_page->dma, PAGE_SIZE, + PCI_DMA_FROMDEVICE); + ps_page->dma = 0; + skb_fill_page_desc(skb, j, ps_page->page, 0, length); + ps_page->page = NULL; + skb->len += length; + skb->data_len += length; + skb->truesize += length; + } + + /* strip the ethernet crc, problem is we're using pages now so + * this whole operation can get a little cpu intensive */ + pskb_trim(skb, skb->len - 4); + +copydone: + total_rx_bytes += skb->len; + total_rx_packets++; + + e1000_rx_checksum(adapter, staterr, le16_to_cpu( + rx_desc->wb.lower.hi_dword.csum_ip.csum), skb); + + if (rx_desc->wb.upper.header_status & + cpu_to_le16(E1000_RXDPS_HDRSTAT_HDRSP)) + adapter->rx_hdr_split++; + + e1000_receive_skb(adapter, netdev, skb, + staterr, rx_desc->wb.middle.vlan); + +next_desc: + rx_desc->wb.middle.status_error &= cpu_to_le32(~0xFF); + buffer_info->skb = NULL; + + /* return some buffers to hardware, one at a time is too slow */ + if (cleaned_count >= E1000_RX_BUFFER_WRITE) { + adapter->alloc_rx_buf(adapter, cleaned_count); + cleaned_count = 0; + } + + /* use prefetched values */ + rx_desc = next_rxd; + buffer_info = next_buffer; + + staterr = le32_to_cpu(rx_desc->wb.middle.status_error); + } + rx_ring->next_to_clean = i; + + cleaned_count = e1000_desc_unused(rx_ring); + if (cleaned_count) + adapter->alloc_rx_buf(adapter, cleaned_count); + + adapter->total_rx_packets += total_rx_packets; + adapter->total_rx_bytes += total_rx_bytes; + return cleaned; +} + +/** + * e1000_clean_rx_ring - Free Rx Buffers per Queue + * @adapter: board private structure + **/ +static void e1000_clean_rx_ring(struct e1000_adapter *adapter) +{ + struct e1000_ring *rx_ring = adapter->rx_ring; + struct e1000_buffer *buffer_info; + struct e1000_ps_page *ps_page; + struct pci_dev *pdev = adapter->pdev; + unsigned long size; + unsigned int i, j; + + /* Free all the Rx ring sk_buffs */ + for (i = 0; i < rx_ring->count; i++) { + buffer_info = &rx_ring->buffer_info[i]; + if (buffer_info->dma) { + if (adapter->clean_rx == e1000_clean_rx_irq) + pci_unmap_single(pdev, buffer_info->dma, + adapter->rx_buffer_len, + PCI_DMA_FROMDEVICE); + else if (adapter->clean_rx == e1000_clean_rx_irq_jumbo) + pci_unmap_page(pdev, buffer_info->dma, + PAGE_SIZE, PCI_DMA_FROMDEVICE); + else if (adapter->clean_rx == e1000_clean_rx_irq_ps) + 
pci_unmap_single(pdev, buffer_info->dma, + adapter->rx_ps_bsize0, + PCI_DMA_FROMDEVICE); + buffer_info->dma = 0; + } + + if (buffer_info->page) { + put_page(buffer_info->page); + buffer_info->page = NULL; + } + + if (buffer_info->skb) { + dev_kfree_skb(buffer_info->skb); + buffer_info->skb = NULL; + } + + for (j = 0; j < PS_PAGE_BUFFERS; j++) { + ps_page = &rx_ring->ps_pages[(i * PS_PAGE_BUFFERS) + + j]; + if (!ps_page->page) + break; + pci_unmap_page(pdev, ps_page->dma, PAGE_SIZE, + PCI_DMA_FROMDEVICE); + ps_page->dma = 0; + put_page(ps_page->page); + ps_page->page = NULL; + } + } + + /* there also may be some cached data from a chained receive */ + if (rx_ring->rx_skb_top) { + dev_kfree_skb(rx_ring->rx_skb_top); + rx_ring->rx_skb_top = NULL; + } + + size = sizeof(struct e1000_buffer) * rx_ring->count; + memset(rx_ring->buffer_info, 0, size); + size = sizeof(struct e1000_ps_page) + * (rx_ring->count * PS_PAGE_BUFFERS); + memset(rx_ring->ps_pages, 0, size); + + /* Zero out the descriptor ring */ + memset(rx_ring->desc, 0, rx_ring->size); + + rx_ring->next_to_clean = 0; + rx_ring->next_to_use = 0; + + writel(0, adapter->hw.hw_addr + rx_ring->head); + writel(0, adapter->hw.hw_addr + rx_ring->tail); +} + +/** + * e1000_intr_msi - Interrupt Handler + * @irq: interrupt number + * @data: pointer to a network interface device structure + **/ +static irqreturn_t e1000_intr_msi(int irq, void *data) +{ + struct net_device *netdev = data; + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + u32 icr = er32(ICR); + + /* read ICR disables interrupts using IAM, so keep up with our + * enable/disable accounting */ + atomic_inc(&adapter->irq_sem); + + if (icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) { + hw->mac.get_link_status = 1; + /* ICH8 workaround-- Call gig speed drop workaround on cable + * disconnect (LSC) before accessing any PHY registers */ + if ((adapter->flags & FLAG_LSC_GIG_SPEED_DROP) && + (!(er32(STATUS) & E1000_STATUS_LU))) + e1000e_gig_downshift_workaround_ich8lan(hw); + + /* 80003ES2LAN workaround-- For packet buffer work-around on + * link down event; disable receives here in the ISR and reset + * adapter in watchdog */ + if (netif_carrier_ok(netdev) && + adapter->flags & FLAG_RX_NEEDS_RESTART) { + /* disable receives */ + u32 rctl = er32(RCTL); + ew32(RCTL, rctl & ~E1000_RCTL_EN); + } + /* guard against interrupt when we're going down */ + if (!test_bit(__E1000_DOWN, &adapter->state)) + mod_timer(&adapter->watchdog_timer, jiffies + 1); + } + + if (netif_rx_schedule_prep(netdev, &adapter->napi)) { + adapter->total_tx_bytes = 0; + adapter->total_tx_packets = 0; + adapter->total_rx_bytes = 0; + adapter->total_rx_packets = 0; + __netif_rx_schedule(netdev, &adapter->napi); + } else { + atomic_dec(&adapter->irq_sem); + } + + return IRQ_HANDLED; +} + +/** + * e1000_intr - Interrupt Handler + * @irq: interrupt number + * @data: pointer to a network interface device structure + **/ +static irqreturn_t e1000_intr(int irq, void *data) +{ + struct net_device *netdev = data; + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + + u32 rctl, icr = er32(ICR); + if (!icr) + return IRQ_NONE; /* Not our interrupt */ + + /* IMS will not auto-mask if INT_ASSERTED is not set, and if it is + * not set, then the adapter didn't send an interrupt */ + if (!(icr & E1000_ICR_INT_ASSERTED)) + return IRQ_NONE; + + /* Interrupt Auto-Mask...upon reading ICR, + * interrupts are masked. 
No need for the + * IMC write, but it does mean we should + * account for it ASAP. */ + atomic_inc(&adapter->irq_sem); + + if (icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) { + hw->mac.get_link_status = 1; + /* ICH8 workaround-- Call gig speed drop workaround on cable + * disconnect (LSC) before accessing any PHY registers */ + if ((adapter->flags & FLAG_LSC_GIG_SPEED_DROP) && + (!(er32(STATUS) & E1000_STATUS_LU))) + e1000e_gig_downshift_workaround_ich8lan(hw); + + /* 80003ES2LAN workaround-- + * For packet buffer work-around on link down event; + * disable receives here in the ISR and + * reset adapter in watchdog + */ + if (netif_carrier_ok(netdev) && + (adapter->flags & FLAG_RX_NEEDS_RESTART)) { + /* disable receives */ + rctl = er32(RCTL); + ew32(RCTL, rctl & ~E1000_RCTL_EN); + } + /* guard against interrupt when we're going down */ + if (!test_bit(__E1000_DOWN, &adapter->state)) + mod_timer(&adapter->watchdog_timer, jiffies + 1); + } + + if (netif_rx_schedule_prep(netdev, &adapter->napi)) { + adapter->total_tx_bytes = 0; + adapter->total_tx_packets = 0; + adapter->total_rx_bytes = 0; + adapter->total_rx_packets = 0; + __netif_rx_schedule(netdev, &adapter->napi); + } else { + atomic_dec(&adapter->irq_sem); + } + + return IRQ_HANDLED; +} + +static int e1000_request_irq(struct e1000_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + void (*handler) = &e1000_intr; + int irq_flags = IRQF_SHARED; + int err; + + err = pci_enable_msi(adapter->pdev); + if (err) { + ndev_warn(netdev, + "Unable to allocate MSI interrupt Error: %d\n", err); + } else { + adapter->flags |= FLAG_MSI_ENABLED; + handler = &e1000_intr_msi; + irq_flags = 0; + } + + err = request_irq(adapter->pdev->irq, handler, irq_flags, netdev->name, + netdev); + if (err) { + if (adapter->flags & FLAG_MSI_ENABLED) + pci_disable_msi(adapter->pdev); + ndev_err(netdev, + "Unable to allocate interrupt Error: %d\n", err); + } + + return err; +} + +static void e1000_free_irq(struct e1000_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + + free_irq(adapter->pdev->irq, netdev); + if (adapter->flags & FLAG_MSI_ENABLED) { + pci_disable_msi(adapter->pdev); + adapter->flags &= ~FLAG_MSI_ENABLED; + } +} + +/** + * e1000_irq_disable - Mask off interrupt generation on the NIC + **/ +static void e1000_irq_disable(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + + atomic_inc(&adapter->irq_sem); + ew32(IMC, ~0); + e1e_flush(); + synchronize_irq(adapter->pdev->irq); +} + +/** + * e1000_irq_enable - Enable default interrupt generation settings + **/ +static void e1000_irq_enable(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + + if (atomic_dec_and_test(&adapter->irq_sem)) { + ew32(IMS, IMS_ENABLE_MASK); + e1e_flush(); + } +} + +/** + * e1000_get_hw_control - get control of the h/w from f/w + * @adapter: address of board private structure + * + * e1000_get_hw_control sets {CTRL_EXT|FWSM}:DRV_LOAD bit. + * For ASF and Pass Through versions of f/w this means that + * the driver is loaded. For AMT version (only with 82573) + * of the f/w this means that the network i/f is open. 
+ **/ +static void e1000_get_hw_control(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u32 ctrl_ext; + u32 swsm; + + /* Let firmware know the driver has taken over */ + if (adapter->flags & FLAG_HAS_SWSM_ON_LOAD) { + swsm = er32(SWSM); + ew32(SWSM, swsm | E1000_SWSM_DRV_LOAD); + } else if (adapter->flags & FLAG_HAS_CTRLEXT_ON_LOAD) { + ctrl_ext = er32(CTRL_EXT); + ew32(CTRL_EXT, + ctrl_ext | E1000_CTRL_EXT_DRV_LOAD); + } +} + +/** + * e1000_release_hw_control - release control of the h/w to f/w + * @adapter: address of board private structure + * + * e1000_release_hw_control resets {CTRL_EXT|FWSM}:DRV_LOAD bit. + * For ASF and Pass Through versions of f/w this means that the + * driver is no longer loaded. For AMT version (only with 82573) + * of the f/w this means that the network i/f is closed. + * + **/ +static void e1000_release_hw_control(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u32 ctrl_ext; + u32 swsm; + + /* Let firmware take over control of h/w */ + if (adapter->flags & FLAG_HAS_SWSM_ON_LOAD) { + swsm = er32(SWSM); + ew32(SWSM, swsm & ~E1000_SWSM_DRV_LOAD); + } else if (adapter->flags & FLAG_HAS_CTRLEXT_ON_LOAD) { + ctrl_ext = er32(CTRL_EXT); + ew32(CTRL_EXT, + ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD); + } +} + +static void e1000_release_manageability(struct e1000_adapter *adapter) +{ + if (adapter->flags & FLAG_MNG_PT_ENABLED) { + struct e1000_hw *hw = &adapter->hw; + + u32 manc = er32(MANC); + + /* re-enable hardware interception of ARP */ + manc |= E1000_MANC_ARP_EN; + manc &= ~E1000_MANC_EN_MNG2HOST; + + /* don't explicitly have to mess with MANC2H since + * MANC has an enable disable that gates MANC2H */ + ew32(MANC, manc); + } +} + +/** + * e1000_alloc_ring_dma - allocate memory for a ring structure + **/ +static int e1000_alloc_ring_dma(struct e1000_adapter *adapter, + struct e1000_ring *ring) +{ + struct pci_dev *pdev = adapter->pdev; + + ring->desc = dma_alloc_coherent(&pdev->dev, ring->size, &ring->dma, + GFP_KERNEL); + if (!ring->desc) + return -ENOMEM; + + return 0; +} + +/** + * e1000e_setup_tx_resources - allocate Tx resources (Descriptors) + * @adapter: board private structure + * + * Return 0 on success, negative on failure + **/ +int e1000e_setup_tx_resources(struct e1000_adapter *adapter) +{ + struct e1000_ring *tx_ring = adapter->tx_ring; + int err = -ENOMEM, size; + + size = sizeof(struct e1000_buffer) * tx_ring->count; + tx_ring->buffer_info = vmalloc(size); + if (!tx_ring->buffer_info) + goto err; + memset(tx_ring->buffer_info, 0, size); + + /* round up to nearest 4K */ + tx_ring->size = tx_ring->count * sizeof(struct e1000_tx_desc); + tx_ring->size = ALIGN(tx_ring->size, 4096); + + err = e1000_alloc_ring_dma(adapter, tx_ring); + if (err) + goto err; + + tx_ring->next_to_use = 0; + tx_ring->next_to_clean = 0; + spin_lock_init(&adapter->tx_queue_lock); + + return 0; +err: + vfree(tx_ring->buffer_info); + ndev_err(adapter->netdev, + "Unable to allocate memory for the transmit descriptor ring\n"); + return err; +} + +/** + * e1000e_setup_rx_resources - allocate Rx resources (Descriptors) + * @adapter: board private structure + * + * Returns 0 on success, negative on failure + **/ +int e1000e_setup_rx_resources(struct e1000_adapter *adapter) +{ + struct e1000_ring *rx_ring = adapter->rx_ring; + int size, desc_len, err = -ENOMEM; + + size = sizeof(struct e1000_buffer) * rx_ring->count; + rx_ring->buffer_info = vmalloc(size); + if (!rx_ring->buffer_info) + goto err; + memset(rx_ring->buffer_info, 0, size); + + 
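/* one e1000_ps_page entry is reserved for each of the PS_PAGE_BUFFERS page buffers of every descriptor in the ring */ +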
rx_ring->ps_pages = kcalloc(rx_ring->count * PS_PAGE_BUFFERS, + sizeof(struct e1000_ps_page), + GFP_KERNEL); + if (!rx_ring->ps_pages) + goto err; + + desc_len = sizeof(union e1000_rx_desc_packet_split); + + /* Round up to nearest 4K */ + rx_ring->size = rx_ring->count * desc_len; + rx_ring->size = ALIGN(rx_ring->size, 4096); + + err = e1000_alloc_ring_dma(adapter, rx_ring); + if (err) + goto err; + + rx_ring->next_to_clean = 0; + rx_ring->next_to_use = 0; + rx_ring->rx_skb_top = NULL; + + return 0; +err: + vfree(rx_ring->buffer_info); + kfree(rx_ring->ps_pages); + ndev_err(adapter->netdev, + "Unable to allocate memory for the receive descriptor ring\n"); + return err; +} + +/** + * e1000_clean_tx_ring - Free Tx Buffers + * @adapter: board private structure + **/ +static void e1000_clean_tx_ring(struct e1000_adapter *adapter) +{ + struct e1000_ring *tx_ring = adapter->tx_ring; + struct e1000_buffer *buffer_info; + unsigned long size; + unsigned int i; + + for (i = 0; i < tx_ring->count; i++) { + buffer_info = &tx_ring->buffer_info[i]; + e1000_put_txbuf(adapter, buffer_info); + } + + size = sizeof(struct e1000_buffer) * tx_ring->count; + memset(tx_ring->buffer_info, 0, size); + + memset(tx_ring->desc, 0, tx_ring->size); + + tx_ring->next_to_use = 0; + tx_ring->next_to_clean = 0; + + writel(0, adapter->hw.hw_addr + tx_ring->head); + writel(0, adapter->hw.hw_addr + tx_ring->tail); +} + +/** + * e1000e_free_tx_resources - Free Tx Resources per Queue + * @adapter: board private structure + * + * Free all transmit software resources + **/ +void e1000e_free_tx_resources(struct e1000_adapter *adapter) +{ + struct pci_dev *pdev = adapter->pdev; + struct e1000_ring *tx_ring = adapter->tx_ring; + + e1000_clean_tx_ring(adapter); + + vfree(tx_ring->buffer_info); + tx_ring->buffer_info = NULL; + + dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc, + tx_ring->dma); + tx_ring->desc = NULL; +} + +/** + * e1000e_free_rx_resources - Free Rx Resources + * @adapter: board private structure + * + * Free all receive software resources + **/ + +void e1000e_free_rx_resources(struct e1000_adapter *adapter) +{ + struct pci_dev *pdev = adapter->pdev; + struct e1000_ring *rx_ring = adapter->rx_ring; + + e1000_clean_rx_ring(adapter); + + vfree(rx_ring->buffer_info); + rx_ring->buffer_info = NULL; + + kfree(rx_ring->ps_pages); + rx_ring->ps_pages = NULL; + + dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc, + rx_ring->dma); + rx_ring->desc = NULL; +} + +/** + * e1000_update_itr - update the dynamic ITR value based on statistics + * Stores a new ITR value based on packets and byte + * counts during the last interrupt. The advantage of per interrupt + * computation is faster updates and more accurate ITR for the current + * traffic pattern. Constants in this function were computed + * based on theoretical maximum wire speed and thresholds were set based + * on testing data as well as attempting to minimize response time + * while increasing bulk throughput. 
+ * this functionality is controlled by the InterruptThrottleRate module + * parameter (see e1000_param.c) + * @adapter: pointer to adapter + * @itr_setting: current adapter->itr + * @packets: the number of packets during this measurement interval + * @bytes: the number of bytes during this measurement interval + **/ +static unsigned int e1000_update_itr(struct e1000_adapter *adapter, + u16 itr_setting, int packets, + int bytes) +{ + unsigned int retval = itr_setting; + + if (packets == 0) + goto update_itr_done; + + switch (itr_setting) { + case lowest_latency: + /* handle TSO and jumbo frames */ + if (bytes/packets > 8000) + retval = bulk_latency; + else if ((packets < 5) && (bytes > 512)) { + retval = low_latency; + } + break; + case low_latency: /* 50 usec aka 20000 ints/s */ + if (bytes > 10000) { + /* this if handles the TSO accounting */ + if (bytes/packets > 8000) { + retval = bulk_latency; + } else if ((packets < 10) || ((bytes/packets) > 1200)) { + retval = bulk_latency; + } else if ((packets > 35)) { + retval = lowest_latency; + } + } else if (bytes/packets > 2000) { + retval = bulk_latency; + } else if (packets <= 2 && bytes < 512) { + retval = lowest_latency; + } + break; + case bulk_latency: /* 250 usec aka 4000 ints/s */ + if (bytes > 25000) { + if (packets > 35) { + retval = low_latency; + } + } else if (bytes < 6000) { + retval = low_latency; + } + break; + } + +update_itr_done: + return retval; +} + +static void e1000_set_itr(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u16 current_itr; + u32 new_itr = adapter->itr; + + /* for non-gigabit speeds, just fix the interrupt rate at 4000 */ + if (adapter->link_speed != SPEED_1000) { + current_itr = 0; + new_itr = 4000; + goto set_itr_now; + } + + adapter->tx_itr = e1000_update_itr(adapter, + adapter->tx_itr, + adapter->total_tx_packets, + adapter->total_tx_bytes); + /* conservative mode (itr 3) eliminates the lowest_latency setting */ + if (adapter->itr_setting == 3 && adapter->tx_itr == lowest_latency) + adapter->tx_itr = low_latency; + + adapter->rx_itr = e1000_update_itr(adapter, + adapter->rx_itr, + adapter->total_rx_packets, + adapter->total_rx_bytes); + /* conservative mode (itr 3) eliminates the lowest_latency setting */ + if (adapter->itr_setting == 3 && adapter->rx_itr == lowest_latency) + adapter->rx_itr = low_latency; + + current_itr = max(adapter->rx_itr, adapter->tx_itr); + + switch (current_itr) { + /* counts and packets in update_itr are dependent on these numbers */ + case lowest_latency: + new_itr = 70000; + break; + case low_latency: + new_itr = 20000; /* aka hwitr = ~200 */ + break; + case bulk_latency: + new_itr = 4000; + break; + default: + break; + } + +set_itr_now: + if (new_itr != adapter->itr) { + /* this attempts to bias the interrupt rate towards Bulk + * by adding intermediate steps when interrupt rate is + * increasing */ + new_itr = new_itr > adapter->itr ? + min(adapter->itr + (new_itr >> 2), new_itr) : + new_itr; + adapter->itr = new_itr; + ew32(ITR, 1000000000 / (new_itr * 256)); + } +} + +/** + * e1000_clean - NAPI Rx polling callback + * @adapter: board private structure + **/ +static int e1000_clean(struct napi_struct *napi, int budget) +{ + struct e1000_adapter *adapter = container_of(napi, struct e1000_adapter, napi); + struct net_device *poll_dev = adapter->netdev; + int tx_cleaned = 0, work_done = 0; + + /* Must NOT use netdev_priv macro here. 
*/ + adapter = poll_dev->priv; + + /* Keep link state information with original netdev */ + if (!netif_carrier_ok(poll_dev)) + goto quit_polling; + + /* e1000_clean is called per-cpu. This lock protects + * tx_ring from being cleaned by multiple cpus + * simultaneously. A failure obtaining the lock means + * tx_ring is currently being cleaned anyway. */ + if (spin_trylock(&adapter->tx_queue_lock)) { + tx_cleaned = e1000_clean_tx_irq(adapter); + spin_unlock(&adapter->tx_queue_lock); + } + + adapter->clean_rx(adapter, &work_done, budget); + + /* If no Tx and not enough Rx work done, exit the polling mode */ + if ((!tx_cleaned && (work_done < budget)) || + !netif_running(poll_dev)) { +quit_polling: + if (adapter->itr_setting & 3) + e1000_set_itr(adapter); + netif_rx_complete(poll_dev, napi); + e1000_irq_enable(adapter); + } + + return work_done; +} + +static void e1000_vlan_rx_add_vid(struct net_device *netdev, u16 vid) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + u32 vfta, index; + + /* don't update vlan cookie if already programmed */ + if ((adapter->hw.mng_cookie.status & + E1000_MNG_DHCP_COOKIE_STATUS_VLAN) && + (vid == adapter->mng_vlan_id)) + return; + /* add VID to filter table */ + index = (vid >> 5) & 0x7F; + vfta = E1000_READ_REG_ARRAY(hw, E1000_VFTA, index); + vfta |= (1 << (vid & 0x1F)); + e1000e_write_vfta(hw, index, vfta); +} + +static void e1000_vlan_rx_kill_vid(struct net_device *netdev, u16 vid) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + u32 vfta, index; + + e1000_irq_disable(adapter); + vlan_group_set_device(adapter->vlgrp, vid, NULL); + e1000_irq_enable(adapter); + + if ((adapter->hw.mng_cookie.status & + E1000_MNG_DHCP_COOKIE_STATUS_VLAN) && + (vid == adapter->mng_vlan_id)) { + /* release control to f/w */ + e1000_release_hw_control(adapter); + return; + } + + /* remove VID from filter table */ + index = (vid >> 5) & 0x7F; + vfta = E1000_READ_REG_ARRAY(hw, E1000_VFTA, index); + vfta &= ~(1 << (vid & 0x1F)); + e1000e_write_vfta(hw, index, vfta); +} + +static void e1000_update_mng_vlan(struct e1000_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + u16 vid = adapter->hw.mng_cookie.vlan_id; + u16 old_vid = adapter->mng_vlan_id; + + if (!adapter->vlgrp) + return; + + if (!vlan_group_get_device(adapter->vlgrp, vid)) { + adapter->mng_vlan_id = E1000_MNG_VLAN_NONE; + if (adapter->hw.mng_cookie.status & + E1000_MNG_DHCP_COOKIE_STATUS_VLAN) { + e1000_vlan_rx_add_vid(netdev, vid); + adapter->mng_vlan_id = vid; + } + + if ((old_vid != (u16)E1000_MNG_VLAN_NONE) && + (vid != old_vid) && + !vlan_group_get_device(adapter->vlgrp, old_vid)) + e1000_vlan_rx_kill_vid(netdev, old_vid); + } else { + adapter->mng_vlan_id = vid; + } +} + + +static void e1000_vlan_rx_register(struct net_device *netdev, + struct vlan_group *grp) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + u32 ctrl, rctl; + + e1000_irq_disable(adapter); + adapter->vlgrp = grp; + + if (grp) { + /* enable VLAN tag insert/strip */ + ctrl = er32(CTRL); + ctrl |= E1000_CTRL_VME; + ew32(CTRL, ctrl); + + if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER) { + /* enable VLAN receive filtering */ + rctl = er32(RCTL); + rctl |= E1000_RCTL_VFE; + rctl &= ~E1000_RCTL_CFIEN; + ew32(RCTL, rctl); + e1000_update_mng_vlan(adapter); + } + } else { + /* disable VLAN tag insert/strip */ + ctrl = er32(CTRL); + ctrl &= ~E1000_CTRL_VME; + ew32(CTRL, ctrl); + + if (adapter->flags & 
FLAG_HAS_HW_VLAN_FILTER) { + /* disable VLAN filtering */ + rctl = er32(RCTL); + rctl &= ~E1000_RCTL_VFE; + ew32(RCTL, rctl); + if (adapter->mng_vlan_id != + (u16)E1000_MNG_VLAN_NONE) { + e1000_vlan_rx_kill_vid(netdev, + adapter->mng_vlan_id); + adapter->mng_vlan_id = E1000_MNG_VLAN_NONE; + } + } + } + + e1000_irq_enable(adapter); +} + +static void e1000_restore_vlan(struct e1000_adapter *adapter) +{ + u16 vid; + + e1000_vlan_rx_register(adapter->netdev, adapter->vlgrp); + + if (!adapter->vlgrp) + return; + + for (vid = 0; vid < VLAN_GROUP_ARRAY_LEN; vid++) { + if (!vlan_group_get_device(adapter->vlgrp, vid)) + continue; + e1000_vlan_rx_add_vid(adapter->netdev, vid); + } +} + +static void e1000_init_manageability(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u32 manc, manc2h; + + if (!(adapter->flags & FLAG_MNG_PT_ENABLED)) + return; + + manc = er32(MANC); + + /* disable hardware interception of ARP */ + manc &= ~(E1000_MANC_ARP_EN); + + /* enable receiving management packets to the host. this will probably + * generate destination unreachable messages from the host OS, but + * the packets will be handled on SMBUS */ + manc |= E1000_MANC_EN_MNG2HOST; + manc2h = er32(MANC2H); +#define E1000_MNG2HOST_PORT_623 (1 << 5) +#define E1000_MNG2HOST_PORT_664 (1 << 6) + manc2h |= E1000_MNG2HOST_PORT_623; + manc2h |= E1000_MNG2HOST_PORT_664; + ew32(MANC2H, manc2h); + ew32(MANC, manc); +} + +/** + * e1000_configure_tx - Configure 8254x Transmit Unit after Reset + * @adapter: board private structure + * + * Configure the Tx unit of the MAC after a reset. + **/ +static void e1000_configure_tx(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + struct e1000_ring *tx_ring = adapter->tx_ring; + u64 tdba; + u32 tdlen, tctl, tipg, tarc; + u32 ipgr1, ipgr2; + + /* Setup the HW Tx Head and Tail descriptor pointers */ + tdba = tx_ring->dma; + tdlen = tx_ring->count * sizeof(struct e1000_tx_desc); + ew32(TDBAL, (tdba & DMA_32BIT_MASK)); + ew32(TDBAH, (tdba >> 32)); + ew32(TDLEN, tdlen); + ew32(TDH, 0); + ew32(TDT, 0); + tx_ring->head = E1000_TDH; + tx_ring->tail = E1000_TDT; + + /* Set the default values for the Tx Inter Packet Gap timer */ + tipg = DEFAULT_82543_TIPG_IPGT_COPPER; /* 8 */ + ipgr1 = DEFAULT_82543_TIPG_IPGR1; /* 8 */ + ipgr2 = DEFAULT_82543_TIPG_IPGR2; /* 6 */ + + if (adapter->flags & FLAG_TIPG_MEDIUM_FOR_80003ESLAN) + ipgr2 = DEFAULT_80003ES2LAN_TIPG_IPGR2; /* 7 */ + + tipg |= ipgr1 << E1000_TIPG_IPGR1_SHIFT; + tipg |= ipgr2 << E1000_TIPG_IPGR2_SHIFT; + ew32(TIPG, tipg); + + /* Set the Tx Interrupt Delay register */ + ew32(TIDV, adapter->tx_int_delay); + /* tx irq moderation */ + ew32(TADV, adapter->tx_abs_int_delay); + + /* Program the Transmit Control Register */ + tctl = er32(TCTL); + tctl &= ~E1000_TCTL_CT; + tctl |= E1000_TCTL_PSP | E1000_TCTL_RTLC | + (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT); + + if (adapter->flags & FLAG_TARC_SPEED_MODE_BIT) { + tarc = er32(TARC0); + /* set the speed mode bit, we'll clear it if we're not at + * gigabit link later */ +#define SPEED_MODE_BIT (1 << 21) + tarc |= SPEED_MODE_BIT; + ew32(TARC0, tarc); + } + + /* errata: program both queues to unweighted RR */ + if (adapter->flags & FLAG_TARC_SET_BIT_ZERO) { + tarc = er32(TARC0); + tarc |= 1; + ew32(TARC0, tarc); + tarc = er32(TARC1); + tarc |= 1; + ew32(TARC1, tarc); + } + + e1000e_config_collision_dist(hw); + + /* Setup Transmit Descriptor Settings for eop descriptor */ + adapter->txd_cmd = E1000_TXD_CMD_EOP | E1000_TXD_CMD_IFCS; + + /* only set IDE if we 
are delaying interrupts using the timers */ + if (adapter->tx_int_delay) + adapter->txd_cmd |= E1000_TXD_CMD_IDE; + + /* enable Report Status bit */ + adapter->txd_cmd |= E1000_TXD_CMD_RS; + + ew32(TCTL, tctl); + + adapter->tx_queue_len = adapter->netdev->tx_queue_len; +} + +/** + * e1000_setup_rctl - configure the receive control registers + * @adapter: Board private structure + **/ +#define PAGE_USE_COUNT(S) (((S) >> PAGE_SHIFT) + \ + (((S) & (PAGE_SIZE - 1)) ? 1 : 0)) +static void e1000_setup_rctl(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u32 rctl, rfctl; + u32 psrctl = 0; + u32 pages = 0; + + /* Program MC offset vector base */ + rctl = er32(RCTL); + rctl &= ~(3 << E1000_RCTL_MO_SHIFT); + rctl |= E1000_RCTL_EN | E1000_RCTL_BAM | + E1000_RCTL_LBM_NO | E1000_RCTL_RDMTS_HALF | + (adapter->hw.mac.mc_filter_type << E1000_RCTL_MO_SHIFT); + + /* Do not Store bad packets */ + rctl &= ~E1000_RCTL_SBP; + + /* Enable Long Packet receive */ + if (adapter->netdev->mtu <= ETH_DATA_LEN) + rctl &= ~E1000_RCTL_LPE; + else + rctl |= E1000_RCTL_LPE; + + /* Setup buffer sizes */ + rctl &= ~E1000_RCTL_SZ_4096; + rctl |= E1000_RCTL_BSEX; + switch (adapter->rx_buffer_len) { + case 256: + rctl |= E1000_RCTL_SZ_256; + rctl &= ~E1000_RCTL_BSEX; + break; + case 512: + rctl |= E1000_RCTL_SZ_512; + rctl &= ~E1000_RCTL_BSEX; + break; + case 1024: + rctl |= E1000_RCTL_SZ_1024; + rctl &= ~E1000_RCTL_BSEX; + break; + case 2048: + default: + rctl |= E1000_RCTL_SZ_2048; + rctl &= ~E1000_RCTL_BSEX; + break; + case 4096: + rctl |= E1000_RCTL_SZ_4096; + break; + case 8192: + rctl |= E1000_RCTL_SZ_8192; + break; + case 16384: + rctl |= E1000_RCTL_SZ_16384; + break; + } + + /* + * 82571 and greater support packet-split where the protocol + * header is placed in skb->data and the packet data is + * placed in pages hanging off of skb_shinfo(skb)->nr_frags. + * In the case of a non-split, skb->data is linearly filled, + * followed by the page buffers. Therefore, skb->data is + * sized to hold the largest protocol header. + * + * allocations using alloc_page take too long for regular MTU + * so only enable packet split for jumbo frames + * + * Using pages when the page size is greater than 16k wastes + * a lot of memory, since we allocate 3 pages at all times + * per packet. + */ + adapter->rx_ps_pages = 0; + pages = PAGE_USE_COUNT(adapter->netdev->mtu); + if ((pages <= 3) && (PAGE_SIZE <= 16384) && (rctl & E1000_RCTL_LPE)) + adapter->rx_ps_pages = pages; + + if (adapter->rx_ps_pages) { + /* Configure extra packet-split registers */ + rfctl = er32(RFCTL); + rfctl |= E1000_RFCTL_EXTEN; + /* disable packet split support for IPv6 extension headers, + * because some malformed IPv6 headers can hang the RX */ + rfctl |= (E1000_RFCTL_IPV6_EX_DIS | + E1000_RFCTL_NEW_IPV6_EXT_DIS); + + ew32(RFCTL, rfctl); + + /* disable the stripping of CRC because it breaks + * BMC firmware connected over SMBUS */ + rctl |= E1000_RCTL_DTYP_PS /* | E1000_RCTL_SECRC */; + + psrctl |= adapter->rx_ps_bsize0 >> + E1000_PSRCTL_BSIZE0_SHIFT; + + switch (adapter->rx_ps_pages) { + case 3: + psrctl |= PAGE_SIZE << + E1000_PSRCTL_BSIZE3_SHIFT; + case 2: + psrctl |= PAGE_SIZE << + E1000_PSRCTL_BSIZE2_SHIFT; + case 1: + psrctl |= PAGE_SIZE >> + E1000_PSRCTL_BSIZE1_SHIFT; + break; + } + + ew32(PSRCTL, psrctl); + } + + ew32(RCTL, rctl); +} + +/** + * e1000_configure_rx - Configure Receive Unit after Reset + * @adapter: board private structure + * + * Configure the Rx unit of the MAC after a reset. 
+ **/ +static void e1000_configure_rx(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + struct e1000_ring *rx_ring = adapter->rx_ring; + u64 rdba; + u32 rdlen, rctl, rxcsum, ctrl_ext; + + if (adapter->rx_ps_pages) { + /* this is a 32 byte descriptor */ + rdlen = rx_ring->count * + sizeof(union e1000_rx_desc_packet_split); + adapter->clean_rx = e1000_clean_rx_irq_ps; + adapter->alloc_rx_buf = e1000_alloc_rx_buffers_ps; + } else if (adapter->netdev->mtu > ETH_FRAME_LEN + VLAN_HLEN + 4) { + rdlen = rx_ring->count * + sizeof(struct e1000_rx_desc); + adapter->clean_rx = e1000_clean_rx_irq_jumbo; + adapter->alloc_rx_buf = e1000_alloc_rx_buffers_jumbo; + } else { + rdlen = rx_ring->count * + sizeof(struct e1000_rx_desc); + adapter->clean_rx = e1000_clean_rx_irq; + adapter->alloc_rx_buf = e1000_alloc_rx_buffers; + } + + /* disable receives while setting up the descriptors */ + rctl = er32(RCTL); + ew32(RCTL, rctl & ~E1000_RCTL_EN); + e1e_flush(); + msleep(10); + + /* set the Receive Delay Timer Register */ + ew32(RDTR, adapter->rx_int_delay); + + /* irq moderation */ + ew32(RADV, adapter->rx_abs_int_delay); + if (adapter->itr_setting != 0) + ew32(ITR, + 1000000000 / (adapter->itr * 256)); + + ctrl_ext = er32(CTRL_EXT); + /* Reset delay timers after every interrupt */ + ctrl_ext |= E1000_CTRL_EXT_INT_TIMER_CLR; + /* Auto-Mask interrupts upon ICR access */ + ctrl_ext |= E1000_CTRL_EXT_IAME; + ew32(IAM, 0xffffffff); + ew32(CTRL_EXT, ctrl_ext); + e1e_flush(); + + /* Setup the HW Rx Head and Tail Descriptor Pointers and + * the Base and Length of the Rx Descriptor Ring */ + rdba = rx_ring->dma; + ew32(RDBAL, (rdba & DMA_32BIT_MASK)); + ew32(RDBAH, (rdba >> 32)); + ew32(RDLEN, rdlen); + ew32(RDH, 0); + ew32(RDT, 0); + rx_ring->head = E1000_RDH; + rx_ring->tail = E1000_RDT; + + /* Enable Receive Checksum Offload for TCP and UDP */ + rxcsum = er32(RXCSUM); + if (adapter->flags & FLAG_RX_CSUM_ENABLED) { + rxcsum |= E1000_RXCSUM_TUOFL; + + /* IPv4 payload checksum for UDP fragments must be + * used in conjunction with packet-split. */ + if (adapter->rx_ps_pages) + rxcsum |= E1000_RXCSUM_IPPCSE; + } else { + rxcsum &= ~E1000_RXCSUM_TUOFL; + /* no need to clear IPPCSE as it defaults to 0 */ + } + ew32(RXCSUM, rxcsum); + + /* Enable early receives on supported devices, only takes effect when + * packet size is equal or larger than the specified value (in 8 byte + * units), e.g. using jumbo frames when setting to E1000_ERT_2048 */ + if ((adapter->flags & FLAG_HAS_ERT) && + (adapter->netdev->mtu > ETH_DATA_LEN)) + ew32(ERT, E1000_ERT_2048); + + /* Enable Receives */ + ew32(RCTL, rctl); +} + +/** + * e1000_mc_addr_list_update - Update Multicast addresses + * @hw: pointer to the HW structure + * @mc_addr_list: array of multicast addresses to program + * @mc_addr_count: number of multicast addresses to program + * @rar_used_count: the first RAR register free to program + * @rar_count: total number of supported Receive Address Registers + * + * Updates the Receive Address Registers and Multicast Table Array. + * The caller must have a packed mc_addr_list of multicast addresses. + * The parameter rar_count will usually be hw->mac.rar_entry_count + * unless there are workarounds that change this. Currently no func pointer + * exists and all implementations are handled in the generic version of this + * function. 
+ **/ +static void e1000_mc_addr_list_update(struct e1000_hw *hw, u8 *mc_addr_list, + u32 mc_addr_count, u32 rar_used_count, + u32 rar_count) +{ + hw->mac.ops.mc_addr_list_update(hw, mc_addr_list, mc_addr_count, + rar_used_count, rar_count); +} + +/** + * e1000_set_multi - Multicast and Promiscuous mode set + * @netdev: network interface device structure + * + * The set_multi entry point is called whenever the multicast address + * list or the network interface flags are updated. This routine is + * responsible for configuring the hardware for proper multicast, + * promiscuous mode, and all-multi behavior. + **/ +static void e1000_set_multi(struct net_device *netdev) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + struct e1000_mac_info *mac = &hw->mac; + struct dev_mc_list *mc_ptr; + u8 *mta_list; + u32 rctl; + int i; + + /* Check for Promiscuous and All Multicast modes */ + + rctl = er32(RCTL); + + if (netdev->flags & IFF_PROMISC) { + rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE); + } else if (netdev->flags & IFF_ALLMULTI) { + rctl |= E1000_RCTL_MPE; + rctl &= ~E1000_RCTL_UPE; + } else { + rctl &= ~(E1000_RCTL_UPE | E1000_RCTL_MPE); + } + + ew32(RCTL, rctl); + + if (netdev->mc_count) { + mta_list = kmalloc(netdev->mc_count * 6, GFP_ATOMIC); + if (!mta_list) + return; + + /* prepare a packed array of only addresses. */ + mc_ptr = netdev->mc_list; + + for (i = 0; i < netdev->mc_count; i++) { + if (!mc_ptr) + break; + memcpy(mta_list + (i*ETH_ALEN), mc_ptr->dmi_addr, + ETH_ALEN); + mc_ptr = mc_ptr->next; + } + + e1000_mc_addr_list_update(hw, mta_list, i, 1, + mac->rar_entry_count); + kfree(mta_list); + } else { + /* + * if we're called from probe, we might not have + * anything to do here, so clear out the list + */ + e1000_mc_addr_list_update(hw, NULL, 0, 1, + mac->rar_entry_count); + } +} + +/** + * e1000_configure - configure the hardware for RX and TX + * @adapter: private board structure + **/ +static void e1000_configure(struct e1000_adapter *adapter) +{ + e1000_set_multi(adapter->netdev); + + e1000_restore_vlan(adapter); + e1000_init_manageability(adapter); + + e1000_configure_tx(adapter); + e1000_setup_rctl(adapter); + e1000_configure_rx(adapter); + adapter->alloc_rx_buf(adapter, + e1000_desc_unused(adapter->rx_ring)); +} + +/** + * e1000e_power_up_phy - restore link in case the phy was powered down + * @adapter: address of board private structure + * + * The phy may be powered down to save power and turn off link when the + * driver is unloaded and wake on lan is not enabled (among others) + * *** this routine MUST be followed by a call to e1000e_reset *** + **/ +void e1000e_power_up_phy(struct e1000_adapter *adapter) +{ + u16 mii_reg = 0; + + /* Just clear the power down bit to wake the phy back up */ + if (adapter->hw.media_type == e1000_media_type_copper) { + /* according to the manual, the phy will retain its + * settings across a power-down/up cycle */ + e1e_rphy(&adapter->hw, PHY_CONTROL, &mii_reg); + mii_reg &= ~MII_CR_POWER_DOWN; + e1e_wphy(&adapter->hw, PHY_CONTROL, mii_reg); + } + + adapter->hw.mac.ops.setup_link(&adapter->hw); +} + +/** + * e1000_power_down_phy - Power down the PHY + * + * Power down the PHY so no link is implied when interface is down + * The PHY cannot be powered down if management or WoL is active + */ +static void e1000_power_down_phy(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u16 mii_reg; + + /* WoL is enabled */ + if (adapter->wol) + return; + + /* non-copper PHY? 
*/ + if (adapter->hw.media_type != e1000_media_type_copper) + return; + + /* reset is blocked because of a SoL/IDER session */ + if (e1000e_check_mng_mode(hw) || + e1000_check_reset_block(hw)) + return; + + /* manageability (AMT) is enabled */ + if (er32(MANC) & E1000_MANC_SMBUS_EN) + return; + + /* power down the PHY */ + e1e_rphy(hw, PHY_CONTROL, &mii_reg); + mii_reg |= MII_CR_POWER_DOWN; + e1e_wphy(hw, PHY_CONTROL, mii_reg); + mdelay(1); +} + +/** + * e1000e_reset - bring the hardware into a known good state + * + * This function boots the hardware and enables some settings that + * require a configuration cycle of the hardware - those cannot be + * set/changed during runtime. After reset the device needs to be + * properly configured for rx, tx etc. + */ +void e1000e_reset(struct e1000_adapter *adapter) +{ + struct e1000_mac_info *mac = &adapter->hw.mac; + struct e1000_hw *hw = &adapter->hw; + u32 tx_space, min_tx_space, min_rx_space; + u16 hwm; + + if (mac->max_frame_size > ETH_FRAME_LEN + ETH_FCS_LEN) { + /* To maintain wire speed transmits, the Tx FIFO should be + * large enough to accommodate two full transmit packets, + * rounded up to the next 1KB and expressed in KB. Likewise, + * the Rx FIFO should be large enough to accommodate at least + * one full receive packet and is similarly rounded up and + * expressed in KB. */ + adapter->pba = er32(PBA); + /* upper 16 bits have Tx packet buffer allocation size in KB */ + tx_space = adapter->pba >> 16; + /* lower 16 bits have Rx packet buffer allocation size in KB */ + adapter->pba &= 0xffff; + /* the tx fifo also stores 16 bytes of information about the tx + * but don't include ethernet FCS because hardware appends it */ + min_tx_space = (mac->max_frame_size + + sizeof(struct e1000_tx_desc) - + ETH_FCS_LEN) * 2; + min_tx_space = ALIGN(min_tx_space, 1024); + min_tx_space >>= 10; + /* software strips receive CRC, so leave room for it */ + min_rx_space = mac->max_frame_size; + min_rx_space = ALIGN(min_rx_space, 1024); + min_rx_space >>= 10; + + /* If current Tx allocation is less than the min Tx FIFO size, + * and the min Tx FIFO size is less than the current Rx FIFO + * allocation, take space away from current Rx allocation */ + if (tx_space < min_tx_space && + ((min_tx_space - tx_space) < adapter->pba)) { + adapter->pba -= (min_tx_space - tx_space); + + /* if short on rx space, rx wins and must trump tx + * adjustment or use Early Receive if available */ + if ((adapter->pba < min_rx_space) && + (!(adapter->flags & FLAG_HAS_ERT))) + /* ERT enabled in e1000_configure_rx */ + adapter->pba = min_rx_space; + } + } + + ew32(PBA, adapter->pba); + + /* flow control settings */ + /* The high water mark must be low enough to fit one full frame + * (or the size used for early receive) above it in the Rx FIFO. 
+ * Set it to the lower of: + * - 90% of the Rx FIFO size, and + * - the full Rx FIFO size minus the early receive size (for parts + * with ERT support assuming ERT set to E1000_ERT_2048), or + * - the full Rx FIFO size minus one full frame */ + if (adapter->flags & FLAG_HAS_ERT) + hwm = min(((adapter->pba << 10) * 9 / 10), + ((adapter->pba << 10) - (E1000_ERT_2048 << 3))); + else + hwm = min(((adapter->pba << 10) * 9 / 10), + ((adapter->pba << 10) - mac->max_frame_size)); + + mac->fc_high_water = hwm & 0xFFF8; /* 8-byte granularity */ + mac->fc_low_water = mac->fc_high_water - 8; + + if (adapter->flags & FLAG_DISABLE_FC_PAUSE_TIME) + mac->fc_pause_time = 0xFFFF; + else + mac->fc_pause_time = E1000_FC_PAUSE_TIME; + mac->fc = mac->original_fc; + + /* Allow time for pending master requests to run */ + mac->ops.reset_hw(hw); + ew32(WUC, 0); + + if (mac->ops.init_hw(hw)) + ndev_err(adapter->netdev, "Hardware Error\n"); + + e1000_update_mng_vlan(adapter); + + /* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */ + ew32(VET, ETH_P_8021Q); + + e1000e_reset_adaptive(hw); + e1000_get_phy_info(hw); + + if (!(adapter->flags & FLAG_SMART_POWER_DOWN)) { + u16 phy_data = 0; + /* speed up time to link by disabling smart power down, ignore + * the return value of this function because there is nothing + * different we would do if it failed */ + e1e_rphy(hw, IGP02E1000_PHY_POWER_MGMT, &phy_data); + phy_data &= ~IGP02E1000_PM_SPD; + e1e_wphy(hw, IGP02E1000_PHY_POWER_MGMT, phy_data); + } + + e1000_release_manageability(adapter); +} + +int e1000e_up(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + + /* hardware has been reset, we need to reload some things */ + e1000_configure(adapter); + + clear_bit(__E1000_DOWN, &adapter->state); + + napi_enable(&adapter->napi); + e1000_irq_enable(adapter); + + /* fire a link change interrupt to start the watchdog */ + ew32(ICS, E1000_ICS_LSC); + return 0; +} + +void e1000e_down(struct e1000_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + struct e1000_hw *hw = &adapter->hw; + u32 tctl, rctl; + + /* signal that we're down so the interrupt handler does not + * reschedule our watchdog timer */ + set_bit(__E1000_DOWN, &adapter->state); + + /* disable receives in the hardware */ + rctl = er32(RCTL); + ew32(RCTL, rctl & ~E1000_RCTL_EN); + /* flush and sleep below */ + + netif_stop_queue(netdev); + + /* disable transmits in the hardware */ + tctl = er32(TCTL); + tctl &= ~E1000_TCTL_EN; + ew32(TCTL, tctl); + /* flush both disables and wait for them to finish */ + e1e_flush(); + msleep(10); + + napi_disable(&adapter->napi); + e1000_irq_disable(adapter); + + del_timer_sync(&adapter->watchdog_timer); + del_timer_sync(&adapter->phy_info_timer); + + netdev->tx_queue_len = adapter->tx_queue_len; + netif_carrier_off(netdev); + adapter->link_speed = 0; + adapter->link_duplex = 0; + + e1000e_reset(adapter); + e1000_clean_tx_ring(adapter); + e1000_clean_rx_ring(adapter); + + /* + * TODO: for power management, we could drop the link and + * pci_disable_device here. + */ +} + +void e1000e_reinit_locked(struct e1000_adapter *adapter) +{ + might_sleep(); + while (test_and_set_bit(__E1000_RESETTING, &adapter->state)) + msleep(1); + e1000e_down(adapter); + e1000e_up(adapter); + clear_bit(__E1000_RESETTING, &adapter->state); +} + +/** + * e1000_sw_init - Initialize general software structures (struct e1000_adapter) + * @adapter: board private structure to initialize + * + * e1000_sw_init initializes the Adapter private data structure. 
+ * Fields are initialized based on PCI device information and + * OS network device settings (MTU size). + **/ +static int __devinit e1000_sw_init(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + struct net_device *netdev = adapter->netdev; + + adapter->rx_buffer_len = ETH_FRAME_LEN + VLAN_HLEN + ETH_FCS_LEN; + adapter->rx_ps_bsize0 = 128; + hw->mac.max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN; + hw->mac.min_frame_size = ETH_ZLEN + ETH_FCS_LEN; + + adapter->tx_ring = kzalloc(sizeof(struct e1000_ring), GFP_KERNEL); + if (!adapter->tx_ring) + goto err; + + adapter->rx_ring = kzalloc(sizeof(struct e1000_ring), GFP_KERNEL); + if (!adapter->rx_ring) + goto err; + + spin_lock_init(&adapter->tx_queue_lock); + + /* Explicitly disable IRQ since the NIC can be in any state. */ + atomic_set(&adapter->irq_sem, 0); + e1000_irq_disable(adapter); + + spin_lock_init(&adapter->stats_lock); + + set_bit(__E1000_DOWN, &adapter->state); + return 0; + +err: + ndev_err(netdev, "Unable to allocate memory for queues\n"); + kfree(adapter->rx_ring); + kfree(adapter->tx_ring); + return -ENOMEM; +} + +/** + * e1000_open - Called when a network interface is made active + * @netdev: network interface device structure + * + * Returns 0 on success, negative value on failure + * + * The open entry point is called when a network interface is made + * active by the system (IFF_UP). At this point all resources needed + * for transmit and receive operations are allocated, the interrupt + * handler is registered with the OS, the watchdog timer is started, + * and the stack is notified that the interface is ready. + **/ +static int e1000_open(struct net_device *netdev) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + int err; + + /* disallow open during test */ + if (test_bit(__E1000_TESTING, &adapter->state)) + return -EBUSY; + + /* allocate transmit descriptors */ + err = e1000e_setup_tx_resources(adapter); + if (err) + goto err_setup_tx; + + /* allocate receive descriptors */ + err = e1000e_setup_rx_resources(adapter); + if (err) + goto err_setup_rx; + + e1000e_power_up_phy(adapter); + + adapter->mng_vlan_id = E1000_MNG_VLAN_NONE; + if ((adapter->hw.mng_cookie.status & + E1000_MNG_DHCP_COOKIE_STATUS_VLAN)) + e1000_update_mng_vlan(adapter); + + /* If AMT is enabled, let the firmware know that the network + * interface is now open */ + if ((adapter->flags & FLAG_HAS_AMT) && + e1000e_check_mng_mode(&adapter->hw)) + e1000_get_hw_control(adapter); + + /* before we allocate an interrupt, we must be ready to handle it. + * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt + * as soon as we call pci_request_irq, so we have to setup our + * clean_rx handler before we do so. 
*/ + e1000_configure(adapter); + + err = e1000_request_irq(adapter); + if (err) + goto err_req_irq; + + /* From here on the code is the same as e1000e_up() */ + clear_bit(__E1000_DOWN, &adapter->state); + + napi_enable(&adapter->napi); + + e1000_irq_enable(adapter); + + /* fire a link status change interrupt to start the watchdog */ + ew32(ICS, E1000_ICS_LSC); + + return 0; + +err_req_irq: + e1000_release_hw_control(adapter); + e1000_power_down_phy(adapter); + e1000e_free_rx_resources(adapter); +err_setup_rx: + e1000e_free_tx_resources(adapter); +err_setup_tx: + e1000e_reset(adapter); + + return err; +} + +/** + * e1000_close - Disables a network interface + * @netdev: network interface device structure + * + * Returns 0, this is not allowed to fail + * + * The close entry point is called when an interface is de-activated + * by the OS. The hardware is still under the drivers control, but + * needs to be disabled. A global MAC reset is issued to stop the + * hardware, and all transmit and receive resources are freed. + **/ +static int e1000_close(struct net_device *netdev) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + + WARN_ON(test_bit(__E1000_RESETTING, &adapter->state)); + e1000e_down(adapter); + e1000_power_down_phy(adapter); + e1000_free_irq(adapter); + + e1000e_free_tx_resources(adapter); + e1000e_free_rx_resources(adapter); + + /* kill manageability vlan ID if supported, but not if a vlan with + * the same ID is registered on the host OS (let 8021q kill it) */ + if ((adapter->hw.mng_cookie.status & + E1000_MNG_DHCP_COOKIE_STATUS_VLAN) && + !(adapter->vlgrp && + vlan_group_get_device(adapter->vlgrp, adapter->mng_vlan_id))) + e1000_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id); + + /* If AMT is enabled, let the firmware know that the network + * interface is now closed */ + if ((adapter->flags & FLAG_HAS_AMT) && + e1000e_check_mng_mode(&adapter->hw)) + e1000_release_hw_control(adapter); + + return 0; +} +/** + * e1000_set_mac - Change the Ethernet Address of the NIC + * @netdev: network interface device structure + * @p: pointer to an address structure + * + * Returns 0 on success, negative on failure + **/ +static int e1000_set_mac(struct net_device *netdev, void *p) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct sockaddr *addr = p; + + if (!is_valid_ether_addr(addr->sa_data)) + return -EADDRNOTAVAIL; + + memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); + memcpy(adapter->hw.mac.addr, addr->sa_data, netdev->addr_len); + + e1000e_rar_set(&adapter->hw, adapter->hw.mac.addr, 0); + + if (adapter->flags & FLAG_RESET_OVERWRITES_LAA) { + /* activate the work around */ + e1000e_set_laa_state_82571(&adapter->hw, 1); + + /* Hold a copy of the LAA in RAR[14] This is done so that + * between the time RAR[0] gets clobbered and the time it + * gets fixed (in e1000_watchdog), the actual LAA is in one + * of the RARs and no incoming packets directed to this port + * are dropped. 
Eventually the LAA will be in RAR[0] and + * RAR[14] */ + e1000e_rar_set(&adapter->hw, + adapter->hw.mac.addr, + adapter->hw.mac.rar_entry_count - 1); + } + + return 0; +} + +/* Need to wait a few seconds after link up to get diagnostic information from + * the phy */ +static void e1000_update_phy_info(unsigned long data) +{ + struct e1000_adapter *adapter = (struct e1000_adapter *) data; + e1000_get_phy_info(&adapter->hw); +} + +/** + * e1000e_update_stats - Update the board statistics counters + * @adapter: board private structure + **/ +void e1000e_update_stats(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + struct pci_dev *pdev = adapter->pdev; + unsigned long irq_flags; + u16 phy_tmp; + +#define PHY_IDLE_ERROR_COUNT_MASK 0x00FF + + /* + * Prevent stats update while adapter is being reset, or if the pci + * connection is down. + */ + if (adapter->link_speed == 0) + return; + if (pci_channel_offline(pdev)) + return; + + spin_lock_irqsave(&adapter->stats_lock, irq_flags); + + /* these counters are modified from e1000_adjust_tbi_stats, + * called from the interrupt context, so they must only + * be written while holding adapter->stats_lock + */ + + adapter->stats.crcerrs += er32(CRCERRS); + adapter->stats.gprc += er32(GPRC); + adapter->stats.gorcl += er32(GORCL); + adapter->stats.gorch += er32(GORCH); + adapter->stats.bprc += er32(BPRC); + adapter->stats.mprc += er32(MPRC); + adapter->stats.roc += er32(ROC); + + if (adapter->flags & FLAG_HAS_STATS_PTC_PRC) { + adapter->stats.prc64 += er32(PRC64); + adapter->stats.prc127 += er32(PRC127); + adapter->stats.prc255 += er32(PRC255); + adapter->stats.prc511 += er32(PRC511); + adapter->stats.prc1023 += er32(PRC1023); + adapter->stats.prc1522 += er32(PRC1522); + adapter->stats.symerrs += er32(SYMERRS); + adapter->stats.sec += er32(SEC); + } + + adapter->stats.mpc += er32(MPC); + adapter->stats.scc += er32(SCC); + adapter->stats.ecol += er32(ECOL); + adapter->stats.mcc += er32(MCC); + adapter->stats.latecol += er32(LATECOL); + adapter->stats.dc += er32(DC); + adapter->stats.rlec += er32(RLEC); + adapter->stats.xonrxc += er32(XONRXC); + adapter->stats.xontxc += er32(XONTXC); + adapter->stats.xoffrxc += er32(XOFFRXC); + adapter->stats.xofftxc += er32(XOFFTXC); + adapter->stats.fcruc += er32(FCRUC); + adapter->stats.gptc += er32(GPTC); + adapter->stats.gotcl += er32(GOTCL); + adapter->stats.gotch += er32(GOTCH); + adapter->stats.rnbc += er32(RNBC); + adapter->stats.ruc += er32(RUC); + adapter->stats.rfc += er32(RFC); + adapter->stats.rjc += er32(RJC); + adapter->stats.torl += er32(TORL); + adapter->stats.torh += er32(TORH); + adapter->stats.totl += er32(TOTL); + adapter->stats.toth += er32(TOTH); + adapter->stats.tpr += er32(TPR); + + if (adapter->flags & FLAG_HAS_STATS_PTC_PRC) { + adapter->stats.ptc64 += er32(PTC64); + adapter->stats.ptc127 += er32(PTC127); + adapter->stats.ptc255 += er32(PTC255); + adapter->stats.ptc511 += er32(PTC511); + adapter->stats.ptc1023 += er32(PTC1023); + adapter->stats.ptc1522 += er32(PTC1522); + } + + adapter->stats.mptc += er32(MPTC); + adapter->stats.bptc += er32(BPTC); + + /* used for adaptive IFS */ + + hw->mac.tx_packet_delta = er32(TPT); + adapter->stats.tpt += hw->mac.tx_packet_delta; + hw->mac.collision_delta = er32(COLC); + adapter->stats.colc += hw->mac.collision_delta; + + adapter->stats.algnerrc += er32(ALGNERRC); + adapter->stats.rxerrc += er32(RXERRC); + adapter->stats.tncrs += er32(TNCRS); + adapter->stats.cexterr += er32(CEXTERR); + adapter->stats.tsctc += er32(TSCTC); + 
adapter->stats.tsctfc += er32(TSCTFC); + + adapter->stats.iac += er32(IAC); + + if (adapter->flags & FLAG_HAS_STATS_ICR_ICT) { + adapter->stats.icrxoc += er32(ICRXOC); + adapter->stats.icrxptc += er32(ICRXPTC); + adapter->stats.icrxatc += er32(ICRXATC); + adapter->stats.ictxptc += er32(ICTXPTC); + adapter->stats.ictxatc += er32(ICTXATC); + adapter->stats.ictxqec += er32(ICTXQEC); + adapter->stats.ictxqmtc += er32(ICTXQMTC); + adapter->stats.icrxdmtc += er32(ICRXDMTC); + } + + /* Fill out the OS statistics structure */ + adapter->net_stats.rx_packets = adapter->stats.gprc; + adapter->net_stats.tx_packets = adapter->stats.gptc; + adapter->net_stats.rx_bytes = adapter->stats.gorcl; + adapter->net_stats.tx_bytes = adapter->stats.gotcl; + adapter->net_stats.multicast = adapter->stats.mprc; + adapter->net_stats.collisions = adapter->stats.colc; + + /* Rx Errors */ + + /* RLEC on some newer hardware can be incorrect so build + * our own version based on RUC and ROC */ + adapter->net_stats.rx_errors = adapter->stats.rxerrc + + adapter->stats.crcerrs + adapter->stats.algnerrc + + adapter->stats.ruc + adapter->stats.roc + + adapter->stats.cexterr; + adapter->net_stats.rx_length_errors = adapter->stats.ruc + + adapter->stats.roc; + adapter->net_stats.rx_crc_errors = adapter->stats.crcerrs; + adapter->net_stats.rx_frame_errors = adapter->stats.algnerrc; + adapter->net_stats.rx_missed_errors = adapter->stats.mpc; + + /* Tx Errors */ + adapter->net_stats.tx_errors = adapter->stats.ecol + + adapter->stats.latecol; + adapter->net_stats.tx_aborted_errors = adapter->stats.ecol; + adapter->net_stats.tx_window_errors = adapter->stats.latecol; + adapter->net_stats.tx_carrier_errors = adapter->stats.tncrs; + + /* Tx Dropped needs to be maintained elsewhere */ + + /* Phy Stats */ + if (hw->media_type == e1000_media_type_copper) { + if ((adapter->link_speed == SPEED_1000) && + (!e1e_rphy(hw, PHY_1000T_STATUS, &phy_tmp))) { + phy_tmp &= PHY_IDLE_ERROR_COUNT_MASK; + adapter->phy_stats.idle_errors += phy_tmp; + } + } + + /* Management Stats */ + adapter->stats.mgptc += er32(MGTPTC); + adapter->stats.mgprc += er32(MGTPRC); + adapter->stats.mgpdc += er32(MGTPDC); + + spin_unlock_irqrestore(&adapter->stats_lock, irq_flags); +} + +static void e1000_print_link_info(struct e1000_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + struct e1000_hw *hw = &adapter->hw; + u32 ctrl = er32(CTRL); + + ndev_info(netdev, + "Link is Up %d Mbps %s, Flow Control: %s\n", + adapter->link_speed, + (adapter->link_duplex == FULL_DUPLEX) ? + "Full Duplex" : "Half Duplex", + ((ctrl & E1000_CTRL_TFCE) && (ctrl & E1000_CTRL_RFCE)) ? + "RX/TX" : + ((ctrl & E1000_CTRL_RFCE) ? "RX" : + ((ctrl & E1000_CTRL_TFCE) ? 
"TX" : "None" ))); +} + +/** + * e1000_watchdog - Timer Call-back + * @data: pointer to adapter cast into an unsigned long + **/ +static void e1000_watchdog(unsigned long data) +{ + struct e1000_adapter *adapter = (struct e1000_adapter *) data; + + /* Do the rest outside of interrupt context */ + schedule_work(&adapter->watchdog_task); + + /* TODO: make this use queue_delayed_work() */ +} + +static void e1000_watchdog_task(struct work_struct *work) +{ + struct e1000_adapter *adapter = container_of(work, + struct e1000_adapter, watchdog_task); + + struct net_device *netdev = adapter->netdev; + struct e1000_mac_info *mac = &adapter->hw.mac; + struct e1000_ring *tx_ring = adapter->tx_ring; + struct e1000_hw *hw = &adapter->hw; + u32 link, tctl; + s32 ret_val; + int tx_pending = 0; + + if ((netif_carrier_ok(netdev)) && + (er32(STATUS) & E1000_STATUS_LU)) + goto link_up; + + ret_val = mac->ops.check_for_link(hw); + if ((ret_val == E1000_ERR_PHY) && + (adapter->hw.phy.type == e1000_phy_igp_3) && + (er32(CTRL) & + E1000_PHY_CTRL_GBE_DISABLE)) { + /* See e1000_kmrn_lock_loss_workaround_ich8lan() */ + ndev_info(netdev, + "Gigabit has been disabled, downgrading speed\n"); + } + + if ((e1000e_enable_tx_pkt_filtering(hw)) && + (adapter->mng_vlan_id != adapter->hw.mng_cookie.vlan_id)) + e1000_update_mng_vlan(adapter); + + if ((adapter->hw.media_type == e1000_media_type_internal_serdes) && + !(er32(TXCW) & E1000_TXCW_ANE)) + link = adapter->hw.mac.serdes_has_link; + else + link = er32(STATUS) & E1000_STATUS_LU; + + if (link) { + if (!netif_carrier_ok(netdev)) { + bool txb2b = 1; + mac->ops.get_link_up_info(&adapter->hw, + &adapter->link_speed, + &adapter->link_duplex); + e1000_print_link_info(adapter); + /* tweak tx_queue_len according to speed/duplex + * and adjust the timeout factor */ + netdev->tx_queue_len = adapter->tx_queue_len; + adapter->tx_timeout_factor = 1; + switch (adapter->link_speed) { + case SPEED_10: + txb2b = 0; + netdev->tx_queue_len = 10; + adapter->tx_timeout_factor = 14; + break; + case SPEED_100: + txb2b = 0; + netdev->tx_queue_len = 100; + /* maybe add some timeout factor ? 
*/ + break; + } + + /* workaround: re-program speed mode bit after + * link-up event */ + if ((adapter->flags & FLAG_TARC_SPEED_MODE_BIT) && + !txb2b) { + u32 tarc0; + tarc0 = er32(TARC0); + tarc0 &= ~SPEED_MODE_BIT; + ew32(TARC0, tarc0); + } + + /* disable TSO for pcie and 10/100 speeds, to avoid + * some hardware issues */ + if (!(adapter->flags & FLAG_TSO_FORCE)) { + switch (adapter->link_speed) { + case SPEED_10: + case SPEED_100: + ndev_info(netdev, + "10/100 speed: disabling TSO\n"); + netdev->features &= ~NETIF_F_TSO; + netdev->features &= ~NETIF_F_TSO6; + break; + case SPEED_1000: + netdev->features |= NETIF_F_TSO; + netdev->features |= NETIF_F_TSO6; + break; + default: + /* oops */ + break; + } + } + + /* enable transmits in the hardware, need to do this + * after setting TARC0 */ + tctl = er32(TCTL); + tctl |= E1000_TCTL_EN; + ew32(TCTL, tctl); + + netif_carrier_on(netdev); + netif_wake_queue(netdev); + + if (!test_bit(__E1000_DOWN, &adapter->state)) + mod_timer(&adapter->phy_info_timer, + round_jiffies(jiffies + 2 * HZ)); + } else { + /* make sure the receive unit is started */ + if (adapter->flags & FLAG_RX_NEEDS_RESTART) { + u32 rctl = er32(RCTL); + ew32(RCTL, rctl | + E1000_RCTL_EN); + } + } + } else { + if (netif_carrier_ok(netdev)) { + adapter->link_speed = 0; + adapter->link_duplex = 0; + ndev_info(netdev, "Link is Down\n"); + netif_carrier_off(netdev); + netif_stop_queue(netdev); + if (!test_bit(__E1000_DOWN, &adapter->state)) + mod_timer(&adapter->phy_info_timer, + round_jiffies(jiffies + 2 * HZ)); + + if (adapter->flags & FLAG_RX_NEEDS_RESTART) + schedule_work(&adapter->reset_task); + } + } + +link_up: + e1000e_update_stats(adapter); + + mac->tx_packet_delta = adapter->stats.tpt - adapter->tpt_old; + adapter->tpt_old = adapter->stats.tpt; + mac->collision_delta = adapter->stats.colc - adapter->colc_old; + adapter->colc_old = adapter->stats.colc; + + adapter->gorcl = adapter->stats.gorcl - adapter->gorcl_old; + adapter->gorcl_old = adapter->stats.gorcl; + adapter->gotcl = adapter->stats.gotcl - adapter->gotcl_old; + adapter->gotcl_old = adapter->stats.gotcl; + + e1000e_update_adaptive(&adapter->hw); + + if (!netif_carrier_ok(netdev)) { + tx_pending = (e1000_desc_unused(tx_ring) + 1 < + tx_ring->count); + if (tx_pending) { + /* We've lost link, so the controller stops DMA, + * but we've got queued Tx work that's never going + * to get done, so reset controller to flush Tx. + * (Do the reset outside of interrupt context). */ + adapter->tx_timeout_count++; + schedule_work(&adapter->reset_task); + } + } + + /* Cause software interrupt to ensure rx ring is cleaned */ + ew32(ICS, E1000_ICS_RXDMT0); + + /* Force detection of hung controller every watchdog period */ + adapter->detect_tx_hung = 1; + + /* With 82571 controllers, LAA may be overwritten due to controller + * reset from the other port. 
Set the appropriate LAA in RAR[0] */ + if (e1000e_get_laa_state_82571(hw)) + e1000e_rar_set(hw, adapter->hw.mac.addr, 0); + + /* Reset the timer */ + if (!test_bit(__E1000_DOWN, &adapter->state)) + mod_timer(&adapter->watchdog_timer, + round_jiffies(jiffies + 2 * HZ)); +} + +#define E1000_TX_FLAGS_CSUM 0x00000001 +#define E1000_TX_FLAGS_VLAN 0x00000002 +#define E1000_TX_FLAGS_TSO 0x00000004 +#define E1000_TX_FLAGS_IPV4 0x00000008 +#define E1000_TX_FLAGS_VLAN_MASK 0xffff0000 +#define E1000_TX_FLAGS_VLAN_SHIFT 16 + +static int e1000_tso(struct e1000_adapter *adapter, + struct sk_buff *skb) +{ + struct e1000_ring *tx_ring = adapter->tx_ring; + struct e1000_context_desc *context_desc; + struct e1000_buffer *buffer_info; + unsigned int i; + u32 cmd_length = 0; + u16 ipcse = 0, tucse, mss; + u8 ipcss, ipcso, tucss, tucso, hdr_len; + int err; + + if (skb_is_gso(skb)) { + if (skb_header_cloned(skb)) { + err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC); + if (err) + return err; + } + + hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); + mss = skb_shinfo(skb)->gso_size; + if (skb->protocol == htons(ETH_P_IP)) { + struct iphdr *iph = ip_hdr(skb); + iph->tot_len = 0; + iph->check = 0; + tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr, + iph->daddr, 0, + IPPROTO_TCP, + 0); + cmd_length = E1000_TXD_CMD_IP; + ipcse = skb_transport_offset(skb) - 1; + } else if (skb_shinfo(skb)->gso_type == SKB_GSO_TCPV6) { + ipv6_hdr(skb)->payload_len = 0; + tcp_hdr(skb)->check = + ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr, + &ipv6_hdr(skb)->daddr, + 0, IPPROTO_TCP, 0); + ipcse = 0; + } + ipcss = skb_network_offset(skb); + ipcso = (void *)&(ip_hdr(skb)->check) - (void *)skb->data; + tucss = skb_transport_offset(skb); + tucso = (void *)&(tcp_hdr(skb)->check) - (void *)skb->data; + tucse = 0; + + cmd_length |= (E1000_TXD_CMD_DEXT | E1000_TXD_CMD_TSE | + E1000_TXD_CMD_TCP | (skb->len - (hdr_len))); + + i = tx_ring->next_to_use; + context_desc = E1000_CONTEXT_DESC(*tx_ring, i); + buffer_info = &tx_ring->buffer_info[i]; + + context_desc->lower_setup.ip_fields.ipcss = ipcss; + context_desc->lower_setup.ip_fields.ipcso = ipcso; + context_desc->lower_setup.ip_fields.ipcse = cpu_to_le16(ipcse); + context_desc->upper_setup.tcp_fields.tucss = tucss; + context_desc->upper_setup.tcp_fields.tucso = tucso; + context_desc->upper_setup.tcp_fields.tucse = cpu_to_le16(tucse); + context_desc->tcp_seg_setup.fields.mss = cpu_to_le16(mss); + context_desc->tcp_seg_setup.fields.hdr_len = hdr_len; + context_desc->cmd_and_length = cpu_to_le32(cmd_length); + + buffer_info->time_stamp = jiffies; + buffer_info->next_to_watch = i; + + i++; + if (i == tx_ring->count) + i = 0; + tx_ring->next_to_use = i; + + return 1; + } + + return 0; +} + +static bool e1000_tx_csum(struct e1000_adapter *adapter, struct sk_buff *skb) +{ + struct e1000_ring *tx_ring = adapter->tx_ring; + struct e1000_context_desc *context_desc; + struct e1000_buffer *buffer_info; + unsigned int i; + u8 css; + + if (skb->ip_summed == CHECKSUM_PARTIAL) { + css = skb_transport_offset(skb); + + i = tx_ring->next_to_use; + buffer_info = &tx_ring->buffer_info[i]; + context_desc = E1000_CONTEXT_DESC(*tx_ring, i); + + context_desc->lower_setup.ip_config = 0; + context_desc->upper_setup.tcp_fields.tucss = css; + context_desc->upper_setup.tcp_fields.tucso = + css + skb->csum_offset; + context_desc->upper_setup.tcp_fields.tucse = 0; + context_desc->tcp_seg_setup.data = 0; + context_desc->cmd_and_length = cpu_to_le32(E1000_TXD_CMD_DEXT); + + buffer_info->time_stamp = jiffies; + 
buffer_info->next_to_watch = i; + + i++; + if (i == tx_ring->count) + i = 0; + tx_ring->next_to_use = i; + + return 1; + } + + return 0; +} + +#define E1000_MAX_PER_TXD 8192 +#define E1000_MAX_TXD_PWR 12 + +static int e1000_tx_map(struct e1000_adapter *adapter, + struct sk_buff *skb, unsigned int first, + unsigned int max_per_txd, unsigned int nr_frags, + unsigned int mss) +{ + struct e1000_ring *tx_ring = adapter->tx_ring; + struct e1000_buffer *buffer_info; + unsigned int len = skb->len - skb->data_len; + unsigned int offset = 0, size, count = 0, i; + unsigned int f; + + i = tx_ring->next_to_use; + + while (len) { + buffer_info = &tx_ring->buffer_info[i]; + size = min(len, max_per_txd); + + /* Workaround for premature desc write-backs + * in TSO mode. Append 4-byte sentinel desc */ + if (mss && !nr_frags && size == len && size > 8) + size -= 4; + + buffer_info->length = size; + /* set time_stamp *before* dma to help avoid a possible race */ + buffer_info->time_stamp = jiffies; + buffer_info->dma = + pci_map_single(adapter->pdev, + skb->data + offset, + size, + PCI_DMA_TODEVICE); + if (pci_dma_mapping_error(buffer_info->dma)) { + dev_err(&adapter->pdev->dev, "TX DMA map failed\n"); + adapter->tx_dma_failed++; + return -1; + } + buffer_info->next_to_watch = i; + + len -= size; + offset += size; + count++; + i++; + if (i == tx_ring->count) + i = 0; + } + + for (f = 0; f < nr_frags; f++) { + struct skb_frag_struct *frag; + + frag = &skb_shinfo(skb)->frags[f]; + len = frag->size; + offset = frag->page_offset; + + while (len) { + buffer_info = &tx_ring->buffer_info[i]; + size = min(len, max_per_txd); + /* Workaround for premature desc write-backs + * in TSO mode. Append 4-byte sentinel desc */ + if (mss && f == (nr_frags-1) && size == len && size > 8) + size -= 4; + + buffer_info->length = size; + buffer_info->time_stamp = jiffies; + buffer_info->dma = + pci_map_page(adapter->pdev, + frag->page, + offset, + size, + PCI_DMA_TODEVICE); + if (pci_dma_mapping_error(buffer_info->dma)) { + dev_err(&adapter->pdev->dev, + "TX DMA page map failed\n"); + adapter->tx_dma_failed++; + return -1; + } + + buffer_info->next_to_watch = i; + + len -= size; + offset += size; + count++; + + i++; + if (i == tx_ring->count) + i = 0; + } + } + + if (i == 0) + i = tx_ring->count - 1; + else + i--; + + tx_ring->buffer_info[i].skb = skb; + tx_ring->buffer_info[first].next_to_watch = i; + + return count; +} + +static void e1000_tx_queue(struct e1000_adapter *adapter, + int tx_flags, int count) +{ + struct e1000_ring *tx_ring = adapter->tx_ring; + struct e1000_tx_desc *tx_desc = NULL; + struct e1000_buffer *buffer_info; + u32 txd_upper = 0, txd_lower = E1000_TXD_CMD_IFCS; + unsigned int i; + + if (tx_flags & E1000_TX_FLAGS_TSO) { + txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D | + E1000_TXD_CMD_TSE; + txd_upper |= E1000_TXD_POPTS_TXSM << 8; + + if (tx_flags & E1000_TX_FLAGS_IPV4) + txd_upper |= E1000_TXD_POPTS_IXSM << 8; + } + + if (tx_flags & E1000_TX_FLAGS_CSUM) { + txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D; + txd_upper |= E1000_TXD_POPTS_TXSM << 8; + } + + if (tx_flags & E1000_TX_FLAGS_VLAN) { + txd_lower |= E1000_TXD_CMD_VLE; + txd_upper |= (tx_flags & E1000_TX_FLAGS_VLAN_MASK); + } + + i = tx_ring->next_to_use; + + while (count--) { + buffer_info = &tx_ring->buffer_info[i]; + tx_desc = E1000_TX_DESC(*tx_ring, i); + tx_desc->buffer_addr = cpu_to_le64(buffer_info->dma); + tx_desc->lower.data = + cpu_to_le32(txd_lower | buffer_info->length); + tx_desc->upper.data = cpu_to_le32(txd_upper); + + i++; + if (i 
== tx_ring->count) + i = 0; + } + + tx_desc->lower.data |= cpu_to_le32(adapter->txd_cmd); + + /* Force memory writes to complete before letting h/w + * know there are new descriptors to fetch. (Only + * applicable for weak-ordered memory model archs, + * such as IA-64). */ + wmb(); + + tx_ring->next_to_use = i; + writel(i, adapter->hw.hw_addr + tx_ring->tail); + /* we need this if more than one processor can write to our tail + * at a time, it synchronizes IO on IA64/Altix systems */ + mmiowb(); +} + +#define MINIMUM_DHCP_PACKET_SIZE 282 +static int e1000_transfer_dhcp_info(struct e1000_adapter *adapter, + struct sk_buff *skb) +{ + struct e1000_hw *hw = &adapter->hw; + u16 length, offset; + + if (vlan_tx_tag_present(skb)) { + if (!((vlan_tx_tag_get(skb) == adapter->hw.mng_cookie.vlan_id) + && (adapter->hw.mng_cookie.status & + E1000_MNG_DHCP_COOKIE_STATUS_VLAN))) + return 0; + } + + if (skb->len <= MINIMUM_DHCP_PACKET_SIZE) + return 0; + + if (((struct ethhdr *) skb->data)->h_proto != htons(ETH_P_IP)) + return 0; + + { + const struct iphdr *ip = (struct iphdr *)((u8 *)skb->data+14); + struct udphdr *udp; + + if (ip->protocol != IPPROTO_UDP) + return 0; + + udp = (struct udphdr *)((u8 *)ip + (ip->ihl << 2)); + if (ntohs(udp->dest) != 67) + return 0; + + offset = (u8 *)udp + 8 - skb->data; + length = skb->len - offset; + return e1000e_mng_write_dhcp_info(hw, (u8 *)udp + 8, length); + } + + return 0; +} + +static int __e1000_maybe_stop_tx(struct net_device *netdev, int size) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + + netif_stop_queue(netdev); + /* Herbert's original patch had: + * smp_mb__after_netif_stop_queue(); + * but since that doesn't exist yet, just open code it. */ + smp_mb(); + + /* We need to check again in a case another CPU has just + * made room available. */ + if (e1000_desc_unused(adapter->tx_ring) < size) + return -EBUSY; + + /* A reprieve! */ + netif_start_queue(netdev); + ++adapter->restart_queue; + return 0; +} + +static int e1000_maybe_stop_tx(struct net_device *netdev, int size) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + + if (e1000_desc_unused(adapter->tx_ring) >= size) + return 0; + return __e1000_maybe_stop_tx(netdev, size); +} + +#define TXD_USE_COUNT(S, X) (((S) >> (X)) + 1 ) +static int e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_ring *tx_ring = adapter->tx_ring; + unsigned int first; + unsigned int max_per_txd = E1000_MAX_PER_TXD; + unsigned int max_txd_pwr = E1000_MAX_TXD_PWR; + unsigned int tx_flags = 0; + unsigned int len = skb->len - skb->data_len; + unsigned long irq_flags; + unsigned int nr_frags; + unsigned int mss; + int count = 0; + int tso; + unsigned int f; + + if (test_bit(__E1000_DOWN, &adapter->state)) { + dev_kfree_skb_any(skb); + return NETDEV_TX_OK; + } + + if (skb->len <= 0) { + dev_kfree_skb_any(skb); + return NETDEV_TX_OK; + } + + mss = skb_shinfo(skb)->gso_size; + /* The controller does a simple calculation to + * make sure there is enough room in the FIFO before + * initiating the DMA for each buffer. The calc is: + * 4 = ceil(buffer len/mss). To make sure we don't + * overrun the FIFO, adjust the max buffer len if mss + * drops. 
*/ + if (mss) { + u8 hdr_len; + max_per_txd = min(mss << 2, max_per_txd); + max_txd_pwr = fls(max_per_txd) - 1; + + /* TSO Workaround for 82571/2/3 Controllers -- if skb->data + * points to just header, pull a few bytes of payload from + * frags into skb->data */ + hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); + if (skb->data_len && (hdr_len == len)) { + unsigned int pull_size; + + pull_size = min((unsigned int)4, skb->data_len); + if (!__pskb_pull_tail(skb, pull_size)) { + ndev_err(netdev, + "__pskb_pull_tail failed.\n"); + dev_kfree_skb_any(skb); + return NETDEV_TX_OK; + } + len = skb->len - skb->data_len; + } + } + + /* reserve a descriptor for the offload context */ + if ((mss) || (skb->ip_summed == CHECKSUM_PARTIAL)) + count++; + count++; + + count += TXD_USE_COUNT(len, max_txd_pwr); + + nr_frags = skb_shinfo(skb)->nr_frags; + for (f = 0; f < nr_frags; f++) + count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size, + max_txd_pwr); + + if (adapter->hw.mac.tx_pkt_filtering) + e1000_transfer_dhcp_info(adapter, skb); + + if (!spin_trylock_irqsave(&adapter->tx_queue_lock, irq_flags)) + /* Collision - tell upper layer to requeue */ + return NETDEV_TX_LOCKED; + + /* need: count + 2 desc gap to keep tail from touching + * head, otherwise try next time */ + if (e1000_maybe_stop_tx(netdev, count + 2)) { + spin_unlock_irqrestore(&adapter->tx_queue_lock, irq_flags); + return NETDEV_TX_BUSY; + } + + if (adapter->vlgrp && vlan_tx_tag_present(skb)) { + tx_flags |= E1000_TX_FLAGS_VLAN; + tx_flags |= (vlan_tx_tag_get(skb) << E1000_TX_FLAGS_VLAN_SHIFT); + } + + first = tx_ring->next_to_use; + + tso = e1000_tso(adapter, skb); + if (tso < 0) { + dev_kfree_skb_any(skb); + spin_unlock_irqrestore(&adapter->tx_queue_lock, irq_flags); + return NETDEV_TX_OK; + } + + if (tso) + tx_flags |= E1000_TX_FLAGS_TSO; + else if (e1000_tx_csum(adapter, skb)) + tx_flags |= E1000_TX_FLAGS_CSUM; + + /* Old method was to assume IPv4 packet by default if TSO was enabled. + * 82571 hardware supports TSO capabilities for IPv6 as well... + * no longer assume, we must. */ + if (skb->protocol == htons(ETH_P_IP)) + tx_flags |= E1000_TX_FLAGS_IPV4; + + count = e1000_tx_map(adapter, skb, first, max_per_txd, nr_frags, mss); + if (count < 0) { + /* handle pci_map_single() error in e1000_tx_map */ + dev_kfree_skb_any(skb); + spin_unlock_irqrestore(&adapter->tx_queue_lock, irq_flags); + return NETDEV_TX_OK; + } + + e1000_tx_queue(adapter, tx_flags, count); + + netdev->trans_start = jiffies; + + /* Make sure there is space in the ring for the next send. */ + e1000_maybe_stop_tx(netdev, MAX_SKB_FRAGS + 2); + + spin_unlock_irqrestore(&adapter->tx_queue_lock, irq_flags); + return NETDEV_TX_OK; +} + +/** + * e1000_tx_timeout - Respond to a Tx Hang + * @netdev: network interface device structure + **/ +static void e1000_tx_timeout(struct net_device *netdev) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + + /* Do the reset outside of interrupt context */ + adapter->tx_timeout_count++; + schedule_work(&adapter->reset_task); +} + +static void e1000_reset_task(struct work_struct *work) +{ + struct e1000_adapter *adapter; + adapter = container_of(work, struct e1000_adapter, reset_task); + + e1000e_reinit_locked(adapter); +} + +/** + * e1000_get_stats - Get System Network Statistics + * @netdev: network interface device structure + * + * Returns the address of the device statistics structure. + * The statistics are actually updated from the timer callback. 
+ **/ +static struct net_device_stats *e1000_get_stats(struct net_device *netdev) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + + /* only return the current stats */ + return &adapter->net_stats; +} + +/** + * e1000_change_mtu - Change the Maximum Transfer Unit + * @netdev: network interface device structure + * @new_mtu: new value for maximum frame size + * + * Returns 0 on success, negative on failure + **/ +static int e1000_change_mtu(struct net_device *netdev, int new_mtu) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN; + + if ((max_frame < ETH_ZLEN + ETH_FCS_LEN) || + (max_frame > MAX_JUMBO_FRAME_SIZE)) { + ndev_err(netdev, "Invalid MTU setting\n"); + return -EINVAL; + } + + /* Jumbo frame size limits */ + if (max_frame > ETH_FRAME_LEN + ETH_FCS_LEN) { + if (!(adapter->flags & FLAG_HAS_JUMBO_FRAMES)) { + ndev_err(netdev, "Jumbo Frames not supported.\n"); + return -EINVAL; + } + if (adapter->hw.phy.type == e1000_phy_ife) { + ndev_err(netdev, "Jumbo Frames not supported.\n"); + return -EINVAL; + } + } + +#define MAX_STD_JUMBO_FRAME_SIZE 9234 + if (max_frame > MAX_STD_JUMBO_FRAME_SIZE) { + ndev_err(netdev, "MTU > 9216 not supported.\n"); + return -EINVAL; + } + + while (test_and_set_bit(__E1000_RESETTING, &adapter->state)) + msleep(1); + /* e1000e_down has a dependency on max_frame_size */ + adapter->hw.mac.max_frame_size = max_frame; + if (netif_running(netdev)) + e1000e_down(adapter); + + /* NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN + * means we reserve 2 more, this pushes us to allocate from the next + * larger slab size. + * i.e. RXBUFFER_2048 --> size-4096 slab + * however with the new *_jumbo* routines, jumbo receives will use + * fragmented skbs */ + + if (max_frame <= 256) + adapter->rx_buffer_len = 256; + else if (max_frame <= 512) + adapter->rx_buffer_len = 512; + else if (max_frame <= 1024) + adapter->rx_buffer_len = 1024; + else if (max_frame <= 2048) + adapter->rx_buffer_len = 2048; + else + adapter->rx_buffer_len = 4096; + + /* adjust allocation if LPE protects us, and we aren't using SBP */ + if ((max_frame == ETH_FRAME_LEN + ETH_FCS_LEN) || + (max_frame == ETH_FRAME_LEN + VLAN_HLEN + ETH_FCS_LEN)) + adapter->rx_buffer_len = ETH_FRAME_LEN + VLAN_HLEN + + ETH_FCS_LEN ; + + ndev_info(netdev, "changing MTU from %d to %d\n", + netdev->mtu, new_mtu); + netdev->mtu = new_mtu; + + if (netif_running(netdev)) + e1000e_up(adapter); + else + e1000e_reset(adapter); + + clear_bit(__E1000_RESETTING, &adapter->state); + + return 0; +} + +static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, + int cmd) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct mii_ioctl_data *data = if_mii(ifr); + unsigned long irq_flags; + + if (adapter->hw.media_type != e1000_media_type_copper) + return -EOPNOTSUPP; + + switch (cmd) { + case SIOCGMIIPHY: + data->phy_id = adapter->hw.phy.addr; + break; + case SIOCGMIIREG: + if (!capable(CAP_NET_ADMIN)) + return -EPERM; + spin_lock_irqsave(&adapter->stats_lock, irq_flags); + if (e1e_rphy(&adapter->hw, data->reg_num & 0x1F, + &data->val_out)) { + spin_unlock_irqrestore(&adapter->stats_lock, irq_flags); + return -EIO; + } + spin_unlock_irqrestore(&adapter->stats_lock, irq_flags); + break; + case SIOCSMIIREG: + default: + return -EOPNOTSUPP; + } + return 0; +} + +static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) +{ + switch (cmd) { + case SIOCGMIIPHY: + case SIOCGMIIREG: + case SIOCSMIIREG: + return 
e1000_mii_ioctl(netdev, ifr, cmd); + default: + return -EOPNOTSUPP; + } +} + +static int e1000_suspend(struct pci_dev *pdev, pm_message_t state) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + u32 ctrl, ctrl_ext, rctl, status; + u32 wufc = adapter->wol; + int retval = 0; + + netif_device_detach(netdev); + + if (netif_running(netdev)) { + WARN_ON(test_bit(__E1000_RESETTING, &adapter->state)); + e1000e_down(adapter); + e1000_free_irq(adapter); + } + + retval = pci_save_state(pdev); + if (retval) + return retval; + + status = er32(STATUS); + if (status & E1000_STATUS_LU) + wufc &= ~E1000_WUFC_LNKC; + + if (wufc) { + e1000_setup_rctl(adapter); + e1000_set_multi(netdev); + + /* turn on all-multi mode if wake on multicast is enabled */ + if (wufc & E1000_WUFC_MC) { + rctl = er32(RCTL); + rctl |= E1000_RCTL_MPE; + ew32(RCTL, rctl); + } + + ctrl = er32(CTRL); + /* advertise wake from D3Cold */ + #define E1000_CTRL_ADVD3WUC 0x00100000 + /* phy power management enable */ + #define E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000 + ctrl |= E1000_CTRL_ADVD3WUC | + E1000_CTRL_EN_PHY_PWR_MGMT; + ew32(CTRL, ctrl); + + if (adapter->hw.media_type == e1000_media_type_fiber || + adapter->hw.media_type == e1000_media_type_internal_serdes) { + /* keep the laser running in D3 */ + ctrl_ext = er32(CTRL_EXT); + ctrl_ext |= E1000_CTRL_EXT_SDP7_DATA; + ew32(CTRL_EXT, ctrl_ext); + } + + /* Allow time for pending master requests to run */ + e1000e_disable_pcie_master(&adapter->hw); + + ew32(WUC, E1000_WUC_PME_EN); + ew32(WUFC, wufc); + pci_enable_wake(pdev, PCI_D3hot, 1); + pci_enable_wake(pdev, PCI_D3cold, 1); + } else { + ew32(WUC, 0); + ew32(WUFC, 0); + pci_enable_wake(pdev, PCI_D3hot, 0); + pci_enable_wake(pdev, PCI_D3cold, 0); + } + + e1000_release_manageability(adapter); + + /* make sure adapter isn't asleep if manageability is enabled */ + if (adapter->flags & FLAG_MNG_PT_ENABLED) { + pci_enable_wake(pdev, PCI_D3hot, 1); + pci_enable_wake(pdev, PCI_D3cold, 1); + } + + if (adapter->hw.phy.type == e1000_phy_igp_3) + e1000e_igp3_phy_powerdown_workaround_ich8lan(&adapter->hw); + + /* Release control of h/w to f/w. If f/w is AMT enabled, this + * would have already happened in close and is redundant. */ + e1000_release_hw_control(adapter); + + pci_disable_device(pdev); + + pci_set_power_state(pdev, pci_choose_state(pdev, state)); + + return 0; +} + +#ifdef CONFIG_PM +static int e1000_resume(struct pci_dev *pdev) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + u32 err; + + pci_set_power_state(pdev, PCI_D0); + pci_restore_state(pdev); + err = pci_enable_device(pdev); + if (err) { + dev_err(&pdev->dev, + "Cannot enable PCI device from suspend\n"); + return err; + } + + pci_set_master(pdev); + + pci_enable_wake(pdev, PCI_D3hot, 0); + pci_enable_wake(pdev, PCI_D3cold, 0); + + if (netif_running(netdev)) { + err = e1000_request_irq(adapter); + if (err) + return err; + } + + e1000e_power_up_phy(adapter); + e1000e_reset(adapter); + ew32(WUS, ~0); + + e1000_init_manageability(adapter); + + if (netif_running(netdev)) + e1000e_up(adapter); + + netif_device_attach(netdev); + + /* If the controller has AMT, do not set DRV_LOAD until the interface + * is up. For all other cases, let the f/w know that the h/w is now + * under the control of the driver. 
 */ + if (!(adapter->flags & FLAG_HAS_AMT) || !e1000e_check_mng_mode(&adapter->hw)) + e1000_get_hw_control(adapter); + + return 0; +} +#endif + +static void e1000_shutdown(struct pci_dev *pdev) +{ + e1000_suspend(pdev, PMSG_SUSPEND); +} + +#ifdef CONFIG_NET_POLL_CONTROLLER +/* + * Polling 'interrupt' - used by things like netconsole to send skbs + * without having to re-enable interrupts. It's not called while + * the interrupt routine is executing. + */ +static void e1000_netpoll(struct net_device *netdev) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + + disable_irq(adapter->pdev->irq); + e1000_intr(adapter->pdev->irq, netdev); + + e1000_clean_tx_irq(adapter); + + enable_irq(adapter->pdev->irq); +} +#endif + +/** + * e1000_io_error_detected - called when PCI error is detected + * @pdev: Pointer to PCI device + * @state: The current pci connection state + * + * This function is called after a PCI bus error affecting + * this device has been detected. + */ +static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev, + pci_channel_state_t state) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct e1000_adapter *adapter = netdev_priv(netdev); + + netif_device_detach(netdev); + + if (netif_running(netdev)) + e1000e_down(adapter); + pci_disable_device(pdev); + + /* Request a slot reset. */ + return PCI_ERS_RESULT_NEED_RESET; +} + +/** + * e1000_io_slot_reset - called after the pci bus has been reset. + * @pdev: Pointer to PCI device + * + * Restart the card from scratch, as if from a cold-boot. Implementation + * resembles the first-half of the e1000_resume routine. + */ +static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + + if (pci_enable_device(pdev)) { + dev_err(&pdev->dev, + "Cannot re-enable PCI device after reset.\n"); + return PCI_ERS_RESULT_DISCONNECT; + } + pci_set_master(pdev); + + pci_enable_wake(pdev, PCI_D3hot, 0); + pci_enable_wake(pdev, PCI_D3cold, 0); + + e1000e_reset(adapter); + ew32(WUS, ~0); + + return PCI_ERS_RESULT_RECOVERED; +} + +/** + * e1000_io_resume - called when traffic can start flowing again. + * @pdev: Pointer to PCI device + * + * This callback is called when the error recovery driver tells us that + * it's OK to resume normal operation. Implementation resembles the + * second-half of the e1000_resume routine. + */ +static void e1000_io_resume(struct pci_dev *pdev) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct e1000_adapter *adapter = netdev_priv(netdev); + + e1000_init_manageability(adapter); + + if (netif_running(netdev)) { + if (e1000e_up(adapter)) { + dev_err(&pdev->dev, + "can't bring device back up after reset\n"); + return; + } + } + + netif_device_attach(netdev); + + /* If the controller has AMT, do not set DRV_LOAD until the interface + * is up. For all other cases, let the f/w know that the h/w is now + * under the control of the driver. */ + if (!(adapter->flags & FLAG_HAS_AMT) || + !e1000e_check_mng_mode(&adapter->hw)) + e1000_get_hw_control(adapter); + +} + +static void e1000_print_device_info(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + struct net_device *netdev = adapter->netdev; + u32 part_num; + + /* print bus type/speed/width info */ + ndev_info(netdev, "(PCI Express:2.5GB/s:%s) " + "%02x:%02x:%02x:%02x:%02x:%02x\n", + /* bus width */ + ((hw->bus.width == e1000_bus_width_pcie_x4) ?
"Width x4" : + "Width x1"), + /* MAC address */ + netdev->dev_addr[0], netdev->dev_addr[1], + netdev->dev_addr[2], netdev->dev_addr[3], + netdev->dev_addr[4], netdev->dev_addr[5]); + ndev_info(netdev, "Intel(R) PRO/%s Network Connection\n", + (hw->phy.type == e1000_phy_ife) + ? "10/100" : "1000"); + e1000e_read_part_num(hw, &part_num); + ndev_info(netdev, "MAC: %d, PHY: %d, PBA No: %06x-%03x\n", + hw->mac.type, hw->phy.type, + (part_num >> 8), (part_num & 0xff)); +} + +/** + * e1000_probe - Device Initialization Routine + * @pdev: PCI device information struct + * @ent: entry in e1000_pci_tbl + * + * Returns 0 on success, negative on failure + * + * e1000_probe initializes an adapter identified by a pci_dev structure. + * The OS initialization, configuring of the adapter private structure, + * and a hardware reset occur. + **/ +static int __devinit e1000_probe(struct pci_dev *pdev, + const struct pci_device_id *ent) +{ + struct net_device *netdev; + struct e1000_adapter *adapter; + struct e1000_hw *hw; + const struct e1000_info *ei = e1000_info_tbl[ent->driver_data]; + unsigned long mmio_start, mmio_len; + unsigned long flash_start, flash_len; + + static int cards_found; + int i, err, pci_using_dac; + u16 eeprom_data = 0; + u16 eeprom_apme_mask = E1000_EEPROM_APME; + + err = pci_enable_device(pdev); + if (err) + return err; + + pci_using_dac = 0; + err = pci_set_dma_mask(pdev, DMA_64BIT_MASK); + if (!err) { + err = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK); + if (!err) + pci_using_dac = 1; + } else { + err = pci_set_dma_mask(pdev, DMA_32BIT_MASK); + if (err) { + err = pci_set_consistent_dma_mask(pdev, + DMA_32BIT_MASK); + if (err) { + dev_err(&pdev->dev, "No usable DMA " + "configuration, aborting\n"); + goto err_dma; + } + } + } + + err = pci_request_regions(pdev, e1000e_driver_name); + if (err) + goto err_pci_reg; + + pci_set_master(pdev); + + err = -ENOMEM; + netdev = alloc_etherdev(sizeof(struct e1000_adapter)); + if (!netdev) + goto err_alloc_etherdev; + + SET_NETDEV_DEV(netdev, &pdev->dev); + + pci_set_drvdata(pdev, netdev); + adapter = netdev_priv(netdev); + hw = &adapter->hw; + adapter->netdev = netdev; + adapter->pdev = pdev; + adapter->ei = ei; + adapter->pba = ei->pba; + adapter->flags = ei->flags; + adapter->hw.adapter = adapter; + adapter->hw.mac.type = ei->mac; + adapter->msg_enable = (1 << NETIF_MSG_DRV | NETIF_MSG_PROBE) - 1; + + mmio_start = pci_resource_start(pdev, 0); + mmio_len = pci_resource_len(pdev, 0); + + err = -EIO; + adapter->hw.hw_addr = ioremap(mmio_start, mmio_len); + if (!adapter->hw.hw_addr) + goto err_ioremap; + + if ((adapter->flags & FLAG_HAS_FLASH) && + (pci_resource_flags(pdev, 1) & IORESOURCE_MEM)) { + flash_start = pci_resource_start(pdev, 1); + flash_len = pci_resource_len(pdev, 1); + adapter->hw.flash_address = ioremap(flash_start, flash_len); + if (!adapter->hw.flash_address) + goto err_flashmap; + } + + /* construct the net_device struct */ + netdev->open = &e1000_open; + netdev->stop = &e1000_close; + netdev->hard_start_xmit = &e1000_xmit_frame; + netdev->get_stats = &e1000_get_stats; + netdev->set_multicast_list = &e1000_set_multi; + netdev->set_mac_address = &e1000_set_mac; + netdev->change_mtu = &e1000_change_mtu; + netdev->do_ioctl = &e1000_ioctl; + e1000e_set_ethtool_ops(netdev); + netdev->tx_timeout = &e1000_tx_timeout; + netdev->watchdog_timeo = 5 * HZ; + netif_napi_add(netdev, &adapter->napi, e1000_clean, 64); + netdev->vlan_rx_register = e1000_vlan_rx_register; + netdev->vlan_rx_add_vid = e1000_vlan_rx_add_vid; + 
netdev->vlan_rx_kill_vid = e1000_vlan_rx_kill_vid; +#ifdef CONFIG_NET_POLL_CONTROLLER + netdev->poll_controller = e1000_netpoll; +#endif + strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1); + + netdev->mem_start = mmio_start; + netdev->mem_end = mmio_start + mmio_len; + + adapter->bd_number = cards_found++; + + /* setup adapter struct */ + err = e1000_sw_init(adapter); + if (err) + goto err_sw_init; + + err = -EIO; + + memcpy(&hw->mac.ops, ei->mac_ops, sizeof(hw->mac.ops)); + memcpy(&hw->nvm.ops, ei->nvm_ops, sizeof(hw->nvm.ops)); + memcpy(&hw->phy.ops, ei->phy_ops, sizeof(hw->phy.ops)); + + err = ei->get_invariants(adapter); + if (err) + goto err_hw_init; + + hw->mac.ops.get_bus_info(&adapter->hw); + + adapter->hw.phy.wait_for_link = 0; + + /* Copper options */ + if (adapter->hw.media_type == e1000_media_type_copper) { + adapter->hw.phy.mdix = AUTO_ALL_MODES; + adapter->hw.phy.disable_polarity_correction = 0; + adapter->hw.phy.ms_type = e1000_ms_hw_default; + } + + if (e1000_check_reset_block(&adapter->hw)) + ndev_info(netdev, + "PHY reset is blocked due to SOL/IDER session.\n"); + + netdev->features = NETIF_F_SG | + NETIF_F_HW_CSUM | + NETIF_F_HW_VLAN_TX | + NETIF_F_HW_VLAN_RX; + + if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER) + netdev->features |= NETIF_F_HW_VLAN_FILTER; + + netdev->features |= NETIF_F_TSO; + netdev->features |= NETIF_F_TSO6; + + if (pci_using_dac) + netdev->features |= NETIF_F_HIGHDMA; + + /* We should not be using LLTX anymore, but we are still TX faster with + * it. */ + netdev->features |= NETIF_F_LLTX; + + if (e1000e_enable_mng_pass_thru(&adapter->hw)) + adapter->flags |= FLAG_MNG_PT_ENABLED; + + /* before reading the NVM, reset the controller to + * put the device in a known good starting state */ + adapter->hw.mac.ops.reset_hw(&adapter->hw); + + /* + * systems with ASPM and others may see the checksum fail on the first + * attempt. Let's give it a few tries + */ + for (i = 0;; i++) { + if (e1000_validate_nvm_checksum(&adapter->hw) >= 0) + break; + if (i == 2) { + ndev_err(netdev, "The NVM Checksum Is Not Valid\n"); + err = -EIO; + goto err_eeprom; + } + } + + /* copy the MAC address out of the NVM */ + if (e1000e_read_mac_addr(&adapter->hw)) + ndev_err(netdev, "NVM Read Error while reading MAC address\n"); + + memcpy(netdev->dev_addr, adapter->hw.mac.addr, netdev->addr_len); + memcpy(netdev->perm_addr, adapter->hw.mac.addr, netdev->addr_len); + + if (!is_valid_ether_addr(netdev->perm_addr)) { + ndev_err(netdev, "Invalid MAC Address: " + "%02x:%02x:%02x:%02x:%02x:%02x\n", + netdev->perm_addr[0], netdev->perm_addr[1], + netdev->perm_addr[2], netdev->perm_addr[3], + netdev->perm_addr[4], netdev->perm_addr[5]); + err = -EIO; + goto err_eeprom; + } + + init_timer(&adapter->watchdog_timer); + adapter->watchdog_timer.function = &e1000_watchdog; + adapter->watchdog_timer.data = (unsigned long) adapter; + + init_timer(&adapter->phy_info_timer); + adapter->phy_info_timer.function = &e1000_update_phy_info; + adapter->phy_info_timer.data = (unsigned long) adapter; + + INIT_WORK(&adapter->reset_task, e1000_reset_task); + INIT_WORK(&adapter->watchdog_task, e1000_watchdog_task); + + e1000e_check_options(adapter); + + /* Initialize link parameters. 
User can change them with ethtool */ + adapter->hw.mac.autoneg = 1; + adapter->fc_autoneg = 1; + adapter->hw.mac.original_fc = e1000_fc_default; + adapter->hw.mac.fc = e1000_fc_default; + adapter->hw.phy.autoneg_advertised = 0x2f; + + /* ring size defaults */ + adapter->rx_ring->count = 256; + adapter->tx_ring->count = 256; + + /* + * Initial Wake on LAN setting - If APM wake is enabled in + * the EEPROM, enable the ACPI Magic Packet filter + */ + if (adapter->flags & FLAG_APME_IN_WUC) { + /* APME bit in EEPROM is mapped to WUC.APME */ + eeprom_data = er32(WUC); + eeprom_apme_mask = E1000_WUC_APME; + } else if (adapter->flags & FLAG_APME_IN_CTRL3) { + if (adapter->flags & FLAG_APME_CHECK_PORT_B && + (adapter->hw.bus.func == 1)) + e1000_read_nvm(&adapter->hw, + NVM_INIT_CONTROL3_PORT_B, 1, &eeprom_data); + else + e1000_read_nvm(&adapter->hw, + NVM_INIT_CONTROL3_PORT_A, 1, &eeprom_data); + } + + /* fetch WoL from EEPROM */ + if (eeprom_data & eeprom_apme_mask) + adapter->eeprom_wol |= E1000_WUFC_MAG; + + /* + * now that we have the eeprom settings, apply the special cases + * where the eeprom may be wrong or the board simply won't support + * wake on lan on a particular port + */ + if (!(adapter->flags & FLAG_HAS_WOL)) + adapter->eeprom_wol = 0; + + /* initialize the wol settings based on the eeprom settings */ + adapter->wol = adapter->eeprom_wol; + + /* reset the hardware with the new settings */ + e1000e_reset(adapter); + + /* If the controller has AMT, do not set DRV_LOAD until the interface + * is up. For all other cases, let the f/w know that the h/w is now + * under the control of the driver. */ + if (!(adapter->flags & FLAG_HAS_AMT) || + !e1000e_check_mng_mode(&adapter->hw)) + e1000_get_hw_control(adapter); + + /* tell the stack to leave us alone until e1000_open() is called */ + netif_carrier_off(netdev); + netif_stop_queue(netdev); + + strcpy(netdev->name, "eth%d"); + err = register_netdev(netdev); + if (err) + goto err_register; + + e1000_print_device_info(adapter); + + return 0; + +err_register: +err_hw_init: + e1000_release_hw_control(adapter); +err_eeprom: + if (!e1000_check_reset_block(&adapter->hw)) + e1000_phy_hw_reset(&adapter->hw); + + if (adapter->hw.flash_address) + iounmap(adapter->hw.flash_address); + +err_flashmap: + kfree(adapter->tx_ring); + kfree(adapter->rx_ring); +err_sw_init: + iounmap(adapter->hw.hw_addr); +err_ioremap: + free_netdev(netdev); +err_alloc_etherdev: + pci_release_regions(pdev); +err_pci_reg: +err_dma: + pci_disable_device(pdev); + return err; +} + +/** + * e1000_remove - Device Removal Routine + * @pdev: PCI device information struct + * + * e1000_remove is called by the PCI subsystem to alert the driver + * that it should release a PCI device. This could be caused by a + * Hot-Plug event, or because the driver is going to be removed from + * memory. + **/ +static void __devexit e1000_remove(struct pci_dev *pdev) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct e1000_adapter *adapter = netdev_priv(netdev); + + /* flush_scheduled_work() may reschedule our watchdog task, so + * explicitly disable watchdog tasks from being rescheduled */ + set_bit(__E1000_DOWN, &adapter->state); + del_timer_sync(&adapter->watchdog_timer); + del_timer_sync(&adapter->phy_info_timer); + + flush_scheduled_work(); + + e1000_release_manageability(adapter); + + /* Release control of h/w to f/w. If f/w is AMT enabled, this + * would have already happened in close and is redundant.
 */ + e1000_release_hw_control(adapter); + + unregister_netdev(netdev); + + if (!e1000_check_reset_block(&adapter->hw)) + e1000_phy_hw_reset(&adapter->hw); + + kfree(adapter->tx_ring); + kfree(adapter->rx_ring); + + iounmap(adapter->hw.hw_addr); + if (adapter->hw.flash_address) + iounmap(adapter->hw.flash_address); + pci_release_regions(pdev); + + free_netdev(netdev); + + pci_disable_device(pdev); +} + +/* PCI Error Recovery (ERS) */ +static struct pci_error_handlers e1000_err_handler = { + .error_detected = e1000_io_error_detected, + .slot_reset = e1000_io_slot_reset, + .resume = e1000_io_resume, +}; + +static struct pci_device_id e1000_pci_tbl[] = { + /* + * Support for 82571/2/3, es2lan and ich8 will be phased in + * stepwise. + + { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_COPPER), board_82571 }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_FIBER), board_82571 }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_QUAD_COPPER), board_82571 }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_QUAD_COPPER_LP), board_82571 }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_QUAD_FIBER), board_82571 }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_SERDES), board_82571 }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_82572EI), board_82572 }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_82572EI_COPPER), board_82572 }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_82572EI_FIBER), board_82572 }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_82572EI_SERDES), board_82572 }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_82573E), board_82573 }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_82573E_IAMT), board_82573 }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_82573L), board_82573 }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_80003ES2LAN_COPPER_DPT), + board_80003es2lan }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_80003ES2LAN_COPPER_SPT), + board_80003es2lan }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_80003ES2LAN_SERDES_DPT), + board_80003es2lan }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_80003ES2LAN_SERDES_SPT), + board_80003es2lan }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IFE), board_ich8lan }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IFE_G), board_ich8lan }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IFE_GT), board_ich8lan }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IGP_AMT), board_ich8lan }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IGP_C), board_ich8lan }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IGP_M), board_ich8lan }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IGP_M_AMT), board_ich8lan }, + */ + + { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IFE), board_ich9lan }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IFE_G), board_ich9lan }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IFE_GT), board_ich9lan }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_AMT), board_ich9lan }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_C), board_ich9lan }, + + { } /* terminate list */ +}; +MODULE_DEVICE_TABLE(pci, e1000_pci_tbl); + +/* PCI Device API Driver */ +static struct pci_driver e1000_driver = { + .name = e1000e_driver_name, + .id_table = e1000_pci_tbl, + .probe = e1000_probe, + .remove = __devexit_p(e1000_remove), +#ifdef CONFIG_PM + /* Power Management Hooks */ + .suspend = e1000_suspend, + .resume = e1000_resume, +#endif + .shutdown = e1000_shutdown, + .err_handler = &e1000_err_handler +}; + +/** + * e1000_init_module - Driver Registration Routine + * + * e1000_init_module is the first routine called when the driver is + * loaded. All it does is register with the PCI subsystem.
+ **/ +static int __init e1000_init_module(void) +{ + int ret; + printk(KERN_INFO "%s: Intel(R) PRO/1000 Network Driver - %s\n", + e1000e_driver_name, e1000e_driver_version); + printk(KERN_INFO "%s: Copyright (c) 1999-2007 Intel Corporation.\n", + e1000e_driver_name); + ret = pci_register_driver(&e1000_driver); + + return ret; +} +module_init(e1000_init_module); + +/** + * e1000_exit_module - Driver Exit Cleanup Routine + * + * e1000_exit_module is called just before the driver is removed + * from memory. + **/ +static void __exit e1000_exit_module(void) +{ + pci_unregister_driver(&e1000_driver); +} +module_exit(e1000_exit_module); + + +MODULE_AUTHOR("Intel Corporation, "); +MODULE_DESCRIPTION("Intel(R) PRO/1000 Network Driver"); +MODULE_LICENSE("GPL"); +MODULE_VERSION(DRV_VERSION); + +/* e1000_main.c */ diff --git a/drivers/net/e1000e/param.c b/drivers/net/e1000e/param.c new file mode 100644 index 0000000000000000000000000000000000000000..e4e655efb23cbd8b9cfe0e06896ced7db796b2ab --- /dev/null +++ b/drivers/net/e1000e/param.c @@ -0,0 +1,382 @@ +/******************************************************************************* + + Intel PRO/1000 Linux driver + Copyright(c) 1999 - 2007 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + Linux NICS + e1000-devel Mailing List + Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +#include + +#include "e1000.h" + +/* This is the only thing that needs to be changed to adjust the + * maximum number of ports that the driver can manage. + */ + +#define E1000_MAX_NIC 32 + +#define OPTION_UNSET -1 +#define OPTION_DISABLED 0 +#define OPTION_ENABLED 1 + +#define COPYBREAK_DEFAULT 256 +unsigned int copybreak = COPYBREAK_DEFAULT; +module_param(copybreak, uint, 0644); +MODULE_PARM_DESC(copybreak, + "Maximum size of packet that is copied to a new buffer on receive"); + +/* All parameters are treated the same, as an integer array of values. + * This macro just reduces the need to repeat the same declaration code + * over and over (plus this helps to avoid typo bugs). + */ + +#define E1000_PARAM_INIT { [0 ... 
E1000_MAX_NIC] = OPTION_UNSET } +#define E1000_PARAM(X, desc) \ + static int __devinitdata X[E1000_MAX_NIC+1] = E1000_PARAM_INIT; \ + static int num_##X; \ + module_param_array_named(X, X, int, &num_##X, 0); \ + MODULE_PARM_DESC(X, desc); + + +/* Transmit Interrupt Delay in units of 1.024 microseconds + * Tx interrupt delay needs to typically be set to something non zero + * + * Valid Range: 0-65535 + */ +E1000_PARAM(TxIntDelay, "Transmit Interrupt Delay"); +#define DEFAULT_TIDV 8 +#define MAX_TXDELAY 0xFFFF +#define MIN_TXDELAY 0 + +/* Transmit Absolute Interrupt Delay in units of 1.024 microseconds + * + * Valid Range: 0-65535 + */ +E1000_PARAM(TxAbsIntDelay, "Transmit Absolute Interrupt Delay"); +#define DEFAULT_TADV 32 +#define MAX_TXABSDELAY 0xFFFF +#define MIN_TXABSDELAY 0 + +/* Receive Interrupt Delay in units of 1.024 microseconds + * hardware will likely hang if you set this to anything but zero. + * + * Valid Range: 0-65535 + */ +E1000_PARAM(RxIntDelay, "Receive Interrupt Delay"); +#define DEFAULT_RDTR 0 +#define MAX_RXDELAY 0xFFFF +#define MIN_RXDELAY 0 + +/* Receive Absolute Interrupt Delay in units of 1.024 microseconds + * + * Valid Range: 0-65535 + */ +E1000_PARAM(RxAbsIntDelay, "Receive Absolute Interrupt Delay"); +#define DEFAULT_RADV 8 +#define MAX_RXABSDELAY 0xFFFF +#define MIN_RXABSDELAY 0 + +/* Interrupt Throttle Rate (interrupts/sec) + * + * Valid Range: 100-100000 (0=off, 1=dynamic, 3=dynamic conservative) + */ +E1000_PARAM(InterruptThrottleRate, "Interrupt Throttling Rate"); +#define DEFAULT_ITR 3 +#define MAX_ITR 100000 +#define MIN_ITR 100 + +/* Enable Smart Power Down of the PHY + * + * Valid Range: 0, 1 + * + * Default Value: 0 (disabled) + */ +E1000_PARAM(SmartPowerDownEnable, "Enable PHY smart power down"); + +/* Enable Kumeran Lock Loss workaround + * + * Valid Range: 0, 1 + * + * Default Value: 1 (enabled) + */ +E1000_PARAM(KumeranLockLoss, "Enable Kumeran lock loss workaround"); + +struct e1000_option { + enum { enable_option, range_option, list_option } type; + char *name; + char *err; + int def; + union { + struct { /* range_option info */ + int min; + int max; + } r; + struct { /* list_option info */ + int nr; + struct e1000_opt_list { int i; char *str; } *p; + } l; + } arg; +}; + +static int __devinit e1000_validate_option(int *value, + struct e1000_option *opt, + struct e1000_adapter *adapter) +{ + if (*value == OPTION_UNSET) { + *value = opt->def; + return 0; + } + + switch (opt->type) { + case enable_option: + switch (*value) { + case OPTION_ENABLED: + ndev_info(adapter->netdev, "%s Enabled\n", opt->name); + return 0; + case OPTION_DISABLED: + ndev_info(adapter->netdev, "%s Disabled\n", opt->name); + return 0; + } + break; + case range_option: + if (*value >= opt->arg.r.min && *value <= opt->arg.r.max) { + ndev_info(adapter->netdev, + "%s set to %i\n", opt->name, *value); + return 0; + } + break; + case list_option: { + int i; + struct e1000_opt_list *ent; + + for (i = 0; i < opt->arg.l.nr; i++) { + ent = &opt->arg.l.p[i]; + if (*value == ent->i) { + if (ent->str[0] != '\0') + ndev_info(adapter->netdev, "%s\n", + ent->str); + return 0; + } + } + } + break; + default: + BUG(); + } + + ndev_info(adapter->netdev, "Invalid %s value specified (%i) %s\n", + opt->name, *value, opt->err); + *value = opt->def; + return -1; +} + +/** + * e1000e_check_options - Range Checking for Command Line Parameters + * @adapter: board private structure + * + * This routine checks all command line parameters for valid user + * input. 
If an invalid value is given, or if no user specified + * value exists, a default value is used. The final value is stored + * in a variable in the adapter structure. + **/ +void __devinit e1000e_check_options(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + struct net_device *netdev = adapter->netdev; + int bd = adapter->bd_number; + + if (bd >= E1000_MAX_NIC) { + ndev_notice(netdev, + "Warning: no configuration for board #%i\n", bd); + ndev_notice(netdev, "Using defaults for all values\n"); + } + + { /* Transmit Interrupt Delay */ + struct e1000_option opt = { + .type = range_option, + .name = "Transmit Interrupt Delay", + .err = "using default of " + __MODULE_STRING(DEFAULT_TIDV), + .def = DEFAULT_TIDV, + .arg = { .r = { .min = MIN_TXDELAY, + .max = MAX_TXDELAY } } + }; + + if (num_TxIntDelay > bd) { + adapter->tx_int_delay = TxIntDelay[bd]; + e1000_validate_option(&adapter->tx_int_delay, &opt, + adapter); + } else { + adapter->tx_int_delay = opt.def; + } + } + { /* Transmit Absolute Interrupt Delay */ + struct e1000_option opt = { + .type = range_option, + .name = "Transmit Absolute Interrupt Delay", + .err = "using default of " + __MODULE_STRING(DEFAULT_TADV), + .def = DEFAULT_TADV, + .arg = { .r = { .min = MIN_TXABSDELAY, + .max = MAX_TXABSDELAY } } + }; + + if (num_TxAbsIntDelay > bd) { + adapter->tx_abs_int_delay = TxAbsIntDelay[bd]; + e1000_validate_option(&adapter->tx_abs_int_delay, &opt, + adapter); + } else { + adapter->tx_abs_int_delay = opt.def; + } + } + { /* Receive Interrupt Delay */ + struct e1000_option opt = { + .type = range_option, + .name = "Receive Interrupt Delay", + .err = "using default of " + __MODULE_STRING(DEFAULT_RDTR), + .def = DEFAULT_RDTR, + .arg = { .r = { .min = MIN_RXDELAY, + .max = MAX_RXDELAY } } + }; + + /* modify min and default if 82573 for slow ping w/a, + * a value greater than 8 needs to be set for RDTR */ + if (adapter->flags & FLAG_HAS_ASPM) { + opt.def = 32; + opt.arg.r.min = 8; + } + + if (num_RxIntDelay > bd) { + adapter->rx_int_delay = RxIntDelay[bd]; + e1000_validate_option(&adapter->rx_int_delay, &opt, + adapter); + } else { + adapter->rx_int_delay = opt.def; + } + } + { /* Receive Absolute Interrupt Delay */ + struct e1000_option opt = { + .type = range_option, + .name = "Receive Absolute Interrupt Delay", + .err = "using default of " + __MODULE_STRING(DEFAULT_RADV), + .def = DEFAULT_RADV, + .arg = { .r = { .min = MIN_RXABSDELAY, + .max = MAX_RXABSDELAY } } + }; + + if (num_RxAbsIntDelay > bd) { + adapter->rx_abs_int_delay = RxAbsIntDelay[bd]; + e1000_validate_option(&adapter->rx_abs_int_delay, &opt, + adapter); + } else { + adapter->rx_abs_int_delay = opt.def; + } + } + { /* Interrupt Throttling Rate */ + struct e1000_option opt = { + .type = range_option, + .name = "Interrupt Throttling Rate (ints/sec)", + .err = "using default of " + __MODULE_STRING(DEFAULT_ITR), + .def = DEFAULT_ITR, + .arg = { .r = { .min = MIN_ITR, + .max = MAX_ITR } } + }; + + if (num_InterruptThrottleRate > bd) { + adapter->itr = InterruptThrottleRate[bd]; + switch (adapter->itr) { + case 0: + ndev_info(netdev, "%s turned off\n", + opt.name); + break; + case 1: + ndev_info(netdev, + "%s set to dynamic mode\n", + opt.name); + adapter->itr_setting = adapter->itr; + adapter->itr = 20000; + break; + case 3: + ndev_info(netdev, + "%s set to dynamic conservative mode\n", + opt.name); + adapter->itr_setting = adapter->itr; + adapter->itr = 20000; + break; + default: + e1000_validate_option(&adapter->itr, &opt, + adapter); + /* + * save the 
setting, because the dynamic bits + * change itr. clear the lower two bits + * because they are used as control + */ + adapter->itr_setting = adapter->itr & ~3; + break; + } + } else { + adapter->itr_setting = opt.def; + adapter->itr = 20000; + } + } + { /* Smart Power Down */ + struct e1000_option opt = { + .type = enable_option, + .name = "PHY Smart Power Down", + .err = "defaulting to Disabled", + .def = OPTION_DISABLED + }; + + if (num_SmartPowerDownEnable > bd) { + int spd = SmartPowerDownEnable[bd]; + e1000_validate_option(&spd, &opt, adapter); + if ((adapter->flags & FLAG_HAS_SMART_POWER_DOWN) + && spd) + adapter->flags |= FLAG_SMART_POWER_DOWN; + } + } + { /* Kumeran Lock Loss Workaround */ + struct e1000_option opt = { + .type = enable_option, + .name = "Kumeran Lock Loss Workaround", + .err = "defaulting to Enabled", + .def = OPTION_ENABLED + }; + + if (num_KumeranLockLoss > bd) { + int kmrn_lock_loss = KumeranLockLoss[bd]; + e1000_validate_option(&kmrn_lock_loss, &opt, adapter); + if (hw->mac.type == e1000_ich8lan) + e1000e_set_kmrn_lock_loss_workaround_ich8lan(hw, + kmrn_lock_loss); + } else { + if (hw->mac.type == e1000_ich8lan) + e1000e_set_kmrn_lock_loss_workaround_ich8lan(hw, + opt.def); + } + } +} diff --git a/drivers/net/e1000e/phy.c b/drivers/net/e1000e/phy.c new file mode 100644 index 0000000000000000000000000000000000000000..793231810ae0a6c119aef80111c2d428578812ca --- /dev/null +++ b/drivers/net/e1000e/phy.c @@ -0,0 +1,1773 @@ +/******************************************************************************* + + Intel PRO/1000 Linux driver + Copyright(c) 1999 - 2007 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + Linux NICS + e1000-devel Mailing List + Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +#include <linux/delay.h> + +#include "e1000.h" + +static s32 e1000_get_phy_cfg_done(struct e1000_hw *hw); +static s32 e1000_phy_force_speed_duplex(struct e1000_hw *hw); +static s32 e1000_set_d0_lplu_state(struct e1000_hw *hw, bool active); +static s32 e1000_wait_autoneg(struct e1000_hw *hw); + +/* Cable length tables */ +static const u16 e1000_m88_cable_length_table[] = + { 0, 50, 80, 110, 140, 140, E1000_CABLE_LENGTH_UNDEFINED }; + +static const u16 e1000_igp_2_cable_length_table[] = + { 0, 0, 0, 0, 0, 0, 0, 0, 3, 5, 8, 11, 13, 16, 18, 21, 0, 0, 0, 3, + 6, 10, 13, 16, 19, 23, 26, 29, 32, 35, 38, 41, 6, 10, 14, 18, 22, + 26, 30, 33, 37, 41, 44, 48, 51, 54, 58, 61, 21, 26, 31, 35, 40, + 44, 49, 53, 57, 61, 65, 68, 72, 75, 79, 82, 40, 45, 51, 56, 61, + 66, 70, 75, 79, 83, 87, 91, 94, 98, 101, 104, 60, 66, 72, 77, 82, + 87, 92, 96, 100, 104, 108, 111, 114, 117, 119, 121, 83, 89, 95, + 100, 105, 109, 113, 116, 119, 122, 124, 104, 109, 114, 118, 121, + 124}; +#define IGP02E1000_CABLE_LENGTH_TABLE_SIZE \ + (sizeof(e1000_igp_2_cable_length_table) / \ + sizeof(e1000_igp_2_cable_length_table[0])) + +/** + * e1000e_check_reset_block_generic - Check if PHY reset is blocked + * @hw: pointer to the HW structure + * + * Read the PHY management control register and check whether a PHY reset + * is blocked. If a reset is not blocked return 0, otherwise + * return E1000_BLK_PHY_RESET (12). + **/ +s32 e1000e_check_reset_block_generic(struct e1000_hw *hw) +{ + u32 manc; + + manc = er32(MANC); + + return (manc & E1000_MANC_BLK_PHY_RST_ON_IDE) ? + E1000_BLK_PHY_RESET : 0; +} + +/** + * e1000e_get_phy_id - Retrieve the PHY ID and revision + * @hw: pointer to the HW structure + * + * Reads the PHY registers and stores the PHY ID and possibly the PHY + * revision in the hardware structure. + **/ +s32 e1000e_get_phy_id(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 phy_id; + + ret_val = e1e_rphy(hw, PHY_ID1, &phy_id); + if (ret_val) + return ret_val; + + phy->id = (u32)(phy_id << 16); + udelay(20); + ret_val = e1e_rphy(hw, PHY_ID2, &phy_id); + if (ret_val) + return ret_val; + + phy->id |= (u32)(phy_id & PHY_REVISION_MASK); + phy->revision = (u32)(phy_id & ~PHY_REVISION_MASK); + + return 0; +} + +/** + * e1000e_phy_reset_dsp - Reset PHY DSP + * @hw: pointer to the HW structure + * + * Reset the digital signal processor. + **/ +s32 e1000e_phy_reset_dsp(struct e1000_hw *hw) +{ + s32 ret_val; + + ret_val = e1e_wphy(hw, M88E1000_PHY_GEN_CONTROL, 0xC1); + if (ret_val) + return ret_val; + + return e1e_wphy(hw, M88E1000_PHY_GEN_CONTROL, 0); +} + +/** + * e1000_read_phy_reg_mdic - Read MDI control register + * @hw: pointer to the HW structure + * @offset: register offset to be read + * @data: pointer to the read data + * + * Reads the MDI control register in the PHY at offset and stores the + * information read to data. + **/ +static s32 e1000_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data) +{ + struct e1000_phy_info *phy = &hw->phy; + u32 i, mdic = 0; + + if (offset > MAX_PHY_REG_ADDRESS) { + hw_dbg(hw, "PHY Address %d is out of range\n", offset); + return -E1000_ERR_PARAM; + } + + /* Set up Op-code, Phy Address, and register offset in the MDI + * Control register. The MAC will take care of interfacing with the + * PHY to retrieve the desired data.
+ */ + mdic = ((offset << E1000_MDIC_REG_SHIFT) | + (phy->addr << E1000_MDIC_PHY_SHIFT) | + (E1000_MDIC_OP_READ)); + + ew32(MDIC, mdic); + + /* Poll the ready bit to see if the MDI read completed */ + for (i = 0; i < 64; i++) { + udelay(50); + mdic = er32(MDIC); + if (mdic & E1000_MDIC_READY) + break; + } + if (!(mdic & E1000_MDIC_READY)) { + hw_dbg(hw, "MDI Read did not complete\n"); + return -E1000_ERR_PHY; + } + if (mdic & E1000_MDIC_ERROR) { + hw_dbg(hw, "MDI Error\n"); + return -E1000_ERR_PHY; + } + *data = (u16) mdic; + + return 0; +} + +/** + * e1000_write_phy_reg_mdic - Write MDI control register + * @hw: pointer to the HW structure + * @offset: register offset to write to + * @data: data to write to register at offset + * + * Writes data to MDI control register in the PHY at offset. + **/ +static s32 e1000_write_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 data) +{ + struct e1000_phy_info *phy = &hw->phy; + u32 i, mdic = 0; + + if (offset > MAX_PHY_REG_ADDRESS) { + hw_dbg(hw, "PHY Address %d is out of range\n", offset); + return -E1000_ERR_PARAM; + } + + /* Set up Op-code, Phy Address, and register offset in the MDI + * Control register. The MAC will take care of interfacing with the + * PHY to retrieve the desired data. + */ + mdic = (((u32)data) | + (offset << E1000_MDIC_REG_SHIFT) | + (phy->addr << E1000_MDIC_PHY_SHIFT) | + (E1000_MDIC_OP_WRITE)); + + ew32(MDIC, mdic); + + /* Poll the ready bit to see if the MDI read completed */ + for (i = 0; i < E1000_GEN_POLL_TIMEOUT; i++) { + udelay(5); + mdic = er32(MDIC); + if (mdic & E1000_MDIC_READY) + break; + } + if (!(mdic & E1000_MDIC_READY)) { + hw_dbg(hw, "MDI Write did not complete\n"); + return -E1000_ERR_PHY; + } + + return 0; +} + +/** + * e1000e_read_phy_reg_m88 - Read m88 PHY register + * @hw: pointer to the HW structure + * @offset: register offset to be read + * @data: pointer to the read data + * + * Acquires semaphore, if necessary, then reads the PHY register at offset + * and storing the retrieved information in data. Release any acquired + * semaphores before exiting. + **/ +s32 e1000e_read_phy_reg_m88(struct e1000_hw *hw, u32 offset, u16 *data) +{ + s32 ret_val; + + ret_val = hw->phy.ops.acquire_phy(hw); + if (ret_val) + return ret_val; + + ret_val = e1000_read_phy_reg_mdic(hw, + MAX_PHY_REG_ADDRESS & offset, + data); + + hw->phy.ops.release_phy(hw); + + return ret_val; +} + +/** + * e1000e_write_phy_reg_m88 - Write m88 PHY register + * @hw: pointer to the HW structure + * @offset: register offset to write to + * @data: data to write at register offset + * + * Acquires semaphore, if necessary, then writes the data to PHY register + * at the offset. Release any acquired semaphores before exiting. + **/ +s32 e1000e_write_phy_reg_m88(struct e1000_hw *hw, u32 offset, u16 data) +{ + s32 ret_val; + + ret_val = hw->phy.ops.acquire_phy(hw); + if (ret_val) + return ret_val; + + ret_val = e1000_write_phy_reg_mdic(hw, + MAX_PHY_REG_ADDRESS & offset, + data); + + hw->phy.ops.release_phy(hw); + + return ret_val; +} + +/** + * e1000e_read_phy_reg_igp - Read igp PHY register + * @hw: pointer to the HW structure + * @offset: register offset to be read + * @data: pointer to the read data + * + * Acquires semaphore, if necessary, then reads the PHY register at offset + * and storing the retrieved information in data. Release any acquired + * semaphores before exiting. 
+ **/ +s32 e1000e_read_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 *data) +{ + s32 ret_val; + + ret_val = hw->phy.ops.acquire_phy(hw); + if (ret_val) + return ret_val; + + if (offset > MAX_PHY_MULTI_PAGE_REG) { + ret_val = e1000_write_phy_reg_mdic(hw, + IGP01E1000_PHY_PAGE_SELECT, + (u16)offset); + if (ret_val) { + hw->phy.ops.release_phy(hw); + return ret_val; + } + } + + ret_val = e1000_read_phy_reg_mdic(hw, + MAX_PHY_REG_ADDRESS & offset, + data); + + hw->phy.ops.release_phy(hw); + + return ret_val; +} + +/** + * e1000e_write_phy_reg_igp - Write igp PHY register + * @hw: pointer to the HW structure + * @offset: register offset to write to + * @data: data to write at register offset + * + * Acquires semaphore, if necessary, then writes the data to PHY register + * at the offset. Release any acquired semaphores before exiting. + **/ +s32 e1000e_write_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 data) +{ + s32 ret_val; + + ret_val = hw->phy.ops.acquire_phy(hw); + if (ret_val) + return ret_val; + + if (offset > MAX_PHY_MULTI_PAGE_REG) { + ret_val = e1000_write_phy_reg_mdic(hw, + IGP01E1000_PHY_PAGE_SELECT, + (u16)offset); + if (ret_val) { + hw->phy.ops.release_phy(hw); + return ret_val; + } + } + + ret_val = e1000_write_phy_reg_mdic(hw, + MAX_PHY_REG_ADDRESS & offset, + data); + + hw->phy.ops.release_phy(hw); + + return ret_val; +} + +/** + * e1000e_read_kmrn_reg - Read kumeran register + * @hw: pointer to the HW structure + * @offset: register offset to be read + * @data: pointer to the read data + * + * Acquires semaphore, if necessary. Then reads the PHY register at offset + * using the kumeran interface. The information retrieved is stored in data. + * Release any acquired semaphores before exiting. + **/ +s32 e1000e_read_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 *data) +{ + u32 kmrnctrlsta; + s32 ret_val; + + ret_val = hw->phy.ops.acquire_phy(hw); + if (ret_val) + return ret_val; + + kmrnctrlsta = ((offset << E1000_KMRNCTRLSTA_OFFSET_SHIFT) & + E1000_KMRNCTRLSTA_OFFSET) | E1000_KMRNCTRLSTA_REN; + ew32(KMRNCTRLSTA, kmrnctrlsta); + + udelay(2); + + kmrnctrlsta = er32(KMRNCTRLSTA); + *data = (u16)kmrnctrlsta; + + hw->phy.ops.release_phy(hw); + + return ret_val; +} + +/** + * e1000e_write_kmrn_reg - Write kumeran register + * @hw: pointer to the HW structure + * @offset: register offset to write to + * @data: data to write at register offset + * + * Acquires semaphore, if necessary. Then write the data to PHY register + * at the offset using the kumeran interface. Release any acquired semaphores + * before exiting. + **/ +s32 e1000e_write_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 data) +{ + u32 kmrnctrlsta; + s32 ret_val; + + ret_val = hw->phy.ops.acquire_phy(hw); + if (ret_val) + return ret_val; + + kmrnctrlsta = ((offset << E1000_KMRNCTRLSTA_OFFSET_SHIFT) & + E1000_KMRNCTRLSTA_OFFSET) | data; + ew32(KMRNCTRLSTA, kmrnctrlsta); + + udelay(2); + hw->phy.ops.release_phy(hw); + + return ret_val; +} + +/** + * e1000e_copper_link_setup_m88 - Setup m88 PHY's for copper link + * @hw: pointer to the HW structure + * + * Sets up MDI/MDI-X and polarity for m88 PHY's. If necessary, transmit clock + * and downshift values are set also. + **/ +s32 e1000e_copper_link_setup_m88(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 phy_data; + + /* Enable CRS on TX. This must be set for half-duplex operation. 
*/ + ret_val = e1e_rphy(hw, M88E1000_PHY_SPEC_CTRL, &phy_data); + if (ret_val) + return ret_val; + + phy_data |= M88E1000_PSCR_ASSERT_CRS_ON_TX; + + /* Options: + * MDI/MDI-X = 0 (default) + * 0 - Auto for all speeds + * 1 - MDI mode + * 2 - MDI-X mode + * 3 - Auto for 1000Base-T only (MDI-X for 10/100Base-T modes) + */ + phy_data &= ~M88E1000_PSCR_AUTO_X_MODE; + + switch (phy->mdix) { + case 1: + phy_data |= M88E1000_PSCR_MDI_MANUAL_MODE; + break; + case 2: + phy_data |= M88E1000_PSCR_MDIX_MANUAL_MODE; + break; + case 3: + phy_data |= M88E1000_PSCR_AUTO_X_1000T; + break; + case 0: + default: + phy_data |= M88E1000_PSCR_AUTO_X_MODE; + break; + } + + /* Options: + * disable_polarity_correction = 0 (default) + * Automatic Correction for Reversed Cable Polarity + * 0 - Disabled + * 1 - Enabled + */ + phy_data &= ~M88E1000_PSCR_POLARITY_REVERSAL; + if (phy->disable_polarity_correction == 1) + phy_data |= M88E1000_PSCR_POLARITY_REVERSAL; + + ret_val = e1e_wphy(hw, M88E1000_PHY_SPEC_CTRL, phy_data); + if (ret_val) + return ret_val; + + if (phy->revision < 4) { + /* Force TX_CLK in the Extended PHY Specific Control Register + * to 25MHz clock. + */ + ret_val = e1e_rphy(hw, M88E1000_EXT_PHY_SPEC_CTRL, &phy_data); + if (ret_val) + return ret_val; + + phy_data |= M88E1000_EPSCR_TX_CLK_25; + + if ((phy->revision == 2) && + (phy->id == M88E1111_I_PHY_ID)) { + /* 82573L PHY - set the downshift counter to 5x. */ + phy_data &= ~M88EC018_EPSCR_DOWNSHIFT_COUNTER_MASK; + phy_data |= M88EC018_EPSCR_DOWNSHIFT_COUNTER_5X; + } else { + /* Configure Master and Slave downshift values */ + phy_data &= ~(M88E1000_EPSCR_MASTER_DOWNSHIFT_MASK | + M88E1000_EPSCR_SLAVE_DOWNSHIFT_MASK); + phy_data |= (M88E1000_EPSCR_MASTER_DOWNSHIFT_1X | + M88E1000_EPSCR_SLAVE_DOWNSHIFT_1X); + } + ret_val = e1e_wphy(hw, M88E1000_EXT_PHY_SPEC_CTRL, phy_data); + if (ret_val) + return ret_val; + } + + /* Commit the changes. */ + ret_val = e1000e_commit_phy(hw); + if (ret_val) + hw_dbg(hw, "Error committing the PHY changes\n"); + + return ret_val; +} + +/** + * e1000e_copper_link_setup_igp - Setup igp PHY's for copper link + * @hw: pointer to the HW structure + * + * Sets up LPLU, MDI/MDI-X, polarity, Smartspeed and Master/Slave config for + * igp PHY's. + **/ +s32 e1000e_copper_link_setup_igp(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 data; + + ret_val = e1000_phy_hw_reset(hw); + if (ret_val) { + hw_dbg(hw, "Error resetting the PHY.\n"); + return ret_val; + } + + /* Wait 15ms for MAC to configure PHY from NVM settings. */ + msleep(15); + + /* disable lplu d0 during driver init */ + ret_val = e1000_set_d0_lplu_state(hw, 0); + if (ret_val) { + hw_dbg(hw, "Error Disabling LPLU D0\n"); + return ret_val; + } + /* Configure mdi-mdix settings */ + ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CTRL, &data); + if (ret_val) + return ret_val; + + data &= ~IGP01E1000_PSCR_AUTO_MDIX; + + switch (phy->mdix) { + case 1: + data &= ~IGP01E1000_PSCR_FORCE_MDI_MDIX; + break; + case 2: + data |= IGP01E1000_PSCR_FORCE_MDI_MDIX; + break; + case 0: + default: + data |= IGP01E1000_PSCR_AUTO_MDIX; + break; + } + ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CTRL, data); + if (ret_val) + return ret_val; + + /* set auto-master slave resolution settings */ + if (hw->mac.autoneg) { + /* when autonegotiation advertisement is only 1000Mbps then we + * should disable SmartSpeed and enable Auto MasterSlave + * resolution as hardware default. 
*/ + if (phy->autoneg_advertised == ADVERTISE_1000_FULL) { + /* Disable SmartSpeed */ + ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG, + &data); + if (ret_val) + return ret_val; + + data &= ~IGP01E1000_PSCFR_SMART_SPEED; + ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG, + data); + if (ret_val) + return ret_val; + + /* Set auto Master/Slave resolution process */ + ret_val = e1e_rphy(hw, PHY_1000T_CTRL, &data); + if (ret_val) + return ret_val; + + data &= ~CR_1000T_MS_ENABLE; + ret_val = e1e_wphy(hw, PHY_1000T_CTRL, data); + if (ret_val) + return ret_val; + } + + ret_val = e1e_rphy(hw, PHY_1000T_CTRL, &data); + if (ret_val) + return ret_val; + + /* load defaults for future use */ + phy->original_ms_type = (data & CR_1000T_MS_ENABLE) ? + ((data & CR_1000T_MS_VALUE) ? + e1000_ms_force_master : + e1000_ms_force_slave) : + e1000_ms_auto; + + switch (phy->ms_type) { + case e1000_ms_force_master: + data |= (CR_1000T_MS_ENABLE | CR_1000T_MS_VALUE); + break; + case e1000_ms_force_slave: + data |= CR_1000T_MS_ENABLE; + data &= ~(CR_1000T_MS_VALUE); + break; + case e1000_ms_auto: + data &= ~CR_1000T_MS_ENABLE; + default: + break; + } + ret_val = e1e_wphy(hw, PHY_1000T_CTRL, data); + } + + return ret_val; +} + +/** + * e1000_phy_setup_autoneg - Configure PHY for auto-negotiation + * @hw: pointer to the HW structure + * + * Reads the MII auto-neg advertisement register and/or the 1000T control + * register and if the PHY is already setup for auto-negotiation, then + * return successful. Otherwise, setup advertisement and flow control to + * the appropriate values for the wanted auto-negotiation. + **/ +static s32 e1000_phy_setup_autoneg(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 mii_autoneg_adv_reg; + u16 mii_1000t_ctrl_reg = 0; + + phy->autoneg_advertised &= phy->autoneg_mask; + + /* Read the MII Auto-Neg Advertisement Register (Address 4). */ + ret_val = e1e_rphy(hw, PHY_AUTONEG_ADV, &mii_autoneg_adv_reg); + if (ret_val) + return ret_val; + + if (phy->autoneg_mask & ADVERTISE_1000_FULL) { + /* Read the MII 1000Base-T Control Register (Address 9). */ + ret_val = e1e_rphy(hw, PHY_1000T_CTRL, &mii_1000t_ctrl_reg); + if (ret_val) + return ret_val; + } + + /* Need to parse both autoneg_advertised and fc and set up + * the appropriate PHY registers. First we will parse for + * autoneg_advertised software override. Since we can advertise + * a plethora of combinations, we need to check each bit + * individually. + */ + + /* First we clear all the 10/100 mb speed bits in the Auto-Neg + * Advertisement Register (Address 4) and the 1000 mb speed bits in + * the 1000Base-T Control Register (Address 9). + */ + mii_autoneg_adv_reg &= ~(NWAY_AR_100TX_FD_CAPS | + NWAY_AR_100TX_HD_CAPS | + NWAY_AR_10T_FD_CAPS | + NWAY_AR_10T_HD_CAPS); + mii_1000t_ctrl_reg &= ~(CR_1000T_HD_CAPS | CR_1000T_FD_CAPS); + + hw_dbg(hw, "autoneg_advertised %x\n", phy->autoneg_advertised); + + /* Do we want to advertise 10 Mb Half Duplex? */ + if (phy->autoneg_advertised & ADVERTISE_10_HALF) { + hw_dbg(hw, "Advertise 10mb Half duplex\n"); + mii_autoneg_adv_reg |= NWAY_AR_10T_HD_CAPS; + } + + /* Do we want to advertise 10 Mb Full Duplex? */ + if (phy->autoneg_advertised & ADVERTISE_10_FULL) { + hw_dbg(hw, "Advertise 10mb Full duplex\n"); + mii_autoneg_adv_reg |= NWAY_AR_10T_FD_CAPS; + } + + /* Do we want to advertise 100 Mb Half Duplex? 
*/ + if (phy->autoneg_advertised & ADVERTISE_100_HALF) { + hw_dbg(hw, "Advertise 100mb Half duplex\n"); + mii_autoneg_adv_reg |= NWAY_AR_100TX_HD_CAPS; + } + + /* Do we want to advertise 100 Mb Full Duplex? */ + if (phy->autoneg_advertised & ADVERTISE_100_FULL) { + hw_dbg(hw, "Advertise 100mb Full duplex\n"); + mii_autoneg_adv_reg |= NWAY_AR_100TX_FD_CAPS; + } + + /* We do not allow the Phy to advertise 1000 Mb Half Duplex */ + if (phy->autoneg_advertised & ADVERTISE_1000_HALF) + hw_dbg(hw, "Advertise 1000mb Half duplex request denied!\n"); + + /* Do we want to advertise 1000 Mb Full Duplex? */ + if (phy->autoneg_advertised & ADVERTISE_1000_FULL) { + hw_dbg(hw, "Advertise 1000mb Full duplex\n"); + mii_1000t_ctrl_reg |= CR_1000T_FD_CAPS; + } + + /* Check for a software override of the flow control settings, and + * setup the PHY advertisement registers accordingly. If + * auto-negotiation is enabled, then software will have to set the + * "PAUSE" bits to the correct value in the Auto-Negotiation + * Advertisement Register (PHY_AUTONEG_ADV) and re-start auto- + * negotiation. + * + * The possible values of the "fc" parameter are: + * 0: Flow control is completely disabled + * 1: Rx flow control is enabled (we can receive pause frames + * but not send pause frames). + * 2: Tx flow control is enabled (we can send pause frames + * but we do not support receiving pause frames). + * 3: Both Rx and TX flow control (symmetric) are enabled. + * other: No software override. The flow control configuration + * in the EEPROM is used. + */ + switch (hw->mac.fc) { + case e1000_fc_none: + /* Flow control (RX & TX) is completely disabled by a + * software over-ride. + */ + mii_autoneg_adv_reg &= ~(NWAY_AR_ASM_DIR | NWAY_AR_PAUSE); + break; + case e1000_fc_rx_pause: + /* RX Flow control is enabled, and TX Flow control is + * disabled, by a software over-ride. + */ + /* Since there really isn't a way to advertise that we are + * capable of RX Pause ONLY, we will advertise that we + * support both symmetric and asymmetric RX PAUSE. Later + * (in e1000e_config_fc_after_link_up) we will disable the + * hw's ability to send PAUSE frames. + */ + mii_autoneg_adv_reg |= (NWAY_AR_ASM_DIR | NWAY_AR_PAUSE); + break; + case e1000_fc_tx_pause: + /* TX Flow control is enabled, and RX Flow control is + * disabled, by a software over-ride. + */ + mii_autoneg_adv_reg |= NWAY_AR_ASM_DIR; + mii_autoneg_adv_reg &= ~NWAY_AR_PAUSE; + break; + case e1000_fc_full: + /* Flow control (both RX and TX) is enabled by a software + * over-ride. + */ + mii_autoneg_adv_reg |= (NWAY_AR_ASM_DIR | NWAY_AR_PAUSE); + break; + default: + hw_dbg(hw, "Flow control param set incorrectly\n"); + ret_val = -E1000_ERR_CONFIG; + return ret_val; + } + + ret_val = e1e_wphy(hw, PHY_AUTONEG_ADV, mii_autoneg_adv_reg); + if (ret_val) + return ret_val; + + hw_dbg(hw, "Auto-Neg Advertising %x\n", mii_autoneg_adv_reg); + + if (phy->autoneg_mask & ADVERTISE_1000_FULL) { + ret_val = e1e_wphy(hw, PHY_1000T_CTRL, mii_1000t_ctrl_reg); + } + + return ret_val; +} + +/** + * e1000_copper_link_autoneg - Setup/Enable autoneg for copper link + * @hw: pointer to the HW structure + * + * Performs initial bounds checking on autoneg advertisement parameter, then + * configure to advertise the full capability. Setup the PHY to autoneg + * and restart the negotiation process between the link partner. If + * wait_for_link, then wait for autoneg to complete before exiting. 
+ **/ +static s32 e1000_copper_link_autoneg(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 phy_ctrl; + + /* Perform some bounds checking on the autoneg advertisement + * parameter. + */ + phy->autoneg_advertised &= phy->autoneg_mask; + + /* If autoneg_advertised is zero, we assume it was not defaulted + * by the calling code so we set to advertise full capability. + */ + if (phy->autoneg_advertised == 0) + phy->autoneg_advertised = phy->autoneg_mask; + + hw_dbg(hw, "Reconfiguring auto-neg advertisement params\n"); + ret_val = e1000_phy_setup_autoneg(hw); + if (ret_val) { + hw_dbg(hw, "Error Setting up Auto-Negotiation\n"); + return ret_val; + } + hw_dbg(hw, "Restarting Auto-Neg\n"); + + /* Restart auto-negotiation by setting the Auto Neg Enable bit and + * the Auto Neg Restart bit in the PHY control register. + */ + ret_val = e1e_rphy(hw, PHY_CONTROL, &phy_ctrl); + if (ret_val) + return ret_val; + + phy_ctrl |= (MII_CR_AUTO_NEG_EN | MII_CR_RESTART_AUTO_NEG); + ret_val = e1e_wphy(hw, PHY_CONTROL, phy_ctrl); + if (ret_val) + return ret_val; + + /* Does the user want to wait for Auto-Neg to complete here, or + * check at a later time (for example, callback routine). + */ + if (phy->wait_for_link) { + ret_val = e1000_wait_autoneg(hw); + if (ret_val) { + hw_dbg(hw, "Error while waiting for " + "autoneg to complete\n"); + return ret_val; + } + } + + hw->mac.get_link_status = 1; + + return ret_val; +} + +/** + * e1000e_setup_copper_link - Configure copper link settings + * @hw: pointer to the HW structure + * + * Calls the appropriate function to configure the link for auto-neg or forced + * speed and duplex. Then we check for link, once link is established calls + * to configure collision distance and flow control are called. If link is + * not established, we return -E1000_ERR_PHY (-2). + **/ +s32 e1000e_setup_copper_link(struct e1000_hw *hw) +{ + s32 ret_val; + bool link; + + if (hw->mac.autoneg) { + /* Setup autoneg and flow control advertisement and perform + * autonegotiation. */ + ret_val = e1000_copper_link_autoneg(hw); + if (ret_val) + return ret_val; + } else { + /* PHY will be set to 10H, 10F, 100H or 100F + * depending on user settings. */ + hw_dbg(hw, "Forcing Speed and Duplex\n"); + ret_val = e1000_phy_force_speed_duplex(hw); + if (ret_val) { + hw_dbg(hw, "Error Forcing Speed and Duplex\n"); + return ret_val; + } + } + + /* Check link status. Wait up to 100 microseconds for link to become + * valid. + */ + ret_val = e1000e_phy_has_link_generic(hw, + COPPER_LINK_UP_LIMIT, + 10, + &link); + if (ret_val) + return ret_val; + + if (link) { + hw_dbg(hw, "Valid link established!!!\n"); + e1000e_config_collision_dist(hw); + ret_val = e1000e_config_fc_after_link_up(hw); + } else { + hw_dbg(hw, "Unable to establish link!!!\n"); + } + + return ret_val; +} + +/** + * e1000e_phy_force_speed_duplex_igp - Force speed/duplex for igp PHY + * @hw: pointer to the HW structure + * + * Calls the PHY setup function to force speed and duplex. Clears the + * auto-crossover to force MDI manually. Waits for link and returns + * successful if link up is successful, else -E1000_ERR_PHY (-2). 
+ **/ +s32 e1000e_phy_force_speed_duplex_igp(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 phy_data; + bool link; + + ret_val = e1e_rphy(hw, PHY_CONTROL, &phy_data); + if (ret_val) + return ret_val; + + e1000e_phy_force_speed_duplex_setup(hw, &phy_data); + + ret_val = e1e_wphy(hw, PHY_CONTROL, phy_data); + if (ret_val) + return ret_val; + + /* Clear Auto-Crossover to force MDI manually. IGP requires MDI + * forced whenever speed and duplex are forced. + */ + ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CTRL, &phy_data); + if (ret_val) + return ret_val; + + phy_data &= ~IGP01E1000_PSCR_AUTO_MDIX; + phy_data &= ~IGP01E1000_PSCR_FORCE_MDI_MDIX; + + ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CTRL, phy_data); + if (ret_val) + return ret_val; + + hw_dbg(hw, "IGP PSCR: %X\n", phy_data); + + udelay(1); + + if (phy->wait_for_link) { + hw_dbg(hw, "Waiting for forced speed/duplex link on IGP phy.\n"); + + ret_val = e1000e_phy_has_link_generic(hw, + PHY_FORCE_LIMIT, + 100000, + &link); + if (ret_val) + return ret_val; + + if (!link) + hw_dbg(hw, "Link taking longer than expected.\n"); + + /* Try once more */ + ret_val = e1000e_phy_has_link_generic(hw, + PHY_FORCE_LIMIT, + 100000, + &link); + if (ret_val) + return ret_val; + } + + return ret_val; +} + +/** + * e1000e_phy_force_speed_duplex_m88 - Force speed/duplex for m88 PHY + * @hw: pointer to the HW structure + * + * Calls the PHY setup function to force speed and duplex. Clears the + * auto-crossover to force MDI manually. Resets the PHY to commit the + * changes. If time expires while waiting for link up, we reset the DSP. + * After reset, TX_CLK and CRS on TX must be set. Return successful upon + * successful completion, else return corresponding error code. + **/ +s32 e1000e_phy_force_speed_duplex_m88(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 phy_data; + bool link; + + /* Clear Auto-Crossover to force MDI manually. M88E1000 requires MDI + * forced whenever speed and duplex are forced. + */ + ret_val = e1e_rphy(hw, M88E1000_PHY_SPEC_CTRL, &phy_data); + if (ret_val) + return ret_val; + + phy_data &= ~M88E1000_PSCR_AUTO_X_MODE; + ret_val = e1e_wphy(hw, M88E1000_PHY_SPEC_CTRL, phy_data); + if (ret_val) + return ret_val; + + hw_dbg(hw, "M88E1000 PSCR: %X\n", phy_data); + + ret_val = e1e_rphy(hw, PHY_CONTROL, &phy_data); + if (ret_val) + return ret_val; + + e1000e_phy_force_speed_duplex_setup(hw, &phy_data); + + /* Reset the phy to commit changes. */ + phy_data |= MII_CR_RESET; + + ret_val = e1e_wphy(hw, PHY_CONTROL, phy_data); + if (ret_val) + return ret_val; + + udelay(1); + + if (phy->wait_for_link) { + hw_dbg(hw, "Waiting for forced speed/duplex link on M88 phy.\n"); + + ret_val = e1000e_phy_has_link_generic(hw, PHY_FORCE_LIMIT, + 100000, &link); + if (ret_val) + return ret_val; + + if (!link) { + /* We didn't get link. + * Reset the DSP and cross our fingers. + */ + ret_val = e1e_wphy(hw, M88E1000_PHY_PAGE_SELECT, 0x001d); + if (ret_val) + return ret_val; + ret_val = e1000e_phy_reset_dsp(hw); + if (ret_val) + return ret_val; + } + + /* Try once more */ + ret_val = e1000e_phy_has_link_generic(hw, PHY_FORCE_LIMIT, + 100000, &link); + if (ret_val) + return ret_val; + } + + ret_val = e1e_rphy(hw, M88E1000_EXT_PHY_SPEC_CTRL, &phy_data); + if (ret_val) + return ret_val; + + /* Resetting the phy means we need to re-force TX_CLK in the + * Extended PHY Specific Control Register to 25MHz clock from + * the reset value of 2.5MHz. 
+ */ + phy_data |= M88E1000_EPSCR_TX_CLK_25; + ret_val = e1e_wphy(hw, M88E1000_EXT_PHY_SPEC_CTRL, phy_data); + if (ret_val) + return ret_val; + + /* In addition, we must re-enable CRS on Tx for both half and full + * duplex. + */ + ret_val = e1e_rphy(hw, M88E1000_PHY_SPEC_CTRL, &phy_data); + if (ret_val) + return ret_val; + + phy_data |= M88E1000_PSCR_ASSERT_CRS_ON_TX; + ret_val = e1e_wphy(hw, M88E1000_PHY_SPEC_CTRL, phy_data); + + return ret_val; +} + +/** + * e1000e_phy_force_speed_duplex_setup - Configure forced PHY speed/duplex + * @hw: pointer to the HW structure + * @phy_ctrl: pointer to current value of PHY_CONTROL + * + * Forces speed and duplex on the PHY by doing the following: disable flow + * control, force speed/duplex on the MAC, disable auto speed detection, + * disable auto-negotiation, configure duplex, configure speed, configure + * the collision distance, write configuration to CTRL register. The + * caller must write to the PHY_CONTROL register for these settings to + * take affect. + **/ +void e1000e_phy_force_speed_duplex_setup(struct e1000_hw *hw, u16 *phy_ctrl) +{ + struct e1000_mac_info *mac = &hw->mac; + u32 ctrl; + + /* Turn off flow control when forcing speed/duplex */ + mac->fc = e1000_fc_none; + + /* Force speed/duplex on the mac */ + ctrl = er32(CTRL); + ctrl |= (E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX); + ctrl &= ~E1000_CTRL_SPD_SEL; + + /* Disable Auto Speed Detection */ + ctrl &= ~E1000_CTRL_ASDE; + + /* Disable autoneg on the phy */ + *phy_ctrl &= ~MII_CR_AUTO_NEG_EN; + + /* Forcing Full or Half Duplex? */ + if (mac->forced_speed_duplex & E1000_ALL_HALF_DUPLEX) { + ctrl &= ~E1000_CTRL_FD; + *phy_ctrl &= ~MII_CR_FULL_DUPLEX; + hw_dbg(hw, "Half Duplex\n"); + } else { + ctrl |= E1000_CTRL_FD; + *phy_ctrl |= MII_CR_FULL_DUPLEX; + hw_dbg(hw, "Full Duplex\n"); + } + + /* Forcing 10mb or 100mb? */ + if (mac->forced_speed_duplex & E1000_ALL_100_SPEED) { + ctrl |= E1000_CTRL_SPD_100; + *phy_ctrl |= MII_CR_SPEED_100; + *phy_ctrl &= ~(MII_CR_SPEED_1000 | MII_CR_SPEED_10); + hw_dbg(hw, "Forcing 100mb\n"); + } else { + ctrl &= ~(E1000_CTRL_SPD_1000 | E1000_CTRL_SPD_100); + *phy_ctrl |= MII_CR_SPEED_10; + *phy_ctrl &= ~(MII_CR_SPEED_1000 | MII_CR_SPEED_100); + hw_dbg(hw, "Forcing 10mb\n"); + } + + e1000e_config_collision_dist(hw); + + ew32(CTRL, ctrl); +} + +/** + * e1000e_set_d3_lplu_state - Sets low power link up state for D3 + * @hw: pointer to the HW structure + * @active: boolean used to enable/disable lplu + * + * Success returns 0, Failure returns 1 + * + * The low power link up (lplu) state is set to the power management level D3 + * and SmartSpeed is disabled when active is true, else clear lplu for D3 + * and enable Smartspeed. LPLU and Smartspeed are mutually exclusive. LPLU + * is used during Dx states where the power conservation is most important. + * During driver activity, SmartSpeed should be enabled so performance is + * maintained. + **/ +s32 e1000e_set_d3_lplu_state(struct e1000_hw *hw, bool active) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 data; + + ret_val = e1e_rphy(hw, IGP02E1000_PHY_POWER_MGMT, &data); + if (ret_val) + return ret_val; + + if (!active) { + data &= ~IGP02E1000_PM_D3_LPLU; + ret_val = e1e_wphy(hw, + IGP02E1000_PHY_POWER_MGMT, + data); + if (ret_val) + return ret_val; + /* LPLU and SmartSpeed are mutually exclusive. LPLU is used + * during Dx states where the power conservation is most + * important. During driver activity we should enable + * SmartSpeed, so performance is maintained. 
*/ + if (phy->smart_speed == e1000_smart_speed_on) { + ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG, + &data); + if (ret_val) + return ret_val; + + data |= IGP01E1000_PSCFR_SMART_SPEED; + ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG, + data); + if (ret_val) + return ret_val; + } else if (phy->smart_speed == e1000_smart_speed_off) { + ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG, + &data); + if (ret_val) + return ret_val; + + data &= ~IGP01E1000_PSCFR_SMART_SPEED; + ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG, + data); + if (ret_val) + return ret_val; + } + } else if ((phy->autoneg_advertised == E1000_ALL_SPEED_DUPLEX) || + (phy->autoneg_advertised == E1000_ALL_NOT_GIG) || + (phy->autoneg_advertised == E1000_ALL_10_SPEED)) { + data |= IGP02E1000_PM_D3_LPLU; + ret_val = e1e_wphy(hw, IGP02E1000_PHY_POWER_MGMT, data); + if (ret_val) + return ret_val; + + /* When LPLU is enabled, we should disable SmartSpeed */ + ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG, &data); + if (ret_val) + return ret_val; + + data &= ~IGP01E1000_PSCFR_SMART_SPEED; + ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG, data); + } + + return ret_val; +} + +/** + * e1000e_check_downshift - Checks whether a downshift in speed occured + * @hw: pointer to the HW structure + * + * Success returns 0, Failure returns 1 + * + * A downshift is detected by querying the PHY link health. + **/ +s32 e1000e_check_downshift(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 phy_data, offset, mask; + + switch (phy->type) { + case e1000_phy_m88: + case e1000_phy_gg82563: + offset = M88E1000_PHY_SPEC_STATUS; + mask = M88E1000_PSSR_DOWNSHIFT; + break; + case e1000_phy_igp_2: + case e1000_phy_igp_3: + offset = IGP01E1000_PHY_LINK_HEALTH; + mask = IGP01E1000_PLHR_SS_DOWNGRADE; + break; + default: + /* speed downshift not supported */ + phy->speed_downgraded = 0; + return 0; + } + + ret_val = e1e_rphy(hw, offset, &phy_data); + + if (!ret_val) + phy->speed_downgraded = (phy_data & mask); + + return ret_val; +} + +/** + * e1000_check_polarity_m88 - Checks the polarity. + * @hw: pointer to the HW structure + * + * Success returns 0, Failure returns -E1000_ERR_PHY (-2) + * + * Polarity is determined based on the PHY specific status register. + **/ +static s32 e1000_check_polarity_m88(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 data; + + ret_val = e1e_rphy(hw, M88E1000_PHY_SPEC_STATUS, &data); + + if (!ret_val) + phy->cable_polarity = (data & M88E1000_PSSR_REV_POLARITY) + ? e1000_rev_polarity_reversed + : e1000_rev_polarity_normal; + + return ret_val; +} + +/** + * e1000_check_polarity_igp - Checks the polarity. + * @hw: pointer to the HW structure + * + * Success returns 0, Failure returns -E1000_ERR_PHY (-2) + * + * Polarity is determined based on the PHY port status register, and the + * current speed (since there is no polarity at 100Mbps). + **/ +static s32 e1000_check_polarity_igp(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 data, offset, mask; + + /* Polarity is determined based on the speed of + * our connection. */ + ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_STATUS, &data); + if (ret_val) + return ret_val; + + if ((data & IGP01E1000_PSSR_SPEED_MASK) == + IGP01E1000_PSSR_SPEED_1000MBPS) { + offset = IGP01E1000_PHY_PCS_INIT_REG; + mask = IGP01E1000_PHY_POLARITY_MASK; + } else { + /* This really only applies to 10Mbps since + * there is no polarity for 100Mbps (always 0). 
+ */ + offset = IGP01E1000_PHY_PORT_STATUS; + mask = IGP01E1000_PSSR_POLARITY_REVERSED; + } + + ret_val = e1e_rphy(hw, offset, &data); + + if (!ret_val) + phy->cable_polarity = (data & mask) + ? e1000_rev_polarity_reversed + : e1000_rev_polarity_normal; + + return ret_val; +} + +/** + * e1000_wait_autoneg - Wait for auto-neg completion + * @hw: pointer to the HW structure + * + * Waits for auto-negotiation to complete or for the auto-negotiation time + * limit to expire, whichever happens first. + **/ +static s32 e1000_wait_autoneg(struct e1000_hw *hw) +{ + s32 ret_val = 0; + u16 i, phy_status; + + /* Break after autoneg completes or PHY_AUTO_NEG_LIMIT expires. */ + for (i = PHY_AUTO_NEG_LIMIT; i > 0; i--) { + ret_val = e1e_rphy(hw, PHY_STATUS, &phy_status); + if (ret_val) + break; + ret_val = e1e_rphy(hw, PHY_STATUS, &phy_status); + if (ret_val) + break; + if (phy_status & MII_SR_AUTONEG_COMPLETE) + break; + msleep(100); + } + + /* PHY_AUTO_NEG_TIME expiration doesn't guarantee auto-negotiation + * has completed. + */ + return ret_val; +} + +/** + * e1000e_phy_has_link_generic - Polls PHY for link + * @hw: pointer to the HW structure + * @iterations: number of times to poll for link + * @usec_interval: delay between polling attempts + * @success: pointer to whether polling was successful or not + * + * Polls the PHY status register for link, 'iterations' number of times. + **/ +s32 e1000e_phy_has_link_generic(struct e1000_hw *hw, u32 iterations, + u32 usec_interval, bool *success) +{ + s32 ret_val = 0; + u16 i, phy_status; + + for (i = 0; i < iterations; i++) { + /* Some PHYs require the PHY_STATUS register to be read + * twice due to the link bit being sticky. No harm doing + * it across the board. + */ + ret_val = e1e_rphy(hw, PHY_STATUS, &phy_status); + if (ret_val) + break; + ret_val = e1e_rphy(hw, PHY_STATUS, &phy_status); + if (ret_val) + break; + if (phy_status & MII_SR_LINK_STATUS) + break; + if (usec_interval >= 1000) + mdelay(usec_interval/1000); + else + udelay(usec_interval); + } + + *success = (i < iterations); + + return ret_val; +} + +/** + * e1000e_get_cable_length_m88 - Determine cable length for m88 PHY + * @hw: pointer to the HW structure + * + * Reads the PHY specific status register to retrieve the cable length + * information. The cable length is determined by averaging the minimum and + * maximum values to get the "average" cable length. The m88 PHY has five + * possible cable length values, which are: + * Register Value Cable Length + * 0 < 50 meters + * 1 50 - 80 meters + * 2 80 - 110 meters + * 3 110 - 140 meters + * 4 > 140 meters + **/ +s32 e1000e_get_cable_length_m88(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 phy_data, index; + + ret_val = e1e_rphy(hw, M88E1000_PHY_SPEC_STATUS, &phy_data); + if (ret_val) + return ret_val; + + index = (phy_data & M88E1000_PSSR_CABLE_LENGTH) >> + M88E1000_PSSR_CABLE_LENGTH_SHIFT; + phy->min_cable_length = e1000_m88_cable_length_table[index]; + phy->max_cable_length = e1000_m88_cable_length_table[index+1]; + + phy->cable_length = (phy->min_cable_length + phy->max_cable_length) / 2; + + return ret_val; +} + +/** + * e1000e_get_cable_length_igp_2 - Determine cable length for igp2 PHY + * @hw: pointer to the HW structure + * + * The automatic gain control (agc) normalizes the amplitude of the + * received signal, adjusting for the attenuation produced by the + * cable.
By reading the AGC registers, which represent the + * combination of coarse and fine gain values, the value can be put + * into a lookup table to obtain the approximate cable length + * for each channel. + **/ +s32 e1000e_get_cable_length_igp_2(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 phy_data, i, agc_value = 0; + u16 cur_agc_index, max_agc_index = 0; + u16 min_agc_index = IGP02E1000_CABLE_LENGTH_TABLE_SIZE - 1; + u16 agc_reg_array[IGP02E1000_PHY_CHANNEL_NUM] = + {IGP02E1000_PHY_AGC_A, + IGP02E1000_PHY_AGC_B, + IGP02E1000_PHY_AGC_C, + IGP02E1000_PHY_AGC_D}; + + /* Read the AGC registers for all channels */ + for (i = 0; i < IGP02E1000_PHY_CHANNEL_NUM; i++) { + ret_val = e1e_rphy(hw, agc_reg_array[i], &phy_data); + if (ret_val) + return ret_val; + + /* Getting bits 15:9, which represent the combination of + * coarse and fine gain values. The result is a number + * that can be put into the lookup table to obtain the + * approximate cable length. */ + cur_agc_index = (phy_data >> IGP02E1000_AGC_LENGTH_SHIFT) & + IGP02E1000_AGC_LENGTH_MASK; + + /* Array index bound check. */ + if ((cur_agc_index >= IGP02E1000_CABLE_LENGTH_TABLE_SIZE) || + (cur_agc_index == 0)) + return -E1000_ERR_PHY; + + /* Remove min & max AGC values from calculation. */ + if (e1000_igp_2_cable_length_table[min_agc_index] > + e1000_igp_2_cable_length_table[cur_agc_index]) + min_agc_index = cur_agc_index; + if (e1000_igp_2_cable_length_table[max_agc_index] < + e1000_igp_2_cable_length_table[cur_agc_index]) + max_agc_index = cur_agc_index; + + agc_value += e1000_igp_2_cable_length_table[cur_agc_index]; + } + + agc_value -= (e1000_igp_2_cable_length_table[min_agc_index] + + e1000_igp_2_cable_length_table[max_agc_index]); + agc_value /= (IGP02E1000_PHY_CHANNEL_NUM - 2); + + /* Calculate cable length with the error range of +/- 10 meters. */ + phy->min_cable_length = ((agc_value - IGP02E1000_AGC_RANGE) > 0) ? + (agc_value - IGP02E1000_AGC_RANGE) : 0; + phy->max_cable_length = agc_value + IGP02E1000_AGC_RANGE; + + phy->cable_length = (phy->min_cable_length + phy->max_cable_length) / 2; + + return ret_val; +} + +/** + * e1000e_get_phy_info_m88 - Retrieve PHY information + * @hw: pointer to the HW structure + * + * Valid for only copper links. Read the PHY status register (sticky read) + * to verify that link is up. Read the PHY special control register to + * determine the polarity and 10base-T extended distance. Read the PHY + * special status register to determine MDI/MDIx and current speed. If + * speed is 1000, then determine cable length, local and remote receiver.
+ **/ +s32 e1000e_get_phy_info_m88(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 phy_data; + bool link; + + if (hw->media_type != e1000_media_type_copper) { + hw_dbg(hw, "Phy info is only valid for copper media\n"); + return -E1000_ERR_CONFIG; + } + + ret_val = e1000e_phy_has_link_generic(hw, 1, 0, &link); + if (ret_val) + return ret_val; + + if (!link) { + hw_dbg(hw, "Phy info is only valid if link is up\n"); + return -E1000_ERR_CONFIG; + } + + ret_val = e1e_rphy(hw, M88E1000_PHY_SPEC_CTRL, &phy_data); + if (ret_val) + return ret_val; + + phy->polarity_correction = (phy_data & + M88E1000_PSCR_POLARITY_REVERSAL); + + ret_val = e1000_check_polarity_m88(hw); + if (ret_val) + return ret_val; + + ret_val = e1e_rphy(hw, M88E1000_PHY_SPEC_STATUS, &phy_data); + if (ret_val) + return ret_val; + + phy->is_mdix = (phy_data & M88E1000_PSSR_MDIX); + + if ((phy_data & M88E1000_PSSR_SPEED) == M88E1000_PSSR_1000MBS) { + ret_val = e1000_get_cable_length(hw); + if (ret_val) + return ret_val; + + ret_val = e1e_rphy(hw, PHY_1000T_STATUS, &phy_data); + if (ret_val) + return ret_val; + + phy->local_rx = (phy_data & SR_1000T_LOCAL_RX_STATUS) + ? e1000_1000t_rx_status_ok + : e1000_1000t_rx_status_not_ok; + + phy->remote_rx = (phy_data & SR_1000T_REMOTE_RX_STATUS) + ? e1000_1000t_rx_status_ok + : e1000_1000t_rx_status_not_ok; + } else { + /* Set values to "undefined" */ + phy->cable_length = E1000_CABLE_LENGTH_UNDEFINED; + phy->local_rx = e1000_1000t_rx_status_undefined; + phy->remote_rx = e1000_1000t_rx_status_undefined; + } + + return ret_val; +} + +/** + * e1000e_get_phy_info_igp - Retrieve igp PHY information + * @hw: pointer to the HW structure + * + * Read PHY status to determine if link is up. If link is up, then + * set/determine 10base-T extended distance and polarity correction. Read + * PHY port status to determine MDI/MDIx and speed. Based on the speed, + * determine on the cable length, local and remote receiver. + **/ +s32 e1000e_get_phy_info_igp(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 data; + bool link; + + ret_val = e1000e_phy_has_link_generic(hw, 1, 0, &link); + if (ret_val) + return ret_val; + + if (!link) { + hw_dbg(hw, "Phy info is only valid if link is up\n"); + return -E1000_ERR_CONFIG; + } + + phy->polarity_correction = 1; + + ret_val = e1000_check_polarity_igp(hw); + if (ret_val) + return ret_val; + + ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_STATUS, &data); + if (ret_val) + return ret_val; + + phy->is_mdix = (data & IGP01E1000_PSSR_MDIX); + + if ((data & IGP01E1000_PSSR_SPEED_MASK) == + IGP01E1000_PSSR_SPEED_1000MBPS) { + ret_val = e1000_get_cable_length(hw); + if (ret_val) + return ret_val; + + ret_val = e1e_rphy(hw, PHY_1000T_STATUS, &data); + if (ret_val) + return ret_val; + + phy->local_rx = (data & SR_1000T_LOCAL_RX_STATUS) + ? e1000_1000t_rx_status_ok + : e1000_1000t_rx_status_not_ok; + + phy->remote_rx = (data & SR_1000T_REMOTE_RX_STATUS) + ? e1000_1000t_rx_status_ok + : e1000_1000t_rx_status_not_ok; + } else { + phy->cable_length = E1000_CABLE_LENGTH_UNDEFINED; + phy->local_rx = e1000_1000t_rx_status_undefined; + phy->remote_rx = e1000_1000t_rx_status_undefined; + } + + return ret_val; +} + +/** + * e1000e_phy_sw_reset - PHY software reset + * @hw: pointer to the HW structure + * + * Does a software reset of the PHY by reading the PHY control register and + * setting/write the control register reset bit to the PHY. 
+ **/ +s32 e1000e_phy_sw_reset(struct e1000_hw *hw) +{ + s32 ret_val; + u16 phy_ctrl; + + ret_val = e1e_rphy(hw, PHY_CONTROL, &phy_ctrl); + if (ret_val) + return ret_val; + + phy_ctrl |= MII_CR_RESET; + ret_val = e1e_wphy(hw, PHY_CONTROL, phy_ctrl); + if (ret_val) + return ret_val; + + udelay(1); + + return ret_val; +} + +/** + * e1000e_phy_hw_reset_generic - PHY hardware reset + * @hw: pointer to the HW structure + * + * Verify the reset block is not blocking us from resetting. Acquire + * semaphore (if necessary) and read/set/write the device control reset + * bit in the PHY. Wait the appropriate delay time for the device to + * reset and release the semaphore (if necessary). + **/ +s32 e1000e_phy_hw_reset_generic(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u32 ctrl; + + ret_val = e1000_check_reset_block(hw); + if (ret_val) + return 0; + + ret_val = phy->ops.acquire_phy(hw); + if (ret_val) + return ret_val; + + ctrl = er32(CTRL); + ew32(CTRL, ctrl | E1000_CTRL_PHY_RST); + e1e_flush(); + + udelay(phy->reset_delay_us); + + ew32(CTRL, ctrl); + e1e_flush(); + + udelay(150); + + phy->ops.release_phy(hw); + + return e1000_get_phy_cfg_done(hw); +} + +/** + * e1000e_get_cfg_done - Generic configuration done + * @hw: pointer to the HW structure + * + * Generic function to wait 10 milliseconds for configuration to complete + * and return success. + **/ +s32 e1000e_get_cfg_done(struct e1000_hw *hw) +{ + mdelay(10); + return 0; +} + +/* Internal function pointers */ + +/** + * e1000_get_phy_cfg_done - Generic PHY configuration done + * @hw: pointer to the HW structure + * + * Return success if silicon family did not implement a family specific + * get_cfg_done function. + **/ +static s32 e1000_get_phy_cfg_done(struct e1000_hw *hw) +{ + if (hw->phy.ops.get_cfg_done) + return hw->phy.ops.get_cfg_done(hw); + + return 0; +} + +/** + * e1000_phy_force_speed_duplex - Generic force PHY speed/duplex + * @hw: pointer to the HW structure + * + * When the silicon family has not implemented a forced speed/duplex + * function for the PHY, simply return 0. + **/ +static s32 e1000_phy_force_speed_duplex(struct e1000_hw *hw) +{ + if (hw->phy.ops.force_speed_duplex) + return hw->phy.ops.force_speed_duplex(hw); + + return 0; +} + +/** + * e1000e_get_phy_type_from_id - Get PHY type from id + * @phy_id: phy_id read from the phy + * + * Returns the phy type from the id. + **/ +enum e1000_phy_type e1000e_get_phy_type_from_id(u32 phy_id) +{ + enum e1000_phy_type phy_type = e1000_phy_unknown; + + switch (phy_id) { + case M88E1000_I_PHY_ID: + case M88E1000_E_PHY_ID: + case M88E1111_I_PHY_ID: + case M88E1011_I_PHY_ID: + phy_type = e1000_phy_m88; + break; + case IGP01E1000_I_PHY_ID: /* IGP 1 & 2 share this */ + phy_type = e1000_phy_igp_2; + break; + case GG82563_E_PHY_ID: + phy_type = e1000_phy_gg82563; + break; + case IGP03E1000_E_PHY_ID: + phy_type = e1000_phy_igp_3; + break; + case IFE_E_PHY_ID: + case IFE_PLUS_E_PHY_ID: + case IFE_C_E_PHY_ID: + phy_type = e1000_phy_ife; + break; + default: + phy_type = e1000_phy_unknown; + break; + } + return phy_type; +} + +/** + * e1000e_commit_phy - Soft PHY reset + * @hw: pointer to the HW structure + * + * Performs a soft PHY reset on those that apply. This is a function pointer + * entry point called by drivers.
+ **/ +s32 e1000e_commit_phy(struct e1000_hw *hw) +{ + if (hw->phy.ops.commit_phy) + return hw->phy.ops.commit_phy(hw); + + return 0; +} + +/** + * e1000_set_d0_lplu_state - Sets low power link up state for D0 + * @hw: pointer to the HW structure + * @active: boolean used to enable/disable lplu + * + * Success returns 0, Failure returns 1 + * + * The low power link up (lplu) state is set to the power management level D0 + * and SmartSpeed is disabled when active is true, else clear lplu for D0 + * and enable Smartspeed. LPLU and Smartspeed are mutually exclusive. LPLU + * is used during Dx states where the power conservation is most important. + * During driver activity, SmartSpeed should be enabled so performance is + * maintained. This is a function pointer entry point called by drivers. + **/ +static s32 e1000_set_d0_lplu_state(struct e1000_hw *hw, bool active) +{ + if (hw->phy.ops.set_d0_lplu_state) + return hw->phy.ops.set_d0_lplu_state(hw, active); + + return 0; +} diff --git a/drivers/net/e2100.c b/drivers/net/e2100.c index b2b0a96218ca9a339e11500f7da6c2a9cc732a7a..6390f51ea6fbdc7e455ed70c6650b29edc53fc8a 100644 --- a/drivers/net/e2100.c +++ b/drivers/net/e2100.c @@ -124,8 +124,6 @@ static int __init do_e2100_probe(struct net_device *dev) int base_addr = dev->base_addr; int irq = dev->irq; - SET_MODULE_OWNER(dev); - if (base_addr > 0x1ff) /* Check a single specified location. */ return e21_probe1(dev, base_addr); else if (base_addr != 0) /* Don't probe at all. */ diff --git a/drivers/net/eepro.c b/drivers/net/eepro.c index 47680237f783f04b05a5c233012b6c48d4d0d491..83bda6ccde98ef1fcb855be404cfd8fa7445afaf 100644 --- a/drivers/net/eepro.c +++ b/drivers/net/eepro.c @@ -192,7 +192,6 @@ static unsigned int net_debug = NET_DEBUG; /* Information that need to be kept for each board. */ struct eepro_local { - struct net_device_stats stats; unsigned rx_start; unsigned tx_start; /* start of the transmit chain */ int tx_last; /* pointer to last packet in the transmit chain */ @@ -315,7 +314,6 @@ static irqreturn_t eepro_interrupt(int irq, void *dev_id); static void eepro_rx(struct net_device *dev); static void eepro_transmit_interrupt(struct net_device *dev); static int eepro_close(struct net_device *dev); -static struct net_device_stats *eepro_get_stats(struct net_device *dev); static void set_multicast_list(struct net_device *dev); static void eepro_tx_timeout (struct net_device *dev); @@ -514,7 +512,7 @@ buffer (transmit-buffer = 32K - receive-buffer). /* a complete sel reset */ #define eepro_complete_selreset(ioaddr) { \ - lp->stats.tx_errors++;\ + dev->stats.tx_errors++;\ eepro_sel_reset(ioaddr);\ lp->tx_end = \ lp->xmt_lower_limit;\ @@ -537,8 +535,6 @@ static int __init do_eepro_probe(struct net_device *dev) int base_addr = dev->base_addr; int irq = dev->irq; - SET_MODULE_OWNER(dev); - #ifdef PnPWakeup /* XXXX for multiple cards should this only be run once? 
*/ @@ -594,8 +590,6 @@ struct net_device * __init eepro_probe(int unit) if (!dev) return ERR_PTR(-ENODEV); - SET_MODULE_OWNER(dev); - sprintf(dev->name, "eth%d", unit); netdev_boot_setup_check(dev); @@ -696,6 +690,7 @@ static void __init eepro_print_info (struct net_device *dev) struct eepro_local * lp = netdev_priv(dev); int i; const char * ifmap[] = {"AUI", "10Base2", "10BaseT"}; + DECLARE_MAC_BUF(mac); i = inb(dev->base_addr + ID_REG); printk(KERN_DEBUG " id: %#x ",i); @@ -717,10 +712,10 @@ static void __init eepro_print_info (struct net_device *dev) case LAN595: printk("%s: Intel 82595-based lan card at %#x,", dev->name, (unsigned)dev->base_addr); + break; } - for (i=0; i < 6; i++) - printk("%c%02x", i ? ':' : ' ', dev->dev_addr[i]); + printk(" %s", print_mac(mac, dev->dev_addr)); if (net_debug > 3) printk(KERN_DEBUG ", %dK RCV buffer", @@ -860,7 +855,6 @@ static int __init eepro_probe1(struct net_device *dev, int autoprobe) dev->open = eepro_open; dev->stop = eepro_close; dev->hard_start_xmit = eepro_send_packet; - dev->get_stats = eepro_get_stats; dev->set_multicast_list = &set_multicast_list; dev->tx_timeout = eepro_tx_timeout; dev->watchdog_timeo = TX_TIMEOUT; @@ -1158,9 +1152,9 @@ static int eepro_send_packet(struct sk_buff *skb, struct net_device *dev) if (hardware_send_packet(dev, buf, length)) /* we won't wake queue here because we're out of space */ - lp->stats.tx_dropped++; + dev->stats.tx_dropped++; else { - lp->stats.tx_bytes+=skb->len; + dev->stats.tx_bytes+=skb->len; dev->trans_start = jiffies; netif_wake_queue(dev); } @@ -1170,7 +1164,7 @@ static int eepro_send_packet(struct sk_buff *skb, struct net_device *dev) dev_kfree_skb (skb); /* You might need to clean up and record Tx statistics here. */ - /* lp->stats.tx_aborted_errors++; */ + /* dev->stats.tx_aborted_errors++; */ if (net_debug > 5) printk(KERN_DEBUG "%s: exiting eepro_send_packet routine.\n", dev->name); @@ -1277,16 +1271,6 @@ static int eepro_close(struct net_device *dev) return 0; } -/* Get the current statistics. This may be called with the card open or - closed. */ -static struct net_device_stats * -eepro_get_stats(struct net_device *dev) -{ - struct eepro_local *lp = netdev_priv(dev); - - return &lp->stats; -} - /* Set or clear the multicast filter for this adaptor. */ static void @@ -1579,12 +1563,12 @@ eepro_rx(struct net_device *dev) /* Malloc up new buffer. 
*/ struct sk_buff *skb; - lp->stats.rx_bytes+=rcv_size; + dev->stats.rx_bytes+=rcv_size; rcv_size &= 0x3fff; skb = dev_alloc_skb(rcv_size+5); if (skb == NULL) { printk(KERN_NOTICE "%s: Memory squeeze, dropping packet.\n", dev->name); - lp->stats.rx_dropped++; + dev->stats.rx_dropped++; rcv_car = lp->rx_start + RCV_HEADER + rcv_size; lp->rx_start = rcv_next_frame; outw(rcv_next_frame, ioaddr + HOST_ADDRESS_REG); @@ -1606,28 +1590,28 @@ eepro_rx(struct net_device *dev) skb->protocol = eth_type_trans(skb,dev); netif_rx(skb); dev->last_rx = jiffies; - lp->stats.rx_packets++; + dev->stats.rx_packets++; } else { /* Not sure will ever reach here, I set the 595 to discard bad received frames */ - lp->stats.rx_errors++; + dev->stats.rx_errors++; if (rcv_status & 0x0100) - lp->stats.rx_over_errors++; + dev->stats.rx_over_errors++; else if (rcv_status & 0x0400) - lp->stats.rx_frame_errors++; + dev->stats.rx_frame_errors++; else if (rcv_status & 0x0800) - lp->stats.rx_crc_errors++; + dev->stats.rx_crc_errors++; printk(KERN_DEBUG "%s: event = %#x, status = %#x, next = %#x, size = %#x\n", dev->name, rcv_event, rcv_status, rcv_next_frame, rcv_size); } if (rcv_status & 0x1000) - lp->stats.rx_length_errors++; + dev->stats.rx_length_errors++; rcv_car = lp->rx_start + RCV_HEADER + rcv_size; lp->rx_start = rcv_next_frame; @@ -1670,11 +1654,11 @@ eepro_transmit_interrupt(struct net_device *dev) netif_wake_queue (dev); if (xmt_status & TX_OK) - lp->stats.tx_packets++; + dev->stats.tx_packets++; else { - lp->stats.tx_errors++; + dev->stats.tx_errors++; if (xmt_status & 0x0400) { - lp->stats.tx_carrier_errors++; + dev->stats.tx_carrier_errors++; printk(KERN_DEBUG "%s: carrier error\n", dev->name); printk(KERN_DEBUG "%s: XMT status = %#x\n", @@ -1688,11 +1672,11 @@ eepro_transmit_interrupt(struct net_device *dev) } } if (xmt_status & 0x000f) { - lp->stats.collisions += (xmt_status & 0x000f); + dev->stats.collisions += (xmt_status & 0x000f); } if ((xmt_status & 0x0040) == 0x0) { - lp->stats.tx_heartbeat_errors++; + dev->stats.tx_heartbeat_errors++; } } } diff --git a/drivers/net/eepro100.c b/drivers/net/eepro100.c index 3c54014acece68be4c3217a5565ec2f47eafd7f9..1548a80f917d52650feb344369ab065b16122a4f 100644 --- a/drivers/net/eepro100.c +++ b/drivers/net/eepro100.c @@ -622,6 +622,7 @@ static int __devinit speedo_found1(struct pci_dev *pdev, int size; void *tx_ring_space; dma_addr_t tx_ring_dma; + DECLARE_MAC_BUF(mac); size = TX_RING_SIZE * sizeof(struct TxFD) + sizeof(struct speedo_stats); tx_ring_space = pci_alloc_consistent(pdev, size, &tx_ring_dma); @@ -635,7 +636,6 @@ static int __devinit speedo_found1(struct pci_dev *pdev, return -1; } - SET_MODULE_OWNER(dev); SET_NETDEV_DEV(dev, &pdev->dev); if (dev->mem_start > 0) @@ -706,12 +706,8 @@ static int __devinit speedo_found1(struct pci_dev *pdev, else product = pci_name(pdev); - printk(KERN_INFO "%s: %s, ", dev->name, product); - - for (i = 0; i < 5; i++) - printk("%2.2X:", dev->dev_addr[i]); - printk("%2.2X, ", dev->dev_addr[i]); - printk("IRQ %d.\n", pdev->irq); + printk(KERN_INFO "%s: %s, %s, IRQ %d.\n", dev->name, product, + print_mac(mac, dev->dev_addr), pdev->irq); sp = netdev_priv(dev); diff --git a/drivers/net/eexpress.c b/drivers/net/eexpress.c index 7934ea37f9442e55a16e37a02deb360a2cc12253..9c85e50014b469a11a96de418b49d77d07258efd 100644 --- a/drivers/net/eexpress.c +++ b/drivers/net/eexpress.c @@ -135,7 +135,6 @@ struct net_local { - struct net_device_stats stats; unsigned long last_tx; /* jiffies when last transmit started */ unsigned long init_time; 
/* jiffies when eexp_hw_init586 called */ unsigned short rx_first; /* first rx buf, same as RX_BUF_START */ @@ -247,7 +246,6 @@ static char mca_irqmap[] = { 12, 9, 3, 4, 5, 10, 11, 15 }; static int eexp_open(struct net_device *dev); static int eexp_close(struct net_device *dev); static void eexp_timeout(struct net_device *dev); -static struct net_device_stats *eexp_stats(struct net_device *dev); static int eexp_xmit(struct sk_buff *buf, struct net_device *dev); static irqreturn_t eexp_irq(int irq, void *dev_addr); @@ -341,8 +339,6 @@ static int __init do_express_probe(struct net_device *dev) int dev_irq = dev->irq; int err; - SET_MODULE_OWNER(dev); - dev->if_port = 0xff; /* not set */ #ifdef CONFIG_MCA_LEGACY @@ -534,17 +530,6 @@ static int eexp_close(struct net_device *dev) return 0; } -/* - * Return interface stats - */ - -static struct net_device_stats *eexp_stats(struct net_device *dev) -{ - struct net_local *lp = netdev_priv(dev); - - return &lp->stats; -} - /* * This gets called when a higher level thinks we are broken. Check that * nothing has become jammed in the CU. @@ -648,7 +633,7 @@ static void eexp_timeout(struct net_device *dev) printk(KERN_INFO "%s: transmit timed out, %s?\n", dev->name, (SCB_complete(status)?"lost interrupt": "board on fire")); - lp->stats.tx_errors++; + dev->stats.tx_errors++; lp->last_tx = jiffies; if (!SCB_complete(status)) { scb_command(dev, SCB_CUabort); @@ -696,7 +681,7 @@ static int eexp_xmit(struct sk_buff *buf, struct net_device *dev) { unsigned short *data = (unsigned short *)buf->data; - lp->stats.tx_bytes += length; + dev->stats.tx_bytes += length; eexp_hw_tx_pio(dev,data,length); } @@ -845,7 +830,7 @@ static irqreturn_t eexp_irq(int irq, void *dev_info) outw(rbd+8, ioaddr+READ_PTR); printk("[%04x]\n", inw(ioaddr+DATAPORT)); #endif - lp->stats.rx_errors++; + dev->stats.rx_errors++; #if 1 eexp_hw_rxinit(dev); #else @@ -954,17 +939,17 @@ static void eexp_hw_rx_pio(struct net_device *dev) } else if (!FD_OK(status)) { - lp->stats.rx_errors++; + dev->stats.rx_errors++; if (FD_CRC(status)) - lp->stats.rx_crc_errors++; + dev->stats.rx_crc_errors++; if (FD_Align(status)) - lp->stats.rx_frame_errors++; + dev->stats.rx_frame_errors++; if (FD_Resrc(status)) - lp->stats.rx_fifo_errors++; + dev->stats.rx_fifo_errors++; if (FD_DMA(status)) - lp->stats.rx_over_errors++; + dev->stats.rx_over_errors++; if (FD_Short(status)) - lp->stats.rx_length_errors++; + dev->stats.rx_length_errors++; } else { @@ -974,7 +959,7 @@ static void eexp_hw_rx_pio(struct net_device *dev) if (skb == NULL) { printk(KERN_WARNING "%s: Memory squeeze, dropping packet\n",dev->name); - lp->stats.rx_dropped++; + dev->stats.rx_dropped++; break; } skb_reserve(skb, 2); @@ -983,8 +968,8 @@ static void eexp_hw_rx_pio(struct net_device *dev) skb->protocol = eth_type_trans(skb,dev); netif_rx(skb); dev->last_rx = jiffies; - lp->stats.rx_packets++; - lp->stats.rx_bytes += pkt_len; + dev->stats.rx_packets++; + dev->stats.rx_bytes += pkt_len; } outw(rx_block, ioaddr+WRITE_PTR); outw(0, ioaddr+DATAPORT); @@ -1055,7 +1040,7 @@ static void eexp_hw_tx_pio(struct net_device *dev, unsigned short *buf, outw(0xFFFF, ioaddr+SIGNAL_CA); } - lp->stats.tx_packets++; + dev->stats.tx_packets++; lp->last_tx = jiffies; } @@ -1182,7 +1167,6 @@ static int __init eexp_hw_probe(struct net_device *dev, unsigned short ioaddr) dev->open = eexp_open; dev->stop = eexp_close; dev->hard_start_xmit = eexp_xmit; - dev->get_stats = eexp_stats; dev->set_multicast_list = &eexp_set_multicast; dev->tx_timeout = eexp_timeout; 
dev->watchdog_timeo = 2*HZ; @@ -1265,35 +1249,35 @@ static unsigned short eexp_hw_lasttxstat(struct net_device *dev) else { lp->last_tx_restart = 0; - lp->stats.collisions += Stat_NoColl(status); + dev->stats.collisions += Stat_NoColl(status); if (!Stat_OK(status)) { char *whatsup = NULL; - lp->stats.tx_errors++; + dev->stats.tx_errors++; if (Stat_Abort(status)) - lp->stats.tx_aborted_errors++; + dev->stats.tx_aborted_errors++; if (Stat_TNoCar(status)) { whatsup = "aborted, no carrier"; - lp->stats.tx_carrier_errors++; + dev->stats.tx_carrier_errors++; } if (Stat_TNoCTS(status)) { whatsup = "aborted, lost CTS"; - lp->stats.tx_carrier_errors++; + dev->stats.tx_carrier_errors++; } if (Stat_TNoDMA(status)) { whatsup = "FIFO underran"; - lp->stats.tx_fifo_errors++; + dev->stats.tx_fifo_errors++; } if (Stat_TXColl(status)) { whatsup = "aborted, too many collisions"; - lp->stats.tx_aborted_errors++; + dev->stats.tx_aborted_errors++; } if (whatsup) printk(KERN_INFO "%s: transmit %s\n", dev->name, whatsup); } else - lp->stats.tx_packets++; + dev->stats.tx_packets++; } if (tx_block == TX_BUF_START+((lp->num_tx_bufs-1)*TX_BUF_SIZE)) lp->tx_reap = tx_block = TX_BUF_START; diff --git a/drivers/net/ehea/ehea.h b/drivers/net/ehea/ehea.h index 8d58be56f4e3a6df903c11f324716c846764509b..ac21526b6de8688836c60f42b0b648c7ae4cd865 100644 --- a/drivers/net/ehea/ehea.h +++ b/drivers/net/ehea/ehea.h @@ -33,19 +33,20 @@ #include #include #include +#include #include #include #include #define DRV_NAME "ehea" -#define DRV_VERSION "EHEA_0074" +#define DRV_VERSION "EHEA_0078" /* eHEA capability flags */ #define DLPAR_PORT_ADD_REM 1 #define DLPAR_MEM_ADD 2 #define DLPAR_MEM_REM 4 -#define EHEA_CAPABILITIES (DLPAR_PORT_ADD_REM) +#define EHEA_CAPABILITIES (DLPAR_PORT_ADD_REM | DLPAR_MEM_ADD) #define EHEA_MSG_DEFAULT (NETIF_MSG_LINK | NETIF_MSG_TIMER \ | NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR) @@ -58,6 +59,7 @@ #define EHEA_SMALL_QUEUES #define EHEA_NUM_TX_QP 1 +#define EHEA_LRO_MAX_AGGR 64 #ifdef EHEA_SMALL_QUEUES #define EHEA_MAX_CQE_COUNT 1023 @@ -84,6 +86,8 @@ #define EHEA_RQ2_PKT_SIZE 1522 #define EHEA_L_PKT_SIZE 256 /* low latency */ +#define MAX_LRO_DESCRIPTORS 8 + /* Send completion signaling */ /* Protection Domain Identifier */ @@ -351,6 +355,7 @@ struct ehea_q_skb_arr { * Port resources */ struct ehea_port_res { + struct napi_struct napi; struct port_stats p_stats; struct ehea_mr send_mr; /* send memory region */ struct ehea_mr recv_mr; /* receive memory region */ @@ -362,7 +367,6 @@ struct ehea_port_res { struct ehea_cq *send_cq; struct ehea_cq *recv_cq; struct ehea_eq *eq; - struct net_device *d_netdev; struct ehea_q_skb_arr rq1_skba; struct ehea_q_skb_arr rq2_skba; struct ehea_q_skb_arr rq3_skba; @@ -376,6 +380,8 @@ struct ehea_port_res { u64 tx_packets; u64 rx_packets; u32 poll_counter; + struct net_lro_mgr lro_mgr; + struct net_lro_desc lro_desc[MAX_LRO_DESCRIPTORS]; }; @@ -385,7 +391,6 @@ struct ehea_adapter { struct ibmebus_dev *ebus_dev; struct ehea_port *port[EHEA_MAX_PORTS]; struct ehea_eq *neq; /* notification event queue */ - struct workqueue_struct *ehea_wq; struct tasklet_struct neq_tasklet; struct ehea_mr mr; u32 pd; /* protection domain */ @@ -429,6 +434,7 @@ struct ehea_port { u32 msg_enable; u32 sig_comp_iv; u32 state; + u32 lro_max_aggr; u8 phy_link; u8 full_duplex; u8 autoneg; diff --git a/drivers/net/ehea/ehea_ethtool.c b/drivers/net/ehea/ehea_ethtool.c index decec8cfe96b8c2789705971409f2344f8b5558d..679f40ee957263af5ee375f5e76df76e42b45c52 100644 --- a/drivers/net/ehea/ehea_ethtool.c +++ 
b/drivers/net/ehea/ehea_ethtool.c @@ -183,6 +183,9 @@ static char ehea_ethtool_stats_keys[][ETH_GSTRING_LEN] = { {"PR5 free_swqes"}, {"PR6 free_swqes"}, {"PR7 free_swqes"}, + {"LRO aggregated"}, + {"LRO flushed"}, + {"LRO no_desc"}, }; static void ehea_get_strings(struct net_device *dev, u32 stringset, u8 *data) @@ -193,9 +196,14 @@ static void ehea_get_strings(struct net_device *dev, u32 stringset, u8 *data) } } -static int ehea_get_stats_count(struct net_device *dev) +static int ehea_get_sset_count(struct net_device *dev, int sset) { - return ARRAY_SIZE(ehea_ethtool_stats_keys); + switch (sset) { + case ETH_SS_STATS: + return ARRAY_SIZE(ehea_ethtool_stats_keys); + default: + return -EOPNOTSUPP; + } } static void ehea_get_ethtool_stats(struct net_device *dev, @@ -204,7 +212,7 @@ static void ehea_get_ethtool_stats(struct net_device *dev, int i, k, tmp; struct ehea_port *port = netdev_priv(dev); - for (i = 0; i < ehea_get_stats_count(dev); i++) + for (i = 0; i < ehea_get_sset_count(dev, ETH_SS_STATS); i++) data[i] = 0; i = 0; @@ -239,6 +247,18 @@ static void ehea_get_ethtool_stats(struct net_device *dev, for (k = 0; k < 8; k++) data[i++] = atomic_read(&port->port_res[k].swqe_avail); + for (k = 0, tmp = 0; k < EHEA_MAX_PORT_RES; k++) + tmp |= port->port_res[k].lro_mgr.stats.aggregated; + data[i++] = tmp; + + for (k = 0, tmp = 0; k < EHEA_MAX_PORT_RES; k++) + tmp |= port->port_res[k].lro_mgr.stats.flushed; + data[i++] = tmp; + + for (k = 0, tmp = 0; k < EHEA_MAX_PORT_RES; k++) + tmp |= port->port_res[k].lro_mgr.stats.no_desc; + data[i++] = tmp; + } const struct ethtool_ops ehea_ethtool_ops = { @@ -247,12 +267,9 @@ const struct ethtool_ops ehea_ethtool_ops = { .get_msglevel = ehea_get_msglevel, .set_msglevel = ehea_set_msglevel, .get_link = ethtool_op_get_link, - .get_tx_csum = ethtool_op_get_tx_csum, - .get_sg = ethtool_op_get_sg, - .get_tso = ethtool_op_get_tso, .set_tso = ethtool_op_set_tso, .get_strings = ehea_get_strings, - .get_stats_count = ehea_get_stats_count, + .get_sset_count = ehea_get_sset_count, .get_ethtool_stats = ehea_get_ethtool_stats, .get_rx_csum = ehea_get_rx_csum, .set_settings = ehea_set_settings, diff --git a/drivers/net/ehea/ehea_main.c b/drivers/net/ehea/ehea_main.c index 717b12984d10068ae1f13c9620012ad113d7a117..2ba57e6ace4dfe2af3d8d3bb135e075f94ce89a9 100644 --- a/drivers/net/ehea/ehea_main.c +++ b/drivers/net/ehea/ehea_main.c @@ -52,6 +52,8 @@ static int rq2_entries = EHEA_DEF_ENTRIES_RQ2; static int rq3_entries = EHEA_DEF_ENTRIES_RQ3; static int sq_entries = EHEA_DEF_ENTRIES_SQ; static int use_mcs = 0; +static int use_lro = 0; +static int lro_max_aggr = EHEA_LRO_MAX_AGGR; static int num_tx_qps = EHEA_NUM_TX_QP; static int prop_carrier_state = 0; @@ -62,6 +64,8 @@ module_param(rq3_entries, int, 0); module_param(sq_entries, int, 0); module_param(prop_carrier_state, int, 0); module_param(use_mcs, int, 0); +module_param(use_lro, int, 0); +module_param(lro_max_aggr, int, 0); module_param(num_tx_qps, int, 0); MODULE_PARM_DESC(num_tx_qps, "Number of TX-QPS"); @@ -82,12 +86,17 @@ MODULE_PARM_DESC(sq_entries, " Number of entries for the Send Queue " __MODULE_STRING(EHEA_DEF_ENTRIES_SQ) ")"); MODULE_PARM_DESC(use_mcs, " 0:NAPI, 1:Multiple receive queues, Default = 0 "); +MODULE_PARM_DESC(lro_max_aggr, " LRO: Max packets to be aggregated. 
Default = " + __MODULE_STRING(EHEA_LRO_MAX_AGGR)); +MODULE_PARM_DESC(use_lro, " Large Receive Offload, 1: enable, 0: disable, " + "Default = 0"); + static int port_name_cnt = 0; static LIST_HEAD(adapter_list); u64 ehea_driver_flags = 0; -struct workqueue_struct *ehea_driver_wq; struct work_struct ehea_rereg_mr_task; +struct semaphore dlpar_mem_lock; static int __devinit ehea_probe_adapter(struct ibmebus_dev *dev, const struct of_device_id *id); @@ -168,16 +177,24 @@ static void ehea_refill_rq1(struct ehea_port_res *pr, int index, int nr_of_wqes) struct sk_buff **skb_arr_rq1 = pr->rq1_skba.arr; struct net_device *dev = pr->port->netdev; int max_index_mask = pr->rq1_skba.len - 1; + int fill_wqes = pr->rq1_skba.os_skbs + nr_of_wqes; + int adder = 0; int i; - if (!nr_of_wqes) + pr->rq1_skba.os_skbs = 0; + + if (unlikely(test_bit(__EHEA_STOP_XFER, &ehea_driver_flags))) { + pr->rq1_skba.index = index; + pr->rq1_skba.os_skbs = fill_wqes; return; + } - for (i = 0; i < nr_of_wqes; i++) { + for (i = 0; i < fill_wqes; i++) { if (!skb_arr_rq1[index]) { skb_arr_rq1[index] = netdev_alloc_skb(dev, EHEA_L_PKT_SIZE); if (!skb_arr_rq1[index]) { + pr->rq1_skba.os_skbs = fill_wqes - i; ehea_error("%s: no mem for skb/%d wqes filled", dev->name, i); break; @@ -185,9 +202,14 @@ static void ehea_refill_rq1(struct ehea_port_res *pr, int index, int nr_of_wqes) } index--; index &= max_index_mask; + adder++; } + + if (adder == 0) + return; + /* Ring doorbell */ - ehea_update_rq1a(pr->qp, i); + ehea_update_rq1a(pr->qp, adder); } static int ehea_init_fill_rq1(struct ehea_port_res *pr, int nr_rq1a) @@ -221,16 +243,21 @@ static int ehea_refill_rq_def(struct ehea_port_res *pr, struct sk_buff **skb_arr = q_skba->arr; struct ehea_rwqe *rwqe; int i, index, max_index_mask, fill_wqes; + int adder = 0; int ret = 0; fill_wqes = q_skba->os_skbs + num_wqes; + q_skba->os_skbs = 0; - if (!fill_wqes) + if (unlikely(test_bit(__EHEA_STOP_XFER, &ehea_driver_flags))) { + q_skba->os_skbs = fill_wqes; return ret; + } index = q_skba->index; max_index_mask = q_skba->len - 1; for (i = 0; i < fill_wqes; i++) { + u64 tmp_addr; struct sk_buff *skb = netdev_alloc_skb(dev, packet_size); if (!skb) { ehea_error("%s: no mem for skb/%d wqes filled", @@ -242,30 +269,37 @@ static int ehea_refill_rq_def(struct ehea_port_res *pr, skb_reserve(skb, NET_IP_ALIGN); skb_arr[index] = skb; + tmp_addr = ehea_map_vaddr(skb->data); + if (tmp_addr == -1) { + dev_kfree_skb(skb); + q_skba->os_skbs = fill_wqes - i; + ret = 0; + break; + } rwqe = ehea_get_next_rwqe(qp, rq_nr); rwqe->wr_id = EHEA_BMASK_SET(EHEA_WR_ID_TYPE, wqe_type) | EHEA_BMASK_SET(EHEA_WR_ID_INDEX, index); rwqe->sg_list[0].l_key = pr->recv_mr.lkey; - rwqe->sg_list[0].vaddr = ehea_map_vaddr(skb->data); + rwqe->sg_list[0].vaddr = tmp_addr; rwqe->sg_list[0].len = packet_size; rwqe->data_segments = 1; index++; index &= max_index_mask; - - if (unlikely(test_bit(__EHEA_STOP_XFER, &ehea_driver_flags))) - goto out; + adder++; } q_skba->index = index; + if (adder == 0) + goto out; /* Ring doorbell */ iosync(); if (rq_nr == 2) - ehea_update_rq2a(pr->qp, i); + ehea_update_rq2a(pr->qp, adder); else - ehea_update_rq3a(pr->qp, i); + ehea_update_rq3a(pr->qp, adder); out: return ret; } @@ -386,16 +420,70 @@ static int ehea_treat_poll_error(struct ehea_port_res *pr, int rq, if (cqe->status & EHEA_CQE_STAT_FAT_ERR_MASK) { ehea_error("Critical receive error. 
Resetting port."); - queue_work(pr->port->adapter->ehea_wq, &pr->port->reset_task); + schedule_work(&pr->port->reset_task); return 1; } return 0; } -static struct ehea_cqe *ehea_proc_rwqes(struct net_device *dev, - struct ehea_port_res *pr, - int *budget) +static int get_skb_hdr(struct sk_buff *skb, void **iphdr, + void **tcph, u64 *hdr_flags, void *priv) +{ + struct ehea_cqe *cqe = priv; + unsigned int ip_len; + struct iphdr *iph; + + /* non tcp/udp packets */ + if (!cqe->header_length) + return -1; + + /* non tcp packet */ + skb_reset_network_header(skb); + iph = ip_hdr(skb); + if (iph->protocol != IPPROTO_TCP) + return -1; + + ip_len = ip_hdrlen(skb); + skb_set_transport_header(skb, ip_len); + *tcph = tcp_hdr(skb); + + /* check if ip header and tcp header are complete */ + if (iph->tot_len < ip_len + tcp_hdrlen(skb)) + return -1; + + *hdr_flags = LRO_IPV4 | LRO_TCP; + *iphdr = iph; + + return 0; +} + +static void ehea_proc_skb(struct ehea_port_res *pr, struct ehea_cqe *cqe, + struct sk_buff *skb) +{ + int vlan_extracted = (cqe->status & EHEA_CQE_VLAN_TAG_XTRACT) + && pr->port->vgrp; + + if (use_lro) { + if (vlan_extracted) + lro_vlan_hwaccel_receive_skb(&pr->lro_mgr, skb, + pr->port->vgrp, + cqe->vlan_tag, + cqe); + else + lro_receive_skb(&pr->lro_mgr, skb, cqe); + } else { + if (vlan_extracted) + vlan_hwaccel_receive_skb(skb, pr->port->vgrp, + cqe->vlan_tag); + else + netif_receive_skb(skb); + } +} + +static int ehea_proc_rwqes(struct net_device *dev, + struct ehea_port_res *pr, + int budget) { struct ehea_port *port = pr->port; struct ehea_qp *qp = pr->qp; @@ -408,18 +496,16 @@ static struct ehea_cqe *ehea_proc_rwqes(struct net_device *dev, int skb_arr_rq2_len = pr->rq2_skba.len; int skb_arr_rq3_len = pr->rq3_skba.len; int processed, processed_rq1, processed_rq2, processed_rq3; - int wqe_index, last_wqe_index, rq, my_quota, port_reset; + int wqe_index, last_wqe_index, rq, port_reset; processed = processed_rq1 = processed_rq2 = processed_rq3 = 0; last_wqe_index = 0; - my_quota = min(*budget, dev->quota); cqe = ehea_poll_rq1(qp, &wqe_index); - while ((my_quota > 0) && cqe) { + while ((processed < budget) && cqe) { ehea_inc_rq1(qp); processed_rq1++; processed++; - my_quota--; if (netif_msg_rx_status(port)) ehea_dump(cqe, sizeof(*cqe), "CQE"); @@ -434,14 +520,14 @@ static struct ehea_cqe *ehea_proc_rwqes(struct net_device *dev, if (netif_msg_rx_err(port)) ehea_error("LL rq1: skb=NULL"); - skb = netdev_alloc_skb(port->netdev, + skb = netdev_alloc_skb(dev, EHEA_L_PKT_SIZE); if (!skb) break; } skb_copy_to_linear_data(skb, ((char*)cqe) + 64, cqe->num_bytes_transfered - 4); - ehea_fill_skb(port->netdev, skb, cqe); + ehea_fill_skb(dev, skb, cqe); } else if (rq == 2) { /* RQ2 */ skb = get_skb_by_index(skb_arr_rq2, skb_arr_rq2_len, cqe); @@ -450,7 +536,7 @@ static struct ehea_cqe *ehea_proc_rwqes(struct net_device *dev, ehea_error("rq2: skb=NULL"); break; } - ehea_fill_skb(port->netdev, skb, cqe); + ehea_fill_skb(dev, skb, cqe); processed_rq2++; } else { /* RQ3 */ skb = get_skb_by_index(skb_arr_rq3, @@ -460,18 +546,12 @@ static struct ehea_cqe *ehea_proc_rwqes(struct net_device *dev, ehea_error("rq3: skb=NULL"); break; } - ehea_fill_skb(port->netdev, skb, cqe); + ehea_fill_skb(dev, skb, cqe); processed_rq3++; } - if ((cqe->status & EHEA_CQE_VLAN_TAG_XTRACT) - && port->vgrp) - vlan_hwaccel_receive_skb(skb, port->vgrp, - cqe->vlan_tag); - else - netif_receive_skb(skb); - - port->netdev->last_rx = jiffies; + ehea_proc_skb(pr, cqe, skb); + dev->last_rx = jiffies; } else { 
pr->p_stats.poll_receive_errors++; port_reset = ehea_treat_poll_error(pr, rq, cqe, @@ -482,16 +562,16 @@ static struct ehea_cqe *ehea_proc_rwqes(struct net_device *dev, } cqe = ehea_poll_rq1(qp, &wqe_index); } + if (use_lro) + lro_flush_all(&pr->lro_mgr); pr->rx_packets += processed; - *budget -= processed; ehea_refill_rq1(pr, last_wqe_index, processed_rq1); ehea_refill_rq2(pr, processed_rq2); ehea_refill_rq3(pr, processed_rq3); - cqe = ehea_poll_rq1(qp, &wqe_index); - return cqe; + return processed; } static struct ehea_cqe *ehea_proc_cqes(struct ehea_port_res *pr, int my_quota) @@ -515,8 +595,7 @@ static struct ehea_cqe *ehea_proc_cqes(struct ehea_port_res *pr, int my_quota) ehea_error("Send Completion Error: Resetting port"); if (netif_msg_tx_err(pr->port)) ehea_dump(cqe, sizeof(*cqe), "Send CQE"); - queue_work(pr->port->adapter->ehea_wq, - &pr->port->reset_task); + schedule_work(&pr->port->reset_task); break; } @@ -554,22 +633,27 @@ static struct ehea_cqe *ehea_proc_cqes(struct ehea_port_res *pr, int my_quota) } #define EHEA_NAPI_POLL_NUM_BEFORE_IRQ 16 +#define EHEA_POLL_MAX_CQES 65535 -static int ehea_poll(struct net_device *dev, int *budget) +static int ehea_poll(struct napi_struct *napi, int budget) { - struct ehea_port_res *pr = dev->priv; + struct ehea_port_res *pr = container_of(napi, struct ehea_port_res, napi); + struct net_device *dev = pr->port->netdev; struct ehea_cqe *cqe; struct ehea_cqe *cqe_skb = NULL; int force_irq, wqe_index; - - cqe = ehea_poll_rq1(pr->qp, &wqe_index); - cqe_skb = ehea_poll_cq(pr->send_cq); + int rx = 0; force_irq = (pr->poll_counter > EHEA_NAPI_POLL_NUM_BEFORE_IRQ); + cqe_skb = ehea_proc_cqes(pr, EHEA_POLL_MAX_CQES); - if ((!cqe && !cqe_skb) || force_irq) { + if (!force_irq) + rx += ehea_proc_rwqes(dev, pr, budget - rx); + + while ((rx != budget) || force_irq) { pr->poll_counter = 0; - netif_rx_complete(dev); + force_irq = 0; + netif_rx_complete(dev, napi); ehea_reset_cq_ep(pr->recv_cq); ehea_reset_cq_ep(pr->send_cq); ehea_reset_cq_n1(pr->recv_cq); @@ -578,43 +662,35 @@ static int ehea_poll(struct net_device *dev, int *budget) cqe_skb = ehea_poll_cq(pr->send_cq); if (!cqe && !cqe_skb) - return 0; - - if (!netif_rx_reschedule(dev, dev->quota)) - return 0; - } + return rx; - cqe = ehea_proc_rwqes(dev, pr, budget); - cqe_skb = ehea_proc_cqes(pr, 300); + if (!netif_rx_reschedule(dev, napi)) + return rx; - if (cqe || cqe_skb) - pr->poll_counter++; + cqe_skb = ehea_proc_cqes(pr, EHEA_POLL_MAX_CQES); + rx += ehea_proc_rwqes(dev, pr, budget - rx); + } - return 1; + pr->poll_counter++; + return rx; } #ifdef CONFIG_NET_POLL_CONTROLLER static void ehea_netpoll(struct net_device *dev) { struct ehea_port *port = netdev_priv(dev); + int i; - netif_rx_schedule(port->port_res[0].d_netdev); + for (i = 0; i < port->num_def_qps; i++) + netif_rx_schedule(dev, &port->port_res[i].napi); } #endif -static int ehea_poll_firstqueue(struct net_device *dev, int *budget) -{ - struct ehea_port *port = netdev_priv(dev); - struct net_device *d_dev = port->port_res[0].d_netdev; - - return ehea_poll(d_dev, budget); -} - static irqreturn_t ehea_recv_irq_handler(int irq, void *param) { struct ehea_port_res *pr = param; - netif_rx_schedule(pr->d_netdev); + netif_rx_schedule(pr->port->netdev, &pr->napi); return IRQ_HANDLED; } @@ -638,7 +714,7 @@ static irqreturn_t ehea_qp_aff_irq_handler(int irq, void *param) eqe = ehea_poll_eq(port->qp_eq); } - queue_work(port->adapter->ehea_wq, &port->reset_task); + schedule_work(&port->reset_task); return IRQ_HANDLED; } @@ -1236,14 +1312,16 @@ static 
int ehea_init_port_res(struct ehea_port *port, struct ehea_port_res *pr, kfree(init_attr); - pr->d_netdev = alloc_netdev(0, "", ether_setup); - if (!pr->d_netdev) - goto out_free; - pr->d_netdev->priv = pr; - pr->d_netdev->weight = 64; - pr->d_netdev->poll = ehea_poll; - set_bit(__LINK_STATE_START, &pr->d_netdev->state); - strcpy(pr->d_netdev->name, port->netdev->name); + netif_napi_add(pr->port->netdev, &pr->napi, ehea_poll, 64); + + pr->lro_mgr.max_aggr = pr->port->lro_max_aggr; + pr->lro_mgr.max_desc = MAX_LRO_DESCRIPTORS; + pr->lro_mgr.lro_arr = pr->lro_desc; + pr->lro_mgr.get_skb_header = get_skb_hdr; + pr->lro_mgr.features = LRO_F_NAPI | LRO_F_EXTRACT_VLAN_ID; + pr->lro_mgr.dev = port->netdev; + pr->lro_mgr.ip_summed = CHECKSUM_UNNECESSARY; + pr->lro_mgr.ip_summed_aggr = CHECKSUM_UNNECESSARY; ret = 0; goto out; @@ -1266,8 +1344,6 @@ static int ehea_clean_portres(struct ehea_port *port, struct ehea_port_res *pr) { int ret, i; - free_netdev(pr->d_netdev); - ret = ehea_destroy_qp(pr->qp); if (!ret) { @@ -1915,11 +1991,12 @@ static int ehea_start_xmit(struct sk_buff *skb, struct net_device *dev) ehea_dump(swqe, 512, "swqe"); } - if (unlikely(test_bit(__EHEA_STOP_XFER, &ehea_driver_flags))) - goto out; + if (unlikely(test_bit(__EHEA_STOP_XFER, &ehea_driver_flags))) { + netif_stop_queue(dev); + swqe->tx_control |= EHEA_SWQE_PURGE; + } ehea_post_swqe(pr->qp, swqe); - pr->tx_packets++; if (unlikely(atomic_read(&pr->swqe_avail) <= 1)) { spin_lock_irqsave(&pr->netif_queue, flags); @@ -1932,7 +2009,7 @@ static int ehea_start_xmit(struct sk_buff *skb, struct net_device *dev) } dev->trans_start = jiffies; spin_unlock(&pr->xmit_lock); -out: + return NETDEV_TX_OK; } @@ -2248,6 +2325,22 @@ static int ehea_up(struct net_device *dev) return ret; } +static void port_napi_disable(struct ehea_port *port) +{ + int i; + + for (i = 0; i < port->num_def_qps; i++) + napi_disable(&port->port_res[i].napi); +} + +static void port_napi_enable(struct ehea_port *port) +{ + int i; + + for (i = 0; i < port->num_def_qps; i++) + napi_enable(&port->port_res[i].napi); +} + static int ehea_open(struct net_device *dev) { int ret; @@ -2259,8 +2352,10 @@ static int ehea_open(struct net_device *dev) ehea_info("enabling port %s", dev->name); ret = ehea_up(dev); - if (!ret) + if (!ret) { + port_napi_enable(port); netif_start_queue(dev); + } up(&port->port_lock); @@ -2269,7 +2364,7 @@ static int ehea_open(struct net_device *dev) static int ehea_down(struct net_device *dev) { - int ret, i; + int ret; struct ehea_port *port = netdev_priv(dev); if (port->state == EHEA_PORT_DOWN) @@ -2278,10 +2373,7 @@ static int ehea_down(struct net_device *dev) ehea_drop_multicast_list(dev); ehea_free_interrupts(dev); - for (i = 0; i < port->num_def_qps; i++) - while (test_bit(__LINK_STATE_RX_SCHED, - &port->port_res[i].d_netdev->state)) - msleep(1); + port_napi_disable(port); port->state = EHEA_PORT_DOWN; @@ -2301,7 +2393,7 @@ static int ehea_stop(struct net_device *dev) if (netif_msg_ifdown(port)) ehea_info("disabling port %s", dev->name); - flush_workqueue(port->adapter->ehea_wq); + flush_scheduled_work(); down(&port->port_lock); netif_stop_queue(dev); ret = ehea_down(dev); @@ -2309,6 +2401,192 @@ static int ehea_stop(struct net_device *dev) return ret; } +void ehea_purge_sq(struct ehea_qp *orig_qp) +{ + struct ehea_qp qp = *orig_qp; + struct ehea_qp_init_attr *init_attr = &qp.init_attr; + struct ehea_swqe *swqe; + int wqe_index; + int i; + + for (i = 0; i < init_attr->act_nr_send_wqes; i++) { + swqe = ehea_get_swqe(&qp, &wqe_index); + 
swqe->tx_control |= EHEA_SWQE_PURGE; + } +} + +int ehea_stop_qps(struct net_device *dev) +{ + struct ehea_port *port = netdev_priv(dev); + struct ehea_adapter *adapter = port->adapter; + struct hcp_modify_qp_cb0* cb0; + int ret = -EIO; + int dret; + int i; + u64 hret; + u64 dummy64 = 0; + u16 dummy16 = 0; + + cb0 = kzalloc(PAGE_SIZE, GFP_KERNEL); + if (!cb0) { + ret = -ENOMEM; + goto out; + } + + for (i = 0; i < (port->num_def_qps + port->num_add_tx_qps); i++) { + struct ehea_port_res *pr = &port->port_res[i]; + struct ehea_qp *qp = pr->qp; + + /* Purge send queue */ + ehea_purge_sq(qp); + + /* Disable queue pair */ + hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle, + EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF), + cb0); + if (hret != H_SUCCESS) { + ehea_error("query_ehea_qp failed (1)"); + goto out; + } + + cb0->qp_ctl_reg = (cb0->qp_ctl_reg & H_QP_CR_RES_STATE) << 8; + cb0->qp_ctl_reg &= ~H_QP_CR_ENABLED; + + hret = ehea_h_modify_ehea_qp(adapter->handle, 0, qp->fw_handle, + EHEA_BMASK_SET(H_QPCB0_QP_CTL_REG, + 1), cb0, &dummy64, + &dummy64, &dummy16, &dummy16); + if (hret != H_SUCCESS) { + ehea_error("modify_ehea_qp failed (1)"); + goto out; + } + + hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle, + EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF), + cb0); + if (hret != H_SUCCESS) { + ehea_error("query_ehea_qp failed (2)"); + goto out; + } + + /* deregister shared memory regions */ + dret = ehea_rem_smrs(pr); + if (dret) { + ehea_error("unreg shared memory region failed"); + goto out; + } + } + + ret = 0; +out: + kfree(cb0); + + return ret; +} + +void ehea_update_rqs(struct ehea_qp *orig_qp, struct ehea_port_res * pr) +{ + struct ehea_qp qp = *orig_qp; + struct ehea_qp_init_attr *init_attr = &qp.init_attr; + struct ehea_rwqe *rwqe; + struct sk_buff **skba_rq2 = pr->rq2_skba.arr; + struct sk_buff **skba_rq3 = pr->rq3_skba.arr; + struct sk_buff *skb; + u32 lkey = pr->recv_mr.lkey; + + + int i; + int index; + + for (i = 0; i < init_attr->act_nr_rwqes_rq2 + 1; i++) { + rwqe = ehea_get_next_rwqe(&qp, 2); + rwqe->sg_list[0].l_key = lkey; + index = EHEA_BMASK_GET(EHEA_WR_ID_INDEX, rwqe->wr_id); + skb = skba_rq2[index]; + if (skb) + rwqe->sg_list[0].vaddr = ehea_map_vaddr(skb->data); + } + + for (i = 0; i < init_attr->act_nr_rwqes_rq3 + 1; i++) { + rwqe = ehea_get_next_rwqe(&qp, 3); + rwqe->sg_list[0].l_key = lkey; + index = EHEA_BMASK_GET(EHEA_WR_ID_INDEX, rwqe->wr_id); + skb = skba_rq3[index]; + if (skb) + rwqe->sg_list[0].vaddr = ehea_map_vaddr(skb->data); + } +} + +int ehea_restart_qps(struct net_device *dev) +{ + struct ehea_port *port = netdev_priv(dev); + struct ehea_adapter *adapter = port->adapter; + int ret = 0; + int i; + + struct hcp_modify_qp_cb0* cb0; + u64 hret; + u64 dummy64 = 0; + u16 dummy16 = 0; + + cb0 = kzalloc(PAGE_SIZE, GFP_KERNEL); + if (!cb0) { + ret = -ENOMEM; + goto out; + } + + for (i = 0; i < (port->num_def_qps + port->num_add_tx_qps); i++) { + struct ehea_port_res *pr = &port->port_res[i]; + struct ehea_qp *qp = pr->qp; + + ret = ehea_gen_smrs(pr); + if (ret) { + ehea_error("creation of shared memory regions failed"); + goto out; + } + + ehea_update_rqs(qp, pr); + + /* Enable queue pair */ + hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle, + EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF), + cb0); + if (hret != H_SUCCESS) { + ehea_error("query_ehea_qp failed (1)"); + goto out; + } + + cb0->qp_ctl_reg = (cb0->qp_ctl_reg & H_QP_CR_RES_STATE) << 8; + cb0->qp_ctl_reg |= H_QP_CR_ENABLED; + + hret = ehea_h_modify_ehea_qp(adapter->handle, 0, qp->fw_handle, + 
EHEA_BMASK_SET(H_QPCB0_QP_CTL_REG, + 1), cb0, &dummy64, + &dummy64, &dummy16, &dummy16); + if (hret != H_SUCCESS) { + ehea_error("modify_ehea_qp failed (1)"); + goto out; + } + + hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle, + EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF), + cb0); + if (hret != H_SUCCESS) { + ehea_error("query_ehea_qp failed (2)"); + goto out; + } + + /* refill entire queue */ + ehea_refill_rq1(pr, pr->rq1_skba.index, 0); + ehea_refill_rq2(pr, 0); + ehea_refill_rq3(pr, 0); + } +out: + kfree(cb0); + + return ret; +} + static void ehea_reset_port(struct work_struct *work) { int ret; @@ -2319,7 +2597,8 @@ static void ehea_reset_port(struct work_struct *work) port->resets++; down(&port->port_lock); netif_stop_queue(dev); - netif_poll_disable(dev); + + port_napi_disable(port); ehea_down(dev); @@ -2327,10 +2606,13 @@ static void ehea_reset_port(struct work_struct *work) if (ret) goto out; + ehea_set_multicast_list(dev); + if (netif_msg_timer(port)) ehea_info("Device %s resetted successfully", dev->name); - netif_poll_enable(dev); + port_napi_enable(port); + netif_wake_queue(dev); out: up(&port->port_lock); @@ -2342,6 +2624,7 @@ static void ehea_rereg_mrs(struct work_struct *work) int ret, i; struct ehea_adapter *adapter; + down(&dlpar_mem_lock); ehea_info("LPAR memory enlarged - re-initializing driver"); list_for_each_entry(adapter, &adapter_list, list) @@ -2354,12 +2637,14 @@ static void ehea_rereg_mrs(struct work_struct *work) struct net_device *dev = port->netdev; if (dev->flags & IFF_UP) { - ehea_info("stopping %s", - dev->name); down(&port->port_lock); netif_stop_queue(dev); - netif_poll_disable(dev); - ehea_down(dev); + ret = ehea_stop_qps(dev); + if (ret) { + up(&port->port_lock); + goto out; + } + port_napi_disable(port); up(&port->port_lock); } } @@ -2375,10 +2660,11 @@ static void ehea_rereg_mrs(struct work_struct *work) } ehea_destroy_busmap(); - ret = ehea_create_busmap(); - if (ret) + if (ret) { + ehea_error("creating ehea busmap failed"); goto out; + } clear_bit(__EHEA_STOP_XFER, &ehea_driver_flags); @@ -2400,21 +2686,18 @@ static void ehea_rereg_mrs(struct work_struct *work) struct net_device *dev = port->netdev; if (dev->flags & IFF_UP) { - ehea_info("restarting %s", - dev->name); down(&port->port_lock); - - ret = ehea_up(dev); - if (!ret) { - netif_poll_enable(dev); + port_napi_enable(port); + ret = ehea_restart_qps(dev); + if (!ret) netif_wake_queue(dev); - } - up(&port->port_lock); } } } } + up(&dlpar_mem_lock); + ehea_info("re-initializing driver complete"); out: return; } @@ -2423,8 +2706,9 @@ static void ehea_tx_watchdog(struct net_device *dev) { struct ehea_port *port = netdev_priv(dev); - if (netif_carrier_ok(dev)) - queue_work(port->adapter->ehea_wq, &port->reset_task); + if (netif_carrier_ok(dev) && + !test_bit(__EHEA_STOP_XFER, &ehea_driver_flags)) + schedule_work(&port->reset_task); } int ehea_sense_adapter_attr(struct ehea_adapter *adapter) @@ -2639,16 +2923,12 @@ struct ehea_port *ehea_setup_single_port(struct ehea_adapter *adapter, SET_NETDEV_DEV(dev, port_dev); /* initialize net_device structure */ - SET_MODULE_OWNER(dev); - memcpy(dev->dev_addr, &port->mac_addr, ETH_ALEN); dev->open = ehea_open; - dev->poll = ehea_poll_firstqueue; #ifdef CONFIG_NET_POLL_CONTROLLER dev->poll_controller = ehea_netpoll; #endif - dev->weight = 64; dev->stop = ehea_stop; dev->hard_start_xmit = ehea_start_xmit; dev->get_stats = ehea_get_stats; @@ -2681,6 +2961,8 @@ struct ehea_port *ehea_setup_single_port(struct ehea_adapter *adapter, goto out_dereg_bc; } + 
port->lro_max_aggr = lro_max_aggr; + ret = ehea_get_jumboframe_status(port, &jumbo); if (ret) ehea_error("failed determining jumbo frame status for %s", @@ -2959,15 +3241,9 @@ static int __devinit ehea_probe_adapter(struct ibmebus_dev *dev, goto out_kill_eq; } - adapter->ehea_wq = create_workqueue("ehea_wq"); - if (!adapter->ehea_wq) { - ret = -EIO; - goto out_free_irq; - } - ret = ehea_create_device_sysfs(dev); if (ret) - goto out_kill_wq; + goto out_free_irq; ret = ehea_setup_ports(adapter); if (ret) { @@ -2981,9 +3257,6 @@ static int __devinit ehea_probe_adapter(struct ibmebus_dev *dev, out_rem_dev_sysfs: ehea_remove_device_sysfs(dev); -out_kill_wq: - destroy_workqueue(adapter->ehea_wq); - out_free_irq: ibmebus_free_irq(NULL, adapter->neq->attr.ist1, adapter); @@ -3009,7 +3282,7 @@ static int __devexit ehea_remove(struct ibmebus_dev *dev) ehea_remove_device_sysfs(dev); - destroy_workqueue(adapter->ehea_wq); + flush_scheduled_work(); ibmebus_free_irq(NULL, adapter->neq->attr.ist1, adapter); tasklet_kill(&adapter->neq_tasklet); @@ -3067,9 +3340,9 @@ int __init ehea_module_init(void) printk(KERN_INFO "IBM eHEA ethernet device driver (Release %s)\n", DRV_VERSION); - ehea_driver_wq = create_workqueue("ehea_driver_wq"); INIT_WORK(&ehea_rereg_mr_task, ehea_rereg_mrs); + sema_init(&dlpar_mem_lock, 1); ret = check_module_parm(); if (ret) @@ -3100,7 +3373,7 @@ int __init ehea_module_init(void) static void __exit ehea_module_exit(void) { - destroy_workqueue(ehea_driver_wq); + flush_scheduled_work(); driver_remove_file(&ehea_driver.driver, &driver_attr_capabilities); ibmebus_unregister_driver(&ehea_driver); ehea_destroy_busmap(); diff --git a/drivers/net/ehea/ehea_phyp.h b/drivers/net/ehea/ehea_phyp.h index 89b63531ff261261ba0e897ce7938049482dfc4c..faa191d23b8614f095bb0a39bc93673304cb330d 100644 --- a/drivers/net/ehea/ehea_phyp.h +++ b/drivers/net/ehea/ehea_phyp.h @@ -126,6 +126,7 @@ struct hcp_modify_qp_cb0 { #define H_QP_CR_STATE_RDY2RCV 0x0000030000000000ULL /* Ready to recv */ #define H_QP_CR_STATE_RDY2SND 0x0000050000000000ULL /* Ready to send */ #define H_QP_CR_STATE_ERROR 0x0000800000000000ULL /* Error */ +#define H_QP_CR_RES_STATE 0x0000007F00000000ULL /* Resultant state */ struct hcp_modify_qp_cb1 { u32 qpn; /* 00 */ diff --git a/drivers/net/ehea/ehea_qmr.c b/drivers/net/ehea/ehea_qmr.c index c82e245960746ed2f126a4f4b78d77ec30eba3bc..83b76432b41a8cc6c299ad3530b4e4bd98b5ac2a 100644 --- a/drivers/net/ehea/ehea_qmr.c +++ b/drivers/net/ehea/ehea_qmr.c @@ -34,7 +34,6 @@ struct ehea_busmap ehea_bmap = { 0, 0, NULL }; extern u64 ehea_driver_flags; -extern struct workqueue_struct *ehea_driver_wq; extern struct work_struct ehea_rereg_mr_task; @@ -563,8 +562,7 @@ int ehea_destroy_qp(struct ehea_qp *qp) int ehea_create_busmap( void ) { u64 vaddr = EHEA_BUSMAP_START; - unsigned long abs_max_pfn = 0; - unsigned long sec_max_pfn; + unsigned long high_section_index = 0; int i; /* @@ -574,14 +572,10 @@ int ehea_create_busmap( void ) ehea_bmap.valid_sections = 0; for (i = 0; i < NR_MEM_SECTIONS; i++) - if (valid_section_nr(i)) { - sec_max_pfn = section_nr_to_pfn(i); - if (sec_max_pfn > abs_max_pfn) - abs_max_pfn = sec_max_pfn; - ehea_bmap.valid_sections++; - } + if (valid_section_nr(i)) + high_section_index = i; - ehea_bmap.entries = abs_max_pfn / EHEA_PAGES_PER_SECTION + 1; + ehea_bmap.entries = high_section_index + 1; ehea_bmap.vaddr = vmalloc(ehea_bmap.entries * sizeof(*ehea_bmap.vaddr)); if (!ehea_bmap.vaddr) @@ -593,6 +587,7 @@ int ehea_create_busmap( void ) if (pfn_valid(pfn)) { ehea_bmap.vaddr[i] = 
vaddr; vaddr += EHEA_SECTSIZE; + ehea_bmap.valid_sections++; } else ehea_bmap.vaddr[i] = 0; } @@ -622,7 +617,7 @@ u64 ehea_map_vaddr(void *caddr) if (unlikely(mapped_addr == -1)) if (!test_and_set_bit(__EHEA_STOP_XFER, &ehea_driver_flags)) - queue_work(ehea_driver_wq, &ehea_rereg_mr_task); + schedule_work(&ehea_rereg_mr_task); return mapped_addr; } @@ -637,7 +632,7 @@ int ehea_reg_kernel_mr(struct ehea_adapter *adapter, struct ehea_mr *mr) mr_len = ehea_bmap.valid_sections * EHEA_SECTSIZE; - pt = kzalloc(EHEA_MAX_RPAGE * sizeof(u64), GFP_KERNEL); + pt = kzalloc(PAGE_SIZE, GFP_KERNEL); if (!pt) { ehea_error("no mem"); ret = -ENOMEM; @@ -660,8 +655,8 @@ int ehea_reg_kernel_mr(struct ehea_adapter *adapter, struct ehea_mr *mr) void *sectbase = __va(i << SECTION_SIZE_BITS); unsigned long k = 0; - for (j = 0; j < (PAGES_PER_SECTION / EHEA_MAX_RPAGE); - j++) { + for (j = 0; j < (EHEA_PAGES_PER_SECTION / + EHEA_MAX_RPAGE); j++) { for (m = 0; m < EHEA_MAX_RPAGE; m++) { pg = sectbase + ((k++) * EHEA_PAGESIZE); diff --git a/drivers/net/ehea/ehea_qmr.h b/drivers/net/ehea/ehea_qmr.h index b71f8452a5e36be88a5c1d4283695f4cfec1a261..562de0ebdd851a3e3e3cabee9f41df66fcaa4e1a 100644 --- a/drivers/net/ehea/ehea_qmr.h +++ b/drivers/net/ehea/ehea_qmr.h @@ -39,7 +39,7 @@ #define EHEA_PAGESHIFT 12 #define EHEA_PAGESIZE (1UL << EHEA_PAGESHIFT) #define EHEA_SECTSIZE (1UL << 24) -#define EHEA_PAGES_PER_SECTION (EHEA_SECTSIZE >> PAGE_SHIFT) +#define EHEA_PAGES_PER_SECTION (EHEA_SECTSIZE >> EHEA_PAGESHIFT) #if (1UL << SECTION_SIZE_BITS) < EHEA_SECTSIZE #error eHEA module can't work if kernel sectionsize < ehea sectionsize @@ -145,7 +145,7 @@ struct ehea_rwqe { #define EHEA_CQE_VLAN_TAG_XTRACT 0x0400 #define EHEA_CQE_TYPE_RQ 0x60 -#define EHEA_CQE_STAT_ERR_MASK 0x721F +#define EHEA_CQE_STAT_ERR_MASK 0x720F #define EHEA_CQE_STAT_FAT_ERR_MASK 0x1F #define EHEA_CQE_STAT_ERR_TCP 0x4000 #define EHEA_CQE_STAT_ERR_IP 0x2000 diff --git a/drivers/net/epic100.c b/drivers/net/epic100.c index 119778401e486f9ea808f6266702adfe2ee7e403..ecdd3fc8d70ca8af89dc4fbd2f3033139a1fbeb0 100644 --- a/drivers/net/epic100.c +++ b/drivers/net/epic100.c @@ -262,6 +262,7 @@ struct epic_private { /* Ring pointers. */ spinlock_t lock; /* Group with Tx control cache line. 
*/ spinlock_t napi_lock; + struct napi_struct napi; unsigned int reschedule_in_poll; unsigned int cur_tx, dirty_tx; @@ -294,7 +295,7 @@ static void epic_tx_timeout(struct net_device *dev); static void epic_init_ring(struct net_device *dev); static int epic_start_xmit(struct sk_buff *skb, struct net_device *dev); static int epic_rx(struct net_device *dev, int budget); -static int epic_poll(struct net_device *dev, int *budget); +static int epic_poll(struct napi_struct *napi, int budget); static irqreturn_t epic_interrupt(int irq, void *dev_instance); static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd); static const struct ethtool_ops netdev_ethtool_ops; @@ -316,6 +317,7 @@ static int __devinit epic_init_one (struct pci_dev *pdev, int i, ret, option = 0, duplex = 0; void *ring_space; dma_addr_t ring_dma; + DECLARE_MAC_BUF(mac); /* when built into the kernel, we only print version if device is found */ #ifndef MODULE @@ -351,7 +353,6 @@ static int __devinit epic_init_one (struct pci_dev *pdev, dev_err(&pdev->dev, "no memory for eth device\n"); goto err_out_free_res; } - SET_MODULE_OWNER(dev); SET_NETDEV_DEV(dev, &pdev->dev); #ifdef USE_IO_OPS @@ -487,18 +488,15 @@ static int __devinit epic_init_one (struct pci_dev *pdev, dev->ethtool_ops = &netdev_ethtool_ops; dev->watchdog_timeo = TX_TIMEOUT; dev->tx_timeout = &epic_tx_timeout; - dev->poll = epic_poll; - dev->weight = 64; + netif_napi_add(dev, &ep->napi, epic_poll, 64); ret = register_netdev(dev); if (ret < 0) goto err_out_unmap_rx; - printk(KERN_INFO "%s: %s at %#lx, IRQ %d, ", - dev->name, pci_id_tbl[chip_idx].name, ioaddr, dev->irq); - for (i = 0; i < 5; i++) - printk("%2.2x:", dev->dev_addr[i]); - printk("%2.2x.\n", dev->dev_addr[i]); + printk(KERN_INFO "%s: %s at %#lx, IRQ %d, %s\n", + dev->name, pci_id_tbl[chip_idx].name, ioaddr, dev->irq, + print_mac(mac, dev->dev_addr)); out: return ret; @@ -660,8 +658,11 @@ static int epic_open(struct net_device *dev) /* Soft reset the chip. */ outl(0x4001, ioaddr + GENCTL); - if ((retval = request_irq(dev->irq, &epic_interrupt, IRQF_SHARED, dev->name, dev))) + napi_enable(&ep->napi); + if ((retval = request_irq(dev->irq, &epic_interrupt, IRQF_SHARED, dev->name, dev))) { + napi_disable(&ep->napi); return retval; + } epic_init_ring(dev); @@ -1103,9 +1104,9 @@ static irqreturn_t epic_interrupt(int irq, void *dev_instance) if ((status & EpicNapiEvent) && !ep->reschedule_in_poll) { spin_lock(&ep->napi_lock); - if (netif_rx_schedule_prep(dev)) { + if (netif_rx_schedule_prep(dev, &ep->napi)) { epic_napi_irq_off(dev, ep); - __netif_rx_schedule(dev); + __netif_rx_schedule(dev, &ep->napi); } else ep->reschedule_in_poll++; spin_unlock(&ep->napi_lock); @@ -1257,26 +1258,22 @@ static void epic_rx_err(struct net_device *dev, struct epic_private *ep) outw(RxQueued, ioaddr + COMMAND); } -static int epic_poll(struct net_device *dev, int *budget) +static int epic_poll(struct napi_struct *napi, int budget) { - struct epic_private *ep = dev->priv; - int work_done = 0, orig_budget; + struct epic_private *ep = container_of(napi, struct epic_private, napi); + struct net_device *dev = ep->mii.dev; + int work_done = 0; long ioaddr = dev->base_addr; - orig_budget = (*budget > dev->quota) ? 
dev->quota : *budget; - rx_action: epic_tx(dev, ep); - work_done += epic_rx(dev, *budget); + work_done += epic_rx(dev, budget); epic_rx_err(dev, ep); - *budget -= work_done; - dev->quota -= work_done; - - if (netif_running(dev) && (work_done < orig_budget)) { + if (netif_running(dev) && (work_done < budget)) { unsigned long flags; int more; @@ -1286,7 +1283,7 @@ static int epic_poll(struct net_device *dev, int *budget) more = ep->reschedule_in_poll; if (!more) { - __netif_rx_complete(dev); + __netif_rx_complete(dev, napi); outl(EpicNapiEvent, ioaddr + INTSTAT); epic_napi_irq_on(dev, ep); } else @@ -1298,7 +1295,7 @@ static int epic_poll(struct net_device *dev, int *budget) goto rx_action; } - return (work_done >= orig_budget); + return work_done; } static int epic_close(struct net_device *dev) @@ -1309,6 +1306,7 @@ static int epic_close(struct net_device *dev) int i; netif_stop_queue(dev); + napi_disable(&ep->napi); if (debug > 1) printk(KERN_DEBUG "%s: Shutting down ethercard, status was %2.2x.\n", @@ -1495,8 +1493,6 @@ static const struct ethtool_ops netdev_ethtool_ops = { .get_link = netdev_get_link, .get_msglevel = netdev_get_msglevel, .set_msglevel = netdev_set_msglevel, - .get_sg = ethtool_op_get_sg, - .get_tx_csum = ethtool_op_get_tx_csum, .begin = ethtool_begin, .complete = ethtool_complete }; diff --git a/drivers/net/eql.c b/drivers/net/eql.c index 102218c4a907ec9606ab53f04b927775aa22ccd7..18f1364d3d5bfeb25c94e30572cb98115fbf7af9 100644 --- a/drivers/net/eql.c +++ b/drivers/net/eql.c @@ -116,6 +116,7 @@ #include #include #include +#include #include #include @@ -127,7 +128,6 @@ static int eql_open(struct net_device *dev); static int eql_close(struct net_device *dev); static int eql_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd); static int eql_slave_xmit(struct sk_buff *skb, struct net_device *dev); -static struct net_device_stats *eql_get_stats(struct net_device *dev); #define eql_is_slave(dev) ((dev->flags & IFF_SLAVE) == IFF_SLAVE) #define eql_is_master(dev) ((dev->flags & IFF_MASTER) == IFF_MASTER) @@ -166,8 +166,6 @@ static void __init eql_setup(struct net_device *dev) { equalizer_t *eql = netdev_priv(dev); - SET_MODULE_OWNER(dev); - init_timer(&eql->timer); eql->timer.data = (unsigned long) eql; eql->timer.expires = jiffies + EQL_DEFAULT_RESCHED_IVAL; @@ -181,7 +179,6 @@ static void __init eql_setup(struct net_device *dev) dev->stop = eql_close; dev->do_ioctl = eql_ioctl; dev->hard_start_xmit = eql_slave_xmit; - dev->get_stats = eql_get_stats; /* * Now we undo some of the things that eth_setup does @@ -338,9 +335,9 @@ static int eql_slave_xmit(struct sk_buff *skb, struct net_device *dev) skb->priority = 1; slave->bytes_queued += skb->len; dev_queue_xmit(skb); - eql->stats.tx_packets++; + dev->stats.tx_packets++; } else { - eql->stats.tx_dropped++; + dev->stats.tx_dropped++; dev_kfree_skb(skb); } @@ -349,12 +346,6 @@ static int eql_slave_xmit(struct sk_buff *skb, struct net_device *dev) return 0; } -static struct net_device_stats * eql_get_stats(struct net_device *dev) -{ - equalizer_t *eql = netdev_priv(dev); - return &eql->stats; -} - /* * Private ioctl functions */ @@ -412,7 +403,7 @@ static int eql_enslave(struct net_device *master_dev, slaving_request_t __user * if (copy_from_user(&srq, srqp, sizeof (slaving_request_t))) return -EFAULT; - slave_dev = dev_get_by_name(srq.slave_name); + slave_dev = dev_get_by_name(&init_net, srq.slave_name); if (slave_dev) { if ((master_dev->flags & IFF_UP) == IFF_UP) { /* slave is not a master & not already a slave: */ @@ -460,7 
+451,7 @@ static int eql_emancipate(struct net_device *master_dev, slaving_request_t __use if (copy_from_user(&srq, srqp, sizeof (slaving_request_t))) return -EFAULT; - slave_dev = dev_get_by_name(srq.slave_name); + slave_dev = dev_get_by_name(&init_net, srq.slave_name); ret = -EINVAL; if (slave_dev) { spin_lock_bh(&eql->queue.lock); @@ -493,7 +484,7 @@ static int eql_g_slave_cfg(struct net_device *dev, slave_config_t __user *scp) if (copy_from_user(&sc, scp, sizeof (slave_config_t))) return -EFAULT; - slave_dev = dev_get_by_name(sc.slave_name); + slave_dev = dev_get_by_name(&init_net, sc.slave_name); if (!slave_dev) return -ENODEV; @@ -528,7 +519,7 @@ static int eql_s_slave_cfg(struct net_device *dev, slave_config_t __user *scp) if (copy_from_user(&sc, scp, sizeof (slave_config_t))) return -EFAULT; - slave_dev = dev_get_by_name(sc.slave_name); + slave_dev = dev_get_by_name(&init_net, sc.slave_name); if (!slave_dev) return -ENODEV; diff --git a/drivers/net/es3210.c b/drivers/net/es3210.c index 822e5bfd1a716836b2e7d125dcdf944b5a524810..deefa51b8c31b652cdbbc1509dfcb6d006f0d915 100644 --- a/drivers/net/es3210.c +++ b/drivers/net/es3210.c @@ -130,8 +130,6 @@ static int __init do_es_probe(struct net_device *dev) int irq = dev->irq; int mem_start = dev->mem_start; - SET_MODULE_OWNER(dev); - if (ioaddr > 0x1ff) /* Check a single specified location. */ return es_probe1(dev, ioaddr); else if (ioaddr > 0) /* Don't probe at all. */ @@ -181,6 +179,7 @@ static int __init es_probe1(struct net_device *dev, int ioaddr) { int i, retval; unsigned long eisa_id; + DECLARE_MAC_BUF(mac); if (!request_region(ioaddr + ES_SA_PROM, ES_IO_EXTENT, "es3210")) return -ENODEV; @@ -192,7 +191,6 @@ static int __init es_probe1(struct net_device *dev, int ioaddr) inb(ioaddr + ES_CFG4), inb(ioaddr + ES_CFG5), inb(ioaddr + ES_CFG6)); #endif - /* Check the EISA ID of the card. */ eisa_id = inl(ioaddr + ES_ID_PORT); if ((eisa_id != ES_EISA_ID1) && (eisa_id != ES_EISA_ID2)) { @@ -200,21 +198,21 @@ static int __init es_probe1(struct net_device *dev, int ioaddr) goto out; } + for (i = 0; i < ETHER_ADDR_LEN ; i++) + dev->dev_addr[i] = inb(ioaddr + ES_SA_PROM + i); + /* Check the Racal vendor ID as well. */ - if (inb(ioaddr + ES_SA_PROM + 0) != ES_ADDR0 - || inb(ioaddr + ES_SA_PROM + 1) != ES_ADDR1 - || inb(ioaddr + ES_SA_PROM + 2) != ES_ADDR2 ) { - printk("es3210.c: card not found"); - for(i = 0; i < ETHER_ADDR_LEN; i++) - printk(" %02x", inb(ioaddr + ES_SA_PROM + i)); - printk(" (invalid prefix).\n"); + if (dev->dev_addr[0] != ES_ADDR0 || + dev->dev_addr[1] != ES_ADDR1 || + dev->dev_addr[2] != ES_ADDR2) { + printk("es3210.c: card not found %s (invalid_prefix).\n", + print_mac(mac, dev->dev_addr)); retval = -ENODEV; goto out; } - printk("es3210.c: ES3210 rev. %ld at %#x, node", eisa_id>>24, ioaddr); - for(i = 0; i < ETHER_ADDR_LEN; i++) - printk(" %02x", (dev->dev_addr[i] = inb(ioaddr + ES_SA_PROM + i))); + printk("es3210.c: ES3210 rev. %ld at %#x, node %s", + eisa_id>>24, ioaddr, print_mac(mac, dev->dev_addr)); /* Snarf the interrupt now. 
*/ if (dev->irq == 0) { diff --git a/drivers/net/eth16i.c b/drivers/net/eth16i.c index 04abf59e5007fd86017561eeddb3c1fbca580247..243fc6b354b5effd99030e2fee51efaf9dc9b78b 100644 --- a/drivers/net/eth16i.c +++ b/drivers/net/eth16i.c @@ -380,7 +380,6 @@ static unsigned int eth16i_debug = ETH16I_DEBUG; /* Information for each board */ struct eth16i_local { - struct net_device_stats stats; unsigned char tx_started; unsigned char tx_buf_busy; unsigned short tx_queue; /* Number of packets in transmit buffer */ @@ -426,8 +425,6 @@ static int eth16i_set_irq(struct net_device *dev); static ushort eth16i_parse_mediatype(const char* s); #endif -static struct net_device_stats *eth16i_get_stats(struct net_device *dev); - static char cardname[] __initdata = "ICL EtherTeam 16i/32"; static int __init do_eth16i_probe(struct net_device *dev) @@ -436,8 +433,6 @@ static int __init do_eth16i_probe(struct net_device *dev) int ioaddr; int base_addr = dev->base_addr; - SET_MODULE_OWNER(dev); - if(eth16i_debug > 4) printk(KERN_DEBUG "Probing started for %s\n", cardname); @@ -559,7 +554,6 @@ static int __init eth16i_probe1(struct net_device *dev, int ioaddr) dev->open = eth16i_open; dev->stop = eth16i_close; dev->hard_start_xmit = eth16i_tx; - dev->get_stats = eth16i_get_stats; dev->set_multicast_list = eth16i_multicast; dev->tx_timeout = eth16i_timeout; dev->watchdog_timeo = TX_TIMEOUT; @@ -1047,7 +1041,7 @@ static void eth16i_timeout(struct net_device *dev) printk(KERN_DEBUG "lp->tx_queue_len = %d\n", lp->tx_queue_len); printk(KERN_DEBUG "lp->tx_started = %d\n", lp->tx_started); } - lp->stats.tx_errors++; + dev->stats.tx_errors++; eth16i_reset(dev); dev->trans_start = jiffies; outw(ETH16I_INTR_ON, ioaddr + TX_INTR_REG); @@ -1132,7 +1126,6 @@ static int eth16i_tx(struct sk_buff *skb, struct net_device *dev) static void eth16i_rx(struct net_device *dev) { - struct eth16i_local *lp = netdev_priv(dev); int ioaddr = dev->base_addr; int boguscount = MAX_RX_LOOP; @@ -1151,16 +1144,16 @@ static void eth16i_rx(struct net_device *dev) inb(ioaddr + RECEIVE_MODE_REG), status); if( !(status & PKT_GOOD) ) { - lp->stats.rx_errors++; + dev->stats.rx_errors++; if( (pkt_len < ETH_ZLEN) || (pkt_len > ETH_FRAME_LEN) ) { - lp->stats.rx_length_errors++; + dev->stats.rx_length_errors++; eth16i_reset(dev); return; } else { eth16i_skip_packet(dev); - lp->stats.rx_dropped++; + dev->stats.rx_dropped++; } } else { /* Ok so now we should have a good packet */ @@ -1171,7 +1164,7 @@ static void eth16i_rx(struct net_device *dev) printk(KERN_WARNING "%s: Could'n allocate memory for packet (len %d)\n", dev->name, pkt_len); eth16i_skip_packet(dev); - lp->stats.rx_dropped++; + dev->stats.rx_dropped++; break; } @@ -1214,8 +1207,8 @@ static void eth16i_rx(struct net_device *dev) } netif_rx(skb); dev->last_rx = jiffies; - lp->stats.rx_packets++; - lp->stats.rx_bytes += pkt_len; + dev->stats.rx_packets++; + dev->stats.rx_bytes += pkt_len; } /* else */ @@ -1252,32 +1245,32 @@ static irqreturn_t eth16i_interrupt(int irq, void *dev_id) if( status & 0x7f00 ) { - lp->stats.rx_errors++; + dev->stats.rx_errors++; if(status & (BUS_RD_ERR << 8) ) printk(KERN_WARNING "%s: Bus read error.\n",dev->name); - if(status & (SHORT_PKT_ERR << 8) ) lp->stats.rx_length_errors++; - if(status & (ALIGN_ERR << 8) ) lp->stats.rx_frame_errors++; - if(status & (CRC_ERR << 8) ) lp->stats.rx_crc_errors++; - if(status & (RX_BUF_OVERFLOW << 8) ) lp->stats.rx_over_errors++; + if(status & (SHORT_PKT_ERR << 8) ) dev->stats.rx_length_errors++; + if(status & (ALIGN_ERR << 8) ) 
dev->stats.rx_frame_errors++; + if(status & (CRC_ERR << 8) ) dev->stats.rx_crc_errors++; + if(status & (RX_BUF_OVERFLOW << 8) ) dev->stats.rx_over_errors++; } if( status & 0x001a) { - lp->stats.tx_errors++; + dev->stats.tx_errors++; - if(status & CR_LOST) lp->stats.tx_carrier_errors++; - if(status & TX_JABBER_ERR) lp->stats.tx_window_errors++; + if(status & CR_LOST) dev->stats.tx_carrier_errors++; + if(status & TX_JABBER_ERR) dev->stats.tx_window_errors++; #if 0 if(status & COLLISION) { - lp->stats.collisions += + dev->stats.collisions += ((inb(ioaddr+TRANSMIT_MODE_REG) & 0xF0) >> 4); } #endif if(status & COLLISIONS_16) { if(lp->col_16 < MAX_COL_16) { lp->col_16++; - lp->stats.collisions++; + dev->stats.collisions++; /* Resume transmitting, skip failed packet */ outb(0x02, ioaddr + COL_16_REG); } @@ -1290,8 +1283,8 @@ static irqreturn_t eth16i_interrupt(int irq, void *dev_id) if( status & 0x00ff ) { /* Let's check the transmit status reg */ if(status & TX_DONE) { /* The transmit has been done */ - lp->stats.tx_packets = lp->tx_buffered_packets; - lp->stats.tx_bytes += lp->tx_buffered_bytes; + dev->stats.tx_packets = lp->tx_buffered_packets; + dev->stats.tx_bytes += lp->tx_buffered_bytes; lp->col_16 = 0; if(lp->tx_queue) { /* Is there still packets ? */ @@ -1371,12 +1364,6 @@ static void eth16i_multicast(struct net_device *dev) } } -static struct net_device_stats *eth16i_get_stats(struct net_device *dev) -{ - struct eth16i_local *lp = netdev_priv(dev); - return &lp->stats; -} - static void eth16i_select_regbank(unsigned char banknbr, int ioaddr) { unsigned char data; diff --git a/drivers/net/ewrk3.c b/drivers/net/ewrk3.c index cb0792c187bab45fd2a3b5df9e571f04a37e6cac..593a120e31b2bdc406dd8e3e6cfd8aeca27d1798 100644 --- a/drivers/net/ewrk3.c +++ b/drivers/net/ewrk3.c @@ -275,7 +275,6 @@ struct ewrk3_private { u_long shmem_base; /* Shared memory start address */ void __iomem *shmem; u_long shmem_length; /* Shared memory window length */ - struct net_device_stats stats; /* Public stats */ struct ewrk3_stats pktStats; /* Private stats counters */ u_char irq_mask; /* Adapter IRQ mask bits */ u_char mPage; /* Maximum 2kB Page number */ @@ -302,7 +301,6 @@ static int ewrk3_open(struct net_device *dev); static int ewrk3_queue_pkt(struct sk_buff *skb, struct net_device *dev); static irqreturn_t ewrk3_interrupt(int irq, void *dev_id); static int ewrk3_close(struct net_device *dev); -static struct net_device_stats *ewrk3_get_stats(struct net_device *dev); static void set_multicast_list(struct net_device *dev); static int ewrk3_ioctl(struct net_device *dev, struct ifreq *rq, int cmd); static const struct ethtool_ops ethtool_ops_203; @@ -356,7 +354,6 @@ struct net_device * __init ewrk3_probe(int unit) sprintf(dev->name, "eth%d", unit); netdev_boot_setup_check(dev); } - SET_MODULE_OWNER(dev); err = ewrk3_probe1(dev, dev->base_addr, dev->irq); if (err) @@ -399,6 +396,7 @@ ewrk3_hw_init(struct net_device *dev, u_long iobase) u_long mem_start, shmem_length; u_char cr, cmr, icr, nicsr, lemac, hard_strapped = 0; u_char eeprom_image[EEPROM_MAX], chksum, eisa_cr = 0; + DECLARE_MAC_BUF(mac); /* ** Stop the EWRK3. Enable the DBR ROM. Disable interrupts and remote boot. @@ -463,10 +461,7 @@ ewrk3_hw_init(struct net_device *dev, u_long iobase) if (lemac != LeMAC2) DevicePresent(iobase); /* need after EWRK3_INIT */ status = get_hw_addr(dev, eeprom_image, lemac); - for (i = 0; i < ETH_ALEN - 1; i++) { /* get the ethernet addr. 
*/ - printk("%2.2x:", dev->dev_addr[i]); - } - printk("%2.2x,\n", dev->dev_addr[i]); + printk("%s\n", print_mac(mac, dev->dev_addr)); if (status) { printk(" which has an EEPROM CRC error.\n"); @@ -612,7 +607,6 @@ ewrk3_hw_init(struct net_device *dev, u_long iobase) dev->open = ewrk3_open; dev->hard_start_xmit = ewrk3_queue_pkt; dev->stop = ewrk3_close; - dev->get_stats = ewrk3_get_stats; dev->set_multicast_list = set_multicast_list; dev->do_ioctl = ewrk3_ioctl; if (lp->adapter_name[4] == '3') @@ -632,7 +626,7 @@ static int ewrk3_open(struct net_device *dev) { struct ewrk3_private *lp = netdev_priv(dev); u_long iobase = dev->base_addr; - int i, status = 0; + int status = 0; u_char icr, csr; /* @@ -652,12 +646,10 @@ static int ewrk3_open(struct net_device *dev) ewrk3_init(dev); if (ewrk3_debug > 1) { + DECLARE_MAC_BUF(mac); printk("%s: ewrk3 open with irq %d\n", dev->name, dev->irq); - printk(" physical address: "); - for (i = 0; i < 5; i++) { - printk("%2.2x:", (u_char) dev->dev_addr[i]); - } - printk("%2.2x\n", (u_char) dev->dev_addr[i]); + printk(" physical address: %s\n", + print_mac(mac, dev->dev_addr)); if (lp->shmem_length == 0) { printk(" no shared memory, I/O only mode\n"); } else { @@ -864,7 +856,7 @@ static int ewrk3_queue_pkt (struct sk_buff *skb, struct net_device *dev) ENABLE_IRQs; spin_unlock_irq (&lp->hw_lock); - lp->stats.tx_bytes += skb->len; + dev->stats.tx_bytes += skb->len; dev->trans_start = jiffies; dev_kfree_skb (skb); @@ -981,13 +973,13 @@ static int ewrk3_rx(struct net_device *dev) } if (!(rx_status & R_ROK)) { /* There was an error. */ - lp->stats.rx_errors++; /* Update the error stats. */ + dev->stats.rx_errors++; /* Update the error stats. */ if (rx_status & R_DBE) - lp->stats.rx_frame_errors++; + dev->stats.rx_frame_errors++; if (rx_status & R_CRC) - lp->stats.rx_crc_errors++; + dev->stats.rx_crc_errors++; if (rx_status & R_PLL) - lp->stats.rx_fifo_errors++; + dev->stats.rx_fifo_errors++; } else { struct sk_buff *skb; @@ -1038,11 +1030,11 @@ static int ewrk3_rx(struct net_device *dev) ** Update stats */ dev->last_rx = jiffies; - lp->stats.rx_packets++; - lp->stats.rx_bytes += pkt_len; + dev->stats.rx_packets++; + dev->stats.rx_bytes += pkt_len; } else { printk("%s: Insufficient memory; nuking packet.\n", dev->name); - lp->stats.rx_dropped++; /* Really, deferred. */ + dev->stats.rx_dropped++; /* Really, deferred. 
*/ break; } } @@ -1072,11 +1064,11 @@ static int ewrk3_tx(struct net_device *dev) while ((tx_status = inb(EWRK3_TDQ)) > 0) { /* Whilst there's old buffers */ if (tx_status & T_VSTS) { /* The status is valid */ if (tx_status & T_TXE) { - lp->stats.tx_errors++; + dev->stats.tx_errors++; if (tx_status & T_NCL) - lp->stats.tx_carrier_errors++; + dev->stats.tx_carrier_errors++; if (tx_status & T_LCL) - lp->stats.tx_window_errors++; + dev->stats.tx_window_errors++; if (tx_status & T_CTU) { if ((tx_status & T_COLL) ^ T_XUR) { lp->pktStats.tx_underruns++; @@ -1085,13 +1077,13 @@ static int ewrk3_tx(struct net_device *dev) } } else if (tx_status & T_COLL) { if ((tx_status & T_COLL) ^ T_XCOLL) { - lp->stats.collisions++; + dev->stats.collisions++; } else { lp->pktStats.excessive_collisions++; } } } else { - lp->stats.tx_packets++; + dev->stats.tx_packets++; } } } @@ -1134,14 +1126,6 @@ static int ewrk3_close(struct net_device *dev) return 0; } -static struct net_device_stats *ewrk3_get_stats(struct net_device *dev) -{ - struct ewrk3_private *lp = netdev_priv(dev); - - /* Null body since there is no framing error counter */ - return &lp->stats; -} - /* ** Set or clear the multicast filter for this adapter. */ diff --git a/drivers/net/fealnx.c b/drivers/net/fealnx.c index ff9f177d7157ec9f43326d6c6fc61226d8fab3ba..43f7647ff24631f2142ac6e68be404a4fc723eab 100644 --- a/drivers/net/fealnx.c +++ b/drivers/net/fealnx.c @@ -486,6 +486,7 @@ static int __devinit fealnx_init_one(struct pci_dev *pdev, #else int bar = 1; #endif + DECLARE_MAC_BUF(mac); /* when built into the kernel, we only print version if device is found */ #ifndef MODULE @@ -527,7 +528,6 @@ static int __devinit fealnx_init_one(struct pci_dev *pdev, err = -ENOMEM; goto err_out_unmap; } - SET_MODULE_OWNER(dev); SET_NETDEV_DEV(dev, &pdev->dev); /* read ethernet id */ @@ -665,11 +665,9 @@ static int __devinit fealnx_init_one(struct pci_dev *pdev, if (err) goto err_out_free_tx; - printk(KERN_INFO "%s: %s at %p, ", - dev->name, skel_netdrv_tbl[chip_id].chip_name, ioaddr); - for (i = 0; i < 5; i++) - printk("%2.2x:", dev->dev_addr[i]); - printk("%2.2x, IRQ %d.\n", dev->dev_addr[i], irq); + printk(KERN_INFO "%s: %s at %p, %s, IRQ %d.\n", + dev->name, skel_netdrv_tbl[chip_id].chip_name, ioaddr, + print_mac(mac, dev->dev_addr), irq); return 0; @@ -1892,8 +1890,6 @@ static const struct ethtool_ops netdev_ethtool_ops = { .get_link = netdev_get_link, .get_msglevel = netdev_get_msglevel, .set_msglevel = netdev_set_msglevel, - .get_sg = ethtool_op_get_sg, - .get_tx_csum = ethtool_op_get_tx_csum, }; static int mii_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) diff --git a/drivers/net/fec.c b/drivers/net/fec.c index 4e8df910c00d0705a503ee4441cd0df2233e9c4b..2b5782056ddae658343fd0f280ee05b38d2420d5 100644 --- a/drivers/net/fec.c +++ b/drivers/net/fec.c @@ -204,7 +204,6 @@ struct fec_enet_private { cbd_t *tx_bd_base; cbd_t *cur_rx, *cur_tx; /* The next free ring entry */ cbd_t *dirty_tx; /* The ring entries to be free()ed. 
*/ - struct net_device_stats stats; uint tx_full; spinlock_t lock; @@ -234,7 +233,6 @@ static irqreturn_t fec_enet_interrupt(int irq, void * dev_id); static void fec_enet_tx(struct net_device *dev); static void fec_enet_rx(struct net_device *dev); static int fec_enet_close(struct net_device *dev); -static struct net_device_stats *fec_enet_get_stats(struct net_device *dev); static void set_multicast_list(struct net_device *dev); static void fec_restart(struct net_device *dev, int duplex); static void fec_stop(struct net_device *dev); @@ -359,7 +357,7 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *dev) */ fep->tx_skbuff[fep->skb_cur] = skb; - fep->stats.tx_bytes += skb->len; + dev->stats.tx_bytes += skb->len; fep->skb_cur = (fep->skb_cur+1) & TX_RING_MOD_MASK; /* Push the data cache so the CPM does not get stale memory @@ -409,7 +407,7 @@ fec_timeout(struct net_device *dev) struct fec_enet_private *fep = netdev_priv(dev); printk("%s: transmit timed out.\n", dev->name); - fep->stats.tx_errors++; + dev->stats.tx_errors++; #ifndef final_version { int i; @@ -511,19 +509,19 @@ fec_enet_tx(struct net_device *dev) if (status & (BD_ENET_TX_HB | BD_ENET_TX_LC | BD_ENET_TX_RL | BD_ENET_TX_UN | BD_ENET_TX_CSL)) { - fep->stats.tx_errors++; + dev->stats.tx_errors++; if (status & BD_ENET_TX_HB) /* No heartbeat */ - fep->stats.tx_heartbeat_errors++; + dev->stats.tx_heartbeat_errors++; if (status & BD_ENET_TX_LC) /* Late collision */ - fep->stats.tx_window_errors++; + dev->stats.tx_window_errors++; if (status & BD_ENET_TX_RL) /* Retrans limit */ - fep->stats.tx_aborted_errors++; + dev->stats.tx_aborted_errors++; if (status & BD_ENET_TX_UN) /* Underrun */ - fep->stats.tx_fifo_errors++; + dev->stats.tx_fifo_errors++; if (status & BD_ENET_TX_CSL) /* Carrier lost */ - fep->stats.tx_carrier_errors++; + dev->stats.tx_carrier_errors++; } else { - fep->stats.tx_packets++; + dev->stats.tx_packets++; } #ifndef final_version @@ -534,7 +532,7 @@ fec_enet_tx(struct net_device *dev) * but we eventually sent the packet OK. */ if (status & BD_ENET_TX_DEF) - fep->stats.collisions++; + dev->stats.collisions++; /* Free the sk buffer associated with this last transmit. */ @@ -607,17 +605,17 @@ while (!((status = bdp->cbd_sc) & BD_ENET_RX_EMPTY)) { /* Check for errors. */ if (status & (BD_ENET_RX_LG | BD_ENET_RX_SH | BD_ENET_RX_NO | BD_ENET_RX_CR | BD_ENET_RX_OV)) { - fep->stats.rx_errors++; + dev->stats.rx_errors++; if (status & (BD_ENET_RX_LG | BD_ENET_RX_SH)) { /* Frame too long or too short. */ - fep->stats.rx_length_errors++; + dev->stats.rx_length_errors++; } if (status & BD_ENET_RX_NO) /* Frame alignment */ - fep->stats.rx_frame_errors++; + dev->stats.rx_frame_errors++; if (status & BD_ENET_RX_CR) /* CRC Error */ - fep->stats.rx_crc_errors++; + dev->stats.rx_crc_errors++; if (status & BD_ENET_RX_OV) /* FIFO overrun */ - fep->stats.rx_fifo_errors++; + dev->stats.rx_fifo_errors++; } /* Report late collisions as a frame error. @@ -625,16 +623,16 @@ while (!((status = bdp->cbd_sc) & BD_ENET_RX_EMPTY)) { * have in the buffer. So, just drop this frame on the floor. */ if (status & BD_ENET_RX_CL) { - fep->stats.rx_errors++; - fep->stats.rx_frame_errors++; + dev->stats.rx_errors++; + dev->stats.rx_frame_errors++; goto rx_processing_done; } /* Process the incoming frame. */ - fep->stats.rx_packets++; + dev->stats.rx_packets++; pkt_len = bdp->cbd_datlen; - fep->stats.rx_bytes += pkt_len; + dev->stats.rx_bytes += pkt_len; data = (__u8*)__va(bdp->cbd_bufaddr); /* This does 16 byte alignment, exactly what we need. 
@@ -646,7 +644,7 @@ while (!((status = bdp->cbd_sc) & BD_ENET_RX_EMPTY)) { if (skb == NULL) { printk("%s: Memory squeeze, dropping packet.\n", dev->name); - fep->stats.rx_dropped++; + dev->stats.rx_dropped++; } else { skb_put(skb,pkt_len-4); /* Make room */ skb_copy_to_linear_data(skb, data, pkt_len-4); @@ -2220,13 +2218,6 @@ fec_enet_close(struct net_device *dev) return 0; } -static struct net_device_stats *fec_enet_get_stats(struct net_device *dev) -{ - struct fec_enet_private *fep = netdev_priv(dev); - - return &fep->stats; -} - /* Set or clear the multicast filter for this adaptor. * Skeleton taken from sunlance driver. * The CPM Ethernet implementation allows Multicast as well as individual @@ -2462,7 +2453,6 @@ int __init fec_enet_init(struct net_device *dev) dev->tx_timeout = fec_timeout; dev->watchdog_timeo = TX_TIMEOUT; dev->stop = fec_enet_close; - dev->get_stats = fec_enet_get_stats; dev->set_multicast_list = set_multicast_list; for (i=0; iname); - for (j = 0; (j < 5); j++) - printk("%02x:", dev->dev_addr[j]); - printk("%02x\n", dev->dev_addr[5]); + printk("%s: ethernet %s\n", + dev->name, print_mac(mac, dev->dev_addr)); } return 0; } diff --git a/drivers/net/fec_8xx/fec_8xx.h b/drivers/net/fec_8xx/fec_8xx.h index 5af60b0f92086e7af2506e6d477c06edda3ace90..f3b1c6fbba8b81c99fe35b702cc13facab74e34f 100644 --- a/drivers/net/fec_8xx/fec_8xx.h +++ b/drivers/net/fec_8xx/fec_8xx.h @@ -105,6 +105,8 @@ struct fec; struct fec_enet_private { spinlock_t lock; /* during all ops except TX pckt processing */ spinlock_t tx_lock; /* during fec_start_xmit and fec_tx */ + struct net_device *dev; + struct napi_struct napi; int fecno; struct fec *fecp; const struct fec_platform_info *fpi; diff --git a/drivers/net/fec_8xx/fec_main.c b/drivers/net/fec_8xx/fec_main.c index e5502af5b8e21a2bf7e3cef3aa13b6e1f1f9c69b..8d2904fa57896d33db21f274ca9181fb6f2f828a 100644 --- a/drivers/net/fec_8xx/fec_main.c +++ b/drivers/net/fec_8xx/fec_main.c @@ -465,9 +465,9 @@ void fec_stop(struct net_device *dev) } /* common receive function */ -static int fec_enet_rx_common(struct net_device *dev, int *budget) +static int fec_enet_rx_common(struct fec_enet_private *fep, + struct net_device *dev, int budget) { - struct fec_enet_private *fep = netdev_priv(dev); fec_t *fecp = fep->fecp; const struct fec_platform_info *fpi = fep->fpi; cbd_t *bdp; @@ -475,11 +475,8 @@ static int fec_enet_rx_common(struct net_device *dev, int *budget) int received = 0; __u16 pkt_len, sc; int curidx; - int rx_work_limit; if (fpi->use_napi) { - rx_work_limit = min(dev->quota, *budget); - if (!netif_running(dev)) return 0; } @@ -530,11 +527,6 @@ static int fec_enet_rx_common(struct net_device *dev, int *budget) BUG_ON(skbn == NULL); } else { - - /* napi, got packet but no quota */ - if (fpi->use_napi && --rx_work_limit < 0) - break; - skb = fep->rx_skbuff[curidx]; BUG_ON(skb == NULL); @@ -599,25 +591,24 @@ static int fec_enet_rx_common(struct net_device *dev, int *budget) * able to keep up at the expense of system resources.
*/ FW(fecp, r_des_active, 0x01000000); + + if (received >= budget) + break; + } fep->cur_rx = bdp; if (fpi->use_napi) { - dev->quota -= received; - *budget -= received; - - if (rx_work_limit < 0) - return 1; /* not done */ + if (received < budget) { + netif_rx_complete(dev, &fep->napi); - /* done */ - netif_rx_complete(dev); - - /* enable RX interrupt bits */ - FS(fecp, imask, FEC_ENET_RXF | FEC_ENET_RXB); + /* enable RX interrupt bits */ + FS(fecp, imask, FEC_ENET_RXF | FEC_ENET_RXB); + } } - return 0; + return received; } static void fec_enet_tx(struct net_device *dev) @@ -743,12 +734,12 @@ fec_enet_interrupt(int irq, void *dev_id) if ((int_events & FEC_ENET_RXF) != 0) { if (!fpi->use_napi) - fec_enet_rx_common(dev, NULL); + fec_enet_rx_common(fep, dev, ~0); else { - if (netif_rx_schedule_prep(dev)) { + if (netif_rx_schedule_prep(dev, &fep->napi)) { /* disable rx interrupts */ FC(fecp, imask, FEC_ENET_RXF | FEC_ENET_RXB); - __netif_rx_schedule(dev); + __netif_rx_schedule(dev, &fep->napi); } else { printk(KERN_ERR DRV_MODULE_NAME ": %s driver bug! interrupt while in poll!\n", @@ -893,10 +884,13 @@ static int fec_enet_open(struct net_device *dev) const struct fec_platform_info *fpi = fep->fpi; unsigned long flags; + napi_enable(&fep->napi); + /* Install our interrupt handler. */ if (request_irq(fpi->fec_irq, fec_enet_interrupt, 0, "fec", dev) != 0) { printk(KERN_ERR DRV_MODULE_NAME ": %s Could not allocate FEC IRQ!", dev->name); + napi_disable(&fep->napi); return -EINVAL; } @@ -907,6 +901,7 @@ static int fec_enet_open(struct net_device *dev) printk(KERN_ERR DRV_MODULE_NAME ": %s Could not allocate PHY IRQ!", dev->name); free_irq(fpi->fec_irq, dev); + napi_disable(&fep->napi); return -EINVAL; } @@ -932,6 +927,7 @@ static int fec_enet_close(struct net_device *dev) unsigned long flags; netif_stop_queue(dev); + napi_disable(&fep->napi); netif_carrier_off(dev); if (fpi->use_mdio) @@ -955,9 +951,12 @@ static struct net_device_stats *fec_enet_get_stats(struct net_device *dev) return &fep->stats; } -static int fec_enet_poll(struct net_device *dev, int *budget) +static int fec_enet_poll(struct napi_struct *napi, int budget) { - return fec_enet_rx_common(dev, budget); + struct fec_enet_private *fep = container_of(napi, struct fec_enet_private, napi); + struct net_device *dev = fep->dev; + + return fec_enet_rx_common(fep, dev, budget); } /*************************************************************************/ @@ -1042,9 +1041,7 @@ static const struct ethtool_ops fec_ethtool_ops = { .get_link = ethtool_op_get_link, .get_msglevel = fec_get_msglevel, .set_msglevel = fec_set_msglevel, - .get_tx_csum = ethtool_op_get_tx_csum, .set_tx_csum = ethtool_op_set_tx_csum, /* local! 
*/ - .get_sg = ethtool_op_get_sg, .set_sg = ethtool_op_set_sg, .get_regs = fec_get_regs, }; @@ -1104,9 +1101,9 @@ int fec_8xx_init_one(const struct fec_platform_info *fpi, err = -ENOMEM; goto err; } - SET_MODULE_OWNER(dev); fep = netdev_priv(dev); + fep->dev = dev; /* partial reset of FEC */ fec_whack_reset(fecp); @@ -1172,10 +1169,9 @@ int fec_8xx_init_one(const struct fec_platform_info *fpi, dev->get_stats = fec_enet_get_stats; dev->set_multicast_list = fec_set_multicast_list; dev->set_mac_address = fec_set_mac_address; - if (fpi->use_napi) { - dev->poll = fec_enet_poll; - dev->weight = fpi->napi_weight; - } + netif_napi_add(dev, &fep->napi, + fec_enet_poll, fpi->napi_weight); + dev->ethtool_ops = &fec_ethtool_ops; dev->do_ioctl = fec_ioctl; diff --git a/drivers/net/fec_8xx/fec_mii.c b/drivers/net/fec_8xx/fec_mii.c index b3fa0d6a159ce51f5490f16dcae58d03a927fbca..e8e10a02d202948429c4445e535cf6a0ffbb9f49 100644 --- a/drivers/net/fec_8xx/fec_mii.c +++ b/drivers/net/fec_8xx/fec_mii.c @@ -308,12 +308,11 @@ int fec_mii_phy_id_detect(struct net_device *dev) return -1; } - for (i = 0, phy = phy_info; i < sizeof(phy_info) / sizeof(phy_info[0]); - i++, phy++) + for (i = 0, phy = phy_info; i < ARRAY_SIZE(phy_info); i++, phy++) if (phy->id == (phy_hwid >> 4) || phy->id == 0) break; - if (i >= sizeof(phy_info) / sizeof(phy_info[0])) { + if (i >= ARRAY_SIZE(phy_info)) { printk(KERN_ERR DRV_MODULE_NAME ": %s PHY id 0x%08x is not supported!\n", dev->name, phy_hwid); diff --git a/drivers/net/forcedeth.c b/drivers/net/forcedeth.c index 1938d6dfc8631db80e77ef3040f7d1e876f2e805..dae30b731342ef5e25e98318018984893b687665 100644 --- a/drivers/net/forcedeth.c +++ b/drivers/net/forcedeth.c @@ -159,6 +159,8 @@ #define dprintk(x...) do { } while (0) #endif +#define TX_WORK_PER_LOOP 64 +#define RX_WORK_PER_LOOP 64 /* * Hardware access: @@ -745,6 +747,9 @@ struct nv_skb_map { struct fe_priv { spinlock_t lock; + struct net_device *dev; + struct napi_struct napi; + /* General data: * Locking: spin_lock(&np->lock); */ struct net_device_stats stats; @@ -1586,9 +1591,10 @@ static int nv_alloc_rx_optimized(struct net_device *dev) static void nv_do_rx_refill(unsigned long data) { struct net_device *dev = (struct net_device *) data; + struct fe_priv *np = netdev_priv(dev); /* Just reschedule NAPI rx processing */ - netif_rx_schedule(dev); + netif_rx_schedule(dev, &np->napi); } #else static void nv_do_rx_refill(unsigned long data) @@ -2997,7 +3003,7 @@ static irqreturn_t nv_nic_irq(int foo, void *data) #ifdef CONFIG_FORCEDETH_NAPI if (events & NVREG_IRQ_RX_ALL) { - netif_rx_schedule(dev); + netif_rx_schedule(dev, &np->napi); /* Disable furthur receive irq's */ spin_lock(&np->lock); @@ -3010,7 +3016,7 @@ static irqreturn_t nv_nic_irq(int foo, void *data) spin_unlock(&np->lock); } #else - if (nv_rx_process(dev, dev->weight)) { + if (nv_rx_process(dev, RX_WORK_PER_LOOP)) { if (unlikely(nv_alloc_rx(dev))) { spin_lock(&np->lock); if (!np->in_shutdown) @@ -3079,8 +3085,6 @@ static irqreturn_t nv_nic_irq(int foo, void *data) return IRQ_RETVAL(i); } -#define TX_WORK_PER_LOOP 64 -#define RX_WORK_PER_LOOP 64 /** * All _optimized functions are used to help increase performance * (reduce CPU and increase throughput).
They use descripter version 3, @@ -3114,7 +3118,7 @@ static irqreturn_t nv_nic_irq_optimized(int foo, void *data) #ifdef CONFIG_FORCEDETH_NAPI if (events & NVREG_IRQ_RX_ALL) { - netif_rx_schedule(dev); + netif_rx_schedule(dev, &np->napi); /* Disable furthur receive irq's */ spin_lock(&np->lock); @@ -3127,7 +3131,7 @@ static irqreturn_t nv_nic_irq_optimized(int foo, void *data) spin_unlock(&np->lock); } #else - if (nv_rx_process_optimized(dev, dev->weight)) { + if (nv_rx_process_optimized(dev, RX_WORK_PER_LOOP)) { if (unlikely(nv_alloc_rx_optimized(dev))) { spin_lock(&np->lock); if (!np->in_shutdown) @@ -3245,19 +3249,19 @@ static irqreturn_t nv_nic_irq_tx(int foo, void *data) } #ifdef CONFIG_FORCEDETH_NAPI -static int nv_napi_poll(struct net_device *dev, int *budget) +static int nv_napi_poll(struct napi_struct *napi, int budget) { - int pkts, limit = min(*budget, dev->quota); - struct fe_priv *np = netdev_priv(dev); + struct fe_priv *np = container_of(napi, struct fe_priv, napi); + struct net_device *dev = np->dev; u8 __iomem *base = get_hwbase(dev); unsigned long flags; - int retcode; + int pkts, retcode; if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) { - pkts = nv_rx_process(dev, limit); + pkts = nv_rx_process(dev, budget); retcode = nv_alloc_rx(dev); } else { - pkts = nv_rx_process_optimized(dev, limit); + pkts = nv_rx_process_optimized(dev, budget); retcode = nv_alloc_rx_optimized(dev); } @@ -3268,13 +3272,12 @@ static int nv_napi_poll(struct net_device *dev, int *budget) spin_unlock_irqrestore(&np->lock, flags); } - if (pkts < limit) { - /* all done, no more packets present */ - netif_rx_complete(dev); - + if (pkts < budget) { /* re-enable receive interrupts */ spin_lock_irqsave(&np->lock, flags); + __netif_rx_complete(dev, napi); + np->irqmask |= NVREG_IRQ_RX_ALL; if (np->msi_flags & NV_MSI_X_ENABLED) writel(NVREG_IRQ_RX_ALL, base + NvRegIrqMask); @@ -3282,13 +3285,8 @@ static int nv_napi_poll(struct net_device *dev, int *budget) writel(np->irqmask, base + NvRegIrqMask); spin_unlock_irqrestore(&np->lock, flags); - return 0; - } else { - /* used up our quantum, so reschedule */ - dev->quota -= pkts; - *budget -= pkts; - return 1; } + return pkts; } #endif @@ -3296,6 +3294,7 @@ static int nv_napi_poll(struct net_device *dev, int *budget) static irqreturn_t nv_nic_irq_rx(int foo, void *data) { struct net_device *dev = (struct net_device *) data; + struct fe_priv *np = netdev_priv(dev); u8 __iomem *base = get_hwbase(dev); u32 events; @@ -3303,7 +3302,7 @@ static irqreturn_t nv_nic_irq_rx(int foo, void *data) writel(NVREG_IRQ_RX_ALL, base + NvRegMSIXIrqStatus); if (events) { - netif_rx_schedule(dev); + netif_rx_schedule(dev, &np->napi); /* disable receive interrupts on the nic */ writel(NVREG_IRQ_RX_ALL, base + NvRegIrqMask); pci_push(base); @@ -3329,7 +3328,7 @@ static irqreturn_t nv_nic_irq_rx(int foo, void *data) if (!(events & np->irqmask)) break; - if (nv_rx_process_optimized(dev, dev->weight)) { + if (nv_rx_process_optimized(dev, RX_WORK_PER_LOOP)) { if (unlikely(nv_alloc_rx_optimized(dev))) { spin_lock_irqsave(&np->lock, flags); if (!np->in_shutdown) @@ -4334,16 +4333,26 @@ static int nv_set_sg(struct net_device *dev, u32 data) return -EOPNOTSUPP; } -static int nv_get_stats_count(struct net_device *dev) +static int nv_get_sset_count(struct net_device *dev, int sset) { struct fe_priv *np = netdev_priv(dev); - if (np->driver_data & DEV_HAS_STATISTICS_V1) - return NV_DEV_STATISTICS_V1_COUNT; - else if (np->driver_data & DEV_HAS_STATISTICS_V2) - return 
NV_DEV_STATISTICS_V2_COUNT; - else - return 0; + switch (sset) { + case ETH_SS_TEST: + if (np->driver_data & DEV_HAS_TEST_EXTENDED) + return NV_TEST_COUNT_EXTENDED; + else + return NV_TEST_COUNT_BASE; + case ETH_SS_STATS: + if (np->driver_data & DEV_HAS_STATISTICS_V1) + return NV_DEV_STATISTICS_V1_COUNT; + else if (np->driver_data & DEV_HAS_STATISTICS_V2) + return NV_DEV_STATISTICS_V2_COUNT; + else + return 0; + default: + return -EOPNOTSUPP; + } } static void nv_get_ethtool_stats(struct net_device *dev, struct ethtool_stats *estats, u64 *buffer) @@ -4353,17 +4362,7 @@ static void nv_get_ethtool_stats(struct net_device *dev, struct ethtool_stats *e /* update stats */ nv_do_stats_poll((unsigned long)dev); - memcpy(buffer, &np->estats, nv_get_stats_count(dev)*sizeof(u64)); -} - -static int nv_self_test_count(struct net_device *dev) -{ - struct fe_priv *np = netdev_priv(dev); - - if (np->driver_data & DEV_HAS_TEST_EXTENDED) - return NV_TEST_COUNT_EXTENDED; - else - return NV_TEST_COUNT_BASE; + memcpy(buffer, &np->estats, nv_get_sset_count(dev, ETH_SS_STATS)*sizeof(u64)); } static int nv_link_test(struct net_device *dev) @@ -4610,7 +4609,7 @@ static void nv_self_test(struct net_device *dev, struct ethtool_test *test, u64 struct fe_priv *np = netdev_priv(dev); u8 __iomem *base = get_hwbase(dev); int result; - memset(buffer, 0, nv_self_test_count(dev)*sizeof(u64)); + memset(buffer, 0, nv_get_sset_count(dev, ETH_SS_TEST)*sizeof(u64)); if (!nv_link_test(dev)) { test->flags |= ETH_TEST_FL_FAILED; @@ -4620,7 +4619,9 @@ static void nv_self_test(struct net_device *dev, struct ethtool_test *test, u64 if (test->flags & ETH_TEST_FL_OFFLINE) { if (netif_running(dev)) { netif_stop_queue(dev); - netif_poll_disable(dev); +#ifdef CONFIG_FORCEDETH_NAPI + napi_disable(&np->napi); +#endif netif_tx_lock_bh(dev); spin_lock_irq(&np->lock); nv_disable_hw_interrupts(dev, np->irqmask); @@ -4679,7 +4680,9 @@ static void nv_self_test(struct net_device *dev, struct ethtool_test *test, u64 nv_start_rx(dev); nv_start_tx(dev); netif_start_queue(dev); - netif_poll_enable(dev); +#ifdef CONFIG_FORCEDETH_NAPI + napi_enable(&np->napi); +#endif nv_enable_hw_interrupts(dev, np->irqmask); } } @@ -4689,10 +4692,10 @@ static void nv_get_strings(struct net_device *dev, u32 stringset, u8 *buffer) { switch (stringset) { case ETH_SS_STATS: - memcpy(buffer, &nv_estats_str, nv_get_stats_count(dev)*sizeof(struct nv_ethtool_str)); + memcpy(buffer, &nv_estats_str, nv_get_sset_count(dev, ETH_SS_STATS)*sizeof(struct nv_ethtool_str)); break; case ETH_SS_TEST: - memcpy(buffer, &nv_etests_str, nv_self_test_count(dev)*sizeof(struct nv_ethtool_str)); + memcpy(buffer, &nv_etests_str, nv_get_sset_count(dev, ETH_SS_TEST)*sizeof(struct nv_ethtool_str)); break; } } @@ -4707,7 +4710,6 @@ static const struct ethtool_ops ops = { .get_regs_len = nv_get_regs_len, .get_regs = nv_get_regs, .nway_reset = nv_nway_reset, - .get_tso = ethtool_op_get_tso, .set_tso = nv_set_tso, .get_ringparam = nv_get_ringparam, .set_ringparam = nv_set_ringparam, @@ -4715,14 +4717,11 @@ static const struct ethtool_ops ops = { .set_pauseparam = nv_set_pauseparam, .get_rx_csum = nv_get_rx_csum, .set_rx_csum = nv_set_rx_csum, - .get_tx_csum = ethtool_op_get_tx_csum, .set_tx_csum = nv_set_tx_csum, - .get_sg = ethtool_op_get_sg, .set_sg = nv_set_sg, .get_strings = nv_get_strings, - .get_stats_count = nv_get_stats_count, .get_ethtool_stats = nv_get_ethtool_stats, - .self_test_count = nv_self_test_count, + .get_sset_count = nv_get_sset_count, .self_test = nv_self_test, }; @@ -4911,12 
+4910,14 @@ static int nv_open(struct net_device *dev) nv_start_rx(dev); nv_start_tx(dev); netif_start_queue(dev); - netif_poll_enable(dev); +#ifdef CONFIG_FORCEDETH_NAPI + napi_enable(&np->napi); +#endif if (ret) { netif_carrier_on(dev); } else { - printk("%s: no link during initialization.\n", dev->name); + printk(KERN_INFO "%s: no link during initialization.\n", dev->name); netif_carrier_off(dev); } if (oom) @@ -4942,7 +4943,9 @@ static int nv_close(struct net_device *dev) spin_lock_irq(&np->lock); np->in_shutdown = 1; spin_unlock_irq(&np->lock); - netif_poll_disable(dev); +#ifdef CONFIG_FORCEDETH_NAPI + napi_disable(&np->napi); +#endif synchronize_irq(dev->irq); del_timer_sync(&np->oom_kick); @@ -4987,6 +4990,7 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i u32 powerstate, txreg; u32 phystate_orig = 0, phystate; int phyinitialized = 0; + DECLARE_MAC_BUF(mac); dev = alloc_etherdev(sizeof(struct fe_priv)); err = -ENOMEM; @@ -4994,9 +4998,9 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i goto out; np = netdev_priv(dev); + np->dev = dev; np->pci_dev = pci_dev; spin_lock_init(&np->lock); - SET_MODULE_OWNER(dev); SET_NETDEV_DEV(dev, &pci_dev->dev); init_timer(&np->oom_kick); @@ -5155,9 +5159,8 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i #ifdef CONFIG_NET_POLL_CONTROLLER dev->poll_controller = nv_poll_controller; #endif - dev->weight = RX_WORK_PER_LOOP; #ifdef CONFIG_FORCEDETH_NAPI - dev->poll = nv_napi_poll; + netif_napi_add(dev, &np->napi, nv_napi_poll, RX_WORK_PER_LOOP); #endif SET_ETHTOOL_OPS(dev, &ops); dev->tx_timeout = nv_tx_timeout; @@ -5202,10 +5205,8 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i * Bad mac address. At least one bios sets the mac address * to 01:23:45:67:89:ab */ - printk(KERN_ERR "%s: Invalid Mac address detected: %02x:%02x:%02x:%02x:%02x:%02x\n", - pci_name(pci_dev), - dev->dev_addr[0], dev->dev_addr[1], dev->dev_addr[2], - dev->dev_addr[3], dev->dev_addr[4], dev->dev_addr[5]); + printk(KERN_ERR "%s: Invalid Mac address detected: %s\n", + pci_name(pci_dev), print_mac(mac, dev->dev_addr)); printk(KERN_ERR "Please complain to your hardware vendor. 
Switching to a random MAC.\n"); dev->dev_addr[0] = 0x00; dev->dev_addr[1] = 0x00; @@ -5213,9 +5214,8 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i get_random_bytes(&dev->dev_addr[3], 3); } - dprintk(KERN_DEBUG "%s: MAC Address %02x:%02x:%02x:%02x:%02x:%02x\n", pci_name(pci_dev), - dev->dev_addr[0], dev->dev_addr[1], dev->dev_addr[2], - dev->dev_addr[3], dev->dev_addr[4], dev->dev_addr[5]); + dprintk(KERN_DEBUG "%s: MAC Address %s\n", + pci_name(pci_dev), print_mac(mac, dev->dev_addr)); /* set mac address */ nv_copy_mac_to_hw(dev); diff --git a/drivers/net/fs_enet/Kconfig b/drivers/net/fs_enet/Kconfig index e27ee210b605ff0fa41da3197ce84a62a628183e..2765e49e07df121982e8c4ebe5bc4f0bff2f28b8 100644 --- a/drivers/net/fs_enet/Kconfig +++ b/drivers/net/fs_enet/Kconfig @@ -11,6 +11,7 @@ config FS_ENET_HAS_SCC config FS_ENET_HAS_FCC bool "Chip has an FCC usable for ethernet" depends on FS_ENET && CPM2 + select MDIO_BITBANG default y config FS_ENET_HAS_FEC diff --git a/drivers/net/fs_enet/fs_enet-main.c b/drivers/net/fs_enet/fs_enet-main.c index a4a2a0ea43d3e7972b222be9016ad04933b4d6dc..04c6faec88d2e5154007adce7c5e0cf0906e3e05 100644 --- a/drivers/net/fs_enet/fs_enet-main.c +++ b/drivers/net/fs_enet/fs_enet-main.c @@ -1,17 +1,17 @@ /* * Combined Ethernet driver for Motorola MPC8xx and MPC82xx. * - * Copyright (c) 2003 Intracom S.A. + * Copyright (c) 2003 Intracom S.A. * by Pantelis Antoniou - * - * 2005 (c) MontaVista Software, Inc. + * + * 2005 (c) MontaVista Software, Inc. * Vitaly Bordug * * Heavily based on original FEC driver by Dan Malek * and modifications by Joakim Tjernlund * - * This file is licensed under the terms of the GNU General Public License - * version 2. This program is licensed "as is" without any warranty of any + * This file is licensed under the terms of the GNU General Public License + * version 2. This program is licensed "as is" without any warranty of any * kind, whether express or implied. 
*/ @@ -38,29 +38,36 @@ #include #include -#include - #include #include #include +#ifdef CONFIG_PPC_CPM_NEW_BINDING +#include +#endif + #include "fs_enet.h" /*************************************************/ +#ifndef CONFIG_PPC_CPM_NEW_BINDING static char version[] __devinitdata = DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")" "\n"; +#endif MODULE_AUTHOR("Pantelis Antoniou "); MODULE_DESCRIPTION("Freescale Ethernet Driver"); MODULE_LICENSE("GPL"); MODULE_VERSION(DRV_MODULE_VERSION); -int fs_enet_debug = -1; /* -1 == use FS_ENET_DEF_MSG_ENABLE as value */ +static int fs_enet_debug = -1; /* -1 == use FS_ENET_DEF_MSG_ENABLE as value */ module_param(fs_enet_debug, int, 0); MODULE_PARM_DESC(fs_enet_debug, "Freescale bitmapped debugging message enable value"); +#ifdef CONFIG_NET_POLL_CONTROLLER +static void fs_enet_netpoll(struct net_device *dev); +#endif static void fs_set_multicast_list(struct net_device *dev) { @@ -69,19 +76,25 @@ static void fs_set_multicast_list(struct net_device *dev) (*fep->ops->set_multicast_list)(dev); } +static void skb_align(struct sk_buff *skb, int align) +{ + int off = ((unsigned long)skb->data) & (align - 1); + + if (off) + skb_reserve(skb, align - off); +} + /* NAPI receive function */ -static int fs_enet_rx_napi(struct net_device *dev, int *budget) +static int fs_enet_rx_napi(struct napi_struct *napi, int budget) { - struct fs_enet_private *fep = netdev_priv(dev); + struct fs_enet_private *fep = container_of(napi, struct fs_enet_private, napi); + struct net_device *dev = to_net_dev(fep->dev); const struct fs_platform_info *fpi = fep->fpi; - cbd_t *bdp; + cbd_t __iomem *bdp; struct sk_buff *skb, *skbn, *skbt; int received = 0; u16 pkt_len, sc; int curidx; - int rx_work_limit = 0; /* pacify gcc */ - - rx_work_limit = min(dev->quota, *budget); if (!netif_running(dev)) return 0; @@ -96,7 +109,6 @@ static int fs_enet_rx_napi(struct net_device *dev, int *budget) (*fep->ops->napi_clear_rx_event)(dev); while (((sc = CBDR_SC(bdp)) & BD_ENET_RX_EMPTY) == 0) { - curidx = bdp - fep->rx_bd_base; /* @@ -109,7 +121,7 @@ static int fs_enet_rx_napi(struct net_device *dev, int *budget) dev->name); /* - * Check for errors. + * Check for errors. */ if (sc & (BD_ENET_RX_LG | BD_ENET_RX_SH | BD_ENET_RX_CL | BD_ENET_RX_NO | BD_ENET_RX_CR | BD_ENET_RX_OV)) { @@ -136,11 +148,6 @@ static int fs_enet_rx_napi(struct net_device *dev, int *budget) skbn = skb; } else { - - /* napi, got packet but no quota */ - if (--rx_work_limit < 0) - break; - skb = fep->rx_skbuff[curidx]; dma_unmap_single(fep->dev, CBDR_BUFADDR(bdp), @@ -166,9 +173,13 @@ static int fs_enet_rx_napi(struct net_device *dev, int *budget) skb = skbn; skbn = skbt; } - } else + } else { skbn = dev_alloc_skb(ENET_RX_FRSIZE); + if (skbn) + skb_align(skbn, ENET_RX_ALIGN); + } + if (skbn != NULL) { skb_put(skb, pkt_len); /* Make room */ skb->protocol = eth_type_trans(skb, dev); @@ -191,7 +202,7 @@ static int fs_enet_rx_napi(struct net_device *dev, int *budget) CBDW_SC(bdp, (sc & ~BD_ENET_RX_STATS) | BD_ENET_RX_EMPTY); /* - * Update BD pointer to next entry. + * Update BD pointer to next entry. 
*/ if ((sc & BD_ENET_RX_WRAP) == 0) bdp++; @@ -199,22 +210,19 @@ static int fs_enet_rx_napi(struct net_device *dev, int *budget) bdp = fep->rx_bd_base; (*fep->ops->rx_bd_done)(dev); + + if (received >= budget) + break; } fep->cur_rx = bdp; - dev->quota -= received; - *budget -= received; - - if (rx_work_limit < 0) - return 1; /* not done */ - - /* done */ - netif_rx_complete(dev); - - (*fep->ops->napi_enable_rx)(dev); - - return 0; + if (received >= budget) { + /* done */ + netif_rx_complete(dev, napi); + (*fep->ops->napi_enable_rx)(dev); + } + return received; } /* non NAPI receive function */ @@ -222,7 +230,7 @@ static int fs_enet_rx_non_napi(struct net_device *dev) { struct fs_enet_private *fep = netdev_priv(dev); const struct fs_platform_info *fpi = fep->fpi; - cbd_t *bdp; + cbd_t __iomem *bdp; struct sk_buff *skb, *skbn, *skbt; int received = 0; u16 pkt_len, sc; @@ -247,7 +255,7 @@ static int fs_enet_rx_non_napi(struct net_device *dev) dev->name); /* - * Check for errors. + * Check for errors. */ if (sc & (BD_ENET_RX_LG | BD_ENET_RX_SH | BD_ENET_RX_CL | BD_ENET_RX_NO | BD_ENET_RX_CR | BD_ENET_RX_OV)) { @@ -300,9 +308,13 @@ static int fs_enet_rx_non_napi(struct net_device *dev) skb = skbn; skbn = skbt; } - } else + } else { skbn = dev_alloc_skb(ENET_RX_FRSIZE); + if (skbn) + skb_align(skbn, ENET_RX_ALIGN); + } + if (skbn != NULL) { skb_put(skb, pkt_len); /* Make room */ skb->protocol = eth_type_trans(skb, dev); @@ -325,7 +337,7 @@ static int fs_enet_rx_non_napi(struct net_device *dev) CBDW_SC(bdp, (sc & ~BD_ENET_RX_STATS) | BD_ENET_RX_EMPTY); /* - * Update BD pointer to next entry. + * Update BD pointer to next entry. */ if ((sc & BD_ENET_RX_WRAP) == 0) bdp++; @@ -343,17 +355,16 @@ static int fs_enet_rx_non_napi(struct net_device *dev) static void fs_enet_tx(struct net_device *dev) { struct fs_enet_private *fep = netdev_priv(dev); - cbd_t *bdp; + cbd_t __iomem *bdp; struct sk_buff *skb; int dirtyidx, do_wake, do_restart; u16 sc; - spin_lock(&fep->lock); + spin_lock(&fep->tx_lock); bdp = fep->dirty_tx; do_wake = do_restart = 0; while (((sc = CBDR_SC(bdp)) & BD_ENET_TX_READY) == 0) { - dirtyidx = bdp - fep->tx_bd_base; if (fep->tx_free == fep->tx_ring) @@ -362,7 +373,7 @@ static void fs_enet_tx(struct net_device *dev) skb = fep->tx_skbuff[dirtyidx]; /* - * Check for errors. + * Check for errors. */ if (sc & (BD_ENET_TX_HB | BD_ENET_TX_LC | BD_ENET_TX_RL | BD_ENET_TX_UN | BD_ENET_TX_CSL)) { @@ -402,13 +413,13 @@ static void fs_enet_tx(struct net_device *dev) skb->len, DMA_TO_DEVICE); /* - * Free the sk buffer associated with this last transmit. + * Free the sk buffer associated with this last transmit. */ dev_kfree_skb_irq(skb); fep->tx_skbuff[dirtyidx] = NULL; /* - * Update pointer to next buffer descriptor to be transmitted. + * Update pointer to next buffer descriptor to be transmitted. 
*/ if ((sc & BD_ENET_TX_WRAP) == 0) bdp++; @@ -428,7 +439,7 @@ static void fs_enet_tx(struct net_device *dev) if (do_restart) (*fep->ops->tx_restart)(dev); - spin_unlock(&fep->lock); + spin_unlock(&fep->tx_lock); if (do_wake) netif_wake_queue(dev); @@ -454,7 +465,6 @@ fs_enet_interrupt(int irq, void *dev_id) nr = 0; while ((int_events = (*fep->ops->get_int_events)(dev)) != 0) { - nr++; int_clr_events = int_events; @@ -470,7 +480,7 @@ fs_enet_interrupt(int irq, void *dev_id) if (!fpi->use_napi) fs_enet_rx_non_napi(dev); else { - napi_ok = netif_rx_schedule_prep(dev); + napi_ok = napi_schedule_prep(&fep->napi); (*fep->ops->napi_disable_rx)(dev); (*fep->ops->clear_int_events)(dev, fep->ev_napi_rx); @@ -478,7 +488,7 @@ fs_enet_interrupt(int irq, void *dev_id) /* NOTE: it is possible for FCCs in NAPI mode */ /* to submit a spurious interrupt while in poll */ if (napi_ok) - __netif_rx_schedule(dev); + __netif_rx_schedule(dev, &fep->napi); } } @@ -493,7 +503,7 @@ fs_enet_interrupt(int irq, void *dev_id) void fs_init_bds(struct net_device *dev) { struct fs_enet_private *fep = netdev_priv(dev); - cbd_t *bdp; + cbd_t __iomem *bdp; struct sk_buff *skb; int i; @@ -504,7 +514,7 @@ void fs_init_bds(struct net_device *dev) fep->cur_rx = fep->rx_bd_base; /* - * Initialize the receive buffer descriptors. + * Initialize the receive buffer descriptors. */ for (i = 0, bdp = fep->rx_bd_base; i < fep->rx_ring; i++, bdp++) { skb = dev_alloc_skb(ENET_RX_FRSIZE); @@ -514,6 +524,7 @@ void fs_init_bds(struct net_device *dev) dev->name); break; } + skb_align(skb, ENET_RX_ALIGN); fep->rx_skbuff[i] = skb; CBDW_BUFADDR(bdp, dma_map_single(fep->dev, skb->data, @@ -524,7 +535,7 @@ void fs_init_bds(struct net_device *dev) ((i < fep->rx_ring - 1) ? 0 : BD_SC_WRAP)); } /* - * if we failed, fillup remainder + * if we failed, fillup remainder */ for (; i < fep->rx_ring; i++, bdp++) { fep->rx_skbuff[i] = NULL; @@ -532,7 +543,7 @@ void fs_init_bds(struct net_device *dev) } /* - * ...and the same for transmit. + * ...and the same for transmit. */ for (i = 0, bdp = fep->tx_bd_base; i < fep->tx_ring; i++, bdp++) { fep->tx_skbuff[i] = NULL; @@ -546,11 +557,11 @@ void fs_cleanup_bds(struct net_device *dev) { struct fs_enet_private *fep = netdev_priv(dev); struct sk_buff *skb; - cbd_t *bdp; + cbd_t __iomem *bdp; int i; /* - * Reset SKB transmit buffers. + * Reset SKB transmit buffers. */ for (i = 0, bdp = fep->tx_bd_base; i < fep->tx_ring; i++, bdp++) { if ((skb = fep->tx_skbuff[i]) == NULL) @@ -565,7 +576,7 @@ void fs_cleanup_bds(struct net_device *dev) } /* - * Reset SKB receive buffers + * Reset SKB receive buffers */ for (i = 0, bdp = fep->rx_bd_base; i < fep->rx_ring; i++, bdp++) { if ((skb = fep->rx_skbuff[i]) == NULL) @@ -587,7 +598,7 @@ void fs_cleanup_bds(struct net_device *dev) static int fs_enet_start_xmit(struct sk_buff *skb, struct net_device *dev) { struct fs_enet_private *fep = netdev_priv(dev); - cbd_t *bdp; + cbd_t __iomem *bdp; int curidx; u16 sc; unsigned long flags; @@ -595,7 +606,7 @@ static int fs_enet_start_xmit(struct sk_buff *skb, struct net_device *dev) spin_lock_irqsave(&fep->tx_lock, flags); /* - * Fill in a Tx ring entry + * Fill in a Tx ring entry */ bdp = fep->cur_tx; @@ -614,19 +625,19 @@ static int fs_enet_start_xmit(struct sk_buff *skb, struct net_device *dev) curidx = bdp - fep->tx_bd_base; /* - * Clear all of the status flags. + * Clear all of the status flags. */ CBDC_SC(bdp, BD_ENET_TX_STATS); /* - * Save skb pointer. + * Save skb pointer. 
*/ fep->tx_skbuff[curidx] = skb; fep->stats.tx_bytes += skb->len; /* - * Push the data cache so the CPM does not get stale memory data. + * Push the data cache so the CPM does not get stale memory data. */ CBDW_BUFADDR(bdp, dma_map_single(fep->dev, skb->data, skb->len, DMA_TO_DEVICE)); @@ -635,7 +646,7 @@ static int fs_enet_start_xmit(struct sk_buff *skb, struct net_device *dev) dev->trans_start = jiffies; /* - * If this was the last BD in the ring, start at the beginning again. + * If this was the last BD in the ring, start at the beginning again. */ if ((CBDR_SC(bdp) & BD_ENET_TX_WRAP) == 0) fep->cur_tx++; @@ -710,45 +721,43 @@ static void fs_timeout(struct net_device *dev) *-----------------------------------------------------------------------------*/ static void generic_adjust_link(struct net_device *dev) { - struct fs_enet_private *fep = netdev_priv(dev); - struct phy_device *phydev = fep->phydev; - int new_state = 0; - - if (phydev->link) { - - /* adjust to duplex mode */ - if (phydev->duplex != fep->oldduplex){ - new_state = 1; - fep->oldduplex = phydev->duplex; - } - - if (phydev->speed != fep->oldspeed) { - new_state = 1; - fep->oldspeed = phydev->speed; - } - - if (!fep->oldlink) { - new_state = 1; - fep->oldlink = 1; - netif_schedule(dev); - netif_carrier_on(dev); - netif_start_queue(dev); - } - - if (new_state) - fep->ops->restart(dev); - - } else if (fep->oldlink) { - new_state = 1; - fep->oldlink = 0; - fep->oldspeed = 0; - fep->oldduplex = -1; - netif_carrier_off(dev); - netif_stop_queue(dev); - } - - if (new_state && netif_msg_link(fep)) - phy_print_status(phydev); + struct fs_enet_private *fep = netdev_priv(dev); + struct phy_device *phydev = fep->phydev; + int new_state = 0; + + if (phydev->link) { + /* adjust to duplex mode */ + if (phydev->duplex != fep->oldduplex) { + new_state = 1; + fep->oldduplex = phydev->duplex; + } + + if (phydev->speed != fep->oldspeed) { + new_state = 1; + fep->oldspeed = phydev->speed; + } + + if (!fep->oldlink) { + new_state = 1; + fep->oldlink = 1; + netif_schedule(dev); + netif_carrier_on(dev); + netif_start_queue(dev); + } + + if (new_state) + fep->ops->restart(dev); + } else if (fep->oldlink) { + new_state = 1; + fep->oldlink = 0; + fep->oldspeed = 0; + fep->oldduplex = -1; + netif_carrier_off(dev); + netif_stop_queue(dev); + } + + if (new_state && netif_msg_link(fep)) + phy_print_status(phydev); } @@ -792,25 +801,28 @@ static int fs_init_phy(struct net_device *dev) return 0; } - static int fs_enet_open(struct net_device *dev) { struct fs_enet_private *fep = netdev_priv(dev); int r; int err; + napi_enable(&fep->napi); + /* Install our interrupt handler. 
*/ r = fs_request_irq(dev, fep->interrupt, "fs_enet-mac", fs_enet_interrupt); if (r != 0) { printk(KERN_ERR DRV_MODULE_NAME ": %s Could not allocate FS_ENET IRQ!", dev->name); + napi_disable(&fep->napi); return -EINVAL; } err = fs_init_phy(dev); - if(err) + if(err) { + napi_disable(&fep->napi); return err; - + } phy_start(fep->phydev); return 0; @@ -823,10 +835,13 @@ static int fs_enet_close(struct net_device *dev) netif_stop_queue(dev); netif_carrier_off(dev); + napi_disable(&fep->napi); phy_stop(fep->phydev); spin_lock_irqsave(&fep->lock, flags); + spin_lock(&fep->tx_lock); (*fep->ops->stop)(dev); + spin_unlock(&fep->tx_lock); spin_unlock_irqrestore(&fep->lock, flags); /* release any irqs */ @@ -915,9 +930,7 @@ static const struct ethtool_ops fs_ethtool_ops = { .get_link = ethtool_op_get_link, .get_msglevel = fs_get_msglevel, .set_msglevel = fs_set_msglevel, - .get_tx_csum = ethtool_op_get_tx_csum, .set_tx_csum = ethtool_op_set_tx_csum, /* local! */ - .get_sg = ethtool_op_get_sg, .set_sg = ethtool_op_set_sg, .get_regs = fs_get_regs, }; @@ -941,6 +954,7 @@ static int fs_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) extern int fs_mii_connect(struct net_device *dev); extern void fs_mii_disconnect(struct net_device *dev); +#ifndef CONFIG_PPC_CPM_NEW_BINDING static struct net_device *fs_init_instance(struct device *dev, struct fs_platform_info *fpi) { @@ -961,10 +975,8 @@ static struct net_device *fs_init_instance(struct device *dev, err = -ENOMEM; goto err; } - SET_MODULE_OWNER(ndev); fep = netdev_priv(ndev); - memset(fep, 0, privsize); /* clear everything */ fep->dev = dev; dev_set_drvdata(dev, ndev); @@ -978,7 +990,7 @@ static struct net_device *fs_init_instance(struct device *dev, #endif #ifdef CONFIG_FS_ENET_HAS_SCC - if (fs_get_scc_index(fpi->fs_no) >=0 ) + if (fs_get_scc_index(fpi->fs_no) >=0) fep->ops = &fs_scc_ops; #endif @@ -1013,13 +1025,13 @@ static struct net_device *fs_init_instance(struct device *dev, spin_lock_init(&fep->tx_lock); /* - * Set the Ethernet address. + * Set the Ethernet address. */ for (i = 0; i < 6; i++) ndev->dev_addr[i] = fpi->macaddr[i]; - + r = (*fep->ops->allocate_bd)(ndev); - + if (fep->ring_base == NULL) { printk(KERN_ERR DRV_MODULE_NAME ": %s buffer descriptor alloc failed (%d).\n", ndev->name, r); @@ -1038,7 +1050,7 @@ static struct net_device *fs_init_instance(struct device *dev, fep->rx_ring = fpi->rx_ring; /* - * The FEC Ethernet specific entries in the device structure. + * The FEC Ethernet specific entries in the device structure. 
*/ ndev->open = fs_enet_open; ndev->hard_start_xmit = fs_enet_start_xmit; @@ -1047,10 +1059,14 @@ static struct net_device *fs_init_instance(struct device *dev, ndev->stop = fs_enet_close; ndev->get_stats = fs_enet_get_stats; ndev->set_multicast_list = fs_set_multicast_list; - if (fpi->use_napi) { - ndev->poll = fs_enet_rx_napi; - ndev->weight = fpi->napi_weight; - } + +#ifdef CONFIG_NET_POLL_CONTROLLER + ndev->poll_controller = fs_enet_netpoll; +#endif + + netif_napi_add(ndev, &fep->napi, + fs_enet_rx_napi, fpi->napi_weight); + ndev->ethtool_ops = &fs_ethtool_ops; ndev->do_ioctl = fs_ioctl; @@ -1069,9 +1085,8 @@ static struct net_device *fs_init_instance(struct device *dev, return ndev; - err: +err: if (ndev != NULL) { - if (registered) unregister_netdev(ndev); @@ -1106,7 +1121,7 @@ static int fs_cleanup_instance(struct net_device *ndev) unregister_netdev(ndev); dma_free_coherent(fep->dev, (fpi->tx_ring + fpi->rx_ring) * sizeof(cbd_t), - fep->ring_base, fep->ring_mem_addr); + (void __force *)fep->ring_base, fep->ring_mem_addr); /* reset it */ (*fep->ops->cleanup_data)(ndev); @@ -1121,43 +1136,259 @@ static int fs_cleanup_instance(struct net_device *ndev) return 0; } +#endif /**************************************************************************************/ /* handy pointer to the immap */ -void *fs_enet_immap = NULL; +void __iomem *fs_enet_immap = NULL; static int setup_immap(void) { - phys_addr_t paddr = 0; - unsigned long size = 0; - #ifdef CONFIG_CPM1 - paddr = IMAP_ADDR; - size = 0x10000; /* map 64K */ + fs_enet_immap = ioremap(IMAP_ADDR, 0x4000); + WARN_ON(!fs_enet_immap); +#elif defined(CONFIG_CPM2) + fs_enet_immap = cpm2_immr; #endif -#ifdef CONFIG_CPM2 - paddr = CPM_MAP_ADDR; - size = 0x40000; /* map 256 K */ -#endif - fs_enet_immap = ioremap(paddr, size); - if (fs_enet_immap == NULL) - return -EBADF; /* XXX ahem; maybe just BUG_ON? 
*/ - return 0; } static void cleanup_immap(void) { - if (fs_enet_immap != NULL) { - iounmap(fs_enet_immap); - fs_enet_immap = NULL; - } +#if defined(CONFIG_CPM1) + iounmap(fs_enet_immap); +#endif } /**************************************************************************************/ +#ifdef CONFIG_PPC_CPM_NEW_BINDING +static int __devinit find_phy(struct device_node *np, + struct fs_platform_info *fpi) +{ + struct device_node *phynode, *mdionode; + struct resource res; + int ret = 0, len; + + const u32 *data = of_get_property(np, "phy-handle", &len); + if (!data || len != 4) + return -EINVAL; + + phynode = of_find_node_by_phandle(*data); + if (!phynode) + return -EINVAL; + + mdionode = of_get_parent(phynode); + if (!mdionode) + goto out_put_phy; + + ret = of_address_to_resource(mdionode, 0, &res); + if (ret) + goto out_put_mdio; + + data = of_get_property(phynode, "reg", &len); + if (!data || len != 4) + goto out_put_mdio; + + snprintf(fpi->bus_id, 16, PHY_ID_FMT, res.start, *data); + +out_put_mdio: + of_node_put(mdionode); +out_put_phy: + of_node_put(phynode); + return ret; +} + +#ifdef CONFIG_FS_ENET_HAS_FEC +#define IS_FEC(match) ((match)->data == &fs_fec_ops) +#else +#define IS_FEC(match) 0 +#endif + +static int __devinit fs_enet_probe(struct of_device *ofdev, + const struct of_device_id *match) +{ + struct net_device *ndev; + struct fs_enet_private *fep; + struct fs_platform_info *fpi; + const u32 *data; + const u8 *mac_addr; + int privsize, len, ret = -ENODEV; + + fpi = kzalloc(sizeof(*fpi), GFP_KERNEL); + if (!fpi) + return -ENOMEM; + + if (!IS_FEC(match)) { + data = of_get_property(ofdev->node, "fsl,cpm-command", &len); + if (!data || len != 4) + goto out_free_fpi; + + fpi->cp_command = *data; + } + + fpi->rx_ring = 32; + fpi->tx_ring = 32; + fpi->rx_copybreak = 240; + fpi->use_napi = 0; + fpi->napi_weight = 17; + + ret = find_phy(ofdev->node, fpi); + if (ret) + goto out_free_fpi; + + privsize = sizeof(*fep) + + sizeof(struct sk_buff **) * + (fpi->rx_ring + fpi->tx_ring); + + ndev = alloc_etherdev(privsize); + if (!ndev) { + ret = -ENOMEM; + goto out_free_fpi; + } + + SET_MODULE_OWNER(ndev); + dev_set_drvdata(&ofdev->dev, ndev); + + fep = netdev_priv(ndev); + fep->dev = &ofdev->dev; + fep->fpi = fpi; + fep->ops = match->data; + + ret = fep->ops->setup_data(ndev); + if (ret) + goto out_free_dev; + + fep->rx_skbuff = (struct sk_buff **)&fep[1]; + fep->tx_skbuff = fep->rx_skbuff + fpi->rx_ring; + + spin_lock_init(&fep->lock); + spin_lock_init(&fep->tx_lock); + + mac_addr = of_get_mac_address(ofdev->node); + if (mac_addr) + memcpy(ndev->dev_addr, mac_addr, 6); + + ret = fep->ops->allocate_bd(ndev); + if (ret) + goto out_cleanup_data; + + fep->rx_bd_base = fep->ring_base; + fep->tx_bd_base = fep->rx_bd_base + fpi->rx_ring; + + fep->tx_ring = fpi->tx_ring; + fep->rx_ring = fpi->rx_ring; + + ndev->open = fs_enet_open; + ndev->hard_start_xmit = fs_enet_start_xmit; + ndev->tx_timeout = fs_timeout; + ndev->watchdog_timeo = 2 * HZ; + ndev->stop = fs_enet_close; + ndev->get_stats = fs_enet_get_stats; + ndev->set_multicast_list = fs_set_multicast_list; + if (fpi->use_napi) { + ndev->poll = fs_enet_rx_napi; + ndev->weight = fpi->napi_weight; + } + ndev->ethtool_ops = &fs_ethtool_ops; + ndev->do_ioctl = fs_ioctl; + + init_timer(&fep->phy_timer_list); + + netif_carrier_off(ndev); + + ret = register_netdev(ndev); + if (ret) + goto out_free_bd; + + printk(KERN_INFO "%s: fs_enet: %02x:%02x:%02x:%02x:%02x:%02x\n", + ndev->name, + ndev->dev_addr[0], ndev->dev_addr[1], ndev->dev_addr[2], + 
ndev->dev_addr[3], ndev->dev_addr[4], ndev->dev_addr[5]); + + return 0; + +out_free_bd: + fep->ops->free_bd(ndev); +out_cleanup_data: + fep->ops->cleanup_data(ndev); +out_free_dev: + free_netdev(ndev); + dev_set_drvdata(&ofdev->dev, NULL); +out_free_fpi: + kfree(fpi); + return ret; +} + +static int fs_enet_remove(struct of_device *ofdev) +{ + struct net_device *ndev = dev_get_drvdata(&ofdev->dev); + struct fs_enet_private *fep = netdev_priv(ndev); + + unregister_netdev(ndev); + + fep->ops->free_bd(ndev); + fep->ops->cleanup_data(ndev); + dev_set_drvdata(fep->dev, NULL); + + free_netdev(ndev); + return 0; +} + +static struct of_device_id fs_enet_match[] = { +#ifdef CONFIG_FS_ENET_HAS_SCC + { + .compatible = "fsl,cpm1-scc-enet", + .data = (void *)&fs_scc_ops, + }, +#endif +#ifdef CONFIG_FS_ENET_HAS_FCC + { + .compatible = "fsl,cpm2-fcc-enet", + .data = (void *)&fs_fcc_ops, + }, +#endif +#ifdef CONFIG_FS_ENET_HAS_FEC + { + .compatible = "fsl,pq1-fec-enet", + .data = (void *)&fs_fec_ops, + }, +#endif + {} +}; + +static struct of_platform_driver fs_enet_driver = { + .name = "fs_enet", + .match_table = fs_enet_match, + .probe = fs_enet_probe, + .remove = fs_enet_remove, +}; + +static int __init fs_init(void) +{ + int r = setup_immap(); + if (r != 0) + return r; + + r = of_register_platform_driver(&fs_enet_driver); + if (r != 0) + goto out; + + return 0; + +out: + cleanup_immap(); + return r; +} + +static void __exit fs_cleanup(void) +{ + of_unregister_platform_driver(&fs_enet_driver); + cleanup_immap(); +} +#else static int __devinit fs_enet_probe(struct device *dev) { struct net_device *ndev; @@ -1262,7 +1493,6 @@ static int __init fs_init(void) err: cleanup_immap(); return r; - } static void __exit fs_cleanup(void) @@ -1272,6 +1502,16 @@ static void __exit fs_cleanup(void) driver_unregister(&fs_enet_scc_driver); cleanup_immap(); } +#endif + +#ifdef CONFIG_NET_POLL_CONTROLLER +static void fs_enet_netpoll(struct net_device *dev) +{ + disable_irq(dev->irq); + fs_enet_interrupt(dev->irq, dev, NULL); + enable_irq(dev->irq); +} +#endif /**************************************************************************************/ diff --git a/drivers/net/fs_enet/fs_enet.h b/drivers/net/fs_enet/fs_enet.h index 569be225cd056b0955c3310c36706abb040fc8a7..baf6477165af182450adb1cad6037456676ef416 100644 --- a/drivers/net/fs_enet/fs_enet.h +++ b/drivers/net/fs_enet/fs_enet.h @@ -15,8 +15,8 @@ #include struct fec_info { - fec_t* fecp; - u32 mii_speed; + fec_t __iomem *fecp; + u32 mii_speed; }; #endif @@ -24,19 +24,6 @@ struct fec_info { #include #endif -/* This is used to operate with pins. 
- Note that the actual port size may - be different; cpm(s) handle it OK */ -struct bb_info { - u8 mdio_dat_msk; - u8 mdio_dir_msk; - u8 *mdio_dir; - u8 *mdio_dat; - u8 mdc_msk; - u8 *mdc_dat; - int delay; -}; - /* hw driver ops */ struct fs_ops { int (*setup_data)(struct net_device *dev); @@ -82,60 +69,26 @@ struct phy_info { /* Must be a multiple of 32 (to cover both FEC & FCC) */ #define PKT_MAXBLR_SIZE ((PKT_MAXBUF_SIZE + 31) & ~31) /* This is needed so that invalidate_xxx wont invalidate too much */ -#define ENET_RX_FRSIZE L1_CACHE_ALIGN(PKT_MAXBUF_SIZE) - -struct fs_enet_mii_bus { - struct list_head list; - spinlock_t mii_lock; - const struct fs_mii_bus_info *bus_info; - int refs; - u32 usage_map; - - int (*mii_read)(struct fs_enet_mii_bus *bus, - int phy_id, int location); - - void (*mii_write)(struct fs_enet_mii_bus *bus, - int phy_id, int location, int value); - - union { - struct { - unsigned int mii_speed; - void *fecp; - } fec; - - struct { - /* note that the actual port size may */ - /* be different; cpm(s) handle it OK */ - u8 mdio_msk; - u8 *mdio_dir; - u8 *mdio_dat; - u8 mdc_msk; - u8 *mdc_dir; - u8 *mdc_dat; - } bitbang; - - struct { - u16 lpa; - } fixed; - }; -}; +#define ENET_RX_ALIGN 16 +#define ENET_RX_FRSIZE L1_CACHE_ALIGN(PKT_MAXBUF_SIZE + ENET_RX_ALIGN - 1) struct fs_enet_private { + struct napi_struct napi; struct device *dev; /* pointer back to the device (must be initialized first) */ spinlock_t lock; /* during all ops except TX pckt processing */ spinlock_t tx_lock; /* during fs_start_xmit and fs_tx */ - const struct fs_platform_info *fpi; + struct fs_platform_info *fpi; const struct fs_ops *ops; int rx_ring, tx_ring; dma_addr_t ring_mem_addr; - void *ring_base; + void __iomem *ring_base; struct sk_buff **rx_skbuff; struct sk_buff **tx_skbuff; - cbd_t *rx_bd_base; /* Address of Rx and Tx buffers. */ - cbd_t *tx_bd_base; - cbd_t *dirty_tx; /* ring entries to be free()ed. */ - cbd_t *cur_rx; - cbd_t *cur_tx; + cbd_t __iomem *rx_bd_base; /* Address of Rx and Tx buffers. */ + cbd_t __iomem *tx_bd_base; + cbd_t __iomem *dirty_tx; /* ring entries to be free()ed. */ + cbd_t __iomem *cur_rx; + cbd_t __iomem *cur_tx; int tx_free; struct net_device_stats stats; struct timer_list phy_timer_list; @@ -143,7 +96,6 @@ struct fs_enet_private { u32 msg_enable; struct mii_if_info mii_if; unsigned int last_mii_status; - struct fs_enet_mii_bus *mii_bus; int interrupt; struct phy_device *phydev; @@ -161,23 +113,23 @@ struct fs_enet_private { union { struct { int idx; /* FEC1 = 0, FEC2 = 1 */ - void *fecp; /* hw registers */ + void __iomem *fecp; /* hw registers */ u32 hthi, htlo; /* state for multicast */ } fec; struct { int idx; /* FCC1-3 = 0-2 */ - void *fccp; /* hw registers */ - void *ep; /* parameter ram */ - void *fcccp; /* hw registers cont. */ - void *mem; /* FCC DPRAM */ + void __iomem *fccp; /* hw registers */ + void __iomem *ep; /* parameter ram */ + void __iomem *fcccp; /* hw registers cont. 
*/ + void __iomem *mem; /* FCC DPRAM */ u32 gaddrh, gaddrl; /* group address */ } fcc; struct { int idx; /* FEC1 = 0, FEC2 = 1 */ - void *sccp; /* hw registers */ - void *ep; /* parameter ram */ + void __iomem *sccp; /* hw registers */ + void __iomem *ep; /* parameter ram */ u32 hthi, htlo; /* state for multicast */ } scc; @@ -185,9 +137,10 @@ struct fs_enet_private { }; /***************************************************************************/ +#ifndef CONFIG_PPC_CPM_NEW_BINDING int fs_enet_mdio_bb_init(void); -int fs_mii_fixed_init(struct fs_enet_mii_bus *bus); int fs_enet_mdio_fec_init(void); +#endif void fs_init_bds(struct net_device *dev); void fs_cleanup_bds(struct net_device *dev); @@ -247,7 +200,7 @@ extern const struct fs_ops fs_scc_ops; /*******************************************************************/ /* handy pointer to the immap */ -extern void *fs_enet_immap; +extern void __iomem *fs_enet_immap; /*******************************************************************/ diff --git a/drivers/net/fs_enet/mac-fcc.c b/drivers/net/fs_enet/mac-fcc.c index 5603121132cdc2ef1d1ff89b023fd55db1a61fe7..da4efbca646e3b666fa7e9b2c947e2a8a66ab761 100644 --- a/drivers/net/fs_enet/mac-fcc.c +++ b/drivers/net/fs_enet/mac-fcc.c @@ -1,14 +1,14 @@ /* * FCC driver for Motorola MPC82xx (PQ2). * - * Copyright (c) 2003 Intracom S.A. + * Copyright (c) 2003 Intracom S.A. * by Pantelis Antoniou * - * 2005 (c) MontaVista Software, Inc. + * 2005 (c) MontaVista Software, Inc. * Vitaly Bordug * - * This file is licensed under the terms of the GNU General Public License - * version 2. This program is licensed "as is" without any warranty of any + * This file is licensed under the terms of the GNU General Public License + * version 2. This program is licensed "as is" without any warranty of any * kind, whether express or implied. 
*/ @@ -42,34 +42,29 @@ #include #include +#ifdef CONFIG_PPC_CPM_NEW_BINDING +#include +#endif + #include "fs_enet.h" /*************************************************/ /* FCC access macros */ -#define __fcc_out32(addr, x) out_be32((unsigned *)addr, x) -#define __fcc_out16(addr, x) out_be16((unsigned short *)addr, x) -#define __fcc_out8(addr, x) out_8((unsigned char *)addr, x) -#define __fcc_in32(addr) in_be32((unsigned *)addr) -#define __fcc_in16(addr) in_be16((unsigned short *)addr) -#define __fcc_in8(addr) in_8((unsigned char *)addr) - -/* parameter space */ - /* write, read, set bits, clear bits */ -#define W32(_p, _m, _v) __fcc_out32(&(_p)->_m, (_v)) -#define R32(_p, _m) __fcc_in32(&(_p)->_m) +#define W32(_p, _m, _v) out_be32(&(_p)->_m, (_v)) +#define R32(_p, _m) in_be32(&(_p)->_m) #define S32(_p, _m, _v) W32(_p, _m, R32(_p, _m) | (_v)) #define C32(_p, _m, _v) W32(_p, _m, R32(_p, _m) & ~(_v)) -#define W16(_p, _m, _v) __fcc_out16(&(_p)->_m, (_v)) -#define R16(_p, _m) __fcc_in16(&(_p)->_m) +#define W16(_p, _m, _v) out_be16(&(_p)->_m, (_v)) +#define R16(_p, _m) in_be16(&(_p)->_m) #define S16(_p, _m, _v) W16(_p, _m, R16(_p, _m) | (_v)) #define C16(_p, _m, _v) W16(_p, _m, R16(_p, _m) & ~(_v)) -#define W8(_p, _m, _v) __fcc_out8(&(_p)->_m, (_v)) -#define R8(_p, _m) __fcc_in8(&(_p)->_m) +#define W8(_p, _m, _v) out_8(&(_p)->_m, (_v)) +#define R8(_p, _m) in_8(&(_p)->_m) #define S8(_p, _m, _v) W8(_p, _m, R8(_p, _m) | (_v)) #define C8(_p, _m, _v) W8(_p, _m, R8(_p, _m) & ~(_v)) @@ -83,34 +78,62 @@ #define MAX_CR_CMD_LOOPS 10000 -static inline int fcc_cr_cmd(struct fs_enet_private *fep, u32 mcn, u32 op) +static inline int fcc_cr_cmd(struct fs_enet_private *fep, u32 op) { const struct fs_platform_info *fpi = fep->fpi; - - cpm2_map_t *immap = fs_enet_immap; - cpm_cpm2_t *cpmp = &immap->im_cpm; - u32 v; int i; - /* Currently I don't know what feature call will look like. 
But - I guess there'd be something like do_cpm_cmd() which will require page & sblock */ - v = mk_cr_cmd(fpi->cp_page, fpi->cp_block, mcn, op); - W32(cpmp, cp_cpcr, v | CPM_CR_FLG); + W32(cpmp, cp_cpcr, fpi->cp_command | op | CPM_CR_FLG); for (i = 0; i < MAX_CR_CMD_LOOPS; i++) if ((R32(cpmp, cp_cpcr) & CPM_CR_FLG) == 0) - break; + return 0; - if (i >= MAX_CR_CMD_LOOPS) { - printk(KERN_ERR "%s(): Not able to issue CPM command\n", - __FUNCTION__); - return 1; - } - - return 0; + printk(KERN_ERR "%s(): Not able to issue CPM command\n", + __FUNCTION__); + return 1; } static int do_pd_setup(struct fs_enet_private *fep) { +#ifdef CONFIG_PPC_CPM_NEW_BINDING + struct of_device *ofdev = to_of_device(fep->dev); + struct fs_platform_info *fpi = fep->fpi; + int ret = -EINVAL; + + fep->interrupt = of_irq_to_resource(ofdev->node, 0, NULL); + if (fep->interrupt == NO_IRQ) + goto out; + + fep->fcc.fccp = of_iomap(ofdev->node, 0); + if (!fep->fcc.fccp) + goto out; + + fep->fcc.ep = of_iomap(ofdev->node, 1); + if (!fep->fcc.ep) + goto out_fccp; + + fep->fcc.fcccp = of_iomap(ofdev->node, 2); + if (!fep->fcc.fcccp) + goto out_ep; + + fep->fcc.mem = (void __iomem *)cpm2_immr; + fpi->dpram_offset = cpm_dpalloc(128, 8); + if (IS_ERR_VALUE(fpi->dpram_offset)) { + ret = fpi->dpram_offset; + goto out_fcccp; + } + + return 0; + +out_fcccp: + iounmap(fep->fcc.fcccp); +out_ep: + iounmap(fep->fcc.ep); +out_fccp: + iounmap(fep->fcc.fccp); +out: + return ret; +#else struct platform_device *pdev = to_platform_device(fep->dev); struct resource *r; @@ -121,33 +144,33 @@ static int do_pd_setup(struct fs_enet_private *fep) /* Attach the memory for the FCC Parameter RAM */ r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "fcc_pram"); - fep->fcc.ep = (void *)ioremap(r->start, r->end - r->start + 1); + fep->fcc.ep = ioremap(r->start, r->end - r->start + 1); if (fep->fcc.ep == NULL) return -EINVAL; r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "fcc_regs"); - fep->fcc.fccp = (void *)ioremap(r->start, r->end - r->start + 1); + fep->fcc.fccp = ioremap(r->start, r->end - r->start + 1); if (fep->fcc.fccp == NULL) return -EINVAL; if (fep->fpi->fcc_regs_c) { - - fep->fcc.fcccp = (void *)fep->fpi->fcc_regs_c; + fep->fcc.fcccp = (void __iomem *)fep->fpi->fcc_regs_c; } else { r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "fcc_regs_c"); - fep->fcc.fcccp = (void *)ioremap(r->start, + fep->fcc.fcccp = ioremap(r->start, r->end - r->start + 1); } if (fep->fcc.fcccp == NULL) return -EINVAL; - fep->fcc.mem = (void *)fep->fpi->mem_offset; + fep->fcc.mem = (void __iomem *)fep->fpi->mem_offset; if (fep->fcc.mem == NULL) return -EINVAL; return 0; +#endif } #define FCC_NAPI_RX_EVENT_MSK (FCC_ENET_RXF | FCC_ENET_RXB) @@ -158,11 +181,17 @@ static int do_pd_setup(struct fs_enet_private *fep) static int setup_data(struct net_device *dev) { struct fs_enet_private *fep = netdev_priv(dev); - const struct fs_platform_info *fpi = fep->fpi; +#ifndef CONFIG_PPC_CPM_NEW_BINDING + struct fs_platform_info *fpi = fep->fpi; + + fpi->cp_command = (fpi->cp_page << 26) | + (fpi->cp_block << 21) | + (12 << 6); fep->fcc.idx = fs_get_fcc_index(fpi->fs_no); if ((unsigned int)fep->fcc.idx >= 3) /* max 3 FCCs */ return -EINVAL; +#endif if (do_pd_setup(fep) != 0) return -EINVAL; @@ -180,7 +209,7 @@ static int allocate_bd(struct net_device *dev) struct fs_enet_private *fep = netdev_priv(dev); const struct fs_platform_info *fpi = fep->fpi; - fep->ring_base = dma_alloc_coherent(fep->dev, + fep->ring_base = (void __iomem __force 
*)dma_alloc_coherent(fep->dev, (fpi->tx_ring + fpi->rx_ring) * sizeof(cbd_t), &fep->ring_mem_addr, GFP_KERNEL); @@ -198,7 +227,7 @@ static void free_bd(struct net_device *dev) if (fep->ring_base) dma_free_coherent(fep->dev, (fpi->tx_ring + fpi->rx_ring) * sizeof(cbd_t), - fep->ring_base, fep->ring_mem_addr); + (void __force *)fep->ring_base, fep->ring_mem_addr); } static void cleanup_data(struct net_device *dev) @@ -209,7 +238,7 @@ static void cleanup_data(struct net_device *dev) static void set_promiscuous_mode(struct net_device *dev) { struct fs_enet_private *fep = netdev_priv(dev); - fcc_t *fccp = fep->fcc.fccp; + fcc_t __iomem *fccp = fep->fcc.fccp; S32(fccp, fcc_fpsmr, FCC_PSMR_PRO); } @@ -217,7 +246,7 @@ static void set_promiscuous_mode(struct net_device *dev) static void set_multicast_start(struct net_device *dev) { struct fs_enet_private *fep = netdev_priv(dev); - fcc_enet_t *ep = fep->fcc.ep; + fcc_enet_t __iomem *ep = fep->fcc.ep; W32(ep, fen_gaddrh, 0); W32(ep, fen_gaddrl, 0); @@ -226,7 +255,7 @@ static void set_multicast_start(struct net_device *dev) static void set_multicast_one(struct net_device *dev, const u8 *mac) { struct fs_enet_private *fep = netdev_priv(dev); - fcc_enet_t *ep = fep->fcc.ep; + fcc_enet_t __iomem *ep = fep->fcc.ep; u16 taddrh, taddrm, taddrl; taddrh = ((u16)mac[5] << 8) | mac[4]; @@ -236,14 +265,14 @@ static void set_multicast_one(struct net_device *dev, const u8 *mac) W16(ep, fen_taddrh, taddrh); W16(ep, fen_taddrm, taddrm); W16(ep, fen_taddrl, taddrl); - fcc_cr_cmd(fep, 0x0C, CPM_CR_SET_GADDR); + fcc_cr_cmd(fep, CPM_CR_SET_GADDR); } static void set_multicast_finish(struct net_device *dev) { struct fs_enet_private *fep = netdev_priv(dev); - fcc_t *fccp = fep->fcc.fccp; - fcc_enet_t *ep = fep->fcc.ep; + fcc_t __iomem *fccp = fep->fcc.fccp; + fcc_enet_t __iomem *ep = fep->fcc.ep; /* clear promiscuous always */ C32(fccp, fcc_fpsmr, FCC_PSMR_PRO); @@ -278,12 +307,14 @@ static void restart(struct net_device *dev) { struct fs_enet_private *fep = netdev_priv(dev); const struct fs_platform_info *fpi = fep->fpi; - fcc_t *fccp = fep->fcc.fccp; - fcc_c_t *fcccp = fep->fcc.fcccp; - fcc_enet_t *ep = fep->fcc.ep; + fcc_t __iomem *fccp = fep->fcc.fccp; + fcc_c_t __iomem *fcccp = fep->fcc.fcccp; + fcc_enet_t __iomem *ep = fep->fcc.ep; dma_addr_t rx_bd_base_phys, tx_bd_base_phys; u16 paddrh, paddrm, paddrl; +#ifndef CONFIG_PPC_CPM_NEW_BINDING u16 mem_addr; +#endif const unsigned char *mac; int i; @@ -291,7 +322,7 @@ static void restart(struct net_device *dev) /* clear everything (slow & steady does it) */ for (i = 0; i < sizeof(*ep); i++) - __fcc_out8((char *)ep + i, 0); + out_8((u8 __iomem *)ep + i, 0); /* get physical address */ rx_bd_base_phys = fep->ring_mem_addr; @@ -315,14 +346,22 @@ static void restart(struct net_device *dev) * this area. */ +#ifdef CONFIG_PPC_CPM_NEW_BINDING + W16(ep, fen_genfcc.fcc_riptr, fpi->dpram_offset); + W16(ep, fen_genfcc.fcc_tiptr, fpi->dpram_offset + 32); + + W16(ep, fen_padptr, fpi->dpram_offset + 64); +#else mem_addr = (u32) fep->fcc.mem; /* de-fixup dpram offset */ W16(ep, fen_genfcc.fcc_riptr, (mem_addr & 0xffff)); W16(ep, fen_genfcc.fcc_tiptr, ((mem_addr + 32) & 0xffff)); + W16(ep, fen_padptr, mem_addr + 64); +#endif /* fill with special symbol... 
*/ - memset(fep->fcc.mem + fpi->dpram_offset + 64, 0x88, 32); + memset_io(fep->fcc.mem + fpi->dpram_offset + 64, 0x88, 32); W32(ep, fen_genfcc.fcc_rbptr, 0); W32(ep, fen_genfcc.fcc_tbptr, 0); @@ -407,7 +446,7 @@ static void restart(struct net_device *dev) S8(fcccp, fcc_gfemr, 0x20); } - fcc_cr_cmd(fep, 0x0c, CPM_CR_INIT_TRX); + fcc_cr_cmd(fep, CPM_CR_INIT_TRX); /* clear events */ W16(fccp, fcc_fcce, 0xffff); @@ -438,7 +477,7 @@ static void restart(struct net_device *dev) static void stop(struct net_device *dev) { struct fs_enet_private *fep = netdev_priv(dev); - fcc_t *fccp = fep->fcc.fccp; + fcc_t __iomem *fccp = fep->fcc.fccp; /* stop ethernet */ C32(fccp, fcc_gfmr, FCC_GFMR_ENR | FCC_GFMR_ENT); @@ -465,7 +504,7 @@ static void post_free_irq(struct net_device *dev, int irq) static void napi_clear_rx_event(struct net_device *dev) { struct fs_enet_private *fep = netdev_priv(dev); - fcc_t *fccp = fep->fcc.fccp; + fcc_t __iomem *fccp = fep->fcc.fccp; W16(fccp, fcc_fcce, FCC_NAPI_RX_EVENT_MSK); } @@ -473,7 +512,7 @@ static void napi_clear_rx_event(struct net_device *dev) static void napi_enable_rx(struct net_device *dev) { struct fs_enet_private *fep = netdev_priv(dev); - fcc_t *fccp = fep->fcc.fccp; + fcc_t __iomem *fccp = fep->fcc.fccp; S16(fccp, fcc_fccm, FCC_NAPI_RX_EVENT_MSK); } @@ -481,7 +520,7 @@ static void napi_enable_rx(struct net_device *dev) static void napi_disable_rx(struct net_device *dev) { struct fs_enet_private *fep = netdev_priv(dev); - fcc_t *fccp = fep->fcc.fccp; + fcc_t __iomem *fccp = fep->fcc.fccp; C16(fccp, fcc_fccm, FCC_NAPI_RX_EVENT_MSK); } @@ -494,15 +533,15 @@ static void rx_bd_done(struct net_device *dev) static void tx_kickstart(struct net_device *dev) { struct fs_enet_private *fep = netdev_priv(dev); - fcc_t *fccp = fep->fcc.fccp; + fcc_t __iomem *fccp = fep->fcc.fccp; - S32(fccp, fcc_ftodr, 0x80); + S16(fccp, fcc_ftodr, 0x8000); } static u32 get_int_events(struct net_device *dev) { struct fs_enet_private *fep = netdev_priv(dev); - fcc_t *fccp = fep->fcc.fccp; + fcc_t __iomem *fccp = fep->fcc.fccp; return (u32)R16(fccp, fcc_fcce); } @@ -510,7 +549,7 @@ static u32 get_int_events(struct net_device *dev) static void clear_int_events(struct net_device *dev, u32 int_events) { struct fs_enet_private *fep = netdev_priv(dev); - fcc_t *fccp = fep->fcc.fccp; + fcc_t __iomem *fccp = fep->fcc.fccp; W16(fccp, fcc_fcce, int_events & 0xffff); } @@ -521,47 +560,46 @@ static void ev_error(struct net_device *dev, u32 int_events) ": %s FS_ENET ERROR(s) 0x%x\n", dev->name, int_events); } -int get_regs(struct net_device *dev, void *p, int *sizep) +static int get_regs(struct net_device *dev, void *p, int *sizep) { struct fs_enet_private *fep = netdev_priv(dev); - if (*sizep < sizeof(fcc_t) + sizeof(fcc_c_t) + sizeof(fcc_enet_t)) + if (*sizep < sizeof(fcc_t) + sizeof(fcc_enet_t) + 1) return -EINVAL; memcpy_fromio(p, fep->fcc.fccp, sizeof(fcc_t)); p = (char *)p + sizeof(fcc_t); - memcpy_fromio(p, fep->fcc.fcccp, sizeof(fcc_c_t)); - p = (char *)p + sizeof(fcc_c_t); - memcpy_fromio(p, fep->fcc.ep, sizeof(fcc_enet_t)); + p = (char *)p + sizeof(fcc_enet_t); + memcpy_fromio(p, fep->fcc.fcccp, 1); return 0; } -int get_regs_len(struct net_device *dev) +static int get_regs_len(struct net_device *dev) { - return sizeof(fcc_t) + sizeof(fcc_c_t) + sizeof(fcc_enet_t); + return sizeof(fcc_t) + sizeof(fcc_enet_t) + 1; } /* Some transmit errors cause the transmitter to shut * down. We now issue a restart transmit. 
Since the * errors close the BD and update the pointers, the restart * _should_ pick up without having to reset any of our - * pointers either. Also, To workaround 8260 device erratum + * pointers either. Also, To workaround 8260 device erratum * CPM37, we must disable and then re-enable the transmitter * following a Late Collision, Underrun, or Retry Limit error. */ -void tx_restart(struct net_device *dev) +static void tx_restart(struct net_device *dev) { struct fs_enet_private *fep = netdev_priv(dev); - fcc_t *fccp = fep->fcc.fccp; + fcc_t __iomem *fccp = fep->fcc.fccp; C32(fccp, fcc_gfmr, FCC_GFMR_ENT); udelay(10); S32(fccp, fcc_gfmr, FCC_GFMR_ENT); - fcc_cr_cmd(fep, 0x0C, CPM_CR_RESTART_TX); + fcc_cr_cmd(fep, CPM_CR_RESTART_TX); } /*************************************************************************/ diff --git a/drivers/net/fs_enet/mac-fec.c b/drivers/net/fs_enet/mac-fec.c index 04b4f80a1cde50d679996559fbf6f56091030584..c1fee48517e3eaaab58ed60a66be363db9b3c1c8 100644 --- a/drivers/net/fs_enet/mac-fec.c +++ b/drivers/net/fs_enet/mac-fec.c @@ -1,14 +1,14 @@ /* * Freescale Ethernet controllers * - * Copyright (c) 2005 Intracom S.A. + * Copyright (c) 2005 Intracom S.A. * by Pantelis Antoniou * - * 2005 (c) MontaVista Software, Inc. + * 2005 (c) MontaVista Software, Inc. * Vitaly Bordug * - * This file is licensed under the terms of the GNU General Public License - * version 2. This program is licensed "as is" without any warranty of any + * This file is licensed under the terms of the GNU General Public License + * version 2. This program is licensed "as is" without any warranty of any * kind, whether express or implied. */ @@ -43,6 +43,10 @@ #include #endif +#ifdef CONFIG_PPC_CPM_NEW_BINDING +#include +#endif + #include "fs_enet.h" #include "fec.h" @@ -79,7 +83,7 @@ */ #define FEC_RESET_DELAY 50 -static int whack_reset(fec_t * fecp) +static int whack_reset(fec_t __iomem *fecp) { int i; @@ -95,9 +99,22 @@ static int whack_reset(fec_t * fecp) static int do_pd_setup(struct fs_enet_private *fep) { - struct platform_device *pdev = to_platform_device(fep->dev); +#ifdef CONFIG_PPC_CPM_NEW_BINDING + struct of_device *ofdev = to_of_device(fep->dev); + + fep->interrupt = of_irq_to_resource(ofdev->node, 0, NULL); + if (fep->interrupt == NO_IRQ) + return -EINVAL; + + fep->fec.fecp = of_iomap(ofdev->node, 0); + if (!fep->fcc.fccp) + return -EINVAL; + + return 0; +#else + struct platform_device *pdev = to_platform_device(fep->dev); struct resource *r; - + /* Fill out IRQ field */ fep->interrupt = platform_get_irq_byname(pdev,"interrupt"); if (fep->interrupt < 0) @@ -110,7 +127,7 @@ static int do_pd_setup(struct fs_enet_private *fep) return -EINVAL; return 0; - +#endif } #define FEC_NAPI_RX_EVENT_MSK (FEC_ENET_RXF | FEC_ENET_RXB) @@ -141,8 +158,8 @@ static int allocate_bd(struct net_device *dev) { struct fs_enet_private *fep = netdev_priv(dev); const struct fs_platform_info *fpi = fep->fpi; - - fep->ring_base = dma_alloc_coherent(fep->dev, + + fep->ring_base = (void __force __iomem *)dma_alloc_coherent(fep->dev, (fpi->tx_ring + fpi->rx_ring) * sizeof(cbd_t), &fep->ring_mem_addr, GFP_KERNEL); @@ -160,7 +177,7 @@ static void free_bd(struct net_device *dev) if(fep->ring_base) dma_free_coherent(fep->dev, (fpi->tx_ring + fpi->rx_ring) * sizeof(cbd_t), - fep->ring_base, + (void __force *)fep->ring_base, fep->ring_mem_addr); } @@ -172,7 +189,7 @@ static void cleanup_data(struct net_device *dev) static void set_promiscuous_mode(struct net_device *dev) { struct fs_enet_private *fep = netdev_priv(dev); - 
fec_t *fecp = fep->fec.fecp; + fec_t __iomem *fecp = fep->fec.fecp; FS(fecp, r_cntrl, FEC_RCNTRL_PROM); } @@ -220,7 +237,7 @@ static void set_multicast_one(struct net_device *dev, const u8 *mac) static void set_multicast_finish(struct net_device *dev) { struct fs_enet_private *fep = netdev_priv(dev); - fec_t *fecp = fep->fec.fecp; + fec_t __iomem *fecp = fep->fec.fecp; /* if all multi or too many multicasts; just enable all */ if ((dev->flags & IFF_ALLMULTI) != 0 || @@ -254,7 +271,7 @@ static void restart(struct net_device *dev) u32 cptr; #endif struct fs_enet_private *fep = netdev_priv(dev); - fec_t *fecp = fep->fec.fecp; + fec_t __iomem *fecp = fep->fec.fecp; const struct fs_platform_info *fpi = fep->fpi; dma_addr_t rx_bd_base_phys, tx_bd_base_phys; int r; @@ -280,13 +297,13 @@ static void restart(struct net_device *dev) FW(fecp, addr_high, addrlo); /* - * Reset all multicast. + * Reset all multicast. */ FW(fecp, hash_table_high, fep->fec.hthi); FW(fecp, hash_table_low, fep->fec.htlo); /* - * Set maximum receive buffer size. + * Set maximum receive buffer size. */ FW(fecp, r_buff_size, PKT_MAXBLR_SIZE); FW(fecp, r_hash, PKT_MAXBUF_SIZE); @@ -296,7 +313,7 @@ static void restart(struct net_device *dev) tx_bd_base_phys = rx_bd_base_phys + sizeof(cbd_t) * fpi->rx_ring; /* - * Set receive and transmit descriptor base. + * Set receive and transmit descriptor base. */ FW(fecp, r_des_start, rx_bd_base_phys); FW(fecp, x_des_start, tx_bd_base_phys); @@ -304,7 +321,7 @@ static void restart(struct net_device *dev) fs_init_bds(dev); /* - * Enable big endian and don't care about SDMA FC. + * Enable big endian and don't care about SDMA FC. */ FW(fecp, fun_code, 0x78000000); @@ -366,13 +383,13 @@ static void restart(struct net_device *dev) } /* - * Enable interrupts we wish to service. + * Enable interrupts we wish to service. */ FW(fecp, imask, FEC_ENET_TXF | FEC_ENET_TXB | FEC_ENET_RXF | FEC_ENET_RXB); /* - * And last, enable the transmit and receive processing. + * And last, enable the transmit and receive processing. */ FW(fecp, ecntrl, FEC_ECNTRL_PINMUX | FEC_ECNTRL_ETHER_EN); FW(fecp, r_des_active, 0x01000000); @@ -382,7 +399,7 @@ static void stop(struct net_device *dev) { struct fs_enet_private *fep = netdev_priv(dev); const struct fs_platform_info *fpi = fep->fpi; - fec_t *fecp = fep->fec.fecp; + fec_t __iomem *fecp = fep->fec.fecp; struct fec_info* feci= fep->phydev->bus->priv; @@ -401,7 +418,7 @@ static void stop(struct net_device *dev) ": %s FEC timeout on graceful transmit stop\n", dev->name); /* - * Disable FEC. Let only MII interrupts. + * Disable FEC. Let only MII interrupts. 
*/ FW(fecp, imask, 0); FC(fecp, ecntrl, FEC_ECNTRL_ETHER_EN); @@ -444,7 +461,7 @@ static void post_free_irq(struct net_device *dev, int irq) static void napi_clear_rx_event(struct net_device *dev) { struct fs_enet_private *fep = netdev_priv(dev); - fec_t *fecp = fep->fec.fecp; + fec_t __iomem *fecp = fep->fec.fecp; FW(fecp, ievent, FEC_NAPI_RX_EVENT_MSK); } @@ -452,7 +469,7 @@ static void napi_clear_rx_event(struct net_device *dev) static void napi_enable_rx(struct net_device *dev) { struct fs_enet_private *fep = netdev_priv(dev); - fec_t *fecp = fep->fec.fecp; + fec_t __iomem *fecp = fep->fec.fecp; FS(fecp, imask, FEC_NAPI_RX_EVENT_MSK); } @@ -460,7 +477,7 @@ static void napi_enable_rx(struct net_device *dev) static void napi_disable_rx(struct net_device *dev) { struct fs_enet_private *fep = netdev_priv(dev); - fec_t *fecp = fep->fec.fecp; + fec_t __iomem *fecp = fep->fec.fecp; FC(fecp, imask, FEC_NAPI_RX_EVENT_MSK); } @@ -468,7 +485,7 @@ static void napi_disable_rx(struct net_device *dev) static void rx_bd_done(struct net_device *dev) { struct fs_enet_private *fep = netdev_priv(dev); - fec_t *fecp = fep->fec.fecp; + fec_t __iomem *fecp = fep->fec.fecp; FW(fecp, r_des_active, 0x01000000); } @@ -476,7 +493,7 @@ static void rx_bd_done(struct net_device *dev) static void tx_kickstart(struct net_device *dev) { struct fs_enet_private *fep = netdev_priv(dev); - fec_t *fecp = fep->fec.fecp; + fec_t __iomem *fecp = fep->fec.fecp; FW(fecp, x_des_active, 0x01000000); } @@ -484,7 +501,7 @@ static void tx_kickstart(struct net_device *dev) static u32 get_int_events(struct net_device *dev) { struct fs_enet_private *fep = netdev_priv(dev); - fec_t *fecp = fep->fec.fecp; + fec_t __iomem *fecp = fep->fec.fecp; return FR(fecp, ievent) & FR(fecp, imask); } @@ -492,7 +509,7 @@ static u32 get_int_events(struct net_device *dev) static void clear_int_events(struct net_device *dev, u32 int_events) { struct fs_enet_private *fep = netdev_priv(dev); - fec_t *fecp = fep->fec.fecp; + fec_t __iomem *fecp = fep->fec.fecp; FW(fecp, ievent, int_events); } @@ -503,7 +520,7 @@ static void ev_error(struct net_device *dev, u32 int_events) ": %s FEC ERROR(s) 0x%x\n", dev->name, int_events); } -int get_regs(struct net_device *dev, void *p, int *sizep) +static int get_regs(struct net_device *dev, void *p, int *sizep) { struct fs_enet_private *fep = netdev_priv(dev); @@ -515,12 +532,12 @@ int get_regs(struct net_device *dev, void *p, int *sizep) return 0; } -int get_regs_len(struct net_device *dev) +static int get_regs_len(struct net_device *dev) { return sizeof(fec_t); } -void tx_restart(struct net_device *dev) +static void tx_restart(struct net_device *dev) { /* nothing */ } diff --git a/drivers/net/fs_enet/mac-scc.c b/drivers/net/fs_enet/mac-scc.c index 7540966687ec24a84cc2064b9cb8b752e7c37947..03134f47a4eb602489f3ca2c10f4ebcbe8076bd6 100644 --- a/drivers/net/fs_enet/mac-scc.c +++ b/drivers/net/fs_enet/mac-scc.c @@ -1,14 +1,14 @@ /* * Ethernet on Serial Communications Controller (SCC) driver for Motorola MPC8xx and MPC82xx. * - * Copyright (c) 2003 Intracom S.A. + * Copyright (c) 2003 Intracom S.A. * by Pantelis Antoniou - * - * 2005 (c) MontaVista Software, Inc. + * + * 2005 (c) MontaVista Software, Inc. * Vitaly Bordug * - * This file is licensed under the terms of the GNU General Public License - * version 2. This program is licensed "as is" without any warranty of any + * This file is licensed under the terms of the GNU General Public License + * version 2. 
This program is licensed "as is" without any warranty of any * kind, whether express or implied. */ @@ -43,6 +43,10 @@ #include #endif +#ifdef CONFIG_PPC_CPM_NEW_BINDING +#include +#endif + #include "fs_enet.h" /*************************************************/ @@ -82,34 +86,45 @@ #define SCC_MAX_MULTICAST_ADDRS 64 /* - * Delay to wait for SCC reset command to complete (in us) + * Delay to wait for SCC reset command to complete (in us) */ #define SCC_RESET_DELAY 50 #define MAX_CR_CMD_LOOPS 10000 static inline int scc_cr_cmd(struct fs_enet_private *fep, u32 op) { - cpm8xx_t *cpmp = &((immap_t *)fs_enet_immap)->im_cpm; - u32 v, ch; - int i = 0; + const struct fs_platform_info *fpi = fep->fpi; + int i; - ch = fep->scc.idx << 2; - v = mk_cr_cmd(ch, op); - W16(cpmp, cp_cpcr, v | CPM_CR_FLG); + W16(cpmp, cp_cpcr, fpi->cp_command | CPM_CR_FLG | (op << 8)); for (i = 0; i < MAX_CR_CMD_LOOPS; i++) if ((R16(cpmp, cp_cpcr) & CPM_CR_FLG) == 0) - break; + return 0; - if (i >= MAX_CR_CMD_LOOPS) { - printk(KERN_ERR "%s(): Not able to issue CPM command\n", - __FUNCTION__); - return 1; - } - return 0; + printk(KERN_ERR "%s(): Not able to issue CPM command\n", + __FUNCTION__); + return 1; } static int do_pd_setup(struct fs_enet_private *fep) { +#ifdef CONFIG_PPC_CPM_NEW_BINDING + struct of_device *ofdev = to_of_device(fep->dev); + + fep->interrupt = of_irq_to_resource(ofdev->node, 0, NULL); + if (fep->interrupt == NO_IRQ) + return -EINVAL; + + fep->scc.sccp = of_iomap(ofdev->node, 0); + if (!fep->scc.sccp) + return -EINVAL; + + fep->scc.ep = of_iomap(ofdev->node, 1); + if (!fep->scc.ep) { + iounmap(fep->scc.sccp); + return -EINVAL; + } +#else struct platform_device *pdev = to_platform_device(fep->dev); struct resource *r; @@ -129,6 +144,7 @@ static int do_pd_setup(struct fs_enet_private *fep) if (fep->scc.ep == NULL) return -EINVAL; +#endif return 0; } @@ -141,12 +157,17 @@ static int do_pd_setup(struct fs_enet_private *fep) static int setup_data(struct net_device *dev) { struct fs_enet_private *fep = netdev_priv(dev); - const struct fs_platform_info *fpi = fep->fpi; + +#ifdef CONFIG_PPC_CPM_NEW_BINDING + struct fs_platform_info *fpi = fep->fpi; fep->scc.idx = fs_get_scc_index(fpi->fs_no); - if ((unsigned int)fep->fcc.idx > 4) /* max 4 SCCs */ + if ((unsigned int)fep->fcc.idx >= 4) /* max 4 SCCs */ return -EINVAL; + fpi->cp_command = fep->fcc.idx << 6; +#endif + do_pd_setup(fep); fep->scc.hthi = 0; @@ -154,7 +175,7 @@ static int setup_data(struct net_device *dev) fep->ev_napi_rx = SCC_NAPI_RX_EVENT_MSK; fep->ev_rx = SCC_RX_EVENT; - fep->ev_tx = SCC_TX_EVENT; + fep->ev_tx = SCC_TX_EVENT | SCCE_ENET_TXE; fep->ev_err = SCC_ERR_EVENT_MSK; return 0; @@ -170,7 +191,8 @@ static int allocate_bd(struct net_device *dev) if (IS_ERR_VALUE(fep->ring_mem_addr)) return -ENOMEM; - fep->ring_base = cpm_dpram_addr(fep->ring_mem_addr); + fep->ring_base = (void __iomem __force*) + cpm_dpram_addr(fep->ring_mem_addr); return 0; } @@ -189,9 +211,9 @@ static void cleanup_data(struct net_device *dev) } static void set_promiscuous_mode(struct net_device *dev) -{ +{ struct fs_enet_private *fep = netdev_priv(dev); - scc_t *sccp = fep->scc.sccp; + scc_t __iomem *sccp = fep->scc.sccp; S16(sccp, scc_psmr, SCC_PSMR_PRO); } @@ -199,7 +221,7 @@ static void set_promiscuous_mode(struct net_device *dev) static void set_multicast_start(struct net_device *dev) { struct fs_enet_private *fep = netdev_priv(dev); - scc_enet_t *ep = fep->scc.ep; + scc_enet_t __iomem *ep = fep->scc.ep; W16(ep, sen_gaddr1, 0); W16(ep, sen_gaddr2, 0); @@ -210,7 +232,7 
@@ static void set_multicast_start(struct net_device *dev) static void set_multicast_one(struct net_device *dev, const u8 * mac) { struct fs_enet_private *fep = netdev_priv(dev); - scc_enet_t *ep = fep->scc.ep; + scc_enet_t __iomem *ep = fep->scc.ep; u16 taddrh, taddrm, taddrl; taddrh = ((u16) mac[5] << 8) | mac[4]; @@ -226,8 +248,8 @@ static void set_multicast_one(struct net_device *dev, const u8 * mac) static void set_multicast_finish(struct net_device *dev) { struct fs_enet_private *fep = netdev_priv(dev); - scc_t *sccp = fep->scc.sccp; - scc_enet_t *ep = fep->scc.ep; + scc_t __iomem *sccp = fep->scc.sccp; + scc_enet_t __iomem *ep = fep->scc.ep; /* clear promiscuous always */ C16(sccp, scc_psmr, SCC_PSMR_PRO); @@ -264,8 +286,8 @@ static void set_multicast_list(struct net_device *dev) static void restart(struct net_device *dev) { struct fs_enet_private *fep = netdev_priv(dev); - scc_t *sccp = fep->scc.sccp; - scc_enet_t *ep = fep->scc.ep; + scc_t __iomem *sccp = fep->scc.sccp; + scc_enet_t __iomem *ep = fep->scc.ep; const struct fs_platform_info *fpi = fep->fpi; u16 paddrh, paddrm, paddrl; const unsigned char *mac; @@ -275,7 +297,7 @@ static void restart(struct net_device *dev) /* clear everything (slow & steady does it) */ for (i = 0; i < sizeof(*ep); i++) - __fs_out8((char *)ep + i, 0); + __fs_out8((u8 __iomem *)ep + i, 0); /* point to bds */ W16(ep, sen_genscc.scc_rbase, fep->ring_mem_addr); @@ -323,7 +345,7 @@ static void restart(struct net_device *dev) W16(ep, sen_iaddr3, 0); W16(ep, sen_iaddr4, 0); - /* set address + /* set address */ mac = dev->dev_addr; paddrh = ((u16) mac[5] << 8) | mac[4]; @@ -345,7 +367,7 @@ static void restart(struct net_device *dev) W16(sccp, scc_scce, 0xffff); - /* Enable interrupts we wish to service. + /* Enable interrupts we wish to service. 
*/ W16(sccp, scc_sccm, SCCE_ENET_TXE | SCCE_ENET_RXF | SCCE_ENET_TXB); @@ -373,10 +395,10 @@ static void restart(struct net_device *dev) S32(sccp, scc_gsmrl, SCC_GSMRL_ENR | SCC_GSMRL_ENT); } -static void stop(struct net_device *dev) +static void stop(struct net_device *dev) { struct fs_enet_private *fep = netdev_priv(dev); - scc_t *sccp = fep->scc.sccp; + scc_t __iomem *sccp = fep->scc.sccp; int i; for (i = 0; (R16(sccp, scc_sccm) == 0) && i < SCC_RESET_DELAY; i++) @@ -420,7 +442,7 @@ static void post_free_irq(struct net_device *dev, int irq) static void napi_clear_rx_event(struct net_device *dev) { struct fs_enet_private *fep = netdev_priv(dev); - scc_t *sccp = fep->scc.sccp; + scc_t __iomem *sccp = fep->scc.sccp; W16(sccp, scc_scce, SCC_NAPI_RX_EVENT_MSK); } @@ -428,7 +450,7 @@ static void napi_clear_rx_event(struct net_device *dev) static void napi_enable_rx(struct net_device *dev) { struct fs_enet_private *fep = netdev_priv(dev); - scc_t *sccp = fep->scc.sccp; + scc_t __iomem *sccp = fep->scc.sccp; S16(sccp, scc_sccm, SCC_NAPI_RX_EVENT_MSK); } @@ -436,7 +458,7 @@ static void napi_enable_rx(struct net_device *dev) static void napi_disable_rx(struct net_device *dev) { struct fs_enet_private *fep = netdev_priv(dev); - scc_t *sccp = fep->scc.sccp; + scc_t __iomem *sccp = fep->scc.sccp; C16(sccp, scc_sccm, SCC_NAPI_RX_EVENT_MSK); } @@ -454,7 +476,7 @@ static void tx_kickstart(struct net_device *dev) static u32 get_int_events(struct net_device *dev) { struct fs_enet_private *fep = netdev_priv(dev); - scc_t *sccp = fep->scc.sccp; + scc_t __iomem *sccp = fep->scc.sccp; return (u32) R16(sccp, scc_scce); } @@ -462,7 +484,7 @@ static u32 get_int_events(struct net_device *dev) static void clear_int_events(struct net_device *dev, u32 int_events) { struct fs_enet_private *fep = netdev_priv(dev); - scc_t *sccp = fep->scc.sccp; + scc_t __iomem *sccp = fep->scc.sccp; W16(sccp, scc_scce, int_events & 0xffff); } @@ -477,20 +499,20 @@ static int get_regs(struct net_device *dev, void *p, int *sizep) { struct fs_enet_private *fep = netdev_priv(dev); - if (*sizep < sizeof(scc_t) + sizeof(scc_enet_t)) + if (*sizep < sizeof(scc_t) + sizeof(scc_enet_t __iomem *)) return -EINVAL; memcpy_fromio(p, fep->scc.sccp, sizeof(scc_t)); p = (char *)p + sizeof(scc_t); - memcpy_fromio(p, fep->scc.ep, sizeof(scc_enet_t)); + memcpy_fromio(p, fep->scc.ep, sizeof(scc_enet_t __iomem *)); return 0; } static int get_regs_len(struct net_device *dev) { - return sizeof(scc_t) + sizeof(scc_enet_t); + return sizeof(scc_t) + sizeof(scc_enet_t __iomem *); } static void tx_restart(struct net_device *dev) diff --git a/drivers/net/fs_enet/mii-bitbang.c b/drivers/net/fs_enet/mii-bitbang.c index d3840108ffbdbeebbad4f287937d1c497d275afb..b8e4a736a130e6c79d537252f4fc2da3e7c2dddf 100644 --- a/drivers/net/fs_enet/mii-bitbang.c +++ b/drivers/net/fs_enet/mii-bitbang.c @@ -1,314 +1,288 @@ /* * Combined Ethernet driver for Motorola MPC8xx and MPC82xx. * - * Copyright (c) 2003 Intracom S.A. + * Copyright (c) 2003 Intracom S.A. * by Pantelis Antoniou - * - * 2005 (c) MontaVista Software, Inc. + * + * 2005 (c) MontaVista Software, Inc. * Vitaly Bordug * - * This file is licensed under the terms of the GNU General Public License - * version 2. This program is licensed "as is" without any warranty of any + * This file is licensed under the terms of the GNU General Public License + * version 2. This program is licensed "as is" without any warranty of any * kind, whether express or implied. 
*/ - #include -#include -#include -#include -#include -#include #include #include -#include #include -#include +#include #include #include -#include -#include #include -#include -#include #include +#include -#include -#include -#include +#ifdef CONFIG_PPC_CPM_NEW_BINDING +#include +#endif #include "fs_enet.h" -static int bitbang_prep_bit(u8 **datp, u8 *mskp, - struct fs_mii_bit *mii_bit) -{ - void *dat; - int adv; - u8 msk; - - dat = (void*) mii_bit->offset; - - adv = mii_bit->bit >> 3; - dat = (char *)dat + adv; - - msk = 1 << (7 - (mii_bit->bit & 7)); - - *datp = dat; - *mskp = msk; - - return 0; -} +struct bb_info { + struct mdiobb_ctrl ctrl; + __be32 __iomem *dir; + __be32 __iomem *dat; + u32 mdio_msk; + u32 mdc_msk; +}; -static inline void bb_set(u8 *p, u8 m) +/* FIXME: If any other users of GPIO crop up, then these will have to + * have some sort of global synchronization to avoid races with other + * pins on the same port. The ideal solution would probably be to + * bind the ports to a GPIO driver, and have this be a client of it. + */ +static inline void bb_set(u32 __iomem *p, u32 m) { - out_8(p, in_8(p) | m); + out_be32(p, in_be32(p) | m); } -static inline void bb_clr(u8 *p, u8 m) +static inline void bb_clr(u32 __iomem *p, u32 m) { - out_8(p, in_8(p) & ~m); + out_be32(p, in_be32(p) & ~m); } -static inline int bb_read(u8 *p, u8 m) +static inline int bb_read(u32 __iomem *p, u32 m) { - return (in_8(p) & m) != 0; + return (in_be32(p) & m) != 0; } -static inline void mdio_active(struct bb_info *bitbang) +static inline void mdio_dir(struct mdiobb_ctrl *ctrl, int dir) { - bb_set(bitbang->mdio_dir, bitbang->mdio_dir_msk); -} + struct bb_info *bitbang = container_of(ctrl, struct bb_info, ctrl); -static inline void mdio_tristate(struct bb_info *bitbang ) -{ - bb_clr(bitbang->mdio_dir, bitbang->mdio_dir_msk); + if (dir) + bb_set(bitbang->dir, bitbang->mdio_msk); + else + bb_clr(bitbang->dir, bitbang->mdio_msk); + + /* Read back to flush the write. */ + in_be32(bitbang->dir); } -static inline int mdio_read(struct bb_info *bitbang ) +static inline int mdio_read(struct mdiobb_ctrl *ctrl) { - return bb_read(bitbang->mdio_dat, bitbang->mdio_dat_msk); + struct bb_info *bitbang = container_of(ctrl, struct bb_info, ctrl); + return bb_read(bitbang->dat, bitbang->mdio_msk); } -static inline void mdio(struct bb_info *bitbang , int what) +static inline void mdio(struct mdiobb_ctrl *ctrl, int what) { + struct bb_info *bitbang = container_of(ctrl, struct bb_info, ctrl); + if (what) - bb_set(bitbang->mdio_dat, bitbang->mdio_dat_msk); + bb_set(bitbang->dat, bitbang->mdio_msk); else - bb_clr(bitbang->mdio_dat, bitbang->mdio_dat_msk); + bb_clr(bitbang->dat, bitbang->mdio_msk); + + /* Read back to flush the write. */ + in_be32(bitbang->dat); } -static inline void mdc(struct bb_info *bitbang , int what) +static inline void mdc(struct mdiobb_ctrl *ctrl, int what) { + struct bb_info *bitbang = container_of(ctrl, struct bb_info, ctrl); + if (what) - bb_set(bitbang->mdc_dat, bitbang->mdc_msk); + bb_set(bitbang->dat, bitbang->mdc_msk); else - bb_clr(bitbang->mdc_dat, bitbang->mdc_msk); + bb_clr(bitbang->dat, bitbang->mdc_msk); + + /* Read back to flush the write. 
*/ + in_be32(bitbang->dat); } -static inline void mii_delay(struct bb_info *bitbang ) +static struct mdiobb_ops bb_ops = { + .owner = THIS_MODULE, + .set_mdc = mdc, + .set_mdio_dir = mdio_dir, + .set_mdio_data = mdio, + .get_mdio_data = mdio_read, +}; + +#ifdef CONFIG_PPC_CPM_NEW_BINDING +static int __devinit fs_mii_bitbang_init(struct mii_bus *bus, + struct device_node *np) { - udelay(bitbang->delay); + struct resource res; + const u32 *data; + int mdio_pin, mdc_pin, len; + struct bb_info *bitbang = bus->priv; + + int ret = of_address_to_resource(np, 0, &res); + if (ret) + return ret; + + if (res.end - res.start < 13) + return -ENODEV; + + /* This should really encode the pin number as well, but all + * we get is an int, and the odds of multiple bitbang mdio buses + * is low enough that it's not worth going too crazy. + */ + bus->id = res.start; + + data = of_get_property(np, "fsl,mdio-pin", &len); + if (!data || len != 4) + return -ENODEV; + mdio_pin = *data; + + data = of_get_property(np, "fsl,mdc-pin", &len); + if (!data || len != 4) + return -ENODEV; + mdc_pin = *data; + + bitbang->dir = ioremap(res.start, res.end - res.start + 1); + if (!bitbang->dir) + return -ENOMEM; + + bitbang->dat = bitbang->dir + 4; + bitbang->mdio_msk = 1 << (31 - mdio_pin); + bitbang->mdc_msk = 1 << (31 - mdc_pin); + + return 0; } -/* Utility to send the preamble, address, and register (common to read and write). */ -static void bitbang_pre(struct bb_info *bitbang , int read, u8 addr, u8 reg) +static void __devinit add_phy(struct mii_bus *bus, struct device_node *np) { - int j; - - /* - * Send a 32 bit preamble ('1's) with an extra '1' bit for good measure. - * The IEEE spec says this is a PHY optional requirement. The AMD - * 79C874 requires one after power up and one after a MII communications - * error. This means that we are doing more preambles than we need, - * but it is safer and will be much more robust. 
- */ + const u32 *data; + int len, id, irq; - mdio_active(bitbang); - mdio(bitbang, 1); - for (j = 0; j < 32; j++) { - mdc(bitbang, 0); - mii_delay(bitbang); - mdc(bitbang, 1); - mii_delay(bitbang); - } + data = of_get_property(np, "reg", &len); + if (!data || len != 4) + return; - /* send the start bit (01) and the read opcode (10) or write (10) */ - mdc(bitbang, 0); - mdio(bitbang, 0); - mii_delay(bitbang); - mdc(bitbang, 1); - mii_delay(bitbang); - mdc(bitbang, 0); - mdio(bitbang, 1); - mii_delay(bitbang); - mdc(bitbang, 1); - mii_delay(bitbang); - mdc(bitbang, 0); - mdio(bitbang, read); - mii_delay(bitbang); - mdc(bitbang, 1); - mii_delay(bitbang); - mdc(bitbang, 0); - mdio(bitbang, !read); - mii_delay(bitbang); - mdc(bitbang, 1); - mii_delay(bitbang); - - /* send the PHY address */ - for (j = 0; j < 5; j++) { - mdc(bitbang, 0); - mdio(bitbang, (addr & 0x10) != 0); - mii_delay(bitbang); - mdc(bitbang, 1); - mii_delay(bitbang); - addr <<= 1; - } + id = *data; + bus->phy_mask &= ~(1 << id); - /* send the register address */ - for (j = 0; j < 5; j++) { - mdc(bitbang, 0); - mdio(bitbang, (reg & 0x10) != 0); - mii_delay(bitbang); - mdc(bitbang, 1); - mii_delay(bitbang); - reg <<= 1; - } + irq = of_irq_to_resource(np, 0, NULL); + if (irq != NO_IRQ) + bus->irq[id] = irq; } -static int fs_enet_mii_bb_read(struct mii_bus *bus , int phy_id, int location) +static int __devinit fs_enet_mdio_probe(struct of_device *ofdev, + const struct of_device_id *match) { - u16 rdreg; - int ret, j; - u8 addr = phy_id & 0xff; - u8 reg = location & 0xff; - struct bb_info* bitbang = bus->priv; - - bitbang_pre(bitbang, 1, addr, reg); - - /* tri-state our MDIO I/O pin so we can read */ - mdc(bitbang, 0); - mdio_tristate(bitbang); - mii_delay(bitbang); - mdc(bitbang, 1); - mii_delay(bitbang); - - /* check the turnaround bit: the PHY should be driving it to zero */ - if (mdio_read(bitbang) != 0) { - /* PHY didn't drive TA low */ - for (j = 0; j < 32; j++) { - mdc(bitbang, 0); - mii_delay(bitbang); - mdc(bitbang, 1); - mii_delay(bitbang); - } - ret = -1; + struct device_node *np = NULL; + struct mii_bus *new_bus; + struct bb_info *bitbang; + int ret = -ENOMEM; + int i; + + bitbang = kzalloc(sizeof(struct bb_info), GFP_KERNEL); + if (!bitbang) goto out; - } - mdc(bitbang, 0); - mii_delay(bitbang); - - /* read 16 bits of register data, MSB first */ - rdreg = 0; - for (j = 0; j < 16; j++) { - mdc(bitbang, 1); - mii_delay(bitbang); - rdreg <<= 1; - rdreg |= mdio_read(bitbang); - mdc(bitbang, 0); - mii_delay(bitbang); - } + bitbang->ctrl.ops = &bb_ops; + + new_bus = alloc_mdio_bitbang(&bitbang->ctrl); + if (!new_bus) + goto out_free_priv; + + new_bus->name = "CPM2 Bitbanged MII", + + ret = fs_mii_bitbang_init(new_bus, ofdev->node); + if (ret) + goto out_free_bus; + + new_bus->phy_mask = ~0; + new_bus->irq = kmalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL); + if (!new_bus->irq) + goto out_unmap_regs; + + for (i = 0; i < PHY_MAX_ADDR; i++) + new_bus->irq[i] = -1; + + while ((np = of_get_next_child(ofdev->node, np))) + if (!strcmp(np->type, "ethernet-phy")) + add_phy(new_bus, np); + + new_bus->dev = &ofdev->dev; + dev_set_drvdata(&ofdev->dev, new_bus); + + ret = mdiobus_register(new_bus); + if (ret) + goto out_free_irqs; - mdc(bitbang, 1); - mii_delay(bitbang); - mdc(bitbang, 0); - mii_delay(bitbang); - mdc(bitbang, 1); - mii_delay(bitbang); + return 0; - ret = rdreg; +out_free_irqs: + dev_set_drvdata(&ofdev->dev, NULL); + kfree(new_bus->irq); +out_unmap_regs: + iounmap(bitbang->dir); +out_free_bus: + kfree(new_bus); 
+out_free_priv: + free_mdio_bitbang(new_bus); out: return ret; } -static int fs_enet_mii_bb_write(struct mii_bus *bus, int phy_id, int location, u16 val) +static int fs_enet_mdio_remove(struct of_device *ofdev) { - int j; - struct bb_info* bitbang = bus->priv; - - u8 addr = phy_id & 0xff; - u8 reg = location & 0xff; - u16 value = val & 0xffff; - - bitbang_pre(bitbang, 0, addr, reg); - - /* send the turnaround (10) */ - mdc(bitbang, 0); - mdio(bitbang, 1); - mii_delay(bitbang); - mdc(bitbang, 1); - mii_delay(bitbang); - mdc(bitbang, 0); - mdio(bitbang, 0); - mii_delay(bitbang); - mdc(bitbang, 1); - mii_delay(bitbang); - - /* write 16 bits of register data, MSB first */ - for (j = 0; j < 16; j++) { - mdc(bitbang, 0); - mdio(bitbang, (value & 0x8000) != 0); - mii_delay(bitbang); - mdc(bitbang, 1); - mii_delay(bitbang); - value <<= 1; - } + struct mii_bus *bus = dev_get_drvdata(&ofdev->dev); + struct bb_info *bitbang = bus->priv; - /* - * Tri-state the MDIO line. - */ - mdio_tristate(bitbang); - mdc(bitbang, 0); - mii_delay(bitbang); - mdc(bitbang, 1); - mii_delay(bitbang); - return 0; -} + mdiobus_unregister(bus); + free_mdio_bitbang(bus); + dev_set_drvdata(&ofdev->dev, NULL); + kfree(bus->irq); + iounmap(bitbang->dir); + kfree(bitbang); + kfree(bus); -static int fs_enet_mii_bb_reset(struct mii_bus *bus) -{ - /*nothing here - dunno how to reset it*/ return 0; } -static int fs_mii_bitbang_init(struct bb_info *bitbang, struct fs_mii_bb_platform_info* fmpi) -{ - int r; +static struct of_device_id fs_enet_mdio_bb_match[] = { + { + .compatible = "fsl,cpm2-mdio-bitbang", + }, + {}, +}; - bitbang->delay = fmpi->delay; +static struct of_platform_driver fs_enet_bb_mdio_driver = { + .name = "fsl-bb-mdio", + .match_table = fs_enet_mdio_bb_match, + .probe = fs_enet_mdio_probe, + .remove = fs_enet_mdio_remove, +}; - r = bitbang_prep_bit(&bitbang->mdio_dir, - &bitbang->mdio_dir_msk, - &fmpi->mdio_dir); - if (r != 0) - return r; +static int fs_enet_mdio_bb_init(void) +{ + return of_register_platform_driver(&fs_enet_bb_mdio_driver); +} - r = bitbang_prep_bit(&bitbang->mdio_dat, - &bitbang->mdio_dat_msk, - &fmpi->mdio_dat); - if (r != 0) - return r; +static void fs_enet_mdio_bb_exit(void) +{ + of_unregister_platform_driver(&fs_enet_bb_mdio_driver); +} - r = bitbang_prep_bit(&bitbang->mdc_dat, - &bitbang->mdc_msk, - &fmpi->mdc_dat); - if (r != 0) - return r; +module_init(fs_enet_mdio_bb_init); +module_exit(fs_enet_mdio_bb_exit); +#else +static int __devinit fs_mii_bitbang_init(struct bb_info *bitbang, + struct fs_mii_bb_platform_info *fmpi) +{ + bitbang->dir = (u32 __iomem *)fmpi->mdio_dir.offset; + bitbang->dat = (u32 __iomem *)fmpi->mdio_dat.offset; + bitbang->mdio_msk = 1U << (31 - fmpi->mdio_dat.bit); + bitbang->mdc_msk = 1U << (31 - fmpi->mdc_dat.bit); return 0; } - static int __devinit fs_enet_mdio_probe(struct device *dev) { struct platform_device *pdev = to_platform_device(dev); @@ -320,20 +294,19 @@ static int __devinit fs_enet_mdio_probe(struct device *dev) if (NULL == dev) return -EINVAL; - new_bus = kzalloc(sizeof(struct mii_bus), GFP_KERNEL); + bitbang = kzalloc(sizeof(struct bb_info), GFP_KERNEL); - if (NULL == new_bus) + if (NULL == bitbang) return -ENOMEM; - bitbang = kzalloc(sizeof(struct bb_info), GFP_KERNEL); + bitbang->ctrl.ops = &bb_ops; - if (NULL == bitbang) + new_bus = alloc_mdio_bitbang(&bitbang->ctrl); + + if (NULL == new_bus) return -ENOMEM; new_bus->name = "BB MII Bus", - new_bus->read = &fs_enet_mii_bb_read, - new_bus->write = &fs_enet_mii_bb_write, - new_bus->reset = 
&fs_enet_mii_bb_reset, new_bus->id = pdev->id; new_bus->phy_mask = ~0x9; @@ -365,13 +338,12 @@ static int __devinit fs_enet_mdio_probe(struct device *dev) return 0; bus_register_fail: + free_mdio_bitbang(new_bus); kfree(bitbang); - kfree(new_bus); return err; } - static int fs_enet_mdio_remove(struct device *dev) { struct mii_bus *bus = dev_get_drvdata(dev); @@ -380,9 +352,7 @@ static int fs_enet_mdio_remove(struct device *dev) dev_set_drvdata(dev, NULL); - iounmap((void *) (&bus->priv)); - bus->priv = NULL; - kfree(bus); + free_mdio_bitbang(bus); return 0; } @@ -403,4 +373,4 @@ void fs_enet_mdio_bb_exit(void) { driver_unregister(&fs_enet_bb_mdio_driver); } - +#endif diff --git a/drivers/net/fs_enet/mii-fec.c b/drivers/net/fs_enet/mii-fec.c index 0a563a83016f5ed5e59de22dbf1c92541fc61216..a89cf15090b8840ed3d79f986ab2c30e1b6bbedf 100644 --- a/drivers/net/fs_enet/mii-fec.c +++ b/drivers/net/fs_enet/mii-fec.c @@ -36,6 +36,10 @@ #include #include +#ifdef CONFIG_PPC_CPM_NEW_BINDING +#include +#endif + #include "fs_enet.h" #include "fec.h" @@ -47,6 +51,7 @@ #define FEC_MII_LOOPS 10000 +#ifndef CONFIG_PPC_CPM_NEW_BINDING static int match_has_phy (struct device *dev, void* data) { struct platform_device* pdev = container_of(dev, struct platform_device, dev); @@ -65,7 +70,7 @@ static int match_has_phy (struct device *dev, void* data) static int fs_mii_fec_init(struct fec_info* fec, struct fs_mii_fec_platform_info *fmpi) { struct resource *r; - fec_t *fecp; + fec_t __iomem *fecp; char* name = "fsl-cpm-fec"; /* we need fec in order to be useful */ @@ -80,7 +85,7 @@ static int fs_mii_fec_init(struct fec_info* fec, struct fs_mii_fec_platform_info r = platform_get_resource_byname(fec_pdev, IORESOURCE_MEM, "regs"); - fec->fecp = fecp = (fec_t*)ioremap(r->start,sizeof(fec_t)); + fec->fecp = fecp = ioremap(r->start,sizeof(fec_t)); fec->mii_speed = fmpi->mii_speed; setbits32(&fecp->fec_r_cntrl, FEC_RCNTRL_MII_MODE); /* MII enable */ @@ -90,11 +95,12 @@ static int fs_mii_fec_init(struct fec_info* fec, struct fs_mii_fec_platform_info return 0; } +#endif static int fs_enet_fec_mii_read(struct mii_bus *bus , int phy_id, int location) { struct fec_info* fec = bus->priv; - fec_t *fecp = fec->fecp; + fec_t __iomem *fecp = fec->fecp; int i, ret = -1; if ((in_be32(&fecp->fec_r_cntrl) & FEC_RCNTRL_MII_MODE) == 0) @@ -113,13 +119,12 @@ static int fs_enet_fec_mii_read(struct mii_bus *bus , int phy_id, int location) } return ret; - } static int fs_enet_fec_mii_write(struct mii_bus *bus, int phy_id, int location, u16 val) { struct fec_info* fec = bus->priv; - fec_t *fecp = fec->fecp; + fec_t __iomem *fecp = fec->fecp; int i; /* this must never happen */ @@ -146,6 +151,141 @@ static int fs_enet_fec_mii_reset(struct mii_bus *bus) return 0; } +#ifdef CONFIG_PPC_CPM_NEW_BINDING +static void __devinit add_phy(struct mii_bus *bus, struct device_node *np) +{ + const u32 *data; + int len, id, irq; + + data = of_get_property(np, "reg", &len); + if (!data || len != 4) + return; + + id = *data; + bus->phy_mask &= ~(1 << id); + + irq = of_irq_to_resource(np, 0, NULL); + if (irq != NO_IRQ) + bus->irq[id] = irq; +} + +static int __devinit fs_enet_mdio_probe(struct of_device *ofdev, + const struct of_device_id *match) +{ + struct device_node *np = NULL; + struct resource res; + struct mii_bus *new_bus; + struct fec_info *fec; + int ret = -ENOMEM, i; + + new_bus = kzalloc(sizeof(struct mii_bus), GFP_KERNEL); + if (!new_bus) + goto out; + + fec = kzalloc(sizeof(struct fec_info), GFP_KERNEL); + if (!fec) + goto out_mii; + + new_bus->priv = 
fec; + new_bus->name = "FEC MII Bus"; + new_bus->read = &fs_enet_fec_mii_read; + new_bus->write = &fs_enet_fec_mii_write; + new_bus->reset = &fs_enet_fec_mii_reset; + + ret = of_address_to_resource(ofdev->node, 0, &res); + if (ret) + return ret; + + new_bus->id = res.start; + + fec->fecp = ioremap(res.start, res.end - res.start + 1); + if (!fec->fecp) + goto out_fec; + + fec->mii_speed = ((ppc_proc_freq + 4999999) / 5000000) << 1; + + setbits32(&fec->fecp->fec_r_cntrl, FEC_RCNTRL_MII_MODE); + setbits32(&fec->fecp->fec_ecntrl, FEC_ECNTRL_PINMUX | + FEC_ECNTRL_ETHER_EN); + out_be32(&fec->fecp->fec_ievent, FEC_ENET_MII); + out_be32(&fec->fecp->fec_mii_speed, fec->mii_speed); + + new_bus->phy_mask = ~0; + new_bus->irq = kmalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL); + if (!new_bus->irq) + goto out_unmap_regs; + + for (i = 0; i < PHY_MAX_ADDR; i++) + new_bus->irq[i] = -1; + + while ((np = of_get_next_child(ofdev->node, np))) + if (!strcmp(np->type, "ethernet-phy")) + add_phy(new_bus, np); + + new_bus->dev = &ofdev->dev; + dev_set_drvdata(&ofdev->dev, new_bus); + + ret = mdiobus_register(new_bus); + if (ret) + goto out_free_irqs; + + return 0; + +out_free_irqs: + dev_set_drvdata(&ofdev->dev, NULL); + kfree(new_bus->irq); +out_unmap_regs: + iounmap(fec->fecp); +out_fec: + kfree(fec); +out_mii: + kfree(new_bus); +out: + return ret; +} + +static int fs_enet_mdio_remove(struct of_device *ofdev) +{ + struct mii_bus *bus = dev_get_drvdata(&ofdev->dev); + struct fec_info *fec = bus->priv; + + mdiobus_unregister(bus); + dev_set_drvdata(&ofdev->dev, NULL); + kfree(bus->irq); + iounmap(fec->fecp); + kfree(fec); + kfree(bus); + + return 0; +} + +static struct of_device_id fs_enet_mdio_fec_match[] = { + { + .compatible = "fsl,pq1-fec-mdio", + }, + {}, +}; + +static struct of_platform_driver fs_enet_fec_mdio_driver = { + .name = "fsl-fec-mdio", + .match_table = fs_enet_mdio_fec_match, + .probe = fs_enet_mdio_probe, + .remove = fs_enet_mdio_remove, +}; + +static int fs_enet_mdio_fec_init(void) +{ + return of_register_platform_driver(&fs_enet_fec_mdio_driver); +} + +static void fs_enet_mdio_fec_exit(void) +{ + of_unregister_platform_driver(&fs_enet_fec_mdio_driver); +} + +module_init(fs_enet_mdio_fec_init); +module_exit(fs_enet_mdio_fec_exit); +#else static int __devinit fs_enet_fec_mdio_probe(struct device *dev) { struct platform_device *pdev = to_platform_device(dev); @@ -236,4 +376,4 @@ void fs_enet_mdio_fec_exit(void) { driver_unregister(&fs_enet_fec_mdio_driver); } - +#endif diff --git a/drivers/net/gianfar.c b/drivers/net/gianfar.c index f92690555dd9c0cd46c314e2e53e36c967e87cf3..0db5e6fabe73146ddc1f29e4e3b220f3ce4fcfec 100644 --- a/drivers/net/gianfar.c +++ b/drivers/net/gianfar.c @@ -116,7 +116,6 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev); static void gfar_timeout(struct net_device *dev); static int gfar_close(struct net_device *dev); struct sk_buff *gfar_new_skb(struct net_device *dev, struct rxbd8 *bdp); -static struct net_device_stats *gfar_get_stats(struct net_device *dev); static int gfar_set_mac_address(struct net_device *dev); static int gfar_change_mtu(struct net_device *dev, int new_mtu); static irqreturn_t gfar_error(int irq, void *dev_id); @@ -134,7 +133,7 @@ static void gfar_configure_serdes(struct net_device *dev); extern int gfar_local_mdio_write(struct gfar_mii *regs, int mii_id, int regnum, u16 value); extern int gfar_local_mdio_read(struct gfar_mii *regs, int mii_id, int regnum); #ifdef CONFIG_GFAR_NAPI -static int gfar_poll(struct net_device *dev, int 
*budget); +static int gfar_poll(struct napi_struct *napi, int budget); #endif #ifdef CONFIG_NET_POLL_CONTROLLER static void gfar_netpoll(struct net_device *dev); @@ -171,6 +170,7 @@ static int gfar_probe(struct platform_device *pdev) struct resource *r; int idx; int err = 0; + DECLARE_MAC_BUF(mac); einfo = (struct gianfar_platform_data *) pdev->dev.platform_data; @@ -188,6 +188,7 @@ static int gfar_probe(struct platform_device *pdev) return -ENOMEM; priv = netdev_priv(dev); + priv->dev = dev; /* Set the info in the priv to the current info */ priv->einfo = einfo; @@ -253,7 +254,6 @@ static int gfar_probe(struct platform_device *pdev) /* Set the dev->base_addr to the gfar reg region */ dev->base_addr = (unsigned long) (priv->regs); - SET_MODULE_OWNER(dev); SET_NETDEV_DEV(dev, &pdev->dev); /* Fill in the dev structure */ @@ -261,15 +261,11 @@ static int gfar_probe(struct platform_device *pdev) dev->hard_start_xmit = gfar_start_xmit; dev->tx_timeout = gfar_timeout; dev->watchdog_timeo = TX_TIMEOUT; -#ifdef CONFIG_GFAR_NAPI - dev->poll = gfar_poll; - dev->weight = GFAR_DEV_WEIGHT; -#endif + netif_napi_add(dev, &priv->napi, gfar_poll, GFAR_DEV_WEIGHT); #ifdef CONFIG_NET_POLL_CONTROLLER dev->poll_controller = gfar_netpoll; #endif dev->stop = gfar_close; - dev->get_stats = gfar_get_stats; dev->change_mtu = gfar_change_mtu; dev->mtu = 1500; dev->set_multicast_list = gfar_set_multi; @@ -361,10 +357,8 @@ static int gfar_probe(struct platform_device *pdev) gfar_init_sysfs(dev); /* Print out the device info */ - printk(KERN_INFO DEVICE_NAME, dev->name); - for (idx = 0; idx < 6; idx++) - printk("%2.2x%c", dev->dev_addr[idx], idx == 5 ? ' ' : ':'); - printk("\n"); + printk(KERN_INFO DEVICE_NAME "%s\n", + dev->name, print_mac(mac, dev->dev_addr)); /* Even more device info helps when determining which kernel */ /* provided which set of benchmarks. */ @@ -939,6 +933,8 @@ static int gfar_enet_open(struct net_device *dev) { int err; + napi_enable(&priv->napi); + /* Initialize a bunch of registers */ init_registers(dev); @@ -946,10 +942,14 @@ static int gfar_enet_open(struct net_device *dev) err = init_phy(dev); - if(err) + if(err) { + napi_disable(&priv->napi); return err; + } err = startup_gfar(dev); + if (err) + napi_disable(&priv->napi); netif_start_queue(dev); @@ -1010,7 +1010,7 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev) unsigned long flags; /* Update transmit stats */ - priv->stats.tx_bytes += skb->len; + dev->stats.tx_bytes += skb->len; /* Lock priv now */ spin_lock_irqsave(&priv->txlock, flags); @@ -1083,7 +1083,7 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev) if (txbdp == priv->dirty_tx) { netif_stop_queue(dev); - priv->stats.tx_fifo_errors++; + dev->stats.tx_fifo_errors++; } /* Update the current txbd to the next one */ @@ -1102,6 +1102,9 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev) static int gfar_close(struct net_device *dev) { struct gfar_private *priv = netdev_priv(dev); + + napi_disable(&priv->napi); + stop_gfar(dev); /* Disconnect from the PHY */ @@ -1113,14 +1116,6 @@ static int gfar_close(struct net_device *dev) return 0; } -/* returns a net_device_stats structure pointer */ -static struct net_device_stats * gfar_get_stats(struct net_device *dev) -{ - struct gfar_private *priv = netdev_priv(dev); - - return &(priv->stats); -} - /* Changes the mac address if the controller is not running. 
*/ int gfar_set_mac_address(struct net_device *dev) { @@ -1232,7 +1227,7 @@ static void gfar_timeout(struct net_device *dev) { struct gfar_private *priv = netdev_priv(dev); - priv->stats.tx_errors++; + dev->stats.tx_errors++; if (dev->flags & IFF_UP) { stop_gfar(dev); @@ -1262,12 +1257,12 @@ static irqreturn_t gfar_transmit(int irq, void *dev_id) if ((bdp == priv->cur_tx) && (netif_queue_stopped(dev) == 0)) break; - priv->stats.tx_packets++; + dev->stats.tx_packets++; /* Deferred means some collisions occurred during transmit, */ /* but we eventually sent the packet. */ if (bdp->status & TXBD_DEF) - priv->stats.collisions++; + dev->stats.collisions++; /* Free the sk buffer associated with this TxBD */ dev_kfree_skb_irq(priv->tx_skbuff[priv->skb_dirtytx]); @@ -1318,7 +1313,7 @@ struct sk_buff * gfar_new_skb(struct net_device *dev, struct rxbd8 *bdp) return NULL; alignamount = RXBUF_ALIGNMENT - - (((unsigned) skb->data) & (RXBUF_ALIGNMENT - 1)); + (((unsigned long) skb->data) & (RXBUF_ALIGNMENT - 1)); /* We need the data buffer to be aligned properly. We will reserve * as many bytes as needed to align the data properly @@ -1339,7 +1334,7 @@ struct sk_buff * gfar_new_skb(struct net_device *dev, struct rxbd8 *bdp) static inline void count_errors(unsigned short status, struct gfar_private *priv) { - struct net_device_stats *stats = &priv->stats; + struct net_device_stats *stats = &dev->stats; struct gfar_extra_stats *estats = &priv->extra_stats; /* If the packet was truncated, none of the other errors @@ -1390,12 +1385,12 @@ irqreturn_t gfar_receive(int irq, void *dev_id) /* support NAPI */ #ifdef CONFIG_GFAR_NAPI - if (netif_rx_schedule_prep(dev)) { + if (netif_rx_schedule_prep(dev, &priv->napi)) { tempval = gfar_read(&priv->regs->imask); tempval &= IMASK_RX_DISABLED; gfar_write(&priv->regs->imask, tempval); - __netif_rx_schedule(dev); + __netif_rx_schedule(dev, &priv->napi); } else { if (netif_msg_rx_err(priv)) printk(KERN_DEBUG "%s: receive called twice (%x)[%x]\n", @@ -1464,7 +1459,7 @@ static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb, if (NULL == skb) { if (netif_msg_rx_err(priv)) printk(KERN_WARNING "%s: Missing skb!!.\n", dev->name); - priv->stats.rx_dropped++; + dev->stats.rx_dropped++; priv->extra_stats.rx_skbmissing++; } else { int ret; @@ -1522,7 +1517,7 @@ int gfar_clean_rx_ring(struct net_device *dev, int rx_work_limit) (RXBD_LARGE | RXBD_SHORT | RXBD_NONOCTET | RXBD_CRCERR | RXBD_OVERRUN | RXBD_TRUNCATED))) { /* Increment the number of packets */ - priv->stats.rx_packets++; + dev->stats.rx_packets++; howmany++; /* Remove the FCS from the packet length */ @@ -1530,7 +1525,7 @@ int gfar_clean_rx_ring(struct net_device *dev, int rx_work_limit) gfar_process_frame(dev, skb, pkt_len); - priv->stats.rx_bytes += pkt_len; + dev->stats.rx_bytes += pkt_len; } else { count_errors(bdp->status, priv); @@ -1569,23 +1564,16 @@ int gfar_clean_rx_ring(struct net_device *dev, int rx_work_limit) } #ifdef CONFIG_GFAR_NAPI -static int gfar_poll(struct net_device *dev, int *budget) +static int gfar_poll(struct napi_struct *napi, int budget) { + struct gfar_private *priv = container_of(napi, struct gfar_private, napi); + struct net_device *dev = priv->dev; int howmany; - struct gfar_private *priv = netdev_priv(dev); - int rx_work_limit = *budget; - - if (rx_work_limit > dev->quota) - rx_work_limit = dev->quota; - howmany = gfar_clean_rx_ring(dev, rx_work_limit); + howmany = gfar_clean_rx_ring(dev, budget); - dev->quota -= howmany; - rx_work_limit -= howmany; - *budget -= howmany; 
- - if (rx_work_limit > 0) { - netif_rx_complete(dev); + if (howmany < budget) { + netif_rx_complete(dev, napi); /* Clear the halt bit in RSTAT */ gfar_write(&priv->regs->rstat, RSTAT_CLEAR_RHALT); @@ -1601,8 +1589,7 @@ static int gfar_poll(struct net_device *dev, int *budget) gfar_write(&priv->regs->rxic, 0); } - /* Return 1 if there's more work to do */ - return (rx_work_limit > 0) ? 0 : 1; + return howmany; } #endif @@ -1918,17 +1905,17 @@ static irqreturn_t gfar_error(int irq, void *dev_id) /* Update the error counters */ if (events & IEVENT_TXE) { - priv->stats.tx_errors++; + dev->stats.tx_errors++; if (events & IEVENT_LC) - priv->stats.tx_window_errors++; + dev->stats.tx_window_errors++; if (events & IEVENT_CRL) - priv->stats.tx_aborted_errors++; + dev->stats.tx_aborted_errors++; if (events & IEVENT_XFUN) { if (netif_msg_tx_err(priv)) printk(KERN_DEBUG "%s: TX FIFO underrun, " "packet dropped.\n", dev->name); - priv->stats.tx_dropped++; + dev->stats.tx_dropped++; priv->extra_stats.tx_underrun++; /* Reactivate the Tx Queues */ @@ -1938,7 +1925,7 @@ static irqreturn_t gfar_error(int irq, void *dev_id) printk(KERN_DEBUG "%s: Transmit Error\n", dev->name); } if (events & IEVENT_BSY) { - priv->stats.rx_errors++; + dev->stats.rx_errors++; priv->extra_stats.rx_bsy++; gfar_receive(irq, dev_id); @@ -1953,7 +1940,7 @@ static irqreturn_t gfar_error(int irq, void *dev_id) dev->name, gfar_read(&priv->regs->rstat)); } if (events & IEVENT_BABR) { - priv->stats.rx_errors++; + dev->stats.rx_errors++; priv->extra_stats.rx_babr++; if (netif_msg_rx_err(priv)) diff --git a/drivers/net/gianfar.h b/drivers/net/gianfar.h index d8e779c102faf3becd6c281cab68cee44f8ea9b6..c16cc8b946a92be6a762495b5b7c0c4cf0eb521a 100644 --- a/drivers/net/gianfar.h +++ b/drivers/net/gianfar.h @@ -45,7 +45,6 @@ #include #include #include -#include #include #include "gianfar_mii.h" @@ -691,6 +690,9 @@ struct gfar_private { /* RX Locked fields */ spinlock_t rxlock; + struct net_device *dev; + struct napi_struct napi; + /* skb array and index */ struct sk_buff ** rx_skbuff; u16 skb_currx; diff --git a/drivers/net/gianfar_ethtool.c b/drivers/net/gianfar_ethtool.c index 7b411c1514db364c1c662d7abb3e90a4c8ea6356..6007147cc1e917c08e79b636d18e6fff56d1040e 100644 --- a/drivers/net/gianfar_ethtool.c +++ b/drivers/net/gianfar_ethtool.c @@ -34,7 +34,6 @@ #include #include #include -#include #include #include #include @@ -153,15 +152,19 @@ static void gfar_fill_stats(struct net_device *dev, struct ethtool_stats *dummy, buf[i] = extra[i]; } -/* Returns the number of stats (and their corresponding strings) */ -static int gfar_stats_count(struct net_device *dev) +static int gfar_sset_count(struct net_device *dev, int sset) { struct gfar_private *priv = netdev_priv(dev); - if (priv->einfo->device_flags & FSL_GIANFAR_DEV_HAS_RMON) - return GFAR_STATS_LEN; - else - return GFAR_EXTRA_STATS_LEN; + switch (sset) { + case ETH_SS_STATS: + if (priv->einfo->device_flags & FSL_GIANFAR_DEV_HAS_RMON) + return GFAR_STATS_LEN; + else + return GFAR_EXTRA_STATS_LEN; + default: + return -EOPNOTSUPP; + } } /* Fills in the drvinfo structure with some basic info */ @@ -172,8 +175,6 @@ static void gfar_gdrvinfo(struct net_device *dev, struct strncpy(drvinfo->version, gfar_driver_version, GFAR_INFOSTR_LEN); strncpy(drvinfo->fw_version, "N/A", GFAR_INFOSTR_LEN); strncpy(drvinfo->bus_info, "N/A", GFAR_INFOSTR_LEN); - drvinfo->n_stats = GFAR_STATS_LEN; - drvinfo->testinfo_len = 0; drvinfo->regdump_len = 0; drvinfo->eedump_len = 0; } @@ -576,7 +577,7 @@ const struct 
ethtool_ops gfar_ethtool_ops = { .get_ringparam = gfar_gringparam, .set_ringparam = gfar_sringparam, .get_strings = gfar_gstrings, - .get_stats_count = gfar_stats_count, + .get_sset_count = gfar_sset_count, .get_ethtool_stats = gfar_fill_stats, .get_rx_csum = gfar_get_rx_csum, .get_tx_csum = gfar_get_tx_csum, diff --git a/drivers/net/hamachi.c b/drivers/net/hamachi.c index 15254dc7876a453e19eac1f88525a0fa9b17da8f..ed407c85708f13c31bcbaf4ad68dc4be995b6fe4 100644 --- a/drivers/net/hamachi.c +++ b/drivers/net/hamachi.c @@ -580,6 +580,7 @@ static int __devinit hamachi_init_one (struct pci_dev *pdev, void *ring_space; dma_addr_t ring_dma; int ret = -ENOMEM; + DECLARE_MAC_BUF(mac); /* when built into the kernel, we only print version if device is found */ #ifndef MODULE @@ -613,7 +614,6 @@ static int __devinit hamachi_init_one (struct pci_dev *pdev, if (!dev) goto err_out_iounmap; - SET_MODULE_OWNER(dev); SET_NETDEV_DEV(dev, &pdev->dev); #ifdef TX_CHECKSUM @@ -742,12 +742,9 @@ static int __devinit hamachi_init_one (struct pci_dev *pdev, goto err_out_unmap_rx; } - printk(KERN_INFO "%s: %s type %x at %p, ", + printk(KERN_INFO "%s: %s type %x at %p, %s, IRQ %d.\n", dev->name, chip_tbl[chip_id].name, readl(ioaddr + ChipRev), - ioaddr); - for (i = 0; i < 5; i++) - printk("%2.2x:", dev->dev_addr[i]); - printk("%2.2x, IRQ %d.\n", dev->dev_addr[i], irq); + ioaddr, print_mac(mac, dev->dev_addr), irq); i = readb(ioaddr + PCIClkMeas); printk(KERN_INFO "%s: %d-bit %d Mhz PCI bus (%d), Virtual Jumpers " "%2.2x, LPA %4.4x.\n", @@ -1020,7 +1017,7 @@ static inline int hamachi_tx(struct net_device *dev) break; /* Free the original skb. */ skb = hmp->tx_skbuff[entry]; - if (skb != 0) { + if (skb) { pci_unmap_single(hmp->pci_dev, hmp->tx_ring[entry].addr, skb->len, PCI_DMA_TODEVICE); @@ -1072,7 +1069,6 @@ static void hamachi_tx_timeout(struct net_device *dev) " resetting...\n", dev->name, (int)readw(ioaddr + TxStatus)); { - int i; printk(KERN_DEBUG " Rx ring %p: ", hmp->rx_ring); for (i = 0; i < RX_RING_SIZE; i++) printk(" %8.8x", (unsigned int)hmp->rx_ring[i].status_n_length); diff --git a/drivers/net/hamradio/6pack.c b/drivers/net/hamradio/6pack.c index 760d04a671f937269789411fd3141b18eb637de5..ecd156def0398b24864313843b712caa186fb9ca 100644 --- a/drivers/net/hamradio/6pack.c +++ b/drivers/net/hamradio/6pack.c @@ -288,7 +288,8 @@ static int sp_close(struct net_device *dev) /* Return the frame type ID */ static int sp_header(struct sk_buff *skb, struct net_device *dev, - unsigned short type, void *daddr, void *saddr, unsigned len) + unsigned short type, const void *daddr, + const void *saddr, unsigned len) { #ifdef CONFIG_INET if (type != htons(ETH_P_AX25)) @@ -323,6 +324,11 @@ static int sp_rebuild_header(struct sk_buff *skb) #endif } +static const struct header_ops sp_header_ops = { + .create = sp_header, + .rebuild = sp_rebuild_header, +}; + static void sp_setup(struct net_device *dev) { /* Finish setting up the DEVICE info. 
*/ @@ -331,22 +337,21 @@ static void sp_setup(struct net_device *dev) dev->open = sp_open_dev; dev->destructor = free_netdev; dev->stop = sp_close; - dev->hard_header = sp_header; + dev->get_stats = sp_get_stats; dev->set_mac_address = sp_set_mac_address; dev->hard_header_len = AX25_MAX_HEADER_LEN; + dev->header_ops = &sp_header_ops; + dev->addr_len = AX25_ADDR_LEN; dev->type = ARPHRD_AX25; dev->tx_queue_len = 10; - dev->rebuild_header = sp_rebuild_header; dev->tx_timeout = NULL; /* Only activated in AX.25 mode */ memcpy(dev->broadcast, &ax25_bcast, AX25_ADDR_LEN); memcpy(dev->dev_addr, &ax25_defaddr, AX25_ADDR_LEN); - SET_MODULE_OWNER(dev); - dev->flags = 0; } diff --git a/drivers/net/hamradio/baycom_epp.c b/drivers/net/hamradio/baycom_epp.c index 355c6cf3d11212870f16628ebd0e7dde489354da..1a5a75acf73e4a0609bf6ecf52515496318e47f8 100644 --- a/drivers/net/hamradio/baycom_epp.c +++ b/drivers/net/hamradio/baycom_epp.c @@ -1159,8 +1159,7 @@ static void baycom_probe(struct net_device *dev) /* Fill in the fields of the device structure */ bc->skb = NULL; - dev->hard_header = ax25_hard_header; - dev->rebuild_header = ax25_rebuild_header; + dev->header_ops = &ax25_header_ops; dev->set_mac_address = baycom_set_mac_address; dev->type = ARPHRD_AX25; /* AF_AX25 device */ diff --git a/drivers/net/hamradio/bpqether.c b/drivers/net/hamradio/bpqether.c index cc0ee93669eac11c4c4cad1d99c611889c866da4..5ddf8b0c34f9286fe7fe61060088d4327fe30481 100644 --- a/drivers/net/hamradio/bpqether.c +++ b/drivers/net/hamradio/bpqether.c @@ -64,7 +64,7 @@ #include #include #include -#include +#include #include #include #include @@ -83,6 +83,7 @@ #include #include +#include #include @@ -94,7 +95,6 @@ static char bpq_eth_addr[6]; static int bpq_rcv(struct sk_buff *, struct net_device *, struct packet_type *, struct net_device *); static int bpq_device_event(struct notifier_block *, unsigned long, void *); -static const char *bpq_print_ethaddr(const unsigned char *); static struct packet_type bpq_packet_type = { .type = __constant_htons(ETH_P_BPQ), @@ -172,6 +172,9 @@ static int bpq_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_ty struct ethhdr *eth; struct bpqdev *bpq; + if (dev->nd_net != &init_net) + goto drop; + if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL) return NET_RX_DROP; @@ -283,7 +286,7 @@ static int bpq_xmit(struct sk_buff *skb, struct net_device *dev) skb->protocol = ax25_type_trans(skb, dev); skb_reset_network_header(skb); - dev->hard_header(skb, dev, ETH_P_BPQ, bpq->dest_addr, NULL, 0); + dev_hard_header(skb, dev, ETH_P_BPQ, bpq->dest_addr, NULL, 0); bpq->stats.tx_packets++; bpq->stats.tx_bytes+=skb->len; @@ -379,16 +382,6 @@ static int bpq_close(struct net_device *dev) /* * Proc filesystem */ -static const char * bpq_print_ethaddr(const unsigned char *e) -{ - static char buf[18]; - - sprintf(buf, "%2.2X:%2.2X:%2.2X:%2.2X:%2.2X:%2.2X", - e[0], e[1], e[2], e[3], e[4], e[5]); - - return buf; -} - static void *bpq_seq_start(struct seq_file *seq, loff_t *pos) { int i = 1; @@ -434,14 +427,16 @@ static int bpq_seq_show(struct seq_file *seq, void *v) "dev ether destination accept from\n"); else { const struct bpqdev *bpqdev = v; + DECLARE_MAC_BUF(mac); seq_printf(seq, "%-5s %-10s %s ", bpqdev->axdev->name, bpqdev->ethdev->name, - bpq_print_ethaddr(bpqdev->dest_addr)); + print_mac(mac, bpqdev->dest_addr)); - seq_printf(seq, "%s\n", - (bpqdev->acpt_addr[0] & 0x01) ? 
"*" - : bpq_print_ethaddr(bpqdev->acpt_addr)); + if (is_multicast_ether_addr(bpqdev->acpt_addr)) + seq_printf(seq, "*\n"); + else + seq_printf(seq, "%s\n", print_mac(mac, bpqdev->acpt_addr)); } return 0; @@ -488,8 +483,7 @@ static void bpq_setup(struct net_device *dev) dev->flags = 0; #if defined(CONFIG_AX25) || defined(CONFIG_AX25_MODULE) - dev->hard_header = ax25_hard_header; - dev->rebuild_header = ax25_rebuild_header; + dev->header_ops = &ax25_header_ops; #endif dev->type = ARPHRD_AX25; @@ -559,6 +553,9 @@ static int bpq_device_event(struct notifier_block *this,unsigned long event, voi { struct net_device *dev = (struct net_device *)ptr; + if (dev->nd_net != &init_net) + return NOTIFY_DONE; + if (!dev_is_ethdev(dev)) return NOTIFY_DONE; @@ -594,7 +591,7 @@ static int bpq_device_event(struct notifier_block *this,unsigned long event, voi static int __init bpq_init_driver(void) { #ifdef CONFIG_PROC_FS - if (!proc_net_fops_create("bpqether", S_IRUGO, &bpq_info_fops)) { + if (!proc_net_fops_create(&init_net, "bpqether", S_IRUGO, &bpq_info_fops)) { printk(KERN_ERR "bpq: cannot create /proc/net/bpqether entry.\n"); return -ENOENT; @@ -618,7 +615,7 @@ static void __exit bpq_cleanup_driver(void) unregister_netdevice_notifier(&bpq_dev_notifier); - proc_net_remove("bpqether"); + proc_net_remove(&init_net, "bpqether"); rtnl_lock(); while (!list_empty(&bpq_devices)) { diff --git a/drivers/net/hamradio/dmascc.c b/drivers/net/hamradio/dmascc.c index 205f09672492b4f4ec42236a264779717cc6297d..bc02e469480401274cee6e7f771f9ab62afe3c0e 100644 --- a/drivers/net/hamradio/dmascc.c +++ b/drivers/net/hamradio/dmascc.c @@ -581,8 +581,7 @@ static int __init setup_adapter(int card_base, int type, int n) dev->do_ioctl = scc_ioctl; dev->hard_start_xmit = scc_send_packet; dev->get_stats = scc_get_stats; - dev->hard_header = ax25_hard_header; - dev->rebuild_header = ax25_rebuild_header; + dev->header_ops = &ax25_header_ops; dev->set_mac_address = scc_set_mac_address; } if (register_netdev(info->dev[0])) { diff --git a/drivers/net/hamradio/hdlcdrv.c b/drivers/net/hamradio/hdlcdrv.c index b33adc6a340b055ad8b1a2076331f5697f83a517..ae9629fa6882bb1dd2181e804b9a881a6c49c962 100644 --- a/drivers/net/hamradio/hdlcdrv.c +++ b/drivers/net/hamradio/hdlcdrv.c @@ -682,8 +682,7 @@ static void hdlcdrv_setup(struct net_device *dev) s->skb = NULL; - dev->hard_header = ax25_hard_header; - dev->rebuild_header = ax25_rebuild_header; + dev->header_ops = &ax25_header_ops; dev->set_mac_address = hdlcdrv_set_mac_address; dev->type = ARPHRD_AX25; /* AF_AX25 device */ diff --git a/drivers/net/hamradio/mkiss.c b/drivers/net/hamradio/mkiss.c index d08fbc3966481fd40c6d790efbf6b455abc2e56a..9e43c47691caa4c9d12a6015a1668277424c3b25 100644 --- a/drivers/net/hamradio/mkiss.c +++ b/drivers/net/hamradio/mkiss.c @@ -578,8 +578,9 @@ static int ax_open_dev(struct net_device *dev) #if defined(CONFIG_AX25) || defined(CONFIG_AX25_MODULE) /* Return the frame type ID */ -static int ax_header(struct sk_buff *skb, struct net_device *dev, unsigned short type, - void *daddr, void *saddr, unsigned len) +static int ax_header(struct sk_buff *skb, struct net_device *dev, + unsigned short type, const void *daddr, + const void *saddr, unsigned len) { #ifdef CONFIG_INET if (type != htons(ETH_P_AX25)) @@ -670,6 +671,11 @@ static struct net_device_stats *ax_get_stats(struct net_device *dev) return &ax->stats; } +static const struct header_ops ax_header_ops = { + .create = ax_header, + .rebuild = ax_rebuild_header, +}; + static void ax_setup(struct net_device *dev) { /* 
Finish setting up the DEVICE info. */ @@ -683,8 +689,8 @@ static void ax_setup(struct net_device *dev) dev->addr_len = 0; dev->type = ARPHRD_AX25; dev->tx_queue_len = 10; - dev->hard_header = ax_header; - dev->rebuild_header = ax_rebuild_header; + dev->header_ops = &ax_header_ops; + memcpy(dev->broadcast, &ax25_bcast, AX25_ADDR_LEN); memcpy(dev->dev_addr, &ax25_defaddr, AX25_ADDR_LEN); diff --git a/drivers/net/hamradio/scc.c b/drivers/net/hamradio/scc.c index 6fdaad5a457750e21adaad56dd185d928c4a92b2..353d13e543ce12228428b90d14df93f903d76b45 100644 --- a/drivers/net/hamradio/scc.c +++ b/drivers/net/hamradio/scc.c @@ -174,6 +174,7 @@ #include #include +#include #include #include @@ -1550,8 +1551,8 @@ static void scc_net_setup(struct net_device *dev) dev->stop = scc_net_close; dev->hard_start_xmit = scc_net_tx; - dev->hard_header = ax25_hard_header; - dev->rebuild_header = ax25_rebuild_header; + dev->header_ops = &ax25_header_ops; + dev->set_mac_address = scc_net_set_mac_address; dev->get_stats = scc_net_get_stats; dev->do_ioctl = scc_net_ioctl; @@ -2114,7 +2115,7 @@ static int __init scc_init_driver (void) } rtnl_unlock(); - proc_net_fops_create("z8530drv", 0, &scc_net_seq_fops); + proc_net_fops_create(&init_net, "z8530drv", 0, &scc_net_seq_fops); return 0; } @@ -2169,7 +2170,7 @@ static void __exit scc_cleanup_driver(void) if (Vector_Latch) release_region(Vector_Latch, 1); - proc_net_remove("z8530drv"); + proc_net_remove(&init_net, "z8530drv"); } MODULE_AUTHOR("Joerg Reuter "); diff --git a/drivers/net/hamradio/yam.c b/drivers/net/hamradio/yam.c index 467559debfd68a6bb2e93c9e758ea1f1707bcf72..1c942862a3f4944710de74c546cba66943b8b81a 100644 --- a/drivers/net/hamradio/yam.c +++ b/drivers/net/hamradio/yam.c @@ -65,6 +65,7 @@ #include #include #include +#include #include #include @@ -1096,8 +1097,7 @@ static void yam_setup(struct net_device *dev) skb_queue_head_init(&yp->send_queue); - dev->hard_header = ax25_hard_header; - dev->rebuild_header = ax25_rebuild_header; + dev->header_ops = &ax25_header_ops; dev->set_mac_address = yam_set_mac_address; @@ -1142,7 +1142,7 @@ static int __init yam_init_driver(void) yam_timer.expires = jiffies + HZ / 100; add_timer(&yam_timer); - proc_net_fops_create("yam", S_IRUGO, &yam_info_fops); + proc_net_fops_create(&init_net, "yam", S_IRUGO, &yam_info_fops); return 0; error: while (--i >= 0) { @@ -1174,7 +1174,7 @@ static void __exit yam_cleanup_driver(void) kfree(p); } - proc_net_remove("yam"); + proc_net_remove(&init_net, "yam"); } /* --------------------------------------------------------------------- */ diff --git a/drivers/net/hp-plus.c b/drivers/net/hp-plus.c index 99a36cc3f8df4dddcba0d4f5e70c7847628e6bcb..c2c4f49d7578955de11c6749423d331f786beec6 100644 --- a/drivers/net/hp-plus.c +++ b/drivers/net/hp-plus.c @@ -122,8 +122,6 @@ static int __init do_hpp_probe(struct net_device *dev) int base_addr = dev->base_addr; int irq = dev->irq; - SET_MODULE_OWNER(dev); - if (base_addr > 0x1ff) /* Check a single specified location. */ return hpp_probe1(dev, base_addr); else if (base_addr != 0) /* Don't probe at all. 
*/ @@ -168,6 +166,7 @@ static int __init hpp_probe1(struct net_device *dev, int ioaddr) const char name[] = "HP-PC-LAN+"; int mem_start; static unsigned version_printed; + DECLARE_MAC_BUF(mac); if (!request_region(ioaddr, HP_IO_EXTENT, DRV_NAME)) return -EBUSY; @@ -182,7 +181,7 @@ static int __init hpp_probe1(struct net_device *dev, int ioaddr) if (ei_debug && version_printed++ == 0) printk(version); - printk("%s: %s at %#3x,", dev->name, name, ioaddr); + printk("%s: %s at %#3x, ", dev->name, name, ioaddr); /* Retrieve and checksum the station address. */ outw(MAC_Page, ioaddr + HP_PAGING); @@ -191,10 +190,11 @@ static int __init hpp_probe1(struct net_device *dev, int ioaddr) unsigned char inval = inb(ioaddr + 8 + i); dev->dev_addr[i] = inval; checksum += inval; - printk(" %2.2x", inval); } checksum += inb(ioaddr + 14); + printk("%s", print_mac(mac, dev->dev_addr)); + if (checksum != 0xff) { printk(" bad checksum %2.2x.\n", checksum); retval = -ENODEV; diff --git a/drivers/net/hp.c b/drivers/net/hp.c index 635b13c2e2aac856f786253f07a6cd95a48a9edf..c649a8019bebc0ec2b49373710c072b926e9a38a 100644 --- a/drivers/net/hp.c +++ b/drivers/net/hp.c @@ -86,8 +86,6 @@ static int __init do_hp_probe(struct net_device *dev) int base_addr = dev->base_addr; int irq = dev->irq; - SET_MODULE_OWNER(dev); - if (base_addr > 0x1ff) /* Check a single specified location. */ return hp_probe1(dev, base_addr); else if (base_addr != 0) /* Don't probe at all. */ @@ -129,6 +127,7 @@ static int __init hp_probe1(struct net_device *dev, int ioaddr) int i, retval, board_id, wordmode; const char *name; static unsigned version_printed; + DECLARE_MAC_BUF(mac); if (!request_region(ioaddr, HP_IO_EXTENT, DRV_NAME)) return -EBUSY; @@ -160,7 +159,9 @@ static int __init hp_probe1(struct net_device *dev, int ioaddr) printk("%s: %s (ID %02x) at %#3x,", dev->name, name, board_id, ioaddr); for(i = 0; i < ETHER_ADDR_LEN; i++) - printk(" %2.2x", dev->dev_addr[i] = inb(ioaddr + i)); + dev->dev_addr[i] = inb(ioaddr + i); + + printk(" %s", print_mac(mac, dev->dev_addr)); /* Snarf the interrupt now. Someday this could be moved to open(). */ if (dev->irq < 2) { diff --git a/drivers/net/hp100.c b/drivers/net/hp100.c index 8caa591c56494f486461dd831a152bf09d229bb1..e4fde17e28418690483be105a8dfa4a684a9cced 100644 --- a/drivers/net/hp100.c +++ b/drivers/net/hp100.c @@ -404,8 +404,6 @@ struct net_device * __init hp100_probe(int unit) if (!dev) return ERR_PTR(-ENODEV); - SET_MODULE_OWNER(dev); - #ifdef HP100_DEBUG_B hp100_outw(0x4200, TRACE); printk("hp100: %s: probe\n", dev->name); @@ -2095,9 +2093,9 @@ static void hp100_set_multicast_list(struct net_device *dev) addrs = dmi->dmi_addr; if ((*addrs & 0x01) == 0x01) { /* multicast address? 
*/ #ifdef HP100_DEBUG - printk("hp100: %s: multicast = %02x:%02x:%02x:%02x:%02x:%02x, ", - dev->name, addrs[0], addrs[1], addrs[2], - addrs[3], addrs[4], addrs[5]); + DECLARE_MAC_BUF(mac); + printk("hp100: %s: multicast = %s, ", + dev->name, print_mac(mac, addrs)); #endif for (j = idx = 0; j < 6; j++) { idx ^= *addrs++ & 0x3f; @@ -2843,7 +2841,6 @@ static int __init hp100_eisa_probe (struct device *gendev) if (!dev) return -ENOMEM; - SET_MODULE_OWNER(dev); SET_NETDEV_DEV(dev, &edev->dev); err = hp100_probe1(dev, edev->base_addr + 0xC38, HP100_BUS_EISA, NULL); @@ -2896,7 +2893,6 @@ static int __devinit hp100_pci_probe (struct pci_dev *pdev, goto out0; } - SET_MODULE_OWNER(dev); SET_NETDEV_DEV(dev, &pdev->dev); pci_read_config_word(pdev, PCI_COMMAND, &pci_command); @@ -2993,7 +2989,6 @@ static int __init hp100_isa_init(void) return -ENOMEM; } - SET_MODULE_OWNER(dev); err = hp100_isa_probe(dev, hp100_port[i]); if (!err) diff --git a/drivers/net/hplance.c b/drivers/net/hplance.c index c991cb82ff22bc8cf53c84dbf608c75af0ca83b5..be6e5bc7c88117cc9940f41dd5098ddd220adcad 100644 --- a/drivers/net/hplance.c +++ b/drivers/net/hplance.c @@ -141,7 +141,6 @@ static void __init hplance_init(struct net_device *dev, struct dio_dev *d) dev->poll_controller = lance_poll; #endif dev->hard_start_xmit = &lance_start_xmit; - dev->get_stats = &lance_get_stats; dev->set_multicast_list = &lance_set_multicast; dev->dma = 0; diff --git a/drivers/net/hydra.c b/drivers/net/hydra.c index f970bfbb9db2f1d49cb449680df3e36baf59efe3..b96cf2dcb10932bf7a6758fcd399dba1385b50f1 100644 --- a/drivers/net/hydra.c +++ b/drivers/net/hydra.c @@ -103,6 +103,7 @@ static int __devinit hydra_init(struct zorro_dev *z) int start_page, stop_page; int j; int err; + DECLARE_MAC_BUF(mac); static u32 hydra_offsets[16] = { 0x00, 0x02, 0x04, 0x06, 0x08, 0x0a, 0x0c, 0x0e, @@ -112,7 +113,6 @@ static int __devinit hydra_init(struct zorro_dev *z) dev = ____alloc_ei_netdev(0); if (!dev) return -ENOMEM; - SET_MODULE_OWNER(dev); for(j = 0; j < ETHER_ADDR_LEN; j++) dev->dev_addr[j] = *((u8 *)(board + HYDRA_ADDRPROM + 2*j)); @@ -163,10 +163,8 @@ static int __devinit hydra_init(struct zorro_dev *z) zorro_set_drvdata(z, dev); printk(KERN_INFO "%s: Hydra at 0x%08lx, address " - "%02x:%02x:%02x:%02x:%02x:%02x (hydra.c " HYDRA_VERSION ")\n", - dev->name, z->resource.start, dev->dev_addr[0], dev->dev_addr[1], - dev->dev_addr[2], dev->dev_addr[3], dev->dev_addr[4], - dev->dev_addr[5]); + "%s (hydra.c " HYDRA_VERSION ")\n", + dev->name, z->resource.start, print_mac(mac, dev->dev_addr)); return 0; } diff --git a/drivers/net/ibm_emac/Kconfig b/drivers/net/ibm_emac/Kconfig new file mode 100644 index 0000000000000000000000000000000000000000..f61c48047dc0093cd3c77fab0f5946c3ceec10d3 --- /dev/null +++ b/drivers/net/ibm_emac/Kconfig @@ -0,0 +1,70 @@ +config IBM_EMAC + tristate "PowerPC 4xx on-chip Ethernet support" + depends on 4xx && !PPC_MERGE + help + This driver supports the PowerPC 4xx EMAC family of on-chip + Ethernet controllers. 
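Several of the preceding hunks (hamachi, bpqether, hp-plus, hp, hp100, hydra, ibm_emac) replace open-coded "%2.2x:%2.2x:..." address printing with the 2.6.24-era DECLARE_MAC_BUF()/print_mac() helpers. A minimal sketch of that idiom, assuming the helpers as declared in the era's linux/if_ether.h; foo_show_addr is an invented name:

#include <linux/kernel.h>
#include <linux/if_ether.h>
#include <linux/netdevice.h>

static void foo_show_addr(struct net_device *dev)
{
        DECLARE_MAC_BUF(mac);           /* char mac[18] scratch buffer */

        printk(KERN_INFO "%s: MAC %s\n",
               dev->name, print_mac(mac, dev->dev_addr));
}

print_mac() formats the six address bytes into the stack buffer declared by DECLARE_MAC_BUF() and returns it, so the result can be passed straight to a %s conversion exactly as the hunks above do.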
+ +config IBM_EMAC_RXB + int "Number of receive buffers" + depends on IBM_EMAC + default "128" + +config IBM_EMAC_TXB + int "Number of transmit buffers" + depends on IBM_EMAC + default "64" + +config IBM_EMAC_POLL_WEIGHT + int "MAL NAPI polling weight" + depends on IBM_EMAC + default "32" + +config IBM_EMAC_RX_COPY_THRESHOLD + int "RX skb copy threshold (bytes)" + depends on IBM_EMAC + default "256" + +config IBM_EMAC_RX_SKB_HEADROOM + int "Additional RX skb headroom (bytes)" + depends on IBM_EMAC + default "0" + help + Additional receive skb headroom. Note, that driver + will always reserve at least 2 bytes to make IP header + aligned, so usually there is no need to add any additional + headroom. + + If unsure, set to 0. + +config IBM_EMAC_PHY_RX_CLK_FIX + bool "PHY Rx clock workaround" + depends on IBM_EMAC && (405EP || 440GX || 440EP || 440GR) + help + Enable this if EMAC attached to a PHY which doesn't generate + RX clock if there is no link, if this is the case, you will + see "TX disable timeout" or "RX disable timeout" in the system + log. + + If unsure, say N. + +config IBM_EMAC_DEBUG + bool "Debugging" + depends on IBM_EMAC + default n + +config IBM_EMAC_ZMII + bool + depends on IBM_EMAC && (NP405H || NP405L || 44x) + default y + +config IBM_EMAC_RGMII + bool + depends on IBM_EMAC && 440GX + default y + +config IBM_EMAC_TAH + bool + depends on IBM_EMAC && 440GX + default y + diff --git a/drivers/net/ibm_emac/ibm_emac_core.c b/drivers/net/ibm_emac/ibm_emac_core.c index f752e5fc65ba31d43a7267a5cb721275dbf0e89a..73664f226f323b8850d7292ff65fe7f14571d14a 100644 --- a/drivers/net/ibm_emac/ibm_emac_core.c +++ b/drivers/net/ibm_emac/ibm_emac_core.c @@ -353,10 +353,9 @@ static void emac_hash_mc(struct ocp_enet_private *dev) for (dmi = dev->ndev->mc_list; dmi; dmi = dmi->next) { int bit; - DBG2("%d: mc %02x:%02x:%02x:%02x:%02x:%02x" NL, - dev->def->index, - dmi->dmi_addr[0], dmi->dmi_addr[1], dmi->dmi_addr[2], - dmi->dmi_addr[3], dmi->dmi_addr[4], dmi->dmi_addr[5]); + DECLARE_MAC_BUF(mac); + DBG2("%d: mc %s" NL, + dev->def->index, print_mac(mac, dmi->dmi_addr)); bit = 63 - (ether_crc(ETH_ALEN, dmi->dmi_addr) >> 26); gaht[bit >> 4] |= 0x8000 >> (bit & 0x0f); @@ -1843,9 +1842,14 @@ static int emac_ethtool_nway_reset(struct net_device *ndev) return res; } -static int emac_ethtool_get_stats_count(struct net_device *ndev) +static int emac_get_sset_count(struct net_device *ndev, int sset) { - return EMAC_ETHTOOL_STATS_COUNT; + switch (sset) { + case ETH_SS_STATS: + return EMAC_ETHTOOL_STATS_COUNT; + default: + return -EOPNOTSUPP; + } } static void emac_ethtool_get_strings(struct net_device *ndev, u32 stringset, @@ -1876,7 +1880,6 @@ static void emac_ethtool_get_drvinfo(struct net_device *ndev, strcpy(info->version, DRV_VERSION); info->fw_version[0] = '\0'; sprintf(info->bus_info, "PPC 4xx EMAC %d", dev->def->index); - info->n_stats = emac_ethtool_get_stats_count(ndev); info->regdump_len = emac_ethtool_get_regs_len(ndev); } @@ -1896,12 +1899,10 @@ static const struct ethtool_ops emac_ethtool_ops = { .get_rx_csum = emac_ethtool_get_rx_csum, .get_strings = emac_ethtool_get_strings, - .get_stats_count = emac_ethtool_get_stats_count, + .get_sset_count = emac_get_sset_count, .get_ethtool_stats = emac_ethtool_get_ethtool_stats, .get_link = ethtool_op_get_link, - .get_tx_csum = ethtool_op_get_tx_csum, - .get_sg = ethtool_op_get_sg, }; static int emac_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd) @@ -1942,6 +1943,7 @@ static int __init emac_probe(struct ocp_device *ocpdev) struct ocp_device 
*maldev; struct ocp_enet_private *dev; int err, i; + DECLARE_MAC_BUF(mac); DBG("%d: probe" NL, ocpdev->def->index); @@ -1962,7 +1964,6 @@ static int __init emac_probe(struct ocp_device *ocpdev) dev->ndev = ndev; dev->ldev = &ocpdev->dev; dev->def = ocpdev->def; - SET_MODULE_OWNER(ndev); /* Find MAL device we are connected to */ maldev = @@ -2191,10 +2192,8 @@ static int __init emac_probe(struct ocp_device *ocpdev) ocp_set_drvdata(ocpdev, dev); - printk("%s: emac%d, MAC %02x:%02x:%02x:%02x:%02x:%02x\n", - ndev->name, dev->def->index, - ndev->dev_addr[0], ndev->dev_addr[1], ndev->dev_addr[2], - ndev->dev_addr[3], ndev->dev_addr[4], ndev->dev_addr[5]); + printk("%s: emac%d, MAC %s\n", + ndev->name, dev->def->index, print_mac(mac, ndev->dev_addr)); if (dev->phy.address >= 0) printk("%s: found %s PHY (0x%02x)\n", ndev->name, diff --git a/drivers/net/ibm_emac/ibm_emac_debug.c b/drivers/net/ibm_emac/ibm_emac_debug.c index 92f970d402df85c86bfd8a01d4f6d363373b8d56..1f70906cfb98b79147dfde2cc1beb39d2fe5cdb1 100644 --- a/drivers/net/ibm_emac/ibm_emac_debug.c +++ b/drivers/net/ibm_emac/ibm_emac_debug.c @@ -132,7 +132,7 @@ void emac_dbg_register(int idx, struct ocp_enet_private *dev) { unsigned long flags; - if (idx >= sizeof(__emacs) / sizeof(__emacs[0])) { + if (idx >= ARRAY_SIZE(__emacs)) { printk(KERN_WARNING "invalid index %d when registering EMAC for debugging\n", idx); @@ -148,7 +148,7 @@ void mal_dbg_register(int idx, struct ibm_ocp_mal *mal) { unsigned long flags; - if (idx >= sizeof(__mals) / sizeof(__mals[0])) { + if (idx >= ARRAY_SIZE(__mals)) { printk(KERN_WARNING "invalid index %d when registering MAL for debugging\n", idx); @@ -167,11 +167,11 @@ void emac_dbg_dump_all(void) local_irq_save(flags); - for (i = 0; i < sizeof(__mals) / sizeof(__mals[0]); ++i) + for (i = 0; i < ARRAY_SIZE(__mals); ++i) if (__mals[i]) emac_mal_dump(__mals[i]); - for (i = 0; i < sizeof(__emacs) / sizeof(__emacs[0]); ++i) + for (i = 0; i < ARRAY_SIZE(__emacs); ++i) if (__emacs[i]) emac_mac_dump(i, __emacs[i]); diff --git a/drivers/net/ibm_emac/ibm_emac_mal.c b/drivers/net/ibm_emac/ibm_emac_mal.c index cabd9846a5ee55784845d4644d1a5027e1ac177a..4e49e8c4f8715306c3606dc1e7b67542d589c674 100644 --- a/drivers/net/ibm_emac/ibm_emac_mal.c +++ b/drivers/net/ibm_emac/ibm_emac_mal.c @@ -207,10 +207,10 @@ static irqreturn_t mal_serr(int irq, void *dev_instance) static inline void mal_schedule_poll(struct ibm_ocp_mal *mal) { - if (likely(netif_rx_schedule_prep(&mal->poll_dev))) { + if (likely(napi_schedule_prep(&mal->napi))) { MAL_DBG2("%d: schedule_poll" NL, mal->def->index); mal_disable_eob_irq(mal); - __netif_rx_schedule(&mal->poll_dev); + __napi_schedule(&mal->napi); } else MAL_DBG2("%d: already in poll" NL, mal->def->index); } @@ -273,11 +273,11 @@ static irqreturn_t mal_rxde(int irq, void *dev_instance) return IRQ_HANDLED; } -static int mal_poll(struct net_device *ndev, int *budget) +static int mal_poll(struct napi_struct *napi, int budget) { - struct ibm_ocp_mal *mal = ndev->priv; + struct ibm_ocp_mal *mal = container_of(napi, struct ibm_ocp_mal, napi); struct list_head *l; - int rx_work_limit = min(ndev->quota, *budget), received = 0, done; + int received = 0; MAL_DBG2("%d: poll(%d) %d ->" NL, mal->def->index, *budget, rx_work_limit); @@ -295,38 +295,34 @@ static int mal_poll(struct net_device *ndev, int *budget) list_for_each(l, &mal->poll_list) { struct mal_commac *mc = list_entry(l, struct mal_commac, poll_list); - int n = mc->ops->poll_rx(mc->dev, rx_work_limit); + int n = mc->ops->poll_rx(mc->dev, budget); if (n) 
{ received += n; - rx_work_limit -= n; - if (rx_work_limit <= 0) { - done = 0; + budget -= n; + if (budget <= 0) goto more_work; // XXX What if this is the last one ? - } } } /* We need to disable IRQs to protect from RXDE IRQ here */ local_irq_disable(); - __netif_rx_complete(ndev); + __napi_complete(napi); mal_enable_eob_irq(mal); local_irq_enable(); - done = 1; - /* Check for "rotting" packet(s) */ list_for_each(l, &mal->poll_list) { struct mal_commac *mc = list_entry(l, struct mal_commac, poll_list); if (unlikely(mc->ops->peek_rx(mc->dev) || mc->rx_stopped)) { MAL_DBG2("%d: rotting packet" NL, mal->def->index); - if (netif_rx_reschedule(ndev, received)) + if (napi_reschedule(napi)) mal_disable_eob_irq(mal); else MAL_DBG2("%d: already in poll list" NL, mal->def->index); - if (rx_work_limit > 0) + if (budget > 0) goto again; else goto more_work; @@ -335,12 +331,8 @@ static int mal_poll(struct net_device *ndev, int *budget) } more_work: - ndev->quota -= received; - *budget -= received; - - MAL_DBG2("%d: poll() %d <- %d" NL, mal->def->index, *budget, - done ? 0 : 1); - return done ? 0 : 1; + MAL_DBG2("%d: poll() %d <- %d" NL, mal->def->index, budget, received); + return received; } static void mal_reset(struct ibm_ocp_mal *mal) @@ -425,11 +417,8 @@ static int __init mal_probe(struct ocp_device *ocpdev) mal->def = ocpdev->def; INIT_LIST_HEAD(&mal->poll_list); - set_bit(__LINK_STATE_START, &mal->poll_dev.state); - mal->poll_dev.weight = CONFIG_IBM_EMAC_POLL_WEIGHT; - mal->poll_dev.poll = mal_poll; - mal->poll_dev.priv = mal; - atomic_set(&mal->poll_dev.refcnt, 1); + mal->napi.weight = CONFIG_IBM_EMAC_POLL_WEIGHT; + mal->napi.poll = mal_poll; INIT_LIST_HEAD(&mal->list); @@ -520,11 +509,8 @@ static void __exit mal_remove(struct ocp_device *ocpdev) MAL_DBG("%d: remove" NL, mal->def->index); - /* Syncronize with scheduled polling, - stolen from net/core/dev.c:dev_close() - */ - clear_bit(__LINK_STATE_START, &mal->poll_dev.state); - netif_poll_disable(&mal->poll_dev); + /* Synchronize with scheduled polling */ + napi_disable(&mal->napi); if (!list_empty(&mal->list)) { /* This is *very* bad */ diff --git a/drivers/net/ibm_emac/ibm_emac_mal.h b/drivers/net/ibm_emac/ibm_emac_mal.h index 64bc338acc6c9ccb47126e01ae23a7d1cd5cd828..8f54d621994d0c058f210f6f1f6a9e2070242460 100644 --- a/drivers/net/ibm_emac/ibm_emac_mal.h +++ b/drivers/net/ibm_emac/ibm_emac_mal.h @@ -195,7 +195,7 @@ struct ibm_ocp_mal { dcr_host_t dcrhost; struct list_head poll_list; - struct net_device poll_dev; + struct napi_struct napi; struct list_head list; u32 tx_chan_mask; diff --git a/drivers/net/ibm_newemac/Kconfig b/drivers/net/ibm_newemac/Kconfig new file mode 100644 index 0000000000000000000000000000000000000000..0d3e7380bad0b268343646668bc38f884c327883 --- /dev/null +++ b/drivers/net/ibm_newemac/Kconfig @@ -0,0 +1,63 @@ +config IBM_NEW_EMAC + tristate "IBM EMAC Ethernet support" + depends on PPC_DCR && PPC_MERGE + help + This driver supports the IBM EMAC family of Ethernet controllers + typically found on 4xx embedded PowerPC chips, but also on the + Axon southbridge for Cell. 
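The gfar_poll() and mal_poll() hunks above are instances of the same 2.6.24 NAPI rework: the driver embeds a struct napi_struct, the poll callback takes (napi, budget) and returns the number of packets it processed, and the old dev->quota / *budget bookkeeping disappears. A minimal sketch of the pattern, not taken from either driver (the foo_* names and the weight of 64 are invented, and the RX-cleanup call is only indicated by a comment):

#include <linux/kernel.h>
#include <linux/netdevice.h>

struct foo_priv {
        struct napi_struct napi;
        struct net_device *dev;
};

static int foo_poll(struct napi_struct *napi, int budget)
{
        struct foo_priv *priv = container_of(napi, struct foo_priv, napi);
        struct net_device *dev = priv->dev;
        int done = 0;

        /* done = foo_clean_rx_ring(dev, budget); -- process at most `budget` packets */

        if (done < budget) {
                /* all pending work handled: leave polled mode and
                 * re-enable the device's RX interrupt here */
                netif_rx_complete(dev, napi);
        }

        return done;            /* packets processed, never more than budget */
}

static void foo_napi_init(struct net_device *dev, struct foo_priv *priv)
{
        priv->dev = dev;
        /* register the poll callback with the stack */
        netif_napi_add(dev, &priv->napi, foo_poll, 64);
}

MAL has no struct net_device of its own, which is why the hunk above fills in mal->napi.weight and mal->napi.poll by hand instead of going through netif_napi_add().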
+ +config IBM_NEW_EMAC_RXB + int "Number of receive buffers" + depends on IBM_NEW_EMAC + default "128" + +config IBM_NEW_EMAC_TXB + int "Number of transmit buffers" + depends on IBM_NEW_EMAC + default "64" + +config IBM_NEW_EMAC_POLL_WEIGHT + int "MAL NAPI polling weight" + depends on IBM_NEW_EMAC + default "32" + +config IBM_NEW_EMAC_RX_COPY_THRESHOLD + int "RX skb copy threshold (bytes)" + depends on IBM_NEW_EMAC + default "256" + +config IBM_NEW_EMAC_RX_SKB_HEADROOM + int "Additional RX skb headroom (bytes)" + depends on IBM_NEW_EMAC + default "0" + help + Additional receive skb headroom. Note, that driver + will always reserve at least 2 bytes to make IP header + aligned, so usually there is no need to add any additional + headroom. + + If unsure, set to 0. + +config IBM_NEW_EMAC_DEBUG + bool "Debugging" + depends on IBM_NEW_EMAC + default n + +# The options below has to be select'ed by the respective +# processor types or platforms + +config IBM_NEW_EMAC_ZMII + bool + default n + +config IBM_NEW_EMAC_RGMII + bool + default n + +config IBM_NEW_EMAC_TAH + bool + default n + +config IBM_NEW_EMAC_EMAC4 + bool + default n diff --git a/drivers/net/ibm_newemac/Makefile b/drivers/net/ibm_newemac/Makefile new file mode 100644 index 0000000000000000000000000000000000000000..0b5c995127628fc740da0210135c58d992d1b44f --- /dev/null +++ b/drivers/net/ibm_newemac/Makefile @@ -0,0 +1,11 @@ +# +# Makefile for the PowerPC 4xx on-chip ethernet driver +# + +obj-$(CONFIG_IBM_NEW_EMAC) += ibm_newemac.o + +ibm_newemac-y := mal.o core.o phy.o +ibm_newemac-$(CONFIG_IBM_NEW_EMAC_ZMII) += zmii.o +ibm_newemac-$(CONFIG_IBM_NEW_EMAC_RGMII) += rgmii.o +ibm_newemac-$(CONFIG_IBM_NEW_EMAC_TAH) += tah.o +ibm_newemac-$(CONFIG_IBM_NEW_EMAC_DEBUG) += debug.o diff --git a/drivers/net/ibm_newemac/core.c b/drivers/net/ibm_newemac/core.c new file mode 100644 index 0000000000000000000000000000000000000000..8ea500961871e79ce6e713199756e536797db441 --- /dev/null +++ b/drivers/net/ibm_newemac/core.c @@ -0,0 +1,2906 @@ +/* + * drivers/net/ibm_newemac/core.c + * + * Driver for PowerPC 4xx on-chip ethernet controller. + * + * Copyright (c) 2004, 2005 Zultys Technologies. + * Eugene Surovegin or + * + * Based on original work by + * Matt Porter + * (c) 2003 Benjamin Herrenschmidt + * Armin Kuster + * Johnnie Peters + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#include "core.h" + +/* + * Lack of dma_unmap_???? calls is intentional. + * + * API-correct usage requires additional support state information to be + * maintained for every RX and TX buffer descriptor (BD). Unfortunately, due to + * EMAC design (e.g. TX buffer passed from network stack can be split into + * several BDs, dma_map_single/dma_map_page can be used to map particular BD), + * maintaining such information will add additional overhead. + * Current DMA API implementation for 4xx processors only ensures cache coherency + * and dma_unmap_???? routines are empty and are likely to stay this way. + * I decided to omit dma_unmap_??? 
calls because I don't want to add additional + * complexity just for the sake of following some abstract API, when it doesn't + * add any real benefit to the driver. I understand that this decision maybe + * controversial, but I really tried to make code API-correct and efficient + * at the same time and didn't come up with code I liked :(. --ebs + */ + +#define DRV_NAME "emac" +#define DRV_VERSION "3.54" +#define DRV_DESC "PPC 4xx OCP EMAC driver" + +MODULE_DESCRIPTION(DRV_DESC); +MODULE_AUTHOR + ("Eugene Surovegin or "); +MODULE_LICENSE("GPL"); + +/* + * PPC64 doesn't (yet) have a cacheable_memcpy + */ +#ifdef CONFIG_PPC64 +#define cacheable_memcpy(d,s,n) memcpy((d),(s),(n)) +#endif + +/* minimum number of free TX descriptors required to wake up TX process */ +#define EMAC_TX_WAKEUP_THRESH (NUM_TX_BUFF / 4) + +/* If packet size is less than this number, we allocate small skb and copy packet + * contents into it instead of just sending original big skb up + */ +#define EMAC_RX_COPY_THRESH CONFIG_IBM_NEW_EMAC_RX_COPY_THRESHOLD + +/* Since multiple EMACs share MDIO lines in various ways, we need + * to avoid re-using the same PHY ID in cases where the arch didn't + * setup precise phy_map entries + * + * XXX This is something that needs to be reworked as we can have multiple + * EMAC "sets" (multiple ASICs containing several EMACs) though we can + * probably require in that case to have explicit PHY IDs in the device-tree + */ +static u32 busy_phy_map; +static DEFINE_MUTEX(emac_phy_map_lock); + +/* This is the wait queue used to wait on any event related to probe, that + * is discovery of MALs, other EMACs, ZMII/RGMIIs, etc... + */ +static DECLARE_WAIT_QUEUE_HEAD(emac_probe_wait); + +/* Having stable interface names is a doomed idea. However, it would be nice + * if we didn't have completely random interface names at boot too :-) It's + * just a matter of making everybody's life easier. Since we are doing + * threaded probing, it's a bit harder though. The base idea here is that + * we make up a list of all emacs in the device-tree before we register the + * driver. Every emac will then wait for the previous one in the list to + * initialize before itself. We should also keep that list ordered by + * cell_index. + * That list is only 4 entries long, meaning that additional EMACs don't + * get ordering guarantees unless EMAC_BOOT_LIST_SIZE is increased. + */ + +#define EMAC_BOOT_LIST_SIZE 4 +static struct device_node *emac_boot_list[EMAC_BOOT_LIST_SIZE]; + +/* How long should I wait for dependent devices ? */ +#define EMAC_PROBE_DEP_TIMEOUT (HZ * 5) + +/* I don't want to litter system log with timeout errors + * when we have brain-damaged PHY. + */ +static inline void emac_report_timeout_error(struct emac_instance *dev, + const char *error) +{ + if (net_ratelimit()) + printk(KERN_ERR "%s: %s\n", dev->ndev->name, error); +} + +/* PHY polling intervals */ +#define PHY_POLL_LINK_ON HZ +#define PHY_POLL_LINK_OFF (HZ / 5) + +/* Graceful stop timeouts in us. 
+ * We should allow up to 1 frame time (full-duplex, ignoring collisions) + */ +#define STOP_TIMEOUT_10 1230 +#define STOP_TIMEOUT_100 124 +#define STOP_TIMEOUT_1000 13 +#define STOP_TIMEOUT_1000_JUMBO 73 + +/* Please, keep in sync with struct ibm_emac_stats/ibm_emac_error_stats */ +static const char emac_stats_keys[EMAC_ETHTOOL_STATS_COUNT][ETH_GSTRING_LEN] = { + "rx_packets", "rx_bytes", "tx_packets", "tx_bytes", "rx_packets_csum", + "tx_packets_csum", "tx_undo", "rx_dropped_stack", "rx_dropped_oom", + "rx_dropped_error", "rx_dropped_resize", "rx_dropped_mtu", + "rx_stopped", "rx_bd_errors", "rx_bd_overrun", "rx_bd_bad_packet", + "rx_bd_runt_packet", "rx_bd_short_event", "rx_bd_alignment_error", + "rx_bd_bad_fcs", "rx_bd_packet_too_long", "rx_bd_out_of_range", + "rx_bd_in_range", "rx_parity", "rx_fifo_overrun", "rx_overrun", + "rx_bad_packet", "rx_runt_packet", "rx_short_event", + "rx_alignment_error", "rx_bad_fcs", "rx_packet_too_long", + "rx_out_of_range", "rx_in_range", "tx_dropped", "tx_bd_errors", + "tx_bd_bad_fcs", "tx_bd_carrier_loss", "tx_bd_excessive_deferral", + "tx_bd_excessive_collisions", "tx_bd_late_collision", + "tx_bd_multple_collisions", "tx_bd_single_collision", + "tx_bd_underrun", "tx_bd_sqe", "tx_parity", "tx_underrun", "tx_sqe", + "tx_errors" +}; + +static irqreturn_t emac_irq(int irq, void *dev_instance); +static void emac_clean_tx_ring(struct emac_instance *dev); +static void __emac_set_multicast_list(struct emac_instance *dev); + +static inline int emac_phy_supports_gige(int phy_mode) +{ + return phy_mode == PHY_MODE_GMII || + phy_mode == PHY_MODE_RGMII || + phy_mode == PHY_MODE_TBI || + phy_mode == PHY_MODE_RTBI; +} + +static inline int emac_phy_gpcs(int phy_mode) +{ + return phy_mode == PHY_MODE_TBI || + phy_mode == PHY_MODE_RTBI; +} + +static inline void emac_tx_enable(struct emac_instance *dev) +{ + struct emac_regs __iomem *p = dev->emacp; + u32 r; + + DBG(dev, "tx_enable" NL); + + r = in_be32(&p->mr0); + if (!(r & EMAC_MR0_TXE)) + out_be32(&p->mr0, r | EMAC_MR0_TXE); +} + +static void emac_tx_disable(struct emac_instance *dev) +{ + struct emac_regs __iomem *p = dev->emacp; + u32 r; + + DBG(dev, "tx_disable" NL); + + r = in_be32(&p->mr0); + if (r & EMAC_MR0_TXE) { + int n = dev->stop_timeout; + out_be32(&p->mr0, r & ~EMAC_MR0_TXE); + while (!(in_be32(&p->mr0) & EMAC_MR0_TXI) && n) { + udelay(1); + --n; + } + if (unlikely(!n)) + emac_report_timeout_error(dev, "TX disable timeout"); + } +} + +static void emac_rx_enable(struct emac_instance *dev) +{ + struct emac_regs __iomem *p = dev->emacp; + u32 r; + + if (unlikely(test_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags))) + goto out; + + DBG(dev, "rx_enable" NL); + + r = in_be32(&p->mr0); + if (!(r & EMAC_MR0_RXE)) { + if (unlikely(!(r & EMAC_MR0_RXI))) { + /* Wait if previous async disable is still in progress */ + int n = dev->stop_timeout; + while (!(r = in_be32(&p->mr0) & EMAC_MR0_RXI) && n) { + udelay(1); + --n; + } + if (unlikely(!n)) + emac_report_timeout_error(dev, + "RX disable timeout"); + } + out_be32(&p->mr0, r | EMAC_MR0_RXE); + } + out: + ; +} + +static void emac_rx_disable(struct emac_instance *dev) +{ + struct emac_regs __iomem *p = dev->emacp; + u32 r; + + DBG(dev, "rx_disable" NL); + + r = in_be32(&p->mr0); + if (r & EMAC_MR0_RXE) { + int n = dev->stop_timeout; + out_be32(&p->mr0, r & ~EMAC_MR0_RXE); + while (!(in_be32(&p->mr0) & EMAC_MR0_RXI) && n) { + udelay(1); + --n; + } + if (unlikely(!n)) + emac_report_timeout_error(dev, "RX disable timeout"); + } +} + +static inline void 
emac_netif_stop(struct emac_instance *dev) +{ + netif_tx_lock_bh(dev->ndev); + dev->no_mcast = 1; + netif_tx_unlock_bh(dev->ndev); + dev->ndev->trans_start = jiffies; /* prevent tx timeout */ + mal_poll_disable(dev->mal, &dev->commac); + netif_tx_disable(dev->ndev); +} + +static inline void emac_netif_start(struct emac_instance *dev) +{ + netif_tx_lock_bh(dev->ndev); + dev->no_mcast = 0; + if (dev->mcast_pending && netif_running(dev->ndev)) + __emac_set_multicast_list(dev); + netif_tx_unlock_bh(dev->ndev); + + netif_wake_queue(dev->ndev); + + /* NOTE: unconditional netif_wake_queue is only appropriate + * so long as all callers are assured to have free tx slots + * (taken from tg3... though the case where that is wrong is + * not terribly harmful) + */ + mal_poll_enable(dev->mal, &dev->commac); +} + +static inline void emac_rx_disable_async(struct emac_instance *dev) +{ + struct emac_regs __iomem *p = dev->emacp; + u32 r; + + DBG(dev, "rx_disable_async" NL); + + r = in_be32(&p->mr0); + if (r & EMAC_MR0_RXE) + out_be32(&p->mr0, r & ~EMAC_MR0_RXE); +} + +static int emac_reset(struct emac_instance *dev) +{ + struct emac_regs __iomem *p = dev->emacp; + int n = 20; + + DBG(dev, "reset" NL); + + if (!dev->reset_failed) { + /* 40x erratum suggests stopping RX channel before reset, + * we stop TX as well + */ + emac_rx_disable(dev); + emac_tx_disable(dev); + } + + out_be32(&p->mr0, EMAC_MR0_SRST); + while ((in_be32(&p->mr0) & EMAC_MR0_SRST) && n) + --n; + + if (n) { + dev->reset_failed = 0; + return 0; + } else { + emac_report_timeout_error(dev, "reset timeout"); + dev->reset_failed = 1; + return -ETIMEDOUT; + } +} + +static void emac_hash_mc(struct emac_instance *dev) +{ + struct emac_regs __iomem *p = dev->emacp; + u16 gaht[4] = { 0 }; + struct dev_mc_list *dmi; + + DBG(dev, "hash_mc %d" NL, dev->ndev->mc_count); + + for (dmi = dev->ndev->mc_list; dmi; dmi = dmi->next) { + int bit; + DBG2(dev, "mc %02x:%02x:%02x:%02x:%02x:%02x" NL, + dmi->dmi_addr[0], dmi->dmi_addr[1], dmi->dmi_addr[2], + dmi->dmi_addr[3], dmi->dmi_addr[4], dmi->dmi_addr[5]); + + bit = 63 - (ether_crc(ETH_ALEN, dmi->dmi_addr) >> 26); + gaht[bit >> 4] |= 0x8000 >> (bit & 0x0f); + } + out_be32(&p->gaht1, gaht[0]); + out_be32(&p->gaht2, gaht[1]); + out_be32(&p->gaht3, gaht[2]); + out_be32(&p->gaht4, gaht[3]); +} + +static inline u32 emac_iff2rmr(struct net_device *ndev) +{ + struct emac_instance *dev = netdev_priv(ndev); + u32 r; + + r = EMAC_RMR_SP | EMAC_RMR_SFCS | EMAC_RMR_IAE | EMAC_RMR_BAE; + + if (emac_has_feature(dev, EMAC_FTR_EMAC4)) + r |= EMAC4_RMR_BASE; + else + r |= EMAC_RMR_BASE; + + if (ndev->flags & IFF_PROMISC) + r |= EMAC_RMR_PME; + else if (ndev->flags & IFF_ALLMULTI || ndev->mc_count > 32) + r |= EMAC_RMR_PMME; + else if (ndev->mc_count > 0) + r |= EMAC_RMR_MAE; + + return r; +} + +static u32 __emac_calc_base_mr1(struct emac_instance *dev, int tx_size, int rx_size) +{ + u32 ret = EMAC_MR1_VLE | EMAC_MR1_IST | EMAC_MR1_TR0_MULT; + + DBG2(dev, "__emac_calc_base_mr1" NL); + + switch(tx_size) { + case 2048: + ret |= EMAC_MR1_TFS_2K; + break; + default: + printk(KERN_WARNING "%s: Unknown Rx FIFO size %d\n", + dev->ndev->name, tx_size); + } + + switch(rx_size) { + case 16384: + ret |= EMAC_MR1_RFS_16K; + break; + case 4096: + ret |= EMAC_MR1_RFS_4K; + break; + default: + printk(KERN_WARNING "%s: Unknown Rx FIFO size %d\n", + dev->ndev->name, rx_size); + } + + return ret; +} + +static u32 __emac4_calc_base_mr1(struct emac_instance *dev, int tx_size, int rx_size) +{ + u32 ret = EMAC_MR1_VLE | EMAC_MR1_IST | EMAC4_MR1_TR 
| + EMAC4_MR1_OBCI(dev->opb_bus_freq); + + DBG2(dev, "__emac4_calc_base_mr1" NL); + + switch(tx_size) { + case 4096: + ret |= EMAC4_MR1_TFS_4K; + break; + case 2048: + ret |= EMAC4_MR1_TFS_2K; + break; + default: + printk(KERN_WARNING "%s: Unknown Rx FIFO size %d\n", + dev->ndev->name, tx_size); + } + + switch(rx_size) { + case 16384: + ret |= EMAC4_MR1_RFS_16K; + break; + case 4096: + ret |= EMAC4_MR1_RFS_4K; + break; + case 2048: + ret |= EMAC4_MR1_RFS_2K; + break; + default: + printk(KERN_WARNING "%s: Unknown Rx FIFO size %d\n", + dev->ndev->name, rx_size); + } + + return ret; +} + +static u32 emac_calc_base_mr1(struct emac_instance *dev, int tx_size, int rx_size) +{ + return emac_has_feature(dev, EMAC_FTR_EMAC4) ? + __emac4_calc_base_mr1(dev, tx_size, rx_size) : + __emac_calc_base_mr1(dev, tx_size, rx_size); +} + +static inline u32 emac_calc_trtr(struct emac_instance *dev, unsigned int size) +{ + if (emac_has_feature(dev, EMAC_FTR_EMAC4)) + return ((size >> 6) - 1) << EMAC_TRTR_SHIFT_EMAC4; + else + return ((size >> 6) - 1) << EMAC_TRTR_SHIFT; +} + +static inline u32 emac_calc_rwmr(struct emac_instance *dev, + unsigned int low, unsigned int high) +{ + if (emac_has_feature(dev, EMAC_FTR_EMAC4)) + return (low << 22) | ( (high & 0x3ff) << 6); + else + return (low << 23) | ( (high & 0x1ff) << 7); +} + +static int emac_configure(struct emac_instance *dev) +{ + struct emac_regs __iomem *p = dev->emacp; + struct net_device *ndev = dev->ndev; + int tx_size, rx_size; + u32 r, mr1 = 0; + + DBG(dev, "configure" NL); + + if (emac_reset(dev) < 0) + return -ETIMEDOUT; + + if (emac_has_feature(dev, EMAC_FTR_HAS_TAH)) + tah_reset(dev->tah_dev); + + DBG(dev, " duplex = %d, pause = %d, asym_pause = %d\n", + dev->phy.duplex, dev->phy.pause, dev->phy.asym_pause); + + /* Default fifo sizes */ + tx_size = dev->tx_fifo_size; + rx_size = dev->rx_fifo_size; + + /* Check for full duplex */ + if (dev->phy.duplex == DUPLEX_FULL) + mr1 |= EMAC_MR1_FDE | EMAC_MR1_MWSW_001; + + /* Adjust fifo sizes, mr1 and timeouts based on link speed */ + dev->stop_timeout = STOP_TIMEOUT_10; + switch (dev->phy.speed) { + case SPEED_1000: + if (emac_phy_gpcs(dev->phy.mode)) { + mr1 |= EMAC_MR1_MF_1000GPCS | + EMAC_MR1_MF_IPPA(dev->phy.address); + + /* Put some arbitrary OUI, Manuf & Rev IDs so we can + * identify this GPCS PHY later. 
+ */ + out_be32(&p->ipcr, 0xdeadbeef); + } else + mr1 |= EMAC_MR1_MF_1000; + + /* Extended fifo sizes */ + tx_size = dev->tx_fifo_size_gige; + rx_size = dev->rx_fifo_size_gige; + + if (dev->ndev->mtu > ETH_DATA_LEN) { + mr1 |= EMAC_MR1_JPSM; + dev->stop_timeout = STOP_TIMEOUT_1000_JUMBO; + } else + dev->stop_timeout = STOP_TIMEOUT_1000; + break; + case SPEED_100: + mr1 |= EMAC_MR1_MF_100; + dev->stop_timeout = STOP_TIMEOUT_100; + break; + default: /* make gcc happy */ + break; + } + + if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII)) + rgmii_set_speed(dev->rgmii_dev, dev->rgmii_port, + dev->phy.speed); + if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII)) + zmii_set_speed(dev->zmii_dev, dev->zmii_port, dev->phy.speed); + + /* on 40x erratum forces us to NOT use integrated flow control, + * let's hope it works on 44x ;) + */ + if (!emac_has_feature(dev, EMAC_FTR_NO_FLOW_CONTROL_40x) && + dev->phy.duplex == DUPLEX_FULL) { + if (dev->phy.pause) + mr1 |= EMAC_MR1_EIFC | EMAC_MR1_APP; + else if (dev->phy.asym_pause) + mr1 |= EMAC_MR1_APP; + } + + /* Add base settings & fifo sizes & program MR1 */ + mr1 |= emac_calc_base_mr1(dev, tx_size, rx_size); + out_be32(&p->mr1, mr1); + + /* Set individual MAC address */ + out_be32(&p->iahr, (ndev->dev_addr[0] << 8) | ndev->dev_addr[1]); + out_be32(&p->ialr, (ndev->dev_addr[2] << 24) | + (ndev->dev_addr[3] << 16) | (ndev->dev_addr[4] << 8) | + ndev->dev_addr[5]); + + /* VLAN Tag Protocol ID */ + out_be32(&p->vtpid, 0x8100); + + /* Receive mode register */ + r = emac_iff2rmr(ndev); + if (r & EMAC_RMR_MAE) + emac_hash_mc(dev); + out_be32(&p->rmr, r); + + /* FIFOs thresholds */ + if (emac_has_feature(dev, EMAC_FTR_EMAC4)) + r = EMAC4_TMR1((dev->mal_burst_size / dev->fifo_entry_size) + 1, + tx_size / 2 / dev->fifo_entry_size); + else + r = EMAC_TMR1((dev->mal_burst_size / dev->fifo_entry_size) + 1, + tx_size / 2 / dev->fifo_entry_size); + out_be32(&p->tmr1, r); + out_be32(&p->trtr, emac_calc_trtr(dev, tx_size / 2)); + + /* PAUSE frame is sent when RX FIFO reaches its high-water mark, + there should be still enough space in FIFO to allow the our link + partner time to process this frame and also time to send PAUSE + frame itself. 
+ + Here is the worst case scenario for the RX FIFO "headroom" + (from "The Switch Book") (100Mbps, without preamble, inter-frame gap): + + 1) One maximum-length frame on TX 1522 bytes + 2) One PAUSE frame time 64 bytes + 3) PAUSE frame decode time allowance 64 bytes + 4) One maximum-length frame on RX 1522 bytes + 5) Round-trip propagation delay of the link (100Mb) 15 bytes + ---------- + 3187 bytes + + I chose to set high-water mark to RX_FIFO_SIZE / 4 (1024 bytes) + low-water mark to RX_FIFO_SIZE / 8 (512 bytes) + */ + r = emac_calc_rwmr(dev, rx_size / 8 / dev->fifo_entry_size, + rx_size / 4 / dev->fifo_entry_size); + out_be32(&p->rwmr, r); + + /* Set PAUSE timer to the maximum */ + out_be32(&p->ptr, 0xffff); + + /* IRQ sources */ + r = EMAC_ISR_OVR | EMAC_ISR_BP | EMAC_ISR_SE | + EMAC_ISR_ALE | EMAC_ISR_BFCS | EMAC_ISR_PTLE | EMAC_ISR_ORE | + EMAC_ISR_IRE | EMAC_ISR_TE; + if (emac_has_feature(dev, EMAC_FTR_EMAC4)) + r |= EMAC4_ISR_TXPE | EMAC4_ISR_RXPE /* | EMAC4_ISR_TXUE | + EMAC4_ISR_RXOE | */; + out_be32(&p->iser, r); + + /* We need to take GPCS PHY out of isolate mode after EMAC reset */ + if (emac_phy_gpcs(dev->phy.mode)) + emac_mii_reset_phy(&dev->phy); + + return 0; +} + +static void emac_reinitialize(struct emac_instance *dev) +{ + DBG(dev, "reinitialize" NL); + + emac_netif_stop(dev); + if (!emac_configure(dev)) { + emac_tx_enable(dev); + emac_rx_enable(dev); + } + emac_netif_start(dev); +} + +static void emac_full_tx_reset(struct emac_instance *dev) +{ + DBG(dev, "full_tx_reset" NL); + + emac_tx_disable(dev); + mal_disable_tx_channel(dev->mal, dev->mal_tx_chan); + emac_clean_tx_ring(dev); + dev->tx_cnt = dev->tx_slot = dev->ack_slot = 0; + + emac_configure(dev); + + mal_enable_tx_channel(dev->mal, dev->mal_tx_chan); + emac_tx_enable(dev); + emac_rx_enable(dev); +} + +static void emac_reset_work(struct work_struct *work) +{ + struct emac_instance *dev = container_of(work, struct emac_instance, reset_work); + + DBG(dev, "reset_work" NL); + + mutex_lock(&dev->link_lock); + emac_netif_stop(dev); + emac_full_tx_reset(dev); + emac_netif_start(dev); + mutex_unlock(&dev->link_lock); +} + +static void emac_tx_timeout(struct net_device *ndev) +{ + struct emac_instance *dev = netdev_priv(ndev); + + DBG(dev, "tx_timeout" NL); + + schedule_work(&dev->reset_work); +} + + +static inline int emac_phy_done(struct emac_instance *dev, u32 stacr) +{ + int done = !!(stacr & EMAC_STACR_OC); + + if (emac_has_feature(dev, EMAC_FTR_STACR_OC_INVERT)) + done = !done; + + return done; +}; + +static int __emac_mdio_read(struct emac_instance *dev, u8 id, u8 reg) +{ + struct emac_regs __iomem *p = dev->emacp; + u32 r = 0; + int n, err = -ETIMEDOUT; + + mutex_lock(&dev->mdio_lock); + + DBG2(dev, "mdio_read(%02x,%02x)" NL, id, reg); + + /* Enable proper MDIO port */ + if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII)) + zmii_get_mdio(dev->zmii_dev, dev->zmii_port); + if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII)) + rgmii_get_mdio(dev->rgmii_dev, dev->rgmii_port); + + /* Wait for management interface to become idle */ + n = 10; + while (!emac_phy_done(dev, in_be32(&p->stacr))) { + udelay(1); + if (!--n) { + DBG2(dev, " -> timeout wait idle\n"); + goto bail; + } + } + + /* Issue read command */ + if (emac_has_feature(dev, EMAC_FTR_EMAC4)) + r = EMAC4_STACR_BASE(dev->opb_bus_freq); + else + r = EMAC_STACR_BASE(dev->opb_bus_freq); + if (emac_has_feature(dev, EMAC_FTR_STACR_OC_INVERT)) + r |= EMAC_STACR_OC; + if (emac_has_feature(dev, EMAC_FTR_HAS_AXON_STACR)) + r |= EMACX_STACR_STAC_READ; + else + r |= 
EMAC_STACR_STAC_READ; + r |= (reg & EMAC_STACR_PRA_MASK) + | ((id & EMAC_STACR_PCDA_MASK) << EMAC_STACR_PCDA_SHIFT); + out_be32(&p->stacr, r); + + /* Wait for read to complete */ + n = 100; + while (!emac_phy_done(dev, (r = in_be32(&p->stacr)))) { + udelay(1); + if (!--n) { + DBG2(dev, " -> timeout wait complete\n"); + goto bail; + } + } + + if (unlikely(r & EMAC_STACR_PHYE)) { + DBG(dev, "mdio_read(%02x, %02x) failed" NL, id, reg); + err = -EREMOTEIO; + goto bail; + } + + r = ((r >> EMAC_STACR_PHYD_SHIFT) & EMAC_STACR_PHYD_MASK); + + DBG2(dev, "mdio_read -> %04x" NL, r); + err = 0; + bail: + if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII)) + rgmii_put_mdio(dev->rgmii_dev, dev->rgmii_port); + if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII)) + zmii_put_mdio(dev->zmii_dev, dev->zmii_port); + mutex_unlock(&dev->mdio_lock); + + return err == 0 ? r : err; +} + +static void __emac_mdio_write(struct emac_instance *dev, u8 id, u8 reg, + u16 val) +{ + struct emac_regs __iomem *p = dev->emacp; + u32 r = 0; + int n, err = -ETIMEDOUT; + + mutex_lock(&dev->mdio_lock); + + DBG2(dev, "mdio_write(%02x,%02x,%04x)" NL, id, reg, val); + + /* Enable proper MDIO port */ + if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII)) + zmii_get_mdio(dev->zmii_dev, dev->zmii_port); + if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII)) + rgmii_get_mdio(dev->rgmii_dev, dev->rgmii_port); + + /* Wait for management interface to be idle */ + n = 10; + while (!emac_phy_done(dev, in_be32(&p->stacr))) { + udelay(1); + if (!--n) { + DBG2(dev, " -> timeout wait idle\n"); + goto bail; + } + } + + /* Issue write command */ + if (emac_has_feature(dev, EMAC_FTR_EMAC4)) + r = EMAC4_STACR_BASE(dev->opb_bus_freq); + else + r = EMAC_STACR_BASE(dev->opb_bus_freq); + if (emac_has_feature(dev, EMAC_FTR_STACR_OC_INVERT)) + r |= EMAC_STACR_OC; + if (emac_has_feature(dev, EMAC_FTR_HAS_AXON_STACR)) + r |= EMACX_STACR_STAC_WRITE; + else + r |= EMAC_STACR_STAC_WRITE; + r |= (reg & EMAC_STACR_PRA_MASK) | + ((id & EMAC_STACR_PCDA_MASK) << EMAC_STACR_PCDA_SHIFT) | + (val << EMAC_STACR_PHYD_SHIFT); + out_be32(&p->stacr, r); + + /* Wait for write to complete */ + n = 100; + while (!emac_phy_done(dev, in_be32(&p->stacr))) { + udelay(1); + if (!--n) { + DBG2(dev, " -> timeout wait complete\n"); + goto bail; + } + } + err = 0; + bail: + if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII)) + rgmii_put_mdio(dev->rgmii_dev, dev->rgmii_port); + if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII)) + zmii_put_mdio(dev->zmii_dev, dev->zmii_port); + mutex_unlock(&dev->mdio_lock); +} + +static int emac_mdio_read(struct net_device *ndev, int id, int reg) +{ + struct emac_instance *dev = netdev_priv(ndev); + int res; + + res = __emac_mdio_read(dev->mdio_instance ? dev->mdio_instance : dev, + (u8) id, (u8) reg); + return res; +} + +static void emac_mdio_write(struct net_device *ndev, int id, int reg, int val) +{ + struct emac_instance *dev = netdev_priv(ndev); + + __emac_mdio_write(dev->mdio_instance ? dev->mdio_instance : dev, + (u8) id, (u8) reg, (u16) val); +} + +/* Tx lock BH */ +static void __emac_set_multicast_list(struct emac_instance *dev) +{ + struct emac_regs __iomem *p = dev->emacp; + u32 rmr = emac_iff2rmr(dev->ndev); + + DBG(dev, "__multicast %08x" NL, rmr); + + /* I decided to relax register access rules here to avoid + * full EMAC reset. + * + * There is a real problem with EMAC4 core if we use MWSW_001 bit + * in MR1 register and do a full EMAC reset. 
+ * One TX BD status update is delayed and, after EMAC reset, it + * never happens, resulting in TX hung (it'll be recovered by TX + * timeout handler eventually, but this is just gross). + * So we either have to do full TX reset or try to cheat here :) + * + * The only required change is to RX mode register, so I *think* all + * we need is just to stop RX channel. This seems to work on all + * tested SoCs. --ebs + * + * If we need the full reset, we might just trigger the workqueue + * and do it async... a bit nasty but should work --BenH + */ + dev->mcast_pending = 0; + emac_rx_disable(dev); + if (rmr & EMAC_RMR_MAE) + emac_hash_mc(dev); + out_be32(&p->rmr, rmr); + emac_rx_enable(dev); +} + +/* Tx lock BH */ +static void emac_set_multicast_list(struct net_device *ndev) +{ + struct emac_instance *dev = netdev_priv(ndev); + + DBG(dev, "multicast" NL); + + BUG_ON(!netif_running(dev->ndev)); + + if (dev->no_mcast) { + dev->mcast_pending = 1; + return; + } + __emac_set_multicast_list(dev); +} + +static int emac_resize_rx_ring(struct emac_instance *dev, int new_mtu) +{ + int rx_sync_size = emac_rx_sync_size(new_mtu); + int rx_skb_size = emac_rx_skb_size(new_mtu); + int i, ret = 0; + + mutex_lock(&dev->link_lock); + emac_netif_stop(dev); + emac_rx_disable(dev); + mal_disable_rx_channel(dev->mal, dev->mal_rx_chan); + + if (dev->rx_sg_skb) { + ++dev->estats.rx_dropped_resize; + dev_kfree_skb(dev->rx_sg_skb); + dev->rx_sg_skb = NULL; + } + + /* Make a first pass over RX ring and mark BDs ready, dropping + * non-processed packets on the way. We need this as a separate pass + * to simplify error recovery in the case of allocation failure later. + */ + for (i = 0; i < NUM_RX_BUFF; ++i) { + if (dev->rx_desc[i].ctrl & MAL_RX_CTRL_FIRST) + ++dev->estats.rx_dropped_resize; + + dev->rx_desc[i].data_len = 0; + dev->rx_desc[i].ctrl = MAL_RX_CTRL_EMPTY | + (i == (NUM_RX_BUFF - 1) ? 
MAL_RX_CTRL_WRAP : 0); + } + + /* Reallocate RX ring only if bigger skb buffers are required */ + if (rx_skb_size <= dev->rx_skb_size) + goto skip; + + /* Second pass, allocate new skbs */ + for (i = 0; i < NUM_RX_BUFF; ++i) { + struct sk_buff *skb = alloc_skb(rx_skb_size, GFP_ATOMIC); + if (!skb) { + ret = -ENOMEM; + goto oom; + } + + BUG_ON(!dev->rx_skb[i]); + dev_kfree_skb(dev->rx_skb[i]); + + skb_reserve(skb, EMAC_RX_SKB_HEADROOM + 2); + dev->rx_desc[i].data_ptr = + dma_map_single(&dev->ofdev->dev, skb->data - 2, rx_sync_size, + DMA_FROM_DEVICE) + 2; + dev->rx_skb[i] = skb; + } + skip: + /* Check if we need to change "Jumbo" bit in MR1 */ + if ((new_mtu > ETH_DATA_LEN) ^ (dev->ndev->mtu > ETH_DATA_LEN)) { + /* This is to prevent starting RX channel in emac_rx_enable() */ + set_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags); + + dev->ndev->mtu = new_mtu; + emac_full_tx_reset(dev); + } + + mal_set_rcbs(dev->mal, dev->mal_rx_chan, emac_rx_size(new_mtu)); + oom: + /* Restart RX */ + clear_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags); + dev->rx_slot = 0; + mal_enable_rx_channel(dev->mal, dev->mal_rx_chan); + emac_rx_enable(dev); + emac_netif_start(dev); + mutex_unlock(&dev->link_lock); + + return ret; +} + +/* Process ctx, rtnl_lock semaphore */ +static int emac_change_mtu(struct net_device *ndev, int new_mtu) +{ + struct emac_instance *dev = netdev_priv(ndev); + int ret = 0; + + if (new_mtu < EMAC_MIN_MTU || new_mtu > dev->max_mtu) + return -EINVAL; + + DBG(dev, "change_mtu(%d)" NL, new_mtu); + + if (netif_running(ndev)) { + /* Check if we really need to reinitalize RX ring */ + if (emac_rx_skb_size(ndev->mtu) != emac_rx_skb_size(new_mtu)) + ret = emac_resize_rx_ring(dev, new_mtu); + } + + if (!ret) { + ndev->mtu = new_mtu; + dev->rx_skb_size = emac_rx_skb_size(new_mtu); + dev->rx_sync_size = emac_rx_sync_size(new_mtu); + } + + return ret; +} + +static void emac_clean_tx_ring(struct emac_instance *dev) +{ + int i; + + for (i = 0; i < NUM_TX_BUFF; ++i) { + if (dev->tx_skb[i]) { + dev_kfree_skb(dev->tx_skb[i]); + dev->tx_skb[i] = NULL; + if (dev->tx_desc[i].ctrl & MAL_TX_CTRL_READY) + ++dev->estats.tx_dropped; + } + dev->tx_desc[i].ctrl = 0; + dev->tx_desc[i].data_ptr = 0; + } +} + +static void emac_clean_rx_ring(struct emac_instance *dev) +{ + int i; + + for (i = 0; i < NUM_RX_BUFF; ++i) + if (dev->rx_skb[i]) { + dev->rx_desc[i].ctrl = 0; + dev_kfree_skb(dev->rx_skb[i]); + dev->rx_skb[i] = NULL; + dev->rx_desc[i].data_ptr = 0; + } + + if (dev->rx_sg_skb) { + dev_kfree_skb(dev->rx_sg_skb); + dev->rx_sg_skb = NULL; + } +} + +static inline int emac_alloc_rx_skb(struct emac_instance *dev, int slot, + gfp_t flags) +{ + struct sk_buff *skb = alloc_skb(dev->rx_skb_size, flags); + if (unlikely(!skb)) + return -ENOMEM; + + dev->rx_skb[slot] = skb; + dev->rx_desc[slot].data_len = 0; + + skb_reserve(skb, EMAC_RX_SKB_HEADROOM + 2); + dev->rx_desc[slot].data_ptr = + dma_map_single(&dev->ofdev->dev, skb->data - 2, dev->rx_sync_size, + DMA_FROM_DEVICE) + 2; + wmb(); + dev->rx_desc[slot].ctrl = MAL_RX_CTRL_EMPTY | + (slot == (NUM_RX_BUFF - 1) ? MAL_RX_CTRL_WRAP : 0); + + return 0; +} + +static void emac_print_link_status(struct emac_instance *dev) +{ + if (netif_carrier_ok(dev->ndev)) + printk(KERN_INFO "%s: link is up, %d %s%s\n", + dev->ndev->name, dev->phy.speed, + dev->phy.duplex == DUPLEX_FULL ? "FDX" : "HDX", + dev->phy.pause ? ", pause enabled" : + dev->phy.asym_pause ? 
", asymmetric pause enabled" : ""); + else + printk(KERN_INFO "%s: link is down\n", dev->ndev->name); +} + +/* Process ctx, rtnl_lock semaphore */ +static int emac_open(struct net_device *ndev) +{ + struct emac_instance *dev = netdev_priv(ndev); + int err, i; + + DBG(dev, "open" NL); + + /* Setup error IRQ handler */ + err = request_irq(dev->emac_irq, emac_irq, 0, "EMAC", dev); + if (err) { + printk(KERN_ERR "%s: failed to request IRQ %d\n", + ndev->name, dev->emac_irq); + return err; + } + + /* Allocate RX ring */ + for (i = 0; i < NUM_RX_BUFF; ++i) + if (emac_alloc_rx_skb(dev, i, GFP_KERNEL)) { + printk(KERN_ERR "%s: failed to allocate RX ring\n", + ndev->name); + goto oom; + } + + dev->tx_cnt = dev->tx_slot = dev->ack_slot = dev->rx_slot = 0; + clear_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags); + dev->rx_sg_skb = NULL; + + mutex_lock(&dev->link_lock); + + /* XXX Start PHY polling now. Shouldn't wr do like sungem instead and + * always poll the PHY even when the iface is down ? That would allow + * things like laptop-net to work. --BenH + */ + if (dev->phy.address >= 0) { + int link_poll_interval; + if (dev->phy.def->ops->poll_link(&dev->phy)) { + dev->phy.def->ops->read_link(&dev->phy); + netif_carrier_on(dev->ndev); + link_poll_interval = PHY_POLL_LINK_ON; + } else { + netif_carrier_off(dev->ndev); + link_poll_interval = PHY_POLL_LINK_OFF; + } + dev->link_polling = 1; + wmb(); + schedule_delayed_work(&dev->link_work, link_poll_interval); + emac_print_link_status(dev); + } else + netif_carrier_on(dev->ndev); + + emac_configure(dev); + mal_poll_add(dev->mal, &dev->commac); + mal_enable_tx_channel(dev->mal, dev->mal_tx_chan); + mal_set_rcbs(dev->mal, dev->mal_rx_chan, emac_rx_size(ndev->mtu)); + mal_enable_rx_channel(dev->mal, dev->mal_rx_chan); + emac_tx_enable(dev); + emac_rx_enable(dev); + emac_netif_start(dev); + + mutex_unlock(&dev->link_lock); + + return 0; + oom: + emac_clean_rx_ring(dev); + free_irq(dev->emac_irq, dev); + + return -ENOMEM; +} + +/* BHs disabled */ +#if 0 +static int emac_link_differs(struct emac_instance *dev) +{ + u32 r = in_be32(&dev->emacp->mr1); + + int duplex = r & EMAC_MR1_FDE ? 
DUPLEX_FULL : DUPLEX_HALF; + int speed, pause, asym_pause; + + if (r & EMAC_MR1_MF_1000) + speed = SPEED_1000; + else if (r & EMAC_MR1_MF_100) + speed = SPEED_100; + else + speed = SPEED_10; + + switch (r & (EMAC_MR1_EIFC | EMAC_MR1_APP)) { + case (EMAC_MR1_EIFC | EMAC_MR1_APP): + pause = 1; + asym_pause = 0; + break; + case EMAC_MR1_APP: + pause = 0; + asym_pause = 1; + break; + default: + pause = asym_pause = 0; + } + return speed != dev->phy.speed || duplex != dev->phy.duplex || + pause != dev->phy.pause || asym_pause != dev->phy.asym_pause; +} +#endif + +static void emac_link_timer(struct work_struct *work) +{ + struct emac_instance *dev = + container_of((struct delayed_work *)work, + struct emac_instance, link_work); + int link_poll_interval; + + mutex_lock(&dev->link_lock); + + DBG2(dev, "link timer" NL); + + if (dev->phy.def->ops->poll_link(&dev->phy)) { + if (!netif_carrier_ok(dev->ndev)) { + /* Get new link parameters */ + dev->phy.def->ops->read_link(&dev->phy); + + netif_carrier_on(dev->ndev); + emac_netif_stop(dev); + emac_full_tx_reset(dev); + emac_netif_start(dev); + emac_print_link_status(dev); + } + link_poll_interval = PHY_POLL_LINK_ON; + } else { + if (netif_carrier_ok(dev->ndev)) { + emac_reinitialize(dev); + netif_carrier_off(dev->ndev); + netif_tx_disable(dev->ndev); + emac_print_link_status(dev); + } + link_poll_interval = PHY_POLL_LINK_OFF; + } + schedule_delayed_work(&dev->link_work, link_poll_interval); + + mutex_unlock(&dev->link_lock); +} + +static void emac_force_link_update(struct emac_instance *dev) +{ + netif_carrier_off(dev->ndev); + if (dev->link_polling) { + cancel_rearming_delayed_work(&dev->link_work); + if (dev->link_polling) + schedule_delayed_work(&dev->link_work, PHY_POLL_LINK_OFF); + } +} + +/* Process ctx, rtnl_lock semaphore */ +static int emac_close(struct net_device *ndev) +{ + struct emac_instance *dev = netdev_priv(ndev); + + DBG(dev, "close" NL); + + if (dev->phy.address >= 0) + cancel_rearming_delayed_work(&dev->link_work); + + emac_netif_stop(dev); + flush_scheduled_work(); + + emac_rx_disable(dev); + emac_tx_disable(dev); + mal_disable_rx_channel(dev->mal, dev->mal_rx_chan); + mal_disable_tx_channel(dev->mal, dev->mal_tx_chan); + mal_poll_del(dev->mal, &dev->commac); + + emac_clean_tx_ring(dev); + emac_clean_rx_ring(dev); + + free_irq(dev->emac_irq, dev); + + return 0; +} + +static inline u16 emac_tx_csum(struct emac_instance *dev, + struct sk_buff *skb) +{ + if (emac_has_feature(dev, EMAC_FTR_HAS_TAH && + skb->ip_summed == CHECKSUM_PARTIAL)) { + ++dev->stats.tx_packets_csum; + return EMAC_TX_CTRL_TAH_CSUM; + } + return 0; +} + +static inline int emac_xmit_finish(struct emac_instance *dev, int len) +{ + struct emac_regs __iomem *p = dev->emacp; + struct net_device *ndev = dev->ndev; + + /* Send the packet out. 
If the if makes a significant perf + * difference, then we can store the TMR0 value in "dev" + * instead + */ + if (emac_has_feature(dev, EMAC_FTR_EMAC4)) + out_be32(&p->tmr0, EMAC4_TMR0_XMIT); + else + out_be32(&p->tmr0, EMAC_TMR0_XMIT); + + if (unlikely(++dev->tx_cnt == NUM_TX_BUFF)) { + netif_stop_queue(ndev); + DBG2(dev, "stopped TX queue" NL); + } + + ndev->trans_start = jiffies; + ++dev->stats.tx_packets; + dev->stats.tx_bytes += len; + + return 0; +} + +/* Tx lock BH */ +static int emac_start_xmit(struct sk_buff *skb, struct net_device *ndev) +{ + struct emac_instance *dev = netdev_priv(ndev); + unsigned int len = skb->len; + int slot; + + u16 ctrl = EMAC_TX_CTRL_GFCS | EMAC_TX_CTRL_GP | MAL_TX_CTRL_READY | + MAL_TX_CTRL_LAST | emac_tx_csum(dev, skb); + + slot = dev->tx_slot++; + if (dev->tx_slot == NUM_TX_BUFF) { + dev->tx_slot = 0; + ctrl |= MAL_TX_CTRL_WRAP; + } + + DBG2(dev, "xmit(%u) %d" NL, len, slot); + + dev->tx_skb[slot] = skb; + dev->tx_desc[slot].data_ptr = dma_map_single(&dev->ofdev->dev, + skb->data, len, + DMA_TO_DEVICE); + dev->tx_desc[slot].data_len = (u16) len; + wmb(); + dev->tx_desc[slot].ctrl = ctrl; + + return emac_xmit_finish(dev, len); +} + +#ifdef CONFIG_IBM_NEW_EMAC_TAH +static inline int emac_xmit_split(struct emac_instance *dev, int slot, + u32 pd, int len, int last, u16 base_ctrl) +{ + while (1) { + u16 ctrl = base_ctrl; + int chunk = min(len, MAL_MAX_TX_SIZE); + len -= chunk; + + slot = (slot + 1) % NUM_TX_BUFF; + + if (last && !len) + ctrl |= MAL_TX_CTRL_LAST; + if (slot == NUM_TX_BUFF - 1) + ctrl |= MAL_TX_CTRL_WRAP; + + dev->tx_skb[slot] = NULL; + dev->tx_desc[slot].data_ptr = pd; + dev->tx_desc[slot].data_len = (u16) chunk; + dev->tx_desc[slot].ctrl = ctrl; + ++dev->tx_cnt; + + if (!len) + break; + + pd += chunk; + } + return slot; +} + +/* Tx lock BH disabled (SG version for TAH equipped EMACs) */ +static int emac_start_xmit_sg(struct sk_buff *skb, struct net_device *ndev) +{ + struct emac_instance *dev = netdev_priv(ndev); + int nr_frags = skb_shinfo(skb)->nr_frags; + int len = skb->len, chunk; + int slot, i; + u16 ctrl; + u32 pd; + + /* This is common "fast" path */ + if (likely(!nr_frags && len <= MAL_MAX_TX_SIZE)) + return emac_start_xmit(skb, ndev); + + len -= skb->data_len; + + /* Note, this is only an *estimation*, we can still run out of empty + * slots because of the additional fragmentation into + * MAL_MAX_TX_SIZE-sized chunks + */ + if (unlikely(dev->tx_cnt + nr_frags + mal_tx_chunks(len) > NUM_TX_BUFF)) + goto stop_queue; + + ctrl = EMAC_TX_CTRL_GFCS | EMAC_TX_CTRL_GP | MAL_TX_CTRL_READY | + emac_tx_csum(dev, skb); + slot = dev->tx_slot; + + /* skb data */ + dev->tx_skb[slot] = NULL; + chunk = min(len, MAL_MAX_TX_SIZE); + dev->tx_desc[slot].data_ptr = pd = + dma_map_single(&dev->ofdev->dev, skb->data, len, DMA_TO_DEVICE); + dev->tx_desc[slot].data_len = (u16) chunk; + len -= chunk; + if (unlikely(len)) + slot = emac_xmit_split(dev, slot, pd + chunk, len, !nr_frags, + ctrl); + /* skb fragments */ + for (i = 0; i < nr_frags; ++i) { + struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i]; + len = frag->size; + + if (unlikely(dev->tx_cnt + mal_tx_chunks(len) >= NUM_TX_BUFF)) + goto undo_frame; + + pd = dma_map_page(&dev->ofdev->dev, frag->page, frag->page_offset, len, + DMA_TO_DEVICE); + + slot = emac_xmit_split(dev, slot, pd, len, i == nr_frags - 1, + ctrl); + } + + DBG2(dev, "xmit_sg(%u) %d - %d" NL, skb->len, dev->tx_slot, slot); + + /* Attach skb to the last slot so we don't release it too early */ + dev->tx_skb[slot] = skb; + + /* 
Send the packet out */ + if (dev->tx_slot == NUM_TX_BUFF - 1) + ctrl |= MAL_TX_CTRL_WRAP; + wmb(); + dev->tx_desc[dev->tx_slot].ctrl = ctrl; + dev->tx_slot = (slot + 1) % NUM_TX_BUFF; + + return emac_xmit_finish(dev, skb->len); + + undo_frame: + /* Well, too bad. Our previous estimation was overly optimistic. + * Undo everything. + */ + while (slot != dev->tx_slot) { + dev->tx_desc[slot].ctrl = 0; + --dev->tx_cnt; + if (--slot < 0) + slot = NUM_TX_BUFF - 1; + } + ++dev->estats.tx_undo; + + stop_queue: + netif_stop_queue(ndev); + DBG2(dev, "stopped TX queue" NL); + return 1; +} +#else +# define emac_start_xmit_sg emac_start_xmit +#endif /* !defined(CONFIG_IBM_NEW_EMAC_TAH) */ + +/* Tx lock BHs */ +static void emac_parse_tx_error(struct emac_instance *dev, u16 ctrl) +{ + struct emac_error_stats *st = &dev->estats; + + DBG(dev, "BD TX error %04x" NL, ctrl); + + ++st->tx_bd_errors; + if (ctrl & EMAC_TX_ST_BFCS) + ++st->tx_bd_bad_fcs; + if (ctrl & EMAC_TX_ST_LCS) + ++st->tx_bd_carrier_loss; + if (ctrl & EMAC_TX_ST_ED) + ++st->tx_bd_excessive_deferral; + if (ctrl & EMAC_TX_ST_EC) + ++st->tx_bd_excessive_collisions; + if (ctrl & EMAC_TX_ST_LC) + ++st->tx_bd_late_collision; + if (ctrl & EMAC_TX_ST_MC) + ++st->tx_bd_multple_collisions; + if (ctrl & EMAC_TX_ST_SC) + ++st->tx_bd_single_collision; + if (ctrl & EMAC_TX_ST_UR) + ++st->tx_bd_underrun; + if (ctrl & EMAC_TX_ST_SQE) + ++st->tx_bd_sqe; +} + +static void emac_poll_tx(void *param) +{ + struct emac_instance *dev = param; + u32 bad_mask; + + DBG2(dev, "poll_tx, %d %d" NL, dev->tx_cnt, dev->ack_slot); + + if (emac_has_feature(dev, EMAC_FTR_HAS_TAH)) + bad_mask = EMAC_IS_BAD_TX_TAH; + else + bad_mask = EMAC_IS_BAD_TX; + + netif_tx_lock_bh(dev->ndev); + if (dev->tx_cnt) { + u16 ctrl; + int slot = dev->ack_slot, n = 0; + again: + ctrl = dev->tx_desc[slot].ctrl; + if (!(ctrl & MAL_TX_CTRL_READY)) { + struct sk_buff *skb = dev->tx_skb[slot]; + ++n; + + if (skb) { + dev_kfree_skb(skb); + dev->tx_skb[slot] = NULL; + } + slot = (slot + 1) % NUM_TX_BUFF; + + if (unlikely(ctrl & bad_mask)) + emac_parse_tx_error(dev, ctrl); + + if (--dev->tx_cnt) + goto again; + } + if (n) { + dev->ack_slot = slot; + if (netif_queue_stopped(dev->ndev) && + dev->tx_cnt < EMAC_TX_WAKEUP_THRESH) + netif_wake_queue(dev->ndev); + + DBG2(dev, "tx %d pkts" NL, n); + } + } + netif_tx_unlock_bh(dev->ndev); +} + +static inline void emac_recycle_rx_skb(struct emac_instance *dev, int slot, + int len) +{ + struct sk_buff *skb = dev->rx_skb[slot]; + + DBG2(dev, "recycle %d %d" NL, slot, len); + + if (len) + dma_map_single(&dev->ofdev->dev, skb->data - 2, + EMAC_DMA_ALIGN(len + 2), DMA_FROM_DEVICE); + + dev->rx_desc[slot].data_len = 0; + wmb(); + dev->rx_desc[slot].ctrl = MAL_RX_CTRL_EMPTY | + (slot == (NUM_RX_BUFF - 1) ? 
MAL_RX_CTRL_WRAP : 0); +} + +static void emac_parse_rx_error(struct emac_instance *dev, u16 ctrl) +{ + struct emac_error_stats *st = &dev->estats; + + DBG(dev, "BD RX error %04x" NL, ctrl); + + ++st->rx_bd_errors; + if (ctrl & EMAC_RX_ST_OE) + ++st->rx_bd_overrun; + if (ctrl & EMAC_RX_ST_BP) + ++st->rx_bd_bad_packet; + if (ctrl & EMAC_RX_ST_RP) + ++st->rx_bd_runt_packet; + if (ctrl & EMAC_RX_ST_SE) + ++st->rx_bd_short_event; + if (ctrl & EMAC_RX_ST_AE) + ++st->rx_bd_alignment_error; + if (ctrl & EMAC_RX_ST_BFCS) + ++st->rx_bd_bad_fcs; + if (ctrl & EMAC_RX_ST_PTL) + ++st->rx_bd_packet_too_long; + if (ctrl & EMAC_RX_ST_ORE) + ++st->rx_bd_out_of_range; + if (ctrl & EMAC_RX_ST_IRE) + ++st->rx_bd_in_range; +} + +static inline void emac_rx_csum(struct emac_instance *dev, + struct sk_buff *skb, u16 ctrl) +{ +#ifdef CONFIG_IBM_NEW_EMAC_TAH + if (!ctrl && dev->tah_dev) { + skb->ip_summed = CHECKSUM_UNNECESSARY; + ++dev->stats.rx_packets_csum; + } +#endif +} + +static inline int emac_rx_sg_append(struct emac_instance *dev, int slot) +{ + if (likely(dev->rx_sg_skb != NULL)) { + int len = dev->rx_desc[slot].data_len; + int tot_len = dev->rx_sg_skb->len + len; + + if (unlikely(tot_len + 2 > dev->rx_skb_size)) { + ++dev->estats.rx_dropped_mtu; + dev_kfree_skb(dev->rx_sg_skb); + dev->rx_sg_skb = NULL; + } else { + cacheable_memcpy(dev->rx_sg_skb->tail, + dev->rx_skb[slot]->data, len); + skb_put(dev->rx_sg_skb, len); + emac_recycle_rx_skb(dev, slot, len); + return 0; + } + } + emac_recycle_rx_skb(dev, slot, 0); + return -1; +} + +/* NAPI poll context */ +static int emac_poll_rx(void *param, int budget) +{ + struct emac_instance *dev = param; + int slot = dev->rx_slot, received = 0; + + DBG2(dev, "poll_rx(%d)" NL, budget); + + again: + while (budget > 0) { + int len; + struct sk_buff *skb; + u16 ctrl = dev->rx_desc[slot].ctrl; + + if (ctrl & MAL_RX_CTRL_EMPTY) + break; + + skb = dev->rx_skb[slot]; + mb(); + len = dev->rx_desc[slot].data_len; + + if (unlikely(!MAL_IS_SINGLE_RX(ctrl))) + goto sg; + + ctrl &= EMAC_BAD_RX_MASK; + if (unlikely(ctrl && ctrl != EMAC_RX_TAH_BAD_CSUM)) { + emac_parse_rx_error(dev, ctrl); + ++dev->estats.rx_dropped_error; + emac_recycle_rx_skb(dev, slot, 0); + len = 0; + goto next; + } + + if (len && len < EMAC_RX_COPY_THRESH) { + struct sk_buff *copy_skb = + alloc_skb(len + EMAC_RX_SKB_HEADROOM + 2, GFP_ATOMIC); + if (unlikely(!copy_skb)) + goto oom; + + skb_reserve(copy_skb, EMAC_RX_SKB_HEADROOM + 2); + cacheable_memcpy(copy_skb->data - 2, skb->data - 2, + len + 2); + emac_recycle_rx_skb(dev, slot, len); + skb = copy_skb; + } else if (unlikely(emac_alloc_rx_skb(dev, slot, GFP_ATOMIC))) + goto oom; + + skb_put(skb, len); + push_packet: + skb->dev = dev->ndev; + skb->protocol = eth_type_trans(skb, dev->ndev); + emac_rx_csum(dev, skb, ctrl); + + if (unlikely(netif_receive_skb(skb) == NET_RX_DROP)) + ++dev->estats.rx_dropped_stack; + next: + ++dev->stats.rx_packets; + skip: + dev->stats.rx_bytes += len; + slot = (slot + 1) % NUM_RX_BUFF; + --budget; + ++received; + continue; + sg: + if (ctrl & MAL_RX_CTRL_FIRST) { + BUG_ON(dev->rx_sg_skb); + if (unlikely(emac_alloc_rx_skb(dev, slot, GFP_ATOMIC))) { + DBG(dev, "rx OOM %d" NL, slot); + ++dev->estats.rx_dropped_oom; + emac_recycle_rx_skb(dev, slot, 0); + } else { + dev->rx_sg_skb = skb; + skb_put(skb, len); + } + } else if (!emac_rx_sg_append(dev, slot) && + (ctrl & MAL_RX_CTRL_LAST)) { + + skb = dev->rx_sg_skb; + dev->rx_sg_skb = NULL; + + ctrl &= EMAC_BAD_RX_MASK; + if (unlikely(ctrl && ctrl != EMAC_RX_TAH_BAD_CSUM)) { + 
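+				/* Error status is only checked on the last BD of a
+				 * multi-descriptor frame; anything other than a TAH
+				 * checksum failure makes the whole reassembled frame
+				 * unusable, so count the error and discard it.
+				 */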
emac_parse_rx_error(dev, ctrl); + ++dev->estats.rx_dropped_error; + dev_kfree_skb(skb); + len = 0; + } else + goto push_packet; + } + goto skip; + oom: + DBG(dev, "rx OOM %d" NL, slot); + /* Drop the packet and recycle skb */ + ++dev->estats.rx_dropped_oom; + emac_recycle_rx_skb(dev, slot, 0); + goto next; + } + + if (received) { + DBG2(dev, "rx %d BDs" NL, received); + dev->rx_slot = slot; + } + + if (unlikely(budget && test_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags))) { + mb(); + if (!(dev->rx_desc[slot].ctrl & MAL_RX_CTRL_EMPTY)) { + DBG2(dev, "rx restart" NL); + received = 0; + goto again; + } + + if (dev->rx_sg_skb) { + DBG2(dev, "dropping partial rx packet" NL); + ++dev->estats.rx_dropped_error; + dev_kfree_skb(dev->rx_sg_skb); + dev->rx_sg_skb = NULL; + } + + clear_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags); + mal_enable_rx_channel(dev->mal, dev->mal_rx_chan); + emac_rx_enable(dev); + dev->rx_slot = 0; + } + return received; +} + +/* NAPI poll context */ +static int emac_peek_rx(void *param) +{ + struct emac_instance *dev = param; + + return !(dev->rx_desc[dev->rx_slot].ctrl & MAL_RX_CTRL_EMPTY); +} + +/* NAPI poll context */ +static int emac_peek_rx_sg(void *param) +{ + struct emac_instance *dev = param; + + int slot = dev->rx_slot; + while (1) { + u16 ctrl = dev->rx_desc[slot].ctrl; + if (ctrl & MAL_RX_CTRL_EMPTY) + return 0; + else if (ctrl & MAL_RX_CTRL_LAST) + return 1; + + slot = (slot + 1) % NUM_RX_BUFF; + + /* I'm just being paranoid here :) */ + if (unlikely(slot == dev->rx_slot)) + return 0; + } +} + +/* Hard IRQ */ +static void emac_rxde(void *param) +{ + struct emac_instance *dev = param; + + ++dev->estats.rx_stopped; + emac_rx_disable_async(dev); +} + +/* Hard IRQ */ +static irqreturn_t emac_irq(int irq, void *dev_instance) +{ + struct emac_instance *dev = dev_instance; + struct emac_regs __iomem *p = dev->emacp; + struct emac_error_stats *st = &dev->estats; + u32 isr; + + spin_lock(&dev->lock); + + isr = in_be32(&p->isr); + out_be32(&p->isr, isr); + + DBG(dev, "isr = %08x" NL, isr); + + if (isr & EMAC4_ISR_TXPE) + ++st->tx_parity; + if (isr & EMAC4_ISR_RXPE) + ++st->rx_parity; + if (isr & EMAC4_ISR_TXUE) + ++st->tx_underrun; + if (isr & EMAC4_ISR_RXOE) + ++st->rx_fifo_overrun; + if (isr & EMAC_ISR_OVR) + ++st->rx_overrun; + if (isr & EMAC_ISR_BP) + ++st->rx_bad_packet; + if (isr & EMAC_ISR_RP) + ++st->rx_runt_packet; + if (isr & EMAC_ISR_SE) + ++st->rx_short_event; + if (isr & EMAC_ISR_ALE) + ++st->rx_alignment_error; + if (isr & EMAC_ISR_BFCS) + ++st->rx_bad_fcs; + if (isr & EMAC_ISR_PTLE) + ++st->rx_packet_too_long; + if (isr & EMAC_ISR_ORE) + ++st->rx_out_of_range; + if (isr & EMAC_ISR_IRE) + ++st->rx_in_range; + if (isr & EMAC_ISR_SQE) + ++st->tx_sqe; + if (isr & EMAC_ISR_TE) + ++st->tx_errors; + + spin_unlock(&dev->lock); + + return IRQ_HANDLED; +} + +static struct net_device_stats *emac_stats(struct net_device *ndev) +{ + struct emac_instance *dev = netdev_priv(ndev); + struct emac_stats *st = &dev->stats; + struct emac_error_stats *est = &dev->estats; + struct net_device_stats *nst = &dev->nstats; + unsigned long flags; + + DBG2(dev, "stats" NL); + + /* Compute "legacy" statistics */ + spin_lock_irqsave(&dev->lock, flags); + nst->rx_packets = (unsigned long)st->rx_packets; + nst->rx_bytes = (unsigned long)st->rx_bytes; + nst->tx_packets = (unsigned long)st->tx_packets; + nst->tx_bytes = (unsigned long)st->tx_bytes; + nst->rx_dropped = (unsigned long)(est->rx_dropped_oom + + est->rx_dropped_error + + est->rx_dropped_resize + + est->rx_dropped_mtu); + 
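+	/* The remaining legacy fields are coarser groupings: the BD-reported
+	 * and IRQ-reported error counters are merged (and cast down to
+	 * unsigned long) into the standard net_device_stats categories below.
+	 */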
nst->tx_dropped = (unsigned long)est->tx_dropped; + + nst->rx_errors = (unsigned long)est->rx_bd_errors; + nst->rx_fifo_errors = (unsigned long)(est->rx_bd_overrun + + est->rx_fifo_overrun + + est->rx_overrun); + nst->rx_frame_errors = (unsigned long)(est->rx_bd_alignment_error + + est->rx_alignment_error); + nst->rx_crc_errors = (unsigned long)(est->rx_bd_bad_fcs + + est->rx_bad_fcs); + nst->rx_length_errors = (unsigned long)(est->rx_bd_runt_packet + + est->rx_bd_short_event + + est->rx_bd_packet_too_long + + est->rx_bd_out_of_range + + est->rx_bd_in_range + + est->rx_runt_packet + + est->rx_short_event + + est->rx_packet_too_long + + est->rx_out_of_range + + est->rx_in_range); + + nst->tx_errors = (unsigned long)(est->tx_bd_errors + est->tx_errors); + nst->tx_fifo_errors = (unsigned long)(est->tx_bd_underrun + + est->tx_underrun); + nst->tx_carrier_errors = (unsigned long)est->tx_bd_carrier_loss; + nst->collisions = (unsigned long)(est->tx_bd_excessive_deferral + + est->tx_bd_excessive_collisions + + est->tx_bd_late_collision + + est->tx_bd_multple_collisions); + spin_unlock_irqrestore(&dev->lock, flags); + return nst; +} + +static struct mal_commac_ops emac_commac_ops = { + .poll_tx = &emac_poll_tx, + .poll_rx = &emac_poll_rx, + .peek_rx = &emac_peek_rx, + .rxde = &emac_rxde, +}; + +static struct mal_commac_ops emac_commac_sg_ops = { + .poll_tx = &emac_poll_tx, + .poll_rx = &emac_poll_rx, + .peek_rx = &emac_peek_rx_sg, + .rxde = &emac_rxde, +}; + +/* Ethtool support */ +static int emac_ethtool_get_settings(struct net_device *ndev, + struct ethtool_cmd *cmd) +{ + struct emac_instance *dev = netdev_priv(ndev); + + cmd->supported = dev->phy.features; + cmd->port = PORT_MII; + cmd->phy_address = dev->phy.address; + cmd->transceiver = + dev->phy.address >= 0 ? 
XCVR_EXTERNAL : XCVR_INTERNAL; + + mutex_lock(&dev->link_lock); + cmd->advertising = dev->phy.advertising; + cmd->autoneg = dev->phy.autoneg; + cmd->speed = dev->phy.speed; + cmd->duplex = dev->phy.duplex; + mutex_unlock(&dev->link_lock); + + return 0; +} + +static int emac_ethtool_set_settings(struct net_device *ndev, + struct ethtool_cmd *cmd) +{ + struct emac_instance *dev = netdev_priv(ndev); + u32 f = dev->phy.features; + + DBG(dev, "set_settings(%d, %d, %d, 0x%08x)" NL, + cmd->autoneg, cmd->speed, cmd->duplex, cmd->advertising); + + /* Basic sanity checks */ + if (dev->phy.address < 0) + return -EOPNOTSUPP; + if (cmd->autoneg != AUTONEG_ENABLE && cmd->autoneg != AUTONEG_DISABLE) + return -EINVAL; + if (cmd->autoneg == AUTONEG_ENABLE && cmd->advertising == 0) + return -EINVAL; + if (cmd->duplex != DUPLEX_HALF && cmd->duplex != DUPLEX_FULL) + return -EINVAL; + + if (cmd->autoneg == AUTONEG_DISABLE) { + switch (cmd->speed) { + case SPEED_10: + if (cmd->duplex == DUPLEX_HALF + && !(f & SUPPORTED_10baseT_Half)) + return -EINVAL; + if (cmd->duplex == DUPLEX_FULL + && !(f & SUPPORTED_10baseT_Full)) + return -EINVAL; + break; + case SPEED_100: + if (cmd->duplex == DUPLEX_HALF + && !(f & SUPPORTED_100baseT_Half)) + return -EINVAL; + if (cmd->duplex == DUPLEX_FULL + && !(f & SUPPORTED_100baseT_Full)) + return -EINVAL; + break; + case SPEED_1000: + if (cmd->duplex == DUPLEX_HALF + && !(f & SUPPORTED_1000baseT_Half)) + return -EINVAL; + if (cmd->duplex == DUPLEX_FULL + && !(f & SUPPORTED_1000baseT_Full)) + return -EINVAL; + break; + default: + return -EINVAL; + } + + mutex_lock(&dev->link_lock); + dev->phy.def->ops->setup_forced(&dev->phy, cmd->speed, + cmd->duplex); + mutex_unlock(&dev->link_lock); + + } else { + if (!(f & SUPPORTED_Autoneg)) + return -EINVAL; + + mutex_lock(&dev->link_lock); + dev->phy.def->ops->setup_aneg(&dev->phy, + (cmd->advertising & f) | + (dev->phy.advertising & + (ADVERTISED_Pause | + ADVERTISED_Asym_Pause))); + mutex_unlock(&dev->link_lock); + } + emac_force_link_update(dev); + + return 0; +} + +static void emac_ethtool_get_ringparam(struct net_device *ndev, + struct ethtool_ringparam *rp) +{ + rp->rx_max_pending = rp->rx_pending = NUM_RX_BUFF; + rp->tx_max_pending = rp->tx_pending = NUM_TX_BUFF; +} + +static void emac_ethtool_get_pauseparam(struct net_device *ndev, + struct ethtool_pauseparam *pp) +{ + struct emac_instance *dev = netdev_priv(ndev); + + mutex_lock(&dev->link_lock); + if ((dev->phy.features & SUPPORTED_Autoneg) && + (dev->phy.advertising & (ADVERTISED_Pause | ADVERTISED_Asym_Pause))) + pp->autoneg = 1; + + if (dev->phy.duplex == DUPLEX_FULL) { + if (dev->phy.pause) + pp->rx_pause = pp->tx_pause = 1; + else if (dev->phy.asym_pause) + pp->tx_pause = 1; + } + mutex_unlock(&dev->link_lock); +} + +static u32 emac_ethtool_get_rx_csum(struct net_device *ndev) +{ + struct emac_instance *dev = netdev_priv(ndev); + + return dev->tah_dev != 0; +} + +static int emac_get_regs_len(struct emac_instance *dev) +{ + if (emac_has_feature(dev, EMAC_FTR_EMAC4)) + return sizeof(struct emac_ethtool_regs_subhdr) + + EMAC4_ETHTOOL_REGS_SIZE; + else + return sizeof(struct emac_ethtool_regs_subhdr) + + EMAC_ETHTOOL_REGS_SIZE; +} + +static int emac_ethtool_get_regs_len(struct net_device *ndev) +{ + struct emac_instance *dev = netdev_priv(ndev); + int size; + + size = sizeof(struct emac_ethtool_regs_hdr) + + emac_get_regs_len(dev) + mal_get_regs_len(dev->mal); + if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII)) + size += zmii_get_regs_len(dev->zmii_dev); + if (emac_has_feature(dev, 
EMAC_FTR_HAS_RGMII)) + size += rgmii_get_regs_len(dev->rgmii_dev); + if (emac_has_feature(dev, EMAC_FTR_HAS_TAH)) + size += tah_get_regs_len(dev->tah_dev); + + return size; +} + +static void *emac_dump_regs(struct emac_instance *dev, void *buf) +{ + struct emac_ethtool_regs_subhdr *hdr = buf; + + hdr->index = dev->cell_index; + if (emac_has_feature(dev, EMAC_FTR_EMAC4)) { + hdr->version = EMAC4_ETHTOOL_REGS_VER; + memcpy_fromio(hdr + 1, dev->emacp, EMAC4_ETHTOOL_REGS_SIZE); + return ((void *)(hdr + 1) + EMAC4_ETHTOOL_REGS_SIZE); + } else { + hdr->version = EMAC_ETHTOOL_REGS_VER; + memcpy_fromio(hdr + 1, dev->emacp, EMAC_ETHTOOL_REGS_SIZE); + return ((void *)(hdr + 1) + EMAC_ETHTOOL_REGS_SIZE); + } +} + +static void emac_ethtool_get_regs(struct net_device *ndev, + struct ethtool_regs *regs, void *buf) +{ + struct emac_instance *dev = netdev_priv(ndev); + struct emac_ethtool_regs_hdr *hdr = buf; + + hdr->components = 0; + buf = hdr + 1; + + buf = mal_dump_regs(dev->mal, buf); + buf = emac_dump_regs(dev, buf); + if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII)) { + hdr->components |= EMAC_ETHTOOL_REGS_ZMII; + buf = zmii_dump_regs(dev->zmii_dev, buf); + } + if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII)) { + hdr->components |= EMAC_ETHTOOL_REGS_RGMII; + buf = rgmii_dump_regs(dev->rgmii_dev, buf); + } + if (emac_has_feature(dev, EMAC_FTR_HAS_TAH)) { + hdr->components |= EMAC_ETHTOOL_REGS_TAH; + buf = tah_dump_regs(dev->tah_dev, buf); + } +} + +static int emac_ethtool_nway_reset(struct net_device *ndev) +{ + struct emac_instance *dev = netdev_priv(ndev); + int res = 0; + + DBG(dev, "nway_reset" NL); + + if (dev->phy.address < 0) + return -EOPNOTSUPP; + + mutex_lock(&dev->link_lock); + if (!dev->phy.autoneg) { + res = -EINVAL; + goto out; + } + + dev->phy.def->ops->setup_aneg(&dev->phy, dev->phy.advertising); + out: + mutex_unlock(&dev->link_lock); + emac_force_link_update(dev); + return res; +} + +static int emac_ethtool_get_stats_count(struct net_device *ndev) +{ + return EMAC_ETHTOOL_STATS_COUNT; +} + +static void emac_ethtool_get_strings(struct net_device *ndev, u32 stringset, + u8 * buf) +{ + if (stringset == ETH_SS_STATS) + memcpy(buf, &emac_stats_keys, sizeof(emac_stats_keys)); +} + +static void emac_ethtool_get_ethtool_stats(struct net_device *ndev, + struct ethtool_stats *estats, + u64 * tmp_stats) +{ + struct emac_instance *dev = netdev_priv(ndev); + + memcpy(tmp_stats, &dev->stats, sizeof(dev->stats)); + tmp_stats += sizeof(dev->stats) / sizeof(u64); + memcpy(tmp_stats, &dev->estats, sizeof(dev->estats)); +} + +static void emac_ethtool_get_drvinfo(struct net_device *ndev, + struct ethtool_drvinfo *info) +{ + struct emac_instance *dev = netdev_priv(ndev); + + strcpy(info->driver, "ibm_emac"); + strcpy(info->version, DRV_VERSION); + info->fw_version[0] = '\0'; + sprintf(info->bus_info, "PPC 4xx EMAC-%d %s", + dev->cell_index, dev->ofdev->node->full_name); + info->n_stats = emac_ethtool_get_stats_count(ndev); + info->regdump_len = emac_ethtool_get_regs_len(ndev); +} + +static const struct ethtool_ops emac_ethtool_ops = { + .get_settings = emac_ethtool_get_settings, + .set_settings = emac_ethtool_set_settings, + .get_drvinfo = emac_ethtool_get_drvinfo, + + .get_regs_len = emac_ethtool_get_regs_len, + .get_regs = emac_ethtool_get_regs, + + .nway_reset = emac_ethtool_nway_reset, + + .get_ringparam = emac_ethtool_get_ringparam, + .get_pauseparam = emac_ethtool_get_pauseparam, + + .get_rx_csum = emac_ethtool_get_rx_csum, + + .get_strings = emac_ethtool_get_strings, + .get_stats_count = 
emac_ethtool_get_stats_count, + .get_ethtool_stats = emac_ethtool_get_ethtool_stats, + + .get_link = ethtool_op_get_link, + .get_tx_csum = ethtool_op_get_tx_csum, + .get_sg = ethtool_op_get_sg, +}; + +static int emac_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd) +{ + struct emac_instance *dev = netdev_priv(ndev); + uint16_t *data = (uint16_t *) & rq->ifr_ifru; + + DBG(dev, "ioctl %08x" NL, cmd); + + if (dev->phy.address < 0) + return -EOPNOTSUPP; + + switch (cmd) { + case SIOCGMIIPHY: + case SIOCDEVPRIVATE: + data[0] = dev->phy.address; + /* Fall through */ + case SIOCGMIIREG: + case SIOCDEVPRIVATE + 1: + data[3] = emac_mdio_read(ndev, dev->phy.address, data[1]); + return 0; + + case SIOCSMIIREG: + case SIOCDEVPRIVATE + 2: + if (!capable(CAP_NET_ADMIN)) + return -EPERM; + emac_mdio_write(ndev, dev->phy.address, data[1], data[2]); + return 0; + default: + return -EOPNOTSUPP; + } +} + +struct emac_depentry { + u32 phandle; + struct device_node *node; + struct of_device *ofdev; + void *drvdata; +}; + +#define EMAC_DEP_MAL_IDX 0 +#define EMAC_DEP_ZMII_IDX 1 +#define EMAC_DEP_RGMII_IDX 2 +#define EMAC_DEP_TAH_IDX 3 +#define EMAC_DEP_MDIO_IDX 4 +#define EMAC_DEP_PREV_IDX 5 +#define EMAC_DEP_COUNT 6 + +static int __devinit emac_check_deps(struct emac_instance *dev, + struct emac_depentry *deps) +{ + int i, there = 0; + struct device_node *np; + + for (i = 0; i < EMAC_DEP_COUNT; i++) { + /* no dependency on that item, allright */ + if (deps[i].phandle == 0) { + there++; + continue; + } + /* special case for blist as the dependency might go away */ + if (i == EMAC_DEP_PREV_IDX) { + np = *(dev->blist - 1); + if (np == NULL) { + deps[i].phandle = 0; + there++; + continue; + } + if (deps[i].node == NULL) + deps[i].node = of_node_get(np); + } + if (deps[i].node == NULL) + deps[i].node = of_find_node_by_phandle(deps[i].phandle); + if (deps[i].node == NULL) + continue; + if (deps[i].ofdev == NULL) + deps[i].ofdev = of_find_device_by_node(deps[i].node); + if (deps[i].ofdev == NULL) + continue; + if (deps[i].drvdata == NULL) + deps[i].drvdata = dev_get_drvdata(&deps[i].ofdev->dev); + if (deps[i].drvdata != NULL) + there++; + } + return (there == EMAC_DEP_COUNT); +} + +static void emac_put_deps(struct emac_instance *dev) +{ + if (dev->mal_dev) + of_dev_put(dev->mal_dev); + if (dev->zmii_dev) + of_dev_put(dev->zmii_dev); + if (dev->rgmii_dev) + of_dev_put(dev->rgmii_dev); + if (dev->mdio_dev) + of_dev_put(dev->mdio_dev); + if (dev->tah_dev) + of_dev_put(dev->tah_dev); +} + +static int __devinit emac_of_bus_notify(struct notifier_block *nb, + unsigned long action, void *data) +{ + /* We are only intereted in device addition */ + if (action == BUS_NOTIFY_BOUND_DRIVER) + wake_up_all(&emac_probe_wait); + return 0; +} + +static struct notifier_block emac_of_bus_notifier = { + .notifier_call = emac_of_bus_notify +}; + +static int __devinit emac_wait_deps(struct emac_instance *dev) +{ + struct emac_depentry deps[EMAC_DEP_COUNT]; + int i, err; + + memset(&deps, 0, sizeof(deps)); + + deps[EMAC_DEP_MAL_IDX].phandle = dev->mal_ph; + deps[EMAC_DEP_ZMII_IDX].phandle = dev->zmii_ph; + deps[EMAC_DEP_RGMII_IDX].phandle = dev->rgmii_ph; + if (dev->tah_ph) + deps[EMAC_DEP_TAH_IDX].phandle = dev->tah_ph; + if (dev->mdio_ph) + deps[EMAC_DEP_MDIO_IDX].phandle = dev->mdio_ph; + if (dev->blist && dev->blist > emac_boot_list) + deps[EMAC_DEP_PREV_IDX].phandle = 0xffffffffu; + bus_register_notifier(&of_platform_bus_type, &emac_of_bus_notifier); + wait_event_timeout(emac_probe_wait, + emac_check_deps(dev, deps), + 
EMAC_PROBE_DEP_TIMEOUT); + bus_unregister_notifier(&of_platform_bus_type, &emac_of_bus_notifier); + err = emac_check_deps(dev, deps) ? 0 : -ENODEV; + for (i = 0; i < EMAC_DEP_COUNT; i++) { + if (deps[i].node) + of_node_put(deps[i].node); + if (err && deps[i].ofdev) + of_dev_put(deps[i].ofdev); + } + if (err == 0) { + dev->mal_dev = deps[EMAC_DEP_MAL_IDX].ofdev; + dev->zmii_dev = deps[EMAC_DEP_ZMII_IDX].ofdev; + dev->rgmii_dev = deps[EMAC_DEP_RGMII_IDX].ofdev; + dev->tah_dev = deps[EMAC_DEP_TAH_IDX].ofdev; + dev->mdio_dev = deps[EMAC_DEP_MDIO_IDX].ofdev; + } + if (deps[EMAC_DEP_PREV_IDX].ofdev) + of_dev_put(deps[EMAC_DEP_PREV_IDX].ofdev); + return err; +} + +static int __devinit emac_read_uint_prop(struct device_node *np, const char *name, + u32 *val, int fatal) +{ + int len; + const u32 *prop = of_get_property(np, name, &len); + if (prop == NULL || len < sizeof(u32)) { + if (fatal) + printk(KERN_ERR "%s: missing %s property\n", + np->full_name, name); + return -ENODEV; + } + *val = *prop; + return 0; +} + +static int __devinit emac_init_phy(struct emac_instance *dev) +{ + struct device_node *np = dev->ofdev->node; + struct net_device *ndev = dev->ndev; + u32 phy_map, adv; + int i; + + dev->phy.dev = ndev; + dev->phy.mode = dev->phy_mode; + + /* PHY-less configuration. + * XXX I probably should move these settings to the dev tree + */ + if (dev->phy_address == 0xffffffff && dev->phy_map == 0xffffffff) { + emac_reset(dev); + + /* PHY-less configuration. + * XXX I probably should move these settings to the dev tree + */ + dev->phy.address = -1; + dev->phy.features = SUPPORTED_100baseT_Full | SUPPORTED_MII; + dev->phy.pause = 1; + + return 0; + } + + mutex_lock(&emac_phy_map_lock); + phy_map = dev->phy_map | busy_phy_map; + + DBG(dev, "PHY maps %08x %08x" NL, dev->phy_map, busy_phy_map); + + dev->phy.mdio_read = emac_mdio_read; + dev->phy.mdio_write = emac_mdio_write; + + /* Configure EMAC with defaults so we can at least use MDIO + * This is needed mostly for 440GX + */ + if (emac_phy_gpcs(dev->phy.mode)) { + /* XXX + * Make GPCS PHY address equal to EMAC index. + * We probably should take into account busy_phy_map + * and/or phy_map here. + * + * Note that the busy_phy_map is currently global + * while it should probably be per-ASIC... 
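+		 * busy_phy_map is a global bitmap of MDIO addresses that other
+		 * EMAC instances have already probed or claimed; it is ORed
+		 * into phy_map above so that the default address scan further
+		 * down does not reuse them.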
+ */ + dev->phy.address = dev->cell_index; + } + + emac_configure(dev); + + if (dev->phy_address != 0xffffffff) + phy_map = ~(1 << dev->phy_address); + + for (i = 0; i < 0x20; phy_map >>= 1, ++i) + if (!(phy_map & 1)) { + int r; + busy_phy_map |= 1 << i; + + /* Quick check if there is a PHY at the address */ + r = emac_mdio_read(dev->ndev, i, MII_BMCR); + if (r == 0xffff || r < 0) + continue; + if (!emac_mii_phy_probe(&dev->phy, i)) + break; + } + mutex_unlock(&emac_phy_map_lock); + if (i == 0x20) { + printk(KERN_WARNING "%s: can't find PHY!\n", np->full_name); + return -ENXIO; + } + + /* Init PHY */ + if (dev->phy.def->ops->init) + dev->phy.def->ops->init(&dev->phy); + + /* Disable any PHY features not supported by the platform */ + dev->phy.def->features &= ~dev->phy_feat_exc; + + /* Setup initial link parameters */ + if (dev->phy.features & SUPPORTED_Autoneg) { + adv = dev->phy.features; + if (!emac_has_feature(dev, EMAC_FTR_NO_FLOW_CONTROL_40x)) + adv |= ADVERTISED_Pause | ADVERTISED_Asym_Pause; + /* Restart autonegotiation */ + dev->phy.def->ops->setup_aneg(&dev->phy, adv); + } else { + u32 f = dev->phy.def->features; + int speed = SPEED_10, fd = DUPLEX_HALF; + + /* Select highest supported speed/duplex */ + if (f & SUPPORTED_1000baseT_Full) { + speed = SPEED_1000; + fd = DUPLEX_FULL; + } else if (f & SUPPORTED_1000baseT_Half) + speed = SPEED_1000; + else if (f & SUPPORTED_100baseT_Full) { + speed = SPEED_100; + fd = DUPLEX_FULL; + } else if (f & SUPPORTED_100baseT_Half) + speed = SPEED_100; + else if (f & SUPPORTED_10baseT_Full) + fd = DUPLEX_FULL; + + /* Force link parameters */ + dev->phy.def->ops->setup_forced(&dev->phy, speed, fd); + } + return 0; +} + +static int __devinit emac_init_config(struct emac_instance *dev) +{ + struct device_node *np = dev->ofdev->node; + const void *p; + unsigned int plen; + const char *pm, *phy_modes[] = { + [PHY_MODE_NA] = "", + [PHY_MODE_MII] = "mii", + [PHY_MODE_RMII] = "rmii", + [PHY_MODE_SMII] = "smii", + [PHY_MODE_RGMII] = "rgmii", + [PHY_MODE_TBI] = "tbi", + [PHY_MODE_GMII] = "gmii", + [PHY_MODE_RTBI] = "rtbi", + [PHY_MODE_SGMII] = "sgmii", + }; + + /* Read config from device-tree */ + if (emac_read_uint_prop(np, "mal-device", &dev->mal_ph, 1)) + return -ENXIO; + if (emac_read_uint_prop(np, "mal-tx-channel", &dev->mal_tx_chan, 1)) + return -ENXIO; + if (emac_read_uint_prop(np, "mal-rx-channel", &dev->mal_rx_chan, 1)) + return -ENXIO; + if (emac_read_uint_prop(np, "cell-index", &dev->cell_index, 1)) + return -ENXIO; + if (emac_read_uint_prop(np, "max-frame-size", &dev->max_mtu, 0)) + dev->max_mtu = 1500; + if (emac_read_uint_prop(np, "rx-fifo-size", &dev->rx_fifo_size, 0)) + dev->rx_fifo_size = 2048; + if (emac_read_uint_prop(np, "tx-fifo-size", &dev->tx_fifo_size, 0)) + dev->tx_fifo_size = 2048; + if (emac_read_uint_prop(np, "rx-fifo-size-gige", &dev->rx_fifo_size_gige, 0)) + dev->rx_fifo_size_gige = dev->rx_fifo_size; + if (emac_read_uint_prop(np, "tx-fifo-size-gige", &dev->tx_fifo_size_gige, 0)) + dev->tx_fifo_size_gige = dev->tx_fifo_size; + if (emac_read_uint_prop(np, "phy-address", &dev->phy_address, 0)) + dev->phy_address = 0xffffffff; + if (emac_read_uint_prop(np, "phy-map", &dev->phy_map, 0)) + dev->phy_map = 0xffffffff; + if (emac_read_uint_prop(np->parent, "clock-frequency", &dev->opb_bus_freq, 1)) + return -ENXIO; + if (emac_read_uint_prop(np, "tah-device", &dev->tah_ph, 0)) + dev->tah_ph = 0; + if (emac_read_uint_prop(np, "tah-channel", &dev->tah_port, 0)) + dev->tah_ph = 0; + if (emac_read_uint_prop(np, "mdio-device", 
&dev->mdio_ph, 0)) + dev->mdio_ph = 0; + if (emac_read_uint_prop(np, "zmii-device", &dev->zmii_ph, 0)) + dev->zmii_ph = 0;; + if (emac_read_uint_prop(np, "zmii-channel", &dev->zmii_port, 0)) + dev->zmii_port = 0xffffffff;; + if (emac_read_uint_prop(np, "rgmii-device", &dev->rgmii_ph, 0)) + dev->rgmii_ph = 0;; + if (emac_read_uint_prop(np, "rgmii-channel", &dev->rgmii_port, 0)) + dev->rgmii_port = 0xffffffff;; + if (emac_read_uint_prop(np, "fifo-entry-size", &dev->fifo_entry_size, 0)) + dev->fifo_entry_size = 16; + if (emac_read_uint_prop(np, "mal-burst-size", &dev->mal_burst_size, 0)) + dev->mal_burst_size = 256; + + /* PHY mode needs some decoding */ + dev->phy_mode = PHY_MODE_NA; + pm = of_get_property(np, "phy-mode", &plen); + if (pm != NULL) { + int i; + for (i = 0; i < ARRAY_SIZE(phy_modes); i++) + if (!strcasecmp(pm, phy_modes[i])) { + dev->phy_mode = i; + break; + } + } + + /* Backward compat with non-final DT */ + if (dev->phy_mode == PHY_MODE_NA && pm != NULL && plen == 4) { + u32 nmode = *(const u32 *)pm; + if (nmode > PHY_MODE_NA && nmode <= PHY_MODE_SGMII) + dev->phy_mode = nmode; + } + + /* Check EMAC version */ + if (of_device_is_compatible(np, "ibm,emac4")) + dev->features |= EMAC_FTR_EMAC4; + if (of_device_is_compatible(np, "ibm,emac-axon") + || of_device_is_compatible(np, "ibm,emac-440epx")) + dev->features |= EMAC_FTR_HAS_AXON_STACR + | EMAC_FTR_STACR_OC_INVERT; + if (of_device_is_compatible(np, "ibm,emac-440spe")) + dev->features |= EMAC_FTR_STACR_OC_INVERT; + + /* Fixup some feature bits based on the device tree and verify + * we have support for them compiled in + */ + if (dev->tah_ph != 0) { +#ifdef CONFIG_IBM_NEW_EMAC_TAH + dev->features |= EMAC_FTR_HAS_TAH; +#else + printk(KERN_ERR "%s: TAH support not enabled !\n", + np->full_name); + return -ENXIO; +#endif + } + + if (dev->zmii_ph != 0) { +#ifdef CONFIG_IBM_NEW_EMAC_ZMII + dev->features |= EMAC_FTR_HAS_ZMII; +#else + printk(KERN_ERR "%s: ZMII support not enabled !\n", + np->full_name); + return -ENXIO; +#endif + } + + if (dev->rgmii_ph != 0) { +#ifdef CONFIG_IBM_NEW_EMAC_RGMII + dev->features |= EMAC_FTR_HAS_RGMII; +#else + printk(KERN_ERR "%s: RGMII support not enabled !\n", + np->full_name); + return -ENXIO; +#endif + } + + /* Read MAC-address */ + p = of_get_property(np, "local-mac-address", NULL); + if (p == NULL) { + printk(KERN_ERR "%s: Can't find local-mac-address property\n", + np->full_name); + return -ENXIO; + } + memcpy(dev->ndev->dev_addr, p, 6); + + DBG(dev, "features : 0x%08x / 0x%08x\n", dev->features, EMAC_FTRS_POSSIBLE); + DBG(dev, "tx_fifo_size : %d (%d gige)\n", dev->tx_fifo_size, dev->tx_fifo_size_gige); + DBG(dev, "rx_fifo_size : %d (%d gige)\n", dev->rx_fifo_size, dev->rx_fifo_size_gige); + DBG(dev, "max_mtu : %d\n", dev->max_mtu); + DBG(dev, "OPB freq : %d\n", dev->opb_bus_freq); + + return 0; +} + +static int __devinit emac_probe(struct of_device *ofdev, + const struct of_device_id *match) +{ + struct net_device *ndev; + struct emac_instance *dev; + struct device_node *np = ofdev->node; + struct device_node **blist = NULL; + int err, i; + + /* Find ourselves in the bootlist if we are there */ + for (i = 0; i < EMAC_BOOT_LIST_SIZE; i++) + if (emac_boot_list[i] == np) + blist = &emac_boot_list[i]; + + /* Allocate our net_device structure */ + err = -ENOMEM; + ndev = alloc_etherdev(sizeof(struct emac_instance)); + if (!ndev) { + printk(KERN_ERR "%s: could not allocate ethernet device!\n", + np->full_name); + goto err_gone; + } + dev = netdev_priv(ndev); + dev->ndev = ndev; + dev->ofdev = 
ofdev; + dev->blist = blist; + SET_NETDEV_DEV(ndev, &ofdev->dev); + + /* Initialize some embedded data structures */ + mutex_init(&dev->mdio_lock); + mutex_init(&dev->link_lock); + spin_lock_init(&dev->lock); + INIT_WORK(&dev->reset_work, emac_reset_work); + + /* Init various config data based on device-tree */ + err = emac_init_config(dev); + if (err != 0) + goto err_free; + + /* Get interrupts. EMAC irq is mandatory, WOL irq is optional */ + dev->emac_irq = irq_of_parse_and_map(np, 0); + dev->wol_irq = irq_of_parse_and_map(np, 1); + if (dev->emac_irq == NO_IRQ) { + printk(KERN_ERR "%s: Can't map main interrupt\n", np->full_name); + goto err_free; + } + ndev->irq = dev->emac_irq; + + /* Map EMAC regs */ + if (of_address_to_resource(np, 0, &dev->rsrc_regs)) { + printk(KERN_ERR "%s: Can't get registers address\n", + np->full_name); + goto err_irq_unmap; + } + // TODO : request_mem_region + dev->emacp = ioremap(dev->rsrc_regs.start, sizeof(struct emac_regs)); + if (dev->emacp == NULL) { + printk(KERN_ERR "%s: Can't map device registers!\n", + np->full_name); + err = -ENOMEM; + goto err_irq_unmap; + } + + /* Wait for dependent devices */ + err = emac_wait_deps(dev); + if (err) { + printk(KERN_ERR + "%s: Timeout waiting for dependent devices\n", + np->full_name); + /* display more info about what's missing ? */ + goto err_reg_unmap; + } + dev->mal = dev_get_drvdata(&dev->mal_dev->dev); + if (dev->mdio_dev != NULL) + dev->mdio_instance = dev_get_drvdata(&dev->mdio_dev->dev); + + /* Register with MAL */ + dev->commac.ops = &emac_commac_ops; + dev->commac.dev = dev; + dev->commac.tx_chan_mask = MAL_CHAN_MASK(dev->mal_tx_chan); + dev->commac.rx_chan_mask = MAL_CHAN_MASK(dev->mal_rx_chan); + err = mal_register_commac(dev->mal, &dev->commac); + if (err) { + printk(KERN_ERR "%s: failed to register with mal %s!\n", + np->full_name, dev->mal_dev->node->full_name); + goto err_rel_deps; + } + dev->rx_skb_size = emac_rx_skb_size(ndev->mtu); + dev->rx_sync_size = emac_rx_sync_size(ndev->mtu); + + /* Get pointers to BD rings */ + dev->tx_desc = + dev->mal->bd_virt + mal_tx_bd_offset(dev->mal, dev->mal_tx_chan); + dev->rx_desc = + dev->mal->bd_virt + mal_rx_bd_offset(dev->mal, dev->mal_rx_chan); + + DBG(dev, "tx_desc %p" NL, dev->tx_desc); + DBG(dev, "rx_desc %p" NL, dev->rx_desc); + + /* Clean rings */ + memset(dev->tx_desc, 0, NUM_TX_BUFF * sizeof(struct mal_descriptor)); + memset(dev->rx_desc, 0, NUM_RX_BUFF * sizeof(struct mal_descriptor)); + + /* Attach to ZMII, if needed */ + if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII) && + (err = zmii_attach(dev->zmii_dev, dev->zmii_port, &dev->phy_mode)) != 0) + goto err_unreg_commac; + + /* Attach to RGMII, if needed */ + if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII) && + (err = rgmii_attach(dev->rgmii_dev, dev->rgmii_port, dev->phy_mode)) != 0) + goto err_detach_zmii; + + /* Attach to TAH, if needed */ + if (emac_has_feature(dev, EMAC_FTR_HAS_TAH) && + (err = tah_attach(dev->tah_dev, dev->tah_port)) != 0) + goto err_detach_rgmii; + + /* Set some link defaults before we can find out real parameters */ + dev->phy.speed = SPEED_100; + dev->phy.duplex = DUPLEX_FULL; + dev->phy.autoneg = AUTONEG_DISABLE; + dev->phy.pause = dev->phy.asym_pause = 0; + dev->stop_timeout = STOP_TIMEOUT_100; + INIT_DELAYED_WORK(&dev->link_work, emac_link_timer); + + /* Find PHY if any */ + err = emac_init_phy(dev); + if (err != 0) + goto err_detach_tah; + + /* Fill in the driver function table */ + ndev->open = &emac_open; +#ifdef CONFIG_IBM_NEW_EMAC_TAH + if (dev->tah_dev) { + 
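+		/* A TAH is attached: use the scatter/gather transmit path and
+		 * advertise hardware IP checksum and SG support to the stack.
+		 */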
ndev->hard_start_xmit = &emac_start_xmit_sg; + ndev->features |= NETIF_F_IP_CSUM | NETIF_F_SG; + } else +#endif + ndev->hard_start_xmit = &emac_start_xmit; + ndev->tx_timeout = &emac_tx_timeout; + ndev->watchdog_timeo = 5 * HZ; + ndev->stop = &emac_close; + ndev->get_stats = &emac_stats; + ndev->set_multicast_list = &emac_set_multicast_list; + ndev->do_ioctl = &emac_ioctl; + if (emac_phy_supports_gige(dev->phy_mode)) { + ndev->change_mtu = &emac_change_mtu; + dev->commac.ops = &emac_commac_sg_ops; + } + SET_ETHTOOL_OPS(ndev, &emac_ethtool_ops); + + netif_carrier_off(ndev); + netif_stop_queue(ndev); + + err = register_netdev(ndev); + if (err) { + printk(KERN_ERR "%s: failed to register net device (%d)!\n", + np->full_name, err); + goto err_detach_tah; + } + + /* Set our drvdata last as we don't want them visible until we are + * fully initialized + */ + wmb(); + dev_set_drvdata(&ofdev->dev, dev); + + /* There's a new kid in town ! Let's tell everybody */ + wake_up_all(&emac_probe_wait); + + + printk(KERN_INFO + "%s: EMAC-%d %s, MAC %02x:%02x:%02x:%02x:%02x:%02x\n", + ndev->name, dev->cell_index, np->full_name, + ndev->dev_addr[0], ndev->dev_addr[1], ndev->dev_addr[2], + ndev->dev_addr[3], ndev->dev_addr[4], ndev->dev_addr[5]); + + if (dev->phy.address >= 0) + printk("%s: found %s PHY (0x%02x)\n", ndev->name, + dev->phy.def->name, dev->phy.address); + + emac_dbg_register(dev); + + /* Life is good */ + return 0; + + /* I have a bad feeling about this ... */ + + err_detach_tah: + if (emac_has_feature(dev, EMAC_FTR_HAS_TAH)) + tah_detach(dev->tah_dev, dev->tah_port); + err_detach_rgmii: + if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII)) + rgmii_detach(dev->rgmii_dev, dev->rgmii_port); + err_detach_zmii: + if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII)) + zmii_detach(dev->zmii_dev, dev->zmii_port); + err_unreg_commac: + mal_unregister_commac(dev->mal, &dev->commac); + err_rel_deps: + emac_put_deps(dev); + err_reg_unmap: + iounmap(dev->emacp); + err_irq_unmap: + if (dev->wol_irq != NO_IRQ) + irq_dispose_mapping(dev->wol_irq); + if (dev->emac_irq != NO_IRQ) + irq_dispose_mapping(dev->emac_irq); + err_free: + kfree(ndev); + err_gone: + /* if we were on the bootlist, remove us as we won't show up and + * wake up all waiters to notify them in case they were waiting + * on us + */ + if (blist) { + *blist = NULL; + wake_up_all(&emac_probe_wait); + } + return err; +} + +static int __devexit emac_remove(struct of_device *ofdev) +{ + struct emac_instance *dev = dev_get_drvdata(&ofdev->dev); + + DBG(dev, "remove" NL); + + dev_set_drvdata(&ofdev->dev, NULL); + + unregister_netdev(dev->ndev); + + if (emac_has_feature(dev, EMAC_FTR_HAS_TAH)) + tah_detach(dev->tah_dev, dev->tah_port); + if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII)) + rgmii_detach(dev->rgmii_dev, dev->rgmii_port); + if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII)) + zmii_detach(dev->zmii_dev, dev->zmii_port); + + mal_unregister_commac(dev->mal, &dev->commac); + emac_put_deps(dev); + + emac_dbg_unregister(dev); + iounmap(dev->emacp); + + if (dev->wol_irq != NO_IRQ) + irq_dispose_mapping(dev->wol_irq); + if (dev->emac_irq != NO_IRQ) + irq_dispose_mapping(dev->emac_irq); + + kfree(dev->ndev); + + return 0; +} + +/* XXX Features in here should be replaced by properties... 
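+ * For now emac_init_config() infers them from compatible strings such as
+ * "ibm,emac4", "ibm,emac-axon" and "ibm,emac-440spe", so the match table
+ * below only needs the two generic entries.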
*/ +static struct of_device_id emac_match[] = +{ + { + .type = "network", + .compatible = "ibm,emac", + }, + { + .type = "network", + .compatible = "ibm,emac4", + }, + {}, +}; + +static struct of_platform_driver emac_driver = { + .name = "emac", + .match_table = emac_match, + + .probe = emac_probe, + .remove = emac_remove, +}; + +static void __init emac_make_bootlist(void) +{ + struct device_node *np = NULL; + int j, max, i = 0, k; + int cell_indices[EMAC_BOOT_LIST_SIZE]; + + /* Collect EMACs */ + while((np = of_find_all_nodes(np)) != NULL) { + const u32 *idx; + + if (of_match_node(emac_match, np) == NULL) + continue; + if (of_get_property(np, "unused", NULL)) + continue; + idx = of_get_property(np, "cell-index", NULL); + if (idx == NULL) + continue; + cell_indices[i] = *idx; + emac_boot_list[i++] = of_node_get(np); + if (i >= EMAC_BOOT_LIST_SIZE) { + of_node_put(np); + break; + } + } + max = i; + + /* Bubble sort them (doh, what a creative algorithm :-) */ + for (i = 0; max > 1 && (i < (max - 1)); i++) + for (j = i; j < max; j++) { + if (cell_indices[i] > cell_indices[j]) { + np = emac_boot_list[i]; + emac_boot_list[i] = emac_boot_list[j]; + emac_boot_list[j] = np; + k = cell_indices[i]; + cell_indices[i] = cell_indices[j]; + cell_indices[j] = k; + } + } +} + +static int __init emac_init(void) +{ + int rc; + + printk(KERN_INFO DRV_DESC ", version " DRV_VERSION "\n"); + + /* Init debug stuff */ + emac_init_debug(); + + /* Build EMAC boot list */ + emac_make_bootlist(); + + /* Init submodules */ + rc = mal_init(); + if (rc) + goto err; + rc = zmii_init(); + if (rc) + goto err_mal; + rc = rgmii_init(); + if (rc) + goto err_zmii; + rc = tah_init(); + if (rc) + goto err_rgmii; + rc = of_register_platform_driver(&emac_driver); + if (rc) + goto err_tah; + + return 0; + + err_tah: + tah_exit(); + err_rgmii: + rgmii_exit(); + err_zmii: + zmii_exit(); + err_mal: + mal_exit(); + err: + return rc; +} + +static void __exit emac_exit(void) +{ + int i; + + of_unregister_platform_driver(&emac_driver); + + tah_exit(); + rgmii_exit(); + zmii_exit(); + mal_exit(); + emac_fini_debug(); + + /* Destroy EMAC boot list */ + for (i = 0; i < EMAC_BOOT_LIST_SIZE; i++) + if (emac_boot_list[i]) + of_node_put(emac_boot_list[i]); +} + +module_init(emac_init); +module_exit(emac_exit); diff --git a/drivers/net/ibm_newemac/core.h b/drivers/net/ibm_newemac/core.h new file mode 100644 index 0000000000000000000000000000000000000000..4011803117caae85be457cc5b6774b351c097fb1 --- /dev/null +++ b/drivers/net/ibm_newemac/core.h @@ -0,0 +1,355 @@ +/* + * drivers/net/ibm_newemac/core.h + * + * Driver for PowerPC 4xx on-chip ethernet controller. + * + * Copyright (c) 2004, 2005 Zultys Technologies. + * Eugene Surovegin or + * + * Based on original work by + * Armin Kuster + * Johnnie Peters + * Copyright 2000, 2001 MontaVista Softare Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. 
+ * + */ +#ifndef __IBM_NEWEMAC_CORE_H +#define __IBM_NEWEMAC_CORE_H + +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include "emac.h" +#include "phy.h" +#include "zmii.h" +#include "rgmii.h" +#include "mal.h" +#include "tah.h" +#include "debug.h" + +#define NUM_TX_BUFF CONFIG_IBM_NEW_EMAC_TXB +#define NUM_RX_BUFF CONFIG_IBM_NEW_EMAC_RXB + +/* Simple sanity check */ +#if NUM_TX_BUFF > 256 || NUM_RX_BUFF > 256 +#error Invalid number of buffer descriptors (greater than 256) +#endif + +#define EMAC_MIN_MTU 46 + +/* Maximum L2 header length (VLAN tagged, no FCS) */ +#define EMAC_MTU_OVERHEAD (6 * 2 + 2 + 4) + +/* RX BD size for the given MTU */ +static inline int emac_rx_size(int mtu) +{ + if (mtu > ETH_DATA_LEN) + return MAL_MAX_RX_SIZE; + else + return mal_rx_size(ETH_DATA_LEN + EMAC_MTU_OVERHEAD); +} + +#define EMAC_DMA_ALIGN(x) ALIGN((x), dma_get_cache_alignment()) + +#define EMAC_RX_SKB_HEADROOM \ + EMAC_DMA_ALIGN(CONFIG_IBM_NEW_EMAC_RX_SKB_HEADROOM) + +/* Size of RX skb for the given MTU */ +static inline int emac_rx_skb_size(int mtu) +{ + int size = max(mtu + EMAC_MTU_OVERHEAD, emac_rx_size(mtu)); + return EMAC_DMA_ALIGN(size + 2) + EMAC_RX_SKB_HEADROOM; +} + +/* RX DMA sync size */ +static inline int emac_rx_sync_size(int mtu) +{ + return EMAC_DMA_ALIGN(emac_rx_size(mtu) + 2); +} + +/* Driver statistcs is split into two parts to make it more cache friendly: + * - normal statistics (packet count, etc) + * - error statistics + * + * When statistics is requested by ethtool, these parts are concatenated, + * normal one goes first. + * + * Please, keep these structures in sync with emac_stats_keys. + */ + +/* Normal TX/RX Statistics */ +struct emac_stats { + u64 rx_packets; + u64 rx_bytes; + u64 tx_packets; + u64 tx_bytes; + u64 rx_packets_csum; + u64 tx_packets_csum; +}; + +/* Error statistics */ +struct emac_error_stats { + u64 tx_undo; + + /* Software RX Errors */ + u64 rx_dropped_stack; + u64 rx_dropped_oom; + u64 rx_dropped_error; + u64 rx_dropped_resize; + u64 rx_dropped_mtu; + u64 rx_stopped; + /* BD reported RX errors */ + u64 rx_bd_errors; + u64 rx_bd_overrun; + u64 rx_bd_bad_packet; + u64 rx_bd_runt_packet; + u64 rx_bd_short_event; + u64 rx_bd_alignment_error; + u64 rx_bd_bad_fcs; + u64 rx_bd_packet_too_long; + u64 rx_bd_out_of_range; + u64 rx_bd_in_range; + /* EMAC IRQ reported RX errors */ + u64 rx_parity; + u64 rx_fifo_overrun; + u64 rx_overrun; + u64 rx_bad_packet; + u64 rx_runt_packet; + u64 rx_short_event; + u64 rx_alignment_error; + u64 rx_bad_fcs; + u64 rx_packet_too_long; + u64 rx_out_of_range; + u64 rx_in_range; + + /* Software TX Errors */ + u64 tx_dropped; + /* BD reported TX errors */ + u64 tx_bd_errors; + u64 tx_bd_bad_fcs; + u64 tx_bd_carrier_loss; + u64 tx_bd_excessive_deferral; + u64 tx_bd_excessive_collisions; + u64 tx_bd_late_collision; + u64 tx_bd_multple_collisions; + u64 tx_bd_single_collision; + u64 tx_bd_underrun; + u64 tx_bd_sqe; + /* EMAC IRQ reported TX errors */ + u64 tx_parity; + u64 tx_underrun; + u64 tx_sqe; + u64 tx_errors; +}; + +#define EMAC_ETHTOOL_STATS_COUNT ((sizeof(struct emac_stats) + \ + sizeof(struct emac_error_stats)) \ + / sizeof(u64)) + +struct emac_instance { + struct net_device *ndev; + struct resource rsrc_regs; + struct emac_regs __iomem *emacp; + struct of_device *ofdev; + struct device_node **blist; /* bootlist entry */ + + /* MAL linkage */ + u32 mal_ph; + struct of_device *mal_dev; + u32 mal_rx_chan; + u32 mal_tx_chan; + struct mal_instance *mal; + struct 
mal_commac commac; + + /* PHY infos */ + u32 phy_mode; + u32 phy_map; + u32 phy_address; + u32 phy_feat_exc; + struct mii_phy phy; + struct mutex link_lock; + struct delayed_work link_work; + int link_polling; + + /* Shared MDIO if any */ + u32 mdio_ph; + struct of_device *mdio_dev; + struct emac_instance *mdio_instance; + struct mutex mdio_lock; + + /* ZMII infos if any */ + u32 zmii_ph; + u32 zmii_port; + struct of_device *zmii_dev; + + /* RGMII infos if any */ + u32 rgmii_ph; + u32 rgmii_port; + struct of_device *rgmii_dev; + + /* TAH infos if any */ + u32 tah_ph; + u32 tah_port; + struct of_device *tah_dev; + + /* IRQs */ + int wol_irq; + int emac_irq; + + /* OPB bus frequency in Mhz */ + u32 opb_bus_freq; + + /* Cell index within an ASIC (for clk mgmnt) */ + u32 cell_index; + + /* Max supported MTU */ + u32 max_mtu; + + /* Feature bits (from probe table) */ + unsigned int features; + + /* Tx and Rx fifo sizes & other infos in bytes */ + u32 tx_fifo_size; + u32 tx_fifo_size_gige; + u32 rx_fifo_size; + u32 rx_fifo_size_gige; + u32 fifo_entry_size; + u32 mal_burst_size; /* move to MAL ? */ + + /* Descriptor management + */ + struct mal_descriptor *tx_desc; + int tx_cnt; + int tx_slot; + int ack_slot; + + struct mal_descriptor *rx_desc; + int rx_slot; + struct sk_buff *rx_sg_skb; /* 1 */ + int rx_skb_size; + int rx_sync_size; + + struct sk_buff *tx_skb[NUM_TX_BUFF]; + struct sk_buff *rx_skb[NUM_RX_BUFF]; + + /* Stats + */ + struct emac_error_stats estats; + struct net_device_stats nstats; + struct emac_stats stats; + + /* Misc + */ + int reset_failed; + int stop_timeout; /* in us */ + int no_mcast; + int mcast_pending; + struct work_struct reset_work; + spinlock_t lock; +}; + +/* + * Features of various EMAC implementations + */ + +/* + * No flow control on 40x according to the original driver + */ +#define EMAC_FTR_NO_FLOW_CONTROL_40x 0x00000001 +/* + * Cell is an EMAC4 + */ +#define EMAC_FTR_EMAC4 0x00000002 +/* + * For the 440SPe, AMCC inexplicably changed the polarity of + * the "operation complete" bit in the MII control register. + */ +#define EMAC_FTR_STACR_OC_INVERT 0x00000004 +/* + * Set if we have a TAH. + */ +#define EMAC_FTR_HAS_TAH 0x00000008 +/* + * Set if we have a ZMII. + */ +#define EMAC_FTR_HAS_ZMII 0x00000010 +/* + * Set if we have a RGMII. + */ +#define EMAC_FTR_HAS_RGMII 0x00000020 +/* + * Set if we have axon-type STACR + */ +#define EMAC_FTR_HAS_AXON_STACR 0x00000040 + + +/* Right now, we don't quite handle the always/possible masks on the + * most optimal way as we don't have a way to say something like + * always EMAC4. Patches welcome. + */ +enum { + EMAC_FTRS_ALWAYS = 0, + + EMAC_FTRS_POSSIBLE = +#ifdef CONFIG_IBM_NEW_EMAC_EMAC4 + EMAC_FTR_EMAC4 | EMAC_FTR_HAS_AXON_STACR | + EMAC_FTR_STACR_OC_INVERT | +#endif +#ifdef CONFIG_IBM_NEW_EMAC_TAH + EMAC_FTR_HAS_TAH | +#endif +#ifdef CONFIG_IBM_NEW_EMAC_ZMII + EMAC_FTR_HAS_ZMII | +#endif +#ifdef CONFIG_IBM_NEW_EMAC_RGMII + EMAC_FTR_HAS_RGMII | +#endif + 0, +}; + +static inline int emac_has_feature(struct emac_instance *dev, + unsigned long feature) +{ + return (EMAC_FTRS_ALWAYS & feature) || + (EMAC_FTRS_POSSIBLE & dev->features & feature); +} + + +/* Ethtool get_regs complex data. + * We want to get not just EMAC registers, but also MAL, ZMII, RGMII, TAH + * when available. + * + * Returned BLOB consists of the ibm_emac_ethtool_regs_hdr, + * MAL registers, EMAC registers and optional ZMII, RGMII, TAH registers. + * Each register component is preceded with emac_ethtool_regs_subhdr. 
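+ * As an illustration, an EMAC with RGMII and TAH attached returns:
+ * hdr { components = RGMII | TAH }, then the MAL, EMAC, RGMII and TAH
+ * blocks in that order, each introduced by its own subhdr.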
+ * Order of the optional headers follows their relative bit posititions + * in emac_ethtool_regs_hdr.components + */ +#define EMAC_ETHTOOL_REGS_ZMII 0x00000001 +#define EMAC_ETHTOOL_REGS_RGMII 0x00000002 +#define EMAC_ETHTOOL_REGS_TAH 0x00000004 + +struct emac_ethtool_regs_hdr { + u32 components; +}; + +struct emac_ethtool_regs_subhdr { + u32 version; + u32 index; +}; + +#endif /* __IBM_NEWEMAC_CORE_H */ diff --git a/drivers/net/ibm_newemac/debug.c b/drivers/net/ibm_newemac/debug.c new file mode 100644 index 0000000000000000000000000000000000000000..170524ee0f1927a72a93d9740c4d654c7bde25a9 --- /dev/null +++ b/drivers/net/ibm_newemac/debug.c @@ -0,0 +1,238 @@ +/* + * drivers/net/ibm_newemac/debug.c + * + * Driver for PowerPC 4xx on-chip ethernet controller, debug print routines. + * + * Copyright (c) 2004, 2005 Zultys Technologies + * Eugene Surovegin or + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + */ +#include +#include +#include +#include +#include +#include + +#include "core.h" + +static spinlock_t emac_dbg_lock = SPIN_LOCK_UNLOCKED; + +static void emac_desc_dump(struct emac_instance *p) +{ + int i; + printk("** EMAC %s TX BDs **\n" + " tx_cnt = %d tx_slot = %d ack_slot = %d\n", + p->ofdev->node->full_name, + p->tx_cnt, p->tx_slot, p->ack_slot); + for (i = 0; i < NUM_TX_BUFF / 2; ++i) + printk + ("bd[%2d] 0x%08x %c 0x%04x %4u - bd[%2d] 0x%08x %c 0x%04x %4u\n", + i, p->tx_desc[i].data_ptr, p->tx_skb[i] ? 'V' : ' ', + p->tx_desc[i].ctrl, p->tx_desc[i].data_len, + NUM_TX_BUFF / 2 + i, + p->tx_desc[NUM_TX_BUFF / 2 + i].data_ptr, + p->tx_skb[NUM_TX_BUFF / 2 + i] ? 'V' : ' ', + p->tx_desc[NUM_TX_BUFF / 2 + i].ctrl, + p->tx_desc[NUM_TX_BUFF / 2 + i].data_len); + + printk("** EMAC %s RX BDs **\n" + " rx_slot = %d flags = 0x%lx rx_skb_size = %d rx_sync_size = %d\n" + " rx_sg_skb = 0x%p\n", + p->ofdev->node->full_name, + p->rx_slot, p->commac.flags, p->rx_skb_size, + p->rx_sync_size, p->rx_sg_skb); + for (i = 0; i < NUM_RX_BUFF / 2; ++i) + printk + ("bd[%2d] 0x%08x %c 0x%04x %4u - bd[%2d] 0x%08x %c 0x%04x %4u\n", + i, p->rx_desc[i].data_ptr, p->rx_skb[i] ? 'V' : ' ', + p->rx_desc[i].ctrl, p->rx_desc[i].data_len, + NUM_RX_BUFF / 2 + i, + p->rx_desc[NUM_RX_BUFF / 2 + i].data_ptr, + p->rx_skb[NUM_RX_BUFF / 2 + i] ? 
'V' : ' ', + p->rx_desc[NUM_RX_BUFF / 2 + i].ctrl, + p->rx_desc[NUM_RX_BUFF / 2 + i].data_len); +} + +static void emac_mac_dump(struct emac_instance *dev) +{ + struct emac_regs __iomem *p = dev->emacp; + + printk("** EMAC %s registers **\n" + "MR0 = 0x%08x MR1 = 0x%08x TMR0 = 0x%08x TMR1 = 0x%08x\n" + "RMR = 0x%08x ISR = 0x%08x ISER = 0x%08x\n" + "IAR = %04x%08x VTPID = 0x%04x VTCI = 0x%04x\n" + "IAHT: 0x%04x 0x%04x 0x%04x 0x%04x " + "GAHT: 0x%04x 0x%04x 0x%04x 0x%04x\n" + "LSA = %04x%08x IPGVR = 0x%04x\n" + "STACR = 0x%08x TRTR = 0x%08x RWMR = 0x%08x\n" + "OCTX = 0x%08x OCRX = 0x%08x IPCR = 0x%08x\n", + dev->ofdev->node->full_name, in_be32(&p->mr0), in_be32(&p->mr1), + in_be32(&p->tmr0), in_be32(&p->tmr1), + in_be32(&p->rmr), in_be32(&p->isr), in_be32(&p->iser), + in_be32(&p->iahr), in_be32(&p->ialr), in_be32(&p->vtpid), + in_be32(&p->vtci), + in_be32(&p->iaht1), in_be32(&p->iaht2), in_be32(&p->iaht3), + in_be32(&p->iaht4), + in_be32(&p->gaht1), in_be32(&p->gaht2), in_be32(&p->gaht3), + in_be32(&p->gaht4), + in_be32(&p->lsah), in_be32(&p->lsal), in_be32(&p->ipgvr), + in_be32(&p->stacr), in_be32(&p->trtr), in_be32(&p->rwmr), + in_be32(&p->octx), in_be32(&p->ocrx), in_be32(&p->ipcr) + ); + + emac_desc_dump(dev); +} + +static void emac_mal_dump(struct mal_instance *mal) +{ + int i; + + printk("** MAL %s Registers **\n" + "CFG = 0x%08x ESR = 0x%08x IER = 0x%08x\n" + "TX|CASR = 0x%08x CARR = 0x%08x EOBISR = 0x%08x DEIR = 0x%08x\n" + "RX|CASR = 0x%08x CARR = 0x%08x EOBISR = 0x%08x DEIR = 0x%08x\n", + mal->ofdev->node->full_name, + get_mal_dcrn(mal, MAL_CFG), get_mal_dcrn(mal, MAL_ESR), + get_mal_dcrn(mal, MAL_IER), + get_mal_dcrn(mal, MAL_TXCASR), get_mal_dcrn(mal, MAL_TXCARR), + get_mal_dcrn(mal, MAL_TXEOBISR), get_mal_dcrn(mal, MAL_TXDEIR), + get_mal_dcrn(mal, MAL_RXCASR), get_mal_dcrn(mal, MAL_RXCARR), + get_mal_dcrn(mal, MAL_RXEOBISR), get_mal_dcrn(mal, MAL_RXDEIR) + ); + + printk("TX|"); + for (i = 0; i < mal->num_tx_chans; ++i) { + if (i && !(i % 4)) + printk("\n "); + printk("CTP%d = 0x%08x ", i, get_mal_dcrn(mal, MAL_TXCTPR(i))); + } + printk("\nRX|"); + for (i = 0; i < mal->num_rx_chans; ++i) { + if (i && !(i % 4)) + printk("\n "); + printk("CTP%d = 0x%08x ", i, get_mal_dcrn(mal, MAL_RXCTPR(i))); + } + printk("\n "); + for (i = 0; i < mal->num_rx_chans; ++i) { + u32 r = get_mal_dcrn(mal, MAL_RCBS(i)); + if (i && !(i % 3)) + printk("\n "); + printk("RCBS%d = 0x%08x (%d) ", i, r, r * 16); + } + printk("\n"); +} + +static struct emac_instance *__emacs[4]; +static struct mal_instance *__mals[1]; + +void emac_dbg_register(struct emac_instance *dev) +{ + unsigned long flags; + int i; + + spin_lock_irqsave(&emac_dbg_lock, flags); + for (i = 0; i < ARRAY_SIZE(__emacs); i++) + if (__emacs[i] == NULL) { + __emacs[i] = dev; + break; + } + spin_unlock_irqrestore(&emac_dbg_lock, flags); +} + +void emac_dbg_unregister(struct emac_instance *dev) +{ + unsigned long flags; + int i; + + spin_lock_irqsave(&emac_dbg_lock, flags); + for (i = 0; i < ARRAY_SIZE(__emacs); i++) + if (__emacs[i] == dev) { + __emacs[i] = NULL; + break; + } + spin_unlock_irqrestore(&emac_dbg_lock, flags); +} + +void mal_dbg_register(struct mal_instance *mal) +{ + unsigned long flags; + int i; + + spin_lock_irqsave(&emac_dbg_lock, flags); + for (i = 0; i < ARRAY_SIZE(__mals); i++) + if (__mals[i] == NULL) { + __mals[i] = mal; + break; + } + spin_unlock_irqrestore(&emac_dbg_lock, flags); +} + +void mal_dbg_unregister(struct mal_instance *mal) +{ + unsigned long flags; + int i; + + spin_lock_irqsave(&emac_dbg_lock, flags); + for (i 
= 0; i < ARRAY_SIZE(__mals); i++) + if (__mals[i] == mal) { + __mals[i] = NULL; + break; + } + spin_unlock_irqrestore(&emac_dbg_lock, flags); +} + +void emac_dbg_dump_all(void) +{ + unsigned int i; + unsigned long flags; + + spin_lock_irqsave(&emac_dbg_lock, flags); + + for (i = 0; i < ARRAY_SIZE(__mals); ++i) + if (__mals[i]) + emac_mal_dump(__mals[i]); + + for (i = 0; i < ARRAY_SIZE(__emacs); ++i) + if (__emacs[i]) + emac_mac_dump(__emacs[i]); + + spin_unlock_irqrestore(&emac_dbg_lock, flags); +} + +#if defined(CONFIG_MAGIC_SYSRQ) +static void emac_sysrq_handler(int key, struct tty_struct *tty) +{ + emac_dbg_dump_all(); +} + +static struct sysrq_key_op emac_sysrq_op = { + .handler = emac_sysrq_handler, + .help_msg = "emaC", + .action_msg = "Show EMAC(s) status", +}; + +int __init emac_init_debug(void) +{ + return register_sysrq_key('c', &emac_sysrq_op); +} + +void __exit emac_fini_debug(void) +{ + unregister_sysrq_key('c', &emac_sysrq_op); +} + +#else +int __init emac_init_debug(void) +{ + return 0; +} +void __exit emac_fini_debug(void) +{ +} +#endif /* CONFIG_MAGIC_SYSRQ */ diff --git a/drivers/net/ibm_newemac/debug.h b/drivers/net/ibm_newemac/debug.h new file mode 100644 index 0000000000000000000000000000000000000000..1dd2dcbc157fec1a49a35e7869ad136aed2cdc0e --- /dev/null +++ b/drivers/net/ibm_newemac/debug.h @@ -0,0 +1,78 @@ +/* + * drivers/net/ibm_newemac/debug.h + * + * Driver for PowerPC 4xx on-chip ethernet controller, debug print routines. + * + * Copyright (c) 2004, 2005 Zultys Technologies + * Eugene Surovegin or + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + */ +#ifndef __IBM_NEWEMAC_DEBUG_H +#define __IBM_NEWEMAC_DEBUG_H + +#include + +#include "core.h" + +#if defined(CONFIG_IBM_NEW_EMAC_DEBUG) + +struct emac_instance; +struct mal_instance; + +extern void emac_dbg_register(struct emac_instance *dev); +extern void emac_dbg_unregister(struct emac_instance *dev); +extern void mal_dbg_register(struct mal_instance *mal); +extern void mal_dbg_unregister(struct mal_instance *mal); +extern int emac_init_debug(void) __init; +extern void emac_fini_debug(void) __exit; +extern void emac_dbg_dump_all(void); + +# define DBG_LEVEL 1 + +#else + +# define emac_dbg_register(x) do { } while(0) +# define emac_dbg_unregister(x) do { } while(0) +# define mal_dbg_register(x) do { } while(0) +# define mal_dbg_unregister(x) do { } while(0) +# define emac_init_debug() do { } while(0) +# define emac_fini_debug() do { } while(0) +# define emac_dbg_dump_all() do { } while(0) + +# define DBG_LEVEL 0 + +#endif + +#define EMAC_DBG(dev, name, fmt, arg...) \ + printk(KERN_DEBUG #name "%s: " fmt, dev->ofdev->node->full_name, ## arg) + +#if DBG_LEVEL > 0 +# define DBG(d,f,x...) EMAC_DBG(d, emac, f, ##x) +# define MAL_DBG(d,f,x...) EMAC_DBG(d, mal, f, ##x) +# define ZMII_DBG(d,f,x...) EMAC_DBG(d, zmii, f, ##x) +# define RGMII_DBG(d,f,x...) EMAC_DBG(d, rgmii, f, ##x) +# define NL "\n" +#else +# define DBG(f,x...) ((void)0) +# define MAL_DBG(d,f,x...) ((void)0) +# define ZMII_DBG(d,f,x...) ((void)0) +# define RGMII_DBG(d,f,x...) ((void)0) +#endif +#if DBG_LEVEL > 1 +# define DBG2(d,f,x...) DBG(d,f, ##x) +# define MAL_DBG2(d,f,x...) MAL_DBG(d,f, ##x) +# define ZMII_DBG2(d,f,x...) ZMII_DBG(d,f, ##x) +# define RGMII_DBG2(d,f,x...) RGMII_DBG(d,f, ##x) +#else +# define DBG2(f,x...) 
((void)0) +# define MAL_DBG2(d,f,x...) ((void)0) +# define ZMII_DBG2(d,f,x...) ((void)0) +# define RGMII_DBG2(d,f,x...) ((void)0) +#endif + +#endif /* __IBM_NEWEMAC_DEBUG_H */ diff --git a/drivers/net/ibm_newemac/emac.h b/drivers/net/ibm_newemac/emac.h new file mode 100644 index 0000000000000000000000000000000000000000..bef92efeeadce4a371c0abb7b2757939df7f19de --- /dev/null +++ b/drivers/net/ibm_newemac/emac.h @@ -0,0 +1,268 @@ +/* + * drivers/net/ibm_newemac/emac.h + * + * Register definitions for PowerPC 4xx on-chip ethernet contoller + * + * Copyright (c) 2004, 2005 Zultys Technologies. + * Eugene Surovegin or + * + * Based on original work by + * Matt Porter + * Armin Kuster + * Copyright 2002-2004 MontaVista Software Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + */ +#ifndef __IBM_NEWEMAC_H +#define __IBM_NEWEMAC_H + +#include + +/* EMAC registers Write Access rules */ +struct emac_regs { + u32 mr0; /* special */ + u32 mr1; /* Reset */ + u32 tmr0; /* special */ + u32 tmr1; /* special */ + u32 rmr; /* Reset */ + u32 isr; /* Always */ + u32 iser; /* Reset */ + u32 iahr; /* Reset, R, T */ + u32 ialr; /* Reset, R, T */ + u32 vtpid; /* Reset, R, T */ + u32 vtci; /* Reset, R, T */ + u32 ptr; /* Reset, T */ + u32 iaht1; /* Reset, R */ + u32 iaht2; /* Reset, R */ + u32 iaht3; /* Reset, R */ + u32 iaht4; /* Reset, R */ + u32 gaht1; /* Reset, R */ + u32 gaht2; /* Reset, R */ + u32 gaht3; /* Reset, R */ + u32 gaht4; /* Reset, R */ + u32 lsah; + u32 lsal; + u32 ipgvr; /* Reset, T */ + u32 stacr; /* special */ + u32 trtr; /* special */ + u32 rwmr; /* Reset */ + u32 octx; + u32 ocrx; + u32 ipcr; +}; + +/* + * PHY mode settings (EMAC <-> ZMII/RGMII bridge <-> PHY) + */ +#define PHY_MODE_NA 0 +#define PHY_MODE_MII 1 +#define PHY_MODE_RMII 2 +#define PHY_MODE_SMII 3 +#define PHY_MODE_RGMII 4 +#define PHY_MODE_TBI 5 +#define PHY_MODE_GMII 6 +#define PHY_MODE_RTBI 7 +#define PHY_MODE_SGMII 8 + + +#define EMAC_ETHTOOL_REGS_VER 0 +#define EMAC_ETHTOOL_REGS_SIZE (sizeof(struct emac_regs) - sizeof(u32)) +#define EMAC4_ETHTOOL_REGS_VER 1 +#define EMAC4_ETHTOOL_REGS_SIZE sizeof(struct emac_regs) + +/* EMACx_MR0 */ +#define EMAC_MR0_RXI 0x80000000 +#define EMAC_MR0_TXI 0x40000000 +#define EMAC_MR0_SRST 0x20000000 +#define EMAC_MR0_TXE 0x10000000 +#define EMAC_MR0_RXE 0x08000000 +#define EMAC_MR0_WKE 0x04000000 + +/* EMACx_MR1 */ +#define EMAC_MR1_FDE 0x80000000 +#define EMAC_MR1_ILE 0x40000000 +#define EMAC_MR1_VLE 0x20000000 +#define EMAC_MR1_EIFC 0x10000000 +#define EMAC_MR1_APP 0x08000000 +#define EMAC_MR1_IST 0x01000000 + +#define EMAC_MR1_MF_MASK 0x00c00000 +#define EMAC_MR1_MF_10 0x00000000 +#define EMAC_MR1_MF_100 0x00400000 +#define EMAC_MR1_MF_1000 0x00800000 +#define EMAC_MR1_MF_1000GPCS 0x00c00000 +#define EMAC_MR1_MF_IPPA(id) (((id) & 0x1f) << 6) + +#define EMAC_MR1_RFS_4K 0x00300000 +#define EMAC_MR1_RFS_16K 0x00000000 +#define EMAC_MR1_TFS_2K 0x00080000 +#define EMAC_MR1_TR0_MULT 0x00008000 +#define EMAC_MR1_JPSM 0x00000000 +#define EMAC_MR1_MWSW_001 0x00000000 +#define EMAC_MR1_BASE(opb) (EMAC_MR1_TFS_2K | EMAC_MR1_TR0_MULT) + + +#define EMAC4_MR1_RFS_2K 0x00100000 +#define EMAC4_MR1_RFS_4K 0x00180000 +#define EMAC4_MR1_RFS_16K 0x00280000 +#define EMAC4_MR1_TFS_2K 0x00020000 +#define EMAC4_MR1_TFS_4K 0x00030000 +#define EMAC4_MR1_TR 0x00008000 +#define EMAC4_MR1_MWSW_001 
0x00001000 +#define EMAC4_MR1_JPSM 0x00000800 +#define EMAC4_MR1_OBCI_MASK 0x00000038 +#define EMAC4_MR1_OBCI_50 0x00000000 +#define EMAC4_MR1_OBCI_66 0x00000008 +#define EMAC4_MR1_OBCI_83 0x00000010 +#define EMAC4_MR1_OBCI_100 0x00000018 +#define EMAC4_MR1_OBCI_100P 0x00000020 +#define EMAC4_MR1_OBCI(freq) ((freq) <= 50 ? EMAC4_MR1_OBCI_50 : \ + (freq) <= 66 ? EMAC4_MR1_OBCI_66 : \ + (freq) <= 83 ? EMAC4_MR1_OBCI_83 : \ + (freq) <= 100 ? EMAC4_MR1_OBCI_100 : \ + EMAC4_MR1_OBCI_100P) + +/* EMACx_TMR0 */ +#define EMAC_TMR0_GNP 0x80000000 +#define EMAC_TMR0_DEFAULT 0x00000000 +#define EMAC4_TMR0_TFAE_2_32 0x00000001 +#define EMAC4_TMR0_TFAE_4_64 0x00000002 +#define EMAC4_TMR0_TFAE_8_128 0x00000003 +#define EMAC4_TMR0_TFAE_16_256 0x00000004 +#define EMAC4_TMR0_TFAE_32_512 0x00000005 +#define EMAC4_TMR0_TFAE_64_1024 0x00000006 +#define EMAC4_TMR0_TFAE_128_2048 0x00000007 +#define EMAC4_TMR0_DEFAULT EMAC4_TMR0_TFAE_2_32 +#define EMAC_TMR0_XMIT (EMAC_TMR0_GNP | EMAC_TMR0_DEFAULT) +#define EMAC4_TMR0_XMIT (EMAC_TMR0_GNP | EMAC4_TMR0_DEFAULT) + +/* EMACx_TMR1 */ + +#define EMAC_TMR1(l,h) (((l) << 27) | (((h) & 0xff) << 16)) +#define EMAC4_TMR1(l,h) (((l) << 27) | (((h) & 0x3ff) << 14)) + +/* EMACx_RMR */ +#define EMAC_RMR_SP 0x80000000 +#define EMAC_RMR_SFCS 0x40000000 +#define EMAC_RMR_RRP 0x20000000 +#define EMAC_RMR_RFP 0x10000000 +#define EMAC_RMR_ROP 0x08000000 +#define EMAC_RMR_RPIR 0x04000000 +#define EMAC_RMR_PPP 0x02000000 +#define EMAC_RMR_PME 0x01000000 +#define EMAC_RMR_PMME 0x00800000 +#define EMAC_RMR_IAE 0x00400000 +#define EMAC_RMR_MIAE 0x00200000 +#define EMAC_RMR_BAE 0x00100000 +#define EMAC_RMR_MAE 0x00080000 +#define EMAC_RMR_BASE 0x00000000 +#define EMAC4_RMR_RFAF_2_32 0x00000001 +#define EMAC4_RMR_RFAF_4_64 0x00000002 +#define EMAC4_RMR_RFAF_8_128 0x00000003 +#define EMAC4_RMR_RFAF_16_256 0x00000004 +#define EMAC4_RMR_RFAF_32_512 0x00000005 +#define EMAC4_RMR_RFAF_64_1024 0x00000006 +#define EMAC4_RMR_RFAF_128_2048 0x00000007 +#define EMAC4_RMR_BASE EMAC4_RMR_RFAF_128_2048 + +/* EMACx_ISR & EMACx_ISER */ +#define EMAC4_ISR_TXPE 0x20000000 +#define EMAC4_ISR_RXPE 0x10000000 +#define EMAC4_ISR_TXUE 0x08000000 +#define EMAC4_ISR_RXOE 0x04000000 +#define EMAC_ISR_OVR 0x02000000 +#define EMAC_ISR_PP 0x01000000 +#define EMAC_ISR_BP 0x00800000 +#define EMAC_ISR_RP 0x00400000 +#define EMAC_ISR_SE 0x00200000 +#define EMAC_ISR_ALE 0x00100000 +#define EMAC_ISR_BFCS 0x00080000 +#define EMAC_ISR_PTLE 0x00040000 +#define EMAC_ISR_ORE 0x00020000 +#define EMAC_ISR_IRE 0x00010000 +#define EMAC_ISR_SQE 0x00000080 +#define EMAC_ISR_TE 0x00000040 +#define EMAC_ISR_MOS 0x00000002 +#define EMAC_ISR_MOF 0x00000001 + +/* EMACx_STACR */ +#define EMAC_STACR_PHYD_MASK 0xffff +#define EMAC_STACR_PHYD_SHIFT 16 +#define EMAC_STACR_OC 0x00008000 +#define EMAC_STACR_PHYE 0x00004000 +#define EMAC_STACR_STAC_MASK 0x00003000 +#define EMAC_STACR_STAC_READ 0x00001000 +#define EMAC_STACR_STAC_WRITE 0x00002000 +#define EMAC_STACR_OPBC_MASK 0x00000C00 +#define EMAC_STACR_OPBC_50 0x00000000 +#define EMAC_STACR_OPBC_66 0x00000400 +#define EMAC_STACR_OPBC_83 0x00000800 +#define EMAC_STACR_OPBC_100 0x00000C00 +#define EMAC_STACR_OPBC(freq) ((freq) <= 50 ? EMAC_STACR_OPBC_50 : \ + (freq) <= 66 ? EMAC_STACR_OPBC_66 : \ + (freq) <= 83 ? 
EMAC_STACR_OPBC_83 : EMAC_STACR_OPBC_100) +#define EMAC_STACR_BASE(opb) EMAC_STACR_OPBC(opb) +#define EMAC4_STACR_BASE(opb) 0x00000000 +#define EMAC_STACR_PCDA_MASK 0x1f +#define EMAC_STACR_PCDA_SHIFT 5 +#define EMAC_STACR_PRA_MASK 0x1f +#define EMACX_STACR_STAC_MASK 0x00003800 +#define EMACX_STACR_STAC_READ 0x00001000 +#define EMACX_STACR_STAC_WRITE 0x00000800 +#define EMACX_STACR_STAC_IND_ADDR 0x00002000 +#define EMACX_STACR_STAC_IND_READ 0x00003800 +#define EMACX_STACR_STAC_IND_READINC 0x00003000 +#define EMACX_STACR_STAC_IND_WRITE 0x00002800 + + +/* EMACx_TRTR */ +#define EMAC_TRTR_SHIFT_EMAC4 27 +#define EMAC_TRTR_SHIFT 24 + +/* EMAC specific TX descriptor control fields (write access) */ +#define EMAC_TX_CTRL_GFCS 0x0200 +#define EMAC_TX_CTRL_GP 0x0100 +#define EMAC_TX_CTRL_ISA 0x0080 +#define EMAC_TX_CTRL_RSA 0x0040 +#define EMAC_TX_CTRL_IVT 0x0020 +#define EMAC_TX_CTRL_RVT 0x0010 +#define EMAC_TX_CTRL_TAH_CSUM 0x000e + +/* EMAC specific TX descriptor status fields (read access) */ +#define EMAC_TX_ST_BFCS 0x0200 +#define EMAC_TX_ST_LCS 0x0080 +#define EMAC_TX_ST_ED 0x0040 +#define EMAC_TX_ST_EC 0x0020 +#define EMAC_TX_ST_LC 0x0010 +#define EMAC_TX_ST_MC 0x0008 +#define EMAC_TX_ST_SC 0x0004 +#define EMAC_TX_ST_UR 0x0002 +#define EMAC_TX_ST_SQE 0x0001 +#define EMAC_IS_BAD_TX (EMAC_TX_ST_LCS | EMAC_TX_ST_ED | \ + EMAC_TX_ST_EC | EMAC_TX_ST_LC | \ + EMAC_TX_ST_MC | EMAC_TX_ST_UR) +#define EMAC_IS_BAD_TX_TAH (EMAC_TX_ST_LCS | EMAC_TX_ST_ED | \ + EMAC_TX_ST_EC | EMAC_TX_ST_LC) + +/* EMAC specific RX descriptor status fields (read access) */ +#define EMAC_RX_ST_OE 0x0200 +#define EMAC_RX_ST_PP 0x0100 +#define EMAC_RX_ST_BP 0x0080 +#define EMAC_RX_ST_RP 0x0040 +#define EMAC_RX_ST_SE 0x0020 +#define EMAC_RX_ST_AE 0x0010 +#define EMAC_RX_ST_BFCS 0x0008 +#define EMAC_RX_ST_PTL 0x0004 +#define EMAC_RX_ST_ORE 0x0002 +#define EMAC_RX_ST_IRE 0x0001 +#define EMAC_RX_TAH_BAD_CSUM 0x0003 +#define EMAC_BAD_RX_MASK (EMAC_RX_ST_OE | EMAC_RX_ST_BP | \ + EMAC_RX_ST_RP | EMAC_RX_ST_SE | \ + EMAC_RX_ST_AE | EMAC_RX_ST_BFCS | \ + EMAC_RX_ST_PTL | EMAC_RX_ST_ORE | \ + EMAC_RX_ST_IRE ) +#endif /* __IBM_NEWEMAC_H */ diff --git a/drivers/net/ibm_newemac/mal.c b/drivers/net/ibm_newemac/mal.c new file mode 100644 index 0000000000000000000000000000000000000000..58854117b1a9e9ff7049cd034c5e3b475f007e13 --- /dev/null +++ b/drivers/net/ibm_newemac/mal.c @@ -0,0 +1,711 @@ +/* + * drivers/net/ibm_newemac/mal.c + * + * Memory Access Layer (MAL) support + * + * Copyright (c) 2004, 2005 Zultys Technologies. + * Eugene Surovegin or + * + * Based on original work by + * Benjamin Herrenschmidt , + * David Gibson , + * + * Armin Kuster + * Copyright 2002 MontaVista Softare Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. 
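+ *
+ * A rough usage sketch for a client of this layer (illustrative only;
+ * "my_ops", "my_dev", "tx_chan" and "rx_chan" are placeholders, and the
+ * real call sites live in the EMAC core rather than here):
+ *
+ *   commac.ops = &my_ops;      (poll_tx/poll_rx/peek_rx/rxde callbacks)
+ *   commac.dev = my_dev;
+ *   commac.tx_chan_mask = MAL_CHAN_MASK(tx_chan);
+ *   commac.rx_chan_mask = MAL_CHAN_MASK(rx_chan);
+ *   mal_register_commac(mal, &commac);
+ *   mal_poll_add(mal, &commac);
+ *   mal_enable_tx_channel(mal, tx_chan);
+ *   mal_enable_rx_channel(mal, rx_chan);
+ *   mal_poll_enable(mal, &commac);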
+ * + */ + +#include + +#include "core.h" + +static int mal_count; + +int __devinit mal_register_commac(struct mal_instance *mal, + struct mal_commac *commac) +{ + unsigned long flags; + + spin_lock_irqsave(&mal->lock, flags); + + MAL_DBG(mal, "reg(%08x, %08x)" NL, + commac->tx_chan_mask, commac->rx_chan_mask); + + /* Don't let multiple commacs claim the same channel(s) */ + if ((mal->tx_chan_mask & commac->tx_chan_mask) || + (mal->rx_chan_mask & commac->rx_chan_mask)) { + spin_unlock_irqrestore(&mal->lock, flags); + printk(KERN_WARNING "mal%d: COMMAC channels conflict!\n", + mal->index); + return -EBUSY; + } + + mal->tx_chan_mask |= commac->tx_chan_mask; + mal->rx_chan_mask |= commac->rx_chan_mask; + list_add(&commac->list, &mal->list); + + spin_unlock_irqrestore(&mal->lock, flags); + + return 0; +} + +void __devexit mal_unregister_commac(struct mal_instance *mal, + struct mal_commac *commac) +{ + unsigned long flags; + + spin_lock_irqsave(&mal->lock, flags); + + MAL_DBG(mal, "unreg(%08x, %08x)" NL, + commac->tx_chan_mask, commac->rx_chan_mask); + + mal->tx_chan_mask &= ~commac->tx_chan_mask; + mal->rx_chan_mask &= ~commac->rx_chan_mask; + list_del_init(&commac->list); + + spin_unlock_irqrestore(&mal->lock, flags); +} + +int mal_set_rcbs(struct mal_instance *mal, int channel, unsigned long size) +{ + BUG_ON(channel < 0 || channel >= mal->num_rx_chans || + size > MAL_MAX_RX_SIZE); + + MAL_DBG(mal, "set_rbcs(%d, %lu)" NL, channel, size); + + if (size & 0xf) { + printk(KERN_WARNING + "mal%d: incorrect RX size %lu for the channel %d\n", + mal->index, size, channel); + return -EINVAL; + } + + set_mal_dcrn(mal, MAL_RCBS(channel), size >> 4); + return 0; +} + +int mal_tx_bd_offset(struct mal_instance *mal, int channel) +{ + BUG_ON(channel < 0 || channel >= mal->num_tx_chans); + + return channel * NUM_TX_BUFF; +} + +int mal_rx_bd_offset(struct mal_instance *mal, int channel) +{ + BUG_ON(channel < 0 || channel >= mal->num_rx_chans); + return mal->num_tx_chans * NUM_TX_BUFF + channel * NUM_RX_BUFF; +} + +void mal_enable_tx_channel(struct mal_instance *mal, int channel) +{ + unsigned long flags; + + spin_lock_irqsave(&mal->lock, flags); + + MAL_DBG(mal, "enable_tx(%d)" NL, channel); + + set_mal_dcrn(mal, MAL_TXCASR, + get_mal_dcrn(mal, MAL_TXCASR) | MAL_CHAN_MASK(channel)); + + spin_unlock_irqrestore(&mal->lock, flags); +} + +void mal_disable_tx_channel(struct mal_instance *mal, int channel) +{ + set_mal_dcrn(mal, MAL_TXCARR, MAL_CHAN_MASK(channel)); + + MAL_DBG(mal, "disable_tx(%d)" NL, channel); +} + +void mal_enable_rx_channel(struct mal_instance *mal, int channel) +{ + unsigned long flags; + + spin_lock_irqsave(&mal->lock, flags); + + MAL_DBG(mal, "enable_rx(%d)" NL, channel); + + set_mal_dcrn(mal, MAL_RXCASR, + get_mal_dcrn(mal, MAL_RXCASR) | MAL_CHAN_MASK(channel)); + + spin_unlock_irqrestore(&mal->lock, flags); +} + +void mal_disable_rx_channel(struct mal_instance *mal, int channel) +{ + set_mal_dcrn(mal, MAL_RXCARR, MAL_CHAN_MASK(channel)); + + MAL_DBG(mal, "disable_rx(%d)" NL, channel); +} + +void mal_poll_add(struct mal_instance *mal, struct mal_commac *commac) +{ + unsigned long flags; + + spin_lock_irqsave(&mal->lock, flags); + + MAL_DBG(mal, "poll_add(%p)" NL, commac); + + /* starts disabled */ + set_bit(MAL_COMMAC_POLL_DISABLED, &commac->flags); + + list_add_tail(&commac->poll_list, &mal->poll_list); + + spin_unlock_irqrestore(&mal->lock, flags); +} + +void mal_poll_del(struct mal_instance *mal, struct mal_commac *commac) +{ + unsigned long flags; + + spin_lock_irqsave(&mal->lock, 
flags); + + MAL_DBG(mal, "poll_del(%p)" NL, commac); + + list_del(&commac->poll_list); + + spin_unlock_irqrestore(&mal->lock, flags); +} + +/* synchronized by mal_poll() */ +static inline void mal_enable_eob_irq(struct mal_instance *mal) +{ + MAL_DBG2(mal, "enable_irq" NL); + + // XXX might want to cache MAL_CFG as the DCR read can be slooooow + set_mal_dcrn(mal, MAL_CFG, get_mal_dcrn(mal, MAL_CFG) | MAL_CFG_EOPIE); +} + +/* synchronized by __LINK_STATE_RX_SCHED bit in ndev->state */ +static inline void mal_disable_eob_irq(struct mal_instance *mal) +{ + // XXX might want to cache MAL_CFG as the DCR read can be slooooow + set_mal_dcrn(mal, MAL_CFG, get_mal_dcrn(mal, MAL_CFG) & ~MAL_CFG_EOPIE); + + MAL_DBG2(mal, "disable_irq" NL); +} + +static irqreturn_t mal_serr(int irq, void *dev_instance) +{ + struct mal_instance *mal = dev_instance; + + u32 esr = get_mal_dcrn(mal, MAL_ESR); + + /* Clear the error status register */ + set_mal_dcrn(mal, MAL_ESR, esr); + + MAL_DBG(mal, "SERR %08x" NL, esr); + + if (esr & MAL_ESR_EVB) { + if (esr & MAL_ESR_DE) { + /* We ignore Descriptor error, + * TXDE or RXDE interrupt will be generated anyway. + */ + return IRQ_HANDLED; + } + + if (esr & MAL_ESR_PEIN) { + /* PLB error, it's probably buggy hardware or + * incorrect physical address in BD (i.e. bug) + */ + if (net_ratelimit()) + printk(KERN_ERR + "mal%d: system error, " + "PLB (ESR = 0x%08x)\n", + mal->index, esr); + return IRQ_HANDLED; + } + + /* OPB error, it's probably buggy hardware or incorrect + * EBC setup + */ + if (net_ratelimit()) + printk(KERN_ERR + "mal%d: system error, OPB (ESR = 0x%08x)\n", + mal->index, esr); + } + return IRQ_HANDLED; +} + +static inline void mal_schedule_poll(struct mal_instance *mal) +{ + if (likely(napi_schedule_prep(&mal->napi))) { + MAL_DBG2(mal, "schedule_poll" NL); + mal_disable_eob_irq(mal); + __napi_schedule(&mal->napi); + } else + MAL_DBG2(mal, "already in poll" NL); +} + +static irqreturn_t mal_txeob(int irq, void *dev_instance) +{ + struct mal_instance *mal = dev_instance; + + u32 r = get_mal_dcrn(mal, MAL_TXEOBISR); + + MAL_DBG2(mal, "txeob %08x" NL, r); + + mal_schedule_poll(mal); + set_mal_dcrn(mal, MAL_TXEOBISR, r); + + return IRQ_HANDLED; +} + +static irqreturn_t mal_rxeob(int irq, void *dev_instance) +{ + struct mal_instance *mal = dev_instance; + + u32 r = get_mal_dcrn(mal, MAL_RXEOBISR); + + MAL_DBG2(mal, "rxeob %08x" NL, r); + + mal_schedule_poll(mal); + set_mal_dcrn(mal, MAL_RXEOBISR, r); + + return IRQ_HANDLED; +} + +static irqreturn_t mal_txde(int irq, void *dev_instance) +{ + struct mal_instance *mal = dev_instance; + + u32 deir = get_mal_dcrn(mal, MAL_TXDEIR); + set_mal_dcrn(mal, MAL_TXDEIR, deir); + + MAL_DBG(mal, "txde %08x" NL, deir); + + if (net_ratelimit()) + printk(KERN_ERR + "mal%d: TX descriptor error (TXDEIR = 0x%08x)\n", + mal->index, deir); + + return IRQ_HANDLED; +} + +static irqreturn_t mal_rxde(int irq, void *dev_instance) +{ + struct mal_instance *mal = dev_instance; + struct list_head *l; + + u32 deir = get_mal_dcrn(mal, MAL_RXDEIR); + + MAL_DBG(mal, "rxde %08x" NL, deir); + + list_for_each(l, &mal->list) { + struct mal_commac *mc = list_entry(l, struct mal_commac, list); + if (deir & mc->rx_chan_mask) { + set_bit(MAL_COMMAC_RX_STOPPED, &mc->flags); + mc->ops->rxde(mc->dev); + } + } + + mal_schedule_poll(mal); + set_mal_dcrn(mal, MAL_RXDEIR, deir); + + return IRQ_HANDLED; +} + +void mal_poll_disable(struct mal_instance *mal, struct mal_commac *commac) +{ + /* Spinlock-type semantics: only one caller disable poll at a time */ + while 
(test_and_set_bit(MAL_COMMAC_POLL_DISABLED, &commac->flags)) + msleep(1); + + /* Synchronize with the MAL NAPI poller. */ + napi_disable(&mal->napi); +} + +void mal_poll_enable(struct mal_instance *mal, struct mal_commac *commac) +{ + smp_wmb(); + clear_bit(MAL_COMMAC_POLL_DISABLED, &commac->flags); + + // XXX might want to kick a poll now... +} + +static int mal_poll(struct napi_struct *napi, int budget) +{ + struct mal_instance *mal = container_of(napi, struct mal_instance, napi); + struct list_head *l; + int received = 0; + unsigned long flags; + + MAL_DBG2(mal, "poll(%d) %d ->" NL, *budget, + rx_work_limit); + again: + /* Process TX skbs */ + list_for_each(l, &mal->poll_list) { + struct mal_commac *mc = + list_entry(l, struct mal_commac, poll_list); + mc->ops->poll_tx(mc->dev); + } + + /* Process RX skbs. + * + * We _might_ need something more smart here to enforce polling + * fairness. + */ + list_for_each(l, &mal->poll_list) { + struct mal_commac *mc = + list_entry(l, struct mal_commac, poll_list); + int n; + if (unlikely(test_bit(MAL_COMMAC_POLL_DISABLED, &mc->flags))) + continue; + n = mc->ops->poll_rx(mc->dev, budget); + if (n) { + received += n; + budget -= n; + if (budget <= 0) + goto more_work; // XXX What if this is the last one ? + } + } + + /* We need to disable IRQs to protect from RXDE IRQ here */ + spin_lock_irqsave(&mal->lock, flags); + __napi_complete(napi); + mal_enable_eob_irq(mal); + spin_unlock_irqrestore(&mal->lock, flags); + + /* Check for "rotting" packet(s) */ + list_for_each(l, &mal->poll_list) { + struct mal_commac *mc = + list_entry(l, struct mal_commac, poll_list); + if (unlikely(test_bit(MAL_COMMAC_POLL_DISABLED, &mc->flags))) + continue; + if (unlikely(mc->ops->peek_rx(mc->dev) || + test_bit(MAL_COMMAC_RX_STOPPED, &mc->flags))) { + MAL_DBG2(mal, "rotting packet" NL); + if (napi_reschedule(napi)) + mal_disable_eob_irq(mal); + else + MAL_DBG2(mal, "already in poll list" NL); + + if (budget > 0) + goto again; + else + goto more_work; + } + mc->ops->poll_tx(mc->dev); + } + + more_work: + MAL_DBG2(mal, "poll() %d <- %d" NL, budget, received); + return received; +} + +static void mal_reset(struct mal_instance *mal) +{ + int n = 10; + + MAL_DBG(mal, "reset" NL); + + set_mal_dcrn(mal, MAL_CFG, MAL_CFG_SR); + + /* Wait for reset to complete (1 system clock) */ + while ((get_mal_dcrn(mal, MAL_CFG) & MAL_CFG_SR) && n) + --n; + + if (unlikely(!n)) + printk(KERN_ERR "mal%d: reset timeout\n", mal->index); +} + +int mal_get_regs_len(struct mal_instance *mal) +{ + return sizeof(struct emac_ethtool_regs_subhdr) + + sizeof(struct mal_regs); +} + +void *mal_dump_regs(struct mal_instance *mal, void *buf) +{ + struct emac_ethtool_regs_subhdr *hdr = buf; + struct mal_regs *regs = (struct mal_regs *)(hdr + 1); + int i; + + hdr->version = mal->version; + hdr->index = mal->index; + + regs->tx_count = mal->num_tx_chans; + regs->rx_count = mal->num_rx_chans; + + regs->cfg = get_mal_dcrn(mal, MAL_CFG); + regs->esr = get_mal_dcrn(mal, MAL_ESR); + regs->ier = get_mal_dcrn(mal, MAL_IER); + regs->tx_casr = get_mal_dcrn(mal, MAL_TXCASR); + regs->tx_carr = get_mal_dcrn(mal, MAL_TXCARR); + regs->tx_eobisr = get_mal_dcrn(mal, MAL_TXEOBISR); + regs->tx_deir = get_mal_dcrn(mal, MAL_TXDEIR); + regs->rx_casr = get_mal_dcrn(mal, MAL_RXCASR); + regs->rx_carr = get_mal_dcrn(mal, MAL_RXCARR); + regs->rx_eobisr = get_mal_dcrn(mal, MAL_RXEOBISR); + regs->rx_deir = get_mal_dcrn(mal, MAL_RXDEIR); + + for (i = 0; i < regs->tx_count; ++i) + regs->tx_ctpr[i] = get_mal_dcrn(mal, MAL_TXCTPR(i)); + + for (i = 
0; i < regs->rx_count; ++i) { + regs->rx_ctpr[i] = get_mal_dcrn(mal, MAL_RXCTPR(i)); + regs->rcbs[i] = get_mal_dcrn(mal, MAL_RCBS(i)); + } + return regs + 1; +} + +static int __devinit mal_probe(struct of_device *ofdev, + const struct of_device_id *match) +{ + struct mal_instance *mal; + int err = 0, i, bd_size; + int index = mal_count++; + const u32 *prop; + u32 cfg; + + mal = kzalloc(sizeof(struct mal_instance), GFP_KERNEL); + if (!mal) { + printk(KERN_ERR + "mal%d: out of memory allocating MAL structure!\n", + index); + return -ENOMEM; + } + mal->index = index; + mal->ofdev = ofdev; + mal->version = of_device_is_compatible(ofdev->node, "ibm,mcmal2") ? 2 : 1; + + MAL_DBG(mal, "probe" NL); + + prop = of_get_property(ofdev->node, "num-tx-chans", NULL); + if (prop == NULL) { + printk(KERN_ERR + "mal%d: can't find MAL num-tx-chans property!\n", + index); + err = -ENODEV; + goto fail; + } + mal->num_tx_chans = prop[0]; + + prop = of_get_property(ofdev->node, "num-rx-chans", NULL); + if (prop == NULL) { + printk(KERN_ERR + "mal%d: can't find MAL num-rx-chans property!\n", + index); + err = -ENODEV; + goto fail; + } + mal->num_rx_chans = prop[0]; + + mal->dcr_base = dcr_resource_start(ofdev->node, 0); + if (mal->dcr_base == 0) { + printk(KERN_ERR + "mal%d: can't find DCR resource!\n", index); + err = -ENODEV; + goto fail; + } + mal->dcr_host = dcr_map(ofdev->node, mal->dcr_base, 0x100); + if (!DCR_MAP_OK(mal->dcr_host)) { + printk(KERN_ERR + "mal%d: failed to map DCRs !\n", index); + err = -ENODEV; + goto fail; + } + + mal->txeob_irq = irq_of_parse_and_map(ofdev->node, 0); + mal->rxeob_irq = irq_of_parse_and_map(ofdev->node, 1); + mal->serr_irq = irq_of_parse_and_map(ofdev->node, 2); + mal->txde_irq = irq_of_parse_and_map(ofdev->node, 3); + mal->rxde_irq = irq_of_parse_and_map(ofdev->node, 4); + if (mal->txeob_irq == NO_IRQ || mal->rxeob_irq == NO_IRQ || + mal->serr_irq == NO_IRQ || mal->txde_irq == NO_IRQ || + mal->rxde_irq == NO_IRQ) { + printk(KERN_ERR + "mal%d: failed to map interrupts !\n", index); + err = -ENODEV; + goto fail_unmap; + } + + INIT_LIST_HEAD(&mal->poll_list); + mal->napi.weight = CONFIG_IBM_NEW_EMAC_POLL_WEIGHT; + mal->napi.poll = mal_poll; + INIT_LIST_HEAD(&mal->list); + spin_lock_init(&mal->lock); + + /* Load power-on reset defaults */ + mal_reset(mal); + + /* Set the MAL configuration register */ + cfg = (mal->version == 2) ? 
MAL2_CFG_DEFAULT : MAL1_CFG_DEFAULT; + cfg |= MAL_CFG_PLBB | MAL_CFG_OPBBL | MAL_CFG_LEA; + + /* Current Axon is not happy with priority being non-0, it can + * deadlock, fix it up here + */ + if (of_device_is_compatible(ofdev->node, "ibm,mcmal-axon")) + cfg &= ~(MAL2_CFG_RPP_10 | MAL2_CFG_WPP_10); + + /* Apply configuration */ + set_mal_dcrn(mal, MAL_CFG, cfg); + + /* Allocate space for BD rings */ + BUG_ON(mal->num_tx_chans <= 0 || mal->num_tx_chans > 32); + BUG_ON(mal->num_rx_chans <= 0 || mal->num_rx_chans > 32); + + bd_size = sizeof(struct mal_descriptor) * + (NUM_TX_BUFF * mal->num_tx_chans + + NUM_RX_BUFF * mal->num_rx_chans); + mal->bd_virt = + dma_alloc_coherent(&ofdev->dev, bd_size, &mal->bd_dma, + GFP_KERNEL); + if (mal->bd_virt == NULL) { + printk(KERN_ERR + "mal%d: out of memory allocating RX/TX descriptors!\n", + index); + err = -ENOMEM; + goto fail_unmap; + } + memset(mal->bd_virt, 0, bd_size); + + for (i = 0; i < mal->num_tx_chans; ++i) + set_mal_dcrn(mal, MAL_TXCTPR(i), mal->bd_dma + + sizeof(struct mal_descriptor) * + mal_tx_bd_offset(mal, i)); + + for (i = 0; i < mal->num_rx_chans; ++i) + set_mal_dcrn(mal, MAL_RXCTPR(i), mal->bd_dma + + sizeof(struct mal_descriptor) * + mal_rx_bd_offset(mal, i)); + + err = request_irq(mal->serr_irq, mal_serr, 0, "MAL SERR", mal); + if (err) + goto fail2; + err = request_irq(mal->txde_irq, mal_txde, 0, "MAL TX DE", mal); + if (err) + goto fail3; + err = request_irq(mal->txeob_irq, mal_txeob, 0, "MAL TX EOB", mal); + if (err) + goto fail4; + err = request_irq(mal->rxde_irq, mal_rxde, 0, "MAL RX DE", mal); + if (err) + goto fail5; + err = request_irq(mal->rxeob_irq, mal_rxeob, 0, "MAL RX EOB", mal); + if (err) + goto fail6; + + /* Enable all MAL SERR interrupt sources */ + if (mal->version == 2) + set_mal_dcrn(mal, MAL_IER, MAL2_IER_EVENTS); + else + set_mal_dcrn(mal, MAL_IER, MAL1_IER_EVENTS); + + /* Enable EOB interrupt */ + mal_enable_eob_irq(mal); + + printk(KERN_INFO + "MAL v%d %s, %d TX channels, %d RX channels\n", + mal->version, ofdev->node->full_name, + mal->num_tx_chans, mal->num_rx_chans); + + /* Advertise this instance to the rest of the world */ + wmb(); + dev_set_drvdata(&ofdev->dev, mal); + + mal_dbg_register(mal); + + return 0; + + fail6: + free_irq(mal->rxde_irq, mal); + fail5: + free_irq(mal->txeob_irq, mal); + fail4: + free_irq(mal->txde_irq, mal); + fail3: + free_irq(mal->serr_irq, mal); + fail2: + dma_free_coherent(&ofdev->dev, bd_size, mal->bd_virt, mal->bd_dma); + fail_unmap: + dcr_unmap(mal->dcr_host, mal->dcr_base, 0x100); + fail: + kfree(mal); + + return err; +} + +static int __devexit mal_remove(struct of_device *ofdev) +{ + struct mal_instance *mal = dev_get_drvdata(&ofdev->dev); + + MAL_DBG(mal, "remove" NL); + + /* Synchronize with scheduled polling */ + napi_disable(&mal->napi); + + if (!list_empty(&mal->list)) { + /* This is *very* bad */ + printk(KERN_EMERG + "mal%d: commac list is not empty on remove!\n", + mal->index); + WARN_ON(1); + } + + dev_set_drvdata(&ofdev->dev, NULL); + + free_irq(mal->serr_irq, mal); + free_irq(mal->txde_irq, mal); + free_irq(mal->txeob_irq, mal); + free_irq(mal->rxde_irq, mal); + free_irq(mal->rxeob_irq, mal); + + mal_reset(mal); + + mal_dbg_unregister(mal); + + dma_free_coherent(&ofdev->dev, + sizeof(struct mal_descriptor) * + (NUM_TX_BUFF * mal->num_tx_chans + + NUM_RX_BUFF * mal->num_rx_chans), mal->bd_virt, + mal->bd_dma); + kfree(mal); + + return 0; +} + +static struct of_device_id mal_platform_match[] = +{ + { + .compatible = "ibm,mcmal", + }, + { + .compatible = 
"ibm,mcmal2", + }, + /* Backward compat */ + { + .type = "mcmal-dma", + .compatible = "ibm,mcmal", + }, + { + .type = "mcmal-dma", + .compatible = "ibm,mcmal2", + }, + {}, +}; + +static struct of_platform_driver mal_of_driver = { + .name = "mcmal", + .match_table = mal_platform_match, + + .probe = mal_probe, + .remove = mal_remove, +}; + +int __init mal_init(void) +{ + return of_register_platform_driver(&mal_of_driver); +} + +void mal_exit(void) +{ + of_unregister_platform_driver(&mal_of_driver); +} diff --git a/drivers/net/ibm_newemac/mal.h b/drivers/net/ibm_newemac/mal.h new file mode 100644 index 0000000000000000000000000000000000000000..cb1a16d589fe780f74df1077ebc486fba93fd22c --- /dev/null +++ b/drivers/net/ibm_newemac/mal.h @@ -0,0 +1,276 @@ +/* + * drivers/net/ibm_newemac/mal.h + * + * Memory Access Layer (MAL) support + * + * Copyright (c) 2004, 2005 Zultys Technologies. + * Eugene Surovegin or + * + * Based on original work by + * Armin Kuster + * Copyright 2002 MontaVista Softare Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + */ +#ifndef __IBM_NEWEMAC_MAL_H +#define __IBM_NEWEMAC_MAL_H + +/* + * There are some variations on the MAL, we express them in this driver as + * MAL Version 1 and 2 though that doesn't match any IBM terminology. + * + * We call MAL 1 the version in 405GP, 405GPR, 405EP, 440EP, 440GR and + * NP405H. + * + * We call MAL 2 the version in 440GP, 440GX, 440SP, 440SPE and Axon + * + * The driver expects a "version" property in the emac node containing + * a number 1 or 2. New device-trees for EMAC capable platforms are thus + * required to include that when porting to arch/powerpc. 
+ */ + +/* MALx DCR registers */ +#define MAL_CFG 0x00 +#define MAL_CFG_SR 0x80000000 +#define MAL_CFG_PLBB 0x00004000 +#define MAL_CFG_OPBBL 0x00000080 +#define MAL_CFG_EOPIE 0x00000004 +#define MAL_CFG_LEA 0x00000002 +#define MAL_CFG_SD 0x00000001 + +/* MAL V1 CFG bits */ +#define MAL1_CFG_PLBP_MASK 0x00c00000 +#define MAL1_CFG_PLBP_10 0x00800000 +#define MAL1_CFG_GA 0x00200000 +#define MAL1_CFG_OA 0x00100000 +#define MAL1_CFG_PLBLE 0x00080000 +#define MAL1_CFG_PLBT_MASK 0x00078000 +#define MAL1_CFG_DEFAULT (MAL1_CFG_PLBP_10 | MAL1_CFG_PLBT_MASK) + +/* MAL V2 CFG bits */ +#define MAL2_CFG_RPP_MASK 0x00c00000 +#define MAL2_CFG_RPP_10 0x00800000 +#define MAL2_CFG_RMBS_MASK 0x00300000 +#define MAL2_CFG_WPP_MASK 0x000c0000 +#define MAL2_CFG_WPP_10 0x00080000 +#define MAL2_CFG_WMBS_MASK 0x00030000 +#define MAL2_CFG_PLBLE 0x00008000 +#define MAL2_CFG_DEFAULT (MAL2_CFG_RMBS_MASK | MAL2_CFG_WMBS_MASK | \ + MAL2_CFG_RPP_10 | MAL2_CFG_WPP_10) + +#define MAL_ESR 0x01 +#define MAL_ESR_EVB 0x80000000 +#define MAL_ESR_CIDT 0x40000000 +#define MAL_ESR_CID_MASK 0x3e000000 +#define MAL_ESR_CID_SHIFT 25 +#define MAL_ESR_DE 0x00100000 +#define MAL_ESR_OTE 0x00040000 +#define MAL_ESR_OSE 0x00020000 +#define MAL_ESR_PEIN 0x00010000 +#define MAL_ESR_DEI 0x00000010 +#define MAL_ESR_OTEI 0x00000004 +#define MAL_ESR_OSEI 0x00000002 +#define MAL_ESR_PBEI 0x00000001 + +/* MAL V1 ESR bits */ +#define MAL1_ESR_ONE 0x00080000 +#define MAL1_ESR_ONEI 0x00000008 + +/* MAL V2 ESR bits */ +#define MAL2_ESR_PTE 0x00800000 +#define MAL2_ESR_PRE 0x00400000 +#define MAL2_ESR_PWE 0x00200000 +#define MAL2_ESR_PTEI 0x00000080 +#define MAL2_ESR_PREI 0x00000040 +#define MAL2_ESR_PWEI 0x00000020 + + +#define MAL_IER 0x02 +#define MAL_IER_DE 0x00000010 +#define MAL_IER_OTE 0x00000004 +#define MAL_IER_OE 0x00000002 +#define MAL_IER_PE 0x00000001 +/* MAL V1 IER bits */ +#define MAL1_IER_NWE 0x00000008 +#define MAL1_IER_SOC_EVENTS MAL1_IER_NWE +#define MAL1_IER_EVENTS (MAL1_IER_SOC_EVENTS | MAL_IER_OTE | \ + MAL_IER_OTE | MAL_IER_OE | MAL_IER_PE) + +/* MAL V2 IER bits */ +#define MAL2_IER_PT 0x00000080 +#define MAL2_IER_PRE 0x00000040 +#define MAL2_IER_PWE 0x00000020 +#define MAL2_IER_SOC_EVENTS (MAL2_IER_PT | MAL2_IER_PRE | MAL2_IER_PWE) +#define MAL2_IER_EVENTS (MAL2_IER_SOC_EVENTS | MAL_IER_OTE | \ + MAL_IER_OTE | MAL_IER_OE | MAL_IER_PE) + + +#define MAL_TXCASR 0x04 +#define MAL_TXCARR 0x05 +#define MAL_TXEOBISR 0x06 +#define MAL_TXDEIR 0x07 +#define MAL_RXCASR 0x10 +#define MAL_RXCARR 0x11 +#define MAL_RXEOBISR 0x12 +#define MAL_RXDEIR 0x13 +#define MAL_TXCTPR(n) ((n) + 0x20) +#define MAL_RXCTPR(n) ((n) + 0x40) +#define MAL_RCBS(n) ((n) + 0x60) + +/* In reality MAL can handle TX buffers up to 4095 bytes long, + * but this isn't a good round number :) --ebs + */ +#define MAL_MAX_TX_SIZE 4080 +#define MAL_MAX_RX_SIZE 4080 + +static inline int mal_rx_size(int len) +{ + len = (len + 0xf) & ~0xf; + return len > MAL_MAX_RX_SIZE ? MAL_MAX_RX_SIZE : len; +} + +static inline int mal_tx_chunks(int len) +{ + return (len + MAL_MAX_TX_SIZE - 1) / MAL_MAX_TX_SIZE; +} + +#define MAL_CHAN_MASK(n) (0x80000000 >> (n)) + +/* MAL Buffer Descriptor structure */ +struct mal_descriptor { + u16 ctrl; /* MAL / Commac status control bits */ + u16 data_len; /* Max length is 4K-1 (12 bits) */ + u32 data_ptr; /* pointer to actual data buffer */ +}; + +/* the following defines are for the MadMAL status and control registers. 
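+ * These bits live in the "ctrl" field of struct mal_descriptor above.
+ * As an illustrative sketch, an RX consumer checks
+ *
+ *   if (!(desc->ctrl & MAL_RX_CTRL_EMPTY))
+ *
+ * to see whether the hardware has handed back a filled descriptor.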
*/ +/* MADMAL transmit and receive status/control bits */ +#define MAL_RX_CTRL_EMPTY 0x8000 +#define MAL_RX_CTRL_WRAP 0x4000 +#define MAL_RX_CTRL_CM 0x2000 +#define MAL_RX_CTRL_LAST 0x1000 +#define MAL_RX_CTRL_FIRST 0x0800 +#define MAL_RX_CTRL_INTR 0x0400 +#define MAL_RX_CTRL_SINGLE (MAL_RX_CTRL_LAST | MAL_RX_CTRL_FIRST) +#define MAL_IS_SINGLE_RX(ctrl) (((ctrl) & MAL_RX_CTRL_SINGLE) == MAL_RX_CTRL_SINGLE) + +#define MAL_TX_CTRL_READY 0x8000 +#define MAL_TX_CTRL_WRAP 0x4000 +#define MAL_TX_CTRL_CM 0x2000 +#define MAL_TX_CTRL_LAST 0x1000 +#define MAL_TX_CTRL_INTR 0x0400 + +struct mal_commac_ops { + void (*poll_tx) (void *dev); + int (*poll_rx) (void *dev, int budget); + int (*peek_rx) (void *dev); + void (*rxde) (void *dev); +}; + +struct mal_commac { + struct mal_commac_ops *ops; + void *dev; + struct list_head poll_list; + long flags; +#define MAL_COMMAC_RX_STOPPED 0 +#define MAL_COMMAC_POLL_DISABLED 1 + u32 tx_chan_mask; + u32 rx_chan_mask; + struct list_head list; +}; + +struct mal_instance { + int version; + int dcr_base; + dcr_host_t dcr_host; + + int num_tx_chans; /* Number of TX channels */ + int num_rx_chans; /* Number of RX channels */ + int txeob_irq; /* TX End Of Buffer IRQ */ + int rxeob_irq; /* RX End Of Buffer IRQ */ + int txde_irq; /* TX Descriptor Error IRQ */ + int rxde_irq; /* RX Descriptor Error IRQ */ + int serr_irq; /* MAL System Error IRQ */ + + struct list_head poll_list; + struct napi_struct napi; + + struct list_head list; + u32 tx_chan_mask; + u32 rx_chan_mask; + + dma_addr_t bd_dma; + struct mal_descriptor *bd_virt; + + struct of_device *ofdev; + int index; + spinlock_t lock; +}; + +static inline u32 get_mal_dcrn(struct mal_instance *mal, int reg) +{ + return dcr_read(mal->dcr_host, mal->dcr_base + reg); +} + +static inline void set_mal_dcrn(struct mal_instance *mal, int reg, u32 val) +{ + dcr_write(mal->dcr_host, mal->dcr_base + reg, val); +} + +/* Register MAL devices */ +int mal_init(void); +void mal_exit(void); + +int mal_register_commac(struct mal_instance *mal, + struct mal_commac *commac); +void mal_unregister_commac(struct mal_instance *mal, + struct mal_commac *commac); +int mal_set_rcbs(struct mal_instance *mal, int channel, unsigned long size); + +/* Returns BD ring offset for a particular channel + (in 'struct mal_descriptor' elements) +*/ +int mal_tx_bd_offset(struct mal_instance *mal, int channel); +int mal_rx_bd_offset(struct mal_instance *mal, int channel); + +void mal_enable_tx_channel(struct mal_instance *mal, int channel); +void mal_disable_tx_channel(struct mal_instance *mal, int channel); +void mal_enable_rx_channel(struct mal_instance *mal, int channel); +void mal_disable_rx_channel(struct mal_instance *mal, int channel); + +void mal_poll_disable(struct mal_instance *mal, struct mal_commac *commac); +void mal_poll_enable(struct mal_instance *mal, struct mal_commac *commac); + +/* Add/remove EMAC to/from MAL polling list */ +void mal_poll_add(struct mal_instance *mal, struct mal_commac *commac); +void mal_poll_del(struct mal_instance *mal, struct mal_commac *commac); + +/* Ethtool MAL registers */ +struct mal_regs { + u32 tx_count; + u32 rx_count; + + u32 cfg; + u32 esr; + u32 ier; + u32 tx_casr; + u32 tx_carr; + u32 tx_eobisr; + u32 tx_deir; + u32 rx_casr; + u32 rx_carr; + u32 rx_eobisr; + u32 rx_deir; + u32 tx_ctpr[32]; + u32 rx_ctpr[32]; + u32 rcbs[32]; +}; + +int mal_get_regs_len(struct mal_instance *mal); +void *mal_dump_regs(struct mal_instance *mal, void *buf); + +#endif /* __IBM_NEWEMAC_MAL_H */ diff --git 
a/drivers/net/ibm_newemac/phy.c b/drivers/net/ibm_newemac/phy.c new file mode 100644 index 0000000000000000000000000000000000000000..aa1f0ddf1e3ed417ed3aa3a84bd5cf718ff6e316 --- /dev/null +++ b/drivers/net/ibm_newemac/phy.c @@ -0,0 +1,373 @@ +/* + * drivers/net/ibm_newemac/phy.c + * + * Driver for PowerPC 4xx on-chip ethernet controller, PHY support. + * Borrowed from sungem_phy.c, though I only kept the generic MII + * driver for now. + * + * This file should be shared with other drivers or eventually + * merged as the "low level" part of miilib + * + * (c) 2003, Benjamin Herrenscmidt (benh@kernel.crashing.org) + * (c) 2004-2005, Eugene Surovegin + * + */ +#include +#include +#include +#include +#include +#include +#include + +#include "emac.h" +#include "phy.h" + +static inline int phy_read(struct mii_phy *phy, int reg) +{ + return phy->mdio_read(phy->dev, phy->address, reg); +} + +static inline void phy_write(struct mii_phy *phy, int reg, int val) +{ + phy->mdio_write(phy->dev, phy->address, reg, val); +} + +int emac_mii_reset_phy(struct mii_phy *phy) +{ + int val; + int limit = 10000; + + val = phy_read(phy, MII_BMCR); + val &= ~(BMCR_ISOLATE | BMCR_ANENABLE); + val |= BMCR_RESET; + phy_write(phy, MII_BMCR, val); + + udelay(300); + + while (limit--) { + val = phy_read(phy, MII_BMCR); + if (val >= 0 && (val & BMCR_RESET) == 0) + break; + udelay(10); + } + if ((val & BMCR_ISOLATE) && limit > 0) + phy_write(phy, MII_BMCR, val & ~BMCR_ISOLATE); + + return limit <= 0; +} + +static int genmii_setup_aneg(struct mii_phy *phy, u32 advertise) +{ + int ctl, adv; + + phy->autoneg = AUTONEG_ENABLE; + phy->speed = SPEED_10; + phy->duplex = DUPLEX_HALF; + phy->pause = phy->asym_pause = 0; + phy->advertising = advertise; + + ctl = phy_read(phy, MII_BMCR); + if (ctl < 0) + return ctl; + ctl &= ~(BMCR_FULLDPLX | BMCR_SPEED100 | BMCR_SPEED1000 | BMCR_ANENABLE); + + /* First clear the PHY */ + phy_write(phy, MII_BMCR, ctl); + + /* Setup standard advertise */ + adv = phy_read(phy, MII_ADVERTISE); + if (adv < 0) + return adv; + adv &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4 | ADVERTISE_PAUSE_CAP | + ADVERTISE_PAUSE_ASYM); + if (advertise & ADVERTISED_10baseT_Half) + adv |= ADVERTISE_10HALF; + if (advertise & ADVERTISED_10baseT_Full) + adv |= ADVERTISE_10FULL; + if (advertise & ADVERTISED_100baseT_Half) + adv |= ADVERTISE_100HALF; + if (advertise & ADVERTISED_100baseT_Full) + adv |= ADVERTISE_100FULL; + if (advertise & ADVERTISED_Pause) + adv |= ADVERTISE_PAUSE_CAP; + if (advertise & ADVERTISED_Asym_Pause) + adv |= ADVERTISE_PAUSE_ASYM; + phy_write(phy, MII_ADVERTISE, adv); + + if (phy->features & + (SUPPORTED_1000baseT_Full | SUPPORTED_1000baseT_Half)) { + adv = phy_read(phy, MII_CTRL1000); + if (adv < 0) + return adv; + adv &= ~(ADVERTISE_1000FULL | ADVERTISE_1000HALF); + if (advertise & ADVERTISED_1000baseT_Full) + adv |= ADVERTISE_1000FULL; + if (advertise & ADVERTISED_1000baseT_Half) + adv |= ADVERTISE_1000HALF; + phy_write(phy, MII_CTRL1000, adv); + } + + /* Start/Restart aneg */ + ctl = phy_read(phy, MII_BMCR); + ctl |= (BMCR_ANENABLE | BMCR_ANRESTART); + phy_write(phy, MII_BMCR, ctl); + + return 0; +} + +static int genmii_setup_forced(struct mii_phy *phy, int speed, int fd) +{ + int ctl; + + phy->autoneg = AUTONEG_DISABLE; + phy->speed = speed; + phy->duplex = fd; + phy->pause = phy->asym_pause = 0; + + ctl = phy_read(phy, MII_BMCR); + if (ctl < 0) + return ctl; + ctl &= ~(BMCR_FULLDPLX | BMCR_SPEED100 | BMCR_SPEED1000 | BMCR_ANENABLE); + + /* First clear the PHY */ + phy_write(phy, MII_BMCR, ctl | 
BMCR_RESET); + + /* Select speed & duplex */ + switch (speed) { + case SPEED_10: + break; + case SPEED_100: + ctl |= BMCR_SPEED100; + break; + case SPEED_1000: + ctl |= BMCR_SPEED1000; + break; + default: + return -EINVAL; + } + if (fd == DUPLEX_FULL) + ctl |= BMCR_FULLDPLX; + phy_write(phy, MII_BMCR, ctl); + + return 0; +} + +static int genmii_poll_link(struct mii_phy *phy) +{ + int status; + + /* Clear latched value with dummy read */ + phy_read(phy, MII_BMSR); + status = phy_read(phy, MII_BMSR); + if (status < 0 || (status & BMSR_LSTATUS) == 0) + return 0; + if (phy->autoneg == AUTONEG_ENABLE && !(status & BMSR_ANEGCOMPLETE)) + return 0; + return 1; +} + +static int genmii_read_link(struct mii_phy *phy) +{ + if (phy->autoneg == AUTONEG_ENABLE) { + int glpa = 0; + int lpa = phy_read(phy, MII_LPA) & phy_read(phy, MII_ADVERTISE); + if (lpa < 0) + return lpa; + + if (phy->features & + (SUPPORTED_1000baseT_Full | SUPPORTED_1000baseT_Half)) { + int adv = phy_read(phy, MII_CTRL1000); + glpa = phy_read(phy, MII_STAT1000); + + if (glpa < 0 || adv < 0) + return adv; + + glpa &= adv << 2; + } + + phy->speed = SPEED_10; + phy->duplex = DUPLEX_HALF; + phy->pause = phy->asym_pause = 0; + + if (glpa & (LPA_1000FULL | LPA_1000HALF)) { + phy->speed = SPEED_1000; + if (glpa & LPA_1000FULL) + phy->duplex = DUPLEX_FULL; + } else if (lpa & (LPA_100FULL | LPA_100HALF)) { + phy->speed = SPEED_100; + if (lpa & LPA_100FULL) + phy->duplex = DUPLEX_FULL; + } else if (lpa & LPA_10FULL) + phy->duplex = DUPLEX_FULL; + + if (phy->duplex == DUPLEX_FULL) { + phy->pause = lpa & LPA_PAUSE_CAP ? 1 : 0; + phy->asym_pause = lpa & LPA_PAUSE_ASYM ? 1 : 0; + } + } else { + int bmcr = phy_read(phy, MII_BMCR); + if (bmcr < 0) + return bmcr; + + if (bmcr & BMCR_FULLDPLX) + phy->duplex = DUPLEX_FULL; + else + phy->duplex = DUPLEX_HALF; + if (bmcr & BMCR_SPEED1000) + phy->speed = SPEED_1000; + else if (bmcr & BMCR_SPEED100) + phy->speed = SPEED_100; + else + phy->speed = SPEED_10; + + phy->pause = phy->asym_pause = 0; + } + return 0; +} + +/* Generic implementation for most 10/100/1000 PHYs */ +static struct mii_phy_ops generic_phy_ops = { + .setup_aneg = genmii_setup_aneg, + .setup_forced = genmii_setup_forced, + .poll_link = genmii_poll_link, + .read_link = genmii_read_link +}; + +static struct mii_phy_def genmii_phy_def = { + .phy_id = 0x00000000, + .phy_id_mask = 0x00000000, + .name = "Generic MII", + .ops = &generic_phy_ops +}; + +/* CIS8201 */ +#define MII_CIS8201_10BTCSR 0x16 +#define TENBTCSR_ECHO_DISABLE 0x2000 +#define MII_CIS8201_EPCR 0x17 +#define EPCR_MODE_MASK 0x3000 +#define EPCR_GMII_MODE 0x0000 +#define EPCR_RGMII_MODE 0x1000 +#define EPCR_TBI_MODE 0x2000 +#define EPCR_RTBI_MODE 0x3000 +#define MII_CIS8201_ACSR 0x1c +#define ACSR_PIN_PRIO_SELECT 0x0004 + +static int cis8201_init(struct mii_phy *phy) +{ + int epcr; + + epcr = phy_read(phy, MII_CIS8201_EPCR); + if (epcr < 0) + return epcr; + + epcr &= ~EPCR_MODE_MASK; + + switch (phy->mode) { + case PHY_MODE_TBI: + epcr |= EPCR_TBI_MODE; + break; + case PHY_MODE_RTBI: + epcr |= EPCR_RTBI_MODE; + break; + case PHY_MODE_GMII: + epcr |= EPCR_GMII_MODE; + break; + case PHY_MODE_RGMII: + default: + epcr |= EPCR_RGMII_MODE; + } + + phy_write(phy, MII_CIS8201_EPCR, epcr); + + /* MII regs override strap pins */ + phy_write(phy, MII_CIS8201_ACSR, + phy_read(phy, MII_CIS8201_ACSR) | ACSR_PIN_PRIO_SELECT); + + /* Disable TX_EN -> CRS echo mode, otherwise 10/HDX doesn't work */ + phy_write(phy, MII_CIS8201_10BTCSR, + phy_read(phy, MII_CIS8201_10BTCSR) | TENBTCSR_ECHO_DISABLE); + 
+ return 0; +} + +static struct mii_phy_ops cis8201_phy_ops = { + .init = cis8201_init, + .setup_aneg = genmii_setup_aneg, + .setup_forced = genmii_setup_forced, + .poll_link = genmii_poll_link, + .read_link = genmii_read_link +}; + +static struct mii_phy_def cis8201_phy_def = { + .phy_id = 0x000fc410, + .phy_id_mask = 0x000ffff0, + .name = "CIS8201 Gigabit Ethernet", + .ops = &cis8201_phy_ops +}; + +static struct mii_phy_def *mii_phy_table[] = { + &cis8201_phy_def, + &genmii_phy_def, + NULL +}; + +int emac_mii_phy_probe(struct mii_phy *phy, int address) +{ + struct mii_phy_def *def; + int i; + u32 id; + + phy->autoneg = AUTONEG_DISABLE; + phy->advertising = 0; + phy->address = address; + phy->speed = SPEED_10; + phy->duplex = DUPLEX_HALF; + phy->pause = phy->asym_pause = 0; + + /* Take PHY out of isolate mode and reset it. */ + if (emac_mii_reset_phy(phy)) + return -ENODEV; + + /* Read ID and find matching entry */ + id = (phy_read(phy, MII_PHYSID1) << 16) | phy_read(phy, MII_PHYSID2); + for (i = 0; (def = mii_phy_table[i]) != NULL; i++) + if ((id & def->phy_id_mask) == def->phy_id) + break; + /* Should never be NULL (we have a generic entry), but... */ + if (!def) + return -ENODEV; + + phy->def = def; + + /* Determine PHY features if needed */ + phy->features = def->features; + if (!phy->features) { + u16 bmsr = phy_read(phy, MII_BMSR); + if (bmsr & BMSR_ANEGCAPABLE) + phy->features |= SUPPORTED_Autoneg; + if (bmsr & BMSR_10HALF) + phy->features |= SUPPORTED_10baseT_Half; + if (bmsr & BMSR_10FULL) + phy->features |= SUPPORTED_10baseT_Full; + if (bmsr & BMSR_100HALF) + phy->features |= SUPPORTED_100baseT_Half; + if (bmsr & BMSR_100FULL) + phy->features |= SUPPORTED_100baseT_Full; + if (bmsr & BMSR_ESTATEN) { + u16 esr = phy_read(phy, MII_ESTATUS); + if (esr & ESTATUS_1000_TFULL) + phy->features |= SUPPORTED_1000baseT_Full; + if (esr & ESTATUS_1000_THALF) + phy->features |= SUPPORTED_1000baseT_Half; + } + phy->features |= SUPPORTED_MII; + } + + /* Setup default advertising */ + phy->advertising = phy->features; + + return 0; +} + +MODULE_LICENSE("GPL"); diff --git a/drivers/net/ibm_newemac/phy.h b/drivers/net/ibm_newemac/phy.h new file mode 100644 index 0000000000000000000000000000000000000000..6feca26afedbbf487cb8a1d4881cb74b818fd5a9 --- /dev/null +++ b/drivers/net/ibm_newemac/phy.h @@ -0,0 +1,80 @@ +/* + * drivers/net/ibm_newemac/phy.h + * + * Driver for PowerPC 4xx on-chip ethernet controller, PHY support + * + * Benjamin Herrenschmidt + * February 2003 + * + * Minor additions by Eugene Surovegin , 2004 + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + * This file basically duplicates sungem_phy.{c,h} with different PHYs + * supported. 
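+ *
+ * A minimal caller sketch for emac_mii_phy_probe() declared below
+ * ("ndev", "my_mdio_read", "my_mdio_write" and "addr" are placeholders
+ * supplied by the host driver):
+ *
+ *   struct mii_phy phy = { .dev = ndev,
+ *                          .mdio_read = my_mdio_read,
+ *                          .mdio_write = my_mdio_write };
+ *   if (emac_mii_phy_probe(&phy, addr) == 0)
+ *       phy.def, phy.features and phy.advertising are now filled in
+ *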
I'm looking into merging that in a single mii layer more + * flexible than mii.c + */ + +#ifndef __IBM_NEWEMAC_PHY_H +#define __IBM_NEWEMAC_PHY_H + +struct mii_phy; + +/* Operations supported by any kind of PHY */ +struct mii_phy_ops { + int (*init) (struct mii_phy * phy); + int (*suspend) (struct mii_phy * phy, int wol_options); + int (*setup_aneg) (struct mii_phy * phy, u32 advertise); + int (*setup_forced) (struct mii_phy * phy, int speed, int fd); + int (*poll_link) (struct mii_phy * phy); + int (*read_link) (struct mii_phy * phy); +}; + +/* Structure used to statically define an mii/gii based PHY */ +struct mii_phy_def { + u32 phy_id; /* Concatenated ID1 << 16 | ID2 */ + u32 phy_id_mask; /* Significant bits */ + u32 features; /* Ethtool SUPPORTED_* defines or + 0 for autodetect */ + int magic_aneg; /* Autoneg does all speed test for us */ + const char *name; + const struct mii_phy_ops *ops; +}; + +/* An instance of a PHY, partially borrowed from mii_if_info */ +struct mii_phy { + struct mii_phy_def *def; + u32 advertising; /* Ethtool ADVERTISED_* defines */ + u32 features; /* Copied from mii_phy_def.features + or determined automaticaly */ + int address; /* PHY address */ + int mode; /* PHY mode */ + + /* 1: autoneg enabled, 0: disabled */ + int autoneg; + + /* forced speed & duplex (no autoneg) + * partner speed & duplex & pause (autoneg) + */ + int speed; + int duplex; + int pause; + int asym_pause; + + /* Provided by host chip */ + struct net_device *dev; + int (*mdio_read) (struct net_device * dev, int addr, int reg); + void (*mdio_write) (struct net_device * dev, int addr, int reg, + int val); +}; + +/* Pass in a struct mii_phy with dev, mdio_read and mdio_write + * filled, the remaining fields will be filled on return + */ +int emac_mii_phy_probe(struct mii_phy *phy, int address); +int emac_mii_reset_phy(struct mii_phy *phy); + +#endif /* __IBM_NEWEMAC_PHY_H */ diff --git a/drivers/net/ibm_newemac/rgmii.c b/drivers/net/ibm_newemac/rgmii.c new file mode 100644 index 0000000000000000000000000000000000000000..bcd7fc639c40a99411074fcd7bed0891c47be0f6 --- /dev/null +++ b/drivers/net/ibm_newemac/rgmii.c @@ -0,0 +1,323 @@ +/* + * drivers/net/ibm_newemac/rgmii.c + * + * Driver for PowerPC 4xx on-chip ethernet controller, RGMII bridge support. + * + * Copyright (c) 2004, 2005 Zultys Technologies. + * Eugene Surovegin or + * + * Based on original work by + * Matt Porter + * Copyright 2004 MontaVista Software, Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. 
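+ *
+ * Rough call pattern from the EMAC side (sketch only; "rgmii_ofdev" and
+ * "input" are placeholders for the bridge device and the bridge input
+ * the EMAC is wired to):
+ *
+ *   rgmii_attach(rgmii_ofdev, input, PHY_MODE_RGMII);
+ *   rgmii_set_speed(rgmii_ofdev, input, SPEED_1000);
+ *   rgmii_get_mdio(rgmii_ofdev, input);
+ *       ... MDIO access through the EMAC ...
+ *   rgmii_put_mdio(rgmii_ofdev, input);
+ *   ...
+ *   rgmii_detach(rgmii_ofdev, input);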
+ * + */ +#include +#include +#include + +#include "emac.h" +#include "debug.h" + +// XXX FIXME: Axon seems to support a subset of the RGMII, we +// thus need to take that into account and possibly change some +// of the bit settings below that don't seem to quite match the +// AXON spec + +/* RGMIIx_FER */ +#define RGMII_FER_MASK(idx) (0x7 << ((idx) * 4)) +#define RGMII_FER_RTBI(idx) (0x4 << ((idx) * 4)) +#define RGMII_FER_RGMII(idx) (0x5 << ((idx) * 4)) +#define RGMII_FER_TBI(idx) (0x6 << ((idx) * 4)) +#define RGMII_FER_GMII(idx) (0x7 << ((idx) * 4)) + +/* RGMIIx_SSR */ +#define RGMII_SSR_MASK(idx) (0x7 << ((idx) * 8)) +#define RGMII_SSR_100(idx) (0x2 << ((idx) * 8)) +#define RGMII_SSR_1000(idx) (0x4 << ((idx) * 8)) + +/* RGMII bridge supports only GMII/TBI and RGMII/RTBI PHYs */ +static inline int rgmii_valid_mode(int phy_mode) +{ + return phy_mode == PHY_MODE_GMII || + phy_mode == PHY_MODE_RGMII || + phy_mode == PHY_MODE_TBI || + phy_mode == PHY_MODE_RTBI; +} + +static inline const char *rgmii_mode_name(int mode) +{ + switch (mode) { + case PHY_MODE_RGMII: + return "RGMII"; + case PHY_MODE_TBI: + return "TBI"; + case PHY_MODE_GMII: + return "GMII"; + case PHY_MODE_RTBI: + return "RTBI"; + default: + BUG(); + } +} + +static inline u32 rgmii_mode_mask(int mode, int input) +{ + switch (mode) { + case PHY_MODE_RGMII: + return RGMII_FER_RGMII(input); + case PHY_MODE_TBI: + return RGMII_FER_TBI(input); + case PHY_MODE_GMII: + return RGMII_FER_GMII(input); + case PHY_MODE_RTBI: + return RGMII_FER_RTBI(input); + default: + BUG(); + } +} + +int __devinit rgmii_attach(struct of_device *ofdev, int input, int mode) +{ + struct rgmii_instance *dev = dev_get_drvdata(&ofdev->dev); + struct rgmii_regs *p = dev->base; + + RGMII_DBG(dev, "attach(%d)" NL, input); + + /* Check if we need to attach to a RGMII */ + if (input < 0 || !rgmii_valid_mode(mode)) { + printk(KERN_ERR "%s: unsupported settings !\n", + ofdev->node->full_name); + return -ENODEV; + } + + mutex_lock(&dev->lock); + + /* Enable this input */ + out_be32(&p->fer, in_be32(&p->fer) | rgmii_mode_mask(mode, input)); + + printk(KERN_NOTICE "%s: input %d in %s mode\n", + ofdev->node->full_name, input, rgmii_mode_name(mode)); + + ++dev->users; + + mutex_unlock(&dev->lock); + + return 0; +} + +void rgmii_set_speed(struct of_device *ofdev, int input, int speed) +{ + struct rgmii_instance *dev = dev_get_drvdata(&ofdev->dev); + struct rgmii_regs *p = dev->base; + u32 ssr; + + mutex_lock(&dev->lock); + + ssr = in_be32(&p->ssr) & ~RGMII_SSR_MASK(input); + + RGMII_DBG(dev, "speed(%d, %d)" NL, input, speed); + + if (speed == SPEED_1000) + ssr |= RGMII_SSR_1000(input); + else if (speed == SPEED_100) + ssr |= RGMII_SSR_100(input); + + out_be32(&p->ssr, ssr); + + mutex_unlock(&dev->lock); +} + +void rgmii_get_mdio(struct of_device *ofdev, int input) +{ + struct rgmii_instance *dev = dev_get_drvdata(&ofdev->dev); + struct rgmii_regs *p = dev->base; + u32 fer; + + RGMII_DBG2(dev, "get_mdio(%d)" NL, input); + + if (dev->type != RGMII_AXON) + return; + + mutex_lock(&dev->lock); + + fer = in_be32(&p->fer); + fer |= 0x00080000u >> input; + out_be32(&p->fer, fer); + (void)in_be32(&p->fer); + + DBG2(dev, " fer = 0x%08x\n", fer); +} + +void rgmii_put_mdio(struct of_device *ofdev, int input) +{ + struct rgmii_instance *dev = dev_get_drvdata(&ofdev->dev); + struct rgmii_regs *p = dev->base; + u32 fer; + + RGMII_DBG2(dev, "put_mdio(%d)" NL, input); + + if (dev->type != RGMII_AXON) + return; + + fer = in_be32(&p->fer); + fer &= ~(0x00080000u >> input); + 
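+	/*
+	 * (Editorial comment, not part of the original patch.)  On Axon-type
+	 * bridges the 0x00080000u >> input bit in FER appears to gate MDIO
+	 * access for that port: rgmii_get_mdio() set it under dev->lock, and
+	 * it is cleared again here before the lock is dropped.  The (void)
+	 * read-back of FER below is the usual way to flush the posted write.
+	 */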
out_be32(&p->fer, fer); + (void)in_be32(&p->fer); + + DBG2(dev, " fer = 0x%08x\n", fer); + + mutex_unlock(&dev->lock); +} + +void __devexit rgmii_detach(struct of_device *ofdev, int input) +{ + struct rgmii_instance *dev = dev_get_drvdata(&ofdev->dev); + struct rgmii_regs *p = dev->base; + + mutex_lock(&dev->lock); + + BUG_ON(!dev || dev->users == 0); + + RGMII_DBG(dev, "detach(%d)" NL, input); + + /* Disable this input */ + out_be32(&p->fer, in_be32(&p->fer) & ~RGMII_FER_MASK(input)); + + --dev->users; + + mutex_unlock(&dev->lock); +} + +int rgmii_get_regs_len(struct of_device *ofdev) +{ + return sizeof(struct emac_ethtool_regs_subhdr) + + sizeof(struct rgmii_regs); +} + +void *rgmii_dump_regs(struct of_device *ofdev, void *buf) +{ + struct rgmii_instance *dev = dev_get_drvdata(&ofdev->dev); + struct emac_ethtool_regs_subhdr *hdr = buf; + struct rgmii_regs *regs = (struct rgmii_regs *)(hdr + 1); + + hdr->version = 0; + hdr->index = 0; /* for now, are there chips with more than one + * rgmii ? if yes, then we'll add a cell_index + * like we do for emac + */ + memcpy_fromio(regs, dev->base, sizeof(struct rgmii_regs)); + return regs + 1; +} + + +static int __devinit rgmii_probe(struct of_device *ofdev, + const struct of_device_id *match) +{ + struct device_node *np = ofdev->node; + struct rgmii_instance *dev; + struct resource regs; + int rc; + + rc = -ENOMEM; + dev = kzalloc(sizeof(struct rgmii_instance), GFP_KERNEL); + if (dev == NULL) { + printk(KERN_ERR "%s: could not allocate RGMII device!\n", + np->full_name); + goto err_gone; + } + + mutex_init(&dev->lock); + dev->ofdev = ofdev; + + rc = -ENXIO; + if (of_address_to_resource(np, 0, ®s)) { + printk(KERN_ERR "%s: Can't get registers address\n", + np->full_name); + goto err_free; + } + + rc = -ENOMEM; + dev->base = (struct rgmii_regs *)ioremap(regs.start, + sizeof(struct rgmii_regs)); + if (dev->base == NULL) { + printk(KERN_ERR "%s: Can't map device registers!\n", + np->full_name); + goto err_free; + } + + /* Check for RGMII type */ + if (device_is_compatible(ofdev->node, "ibm,rgmii-axon")) + dev->type = RGMII_AXON; + else + dev->type = RGMII_STANDARD; + + DBG2(dev, " Boot FER = 0x%08x, SSR = 0x%08x\n", + in_be32(&dev->base->fer), in_be32(&dev->base->ssr)); + + /* Disable all inputs by default */ + out_be32(&dev->base->fer, 0); + + printk(KERN_INFO + "RGMII %s %s initialized\n", + dev->type == RGMII_STANDARD ? 
"standard" : "axon", + ofdev->node->full_name); + + wmb(); + dev_set_drvdata(&ofdev->dev, dev); + + return 0; + + err_free: + kfree(dev); + err_gone: + return rc; +} + +static int __devexit rgmii_remove(struct of_device *ofdev) +{ + struct rgmii_instance *dev = dev_get_drvdata(&ofdev->dev); + + dev_set_drvdata(&ofdev->dev, NULL); + + WARN_ON(dev->users != 0); + + iounmap(dev->base); + kfree(dev); + + return 0; +} + +static struct of_device_id rgmii_match[] = +{ + { + .type = "rgmii-interface", + .compatible = "ibm,rgmii", + }, + { + .type = "emac-rgmii", + }, + {}, +}; + +static struct of_platform_driver rgmii_driver = { + .name = "emac-rgmii", + .match_table = rgmii_match, + + .probe = rgmii_probe, + .remove = rgmii_remove, +}; + +int __init rgmii_init(void) +{ + return of_register_platform_driver(&rgmii_driver); +} + +void rgmii_exit(void) +{ + of_unregister_platform_driver(&rgmii_driver); +} diff --git a/drivers/net/ibm_newemac/rgmii.h b/drivers/net/ibm_newemac/rgmii.h new file mode 100644 index 0000000000000000000000000000000000000000..57806833121e3a2cdd58a7d7421b4f8bbece7444 --- /dev/null +++ b/drivers/net/ibm_newemac/rgmii.h @@ -0,0 +1,76 @@ +/* + * drivers/net/ibm_newemac/rgmii.h + * + * Driver for PowerPC 4xx on-chip ethernet controller, RGMII bridge support. + * + * Based on ocp_zmii.h/ibm_emac_zmii.h + * Armin Kuster akuster@mvista.com + * + * Copyright 2004 MontaVista Software, Inc. + * Matt Porter + * + * Copyright (c) 2004, 2005 Zultys Technologies. + * Eugene Surovegin or + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + */ + +#ifndef __IBM_NEWEMAC_RGMII_H +#define __IBM_NEWEMAC_RGMII_H + +/* RGMII bridge type */ +#define RGMII_STANDARD 0 +#define RGMII_AXON 1 + +/* RGMII bridge */ +struct rgmii_regs { + u32 fer; /* Function enable register */ + u32 ssr; /* Speed select register */ +}; + +/* RGMII device */ +struct rgmii_instance { + struct rgmii_regs __iomem *base; + + /* Type of RGMII bridge */ + int type; + + /* Only one EMAC whacks us at a time */ + struct mutex lock; + + /* number of EMACs using this RGMII bridge */ + int users; + + /* OF device instance */ + struct of_device *ofdev; +}; + +#ifdef CONFIG_IBM_NEW_EMAC_RGMII + +extern int rgmii_init(void); +extern void rgmii_exit(void); +extern int rgmii_attach(struct of_device *ofdev, int input, int mode); +extern void rgmii_detach(struct of_device *ofdev, int input); +extern void rgmii_get_mdio(struct of_device *ofdev, int input); +extern void rgmii_put_mdio(struct of_device *ofdev, int input); +extern void rgmii_set_speed(struct of_device *ofdev, int input, int speed); +extern int rgmii_get_regs_len(struct of_device *ofdev); +extern void *rgmii_dump_regs(struct of_device *ofdev, void *buf); + +#else + +# define rgmii_init() 0 +# define rgmii_exit() do { } while(0) +# define rgmii_attach(x,y,z) (-ENXIO) +# define rgmii_detach(x,y) do { } while(0) +# define rgmii_get_mdio(o,i) do { } while (0) +# define rgmii_put_mdio(o,i) do { } while (0) +# define rgmii_set_speed(x,y,z) do { } while(0) +# define rgmii_get_regs_len(x) 0 +# define rgmii_dump_regs(x,buf) (buf) +#endif /* !CONFIG_IBM_NEW_EMAC_RGMII */ + +#endif /* __IBM_NEWEMAC_RGMII_H */ diff --git a/drivers/net/ibm_newemac/tah.c b/drivers/net/ibm_newemac/tah.c new file mode 100644 index 
0000000000000000000000000000000000000000..e05c7e81efb60ff7aa5b9d8bff2703fc44493be1 --- /dev/null +++ b/drivers/net/ibm_newemac/tah.c @@ -0,0 +1,173 @@ +/* + * drivers/net/ibm_newemac/tah.c + * + * Driver for PowerPC 4xx on-chip ethernet controller, TAH support. + * + * Copyright 2004 MontaVista Software, Inc. + * Matt Porter + * + * Copyright (c) 2005 Eugene Surovegin + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + */ +#include + +#include "emac.h" +#include "core.h" + +int __devinit tah_attach(struct of_device *ofdev, int channel) +{ + struct tah_instance *dev = dev_get_drvdata(&ofdev->dev); + + mutex_lock(&dev->lock); + /* Reset has been done at probe() time... nothing else to do for now */ + ++dev->users; + mutex_unlock(&dev->lock); + + return 0; +} + +void __devexit tah_detach(struct of_device *ofdev, int channel) +{ + struct tah_instance *dev = dev_get_drvdata(&ofdev->dev); + + mutex_lock(&dev->lock); + --dev->users; + mutex_unlock(&dev->lock); +} + +void tah_reset(struct of_device *ofdev) +{ + struct tah_instance *dev = dev_get_drvdata(&ofdev->dev); + struct tah_regs *p = dev->base; + int n; + + /* Reset TAH */ + out_be32(&p->mr, TAH_MR_SR); + n = 100; + while ((in_be32(&p->mr) & TAH_MR_SR) && n) + --n; + + if (unlikely(!n)) + printk(KERN_ERR "%s: reset timeout\n", ofdev->node->full_name); + + /* 10KB TAH TX FIFO accomodates the max MTU of 9000 */ + out_be32(&p->mr, + TAH_MR_CVR | TAH_MR_ST_768 | TAH_MR_TFS_10KB | TAH_MR_DTFP | + TAH_MR_DIG); +} + +int tah_get_regs_len(struct of_device *ofdev) +{ + return sizeof(struct emac_ethtool_regs_subhdr) + + sizeof(struct tah_regs); +} + +void *tah_dump_regs(struct of_device *ofdev, void *buf) +{ + struct tah_instance *dev = dev_get_drvdata(&ofdev->dev); + struct emac_ethtool_regs_subhdr *hdr = buf; + struct tah_regs *regs = (struct tah_regs *)(hdr + 1); + + hdr->version = 0; + hdr->index = 0; /* for now, are there chips with more than one + * zmii ? 
if yes, then we'll add a cell_index + * like we do for emac + */ + memcpy_fromio(regs, dev->base, sizeof(struct tah_regs)); + return regs + 1; +} + +static int __devinit tah_probe(struct of_device *ofdev, + const struct of_device_id *match) +{ + struct device_node *np = ofdev->node; + struct tah_instance *dev; + struct resource regs; + int rc; + + rc = -ENOMEM; + dev = kzalloc(sizeof(struct tah_instance), GFP_KERNEL); + if (dev == NULL) { + printk(KERN_ERR "%s: could not allocate TAH device!\n", + np->full_name); + goto err_gone; + } + + mutex_init(&dev->lock); + dev->ofdev = ofdev; + + rc = -ENXIO; + if (of_address_to_resource(np, 0, ®s)) { + printk(KERN_ERR "%s: Can't get registers address\n", + np->full_name); + goto err_free; + } + + rc = -ENOMEM; + dev->base = (struct tah_regs *)ioremap(regs.start, + sizeof(struct tah_regs)); + if (dev->base == NULL) { + printk(KERN_ERR "%s: Can't map device registers!\n", + np->full_name); + goto err_free; + } + + /* Initialize TAH and enable IPv4 checksum verification, no TSO yet */ + tah_reset(ofdev); + + printk(KERN_INFO + "TAH %s initialized\n", ofdev->node->full_name); + wmb(); + dev_set_drvdata(&ofdev->dev, dev); + + return 0; + + err_free: + kfree(dev); + err_gone: + return rc; +} + +static int __devexit tah_remove(struct of_device *ofdev) +{ + struct tah_instance *dev = dev_get_drvdata(&ofdev->dev); + + dev_set_drvdata(&ofdev->dev, NULL); + + WARN_ON(dev->users != 0); + + iounmap(dev->base); + kfree(dev); + + return 0; +} + +static struct of_device_id tah_match[] = +{ + { + .type = "tah", + }, + {}, +}; + +static struct of_platform_driver tah_driver = { + .name = "emac-tah", + .match_table = tah_match, + + .probe = tah_probe, + .remove = tah_remove, +}; + +int __init tah_init(void) +{ + return of_register_platform_driver(&tah_driver); +} + +void tah_exit(void) +{ + of_unregister_platform_driver(&tah_driver); +} diff --git a/drivers/net/ibm_newemac/tah.h b/drivers/net/ibm_newemac/tah.h new file mode 100644 index 0000000000000000000000000000000000000000..bc41853b6e26f8ef470664cc3b6e78ec971ce4ac --- /dev/null +++ b/drivers/net/ibm_newemac/tah.h @@ -0,0 +1,90 @@ +/* + * drivers/net/ibm_newemac/tah.h + * + * Driver for PowerPC 4xx on-chip ethernet controller, TAH support. + * + * Copyright 2004 MontaVista Software, Inc. + * Matt Porter + * + * Copyright (c) 2005 Eugene Surovegin + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. 
+ */ + +#ifndef __IBM_NEWEMAC_TAH_H +#define __IBM_NEWEMAC_TAH_H + +/* TAH */ +struct tah_regs { + u32 revid; + u32 pad[3]; + u32 mr; + u32 ssr0; + u32 ssr1; + u32 ssr2; + u32 ssr3; + u32 ssr4; + u32 ssr5; + u32 tsr; +}; + + +/* TAH device */ +struct tah_instance { + struct tah_regs __iomem *base; + + /* Only one EMAC whacks us at a time */ + struct mutex lock; + + /* number of EMACs using this TAH */ + int users; + + /* OF device instance */ + struct of_device *ofdev; +}; + + +/* TAH engine */ +#define TAH_MR_CVR 0x80000000 +#define TAH_MR_SR 0x40000000 +#define TAH_MR_ST_256 0x01000000 +#define TAH_MR_ST_512 0x02000000 +#define TAH_MR_ST_768 0x03000000 +#define TAH_MR_ST_1024 0x04000000 +#define TAH_MR_ST_1280 0x05000000 +#define TAH_MR_ST_1536 0x06000000 +#define TAH_MR_TFS_16KB 0x00000000 +#define TAH_MR_TFS_2KB 0x00200000 +#define TAH_MR_TFS_4KB 0x00400000 +#define TAH_MR_TFS_6KB 0x00600000 +#define TAH_MR_TFS_8KB 0x00800000 +#define TAH_MR_TFS_10KB 0x00a00000 +#define TAH_MR_DTFP 0x00100000 +#define TAH_MR_DIG 0x00080000 + +#ifdef CONFIG_IBM_NEW_EMAC_TAH + +extern int tah_init(void); +extern void tah_exit(void); +extern int tah_attach(struct of_device *ofdev, int channel); +extern void tah_detach(struct of_device *ofdev, int channel); +extern void tah_reset(struct of_device *ofdev); +extern int tah_get_regs_len(struct of_device *ofdev); +extern void *tah_dump_regs(struct of_device *ofdev, void *buf); + +#else + +# define tah_init() 0 +# define tah_exit() do { } while(0) +# define tah_attach(x,y) (-ENXIO) +# define tah_detach(x,y) do { } while(0) +# define tah_reset(x) do { } while(0) +# define tah_get_regs_len(x) 0 +# define tah_dump_regs(x,buf) (buf) + +#endif /* !CONFIG_IBM_NEW_EMAC_TAH */ + +#endif /* __IBM_NEWEMAC_TAH_H */ diff --git a/drivers/net/ibm_newemac/zmii.c b/drivers/net/ibm_newemac/zmii.c new file mode 100644 index 0000000000000000000000000000000000000000..d06312901848f75b8dcb0b00303e70fcfc9bf2a7 --- /dev/null +++ b/drivers/net/ibm_newemac/zmii.c @@ -0,0 +1,322 @@ +/* + * drivers/net/ibm_newemac/zmii.c + * + * Driver for PowerPC 4xx on-chip ethernet controller, ZMII bridge support. + * + * Copyright (c) 2004, 2005 Zultys Technologies. + * Eugene Surovegin or + * + * Based on original work by + * Armin Kuster + * Copyright 2001 MontaVista Softare Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. 
+ * + */ +#include +#include +#include + +#include "emac.h" +#include "core.h" + +/* ZMIIx_FER */ +#define ZMII_FER_MDI(idx) (0x80000000 >> ((idx) * 4)) +#define ZMII_FER_MDI_ALL (ZMII_FER_MDI(0) | ZMII_FER_MDI(1) | \ + ZMII_FER_MDI(2) | ZMII_FER_MDI(3)) + +#define ZMII_FER_SMII(idx) (0x40000000 >> ((idx) * 4)) +#define ZMII_FER_RMII(idx) (0x20000000 >> ((idx) * 4)) +#define ZMII_FER_MII(idx) (0x10000000 >> ((idx) * 4)) + +/* ZMIIx_SSR */ +#define ZMII_SSR_SCI(idx) (0x40000000 >> ((idx) * 4)) +#define ZMII_SSR_FSS(idx) (0x20000000 >> ((idx) * 4)) +#define ZMII_SSR_SP(idx) (0x10000000 >> ((idx) * 4)) + +/* ZMII only supports MII, RMII and SMII + * we also support autodetection for backward compatibility + */ +static inline int zmii_valid_mode(int mode) +{ + return mode == PHY_MODE_MII || + mode == PHY_MODE_RMII || + mode == PHY_MODE_SMII || + mode == PHY_MODE_NA; +} + +static inline const char *zmii_mode_name(int mode) +{ + switch (mode) { + case PHY_MODE_MII: + return "MII"; + case PHY_MODE_RMII: + return "RMII"; + case PHY_MODE_SMII: + return "SMII"; + default: + BUG(); + } +} + +static inline u32 zmii_mode_mask(int mode, int input) +{ + switch (mode) { + case PHY_MODE_MII: + return ZMII_FER_MII(input); + case PHY_MODE_RMII: + return ZMII_FER_RMII(input); + case PHY_MODE_SMII: + return ZMII_FER_SMII(input); + default: + return 0; + } +} + +int __devinit zmii_attach(struct of_device *ofdev, int input, int *mode) +{ + struct zmii_instance *dev = dev_get_drvdata(&ofdev->dev); + struct zmii_regs *p = dev->base; + + ZMII_DBG(dev, "init(%d, %d)" NL, input, *mode); + + if (!zmii_valid_mode(*mode)) + /* Probably an EMAC connected to RGMII, + * but it still may need ZMII for MDIO so + * we don't fail here. + */ + return 0; + + mutex_lock(&dev->lock); + + /* Autodetect ZMII mode if not specified. + * This is only for backward compatibility with the old driver. + * Please, always specify PHY mode in your board port to avoid + * any surprises. + */ + if (dev->mode == PHY_MODE_NA) { + if (*mode == PHY_MODE_NA) { + u32 r = dev->fer_save; + + ZMII_DBG(dev, "autodetecting mode, FER = 0x%08x" NL, r); + + if (r & (ZMII_FER_MII(0) | ZMII_FER_MII(1))) + dev->mode = PHY_MODE_MII; + else if (r & (ZMII_FER_RMII(0) | ZMII_FER_RMII(1))) + dev->mode = PHY_MODE_RMII; + else + dev->mode = PHY_MODE_SMII; + } else + dev->mode = *mode; + + printk(KERN_NOTICE "%s: bridge in %s mode\n", + ofdev->node->full_name, zmii_mode_name(dev->mode)); + } else { + /* All inputs must use the same mode */ + if (*mode != PHY_MODE_NA && *mode != dev->mode) { + printk(KERN_ERR + "%s: invalid mode %d specified for input %d\n", + ofdev->node->full_name, *mode, input); + mutex_unlock(&dev->lock); + return -EINVAL; + } + } + + /* Report back correct PHY mode, + * it may be used during PHY initialization. 
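+	 *
+	 * (Editorial note, not in the original patch:)  e.g. an EMAC that
+	 * attaches with *mode == PHY_MODE_NA on a board whose firmware left
+	 * FER configured for RMII gets PHY_MODE_RMII written back here and
+	 * can set up its PHY accordingly.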
+ */ + *mode = dev->mode; + + /* Enable this input */ + out_be32(&p->fer, in_be32(&p->fer) | zmii_mode_mask(dev->mode, input)); + ++dev->users; + + mutex_unlock(&dev->lock); + + return 0; +} + +void zmii_get_mdio(struct of_device *ofdev, int input) +{ + struct zmii_instance *dev = dev_get_drvdata(&ofdev->dev); + u32 fer; + + ZMII_DBG2(dev, "get_mdio(%d)" NL, input); + + mutex_lock(&dev->lock); + + fer = in_be32(&dev->base->fer) & ~ZMII_FER_MDI_ALL; + out_be32(&dev->base->fer, fer | ZMII_FER_MDI(input)); +} + +void zmii_put_mdio(struct of_device *ofdev, int input) +{ + struct zmii_instance *dev = dev_get_drvdata(&ofdev->dev); + + ZMII_DBG2(dev, "put_mdio(%d)" NL, input); + mutex_unlock(&dev->lock); +} + + +void zmii_set_speed(struct of_device *ofdev, int input, int speed) +{ + struct zmii_instance *dev = dev_get_drvdata(&ofdev->dev); + u32 ssr; + + mutex_lock(&dev->lock); + + ssr = in_be32(&dev->base->ssr); + + ZMII_DBG(dev, "speed(%d, %d)" NL, input, speed); + + if (speed == SPEED_100) + ssr |= ZMII_SSR_SP(input); + else + ssr &= ~ZMII_SSR_SP(input); + + out_be32(&dev->base->ssr, ssr); + + mutex_unlock(&dev->lock); +} + +void __devexit zmii_detach(struct of_device *ofdev, int input) +{ + struct zmii_instance *dev = dev_get_drvdata(&ofdev->dev); + + BUG_ON(!dev || dev->users == 0); + + mutex_lock(&dev->lock); + + ZMII_DBG(dev, "detach(%d)" NL, input); + + /* Disable this input */ + out_be32(&dev->base->fer, + in_be32(&dev->base->fer) & ~zmii_mode_mask(dev->mode, input)); + + --dev->users; + + mutex_unlock(&dev->lock); +} + +int zmii_get_regs_len(struct of_device *ofdev) +{ + return sizeof(struct emac_ethtool_regs_subhdr) + + sizeof(struct zmii_regs); +} + +void *zmii_dump_regs(struct of_device *ofdev, void *buf) +{ + struct zmii_instance *dev = dev_get_drvdata(&ofdev->dev); + struct emac_ethtool_regs_subhdr *hdr = buf; + struct zmii_regs *regs = (struct zmii_regs *)(hdr + 1); + + hdr->version = 0; + hdr->index = 0; /* for now, are there chips with more than one + * zmii ? 
if yes, then we'll add a cell_index + * like we do for emac + */ + memcpy_fromio(regs, dev->base, sizeof(struct zmii_regs)); + return regs + 1; +} + +static int __devinit zmii_probe(struct of_device *ofdev, + const struct of_device_id *match) +{ + struct device_node *np = ofdev->node; + struct zmii_instance *dev; + struct resource regs; + int rc; + + rc = -ENOMEM; + dev = kzalloc(sizeof(struct zmii_instance), GFP_KERNEL); + if (dev == NULL) { + printk(KERN_ERR "%s: could not allocate ZMII device!\n", + np->full_name); + goto err_gone; + } + + mutex_init(&dev->lock); + dev->ofdev = ofdev; + dev->mode = PHY_MODE_NA; + + rc = -ENXIO; + if (of_address_to_resource(np, 0, ®s)) { + printk(KERN_ERR "%s: Can't get registers address\n", + np->full_name); + goto err_free; + } + + rc = -ENOMEM; + dev->base = (struct zmii_regs *)ioremap(regs.start, + sizeof(struct zmii_regs)); + if (dev->base == NULL) { + printk(KERN_ERR "%s: Can't map device registers!\n", + np->full_name); + goto err_free; + } + + /* We may need FER value for autodetection later */ + dev->fer_save = in_be32(&dev->base->fer); + + /* Disable all inputs by default */ + out_be32(&dev->base->fer, 0); + + printk(KERN_INFO + "ZMII %s initialized\n", ofdev->node->full_name); + wmb(); + dev_set_drvdata(&ofdev->dev, dev); + + return 0; + + err_free: + kfree(dev); + err_gone: + return rc; +} + +static int __devexit zmii_remove(struct of_device *ofdev) +{ + struct zmii_instance *dev = dev_get_drvdata(&ofdev->dev); + + dev_set_drvdata(&ofdev->dev, NULL); + + WARN_ON(dev->users != 0); + + iounmap(dev->base); + kfree(dev); + + return 0; +} + +static struct of_device_id zmii_match[] = +{ + { + .compatible = "ibm,zmii", + }, + /* For backward compat with old DT */ + { + .type = "emac-zmii", + }, + {}, +}; + +static struct of_platform_driver zmii_driver = { + .name = "emac-zmii", + .match_table = zmii_match, + + .probe = zmii_probe, + .remove = zmii_remove, +}; + +int __init zmii_init(void) +{ + return of_register_platform_driver(&zmii_driver); +} + +void zmii_exit(void) +{ + of_unregister_platform_driver(&zmii_driver); +} diff --git a/drivers/net/ibm_newemac/zmii.h b/drivers/net/ibm_newemac/zmii.h new file mode 100644 index 0000000000000000000000000000000000000000..82a9968b1f7448cb9ca8b9bd1d170e3dfb4228aa --- /dev/null +++ b/drivers/net/ibm_newemac/zmii.h @@ -0,0 +1,73 @@ +/* + * drivers/net/ibm_newemac/zmii.h + * + * Driver for PowerPC 4xx on-chip ethernet controller, ZMII bridge support. + * + * Copyright (c) 2004, 2005 Zultys Technologies. + * Eugene Surovegin or + * + * Based on original work by + * Armin Kuster + * Copyright 2001 MontaVista Softare Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. 
+ * + */ +#ifndef __IBM_NEWEMAC_ZMII_H +#define __IBM_NEWEMAC_ZMII_H + +/* ZMII bridge registers */ +struct zmii_regs { + u32 fer; /* Function enable reg */ + u32 ssr; /* Speed select reg */ + u32 smiirs; /* SMII status reg */ +}; + +/* ZMII device */ +struct zmii_instance { + struct zmii_regs __iomem *base; + + /* Only one EMAC whacks us at a time */ + struct mutex lock; + + /* subset of PHY_MODE_XXXX */ + int mode; + + /* number of EMACs using this ZMII bridge */ + int users; + + /* FER value left by firmware */ + u32 fer_save; + + /* OF device instance */ + struct of_device *ofdev; +}; + +#ifdef CONFIG_IBM_NEW_EMAC_ZMII + +extern int zmii_init(void); +extern void zmii_exit(void); +extern int zmii_attach(struct of_device *ofdev, int input, int *mode); +extern void zmii_detach(struct of_device *ofdev, int input); +extern void zmii_get_mdio(struct of_device *ofdev, int input); +extern void zmii_put_mdio(struct of_device *ofdev, int input); +extern void zmii_set_speed(struct of_device *ofdev, int input, int speed); +extern int zmii_get_regs_len(struct of_device *ocpdev); +extern void *zmii_dump_regs(struct of_device *ofdev, void *buf); + +#else +# define zmii_init() 0 +# define zmii_exit() do { } while(0) +# define zmii_attach(x,y,z) (-ENXIO) +# define zmii_detach(x,y) do { } while(0) +# define zmii_get_mdio(x,y) do { } while(0) +# define zmii_put_mdio(x,y) do { } while(0) +# define zmii_set_speed(x,y,z) do { } while(0) +# define zmii_get_regs_len(x) 0 +# define zmii_dump_regs(x,buf) (buf) +#endif /* !CONFIG_IBM_NEW_EMAC_ZMII */ + +#endif /* __IBM_NEWEMAC_ZMII_H */ diff --git a/drivers/net/ibmlana.c b/drivers/net/ibmlana.c index fe85d6fcba33aa022dd0f67c21ac346f2e5d2ce1..91d83aca6bc784e886ca15c508dc9125251c2963 100644 --- a/drivers/net/ibmlana.c +++ b/drivers/net/ibmlana.c @@ -591,7 +591,7 @@ static void irqrx_handler(struct net_device *dev) skb = dev_alloc_skb(rda.length + 2); if (skb == NULL) - priv->stat.rx_dropped++; + dev->stats.rx_dropped++; else { /* copy out data */ @@ -606,8 +606,8 @@ static void irqrx_handler(struct net_device *dev) /* bookkeeping */ dev->last_rx = jiffies; - priv->stat.rx_packets++; - priv->stat.rx_bytes += rda.length; + dev->stats.rx_packets++; + dev->stats.rx_bytes += rda.length; /* pass to the upper layers */ netif_rx(skb); @@ -617,11 +617,11 @@ static void irqrx_handler(struct net_device *dev) /* otherwise check error status bits and increase statistics */ else { - priv->stat.rx_errors++; + dev->stats.rx_errors++; if (rda.status & RCREG_FAER) - priv->stat.rx_frame_errors++; + dev->stats.rx_frame_errors++; if (rda.status & RCREG_CRCR) - priv->stat.rx_crc_errors++; + dev->stats.rx_crc_errors++; } /* descriptor processed, will become new last descriptor in queue */ @@ -656,8 +656,8 @@ static void irqtx_handler(struct net_device *dev) memcpy_fromio(&tda, priv->base + priv->tdastart + (priv->currtxdescr * sizeof(tda_t)), sizeof(tda_t)); /* update statistics */ - priv->stat.tx_packets++; - priv->stat.tx_bytes += tda.length; + dev->stats.tx_packets++; + dev->stats.tx_bytes += tda.length; /* update our pointers */ priv->txused[priv->currtxdescr] = 0; @@ -680,15 +680,15 @@ static void irqtxerr_handler(struct net_device *dev) memcpy_fromio(&tda, priv->base + priv->tdastart + (priv->currtxdescr * sizeof(tda_t)), sizeof(tda_t)); /* update statistics */ - priv->stat.tx_errors++; + dev->stats.tx_errors++; if (tda.status & (TCREG_NCRS | TCREG_CRSL)) - priv->stat.tx_carrier_errors++; + dev->stats.tx_carrier_errors++; if (tda.status & TCREG_EXC) - 
priv->stat.tx_aborted_errors++; + dev->stats.tx_aborted_errors++; if (tda.status & TCREG_OWC) - priv->stat.tx_window_errors++; + dev->stats.tx_window_errors++; if (tda.status & TCREG_FU) - priv->stat.tx_fifo_errors++; + dev->stats.tx_fifo_errors++; /* update our pointers */ priv->txused[priv->currtxdescr] = 0; @@ -824,7 +824,7 @@ static int ibmlana_tx(struct sk_buff *skb, struct net_device *dev) if (priv->txusedcnt >= TXBUFCNT) { retval = -EIO; - priv->stat.tx_dropped++; + dev->stats.tx_dropped++; goto tx_done; } @@ -876,14 +876,6 @@ static int ibmlana_tx(struct sk_buff *skb, struct net_device *dev) return retval; } -/* return pointer to Ethernet statistics */ - -static struct net_device_stats *ibmlana_stats(struct net_device *dev) -{ - ibmlana_priv *priv = netdev_priv(dev); - return &priv->stat; -} - /* switch receiver mode. */ static void ibmlana_set_multicast_list(struct net_device *dev) @@ -906,8 +898,7 @@ static int ibmlana_probe(struct net_device *dev) int base = 0, irq = 0, iobase = 0, memlen = 0; ibmlana_priv *priv; ibmlana_medium medium; - - SET_MODULE_OWNER(dev); + DECLARE_MAC_BUF(mac); /* can't work without an MCA bus ;-) */ if (MCA_bus == 0) @@ -980,7 +971,6 @@ static int ibmlana_probe(struct net_device *dev) dev->stop = ibmlana_close; dev->hard_start_xmit = ibmlana_tx; dev->do_ioctl = NULL; - dev->get_stats = ibmlana_stats; dev->set_multicast_list = ibmlana_set_multicast_list; dev->flags |= IFF_MULTICAST; @@ -992,11 +982,10 @@ static int ibmlana_probe(struct net_device *dev) /* print config */ printk(KERN_INFO "%s: IRQ %d, I/O %#lx, memory %#lx-%#lx, " - "MAC address %02x:%02x:%02x:%02x:%02x:%02x.\n", + "MAC address %s.\n", dev->name, priv->realirq, dev->base_addr, dev->mem_start, dev->mem_end - 1, - dev->dev_addr[0], dev->dev_addr[1], dev->dev_addr[2], - dev->dev_addr[3], dev->dev_addr[4], dev->dev_addr[5]); + print_mac(mac, dev->dev_addr)); printk(KERN_INFO "%s: %s medium\n", dev->name, MediaNames[priv->medium]); /* reset board */ diff --git a/drivers/net/ibmlana.h b/drivers/net/ibmlana.h index 6b58bab9e3086fd4b1826eb99d0dd3a7b00a1f93..aa3ddbdee4bbc76391d8f6be203d5f576ddf5d1a 100644 --- a/drivers/net/ibmlana.h +++ b/drivers/net/ibmlana.h @@ -26,7 +26,6 @@ typedef enum { typedef struct { unsigned int slot; /* MCA-Slot-# */ - struct net_device_stats stat; /* packet statistics */ int realirq; /* memorizes actual IRQ, even when currently not allocated */ ibmlana_medium medium; /* physical cannector */ diff --git a/drivers/net/ibmveth.c b/drivers/net/ibmveth.c index acba90f1638e7596edc6fe7e14bc11240f029197..4ac161e1ca12674f4ab4614d04f9cd8e60f6b567 100644 --- a/drivers/net/ibmveth.c +++ b/drivers/net/ibmveth.c @@ -28,7 +28,6 @@ /**************************************************************************/ /* TODO: - - remove frag processing code - no longer needed - add support for sysfs - possibly remove procfs support */ @@ -47,6 +46,9 @@ #include #include #include +#include +#include +#include #include #include #include @@ -83,9 +85,8 @@ static int ibmveth_open(struct net_device *dev); static int ibmveth_close(struct net_device *dev); static int ibmveth_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd); -static int ibmveth_poll(struct net_device *dev, int *budget); +static int ibmveth_poll(struct napi_struct *napi, int budget); static int ibmveth_start_xmit(struct sk_buff *skb, struct net_device *dev); -static struct net_device_stats *ibmveth_get_stats(struct net_device *dev); static void ibmveth_set_multicast_list(struct net_device *dev); static int 
ibmveth_change_mtu(struct net_device *dev, int new_mtu); static void ibmveth_proc_register_driver(void); @@ -97,7 +98,7 @@ static void ibmveth_rxq_harvest_buffer(struct ibmveth_adapter *adapter); static struct kobj_type ktype_veth_pool; #ifdef CONFIG_PROC_FS -#define IBMVETH_PROC_DIR "net/ibmveth" +#define IBMVETH_PROC_DIR "ibmveth" static struct proc_dir_entry *ibmveth_proc_dir; #endif @@ -110,20 +111,49 @@ MODULE_DESCRIPTION("IBM i/pSeries Virtual Ethernet Driver"); MODULE_LICENSE("GPL"); MODULE_VERSION(ibmveth_driver_version); +struct ibmveth_stat { + char name[ETH_GSTRING_LEN]; + int offset; +}; + +#define IBMVETH_STAT_OFF(stat) offsetof(struct ibmveth_adapter, stat) +#define IBMVETH_GET_STAT(a, off) *((u64 *)(((unsigned long)(a)) + off)) + +struct ibmveth_stat ibmveth_stats[] = { + { "replenish_task_cycles", IBMVETH_STAT_OFF(replenish_task_cycles) }, + { "replenish_no_mem", IBMVETH_STAT_OFF(replenish_no_mem) }, + { "replenish_add_buff_failure", IBMVETH_STAT_OFF(replenish_add_buff_failure) }, + { "replenish_add_buff_success", IBMVETH_STAT_OFF(replenish_add_buff_success) }, + { "rx_invalid_buffer", IBMVETH_STAT_OFF(rx_invalid_buffer) }, + { "rx_no_buffer", IBMVETH_STAT_OFF(rx_no_buffer) }, + { "tx_map_failed", IBMVETH_STAT_OFF(tx_map_failed) }, + { "tx_send_failed", IBMVETH_STAT_OFF(tx_send_failed) }, +}; + /* simple methods of getting data from the current rxq entry */ +static inline u32 ibmveth_rxq_flags(struct ibmveth_adapter *adapter) +{ + return adapter->rx_queue.queue_addr[adapter->rx_queue.index].flags_off; +} + +static inline int ibmveth_rxq_toggle(struct ibmveth_adapter *adapter) +{ + return (ibmveth_rxq_flags(adapter) & IBMVETH_RXQ_TOGGLE) >> IBMVETH_RXQ_TOGGLE_SHIFT; +} + static inline int ibmveth_rxq_pending_buffer(struct ibmveth_adapter *adapter) { - return (adapter->rx_queue.queue_addr[adapter->rx_queue.index].toggle == adapter->rx_queue.toggle); + return (ibmveth_rxq_toggle(adapter) == adapter->rx_queue.toggle); } static inline int ibmveth_rxq_buffer_valid(struct ibmveth_adapter *adapter) { - return (adapter->rx_queue.queue_addr[adapter->rx_queue.index].valid); + return (ibmveth_rxq_flags(adapter) & IBMVETH_RXQ_VALID); } static inline int ibmveth_rxq_frame_offset(struct ibmveth_adapter *adapter) { - return (adapter->rx_queue.queue_addr[adapter->rx_queue.index].offset); + return (ibmveth_rxq_flags(adapter) & IBMVETH_RXQ_OFF_MASK); } static inline int ibmveth_rxq_frame_length(struct ibmveth_adapter *adapter) @@ -131,6 +161,11 @@ static inline int ibmveth_rxq_frame_length(struct ibmveth_adapter *adapter) return (adapter->rx_queue.queue_addr[adapter->rx_queue.index].length); } +static inline int ibmveth_rxq_csum_good(struct ibmveth_adapter *adapter) +{ + return (ibmveth_rxq_flags(adapter) & IBMVETH_RXQ_CSUM_GOOD); +} + /* setup the initial settings for a buffer pool */ static void ibmveth_init_buffer_pool(struct ibmveth_buff_pool *pool, u32 pool_index, u32 pool_size, u32 buff_size, u32 pool_active) { @@ -228,9 +263,7 @@ static void ibmveth_replenish_buffer_pool(struct ibmveth_adapter *adapter, struc correlator = ((u64)pool->index << 32) | index; *(u64*)skb->data = correlator; - desc.desc = 0; - desc.fields.valid = 1; - desc.fields.length = pool->buff_size; + desc.fields.flags_len = IBMVETH_BUF_VALID | pool->buff_size; desc.fields.address = dma_addr; lpar_rc = h_add_logical_lan_buffer(adapter->vdev->unit_address, desc.desc); @@ -371,9 +404,8 @@ static void ibmveth_rxq_recycle_buffer(struct ibmveth_adapter *adapter) return; } - desc.desc = 0; - desc.fields.valid = 1; - 
desc.fields.length = adapter->rx_buff_pool[pool].buff_size; + desc.fields.flags_len = IBMVETH_BUF_VALID | + adapter->rx_buff_pool[pool].buff_size; desc.fields.address = adapter->rx_buff_pool[pool].dma_addr[index]; lpar_rc = h_add_logical_lan_buffer(adapter->vdev->unit_address, desc.desc); @@ -480,6 +512,8 @@ static int ibmveth_open(struct net_device *netdev) ibmveth_debug_printk("open starting\n"); + napi_enable(&adapter->napi); + for(i = 0; irx_buff_pool[i].size; @@ -489,6 +523,7 @@ static int ibmveth_open(struct net_device *netdev) if(!adapter->buffer_list_addr || !adapter->filter_list_addr) { ibmveth_error_printk("unable to allocate filter or buffer list pages\n"); ibmveth_cleanup(adapter); + napi_disable(&adapter->napi); return -ENOMEM; } @@ -498,6 +533,7 @@ static int ibmveth_open(struct net_device *netdev) if(!adapter->rx_queue.queue_addr) { ibmveth_error_printk("unable to allocate rx queue pages\n"); ibmveth_cleanup(adapter); + napi_disable(&adapter->napi); return -ENOMEM; } @@ -514,6 +550,7 @@ static int ibmveth_open(struct net_device *netdev) (dma_mapping_error(adapter->rx_queue.queue_dma))) { ibmveth_error_printk("unable to map filter or buffer list pages\n"); ibmveth_cleanup(adapter); + napi_disable(&adapter->napi); return -ENOMEM; } @@ -524,9 +561,7 @@ static int ibmveth_open(struct net_device *netdev) memcpy(&mac_address, netdev->dev_addr, netdev->addr_len); mac_address = mac_address >> 16; - rxq_desc.desc = 0; - rxq_desc.fields.valid = 1; - rxq_desc.fields.length = adapter->rx_queue.queue_len; + rxq_desc.fields.flags_len = IBMVETH_BUF_VALID | adapter->rx_queue.queue_len; rxq_desc.fields.address = adapter->rx_queue.queue_dma; ibmveth_debug_printk("buffer list @ 0x%p\n", adapter->buffer_list_addr); @@ -545,6 +580,7 @@ static int ibmveth_open(struct net_device *netdev) rxq_desc.desc, mac_address); ibmveth_cleanup(adapter); + napi_disable(&adapter->napi); return -ENONET; } @@ -555,6 +591,7 @@ static int ibmveth_open(struct net_device *netdev) ibmveth_error_printk("unable to alloc pool\n"); adapter->rx_buff_pool[i].active = 0; ibmveth_cleanup(adapter); + napi_disable(&adapter->napi); return -ENOMEM ; } } @@ -567,6 +604,7 @@ static int ibmveth_open(struct net_device *netdev) } while (H_IS_LONG_BUSY(rc) || (rc == H_BUSY)); ibmveth_cleanup(adapter); + napi_disable(&adapter->napi); return rc; } @@ -587,6 +625,8 @@ static int ibmveth_close(struct net_device *netdev) ibmveth_debug_printk("close starting\n"); + napi_disable(&adapter->napi); + if (!adapter->pool_config) netif_stop_queue(netdev); @@ -634,12 +674,164 @@ static u32 netdev_get_link(struct net_device *dev) { return 1; } +static void ibmveth_set_rx_csum_flags(struct net_device *dev, u32 data) +{ + struct ibmveth_adapter *adapter = dev->priv; + + if (data) + adapter->rx_csum = 1; + else { + /* + * Since the ibmveth firmware interface does not have the concept of + * separate tx/rx checksum offload enable, if rx checksum is disabled + * we also have to disable tx checksum offload. Once we disable rx + * checksum offload, we are no longer allowed to send tx buffers that + * are not properly checksummed. 
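+	 *
+	 * (Editorial note, not in the original patch:)  the code keeps the
+	 * invariant "tx offload enabled implies rx offload enabled":
+	 * ibmveth_set_tx_csum_flags() below turns rx_csum back on whenever
+	 * tx checksumming is enabled, while this path clears NETIF_F_IP_CSUM
+	 * when rx checksumming is turned off.  Changes that actually need the
+	 * firmware attribute go through h_illan_attributes() in
+	 * ibmveth_set_csum_offload(), which restarts the device if it is
+	 * running.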
+ */ + adapter->rx_csum = 0; + dev->features &= ~NETIF_F_IP_CSUM; + } +} + +static void ibmveth_set_tx_csum_flags(struct net_device *dev, u32 data) +{ + struct ibmveth_adapter *adapter = dev->priv; + + if (data) { + dev->features |= NETIF_F_IP_CSUM; + adapter->rx_csum = 1; + } else + dev->features &= ~NETIF_F_IP_CSUM; +} + +static int ibmveth_set_csum_offload(struct net_device *dev, u32 data, + void (*done) (struct net_device *, u32)) +{ + struct ibmveth_adapter *adapter = dev->priv; + u64 set_attr, clr_attr, ret_attr; + long ret; + int rc1 = 0, rc2 = 0; + int restart = 0; + + if (netif_running(dev)) { + restart = 1; + adapter->pool_config = 1; + ibmveth_close(dev); + adapter->pool_config = 0; + } + + set_attr = 0; + clr_attr = 0; + + if (data) + set_attr = IBMVETH_ILLAN_IPV4_TCP_CSUM; + else + clr_attr = IBMVETH_ILLAN_IPV4_TCP_CSUM; + + ret = h_illan_attributes(adapter->vdev->unit_address, 0, 0, &ret_attr); + + if (ret == H_SUCCESS && !(ret_attr & IBMVETH_ILLAN_ACTIVE_TRUNK) && + !(ret_attr & IBMVETH_ILLAN_TRUNK_PRI_MASK) && + (ret_attr & IBMVETH_ILLAN_PADDED_PKT_CSUM)) { + ret = h_illan_attributes(adapter->vdev->unit_address, clr_attr, + set_attr, &ret_attr); + + if (ret != H_SUCCESS) { + rc1 = -EIO; + ibmveth_error_printk("unable to change checksum offload settings." + " %d rc=%ld\n", data, ret); + + ret = h_illan_attributes(adapter->vdev->unit_address, + set_attr, clr_attr, &ret_attr); + } else + done(dev, data); + } else { + rc1 = -EIO; + ibmveth_error_printk("unable to change checksum offload settings." + " %d rc=%ld ret_attr=%lx\n", data, ret, ret_attr); + } + + if (restart) + rc2 = ibmveth_open(dev); + + return rc1 ? rc1 : rc2; +} + +static int ibmveth_set_rx_csum(struct net_device *dev, u32 data) +{ + struct ibmveth_adapter *adapter = dev->priv; + + if ((data && adapter->rx_csum) || (!data && !adapter->rx_csum)) + return 0; + + return ibmveth_set_csum_offload(dev, data, ibmveth_set_rx_csum_flags); +} + +static int ibmveth_set_tx_csum(struct net_device *dev, u32 data) +{ + struct ibmveth_adapter *adapter = dev->priv; + int rc = 0; + + if (data && (dev->features & NETIF_F_IP_CSUM)) + return 0; + if (!data && !(dev->features & NETIF_F_IP_CSUM)) + return 0; + + if (data && !adapter->rx_csum) + rc = ibmveth_set_csum_offload(dev, data, ibmveth_set_tx_csum_flags); + else + ibmveth_set_tx_csum_flags(dev, data); + + return rc; +} + +static u32 ibmveth_get_rx_csum(struct net_device *dev) +{ + struct ibmveth_adapter *adapter = dev->priv; + return adapter->rx_csum; +} + +static void ibmveth_get_strings(struct net_device *dev, u32 stringset, u8 *data) +{ + int i; + + if (stringset != ETH_SS_STATS) + return; + + for (i = 0; i < ARRAY_SIZE(ibmveth_stats); i++, data += ETH_GSTRING_LEN) + memcpy(data, ibmveth_stats[i].name, ETH_GSTRING_LEN); +} + +static int ibmveth_get_sset_count(struct net_device *dev, int sset) +{ + switch (sset) { + case ETH_SS_STATS: + return ARRAY_SIZE(ibmveth_stats); + default: + return -EOPNOTSUPP; + } +} + +static void ibmveth_get_ethtool_stats(struct net_device *dev, + struct ethtool_stats *stats, u64 *data) +{ + int i; + struct ibmveth_adapter *adapter = dev->priv; + + for (i = 0; i < ARRAY_SIZE(ibmveth_stats); i++) + data[i] = IBMVETH_GET_STAT(adapter, ibmveth_stats[i].offset); +} + static const struct ethtool_ops netdev_ethtool_ops = { .get_drvinfo = netdev_get_drvinfo, .get_settings = netdev_get_settings, .get_link = netdev_get_link, - .get_sg = ethtool_op_get_sg, - .get_tx_csum = ethtool_op_get_tx_csum, + .set_tx_csum = ibmveth_set_tx_csum, + .get_rx_csum = 
ibmveth_get_rx_csum, + .set_rx_csum = ibmveth_set_rx_csum, + .get_strings = ibmveth_get_strings, + .get_sset_count = ibmveth_get_sset_count, + .get_ethtool_stats = ibmveth_get_ethtool_stats, }; static int ibmveth_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) @@ -652,9 +844,8 @@ static int ibmveth_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) static int ibmveth_start_xmit(struct sk_buff *skb, struct net_device *netdev) { struct ibmveth_adapter *adapter = netdev->priv; - union ibmveth_buf_desc desc[IbmVethMaxSendFrags]; + union ibmveth_buf_desc desc; unsigned long lpar_rc; - int nfrags = 0, curfrag; unsigned long correlator; unsigned long flags; unsigned int retry_count; @@ -664,83 +855,48 @@ static int ibmveth_start_xmit(struct sk_buff *skb, struct net_device *netdev) unsigned int tx_send_failed = 0; unsigned int tx_map_failed = 0; + desc.fields.flags_len = IBMVETH_BUF_VALID | skb->len; + desc.fields.address = dma_map_single(&adapter->vdev->dev, skb->data, + skb->len, DMA_TO_DEVICE); - if ((skb_shinfo(skb)->nr_frags + 1) > IbmVethMaxSendFrags) { + if (skb->ip_summed == CHECKSUM_PARTIAL && + ip_hdr(skb)->protocol != IPPROTO_TCP && skb_checksum_help(skb)) { + ibmveth_error_printk("tx: failed to checksum packet\n"); tx_dropped++; goto out; } - memset(&desc, 0, sizeof(desc)); + if (skb->ip_summed == CHECKSUM_PARTIAL) { + unsigned char *buf = skb_transport_header(skb) + skb->csum_offset; - /* nfrags = number of frags after the initial fragment */ - nfrags = skb_shinfo(skb)->nr_frags; + desc.fields.flags_len |= (IBMVETH_BUF_NO_CSUM | IBMVETH_BUF_CSUM_GOOD); - if(nfrags) - adapter->tx_multidesc_send++; - - /* map the initial fragment */ - desc[0].fields.length = nfrags ? skb->len - skb->data_len : skb->len; - desc[0].fields.address = dma_map_single(&adapter->vdev->dev, skb->data, - desc[0].fields.length, DMA_TO_DEVICE); - desc[0].fields.valid = 1; + /* Need to zero out the checksum */ + buf[0] = 0; + buf[1] = 0; + } - if(dma_mapping_error(desc[0].fields.address)) { - ibmveth_error_printk("tx: unable to map initial fragment\n"); + if (dma_mapping_error(desc.fields.address)) { + ibmveth_error_printk("tx: unable to map xmit buffer\n"); tx_map_failed++; tx_dropped++; goto out; } - curfrag = nfrags; - - /* map fragments past the initial portion if there are any */ - while(curfrag--) { - skb_frag_t *frag = &skb_shinfo(skb)->frags[curfrag]; - desc[curfrag+1].fields.address - = dma_map_single(&adapter->vdev->dev, - page_address(frag->page) + frag->page_offset, - frag->size, DMA_TO_DEVICE); - desc[curfrag+1].fields.length = frag->size; - desc[curfrag+1].fields.valid = 1; - - if(dma_mapping_error(desc[curfrag+1].fields.address)) { - ibmveth_error_printk("tx: unable to map fragment %d\n", curfrag); - tx_map_failed++; - tx_dropped++; - /* Free all the mappings we just created */ - while(curfrag < nfrags) { - dma_unmap_single(&adapter->vdev->dev, - desc[curfrag+1].fields.address, - desc[curfrag+1].fields.length, - DMA_TO_DEVICE); - curfrag++; - } - goto out; - } - } - /* send the frame. 
Arbitrarily set retrycount to 1024 */ correlator = 0; retry_count = 1024; do { lpar_rc = h_send_logical_lan(adapter->vdev->unit_address, - desc[0].desc, - desc[1].desc, - desc[2].desc, - desc[3].desc, - desc[4].desc, - desc[5].desc, - correlator, - &correlator); + desc.desc, 0, 0, 0, 0, 0, + correlator, &correlator); } while ((lpar_rc == H_BUSY) && (retry_count--)); if(lpar_rc != H_SUCCESS && lpar_rc != H_DROPPED) { - int i; ibmveth_error_printk("tx: h_send_logical_lan failed with rc=%ld\n", lpar_rc); - for(i = 0; i < 6; i++) { - ibmveth_error_printk("tx: desc[%i] valid=%d, len=%d, address=0x%d\n", i, - desc[i].fields.valid, desc[i].fields.length, desc[i].fields.address); - } + ibmveth_error_printk("tx: valid=%d, len=%d, address=0x%08x\n", + (desc.fields.flags_len & IBMVETH_BUF_VALID) ? 1 : 0, + skb->len, desc.fields.address); tx_send_failed++; tx_dropped++; } else { @@ -749,16 +905,13 @@ static int ibmveth_start_xmit(struct sk_buff *skb, struct net_device *netdev) netdev->trans_start = jiffies; } - do { - dma_unmap_single(&adapter->vdev->dev, - desc[nfrags].fields.address, - desc[nfrags].fields.length, DMA_TO_DEVICE); - } while(--nfrags >= 0); + dma_unmap_single(&adapter->vdev->dev, desc.fields.address, + skb->len, DMA_TO_DEVICE); out: spin_lock_irqsave(&adapter->stats_lock, flags); - adapter->stats.tx_dropped += tx_dropped; - adapter->stats.tx_bytes += tx_bytes; - adapter->stats.tx_packets += tx_packets; + netdev->stats.tx_dropped += tx_dropped; + netdev->stats.tx_bytes += tx_bytes; + netdev->stats.tx_packets += tx_packets; adapter->tx_send_failed += tx_send_failed; adapter->tx_map_failed += tx_map_failed; spin_unlock_irqrestore(&adapter->stats_lock, flags); @@ -767,80 +920,72 @@ out: spin_lock_irqsave(&adapter->stats_lock, flags); return 0; } -static int ibmveth_poll(struct net_device *netdev, int *budget) +static int ibmveth_poll(struct napi_struct *napi, int budget) { - struct ibmveth_adapter *adapter = netdev->priv; - int max_frames_to_process = netdev->quota; + struct ibmveth_adapter *adapter = container_of(napi, struct ibmveth_adapter, napi); + struct net_device *netdev = adapter->netdev; int frames_processed = 0; - int more_work = 1; unsigned long lpar_rc; restart_poll: do { - struct net_device *netdev = adapter->netdev; + struct sk_buff *skb; - if(ibmveth_rxq_pending_buffer(adapter)) { - struct sk_buff *skb; + if (!ibmveth_rxq_pending_buffer(adapter)) + break; - rmb(); + rmb(); + if (!ibmveth_rxq_buffer_valid(adapter)) { + wmb(); /* suggested by larson1 */ + adapter->rx_invalid_buffer++; + ibmveth_debug_printk("recycling invalid buffer\n"); + ibmveth_rxq_recycle_buffer(adapter); + } else { + int length = ibmveth_rxq_frame_length(adapter); + int offset = ibmveth_rxq_frame_offset(adapter); + int csum_good = ibmveth_rxq_csum_good(adapter); - if(!ibmveth_rxq_buffer_valid(adapter)) { - wmb(); /* suggested by larson1 */ - adapter->rx_invalid_buffer++; - ibmveth_debug_printk("recycling invalid buffer\n"); - ibmveth_rxq_recycle_buffer(adapter); - } else { - int length = ibmveth_rxq_frame_length(adapter); - int offset = ibmveth_rxq_frame_offset(adapter); - skb = ibmveth_rxq_get_buffer(adapter); + skb = ibmveth_rxq_get_buffer(adapter); + if (csum_good) + skb->ip_summed = CHECKSUM_UNNECESSARY; - ibmveth_rxq_harvest_buffer(adapter); + ibmveth_rxq_harvest_buffer(adapter); - skb_reserve(skb, offset); - skb_put(skb, length); - skb->protocol = eth_type_trans(skb, netdev); + skb_reserve(skb, offset); + skb_put(skb, length); + skb->protocol = eth_type_trans(skb, netdev); - netif_receive_skb(skb); 
/* send it up */ + netif_receive_skb(skb); /* send it up */ - adapter->stats.rx_packets++; - adapter->stats.rx_bytes += length; - frames_processed++; - netdev->last_rx = jiffies; - } - } else { - more_work = 0; + netdev->stats.rx_packets++; + netdev->stats.rx_bytes += length; + frames_processed++; + netdev->last_rx = jiffies; } - } while(more_work && (frames_processed < max_frames_to_process)); + } while (frames_processed < budget); ibmveth_replenish_task(adapter); - if(more_work) { - /* more work to do - return that we are not done yet */ - netdev->quota -= frames_processed; - *budget -= frames_processed; - return 1; - } - - /* we think we are done - reenable interrupts, then check once more to make sure we are done */ - lpar_rc = h_vio_signal(adapter->vdev->unit_address, VIO_IRQ_ENABLE); + if (frames_processed < budget) { + /* We think we are done - reenable interrupts, + * then check once more to make sure we are done. + */ + lpar_rc = h_vio_signal(adapter->vdev->unit_address, + VIO_IRQ_ENABLE); - ibmveth_assert(lpar_rc == H_SUCCESS); + ibmveth_assert(lpar_rc == H_SUCCESS); - netif_rx_complete(netdev); + netif_rx_complete(netdev, napi); - if(ibmveth_rxq_pending_buffer(adapter) && netif_rx_reschedule(netdev, frames_processed)) - { - lpar_rc = h_vio_signal(adapter->vdev->unit_address, VIO_IRQ_DISABLE); - ibmveth_assert(lpar_rc == H_SUCCESS); - more_work = 1; - goto restart_poll; + if (ibmveth_rxq_pending_buffer(adapter) && + netif_rx_reschedule(netdev, napi)) { + lpar_rc = h_vio_signal(adapter->vdev->unit_address, + VIO_IRQ_DISABLE); + goto restart_poll; + } } - netdev->quota -= frames_processed; - *budget -= frames_processed; - - /* we really are done */ - return 0; + return frames_processed; } static irqreturn_t ibmveth_interrupt(int irq, void *dev_instance) @@ -849,20 +994,15 @@ static irqreturn_t ibmveth_interrupt(int irq, void *dev_instance) struct ibmveth_adapter *adapter = netdev->priv; unsigned long lpar_rc; - if(netif_rx_schedule_prep(netdev)) { - lpar_rc = h_vio_signal(adapter->vdev->unit_address, VIO_IRQ_DISABLE); + if (netif_rx_schedule_prep(netdev, &adapter->napi)) { + lpar_rc = h_vio_signal(adapter->vdev->unit_address, + VIO_IRQ_DISABLE); ibmveth_assert(lpar_rc == H_SUCCESS); - __netif_rx_schedule(netdev); + __netif_rx_schedule(netdev, &adapter->napi); } return IRQ_HANDLED; } -static struct net_device_stats *ibmveth_get_stats(struct net_device *dev) -{ - struct ibmveth_adapter *adapter = dev->priv; - return &adapter->stats; -} - static void ibmveth_set_multicast_list(struct net_device *netdev) { struct ibmveth_adapter *adapter = netdev->priv; @@ -962,8 +1102,10 @@ static void ibmveth_poll_controller(struct net_device *dev) static int __devinit ibmveth_probe(struct vio_dev *dev, const struct vio_device_id *id) { int rc, i; + long ret; struct net_device *netdev; struct ibmveth_adapter *adapter; + u64 set_attr, ret_attr; unsigned char *mac_addr_p; unsigned int *mcastFilterSize_p; @@ -994,8 +1136,6 @@ static int __devinit ibmveth_probe(struct vio_dev *dev, const struct vio_device_ if(!netdev) return -ENOMEM; - SET_MODULE_OWNER(netdev); - adapter = netdev->priv; dev->dev.driver_data = netdev; @@ -1004,6 +1144,8 @@ static int __devinit ibmveth_probe(struct vio_dev *dev, const struct vio_device_ adapter->mcastFilterSize= *mcastFilterSize_p; adapter->pool_config = 0; + netif_napi_add(netdev, &adapter->napi, ibmveth_poll, 16); + /* Some older boxes running PHYP non-natively have an OF that returns a 8-byte local-mac-address field (and the first 2 bytes have to be ignored) while newer 
boxes' OF return @@ -1020,11 +1162,8 @@ static int __devinit ibmveth_probe(struct vio_dev *dev, const struct vio_device_ netdev->irq = dev->irq; netdev->open = ibmveth_open; - netdev->poll = ibmveth_poll; - netdev->weight = 16; netdev->stop = ibmveth_close; netdev->hard_start_xmit = ibmveth_start_xmit; - netdev->get_stats = ibmveth_get_stats; netdev->set_multicast_list = ibmveth_set_multicast_list; netdev->do_ioctl = ibmveth_ioctl; netdev->ethtool_ops = &netdev_ethtool_ops; @@ -1057,6 +1196,22 @@ static int __devinit ibmveth_probe(struct vio_dev *dev, const struct vio_device_ ibmveth_debug_printk("registering netdev...\n"); + ret = h_illan_attributes(dev->unit_address, 0, 0, &ret_attr); + + if (ret == H_SUCCESS && !(ret_attr & IBMVETH_ILLAN_ACTIVE_TRUNK) && + !(ret_attr & IBMVETH_ILLAN_TRUNK_PRI_MASK) && + (ret_attr & IBMVETH_ILLAN_PADDED_PKT_CSUM)) { + set_attr = IBMVETH_ILLAN_IPV4_TCP_CSUM; + + ret = h_illan_attributes(dev->unit_address, 0, set_attr, &ret_attr); + + if (ret == H_SUCCESS) { + adapter->rx_csum = 1; + netdev->features |= NETIF_F_IP_CSUM; + } else + ret = h_illan_attributes(dev->unit_address, set_attr, 0, &ret_attr); + } + rc = register_netdev(netdev); if(rc) { @@ -1092,15 +1247,14 @@ static int __devexit ibmveth_remove(struct vio_dev *dev) #ifdef CONFIG_PROC_FS static void ibmveth_proc_register_driver(void) { - ibmveth_proc_dir = proc_mkdir(IBMVETH_PROC_DIR, NULL); + ibmveth_proc_dir = proc_mkdir(IBMVETH_PROC_DIR, init_net.proc_net); if (ibmveth_proc_dir) { - SET_MODULE_OWNER(ibmveth_proc_dir); } } static void ibmveth_proc_unregister_driver(void) { - remove_proc_entry(IBMVETH_PROC_DIR, NULL); + remove_proc_entry(IBMVETH_PROC_DIR, init_net.proc_net); } static void *ibmveth_seq_start(struct seq_file *seq, loff_t *pos) @@ -1127,22 +1281,16 @@ static int ibmveth_seq_show(struct seq_file *seq, void *v) struct ibmveth_adapter *adapter = seq->private; char *current_mac = ((char*) &adapter->netdev->dev_addr); char *firmware_mac = ((char*) &adapter->mac_addr) ; + DECLARE_MAC_BUF(mac); seq_printf(seq, "%s %s\n\n", ibmveth_driver_string, ibmveth_driver_version); seq_printf(seq, "Unit Address: 0x%x\n", adapter->vdev->unit_address); - seq_printf(seq, "Current MAC: %02X:%02X:%02X:%02X:%02X:%02X\n", - current_mac[0], current_mac[1], current_mac[2], - current_mac[3], current_mac[4], current_mac[5]); - seq_printf(seq, "Firmware MAC: %02X:%02X:%02X:%02X:%02X:%02X\n", - firmware_mac[0], firmware_mac[1], firmware_mac[2], - firmware_mac[3], firmware_mac[4], firmware_mac[5]); + seq_printf(seq, "Current MAC: %s\n", print_mac(mac, current_mac)); + seq_printf(seq, "Firmware MAC: %s\n", print_mac(mac, firmware_mac)); seq_printf(seq, "\nAdapter Statistics:\n"); - seq_printf(seq, " TX: skbuffs linearized: %ld\n", adapter->tx_linearized); - seq_printf(seq, " multi-descriptor sends: %ld\n", adapter->tx_multidesc_send); - seq_printf(seq, " skb_linearize failures: %ld\n", adapter->tx_linearize_failed); - seq_printf(seq, " vio_map_single failres: %ld\n", adapter->tx_map_failed); + seq_printf(seq, " TX: vio_map_single failres: %ld\n", adapter->tx_map_failed); seq_printf(seq, " send failures: %ld\n", adapter->tx_send_failed); seq_printf(seq, " RX: replenish task cycles: %ld\n", adapter->replenish_task_cycles); seq_printf(seq, " alloc_skb_failures: %ld\n", adapter->replenish_no_mem); @@ -1195,7 +1343,6 @@ static void ibmveth_proc_register_adapter(struct ibmveth_adapter *adapter) } else { entry->data = (void *) adapter; entry->proc_fops = &ibmveth_proc_fops; - SET_MODULE_OWNER(entry); } } return; diff --git 
a/drivers/net/ibmveth.h b/drivers/net/ibmveth.h index 72cc15a6cab745b3a2a273b6be7e4d085bfd08cb..41f61cd18852b26cd7f6bfc36808588fc2b230e6 100644 --- a/drivers/net/ibmveth.h +++ b/drivers/net/ibmveth.h @@ -25,8 +25,6 @@ #ifndef _IBMVETH_H #define _IBMVETH_H -#define IbmVethMaxSendFrags 6 - /* constants for H_MULTICAST_CTRL */ #define IbmVethMcastReceptionModifyBit 0x80000UL #define IbmVethMcastReceptionEnableBit 0x20000UL @@ -41,6 +39,12 @@ #define IbmVethMcastRemoveFilter 0x2UL #define IbmVethMcastClearFilterTable 0x3UL +#define IBMVETH_ILLAN_PADDED_PKT_CSUM 0x0000000000002000ULL +#define IBMVETH_ILLAN_TRUNK_PRI_MASK 0x0000000000000F00ULL +#define IBMVETH_ILLAN_IPV6_TCP_CSUM 0x0000000000000004ULL +#define IBMVETH_ILLAN_IPV4_TCP_CSUM 0x0000000000000002ULL +#define IBMVETH_ILLAN_ACTIVE_TRUNK 0x0000000000000001ULL + /* hcall macros */ #define h_register_logical_lan(ua, buflst, rxq, fltlst, mac) \ plpar_hcall_norets(H_REGISTER_LOGICAL_LAN, ua, buflst, rxq, fltlst, mac) @@ -67,6 +71,21 @@ static inline long h_send_logical_lan(unsigned long unit_address, return rc; } +static inline long h_illan_attributes(unsigned long unit_address, + unsigned long reset_mask, unsigned long set_mask, + unsigned long *ret_attributes) +{ + long rc; + unsigned long retbuf[PLPAR_HCALL_BUFSIZE]; + + rc = plpar_hcall(H_ILLAN_ATTRIBUTES, retbuf, unit_address, + reset_mask, set_mask); + + *ret_attributes = retbuf[0]; + + return rc; +} + #define h_multicast_ctrl(ua, cmd, mac) \ plpar_hcall_norets(H_MULTICAST_CTRL, ua, cmd, mac) @@ -112,6 +131,7 @@ struct ibmveth_rx_q { struct ibmveth_adapter { struct vio_dev *vdev; struct net_device *netdev; + struct napi_struct napi; struct net_device_stats stats; unsigned int mcastFilterSize; unsigned long mac_addr; @@ -122,6 +142,7 @@ struct ibmveth_adapter { struct ibmveth_buff_pool rx_buff_pool[IbmVethNumBufferPools]; struct ibmveth_rx_q rx_queue; int pool_config; + int rx_csum; /* adapter specific stats */ u64 replenish_task_cycles; @@ -130,20 +151,19 @@ struct ibmveth_adapter { u64 replenish_add_buff_success; u64 rx_invalid_buffer; u64 rx_no_buffer; - u64 tx_multidesc_send; - u64 tx_linearized; - u64 tx_linearize_failed; u64 tx_map_failed; u64 tx_send_failed; spinlock_t stats_lock; }; struct ibmveth_buf_desc_fields { - u32 valid : 1; - u32 toggle : 1; - u32 reserved : 6; - u32 length : 24; - u32 address; + u32 flags_len; +#define IBMVETH_BUF_VALID 0x80000000 +#define IBMVETH_BUF_TOGGLE 0x40000000 +#define IBMVETH_BUF_NO_CSUM 0x02000000 +#define IBMVETH_BUF_CSUM_GOOD 0x01000000 +#define IBMVETH_BUF_LEN_MASK 0x00FFFFFF + u32 address; }; union ibmveth_buf_desc { @@ -152,12 +172,16 @@ union ibmveth_buf_desc { }; struct ibmveth_rx_q_entry { - u16 toggle : 1; - u16 valid : 1; - u16 reserved : 14; - u16 offset; - u32 length; - u64 correlator; + u32 flags_off; +#define IBMVETH_RXQ_TOGGLE 0x80000000 +#define IBMVETH_RXQ_TOGGLE_SHIFT 31 +#define IBMVETH_RXQ_VALID 0x40000000 +#define IBMVETH_RXQ_NO_CSUM 0x02000000 +#define IBMVETH_RXQ_CSUM_GOOD 0x01000000 +#define IBMVETH_RXQ_OFF_MASK 0x0000FFFF + + u32 length; + u64 correlator; }; #endif /* _IBMVETH_H */ diff --git a/drivers/net/ifb.c b/drivers/net/ifb.c index f5c3598e59afd928d89085e841e45b62fc6c9b92..15949d3df17e082726033593ae0246ce57e3191f 100644 --- a/drivers/net/ifb.c +++ b/drivers/net/ifb.c @@ -34,12 +34,12 @@ #include #include #include +#include #define TX_TIMEOUT (2*HZ) #define TX_Q_LIMIT 32 struct ifb_private { - struct net_device_stats stats; struct tasklet_struct ifb_tasklet; int tasklet_pending; /* mostly debug stats leave in for 
now */ @@ -60,7 +60,6 @@ static int numifbs = 2; static void ri_tasklet(unsigned long dev); static int ifb_xmit(struct sk_buff *skb, struct net_device *dev); -static struct net_device_stats *ifb_get_stats(struct net_device *dev); static int ifb_open(struct net_device *dev); static int ifb_close(struct net_device *dev); @@ -69,7 +68,7 @@ static void ri_tasklet(unsigned long dev) struct net_device *_dev = (struct net_device *)dev; struct ifb_private *dp = netdev_priv(_dev); - struct net_device_stats *stats = &dp->stats; + struct net_device_stats *stats = &_dev->stats; struct sk_buff *skb; dp->st_task_enter++; @@ -97,7 +96,7 @@ static void ri_tasklet(unsigned long dev) stats->tx_packets++; stats->tx_bytes +=skb->len; - skb->dev = __dev_get_by_index(skb->iif); + skb->dev = __dev_get_by_index(&init_net, skb->iif); if (!skb->dev) { dev_kfree_skb(skb); stats->tx_dropped++; @@ -139,7 +138,6 @@ static void ri_tasklet(unsigned long dev) static void ifb_setup(struct net_device *dev) { /* Initialize the device structure. */ - dev->get_stats = ifb_get_stats; dev->hard_start_xmit = ifb_xmit; dev->open = &ifb_open; dev->stop = &ifb_close; @@ -151,14 +149,13 @@ static void ifb_setup(struct net_device *dev) dev->change_mtu = NULL; dev->flags |= IFF_NOARP; dev->flags &= ~IFF_MULTICAST; - SET_MODULE_OWNER(dev); random_ether_addr(dev->dev_addr); } static int ifb_xmit(struct sk_buff *skb, struct net_device *dev) { struct ifb_private *dp = netdev_priv(dev); - struct net_device_stats *stats = &dp->stats; + struct net_device_stats *stats = &dev->stats; int ret = 0; u32 from = G_TC_FROM(skb->tc_verd); @@ -185,19 +182,6 @@ static int ifb_xmit(struct sk_buff *skb, struct net_device *dev) return ret; } -static struct net_device_stats *ifb_get_stats(struct net_device *dev) -{ - struct ifb_private *dp = netdev_priv(dev); - struct net_device_stats *stats = &dp->stats; - - pr_debug("tasklets stats %ld:%ld:%ld:%ld:%ld:%ld:%ld:%ld:%ld \n", - dp->st_task_enter, dp->st_txq_refl_try, dp->st_rxq_enter, - dp->st_rx2tx_tran, dp->st_rxq_notenter, dp->st_rx_frm_egr, - dp->st_rx_frm_ing, dp->st_rxq_check, dp->st_rxq_rsch); - - return stats; -} - static int ifb_close(struct net_device *dev) { struct ifb_private *dp = netdev_priv(dev); diff --git a/drivers/net/ioc3-eth.c b/drivers/net/ioc3-eth.c index 0834ef0eddb4e7b61c6e33ae6c7283c8965c8705..373f72cdbe8e4d12922d3e41984d876e8006942c 100644 --- a/drivers/net/ioc3-eth.c +++ b/drivers/net/ioc3-eth.c @@ -5,7 +5,7 @@ * * Driver for SGI's IOC3 based Ethernet cards as found in the PCI card. * - * Copyright (C) 1999, 2000, 2001, 2003 Ralf Baechle + * Copyright (C) 1999, 2000, 01, 03, 06 Ralf Baechle * Copyright (C) 1995, 1999, 2000, 2001 by Silicon Graphics, Inc. 
* * References: @@ -62,12 +62,7 @@ #include #include #include -#include -#include -#include -#include #include -#include #include /* @@ -95,6 +90,9 @@ struct ioc3_private { u32 emcr, ehar_h, ehar_l; spinlock_t ioc3_lock; struct mii_if_info mii; + unsigned long flags; +#define IOC3_FLAG_RX_CHECKSUMS 1 + struct pci_dev *pdev; /* Members used by autonegotiation */ @@ -445,18 +443,12 @@ static void ioc3_get_eaddr_nic(struct ioc3_private *ip) */ static void ioc3_get_eaddr(struct ioc3_private *ip) { - int i; - + DECLARE_MAC_BUF(mac); ioc3_get_eaddr_nic(ip); - printk("Ethernet address is "); - for (i = 0; i < 6; i++) { - printk("%02x", priv_netdev(ip)->dev_addr[i]); - if (i < 5) - printk(":"); - } - printk(".\n"); + printk("Ethernet address is %s.\n", + print_mac(mac, priv_netdev(ip)->dev_addr)); } static void __ioc3_set_mac_address(struct net_device *dev) @@ -521,8 +513,6 @@ static struct net_device_stats *ioc3_get_stats(struct net_device *dev) return &ip->stats; } -#ifdef CONFIG_SGI_IOC3_ETH_HW_RX_CSUM - static void ioc3_tcpudp_checksum(struct sk_buff *skb, uint32_t hwsum, int len) { struct ethhdr *eh = eth_hdr(skb); @@ -590,7 +580,6 @@ static void ioc3_tcpudp_checksum(struct sk_buff *skb, uint32_t hwsum, int len) if (csum == 0xffff) skb->ip_summed = CHECKSUM_UNNECESSARY; } -#endif /* CONFIG_SGI_IOC3_ETH_HW_RX_CSUM */ static inline void ioc3_rx(struct ioc3_private *ip) { @@ -625,9 +614,9 @@ static inline void ioc3_rx(struct ioc3_private *ip) goto next; } -#ifdef CONFIG_SGI_IOC3_ETH_HW_RX_CSUM - ioc3_tcpudp_checksum(skb, w0 & ERXBUF_IPCKSUM_MASK,len); -#endif + if (likely(ip->flags & IOC3_FLAG_RX_CHECKSUMS)) + ioc3_tcpudp_checksum(skb, + w0 & ERXBUF_IPCKSUM_MASK, len); netif_rx(skb); @@ -1278,7 +1267,6 @@ static int ioc3_probe(struct pci_dev *pdev, const struct pci_device_id *ent) if (err) goto out_free; - SET_MODULE_OWNER(dev); SET_NETDEV_DEV(dev, &pdev->dev); ip = netdev_priv(dev); @@ -1338,9 +1326,7 @@ static int ioc3_probe(struct pci_dev *pdev, const struct pci_device_id *ent) dev->set_multicast_list = ioc3_set_multicast_list; dev->set_mac_address = ioc3_set_mac_address; dev->ethtool_ops = &ioc3_ethtool_ops; -#ifdef CONFIG_SGI_IOC3_ETH_HW_TX_CSUM dev->features = NETIF_F_IP_CSUM; -#endif sw_physid1 = ioc3_mdio_read(dev, ip->mii.phy_id, MII_PHYSID1); sw_physid2 = ioc3_mdio_read(dev, ip->mii.phy_id, MII_PHYSID2); @@ -1430,7 +1416,6 @@ static int ioc3_start_xmit(struct sk_buff *skb, struct net_device *dev) uint32_t w0 = 0; int produce; -#ifdef CONFIG_SGI_IOC3_ETH_HW_TX_CSUM /* * IOC3 has a fairly simple minded checksumming hardware which simply * adds up the 1's complement checksum for the entire packet and @@ -1478,7 +1463,6 @@ static int ioc3_start_xmit(struct sk_buff *skb, struct net_device *dev) w0 = ETXD_DOCHECKSUM | (csoff << ETXD_CHKOFF_SHIFT); } -#endif /* CONFIG_SGI_IOC3_ETH_HW_TX_CSUM */ spin_lock_irq(&ip->ioc3_lock); @@ -1633,12 +1617,37 @@ static u32 ioc3_get_link(struct net_device *dev) return rc; } +static u32 ioc3_get_rx_csum(struct net_device *dev) +{ + struct ioc3_private *ip = netdev_priv(dev); + + return ip->flags & IOC3_FLAG_RX_CHECKSUMS; +} + +static int ioc3_set_rx_csum(struct net_device *dev, u32 data) +{ + struct ioc3_private *ip = netdev_priv(dev); + + spin_lock_bh(&ip->ioc3_lock); + if (data) + ip->flags |= IOC3_FLAG_RX_CHECKSUMS; + else + ip->flags &= ~IOC3_FLAG_RX_CHECKSUMS; + spin_unlock_bh(&ip->ioc3_lock); + + return 0; +} + static const struct ethtool_ops ioc3_ethtool_ops = { .get_drvinfo = ioc3_get_drvinfo, .get_settings = ioc3_get_settings, .set_settings = 
ioc3_set_settings, .nway_reset = ioc3_nway_reset, .get_link = ioc3_get_link, + .get_rx_csum = ioc3_get_rx_csum, + .set_rx_csum = ioc3_set_rx_csum, + .get_tx_csum = ethtool_op_get_tx_csum, + .set_tx_csum = ethtool_op_set_tx_csum }; static int ioc3_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) diff --git a/drivers/net/ipg.c b/drivers/net/ipg.c new file mode 100644 index 0000000000000000000000000000000000000000..59898ce54dcf3170a7579d28aa12c0d4bce35bee --- /dev/null +++ b/drivers/net/ipg.c @@ -0,0 +1,2332 @@ +/* + * ipg.c: Device Driver for the IP1000 Gigabit Ethernet Adapter + * + * Copyright (C) 2003, 2007 IC Plus Corp + * + * Original Author: + * + * Craig Rich + * Sundance Technology, Inc. + * www.sundanceti.com + * craig_rich@sundanceti.com + * + * Current Maintainer: + * + * Sorbica Shieh. + * http://www.icplus.com.tw + * sorbica@icplus.com.tw + * + * Jesse Huang + * http://www.icplus.com.tw + * jesse@icplus.com.tw + */ +#include +#include +#include +#include + +#include + +#define IPG_RX_RING_BYTES (sizeof(struct ipg_rx) * IPG_RFDLIST_LENGTH) +#define IPG_TX_RING_BYTES (sizeof(struct ipg_tx) * IPG_TFDLIST_LENGTH) +#define IPG_RESET_MASK \ + (IPG_AC_GLOBAL_RESET | IPG_AC_RX_RESET | IPG_AC_TX_RESET | \ + IPG_AC_DMA | IPG_AC_FIFO | IPG_AC_NETWORK | IPG_AC_HOST | \ + IPG_AC_AUTO_INIT) + +#define ipg_w32(val32,reg) iowrite32((val32), ioaddr + (reg)) +#define ipg_w16(val16,reg) iowrite16((val16), ioaddr + (reg)) +#define ipg_w8(val8,reg) iowrite8((val8), ioaddr + (reg)) + +#define ipg_r32(reg) ioread32(ioaddr + (reg)) +#define ipg_r16(reg) ioread16(ioaddr + (reg)) +#define ipg_r8(reg) ioread8(ioaddr + (reg)) + +#define JUMBO_FRAME_4k_ONLY +enum { + netdev_io_size = 128 +}; + +#include "ipg.h" +#define DRV_NAME "ipg" + +MODULE_AUTHOR("IC Plus Corp. 
2003"); +MODULE_DESCRIPTION("IC Plus IP1000 Gigabit Ethernet Adapter Linux Driver " + DrvVer); +MODULE_LICENSE("GPL"); + +static const char *ipg_brand_name[] = { + "IC PLUS IP1000 1000/100/10 based NIC", + "Sundance Technology ST2021 based NIC", + "Tamarack Microelectronics TC9020/9021 based NIC", + "Tamarack Microelectronics TC9020/9021 based NIC", + "D-Link NIC", + "D-Link NIC IP1000A" +}; + +static struct pci_device_id ipg_pci_tbl[] __devinitdata = { + { PCI_VDEVICE(SUNDANCE, 0x1023), 0 }, + { PCI_VDEVICE(SUNDANCE, 0x2021), 1 }, + { PCI_VDEVICE(SUNDANCE, 0x1021), 2 }, + { PCI_VDEVICE(DLINK, 0x9021), 3 }, + { PCI_VDEVICE(DLINK, 0x4000), 4 }, + { PCI_VDEVICE(DLINK, 0x4020), 5 }, + { 0, } +}; + +MODULE_DEVICE_TABLE(pci, ipg_pci_tbl); + +static inline void __iomem *ipg_ioaddr(struct net_device *dev) +{ + struct ipg_nic_private *sp = netdev_priv(dev); + return sp->ioaddr; +} + +#ifdef IPG_DEBUG +static void ipg_dump_rfdlist(struct net_device *dev) +{ + struct ipg_nic_private *sp = netdev_priv(dev); + void __iomem *ioaddr = sp->ioaddr; + unsigned int i; + u32 offset; + + IPG_DEBUG_MSG("_dump_rfdlist\n"); + + printk(KERN_INFO "rx_current = %2.2x\n", sp->rx_current); + printk(KERN_INFO "rx_dirty = %2.2x\n", sp->rx_dirty); + printk(KERN_INFO "RFDList start address = %16.16lx\n", + (unsigned long) sp->rxd_map); + printk(KERN_INFO "RFDListPtr register = %8.8x%8.8x\n", + ipg_r32(IPG_RFDLISTPTR1), ipg_r32(IPG_RFDLISTPTR0)); + + for (i = 0; i < IPG_RFDLIST_LENGTH; i++) { + offset = (u32) &sp->rxd[i].next_desc - (u32) sp->rxd; + printk(KERN_INFO "%2.2x %4.4x RFDNextPtr = %16.16lx\n", i, + offset, (unsigned long) sp->rxd[i].next_desc); + offset = (u32) &sp->rxd[i].rfs - (u32) sp->rxd; + printk(KERN_INFO "%2.2x %4.4x RFS = %16.16lx\n", i, + offset, (unsigned long) sp->rxd[i].rfs); + offset = (u32) &sp->rxd[i].frag_info - (u32) sp->rxd; + printk(KERN_INFO "%2.2x %4.4x frag_info = %16.16lx\n", i, + offset, (unsigned long) sp->rxd[i].frag_info); + } +} + +static void ipg_dump_tfdlist(struct net_device *dev) +{ + struct ipg_nic_private *sp = netdev_priv(dev); + void __iomem *ioaddr = sp->ioaddr; + unsigned int i; + u32 offset; + + IPG_DEBUG_MSG("_dump_tfdlist\n"); + + printk(KERN_INFO "tx_current = %2.2x\n", sp->tx_current); + printk(KERN_INFO "tx_dirty = %2.2x\n", sp->tx_dirty); + printk(KERN_INFO "TFDList start address = %16.16lx\n", + (unsigned long) sp->txd_map); + printk(KERN_INFO "TFDListPtr register = %8.8x%8.8x\n", + ipg_r32(IPG_TFDLISTPTR1), ipg_r32(IPG_TFDLISTPTR0)); + + for (i = 0; i < IPG_TFDLIST_LENGTH; i++) { + offset = (u32) &sp->txd[i].next_desc - (u32) sp->txd; + printk(KERN_INFO "%2.2x %4.4x TFDNextPtr = %16.16lx\n", i, + offset, (unsigned long) sp->txd[i].next_desc); + + offset = (u32) &sp->txd[i].tfc - (u32) sp->txd; + printk(KERN_INFO "%2.2x %4.4x TFC = %16.16lx\n", i, + offset, (unsigned long) sp->txd[i].tfc); + offset = (u32) &sp->txd[i].frag_info - (u32) sp->txd; + printk(KERN_INFO "%2.2x %4.4x frag_info = %16.16lx\n", i, + offset, (unsigned long) sp->txd[i].frag_info); + } +} +#endif + +static void ipg_write_phy_ctl(void __iomem *ioaddr, u8 data) +{ + ipg_w8(IPG_PC_RSVD_MASK & data, PHY_CTRL); + ndelay(IPG_PC_PHYCTRLWAIT_NS); +} + +static void ipg_drive_phy_ctl_low_high(void __iomem *ioaddr, u8 data) +{ + ipg_write_phy_ctl(ioaddr, IPG_PC_MGMTCLK_LO | data); + ipg_write_phy_ctl(ioaddr, IPG_PC_MGMTCLK_HI | data); +} + +static void send_three_state(void __iomem *ioaddr, u8 phyctrlpolarity) +{ + phyctrlpolarity |= (IPG_PC_MGMTDATA & 0) | IPG_PC_MGMTDIR; + + 
ipg_drive_phy_ctl_low_high(ioaddr, phyctrlpolarity); +} + +static void send_end(void __iomem *ioaddr, u8 phyctrlpolarity) +{ + ipg_w8((IPG_PC_MGMTCLK_LO | (IPG_PC_MGMTDATA & 0) | IPG_PC_MGMTDIR | + phyctrlpolarity) & IPG_PC_RSVD_MASK, PHY_CTRL); +} + +static u16 read_phy_bit(void __iomem * ioaddr, u8 phyctrlpolarity) +{ + u16 bit_data; + + ipg_write_phy_ctl(ioaddr, IPG_PC_MGMTCLK_LO | phyctrlpolarity); + + bit_data = ((ipg_r8(PHY_CTRL) & IPG_PC_MGMTDATA) >> 1) & 1; + + ipg_write_phy_ctl(ioaddr, IPG_PC_MGMTCLK_HI | phyctrlpolarity); + + return bit_data; +} + +/* + * Read a register from the Physical Layer device located + * on the IPG NIC, using the IPG PHYCTRL register. + */ +static int mdio_read(struct net_device * dev, int phy_id, int phy_reg) +{ + void __iomem *ioaddr = ipg_ioaddr(dev); + /* + * The GMII mangement frame structure for a read is as follows: + * + * |Preamble|st|op|phyad|regad|ta| data |idle| + * |< 32 1s>|01|10|AAAAA|RRRRR|z0|DDDDDDDDDDDDDDDD|z | + * + * <32 1s> = 32 consecutive logic 1 values + * A = bit of Physical Layer device address (MSB first) + * R = bit of register address (MSB first) + * z = High impedance state + * D = bit of read data (MSB first) + * + * Transmission order is 'Preamble' field first, bits transmitted + * left to right (first to last). + */ + struct { + u32 field; + unsigned int len; + } p[] = { + { GMII_PREAMBLE, 32 }, /* Preamble */ + { GMII_ST, 2 }, /* ST */ + { GMII_READ, 2 }, /* OP */ + { phy_id, 5 }, /* PHYAD */ + { phy_reg, 5 }, /* REGAD */ + { 0x0000, 2 }, /* TA */ + { 0x0000, 16 }, /* DATA */ + { 0x0000, 1 } /* IDLE */ + }; + unsigned int i, j; + u8 polarity, data; + + polarity = ipg_r8(PHY_CTRL); + polarity &= (IPG_PC_DUPLEX_POLARITY | IPG_PC_LINK_POLARITY); + + /* Create the Preamble, ST, OP, PHYAD, and REGAD field. */ + for (j = 0; j < 5; j++) { + for (i = 0; i < p[j].len; i++) { + /* For each variable length field, the MSB must be + * transmitted first. Rotate through the field bits, + * starting with the MSB, and move each bit into the + * the 1st (2^1) bit position (this is the bit position + * corresponding to the MgmtData bit of the PhyCtrl + * register for the IPG). + * + * Example: ST = 01; + * + * First write a '0' to bit 1 of the PhyCtrl + * register, then write a '1' to bit 1 of the + * PhyCtrl register. + * + * To do this, right shift the MSB of ST by the value: + * [field length - 1 - #ST bits already written] + * then left shift this result by 1. + */ + data = (p[j].field >> (p[j].len - 1 - i)) << 1; + data &= IPG_PC_MGMTDATA; + data |= polarity | IPG_PC_MGMTDIR; + + ipg_drive_phy_ctl_low_high(ioaddr, data); + } + } + + send_three_state(ioaddr, polarity); + + read_phy_bit(ioaddr, polarity); + + /* + * For a read cycle, the bits for the next two fields (TA and + * DATA) are driven by the PHY (the IPG reads these bits). + */ + for (i = 0; i < p[6].len; i++) { + p[6].field |= + (read_phy_bit(ioaddr, polarity) << (p[6].len - 1 - i)); + } + + send_three_state(ioaddr, polarity); + send_three_state(ioaddr, polarity); + send_three_state(ioaddr, polarity); + send_end(ioaddr, polarity); + + /* Return the value of the DATA field. */ + return p[6].field; +} + +/* + * Write to a register from the Physical Layer device located + * on the IPG NIC, using the IPG PHYCTRL register. 
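+ * The frame diagram in the comment below is the clause 22 read format;
+ * for a write cycle the OP field is 01 (GMII_WRITE) and the station
+ * itself drives the TA bits as 10 and the 16 DATA bits, which is what
+ * the p[] table in this function encodes.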
+ */ +static void mdio_write(struct net_device *dev, int phy_id, int phy_reg, int val) +{ + void __iomem *ioaddr = ipg_ioaddr(dev); + /* + * The GMII mangement frame structure for a read is as follows: + * + * |Preamble|st|op|phyad|regad|ta| data |idle| + * |< 32 1s>|01|10|AAAAA|RRRRR|z0|DDDDDDDDDDDDDDDD|z | + * + * <32 1s> = 32 consecutive logic 1 values + * A = bit of Physical Layer device address (MSB first) + * R = bit of register address (MSB first) + * z = High impedance state + * D = bit of write data (MSB first) + * + * Transmission order is 'Preamble' field first, bits transmitted + * left to right (first to last). + */ + struct { + u32 field; + unsigned int len; + } p[] = { + { GMII_PREAMBLE, 32 }, /* Preamble */ + { GMII_ST, 2 }, /* ST */ + { GMII_WRITE, 2 }, /* OP */ + { phy_id, 5 }, /* PHYAD */ + { phy_reg, 5 }, /* REGAD */ + { 0x0002, 2 }, /* TA */ + { val & 0xffff, 16 }, /* DATA */ + { 0x0000, 1 } /* IDLE */ + }; + unsigned int i, j; + u8 polarity, data; + + polarity = ipg_r8(PHY_CTRL); + polarity &= (IPG_PC_DUPLEX_POLARITY | IPG_PC_LINK_POLARITY); + + /* Create the Preamble, ST, OP, PHYAD, and REGAD field. */ + for (j = 0; j < 7; j++) { + for (i = 0; i < p[j].len; i++) { + /* For each variable length field, the MSB must be + * transmitted first. Rotate through the field bits, + * starting with the MSB, and move each bit into the + * the 1st (2^1) bit position (this is the bit position + * corresponding to the MgmtData bit of the PhyCtrl + * register for the IPG). + * + * Example: ST = 01; + * + * First write a '0' to bit 1 of the PhyCtrl + * register, then write a '1' to bit 1 of the + * PhyCtrl register. + * + * To do this, right shift the MSB of ST by the value: + * [field length - 1 - #ST bits already written] + * then left shift this result by 1. + */ + data = (p[j].field >> (p[j].len - 1 - i)) << 1; + data &= IPG_PC_MGMTDATA; + data |= polarity | IPG_PC_MGMTDIR; + + ipg_drive_phy_ctl_low_high(ioaddr, data); + } + } + + /* The last cycle is a tri-state, so read from the PHY. */ + for (j = 7; j < 8; j++) { + for (i = 0; i < p[j].len; i++) { + ipg_write_phy_ctl(ioaddr, IPG_PC_MGMTCLK_LO | polarity); + + p[j].field |= ((ipg_r8(PHY_CTRL) & + IPG_PC_MGMTDATA) >> 1) << (p[j].len - 1 - i); + + ipg_write_phy_ctl(ioaddr, IPG_PC_MGMTCLK_HI | polarity); + } + } +} + +/* Set LED_Mode JES20040127EEPROM */ +static void ipg_set_led_mode(struct net_device *dev) +{ + struct ipg_nic_private *sp = netdev_priv(dev); + void __iomem *ioaddr = sp->ioaddr; + u32 mode; + + mode = ipg_r32(ASIC_CTRL); + mode &= ~(IPG_AC_LED_MODE_BIT_1 | IPG_AC_LED_MODE | IPG_AC_LED_SPEED); + + if ((sp->LED_Mode & 0x03) > 1) + mode |= IPG_AC_LED_MODE_BIT_1; /* Write Asic Control Bit 29 */ + + if ((sp->LED_Mode & 0x01) == 1) + mode |= IPG_AC_LED_MODE; /* Write Asic Control Bit 14 */ + + if ((sp->LED_Mode & 0x08) == 8) + mode |= IPG_AC_LED_SPEED; /* Write Asic Control Bit 27 */ + + ipg_w32(mode, ASIC_CTRL); +} + +/* Set PHYSet JES20040127EEPROM */ +static void ipg_set_phy_set(struct net_device *dev) +{ + struct ipg_nic_private *sp = netdev_priv(dev); + void __iomem *ioaddr = sp->ioaddr; + int physet; + + physet = ipg_r8(PHY_SET); + physet &= ~(IPG_PS_MEM_LENB9B | IPG_PS_MEM_LEN9 | IPG_PS_NON_COMPDET); + physet |= ((sp->LED_Mode & 0x70) >> 4); + ipg_w8(physet, PHY_SET); +} + +static int ipg_reset(struct net_device *dev, u32 resetflags) +{ + /* Assert functional resets via the IPG AsicCtrl + * register as specified by the 'resetflags' input + * parameter. 
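+ * A full device reset passes IPG_RESET_MASK, which ORs together the
+ * global, RX, TX, DMA, FIFO, network, host and auto-init reset bits
+ * defined near the top of this file.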
+ */ + void __iomem *ioaddr = ipg_ioaddr(dev); //JES20040127EEPROM: + unsigned int timeout_count = 0; + + IPG_DEBUG_MSG("_reset\n"); + + ipg_w32(ipg_r32(ASIC_CTRL) | resetflags, ASIC_CTRL); + + /* Delay added to account for problem with 10Mbps reset. */ + mdelay(IPG_AC_RESETWAIT); + + while (IPG_AC_RESET_BUSY & ipg_r32(ASIC_CTRL)) { + mdelay(IPG_AC_RESETWAIT); + if (++timeout_count > IPG_AC_RESET_TIMEOUT) + return -ETIME; + } + /* Set LED Mode in Asic Control JES20040127EEPROM */ + ipg_set_led_mode(dev); + + /* Set PHYSet Register Value JES20040127EEPROM */ + ipg_set_phy_set(dev); + return 0; +} + +/* Find the GMII PHY address. */ +static int ipg_find_phyaddr(struct net_device *dev) +{ + unsigned int phyaddr, i; + + for (i = 0; i < 32; i++) { + u32 status; + + /* Search for the correct PHY address among 32 possible. */ + phyaddr = (IPG_NIC_PHY_ADDRESS + i) % 32; + + /* 10/22/03 Grace change verify from GMII_PHY_STATUS to + GMII_PHY_ID1 + */ + + status = mdio_read(dev, phyaddr, MII_BMSR); + + if ((status != 0xFFFF) && (status != 0)) + return phyaddr; + } + + return 0x1f; +} + +/* + * Configure IPG based on result of IEEE 802.3 PHY + * auto-negotiation. + */ +static int ipg_config_autoneg(struct net_device *dev) +{ + struct ipg_nic_private *sp = netdev_priv(dev); + void __iomem *ioaddr = sp->ioaddr; + unsigned int txflowcontrol; + unsigned int rxflowcontrol; + unsigned int fullduplex; + unsigned int gig; + u32 mac_ctrl_val; + u32 asicctrl; + u8 phyctrl; + + IPG_DEBUG_MSG("_config_autoneg\n"); + + asicctrl = ipg_r32(ASIC_CTRL); + phyctrl = ipg_r8(PHY_CTRL); + mac_ctrl_val = ipg_r32(MAC_CTRL); + + /* Set flags for use in resolving auto-negotation, assuming + * non-1000Mbps, half duplex, no flow control. + */ + fullduplex = 0; + txflowcontrol = 0; + rxflowcontrol = 0; + gig = 0; + + /* To accomodate a problem in 10Mbps operation, + * set a global flag if PHY running in 10Mbps mode. + */ + sp->tenmbpsmode = 0; + + printk(KERN_INFO "%s: Link speed = ", dev->name); + + /* Determine actual speed of operation. */ + switch (phyctrl & IPG_PC_LINK_SPEED) { + case IPG_PC_LINK_SPEED_10MBPS: + printk("10Mbps.\n"); + printk(KERN_INFO "%s: 10Mbps operational mode enabled.\n", + dev->name); + sp->tenmbpsmode = 1; + break; + case IPG_PC_LINK_SPEED_100MBPS: + printk("100Mbps.\n"); + break; + case IPG_PC_LINK_SPEED_1000MBPS: + printk("1000Mbps.\n"); + gig = 1; + break; + default: + printk("undefined!\n"); + return 0; + } + + if (phyctrl & IPG_PC_DUPLEX_STATUS) { + fullduplex = 1; + txflowcontrol = 1; + rxflowcontrol = 1; + } + + /* Configure full duplex, and flow control. */ + if (fullduplex == 1) { + /* Configure IPG for full duplex operation. */ + printk(KERN_INFO "%s: setting full duplex, ", dev->name); + + mac_ctrl_val |= IPG_MC_DUPLEX_SELECT_FD; + + if (txflowcontrol == 1) { + printk("TX flow control"); + mac_ctrl_val |= IPG_MC_TX_FLOW_CONTROL_ENABLE; + } else { + printk("no TX flow control"); + mac_ctrl_val &= ~IPG_MC_TX_FLOW_CONTROL_ENABLE; + } + + if (rxflowcontrol == 1) { + printk(", RX flow control."); + mac_ctrl_val |= IPG_MC_RX_FLOW_CONTROL_ENABLE; + } else { + printk(", no RX flow control."); + mac_ctrl_val &= ~IPG_MC_RX_FLOW_CONTROL_ENABLE; + } + + printk("\n"); + } else { + /* Configure IPG for half duplex operation. 
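+ * Half duplex also disables flow control on this adapter; both the
+ * TX and RX flow control enable bits are masked off below.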
*/ + printk(KERN_INFO "%s: setting half duplex, " + "no TX flow control, no RX flow control.\n", dev->name); + + mac_ctrl_val &= ~IPG_MC_DUPLEX_SELECT_FD & + ~IPG_MC_TX_FLOW_CONTROL_ENABLE & + ~IPG_MC_RX_FLOW_CONTROL_ENABLE; + } + ipg_w32(mac_ctrl_val, MAC_CTRL); + return 0; +} + +/* Determine and configure multicast operation and set + * receive mode for IPG. + */ +static void ipg_nic_set_multicast_list(struct net_device *dev) +{ + void __iomem *ioaddr = ipg_ioaddr(dev); + struct dev_mc_list *mc_list_ptr; + unsigned int hashindex; + u32 hashtable[2]; + u8 receivemode; + + IPG_DEBUG_MSG("_nic_set_multicast_list\n"); + + receivemode = IPG_RM_RECEIVEUNICAST | IPG_RM_RECEIVEBROADCAST; + + if (dev->flags & IFF_PROMISC) { + /* NIC to be configured in promiscuous mode. */ + receivemode = IPG_RM_RECEIVEALLFRAMES; + } else if ((dev->flags & IFF_ALLMULTI) || + (dev->flags & IFF_MULTICAST & + (dev->mc_count > IPG_MULTICAST_HASHTABLE_SIZE))) { + /* NIC to be configured to receive all multicast + * frames. */ + receivemode |= IPG_RM_RECEIVEMULTICAST; + } else if (dev->flags & IFF_MULTICAST & (dev->mc_count > 0)) { + /* NIC to be configured to receive selected + * multicast addresses. */ + receivemode |= IPG_RM_RECEIVEMULTICASTHASH; + } + + /* Calculate the bits to set for the 64 bit, IPG HASHTABLE. + * The IPG applies a cyclic-redundancy-check (the same CRC + * used to calculate the frame data FCS) to the destination + * address all incoming multicast frames whose destination + * address has the multicast bit set. The least significant + * 6 bits of the CRC result are used as an addressing index + * into the hash table. If the value of the bit addressed by + * this index is a 1, the frame is passed to the host system. + */ + + /* Clear hashtable. */ + hashtable[0] = 0x00000000; + hashtable[1] = 0x00000000; + + /* Cycle through all multicast addresses to filter. */ + for (mc_list_ptr = dev->mc_list; + mc_list_ptr != NULL; mc_list_ptr = mc_list_ptr->next) { + /* Calculate CRC result for each multicast address. */ + hashindex = crc32_le(0xffffffff, mc_list_ptr->dmi_addr, + ETH_ALEN); + + /* Use only the least significant 6 bits. */ + hashindex = hashindex & 0x3F; + + /* Within "hashtable", set bit number "hashindex" + * to a logic 1. + */ + set_bit(hashindex, (void *)hashtable); + } + + /* Write the value of the hashtable, to the 4, 16 bit + * HASHTABLE IPG registers. + */ + ipg_w32(hashtable[0], HASHTABLE_0); + ipg_w32(hashtable[1], HASHTABLE_1); + + ipg_w8(IPG_RM_RSVD_MASK & receivemode, RECEIVE_MODE); + + IPG_DEBUG_MSG("ReceiveMode = %x\n", ipg_r8(RECEIVE_MODE)); +} + +static int ipg_io_config(struct net_device *dev) +{ + void __iomem *ioaddr = ipg_ioaddr(dev); + u32 origmacctrl; + u32 restoremacctrl; + + IPG_DEBUG_MSG("_io_config\n"); + + origmacctrl = ipg_r32(MAC_CTRL); + + restoremacctrl = origmacctrl | IPG_MC_STATISTICS_ENABLE; + + /* Based on compilation option, determine if FCS is to be + * stripped on receive frames by IPG. + */ + if (!IPG_STRIP_FCS_ON_RX) + restoremacctrl |= IPG_MC_RCV_FCS; + + /* Determine if transmitter and/or receiver are + * enabled so we may restore MACCTRL correctly. + */ + if (origmacctrl & IPG_MC_TX_ENABLED) + restoremacctrl |= IPG_MC_TX_ENABLE; + + if (origmacctrl & IPG_MC_RX_ENABLED) + restoremacctrl |= IPG_MC_RX_ENABLE; + + /* Transmitter and receiver must be disabled before setting + * IFSSelect. 
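+ * The pre-existing MAC_CTRL value is preserved in restoremacctrl above
+ * and written back at the end of this function, after the IFS, DMA and
+ * threshold registers have been programmed.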
+ */ + ipg_w32((origmacctrl & (IPG_MC_RX_DISABLE | IPG_MC_TX_DISABLE)) & + IPG_MC_RSVD_MASK, MAC_CTRL); + + /* Now that transmitter and receiver are disabled, write + * to IFSSelect. + */ + ipg_w32((origmacctrl & IPG_MC_IFS_96BIT) & IPG_MC_RSVD_MASK, MAC_CTRL); + + /* Set RECEIVEMODE register. */ + ipg_nic_set_multicast_list(dev); + + ipg_w16(IPG_MAX_RXFRAME_SIZE, MAX_FRAME_SIZE); + + ipg_w8(IPG_RXDMAPOLLPERIOD_VALUE, RX_DMA_POLL_PERIOD); + ipg_w8(IPG_RXDMAURGENTTHRESH_VALUE, RX_DMA_URGENT_THRESH); + ipg_w8(IPG_RXDMABURSTTHRESH_VALUE, RX_DMA_BURST_THRESH); + ipg_w8(IPG_TXDMAPOLLPERIOD_VALUE, TX_DMA_POLL_PERIOD); + ipg_w8(IPG_TXDMAURGENTTHRESH_VALUE, TX_DMA_URGENT_THRESH); + ipg_w8(IPG_TXDMABURSTTHRESH_VALUE, TX_DMA_BURST_THRESH); + ipg_w16((IPG_IE_HOST_ERROR | IPG_IE_TX_DMA_COMPLETE | + IPG_IE_TX_COMPLETE | IPG_IE_INT_REQUESTED | + IPG_IE_UPDATE_STATS | IPG_IE_LINK_EVENT | + IPG_IE_RX_DMA_COMPLETE | IPG_IE_RX_DMA_PRIORITY), INT_ENABLE); + ipg_w16(IPG_FLOWONTHRESH_VALUE, FLOW_ON_THRESH); + ipg_w16(IPG_FLOWOFFTHRESH_VALUE, FLOW_OFF_THRESH); + + /* IPG multi-frag frame bug workaround. + * Per silicon revision B3 eratta. + */ + ipg_w16(ipg_r16(DEBUG_CTRL) | 0x0200, DEBUG_CTRL); + + /* IPG TX poll now bug workaround. + * Per silicon revision B3 eratta. + */ + ipg_w16(ipg_r16(DEBUG_CTRL) | 0x0010, DEBUG_CTRL); + + /* IPG RX poll now bug workaround. + * Per silicon revision B3 eratta. + */ + ipg_w16(ipg_r16(DEBUG_CTRL) | 0x0020, DEBUG_CTRL); + + /* Now restore MACCTRL to original setting. */ + ipg_w32(IPG_MC_RSVD_MASK & restoremacctrl, MAC_CTRL); + + /* Disable unused RMON statistics. */ + ipg_w32(IPG_RZ_ALL, RMON_STATISTICS_MASK); + + /* Disable unused MIB statistics. */ + ipg_w32(IPG_SM_MACCONTROLFRAMESXMTD | IPG_SM_MACCONTROLFRAMESRCVD | + IPG_SM_BCSTOCTETXMTOK_BCSTFRAMESXMTDOK | IPG_SM_TXJUMBOFRAMES | + IPG_SM_MCSTOCTETXMTOK_MCSTFRAMESXMTDOK | IPG_SM_RXJUMBOFRAMES | + IPG_SM_BCSTOCTETRCVDOK_BCSTFRAMESRCVDOK | + IPG_SM_UDPCHECKSUMERRORS | IPG_SM_TCPCHECKSUMERRORS | + IPG_SM_IPCHECKSUMERRORS, STATISTICS_MASK); + + return 0; +} + +/* + * Create a receive buffer within system memory and update + * NIC private structure appropriately. + */ +static int ipg_get_rxbuff(struct net_device *dev, int entry) +{ + struct ipg_nic_private *sp = netdev_priv(dev); + struct ipg_rx *rxfd = sp->rxd + entry; + struct sk_buff *skb; + u64 rxfragsize; + + IPG_DEBUG_MSG("_get_rxbuff\n"); + + skb = netdev_alloc_skb(dev, IPG_RXSUPPORT_SIZE + NET_IP_ALIGN); + if (!skb) { + sp->RxBuff[entry] = NULL; + return -ENOMEM; + } + + /* Adjust the data start location within the buffer to + * align IP address field to a 16 byte boundary. + */ + skb_reserve(skb, NET_IP_ALIGN); + + /* Associate the receive buffer with the IPG NIC. */ + skb->dev = dev; + + /* Save the address of the sk_buff structure. */ + sp->RxBuff[entry] = skb; + + rxfd->frag_info = cpu_to_le64(pci_map_single(sp->pdev, skb->data, + sp->rx_buf_sz, PCI_DMA_FROMDEVICE)); + + /* Set the RFD fragment length. 
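+ * The fragment length is carried in the high-order bits of the
+ * frag_info descriptor word, hence the shift left by 48 and the
+ * IPG_RFI_FRAGLEN mask applied below.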
*/ + rxfragsize = IPG_RXFRAG_SIZE; + rxfd->frag_info |= cpu_to_le64((rxfragsize << 48) & IPG_RFI_FRAGLEN); + + return 0; +} + +static int init_rfdlist(struct net_device *dev) +{ + struct ipg_nic_private *sp = netdev_priv(dev); + void __iomem *ioaddr = sp->ioaddr; + unsigned int i; + + IPG_DEBUG_MSG("_init_rfdlist\n"); + + for (i = 0; i < IPG_RFDLIST_LENGTH; i++) { + struct ipg_rx *rxfd = sp->rxd + i; + + if (sp->RxBuff[i]) { + pci_unmap_single(sp->pdev, + le64_to_cpu(rxfd->frag_info & ~IPG_RFI_FRAGLEN), + sp->rx_buf_sz, PCI_DMA_FROMDEVICE); + IPG_DEV_KFREE_SKB(sp->RxBuff[i]); + sp->RxBuff[i] = NULL; + } + + /* Clear out the RFS field. */ + rxfd->rfs = 0x0000000000000000; + + if (ipg_get_rxbuff(dev, i) < 0) { + /* + * A receive buffer was not ready, break the + * RFD list here. + */ + IPG_DEBUG_MSG("Cannot allocate Rx buffer.\n"); + + /* Just in case we cannot allocate a single RFD. + * Should not occur. + */ + if (i == 0) { + printk(KERN_ERR "%s: No memory available" + " for RFD list.\n", dev->name); + return -ENOMEM; + } + } + + rxfd->next_desc = cpu_to_le64(sp->rxd_map + + sizeof(struct ipg_rx)*(i + 1)); + } + sp->rxd[i - 1].next_desc = cpu_to_le64(sp->rxd_map); + + sp->rx_current = 0; + sp->rx_dirty = 0; + + /* Write the location of the RFDList to the IPG. */ + ipg_w32((u32) sp->rxd_map, RFD_LIST_PTR_0); + ipg_w32(0x00000000, RFD_LIST_PTR_1); + + return 0; +} + +static void init_tfdlist(struct net_device *dev) +{ + struct ipg_nic_private *sp = netdev_priv(dev); + void __iomem *ioaddr = sp->ioaddr; + unsigned int i; + + IPG_DEBUG_MSG("_init_tfdlist\n"); + + for (i = 0; i < IPG_TFDLIST_LENGTH; i++) { + struct ipg_tx *txfd = sp->txd + i; + + txfd->tfc = cpu_to_le64(IPG_TFC_TFDDONE); + + if (sp->TxBuff[i]) { + IPG_DEV_KFREE_SKB(sp->TxBuff[i]); + sp->TxBuff[i] = NULL; + } + + txfd->next_desc = cpu_to_le64(sp->txd_map + + sizeof(struct ipg_tx)*(i + 1)); + } + sp->txd[i - 1].next_desc = cpu_to_le64(sp->txd_map); + + sp->tx_current = 0; + sp->tx_dirty = 0; + + /* Write the location of the TFDList to the IPG. */ + IPG_DDEBUG_MSG("Starting TFDListPtr = %8.8x\n", + (u32) sp->txd_map); + ipg_w32((u32) sp->txd_map, TFD_LIST_PTR_0); + ipg_w32(0x00000000, TFD_LIST_PTR_1); + + sp->ResetCurrentTFD = 1; +} + +/* + * Free all transmit buffers which have already been transfered + * via DMA to the IPG. + */ +static void ipg_nic_txfree(struct net_device *dev) +{ + struct ipg_nic_private *sp = netdev_priv(dev); + void __iomem *ioaddr = sp->ioaddr; + unsigned int curr; + u64 txd_map; + unsigned int released, pending; + + txd_map = (u64)sp->txd_map; + curr = ipg_r32(TFD_LIST_PTR_0) - + do_div(txd_map, sizeof(struct ipg_tx)) - 1; + + IPG_DEBUG_MSG("_nic_txfree\n"); + + pending = sp->tx_current - sp->tx_dirty; + + for (released = 0; released < pending; released++) { + unsigned int dirty = sp->tx_dirty % IPG_TFDLIST_LENGTH; + struct sk_buff *skb = sp->TxBuff[dirty]; + struct ipg_tx *txfd = sp->txd + dirty; + + IPG_DEBUG_MSG("TFC = %16.16lx\n", (unsigned long) txfd->tfc); + + /* Look at each TFD's TFC field beginning + * at the last freed TFD up to the current TFD. + * If the TFDDone bit is set, free the associated + * buffer. + */ + if (dirty == curr) + break; + + /* Setup TFDDONE for compatible issue. */ + txfd->tfc |= cpu_to_le64(IPG_TFC_TFDDONE); + + /* Free the transmit buffer. 
*/ + if (skb) { + pci_unmap_single(sp->pdev, + le64_to_cpu(txfd->frag_info & ~IPG_TFI_FRAGLEN), + skb->len, PCI_DMA_TODEVICE); + + IPG_DEV_KFREE_SKB(skb); + + sp->TxBuff[dirty] = NULL; + } + } + + sp->tx_dirty += released; + + if (netif_queue_stopped(dev) && + (sp->tx_current != (sp->tx_dirty + IPG_TFDLIST_LENGTH))) { + netif_wake_queue(dev); + } +} + +static void ipg_tx_timeout(struct net_device *dev) +{ + struct ipg_nic_private *sp = netdev_priv(dev); + void __iomem *ioaddr = sp->ioaddr; + + ipg_reset(dev, IPG_AC_TX_RESET | IPG_AC_DMA | IPG_AC_NETWORK | + IPG_AC_FIFO); + + spin_lock_irq(&sp->lock); + + /* Re-configure after DMA reset. */ + if (ipg_io_config(dev) < 0) { + printk(KERN_INFO "%s: Error during re-configuration.\n", + dev->name); + } + + init_tfdlist(dev); + + spin_unlock_irq(&sp->lock); + + ipg_w32((ipg_r32(MAC_CTRL) | IPG_MC_TX_ENABLE) & IPG_MC_RSVD_MASK, + MAC_CTRL); +} + +/* + * For TxComplete interrupts, free all transmit + * buffers which have already been transfered via DMA + * to the IPG. + */ +static void ipg_nic_txcleanup(struct net_device *dev) +{ + struct ipg_nic_private *sp = netdev_priv(dev); + void __iomem *ioaddr = sp->ioaddr; + unsigned int i; + + IPG_DEBUG_MSG("_nic_txcleanup\n"); + + for (i = 0; i < IPG_TFDLIST_LENGTH; i++) { + /* Reading the TXSTATUS register clears the + * TX_COMPLETE interrupt. + */ + u32 txstatusdword = ipg_r32(TX_STATUS); + + IPG_DEBUG_MSG("TxStatus = %8.8x\n", txstatusdword); + + /* Check for Transmit errors. Error bits only valid if + * TX_COMPLETE bit in the TXSTATUS register is a 1. + */ + if (!(txstatusdword & IPG_TS_TX_COMPLETE)) + break; + + /* If in 10Mbps mode, indicate transmit is ready. */ + if (sp->tenmbpsmode) { + netif_wake_queue(dev); + } + + /* Transmit error, increment stat counters. */ + if (txstatusdword & IPG_TS_TX_ERROR) { + IPG_DEBUG_MSG("Transmit error.\n"); + sp->stats.tx_errors++; + } + + /* Late collision, re-enable transmitter. */ + if (txstatusdword & IPG_TS_LATE_COLLISION) { + IPG_DEBUG_MSG("Late collision on transmit.\n"); + ipg_w32((ipg_r32(MAC_CTRL) | IPG_MC_TX_ENABLE) & + IPG_MC_RSVD_MASK, MAC_CTRL); + } + + /* Maximum collisions, re-enable transmitter. */ + if (txstatusdword & IPG_TS_TX_MAX_COLL) { + IPG_DEBUG_MSG("Maximum collisions on transmit.\n"); + ipg_w32((ipg_r32(MAC_CTRL) | IPG_MC_TX_ENABLE) & + IPG_MC_RSVD_MASK, MAC_CTRL); + } + + /* Transmit underrun, reset and re-enable + * transmitter. + */ + if (txstatusdword & IPG_TS_TX_UNDERRUN) { + IPG_DEBUG_MSG("Transmitter underrun.\n"); + sp->stats.tx_fifo_errors++; + ipg_reset(dev, IPG_AC_TX_RESET | IPG_AC_DMA | + IPG_AC_NETWORK | IPG_AC_FIFO); + + /* Re-configure after DMA reset. */ + if (ipg_io_config(dev) < 0) { + printk(KERN_INFO + "%s: Error during re-configuration.\n", + dev->name); + } + init_tfdlist(dev); + + ipg_w32((ipg_r32(MAC_CTRL) | IPG_MC_TX_ENABLE) & + IPG_MC_RSVD_MASK, MAC_CTRL); + } + } + + ipg_nic_txfree(dev); +} + +/* Provides statistical information about the IPG NIC. */ +struct net_device_stats *ipg_nic_get_stats(struct net_device *dev) +{ + struct ipg_nic_private *sp = netdev_priv(dev); + void __iomem *ioaddr = sp->ioaddr; + u16 temp1; + u16 temp2; + + IPG_DEBUG_MSG("_nic_get_stats\n"); + + /* Check to see if the NIC has been initialized via nic_open, + * before trying to read statistic registers. 
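+ * __LINK_STATE_START is set by dev_open() and cleared by dev_close(),
+ * so the test below simply returns the cached counters while the
+ * interface is down.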
+ */ + if (!test_bit(__LINK_STATE_START, &dev->state)) + return &sp->stats; + + sp->stats.rx_packets += ipg_r32(IPG_FRAMESRCVDOK); + sp->stats.tx_packets += ipg_r32(IPG_FRAMESXMTDOK); + sp->stats.rx_bytes += ipg_r32(IPG_OCTETRCVOK); + sp->stats.tx_bytes += ipg_r32(IPG_OCTETXMTOK); + temp1 = ipg_r16(IPG_FRAMESLOSTRXERRORS); + sp->stats.rx_errors += temp1; + sp->stats.rx_missed_errors += temp1; + temp1 = ipg_r32(IPG_SINGLECOLFRAMES) + ipg_r32(IPG_MULTICOLFRAMES) + + ipg_r32(IPG_LATECOLLISIONS); + temp2 = ipg_r16(IPG_CARRIERSENSEERRORS); + sp->stats.collisions += temp1; + sp->stats.tx_dropped += ipg_r16(IPG_FRAMESABORTXSCOLLS); + sp->stats.tx_errors += ipg_r16(IPG_FRAMESWEXDEFERRAL) + + ipg_r32(IPG_FRAMESWDEFERREDXMT) + temp1 + temp2; + sp->stats.multicast += ipg_r32(IPG_MCSTOCTETRCVDOK); + + /* detailed tx_errors */ + sp->stats.tx_carrier_errors += temp2; + + /* detailed rx_errors */ + sp->stats.rx_length_errors += ipg_r16(IPG_INRANGELENGTHERRORS) + + ipg_r16(IPG_FRAMETOOLONGERRRORS); + sp->stats.rx_crc_errors += ipg_r16(IPG_FRAMECHECKSEQERRORS); + + /* Unutilized IPG statistic registers. */ + ipg_r32(IPG_MCSTFRAMESRCVDOK); + + return &sp->stats; +} + +/* Restore used receive buffers. */ +static int ipg_nic_rxrestore(struct net_device *dev) +{ + struct ipg_nic_private *sp = netdev_priv(dev); + const unsigned int curr = sp->rx_current; + unsigned int dirty = sp->rx_dirty; + + IPG_DEBUG_MSG("_nic_rxrestore\n"); + + for (dirty = sp->rx_dirty; curr - dirty > 0; dirty++) { + unsigned int entry = dirty % IPG_RFDLIST_LENGTH; + + /* rx_copybreak may poke hole here and there. */ + if (sp->RxBuff[entry]) + continue; + + /* Generate a new receive buffer to replace the + * current buffer (which will be released by the + * Linux system). + */ + if (ipg_get_rxbuff(dev, entry) < 0) { + IPG_DEBUG_MSG("Cannot allocate new Rx buffer.\n"); + + break; + } + + /* Reset the RFS field. */ + sp->rxd[entry].rfs = 0x0000000000000000; + } + sp->rx_dirty = dirty; + + return 0; +} + +#ifdef JUMBO_FRAME + +/* use jumboindex and jumbosize to control jumbo frame status + initial status is jumboindex=-1 and jumbosize=0 + 1. jumboindex = -1 and jumbosize=0 : previous jumbo frame has been done. + 2. jumboindex != -1 and jumbosize != 0 : jumbo frame is not over size and receiving + 3. 
jumboindex = -1 and jumbosize != 0 : jumbo frame is over size, already dump + previous receiving and need to continue dumping the current one +*/ +enum { + NormalPacket, + ErrorPacket +}; + +enum { + Frame_NoStart_NoEnd = 0, + Frame_WithStart = 1, + Frame_WithEnd = 10, + Frame_WithStart_WithEnd = 11 +}; + +inline void ipg_nic_rx_free_skb(struct net_device *dev) +{ + struct ipg_nic_private *sp = netdev_priv(dev); + unsigned int entry = sp->rx_current % IPG_RFDLIST_LENGTH; + + if (sp->RxBuff[entry]) { + struct ipg_rx *rxfd = sp->rxd + entry; + + pci_unmap_single(sp->pdev, + le64_to_cpu(rxfd->frag_info & ~IPG_RFI_FRAGLEN), + sp->rx_buf_sz, PCI_DMA_FROMDEVICE); + IPG_DEV_KFREE_SKB(sp->RxBuff[entry]); + sp->RxBuff[entry] = NULL; + } +} + +inline int ipg_nic_rx_check_frame_type(struct net_device *dev) +{ + struct ipg_nic_private *sp = netdev_priv(dev); + struct ipg_rx *rxfd = sp->rxd + (sp->rx_current % IPG_RFDLIST_LENGTH); + int type = Frame_NoStart_NoEnd; + + if (le64_to_cpu(rxfd->rfs) & IPG_RFS_FRAMESTART) + type += Frame_WithStart; + if (le64_to_cpu(rxfd->rfs) & IPG_RFS_FRAMEEND) + type += Frame_WithEnd; + return type; +} + +inline int ipg_nic_rx_check_error(struct net_device *dev) +{ + struct ipg_nic_private *sp = netdev_priv(dev); + unsigned int entry = sp->rx_current % IPG_RFDLIST_LENGTH; + struct ipg_rx *rxfd = sp->rxd + entry; + + if (IPG_DROP_ON_RX_ETH_ERRORS && (le64_to_cpu(rxfd->rfs) & + (IPG_RFS_RXFIFOOVERRUN | IPG_RFS_RXRUNTFRAME | + IPG_RFS_RXALIGNMENTERROR | IPG_RFS_RXFCSERROR | + IPG_RFS_RXOVERSIZEDFRAME | IPG_RFS_RXLENGTHERROR))) { + IPG_DEBUG_MSG("Rx error, RFS = %16.16lx\n", + (unsigned long) rxfd->rfs); + + /* Increment general receive error statistic. */ + sp->stats.rx_errors++; + + /* Increment detailed receive error statistics. */ + if (le64_to_cpu(rxfd->rfs) & IPG_RFS_RXFIFOOVERRUN) { + IPG_DEBUG_MSG("RX FIFO overrun occured.\n"); + + sp->stats.rx_fifo_errors++; + } + + if (le64_to_cpu(rxfd->rfs) & IPG_RFS_RXRUNTFRAME) { + IPG_DEBUG_MSG("RX runt occured.\n"); + sp->stats.rx_length_errors++; + } + + /* Do nothing for IPG_RFS_RXOVERSIZEDFRAME, + * error count handled by a IPG statistic register. + */ + + if (le64_to_cpu(rxfd->rfs) & IPG_RFS_RXALIGNMENTERROR) { + IPG_DEBUG_MSG("RX alignment error occured.\n"); + sp->stats.rx_frame_errors++; + } + + /* Do nothing for IPG_RFS_RXFCSERROR, error count + * handled by a IPG statistic register. + */ + + /* Free the memory associated with the RX + * buffer since it is erroneous and we will + * not pass it to higher layer processes. 
+ */ + if (sp->RxBuff[entry]) { + pci_unmap_single(sp->pdev, + le64_to_cpu(rxfd->frag_info & ~IPG_RFI_FRAGLEN), + sp->rx_buf_sz, PCI_DMA_FROMDEVICE); + + IPG_DEV_KFREE_SKB(sp->RxBuff[entry]); + sp->RxBuff[entry] = NULL; + } + return ErrorPacket; + } + return NormalPacket; +} + +static void ipg_nic_rx_with_start_and_end(struct net_device *dev, + struct ipg_nic_private *sp, + struct ipg_rx *rxfd, unsigned entry) +{ + struct SJumbo *jumbo = &sp->Jumbo; + struct sk_buff *skb; + int framelen; + + if (jumbo->FoundStart) { + IPG_DEV_KFREE_SKB(jumbo->skb); + jumbo->FoundStart = 0; + jumbo->CurrentSize = 0; + jumbo->skb = NULL; + } + + // 1: found error, 0 no error + if (ipg_nic_rx_check_error(dev) != NormalPacket) + return; + + skb = sp->RxBuff[entry]; + if (!skb) + return; + + // accept this frame and send to upper layer + framelen = le64_to_cpu(rxfd->rfs) & IPG_RFS_RXFRAMELEN; + if (framelen > IPG_RXFRAG_SIZE) + framelen = IPG_RXFRAG_SIZE; + + skb_put(skb, framelen); + skb->protocol = eth_type_trans(skb, dev); + skb->ip_summed = CHECKSUM_NONE; + netif_rx(skb); + dev->last_rx = jiffies; + sp->RxBuff[entry] = NULL; +} + +static void ipg_nic_rx_with_start(struct net_device *dev, + struct ipg_nic_private *sp, + struct ipg_rx *rxfd, unsigned entry) +{ + struct SJumbo *jumbo = &sp->Jumbo; + struct pci_dev *pdev = sp->pdev; + struct sk_buff *skb; + + // 1: found error, 0 no error + if (ipg_nic_rx_check_error(dev) != NormalPacket) + return; + + // accept this frame and send to upper layer + skb = sp->RxBuff[entry]; + if (!skb) + return; + + if (jumbo->FoundStart) + IPG_DEV_KFREE_SKB(jumbo->skb); + + pci_unmap_single(pdev, le64_to_cpu(rxfd->frag_info & ~IPG_RFI_FRAGLEN), + sp->rx_buf_sz, PCI_DMA_FROMDEVICE); + + skb_put(skb, IPG_RXFRAG_SIZE); + + jumbo->FoundStart = 1; + jumbo->CurrentSize = IPG_RXFRAG_SIZE; + jumbo->skb = skb; + + sp->RxBuff[entry] = NULL; + dev->last_rx = jiffies; +} + +static void ipg_nic_rx_with_end(struct net_device *dev, + struct ipg_nic_private *sp, + struct ipg_rx *rxfd, unsigned entry) +{ + struct SJumbo *jumbo = &sp->Jumbo; + + //1: found error, 0 no error + if (ipg_nic_rx_check_error(dev) == NormalPacket) { + struct sk_buff *skb = sp->RxBuff[entry]; + + if (!skb) + return; + + if (jumbo->FoundStart) { + int framelen, endframelen; + + framelen = le64_to_cpu(rxfd->rfs) & IPG_RFS_RXFRAMELEN; + + endframeLen = framelen - jumbo->CurrentSize; + /* + if (framelen > IPG_RXFRAG_SIZE) + framelen=IPG_RXFRAG_SIZE; + */ + if (framelen > IPG_RXSUPPORT_SIZE) + IPG_DEV_KFREE_SKB(jumbo->skb); + else { + memcpy(skb_put(jumbo->skb, endframeLen), + skb->data, endframeLen); + + jumbo->skb->protocol = + eth_type_trans(jumbo->skb, dev); + + jumbo->skb->ip_summed = CHECKSUM_NONE; + netif_rx(jumbo->skb); + } + } + + dev->last_rx = jiffies; + jumbo->FoundStart = 0; + jumbo->CurrentSize = 0; + jumbo->skb = NULL; + + ipg_nic_rx_free_skb(dev); + } else { + IPG_DEV_KFREE_SKB(jumbo->skb); + jumbo->FoundStart = 0; + jumbo->CurrentSize = 0; + jumbo->skb = NULL; + } +} + +static void ipg_nic_rx_no_start_no_end(struct net_device *dev, + struct ipg_nic_private *sp, + struct ipg_rx *rxfd, unsigned entry) +{ + struct SJumbo *jumbo = &sp->Jumbo; + + //1: found error, 0 no error + if (ipg_nic_rx_check_error(dev) == NormalPacket) { + struct sk_buff *skb = sp->RxBuff[entry]; + + if (skb) { + if (jumbo->FoundStart) { + jumbo->CurrentSize += IPG_RXFRAG_SIZE; + if (jumbo->CurrentSize <= IPG_RXSUPPORT_SIZE) { + memcpy(skb_put(jumbo->skb, + IPG_RXFRAG_SIZE), + skb->data, IPG_RXFRAG_SIZE); + } + } + dev->last_rx = jiffies; 
+ ipg_nic_rx_free_skb(dev); + } + } else { + IPG_DEV_KFREE_SKB(jumbo->skb); + jumbo->FoundStart = 0; + jumbo->CurrentSize = 0; + jumbo->skb = NULL; + } +} + +static int ipg_nic_rx(struct net_device *dev) +{ + struct ipg_nic_private *sp = netdev_priv(dev); + unsigned int curr = sp->rx_current; + void __iomem *ioaddr = sp->ioaddr; + unsigned int i; + + IPG_DEBUG_MSG("_nic_rx\n"); + + for (i = 0; i < IPG_MAXRFDPROCESS_COUNT; i++, curr++) { + unsigned int entry = curr % IPG_RFDLIST_LENGTH; + struct ipg_rx *rxfd = sp->rxd + entry; + + if (!(rxfd->rfs & le64_to_cpu(IPG_RFS_RFDDONE))) + break; + + switch (ipg_nic_rx_check_frame_type(dev)) { + case Frame_WithStart_WithEnd: + ipg_nic_rx_with_start_and_end(dev, tp, rxfd, entry); + break; + case Frame_WithStart: + ipg_nic_rx_with_start(dev, tp, rxfd, entry); + break; + case Frame_WithEnd: + ipg_nic_rx_with_end(dev, tp, rxfd, entry); + break; + case Frame_NoStart_NoEnd: + ipg_nic_rx_no_start_no_end(dev, tp, rxfd, entry); + break; + } + } + + sp->rx_current = curr; + + if (i == IPG_MAXRFDPROCESS_COUNT) { + /* There are more RFDs to process, however the + * allocated amount of RFD processing time has + * expired. Assert Interrupt Requested to make + * sure we come back to process the remaining RFDs. + */ + ipg_w32(ipg_r32(ASIC_CTRL) | IPG_AC_INT_REQUEST, ASIC_CTRL); + } + + ipg_nic_rxrestore(dev); + + return 0; +} + +#else +static int ipg_nic_rx(struct net_device *dev) +{ + /* Transfer received Ethernet frames to higher network layers. */ + struct ipg_nic_private *sp = netdev_priv(dev); + unsigned int curr = sp->rx_current; + void __iomem *ioaddr = sp->ioaddr; + struct ipg_rx *rxfd; + unsigned int i; + + IPG_DEBUG_MSG("_nic_rx\n"); + +#define __RFS_MASK \ + cpu_to_le64(IPG_RFS_RFDDONE | IPG_RFS_FRAMESTART | IPG_RFS_FRAMEEND) + + for (i = 0; i < IPG_MAXRFDPROCESS_COUNT; i++, curr++) { + unsigned int entry = curr % IPG_RFDLIST_LENGTH; + struct sk_buff *skb = sp->RxBuff[entry]; + unsigned int framelen; + + rxfd = sp->rxd + entry; + + if (((rxfd->rfs & __RFS_MASK) != __RFS_MASK) || !skb) + break; + + /* Get received frame length. */ + framelen = le64_to_cpu(rxfd->rfs) & IPG_RFS_RXFRAMELEN; + + /* Check for jumbo frame arrival with too small + * RXFRAG_SIZE. + */ + if (framelen > IPG_RXFRAG_SIZE) { + IPG_DEBUG_MSG + ("RFS FrameLen > allocated fragment size.\n"); + + framelen = IPG_RXFRAG_SIZE; + } + + if ((IPG_DROP_ON_RX_ETH_ERRORS && (le64_to_cpu(rxfd->rfs & + (IPG_RFS_RXFIFOOVERRUN | IPG_RFS_RXRUNTFRAME | + IPG_RFS_RXALIGNMENTERROR | IPG_RFS_RXFCSERROR | + IPG_RFS_RXOVERSIZEDFRAME | IPG_RFS_RXLENGTHERROR))))) { + + IPG_DEBUG_MSG("Rx error, RFS = %16.16lx\n", + (unsigned long int) rxfd->rfs); + + /* Increment general receive error statistic. */ + sp->stats.rx_errors++; + + /* Increment detailed receive error statistics. */ + if (le64_to_cpu(rxfd->rfs & IPG_RFS_RXFIFOOVERRUN)) { + IPG_DEBUG_MSG("RX FIFO overrun occured.\n"); + sp->stats.rx_fifo_errors++; + } + + if (le64_to_cpu(rxfd->rfs & IPG_RFS_RXRUNTFRAME)) { + IPG_DEBUG_MSG("RX runt occured.\n"); + sp->stats.rx_length_errors++; + } + + if (le64_to_cpu(rxfd->rfs & IPG_RFS_RXOVERSIZEDFRAME)) ; + /* Do nothing, error count handled by a IPG + * statistic register. + */ + + if (le64_to_cpu(rxfd->rfs & IPG_RFS_RXALIGNMENTERROR)) { + IPG_DEBUG_MSG("RX alignment error occured.\n"); + sp->stats.rx_frame_errors++; + } + + if (le64_to_cpu(rxfd->rfs & IPG_RFS_RXFCSERROR)) ; + /* Do nothing, error count handled by a IPG + * statistic register. 
+ */ + + /* Free the memory associated with the RX + * buffer since it is erroneous and we will + * not pass it to higher layer processes. + */ + if (skb) { + u64 info = rxfd->frag_info; + + pci_unmap_single(sp->pdev, + le64_to_cpu(info & ~IPG_RFI_FRAGLEN), + sp->rx_buf_sz, PCI_DMA_FROMDEVICE); + + IPG_DEV_KFREE_SKB(skb); + } + } else { + + /* Adjust the new buffer length to accomodate the size + * of the received frame. + */ + skb_put(skb, framelen); + + /* Set the buffer's protocol field to Ethernet. */ + skb->protocol = eth_type_trans(skb, dev); + + /* If the frame contains an IP/TCP/UDP frame, + * determine if upper layer must check IP/TCP/UDP + * checksums. + * + * NOTE: DO NOT RELY ON THE TCP/UDP CHECKSUM + * VERIFICATION FOR SILICON REVISIONS B3 + * AND EARLIER! + * + if ((le64_to_cpu(rxfd->rfs & + (IPG_RFS_TCPDETECTED | IPG_RFS_UDPDETECTED | + IPG_RFS_IPDETECTED))) && + !(le64_to_cpu(rxfd->rfs & + (IPG_RFS_TCPERROR | IPG_RFS_UDPERROR | + IPG_RFS_IPERROR)))) { + * Indicate IP checksums were performed + * by the IPG. + * + skb->ip_summed = CHECKSUM_UNNECESSARY; + } else + */ + { + /* The IPG encountered an error with (or + * there were no) IP/TCP/UDP checksums. + * This may or may not indicate an invalid + * IP/TCP/UDP frame was received. Let the + * upper layer decide. + */ + skb->ip_summed = CHECKSUM_NONE; + } + + /* Hand off frame for higher layer processing. + * The function netif_rx() releases the sk_buff + * when processing completes. + */ + netif_rx(skb); + + /* Record frame receive time (jiffies = Linux + * kernel current time stamp). + */ + dev->last_rx = jiffies; + } + + /* Assure RX buffer is not reused by IPG. */ + sp->RxBuff[entry] = NULL; + } + + /* + * If there are more RFDs to proces and the allocated amount of RFD + * processing time has expired, assert Interrupt Requested to make + * sure we come back to process the remaining RFDs. + */ + if (i == IPG_MAXRFDPROCESS_COUNT) + ipg_w32(ipg_r32(ASIC_CTRL) | IPG_AC_INT_REQUEST, ASIC_CTRL); + +#ifdef IPG_DEBUG + /* Check if the RFD list contained no receive frame data. */ + if (!i) + sp->EmptyRFDListCount++; +#endif + while ((le64_to_cpu(rxfd->rfs & IPG_RFS_RFDDONE)) && + !((le64_to_cpu(rxfd->rfs & IPG_RFS_FRAMESTART)) && + (le64_to_cpu(rxfd->rfs & IPG_RFS_FRAMEEND)))) { + unsigned int entry = curr++ % IPG_RFDLIST_LENGTH; + + rxfd = sp->rxd + entry; + + IPG_DEBUG_MSG("Frame requires multiple RFDs.\n"); + + /* An unexpected event, additional code needed to handle + * properly. So for the time being, just disregard the + * frame. + */ + + /* Free the memory associated with the RX + * buffer since it is erroneous and we will + * not pass it to higher layer processes. + */ + if (sp->RxBuff[entry]) { + pci_unmap_single(sp->pdev, + le64_to_cpu(rxfd->frag_info & ~IPG_RFI_FRAGLEN), + sp->rx_buf_sz, PCI_DMA_FROMDEVICE); + IPG_DEV_KFREE_SKB(sp->RxBuff[entry]); + } + + /* Assure RX buffer is not reused by IPG. */ + sp->RxBuff[entry] = NULL; + } + + sp->rx_current = curr; + + /* Check to see if there are a minimum number of used + * RFDs before restoring any (should improve performance.) 
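+ * Replenishment is batched: ipg_nic_rxrestore() is only called once at
+ * least IPG_MINUSEDRFDSTOFREE descriptors have been consumed, rather
+ * than after every received frame.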
+ */ + if ((curr - sp->rx_dirty) >= IPG_MINUSEDRFDSTOFREE) + ipg_nic_rxrestore(dev); + + return 0; +} +#endif + +static void ipg_reset_after_host_error(struct work_struct *work) +{ + struct ipg_nic_private *sp = + container_of(work, struct ipg_nic_private, task.work); + struct net_device *dev = sp->dev; + + IPG_DDEBUG_MSG("DMACtrl = %8.8x\n", ioread32(sp->ioaddr + IPG_DMACTRL)); + + /* + * Acknowledge HostError interrupt by resetting + * IPG DMA and HOST. + */ + ipg_reset(dev, IPG_AC_GLOBAL_RESET | IPG_AC_HOST | IPG_AC_DMA); + + init_rfdlist(dev); + init_tfdlist(dev); + + if (ipg_io_config(dev) < 0) { + printk(KERN_INFO "%s: Cannot recover from PCI error.\n", + dev->name); + schedule_delayed_work(&sp->task, HZ); + } +} + +static irqreturn_t ipg_interrupt_handler(int irq, void *dev_inst) +{ + struct net_device *dev = dev_inst; + struct ipg_nic_private *sp = netdev_priv(dev); + void __iomem *ioaddr = sp->ioaddr; + unsigned int handled = 0; + u16 status; + + IPG_DEBUG_MSG("_interrupt_handler\n"); + +#ifdef JUMBO_FRAME + ipg_nic_rxrestore(dev); +#endif + /* Get interrupt source information, and acknowledge + * some (i.e. TxDMAComplete, RxDMAComplete, RxEarly, + * IntRequested, MacControlFrame, LinkEvent) interrupts + * if issued. Also, all IPG interrupts are disabled by + * reading IntStatusAck. + */ + status = ipg_r16(INT_STATUS_ACK); + + IPG_DEBUG_MSG("IntStatusAck = %4.4x\n", status); + + /* Shared IRQ of remove event. */ + if (!(status & IPG_IS_RSVD_MASK)) + goto out_enable; + + handled = 1; + + if (unlikely(!netif_running(dev))) + goto out; + + spin_lock(&sp->lock); + + /* If RFDListEnd interrupt, restore all used RFDs. */ + if (status & IPG_IS_RFD_LIST_END) { + IPG_DEBUG_MSG("RFDListEnd Interrupt.\n"); + + /* The RFD list end indicates an RFD was encountered + * with a 0 NextPtr, or with an RFDDone bit set to 1 + * (indicating the RFD is not read for use by the + * IPG.) Try to restore all RFDs. + */ + ipg_nic_rxrestore(dev); + +#ifdef IPG_DEBUG + /* Increment the RFDlistendCount counter. */ + sp->RFDlistendCount++; +#endif + } + + /* If RFDListEnd, RxDMAPriority, RxDMAComplete, or + * IntRequested interrupt, process received frames. */ + if ((status & IPG_IS_RX_DMA_PRIORITY) || + (status & IPG_IS_RFD_LIST_END) || + (status & IPG_IS_RX_DMA_COMPLETE) || + (status & IPG_IS_INT_REQUESTED)) { +#ifdef IPG_DEBUG + /* Increment the RFD list checked counter if interrupted + * only to check the RFD list. */ + if (status & (~(IPG_IS_RX_DMA_PRIORITY | IPG_IS_RFD_LIST_END | + IPG_IS_RX_DMA_COMPLETE | IPG_IS_INT_REQUESTED) & + (IPG_IS_HOST_ERROR | IPG_IS_TX_DMA_COMPLETE | + IPG_IS_LINK_EVENT | IPG_IS_TX_COMPLETE | + IPG_IS_UPDATE_STATS))) + sp->RFDListCheckedCount++; +#endif + + ipg_nic_rx(dev); + } + + /* If TxDMAComplete interrupt, free used TFDs. */ + if (status & IPG_IS_TX_DMA_COMPLETE) + ipg_nic_txfree(dev); + + /* TxComplete interrupts indicate one of numerous actions. + * Determine what action to take based on TXSTATUS register. + */ + if (status & IPG_IS_TX_COMPLETE) + ipg_nic_txcleanup(dev); + + /* If UpdateStats interrupt, update Linux Ethernet statistics */ + if (status & IPG_IS_UPDATE_STATS) + ipg_nic_get_stats(dev); + + /* If HostError interrupt, reset IPG. */ + if (status & IPG_IS_HOST_ERROR) { + IPG_DDEBUG_MSG("HostError Interrupt\n"); + + schedule_delayed_work(&sp->task, 0); + } + + /* If LinkEvent interrupt, resolve autonegotiation. 
*/ + if (status & IPG_IS_LINK_EVENT) { + if (ipg_config_autoneg(dev) < 0) + printk(KERN_INFO "%s: Auto-negotiation error.\n", + dev->name); + } + + /* If MACCtrlFrame interrupt, do nothing. */ + if (status & IPG_IS_MAC_CTRL_FRAME) + IPG_DEBUG_MSG("MACCtrlFrame interrupt.\n"); + + /* If RxComplete interrupt, do nothing. */ + if (status & IPG_IS_RX_COMPLETE) + IPG_DEBUG_MSG("RxComplete interrupt.\n"); + + /* If RxEarly interrupt, do nothing. */ + if (status & IPG_IS_RX_EARLY) + IPG_DEBUG_MSG("RxEarly interrupt.\n"); + +out_enable: + /* Re-enable IPG interrupts. */ + ipg_w16(IPG_IE_TX_DMA_COMPLETE | IPG_IE_RX_DMA_COMPLETE | + IPG_IE_HOST_ERROR | IPG_IE_INT_REQUESTED | IPG_IE_TX_COMPLETE | + IPG_IE_LINK_EVENT | IPG_IE_UPDATE_STATS, INT_ENABLE); + + spin_unlock(&sp->lock); +out: + return IRQ_RETVAL(handled); +} + +static void ipg_rx_clear(struct ipg_nic_private *sp) +{ + unsigned int i; + + for (i = 0; i < IPG_RFDLIST_LENGTH; i++) { + if (sp->RxBuff[i]) { + struct ipg_rx *rxfd = sp->rxd + i; + + IPG_DEV_KFREE_SKB(sp->RxBuff[i]); + sp->RxBuff[i] = NULL; + pci_unmap_single(sp->pdev, + le64_to_cpu(rxfd->frag_info & ~IPG_RFI_FRAGLEN), + sp->rx_buf_sz, PCI_DMA_FROMDEVICE); + } + } +} + +static void ipg_tx_clear(struct ipg_nic_private *sp) +{ + unsigned int i; + + for (i = 0; i < IPG_TFDLIST_LENGTH; i++) { + if (sp->TxBuff[i]) { + struct ipg_tx *txfd = sp->txd + i; + + pci_unmap_single(sp->pdev, + le64_to_cpu(txfd->frag_info & ~IPG_TFI_FRAGLEN), + sp->TxBuff[i]->len, PCI_DMA_TODEVICE); + + IPG_DEV_KFREE_SKB(sp->TxBuff[i]); + + sp->TxBuff[i] = NULL; + } + } +} + +static int ipg_nic_open(struct net_device *dev) +{ + struct ipg_nic_private *sp = netdev_priv(dev); + void __iomem *ioaddr = sp->ioaddr; + struct pci_dev *pdev = sp->pdev; + int rc; + + IPG_DEBUG_MSG("_nic_open\n"); + + sp->rx_buf_sz = IPG_RXSUPPORT_SIZE; + + /* Check for interrupt line conflicts, and request interrupt + * line for IPG. + * + * IMPORTANT: Disable IPG interrupts prior to registering + * IRQ. + */ + ipg_w16(0x0000, INT_ENABLE); + + /* Register the interrupt line to be used by the IPG within + * the Linux system. + */ + rc = request_irq(pdev->irq, &ipg_interrupt_handler, IRQF_SHARED, + dev->name, dev); + if (rc < 0) { + printk(KERN_INFO "%s: Error when requesting interrupt.\n", + dev->name); + goto out; + } + + dev->irq = pdev->irq; + + rc = -ENOMEM; + + sp->rxd = dma_alloc_coherent(&pdev->dev, IPG_RX_RING_BYTES, + &sp->rxd_map, GFP_KERNEL); + if (!sp->rxd) + goto err_free_irq_0; + + sp->txd = dma_alloc_coherent(&pdev->dev, IPG_TX_RING_BYTES, + &sp->txd_map, GFP_KERNEL); + if (!sp->txd) + goto err_free_rx_1; + + rc = init_rfdlist(dev); + if (rc < 0) { + printk(KERN_INFO "%s: Error during configuration.\n", + dev->name); + goto err_free_tx_2; + } + + init_tfdlist(dev); + + rc = ipg_io_config(dev); + if (rc < 0) { + printk(KERN_INFO "%s: Error during configuration.\n", + dev->name); + goto err_release_tfdlist_3; + } + + /* Resolve autonegotiation. */ + if (ipg_config_autoneg(dev) < 0) + printk(KERN_INFO "%s: Auto-negotiation error.\n", dev->name); + +#ifdef JUMBO_FRAME + /* initialize JUMBO Frame control variable */ + sp->Jumbo.FoundStart = 0; + sp->Jumbo.CurrentSize = 0; + sp->Jumbo.skb = 0; + dev->mtu = IPG_TXFRAG_SIZE; +#endif + + /* Enable transmit and receive operation of the IPG. 
*/ + ipg_w32((ipg_r32(MAC_CTRL) | IPG_MC_RX_ENABLE | IPG_MC_TX_ENABLE) & + IPG_MC_RSVD_MASK, MAC_CTRL); + + netif_start_queue(dev); +out: + return rc; + +err_release_tfdlist_3: + ipg_tx_clear(sp); + ipg_rx_clear(sp); +err_free_tx_2: + dma_free_coherent(&pdev->dev, IPG_TX_RING_BYTES, sp->txd, sp->txd_map); +err_free_rx_1: + dma_free_coherent(&pdev->dev, IPG_RX_RING_BYTES, sp->rxd, sp->rxd_map); +err_free_irq_0: + free_irq(pdev->irq, dev); + goto out; +} + +static int ipg_nic_stop(struct net_device *dev) +{ + struct ipg_nic_private *sp = netdev_priv(dev); + void __iomem *ioaddr = sp->ioaddr; + struct pci_dev *pdev = sp->pdev; + + IPG_DEBUG_MSG("_nic_stop\n"); + + netif_stop_queue(dev); + + IPG_DDEBUG_MSG("RFDlistendCount = %i\n", sp->RFDlistendCount); + IPG_DDEBUG_MSG("RFDListCheckedCount = %i\n", sp->rxdCheckedCount); + IPG_DDEBUG_MSG("EmptyRFDListCount = %i\n", sp->EmptyRFDListCount); + IPG_DUMPTFDLIST(dev); + + do { + (void) ipg_r16(INT_STATUS_ACK); + + ipg_reset(dev, IPG_AC_GLOBAL_RESET | IPG_AC_HOST | IPG_AC_DMA); + + synchronize_irq(pdev->irq); + } while (ipg_r16(INT_ENABLE) & IPG_IE_RSVD_MASK); + + ipg_rx_clear(sp); + + ipg_tx_clear(sp); + + pci_free_consistent(pdev, IPG_RX_RING_BYTES, sp->rxd, sp->rxd_map); + pci_free_consistent(pdev, IPG_TX_RING_BYTES, sp->txd, sp->txd_map); + + free_irq(pdev->irq, dev); + + return 0; +} + +static int ipg_nic_hard_start_xmit(struct sk_buff *skb, struct net_device *dev) +{ + struct ipg_nic_private *sp = netdev_priv(dev); + void __iomem *ioaddr = sp->ioaddr; + unsigned int entry = sp->tx_current % IPG_TFDLIST_LENGTH; + unsigned long flags; + struct ipg_tx *txfd; + + IPG_DDEBUG_MSG("_nic_hard_start_xmit\n"); + + /* If in 10Mbps mode, stop the transmit queue so + * no more transmit frames are accepted. + */ + if (sp->tenmbpsmode) + netif_stop_queue(dev); + + if (sp->ResetCurrentTFD) { + sp->ResetCurrentTFD = 0; + entry = 0; + } + + txfd = sp->txd + entry; + + sp->TxBuff[entry] = skb; + + /* Clear all TFC fields, except TFDDONE. */ + txfd->tfc = cpu_to_le64(IPG_TFC_TFDDONE); + + /* Specify the TFC field within the TFD. */ + txfd->tfc |= cpu_to_le64(IPG_TFC_WORDALIGNDISABLED | + (IPG_TFC_FRAMEID & cpu_to_le64(sp->tx_current)) | + (IPG_TFC_FRAGCOUNT & (1 << 24))); + + /* Request TxComplete interrupts at an interval defined + * by the constant IPG_FRAMESBETWEENTXCOMPLETES. + * Request TxComplete interrupt for every frame + * if in 10Mbps mode to accomodate problem with 10Mbps + * processing. + */ + if (sp->tenmbpsmode) + txfd->tfc |= cpu_to_le64(IPG_TFC_TXINDICATE); + else if (!((sp->tx_current - sp->tx_dirty + 1) > + IPG_FRAMESBETWEENTXDMACOMPLETES)) { + txfd->tfc |= cpu_to_le64(IPG_TFC_TXDMAINDICATE); + } + /* Based on compilation option, determine if FCS is to be + * appended to transmit frame by IPG. + */ + if (!(IPG_APPEND_FCS_ON_TX)) + txfd->tfc |= cpu_to_le64(IPG_TFC_FCSAPPENDDISABLE); + + /* Based on compilation option, determine if IP, TCP and/or + * UDP checksums are to be added to transmit frame by IPG. + */ + if (IPG_ADD_IPCHECKSUM_ON_TX) + txfd->tfc |= cpu_to_le64(IPG_TFC_IPCHECKSUMENABLE); + + if (IPG_ADD_TCPCHECKSUM_ON_TX) + txfd->tfc |= cpu_to_le64(IPG_TFC_TCPCHECKSUMENABLE); + + if (IPG_ADD_UDPCHECKSUM_ON_TX) + txfd->tfc |= cpu_to_le64(IPG_TFC_UDPCHECKSUMENABLE); + + /* Based on compilation option, determine if VLAN tag info is to be + * inserted into transmit frame by IPG. 
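+ * When enabled, the tag is encoded directly in the TFC word: the 12-bit
+ * VID at bits 32..43, the CFI bit at bit 44 and the 3-bit user priority
+ * at bits 45..47, matching the shifts used below.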
+ */ + if (IPG_INSERT_MANUAL_VLAN_TAG) { + txfd->tfc |= cpu_to_le64(IPG_TFC_VLANTAGINSERT | + ((u64) IPG_MANUAL_VLAN_VID << 32) | + ((u64) IPG_MANUAL_VLAN_CFI << 44) | + ((u64) IPG_MANUAL_VLAN_USERPRIORITY << 45)); + } + + /* The fragment start location within system memory is defined + * by the sk_buff structure's data field. The physical address + * of this location within the system's virtual memory space + * is determined using the IPG_HOST2BUS_MAP function. + */ + txfd->frag_info = cpu_to_le64(pci_map_single(sp->pdev, skb->data, + skb->len, PCI_DMA_TODEVICE)); + + /* The length of the fragment within system memory is defined by + * the sk_buff structure's len field. + */ + txfd->frag_info |= cpu_to_le64(IPG_TFI_FRAGLEN & + ((u64) (skb->len & 0xffff) << 48)); + + /* Clear the TFDDone bit last to indicate the TFD is ready + * for transfer to the IPG. + */ + txfd->tfc &= cpu_to_le64(~IPG_TFC_TFDDONE); + + spin_lock_irqsave(&sp->lock, flags); + + sp->tx_current++; + + mmiowb(); + + ipg_w32(IPG_DC_TX_DMA_POLL_NOW, DMA_CTRL); + + if (sp->tx_current == (sp->tx_dirty + IPG_TFDLIST_LENGTH)) + netif_wake_queue(dev); + + spin_unlock_irqrestore(&sp->lock, flags); + + return NETDEV_TX_OK; +} + +static void ipg_set_phy_default_param(unsigned char rev, + struct net_device *dev, int phy_address) +{ + unsigned short length; + unsigned char revision; + unsigned short *phy_param; + unsigned short address, value; + + phy_param = &DefaultPhyParam[0]; + length = *phy_param & 0x00FF; + revision = (unsigned char)((*phy_param) >> 8); + phy_param++; + while (length != 0) { + if (rev == revision) { + while (length > 1) { + address = *phy_param; + value = *(phy_param + 1); + phy_param += 2; + mdio_write(dev, phy_address, address, value); + length -= 4; + } + break; + } else { + phy_param += length / 2; + length = *phy_param & 0x00FF; + revision = (unsigned char)((*phy_param) >> 8); + phy_param++; + } + } +} + +/* JES20040127EEPROM */ +static int read_eeprom(struct net_device *dev, int eep_addr) +{ + void __iomem *ioaddr = ipg_ioaddr(dev); + unsigned int i; + int ret = 0; + u16 value; + + value = IPG_EC_EEPROM_READOPCODE | (eep_addr & 0xff); + ipg_w16(value, EEPROM_CTRL); + + for (i = 0; i < 1000; i++) { + u16 data; + + mdelay(10); + data = ipg_r16(EEPROM_CTRL); + if (!(data & IPG_EC_EEPROM_BUSY)) { + ret = ipg_r16(EEPROM_DATA); + break; + } + } + return ret; +} + +static void ipg_init_mii(struct net_device *dev) +{ + struct ipg_nic_private *sp = netdev_priv(dev); + struct mii_if_info *mii_if = &sp->mii_if; + int phyaddr; + + mii_if->dev = dev; + mii_if->mdio_read = mdio_read; + mii_if->mdio_write = mdio_write; + mii_if->phy_id_mask = 0x1f; + mii_if->reg_num_mask = 0x1f; + + mii_if->phy_id = phyaddr = ipg_find_phyaddr(dev); + + if (phyaddr != 0x1f) { + u16 mii_phyctrl, mii_1000cr; + u8 revisionid = 0; + + mii_1000cr = mdio_read(dev, phyaddr, MII_CTRL1000); + mii_1000cr |= ADVERTISE_1000FULL | ADVERTISE_1000HALF | + GMII_PHY_1000BASETCONTROL_PreferMaster; + mdio_write(dev, phyaddr, MII_CTRL1000, mii_1000cr); + + mii_phyctrl = mdio_read(dev, phyaddr, MII_BMCR); + + /* Set default phyparam */ + pci_read_config_byte(sp->pdev, PCI_REVISION_ID, &revisionid); + ipg_set_phy_default_param(revisionid, dev, phyaddr); + + /* Reset PHY */ + mii_phyctrl |= BMCR_RESET | BMCR_ANRESTART; + mdio_write(dev, phyaddr, MII_BMCR, mii_phyctrl); + + } +} + +static int ipg_hw_init(struct net_device *dev) +{ + struct ipg_nic_private *sp = netdev_priv(dev); + void __iomem *ioaddr = sp->ioaddr; + unsigned int i; + int rc; + + /* Read/Write 
and Reset EEPROM Value Jesse20040128EEPROM_VALUE */ + /* Read LED Mode Configuration from EEPROM */ + sp->LED_Mode = read_eeprom(dev, 6); + + /* Reset all functions within the IPG. Do not assert + * RST_OUT as not compatible with some PHYs. + */ + rc = ipg_reset(dev, IPG_RESET_MASK); + if (rc < 0) + goto out; + + ipg_init_mii(dev); + + /* Read MAC Address from EEPROM */ + for (i = 0; i < 3; i++) + sp->station_addr[i] = read_eeprom(dev, 16 + i); + + for (i = 0; i < 3; i++) + ipg_w16(sp->station_addr[i], STATION_ADDRESS_0 + 2*i); + + /* Set station address in ethernet_device structure. */ + dev->dev_addr[0] = ipg_r16(STATION_ADDRESS_0) & 0x00ff; + dev->dev_addr[1] = (ipg_r16(STATION_ADDRESS_0) & 0xff00) >> 8; + dev->dev_addr[2] = ipg_r16(STATION_ADDRESS_1) & 0x00ff; + dev->dev_addr[3] = (ipg_r16(STATION_ADDRESS_1) & 0xff00) >> 8; + dev->dev_addr[4] = ipg_r16(STATION_ADDRESS_2) & 0x00ff; + dev->dev_addr[5] = (ipg_r16(STATION_ADDRESS_2) & 0xff00) >> 8; +out: + return rc; +} + +static int ipg_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) +{ + struct ipg_nic_private *sp = netdev_priv(dev); + int rc; + + mutex_lock(&sp->mii_mutex); + rc = generic_mii_ioctl(&sp->mii_if, if_mii(ifr), cmd, NULL); + mutex_unlock(&sp->mii_mutex); + + return rc; +} + +static int ipg_nic_change_mtu(struct net_device *dev, int new_mtu) +{ + /* Function to accomodate changes to Maximum Transfer Unit + * (or MTU) of IPG NIC. Cannot use default function since + * the default will not allow for MTU > 1500 bytes. + */ + + IPG_DEBUG_MSG("_nic_change_mtu\n"); + + /* Check that the new MTU value is between 68 (14 byte header, 46 + * byte payload, 4 byte FCS) and IPG_MAX_RXFRAME_SIZE, which + * corresponds to the MAXFRAMESIZE register in the IPG. + */ + if ((new_mtu < 68) || (new_mtu > IPG_MAX_RXFRAME_SIZE)) + return -EINVAL; + + dev->mtu = new_mtu; + + return 0; +} + +static int ipg_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) +{ + struct ipg_nic_private *sp = netdev_priv(dev); + int rc; + + mutex_lock(&sp->mii_mutex); + rc = mii_ethtool_gset(&sp->mii_if, cmd); + mutex_unlock(&sp->mii_mutex); + + return rc; +} + +static int ipg_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) +{ + struct ipg_nic_private *sp = netdev_priv(dev); + int rc; + + mutex_lock(&sp->mii_mutex); + rc = mii_ethtool_sset(&sp->mii_if, cmd); + mutex_unlock(&sp->mii_mutex); + + return rc; +} + +static int ipg_nway_reset(struct net_device *dev) +{ + struct ipg_nic_private *sp = netdev_priv(dev); + int rc; + + mutex_lock(&sp->mii_mutex); + rc = mii_nway_restart(&sp->mii_if); + mutex_unlock(&sp->mii_mutex); + + return rc; +} + +static struct ethtool_ops ipg_ethtool_ops = { + .get_settings = ipg_get_settings, + .set_settings = ipg_set_settings, + .nway_reset = ipg_nway_reset, +}; + +static void ipg_remove(struct pci_dev *pdev) +{ + struct net_device *dev = pci_get_drvdata(pdev); + struct ipg_nic_private *sp = netdev_priv(dev); + + IPG_DEBUG_MSG("_remove\n"); + + /* Un-register Ethernet device. 
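ipg_hw_init() above rebuilds the 6-byte MAC address from three 16-bit station-address words, low byte first. A standalone illustration with invented EEPROM contents:

    u16 words[3] = { 0x2301, 0x6745, 0xab89 };      /* hypothetical words 0x10..0x12 */
    u8 mac[6];
    int i;

    for (i = 0; i < 3; i++) {
            mac[2 * i]     = words[i] & 0x00ff;         /* low byte first */
            mac[2 * i + 1] = (words[i] & 0xff00) >> 8;  /* then high byte */
    }
    /* mac[] now holds 01:23:45:67:89:ab */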
*/ + unregister_netdev(dev); + + pci_iounmap(pdev, sp->ioaddr); + + pci_release_regions(pdev); + + free_netdev(dev); + pci_disable_device(pdev); + pci_set_drvdata(pdev, NULL); +} + +static int __devinit ipg_probe(struct pci_dev *pdev, + const struct pci_device_id *id) +{ + unsigned int i = id->driver_data; + struct ipg_nic_private *sp; + struct net_device *dev; + void __iomem *ioaddr; + int rc; + + rc = pci_enable_device(pdev); + if (rc < 0) + goto out; + + printk(KERN_INFO "%s: %s\n", pci_name(pdev), ipg_brand_name[i]); + + pci_set_master(pdev); + + rc = pci_set_dma_mask(pdev, DMA_40BIT_MASK); + if (rc < 0) { + rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK); + if (rc < 0) { + printk(KERN_ERR "%s: DMA config failed.\n", + pci_name(pdev)); + goto err_disable_0; + } + } + + /* + * Initialize net device. + */ + dev = alloc_etherdev(sizeof(struct ipg_nic_private)); + if (!dev) { + printk(KERN_ERR "%s: alloc_etherdev failed\n", pci_name(pdev)); + rc = -ENOMEM; + goto err_disable_0; + } + + sp = netdev_priv(dev); + spin_lock_init(&sp->lock); + mutex_init(&sp->mii_mutex); + + /* Declare IPG NIC functions for Ethernet device methods. + */ + dev->open = &ipg_nic_open; + dev->stop = &ipg_nic_stop; + dev->hard_start_xmit = &ipg_nic_hard_start_xmit; + dev->get_stats = &ipg_nic_get_stats; + dev->set_multicast_list = &ipg_nic_set_multicast_list; + dev->do_ioctl = ipg_ioctl; + dev->tx_timeout = ipg_tx_timeout; + dev->change_mtu = &ipg_nic_change_mtu; + + SET_NETDEV_DEV(dev, &pdev->dev); + SET_ETHTOOL_OPS(dev, &ipg_ethtool_ops); + + rc = pci_request_regions(pdev, DRV_NAME); + if (rc) + goto err_free_dev_1; + + ioaddr = pci_iomap(pdev, 1, pci_resource_len(pdev, 1)); + if (!ioaddr) { + printk(KERN_ERR "%s cannot map MMIO\n", pci_name(pdev)); + rc = -EIO; + goto err_release_regions_2; + } + + /* Save the pointer to the PCI device information. */ + sp->ioaddr = ioaddr; + sp->pdev = pdev; + sp->dev = dev; + + INIT_DELAYED_WORK(&sp->task, ipg_reset_after_host_error); + + pci_set_drvdata(pdev, dev); + + rc = ipg_hw_init(dev); + if (rc < 0) + goto err_unmap_3; + + rc = register_netdev(dev); + if (rc < 0) + goto err_unmap_3; + + printk(KERN_INFO "Ethernet device registered as: %s\n", dev->name); +out: + return rc; + +err_unmap_3: + pci_iounmap(pdev, ioaddr); +err_release_regions_2: + pci_release_regions(pdev); +err_free_dev_1: + free_netdev(dev); +err_disable_0: + pci_disable_device(pdev); + goto out; +} + +static struct pci_driver ipg_pci_driver = { + .name = IPG_DRIVER_NAME, + .id_table = ipg_pci_tbl, + .probe = ipg_probe, + .remove = __devexit_p(ipg_remove), +}; + +static int __init ipg_init_module(void) +{ + return pci_register_driver(&ipg_pci_driver); +} + +static void __exit ipg_exit_module(void) +{ + pci_unregister_driver(&ipg_pci_driver); +} + +module_init(ipg_init_module); +module_exit(ipg_exit_module); diff --git a/drivers/net/ipg.h b/drivers/net/ipg.h new file mode 100644 index 0000000000000000000000000000000000000000..1952d0dfd314b1c0375fa0d351a1b4a3b1f5a29d --- /dev/null +++ b/drivers/net/ipg.h @@ -0,0 +1,856 @@ +/* + * + * ipg.h + * + * Include file for Gigabit Ethernet device driver for Network + * Interface Cards (NICs) utilizing the Tamarack Microelectronics + * Inc. IPG Gigabit or Triple Speed Ethernet Media Access + * Controller. + * + * Craig Rich + * Sundance Technology, Inc. 
+ * 1485 Saratoga Avenue + * Suite 200 + * San Jose, CA 95129 + * 408 873 4117 + * www.sundanceti.com + * craig_rich@sundanceti.com + */ +#ifndef __LINUX_IPG_H +#define __LINUX_IPG_H + +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +/*#include */ + +#define DrvVer "2.09d" + +#define IPG_DEV_KFREE_SKB(skb) dev_kfree_skb_irq(skb) + +/* + * Constants + */ + +/* GMII based PHY IDs */ +#define NS 0x2000 +#define MARVELL 0x0141 +#define ICPLUS_PHY 0x243 + +/* NIC Physical Layer Device MII register fields. */ +#define MII_PHY_SELECTOR_IEEE8023 0x0001 +#define MII_PHY_TECHABILITYFIELD 0x1FE0 + +/* GMII_PHY_1000 need to set to prefer master */ +#define GMII_PHY_1000BASETCONTROL_PreferMaster 0x0400 + +/* NIC Physical Layer Device GMII constants. */ +#define GMII_PREAMBLE 0xFFFFFFFF +#define GMII_ST 0x1 +#define GMII_READ 0x2 +#define GMII_WRITE 0x1 +#define GMII_TA_READ_MASK 0x1 +#define GMII_TA_WRITE 0x2 + +/* I/O register offsets. */ +enum ipg_regs { + DMA_CTRL = 0x00, + RX_DMA_STATUS = 0x08, // Unused + reserved + TFD_LIST_PTR_0 = 0x10, + TFD_LIST_PTR_1 = 0x14, + TX_DMA_BURST_THRESH = 0x18, + TX_DMA_URGENT_THRESH = 0x19, + TX_DMA_POLL_PERIOD = 0x1a, + RFD_LIST_PTR_0 = 0x1c, + RFD_LIST_PTR_1 = 0x20, + RX_DMA_BURST_THRESH = 0x24, + RX_DMA_URGENT_THRESH = 0x25, + RX_DMA_POLL_PERIOD = 0x26, + DEBUG_CTRL = 0x2c, + ASIC_CTRL = 0x30, + FIFO_CTRL = 0x38, // Unused + FLOW_OFF_THRESH = 0x3c, + FLOW_ON_THRESH = 0x3e, + EEPROM_DATA = 0x48, + EEPROM_CTRL = 0x4a, + EXPROM_ADDR = 0x4c, // Unused + EXPROM_DATA = 0x50, // Unused + WAKE_EVENT = 0x51, // Unused + COUNTDOWN = 0x54, // Unused + INT_STATUS_ACK = 0x5a, + INT_ENABLE = 0x5c, + INT_STATUS = 0x5e, // Unused + TX_STATUS = 0x60, + MAC_CTRL = 0x6c, + VLAN_TAG = 0x70, // Unused + PHY_SET = 0x75, // JES20040127EEPROM + PHY_CTRL = 0x76, + STATION_ADDRESS_0 = 0x78, + STATION_ADDRESS_1 = 0x7a, + STATION_ADDRESS_2 = 0x7c, + MAX_FRAME_SIZE = 0x86, + RECEIVE_MODE = 0x88, + HASHTABLE_0 = 0x8c, + HASHTABLE_1 = 0x90, + RMON_STATISTICS_MASK = 0x98, + STATISTICS_MASK = 0x9c, + RX_JUMBO_FRAMES = 0xbc, // Unused + TCP_CHECKSUM_ERRORS = 0xc0, // Unused + IP_CHECKSUM_ERRORS = 0xc2, // Unused + UDP_CHECKSUM_ERRORS = 0xc4, // Unused + TX_JUMBO_FRAMES = 0xf4 // Unused +}; + +/* Ethernet MIB statistic register offsets. */ +#define IPG_OCTETRCVOK 0xA8 +#define IPG_MCSTOCTETRCVDOK 0xAC +#define IPG_BCSTOCTETRCVOK 0xB0 +#define IPG_FRAMESRCVDOK 0xB4 +#define IPG_MCSTFRAMESRCVDOK 0xB8 +#define IPG_BCSTFRAMESRCVDOK 0xBE +#define IPG_MACCONTROLFRAMESRCVD 0xC6 +#define IPG_FRAMETOOLONGERRRORS 0xC8 +#define IPG_INRANGELENGTHERRORS 0xCA +#define IPG_FRAMECHECKSEQERRORS 0xCC +#define IPG_FRAMESLOSTRXERRORS 0xCE +#define IPG_OCTETXMTOK 0xD0 +#define IPG_MCSTOCTETXMTOK 0xD4 +#define IPG_BCSTOCTETXMTOK 0xD8 +#define IPG_FRAMESXMTDOK 0xDC +#define IPG_MCSTFRAMESXMTDOK 0xE0 +#define IPG_FRAMESWDEFERREDXMT 0xE4 +#define IPG_LATECOLLISIONS 0xE8 +#define IPG_MULTICOLFRAMES 0xEC +#define IPG_SINGLECOLFRAMES 0xF0 +#define IPG_BCSTFRAMESXMTDOK 0xF6 +#define IPG_CARRIERSENSEERRORS 0xF8 +#define IPG_MACCONTROLFRAMESXMTDOK 0xFA +#define IPG_FRAMESABORTXSCOLLS 0xFC +#define IPG_FRAMESWEXDEFERRAL 0xFE + +/* RMON statistic register offsets. 
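These MIB offsets are what the driver's statistics path (ipg_nic_get_stats, hooked up in ipg_probe) reads. As a hedged sketch only, not the driver's actual implementation, and with the per-counter register widths assumed rather than taken from the datasheet, folding a few of them into net_device_stats could look like:

    static void example_fold_stats(struct ipg_nic_private *sp)
    {
            void __iomem *ioaddr = sp->ioaddr;    /* ipg_r16()/ipg_r32() use this */

            sp->stats.rx_packets       += ipg_r32(IPG_FRAMESRCVDOK);
            sp->stats.tx_packets       += ipg_r32(IPG_FRAMESXMTDOK);
            sp->stats.rx_crc_errors    += ipg_r16(IPG_FRAMECHECKSEQERRORS);
            sp->stats.rx_missed_errors += ipg_r16(IPG_FRAMESLOSTRXERRORS);
    }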
*/ +#define IPG_ETHERSTATSCOLLISIONS 0x100 +#define IPG_ETHERSTATSOCTETSTRANSMIT 0x104 +#define IPG_ETHERSTATSPKTSTRANSMIT 0x108 +#define IPG_ETHERSTATSPKTS64OCTESTSTRANSMIT 0x10C +#define IPG_ETHERSTATSPKTS65TO127OCTESTSTRANSMIT 0x110 +#define IPG_ETHERSTATSPKTS128TO255OCTESTSTRANSMIT 0x114 +#define IPG_ETHERSTATSPKTS256TO511OCTESTSTRANSMIT 0x118 +#define IPG_ETHERSTATSPKTS512TO1023OCTESTSTRANSMIT 0x11C +#define IPG_ETHERSTATSPKTS1024TO1518OCTESTSTRANSMIT 0x120 +#define IPG_ETHERSTATSCRCALIGNERRORS 0x124 +#define IPG_ETHERSTATSUNDERSIZEPKTS 0x128 +#define IPG_ETHERSTATSFRAGMENTS 0x12C +#define IPG_ETHERSTATSJABBERS 0x130 +#define IPG_ETHERSTATSOCTETS 0x134 +#define IPG_ETHERSTATSPKTS 0x138 +#define IPG_ETHERSTATSPKTS64OCTESTS 0x13C +#define IPG_ETHERSTATSPKTS65TO127OCTESTS 0x140 +#define IPG_ETHERSTATSPKTS128TO255OCTESTS 0x144 +#define IPG_ETHERSTATSPKTS256TO511OCTESTS 0x148 +#define IPG_ETHERSTATSPKTS512TO1023OCTESTS 0x14C +#define IPG_ETHERSTATSPKTS1024TO1518OCTESTS 0x150 + +/* RMON statistic register equivalents. */ +#define IPG_ETHERSTATSMULTICASTPKTSTRANSMIT 0xE0 +#define IPG_ETHERSTATSBROADCASTPKTSTRANSMIT 0xF6 +#define IPG_ETHERSTATSMULTICASTPKTS 0xB8 +#define IPG_ETHERSTATSBROADCASTPKTS 0xBE +#define IPG_ETHERSTATSOVERSIZEPKTS 0xC8 +#define IPG_ETHERSTATSDROPEVENTS 0xCE + +/* Serial EEPROM offsets */ +#define IPG_EEPROM_CONFIGPARAM 0x00 +#define IPG_EEPROM_ASICCTRL 0x01 +#define IPG_EEPROM_SUBSYSTEMVENDORID 0x02 +#define IPG_EEPROM_SUBSYSTEMID 0x03 +#define IPG_EEPROM_STATIONADDRESS0 0x10 +#define IPG_EEPROM_STATIONADDRESS1 0x11 +#define IPG_EEPROM_STATIONADDRESS2 0x12 + +/* Register & data structure bit masks */ + +/* PCI register masks. */ + +/* IOBaseAddress */ +#define IPG_PIB_RSVD_MASK 0xFFFFFE01 +#define IPG_PIB_IOBASEADDRESS 0xFFFFFF00 +#define IPG_PIB_IOBASEADDRIND 0x00000001 + +/* MemBaseAddress */ +#define IPG_PMB_RSVD_MASK 0xFFFFFE07 +#define IPG_PMB_MEMBASEADDRIND 0x00000001 +#define IPG_PMB_MEMMAPTYPE 0x00000006 +#define IPG_PMB_MEMMAPTYPE0 0x00000002 +#define IPG_PMB_MEMMAPTYPE1 0x00000004 +#define IPG_PMB_MEMBASEADDRESS 0xFFFFFE00 + +/* ConfigStatus */ +#define IPG_CS_RSVD_MASK 0xFFB0 +#define IPG_CS_CAPABILITIES 0x0010 +#define IPG_CS_66MHZCAPABLE 0x0020 +#define IPG_CS_FASTBACK2BACK 0x0080 +#define IPG_CS_DATAPARITYREPORTED 0x0100 +#define IPG_CS_DEVSELTIMING 0x0600 +#define IPG_CS_SIGNALEDTARGETABORT 0x0800 +#define IPG_CS_RECEIVEDTARGETABORT 0x1000 +#define IPG_CS_RECEIVEDMASTERABORT 0x2000 +#define IPG_CS_SIGNALEDSYSTEMERROR 0x4000 +#define IPG_CS_DETECTEDPARITYERROR 0x8000 + +/* TFD data structure masks. 
*/ + +/* TFDList, TFC */ +#define IPG_TFC_RSVD_MASK 0x0000FFFF9FFFFFFF +#define IPG_TFC_FRAMEID 0x000000000000FFFF +#define IPG_TFC_WORDALIGN 0x0000000000030000 +#define IPG_TFC_WORDALIGNTODWORD 0x0000000000000000 +#define IPG_TFC_WORDALIGNTOWORD 0x0000000000020000 +#define IPG_TFC_WORDALIGNDISABLED 0x0000000000030000 +#define IPG_TFC_TCPCHECKSUMENABLE 0x0000000000040000 +#define IPG_TFC_UDPCHECKSUMENABLE 0x0000000000080000 +#define IPG_TFC_IPCHECKSUMENABLE 0x0000000000100000 +#define IPG_TFC_FCSAPPENDDISABLE 0x0000000000200000 +#define IPG_TFC_TXINDICATE 0x0000000000400000 +#define IPG_TFC_TXDMAINDICATE 0x0000000000800000 +#define IPG_TFC_FRAGCOUNT 0x000000000F000000 +#define IPG_TFC_VLANTAGINSERT 0x0000000010000000 +#define IPG_TFC_TFDDONE 0x0000000080000000 +#define IPG_TFC_VID 0x00000FFF00000000 +#define IPG_TFC_CFI 0x0000100000000000 +#define IPG_TFC_USERPRIORITY 0x0000E00000000000 + +/* TFDList, FragInfo */ +#define IPG_TFI_RSVD_MASK 0xFFFF00FFFFFFFFFF +#define IPG_TFI_FRAGADDR 0x000000FFFFFFFFFF +#define IPG_TFI_FRAGLEN 0xFFFF000000000000LL + +/* RFD data structure masks. */ + +/* RFDList, RFS */ +#define IPG_RFS_RSVD_MASK 0x0000FFFFFFFFFFFF +#define IPG_RFS_RXFRAMELEN 0x000000000000FFFF +#define IPG_RFS_RXFIFOOVERRUN 0x0000000000010000 +#define IPG_RFS_RXRUNTFRAME 0x0000000000020000 +#define IPG_RFS_RXALIGNMENTERROR 0x0000000000040000 +#define IPG_RFS_RXFCSERROR 0x0000000000080000 +#define IPG_RFS_RXOVERSIZEDFRAME 0x0000000000100000 +#define IPG_RFS_RXLENGTHERROR 0x0000000000200000 +#define IPG_RFS_VLANDETECTED 0x0000000000400000 +#define IPG_RFS_TCPDETECTED 0x0000000000800000 +#define IPG_RFS_TCPERROR 0x0000000001000000 +#define IPG_RFS_UDPDETECTED 0x0000000002000000 +#define IPG_RFS_UDPERROR 0x0000000004000000 +#define IPG_RFS_IPDETECTED 0x0000000008000000 +#define IPG_RFS_IPERROR 0x0000000010000000 +#define IPG_RFS_FRAMESTART 0x0000000020000000 +#define IPG_RFS_FRAMEEND 0x0000000040000000 +#define IPG_RFS_RFDDONE 0x0000000080000000 +#define IPG_RFS_TCI 0x0000FFFF00000000 + +/* RFDList, FragInfo */ +#define IPG_RFI_RSVD_MASK 0xFFFF00FFFFFFFFFF +#define IPG_RFI_FRAGADDR 0x000000FFFFFFFFFF +#define IPG_RFI_FRAGLEN 0xFFFF000000000000LL + +/* I/O Register masks. 
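The FragInfo masks just above encode a single descriptor fragment: the low 40 bits carry the bus address and the top 16 bits the length, which is why ipg_nic_hard_start_xmit() shifts skb->len left by 48 and the ring-clear helpers mask the length off before unmapping. A host-order illustration with made-up numbers:

    u64 addr = 0x0000001234567890ULL;                  /* fits in IPG_TFI_FRAGADDR */
    u64 len  = 1514;
    u64 frag_info = (addr & IPG_TFI_FRAGADDR) |
                    (IPG_TFI_FRAGLEN & (len << 48));   /* length in bits 63:48     */

    u64 unpacked_addr = frag_info & IPG_TFI_FRAGADDR;  /* == addr */
    u64 unpacked_len  = frag_info >> 48;               /* == 1514 */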
*/ + +/* RMON Statistics Mask */ +#define IPG_RZ_ALL 0x0FFFFFFF + +/* Statistics Mask */ +#define IPG_SM_ALL 0x0FFFFFFF +#define IPG_SM_OCTETRCVOK_FRAMESRCVDOK 0x00000001 +#define IPG_SM_MCSTOCTETRCVDOK_MCSTFRAMESRCVDOK 0x00000002 +#define IPG_SM_BCSTOCTETRCVDOK_BCSTFRAMESRCVDOK 0x00000004 +#define IPG_SM_RXJUMBOFRAMES 0x00000008 +#define IPG_SM_TCPCHECKSUMERRORS 0x00000010 +#define IPG_SM_IPCHECKSUMERRORS 0x00000020 +#define IPG_SM_UDPCHECKSUMERRORS 0x00000040 +#define IPG_SM_MACCONTROLFRAMESRCVD 0x00000080 +#define IPG_SM_FRAMESTOOLONGERRORS 0x00000100 +#define IPG_SM_INRANGELENGTHERRORS 0x00000200 +#define IPG_SM_FRAMECHECKSEQERRORS 0x00000400 +#define IPG_SM_FRAMESLOSTRXERRORS 0x00000800 +#define IPG_SM_OCTETXMTOK_FRAMESXMTOK 0x00001000 +#define IPG_SM_MCSTOCTETXMTOK_MCSTFRAMESXMTDOK 0x00002000 +#define IPG_SM_BCSTOCTETXMTOK_BCSTFRAMESXMTDOK 0x00004000 +#define IPG_SM_FRAMESWDEFERREDXMT 0x00008000 +#define IPG_SM_LATECOLLISIONS 0x00010000 +#define IPG_SM_MULTICOLFRAMES 0x00020000 +#define IPG_SM_SINGLECOLFRAMES 0x00040000 +#define IPG_SM_TXJUMBOFRAMES 0x00080000 +#define IPG_SM_CARRIERSENSEERRORS 0x00100000 +#define IPG_SM_MACCONTROLFRAMESXMTD 0x00200000 +#define IPG_SM_FRAMESABORTXSCOLLS 0x00400000 +#define IPG_SM_FRAMESWEXDEFERAL 0x00800000 + +/* Countdown */ +#define IPG_CD_RSVD_MASK 0x0700FFFF +#define IPG_CD_COUNT 0x0000FFFF +#define IPG_CD_COUNTDOWNSPEED 0x01000000 +#define IPG_CD_COUNTDOWNMODE 0x02000000 +#define IPG_CD_COUNTINTENABLED 0x04000000 + +/* TxDMABurstThresh */ +#define IPG_TB_RSVD_MASK 0xFF + +/* TxDMAUrgentThresh */ +#define IPG_TU_RSVD_MASK 0xFF + +/* TxDMAPollPeriod */ +#define IPG_TP_RSVD_MASK 0xFF + +/* RxDMAUrgentThresh */ +#define IPG_RU_RSVD_MASK 0xFF + +/* RxDMAPollPeriod */ +#define IPG_RP_RSVD_MASK 0xFF + +/* ReceiveMode */ +#define IPG_RM_RSVD_MASK 0x3F +#define IPG_RM_RECEIVEUNICAST 0x01 +#define IPG_RM_RECEIVEMULTICAST 0x02 +#define IPG_RM_RECEIVEBROADCAST 0x04 +#define IPG_RM_RECEIVEALLFRAMES 0x08 +#define IPG_RM_RECEIVEMULTICASTHASH 0x10 +#define IPG_RM_RECEIVEIPMULTICAST 0x20 + +/* PhySet JES20040127EEPROM*/ +#define IPG_PS_MEM_LENB9B 0x01 +#define IPG_PS_MEM_LEN9 0x02 +#define IPG_PS_NON_COMPDET 0x04 + +/* PhyCtrl */ +#define IPG_PC_RSVD_MASK 0xFF +#define IPG_PC_MGMTCLK_LO 0x00 +#define IPG_PC_MGMTCLK_HI 0x01 +#define IPG_PC_MGMTCLK 0x01 +#define IPG_PC_MGMTDATA 0x02 +#define IPG_PC_MGMTDIR 0x04 +#define IPG_PC_DUPLEX_POLARITY 0x08 +#define IPG_PC_DUPLEX_STATUS 0x10 +#define IPG_PC_LINK_POLARITY 0x20 +#define IPG_PC_LINK_SPEED 0xC0 +#define IPG_PC_LINK_SPEED_10MBPS 0x40 +#define IPG_PC_LINK_SPEED_100MBPS 0x80 +#define IPG_PC_LINK_SPEED_1000MBPS 0xC0 + +/* DMACtrl */ +#define IPG_DC_RSVD_MASK 0xC07D9818 +#define IPG_DC_RX_DMA_COMPLETE 0x00000008 +#define IPG_DC_RX_DMA_POLL_NOW 0x00000010 +#define IPG_DC_TX_DMA_COMPLETE 0x00000800 +#define IPG_DC_TX_DMA_POLL_NOW 0x00001000 +#define IPG_DC_TX_DMA_IN_PROG 0x00008000 +#define IPG_DC_RX_EARLY_DISABLE 0x00010000 +#define IPG_DC_MWI_DISABLE 0x00040000 +#define IPG_DC_TX_WRITE_BACK_DISABLE 0x00080000 +#define IPG_DC_TX_BURST_LIMIT 0x00700000 +#define IPG_DC_TARGET_ABORT 0x40000000 +#define IPG_DC_MASTER_ABORT 0x80000000 + +/* ASICCtrl */ +#define IPG_AC_RSVD_MASK 0x07FFEFF2 +#define IPG_AC_EXP_ROM_SIZE 0x00000002 +#define IPG_AC_PHY_SPEED10 0x00000010 +#define IPG_AC_PHY_SPEED100 0x00000020 +#define IPG_AC_PHY_SPEED1000 0x00000040 +#define IPG_AC_PHY_MEDIA 0x00000080 +#define IPG_AC_FORCED_CFG 0x00000700 +#define IPG_AC_D3RESETDISABLE 0x00000800 +#define IPG_AC_SPEED_UP_MODE 0x00002000 +#define 
IPG_AC_LED_MODE 0x00004000 +#define IPG_AC_RST_OUT_POLARITY 0x00008000 +#define IPG_AC_GLOBAL_RESET 0x00010000 +#define IPG_AC_RX_RESET 0x00020000 +#define IPG_AC_TX_RESET 0x00040000 +#define IPG_AC_DMA 0x00080000 +#define IPG_AC_FIFO 0x00100000 +#define IPG_AC_NETWORK 0x00200000 +#define IPG_AC_HOST 0x00400000 +#define IPG_AC_AUTO_INIT 0x00800000 +#define IPG_AC_RST_OUT 0x01000000 +#define IPG_AC_INT_REQUEST 0x02000000 +#define IPG_AC_RESET_BUSY 0x04000000 +#define IPG_AC_LED_SPEED 0x08000000 //JES20040127EEPROM +#define IPG_AC_LED_MODE_BIT_1 0x20000000 //JES20040127EEPROM + +/* EepromCtrl */ +#define IPG_EC_RSVD_MASK 0x83FF +#define IPG_EC_EEPROM_ADDR 0x00FF +#define IPG_EC_EEPROM_OPCODE 0x0300 +#define IPG_EC_EEPROM_SUBCOMMAD 0x0000 +#define IPG_EC_EEPROM_WRITEOPCODE 0x0100 +#define IPG_EC_EEPROM_READOPCODE 0x0200 +#define IPG_EC_EEPROM_ERASEOPCODE 0x0300 +#define IPG_EC_EEPROM_BUSY 0x8000 + +/* FIFOCtrl */ +#define IPG_FC_RSVD_MASK 0xC001 +#define IPG_FC_RAM_TEST_MODE 0x0001 +#define IPG_FC_TRANSMITTING 0x4000 +#define IPG_FC_RECEIVING 0x8000 + +/* TxStatus */ +#define IPG_TS_RSVD_MASK 0xFFFF00DD +#define IPG_TS_TX_ERROR 0x00000001 +#define IPG_TS_LATE_COLLISION 0x00000004 +#define IPG_TS_TX_MAX_COLL 0x00000008 +#define IPG_TS_TX_UNDERRUN 0x00000010 +#define IPG_TS_TX_IND_REQD 0x00000040 +#define IPG_TS_TX_COMPLETE 0x00000080 +#define IPG_TS_TX_FRAMEID 0xFFFF0000 + +/* WakeEvent */ +#define IPG_WE_WAKE_PKT_ENABLE 0x01 +#define IPG_WE_MAGIC_PKT_ENABLE 0x02 +#define IPG_WE_LINK_EVT_ENABLE 0x04 +#define IPG_WE_WAKE_POLARITY 0x08 +#define IPG_WE_WAKE_PKT_EVT 0x10 +#define IPG_WE_MAGIC_PKT_EVT 0x20 +#define IPG_WE_LINK_EVT 0x40 +#define IPG_WE_WOL_ENABLE 0x80 + +/* IntEnable */ +#define IPG_IE_RSVD_MASK 0x1FFE +#define IPG_IE_HOST_ERROR 0x0002 +#define IPG_IE_TX_COMPLETE 0x0004 +#define IPG_IE_MAC_CTRL_FRAME 0x0008 +#define IPG_IE_RX_COMPLETE 0x0010 +#define IPG_IE_RX_EARLY 0x0020 +#define IPG_IE_INT_REQUESTED 0x0040 +#define IPG_IE_UPDATE_STATS 0x0080 +#define IPG_IE_LINK_EVENT 0x0100 +#define IPG_IE_TX_DMA_COMPLETE 0x0200 +#define IPG_IE_RX_DMA_COMPLETE 0x0400 +#define IPG_IE_RFD_LIST_END 0x0800 +#define IPG_IE_RX_DMA_PRIORITY 0x1000 + +/* IntStatus */ +#define IPG_IS_RSVD_MASK 0x1FFF +#define IPG_IS_INTERRUPT_STATUS 0x0001 +#define IPG_IS_HOST_ERROR 0x0002 +#define IPG_IS_TX_COMPLETE 0x0004 +#define IPG_IS_MAC_CTRL_FRAME 0x0008 +#define IPG_IS_RX_COMPLETE 0x0010 +#define IPG_IS_RX_EARLY 0x0020 +#define IPG_IS_INT_REQUESTED 0x0040 +#define IPG_IS_UPDATE_STATS 0x0080 +#define IPG_IS_LINK_EVENT 0x0100 +#define IPG_IS_TX_DMA_COMPLETE 0x0200 +#define IPG_IS_RX_DMA_COMPLETE 0x0400 +#define IPG_IS_RFD_LIST_END 0x0800 +#define IPG_IS_RX_DMA_PRIORITY 0x1000 + +/* MACCtrl */ +#define IPG_MC_RSVD_MASK 0x7FE33FA3 +#define IPG_MC_IFS_SELECT 0x00000003 +#define IPG_MC_IFS_4352BIT 0x00000003 +#define IPG_MC_IFS_1792BIT 0x00000002 +#define IPG_MC_IFS_1024BIT 0x00000001 +#define IPG_MC_IFS_96BIT 0x00000000 +#define IPG_MC_DUPLEX_SELECT 0x00000020 +#define IPG_MC_DUPLEX_SELECT_FD 0x00000020 +#define IPG_MC_DUPLEX_SELECT_HD 0x00000000 +#define IPG_MC_TX_FLOW_CONTROL_ENABLE 0x00000080 +#define IPG_MC_RX_FLOW_CONTROL_ENABLE 0x00000100 +#define IPG_MC_RCV_FCS 0x00000200 +#define IPG_MC_FIFO_LOOPBACK 0x00000400 +#define IPG_MC_MAC_LOOPBACK 0x00000800 +#define IPG_MC_AUTO_VLAN_TAGGING 0x00001000 +#define IPG_MC_AUTO_VLAN_UNTAGGING 0x00002000 +#define IPG_MC_COLLISION_DETECT 0x00010000 +#define IPG_MC_CARRIER_SENSE 0x00020000 +#define IPG_MC_STATISTICS_ENABLE 0x00200000 +#define IPG_MC_STATISTICS_DISABLE 
0x00400000 +#define IPG_MC_STATISTICS_ENABLED 0x00800000 +#define IPG_MC_TX_ENABLE 0x01000000 +#define IPG_MC_TX_DISABLE 0x02000000 +#define IPG_MC_TX_ENABLED 0x04000000 +#define IPG_MC_RX_ENABLE 0x08000000 +#define IPG_MC_RX_DISABLE 0x10000000 +#define IPG_MC_RX_ENABLED 0x20000000 +#define IPG_MC_PAUSED 0x40000000 + +/* + * Tune + */ + +/* Miscellaneous Constants. */ +#define TRUE 1 +#define FALSE 0 + +/* Assign IPG_APPEND_FCS_ON_TX > 0 for auto FCS append on TX. */ +#define IPG_APPEND_FCS_ON_TX TRUE + +/* Assign IPG_APPEND_FCS_ON_TX > 0 for auto FCS strip on RX. */ +#define IPG_STRIP_FCS_ON_RX TRUE + +/* Assign IPG_DROP_ON_RX_ETH_ERRORS > 0 to drop RX frames with + * Ethernet errors. + */ +#define IPG_DROP_ON_RX_ETH_ERRORS TRUE + +/* Assign IPG_INSERT_MANUAL_VLAN_TAG > 0 to insert VLAN tags manually + * (via TFC). + */ +#define IPG_INSERT_MANUAL_VLAN_TAG FALSE + +/* Assign IPG_ADD_IPCHECKSUM_ON_TX > 0 for auto IP checksum on TX. */ +#define IPG_ADD_IPCHECKSUM_ON_TX FALSE + +/* Assign IPG_ADD_TCPCHECKSUM_ON_TX > 0 for auto TCP checksum on TX. + * DO NOT USE FOR SILICON REVISIONS B3 AND EARLIER. + */ +#define IPG_ADD_TCPCHECKSUM_ON_TX FALSE + +/* Assign IPG_ADD_UDPCHECKSUM_ON_TX > 0 for auto UDP checksum on TX. + * DO NOT USE FOR SILICON REVISIONS B3 AND EARLIER. + */ +#define IPG_ADD_UDPCHECKSUM_ON_TX FALSE + +/* If inserting VLAN tags manually, assign the IPG_MANUAL_VLAN_xx + * constants as desired. + */ +#define IPG_MANUAL_VLAN_VID 0xABC +#define IPG_MANUAL_VLAN_CFI 0x1 +#define IPG_MANUAL_VLAN_USERPRIORITY 0x5 + +#define IPG_IO_REG_RANGE 0xFF +#define IPG_MEM_REG_RANGE 0x154 +#define IPG_DRIVER_NAME "Sundance Technology IPG Triple-Speed Ethernet" +#define IPG_NIC_PHY_ADDRESS 0x01 +#define IPG_DMALIST_ALIGN_PAD 0x07 +#define IPG_MULTICAST_HASHTABLE_SIZE 0x40 + +/* Number of miliseconds to wait after issuing a software reset. + * 0x05 <= IPG_AC_RESETWAIT to account for proper 10Mbps operation. + */ +#define IPG_AC_RESETWAIT 0x05 + +/* Number of IPG_AC_RESETWAIT timeperiods before declaring timeout. */ +#define IPG_AC_RESET_TIMEOUT 0x0A + +/* Minimum number of nanoseconds used to toggle MDC clock during + * MII/GMII register access. + */ +#define IPG_PC_PHYCTRLWAIT_NS 200 + +#define IPG_TFDLIST_LENGTH 0x100 + +/* Number of frames between TxDMAComplete interrupt. + * 0 < IPG_FRAMESBETWEENTXDMACOMPLETES <= IPG_TFDLIST_LENGTH + */ +#define IPG_FRAMESBETWEENTXDMACOMPLETES 0x1 + +#ifdef JUMBO_FRAME + +# ifdef JUMBO_FRAME_SIZE_2K +# define JUMBO_FRAME_SIZE 2048 +# define __IPG_RXFRAG_SIZE 2048 +# else +# ifdef JUMBO_FRAME_SIZE_3K +# define JUMBO_FRAME_SIZE 3072 +# define __IPG_RXFRAG_SIZE 3072 +# else +# ifdef JUMBO_FRAME_SIZE_4K +# define JUMBO_FRAME_SIZE 4096 +# define __IPG_RXFRAG_SIZE 4088 +# else +# ifdef JUMBO_FRAME_SIZE_5K +# define JUMBO_FRAME_SIZE 5120 +# define __IPG_RXFRAG_SIZE 4088 +# else +# ifdef JUMBO_FRAME_SIZE_6K +# define JUMBO_FRAME_SIZE 6144 +# define __IPG_RXFRAG_SIZE 4088 +# else +# ifdef JUMBO_FRAME_SIZE_7K +# define JUMBO_FRAME_SIZE 7168 +# define __IPG_RXFRAG_SIZE 4088 +# else +# ifdef JUMBO_FRAME_SIZE_8K +# define JUMBO_FRAME_SIZE 8192 +# define __IPG_RXFRAG_SIZE 4088 +# else +# ifdef JUMBO_FRAME_SIZE_9K +# define JUMBO_FRAME_SIZE 9216 +# define __IPG_RXFRAG_SIZE 4088 +# else +# ifdef JUMBO_FRAME_SIZE_10K +# define JUMBO_FRAME_SIZE 10240 +# define __IPG_RXFRAG_SIZE 4088 +# else +# define JUMBO_FRAME_SIZE 4096 +# endif +# endif +# endif +# endif +# endif +# endif +# endif +# endif +# endif +#endif + +/* Size of allocated received buffers. Nominally 0x0600. 
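The IPG_MANUAL_VLAN_* values above only take effect when IPG_INSERT_MANUAL_VLAN_TAG is enabled; ipg_nic_hard_start_xmit() shifts them into the TFC VLAN fields. The packing works out as:

    u64 vlan_bits = IPG_TFC_VLANTAGINSERT                   /* 0x0000000010000000 */
            | ((u64) IPG_MANUAL_VLAN_VID << 32)             /* 0x00000ABC00000000 */
            | ((u64) IPG_MANUAL_VLAN_CFI << 44)             /* 0x0000100000000000 */
            | ((u64) IPG_MANUAL_VLAN_USERPRIORITY << 45);   /* 0x0000A00000000000 */
    /* vlan_bits == 0x0000BABC10000000: VID 0xABC, CFI 1 and priority 5 land in
     * the IPG_TFC_VID, IPG_TFC_CFI and IPG_TFC_USERPRIORITY fields respectively.
     */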
+ * Define larger if expecting jumbo frames. + */ +#ifdef JUMBO_FRAME +//IPG_TXFRAG_SIZE must <= 0x2b00, or TX will crash +#define IPG_TXFRAG_SIZE JUMBO_FRAME_SIZE +#endif + +/* Size of allocated received buffers. Nominally 0x0600. + * Define larger if expecting jumbo frames. + */ +#ifdef JUMBO_FRAME +//4088=4096-8 +#define IPG_RXFRAG_SIZE __IPG_RXFRAG_SIZE +#define IPG_RXSUPPORT_SIZE IPG_MAX_RXFRAME_SIZE +#else +#define IPG_RXFRAG_SIZE 0x0600 +#define IPG_RXSUPPORT_SIZE IPG_RXFRAG_SIZE +#endif + +/* IPG_MAX_RXFRAME_SIZE <= IPG_RXFRAG_SIZE */ +#ifdef JUMBO_FRAME +#define IPG_MAX_RXFRAME_SIZE JUMBO_FRAME_SIZE +#else +#define IPG_MAX_RXFRAME_SIZE 0x0600 +#endif + +#define IPG_RFDLIST_LENGTH 0x100 + +/* Maximum number of RFDs to process per interrupt. + * 1 < IPG_MAXRFDPROCESS_COUNT < IPG_RFDLIST_LENGTH + */ +#define IPG_MAXRFDPROCESS_COUNT 0x80 + +/* Minimum margin between last freed RFD, and current RFD. + * 1 < IPG_MINUSEDRFDSTOFREE < IPG_RFDLIST_LENGTH + */ +#define IPG_MINUSEDRFDSTOFREE 0x80 + +/* specify the jumbo frame maximum size + * per unit is 0x600 (the RxBuffer size that one RFD can carry) + */ +#define MAX_JUMBOSIZE 0x8 // max is 12K + +/* Key register values loaded at driver start up. */ + +/* TXDMAPollPeriod is specified in 320ns increments. + * + * Value Time + * --------------------- + * 0x00-0x01 320ns + * 0x03 ~1us + * 0x1F ~10us + * 0xFF ~82us + */ +#define IPG_TXDMAPOLLPERIOD_VALUE 0x26 + +/* TxDMAUrgentThresh specifies the minimum amount of + * data in the transmit FIFO before asserting an + * urgent transmit DMA request. + * + * Value Min TxFIFO occupied space before urgent TX request + * --------------------------------------------------------------- + * 0x00-0x04 128 bytes (1024 bits) + * 0x27 1248 bytes (~10000 bits) + * 0x30 1536 bytes (12288 bits) + * 0xFF 8192 bytes (65535 bits) + */ +#define IPG_TXDMAURGENTTHRESH_VALUE 0x04 + +/* TxDMABurstThresh specifies the minimum amount of + * free space in the transmit FIFO before asserting an + * transmit DMA request. + * + * Value Min TxFIFO free space before TX request + * ---------------------------------------------------- + * 0x00-0x08 256 bytes + * 0x30 1536 bytes + * 0xFF 8192 bytes + */ +#define IPG_TXDMABURSTTHRESH_VALUE 0x30 + +/* RXDMAPollPeriod is specified in 320ns increments. + * + * Value Time + * --------------------- + * 0x00-0x01 320ns + * 0x03 ~1us + * 0x1F ~10us + * 0xFF ~82us + */ +#define IPG_RXDMAPOLLPERIOD_VALUE 0x01 + +/* RxDMAUrgentThresh specifies the minimum amount of + * free space within the receive FIFO before asserting + * a urgent receive DMA request. + * + * Value Min RxFIFO free space before urgent RX request + * --------------------------------------------------------------- + * 0x00-0x04 128 bytes (1024 bits) + * 0x27 1248 bytes (~10000 bits) + * 0x30 1536 bytes (12288 bits) + * 0xFF 8192 bytes (65535 bits) + */ +#define IPG_RXDMAURGENTTHRESH_VALUE 0x30 + +/* RxDMABurstThresh specifies the minimum amount of + * occupied space within the receive FIFO before asserting + * a receive DMA request. + * + * Value Min TxFIFO free space before TX request + * ---------------------------------------------------- + * 0x00-0x08 256 bytes + * 0x30 1536 bytes + * 0xFF 8192 bytes + */ +#define IPG_RXDMABURSTTHRESH_VALUE 0x30 + +/* FlowOnThresh specifies the maximum amount of occupied + * space in the receive FIFO before a PAUSE frame with + * maximum pause time transmitted. 
+ * + * Value Max RxFIFO occupied space before PAUSE + * --------------------------------------------------- + * 0x0000 0 bytes + * 0x0740 29,696 bytes + * 0x07FF 32,752 bytes + */ +#define IPG_FLOWONTHRESH_VALUE 0x0740 + +/* FlowOffThresh specifies the minimum amount of occupied + * space in the receive FIFO before a PAUSE frame with + * zero pause time is transmitted. + * + * Value Max RxFIFO occupied space before PAUSE + * --------------------------------------------------- + * 0x0000 0 bytes + * 0x00BF 3056 bytes + * 0x07FF 32,752 bytes + */ +#define IPG_FLOWOFFTHRESH_VALUE 0x00BF + +/* + * Miscellaneous macros. + */ + +/* Marco for printing debug statements. +# define IPG_DDEBUG_MSG(args...) printk(KERN_DEBUG "IPG: " ## args) */ +#ifdef IPG_DEBUG +# define IPG_DEBUG_MSG(args...) +# define IPG_DDEBUG_MSG(args...) printk(KERN_DEBUG "IPG: " args) +# define IPG_DUMPRFDLIST(args) ipg_dump_rfdlist(args) +# define IPG_DUMPTFDLIST(args) ipg_dump_tfdlist(args) +#else +# define IPG_DEBUG_MSG(args...) +# define IPG_DDEBUG_MSG(args...) +# define IPG_DUMPRFDLIST(args) +# define IPG_DUMPTFDLIST(args) +#endif + +/* + * End miscellaneous macros. + */ + +/* Transmit Frame Descriptor. The IPG supports 15 fragments, + * however Linux requires only a single fragment. Note, each + * TFD field is 64 bits wide. + */ +struct ipg_tx { + u64 next_desc; + u64 tfc; + u64 frag_info; +}; + +/* Receive Frame Descriptor. Note, each RFD field is 64 bits wide. + */ +struct ipg_rx { + u64 next_desc; + u64 rfs; + u64 frag_info; +}; + +struct SJumbo { + int FoundStart; + int CurrentSize; + struct sk_buff *skb; +}; +/* Structure of IPG NIC specific data. */ +struct ipg_nic_private { + void __iomem *ioaddr; + struct ipg_tx *txd; + struct ipg_rx *rxd; + dma_addr_t txd_map; + dma_addr_t rxd_map; + struct sk_buff *TxBuff[IPG_TFDLIST_LENGTH]; + struct sk_buff *RxBuff[IPG_RFDLIST_LENGTH]; + unsigned int tx_current; + unsigned int tx_dirty; + unsigned int rx_current; + unsigned int rx_dirty; +// Add by Grace 2005/05/19 +#ifdef JUMBO_FRAME + struct SJumbo Jumbo; +#endif + unsigned int rx_buf_sz; + struct pci_dev *pdev; + struct net_device *dev; + struct net_device_stats stats; + spinlock_t lock; + int tenmbpsmode; + + /*Jesse20040128EEPROM_VALUE */ + u16 LED_Mode; + u16 station_addr[3]; /* Station Address in EEPROM Reg 0x10..0x12 */ + + struct mutex mii_mutex; + struct mii_if_info mii_if; + int ResetCurrentTFD; +#ifdef IPG_DEBUG + int RFDlistendCount; + int RFDListCheckedCount; + int EmptyRFDListCount; +#endif + struct delayed_work task; +}; + +//variable record -- index by leading revision/length +//Revision/Length(=N*4), Address1, Data1, Address2, Data2,...,AddressN,DataN +unsigned short DefaultPhyParam[] = { + // 11/12/03 IP1000A v1-3 rev=0x40 + /*-------------------------------------------------------------------------- + (0x4000|(15*4)), 31, 0x0001, 27, 0x01e0, 31, 0x0002, 22, 0x85bd, 24, 0xfff2, + 27, 0x0c10, 28, 0x0c10, 29, 0x2c10, 31, 0x0003, 23, 0x92f6, + 31, 0x0000, 23, 0x003d, 30, 0x00de, 20, 0x20e7, 9, 0x0700, + --------------------------------------------------------------------------*/ + // 12/17/03 IP1000A v1-4 rev=0x40 + (0x4000 | (07 * 4)), 31, 0x0001, 27, 0x01e0, 31, 0x0002, 27, 0xeb8e, 31, + 0x0000, + 30, 0x005e, 9, 0x0700, + // 01/09/04 IP1000A v1-5 rev=0x41 + (0x4100 | (07 * 4)), 31, 0x0001, 27, 0x01e0, 31, 0x0002, 27, 0xeb8e, 31, + 0x0000, + 30, 0x005e, 9, 0x0700, + 0x0000 +}; + +#endif /* __LINUX_IPG_H */ diff --git a/drivers/net/irda/Kconfig b/drivers/net/irda/Kconfig index 
2098d0af8ff5fee635ea8fdbddcc378209d5f754..65806956728afa95e832d0bcd37e036d1f634eb0 100644 --- a/drivers/net/irda/Kconfig +++ b/drivers/net/irda/Kconfig @@ -162,7 +162,33 @@ config EP7211_DONGLE Say Y here if you want to build support for the Cirrus logic EP7211 chipset's infrared module. +config KSDAZZLE_DONGLE + tristate "KingSun Dazzle IrDA-USB dongle (EXPERIMENTAL)" + depends on IRDA && USB && EXPERIMENTAL + help + Say Y or M here if you want to build support for the KingSun Dazzle + IrDA-USB bridge device driver. + + This USB bridge does not conform to the IrDA-USB device class + specification, and therefore needs its own specific driver. This + dongle supports SIR speeds only (9600 through 115200 bps). + + To compile it as a module, choose M here: the module will be called + ksdazzle-sir. +config KS959_DONGLE + tristate "KingSun KS-959 IrDA-USB dongle (EXPERIMENTAL)" + depends on IRDA && USB && EXPERIMENTAL + help + Say Y or M here if you want to build support for the KingSun KS-959 + IrDA-USB bridge device driver. + + This USB bridge does not conform to the IrDA-USB device class + specification, and therefore needs its own specific driver. This + dongle supports SIR speeds only (9600 through 57600 bps). + + To compile it as a module, choose M here: the module will be called + ks959-sir. comment "Old SIR device drivers" diff --git a/drivers/net/irda/Makefile b/drivers/net/irda/Makefile index 2808ef5c7b79187ac93cdf5a1f00da70c3e2abd0..fefbb590908165b720a5607514dd5fd449fae6d4 100644 --- a/drivers/net/irda/Makefile +++ b/drivers/net/irda/Makefile @@ -47,6 +47,8 @@ obj-$(CONFIG_MA600_DONGLE) += ma600-sir.o obj-$(CONFIG_TOIM3232_DONGLE) += toim3232-sir.o obj-$(CONFIG_EP7211_DONGLE) += ep7211-sir.o obj-$(CONFIG_KINGSUN_DONGLE) += kingsun-sir.o +obj-$(CONFIG_KSDAZZLE_DONGLE) += ksdazzle-sir.o +obj-$(CONFIG_KS959_DONGLE) += ks959-sir.o # The SIR helper module sir-dev-objs := sir_dev.o sir_dongle.o diff --git a/drivers/net/irda/actisys-sir.c b/drivers/net/irda/actisys-sir.c index 9715ab5572e998c11a96121a6fdde28cdce5399e..ccf6ec548a6464a3e51c1945796a0f7842d5977d 100644 --- a/drivers/net/irda/actisys-sir.c +++ b/drivers/net/irda/actisys-sir.c @@ -67,7 +67,7 @@ static int actisys_reset(struct sir_dev *); /* Note : the 220L doesn't support 38400, but we will fix that below */ static unsigned baud_rates[] = { 9600, 19200, 57600, 115200, 38400 }; -#define MAX_SPEEDS (sizeof(baud_rates)/sizeof(baud_rates[0])) +#define MAX_SPEEDS ARRAY_SIZE(baud_rates) static struct dongle_driver act220l = { .owner = THIS_MODULE, diff --git a/drivers/net/irda/ali-ircc.c b/drivers/net/irda/ali-ircc.c index f9c889c0dd07b40fa24523970815e411d5a99b18..9f584521304a8a84ce5e8230f43df2fa72def8b3 100644 --- a/drivers/net/irda/ali-ircc.c +++ b/drivers/net/irda/ali-ircc.c @@ -360,10 +360,6 @@ static int ali_ircc_open(int i, chipio_t *info) self->tx_fifo.len = self->tx_fifo.ptr = self->tx_fifo.free = 0; self->tx_fifo.tail = self->tx_buff.head; - - /* Keep track of module usage */ - SET_MODULE_OWNER(dev); - /* Override the network functions we need to use */ dev->hard_start_xmit = ali_ircc_sir_hard_xmit; dev->open = ali_ircc_net_open; diff --git a/drivers/net/irda/donauboe.c b/drivers/net/irda/donauboe.c index 3ca47bf6dfeca62c612e0a90f1abbd01d1c99ea4..3e5eca1aa98761ecf932f6d47d895dfda1e1a7a9 100644 --- a/drivers/net/irda/donauboe.c +++ b/drivers/net/irda/donauboe.c @@ -1660,7 +1660,6 @@ toshoboe_open (struct pci_dev *pci_dev, const struct pci_device_id *pdid) } #endif - SET_MODULE_OWNER(dev); SET_NETDEV_DEV(dev, &pci_dev->dev); 
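The actisys-sir.c hunk above replaces the open-coded sizeof division behind MAX_SPEEDS with ARRAY_SIZE(), the kernel's standard element-count macro (current kernels also add a compile-time check that the argument really is an array, not a pointer). Conceptually:

    #define EXAMPLE_ARRAY_SIZE(arr)  (sizeof(arr) / sizeof((arr)[0]))

    static unsigned baud_rates[] = { 9600, 19200, 57600, 115200, 38400 };
    /* EXAMPLE_ARRAY_SIZE(baud_rates) == 5, the same value MAX_SPEEDS had before. */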
dev->hard_start_xmit = toshoboe_hard_xmit; dev->open = toshoboe_net_open; diff --git a/drivers/net/irda/irda-usb.c b/drivers/net/irda/irda-usb.c index 3b0fd83fa26634351695b3f98e68a550c83d5701..c6355c00fd7abb8e1143f81a0c4bb2945edb97fd 100644 --- a/drivers/net/irda/irda-usb.c +++ b/drivers/net/irda/irda-usb.c @@ -1635,7 +1635,6 @@ static int irda_usb_probe(struct usb_interface *intf, if (!net) goto err_out; - SET_MODULE_OWNER(net); SET_NETDEV_DEV(net, &intf->dev); self = net->priv; self->netdev = net; diff --git a/drivers/net/irda/irport.c b/drivers/net/irda/irport.c index 20732458f5ac20cb41cea01cfe14e5ae5d2ce2c8..c79caa5d3d710a55bcdaeca4cb52470ea0ae57b5 100644 --- a/drivers/net/irda/irport.c +++ b/drivers/net/irda/irport.c @@ -175,8 +175,6 @@ irport_open(int i, unsigned int iobase, unsigned int irq) self->tx_buff.data = self->tx_buff.head; self->netdev = dev; - /* Keep track of module usage */ - SET_MODULE_OWNER(dev); /* May be overridden by piggyback drivers */ self->interrupt = irport_interrupt; diff --git a/drivers/net/irda/kingsun-sir.c b/drivers/net/irda/kingsun-sir.c index 4e5101a45c3c62c69bf27d44b38bcd9d0467c73c..648e54b3f00e97411fb04e2d46e1574e80602270 100644 --- a/drivers/net/irda/kingsun-sir.c +++ b/drivers/net/irda/kingsun-sir.c @@ -66,7 +66,6 @@ #include #include #include -#include #include #include #include @@ -488,7 +487,6 @@ static int kingsun_probe(struct usb_interface *intf, if(!net) goto err_out1; - SET_MODULE_OWNER(net); SET_NETDEV_DEV(net, &intf->dev); kingsun = netdev_priv(net); kingsun->irlap = NULL; diff --git a/drivers/net/irda/ks959-sir.c b/drivers/net/irda/ks959-sir.c new file mode 100644 index 0000000000000000000000000000000000000000..8c257a51341a131e2426ab5f1ff0de220a2a2aa0 --- /dev/null +++ b/drivers/net/irda/ks959-sir.c @@ -0,0 +1,938 @@ +/***************************************************************************** +* +* Filename: ks959-sir.c +* Version: 0.1.2 +* Description: Irda KingSun KS-959 USB Dongle +* Status: Experimental +* Author: Alex Villacís Lasso +* with help from Domen Puncer +* +* Based on stir4200, mcs7780, kingsun-sir drivers. +* +* This program is free software; you can redistribute it and/or modify +* it under the terms of the GNU General Public License as published by +* the Free Software Foundation; either version 2 of the License. +* +* This program is distributed in the hope that it will be useful, +* but WITHOUT ANY WARRANTY; without even the implied warranty of +* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +* GNU General Public License for more details. +* +* You should have received a copy of the GNU General Public License +* along with this program; if not, write to the Free Software +* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. +* +*****************************************************************************/ + +/* + * Following is my most current (2007-07-17) understanding of how the Kingsun + * KS-959 dongle is supposed to work. This information was deduced by + * reverse-engineering and examining the USB traffic captured with USBSnoopy + * from the WinXP driver. Feel free to update here as more of the dongle is + * known. + * + * My most sincere thanks must go to Domen Puncer for + * invaluable help in cracking the obfuscation and padding required for this + * dongle. + * + * General: This dongle exposes one interface with one interrupt IN endpoint. + * However, the interrupt endpoint is NOT used at all for this dongle. 
Instead, + * this dongle uses control transfers for everything, including sending and + * receiving the IrDA frame data. Apparently the interrupt endpoint is just a + * dummy to ensure the dongle has a valid interface to present to the PC.And I + * thought the DonShine dongle was weird... In addition, this dongle uses + * obfuscation (?!?!), applied at the USB level, to hide the traffic, both sent + * and received, from the dongle. I call it obfuscation because the XOR keying + * and padding required to produce an USB traffic acceptable for the dongle can + * not be explained by any other technical requirement. + * + * Transmission: To transmit an IrDA frame, the driver must prepare a control + * URB with the following as a setup packet: + * bRequestType USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE + * bRequest 0x09 + * wValue + * wIndex 0x0000 + * wLength + * The payload packet must be manually wrapped and escaped (as in stir4200.c), + * then padded and obfuscated before being sent. Both padding and obfuscation + * are implemented in the procedure obfuscate_tx_buffer(). Suffice to say, the + * designer/programmer of the dongle used his name as a source for the + * obfuscation. WTF?! + * Apparently the dongle cannot handle payloads larger than 256 bytes. The + * driver has to perform fragmentation in order to send anything larger than + * this limit. + * + * Reception: To receive data, the driver must poll the dongle regularly (like + * kingsun-sir.c) with control URBs and the following as a setup packet: + * bRequestType USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE + * bRequest 0x01 + * wValue 0x0200 + * wIndex 0x0000 + * wLength 0x0800 (size of available buffer) + * If there is data to be read, it will be returned as the response payload. + * This data is (apparently) not padded, but it is obfuscated. To de-obfuscate + * it, the driver must XOR every byte, in sequence, with a value that starts at + * 1 and is incremented with each byte processed, and then with 0x55. The value + * incremented with each byte processed overflows as an unsigned char. The + * resulting bytes form a wrapped SIR frame that is unwrapped and unescaped + * as in stir4200.c The incremented value is NOT reset with each frame, but is + * kept across the entire session with the dongle. Also, the dongle inserts an + * extra garbage byte with value 0x95 (after decoding) every 0xff bytes, which + * must be skipped. + * + * Speed change: To change the speed of the dongle, the driver prepares a + * control URB with the following as a setup packet: + * bRequestType USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE + * bRequest 0x09 + * wValue 0x0200 + * wIndex 0x0001 + * wLength 0x0008 (length of the payload) + * The payload is a 8-byte record, apparently identical to the one used in + * drivers/usb/serial/cypress_m8.c to change speed: + * __u32 baudSpeed; + * unsigned int dataBits : 2; // 0 - 5 bits 3 - 8 bits + * unsigned int : 1; + * unsigned int stopBits : 1; + * unsigned int parityEnable : 1; + * unsigned int parityType : 1; + * unsigned int : 1; + * unsigned int reset : 1; + * unsigned char reserved[3]; // set to 0 + * + * For now only SIR speeds have been observed with this dongle. Therefore, + * nothing is known on what changes (if any) must be done to frame wrapping / + * unwrapping for higher than SIR speeds. This driver assumes no change is + * necessary and announces support for all the way to 57600 bps. 
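To make the speed request concrete: for 9600 bps with 8 data bits and no parity, the 8-byte record described above is built by the driver (struct ks959_speedparams and ks959_change_speed(), further down) roughly as:

    struct ks959_speedparams p = {
            .baudrate = cpu_to_le32(9600),    /* bytes 0..3: 80 25 00 00 (0x2580 LE) */
            .flags    = KS_DATA_8_BITS,       /* byte 4:     0x03                    */
    };                                        /* bytes 5..7: 00 00 00 (reserved)     */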
Although the + * package announces support for up to 4MBps, tests with a Sony Ericcson K300 + * phone show corruption when receiving large frames at 115200 bps, the highest + * speed announced by the phone. However, transmission at 115200 bps is OK. Go + * figure. Since I don't know whether the phone or the dongle is at fault, max + * announced speed is 57600 bps until someone produces a device that can run + * at higher speeds with this dongle. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include +#include +#include + +#define KS959_VENDOR_ID 0x07d0 +#define KS959_PRODUCT_ID 0x4959 + +/* These are the currently known USB ids */ +static struct usb_device_id dongles[] = { + /* KingSun Co,Ltd IrDA/USB Bridge */ + {USB_DEVICE(KS959_VENDOR_ID, KS959_PRODUCT_ID)}, + {} +}; + +MODULE_DEVICE_TABLE(usb, dongles); + +#define KINGSUN_MTT 0x07 +#define KINGSUN_REQ_RECV 0x01 +#define KINGSUN_REQ_SEND 0x09 + +#define KINGSUN_RCV_FIFO_SIZE 2048 /* Max length we can receive */ +#define KINGSUN_SND_FIFO_SIZE 2048 /* Max packet we can send */ +#define KINGSUN_SND_PACKET_SIZE 256 /* Max packet dongle can handle */ + +struct ks959_speedparams { + __le32 baudrate; /* baud rate, little endian */ + __u8 flags; + __u8 reserved[3]; +} __attribute__ ((packed)); + +#define KS_DATA_5_BITS 0x00 +#define KS_DATA_6_BITS 0x01 +#define KS_DATA_7_BITS 0x02 +#define KS_DATA_8_BITS 0x03 + +#define KS_STOP_BITS_1 0x00 +#define KS_STOP_BITS_2 0x08 + +#define KS_PAR_DISABLE 0x00 +#define KS_PAR_EVEN 0x10 +#define KS_PAR_ODD 0x30 +#define KS_RESET 0x80 + +struct ks959_cb { + struct usb_device *usbdev; /* init: probe_irda */ + struct net_device *netdev; /* network layer */ + struct irlap_cb *irlap; /* The link layer we are binded to */ + struct net_device_stats stats; /* network statistics */ + struct qos_info qos; + + struct usb_ctrlrequest *tx_setuprequest; + struct urb *tx_urb; + __u8 *tx_buf_clear; + unsigned int tx_buf_clear_used; + unsigned int tx_buf_clear_sent; + __u8 *tx_buf_xored; + + struct usb_ctrlrequest *rx_setuprequest; + struct urb *rx_urb; + __u8 *rx_buf; + __u8 rx_variable_xormask; + iobuff_t rx_unwrap_buff; + struct timeval rx_time; + + struct usb_ctrlrequest *speed_setuprequest; + struct urb *speed_urb; + struct ks959_speedparams speedparams; + unsigned int new_speed; + + spinlock_t lock; + int receiving; +}; + +/* Procedure to perform the obfuscation/padding expected by the dongle + * + * buf_cleartext (IN) Cleartext version of the IrDA frame to transmit + * len_cleartext (IN) Length of the cleartext version of IrDA frame + * buf_xoredtext (OUT) Obfuscated version of frame built by proc + * len_maxbuf (OUT) Maximum space available at buf_xoredtext + * + * (return) length of obfuscated frame with padding + * + * If not enough space (as indicated by len_maxbuf vs. required padding), + * zero is returned + * + * The value of lookup_string is actually a required portion of the algorithm. + * Seems the designer of the dongle wanted to state who exactly is responsible + * for implementing obfuscation. 
Send your best (or other) wishes to him ]:-) + */ +static unsigned int obfuscate_tx_buffer(const __u8 * buf_cleartext, + unsigned int len_cleartext, + __u8 * buf_xoredtext, + unsigned int len_maxbuf) +{ + unsigned int len_xoredtext; + + /* Calculate required length with padding, check for necessary space */ + len_xoredtext = ((len_cleartext + 7) & ~0x7) + 0x10; + if (len_xoredtext <= len_maxbuf) { + static const __u8 lookup_string[] = "wangshuofei19710"; + __u8 xor_mask; + + /* Unlike the WinXP driver, we *do* clear out the padding */ + memset(buf_xoredtext, 0, len_xoredtext); + + xor_mask = lookup_string[(len_cleartext & 0x0f) ^ 0x06] ^ 0x55; + + while (len_cleartext-- > 0) { + *buf_xoredtext++ = *buf_cleartext++ ^ xor_mask; + } + } else { + len_xoredtext = 0; + } + return len_xoredtext; +} + +/* Callback transmission routine */ +static void ks959_speed_irq(struct urb *urb) +{ + /* unlink, shutdown, unplug, other nasties */ + if (urb->status != 0) { + err("ks959_speed_irq: urb asynchronously failed - %d", + urb->status); + } +} + +/* Send a control request to change speed of the dongle */ +static int ks959_change_speed(struct ks959_cb *kingsun, unsigned speed) +{ + static unsigned int supported_speeds[] = { 2400, 9600, 19200, 38400, + 57600, 115200, 576000, 1152000, 4000000, 0 + }; + int err; + unsigned int i; + + if (kingsun->speed_setuprequest == NULL || kingsun->speed_urb == NULL) + return -ENOMEM; + + /* Check that requested speed is among the supported ones */ + for (i = 0; supported_speeds[i] && supported_speeds[i] != speed; i++) ; + if (supported_speeds[i] == 0) + return -EOPNOTSUPP; + + memset(&(kingsun->speedparams), 0, sizeof(struct ks959_speedparams)); + kingsun->speedparams.baudrate = cpu_to_le32(speed); + kingsun->speedparams.flags = KS_DATA_8_BITS; + + /* speed_setuprequest pre-filled in ks959_probe */ + usb_fill_control_urb(kingsun->speed_urb, kingsun->usbdev, + usb_sndctrlpipe(kingsun->usbdev, 0), + (unsigned char *)kingsun->speed_setuprequest, + &(kingsun->speedparams), + sizeof(struct ks959_speedparams), ks959_speed_irq, + kingsun); + kingsun->speed_urb->status = 0; + err = usb_submit_urb(kingsun->speed_urb, GFP_ATOMIC); + + return err; +} + +/* Submit one fragment of an IrDA frame to the dongle */ +static void ks959_send_irq(struct urb *urb); +static int ks959_submit_tx_fragment(struct ks959_cb *kingsun) +{ + unsigned int padlen; + unsigned int wraplen; + int ret; + + /* Check whether current plaintext can produce a padded buffer that fits + within the range handled by the dongle */ + wraplen = (KINGSUN_SND_PACKET_SIZE & ~0x7) - 0x10; + if (wraplen > kingsun->tx_buf_clear_used) + wraplen = kingsun->tx_buf_clear_used; + + /* Perform dongle obfuscation. Also remove the portion of the frame that + was just obfuscated and will now be sent to the dongle. 
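The size bookkeeping of obfuscate_tx_buffer() and ks959_submit_tx_fragment() works out as follows (plain arithmetic on the constants, shown for clarity):

    /* KINGSUN_SND_PACKET_SIZE is 256, so each fragment carries at most
     *         wraplen = (256 & ~0x7) - 0x10 = 240 cleartext bytes,
     * which after padding becomes
     *         len_xoredtext = ((240 + 7) & ~0x7) + 0x10 = 256 bytes,
     * exactly the largest payload the dongle accepts.  A short final fragment
     * of, say, 120 bytes pads out to ((120 + 7) & ~0x7) + 0x10 = 136 bytes.
     */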
*/ + padlen = obfuscate_tx_buffer(kingsun->tx_buf_clear, wraplen, + kingsun->tx_buf_xored, + KINGSUN_SND_PACKET_SIZE); + + /* Calculate how much data can be transmitted in this urb */ + kingsun->tx_setuprequest->wValue = cpu_to_le16(wraplen); + kingsun->tx_setuprequest->wLength = cpu_to_le16(padlen); + /* Rest of the fields were filled in ks959_probe */ + usb_fill_control_urb(kingsun->tx_urb, kingsun->usbdev, + usb_sndctrlpipe(kingsun->usbdev, 0), + (unsigned char *)kingsun->tx_setuprequest, + kingsun->tx_buf_xored, padlen, + ks959_send_irq, kingsun); + kingsun->tx_urb->status = 0; + ret = usb_submit_urb(kingsun->tx_urb, GFP_ATOMIC); + + /* Remember how much data was sent, in order to update at callback */ + kingsun->tx_buf_clear_sent = (ret == 0) ? wraplen : 0; + return ret; +} + +/* Callback transmission routine */ +static void ks959_send_irq(struct urb *urb) +{ + struct ks959_cb *kingsun = urb->context; + struct net_device *netdev = kingsun->netdev; + int ret = 0; + + /* in process of stopping, just drop data */ + if (!netif_running(kingsun->netdev)) { + err("ks959_send_irq: Network not running!"); + return; + } + + /* unlink, shutdown, unplug, other nasties */ + if (urb->status != 0) { + err("ks959_send_irq: urb asynchronously failed - %d", + urb->status); + return; + } + + if (kingsun->tx_buf_clear_used > 0) { + /* Update data remaining to be sent */ + if (kingsun->tx_buf_clear_sent < kingsun->tx_buf_clear_used) { + memmove(kingsun->tx_buf_clear, + kingsun->tx_buf_clear + + kingsun->tx_buf_clear_sent, + kingsun->tx_buf_clear_used - + kingsun->tx_buf_clear_sent); + } + kingsun->tx_buf_clear_used -= kingsun->tx_buf_clear_sent; + kingsun->tx_buf_clear_sent = 0; + + if (kingsun->tx_buf_clear_used > 0) { + /* There is more data to be sent */ + if ((ret = ks959_submit_tx_fragment(kingsun)) != 0) { + err("ks959_send_irq: failed tx_urb submit: %d", + ret); + switch (ret) { + case -ENODEV: + case -EPIPE: + break; + default: + kingsun->stats.tx_errors++; + netif_start_queue(netdev); + } + } + } else { + /* All data sent, send next speed && wake network queue */ + if (kingsun->new_speed != -1 && + cpu_to_le32(kingsun->new_speed) != + kingsun->speedparams.baudrate) + ks959_change_speed(kingsun, kingsun->new_speed); + + netif_wake_queue(netdev); + } + } +} + +/* + * Called from net/core when new frame is available. 
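ks959_send_irq() above and ks959_hard_xmit() below cooperate to push a wrapped frame out across several URB completions. A worked trace for a hypothetical 600-byte wrapped frame:

    /* ks959_hard_xmit(): tx_buf_clear_used = 600; submit 240 cleartext bytes
     *                    (obfuscated and padded to 256) as fragment 1.
     * ks959_send_irq():  memmove the remaining 360 bytes to the front of
     *                    tx_buf_clear and submit 240 more as fragment 2.
     * ks959_send_irq():  120 bytes left; submit them (padded to 136) as fragment 3.
     * ks959_send_irq():  nothing left; apply any pending speed change and
     *                    netif_wake_queue().
     */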
+ */ +static int ks959_hard_xmit(struct sk_buff *skb, struct net_device *netdev) +{ + struct ks959_cb *kingsun; + unsigned int wraplen; + int ret = 0; + + if (skb == NULL || netdev == NULL) + return -EINVAL; + + netif_stop_queue(netdev); + + /* the IRDA wrapping routines don't deal with non linear skb */ + SKB_LINEAR_ASSERT(skb); + + kingsun = netdev_priv(netdev); + + spin_lock(&kingsun->lock); + kingsun->new_speed = irda_get_next_speed(skb); + + /* Append data to the end of whatever data remains to be transmitted */ + wraplen = + async_wrap_skb(skb, kingsun->tx_buf_clear, KINGSUN_SND_FIFO_SIZE); + kingsun->tx_buf_clear_used = wraplen; + + if ((ret = ks959_submit_tx_fragment(kingsun)) != 0) { + err("ks959_hard_xmit: failed tx_urb submit: %d", ret); + switch (ret) { + case -ENODEV: + case -EPIPE: + break; + default: + kingsun->stats.tx_errors++; + netif_start_queue(netdev); + } + } else { + kingsun->stats.tx_packets++; + kingsun->stats.tx_bytes += skb->len; + + } + + dev_kfree_skb(skb); + spin_unlock(&kingsun->lock); + + return ret; +} + +/* Receive callback function */ +static void ks959_rcv_irq(struct urb *urb) +{ + struct ks959_cb *kingsun = urb->context; + int ret; + + /* in process of stopping, just drop data */ + if (!netif_running(kingsun->netdev)) { + kingsun->receiving = 0; + return; + } + + /* unlink, shutdown, unplug, other nasties */ + if (urb->status != 0) { + err("kingsun_rcv_irq: urb asynchronously failed - %d", + urb->status); + kingsun->receiving = 0; + return; + } + + if (urb->actual_length > 0) { + __u8 *bytes = urb->transfer_buffer; + unsigned int i; + + for (i = 0; i < urb->actual_length; i++) { + /* De-obfuscation implemented here: variable portion of + xormask is incremented, and then used with the encoded + byte for the XOR. The result of the operation is used + to unwrap the SIR frame. */ + kingsun->rx_variable_xormask++; + bytes[i] = + bytes[i] ^ kingsun->rx_variable_xormask ^ 0x55u; + + /* rx_variable_xormask doubles as an index counter so we + can skip the byte at 0xff (wrapped around to 0). + */ + if (kingsun->rx_variable_xormask != 0) { + async_unwrap_char(kingsun->netdev, + &kingsun->stats, + &kingsun->rx_unwrap_buff, + bytes[i]); + } + } + kingsun->netdev->last_rx = jiffies; + do_gettimeofday(&kingsun->rx_time); + kingsun->receiving = + (kingsun->rx_unwrap_buff.state != OUTSIDE_FRAME) ? 1 : 0; + } + + /* This urb has already been filled in kingsun_net_open. Setup + packet must be re-filled, but it is assumed that urb keeps the + pointer to the initial setup packet, as well as the payload buffer. + Setup packet is already pre-filled at ks959_probe. + */ + urb->status = 0; + ret = usb_submit_urb(urb, GFP_ATOMIC); +} + +/* + * Function kingsun_net_open (dev) + * + * Network device is taken up. Usually this is done by "ifconfig irda0 up" + */ +static int ks959_net_open(struct net_device *netdev) +{ + struct ks959_cb *kingsun = netdev_priv(netdev); + int err = -ENOMEM; + char hwname[16]; + + /* At this point, urbs are NULL, and skb is NULL (see kingsun_probe) */ + kingsun->receiving = 0; + + /* Initialize for SIR to copy data directly into skb. 
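A concrete decode of the first bytes returned by the dongle may help when reading ks959_rcv_irq() above; the received byte values b0, b1, ... are arbitrary, only the keying follows the code:

    /* rx_variable_xormask starts at 0 and is kept across URBs and frames.
     *   1st byte b0:  mask becomes 0x01, decoded = b0 ^ 0x01 ^ 0x55 = b0 ^ 0x54
     *   2nd byte b1:  mask becomes 0x02, decoded = b1 ^ 0x02 ^ 0x55 = b1 ^ 0x57
     *   ...
     *   256th byte:   mask wraps to 0x00; that byte (the garbage filler the
     *                 header comment mentions) is not fed to the SIR unwrap
     *                 state machine, and the next byte uses mask 0x01 again.
     */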
*/ + kingsun->rx_unwrap_buff.in_frame = FALSE; + kingsun->rx_unwrap_buff.state = OUTSIDE_FRAME; + kingsun->rx_unwrap_buff.truesize = IRDA_SKB_MAX_MTU; + kingsun->rx_unwrap_buff.skb = dev_alloc_skb(IRDA_SKB_MAX_MTU); + if (!kingsun->rx_unwrap_buff.skb) + goto free_mem; + + skb_reserve(kingsun->rx_unwrap_buff.skb, 1); + kingsun->rx_unwrap_buff.head = kingsun->rx_unwrap_buff.skb->data; + do_gettimeofday(&kingsun->rx_time); + + kingsun->rx_urb = usb_alloc_urb(0, GFP_KERNEL); + if (!kingsun->rx_urb) + goto free_mem; + + kingsun->tx_urb = usb_alloc_urb(0, GFP_KERNEL); + if (!kingsun->tx_urb) + goto free_mem; + + kingsun->speed_urb = usb_alloc_urb(0, GFP_KERNEL); + if (!kingsun->speed_urb) + goto free_mem; + + /* Initialize speed for dongle */ + kingsun->new_speed = 9600; + err = ks959_change_speed(kingsun, 9600); + if (err < 0) + goto free_mem; + + /* + * Now that everything should be initialized properly, + * Open new IrLAP layer instance to take care of us... + */ + sprintf(hwname, "usb#%d", kingsun->usbdev->devnum); + kingsun->irlap = irlap_open(netdev, &kingsun->qos, hwname); + if (!kingsun->irlap) { + err("ks959-sir: irlap_open failed"); + goto free_mem; + } + + /* Start reception. Setup request already pre-filled in ks959_probe */ + usb_fill_control_urb(kingsun->rx_urb, kingsun->usbdev, + usb_rcvctrlpipe(kingsun->usbdev, 0), + (unsigned char *)kingsun->rx_setuprequest, + kingsun->rx_buf, KINGSUN_RCV_FIFO_SIZE, + ks959_rcv_irq, kingsun); + kingsun->rx_urb->status = 0; + err = usb_submit_urb(kingsun->rx_urb, GFP_KERNEL); + if (err) { + err("ks959-sir: first urb-submit failed: %d", err); + goto close_irlap; + } + + netif_start_queue(netdev); + + /* Situation at this point: + - all work buffers allocated + - urbs allocated and ready to fill + - max rx packet known (in max_rx) + - unwrap state machine initialized, in state outside of any frame + - receive request in progress + - IrLAP layer started, about to hand over packets to send + */ + + return 0; + + close_irlap: + irlap_close(kingsun->irlap); + free_mem: + usb_free_urb(kingsun->speed_urb); + kingsun->speed_urb = NULL; + usb_free_urb(kingsun->tx_urb); + kingsun->tx_urb = NULL; + usb_free_urb(kingsun->rx_urb); + kingsun->rx_urb = NULL; + if (kingsun->rx_unwrap_buff.skb) { + kfree_skb(kingsun->rx_unwrap_buff.skb); + kingsun->rx_unwrap_buff.skb = NULL; + kingsun->rx_unwrap_buff.head = NULL; + } + return err; +} + +/* + * Function kingsun_net_close (kingsun) + * + * Network device is taken down. Usually this is done by + * "ifconfig irda0 down" + */ +static int ks959_net_close(struct net_device *netdev) +{ + struct ks959_cb *kingsun = netdev_priv(netdev); + + /* Stop transmit processing */ + netif_stop_queue(netdev); + + /* Mop up receive && transmit urb's */ + usb_kill_urb(kingsun->tx_urb); + usb_free_urb(kingsun->tx_urb); + kingsun->tx_urb = NULL; + + usb_kill_urb(kingsun->speed_urb); + usb_free_urb(kingsun->speed_urb); + kingsun->speed_urb = NULL; + + usb_kill_urb(kingsun->rx_urb); + usb_free_urb(kingsun->rx_urb); + kingsun->rx_urb = NULL; + + kfree_skb(kingsun->rx_unwrap_buff.skb); + kingsun->rx_unwrap_buff.skb = NULL; + kingsun->rx_unwrap_buff.head = NULL; + kingsun->rx_unwrap_buff.in_frame = FALSE; + kingsun->rx_unwrap_buff.state = OUTSIDE_FRAME; + kingsun->receiving = 0; + + /* Stop and remove instance of IrLAP */ + if (kingsun->irlap) + irlap_close(kingsun->irlap); + + kingsun->irlap = NULL; + + return 0; +} + +/* + * IOCTLs : Extra out-of-band network commands... 
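+ * Handled here: SIOCSBANDWIDTH (force a speed change), SIOCSMEDIABUSY
+ * (tell the IrDA stack the media is busy) and SIOCGRECEIVING (report
+ * whether a frame is currently being received).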
+ */ +static int ks959_net_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd) +{ + struct if_irda_req *irq = (struct if_irda_req *)rq; + struct ks959_cb *kingsun = netdev_priv(netdev); + int ret = 0; + + switch (cmd) { + case SIOCSBANDWIDTH: /* Set bandwidth */ + if (!capable(CAP_NET_ADMIN)) + return -EPERM; + + /* Check if the device is still there */ + if (netif_device_present(kingsun->netdev)) + return ks959_change_speed(kingsun, irq->ifr_baudrate); + break; + + case SIOCSMEDIABUSY: /* Set media busy */ + if (!capable(CAP_NET_ADMIN)) + return -EPERM; + + /* Check if the IrDA stack is still there */ + if (netif_running(kingsun->netdev)) + irda_device_set_media_busy(kingsun->netdev, TRUE); + break; + + case SIOCGRECEIVING: + /* Only approximately true */ + irq->ifr_receiving = kingsun->receiving; + break; + + default: + ret = -EOPNOTSUPP; + } + + return ret; +} + +/* + * Get device stats (for /proc/net/dev and ifconfig) + */ +static struct net_device_stats *ks959_net_get_stats(struct net_device *netdev) +{ + struct ks959_cb *kingsun = netdev_priv(netdev); + return &kingsun->stats; +} + +/* + * This routine is called by the USB subsystem for each new device + * in the system. We need to check if the device is ours, and in + * this case start handling it. + */ +static int ks959_probe(struct usb_interface *intf, + const struct usb_device_id *id) +{ + struct usb_device *dev = interface_to_usbdev(intf); + struct ks959_cb *kingsun = NULL; + struct net_device *net = NULL; + int ret = -ENOMEM; + + /* Allocate network device container. */ + net = alloc_irdadev(sizeof(*kingsun)); + if (!net) + goto err_out1; + + SET_NETDEV_DEV(net, &intf->dev); + kingsun = netdev_priv(net); + kingsun->netdev = net; + kingsun->usbdev = dev; + kingsun->irlap = NULL; + kingsun->tx_setuprequest = NULL; + kingsun->tx_urb = NULL; + kingsun->tx_buf_clear = NULL; + kingsun->tx_buf_xored = NULL; + kingsun->tx_buf_clear_used = 0; + kingsun->tx_buf_clear_sent = 0; + + kingsun->rx_setuprequest = NULL; + kingsun->rx_urb = NULL; + kingsun->rx_buf = NULL; + kingsun->rx_variable_xormask = 0; + kingsun->rx_unwrap_buff.in_frame = FALSE; + kingsun->rx_unwrap_buff.state = OUTSIDE_FRAME; + kingsun->rx_unwrap_buff.skb = NULL; + kingsun->receiving = 0; + spin_lock_init(&kingsun->lock); + + kingsun->speed_setuprequest = NULL; + kingsun->speed_urb = NULL; + kingsun->speedparams.baudrate = 0; + + /* Allocate input buffer */ + kingsun->rx_buf = kmalloc(KINGSUN_RCV_FIFO_SIZE, GFP_KERNEL); + if (!kingsun->rx_buf) + goto free_mem; + + /* Allocate input setup packet */ + kingsun->rx_setuprequest = + kmalloc(sizeof(struct usb_ctrlrequest), GFP_KERNEL); + if (!kingsun->rx_setuprequest) + goto free_mem; + kingsun->rx_setuprequest->bRequestType = + USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE; + kingsun->rx_setuprequest->bRequest = KINGSUN_REQ_RECV; + kingsun->rx_setuprequest->wValue = cpu_to_le16(0x0200); + kingsun->rx_setuprequest->wIndex = 0; + kingsun->rx_setuprequest->wLength = cpu_to_le16(KINGSUN_RCV_FIFO_SIZE); + + /* Allocate output buffer */ + kingsun->tx_buf_clear = kmalloc(KINGSUN_SND_FIFO_SIZE, GFP_KERNEL); + if (!kingsun->tx_buf_clear) + goto free_mem; + kingsun->tx_buf_xored = kmalloc(KINGSUN_SND_PACKET_SIZE, GFP_KERNEL); + if (!kingsun->tx_buf_xored) + goto free_mem; + + /* Allocate and initialize output setup packet */ + kingsun->tx_setuprequest = + kmalloc(sizeof(struct usb_ctrlrequest), GFP_KERNEL); + if (!kingsun->tx_setuprequest) + goto free_mem; + kingsun->tx_setuprequest->bRequestType = + USB_DIR_OUT | 
USB_TYPE_CLASS | USB_RECIP_INTERFACE; + kingsun->tx_setuprequest->bRequest = KINGSUN_REQ_SEND; + kingsun->tx_setuprequest->wValue = 0; + kingsun->tx_setuprequest->wIndex = 0; + kingsun->tx_setuprequest->wLength = 0; + + /* Allocate and initialize speed setup packet */ + kingsun->speed_setuprequest = + kmalloc(sizeof(struct usb_ctrlrequest), GFP_KERNEL); + if (!kingsun->speed_setuprequest) + goto free_mem; + kingsun->speed_setuprequest->bRequestType = + USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE; + kingsun->speed_setuprequest->bRequest = KINGSUN_REQ_SEND; + kingsun->speed_setuprequest->wValue = cpu_to_le16(0x0200); + kingsun->speed_setuprequest->wIndex = cpu_to_le16(0x0001); + kingsun->speed_setuprequest->wLength = + cpu_to_le16(sizeof(struct ks959_speedparams)); + + printk(KERN_INFO "KingSun KS-959 IRDA/USB found at address %d, " + "Vendor: %x, Product: %x\n", + dev->devnum, le16_to_cpu(dev->descriptor.idVendor), + le16_to_cpu(dev->descriptor.idProduct)); + + /* Initialize QoS for this device */ + irda_init_max_qos_capabilies(&kingsun->qos); + + /* Baud rates known to be supported. Please uncomment if devices (other + than a SonyEriccson K300 phone) can be shown to support higher speed + with this dongle. + */ + kingsun->qos.baud_rate.bits = + IR_2400 | IR_9600 | IR_19200 | IR_38400 | IR_57600; + kingsun->qos.min_turn_time.bits &= KINGSUN_MTT; + irda_qos_bits_to_value(&kingsun->qos); + + /* Override the network functions we need to use */ + net->hard_start_xmit = ks959_hard_xmit; + net->open = ks959_net_open; + net->stop = ks959_net_close; + net->get_stats = ks959_net_get_stats; + net->do_ioctl = ks959_net_ioctl; + + ret = register_netdev(net); + if (ret != 0) + goto free_mem; + + info("IrDA: Registered KingSun KS-959 device %s", net->name); + + usb_set_intfdata(intf, kingsun); + + /* Situation at this point: + - all work buffers allocated + - setup requests pre-filled + - urbs not allocated, set to NULL + - max rx packet known (is KINGSUN_FIFO_SIZE) + - unwrap state machine (partially) initialized, but skb == NULL + */ + + return 0; + + free_mem: + kfree(kingsun->speed_setuprequest); + kfree(kingsun->tx_setuprequest); + kfree(kingsun->tx_buf_xored); + kfree(kingsun->tx_buf_clear); + kfree(kingsun->rx_setuprequest); + kfree(kingsun->rx_buf); + free_netdev(net); + err_out1: + return ret; +} + +/* + * The current device is removed, the USB layer tell us to shut it down... 
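+ * The netdev is unregistered first so no new transmissions can start; any
+ * in-flight URBs are then killed before their buffers are freed.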
+ */ +static void ks959_disconnect(struct usb_interface *intf) +{ + struct ks959_cb *kingsun = usb_get_intfdata(intf); + + if (!kingsun) + return; + + unregister_netdev(kingsun->netdev); + + /* Mop up receive && transmit urb's */ + if (kingsun->speed_urb != NULL) { + usb_kill_urb(kingsun->speed_urb); + usb_free_urb(kingsun->speed_urb); + kingsun->speed_urb = NULL; + } + if (kingsun->tx_urb != NULL) { + usb_kill_urb(kingsun->tx_urb); + usb_free_urb(kingsun->tx_urb); + kingsun->tx_urb = NULL; + } + if (kingsun->rx_urb != NULL) { + usb_kill_urb(kingsun->rx_urb); + usb_free_urb(kingsun->rx_urb); + kingsun->rx_urb = NULL; + } + + kfree(kingsun->speed_setuprequest); + kfree(kingsun->tx_setuprequest); + kfree(kingsun->tx_buf_xored); + kfree(kingsun->tx_buf_clear); + kfree(kingsun->rx_setuprequest); + kfree(kingsun->rx_buf); + free_netdev(kingsun->netdev); + + usb_set_intfdata(intf, NULL); +} + +#ifdef CONFIG_PM +/* USB suspend, so power off the transmitter/receiver */ +static int ks959_suspend(struct usb_interface *intf, pm_message_t message) +{ + struct ks959_cb *kingsun = usb_get_intfdata(intf); + + netif_device_detach(kingsun->netdev); + if (kingsun->speed_urb != NULL) + usb_kill_urb(kingsun->speed_urb); + if (kingsun->tx_urb != NULL) + usb_kill_urb(kingsun->tx_urb); + if (kingsun->rx_urb != NULL) + usb_kill_urb(kingsun->rx_urb); + return 0; +} + +/* Coming out of suspend, so reset hardware */ +static int ks959_resume(struct usb_interface *intf) +{ + struct ks959_cb *kingsun = usb_get_intfdata(intf); + + if (kingsun->rx_urb != NULL) { + /* Setup request already filled in ks959_probe */ + usb_submit_urb(kingsun->rx_urb, GFP_KERNEL); + } + netif_device_attach(kingsun->netdev); + + return 0; +} +#endif + +/* + * USB device callbacks + */ +static struct usb_driver irda_driver = { + .name = "ks959-sir", + .probe = ks959_probe, + .disconnect = ks959_disconnect, + .id_table = dongles, +#ifdef CONFIG_PM + .suspend = ks959_suspend, + .resume = ks959_resume, +#endif +}; + +/* + * Module insertion + */ +static int __init ks959_init(void) +{ + return usb_register(&irda_driver); +} + +module_init(ks959_init); + +/* + * Module removal + */ +static void __exit ks959_cleanup(void) +{ + /* Deregister the driver and remove all pending instances */ + usb_deregister(&irda_driver); +} + +module_exit(ks959_cleanup); + +MODULE_AUTHOR("Alex Villacís Lasso "); +MODULE_DESCRIPTION("IrDA-USB Dongle Driver for KingSun KS-959"); +MODULE_LICENSE("GPL"); diff --git a/drivers/net/irda/ksdazzle-sir.c b/drivers/net/irda/ksdazzle-sir.c new file mode 100644 index 0000000000000000000000000000000000000000..d01a28593ce27e7222b34dcbc74619690a68ba72 --- /dev/null +++ b/drivers/net/irda/ksdazzle-sir.c @@ -0,0 +1,832 @@ +/***************************************************************************** +* +* Filename: ksdazzle.c +* Version: 0.1.2 +* Description: Irda KingSun Dazzle USB Dongle +* Status: Experimental +* Author: Alex Villacís Lasso +* +* Based on stir4200, mcs7780, kingsun-sir drivers. +* +* This program is free software; you can redistribute it and/or modify +* it under the terms of the GNU General Public License as published by +* the Free Software Foundation; either version 2 of the License. +* +* This program is distributed in the hope that it will be useful, +* but WITHOUT ANY WARRANTY; without even the implied warranty of +* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +* GNU General Public License for more details. 
+* +* You should have received a copy of the GNU General Public License +* along with this program; if not, write to the Free Software +* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. +* +*****************************************************************************/ + +/* + * Following is my most current (2007-07-26) understanding of how the Kingsun + * 07D0:4100 dongle (sometimes known as the MA-660) is supposed to work. This + * information was deduced by examining the USB traffic captured with USBSnoopy + * from the WinXP driver. Feel free to update here as more of the dongle is + * known. + * + * General: This dongle exposes one interface with two interrupt endpoints, one + * IN and one OUT. In this regard, it is similar to what the Kingsun/Donshine + * dongle (07c0:4200) exposes. Traffic is raw and needs to be wrapped and + * unwrapped manually as in stir4200, kingsun-sir, and ks959-sir. + * + * Transmission: To transmit an IrDA frame, it is necessary to wrap it, then + * split it into multiple segments of up to 7 bytes each, and transmit each in + * sequence. It seems that sending a single big block (like kingsun-sir does) + * won't work with this dongle. Each segment needs to be prefixed with a value + * equal to (unsigned char)0xF8 + , inside a payload + * of exactly 8 bytes. For example, a segment of 1 byte gets prefixed by 0xF9, + * and one of 7 bytes gets prefixed by 0xFF. The bytes at the end of the + * payload, not considered by the prefix, are ignored (set to 0 by this + * implementation). + * + * Reception: To receive data, the driver must poll the dongle regularly (like + * kingsun-sir.c) with interrupt URBs. If data is available, it will be returned + * in payloads from 0 to 8 bytes long. When concatenated, these payloads form + * a raw IrDA stream that needs to be unwrapped as in stir4200 and kingsun-sir + * + * Speed change: To change the speed of the dongle, the driver prepares a + * control URB with the following as a setup packet: + * bRequestType USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE + * bRequest 0x09 + * wValue 0x0200 + * wIndex 0x0001 + * wLength 0x0008 (length of the payload) + * The payload is a 8-byte record, apparently identical to the one used in + * drivers/usb/serial/cypress_m8.c to change speed: + * __u32 baudSpeed; + * unsigned int dataBits : 2; // 0 - 5 bits 3 - 8 bits + * unsigned int : 1; + * unsigned int stopBits : 1; + * unsigned int parityEnable : 1; + * unsigned int parityType : 1; + * unsigned int : 1; + * unsigned int reset : 1; + * unsigned char reserved[3]; // set to 0 + * + * For now only SIR speeds have been observed with this dongle. Therefore, + * nothing is known on what changes (if any) must be done to frame wrapping / + * unwrapping for higher than SIR speeds. This driver assumes no change is + * necessary and announces support for all the way to 115200 bps. 
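+ *
+ * Illustrative example (not taken from a capture, just the rules above
+ * applied): a wrapped frame of ten bytes D0..D9 would go out as two 8-byte
+ * interrupt payloads:
+ *   0xFF D0 D1 D2 D3 D4 D5 D6       (prefix 0xF8 + 7, full segment)
+ *   0xFB D7 D8 D9 00 00 00 00       (prefix 0xF8 + 3, remainder padded)
+ * Likewise, a change to 9600 bps with 8 data bits would use the payload
+ *   80 25 00 00 03 00 00 00         (baudSpeed = 9600 little endian,
+ *                                    dataBits = 3, all other fields 0).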
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include +#include +#include + +#define KSDAZZLE_VENDOR_ID 0x07d0 +#define KSDAZZLE_PRODUCT_ID 0x4100 + +/* These are the currently known USB ids */ +static struct usb_device_id dongles[] = { + /* KingSun Co,Ltd IrDA/USB Bridge */ + {USB_DEVICE(KSDAZZLE_VENDOR_ID, KSDAZZLE_PRODUCT_ID)}, + {} +}; + +MODULE_DEVICE_TABLE(usb, dongles); + +#define KINGSUN_MTT 0x07 +#define KINGSUN_REQ_RECV 0x01 +#define KINGSUN_REQ_SEND 0x09 + +#define KINGSUN_SND_FIFO_SIZE 2048 /* Max packet we can send */ +#define KINGSUN_RCV_MAX 2048 /* Max transfer we can receive */ + +struct ksdazzle_speedparams { + __le32 baudrate; /* baud rate, little endian */ + __u8 flags; + __u8 reserved[3]; +} __attribute__ ((packed)); + +#define KS_DATA_5_BITS 0x00 +#define KS_DATA_6_BITS 0x01 +#define KS_DATA_7_BITS 0x02 +#define KS_DATA_8_BITS 0x03 + +#define KS_STOP_BITS_1 0x00 +#define KS_STOP_BITS_2 0x08 + +#define KS_PAR_DISABLE 0x00 +#define KS_PAR_EVEN 0x10 +#define KS_PAR_ODD 0x30 +#define KS_RESET 0x80 + +#define KINGSUN_EP_IN 0 +#define KINGSUN_EP_OUT 1 + +struct ksdazzle_cb { + struct usb_device *usbdev; /* init: probe_irda */ + struct net_device *netdev; /* network layer */ + struct irlap_cb *irlap; /* The link layer we are binded to */ + struct net_device_stats stats; /* network statistics */ + struct qos_info qos; + + struct urb *tx_urb; + __u8 *tx_buf_clear; + unsigned int tx_buf_clear_used; + unsigned int tx_buf_clear_sent; + __u8 tx_payload[8]; + + struct urb *rx_urb; + __u8 *rx_buf; + iobuff_t rx_unwrap_buff; + + struct usb_ctrlrequest *speed_setuprequest; + struct urb *speed_urb; + struct ksdazzle_speedparams speedparams; + unsigned int new_speed; + + __u8 ep_in; + __u8 ep_out; + + spinlock_t lock; + int receiving; +}; + +/* Callback transmission routine */ +static void ksdazzle_speed_irq(struct urb *urb) +{ + /* unlink, shutdown, unplug, other nasties */ + if (urb->status != 0) { + err("ksdazzle_speed_irq: urb asynchronously failed - %d", + urb->status); + } +} + +/* Send a control request to change speed of the dongle */ +static int ksdazzle_change_speed(struct ksdazzle_cb *kingsun, unsigned speed) +{ + static unsigned int supported_speeds[] = { 2400, 9600, 19200, 38400, + 57600, 115200, 576000, 1152000, 4000000, 0 + }; + int err; + unsigned int i; + + if (kingsun->speed_setuprequest == NULL || kingsun->speed_urb == NULL) + return -ENOMEM; + + /* Check that requested speed is among the supported ones */ + for (i = 0; supported_speeds[i] && supported_speeds[i] != speed; i++) ; + if (supported_speeds[i] == 0) + return -EOPNOTSUPP; + + memset(&(kingsun->speedparams), 0, sizeof(struct ksdazzle_speedparams)); + kingsun->speedparams.baudrate = cpu_to_le32(speed); + kingsun->speedparams.flags = KS_DATA_8_BITS; + + /* speed_setuprequest pre-filled in ksdazzle_probe */ + usb_fill_control_urb(kingsun->speed_urb, kingsun->usbdev, + usb_sndctrlpipe(kingsun->usbdev, 0), + (unsigned char *)kingsun->speed_setuprequest, + &(kingsun->speedparams), + sizeof(struct ksdazzle_speedparams), + ksdazzle_speed_irq, kingsun); + kingsun->speed_urb->status = 0; + err = usb_submit_urb(kingsun->speed_urb, GFP_ATOMIC); + + return err; +} + +/* Submit one fragment of an IrDA frame to the dongle */ +static void ksdazzle_send_irq(struct urb *urb); +static int ksdazzle_submit_tx_fragment(struct ksdazzle_cb *kingsun) +{ + unsigned int wraplen; + int ret; + + /* We can send at most 7 
bytes of payload at a time */ + wraplen = 7; + if (wraplen > kingsun->tx_buf_clear_used) + wraplen = kingsun->tx_buf_clear_used; + + /* Prepare payload prefix with used length */ + memset(kingsun->tx_payload, 0, 8); + kingsun->tx_payload[0] = (unsigned char)0xf8 + wraplen; + memcpy(kingsun->tx_payload + 1, kingsun->tx_buf_clear, wraplen); + + usb_fill_int_urb(kingsun->tx_urb, kingsun->usbdev, + usb_sndintpipe(kingsun->usbdev, kingsun->ep_out), + kingsun->tx_payload, 8, ksdazzle_send_irq, kingsun, 1); + kingsun->tx_urb->status = 0; + ret = usb_submit_urb(kingsun->tx_urb, GFP_ATOMIC); + + /* Remember how much data was sent, in order to update at callback */ + kingsun->tx_buf_clear_sent = (ret == 0) ? wraplen : 0; + return ret; +} + +/* Callback transmission routine */ +static void ksdazzle_send_irq(struct urb *urb) +{ + struct ksdazzle_cb *kingsun = urb->context; + struct net_device *netdev = kingsun->netdev; + int ret = 0; + + /* in process of stopping, just drop data */ + if (!netif_running(kingsun->netdev)) { + err("ksdazzle_send_irq: Network not running!"); + return; + } + + /* unlink, shutdown, unplug, other nasties */ + if (urb->status != 0) { + err("ksdazzle_send_irq: urb asynchronously failed - %d", + urb->status); + return; + } + + if (kingsun->tx_buf_clear_used > 0) { + /* Update data remaining to be sent */ + if (kingsun->tx_buf_clear_sent < kingsun->tx_buf_clear_used) { + memmove(kingsun->tx_buf_clear, + kingsun->tx_buf_clear + + kingsun->tx_buf_clear_sent, + kingsun->tx_buf_clear_used - + kingsun->tx_buf_clear_sent); + } + kingsun->tx_buf_clear_used -= kingsun->tx_buf_clear_sent; + kingsun->tx_buf_clear_sent = 0; + + if (kingsun->tx_buf_clear_used > 0) { + /* There is more data to be sent */ + if ((ret = ksdazzle_submit_tx_fragment(kingsun)) != 0) { + err("ksdazzle_send_irq: failed tx_urb submit: %d", ret); + switch (ret) { + case -ENODEV: + case -EPIPE: + break; + default: + kingsun->stats.tx_errors++; + netif_start_queue(netdev); + } + } + } else { + /* All data sent, send next speed && wake network queue */ + if (kingsun->new_speed != -1 && + cpu_to_le32(kingsun->new_speed) != + kingsun->speedparams.baudrate) + ksdazzle_change_speed(kingsun, + kingsun->new_speed); + + netif_wake_queue(netdev); + } + } +} + +/* + * Called from net/core when new frame is available. 
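+ * The frame is async-wrapped into tx_buf_clear and sent from there in
+ * segments of at most 7 payload bytes (see ksdazzle_submit_tx_fragment);
+ * ksdazzle_send_irq() keeps submitting segments until the buffer is drained.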
+ */ +static int ksdazzle_hard_xmit(struct sk_buff *skb, struct net_device *netdev) +{ + struct ksdazzle_cb *kingsun; + unsigned int wraplen; + int ret = 0; + + if (skb == NULL || netdev == NULL) + return -EINVAL; + + netif_stop_queue(netdev); + + /* the IRDA wrapping routines don't deal with non linear skb */ + SKB_LINEAR_ASSERT(skb); + + kingsun = netdev_priv(netdev); + + spin_lock(&kingsun->lock); + kingsun->new_speed = irda_get_next_speed(skb); + + /* Append data to the end of whatever data remains to be transmitted */ + wraplen = + async_wrap_skb(skb, kingsun->tx_buf_clear, KINGSUN_SND_FIFO_SIZE); + kingsun->tx_buf_clear_used = wraplen; + + if ((ret = ksdazzle_submit_tx_fragment(kingsun)) != 0) { + err("ksdazzle_hard_xmit: failed tx_urb submit: %d", ret); + switch (ret) { + case -ENODEV: + case -EPIPE: + break; + default: + kingsun->stats.tx_errors++; + netif_start_queue(netdev); + } + } else { + kingsun->stats.tx_packets++; + kingsun->stats.tx_bytes += skb->len; + + } + + dev_kfree_skb(skb); + spin_unlock(&kingsun->lock); + + return ret; +} + +/* Receive callback function */ +static void ksdazzle_rcv_irq(struct urb *urb) +{ + struct ksdazzle_cb *kingsun = urb->context; + + /* in process of stopping, just drop data */ + if (!netif_running(kingsun->netdev)) { + kingsun->receiving = 0; + return; + } + + /* unlink, shutdown, unplug, other nasties */ + if (urb->status != 0) { + err("ksdazzle_rcv_irq: urb asynchronously failed - %d", + urb->status); + kingsun->receiving = 0; + return; + } + + if (urb->actual_length > 0) { + __u8 *bytes = urb->transfer_buffer; + unsigned int i; + + for (i = 0; i < urb->actual_length; i++) { + async_unwrap_char(kingsun->netdev, &kingsun->stats, + &kingsun->rx_unwrap_buff, bytes[i]); + } + kingsun->netdev->last_rx = jiffies; + kingsun->receiving = + (kingsun->rx_unwrap_buff.state != OUTSIDE_FRAME) ? 1 : 0; + } + + /* This urb has already been filled in ksdazzle_net_open. It is assumed that + urb keeps the pointer to the payload buffer. + */ + urb->status = 0; + usb_submit_urb(urb, GFP_ATOMIC); +} + +/* + * Function ksdazzle_net_open (dev) + * + * Network device is taken up. Usually this is done by "ifconfig irda0 up" + */ +static int ksdazzle_net_open(struct net_device *netdev) +{ + struct ksdazzle_cb *kingsun = netdev_priv(netdev); + int err = -ENOMEM; + char hwname[16]; + + /* At this point, urbs are NULL, and skb is NULL (see ksdazzle_probe) */ + kingsun->receiving = 0; + + /* Initialize for SIR to copy data directly into skb. */ + kingsun->rx_unwrap_buff.in_frame = FALSE; + kingsun->rx_unwrap_buff.state = OUTSIDE_FRAME; + kingsun->rx_unwrap_buff.truesize = IRDA_SKB_MAX_MTU; + kingsun->rx_unwrap_buff.skb = dev_alloc_skb(IRDA_SKB_MAX_MTU); + if (!kingsun->rx_unwrap_buff.skb) + goto free_mem; + + skb_reserve(kingsun->rx_unwrap_buff.skb, 1); + kingsun->rx_unwrap_buff.head = kingsun->rx_unwrap_buff.skb->data; + + kingsun->rx_urb = usb_alloc_urb(0, GFP_KERNEL); + if (!kingsun->rx_urb) + goto free_mem; + + kingsun->tx_urb = usb_alloc_urb(0, GFP_KERNEL); + if (!kingsun->tx_urb) + goto free_mem; + + kingsun->speed_urb = usb_alloc_urb(0, GFP_KERNEL); + if (!kingsun->speed_urb) + goto free_mem; + + /* Initialize speed for dongle */ + kingsun->new_speed = 9600; + err = ksdazzle_change_speed(kingsun, 9600); + if (err < 0) + goto free_mem; + + /* + * Now that everything should be initialized properly, + * Open new IrLAP layer instance to take care of us... 
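+	 * irlap_open() binds this netdev to a fresh IrLAP instance under the
+	 * "usb#<devnum>" hardware name built just below.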
+ */ + sprintf(hwname, "usb#%d", kingsun->usbdev->devnum); + kingsun->irlap = irlap_open(netdev, &kingsun->qos, hwname); + if (!kingsun->irlap) { + err("ksdazzle-sir: irlap_open failed"); + goto free_mem; + } + + /* Start reception. */ + usb_fill_int_urb(kingsun->rx_urb, kingsun->usbdev, + usb_rcvintpipe(kingsun->usbdev, kingsun->ep_in), + kingsun->rx_buf, KINGSUN_RCV_MAX, ksdazzle_rcv_irq, + kingsun, 1); + kingsun->rx_urb->status = 0; + err = usb_submit_urb(kingsun->rx_urb, GFP_KERNEL); + if (err) { + err("ksdazzle-sir: first urb-submit failed: %d", err); + goto close_irlap; + } + + netif_start_queue(netdev); + + /* Situation at this point: + - all work buffers allocated + - urbs allocated and ready to fill + - max rx packet known (in max_rx) + - unwrap state machine initialized, in state outside of any frame + - receive request in progress + - IrLAP layer started, about to hand over packets to send + */ + + return 0; + + close_irlap: + irlap_close(kingsun->irlap); + free_mem: + usb_free_urb(kingsun->speed_urb); + kingsun->speed_urb = NULL; + usb_free_urb(kingsun->tx_urb); + kingsun->tx_urb = NULL; + usb_free_urb(kingsun->rx_urb); + kingsun->rx_urb = NULL; + if (kingsun->rx_unwrap_buff.skb) { + kfree_skb(kingsun->rx_unwrap_buff.skb); + kingsun->rx_unwrap_buff.skb = NULL; + kingsun->rx_unwrap_buff.head = NULL; + } + return err; +} + +/* + * Function ksdazzle_net_close (dev) + * + * Network device is taken down. Usually this is done by + * "ifconfig irda0 down" + */ +static int ksdazzle_net_close(struct net_device *netdev) +{ + struct ksdazzle_cb *kingsun = netdev_priv(netdev); + + /* Stop transmit processing */ + netif_stop_queue(netdev); + + /* Mop up receive && transmit urb's */ + usb_kill_urb(kingsun->tx_urb); + usb_free_urb(kingsun->tx_urb); + kingsun->tx_urb = NULL; + + usb_kill_urb(kingsun->speed_urb); + usb_free_urb(kingsun->speed_urb); + kingsun->speed_urb = NULL; + + usb_kill_urb(kingsun->rx_urb); + usb_free_urb(kingsun->rx_urb); + kingsun->rx_urb = NULL; + + kfree_skb(kingsun->rx_unwrap_buff.skb); + kingsun->rx_unwrap_buff.skb = NULL; + kingsun->rx_unwrap_buff.head = NULL; + kingsun->rx_unwrap_buff.in_frame = FALSE; + kingsun->rx_unwrap_buff.state = OUTSIDE_FRAME; + kingsun->receiving = 0; + + /* Stop and remove instance of IrLAP */ + irlap_close(kingsun->irlap); + + kingsun->irlap = NULL; + + return 0; +} + +/* + * IOCTLs : Extra out-of-band network commands... 
+ */ +static int ksdazzle_net_ioctl(struct net_device *netdev, struct ifreq *rq, + int cmd) +{ + struct if_irda_req *irq = (struct if_irda_req *)rq; + struct ksdazzle_cb *kingsun = netdev_priv(netdev); + int ret = 0; + + switch (cmd) { + case SIOCSBANDWIDTH: /* Set bandwidth */ + if (!capable(CAP_NET_ADMIN)) + return -EPERM; + + /* Check if the device is still there */ + if (netif_device_present(kingsun->netdev)) + return ksdazzle_change_speed(kingsun, + irq->ifr_baudrate); + break; + + case SIOCSMEDIABUSY: /* Set media busy */ + if (!capable(CAP_NET_ADMIN)) + return -EPERM; + + /* Check if the IrDA stack is still there */ + if (netif_running(kingsun->netdev)) + irda_device_set_media_busy(kingsun->netdev, TRUE); + break; + + case SIOCGRECEIVING: + /* Only approximately true */ + irq->ifr_receiving = kingsun->receiving; + break; + + default: + ret = -EOPNOTSUPP; + } + + return ret; +} + +/* + * Get device stats (for /proc/net/dev and ifconfig) + */ +static struct net_device_stats *ksdazzle_net_get_stats(struct net_device + *netdev) +{ + struct ksdazzle_cb *kingsun = netdev_priv(netdev); + return &kingsun->stats; +} + +/* + * This routine is called by the USB subsystem for each new device + * in the system. We need to check if the device is ours, and in + * this case start handling it. + */ +static int ksdazzle_probe(struct usb_interface *intf, + const struct usb_device_id *id) +{ + struct usb_host_interface *interface; + struct usb_endpoint_descriptor *endpoint; + + struct usb_device *dev = interface_to_usbdev(intf); + struct ksdazzle_cb *kingsun = NULL; + struct net_device *net = NULL; + int ret = -ENOMEM; + int pipe, maxp_in, maxp_out; + __u8 ep_in; + __u8 ep_out; + + /* Check that there really are two interrupt endpoints. Check based on the + one in drivers/usb/input/usbmouse.c + */ + interface = intf->cur_altsetting; + if (interface->desc.bNumEndpoints != 2) { + err("ksdazzle: expected 2 endpoints, found %d", + interface->desc.bNumEndpoints); + return -ENODEV; + } + endpoint = &interface->endpoint[KINGSUN_EP_IN].desc; + if (!usb_endpoint_is_int_in(endpoint)) { + err("ksdazzle: endpoint 0 is not interrupt IN"); + return -ENODEV; + } + + ep_in = endpoint->bEndpointAddress; + pipe = usb_rcvintpipe(dev, ep_in); + maxp_in = usb_maxpacket(dev, pipe, usb_pipeout(pipe)); + if (maxp_in > 255 || maxp_in <= 1) { + err("ksdazzle: endpoint 0 has max packet size %d not in range [2..255]", maxp_in); + return -ENODEV; + } + + endpoint = &interface->endpoint[KINGSUN_EP_OUT].desc; + if (!usb_endpoint_is_int_out(endpoint)) { + err("ksdazzle: endpoint 1 is not interrupt OUT"); + return -ENODEV; + } + + ep_out = endpoint->bEndpointAddress; + pipe = usb_sndintpipe(dev, ep_out); + maxp_out = usb_maxpacket(dev, pipe, usb_pipeout(pipe)); + + /* Allocate network device container. 
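+	   alloc_irdadev() reserves room for the ksdazzle_cb control block,
+	   which is reachable afterwards through netdev_priv().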
*/ + net = alloc_irdadev(sizeof(*kingsun)); + if (!net) + goto err_out1; + + SET_NETDEV_DEV(net, &intf->dev); + kingsun = netdev_priv(net); + kingsun->netdev = net; + kingsun->usbdev = dev; + kingsun->ep_in = ep_in; + kingsun->ep_out = ep_out; + kingsun->irlap = NULL; + kingsun->tx_urb = NULL; + kingsun->tx_buf_clear = NULL; + kingsun->tx_buf_clear_used = 0; + kingsun->tx_buf_clear_sent = 0; + + kingsun->rx_urb = NULL; + kingsun->rx_buf = NULL; + kingsun->rx_unwrap_buff.in_frame = FALSE; + kingsun->rx_unwrap_buff.state = OUTSIDE_FRAME; + kingsun->rx_unwrap_buff.skb = NULL; + kingsun->receiving = 0; + spin_lock_init(&kingsun->lock); + + kingsun->speed_setuprequest = NULL; + kingsun->speed_urb = NULL; + kingsun->speedparams.baudrate = 0; + + /* Allocate input buffer */ + kingsun->rx_buf = kmalloc(KINGSUN_RCV_MAX, GFP_KERNEL); + if (!kingsun->rx_buf) + goto free_mem; + + /* Allocate output buffer */ + kingsun->tx_buf_clear = kmalloc(KINGSUN_SND_FIFO_SIZE, GFP_KERNEL); + if (!kingsun->tx_buf_clear) + goto free_mem; + + /* Allocate and initialize speed setup packet */ + kingsun->speed_setuprequest = + kmalloc(sizeof(struct usb_ctrlrequest), GFP_KERNEL); + if (!kingsun->speed_setuprequest) + goto free_mem; + kingsun->speed_setuprequest->bRequestType = + USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE; + kingsun->speed_setuprequest->bRequest = KINGSUN_REQ_SEND; + kingsun->speed_setuprequest->wValue = cpu_to_le16(0x0200); + kingsun->speed_setuprequest->wIndex = cpu_to_le16(0x0001); + kingsun->speed_setuprequest->wLength = + cpu_to_le16(sizeof(struct ksdazzle_speedparams)); + + printk(KERN_INFO "KingSun/Dazzle IRDA/USB found at address %d, " + "Vendor: %x, Product: %x\n", + dev->devnum, le16_to_cpu(dev->descriptor.idVendor), + le16_to_cpu(dev->descriptor.idProduct)); + + /* Initialize QoS for this device */ + irda_init_max_qos_capabilies(&kingsun->qos); + + /* Baud rates known to be supported. Please uncomment if devices (other + than a SonyEriccson K300 phone) can be shown to support higher speeds + with this dongle. + */ + kingsun->qos.baud_rate.bits = + IR_2400 | IR_9600 | IR_19200 | IR_38400 | IR_57600 | IR_115200; + kingsun->qos.min_turn_time.bits &= KINGSUN_MTT; + irda_qos_bits_to_value(&kingsun->qos); + + /* Override the network functions we need to use */ + net->hard_start_xmit = ksdazzle_hard_xmit; + net->open = ksdazzle_net_open; + net->stop = ksdazzle_net_close; + net->get_stats = ksdazzle_net_get_stats; + net->do_ioctl = ksdazzle_net_ioctl; + + ret = register_netdev(net); + if (ret != 0) + goto free_mem; + + info("IrDA: Registered KingSun/Dazzle device %s", net->name); + + usb_set_intfdata(intf, kingsun); + + /* Situation at this point: + - all work buffers allocated + - setup requests pre-filled + - urbs not allocated, set to NULL + - max rx packet known (is KINGSUN_FIFO_SIZE) + - unwrap state machine (partially) initialized, but skb == NULL + */ + + return 0; + + free_mem: + kfree(kingsun->speed_setuprequest); + kfree(kingsun->tx_buf_clear); + kfree(kingsun->rx_buf); + free_netdev(net); + err_out1: + return ret; +} + +/* + * The current device is removed, the USB layer tell us to shut it down... 
+ */ +static void ksdazzle_disconnect(struct usb_interface *intf) +{ + struct ksdazzle_cb *kingsun = usb_get_intfdata(intf); + + if (!kingsun) + return; + + unregister_netdev(kingsun->netdev); + + /* Mop up receive && transmit urb's */ + usb_kill_urb(kingsun->speed_urb); + usb_free_urb(kingsun->speed_urb); + kingsun->speed_urb = NULL; + + usb_kill_urb(kingsun->tx_urb); + usb_free_urb(kingsun->tx_urb); + kingsun->tx_urb = NULL; + + usb_kill_urb(kingsun->rx_urb); + usb_free_urb(kingsun->rx_urb); + kingsun->rx_urb = NULL; + + kfree(kingsun->speed_setuprequest); + kfree(kingsun->tx_buf_clear); + kfree(kingsun->rx_buf); + free_netdev(kingsun->netdev); + + usb_set_intfdata(intf, NULL); +} + +#ifdef CONFIG_PM +/* USB suspend, so power off the transmitter/receiver */ +static int ksdazzle_suspend(struct usb_interface *intf, pm_message_t message) +{ + struct ksdazzle_cb *kingsun = usb_get_intfdata(intf); + + netif_device_detach(kingsun->netdev); + if (kingsun->speed_urb != NULL) + usb_kill_urb(kingsun->speed_urb); + if (kingsun->tx_urb != NULL) + usb_kill_urb(kingsun->tx_urb); + if (kingsun->rx_urb != NULL) + usb_kill_urb(kingsun->rx_urb); + return 0; +} + +/* Coming out of suspend, so reset hardware */ +static int ksdazzle_resume(struct usb_interface *intf) +{ + struct ksdazzle_cb *kingsun = usb_get_intfdata(intf); + + if (kingsun->rx_urb != NULL) { + /* Setup request already filled in ksdazzle_probe */ + usb_submit_urb(kingsun->rx_urb, GFP_KERNEL); + } + netif_device_attach(kingsun->netdev); + + return 0; +} +#endif + +/* + * USB device callbacks + */ +static struct usb_driver irda_driver = { + .name = "ksdazzle-sir", + .probe = ksdazzle_probe, + .disconnect = ksdazzle_disconnect, + .id_table = dongles, +#ifdef CONFIG_PM + .suspend = ksdazzle_suspend, + .resume = ksdazzle_resume, +#endif +}; + +/* + * Module insertion + */ +static int __init ksdazzle_init(void) +{ + return usb_register(&irda_driver); +} + +module_init(ksdazzle_init); + +/* + * Module removal + */ +static void __exit ksdazzle_cleanup(void) +{ + /* Deregister the driver and remove all pending instances */ + usb_deregister(&irda_driver); +} + +module_exit(ksdazzle_cleanup); + +MODULE_AUTHOR("Alex Villacís Lasso "); +MODULE_DESCRIPTION("IrDA-USB Dongle Driver for KingSun Dazzle"); +MODULE_LICENSE("GPL"); diff --git a/drivers/net/irda/mcs7780.c b/drivers/net/irda/mcs7780.c index 0de867288a473321bc6cfdf83834bdbea1fed538..0b769192d4ce0753c872410c601a1e2eb4849a8e 100644 --- a/drivers/net/irda/mcs7780.c +++ b/drivers/net/irda/mcs7780.c @@ -50,7 +50,6 @@ #include #include #include -#include #include #include #include @@ -465,7 +464,7 @@ static void mcs_unwrap_fir(struct mcs_cb *mcs, __u8 *buf, int len) } fcs = ~(crc32_le(~0, buf, new_len)); - if(fcs != le32_to_cpu(get_unaligned((u32 *)(buf+new_len)))) { + if(fcs != le32_to_cpu(get_unaligned((__le32 *)(buf+new_len)))) { IRDA_ERROR("crc error calc 0x%x len %d\n", fcs, new_len); mcs->stats.rx_errors++; mcs->stats.rx_crc_errors++; @@ -899,8 +898,6 @@ static int mcs_probe(struct usb_interface *intf, IRDA_DEBUG(1, "MCS7780 USB-IrDA bridge found at %d.\n", udev->devnum); - /* what is it realy for? 
*/ - SET_MODULE_OWNER(ndev); SET_NETDEV_DEV(ndev, &intf->dev); ret = usb_reset_configuration(udev); diff --git a/drivers/net/irda/nsc-ircc.c b/drivers/net/irda/nsc-ircc.c index d96c89751a719b9d3fc791b3ed2def107d3cb578..12b9378c587f890c090bb807a3cf7191cdcfd9fa 100644 --- a/drivers/net/irda/nsc-ircc.c +++ b/drivers/net/irda/nsc-ircc.c @@ -437,7 +437,6 @@ static int __init nsc_ircc_open(chipio_t *info) self->tx_fifo.tail = self->tx_buff.head; /* Override the network functions we need to use */ - SET_MODULE_OWNER(dev); dev->hard_start_xmit = nsc_ircc_hard_xmit_sir; dev->open = nsc_ircc_net_open; dev->stop = nsc_ircc_net_close; diff --git a/drivers/net/irda/sir_dev.c b/drivers/net/irda/sir_dev.c index 9d6c8f391b2d594338bf457f3119363160d7cf07..6078e03de9a83d256368cf61a4c7be4710b7e612 100644 --- a/drivers/net/irda/sir_dev.c +++ b/drivers/net/irda/sir_dev.c @@ -414,7 +414,7 @@ EXPORT_SYMBOL(sirdev_raw_read); int sirdev_set_dtr_rts(struct sir_dev *dev, int dtr, int rts) { int ret = -ENXIO; - if (dev->drv->set_dtr_rts != 0) + if (dev->drv->set_dtr_rts) ret = dev->drv->set_dtr_rts(dev, dtr, rts); return ret; } @@ -913,8 +913,6 @@ struct sir_dev * sirdev_get_instance(const struct sir_driver *drv, const char *n dev->drv = drv; dev->netdev = ndev; - SET_MODULE_OWNER(ndev); - /* Override the network functions we need to use */ ndev->hard_start_xmit = sirdev_hard_xmit; ndev->open = sirdev_open; diff --git a/drivers/net/irda/smsc-ircc2.c b/drivers/net/irda/smsc-ircc2.c index 36ab98386be0a7d08bdc9e0de2f5a780014d7b2d..7e7b5828214a89fa12e587f3b3fc0a31aa27e54b 100644 --- a/drivers/net/irda/smsc-ircc2.c +++ b/drivers/net/irda/smsc-ircc2.c @@ -81,7 +81,7 @@ MODULE_LICENSE("GPL"); static int smsc_nopnp = 1; module_param_named(nopnp, smsc_nopnp, bool, 0); -MODULE_PARM_DESC(nopnp, "Do not use PNP to detect controller settings"); +MODULE_PARM_DESC(nopnp, "Do not use PNP to detect controller settings, defaults to true"); #define DMA_INVAL 255 static int ircc_dma = DMA_INVAL; @@ -519,8 +519,6 @@ static int __init smsc_ircc_open(unsigned int fir_base, unsigned int sir_base, u goto err_out1; } - SET_MODULE_OWNER(dev); - dev->hard_start_xmit = smsc_ircc_hard_xmit_sir; #if SMSC_IRCC2_C_NET_TIMEOUT dev->tx_timeout = smsc_ircc_timeout; diff --git a/drivers/net/irda/stir4200.c b/drivers/net/irda/stir4200.c index 755aa444a4ddb91026a25834ade6b59b17efc4d3..042bc2f0417d8615318bffc479d7d0614b547401 100644 --- a/drivers/net/irda/stir4200.c +++ b/drivers/net/irda/stir4200.c @@ -332,7 +332,7 @@ static void fir_eof(struct stir_cb *stir) } fcs = ~(crc32_le(~0, rx_buff->data, len)); - if (fcs != le32_to_cpu(get_unaligned((u32 *)(rx_buff->data+len)))) { + if (fcs != le32_to_cpu(get_unaligned((__le32 *)(rx_buff->data+len)))) { pr_debug("crc error calc 0x%x len %d\n", fcs, len); stir->stats.rx_errors++; stir->stats.rx_crc_errors++; @@ -1034,7 +1034,6 @@ static int stir_probe(struct usb_interface *intf, if(!net) goto err_out1; - SET_MODULE_OWNER(net); SET_NETDEV_DEV(net, &intf->dev); stir = netdev_priv(net); stir->netdev = net; diff --git a/drivers/net/irda/via-ircc.c b/drivers/net/irda/via-ircc.c index ff5358574d0a06a138ea4c816b871eeee0f553e9..126ec7c8680ea176a156be4c4aca2cc16bcb66f5 100644 --- a/drivers/net/irda/via-ircc.c +++ b/drivers/net/irda/via-ircc.c @@ -429,9 +429,6 @@ static __devinit int via_ircc_open(int i, chipio_t * info, unsigned int id) self->tx_fifo.len = self->tx_fifo.ptr = self->tx_fifo.free = 0; self->tx_fifo.tail = self->tx_buff.head; - /* Keep track of module usage */ - SET_MODULE_OWNER(dev); - /* Override the network 
functions we need to use */ dev->hard_start_xmit = via_ircc_hard_xmit_sir; dev->open = via_ircc_net_open; diff --git a/drivers/net/irda/vlsi_ir.c b/drivers/net/irda/vlsi_ir.c index 0538ca9ce0588c937cd5289ec5be0261cbc99c8e..acd082a96a4f2b6dd986c956552adae410d48065 100644 --- a/drivers/net/irda/vlsi_ir.c +++ b/drivers/net/irda/vlsi_ir.c @@ -1584,8 +1584,6 @@ static int vlsi_irda_init(struct net_device *ndev) vlsi_irda_dev_t *idev = ndev->priv; struct pci_dev *pdev = idev->pdev; - SET_MODULE_OWNER(ndev); - ndev->irq = pdev->irq; ndev->base_addr = pci_resource_start(pdev,0); diff --git a/drivers/net/irda/vlsi_ir.h b/drivers/net/irda/vlsi_ir.h index ca12a60964199b162bb7b3782995156944414742..c8b9c74eea524249c443a1e1faa28865ccd5e81d 100644 --- a/drivers/net/irda/vlsi_ir.h +++ b/drivers/net/irda/vlsi_ir.h @@ -537,10 +537,10 @@ calc_width_bits(unsigned baudrate, unsigned widthselect, unsigned clockselect) */ struct ring_descr_hw { - volatile u16 rd_count; /* tx/rx count [11:0] */ - u16 reserved; + volatile __le16 rd_count; /* tx/rx count [11:0] */ + __le16 reserved; union { - u32 addr; /* [23:0] of the buffer's busaddress */ + __le32 addr; /* [23:0] of the buffer's busaddress */ struct { u8 addr_res[3]; volatile u8 status; /* descriptor status */ diff --git a/drivers/net/irda/w83977af_ir.c b/drivers/net/irda/w83977af_ir.c index 5182e800cc187d51bf259db76bd65c911e854968..9fd2451b0fb256d584b9dca09c3f12914b8124e8 100644 --- a/drivers/net/irda/w83977af_ir.c +++ b/drivers/net/irda/w83977af_ir.c @@ -232,9 +232,6 @@ int w83977af_open(int i, unsigned int iobase, unsigned int irq, self->rx_buff.data = self->rx_buff.head; self->netdev = dev; - /* Keep track of module usage */ - SET_MODULE_OWNER(dev); - /* Override the network functions we need to use */ dev->hard_start_xmit = w83977af_hard_xmit; dev->open = w83977af_net_open; diff --git a/drivers/net/isa-skeleton.c b/drivers/net/isa-skeleton.c index 0343f12d2ffb452bc75f220f14eb2ec360feae92..d6ff26af37b3e5c1dc5a60d5c979819855389e1b 100644 --- a/drivers/net/isa-skeleton.c +++ b/drivers/net/isa-skeleton.c @@ -133,8 +133,6 @@ static int __init do_netcard_probe(struct net_device *dev) int base_addr = dev->base_addr; int irq = dev->irq; - SET_MODULE_OWNER(dev); - if (base_addr > 0x1ff) /* Check a single specified location. */ return netcard_probe1(dev, base_addr); else if (base_addr != 0) /* Don't probe at all. */ @@ -194,6 +192,7 @@ static int __init netcard_probe1(struct net_device *dev, int ioaddr) static unsigned version_printed; int i; int err = -ENODEV; + DECLARE_MAC_BUF(mac); /* Grab the region so that no one else tries to probe our ioports. */ if (!request_region(ioaddr, NETCARD_IO_EXTENT, cardname)) @@ -219,7 +218,9 @@ static int __init netcard_probe1(struct net_device *dev, int ioaddr) /* Retrieve and print the ethernet address. 
*/ for (i = 0; i < 6; i++) - printk(" %2.2x", dev->dev_addr[i] = inb(ioaddr + i)); + dev->dev_addr[i] = inb(ioaddr + i); + + printk("%s", print_mac(mac, dev->dev_addr)); err = -EAGAIN; #ifdef jumpered_interrupts diff --git a/drivers/net/iseries_veth.c b/drivers/net/iseries_veth.c index 0433c41f9029b8c06cce63e7e0842756f06d8147..97bd9dc2e52e8a2aa1f42ef764e5119e53dc8aba 100644 --- a/drivers/net/iseries_veth.c +++ b/drivers/net/iseries_veth.c @@ -196,7 +196,6 @@ struct veth_lpar_connection { struct veth_port { struct device *dev; - struct net_device_stats stats; u64 mac_addr; HvLpIndexMap lpar_map; @@ -936,9 +935,6 @@ static void veth_release_connection(struct kobject *kobj) static int veth_open(struct net_device *dev) { - struct veth_port *port = (struct veth_port *) dev->priv; - - memset(&port->stats, 0, sizeof (port->stats)); netif_start_queue(dev); return 0; } @@ -949,13 +945,6 @@ static int veth_close(struct net_device *dev) return 0; } -static struct net_device_stats *veth_get_stats(struct net_device *dev) -{ - struct veth_port *port = (struct veth_port *) dev->priv; - - return &port->stats; -} - static int veth_change_mtu(struct net_device *dev, int new_mtu) { if ((new_mtu < 68) || (new_mtu > VETH_MAX_MTU)) @@ -1084,7 +1073,6 @@ static struct net_device * __init veth_probe_one(int vlan, dev->open = veth_open; dev->hard_start_xmit = veth_start_xmit; dev->stop = veth_close; - dev->get_stats = veth_get_stats; dev->change_mtu = veth_change_mtu; dev->set_mac_address = NULL; dev->set_multicast_list = veth_set_multicast_list; @@ -1183,7 +1171,6 @@ static void veth_transmit_to_many(struct sk_buff *skb, HvLpIndexMap lpmask, struct net_device *dev) { - struct veth_port *port = (struct veth_port *) dev->priv; int i, success, error; success = error = 0; @@ -1199,11 +1186,11 @@ static void veth_transmit_to_many(struct sk_buff *skb, } if (error) - port->stats.tx_errors++; + dev->stats.tx_errors++; if (success) { - port->stats.tx_packets++; - port->stats.tx_bytes += skb->len; + dev->stats.tx_packets++; + dev->stats.tx_bytes += skb->len; } } @@ -1541,8 +1528,8 @@ static void veth_receive(struct veth_lpar_connection *cnx, skb->protocol = eth_type_trans(skb, dev); skb->ip_summed = CHECKSUM_NONE; netif_rx(skb); /* send it up */ - port->stats.rx_packets++; - port->stats.rx_bytes += length; + dev->stats.rx_packets++; + dev->stats.rx_bytes += length; } while (startchunk += nchunks, startchunk < VETH_MAX_FRAMES_PER_MSG); /* Ack it */ diff --git a/drivers/net/ixgb/ixgb.h b/drivers/net/ixgb/ixgb.h index 3569d5b0338868e0d6be8259f383eda2b7caa493..1eee8894c732e0aac48be5fcab68c44c8cd62fa6 100644 --- a/drivers/net/ixgb/ixgb.h +++ b/drivers/net/ixgb/ixgb.h @@ -184,6 +184,7 @@ struct ixgb_adapter { boolean_t rx_csum; /* OS defined structs */ + struct napi_struct napi; struct net_device *netdev; struct pci_dev *pdev; struct net_device_stats net_stats; diff --git a/drivers/net/ixgb/ixgb_ee.c b/drivers/net/ixgb/ixgb_ee.c index 52c99d01d5681f33c9c51242e887c52240debe5b..e8eb0fd6c5766fdc1429ffb050a11408f243c5d6 100644 --- a/drivers/net/ixgb/ixgb_ee.c +++ b/drivers/net/ixgb/ixgb_ee.c @@ -411,7 +411,7 @@ ixgb_write_eeprom(struct ixgb_hw *hw, uint16_t offset, uint16_t data) ixgb_cleanup_eeprom(hw); /* clear the init_ctrl_reg_1 to signify that the cache is invalidated */ - ee_map->init_ctrl_reg_1 = le16_to_cpu(EEPROM_ICW1_SIGNATURE_CLEAR); + ee_map->init_ctrl_reg_1 = cpu_to_le16(EEPROM_ICW1_SIGNATURE_CLEAR); return; } @@ -476,19 +476,19 @@ ixgb_get_eeprom_data(struct ixgb_hw *hw) uint16_t ee_data; ee_data = ixgb_read_eeprom(hw, 
i); checksum += ee_data; - hw->eeprom[i] = le16_to_cpu(ee_data); + hw->eeprom[i] = cpu_to_le16(ee_data); } if (checksum != (uint16_t) EEPROM_SUM) { DEBUGOUT("ixgb_ee: Checksum invalid.\n"); /* clear the init_ctrl_reg_1 to signify that the cache is * invalidated */ - ee_map->init_ctrl_reg_1 = le16_to_cpu(EEPROM_ICW1_SIGNATURE_CLEAR); + ee_map->init_ctrl_reg_1 = cpu_to_le16(EEPROM_ICW1_SIGNATURE_CLEAR); return (FALSE); } - if ((ee_map->init_ctrl_reg_1 & le16_to_cpu(EEPROM_ICW1_SIGNATURE_MASK)) - != le16_to_cpu(EEPROM_ICW1_SIGNATURE_VALID)) { + if ((ee_map->init_ctrl_reg_1 & cpu_to_le16(EEPROM_ICW1_SIGNATURE_MASK)) + != cpu_to_le16(EEPROM_ICW1_SIGNATURE_VALID)) { DEBUGOUT("ixgb_ee: Signature invalid.\n"); return(FALSE); } @@ -511,8 +511,8 @@ ixgb_check_and_get_eeprom_data (struct ixgb_hw* hw) { struct ixgb_ee_map_type *ee_map = (struct ixgb_ee_map_type *)hw->eeprom; - if ((ee_map->init_ctrl_reg_1 & le16_to_cpu(EEPROM_ICW1_SIGNATURE_MASK)) - == le16_to_cpu(EEPROM_ICW1_SIGNATURE_VALID)) { + if ((ee_map->init_ctrl_reg_1 & cpu_to_le16(EEPROM_ICW1_SIGNATURE_MASK)) + == cpu_to_le16(EEPROM_ICW1_SIGNATURE_VALID)) { return (TRUE); } else { return ixgb_get_eeprom_data(hw); @@ -528,7 +528,7 @@ ixgb_check_and_get_eeprom_data (struct ixgb_hw* hw) * Returns: * Word at indexed offset in eeprom, if valid, 0 otherwise. ******************************************************************************/ -uint16_t +__le16 ixgb_get_eeprom_word(struct ixgb_hw *hw, uint16_t index) { diff --git a/drivers/net/ixgb/ixgb_ee.h b/drivers/net/ixgb/ixgb_ee.h index ef236b935c15559edb5cd9c69889fd73bef9eb70..7908bf3005ed53e121f56e696f01ec3a6917e7b8 100644 --- a/drivers/net/ixgb/ixgb_ee.h +++ b/drivers/net/ixgb/ixgb_ee.h @@ -76,22 +76,22 @@ /* EEPROM structure */ struct ixgb_ee_map_type { uint8_t mac_addr[IXGB_ETH_LENGTH_OF_ADDRESS]; - uint16_t compatibility; - uint16_t reserved1[4]; - uint32_t pba_number; - uint16_t init_ctrl_reg_1; - uint16_t subsystem_id; - uint16_t subvendor_id; - uint16_t device_id; - uint16_t vendor_id; - uint16_t init_ctrl_reg_2; - uint16_t oem_reserved[16]; - uint16_t swdpins_reg; - uint16_t circuit_ctrl_reg; + __le16 compatibility; + __le16 reserved1[4]; + __le32 pba_number; + __le16 init_ctrl_reg_1; + __le16 subsystem_id; + __le16 subvendor_id; + __le16 device_id; + __le16 vendor_id; + __le16 init_ctrl_reg_2; + __le16 oem_reserved[16]; + __le16 swdpins_reg; + __le16 circuit_ctrl_reg; uint8_t d3_power; uint8_t d0_power; - uint16_t reserved2[28]; - uint16_t checksum; + __le16 reserved2[28]; + __le16 checksum; }; /* EEPROM Functions */ diff --git a/drivers/net/ixgb/ixgb_ethtool.c b/drivers/net/ixgb/ixgb_ethtool.c index 0413cd95eda7b28d0a273e82ff2b6b6758b9f89e..fddd5844168d69202d4c39748ae24ed282999f6e 100644 --- a/drivers/net/ixgb/ixgb_ethtool.c +++ b/drivers/net/ixgb/ixgb_ethtool.c @@ -94,8 +94,7 @@ static struct ixgb_stats ixgb_gstrings_stats[] = { {"tx_csum_offload_errors", IXGB_STAT(hw_csum_tx_error)} }; -#define IXGB_STATS_LEN \ - sizeof(ixgb_gstrings_stats) / sizeof(struct ixgb_stats) +#define IXGB_STATS_LEN ARRAY_SIZE(ixgb_gstrings_stats) static int ixgb_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd) @@ -423,7 +422,7 @@ ixgb_get_eeprom(struct net_device *netdev, { struct ixgb_adapter *adapter = netdev_priv(netdev); struct ixgb_hw *hw = &adapter->hw; - uint16_t *eeprom_buff; + __le16 *eeprom_buff; int i, max_len, first_word, last_word; int ret_val = 0; @@ -447,7 +446,7 @@ ixgb_get_eeprom(struct net_device *netdev, first_word = eeprom->offset >> 1; last_word = (eeprom->offset + 
eeprom->len - 1) >> 1; - eeprom_buff = kmalloc(sizeof(uint16_t) * + eeprom_buff = kmalloc(sizeof(__le16) * (last_word - first_word + 1), GFP_KERNEL); if(!eeprom_buff) return -ENOMEM; @@ -660,9 +659,14 @@ ixgb_phys_id(struct net_device *netdev, uint32_t data) } static int -ixgb_get_stats_count(struct net_device *netdev) +ixgb_get_sset_count(struct net_device *netdev, int sset) { - return IXGB_STATS_LEN; + switch (sset) { + case ETH_SS_STATS: + return IXGB_STATS_LEN; + default: + return -EOPNOTSUPP; + } } static void @@ -714,15 +718,13 @@ static const struct ethtool_ops ixgb_ethtool_ops = { .set_rx_csum = ixgb_set_rx_csum, .get_tx_csum = ixgb_get_tx_csum, .set_tx_csum = ixgb_set_tx_csum, - .get_sg = ethtool_op_get_sg, .set_sg = ethtool_op_set_sg, .get_msglevel = ixgb_get_msglevel, .set_msglevel = ixgb_set_msglevel, - .get_tso = ethtool_op_get_tso, .set_tso = ixgb_set_tso, .get_strings = ixgb_get_strings, .phys_id = ixgb_phys_id, - .get_stats_count = ixgb_get_stats_count, + .get_sset_count = ixgb_get_sset_count, .get_ethtool_stats = ixgb_get_ethtool_stats, }; diff --git a/drivers/net/ixgb/ixgb_hw.h b/drivers/net/ixgb/ixgb_hw.h index 40ef5ca88717d4f4d3f9bfa0e5606f3003d06803..af5643324ee3ee854da86e0110e969e3b9118c74 100644 --- a/drivers/net/ixgb/ixgb_hw.h +++ b/drivers/net/ixgb/ixgb_hw.h @@ -711,7 +711,7 @@ struct ixgb_hw { uint32_t bar2; uint32_t bar3; uint16_t pci_cmd_word; /* PCI command register id from PCI configuration space */ - uint16_t eeprom[IXGB_EEPROM_SIZE]; /* EEPROM contents read at init time */ + __le16 eeprom[IXGB_EEPROM_SIZE]; /* EEPROM contents read at init time */ unsigned long io_base; /* Our I/O mapped location */ uint32_t lastLFC; uint32_t lastRFC; @@ -809,7 +809,7 @@ void ixgb_get_ee_mac_addr(struct ixgb_hw *hw, uint8_t *mac_addr); uint32_t ixgb_get_ee_pba_number(struct ixgb_hw *hw); uint16_t ixgb_get_ee_device_id(struct ixgb_hw *hw); boolean_t ixgb_get_eeprom_data(struct ixgb_hw *hw); -uint16_t ixgb_get_eeprom_word(struct ixgb_hw *hw, uint16_t index); +__le16 ixgb_get_eeprom_word(struct ixgb_hw *hw, uint16_t index); /* Everything else */ void ixgb_led_on(struct ixgb_hw *hw); diff --git a/drivers/net/ixgb/ixgb_main.c b/drivers/net/ixgb/ixgb_main.c index 991c8833e23c955f02cff9562c2973fbbfe1c205..d444de58ba341455b40cf823e31736f270c01836 100644 --- a/drivers/net/ixgb/ixgb_main.c +++ b/drivers/net/ixgb/ixgb_main.c @@ -97,7 +97,7 @@ static irqreturn_t ixgb_intr(int irq, void *data); static boolean_t ixgb_clean_tx_irq(struct ixgb_adapter *adapter); #ifdef CONFIG_IXGB_NAPI -static int ixgb_clean(struct net_device *netdev, int *budget); +static int ixgb_clean(struct napi_struct *napi, int budget); static boolean_t ixgb_clean_rx_irq(struct ixgb_adapter *adapter, int *work_done, int work_to_do); #else @@ -288,7 +288,7 @@ ixgb_up(struct ixgb_adapter *adapter) mod_timer(&adapter->watchdog_timer, jiffies); #ifdef CONFIG_IXGB_NAPI - netif_poll_enable(netdev); + napi_enable(&adapter->napi); #endif ixgb_irq_enable(adapter); @@ -309,7 +309,7 @@ ixgb_down(struct ixgb_adapter *adapter, boolean_t kill_watchdog) if(kill_watchdog) del_timer_sync(&adapter->watchdog_timer); #ifdef CONFIG_IXGB_NAPI - netif_poll_disable(netdev); + napi_disable(&adapter->napi); #endif adapter->link_speed = 0; adapter->link_duplex = 0; @@ -382,7 +382,6 @@ ixgb_probe(struct pci_dev *pdev, goto err_alloc_etherdev; } - SET_MODULE_OWNER(netdev); SET_NETDEV_DEV(netdev, &pdev->dev); pci_set_drvdata(pdev, netdev); @@ -421,8 +420,7 @@ ixgb_probe(struct pci_dev *pdev, netdev->tx_timeout = &ixgb_tx_timeout; 
netdev->watchdog_timeo = 5 * HZ; #ifdef CONFIG_IXGB_NAPI - netdev->poll = &ixgb_clean; - netdev->weight = 64; + netif_napi_add(netdev, &adapter->napi, ixgb_clean, 64); #endif netdev->vlan_rx_register = ixgb_vlan_rx_register; netdev->vlan_rx_add_vid = ixgb_vlan_rx_add_vid; @@ -1746,7 +1744,7 @@ ixgb_intr(int irq, void *data) } #ifdef CONFIG_IXGB_NAPI - if(netif_rx_schedule_prep(netdev)) { + if (netif_rx_schedule_prep(netdev, &adapter->napi)) { /* Disable interrupts and register for poll. The flush of the posted write is intentionally left out. @@ -1754,7 +1752,7 @@ ixgb_intr(int irq, void *data) atomic_inc(&adapter->irq_sem); IXGB_WRITE_REG(&adapter->hw, IMC, ~0); - __netif_rx_schedule(netdev); + __netif_rx_schedule(netdev, &adapter->napi); } #else /* yes, that is actually a & and it is meant to make sure that @@ -1776,27 +1774,23 @@ ixgb_intr(int irq, void *data) **/ static int -ixgb_clean(struct net_device *netdev, int *budget) +ixgb_clean(struct napi_struct *napi, int budget) { - struct ixgb_adapter *adapter = netdev_priv(netdev); - int work_to_do = min(*budget, netdev->quota); + struct ixgb_adapter *adapter = container_of(napi, struct ixgb_adapter, napi); + struct net_device *netdev = adapter->netdev; int tx_cleaned; int work_done = 0; tx_cleaned = ixgb_clean_tx_irq(adapter); - ixgb_clean_rx_irq(adapter, &work_done, work_to_do); - - *budget -= work_done; - netdev->quota -= work_done; + ixgb_clean_rx_irq(adapter, &work_done, budget); /* if no Tx and not enough Rx work done, exit the polling mode */ if((!tx_cleaned && (work_done == 0)) || !netif_running(netdev)) { - netif_rx_complete(netdev); + netif_rx_complete(netdev, napi); ixgb_irq_enable(adapter); - return 0; } - return 1; + return work_done; } #endif diff --git a/drivers/net/ixgbe/Makefile b/drivers/net/ixgbe/Makefile new file mode 100644 index 0000000000000000000000000000000000000000..ccd83d9f579ef48cff32d3586535faea655e17fa --- /dev/null +++ b/drivers/net/ixgbe/Makefile @@ -0,0 +1,36 @@ +################################################################################ +# +# Intel 10 Gigabit PCI Express Linux driver +# Copyright(c) 1999 - 2007 Intel Corporation. +# +# This program is free software; you can redistribute it and/or modify it +# under the terms and conditions of the GNU General Public License, +# version 2, as published by the Free Software Foundation. +# +# This program is distributed in the hope it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for +# more details. +# +# You should have received a copy of the GNU General Public License along with +# this program; if not, write to the Free Software Foundation, Inc., +# 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. +# +# The full GNU General Public License is included in this distribution in +# the file called "COPYING". +# +# Contact Information: +# Linux NICS +# e1000-devel Mailing List +# Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 +# +################################################################################ + +# +# Makefile for the Intel(R) 10GbE PCI Express ethernet driver +# + +obj-$(CONFIG_IXGBE) += ixgbe.o + +ixgbe-objs := ixgbe_main.o ixgbe_common.o ixgbe_ethtool.o \ + ixgbe_82598.o ixgbe_phy.o diff --git a/drivers/net/ixgbe/ixgbe.h b/drivers/net/ixgbe/ixgbe.h new file mode 100644 index 0000000000000000000000000000000000000000..c160a7d91e21a2c3b24e289665c93387cd72095f --- /dev/null +++ b/drivers/net/ixgbe/ixgbe.h @@ -0,0 +1,259 @@ +/******************************************************************************* + + Intel 10 Gigabit PCI Express Linux driver + Copyright(c) 1999 - 2007 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + Linux NICS + e1000-devel Mailing List + Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +#ifndef _IXGBE_H_ +#define _IXGBE_H_ + +#include +#include +#include + +#include "ixgbe_type.h" +#include "ixgbe_common.h" + + +#define IXGBE_ERR(args...) printk(KERN_ERR "ixgbe: " args) + +#define PFX "ixgbe: " +#define DPRINTK(nlevel, klevel, fmt, args...) \ + ((void)((NETIF_MSG_##nlevel & adapter->msg_enable) && \ + printk(KERN_##klevel PFX "%s: %s: " fmt, adapter->netdev->name, \ + __FUNCTION__ , ## args))) + +/* TX/RX descriptor defines */ +#define IXGBE_DEFAULT_TXD 1024 +#define IXGBE_MAX_TXD 4096 +#define IXGBE_MIN_TXD 64 + +#define IXGBE_DEFAULT_RXD 1024 +#define IXGBE_MAX_RXD 4096 +#define IXGBE_MIN_RXD 64 + +#define IXGBE_DEFAULT_RXQ 1 +#define IXGBE_MAX_RXQ 1 +#define IXGBE_MIN_RXQ 1 + +#define IXGBE_DEFAULT_ITR_RX_USECS 125 /* 8k irqs/sec */ +#define IXGBE_DEFAULT_ITR_TX_USECS 250 /* 4k irqs/sec */ +#define IXGBE_MIN_ITR_USECS 100 /* 500k irqs/sec */ +#define IXGBE_MAX_ITR_USECS 10000 /* 100 irqs/sec */ + +/* flow control */ +#define IXGBE_DEFAULT_FCRTL 0x10000 +#define IXGBE_MIN_FCRTL 0 +#define IXGBE_MAX_FCRTL 0x7FF80 +#define IXGBE_DEFAULT_FCRTH 0x20000 +#define IXGBE_MIN_FCRTH 0 +#define IXGBE_MAX_FCRTH 0x7FFF0 +#define IXGBE_DEFAULT_FCPAUSE 0x6800 /* may be too long */ +#define IXGBE_MIN_FCPAUSE 0 +#define IXGBE_MAX_FCPAUSE 0xFFFF + +/* Supported Rx Buffer Sizes */ +#define IXGBE_RXBUFFER_64 64 /* Used for packet split */ +#define IXGBE_RXBUFFER_128 128 /* Used for packet split */ +#define IXGBE_RXBUFFER_256 256 /* Used for packet split */ +#define IXGBE_RXBUFFER_2048 2048 + +#define IXGBE_RX_HDR_SIZE IXGBE_RXBUFFER_256 + +#define MAXIMUM_ETHERNET_VLAN_SIZE (ETH_FRAME_LEN + ETH_FCS_LEN + VLAN_HLEN) + +/* How many Tx Descriptors do we need to call netif_wake_queue? */ +#define IXGBE_TX_QUEUE_WAKE 16 + +/* How many Rx Buffers do we bundle into one write to the hardware ? 
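+ * Refilled buffers are handed back to the adapter in batches of this size so
+ * the RX descriptor tail register is only written once per batch.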
*/ +#define IXGBE_RX_BUFFER_WRITE 16 /* Must be power of 2 */ + +#define IXGBE_TX_FLAGS_CSUM (u32)(1) +#define IXGBE_TX_FLAGS_VLAN (u32)(1 << 1) +#define IXGBE_TX_FLAGS_TSO (u32)(1 << 2) +#define IXGBE_TX_FLAGS_IPV4 (u32)(1 << 3) +#define IXGBE_TX_FLAGS_VLAN_MASK 0xffff0000 +#define IXGBE_TX_FLAGS_VLAN_SHIFT 16 + +/* wrapper around a pointer to a socket buffer, + * so a DMA handle can be stored along with the buffer */ +struct ixgbe_tx_buffer { + struct sk_buff *skb; + dma_addr_t dma; + unsigned long time_stamp; + u16 length; + u16 next_to_watch; +}; + +struct ixgbe_rx_buffer { + struct sk_buff *skb; + dma_addr_t dma; + struct page *page; + dma_addr_t page_dma; +}; + +struct ixgbe_queue_stats { + u64 packets; + u64 bytes; +}; + +struct ixgbe_ring { + struct ixgbe_adapter *adapter; /* backlink */ + void *desc; /* descriptor ring memory */ + dma_addr_t dma; /* phys. address of descriptor ring */ + unsigned int size; /* length in bytes */ + unsigned int count; /* amount of descriptors */ + unsigned int next_to_use; + unsigned int next_to_clean; + + union { + struct ixgbe_tx_buffer *tx_buffer_info; + struct ixgbe_rx_buffer *rx_buffer_info; + }; + + u16 head; + u16 tail; + + /* To protect race between sender and clean_tx_irq */ + spinlock_t tx_lock; + + struct ixgbe_queue_stats stats; + + u32 eims_value; + u16 itr_register; + + char name[IFNAMSIZ + 5]; + u16 work_limit; /* max work per interrupt */ +}; + +/* Helper macros to switch between ints/sec and what the register uses. + * And yes, it's the same math going both ways. + */ +#define EITR_INTS_PER_SEC_TO_REG(_eitr) \ + ((_eitr) ? (1000000000 / ((_eitr) * 256)) : 0) +#define EITR_REG_TO_INTS_PER_SEC EITR_INTS_PER_SEC_TO_REG + +#define IXGBE_DESC_UNUSED(R) \ + ((((R)->next_to_clean > (R)->next_to_use) ? 
0 : (R)->count) + \ + (R)->next_to_clean - (R)->next_to_use - 1) + +#define IXGBE_RX_DESC_ADV(R, i) \ + (&(((union ixgbe_adv_rx_desc *)((R).desc))[i])) +#define IXGBE_TX_DESC_ADV(R, i) \ + (&(((union ixgbe_adv_tx_desc *)((R).desc))[i])) +#define IXGBE_TX_CTXTDESC_ADV(R, i) \ + (&(((struct ixgbe_adv_tx_context_desc *)((R).desc))[i])) + +#define IXGBE_MAX_JUMBO_FRAME_SIZE 16128 + +/* board specific private data structure */ +struct ixgbe_adapter { + struct timer_list watchdog_timer; + struct vlan_group *vlgrp; + u16 bd_number; + u16 rx_buf_len; + atomic_t irq_sem; + struct work_struct reset_task; + + /* TX */ + struct ixgbe_ring *tx_ring; /* One per active queue */ + struct napi_struct napi; + u64 restart_queue; + u64 lsc_int; + u64 hw_tso_ctxt; + u64 hw_tso6_ctxt; + u32 tx_timeout_count; + bool detect_tx_hung; + + /* RX */ + struct ixgbe_ring *rx_ring; /* One per active queue */ + u64 hw_csum_tx_good; + u64 hw_csum_rx_error; + u64 hw_csum_rx_good; + u64 non_eop_descs; + int num_tx_queues; + int num_rx_queues; + struct msix_entry *msix_entries; + + u64 rx_hdr_split; + u32 alloc_rx_page_failed; + u32 alloc_rx_buff_failed; + + u32 flags; +#define IXGBE_FLAG_RX_CSUM_ENABLED (u32)(1) +#define IXGBE_FLAG_MSI_ENABLED (u32)(1 << 1) +#define IXGBE_FLAG_MSIX_ENABLED (u32)(1 << 2) +#define IXGBE_FLAG_RX_PS_ENABLED (u32)(1 << 3) +#define IXGBE_FLAG_IN_NETPOLL (u32)(1 << 4) + + /* Interrupt Throttle Rate */ + u32 rx_eitr; + u32 tx_eitr; + + /* OS defined structs */ + struct net_device *netdev; + struct pci_dev *pdev; + struct net_device_stats net_stats; + + /* structs defined in ixgbe_hw.h */ + struct ixgbe_hw hw; + u16 msg_enable; + struct ixgbe_hw_stats stats; + char lsc_name[IFNAMSIZ + 5]; + + unsigned long state; + u64 tx_busy; +}; + +enum ixbge_state_t { + __IXGBE_TESTING, + __IXGBE_RESETTING, + __IXGBE_DOWN +}; + +enum ixgbe_boards { + board_82598AF, + board_82598EB, + board_82598AT, +}; + +extern struct ixgbe_info ixgbe_82598AF_info; +extern struct ixgbe_info ixgbe_82598EB_info; +extern struct ixgbe_info ixgbe_82598AT_info; + +extern char ixgbe_driver_name[]; +extern char ixgbe_driver_version[]; + +extern int ixgbe_up(struct ixgbe_adapter *adapter); +extern void ixgbe_down(struct ixgbe_adapter *adapter); +extern void ixgbe_reset(struct ixgbe_adapter *adapter); +extern void ixgbe_update_stats(struct ixgbe_adapter *adapter); +extern void ixgbe_set_ethtool_ops(struct net_device *netdev); +extern int ixgbe_setup_rx_resources(struct ixgbe_adapter *adapter, + struct ixgbe_ring *rxdr); +extern int ixgbe_setup_tx_resources(struct ixgbe_adapter *adapter, + struct ixgbe_ring *txdr); + +#endif /* _IXGBE_H_ */ diff --git a/drivers/net/ixgbe/ixgbe_82598.c b/drivers/net/ixgbe/ixgbe_82598.c new file mode 100644 index 0000000000000000000000000000000000000000..00ee20125ca9c94e92e0ab77659ef9a76a0d79c4 --- /dev/null +++ b/drivers/net/ixgbe/ixgbe_82598.c @@ -0,0 +1,589 @@ +/******************************************************************************* + + Intel 10 Gigabit PCI Express Linux driver + Copyright(c) 1999 - 2007 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. 
+ + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + Linux NICS + e1000-devel Mailing List + Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +#include +#include +#include + +#include "ixgbe_type.h" +#include "ixgbe_common.h" +#include "ixgbe_phy.h" + +#define IXGBE_82598_MAX_TX_QUEUES 32 +#define IXGBE_82598_MAX_RX_QUEUES 64 +#define IXGBE_82598_RAR_ENTRIES 16 + +static s32 ixgbe_get_invariants_82598(struct ixgbe_hw *hw); +static s32 ixgbe_get_link_settings_82598(struct ixgbe_hw *hw, u32 *speed, + bool *autoneg); +static s32 ixgbe_get_copper_link_settings_82598(struct ixgbe_hw *hw, + u32 *speed, bool *autoneg); +static enum ixgbe_media_type ixgbe_get_media_type_82598(struct ixgbe_hw *hw); +static s32 ixgbe_setup_mac_link_82598(struct ixgbe_hw *hw); +static s32 ixgbe_check_mac_link_82598(struct ixgbe_hw *hw, u32 *speed, + bool *link_up); +static s32 ixgbe_setup_mac_link_speed_82598(struct ixgbe_hw *hw, u32 speed, + bool autoneg, + bool autoneg_wait_to_complete); +static s32 ixgbe_setup_copper_link_82598(struct ixgbe_hw *hw); +static s32 ixgbe_check_copper_link_82598(struct ixgbe_hw *hw, u32 *speed, + bool *link_up); +static s32 ixgbe_setup_copper_link_speed_82598(struct ixgbe_hw *hw, u32 speed, + bool autoneg, + bool autoneg_wait_to_complete); +static s32 ixgbe_reset_hw_82598(struct ixgbe_hw *hw); + + +static s32 ixgbe_get_invariants_82598(struct ixgbe_hw *hw) +{ + hw->mac.num_rx_queues = IXGBE_82598_MAX_TX_QUEUES; + hw->mac.num_tx_queues = IXGBE_82598_MAX_RX_QUEUES; + hw->mac.num_rx_addrs = IXGBE_82598_RAR_ENTRIES; + + return 0; +} + +/** + * ixgbe_get_link_settings_82598 - Determines default link settings + * @hw: pointer to hardware structure + * @speed: pointer to link speed + * @autoneg: boolean auto-negotiation value + * + * Determines the default link settings by reading the AUTOC register. 
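+ * For illustration (reading the switch below): if AUTOC.LMS selects one of
+ * the KX4 autonegotiation modes and both the KX4 and KX support bits are
+ * set, *speed is reported as IXGBE_LINK_SPEED_10GB_FULL |
+ * IXGBE_LINK_SPEED_1GB_FULL with *autoneg true; an unrecognized LMS value
+ * yields IXGBE_ERR_LINK_SETUP.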
+ **/ +static s32 ixgbe_get_link_settings_82598(struct ixgbe_hw *hw, u32 *speed, + bool *autoneg) +{ + s32 status = 0; + s32 autoc_reg; + + autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC); + + if (hw->mac.link_settings_loaded) { + autoc_reg &= ~IXGBE_AUTOC_LMS_ATTACH_TYPE; + autoc_reg &= ~IXGBE_AUTOC_LMS_MASK; + autoc_reg |= hw->mac.link_attach_type; + autoc_reg |= hw->mac.link_mode_select; + } + + switch (autoc_reg & IXGBE_AUTOC_LMS_MASK) { + case IXGBE_AUTOC_LMS_1G_LINK_NO_AN: + *speed = IXGBE_LINK_SPEED_1GB_FULL; + *autoneg = false; + break; + + case IXGBE_AUTOC_LMS_10G_LINK_NO_AN: + *speed = IXGBE_LINK_SPEED_10GB_FULL; + *autoneg = false; + break; + + case IXGBE_AUTOC_LMS_1G_AN: + *speed = IXGBE_LINK_SPEED_1GB_FULL; + *autoneg = true; + break; + + case IXGBE_AUTOC_LMS_KX4_AN: + case IXGBE_AUTOC_LMS_KX4_AN_1G_AN: + *speed = IXGBE_LINK_SPEED_UNKNOWN; + if (autoc_reg & IXGBE_AUTOC_KX4_SUPP) + *speed |= IXGBE_LINK_SPEED_10GB_FULL; + if (autoc_reg & IXGBE_AUTOC_KX_SUPP) + *speed |= IXGBE_LINK_SPEED_1GB_FULL; + *autoneg = true; + break; + + default: + status = IXGBE_ERR_LINK_SETUP; + break; + } + + return status; +} + +/** + * ixgbe_get_copper_link_settings_82598 - Determines default link settings + * @hw: pointer to hardware structure + * @speed: pointer to link speed + * @autoneg: boolean auto-negotiation value + * + * Determines the default link settings by reading the AUTOC register. + **/ +static s32 ixgbe_get_copper_link_settings_82598(struct ixgbe_hw *hw, + u32 *speed, bool *autoneg) +{ + s32 status = IXGBE_ERR_LINK_SETUP; + u16 speed_ability; + + *speed = 0; + *autoneg = true; + + status = ixgbe_read_phy_reg(hw, IXGBE_MDIO_PHY_SPEED_ABILITY, + IXGBE_MDIO_PMA_PMD_DEV_TYPE, + &speed_ability); + + if (status == 0) { + if (speed_ability & IXGBE_MDIO_PHY_SPEED_10G) + *speed |= IXGBE_LINK_SPEED_10GB_FULL; + if (speed_ability & IXGBE_MDIO_PHY_SPEED_1G) + *speed |= IXGBE_LINK_SPEED_1GB_FULL; + } + + return status; +} + +/** + * ixgbe_get_media_type_82598 - Determines media type + * @hw: pointer to hardware structure + * + * Returns the media type (fiber, copper, backplane) + **/ +static enum ixgbe_media_type ixgbe_get_media_type_82598(struct ixgbe_hw *hw) +{ + enum ixgbe_media_type media_type; + + /* Media type for I82598 is based on device ID */ + switch (hw->device_id) { + case IXGBE_DEV_ID_82598AF_DUAL_PORT: + case IXGBE_DEV_ID_82598AF_SINGLE_PORT: + case IXGBE_DEV_ID_82598EB_CX4: + media_type = ixgbe_media_type_fiber; + break; + case IXGBE_DEV_ID_82598AT_DUAL_PORT: + media_type = ixgbe_media_type_copper; + break; + default: + media_type = ixgbe_media_type_unknown; + break; + } + + return media_type; +} + +/** + * ixgbe_setup_mac_link_82598 - Configures MAC link settings + * @hw: pointer to hardware structure + * + * Configures link settings based on values in the ixgbe_hw struct. + * Restarts the link. Performs autonegotiation if needed. 
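+ * For illustration: when link_mode_select is one of the KX4 autonegotiation
+ * modes and autoneg_wait_to_complete is set, the LINKS register is polled
+ * for IXGBE_LINKS_KX_AN_COMP (up to IXGBE_AUTO_NEG_TIME attempts, 100 ms
+ * apart); failure to complete is reported as IXGBE_ERR_AUTONEG_NOT_COMPLETE.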
+ **/ +static s32 ixgbe_setup_mac_link_82598(struct ixgbe_hw *hw) +{ + u32 autoc_reg; + u32 links_reg; + u32 i; + s32 status = 0; + + autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC); + + if (hw->mac.link_settings_loaded) { + autoc_reg &= ~IXGBE_AUTOC_LMS_ATTACH_TYPE; + autoc_reg &= ~IXGBE_AUTOC_LMS_MASK; + autoc_reg |= hw->mac.link_attach_type; + autoc_reg |= hw->mac.link_mode_select; + + IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg); + msleep(50); + } + + /* Restart link */ + autoc_reg |= IXGBE_AUTOC_AN_RESTART; + IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg); + + /* Only poll for autoneg to complete if specified to do so */ + if (hw->phy.autoneg_wait_to_complete) { + if (hw->mac.link_mode_select == IXGBE_AUTOC_LMS_KX4_AN || + hw->mac.link_mode_select == IXGBE_AUTOC_LMS_KX4_AN_1G_AN) { + links_reg = 0; /* Just in case Autoneg time = 0 */ + for (i = 0; i < IXGBE_AUTO_NEG_TIME; i++) { + links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS); + if (links_reg & IXGBE_LINKS_KX_AN_COMP) + break; + msleep(100); + } + if (!(links_reg & IXGBE_LINKS_KX_AN_COMP)) { + status = IXGBE_ERR_AUTONEG_NOT_COMPLETE; + hw_dbg(hw, + "Autonegotiation did not complete.\n"); + } + } + } + + /* + * We want to save off the original Flow Control configuration just in + * case we get disconnected and then reconnected into a different hub + * or switch with different Flow Control capabilities. + */ + hw->fc.type = hw->fc.original_type; + ixgbe_setup_fc(hw, 0); + + /* Add delay to filter out noises during initial link setup */ + msleep(50); + + return status; +} + +/** + * ixgbe_check_mac_link_82598 - Get link/speed status + * @hw: pointer to hardware structure + * @speed: pointer to link speed + * @link_up: true is link is up, false otherwise + * + * Reads the links register to determine if link is up and the current speed + **/ +static s32 ixgbe_check_mac_link_82598(struct ixgbe_hw *hw, u32 *speed, + bool *link_up) +{ + u32 links_reg; + + links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS); + + if (links_reg & IXGBE_LINKS_UP) + *link_up = true; + else + *link_up = false; + + if (links_reg & IXGBE_LINKS_SPEED) + *speed = IXGBE_LINK_SPEED_10GB_FULL; + else + *speed = IXGBE_LINK_SPEED_1GB_FULL; + + return 0; +} + +/** + * ixgbe_setup_mac_link_speed_82598 - Set MAC link speed + * @hw: pointer to hardware structure + * @speed: new link speed + * @autoneg: true if auto-negotiation enabled + * @autoneg_wait_to_complete: true if waiting is needed to complete + * + * Set the link speed in the AUTOC register and restarts link. + **/ +static s32 ixgbe_setup_mac_link_speed_82598(struct ixgbe_hw *hw, + u32 speed, bool autoneg, + bool autoneg_wait_to_complete) +{ + s32 status = 0; + + /* If speed is 10G, then check for CX4 or XAUI. 
*/ + if ((speed == IXGBE_LINK_SPEED_10GB_FULL) && + (!(hw->mac.link_attach_type & IXGBE_AUTOC_10G_KX4))) + hw->mac.link_mode_select = IXGBE_AUTOC_LMS_10G_LINK_NO_AN; + else if ((speed == IXGBE_LINK_SPEED_1GB_FULL) && (!autoneg)) + hw->mac.link_mode_select = IXGBE_AUTOC_LMS_1G_LINK_NO_AN; + else if (autoneg) { + /* BX mode - Autonegotiate 1G */ + if (!(hw->mac.link_attach_type & IXGBE_AUTOC_1G_PMA_PMD)) + hw->mac.link_mode_select = IXGBE_AUTOC_LMS_1G_AN; + else /* KX/KX4 mode */ + hw->mac.link_mode_select = IXGBE_AUTOC_LMS_KX4_AN_1G_AN; + } else { + status = IXGBE_ERR_LINK_SETUP; + } + + if (status == 0) { + hw->phy.autoneg_wait_to_complete = autoneg_wait_to_complete; + + hw->mac.link_settings_loaded = true; + /* + * Setup and restart the link based on the new values in + * ixgbe_hw This will write the AUTOC register based on the new + * stored values + */ + hw->phy.ops.setup(hw); + } + + return status; +} + + +/** + * ixgbe_setup_copper_link_82598 - Setup copper link settings + * @hw: pointer to hardware structure + * + * Configures link settings based on values in the ixgbe_hw struct. + * Restarts the link. Performs autonegotiation if needed. Restart + * phy and wait for autonegotiate to finish. Then synchronize the + * MAC and PHY. + **/ +static s32 ixgbe_setup_copper_link_82598(struct ixgbe_hw *hw) +{ + s32 status; + u32 speed = 0; + bool link_up = false; + + /* Set up MAC */ + hw->phy.ops.setup(hw); + + /* Restart autonegotiation on PHY */ + status = hw->phy.ops.setup(hw); + + /* Synchronize MAC to PHY speed */ + if (status == 0) + status = hw->phy.ops.check(hw, &speed, &link_up); + + return status; +} + +/** + * ixgbe_check_copper_link_82598 - Syncs MAC & PHY link settings + * @hw: pointer to hardware structure + * @speed: pointer to link speed + * @link_up: true if link is up, false otherwise + * + * Reads the mac link, phy link, and synchronizes the MAC to PHY. + **/ +static s32 ixgbe_check_copper_link_82598(struct ixgbe_hw *hw, u32 *speed, + bool *link_up) +{ + s32 status; + u32 phy_speed = 0; + bool phy_link = false; + + /* This is the speed and link the MAC is set at */ + hw->phy.ops.check(hw, speed, link_up); + + /* + * Check current speed and link status of the PHY register. + * This is a vendor specific register and may have to + * be changed for other copper PHYs. + */ + status = hw->phy.ops.check(hw, &phy_speed, &phy_link); + + if ((status == 0) && (phy_link)) { + /* + * Check current link status of the MACs link's register + * matches that of the speed in the PHY register + */ + if (*speed != phy_speed) { + /* + * The copper PHY requires 82598 attach type to be XAUI + * for 10G and BX for 1G + */ + hw->mac.link_attach_type = + (IXGBE_AUTOC_10G_XAUI | IXGBE_AUTOC_1G_BX); + + /* Synchronize the MAC speed to the PHY speed */ + status = hw->phy.ops.setup_speed(hw, phy_speed, false, + false); + if (status == 0) + hw->phy.ops.check(hw, speed, link_up); + else + status = IXGBE_ERR_LINK_SETUP; + } + } else { + *link_up = phy_link; + } + + return status; +} + +/** + * ixgbe_setup_copper_link_speed_82598 - Set the PHY autoneg advertised field + * @hw: pointer to hardware structure + * @speed: new link speed + * @autoneg: true if autonegotiation enabled + * @autoneg_wait_to_complete: true if waiting is needed to complete + * + * Sets the link speed in the AUTOC register in the MAC and restarts link. 
+ **/ +static s32 ixgbe_setup_copper_link_speed_82598(struct ixgbe_hw *hw, u32 speed, + bool autoneg, + bool autoneg_wait_to_complete) +{ + s32 status; + bool link_up = 0; + + /* Setup the PHY according to input speed */ + status = hw->phy.ops.setup_speed(hw, speed, autoneg, + autoneg_wait_to_complete); + + /* Synchronize MAC to PHY speed */ + if (status == 0) + status = hw->phy.ops.check(hw, &speed, &link_up); + + return status; +} + +/** + * ixgbe_reset_hw_82598 - Performs hardware reset + * @hw: pointer to hardware structure + * + * Resets the hardware by reseting the transmit and receive units, masks and + * clears all interrupts, performing a PHY reset, and performing a link (MAC) + * reset. + **/ +static s32 ixgbe_reset_hw_82598(struct ixgbe_hw *hw) +{ + s32 status = 0; + u32 ctrl; + u32 gheccr; + u32 i; + u32 autoc; + u8 analog_val; + + /* Call adapter stop to disable tx/rx and clear interrupts */ + ixgbe_stop_adapter(hw); + + /* + * Power up the Atlas TX lanes if they are currently powered down. + * Atlas TX lanes are powered down for MAC loopback tests, but + * they are not automatically restored on reset. + */ + ixgbe_read_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK, &analog_val); + if (analog_val & IXGBE_ATLAS_PDN_TX_REG_EN) { + /* Enable TX Atlas so packets can be transmitted again */ + ixgbe_read_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK, &analog_val); + analog_val &= ~IXGBE_ATLAS_PDN_TX_REG_EN; + ixgbe_write_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK, analog_val); + + ixgbe_read_analog_reg8(hw, IXGBE_ATLAS_PDN_10G, &analog_val); + analog_val &= ~IXGBE_ATLAS_PDN_TX_10G_QL_ALL; + ixgbe_write_analog_reg8(hw, IXGBE_ATLAS_PDN_10G, analog_val); + + ixgbe_read_analog_reg8(hw, IXGBE_ATLAS_PDN_1G, &analog_val); + analog_val &= ~IXGBE_ATLAS_PDN_TX_1G_QL_ALL; + ixgbe_write_analog_reg8(hw, IXGBE_ATLAS_PDN_1G, analog_val); + + ixgbe_read_analog_reg8(hw, IXGBE_ATLAS_PDN_AN, &analog_val); + analog_val &= ~IXGBE_ATLAS_PDN_TX_AN_QL_ALL; + ixgbe_write_analog_reg8(hw, IXGBE_ATLAS_PDN_AN, analog_val); + } + + /* Reset PHY */ + ixgbe_reset_phy(hw); + + /* + * Prevent the PCI-E bus from from hanging by disabling PCI-E master + * access and verify no pending requests before reset + */ + if (ixgbe_disable_pcie_master(hw) != 0) { + status = IXGBE_ERR_MASTER_REQUESTS_PENDING; + hw_dbg(hw, "PCI-E Master disable polling has failed.\n"); + } + + /* + * Issue global reset to the MAC. This needs to be a SW reset. + * If link reset is used, it might reset the MAC when mng is using it + */ + ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL); + IXGBE_WRITE_REG(hw, IXGBE_CTRL, (ctrl | IXGBE_CTRL_RST)); + IXGBE_WRITE_FLUSH(hw); + + /* Poll for reset bit to self-clear indicating reset is complete */ + for (i = 0; i < 10; i++) { + udelay(1); + ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL); + if (!(ctrl & IXGBE_CTRL_RST)) + break; + } + if (ctrl & IXGBE_CTRL_RST) { + status = IXGBE_ERR_RESET_FAILED; + hw_dbg(hw, "Reset polling failed to complete.\n"); + } + + msleep(50); + + gheccr = IXGBE_READ_REG(hw, IXGBE_GHECCR); + gheccr &= ~((1 << 21) | (1 << 18) | (1 << 9) | (1 << 6)); + IXGBE_WRITE_REG(hw, IXGBE_GHECCR, gheccr); + + /* + * AUTOC register which stores link settings gets cleared + * and reloaded from EEPROM after reset. We need to restore + * our stored value from init in case SW changed the attach + * type or speed. If this is the first time and link settings + * have not been stored, store default settings from AUTOC. 
+ */ + autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC); + if (hw->mac.link_settings_loaded) { + autoc &= ~(IXGBE_AUTOC_LMS_ATTACH_TYPE); + autoc &= ~(IXGBE_AUTOC_LMS_MASK); + autoc |= hw->mac.link_attach_type; + autoc |= hw->mac.link_mode_select; + IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc); + } else { + hw->mac.link_attach_type = + (autoc & IXGBE_AUTOC_LMS_ATTACH_TYPE); + hw->mac.link_mode_select = (autoc & IXGBE_AUTOC_LMS_MASK); + hw->mac.link_settings_loaded = true; + } + + /* Store the permanent mac address */ + ixgbe_get_mac_addr(hw, hw->mac.perm_addr); + + return status; +} + +static struct ixgbe_mac_operations mac_ops_82598 = { + .reset = &ixgbe_reset_hw_82598, + .get_media_type = &ixgbe_get_media_type_82598, +}; + +static struct ixgbe_phy_operations phy_ops_82598EB = { + .setup = &ixgbe_setup_copper_link_82598, + .check = &ixgbe_check_copper_link_82598, + .setup_speed = &ixgbe_setup_copper_link_speed_82598, + .get_settings = &ixgbe_get_copper_link_settings_82598, +}; + +struct ixgbe_info ixgbe_82598EB_info = { + .mac = ixgbe_mac_82598EB, + .get_invariants = &ixgbe_get_invariants_82598, + .mac_ops = &mac_ops_82598, + .phy_ops = &phy_ops_82598EB, +}; + +static struct ixgbe_phy_operations phy_ops_82598AT = { + .setup = &ixgbe_setup_tnx_phy_link, + .check = &ixgbe_check_tnx_phy_link, + .setup_speed = &ixgbe_setup_tnx_phy_link_speed, + .get_settings = &ixgbe_get_copper_link_settings_82598, +}; + +struct ixgbe_info ixgbe_82598AT_info = { + .mac = ixgbe_mac_82598EB, + .get_invariants = &ixgbe_get_invariants_82598, + .mac_ops = &mac_ops_82598, + .phy_ops = &phy_ops_82598AT, +}; + +static struct ixgbe_phy_operations phy_ops_82598AF = { + .setup = &ixgbe_setup_mac_link_82598, + .check = &ixgbe_check_mac_link_82598, + .setup_speed = &ixgbe_setup_mac_link_speed_82598, + .get_settings = &ixgbe_get_link_settings_82598, +}; + +struct ixgbe_info ixgbe_82598AF_info = { + .mac = ixgbe_mac_82598EB, + .get_invariants = &ixgbe_get_invariants_82598, + .mac_ops = &mac_ops_82598, + .phy_ops = &phy_ops_82598AF, +}; + diff --git a/drivers/net/ixgbe/ixgbe_common.c b/drivers/net/ixgbe/ixgbe_common.c new file mode 100644 index 0000000000000000000000000000000000000000..512e3b22ed08e9e790b25707b92035fe74d567d3 --- /dev/null +++ b/drivers/net/ixgbe/ixgbe_common.c @@ -0,0 +1,1175 @@ +/******************************************************************************* + + Intel 10 Gigabit PCI Express Linux driver + Copyright(c) 1999 - 2007 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + Linux NICS + e1000-devel Mailing List + Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +#include +#include +#include + +#include "ixgbe_common.h" +#include "ixgbe_phy.h" + +static s32 ixgbe_clear_hw_cntrs(struct ixgbe_hw *hw); + +static s32 ixgbe_poll_eeprom_eerd_done(struct ixgbe_hw *hw); +static s32 ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw); +static void ixgbe_release_eeprom_semaphore(struct ixgbe_hw *hw); +static u16 ixgbe_calc_eeprom_checksum(struct ixgbe_hw *hw); + +static s32 ixgbe_clear_vfta(struct ixgbe_hw *hw); +static s32 ixgbe_init_rx_addrs(struct ixgbe_hw *hw); +static s32 ixgbe_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr); +static void ixgbe_add_mc_addr(struct ixgbe_hw *hw, u8 *mc_addr); + +/** + * ixgbe_start_hw - Prepare hardware for TX/RX + * @hw: pointer to hardware structure + * + * Starts the hardware by filling the bus info structure and media type, clears + * all on chip counters, initializes receive address registers, multicast + * table, VLAN filter table, calls routine to set up link and flow control + * settings, and leaves transmit and receive units disabled and uninitialized + **/ +s32 ixgbe_start_hw(struct ixgbe_hw *hw) +{ + u32 ctrl_ext; + + /* Set the media type */ + hw->phy.media_type = hw->mac.ops.get_media_type(hw); + + /* Identify the PHY */ + ixgbe_identify_phy(hw); + + /* + * Store MAC address from RAR0, clear receive address registers, and + * clear the multicast table + */ + ixgbe_init_rx_addrs(hw); + + /* Clear the VLAN filter table */ + ixgbe_clear_vfta(hw); + + /* Set up link */ + hw->phy.ops.setup(hw); + + /* Clear statistics registers */ + ixgbe_clear_hw_cntrs(hw); + + /* Set No Snoop Disable */ + ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT); + ctrl_ext |= IXGBE_CTRL_EXT_NS_DIS; + IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext); + + /* Clear adapter stopped flag */ + hw->adapter_stopped = false; + + return 0; +} + +/** + * ixgbe_init_hw - Generic hardware initialization + * @hw: pointer to hardware structure + * + * Initialize the hardware by reseting the hardware, filling the bus info + * structure and media type, clears all on chip counters, initializes receive + * address registers, multicast table, VLAN filter table, calls routine to set + * up link and flow control settings, and leaves transmit and receive units + * disabled and uninitialized + **/ +s32 ixgbe_init_hw(struct ixgbe_hw *hw) +{ + /* Reset the hardware */ + hw->mac.ops.reset(hw); + + /* Start the HW */ + ixgbe_start_hw(hw); + + return 0; +} + +/** + * ixgbe_clear_hw_cntrs - Generic clear hardware counters + * @hw: pointer to hardware structure + * + * Clears all hardware statistics counters by reading them from the hardware + * Statistics counters are clear on read. 
+ **/ +static s32 ixgbe_clear_hw_cntrs(struct ixgbe_hw *hw) +{ + u16 i = 0; + + IXGBE_READ_REG(hw, IXGBE_CRCERRS); + IXGBE_READ_REG(hw, IXGBE_ILLERRC); + IXGBE_READ_REG(hw, IXGBE_ERRBC); + IXGBE_READ_REG(hw, IXGBE_MSPDC); + for (i = 0; i < 8; i++) + IXGBE_READ_REG(hw, IXGBE_MPC(i)); + + IXGBE_READ_REG(hw, IXGBE_MLFC); + IXGBE_READ_REG(hw, IXGBE_MRFC); + IXGBE_READ_REG(hw, IXGBE_RLEC); + IXGBE_READ_REG(hw, IXGBE_LXONTXC); + IXGBE_READ_REG(hw, IXGBE_LXONRXC); + IXGBE_READ_REG(hw, IXGBE_LXOFFTXC); + IXGBE_READ_REG(hw, IXGBE_LXOFFRXC); + + for (i = 0; i < 8; i++) { + IXGBE_READ_REG(hw, IXGBE_PXONTXC(i)); + IXGBE_READ_REG(hw, IXGBE_PXONRXC(i)); + IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(i)); + IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i)); + } + + IXGBE_READ_REG(hw, IXGBE_PRC64); + IXGBE_READ_REG(hw, IXGBE_PRC127); + IXGBE_READ_REG(hw, IXGBE_PRC255); + IXGBE_READ_REG(hw, IXGBE_PRC511); + IXGBE_READ_REG(hw, IXGBE_PRC1023); + IXGBE_READ_REG(hw, IXGBE_PRC1522); + IXGBE_READ_REG(hw, IXGBE_GPRC); + IXGBE_READ_REG(hw, IXGBE_BPRC); + IXGBE_READ_REG(hw, IXGBE_MPRC); + IXGBE_READ_REG(hw, IXGBE_GPTC); + IXGBE_READ_REG(hw, IXGBE_GORCL); + IXGBE_READ_REG(hw, IXGBE_GORCH); + IXGBE_READ_REG(hw, IXGBE_GOTCL); + IXGBE_READ_REG(hw, IXGBE_GOTCH); + for (i = 0; i < 8; i++) + IXGBE_READ_REG(hw, IXGBE_RNBC(i)); + IXGBE_READ_REG(hw, IXGBE_RUC); + IXGBE_READ_REG(hw, IXGBE_RFC); + IXGBE_READ_REG(hw, IXGBE_ROC); + IXGBE_READ_REG(hw, IXGBE_RJC); + IXGBE_READ_REG(hw, IXGBE_MNGPRC); + IXGBE_READ_REG(hw, IXGBE_MNGPDC); + IXGBE_READ_REG(hw, IXGBE_MNGPTC); + IXGBE_READ_REG(hw, IXGBE_TORL); + IXGBE_READ_REG(hw, IXGBE_TORH); + IXGBE_READ_REG(hw, IXGBE_TPR); + IXGBE_READ_REG(hw, IXGBE_TPT); + IXGBE_READ_REG(hw, IXGBE_PTC64); + IXGBE_READ_REG(hw, IXGBE_PTC127); + IXGBE_READ_REG(hw, IXGBE_PTC255); + IXGBE_READ_REG(hw, IXGBE_PTC511); + IXGBE_READ_REG(hw, IXGBE_PTC1023); + IXGBE_READ_REG(hw, IXGBE_PTC1522); + IXGBE_READ_REG(hw, IXGBE_MPTC); + IXGBE_READ_REG(hw, IXGBE_BPTC); + for (i = 0; i < 16; i++) { + IXGBE_READ_REG(hw, IXGBE_QPRC(i)); + IXGBE_READ_REG(hw, IXGBE_QBRC(i)); + IXGBE_READ_REG(hw, IXGBE_QPTC(i)); + IXGBE_READ_REG(hw, IXGBE_QBTC(i)); + } + + return 0; +} + +/** + * ixgbe_get_mac_addr - Generic get MAC address + * @hw: pointer to hardware structure + * @mac_addr: Adapter MAC address + * + * Reads the adapter's MAC address from first Receive Address Register (RAR0) + * A reset of the adapter must be performed prior to calling this function + * in order for the MAC address to have been loaded from the EEPROM into RAR0 + **/ +s32 ixgbe_get_mac_addr(struct ixgbe_hw *hw, u8 *mac_addr) +{ + u32 rar_high; + u32 rar_low; + u16 i; + + rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(0)); + rar_low = IXGBE_READ_REG(hw, IXGBE_RAL(0)); + + for (i = 0; i < 4; i++) + mac_addr[i] = (u8)(rar_low >> (i*8)); + + for (i = 0; i < 2; i++) + mac_addr[i+4] = (u8)(rar_high >> (i*8)); + + return 0; +} + +s32 ixgbe_read_part_num(struct ixgbe_hw *hw, u32 *part_num) +{ + s32 ret_val; + u16 data; + + ret_val = ixgbe_read_eeprom(hw, IXGBE_PBANUM0_PTR, &data); + if (ret_val) { + hw_dbg(hw, "NVM Read Error\n"); + return ret_val; + } + *part_num = (u32)(data << 16); + + ret_val = ixgbe_read_eeprom(hw, IXGBE_PBANUM1_PTR, &data); + if (ret_val) { + hw_dbg(hw, "NVM Read Error\n"); + return ret_val; + } + *part_num |= data; + + return 0; +} + +/** + * ixgbe_stop_adapter - Generic stop TX/RX units + * @hw: pointer to hardware structure + * + * Sets the adapter_stopped flag within ixgbe_hw struct. Clears interrupts, + * disables transmit and receive units. 
The adapter_stopped flag is used by + * the shared code and drivers to determine if the adapter is in a stopped + * state and should not touch the hardware. + **/ +s32 ixgbe_stop_adapter(struct ixgbe_hw *hw) +{ + u32 number_of_queues; + u32 reg_val; + u16 i; + + /* + * Set the adapter_stopped flag so other driver functions stop touching + * the hardware + */ + hw->adapter_stopped = true; + + /* Disable the receive unit */ + reg_val = IXGBE_READ_REG(hw, IXGBE_RXCTRL); + reg_val &= ~(IXGBE_RXCTRL_RXEN); + IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, reg_val); + msleep(2); + + /* Clear interrupt mask to stop from interrupts being generated */ + IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_IRQ_CLEAR_MASK); + + /* Clear any pending interrupts */ + IXGBE_READ_REG(hw, IXGBE_EICR); + + /* Disable the transmit unit. Each queue must be disabled. */ + number_of_queues = hw->mac.num_tx_queues; + for (i = 0; i < number_of_queues; i++) { + reg_val = IXGBE_READ_REG(hw, IXGBE_TXDCTL(i)); + if (reg_val & IXGBE_TXDCTL_ENABLE) { + reg_val &= ~IXGBE_TXDCTL_ENABLE; + IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(i), reg_val); + } + } + + return 0; +} + +/** + * ixgbe_led_on - Turns on the software controllable LEDs. + * @hw: pointer to hardware structure + * @index: led number to turn on + **/ +s32 ixgbe_led_on(struct ixgbe_hw *hw, u32 index) +{ + u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL); + + /* To turn on the LED, set mode to ON. */ + led_reg &= ~IXGBE_LED_MODE_MASK(index); + led_reg |= IXGBE_LED_ON << IXGBE_LED_MODE_SHIFT(index); + IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg); + + return 0; +} + +/** + * ixgbe_led_off - Turns off the software controllable LEDs. + * @hw: pointer to hardware structure + * @index: led number to turn off + **/ +s32 ixgbe_led_off(struct ixgbe_hw *hw, u32 index) +{ + u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL); + + /* To turn off the LED, set mode to OFF. */ + led_reg &= ~IXGBE_LED_MODE_MASK(index); + led_reg |= IXGBE_LED_OFF << IXGBE_LED_MODE_SHIFT(index); + IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg); + + return 0; +} + + +/** + * ixgbe_init_eeprom - Initialize EEPROM params + * @hw: pointer to hardware structure + * + * Initializes the EEPROM parameters ixgbe_eeprom_info within the + * ixgbe_hw struct in order to set up EEPROM access. + **/ +s32 ixgbe_init_eeprom(struct ixgbe_hw *hw) +{ + struct ixgbe_eeprom_info *eeprom = &hw->eeprom; + u32 eec; + u16 eeprom_size; + + if (eeprom->type == ixgbe_eeprom_uninitialized) { + eeprom->type = ixgbe_eeprom_none; + + /* + * Check for EEPROM present first. + * If not present leave as none + */ + eec = IXGBE_READ_REG(hw, IXGBE_EEC); + if (eec & IXGBE_EEC_PRES) { + eeprom->type = ixgbe_eeprom_spi; + + /* + * SPI EEPROM is assumed here. This code would need to + * change if a future EEPROM is not SPI. + */ + eeprom_size = (u16)((eec & IXGBE_EEC_SIZE) >> + IXGBE_EEC_SIZE_SHIFT); + eeprom->word_size = 1 << (eeprom_size + + IXGBE_EEPROM_WORD_SIZE_SHIFT); + } + + if (eec & IXGBE_EEC_ADDR_SIZE) + eeprom->address_bits = 16; + else + eeprom->address_bits = 8; + hw_dbg(hw, "Eeprom params: type = %d, size = %d, address bits: " + "%d\n", eeprom->type, eeprom->word_size, + eeprom->address_bits); + } + + return 0; +} + +/** + * ixgbe_read_eeprom - Read EEPROM word using EERD + * @hw: pointer to hardware structure + * @offset: offset of word in the EEPROM to read + * @data: word read from the EEPROM + * + * Reads a 16 bit word from the EEPROM using the EERD register. 
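+ * For example: the word offset is shifted into the EERD address field with
+ * the START bit set, ixgbe_poll_eeprom_eerd_done() waits for the DONE bit,
+ * and the 16-bit result is then taken from the upper data bits of EERD; a
+ * poll timeout is returned as IXGBE_ERR_EEPROM.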
+ **/ +s32 ixgbe_read_eeprom(struct ixgbe_hw *hw, u16 offset, u16 *data) +{ + u32 eerd; + s32 status; + + eerd = (offset << IXGBE_EEPROM_READ_ADDR_SHIFT) + + IXGBE_EEPROM_READ_REG_START; + + IXGBE_WRITE_REG(hw, IXGBE_EERD, eerd); + status = ixgbe_poll_eeprom_eerd_done(hw); + + if (status == 0) + *data = (IXGBE_READ_REG(hw, IXGBE_EERD) >> + IXGBE_EEPROM_READ_REG_DATA); + else + hw_dbg(hw, "Eeprom read timed out\n"); + + return status; +} + +/** + * ixgbe_poll_eeprom_eerd_done - Poll EERD status + * @hw: pointer to hardware structure + * + * Polls the status bit (bit 1) of the EERD to determine when the read is done. + **/ +static s32 ixgbe_poll_eeprom_eerd_done(struct ixgbe_hw *hw) +{ + u32 i; + u32 reg; + s32 status = IXGBE_ERR_EEPROM; + + for (i = 0; i < IXGBE_EERD_ATTEMPTS; i++) { + reg = IXGBE_READ_REG(hw, IXGBE_EERD); + if (reg & IXGBE_EEPROM_READ_REG_DONE) { + status = 0; + break; + } + udelay(5); + } + return status; +} + +/** + * ixgbe_get_eeprom_semaphore - Get hardware semaphore + * @hw: pointer to hardware structure + * + * Sets the hardware semaphores so EEPROM access can occur for bit-bang method + **/ +static s32 ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw) +{ + s32 status = IXGBE_ERR_EEPROM; + u32 timeout; + u32 i; + u32 swsm; + + /* Set timeout value based on size of EEPROM */ + timeout = hw->eeprom.word_size + 1; + + /* Get SMBI software semaphore between device drivers first */ + for (i = 0; i < timeout; i++) { + /* + * If the SMBI bit is 0 when we read it, then the bit will be + * set and we have the semaphore + */ + swsm = IXGBE_READ_REG(hw, IXGBE_SWSM); + if (!(swsm & IXGBE_SWSM_SMBI)) { + status = 0; + break; + } + msleep(1); + } + + /* Now get the semaphore between SW/FW through the SWESMBI bit */ + if (status == 0) { + for (i = 0; i < timeout; i++) { + swsm = IXGBE_READ_REG(hw, IXGBE_SWSM); + + /* Set the SW EEPROM semaphore bit to request access */ + swsm |= IXGBE_SWSM_SWESMBI; + IXGBE_WRITE_REG(hw, IXGBE_SWSM, swsm); + + /* + * If we set the bit successfully then we got the + * semaphore. + */ + swsm = IXGBE_READ_REG(hw, IXGBE_SWSM); + if (swsm & IXGBE_SWSM_SWESMBI) + break; + + udelay(50); + } + + /* + * Release semaphores and return error if SW EEPROM semaphore + * was not granted because we don't have access to the EEPROM + */ + if (i >= timeout) { + hw_dbg(hw, "Driver can't access the Eeprom - Semaphore " + "not granted.\n"); + ixgbe_release_eeprom_semaphore(hw); + status = IXGBE_ERR_EEPROM; + } + } + + return status; +} + +/** + * ixgbe_release_eeprom_semaphore - Release hardware semaphore + * @hw: pointer to hardware structure + * + * This function clears hardware semaphore bits. 
+ **/ +static void ixgbe_release_eeprom_semaphore(struct ixgbe_hw *hw) +{ + u32 swsm; + + swsm = IXGBE_READ_REG(hw, IXGBE_SWSM); + + /* Release both semaphores by writing 0 to the bits SWESMBI and SMBI */ + swsm &= ~(IXGBE_SWSM_SWESMBI | IXGBE_SWSM_SMBI); + IXGBE_WRITE_REG(hw, IXGBE_SWSM, swsm); +} + +/** + * ixgbe_calc_eeprom_checksum - Calculates and returns the checksum + * @hw: pointer to hardware structure + **/ +static u16 ixgbe_calc_eeprom_checksum(struct ixgbe_hw *hw) +{ + u16 i; + u16 j; + u16 checksum = 0; + u16 length = 0; + u16 pointer = 0; + u16 word = 0; + + /* Include 0x0-0x3F in the checksum */ + for (i = 0; i < IXGBE_EEPROM_CHECKSUM; i++) { + if (ixgbe_read_eeprom(hw, i, &word) != 0) { + hw_dbg(hw, "EEPROM read failed\n"); + break; + } + checksum += word; + } + + /* Include all data from pointers except for the fw pointer */ + for (i = IXGBE_PCIE_ANALOG_PTR; i < IXGBE_FW_PTR; i++) { + ixgbe_read_eeprom(hw, i, &pointer); + + /* Make sure the pointer seems valid */ + if (pointer != 0xFFFF && pointer != 0) { + ixgbe_read_eeprom(hw, pointer, &length); + + if (length != 0xFFFF && length != 0) { + for (j = pointer+1; j <= pointer+length; j++) { + ixgbe_read_eeprom(hw, j, &word); + checksum += word; + } + } + } + } + + checksum = (u16)IXGBE_EEPROM_SUM - checksum; + + return checksum; +} + +/** + * ixgbe_validate_eeprom_checksum - Validate EEPROM checksum + * @hw: pointer to hardware structure + * @checksum_val: calculated checksum + * + * Performs checksum calculation and validates the EEPROM checksum. If the + * caller does not need checksum_val, the value can be NULL. + **/ +s32 ixgbe_validate_eeprom_checksum(struct ixgbe_hw *hw, u16 *checksum_val) +{ + s32 status; + u16 checksum; + u16 read_checksum = 0; + + /* + * Read the first word from the EEPROM. If this times out or fails, do + * not continue or we could be in for a very long wait while every + * EEPROM read fails + */ + status = ixgbe_read_eeprom(hw, 0, &checksum); + + if (status == 0) { + checksum = ixgbe_calc_eeprom_checksum(hw); + + ixgbe_read_eeprom(hw, IXGBE_EEPROM_CHECKSUM, &read_checksum); + + /* + * Verify read checksum from EEPROM is the same as + * calculated checksum + */ + if (read_checksum != checksum) + status = IXGBE_ERR_EEPROM_CHECKSUM; + + /* If the user cares, return the calculated checksum */ + if (checksum_val) + *checksum_val = checksum; + } else { + hw_dbg(hw, "EEPROM read failed\n"); + } + + return status; +} + +/** + * ixgbe_validate_mac_addr - Validate MAC address + * @mac_addr: pointer to MAC address. + * + * Tests a MAC address to ensure it is a valid Individual Address + **/ +s32 ixgbe_validate_mac_addr(u8 *mac_addr) +{ + s32 status = 0; + + /* Make sure it is not a multicast address */ + if (IXGBE_IS_MULTICAST(mac_addr)) + status = IXGBE_ERR_INVALID_MAC_ADDR; + /* Not a broadcast address */ + else if (IXGBE_IS_BROADCAST(mac_addr)) + status = IXGBE_ERR_INVALID_MAC_ADDR; + /* Reject the zero address */ + else if (mac_addr[0] == 0 && mac_addr[1] == 0 && mac_addr[2] == 0 && + mac_addr[3] == 0 && mac_addr[4] == 0 && mac_addr[5] == 0) + status = IXGBE_ERR_INVALID_MAC_ADDR; + + return status; +} + +/** + * ixgbe_set_rar - Set RX address register + * @hw: pointer to hardware structure + * @addr: Address to put into receive address register + * @index: Receive address register to write + * @vind: Vind to set RAR to + * @enable_addr: set flag that address is active + * + * Puts an ethernet address into a receive address register. 
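+ * For illustration, with the (made-up) address 00:1b:21:aa:bb:cc the bytes
+ * are packed little endian, so RAL(index) is written with 0xaa211b00 and
+ * the low 16 bits of RAH(index) with 0xccbb, alongside the VIND field and,
+ * when enable_addr is non-zero, the address-valid (AV) bit.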
+ **/ +s32 ixgbe_set_rar(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vind, + u32 enable_addr) +{ + u32 rar_low, rar_high; + + /* + * HW expects these in little endian so we reverse the byte order from + * network order (big endian) to little endian + */ + rar_low = ((u32)addr[0] | + ((u32)addr[1] << 8) | + ((u32)addr[2] << 16) | + ((u32)addr[3] << 24)); + + rar_high = ((u32)addr[4] | + ((u32)addr[5] << 8) | + ((vind << IXGBE_RAH_VIND_SHIFT) & IXGBE_RAH_VIND_MASK)); + + if (enable_addr != 0) + rar_high |= IXGBE_RAH_AV; + + IXGBE_WRITE_REG(hw, IXGBE_RAL(index), rar_low); + IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high); + + return 0; +} + +/** + * ixgbe_init_rx_addrs - Initializes receive address filters. + * @hw: pointer to hardware structure + * + * Places the MAC address in receive address register 0 and clears the rest + * of the receive addresss registers. Clears the multicast table. Assumes + * the receiver is in reset when the routine is called. + **/ +static s32 ixgbe_init_rx_addrs(struct ixgbe_hw *hw) +{ + u32 i; + u32 rar_entries = hw->mac.num_rx_addrs; + + /* + * If the current mac address is valid, assume it is a software override + * to the permanent address. + * Otherwise, use the permanent address from the eeprom. + */ + if (ixgbe_validate_mac_addr(hw->mac.addr) == + IXGBE_ERR_INVALID_MAC_ADDR) { + /* Get the MAC address from the RAR0 for later reference */ + ixgbe_get_mac_addr(hw, hw->mac.addr); + + hw_dbg(hw, " Keeping Current RAR0 Addr =%.2X %.2X %.2X ", + hw->mac.addr[0], hw->mac.addr[1], + hw->mac.addr[2]); + hw_dbg(hw, "%.2X %.2X %.2X\n", hw->mac.addr[3], + hw->mac.addr[4], hw->mac.addr[5]); + } else { + /* Setup the receive address. */ + hw_dbg(hw, "Overriding MAC Address in RAR[0]\n"); + hw_dbg(hw, " New MAC Addr =%.2X %.2X %.2X ", + hw->mac.addr[0], hw->mac.addr[1], + hw->mac.addr[2]); + hw_dbg(hw, "%.2X %.2X %.2X\n", hw->mac.addr[3], + hw->mac.addr[4], hw->mac.addr[5]); + + ixgbe_set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV); + } + + hw->addr_ctrl.rar_used_count = 1; + + /* Zero out the other receive addresses. */ + hw_dbg(hw, "Clearing RAR[1-15]\n"); + for (i = 1; i < rar_entries; i++) { + IXGBE_WRITE_REG(hw, IXGBE_RAL(i), 0); + IXGBE_WRITE_REG(hw, IXGBE_RAH(i), 0); + } + + /* Clear the MTA */ + hw->addr_ctrl.mc_addr_in_rar_count = 0; + hw->addr_ctrl.mta_in_use = 0; + IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, hw->mac.mc_filter_type); + + hw_dbg(hw, " Clearing MTA\n"); + for (i = 0; i < IXGBE_MC_TBL_SIZE; i++) + IXGBE_WRITE_REG(hw, IXGBE_MTA(i), 0); + + return 0; +} + +/** + * ixgbe_mta_vector - Determines bit-vector in multicast table to set + * @hw: pointer to hardware structure + * @mc_addr: the multicast address + * + * Extracts the 12 bits, from a multicast address, to determine which + * bit-vector to set in the multicast table. The hardware uses 12 bits, from + * incoming rx multicast addresses, to determine the bit-vector to check in + * the MTA. Which of the 4 combination, of 12-bits, the hardware uses is set + * by the MO field of the MCSTCTRL. The MO field is set during initalization + * to mc_filter_type. 
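+ * For illustration, with mc_filter_type 0 (hash on address bits [47:36])
+ * and e.g. the multicast address 01:00:5e:00:00:fb, the vector is
+ * (0x00 >> 4) | (0xfb << 4) = 0xfb0; ixgbe_set_mta() would then set bit
+ * (0xfb0 & 0x1F) = 16 of MTA register (0xfb0 >> 5) & 0x7F = 125.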
+ **/ +static s32 ixgbe_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr) +{ + u32 vector = 0; + + switch (hw->mac.mc_filter_type) { + case 0: /* use bits [47:36] of the address */ + vector = ((mc_addr[4] >> 4) | (((u16)mc_addr[5]) << 4)); + break; + case 1: /* use bits [46:35] of the address */ + vector = ((mc_addr[4] >> 3) | (((u16)mc_addr[5]) << 5)); + break; + case 2: /* use bits [45:34] of the address */ + vector = ((mc_addr[4] >> 2) | (((u16)mc_addr[5]) << 6)); + break; + case 3: /* use bits [43:32] of the address */ + vector = ((mc_addr[4]) | (((u16)mc_addr[5]) << 8)); + break; + default: /* Invalid mc_filter_type */ + hw_dbg(hw, "MC filter type param set incorrectly\n"); + break; + } + + /* vector can only be 12-bits or boundary will be exceeded */ + vector &= 0xFFF; + return vector; +} + +/** + * ixgbe_set_mta - Set bit-vector in multicast table + * @hw: pointer to hardware structure + * @hash_value: Multicast address hash value + * + * Sets the bit-vector in the multicast table. + **/ +static void ixgbe_set_mta(struct ixgbe_hw *hw, u8 *mc_addr) +{ + u32 vector; + u32 vector_bit; + u32 vector_reg; + u32 mta_reg; + + hw->addr_ctrl.mta_in_use++; + + vector = ixgbe_mta_vector(hw, mc_addr); + hw_dbg(hw, " bit-vector = 0x%03X\n", vector); + + /* + * The MTA is a register array of 128 32-bit registers. It is treated + * like an array of 4096 bits. We want to set bit + * BitArray[vector_value]. So we figure out what register the bit is + * in, read it, OR in the new bit, then write back the new value. The + * register is determined by the upper 7 bits of the vector value and + * the bit within that register are determined by the lower 5 bits of + * the value. + */ + vector_reg = (vector >> 5) & 0x7F; + vector_bit = vector & 0x1F; + mta_reg = IXGBE_READ_REG(hw, IXGBE_MTA(vector_reg)); + mta_reg |= (1 << vector_bit); + IXGBE_WRITE_REG(hw, IXGBE_MTA(vector_reg), mta_reg); +} + +/** + * ixgbe_add_mc_addr - Adds a multicast address. + * @hw: pointer to hardware structure + * @mc_addr: new multicast address + * + * Adds it to unused receive address register or to the multicast table. + **/ +static void ixgbe_add_mc_addr(struct ixgbe_hw *hw, u8 *mc_addr) +{ + u32 rar_entries = hw->mac.num_rx_addrs; + + hw_dbg(hw, " MC Addr =%.2X %.2X %.2X %.2X %.2X %.2X\n", + mc_addr[0], mc_addr[1], mc_addr[2], + mc_addr[3], mc_addr[4], mc_addr[5]); + + /* + * Place this multicast address in the RAR if there is room, + * else put it in the MTA + */ + if (hw->addr_ctrl.rar_used_count < rar_entries) { + ixgbe_set_rar(hw, hw->addr_ctrl.rar_used_count, + mc_addr, 0, IXGBE_RAH_AV); + hw_dbg(hw, "Added a multicast address to RAR[%d]\n", + hw->addr_ctrl.rar_used_count); + hw->addr_ctrl.rar_used_count++; + hw->addr_ctrl.mc_addr_in_rar_count++; + } else { + ixgbe_set_mta(hw, mc_addr); + } + + hw_dbg(hw, "ixgbe_add_mc_addr Complete\n"); +} + +/** + * ixgbe_update_mc_addr_list - Updates MAC list of multicast addresses + * @hw: pointer to hardware structure + * @mc_addr_list: the list of new multicast addresses + * @mc_addr_count: number of addresses + * @pad: number of bytes between addresses in the list + * + * The given list replaces any existing list. Clears the MC addrs from receive + * address registers and the multicast table. Uses unsed receive address + * registers for the first multicast addresses, and hashes the rest into the + * multicast table. 
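+ * For illustration: with 20 multicast addresses and 16 RAR entries, RAR[0]
+ * already holding the station address, the first 15 addresses land in
+ * RAR[1]-RAR[15] and the remaining 5 are hashed into the MTA.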
+ **/ +s32 ixgbe_update_mc_addr_list(struct ixgbe_hw *hw, u8 *mc_addr_list, + u32 mc_addr_count, u32 pad) +{ + u32 i; + u32 rar_entries = hw->mac.num_rx_addrs; + + /* + * Set the new number of MC addresses that we are being requested to + * use. + */ + hw->addr_ctrl.num_mc_addrs = mc_addr_count; + hw->addr_ctrl.rar_used_count -= hw->addr_ctrl.mc_addr_in_rar_count; + hw->addr_ctrl.mc_addr_in_rar_count = 0; + hw->addr_ctrl.mta_in_use = 0; + + /* Zero out the other receive addresses. */ + hw_dbg(hw, "Clearing RAR[1-15]\n"); + for (i = hw->addr_ctrl.rar_used_count; i < rar_entries; i++) { + IXGBE_WRITE_REG(hw, IXGBE_RAL(i), 0); + IXGBE_WRITE_REG(hw, IXGBE_RAH(i), 0); + } + + /* Clear the MTA */ + hw_dbg(hw, " Clearing MTA\n"); + for (i = 0; i < IXGBE_MC_TBL_SIZE; i++) + IXGBE_WRITE_REG(hw, IXGBE_MTA(i), 0); + + /* Add the new addresses */ + for (i = 0; i < mc_addr_count; i++) { + hw_dbg(hw, " Adding the multicast addresses:\n"); + ixgbe_add_mc_addr(hw, mc_addr_list + + (i * (IXGBE_ETH_LENGTH_OF_ADDRESS + pad))); + } + + /* Enable mta */ + if (hw->addr_ctrl.mta_in_use > 0) + IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, + IXGBE_MCSTCTRL_MFE | hw->mac.mc_filter_type); + + hw_dbg(hw, "ixgbe_update_mc_addr_list Complete\n"); + return 0; +} + +/** + * ixgbe_clear_vfta - Clear VLAN filter table + * @hw: pointer to hardware structure + * + * Clears the VLAN filer table, and the VMDq index associated with the filter + **/ +static s32 ixgbe_clear_vfta(struct ixgbe_hw *hw) +{ + u32 offset; + u32 vlanbyte; + + for (offset = 0; offset < IXGBE_VLAN_FILTER_TBL_SIZE; offset++) + IXGBE_WRITE_REG(hw, IXGBE_VFTA(offset), 0); + + for (vlanbyte = 0; vlanbyte < 4; vlanbyte++) + for (offset = 0; offset < IXGBE_VLAN_FILTER_TBL_SIZE; offset++) + IXGBE_WRITE_REG(hw, IXGBE_VFTAVIND(vlanbyte, offset), + 0); + + return 0; +} + +/** + * ixgbe_set_vfta - Set VLAN filter table + * @hw: pointer to hardware structure + * @vlan: VLAN id to write to VLAN filter + * @vind: VMDq output index that maps queue to VLAN id in VFTA + * @vlan_on: boolean flag to turn on/off VLAN in VFTA + * + * Turn on/off specified VLAN in the VLAN filter table. + **/ +s32 ixgbe_set_vfta(struct ixgbe_hw *hw, u32 vlan, u32 vind, + bool vlan_on) +{ + u32 VftaIndex; + u32 BitOffset; + u32 VftaReg; + u32 VftaByte; + + /* Determine 32-bit word position in array */ + VftaIndex = (vlan >> 5) & 0x7F; /* upper seven bits */ + + /* Determine the location of the (VMD) queue index */ + VftaByte = ((vlan >> 3) & 0x03); /* bits (4:3) indicating byte array */ + BitOffset = (vlan & 0x7) << 2; /* lower 3 bits indicate nibble */ + + /* Set the nibble for VMD queue index */ + VftaReg = IXGBE_READ_REG(hw, IXGBE_VFTAVIND(VftaByte, VftaIndex)); + VftaReg &= (~(0x0F << BitOffset)); + VftaReg |= (vind << BitOffset); + IXGBE_WRITE_REG(hw, IXGBE_VFTAVIND(VftaByte, VftaIndex), VftaReg); + + /* Determine the location of the bit for this VLAN id */ + BitOffset = vlan & 0x1F; /* lower five bits */ + + VftaReg = IXGBE_READ_REG(hw, IXGBE_VFTA(VftaIndex)); + if (vlan_on) + /* Turn on this VLAN id */ + VftaReg |= (1 << BitOffset); + else + /* Turn off this VLAN id */ + VftaReg &= ~(1 << BitOffset); + IXGBE_WRITE_REG(hw, IXGBE_VFTA(VftaIndex), VftaReg); + + return 0; +} + +/** + * ixgbe_setup_fc - Configure flow control settings + * @hw: pointer to hardware structure + * @packetbuf_num: packet buffer number (0-7) + * + * Configures the flow control settings based on SW configuration. + * This function is used for 802.3x flow control configuration only. 
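+ * For illustration: with hw->fc.type == ixgbe_fc_full both FCTRL.RFCE and
+ * RMCS.TFCE_802_3X are enabled and, because transmit pause is active, the
+ * low and high water marks are programmed into FCRTL/FCRTH for the given
+ * packet buffer (the low water mark ORed with XONE when send_xon is set).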
+ **/ +s32 ixgbe_setup_fc(struct ixgbe_hw *hw, s32 packetbuf_num) +{ + u32 frctl_reg; + u32 rmcs_reg; + + if (packetbuf_num < 0 || packetbuf_num > 7) + hw_dbg(hw, "Invalid packet buffer number [%d], expected range" + "is 0-7\n", packetbuf_num); + + frctl_reg = IXGBE_READ_REG(hw, IXGBE_FCTRL); + frctl_reg &= ~(IXGBE_FCTRL_RFCE | IXGBE_FCTRL_RPFCE); + + rmcs_reg = IXGBE_READ_REG(hw, IXGBE_RMCS); + rmcs_reg &= ~(IXGBE_RMCS_TFCE_PRIORITY | IXGBE_RMCS_TFCE_802_3X); + + /* + * We want to save off the original Flow Control configuration just in + * case we get disconnected and then reconnected into a different hub + * or switch with different Flow Control capabilities. + */ + hw->fc.type = hw->fc.original_type; + + /* + * The possible values of the "flow_control" parameter are: + * 0: Flow control is completely disabled + * 1: Rx flow control is enabled (we can receive pause frames but not + * send pause frames). + * 2: Tx flow control is enabled (we can send pause frames but we do not + * support receiving pause frames) + * 3: Both Rx and TX flow control (symmetric) are enabled. + * other: Invalid. + */ + switch (hw->fc.type) { + case ixgbe_fc_none: + break; + case ixgbe_fc_rx_pause: + /* + * RX Flow control is enabled, + * and TX Flow control is disabled. + */ + frctl_reg |= IXGBE_FCTRL_RFCE; + break; + case ixgbe_fc_tx_pause: + /* + * TX Flow control is enabled, and RX Flow control is disabled, + * by a software over-ride. + */ + rmcs_reg |= IXGBE_RMCS_TFCE_802_3X; + break; + case ixgbe_fc_full: + /* + * Flow control (both RX and TX) is enabled by a software + * over-ride. + */ + frctl_reg |= IXGBE_FCTRL_RFCE; + rmcs_reg |= IXGBE_RMCS_TFCE_802_3X; + break; + default: + /* We should never get here. The value should be 0-3. */ + hw_dbg(hw, "Flow control param set incorrectly\n"); + break; + } + + /* Enable 802.3x based flow control settings. */ + IXGBE_WRITE_REG(hw, IXGBE_FCTRL, frctl_reg); + IXGBE_WRITE_REG(hw, IXGBE_RMCS, rmcs_reg); + + /* + * We need to set up the Receive Threshold high and low water + * marks as well as (optionally) enabling the transmission of + * XON frames. + */ + if (hw->fc.type & ixgbe_fc_tx_pause) { + if (hw->fc.send_xon) { + IXGBE_WRITE_REG(hw, IXGBE_FCRTL(packetbuf_num), + (hw->fc.low_water | IXGBE_FCRTL_XONE)); + } else { + IXGBE_WRITE_REG(hw, IXGBE_FCRTL(packetbuf_num), + hw->fc.low_water); + } + IXGBE_WRITE_REG(hw, IXGBE_FCRTH(packetbuf_num), + (hw->fc.high_water)|IXGBE_FCRTH_FCEN); + } + + IXGBE_WRITE_REG(hw, IXGBE_FCTTV(0), hw->fc.pause_time); + IXGBE_WRITE_REG(hw, IXGBE_FCRTV, (hw->fc.pause_time >> 1)); + + return 0; +} + +/** + * ixgbe_disable_pcie_master - Disable PCI-express master access + * @hw: pointer to hardware structure + * + * Disables PCI-Express master access and verifies there are no pending + * requests. IXGBE_ERR_MASTER_REQUESTS_PENDING is returned if master disable + * bit hasn't caused the master requests to be disabled, else 0 + * is returned signifying master requests disabled. 
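+ * For illustration: CTRL.GIO_DIS is set and the STATUS register's GIO bit
+ * is polled (up to IXGBE_PCI_MASTER_DISABLE_TIMEOUT attempts, 100 us
+ * apart); if it never clears, IXGBE_ERR_MASTER_REQUESTS_PENDING is
+ * returned.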
+ **/ +s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw) +{ + u32 ctrl; + s32 i; + s32 status = IXGBE_ERR_MASTER_REQUESTS_PENDING; + + ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL); + ctrl |= IXGBE_CTRL_GIO_DIS; + IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl); + + for (i = 0; i < IXGBE_PCI_MASTER_DISABLE_TIMEOUT; i++) { + if (!(IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_GIO)) { + status = 0; + break; + } + udelay(100); + } + + return status; +} + + +/** + * ixgbe_acquire_swfw_sync - Aquire SWFW semaphore + * @hw: pointer to hardware structure + * @mask: Mask to specify wich semaphore to acquire + * + * Aquires the SWFW semaphore throught the GSSR register for the specified + * function (CSR, PHY0, PHY1, EEPROM, Flash) + **/ +s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u16 mask) +{ + u32 gssr; + u32 swmask = mask; + u32 fwmask = mask << 5; + s32 timeout = 200; + + while (timeout) { + if (ixgbe_get_eeprom_semaphore(hw)) + return -IXGBE_ERR_SWFW_SYNC; + + gssr = IXGBE_READ_REG(hw, IXGBE_GSSR); + if (!(gssr & (fwmask | swmask))) + break; + + /* + * Firmware currently using resource (fwmask) or other software + * thread currently using resource (swmask) + */ + ixgbe_release_eeprom_semaphore(hw); + msleep(5); + timeout--; + } + + if (!timeout) { + hw_dbg(hw, "Driver can't access resource, GSSR timeout.\n"); + return -IXGBE_ERR_SWFW_SYNC; + } + + gssr |= swmask; + IXGBE_WRITE_REG(hw, IXGBE_GSSR, gssr); + + ixgbe_release_eeprom_semaphore(hw); + return 0; +} + +/** + * ixgbe_release_swfw_sync - Release SWFW semaphore + * @hw: pointer to hardware structure + * @mask: Mask to specify wich semaphore to release + * + * Releases the SWFW semaphore throught the GSSR register for the specified + * function (CSR, PHY0, PHY1, EEPROM, Flash) + **/ +void ixgbe_release_swfw_sync(struct ixgbe_hw *hw, u16 mask) +{ + u32 gssr; + u32 swmask = mask; + + ixgbe_get_eeprom_semaphore(hw); + + gssr = IXGBE_READ_REG(hw, IXGBE_GSSR); + gssr &= ~swmask; + IXGBE_WRITE_REG(hw, IXGBE_GSSR, gssr); + + ixgbe_release_eeprom_semaphore(hw); +} + +/** + * ixgbe_read_analog_reg8- Reads 8 bit 82598 Atlas analog register + * @hw: pointer to hardware structure + * @reg: analog register to read + * @val: read value + * + * Performs write operation to analog register specified. + **/ +s32 ixgbe_read_analog_reg8(struct ixgbe_hw *hw, u32 reg, u8 *val) +{ + u32 atlas_ctl; + + IXGBE_WRITE_REG(hw, IXGBE_ATLASCTL, + IXGBE_ATLASCTL_WRITE_CMD | (reg << 8)); + IXGBE_WRITE_FLUSH(hw); + udelay(10); + atlas_ctl = IXGBE_READ_REG(hw, IXGBE_ATLASCTL); + *val = (u8)atlas_ctl; + + return 0; +} + +/** + * ixgbe_write_analog_reg8- Writes 8 bit Atlas analog register + * @hw: pointer to hardware structure + * @reg: atlas register to write + * @val: value to write + * + * Performs write operation to Atlas analog register specified. + **/ +s32 ixgbe_write_analog_reg8(struct ixgbe_hw *hw, u32 reg, u8 val) +{ + u32 atlas_ctl; + + atlas_ctl = (reg << 8) | val; + IXGBE_WRITE_REG(hw, IXGBE_ATLASCTL, atlas_ctl); + IXGBE_WRITE_FLUSH(hw); + udelay(10); + + return 0; +} + diff --git a/drivers/net/ixgbe/ixgbe_common.h b/drivers/net/ixgbe/ixgbe_common.h new file mode 100644 index 0000000000000000000000000000000000000000..de6ddd5d04ada831cb7c958d00f885c74af85977 --- /dev/null +++ b/drivers/net/ixgbe/ixgbe_common.h @@ -0,0 +1,86 @@ +/******************************************************************************* + + Intel 10 Gigabit PCI Express Linux driver + Copyright(c) 1999 - 2007 Intel Corporation. 
+ + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + Linux NICS + e1000-devel Mailing List + Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +#ifndef _IXGBE_COMMON_H_ +#define _IXGBE_COMMON_H_ + +#include "ixgbe_type.h" + +s32 ixgbe_init_hw(struct ixgbe_hw *hw); +s32 ixgbe_start_hw(struct ixgbe_hw *hw); +s32 ixgbe_get_mac_addr(struct ixgbe_hw *hw, u8 *mac_addr); +s32 ixgbe_stop_adapter(struct ixgbe_hw *hw); +s32 ixgbe_read_part_num(struct ixgbe_hw *hw, u32 *part_num); + +s32 ixgbe_led_on(struct ixgbe_hw *hw, u32 index); +s32 ixgbe_led_off(struct ixgbe_hw *hw, u32 index); + +s32 ixgbe_init_eeprom(struct ixgbe_hw *hw); +s32 ixgbe_read_eeprom(struct ixgbe_hw *hw, u16 offset, u16 *data); +s32 ixgbe_validate_eeprom_checksum(struct ixgbe_hw *hw, u16 *checksum_val); + +s32 ixgbe_set_rar(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vind, + u32 enable_addr); +s32 ixgbe_update_mc_addr_list(struct ixgbe_hw *hw, u8 *mc_addr_list, + u32 mc_addr_count, u32 pad); +s32 ixgbe_set_vfta(struct ixgbe_hw *hw, u32 vlan, u32 vind, bool vlan_on); +s32 ixgbe_validate_mac_addr(u8 *mac_addr); + +s32 ixgbe_setup_fc(struct ixgbe_hw *hw, s32 packtetbuf_num); + +s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u16 mask); +void ixgbe_release_swfw_sync(struct ixgbe_hw *hw, u16 mask); +s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw); + +s32 ixgbe_read_analog_reg8(struct ixgbe_hw *hw, u32 reg, u8 *val); +s32 ixgbe_write_analog_reg8(struct ixgbe_hw *hw, u32 reg, u8 val); + +#define IXGBE_WRITE_REG(a, reg, value) writel((value), ((a)->hw_addr + (reg))) + +#define IXGBE_READ_REG(a, reg) readl((a)->hw_addr + (reg)) + +#define IXGBE_WRITE_REG_ARRAY(a, reg, offset, value) (\ + writel((value), ((a)->hw_addr + (reg) + ((offset) << 2)))) + +#define IXGBE_READ_REG_ARRAY(a, reg, offset) (\ + readl((a)->hw_addr + (reg) + ((offset) << 2))) + +#define IXGBE_WRITE_FLUSH(a) IXGBE_READ_REG(a, IXGBE_STATUS) + +#ifdef DEBUG +#define hw_dbg(hw, format, arg...) \ +printk(KERN_DEBUG, "%s: " format, ixgbe_get_hw_dev_name(hw), ##arg); +#else +static inline int __attribute__ ((format (printf, 2, 3))) +hw_dbg(struct ixgbe_hw *hw, const char *format, ...) +{ + return 0; +} +#endif + +#endif /* IXGBE_COMMON */ diff --git a/drivers/net/ixgbe/ixgbe_ethtool.c b/drivers/net/ixgbe/ixgbe_ethtool.c new file mode 100644 index 0000000000000000000000000000000000000000..a4e576a0c5435c3f16eccab1b7d7e35ade2ccb12 --- /dev/null +++ b/drivers/net/ixgbe/ixgbe_ethtool.c @@ -0,0 +1,948 @@ +/******************************************************************************* + + Intel 10 Gigabit PCI Express Linux driver + Copyright(c) 1999 - 2007 Intel Corporation. 
+ + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + Linux NICS + e1000-devel Mailing List + Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +/* ethtool support for ixgbe */ + +#include +#include +#include +#include +#include +#include +#include + +#include "ixgbe.h" + + +#define IXGBE_ALL_RAR_ENTRIES 16 + +struct ixgbe_stats { + char stat_string[ETH_GSTRING_LEN]; + int sizeof_stat; + int stat_offset; +}; + +#define IXGBE_STAT(m) sizeof(((struct ixgbe_adapter *)0)->m), \ + offsetof(struct ixgbe_adapter, m) +static struct ixgbe_stats ixgbe_gstrings_stats[] = { + {"rx_packets", IXGBE_STAT(net_stats.rx_packets)}, + {"tx_packets", IXGBE_STAT(net_stats.tx_packets)}, + {"rx_bytes", IXGBE_STAT(net_stats.rx_bytes)}, + {"tx_bytes", IXGBE_STAT(net_stats.tx_bytes)}, + {"lsc_int", IXGBE_STAT(lsc_int)}, + {"tx_busy", IXGBE_STAT(tx_busy)}, + {"non_eop_descs", IXGBE_STAT(non_eop_descs)}, + {"rx_errors", IXGBE_STAT(net_stats.rx_errors)}, + {"tx_errors", IXGBE_STAT(net_stats.tx_errors)}, + {"rx_dropped", IXGBE_STAT(net_stats.rx_dropped)}, + {"tx_dropped", IXGBE_STAT(net_stats.tx_dropped)}, + {"multicast", IXGBE_STAT(net_stats.multicast)}, + {"broadcast", IXGBE_STAT(stats.bprc)}, + {"rx_no_buffer_count", IXGBE_STAT(stats.rnbc[0]) }, + {"collisions", IXGBE_STAT(net_stats.collisions)}, + {"rx_over_errors", IXGBE_STAT(net_stats.rx_over_errors)}, + {"rx_crc_errors", IXGBE_STAT(net_stats.rx_crc_errors)}, + {"rx_frame_errors", IXGBE_STAT(net_stats.rx_frame_errors)}, + {"rx_fifo_errors", IXGBE_STAT(net_stats.rx_fifo_errors)}, + {"rx_missed_errors", IXGBE_STAT(net_stats.rx_missed_errors)}, + {"tx_aborted_errors", IXGBE_STAT(net_stats.tx_aborted_errors)}, + {"tx_carrier_errors", IXGBE_STAT(net_stats.tx_carrier_errors)}, + {"tx_fifo_errors", IXGBE_STAT(net_stats.tx_fifo_errors)}, + {"tx_heartbeat_errors", IXGBE_STAT(net_stats.tx_heartbeat_errors)}, + {"tx_timeout_count", IXGBE_STAT(tx_timeout_count)}, + {"tx_restart_queue", IXGBE_STAT(restart_queue)}, + {"rx_long_length_errors", IXGBE_STAT(stats.roc)}, + {"rx_short_length_errors", IXGBE_STAT(stats.ruc)}, + {"tx_tcp4_seg_ctxt", IXGBE_STAT(hw_tso_ctxt)}, + {"tx_tcp6_seg_ctxt", IXGBE_STAT(hw_tso6_ctxt)}, + {"tx_flow_control_xon", IXGBE_STAT(stats.lxontxc)}, + {"rx_flow_control_xon", IXGBE_STAT(stats.lxonrxc)}, + {"tx_flow_control_xoff", IXGBE_STAT(stats.lxofftxc)}, + {"rx_flow_control_xoff", IXGBE_STAT(stats.lxoffrxc)}, + {"rx_csum_offload_good", IXGBE_STAT(hw_csum_rx_good)}, + {"rx_csum_offload_errors", IXGBE_STAT(hw_csum_rx_error)}, + {"tx_csum_offload_ctxt", IXGBE_STAT(hw_csum_tx_good)}, + {"rx_header_split", IXGBE_STAT(rx_hdr_split)}, + {"alloc_rx_page_failed", IXGBE_STAT(alloc_rx_page_failed)}, + {"alloc_rx_buff_failed", 
IXGBE_STAT(alloc_rx_buff_failed)}, +}; + +#define IXGBE_QUEUE_STATS_LEN \ + ((((struct ixgbe_adapter *)netdev->priv)->num_tx_queues + \ + ((struct ixgbe_adapter *)netdev->priv)->num_rx_queues) * \ + (sizeof(struct ixgbe_queue_stats) / sizeof(u64))) +#define IXGBE_GLOBAL_STATS_LEN \ + sizeof(ixgbe_gstrings_stats) / sizeof(struct ixgbe_stats) +#define IXGBE_STATS_LEN (IXGBE_GLOBAL_STATS_LEN + IXGBE_QUEUE_STATS_LEN) + +static int ixgbe_get_settings(struct net_device *netdev, + struct ethtool_cmd *ecmd) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + + ecmd->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE); + ecmd->advertising = (ADVERTISED_10000baseT_Full | ADVERTISED_FIBRE); + ecmd->port = PORT_FIBRE; + ecmd->transceiver = XCVR_EXTERNAL; + + if (netif_carrier_ok(adapter->netdev)) { + ecmd->speed = SPEED_10000; + ecmd->duplex = DUPLEX_FULL; + } else { + ecmd->speed = -1; + ecmd->duplex = -1; + } + + ecmd->autoneg = AUTONEG_DISABLE; + return 0; +} + +static int ixgbe_set_settings(struct net_device *netdev, + struct ethtool_cmd *ecmd) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + + if (ecmd->autoneg == AUTONEG_ENABLE || + ecmd->speed + ecmd->duplex != SPEED_10000 + DUPLEX_FULL) + return -EINVAL; + + if (netif_running(adapter->netdev)) { + ixgbe_down(adapter); + ixgbe_reset(adapter); + ixgbe_up(adapter); + } else { + ixgbe_reset(adapter); + } + + return 0; +} + +static void ixgbe_get_pauseparam(struct net_device *netdev, + struct ethtool_pauseparam *pause) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + struct ixgbe_hw *hw = &adapter->hw; + + pause->autoneg = AUTONEG_DISABLE; + + if (hw->fc.type == ixgbe_fc_rx_pause) { + pause->rx_pause = 1; + } else if (hw->fc.type == ixgbe_fc_tx_pause) { + pause->tx_pause = 1; + } else if (hw->fc.type == ixgbe_fc_full) { + pause->rx_pause = 1; + pause->tx_pause = 1; + } +} + +static int ixgbe_set_pauseparam(struct net_device *netdev, + struct ethtool_pauseparam *pause) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + struct ixgbe_hw *hw = &adapter->hw; + + if (pause->autoneg == AUTONEG_ENABLE) + return -EINVAL; + + if (pause->rx_pause && pause->tx_pause) + hw->fc.type = ixgbe_fc_full; + else if (pause->rx_pause && !pause->tx_pause) + hw->fc.type = ixgbe_fc_rx_pause; + else if (!pause->rx_pause && pause->tx_pause) + hw->fc.type = ixgbe_fc_tx_pause; + else if (!pause->rx_pause && !pause->tx_pause) + hw->fc.type = ixgbe_fc_none; + + hw->fc.original_type = hw->fc.type; + + if (netif_running(adapter->netdev)) { + ixgbe_down(adapter); + ixgbe_up(adapter); + } else { + ixgbe_reset(adapter); + } + + return 0; +} + +static u32 ixgbe_get_rx_csum(struct net_device *netdev) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + return (adapter->flags & IXGBE_FLAG_RX_CSUM_ENABLED); +} + +static int ixgbe_set_rx_csum(struct net_device *netdev, u32 data) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + if (data) + adapter->flags |= IXGBE_FLAG_RX_CSUM_ENABLED; + else + adapter->flags &= ~IXGBE_FLAG_RX_CSUM_ENABLED; + + if (netif_running(netdev)) { + ixgbe_down(adapter); + ixgbe_up(adapter); + } else { + ixgbe_reset(adapter); + } + + return 0; +} + +static u32 ixgbe_get_tx_csum(struct net_device *netdev) +{ + return (netdev->features & NETIF_F_HW_CSUM) != 0; +} + +static int ixgbe_set_tx_csum(struct net_device *netdev, u32 data) +{ + if (data) + netdev->features |= NETIF_F_HW_CSUM; + else + netdev->features &= ~NETIF_F_HW_CSUM; + + return 0; +} + +static int ixgbe_set_tso(struct net_device *netdev, u32 
data) +{ + + if (data) { + netdev->features |= NETIF_F_TSO; + netdev->features |= NETIF_F_TSO6; + } else { + netdev->features &= ~NETIF_F_TSO; + netdev->features &= ~NETIF_F_TSO6; + } + return 0; +} + +static u32 ixgbe_get_msglevel(struct net_device *netdev) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + return adapter->msg_enable; +} + +static void ixgbe_set_msglevel(struct net_device *netdev, u32 data) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + adapter->msg_enable = data; +} + +static int ixgbe_get_regs_len(struct net_device *netdev) +{ +#define IXGBE_REGS_LEN 1128 + return IXGBE_REGS_LEN * sizeof(u32); +} + +#define IXGBE_GET_STAT(_A_, _R_) _A_->stats._R_ + +static void ixgbe_get_regs(struct net_device *netdev, + struct ethtool_regs *regs, void *p) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + struct ixgbe_hw *hw = &adapter->hw; + u32 *regs_buff = p; + u8 i; + + memset(p, 0, IXGBE_REGS_LEN * sizeof(u32)); + + regs->version = (1 << 24) | hw->revision_id << 16 | hw->device_id; + + /* General Registers */ + regs_buff[0] = IXGBE_READ_REG(hw, IXGBE_CTRL); + regs_buff[1] = IXGBE_READ_REG(hw, IXGBE_STATUS); + regs_buff[2] = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT); + regs_buff[3] = IXGBE_READ_REG(hw, IXGBE_ESDP); + regs_buff[4] = IXGBE_READ_REG(hw, IXGBE_EODSDP); + regs_buff[5] = IXGBE_READ_REG(hw, IXGBE_LEDCTL); + regs_buff[6] = IXGBE_READ_REG(hw, IXGBE_FRTIMER); + regs_buff[7] = IXGBE_READ_REG(hw, IXGBE_TCPTIMER); + + /* NVM Register */ + regs_buff[8] = IXGBE_READ_REG(hw, IXGBE_EEC); + regs_buff[9] = IXGBE_READ_REG(hw, IXGBE_EERD); + regs_buff[10] = IXGBE_READ_REG(hw, IXGBE_FLA); + regs_buff[11] = IXGBE_READ_REG(hw, IXGBE_EEMNGCTL); + regs_buff[12] = IXGBE_READ_REG(hw, IXGBE_EEMNGDATA); + regs_buff[13] = IXGBE_READ_REG(hw, IXGBE_FLMNGCTL); + regs_buff[14] = IXGBE_READ_REG(hw, IXGBE_FLMNGDATA); + regs_buff[15] = IXGBE_READ_REG(hw, IXGBE_FLMNGCNT); + regs_buff[16] = IXGBE_READ_REG(hw, IXGBE_FLOP); + regs_buff[17] = IXGBE_READ_REG(hw, IXGBE_GRC); + + /* Interrupt */ + regs_buff[18] = IXGBE_READ_REG(hw, IXGBE_EICR); + regs_buff[19] = IXGBE_READ_REG(hw, IXGBE_EICS); + regs_buff[20] = IXGBE_READ_REG(hw, IXGBE_EIMS); + regs_buff[21] = IXGBE_READ_REG(hw, IXGBE_EIMC); + regs_buff[22] = IXGBE_READ_REG(hw, IXGBE_EIAC); + regs_buff[23] = IXGBE_READ_REG(hw, IXGBE_EIAM); + regs_buff[24] = IXGBE_READ_REG(hw, IXGBE_EITR(0)); + regs_buff[25] = IXGBE_READ_REG(hw, IXGBE_IVAR(0)); + regs_buff[26] = IXGBE_READ_REG(hw, IXGBE_MSIXT); + regs_buff[27] = IXGBE_READ_REG(hw, IXGBE_MSIXPBA); + regs_buff[28] = IXGBE_READ_REG(hw, IXGBE_PBACL); + regs_buff[29] = IXGBE_READ_REG(hw, IXGBE_GPIE); + + /* Flow Control */ + regs_buff[30] = IXGBE_READ_REG(hw, IXGBE_PFCTOP); + regs_buff[31] = IXGBE_READ_REG(hw, IXGBE_FCTTV(0)); + regs_buff[32] = IXGBE_READ_REG(hw, IXGBE_FCTTV(1)); + regs_buff[33] = IXGBE_READ_REG(hw, IXGBE_FCTTV(2)); + regs_buff[34] = IXGBE_READ_REG(hw, IXGBE_FCTTV(3)); + for (i = 0; i < 8; i++) + regs_buff[35 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTL(i)); + for (i = 0; i < 8; i++) + regs_buff[43 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTH(i)); + regs_buff[51] = IXGBE_READ_REG(hw, IXGBE_FCRTV); + regs_buff[52] = IXGBE_READ_REG(hw, IXGBE_TFCS); + + /* Receive DMA */ + for (i = 0; i < 64; i++) + regs_buff[53 + i] = IXGBE_READ_REG(hw, IXGBE_RDBAL(i)); + for (i = 0; i < 64; i++) + regs_buff[117 + i] = IXGBE_READ_REG(hw, IXGBE_RDBAH(i)); + for (i = 0; i < 64; i++) + regs_buff[181 + i] = IXGBE_READ_REG(hw, IXGBE_RDLEN(i)); + for (i = 0; i < 64; i++) + regs_buff[245 + i] = IXGBE_READ_REG(hw, 
IXGBE_RDH(i)); + for (i = 0; i < 64; i++) + regs_buff[309 + i] = IXGBE_READ_REG(hw, IXGBE_RDT(i)); + for (i = 0; i < 64; i++) + regs_buff[373 + i] = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i)); + for (i = 0; i < 16; i++) + regs_buff[437 + i] = IXGBE_READ_REG(hw, IXGBE_SRRCTL(i)); + for (i = 0; i < 16; i++) + regs_buff[453 + i] = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i)); + regs_buff[469] = IXGBE_READ_REG(hw, IXGBE_RDRXCTL); + for (i = 0; i < 8; i++) + regs_buff[470 + i] = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i)); + regs_buff[478] = IXGBE_READ_REG(hw, IXGBE_RXCTRL); + regs_buff[479] = IXGBE_READ_REG(hw, IXGBE_DROPEN); + + /* Receive */ + regs_buff[480] = IXGBE_READ_REG(hw, IXGBE_RXCSUM); + regs_buff[481] = IXGBE_READ_REG(hw, IXGBE_RFCTL); + for (i = 0; i < 16; i++) + regs_buff[482 + i] = IXGBE_READ_REG(hw, IXGBE_RAL(i)); + for (i = 0; i < 16; i++) + regs_buff[498 + i] = IXGBE_READ_REG(hw, IXGBE_RAH(i)); + regs_buff[514] = IXGBE_READ_REG(hw, IXGBE_PSRTYPE); + regs_buff[515] = IXGBE_READ_REG(hw, IXGBE_FCTRL); + regs_buff[516] = IXGBE_READ_REG(hw, IXGBE_VLNCTRL); + regs_buff[517] = IXGBE_READ_REG(hw, IXGBE_MCSTCTRL); + regs_buff[518] = IXGBE_READ_REG(hw, IXGBE_MRQC); + regs_buff[519] = IXGBE_READ_REG(hw, IXGBE_VMD_CTL); + for (i = 0; i < 8; i++) + regs_buff[520 + i] = IXGBE_READ_REG(hw, IXGBE_IMIR(i)); + for (i = 0; i < 8; i++) + regs_buff[528 + i] = IXGBE_READ_REG(hw, IXGBE_IMIREXT(i)); + regs_buff[536] = IXGBE_READ_REG(hw, IXGBE_IMIRVP); + + /* Transmit */ + for (i = 0; i < 32; i++) + regs_buff[537 + i] = IXGBE_READ_REG(hw, IXGBE_TDBAL(i)); + for (i = 0; i < 32; i++) + regs_buff[569 + i] = IXGBE_READ_REG(hw, IXGBE_TDBAH(i)); + for (i = 0; i < 32; i++) + regs_buff[601 + i] = IXGBE_READ_REG(hw, IXGBE_TDLEN(i)); + for (i = 0; i < 32; i++) + regs_buff[633 + i] = IXGBE_READ_REG(hw, IXGBE_TDH(i)); + for (i = 0; i < 32; i++) + regs_buff[665 + i] = IXGBE_READ_REG(hw, IXGBE_TDT(i)); + for (i = 0; i < 32; i++) + regs_buff[697 + i] = IXGBE_READ_REG(hw, IXGBE_TXDCTL(i)); + for (i = 0; i < 32; i++) + regs_buff[729 + i] = IXGBE_READ_REG(hw, IXGBE_TDWBAL(i)); + for (i = 0; i < 32; i++) + regs_buff[761 + i] = IXGBE_READ_REG(hw, IXGBE_TDWBAH(i)); + regs_buff[793] = IXGBE_READ_REG(hw, IXGBE_DTXCTL); + for (i = 0; i < 16; i++) + regs_buff[794 + i] = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(i)); + regs_buff[810] = IXGBE_READ_REG(hw, IXGBE_TIPG); + for (i = 0; i < 8; i++) + regs_buff[811 + i] = IXGBE_READ_REG(hw, IXGBE_TXPBSIZE(i)); + regs_buff[819] = IXGBE_READ_REG(hw, IXGBE_MNGTXMAP); + + /* Wake Up */ + regs_buff[820] = IXGBE_READ_REG(hw, IXGBE_WUC); + regs_buff[821] = IXGBE_READ_REG(hw, IXGBE_WUFC); + regs_buff[822] = IXGBE_READ_REG(hw, IXGBE_WUS); + regs_buff[823] = IXGBE_READ_REG(hw, IXGBE_IPAV); + regs_buff[824] = IXGBE_READ_REG(hw, IXGBE_IP4AT); + regs_buff[825] = IXGBE_READ_REG(hw, IXGBE_IP6AT); + regs_buff[826] = IXGBE_READ_REG(hw, IXGBE_WUPL); + regs_buff[827] = IXGBE_READ_REG(hw, IXGBE_WUPM); + regs_buff[828] = IXGBE_READ_REG(hw, IXGBE_FHFT); + + /* DCE */ + regs_buff[829] = IXGBE_READ_REG(hw, IXGBE_RMCS); + regs_buff[830] = IXGBE_READ_REG(hw, IXGBE_DPMCS); + regs_buff[831] = IXGBE_READ_REG(hw, IXGBE_PDPMCS); + regs_buff[832] = IXGBE_READ_REG(hw, IXGBE_RUPPBMR); + for (i = 0; i < 8; i++) + regs_buff[833 + i] = IXGBE_READ_REG(hw, IXGBE_RT2CR(i)); + for (i = 0; i < 8; i++) + regs_buff[841 + i] = IXGBE_READ_REG(hw, IXGBE_RT2SR(i)); + for (i = 0; i < 8; i++) + regs_buff[849 + i] = IXGBE_READ_REG(hw, IXGBE_TDTQ2TCCR(i)); + for (i = 0; i < 8; i++) + regs_buff[857 + i] = IXGBE_READ_REG(hw, IXGBE_TDTQ2TCSR(i)); + for (i = 0; 
i < 8; i++) + regs_buff[865 + i] = IXGBE_READ_REG(hw, IXGBE_TDPT2TCCR(i)); + for (i = 0; i < 8; i++) + regs_buff[873 + i] = IXGBE_READ_REG(hw, IXGBE_TDPT2TCSR(i)); + + /* Statistics */ + regs_buff[881] = IXGBE_GET_STAT(adapter, crcerrs); + regs_buff[882] = IXGBE_GET_STAT(adapter, illerrc); + regs_buff[883] = IXGBE_GET_STAT(adapter, errbc); + regs_buff[884] = IXGBE_GET_STAT(adapter, mspdc); + for (i = 0; i < 8; i++) + regs_buff[885 + i] = IXGBE_GET_STAT(adapter, mpc[i]); + regs_buff[893] = IXGBE_GET_STAT(adapter, mlfc); + regs_buff[894] = IXGBE_GET_STAT(adapter, mrfc); + regs_buff[895] = IXGBE_GET_STAT(adapter, rlec); + regs_buff[896] = IXGBE_GET_STAT(adapter, lxontxc); + regs_buff[897] = IXGBE_GET_STAT(adapter, lxonrxc); + regs_buff[898] = IXGBE_GET_STAT(adapter, lxofftxc); + regs_buff[899] = IXGBE_GET_STAT(adapter, lxoffrxc); + for (i = 0; i < 8; i++) + regs_buff[900 + i] = IXGBE_GET_STAT(adapter, pxontxc[i]); + for (i = 0; i < 8; i++) + regs_buff[908 + i] = IXGBE_GET_STAT(adapter, pxonrxc[i]); + for (i = 0; i < 8; i++) + regs_buff[916 + i] = IXGBE_GET_STAT(adapter, pxofftxc[i]); + for (i = 0; i < 8; i++) + regs_buff[924 + i] = IXGBE_GET_STAT(adapter, pxoffrxc[i]); + regs_buff[932] = IXGBE_GET_STAT(adapter, prc64); + regs_buff[933] = IXGBE_GET_STAT(adapter, prc127); + regs_buff[934] = IXGBE_GET_STAT(adapter, prc255); + regs_buff[935] = IXGBE_GET_STAT(adapter, prc511); + regs_buff[936] = IXGBE_GET_STAT(adapter, prc1023); + regs_buff[937] = IXGBE_GET_STAT(adapter, prc1522); + regs_buff[938] = IXGBE_GET_STAT(adapter, gprc); + regs_buff[939] = IXGBE_GET_STAT(adapter, bprc); + regs_buff[940] = IXGBE_GET_STAT(adapter, mprc); + regs_buff[941] = IXGBE_GET_STAT(adapter, gptc); + regs_buff[942] = IXGBE_GET_STAT(adapter, gorc); + regs_buff[944] = IXGBE_GET_STAT(adapter, gotc); + for (i = 0; i < 8; i++) + regs_buff[946 + i] = IXGBE_GET_STAT(adapter, rnbc[i]); + regs_buff[954] = IXGBE_GET_STAT(adapter, ruc); + regs_buff[955] = IXGBE_GET_STAT(adapter, rfc); + regs_buff[956] = IXGBE_GET_STAT(adapter, roc); + regs_buff[957] = IXGBE_GET_STAT(adapter, rjc); + regs_buff[958] = IXGBE_GET_STAT(adapter, mngprc); + regs_buff[959] = IXGBE_GET_STAT(adapter, mngpdc); + regs_buff[960] = IXGBE_GET_STAT(adapter, mngptc); + regs_buff[961] = IXGBE_GET_STAT(adapter, tor); + regs_buff[963] = IXGBE_GET_STAT(adapter, tpr); + regs_buff[964] = IXGBE_GET_STAT(adapter, tpt); + regs_buff[965] = IXGBE_GET_STAT(adapter, ptc64); + regs_buff[966] = IXGBE_GET_STAT(adapter, ptc127); + regs_buff[967] = IXGBE_GET_STAT(adapter, ptc255); + regs_buff[968] = IXGBE_GET_STAT(adapter, ptc511); + regs_buff[969] = IXGBE_GET_STAT(adapter, ptc1023); + regs_buff[970] = IXGBE_GET_STAT(adapter, ptc1522); + regs_buff[971] = IXGBE_GET_STAT(adapter, mptc); + regs_buff[972] = IXGBE_GET_STAT(adapter, bptc); + regs_buff[973] = IXGBE_GET_STAT(adapter, xec); + for (i = 0; i < 16; i++) + regs_buff[974 + i] = IXGBE_GET_STAT(adapter, qprc[i]); + for (i = 0; i < 16; i++) + regs_buff[990 + i] = IXGBE_GET_STAT(adapter, qptc[i]); + for (i = 0; i < 16; i++) + regs_buff[1006 + i] = IXGBE_GET_STAT(adapter, qbrc[i]); + for (i = 0; i < 16; i++) + regs_buff[1022 + i] = IXGBE_GET_STAT(adapter, qbtc[i]); + + /* MAC */ + regs_buff[1038] = IXGBE_READ_REG(hw, IXGBE_PCS1GCFIG); + regs_buff[1039] = IXGBE_READ_REG(hw, IXGBE_PCS1GLCTL); + regs_buff[1040] = IXGBE_READ_REG(hw, IXGBE_PCS1GLSTA); + regs_buff[1041] = IXGBE_READ_REG(hw, IXGBE_PCS1GDBG0); + regs_buff[1042] = IXGBE_READ_REG(hw, IXGBE_PCS1GDBG1); + regs_buff[1043] = IXGBE_READ_REG(hw, IXGBE_PCS1GANA); + regs_buff[1044] = 
IXGBE_READ_REG(hw, IXGBE_PCS1GANLP); + regs_buff[1045] = IXGBE_READ_REG(hw, IXGBE_PCS1GANNP); + regs_buff[1046] = IXGBE_READ_REG(hw, IXGBE_PCS1GANLPNP); + regs_buff[1047] = IXGBE_READ_REG(hw, IXGBE_HLREG0); + regs_buff[1048] = IXGBE_READ_REG(hw, IXGBE_HLREG1); + regs_buff[1049] = IXGBE_READ_REG(hw, IXGBE_PAP); + regs_buff[1050] = IXGBE_READ_REG(hw, IXGBE_MACA); + regs_buff[1051] = IXGBE_READ_REG(hw, IXGBE_APAE); + regs_buff[1052] = IXGBE_READ_REG(hw, IXGBE_ARD); + regs_buff[1053] = IXGBE_READ_REG(hw, IXGBE_AIS); + regs_buff[1054] = IXGBE_READ_REG(hw, IXGBE_MSCA); + regs_buff[1055] = IXGBE_READ_REG(hw, IXGBE_MSRWD); + regs_buff[1056] = IXGBE_READ_REG(hw, IXGBE_MLADD); + regs_buff[1057] = IXGBE_READ_REG(hw, IXGBE_MHADD); + regs_buff[1058] = IXGBE_READ_REG(hw, IXGBE_TREG); + regs_buff[1059] = IXGBE_READ_REG(hw, IXGBE_PCSS1); + regs_buff[1060] = IXGBE_READ_REG(hw, IXGBE_PCSS2); + regs_buff[1061] = IXGBE_READ_REG(hw, IXGBE_XPCSS); + regs_buff[1062] = IXGBE_READ_REG(hw, IXGBE_SERDESC); + regs_buff[1063] = IXGBE_READ_REG(hw, IXGBE_MACS); + regs_buff[1064] = IXGBE_READ_REG(hw, IXGBE_AUTOC); + regs_buff[1065] = IXGBE_READ_REG(hw, IXGBE_LINKS); + regs_buff[1066] = IXGBE_READ_REG(hw, IXGBE_AUTOC2); + regs_buff[1067] = IXGBE_READ_REG(hw, IXGBE_AUTOC3); + regs_buff[1068] = IXGBE_READ_REG(hw, IXGBE_ANLP1); + regs_buff[1069] = IXGBE_READ_REG(hw, IXGBE_ANLP2); + regs_buff[1070] = IXGBE_READ_REG(hw, IXGBE_ATLASCTL); + + /* Diagnostic */ + regs_buff[1071] = IXGBE_READ_REG(hw, IXGBE_RDSTATCTL); + for (i = 0; i < 8; i++) + regs_buff[1072] = IXGBE_READ_REG(hw, IXGBE_RDSTAT(i)); + regs_buff[1080] = IXGBE_READ_REG(hw, IXGBE_RDHMPN); + regs_buff[1081] = IXGBE_READ_REG(hw, IXGBE_RIC_DW0); + regs_buff[1082] = IXGBE_READ_REG(hw, IXGBE_RIC_DW1); + regs_buff[1083] = IXGBE_READ_REG(hw, IXGBE_RIC_DW2); + regs_buff[1084] = IXGBE_READ_REG(hw, IXGBE_RIC_DW3); + regs_buff[1085] = IXGBE_READ_REG(hw, IXGBE_RDPROBE); + regs_buff[1086] = IXGBE_READ_REG(hw, IXGBE_TDSTATCTL); + for (i = 0; i < 8; i++) + regs_buff[1087] = IXGBE_READ_REG(hw, IXGBE_TDSTAT(i)); + regs_buff[1095] = IXGBE_READ_REG(hw, IXGBE_TDHMPN); + regs_buff[1096] = IXGBE_READ_REG(hw, IXGBE_TIC_DW0); + regs_buff[1097] = IXGBE_READ_REG(hw, IXGBE_TIC_DW1); + regs_buff[1098] = IXGBE_READ_REG(hw, IXGBE_TIC_DW2); + regs_buff[1099] = IXGBE_READ_REG(hw, IXGBE_TIC_DW3); + regs_buff[1100] = IXGBE_READ_REG(hw, IXGBE_TDPROBE); + regs_buff[1101] = IXGBE_READ_REG(hw, IXGBE_TXBUFCTRL); + regs_buff[1102] = IXGBE_READ_REG(hw, IXGBE_TXBUFDATA0); + regs_buff[1103] = IXGBE_READ_REG(hw, IXGBE_TXBUFDATA1); + regs_buff[1104] = IXGBE_READ_REG(hw, IXGBE_TXBUFDATA2); + regs_buff[1105] = IXGBE_READ_REG(hw, IXGBE_TXBUFDATA3); + regs_buff[1106] = IXGBE_READ_REG(hw, IXGBE_RXBUFCTRL); + regs_buff[1107] = IXGBE_READ_REG(hw, IXGBE_RXBUFDATA0); + regs_buff[1108] = IXGBE_READ_REG(hw, IXGBE_RXBUFDATA1); + regs_buff[1109] = IXGBE_READ_REG(hw, IXGBE_RXBUFDATA2); + regs_buff[1110] = IXGBE_READ_REG(hw, IXGBE_RXBUFDATA3); + for (i = 0; i < 8; i++) + regs_buff[1111] = IXGBE_READ_REG(hw, IXGBE_PCIE_DIAG(i)); + regs_buff[1119] = IXGBE_READ_REG(hw, IXGBE_RFVAL); + regs_buff[1120] = IXGBE_READ_REG(hw, IXGBE_MDFTC1); + regs_buff[1121] = IXGBE_READ_REG(hw, IXGBE_MDFTC2); + regs_buff[1122] = IXGBE_READ_REG(hw, IXGBE_MDFTFIFO1); + regs_buff[1123] = IXGBE_READ_REG(hw, IXGBE_MDFTFIFO2); + regs_buff[1124] = IXGBE_READ_REG(hw, IXGBE_MDFTS); + regs_buff[1125] = IXGBE_READ_REG(hw, IXGBE_PCIEECCCTL); + regs_buff[1126] = IXGBE_READ_REG(hw, IXGBE_PBTXECC); + regs_buff[1127] = IXGBE_READ_REG(hw, IXGBE_PBRXECC); +} + +static 
int ixgbe_get_eeprom_len(struct net_device *netdev) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + return adapter->hw.eeprom.word_size * 2; +} + +static int ixgbe_get_eeprom(struct net_device *netdev, + struct ethtool_eeprom *eeprom, u8 *bytes) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + struct ixgbe_hw *hw = &adapter->hw; + u16 *eeprom_buff; + int first_word, last_word, eeprom_len; + int ret_val = 0; + u16 i; + + if (eeprom->len == 0) + return -EINVAL; + + eeprom->magic = hw->vendor_id | (hw->device_id << 16); + + first_word = eeprom->offset >> 1; + last_word = (eeprom->offset + eeprom->len - 1) >> 1; + eeprom_len = last_word - first_word + 1; + + eeprom_buff = kmalloc(sizeof(u16) * eeprom_len, GFP_KERNEL); + if (!eeprom_buff) + return -ENOMEM; + + for (i = 0; i < eeprom_len; i++) { + if ((ret_val = ixgbe_read_eeprom(hw, first_word + i, + &eeprom_buff[i]))) + break; + } + + /* Device's eeprom is always little-endian, word addressable */ + for (i = 0; i < eeprom_len; i++) + le16_to_cpus(&eeprom_buff[i]); + + memcpy(bytes, (u8 *)eeprom_buff + (eeprom->offset & 1), eeprom->len); + kfree(eeprom_buff); + + return ret_val; +} + +static void ixgbe_get_drvinfo(struct net_device *netdev, + struct ethtool_drvinfo *drvinfo) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + + strncpy(drvinfo->driver, ixgbe_driver_name, 32); + strncpy(drvinfo->version, ixgbe_driver_version, 32); + strncpy(drvinfo->fw_version, "N/A", 32); + strncpy(drvinfo->bus_info, pci_name(adapter->pdev), 32); + drvinfo->n_stats = IXGBE_STATS_LEN; + drvinfo->regdump_len = ixgbe_get_regs_len(netdev); +} + +static void ixgbe_get_ringparam(struct net_device *netdev, + struct ethtool_ringparam *ring) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + struct ixgbe_ring *tx_ring = adapter->tx_ring; + struct ixgbe_ring *rx_ring = adapter->rx_ring; + + ring->rx_max_pending = IXGBE_MAX_RXD; + ring->tx_max_pending = IXGBE_MAX_TXD; + ring->rx_mini_max_pending = 0; + ring->rx_jumbo_max_pending = 0; + ring->rx_pending = rx_ring->count; + ring->tx_pending = tx_ring->count; + ring->rx_mini_pending = 0; + ring->rx_jumbo_pending = 0; +} + +static int ixgbe_set_ringparam(struct net_device *netdev, + struct ethtool_ringparam *ring) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + struct ixgbe_tx_buffer *old_buf; + struct ixgbe_rx_buffer *old_rx_buf; + void *old_desc; + int i, err; + u32 new_rx_count, new_tx_count, old_size; + dma_addr_t old_dma; + + if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending)) + return -EINVAL; + + new_rx_count = max(ring->rx_pending, (u32)IXGBE_MIN_RXD); + new_rx_count = min(new_rx_count, (u32)IXGBE_MAX_RXD); + new_rx_count = ALIGN(new_rx_count, IXGBE_REQ_RX_DESCRIPTOR_MULTIPLE); + + new_tx_count = max(ring->tx_pending, (u32)IXGBE_MIN_TXD); + new_tx_count = min(new_tx_count, (u32)IXGBE_MAX_TXD); + new_tx_count = ALIGN(new_tx_count, IXGBE_REQ_TX_DESCRIPTOR_MULTIPLE); + + if ((new_tx_count == adapter->tx_ring->count) && + (new_rx_count == adapter->rx_ring->count)) { + /* nothing to do */ + return 0; + } + + if (netif_running(adapter->netdev)) + ixgbe_down(adapter); + + /* + * We can't just free everything and then setup again, + * because the ISRs in MSI-X mode get passed pointers + * to the tx and rx ring structs. 
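+	 * Instead, each ring's buffer info, descriptor memory, size and DMA
+	 * handle are saved first, the new allocation is attempted, and the
+	 * old resources are freed only on success (or put back on failure),
+	 * so the ring structures themselves never move.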
+ */ + if (new_tx_count != adapter->tx_ring->count) { + for (i = 0; i < adapter->num_tx_queues; i++) { + /* Save existing descriptor ring */ + old_buf = adapter->tx_ring[i].tx_buffer_info; + old_desc = adapter->tx_ring[i].desc; + old_size = adapter->tx_ring[i].size; + old_dma = adapter->tx_ring[i].dma; + /* Try to allocate a new one */ + adapter->tx_ring[i].tx_buffer_info = NULL; + adapter->tx_ring[i].desc = NULL; + adapter->tx_ring[i].count = new_tx_count; + err = ixgbe_setup_tx_resources(adapter, + &adapter->tx_ring[i]); + if (err) { + /* Restore the old one so at least + the adapter still works, even if + we failed the request */ + adapter->tx_ring[i].tx_buffer_info = old_buf; + adapter->tx_ring[i].desc = old_desc; + adapter->tx_ring[i].size = old_size; + adapter->tx_ring[i].dma = old_dma; + goto err_setup; + } + /* Free the old buffer manually */ + vfree(old_buf); + pci_free_consistent(adapter->pdev, old_size, + old_desc, old_dma); + } + } + + if (new_rx_count != adapter->rx_ring->count) { + for (i = 0; i < adapter->num_rx_queues; i++) { + + old_rx_buf = adapter->rx_ring[i].rx_buffer_info; + old_desc = adapter->rx_ring[i].desc; + old_size = adapter->rx_ring[i].size; + old_dma = adapter->rx_ring[i].dma; + + adapter->rx_ring[i].rx_buffer_info = NULL; + adapter->rx_ring[i].desc = NULL; + adapter->rx_ring[i].dma = 0; + adapter->rx_ring[i].count = new_rx_count; + err = ixgbe_setup_rx_resources(adapter, + &adapter->rx_ring[i]); + if (err) { + adapter->rx_ring[i].rx_buffer_info = old_rx_buf; + adapter->rx_ring[i].desc = old_desc; + adapter->rx_ring[i].size = old_size; + adapter->rx_ring[i].dma = old_dma; + goto err_setup; + } + + vfree(old_rx_buf); + pci_free_consistent(adapter->pdev, old_size, old_desc, + old_dma); + } + } + + err = 0; +err_setup: + if (netif_running(adapter->netdev)) + ixgbe_up(adapter); + + return err; +} + +static int ixgbe_get_sset_count(struct net_device *netdev, int sset) +{ + switch (sset) { + case ETH_SS_STATS: + return IXGBE_STATS_LEN; + default: + return -EOPNOTSUPP; + } +} + +static void ixgbe_get_ethtool_stats(struct net_device *netdev, + struct ethtool_stats *stats, u64 *data) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + u64 *queue_stat; + int stat_count = sizeof(struct ixgbe_queue_stats) / sizeof(u64); + int j, k; + int i; + + ixgbe_update_stats(adapter); + for (i = 0; i < IXGBE_GLOBAL_STATS_LEN; i++) { + char *p = (char *)adapter + ixgbe_gstrings_stats[i].stat_offset; + data[i] = (ixgbe_gstrings_stats[i].sizeof_stat == + sizeof(u64)) ? 
*(u64 *)p : *(u32 *)p; + } + for (j = 0; j < adapter->num_tx_queues; j++) { + queue_stat = (u64 *)&adapter->tx_ring[j].stats; + for (k = 0; k < stat_count; k++) + data[i + k] = queue_stat[k]; + i += k; + } + for (j = 0; j < adapter->num_rx_queues; j++) { + queue_stat = (u64 *)&adapter->rx_ring[j].stats; + for (k = 0; k < stat_count; k++) + data[i + k] = queue_stat[k]; + i += k; + } +} + +static void ixgbe_get_strings(struct net_device *netdev, u32 stringset, + u8 *data) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + u8 *p = data; + int i; + + switch (stringset) { + case ETH_SS_STATS: + for (i = 0; i < IXGBE_GLOBAL_STATS_LEN; i++) { + memcpy(p, ixgbe_gstrings_stats[i].stat_string, + ETH_GSTRING_LEN); + p += ETH_GSTRING_LEN; + } + for (i = 0; i < adapter->num_tx_queues; i++) { + sprintf(p, "tx_queue_%u_packets", i); + p += ETH_GSTRING_LEN; + sprintf(p, "tx_queue_%u_bytes", i); + p += ETH_GSTRING_LEN; + } + for (i = 0; i < adapter->num_rx_queues; i++) { + sprintf(p, "rx_queue_%u_packets", i); + p += ETH_GSTRING_LEN; + sprintf(p, "rx_queue_%u_bytes", i); + p += ETH_GSTRING_LEN; + } +/* BUG_ON(p - data != IXGBE_STATS_LEN * ETH_GSTRING_LEN); */ + break; + } +} + + +static void ixgbe_get_wol(struct net_device *netdev, + struct ethtool_wolinfo *wol) +{ + wol->supported = 0; + wol->wolopts = 0; + + return; +} + +static int ixgbe_nway_reset(struct net_device *netdev) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + + if (netif_running(netdev)) { + ixgbe_down(adapter); + ixgbe_reset(adapter); + ixgbe_up(adapter); + } + + return 0; +} + +static int ixgbe_phys_id(struct net_device *netdev, u32 data) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + u32 led_reg = IXGBE_READ_REG(&adapter->hw, IXGBE_LEDCTL); + u32 i; + + if (!data || data > 300) + data = 300; + + for (i = 0; i < (data * 1000); i += 400) { + ixgbe_led_on(&adapter->hw, IXGBE_LED_ON); + msleep_interruptible(200); + ixgbe_led_off(&adapter->hw, IXGBE_LED_ON); + msleep_interruptible(200); + } + + /* Restore LED settings */ + IXGBE_WRITE_REG(&adapter->hw, IXGBE_LEDCTL, led_reg); + + return 0; +} + +static int ixgbe_get_coalesce(struct net_device *netdev, + struct ethtool_coalesce *ec) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + + if (adapter->rx_eitr == 0) + ec->rx_coalesce_usecs = 0; + else + ec->rx_coalesce_usecs = 1000000 / adapter->rx_eitr; + + if (adapter->tx_eitr == 0) + ec->tx_coalesce_usecs = 0; + else + ec->tx_coalesce_usecs = 1000000 / adapter->tx_eitr; + + ec->tx_max_coalesced_frames_irq = adapter->tx_ring[0].work_limit; + return 0; +} + +static int ixgbe_set_coalesce(struct net_device *netdev, + struct ethtool_coalesce *ec) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + + if ((ec->rx_coalesce_usecs > IXGBE_MAX_ITR_USECS) || + ((ec->rx_coalesce_usecs > 0) && + (ec->rx_coalesce_usecs < IXGBE_MIN_ITR_USECS))) + return -EINVAL; + if ((ec->tx_coalesce_usecs > IXGBE_MAX_ITR_USECS) || + ((ec->tx_coalesce_usecs > 0) && + (ec->tx_coalesce_usecs < IXGBE_MIN_ITR_USECS))) + return -EINVAL; + + /* convert to rate of irq's per second */ + if (ec->rx_coalesce_usecs == 0) + adapter->rx_eitr = 0; + else + adapter->rx_eitr = (1000000 / ec->rx_coalesce_usecs); + + if (ec->tx_coalesce_usecs == 0) + adapter->tx_eitr = 0; + else + adapter->tx_eitr = (1000000 / ec->tx_coalesce_usecs); + + if (ec->tx_max_coalesced_frames_irq) + adapter->tx_ring[0].work_limit = + ec->tx_max_coalesced_frames_irq; + + if (netif_running(netdev)) { + ixgbe_down(adapter); + ixgbe_up(adapter); + } + + return 0; +} + + 
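+/*
+ * Note on the coalescing parameters above: rx_eitr/tx_eitr are kept as an
+ * interrupt rate rather than as a time.  For example, a request such as
+ * "ethtool -C ethX rx-usecs 125" (rx_coalesce_usecs = 125) is stored as
+ * 1000000 / 125 = 8000 interrupts per second in adapter->rx_eitr, and
+ * converted back the same way by ixgbe_get_coalesce().
+ */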
+static struct ethtool_ops ixgbe_ethtool_ops = { + .get_settings = ixgbe_get_settings, + .set_settings = ixgbe_set_settings, + .get_drvinfo = ixgbe_get_drvinfo, + .get_regs_len = ixgbe_get_regs_len, + .get_regs = ixgbe_get_regs, + .get_wol = ixgbe_get_wol, + .nway_reset = ixgbe_nway_reset, + .get_link = ethtool_op_get_link, + .get_eeprom_len = ixgbe_get_eeprom_len, + .get_eeprom = ixgbe_get_eeprom, + .get_ringparam = ixgbe_get_ringparam, + .set_ringparam = ixgbe_set_ringparam, + .get_pauseparam = ixgbe_get_pauseparam, + .set_pauseparam = ixgbe_set_pauseparam, + .get_rx_csum = ixgbe_get_rx_csum, + .set_rx_csum = ixgbe_set_rx_csum, + .get_tx_csum = ixgbe_get_tx_csum, + .set_tx_csum = ixgbe_set_tx_csum, + .get_sg = ethtool_op_get_sg, + .set_sg = ethtool_op_set_sg, + .get_msglevel = ixgbe_get_msglevel, + .set_msglevel = ixgbe_set_msglevel, + .get_tso = ethtool_op_get_tso, + .set_tso = ixgbe_set_tso, + .get_strings = ixgbe_get_strings, + .phys_id = ixgbe_phys_id, + .get_sset_count = ixgbe_get_sset_count, + .get_ethtool_stats = ixgbe_get_ethtool_stats, + .get_coalesce = ixgbe_get_coalesce, + .set_coalesce = ixgbe_set_coalesce, +}; + +void ixgbe_set_ethtool_ops(struct net_device *netdev) +{ + SET_ETHTOOL_OPS(netdev, &ixgbe_ethtool_ops); +} diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c new file mode 100644 index 0000000000000000000000000000000000000000..b75f1c6efc42b589516a1e4efa5fbd7ae504723d --- /dev/null +++ b/drivers/net/ixgbe/ixgbe_main.c @@ -0,0 +1,2872 @@ +/******************************************************************************* + + Intel 10 Gigabit PCI Express Linux driver + Copyright(c) 1999 - 2007 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + Linux NICS + e1000-devel Mailing List + Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "ixgbe.h" +#include "ixgbe_common.h" + +char ixgbe_driver_name[] = "ixgbe"; +static char ixgbe_driver_string[] = + "Intel(R) 10 Gigabit PCI Express Network Driver"; + +#define DRV_VERSION "1.1.18" +char ixgbe_driver_version[] = DRV_VERSION; +static char ixgbe_copyright[] = "Copyright (c) 1999-2007 Intel Corporation."; + +static const struct ixgbe_info *ixgbe_info_tbl[] = { + [board_82598AF] = &ixgbe_82598AF_info, + [board_82598EB] = &ixgbe_82598EB_info, + [board_82598AT] = &ixgbe_82598AT_info, +}; + +/* ixgbe_pci_tbl - PCI Device ID Table + * + * Wildcard entries (PCI_ANY_ID) should come last + * Last entry must be all 0s + * + * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, + * Class, Class Mask, private data (not used) } + */ +static struct pci_device_id ixgbe_pci_tbl[] = { + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AF_DUAL_PORT), + board_82598AF }, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AF_SINGLE_PORT), + board_82598AF }, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AT_DUAL_PORT), + board_82598AT }, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598EB_CX4), + board_82598EB }, + + /* required last entry */ + {0, } +}; +MODULE_DEVICE_TABLE(pci, ixgbe_pci_tbl); + +MODULE_AUTHOR("Intel Corporation, "); +MODULE_DESCRIPTION("Intel(R) 10 Gigabit PCI Express Network Driver"); +MODULE_LICENSE("GPL"); +MODULE_VERSION(DRV_VERSION); + +#define DEFAULT_DEBUG_LEVEL_SHIFT 3 + + +#ifdef DEBUG +/** + * ixgbe_get_hw_dev_name - return device name string + * used by hardware layer to print debugging information + **/ +char *ixgbe_get_hw_dev_name(struct ixgbe_hw *hw) +{ + struct ixgbe_adapter *adapter = hw->back; + struct net_device *netdev = adapter->netdev; + return netdev->name; +} +#endif + +static void ixgbe_set_ivar(struct ixgbe_adapter *adapter, u16 int_alloc_entry, + u8 msix_vector) +{ + u32 ivar, index; + + msix_vector |= IXGBE_IVAR_ALLOC_VAL; + index = (int_alloc_entry >> 2) & 0x1F; + ivar = IXGBE_READ_REG(&adapter->hw, IXGBE_IVAR(index)); + ivar &= ~(0xFF << (8 * (int_alloc_entry & 0x3))); + ivar |= (msix_vector << (8 * (int_alloc_entry & 0x3))); + IXGBE_WRITE_REG(&adapter->hw, IXGBE_IVAR(index), ivar); +} + +static void ixgbe_unmap_and_free_tx_resource(struct ixgbe_adapter *adapter, + struct ixgbe_tx_buffer + *tx_buffer_info) +{ + if (tx_buffer_info->dma) { + pci_unmap_page(adapter->pdev, + tx_buffer_info->dma, + tx_buffer_info->length, PCI_DMA_TODEVICE); + tx_buffer_info->dma = 0; + } + if (tx_buffer_info->skb) { + dev_kfree_skb_any(tx_buffer_info->skb); + tx_buffer_info->skb = NULL; + } + /* tx_buffer_info must be completely set up in the transmit path */ +} + +static inline bool ixgbe_check_tx_hang(struct ixgbe_adapter *adapter, + struct ixgbe_ring *tx_ring, + unsigned int eop, + union ixgbe_adv_tx_desc *eop_desc) +{ + /* Detect a transmit hang in hardware, this serializes the + * check with the clearing of time_stamp and movement of i */ + adapter->detect_tx_hung = false; + if (tx_ring->tx_buffer_info[eop].dma && + time_after(jiffies, tx_ring->tx_buffer_info[eop].time_stamp + HZ) && + !(IXGBE_READ_REG(&adapter->hw, IXGBE_TFCS) & IXGBE_TFCS_TXOFF)) { + /* detected Tx unit hang */ + DPRINTK(DRV, ERR, "Detected Tx Unit Hang\n" + " TDH <%x>\n" + " TDT <%x>\n" + " next_to_use <%x>\n" + " next_to_clean <%x>\n" + 
"tx_buffer_info[next_to_clean]\n" + " time_stamp <%lx>\n" + " next_to_watch <%x>\n" + " jiffies <%lx>\n" + " next_to_watch.status <%x>\n", + readl(adapter->hw.hw_addr + tx_ring->head), + readl(adapter->hw.hw_addr + tx_ring->tail), + tx_ring->next_to_use, + tx_ring->next_to_clean, + tx_ring->tx_buffer_info[eop].time_stamp, + eop, jiffies, eop_desc->wb.status); + return true; + } + + return false; +} + +/** + * ixgbe_clean_tx_irq - Reclaim resources after transmit completes + * @adapter: board private structure + **/ +static bool ixgbe_clean_tx_irq(struct ixgbe_adapter *adapter, + struct ixgbe_ring *tx_ring) +{ + struct net_device *netdev = adapter->netdev; + union ixgbe_adv_tx_desc *tx_desc, *eop_desc; + struct ixgbe_tx_buffer *tx_buffer_info; + unsigned int i, eop; + bool cleaned = false; + int count = 0; + + i = tx_ring->next_to_clean; + eop = tx_ring->tx_buffer_info[i].next_to_watch; + eop_desc = IXGBE_TX_DESC_ADV(*tx_ring, eop); + while (eop_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)) { + for (cleaned = false; !cleaned;) { + tx_desc = IXGBE_TX_DESC_ADV(*tx_ring, i); + tx_buffer_info = &tx_ring->tx_buffer_info[i]; + cleaned = (i == eop); + + tx_ring->stats.bytes += tx_buffer_info->length; + ixgbe_unmap_and_free_tx_resource(adapter, + tx_buffer_info); + tx_desc->wb.status = 0; + + i++; + if (i == tx_ring->count) + i = 0; + } + + tx_ring->stats.packets++; + + eop = tx_ring->tx_buffer_info[i].next_to_watch; + eop_desc = IXGBE_TX_DESC_ADV(*tx_ring, eop); + + /* weight of a sort for tx, avoid endless transmit cleanup */ + if (count++ >= tx_ring->work_limit) + break; + } + + tx_ring->next_to_clean = i; + +#define TX_WAKE_THRESHOLD 32 + spin_lock(&tx_ring->tx_lock); + + if (cleaned && netif_carrier_ok(netdev) && + (IXGBE_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD) && + !test_bit(__IXGBE_DOWN, &adapter->state)) + netif_wake_queue(netdev); + + spin_unlock(&tx_ring->tx_lock); + + if (adapter->detect_tx_hung) + if (ixgbe_check_tx_hang(adapter, tx_ring, eop, eop_desc)) + netif_stop_queue(netdev); + + if (count >= tx_ring->work_limit) + IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, tx_ring->eims_value); + + return cleaned; +} + +/** + * ixgbe_receive_skb - Send a completed packet up the stack + * @adapter: board private structure + * @skb: packet to send up + * @is_vlan: packet has a VLAN tag + * @tag: VLAN tag from descriptor + **/ +static void ixgbe_receive_skb(struct ixgbe_adapter *adapter, + struct sk_buff *skb, bool is_vlan, + u16 tag) +{ + if (!(adapter->flags & IXGBE_FLAG_IN_NETPOLL)) { + if (adapter->vlgrp && is_vlan) + vlan_hwaccel_receive_skb(skb, adapter->vlgrp, tag); + else + netif_receive_skb(skb); + } else { + + if (adapter->vlgrp && is_vlan) + vlan_hwaccel_rx(skb, adapter->vlgrp, tag); + else + netif_rx(skb); + } +} + +static inline void ixgbe_rx_checksum(struct ixgbe_adapter *adapter, + u32 status_err, + struct sk_buff *skb) +{ + skb->ip_summed = CHECKSUM_NONE; + + /* Ignore Checksum bit is set */ + if ((status_err & IXGBE_RXD_STAT_IXSM) || + !(adapter->flags & IXGBE_FLAG_RX_CSUM_ENABLED)) + return; + /* TCP/UDP checksum error bit is set */ + if (status_err & (IXGBE_RXDADV_ERR_TCPE | IXGBE_RXDADV_ERR_IPE)) { + /* let the stack verify checksum errors */ + adapter->hw_csum_rx_error++; + return; + } + /* It must be a TCP or UDP packet with a valid checksum */ + if (status_err & (IXGBE_RXD_STAT_L4CS | IXGBE_RXD_STAT_UDPCS)) + skb->ip_summed = CHECKSUM_UNNECESSARY; + adapter->hw_csum_rx_good++; +} + +/** + * ixgbe_alloc_rx_buffers - Replace used receive buffers; packet split + * @adapter: 
address of board private structure + **/ +static void ixgbe_alloc_rx_buffers(struct ixgbe_adapter *adapter, + struct ixgbe_ring *rx_ring, + int cleaned_count) +{ + struct net_device *netdev = adapter->netdev; + struct pci_dev *pdev = adapter->pdev; + union ixgbe_adv_rx_desc *rx_desc; + struct ixgbe_rx_buffer *rx_buffer_info; + struct sk_buff *skb; + unsigned int i; + unsigned int bufsz = adapter->rx_buf_len + NET_IP_ALIGN; + + i = rx_ring->next_to_use; + rx_buffer_info = &rx_ring->rx_buffer_info[i]; + + while (cleaned_count--) { + rx_desc = IXGBE_RX_DESC_ADV(*rx_ring, i); + + if (!rx_buffer_info->page && + (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED)) { + rx_buffer_info->page = alloc_page(GFP_ATOMIC); + if (!rx_buffer_info->page) { + adapter->alloc_rx_page_failed++; + goto no_buffers; + } + rx_buffer_info->page_dma = + pci_map_page(pdev, rx_buffer_info->page, + 0, PAGE_SIZE, PCI_DMA_FROMDEVICE); + } + + if (!rx_buffer_info->skb) { + skb = netdev_alloc_skb(netdev, bufsz); + + if (!skb) { + adapter->alloc_rx_buff_failed++; + goto no_buffers; + } + + /* + * Make buffer alignment 2 beyond a 16 byte boundary + * this will result in a 16 byte aligned IP header after + * the 14 byte MAC header is removed + */ + skb_reserve(skb, NET_IP_ALIGN); + + rx_buffer_info->skb = skb; + rx_buffer_info->dma = pci_map_single(pdev, skb->data, + bufsz, + PCI_DMA_FROMDEVICE); + } + /* Refresh the desc even if buffer_addrs didn't change because + * each write-back erases this info. */ + if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) { + rx_desc->read.pkt_addr = + cpu_to_le64(rx_buffer_info->page_dma); + rx_desc->read.hdr_addr = + cpu_to_le64(rx_buffer_info->dma); + } else { + rx_desc->read.pkt_addr = + cpu_to_le64(rx_buffer_info->dma); + } + + i++; + if (i == rx_ring->count) + i = 0; + rx_buffer_info = &rx_ring->rx_buffer_info[i]; + } +no_buffers: + if (rx_ring->next_to_use != i) { + rx_ring->next_to_use = i; + if (i-- == 0) + i = (rx_ring->count - 1); + + /* + * Force memory writes to complete before letting h/w + * know there are new descriptors to fetch. (Only + * applicable for weak-ordered memory model archs, + * such as IA-64). 
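+		 * The tail register write below is what tells the hardware
+		 * about the new descriptors, so the descriptor and buffer
+		 * address stores must be globally visible before it.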
+ */ + wmb(); + writel(i, adapter->hw.hw_addr + rx_ring->tail); + } +} + +static bool ixgbe_clean_rx_irq(struct ixgbe_adapter *adapter, + struct ixgbe_ring *rx_ring, + int *work_done, int work_to_do) +{ + struct net_device *netdev = adapter->netdev; + struct pci_dev *pdev = adapter->pdev; + union ixgbe_adv_rx_desc *rx_desc, *next_rxd; + struct ixgbe_rx_buffer *rx_buffer_info, *next_buffer; + struct sk_buff *skb; + unsigned int i; + u32 upper_len, len, staterr; + u16 hdr_info, vlan_tag; + bool is_vlan, cleaned = false; + int cleaned_count = 0; + + i = rx_ring->next_to_clean; + upper_len = 0; + rx_desc = IXGBE_RX_DESC_ADV(*rx_ring, i); + staterr = le32_to_cpu(rx_desc->wb.upper.status_error); + rx_buffer_info = &rx_ring->rx_buffer_info[i]; + is_vlan = (staterr & IXGBE_RXD_STAT_VP); + vlan_tag = le16_to_cpu(rx_desc->wb.upper.vlan); + + while (staterr & IXGBE_RXD_STAT_DD) { + if (*work_done >= work_to_do) + break; + (*work_done)++; + + if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) { + hdr_info = + le16_to_cpu(rx_desc->wb.lower.lo_dword.hdr_info); + len = + ((hdr_info & IXGBE_RXDADV_HDRBUFLEN_MASK) >> + IXGBE_RXDADV_HDRBUFLEN_SHIFT); + if (hdr_info & IXGBE_RXDADV_SPH) + adapter->rx_hdr_split++; + if (len > IXGBE_RX_HDR_SIZE) + len = IXGBE_RX_HDR_SIZE; + upper_len = le16_to_cpu(rx_desc->wb.upper.length); + } else + len = le16_to_cpu(rx_desc->wb.upper.length); + + cleaned = true; + skb = rx_buffer_info->skb; + prefetch(skb->data - NET_IP_ALIGN); + rx_buffer_info->skb = NULL; + + if (len && !skb_shinfo(skb)->nr_frags) { + pci_unmap_single(pdev, rx_buffer_info->dma, + adapter->rx_buf_len + NET_IP_ALIGN, + PCI_DMA_FROMDEVICE); + skb_put(skb, len); + } + + if (upper_len) { + pci_unmap_page(pdev, rx_buffer_info->page_dma, + PAGE_SIZE, PCI_DMA_FROMDEVICE); + rx_buffer_info->page_dma = 0; + skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags, + rx_buffer_info->page, 0, upper_len); + rx_buffer_info->page = NULL; + + skb->len += upper_len; + skb->data_len += upper_len; + skb->truesize += upper_len; + } + + i++; + if (i == rx_ring->count) + i = 0; + next_buffer = &rx_ring->rx_buffer_info[i]; + + next_rxd = IXGBE_RX_DESC_ADV(*rx_ring, i); + prefetch(next_rxd); + + cleaned_count++; + if (staterr & IXGBE_RXD_STAT_EOP) { + rx_ring->stats.packets++; + rx_ring->stats.bytes += skb->len; + } else { + rx_buffer_info->skb = next_buffer->skb; + rx_buffer_info->dma = next_buffer->dma; + next_buffer->skb = skb; + adapter->non_eop_descs++; + goto next_desc; + } + + if (staterr & IXGBE_RXDADV_ERR_FRAME_ERR_MASK) { + dev_kfree_skb_irq(skb); + goto next_desc; + } + + ixgbe_rx_checksum(adapter, staterr, skb); + skb->protocol = eth_type_trans(skb, netdev); + ixgbe_receive_skb(adapter, skb, is_vlan, vlan_tag); + netdev->last_rx = jiffies; + +next_desc: + rx_desc->wb.upper.status_error = 0; + + /* return some buffers to hardware, one at a time is too slow */ + if (cleaned_count >= IXGBE_RX_BUFFER_WRITE) { + ixgbe_alloc_rx_buffers(adapter, rx_ring, cleaned_count); + cleaned_count = 0; + } + + /* use prefetched values */ + rx_desc = next_rxd; + rx_buffer_info = next_buffer; + + staterr = le32_to_cpu(rx_desc->wb.upper.status_error); + is_vlan = (staterr & IXGBE_RXD_STAT_VP); + vlan_tag = le16_to_cpu(rx_desc->wb.upper.vlan); + } + + rx_ring->next_to_clean = i; + cleaned_count = IXGBE_DESC_UNUSED(rx_ring); + + if (cleaned_count) + ixgbe_alloc_rx_buffers(adapter, rx_ring, cleaned_count); + + return cleaned; +} + +#define IXGBE_MAX_INTR 10 +/** + * ixgbe_configure_msix - Configure MSI-X hardware + * @adapter: board private structure + 
* + * ixgbe_configure_msix sets up the hardware to properly generate MSI-X + * interrupts. + **/ +static void ixgbe_configure_msix(struct ixgbe_adapter *adapter) +{ + int i, vector = 0; + + for (i = 0; i < adapter->num_tx_queues; i++) { + ixgbe_set_ivar(adapter, IXGBE_IVAR_TX_QUEUE(i), + IXGBE_MSIX_VECTOR(vector)); + writel(EITR_INTS_PER_SEC_TO_REG(adapter->tx_eitr), + adapter->hw.hw_addr + adapter->tx_ring[i].itr_register); + vector++; + } + + for (i = 0; i < adapter->num_rx_queues; i++) { + ixgbe_set_ivar(adapter, IXGBE_IVAR_RX_QUEUE(i), + IXGBE_MSIX_VECTOR(vector)); + writel(EITR_INTS_PER_SEC_TO_REG(adapter->rx_eitr), + adapter->hw.hw_addr + adapter->rx_ring[i].itr_register); + vector++; + } + + vector = adapter->num_tx_queues + adapter->num_rx_queues; + ixgbe_set_ivar(adapter, IXGBE_IVAR_OTHER_CAUSES_INDEX, + IXGBE_MSIX_VECTOR(vector)); + IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITR(vector), 1950); +} + +static irqreturn_t ixgbe_msix_lsc(int irq, void *data) +{ + struct net_device *netdev = data; + struct ixgbe_adapter *adapter = netdev_priv(netdev); + struct ixgbe_hw *hw = &adapter->hw; + u32 eicr = IXGBE_READ_REG(hw, IXGBE_EICR); + + if (eicr & IXGBE_EICR_LSC) { + adapter->lsc_int++; + if (!test_bit(__IXGBE_DOWN, &adapter->state)) + mod_timer(&adapter->watchdog_timer, jiffies); + } + IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, IXGBE_EIMS_OTHER); + + return IRQ_HANDLED; +} + +static irqreturn_t ixgbe_msix_clean_tx(int irq, void *data) +{ + struct ixgbe_ring *txr = data; + struct ixgbe_adapter *adapter = txr->adapter; + + ixgbe_clean_tx_irq(adapter, txr); + + return IRQ_HANDLED; +} + +static irqreturn_t ixgbe_msix_clean_rx(int irq, void *data) +{ + struct ixgbe_ring *rxr = data; + struct ixgbe_adapter *adapter = rxr->adapter; + + IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, rxr->eims_value); + netif_rx_schedule(adapter->netdev, &adapter->napi); + return IRQ_HANDLED; +} + +static int ixgbe_clean_rxonly(struct napi_struct *napi, int budget) +{ + struct ixgbe_adapter *adapter = container_of(napi, + struct ixgbe_adapter, napi); + struct net_device *netdev = adapter->netdev; + int work_done = 0; + struct ixgbe_ring *rxr = adapter->rx_ring; + + /* Keep link state information with original netdev */ + if (!netif_carrier_ok(netdev)) + goto quit_polling; + + ixgbe_clean_rx_irq(adapter, rxr, &work_done, budget); + + /* If no Tx and not enough Rx work done, exit the polling mode */ + if ((work_done < budget) || !netif_running(netdev)) { +quit_polling: + netif_rx_complete(netdev, napi); + if (!test_bit(__IXGBE_DOWN, &adapter->state)) + IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, + rxr->eims_value); + } + + return work_done; +} + +/** + * ixgbe_setup_msix - Initialize MSI-X interrupts + * + * ixgbe_setup_msix allocates MSI-X vectors and requests + * interrutps from the kernel. 
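+ *
+ * One vector is used for each Tx queue and each Rx queue, plus one extra
+ * vector for link status change (LSC) interrupts, so
+ * num_tx_queues + num_rx_queues + 1 entries are requested from
+ * pci_enable_msix().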
+ **/ +static int ixgbe_setup_msix(struct ixgbe_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + int i, int_vector = 0, err = 0; + int max_msix_count; + + /* +1 for the LSC interrupt */ + max_msix_count = adapter->num_rx_queues + adapter->num_tx_queues + 1; + adapter->msix_entries = kcalloc(max_msix_count, + sizeof(struct msix_entry), GFP_KERNEL); + if (!adapter->msix_entries) + return -ENOMEM; + + for (i = 0; i < max_msix_count; i++) + adapter->msix_entries[i].entry = i; + + err = pci_enable_msix(adapter->pdev, adapter->msix_entries, + max_msix_count); + if (err) + goto out; + + for (i = 0; i < adapter->num_tx_queues; i++) { + sprintf(adapter->tx_ring[i].name, "%s-tx%d", netdev->name, i); + err = request_irq(adapter->msix_entries[int_vector].vector, + &ixgbe_msix_clean_tx, + 0, + adapter->tx_ring[i].name, + &(adapter->tx_ring[i])); + if (err) { + DPRINTK(PROBE, ERR, + "request_irq failed for MSIX interrupt " + "Error: %d\n", err); + goto release_irqs; + } + adapter->tx_ring[i].eims_value = + (1 << IXGBE_MSIX_VECTOR(int_vector)); + adapter->tx_ring[i].itr_register = IXGBE_EITR(int_vector); + int_vector++; + } + + for (i = 0; i < adapter->num_rx_queues; i++) { + if (strlen(netdev->name) < (IFNAMSIZ - 5)) + sprintf(adapter->rx_ring[i].name, + "%s-rx%d", netdev->name, i); + else + memcpy(adapter->rx_ring[i].name, + netdev->name, IFNAMSIZ); + err = request_irq(adapter->msix_entries[int_vector].vector, + &ixgbe_msix_clean_rx, 0, + adapter->rx_ring[i].name, + &(adapter->rx_ring[i])); + if (err) { + DPRINTK(PROBE, ERR, + "request_irq failed for MSIX interrupt " + "Error: %d\n", err); + goto release_irqs; + } + + adapter->rx_ring[i].eims_value = + (1 << IXGBE_MSIX_VECTOR(int_vector)); + adapter->rx_ring[i].itr_register = IXGBE_EITR(int_vector); + int_vector++; + } + + sprintf(adapter->lsc_name, "%s-lsc", netdev->name); + err = request_irq(adapter->msix_entries[int_vector].vector, + &ixgbe_msix_lsc, 0, adapter->lsc_name, netdev); + if (err) { + DPRINTK(PROBE, ERR, + "request_irq for msix_lsc failed: %d\n", err); + goto release_irqs; + } + + /* FIXME: implement netif_napi_remove() instead */ + adapter->napi.poll = ixgbe_clean_rxonly; + adapter->flags |= IXGBE_FLAG_MSIX_ENABLED; + return 0; + +release_irqs: + int_vector--; + for (; int_vector >= adapter->num_tx_queues; int_vector--) + free_irq(adapter->msix_entries[int_vector].vector, + &(adapter->rx_ring[int_vector - + adapter->num_tx_queues])); + + for (; int_vector >= 0; int_vector--) + free_irq(adapter->msix_entries[int_vector].vector, + &(adapter->tx_ring[int_vector])); +out: + kfree(adapter->msix_entries); + adapter->msix_entries = NULL; + adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED; + return err; +} + +/** + * ixgbe_intr - Interrupt Handler + * @irq: interrupt number + * @data: pointer to a network interface device structure + * @pt_regs: CPU registers structure + **/ +static irqreturn_t ixgbe_intr(int irq, void *data) +{ + struct net_device *netdev = data; + struct ixgbe_adapter *adapter = netdev_priv(netdev); + struct ixgbe_hw *hw = &adapter->hw; + u32 eicr; + + eicr = IXGBE_READ_REG(hw, IXGBE_EICR); + + if (!eicr) + return IRQ_NONE; /* Not our interrupt */ + + if (eicr & IXGBE_EICR_LSC) { + adapter->lsc_int++; + if (!test_bit(__IXGBE_DOWN, &adapter->state)) + mod_timer(&adapter->watchdog_timer, jiffies); + } + if (netif_rx_schedule_prep(netdev, &adapter->napi)) { + /* Disable interrupts and register for poll. The flush of the + * posted write is intentionally left out. 
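+		 * The assumption is that, at worst, the mask write is still
+		 * posted when another interrupt arrives; that invocation finds
+		 * polling already scheduled (netif_rx_schedule_prep() fails)
+		 * and simply returns.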
*/ + atomic_inc(&adapter->irq_sem); + IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, ~0); + __netif_rx_schedule(netdev, &adapter->napi); + } + + return IRQ_HANDLED; +} + +/** + * ixgbe_request_irq - initialize interrupts + * @adapter: board private structure + * + * Attempts to configure interrupts using the best available + * capabilities of the hardware and kernel. + **/ +static int ixgbe_request_irq(struct ixgbe_adapter *adapter, u32 *num_rx_queues) +{ + struct net_device *netdev = adapter->netdev; + int flags, err; + irqreturn_t(*handler) (int, void *) = &ixgbe_intr; + + flags = IRQF_SHARED; + + err = ixgbe_setup_msix(adapter); + if (!err) + goto request_done; + + /* + * if we can't do MSI-X, fall through and try MSI + * No need to reallocate memory since we're decreasing the number of + * queues. We just won't use the other ones, also it is freed correctly + * on ixgbe_remove. + */ + *num_rx_queues = 1; + + /* do MSI */ + err = pci_enable_msi(adapter->pdev); + if (!err) { + adapter->flags |= IXGBE_FLAG_MSI_ENABLED; + flags &= ~IRQF_SHARED; + handler = &ixgbe_intr; + } + + err = request_irq(adapter->pdev->irq, handler, flags, + netdev->name, netdev); + if (err) + DPRINTK(PROBE, ERR, "request_irq failed, Error %d\n", err); + +request_done: + return err; +} + +static void ixgbe_free_irq(struct ixgbe_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + + if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) { + int i; + + for (i = 0; i < adapter->num_tx_queues; i++) + free_irq(adapter->msix_entries[i].vector, + &(adapter->tx_ring[i])); + for (i = 0; i < adapter->num_rx_queues; i++) + free_irq(adapter->msix_entries[i + + adapter->num_tx_queues].vector, + &(adapter->rx_ring[i])); + i = adapter->num_rx_queues + adapter->num_tx_queues; + free_irq(adapter->msix_entries[i].vector, netdev); + pci_disable_msix(adapter->pdev); + kfree(adapter->msix_entries); + adapter->msix_entries = NULL; + adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED; + return; + } + + free_irq(adapter->pdev->irq, netdev); + if (adapter->flags & IXGBE_FLAG_MSI_ENABLED) { + pci_disable_msi(adapter->pdev); + adapter->flags &= ~IXGBE_FLAG_MSI_ENABLED; + } +} + +/** + * ixgbe_irq_disable - Mask off interrupt generation on the NIC + * @adapter: board private structure + **/ +static inline void ixgbe_irq_disable(struct ixgbe_adapter *adapter) +{ + atomic_inc(&adapter->irq_sem); + IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, ~0); + IXGBE_WRITE_FLUSH(&adapter->hw); + synchronize_irq(adapter->pdev->irq); +} + +/** + * ixgbe_irq_enable - Enable default interrupt generation settings + * @adapter: board private structure + **/ +static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter) +{ + if (atomic_dec_and_test(&adapter->irq_sem)) { + if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) + IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIAC, + (IXGBE_EIMS_ENABLE_MASK & + ~(IXGBE_EIMS_OTHER | IXGBE_EIMS_LSC))); + IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, + IXGBE_EIMS_ENABLE_MASK); + IXGBE_WRITE_FLUSH(&adapter->hw); + } +} + +/** + * ixgbe_configure_msi_and_legacy - Initialize PIN (INTA...) 
and MSI interrupts + * + **/ +static void ixgbe_configure_msi_and_legacy(struct ixgbe_adapter *adapter) +{ + int i; + struct ixgbe_hw *hw = &adapter->hw; + + if (adapter->rx_eitr) + IXGBE_WRITE_REG(hw, IXGBE_EITR(0), + EITR_INTS_PER_SEC_TO_REG(adapter->rx_eitr)); + + /* for re-triggering the interrupt in non-NAPI mode */ + adapter->rx_ring[0].eims_value = (1 << IXGBE_MSIX_VECTOR(0)); + adapter->tx_ring[0].eims_value = (1 << IXGBE_MSIX_VECTOR(0)); + + ixgbe_set_ivar(adapter, IXGBE_IVAR_RX_QUEUE(0), 0); + for (i = 0; i < adapter->num_tx_queues; i++) + ixgbe_set_ivar(adapter, IXGBE_IVAR_TX_QUEUE(i), i); +} + +/** + * ixgbe_configure_tx - Configure 8254x Transmit Unit after Reset + * @adapter: board private structure + * + * Configure the Tx unit of the MAC after a reset. + **/ +static void ixgbe_configure_tx(struct ixgbe_adapter *adapter) +{ + u64 tdba; + struct ixgbe_hw *hw = &adapter->hw; + u32 i, tdlen; + + /* Setup the HW Tx Head and Tail descriptor pointers */ + for (i = 0; i < adapter->num_tx_queues; i++) { + tdba = adapter->tx_ring[i].dma; + tdlen = adapter->tx_ring[i].count * + sizeof(union ixgbe_adv_tx_desc); + IXGBE_WRITE_REG(hw, IXGBE_TDBAL(i), (tdba & DMA_32BIT_MASK)); + IXGBE_WRITE_REG(hw, IXGBE_TDBAH(i), (tdba >> 32)); + IXGBE_WRITE_REG(hw, IXGBE_TDLEN(i), tdlen); + IXGBE_WRITE_REG(hw, IXGBE_TDH(i), 0); + IXGBE_WRITE_REG(hw, IXGBE_TDT(i), 0); + adapter->tx_ring[i].head = IXGBE_TDH(i); + adapter->tx_ring[i].tail = IXGBE_TDT(i); + } + + IXGBE_WRITE_REG(hw, IXGBE_TIPG, IXGBE_TIPG_FIBER_DEFAULT); +} + +#define PAGE_USE_COUNT(S) (((S) >> PAGE_SHIFT) + \ + (((S) & (PAGE_SIZE - 1)) ? 1 : 0)) + +#define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2 +/** + * ixgbe_configure_rx - Configure 8254x Receive Unit after Reset + * @adapter: board private structure + * + * Configure the Rx unit of the MAC after a reset. 
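+ *
+ * This covers the packet-split versus single-buffer receive decision, the
+ * per-queue descriptor base, length, head and tail registers, and, when
+ * more than one Rx queue is enabled, the RSS redirection table, hash keys
+ * and MRQC hash field selection.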
+ **/ +static void ixgbe_configure_rx(struct ixgbe_adapter *adapter) +{ + u64 rdba; + struct ixgbe_hw *hw = &adapter->hw; + struct net_device *netdev = adapter->netdev; + int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN; + u32 rdlen, rxctrl, rxcsum; + u32 random[10]; + u32 reta, mrqc; + int i; + u32 fctrl, hlreg0; + u32 srrctl; + u32 pages; + + /* Decide whether to use packet split mode or not */ + if (netdev->mtu > ETH_DATA_LEN) + adapter->flags |= IXGBE_FLAG_RX_PS_ENABLED; + else + adapter->flags &= ~IXGBE_FLAG_RX_PS_ENABLED; + + /* Set the RX buffer length according to the mode */ + if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) { + adapter->rx_buf_len = IXGBE_RX_HDR_SIZE; + } else { + if (netdev->mtu <= ETH_DATA_LEN) + adapter->rx_buf_len = MAXIMUM_ETHERNET_VLAN_SIZE; + else + adapter->rx_buf_len = ALIGN(max_frame, 1024); + } + + fctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL); + fctrl |= IXGBE_FCTRL_BAM; + IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, fctrl); + + hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0); + if (adapter->netdev->mtu <= ETH_DATA_LEN) + hlreg0 &= ~IXGBE_HLREG0_JUMBOEN; + else + hlreg0 |= IXGBE_HLREG0_JUMBOEN; + IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0); + + pages = PAGE_USE_COUNT(adapter->netdev->mtu); + + srrctl = IXGBE_READ_REG(&adapter->hw, IXGBE_SRRCTL(0)); + srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK; + srrctl &= ~IXGBE_SRRCTL_BSIZEPKT_MASK; + + if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) { + srrctl |= PAGE_SIZE >> IXGBE_SRRCTL_BSIZEPKT_SHIFT; + srrctl |= IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS; + srrctl |= ((IXGBE_RX_HDR_SIZE << + IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT) & + IXGBE_SRRCTL_BSIZEHDR_MASK); + } else { + srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF; + + if (adapter->rx_buf_len == MAXIMUM_ETHERNET_VLAN_SIZE) + srrctl |= + IXGBE_RXBUFFER_2048 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT; + else + srrctl |= + adapter->rx_buf_len >> IXGBE_SRRCTL_BSIZEPKT_SHIFT; + } + IXGBE_WRITE_REG(&adapter->hw, IXGBE_SRRCTL(0), srrctl); + + rdlen = adapter->rx_ring[0].count * sizeof(union ixgbe_adv_rx_desc); + /* disable receives while setting up the descriptors */ + rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL); + IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl & ~IXGBE_RXCTRL_RXEN); + + /* Setup the HW Rx Head and Tail Descriptor Pointers and + * the Base and Length of the Rx Descriptor Ring */ + for (i = 0; i < adapter->num_rx_queues; i++) { + rdba = adapter->rx_ring[i].dma; + IXGBE_WRITE_REG(hw, IXGBE_RDBAL(i), (rdba & DMA_32BIT_MASK)); + IXGBE_WRITE_REG(hw, IXGBE_RDBAH(i), (rdba >> 32)); + IXGBE_WRITE_REG(hw, IXGBE_RDLEN(i), rdlen); + IXGBE_WRITE_REG(hw, IXGBE_RDH(i), 0); + IXGBE_WRITE_REG(hw, IXGBE_RDT(i), 0); + adapter->rx_ring[i].head = IXGBE_RDH(i); + adapter->rx_ring[i].tail = IXGBE_RDT(i); + } + + if (adapter->num_rx_queues > 1) { + /* Random 40bytes used as random key in RSS hash function */ + get_random_bytes(&random[0], 40); + + switch (adapter->num_rx_queues) { + case 8: + case 4: + /* Bits [3:0] in each byte refers the Rx queue no */ + reta = 0x00010203; + break; + case 2: + reta = 0x00010001; + break; + default: + reta = 0x00000000; + break; + } + + /* Fill out redirection table */ + for (i = 0; i < 32; i++) { + IXGBE_WRITE_REG_ARRAY(hw, IXGBE_RETA(0), i, reta); + if (adapter->num_rx_queues > 4) { + i++; + IXGBE_WRITE_REG_ARRAY(hw, IXGBE_RETA(0), i, + 0x04050607); + } + } + + /* Fill out hash function seeds */ + for (i = 0; i < 10; i++) + IXGBE_WRITE_REG_ARRAY(hw, IXGBE_RSSRK(0), i, random[i]); + + mrqc = IXGBE_MRQC_RSSEN + /* Perform hash on these packet types */ + | 
IXGBE_MRQC_RSS_FIELD_IPV4 + | IXGBE_MRQC_RSS_FIELD_IPV4_TCP + | IXGBE_MRQC_RSS_FIELD_IPV4_UDP + | IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP + | IXGBE_MRQC_RSS_FIELD_IPV6_EX + | IXGBE_MRQC_RSS_FIELD_IPV6 + | IXGBE_MRQC_RSS_FIELD_IPV6_TCP + | IXGBE_MRQC_RSS_FIELD_IPV6_UDP + | IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP; + IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc); + + /* Multiqueue and packet checksumming are mutually exclusive. */ + rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM); + rxcsum |= IXGBE_RXCSUM_PCSD; + IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum); + } else { + /* Enable Receive Checksum Offload for TCP and UDP */ + rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM); + if (adapter->flags & IXGBE_FLAG_RX_CSUM_ENABLED) { + /* Enable IPv4 payload checksum for UDP fragments + * Must be used in conjunction with packet-split. */ + rxcsum |= IXGBE_RXCSUM_IPPCSE; + } else { + /* don't need to clear IPPCSE as it defaults to 0 */ + } + IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum); + } + /* Enable Receives */ + IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl); + rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL); +} + +static void ixgbe_vlan_rx_register(struct net_device *netdev, + struct vlan_group *grp) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + u32 ctrl; + + ixgbe_irq_disable(adapter); + adapter->vlgrp = grp; + + if (grp) { + /* enable VLAN tag insert/strip */ + ctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_VLNCTRL); + ctrl |= IXGBE_VLNCTRL_VME | IXGBE_VLNCTRL_VFE; + ctrl &= ~IXGBE_VLNCTRL_CFIEN; + IXGBE_WRITE_REG(&adapter->hw, IXGBE_VLNCTRL, ctrl); + } + + ixgbe_irq_enable(adapter); +} + +static void ixgbe_vlan_rx_add_vid(struct net_device *netdev, u16 vid) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + + /* add VID to filter table */ + ixgbe_set_vfta(&adapter->hw, vid, 0, true); +} + +static void ixgbe_vlan_rx_kill_vid(struct net_device *netdev, u16 vid) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + + ixgbe_irq_disable(adapter); + vlan_group_set_device(adapter->vlgrp, vid, NULL); + ixgbe_irq_enable(adapter); + + /* remove VID from filter table */ + ixgbe_set_vfta(&adapter->hw, vid, 0, false); +} + +static void ixgbe_restore_vlan(struct ixgbe_adapter *adapter) +{ + ixgbe_vlan_rx_register(adapter->netdev, adapter->vlgrp); + + if (adapter->vlgrp) { + u16 vid; + for (vid = 0; vid < VLAN_GROUP_ARRAY_LEN; vid++) { + if (!vlan_group_get_device(adapter->vlgrp, vid)) + continue; + ixgbe_vlan_rx_add_vid(adapter->netdev, vid); + } + } +} + +/** + * ixgbe_set_multi - Multicast and Promiscuous mode set + * @netdev: network interface device structure + * + * The set_multi entry point is called whenever the multicast address + * list or the network interface flags are updated. This routine is + * responsible for configuring the hardware for proper multicast, + * promiscuous mode, and all-multi behavior. 
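The RSS block in ixgbe_configure_rx above packs four Rx-queue indices into each RETA dword and writes 32 dwords, giving a 128-entry redirection table, before seeding the hash key and enabling the hash fields listed here. A standalone sketch that rebuilds the same table in userspace, using only the constants visible in the patch, so the byte layout is easy to inspect:

#include <stdint.h>
#include <stdio.h>

static void build_reta(unsigned int num_rx_queues, uint32_t reta_out[32])
{
        uint32_t reta;
        int i;

        switch (num_rx_queues) {
        case 8:
        case 4:
                reta = 0x00010203;      /* four queue indices, one per byte */
                break;
        case 2:
                reta = 0x00010001;      /* alternate between queue 0 and 1 */
                break;
        default:
                reta = 0x00000000;      /* everything lands on queue 0 */
                break;
        }

        for (i = 0; i < 32; i++) {
                reta_out[i] = reta;
                if (num_rx_queues > 4) {
                        /* with more than 4 queues, every other dword covers queues 4..7 */
                        i++;
                        reta_out[i] = 0x04050607;
                }
        }
}

int main(void)
{
        uint32_t reta[32];
        int i;

        build_reta(8, reta);
        for (i = 0; i < 32; i++)
                printf("RETA[%2d] = 0x%08x\n", i, reta[i]);
        return 0;
}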
+ **/ +static void ixgbe_set_multi(struct net_device *netdev) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + struct ixgbe_hw *hw = &adapter->hw; + struct dev_mc_list *mc_ptr; + u8 *mta_list; + u32 fctrl; + int i; + + /* Check for Promiscuous and All Multicast modes */ + + fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL); + + if (netdev->flags & IFF_PROMISC) { + fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE); + } else if (netdev->flags & IFF_ALLMULTI) { + fctrl |= IXGBE_FCTRL_MPE; + fctrl &= ~IXGBE_FCTRL_UPE; + } else { + fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE); + } + + IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl); + + if (netdev->mc_count) { + mta_list = kcalloc(netdev->mc_count, ETH_ALEN, GFP_ATOMIC); + if (!mta_list) + return; + + /* Shared function expects packed array of only addresses. */ + mc_ptr = netdev->mc_list; + + for (i = 0; i < netdev->mc_count; i++) { + if (!mc_ptr) + break; + memcpy(mta_list + (i * ETH_ALEN), mc_ptr->dmi_addr, + ETH_ALEN); + mc_ptr = mc_ptr->next; + } + + ixgbe_update_mc_addr_list(hw, mta_list, i, 0); + kfree(mta_list); + } else { + ixgbe_update_mc_addr_list(hw, NULL, 0, 0); + } + +} + +static void ixgbe_configure(struct ixgbe_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + int i; + + ixgbe_set_multi(netdev); + + ixgbe_restore_vlan(adapter); + + ixgbe_configure_tx(adapter); + ixgbe_configure_rx(adapter); + for (i = 0; i < adapter->num_rx_queues; i++) + ixgbe_alloc_rx_buffers(adapter, &adapter->rx_ring[i], + (adapter->rx_ring[i].count - 1)); +} + +static int ixgbe_up_complete(struct ixgbe_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + int i; + u32 gpie = 0; + struct ixgbe_hw *hw = &adapter->hw; + u32 txdctl, rxdctl, mhadd; + int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN; + + if (adapter->flags & (IXGBE_FLAG_MSIX_ENABLED | + IXGBE_FLAG_MSI_ENABLED)) { + if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) { + gpie = (IXGBE_GPIE_MSIX_MODE | IXGBE_GPIE_EIAME | + IXGBE_GPIE_PBA_SUPPORT | IXGBE_GPIE_OCD); + } else { + /* MSI only */ + gpie = (IXGBE_GPIE_EIAME | + IXGBE_GPIE_PBA_SUPPORT); + } + IXGBE_WRITE_REG(&adapter->hw, IXGBE_GPIE, gpie); + gpie = IXGBE_READ_REG(&adapter->hw, IXGBE_GPIE); + } + + mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD); + + if (max_frame != (mhadd >> IXGBE_MHADD_MFS_SHIFT)) { + mhadd &= ~IXGBE_MHADD_MFS_MASK; + mhadd |= max_frame << IXGBE_MHADD_MFS_SHIFT; + + IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd); + } + + for (i = 0; i < adapter->num_tx_queues; i++) { + txdctl = IXGBE_READ_REG(&adapter->hw, IXGBE_TXDCTL(i)); + txdctl |= IXGBE_TXDCTL_ENABLE; + IXGBE_WRITE_REG(&adapter->hw, IXGBE_TXDCTL(i), txdctl); + } + + for (i = 0; i < adapter->num_rx_queues; i++) { + rxdctl = IXGBE_READ_REG(&adapter->hw, IXGBE_RXDCTL(i)); + rxdctl |= IXGBE_RXDCTL_ENABLE; + IXGBE_WRITE_REG(&adapter->hw, IXGBE_RXDCTL(i), rxdctl); + } + /* enable all receives */ + rxdctl = IXGBE_READ_REG(hw, IXGBE_RXCTRL); + rxdctl |= (IXGBE_RXCTRL_DMBYPS | IXGBE_RXCTRL_RXEN); + IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxdctl); + + if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) + ixgbe_configure_msix(adapter); + else + ixgbe_configure_msi_and_legacy(adapter); + + clear_bit(__IXGBE_DOWN, &adapter->state); + napi_enable(&adapter->napi); + ixgbe_irq_enable(adapter); + + /* bring the link up in the watchdog, this could race with our first + * link up interrupt but shouldn't be a problem */ + mod_timer(&adapter->watchdog_timer, jiffies); + return 0; +} + +int ixgbe_up(struct ixgbe_adapter *adapter) +{ + /* hardware has been reset, we need to reload some 
things */ + ixgbe_configure(adapter); + + return ixgbe_up_complete(adapter); +} + +void ixgbe_reset(struct ixgbe_adapter *adapter) +{ + if (ixgbe_init_hw(&adapter->hw)) + DPRINTK(PROBE, ERR, "Hardware Error\n"); + + /* reprogram the RAR[0] in case user changed it. */ + ixgbe_set_rar(&adapter->hw, 0, adapter->hw.mac.addr, 0, IXGBE_RAH_AV); + +} + +#ifdef CONFIG_PM +static int ixgbe_resume(struct pci_dev *pdev) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct ixgbe_adapter *adapter = netdev_priv(netdev); + u32 err, num_rx_queues = adapter->num_rx_queues; + + pci_set_power_state(pdev, PCI_D0); + pci_restore_state(pdev); + err = pci_enable_device(pdev); + if (err) { + printk(KERN_ERR "ixgbe: Cannot enable PCI device from " \ + "suspend\n"); + return err; + } + pci_set_master(pdev); + + pci_enable_wake(pdev, PCI_D3hot, 0); + pci_enable_wake(pdev, PCI_D3cold, 0); + + if (netif_running(netdev)) { + err = ixgbe_request_irq(adapter, &num_rx_queues); + if (err) + return err; + } + + ixgbe_reset(adapter); + + if (netif_running(netdev)) + ixgbe_up(adapter); + + netif_device_attach(netdev); + + return 0; +} +#endif + +/** + * ixgbe_clean_rx_ring - Free Rx Buffers per Queue + * @adapter: board private structure + * @rx_ring: ring to free buffers from + **/ +static void ixgbe_clean_rx_ring(struct ixgbe_adapter *adapter, + struct ixgbe_ring *rx_ring) +{ + struct pci_dev *pdev = adapter->pdev; + unsigned long size; + unsigned int i; + + /* Free all the Rx ring sk_buffs */ + + for (i = 0; i < rx_ring->count; i++) { + struct ixgbe_rx_buffer *rx_buffer_info; + + rx_buffer_info = &rx_ring->rx_buffer_info[i]; + if (rx_buffer_info->dma) { + pci_unmap_single(pdev, rx_buffer_info->dma, + adapter->rx_buf_len, + PCI_DMA_FROMDEVICE); + rx_buffer_info->dma = 0; + } + if (rx_buffer_info->skb) { + dev_kfree_skb(rx_buffer_info->skb); + rx_buffer_info->skb = NULL; + } + if (!rx_buffer_info->page) + continue; + pci_unmap_page(pdev, rx_buffer_info->page_dma, PAGE_SIZE, + PCI_DMA_FROMDEVICE); + rx_buffer_info->page_dma = 0; + + put_page(rx_buffer_info->page); + rx_buffer_info->page = NULL; + } + + size = sizeof(struct ixgbe_rx_buffer) * rx_ring->count; + memset(rx_ring->rx_buffer_info, 0, size); + + /* Zero out the descriptor ring */ + memset(rx_ring->desc, 0, rx_ring->size); + + rx_ring->next_to_clean = 0; + rx_ring->next_to_use = 0; + + writel(0, adapter->hw.hw_addr + rx_ring->head); + writel(0, adapter->hw.hw_addr + rx_ring->tail); +} + +/** + * ixgbe_clean_tx_ring - Free Tx Buffers + * @adapter: board private structure + * @tx_ring: ring to be cleaned + **/ +static void ixgbe_clean_tx_ring(struct ixgbe_adapter *adapter, + struct ixgbe_ring *tx_ring) +{ + struct ixgbe_tx_buffer *tx_buffer_info; + unsigned long size; + unsigned int i; + + /* Free all the Tx ring sk_buffs */ + + for (i = 0; i < tx_ring->count; i++) { + tx_buffer_info = &tx_ring->tx_buffer_info[i]; + ixgbe_unmap_and_free_tx_resource(adapter, tx_buffer_info); + } + + size = sizeof(struct ixgbe_tx_buffer) * tx_ring->count; + memset(tx_ring->tx_buffer_info, 0, size); + + /* Zero out the descriptor ring */ + memset(tx_ring->desc, 0, tx_ring->size); + + tx_ring->next_to_use = 0; + tx_ring->next_to_clean = 0; + + writel(0, adapter->hw.hw_addr + tx_ring->head); + writel(0, adapter->hw.hw_addr + tx_ring->tail); +} + +/** + * ixgbe_clean_all_tx_rings - Free Tx Buffers for all queues + * @adapter: board private structure + **/ +static void ixgbe_clean_all_tx_rings(struct ixgbe_adapter *adapter) +{ + int i; + + for (i = 0; i < adapter->num_tx_queues; 
i++) + ixgbe_clean_tx_ring(adapter, &adapter->tx_ring[i]); +} + +/** + * ixgbe_clean_all_rx_rings - Free Rx Buffers for all queues + * @adapter: board private structure + **/ +static void ixgbe_clean_all_rx_rings(struct ixgbe_adapter *adapter) +{ + int i; + + for (i = 0; i < adapter->num_rx_queues; i++) + ixgbe_clean_rx_ring(adapter, &adapter->rx_ring[i]); +} + +void ixgbe_down(struct ixgbe_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + u32 rxctrl; + + /* signal that we are down to the interrupt handler */ + set_bit(__IXGBE_DOWN, &adapter->state); + + /* disable receives */ + rxctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_RXCTRL); + IXGBE_WRITE_REG(&adapter->hw, IXGBE_RXCTRL, + rxctrl & ~IXGBE_RXCTRL_RXEN); + + netif_tx_disable(netdev); + + /* disable transmits in the hardware */ + + /* flush both disables */ + IXGBE_WRITE_FLUSH(&adapter->hw); + msleep(10); + + ixgbe_irq_disable(adapter); + + napi_disable(&adapter->napi); + del_timer_sync(&adapter->watchdog_timer); + + netif_carrier_off(netdev); + netif_stop_queue(netdev); + + ixgbe_reset(adapter); + ixgbe_clean_all_tx_rings(adapter); + ixgbe_clean_all_rx_rings(adapter); + +} + +static int ixgbe_suspend(struct pci_dev *pdev, pm_message_t state) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct ixgbe_adapter *adapter = netdev_priv(netdev); +#ifdef CONFIG_PM + int retval = 0; +#endif + + netif_device_detach(netdev); + + if (netif_running(netdev)) { + ixgbe_down(adapter); + ixgbe_free_irq(adapter); + } + +#ifdef CONFIG_PM + retval = pci_save_state(pdev); + if (retval) + return retval; +#endif + + pci_enable_wake(pdev, PCI_D3hot, 0); + pci_enable_wake(pdev, PCI_D3cold, 0); + + pci_disable_device(pdev); + + pci_set_power_state(pdev, pci_choose_state(pdev, state)); + + return 0; +} + +static void ixgbe_shutdown(struct pci_dev *pdev) +{ + ixgbe_suspend(pdev, PMSG_SUSPEND); +} + +/** + * ixgbe_clean - NAPI Rx polling callback + * @adapter: board private structure + **/ +static int ixgbe_clean(struct napi_struct *napi, int budget) +{ + struct ixgbe_adapter *adapter = container_of(napi, + struct ixgbe_adapter, napi); + struct net_device *netdev = adapter->netdev; + int tx_cleaned = 0, work_done = 0; + + /* Keep link state information with original netdev */ + if (!netif_carrier_ok(adapter->netdev)) + goto quit_polling; + + /* In non-MSIX case, there is no multi-Tx/Rx queue */ + tx_cleaned = ixgbe_clean_tx_irq(adapter, adapter->tx_ring); + ixgbe_clean_rx_irq(adapter, &adapter->rx_ring[0], &work_done, + budget); + + /* If no Tx and not enough Rx work done, exit the polling mode */ + if ((!tx_cleaned && (work_done < budget)) || + !netif_running(adapter->netdev)) { +quit_polling: + netif_rx_complete(netdev, napi); + ixgbe_irq_enable(adapter); + } + + return work_done; +} + +/** + * ixgbe_tx_timeout - Respond to a Tx Hang + * @netdev: network interface device structure + **/ +static void ixgbe_tx_timeout(struct net_device *netdev) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + + /* Do the reset outside of interrupt context */ + schedule_work(&adapter->reset_task); +} + +static void ixgbe_reset_task(struct work_struct *work) +{ + struct ixgbe_adapter *adapter; + adapter = container_of(work, struct ixgbe_adapter, reset_task); + + adapter->tx_timeout_count++; + + ixgbe_down(adapter); + ixgbe_up(adapter); +} + +/** + * ixgbe_alloc_queues - Allocate memory for all rings + * @adapter: board private structure to initialize + * + * We allocate one ring per queue at run-time since we don't know the + * number of 
queues at compile-time. The polling_netdev array is + * intended for Multiqueue, but should work fine with a single queue. + **/ +static int __devinit ixgbe_alloc_queues(struct ixgbe_adapter *adapter) +{ + int i; + + adapter->tx_ring = kcalloc(adapter->num_tx_queues, + sizeof(struct ixgbe_ring), GFP_KERNEL); + if (!adapter->tx_ring) + return -ENOMEM; + + for (i = 0; i < adapter->num_tx_queues; i++) + adapter->tx_ring[i].count = IXGBE_DEFAULT_TXD; + + adapter->rx_ring = kcalloc(adapter->num_rx_queues, + sizeof(struct ixgbe_ring), GFP_KERNEL); + if (!adapter->rx_ring) { + kfree(adapter->tx_ring); + return -ENOMEM; + } + + for (i = 0; i < adapter->num_rx_queues; i++) { + adapter->rx_ring[i].adapter = adapter; + adapter->rx_ring[i].itr_register = IXGBE_EITR(i); + adapter->rx_ring[i].count = IXGBE_DEFAULT_RXD; + } + + return 0; +} + +/** + * ixgbe_sw_init - Initialize general software structures (struct ixgbe_adapter) + * @adapter: board private structure to initialize + * + * ixgbe_sw_init initializes the Adapter private data structure. + * Fields are initialized based on PCI device information and + * OS network device settings (MTU size). + **/ +static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter) +{ + struct ixgbe_hw *hw = &adapter->hw; + struct pci_dev *pdev = adapter->pdev; + + /* default flow control settings */ + hw->fc.original_type = ixgbe_fc_full; + hw->fc.type = ixgbe_fc_full; + + hw->mac.link_mode_select = IXGBE_AUTOC_LMS_10G_LINK_NO_AN; + if (hw->mac.ops.reset(hw)) { + dev_err(&pdev->dev, "HW Init failed\n"); + return -EIO; + } + if (hw->phy.ops.setup_speed(hw, IXGBE_LINK_SPEED_10GB_FULL, true, + false)) { + dev_err(&pdev->dev, "Link Speed setup failed\n"); + return -EIO; + } + + /* initialize eeprom parameters */ + if (ixgbe_init_eeprom(hw)) { + dev_err(&pdev->dev, "EEPROM initialization failed\n"); + return -EIO; + } + + /* Set the default values */ + adapter->num_rx_queues = IXGBE_DEFAULT_RXQ; + adapter->num_tx_queues = 1; + adapter->flags |= IXGBE_FLAG_RX_CSUM_ENABLED; + + if (ixgbe_alloc_queues(adapter)) { + dev_err(&pdev->dev, "Unable to allocate memory for queues\n"); + return -ENOMEM; + } + + atomic_set(&adapter->irq_sem, 1); + set_bit(__IXGBE_DOWN, &adapter->state); + + return 0; +} + +/** + * ixgbe_setup_tx_resources - allocate Tx resources (Descriptors) + * @adapter: board private structure + * @txdr: tx descriptor ring (for a specific queue) to setup + * + * Return 0 on success, negative on failure + **/ +int ixgbe_setup_tx_resources(struct ixgbe_adapter *adapter, + struct ixgbe_ring *txdr) +{ + struct pci_dev *pdev = adapter->pdev; + int size; + + size = sizeof(struct ixgbe_tx_buffer) * txdr->count; + txdr->tx_buffer_info = vmalloc(size); + if (!txdr->tx_buffer_info) { + DPRINTK(PROBE, ERR, + "Unable to allocate memory for the transmit descriptor ring\n"); + return -ENOMEM; + } + memset(txdr->tx_buffer_info, 0, size); + + /* round up to nearest 4K */ + txdr->size = txdr->count * sizeof(union ixgbe_adv_tx_desc); + txdr->size = ALIGN(txdr->size, 4096); + + txdr->desc = pci_alloc_consistent(pdev, txdr->size, &txdr->dma); + if (!txdr->desc) { + vfree(txdr->tx_buffer_info); + DPRINTK(PROBE, ERR, + "Memory allocation failed for the tx desc ring\n"); + return -ENOMEM; + } + + txdr->adapter = adapter; + txdr->next_to_use = 0; + txdr->next_to_clean = 0; + txdr->work_limit = txdr->count; + spin_lock_init(&txdr->tx_lock); + + return 0; +} + +/** + * ixgbe_setup_rx_resources - allocate Rx resources (Descriptors) + * @adapter: board private structure + * @rxdr: rx 
descriptor ring (for a specific queue) to setup + * + * Returns 0 on success, negative on failure + **/ +int ixgbe_setup_rx_resources(struct ixgbe_adapter *adapter, + struct ixgbe_ring *rxdr) +{ + struct pci_dev *pdev = adapter->pdev; + int size, desc_len; + + size = sizeof(struct ixgbe_rx_buffer) * rxdr->count; + rxdr->rx_buffer_info = vmalloc(size); + if (!rxdr->rx_buffer_info) { + DPRINTK(PROBE, ERR, + "vmalloc allocation failed for the rx desc ring\n"); + return -ENOMEM; + } + memset(rxdr->rx_buffer_info, 0, size); + + desc_len = sizeof(union ixgbe_adv_rx_desc); + + /* Round up to nearest 4K */ + rxdr->size = rxdr->count * desc_len; + rxdr->size = ALIGN(rxdr->size, 4096); + + rxdr->desc = pci_alloc_consistent(pdev, rxdr->size, &rxdr->dma); + + if (!rxdr->desc) { + DPRINTK(PROBE, ERR, + "Memory allocation failed for the rx desc ring\n"); + vfree(rxdr->rx_buffer_info); + return -ENOMEM; + } + + rxdr->next_to_clean = 0; + rxdr->next_to_use = 0; + rxdr->adapter = adapter; + + return 0; +} + +/** + * ixgbe_free_tx_resources - Free Tx Resources per Queue + * @adapter: board private structure + * @tx_ring: Tx descriptor ring for a specific queue + * + * Free all transmit software resources + **/ +static void ixgbe_free_tx_resources(struct ixgbe_adapter *adapter, + struct ixgbe_ring *tx_ring) +{ + struct pci_dev *pdev = adapter->pdev; + + ixgbe_clean_tx_ring(adapter, tx_ring); + + vfree(tx_ring->tx_buffer_info); + tx_ring->tx_buffer_info = NULL; + + pci_free_consistent(pdev, tx_ring->size, tx_ring->desc, tx_ring->dma); + + tx_ring->desc = NULL; +} + +/** + * ixgbe_free_all_tx_resources - Free Tx Resources for All Queues + * @adapter: board private structure + * + * Free all transmit software resources + **/ +static void ixgbe_free_all_tx_resources(struct ixgbe_adapter *adapter) +{ + int i; + + for (i = 0; i < adapter->num_tx_queues; i++) + ixgbe_free_tx_resources(adapter, &adapter->tx_ring[i]); +} + +/** + * ixgbe_free_rx_resources - Free Rx Resources + * @adapter: board private structure + * @rx_ring: ring to clean the resources from + * + * Free all receive software resources + **/ +static void ixgbe_free_rx_resources(struct ixgbe_adapter *adapter, + struct ixgbe_ring *rx_ring) +{ + struct pci_dev *pdev = adapter->pdev; + + ixgbe_clean_rx_ring(adapter, rx_ring); + + vfree(rx_ring->rx_buffer_info); + rx_ring->rx_buffer_info = NULL; + + pci_free_consistent(pdev, rx_ring->size, rx_ring->desc, rx_ring->dma); + + rx_ring->desc = NULL; +} + +/** + * ixgbe_free_all_rx_resources - Free Rx Resources for All Queues + * @adapter: board private structure + * + * Free all receive software resources + **/ +static void ixgbe_free_all_rx_resources(struct ixgbe_adapter *adapter) +{ + int i; + + for (i = 0; i < adapter->num_rx_queues; i++) + ixgbe_free_rx_resources(adapter, &adapter->rx_ring[i]); +} + +/** + * ixgbe_setup_all_tx_resources - wrapper to allocate Tx resources + * (Descriptors) for all queues + * @adapter: board private structure + * + * If this function returns with an error, then it's possible one or + * more of the rings is populated (while the rest are not). It is the + * callers duty to clean those orphaned rings. 
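For the allocation helpers above (ixgbe_setup_tx_resources and ixgbe_setup_rx_resources), the descriptor ring is sized as count * sizeof(descriptor) and then rounded up to a 4 KiB multiple before the DMA allocation. A small userspace sketch of that rounding only; the 16-byte descriptor size is an assumption used for illustration:

#include <stddef.h>
#include <stdio.h>

/* same effect as the kernel's ALIGN(size, 4096) used above */
#define RING_ALIGN(x, a)        (((x) + (a) - 1) & ~((size_t)(a) - 1))

int main(void)
{
        size_t desc_size = 16;                  /* assumed descriptor size in bytes */
        size_t counts[] = { 64, 256, 1000, 4096 };
        size_t i;

        for (i = 0; i < sizeof(counts) / sizeof(counts[0]); i++) {
                size_t raw = counts[i] * desc_size;
                printf("count=%4zu raw=%6zu bytes -> allocated=%6zu bytes\n",
                       counts[i], raw, RING_ALIGN(raw, 4096));
        }
        return 0;
}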
+ * + * Return 0 on success, negative on failure + **/ +static int ixgbe_setup_all_tx_resources(struct ixgbe_adapter *adapter) +{ + int i, err = 0; + + for (i = 0; i < adapter->num_tx_queues; i++) { + err = ixgbe_setup_tx_resources(adapter, &adapter->tx_ring[i]); + if (err) { + DPRINTK(PROBE, ERR, + "Allocation for Tx Queue %u failed\n", i); + break; + } + } + + return err; +} + +/** + * ixgbe_setup_all_rx_resources - wrapper to allocate Rx resources + * (Descriptors) for all queues + * @adapter: board private structure + * + * If this function returns with an error, then it's possible one or + * more of the rings is populated (while the rest are not). It is the + * callers duty to clean those orphaned rings. + * + * Return 0 on success, negative on failure + **/ + +static int ixgbe_setup_all_rx_resources(struct ixgbe_adapter *adapter) +{ + int i, err = 0; + + for (i = 0; i < adapter->num_rx_queues; i++) { + err = ixgbe_setup_rx_resources(adapter, &adapter->rx_ring[i]); + if (err) { + DPRINTK(PROBE, ERR, + "Allocation for Rx Queue %u failed\n", i); + break; + } + } + + return err; +} + +/** + * ixgbe_change_mtu - Change the Maximum Transfer Unit + * @netdev: network interface device structure + * @new_mtu: new value for maximum frame size + * + * Returns 0 on success, negative on failure + **/ +static int ixgbe_change_mtu(struct net_device *netdev, int new_mtu) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN; + + if ((max_frame < (ETH_ZLEN + ETH_FCS_LEN)) || + (max_frame > IXGBE_MAX_JUMBO_FRAME_SIZE)) + return -EINVAL; + + netdev->mtu = new_mtu; + + if (netif_running(netdev)) { + ixgbe_down(adapter); + ixgbe_up(adapter); + } + + return 0; +} + +/** + * ixgbe_open - Called when a network interface is made active + * @netdev: network interface device structure + * + * Returns 0 on success, negative value on failure + * + * The open entry point is called when a network interface is made + * active by the system (IFF_UP). At this point all resources needed + * for transmit and receive operations are allocated, the interrupt + * handler is registered with the OS, the watchdog timer is started, + * and the stack is notified that the interface is ready. + **/ +static int ixgbe_open(struct net_device *netdev) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + int err; + u32 ctrl_ext; + u32 num_rx_queues = adapter->num_rx_queues; + + /* Let firmware know the driver has taken over */ + ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT); + IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT, + ctrl_ext | IXGBE_CTRL_EXT_DRV_LOAD); + +try_intr_reinit: + /* allocate transmit descriptors */ + err = ixgbe_setup_all_tx_resources(adapter); + if (err) + goto err_setup_tx; + + if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) { + num_rx_queues = 1; + adapter->num_rx_queues = num_rx_queues; + } + + /* allocate receive descriptors */ + err = ixgbe_setup_all_rx_resources(adapter); + if (err) + goto err_setup_rx; + + ixgbe_configure(adapter); + + err = ixgbe_request_irq(adapter, &num_rx_queues); + if (err) + goto err_req_irq; + + /* ixgbe_request might have reduced num_rx_queues */ + if (num_rx_queues < adapter->num_rx_queues) { + /* We didn't get MSI-X, so we need to release everything, + * set our Rx queue count to num_rx_queues, and redo the + * whole init process. 
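This fallback is the key control flow in ixgbe_open: if IRQ setup comes back with fewer Rx queues than were sized (no MSI-X), everything allocated so far is released, the queue count is shrunk, and the whole init sequence is retried. A hedged userspace model of just that decision; request_irqs() is a hypothetical stand-in, not a driver function:

#include <stdbool.h>
#include <stdio.h>

/* hypothetical stand-in for ixgbe_request_irq(): without MSI-X only a
 * single Rx queue can be supported */
static int request_irqs(bool have_msix, unsigned int *num_rx_queues)
{
        if (!have_msix)
                *num_rx_queues = 1;
        return 0;
}

int main(void)
{
        unsigned int configured = 4;            /* queues sized before IRQ setup */
        unsigned int granted = configured;

        request_irqs(false, &granted);

        if (granted < configured) {
                /* mirrors the driver: free IRQs and rings, shrink the queue
                 * count, reset the hardware and run the setup again */
                printf("no MSI-X: redoing init with %u Rx queue(s)\n", granted);
                configured = granted;
        }
        printf("running with %u Rx queue(s)\n", configured);
        return 0;
}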
+ */ + ixgbe_free_irq(adapter); + if (adapter->flags & IXGBE_FLAG_MSI_ENABLED) { + pci_disable_msi(adapter->pdev); + adapter->flags &= ~IXGBE_FLAG_MSI_ENABLED; + } + ixgbe_free_all_rx_resources(adapter); + ixgbe_free_all_tx_resources(adapter); + adapter->num_rx_queues = num_rx_queues; + + /* Reset the hardware, and start over. */ + ixgbe_reset(adapter); + + goto try_intr_reinit; + } + + err = ixgbe_up_complete(adapter); + if (err) + goto err_up; + + return 0; + +err_up: + ixgbe_free_irq(adapter); +err_req_irq: + ixgbe_free_all_rx_resources(adapter); +err_setup_rx: + ixgbe_free_all_tx_resources(adapter); +err_setup_tx: + ixgbe_reset(adapter); + + return err; +} + +/** + * ixgbe_close - Disables a network interface + * @netdev: network interface device structure + * + * Returns 0, this is not allowed to fail + * + * The close entry point is called when an interface is de-activated + * by the OS. The hardware is still under the drivers control, but + * needs to be disabled. A global MAC reset is issued to stop the + * hardware, and all transmit and receive resources are freed. + **/ +static int ixgbe_close(struct net_device *netdev) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + u32 ctrl_ext; + + ixgbe_down(adapter); + ixgbe_free_irq(adapter); + + ixgbe_free_all_tx_resources(adapter); + ixgbe_free_all_rx_resources(adapter); + + ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT); + IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT, + ctrl_ext & ~IXGBE_CTRL_EXT_DRV_LOAD); + + return 0; +} + +/** + * ixgbe_update_stats - Update the board statistics counters. + * @adapter: board private structure + **/ +void ixgbe_update_stats(struct ixgbe_adapter *adapter) +{ + struct ixgbe_hw *hw = &adapter->hw; + u64 good_rx, missed_rx, bprc; + + adapter->stats.crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS); + good_rx = IXGBE_READ_REG(hw, IXGBE_GPRC); + missed_rx = IXGBE_READ_REG(hw, IXGBE_MPC(0)); + missed_rx += IXGBE_READ_REG(hw, IXGBE_MPC(1)); + missed_rx += IXGBE_READ_REG(hw, IXGBE_MPC(2)); + missed_rx += IXGBE_READ_REG(hw, IXGBE_MPC(3)); + missed_rx += IXGBE_READ_REG(hw, IXGBE_MPC(4)); + missed_rx += IXGBE_READ_REG(hw, IXGBE_MPC(5)); + missed_rx += IXGBE_READ_REG(hw, IXGBE_MPC(6)); + missed_rx += IXGBE_READ_REG(hw, IXGBE_MPC(7)); + adapter->stats.gprc += (good_rx - missed_rx); + + adapter->stats.mpc[0] += missed_rx; + adapter->stats.gorc += IXGBE_READ_REG(hw, IXGBE_GORCH); + bprc = IXGBE_READ_REG(hw, IXGBE_BPRC); + adapter->stats.bprc += bprc; + adapter->stats.mprc += IXGBE_READ_REG(hw, IXGBE_MPRC); + adapter->stats.mprc -= bprc; + adapter->stats.roc += IXGBE_READ_REG(hw, IXGBE_ROC); + adapter->stats.prc64 += IXGBE_READ_REG(hw, IXGBE_PRC64); + adapter->stats.prc127 += IXGBE_READ_REG(hw, IXGBE_PRC127); + adapter->stats.prc255 += IXGBE_READ_REG(hw, IXGBE_PRC255); + adapter->stats.prc511 += IXGBE_READ_REG(hw, IXGBE_PRC511); + adapter->stats.prc1023 += IXGBE_READ_REG(hw, IXGBE_PRC1023); + adapter->stats.prc1522 += IXGBE_READ_REG(hw, IXGBE_PRC1522); + + adapter->stats.rlec += IXGBE_READ_REG(hw, IXGBE_RLEC); + adapter->stats.lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXC); + adapter->stats.lxontxc += IXGBE_READ_REG(hw, IXGBE_LXONTXC); + adapter->stats.lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXC); + adapter->stats.lxofftxc += IXGBE_READ_REG(hw, IXGBE_LXOFFTXC); + adapter->stats.ruc += IXGBE_READ_REG(hw, IXGBE_RUC); + adapter->stats.gptc += IXGBE_READ_REG(hw, IXGBE_GPTC); + adapter->stats.gotc += IXGBE_READ_REG(hw, IXGBE_GOTCH); + adapter->stats.rnbc[0] += IXGBE_READ_REG(hw, IXGBE_RNBC(0)); + 
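A note on the counters gathered just above: the good-packet count (GPRC) is adjusted by the sum of the eight per-packet-buffer missed-packet counters (MPC0..7), so only frames that actually reached the host are reported as received. A standalone sketch of that adjustment with made-up register snapshots:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint32_t gprc = 125000;                         /* hypothetical good-packet count */
        uint32_t mpc[8] = { 12, 0, 0, 3, 0, 0, 0, 1 };  /* hypothetical missed, per buffer */
        uint64_t missed_rx = 0;
        int i;

        for (i = 0; i < 8; i++)
                missed_rx += mpc[i];

        printf("gprc += %llu (missed %llu)\n",
               (unsigned long long)(gprc - missed_rx),
               (unsigned long long)missed_rx);
        return 0;
}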
adapter->stats.ruc += IXGBE_READ_REG(hw, IXGBE_RUC); + adapter->stats.rfc += IXGBE_READ_REG(hw, IXGBE_RFC); + adapter->stats.rjc += IXGBE_READ_REG(hw, IXGBE_RJC); + adapter->stats.tor += IXGBE_READ_REG(hw, IXGBE_TORH); + adapter->stats.tpr += IXGBE_READ_REG(hw, IXGBE_TPR); + adapter->stats.ptc64 += IXGBE_READ_REG(hw, IXGBE_PTC64); + adapter->stats.ptc127 += IXGBE_READ_REG(hw, IXGBE_PTC127); + adapter->stats.ptc255 += IXGBE_READ_REG(hw, IXGBE_PTC255); + adapter->stats.ptc511 += IXGBE_READ_REG(hw, IXGBE_PTC511); + adapter->stats.ptc1023 += IXGBE_READ_REG(hw, IXGBE_PTC1023); + adapter->stats.ptc1522 += IXGBE_READ_REG(hw, IXGBE_PTC1522); + adapter->stats.mptc += IXGBE_READ_REG(hw, IXGBE_MPTC); + adapter->stats.bptc += IXGBE_READ_REG(hw, IXGBE_BPTC); + + /* Fill out the OS statistics structure */ + adapter->net_stats.rx_packets = adapter->stats.gprc; + adapter->net_stats.tx_packets = adapter->stats.gptc; + adapter->net_stats.rx_bytes = adapter->stats.gorc; + adapter->net_stats.tx_bytes = adapter->stats.gotc; + adapter->net_stats.multicast = adapter->stats.mprc; + + /* Rx Errors */ + adapter->net_stats.rx_errors = adapter->stats.crcerrs + + adapter->stats.rlec; + adapter->net_stats.rx_dropped = 0; + adapter->net_stats.rx_length_errors = adapter->stats.rlec; + adapter->net_stats.rx_crc_errors = adapter->stats.crcerrs; + adapter->net_stats.rx_missed_errors = adapter->stats.mpc[0]; + +} + +/** + * ixgbe_watchdog - Timer Call-back + * @data: pointer to adapter cast into an unsigned long + **/ +static void ixgbe_watchdog(unsigned long data) +{ + struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)data; + struct net_device *netdev = adapter->netdev; + bool link_up; + u32 link_speed = 0; + + adapter->hw.phy.ops.check(&adapter->hw, &(link_speed), &link_up); + + if (link_up) { + if (!netif_carrier_ok(netdev)) { + u32 frctl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL); + u32 rmcs = IXGBE_READ_REG(&adapter->hw, IXGBE_RMCS); +#define FLOW_RX (frctl & IXGBE_FCTRL_RFCE) +#define FLOW_TX (rmcs & IXGBE_RMCS_TFCE_802_3X) + DPRINTK(LINK, INFO, "NIC Link is Up %s, " + "Flow Control: %s\n", + (link_speed == IXGBE_LINK_SPEED_10GB_FULL ? + "10 Gbps" : + (link_speed == IXGBE_LINK_SPEED_1GB_FULL ? + "1 Gpbs" : "unknown speed")), + ((FLOW_RX && FLOW_TX) ? "RX/TX" : + (FLOW_RX ? "RX" : + (FLOW_TX ? "TX" : "None")))); + + netif_carrier_on(netdev); + netif_wake_queue(netdev); + } else { + /* Force detection of hung controller */ + adapter->detect_tx_hung = true; + } + } else { + if (netif_carrier_ok(netdev)) { + DPRINTK(LINK, INFO, "NIC Link is Down\n"); + netif_carrier_off(netdev); + netif_stop_queue(netdev); + } + } + + ixgbe_update_stats(adapter); + + /* Reset the timer */ + if (!test_bit(__IXGBE_DOWN, &adapter->state)) + mod_timer(&adapter->watchdog_timer, + round_jiffies(jiffies + 2 * HZ)); +} + +#define IXGBE_MAX_TXD_PWR 14 +#define IXGBE_MAX_DATA_PER_TXD (1 << IXGBE_MAX_TXD_PWR) + +/* Tx Descriptors needed, worst case */ +#define TXD_USE_COUNT(S) (((S) >> IXGBE_MAX_TXD_PWR) + \ + (((S) & (IXGBE_MAX_DATA_PER_TXD - 1)) ? 
1 : 0)) +#define DESC_NEEDED (TXD_USE_COUNT(IXGBE_MAX_DATA_PER_TXD) /* skb->data */ + \ + MAX_SKB_FRAGS * TXD_USE_COUNT(PAGE_SIZE) + 1) /* for context */ + +static int ixgbe_tso(struct ixgbe_adapter *adapter, + struct ixgbe_ring *tx_ring, struct sk_buff *skb, + u32 tx_flags, u8 *hdr_len) +{ + struct ixgbe_adv_tx_context_desc *context_desc; + unsigned int i; + int err; + struct ixgbe_tx_buffer *tx_buffer_info; + u32 vlan_macip_lens = 0, type_tucmd_mlhl = 0; + u32 mss_l4len_idx = 0, l4len; + *hdr_len = 0; + + if (skb_is_gso(skb)) { + if (skb_header_cloned(skb)) { + err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC); + if (err) + return err; + } + l4len = tcp_hdrlen(skb); + *hdr_len += l4len; + + if (skb->protocol == ntohs(ETH_P_IP)) { + struct iphdr *iph = ip_hdr(skb); + iph->tot_len = 0; + iph->check = 0; + tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr, + iph->daddr, 0, + IPPROTO_TCP, + 0); + adapter->hw_tso_ctxt++; + } else if (skb_shinfo(skb)->gso_type == SKB_GSO_TCPV6) { + ipv6_hdr(skb)->payload_len = 0; + tcp_hdr(skb)->check = + ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr, + &ipv6_hdr(skb)->daddr, + 0, IPPROTO_TCP, 0); + adapter->hw_tso6_ctxt++; + } + + i = tx_ring->next_to_use; + + tx_buffer_info = &tx_ring->tx_buffer_info[i]; + context_desc = IXGBE_TX_CTXTDESC_ADV(*tx_ring, i); + + /* VLAN MACLEN IPLEN */ + if (tx_flags & IXGBE_TX_FLAGS_VLAN) + vlan_macip_lens |= + (tx_flags & IXGBE_TX_FLAGS_VLAN_MASK); + vlan_macip_lens |= ((skb_network_offset(skb)) << + IXGBE_ADVTXD_MACLEN_SHIFT); + *hdr_len += skb_network_offset(skb); + vlan_macip_lens |= + (skb_transport_header(skb) - skb_network_header(skb)); + *hdr_len += + (skb_transport_header(skb) - skb_network_header(skb)); + context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens); + context_desc->seqnum_seed = 0; + + /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */ + type_tucmd_mlhl |= (IXGBE_TXD_CMD_DEXT | + IXGBE_ADVTXD_DTYP_CTXT); + + if (skb->protocol == ntohs(ETH_P_IP)) + type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4; + type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP; + context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd_mlhl); + + /* MSS L4LEN IDX */ + mss_l4len_idx |= + (skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT); + mss_l4len_idx |= (l4len << IXGBE_ADVTXD_L4LEN_SHIFT); + context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx); + + tx_buffer_info->time_stamp = jiffies; + tx_buffer_info->next_to_watch = i; + + i++; + if (i == tx_ring->count) + i = 0; + tx_ring->next_to_use = i; + + return true; + } + return false; +} + +static bool ixgbe_tx_csum(struct ixgbe_adapter *adapter, + struct ixgbe_ring *tx_ring, + struct sk_buff *skb, u32 tx_flags) +{ + struct ixgbe_adv_tx_context_desc *context_desc; + unsigned int i; + struct ixgbe_tx_buffer *tx_buffer_info; + u32 vlan_macip_lens = 0, type_tucmd_mlhl = 0; + + if (skb->ip_summed == CHECKSUM_PARTIAL || + (tx_flags & IXGBE_TX_FLAGS_VLAN)) { + i = tx_ring->next_to_use; + tx_buffer_info = &tx_ring->tx_buffer_info[i]; + context_desc = IXGBE_TX_CTXTDESC_ADV(*tx_ring, i); + + if (tx_flags & IXGBE_TX_FLAGS_VLAN) + vlan_macip_lens |= + (tx_flags & IXGBE_TX_FLAGS_VLAN_MASK); + vlan_macip_lens |= (skb_network_offset(skb) << + IXGBE_ADVTXD_MACLEN_SHIFT); + if (skb->ip_summed == CHECKSUM_PARTIAL) + vlan_macip_lens |= (skb_transport_header(skb) - + skb_network_header(skb)); + + context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens); + context_desc->seqnum_seed = 0; + + type_tucmd_mlhl |= (IXGBE_TXD_CMD_DEXT | + IXGBE_ADVTXD_DTYP_CTXT); + + if (skb->ip_summed == CHECKSUM_PARTIAL) { + if 
(skb->protocol == ntohs(ETH_P_IP)) + type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4; + + if (skb->sk->sk_protocol == IPPROTO_TCP) + type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP; + } + + context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd_mlhl); + context_desc->mss_l4len_idx = 0; + + tx_buffer_info->time_stamp = jiffies; + tx_buffer_info->next_to_watch = i; + adapter->hw_csum_tx_good++; + i++; + if (i == tx_ring->count) + i = 0; + tx_ring->next_to_use = i; + + return true; + } + return false; +} + +static int ixgbe_tx_map(struct ixgbe_adapter *adapter, + struct ixgbe_ring *tx_ring, + struct sk_buff *skb, unsigned int first) +{ + struct ixgbe_tx_buffer *tx_buffer_info; + unsigned int len = skb->len; + unsigned int offset = 0, size, count = 0, i; + unsigned int nr_frags = skb_shinfo(skb)->nr_frags; + unsigned int f; + + len -= skb->data_len; + + i = tx_ring->next_to_use; + + while (len) { + tx_buffer_info = &tx_ring->tx_buffer_info[i]; + size = min(len, (uint)IXGBE_MAX_DATA_PER_TXD); + + tx_buffer_info->length = size; + tx_buffer_info->dma = pci_map_single(adapter->pdev, + skb->data + offset, + size, PCI_DMA_TODEVICE); + tx_buffer_info->time_stamp = jiffies; + tx_buffer_info->next_to_watch = i; + + len -= size; + offset += size; + count++; + i++; + if (i == tx_ring->count) + i = 0; + } + + for (f = 0; f < nr_frags; f++) { + struct skb_frag_struct *frag; + + frag = &skb_shinfo(skb)->frags[f]; + len = frag->size; + offset = frag->page_offset; + + while (len) { + tx_buffer_info = &tx_ring->tx_buffer_info[i]; + size = min(len, (uint)IXGBE_MAX_DATA_PER_TXD); + + tx_buffer_info->length = size; + tx_buffer_info->dma = pci_map_page(adapter->pdev, + frag->page, + offset, + size, PCI_DMA_TODEVICE); + tx_buffer_info->time_stamp = jiffies; + tx_buffer_info->next_to_watch = i; + + len -= size; + offset += size; + count++; + i++; + if (i == tx_ring->count) + i = 0; + } + } + if (i == 0) + i = tx_ring->count - 1; + else + i = i - 1; + tx_ring->tx_buffer_info[i].skb = skb; + tx_ring->tx_buffer_info[first].next_to_watch = i; + + return count; +} + +static void ixgbe_tx_queue(struct ixgbe_adapter *adapter, + struct ixgbe_ring *tx_ring, + int tx_flags, int count, u32 paylen, u8 hdr_len) +{ + union ixgbe_adv_tx_desc *tx_desc = NULL; + struct ixgbe_tx_buffer *tx_buffer_info; + u32 olinfo_status = 0, cmd_type_len = 0; + unsigned int i; + u32 txd_cmd = IXGBE_TXD_CMD_EOP | IXGBE_TXD_CMD_RS | IXGBE_TXD_CMD_IFCS; + + cmd_type_len |= IXGBE_ADVTXD_DTYP_DATA; + + cmd_type_len |= IXGBE_ADVTXD_DCMD_IFCS | IXGBE_ADVTXD_DCMD_DEXT; + + if (tx_flags & IXGBE_TX_FLAGS_VLAN) + cmd_type_len |= IXGBE_ADVTXD_DCMD_VLE; + + if (tx_flags & IXGBE_TX_FLAGS_TSO) { + cmd_type_len |= IXGBE_ADVTXD_DCMD_TSE; + + olinfo_status |= IXGBE_TXD_POPTS_TXSM << + IXGBE_ADVTXD_POPTS_SHIFT; + + if (tx_flags & IXGBE_TX_FLAGS_IPV4) + olinfo_status |= IXGBE_TXD_POPTS_IXSM << + IXGBE_ADVTXD_POPTS_SHIFT; + + } else if (tx_flags & IXGBE_TX_FLAGS_CSUM) + olinfo_status |= IXGBE_TXD_POPTS_TXSM << + IXGBE_ADVTXD_POPTS_SHIFT; + + olinfo_status |= ((paylen - hdr_len) << IXGBE_ADVTXD_PAYLEN_SHIFT); + + i = tx_ring->next_to_use; + while (count--) { + tx_buffer_info = &tx_ring->tx_buffer_info[i]; + tx_desc = IXGBE_TX_DESC_ADV(*tx_ring, i); + tx_desc->read.buffer_addr = cpu_to_le64(tx_buffer_info->dma); + tx_desc->read.cmd_type_len = + cpu_to_le32(cmd_type_len | tx_buffer_info->length); + tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status); + + i++; + if (i == tx_ring->count) + i = 0; + } + + tx_desc->read.cmd_type_len |= cpu_to_le32(txd_cmd); + + /* + * Force 
memory writes to complete before letting h/w + * know there are new descriptors to fetch. (Only + * applicable for weak-ordered memory model archs, + * such as IA-64). + */ + wmb(); + + tx_ring->next_to_use = i; + writel(i, adapter->hw.hw_addr + tx_ring->tail); +} + +static int ixgbe_xmit_frame(struct sk_buff *skb, struct net_device *netdev) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + struct ixgbe_ring *tx_ring; + unsigned int len = skb->len; + unsigned int first; + unsigned int tx_flags = 0; + unsigned long flags = 0; + u8 hdr_len; + int tso; + unsigned int mss = 0; + int count = 0; + unsigned int f; + unsigned int nr_frags = skb_shinfo(skb)->nr_frags; + len -= skb->data_len; + + tx_ring = adapter->tx_ring; + + if (skb->len <= 0) { + dev_kfree_skb(skb); + return NETDEV_TX_OK; + } + mss = skb_shinfo(skb)->gso_size; + + if (mss) + count++; + else if (skb->ip_summed == CHECKSUM_PARTIAL) + count++; + + count += TXD_USE_COUNT(len); + for (f = 0; f < nr_frags; f++) + count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size); + + spin_lock_irqsave(&tx_ring->tx_lock, flags); + if (IXGBE_DESC_UNUSED(tx_ring) < (count + 2)) { + adapter->tx_busy++; + netif_stop_queue(netdev); + spin_unlock_irqrestore(&tx_ring->tx_lock, flags); + return NETDEV_TX_BUSY; + } + spin_unlock_irqrestore(&tx_ring->tx_lock, flags); + if (adapter->vlgrp && vlan_tx_tag_present(skb)) { + tx_flags |= IXGBE_TX_FLAGS_VLAN; + tx_flags |= (vlan_tx_tag_get(skb) << IXGBE_TX_FLAGS_VLAN_SHIFT); + } + + if (skb->protocol == ntohs(ETH_P_IP)) + tx_flags |= IXGBE_TX_FLAGS_IPV4; + first = tx_ring->next_to_use; + tso = ixgbe_tso(adapter, tx_ring, skb, tx_flags, &hdr_len); + if (tso < 0) { + dev_kfree_skb_any(skb); + return NETDEV_TX_OK; + } + + if (tso) + tx_flags |= IXGBE_TX_FLAGS_TSO; + else if (ixgbe_tx_csum(adapter, tx_ring, skb, tx_flags) && + (skb->ip_summed == CHECKSUM_PARTIAL)) + tx_flags |= IXGBE_TX_FLAGS_CSUM; + + ixgbe_tx_queue(adapter, tx_ring, tx_flags, + ixgbe_tx_map(adapter, tx_ring, skb, first), + skb->len, hdr_len); + + netdev->trans_start = jiffies; + + spin_lock_irqsave(&tx_ring->tx_lock, flags); + /* Make sure there is space in the ring for the next send. */ + if (IXGBE_DESC_UNUSED(tx_ring) < DESC_NEEDED) + netif_stop_queue(netdev); + spin_unlock_irqrestore(&tx_ring->tx_lock, flags); + + return NETDEV_TX_OK; +} + +/** + * ixgbe_get_stats - Get System Network Statistics + * @netdev: network interface device structure + * + * Returns the address of the device statistics structure. + * The statistics are actually updated from the timer callback. 
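The descriptor budgeting in ixgbe_xmit_frame above is worth making concrete: one context descriptor is reserved when TSO or checksum offload is used, the linear area and every page fragment each consume ceil(size / 16 KiB) data descriptors, and the queue is stopped unless that count plus two spare slots are free. A self-contained sketch of the same arithmetic for a hypothetical TSO frame:

#include <stdio.h>

#define MAX_TXD_PWR             14
#define MAX_DATA_PER_TXD        (1u << MAX_TXD_PWR)     /* 16 KiB per data descriptor */
#define TXD_USE_COUNT(S)        (((S) >> MAX_TXD_PWR) + \
                                 (((S) & (MAX_DATA_PER_TXD - 1)) ? 1 : 0))

int main(void)
{
        unsigned int linear_len = 1024;                 /* headers in the linear area */
        unsigned int frags[] = { 32768, 32768 };        /* two 32 KiB page fragments */
        unsigned int count = 0, f;

        count++;                                        /* context descriptor (TSO/csum) */
        count += TXD_USE_COUNT(linear_len);
        for (f = 0; f < sizeof(frags) / sizeof(frags[0]); f++)
                count += TXD_USE_COUNT(frags[f]);

        printf("descriptors needed: %u (queue stops unless %u are free)\n",
               count, count + 2);
        return 0;
}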
+ **/ +static struct net_device_stats *ixgbe_get_stats(struct net_device *netdev) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + + /* only return the current stats */ + return &adapter->net_stats; +} + +/** + * ixgbe_set_mac - Change the Ethernet Address of the NIC + * @netdev: network interface device structure + * @p: pointer to an address structure + * + * Returns 0 on success, negative on failure + **/ +static int ixgbe_set_mac(struct net_device *netdev, void *p) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + struct sockaddr *addr = p; + + if (!is_valid_ether_addr(addr->sa_data)) + return -EADDRNOTAVAIL; + + memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); + memcpy(adapter->hw.mac.addr, addr->sa_data, netdev->addr_len); + + ixgbe_set_rar(&adapter->hw, 0, adapter->hw.mac.addr, 0, IXGBE_RAH_AV); + + return 0; +} + +#ifdef CONFIG_NET_POLL_CONTROLLER +/* + * Polling 'interrupt' - used by things like netconsole to send skbs + * without having to re-enable interrupts. It's not called while + * the interrupt routine is executing. + */ +static void ixgbe_netpoll(struct net_device *netdev) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + + disable_irq(adapter->pdev->irq); + adapter->flags |= IXGBE_FLAG_IN_NETPOLL; + ixgbe_intr(adapter->pdev->irq, netdev); + adapter->flags &= ~IXGBE_FLAG_IN_NETPOLL; + enable_irq(adapter->pdev->irq); +} +#endif + +/** + * ixgbe_probe - Device Initialization Routine + * @pdev: PCI device information struct + * @ent: entry in ixgbe_pci_tbl + * + * Returns 0 on success, negative on failure + * + * ixgbe_probe initializes an adapter identified by a pci_dev structure. + * The OS initialization, configuring of the adapter private structure, + * and a hardware reset occur. + **/ +static int __devinit ixgbe_probe(struct pci_dev *pdev, + const struct pci_device_id *ent) +{ + struct net_device *netdev; + struct ixgbe_adapter *adapter = NULL; + struct ixgbe_hw *hw; + const struct ixgbe_info *ii = ixgbe_info_tbl[ent->driver_data]; + unsigned long mmio_start, mmio_len; + static int cards_found; + int i, err, pci_using_dac; + u16 link_status, link_speed, link_width; + u32 part_num; + + err = pci_enable_device(pdev); + if (err) + return err; + + if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK) && + !pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK)) { + pci_using_dac = 1; + } else { + err = pci_set_dma_mask(pdev, DMA_32BIT_MASK); + if (err) { + err = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK); + if (err) { + dev_err(&pdev->dev, "No usable DMA " + "configuration, aborting\n"); + goto err_dma; + } + } + pci_using_dac = 0; + } + + err = pci_request_regions(pdev, ixgbe_driver_name); + if (err) { + dev_err(&pdev->dev, "pci_request_regions failed 0x%x\n", err); + goto err_pci_reg; + } + + pci_set_master(pdev); + + netdev = alloc_etherdev(sizeof(struct ixgbe_adapter)); + if (!netdev) { + err = -ENOMEM; + goto err_alloc_etherdev; + } + + SET_NETDEV_DEV(netdev, &pdev->dev); + + pci_set_drvdata(pdev, netdev); + adapter = netdev_priv(netdev); + + adapter->netdev = netdev; + adapter->pdev = pdev; + hw = &adapter->hw; + hw->back = adapter; + adapter->msg_enable = (1 << DEFAULT_DEBUG_LEVEL_SHIFT) - 1; + + mmio_start = pci_resource_start(pdev, 0); + mmio_len = pci_resource_len(pdev, 0); + + hw->hw_addr = ioremap(mmio_start, mmio_len); + if (!hw->hw_addr) { + err = -EIO; + goto err_ioremap; + } + + for (i = 1; i <= 5; i++) { + if (pci_resource_len(pdev, i) == 0) + continue; + } + + netdev->open = &ixgbe_open; + netdev->stop = &ixgbe_close; + 
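A short aside on the DMA setup at the top of ixgbe_probe above: the driver first asks for 64-bit DMA addressing and only falls back to a 32-bit mask if that fails, remembering the outcome so NETIF_F_HIGHDMA can be advertised later. A hedged userspace model of that decision; set_dma_mask() is a stand-in, and the consistent-mask step is omitted:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* stand-in for pci_set_dma_mask(): pretend only 32-bit addressing works */
static int set_dma_mask(uint64_t mask)
{
        return (mask > 0xffffffffULL) ? -5 /* -EIO */ : 0;
}

int main(void)
{
        bool using_dac;

        if (!set_dma_mask(0xffffffffffffffffULL)) {
                using_dac = true;       /* 64-bit DMA: highmem pages are fine */
        } else if (!set_dma_mask(0xffffffffULL)) {
                using_dac = false;      /* fall back to 32-bit addressing */
        } else {
                fprintf(stderr, "No usable DMA configuration, aborting\n");
                return 1;
        }

        printf("HIGHDMA %s\n", using_dac ? "enabled" : "disabled");
        return 0;
}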
netdev->hard_start_xmit = &ixgbe_xmit_frame; + netdev->get_stats = &ixgbe_get_stats; + netdev->set_multicast_list = &ixgbe_set_multi; + netdev->set_mac_address = &ixgbe_set_mac; + netdev->change_mtu = &ixgbe_change_mtu; + ixgbe_set_ethtool_ops(netdev); + netdev->tx_timeout = &ixgbe_tx_timeout; + netdev->watchdog_timeo = 5 * HZ; + netif_napi_add(netdev, &adapter->napi, ixgbe_clean, 64); + netdev->vlan_rx_register = ixgbe_vlan_rx_register; + netdev->vlan_rx_add_vid = ixgbe_vlan_rx_add_vid; + netdev->vlan_rx_kill_vid = ixgbe_vlan_rx_kill_vid; +#ifdef CONFIG_NET_POLL_CONTROLLER + netdev->poll_controller = ixgbe_netpoll; +#endif + strcpy(netdev->name, pci_name(pdev)); + + netdev->mem_start = mmio_start; + netdev->mem_end = mmio_start + mmio_len; + + adapter->bd_number = cards_found; + + /* PCI config space info */ + hw->vendor_id = pdev->vendor; + hw->device_id = pdev->device; + hw->revision_id = pdev->revision; + hw->subsystem_vendor_id = pdev->subsystem_vendor; + hw->subsystem_device_id = pdev->subsystem_device; + + /* Setup hw api */ + memcpy(&hw->mac.ops, ii->mac_ops, sizeof(hw->mac.ops)); + memcpy(&hw->phy.ops, ii->phy_ops, sizeof(hw->phy.ops)); + + err = ii->get_invariants(hw); + if (err) + goto err_hw_init; + + /* setup the private structure */ + err = ixgbe_sw_init(adapter); + if (err) + goto err_sw_init; + + netdev->features = NETIF_F_SG | + NETIF_F_HW_CSUM | + NETIF_F_HW_VLAN_TX | + NETIF_F_HW_VLAN_RX | + NETIF_F_HW_VLAN_FILTER; + + netdev->features |= NETIF_F_TSO; + + netdev->features |= NETIF_F_TSO6; + if (pci_using_dac) + netdev->features |= NETIF_F_HIGHDMA; + + + /* make sure the EEPROM is good */ + if (ixgbe_validate_eeprom_checksum(hw, NULL) < 0) { + dev_err(&pdev->dev, "The EEPROM Checksum Is Not Valid\n"); + err = -EIO; + goto err_eeprom; + } + + memcpy(netdev->dev_addr, hw->mac.perm_addr, netdev->addr_len); + memcpy(netdev->perm_addr, hw->mac.perm_addr, netdev->addr_len); + + if (ixgbe_validate_mac_addr(netdev->dev_addr)) { + err = -EIO; + goto err_eeprom; + } + + init_timer(&adapter->watchdog_timer); + adapter->watchdog_timer.function = &ixgbe_watchdog; + adapter->watchdog_timer.data = (unsigned long)adapter; + + INIT_WORK(&adapter->reset_task, ixgbe_reset_task); + + /* initialize default flow control settings */ + hw->fc.original_type = ixgbe_fc_full; + hw->fc.type = ixgbe_fc_full; + hw->fc.high_water = IXGBE_DEFAULT_FCRTH; + hw->fc.low_water = IXGBE_DEFAULT_FCRTL; + hw->fc.pause_time = IXGBE_DEFAULT_FCPAUSE; + + /* Interrupt Throttle Rate */ + adapter->rx_eitr = (1000000 / IXGBE_DEFAULT_ITR_RX_USECS); + adapter->tx_eitr = (1000000 / IXGBE_DEFAULT_ITR_TX_USECS); + + /* print bus type/speed/width info */ + pci_read_config_word(pdev, IXGBE_PCI_LINK_STATUS, &link_status); + link_speed = link_status & IXGBE_PCI_LINK_SPEED; + link_width = link_status & IXGBE_PCI_LINK_WIDTH; + dev_info(&pdev->dev, "(PCI Express:%s:%s) " + "%02x:%02x:%02x:%02x:%02x:%02x\n", + ((link_speed == IXGBE_PCI_LINK_SPEED_5000) ? "5.0Gb/s" : + (link_speed == IXGBE_PCI_LINK_SPEED_2500) ? "2.5Gb/s" : + "Unknown"), + ((link_width == IXGBE_PCI_LINK_WIDTH_8) ? "Width x8" : + (link_width == IXGBE_PCI_LINK_WIDTH_4) ? "Width x4" : + (link_width == IXGBE_PCI_LINK_WIDTH_2) ? "Width x2" : + (link_width == IXGBE_PCI_LINK_WIDTH_1) ? 
"Width x1" : + "Unknown"), + netdev->dev_addr[0], netdev->dev_addr[1], netdev->dev_addr[2], + netdev->dev_addr[3], netdev->dev_addr[4], netdev->dev_addr[5]); + ixgbe_read_part_num(hw, &part_num); + dev_info(&pdev->dev, "MAC: %d, PHY: %d, PBA No: %06x-%03x\n", + hw->mac.type, hw->phy.type, + (part_num >> 8), (part_num & 0xff)); + + /* reset the hardware with the new settings */ + ixgbe_start_hw(hw); + + netif_carrier_off(netdev); + netif_stop_queue(netdev); + + strcpy(netdev->name, "eth%d"); + err = register_netdev(netdev); + if (err) + goto err_register; + + + dev_info(&pdev->dev, "Intel(R) 10 Gigabit Network Connection\n"); + cards_found++; + return 0; + +err_register: +err_hw_init: +err_sw_init: +err_eeprom: + iounmap(hw->hw_addr); +err_ioremap: + free_netdev(netdev); +err_alloc_etherdev: + pci_release_regions(pdev); +err_pci_reg: +err_dma: + pci_disable_device(pdev); + return err; +} + +/** + * ixgbe_remove - Device Removal Routine + * @pdev: PCI device information struct + * + * ixgbe_remove is called by the PCI subsystem to alert the driver + * that it should release a PCI device. The could be caused by a + * Hot-Plug event, or because the driver is going to be removed from + * memory. + **/ +static void __devexit ixgbe_remove(struct pci_dev *pdev) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct ixgbe_adapter *adapter = netdev_priv(netdev); + + set_bit(__IXGBE_DOWN, &adapter->state); + del_timer_sync(&adapter->watchdog_timer); + + flush_scheduled_work(); + + unregister_netdev(netdev); + + kfree(adapter->tx_ring); + kfree(adapter->rx_ring); + + iounmap(adapter->hw.hw_addr); + pci_release_regions(pdev); + + free_netdev(netdev); + + pci_disable_device(pdev); +} + +/** + * ixgbe_io_error_detected - called when PCI error is detected + * @pdev: Pointer to PCI device + * @state: The current pci connection state + * + * This function is called after a PCI bus error affecting + * this device has been detected. + */ +static pci_ers_result_t ixgbe_io_error_detected(struct pci_dev *pdev, + pci_channel_state_t state) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct ixgbe_adapter *adapter = netdev->priv; + + netif_device_detach(netdev); + + if (netif_running(netdev)) + ixgbe_down(adapter); + pci_disable_device(pdev); + + /* Request a slot slot reset. */ + return PCI_ERS_RESULT_NEED_RESET; +} + +/** + * ixgbe_io_slot_reset - called after the pci bus has been reset. + * @pdev: Pointer to PCI device + * + * Restart the card from scratch, as if from a cold-boot. + */ +static pci_ers_result_t ixgbe_io_slot_reset(struct pci_dev *pdev) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct ixgbe_adapter *adapter = netdev->priv; + + if (pci_enable_device(pdev)) { + DPRINTK(PROBE, ERR, + "Cannot re-enable PCI device after reset.\n"); + return PCI_ERS_RESULT_DISCONNECT; + } + pci_set_master(pdev); + + pci_enable_wake(pdev, PCI_D3hot, 0); + pci_enable_wake(pdev, PCI_D3cold, 0); + + ixgbe_reset(adapter); + + return PCI_ERS_RESULT_RECOVERED; +} + +/** + * ixgbe_io_resume - called when traffic can start flowing again. + * @pdev: Pointer to PCI device + * + * This callback is called when the error recovery driver tells us that + * its OK to resume normal operation. 
+ */ +static void ixgbe_io_resume(struct pci_dev *pdev) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct ixgbe_adapter *adapter = netdev->priv; + + if (netif_running(netdev)) { + if (ixgbe_up(adapter)) { + DPRINTK(PROBE, INFO, "ixgbe_up failed after reset\n"); + return; + } + } + + netif_device_attach(netdev); + +} + +static struct pci_error_handlers ixgbe_err_handler = { + .error_detected = ixgbe_io_error_detected, + .slot_reset = ixgbe_io_slot_reset, + .resume = ixgbe_io_resume, +}; + +static struct pci_driver ixgbe_driver = { + .name = ixgbe_driver_name, + .id_table = ixgbe_pci_tbl, + .probe = ixgbe_probe, + .remove = __devexit_p(ixgbe_remove), +#ifdef CONFIG_PM + .suspend = ixgbe_suspend, + .resume = ixgbe_resume, +#endif + .shutdown = ixgbe_shutdown, + .err_handler = &ixgbe_err_handler +}; + +/** + * ixgbe_init_module - Driver Registration Routine + * + * ixgbe_init_module is the first routine called when the driver is + * loaded. All it does is register with the PCI subsystem. + **/ +static int __init ixgbe_init_module(void) +{ + int ret; + printk(KERN_INFO "%s: %s - version %s\n", ixgbe_driver_name, + ixgbe_driver_string, ixgbe_driver_version); + + printk(KERN_INFO "%s: %s\n", ixgbe_driver_name, ixgbe_copyright); + + ret = pci_register_driver(&ixgbe_driver); + return ret; +} +module_init(ixgbe_init_module); + +/** + * ixgbe_exit_module - Driver Exit Cleanup Routine + * + * ixgbe_exit_module is called just before the driver is removed + * from memory. + **/ +static void __exit ixgbe_exit_module(void) +{ + pci_unregister_driver(&ixgbe_driver); +} +module_exit(ixgbe_exit_module); + +/* ixgbe_main.c */ diff --git a/drivers/net/ixgbe/ixgbe_phy.c b/drivers/net/ixgbe/ixgbe_phy.c new file mode 100644 index 0000000000000000000000000000000000000000..8002931ae82371d62f1f410a28ca0df5ba4746c6 --- /dev/null +++ b/drivers/net/ixgbe/ixgbe_phy.c @@ -0,0 +1,494 @@ +/******************************************************************************* + + Intel 10 Gigabit PCI Express Linux driver + Copyright(c) 1999 - 2007 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + Linux NICS + e1000-devel Mailing List + Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +#include +#include +#include + +#include "ixgbe_common.h" +#include "ixgbe_phy.h" + +static enum ixgbe_phy_type ixgbe_get_phy_type_from_id(u32 phy_id); +static s32 ixgbe_get_phy_id(struct ixgbe_hw *hw); +static bool ixgbe_validate_phy_addr(struct ixgbe_hw *hw, u32 phy_addr); +static s32 ixgbe_write_phy_reg(struct ixgbe_hw *hw, u32 reg_addr, + u32 device_type, u16 phy_data); + +/** + * ixgbe_identify_phy - Get physical layer module + * @hw: pointer to hardware structure + * + * Determines the physical layer module found on the current adapter. + **/ +s32 ixgbe_identify_phy(struct ixgbe_hw *hw) +{ + s32 status = IXGBE_ERR_PHY_ADDR_INVALID; + u32 phy_addr; + + for (phy_addr = 0; phy_addr < IXGBE_MAX_PHY_ADDR; phy_addr++) { + if (ixgbe_validate_phy_addr(hw, phy_addr)) { + hw->phy.addr = phy_addr; + ixgbe_get_phy_id(hw); + hw->phy.type = ixgbe_get_phy_type_from_id(hw->phy.id); + status = 0; + break; + } + } + return status; +} + +/** + * ixgbe_validate_phy_addr - Determines phy address is valid + * @hw: pointer to hardware structure + * + **/ +static bool ixgbe_validate_phy_addr(struct ixgbe_hw *hw, u32 phy_addr) +{ + u16 phy_id = 0; + bool valid = false; + + hw->phy.addr = phy_addr; + ixgbe_read_phy_reg(hw, + IXGBE_MDIO_PHY_ID_HIGH, + IXGBE_MDIO_PMA_PMD_DEV_TYPE, + &phy_id); + + if (phy_id != 0xFFFF && phy_id != 0x0) + valid = true; + + return valid; +} + +/** + * ixgbe_get_phy_id - Get the phy type + * @hw: pointer to hardware structure + * + **/ +static s32 ixgbe_get_phy_id(struct ixgbe_hw *hw) +{ + u32 status; + u16 phy_id_high = 0; + u16 phy_id_low = 0; + + status = ixgbe_read_phy_reg(hw, + IXGBE_MDIO_PHY_ID_HIGH, + IXGBE_MDIO_PMA_PMD_DEV_TYPE, + &phy_id_high); + + if (status == 0) { + hw->phy.id = (u32)(phy_id_high << 16); + status = ixgbe_read_phy_reg(hw, + IXGBE_MDIO_PHY_ID_LOW, + IXGBE_MDIO_PMA_PMD_DEV_TYPE, + &phy_id_low); + hw->phy.id |= (u32)(phy_id_low & IXGBE_PHY_REVISION_MASK); + hw->phy.revision = (u32)(phy_id_low & ~IXGBE_PHY_REVISION_MASK); + } + + return status; +} + +/** + * ixgbe_get_phy_type_from_id - Get the phy type + * @hw: pointer to hardware structure + * + **/ +static enum ixgbe_phy_type ixgbe_get_phy_type_from_id(u32 phy_id) +{ + enum ixgbe_phy_type phy_type; + + switch (phy_id) { + case TN1010_PHY_ID: + phy_type = ixgbe_phy_tn; + break; + case QT2022_PHY_ID: + phy_type = ixgbe_phy_qt; + break; + default: + phy_type = ixgbe_phy_unknown; + break; + } + + return phy_type; +} + +/** + * ixgbe_reset_phy - Performs a PHY reset + * @hw: pointer to hardware structure + **/ +s32 ixgbe_reset_phy(struct ixgbe_hw *hw) +{ + /* + * Perform soft PHY reset to the PHY_XS. 
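Regarding ixgbe_get_phy_id above: the 32-bit PHY identifier is assembled from two 16-bit MDIO registers, with the low register split between identifier bits and a small revision field. A standalone sketch of that assembly; the mask value and the sample register contents are assumptions for illustration only:

#include <stdint.h>
#include <stdio.h>

#define PHY_REVISION_MASK       0xfffffff0u     /* assumed: low 4 bits hold the revision */

int main(void)
{
        uint16_t phy_id_high = 0x00aa;          /* hypothetical MDIO read */
        uint16_t phy_id_low  = 0xbb14;          /* hypothetical MDIO read */
        uint32_t id, revision;

        id = (uint32_t)phy_id_high << 16;
        id |= (uint32_t)phy_id_low & PHY_REVISION_MASK;
        revision = (uint32_t)phy_id_low & ~PHY_REVISION_MASK;

        printf("phy id = 0x%08x, revision = %u\n", id, revision);
        return 0;
}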
+ * This will cause a soft reset to the PHY + */ + return ixgbe_write_phy_reg(hw, IXGBE_MDIO_PHY_XS_CONTROL, + IXGBE_MDIO_PHY_XS_DEV_TYPE, + IXGBE_MDIO_PHY_XS_RESET); +} + +/** + * ixgbe_read_phy_reg - Reads a value from a specified PHY register + * @hw: pointer to hardware structure + * @reg_addr: 32 bit address of PHY register to read + * @phy_data: Pointer to read data from PHY register + **/ +s32 ixgbe_read_phy_reg(struct ixgbe_hw *hw, u32 reg_addr, + u32 device_type, u16 *phy_data) +{ + u32 command; + u32 i; + u32 timeout = 10; + u32 data; + s32 status = 0; + u16 gssr; + + if (IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_LAN_ID_1) + gssr = IXGBE_GSSR_PHY1_SM; + else + gssr = IXGBE_GSSR_PHY0_SM; + + if (ixgbe_acquire_swfw_sync(hw, gssr) != 0) + status = IXGBE_ERR_SWFW_SYNC; + + if (status == 0) { + /* Setup and write the address cycle command */ + command = ((reg_addr << IXGBE_MSCA_NP_ADDR_SHIFT) | + (device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) | + (hw->phy.addr << IXGBE_MSCA_PHY_ADDR_SHIFT) | + (IXGBE_MSCA_ADDR_CYCLE | IXGBE_MSCA_MDI_COMMAND)); + + IXGBE_WRITE_REG(hw, IXGBE_MSCA, command); + + /* + * Check every 10 usec to see if the address cycle completed. + * The MDI Command bit will clear when the operation is + * complete + */ + for (i = 0; i < timeout; i++) { + udelay(10); + + command = IXGBE_READ_REG(hw, IXGBE_MSCA); + + if ((command & IXGBE_MSCA_MDI_COMMAND) == 0) + break; + } + + if ((command & IXGBE_MSCA_MDI_COMMAND) != 0) { + hw_dbg(hw, "PHY address command did not complete.\n"); + status = IXGBE_ERR_PHY; + } + + if (status == 0) { + /* + * Address cycle complete, setup and write the read + * command + */ + command = ((reg_addr << IXGBE_MSCA_NP_ADDR_SHIFT) | + (device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) | + (hw->phy.addr << IXGBE_MSCA_PHY_ADDR_SHIFT) | + (IXGBE_MSCA_READ | IXGBE_MSCA_MDI_COMMAND)); + + IXGBE_WRITE_REG(hw, IXGBE_MSCA, command); + + /* + * Check every 10 usec to see if the address cycle + * completed. The MDI Command bit will clear when the + * operation is complete + */ + for (i = 0; i < timeout; i++) { + udelay(10); + + command = IXGBE_READ_REG(hw, IXGBE_MSCA); + + if ((command & IXGBE_MSCA_MDI_COMMAND) == 0) + break; + } + + if ((command & IXGBE_MSCA_MDI_COMMAND) != 0) { + hw_dbg(hw, + "PHY read command didn't complete\n"); + status = IXGBE_ERR_PHY; + } else { + /* + * Read operation is complete. 
Get the data + * from MSRWD + */ + data = IXGBE_READ_REG(hw, IXGBE_MSRWD); + data >>= IXGBE_MSRWD_READ_DATA_SHIFT; + *phy_data = (u16)(data); + } + } + + ixgbe_release_swfw_sync(hw, gssr); + } + return status; +} + +/** + * ixgbe_write_phy_reg - Writes a value to specified PHY register + * @hw: pointer to hardware structure + * @reg_addr: 32 bit PHY register to write + * @device_type: 5 bit device type + * @phy_data: Data to write to the PHY register + **/ +static s32 ixgbe_write_phy_reg(struct ixgbe_hw *hw, u32 reg_addr, + u32 device_type, u16 phy_data) +{ + u32 command; + u32 i; + u32 timeout = 10; + s32 status = 0; + u16 gssr; + + if (IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_LAN_ID_1) + gssr = IXGBE_GSSR_PHY1_SM; + else + gssr = IXGBE_GSSR_PHY0_SM; + + if (ixgbe_acquire_swfw_sync(hw, gssr) != 0) + status = IXGBE_ERR_SWFW_SYNC; + + if (status == 0) { + /* Put the data in the MDI single read and write data register*/ + IXGBE_WRITE_REG(hw, IXGBE_MSRWD, (u32)phy_data); + + /* Setup and write the address cycle command */ + command = ((reg_addr << IXGBE_MSCA_NP_ADDR_SHIFT) | + (device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) | + (hw->phy.addr << IXGBE_MSCA_PHY_ADDR_SHIFT) | + (IXGBE_MSCA_ADDR_CYCLE | IXGBE_MSCA_MDI_COMMAND)); + + IXGBE_WRITE_REG(hw, IXGBE_MSCA, command); + + /* + * Check every 10 usec to see if the address cycle completed. + * The MDI Command bit will clear when the operation is + * complete + */ + for (i = 0; i < timeout; i++) { + udelay(10); + + command = IXGBE_READ_REG(hw, IXGBE_MSCA); + + if ((command & IXGBE_MSCA_MDI_COMMAND) == 0) { + hw_dbg(hw, "PHY address cmd didn't complete\n"); + break; + } + } + + if ((command & IXGBE_MSCA_MDI_COMMAND) != 0) + status = IXGBE_ERR_PHY; + + if (status == 0) { + /* + * Address cycle complete, setup and write the write + * command + */ + command = ((reg_addr << IXGBE_MSCA_NP_ADDR_SHIFT) | + (device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) | + (hw->phy.addr << IXGBE_MSCA_PHY_ADDR_SHIFT) | + (IXGBE_MSCA_WRITE | IXGBE_MSCA_MDI_COMMAND)); + + IXGBE_WRITE_REG(hw, IXGBE_MSCA, command); + + /* + * Check every 10 usec to see if the address cycle + * completed. The MDI Command bit will clear when the + * operation is complete + */ + for (i = 0; i < timeout; i++) { + udelay(10); + + command = IXGBE_READ_REG(hw, IXGBE_MSCA); + + if ((command & IXGBE_MSCA_MDI_COMMAND) == 0) { + hw_dbg(hw, "PHY write command did not " + "complete.\n"); + break; + } + } + + if ((command & IXGBE_MSCA_MDI_COMMAND) != 0) + status = IXGBE_ERR_PHY; + } + + ixgbe_release_swfw_sync(hw, gssr); + } + + return status; +} + +/** + * ixgbe_setup_tnx_phy_link - Set and restart autoneg + * @hw: pointer to hardware structure + * + * Restart autonegotiation and PHY and waits for completion. + **/ +s32 ixgbe_setup_tnx_phy_link(struct ixgbe_hw *hw) +{ + s32 status = IXGBE_NOT_IMPLEMENTED; + u32 time_out; + u32 max_time_out = 10; + u16 autoneg_speed_selection_register = 0x10; + u16 autoneg_restart_mask = 0x0200; + u16 autoneg_complete_mask = 0x0020; + u16 autoneg_reg = 0; + + /* + * Set advertisement settings in PHY based on autoneg_advertised + * settings. If autoneg_advertised = 0, then advertise default values + * txn devices cannot be "forced" to a autoneg 10G and fail. But can + * for a 1G. 
+ */ + ixgbe_read_phy_reg(hw, + autoneg_speed_selection_register, + IXGBE_MDIO_AUTO_NEG_DEV_TYPE, + &autoneg_reg); + + if (hw->phy.autoneg_advertised == IXGBE_LINK_SPEED_1GB_FULL) + autoneg_reg &= 0xEFFF; /* 0 in bit 12 is 1G operation */ + else + autoneg_reg |= 0x1000; /* 1 in bit 12 is 10G/1G operation */ + + ixgbe_write_phy_reg(hw, + autoneg_speed_selection_register, + IXGBE_MDIO_AUTO_NEG_DEV_TYPE, + autoneg_reg); + + + /* Restart PHY autonegotiation and wait for completion */ + ixgbe_read_phy_reg(hw, + IXGBE_MDIO_AUTO_NEG_CONTROL, + IXGBE_MDIO_AUTO_NEG_DEV_TYPE, + &autoneg_reg); + + autoneg_reg |= autoneg_restart_mask; + + ixgbe_write_phy_reg(hw, + IXGBE_MDIO_AUTO_NEG_CONTROL, + IXGBE_MDIO_AUTO_NEG_DEV_TYPE, + autoneg_reg); + + /* Wait for autonegotiation to finish */ + for (time_out = 0; time_out < max_time_out; time_out++) { + udelay(10); + /* Restart PHY autonegotiation and wait for completion */ + status = ixgbe_read_phy_reg(hw, + IXGBE_MDIO_AUTO_NEG_STATUS, + IXGBE_MDIO_AUTO_NEG_DEV_TYPE, + &autoneg_reg); + + autoneg_reg &= autoneg_complete_mask; + if (autoneg_reg == autoneg_complete_mask) { + status = 0; + break; + } + } + + if (time_out == max_time_out) + status = IXGBE_ERR_LINK_SETUP; + + return status; +} + +/** + * ixgbe_check_tnx_phy_link - Determine link and speed status + * @hw: pointer to hardware structure + * + * Reads the VS1 register to determine if link is up and the current speed for + * the PHY. + **/ +s32 ixgbe_check_tnx_phy_link(struct ixgbe_hw *hw, u32 *speed, + bool *link_up) +{ + s32 status = 0; + u32 time_out; + u32 max_time_out = 10; + u16 phy_link = 0; + u16 phy_speed = 0; + u16 phy_data = 0; + + /* Initialize speed and link to default case */ + *link_up = false; + *speed = IXGBE_LINK_SPEED_10GB_FULL; + + /* + * Check current speed and link status of the PHY register. + * This is a vendor specific register and may have to + * be changed for other copper PHYs. + */ + for (time_out = 0; time_out < max_time_out; time_out++) { + udelay(10); + if (phy_link == IXGBE_MDIO_VENDOR_SPECIFIC_1_LINK_STATUS) { + *link_up = true; + if (phy_speed == + IXGBE_MDIO_VENDOR_SPECIFIC_1_SPEED_STATUS) + *speed = IXGBE_LINK_SPEED_1GB_FULL; + break; + } else { + status = ixgbe_read_phy_reg(hw, + IXGBE_MDIO_VENDOR_SPECIFIC_1_STATUS, + IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, + &phy_data); + phy_link = phy_data & + IXGBE_MDIO_VENDOR_SPECIFIC_1_LINK_STATUS; + phy_speed = phy_data & + IXGBE_MDIO_VENDOR_SPECIFIC_1_SPEED_STATUS; + } + } + + return status; +} + +/** + * ixgbe_setup_tnx_phy_link_speed - Sets the auto advertised capabilities + * @hw: pointer to hardware structure + * @speed: new link speed + * @autoneg: true if autonegotiation enabled + **/ +s32 ixgbe_setup_tnx_phy_link_speed(struct ixgbe_hw *hw, u32 speed, + bool autoneg, + bool autoneg_wait_to_complete) +{ + /* + * Clear autoneg_advertised and set new values based on input link + * speed. 
+ */ + hw->phy.autoneg_advertised = 0; + + if (speed & IXGBE_LINK_SPEED_10GB_FULL) + hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_10GB_FULL; + if (speed & IXGBE_LINK_SPEED_1GB_FULL) + hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_1GB_FULL; + + /* Setup link based on the new speed settings */ + ixgbe_setup_tnx_phy_link(hw); + + return 0; +} diff --git a/drivers/net/ixgbe/ixgbe_phy.h b/drivers/net/ixgbe/ixgbe_phy.h new file mode 100644 index 0000000000000000000000000000000000000000..199e8f670f3a589cd50a680cdcc2869263e4b36a --- /dev/null +++ b/drivers/net/ixgbe/ixgbe_phy.h @@ -0,0 +1,50 @@ +/******************************************************************************* + + Intel 10 Gigabit PCI Express Linux driver + Copyright(c) 1999 - 2007 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + Linux NICS + e1000-devel Mailing List + Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +#ifndef _IXGBE_PHY_H_ +#define _IXGBE_PHY_H_ + +#include "ixgbe_type.h" + +s32 ixgbe_init_shared_code_phy(struct ixgbe_hw *hw); +s32 ixgbe_setup_phy_link(struct ixgbe_hw *hw); +s32 ixgbe_check_phy_link(struct ixgbe_hw *hw, u32 *speed, bool *link_up); +s32 ixgbe_setup_phy_link_speed(struct ixgbe_hw *hw, u32 speed, bool autoneg, + bool autoneg_wait_to_complete); +s32 ixgbe_identify_phy(struct ixgbe_hw *hw); +s32 ixgbe_reset_phy(struct ixgbe_hw *hw); +s32 ixgbe_read_phy_reg(struct ixgbe_hw *hw, u32 reg_addr, + u32 device_type, u16 *phy_data); + +/* PHY specific */ +s32 ixgbe_setup_tnx_phy_link(struct ixgbe_hw *hw); +s32 ixgbe_check_tnx_phy_link(struct ixgbe_hw *hw, u32 *speed, bool *link_up); +s32 ixgbe_setup_tnx_phy_link_speed(struct ixgbe_hw *hw, u32 speed, bool autoneg, + bool autoneg_wait_to_complete); + +#endif /* _IXGBE_PHY_H_ */ diff --git a/drivers/net/ixgbe/ixgbe_type.h b/drivers/net/ixgbe/ixgbe_type.h new file mode 100644 index 0000000000000000000000000000000000000000..fdcde16a2a9987c0e9e504e74ddc5e6db8648039 --- /dev/null +++ b/drivers/net/ixgbe/ixgbe_type.h @@ -0,0 +1,1332 @@ +/******************************************************************************* + + Intel 10 Gigabit PCI Express Linux driver + Copyright(c) 1999 - 2007 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. 
+ + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + Linux NICS + e1000-devel Mailing List + Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +#ifndef _IXGBE_TYPE_H_ +#define _IXGBE_TYPE_H_ + +#include <linux/types.h> + +/* Vendor ID */ +#define IXGBE_INTEL_VENDOR_ID 0x8086 + +/* Device IDs */ +#define IXGBE_DEV_ID_82598AF_DUAL_PORT 0x10C6 +#define IXGBE_DEV_ID_82598AF_SINGLE_PORT 0x10C7 +#define IXGBE_DEV_ID_82598AT_DUAL_PORT 0x10C8 +#define IXGBE_DEV_ID_82598EB_CX4 0x10DD + +/* General Registers */ +#define IXGBE_CTRL 0x00000 +#define IXGBE_STATUS 0x00008 +#define IXGBE_CTRL_EXT 0x00018 +#define IXGBE_ESDP 0x00020 +#define IXGBE_EODSDP 0x00028 +#define IXGBE_LEDCTL 0x00200 +#define IXGBE_FRTIMER 0x00048 +#define IXGBE_TCPTIMER 0x0004C + +/* NVM Registers */ +#define IXGBE_EEC 0x10010 +#define IXGBE_EERD 0x10014 +#define IXGBE_FLA 0x1001C +#define IXGBE_EEMNGCTL 0x10110 +#define IXGBE_EEMNGDATA 0x10114 +#define IXGBE_FLMNGCTL 0x10118 +#define IXGBE_FLMNGDATA 0x1011C +#define IXGBE_FLMNGCNT 0x10120 +#define IXGBE_FLOP 0x1013C +#define IXGBE_GRC 0x10200 + +/* Interrupt Registers */ +#define IXGBE_EICR 0x00800 +#define IXGBE_EICS 0x00808 +#define IXGBE_EIMS 0x00880 +#define IXGBE_EIMC 0x00888 +#define IXGBE_EIAC 0x00810 +#define IXGBE_EIAM 0x00890 +#define IXGBE_EITR(_i) (0x00820 + ((_i) * 4)) /* 0x820-0x86c */ +#define IXGBE_IVAR(_i) (0x00900 + ((_i) * 4)) /* 24 at 0x900-0x960 */ +#define IXGBE_MSIXT 0x00000 /* MSI-X Table.
0x0000 - 0x01C */ +#define IXGBE_MSIXPBA 0x02000 /* MSI-X Pending bit array */ +#define IXGBE_PBACL 0x11068 +#define IXGBE_GPIE 0x00898 + +/* Flow Control Registers */ +#define IXGBE_PFCTOP 0x03008 +#define IXGBE_FCTTV(_i) (0x03200 + ((_i) * 4)) /* 4 of these (0-3) */ +#define IXGBE_FCRTL(_i) (0x03220 + ((_i) * 8)) /* 8 of these (0-7) */ +#define IXGBE_FCRTH(_i) (0x03260 + ((_i) * 8)) /* 8 of these (0-7) */ +#define IXGBE_FCRTV 0x032A0 +#define IXGBE_TFCS 0x0CE00 + +/* Receive DMA Registers */ +#define IXGBE_RDBAL(_i) (0x01000 + ((_i) * 0x40)) /* 64 of each (0-63)*/ +#define IXGBE_RDBAH(_i) (0x01004 + ((_i) * 0x40)) +#define IXGBE_RDLEN(_i) (0x01008 + ((_i) * 0x40)) +#define IXGBE_RDH(_i) (0x01010 + ((_i) * 0x40)) +#define IXGBE_RDT(_i) (0x01018 + ((_i) * 0x40)) +#define IXGBE_RXDCTL(_i) (0x01028 + ((_i) * 0x40)) +#define IXGBE_RSCCTL(_i) (0x0102C + ((_i) * 0x40)) +#define IXGBE_SRRCTL(_i) (0x02100 + ((_i) * 4)) + /* array of 16 (0x02100-0x0213C) */ +#define IXGBE_DCA_RXCTRL(_i) (0x02200 + ((_i) * 4)) + /* array of 16 (0x02200-0x0223C) */ +#define IXGBE_RDRXCTL 0x02F00 +#define IXGBE_RXPBSIZE(_i) (0x03C00 + ((_i) * 4)) + /* 8 of these 0x03C00 - 0x03C1C */ +#define IXGBE_RXCTRL 0x03000 +#define IXGBE_DROPEN 0x03D04 +#define IXGBE_RXPBSIZE_SHIFT 10 + +/* Receive Registers */ +#define IXGBE_RXCSUM 0x05000 +#define IXGBE_RFCTL 0x05008 +#define IXGBE_MTA(_i) (0x05200 + ((_i) * 4)) + /* Multicast Table Array - 128 entries */ +#define IXGBE_RAL(_i) (0x05400 + ((_i) * 8)) /* 16 of these (0-15) */ +#define IXGBE_RAH(_i) (0x05404 + ((_i) * 8)) /* 16 of these (0-15) */ +#define IXGBE_PSRTYPE 0x05480 + /* 0x5480-0x54BC Packet split receive type */ +#define IXGBE_VFTA(_i) (0x0A000 + ((_i) * 4)) + /* array of 4096 1-bit vlan filters */ +#define IXGBE_VFTAVIND(_j, _i) (0x0A200 + ((_j) * 0x200) + ((_i) * 4)) + /*array of 4096 4-bit vlan vmdq indicies */ +#define IXGBE_FCTRL 0x05080 +#define IXGBE_VLNCTRL 0x05088 +#define IXGBE_MCSTCTRL 0x05090 +#define IXGBE_MRQC 0x05818 +#define IXGBE_VMD_CTL 0x0581C +#define IXGBE_IMIR(_i) (0x05A80 + ((_i) * 4)) /* 8 of these (0-7) */ +#define IXGBE_IMIREXT(_i) (0x05AA0 + ((_i) * 4)) /* 8 of these (0-7) */ +#define IXGBE_IMIRVP 0x05AC0 +#define IXGBE_RETA(_i) (0x05C00 + ((_i) * 4)) /* 32 of these (0-31) */ +#define IXGBE_RSSRK(_i) (0x05C80 + ((_i) * 4)) /* 10 of these (0-9) */ + +/* Transmit DMA registers */ +#define IXGBE_TDBAL(_i) (0x06000 + ((_i) * 0x40))/* 32 of these (0-31)*/ +#define IXGBE_TDBAH(_i) (0x06004 + ((_i) * 0x40)) +#define IXGBE_TDLEN(_i) (0x06008 + ((_i) * 0x40)) +#define IXGBE_TDH(_i) (0x06010 + ((_i) * 0x40)) +#define IXGBE_TDT(_i) (0x06018 + ((_i) * 0x40)) +#define IXGBE_TXDCTL(_i) (0x06028 + ((_i) * 0x40)) +#define IXGBE_TDWBAL(_i) (0x06038 + ((_i) * 0x40)) +#define IXGBE_TDWBAH(_i) (0x0603C + ((_i) * 0x40)) +#define IXGBE_DTXCTL 0x07E00 +#define IXGBE_DCA_TXCTRL(_i) (0x07200 + ((_i) * 4)) + /* there are 16 of these (0-15) */ +#define IXGBE_TIPG 0x0CB00 +#define IXGBE_TXPBSIZE(_i) (0x0CC00 + ((_i) *0x04)) + /* there are 8 of these */ +#define IXGBE_MNGTXMAP 0x0CD10 +#define IXGBE_TIPG_FIBER_DEFAULT 3 +#define IXGBE_TXPBSIZE_SHIFT 10 + +/* Wake up registers */ +#define IXGBE_WUC 0x05800 +#define IXGBE_WUFC 0x05808 +#define IXGBE_WUS 0x05810 +#define IXGBE_IPAV 0x05838 +#define IXGBE_IP4AT 0x05840 /* IPv4 table 0x5840-0x5858 */ +#define IXGBE_IP6AT 0x05880 /* IPv6 table 0x5880-0x588F */ +#define IXGBE_WUPL 0x05900 +#define IXGBE_WUPM 0x05A00 /* wake up pkt memory 0x5A00-0x5A7C */ +#define IXGBE_FHFT 0x09000 /* Flex host filter table 9000-93FC */ + 
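/*
 * Editor's illustrative sketch; not part of the patch itself.  The
 * Receive DMA registers defined above are parameterized by queue index:
 * IXGBE_RDT(i), for example, expands to 0x01018 + i * 0x40, so each of
 * the 64 receive queues owns a 0x40-byte block of registers.  Assuming
 * the IXGBE_WRITE_REG() accessor used elsewhere in this patch,
 * programming one RX descriptor ring might look roughly like the
 * following; the function name and its parameters are hypothetical and
 * exist only to make the register layout concrete.
 */
static void example_program_rx_ring(struct ixgbe_hw *hw, int queue,
                                    u64 ring_dma, u32 num_descriptors)
{
	/* 64-bit ring base address is split across the low/high registers */
	IXGBE_WRITE_REG(hw, IXGBE_RDBAL(queue), (u32)(ring_dma & 0xFFFFFFFF));
	IXGBE_WRITE_REG(hw, IXGBE_RDBAH(queue), (u32)(ring_dma >> 32));

	/*
	 * Ring length is given in bytes; each descriptor is 16 bytes (see
	 * the legacy/advanced descriptor layouts later in this header),
	 * and num_descriptors is expected to be a multiple of
	 * IXGBE_REQ_RX_DESCRIPTOR_MULTIPLE (8).
	 */
	IXGBE_WRITE_REG(hw, IXGBE_RDLEN(queue),
	                num_descriptors * sizeof(union ixgbe_adv_rx_desc));

	/* head == tail == 0: the hardware starts with an empty ring */
	IXGBE_WRITE_REG(hw, IXGBE_RDH(queue), 0);
	IXGBE_WRITE_REG(hw, IXGBE_RDT(queue), 0);
}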
+/* Music registers */ +#define IXGBE_RMCS 0x03D00 +#define IXGBE_DPMCS 0x07F40 +#define IXGBE_PDPMCS 0x0CD00 +#define IXGBE_RUPPBMR 0x050A0 +#define IXGBE_RT2CR(_i) (0x03C20 + ((_i) * 4)) /* 8 of these (0-7) */ +#define IXGBE_RT2SR(_i) (0x03C40 + ((_i) * 4)) /* 8 of these (0-7) */ +#define IXGBE_TDTQ2TCCR(_i) (0x0602C + ((_i) * 0x40)) /* 8 of these (0-7) */ +#define IXGBE_TDTQ2TCSR(_i) (0x0622C + ((_i) * 0x40)) /* 8 of these (0-7) */ +#define IXGBE_TDPT2TCCR(_i) (0x0CD20 + ((_i) * 4)) /* 8 of these (0-7) */ +#define IXGBE_TDPT2TCSR(_i) (0x0CD40 + ((_i) * 4)) /* 8 of these (0-7) */ + +/* Stats registers */ +#define IXGBE_CRCERRS 0x04000 +#define IXGBE_ILLERRC 0x04004 +#define IXGBE_ERRBC 0x04008 +#define IXGBE_MSPDC 0x04010 +#define IXGBE_MPC(_i) (0x03FA0 + ((_i) * 4)) /* 8 of these 3FA0-3FBC*/ +#define IXGBE_MLFC 0x04034 +#define IXGBE_MRFC 0x04038 +#define IXGBE_RLEC 0x04040 +#define IXGBE_LXONTXC 0x03F60 +#define IXGBE_LXONRXC 0x0CF60 +#define IXGBE_LXOFFTXC 0x03F68 +#define IXGBE_LXOFFRXC 0x0CF68 +#define IXGBE_PXONTXC(_i) (0x03F00 + ((_i) * 4)) /* 8 of these 3F00-3F1C*/ +#define IXGBE_PXONRXC(_i) (0x0CF00 + ((_i) * 4)) /* 8 of these CF00-CF1C*/ +#define IXGBE_PXOFFTXC(_i) (0x03F20 + ((_i) * 4)) /* 8 of these 3F20-3F3C*/ +#define IXGBE_PXOFFRXC(_i) (0x0CF20 + ((_i) * 4)) /* 8 of these CF20-CF3C*/ +#define IXGBE_PRC64 0x0405C +#define IXGBE_PRC127 0x04060 +#define IXGBE_PRC255 0x04064 +#define IXGBE_PRC511 0x04068 +#define IXGBE_PRC1023 0x0406C +#define IXGBE_PRC1522 0x04070 +#define IXGBE_GPRC 0x04074 +#define IXGBE_BPRC 0x04078 +#define IXGBE_MPRC 0x0407C +#define IXGBE_GPTC 0x04080 +#define IXGBE_GORCL 0x04088 +#define IXGBE_GORCH 0x0408C +#define IXGBE_GOTCL 0x04090 +#define IXGBE_GOTCH 0x04094 +#define IXGBE_RNBC(_i) (0x03FC0 + ((_i) * 4)) /* 8 of these 3FC0-3FDC*/ +#define IXGBE_RUC 0x040A4 +#define IXGBE_RFC 0x040A8 +#define IXGBE_ROC 0x040AC +#define IXGBE_RJC 0x040B0 +#define IXGBE_MNGPRC 0x040B4 +#define IXGBE_MNGPDC 0x040B8 +#define IXGBE_MNGPTC 0x0CF90 +#define IXGBE_TORL 0x040C0 +#define IXGBE_TORH 0x040C4 +#define IXGBE_TPR 0x040D0 +#define IXGBE_TPT 0x040D4 +#define IXGBE_PTC64 0x040D8 +#define IXGBE_PTC127 0x040DC +#define IXGBE_PTC255 0x040E0 +#define IXGBE_PTC511 0x040E4 +#define IXGBE_PTC1023 0x040E8 +#define IXGBE_PTC1522 0x040EC +#define IXGBE_MPTC 0x040F0 +#define IXGBE_BPTC 0x040F4 +#define IXGBE_XEC 0x04120 + +#define IXGBE_RQSMR(_i) (0x02300 + ((_i) * 4)) /* 16 of these */ +#define IXGBE_TQSMR(_i) (0x07300 + ((_i) * 4)) /* 8 of these */ + +#define IXGBE_QPRC(_i) (0x01030 + ((_i) * 0x40)) /* 16 of these */ +#define IXGBE_QPTC(_i) (0x06030 + ((_i) * 0x40)) /* 16 of these */ +#define IXGBE_QBRC(_i) (0x01034 + ((_i) * 0x40)) /* 16 of these */ +#define IXGBE_QBTC(_i) (0x06034 + ((_i) * 0x40)) /* 16 of these */ + +/* Management */ +#define IXGBE_MAVTV(_i) (0x05010 + ((_i) * 4)) /* 8 of these (0-7) */ +#define IXGBE_MFUTP(_i) (0x05030 + ((_i) * 4)) /* 8 of these (0-7) */ +#define IXGBE_MANC 0x05820 +#define IXGBE_MFVAL 0x05824 +#define IXGBE_MANC2H 0x05860 +#define IXGBE_MDEF(_i) (0x05890 + ((_i) * 4)) /* 8 of these (0-7) */ +#define IXGBE_MIPAF 0x058B0 +#define IXGBE_MMAL(_i) (0x05910 + ((_i) * 8)) /* 4 of these (0-3) */ +#define IXGBE_MMAH(_i) (0x05914 + ((_i) * 8)) /* 4 of these (0-3) */ +#define IXGBE_FTFT 0x09400 /* 0x9400-0x97FC */ + +/* ARC Subsystem registers */ +#define IXGBE_HICR 0x15F00 +#define IXGBE_FWSTS 0x15F0C +#define IXGBE_HSMC0R 0x15F04 +#define IXGBE_HSMC1R 0x15F08 +#define IXGBE_SWSR 0x15F10 +#define IXGBE_HFDR 0x15FE8 +#define IXGBE_FLEX_MNG 
0x15800 /* 0x15800 - 0x15EFC */ + +/* PCI-E registers */ +#define IXGBE_GCR 0x11000 +#define IXGBE_GTV 0x11004 +#define IXGBE_FUNCTAG 0x11008 +#define IXGBE_GLT 0x1100C +#define IXGBE_GSCL_1 0x11010 +#define IXGBE_GSCL_2 0x11014 +#define IXGBE_GSCL_3 0x11018 +#define IXGBE_GSCL_4 0x1101C +#define IXGBE_GSCN_0 0x11020 +#define IXGBE_GSCN_1 0x11024 +#define IXGBE_GSCN_2 0x11028 +#define IXGBE_GSCN_3 0x1102C +#define IXGBE_FACTPS 0x10150 +#define IXGBE_PCIEANACTL 0x11040 +#define IXGBE_SWSM 0x10140 +#define IXGBE_FWSM 0x10148 +#define IXGBE_GSSR 0x10160 +#define IXGBE_MREVID 0x11064 +#define IXGBE_DCA_ID 0x11070 +#define IXGBE_DCA_CTRL 0x11074 + +/* Diagnostic Registers */ +#define IXGBE_RDSTATCTL 0x02C20 +#define IXGBE_RDSTAT(_i) (0x02C00 + ((_i) * 4)) /* 0x02C00-0x02C1C */ +#define IXGBE_RDHMPN 0x02F08 +#define IXGBE_RIC_DW0 0x02F10 +#define IXGBE_RIC_DW1 0x02F14 +#define IXGBE_RIC_DW2 0x02F18 +#define IXGBE_RIC_DW3 0x02F1C +#define IXGBE_RDPROBE 0x02F20 +#define IXGBE_TDSTATCTL 0x07C20 +#define IXGBE_TDSTAT(_i) (0x07C00 + ((_i) * 4)) /* 0x07C00 - 0x07C1C */ +#define IXGBE_TDHMPN 0x07F08 +#define IXGBE_TIC_DW0 0x07F10 +#define IXGBE_TIC_DW1 0x07F14 +#define IXGBE_TIC_DW2 0x07F18 +#define IXGBE_TIC_DW3 0x07F1C +#define IXGBE_TDPROBE 0x07F20 +#define IXGBE_TXBUFCTRL 0x0C600 +#define IXGBE_TXBUFDATA0 0x0C610 +#define IXGBE_TXBUFDATA1 0x0C614 +#define IXGBE_TXBUFDATA2 0x0C618 +#define IXGBE_TXBUFDATA3 0x0C61C +#define IXGBE_RXBUFCTRL 0x03600 +#define IXGBE_RXBUFDATA0 0x03610 +#define IXGBE_RXBUFDATA1 0x03614 +#define IXGBE_RXBUFDATA2 0x03618 +#define IXGBE_RXBUFDATA3 0x0361C +#define IXGBE_PCIE_DIAG(_i) (0x11090 + ((_i) * 4)) /* 8 of these */ +#define IXGBE_RFVAL 0x050A4 +#define IXGBE_MDFTC1 0x042B8 +#define IXGBE_MDFTC2 0x042C0 +#define IXGBE_MDFTFIFO1 0x042C4 +#define IXGBE_MDFTFIFO2 0x042C8 +#define IXGBE_MDFTS 0x042CC +#define IXGBE_RXDATAWRPTR(_i) (0x03700 + ((_i) * 4)) /* 8 of these 3700-370C*/ +#define IXGBE_RXDESCWRPTR(_i) (0x03710 + ((_i) * 4)) /* 8 of these 3710-371C*/ +#define IXGBE_RXDATARDPTR(_i) (0x03720 + ((_i) * 4)) /* 8 of these 3720-372C*/ +#define IXGBE_RXDESCRDPTR(_i) (0x03730 + ((_i) * 4)) /* 8 of these 3730-373C*/ +#define IXGBE_TXDATAWRPTR(_i) (0x0C700 + ((_i) * 4)) /* 8 of these C700-C70C*/ +#define IXGBE_TXDESCWRPTR(_i) (0x0C710 + ((_i) * 4)) /* 8 of these C710-C71C*/ +#define IXGBE_TXDATARDPTR(_i) (0x0C720 + ((_i) * 4)) /* 8 of these C720-C72C*/ +#define IXGBE_TXDESCRDPTR(_i) (0x0C730 + ((_i) * 4)) /* 8 of these C730-C73C*/ +#define IXGBE_PCIEECCCTL 0x1106C +#define IXGBE_PBTXECC 0x0C300 +#define IXGBE_PBRXECC 0x03300 +#define IXGBE_GHECCR 0x110B0 + +/* MAC Registers */ +#define IXGBE_PCS1GCFIG 0x04200 +#define IXGBE_PCS1GLCTL 0x04208 +#define IXGBE_PCS1GLSTA 0x0420C +#define IXGBE_PCS1GDBG0 0x04210 +#define IXGBE_PCS1GDBG1 0x04214 +#define IXGBE_PCS1GANA 0x04218 +#define IXGBE_PCS1GANLP 0x0421C +#define IXGBE_PCS1GANNP 0x04220 +#define IXGBE_PCS1GANLPNP 0x04224 +#define IXGBE_HLREG0 0x04240 +#define IXGBE_HLREG1 0x04244 +#define IXGBE_PAP 0x04248 +#define IXGBE_MACA 0x0424C +#define IXGBE_APAE 0x04250 +#define IXGBE_ARD 0x04254 +#define IXGBE_AIS 0x04258 +#define IXGBE_MSCA 0x0425C +#define IXGBE_MSRWD 0x04260 +#define IXGBE_MLADD 0x04264 +#define IXGBE_MHADD 0x04268 +#define IXGBE_TREG 0x0426C +#define IXGBE_PCSS1 0x04288 +#define IXGBE_PCSS2 0x0428C +#define IXGBE_XPCSS 0x04290 +#define IXGBE_SERDESC 0x04298 +#define IXGBE_MACS 0x0429C +#define IXGBE_AUTOC 0x042A0 +#define IXGBE_LINKS 0x042A4 +#define IXGBE_AUTOC2 0x042A8 +#define IXGBE_AUTOC3 0x042AC +#define 
IXGBE_ANLP1 0x042B0 +#define IXGBE_ANLP2 0x042B4 +#define IXGBE_ATLASCTL 0x04800 + +/* RSCCTL Bit Masks */ +#define IXGBE_RSCCTL_RSCEN 0x01 +#define IXGBE_RSCCTL_MAXDESC_1 0x00 +#define IXGBE_RSCCTL_MAXDESC_4 0x04 +#define IXGBE_RSCCTL_MAXDESC_8 0x08 +#define IXGBE_RSCCTL_MAXDESC_16 0x0C + +/* CTRL Bit Masks */ +#define IXGBE_CTRL_GIO_DIS 0x00000004 /* Global IO Master Disable bit */ +#define IXGBE_CTRL_LNK_RST 0x00000008 /* Link Reset. Resets everything. */ +#define IXGBE_CTRL_RST 0x04000000 /* Reset (SW) */ + +/* FACTPS */ +#define IXGBE_FACTPS_LFS 0x40000000 /* LAN Function Select */ + +/* MHADD Bit Masks */ +#define IXGBE_MHADD_MFS_MASK 0xFFFF0000 +#define IXGBE_MHADD_MFS_SHIFT 16 + +/* Extended Device Control */ +#define IXGBE_CTRL_EXT_NS_DIS 0x00010000 /* No Snoop disable */ +#define IXGBE_CTRL_EXT_RO_DIS 0x00020000 /* Relaxed Ordering disable */ +#define IXGBE_CTRL_EXT_DRV_LOAD 0x10000000 /* Driver loaded bit for FW */ + +/* Direct Cache Access (DCA) definitions */ +#define IXGBE_DCA_CTRL_DCA_ENABLE 0x00000000 /* DCA Enable */ +#define IXGBE_DCA_CTRL_DCA_DISABLE 0x00000001 /* DCA Disable */ + +#define IXGBE_DCA_CTRL_DCA_MODE_CB1 0x00 /* DCA Mode CB1 */ +#define IXGBE_DCA_CTRL_DCA_MODE_CB2 0x02 /* DCA Mode CB2 */ + +#define IXGBE_DCA_RXCTRL_CPUID_MASK 0x0000001F /* Rx CPUID Mask */ +#define IXGBE_DCA_RXCTRL_DESC_DCA_EN (1 << 5) /* DCA Rx Desc enable */ +#define IXGBE_DCA_RXCTRL_HEAD_DCA_EN (1 << 6) /* DCA Rx Desc header enable */ +#define IXGBE_DCA_RXCTRL_DATA_DCA_EN (1 << 7) /* DCA Rx Desc payload enable */ + +#define IXGBE_DCA_TXCTRL_CPUID_MASK 0x0000001F /* Tx CPUID Mask */ +#define IXGBE_DCA_TXCTRL_DESC_DCA_EN (1 << 5) /* DCA Tx Desc enable */ +#define IXGBE_DCA_TXCTRL_TX_WB_RO_EN (1 << 11) /* TX Desc writeback RO bit */ +#define IXGBE_DCA_MAX_QUEUES_82598 16 /* DCA regs only on 16 queues */ + +/* MSCA Bit Masks */ +#define IXGBE_MSCA_NP_ADDR_MASK 0x0000FFFF /* MDI Address (new protocol) */ +#define IXGBE_MSCA_NP_ADDR_SHIFT 0 +#define IXGBE_MSCA_DEV_TYPE_MASK 0x001F0000 /* Device Type (new protocol) */ +#define IXGBE_MSCA_DEV_TYPE_SHIFT 16 /* Register Address (old protocol */ +#define IXGBE_MSCA_PHY_ADDR_MASK 0x03E00000 /* PHY Address mask */ +#define IXGBE_MSCA_PHY_ADDR_SHIFT 21 /* PHY Address shift*/ +#define IXGBE_MSCA_OP_CODE_MASK 0x0C000000 /* OP CODE mask */ +#define IXGBE_MSCA_OP_CODE_SHIFT 26 /* OP CODE shift */ +#define IXGBE_MSCA_ADDR_CYCLE 0x00000000 /* OP CODE 00 (addr cycle) */ +#define IXGBE_MSCA_WRITE 0x04000000 /* OP CODE 01 (write) */ +#define IXGBE_MSCA_READ 0x08000000 /* OP CODE 10 (read) */ +#define IXGBE_MSCA_READ_AUTOINC 0x0C000000 /* OP CODE 11 (read, auto inc)*/ +#define IXGBE_MSCA_ST_CODE_MASK 0x30000000 /* ST Code mask */ +#define IXGBE_MSCA_ST_CODE_SHIFT 28 /* ST Code shift */ +#define IXGBE_MSCA_NEW_PROTOCOL 0x00000000 /* ST CODE 00 (new protocol) */ +#define IXGBE_MSCA_OLD_PROTOCOL 0x10000000 /* ST CODE 01 (old protocol) */ +#define IXGBE_MSCA_MDI_COMMAND 0x40000000 /* Initiate MDI command */ +#define IXGBE_MSCA_MDI_IN_PROG_EN 0x80000000 /* MDI in progress enable */ + +/* MSRWD bit masks */ +#define IXGBE_MSRWD_WRITE_DATA_MASK 0x0000FFFF +#define IXGBE_MSRWD_WRITE_DATA_SHIFT 0 +#define IXGBE_MSRWD_READ_DATA_MASK 0xFFFF0000 +#define IXGBE_MSRWD_READ_DATA_SHIFT 16 + +/* Atlas registers */ +#define IXGBE_ATLAS_PDN_LPBK 0x24 +#define IXGBE_ATLAS_PDN_10G 0xB +#define IXGBE_ATLAS_PDN_1G 0xC +#define IXGBE_ATLAS_PDN_AN 0xD + +/* Atlas bit masks */ +#define IXGBE_ATLASCTL_WRITE_CMD 0x00010000 +#define IXGBE_ATLAS_PDN_TX_REG_EN 0x10 +#define 
IXGBE_ATLAS_PDN_TX_10G_QL_ALL 0xF0 +#define IXGBE_ATLAS_PDN_TX_1G_QL_ALL 0xF0 +#define IXGBE_ATLAS_PDN_TX_AN_QL_ALL 0xF0 + +/* Device Type definitions for new protocol MDIO commands */ +#define IXGBE_MDIO_PMA_PMD_DEV_TYPE 0x1 +#define IXGBE_MDIO_PCS_DEV_TYPE 0x3 +#define IXGBE_MDIO_PHY_XS_DEV_TYPE 0x4 +#define IXGBE_MDIO_AUTO_NEG_DEV_TYPE 0x7 +#define IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE 0x1E /* Device 30 */ + +#define IXGBE_MDIO_VENDOR_SPECIFIC_1_CONTROL 0x0 /* VS1 Control Reg */ +#define IXGBE_MDIO_VENDOR_SPECIFIC_1_STATUS 0x1 /* VS1 Status Reg */ +#define IXGBE_MDIO_VENDOR_SPECIFIC_1_LINK_STATUS 0x0008 /* 1 = Link Up */ +#define IXGBE_MDIO_VENDOR_SPECIFIC_1_SPEED_STATUS 0x0010 /* 0 - 10G, 1 - 1G */ +#define IXGBE_MDIO_VENDOR_SPECIFIC_1_10G_SPEED 0x0018 +#define IXGBE_MDIO_VENDOR_SPECIFIC_1_1G_SPEED 0x0010 + +#define IXGBE_MDIO_AUTO_NEG_CONTROL 0x0 /* AUTO_NEG Control Reg */ +#define IXGBE_MDIO_AUTO_NEG_STATUS 0x1 /* AUTO_NEG Status Reg */ +#define IXGBE_MDIO_PHY_XS_CONTROL 0x0 /* PHY_XS Control Reg */ +#define IXGBE_MDIO_PHY_XS_RESET 0x8000 /* PHY_XS Reset */ +#define IXGBE_MDIO_PHY_ID_HIGH 0x2 /* PHY ID High Reg*/ +#define IXGBE_MDIO_PHY_ID_LOW 0x3 /* PHY ID Low Reg*/ +#define IXGBE_MDIO_PHY_SPEED_ABILITY 0x4 /* Speed Abilty Reg */ +#define IXGBE_MDIO_PHY_SPEED_10G 0x0001 /* 10G capable */ +#define IXGBE_MDIO_PHY_SPEED_1G 0x0010 /* 1G capable */ + +#define IXGBE_PHY_REVISION_MASK 0xFFFFFFF0 +#define IXGBE_MAX_PHY_ADDR 32 + +/* PHY IDs*/ +#define TN1010_PHY_ID 0x00A19410 +#define QT2022_PHY_ID 0x0043A400 + +/* General purpose Interrupt Enable */ +#define IXGBE_GPIE_MSIX_MODE 0x00000010 /* MSI-X mode */ +#define IXGBE_GPIE_OCD 0x00000020 /* Other Clear Disable */ +#define IXGBE_GPIE_EIMEN 0x00000040 /* Immediate Interrupt Enable */ +#define IXGBE_GPIE_EIAME 0x40000000 +#define IXGBE_GPIE_PBA_SUPPORT 0x80000000 + +/* Transmit Flow Control status */ +#define IXGBE_TFCS_TXOFF 0x00000001 +#define IXGBE_TFCS_TXOFF0 0x00000100 +#define IXGBE_TFCS_TXOFF1 0x00000200 +#define IXGBE_TFCS_TXOFF2 0x00000400 +#define IXGBE_TFCS_TXOFF3 0x00000800 +#define IXGBE_TFCS_TXOFF4 0x00001000 +#define IXGBE_TFCS_TXOFF5 0x00002000 +#define IXGBE_TFCS_TXOFF6 0x00004000 +#define IXGBE_TFCS_TXOFF7 0x00008000 + +/* TCP Timer */ +#define IXGBE_TCPTIMER_KS 0x00000100 +#define IXGBE_TCPTIMER_COUNT_ENABLE 0x00000200 +#define IXGBE_TCPTIMER_COUNT_FINISH 0x00000400 +#define IXGBE_TCPTIMER_LOOP 0x00000800 +#define IXGBE_TCPTIMER_DURATION_MASK 0x000000FF + +/* HLREG0 Bit Masks */ +#define IXGBE_HLREG0_TXCRCEN 0x00000001 /* bit 0 */ +#define IXGBE_HLREG0_RXCRCSTRP 0x00000002 /* bit 1 */ +#define IXGBE_HLREG0_JUMBOEN 0x00000004 /* bit 2 */ +#define IXGBE_HLREG0_TXPADEN 0x00000400 /* bit 10 */ +#define IXGBE_HLREG0_TXPAUSEEN 0x00001000 /* bit 12 */ +#define IXGBE_HLREG0_RXPAUSEEN 0x00004000 /* bit 14 */ +#define IXGBE_HLREG0_LPBK 0x00008000 /* bit 15 */ +#define IXGBE_HLREG0_MDCSPD 0x00010000 /* bit 16 */ +#define IXGBE_HLREG0_CONTMDC 0x00020000 /* bit 17 */ +#define IXGBE_HLREG0_CTRLFLTR 0x00040000 /* bit 18 */ +#define IXGBE_HLREG0_PREPEND 0x00F00000 /* bits 20-23 */ +#define IXGBE_HLREG0_PRIPAUSEEN 0x01000000 /* bit 24 */ +#define IXGBE_HLREG0_RXPAUSERECDA 0x06000000 /* bits 25-26 */ +#define IXGBE_HLREG0_RXLNGTHERREN 0x08000000 /* bit 27 */ +#define IXGBE_HLREG0_RXPADSTRIPEN 0x10000000 /* bit 28 */ + +/* VMD_CTL bitmasks */ +#define IXGBE_VMD_CTL_VMDQ_EN 0x00000001 +#define IXGBE_VMD_CTL_VMDQ_FILTER 0x00000002 + +/* RDHMPN and TDHMPN bitmasks */ +#define IXGBE_RDHMPN_RDICADDR 0x007FF800 +#define IXGBE_RDHMPN_RDICRDREQ 
0x00800000 +#define IXGBE_RDHMPN_RDICADDR_SHIFT 11 +#define IXGBE_TDHMPN_TDICADDR 0x003FF800 +#define IXGBE_TDHMPN_TDICRDREQ 0x00800000 +#define IXGBE_TDHMPN_TDICADDR_SHIFT 11 + +/* Receive Checksum Control */ +#define IXGBE_RXCSUM_IPPCSE 0x00001000 /* IP payload checksum enable */ +#define IXGBE_RXCSUM_PCSD 0x00002000 /* packet checksum disabled */ + +/* FCRTL Bit Masks */ +#define IXGBE_FCRTL_XONE 0x80000000 /* bit 31, XON enable */ +#define IXGBE_FCRTH_FCEN 0x80000000 /* Rx Flow control enable */ + +/* PAP bit masks*/ +#define IXGBE_PAP_TXPAUSECNT_MASK 0x0000FFFF /* Pause counter mask */ + +/* RMCS Bit Masks */ +#define IXGBE_RMCS_RRM 0x00000002 /* Receive Recylce Mode enable */ +/* Receive Arbitration Control: 0 Round Robin, 1 DFP */ +#define IXGBE_RMCS_RAC 0x00000004 +#define IXGBE_RMCS_DFP IXGBE_RMCS_RAC /* Deficit Fixed Priority ena */ +#define IXGBE_RMCS_TFCE_802_3X 0x00000008 /* Tx Priority flow control ena */ +#define IXGBE_RMCS_TFCE_PRIORITY 0x00000010 /* Tx Priority flow control ena */ +#define IXGBE_RMCS_ARBDIS 0x00000040 /* Arbitration disable bit */ + +/* Interrupt register bitmasks */ + +/* Extended Interrupt Cause Read */ +#define IXGBE_EICR_RTX_QUEUE 0x0000FFFF /* RTx Queue Interrupt */ +#define IXGBE_EICR_LSC 0x00100000 /* Link Status Change */ +#define IXGBE_EICR_MNG 0x00400000 /* Managability Event Interrupt */ +#define IXGBE_EICR_PBUR 0x10000000 /* Packet Buffer Handler Error */ +#define IXGBE_EICR_DHER 0x20000000 /* Descriptor Handler Error */ +#define IXGBE_EICR_TCP_TIMER 0x40000000 /* TCP Timer */ +#define IXGBE_EICR_OTHER 0x80000000 /* Interrupt Cause Active */ + +/* Extended Interrupt Cause Set */ +#define IXGBE_EICS_RTX_QUEUE IXGBE_EICR_RTX_QUEUE /* RTx Queue Interrupt */ +#define IXGBE_EICS_LSC IXGBE_EICR_LSC /* Link Status Change */ +#define IXGBE_EICR_GPI_SDP0 0x01000000 /* Gen Purpose Interrupt on SDP0 */ +#define IXGBE_EICS_MNG IXGBE_EICR_MNG /* MNG Event Interrupt */ +#define IXGBE_EICS_PBUR IXGBE_EICR_PBUR /* Pkt Buf Handler Error */ +#define IXGBE_EICS_DHER IXGBE_EICR_DHER /* Desc Handler Error */ +#define IXGBE_EICS_TCP_TIMER IXGBE_EICR_TCP_TIMER /* TCP Timer */ +#define IXGBE_EICS_OTHER IXGBE_EICR_OTHER /* INT Cause Active */ + +/* Extended Interrupt Mask Set */ +#define IXGBE_EIMS_RTX_QUEUE IXGBE_EICR_RTX_QUEUE /* RTx Queue Interrupt */ +#define IXGBE_EIMS_LSC IXGBE_EICR_LSC /* Link Status Change */ +#define IXGBE_EIMS_MNG IXGBE_EICR_MNG /* MNG Event Interrupt */ +#define IXGBE_EIMS_PBUR IXGBE_EICR_PBUR /* Pkt Buf Handler Error */ +#define IXGBE_EIMS_DHER IXGBE_EICR_DHER /* Descr Handler Error */ +#define IXGBE_EIMS_TCP_TIMER IXGBE_EICR_TCP_TIMER /* TCP Timer */ +#define IXGBE_EIMS_OTHER IXGBE_EICR_OTHER /* INT Cause Active */ + +/* Extended Interrupt Mask Clear */ +#define IXGBE_EIMC_RTX_QUEUE IXGBE_EICR_RTX_QUEUE /* RTx Queue Interrupt */ +#define IXGBE_EIMC_LSC IXGBE_EICR_LSC /* Link Status Change */ +#define IXGBE_EIMC_MNG IXGBE_EICR_MNG /* MNG Event Interrupt */ +#define IXGBE_EIMC_PBUR IXGBE_EICR_PBUR /* Pkt Buf Handler Error */ +#define IXGBE_EIMC_DHER IXGBE_EICR_DHER /* Desc Handler Error */ +#define IXGBE_EIMC_TCP_TIMER IXGBE_EICR_TCP_TIMER /* TCP Timer */ +#define IXGBE_EIMC_OTHER IXGBE_EICR_OTHER /* INT Cause Active */ + +#define IXGBE_EIMS_ENABLE_MASK (\ + IXGBE_EIMS_RTX_QUEUE | \ + IXGBE_EIMS_LSC | \ + IXGBE_EIMS_TCP_TIMER | \ + IXGBE_EIMS_OTHER) + +/* Immediate Interrupt RX (A.K.A. 
Low Latency Interrupt) */ +#define IXGBE_IMIR_PORT_IM_EN 0x00010000 /* TCP port enable */ +#define IXGBE_IMIR_PORT_BP 0x00020000 /* TCP port check bypass */ +#define IXGBE_IMIREXT_SIZE_BP 0x00001000 /* Packet size bypass */ +#define IXGBE_IMIREXT_CTRL_URG 0x00002000 /* Check URG bit in header */ +#define IXGBE_IMIREXT_CTRL_ACK 0x00004000 /* Check ACK bit in header */ +#define IXGBE_IMIREXT_CTRL_PSH 0x00008000 /* Check PSH bit in header */ +#define IXGBE_IMIREXT_CTRL_RST 0x00010000 /* Check RST bit in header */ +#define IXGBE_IMIREXT_CTRL_SYN 0x00020000 /* Check SYN bit in header */ +#define IXGBE_IMIREXT_CTRL_FIN 0x00040000 /* Check FIN bit in header */ +#define IXGBE_IMIREXT_CTRL_BP 0x00080000 /* Bypass check of control bits */ + +/* Interrupt clear mask */ +#define IXGBE_IRQ_CLEAR_MASK 0xFFFFFFFF + +/* Interrupt Vector Allocation Registers */ +#define IXGBE_IVAR_REG_NUM 25 +#define IXGBE_IVAR_TXRX_ENTRY 96 +#define IXGBE_IVAR_RX_ENTRY 64 +#define IXGBE_IVAR_RX_QUEUE(_i) (0 + (_i)) +#define IXGBE_IVAR_TX_QUEUE(_i) (64 + (_i)) +#define IXGBE_IVAR_TX_ENTRY 32 + +#define IXGBE_IVAR_TCP_TIMER_INDEX 96 /* 0 based index */ +#define IXGBE_IVAR_OTHER_CAUSES_INDEX 97 /* 0 based index */ + +#define IXGBE_MSIX_VECTOR(_i) (0 + (_i)) + +#define IXGBE_IVAR_ALLOC_VAL 0x80 /* Interrupt Allocation valid */ + +/* VLAN Control Bit Masks */ +#define IXGBE_VLNCTRL_VET 0x0000FFFF /* bits 0-15 */ +#define IXGBE_VLNCTRL_CFI 0x10000000 /* bit 28 */ +#define IXGBE_VLNCTRL_CFIEN 0x20000000 /* bit 29 */ +#define IXGBE_VLNCTRL_VFE 0x40000000 /* bit 30 */ +#define IXGBE_VLNCTRL_VME 0x80000000 /* bit 31 */ + +#define IXGBE_ETHERNET_IEEE_VLAN_TYPE 0x8100 /* 802.1q protocol */ + +/* STATUS Bit Masks */ +#define IXGBE_STATUS_LAN_ID 0x0000000C /* LAN ID */ +#define IXGBE_STATUS_GIO 0x00080000 /* GIO Master Enable Status */ + +#define IXGBE_STATUS_LAN_ID_0 0x00000000 /* LAN ID 0 */ +#define IXGBE_STATUS_LAN_ID_1 0x00000004 /* LAN ID 1 */ + +/* ESDP Bit Masks */ +#define IXGBE_ESDP_SDP4 0x00000001 /* SDP4 Data Value */ +#define IXGBE_ESDP_SDP5 0x00000002 /* SDP5 Data Value */ +#define IXGBE_ESDP_SDP4_DIR 0x00000004 /* SDP4 IO direction */ +#define IXGBE_ESDP_SDP5_DIR 0x00000008 /* SDP5 IO direction */ + +/* LEDCTL Bit Masks */ +#define IXGBE_LED_IVRT_BASE 0x00000040 +#define IXGBE_LED_BLINK_BASE 0x00000080 +#define IXGBE_LED_MODE_MASK_BASE 0x0000000F +#define IXGBE_LED_OFFSET(_base, _i) (_base << (8 * (_i))) +#define IXGBE_LED_MODE_SHIFT(_i) (8*(_i)) +#define IXGBE_LED_IVRT(_i) IXGBE_LED_OFFSET(IXGBE_LED_IVRT_BASE, _i) +#define IXGBE_LED_BLINK(_i) IXGBE_LED_OFFSET(IXGBE_LED_BLINK_BASE, _i) +#define IXGBE_LED_MODE_MASK(_i) IXGBE_LED_OFFSET(IXGBE_LED_MODE_MASK_BASE, _i) + +/* LED modes */ +#define IXGBE_LED_LINK_UP 0x0 +#define IXGBE_LED_LINK_10G 0x1 +#define IXGBE_LED_MAC 0x2 +#define IXGBE_LED_FILTER 0x3 +#define IXGBE_LED_LINK_ACTIVE 0x4 +#define IXGBE_LED_LINK_1G 0x5 +#define IXGBE_LED_ON 0xE +#define IXGBE_LED_OFF 0xF + +/* AUTOC Bit Masks */ +#define IXGBE_AUTOC_KX4_SUPP 0x80000000 +#define IXGBE_AUTOC_KX_SUPP 0x40000000 +#define IXGBE_AUTOC_PAUSE 0x30000000 +#define IXGBE_AUTOC_RF 0x08000000 +#define IXGBE_AUTOC_PD_TMR 0x06000000 +#define IXGBE_AUTOC_AN_RX_LOOSE 0x01000000 +#define IXGBE_AUTOC_AN_RX_DRIFT 0x00800000 +#define IXGBE_AUTOC_AN_RX_ALIGN 0x007C0000 +#define IXGBE_AUTOC_AN_RESTART 0x00001000 +#define IXGBE_AUTOC_FLU 0x00000001 +#define IXGBE_AUTOC_LMS_SHIFT 13 +#define IXGBE_AUTOC_LMS_MASK (0x7 << IXGBE_AUTOC_LMS_SHIFT) +#define IXGBE_AUTOC_LMS_1G_LINK_NO_AN (0x0 << IXGBE_AUTOC_LMS_SHIFT) +#define 
IXGBE_AUTOC_LMS_10G_LINK_NO_AN (0x1 << IXGBE_AUTOC_LMS_SHIFT) +#define IXGBE_AUTOC_LMS_1G_AN (0x2 << IXGBE_AUTOC_LMS_SHIFT) +#define IXGBE_AUTOC_LMS_KX4_AN (0x4 << IXGBE_AUTOC_LMS_SHIFT) +#define IXGBE_AUTOC_LMS_KX4_AN_1G_AN (0x6 << IXGBE_AUTOC_LMS_SHIFT) +#define IXGBE_AUTOC_LMS_ATTACH_TYPE (0x7 << IXGBE_AUTOC_10G_PMA_PMD_SHIFT) + +#define IXGBE_AUTOC_1G_PMA_PMD 0x00000200 +#define IXGBE_AUTOC_10G_PMA_PMD 0x00000180 +#define IXGBE_AUTOC_10G_PMA_PMD_SHIFT 7 +#define IXGBE_AUTOC_1G_PMA_PMD_SHIFT 9 +#define IXGBE_AUTOC_10G_XAUI (0x0 << IXGBE_AUTOC_10G_PMA_PMD_SHIFT) +#define IXGBE_AUTOC_10G_KX4 (0x1 << IXGBE_AUTOC_10G_PMA_PMD_SHIFT) +#define IXGBE_AUTOC_10G_CX4 (0x2 << IXGBE_AUTOC_10G_PMA_PMD_SHIFT) +#define IXGBE_AUTOC_1G_BX (0x0 << IXGBE_AUTOC_1G_PMA_PMD_SHIFT) +#define IXGBE_AUTOC_1G_KX (0x1 << IXGBE_AUTOC_1G_PMA_PMD_SHIFT) + +/* LINKS Bit Masks */ +#define IXGBE_LINKS_KX_AN_COMP 0x80000000 +#define IXGBE_LINKS_UP 0x40000000 +#define IXGBE_LINKS_SPEED 0x20000000 +#define IXGBE_LINKS_MODE 0x18000000 +#define IXGBE_LINKS_RX_MODE 0x06000000 +#define IXGBE_LINKS_TX_MODE 0x01800000 +#define IXGBE_LINKS_XGXS_EN 0x00400000 +#define IXGBE_LINKS_PCS_1G_EN 0x00200000 +#define IXGBE_LINKS_1G_AN_EN 0x00100000 +#define IXGBE_LINKS_KX_AN_IDLE 0x00080000 +#define IXGBE_LINKS_1G_SYNC 0x00040000 +#define IXGBE_LINKS_10G_ALIGN 0x00020000 +#define IXGBE_LINKS_10G_LANE_SYNC 0x00017000 +#define IXGBE_LINKS_TL_FAULT 0x00001000 +#define IXGBE_LINKS_SIGNAL 0x00000F00 + +#define IXGBE_AUTO_NEG_TIME 45 /* 4.5 Seconds */ + +/* SW Semaphore Register bitmasks */ +#define IXGBE_SWSM_SMBI 0x00000001 /* Driver Semaphore bit */ +#define IXGBE_SWSM_SWESMBI 0x00000002 /* FW Semaphore bit */ +#define IXGBE_SWSM_WMNG 0x00000004 /* Wake MNG Clock */ + +/* GSSR definitions */ +#define IXGBE_GSSR_EEP_SM 0x0001 +#define IXGBE_GSSR_PHY0_SM 0x0002 +#define IXGBE_GSSR_PHY1_SM 0x0004 +#define IXGBE_GSSR_MAC_CSR_SM 0x0008 +#define IXGBE_GSSR_FLASH_SM 0x0010 + +/* EEC Register */ +#define IXGBE_EEC_SK 0x00000001 /* EEPROM Clock */ +#define IXGBE_EEC_CS 0x00000002 /* EEPROM Chip Select */ +#define IXGBE_EEC_DI 0x00000004 /* EEPROM Data In */ +#define IXGBE_EEC_DO 0x00000008 /* EEPROM Data Out */ +#define IXGBE_EEC_FWE_MASK 0x00000030 /* FLASH Write Enable */ +#define IXGBE_EEC_FWE_DIS 0x00000010 /* Disable FLASH writes */ +#define IXGBE_EEC_FWE_EN 0x00000020 /* Enable FLASH writes */ +#define IXGBE_EEC_FWE_SHIFT 4 +#define IXGBE_EEC_REQ 0x00000040 /* EEPROM Access Request */ +#define IXGBE_EEC_GNT 0x00000080 /* EEPROM Access Grant */ +#define IXGBE_EEC_PRES 0x00000100 /* EEPROM Present */ +#define IXGBE_EEC_ARD 0x00000200 /* EEPROM Auto Read Done */ +/* EEPROM Addressing bits based on type (0-small, 1-large) */ +#define IXGBE_EEC_ADDR_SIZE 0x00000400 +#define IXGBE_EEC_SIZE 0x00007800 /* EEPROM Size */ + +#define IXGBE_EEC_SIZE_SHIFT 11 +#define IXGBE_EEPROM_WORD_SIZE_SHIFT 6 +#define IXGBE_EEPROM_OPCODE_BITS 8 + +/* Checksum and EEPROM pointers */ +#define IXGBE_EEPROM_CHECKSUM 0x3F +#define IXGBE_EEPROM_SUM 0xBABA +#define IXGBE_PCIE_ANALOG_PTR 0x03 +#define IXGBE_ATLAS0_CONFIG_PTR 0x04 +#define IXGBE_ATLAS1_CONFIG_PTR 0x05 +#define IXGBE_PCIE_GENERAL_PTR 0x06 +#define IXGBE_PCIE_CONFIG0_PTR 0x07 +#define IXGBE_PCIE_CONFIG1_PTR 0x08 +#define IXGBE_CORE0_PTR 0x09 +#define IXGBE_CORE1_PTR 0x0A +#define IXGBE_MAC0_PTR 0x0B +#define IXGBE_MAC1_PTR 0x0C +#define IXGBE_CSR0_CONFIG_PTR 0x0D +#define IXGBE_CSR1_CONFIG_PTR 0x0E +#define IXGBE_FW_PTR 0x0F +#define IXGBE_PBANUM0_PTR 0x15 +#define IXGBE_PBANUM1_PTR 0x16 + +/* EEPROM Commands - 
SPI */ +#define IXGBE_EEPROM_MAX_RETRY_SPI 5000 /* Max wait 5ms for RDY signal */ +#define IXGBE_EEPROM_STATUS_RDY_SPI 0x01 +#define IXGBE_EEPROM_READ_OPCODE_SPI 0x03 /* EEPROM read opcode */ +#define IXGBE_EEPROM_WRITE_OPCODE_SPI 0x02 /* EEPROM write opcode */ +#define IXGBE_EEPROM_A8_OPCODE_SPI 0x08 /* opcode bit-3 = addr bit-8 */ +#define IXGBE_EEPROM_WREN_OPCODE_SPI 0x06 /* EEPROM set Write Ena latch */ +/* EEPROM reset Write Enbale latch */ +#define IXGBE_EEPROM_WRDI_OPCODE_SPI 0x04 +#define IXGBE_EEPROM_RDSR_OPCODE_SPI 0x05 /* EEPROM read Status reg */ +#define IXGBE_EEPROM_WRSR_OPCODE_SPI 0x01 /* EEPROM write Status reg */ +#define IXGBE_EEPROM_ERASE4K_OPCODE_SPI 0x20 /* EEPROM ERASE 4KB */ +#define IXGBE_EEPROM_ERASE64K_OPCODE_SPI 0xD8 /* EEPROM ERASE 64KB */ +#define IXGBE_EEPROM_ERASE256_OPCODE_SPI 0xDB /* EEPROM ERASE 256B */ + +/* EEPROM Read Register */ +#define IXGBE_EEPROM_READ_REG_DATA 16 /* data offset in EEPROM read reg */ +#define IXGBE_EEPROM_READ_REG_DONE 2 /* Offset to READ done bit */ +#define IXGBE_EEPROM_READ_REG_START 1 /* First bit to start operation */ +#define IXGBE_EEPROM_READ_ADDR_SHIFT 2 /* Shift to the address bits */ + +#define IXGBE_ETH_LENGTH_OF_ADDRESS 6 + +#ifndef IXGBE_EEPROM_GRANT_ATTEMPTS +#define IXGBE_EEPROM_GRANT_ATTEMPTS 1000 /* EEPROM # attempts to gain grant */ +#endif + +#ifndef IXGBE_EERD_ATTEMPTS +/* Number of 5 microseconds we wait for EERD read to complete */ +#define IXGBE_EERD_ATTEMPTS 100000 +#endif + +/* PCI Bus Info */ +#define IXGBE_PCI_LINK_STATUS 0xB2 +#define IXGBE_PCI_LINK_WIDTH 0x3F0 +#define IXGBE_PCI_LINK_WIDTH_1 0x10 +#define IXGBE_PCI_LINK_WIDTH_2 0x20 +#define IXGBE_PCI_LINK_WIDTH_4 0x40 +#define IXGBE_PCI_LINK_WIDTH_8 0x80 +#define IXGBE_PCI_LINK_SPEED 0xF +#define IXGBE_PCI_LINK_SPEED_2500 0x1 +#define IXGBE_PCI_LINK_SPEED_5000 0x2 + +/* Number of 100 microseconds we wait for PCI Express master disable */ +#define IXGBE_PCI_MASTER_DISABLE_TIMEOUT 800 + +/* PHY Types */ +#define IXGBE_M88E1145_E_PHY_ID 0x01410CD0 + +/* Check whether address is multicast. This is little-endian specific check.*/ +#define IXGBE_IS_MULTICAST(Address) \ + (bool)(((u8 *)(Address))[0] & ((u8)0x01)) + +/* Check whether an address is broadcast. */ +#define IXGBE_IS_BROADCAST(Address) \ + ((((u8 *)(Address))[0] == ((u8)0xff)) && \ + (((u8 *)(Address))[1] == ((u8)0xff))) + +/* RAH */ +#define IXGBE_RAH_VIND_MASK 0x003C0000 +#define IXGBE_RAH_VIND_SHIFT 18 +#define IXGBE_RAH_AV 0x80000000 + +/* Filters */ +#define IXGBE_MC_TBL_SIZE 128 /* Multicast Filter Table (4096 bits) */ +#define IXGBE_VLAN_FILTER_TBL_SIZE 128 /* VLAN Filter Table (4096 bits) */ + +/* Header split receive */ +#define IXGBE_RFCTL_ISCSI_DIS 0x00000001 +#define IXGBE_RFCTL_ISCSI_DWC_MASK 0x0000003E +#define IXGBE_RFCTL_ISCSI_DWC_SHIFT 1 +#define IXGBE_RFCTL_NFSW_DIS 0x00000040 +#define IXGBE_RFCTL_NFSR_DIS 0x00000080 +#define IXGBE_RFCTL_NFS_VER_MASK 0x00000300 +#define IXGBE_RFCTL_NFS_VER_SHIFT 8 +#define IXGBE_RFCTL_NFS_VER_2 0 +#define IXGBE_RFCTL_NFS_VER_3 1 +#define IXGBE_RFCTL_NFS_VER_4 2 +#define IXGBE_RFCTL_IPV6_DIS 0x00000400 +#define IXGBE_RFCTL_IPV6_XSUM_DIS 0x00000800 +#define IXGBE_RFCTL_IPFRSP_DIS 0x00004000 +#define IXGBE_RFCTL_IPV6_EX_DIS 0x00010000 +#define IXGBE_RFCTL_NEW_IPV6_EXT_DIS 0x00020000 + +/* Transmit Config masks */ +#define IXGBE_TXDCTL_ENABLE 0x02000000 /* Enable specific Tx Queue */ +#define IXGBE_TXDCTL_SWFLSH 0x04000000 /* Tx Desc. 
write-back flushing */ +/* Enable short packet padding to 64 bytes */ +#define IXGBE_TX_PAD_ENABLE 0x00000400 +#define IXGBE_JUMBO_FRAME_ENABLE 0x00000004 /* Allow jumbo frames */ +/* This allows for 16K packets + 4k for vlan */ +#define IXGBE_MAX_FRAME_SZ 0x40040000 + +#define IXGBE_TDWBAL_HEAD_WB_ENABLE 0x1 /* Tx head write-back enable */ +#define IXGBE_TDWBAL_SEQNUM_WB_ENABLE 0x2 /* Tx seq. # write-back enable */ + +/* Receive Config masks */ +#define IXGBE_RXCTRL_RXEN 0x00000001 /* Enable Receiver */ +#define IXGBE_RXCTRL_DMBYPS 0x00000002 /* Descriptor Monitor Bypass */ +#define IXGBE_RXDCTL_ENABLE 0x02000000 /* Enable specific Rx Queue */ + +#define IXGBE_FCTRL_SBP 0x00000002 /* Store Bad Packet */ +#define IXGBE_FCTRL_MPE 0x00000100 /* Multicast Promiscuous Ena*/ +#define IXGBE_FCTRL_UPE 0x00000200 /* Unicast Promiscuous Ena */ +#define IXGBE_FCTRL_BAM 0x00000400 /* Broadcast Accept Mode */ +#define IXGBE_FCTRL_PMCF 0x00001000 /* Pass MAC Control Frames */ +#define IXGBE_FCTRL_DPF 0x00002000 /* Discard Pause Frame */ +/* Receive Priority Flow Control Enbale */ +#define IXGBE_FCTRL_RPFCE 0x00004000 +#define IXGBE_FCTRL_RFCE 0x00008000 /* Receive Flow Control Ena */ + +/* Multiple Receive Queue Control */ +#define IXGBE_MRQC_RSSEN 0x00000001 /* RSS Enable */ +#define IXGBE_MRQC_RSS_FIELD_MASK 0xFFFF0000 +#define IXGBE_MRQC_RSS_FIELD_IPV4_TCP 0x00010000 +#define IXGBE_MRQC_RSS_FIELD_IPV4 0x00020000 +#define IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP 0x00040000 +#define IXGBE_MRQC_RSS_FIELD_IPV6_EX 0x00080000 +#define IXGBE_MRQC_RSS_FIELD_IPV6 0x00100000 +#define IXGBE_MRQC_RSS_FIELD_IPV6_TCP 0x00200000 +#define IXGBE_MRQC_RSS_FIELD_IPV4_UDP 0x00400000 +#define IXGBE_MRQC_RSS_FIELD_IPV6_UDP 0x00800000 +#define IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP 0x01000000 + +#define IXGBE_TXD_POPTS_IXSM 0x01 /* Insert IP checksum */ +#define IXGBE_TXD_POPTS_TXSM 0x02 /* Insert TCP/UDP checksum */ +#define IXGBE_TXD_CMD_EOP 0x01000000 /* End of Packet */ +#define IXGBE_TXD_CMD_IFCS 0x02000000 /* Insert FCS (Ethernet CRC) */ +#define IXGBE_TXD_CMD_IC 0x04000000 /* Insert Checksum */ +#define IXGBE_TXD_CMD_RS 0x08000000 /* Report Status */ +#define IXGBE_TXD_CMD_DEXT 0x20000000 /* Descriptor extension (0 = legacy) */ +#define IXGBE_TXD_CMD_VLE 0x40000000 /* Add VLAN tag */ +#define IXGBE_TXD_STAT_DD 0x00000001 /* Descriptor Done */ + +/* Receive Descriptor bit definitions */ +#define IXGBE_RXD_STAT_DD 0x01 /* Descriptor Done */ +#define IXGBE_RXD_STAT_EOP 0x02 /* End of Packet */ +#define IXGBE_RXD_STAT_IXSM 0x04 /* Ignore checksum */ +#define IXGBE_RXD_STAT_VP 0x08 /* IEEE VLAN Packet */ +#define IXGBE_RXD_STAT_UDPCS 0x10 /* UDP xsum caculated */ +#define IXGBE_RXD_STAT_L4CS 0x20 /* L4 xsum calculated */ +#define IXGBE_RXD_STAT_IPCS 0x40 /* IP xsum calculated */ +#define IXGBE_RXD_STAT_PIF 0x80 /* passed in-exact filter */ +#define IXGBE_RXD_STAT_CRCV 0x100 /* Speculative CRC Valid */ +#define IXGBE_RXD_STAT_VEXT 0x200 /* 1st VLAN found */ +#define IXGBE_RXD_STAT_UDPV 0x400 /* Valid UDP checksum */ +#define IXGBE_RXD_STAT_DYNINT 0x800 /* Pkt caused INT via DYNINT */ +#define IXGBE_RXD_STAT_ACK 0x8000 /* ACK Packet indication */ +#define IXGBE_RXD_ERR_CE 0x01 /* CRC Error */ +#define IXGBE_RXD_ERR_LE 0x02 /* Length Error */ +#define IXGBE_RXD_ERR_PE 0x08 /* Packet Error */ +#define IXGBE_RXD_ERR_OSE 0x10 /* Oversize Error */ +#define IXGBE_RXD_ERR_USE 0x20 /* Undersize Error */ +#define IXGBE_RXD_ERR_TCPE 0x40 /* TCP/UDP Checksum Error */ +#define IXGBE_RXD_ERR_IPE 0x80 /* IP Checksum Error */ +#define 
IXGBE_RXDADV_HBO 0x00800000 +#define IXGBE_RXDADV_ERR_CE 0x01000000 /* CRC Error */ +#define IXGBE_RXDADV_ERR_LE 0x02000000 /* Length Error */ +#define IXGBE_RXDADV_ERR_PE 0x08000000 /* Packet Error */ +#define IXGBE_RXDADV_ERR_OSE 0x10000000 /* Oversize Error */ +#define IXGBE_RXDADV_ERR_USE 0x20000000 /* Undersize Error */ +#define IXGBE_RXDADV_ERR_TCPE 0x40000000 /* TCP/UDP Checksum Error */ +#define IXGBE_RXDADV_ERR_IPE 0x80000000 /* IP Checksum Error */ +#define IXGBE_RXD_VLAN_ID_MASK 0x0FFF /* VLAN ID is in lower 12 bits */ +#define IXGBE_RXD_PRI_MASK 0xE000 /* Priority is in upper 3 bits */ +#define IXGBE_RXD_PRI_SHIFT 13 +#define IXGBE_RXD_CFI_MASK 0x1000 /* CFI is bit 12 */ +#define IXGBE_RXD_CFI_SHIFT 12 + +/* SRRCTL bit definitions */ +#define IXGBE_SRRCTL_BSIZEPKT_SHIFT 10 /* so many KBs */ +#define IXGBE_SRRCTL_BSIZEPKT_MASK 0x0000007F +#define IXGBE_SRRCTL_BSIZEHDR_MASK 0x00003F00 +#define IXGBE_SRRCTL_DESCTYPE_LEGACY 0x00000000 +#define IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF 0x02000000 +#define IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT 0x04000000 +#define IXGBE_SRRCTL_DESCTYPE_HDR_REPLICATION_LARGE_PKT 0x08000000 +#define IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS 0x0A000000 + +#define IXGBE_RXDPS_HDRSTAT_HDRSP 0x00008000 +#define IXGBE_RXDPS_HDRSTAT_HDRLEN_MASK 0x000003FF + +#define IXGBE_RXDADV_RSSTYPE_MASK 0x0000000F +#define IXGBE_RXDADV_PKTTYPE_MASK 0x0000FFF0 +#define IXGBE_RXDADV_HDRBUFLEN_MASK 0x00007FE0 +#define IXGBE_RXDADV_HDRBUFLEN_SHIFT 5 +#define IXGBE_RXDADV_SPLITHEADER_EN 0x00001000 +#define IXGBE_RXDADV_SPH 0x8000 + +/* RSS Hash results */ +#define IXGBE_RXDADV_RSSTYPE_NONE 0x00000000 +#define IXGBE_RXDADV_RSSTYPE_IPV4_TCP 0x00000001 +#define IXGBE_RXDADV_RSSTYPE_IPV4 0x00000002 +#define IXGBE_RXDADV_RSSTYPE_IPV6_TCP 0x00000003 +#define IXGBE_RXDADV_RSSTYPE_IPV6_EX 0x00000004 +#define IXGBE_RXDADV_RSSTYPE_IPV6 0x00000005 +#define IXGBE_RXDADV_RSSTYPE_IPV6_TCP_EX 0x00000006 +#define IXGBE_RXDADV_RSSTYPE_IPV4_UDP 0x00000007 +#define IXGBE_RXDADV_RSSTYPE_IPV6_UDP 0x00000008 +#define IXGBE_RXDADV_RSSTYPE_IPV6_UDP_EX 0x00000009 + +/* RSS Packet Types as indicated in the receive descriptor. 
*/ +#define IXGBE_RXDADV_PKTTYPE_NONE 0x00000000 +#define IXGBE_RXDADV_PKTTYPE_IPV4 0x00000010 /* IPv4 hdr present */ +#define IXGBE_RXDADV_PKTTYPE_IPV4_EX 0x00000020 /* IPv4 hdr + extensions */ +#define IXGBE_RXDADV_PKTTYPE_IPV6 0x00000040 /* IPv6 hdr present */ +#define IXGBE_RXDADV_PKTTYPE_IPV6_EX 0x00000080 /* IPv6 hdr + extensions */ +#define IXGBE_RXDADV_PKTTYPE_TCP 0x00000100 /* TCP hdr present */ +#define IXGBE_RXDADV_PKTTYPE_UDP 0x00000200 /* UDP hdr present */ +#define IXGBE_RXDADV_PKTTYPE_SCTP 0x00000400 /* SCTP hdr present */ +#define IXGBE_RXDADV_PKTTYPE_NFS 0x00000800 /* NFS hdr present */ + +/* Masks to determine if packets should be dropped due to frame errors */ +#define IXGBE_RXD_ERR_FRAME_ERR_MASK (\ + IXGBE_RXD_ERR_CE | \ + IXGBE_RXD_ERR_LE | \ + IXGBE_RXD_ERR_PE | \ + IXGBE_RXD_ERR_OSE | \ + IXGBE_RXD_ERR_USE) + +#define IXGBE_RXDADV_ERR_FRAME_ERR_MASK (\ + IXGBE_RXDADV_ERR_CE | \ + IXGBE_RXDADV_ERR_LE | \ + IXGBE_RXDADV_ERR_PE | \ + IXGBE_RXDADV_ERR_OSE | \ + IXGBE_RXDADV_ERR_USE) + +/* Multicast bit mask */ +#define IXGBE_MCSTCTRL_MFE 0x4 + +/* Number of Transmit and Receive Descriptors must be a multiple of 8 */ +#define IXGBE_REQ_TX_DESCRIPTOR_MULTIPLE 8 +#define IXGBE_REQ_RX_DESCRIPTOR_MULTIPLE 8 +#define IXGBE_REQ_TX_BUFFER_GRANULARITY 1024 + +/* Vlan-specific macros */ +#define IXGBE_RX_DESC_SPECIAL_VLAN_MASK 0x0FFF /* VLAN ID in lower 12 bits */ +#define IXGBE_RX_DESC_SPECIAL_PRI_MASK 0xE000 /* Priority in upper 3 bits */ +#define IXGBE_RX_DESC_SPECIAL_PRI_SHIFT 0x000D /* Priority in upper 3 of 16 */ +#define IXGBE_TX_DESC_SPECIAL_PRI_SHIFT IXGBE_RX_DESC_SPECIAL_PRI_SHIFT + +/* Transmit Descriptor - Legacy */ +struct ixgbe_legacy_tx_desc { + u64 buffer_addr; /* Address of the descriptor's data buffer */ + union { + u32 data; + struct { + u16 length; /* Data buffer length */ + u8 cso; /* Checksum offset */ + u8 cmd; /* Descriptor control */ + } flags; + } lower; + union { + u32 data; + struct { + u8 status; /* Descriptor status */ + u8 css; /* Checksum start */ + u16 vlan; + } fields; + } upper; +}; + +/* Transmit Descriptor - Advanced */ +union ixgbe_adv_tx_desc { + struct { + u64 buffer_addr; /* Address of descriptor's data buf */ + u32 cmd_type_len; + u32 olinfo_status; + } read; + struct { + u64 rsvd; /* Reserved */ + u32 nxtseq_seed; + u32 status; + } wb; +}; + +/* Receive Descriptor - Legacy */ +struct ixgbe_legacy_rx_desc { + u64 buffer_addr; /* Address of the descriptor's data buffer */ + u16 length; /* Length of data DMAed into data buffer */ + u16 csum; /* Packet checksum */ + u8 status; /* Descriptor status */ + u8 errors; /* Descriptor Errors */ + u16 vlan; +}; + +/* Receive Descriptor - Advanced */ +union ixgbe_adv_rx_desc { + struct { + u64 pkt_addr; /* Packet buffer address */ + u64 hdr_addr; /* Header buffer address */ + } read; + struct { + struct { + struct { + u16 pkt_info; /* RSS type, Packet type */ + u16 hdr_info; /* Split Header, header len */ + } lo_dword; + union { + u32 rss; /* RSS Hash */ + struct { + u16 ip_id; /* IP id */ + u16 csum; /* Packet Checksum */ + } csum_ip; + } hi_dword; + } lower; + struct { + u32 status_error; /* ext status/error */ + u16 length; /* Packet length */ + u16 vlan; /* VLAN tag */ + } upper; + } wb; /* writeback */ +}; + +/* Context descriptors */ +struct ixgbe_adv_tx_context_desc { + u32 vlan_macip_lens; + u32 seqnum_seed; + u32 type_tucmd_mlhl; + u32 mss_l4len_idx; +}; + +/* Adv Transmit Descriptor Config Masks */ +#define IXGBE_ADVTXD_DTALEN_MASK 0x0000FFFF /* Data buffer length(bytes) */ +#define 
IXGBE_ADVTXD_DTYP_MASK 0x00F00000 /* DTYP mask */ +#define IXGBE_ADVTXD_DTYP_CTXT 0x00200000 /* Advanced Context Desc */ +#define IXGBE_ADVTXD_DTYP_DATA 0x00300000 /* Advanced Data Descriptor */ +#define IXGBE_ADVTXD_DCMD_EOP IXGBE_TXD_CMD_EOP /* End of Packet */ +#define IXGBE_ADVTXD_DCMD_IFCS IXGBE_TXD_CMD_IFCS /* Insert FCS */ +#define IXGBE_ADVTXD_DCMD_RDMA 0x04000000 /* RDMA */ +#define IXGBE_ADVTXD_DCMD_RS IXGBE_TXD_CMD_RS /* Report Status */ +#define IXGBE_ADVTXD_DCMD_DDTYP_ISCSI 0x10000000 /* DDP hdr type or iSCSI */ +#define IXGBE_ADVTXD_DCMD_DEXT IXGBE_TXD_CMD_DEXT /* Desc ext (1=Adv) */ +#define IXGBE_ADVTXD_DCMD_VLE IXGBE_TXD_CMD_VLE /* VLAN pkt enable */ +#define IXGBE_ADVTXD_DCMD_TSE 0x80000000 /* TCP Seg enable */ +#define IXGBE_ADVTXD_STAT_DD IXGBE_TXD_STAT_DD /* Descriptor Done */ +#define IXGBE_ADVTXD_STAT_SN_CRC 0x00000002 /* NXTSEQ/SEED present in WB */ +#define IXGBE_ADVTXD_STAT_RSV 0x0000000C /* STA Reserved */ +#define IXGBE_ADVTXD_IDX_SHIFT 4 /* Adv desc Index shift */ +#define IXGBE_ADVTXD_POPTS_SHIFT 8 /* Adv desc POPTS shift */ +#define IXGBE_ADVTXD_POPTS_IXSM (IXGBE_TXD_POPTS_IXSM << \ + IXGBE_ADVTXD_POPTS_SHIFT) +#define IXGBE_ADVTXD_POPTS_TXSM (IXGBE_TXD_POPTS_TXSM << \ + IXGBE_ADVTXD_POPTS_SHIFT) +#define IXGBE_ADVTXD_POPTS_EOM 0x00000400 /* Enable L bit-RDMA DDP hdr */ +#define IXGBE_ADVTXD_POPTS_ISCO_1ST 0x00000000 /* 1st TSO of iSCSI PDU */ +#define IXGBE_ADVTXD_POPTS_ISCO_MDL 0x00000800 /* Middle TSO of iSCSI PDU */ +#define IXGBE_ADVTXD_POPTS_ISCO_LAST 0x00001000 /* Last TSO of iSCSI PDU */ +#define IXGBE_ADVTXD_POPTS_ISCO_FULL 0x00001800 /* 1st&Last TSO-full iSCSI PDU*/ +#define IXGBE_ADVTXD_POPTS_RSV 0x00002000 /* POPTS Reserved */ +#define IXGBE_ADVTXD_PAYLEN_SHIFT 14 /* Adv desc PAYLEN shift */ +#define IXGBE_ADVTXD_MACLEN_SHIFT 9 /* Adv ctxt desc mac len shift */ +#define IXGBE_ADVTXD_VLAN_SHIFT 16 /* Adv ctxt vlan tag shift */ +#define IXGBE_ADVTXD_TUCMD_IPV4 0x00000400 /* IP Packet Type: 1=IPv4 */ +#define IXGBE_ADVTXD_TUCMD_IPV6 0x00000000 /* IP Packet Type: 0=IPv6 */ +#define IXGBE_ADVTXD_TUCMD_L4T_UDP 0x00000000 /* L4 Packet TYPE of UDP */ +#define IXGBE_ADVTXD_TUCMD_L4T_TCP 0x00000800 /* L4 Packet TYPE of TCP */ +#define IXGBE_ADVTXD_TUCMD_MKRREQ 0x00002000 /* Req requires Markers and CRC */ +#define IXGBE_ADVTXD_L4LEN_SHIFT 8 /* Adv ctxt L4LEN shift */ +#define IXGBE_ADVTXD_MSS_SHIFT 16 /* Adv ctxt MSS shift */ + +/* Link speed */ +#define IXGBE_LINK_SPEED_UNKNOWN 0 +#define IXGBE_LINK_SPEED_100_FULL 0x0008 +#define IXGBE_LINK_SPEED_1GB_FULL 0x0020 +#define IXGBE_LINK_SPEED_10GB_FULL 0x0080 + + +enum ixgbe_eeprom_type { + ixgbe_eeprom_uninitialized = 0, + ixgbe_eeprom_spi, + ixgbe_eeprom_none /* No NVM support */ +}; + +enum ixgbe_mac_type { + ixgbe_mac_unknown = 0, + ixgbe_mac_82598EB, + ixgbe_num_macs +}; + +enum ixgbe_phy_type { + ixgbe_phy_unknown = 0, + ixgbe_phy_tn, + ixgbe_phy_qt, + ixgbe_phy_xaui +}; + +enum ixgbe_media_type { + ixgbe_media_type_unknown = 0, + ixgbe_media_type_fiber, + ixgbe_media_type_copper, + ixgbe_media_type_backplane +}; + +/* Flow Control Settings */ +enum ixgbe_fc_type { + ixgbe_fc_none = 0, + ixgbe_fc_rx_pause, + ixgbe_fc_tx_pause, + ixgbe_fc_full, + ixgbe_fc_default +}; + +struct ixgbe_addr_filter_info { + u32 num_mc_addrs; + u32 rar_used_count; + u32 mc_addr_in_rar_count; + u32 mta_in_use; +}; + +/* Flow control parameters */ +struct ixgbe_fc_info { + u32 high_water; /* Flow Control High-water */ + u32 low_water; /* Flow Control Low-water */ + u16 pause_time; /* Flow Control Pause timer */ + bool send_xon; 
/* Flow control send XON */ + bool strict_ieee; /* Strict IEEE mode */ + enum ixgbe_fc_type type; /* Type of flow control */ + enum ixgbe_fc_type original_type; +}; + +/* Statistics counters collected by the MAC */ +struct ixgbe_hw_stats { + u64 crcerrs; + u64 illerrc; + u64 errbc; + u64 mspdc; + u64 mpctotal; + u64 mpc[8]; + u64 mlfc; + u64 mrfc; + u64 rlec; + u64 lxontxc; + u64 lxonrxc; + u64 lxofftxc; + u64 lxoffrxc; + u64 pxontxc[8]; + u64 pxonrxc[8]; + u64 pxofftxc[8]; + u64 pxoffrxc[8]; + u64 prc64; + u64 prc127; + u64 prc255; + u64 prc511; + u64 prc1023; + u64 prc1522; + u64 gprc; + u64 bprc; + u64 mprc; + u64 gptc; + u64 gorc; + u64 gotc; + u64 rnbc[8]; + u64 ruc; + u64 rfc; + u64 roc; + u64 rjc; + u64 mngprc; + u64 mngpdc; + u64 mngptc; + u64 tor; + u64 tpr; + u64 tpt; + u64 ptc64; + u64 ptc127; + u64 ptc255; + u64 ptc511; + u64 ptc1023; + u64 ptc1522; + u64 mptc; + u64 bptc; + u64 xec; + u64 rqsmr[16]; + u64 tqsmr[8]; + u64 qprc[16]; + u64 qptc[16]; + u64 qbrc[16]; + u64 qbtc[16]; +}; + +/* forward declaration */ +struct ixgbe_hw; + +struct ixgbe_mac_operations { + s32 (*reset)(struct ixgbe_hw *); + enum ixgbe_media_type (*get_media_type)(struct ixgbe_hw *); +}; + +struct ixgbe_phy_operations { + s32 (*setup)(struct ixgbe_hw *); + s32 (*check)(struct ixgbe_hw *, u32 *, bool *); + s32 (*setup_speed)(struct ixgbe_hw *, u32, bool, bool); + s32 (*get_settings)(struct ixgbe_hw *, u32 *, bool *); +}; + +struct ixgbe_mac_info { + struct ixgbe_mac_operations ops; + enum ixgbe_mac_type type; + u8 addr[IXGBE_ETH_LENGTH_OF_ADDRESS]; + u8 perm_addr[IXGBE_ETH_LENGTH_OF_ADDRESS]; + s32 mc_filter_type; + u32 num_rx_queues; + u32 num_tx_queues; + u32 num_rx_addrs; + u32 link_attach_type; + u32 link_mode_select; + bool link_settings_loaded; +}; + + +struct ixgbe_eeprom_info { + enum ixgbe_eeprom_type type; + u16 word_size; + u16 address_bits; +}; + +struct ixgbe_phy_info { + struct ixgbe_phy_operations ops; + + enum ixgbe_phy_type type; + u32 addr; + u32 id; + u32 revision; + enum ixgbe_media_type media_type; + u32 autoneg_advertised; + bool autoneg_wait_to_complete; +}; + +struct ixgbe_info { + enum ixgbe_mac_type mac; + s32 (*get_invariants)(struct ixgbe_hw *); + struct ixgbe_mac_operations *mac_ops; + struct ixgbe_phy_operations *phy_ops; +}; + +struct ixgbe_hw { + u8 __iomem *hw_addr; + void *back; + struct ixgbe_mac_info mac; + struct ixgbe_addr_filter_info addr_ctrl; + struct ixgbe_fc_info fc; + struct ixgbe_phy_info phy; + struct ixgbe_eeprom_info eeprom; + u16 device_id; + u16 vendor_id; + u16 subsystem_device_id; + u16 subsystem_vendor_id; + u8 revision_id; + bool adapter_stopped; +}; + +/* Error Codes */ +#define IXGBE_ERR_EEPROM -1 +#define IXGBE_ERR_EEPROM_CHECKSUM -2 +#define IXGBE_ERR_PHY -3 +#define IXGBE_ERR_CONFIG -4 +#define IXGBE_ERR_PARAM -5 +#define IXGBE_ERR_MAC_TYPE -6 +#define IXGBE_ERR_UNKNOWN_PHY -7 +#define IXGBE_ERR_LINK_SETUP -8 +#define IXGBE_ERR_ADAPTER_STOPPED -9 +#define IXGBE_ERR_INVALID_MAC_ADDR -10 +#define IXGBE_ERR_DEVICE_NOT_SUPPORTED -11 +#define IXGBE_ERR_MASTER_REQUESTS_PENDING -12 +#define IXGBE_ERR_INVALID_LINK_SETTINGS -13 +#define IXGBE_ERR_AUTONEG_NOT_COMPLETE -14 +#define IXGBE_ERR_RESET_FAILED -15 +#define IXGBE_ERR_SWFW_SYNC -16 +#define IXGBE_ERR_PHY_ADDR_INVALID -17 +#define IXGBE_NOT_IMPLEMENTED 0x7FFFFFFF + +#endif /* _IXGBE_TYPE_H_ */ diff --git a/drivers/net/ixp2000/enp2611.c b/drivers/net/ixp2000/enp2611.c index d3f4235c585d42d8c7b5ac2cc7684a59b6aee8c6..b02a981c87a8e7f7d332f5dfd122d59396181bb9 100644 --- a/drivers/net/ixp2000/enp2611.c +++ 
b/drivers/net/ixp2000/enp2611.c @@ -210,7 +210,6 @@ static int __init enp2611_init_module(void) return -ENOMEM; } - SET_MODULE_OWNER(nds[i]); nds[i]->get_stats = enp2611_get_stats; pm3386_init_port(i); pm3386_get_mac(i, nds[i]->dev_addr); diff --git a/drivers/net/ixp2000/ixpdev.c b/drivers/net/ixp2000/ixpdev.c index d9ce1aef148a5d9ab4c4d3ccf8c4b0e5ded4eda2..6c0dd49149d0d3047d55059fd2c54889a3e7786b 100644 --- a/drivers/net/ixp2000/ixpdev.c +++ b/drivers/net/ixp2000/ixpdev.c @@ -74,9 +74,9 @@ static int ixpdev_xmit(struct sk_buff *skb, struct net_device *dev) } -static int ixpdev_rx(struct net_device *dev, int *budget) +static int ixpdev_rx(struct net_device *dev, int processed, int budget) { - while (*budget > 0) { + while (processed < budget) { struct ixpdev_rx_desc *desc; struct sk_buff *skb; void *buf; @@ -122,29 +122,34 @@ static int ixpdev_rx(struct net_device *dev, int *budget) err: ixp2000_reg_write(RING_RX_PENDING, _desc); - dev->quota--; - (*budget)--; + processed++; } - return 1; + return processed; } /* dev always points to nds[0]. */ -static int ixpdev_poll(struct net_device *dev, int *budget) +static int ixpdev_poll(struct napi_struct *napi, int budget) { + struct ixpdev_priv *ip = container_of(napi, struct ixpdev_priv, napi); + struct net_device *dev = ip->dev; + int rx; + /* @@@ Have to stop polling when nds[0] is administratively * downed while we are polling. */ + rx = 0; do { ixp2000_reg_write(IXP2000_IRQ_THD_RAW_STATUS_A_0, 0x00ff); - if (ixpdev_rx(dev, budget)) - return 1; + rx = ixpdev_rx(dev, rx, budget); + if (rx >= budget) + break; } while (ixp2000_reg_read(IXP2000_IRQ_THD_RAW_STATUS_A_0) & 0x00ff); - netif_rx_complete(dev); + netif_rx_complete(dev, napi); ixp2000_reg_write(IXP2000_IRQ_THD_ENABLE_SET_A_0, 0x00ff); - return 0; + return rx; } static void ixpdev_tx_complete(void) @@ -199,9 +204,12 @@ static irqreturn_t ixpdev_interrupt(int irq, void *dev_id) * Any of the eight receive units signaled RX? 
*/ if (status & 0x00ff) { + struct net_device *dev = nds[0]; + struct ixpdev_priv *ip = netdev_priv(dev); + ixp2000_reg_wrb(IXP2000_IRQ_THD_ENABLE_CLEAR_A_0, 0x00ff); - if (likely(__netif_rx_schedule_prep(nds[0]))) { - __netif_rx_schedule(nds[0]); + if (likely(napi_schedule_prep(&ip->napi))) { + __netif_rx_schedule(dev, &ip->napi); } else { printk(KERN_CRIT "ixp2000: irq while polling!!\n"); } @@ -232,11 +240,13 @@ static int ixpdev_open(struct net_device *dev) struct ixpdev_priv *ip = netdev_priv(dev); int err; + napi_enable(&ip->napi); if (!nds_open++) { err = request_irq(IRQ_IXP2000_THDA0, ixpdev_interrupt, IRQF_SHARED, "ixp2000_eth", nds); if (err) { nds_open--; + napi_disable(&ip->napi); return err; } @@ -254,6 +264,7 @@ static int ixpdev_close(struct net_device *dev) struct ixpdev_priv *ip = netdev_priv(dev); netif_stop_queue(dev); + napi_disable(&ip->napi); set_port_admin_status(ip->channel, 0); if (!--nds_open) { @@ -274,7 +285,6 @@ struct net_device *ixpdev_alloc(int channel, int sizeof_priv) return NULL; dev->hard_start_xmit = ixpdev_xmit; - dev->poll = ixpdev_poll; dev->open = ixpdev_open; dev->stop = ixpdev_close; #ifdef CONFIG_NET_POLL_CONTROLLER @@ -282,9 +292,10 @@ struct net_device *ixpdev_alloc(int channel, int sizeof_priv) #endif dev->features |= NETIF_F_SG | NETIF_F_HW_CSUM; - dev->weight = 64; ip = netdev_priv(dev); + ip->dev = dev; + netif_napi_add(dev, &ip->napi, ixpdev_poll, 64); ip->channel = channel; ip->tx_queue_entries = 0; diff --git a/drivers/net/ixp2000/ixpdev.h b/drivers/net/ixp2000/ixpdev.h index bd686cb630581eea1a3b2087f7beed10d052948a..391ece623243cc6141873baf82686efcba4041d5 100644 --- a/drivers/net/ixp2000/ixpdev.h +++ b/drivers/net/ixp2000/ixpdev.h @@ -14,6 +14,8 @@ struct ixpdev_priv { + struct net_device *dev; + struct napi_struct napi; int channel; int tx_queue_entries; }; diff --git a/drivers/net/jazzsonic.c b/drivers/net/jazzsonic.c index 75f6f441e876c69322db9ab806960f8b15c08c84..d3825c8ee99493fc01cfb65a0e25bfe07a285bd3 100644 --- a/drivers/net/jazzsonic.c +++ b/drivers/net/jazzsonic.c @@ -45,7 +45,6 @@ #include static char jazz_sonic_string[] = "jazzsonic"; -static struct platform_device *jazz_sonic_device; #define SONIC_MEM_SIZE 0x100 @@ -69,14 +68,6 @@ static unsigned int sonic_debug = SONIC_DEBUG; static unsigned int sonic_debug = 1; #endif -/* - * Base address and interrupt of the SONIC controller on JAZZ boards - */ -static struct { - unsigned int port; - unsigned int irq; -} sonic_portlist[] = { {JAZZ_ETHERNET_BASE, JAZZ_ETHERNET_IRQ}, {0, 0}}; - /* * We cannot use station (ethernet) address prefixes to detect the * sonic controller since these are board manufacturer depended. @@ -215,13 +206,13 @@ static int __init jazz_sonic_probe(struct platform_device *pdev) { struct net_device *dev; struct sonic_local *lp; + struct resource *res; int err = 0; int i; + DECLARE_MAC_BUF(mac); - /* - * Don't probe if we're not running on a Jazz board. - */ - if (mips_machgroup != MACH_GROUP_JAZZ) + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!res) return -ENODEV; dev = alloc_etherdev(sizeof(struct sonic_local)); @@ -231,37 +222,20 @@ static int __init jazz_sonic_probe(struct platform_device *pdev) lp = netdev_priv(dev); lp->device = &pdev->dev; SET_NETDEV_DEV(dev, &pdev->dev); - SET_MODULE_OWNER(dev); netdev_boot_setup_check(dev); - if (dev->base_addr >= KSEG0) { /* Check a single specified location. */ - err = sonic_probe1(dev); - } else if (dev->base_addr != 0) { /* Don't probe at all. 
*/ - err = -ENXIO; - } else { - for (i = 0; sonic_portlist[i].port; i++) { - dev->base_addr = sonic_portlist[i].port; - dev->irq = sonic_portlist[i].irq; - if (sonic_probe1(dev) == 0) - break; - } - if (!sonic_portlist[i].port) - err = -ENODEV; - } + dev->base_addr = res->start; + dev->irq = platform_get_irq(pdev, 0); + err = sonic_probe1(dev); if (err) goto out; err = register_netdev(dev); if (err) goto out1; - printk("%s: MAC ", dev->name); - for (i = 0; i < 6; i++) { - printk("%2.2x", dev->dev_addr[i]); - if (i < 5) - printk(":"); - } - printk(" IRQ %d\n", dev->irq); + printk("%s: MAC %s IRQ %d\n", + dev->name, print_mac(mac, dev->dev_addr), dev->irq); return 0; @@ -303,38 +277,12 @@ static struct platform_driver jazz_sonic_driver = { static int __init jazz_sonic_init_module(void) { - int err; - - if ((err = platform_driver_register(&jazz_sonic_driver))) { - printk(KERN_ERR "Driver registration failed\n"); - return err; - } - - jazz_sonic_device = platform_device_alloc(jazz_sonic_string, 0); - if (!jazz_sonic_device) - goto out_unregister; - - if (platform_device_add(jazz_sonic_device)) { - platform_device_put(jazz_sonic_device); - jazz_sonic_device = NULL; - } - - return 0; - -out_unregister: - platform_driver_unregister(&jazz_sonic_driver); - - return -ENOMEM; + return platform_driver_register(&jazz_sonic_driver); } static void __exit jazz_sonic_cleanup_module(void) { platform_driver_unregister(&jazz_sonic_driver); - - if (jazz_sonic_device) { - platform_device_unregister(jazz_sonic_device); - jazz_sonic_device = NULL; - } } module_init(jazz_sonic_init_module); diff --git a/drivers/net/lance.c b/drivers/net/lance.c index a4e5fab12628f6b578cd11ac5327fa61b3949779..977ed3401bb34cd97d688800ba89340381d39e9e 100644 --- a/drivers/net/lance.c +++ b/drivers/net/lance.c @@ -466,6 +466,7 @@ static int __init lance_probe1(struct net_device *dev, int ioaddr, int irq, int unsigned long flags; int err = -ENOMEM; void __iomem *bios; + DECLARE_MAC_BUF(mac); /* First we look for special cases. Check for HP's on-board ethernet by looking for 'HP' in the BIOS. @@ -521,14 +522,14 @@ static int __init lance_probe1(struct net_device *dev, int ioaddr, int irq, int /* We can't allocate dev->priv from alloc_etherdev() because it must a ISA DMA-able region. */ - SET_MODULE_OWNER(dev); chipname = chip_table[lance_version].name; - printk("%s: %s at %#3x,", dev->name, chipname, ioaddr); + printk("%s: %s at %#3x, ", dev->name, chipname, ioaddr); /* There is a 16 byte station address PROM at the base address. The first six bytes are the station address. */ for (i = 0; i < 6; i++) - printk(" %2.2x", dev->dev_addr[i] = inb(ioaddr + i)); + dev->dev_addr[i] = inb(ioaddr + i); + printk("%s", print_mac(mac, dev->dev_addr)); dev->base_addr = ioaddr; /* Make certain the data structures used by the LANCE are aligned and DMAble. */ diff --git a/drivers/net/lguest_net.c b/drivers/net/lguest_net.c index cab57911a80e041e1f7b536324f1c56de6a44c19..abce2ee8430acdde41486667dae98d6aa4cb41d2 100644 --- a/drivers/net/lguest_net.c +++ b/drivers/net/lguest_net.c @@ -235,9 +235,9 @@ static int lguestnet_start_xmit(struct sk_buff *skb, struct net_device *dev) struct lguestnet_info *info = netdev_priv(dev); /* Extract the destination ethernet address from the packet. 
*/ const unsigned char *dest = ((struct ethhdr *)skb->data)->h_dest; + DECLARE_MAC_BUF(mac); - pr_debug("%s: xmit %02x:%02x:%02x:%02x:%02x:%02x\n", - dev->name, dest[0],dest[1],dest[2],dest[3],dest[4],dest[5]); + pr_debug("%s: xmit %s\n", dev->name, print_mac(mac, dest)); /* If it's a multicast packet, we broadcast to everyone. That's not * very efficient, but there are very few applications which actually @@ -460,8 +460,6 @@ static int lguestnet_probe(struct lguest_device *lgdev) if (!dev) return -ENOMEM; - SET_MODULE_OWNER(dev); - /* Ethernet defaults with some changes */ ether_setup(dev); dev->set_mac_address = NULL; diff --git a/drivers/net/lib82596.c b/drivers/net/lib82596.c index 5884f5bd04a455d00d75f67b5210b4da08529a9d..ffaa14f2cd016429b775d2151eacc6bf740ec38f 100644 --- a/drivers/net/lib82596.c +++ b/drivers/net/lib82596.c @@ -322,7 +322,6 @@ struct i596_private { struct i596_cmd *cmd_head; int cmd_backlog; u32 last_cmd; - struct net_device_stats stats; int next_tx_cmd; int options; spinlock_t lock; /* serialize access to chip */ @@ -352,7 +351,6 @@ static int i596_open(struct net_device *dev); static int i596_start_xmit(struct sk_buff *skb, struct net_device *dev); static irqreturn_t i596_interrupt(int irq, void *dev_id); static int i596_close(struct net_device *dev); -static struct net_device_stats *i596_get_stats(struct net_device *dev); static void i596_add_cmd(struct net_device *dev, struct i596_cmd *cmd); static void i596_tx_timeout (struct net_device *dev); static void print_eth(unsigned char *buf, char *str); @@ -725,7 +723,7 @@ static inline int i596_rx(struct net_device *dev) printk(KERN_ERR "%s: i596_rx Memory squeeze, dropping packet.\n", dev->name); - lp->stats.rx_dropped++; + dev->stats.rx_dropped++; } else { if (!rx_in_place) { /* 16 byte align the data fields */ @@ -742,28 +740,28 @@ static inline int i596_rx(struct net_device *dev) skb->protocol = eth_type_trans(skb, dev); netif_rx(skb); dev->last_rx = jiffies; - lp->stats.rx_packets++; - lp->stats.rx_bytes += pkt_len; + dev->stats.rx_packets++; + dev->stats.rx_bytes += pkt_len; } } else { DEB(DEB_ERRORS, printk(KERN_DEBUG "%s: Error, rfd.stat = 0x%04x\n", dev->name, rfd->stat)); - lp->stats.rx_errors++; + dev->stats.rx_errors++; if (rfd->stat & SWAP16(0x0100)) - lp->stats.collisions++; + dev->stats.collisions++; if (rfd->stat & SWAP16(0x8000)) - lp->stats.rx_length_errors++; + dev->stats.rx_length_errors++; if (rfd->stat & SWAP16(0x0001)) - lp->stats.rx_over_errors++; + dev->stats.rx_over_errors++; if (rfd->stat & SWAP16(0x0002)) - lp->stats.rx_fifo_errors++; + dev->stats.rx_fifo_errors++; if (rfd->stat & SWAP16(0x0004)) - lp->stats.rx_frame_errors++; + dev->stats.rx_frame_errors++; if (rfd->stat & SWAP16(0x0008)) - lp->stats.rx_crc_errors++; + dev->stats.rx_crc_errors++; if (rfd->stat & SWAP16(0x0010)) - lp->stats.rx_length_errors++; + dev->stats.rx_length_errors++; } /* Clear the buffer descriptor count and EOF + F flags */ @@ -821,8 +819,8 @@ static inline void i596_cleanup_cmd(struct net_device *dev, struct i596_private dev_kfree_skb(skb); - lp->stats.tx_errors++; - lp->stats.tx_aborted_errors++; + dev->stats.tx_errors++; + dev->stats.tx_aborted_errors++; ptr->v_next = NULL; ptr->b_next = I596_NULL; @@ -951,10 +949,10 @@ static void i596_tx_timeout (struct net_device *dev) "%s: transmit timed out, status resetting.\n", dev->name)); - lp->stats.tx_errors++; + dev->stats.tx_errors++; /* Try to restart the adaptor */ - if (lp->last_restart == lp->stats.tx_packets) { + if (lp->last_restart == 
dev->stats.tx_packets) { DEB(DEB_ERRORS, printk(KERN_DEBUG "Resetting board.\n")); /* Shutdown and restart */ i596_reset (dev, lp); @@ -964,7 +962,7 @@ static void i596_tx_timeout (struct net_device *dev) lp->dma->scb.command = SWAP16(CUC_START | RX_START); DMA_WBACK_INV(dev, &(lp->dma->scb), sizeof(struct i596_scb)); ca (dev); - lp->last_restart = lp->stats.tx_packets; + lp->last_restart = dev->stats.tx_packets; } dev->trans_start = jiffies; @@ -999,7 +997,7 @@ static int i596_start_xmit(struct sk_buff *skb, struct net_device *dev) DEB(DEB_ERRORS, printk(KERN_DEBUG "%s: xmit ring full, dropping packet.\n", dev->name)); - lp->stats.tx_dropped++; + dev->stats.tx_dropped++; dev_kfree_skb(skb); } else { @@ -1025,8 +1023,8 @@ static int i596_start_xmit(struct sk_buff *skb, struct net_device *dev) DMA_WBACK_INV(dev, tbd, sizeof(struct i596_tbd)); i596_add_cmd(dev, &tx_cmd->cmd); - lp->stats.tx_packets++; - lp->stats.tx_bytes += length; + dev->stats.tx_packets++; + dev->stats.tx_bytes += length; } netif_start_queue(dev); @@ -1036,15 +1034,12 @@ static int i596_start_xmit(struct sk_buff *skb, struct net_device *dev) static void print_eth(unsigned char *add, char *str) { - int i; + DECLARE_MAC_BUF(mac); + DECLARE_MAC_BUF(mac2); - printk(KERN_DEBUG "i596 0x%p, ", add); - for (i = 0; i < 6; i++) - printk(" %02X", add[i + 6]); - printk(" -->"); - for (i = 0; i < 6; i++) - printk(" %02X", add[i]); - printk(" %02X%02X, %s\n", add[12], add[13], str); + printk(KERN_DEBUG "i596 0x%p, %s --> %s %02X%02X, %s\n", + add, print_mac(mac, add + 6), print_mac(mac2, add), + add[12], add[13], str); } static int __devinit i82596_probe(struct net_device *dev) @@ -1076,7 +1071,6 @@ static int __devinit i82596_probe(struct net_device *dev) dev->open = i596_open; dev->stop = i596_close; dev->hard_start_xmit = i596_start_xmit; - dev->get_stats = i596_get_stats; dev->set_multicast_list = set_multicast_list; dev->tx_timeout = i596_tx_timeout; dev->watchdog_timeo = TX_TIMEOUT; @@ -1197,17 +1191,17 @@ static irqreturn_t i596_interrupt(int irq, void *dev_id) DEB(DEB_TXADDR, print_eth(skb->data, "tx-done")); } else { - lp->stats.tx_errors++; + dev->stats.tx_errors++; if (ptr->status & SWAP16(0x0020)) - lp->stats.collisions++; + dev->stats.collisions++; if (!(ptr->status & SWAP16(0x0040))) - lp->stats.tx_heartbeat_errors++; + dev->stats.tx_heartbeat_errors++; if (ptr->status & SWAP16(0x0400)) - lp->stats.tx_carrier_errors++; + dev->stats.tx_carrier_errors++; if (ptr->status & SWAP16(0x0800)) - lp->stats.collisions++; + dev->stats.collisions++; if (ptr->status & SWAP16(0x1000)) - lp->stats.tx_aborted_errors++; + dev->stats.tx_aborted_errors++; } dma_unmap_single(dev->dev.parent, tx_cmd->dma_addr, @@ -1292,8 +1286,8 @@ static irqreturn_t i596_interrupt(int irq, void *dev_id) "%s: i596 interrupt receive unit inactive, status 0x%x\n", dev->name, status)); ack_cmd |= RX_START; - lp->stats.rx_errors++; - lp->stats.rx_fifo_errors++; + dev->stats.rx_errors++; + dev->stats.rx_fifo_errors++; rebuild_rx_bufs(dev); } } @@ -1346,13 +1340,6 @@ static int i596_close(struct net_device *dev) return 0; } -static struct net_device_stats *i596_get_stats(struct net_device *dev) -{ - struct i596_private *lp = netdev_priv(dev); - - return &lp->stats; -} - /* * Set or clear the multicast filter for this adaptor. 
*/ @@ -1362,6 +1349,7 @@ static void set_multicast_list(struct net_device *dev) struct i596_private *lp = netdev_priv(dev); struct i596_dma *dma = lp->dma; int config = 0, cnt; + DECLARE_MAC_BUF(mac); DEB(DEB_MULTI, printk(KERN_DEBUG @@ -1425,8 +1413,8 @@ static void set_multicast_list(struct net_device *dev) if (i596_debug > 1) DEB(DEB_MULTI, printk(KERN_DEBUG - "%s: Adding address %02x:%02x:%02x:%02x:%02x:%02x\n", - dev->name, cp[0], cp[1], cp[2], cp[3], cp[4], cp[5])); + "%s: Adding address %s\n", + dev->name, print_mac(mac, cp))); } DMA_WBACK_INV(dev, &dma->mc_cmd, sizeof(struct mc_cmd)); i596_add_cmd(dev, &cmd->cmd); diff --git a/drivers/net/lne390.c b/drivers/net/lne390.c index 0a08d0c4e7b4b1063d76c7d46ee1a0d85f6f6724..b36989097883bc7b800bfd00269dd245b55323ff 100644 --- a/drivers/net/lne390.c +++ b/drivers/net/lne390.c @@ -111,8 +111,6 @@ static int __init do_lne390_probe(struct net_device *dev) int mem_start = dev->mem_start; int ret; - SET_MODULE_OWNER(dev); - if (ioaddr > 0x1ff) { /* Check a single specified location. */ if (!request_region(ioaddr, LNE390_IO_EXTENT, DRV_NAME)) return -EBUSY; @@ -171,6 +169,7 @@ static int __init lne390_probe1(struct net_device *dev, int ioaddr) { int i, revision, ret; unsigned long eisa_id; + DECLARE_MAC_BUF(mac); if (inb_p(ioaddr + LNE390_ID_PORT) == 0xff) return -ENODEV; @@ -202,10 +201,12 @@ static int __init lne390_probe1(struct net_device *dev, int ioaddr) } #endif - printk("lne390.c: LNE390%X in EISA slot %d, address", 0xa+revision, ioaddr/0x1000); for(i = 0; i < ETHER_ADDR_LEN; i++) - printk(" %02x", (dev->dev_addr[i] = inb(ioaddr + LNE390_SA_PROM + i))); - printk(".\nlne390.c: "); + dev->dev_addr[i] = inb(ioaddr + LNE390_SA_PROM + i); + printk("lne390.c: LNE390%X in EISA slot %d, address %s.\n", + 0xa+revision, ioaddr/0x1000, print_mac(mac, dev->dev_addr)); + + printk("lne390.c: "); /* Snarf the interrupt now. 
CFG file has them all listed as `edge' with share=NO */ if (dev->irq == 0) { diff --git a/drivers/net/loopback.c b/drivers/net/loopback.c index 5106c2328d12cb7a52f7f3fec847ea844e87d306..be25aa33971cd5f51a2fbff62dcd807420f623fd 100644 --- a/drivers/net/loopback.c +++ b/drivers/net/loopback.c @@ -57,12 +57,12 @@ #include #include #include +#include struct pcpu_lstats { unsigned long packets; unsigned long bytes; }; -static DEFINE_PER_CPU(struct pcpu_lstats, pcpu_lstats); #define LOOPBACK_OVERHEAD (128 + MAX_HEADER + 16 + 16) @@ -134,7 +134,7 @@ static void emulate_large_send_offload(struct sk_buff *skb) */ static int loopback_xmit(struct sk_buff *skb, struct net_device *dev) { - struct pcpu_lstats *lb_stats; + struct pcpu_lstats *pcpu_lstats, *lb_stats; skb_orphan(skb); @@ -154,8 +154,9 @@ static int loopback_xmit(struct sk_buff *skb, struct net_device *dev) #endif dev->last_rx = jiffies; - /* it's OK to use __get_cpu_var() because BHs are off */ - lb_stats = &__get_cpu_var(pcpu_lstats); + /* it's OK to use per_cpu_ptr() because BHs are off */ + pcpu_lstats = netdev_priv(dev); + lb_stats = per_cpu_ptr(pcpu_lstats, smp_processor_id()); lb_stats->bytes += skb->len; lb_stats->packets++; @@ -166,15 +167,17 @@ static int loopback_xmit(struct sk_buff *skb, struct net_device *dev) static struct net_device_stats *get_stats(struct net_device *dev) { + const struct pcpu_lstats *pcpu_lstats; struct net_device_stats *stats = &dev->stats; unsigned long bytes = 0; unsigned long packets = 0; int i; + pcpu_lstats = netdev_priv(dev); for_each_possible_cpu(i) { const struct pcpu_lstats *lb_stats; - lb_stats = &per_cpu(pcpu_lstats, i); + lb_stats = per_cpu_ptr(pcpu_lstats, i); bytes += lb_stats->bytes; packets += lb_stats->packets; } @@ -192,51 +195,107 @@ static u32 always_on(struct net_device *dev) static const struct ethtool_ops loopback_ethtool_ops = { .get_link = always_on, - .get_tso = ethtool_op_get_tso, .set_tso = ethtool_op_set_tso, .get_tx_csum = always_on, .get_sg = always_on, .get_rx_csum = always_on, }; +static int loopback_dev_init(struct net_device *dev) +{ + struct pcpu_lstats *lstats; + + lstats = alloc_percpu(struct pcpu_lstats); + if (!lstats) + return -ENOMEM; + + dev->priv = lstats; + return 0; +} + +static void loopback_dev_free(struct net_device *dev) +{ + struct pcpu_lstats *lstats = netdev_priv(dev); + + free_percpu(lstats); + free_netdev(dev); +} + /* - * The loopback device is special. There is only one instance and - * it is statically allocated. Don't do this for other devices. + * The loopback device is special. There is only one instance + * per network namespace. 
*/ -struct net_device loopback_dev = { - .name = "lo", - .get_stats = &get_stats, - .mtu = (16 * 1024) + 20 + 20 + 12, - .hard_start_xmit = loopback_xmit, - .hard_header = eth_header, - .hard_header_cache = eth_header_cache, - .header_cache_update = eth_header_cache_update, - .hard_header_len = ETH_HLEN, /* 14 */ - .addr_len = ETH_ALEN, /* 6 */ - .tx_queue_len = 0, - .type = ARPHRD_LOOPBACK, /* 0x0001*/ - .rebuild_header = eth_rebuild_header, - .flags = IFF_LOOPBACK, - .features = NETIF_F_SG | NETIF_F_FRAGLIST +static void loopback_setup(struct net_device *dev) +{ + dev->get_stats = &get_stats; + dev->mtu = (16 * 1024) + 20 + 20 + 12; + dev->hard_start_xmit = loopback_xmit; + dev->hard_header_len = ETH_HLEN; /* 14 */ + dev->addr_len = ETH_ALEN; /* 6 */ + dev->tx_queue_len = 0; + dev->type = ARPHRD_LOOPBACK; /* 0x0001*/ + dev->flags = IFF_LOOPBACK; + dev->features = NETIF_F_SG | NETIF_F_FRAGLIST #ifdef LOOPBACK_TSO - | NETIF_F_TSO + | NETIF_F_TSO #endif - | NETIF_F_NO_CSUM | NETIF_F_HIGHDMA - | NETIF_F_LLTX, - .ethtool_ops = &loopback_ethtool_ops, -}; + | NETIF_F_NO_CSUM + | NETIF_F_HIGHDMA + | NETIF_F_LLTX + | NETIF_F_NETNS_LOCAL, + dev->ethtool_ops = &loopback_ethtool_ops; + dev->header_ops = ð_header_ops; + dev->init = loopback_dev_init; + dev->destructor = loopback_dev_free; +} /* Setup and register the loopback device. */ -static int __init loopback_init(void) +static __net_init int loopback_net_init(struct net *net) { - int err = register_netdev(&loopback_dev); + struct net_device *dev; + int err; + + err = -ENOMEM; + dev = alloc_netdev(0, "lo", loopback_setup); + if (!dev) + goto out; + dev->nd_net = net; + err = register_netdev(dev); if (err) - panic("loopback: Failed to register netdevice: %d\n", err); + goto out_free_netdev; + + err = 0; + net->loopback_dev = dev; +out: + if (err) + panic("loopback: Failed to register netdevice: %d\n", err); return err; + +out_free_netdev: + free_netdev(dev); + goto out; +} + +static __net_exit void loopback_net_exit(struct net *net) +{ + struct net_device *dev = net->loopback_dev; + + unregister_netdev(dev); +} + +static struct pernet_operations __net_initdata loopback_net_ops = { + .init = loopback_net_init, + .exit = loopback_net_exit, }; -module_init(loopback_init); +static int __init loopback_init(void) +{ + return register_pernet_device(&loopback_net_ops); +} -EXPORT_SYMBOL(loopback_dev); +/* Loopback is special. It should be initialized before any other network + * device and network subsystem. 
+ */ +fs_initcall(loopback_init); diff --git a/drivers/net/lp486e.c b/drivers/net/lp486e.c index 5fc18da1873dcb279edfc8e27d2358cce87be44d..c5095ecd8b116a5eea41ff799c27bf415c5bd597 100644 --- a/drivers/net/lp486e.c +++ b/drivers/net/lp486e.c @@ -350,7 +350,6 @@ struct i596_private { /* aligned to a 16-byte boundary */ struct i596_cmd *cmd_head; int cmd_backlog; unsigned long last_cmd; - struct net_device_stats stats; spinlock_t cmd_lock; }; @@ -381,7 +380,6 @@ static int i596_open(struct net_device *dev); static int i596_start_xmit(struct sk_buff *skb, struct net_device *dev); static irqreturn_t i596_interrupt(int irq, void *dev_id); static int i596_close(struct net_device *dev); -static struct net_device_stats *i596_get_stats(struct net_device *dev); static void i596_add_cmd(struct net_device *dev, struct i596_cmd *cmd); static void print_eth(char *); static void set_multicast_list(struct net_device *dev); @@ -515,8 +513,6 @@ CLEAR_INT(void) { outb(0, IOADDR+8); } -#define SIZE(x) (sizeof(x)/sizeof((x)[0])) - #if 0 /* selftest or dump */ static void @@ -532,7 +528,7 @@ i596_port_do(struct net_device *dev, int portcmd, char *cmdname) { mdelay(30); /* random, unmotivated */ printk("lp486e i82596 %s result:\n", cmdname); - for (m = SIZE(lp->dump.dump); m && lp->dump.dump[m-1] == 0; m--) + for (m = ARRAY_SIZE(lp->dump.dump); m && lp->dump.dump[m-1] == 0; m--) ; for (i = 0; i < m; i++) { printk(" %04x", lp->dump.dump[i]); @@ -672,7 +668,7 @@ i596_rx_one(struct net_device *dev, struct i596_private *lp, if (skb == NULL) { printk ("%s: i596_rx Memory squeeze, " "dropping packet.\n", dev->name); - lp->stats.rx_dropped++; + dev->stats.rx_dropped++; return 1; } @@ -681,27 +677,27 @@ i596_rx_one(struct net_device *dev, struct i596_private *lp, skb->protocol = eth_type_trans(skb,dev); netif_rx(skb); dev->last_rx = jiffies; - lp->stats.rx_packets++; + dev->stats.rx_packets++; } else { #if 0 printk("Frame reception error status %04x\n", rfd->stat); #endif - lp->stats.rx_errors++; + dev->stats.rx_errors++; if (rfd->stat & RFD_COLLISION) - lp->stats.collisions++; + dev->stats.collisions++; if (rfd->stat & RFD_SHORT_FRAME_ERR) - lp->stats.rx_length_errors++; + dev->stats.rx_length_errors++; if (rfd->stat & RFD_DMA_ERR) - lp->stats.rx_over_errors++; + dev->stats.rx_over_errors++; if (rfd->stat & RFD_NOBUFS_ERR) - lp->stats.rx_fifo_errors++; + dev->stats.rx_fifo_errors++; if (rfd->stat & RFD_ALIGN_ERR) - lp->stats.rx_frame_errors++; + dev->stats.rx_frame_errors++; if (rfd->stat & RFD_CRC_ERR) - lp->stats.rx_crc_errors++; + dev->stats.rx_crc_errors++; if (rfd->stat & RFD_LENGTH_ERR) - lp->stats.rx_length_errors++; + dev->stats.rx_length_errors++; } rfd->stat = rfd->count = 0; return 0; @@ -757,8 +753,8 @@ i596_cleanup_cmd(struct net_device *dev) { dev_kfree_skb_any(tx_cmd_tbd->skb); - lp->stats.tx_errors++; - lp->stats.tx_aborted_errors++; + dev->stats.tx_errors++; + dev->stats.tx_aborted_errors++; cmd->pa_next = I596_NULL; kfree((unsigned char *)tx_cmd); @@ -869,7 +865,6 @@ static int i596_open(struct net_device *dev) } static int i596_start_xmit (struct sk_buff *skb, struct net_device *dev) { - struct i596_private *lp = dev->priv; struct tx_cmd *tx_cmd; short length; @@ -886,7 +881,7 @@ static int i596_start_xmit (struct sk_buff *skb, struct net_device *dev) { tx_cmd = kmalloc((sizeof (struct tx_cmd) + sizeof (struct i596_tbd)), GFP_ATOMIC); if (tx_cmd == NULL) { printk(KERN_WARNING "%s: i596_xmit Memory squeeze, dropping packet.\n", dev->name); - lp->stats.tx_dropped++; + dev->stats.tx_dropped++; 
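
The statistics changes in the lib82596 and lp486e hunks (and in the mace, macmace, meth, mipsnet and mv643xx_eth hunks further down) all follow one pattern: the driver-private struct net_device_stats copy and the ->get_stats method are dropped, and the counters embedded in struct net_device are bumped directly. A minimal sketch of the resulting shape, with an invented "foo" driver and an assumed foo_queue_frame() helper:

#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Sketch only; foo_queue_frame() is assumed.  With the counters kept
 * in dev->stats, the private copy and the ->get_stats hook are no
 * longer needed: the core's default get_stats returns &dev->stats. */
static int foo_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	if (foo_queue_frame(dev, skb) < 0) {
		dev->stats.tx_dropped++;	/* was lp->stats.tx_dropped++ */
		dev_kfree_skb(skb);
		return 0;
	}
	dev->stats.tx_packets++;
	dev->stats.tx_bytes += skb->len;
	return 0;
}
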
dev_kfree_skb (skb); } else { struct i596_tbd *tx_cmd_tbd; @@ -909,7 +904,7 @@ static int i596_start_xmit (struct sk_buff *skb, struct net_device *dev) { i596_add_cmd (dev, (struct i596_cmd *) tx_cmd); - lp->stats.tx_packets++; + dev->stats.tx_packets++; } return 0; @@ -922,10 +917,10 @@ i596_tx_timeout (struct net_device *dev) { /* Transmitter timeout, serious problems. */ printk(KERN_WARNING "%s: transmit timed out, status resetting.\n", dev->name); - lp->stats.tx_errors++; + dev->stats.tx_errors++; /* Try to restart the adaptor */ - if (lp->last_restart == lp->stats.tx_packets) { + if (lp->last_restart == dev->stats.tx_packets) { printk ("Resetting board.\n"); /* Shutdown and restart */ @@ -935,7 +930,7 @@ i596_tx_timeout (struct net_device *dev) { printk ("Kicking board.\n"); lp->scb.command = (CUC_START | RX_START); CA(); - lp->last_restart = lp->stats.tx_packets; + lp->last_restart = dev->stats.tx_packets; } netif_wake_queue(dev); } @@ -1023,7 +1018,6 @@ static int __init lp486e_probe(struct net_device *dev) { dev->open = &i596_open; dev->stop = &i596_close; dev->hard_start_xmit = &i596_start_xmit; - dev->get_stats = &i596_get_stats; dev->set_multicast_list = &set_multicast_list; dev->watchdog_timeo = 5*HZ; dev->tx_timeout = i596_tx_timeout; @@ -1080,20 +1074,20 @@ i596_handle_CU_completion(struct net_device *dev, if (i596_debug) print_eth(pa_to_va(tx_cmd_tbd->pa_data)); } else { - lp->stats.tx_errors++; + dev->stats.tx_errors++; if (i596_debug) printk("transmission failure:%04x\n", cmd->status); if (cmd->status & 0x0020) - lp->stats.collisions++; + dev->stats.collisions++; if (!(cmd->status & 0x0040)) - lp->stats.tx_heartbeat_errors++; + dev->stats.tx_heartbeat_errors++; if (cmd->status & 0x0400) - lp->stats.tx_carrier_errors++; + dev->stats.tx_carrier_errors++; if (cmd->status & 0x0800) - lp->stats.collisions++; + dev->stats.collisions++; if (cmd->status & 0x1000) - lp->stats.tx_aborted_errors++; + dev->stats.tx_aborted_errors++; } dev_kfree_skb_irq(tx_cmd_tbd->skb); @@ -1244,12 +1238,6 @@ static int i596_close(struct net_device *dev) { return 0; } -static struct net_device_stats * i596_get_stats(struct net_device *dev) { - struct i596_private *lp = dev->priv; - - return &lp->stats; -} - /* * Set or clear the multicast filter for this adaptor. */ diff --git a/drivers/net/mac8390.c b/drivers/net/mac8390.c index 90b0c3ed4bb6edede5f2d31fc768e26e9d8dcc68..9e700749bb319fdd65856dcba91b8413ceb76fb3 100644 --- a/drivers/net/mac8390.c +++ b/drivers/net/mac8390.c @@ -313,8 +313,6 @@ struct net_device * __init mac8390_probe(int unit) if (unit >= 0) sprintf(dev->name, "eth%d", unit); - SET_MODULE_OWNER(dev); - while ((ndev = nubus_find_type(NUBUS_CAT_NETWORK, NUBUS_TYPE_ETHERNET, ndev))) { /* Have we seen it already? 
*/ if (slots & (1<board->slot)) diff --git a/drivers/net/mac89x0.c b/drivers/net/mac89x0.c index 62c1c6262febea4261c97bf79d61383bb687401b..30854f094965c840fd11e8f8eb1bd55e26449305 100644 --- a/drivers/net/mac89x0.c +++ b/drivers/net/mac89x0.c @@ -181,6 +181,7 @@ struct net_device * __init mac89x0_probe(int unit) unsigned long ioaddr; unsigned short sig; int err = -ENODEV; + DECLARE_MAC_BUF(mac); dev = alloc_etherdev(sizeof(struct net_local)); if (!dev) @@ -191,8 +192,6 @@ struct net_device * __init mac89x0_probe(int unit) netdev_boot_setup_check(dev); } - SET_MODULE_OWNER(dev); - if (once_is_enough) goto out; once_is_enough = 1; @@ -274,13 +273,11 @@ struct net_device * __init mac89x0_probe(int unit) } dev->irq = SLOT2IRQ(slot); - printk(" IRQ %d ADDR ", dev->irq); - /* print the ethernet address. */ - for (i = 0; i < ETH_ALEN; i++) - printk("%2.2x%s", dev->dev_addr[i], - ((i < ETH_ALEN-1) ? ":" : "")); - printk("\n"); + /* print the IRQ and ethernet address. */ + + printk(" IRQ %d ADDR %s\n", + dev->irq, print_mac(mac, dev->dev_addr)); dev->open = net_open; dev->stop = net_close; diff --git a/drivers/net/macb.c b/drivers/net/macb.c index a4bb0264180a4563d5bd59bd1ae7a2ad357bd7dc..047ea7be4850cfccfb812436c03a410e01661098 100644 --- a/drivers/net/macb.c +++ b/drivers/net/macb.c @@ -470,47 +470,41 @@ static int macb_rx(struct macb *bp, int budget) return received; } -static int macb_poll(struct net_device *dev, int *budget) +static int macb_poll(struct napi_struct *napi, int budget) { - struct macb *bp = netdev_priv(dev); - int orig_budget, work_done, retval = 0; + struct macb *bp = container_of(napi, struct macb, napi); + struct net_device *dev = bp->dev; + int work_done; u32 status; status = macb_readl(bp, RSR); macb_writel(bp, RSR, status); + work_done = 0; if (!status) { /* * This may happen if an interrupt was pending before * this function was called last time, and no packets * have been received since. */ - netif_rx_complete(dev); + netif_rx_complete(dev, napi); goto out; } dev_dbg(&bp->pdev->dev, "poll: status = %08lx, budget = %d\n", - (unsigned long)status, *budget); + (unsigned long)status, budget); if (!(status & MACB_BIT(REC))) { dev_warn(&bp->pdev->dev, "No RX buffers complete, status = %02lx\n", (unsigned long)status); - netif_rx_complete(dev); + netif_rx_complete(dev, napi); goto out; } - orig_budget = *budget; - if (orig_budget > dev->quota) - orig_budget = dev->quota; - - work_done = macb_rx(bp, orig_budget); - if (work_done < orig_budget) { - netif_rx_complete(dev); - retval = 0; - } else { - retval = 1; - } + work_done = macb_rx(bp, budget); + if (work_done < budget) + netif_rx_complete(dev, napi); /* * We've done what we can to clean the buffers. 
Make sure we @@ -521,7 +515,7 @@ static int macb_poll(struct net_device *dev, int *budget) /* TODO: Handle errors */ - return retval; + return work_done; } static irqreturn_t macb_interrupt(int irq, void *dev_id) @@ -545,7 +539,7 @@ static irqreturn_t macb_interrupt(int irq, void *dev_id) } if (status & MACB_RX_INT_FLAGS) { - if (netif_rx_schedule_prep(dev)) { + if (netif_rx_schedule_prep(dev, &bp->napi)) { /* * There's no point taking any more interrupts * until we have processed the buffers @@ -553,7 +547,7 @@ static irqreturn_t macb_interrupt(int irq, void *dev_id) macb_writel(bp, IDR, MACB_RX_INT_FLAGS); dev_dbg(&bp->pdev->dev, "scheduling RX softirq\n"); - __netif_rx_schedule(dev); + __netif_rx_schedule(dev, &bp->napi); } } @@ -937,6 +931,8 @@ static int macb_open(struct net_device *dev) return err; } + napi_enable(&bp->napi); + macb_init_rings(bp); macb_init_hw(bp); @@ -954,6 +950,7 @@ static int macb_close(struct net_device *dev) unsigned long flags; netif_stop_queue(dev); + napi_disable(&bp->napi); if (bp->phy_dev) phy_stop(bp->phy_dev); @@ -1074,6 +1071,7 @@ static int __devinit macb_probe(struct platform_device *pdev) unsigned long pclk_hz; u32 config; int err = -ENXIO; + DECLARE_MAC_BUF(mac); regs = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!regs) { @@ -1088,7 +1086,6 @@ static int __devinit macb_probe(struct platform_device *pdev) goto err_out; } - SET_MODULE_OWNER(dev); SET_NETDEV_DEV(dev, &pdev->dev); /* TODO: Actually, we have some interesting features... */ @@ -1146,8 +1143,7 @@ static int __devinit macb_probe(struct platform_device *pdev) dev->get_stats = macb_get_stats; dev->set_multicast_list = macb_set_rx_mode; dev->do_ioctl = macb_ioctl; - dev->poll = macb_poll; - dev->weight = 64; + netif_napi_add(dev, &bp->napi, macb_poll, 64); dev->ethtool_ops = &macb_ethtool_ops; dev->base_addr = regs->start; @@ -1195,10 +1191,9 @@ static int __devinit macb_probe(struct platform_device *pdev) platform_set_drvdata(pdev, dev); printk(KERN_INFO "%s: Atmel MACB at 0x%08lx irq %d " - "(%02x:%02x:%02x:%02x:%02x:%02x)\n", + "(%s)\n", dev->name, dev->base_addr, dev->irq, - dev->dev_addr[0], dev->dev_addr[1], dev->dev_addr[2], - dev->dev_addr[3], dev->dev_addr[4], dev->dev_addr[5]); + print_mac(mac, dev->dev_addr)); phydev = bp->phy_dev; printk(KERN_INFO "%s: attached PHY driver [%s] " diff --git a/drivers/net/macb.h b/drivers/net/macb.h index 4e3283ebd97cc245dd3b370bb79f8f36da620d6f..57b85acf0d1685d6a239e8ff9c9a3f0862c9dbf5 100644 --- a/drivers/net/macb.h +++ b/drivers/net/macb.h @@ -374,6 +374,7 @@ struct macb { struct clk *pclk; struct clk *hclk; struct net_device *dev; + struct napi_struct napi; struct net_device_stats stats; struct macb_stats hw_stats; diff --git a/drivers/net/mace.c b/drivers/net/mace.c index 52b9332810c52eb7465fdb30de5450758f10fd36..95ebe72f320f0ee1cb9ec1c17b8fb661b527f3d1 100644 --- a/drivers/net/mace.c +++ b/drivers/net/mace.c @@ -57,7 +57,6 @@ struct mace_data { unsigned char tx_fullup; unsigned char tx_active; unsigned char tx_bad_runt; - struct net_device_stats stats; struct timer_list tx_timeout; int timeout_active; int port_aaui; @@ -78,7 +77,6 @@ struct mace_data { static int mace_open(struct net_device *dev); static int mace_close(struct net_device *dev); static int mace_xmit_start(struct sk_buff *skb, struct net_device *dev); -static struct net_device_stats *mace_stats(struct net_device *dev); static void mace_set_multicast(struct net_device *dev); static void mace_reset(struct net_device *dev); static int mace_set_address(struct net_device *dev, 
void *addr); @@ -103,6 +101,7 @@ static int __devinit mace_probe(struct macio_dev *mdev, const struct of_device_i struct mace_data *mp; const unsigned char *addr; int j, rev, rc = -EBUSY; + DECLARE_MAC_BUF(mac); if (macio_resource_count(mdev) != 3 || macio_irq_count(mdev) != 3) { printk(KERN_ERR "can't use MACE %s: need 3 addrs and 3 irqs\n", @@ -143,7 +142,6 @@ static int __devinit mace_probe(struct macio_dev *mdev, const struct of_device_i rc = -ENOMEM; goto err_release; } - SET_MODULE_OWNER(dev); SET_NETDEV_DEV(dev, &mdev->ofdev.dev); mp = dev->priv; @@ -189,7 +187,6 @@ static int __devinit mace_probe(struct macio_dev *mdev, const struct of_device_i mp->tx_cmds = (volatile struct dbdma_cmd *) DBDMA_ALIGN(mp + 1); mp->rx_cmds = mp->tx_cmds + NCMDS_TX * N_TX_RING + 1; - memset(&mp->stats, 0, sizeof(mp->stats)); memset((char *) mp->tx_cmds, 0, (NCMDS_TX*N_TX_RING + N_RX_RING + 2) * sizeof(struct dbdma_cmd)); init_timer(&mp->tx_timeout); @@ -214,7 +211,6 @@ static int __devinit mace_probe(struct macio_dev *mdev, const struct of_device_i dev->open = mace_open; dev->stop = mace_close; dev->hard_start_xmit = mace_xmit_start; - dev->get_stats = mace_stats; dev->set_multicast_list = mace_set_multicast; dev->set_mac_address = mace_set_address; @@ -245,11 +241,9 @@ static int __devinit mace_probe(struct macio_dev *mdev, const struct of_device_i goto err_free_rx_irq; } - printk(KERN_INFO "%s: MACE at", dev->name); - for (j = 0; j < 6; ++j) { - printk("%c%.2x", (j? ':': ' '), dev->dev_addr[j]); - } - printk(", chip revision %d.%d\n", mp->chipid >> 8, mp->chipid & 0xff); + printk(KERN_INFO "%s: MACE at %s, chip revision %d.%d\n", + dev->name, print_mac(mac, dev->dev_addr), + mp->chipid >> 8, mp->chipid & 0xff); return 0; @@ -585,13 +579,6 @@ static int mace_xmit_start(struct sk_buff *skb, struct net_device *dev) return 0; } -static struct net_device_stats *mace_stats(struct net_device *dev) -{ - struct mace_data *p = (struct mace_data *) dev->priv; - - return &p->stats; -} - static void mace_set_multicast(struct net_device *dev) { struct mace_data *mp = (struct mace_data *) dev->priv; @@ -645,19 +632,19 @@ static void mace_set_multicast(struct net_device *dev) spin_unlock_irqrestore(&mp->lock, flags); } -static void mace_handle_misc_intrs(struct mace_data *mp, int intr) +static void mace_handle_misc_intrs(struct mace_data *mp, int intr, struct net_device *dev) { volatile struct mace __iomem *mb = mp->mace; static int mace_babbles, mace_jabbers; if (intr & MPCO) - mp->stats.rx_missed_errors += 256; - mp->stats.rx_missed_errors += in_8(&mb->mpc); /* reading clears it */ + dev->stats.rx_missed_errors += 256; + dev->stats.rx_missed_errors += in_8(&mb->mpc); /* reading clears it */ if (intr & RNTPCO) - mp->stats.rx_length_errors += 256; - mp->stats.rx_length_errors += in_8(&mb->rntpc); /* reading clears it */ + dev->stats.rx_length_errors += 256; + dev->stats.rx_length_errors += in_8(&mb->rntpc); /* reading clears it */ if (intr & CERR) - ++mp->stats.tx_heartbeat_errors; + ++dev->stats.tx_heartbeat_errors; if (intr & BABBLE) if (mace_babbles++ < 4) printk(KERN_DEBUG "mace: babbling transmitter\n"); @@ -681,7 +668,7 @@ static irqreturn_t mace_interrupt(int irq, void *dev_id) spin_lock_irqsave(&mp->lock, flags); intr = in_8(&mb->ir); /* read interrupt register */ in_8(&mb->xmtrc); /* get retries */ - mace_handle_misc_intrs(mp, intr); + mace_handle_misc_intrs(mp, intr, dev); i = mp->tx_empty; while (in_8(&mb->pr) & XMTSV) { @@ -694,7 +681,7 @@ static irqreturn_t mace_interrupt(int irq, void *dev_id) */ intr 
= in_8(&mb->ir); if (intr != 0) - mace_handle_misc_intrs(mp, intr); + mace_handle_misc_intrs(mp, intr, dev); if (mp->tx_bad_runt) { fs = in_8(&mb->xmtfs); mp->tx_bad_runt = 0; @@ -768,14 +755,14 @@ static irqreturn_t mace_interrupt(int irq, void *dev_id) } /* Update stats */ if (fs & (UFLO|LCOL|LCAR|RTRY)) { - ++mp->stats.tx_errors; + ++dev->stats.tx_errors; if (fs & LCAR) - ++mp->stats.tx_carrier_errors; + ++dev->stats.tx_carrier_errors; if (fs & (UFLO|LCOL|RTRY)) - ++mp->stats.tx_aborted_errors; + ++dev->stats.tx_aborted_errors; } else { - mp->stats.tx_bytes += mp->tx_bufs[i]->len; - ++mp->stats.tx_packets; + dev->stats.tx_bytes += mp->tx_bufs[i]->len; + ++dev->stats.tx_packets; } dev_kfree_skb_irq(mp->tx_bufs[i]); --mp->tx_active; @@ -829,7 +816,7 @@ static void mace_tx_timeout(unsigned long data) goto out; /* update various counters */ - mace_handle_misc_intrs(mp, in_8(&mb->ir)); + mace_handle_misc_intrs(mp, in_8(&mb->ir), dev); cp = mp->tx_cmds + NCMDS_TX * mp->tx_empty; @@ -849,7 +836,7 @@ static void mace_tx_timeout(unsigned long data) /* fix up the transmit side */ i = mp->tx_empty; mp->tx_active = 0; - ++mp->stats.tx_errors; + ++dev->stats.tx_errors; if (mp->tx_bad_runt) { mp->tx_bad_runt = 0; } else if (i != mp->tx_fill) { @@ -917,18 +904,18 @@ static irqreturn_t mace_rxdma_intr(int irq, void *dev_id) /* got a packet, have a look at it */ skb = mp->rx_bufs[i]; if (skb == 0) { - ++mp->stats.rx_dropped; + ++dev->stats.rx_dropped; } else if (nb > 8) { data = skb->data; frame_status = (data[nb-3] << 8) + data[nb-4]; if (frame_status & (RS_OFLO|RS_CLSN|RS_FRAMERR|RS_FCSERR)) { - ++mp->stats.rx_errors; + ++dev->stats.rx_errors; if (frame_status & RS_OFLO) - ++mp->stats.rx_over_errors; + ++dev->stats.rx_over_errors; if (frame_status & RS_FRAMERR) - ++mp->stats.rx_frame_errors; + ++dev->stats.rx_frame_errors; if (frame_status & RS_FCSERR) - ++mp->stats.rx_crc_errors; + ++dev->stats.rx_crc_errors; } else { /* Mace feature AUTO_STRIP_RCV is on by default, dropping the * FCS on frames with 802.3 headers. 
This means that Ethernet @@ -940,15 +927,15 @@ static irqreturn_t mace_rxdma_intr(int irq, void *dev_id) nb -= 8; skb_put(skb, nb); skb->protocol = eth_type_trans(skb, dev); - mp->stats.rx_bytes += skb->len; + dev->stats.rx_bytes += skb->len; netif_rx(skb); dev->last_rx = jiffies; mp->rx_bufs[i] = NULL; - ++mp->stats.rx_packets; + ++dev->stats.rx_packets; } } else { - ++mp->stats.rx_errors; - ++mp->stats.rx_length_errors; + ++dev->stats.rx_errors; + ++dev->stats.rx_length_errors; } /* advance to next */ diff --git a/drivers/net/macmace.c b/drivers/net/macmace.c index 9a343b965975eb469a133cdbd133c1faee74831c..6589239b79ee55e4ecacc43b0997ab49ca164877 100644 --- a/drivers/net/macmace.c +++ b/drivers/net/macmace.c @@ -65,7 +65,6 @@ struct mace_data { unsigned char *rx_ring; dma_addr_t rx_ring_phys; int dma_intr; - struct net_device_stats stats; int rx_slot, rx_tail; int tx_slot, tx_sloti, tx_count; int chipid; @@ -92,7 +91,6 @@ struct mace_frame { static int mace_open(struct net_device *dev); static int mace_close(struct net_device *dev); static int mace_xmit_start(struct sk_buff *skb, struct net_device *dev); -static struct net_device_stats *mace_stats(struct net_device *dev); static void mace_set_multicast(struct net_device *dev); static int mace_set_address(struct net_device *dev, void *addr); static void mace_reset(struct net_device *dev); @@ -196,6 +194,7 @@ static int __devinit mace_probe(struct platform_device *pdev) unsigned char checksum = 0; static int found = 0; int err; + DECLARE_MAC_BUF(mac); if (found || macintosh_config->ether_type != MAC_ETHER_MACE) return -ENODEV; @@ -210,7 +209,6 @@ static int __devinit mace_probe(struct platform_device *pdev) mp->device = &pdev->dev; SET_NETDEV_DEV(dev, &pdev->dev); - SET_MODULE_OWNER(dev); dev->base_addr = (u32)MACE_BASE; mp->mace = (volatile struct mace *) MACE_BASE; @@ -243,20 +241,16 @@ static int __devinit mace_probe(struct platform_device *pdev) return -ENODEV; } - memset(&mp->stats, 0, sizeof(mp->stats)); - dev->open = mace_open; dev->stop = mace_close; dev->hard_start_xmit = mace_xmit_start; dev->tx_timeout = mace_tx_timeout; dev->watchdog_timeo = TX_TIMEOUT; - dev->get_stats = mace_stats; dev->set_multicast_list = mace_set_multicast; dev->set_mac_address = mace_set_address; - printk(KERN_INFO "%s: 68K MACE, hardware address %.2X", dev->name, dev->dev_addr[0]); - for (j = 1 ; j < 6 ; j++) printk(":%.2X", dev->dev_addr[j]); - printk("\n"); + printk(KERN_INFO "%s: 68K MACE, hardware address %s\n", + dev->name, print_mac(mac, dev->dev_addr)); err = register_netdev(dev); if (!err) @@ -473,8 +467,8 @@ static int mace_xmit_start(struct sk_buff *skb, struct net_device *dev) mp->tx_count--; local_irq_restore(flags); - mp->stats.tx_packets++; - mp->stats.tx_bytes += skb->len; + dev->stats.tx_packets++; + dev->stats.tx_bytes += skb->len; /* We need to copy into our xmit buffer to take care of alignment and caching issues */ skb_copy_from_linear_data(skb, mp->tx_ring, skb->len); @@ -493,12 +487,6 @@ static int mace_xmit_start(struct sk_buff *skb, struct net_device *dev) return NETDEV_TX_OK; } -static struct net_device_stats *mace_stats(struct net_device *dev) -{ - struct mace_data *mp = netdev_priv(dev); - return &mp->stats; -} - static void mace_set_multicast(struct net_device *dev) { struct mace_data *mp = netdev_priv(dev); @@ -556,13 +544,13 @@ static void mace_handle_misc_intrs(struct mace_data *mp, int intr) static int mace_babbles, mace_jabbers; if (intr & MPCO) - mp->stats.rx_missed_errors += 256; - mp->stats.rx_missed_errors += mb->mpc; 
/* reading clears it */ + dev->stats.rx_missed_errors += 256; + dev->stats.rx_missed_errors += mb->mpc; /* reading clears it */ if (intr & RNTPCO) - mp->stats.rx_length_errors += 256; - mp->stats.rx_length_errors += mb->rntpc; /* reading clears it */ + dev->stats.rx_length_errors += 256; + dev->stats.rx_length_errors += mb->rntpc; /* reading clears it */ if (intr & CERR) - ++mp->stats.tx_heartbeat_errors; + ++dev->stats.tx_heartbeat_errors; if (intr & BABBLE) if (mace_babbles++ < 4) printk(KERN_DEBUG "macmace: babbling transmitter\n"); @@ -601,14 +589,14 @@ static irqreturn_t mace_interrupt(int irq, void *dev_id) } /* Update stats */ if (fs & (UFLO|LCOL|LCAR|RTRY)) { - ++mp->stats.tx_errors; + ++dev->stats.tx_errors; if (fs & LCAR) - ++mp->stats.tx_carrier_errors; + ++dev->stats.tx_carrier_errors; else if (fs & (UFLO|LCOL|RTRY)) { - ++mp->stats.tx_aborted_errors; + ++dev->stats.tx_aborted_errors; if (mb->xmtfs & UFLO) { printk(KERN_ERR "%s: DMA underrun.\n", dev->name); - mp->stats.tx_fifo_errors++; + dev->stats.tx_fifo_errors++; mace_txdma_reset(dev); } } @@ -662,23 +650,23 @@ static void mace_dma_rx_frame(struct net_device *dev, struct mace_frame *mf) unsigned int frame_status = mf->rcvsts; if (frame_status & (RS_OFLO | RS_CLSN | RS_FRAMERR | RS_FCSERR)) { - mp->stats.rx_errors++; + dev->stats.rx_errors++; if (frame_status & RS_OFLO) { printk(KERN_DEBUG "%s: fifo overflow.\n", dev->name); - mp->stats.rx_fifo_errors++; + dev->stats.rx_fifo_errors++; } if (frame_status & RS_CLSN) - mp->stats.collisions++; + dev->stats.collisions++; if (frame_status & RS_FRAMERR) - mp->stats.rx_frame_errors++; + dev->stats.rx_frame_errors++; if (frame_status & RS_FCSERR) - mp->stats.rx_crc_errors++; + dev->stats.rx_crc_errors++; } else { unsigned int frame_length = mf->rcvcnt + ((frame_status & 0x0F) << 8 ); skb = dev_alloc_skb(frame_length + 2); if (!skb) { - mp->stats.rx_dropped++; + dev->stats.rx_dropped++; return; } skb_reserve(skb, 2); @@ -687,8 +675,8 @@ static void mace_dma_rx_frame(struct net_device *dev, struct mace_frame *mf) skb->protocol = eth_type_trans(skb, dev); netif_rx(skb); dev->last_rx = jiffies; - mp->stats.rx_packets++; - mp->stats.rx_bytes += frame_length; + dev->stats.rx_packets++; + dev->stats.rx_bytes += frame_length; } } diff --git a/drivers/net/macsonic.c b/drivers/net/macsonic.c index e9ecdbf352ae36b8096eeb9c3abd1cbd1049dfe2..b267161418ea3680e6bb522ba5d8afd5fbcbedc0 100644 --- a/drivers/net/macsonic.c +++ b/drivers/net/macsonic.c @@ -223,6 +223,7 @@ int __init mac_onboard_sonic_ethernet_addr(struct net_device* dev) struct sonic_local *lp = netdev_priv(dev); const int prom_addr = ONBOARD_SONIC_PROM_BASE; int i; + DECLARE_MAC_BUF(mac); /* On NuBus boards we can sometimes look in the ROM resources. No such luck for comm-slot/onboard. 
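
The DECLARE_MAC_BUF()/print_mac() conversions running through these hunks (lance, lguest_net, lib82596, lne390, mac89x0, macb, mace, macmace, macsonic) replace open-coded %02x printing loops with a shared helper. The idiom, shown here on an invented "foo" device, is simply:

#include <linux/if_ether.h>
#include <linux/netdevice.h>

/* Sketch only.  DECLARE_MAC_BUF() declares a stack buffer big enough
 * for a formatted address; print_mac() fills it as xx:xx:xx:xx:xx:xx
 * and returns it, so it can be passed straight to printk. */
static void foo_print_addr(struct net_device *dev)
{
	DECLARE_MAC_BUF(mac);

	printk(KERN_INFO "%s: MAC %s IRQ %d\n",
	       dev->name, print_mac(mac, dev->dev_addr), dev->irq);
}
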
*/ @@ -266,13 +267,8 @@ int __init mac_onboard_sonic_ethernet_addr(struct net_device* dev) dev->dev_addr[1] = val >> 8; dev->dev_addr[0] = val & 0xff; - printk(KERN_INFO "HW Address from CAM 15: "); - for (i = 0; i < 6; i++) { - printk("%2.2x", dev->dev_addr[i]); - if (i < 5) - printk(":"); - } - printk("\n"); + printk(KERN_INFO "HW Address from CAM 15: %s\n", + print_mac(mac, dev->dev_addr)); } else return 0; if (memcmp(dev->dev_addr, "\x08\x00\x07", 3) && @@ -567,7 +563,7 @@ static int __init mac_sonic_probe(struct platform_device *pdev) struct net_device *dev; struct sonic_local *lp; int err; - int i; + DECLARE_MAC_BUF(mac); dev = alloc_etherdev(sizeof(struct sonic_local)); if (!dev) @@ -576,7 +572,6 @@ static int __init mac_sonic_probe(struct platform_device *pdev) lp = netdev_priv(dev); lp->device = &pdev->dev; SET_NETDEV_DEV(dev, &pdev->dev); - SET_MODULE_OWNER(dev); /* This will catch fatal stuff like -ENOMEM as well as success */ err = mac_onboard_sonic_probe(dev); @@ -592,13 +587,8 @@ static int __init mac_sonic_probe(struct platform_device *pdev) if (err) goto out; - printk("%s: MAC ", dev->name); - for (i = 0; i < 6; i++) { - printk("%2.2x", dev->dev_addr[i]); - if (i < 5) - printk(":"); - } - printk(" IRQ %d\n", dev->irq); + printk("%s: MAC %s IRQ %d\n", + dev->name, print_mac(mac, dev->dev_addr), dev->irq); return 0; diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c index dc74d006e01f0cf67b39dddad2180b0cbfa4272b..b7c81c874f7a0a15e4bd65281aa9ceeb1e83b0cf 100644 --- a/drivers/net/macvlan.c +++ b/drivers/net/macvlan.c @@ -164,16 +164,25 @@ static int macvlan_hard_start_xmit(struct sk_buff *skb, struct net_device *dev) } static int macvlan_hard_header(struct sk_buff *skb, struct net_device *dev, - unsigned short type, void *daddr, void *saddr, - unsigned len) + unsigned short type, const void *daddr, + const void *saddr, unsigned len) { const struct macvlan_dev *vlan = netdev_priv(dev); struct net_device *lowerdev = vlan->lowerdev; - return lowerdev->hard_header(skb, lowerdev, type, daddr, - saddr ? : dev->dev_addr, len); + return dev_hard_header(skb, lowerdev, type, daddr, + saddr ? 
: dev->dev_addr, len); } +static const struct header_ops macvlan_hard_header_ops = { + .create = macvlan_hard_header, + .rebuild = eth_rebuild_header, + .parse = eth_header_parse, + .rebuild = eth_rebuild_header, + .cache = eth_header_cache, + .cache_update = eth_header_cache_update, +}; + static int macvlan_open(struct net_device *dev) { struct macvlan_dev *vlan = netdev_priv(dev); @@ -282,10 +291,6 @@ static u32 macvlan_ethtool_get_rx_csum(struct net_device *dev) static const struct ethtool_ops macvlan_ethtool_ops = { .get_link = ethtool_op_get_link, .get_rx_csum = macvlan_ethtool_get_rx_csum, - .get_tx_csum = ethtool_op_get_tx_csum, - .get_tso = ethtool_op_get_tso, - .get_ufo = ethtool_op_get_ufo, - .get_sg = ethtool_op_get_sg, .get_drvinfo = macvlan_ethtool_get_drvinfo, }; @@ -299,9 +304,9 @@ static void macvlan_setup(struct net_device *dev) dev->change_mtu = macvlan_change_mtu; dev->change_rx_flags = macvlan_change_rx_flags; dev->set_multicast_list = macvlan_set_multicast_list; - dev->hard_header = macvlan_hard_header; dev->hard_start_xmit = macvlan_hard_start_xmit; dev->destructor = free_netdev; + dev->header_ops = &macvlan_hard_header_ops, dev->ethtool_ops = &macvlan_ethtool_ops; dev->tx_queue_len = 0; } @@ -376,7 +381,7 @@ static int macvlan_newlink(struct net_device *dev, if (!tb[IFLA_LINK]) return -EINVAL; - lowerdev = __dev_get_by_index(nla_get_u32(tb[IFLA_LINK])); + lowerdev = __dev_get_by_index(dev->nd_net, nla_get_u32(tb[IFLA_LINK])); if (lowerdev == NULL) return -ENODEV; diff --git a/drivers/net/meth.c b/drivers/net/meth.c index 32bed6bc6c064ba6c660ff454775a20f43bade13..e25dbab67363c3b5ede9eea2fec63b1197577848 100644 --- a/drivers/net/meth.c +++ b/drivers/net/meth.c @@ -66,7 +66,6 @@ module_param(timeout, int, 0); * packets in and out, so there is place for a packet */ struct meth_private { - struct net_device_stats stats; /* in-memory copy of MAC Control register */ unsigned long mac_ctrl; /* in-memory copy of DMA Control register */ @@ -96,11 +95,11 @@ char o2meth_eaddr[8]={0,0,0,0,0,0,0,0}; static inline void load_eaddr(struct net_device *dev) { int i; - DPRINTK("Loading MAC Address: %02x:%02x:%02x:%02x:%02x:%02x\n", - (int)o2meth_eaddr[0]&0xFF,(int)o2meth_eaddr[1]&0xFF,(int)o2meth_eaddr[2]&0xFF, - (int)o2meth_eaddr[3]&0xFF,(int)o2meth_eaddr[4]&0xFF,(int)o2meth_eaddr[5]&0xFF); + DECLARE_MAC_BUF(mac); + for (i = 0; i < 6; i++) dev->dev_addr[i] = o2meth_eaddr[i]; + DPRINTK("Loading MAC Address: %s\n", print_mac(mac, dev->dev_addr)); mace->eth.mac_addr = (*(unsigned long*)o2meth_eaddr) >> 16; } @@ -401,15 +400,15 @@ static void meth_rx(struct net_device* dev, unsigned long int_status) printk(KERN_DEBUG "%s: bogus packet size: %ld, status=%#2lx.\n", dev->name, priv->rx_write, priv->rx_ring[priv->rx_write]->status.raw); - priv->stats.rx_errors++; - priv->stats.rx_length_errors++; + dev->stats.rx_errors++; + dev->stats.rx_length_errors++; skb = priv->rx_skbs[priv->rx_write]; } else { skb = alloc_skb(METH_RX_BUFF_SIZE, GFP_ATOMIC); if (!skb) { /* Ouch! No memory! 
Drop packet on the floor */ DPRINTK("No mem: dropping packet\n"); - priv->stats.rx_dropped++; + dev->stats.rx_dropped++; skb = priv->rx_skbs[priv->rx_write]; } else { struct sk_buff *skb_c = priv->rx_skbs[priv->rx_write]; @@ -421,13 +420,13 @@ static void meth_rx(struct net_device* dev, unsigned long int_status) priv->rx_skbs[priv->rx_write] = skb; skb_c->protocol = eth_type_trans(skb_c, dev); dev->last_rx = jiffies; - priv->stats.rx_packets++; - priv->stats.rx_bytes += len; + dev->stats.rx_packets++; + dev->stats.rx_bytes += len; netif_rx(skb_c); } } } else { - priv->stats.rx_errors++; + dev->stats.rx_errors++; skb=priv->rx_skbs[priv->rx_write]; #if MFE_DEBUG>0 printk(KERN_WARNING "meth: RX error: status=0x%016lx\n",status); @@ -490,10 +489,10 @@ static void meth_tx_cleanup(struct net_device* dev, unsigned long int_status) #endif if (status & METH_TX_ST_DONE) { if (status & METH_TX_ST_SUCCESS){ - priv->stats.tx_packets++; - priv->stats.tx_bytes += skb->len; + dev->stats.tx_packets++; + dev->stats.tx_bytes += skb->len; } else { - priv->stats.tx_errors++; + dev->stats.tx_errors++; #if MFE_DEBUG>=1 DPRINTK("TX error: status=%016lx <",status); if(status & METH_TX_ST_SUCCESS) @@ -734,7 +733,7 @@ static void meth_tx_timeout(struct net_device *dev) /* Try to reset the interface. */ meth_reset(dev); - priv->stats.tx_errors++; + dev->stats.tx_errors++; /* Clear all rings */ meth_free_tx_ring(priv); @@ -773,12 +772,6 @@ static int meth_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) /* * Return statistics to the caller */ -static struct net_device_stats *meth_stats(struct net_device *dev) -{ - struct meth_private *priv = netdev_priv(dev); - return &priv->stats; -} - /* * The init function. */ @@ -796,7 +789,6 @@ static int __init meth_probe(struct platform_device *pdev) dev->stop = meth_release; dev->hard_start_xmit = meth_tx; dev->do_ioctl = meth_ioctl; - dev->get_stats = meth_stats; #ifdef HAVE_TX_TIMEOUT dev->tx_timeout = meth_tx_timeout; dev->watchdog_timeo = timeout; diff --git a/drivers/net/mipsnet.c b/drivers/net/mipsnet.c index 9853c74f6bbf24d2775b5e45eb8dd0f5e4a7b863..d593175ab6f02e413474f4e6c0cddebb93414898 100644 --- a/drivers/net/mipsnet.c +++ b/drivers/net/mipsnet.c @@ -11,7 +11,6 @@ #include #include #include -#include #include #include #include @@ -22,10 +21,6 @@ #define mipsnet_reg_address(dev, field) (dev->base_addr + field_offset(field)) -struct mipsnet_priv { - struct net_device_stats stats; -}; - static char mipsnet_string[] = "mipsnet"; /* @@ -50,7 +45,6 @@ static inline ssize_t mipsnet_put_todevice(struct net_device *dev, { int count_to_go = skb->len; char *buf_ptr = skb->data; - struct mipsnet_priv *mp = netdev_priv(dev); pr_debug("%s: %s(): telling MIPSNET txDataCount(%d)\n", dev->name, __FUNCTION__, skb->len); @@ -64,8 +58,8 @@ static inline ssize_t mipsnet_put_todevice(struct net_device *dev, outb(*buf_ptr, mipsnet_reg_address(dev, txDataBuffer)); } - mp->stats.tx_packets++; - mp->stats.tx_bytes += skb->len; + dev->stats.tx_packets++; + dev->stats.tx_bytes += skb->len; return skb->len; } @@ -88,10 +82,9 @@ static inline ssize_t mipsnet_get_fromdev(struct net_device *dev, size_t count) { struct sk_buff *skb; size_t len = count; - struct mipsnet_priv *mp = netdev_priv(dev); if (!(skb = alloc_skb(len + 2, GFP_KERNEL))) { - mp->stats.rx_dropped++; + dev->stats.rx_dropped++; return -ENOMEM; } @@ -106,8 +99,8 @@ static inline ssize_t mipsnet_get_fromdev(struct net_device *dev, size_t count) dev->name, __FUNCTION__); netif_rx(skb); - mp->stats.rx_packets++; - 
mp->stats.rx_bytes += len; + dev->stats.rx_packets++; + dev->stats.rx_bytes += len; return count; } @@ -204,13 +197,6 @@ static int mipsnet_close(struct net_device *dev) return 0; } -static struct net_device_stats *mipsnet_get_stats(struct net_device *dev) -{ - struct mipsnet_priv *mp = netdev_priv(dev); - - return &mp->stats; -} - static void mipsnet_set_mclist(struct net_device *dev) { // we don't do anything @@ -222,7 +208,7 @@ static int __init mipsnet_probe(struct device *dev) struct net_device *netdev; int err; - netdev = alloc_etherdev(sizeof(struct mipsnet_priv)); + netdev = alloc_etherdev(0); if (!netdev) { err = -ENOMEM; goto out; @@ -233,7 +219,6 @@ static int __init mipsnet_probe(struct device *dev) netdev->open = mipsnet_open; netdev->stop = mipsnet_close; netdev->hard_start_xmit = mipsnet_xmit; - netdev->get_stats = mipsnet_get_stats; netdev->set_multicast_list = mipsnet_set_mclist; /* diff --git a/drivers/net/mv643xx_eth.c b/drivers/net/mv643xx_eth.c index 315335671f0f06f2a2f9357d17c7ca8a9c4f8296..b33d21f4efffb69182a93a53117779180abf9377 100644 --- a/drivers/net/mv643xx_eth.c +++ b/drivers/net/mv643xx_eth.c @@ -63,10 +63,9 @@ static unsigned int mv643xx_eth_port_disable_rx(unsigned int port_num); static int mv643xx_eth_open(struct net_device *); static int mv643xx_eth_stop(struct net_device *); static int mv643xx_eth_change_mtu(struct net_device *, int); -static struct net_device_stats *mv643xx_eth_get_stats(struct net_device *); static void eth_port_init_mac_tables(unsigned int eth_port_num); #ifdef MV643XX_NAPI -static int mv643xx_poll(struct net_device *dev, int *budget); +static int mv643xx_poll(struct napi_struct *napi, int budget); #endif static int ethernet_phy_get(unsigned int eth_port_num); static void ethernet_phy_set(unsigned int eth_port_num, int phy_addr); @@ -341,7 +340,7 @@ int mv643xx_eth_free_tx_descs(struct net_device *dev, int force) if (cmd_sts & ETH_ERROR_SUMMARY) { printk("%s: Error in TX\n", dev->name); - mp->stats.tx_errors++; + dev->stats.tx_errors++; } spin_unlock_irqrestore(&mp->lock, flags); @@ -388,7 +387,7 @@ static void mv643xx_eth_free_all_tx_descs(struct net_device *dev) static int mv643xx_eth_receive_queue(struct net_device *dev, int budget) { struct mv643xx_private *mp = netdev_priv(dev); - struct net_device_stats *stats = &mp->stats; + struct net_device_stats *stats = &dev->stats; unsigned int received_packets = 0; struct sk_buff *skb; struct pkt_info pkt_info; @@ -562,7 +561,7 @@ static irqreturn_t mv643xx_eth_int_handler(int irq, void *dev_id) /* wait for previous write to complete */ mv_read(MV643XX_ETH_INTERRUPT_MASK_REG(port_num)); - netif_rx_schedule(dev); + netif_rx_schedule(dev, &mp->napi); } #else if (eth_int_cause & ETH_INT_CAUSE_RX) @@ -785,6 +784,7 @@ static int mv643xx_eth_open(struct net_device *dev) unsigned int port_num = mp->port_num; unsigned int size; int err; + DECLARE_MAC_BUF(mac); /* Clear any pending ethernet port interrupts */ mv_write(MV643XX_ETH_INTERRUPT_CAUSE_REG(port_num), 0); @@ -880,6 +880,10 @@ static int mv643xx_eth_open(struct net_device *dev) mv643xx_eth_rx_refill_descs(dev); /* Fill RX ring with skb's */ +#ifdef MV643XX_NAPI + napi_enable(&mp->napi); +#endif + eth_port_start(dev); /* Interrupt Coalescing */ @@ -982,7 +986,7 @@ static int mv643xx_eth_stop(struct net_device *dev) mv_read(MV643XX_ETH_INTERRUPT_MASK_REG(port_num)); #ifdef MV643XX_NAPI - netif_poll_disable(dev); + napi_disable(&mp->napi); #endif netif_carrier_off(dev); netif_stop_queue(dev); @@ -992,10 +996,6 @@ static int 
mv643xx_eth_stop(struct net_device *dev) mv643xx_eth_free_tx_rings(dev); mv643xx_eth_free_rx_rings(dev); -#ifdef MV643XX_NAPI - netif_poll_enable(dev); -#endif - free_irq(dev->irq, dev); return 0; @@ -1007,11 +1007,12 @@ static int mv643xx_eth_stop(struct net_device *dev) * * This function is used in case of NAPI */ -static int mv643xx_poll(struct net_device *dev, int *budget) +static int mv643xx_poll(struct napi_struct *napi, int budget) { - struct mv643xx_private *mp = netdev_priv(dev); - int done = 1, orig_budget, work_done; + struct mv643xx_private *mp = container_of(napi, struct mv643xx_private, napi); + struct net_device *dev = mp->dev; unsigned int port_num = mp->port_num; + int work_done; #ifdef MV643XX_TX_FAST_REFILL if (++mp->tx_clean_threshold > 5) { @@ -1020,27 +1021,20 @@ static int mv643xx_poll(struct net_device *dev, int *budget) } #endif + work_done = 0; if ((mv_read(MV643XX_ETH_RX_CURRENT_QUEUE_DESC_PTR_0(port_num))) - != (u32) mp->rx_used_desc_q) { - orig_budget = *budget; - if (orig_budget > dev->quota) - orig_budget = dev->quota; - work_done = mv643xx_eth_receive_queue(dev, orig_budget); - *budget -= work_done; - dev->quota -= work_done; - if (work_done >= orig_budget) - done = 0; - } + != (u32) mp->rx_used_desc_q) + work_done = mv643xx_eth_receive_queue(dev, budget); - if (done) { - netif_rx_complete(dev); + if (work_done < budget) { + netif_rx_complete(dev, napi); mv_write(MV643XX_ETH_INTERRUPT_CAUSE_REG(port_num), 0); mv_write(MV643XX_ETH_INTERRUPT_CAUSE_EXTEND_REG(port_num), 0); mv_write(MV643XX_ETH_INTERRUPT_MASK_REG(port_num), ETH_INT_UNMASK_ALL); } - return done ? 0 : 1; + return work_done; } #endif @@ -1198,7 +1192,7 @@ static void eth_tx_submit_descs_for_skb(struct mv643xx_private *mp, static int mv643xx_eth_start_xmit(struct sk_buff *skb, struct net_device *dev) { struct mv643xx_private *mp = netdev_priv(dev); - struct net_device_stats *stats = &mp->stats; + struct net_device_stats *stats = &dev->stats; unsigned long flags; BUG_ON(netif_queue_stopped(dev)); @@ -1234,23 +1228,6 @@ static int mv643xx_eth_start_xmit(struct sk_buff *skb, struct net_device *dev) return 0; /* success */ } -/* - * mv643xx_eth_get_stats - * - * Returns a pointer to the interface statistics. 
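/*
 * [Editorial sketch -- not part of the patch.]  The mv643xx_eth hunks above,
 * and the myri10ge and natsemi hunks further down, are all instances of the
 * same NAPI interface conversion: the old dev->poll/dev->weight hooks and the
 * *budget/dev->quota bookkeeping are replaced by a napi_struct embedded in
 * the driver's private data.  A minimal version of the new shape, for a
 * hypothetical driver "foo" (all foo_* names are placeholders):
 */
struct foo_private {
	struct net_device *dev;
	struct napi_struct napi;
};

/* helpers a real driver would already have */
int foo_clean_rx(struct net_device *dev, int budget);
bool foo_rx_pending(struct net_device *dev);
void foo_mask_rx_irq(struct net_device *dev);
void foo_unmask_rx_irq(struct net_device *dev);

static int foo_poll(struct napi_struct *napi, int budget)
{
	struct foo_private *fp = container_of(napi, struct foo_private, napi);
	struct net_device *dev = fp->dev;
	int work_done = foo_clean_rx(dev, budget);	/* never exceeds budget */

	if (work_done < budget) {
		/* ring drained: leave polling mode, let the NIC interrupt again */
		netif_rx_complete(dev, napi);
		foo_unmask_rx_irq(dev);
	}
	return work_done;	/* the old 0/1 "done" return value is gone */
}

static irqreturn_t foo_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct foo_private *fp = netdev_priv(dev);

	if (foo_rx_pending(dev) && netif_rx_schedule_prep(dev, &fp->napi)) {
		foo_mask_rx_irq(dev);
		__netif_rx_schedule(dev, &fp->napi);
	}
	return IRQ_HANDLED;
}
/* probe-time setup replaces the dev->poll/dev->weight assignments:
 *	netif_napi_add(dev, &fp->napi, foo_poll, 64);
 */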
- * - * Input : dev - a pointer to the required interface - * - * Output : a pointer to the interface's statistics - */ - -static struct net_device_stats *mv643xx_eth_get_stats(struct net_device *dev) -{ - struct mv643xx_private *mp = netdev_priv(dev); - - return &mp->stats; -} - #ifdef CONFIG_NET_POLL_CONTROLLER static void mv643xx_netpoll(struct net_device *netdev) { @@ -1333,6 +1310,10 @@ static int mv643xx_eth_probe(struct platform_device *pdev) platform_set_drvdata(pdev, dev); mp = netdev_priv(dev); + mp->dev = dev; +#ifdef MV643XX_NAPI + netif_napi_add(dev, &mp->napi, mv643xx_poll, 64); +#endif res = platform_get_resource(pdev, IORESOURCE_IRQ, 0); BUG_ON(!res); @@ -1341,16 +1322,11 @@ static int mv643xx_eth_probe(struct platform_device *pdev) dev->open = mv643xx_eth_open; dev->stop = mv643xx_eth_stop; dev->hard_start_xmit = mv643xx_eth_start_xmit; - dev->get_stats = mv643xx_eth_get_stats; dev->set_mac_address = mv643xx_eth_set_mac_address; dev->set_multicast_list = mv643xx_eth_set_rx_mode; /* No need to Tx Timeout */ dev->tx_timeout = mv643xx_eth_tx_timeout; -#ifdef MV643XX_NAPI - dev->poll = mv643xx_poll; - dev->weight = 64; -#endif #ifdef CONFIG_NET_POLL_CONTROLLER dev->poll_controller = mv643xx_netpoll; @@ -1431,7 +1407,6 @@ static int mv643xx_eth_probe(struct platform_device *pdev) mv643xx_eth_update_pscr(dev, &cmd); mv643xx_set_settings(dev, &cmd); - SET_MODULE_OWNER(dev); SET_NETDEV_DEV(dev, &pdev->dev); err = register_netdev(dev); if (err) @@ -1439,8 +1414,8 @@ static int mv643xx_eth_probe(struct platform_device *pdev) p = dev->dev_addr; printk(KERN_NOTICE - "%s: port %d with MAC address %02x:%02x:%02x:%02x:%02x:%02x\n", - dev->name, port_num, p[0], p[1], p[2], p[3], p[4], p[5]); + "%s: port %d with MAC address %s\n", + dev->name, port_num, print_mac(mac, p)); if (dev->features & NETIF_F_SG) printk(KERN_NOTICE "%s: Scatter Gather Enabled\n", dev->name); @@ -2687,8 +2662,7 @@ static const struct mv643xx_stats mv643xx_gstrings_stats[] = { { "late_collision", MV643XX_STAT(mib_counters.late_collision) }, }; -#define MV643XX_STATS_LEN \ - sizeof(mv643xx_gstrings_stats) / sizeof(struct mv643xx_stats) +#define MV643XX_STATS_LEN ARRAY_SIZE(mv643xx_gstrings_stats) static void mv643xx_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo) @@ -2700,9 +2674,14 @@ static void mv643xx_get_drvinfo(struct net_device *netdev, drvinfo->n_stats = MV643XX_STATS_LEN; } -static int mv643xx_get_stats_count(struct net_device *netdev) +static int mv643xx_get_sset_count(struct net_device *netdev, int sset) { - return MV643XX_STATS_LEN; + switch (sset) { + case ETH_SS_STATS: + return MV643XX_STATS_LEN; + default: + return -EOPNOTSUPP; + } } static void mv643xx_get_ethtool_stats(struct net_device *netdev, @@ -2762,9 +2741,7 @@ static const struct ethtool_ops mv643xx_ethtool_ops = { .set_settings = mv643xx_set_settings, .get_drvinfo = mv643xx_get_drvinfo, .get_link = mv643xx_eth_get_link, - .get_sg = ethtool_op_get_sg, .set_sg = ethtool_op_set_sg, - .get_stats_count = mv643xx_get_stats_count, .get_ethtool_stats = mv643xx_get_ethtool_stats, .get_strings = mv643xx_get_strings, .nway_reset = mv643xx_eth_nway_restart, diff --git a/drivers/net/mv643xx_eth.h b/drivers/net/mv643xx_eth.h index 565b96696acab9e131fb275dca99430e3b88b824..be669eb23788014802c14f828cf6b3bfb3e61624 100644 --- a/drivers/net/mv643xx_eth.h +++ b/drivers/net/mv643xx_eth.h @@ -320,6 +320,8 @@ struct mv643xx_private { struct work_struct tx_timeout_task; + struct net_device *dev; + struct napi_struct napi; struct 
net_device_stats stats; struct mv643xx_mib_counters mib_counters; spinlock_t lock; diff --git a/drivers/net/mvme147.c b/drivers/net/mvme147.c index e246d00bba6d0467a851d642984894cbd021ee22..86c9c06433cbbd2802ba2b56e2f9d3cd932ed17a 100644 --- a/drivers/net/mvme147.c +++ b/drivers/net/mvme147.c @@ -67,6 +67,7 @@ struct net_device * __init mvme147lance_probe(int unit) u_long *addr; u_long address; int err; + DECLARE_MAC_BUF(mac); if (!MACH_IS_MVME147 || called) return ERR_PTR(-ENODEV); @@ -79,8 +80,6 @@ struct net_device * __init mvme147lance_probe(int unit) if (unit >= 0) sprintf(dev->name, "eth%d", unit); - SET_MODULE_OWNER(dev); - /* Fill the dev fields */ dev->base_addr = (unsigned long)MVME147_LANCE_BASE; dev->open = &m147lance_open; @@ -103,12 +102,10 @@ struct net_device * __init mvme147lance_probe(int unit) address=address>>8; dev->dev_addr[3]=address&0xff; - printk("%s: MVME147 at 0x%08lx, irq %d, Hardware Address %02x:%02x:%02x:%02x:%02x:%02x\n", - dev->name, dev->base_addr, MVME147_LANCE_IRQ, - dev->dev_addr[0], - dev->dev_addr[1], dev->dev_addr[2], - dev->dev_addr[3], dev->dev_addr[4], - dev->dev_addr[5]); + printk("%s: MVME147 at 0x%08lx, irq %d, " + "Hardware Address %s\n", + dev->name, dev->base_addr, MVME147_LANCE_IRQ, + print_mac(mac, dev->dev_addr)); lp = (struct m147lance_private *)dev->priv; lp->ram = __get_dma_pages(GFP_ATOMIC, 3); /* 16K */ diff --git a/drivers/net/myri10ge/myri10ge.c b/drivers/net/myri10ge/myri10ge.c index 556962f9612d45ea3f6cfbf90868d0f069fe4e5f..e8afa101433ec5c0456d0ea72fba6424a64481e8 100644 --- a/drivers/net/myri10ge/myri10ge.c +++ b/drivers/net/myri10ge/myri10ge.c @@ -48,6 +48,7 @@ #include #include #include +#include #include #include #include @@ -62,6 +63,8 @@ #include #include #include +#include +#include #include #include #include @@ -89,6 +92,8 @@ MODULE_LICENSE("Dual BSD/GPL"); #define MYRI10GE_EEPROM_STRINGS_SIZE 256 #define MYRI10GE_MAX_SEND_DESC_TSO ((65536 / 2048) * 2) +#define MYRI10GE_MAX_LRO_DESCRIPTORS 8 +#define MYRI10GE_LRO_MAX_PKTS 64 #define MYRI10GE_NO_CONFIRM_DATA htonl(0xffffffff) #define MYRI10GE_NO_RESPONSE_RESULT 0xffffffff @@ -151,6 +156,8 @@ struct myri10ge_rx_done { dma_addr_t bus; int cnt; int idx; + struct net_lro_mgr lro_mgr; + struct net_lro_desc lro_desc[MYRI10GE_MAX_LRO_DESCRIPTORS]; }; struct myri10ge_priv { @@ -163,6 +170,7 @@ struct myri10ge_priv { int small_bytes; int big_bytes; struct net_device *dev; + struct napi_struct napi; struct net_device_stats stats; u8 __iomem *sram; int sram_size; @@ -277,6 +285,14 @@ static int myri10ge_debug = -1; /* defaults above */ module_param(myri10ge_debug, int, 0); MODULE_PARM_DESC(myri10ge_debug, "Debug level (0=none,...,16=all)"); +static int myri10ge_lro = 1; +module_param(myri10ge_lro, int, S_IRUGO); +MODULE_PARM_DESC(myri10ge_lro, "Enable large receive offload\n"); + +static int myri10ge_lro_max_pkts = MYRI10GE_LRO_MAX_PKTS; +module_param(myri10ge_lro_max_pkts, int, S_IRUGO); +MODULE_PARM_DESC(myri10ge_lro, "Number of LRO packets to be aggregated\n"); + static int myri10ge_fill_thresh = 256; module_param(myri10ge_fill_thresh, int, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(myri10ge_fill_thresh, "Number of empty rx slots allowed\n"); @@ -1020,6 +1036,15 @@ myri10ge_rx_done(struct myri10ge_priv *mgp, struct myri10ge_rx_buf *rx, remainder -= MYRI10GE_ALLOC_SIZE; } + if (mgp->csum_flag && myri10ge_lro) { + rx_frags[0].page_offset += MXGEFW_PAD; + rx_frags[0].size -= MXGEFW_PAD; + len -= MXGEFW_PAD; + lro_receive_frags(&mgp->rx_done.lro_mgr, rx_frags, + len, len, (void *)(unsigned 
long)csum, csum); + return 1; + } + hlen = MYRI10GE_HLEN > len ? len : MYRI10GE_HLEN; /* allocate an skb to attach the page(s) to. */ @@ -1100,7 +1125,7 @@ static inline void myri10ge_tx_done(struct myri10ge_priv *mgp, int mcp_index) } } -static inline void myri10ge_clean_rx_done(struct myri10ge_priv *mgp, int *limit) +static inline int myri10ge_clean_rx_done(struct myri10ge_priv *mgp, int budget) { struct myri10ge_rx_done *rx_done = &mgp->rx_done; unsigned long rx_bytes = 0; @@ -1109,10 +1134,11 @@ static inline void myri10ge_clean_rx_done(struct myri10ge_priv *mgp, int *limit) int idx = rx_done->idx; int cnt = rx_done->cnt; + int work_done = 0; u16 length; __wsum checksum; - while (rx_done->entry[idx].length != 0 && *limit != 0) { + while (rx_done->entry[idx].length != 0 && work_done++ < budget) { length = ntohs(rx_done->entry[idx].length); rx_done->entry[idx].length = 0; checksum = csum_unfold(rx_done->entry[idx].checksum); @@ -1128,16 +1154,15 @@ static inline void myri10ge_clean_rx_done(struct myri10ge_priv *mgp, int *limit) rx_bytes += rx_ok * (unsigned long)length; cnt++; idx = cnt & (myri10ge_max_intr_slots - 1); - - /* limit potential for livelock by only handling a - * limited number of frames. */ - (*limit)--; } rx_done->idx = idx; rx_done->cnt = cnt; mgp->stats.rx_packets += rx_packets; mgp->stats.rx_bytes += rx_bytes; + if (myri10ge_lro) + lro_flush_all(&rx_done->lro_mgr); + /* restock receive rings if needed */ if (mgp->rx_small.fill_cnt - mgp->rx_small.cnt < myri10ge_fill_thresh) myri10ge_alloc_rx_pages(mgp, &mgp->rx_small, @@ -1145,6 +1170,7 @@ static inline void myri10ge_clean_rx_done(struct myri10ge_priv *mgp, int *limit) if (mgp->rx_big.fill_cnt - mgp->rx_big.cnt < myri10ge_fill_thresh) myri10ge_alloc_rx_pages(mgp, &mgp->rx_big, mgp->big_bytes, 0); + return work_done; } static inline void myri10ge_check_statblock(struct myri10ge_priv *mgp) @@ -1189,26 +1215,21 @@ static inline void myri10ge_check_statblock(struct myri10ge_priv *mgp) } } -static int myri10ge_poll(struct net_device *netdev, int *budget) +static int myri10ge_poll(struct napi_struct *napi, int budget) { - struct myri10ge_priv *mgp = netdev_priv(netdev); + struct myri10ge_priv *mgp = container_of(napi, struct myri10ge_priv, napi); + struct net_device *netdev = mgp->dev; struct myri10ge_rx_done *rx_done = &mgp->rx_done; - int limit, orig_limit, work_done; + int work_done; /* process as many rx events as NAPI will allow */ - limit = min(*budget, netdev->quota); - orig_limit = limit; - myri10ge_clean_rx_done(mgp, &limit); - work_done = orig_limit - limit; - *budget -= work_done; - netdev->quota -= work_done; + work_done = myri10ge_clean_rx_done(mgp, budget); if (rx_done->entry[rx_done->idx].length == 0 || !netif_running(netdev)) { - netif_rx_complete(netdev); + netif_rx_complete(netdev, napi); put_be32(htonl(3), mgp->irq_claim); - return 0; } - return 1; + return work_done; } static irqreturn_t myri10ge_intr(int irq, void *arg) @@ -1226,7 +1247,7 @@ static irqreturn_t myri10ge_intr(int irq, void *arg) /* low bit indicates receives are present, so schedule * napi poll handler */ if (stats->valid & 1) - netif_rx_schedule(mgp->dev); + netif_rx_schedule(mgp->dev, &mgp->napi); if (!mgp->msi_enabled) { put_be32(0, mgp->irq_deassert); @@ -1379,7 +1400,8 @@ static const char myri10ge_gstrings_stats[][ETH_GSTRING_LEN] = { "dropped_pause", "dropped_bad_phy", "dropped_bad_crc32", "dropped_unicast_filtered", "dropped_multicast_filtered", "dropped_runt", "dropped_overrun", "dropped_no_small_buffer", - "dropped_no_big_buffer" 
+ "dropped_no_big_buffer", "LRO aggregated", "LRO flushed", + "LRO avg aggr", "LRO no_desc" }; #define MYRI10GE_NET_STATS_LEN 21 @@ -1396,9 +1418,14 @@ myri10ge_get_strings(struct net_device *netdev, u32 stringset, u8 * data) } } -static int myri10ge_get_stats_count(struct net_device *netdev) +static int myri10ge_get_sset_count(struct net_device *netdev, int sset) { - return MYRI10GE_STATS_LEN; + switch (sset) { + case ETH_SS_STATS: + return MYRI10GE_STATS_LEN; + default: + return -EOPNOTSUPP; + } } static void @@ -1445,6 +1472,14 @@ myri10ge_get_ethtool_stats(struct net_device *netdev, data[i++] = (unsigned int)ntohl(mgp->fw_stats->dropped_overrun); data[i++] = (unsigned int)ntohl(mgp->fw_stats->dropped_no_small_buffer); data[i++] = (unsigned int)ntohl(mgp->fw_stats->dropped_no_big_buffer); + data[i++] = mgp->rx_done.lro_mgr.stats.aggregated; + data[i++] = mgp->rx_done.lro_mgr.stats.flushed; + if (mgp->rx_done.lro_mgr.stats.flushed) + data[i++] = mgp->rx_done.lro_mgr.stats.aggregated / + mgp->rx_done.lro_mgr.stats.flushed; + else + data[i++] = 0; + data[i++] = mgp->rx_done.lro_mgr.stats.no_desc; } static void myri10ge_set_msglevel(struct net_device *netdev, u32 value) @@ -1469,15 +1504,12 @@ static const struct ethtool_ops myri10ge_ethtool_ops = { .get_ringparam = myri10ge_get_ringparam, .get_rx_csum = myri10ge_get_rx_csum, .set_rx_csum = myri10ge_set_rx_csum, - .get_tx_csum = ethtool_op_get_tx_csum, .set_tx_csum = ethtool_op_set_tx_hw_csum, - .get_sg = ethtool_op_get_sg, .set_sg = ethtool_op_set_sg, - .get_tso = ethtool_op_get_tso, .set_tso = ethtool_op_set_tso, .get_link = ethtool_op_get_link, .get_strings = myri10ge_get_strings, - .get_stats_count = myri10ge_get_stats_count, + .get_sset_count = myri10ge_get_sset_count, .get_ethtool_stats = myri10ge_get_ethtool_stats, .set_msglevel = myri10ge_set_msglevel, .get_msglevel = myri10ge_get_msglevel @@ -1718,10 +1750,69 @@ static void myri10ge_free_irq(struct myri10ge_priv *mgp) pci_disable_msi(pdev); } +static int +myri10ge_get_frag_header(struct skb_frag_struct *frag, void **mac_hdr, + void **ip_hdr, void **tcpudp_hdr, + u64 * hdr_flags, void *priv) +{ + struct ethhdr *eh; + struct vlan_ethhdr *veh; + struct iphdr *iph; + u8 *va = page_address(frag->page) + frag->page_offset; + unsigned long ll_hlen; + __wsum csum = (__wsum) (unsigned long)priv; + + /* find the mac header, aborting if not IPv4 */ + + eh = (struct ethhdr *)va; + *mac_hdr = eh; + ll_hlen = ETH_HLEN; + if (eh->h_proto != htons(ETH_P_IP)) { + if (eh->h_proto == htons(ETH_P_8021Q)) { + veh = (struct vlan_ethhdr *)va; + if (veh->h_vlan_encapsulated_proto != htons(ETH_P_IP)) + return -1; + + ll_hlen += VLAN_HLEN; + + /* + * HW checksum starts ETH_HLEN bytes into + * frame, so we must subtract off the VLAN + * header's checksum before csum can be used + */ + csum = csum_sub(csum, csum_partial(va + ETH_HLEN, + VLAN_HLEN, 0)); + } else { + return -1; + } + } + *hdr_flags = LRO_IPV4; + + iph = (struct iphdr *)(va + ll_hlen); + *ip_hdr = iph; + if (iph->protocol != IPPROTO_TCP) + return -1; + *hdr_flags |= LRO_TCP; + *tcpudp_hdr = (u8 *) (*ip_hdr) + (iph->ihl << 2); + + /* verify the IP checksum */ + if (unlikely(ip_fast_csum((u8 *) iph, iph->ihl))) + return -1; + + /* verify the checksum */ + if (unlikely(csum_tcpudp_magic(iph->saddr, iph->daddr, + ntohs(iph->tot_len) - (iph->ihl << 2), + IPPROTO_TCP, csum))) + return -1; + + return 0; +} + static int myri10ge_open(struct net_device *dev) { struct myri10ge_priv *mgp; struct myri10ge_cmd cmd; + struct net_lro_mgr *lro_mgr; int status, 
big_pow2; mgp = netdev_priv(dev); @@ -1853,7 +1944,19 @@ static int myri10ge_open(struct net_device *dev) mgp->link_state = htonl(~0U); mgp->rdma_tags_available = 15; - netif_poll_enable(mgp->dev); /* must happen prior to any irq */ + lro_mgr = &mgp->rx_done.lro_mgr; + lro_mgr->dev = dev; + lro_mgr->features = LRO_F_NAPI; + lro_mgr->ip_summed = CHECKSUM_COMPLETE; + lro_mgr->ip_summed_aggr = CHECKSUM_UNNECESSARY; + lro_mgr->max_desc = MYRI10GE_MAX_LRO_DESCRIPTORS; + lro_mgr->lro_arr = mgp->rx_done.lro_desc; + lro_mgr->get_frag_header = myri10ge_get_frag_header; + lro_mgr->max_aggr = myri10ge_lro_max_pkts; + if (lro_mgr->max_aggr > MAX_SKB_FRAGS) + lro_mgr->max_aggr = MAX_SKB_FRAGS; + + napi_enable(&mgp->napi); /* must happen prior to any irq */ status = myri10ge_send_cmd(mgp, MXGEFW_CMD_ETHERNET_UP, &cmd, 0); if (status) { @@ -1897,7 +2000,7 @@ static int myri10ge_close(struct net_device *dev) del_timer_sync(&mgp->watchdog_timer); mgp->running = MYRI10GE_ETH_STOPPING; - netif_poll_disable(mgp->dev); + napi_disable(&mgp->napi); netif_carrier_off(dev); netif_stop_queue(dev); old_down_cnt = mgp->down_cnt; @@ -2297,6 +2400,7 @@ static void myri10ge_set_multicast_list(struct net_device *dev) struct dev_mc_list *mc_list; __be32 data[2] = { 0, 0 }; int err; + DECLARE_MAC_BUF(mac); mgp = netdev_priv(dev); /* can be called from atomic contexts, @@ -2344,14 +2448,8 @@ static void myri10ge_set_multicast_list(struct net_device *dev) printk(KERN_ERR "myri10ge: %s: Failed " "MXGEFW_JOIN_MULTICAST_GROUP, error status:" "%d\t", dev->name, err); - printk(KERN_ERR "MAC %02x:%02x:%02x:%02x:%02x:%02x\n", - ((unsigned char *)&mc_list->dmi_addr)[0], - ((unsigned char *)&mc_list->dmi_addr)[1], - ((unsigned char *)&mc_list->dmi_addr)[2], - ((unsigned char *)&mc_list->dmi_addr)[3], - ((unsigned char *)&mc_list->dmi_addr)[4], - ((unsigned char *)&mc_list->dmi_addr)[5] - ); + printk(KERN_ERR "MAC %s\n", + print_mac(mac, mc_list->dmi_addr)); goto abort; } } @@ -2855,8 +2953,9 @@ static int myri10ge_probe(struct pci_dev *pdev, const struct pci_device_id *ent) SET_NETDEV_DEV(netdev, &pdev->dev); mgp = netdev_priv(netdev); - memset(mgp, 0, sizeof(*mgp)); mgp->dev = netdev; + netif_napi_add(netdev, &mgp->napi, + myri10ge_poll, myri10ge_napi_weight); mgp->pdev = pdev; mgp->csum_flag = MXGEFW_FLAGS_CKSUM; mgp->pause = myri10ge_flow_control; @@ -2981,8 +3080,6 @@ static int myri10ge_probe(struct pci_dev *pdev, const struct pci_device_id *ent) netdev->features = NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_TSO; if (dac_enabled) netdev->features |= NETIF_F_HIGHDMA; - netdev->poll = myri10ge_poll; - netdev->weight = myri10ge_napi_weight; /* make sure we can get an irq, and that MSI can be * setup (if available). 
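/*
 * [Editorial sketch -- not part of the patch.]  The inet_lro pieces the
 * myri10ge hunks add are spread over several hunks; wired together they look
 * roughly like the helper below (foo_* is a placeholder; the lro_* fields and
 * calls are the ones the patch itself uses, from <linux/inet_lro.h>):
 */
static void foo_setup_lro(struct net_lro_mgr *lro_mgr, struct net_device *dev,
			  struct net_lro_desc *descs, int nr_descs,
			  int (*get_frag_header)(struct skb_frag_struct *,
						 void **, void **, void **,
						 u64 *, void *))
{
	lro_mgr->dev = dev;
	lro_mgr->features = LRO_F_NAPI;	/* aggregation is flushed from the poll loop */
	lro_mgr->ip_summed = CHECKSUM_COMPLETE;
	lro_mgr->ip_summed_aggr = CHECKSUM_UNNECESSARY;
	lro_mgr->max_desc = nr_descs;
	lro_mgr->lro_arr = descs;
	lro_mgr->max_aggr = 64;		/* the patch caps this at MAX_SKB_FRAGS */
	lro_mgr->get_frag_header = get_frag_header;	/* MAC/IP/TCP parser + checksum check */
}
/* The RX fast path then hands page fragments to lro_receive_frags() instead of
 * building an skb itself, and the NAPI poll routine calls lro_flush_all() once
 * per pass so partially aggregated sessions are pushed up the stack. */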
Also ensure netdev->irq diff --git a/drivers/net/myri_sbus.c b/drivers/net/myri_sbus.c index 13444da93273b5762f2798ac124d2108f56d2f39..8d29319cc5cb39b88b3f13a37cb017bf28b595b2 100644 --- a/drivers/net/myri_sbus.c +++ b/drivers/net/myri_sbus.c @@ -311,12 +311,12 @@ static void myri_is_not_so_happy(struct myri_eth *mp) #ifdef DEBUG_HEADER static void dump_ehdr(struct ethhdr *ehdr) { - printk("ehdr[h_dst(%02x:%02x:%02x:%02x:%02x:%02x)" - "h_source(%02x:%02x:%02x:%02x:%02x:%02x)h_proto(%04x)]\n", - ehdr->h_dest[0], ehdr->h_dest[1], ehdr->h_dest[2], - ehdr->h_dest[3], ehdr->h_dest[4], ehdr->h_dest[4], - ehdr->h_source[0], ehdr->h_source[1], ehdr->h_source[2], - ehdr->h_source[3], ehdr->h_source[4], ehdr->h_source[4], + DECLARE_MAC_BUF(mac); + DECLARE_MAC_BUF(mac2); + printk("ehdr[h_dst(%s)" + "h_source(%s)" + "h_proto(%04x)]\n", + print_mac(mac, ehdr->h_dest), print_mac(mac2, ehdr->h_source), ehdr->h_proto); } @@ -325,13 +325,7 @@ static void dump_ehdr_and_myripad(unsigned char *stuff) struct ethhdr *ehdr = (struct ethhdr *) (stuff + 2); printk("pad[%02x:%02x]", stuff[0], stuff[1]); - printk("ehdr[h_dst(%02x:%02x:%02x:%02x:%02x:%02x)" - "h_source(%02x:%02x:%02x:%02x:%02x:%02x)h_proto(%04x)]\n", - ehdr->h_dest[0], ehdr->h_dest[1], ehdr->h_dest[2], - ehdr->h_dest[3], ehdr->h_dest[4], ehdr->h_dest[4], - ehdr->h_source[0], ehdr->h_source[1], ehdr->h_source[2], - ehdr->h_source[3], ehdr->h_source[4], ehdr->h_source[4], - ehdr->h_proto); + dump_ehdr(ehdr); } #endif @@ -353,7 +347,7 @@ static void myri_tx(struct myri_eth *mp, struct net_device *dev) sbus_unmap_single(mp->myri_sdev, dma_addr, skb->len, SBUS_DMA_TODEVICE); dev_kfree_skb(skb); mp->tx_skbs[entry] = NULL; - mp->enet_stats.tx_packets++; + dev->stats.tx_packets++; entry = NEXT_TX(entry); } mp->tx_old = entry; @@ -434,20 +428,20 @@ static void myri_rx(struct myri_eth *mp, struct net_device *dev) RX_ALLOC_SIZE, SBUS_DMA_FROMDEVICE); if (len < (ETH_HLEN + MYRI_PAD_LEN) || (skb->data[0] != MYRI_PAD_LEN)) { DRX(("ERROR[")); - mp->enet_stats.rx_errors++; + dev->stats.rx_errors++; if (len < (ETH_HLEN + MYRI_PAD_LEN)) { DRX(("BAD_LENGTH] ")); - mp->enet_stats.rx_length_errors++; + dev->stats.rx_length_errors++; } else { DRX(("NO_PADDING] ")); - mp->enet_stats.rx_frame_errors++; + dev->stats.rx_frame_errors++; } /* Return it to the LANAI. 
*/ drop_it: drops++; DRX(("DROP ")); - mp->enet_stats.rx_dropped++; + dev->stats.rx_dropped++; sbus_dma_sync_single_for_device(mp->myri_sdev, sbus_readl(&rxd->myri_scatters[0].addr), RX_ALLOC_SIZE, @@ -527,8 +521,8 @@ static void myri_rx(struct myri_eth *mp, struct net_device *dev) netif_rx(skb); dev->last_rx = jiffies; - mp->enet_stats.rx_packets++; - mp->enet_stats.rx_bytes += len; + dev->stats.rx_packets++; + dev->stats.rx_bytes += len; next: DRX(("NEXT\n")); entry = NEXT_RX(entry); @@ -596,7 +590,7 @@ static void myri_tx_timeout(struct net_device *dev) printk(KERN_ERR "%s: transmit timed out, resetting\n", dev->name); - mp->enet_stats.tx_errors++; + dev->stats.tx_errors++; myri_init(mp, 0); netif_wake_queue(dev); } @@ -682,8 +676,9 @@ static int myri_start_xmit(struct sk_buff *skb, struct net_device *dev) * saddr=NULL means use device source address * daddr=NULL means leave destination address (eg unresolved arp) */ -static int myri_header(struct sk_buff *skb, struct net_device *dev, unsigned short type, - void *daddr, void *saddr, unsigned len) +static int myri_header(struct sk_buff *skb, struct net_device *dev, + unsigned short type, const void *daddr, + const void *saddr, unsigned len) { struct ethhdr *eth = (struct ethhdr *) skb_push(skb, ETH_HLEN); unsigned char *pad = (unsigned char *) skb_push(skb, MYRI_PAD_LEN); @@ -765,18 +760,18 @@ static int myri_rebuild_header(struct sk_buff *skb) return 0; } -int myri_header_cache(struct neighbour *neigh, struct hh_cache *hh) +static int myri_header_cache(const struct neighbour *neigh, struct hh_cache *hh) { unsigned short type = hh->hh_type; unsigned char *pad; struct ethhdr *eth; - struct net_device *dev = neigh->dev; + const struct net_device *dev = neigh->dev; pad = ((unsigned char *) hh->hh_data) + HH_DATA_OFF(sizeof(*eth) + MYRI_PAD_LEN); eth = (struct ethhdr *) (pad + MYRI_PAD_LEN); - if (type == __constant_htons(ETH_P_802_3)) + if (type == htons(ETH_P_802_3)) return -1; /* Refill MyriNet padding identifiers, this is just being anal. */ @@ -792,7 +787,9 @@ int myri_header_cache(struct neighbour *neigh, struct hh_cache *hh) /* Called by Address Resolution module to notify changes in address. 
*/ -void myri_header_cache_update(struct hh_cache *hh, struct net_device *dev, unsigned char * haddr) +void myri_header_cache_update(struct hh_cache *hh, + const struct net_device *dev, + const unsigned char * haddr) { memcpy(((u8*)hh->hh_data) + HH_DATA_OFF(sizeof(struct ethhdr)), haddr, dev->addr_len); @@ -806,9 +803,6 @@ static int myri_change_mtu(struct net_device *dev, int new_mtu) return 0; } -static struct net_device_stats *myri_get_stats(struct net_device *dev) -{ return &(((struct myri_eth *)dev->priv)->enet_stats); } - static void myri_set_multicast(struct net_device *dev) { /* Do nothing, all MyriCOM nodes transmit multicast frames @@ -890,6 +884,13 @@ static void dump_eeprom(struct myri_eth *mp) } #endif +static const struct header_ops myri_header_ops = { + .create = myri_header, + .rebuild = myri_rebuild_header, + .cache = myri_header_cache, + .cache_update = myri_header_cache_update, +}; + static int __devinit myri_ether_init(struct sbus_dev *sdev) { static int num; @@ -898,6 +899,7 @@ static int __devinit myri_ether_init(struct sbus_dev *sdev) struct myri_eth *mp; unsigned char prop_buf[32]; int i; + DECLARE_MAC_BUF(mac); DET(("myri_ether_init(%p,%d):\n", sdev, num)); dev = alloc_etherdev(sizeof(struct myri_eth)); @@ -908,7 +910,6 @@ static int __devinit myri_ether_init(struct sbus_dev *sdev) if (version_printed++ == 0) printk(version); - SET_MODULE_OWNER(dev); SET_NETDEV_DEV(dev, &sdev->ofdev.dev); mp = (struct myri_eth *) dev->priv; @@ -1061,7 +1062,6 @@ static int __devinit myri_ether_init(struct sbus_dev *sdev) dev->hard_start_xmit = &myri_start_xmit; dev->tx_timeout = &myri_tx_timeout; dev->watchdog_timeo = 5*HZ; - dev->get_stats = &myri_get_stats; dev->set_multicast_list = &myri_set_multicast; dev->irq = sdev->irqs[0]; @@ -1075,11 +1075,9 @@ static int __devinit myri_ether_init(struct sbus_dev *sdev) dev->mtu = MYRINET_MTU; dev->change_mtu = myri_change_mtu; - dev->hard_header = myri_header; - dev->rebuild_header = myri_rebuild_header; + dev->header_ops = &myri_header_ops; + dev->hard_header_len = (ETH_HLEN + MYRI_PAD_LEN); - dev->hard_header_cache = myri_header_cache; - dev->header_cache_update= myri_header_cache_update; /* Load code onto the LANai. */ DET(("Loading LANAI firmware\n")); @@ -1094,12 +1092,8 @@ static int __devinit myri_ether_init(struct sbus_dev *sdev) num++; - printk("%s: MyriCOM MyriNET Ethernet ", dev->name); - - for (i = 0; i < 6; i++) - printk("%2.2x%c", dev->dev_addr[i], - i == 5 ? ' ' : ':'); - printk("\n"); + printk("%s: MyriCOM MyriNET Ethernet %s\n", + dev->name, print_mac(mac, dev->dev_addr)); return 0; diff --git a/drivers/net/myri_sbus.h b/drivers/net/myri_sbus.h index 2f69ef7cdccbb40d0cf83e9466e4a408b4878466..5d93fcc95d55e7db5240b956ee3109d26331d42d 100644 --- a/drivers/net/myri_sbus.h +++ b/drivers/net/myri_sbus.h @@ -280,7 +280,6 @@ struct myri_eth { void __iomem *lregs; /* Quick ptr to LANAI regs. */ struct sk_buff *rx_skbs[RX_RING_SIZE+1];/* RX skb's */ struct sk_buff *tx_skbs[TX_RING_SIZE]; /* TX skb's */ - struct net_device_stats enet_stats; /* Interface stats. */ /* These are less frequently accessed. */ void __iomem *regs; /* MyriCOM register space. 
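/*
 * [Editorial sketch -- not part of the patch.]  The myri_sbus hunks above and
 * the macvlan hunk at the top of this section are two instances of the same
 * conversion: the per-device dev->hard_header, dev->rebuild_header,
 * dev->hard_header_cache and dev->header_cache_update pointers are gathered
 * into one shared, const struct header_ops.  For an Ethernet-like device the
 * table typically reuses the generic eth_* helpers (foo_header_ops is a
 * placeholder name):
 */
static const struct header_ops foo_header_ops = {
	.create		= eth_header,		/* build the link-level header */
	.rebuild	= eth_rebuild_header,
	.parse		= eth_header_parse,
	.cache		= eth_header_cache,
	.cache_update	= eth_header_cache_update,
};
/* ...and in the setup/probe path a single assignment replaces four:
 *	dev->header_ops = &foo_header_ops;
 */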
*/ diff --git a/drivers/net/natsemi.c b/drivers/net/natsemi.c index b47a12d684f9ed30dd0b8cd302c09259ee394636..527f9dcc7f69f3b7bc212ed4c363d0690c66b98b 100644 --- a/drivers/net/natsemi.c +++ b/drivers/net/natsemi.c @@ -108,7 +108,7 @@ static int full_duplex[MAX_UNITS]; #define TX_TIMEOUT (2*HZ) #define NATSEMI_HW_TIMEOUT 400 -#define NATSEMI_TIMER_FREQ 3*HZ +#define NATSEMI_TIMER_FREQ 5*HZ #define NATSEMI_PG0_NREGS 64 #define NATSEMI_RFDR_NREGS 8 #define NATSEMI_PG1_NREGS 4 @@ -560,6 +560,8 @@ struct netdev_private { /* address of a sent-in-place packet/buffer, for later free() */ struct sk_buff *tx_skbuff[TX_RING_SIZE]; dma_addr_t tx_dma[TX_RING_SIZE]; + struct net_device *dev; + struct napi_struct napi; struct net_device_stats stats; /* Media monitoring timer */ struct timer_list timer; @@ -636,7 +638,7 @@ static void init_registers(struct net_device *dev); static int start_tx(struct sk_buff *skb, struct net_device *dev); static irqreturn_t intr_handler(int irq, void *dev_instance); static void netdev_error(struct net_device *dev, int intr_status); -static int natsemi_poll(struct net_device *dev, int *budget); +static int natsemi_poll(struct napi_struct *napi, int budget); static void netdev_rx(struct net_device *dev, int *work_done, int work_to_do); static void netdev_tx_done(struct net_device *dev); static int natsemi_change_mtu(struct net_device *dev, int new_mtu); @@ -803,6 +805,7 @@ static int __devinit natsemi_probe1 (struct pci_dev *pdev, const int pcibar = 1; /* PCI base address register */ int prev_eedata; u32 tmp; + DECLARE_MAC_BUF(mac); /* when built into the kernel, we only print version if device is found */ #ifndef MODULE @@ -835,7 +838,6 @@ static int __devinit natsemi_probe1 (struct pci_dev *pdev, dev = alloc_etherdev(sizeof (struct netdev_private)); if (!dev) return -ENOMEM; - SET_MODULE_OWNER(dev); SET_NETDEV_DEV(dev, &pdev->dev); i = pci_request_regions(pdev, DRV_NAME); @@ -861,6 +863,7 @@ static int __devinit natsemi_probe1 (struct pci_dev *pdev, dev->irq = irq; np = netdev_priv(dev); + netif_napi_add(dev, &np->napi, natsemi_poll, 64); np->pci_dev = pdev; pci_set_drvdata(pdev, dev); @@ -931,8 +934,6 @@ static int __devinit natsemi_probe1 (struct pci_dev *pdev, dev->do_ioctl = &netdev_ioctl; dev->tx_timeout = &tx_timeout; dev->watchdog_timeo = TX_TIMEOUT; - dev->poll = natsemi_poll; - dev->weight = 64; #ifdef CONFIG_NET_POLL_CONTROLLER dev->poll_controller = &natsemi_poll_controller; @@ -958,12 +959,10 @@ static int __devinit natsemi_probe1 (struct pci_dev *pdev, goto err_create_file; if (netif_msg_drv(np)) { - printk(KERN_INFO "natsemi %s: %s at %#08lx (%s), ", - dev->name, natsemi_pci_info[chip_idx].name, iostart, - pci_name(np->pci_dev)); - for (i = 0; i < ETH_ALEN-1; i++) - printk("%02x:", dev->dev_addr[i]); - printk("%02x, IRQ %d", dev->dev_addr[i], irq); + printk(KERN_INFO "natsemi %s: %s at %#08lx " + "(%s), %s, IRQ %d", + dev->name, natsemi_pci_info[chip_idx].name, iostart, + pci_name(np->pci_dev), print_mac(mac, dev->dev_addr), irq); if (dev->if_port == PORT_TP) printk(", port TP.\n"); else if (np->ignore_phy) @@ -1554,6 +1553,8 @@ static int netdev_open(struct net_device *dev) free_irq(dev->irq, dev); return i; } + napi_enable(&np->napi); + init_ring(dev); spin_lock_irq(&np->lock); init_registers(dev); @@ -1614,7 +1615,7 @@ static void do_cable_magic(struct net_device *dev) * (these values all come from National) */ if (!(data & 0x80) || ((data >= 0xd8) && (data <= 0xff))) { - struct netdev_private *np = netdev_priv(dev); + np = netdev_priv(dev); /* the bug 
has been triggered - fix the coefficient */ writew(TSTDAT_FIXED, ioaddr + TSTDAT); @@ -1797,7 +1798,7 @@ static void netdev_timer(unsigned long data) struct net_device *dev = (struct net_device *)data; struct netdev_private *np = netdev_priv(dev); void __iomem * ioaddr = ns_ioaddr(dev); - int next_tick = 5*HZ; + int next_tick = NATSEMI_TIMER_FREQ; if (netif_msg_timer(np)) { /* DO NOT read the IntrStatus register, @@ -2200,10 +2201,10 @@ static irqreturn_t intr_handler(int irq, void *dev_instance) prefetch(&np->rx_skbuff[np->cur_rx % RX_RING_SIZE]); - if (netif_rx_schedule_prep(dev)) { + if (netif_rx_schedule_prep(dev, &np->napi)) { /* Disable interrupts and register for poll */ natsemi_irq_disable(dev); - __netif_rx_schedule(dev); + __netif_rx_schedule(dev, &np->napi); } else printk(KERN_WARNING "%s: Ignoring interrupt, status %#08x, mask %#08x.\n", @@ -2216,12 +2217,11 @@ static irqreturn_t intr_handler(int irq, void *dev_instance) /* This is the NAPI poll routine. As well as the standard RX handling * it also handles all other interrupts that the chip might raise. */ -static int natsemi_poll(struct net_device *dev, int *budget) +static int natsemi_poll(struct napi_struct *napi, int budget) { - struct netdev_private *np = netdev_priv(dev); + struct netdev_private *np = container_of(napi, struct netdev_private, napi); + struct net_device *dev = np->dev; void __iomem * ioaddr = ns_ioaddr(dev); - - int work_to_do = min(*budget, dev->quota); int work_done = 0; do { @@ -2236,7 +2236,7 @@ static int natsemi_poll(struct net_device *dev, int *budget) if (np->intr_status & (IntrRxDone | IntrRxIntr | RxStatusFIFOOver | IntrRxErr | IntrRxOverrun)) { - netdev_rx(dev, &work_done, work_to_do); + netdev_rx(dev, &work_done, budget); } if (np->intr_status & @@ -2250,16 +2250,13 @@ static int natsemi_poll(struct net_device *dev, int *budget) if (np->intr_status & IntrAbnormalSummary) netdev_error(dev, np->intr_status); - *budget -= work_done; - dev->quota -= work_done; - - if (work_done >= work_to_do) - return 1; + if (work_done >= budget) + return work_done; np->intr_status = readl(ioaddr + IntrStatus); } while (np->intr_status); - netif_rx_complete(dev); + netif_rx_complete(dev, napi); /* Reenable interrupts providing nothing is trying to shut * the chip down. */ @@ -2268,7 +2265,7 @@ static int natsemi_poll(struct net_device *dev, int *budget) natsemi_irq_enable(dev); spin_unlock(&np->lock); - return 0; + return work_done; } /* This routine is logically part of the interrupt handler, but separated @@ -2505,8 +2502,8 @@ static void __set_rx_mode(struct net_device *dev) memset(mc_filter, 0, sizeof(mc_filter)); for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count; i++, mclist = mclist->next) { - int i = (ether_crc(ETH_ALEN, mclist->dmi_addr) >> 23) & 0x1ff; - mc_filter[i/8] |= (1 << (i & 0x07)); + int b = (ether_crc(ETH_ALEN, mclist->dmi_addr) >> 23) & 0x1ff; + mc_filter[b/8] |= (1 << (b & 0x07)); } rx_mode = RxFilterEnable | AcceptBroadcast | AcceptMulticast | AcceptMyPhys; @@ -3158,6 +3155,8 @@ static int netdev_close(struct net_device *dev) dev->name, np->cur_tx, np->dirty_tx, np->cur_rx, np->dirty_rx); + napi_disable(&np->napi); + /* * FIXME: what if someone tries to close a device * that is suspended? @@ -3253,7 +3252,7 @@ static void __devexit natsemi_remove1 (struct pci_dev *pdev) * disable_irq() to enforce synchronization. * * natsemi_poll: checks before reenabling interrupts. suspend * sets hands_off, disables interrupts and then waits with - * netif_poll_disable(). + * napi_disable(). 
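/*
 * [Editorial sketch -- not part of the patch.]  The natsemi hunks around here
 * also move the polling on/off switching from netif_poll_enable() and
 * netif_poll_disable() on the net_device to napi_enable()/napi_disable() on
 * the embedded napi_struct.  The ordering the driver ends up with, for a
 * hypothetical "foo" private struct holding the napi_struct:
 */
struct foo_private {
	struct napi_struct napi;
};

static int foo_open(struct net_device *dev)
{
	struct foo_private *fp = netdev_priv(dev);

	napi_enable(&fp->napi);		/* before the NIC is told to raise RX interrupts */
	/* ... set up rings, program the chip, netif_start_queue(dev) ... */
	return 0;
}

static int foo_close(struct net_device *dev)
{
	struct foo_private *fp = netdev_priv(dev);

	napi_disable(&fp->napi);	/* waits for any in-flight poll to finish */
	netif_stop_queue(dev);
	/* ... mask interrupts, free rings ... */
	return 0;
}
/* suspend mirrors close (napi_disable before quiescing the chip) and resume
 * mirrors open (napi_enable before netif_device_attach), which is the
 * ordering the natsemi_suspend()/natsemi_resume() hunks establish. */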
* * Interrupts must be disabled, otherwise hands_off can cause irq storms. */ @@ -3279,7 +3278,7 @@ static int natsemi_suspend (struct pci_dev *pdev, pm_message_t state) spin_unlock_irq(&np->lock); enable_irq(dev->irq); - netif_poll_disable(dev); + napi_disable(&np->napi); /* Update the error counts. */ __get_stats(dev); @@ -3320,6 +3319,8 @@ static int natsemi_resume (struct pci_dev *pdev) pci_enable_device(pdev); /* pci_power_on(pdev); */ + napi_enable(&np->napi); + natsemi_reset(dev); init_ring(dev); disable_irq(dev->irq); @@ -3333,7 +3334,6 @@ static int natsemi_resume (struct pci_dev *pdev) mod_timer(&np->timer, jiffies + 1*HZ); } netif_device_attach(dev); - netif_poll_enable(dev); out: rtnl_unlock(); return 0; diff --git a/drivers/net/ne-h8300.c b/drivers/net/ne-h8300.c index 38fd525f0f132b1bb0d606d6008a39b83a7f4827..368f2560856db82d796a99b1611748e9b653cc41 100644 --- a/drivers/net/ne-h8300.c +++ b/drivers/net/ne-h8300.c @@ -149,8 +149,6 @@ static int __init do_ne_probe(struct net_device *dev) { unsigned int base_addr = dev->base_addr; - SET_MODULE_OWNER(dev); - /* First check any supplied i/o locations. User knows best. */ if (base_addr > 0x1ff) /* Check a single specified location. */ return ne_probe1(dev, base_addr); @@ -206,6 +204,7 @@ static int __init ne_probe1(struct net_device *dev, int ioaddr) static unsigned version_printed; struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev); unsigned char bus_width; + DECLARE_MAC_BUF(mac); if (!request_region(ioaddr, NE_IO_EXTENT, DRV_NAME)) return -EBUSY; @@ -259,7 +258,7 @@ static int __init ne_probe1(struct net_device *dev, int ioaddr) {E8390_RREAD+E8390_START, E8390_CMD}, }; - for (i = 0; i < sizeof(program_seq)/sizeof(program_seq[0]); i++) + for (i = 0; i < ARRAY_SIZE(program_seq); i++) outb_p(program_seq[i].value, ioaddr + program_seq[i].offset); } @@ -298,12 +297,11 @@ static int __init ne_probe1(struct net_device *dev, int ioaddr) dev->base_addr = ioaddr; - for(i = 0; i < ETHER_ADDR_LEN; i++) { - printk(" %2.2x", SA_prom[i]); + for(i = 0; i < ETHER_ADDR_LEN; i++) dev->dev_addr[i] = SA_prom[i]; - } + printk(" %s\n", print_mac(mac, dev->dev_addr)); - printk("\n%s: %s found at %#x, using IRQ %d.\n", + printk("%s: %s found at %#x, using IRQ %d.\n", dev->name, name, ioaddr, dev->irq); ei_status.name = name; diff --git a/drivers/net/ne.c b/drivers/net/ne.c index c9f74bf5f4917634ff9d563bba0a0b2fc4d4df69..874d291cbaed8447f994f6ac1f4f17d78be122cc 100644 --- a/drivers/net/ne.c +++ b/drivers/net/ne.c @@ -191,8 +191,6 @@ static int __init do_ne_probe(struct net_device *dev) int orig_irq = dev->irq; #endif - SET_MODULE_OWNER(dev); - /* First check any supplied i/o locations. User knows best. */ if (base_addr > 0x1ff) /* Check a single specified location. 
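/*
 * [Editorial sketch -- not part of the patch.]  The probe-path printk changes
 * in the ne* drivers below (and in meth, mvme147, myri10ge, myri_sbus and
 * natsemi above) all rely on the same helper pair: DECLARE_MAC_BUF() reserves
 * a small stack buffer and print_mac() formats the six address bytes into it,
 * replacing the open-coded "%02x:%02x:..." argument lists.  Typical use
 * (foo_announce is a placeholder):
 */
static void foo_announce(struct net_device *dev)
{
	DECLARE_MAC_BUF(mac);	/* buffer sized for "xx:xx:xx:xx:xx:xx" */

	printk(KERN_INFO "%s: MAC address %s, IRQ %d\n",
	       dev->name, print_mac(mac, dev->dev_addr), dev->irq);
}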
*/ return ne_probe1(dev, base_addr); @@ -293,6 +291,7 @@ static int __init ne_probe1(struct net_device *dev, unsigned long ioaddr) int neX000, ctron, copam, bad_card; int reg0, ret; static unsigned version_printed; + DECLARE_MAC_BUF(mac); if (!request_region(ioaddr, NE_IO_EXTENT, DRV_NAME)) return -EBUSY; @@ -377,7 +376,7 @@ static int __init ne_probe1(struct net_device *dev, unsigned long ioaddr) {E8390_RREAD+E8390_START, E8390_CMD}, }; - for (i = 0; i < sizeof(program_seq)/sizeof(program_seq[0]); i++) + for (i = 0; i < ARRAY_SIZE(program_seq); i++) outb_p(program_seq[i].value, ioaddr + program_seq[i].offset); } @@ -505,16 +504,14 @@ static int __init ne_probe1(struct net_device *dev, unsigned long ioaddr) for (i = 0 ; i < ETHER_ADDR_LEN ; i++) { dev->dev_addr[i] = SA_prom[i] = inb_p(ioaddr + EN1_PHYS_SHIFT(i)); - printk(" %2.2x", SA_prom[i]); } #else for(i = 0; i < ETHER_ADDR_LEN; i++) { - printk(" %2.2x", SA_prom[i]); dev->dev_addr[i] = SA_prom[i]; } #endif - printk("\n"); + printk("%s\n", print_mac(mac, dev->dev_addr)); ei_status.name = name; ei_status.tx_start_page = start_page; diff --git a/drivers/net/ne2.c b/drivers/net/ne2.c index 089b5bb702fcbccbc5dd6072fe5f5c315e11123b..f4cd8c7e81bad97536ed8911be353f5c3a0da367 100644 --- a/drivers/net/ne2.c +++ b/drivers/net/ne2.c @@ -251,8 +251,6 @@ static int __init do_ne2_probe(struct net_device *dev) int i; int adapter_found = 0; - SET_MODULE_OWNER(dev); - /* Do not check any supplied i/o locations. POS registers usually don't fail :) */ @@ -304,6 +302,7 @@ struct net_device * __init ne2_probe(int unit) static int ne2_procinfo(char *buf, int slot, struct net_device *dev) { int len=0; + DECLARE_MAC_BUF(mac); len += sprintf(buf+len, "The NE/2 Ethernet Adapter\n" ); len += sprintf(buf+len, "Driver written by Wim Dumon "); @@ -314,12 +313,7 @@ static int ne2_procinfo(char *buf, int slot, struct net_device *dev) len += sprintf(buf+len, "Based on the original NE2000 drivers\n" ); len += sprintf(buf+len, "Base IO: %#x\n", (unsigned int)dev->base_addr); len += sprintf(buf+len, "IRQ : %d\n", dev->irq); - -#define HW_ADDR(i) dev->dev_addr[i] - len += sprintf(buf+len, "HW addr : %x:%x:%x:%x:%x:%x\n", - HW_ADDR(0), HW_ADDR(1), HW_ADDR(2), - HW_ADDR(3), HW_ADDR(4), HW_ADDR(5) ); -#undef HW_ADDR + len += sprintf(buf+len, "HW addr : %s\n", print_mac(mac, dev->dev_addr)); return len; } @@ -332,6 +326,7 @@ static int __init ne2_probe1(struct net_device *dev, int slot) const char *name = "NE/2"; int start_page, stop_page; static unsigned version_printed; + DECLARE_MAC_BUF(mac); if (ei_debug && version_printed++ == 0) printk(version); @@ -432,7 +427,7 @@ static int __init ne2_probe1(struct net_device *dev, int slot) {E8390_RREAD+E8390_START, E8390_CMD}, }; - for (i = 0; i < sizeof(program_seq)/sizeof(program_seq[0]); i++) + for (i = 0; i < ARRAY_SIZE(program_seq); i++) outb_p(program_seq[i].value, base_addr + program_seq[i].offset); @@ -471,12 +466,12 @@ static int __init ne2_probe1(struct net_device *dev, int slot) dev->base_addr = base_addr; - for(i = 0; i < ETHER_ADDR_LEN; i++) { - printk(" %2.2x", SA_prom[i]); + for(i = 0; i < ETHER_ADDR_LEN; i++) dev->dev_addr[i] = SA_prom[i]; - } - printk("\n%s: %s found at %#x, using IRQ %d.\n", + printk(" %s\n", print_mac(mac, dev->dev_addr)); + + printk("%s: %s found at %#x, using IRQ %d.\n", dev->name, name, base_addr, dev->irq); mca_set_adapter_procfn(slot, (MCA_ProcFn) ne2_procinfo, dev); diff --git a/drivers/net/ne2k-pci.c b/drivers/net/ne2k-pci.c index 
f81d9398d605b89a5098f3aa4dc1e10c90fc9958..b569c90da4ba547ff69ff08f0860bb75736c0f8b 100644 --- a/drivers/net/ne2k-pci.c +++ b/drivers/net/ne2k-pci.c @@ -212,6 +212,7 @@ static int __devinit ne2k_pci_init_one (struct pci_dev *pdev, static unsigned int fnd_cnt; long ioaddr; int flags = pci_clone_list[chip_idx].flags; + DECLARE_MAC_BUF(mac); /* when built into the kernel, we only print version if device is found */ #ifndef MODULE @@ -265,7 +266,6 @@ static int __devinit ne2k_pci_init_one (struct pci_dev *pdev, dev_err(&pdev->dev, "cannot allocate ethernet device\n"); goto err_out_free_res; } - SET_MODULE_OWNER(dev); SET_NETDEV_DEV(dev, &pdev->dev); /* Reset card. Who knows what dain-bramaged state it was left in. */ @@ -308,7 +308,7 @@ static int __devinit ne2k_pci_init_one (struct pci_dev *pdev, {0x00, EN0_RSARHI}, {E8390_RREAD+E8390_START, E8390_CMD}, }; - for (i = 0; i < sizeof(program_seq)/sizeof(program_seq[0]); i++) + for (i = 0; i < ARRAY_SIZE(program_seq); i++) outb(program_seq[i].value, ioaddr + program_seq[i].offset); } @@ -366,12 +366,12 @@ static int __devinit ne2k_pci_init_one (struct pci_dev *pdev, if (i) goto err_out_free_netdev; - printk("%s: %s found at %#lx, IRQ %d, ", - dev->name, pci_clone_list[chip_idx].name, ioaddr, dev->irq); - for(i = 0; i < 6; i++) { - printk("%2.2X%s", SA_prom[i], i == 5 ? ".\n": ":"); + for(i = 0; i < 6; i++) dev->dev_addr[i] = SA_prom[i]; - } + printk("%s: %s found at %#lx, IRQ %d, %s.\n", + dev->name, pci_clone_list[chip_idx].name, ioaddr, dev->irq, + print_mac(mac, dev->dev_addr)); + memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len); return 0; @@ -636,8 +636,6 @@ static void ne2k_pci_get_drvinfo(struct net_device *dev, static const struct ethtool_ops ne2k_pci_ethtool_ops = { .get_drvinfo = ne2k_pci_get_drvinfo, - .get_tx_csum = ethtool_op_get_tx_csum, - .get_sg = ethtool_op_get_sg, }; static void __devexit ne2k_pci_remove_one (struct pci_dev *pdev) diff --git a/drivers/net/ne3210.c b/drivers/net/ne3210.c index 1a6fed76d4cc6f429d04570115f27360f5ffe234..425043a88db9e9409d958006a30fafddcd200fbe 100644 --- a/drivers/net/ne3210.c +++ b/drivers/net/ne3210.c @@ -99,6 +99,7 @@ static int __init ne3210_eisa_probe (struct device *device) int i, retval, port_index; struct eisa_device *edev = to_eisa_device (device); struct net_device *dev; + DECLARE_MAC_BUF(mac); /* Allocate dev->priv and fill in 8390 specific dev fields. */ if (!(dev = alloc_ei_netdev ())) { @@ -106,7 +107,6 @@ static int __init ne3210_eisa_probe (struct device *device) return -ENOMEM; } - SET_MODULE_OWNER(dev); SET_NETDEV_DEV(dev, device); device->driver_data = dev; ioaddr = edev->base_addr; @@ -128,17 +128,15 @@ static int __init ne3210_eisa_probe (struct device *device) inb(ioaddr + NE3210_CFG1), inb(ioaddr + NE3210_CFG2)); #endif - port_index = inb(ioaddr + NE3210_CFG2) >> 6; - printk("ne3210.c: NE3210 in EISA slot %d, media: %s, addr:", - edev->slot, ifmap[port_index]); for(i = 0; i < ETHER_ADDR_LEN; i++) - printk(" %02x", (dev->dev_addr[i] = inb(ioaddr + NE3210_SA_PROM + i))); - + dev->dev_addr[i] = inb(ioaddr + NE3210_SA_PROM + i); + printk("ne3210.c: NE3210 in EISA slot %d, media: %s, addr: %s.\n", + edev->slot, ifmap[port_index], print_mac(mac, dev->dev_addr)); /* Snarf the interrupt now. 
CFG file has them all listed as `edge' with share=NO */ dev->irq = irq_map[(inb(ioaddr + NE3210_CFG2) >> 3) & 0x07]; - printk(".\nne3210.c: using IRQ %d, ", dev->irq); + printk("ne3210.c: using IRQ %d, ", dev->irq); retval = request_irq(dev->irq, ei_interrupt, 0, DRV_NAME, dev); if (retval) { diff --git a/drivers/net/netconsole.c b/drivers/net/netconsole.c index 69233f6aa05ca2d115f77ed3c71d00954c5c0204..5ffbb88916473d45e6e60d1d3b57b22ec3651ee5 100644 --- a/drivers/net/netconsole.c +++ b/drivers/net/netconsole.c @@ -35,95 +35,780 @@ ****************************************************************/ #include -#include #include #include #include -#include #include #include -#include -#include #include +#include +#include MODULE_AUTHOR("Maintainer: Matt Mackall "); MODULE_DESCRIPTION("Console driver for network interfaces"); MODULE_LICENSE("GPL"); -static char config[256]; -module_param_string(netconsole, config, 256, 0); +#define MAX_PARAM_LENGTH 256 +#define MAX_PRINT_CHUNK 1000 + +static char config[MAX_PARAM_LENGTH]; +module_param_string(netconsole, config, MAX_PARAM_LENGTH, 0); MODULE_PARM_DESC(netconsole, " netconsole=[src-port]@[src-ip]/[dev],[tgt-port]@/[tgt-macaddr]\n"); -static struct netpoll np = { - .name = "netconsole", - .dev_name = "eth0", - .local_port = 6665, - .remote_port = 6666, - .remote_mac = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff}, +#ifndef MODULE +static int __init option_setup(char *opt) +{ + strlcpy(config, opt, MAX_PARAM_LENGTH); + return 1; +} +__setup("netconsole=", option_setup); +#endif /* MODULE */ + +/* Linked list of all configured targets */ +static LIST_HEAD(target_list); + +/* This needs to be a spinlock because write_msg() cannot sleep */ +static DEFINE_SPINLOCK(target_list_lock); + +/** + * struct netconsole_target - Represents a configured netconsole target. + * @list: Links this target into the target_list. + * @item: Links us into the configfs subsystem hierarchy. + * @enabled: On / off knob to enable / disable target. + * Visible from userspace (read-write). + * We maintain a strict 1:1 correspondence between this and + * whether the corresponding netpoll is active or inactive. + * Also, other parameters of a target may be modified at + * runtime only when it is disabled (enabled == 0). + * @np: The netpoll structure for this target. + * Contains the other userspace visible parameters: + * dev_name (read-write) + * local_port (read-write) + * remote_port (read-write) + * local_ip (read-write) + * remote_ip (read-write) + * local_mac (read-only) + * remote_mac (read-write) + */ +struct netconsole_target { + struct list_head list; +#ifdef CONFIG_NETCONSOLE_DYNAMIC + struct config_item item; +#endif + int enabled; + struct netpoll np; }; -static int configured = 0; -#define MAX_PRINT_CHUNK 1000 +#ifdef CONFIG_NETCONSOLE_DYNAMIC -static void write_msg(struct console *con, const char *msg, unsigned int len) +static struct configfs_subsystem netconsole_subsys; + +static int __init dynamic_netconsole_init(void) { - int frag, left; - unsigned long flags; + config_group_init(&netconsole_subsys.su_group); + mutex_init(&netconsole_subsys.su_mutex); + return configfs_register_subsystem(&netconsole_subsys); +} - if (!np.dev) - return; +static void __exit dynamic_netconsole_exit(void) +{ + configfs_unregister_subsystem(&netconsole_subsys); +} + +/* + * Targets that were created by parsing the boot/module option string + * do not exist in the configfs hierarchy (and have NULL names) and will + * never go away, so make these a no-op for them. 
+ */ +static void netconsole_target_get(struct netconsole_target *nt) +{ + if (config_item_name(&nt->item)) + config_item_get(&nt->item); +} + +static void netconsole_target_put(struct netconsole_target *nt) +{ + if (config_item_name(&nt->item)) + config_item_put(&nt->item); +} + +#else /* !CONFIG_NETCONSOLE_DYNAMIC */ + +static int __init dynamic_netconsole_init(void) +{ + return 0; +} - local_irq_save(flags); +static void __exit dynamic_netconsole_exit(void) +{ +} - for(left = len; left; ) { - frag = min(left, MAX_PRINT_CHUNK); - netpoll_send_udp(&np, msg, frag); - msg += frag; - left -= frag; +/* + * No danger of targets going away from under us when dynamic + * reconfigurability is off. + */ +static void netconsole_target_get(struct netconsole_target *nt) +{ +} + +static void netconsole_target_put(struct netconsole_target *nt) +{ +} + +#endif /* CONFIG_NETCONSOLE_DYNAMIC */ + +/* Allocate new target (from boot/module param) and setup netpoll for it */ +static struct netconsole_target *alloc_param_target(char *target_config) +{ + int err = -ENOMEM; + struct netconsole_target *nt; + + /* + * Allocate and initialize with defaults. + * Note that these targets get their config_item fields zeroed-out. + */ + nt = kzalloc(sizeof(*nt), GFP_KERNEL); + if (!nt) { + printk(KERN_ERR "netconsole: failed to allocate memory\n"); + goto fail; } - local_irq_restore(flags); + nt->np.name = "netconsole"; + strlcpy(nt->np.dev_name, "eth0", IFNAMSIZ); + nt->np.local_port = 6665; + nt->np.remote_port = 6666; + memset(nt->np.remote_mac, 0xff, ETH_ALEN); + + /* Parse parameters and setup netpoll */ + err = netpoll_parse_options(&nt->np, target_config); + if (err) + goto fail; + + err = netpoll_setup(&nt->np); + if (err) + goto fail; + + nt->enabled = 1; + + return nt; + +fail: + kfree(nt); + return ERR_PTR(err); } -static struct console netconsole = { - .name = "netcon", - .flags = CON_ENABLED | CON_PRINTBUFFER, - .write = write_msg +/* Cleanup netpoll for given target (from boot/module param) and free it */ +static void free_param_target(struct netconsole_target *nt) +{ + netpoll_cleanup(&nt->np); + kfree(nt); +} + +#ifdef CONFIG_NETCONSOLE_DYNAMIC + +/* + * Our subsystem hierarchy is: + * + * /sys/kernel/config/netconsole/ + * | + * / + * | enabled + * | dev_name + * | local_port + * | remote_port + * | local_ip + * | remote_ip + * | local_mac + * | remote_mac + * | + * /... + */ + +struct netconsole_target_attr { + struct configfs_attribute attr; + ssize_t (*show)(struct netconsole_target *nt, + char *buf); + ssize_t (*store)(struct netconsole_target *nt, + const char *buf, + size_t count); }; -static int option_setup(char *opt) +static struct netconsole_target *to_target(struct config_item *item) { - configured = !netpoll_parse_options(&np, opt); - return 1; + return item ? + container_of(item, struct netconsole_target, item) : + NULL; } -__setup("netconsole=", option_setup); +/* + * Wrapper over simple_strtol (base 10) with sanity and range checking. + * We return (signed) long only because we may want to return errors. + * Do not use this to convert numbers that are allowed to be negative. 
+ */ +static long strtol10_check_range(const char *cp, long min, long max) +{ + long ret; + char *p = (char *) cp; + + WARN_ON(min < 0); + WARN_ON(max < min); + + ret = simple_strtol(p, &p, 10); + + if (*p && (*p != '\n')) { + printk(KERN_ERR "netconsole: invalid input\n"); + return -EINVAL; + } + if ((ret < min) || (ret > max)) { + printk(KERN_ERR "netconsole: input %ld must be between " + "%ld and %ld\n", ret, min, max); + return -EINVAL; + } + + return ret; +} + +/* + * Attribute operations for netconsole_target. + */ + +static ssize_t show_enabled(struct netconsole_target *nt, char *buf) +{ + return snprintf(buf, PAGE_SIZE, "%d\n", nt->enabled); +} + +static ssize_t show_dev_name(struct netconsole_target *nt, char *buf) +{ + return snprintf(buf, PAGE_SIZE, "%s\n", nt->np.dev_name); +} + +static ssize_t show_local_port(struct netconsole_target *nt, char *buf) +{ + return snprintf(buf, PAGE_SIZE, "%d\n", nt->np.local_port); +} + +static ssize_t show_remote_port(struct netconsole_target *nt, char *buf) +{ + return snprintf(buf, PAGE_SIZE, "%d\n", nt->np.remote_port); +} + +static ssize_t show_local_ip(struct netconsole_target *nt, char *buf) +{ + return snprintf(buf, PAGE_SIZE, "%d.%d.%d.%d\n", + HIPQUAD(nt->np.local_ip)); +} + +static ssize_t show_remote_ip(struct netconsole_target *nt, char *buf) +{ + return snprintf(buf, PAGE_SIZE, "%d.%d.%d.%d\n", + HIPQUAD(nt->np.remote_ip)); +} + +static ssize_t show_local_mac(struct netconsole_target *nt, char *buf) +{ + DECLARE_MAC_BUF(mac); + return snprintf(buf, PAGE_SIZE, "%s\n", + print_mac(mac, nt->np.local_mac)); +} + +static ssize_t show_remote_mac(struct netconsole_target *nt, char *buf) +{ + DECLARE_MAC_BUF(mac); + return snprintf(buf, PAGE_SIZE, "%s\n", + print_mac(mac, nt->np.remote_mac)); +} -static int init_netconsole(void) +/* + * This one is special -- targets created through the configfs interface + * are not enabled (and the corresponding netpoll activated) by default. + * The user is expected to set the desired parameters first (which + * would enable him to dynamically add new netpoll targets for new + * network interfaces as and when they come up). + */ +static ssize_t store_enabled(struct netconsole_target *nt, + const char *buf, + size_t count) { int err; + long enabled; + + enabled = strtol10_check_range(buf, 0, 1); + if (enabled < 0) + return enabled; + + if (enabled) { /* 1 */ + + /* + * Skip netpoll_parse_options() -- all the attributes are + * already configured via configfs. Just print them out. 
+ */ + netpoll_print_options(&nt->np); + + err = netpoll_setup(&nt->np); + if (err) + return err; + + printk(KERN_INFO "netconsole: network logging started\n"); + + } else { /* 0 */ + netpoll_cleanup(&nt->np); + } + + nt->enabled = enabled; + + return strnlen(buf, count); +} + +static ssize_t store_dev_name(struct netconsole_target *nt, + const char *buf, + size_t count) +{ + size_t len; + + if (nt->enabled) { + printk(KERN_ERR "netconsole: target (%s) is enabled, " + "disable to update parameters\n", + config_item_name(&nt->item)); + return -EINVAL; + } + + strlcpy(nt->np.dev_name, buf, IFNAMSIZ); + + /* Get rid of possible trailing newline from echo(1) */ + len = strnlen(nt->np.dev_name, IFNAMSIZ); + if (nt->np.dev_name[len - 1] == '\n') + nt->np.dev_name[len - 1] = '\0'; + + return strnlen(buf, count); +} + +static ssize_t store_local_port(struct netconsole_target *nt, + const char *buf, + size_t count) +{ + long local_port; +#define __U16_MAX ((__u16) ~0U) + + if (nt->enabled) { + printk(KERN_ERR "netconsole: target (%s) is enabled, " + "disable to update parameters\n", + config_item_name(&nt->item)); + return -EINVAL; + } + + local_port = strtol10_check_range(buf, 0, __U16_MAX); + if (local_port < 0) + return local_port; + + nt->np.local_port = local_port; + + return strnlen(buf, count); +} + +static ssize_t store_remote_port(struct netconsole_target *nt, + const char *buf, + size_t count) +{ + long remote_port; +#define __U16_MAX ((__u16) ~0U) + + if (nt->enabled) { + printk(KERN_ERR "netconsole: target (%s) is enabled, " + "disable to update parameters\n", + config_item_name(&nt->item)); + return -EINVAL; + } + + remote_port = strtol10_check_range(buf, 0, __U16_MAX); + if (remote_port < 0) + return remote_port; + + nt->np.remote_port = remote_port; + + return strnlen(buf, count); +} + +static ssize_t store_local_ip(struct netconsole_target *nt, + const char *buf, + size_t count) +{ + if (nt->enabled) { + printk(KERN_ERR "netconsole: target (%s) is enabled, " + "disable to update parameters\n", + config_item_name(&nt->item)); + return -EINVAL; + } + + nt->np.local_ip = ntohl(in_aton(buf)); + + return strnlen(buf, count); +} + +static ssize_t store_remote_ip(struct netconsole_target *nt, + const char *buf, + size_t count) +{ + if (nt->enabled) { + printk(KERN_ERR "netconsole: target (%s) is enabled, " + "disable to update parameters\n", + config_item_name(&nt->item)); + return -EINVAL; + } + + nt->np.remote_ip = ntohl(in_aton(buf)); + + return strnlen(buf, count); +} + +static ssize_t store_remote_mac(struct netconsole_target *nt, + const char *buf, + size_t count) +{ + u8 remote_mac[ETH_ALEN]; + char *p = (char *) buf; + int i; - if(strlen(config)) - option_setup(config); + if (nt->enabled) { + printk(KERN_ERR "netconsole: target (%s) is enabled, " + "disable to update parameters\n", + config_item_name(&nt->item)); + return -EINVAL; + } - if(!configured) { - printk("netconsole: not configured, aborting\n"); - return 0; + for (i = 0; i < ETH_ALEN - 1; i++) { + remote_mac[i] = simple_strtoul(p, &p, 16); + if (*p != ':') + goto invalid; + p++; } + remote_mac[ETH_ALEN - 1] = simple_strtoul(p, &p, 16); + if (*p && (*p != '\n')) + goto invalid; + + memcpy(nt->np.remote_mac, remote_mac, ETH_ALEN); + + return strnlen(buf, count); + +invalid: + printk(KERN_ERR "netconsole: invalid input\n"); + return -EINVAL; +} + +/* + * Attribute definitions for netconsole_target. 
+ */ + +#define NETCONSOLE_TARGET_ATTR_RO(_name) \ +static struct netconsole_target_attr netconsole_target_##_name = \ + __CONFIGFS_ATTR(_name, S_IRUGO, show_##_name, NULL) - err = netpoll_setup(&np); +#define NETCONSOLE_TARGET_ATTR_RW(_name) \ +static struct netconsole_target_attr netconsole_target_##_name = \ + __CONFIGFS_ATTR(_name, S_IRUGO | S_IWUSR, show_##_name, store_##_name) + +NETCONSOLE_TARGET_ATTR_RW(enabled); +NETCONSOLE_TARGET_ATTR_RW(dev_name); +NETCONSOLE_TARGET_ATTR_RW(local_port); +NETCONSOLE_TARGET_ATTR_RW(remote_port); +NETCONSOLE_TARGET_ATTR_RW(local_ip); +NETCONSOLE_TARGET_ATTR_RW(remote_ip); +NETCONSOLE_TARGET_ATTR_RO(local_mac); +NETCONSOLE_TARGET_ATTR_RW(remote_mac); + +static struct configfs_attribute *netconsole_target_attrs[] = { + &netconsole_target_enabled.attr, + &netconsole_target_dev_name.attr, + &netconsole_target_local_port.attr, + &netconsole_target_remote_port.attr, + &netconsole_target_local_ip.attr, + &netconsole_target_remote_ip.attr, + &netconsole_target_local_mac.attr, + &netconsole_target_remote_mac.attr, + NULL, +}; + +/* + * Item operations and type for netconsole_target. + */ + +static void netconsole_target_release(struct config_item *item) +{ + kfree(to_target(item)); +} + +static ssize_t netconsole_target_attr_show(struct config_item *item, + struct configfs_attribute *attr, + char *buf) +{ + ssize_t ret = -EINVAL; + struct netconsole_target *nt = to_target(item); + struct netconsole_target_attr *na = + container_of(attr, struct netconsole_target_attr, attr); + + if (na->show) + ret = na->show(nt, buf); + + return ret; +} + +static ssize_t netconsole_target_attr_store(struct config_item *item, + struct configfs_attribute *attr, + const char *buf, + size_t count) +{ + ssize_t ret = -EINVAL; + struct netconsole_target *nt = to_target(item); + struct netconsole_target_attr *na = + container_of(attr, struct netconsole_target_attr, attr); + + if (na->store) + ret = na->store(nt, buf, count); + + return ret; +} + +static struct configfs_item_operations netconsole_target_item_ops = { + .release = netconsole_target_release, + .show_attribute = netconsole_target_attr_show, + .store_attribute = netconsole_target_attr_store, +}; + +static struct config_item_type netconsole_target_type = { + .ct_attrs = netconsole_target_attrs, + .ct_item_ops = &netconsole_target_item_ops, + .ct_owner = THIS_MODULE, +}; + +/* + * Group operations and type for netconsole_subsys. + */ + +static struct config_item *make_netconsole_target(struct config_group *group, + const char *name) +{ + unsigned long flags; + struct netconsole_target *nt; + + /* + * Allocate and initialize with defaults. + * Target is disabled at creation (enabled == 0). 
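[Editor's sketch] The show/store dispatch above is the usual configfs pattern: the core hands back a pointer to the embedded struct configfs_attribute, and container_of() recovers the driver's wrapper, which carries the callbacks. A tiny standalone illustration of that idiom; the struct and function names are invented for the sketch.

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct generic_attr { const char *name; };

struct wrapped_attr {
	struct generic_attr attr;	/* embedded member handed to the core */
	int (*show)(void);		/* driver-private callback */
};

static int show_answer(void) { return 42; }

static struct wrapped_attr demo = {
	.attr = { .name = "answer" },
	.show = show_answer,
};

int main(void)
{
	struct generic_attr *ga = &demo.attr;	/* what the core would pass back */
	struct wrapped_attr *wa = container_of(ga, struct wrapped_attr, attr);

	printf("%s = %d\n", wa->attr.name, wa->show());
	return 0;
}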
+ */ + nt = kzalloc(sizeof(*nt), GFP_KERNEL); + if (!nt) { + printk(KERN_ERR "netconsole: failed to allocate memory\n"); + return NULL; + } + + nt->np.name = "netconsole"; + strlcpy(nt->np.dev_name, "eth0", IFNAMSIZ); + nt->np.local_port = 6665; + nt->np.remote_port = 6666; + memset(nt->np.remote_mac, 0xff, ETH_ALEN); + + /* Initialize the config_item member */ + config_item_init_type_name(&nt->item, name, &netconsole_target_type); + + /* Adding, but it is disabled */ + spin_lock_irqsave(&target_list_lock, flags); + list_add(&nt->list, &target_list); + spin_unlock_irqrestore(&target_list_lock, flags); + + return &nt->item; +} + +static void drop_netconsole_target(struct config_group *group, + struct config_item *item) +{ + unsigned long flags; + struct netconsole_target *nt = to_target(item); + + spin_lock_irqsave(&target_list_lock, flags); + list_del(&nt->list); + spin_unlock_irqrestore(&target_list_lock, flags); + + /* + * The target may have never been enabled, or was manually disabled + * before being removed so netpoll may have already been cleaned up. + */ + if (nt->enabled) + netpoll_cleanup(&nt->np); + + config_item_put(&nt->item); +} + +static struct configfs_group_operations netconsole_subsys_group_ops = { + .make_item = make_netconsole_target, + .drop_item = drop_netconsole_target, +}; + +static struct config_item_type netconsole_subsys_type = { + .ct_group_ops = &netconsole_subsys_group_ops, + .ct_owner = THIS_MODULE, +}; + +/* The netconsole configfs subsystem */ +static struct configfs_subsystem netconsole_subsys = { + .su_group = { + .cg_item = { + .ci_namebuf = "netconsole", + .ci_type = &netconsole_subsys_type, + }, + }, +}; + +#endif /* CONFIG_NETCONSOLE_DYNAMIC */ + +/* Handle network interface device notifications */ +static int netconsole_netdev_event(struct notifier_block *this, + unsigned long event, + void *ptr) +{ + unsigned long flags; + struct netconsole_target *nt; + struct net_device *dev = ptr; + + if (!(event == NETDEV_CHANGEADDR || event == NETDEV_CHANGENAME)) + goto done; + + spin_lock_irqsave(&target_list_lock, flags); + list_for_each_entry(nt, &target_list, list) { + netconsole_target_get(nt); + if (nt->np.dev == dev) { + switch (event) { + case NETDEV_CHANGEADDR: + memcpy(nt->np.local_mac, dev->dev_addr, ETH_ALEN); + break; + + case NETDEV_CHANGENAME: + strlcpy(nt->np.dev_name, dev->name, IFNAMSIZ); + break; + } + } + netconsole_target_put(nt); + } + spin_unlock_irqrestore(&target_list_lock, flags); + +done: + return NOTIFY_DONE; +} + +static struct notifier_block netconsole_netdev_notifier = { + .notifier_call = netconsole_netdev_event, +}; + +static void write_msg(struct console *con, const char *msg, unsigned int len) +{ + int frag, left; + unsigned long flags; + struct netconsole_target *nt; + const char *tmp; + + /* Avoid taking lock and disabling interrupts unnecessarily */ + if (list_empty(&target_list)) + return; + + spin_lock_irqsave(&target_list_lock, flags); + list_for_each_entry(nt, &target_list, list) { + netconsole_target_get(nt); + if (nt->enabled && netif_running(nt->np.dev)) { + /* + * We nest this inside the for-each-target loop above + * so that we're able to get as much logging out to + * at least one target if we die inside here, instead + * of unnecessarily keeping all targets in lock-step. 
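[Editor's sketch] netconsole_netdev_event() above filters for the two interesting events and refreshes each matching target's cached device name or MAC while holding the list lock. Below is a rough userspace analogue of that filter-then-update flow; the locking is omitted, and the names (on_netdev_event, EV_*) are invented.

#include <stdio.h>
#include <string.h>

enum { EV_CHANGEADDR = 1, EV_CHANGENAME = 2 };

struct target { char dev_name[16]; unsigned char mac[6]; };

static void on_netdev_event(int event, const char *old_name,
			    const char *new_name, const unsigned char *new_mac,
			    struct target *tgts, int ntgts)
{
	int i;

	if (event != EV_CHANGEADDR && event != EV_CHANGENAME)
		return;				/* uninteresting event */

	for (i = 0; i < ntgts; i++) {
		if (strcmp(tgts[i].dev_name, old_name))
			continue;		/* bound to another device */
		switch (event) {
		case EV_CHANGEADDR:
			memcpy(tgts[i].mac, new_mac, 6);
			break;
		case EV_CHANGENAME:
			snprintf(tgts[i].dev_name, sizeof(tgts[i].dev_name),
				 "%s", new_name);
			break;
		}
	}
}

int main(void)
{
	struct target t[1] = { { "eth0", { 0 } } };

	on_netdev_event(EV_CHANGENAME, "eth0", "lan0", NULL, t, 1);
	printf("%s\n", t[0].dev_name);		/* lan0 */
	return 0;
}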
+ */ + tmp = msg; + for (left = len; left;) { + frag = min(left, MAX_PRINT_CHUNK); + netpoll_send_udp(&nt->np, tmp, frag); + tmp += frag; + left -= frag; + } + } + netconsole_target_put(nt); + } + spin_unlock_irqrestore(&target_list_lock, flags); +} + +static struct console netconsole = { + .name = "netcon", + .flags = CON_ENABLED | CON_PRINTBUFFER, + .write = write_msg, +}; + +static int __init init_netconsole(void) +{ + int err; + struct netconsole_target *nt, *tmp; + unsigned long flags; + char *target_config; + char *input = config; + + if (strnlen(input, MAX_PARAM_LENGTH)) { + while ((target_config = strsep(&input, ";"))) { + nt = alloc_param_target(target_config); + if (IS_ERR(nt)) { + err = PTR_ERR(nt); + goto fail; + } + spin_lock_irqsave(&target_list_lock, flags); + list_add(&nt->list, &target_list); + spin_unlock_irqrestore(&target_list_lock, flags); + } + } + + err = register_netdevice_notifier(&netconsole_netdev_notifier); + if (err) + goto fail; + + err = dynamic_netconsole_init(); if (err) - return err; + goto undonotifier; register_console(&netconsole); printk(KERN_INFO "netconsole: network logging started\n"); - return 0; + + return err; + +undonotifier: + unregister_netdevice_notifier(&netconsole_netdev_notifier); + +fail: + printk(KERN_ERR "netconsole: cleaning up\n"); + + /* + * Remove all targets and destroy them (only targets created + * from the boot/module option exist here). Skipping the list + * lock is safe here, and netpoll_cleanup() will sleep. + */ + list_for_each_entry_safe(nt, tmp, &target_list, list) { + list_del(&nt->list); + free_param_target(nt); + } + + return err; } -static void cleanup_netconsole(void) +static void __exit cleanup_netconsole(void) { + struct netconsole_target *nt, *tmp; + unregister_console(&netconsole); - netpoll_cleanup(&np); + dynamic_netconsole_exit(); + unregister_netdevice_notifier(&netconsole_netdev_notifier); + + /* + * Targets created via configfs pin references on our module + * and would first be rmdir(2)'ed from userspace. We reach + * here only when they are already destroyed, and only those + * created from the boot/module option are left, so remove and + * destroy them. Skipping the list lock is safe here, and + * netpoll_cleanup() will sleep. 
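[Editor's sketch] write_msg() above pushes each console message out in MAX_PRINT_CHUNK-sized pieces, presumably so each netpoll UDP send stays bounded regardless of how large the printk was. A self-contained sketch of that chunking loop, with send_chunk() standing in for netpoll_send_udp().

#include <stdio.h>
#include <string.h>

#define MAX_PRINT_CHUNK 1000

static void send_chunk(const char *buf, int len)
{
	printf("sending %d bytes starting with '%c'\n", len, buf[0]);
}

static void send_in_chunks(const char *msg, int len)
{
	const char *tmp = msg;
	int left, frag;

	for (left = len; left; ) {
		frag = left < MAX_PRINT_CHUNK ? left : MAX_PRINT_CHUNK;
		send_chunk(tmp, frag);
		tmp += frag;
		left -= frag;
	}
}

int main(void)
{
	char msg[2500];

	memset(msg, 'x', sizeof(msg));
	send_in_chunks(msg, sizeof(msg));	/* 1000 + 1000 + 500 */
	return 0;
}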
+ */ + list_for_each_entry_safe(nt, tmp, &target_list, list) { + list_del(&nt->list); + free_param_target(nt); + } } module_init(init_netconsole); diff --git a/drivers/net/netx-eth.c b/drivers/net/netx-eth.c index 2b8da0a54998cf63ca122cbed6d2bf68060fddbb..eb0aff787dfda8e5c910c45f752d4bb07c82eef4 100644 --- a/drivers/net/netx-eth.c +++ b/drivers/net/netx-eth.c @@ -97,7 +97,6 @@ struct netx_eth_priv { void __iomem *sram_base, *xpec_base, *xmac_base; int id; - struct net_device_stats stats; struct mii_if_info mii; u32 msg_enable; struct xc *xc; @@ -129,8 +128,8 @@ netx_eth_hard_start_xmit(struct sk_buff *skb, struct net_device *ndev) FIFO_PTR_FRAMELEN(len)); ndev->trans_start = jiffies; - priv->stats.tx_packets++; - priv->stats.tx_bytes += skb->len; + dev->stats.tx_packets++; + dev->stats.tx_bytes += skb->len; netif_stop_queue(ndev); spin_unlock_irq(&priv->lock); @@ -156,7 +155,7 @@ static void netx_eth_receive(struct net_device *ndev) if (unlikely(skb == NULL)) { printk(KERN_NOTICE "%s: Low memory, packet dropped.\n", ndev->name); - priv->stats.rx_dropped++; + dev->stats.rx_dropped++; return; } @@ -170,8 +169,8 @@ static void netx_eth_receive(struct net_device *ndev) ndev->last_rx = jiffies; skb->protocol = eth_type_trans(skb, ndev); netif_rx(skb); - priv->stats.rx_packets++; - priv->stats.rx_bytes += len; + dev->stats.rx_packets++; + dev->stats.rx_bytes += len; } static irqreturn_t @@ -210,12 +209,6 @@ netx_eth_interrupt(int irq, void *dev_id) return IRQ_HANDLED; } -static struct net_device_stats *netx_eth_query_statistics(struct net_device *ndev) -{ - struct netx_eth_priv *priv = netdev_priv(ndev); - return &priv->stats; -} - static int netx_eth_open(struct net_device *ndev) { struct netx_eth_priv *priv = netdev_priv(ndev); @@ -323,7 +316,6 @@ static int netx_eth_enable(struct net_device *ndev) ndev->hard_start_xmit = netx_eth_hard_start_xmit; ndev->tx_timeout = netx_eth_timeout; ndev->watchdog_timeo = msecs_to_jiffies(5000); - ndev->get_stats = netx_eth_query_statistics; ndev->set_multicast_list = netx_eth_set_multicast_list; priv->msg_enable = NETIF_MSG_LINK; @@ -390,7 +382,6 @@ static int netx_eth_drv_probe(struct platform_device *pdev) ret = -ENOMEM; goto exit; } - SET_MODULE_OWNER(ndev); SET_NETDEV_DEV(ndev, &pdev->dev); platform_set_drvdata(pdev, ndev); diff --git a/drivers/net/netxen/netxen_nic.h b/drivers/net/netxen/netxen_nic.h index d4c92cc879d416e227f36b5f4e42c3350cecb564..fbc2553275dc82d4dc0b671ba4d5147fb49fc159 100644 --- a/drivers/net/netxen/netxen_nic.h +++ b/drivers/net/netxen/netxen_nic.h @@ -880,6 +880,7 @@ struct netxen_adapter { struct netxen_adapter *master; struct net_device *netdev; struct pci_dev *pdev; + struct napi_struct napi; struct net_device_stats net_stats; unsigned char mac_addr[ETH_ALEN]; int mtu; @@ -918,7 +919,7 @@ struct netxen_adapter { u16 link_duplex; u16 state; u16 link_autoneg; - int rcsum; + int rx_csum; int status; spinlock_t stats_lock; @@ -1118,7 +1119,7 @@ static const struct netxen_brdinfo netxen_boards[] = { {NETXEN_BRDTYPE_P2_SB31_2G, 2, "Dual Gb"}, }; -#define NUM_SUPPORTED_BOARDS (sizeof(netxen_boards)/sizeof(struct netxen_brdinfo)) +#define NUM_SUPPORTED_BOARDS ARRAY_SIZE(netxen_boards) static inline void get_brd_port_by_type(u32 type, int *ports) { diff --git a/drivers/net/netxen/netxen_nic_ethtool.c b/drivers/net/netxen/netxen_nic_ethtool.c index a6138b474b4aa8f7b00a936e3e15f66cb49aae54..cfb847b0cae3e44d9ead99eb2b49492954f22e33 100644 --- a/drivers/net/netxen/netxen_nic_ethtool.c +++ b/drivers/net/netxen/netxen_nic_ethtool.c @@ -115,8 
+115,6 @@ netxen_nic_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *drvinfo) sprintf(drvinfo->fw_version, "%d.%d.%d", fw_major, fw_minor, fw_build); strncpy(drvinfo->bus_info, pci_name(adapter->pdev), 32); - drvinfo->n_stats = NETXEN_NIC_STATS_LEN; - drvinfo->testinfo_len = NETXEN_NIC_TEST_LEN; drvinfo->regdump_len = NETXEN_NIC_REGS_LEN; drvinfo->eedump_len = netxen_nic_get_eeprom_len(dev); } @@ -518,17 +516,17 @@ netxen_nic_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ring) ring->rx_jumbo_pending = 0; for (i = 0; i < MAX_RCV_CTX; ++i) { ring->rx_pending += adapter->recv_ctx[i]. - rcv_desc[RCV_DESC_NORMAL_CTXID].rcv_pending; + rcv_desc[RCV_DESC_NORMAL_CTXID].max_rx_desc_count; ring->rx_jumbo_pending += adapter->recv_ctx[i]. - rcv_desc[RCV_DESC_JUMBO_CTXID].rcv_pending; + rcv_desc[RCV_DESC_JUMBO_CTXID].max_rx_desc_count; } + ring->tx_pending = adapter->max_tx_desc_count; - ring->rx_max_pending = adapter->max_rx_desc_count; - ring->tx_max_pending = adapter->max_tx_desc_count; - ring->rx_jumbo_max_pending = adapter->max_jumbo_rx_desc_count; + ring->rx_max_pending = MAX_RCV_DESCRIPTORS; + ring->tx_max_pending = MAX_CMD_DESCRIPTORS_HOST; + ring->rx_jumbo_max_pending = MAX_JUMBO_RCV_DESCRIPTORS; ring->rx_mini_max_pending = 0; ring->rx_mini_pending = 0; - ring->rx_jumbo_pending = 0; } static void @@ -672,9 +670,16 @@ static int netxen_nic_reg_test(struct net_device *dev) return 0; } -static int netxen_nic_diag_test_count(struct net_device *dev) +static int netxen_get_sset_count(struct net_device *dev, int sset) { - return NETXEN_NIC_TEST_LEN; + switch (sset) { + case ETH_SS_TEST: + return NETXEN_NIC_TEST_LEN; + case ETH_SS_STATS: + return NETXEN_NIC_STATS_LEN; + default: + return -EOPNOTSUPP; + } } static void @@ -709,11 +714,6 @@ netxen_nic_get_strings(struct net_device *dev, u32 stringset, u8 * data) } } -static int netxen_nic_get_stats_count(struct net_device *dev) -{ - return NETXEN_NIC_STATS_LEN; -} - static void netxen_nic_get_ethtool_stats(struct net_device *dev, struct ethtool_stats *stats, u64 * data) @@ -731,6 +731,19 @@ netxen_nic_get_ethtool_stats(struct net_device *dev, } } +static u32 netxen_nic_get_rx_csum(struct net_device *dev) +{ + struct netxen_adapter *adapter = netdev_priv(dev); + return adapter->rx_csum; +} + +static int netxen_nic_set_rx_csum(struct net_device *dev, u32 data) +{ + struct netxen_adapter *adapter = netdev_priv(dev); + adapter->rx_csum = !!data; + return 0; +} + struct ethtool_ops netxen_nic_ethtool_ops = { .get_settings = netxen_nic_get_settings, .set_settings = netxen_nic_set_settings, @@ -744,15 +757,13 @@ struct ethtool_ops netxen_nic_ethtool_ops = { .get_ringparam = netxen_nic_get_ringparam, .get_pauseparam = netxen_nic_get_pauseparam, .set_pauseparam = netxen_nic_set_pauseparam, - .get_tx_csum = ethtool_op_get_tx_csum, .set_tx_csum = ethtool_op_set_tx_csum, - .get_sg = ethtool_op_get_sg, .set_sg = ethtool_op_set_sg, - .get_tso = ethtool_op_get_tso, .set_tso = ethtool_op_set_tso, - .self_test_count = netxen_nic_diag_test_count, .self_test = netxen_nic_diag_test, .get_strings = netxen_nic_get_strings, - .get_stats_count = netxen_nic_get_stats_count, .get_ethtool_stats = netxen_nic_get_ethtool_stats, + .get_sset_count = netxen_get_sset_count, + .get_rx_csum = netxen_nic_get_rx_csum, + .set_rx_csum = netxen_nic_set_rx_csum, }; diff --git a/drivers/net/netxen/netxen_nic_hw.c b/drivers/net/netxen/netxen_nic_hw.c index a7b8d7f23259ced85ce9a5925c68dd5e3235dea5..2c19b8df98fab06c205d26afbf513b019856e670 100644 --- 
a/drivers/net/netxen/netxen_nic_hw.c +++ b/drivers/net/netxen/netxen_nic_hw.c @@ -569,7 +569,7 @@ int netxen_is_flash_supported(struct netxen_adapter *adapter) /* if the flash size less than 4Mb, make huge war cry and die */ for (j = 1; j < 4; j++) { addr = j * NETXEN_NIC_WINDOW_MARGIN; - for (i = 0; i < (sizeof(locs) / sizeof(locs[0])); i++) { + for (i = 0; i < ARRAY_SIZE(locs); i++) { if (netxen_rom_fast_read(adapter, locs[i], &val01) == 0 && netxen_rom_fast_read(adapter, (addr + locs[i]), &val02) == 0) { diff --git a/drivers/net/netxen/netxen_nic_init.c b/drivers/net/netxen/netxen_nic_init.c index 1811bcb8c380ad53019033c7e43df01e0ff0912b..37589265297edd86fa6047f2181c6e72fec1e5c1 100644 --- a/drivers/net/netxen/netxen_nic_init.c +++ b/drivers/net/netxen/netxen_nic_init.c @@ -1118,10 +1118,13 @@ netxen_process_rcv(struct netxen_adapter *adapter, int ctxid, skb = (struct sk_buff *)buffer->skb; - if (likely(netxen_get_sts_status(desc) == STATUS_CKSUM_OK)) { + if (likely(adapter->rx_csum && + netxen_get_sts_status(desc) == STATUS_CKSUM_OK)) { adapter->stats.csummed++; skb->ip_summed = CHECKSUM_UNNECESSARY; - } + } else + skb->ip_summed = CHECKSUM_NONE; + skb->dev = netdev; if (desc_ctx == RCV_DESC_LRO_CTXID) { /* True length was only available on the last pkt */ diff --git a/drivers/net/netxen/netxen_nic_main.c b/drivers/net/netxen/netxen_nic_main.c index 3122d0101638a0d232ac6f983ed6c19632dfd05f..2a1d6d7ec351d37f4a7189f2e3987d76a5a8990c 100644 --- a/drivers/net/netxen/netxen_nic_main.c +++ b/drivers/net/netxen/netxen_nic_main.c @@ -39,7 +39,6 @@ #include "netxen_nic_phan_reg.h" #include -#include #include MODULE_DESCRIPTION("NetXen Multi port (1/10) Gigabit Network Driver"); @@ -68,7 +67,7 @@ static void netxen_tx_timeout(struct net_device *netdev); static void netxen_tx_timeout_task(struct work_struct *work); static void netxen_watchdog(unsigned long); static int netxen_handle_int(struct netxen_adapter *, struct net_device *); -static int netxen_nic_poll(struct net_device *dev, int *budget); +static int netxen_nic_poll(struct napi_struct *napi, int budget); #ifdef CONFIG_NET_POLL_CONTROLLER static void netxen_nic_poll_controller(struct net_device *netdev); #endif @@ -286,6 +285,7 @@ netxen_nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) int valid_mac = 0; u32 val; int pci_func_id = PCI_FUNC(pdev->devfn); + DECLARE_MAC_BUF(mac); printk(KERN_INFO "%s \n", netxen_nic_driver_string); @@ -326,11 +326,9 @@ netxen_nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) goto err_out_free_res; } - SET_MODULE_OWNER(netdev); SET_NETDEV_DEV(netdev, &pdev->dev); adapter = netdev->priv; - memset(adapter, 0 , sizeof(struct netxen_adapter)); adapter->ahw.pdev = pdev; adapter->ahw.pci_func = pci_func_id; @@ -402,12 +400,16 @@ netxen_nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) adapter->netdev = netdev; adapter->pdev = pdev; + netif_napi_add(netdev, &adapter->napi, + netxen_nic_poll, NETXEN_NETDEV_WEIGHT); + /* this will be read from FW later */ adapter->intr_scheme = -1; /* This will be reset for mezz cards */ adapter->portnum = pci_func_id; adapter->status &= ~NETXEN_NETDEV_STATUS; + adapter->rx_csum = 1; netdev->open = netxen_nic_open; netdev->stop = netxen_nic_close; @@ -422,8 +424,6 @@ netxen_nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) netxen_nic_change_mtu(netdev, netdev->mtu); SET_ETHTOOL_OPS(netdev, &netxen_nic_ethtool_ops); - netdev->poll = netxen_nic_poll; - netdev->weight = NETXEN_NETDEV_WEIGHT; #ifdef CONFIG_NET_POLL_CONTROLLER 
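[Editor's sketch] The receive-path change above only marks a packet as already checksummed when the new rx_csum flag is set and the descriptor reported STATUS_CKSUM_OK; everything else is handed to the stack for software verification. A minimal sketch of that decision, with generic enum values standing in for CHECKSUM_UNNECESSARY / CHECKSUM_NONE.

#include <stdio.h>

enum csum_state { CSUM_NONE, CSUM_UNNECESSARY };

static enum csum_state rx_csum_state(int rx_csum_enabled, int hw_status_ok)
{
	if (rx_csum_enabled && hw_status_ok)
		return CSUM_UNNECESSARY;	/* skip software checksum */
	return CSUM_NONE;			/* let the stack verify it */
}

int main(void)
{
	printf("%d %d\n",
	       rx_csum_state(1, 1),		/* 1: hardware validated it */
	       rx_csum_state(0, 1));		/* 0: user disabled the offload */
	return 0;
}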
netdev->poll_controller = netxen_nic_poll_controller; #endif @@ -575,15 +575,9 @@ netxen_nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) memcpy(netdev->perm_addr, netdev->dev_addr, netdev->addr_len); if (!is_valid_ether_addr(netdev->perm_addr)) { - printk(KERN_ERR "%s: Bad MAC address " - "%02x:%02x:%02x:%02x:%02x:%02x.\n", - netxen_nic_driver_name, - netdev->dev_addr[0], - netdev->dev_addr[1], - netdev->dev_addr[2], - netdev->dev_addr[3], - netdev->dev_addr[4], - netdev->dev_addr[5]); + printk(KERN_ERR "%s: Bad MAC address %s.\n", + netxen_nic_driver_name, + print_mac(mac, netdev->dev_addr)); } else { if (adapter->macaddr_set) adapter->macaddr_set(adapter, @@ -885,6 +879,8 @@ static int netxen_nic_open(struct net_device *netdev) if (!adapter->driver_mismatch) mod_timer(&adapter->watchdog_timer, jiffies); + napi_enable(&adapter->napi); + netxen_nic_enable_int(adapter); /* Done here again so that even if phantom sw overwrote it, @@ -894,6 +890,7 @@ static int netxen_nic_open(struct net_device *netdev) del_timer_sync(&adapter->watchdog_timer); printk(KERN_ERR "%s: Failed to initialize port %d\n", netxen_nic_driver_name, adapter->portnum); + napi_disable(&adapter->napi); return -EIO; } if (adapter->macaddr_set) @@ -923,6 +920,7 @@ static int netxen_nic_close(struct net_device *netdev) netif_carrier_off(netdev); netif_stop_queue(netdev); + napi_disable(&adapter->napi); netxen_nic_disable_int(adapter); @@ -1243,11 +1241,11 @@ netxen_handle_int(struct netxen_adapter *adapter, struct net_device *netdev) netxen_nic_disable_int(adapter); if (netxen_nic_rx_has_work(adapter) || netxen_nic_tx_has_work(adapter)) { - if (netif_rx_schedule_prep(netdev)) { + if (netif_rx_schedule_prep(netdev, &adapter->napi)) { /* * Interrupts are already disabled. */ - __netif_rx_schedule(netdev); + __netif_rx_schedule(netdev, &adapter->napi); } else { static unsigned int intcount = 0; if ((++intcount & 0xfff) == 0xfff) @@ -1305,14 +1303,13 @@ irqreturn_t netxen_intr(int irq, void *data) return IRQ_HANDLED; } -static int netxen_nic_poll(struct net_device *netdev, int *budget) +static int netxen_nic_poll(struct napi_struct *napi, int budget) { - struct netxen_adapter *adapter = netdev_priv(netdev); - int work_to_do = min(*budget, netdev->quota); + struct netxen_adapter *adapter = container_of(napi, struct netxen_adapter, napi); + struct net_device *netdev = adapter->netdev; int done = 1; int ctx; - int this_work_done; - int work_done = 0; + int work_done; DPRINTK(INFO, "polling for %d descriptors\n", *budget); @@ -1330,16 +1327,11 @@ static int netxen_nic_poll(struct net_device *netdev, int *budget) * packets are on one context, it gets only half of the quota, * and ends up not processing it. 
*/ - this_work_done = netxen_process_rcv_ring(adapter, ctx, - work_to_do / - MAX_RCV_CTX); - work_done += this_work_done; + work_done += netxen_process_rcv_ring(adapter, ctx, + budget / MAX_RCV_CTX); } - netdev->quota -= work_done; - *budget -= work_done; - - if (work_done >= work_to_do && netxen_nic_rx_has_work(adapter) != 0) + if (work_done >= budget && netxen_nic_rx_has_work(adapter) != 0) done = 0; if (netxen_process_cmd_ring((unsigned long)adapter) == 0) @@ -1348,11 +1340,11 @@ static int netxen_nic_poll(struct net_device *netdev, int *budget) DPRINTK(INFO, "new work_done: %d work_to_do: %d\n", work_done, work_to_do); if (done) { - netif_rx_complete(netdev); + netif_rx_complete(netdev, napi); netxen_nic_enable_int(adapter); } - return !done; + return work_done; } #ifdef CONFIG_NET_POLL_CONTROLLER diff --git a/drivers/net/netxen/netxen_nic_niu.c b/drivers/net/netxen/netxen_nic_niu.c index 05e0577a0e103b55a2c8431595ebc2cb679a4675..5b9e1b300fab24f06b879802fe00e3493ed69862 100644 --- a/drivers/net/netxen/netxen_nic_niu.c +++ b/drivers/net/netxen/netxen_nic_niu.c @@ -603,6 +603,7 @@ int netxen_niu_macaddr_set(struct netxen_adapter *adapter, int phy = physical_port[adapter->portnum]; unsigned char mac_addr[6]; int i; + DECLARE_MAC_BUF(mac); for (i = 0; i < 10; i++) { temp[0] = temp[1] = 0; @@ -627,15 +628,10 @@ int netxen_niu_macaddr_set(struct netxen_adapter *adapter, if (i == 10) { printk(KERN_ERR "%s: cannot set Mac addr for %s\n", netxen_nic_driver_name, adapter->netdev->name); - printk(KERN_ERR "MAC address set: " - "%02x:%02x:%02x:%02x:%02x:%02x.\n", - addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]); - - printk(KERN_ERR "MAC address get: " - "%02x:%02x:%02x:%02x:%02x:%02x.\n", - mac_addr[0], - mac_addr[1], - mac_addr[2], mac_addr[3], mac_addr[4], mac_addr[5]); + printk(KERN_ERR "MAC address set: %s.\n", + print_mac(mac, addr)); + printk(KERN_ERR "MAC address get: %s.\n", + print_mac(mac, mac_addr)); } return 0; } diff --git a/drivers/net/ni5010.c b/drivers/net/ni5010.c index 22a3b3dc7d89ba7b248a20e75d0234de5a464820..14a768fbce2e67a9940f72df10e2030fc2d06fb1 100644 --- a/drivers/net/ni5010.c +++ b/drivers/net/ni5010.c @@ -89,7 +89,6 @@ static unsigned int ports[] __initdata = /* Information that needs to be kept for each board. */ struct ni5010_local { - struct net_device_stats stats; int o_pkt_size; spinlock_t lock; }; @@ -103,7 +102,6 @@ static irqreturn_t ni5010_interrupt(int irq, void *dev_id); static void ni5010_rx(struct net_device *dev); static void ni5010_timeout(struct net_device *dev); static int ni5010_close(struct net_device *dev); -static struct net_device_stats *ni5010_get_stats(struct net_device *dev); static void ni5010_set_multicast_list(struct net_device *dev); static void reset_receiver(struct net_device *dev); @@ -135,8 +133,6 @@ struct net_device * __init ni5010_probe(int unit) PRINTK2((KERN_DEBUG "%s: Entering ni5010_probe\n", dev->name)); - SET_MODULE_OWNER(dev); - if (io > 0x1ff) { /* Check a single specified location. */ err = ni5010_probe1(dev, io); } else if (io != 0) { /* Don't probe at all. 
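[Editor's sketch] The netxen poll routine above now follows the new NAPI contract: it receives its budget as an argument, returns the amount of work done, and completes/re-enables interrupts only once the rings are drained. The following is a stubbed control-flow sketch of that contract, not the driver itself; process_ring(), rings_have_work() and complete_and_enable_irq() are stand-ins.

#include <stdio.h>

#define MAX_RCV_CTX 2

static int pending = 37;			/* pretend outstanding descriptors */

static int process_ring(int ctx, int quota)
{
	int n = pending < quota ? pending : quota;

	(void)ctx;
	pending -= n;
	return n;
}

static int rings_have_work(void) { return pending != 0; }
static void complete_and_enable_irq(void) { printf("irq re-enabled\n"); }

static int napi_style_poll(int budget)
{
	int ctx, work_done = 0;

	for (ctx = 0; ctx < MAX_RCV_CTX; ctx++)
		work_done += process_ring(ctx, budget / MAX_RCV_CTX);

	if (work_done < budget && !rings_have_work())
		complete_and_enable_irq();	/* only when fully drained */

	return work_done;
}

int main(void)
{
	printf("polled %d\n", napi_style_poll(16));	/* 16, budget exhausted */
	printf("polled %d\n", napi_style_poll(64));	/* remaining 21, completes */
	return 0;
}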
*/ @@ -207,6 +203,7 @@ static int __init ni5010_probe1(struct net_device *dev, int ioaddr) unsigned int data = 0; int boguscount = 40; int err = -ENODEV; + DECLARE_MAC_BUF(mac); dev->base_addr = ioaddr; dev->irq = irq; @@ -272,8 +269,9 @@ static int __init ni5010_probe1(struct net_device *dev, int ioaddr) for (i=0; i<6; i++) { outw(i, IE_GP); - printk("%2.2x ", dev->dev_addr[i] = inb(IE_SAPROM)); + dev->dev_addr[i] = inb(IE_SAPROM); } + printk("%s ", print_mac(mac, dev->dev_addr)); PRINTK2((KERN_DEBUG "%s: I/O #4 passed!\n", dev->name)); @@ -336,7 +334,6 @@ static int __init ni5010_probe1(struct net_device *dev, int ioaddr) dev->open = ni5010_open; dev->stop = ni5010_close; dev->hard_start_xmit = ni5010_send_packet; - dev->get_stats = ni5010_get_stats; dev->set_multicast_list = ni5010_set_multicast_list; dev->tx_timeout = ni5010_timeout; dev->watchdog_timeo = HZ/20; @@ -534,11 +531,11 @@ static void ni5010_rx(struct net_device *dev) if ( (rcv_stat & RS_VALID_BITS) != RS_PKT_OK) { PRINTK((KERN_INFO "%s: receive error.\n", dev->name)); - lp->stats.rx_errors++; - if (rcv_stat & RS_RUNT) lp->stats.rx_length_errors++; - if (rcv_stat & RS_ALIGN) lp->stats.rx_frame_errors++; - if (rcv_stat & RS_CRC_ERR) lp->stats.rx_crc_errors++; - if (rcv_stat & RS_OFLW) lp->stats.rx_fifo_errors++; + dev->stats.rx_errors++; + if (rcv_stat & RS_RUNT) dev->stats.rx_length_errors++; + if (rcv_stat & RS_ALIGN) dev->stats.rx_frame_errors++; + if (rcv_stat & RS_CRC_ERR) dev->stats.rx_crc_errors++; + if (rcv_stat & RS_OFLW) dev->stats.rx_fifo_errors++; outb(0xff, EDLC_RCLR); /* Clear the interrupt */ return; } @@ -549,8 +546,8 @@ static void ni5010_rx(struct net_device *dev) if (i_pkt_size > ETH_FRAME_LEN || i_pkt_size < 10 ) { PRINTK((KERN_DEBUG "%s: Packet size error, packet size = %#4.4x\n", dev->name, i_pkt_size)); - lp->stats.rx_errors++; - lp->stats.rx_length_errors++; + dev->stats.rx_errors++; + dev->stats.rx_length_errors++; return; } @@ -558,7 +555,7 @@ static void ni5010_rx(struct net_device *dev) skb = dev_alloc_skb(i_pkt_size + 3); if (skb == NULL) { printk(KERN_WARNING "%s: Memory squeeze, dropping packet.\n", dev->name); - lp->stats.rx_dropped++; + dev->stats.rx_dropped++; return; } @@ -575,8 +572,8 @@ static void ni5010_rx(struct net_device *dev) skb->protocol = eth_type_trans(skb,dev); netif_rx(skb); dev->last_rx = jiffies; - lp->stats.rx_packets++; - lp->stats.rx_bytes += i_pkt_size; + dev->stats.rx_packets++; + dev->stats.rx_bytes += i_pkt_size; PRINTK2((KERN_DEBUG "%s: Received packet, size=%#4.4x\n", dev->name, i_pkt_size)); @@ -604,14 +601,14 @@ static int process_xmt_interrupt(struct net_device *dev) /* outb(0, IE_MMODE); */ /* xmt buf on sysbus FIXME: needed ? */ outb(MM_EN_XMT | MM_MUX, IE_MMODE); outb(XM_ALL, EDLC_XMASK); /* Enable xmt IRQ's */ - lp->stats.collisions++; + dev->stats.collisions++; return 1; } /* FIXME: handle other xmt error conditions */ - lp->stats.tx_packets++; - lp->stats.tx_bytes += lp->o_pkt_size; + dev->stats.tx_packets++; + dev->stats.tx_bytes += lp->o_pkt_size; netif_wake_queue(dev); PRINTK2((KERN_DEBUG "%s: sent packet, size=%#4.4x\n", @@ -640,24 +637,6 @@ static int ni5010_close(struct net_device *dev) } -/* Get the current statistics. This may be called with the card open or - closed. 
*/ -static struct net_device_stats *ni5010_get_stats(struct net_device *dev) -{ - struct ni5010_local *lp = netdev_priv(dev); - - PRINTK2((KERN_DEBUG "%s: entering ni5010_get_stats\n", dev->name)); - - if (NI5010_DEBUG) ni5010_show_registers(dev); - - /* cli(); */ - /* Update the statistics from the device registers. */ - /* We do this in the interrupt handler */ - /* sti(); */ - - return &lp->stats; -} - /* Set or clear the multicast filter for this adaptor. num_addrs == -1 Promiscuous mode, receive all packets num_addrs == 0 Normal mode, clear multicast list diff --git a/drivers/net/ni52.c b/drivers/net/ni52.c index 5e7999db2096a69cd0e67bdaae38de4a72868889..6b3384a24f072dbd6b7b4e5c50d4ff6dc5483178 100644 --- a/drivers/net/ni52.c +++ b/drivers/net/ni52.c @@ -382,8 +382,6 @@ struct net_device * __init ni52_probe(int unit) memend = dev->mem_end; } - SET_MODULE_OWNER(dev); - if (io > 0x1ff) { /* Check a single specified location. */ err = ni52_probe1(dev, io); } else if (io > 0) { /* Don't probe at all. */ diff --git a/drivers/net/ni65.c b/drivers/net/ni65.c index 4ef5fe345191fa6122499aae9242702fe8a7ca77..09768524511281d06fd881f9faabdd4c4717e1bf 100644 --- a/drivers/net/ni65.c +++ b/drivers/net/ni65.c @@ -550,7 +550,6 @@ static int __init ni65_probe1(struct net_device *dev,int ioaddr) } dev->base_addr = ioaddr; - SET_MODULE_OWNER(dev); dev->open = ni65_open; dev->stop = ni65_close; dev->hard_start_xmit = ni65_send_packet; diff --git a/drivers/net/niu.c b/drivers/net/niu.c new file mode 100644 index 0000000000000000000000000000000000000000..43bfe7e6b6f5c0c167885460c2a55871066b10ea --- /dev/null +++ b/drivers/net/niu.c @@ -0,0 +1,7939 @@ +/* niu.c: Neptune ethernet driver. + * + * Copyright (C) 2007 David S. Miller (davem@davemloft.net) + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#ifdef CONFIG_SPARC64 +#include +#endif + +#include "niu.h" + +#define DRV_MODULE_NAME "niu" +#define PFX DRV_MODULE_NAME ": " +#define DRV_MODULE_VERSION "0.5" +#define DRV_MODULE_RELDATE "October 5, 2007" + +static char version[] __devinitdata = + DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n"; + +MODULE_AUTHOR("David S. 
Miller (davem@davemloft.net)"); +MODULE_DESCRIPTION("NIU ethernet driver"); +MODULE_LICENSE("GPL"); +MODULE_VERSION(DRV_MODULE_VERSION); + +#ifndef DMA_44BIT_MASK +#define DMA_44BIT_MASK 0x00000fffffffffffULL +#endif + +#ifndef readq +static u64 readq(void __iomem *reg) +{ + return (((u64)readl(reg + 0x4UL) << 32) | + (u64)readl(reg)); +} + +static void writeq(u64 val, void __iomem *reg) +{ + writel(val & 0xffffffff, reg); + writel(val >> 32, reg + 0x4UL); +} +#endif + +static struct pci_device_id niu_pci_tbl[] = { + {PCI_DEVICE(PCI_VENDOR_ID_SUN, 0xabcd)}, + {} +}; + +MODULE_DEVICE_TABLE(pci, niu_pci_tbl); + +#define NIU_TX_TIMEOUT (5 * HZ) + +#define nr64(reg) readq(np->regs + (reg)) +#define nw64(reg, val) writeq((val), np->regs + (reg)) + +#define nr64_mac(reg) readq(np->mac_regs + (reg)) +#define nw64_mac(reg, val) writeq((val), np->mac_regs + (reg)) + +#define nr64_ipp(reg) readq(np->regs + np->ipp_off + (reg)) +#define nw64_ipp(reg, val) writeq((val), np->regs + np->ipp_off + (reg)) + +#define nr64_pcs(reg) readq(np->regs + np->pcs_off + (reg)) +#define nw64_pcs(reg, val) writeq((val), np->regs + np->pcs_off + (reg)) + +#define nr64_xpcs(reg) readq(np->regs + np->xpcs_off + (reg)) +#define nw64_xpcs(reg, val) writeq((val), np->regs + np->xpcs_off + (reg)) + +#define NIU_MSG_DEFAULT (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK) + +static int niu_debug; +static int debug = -1; +module_param(debug, int, 0); +MODULE_PARM_DESC(debug, "NIU debug level"); + +#define niudbg(TYPE, f, a...) \ +do { if ((np)->msg_enable & NETIF_MSG_##TYPE) \ + printk(KERN_DEBUG PFX f, ## a); \ +} while (0) + +#define niuinfo(TYPE, f, a...) \ +do { if ((np)->msg_enable & NETIF_MSG_##TYPE) \ + printk(KERN_INFO PFX f, ## a); \ +} while (0) + +#define niuwarn(TYPE, f, a...) 
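[Editor's sketch] On platforms without native readq()/writeq(), the fallback above builds the 64-bit value from two 32-bit accesses, writing the low word first. A userspace sketch of that split, with an ordinary variable pair standing in for the memory-mapped register; real MMIO would go through readl()/writel() and honour device ordering.

#include <stdint.h>
#include <stdio.h>

static uint32_t fake_reg[2];	/* [0] = low word, [1] = high word */

static uint64_t read64_split(void)
{
	return ((uint64_t)fake_reg[1] << 32) | fake_reg[0];
}

static void write64_split(uint64_t val)
{
	fake_reg[0] = (uint32_t)(val & 0xffffffff);	/* low word first */
	fake_reg[1] = (uint32_t)(val >> 32);		/* then high word */
}

int main(void)
{
	write64_split(0x123456789abcdef0ULL);
	printf("%#llx\n", (unsigned long long)read64_split());
	return 0;
}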
\ +do { if ((np)->msg_enable & NETIF_MSG_##TYPE) \ + printk(KERN_WARNING PFX f, ## a); \ +} while (0) + +#define niu_lock_parent(np, flags) \ + spin_lock_irqsave(&np->parent->lock, flags) +#define niu_unlock_parent(np, flags) \ + spin_unlock_irqrestore(&np->parent->lock, flags) + +static int __niu_wait_bits_clear_mac(struct niu *np, unsigned long reg, + u64 bits, int limit, int delay) +{ + while (--limit >= 0) { + u64 val = nr64_mac(reg); + + if (!(val & bits)) + break; + udelay(delay); + } + if (limit < 0) + return -ENODEV; + return 0; +} + +static int __niu_set_and_wait_clear_mac(struct niu *np, unsigned long reg, + u64 bits, int limit, int delay, + const char *reg_name) +{ + int err; + + nw64_mac(reg, bits); + err = __niu_wait_bits_clear_mac(np, reg, bits, limit, delay); + if (err) + dev_err(np->device, PFX "%s: bits (%llx) of register %s " + "would not clear, val[%llx]\n", + np->dev->name, (unsigned long long) bits, reg_name, + (unsigned long long) nr64_mac(reg)); + return err; +} + +#define niu_set_and_wait_clear_mac(NP, REG, BITS, LIMIT, DELAY, REG_NAME) \ +({ BUILD_BUG_ON(LIMIT <= 0 || DELAY < 0); \ + __niu_set_and_wait_clear_mac(NP, REG, BITS, LIMIT, DELAY, REG_NAME); \ +}) + +static int __niu_wait_bits_clear_ipp(struct niu *np, unsigned long reg, + u64 bits, int limit, int delay) +{ + while (--limit >= 0) { + u64 val = nr64_ipp(reg); + + if (!(val & bits)) + break; + udelay(delay); + } + if (limit < 0) + return -ENODEV; + return 0; +} + +static int __niu_set_and_wait_clear_ipp(struct niu *np, unsigned long reg, + u64 bits, int limit, int delay, + const char *reg_name) +{ + int err; + u64 val; + + val = nr64_ipp(reg); + val |= bits; + nw64_ipp(reg, val); + + err = __niu_wait_bits_clear_ipp(np, reg, bits, limit, delay); + if (err) + dev_err(np->device, PFX "%s: bits (%llx) of register %s " + "would not clear, val[%llx]\n", + np->dev->name, (unsigned long long) bits, reg_name, + (unsigned long long) nr64_ipp(reg)); + return err; +} + +#define niu_set_and_wait_clear_ipp(NP, REG, BITS, LIMIT, DELAY, REG_NAME) \ +({ BUILD_BUG_ON(LIMIT <= 0 || DELAY < 0); \ + __niu_set_and_wait_clear_ipp(NP, REG, BITS, LIMIT, DELAY, REG_NAME); \ +}) + +static int __niu_wait_bits_clear(struct niu *np, unsigned long reg, + u64 bits, int limit, int delay) +{ + while (--limit >= 0) { + u64 val = nr64(reg); + + if (!(val & bits)) + break; + udelay(delay); + } + if (limit < 0) + return -ENODEV; + return 0; +} + +#define niu_wait_bits_clear(NP, REG, BITS, LIMIT, DELAY) \ +({ BUILD_BUG_ON(LIMIT <= 0 || DELAY < 0); \ + __niu_wait_bits_clear(NP, REG, BITS, LIMIT, DELAY); \ +}) + +static int __niu_set_and_wait_clear(struct niu *np, unsigned long reg, + u64 bits, int limit, int delay, + const char *reg_name) +{ + int err; + + nw64(reg, bits); + err = __niu_wait_bits_clear(np, reg, bits, limit, delay); + if (err) + dev_err(np->device, PFX "%s: bits (%llx) of register %s " + "would not clear, val[%llx]\n", + np->dev->name, (unsigned long long) bits, reg_name, + (unsigned long long) nr64(reg)); + return err; +} + +#define niu_set_and_wait_clear(NP, REG, BITS, LIMIT, DELAY, REG_NAME) \ +({ BUILD_BUG_ON(LIMIT <= 0 || DELAY < 0); \ + __niu_set_and_wait_clear(NP, REG, BITS, LIMIT, DELAY, REG_NAME); \ +}) + +static void niu_ldg_rearm(struct niu *np, struct niu_ldg *lp, int on) +{ + u64 val = (u64) lp->timer; + + if (on) + val |= LDG_IMGMT_ARM; + + nw64(LDG_IMGMT(lp->ldg_num), val); +} + +static int niu_ldn_irq_enable(struct niu *np, int ldn, int on) +{ + unsigned long mask_reg, bits; + u64 val; + + if (ldn < 0 || ldn > 
LDN_MAX) + return -EINVAL; + + if (ldn < 64) { + mask_reg = LD_IM0(ldn); + bits = LD_IM0_MASK; + } else { + mask_reg = LD_IM1(ldn - 64); + bits = LD_IM1_MASK; + } + + val = nr64(mask_reg); + if (on) + val &= ~bits; + else + val |= bits; + nw64(mask_reg, val); + + return 0; +} + +static int niu_enable_ldn_in_ldg(struct niu *np, struct niu_ldg *lp, int on) +{ + struct niu_parent *parent = np->parent; + int i; + + for (i = 0; i <= LDN_MAX; i++) { + int err; + + if (parent->ldg_map[i] != lp->ldg_num) + continue; + + err = niu_ldn_irq_enable(np, i, on); + if (err) + return err; + } + return 0; +} + +static int niu_enable_interrupts(struct niu *np, int on) +{ + int i; + + for (i = 0; i < np->num_ldg; i++) { + struct niu_ldg *lp = &np->ldg[i]; + int err; + + err = niu_enable_ldn_in_ldg(np, lp, on); + if (err) + return err; + } + for (i = 0; i < np->num_ldg; i++) + niu_ldg_rearm(np, &np->ldg[i], on); + + return 0; +} + +static u32 phy_encode(u32 type, int port) +{ + return (type << (port * 2)); +} + +static u32 phy_decode(u32 val, int port) +{ + return (val >> (port * 2)) & PORT_TYPE_MASK; +} + +static int mdio_wait(struct niu *np) +{ + int limit = 1000; + u64 val; + + while (--limit > 0) { + val = nr64(MIF_FRAME_OUTPUT); + if ((val >> MIF_FRAME_OUTPUT_TA_SHIFT) & 0x1) + return val & MIF_FRAME_OUTPUT_DATA; + + udelay(10); + } + + return -ENODEV; +} + +static int mdio_read(struct niu *np, int port, int dev, int reg) +{ + int err; + + nw64(MIF_FRAME_OUTPUT, MDIO_ADDR_OP(port, dev, reg)); + err = mdio_wait(np); + if (err < 0) + return err; + + nw64(MIF_FRAME_OUTPUT, MDIO_READ_OP(port, dev)); + return mdio_wait(np); +} + +static int mdio_write(struct niu *np, int port, int dev, int reg, int data) +{ + int err; + + nw64(MIF_FRAME_OUTPUT, MDIO_ADDR_OP(port, dev, reg)); + err = mdio_wait(np); + if (err < 0) + return err; + + nw64(MIF_FRAME_OUTPUT, MDIO_WRITE_OP(port, dev, data)); + err = mdio_wait(np); + if (err < 0) + return err; + + return 0; +} + +static int mii_read(struct niu *np, int port, int reg) +{ + nw64(MIF_FRAME_OUTPUT, MII_READ_OP(port, reg)); + return mdio_wait(np); +} + +static int mii_write(struct niu *np, int port, int reg, int data) +{ + int err; + + nw64(MIF_FRAME_OUTPUT, MII_WRITE_OP(port, reg, data)); + err = mdio_wait(np); + if (err < 0) + return err; + + return 0; +} + +static int esr2_set_tx_cfg(struct niu *np, unsigned long channel, u32 val) +{ + int err; + + err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR, + ESR2_TI_PLL_TX_CFG_L(channel), + val & 0xffff); + if (!err) + err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR, + ESR2_TI_PLL_TX_CFG_H(channel), + val >> 16); + return err; +} + +static int esr2_set_rx_cfg(struct niu *np, unsigned long channel, u32 val) +{ + int err; + + err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR, + ESR2_TI_PLL_RX_CFG_L(channel), + val & 0xffff); + if (!err) + err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR, + ESR2_TI_PLL_RX_CFG_H(channel), + val >> 16); + return err; +} + +/* Mode is always 10G fiber. 
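[Editor's sketch] mdio_read() above is a two-frame sequence: an address frame selects the register, then a read frame returns the data, with mdio_wait() polling a bounded number of times for the turnaround bit after each. The sketch below mirrors that sequencing with everything hardware-facing stubbed out; the frame encodings and register contents are invented.

#include <stdio.h>

static int frame_countdown;
static int fake_reg_value = 0x1234;

/* Stand-ins for writing MIF_FRAME_OUTPUT and reading it back. */
static void issue_frame(int is_read) { (void)is_read; frame_countdown = 3; }
static int frame_output(void)
{
	return --frame_countdown ? -1 : fake_reg_value;
}

/* Bounded poll for the turnaround/completion bit, as in mdio_wait(). */
static int wait_frame(void)
{
	int limit = 1000, val;

	while (--limit > 0) {
		val = frame_output();
		if (val >= 0)
			return val;
		/* a real driver would udelay() here */
	}
	return -1;			/* device never answered */
}

static int mdio_read_sketch(int dev, int reg)
{
	int err;

	(void)dev; (void)reg;		/* a real frame would encode these */

	issue_frame(0);			/* ADDRESS frame selects the register */
	err = wait_frame();
	if (err < 0)
		return err;
	issue_frame(1);			/* READ frame carries the data back */
	return wait_frame();
}

int main(void)
{
	printf("%#x\n", mdio_read_sketch(1, 5));	/* 0x1234 */
	return 0;
}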
*/ +static int serdes_init_niu(struct niu *np) +{ + struct niu_link_config *lp = &np->link_config; + u32 tx_cfg, rx_cfg; + unsigned long i; + + tx_cfg = (PLL_TX_CFG_ENTX | PLL_TX_CFG_SWING_1375MV); + rx_cfg = (PLL_RX_CFG_ENRX | PLL_RX_CFG_TERM_0P8VDDT | + PLL_RX_CFG_ALIGN_ENA | PLL_RX_CFG_LOS_LTHRESH | + PLL_RX_CFG_EQ_LP_ADAPTIVE); + + if (lp->loopback_mode == LOOPBACK_PHY) { + u16 test_cfg = PLL_TEST_CFG_LOOPBACK_CML_DIS; + + mdio_write(np, np->port, NIU_ESR2_DEV_ADDR, + ESR2_TI_PLL_TEST_CFG_L, test_cfg); + + tx_cfg |= PLL_TX_CFG_ENTEST; + rx_cfg |= PLL_RX_CFG_ENTEST; + } + + /* Initialize all 4 lanes of the SERDES. */ + for (i = 0; i < 4; i++) { + int err = esr2_set_tx_cfg(np, i, tx_cfg); + if (err) + return err; + } + + for (i = 0; i < 4; i++) { + int err = esr2_set_rx_cfg(np, i, rx_cfg); + if (err) + return err; + } + + return 0; +} + +static int esr_read_rxtx_ctrl(struct niu *np, unsigned long chan, u32 *val) +{ + int err; + + err = mdio_read(np, np->port, NIU_ESR_DEV_ADDR, ESR_RXTX_CTRL_L(chan)); + if (err >= 0) { + *val = (err & 0xffff); + err = mdio_read(np, np->port, NIU_ESR_DEV_ADDR, + ESR_RXTX_CTRL_H(chan)); + if (err >= 0) + *val |= ((err & 0xffff) << 16); + err = 0; + } + return err; +} + +static int esr_read_glue0(struct niu *np, unsigned long chan, u32 *val) +{ + int err; + + err = mdio_read(np, np->port, NIU_ESR_DEV_ADDR, + ESR_GLUE_CTRL0_L(chan)); + if (err >= 0) { + *val = (err & 0xffff); + err = mdio_read(np, np->port, NIU_ESR_DEV_ADDR, + ESR_GLUE_CTRL0_H(chan)); + if (err >= 0) { + *val |= ((err & 0xffff) << 16); + err = 0; + } + } + return err; +} + +static int esr_read_reset(struct niu *np, u32 *val) +{ + int err; + + err = mdio_read(np, np->port, NIU_ESR_DEV_ADDR, + ESR_RXTX_RESET_CTRL_L); + if (err >= 0) { + *val = (err & 0xffff); + err = mdio_read(np, np->port, NIU_ESR_DEV_ADDR, + ESR_RXTX_RESET_CTRL_H); + if (err >= 0) { + *val |= ((err & 0xffff) << 16); + err = 0; + } + } + return err; +} + +static int esr_write_rxtx_ctrl(struct niu *np, unsigned long chan, u32 val) +{ + int err; + + err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR, + ESR_RXTX_CTRL_L(chan), val & 0xffff); + if (!err) + err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR, + ESR_RXTX_CTRL_H(chan), (val >> 16)); + return err; +} + +static int esr_write_glue0(struct niu *np, unsigned long chan, u32 val) +{ + int err; + + err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR, + ESR_GLUE_CTRL0_L(chan), val & 0xffff); + if (!err) + err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR, + ESR_GLUE_CTRL0_H(chan), (val >> 16)); + return err; +} + +static int esr_reset(struct niu *np) +{ + u32 reset; + int err; + + err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR, + ESR_RXTX_RESET_CTRL_L, 0x0000); + if (err) + return err; + err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR, + ESR_RXTX_RESET_CTRL_H, 0xffff); + if (err) + return err; + udelay(200); + + err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR, + ESR_RXTX_RESET_CTRL_L, 0xffff); + if (err) + return err; + udelay(200); + + err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR, + ESR_RXTX_RESET_CTRL_H, 0x0000); + if (err) + return err; + udelay(200); + + err = esr_read_reset(np, &reset); + if (err) + return err; + if (reset != 0) { + dev_err(np->device, PFX "Port %u ESR_RESET " + "did not clear [%08x]\n", + np->port, reset); + return -ENODEV; + } + + return 0; +} + +static int serdes_init_10g(struct niu *np) +{ + struct niu_link_config *lp = &np->link_config; + unsigned long ctrl_reg, test_cfg_reg, i; + u64 ctrl_val, test_cfg_val, sig, mask, val; + int err; + + switch (np->port) { 
+ case 0: + ctrl_reg = ENET_SERDES_0_CTRL_CFG; + test_cfg_reg = ENET_SERDES_0_TEST_CFG; + break; + case 1: + ctrl_reg = ENET_SERDES_1_CTRL_CFG; + test_cfg_reg = ENET_SERDES_1_TEST_CFG; + break; + + default: + return -EINVAL; + } + ctrl_val = (ENET_SERDES_CTRL_SDET_0 | + ENET_SERDES_CTRL_SDET_1 | + ENET_SERDES_CTRL_SDET_2 | + ENET_SERDES_CTRL_SDET_3 | + (0x5 << ENET_SERDES_CTRL_EMPH_0_SHIFT) | + (0x5 << ENET_SERDES_CTRL_EMPH_1_SHIFT) | + (0x5 << ENET_SERDES_CTRL_EMPH_2_SHIFT) | + (0x5 << ENET_SERDES_CTRL_EMPH_3_SHIFT) | + (0x1 << ENET_SERDES_CTRL_LADJ_0_SHIFT) | + (0x1 << ENET_SERDES_CTRL_LADJ_1_SHIFT) | + (0x1 << ENET_SERDES_CTRL_LADJ_2_SHIFT) | + (0x1 << ENET_SERDES_CTRL_LADJ_3_SHIFT)); + test_cfg_val = 0; + + if (lp->loopback_mode == LOOPBACK_PHY) { + test_cfg_val |= ((ENET_TEST_MD_PAD_LOOPBACK << + ENET_SERDES_TEST_MD_0_SHIFT) | + (ENET_TEST_MD_PAD_LOOPBACK << + ENET_SERDES_TEST_MD_1_SHIFT) | + (ENET_TEST_MD_PAD_LOOPBACK << + ENET_SERDES_TEST_MD_2_SHIFT) | + (ENET_TEST_MD_PAD_LOOPBACK << + ENET_SERDES_TEST_MD_3_SHIFT)); + } + + nw64(ctrl_reg, ctrl_val); + nw64(test_cfg_reg, test_cfg_val); + + /* Initialize all 4 lanes of the SERDES. */ + for (i = 0; i < 4; i++) { + u32 rxtx_ctrl, glue0; + + err = esr_read_rxtx_ctrl(np, i, &rxtx_ctrl); + if (err) + return err; + err = esr_read_glue0(np, i, &glue0); + if (err) + return err; + + rxtx_ctrl &= ~(ESR_RXTX_CTRL_VMUXLO); + rxtx_ctrl |= (ESR_RXTX_CTRL_ENSTRETCH | + (2 << ESR_RXTX_CTRL_VMUXLO_SHIFT)); + + glue0 &= ~(ESR_GLUE_CTRL0_SRATE | + ESR_GLUE_CTRL0_THCNT | + ESR_GLUE_CTRL0_BLTIME); + glue0 |= (ESR_GLUE_CTRL0_RXLOSENAB | + (0xf << ESR_GLUE_CTRL0_SRATE_SHIFT) | + (0xff << ESR_GLUE_CTRL0_THCNT_SHIFT) | + (BLTIME_300_CYCLES << + ESR_GLUE_CTRL0_BLTIME_SHIFT)); + + err = esr_write_rxtx_ctrl(np, i, rxtx_ctrl); + if (err) + return err; + err = esr_write_glue0(np, i, glue0); + if (err) + return err; + } + + err = esr_reset(np); + if (err) + return err; + + sig = nr64(ESR_INT_SIGNALS); + switch (np->port) { + case 0: + mask = ESR_INT_SIGNALS_P0_BITS; + val = (ESR_INT_SRDY0_P0 | + ESR_INT_DET0_P0 | + ESR_INT_XSRDY_P0 | + ESR_INT_XDP_P0_CH3 | + ESR_INT_XDP_P0_CH2 | + ESR_INT_XDP_P0_CH1 | + ESR_INT_XDP_P0_CH0); + break; + + case 1: + mask = ESR_INT_SIGNALS_P1_BITS; + val = (ESR_INT_SRDY0_P1 | + ESR_INT_DET0_P1 | + ESR_INT_XSRDY_P1 | + ESR_INT_XDP_P1_CH3 | + ESR_INT_XDP_P1_CH2 | + ESR_INT_XDP_P1_CH1 | + ESR_INT_XDP_P1_CH0); + break; + + default: + return -EINVAL; + } + + if ((sig & mask) != val) { + dev_err(np->device, PFX "Port %u signal bits [%08x] are not " + "[%08x]\n", np->port, (int) (sig & mask), (int) val); + return -ENODEV; + } + + return 0; +} + +static int serdes_init_1g(struct niu *np) +{ + u64 val; + + val = nr64(ENET_SERDES_1_PLL_CFG); + val &= ~ENET_SERDES_PLL_FBDIV2; + switch (np->port) { + case 0: + val |= ENET_SERDES_PLL_HRATE0; + break; + case 1: + val |= ENET_SERDES_PLL_HRATE1; + break; + case 2: + val |= ENET_SERDES_PLL_HRATE2; + break; + case 3: + val |= ENET_SERDES_PLL_HRATE3; + break; + default: + return -EINVAL; + } + nw64(ENET_SERDES_1_PLL_CFG, val); + + return 0; +} + +static int bcm8704_reset(struct niu *np) +{ + int err, limit; + + err = mdio_read(np, np->phy_addr, + BCM8704_PHYXS_DEV_ADDR, MII_BMCR); + if (err < 0) + return err; + err |= BMCR_RESET; + err = mdio_write(np, np->phy_addr, BCM8704_PHYXS_DEV_ADDR, + MII_BMCR, err); + if (err) + return err; + + limit = 1000; + while (--limit >= 0) { + err = mdio_read(np, np->phy_addr, + BCM8704_PHYXS_DEV_ADDR, MII_BMCR); + if (err < 0) + return err; + if (!(err & BMCR_RESET)) + 
break; + } + if (limit < 0) { + dev_err(np->device, PFX "Port %u PHY will not reset " + "(bmcr=%04x)\n", np->port, (err & 0xffff)); + return -ENODEV; + } + return 0; +} + +/* When written, certain PHY registers need to be read back twice + * in order for the bits to settle properly. + */ +static int bcm8704_user_dev3_readback(struct niu *np, int reg) +{ + int err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR, reg); + if (err < 0) + return err; + err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR, reg); + if (err < 0) + return err; + return 0; +} + +static int bcm8704_init_user_dev3(struct niu *np) +{ + int err; + + err = mdio_write(np, np->phy_addr, + BCM8704_USER_DEV3_ADDR, BCM8704_USER_CONTROL, + (USER_CONTROL_OPTXRST_LVL | + USER_CONTROL_OPBIASFLT_LVL | + USER_CONTROL_OBTMPFLT_LVL | + USER_CONTROL_OPPRFLT_LVL | + USER_CONTROL_OPTXFLT_LVL | + USER_CONTROL_OPRXLOS_LVL | + USER_CONTROL_OPRXFLT_LVL | + USER_CONTROL_OPTXON_LVL | + (0x3f << USER_CONTROL_RES1_SHIFT))); + if (err) + return err; + + err = mdio_write(np, np->phy_addr, + BCM8704_USER_DEV3_ADDR, BCM8704_USER_PMD_TX_CONTROL, + (USER_PMD_TX_CTL_XFP_CLKEN | + (1 << USER_PMD_TX_CTL_TX_DAC_TXD_SH) | + (2 << USER_PMD_TX_CTL_TX_DAC_TXCK_SH) | + USER_PMD_TX_CTL_TSCK_LPWREN)); + if (err) + return err; + + err = bcm8704_user_dev3_readback(np, BCM8704_USER_CONTROL); + if (err) + return err; + err = bcm8704_user_dev3_readback(np, BCM8704_USER_PMD_TX_CONTROL); + if (err) + return err; + + err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR, + BCM8704_USER_OPT_DIGITAL_CTRL); + if (err < 0) + return err; + err &= ~USER_ODIG_CTRL_GPIOS; + err |= (0x3 << USER_ODIG_CTRL_GPIOS_SHIFT); + err = mdio_write(np, np->phy_addr, BCM8704_USER_DEV3_ADDR, + BCM8704_USER_OPT_DIGITAL_CTRL, err); + if (err) + return err; + + mdelay(1000); + + return 0; +} + +static int xcvr_init_10g(struct niu *np) +{ + struct niu_link_config *lp = &np->link_config; + u16 analog_stat0, tx_alarm_status; + int err; + u64 val; + + val = nr64_mac(XMAC_CONFIG); + val &= ~XMAC_CONFIG_LED_POLARITY; + val |= XMAC_CONFIG_FORCE_LED_ON; + nw64_mac(XMAC_CONFIG, val); + + /* XXX shared resource, lock parent XXX */ + val = nr64(MIF_CONFIG); + val |= MIF_CONFIG_INDIRECT_MODE; + nw64(MIF_CONFIG, val); + + err = bcm8704_reset(np); + if (err) + return err; + + err = bcm8704_init_user_dev3(np); + if (err) + return err; + + err = mdio_read(np, np->phy_addr, BCM8704_PCS_DEV_ADDR, + MII_BMCR); + if (err < 0) + return err; + err &= ~BMCR_LOOPBACK; + + if (lp->loopback_mode == LOOPBACK_MAC) + err |= BMCR_LOOPBACK; + + err = mdio_write(np, np->phy_addr, BCM8704_PCS_DEV_ADDR, + MII_BMCR, err); + if (err) + return err; + +#if 1 + err = mdio_read(np, np->phy_addr, BCM8704_PMA_PMD_DEV_ADDR, + MII_STAT1000); + if (err < 0) + return err; + pr_info(PFX "Port %u PMA_PMD(MII_STAT1000) [%04x]\n", + np->port, err); + + err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR, 0x20); + if (err < 0) + return err; + pr_info(PFX "Port %u USER_DEV3(0x20) [%04x]\n", + np->port, err); + + err = mdio_read(np, np->phy_addr, BCM8704_PHYXS_DEV_ADDR, + MII_NWAYTEST); + if (err < 0) + return err; + pr_info(PFX "Port %u PHYXS(MII_NWAYTEST) [%04x]\n", + np->port, err); +#endif + + /* XXX dig this out it might not be so useful XXX */ + err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR, + BCM8704_USER_ANALOG_STATUS0); + if (err < 0) + return err; + err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR, + BCM8704_USER_ANALOG_STATUS0); + if (err < 0) + return err; + analog_stat0 = err; + + err = 
mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR, + BCM8704_USER_TX_ALARM_STATUS); + if (err < 0) + return err; + err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR, + BCM8704_USER_TX_ALARM_STATUS); + if (err < 0) + return err; + tx_alarm_status = err; + + if (analog_stat0 != 0x03fc) { + if ((analog_stat0 == 0x43bc) && (tx_alarm_status != 0)) { + pr_info(PFX "Port %u cable not connected " + "or bad cable.\n", np->port); + } else if (analog_stat0 == 0x639c) { + pr_info(PFX "Port %u optical module is bad " + "or missing.\n", np->port); + } + } + + return 0; +} + +static int mii_reset(struct niu *np) +{ + int limit, err; + + err = mii_write(np, np->phy_addr, MII_BMCR, BMCR_RESET); + if (err) + return err; + + limit = 1000; + while (--limit >= 0) { + udelay(500); + err = mii_read(np, np->phy_addr, MII_BMCR); + if (err < 0) + return err; + if (!(err & BMCR_RESET)) + break; + } + if (limit < 0) { + dev_err(np->device, PFX "Port %u MII would not reset, " + "bmcr[%04x]\n", np->port, err); + return -ENODEV; + } + + return 0; +} + +static int mii_init_common(struct niu *np) +{ + struct niu_link_config *lp = &np->link_config; + u16 bmcr, bmsr, adv, estat; + int err; + + err = mii_reset(np); + if (err) + return err; + + err = mii_read(np, np->phy_addr, MII_BMSR); + if (err < 0) + return err; + bmsr = err; + + estat = 0; + if (bmsr & BMSR_ESTATEN) { + err = mii_read(np, np->phy_addr, MII_ESTATUS); + if (err < 0) + return err; + estat = err; + } + + bmcr = 0; + err = mii_write(np, np->phy_addr, MII_BMCR, bmcr); + if (err) + return err; + + if (lp->loopback_mode == LOOPBACK_MAC) { + bmcr |= BMCR_LOOPBACK; + if (lp->active_speed == SPEED_1000) + bmcr |= BMCR_SPEED1000; + if (lp->active_duplex == DUPLEX_FULL) + bmcr |= BMCR_FULLDPLX; + } + + if (lp->loopback_mode == LOOPBACK_PHY) { + u16 aux; + + aux = (BCM5464R_AUX_CTL_EXT_LB | + BCM5464R_AUX_CTL_WRITE_1); + err = mii_write(np, np->phy_addr, BCM5464R_AUX_CTL, aux); + if (err) + return err; + } + + /* XXX configurable XXX */ + /* XXX for now don't advertise half-duplex or asym pause... 
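[Editor's sketch] mii_reset() and bcm8704_reset() above both follow the usual PHY soft-reset recipe: write the self-clearing reset bit, then poll BMCR (bounded, with a delay) until the hardware drops it. A stubbed, standalone sketch of that recipe; the fake register simply models a PHY that needs a few reads to finish.

#include <stdio.h>

#define BMCR_RESET 0x8000

static int fake_bmcr;
static int reset_countdown;

static void phy_write_bmcr(int val) { fake_bmcr = val; reset_countdown = 3; }

static int phy_read_bmcr(void)
{
	if (reset_countdown && --reset_countdown == 0)
		fake_bmcr &= ~BMCR_RESET;	/* reset completed */
	return fake_bmcr;
}

static int phy_soft_reset(void)
{
	int limit = 1000;

	phy_write_bmcr(BMCR_RESET);
	while (--limit >= 0) {
		/* a real driver would udelay() between reads */
		if (!(phy_read_bmcr() & BMCR_RESET))
			return 0;
	}
	return -1;				/* PHY never came back */
}

int main(void)
{
	printf("%d\n", phy_soft_reset());	/* 0 */
	return 0;
}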
XXX */ + adv = ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP; + if (bmsr & BMSR_10FULL) + adv |= ADVERTISE_10FULL; + if (bmsr & BMSR_100FULL) + adv |= ADVERTISE_100FULL; + err = mii_write(np, np->phy_addr, MII_ADVERTISE, adv); + if (err) + return err; + + if (bmsr & BMSR_ESTATEN) { + u16 ctrl1000 = 0; + + if (estat & ESTATUS_1000_TFULL) + ctrl1000 |= ADVERTISE_1000FULL; + err = mii_write(np, np->phy_addr, MII_CTRL1000, ctrl1000); + if (err) + return err; + } + bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART); + + err = mii_write(np, np->phy_addr, MII_BMCR, bmcr); + if (err) + return err; + + err = mii_read(np, np->phy_addr, MII_BMCR); + if (err < 0) + return err; + err = mii_read(np, np->phy_addr, MII_BMSR); + if (err < 0) + return err; +#if 0 + pr_info(PFX "Port %u after MII init bmcr[%04x] bmsr[%04x]\n", + np->port, bmcr, bmsr); +#endif + + return 0; +} + +static int xcvr_init_1g(struct niu *np) +{ + u64 val; + + /* XXX shared resource, lock parent XXX */ + val = nr64(MIF_CONFIG); + val &= ~MIF_CONFIG_INDIRECT_MODE; + nw64(MIF_CONFIG, val); + + return mii_init_common(np); +} + +static int niu_xcvr_init(struct niu *np) +{ + const struct niu_phy_ops *ops = np->phy_ops; + int err; + + err = 0; + if (ops->xcvr_init) + err = ops->xcvr_init(np); + + return err; +} + +static int niu_serdes_init(struct niu *np) +{ + const struct niu_phy_ops *ops = np->phy_ops; + int err; + + err = 0; + if (ops->serdes_init) + err = ops->serdes_init(np); + + return err; +} + +static void niu_init_xif(struct niu *); + +static int niu_link_status_common(struct niu *np, int link_up) +{ + struct niu_link_config *lp = &np->link_config; + struct net_device *dev = np->dev; + unsigned long flags; + + if (!netif_carrier_ok(dev) && link_up) { + niuinfo(LINK, "%s: Link is up at %s, %s duplex\n", + dev->name, + (lp->active_speed == SPEED_10000 ? + "10Gb/sec" : + (lp->active_speed == SPEED_1000 ? + "1Gb/sec" : + (lp->active_speed == SPEED_100 ? + "100Mbit/sec" : "10Mbit/sec"))), + (lp->active_duplex == DUPLEX_FULL ? 
+ "full" : "half")); + + spin_lock_irqsave(&np->lock, flags); + niu_init_xif(np); + spin_unlock_irqrestore(&np->lock, flags); + + netif_carrier_on(dev); + } else if (netif_carrier_ok(dev) && !link_up) { + niuwarn(LINK, "%s: Link is down\n", dev->name); + netif_carrier_off(dev); + } + + return 0; +} + +static int link_status_10g(struct niu *np, int *link_up_p) +{ + unsigned long flags; + int err, link_up; + + link_up = 0; + + spin_lock_irqsave(&np->lock, flags); + + err = -EINVAL; + if (np->link_config.loopback_mode != LOOPBACK_DISABLED) + goto out; + + err = mdio_read(np, np->phy_addr, BCM8704_PMA_PMD_DEV_ADDR, + BCM8704_PMD_RCV_SIGDET); + if (err < 0) + goto out; + if (!(err & PMD_RCV_SIGDET_GLOBAL)) { + err = 0; + goto out; + } + + err = mdio_read(np, np->phy_addr, BCM8704_PCS_DEV_ADDR, + BCM8704_PCS_10G_R_STATUS); + if (err < 0) + goto out; + if (!(err & PCS_10G_R_STATUS_BLK_LOCK)) { + err = 0; + goto out; + } + + err = mdio_read(np, np->phy_addr, BCM8704_PHYXS_DEV_ADDR, + BCM8704_PHYXS_XGXS_LANE_STAT); + if (err < 0) + goto out; + + if (err != (PHYXS_XGXS_LANE_STAT_ALINGED | + PHYXS_XGXS_LANE_STAT_MAGIC | + PHYXS_XGXS_LANE_STAT_LANE3 | + PHYXS_XGXS_LANE_STAT_LANE2 | + PHYXS_XGXS_LANE_STAT_LANE1 | + PHYXS_XGXS_LANE_STAT_LANE0)) { + err = 0; + goto out; + } + + link_up = 1; + np->link_config.active_speed = SPEED_10000; + np->link_config.active_duplex = DUPLEX_FULL; + err = 0; + +out: + spin_unlock_irqrestore(&np->lock, flags); + + *link_up_p = link_up; + return err; +} + +static int link_status_1g(struct niu *np, int *link_up_p) +{ + u16 current_speed, bmsr; + unsigned long flags; + u8 current_duplex; + int err, link_up; + + link_up = 0; + current_speed = SPEED_INVALID; + current_duplex = DUPLEX_INVALID; + + spin_lock_irqsave(&np->lock, flags); + + err = -EINVAL; + if (np->link_config.loopback_mode != LOOPBACK_DISABLED) + goto out; + + err = mii_read(np, np->phy_addr, MII_BMSR); + if (err < 0) + goto out; + + bmsr = err; + if (bmsr & BMSR_LSTATUS) { + u16 adv, lpa, common, estat; + + err = mii_read(np, np->phy_addr, MII_ADVERTISE); + if (err < 0) + goto out; + adv = err; + + err = mii_read(np, np->phy_addr, MII_LPA); + if (err < 0) + goto out; + lpa = err; + + common = adv & lpa; + + err = mii_read(np, np->phy_addr, MII_ESTATUS); + if (err < 0) + goto out; + estat = err; + + link_up = 1; + if (estat & (ESTATUS_1000_TFULL | ESTATUS_1000_THALF)) { + current_speed = SPEED_1000; + if (estat & ESTATUS_1000_TFULL) + current_duplex = DUPLEX_FULL; + else + current_duplex = DUPLEX_HALF; + } else { + if (common & ADVERTISE_100BASE4) { + current_speed = SPEED_100; + current_duplex = DUPLEX_HALF; + } else if (common & ADVERTISE_100FULL) { + current_speed = SPEED_100; + current_duplex = DUPLEX_FULL; + } else if (common & ADVERTISE_100HALF) { + current_speed = SPEED_100; + current_duplex = DUPLEX_HALF; + } else if (common & ADVERTISE_10FULL) { + current_speed = SPEED_10; + current_duplex = DUPLEX_FULL; + } else if (common & ADVERTISE_10HALF) { + current_speed = SPEED_10; + current_duplex = DUPLEX_HALF; + } else + link_up = 0; + } + } + err = 0; + +out: + spin_unlock_irqrestore(&np->lock, flags); + + *link_up_p = link_up; + return err; +} + +static int niu_link_status(struct niu *np, int *link_up_p) +{ + const struct niu_phy_ops *ops = np->phy_ops; + int err; + + err = 0; + if (ops->link_status) + err = ops->link_status(np, link_up_p); + + return err; +} + +static void niu_timer(unsigned long __opaque) +{ + struct niu *np = (struct niu *) __opaque; + unsigned long off; + int err, link_up; + + err = 
niu_link_status(np, &link_up); + if (!err) + niu_link_status_common(np, link_up); + + if (netif_carrier_ok(np->dev)) + off = 5 * HZ; + else + off = 1 * HZ; + np->timer.expires = jiffies + off; + + add_timer(&np->timer); +} + +static const struct niu_phy_ops phy_ops_10g_fiber_niu = { + .serdes_init = serdes_init_niu, + .xcvr_init = xcvr_init_10g, + .link_status = link_status_10g, +}; + +static const struct niu_phy_ops phy_ops_10g_fiber = { + .serdes_init = serdes_init_10g, + .xcvr_init = xcvr_init_10g, + .link_status = link_status_10g, +}; + +static const struct niu_phy_ops phy_ops_10g_copper = { + .serdes_init = serdes_init_10g, + .link_status = link_status_10g, /* XXX */ +}; + +static const struct niu_phy_ops phy_ops_1g_fiber = { + .serdes_init = serdes_init_1g, + .xcvr_init = xcvr_init_1g, + .link_status = link_status_1g, +}; + +static const struct niu_phy_ops phy_ops_1g_copper = { + .xcvr_init = xcvr_init_1g, + .link_status = link_status_1g, +}; + +struct niu_phy_template { + const struct niu_phy_ops *ops; + u32 phy_addr_base; +}; + +static const struct niu_phy_template phy_template_niu = { + .ops = &phy_ops_10g_fiber_niu, + .phy_addr_base = 16, +}; + +static const struct niu_phy_template phy_template_10g_fiber = { + .ops = &phy_ops_10g_fiber, + .phy_addr_base = 8, +}; + +static const struct niu_phy_template phy_template_10g_copper = { + .ops = &phy_ops_10g_copper, + .phy_addr_base = 10, +}; + +static const struct niu_phy_template phy_template_1g_fiber = { + .ops = &phy_ops_1g_fiber, + .phy_addr_base = 0, +}; + +static const struct niu_phy_template phy_template_1g_copper = { + .ops = &phy_ops_1g_copper, + .phy_addr_base = 0, +}; + +static int niu_determine_phy_disposition(struct niu *np) +{ + struct niu_parent *parent = np->parent; + u8 plat_type = parent->plat_type; + const struct niu_phy_template *tp; + u32 phy_addr_off = 0; + + if (plat_type == PLAT_TYPE_NIU) { + tp = &phy_template_niu; + phy_addr_off += np->port; + } else { + switch (np->flags & (NIU_FLAGS_10G | NIU_FLAGS_FIBER)) { + case 0: + /* 1G copper */ + tp = &phy_template_1g_copper; + if (plat_type == PLAT_TYPE_VF_P0) + phy_addr_off = 10; + else if (plat_type == PLAT_TYPE_VF_P1) + phy_addr_off = 26; + + phy_addr_off += (np->port ^ 0x3); + break; + + case NIU_FLAGS_10G: + /* 10G copper */ + tp = &phy_template_1g_copper; + break; + + case NIU_FLAGS_FIBER: + /* 1G fiber */ + tp = &phy_template_1g_fiber; + break; + + case NIU_FLAGS_10G | NIU_FLAGS_FIBER: + /* 10G fiber */ + tp = &phy_template_10g_fiber; + if (plat_type == PLAT_TYPE_VF_P0 || + plat_type == PLAT_TYPE_VF_P1) + phy_addr_off = 8; + phy_addr_off += np->port; + break; + + default: + return -EINVAL; + } + } + + np->phy_ops = tp->ops; + np->phy_addr = tp->phy_addr_base + phy_addr_off; + + return 0; +} + +static int niu_init_link(struct niu *np) +{ + struct niu_parent *parent = np->parent; + int err, ignore; + + if (parent->plat_type == PLAT_TYPE_NIU) { + err = niu_xcvr_init(np); + if (err) + return err; + msleep(200); + } + err = niu_serdes_init(np); + if (err) + return err; + msleep(200); + err = niu_xcvr_init(np); + if (!err) + niu_link_status(np, &ignore); + return 0; +} + +static void niu_set_primary_mac(struct niu *np, unsigned char *addr) +{ + u16 reg0 = addr[4] << 8 | addr[5]; + u16 reg1 = addr[2] << 8 | addr[3]; + u16 reg2 = addr[0] << 8 | addr[1]; + + if (np->flags & NIU_FLAGS_XMAC) { + nw64_mac(XMAC_ADDR0, reg0); + nw64_mac(XMAC_ADDR1, reg1); + nw64_mac(XMAC_ADDR2, reg2); + } else { + nw64_mac(BMAC_ADDR0, reg0); + nw64_mac(BMAC_ADDR1, reg1); + 
nw64_mac(BMAC_ADDR2, reg2); + } +} + +static int niu_num_alt_addr(struct niu *np) +{ + if (np->flags & NIU_FLAGS_XMAC) + return XMAC_NUM_ALT_ADDR; + else + return BMAC_NUM_ALT_ADDR; +} + +static int niu_set_alt_mac(struct niu *np, int index, unsigned char *addr) +{ + u16 reg0 = addr[4] << 8 | addr[5]; + u16 reg1 = addr[2] << 8 | addr[3]; + u16 reg2 = addr[0] << 8 | addr[1]; + + if (index >= niu_num_alt_addr(np)) + return -EINVAL; + + if (np->flags & NIU_FLAGS_XMAC) { + nw64_mac(XMAC_ALT_ADDR0(index), reg0); + nw64_mac(XMAC_ALT_ADDR1(index), reg1); + nw64_mac(XMAC_ALT_ADDR2(index), reg2); + } else { + nw64_mac(BMAC_ALT_ADDR0(index), reg0); + nw64_mac(BMAC_ALT_ADDR1(index), reg1); + nw64_mac(BMAC_ALT_ADDR2(index), reg2); + } + + return 0; +} + +static int niu_enable_alt_mac(struct niu *np, int index, int on) +{ + unsigned long reg; + u64 val, mask; + + if (index >= niu_num_alt_addr(np)) + return -EINVAL; + + if (np->flags & NIU_FLAGS_XMAC) + reg = XMAC_ADDR_CMPEN; + else + reg = BMAC_ADDR_CMPEN; + + mask = 1 << index; + + val = nr64_mac(reg); + if (on) + val |= mask; + else + val &= ~mask; + nw64_mac(reg, val); + + return 0; +} + +static void __set_rdc_table_num_hw(struct niu *np, unsigned long reg, + int num, int mac_pref) +{ + u64 val = nr64_mac(reg); + val &= ~(HOST_INFO_MACRDCTBLN | HOST_INFO_MPR); + val |= num; + if (mac_pref) + val |= HOST_INFO_MPR; + nw64_mac(reg, val); +} + +static int __set_rdc_table_num(struct niu *np, + int xmac_index, int bmac_index, + int rdc_table_num, int mac_pref) +{ + unsigned long reg; + + if (rdc_table_num & ~HOST_INFO_MACRDCTBLN) + return -EINVAL; + if (np->flags & NIU_FLAGS_XMAC) + reg = XMAC_HOST_INFO(xmac_index); + else + reg = BMAC_HOST_INFO(bmac_index); + __set_rdc_table_num_hw(np, reg, rdc_table_num, mac_pref); + return 0; +} + +static int niu_set_primary_mac_rdc_table(struct niu *np, int table_num, + int mac_pref) +{ + return __set_rdc_table_num(np, 17, 0, table_num, mac_pref); +} + +static int niu_set_multicast_mac_rdc_table(struct niu *np, int table_num, + int mac_pref) +{ + return __set_rdc_table_num(np, 16, 8, table_num, mac_pref); +} + +static int niu_set_alt_mac_rdc_table(struct niu *np, int idx, + int table_num, int mac_pref) +{ + if (idx >= niu_num_alt_addr(np)) + return -EINVAL; + return __set_rdc_table_num(np, idx, idx + 1, table_num, mac_pref); +} + +static u64 vlan_entry_set_parity(u64 reg_val) +{ + u64 port01_mask; + u64 port23_mask; + + port01_mask = 0x00ff; + port23_mask = 0xff00; + + if (hweight64(reg_val & port01_mask) & 1) + reg_val |= ENET_VLAN_TBL_PARITY0; + else + reg_val &= ~ENET_VLAN_TBL_PARITY0; + + if (hweight64(reg_val & port23_mask) & 1) + reg_val |= ENET_VLAN_TBL_PARITY1; + else + reg_val &= ~ENET_VLAN_TBL_PARITY1; + + return reg_val; +} + +static void vlan_tbl_write(struct niu *np, unsigned long index, + int port, int vpr, int rdc_table) +{ + u64 reg_val = nr64(ENET_VLAN_TBL(index)); + + reg_val &= ~((ENET_VLAN_TBL_VPR | + ENET_VLAN_TBL_VLANRDCTBLN) << + ENET_VLAN_TBL_SHIFT(port)); + if (vpr) + reg_val |= (ENET_VLAN_TBL_VPR << + ENET_VLAN_TBL_SHIFT(port)); + reg_val |= (rdc_table << ENET_VLAN_TBL_SHIFT(port)); + + reg_val = vlan_entry_set_parity(reg_val); + + nw64(ENET_VLAN_TBL(index), reg_val); +} + +static void vlan_tbl_clear(struct niu *np) +{ + int i; + + for (i = 0; i < ENET_VLAN_TBL_NUM_ENTRIES; i++) + nw64(ENET_VLAN_TBL(i), 0); +} + +static int tcam_wait_bit(struct niu *np, u64 bit) +{ + int limit = 1000; + + while (--limit > 0) { + if (nr64(TCAM_CTL) & bit) + break; + udelay(1); + } + if (limit < 0) + return 
-ENODEV; + + return 0; +} + +static int tcam_flush(struct niu *np, int index) +{ + nw64(TCAM_KEY_0, 0x00); + nw64(TCAM_KEY_MASK_0, 0xff); + nw64(TCAM_CTL, (TCAM_CTL_RWC_TCAM_WRITE | index)); + + return tcam_wait_bit(np, TCAM_CTL_STAT); +} + +#if 0 +static int tcam_read(struct niu *np, int index, + u64 *key, u64 *mask) +{ + int err; + + nw64(TCAM_CTL, (TCAM_CTL_RWC_TCAM_READ | index)); + err = tcam_wait_bit(np, TCAM_CTL_STAT); + if (!err) { + key[0] = nr64(TCAM_KEY_0); + key[1] = nr64(TCAM_KEY_1); + key[2] = nr64(TCAM_KEY_2); + key[3] = nr64(TCAM_KEY_3); + mask[0] = nr64(TCAM_KEY_MASK_0); + mask[1] = nr64(TCAM_KEY_MASK_1); + mask[2] = nr64(TCAM_KEY_MASK_2); + mask[3] = nr64(TCAM_KEY_MASK_3); + } + return err; +} +#endif + +static int tcam_write(struct niu *np, int index, + u64 *key, u64 *mask) +{ + nw64(TCAM_KEY_0, key[0]); + nw64(TCAM_KEY_1, key[1]); + nw64(TCAM_KEY_2, key[2]); + nw64(TCAM_KEY_3, key[3]); + nw64(TCAM_KEY_MASK_0, mask[0]); + nw64(TCAM_KEY_MASK_1, mask[1]); + nw64(TCAM_KEY_MASK_2, mask[2]); + nw64(TCAM_KEY_MASK_3, mask[3]); + nw64(TCAM_CTL, (TCAM_CTL_RWC_TCAM_WRITE | index)); + + return tcam_wait_bit(np, TCAM_CTL_STAT); +} + +#if 0 +static int tcam_assoc_read(struct niu *np, int index, u64 *data) +{ + int err; + + nw64(TCAM_CTL, (TCAM_CTL_RWC_RAM_READ | index)); + err = tcam_wait_bit(np, TCAM_CTL_STAT); + if (!err) + *data = nr64(TCAM_KEY_1); + + return err; +} +#endif + +static int tcam_assoc_write(struct niu *np, int index, u64 assoc_data) +{ + nw64(TCAM_KEY_1, assoc_data); + nw64(TCAM_CTL, (TCAM_CTL_RWC_RAM_WRITE | index)); + + return tcam_wait_bit(np, TCAM_CTL_STAT); +} + +static void tcam_enable(struct niu *np, int on) +{ + u64 val = nr64(FFLP_CFG_1); + + if (on) + val &= ~FFLP_CFG_1_TCAM_DIS; + else + val |= FFLP_CFG_1_TCAM_DIS; + nw64(FFLP_CFG_1, val); +} + +static void tcam_set_lat_and_ratio(struct niu *np, u64 latency, u64 ratio) +{ + u64 val = nr64(FFLP_CFG_1); + + val &= ~(FFLP_CFG_1_FFLPINITDONE | + FFLP_CFG_1_CAMLAT | + FFLP_CFG_1_CAMRATIO); + val |= (latency << FFLP_CFG_1_CAMLAT_SHIFT); + val |= (ratio << FFLP_CFG_1_CAMRATIO_SHIFT); + nw64(FFLP_CFG_1, val); + + val = nr64(FFLP_CFG_1); + val |= FFLP_CFG_1_FFLPINITDONE; + nw64(FFLP_CFG_1, val); +} + +static int tcam_user_eth_class_enable(struct niu *np, unsigned long class, + int on) +{ + unsigned long reg; + u64 val; + + if (class < CLASS_CODE_ETHERTYPE1 || + class > CLASS_CODE_ETHERTYPE2) + return -EINVAL; + + reg = L2_CLS(class - CLASS_CODE_ETHERTYPE1); + val = nr64(reg); + if (on) + val |= L2_CLS_VLD; + else + val &= ~L2_CLS_VLD; + nw64(reg, val); + + return 0; +} + +#if 0 +static int tcam_user_eth_class_set(struct niu *np, unsigned long class, + u64 ether_type) +{ + unsigned long reg; + u64 val; + + if (class < CLASS_CODE_ETHERTYPE1 || + class > CLASS_CODE_ETHERTYPE2 || + (ether_type & ~(u64)0xffff) != 0) + return -EINVAL; + + reg = L2_CLS(class - CLASS_CODE_ETHERTYPE1); + val = nr64(reg); + val &= ~L2_CLS_ETYPE; + val |= (ether_type << L2_CLS_ETYPE_SHIFT); + nw64(reg, val); + + return 0; +} +#endif + +static int tcam_user_ip_class_enable(struct niu *np, unsigned long class, + int on) +{ + unsigned long reg; + u64 val; + + if (class < CLASS_CODE_USER_PROG1 || + class > CLASS_CODE_USER_PROG4) + return -EINVAL; + + reg = L3_CLS(class - CLASS_CODE_USER_PROG1); + val = nr64(reg); + if (on) + val |= L3_CLS_VALID; + else + val &= ~L3_CLS_VALID; + nw64(reg, val); + + return 0; +} + +#if 0 +static int tcam_user_ip_class_set(struct niu *np, unsigned long class, + int ipv6, u64 protocol_id, + u64 tos_mask, u64 
tos_val) +{ + unsigned long reg; + u64 val; + + if (class < CLASS_CODE_USER_PROG1 || + class > CLASS_CODE_USER_PROG4 || + (protocol_id & ~(u64)0xff) != 0 || + (tos_mask & ~(u64)0xff) != 0 || + (tos_val & ~(u64)0xff) != 0) + return -EINVAL; + + reg = L3_CLS(class - CLASS_CODE_USER_PROG1); + val = nr64(reg); + val &= ~(L3_CLS_IPVER | L3_CLS_PID | + L3_CLS_TOSMASK | L3_CLS_TOS); + if (ipv6) + val |= L3_CLS_IPVER; + val |= (protocol_id << L3_CLS_PID_SHIFT); + val |= (tos_mask << L3_CLS_TOSMASK_SHIFT); + val |= (tos_val << L3_CLS_TOS_SHIFT); + nw64(reg, val); + + return 0; +} +#endif + +static int tcam_early_init(struct niu *np) +{ + unsigned long i; + int err; + + tcam_enable(np, 0); + tcam_set_lat_and_ratio(np, + DEFAULT_TCAM_LATENCY, + DEFAULT_TCAM_ACCESS_RATIO); + for (i = CLASS_CODE_ETHERTYPE1; i <= CLASS_CODE_ETHERTYPE2; i++) { + err = tcam_user_eth_class_enable(np, i, 0); + if (err) + return err; + } + for (i = CLASS_CODE_USER_PROG1; i <= CLASS_CODE_USER_PROG4; i++) { + err = tcam_user_ip_class_enable(np, i, 0); + if (err) + return err; + } + + return 0; +} + +static int tcam_flush_all(struct niu *np) +{ + unsigned long i; + + for (i = 0; i < np->parent->tcam_num_entries; i++) { + int err = tcam_flush(np, i); + if (err) + return err; + } + return 0; +} + +static u64 hash_addr_regval(unsigned long index, unsigned long num_entries) +{ + return ((u64)index | (num_entries == 1 ? + HASH_TBL_ADDR_AUTOINC : 0)); +} + +#if 0 +static int hash_read(struct niu *np, unsigned long partition, + unsigned long index, unsigned long num_entries, + u64 *data) +{ + u64 val = hash_addr_regval(index, num_entries); + unsigned long i; + + if (partition >= FCRAM_NUM_PARTITIONS || + index + num_entries > FCRAM_SIZE) + return -EINVAL; + + nw64(HASH_TBL_ADDR(partition), val); + for (i = 0; i < num_entries; i++) + data[i] = nr64(HASH_TBL_DATA(partition)); + + return 0; +} +#endif + +static int hash_write(struct niu *np, unsigned long partition, + unsigned long index, unsigned long num_entries, + u64 *data) +{ + u64 val = hash_addr_regval(index, num_entries); + unsigned long i; + + if (partition >= FCRAM_NUM_PARTITIONS || + index + (num_entries * 8) > FCRAM_SIZE) + return -EINVAL; + + nw64(HASH_TBL_ADDR(partition), val); + for (i = 0; i < num_entries; i++) + nw64(HASH_TBL_DATA(partition), data[i]); + + return 0; +} + +static void fflp_reset(struct niu *np) +{ + u64 val; + + nw64(FFLP_CFG_1, FFLP_CFG_1_PIO_FIO_RST); + udelay(10); + nw64(FFLP_CFG_1, 0); + + val = FFLP_CFG_1_FCRAMOUTDR_NORMAL | FFLP_CFG_1_FFLPINITDONE; + nw64(FFLP_CFG_1, val); +} + +static void fflp_set_timings(struct niu *np) +{ + u64 val = nr64(FFLP_CFG_1); + + val &= ~FFLP_CFG_1_FFLPINITDONE; + val |= (DEFAULT_FCRAMRATIO << FFLP_CFG_1_FCRAMRATIO_SHIFT); + nw64(FFLP_CFG_1, val); + + val = nr64(FFLP_CFG_1); + val |= FFLP_CFG_1_FFLPINITDONE; + nw64(FFLP_CFG_1, val); + + val = nr64(FCRAM_REF_TMR); + val &= ~(FCRAM_REF_TMR_MAX | FCRAM_REF_TMR_MIN); + val |= (DEFAULT_FCRAM_REFRESH_MAX << FCRAM_REF_TMR_MAX_SHIFT); + val |= (DEFAULT_FCRAM_REFRESH_MIN << FCRAM_REF_TMR_MIN_SHIFT); + nw64(FCRAM_REF_TMR, val); +} + +static int fflp_set_partition(struct niu *np, u64 partition, + u64 mask, u64 base, int enable) +{ + unsigned long reg; + u64 val; + + if (partition >= FCRAM_NUM_PARTITIONS || + (mask & ~(u64)0x1f) != 0 || + (base & ~(u64)0x1f) != 0) + return -EINVAL; + + reg = FLW_PRT_SEL(partition); + + val = nr64(reg); + val &= ~(FLW_PRT_SEL_EXT | FLW_PRT_SEL_MASK | FLW_PRT_SEL_BASE); + val |= (mask << FLW_PRT_SEL_MASK_SHIFT); + val |= (base << 
FLW_PRT_SEL_BASE_SHIFT); + if (enable) + val |= FLW_PRT_SEL_EXT; + nw64(reg, val); + + return 0; +} + +static int fflp_disable_all_partitions(struct niu *np) +{ + unsigned long i; + + for (i = 0; i < FCRAM_NUM_PARTITIONS; i++) { + int err = fflp_set_partition(np, 0, 0, 0, 0); + if (err) + return err; + } + return 0; +} + +static void fflp_llcsnap_enable(struct niu *np, int on) +{ + u64 val = nr64(FFLP_CFG_1); + + if (on) + val |= FFLP_CFG_1_LLCSNAP; + else + val &= ~FFLP_CFG_1_LLCSNAP; + nw64(FFLP_CFG_1, val); +} + +static void fflp_errors_enable(struct niu *np, int on) +{ + u64 val = nr64(FFLP_CFG_1); + + if (on) + val &= ~FFLP_CFG_1_ERRORDIS; + else + val |= FFLP_CFG_1_ERRORDIS; + nw64(FFLP_CFG_1, val); +} + +static int fflp_hash_clear(struct niu *np) +{ + struct fcram_hash_ipv4 ent; + unsigned long i; + + /* IPV4 hash entry with valid bit clear, rest is don't care. */ + memset(&ent, 0, sizeof(ent)); + ent.header = HASH_HEADER_EXT; + + for (i = 0; i < FCRAM_SIZE; i += sizeof(ent)) { + int err = hash_write(np, 0, i, 1, (u64 *) &ent); + if (err) + return err; + } + return 0; +} + +static int fflp_early_init(struct niu *np) +{ + struct niu_parent *parent; + unsigned long flags; + int err; + + niu_lock_parent(np, flags); + + parent = np->parent; + err = 0; + if (!(parent->flags & PARENT_FLGS_CLS_HWINIT)) { + niudbg(PROBE, "fflp_early_init: Initting hw on port %u\n", + np->port); + if (np->parent->plat_type != PLAT_TYPE_NIU) { + fflp_reset(np); + fflp_set_timings(np); + err = fflp_disable_all_partitions(np); + if (err) { + niudbg(PROBE, "fflp_disable_all_partitions " + "failed, err=%d\n", err); + goto out; + } + } + + err = tcam_early_init(np); + if (err) { + niudbg(PROBE, "tcam_early_init failed, err=%d\n", + err); + goto out; + } + fflp_llcsnap_enable(np, 1); + fflp_errors_enable(np, 0); + nw64(H1POLY, 0); + nw64(H2POLY, 0); + + err = tcam_flush_all(np); + if (err) { + niudbg(PROBE, "tcam_flush_all failed, err=%d\n", + err); + goto out; + } + if (np->parent->plat_type != PLAT_TYPE_NIU) { + err = fflp_hash_clear(np); + if (err) { + niudbg(PROBE, "fflp_hash_clear failed, " + "err=%d\n", err); + goto out; + } + } + + vlan_tbl_clear(np); + + niudbg(PROBE, "fflp_early_init: Success\n"); + parent->flags |= PARENT_FLGS_CLS_HWINIT; + } +out: + niu_unlock_parent(np, flags); + return err; +} + +static int niu_set_flow_key(struct niu *np, unsigned long class_code, u64 key) +{ + if (class_code < CLASS_CODE_USER_PROG1 || + class_code > CLASS_CODE_SCTP_IPV6) + return -EINVAL; + + nw64(FLOW_KEY(class_code - CLASS_CODE_USER_PROG1), key); + return 0; +} + +static int niu_set_tcam_key(struct niu *np, unsigned long class_code, u64 key) +{ + if (class_code < CLASS_CODE_USER_PROG1 || + class_code > CLASS_CODE_SCTP_IPV6) + return -EINVAL; + + nw64(TCAM_KEY(class_code - CLASS_CODE_USER_PROG1), key); + return 0; +} + +static void niu_rx_skb_append(struct sk_buff *skb, struct page *page, + u32 offset, u32 size) +{ + int i = skb_shinfo(skb)->nr_frags; + skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; + + frag->page = page; + frag->page_offset = offset; + frag->size = size; + + skb->len += size; + skb->data_len += size; + skb->truesize += size; + + skb_shinfo(skb)->nr_frags = i + 1; +} + +static unsigned int niu_hash_rxaddr(struct rx_ring_info *rp, u64 a) +{ + a >>= PAGE_SHIFT; + a ^= (a >> ilog2(MAX_RBR_RING_SIZE)); + + return (a & (MAX_RBR_RING_SIZE - 1)); +} + +static struct page *niu_find_rxpage(struct rx_ring_info *rp, u64 addr, + struct page ***link) +{ + unsigned int h = niu_hash_rxaddr(rp, addr); + struct page 
*p, **pp; + + addr &= PAGE_MASK; + pp = &rp->rxhash[h]; + for (; (p = *pp) != NULL; pp = (struct page **) &p->mapping) { + if (p->index == addr) { + *link = pp; + break; + } + } + + return p; +} + +static void niu_hash_page(struct rx_ring_info *rp, struct page *page, u64 base) +{ + unsigned int h = niu_hash_rxaddr(rp, base); + + page->index = base; + page->mapping = (struct address_space *) rp->rxhash[h]; + rp->rxhash[h] = page; +} + +static int niu_rbr_add_page(struct niu *np, struct rx_ring_info *rp, + gfp_t mask, int start_index) +{ + struct page *page; + u64 addr; + int i; + + page = alloc_page(mask); + if (!page) + return -ENOMEM; + + addr = np->ops->map_page(np->device, page, 0, + PAGE_SIZE, DMA_FROM_DEVICE); + + niu_hash_page(rp, page, addr); + if (rp->rbr_blocks_per_page > 1) + atomic_add(rp->rbr_blocks_per_page - 1, + &compound_head(page)->_count); + + for (i = 0; i < rp->rbr_blocks_per_page; i++) { + __le32 *rbr = &rp->rbr[start_index + i]; + + *rbr = cpu_to_le32(addr >> RBR_DESCR_ADDR_SHIFT); + addr += rp->rbr_block_size; + } + + return 0; +} + +static void niu_rbr_refill(struct niu *np, struct rx_ring_info *rp, gfp_t mask) +{ + int index = rp->rbr_index; + + rp->rbr_pending++; + if ((rp->rbr_pending % rp->rbr_blocks_per_page) == 0) { + int err = niu_rbr_add_page(np, rp, mask, index); + + if (unlikely(err)) { + rp->rbr_pending--; + return; + } + + rp->rbr_index += rp->rbr_blocks_per_page; + BUG_ON(rp->rbr_index > rp->rbr_table_size); + if (rp->rbr_index == rp->rbr_table_size) + rp->rbr_index = 0; + + if (rp->rbr_pending >= rp->rbr_kick_thresh) { + nw64(RBR_KICK(rp->rx_channel), rp->rbr_pending); + rp->rbr_pending = 0; + } + } +} + +static int niu_rx_pkt_ignore(struct niu *np, struct rx_ring_info *rp) +{ + unsigned int index = rp->rcr_index; + int num_rcr = 0; + + rp->rx_dropped++; + while (1) { + struct page *page, **link; + u64 addr, val; + u32 rcr_size; + + num_rcr++; + + val = le64_to_cpup(&rp->rcr[index]); + addr = (val & RCR_ENTRY_PKT_BUF_ADDR) << + RCR_ENTRY_PKT_BUF_ADDR_SHIFT; + page = niu_find_rxpage(rp, addr, &link); + + rcr_size = rp->rbr_sizes[(val & RCR_ENTRY_PKTBUFSZ) >> + RCR_ENTRY_PKTBUFSZ_SHIFT]; + if ((page->index + PAGE_SIZE) - rcr_size == addr) { + *link = (struct page *) page->mapping; + np->ops->unmap_page(np->device, page->index, + PAGE_SIZE, DMA_FROM_DEVICE); + page->index = 0; + page->mapping = NULL; + __free_page(page); + rp->rbr_refill_pending++; + } + + index = NEXT_RCR(rp, index); + if (!(val & RCR_ENTRY_MULTI)) + break; + + } + rp->rcr_index = index; + + return num_rcr; +} + +static int niu_process_rx_pkt(struct niu *np, struct rx_ring_info *rp) +{ + unsigned int index = rp->rcr_index; + struct sk_buff *skb; + int len, num_rcr; + + skb = netdev_alloc_skb(np->dev, RX_SKB_ALLOC_SIZE); + if (unlikely(!skb)) + return niu_rx_pkt_ignore(np, rp); + + num_rcr = 0; + while (1) { + struct page *page, **link; + u32 rcr_size, append_size; + u64 addr, val, off; + + num_rcr++; + + val = le64_to_cpup(&rp->rcr[index]); + + len = (val & RCR_ENTRY_L2_LEN) >> + RCR_ENTRY_L2_LEN_SHIFT; + len -= ETH_FCS_LEN; + + addr = (val & RCR_ENTRY_PKT_BUF_ADDR) << + RCR_ENTRY_PKT_BUF_ADDR_SHIFT; + page = niu_find_rxpage(rp, addr, &link); + + rcr_size = rp->rbr_sizes[(val & RCR_ENTRY_PKTBUFSZ) >> + RCR_ENTRY_PKTBUFSZ_SHIFT]; + + off = addr & ~PAGE_MASK; + append_size = rcr_size; + if (num_rcr == 1) { + int ptype; + + off += 2; + append_size -= 2; + + ptype = (val >> RCR_ENTRY_PKT_TYPE_SHIFT); + if ((ptype == RCR_PKT_TYPE_TCP || + ptype == RCR_PKT_TYPE_UDP) && + !(val & 
(RCR_ENTRY_NOPORT | + RCR_ENTRY_ERROR))) + skb->ip_summed = CHECKSUM_UNNECESSARY; + else + skb->ip_summed = CHECKSUM_NONE; + } + if (!(val & RCR_ENTRY_MULTI)) + append_size = len - skb->len; + + niu_rx_skb_append(skb, page, off, append_size); + if ((page->index + rp->rbr_block_size) - rcr_size == addr) { + *link = (struct page *) page->mapping; + np->ops->unmap_page(np->device, page->index, + PAGE_SIZE, DMA_FROM_DEVICE); + page->index = 0; + page->mapping = NULL; + rp->rbr_refill_pending++; + } else + get_page(page); + + index = NEXT_RCR(rp, index); + if (!(val & RCR_ENTRY_MULTI)) + break; + + } + rp->rcr_index = index; + + skb_reserve(skb, NET_IP_ALIGN); + __pskb_pull_tail(skb, min(len, NIU_RXPULL_MAX)); + + rp->rx_packets++; + rp->rx_bytes += skb->len; + + skb->protocol = eth_type_trans(skb, np->dev); + netif_receive_skb(skb); + + return num_rcr; +} + +static int niu_rbr_fill(struct niu *np, struct rx_ring_info *rp, gfp_t mask) +{ + int blocks_per_page = rp->rbr_blocks_per_page; + int err, index = rp->rbr_index; + + err = 0; + while (index < (rp->rbr_table_size - blocks_per_page)) { + err = niu_rbr_add_page(np, rp, mask, index); + if (err) + break; + + index += blocks_per_page; + } + + rp->rbr_index = index; + return err; +} + +static void niu_rbr_free(struct niu *np, struct rx_ring_info *rp) +{ + int i; + + for (i = 0; i < MAX_RBR_RING_SIZE; i++) { + struct page *page; + + page = rp->rxhash[i]; + while (page) { + struct page *next = (struct page *) page->mapping; + u64 base = page->index; + + np->ops->unmap_page(np->device, base, PAGE_SIZE, + DMA_FROM_DEVICE); + page->index = 0; + page->mapping = NULL; + + __free_page(page); + + page = next; + } + } + + for (i = 0; i < rp->rbr_table_size; i++) + rp->rbr[i] = cpu_to_le32(0); + rp->rbr_index = 0; +} + +static int release_tx_packet(struct niu *np, struct tx_ring_info *rp, int idx) +{ + struct tx_buff_info *tb = &rp->tx_buffs[idx]; + struct sk_buff *skb = tb->skb; + struct tx_pkt_hdr *tp; + u64 tx_flags; + int i, len; + + tp = (struct tx_pkt_hdr *) skb->data; + tx_flags = le64_to_cpup(&tp->flags); + + rp->tx_packets++; + rp->tx_bytes += (((tx_flags & TXHDR_LEN) >> TXHDR_LEN_SHIFT) - + ((tx_flags & TXHDR_PAD) / 2)); + + len = skb_headlen(skb); + np->ops->unmap_single(np->device, tb->mapping, + len, DMA_TO_DEVICE); + + if (le64_to_cpu(rp->descr[idx]) & TX_DESC_MARK) + rp->mark_pending--; + + tb->skb = NULL; + do { + idx = NEXT_TX(rp, idx); + len -= MAX_TX_DESC_LEN; + } while (len > 0); + + for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { + tb = &rp->tx_buffs[idx]; + BUG_ON(tb->skb != NULL); + np->ops->unmap_page(np->device, tb->mapping, + skb_shinfo(skb)->frags[i].size, + DMA_TO_DEVICE); + idx = NEXT_TX(rp, idx); + } + + dev_kfree_skb(skb); + + return idx; +} + +#define NIU_TX_WAKEUP_THRESH(rp) ((rp)->pending / 4) + +static void niu_tx_work(struct niu *np, struct tx_ring_info *rp) +{ + u16 pkt_cnt, tmp; + int cons; + u64 cs; + + cs = rp->tx_cs; + if (unlikely(!(cs & (TX_CS_MK | TX_CS_MMK)))) + goto out; + + tmp = pkt_cnt = (cs & TX_CS_PKT_CNT) >> TX_CS_PKT_CNT_SHIFT; + pkt_cnt = (pkt_cnt - rp->last_pkt_cnt) & + (TX_CS_PKT_CNT >> TX_CS_PKT_CNT_SHIFT); + + rp->last_pkt_cnt = tmp; + + cons = rp->cons; + + niudbg(TX_DONE, "%s: niu_tx_work() pkt_cnt[%u] cons[%d]\n", + np->dev->name, pkt_cnt, cons); + + while (pkt_cnt--) + cons = release_tx_packet(np, rp, cons); + + rp->cons = cons; + smp_mb(); + +out: + if (unlikely(netif_queue_stopped(np->dev) && + (niu_tx_avail(rp) > NIU_TX_WAKEUP_THRESH(rp)))) { + netif_tx_lock(np->dev); + if 
(netif_queue_stopped(np->dev) && + (niu_tx_avail(rp) > NIU_TX_WAKEUP_THRESH(rp))) + netif_wake_queue(np->dev); + netif_tx_unlock(np->dev); + } +} + +static int niu_rx_work(struct niu *np, struct rx_ring_info *rp, int budget) +{ + int qlen, rcr_done = 0, work_done = 0; + struct rxdma_mailbox *mbox = rp->mbox; + u64 stat; + +#if 1 + stat = nr64(RX_DMA_CTL_STAT(rp->rx_channel)); + qlen = nr64(RCRSTAT_A(rp->rx_channel)) & RCRSTAT_A_QLEN; +#else + stat = le64_to_cpup(&mbox->rx_dma_ctl_stat); + qlen = (le64_to_cpup(&mbox->rcrstat_a) & RCRSTAT_A_QLEN); +#endif + mbox->rx_dma_ctl_stat = 0; + mbox->rcrstat_a = 0; + + niudbg(RX_STATUS, "%s: niu_rx_work(chan[%d]), stat[%llx] qlen=%d\n", + np->dev->name, rp->rx_channel, (unsigned long long) stat, qlen); + + rcr_done = work_done = 0; + qlen = min(qlen, budget); + while (work_done < qlen) { + rcr_done += niu_process_rx_pkt(np, rp); + work_done++; + } + + if (rp->rbr_refill_pending >= rp->rbr_kick_thresh) { + unsigned int i; + + for (i = 0; i < rp->rbr_refill_pending; i++) + niu_rbr_refill(np, rp, GFP_ATOMIC); + rp->rbr_refill_pending = 0; + } + + stat = (RX_DMA_CTL_STAT_MEX | + ((u64)work_done << RX_DMA_CTL_STAT_PKTREAD_SHIFT) | + ((u64)rcr_done << RX_DMA_CTL_STAT_PTRREAD_SHIFT)); + + nw64(RX_DMA_CTL_STAT(rp->rx_channel), stat); + + return work_done; +} + +static int niu_poll_core(struct niu *np, struct niu_ldg *lp, int budget) +{ + u64 v0 = lp->v0; + u32 tx_vec = (v0 >> 32); + u32 rx_vec = (v0 & 0xffffffff); + int i, work_done = 0; + + niudbg(INTR, "%s: niu_poll_core() v0[%016llx]\n", + np->dev->name, (unsigned long long) v0); + + for (i = 0; i < np->num_tx_rings; i++) { + struct tx_ring_info *rp = &np->tx_rings[i]; + if (tx_vec & (1 << rp->tx_channel)) + niu_tx_work(np, rp); + nw64(LD_IM0(LDN_TXDMA(rp->tx_channel)), 0); + } + + for (i = 0; i < np->num_rx_rings; i++) { + struct rx_ring_info *rp = &np->rx_rings[i]; + + if (rx_vec & (1 << rp->rx_channel)) { + int this_work_done; + + this_work_done = niu_rx_work(np, rp, + budget); + + budget -= this_work_done; + work_done += this_work_done; + } + nw64(LD_IM0(LDN_RXDMA(rp->rx_channel)), 0); + } + + return work_done; +} + +static int niu_poll(struct napi_struct *napi, int budget) +{ + struct niu_ldg *lp = container_of(napi, struct niu_ldg, napi); + struct niu *np = lp->np; + int work_done; + + work_done = niu_poll_core(np, lp, budget); + + if (work_done < budget) { + netif_rx_complete(np->dev, napi); + niu_ldg_rearm(np, lp, 1); + } + return work_done; +} + +static void niu_log_rxchan_errors(struct niu *np, struct rx_ring_info *rp, + u64 stat) +{ + dev_err(np->device, PFX "%s: RX channel %u errors ( ", + np->dev->name, rp->rx_channel); + + if (stat & RX_DMA_CTL_STAT_RBR_TMOUT) + printk("RBR_TMOUT "); + if (stat & RX_DMA_CTL_STAT_RSP_CNT_ERR) + printk("RSP_CNT "); + if (stat & RX_DMA_CTL_STAT_BYTE_EN_BUS) + printk("BYTE_EN_BUS "); + if (stat & RX_DMA_CTL_STAT_RSP_DAT_ERR) + printk("RSP_DAT "); + if (stat & RX_DMA_CTL_STAT_RCR_ACK_ERR) + printk("RCR_ACK "); + if (stat & RX_DMA_CTL_STAT_RCR_SHA_PAR) + printk("RCR_SHA_PAR "); + if (stat & RX_DMA_CTL_STAT_RBR_PRE_PAR) + printk("RBR_PRE_PAR "); + if (stat & RX_DMA_CTL_STAT_CONFIG_ERR) + printk("CONFIG "); + if (stat & RX_DMA_CTL_STAT_RCRINCON) + printk("RCRINCON "); + if (stat & RX_DMA_CTL_STAT_RCRFULL) + printk("RCRFULL "); + if (stat & RX_DMA_CTL_STAT_RBRFULL) + printk("RBRFULL "); + if (stat & RX_DMA_CTL_STAT_RBRLOGPAGE) + printk("RBRLOGPAGE "); + if (stat & RX_DMA_CTL_STAT_CFIGLOGPAGE) + printk("CFIGLOGPAGE "); + if (stat & RX_DMA_CTL_STAT_DC_FIFO_ERR) + 
printk("DC_FIDO "); + + printk(")\n"); +} + +static int niu_rx_error(struct niu *np, struct rx_ring_info *rp) +{ + u64 stat = nr64(RX_DMA_CTL_STAT(rp->rx_channel)); + int err = 0; + + dev_err(np->device, PFX "%s: RX channel %u error, stat[%llx]\n", + np->dev->name, rp->rx_channel, (unsigned long long) stat); + + niu_log_rxchan_errors(np, rp, stat); + + if (stat & (RX_DMA_CTL_STAT_CHAN_FATAL | + RX_DMA_CTL_STAT_PORT_FATAL)) + err = -EINVAL; + + nw64(RX_DMA_CTL_STAT(rp->rx_channel), + stat & RX_DMA_CTL_WRITE_CLEAR_ERRS); + + return err; +} + +static void niu_log_txchan_errors(struct niu *np, struct tx_ring_info *rp, + u64 cs) +{ + dev_err(np->device, PFX "%s: TX channel %u errors ( ", + np->dev->name, rp->tx_channel); + + if (cs & TX_CS_MBOX_ERR) + printk("MBOX "); + if (cs & TX_CS_PKT_SIZE_ERR) + printk("PKT_SIZE "); + if (cs & TX_CS_TX_RING_OFLOW) + printk("TX_RING_OFLOW "); + if (cs & TX_CS_PREF_BUF_PAR_ERR) + printk("PREF_BUF_PAR "); + if (cs & TX_CS_NACK_PREF) + printk("NACK_PREF "); + if (cs & TX_CS_NACK_PKT_RD) + printk("NACK_PKT_RD "); + if (cs & TX_CS_CONF_PART_ERR) + printk("CONF_PART "); + if (cs & TX_CS_PKT_PRT_ERR) + printk("PKT_PTR "); + + printk(")\n"); +} + +static int niu_tx_error(struct niu *np, struct tx_ring_info *rp) +{ + u64 cs, logh, logl; + + cs = nr64(TX_CS(rp->tx_channel)); + logh = nr64(TX_RNG_ERR_LOGH(rp->tx_channel)); + logl = nr64(TX_RNG_ERR_LOGL(rp->tx_channel)); + + dev_err(np->device, PFX "%s: TX channel %u error, " + "cs[%llx] logh[%llx] logl[%llx]\n", + np->dev->name, rp->tx_channel, + (unsigned long long) cs, + (unsigned long long) logh, + (unsigned long long) logl); + + niu_log_txchan_errors(np, rp, cs); + + return -ENODEV; +} + +static int niu_mif_interrupt(struct niu *np) +{ + u64 mif_status = nr64(MIF_STATUS); + int phy_mdint = 0; + + if (np->flags & NIU_FLAGS_XMAC) { + u64 xrxmac_stat = nr64_mac(XRXMAC_STATUS); + + if (xrxmac_stat & XRXMAC_STATUS_PHY_MDINT) + phy_mdint = 1; + } + + dev_err(np->device, PFX "%s: MIF interrupt, " + "stat[%llx] phy_mdint(%d)\n", + np->dev->name, (unsigned long long) mif_status, phy_mdint); + + return -ENODEV; +} + +static void niu_xmac_interrupt(struct niu *np) +{ + struct niu_xmac_stats *mp = &np->mac_stats.xmac; + u64 val; + + val = nr64_mac(XTXMAC_STATUS); + if (val & XTXMAC_STATUS_FRAME_CNT_EXP) + mp->tx_frames += TXMAC_FRM_CNT_COUNT; + if (val & XTXMAC_STATUS_BYTE_CNT_EXP) + mp->tx_bytes += TXMAC_BYTE_CNT_COUNT; + if (val & XTXMAC_STATUS_TXFIFO_XFR_ERR) + mp->tx_fifo_errors++; + if (val & XTXMAC_STATUS_TXMAC_OFLOW) + mp->tx_overflow_errors++; + if (val & XTXMAC_STATUS_MAX_PSIZE_ERR) + mp->tx_max_pkt_size_errors++; + if (val & XTXMAC_STATUS_TXMAC_UFLOW) + mp->tx_underflow_errors++; + + val = nr64_mac(XRXMAC_STATUS); + if (val & XRXMAC_STATUS_LCL_FLT_STATUS) + mp->rx_local_faults++; + if (val & XRXMAC_STATUS_RFLT_DET) + mp->rx_remote_faults++; + if (val & XRXMAC_STATUS_LFLT_CNT_EXP) + mp->rx_link_faults += LINK_FAULT_CNT_COUNT; + if (val & XRXMAC_STATUS_ALIGNERR_CNT_EXP) + mp->rx_align_errors += RXMAC_ALIGN_ERR_CNT_COUNT; + if (val & XRXMAC_STATUS_RXFRAG_CNT_EXP) + mp->rx_frags += RXMAC_FRAG_CNT_COUNT; + if (val & XRXMAC_STATUS_RXMULTF_CNT_EXP) + mp->rx_mcasts += RXMAC_MC_FRM_CNT_COUNT; + if (val & XRXMAC_STATUS_RXBCAST_CNT_EXP) + mp->rx_bcasts += RXMAC_BC_FRM_CNT_COUNT; + if (val & XRXMAC_STATUS_RXBCAST_CNT_EXP) + mp->rx_bcasts += RXMAC_BC_FRM_CNT_COUNT; + if (val & XRXMAC_STATUS_RXHIST1_CNT_EXP) + mp->rx_hist_cnt1 += RXMAC_HIST_CNT1_COUNT; + if (val & XRXMAC_STATUS_RXHIST2_CNT_EXP) + mp->rx_hist_cnt2 += 
RXMAC_HIST_CNT2_COUNT; + if (val & XRXMAC_STATUS_RXHIST3_CNT_EXP) + mp->rx_hist_cnt3 += RXMAC_HIST_CNT3_COUNT; + if (val & XRXMAC_STATUS_RXHIST4_CNT_EXP) + mp->rx_hist_cnt4 += RXMAC_HIST_CNT4_COUNT; + if (val & XRXMAC_STATUS_RXHIST5_CNT_EXP) + mp->rx_hist_cnt5 += RXMAC_HIST_CNT5_COUNT; + if (val & XRXMAC_STATUS_RXHIST6_CNT_EXP) + mp->rx_hist_cnt6 += RXMAC_HIST_CNT6_COUNT; + if (val & XRXMAC_STATUS_RXHIST7_CNT_EXP) + mp->rx_hist_cnt7 += RXMAC_HIST_CNT7_COUNT; + if (val & XRXMAC_STAT_MSK_RXOCTET_CNT_EXP) + mp->rx_octets += RXMAC_BT_CNT_COUNT; + if (val & XRXMAC_STATUS_CVIOLERR_CNT_EXP) + mp->rx_code_violations += RXMAC_CD_VIO_CNT_COUNT; + if (val & XRXMAC_STATUS_LENERR_CNT_EXP) + mp->rx_len_errors += RXMAC_MPSZER_CNT_COUNT; + if (val & XRXMAC_STATUS_CRCERR_CNT_EXP) + mp->rx_crc_errors += RXMAC_CRC_ER_CNT_COUNT; + if (val & XRXMAC_STATUS_RXUFLOW) + mp->rx_underflows++; + if (val & XRXMAC_STATUS_RXOFLOW) + mp->rx_overflows++; + + val = nr64_mac(XMAC_FC_STAT); + if (val & XMAC_FC_STAT_TX_MAC_NPAUSE) + mp->pause_off_state++; + if (val & XMAC_FC_STAT_TX_MAC_PAUSE) + mp->pause_on_state++; + if (val & XMAC_FC_STAT_RX_MAC_RPAUSE) + mp->pause_received++; +} + +static void niu_bmac_interrupt(struct niu *np) +{ + struct niu_bmac_stats *mp = &np->mac_stats.bmac; + u64 val; + + val = nr64_mac(BTXMAC_STATUS); + if (val & BTXMAC_STATUS_UNDERRUN) + mp->tx_underflow_errors++; + if (val & BTXMAC_STATUS_MAX_PKT_ERR) + mp->tx_max_pkt_size_errors++; + if (val & BTXMAC_STATUS_BYTE_CNT_EXP) + mp->tx_bytes += BTXMAC_BYTE_CNT_COUNT; + if (val & BTXMAC_STATUS_FRAME_CNT_EXP) + mp->tx_frames += BTXMAC_FRM_CNT_COUNT; + + val = nr64_mac(BRXMAC_STATUS); + if (val & BRXMAC_STATUS_OVERFLOW) + mp->rx_overflows++; + if (val & BRXMAC_STATUS_FRAME_CNT_EXP) + mp->rx_frames += BRXMAC_FRAME_CNT_COUNT; + if (val & BRXMAC_STATUS_ALIGN_ERR_EXP) + mp->rx_align_errors += BRXMAC_ALIGN_ERR_CNT_COUNT; + if (val & BRXMAC_STATUS_CRC_ERR_EXP) + mp->rx_crc_errors += BRXMAC_ALIGN_ERR_CNT_COUNT; + if (val & BRXMAC_STATUS_LEN_ERR_EXP) + mp->rx_len_errors += BRXMAC_CODE_VIOL_ERR_CNT_COUNT; + + val = nr64_mac(BMAC_CTRL_STATUS); + if (val & BMAC_CTRL_STATUS_NOPAUSE) + mp->pause_off_state++; + if (val & BMAC_CTRL_STATUS_PAUSE) + mp->pause_on_state++; + if (val & BMAC_CTRL_STATUS_PAUSE_RECV) + mp->pause_received++; +} + +static int niu_mac_interrupt(struct niu *np) +{ + if (np->flags & NIU_FLAGS_XMAC) + niu_xmac_interrupt(np); + else + niu_bmac_interrupt(np); + + return 0; +} + +static void niu_log_device_error(struct niu *np, u64 stat) +{ + dev_err(np->device, PFX "%s: Core device errors ( ", + np->dev->name); + + if (stat & SYS_ERR_MASK_META2) + printk("META2 "); + if (stat & SYS_ERR_MASK_META1) + printk("META1 "); + if (stat & SYS_ERR_MASK_PEU) + printk("PEU "); + if (stat & SYS_ERR_MASK_TXC) + printk("TXC "); + if (stat & SYS_ERR_MASK_RDMC) + printk("RDMC "); + if (stat & SYS_ERR_MASK_TDMC) + printk("TDMC "); + if (stat & SYS_ERR_MASK_ZCP) + printk("ZCP "); + if (stat & SYS_ERR_MASK_FFLP) + printk("FFLP "); + if (stat & SYS_ERR_MASK_IPP) + printk("IPP "); + if (stat & SYS_ERR_MASK_MAC) + printk("MAC "); + if (stat & SYS_ERR_MASK_SMX) + printk("SMX "); + + printk(")\n"); +} + +static int niu_device_error(struct niu *np) +{ + u64 stat = nr64(SYS_ERR_STAT); + + dev_err(np->device, PFX "%s: Core device error, stat[%llx]\n", + np->dev->name, (unsigned long long) stat); + + niu_log_device_error(np, stat); + + return -ENODEV; +} + +static int niu_slowpath_interrupt(struct niu *np, struct niu_ldg *lp) +{ + u64 v0 = lp->v0; + u64 v1 = lp->v1; + u64 v2 = 
lp->v2; + int i, err = 0; + + if (v1 & 0x00000000ffffffffULL) { + u32 rx_vec = (v1 & 0xffffffff); + + for (i = 0; i < np->num_rx_rings; i++) { + struct rx_ring_info *rp = &np->rx_rings[i]; + + if (rx_vec & (1 << rp->rx_channel)) { + int r = niu_rx_error(np, rp); + if (r) + err = r; + } + } + } + if (v1 & 0x7fffffff00000000ULL) { + u32 tx_vec = (v1 >> 32) & 0x7fffffff; + + for (i = 0; i < np->num_tx_rings; i++) { + struct tx_ring_info *rp = &np->tx_rings[i]; + + if (tx_vec & (1 << rp->tx_channel)) { + int r = niu_tx_error(np, rp); + if (r) + err = r; + } + } + } + if ((v0 | v1) & 0x8000000000000000ULL) { + int r = niu_mif_interrupt(np); + if (r) + err = r; + } + if (v2) { + if (v2 & 0x01ef) { + int r = niu_mac_interrupt(np); + if (r) + err = r; + } + if (v2 & 0x0210) { + int r = niu_device_error(np); + if (r) + err = r; + } + } + + if (err) + niu_enable_interrupts(np, 0); + + return -EINVAL; +} + +static void niu_rxchan_intr(struct niu *np, struct rx_ring_info *rp, + int ldn) +{ + struct rxdma_mailbox *mbox = rp->mbox; + u64 stat_write, stat = le64_to_cpup(&mbox->rx_dma_ctl_stat); + + stat_write = (RX_DMA_CTL_STAT_RCRTHRES | + RX_DMA_CTL_STAT_RCRTO); + nw64(RX_DMA_CTL_STAT(rp->rx_channel), stat_write); + + niudbg(INTR, "%s: rxchan_intr stat[%llx]\n", + np->dev->name, (unsigned long long) stat); +} + +static void niu_txchan_intr(struct niu *np, struct tx_ring_info *rp, + int ldn) +{ + rp->tx_cs = nr64(TX_CS(rp->tx_channel)); + + niudbg(INTR, "%s: txchan_intr cs[%llx]\n", + np->dev->name, (unsigned long long) rp->tx_cs); +} + +static void __niu_fastpath_interrupt(struct niu *np, int ldg, u64 v0) +{ + struct niu_parent *parent = np->parent; + u32 rx_vec, tx_vec; + int i; + + tx_vec = (v0 >> 32); + rx_vec = (v0 & 0xffffffff); + + for (i = 0; i < np->num_rx_rings; i++) { + struct rx_ring_info *rp = &np->rx_rings[i]; + int ldn = LDN_RXDMA(rp->rx_channel); + + if (parent->ldg_map[ldn] != ldg) + continue; + + nw64(LD_IM0(ldn), LD_IM0_MASK); + if (rx_vec & (1 << rp->rx_channel)) + niu_rxchan_intr(np, rp, ldn); + } + + for (i = 0; i < np->num_tx_rings; i++) { + struct tx_ring_info *rp = &np->tx_rings[i]; + int ldn = LDN_TXDMA(rp->tx_channel); + + if (parent->ldg_map[ldn] != ldg) + continue; + + nw64(LD_IM0(ldn), LD_IM0_MASK); + if (tx_vec & (1 << rp->tx_channel)) + niu_txchan_intr(np, rp, ldn); + } +} + +static void niu_schedule_napi(struct niu *np, struct niu_ldg *lp, + u64 v0, u64 v1, u64 v2) +{ + if (likely(netif_rx_schedule_prep(np->dev, &lp->napi))) { + lp->v0 = v0; + lp->v1 = v1; + lp->v2 = v2; + __niu_fastpath_interrupt(np, lp->ldg_num, v0); + __netif_rx_schedule(np->dev, &lp->napi); + } +} + +static irqreturn_t niu_interrupt(int irq, void *dev_id) +{ + struct niu_ldg *lp = dev_id; + struct niu *np = lp->np; + int ldg = lp->ldg_num; + unsigned long flags; + u64 v0, v1, v2; + + if (netif_msg_intr(np)) + printk(KERN_DEBUG PFX "niu_interrupt() ldg[%p](%d) ", + lp, ldg); + + spin_lock_irqsave(&np->lock, flags); + + v0 = nr64(LDSV0(ldg)); + v1 = nr64(LDSV1(ldg)); + v2 = nr64(LDSV2(ldg)); + + if (netif_msg_intr(np)) + printk("v0[%llx] v1[%llx] v2[%llx]\n", + (unsigned long long) v0, + (unsigned long long) v1, + (unsigned long long) v2); + + if (unlikely(!v0 && !v1 && !v2)) { + spin_unlock_irqrestore(&np->lock, flags); + return IRQ_NONE; + } + + if (unlikely((v0 & ((u64)1 << LDN_MIF)) || v1 || v2)) { + int err = niu_slowpath_interrupt(np, lp); + if (err) + goto out; + } + if (likely(v0 & ~((u64)1 << LDN_MIF))) + niu_schedule_napi(np, lp, v0, v1, v2); + else + niu_ldg_rearm(np, lp, 1); +out: + 
spin_unlock_irqrestore(&np->lock, flags); + + return IRQ_HANDLED; +} + +static void niu_free_rx_ring_info(struct niu *np, struct rx_ring_info *rp) +{ + if (rp->mbox) { + np->ops->free_coherent(np->device, + sizeof(struct rxdma_mailbox), + rp->mbox, rp->mbox_dma); + rp->mbox = NULL; + } + if (rp->rcr) { + np->ops->free_coherent(np->device, + MAX_RCR_RING_SIZE * sizeof(__le64), + rp->rcr, rp->rcr_dma); + rp->rcr = NULL; + rp->rcr_table_size = 0; + rp->rcr_index = 0; + } + if (rp->rbr) { + niu_rbr_free(np, rp); + + np->ops->free_coherent(np->device, + MAX_RBR_RING_SIZE * sizeof(__le32), + rp->rbr, rp->rbr_dma); + rp->rbr = NULL; + rp->rbr_table_size = 0; + rp->rbr_index = 0; + } + kfree(rp->rxhash); + rp->rxhash = NULL; +} + +static void niu_free_tx_ring_info(struct niu *np, struct tx_ring_info *rp) +{ + if (rp->mbox) { + np->ops->free_coherent(np->device, + sizeof(struct txdma_mailbox), + rp->mbox, rp->mbox_dma); + rp->mbox = NULL; + } + if (rp->descr) { + int i; + + for (i = 0; i < MAX_TX_RING_SIZE; i++) { + if (rp->tx_buffs[i].skb) + (void) release_tx_packet(np, rp, i); + } + + np->ops->free_coherent(np->device, + MAX_TX_RING_SIZE * sizeof(__le64), + rp->descr, rp->descr_dma); + rp->descr = NULL; + rp->pending = 0; + rp->prod = 0; + rp->cons = 0; + rp->wrap_bit = 0; + } +} + +static void niu_free_channels(struct niu *np) +{ + int i; + + if (np->rx_rings) { + for (i = 0; i < np->num_rx_rings; i++) { + struct rx_ring_info *rp = &np->rx_rings[i]; + + niu_free_rx_ring_info(np, rp); + } + kfree(np->rx_rings); + np->rx_rings = NULL; + np->num_rx_rings = 0; + } + + if (np->tx_rings) { + for (i = 0; i < np->num_tx_rings; i++) { + struct tx_ring_info *rp = &np->tx_rings[i]; + + niu_free_tx_ring_info(np, rp); + } + kfree(np->tx_rings); + np->tx_rings = NULL; + np->num_tx_rings = 0; + } +} + +static int niu_alloc_rx_ring_info(struct niu *np, + struct rx_ring_info *rp) +{ + BUILD_BUG_ON(sizeof(struct rxdma_mailbox) != 64); + + rp->rxhash = kzalloc(MAX_RBR_RING_SIZE * sizeof(struct page *), + GFP_KERNEL); + if (!rp->rxhash) + return -ENOMEM; + + rp->mbox = np->ops->alloc_coherent(np->device, + sizeof(struct rxdma_mailbox), + &rp->mbox_dma, GFP_KERNEL); + if (!rp->mbox) + return -ENOMEM; + if ((unsigned long)rp->mbox & (64UL - 1)) { + dev_err(np->device, PFX "%s: Coherent alloc gives misaligned " + "RXDMA mailbox %p\n", np->dev->name, rp->mbox); + return -EINVAL; + } + + rp->rcr = np->ops->alloc_coherent(np->device, + MAX_RCR_RING_SIZE * sizeof(__le64), + &rp->rcr_dma, GFP_KERNEL); + if (!rp->rcr) + return -ENOMEM; + if ((unsigned long)rp->rcr & (64UL - 1)) { + dev_err(np->device, PFX "%s: Coherent alloc gives misaligned " + "RXDMA RCR table %p\n", np->dev->name, rp->rcr); + return -EINVAL; + } + rp->rcr_table_size = MAX_RCR_RING_SIZE; + rp->rcr_index = 0; + + rp->rbr = np->ops->alloc_coherent(np->device, + MAX_RBR_RING_SIZE * sizeof(__le32), + &rp->rbr_dma, GFP_KERNEL); + if (!rp->rbr) + return -ENOMEM; + if ((unsigned long)rp->rbr & (64UL - 1)) { + dev_err(np->device, PFX "%s: Coherent alloc gives misaligned " + "RXDMA RBR table %p\n", np->dev->name, rp->rbr); + return -EINVAL; + } + rp->rbr_table_size = MAX_RBR_RING_SIZE; + rp->rbr_index = 0; + rp->rbr_pending = 0; + + return 0; +} + +static void niu_set_max_burst(struct niu *np, struct tx_ring_info *rp) +{ + int mtu = np->dev->mtu; + + /* These values are recommended by the HW designers for fair + * utilization of DRR amongst the rings. 
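+ * For example, the standard 1500-byte MTU yields a 1532-byte burst; the burst is capped at 4096 bytes for jumbo MTUs.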
+ */ + rp->max_burst = mtu + 32; + if (rp->max_burst > 4096) + rp->max_burst = 4096; +} + +static int niu_alloc_tx_ring_info(struct niu *np, + struct tx_ring_info *rp) +{ + BUILD_BUG_ON(sizeof(struct txdma_mailbox) != 64); + + rp->mbox = np->ops->alloc_coherent(np->device, + sizeof(struct txdma_mailbox), + &rp->mbox_dma, GFP_KERNEL); + if (!rp->mbox) + return -ENOMEM; + if ((unsigned long)rp->mbox & (64UL - 1)) { + dev_err(np->device, PFX "%s: Coherent alloc gives misaligned " + "TXDMA mailbox %p\n", np->dev->name, rp->mbox); + return -EINVAL; + } + + rp->descr = np->ops->alloc_coherent(np->device, + MAX_TX_RING_SIZE * sizeof(__le64), + &rp->descr_dma, GFP_KERNEL); + if (!rp->descr) + return -ENOMEM; + if ((unsigned long)rp->descr & (64UL - 1)) { + dev_err(np->device, PFX "%s: Coherent alloc gives misaligned " + "TXDMA descr table %p\n", np->dev->name, rp->descr); + return -EINVAL; + } + + rp->pending = MAX_TX_RING_SIZE; + rp->prod = 0; + rp->cons = 0; + rp->wrap_bit = 0; + + /* XXX make these configurable... XXX */ + rp->mark_freq = rp->pending / 4; + + niu_set_max_burst(np, rp); + + return 0; +} + +static void niu_size_rbr(struct niu *np, struct rx_ring_info *rp) +{ + u16 bs; + + switch (PAGE_SIZE) { + case 4 * 1024: + case 8 * 1024: + case 16 * 1024: + case 32 * 1024: + rp->rbr_block_size = PAGE_SIZE; + rp->rbr_blocks_per_page = 1; + break; + + default: + if (PAGE_SIZE % (32 * 1024) == 0) + bs = 32 * 1024; + else if (PAGE_SIZE % (16 * 1024) == 0) + bs = 16 * 1024; + else if (PAGE_SIZE % (8 * 1024) == 0) + bs = 8 * 1024; + else if (PAGE_SIZE % (4 * 1024) == 0) + bs = 4 * 1024; + else + BUG(); + rp->rbr_block_size = bs; + rp->rbr_blocks_per_page = PAGE_SIZE / bs; + } + + rp->rbr_sizes[0] = 256; + rp->rbr_sizes[1] = 1024; + if (np->dev->mtu > ETH_DATA_LEN) { + switch (PAGE_SIZE) { + case 4 * 1024: + rp->rbr_sizes[2] = 4096; + break; + + default: + rp->rbr_sizes[2] = 8192; + break; + } + } else { + rp->rbr_sizes[2] = 2048; + } + rp->rbr_sizes[3] = rp->rbr_block_size; +} + +static int niu_alloc_channels(struct niu *np) +{ + struct niu_parent *parent = np->parent; + int first_rx_channel, first_tx_channel; + int i, port, err; + + port = np->port; + first_rx_channel = first_tx_channel = 0; + for (i = 0; i < port; i++) { + first_rx_channel += parent->rxchan_per_port[i]; + first_tx_channel += parent->txchan_per_port[i]; + } + + np->num_rx_rings = parent->rxchan_per_port[port]; + np->num_tx_rings = parent->txchan_per_port[port]; + + np->rx_rings = kzalloc(np->num_rx_rings * sizeof(struct rx_ring_info), + GFP_KERNEL); + err = -ENOMEM; + if (!np->rx_rings) + goto out_err; + + for (i = 0; i < np->num_rx_rings; i++) { + struct rx_ring_info *rp = &np->rx_rings[i]; + + rp->np = np; + rp->rx_channel = first_rx_channel + i; + + err = niu_alloc_rx_ring_info(np, rp); + if (err) + goto out_err; + + niu_size_rbr(np, rp); + + /* XXX better defaults, configurable, etc... 
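+ * (the defaults below size the RED syn/non-syn windows and thresholds against the RCR table, and set the RCR packet-count/timeout and RBR kick thresholds)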
XXX */ + rp->nonsyn_window = 64; + rp->nonsyn_threshold = rp->rcr_table_size - 64; + rp->syn_window = 64; + rp->syn_threshold = rp->rcr_table_size - 64; + rp->rcr_pkt_threshold = 16; + rp->rcr_timeout = 8; + rp->rbr_kick_thresh = RBR_REFILL_MIN; + if (rp->rbr_kick_thresh < rp->rbr_blocks_per_page) + rp->rbr_kick_thresh = rp->rbr_blocks_per_page; + + err = niu_rbr_fill(np, rp, GFP_KERNEL); + if (err) + return err; + } + + np->tx_rings = kzalloc(np->num_tx_rings * sizeof(struct tx_ring_info), + GFP_KERNEL); + err = -ENOMEM; + if (!np->tx_rings) + goto out_err; + + for (i = 0; i < np->num_tx_rings; i++) { + struct tx_ring_info *rp = &np->tx_rings[i]; + + rp->np = np; + rp->tx_channel = first_tx_channel + i; + + err = niu_alloc_tx_ring_info(np, rp); + if (err) + goto out_err; + } + + return 0; + +out_err: + niu_free_channels(np); + return err; +} + +static int niu_tx_cs_sng_poll(struct niu *np, int channel) +{ + int limit = 1000; + + while (--limit > 0) { + u64 val = nr64(TX_CS(channel)); + if (val & TX_CS_SNG_STATE) + return 0; + } + return -ENODEV; +} + +static int niu_tx_channel_stop(struct niu *np, int channel) +{ + u64 val = nr64(TX_CS(channel)); + + val |= TX_CS_STOP_N_GO; + nw64(TX_CS(channel), val); + + return niu_tx_cs_sng_poll(np, channel); +} + +static int niu_tx_cs_reset_poll(struct niu *np, int channel) +{ + int limit = 1000; + + while (--limit > 0) { + u64 val = nr64(TX_CS(channel)); + if (!(val & TX_CS_RST)) + return 0; + } + return -ENODEV; +} + +static int niu_tx_channel_reset(struct niu *np, int channel) +{ + u64 val = nr64(TX_CS(channel)); + int err; + + val |= TX_CS_RST; + nw64(TX_CS(channel), val); + + err = niu_tx_cs_reset_poll(np, channel); + if (!err) + nw64(TX_RING_KICK(channel), 0); + + return err; +} + +static int niu_tx_channel_lpage_init(struct niu *np, int channel) +{ + u64 val; + + nw64(TX_LOG_MASK1(channel), 0); + nw64(TX_LOG_VAL1(channel), 0); + nw64(TX_LOG_MASK2(channel), 0); + nw64(TX_LOG_VAL2(channel), 0); + nw64(TX_LOG_PAGE_RELO1(channel), 0); + nw64(TX_LOG_PAGE_RELO2(channel), 0); + nw64(TX_LOG_PAGE_HDL(channel), 0); + + val = (u64)np->port << TX_LOG_PAGE_VLD_FUNC_SHIFT; + val |= (TX_LOG_PAGE_VLD_PAGE0 | TX_LOG_PAGE_VLD_PAGE1); + nw64(TX_LOG_PAGE_VLD(channel), val); + + /* XXX TXDMA 32bit mode? 
XXX */ + + return 0; +} + +static void niu_txc_enable_port(struct niu *np, int on) +{ + unsigned long flags; + u64 val, mask; + + niu_lock_parent(np, flags); + val = nr64(TXC_CONTROL); + mask = (u64)1 << np->port; + if (on) { + val |= TXC_CONTROL_ENABLE | mask; + } else { + val &= ~mask; + if ((val & ~TXC_CONTROL_ENABLE) == 0) + val &= ~TXC_CONTROL_ENABLE; + } + nw64(TXC_CONTROL, val); + niu_unlock_parent(np, flags); +} + +static void niu_txc_set_imask(struct niu *np, u64 imask) +{ + unsigned long flags; + u64 val; + + niu_lock_parent(np, flags); + val = nr64(TXC_INT_MASK); + val &= ~TXC_INT_MASK_VAL(np->port); + val |= (imask << TXC_INT_MASK_VAL_SHIFT(np->port)); + niu_unlock_parent(np, flags); +} + +static void niu_txc_port_dma_enable(struct niu *np, int on) +{ + u64 val = 0; + + if (on) { + int i; + + for (i = 0; i < np->num_tx_rings; i++) + val |= (1 << np->tx_rings[i].tx_channel); + } + nw64(TXC_PORT_DMA(np->port), val); +} + +static int niu_init_one_tx_channel(struct niu *np, struct tx_ring_info *rp) +{ + int err, channel = rp->tx_channel; + u64 val, ring_len; + + err = niu_tx_channel_stop(np, channel); + if (err) + return err; + + err = niu_tx_channel_reset(np, channel); + if (err) + return err; + + err = niu_tx_channel_lpage_init(np, channel); + if (err) + return err; + + nw64(TXC_DMA_MAX(channel), rp->max_burst); + nw64(TX_ENT_MSK(channel), 0); + + if (rp->descr_dma & ~(TX_RNG_CFIG_STADDR_BASE | + TX_RNG_CFIG_STADDR)) { + dev_err(np->device, PFX "%s: TX ring channel %d " + "DMA addr (%llx) is not aligned.\n", + np->dev->name, channel, + (unsigned long long) rp->descr_dma); + return -EINVAL; + } + + /* The length field in TX_RNG_CFIG is measured in 64-byte + * blocks. rp->pending is the number of TX descriptors in + * our ring, 8 bytes each, thus we divide by 8 bytes more + * to get the proper value the chip wants. 
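+ * For example, a 256-entry ring occupies 256 * 8 = 2048 bytes, i.e. 2048 / 64 = 32 blocks, which equals 256 / 8.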
+ */ + ring_len = (rp->pending / 8); + + val = ((ring_len << TX_RNG_CFIG_LEN_SHIFT) | + rp->descr_dma); + nw64(TX_RNG_CFIG(channel), val); + + if (((rp->mbox_dma >> 32) & ~TXDMA_MBH_MBADDR) || + ((u32)rp->mbox_dma & ~TXDMA_MBL_MBADDR)) { + dev_err(np->device, PFX "%s: TX ring channel %d " + "MBOX addr (%llx) is has illegal bits.\n", + np->dev->name, channel, + (unsigned long long) rp->mbox_dma); + return -EINVAL; + } + nw64(TXDMA_MBH(channel), rp->mbox_dma >> 32); + nw64(TXDMA_MBL(channel), rp->mbox_dma & TXDMA_MBL_MBADDR); + + nw64(TX_CS(channel), 0); + + rp->last_pkt_cnt = 0; + + return 0; +} + +static void niu_init_rdc_groups(struct niu *np) +{ + struct niu_rdc_tables *tp = &np->parent->rdc_group_cfg[np->port]; + int i, first_table_num = tp->first_table_num; + + for (i = 0; i < tp->num_tables; i++) { + struct rdc_table *tbl = &tp->tables[i]; + int this_table = first_table_num + i; + int slot; + + for (slot = 0; slot < NIU_RDC_TABLE_SLOTS; slot++) + nw64(RDC_TBL(this_table, slot), + tbl->rxdma_channel[slot]); + } + + nw64(DEF_RDC(np->port), np->parent->rdc_default[np->port]); +} + +static void niu_init_drr_weight(struct niu *np) +{ + int type = phy_decode(np->parent->port_phy, np->port); + u64 val; + + switch (type) { + case PORT_TYPE_10G: + val = PT_DRR_WEIGHT_DEFAULT_10G; + break; + + case PORT_TYPE_1G: + default: + val = PT_DRR_WEIGHT_DEFAULT_1G; + break; + } + nw64(PT_DRR_WT(np->port), val); +} + +static int niu_init_hostinfo(struct niu *np) +{ + struct niu_parent *parent = np->parent; + struct niu_rdc_tables *tp = &parent->rdc_group_cfg[np->port]; + int i, err, num_alt = niu_num_alt_addr(np); + int first_rdc_table = tp->first_table_num; + + err = niu_set_primary_mac_rdc_table(np, first_rdc_table, 1); + if (err) + return err; + + err = niu_set_multicast_mac_rdc_table(np, first_rdc_table, 1); + if (err) + return err; + + for (i = 0; i < num_alt; i++) { + err = niu_set_alt_mac_rdc_table(np, i, first_rdc_table, 1); + if (err) + return err; + } + + return 0; +} + +static int niu_rx_channel_reset(struct niu *np, int channel) +{ + return niu_set_and_wait_clear(np, RXDMA_CFIG1(channel), + RXDMA_CFIG1_RST, 1000, 10, + "RXDMA_CFIG1"); +} + +static int niu_rx_channel_lpage_init(struct niu *np, int channel) +{ + u64 val; + + nw64(RX_LOG_MASK1(channel), 0); + nw64(RX_LOG_VAL1(channel), 0); + nw64(RX_LOG_MASK2(channel), 0); + nw64(RX_LOG_VAL2(channel), 0); + nw64(RX_LOG_PAGE_RELO1(channel), 0); + nw64(RX_LOG_PAGE_RELO2(channel), 0); + nw64(RX_LOG_PAGE_HDL(channel), 0); + + val = (u64)np->port << RX_LOG_PAGE_VLD_FUNC_SHIFT; + val |= (RX_LOG_PAGE_VLD_PAGE0 | RX_LOG_PAGE_VLD_PAGE1); + nw64(RX_LOG_PAGE_VLD(channel), val); + + return 0; +} + +static void niu_rx_channel_wred_init(struct niu *np, struct rx_ring_info *rp) +{ + u64 val; + + val = (((u64)rp->nonsyn_window << RDC_RED_PARA_WIN_SHIFT) | + ((u64)rp->nonsyn_threshold << RDC_RED_PARA_THRE_SHIFT) | + ((u64)rp->syn_window << RDC_RED_PARA_WIN_SYN_SHIFT) | + ((u64)rp->syn_threshold << RDC_RED_PARA_THRE_SYN_SHIFT)); + nw64(RDC_RED_PARA(rp->rx_channel), val); +} + +static int niu_compute_rbr_cfig_b(struct rx_ring_info *rp, u64 *ret) +{ + u64 val = 0; + + switch (rp->rbr_block_size) { + case 4 * 1024: + val |= (RBR_BLKSIZE_4K << RBR_CFIG_B_BLKSIZE_SHIFT); + break; + case 8 * 1024: + val |= (RBR_BLKSIZE_8K << RBR_CFIG_B_BLKSIZE_SHIFT); + break; + case 16 * 1024: + val |= (RBR_BLKSIZE_16K << RBR_CFIG_B_BLKSIZE_SHIFT); + break; + case 32 * 1024: + val |= (RBR_BLKSIZE_32K << RBR_CFIG_B_BLKSIZE_SHIFT); + break; + default: + return -EINVAL; + } + val |= 
RBR_CFIG_B_VLD2; + switch (rp->rbr_sizes[2]) { + case 2 * 1024: + val |= (RBR_BUFSZ2_2K << RBR_CFIG_B_BUFSZ2_SHIFT); + break; + case 4 * 1024: + val |= (RBR_BUFSZ2_4K << RBR_CFIG_B_BUFSZ2_SHIFT); + break; + case 8 * 1024: + val |= (RBR_BUFSZ2_8K << RBR_CFIG_B_BUFSZ2_SHIFT); + break; + case 16 * 1024: + val |= (RBR_BUFSZ2_16K << RBR_CFIG_B_BUFSZ2_SHIFT); + break; + + default: + return -EINVAL; + } + val |= RBR_CFIG_B_VLD1; + switch (rp->rbr_sizes[1]) { + case 1 * 1024: + val |= (RBR_BUFSZ1_1K << RBR_CFIG_B_BUFSZ1_SHIFT); + break; + case 2 * 1024: + val |= (RBR_BUFSZ1_2K << RBR_CFIG_B_BUFSZ1_SHIFT); + break; + case 4 * 1024: + val |= (RBR_BUFSZ1_4K << RBR_CFIG_B_BUFSZ1_SHIFT); + break; + case 8 * 1024: + val |= (RBR_BUFSZ1_8K << RBR_CFIG_B_BUFSZ1_SHIFT); + break; + + default: + return -EINVAL; + } + val |= RBR_CFIG_B_VLD0; + switch (rp->rbr_sizes[0]) { + case 256: + val |= (RBR_BUFSZ0_256 << RBR_CFIG_B_BUFSZ0_SHIFT); + break; + case 512: + val |= (RBR_BUFSZ0_512 << RBR_CFIG_B_BUFSZ0_SHIFT); + break; + case 1 * 1024: + val |= (RBR_BUFSZ0_1K << RBR_CFIG_B_BUFSZ0_SHIFT); + break; + case 2 * 1024: + val |= (RBR_BUFSZ0_2K << RBR_CFIG_B_BUFSZ0_SHIFT); + break; + + default: + return -EINVAL; + } + + *ret = val; + return 0; +} + +static int niu_enable_rx_channel(struct niu *np, int channel, int on) +{ + u64 val = nr64(RXDMA_CFIG1(channel)); + int limit; + + if (on) + val |= RXDMA_CFIG1_EN; + else + val &= ~RXDMA_CFIG1_EN; + nw64(RXDMA_CFIG1(channel), val); + + limit = 1000; + while (--limit > 0) { + if (nr64(RXDMA_CFIG1(channel)) & RXDMA_CFIG1_QST) + break; + udelay(10); + } + if (limit <= 0) + return -ENODEV; + return 0; +} + +static int niu_init_one_rx_channel(struct niu *np, struct rx_ring_info *rp) +{ + int err, channel = rp->rx_channel; + u64 val; + + err = niu_rx_channel_reset(np, channel); + if (err) + return err; + + err = niu_rx_channel_lpage_init(np, channel); + if (err) + return err; + + niu_rx_channel_wred_init(np, rp); + + nw64(RX_DMA_ENT_MSK(channel), RX_DMA_ENT_MSK_RBR_EMPTY); + nw64(RX_DMA_CTL_STAT(channel), + (RX_DMA_CTL_STAT_MEX | + RX_DMA_CTL_STAT_RCRTHRES | + RX_DMA_CTL_STAT_RCRTO | + RX_DMA_CTL_STAT_RBR_EMPTY)); + nw64(RXDMA_CFIG1(channel), rp->mbox_dma >> 32); + nw64(RXDMA_CFIG2(channel), (rp->mbox_dma & 0x00000000ffffffc0)); + nw64(RBR_CFIG_A(channel), + ((u64)rp->rbr_table_size << RBR_CFIG_A_LEN_SHIFT) | + (rp->rbr_dma & (RBR_CFIG_A_STADDR_BASE | RBR_CFIG_A_STADDR))); + err = niu_compute_rbr_cfig_b(rp, &val); + if (err) + return err; + nw64(RBR_CFIG_B(channel), val); + nw64(RCRCFIG_A(channel), + ((u64)rp->rcr_table_size << RCRCFIG_A_LEN_SHIFT) | + (rp->rcr_dma & (RCRCFIG_A_STADDR_BASE | RCRCFIG_A_STADDR))); + nw64(RCRCFIG_B(channel), + ((u64)rp->rcr_pkt_threshold << RCRCFIG_B_PTHRES_SHIFT) | + RCRCFIG_B_ENTOUT | + ((u64)rp->rcr_timeout << RCRCFIG_B_TIMEOUT_SHIFT)); + + err = niu_enable_rx_channel(np, channel, 1); + if (err) + return err; + + nw64(RBR_KICK(channel), rp->rbr_index); + + val = nr64(RX_DMA_CTL_STAT(channel)); + val |= RX_DMA_CTL_STAT_RBR_EMPTY; + nw64(RX_DMA_CTL_STAT(channel), val); + + return 0; +} + +static int niu_init_rx_channels(struct niu *np) +{ + unsigned long flags; + u64 seed = jiffies_64; + int err, i; + + niu_lock_parent(np, flags); + nw64(RX_DMA_CK_DIV, np->parent->rxdma_clock_divider); + nw64(RED_RAN_INIT, RED_RAN_INIT_OPMODE | (seed & RED_RAN_INIT_VAL)); + niu_unlock_parent(np, flags); + + /* XXX RXDMA 32bit mode? 
XXX */ + + niu_init_rdc_groups(np); + niu_init_drr_weight(np); + + err = niu_init_hostinfo(np); + if (err) + return err; + + for (i = 0; i < np->num_rx_rings; i++) { + struct rx_ring_info *rp = &np->rx_rings[i]; + + err = niu_init_one_rx_channel(np, rp); + if (err) + return err; + } + + return 0; +} + +static int niu_set_ip_frag_rule(struct niu *np) +{ + struct niu_parent *parent = np->parent; + struct niu_classifier *cp = &np->clas; + struct niu_tcam_entry *tp; + int index, err; + + /* XXX fix this allocation scheme XXX */ + index = cp->tcam_index; + tp = &parent->tcam[index]; + + /* Note that the noport bit is the same in both ipv4 and + * ipv6 format TCAM entries. + */ + memset(tp, 0, sizeof(*tp)); + tp->key[1] = TCAM_V4KEY1_NOPORT; + tp->key_mask[1] = TCAM_V4KEY1_NOPORT; + tp->assoc_data = (TCAM_ASSOCDATA_TRES_USE_OFFSET | + ((u64)0 << TCAM_ASSOCDATA_OFFSET_SHIFT)); + err = tcam_write(np, index, tp->key, tp->key_mask); + if (err) + return err; + err = tcam_assoc_write(np, index, tp->assoc_data); + if (err) + return err; + + return 0; +} + +static int niu_init_classifier_hw(struct niu *np) +{ + struct niu_parent *parent = np->parent; + struct niu_classifier *cp = &np->clas; + int i, err; + + nw64(H1POLY, cp->h1_init); + nw64(H2POLY, cp->h2_init); + + err = niu_init_hostinfo(np); + if (err) + return err; + + for (i = 0; i < ENET_VLAN_TBL_NUM_ENTRIES; i++) { + struct niu_vlan_rdc *vp = &cp->vlan_mappings[i]; + + vlan_tbl_write(np, i, np->port, + vp->vlan_pref, vp->rdc_num); + } + + for (i = 0; i < cp->num_alt_mac_mappings; i++) { + struct niu_altmac_rdc *ap = &cp->alt_mac_mappings[i]; + + err = niu_set_alt_mac_rdc_table(np, ap->alt_mac_num, + ap->rdc_num, ap->mac_pref); + if (err) + return err; + } + + for (i = CLASS_CODE_USER_PROG1; i <= CLASS_CODE_SCTP_IPV6; i++) { + int index = i - CLASS_CODE_USER_PROG1; + + err = niu_set_tcam_key(np, i, parent->tcam_key[index]); + if (err) + return err; + err = niu_set_flow_key(np, i, parent->flow_key[index]); + if (err) + return err; + } + + err = niu_set_ip_frag_rule(np); + if (err) + return err; + + tcam_enable(np, 1); + + return 0; +} + +static int niu_zcp_write(struct niu *np, int index, u64 *data) +{ + nw64(ZCP_RAM_DATA0, data[0]); + nw64(ZCP_RAM_DATA1, data[1]); + nw64(ZCP_RAM_DATA2, data[2]); + nw64(ZCP_RAM_DATA3, data[3]); + nw64(ZCP_RAM_DATA4, data[4]); + nw64(ZCP_RAM_BE, ZCP_RAM_BE_VAL); + nw64(ZCP_RAM_ACC, + (ZCP_RAM_ACC_WRITE | + (0 << ZCP_RAM_ACC_ZFCID_SHIFT) | + (ZCP_RAM_SEL_CFIFO(np->port) << ZCP_RAM_ACC_RAM_SEL_SHIFT))); + + return niu_wait_bits_clear(np, ZCP_RAM_ACC, ZCP_RAM_ACC_BUSY, + 1000, 100); +} + +static int niu_zcp_read(struct niu *np, int index, u64 *data) +{ + int err; + + err = niu_wait_bits_clear(np, ZCP_RAM_ACC, ZCP_RAM_ACC_BUSY, + 1000, 100); + if (err) { + dev_err(np->device, PFX "%s: ZCP read busy won't clear, " + "ZCP_RAM_ACC[%llx]\n", np->dev->name, + (unsigned long long) nr64(ZCP_RAM_ACC)); + return err; + } + + nw64(ZCP_RAM_ACC, + (ZCP_RAM_ACC_READ | + (0 << ZCP_RAM_ACC_ZFCID_SHIFT) | + (ZCP_RAM_SEL_CFIFO(np->port) << ZCP_RAM_ACC_RAM_SEL_SHIFT))); + + err = niu_wait_bits_clear(np, ZCP_RAM_ACC, ZCP_RAM_ACC_BUSY, + 1000, 100); + if (err) { + dev_err(np->device, PFX "%s: ZCP read busy2 won't clear, " + "ZCP_RAM_ACC[%llx]\n", np->dev->name, + (unsigned long long) nr64(ZCP_RAM_ACC)); + return err; + } + + data[0] = nr64(ZCP_RAM_DATA0); + data[1] = nr64(ZCP_RAM_DATA1); + data[2] = nr64(ZCP_RAM_DATA2); + data[3] = nr64(ZCP_RAM_DATA3); + data[4] = nr64(ZCP_RAM_DATA4); + + return 0; +} + +static void 
niu_zcp_cfifo_reset(struct niu *np) +{ + u64 val = nr64(RESET_CFIFO); + + val |= RESET_CFIFO_RST(np->port); + nw64(RESET_CFIFO, val); + udelay(10); + + val &= ~RESET_CFIFO_RST(np->port); + nw64(RESET_CFIFO, val); +} + +static int niu_init_zcp(struct niu *np) +{ + u64 data[5], rbuf[5]; + int i, max, err; + + if (np->parent->plat_type != PLAT_TYPE_NIU) { + if (np->port == 0 || np->port == 1) + max = ATLAS_P0_P1_CFIFO_ENTRIES; + else + max = ATLAS_P2_P3_CFIFO_ENTRIES; + } else + max = NIU_CFIFO_ENTRIES; + + data[0] = 0; + data[1] = 0; + data[2] = 0; + data[3] = 0; + data[4] = 0; + + for (i = 0; i < max; i++) { + err = niu_zcp_write(np, i, data); + if (err) + return err; + err = niu_zcp_read(np, i, rbuf); + if (err) + return err; + } + + niu_zcp_cfifo_reset(np); + nw64(CFIFO_ECC(np->port), 0); + nw64(ZCP_INT_STAT, ZCP_INT_STAT_ALL); + (void) nr64(ZCP_INT_STAT); + nw64(ZCP_INT_MASK, ZCP_INT_MASK_ALL); + + return 0; +} + +static void niu_ipp_write(struct niu *np, int index, u64 *data) +{ + u64 val = nr64_ipp(IPP_CFIG); + + nw64_ipp(IPP_CFIG, val | IPP_CFIG_DFIFO_PIO_W); + nw64_ipp(IPP_DFIFO_WR_PTR, index); + nw64_ipp(IPP_DFIFO_WR0, data[0]); + nw64_ipp(IPP_DFIFO_WR1, data[1]); + nw64_ipp(IPP_DFIFO_WR2, data[2]); + nw64_ipp(IPP_DFIFO_WR3, data[3]); + nw64_ipp(IPP_DFIFO_WR4, data[4]); + nw64_ipp(IPP_CFIG, val & ~IPP_CFIG_DFIFO_PIO_W); +} + +static void niu_ipp_read(struct niu *np, int index, u64 *data) +{ + nw64_ipp(IPP_DFIFO_RD_PTR, index); + data[0] = nr64_ipp(IPP_DFIFO_RD0); + data[1] = nr64_ipp(IPP_DFIFO_RD1); + data[2] = nr64_ipp(IPP_DFIFO_RD2); + data[3] = nr64_ipp(IPP_DFIFO_RD3); + data[4] = nr64_ipp(IPP_DFIFO_RD4); +} + +static int niu_ipp_reset(struct niu *np) +{ + return niu_set_and_wait_clear_ipp(np, IPP_CFIG, IPP_CFIG_SOFT_RST, + 1000, 100, "IPP_CFIG"); +} + +static int niu_init_ipp(struct niu *np) +{ + u64 data[5], rbuf[5], val; + int i, max, err; + + if (np->parent->plat_type != PLAT_TYPE_NIU) { + if (np->port == 0 || np->port == 1) + max = ATLAS_P0_P1_DFIFO_ENTRIES; + else + max = ATLAS_P2_P3_DFIFO_ENTRIES; + } else + max = NIU_DFIFO_ENTRIES; + + data[0] = 0; + data[1] = 0; + data[2] = 0; + data[3] = 0; + data[4] = 0; + + for (i = 0; i < max; i++) { + niu_ipp_write(np, i, data); + niu_ipp_read(np, i, rbuf); + } + + (void) nr64_ipp(IPP_INT_STAT); + (void) nr64_ipp(IPP_INT_STAT); + + err = niu_ipp_reset(np); + if (err) + return err; + + (void) nr64_ipp(IPP_PKT_DIS); + (void) nr64_ipp(IPP_BAD_CS_CNT); + (void) nr64_ipp(IPP_ECC); + + (void) nr64_ipp(IPP_INT_STAT); + + nw64_ipp(IPP_MSK, ~IPP_MSK_ALL); + + val = nr64_ipp(IPP_CFIG); + val &= ~IPP_CFIG_IP_MAX_PKT; + val |= (IPP_CFIG_IPP_ENABLE | + IPP_CFIG_DFIFO_ECC_EN | + IPP_CFIG_DROP_BAD_CRC | + IPP_CFIG_CKSUM_EN | + (0x1ffff << IPP_CFIG_IP_MAX_PKT_SHIFT)); + nw64_ipp(IPP_CFIG, val); + + return 0; +} + +static void niu_init_xif_xmac(struct niu *np) +{ + struct niu_link_config *lp = &np->link_config; + u64 val; + + val = nr64_mac(XMAC_CONFIG); + + if ((np->flags & NIU_FLAGS_10G) != 0 && + (np->flags & NIU_FLAGS_FIBER) != 0) { + if (netif_carrier_ok(np->dev)) { + val |= XMAC_CONFIG_LED_POLARITY; + val &= ~XMAC_CONFIG_FORCE_LED_ON; + } else { + val |= XMAC_CONFIG_FORCE_LED_ON; + val &= ~XMAC_CONFIG_LED_POLARITY; + } + } + + val &= ~XMAC_CONFIG_SEL_POR_CLK_SRC; + + val |= XMAC_CONFIG_TX_OUTPUT_EN; + + if (lp->loopback_mode == LOOPBACK_MAC) { + val &= ~XMAC_CONFIG_SEL_POR_CLK_SRC; + val |= XMAC_CONFIG_LOOPBACK; + } else { + val &= ~XMAC_CONFIG_LOOPBACK; + } + + if (np->flags & NIU_FLAGS_10G) { + val &= ~XMAC_CONFIG_LFS_DISABLE; + } else { + 
val |= XMAC_CONFIG_LFS_DISABLE; + if (!(np->flags & NIU_FLAGS_FIBER)) + val |= XMAC_CONFIG_1G_PCS_BYPASS; + else + val &= ~XMAC_CONFIG_1G_PCS_BYPASS; + } + + val &= ~XMAC_CONFIG_10G_XPCS_BYPASS; + + if (lp->active_speed == SPEED_100) + val |= XMAC_CONFIG_SEL_CLK_25MHZ; + else + val &= ~XMAC_CONFIG_SEL_CLK_25MHZ; + + nw64_mac(XMAC_CONFIG, val); + + val = nr64_mac(XMAC_CONFIG); + val &= ~XMAC_CONFIG_MODE_MASK; + if (np->flags & NIU_FLAGS_10G) { + val |= XMAC_CONFIG_MODE_XGMII; + } else { + if (lp->active_speed == SPEED_100) + val |= XMAC_CONFIG_MODE_MII; + else + val |= XMAC_CONFIG_MODE_GMII; + } + + nw64_mac(XMAC_CONFIG, val); +} + +static void niu_init_xif_bmac(struct niu *np) +{ + struct niu_link_config *lp = &np->link_config; + u64 val; + + val = BMAC_XIF_CONFIG_TX_OUTPUT_EN; + + if (lp->loopback_mode == LOOPBACK_MAC) + val |= BMAC_XIF_CONFIG_MII_LOOPBACK; + else + val &= ~BMAC_XIF_CONFIG_MII_LOOPBACK; + + if (lp->active_speed == SPEED_1000) + val |= BMAC_XIF_CONFIG_GMII_MODE; + else + val &= ~BMAC_XIF_CONFIG_GMII_MODE; + + val &= ~(BMAC_XIF_CONFIG_LINK_LED | + BMAC_XIF_CONFIG_LED_POLARITY); + + if (!(np->flags & NIU_FLAGS_10G) && + !(np->flags & NIU_FLAGS_FIBER) && + lp->active_speed == SPEED_100) + val |= BMAC_XIF_CONFIG_25MHZ_CLOCK; + else + val &= ~BMAC_XIF_CONFIG_25MHZ_CLOCK; + + nw64_mac(BMAC_XIF_CONFIG, val); +} + +static void niu_init_xif(struct niu *np) +{ + if (np->flags & NIU_FLAGS_XMAC) + niu_init_xif_xmac(np); + else + niu_init_xif_bmac(np); +} + +static void niu_pcs_mii_reset(struct niu *np) +{ + u64 val = nr64_pcs(PCS_MII_CTL); + val |= PCS_MII_CTL_RST; + nw64_pcs(PCS_MII_CTL, val); +} + +static void niu_xpcs_reset(struct niu *np) +{ + u64 val = nr64_xpcs(XPCS_CONTROL1); + val |= XPCS_CONTROL1_RESET; + nw64_xpcs(XPCS_CONTROL1, val); +} + +static int niu_init_pcs(struct niu *np) +{ + struct niu_link_config *lp = &np->link_config; + u64 val; + + switch (np->flags & (NIU_FLAGS_10G | NIU_FLAGS_FIBER)) { + case NIU_FLAGS_FIBER: + /* 1G fiber */ + nw64_pcs(PCS_CONF, PCS_CONF_MASK | PCS_CONF_ENABLE); + nw64_pcs(PCS_DPATH_MODE, 0); + niu_pcs_mii_reset(np); + break; + + case NIU_FLAGS_10G: + case NIU_FLAGS_10G | NIU_FLAGS_FIBER: + if (!(np->flags & NIU_FLAGS_XMAC)) + return -EINVAL; + + /* 10G copper or fiber */ + val = nr64_mac(XMAC_CONFIG); + val &= ~XMAC_CONFIG_10G_XPCS_BYPASS; + nw64_mac(XMAC_CONFIG, val); + + niu_xpcs_reset(np); + + val = nr64_xpcs(XPCS_CONTROL1); + if (lp->loopback_mode == LOOPBACK_PHY) + val |= XPCS_CONTROL1_LOOPBACK; + else + val &= ~XPCS_CONTROL1_LOOPBACK; + nw64_xpcs(XPCS_CONTROL1, val); + + nw64_xpcs(XPCS_DESKEW_ERR_CNT, 0); + (void) nr64_xpcs(XPCS_SYMERR_CNT01); + (void) nr64_xpcs(XPCS_SYMERR_CNT23); + break; + + case 0: + /* 1G copper */ + nw64_pcs(PCS_DPATH_MODE, PCS_DPATH_MODE_MII); + niu_pcs_mii_reset(np); + break; + + default: + return -EINVAL; + } + + return 0; +} + +static int niu_reset_tx_xmac(struct niu *np) +{ + return niu_set_and_wait_clear_mac(np, XTXMAC_SW_RST, + (XTXMAC_SW_RST_REG_RS | + XTXMAC_SW_RST_SOFT_RST), + 1000, 100, "XTXMAC_SW_RST"); +} + +static int niu_reset_tx_bmac(struct niu *np) +{ + int limit; + + nw64_mac(BTXMAC_SW_RST, BTXMAC_SW_RST_RESET); + limit = 1000; + while (--limit >= 0) { + if (!(nr64_mac(BTXMAC_SW_RST) & BTXMAC_SW_RST_RESET)) + break; + udelay(100); + } + if (limit < 0) { + dev_err(np->device, PFX "Port %u TX BMAC would not reset, " + "BTXMAC_SW_RST[%llx]\n", + np->port, + (unsigned long long) nr64_mac(BTXMAC_SW_RST)); + return -ENODEV; + } + + return 0; +} + +static int niu_reset_tx_mac(struct niu *np) +{ + if 
(np->flags & NIU_FLAGS_XMAC) + return niu_reset_tx_xmac(np); + else + return niu_reset_tx_bmac(np); +} + +static void niu_init_tx_xmac(struct niu *np, u64 min, u64 max) +{ + u64 val; + + val = nr64_mac(XMAC_MIN); + val &= ~(XMAC_MIN_TX_MIN_PKT_SIZE | + XMAC_MIN_RX_MIN_PKT_SIZE); + val |= (min << XMAC_MIN_RX_MIN_PKT_SIZE_SHFT); + val |= (min << XMAC_MIN_TX_MIN_PKT_SIZE_SHFT); + nw64_mac(XMAC_MIN, val); + + nw64_mac(XMAC_MAX, max); + + nw64_mac(XTXMAC_STAT_MSK, ~(u64)0); + + val = nr64_mac(XMAC_IPG); + if (np->flags & NIU_FLAGS_10G) { + val &= ~XMAC_IPG_IPG_XGMII; + val |= (IPG_12_15_XGMII << XMAC_IPG_IPG_XGMII_SHIFT); + } else { + val &= ~XMAC_IPG_IPG_MII_GMII; + val |= (IPG_12_MII_GMII << XMAC_IPG_IPG_MII_GMII_SHIFT); + } + nw64_mac(XMAC_IPG, val); + + val = nr64_mac(XMAC_CONFIG); + val &= ~(XMAC_CONFIG_ALWAYS_NO_CRC | + XMAC_CONFIG_STRETCH_MODE | + XMAC_CONFIG_VAR_MIN_IPG_EN | + XMAC_CONFIG_TX_ENABLE); + nw64_mac(XMAC_CONFIG, val); + + nw64_mac(TXMAC_FRM_CNT, 0); + nw64_mac(TXMAC_BYTE_CNT, 0); +} + +static void niu_init_tx_bmac(struct niu *np, u64 min, u64 max) +{ + u64 val; + + nw64_mac(BMAC_MIN_FRAME, min); + nw64_mac(BMAC_MAX_FRAME, max); + + nw64_mac(BTXMAC_STATUS_MASK, ~(u64)0); + nw64_mac(BMAC_CTRL_TYPE, 0x8808); + nw64_mac(BMAC_PREAMBLE_SIZE, 7); + + val = nr64_mac(BTXMAC_CONFIG); + val &= ~(BTXMAC_CONFIG_FCS_DISABLE | + BTXMAC_CONFIG_ENABLE); + nw64_mac(BTXMAC_CONFIG, val); +} + +static void niu_init_tx_mac(struct niu *np) +{ + u64 min, max; + + min = 64; + if (np->dev->mtu > ETH_DATA_LEN) + max = 9216; + else + max = 1522; + + /* The XMAC_MIN register only accepts values for TX min which + * have the low 3 bits cleared. + */ + BUILD_BUG_ON(min & 0x7); + + if (np->flags & NIU_FLAGS_XMAC) + niu_init_tx_xmac(np, min, max); + else + niu_init_tx_bmac(np, min, max); +} + +static int niu_reset_rx_xmac(struct niu *np) +{ + int limit; + + nw64_mac(XRXMAC_SW_RST, + XRXMAC_SW_RST_REG_RS | XRXMAC_SW_RST_SOFT_RST); + limit = 1000; + while (--limit >= 0) { + if (!(nr64_mac(XRXMAC_SW_RST) & (XRXMAC_SW_RST_REG_RS | + XRXMAC_SW_RST_SOFT_RST))) + break; + udelay(100); + } + if (limit < 0) { + dev_err(np->device, PFX "Port %u RX XMAC would not reset, " + "XRXMAC_SW_RST[%llx]\n", + np->port, + (unsigned long long) nr64_mac(XRXMAC_SW_RST)); + return -ENODEV; + } + + return 0; +} + +static int niu_reset_rx_bmac(struct niu *np) +{ + int limit; + + nw64_mac(BRXMAC_SW_RST, BRXMAC_SW_RST_RESET); + limit = 1000; + while (--limit >= 0) { + if (!(nr64_mac(BRXMAC_SW_RST) & BRXMAC_SW_RST_RESET)) + break; + udelay(100); + } + if (limit < 0) { + dev_err(np->device, PFX "Port %u RX BMAC would not reset, " + "BRXMAC_SW_RST[%llx]\n", + np->port, + (unsigned long long) nr64_mac(BRXMAC_SW_RST)); + return -ENODEV; + } + + return 0; +} + +static int niu_reset_rx_mac(struct niu *np) +{ + if (np->flags & NIU_FLAGS_XMAC) + return niu_reset_rx_xmac(np); + else + return niu_reset_rx_bmac(np); +} + +static void niu_init_rx_xmac(struct niu *np) +{ + struct niu_parent *parent = np->parent; + struct niu_rdc_tables *tp = &parent->rdc_group_cfg[np->port]; + int first_rdc_table = tp->first_table_num; + unsigned long i; + u64 val; + + nw64_mac(XMAC_ADD_FILT0, 0); + nw64_mac(XMAC_ADD_FILT1, 0); + nw64_mac(XMAC_ADD_FILT2, 0); + nw64_mac(XMAC_ADD_FILT12_MASK, 0); + nw64_mac(XMAC_ADD_FILT00_MASK, 0); + for (i = 0; i < MAC_NUM_HASH; i++) + nw64_mac(XMAC_HASH_TBL(i), 0); + nw64_mac(XRXMAC_STAT_MSK, ~(u64)0); + niu_set_primary_mac_rdc_table(np, first_rdc_table, 1); + niu_set_multicast_mac_rdc_table(np, first_rdc_table, 1); + + val = 
nr64_mac(XMAC_CONFIG); + val &= ~(XMAC_CONFIG_RX_MAC_ENABLE | + XMAC_CONFIG_PROMISCUOUS | + XMAC_CONFIG_PROMISC_GROUP | + XMAC_CONFIG_ERR_CHK_DIS | + XMAC_CONFIG_RX_CRC_CHK_DIS | + XMAC_CONFIG_RESERVED_MULTICAST | + XMAC_CONFIG_RX_CODEV_CHK_DIS | + XMAC_CONFIG_ADDR_FILTER_EN | + XMAC_CONFIG_RCV_PAUSE_ENABLE | + XMAC_CONFIG_STRIP_CRC | + XMAC_CONFIG_PASS_FLOW_CTRL | + XMAC_CONFIG_MAC2IPP_PKT_CNT_EN); + val |= (XMAC_CONFIG_HASH_FILTER_EN); + nw64_mac(XMAC_CONFIG, val); + + nw64_mac(RXMAC_BT_CNT, 0); + nw64_mac(RXMAC_BC_FRM_CNT, 0); + nw64_mac(RXMAC_MC_FRM_CNT, 0); + nw64_mac(RXMAC_FRAG_CNT, 0); + nw64_mac(RXMAC_HIST_CNT1, 0); + nw64_mac(RXMAC_HIST_CNT2, 0); + nw64_mac(RXMAC_HIST_CNT3, 0); + nw64_mac(RXMAC_HIST_CNT4, 0); + nw64_mac(RXMAC_HIST_CNT5, 0); + nw64_mac(RXMAC_HIST_CNT6, 0); + nw64_mac(RXMAC_HIST_CNT7, 0); + nw64_mac(RXMAC_MPSZER_CNT, 0); + nw64_mac(RXMAC_CRC_ER_CNT, 0); + nw64_mac(RXMAC_CD_VIO_CNT, 0); + nw64_mac(LINK_FAULT_CNT, 0); +} + +static void niu_init_rx_bmac(struct niu *np) +{ + struct niu_parent *parent = np->parent; + struct niu_rdc_tables *tp = &parent->rdc_group_cfg[np->port]; + int first_rdc_table = tp->first_table_num; + unsigned long i; + u64 val; + + nw64_mac(BMAC_ADD_FILT0, 0); + nw64_mac(BMAC_ADD_FILT1, 0); + nw64_mac(BMAC_ADD_FILT2, 0); + nw64_mac(BMAC_ADD_FILT12_MASK, 0); + nw64_mac(BMAC_ADD_FILT00_MASK, 0); + for (i = 0; i < MAC_NUM_HASH; i++) + nw64_mac(BMAC_HASH_TBL(i), 0); + niu_set_primary_mac_rdc_table(np, first_rdc_table, 1); + niu_set_multicast_mac_rdc_table(np, first_rdc_table, 1); + nw64_mac(BRXMAC_STATUS_MASK, ~(u64)0); + + val = nr64_mac(BRXMAC_CONFIG); + val &= ~(BRXMAC_CONFIG_ENABLE | + BRXMAC_CONFIG_STRIP_PAD | + BRXMAC_CONFIG_STRIP_FCS | + BRXMAC_CONFIG_PROMISC | + BRXMAC_CONFIG_PROMISC_GRP | + BRXMAC_CONFIG_ADDR_FILT_EN | + BRXMAC_CONFIG_DISCARD_DIS); + val |= (BRXMAC_CONFIG_HASH_FILT_EN); + nw64_mac(BRXMAC_CONFIG, val); + + val = nr64_mac(BMAC_ADDR_CMPEN); + val |= BMAC_ADDR_CMPEN_EN0; + nw64_mac(BMAC_ADDR_CMPEN, val); +} + +static void niu_init_rx_mac(struct niu *np) +{ + niu_set_primary_mac(np, np->dev->dev_addr); + + if (np->flags & NIU_FLAGS_XMAC) + niu_init_rx_xmac(np); + else + niu_init_rx_bmac(np); +} + +static void niu_enable_tx_xmac(struct niu *np, int on) +{ + u64 val = nr64_mac(XMAC_CONFIG); + + if (on) + val |= XMAC_CONFIG_TX_ENABLE; + else + val &= ~XMAC_CONFIG_TX_ENABLE; + nw64_mac(XMAC_CONFIG, val); +} + +static void niu_enable_tx_bmac(struct niu *np, int on) +{ + u64 val = nr64_mac(BTXMAC_CONFIG); + + if (on) + val |= BTXMAC_CONFIG_ENABLE; + else + val &= ~BTXMAC_CONFIG_ENABLE; + nw64_mac(BTXMAC_CONFIG, val); +} + +static void niu_enable_tx_mac(struct niu *np, int on) +{ + if (np->flags & NIU_FLAGS_XMAC) + niu_enable_tx_xmac(np, on); + else + niu_enable_tx_bmac(np, on); +} + +static void niu_enable_rx_xmac(struct niu *np, int on) +{ + u64 val = nr64_mac(XMAC_CONFIG); + + val &= ~(XMAC_CONFIG_HASH_FILTER_EN | + XMAC_CONFIG_PROMISCUOUS); + + if (np->flags & NIU_FLAGS_MCAST) + val |= XMAC_CONFIG_HASH_FILTER_EN; + if (np->flags & NIU_FLAGS_PROMISC) + val |= XMAC_CONFIG_PROMISCUOUS; + + if (on) + val |= XMAC_CONFIG_RX_MAC_ENABLE; + else + val &= ~XMAC_CONFIG_RX_MAC_ENABLE; + nw64_mac(XMAC_CONFIG, val); +} + +static void niu_enable_rx_bmac(struct niu *np, int on) +{ + u64 val = nr64_mac(BRXMAC_CONFIG); + + val &= ~(BRXMAC_CONFIG_HASH_FILT_EN | + BRXMAC_CONFIG_PROMISC); + + if (np->flags & NIU_FLAGS_MCAST) + val |= BRXMAC_CONFIG_HASH_FILT_EN; + if (np->flags & NIU_FLAGS_PROMISC) + val |= BRXMAC_CONFIG_PROMISC; + + if (on) + val |= 
BRXMAC_CONFIG_ENABLE; + else + val &= ~BRXMAC_CONFIG_ENABLE; + nw64_mac(BRXMAC_CONFIG, val); +} + +static void niu_enable_rx_mac(struct niu *np, int on) +{ + if (np->flags & NIU_FLAGS_XMAC) + niu_enable_rx_xmac(np, on); + else + niu_enable_rx_bmac(np, on); +} + +static int niu_init_mac(struct niu *np) +{ + int err; + + niu_init_xif(np); + err = niu_init_pcs(np); + if (err) + return err; + + err = niu_reset_tx_mac(np); + if (err) + return err; + niu_init_tx_mac(np); + err = niu_reset_rx_mac(np); + if (err) + return err; + niu_init_rx_mac(np); + + /* This looks hokey but the RX MAC reset we just did will + * undo some of the state we set up in niu_init_tx_mac() so we + * have to call it again. In particular, the RX MAC reset will + * set the XMAC_MAX register back to its default value. + */ + niu_init_tx_mac(np); + niu_enable_tx_mac(np, 1); + + niu_enable_rx_mac(np, 1); + + return 0; +} + +static void niu_stop_one_tx_channel(struct niu *np, struct tx_ring_info *rp) +{ + (void) niu_tx_channel_stop(np, rp->tx_channel); +} + +static void niu_stop_tx_channels(struct niu *np) +{ + int i; + + for (i = 0; i < np->num_tx_rings; i++) { + struct tx_ring_info *rp = &np->tx_rings[i]; + + niu_stop_one_tx_channel(np, rp); + } +} + +static void niu_reset_one_tx_channel(struct niu *np, struct tx_ring_info *rp) +{ + (void) niu_tx_channel_reset(np, rp->tx_channel); +} + +static void niu_reset_tx_channels(struct niu *np) +{ + int i; + + for (i = 0; i < np->num_tx_rings; i++) { + struct tx_ring_info *rp = &np->tx_rings[i]; + + niu_reset_one_tx_channel(np, rp); + } +} + +static void niu_stop_one_rx_channel(struct niu *np, struct rx_ring_info *rp) +{ + (void) niu_enable_rx_channel(np, rp->rx_channel, 0); +} + +static void niu_stop_rx_channels(struct niu *np) +{ + int i; + + for (i = 0; i < np->num_rx_rings; i++) { + struct rx_ring_info *rp = &np->rx_rings[i]; + + niu_stop_one_rx_channel(np, rp); + } +} + +static void niu_reset_one_rx_channel(struct niu *np, struct rx_ring_info *rp) +{ + int channel = rp->rx_channel; + + (void) niu_rx_channel_reset(np, channel); + nw64(RX_DMA_ENT_MSK(channel), RX_DMA_ENT_MSK_ALL); + nw64(RX_DMA_CTL_STAT(channel), 0); + (void) niu_enable_rx_channel(np, channel, 0); +} + +static void niu_reset_rx_channels(struct niu *np) +{ + int i; + + for (i = 0; i < np->num_rx_rings; i++) { + struct rx_ring_info *rp = &np->rx_rings[i]; + + niu_reset_one_rx_channel(np, rp); + } +} + +static void niu_disable_ipp(struct niu *np) +{ + u64 rd, wr, val; + int limit; + + rd = nr64_ipp(IPP_DFIFO_RD_PTR); + wr = nr64_ipp(IPP_DFIFO_WR_PTR); + limit = 100; + while (--limit >= 0 && (rd != wr)) { + rd = nr64_ipp(IPP_DFIFO_RD_PTR); + wr = nr64_ipp(IPP_DFIFO_WR_PTR); + } + if (limit < 0 && + (rd != 0 && wr != 1)) { + dev_err(np->device, PFX "%s: IPP would not quiesce, " + "rd_ptr[%llx] wr_ptr[%llx]\n", + np->dev->name, + (unsigned long long) nr64_ipp(IPP_DFIFO_RD_PTR), + (unsigned long long) nr64_ipp(IPP_DFIFO_WR_PTR)); + } + + val = nr64_ipp(IPP_CFIG); + val &= ~(IPP_CFIG_IPP_ENABLE | + IPP_CFIG_DFIFO_ECC_EN | + IPP_CFIG_DROP_BAD_CRC | + IPP_CFIG_CKSUM_EN); + nw64_ipp(IPP_CFIG, val); + + (void) niu_ipp_reset(np); +} + +static int niu_init_hw(struct niu *np) +{ + int i, err; + + niudbg(IFUP, "%s: Initialize TXC\n", np->dev->name); + niu_txc_enable_port(np, 1); + niu_txc_port_dma_enable(np, 1); + niu_txc_set_imask(np, 0); + + niudbg(IFUP, "%s: Initialize TX channels\n", np->dev->name); + for (i = 0; i < np->num_tx_rings; i++) { + struct tx_ring_info *rp = &np->tx_rings[i]; + + err = niu_init_one_tx_channel(np, 
rp); + if (err) + return err; + } + + niudbg(IFUP, "%s: Initialize RX channels\n", np->dev->name); + err = niu_init_rx_channels(np); + if (err) + goto out_uninit_tx_channels; + + niudbg(IFUP, "%s: Initialize classifier\n", np->dev->name); + err = niu_init_classifier_hw(np); + if (err) + goto out_uninit_rx_channels; + + niudbg(IFUP, "%s: Initialize ZCP\n", np->dev->name); + err = niu_init_zcp(np); + if (err) + goto out_uninit_rx_channels; + + niudbg(IFUP, "%s: Initialize IPP\n", np->dev->name); + err = niu_init_ipp(np); + if (err) + goto out_uninit_rx_channels; + + niudbg(IFUP, "%s: Initialize MAC\n", np->dev->name); + err = niu_init_mac(np); + if (err) + goto out_uninit_ipp; + + return 0; + +out_uninit_ipp: + niudbg(IFUP, "%s: Uninit IPP\n", np->dev->name); + niu_disable_ipp(np); + +out_uninit_rx_channels: + niudbg(IFUP, "%s: Uninit RX channels\n", np->dev->name); + niu_stop_rx_channels(np); + niu_reset_rx_channels(np); + +out_uninit_tx_channels: + niudbg(IFUP, "%s: Uninit TX channels\n", np->dev->name); + niu_stop_tx_channels(np); + niu_reset_tx_channels(np); + + return err; +} + +static void niu_stop_hw(struct niu *np) +{ + niudbg(IFDOWN, "%s: Disable interrupts\n", np->dev->name); + niu_enable_interrupts(np, 0); + + niudbg(IFDOWN, "%s: Disable RX MAC\n", np->dev->name); + niu_enable_rx_mac(np, 0); + + niudbg(IFDOWN, "%s: Disable IPP\n", np->dev->name); + niu_disable_ipp(np); + + niudbg(IFDOWN, "%s: Stop TX channels\n", np->dev->name); + niu_stop_tx_channels(np); + + niudbg(IFDOWN, "%s: Stop RX channels\n", np->dev->name); + niu_stop_rx_channels(np); + + niudbg(IFDOWN, "%s: Reset TX channels\n", np->dev->name); + niu_reset_tx_channels(np); + + niudbg(IFDOWN, "%s: Reset RX channels\n", np->dev->name); + niu_reset_rx_channels(np); +} + +static int niu_request_irq(struct niu *np) +{ + int i, j, err; + + err = 0; + for (i = 0; i < np->num_ldg; i++) { + struct niu_ldg *lp = &np->ldg[i]; + + err = request_irq(lp->irq, niu_interrupt, + IRQF_SHARED | IRQF_SAMPLE_RANDOM, + np->dev->name, lp); + if (err) + goto out_free_irqs; + + } + + return 0; + +out_free_irqs: + for (j = 0; j < i; j++) { + struct niu_ldg *lp = &np->ldg[j]; + + free_irq(lp->irq, lp); + } + return err; +} + +static void niu_free_irq(struct niu *np) +{ + int i; + + for (i = 0; i < np->num_ldg; i++) { + struct niu_ldg *lp = &np->ldg[i]; + + free_irq(lp->irq, lp); + } +} + +static void niu_enable_napi(struct niu *np) +{ + int i; + + for (i = 0; i < np->num_ldg; i++) + napi_enable(&np->ldg[i].napi); +} + +static void niu_disable_napi(struct niu *np) +{ + int i; + + for (i = 0; i < np->num_ldg; i++) + napi_disable(&np->ldg[i].napi); +} + +static int niu_open(struct net_device *dev) +{ + struct niu *np = netdev_priv(dev); + int err; + + netif_carrier_off(dev); + + err = niu_alloc_channels(np); + if (err) + goto out_err; + + err = niu_enable_interrupts(np, 0); + if (err) + goto out_free_channels; + + err = niu_request_irq(np); + if (err) + goto out_free_channels; + + niu_enable_napi(np); + + spin_lock_irq(&np->lock); + + err = niu_init_hw(np); + if (!err) { + init_timer(&np->timer); + np->timer.expires = jiffies + HZ; + np->timer.data = (unsigned long) np; + np->timer.function = niu_timer; + + err = niu_enable_interrupts(np, 1); + if (err) + niu_stop_hw(np); + } + + spin_unlock_irq(&np->lock); + + if (err) { + niu_disable_napi(np); + goto out_free_irq; + } + + netif_start_queue(dev); + + if (np->link_config.loopback_mode != LOOPBACK_DISABLED) + netif_carrier_on(dev); + + add_timer(&np->timer); + + return 0; + +out_free_irq: + 
niu_free_irq(np); + +out_free_channels: + niu_free_channels(np); + +out_err: + return err; +} + +static void niu_full_shutdown(struct niu *np, struct net_device *dev) +{ + cancel_work_sync(&np->reset_task); + + niu_disable_napi(np); + netif_stop_queue(dev); + + del_timer_sync(&np->timer); + + spin_lock_irq(&np->lock); + + niu_stop_hw(np); + + spin_unlock_irq(&np->lock); +} + +static int niu_close(struct net_device *dev) +{ + struct niu *np = netdev_priv(dev); + + niu_full_shutdown(np, dev); + + niu_free_irq(np); + + niu_free_channels(np); + + return 0; +} + +static void niu_sync_xmac_stats(struct niu *np) +{ + struct niu_xmac_stats *mp = &np->mac_stats.xmac; + + mp->tx_frames += nr64_mac(TXMAC_FRM_CNT); + mp->tx_bytes += nr64_mac(TXMAC_BYTE_CNT); + + mp->rx_link_faults += nr64_mac(LINK_FAULT_CNT); + mp->rx_align_errors += nr64_mac(RXMAC_ALIGN_ERR_CNT); + mp->rx_frags += nr64_mac(RXMAC_FRAG_CNT); + mp->rx_mcasts += nr64_mac(RXMAC_MC_FRM_CNT); + mp->rx_bcasts += nr64_mac(RXMAC_BC_FRM_CNT); + mp->rx_hist_cnt1 += nr64_mac(RXMAC_HIST_CNT1); + mp->rx_hist_cnt2 += nr64_mac(RXMAC_HIST_CNT2); + mp->rx_hist_cnt3 += nr64_mac(RXMAC_HIST_CNT3); + mp->rx_hist_cnt4 += nr64_mac(RXMAC_HIST_CNT4); + mp->rx_hist_cnt5 += nr64_mac(RXMAC_HIST_CNT5); + mp->rx_hist_cnt6 += nr64_mac(RXMAC_HIST_CNT6); + mp->rx_hist_cnt7 += nr64_mac(RXMAC_HIST_CNT7); + mp->rx_octets += nr64_mac(RXMAC_BT_CNT); + mp->rx_code_violations += nr64_mac(RXMAC_CD_VIO_CNT); + mp->rx_len_errors += nr64_mac(RXMAC_MPSZER_CNT); + mp->rx_crc_errors += nr64_mac(RXMAC_CRC_ER_CNT); +} + +static void niu_sync_bmac_stats(struct niu *np) +{ + struct niu_bmac_stats *mp = &np->mac_stats.bmac; + + mp->tx_bytes += nr64_mac(BTXMAC_BYTE_CNT); + mp->tx_frames += nr64_mac(BTXMAC_FRM_CNT); + + mp->rx_frames += nr64_mac(BRXMAC_FRAME_CNT); + mp->rx_align_errors += nr64_mac(BRXMAC_ALIGN_ERR_CNT); + mp->rx_crc_errors += nr64_mac(BRXMAC_ALIGN_ERR_CNT); + mp->rx_len_errors += nr64_mac(BRXMAC_CODE_VIOL_ERR_CNT); +} + +static void niu_sync_mac_stats(struct niu *np) +{ + if (np->flags & NIU_FLAGS_XMAC) + niu_sync_xmac_stats(np); + else + niu_sync_bmac_stats(np); +} + +static void niu_get_rx_stats(struct niu *np) +{ + unsigned long pkts, dropped, errors, bytes; + int i; + + pkts = dropped = errors = bytes = 0; + for (i = 0; i < np->num_rx_rings; i++) { + struct rx_ring_info *rp = &np->rx_rings[i]; + + pkts += rp->rx_packets; + bytes += rp->rx_bytes; + dropped += rp->rx_dropped; + errors += rp->rx_errors; + } + np->net_stats.rx_packets = pkts; + np->net_stats.rx_bytes = bytes; + np->net_stats.rx_dropped = dropped; + np->net_stats.rx_errors = errors; +} + +static void niu_get_tx_stats(struct niu *np) +{ + unsigned long pkts, errors, bytes; + int i; + + pkts = errors = bytes = 0; + for (i = 0; i < np->num_tx_rings; i++) { + struct tx_ring_info *rp = &np->tx_rings[i]; + + pkts += rp->tx_packets; + bytes += rp->tx_bytes; + errors += rp->tx_errors; + } + np->net_stats.tx_packets = pkts; + np->net_stats.tx_bytes = bytes; + np->net_stats.tx_errors = errors; +} + +static struct net_device_stats *niu_get_stats(struct net_device *dev) +{ + struct niu *np = netdev_priv(dev); + + niu_get_rx_stats(np); + niu_get_tx_stats(np); + + return &np->net_stats; +} + +static void niu_load_hash_xmac(struct niu *np, u16 *hash) +{ + int i; + + for (i = 0; i < 16; i++) + nw64_mac(XMAC_HASH_TBL(i), hash[i]); +} + +static void niu_load_hash_bmac(struct niu *np, u16 *hash) +{ + int i; + + for (i = 0; i < 16; i++) + nw64_mac(BMAC_HASH_TBL(i), hash[i]); +} + +static void niu_load_hash(struct niu *np, u16 
*hash) +{ + if (np->flags & NIU_FLAGS_XMAC) + niu_load_hash_xmac(np, hash); + else + niu_load_hash_bmac(np, hash); +} + +static void niu_set_rx_mode(struct net_device *dev) +{ + struct niu *np = netdev_priv(dev); + int i, alt_cnt, err; + struct dev_addr_list *addr; + unsigned long flags; + u16 hash[16] = { 0, }; + + spin_lock_irqsave(&np->lock, flags); + niu_enable_rx_mac(np, 0); + + np->flags &= ~(NIU_FLAGS_MCAST | NIU_FLAGS_PROMISC); + if (dev->flags & IFF_PROMISC) + np->flags |= NIU_FLAGS_PROMISC; + if ((dev->flags & IFF_ALLMULTI) || (dev->mc_count > 0)) + np->flags |= NIU_FLAGS_MCAST; + + alt_cnt = dev->uc_count; + if (alt_cnt > niu_num_alt_addr(np)) { + alt_cnt = 0; + np->flags |= NIU_FLAGS_PROMISC; + } + + if (alt_cnt) { + int index = 0; + + for (addr = dev->uc_list; addr; addr = addr->next) { + err = niu_set_alt_mac(np, index, + addr->da_addr); + if (err) + printk(KERN_WARNING PFX "%s: Error %d " + "adding alt mac %d\n", + dev->name, err, index); + err = niu_enable_alt_mac(np, index, 1); + if (err) + printk(KERN_WARNING PFX "%s: Error %d " + "enabling alt mac %d\n", + dev->name, err, index); + + index++; + } + } else { + for (i = 0; i < niu_num_alt_addr(np); i++) { + err = niu_enable_alt_mac(np, i, 0); + if (err) + printk(KERN_WARNING PFX "%s: Error %d " + "disabling alt mac %d\n", + dev->name, err, i); + } + } + if (dev->flags & IFF_ALLMULTI) { + for (i = 0; i < 16; i++) + hash[i] = 0xffff; + } else if (dev->mc_count > 0) { + for (addr = dev->mc_list; addr; addr = addr->next) { + u32 crc = ether_crc_le(ETH_ALEN, addr->da_addr); + + crc >>= 24; + hash[crc >> 4] |= (1 << (15 - (crc & 0xf))); + } + } + + if (np->flags & NIU_FLAGS_MCAST) + niu_load_hash(np, hash); + + niu_enable_rx_mac(np, 1); + spin_unlock_irqrestore(&np->lock, flags); +} + +static int niu_set_mac_addr(struct net_device *dev, void *p) +{ + struct niu *np = netdev_priv(dev); + struct sockaddr *addr = p; + unsigned long flags; + + if (!is_valid_ether_addr(addr->sa_data)) + return -EINVAL; + + memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN); + + if (!netif_running(dev)) + return 0; + + spin_lock_irqsave(&np->lock, flags); + niu_enable_rx_mac(np, 0); + niu_set_primary_mac(np, dev->dev_addr); + niu_enable_rx_mac(np, 1); + spin_unlock_irqrestore(&np->lock, flags); + + return 0; +} + +static int niu_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) +{ + return -EOPNOTSUPP; +} + +static void niu_netif_stop(struct niu *np) +{ + np->dev->trans_start = jiffies; /* prevent tx timeout */ + + niu_disable_napi(np); + + netif_tx_disable(np->dev); +} + +static void niu_netif_start(struct niu *np) +{ + /* NOTE: unconditional netif_wake_queue is only appropriate + * so long as all callers are assured to have free tx slots + * (such as after niu_init_hw). 
+ */ + netif_wake_queue(np->dev); + + niu_enable_napi(np); + + niu_enable_interrupts(np, 1); +} + +static void niu_reset_task(struct work_struct *work) +{ + struct niu *np = container_of(work, struct niu, reset_task); + unsigned long flags; + int err; + + spin_lock_irqsave(&np->lock, flags); + if (!netif_running(np->dev)) { + spin_unlock_irqrestore(&np->lock, flags); + return; + } + + spin_unlock_irqrestore(&np->lock, flags); + + del_timer_sync(&np->timer); + + niu_netif_stop(np); + + spin_lock_irqsave(&np->lock, flags); + + niu_stop_hw(np); + + err = niu_init_hw(np); + if (!err) { + np->timer.expires = jiffies + HZ; + add_timer(&np->timer); + niu_netif_start(np); + } + + spin_unlock_irqrestore(&np->lock, flags); +} + +static void niu_tx_timeout(struct net_device *dev) +{ + struct niu *np = netdev_priv(dev); + + dev_err(np->device, PFX "%s: Transmit timed out, resetting\n", + dev->name); + + schedule_work(&np->reset_task); +} + +static void niu_set_txd(struct tx_ring_info *rp, int index, + u64 mapping, u64 len, u64 mark, + u64 n_frags) +{ + __le64 *desc = &rp->descr[index]; + + *desc = cpu_to_le64(mark | + (n_frags << TX_DESC_NUM_PTR_SHIFT) | + (len << TX_DESC_TR_LEN_SHIFT) | + (mapping & TX_DESC_SAD)); +} + +static u64 niu_compute_tx_flags(struct sk_buff *skb, struct ethhdr *ehdr, + u64 pad_bytes, u64 len) +{ + u16 eth_proto, eth_proto_inner; + u64 csum_bits, l3off, ihl, ret; + u8 ip_proto; + int ipv6; + + eth_proto = be16_to_cpu(ehdr->h_proto); + eth_proto_inner = eth_proto; + if (eth_proto == ETH_P_8021Q) { + struct vlan_ethhdr *vp = (struct vlan_ethhdr *) ehdr; + __be16 val = vp->h_vlan_encapsulated_proto; + + eth_proto_inner = be16_to_cpu(val); + } + + ipv6 = ihl = 0; + switch (skb->protocol) { + case __constant_htons(ETH_P_IP): + ip_proto = ip_hdr(skb)->protocol; + ihl = ip_hdr(skb)->ihl; + break; + case __constant_htons(ETH_P_IPV6): + ip_proto = ipv6_hdr(skb)->nexthdr; + ihl = (40 >> 2); + ipv6 = 1; + break; + default: + ip_proto = ihl = 0; + break; + } + + csum_bits = TXHDR_CSUM_NONE; + if (skb->ip_summed == CHECKSUM_PARTIAL) { + u64 start, stuff; + + csum_bits = (ip_proto == IPPROTO_TCP ? + TXHDR_CSUM_TCP : + (ip_proto == IPPROTO_UDP ? + TXHDR_CSUM_UDP : TXHDR_CSUM_SCTP)); + + start = skb_transport_offset(skb) - + (pad_bytes + sizeof(struct tx_pkt_hdr)); + stuff = start + skb->csum_offset; + + csum_bits |= (start / 2) << TXHDR_L4START_SHIFT; + csum_bits |= (stuff / 2) << TXHDR_L4STUFF_SHIFT; + } + + l3off = skb_network_offset(skb) - + (pad_bytes + sizeof(struct tx_pkt_hdr)); + + ret = (((pad_bytes / 2) << TXHDR_PAD_SHIFT) | + (len << TXHDR_LEN_SHIFT) | + ((l3off / 2) << TXHDR_L3START_SHIFT) | + (ihl << TXHDR_IHL_SHIFT) | + ((eth_proto_inner < 1536) ? TXHDR_LLC : 0) | + ((eth_proto == ETH_P_8021Q) ? TXHDR_VLAN : 0) | + (ipv6 ? TXHDR_IP_VER : 0) | + csum_bits); + + return ret; +} + +static struct tx_ring_info *tx_ring_select(struct niu *np, struct sk_buff *skb) +{ + return &np->tx_rings[0]; +} + +static int niu_start_xmit(struct sk_buff *skb, struct net_device *dev) +{ + struct niu *np = netdev_priv(dev); + unsigned long align, headroom; + struct tx_ring_info *rp; + struct tx_pkt_hdr *tp; + unsigned int len, nfg; + struct ethhdr *ehdr; + int prod, i, tlen; + u64 mapping, mrk; + + rp = tx_ring_select(np, skb); + + if (niu_tx_avail(rp) <= (skb_shinfo(skb)->nr_frags + 1)) { + netif_stop_queue(dev); + dev_err(np->device, PFX "%s: BUG! 
Tx ring full when " + "queue awake!\n", dev->name); + rp->tx_errors++; + return NETDEV_TX_BUSY; + } + + if (skb->len < ETH_ZLEN) { + unsigned int pad_bytes = ETH_ZLEN - skb->len; + + if (skb_pad(skb, pad_bytes)) + goto out; + skb_put(skb, pad_bytes); + } + + len = sizeof(struct tx_pkt_hdr) + 15; + if (skb_headroom(skb) < len) { + struct sk_buff *skb_new; + + skb_new = skb_realloc_headroom(skb, len); + if (!skb_new) { + rp->tx_errors++; + goto out_drop; + } + kfree_skb(skb); + skb = skb_new; + } + + align = ((unsigned long) skb->data & (16 - 1)); + headroom = align + sizeof(struct tx_pkt_hdr); + + ehdr = (struct ethhdr *) skb->data; + tp = (struct tx_pkt_hdr *) skb_push(skb, headroom); + + len = skb->len - sizeof(struct tx_pkt_hdr); + tp->flags = cpu_to_le64(niu_compute_tx_flags(skb, ehdr, align, len)); + tp->resv = 0; + + len = skb_headlen(skb); + mapping = np->ops->map_single(np->device, skb->data, + len, DMA_TO_DEVICE); + + prod = rp->prod; + + rp->tx_buffs[prod].skb = skb; + rp->tx_buffs[prod].mapping = mapping; + + mrk = TX_DESC_SOP; + if (++rp->mark_counter == rp->mark_freq) { + rp->mark_counter = 0; + mrk |= TX_DESC_MARK; + rp->mark_pending++; + } + + tlen = len; + nfg = skb_shinfo(skb)->nr_frags; + while (tlen > 0) { + tlen -= MAX_TX_DESC_LEN; + nfg++; + } + + while (len > 0) { + unsigned int this_len = len; + + if (this_len > MAX_TX_DESC_LEN) + this_len = MAX_TX_DESC_LEN; + + niu_set_txd(rp, prod, mapping, this_len, mrk, nfg); + mrk = nfg = 0; + + prod = NEXT_TX(rp, prod); + mapping += this_len; + len -= this_len; + } + + for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { + skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; + + len = frag->size; + mapping = np->ops->map_page(np->device, frag->page, + frag->page_offset, len, + DMA_TO_DEVICE); + + rp->tx_buffs[prod].skb = NULL; + rp->tx_buffs[prod].mapping = mapping; + + niu_set_txd(rp, prod, mapping, len, 0, 0); + + prod = NEXT_TX(rp, prod); + } + + if (prod < rp->prod) + rp->wrap_bit ^= TX_RING_KICK_WRAP; + rp->prod = prod; + + nw64(TX_RING_KICK(rp->tx_channel), rp->wrap_bit | (prod << 3)); + + if (unlikely(niu_tx_avail(rp) <= (MAX_SKB_FRAGS + 1))) { + netif_stop_queue(dev); + if (niu_tx_avail(rp) > NIU_TX_WAKEUP_THRESH(rp)) + netif_wake_queue(dev); + } + + dev->trans_start = jiffies; + +out: + return NETDEV_TX_OK; + +out_drop: + rp->tx_errors++; + kfree_skb(skb); + goto out; +} + +static int niu_change_mtu(struct net_device *dev, int new_mtu) +{ + struct niu *np = netdev_priv(dev); + int err, orig_jumbo, new_jumbo; + + if (new_mtu < 68 || new_mtu > NIU_MAX_MTU) + return -EINVAL; + + orig_jumbo = (dev->mtu > ETH_DATA_LEN); + new_jumbo = (new_mtu > ETH_DATA_LEN); + + dev->mtu = new_mtu; + + if (!netif_running(dev) || + (orig_jumbo == new_jumbo)) + return 0; + + niu_full_shutdown(np, dev); + + niu_free_channels(np); + + niu_enable_napi(np); + + err = niu_alloc_channels(np); + if (err) + return err; + + spin_lock_irq(&np->lock); + + err = niu_init_hw(np); + if (!err) { + init_timer(&np->timer); + np->timer.expires = jiffies + HZ; + np->timer.data = (unsigned long) np; + np->timer.function = niu_timer; + + err = niu_enable_interrupts(np, 1); + if (err) + niu_stop_hw(np); + } + + spin_unlock_irq(&np->lock); + + if (!err) { + netif_start_queue(dev); + if (np->link_config.loopback_mode != LOOPBACK_DISABLED) + netif_carrier_on(dev); + + add_timer(&np->timer); + } + + return err; +} + +static void niu_get_drvinfo(struct net_device *dev, + struct ethtool_drvinfo *info) +{ + struct niu *np = netdev_priv(dev); + struct niu_vpd *vpd = &np->vpd; + + 
strcpy(info->driver, DRV_MODULE_NAME); + strcpy(info->version, DRV_MODULE_VERSION); + sprintf(info->fw_version, "%d.%d", + vpd->fcode_major, vpd->fcode_minor); + if (np->parent->plat_type != PLAT_TYPE_NIU) + strcpy(info->bus_info, pci_name(np->pdev)); +} + +static int niu_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) +{ + struct niu *np = netdev_priv(dev); + struct niu_link_config *lp; + + lp = &np->link_config; + + memset(cmd, 0, sizeof(*cmd)); + cmd->phy_address = np->phy_addr; + cmd->supported = lp->supported; + cmd->advertising = lp->advertising; + cmd->autoneg = lp->autoneg; + cmd->speed = lp->active_speed; + cmd->duplex = lp->active_duplex; + + return 0; +} + +static int niu_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) +{ + return -EINVAL; +} + +static u32 niu_get_msglevel(struct net_device *dev) +{ + struct niu *np = netdev_priv(dev); + return np->msg_enable; +} + +static void niu_set_msglevel(struct net_device *dev, u32 value) +{ + struct niu *np = netdev_priv(dev); + np->msg_enable = value; +} + +static int niu_get_eeprom_len(struct net_device *dev) +{ + struct niu *np = netdev_priv(dev); + + return np->eeprom_len; +} + +static int niu_get_eeprom(struct net_device *dev, + struct ethtool_eeprom *eeprom, u8 *data) +{ + struct niu *np = netdev_priv(dev); + u32 offset, len, val; + + offset = eeprom->offset; + len = eeprom->len; + + if (offset + len < offset) + return -EINVAL; + if (offset >= np->eeprom_len) + return -EINVAL; + if (offset + len > np->eeprom_len) + len = eeprom->len = np->eeprom_len - offset; + + if (offset & 3) { + u32 b_offset, b_count; + + b_offset = offset & 3; + b_count = 4 - b_offset; + if (b_count > len) + b_count = len; + + val = nr64(ESPC_NCR((offset - b_offset) / 4)); + memcpy(data, ((char *)&val) + b_offset, b_count); + data += b_count; + len -= b_count; + offset += b_count; + } + while (len >= 4) { + val = nr64(ESPC_NCR(offset / 4)); + memcpy(data, &val, 4); + data += 4; + len -= 4; + offset += 4; + } + if (len) { + val = nr64(ESPC_NCR(offset / 4)); + memcpy(data, &val, len); + } + return 0; +} + +static const struct { + const char string[ETH_GSTRING_LEN]; +} niu_xmac_stat_keys[] = { + { "tx_frames" }, + { "tx_bytes" }, + { "tx_fifo_errors" }, + { "tx_overflow_errors" }, + { "tx_max_pkt_size_errors" }, + { "tx_underflow_errors" }, + { "rx_local_faults" }, + { "rx_remote_faults" }, + { "rx_link_faults" }, + { "rx_align_errors" }, + { "rx_frags" }, + { "rx_mcasts" }, + { "rx_bcasts" }, + { "rx_hist_cnt1" }, + { "rx_hist_cnt2" }, + { "rx_hist_cnt3" }, + { "rx_hist_cnt4" }, + { "rx_hist_cnt5" }, + { "rx_hist_cnt6" }, + { "rx_hist_cnt7" }, + { "rx_octets" }, + { "rx_code_violations" }, + { "rx_len_errors" }, + { "rx_crc_errors" }, + { "rx_underflows" }, + { "rx_overflows" }, + { "pause_off_state" }, + { "pause_on_state" }, + { "pause_received" }, +}; + +#define NUM_XMAC_STAT_KEYS ARRAY_SIZE(niu_xmac_stat_keys) + +static const struct { + const char string[ETH_GSTRING_LEN]; +} niu_bmac_stat_keys[] = { + { "tx_underflow_errors" }, + { "tx_max_pkt_size_errors" }, + { "tx_bytes" }, + { "tx_frames" }, + { "rx_overflows" }, + { "rx_frames" }, + { "rx_align_errors" }, + { "rx_crc_errors" }, + { "rx_len_errors" }, + { "pause_off_state" }, + { "pause_on_state" }, + { "pause_received" }, +}; + +#define NUM_BMAC_STAT_KEYS ARRAY_SIZE(niu_bmac_stat_keys) + +static const struct { + const char string[ETH_GSTRING_LEN]; +} niu_rxchan_stat_keys[] = { + { "rx_channel" }, + { "rx_packets" }, + { "rx_bytes" }, + { "rx_dropped" }, + { "rx_errors" }, +}; 
+ +#define NUM_RXCHAN_STAT_KEYS ARRAY_SIZE(niu_rxchan_stat_keys) + +static const struct { + const char string[ETH_GSTRING_LEN]; +} niu_txchan_stat_keys[] = { + { "tx_channel" }, + { "tx_packets" }, + { "tx_bytes" }, + { "tx_errors" }, +}; + +#define NUM_TXCHAN_STAT_KEYS ARRAY_SIZE(niu_txchan_stat_keys) + +static void niu_get_strings(struct net_device *dev, u32 stringset, u8 *data) +{ + struct niu *np = netdev_priv(dev); + int i; + + if (stringset != ETH_SS_STATS) + return; + + if (np->flags & NIU_FLAGS_XMAC) { + memcpy(data, niu_xmac_stat_keys, + sizeof(niu_xmac_stat_keys)); + data += sizeof(niu_xmac_stat_keys); + } else { + memcpy(data, niu_bmac_stat_keys, + sizeof(niu_bmac_stat_keys)); + data += sizeof(niu_bmac_stat_keys); + } + for (i = 0; i < np->num_rx_rings; i++) { + memcpy(data, niu_rxchan_stat_keys, + sizeof(niu_rxchan_stat_keys)); + data += sizeof(niu_rxchan_stat_keys); + } + for (i = 0; i < np->num_tx_rings; i++) { + memcpy(data, niu_txchan_stat_keys, + sizeof(niu_txchan_stat_keys)); + data += sizeof(niu_txchan_stat_keys); + } +} + +static int niu_get_stats_count(struct net_device *dev) +{ + struct niu *np = netdev_priv(dev); + + return ((np->flags & NIU_FLAGS_XMAC ? + NUM_XMAC_STAT_KEYS : + NUM_BMAC_STAT_KEYS) + + (np->num_rx_rings * NUM_RXCHAN_STAT_KEYS) + + (np->num_tx_rings * NUM_TXCHAN_STAT_KEYS)); +} + +static void niu_get_ethtool_stats(struct net_device *dev, + struct ethtool_stats *stats, u64 *data) +{ + struct niu *np = netdev_priv(dev); + int i; + + niu_sync_mac_stats(np); + if (np->flags & NIU_FLAGS_XMAC) { + memcpy(data, &np->mac_stats.xmac, + sizeof(struct niu_xmac_stats)); + data += (sizeof(struct niu_xmac_stats) / sizeof(u64)); + } else { + memcpy(data, &np->mac_stats.bmac, + sizeof(struct niu_bmac_stats)); + data += (sizeof(struct niu_bmac_stats) / sizeof(u64)); + } + for (i = 0; i < np->num_rx_rings; i++) { + struct rx_ring_info *rp = &np->rx_rings[i]; + + data[0] = rp->rx_channel; + data[1] = rp->rx_packets; + data[2] = rp->rx_bytes; + data[3] = rp->rx_dropped; + data[4] = rp->rx_errors; + data += 5; + } + for (i = 0; i < np->num_tx_rings; i++) { + struct tx_ring_info *rp = &np->tx_rings[i]; + + data[0] = rp->tx_channel; + data[1] = rp->tx_packets; + data[2] = rp->tx_bytes; + data[3] = rp->tx_errors; + data += 4; + } +} + +static u64 niu_led_state_save(struct niu *np) +{ + if (np->flags & NIU_FLAGS_XMAC) + return nr64_mac(XMAC_CONFIG); + else + return nr64_mac(BMAC_XIF_CONFIG); +} + +static void niu_led_state_restore(struct niu *np, u64 val) +{ + if (np->flags & NIU_FLAGS_XMAC) + nw64_mac(XMAC_CONFIG, val); + else + nw64_mac(BMAC_XIF_CONFIG, val); +} + +static void niu_force_led(struct niu *np, int on) +{ + u64 val, reg, bit; + + if (np->flags & NIU_FLAGS_XMAC) { + reg = XMAC_CONFIG; + bit = XMAC_CONFIG_FORCE_LED_ON; + } else { + reg = BMAC_XIF_CONFIG; + bit = BMAC_XIF_CONFIG_LINK_LED; + } + + val = nr64_mac(reg); + if (on) + val |= bit; + else + val &= ~bit; + nw64_mac(reg, val); +} + +static int niu_phys_id(struct net_device *dev, u32 data) +{ + struct niu *np = netdev_priv(dev); + u64 orig_led_state; + int i; + + if (!netif_running(dev)) + return -EAGAIN; + + if (data == 0) + data = 2; + + orig_led_state = niu_led_state_save(np); + for (i = 0; i < (data * 2); i++) { + int on = ((i % 2) == 0); + + niu_force_led(np, on); + + if (msleep_interruptible(500)) + break; + } + niu_led_state_restore(np, orig_led_state); + + return 0; +} + +static const struct ethtool_ops niu_ethtool_ops = { + .get_drvinfo = niu_get_drvinfo, + .get_link = ethtool_op_get_link, + 
.get_msglevel = niu_get_msglevel, + .set_msglevel = niu_set_msglevel, + .get_eeprom_len = niu_get_eeprom_len, + .get_eeprom = niu_get_eeprom, + .get_settings = niu_get_settings, + .set_settings = niu_set_settings, + .get_strings = niu_get_strings, + .get_stats_count = niu_get_stats_count, + .get_ethtool_stats = niu_get_ethtool_stats, + .phys_id = niu_phys_id, +}; + +static int niu_ldg_assign_ldn(struct niu *np, struct niu_parent *parent, + int ldg, int ldn) +{ + if (ldg < NIU_LDG_MIN || ldg > NIU_LDG_MAX) + return -EINVAL; + if (ldn < 0 || ldn > LDN_MAX) + return -EINVAL; + + parent->ldg_map[ldn] = ldg; + + if (np->parent->plat_type == PLAT_TYPE_NIU) { + /* On N2 NIU, the ldn-->ldg assignments are setup and fixed by + * the firmware, and we're not supposed to change them. + * Validate the mapping, because if it's wrong we probably + * won't get any interrupts and that's painful to debug. + */ + if (nr64(LDG_NUM(ldn)) != ldg) { + dev_err(np->device, PFX "Port %u, mis-matched " + "LDG assignment " + "for ldn %d, should be %d is %llu\n", + np->port, ldn, ldg, + (unsigned long long) nr64(LDG_NUM(ldn))); + return -EINVAL; + } + } else + nw64(LDG_NUM(ldn), ldg); + + return 0; +} + +static int niu_set_ldg_timer_res(struct niu *np, int res) +{ + if (res < 0 || res > LDG_TIMER_RES_VAL) + return -EINVAL; + + + nw64(LDG_TIMER_RES, res); + + return 0; +} + +static int niu_set_ldg_sid(struct niu *np, int ldg, int func, int vector) +{ + if ((ldg < NIU_LDG_MIN || ldg > NIU_LDG_MAX) || + (func < 0 || func > 3) || + (vector < 0 || vector > 0x1f)) + return -EINVAL; + + nw64(SID(ldg), (func << SID_FUNC_SHIFT) | vector); + + return 0; +} + +static int __devinit niu_pci_eeprom_read(struct niu *np, u32 addr) +{ + u64 frame, frame_base = (ESPC_PIO_STAT_READ_START | + (addr << ESPC_PIO_STAT_ADDR_SHIFT)); + int limit; + + if (addr > (ESPC_PIO_STAT_ADDR >> ESPC_PIO_STAT_ADDR_SHIFT)) + return -EINVAL; + + frame = frame_base; + nw64(ESPC_PIO_STAT, frame); + limit = 64; + do { + udelay(5); + frame = nr64(ESPC_PIO_STAT); + if (frame & ESPC_PIO_STAT_READ_END) + break; + } while (limit--); + if (!(frame & ESPC_PIO_STAT_READ_END)) { + dev_err(np->device, PFX "EEPROM read timeout frame[%llx]\n", + (unsigned long long) frame); + return -ENODEV; + } + + frame = frame_base; + nw64(ESPC_PIO_STAT, frame); + limit = 64; + do { + udelay(5); + frame = nr64(ESPC_PIO_STAT); + if (frame & ESPC_PIO_STAT_READ_END) + break; + } while (limit--); + if (!(frame & ESPC_PIO_STAT_READ_END)) { + dev_err(np->device, PFX "EEPROM read timeout frame[%llx]\n", + (unsigned long long) frame); + return -ENODEV; + } + + frame = nr64(ESPC_PIO_STAT); + return (frame & ESPC_PIO_STAT_DATA) >> ESPC_PIO_STAT_DATA_SHIFT; +} + +static int __devinit niu_pci_eeprom_read16(struct niu *np, u32 off) +{ + int err = niu_pci_eeprom_read(np, off); + u16 val; + + if (err < 0) + return err; + val = (err << 8); + err = niu_pci_eeprom_read(np, off + 1); + if (err < 0) + return err; + val |= (err & 0xff); + + return val; +} + +static int __devinit niu_pci_eeprom_read16_swp(struct niu *np, u32 off) +{ + int err = niu_pci_eeprom_read(np, off); + u16 val; + + if (err < 0) + return err; + + val = (err & 0xff); + err = niu_pci_eeprom_read(np, off + 1); + if (err < 0) + return err; + + val |= (err & 0xff) << 8; + + return val; +} + +static int __devinit niu_pci_vpd_get_propname(struct niu *np, + u32 off, + char *namebuf, + int namebuf_len) +{ + int i; + + for (i = 0; i < namebuf_len; i++) { + int err = niu_pci_eeprom_read(np, off + i); + if (err < 0) + return err; + *namebuf++ = 
err; + if (!err) + break; + } + if (i >= namebuf_len) + return -EINVAL; + + return i + 1; +} + +static void __devinit niu_vpd_parse_version(struct niu *np) +{ + struct niu_vpd *vpd = &np->vpd; + int len = strlen(vpd->version) + 1; + const char *s = vpd->version; + int i; + + for (i = 0; i < len - 5; i++) { + if (!strncmp(s + i, "FCode ", 5)) + break; + } + if (i >= len - 5) + return; + + s += i + 5; + sscanf(s, "%d.%d", &vpd->fcode_major, &vpd->fcode_minor); + + niudbg(PROBE, "VPD_SCAN: FCODE major(%d) minor(%d)\n", + vpd->fcode_major, vpd->fcode_minor); + if (vpd->fcode_major > NIU_VPD_MIN_MAJOR || + (vpd->fcode_major == NIU_VPD_MIN_MAJOR && + vpd->fcode_minor >= NIU_VPD_MIN_MINOR)) + np->flags |= NIU_FLAGS_VPD_VALID; +} + +/* ESPC_PIO_EN_ENABLE must be set */ +static int __devinit niu_pci_vpd_scan_props(struct niu *np, + u32 start, u32 end) +{ + unsigned int found_mask = 0; +#define FOUND_MASK_MODEL 0x00000001 +#define FOUND_MASK_BMODEL 0x00000002 +#define FOUND_MASK_VERS 0x00000004 +#define FOUND_MASK_MAC 0x00000008 +#define FOUND_MASK_NMAC 0x00000010 +#define FOUND_MASK_PHY 0x00000020 +#define FOUND_MASK_ALL 0x0000003f + + niudbg(PROBE, "VPD_SCAN: start[%x] end[%x]\n", + start, end); + while (start < end) { + int len, err, instance, type, prop_len; + char namebuf[64]; + u8 *prop_buf; + int max_len; + + if (found_mask == FOUND_MASK_ALL) { + niu_vpd_parse_version(np); + return 1; + } + + err = niu_pci_eeprom_read(np, start + 2); + if (err < 0) + return err; + len = err; + start += 3; + + instance = niu_pci_eeprom_read(np, start); + type = niu_pci_eeprom_read(np, start + 3); + prop_len = niu_pci_eeprom_read(np, start + 4); + err = niu_pci_vpd_get_propname(np, start + 5, namebuf, 64); + if (err < 0) + return err; + + prop_buf = NULL; + max_len = 0; + if (!strcmp(namebuf, "model")) { + prop_buf = np->vpd.model; + max_len = NIU_VPD_MODEL_MAX; + found_mask |= FOUND_MASK_MODEL; + } else if (!strcmp(namebuf, "board-model")) { + prop_buf = np->vpd.board_model; + max_len = NIU_VPD_BD_MODEL_MAX; + found_mask |= FOUND_MASK_BMODEL; + } else if (!strcmp(namebuf, "version")) { + prop_buf = np->vpd.version; + max_len = NIU_VPD_VERSION_MAX; + found_mask |= FOUND_MASK_VERS; + } else if (!strcmp(namebuf, "local-mac-address")) { + prop_buf = np->vpd.local_mac; + max_len = ETH_ALEN; + found_mask |= FOUND_MASK_MAC; + } else if (!strcmp(namebuf, "num-mac-addresses")) { + prop_buf = &np->vpd.mac_num; + max_len = 1; + found_mask |= FOUND_MASK_NMAC; + } else if (!strcmp(namebuf, "phy-type")) { + prop_buf = np->vpd.phy_type; + max_len = NIU_VPD_PHY_TYPE_MAX; + found_mask |= FOUND_MASK_PHY; + } + + if (max_len && prop_len > max_len) { + dev_err(np->device, PFX "Property '%s' length (%d) is " + "too long.\n", namebuf, prop_len); + return -EINVAL; + } + + if (prop_buf) { + u32 off = start + 5 + err; + int i; + + niudbg(PROBE, "VPD_SCAN: Reading in property [%s] " + "len[%d]\n", namebuf, prop_len); + for (i = 0; i < prop_len; i++) + *prop_buf++ = niu_pci_eeprom_read(np, off + i); + } + + start += len; + } + + return 0; +} + +/* ESPC_PIO_EN_ENABLE must be set */ +static void __devinit niu_pci_vpd_fetch(struct niu *np, u32 start) +{ + u32 offset; + int err; + + err = niu_pci_eeprom_read16_swp(np, start + 1); + if (err < 0) + return; + + offset = err + 3; + + while (start + offset < ESPC_EEPROM_SIZE) { + u32 here = start + offset; + u32 end; + + err = niu_pci_eeprom_read(np, here); + if (err != 0x90) + return; + + err = niu_pci_eeprom_read16_swp(np, here + 1); + if (err < 0) + return; + + here = start + offset + 3; + 
end = start + offset + err; + + offset += err; + + err = niu_pci_vpd_scan_props(np, here, end); + if (err < 0 || err == 1) + return; + } +} + +/* ESPC_PIO_EN_ENABLE must be set */ +static u32 __devinit niu_pci_vpd_offset(struct niu *np) +{ + u32 start = 0, end = ESPC_EEPROM_SIZE, ret; + int err; + + while (start < end) { + ret = start; + + /* ROM header signature? */ + err = niu_pci_eeprom_read16(np, start + 0); + if (err != 0x55aa) + return 0; + + /* Apply offset to PCI data structure. */ + err = niu_pci_eeprom_read16(np, start + 23); + if (err < 0) + return 0; + start += err; + + /* Check for "PCIR" signature. */ + err = niu_pci_eeprom_read16(np, start + 0); + if (err != 0x5043) + return 0; + err = niu_pci_eeprom_read16(np, start + 2); + if (err != 0x4952) + return 0; + + /* Check for OBP image type. */ + err = niu_pci_eeprom_read(np, start + 20); + if (err < 0) + return 0; + if (err != 0x01) { + err = niu_pci_eeprom_read(np, ret + 2); + if (err < 0) + return 0; + + start = ret + (err * 512); + continue; + } + + err = niu_pci_eeprom_read16_swp(np, start + 8); + if (err < 0) + return err; + ret += err; + + err = niu_pci_eeprom_read(np, ret + 0); + if (err != 0x82) + return 0; + + return ret; + } + + return 0; +} + +static int __devinit niu_phy_type_prop_decode(struct niu *np, + const char *phy_prop) +{ + if (!strcmp(phy_prop, "mif")) { + /* 1G copper, MII */ + np->flags &= ~(NIU_FLAGS_FIBER | + NIU_FLAGS_10G); + np->mac_xcvr = MAC_XCVR_MII; + } else if (!strcmp(phy_prop, "xgf")) { + /* 10G fiber, XPCS */ + np->flags |= (NIU_FLAGS_10G | + NIU_FLAGS_FIBER); + np->mac_xcvr = MAC_XCVR_XPCS; + } else if (!strcmp(phy_prop, "pcs")) { + /* 1G fiber, PCS */ + np->flags &= ~NIU_FLAGS_10G; + np->flags |= NIU_FLAGS_FIBER; + np->mac_xcvr = MAC_XCVR_PCS; + } else if (!strcmp(phy_prop, "xgc")) { + /* 10G copper, XPCS */ + np->flags |= NIU_FLAGS_10G; + np->flags &= ~NIU_FLAGS_FIBER; + np->mac_xcvr = MAC_XCVR_XPCS; + } else { + return -EINVAL; + } + return 0; +} + +static void __devinit niu_pci_vpd_validate(struct niu *np) +{ + struct net_device *dev = np->dev; + struct niu_vpd *vpd = &np->vpd; + u8 val8; + + if (!is_valid_ether_addr(&vpd->local_mac[0])) { + dev_err(np->device, PFX "VPD MAC invalid, " + "falling back to SPROM.\n"); + + np->flags &= ~NIU_FLAGS_VPD_VALID; + return; + } + + if (niu_phy_type_prop_decode(np, np->vpd.phy_type)) { + dev_err(np->device, PFX "Illegal phy string [%s].\n", + np->vpd.phy_type); + dev_err(np->device, PFX "Falling back to SPROM.\n"); + np->flags &= ~NIU_FLAGS_VPD_VALID; + return; + } + + memcpy(dev->perm_addr, vpd->local_mac, ETH_ALEN); + + val8 = dev->perm_addr[5]; + dev->perm_addr[5] += np->port; + if (dev->perm_addr[5] < val8) + dev->perm_addr[4]++; + + memcpy(dev->dev_addr, dev->perm_addr, dev->addr_len); +} + +static int __devinit niu_pci_probe_sprom(struct niu *np) +{ + struct net_device *dev = np->dev; + int len, i; + u64 val, sum; + u8 val8; + + val = (nr64(ESPC_VER_IMGSZ) & ESPC_VER_IMGSZ_IMGSZ); + val >>= ESPC_VER_IMGSZ_IMGSZ_SHIFT; + len = val / 4; + + np->eeprom_len = len; + + niudbg(PROBE, "SPROM: Image size %llu\n", (unsigned long long) val); + + sum = 0; + for (i = 0; i < len; i++) { + val = nr64(ESPC_NCR(i)); + sum += (val >> 0) & 0xff; + sum += (val >> 8) & 0xff; + sum += (val >> 16) & 0xff; + sum += (val >> 24) & 0xff; + } + niudbg(PROBE, "SPROM: Checksum %x\n", (int)(sum & 0xff)); + if ((sum & 0xff) != 0xab) { + dev_err(np->device, PFX "Bad SPROM checksum " + "(%x, should be 0xab)\n", (int) (sum & 0xff)); + return -EINVAL; + } + + val = 
nr64(ESPC_PHY_TYPE); + switch (np->port) { + case 0: + val = (val & ESPC_PHY_TYPE_PORT0) >> + ESPC_PHY_TYPE_PORT0_SHIFT; + break; + case 1: + val = (val & ESPC_PHY_TYPE_PORT1) >> + ESPC_PHY_TYPE_PORT1_SHIFT; + break; + case 2: + val = (val & ESPC_PHY_TYPE_PORT2) >> + ESPC_PHY_TYPE_PORT2_SHIFT; + break; + case 3: + val = (val & ESPC_PHY_TYPE_PORT3) >> + ESPC_PHY_TYPE_PORT3_SHIFT; + break; + default: + dev_err(np->device, PFX "Bogus port number %u\n", + np->port); + return -EINVAL; + } + niudbg(PROBE, "SPROM: PHY type %llx\n", (unsigned long long) val); + + switch (val) { + case ESPC_PHY_TYPE_1G_COPPER: + /* 1G copper, MII */ + np->flags &= ~(NIU_FLAGS_FIBER | + NIU_FLAGS_10G); + np->mac_xcvr = MAC_XCVR_MII; + break; + + case ESPC_PHY_TYPE_1G_FIBER: + /* 1G fiber, PCS */ + np->flags &= ~NIU_FLAGS_10G; + np->flags |= NIU_FLAGS_FIBER; + np->mac_xcvr = MAC_XCVR_PCS; + break; + + case ESPC_PHY_TYPE_10G_COPPER: + /* 10G copper, XPCS */ + np->flags |= NIU_FLAGS_10G; + np->flags &= ~NIU_FLAGS_FIBER; + np->mac_xcvr = MAC_XCVR_XPCS; + break; + + case ESPC_PHY_TYPE_10G_FIBER: + /* 10G fiber, XPCS */ + np->flags |= (NIU_FLAGS_10G | + NIU_FLAGS_FIBER); + np->mac_xcvr = MAC_XCVR_XPCS; + break; + + default: + dev_err(np->device, PFX "Bogus SPROM phy type %llu\n", + (unsigned long long) val); + return -EINVAL; + } + + val = nr64(ESPC_MAC_ADDR0); + niudbg(PROBE, "SPROM: MAC_ADDR0[%08llx]\n", + (unsigned long long) val); + dev->perm_addr[0] = (val >> 0) & 0xff; + dev->perm_addr[1] = (val >> 8) & 0xff; + dev->perm_addr[2] = (val >> 16) & 0xff; + dev->perm_addr[3] = (val >> 24) & 0xff; + + val = nr64(ESPC_MAC_ADDR1); + niudbg(PROBE, "SPROM: MAC_ADDR1[%08llx]\n", + (unsigned long long) val); + dev->perm_addr[4] = (val >> 0) & 0xff; + dev->perm_addr[5] = (val >> 8) & 0xff; + + if (!is_valid_ether_addr(&dev->perm_addr[0])) { + dev_err(np->device, PFX "SPROM MAC address invalid\n"); + dev_err(np->device, PFX "[ \n"); + for (i = 0; i < 6; i++) + printk("%02x ", dev->perm_addr[i]); + printk("]\n"); + return -EINVAL; + } + + val8 = dev->perm_addr[5]; + dev->perm_addr[5] += np->port; + if (dev->perm_addr[5] < val8) + dev->perm_addr[4]++; + + memcpy(dev->dev_addr, dev->perm_addr, dev->addr_len); + + val = nr64(ESPC_MOD_STR_LEN); + niudbg(PROBE, "SPROM: MOD_STR_LEN[%llu]\n", + (unsigned long long) val); + if (val > 8 * 4) + return -EINVAL; + + for (i = 0; i < val; i += 4) { + u64 tmp = nr64(ESPC_NCR(5 + (i / 4))); + + np->vpd.model[i + 3] = (tmp >> 0) & 0xff; + np->vpd.model[i + 2] = (tmp >> 8) & 0xff; + np->vpd.model[i + 1] = (tmp >> 16) & 0xff; + np->vpd.model[i + 0] = (tmp >> 24) & 0xff; + } + np->vpd.model[val] = '\0'; + + val = nr64(ESPC_BD_MOD_STR_LEN); + niudbg(PROBE, "SPROM: BD_MOD_STR_LEN[%llu]\n", + (unsigned long long) val); + if (val > 4 * 4) + return -EINVAL; + + for (i = 0; i < val; i += 4) { + u64 tmp = nr64(ESPC_NCR(14 + (i / 4))); + + np->vpd.board_model[i + 3] = (tmp >> 0) & 0xff; + np->vpd.board_model[i + 2] = (tmp >> 8) & 0xff; + np->vpd.board_model[i + 1] = (tmp >> 16) & 0xff; + np->vpd.board_model[i + 0] = (tmp >> 24) & 0xff; + } + np->vpd.board_model[val] = '\0'; + + np->vpd.mac_num = + nr64(ESPC_NUM_PORTS_MACS) & ESPC_NUM_PORTS_MACS_VAL; + niudbg(PROBE, "SPROM: NUM_PORTS_MACS[%d]\n", + np->vpd.mac_num); + + return 0; +} + +static int __devinit niu_get_and_validate_port(struct niu *np) +{ + struct niu_parent *parent = np->parent; + + if (np->port <= 1) + np->flags |= NIU_FLAGS_XMAC; + + if (!parent->num_ports) { + if (parent->plat_type == PLAT_TYPE_NIU) { + parent->num_ports = 2; + } else { + 
parent->num_ports = nr64(ESPC_NUM_PORTS_MACS) & + ESPC_NUM_PORTS_MACS_VAL; + + if (!parent->num_ports) + parent->num_ports = 4; + } + } + + niudbg(PROBE, "niu_get_and_validate_port: port[%d] num_ports[%d]\n", + np->port, parent->num_ports); + if (np->port >= parent->num_ports) + return -ENODEV; + + return 0; +} + +static int __devinit phy_record(struct niu_parent *parent, + struct phy_probe_info *p, + int dev_id_1, int dev_id_2, u8 phy_port, + int type) +{ + u32 id = (dev_id_1 << 16) | dev_id_2; + u8 idx; + + if (dev_id_1 < 0 || dev_id_2 < 0) + return 0; + if (type == PHY_TYPE_PMA_PMD || type == PHY_TYPE_PCS) { + if ((id & NIU_PHY_ID_MASK) != NIU_PHY_ID_BCM8704) + return 0; + } else { + if ((id & NIU_PHY_ID_MASK) != NIU_PHY_ID_BCM5464R) + return 0; + } + + pr_info("niu%d: Found PHY %08x type %s at phy_port %u\n", + parent->index, id, + (type == PHY_TYPE_PMA_PMD ? + "PMA/PMD" : + (type == PHY_TYPE_PCS ? + "PCS" : "MII")), + phy_port); + + if (p->cur[type] >= NIU_MAX_PORTS) { + printk(KERN_ERR PFX "Too many PHY ports.\n"); + return -EINVAL; + } + idx = p->cur[type]; + p->phy_id[type][idx] = id; + p->phy_port[type][idx] = phy_port; + p->cur[type] = idx + 1; + return 0; +} + +static int __devinit port_has_10g(struct phy_probe_info *p, int port) +{ + int i; + + for (i = 0; i < p->cur[PHY_TYPE_PMA_PMD]; i++) { + if (p->phy_port[PHY_TYPE_PMA_PMD][i] == port) + return 1; + } + for (i = 0; i < p->cur[PHY_TYPE_PCS]; i++) { + if (p->phy_port[PHY_TYPE_PCS][i] == port) + return 1; + } + + return 0; +} + +static int __devinit count_10g_ports(struct phy_probe_info *p, int *lowest) +{ + int port, cnt; + + cnt = 0; + *lowest = 32; + for (port = 8; port < 32; port++) { + if (port_has_10g(p, port)) { + if (!cnt) + *lowest = port; + cnt++; + } + } + + return cnt; +} + +static int __devinit count_1g_ports(struct phy_probe_info *p, int *lowest) +{ + *lowest = 32; + if (p->cur[PHY_TYPE_MII]) + *lowest = p->phy_port[PHY_TYPE_MII][0]; + + return p->cur[PHY_TYPE_MII]; +} + +static void __devinit niu_n2_divide_channels(struct niu_parent *parent) +{ + int num_ports = parent->num_ports; + int i; + + for (i = 0; i < num_ports; i++) { + parent->rxchan_per_port[i] = (16 / num_ports); + parent->txchan_per_port[i] = (16 / num_ports); + + pr_info(PFX "niu%d: Port %u [%u RX chans] " + "[%u TX chans]\n", + parent->index, i, + parent->rxchan_per_port[i], + parent->txchan_per_port[i]); + } +} + +static void __devinit niu_divide_channels(struct niu_parent *parent, + int num_10g, int num_1g) +{ + int num_ports = parent->num_ports; + int rx_chans_per_10g, rx_chans_per_1g; + int tx_chans_per_10g, tx_chans_per_1g; + int i, tot_rx, tot_tx; + + if (!num_10g || !num_1g) { + rx_chans_per_10g = rx_chans_per_1g = + (NIU_NUM_RXCHAN / num_ports); + tx_chans_per_10g = tx_chans_per_1g = + (NIU_NUM_TXCHAN / num_ports); + } else { + rx_chans_per_1g = NIU_NUM_RXCHAN / 8; + rx_chans_per_10g = (NIU_NUM_RXCHAN - + (rx_chans_per_1g * num_1g)) / + num_10g; + + tx_chans_per_1g = NIU_NUM_TXCHAN / 6; + tx_chans_per_10g = (NIU_NUM_TXCHAN - + (tx_chans_per_1g * num_1g)) / + num_10g; + } + + tot_rx = tot_tx = 0; + for (i = 0; i < num_ports; i++) { + int type = phy_decode(parent->port_phy, i); + + if (type == PORT_TYPE_10G) { + parent->rxchan_per_port[i] = rx_chans_per_10g; + parent->txchan_per_port[i] = tx_chans_per_10g; + } else { + parent->rxchan_per_port[i] = rx_chans_per_1g; + parent->txchan_per_port[i] = tx_chans_per_1g; + } + pr_info(PFX "niu%d: Port %u [%u RX chans] " + "[%u TX chans]\n", + parent->index, i, + parent->rxchan_per_port[i], + 
parent->txchan_per_port[i]); + tot_rx += parent->rxchan_per_port[i]; + tot_tx += parent->txchan_per_port[i]; + } + + if (tot_rx > NIU_NUM_RXCHAN) { + printk(KERN_ERR PFX "niu%d: Too many RX channels (%d), " + "resetting to one per port.\n", + parent->index, tot_rx); + for (i = 0; i < num_ports; i++) + parent->rxchan_per_port[i] = 1; + } + if (tot_tx > NIU_NUM_TXCHAN) { + printk(KERN_ERR PFX "niu%d: Too many TX channels (%d), " + "resetting to one per port.\n", + parent->index, tot_tx); + for (i = 0; i < num_ports; i++) + parent->txchan_per_port[i] = 1; + } + if (tot_rx < NIU_NUM_RXCHAN || tot_tx < NIU_NUM_TXCHAN) { + printk(KERN_WARNING PFX "niu%d: Driver bug, wasted channels, " + "RX[%d] TX[%d]\n", + parent->index, tot_rx, tot_tx); + } +} + +static void __devinit niu_divide_rdc_groups(struct niu_parent *parent, + int num_10g, int num_1g) +{ + int i, num_ports = parent->num_ports; + int rdc_group, rdc_groups_per_port; + int rdc_channel_base; + + rdc_group = 0; + rdc_groups_per_port = NIU_NUM_RDC_TABLES / num_ports; + + rdc_channel_base = 0; + + for (i = 0; i < num_ports; i++) { + struct niu_rdc_tables *tp = &parent->rdc_group_cfg[i]; + int grp, num_channels = parent->rxchan_per_port[i]; + int this_channel_offset; + + tp->first_table_num = rdc_group; + tp->num_tables = rdc_groups_per_port; + this_channel_offset = 0; + for (grp = 0; grp < tp->num_tables; grp++) { + struct rdc_table *rt = &tp->tables[grp]; + int slot; + + pr_info(PFX "niu%d: Port %d RDC tbl(%d) [ ", + parent->index, i, tp->first_table_num + grp); + for (slot = 0; slot < NIU_RDC_TABLE_SLOTS; slot++) { + rt->rxdma_channel[slot] = + rdc_channel_base + this_channel_offset; + + printk("%d ", rt->rxdma_channel[slot]); + + if (++this_channel_offset == num_channels) + this_channel_offset = 0; + } + printk("]\n"); + } + + parent->rdc_default[i] = rdc_channel_base; + + rdc_channel_base += num_channels; + rdc_group += rdc_groups_per_port; + } +} + +static int __devinit fill_phy_probe_info(struct niu *np, + struct niu_parent *parent, + struct phy_probe_info *info) +{ + unsigned long flags; + int port, err; + + memset(info, 0, sizeof(*info)); + + /* Port 0 to 7 are reserved for onboard Serdes, probe the rest. 
*/ + niu_lock_parent(np, flags); + err = 0; + for (port = 8; port < 32; port++) { + int dev_id_1, dev_id_2; + + dev_id_1 = mdio_read(np, port, + NIU_PMA_PMD_DEV_ADDR, MII_PHYSID1); + dev_id_2 = mdio_read(np, port, + NIU_PMA_PMD_DEV_ADDR, MII_PHYSID2); + err = phy_record(parent, info, dev_id_1, dev_id_2, port, + PHY_TYPE_PMA_PMD); + if (err) + break; + dev_id_1 = mdio_read(np, port, + NIU_PCS_DEV_ADDR, MII_PHYSID1); + dev_id_2 = mdio_read(np, port, + NIU_PCS_DEV_ADDR, MII_PHYSID2); + err = phy_record(parent, info, dev_id_1, dev_id_2, port, + PHY_TYPE_PCS); + if (err) + break; + dev_id_1 = mii_read(np, port, MII_PHYSID1); + dev_id_2 = mii_read(np, port, MII_PHYSID2); + err = phy_record(parent, info, dev_id_1, dev_id_2, port, + PHY_TYPE_MII); + if (err) + break; + } + niu_unlock_parent(np, flags); + + return err; +} + +static int __devinit walk_phys(struct niu *np, struct niu_parent *parent) +{ + struct phy_probe_info *info = &parent->phy_probe_info; + int lowest_10g, lowest_1g; + int num_10g, num_1g; + u32 val; + int err; + + err = fill_phy_probe_info(np, parent, info); + if (err) + return err; + + num_10g = count_10g_ports(info, &lowest_10g); + num_1g = count_1g_ports(info, &lowest_1g); + + switch ((num_10g << 4) | num_1g) { + case 0x24: + if (lowest_1g == 10) + parent->plat_type = PLAT_TYPE_VF_P0; + else if (lowest_1g == 26) + parent->plat_type = PLAT_TYPE_VF_P1; + else + goto unknown_vg_1g_port; + + /* fallthru */ + case 0x22: + val = (phy_encode(PORT_TYPE_10G, 0) | + phy_encode(PORT_TYPE_10G, 1) | + phy_encode(PORT_TYPE_1G, 2) | + phy_encode(PORT_TYPE_1G, 3)); + break; + + case 0x20: + val = (phy_encode(PORT_TYPE_10G, 0) | + phy_encode(PORT_TYPE_10G, 1)); + break; + + case 0x10: + val = phy_encode(PORT_TYPE_10G, np->port); + break; + + case 0x14: + if (lowest_1g == 10) + parent->plat_type = PLAT_TYPE_VF_P0; + else if (lowest_1g == 26) + parent->plat_type = PLAT_TYPE_VF_P1; + else + goto unknown_vg_1g_port; + + /* fallthru */ + case 0x13: + if ((lowest_10g & 0x7) == 0) + val = (phy_encode(PORT_TYPE_10G, 0) | + phy_encode(PORT_TYPE_1G, 1) | + phy_encode(PORT_TYPE_1G, 2) | + phy_encode(PORT_TYPE_1G, 3)); + else + val = (phy_encode(PORT_TYPE_1G, 0) | + phy_encode(PORT_TYPE_10G, 1) | + phy_encode(PORT_TYPE_1G, 2) | + phy_encode(PORT_TYPE_1G, 3)); + break; + + case 0x04: + if (lowest_1g == 10) + parent->plat_type = PLAT_TYPE_VF_P0; + else if (lowest_1g == 26) + parent->plat_type = PLAT_TYPE_VF_P1; + else + goto unknown_vg_1g_port; + + val = (phy_encode(PORT_TYPE_1G, 0) | + phy_encode(PORT_TYPE_1G, 1) | + phy_encode(PORT_TYPE_1G, 2) | + phy_encode(PORT_TYPE_1G, 3)); + break; + + default: + printk(KERN_ERR PFX "Unsupported port config " + "10G[%d] 1G[%d]\n", + num_10g, num_1g); + return -EINVAL; + } + + parent->port_phy = val; + + if (parent->plat_type == PLAT_TYPE_NIU) + niu_n2_divide_channels(parent); + else + niu_divide_channels(parent, num_10g, num_1g); + + niu_divide_rdc_groups(parent, num_10g, num_1g); + + return 0; + +unknown_vg_1g_port: + printk(KERN_ERR PFX "Cannot identify platform type, 1gport=%d\n", + lowest_1g); + return -EINVAL; +} + +static int __devinit niu_probe_ports(struct niu *np) +{ + struct niu_parent *parent = np->parent; + int err, i; + + niudbg(PROBE, "niu_probe_ports(): port_phy[%08x]\n", + parent->port_phy); + + if (parent->port_phy == PORT_PHY_UNKNOWN) { + err = walk_phys(np, parent); + if (err) + return err; + + niu_set_ldg_timer_res(np, 2); + for (i = 0; i <= LDN_MAX; i++) + niu_ldn_irq_enable(np, i, 0); + } + + if (parent->port_phy == PORT_PHY_INVALID) + return 
-EINVAL; + + return 0; +} + +static int __devinit niu_classifier_swstate_init(struct niu *np) +{ + struct niu_classifier *cp = &np->clas; + + niudbg(PROBE, "niu_classifier_swstate_init: num_tcam(%d)\n", + np->parent->tcam_num_entries); + + cp->tcam_index = (u16) np->port; + cp->h1_init = 0xffffffff; + cp->h2_init = 0xffff; + + return fflp_early_init(np); +} + +static void __devinit niu_link_config_init(struct niu *np) +{ + struct niu_link_config *lp = &np->link_config; + + lp->advertising = (ADVERTISED_10baseT_Half | + ADVERTISED_10baseT_Full | + ADVERTISED_100baseT_Half | + ADVERTISED_100baseT_Full | + ADVERTISED_1000baseT_Half | + ADVERTISED_1000baseT_Full | + ADVERTISED_10000baseT_Full | + ADVERTISED_Autoneg); + lp->speed = lp->active_speed = SPEED_INVALID; + lp->duplex = lp->active_duplex = DUPLEX_INVALID; +#if 0 + lp->loopback_mode = LOOPBACK_MAC; + lp->active_speed = SPEED_10000; + lp->active_duplex = DUPLEX_FULL; +#else + lp->loopback_mode = LOOPBACK_DISABLED; +#endif +} + +static int __devinit niu_init_mac_ipp_pcs_base(struct niu *np) +{ + switch (np->port) { + case 0: + np->mac_regs = np->regs + XMAC_PORT0_OFF; + np->ipp_off = 0x00000; + np->pcs_off = 0x04000; + np->xpcs_off = 0x02000; + break; + + case 1: + np->mac_regs = np->regs + XMAC_PORT1_OFF; + np->ipp_off = 0x08000; + np->pcs_off = 0x0a000; + np->xpcs_off = 0x08000; + break; + + case 2: + np->mac_regs = np->regs + BMAC_PORT2_OFF; + np->ipp_off = 0x04000; + np->pcs_off = 0x0e000; + np->xpcs_off = ~0UL; + break; + + case 3: + np->mac_regs = np->regs + BMAC_PORT3_OFF; + np->ipp_off = 0x0c000; + np->pcs_off = 0x12000; + np->xpcs_off = ~0UL; + break; + + default: + dev_err(np->device, PFX "Port %u is invalid, cannot " + "compute MAC block offset.\n", np->port); + return -EINVAL; + } + + return 0; +} + +static void __devinit niu_try_msix(struct niu *np, u8 *ldg_num_map) +{ + struct msix_entry msi_vec[NIU_NUM_LDG]; + struct niu_parent *parent = np->parent; + struct pci_dev *pdev = np->pdev; + int i, num_irqs, err; + u8 first_ldg; + + first_ldg = (NIU_NUM_LDG / parent->num_ports) * np->port; + for (i = 0; i < (NIU_NUM_LDG / parent->num_ports); i++) + ldg_num_map[i] = first_ldg + i; + + num_irqs = (parent->rxchan_per_port[np->port] + + parent->txchan_per_port[np->port] + + (np->port == 0 ? 
3 : 1)); + BUG_ON(num_irqs > (NIU_NUM_LDG / parent->num_ports)); + +retry: + for (i = 0; i < num_irqs; i++) { + msi_vec[i].vector = 0; + msi_vec[i].entry = i; + } + + err = pci_enable_msix(pdev, msi_vec, num_irqs); + if (err < 0) { + np->flags &= ~NIU_FLAGS_MSIX; + return; + } + if (err > 0) { + num_irqs = err; + goto retry; + } + + np->flags |= NIU_FLAGS_MSIX; + for (i = 0; i < num_irqs; i++) + np->ldg[i].irq = msi_vec[i].vector; + np->num_ldg = num_irqs; +} + +static int __devinit niu_n2_irq_init(struct niu *np, u8 *ldg_num_map) +{ +#ifdef CONFIG_SPARC64 + struct of_device *op = np->op; + const u32 *int_prop; + int i; + + int_prop = of_get_property(op->node, "interrupts", NULL); + if (!int_prop) + return -ENODEV; + + for (i = 0; i < op->num_irqs; i++) { + ldg_num_map[i] = int_prop[i]; + np->ldg[i].irq = op->irqs[i]; + } + + np->num_ldg = op->num_irqs; + + return 0; +#else + return -EINVAL; +#endif +} + +static int __devinit niu_ldg_init(struct niu *np) +{ + struct niu_parent *parent = np->parent; + u8 ldg_num_map[NIU_NUM_LDG]; + int first_chan, num_chan; + int i, err, ldg_rotor; + u8 port; + + np->num_ldg = 1; + np->ldg[0].irq = np->dev->irq; + if (parent->plat_type == PLAT_TYPE_NIU) { + err = niu_n2_irq_init(np, ldg_num_map); + if (err) + return err; + } else + niu_try_msix(np, ldg_num_map); + + port = np->port; + for (i = 0; i < np->num_ldg; i++) { + struct niu_ldg *lp = &np->ldg[i]; + + netif_napi_add(np->dev, &lp->napi, niu_poll, 64); + + lp->np = np; + lp->ldg_num = ldg_num_map[i]; + lp->timer = 2; /* XXX */ + + /* On N2 NIU the firmware has setup the SID mappings so they go + * to the correct values that will route the LDG to the proper + * interrupt in the NCU interrupt table. + */ + if (np->parent->plat_type != PLAT_TYPE_NIU) { + err = niu_set_ldg_sid(np, lp->ldg_num, port, i); + if (err) + return err; + } + } + + /* We adopt the LDG assignment ordering used by the N2 NIU + * 'interrupt' properties because that simplifies a lot of + * things. 
This ordering is: + * + * MAC + * MIF (if port zero) + * SYSERR (if port zero) + * RX channels + * TX channels + */ + + ldg_rotor = 0; + + err = niu_ldg_assign_ldn(np, parent, ldg_num_map[ldg_rotor], + LDN_MAC(port)); + if (err) + return err; + + ldg_rotor++; + if (ldg_rotor == np->num_ldg) + ldg_rotor = 0; + + if (port == 0) { + err = niu_ldg_assign_ldn(np, parent, + ldg_num_map[ldg_rotor], + LDN_MIF); + if (err) + return err; + + ldg_rotor++; + if (ldg_rotor == np->num_ldg) + ldg_rotor = 0; + + err = niu_ldg_assign_ldn(np, parent, + ldg_num_map[ldg_rotor], + LDN_DEVICE_ERROR); + if (err) + return err; + + ldg_rotor++; + if (ldg_rotor == np->num_ldg) + ldg_rotor = 0; + + } + + first_chan = 0; + for (i = 0; i < port; i++) + first_chan += parent->rxchan_per_port[port]; + num_chan = parent->rxchan_per_port[port]; + + for (i = first_chan; i < (first_chan + num_chan); i++) { + err = niu_ldg_assign_ldn(np, parent, + ldg_num_map[ldg_rotor], + LDN_RXDMA(i)); + if (err) + return err; + ldg_rotor++; + if (ldg_rotor == np->num_ldg) + ldg_rotor = 0; + } + + first_chan = 0; + for (i = 0; i < port; i++) + first_chan += parent->txchan_per_port[port]; + num_chan = parent->txchan_per_port[port]; + for (i = first_chan; i < (first_chan + num_chan); i++) { + err = niu_ldg_assign_ldn(np, parent, + ldg_num_map[ldg_rotor], + LDN_TXDMA(i)); + if (err) + return err; + ldg_rotor++; + if (ldg_rotor == np->num_ldg) + ldg_rotor = 0; + } + + return 0; +} + +static void __devexit niu_ldg_free(struct niu *np) +{ + if (np->flags & NIU_FLAGS_MSIX) + pci_disable_msix(np->pdev); +} + +static int __devinit niu_get_of_props(struct niu *np) +{ +#ifdef CONFIG_SPARC64 + struct net_device *dev = np->dev; + struct device_node *dp; + const char *phy_type; + const u8 *mac_addr; + int prop_len; + + if (np->parent->plat_type == PLAT_TYPE_NIU) + dp = np->op->node; + else + dp = pci_device_to_OF_node(np->pdev); + + phy_type = of_get_property(dp, "phy-type", &prop_len); + if (!phy_type) { + dev_err(np->device, PFX "%s: OF node lacks " + "phy-type property\n", + dp->full_name); + return -EINVAL; + } + + if (!strcmp(phy_type, "none")) + return -ENODEV; + + strcpy(np->vpd.phy_type, phy_type); + + if (niu_phy_type_prop_decode(np, np->vpd.phy_type)) { + dev_err(np->device, PFX "%s: Illegal phy string [%s].\n", + dp->full_name, np->vpd.phy_type); + return -EINVAL; + } + + mac_addr = of_get_property(dp, "local-mac-address", &prop_len); + if (!mac_addr) { + dev_err(np->device, PFX "%s: OF node lacks " + "local-mac-address property\n", + dp->full_name); + return -EINVAL; + } + if (prop_len != dev->addr_len) { + dev_err(np->device, PFX "%s: OF MAC address prop len (%d) " + "is wrong.\n", + dp->full_name, prop_len); + } + memcpy(dev->perm_addr, mac_addr, dev->addr_len); + if (!is_valid_ether_addr(&dev->perm_addr[0])) { + int i; + + dev_err(np->device, PFX "%s: OF MAC address is invalid\n", + dp->full_name); + dev_err(np->device, PFX "%s: [ \n", + dp->full_name); + for (i = 0; i < 6; i++) + printk("%02x ", dev->perm_addr[i]); + printk("]\n"); + return -EINVAL; + } + + memcpy(dev->dev_addr, dev->perm_addr, dev->addr_len); + + return 0; +#else + return -EINVAL; +#endif +} + +static int __devinit niu_get_invariants(struct niu *np) +{ + int err, have_props; + u32 offset; + + err = niu_get_of_props(np); + if (err == -ENODEV) + return err; + + have_props = !err; + + err = niu_get_and_validate_port(np); + if (err) + return err; + + err = niu_init_mac_ipp_pcs_base(np); + if (err) + return err; + + if (!have_props) { + if (np->parent->plat_type == 
PLAT_TYPE_NIU)
+			return -EINVAL;
+
+		nw64(ESPC_PIO_EN, ESPC_PIO_EN_ENABLE);
+		offset = niu_pci_vpd_offset(np);
+		niudbg(PROBE, "niu_get_invariants: VPD offset [%08x]\n",
+		       offset);
+		if (offset)
+			niu_pci_vpd_fetch(np, offset);
+		nw64(ESPC_PIO_EN, 0);
+
+		if (np->flags & NIU_FLAGS_VPD_VALID)
+			niu_pci_vpd_validate(np);
+
+		if (!(np->flags & NIU_FLAGS_VPD_VALID)) {
+			err = niu_pci_probe_sprom(np);
+			if (err)
+				return err;
+		}
+	}
+
+	err = niu_probe_ports(np);
+	if (err)
+		return err;
+
+	niu_ldg_init(np);
+
+	niu_classifier_swstate_init(np);
+	niu_link_config_init(np);
+
+	err = niu_determine_phy_disposition(np);
+	if (!err)
+		err = niu_init_link(np);
+
+	return err;
+}
+
+static LIST_HEAD(niu_parent_list);
+static DEFINE_MUTEX(niu_parent_lock);
+static int niu_parent_index;
+
+static ssize_t show_port_phy(struct device *dev,
+			     struct device_attribute *attr, char *buf)
+{
+	struct platform_device *plat_dev = to_platform_device(dev);
+	struct niu_parent *p = plat_dev->dev.platform_data;
+	u32 port_phy = p->port_phy;
+	char *orig_buf = buf;
+	int i;
+
+	if (port_phy == PORT_PHY_UNKNOWN ||
+	    port_phy == PORT_PHY_INVALID)
+		return 0;
+
+	for (i = 0; i < p->num_ports; i++) {
+		const char *type_str;
+		int type;
+
+		type = phy_decode(port_phy, i);
+		if (type == PORT_TYPE_10G)
+			type_str = "10G";
+		else
+			type_str = "1G";
+		buf += sprintf(buf,
+			       (i == 0) ? "%s" : " %s",
+			       type_str);
+	}
+	buf += sprintf(buf, "\n");
+	return buf - orig_buf;
+}
+
+static ssize_t show_plat_type(struct device *dev,
+			      struct device_attribute *attr, char *buf)
+{
+	struct platform_device *plat_dev = to_platform_device(dev);
+	struct niu_parent *p = plat_dev->dev.platform_data;
+	const char *type_str;
+
+	switch (p->plat_type) {
+	case PLAT_TYPE_ATLAS:
+		type_str = "atlas";
+		break;
+	case PLAT_TYPE_NIU:
+		type_str = "niu";
+		break;
+	case PLAT_TYPE_VF_P0:
+		type_str = "vf_p0";
+		break;
+	case PLAT_TYPE_VF_P1:
+		type_str = "vf_p1";
+		break;
+	default:
+		type_str = "unknown";
+		break;
+	}
+
+	return sprintf(buf, "%s\n", type_str);
+}
+
+/* rx != 0 selects the per-port RX channel counts, otherwise TX. */
+static ssize_t __show_chan_per_port(struct device *dev,
+				    struct device_attribute *attr, char *buf,
+				    int rx)
+{
+	struct platform_device *plat_dev = to_platform_device(dev);
+	struct niu_parent *p = plat_dev->dev.platform_data;
+	char *orig_buf = buf;
+	u8 *arr;
+	int i;
+
+	arr = (rx ? p->rxchan_per_port : p->txchan_per_port);
+
+	for (i = 0; i < p->num_ports; i++) {
+		buf += sprintf(buf,
+			       (i == 0) ? "%d" : " %d",
+			       arr[i]);
+	}
+	buf += sprintf(buf, "\n");
+
+	return buf - orig_buf;
+}
+
+static ssize_t show_rxchan_per_port(struct device *dev,
+				    struct device_attribute *attr, char *buf)
+{
+	return __show_chan_per_port(dev, attr, buf, 1);
+}
+
+static ssize_t show_txchan_per_port(struct device *dev,
+				    struct device_attribute *attr, char *buf)
+{
+	return __show_chan_per_port(dev, attr, buf, 0);
+}
+
+static ssize_t show_num_ports(struct device *dev,
+			      struct device_attribute *attr, char *buf)
+{
+	struct platform_device *plat_dev = to_platform_device(dev);
+	struct niu_parent *p = plat_dev->dev.platform_data;
+
+	return sprintf(buf, "%d\n", p->num_ports);
+}
+
+static struct device_attribute niu_parent_attributes[] = {
+	__ATTR(port_phy, S_IRUGO, show_port_phy, NULL),
+	__ATTR(plat_type, S_IRUGO, show_plat_type, NULL),
+	__ATTR(rxchan_per_port, S_IRUGO, show_rxchan_per_port, NULL),
+	__ATTR(txchan_per_port, S_IRUGO, show_txchan_per_port, NULL),
+	__ATTR(num_ports, S_IRUGO, show_num_ports, NULL),
+	{}
+};
+
+static struct niu_parent * __devinit niu_new_parent(struct niu *np,
+						    union niu_parent_id *id,
+						    u8 ptype)
+{
+	struct platform_device *plat_dev;
+	struct niu_parent *p;
+	int i;
+
+	niudbg(PROBE, "niu_new_parent: Creating new parent.\n");
+
+	plat_dev = platform_device_register_simple("niu", niu_parent_index,
+						   NULL, 0);
+	if (!plat_dev)
+		return NULL;
+
+	for (i = 0; attr_name(niu_parent_attributes[i]); i++) {
+		int err = device_create_file(&plat_dev->dev,
+					     &niu_parent_attributes[i]);
+		if (err)
+			goto fail_unregister;
+	}
+
+	p = kzalloc(sizeof(*p), GFP_KERNEL);
+	if (!p)
+		goto fail_unregister;
+
+	p->index = niu_parent_index++;
+
+	plat_dev->dev.platform_data = p;
+	p->plat_dev = plat_dev;
+
+	memcpy(&p->id, id, sizeof(*id));
+	p->plat_type = ptype;
+	INIT_LIST_HEAD(&p->list);
+	atomic_set(&p->refcnt, 0);
+	list_add(&p->list, &niu_parent_list);
+	spin_lock_init(&p->lock);
+
+	p->rxdma_clock_divider = 7500;
+
+	p->tcam_num_entries = NIU_PCI_TCAM_ENTRIES;
+	if (p->plat_type == PLAT_TYPE_NIU)
+		p->tcam_num_entries = NIU_NONPCI_TCAM_ENTRIES;
+
+	for (i = CLASS_CODE_USER_PROG1; i <= CLASS_CODE_SCTP_IPV6; i++) {
+		int index = i - CLASS_CODE_USER_PROG1;
+
+		p->tcam_key[index] = TCAM_KEY_TSEL;
+		p->flow_key[index] = (FLOW_KEY_IPSA |
+				      FLOW_KEY_IPDA |
+				      FLOW_KEY_PROTO |
+				      (FLOW_KEY_L4_BYTE12 <<
+				       FLOW_KEY_L4_0_SHIFT) |
+				      (FLOW_KEY_L4_BYTE12 <<
+				       FLOW_KEY_L4_1_SHIFT));
+	}
+
+	for (i = 0; i < LDN_MAX + 1; i++)
+		p->ldg_map[i] = LDG_INVALID;
+
+	return p;
+
+fail_unregister:
+	platform_device_unregister(plat_dev);
+	return NULL;
+}
+
+/* Find the shared parent structure for this port, creating it if it
+ * does not exist yet, then attach the port via a sysfs link and a
+ * reference count.
+ */
+static struct niu_parent * __devinit niu_get_parent(struct niu *np,
+						    union niu_parent_id *id,
+						    u8 ptype)
+{
+	struct niu_parent *p, *tmp;
+	int port = np->port;
+
+	niudbg(PROBE, "niu_get_parent: platform_type[%u] port[%u]\n",
+	       ptype, port);
+
+	mutex_lock(&niu_parent_lock);
+	p = NULL;
+	list_for_each_entry(tmp, &niu_parent_list, list) {
+		if (!memcmp(id, &tmp->id, sizeof(*id))) {
+			p = tmp;
+			break;
+		}
+	}
+	if (!p)
+		p = niu_new_parent(np, id, ptype);
+
+	if (p) {
+		char port_name[6];
+		int err;
+
+		sprintf(port_name, "port%d", port);
+		err = sysfs_create_link(&p->plat_dev->dev.kobj,
+					&np->device->kobj,
+					port_name);
+		if (!err) {
+			p->ports[port] = np;
+			atomic_inc(&p->refcnt);
+		}
+	}
+	mutex_unlock(&niu_parent_lock);
+
+	return p;
+}
+
+static void niu_put_parent(struct niu *np)
+{
+	struct niu_parent *p = np->parent;
+	u8 port = np->port;
+	char port_name[6];
+
+	BUG_ON(!p || p->ports[port] != np);
+
+	niudbg(PROBE, "niu_put_parent: port[%u]\n", port);
+
+	sprintf(port_name, "port%d", port);
+
+	mutex_lock(&niu_parent_lock);
+
+	sysfs_remove_link(&p->plat_dev->dev.kobj, port_name);
+
+	p->ports[port] = NULL;
+	np->parent = NULL;
+
+	if (atomic_dec_and_test(&p->refcnt)) {
+		list_del(&p->list);
+		platform_device_unregister(p->plat_dev);
+	}
+
+	mutex_unlock(&niu_parent_lock);
+}
+
+static void *niu_pci_alloc_coherent(struct device *dev, size_t size,
+				    u64 *handle, gfp_t flag)
+{
+	dma_addr_t dh;
+	void *ret;
+
+	ret = dma_alloc_coherent(dev, size, &dh, flag);
+	if (ret)
+		*handle = dh;
+	return ret;
+}
+
+static void niu_pci_free_coherent(struct device *dev, size_t size,
+				  void *cpu_addr, u64 handle)
+{
+	dma_free_coherent(dev, size, cpu_addr, handle);
+}
+
+static u64 niu_pci_map_page(struct device *dev, struct page *page,
+			    unsigned long offset, size_t size,
+			    enum dma_data_direction direction)
+{
+	return dma_map_page(dev, page, offset, size, direction);
+}
+
+static void niu_pci_unmap_page(struct device *dev, u64 dma_address,
+			       size_t size, enum dma_data_direction direction)
+{
+	dma_unmap_page(dev, dma_address, size, direction);
+}
+
+static u64 niu_pci_map_single(struct device *dev, void *cpu_addr,
+			      size_t size,
+			      enum dma_data_direction direction)
+{
+	return dma_map_single(dev, cpu_addr, size, direction);
+}
+
+static void niu_pci_unmap_single(struct device *dev, u64 dma_address,
+				 size_t size,
+				 enum dma_data_direction direction)
+{
+	dma_unmap_single(dev, dma_address, size, direction);
+}
+
+static const struct niu_ops niu_pci_ops = {
+	.alloc_coherent = niu_pci_alloc_coherent,
+	.free_coherent = niu_pci_free_coherent,
+	.map_page = niu_pci_map_page,
+	.unmap_page = niu_pci_unmap_page,
+	.map_single = niu_pci_map_single,
+	.unmap_single = niu_pci_unmap_single,
+};
+
+static void __devinit niu_driver_version(void)
+{
+	static int niu_version_printed;
+
+	if (niu_version_printed++ == 0)
+		pr_info("%s", version);
+}
+
+static struct net_device * __devinit niu_alloc_and_init(
+	struct device *gen_dev, struct pci_dev *pdev,
+	struct of_device *op, const struct niu_ops *ops,
+	u8 port)
+{
+	struct net_device *dev = alloc_etherdev(sizeof(struct niu));
+	struct niu *np;
+
+	if (!dev) {
+		dev_err(gen_dev, PFX "Etherdev alloc failed, aborting.\n");
+		return NULL;
+	}
+
+	SET_NETDEV_DEV(dev, gen_dev);
+
+	np = netdev_priv(dev);
+	np->dev = dev;
+	np->pdev = pdev;
+	np->op = op;
+	np->device = gen_dev;
+	np->ops = ops;
+
+	np->msg_enable = niu_debug;
+
+	spin_lock_init(&np->lock);
+	INIT_WORK(&np->reset_task, niu_reset_task);
+
+	np->port = port;
+
+	return dev;
+}
+
+static void __devinit niu_assign_netdev_ops(struct net_device *dev)
+{
+	dev->open = niu_open;
+	dev->stop = niu_close;
+	dev->get_stats = niu_get_stats;
+	dev->set_multicast_list = niu_set_rx_mode;
+	dev->set_mac_address = niu_set_mac_addr;
+	dev->do_ioctl = niu_ioctl;
+	dev->tx_timeout = niu_tx_timeout;
+	dev->hard_start_xmit = niu_start_xmit;
+	dev->ethtool_ops = &niu_ethtool_ops;
+	dev->watchdog_timeo = NIU_TX_TIMEOUT;
+	dev->change_mtu = niu_change_mtu;
+}
+
+static void __devinit niu_device_announce(struct niu *np)
+{
+	struct net_device *dev = np->dev;
+	int i;
+
+	pr_info("%s: NIU Ethernet ", dev->name);
+	for (i = 0; i < 6; i++)
+		printk("%2.2x%c", dev->dev_addr[i],
+		       i == 5 ? '\n' : ':');
+
+	pr_info("%s: Port type[%s] mode[%s:%s] XCVR[%s] phy[%s]\n",
+		dev->name,
+		(np->flags & NIU_FLAGS_XMAC ? "XMAC" : "BMAC"),
+		(np->flags & NIU_FLAGS_10G ? "10G" : "1G"),
+		(np->flags & NIU_FLAGS_FIBER ?
"FIBER" : "COPPER"), + (np->mac_xcvr == MAC_XCVR_MII ? "MII" : + (np->mac_xcvr == MAC_XCVR_PCS ? "PCS" : "XPCS")), + np->vpd.phy_type); +} + +static int __devinit niu_pci_init_one(struct pci_dev *pdev, + const struct pci_device_id *ent) +{ + unsigned long niureg_base, niureg_len; + union niu_parent_id parent_id; + struct net_device *dev; + struct niu *np; + int err, pos; + u64 dma_mask; + u16 val16; + + niu_driver_version(); + + err = pci_enable_device(pdev); + if (err) { + dev_err(&pdev->dev, PFX "Cannot enable PCI device, " + "aborting.\n"); + return err; + } + + if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM) || + !(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) { + dev_err(&pdev->dev, PFX "Cannot find proper PCI device " + "base addresses, aborting.\n"); + err = -ENODEV; + goto err_out_disable_pdev; + } + + err = pci_request_regions(pdev, DRV_MODULE_NAME); + if (err) { + dev_err(&pdev->dev, PFX "Cannot obtain PCI resources, " + "aborting.\n"); + goto err_out_disable_pdev; + } + + pos = pci_find_capability(pdev, PCI_CAP_ID_EXP); + if (pos <= 0) { + dev_err(&pdev->dev, PFX "Cannot find PCI Express capability, " + "aborting.\n"); + goto err_out_free_res; + } + + dev = niu_alloc_and_init(&pdev->dev, pdev, NULL, + &niu_pci_ops, PCI_FUNC(pdev->devfn)); + if (!dev) { + err = -ENOMEM; + goto err_out_free_res; + } + np = netdev_priv(dev); + + memset(&parent_id, 0, sizeof(parent_id)); + parent_id.pci.domain = pci_domain_nr(pdev->bus); + parent_id.pci.bus = pdev->bus->number; + parent_id.pci.device = PCI_SLOT(pdev->devfn); + + np->parent = niu_get_parent(np, &parent_id, + PLAT_TYPE_ATLAS); + if (!np->parent) { + err = -ENOMEM; + goto err_out_free_dev; + } + + pci_read_config_word(pdev, pos + PCI_EXP_DEVCTL, &val16); + val16 &= ~PCI_EXP_DEVCTL_NOSNOOP_EN; + val16 |= (PCI_EXP_DEVCTL_CERE | + PCI_EXP_DEVCTL_NFERE | + PCI_EXP_DEVCTL_FERE | + PCI_EXP_DEVCTL_URRE | + PCI_EXP_DEVCTL_RELAX_EN); + pci_write_config_word(pdev, pos + PCI_EXP_DEVCTL, val16); + + dma_mask = DMA_44BIT_MASK; + err = pci_set_dma_mask(pdev, dma_mask); + if (!err) { + dev->features |= NETIF_F_HIGHDMA; + err = pci_set_consistent_dma_mask(pdev, dma_mask); + if (err) { + dev_err(&pdev->dev, PFX "Unable to obtain 44 bit " + "DMA for consistent allocations, " + "aborting.\n"); + goto err_out_release_parent; + } + } + if (err || dma_mask == DMA_32BIT_MASK) { + err = pci_set_dma_mask(pdev, DMA_32BIT_MASK); + if (err) { + dev_err(&pdev->dev, PFX "No usable DMA configuration, " + "aborting.\n"); + goto err_out_release_parent; + } + } + + dev->features |= (NETIF_F_SG | NETIF_F_HW_CSUM); + + niureg_base = pci_resource_start(pdev, 0); + niureg_len = pci_resource_len(pdev, 0); + + np->regs = ioremap_nocache(niureg_base, niureg_len); + if (!np->regs) { + dev_err(&pdev->dev, PFX "Cannot map device registers, " + "aborting.\n"); + err = -ENOMEM; + goto err_out_release_parent; + } + + pci_set_master(pdev); + pci_save_state(pdev); + + dev->irq = pdev->irq; + + niu_assign_netdev_ops(dev); + + err = niu_get_invariants(np); + if (err) { + if (err != -ENODEV) + dev_err(&pdev->dev, PFX "Problem fetching invariants " + "of chip, aborting.\n"); + goto err_out_iounmap; + } + + err = register_netdev(dev); + if (err) { + dev_err(&pdev->dev, PFX "Cannot register net device, " + "aborting.\n"); + goto err_out_iounmap; + } + + pci_set_drvdata(pdev, dev); + + niu_device_announce(np); + + return 0; + +err_out_iounmap: + if (np->regs) { + iounmap(np->regs); + np->regs = NULL; + } + +err_out_release_parent: + niu_put_parent(np); + +err_out_free_dev: + 
free_netdev(dev); + +err_out_free_res: + pci_release_regions(pdev); + +err_out_disable_pdev: + pci_disable_device(pdev); + pci_set_drvdata(pdev, NULL); + + return err; +} + +static void __devexit niu_pci_remove_one(struct pci_dev *pdev) +{ + struct net_device *dev = pci_get_drvdata(pdev); + + if (dev) { + struct niu *np = netdev_priv(dev); + + unregister_netdev(dev); + if (np->regs) { + iounmap(np->regs); + np->regs = NULL; + } + + niu_ldg_free(np); + + niu_put_parent(np); + + free_netdev(dev); + pci_release_regions(pdev); + pci_disable_device(pdev); + pci_set_drvdata(pdev, NULL); + } +} + +static int niu_suspend(struct pci_dev *pdev, pm_message_t state) +{ + struct net_device *dev = pci_get_drvdata(pdev); + struct niu *np = netdev_priv(dev); + unsigned long flags; + + if (!netif_running(dev)) + return 0; + + flush_scheduled_work(); + niu_netif_stop(np); + + del_timer_sync(&np->timer); + + spin_lock_irqsave(&np->lock, flags); + niu_enable_interrupts(np, 0); + spin_unlock_irqrestore(&np->lock, flags); + + netif_device_detach(dev); + + spin_lock_irqsave(&np->lock, flags); + niu_stop_hw(np); + spin_unlock_irqrestore(&np->lock, flags); + + pci_save_state(pdev); + + return 0; +} + +static int niu_resume(struct pci_dev *pdev) +{ + struct net_device *dev = pci_get_drvdata(pdev); + struct niu *np = netdev_priv(dev); + unsigned long flags; + int err; + + if (!netif_running(dev)) + return 0; + + pci_restore_state(pdev); + + netif_device_attach(dev); + + spin_lock_irqsave(&np->lock, flags); + + err = niu_init_hw(np); + if (!err) { + np->timer.expires = jiffies + HZ; + add_timer(&np->timer); + niu_netif_start(np); + } + + spin_unlock_irqrestore(&np->lock, flags); + + return err; +} + +static struct pci_driver niu_pci_driver = { + .name = DRV_MODULE_NAME, + .id_table = niu_pci_tbl, + .probe = niu_pci_init_one, + .remove = __devexit_p(niu_pci_remove_one), + .suspend = niu_suspend, + .resume = niu_resume, +}; + +#ifdef CONFIG_SPARC64 +static void *niu_phys_alloc_coherent(struct device *dev, size_t size, + u64 *dma_addr, gfp_t flag) +{ + unsigned long order = get_order(size); + unsigned long page = __get_free_pages(flag, order); + + if (page == 0UL) + return NULL; + memset((char *)page, 0, PAGE_SIZE << order); + *dma_addr = __pa(page); + + return (void *) page; +} + +static void niu_phys_free_coherent(struct device *dev, size_t size, + void *cpu_addr, u64 handle) +{ + unsigned long order = get_order(size); + + free_pages((unsigned long) cpu_addr, order); +} + +static u64 niu_phys_map_page(struct device *dev, struct page *page, + unsigned long offset, size_t size, + enum dma_data_direction direction) +{ + return page_to_phys(page) + offset; +} + +static void niu_phys_unmap_page(struct device *dev, u64 dma_address, + size_t size, enum dma_data_direction direction) +{ + /* Nothing to do. */ +} + +static u64 niu_phys_map_single(struct device *dev, void *cpu_addr, + size_t size, + enum dma_data_direction direction) +{ + return __pa(cpu_addr); +} + +static void niu_phys_unmap_single(struct device *dev, u64 dma_address, + size_t size, + enum dma_data_direction direction) +{ + /* Nothing to do. 
*/ +} + +static const struct niu_ops niu_phys_ops = { + .alloc_coherent = niu_phys_alloc_coherent, + .free_coherent = niu_phys_free_coherent, + .map_page = niu_phys_map_page, + .unmap_page = niu_phys_unmap_page, + .map_single = niu_phys_map_single, + .unmap_single = niu_phys_unmap_single, +}; + +static unsigned long res_size(struct resource *r) +{ + return r->end - r->start + 1UL; +} + +static int __devinit niu_of_probe(struct of_device *op, + const struct of_device_id *match) +{ + union niu_parent_id parent_id; + struct net_device *dev; + struct niu *np; + const u32 *reg; + int err; + + niu_driver_version(); + + reg = of_get_property(op->node, "reg", NULL); + if (!reg) { + dev_err(&op->dev, PFX "%s: No 'reg' property, aborting.\n", + op->node->full_name); + return -ENODEV; + } + + dev = niu_alloc_and_init(&op->dev, NULL, op, + &niu_phys_ops, reg[0] & 0x1); + if (!dev) { + err = -ENOMEM; + goto err_out; + } + np = netdev_priv(dev); + + memset(&parent_id, 0, sizeof(parent_id)); + parent_id.of = of_get_parent(op->node); + + np->parent = niu_get_parent(np, &parent_id, + PLAT_TYPE_NIU); + if (!np->parent) { + err = -ENOMEM; + goto err_out_free_dev; + } + + dev->features |= (NETIF_F_SG | NETIF_F_HW_CSUM); + + np->regs = of_ioremap(&op->resource[1], 0, + res_size(&op->resource[1]), + "niu regs"); + if (!np->regs) { + dev_err(&op->dev, PFX "Cannot map device registers, " + "aborting.\n"); + err = -ENOMEM; + goto err_out_release_parent; + } + + np->vir_regs_1 = of_ioremap(&op->resource[2], 0, + res_size(&op->resource[2]), + "niu vregs-1"); + if (!np->vir_regs_1) { + dev_err(&op->dev, PFX "Cannot map device vir registers 1, " + "aborting.\n"); + err = -ENOMEM; + goto err_out_iounmap; + } + + np->vir_regs_2 = of_ioremap(&op->resource[3], 0, + res_size(&op->resource[3]), + "niu vregs-2"); + if (!np->vir_regs_2) { + dev_err(&op->dev, PFX "Cannot map device vir registers 2, " + "aborting.\n"); + err = -ENOMEM; + goto err_out_iounmap; + } + + niu_assign_netdev_ops(dev); + + err = niu_get_invariants(np); + if (err) { + if (err != -ENODEV) + dev_err(&op->dev, PFX "Problem fetching invariants " + "of chip, aborting.\n"); + goto err_out_iounmap; + } + + err = register_netdev(dev); + if (err) { + dev_err(&op->dev, PFX "Cannot register net device, " + "aborting.\n"); + goto err_out_iounmap; + } + + dev_set_drvdata(&op->dev, dev); + + niu_device_announce(np); + + return 0; + +err_out_iounmap: + if (np->vir_regs_1) { + of_iounmap(&op->resource[2], np->vir_regs_1, + res_size(&op->resource[2])); + np->vir_regs_1 = NULL; + } + + if (np->vir_regs_2) { + of_iounmap(&op->resource[3], np->vir_regs_2, + res_size(&op->resource[3])); + np->vir_regs_2 = NULL; + } + + if (np->regs) { + of_iounmap(&op->resource[1], np->regs, + res_size(&op->resource[1])); + np->regs = NULL; + } + +err_out_release_parent: + niu_put_parent(np); + +err_out_free_dev: + free_netdev(dev); + +err_out: + return err; +} + +static int __devexit niu_of_remove(struct of_device *op) +{ + struct net_device *dev = dev_get_drvdata(&op->dev); + + if (dev) { + struct niu *np = netdev_priv(dev); + + unregister_netdev(dev); + + if (np->vir_regs_1) { + of_iounmap(&op->resource[2], np->vir_regs_1, + res_size(&op->resource[2])); + np->vir_regs_1 = NULL; + } + + if (np->vir_regs_2) { + of_iounmap(&op->resource[3], np->vir_regs_2, + res_size(&op->resource[3])); + np->vir_regs_2 = NULL; + } + + if (np->regs) { + of_iounmap(&op->resource[1], np->regs, + res_size(&op->resource[1])); + np->regs = NULL; + } + + niu_ldg_free(np); + + niu_put_parent(np); + + 
free_netdev(dev); + dev_set_drvdata(&op->dev, NULL); + } + return 0; +} + +static struct of_device_id niu_match[] = { + { + .name = "network", + .compatible = "SUNW,niusl", + }, + {}, +}; +MODULE_DEVICE_TABLE(of, niu_match); + +static struct of_platform_driver niu_of_driver = { + .name = "niu", + .match_table = niu_match, + .probe = niu_of_probe, + .remove = __devexit_p(niu_of_remove), +}; + +#endif /* CONFIG_SPARC64 */ + +static int __init niu_init(void) +{ + int err = 0; + + BUILD_BUG_ON((PAGE_SIZE < 4 * 1024) || + ((PAGE_SIZE > 32 * 1024) && + ((PAGE_SIZE % (32 * 1024)) != 0 && + (PAGE_SIZE % (16 * 1024)) != 0 && + (PAGE_SIZE % (8 * 1024)) != 0 && + (PAGE_SIZE % (4 * 1024)) != 0))); + + niu_debug = netif_msg_init(debug, NIU_MSG_DEFAULT); + +#ifdef CONFIG_SPARC64 + err = of_register_driver(&niu_of_driver, &of_bus_type); +#endif + + if (!err) { + err = pci_register_driver(&niu_pci_driver); +#ifdef CONFIG_SPARC64 + if (err) + of_unregister_driver(&niu_of_driver); +#endif + } + + return err; +} + +static void __exit niu_exit(void) +{ + pci_unregister_driver(&niu_pci_driver); +#ifdef CONFIG_SPARC64 + of_unregister_driver(&niu_of_driver); +#endif +} + +module_init(niu_init); +module_exit(niu_exit); diff --git a/drivers/net/niu.h b/drivers/net/niu.h new file mode 100644 index 0000000000000000000000000000000000000000..10e3f111b6d5d17ed9ed50ac3dfce729485eb791 --- /dev/null +++ b/drivers/net/niu.h @@ -0,0 +1,3222 @@ +/* niu.h: Definitions for Neptune ethernet driver. + * + * Copyright (C) 2007 David S. Miller (davem@davemloft.net) + */ + +#ifndef _NIU_H +#define _NIU_H + +#define PIO 0x000000UL +#define FZC_PIO 0x080000UL +#define FZC_MAC 0x180000UL +#define FZC_IPP 0x280000UL +#define FFLP 0x300000UL +#define FZC_FFLP 0x380000UL +#define PIO_VADDR 0x400000UL +#define ZCP 0x500000UL +#define FZC_ZCP 0x580000UL +#define DMC 0x600000UL +#define FZC_DMC 0x680000UL +#define TXC 0x700000UL +#define FZC_TXC 0x780000UL +#define PIO_LDSV 0x800000UL +#define PIO_PIO_LDGIM 0x900000UL +#define PIO_IMASK0 0xa00000UL +#define PIO_IMASK1 0xb00000UL +#define FZC_PROM 0xc80000UL +#define FZC_PIM 0xd80000UL + +#define LDSV0(LDG) (PIO_LDSV + 0x00000UL + (LDG) * 0x2000UL) +#define LDSV1(LDG) (PIO_LDSV + 0x00008UL + (LDG) * 0x2000UL) +#define LDSV2(LDG) (PIO_LDSV + 0x00010UL + (LDG) * 0x2000UL) + +#define LDG_IMGMT(LDG) (PIO_LDSV + 0x00018UL + (LDG) * 0x2000UL) +#define LDG_IMGMT_ARM 0x0000000080000000ULL +#define LDG_IMGMT_TIMER 0x000000000000003fULL + +#define LD_IM0(IDX) (PIO_IMASK0 + 0x00000UL + (IDX) * 0x2000UL) +#define LD_IM0_MASK 0x0000000000000003ULL + +#define LD_IM1(IDX) (PIO_IMASK1 + 0x00000UL + (IDX) * 0x2000UL) +#define LD_IM1_MASK 0x0000000000000003ULL + +#define LDG_TIMER_RES (FZC_PIO + 0x00008UL) +#define LDG_TIMER_RES_VAL 0x00000000000fffffULL + +#define DIRTY_TID_CTL (FZC_PIO + 0x00010UL) +#define DIRTY_TID_CTL_NPTHRED 0x00000000003f0000ULL +#define DIRTY_TID_CTL_RDTHRED 0x00000000000003f0ULL +#define DIRTY_TID_CTL_DTIDCLR 0x0000000000000002ULL +#define DIRTY_TID_CTL_DTIDENAB 0x0000000000000001ULL + +#define DIRTY_TID_STAT (FZC_PIO + 0x00018UL) +#define DIRTY_TID_STAT_NPWSTAT 0x0000000000003f00ULL +#define DIRTY_TID_STAT_RDSTAT 0x000000000000003fULL + +#define RST_CTL (FZC_PIO + 0x00038UL) +#define RST_CTL_MAC_RST3 0x0000000000400000ULL +#define RST_CTL_MAC_RST2 0x0000000000200000ULL +#define RST_CTL_MAC_RST1 0x0000000000100000ULL +#define RST_CTL_MAC_RST0 0x0000000000080000ULL +#define RST_CTL_ACK_TO_EN 0x0000000000000800ULL +#define RST_CTL_ACK_TO_VAL 0x00000000000007feULL + +#define 
SMX_CFIG_DAT (FZC_PIO + 0x00040UL) +#define SMX_CFIG_DAT_RAS_DET 0x0000000080000000ULL +#define SMX_CFIG_DAT_RAS_INJ 0x0000000040000000ULL +#define SMX_CFIG_DAT_XACT_TO 0x000000000fffffffULL + +#define SMX_INT_STAT (FZC_PIO + 0x00048UL) +#define SMX_INT_STAT_STAT 0x00000000ffffffffULL + +#define SMX_CTL (FZC_PIO + 0x00050UL) +#define SMX_CTL_CTL 0x00000000ffffffffULL + +#define SMX_DBG_VEC (FZC_PIO + 0x00058UL) +#define SMX_DBG_VEC_VEC 0x00000000ffffffffULL + +#define PIO_DBG_SEL (FZC_PIO + 0x00060UL) +#define PIO_DBG_SEL_SEL 0x000000000000003fULL + +#define PIO_TRAIN_VEC (FZC_PIO + 0x00068UL) +#define PIO_TRAIN_VEC_VEC 0x00000000ffffffffULL + +#define PIO_ARB_CTL (FZC_PIO + 0x00070UL) +#define PIO_ARB_CTL_CTL 0x00000000ffffffffULL + +#define PIO_ARB_DBG_VEC (FZC_PIO + 0x00078UL) +#define PIO_ARB_DBG_VEC_VEC 0x00000000ffffffffULL + +#define SYS_ERR_MASK (FZC_PIO + 0x00090UL) +#define SYS_ERR_MASK_META2 0x0000000000000400ULL +#define SYS_ERR_MASK_META1 0x0000000000000200ULL +#define SYS_ERR_MASK_PEU 0x0000000000000100ULL +#define SYS_ERR_MASK_TXC 0x0000000000000080ULL +#define SYS_ERR_MASK_RDMC 0x0000000000000040ULL +#define SYS_ERR_MASK_TDMC 0x0000000000000020ULL +#define SYS_ERR_MASK_ZCP 0x0000000000000010ULL +#define SYS_ERR_MASK_FFLP 0x0000000000000008ULL +#define SYS_ERR_MASK_IPP 0x0000000000000004ULL +#define SYS_ERR_MASK_MAC 0x0000000000000002ULL +#define SYS_ERR_MASK_SMX 0x0000000000000001ULL + +#define SYS_ERR_STAT (FZC_PIO + 0x00098UL) +#define SYS_ERR_STAT_META2 0x0000000000000400ULL +#define SYS_ERR_STAT_META1 0x0000000000000200ULL +#define SYS_ERR_STAT_PEU 0x0000000000000100ULL +#define SYS_ERR_STAT_TXC 0x0000000000000080ULL +#define SYS_ERR_STAT_RDMC 0x0000000000000040ULL +#define SYS_ERR_STAT_TDMC 0x0000000000000020ULL +#define SYS_ERR_STAT_ZCP 0x0000000000000010ULL +#define SYS_ERR_STAT_FFLP 0x0000000000000008ULL +#define SYS_ERR_STAT_IPP 0x0000000000000004ULL +#define SYS_ERR_STAT_MAC 0x0000000000000002ULL +#define SYS_ERR_STAT_SMX 0x0000000000000001ULL + +#define SID(LDG) (FZC_PIO + 0x10200UL + (LDG) * 8UL) +#define SID_FUNC 0x0000000000000060ULL +#define SID_FUNC_SHIFT 5 +#define SID_VECTOR 0x000000000000001fULL +#define SID_VECTOR_SHIFT 0 + +#define LDG_NUM(LDN) (FZC_PIO + 0x20000UL + (LDN) * 8UL) + +#define XMAC_PORT0_OFF (FZC_MAC + 0x000000) +#define XMAC_PORT1_OFF (FZC_MAC + 0x006000) +#define BMAC_PORT2_OFF (FZC_MAC + 0x00c000) +#define BMAC_PORT3_OFF (FZC_MAC + 0x010000) + +/* XMAC registers, offset from np->mac_regs */ + +#define XTXMAC_SW_RST 0x00000UL +#define XTXMAC_SW_RST_REG_RS 0x0000000000000002ULL +#define XTXMAC_SW_RST_SOFT_RST 0x0000000000000001ULL + +#define XRXMAC_SW_RST 0x00008UL +#define XRXMAC_SW_RST_REG_RS 0x0000000000000002ULL +#define XRXMAC_SW_RST_SOFT_RST 0x0000000000000001ULL + +#define XTXMAC_STATUS 0x00020UL +#define XTXMAC_STATUS_FRAME_CNT_EXP 0x0000000000000800ULL +#define XTXMAC_STATUS_BYTE_CNT_EXP 0x0000000000000400ULL +#define XTXMAC_STATUS_TXFIFO_XFR_ERR 0x0000000000000010ULL +#define XTXMAC_STATUS_TXMAC_OFLOW 0x0000000000000008ULL +#define XTXMAC_STATUS_MAX_PSIZE_ERR 0x0000000000000004ULL +#define XTXMAC_STATUS_TXMAC_UFLOW 0x0000000000000002ULL +#define XTXMAC_STATUS_FRAME_XMITED 0x0000000000000001ULL + +#define XRXMAC_STATUS 0x00028UL +#define XRXMAC_STATUS_RXHIST7_CNT_EXP 0x0000000000100000ULL +#define XRXMAC_STATUS_LCL_FLT_STATUS 0x0000000000080000ULL +#define XRXMAC_STATUS_RFLT_DET 0x0000000000040000ULL +#define XRXMAC_STATUS_LFLT_CNT_EXP 0x0000000000020000ULL +#define XRXMAC_STATUS_PHY_MDINT 0x0000000000010000ULL +#define 
XRXMAC_STATUS_ALIGNERR_CNT_EXP 0x0000000000010000ULL +#define XRXMAC_STATUS_RXFRAG_CNT_EXP 0x0000000000008000ULL +#define XRXMAC_STATUS_RXMULTF_CNT_EXP 0x0000000000004000ULL +#define XRXMAC_STATUS_RXBCAST_CNT_EXP 0x0000000000002000ULL +#define XRXMAC_STATUS_RXHIST6_CNT_EXP 0x0000000000001000ULL +#define XRXMAC_STATUS_RXHIST5_CNT_EXP 0x0000000000000800ULL +#define XRXMAC_STATUS_RXHIST4_CNT_EXP 0x0000000000000400ULL +#define XRXMAC_STATUS_RXHIST3_CNT_EXP 0x0000000000000200ULL +#define XRXMAC_STATUS_RXHIST2_CNT_EXP 0x0000000000000100ULL +#define XRXMAC_STATUS_RXHIST1_CNT_EXP 0x0000000000000080ULL +#define XRXMAC_STATUS_RXOCTET_CNT_EXP 0x0000000000000040ULL +#define XRXMAC_STATUS_CVIOLERR_CNT_EXP 0x0000000000000020ULL +#define XRXMAC_STATUS_LENERR_CNT_EXP 0x0000000000000010ULL +#define XRXMAC_STATUS_CRCERR_CNT_EXP 0x0000000000000008ULL +#define XRXMAC_STATUS_RXUFLOW 0x0000000000000004ULL +#define XRXMAC_STATUS_RXOFLOW 0x0000000000000002ULL +#define XRXMAC_STATUS_FRAME_RCVD 0x0000000000000001ULL + +#define XMAC_FC_STAT 0x00030UL +#define XMAC_FC_STAT_RX_RCV_PAUSE_TIME 0x00000000ffff0000ULL +#define XMAC_FC_STAT_TX_MAC_NPAUSE 0x0000000000000004ULL +#define XMAC_FC_STAT_TX_MAC_PAUSE 0x0000000000000002ULL +#define XMAC_FC_STAT_RX_MAC_RPAUSE 0x0000000000000001ULL + +#define XTXMAC_STAT_MSK 0x00040UL +#define XTXMAC_STAT_MSK_FRAME_CNT_EXP 0x0000000000000800ULL +#define XTXMAC_STAT_MSK_BYTE_CNT_EXP 0x0000000000000400ULL +#define XTXMAC_STAT_MSK_TXFIFO_XFR_ERR 0x0000000000000010ULL +#define XTXMAC_STAT_MSK_TXMAC_OFLOW 0x0000000000000008ULL +#define XTXMAC_STAT_MSK_MAX_PSIZE_ERR 0x0000000000000004ULL +#define XTXMAC_STAT_MSK_TXMAC_UFLOW 0x0000000000000002ULL +#define XTXMAC_STAT_MSK_FRAME_XMITED 0x0000000000000001ULL + +#define XRXMAC_STAT_MSK 0x00048UL +#define XRXMAC_STAT_MSK_LCL_FLT_STAT_MSK 0x0000000000080000ULL +#define XRXMAC_STAT_MSK_RFLT_DET 0x0000000000040000ULL +#define XRXMAC_STAT_MSK_LFLT_CNT_EXP 0x0000000000020000ULL +#define XRXMAC_STAT_MSK_PHY_MDINT 0x0000000000010000ULL +#define XRXMAC_STAT_MSK_RXFRAG_CNT_EXP 0x0000000000008000ULL +#define XRXMAC_STAT_MSK_RXMULTF_CNT_EXP 0x0000000000004000ULL +#define XRXMAC_STAT_MSK_RXBCAST_CNT_EXP 0x0000000000002000ULL +#define XRXMAC_STAT_MSK_RXHIST6_CNT_EXP 0x0000000000001000ULL +#define XRXMAC_STAT_MSK_RXHIST5_CNT_EXP 0x0000000000000800ULL +#define XRXMAC_STAT_MSK_RXHIST4_CNT_EXP 0x0000000000000400ULL +#define XRXMAC_STAT_MSK_RXHIST3_CNT_EXP 0x0000000000000200ULL +#define XRXMAC_STAT_MSK_RXHIST2_CNT_EXP 0x0000000000000100ULL +#define XRXMAC_STAT_MSK_RXHIST1_CNT_EXP 0x0000000000000080ULL +#define XRXMAC_STAT_MSK_RXOCTET_CNT_EXP 0x0000000000000040ULL +#define XRXMAC_STAT_MSK_CVIOLERR_CNT_EXP 0x0000000000000020ULL +#define XRXMAC_STAT_MSK_LENERR_CNT_EXP 0x0000000000000010ULL +#define XRXMAC_STAT_MSK_CRCERR_CNT_EXP 0x0000000000000008ULL +#define XRXMAC_STAT_MSK_RXUFLOW_CNT_EXP 0x0000000000000004ULL +#define XRXMAC_STAT_MSK_RXOFLOW_CNT_EXP 0x0000000000000002ULL +#define XRXMAC_STAT_MSK_FRAME_RCVD 0x0000000000000001ULL + +#define XMAC_FC_MSK 0x00050UL +#define XMAC_FC_MSK_TX_MAC_NPAUSE 0x0000000000000004ULL +#define XMAC_FC_MSK_TX_MAC_PAUSE 0x0000000000000002ULL +#define XMAC_FC_MSK_RX_MAC_RPAUSE 0x0000000000000001ULL + +#define XMAC_CONFIG 0x00060UL +#define XMAC_CONFIG_SEL_CLK_25MHZ 0x0000000080000000ULL +#define XMAC_CONFIG_1G_PCS_BYPASS 0x0000000040000000ULL +#define XMAC_CONFIG_10G_XPCS_BYPASS 0x0000000020000000ULL +#define XMAC_CONFIG_MODE_MASK 0x0000000018000000ULL +#define XMAC_CONFIG_MODE_XGMII 0x0000000000000000ULL +#define 
XMAC_CONFIG_MODE_GMII 0x0000000008000000ULL +#define XMAC_CONFIG_MODE_MII 0x0000000010000000ULL +#define XMAC_CONFIG_LFS_DISABLE 0x0000000004000000ULL +#define XMAC_CONFIG_LOOPBACK 0x0000000002000000ULL +#define XMAC_CONFIG_TX_OUTPUT_EN 0x0000000001000000ULL +#define XMAC_CONFIG_SEL_POR_CLK_SRC 0x0000000000800000ULL +#define XMAC_CONFIG_LED_POLARITY 0x0000000000400000ULL +#define XMAC_CONFIG_FORCE_LED_ON 0x0000000000200000ULL +#define XMAC_CONFIG_PASS_FLOW_CTRL 0x0000000000100000ULL +#define XMAC_CONFIG_RCV_PAUSE_ENABLE 0x0000000000080000ULL +#define XMAC_CONFIG_MAC2IPP_PKT_CNT_EN 0x0000000000040000ULL +#define XMAC_CONFIG_STRIP_CRC 0x0000000000020000ULL +#define XMAC_CONFIG_ADDR_FILTER_EN 0x0000000000010000ULL +#define XMAC_CONFIG_HASH_FILTER_EN 0x0000000000008000ULL +#define XMAC_CONFIG_RX_CODEV_CHK_DIS 0x0000000000004000ULL +#define XMAC_CONFIG_RESERVED_MULTICAST 0x0000000000002000ULL +#define XMAC_CONFIG_RX_CRC_CHK_DIS 0x0000000000001000ULL +#define XMAC_CONFIG_ERR_CHK_DIS 0x0000000000000800ULL +#define XMAC_CONFIG_PROMISC_GROUP 0x0000000000000400ULL +#define XMAC_CONFIG_PROMISCUOUS 0x0000000000000200ULL +#define XMAC_CONFIG_RX_MAC_ENABLE 0x0000000000000100ULL +#define XMAC_CONFIG_WARNING_MSG_EN 0x0000000000000080ULL +#define XMAC_CONFIG_ALWAYS_NO_CRC 0x0000000000000008ULL +#define XMAC_CONFIG_VAR_MIN_IPG_EN 0x0000000000000004ULL +#define XMAC_CONFIG_STRETCH_MODE 0x0000000000000002ULL +#define XMAC_CONFIG_TX_ENABLE 0x0000000000000001ULL + +#define XMAC_IPG 0x00080UL +#define XMAC_IPG_STRETCH_CONST 0x0000000000e00000ULL +#define XMAC_IPG_STRETCH_CONST_SHIFT 21 +#define XMAC_IPG_STRETCH_RATIO 0x00000000001f0000ULL +#define XMAC_IPG_STRETCH_RATIO_SHIFT 16 +#define XMAC_IPG_IPG_MII_GMII 0x000000000000ff00ULL +#define XMAC_IPG_IPG_MII_GMII_SHIFT 8 +#define XMAC_IPG_IPG_XGMII 0x0000000000000007ULL +#define XMAC_IPG_IPG_XGMII_SHIFT 0 + +#define IPG_12_15_XGMII 3 +#define IPG_16_19_XGMII 4 +#define IPG_20_23_XGMII 5 +#define IPG_12_MII_GMII 10 +#define IPG_13_MII_GMII 11 +#define IPG_14_MII_GMII 12 +#define IPG_15_MII_GMII 13 +#define IPG_16_MII_GMII 14 + +#define XMAC_MIN 0x00088UL +#define XMAC_MIN_RX_MIN_PKT_SIZE 0x000000003ff00000ULL +#define XMAC_MIN_RX_MIN_PKT_SIZE_SHFT 20 +#define XMAC_MIN_SLOT_TIME 0x000000000003fc00ULL +#define XMAC_MIN_SLOT_TIME_SHFT 10 +#define XMAC_MIN_TX_MIN_PKT_SIZE 0x00000000000003ffULL +#define XMAC_MIN_TX_MIN_PKT_SIZE_SHFT 0 + +#define XMAC_MAX 0x00090UL +#define XMAC_MAX_FRAME_SIZE 0x0000000000003fffULL +#define XMAC_MAX_FRAME_SIZE_SHFT 0 + +#define XMAC_ADDR0 0x000a0UL +#define XMAC_ADDR0_ADDR0 0x000000000000ffffULL + +#define XMAC_ADDR1 0x000a8UL +#define XMAC_ADDR1_ADDR1 0x000000000000ffffULL + +#define XMAC_ADDR2 0x000b0UL +#define XMAC_ADDR2_ADDR2 0x000000000000ffffULL + +#define XMAC_ADDR_CMPEN 0x00208UL +#define XMAC_ADDR_CMPEN_EN15 0x0000000000008000ULL +#define XMAC_ADDR_CMPEN_EN14 0x0000000000004000ULL +#define XMAC_ADDR_CMPEN_EN13 0x0000000000002000ULL +#define XMAC_ADDR_CMPEN_EN12 0x0000000000001000ULL +#define XMAC_ADDR_CMPEN_EN11 0x0000000000000800ULL +#define XMAC_ADDR_CMPEN_EN10 0x0000000000000400ULL +#define XMAC_ADDR_CMPEN_EN9 0x0000000000000200ULL +#define XMAC_ADDR_CMPEN_EN8 0x0000000000000100ULL +#define XMAC_ADDR_CMPEN_EN7 0x0000000000000080ULL +#define XMAC_ADDR_CMPEN_EN6 0x0000000000000040ULL +#define XMAC_ADDR_CMPEN_EN5 0x0000000000000020ULL +#define XMAC_ADDR_CMPEN_EN4 0x0000000000000010ULL +#define XMAC_ADDR_CMPEN_EN3 0x0000000000000008ULL +#define XMAC_ADDR_CMPEN_EN2 0x0000000000000004ULL +#define XMAC_ADDR_CMPEN_EN1 
0x0000000000000002ULL +#define XMAC_ADDR_CMPEN_EN0 0x0000000000000001ULL + +#define XMAC_NUM_ALT_ADDR 16 + +#define XMAC_ALT_ADDR0(NUM) (0x00218UL + (NUM)*0x18UL) +#define XMAC_ALT_ADDR0_ADDR0 0x000000000000ffffULL + +#define XMAC_ALT_ADDR1(NUM) (0x00220UL + (NUM)*0x18UL) +#define XMAC_ALT_ADDR1_ADDR1 0x000000000000ffffULL + +#define XMAC_ALT_ADDR2(NUM) (0x00228UL + (NUM)*0x18UL) +#define XMAC_ALT_ADDR2_ADDR2 0x000000000000ffffULL + +#define XMAC_ADD_FILT0 0x00818UL +#define XMAC_ADD_FILT0_FILT0 0x000000000000ffffULL + +#define XMAC_ADD_FILT1 0x00820UL +#define XMAC_ADD_FILT1_FILT1 0x000000000000ffffULL + +#define XMAC_ADD_FILT2 0x00828UL +#define XMAC_ADD_FILT2_FILT2 0x000000000000ffffULL + +#define XMAC_ADD_FILT12_MASK 0x00830UL +#define XMAC_ADD_FILT12_MASK_VAL 0x00000000000000ffULL + +#define XMAC_ADD_FILT00_MASK 0x00838UL +#define XMAC_ADD_FILT00_MASK_VAL 0x000000000000ffffULL + +#define XMAC_HASH_TBL(NUM) (0x00840UL + (NUM) * 0x8UL) +#define XMAC_HASH_TBL_VAL 0x000000000000ffffULL + +#define XMAC_NUM_HOST_INFO 20 + +#define XMAC_HOST_INFO(NUM) (0x00900UL + (NUM) * 0x8UL) + +#define XMAC_PA_DATA0 0x00b80UL +#define XMAC_PA_DATA0_VAL 0x00000000ffffffffULL + +#define XMAC_PA_DATA1 0x00b88UL +#define XMAC_PA_DATA1_VAL 0x00000000ffffffffULL + +#define XMAC_DEBUG_SEL 0x00b90UL +#define XMAC_DEBUG_SEL_XMAC 0x0000000000000078ULL +#define XMAC_DEBUG_SEL_MAC 0x0000000000000007ULL + +#define XMAC_TRAIN_VEC 0x00b98UL +#define XMAC_TRAIN_VEC_VAL 0x00000000ffffffffULL + +#define RXMAC_BT_CNT 0x00100UL +#define RXMAC_BT_CNT_COUNT 0x00000000ffffffffULL + +#define RXMAC_BC_FRM_CNT 0x00108UL +#define RXMAC_BC_FRM_CNT_COUNT 0x00000000001fffffULL + +#define RXMAC_MC_FRM_CNT 0x00110UL +#define RXMAC_MC_FRM_CNT_COUNT 0x00000000001fffffULL + +#define RXMAC_FRAG_CNT 0x00118UL +#define RXMAC_FRAG_CNT_COUNT 0x00000000001fffffULL + +#define RXMAC_HIST_CNT1 0x00120UL +#define RXMAC_HIST_CNT1_COUNT 0x00000000001fffffULL + +#define RXMAC_HIST_CNT2 0x00128UL +#define RXMAC_HIST_CNT2_COUNT 0x00000000001fffffULL + +#define RXMAC_HIST_CNT3 0x00130UL +#define RXMAC_HIST_CNT3_COUNT 0x00000000000fffffULL + +#define RXMAC_HIST_CNT4 0x00138UL +#define RXMAC_HIST_CNT4_COUNT 0x000000000007ffffULL + +#define RXMAC_HIST_CNT5 0x00140UL +#define RXMAC_HIST_CNT5_COUNT 0x000000000003ffffULL + +#define RXMAC_HIST_CNT6 0x00148UL +#define RXMAC_HIST_CNT6_COUNT 0x000000000000ffffULL + +#define RXMAC_MPSZER_CNT 0x00150UL +#define RXMAC_MPSZER_CNT_COUNT 0x00000000000000ffULL + +#define RXMAC_CRC_ER_CNT 0x00158UL +#define RXMAC_CRC_ER_CNT_COUNT 0x00000000000000ffULL + +#define RXMAC_CD_VIO_CNT 0x00160UL +#define RXMAC_CD_VIO_CNT_COUNT 0x00000000000000ffULL + +#define RXMAC_ALIGN_ERR_CNT 0x00168UL +#define RXMAC_ALIGN_ERR_CNT_COUNT 0x00000000000000ffULL + +#define TXMAC_FRM_CNT 0x00170UL +#define TXMAC_FRM_CNT_COUNT 0x00000000ffffffffULL + +#define TXMAC_BYTE_CNT 0x00178UL +#define TXMAC_BYTE_CNT_COUNT 0x00000000ffffffffULL + +#define LINK_FAULT_CNT 0x00180UL +#define LINK_FAULT_CNT_COUNT 0x00000000000000ffULL + +#define RXMAC_HIST_CNT7 0x00188UL +#define RXMAC_HIST_CNT7_COUNT 0x0000000007ffffffULL + +#define XMAC_SM_REG 0x001a8UL +#define XMAC_SM_REG_STATE 0x00000000ffffffffULL + +#define XMAC_INTER1 0x001b0UL +#define XMAC_INTERN1_SIGNALS1 0x00000000ffffffffULL + +#define XMAC_INTER2 0x001b8UL +#define XMAC_INTERN2_SIGNALS2 0x00000000ffffffffULL + +/* BMAC registers, offset from np->mac_regs */ + +#define BTXMAC_SW_RST 0x00000UL +#define BTXMAC_SW_RST_RESET 0x0000000000000001ULL + +#define BRXMAC_SW_RST 0x00008UL +#define 
BRXMAC_SW_RST_RESET 0x0000000000000001ULL + +#define BMAC_SEND_PAUSE 0x00010UL +#define BMAC_SEND_PAUSE_SEND 0x0000000000010000ULL +#define BMAC_SEND_PAUSE_TIME 0x000000000000ffffULL + +#define BTXMAC_STATUS 0x00020UL +#define BTXMAC_STATUS_XMIT 0x0000000000000001ULL +#define BTXMAC_STATUS_UNDERRUN 0x0000000000000002ULL +#define BTXMAC_STATUS_MAX_PKT_ERR 0x0000000000000004ULL +#define BTXMAC_STATUS_BYTE_CNT_EXP 0x0000000000000400ULL +#define BTXMAC_STATUS_FRAME_CNT_EXP 0x0000000000000800ULL + +#define BRXMAC_STATUS 0x00028UL +#define BRXMAC_STATUS_RX_PKT 0x0000000000000001ULL +#define BRXMAC_STATUS_OVERFLOW 0x0000000000000002ULL +#define BRXMAC_STATUS_FRAME_CNT_EXP 0x0000000000000004ULL +#define BRXMAC_STATUS_ALIGN_ERR_EXP 0x0000000000000008ULL +#define BRXMAC_STATUS_CRC_ERR_EXP 0x0000000000000010ULL +#define BRXMAC_STATUS_LEN_ERR_EXP 0x0000000000000020ULL + +#define BMAC_CTRL_STATUS 0x00030UL +#define BMAC_CTRL_STATUS_PAUSE_RECV 0x0000000000000001ULL +#define BMAC_CTRL_STATUS_PAUSE 0x0000000000000002ULL +#define BMAC_CTRL_STATUS_NOPAUSE 0x0000000000000004ULL +#define BMAC_CTRL_STATUS_TIME 0x00000000ffff0000ULL +#define BMAC_CTRL_STATUS_TIME_SHIFT 16 + +#define BTXMAC_STATUS_MASK 0x00040UL +#define BRXMAC_STATUS_MASK 0x00048UL +#define BMAC_CTRL_STATUS_MASK 0x00050UL + +#define BTXMAC_CONFIG 0x00060UL +#define BTXMAC_CONFIG_ENABLE 0x0000000000000001ULL +#define BTXMAC_CONFIG_FCS_DISABLE 0x0000000000000002ULL + +#define BRXMAC_CONFIG 0x00068UL +#define BRXMAC_CONFIG_DISCARD_DIS 0x0000000000000080ULL +#define BRXMAC_CONFIG_ADDR_FILT_EN 0x0000000000000040ULL +#define BRXMAC_CONFIG_HASH_FILT_EN 0x0000000000000020ULL +#define BRXMAC_CONFIG_PROMISC_GRP 0x0000000000000010ULL +#define BRXMAC_CONFIG_PROMISC 0x0000000000000008ULL +#define BRXMAC_CONFIG_STRIP_FCS 0x0000000000000004ULL +#define BRXMAC_CONFIG_STRIP_PAD 0x0000000000000002ULL +#define BRXMAC_CONFIG_ENABLE 0x0000000000000001ULL + +#define BMAC_CTRL_CONFIG 0x00070UL +#define BMAC_CTRL_CONFIG_TX_PAUSE_EN 0x0000000000000001ULL +#define BMAC_CTRL_CONFIG_RX_PAUSE_EN 0x0000000000000002ULL +#define BMAC_CTRL_CONFIG_PASS_CTRL 0x0000000000000004ULL + +#define BMAC_XIF_CONFIG 0x00078UL +#define BMAC_XIF_CONFIG_TX_OUTPUT_EN 0x0000000000000001ULL +#define BMAC_XIF_CONFIG_MII_LOOPBACK 0x0000000000000002ULL +#define BMAC_XIF_CONFIG_GMII_MODE 0x0000000000000008ULL +#define BMAC_XIF_CONFIG_LINK_LED 0x0000000000000020ULL +#define BMAC_XIF_CONFIG_LED_POLARITY 0x0000000000000040ULL +#define BMAC_XIF_CONFIG_25MHZ_CLOCK 0x0000000000000080ULL + +#define BMAC_MIN_FRAME 0x000a0UL +#define BMAC_MIN_FRAME_VAL 0x00000000000003ffULL + +#define BMAC_MAX_FRAME 0x000a8UL +#define BMAC_MAX_FRAME_MAX_BURST 0x000000003fff0000ULL +#define BMAC_MAX_FRAME_MAX_BURST_SHIFT 16 +#define BMAC_MAX_FRAME_MAX_FRAME 0x0000000000003fffULL +#define BMAC_MAX_FRAME_MAX_FRAME_SHIFT 0 + +#define BMAC_PREAMBLE_SIZE 0x000b0UL +#define BMAC_PREAMBLE_SIZE_VAL 0x00000000000003ffULL + +#define BMAC_CTRL_TYPE 0x000c8UL + +#define BMAC_ADDR0 0x00100UL +#define BMAC_ADDR0_ADDR0 0x000000000000ffffULL + +#define BMAC_ADDR1 0x00108UL +#define BMAC_ADDR1_ADDR1 0x000000000000ffffULL + +#define BMAC_ADDR2 0x00110UL +#define BMAC_ADDR2_ADDR2 0x000000000000ffffULL + +#define BMAC_NUM_ALT_ADDR 7 + +#define BMAC_ALT_ADDR0(NUM) (0x00118UL + (NUM)*0x18UL) +#define BMAC_ALT_ADDR0_ADDR0 0x000000000000ffffULL + +#define BMAC_ALT_ADDR1(NUM) (0x00120UL + (NUM)*0x18UL) +#define BMAC_ALT_ADDR1_ADDR1 0x000000000000ffffULL + +#define BMAC_ALT_ADDR2(NUM) (0x00128UL + (NUM)*0x18UL) +#define BMAC_ALT_ADDR2_ADDR2 
0x000000000000ffffULL + +#define BMAC_FC_ADDR0 0x00268UL +#define BMAC_FC_ADDR0_ADDR0 0x000000000000ffffULL + +#define BMAC_FC_ADDR1 0x00270UL +#define BMAC_FC_ADDR1_ADDR1 0x000000000000ffffULL + +#define BMAC_FC_ADDR2 0x00278UL +#define BMAC_FC_ADDR2_ADDR2 0x000000000000ffffULL + +#define BMAC_ADD_FILT0 0x00298UL +#define BMAC_ADD_FILT0_FILT0 0x000000000000ffffULL + +#define BMAC_ADD_FILT1 0x002a0UL +#define BMAC_ADD_FILT1_FILT1 0x000000000000ffffULL + +#define BMAC_ADD_FILT2 0x002a8UL +#define BMAC_ADD_FILT2_FILT2 0x000000000000ffffULL + +#define BMAC_ADD_FILT12_MASK 0x002b0UL +#define BMAC_ADD_FILT12_MASK_VAL 0x00000000000000ffULL + +#define BMAC_ADD_FILT00_MASK 0x002b8UL +#define BMAC_ADD_FILT00_MASK_VAL 0x000000000000ffffULL + +#define BMAC_HASH_TBL(NUM) (0x002c0UL + (NUM) * 0x8UL) +#define BMAC_HASH_TBL_VAL 0x000000000000ffffULL + +#define BRXMAC_FRAME_CNT 0x00370 +#define BRXMAC_FRAME_CNT_COUNT 0x000000000000ffffULL + +#define BRXMAC_MAX_LEN_ERR_CNT 0x00378 + +#define BRXMAC_ALIGN_ERR_CNT 0x00380 +#define BRXMAC_ALIGN_ERR_CNT_COUNT 0x000000000000ffffULL + +#define BRXMAC_CRC_ERR_CNT 0x00388 +#define BRXMAC_ALIGN_ERR_CNT_COUNT 0x000000000000ffffULL + +#define BRXMAC_CODE_VIOL_ERR_CNT 0x00390 +#define BRXMAC_CODE_VIOL_ERR_CNT_COUNT 0x000000000000ffffULL + +#define BMAC_STATE_MACHINE 0x003a0 + +#define BMAC_ADDR_CMPEN 0x003f8UL +#define BMAC_ADDR_CMPEN_EN15 0x0000000000008000ULL +#define BMAC_ADDR_CMPEN_EN14 0x0000000000004000ULL +#define BMAC_ADDR_CMPEN_EN13 0x0000000000002000ULL +#define BMAC_ADDR_CMPEN_EN12 0x0000000000001000ULL +#define BMAC_ADDR_CMPEN_EN11 0x0000000000000800ULL +#define BMAC_ADDR_CMPEN_EN10 0x0000000000000400ULL +#define BMAC_ADDR_CMPEN_EN9 0x0000000000000200ULL +#define BMAC_ADDR_CMPEN_EN8 0x0000000000000100ULL +#define BMAC_ADDR_CMPEN_EN7 0x0000000000000080ULL +#define BMAC_ADDR_CMPEN_EN6 0x0000000000000040ULL +#define BMAC_ADDR_CMPEN_EN5 0x0000000000000020ULL +#define BMAC_ADDR_CMPEN_EN4 0x0000000000000010ULL +#define BMAC_ADDR_CMPEN_EN3 0x0000000000000008ULL +#define BMAC_ADDR_CMPEN_EN2 0x0000000000000004ULL +#define BMAC_ADDR_CMPEN_EN1 0x0000000000000002ULL +#define BMAC_ADDR_CMPEN_EN0 0x0000000000000001ULL + +#define BMAC_NUM_HOST_INFO 9 + +#define BMAC_HOST_INFO(NUM) (0x00400UL + (NUM) * 0x8UL) + +#define BTXMAC_BYTE_CNT 0x00448UL +#define BTXMAC_BYTE_CNT_COUNT 0x00000000ffffffffULL + +#define BTXMAC_FRM_CNT 0x00450UL +#define BTXMAC_FRM_CNT_COUNT 0x00000000ffffffffULL + +#define BRXMAC_BYTE_CNT 0x00458UL +#define BRXMAC_BYTE_CNT_COUNT 0x00000000ffffffffULL + +#define HOST_INFO_MPR 0x0000000000000100ULL +#define HOST_INFO_MACRDCTBLN 0x0000000000000007ULL + +/* XPCS registers, offset from np->regs + np->xpcs_off */ + +#define XPCS_CONTROL1 (FZC_MAC + 0x00000UL) +#define XPCS_CONTROL1_RESET 0x0000000000008000ULL +#define XPCS_CONTROL1_LOOPBACK 0x0000000000004000ULL +#define XPCS_CONTROL1_SPEED_SELECT3 0x0000000000002000ULL +#define XPCS_CONTROL1_CSR_LOW_PWR 0x0000000000000800ULL +#define XPCS_CONTROL1_CSR_SPEED1 0x0000000000000040ULL +#define XPCS_CONTROL1_CSR_SPEED0 0x000000000000003cULL + +#define XPCS_STATUS1 (FZC_MAC + 0x00008UL) +#define XPCS_STATUS1_CSR_FAULT 0x0000000000000080ULL +#define XPCS_STATUS1_CSR_RXLNK_STAT 0x0000000000000004ULL +#define XPCS_STATUS1_CSR_LPWR_ABLE 0x0000000000000002ULL + +#define XPCS_DEVICE_IDENTIFIER (FZC_MAC + 0x00010UL) +#define XPCS_DEVICE_IDENTIFIER_VAL 0x00000000ffffffffULL + +#define XPCS_SPEED_ABILITY (FZC_MAC + 0x00018UL) +#define XPCS_SPEED_ABILITY_10GIG 0x0000000000000001ULL + +#define XPCS_DEV_IN_PKG (FZC_MAC + 
0x00020UL) +#define XPCS_DEV_IN_PKG_CSR_VEND2 0x0000000080000000ULL +#define XPCS_DEV_IN_PKG_CSR_VEND1 0x0000000040000000ULL +#define XPCS_DEV_IN_PKG_DTE_XS 0x0000000000000020ULL +#define XPCS_DEV_IN_PKG_PHY_XS 0x0000000000000010ULL +#define XPCS_DEV_IN_PKG_PCS 0x0000000000000008ULL +#define XPCS_DEV_IN_PKG_WIS 0x0000000000000004ULL +#define XPCS_DEV_IN_PKG_PMD_PMA 0x0000000000000002ULL +#define XPCS_DEV_IN_PKG_CLS22 0x0000000000000001ULL + +#define XPCS_CONTROL2 (FZC_MAC + 0x00028UL) +#define XPCS_CONTROL2_CSR_PSC_SEL 0x0000000000000003ULL + +#define XPCS_STATUS2 (FZC_MAC + 0x00030UL) +#define XPCS_STATUS2_CSR_DEV_PRES 0x000000000000c000ULL +#define XPCS_STATUS2_CSR_TX_FAULT 0x0000000000000800ULL +#define XPCS_STATUS2_CSR_RCV_FAULT 0x0000000000000400ULL +#define XPCS_STATUS2_TEN_GBASE_W 0x0000000000000004ULL +#define XPCS_STATUS2_TEN_GBASE_X 0x0000000000000002ULL +#define XPCS_STATUS2_TEN_GBASE_R 0x0000000000000001ULL + +#define XPCS_PKG_ID (FZC_MAC + 0x00038UL) +#define XPCS_PKG_ID_VAL 0x00000000ffffffffULL + +#define XPCS_STATUS(IDX) (FZC_MAC + 0x00040UL) +#define XPCS_STATUS_CSR_LANE_ALIGN 0x0000000000001000ULL +#define XPCS_STATUS_CSR_PATTEST_CAP 0x0000000000000800ULL +#define XPCS_STATUS_CSR_LANE3_SYNC 0x0000000000000008ULL +#define XPCS_STATUS_CSR_LANE2_SYNC 0x0000000000000004ULL +#define XPCS_STATUS_CSR_LANE1_SYNC 0x0000000000000002ULL +#define XPCS_STATUS_CSR_LANE0_SYNC 0x0000000000000001ULL + +#define XPCS_TEST_CONTROL (FZC_MAC + 0x00048UL) +#define XPCS_TEST_CONTROL_TXTST_EN 0x0000000000000004ULL +#define XPCS_TEST_CONTROL_TPAT_SEL 0x0000000000000003ULL + +#define XPCS_CFG_VENDOR1 (FZC_MAC + 0x00050UL) +#define XPCS_CFG_VENDOR1_DBG_IOTST 0x0000000000000080ULL +#define XPCS_CFG_VENDOR1_DBG_SEL 0x0000000000000078ULL +#define XPCS_CFG_VENDOR1_BYPASS_DET 0x0000000000000004ULL +#define XPCS_CFG_VENDOR1_TXBUF_EN 0x0000000000000002ULL +#define XPCS_CFG_VENDOR1_XPCS_EN 0x0000000000000001ULL + +#define XPCS_DIAG_VENDOR2 (FZC_MAC + 0x00058UL) +#define XPCS_DIAG_VENDOR2_SSM_LANE3 0x0000000001e00000ULL +#define XPCS_DIAG_VENDOR2_SSM_LANE2 0x00000000001e0000ULL +#define XPCS_DIAG_VENDOR2_SSM_LANE1 0x000000000001e000ULL +#define XPCS_DIAG_VENDOR2_SSM_LANE0 0x0000000000001e00ULL +#define XPCS_DIAG_VENDOR2_EBUF_SM 0x00000000000001feULL +#define XPCS_DIAG_VENDOR2_RCV_SM 0x0000000000000001ULL + +#define XPCS_MASK1 (FZC_MAC + 0x00060UL) +#define XPCS_MASK1_FAULT_MASK 0x0000000000000080ULL +#define XPCS_MASK1_RXALIGN_STAT_MSK 0x0000000000000004ULL + +#define XPCS_PKT_COUNT (FZC_MAC + 0x00068UL) +#define XPCS_PKT_COUNT_TX 0x00000000ffff0000ULL +#define XPCS_PKT_COUNT_RX 0x000000000000ffffULL + +#define XPCS_TX_SM (FZC_MAC + 0x00070UL) +#define XPCS_TX_SM_VAL 0x000000000000000fULL + +#define XPCS_DESKEW_ERR_CNT (FZC_MAC + 0x00078UL) +#define XPCS_DESKEW_ERR_CNT_VAL 0x00000000000000ffULL + +#define XPCS_SYMERR_CNT01 (FZC_MAC + 0x00080UL) +#define XPCS_SYMERR_CNT01_LANE1 0x00000000ffff0000ULL +#define XPCS_SYMERR_CNT01_LANE0 0x000000000000ffffULL + +#define XPCS_SYMERR_CNT23 (FZC_MAC + 0x00088UL) +#define XPCS_SYMERR_CNT23_LANE3 0x00000000ffff0000ULL +#define XPCS_SYMERR_CNT23_LANE2 0x000000000000ffffULL + +#define XPCS_TRAINING_VECTOR (FZC_MAC + 0x00090UL) +#define XPCS_TRAINING_VECTOR_VAL 0x00000000ffffffffULL + +/* PCS registers, offset from np->regs + np->pcs_off */ + +#define PCS_MII_CTL (FZC_MAC + 0x00000UL) +#define PCS_MII_CTL_RST 0x0000000000008000ULL +#define PCS_MII_CTL_10_100_SPEED 0x0000000000002000ULL +#define PCS_MII_AUTONEG_EN 0x0000000000001000ULL +#define PCS_MII_PWR_DOWN 
0x0000000000000800ULL +#define PCS_MII_ISOLATE 0x0000000000000400ULL +#define PCS_MII_AUTONEG_RESTART 0x0000000000000200ULL +#define PCS_MII_DUPLEX 0x0000000000000100ULL +#define PCS_MII_COLL_TEST 0x0000000000000080ULL +#define PCS_MII_1000MB_SPEED 0x0000000000000040ULL + +#define PCS_MII_STAT (FZC_MAC + 0x00008UL) +#define PCS_MII_STAT_EXT_STATUS 0x0000000000000100ULL +#define PCS_MII_STAT_AUTONEG_DONE 0x0000000000000020ULL +#define PCS_MII_STAT_REMOTE_FAULT 0x0000000000000010ULL +#define PCS_MII_STAT_AUTONEG_ABLE 0x0000000000000008ULL +#define PCS_MII_STAT_LINK_STATUS 0x0000000000000004ULL +#define PCS_MII_STAT_JABBER_DET 0x0000000000000002ULL +#define PCS_MII_STAT_EXT_CAP 0x0000000000000001ULL + +#define PCS_MII_ADV (FZC_MAC + 0x00010UL) +#define PCS_MII_ADV_NEXT_PAGE 0x0000000000008000ULL +#define PCS_MII_ADV_ACK 0x0000000000004000ULL +#define PCS_MII_ADV_REMOTE_FAULT 0x0000000000003000ULL +#define PCS_MII_ADV_ASM_DIR 0x0000000000000100ULL +#define PCS_MII_ADV_PAUSE 0x0000000000000080ULL +#define PCS_MII_ADV_HALF_DUPLEX 0x0000000000000040ULL +#define PCS_MII_ADV_FULL_DUPLEX 0x0000000000000020ULL + +#define PCS_MII_PARTNER (FZC_MAC + 0x00018UL) +#define PCS_MII_PARTNER_NEXT_PAGE 0x0000000000008000ULL +#define PCS_MII_PARTNER_ACK 0x0000000000004000ULL +#define PCS_MII_PARTNER_REMOTE_FAULT 0x0000000000002000ULL +#define PCS_MII_PARTNER_PAUSE 0x0000000000000180ULL +#define PCS_MII_PARTNER_HALF_DUPLEX 0x0000000000000040ULL +#define PCS_MII_PARTNER_FULL_DUPLEX 0x0000000000000020ULL + +#define PCS_CONF (FZC_MAC + 0x00020UL) +#define PCS_CONF_MASK 0x0000000000000040ULL +#define PCS_CONF_10MS_TMR_OVERRIDE 0x0000000000000020ULL +#define PCS_CONF_JITTER_STUDY 0x0000000000000018ULL +#define PCS_CONF_SIGDET_ACTIVE_LOW 0x0000000000000004ULL +#define PCS_CONF_SIGDET_OVERRIDE 0x0000000000000002ULL +#define PCS_CONF_ENABLE 0x0000000000000001ULL + +#define PCS_STATE (FZC_MAC + 0x00028UL) +#define PCS_STATE_D_PARTNER_FAIL 0x0000000020000000ULL +#define PCS_STATE_D_WAIT_C_CODES_ACK 0x0000000010000000ULL +#define PCS_STATE_D_SYNC_LOSS 0x0000000008000000ULL +#define PCS_STATE_D_NO_GOOD_C_CODES 0x0000000004000000ULL +#define PCS_STATE_D_SERDES 0x0000000002000000ULL +#define PCS_STATE_D_BREAKLINK_C_CODES 0x0000000001000000ULL +#define PCS_STATE_L_SIGDET 0x0000000000400000ULL +#define PCS_STATE_L_SYNC_LOSS 0x0000000000200000ULL +#define PCS_STATE_L_C_CODES 0x0000000000100000ULL +#define PCS_STATE_LINK_CFG_STATE 0x000000000001e000ULL +#define PCS_STATE_SEQ_DET_STATE 0x0000000000001800ULL +#define PCS_STATE_WORD_SYNC_STATE 0x0000000000000700ULL +#define PCS_STATE_NO_IDLE 0x000000000000000fULL + +#define PCS_INTERRUPT (FZC_MAC + 0x00030UL) +#define PCS_INTERRUPT_LSTATUS 0x0000000000000004ULL + +#define PCS_DPATH_MODE (FZC_MAC + 0x000a0UL) +#define PCS_DPATH_MODE_PCS 0x0000000000000000ULL +#define PCS_DPATH_MODE_MII 0x0000000000000002ULL +#define PCS_DPATH_MODE_LINKUP_F_ENAB 0x0000000000000001ULL + +#define PCS_PKT_CNT (FZC_MAC + 0x000c0UL) +#define PCS_PKT_CNT_RX 0x0000000007ff0000ULL +#define PCS_PKT_CNT_TX 0x00000000000007ffULL + +#define MIF_BB_MDC (FZC_MAC + 0x16000UL) +#define MIF_BB_MDC_CLK 0x0000000000000001ULL + +#define MIF_BB_MDO (FZC_MAC + 0x16008UL) +#define MIF_BB_MDO_DAT 0x0000000000000001ULL + +#define MIF_BB_MDO_EN (FZC_MAC + 0x16010UL) +#define MIF_BB_MDO_EN_VAL 0x0000000000000001ULL + +#define MIF_FRAME_OUTPUT (FZC_MAC + 0x16018UL) +#define MIF_FRAME_OUTPUT_ST 0x00000000c0000000ULL +#define MIF_FRAME_OUTPUT_ST_SHIFT 30 +#define MIF_FRAME_OUTPUT_OP_ADDR 0x0000000000000000ULL +#define 
MIF_FRAME_OUTPUT_OP_WRITE 0x0000000010000000ULL +#define MIF_FRAME_OUTPUT_OP_READ_INC 0x0000000020000000ULL +#define MIF_FRAME_OUTPUT_OP_READ 0x0000000030000000ULL +#define MIF_FRAME_OUTPUT_OP_SHIFT 28 +#define MIF_FRAME_OUTPUT_PORT 0x000000000f800000ULL +#define MIF_FRAME_OUTPUT_PORT_SHIFT 23 +#define MIF_FRAME_OUTPUT_REG 0x00000000007c0000ULL +#define MIF_FRAME_OUTPUT_REG_SHIFT 18 +#define MIF_FRAME_OUTPUT_TA 0x0000000000030000ULL +#define MIF_FRAME_OUTPUT_TA_SHIFT 16 +#define MIF_FRAME_OUTPUT_DATA 0x000000000000ffffULL +#define MIF_FRAME_OUTPUT_DATA_SHIFT 0 + +#define MDIO_ADDR_OP(port, dev, reg) \ + ((0 << MIF_FRAME_OUTPUT_ST_SHIFT) | \ + MIF_FRAME_OUTPUT_OP_ADDR | \ + (port << MIF_FRAME_OUTPUT_PORT_SHIFT) | \ + (dev << MIF_FRAME_OUTPUT_REG_SHIFT) | \ + (0x2 << MIF_FRAME_OUTPUT_TA_SHIFT) | \ + (reg << MIF_FRAME_OUTPUT_DATA_SHIFT)) + +#define MDIO_READ_OP(port, dev) \ + ((0 << MIF_FRAME_OUTPUT_ST_SHIFT) | \ + MIF_FRAME_OUTPUT_OP_READ | \ + (port << MIF_FRAME_OUTPUT_PORT_SHIFT) | \ + (dev << MIF_FRAME_OUTPUT_REG_SHIFT) | \ + (0x2 << MIF_FRAME_OUTPUT_TA_SHIFT)) + +#define MDIO_WRITE_OP(port, dev, data) \ + ((0 << MIF_FRAME_OUTPUT_ST_SHIFT) | \ + MIF_FRAME_OUTPUT_OP_WRITE | \ + (port << MIF_FRAME_OUTPUT_PORT_SHIFT) | \ + (dev << MIF_FRAME_OUTPUT_REG_SHIFT) | \ + (0x2 << MIF_FRAME_OUTPUT_TA_SHIFT) | \ + (data << MIF_FRAME_OUTPUT_DATA_SHIFT)) + +#define MII_READ_OP(port, reg) \ + ((1 << MIF_FRAME_OUTPUT_ST_SHIFT) | \ + (2 << MIF_FRAME_OUTPUT_OP_SHIFT) | \ + (port << MIF_FRAME_OUTPUT_PORT_SHIFT) | \ + (reg << MIF_FRAME_OUTPUT_REG_SHIFT) | \ + (0x2 << MIF_FRAME_OUTPUT_TA_SHIFT)) + +#define MII_WRITE_OP(port, reg, data) \ + ((1 << MIF_FRAME_OUTPUT_ST_SHIFT) | \ + (1 << MIF_FRAME_OUTPUT_OP_SHIFT) | \ + (port << MIF_FRAME_OUTPUT_PORT_SHIFT) | \ + (reg << MIF_FRAME_OUTPUT_REG_SHIFT) | \ + (0x2 << MIF_FRAME_OUTPUT_TA_SHIFT) | \ + (data << MIF_FRAME_OUTPUT_DATA_SHIFT)) + +#define MIF_CONFIG (FZC_MAC + 0x16020UL) +#define MIF_CONFIG_ATCA_GE 0x0000000000010000ULL +#define MIF_CONFIG_INDIRECT_MODE 0x0000000000008000ULL +#define MIF_CONFIG_POLL_PRT_PHYADDR 0x0000000000003c00ULL +#define MIF_CONFIG_POLL_DEV_REG_ADDR 0x00000000000003e0ULL +#define MIF_CONFIG_BB_MODE 0x0000000000000010ULL +#define MIF_CONFIG_POLL_EN 0x0000000000000008ULL +#define MIF_CONFIG_BB_SER_SEL 0x0000000000000006ULL +#define MIF_CONFIG_MANUAL_MODE 0x0000000000000001ULL + +#define MIF_POLL_STATUS (FZC_MAC + 0x16028UL) +#define MIF_POLL_STATUS_DATA 0x00000000ffff0000ULL +#define MIF_POLL_STATUS_STAT 0x000000000000ffffULL + +#define MIF_POLL_MASK (FZC_MAC + 0x16030UL) +#define MIF_POLL_MASK_VAL 0x000000000000ffffULL + +#define MIF_SM (FZC_MAC + 0x16038UL) +#define MIF_SM_PORT_ADDR 0x00000000001f0000ULL +#define MIF_SM_MDI_1 0x0000000000004000ULL +#define MIF_SM_MDI_0 0x0000000000002400ULL +#define MIF_SM_MDCLK 0x0000000000001000ULL +#define MIF_SM_MDO_EN 0x0000000000000800ULL +#define MIF_SM_MDO 0x0000000000000400ULL +#define MIF_SM_MDI 0x0000000000000200ULL +#define MIF_SM_CTL 0x00000000000001c0ULL +#define MIF_SM_EX 0x000000000000003fULL + +#define MIF_STATUS (FZC_MAC + 0x16040UL) +#define MIF_STATUS_MDINT1 0x0000000000000020ULL +#define MIF_STATUS_MDINT0 0x0000000000000010ULL + +#define MIF_MASK (FZC_MAC + 0x16048UL) +#define MIF_MASK_MDINT1 0x0000000000000020ULL +#define MIF_MASK_MDINT0 0x0000000000000010ULL +#define MIF_MASK_PEU_ERR 0x0000000000000008ULL +#define MIF_MASK_YC 0x0000000000000004ULL +#define MIF_MASK_XGE_ERR0 0x0000000000000002ULL +#define MIF_MASK_MIF_INIT_DONE 0x0000000000000001ULL + +#define ENET_SERDES_RESET 
(FZC_MAC + 0x14000UL) +#define ENET_SERDES_RESET_1 0x0000000000000002ULL +#define ENET_SERDES_RESET_0 0x0000000000000001ULL + +#define ENET_SERDES_CFG (FZC_MAC + 0x14008UL) +#define ENET_SERDES_BE_LOOPBACK 0x0000000000000002ULL +#define ENET_SERDES_CFG_FORCE_RDY 0x0000000000000001ULL + +#define ENET_SERDES_0_PLL_CFG (FZC_MAC + 0x14010UL) +#define ENET_SERDES_PLL_FBDIV0 0x0000000000000001ULL +#define ENET_SERDES_PLL_FBDIV1 0x0000000000000002ULL +#define ENET_SERDES_PLL_FBDIV2 0x0000000000000004ULL +#define ENET_SERDES_PLL_HRATE0 0x0000000000000008ULL +#define ENET_SERDES_PLL_HRATE1 0x0000000000000010ULL +#define ENET_SERDES_PLL_HRATE2 0x0000000000000020ULL +#define ENET_SERDES_PLL_HRATE3 0x0000000000000040ULL + +#define ENET_SERDES_0_CTRL_CFG (FZC_MAC + 0x14018UL) +#define ENET_SERDES_CTRL_SDET_0 0x0000000000000001ULL +#define ENET_SERDES_CTRL_SDET_1 0x0000000000000002ULL +#define ENET_SERDES_CTRL_SDET_2 0x0000000000000004ULL +#define ENET_SERDES_CTRL_SDET_3 0x0000000000000008ULL +#define ENET_SERDES_CTRL_EMPH_0 0x0000000000000070ULL +#define ENET_SERDES_CTRL_EMPH_0_SHIFT 4 +#define ENET_SERDES_CTRL_EMPH_1 0x0000000000000380ULL +#define ENET_SERDES_CTRL_EMPH_1_SHIFT 7 +#define ENET_SERDES_CTRL_EMPH_2 0x0000000000001c00ULL +#define ENET_SERDES_CTRL_EMPH_2_SHIFT 10 +#define ENET_SERDES_CTRL_EMPH_3 0x000000000000e000ULL +#define ENET_SERDES_CTRL_EMPH_3_SHIFT 13 +#define ENET_SERDES_CTRL_LADJ_0 0x0000000000070000ULL +#define ENET_SERDES_CTRL_LADJ_0_SHIFT 16 +#define ENET_SERDES_CTRL_LADJ_1 0x0000000000380000ULL +#define ENET_SERDES_CTRL_LADJ_1_SHIFT 19 +#define ENET_SERDES_CTRL_LADJ_2 0x0000000001c00000ULL +#define ENET_SERDES_CTRL_LADJ_2_SHIFT 22 +#define ENET_SERDES_CTRL_LADJ_3 0x000000000e000000ULL +#define ENET_SERDES_CTRL_LADJ_3_SHIFT 25 +#define ENET_SERDES_CTRL_RXITERM_0 0x0000000010000000ULL +#define ENET_SERDES_CTRL_RXITERM_1 0x0000000020000000ULL +#define ENET_SERDES_CTRL_RXITERM_2 0x0000000040000000ULL +#define ENET_SERDES_CTRL_RXITERM_3 0x0000000080000000ULL + +#define ENET_SERDES_0_TEST_CFG (FZC_MAC + 0x14020UL) +#define ENET_SERDES_TEST_MD_0 0x0000000000000003ULL +#define ENET_SERDES_TEST_MD_0_SHIFT 0 +#define ENET_SERDES_TEST_MD_1 0x000000000000000cULL +#define ENET_SERDES_TEST_MD_1_SHIFT 2 +#define ENET_SERDES_TEST_MD_2 0x0000000000000030ULL +#define ENET_SERDES_TEST_MD_2_SHIFT 4 +#define ENET_SERDES_TEST_MD_3 0x00000000000000c0ULL +#define ENET_SERDES_TEST_MD_3_SHIFT 6 + +#define ENET_TEST_MD_NO_LOOPBACK 0x0 +#define ENET_TEST_MD_EWRAP 0x1 +#define ENET_TEST_MD_PAD_LOOPBACK 0x2 +#define ENET_TEST_MD_REV_LOOPBACK 0x3 + +#define ENET_SERDES_1_PLL_CFG (FZC_MAC + 0x14028UL) +#define ENET_SERDES_1_CTRL_CFG (FZC_MAC + 0x14030UL) +#define ENET_SERDES_1_TEST_CFG (FZC_MAC + 0x14038UL) + +#define ENET_RGMII_CFG_REG (FZC_MAC + 0x14040UL) + +#define ESR_INT_SIGNALS (FZC_MAC + 0x14800UL) +#define ESR_INT_SIGNALS_ALL 0x00000000ffffffffULL +#define ESR_INT_SIGNALS_P0_BITS 0x0000000033e0000fULL +#define ESR_INT_SIGNALS_P1_BITS 0x000000000c1f00f0ULL +#define ESR_INT_SRDY0_P0 0x0000000020000000ULL +#define ESR_INT_DET0_P0 0x0000000010000000ULL +#define ESR_INT_SRDY0_P1 0x0000000008000000ULL +#define ESR_INT_DET0_P1 0x0000000004000000ULL +#define ESR_INT_XSRDY_P0 0x0000000002000000ULL +#define ESR_INT_XDP_P0_CH3 0x0000000001000000ULL +#define ESR_INT_XDP_P0_CH2 0x0000000000800000ULL +#define ESR_INT_XDP_P0_CH1 0x0000000000400000ULL +#define ESR_INT_XDP_P0_CH0 0x0000000000200000ULL +#define ESR_INT_XSRDY_P1 0x0000000000100000ULL +#define ESR_INT_XDP_P1_CH3 0x0000000000080000ULL +#define 
ESR_INT_XDP_P1_CH2 0x0000000000040000ULL +#define ESR_INT_XDP_P1_CH1 0x0000000000020000ULL +#define ESR_INT_XDP_P1_CH0 0x0000000000010000ULL +#define ESR_INT_SLOSS_P1_CH3 0x0000000000000080ULL +#define ESR_INT_SLOSS_P1_CH2 0x0000000000000040ULL +#define ESR_INT_SLOSS_P1_CH1 0x0000000000000020ULL +#define ESR_INT_SLOSS_P1_CH0 0x0000000000000010ULL +#define ESR_INT_SLOSS_P0_CH3 0x0000000000000008ULL +#define ESR_INT_SLOSS_P0_CH2 0x0000000000000004ULL +#define ESR_INT_SLOSS_P0_CH1 0x0000000000000002ULL +#define ESR_INT_SLOSS_P0_CH0 0x0000000000000001ULL + +#define ESR_DEBUG_SEL (FZC_MAC + 0x14808UL) +#define ESR_DEBUG_SEL_VAL 0x000000000000003fULL + +/* SerDes registers behind MIF */ +#define NIU_ESR_DEV_ADDR 0x1e +#define ESR_BASE 0x0000 + +#define ESR_RXTX_COMM_CTRL_L (ESR_BASE + 0x0000) +#define ESR_RXTX_COMM_CTRL_H (ESR_BASE + 0x0001) + +#define ESR_RXTX_RESET_CTRL_L (ESR_BASE + 0x0002) +#define ESR_RXTX_RESET_CTRL_H (ESR_BASE + 0x0003) + +#define ESR_RX_POWER_CTRL_L (ESR_BASE + 0x0004) +#define ESR_RX_POWER_CTRL_H (ESR_BASE + 0x0005) + +#define ESR_TX_POWER_CTRL_L (ESR_BASE + 0x0006) +#define ESR_TX_POWER_CTRL_H (ESR_BASE + 0x0007) + +#define ESR_MISC_POWER_CTRL_L (ESR_BASE + 0x0008) +#define ESR_MISC_POWER_CTRL_H (ESR_BASE + 0x0009) + +#define ESR_RXTX_CTRL_L(CHAN) (ESR_BASE + 0x0080 + (CHAN) * 0x10) +#define ESR_RXTX_CTRL_H(CHAN) (ESR_BASE + 0x0081 + (CHAN) * 0x10) +#define ESR_RXTX_CTRL_BIASCNTL 0x80000000 +#define ESR_RXTX_CTRL_RESV1 0x7c000000 +#define ESR_RXTX_CTRL_TDENFIFO 0x02000000 +#define ESR_RXTX_CTRL_TDWS20 0x01000000 +#define ESR_RXTX_CTRL_VMUXLO 0x00c00000 +#define ESR_RXTX_CTRL_VMUXLO_SHIFT 22 +#define ESR_RXTX_CTRL_VPULSELO 0x00300000 +#define ESR_RXTX_CTRL_VPULSELO_SHIFT 20 +#define ESR_RXTX_CTRL_RESV2 0x000f0000 +#define ESR_RXTX_CTRL_RESV3 0x0000c000 +#define ESR_RXTX_CTRL_RXPRESWIN 0x00003000 +#define ESR_RXTX_CTRL_RXPRESWIN_SHIFT 12 +#define ESR_RXTX_CTRL_RESV4 0x00000800 +#define ESR_RXTX_CTRL_RISEFALL 0x00000700 +#define ESR_RXTX_CTRL_RISEFALL_SHIFT 8 +#define ESR_RXTX_CTRL_RESV5 0x000000fe +#define ESR_RXTX_CTRL_ENSTRETCH 0x00000001 + +#define ESR_RXTX_TUNING_L(CHAN) (ESR_BASE + 0x0082 + (CHAN) * 0x10) +#define ESR_RXTX_TUNING_H(CHAN) (ESR_BASE + 0x0083 + (CHAN) * 0x10) + +#define ESR_RX_SYNCCHAR_L(CHAN) (ESR_BASE + 0x0084 + (CHAN) * 0x10) +#define ESR_RX_SYNCCHAR_H(CHAN) (ESR_BASE + 0x0085 + (CHAN) * 0x10) + +#define ESR_RXTX_TEST_L(CHAN) (ESR_BASE + 0x0086 + (CHAN) * 0x10) +#define ESR_RXTX_TEST_H(CHAN) (ESR_BASE + 0x0087 + (CHAN) * 0x10) + +#define ESR_GLUE_CTRL0_L(CHAN) (ESR_BASE + 0x0088 + (CHAN) * 0x10) +#define ESR_GLUE_CTRL0_H(CHAN) (ESR_BASE + 0x0089 + (CHAN) * 0x10) +#define ESR_GLUE_CTRL0_RESV1 0xf8000000 +#define ESR_GLUE_CTRL0_BLTIME 0x07000000 +#define ESR_GLUE_CTRL0_BLTIME_SHIFT 24 +#define ESR_GLUE_CTRL0_RESV2 0x00ff0000 +#define ESR_GLUE_CTRL0_RXLOS_TEST 0x00008000 +#define ESR_GLUE_CTRL0_RESV3 0x00004000 +#define ESR_GLUE_CTRL0_RXLOSENAB 0x00002000 +#define ESR_GLUE_CTRL0_FASTRESYNC 0x00001000 +#define ESR_GLUE_CTRL0_SRATE 0x00000f00 +#define ESR_GLUE_CTRL0_SRATE_SHIFT 8 +#define ESR_GLUE_CTRL0_THCNT 0x000000ff +#define ESR_GLUE_CTRL0_THCNT_SHIFT 0 + +#define BLTIME_64_CYCLES 0 +#define BLTIME_128_CYCLES 1 +#define BLTIME_256_CYCLES 2 +#define BLTIME_300_CYCLES 3 +#define BLTIME_384_CYCLES 4 +#define BLTIME_512_CYCLES 5 +#define BLTIME_1024_CYCLES 6 +#define BLTIME_2048_CYCLES 7 + +#define ESR_GLUE_CTRL1_L(CHAN) (ESR_BASE + 0x008a + (CHAN) * 0x10) +#define ESR_GLUE_CTRL1_H(CHAN) (ESR_BASE + 0x008b + (CHAN) * 0x10) +#define 
ESR_RXTX_TUNING1_L(CHAN) (ESR_BASE + 0x00c2 + (CHAN) * 0x10) +#define ESR_RXTX_TUNING1_H(CHAN) (ESR_BASE + 0x00c2 + (CHAN) * 0x10) +#define ESR_RXTX_TUNING2_L(CHAN) (ESR_BASE + 0x0102 + (CHAN) * 0x10) +#define ESR_RXTX_TUNING2_H(CHAN) (ESR_BASE + 0x0102 + (CHAN) * 0x10) +#define ESR_RXTX_TUNING3_L(CHAN) (ESR_BASE + 0x0142 + (CHAN) * 0x10) +#define ESR_RXTX_TUNING3_H(CHAN) (ESR_BASE + 0x0142 + (CHAN) * 0x10) + +#define NIU_ESR2_DEV_ADDR 0x1e +#define ESR2_BASE 0x8000 + +#define ESR2_TI_PLL_CFG_L (ESR2_BASE + 0x000) +#define ESR2_TI_PLL_CFG_H (ESR2_BASE + 0x001) +#define PLL_CFG_STD 0x00000c00 +#define PLL_CFG_STD_SHIFT 10 +#define PLL_CFG_LD 0x00000300 +#define PLL_CFG_LD_SHIFT 8 +#define PLL_CFG_MPY 0x0000001e +#define PLL_CFG_MPY_SHIFT 1 +#define PLL_CFG_ENPLL 0x00000001 + +#define ESR2_TI_PLL_STS_L (ESR2_BASE + 0x002) +#define ESR2_TI_PLL_STS_H (ESR2_BASE + 0x003) +#define PLL_STS_LOCK 0x00000001 + +#define ESR2_TI_PLL_TEST_CFG_L (ESR2_BASE + 0x004) +#define ESR2_TI_PLL_TEST_CFG_H (ESR2_BASE + 0x005) +#define PLL_TEST_INVPATT 0x00004000 +#define PLL_TEST_RATE 0x00003000 +#define PLL_TEST_RATE_SHIFT 12 +#define PLL_TEST_CFG_ENBSAC 0x00000400 +#define PLL_TEST_CFG_ENBSRX 0x00000200 +#define PLL_TEST_CFG_ENBSTX 0x00000100 +#define PLL_TEST_CFG_LOOPBACK_PAD 0x00000040 +#define PLL_TEST_CFG_LOOPBACK_CML_DIS 0x00000080 +#define PLL_TEST_CFG_LOOPBACK_CML_EN 0x000000c0 +#define PLL_TEST_CFG_CLKBYP 0x00000030 +#define PLL_TEST_CFG_CLKBYP_SHIFT 4 +#define PLL_TEST_CFG_EN_RXPATT 0x00000008 +#define PLL_TEST_CFG_EN_TXPATT 0x00000004 +#define PLL_TEST_CFG_TPATT 0x00000003 +#define PLL_TEST_CFG_TPATT_SHIFT 0 + +#define ESR2_TI_PLL_TX_CFG_L(CHAN) (ESR2_BASE + 0x100 + (CHAN) * 4) +#define ESR2_TI_PLL_TX_CFG_H(CHAN) (ESR2_BASE + 0x101 + (CHAN) * 4) +#define PLL_TX_CFG_RDTCT 0x00600000 +#define PLL_TX_CFG_RDTCT_SHIFT 21 +#define PLL_TX_CFG_ENIDL 0x00100000 +#define PLL_TX_CFG_BSTX 0x00020000 +#define PLL_TX_CFG_ENFTP 0x00010000 +#define PLL_TX_CFG_DE 0x0000f000 +#define PLL_TX_CFG_DE_SHIFT 12 +#define PLL_TX_CFG_SWING_125MV 0x00000000 +#define PLL_TX_CFG_SWING_250MV 0x00000200 +#define PLL_TX_CFG_SWING_500MV 0x00000400 +#define PLL_TX_CFG_SWING_625MV 0x00000600 +#define PLL_TX_CFG_SWING_750MV 0x00000800 +#define PLL_TX_CFG_SWING_1000MV 0x00000a00 +#define PLL_TX_CFG_SWING_1250MV 0x00000c00 +#define PLL_TX_CFG_SWING_1375MV 0x00000e00 +#define PLL_TX_CFG_CM 0x00000100 +#define PLL_TX_CFG_INVPAIR 0x00000080 +#define PLL_TX_CFG_RATE 0x00000060 +#define PLL_TX_CFG_RATE_SHIFT 5 +#define PLL_TX_CFG_BUSWIDTH 0x0000001c +#define PLL_TX_CFG_BUSWIDTH_SHIFT 2 +#define PLL_TX_CFG_ENTEST 0x00000002 +#define PLL_TX_CFG_ENTX 0x00000001 + +#define ESR2_TI_PLL_TX_STS_L(CHAN) (ESR2_BASE + 0x102 + (CHAN) * 4) +#define ESR2_TI_PLL_TX_STS_H(CHAN) (ESR2_BASE + 0x103 + (CHAN) * 4) +#define PLL_TX_STS_RDTCTIP 0x00000002 +#define PLL_TX_STS_TESTFAIL 0x00000001 + +#define ESR2_TI_PLL_RX_CFG_L(CHAN) (ESR2_BASE + 0x120 + (CHAN) * 4) +#define ESR2_TI_PLL_RX_CFG_H(CHAN) (ESR2_BASE + 0x121 + (CHAN) * 4) +#define PLL_RX_CFG_BSINRXN 0x02000000 +#define PLL_RX_CFG_BSINRXP 0x01000000 +#define PLL_RX_CFG_EQ_MAX_LF 0x00000000 +#define PLL_RX_CFG_EQ_LP_ADAPTIVE 0x00080000 +#define PLL_RX_CFG_EQ_LP_1084MHZ 0x00400000 +#define PLL_RX_CFG_EQ_LP_805MHZ 0x00480000 +#define PLL_RX_CFG_EQ_LP_573MHZ 0x00500000 +#define PLL_RX_CFG_EQ_LP_402MHZ 0x00580000 +#define PLL_RX_CFG_EQ_LP_304MHZ 0x00600000 +#define PLL_RX_CFG_EQ_LP_216MHZ 0x00680000 +#define PLL_RX_CFG_EQ_LP_156MHZ 0x00700000 +#define PLL_RX_CFG_EQ_LP_135MHZ 0x00780000 +#define 
PLL_RX_CFG_EQ_SHIFT 19 +#define PLL_RX_CFG_CDR 0x00070000 +#define PLL_RX_CFG_CDR_SHIFT 16 +#define PLL_RX_CFG_LOS_DIS 0x00000000 +#define PLL_RX_CFG_LOS_HTHRESH 0x00004000 +#define PLL_RX_CFG_LOS_LTHRESH 0x00008000 +#define PLL_RX_CFG_ALIGN_DIS 0x00000000 +#define PLL_RX_CFG_ALIGN_ENA 0x00001000 +#define PLL_RX_CFG_ALIGN_JOG 0x00002000 +#define PLL_RX_CFG_TERM_VDDT 0x00000000 +#define PLL_RX_CFG_TERM_0P8VDDT 0x00000100 +#define PLL_RX_CFG_TERM_FLOAT 0x00000300 +#define PLL_RX_CFG_INVPAIR 0x00000080 +#define PLL_RX_CFG_RATE 0x00000060 +#define PLL_RX_CFG_RATE_SHIFT 5 +#define PLL_RX_CFG_BUSWIDTH 0x0000001c +#define PLL_RX_CFG_BUSWIDTH_SHIFT 2 +#define PLL_RX_CFG_ENTEST 0x00000002 +#define PLL_RX_CFG_ENRX 0x00000001 + +#define ESR2_TI_PLL_RX_STS_L(CHAN) (ESR2_BASE + 0x122 + (CHAN) * 4) +#define ESR2_TI_PLL_RX_STS_H(CHAN) (ESR2_BASE + 0x123 + (CHAN) * 4) +#define PLL_RX_STS_CRCIDTCT 0x00000200 +#define PLL_RX_STS_CWDTCT 0x00000100 +#define PLL_RX_STS_BSRXN 0x00000020 +#define PLL_RX_STS_BSRXP 0x00000010 +#define PLL_RX_STS_LOSDTCT 0x00000008 +#define PLL_RX_STS_ODDCG 0x00000004 +#define PLL_RX_STS_SYNC 0x00000002 +#define PLL_RX_STS_TESTFAIL 0x00000001 + +#define ENET_VLAN_TBL(IDX) (FZC_FFLP + 0x00000UL + (IDX) * 8UL) +#define ENET_VLAN_TBL_PARITY1 0x0000000000020000ULL +#define ENET_VLAN_TBL_PARITY0 0x0000000000010000ULL +#define ENET_VLAN_TBL_VPR 0x0000000000000008ULL +#define ENET_VLAN_TBL_VLANRDCTBLN 0x0000000000000007ULL +#define ENET_VLAN_TBL_SHIFT(PORT) ((PORT) * 4) + +#define ENET_VLAN_TBL_NUM_ENTRIES 4096 + +#define FFLP_VLAN_PAR_ERR (FZC_FFLP + 0x0800UL) +#define FFLP_VLAN_PAR_ERR_ERR 0x0000000080000000ULL +#define FFLP_VLAN_PAR_ERR_M_ERR 0x0000000040000000ULL +#define FFLP_VLAN_PAR_ERR_ADDR 0x000000003ffc0000ULL +#define FFLP_VLAN_PAR_ERR_DATA 0x000000000003ffffULL + +#define L2_CLS(IDX) (FZC_FFLP + 0x20000UL + (IDX) * 8UL) +#define L2_CLS_VLD 0x0000000000010000ULL +#define L2_CLS_ETYPE 0x000000000000ffffULL +#define L2_CLS_ETYPE_SHIFT 0 + +#define L3_CLS(IDX) (FZC_FFLP + 0x20010UL + (IDX) * 8UL) +#define L3_CLS_VALID 0x0000000002000000ULL +#define L3_CLS_IPVER 0x0000000001000000ULL +#define L3_CLS_PID 0x0000000000ff0000ULL +#define L3_CLS_PID_SHIFT 16 +#define L3_CLS_TOSMASK 0x000000000000ff00ULL +#define L3_CLS_TOSMASK_SHIFT 8 +#define L3_CLS_TOS 0x00000000000000ffULL +#define L3_CLS_TOS_SHIFT 0 + +#define TCAM_KEY(IDX) (FZC_FFLP + 0x20030UL + (IDX) * 8UL) +#define TCAM_KEY_DISC 0x0000000000000008ULL +#define TCAM_KEY_TSEL 0x0000000000000004ULL +#define TCAM_KEY_IPADDR 0x0000000000000001ULL + +#define TCAM_KEY_0 (FZC_FFLP + 0x20090UL) +#define TCAM_KEY_0_KEY 0x00000000000000ffULL /* bits 192-199 */ + +#define TCAM_KEY_1 (FZC_FFLP + 0x20098UL) +#define TCAM_KEY_1_KEY 0xffffffffffffffffULL /* bits 128-191 */ + +#define TCAM_KEY_2 (FZC_FFLP + 0x200a0UL) +#define TCAM_KEY_2_KEY 0xffffffffffffffffULL /* bits 64-127 */ + +#define TCAM_KEY_3 (FZC_FFLP + 0x200a8UL) +#define TCAM_KEY_3_KEY 0xffffffffffffffffULL /* bits 0-63 */ + +#define TCAM_KEY_MASK_0 (FZC_FFLP + 0x200b0UL) +#define TCAM_KEY_MASK_0_KEY_SEL 0x00000000000000ffULL /* bits 192-199 */ + +#define TCAM_KEY_MASK_1 (FZC_FFLP + 0x200b8UL) +#define TCAM_KEY_MASK_1_KEY_SEL 0xffffffffffffffffULL /* bits 128-191 */ + +#define TCAM_KEY_MASK_2 (FZC_FFLP + 0x200c0UL) +#define TCAM_KEY_MASK_2_KEY_SEL 0xffffffffffffffffULL /* bits 64-127 */ + +#define TCAM_KEY_MASK_3 (FZC_FFLP + 0x200c8UL) +#define TCAM_KEY_MASK_3_KEY_SEL 0xffffffffffffffffULL /* bits 0-63 */ + +#define TCAM_CTL (FZC_FFLP + 0x200d0UL) +#define TCAM_CTL_RWC 
0x00000000001c0000ULL +#define TCAM_CTL_RWC_TCAM_WRITE 0x0000000000000000ULL +#define TCAM_CTL_RWC_TCAM_READ 0x0000000000040000ULL +#define TCAM_CTL_RWC_TCAM_COMPARE 0x0000000000080000ULL +#define TCAM_CTL_RWC_RAM_WRITE 0x0000000000100000ULL +#define TCAM_CTL_RWC_RAM_READ 0x0000000000140000ULL +#define TCAM_CTL_STAT 0x0000000000020000ULL +#define TCAM_CTL_MATCH 0x0000000000010000ULL +#define TCAM_CTL_LOC 0x00000000000003ffULL + +#define TCAM_ERR (FZC_FFLP + 0x200d8UL) +#define TCAM_ERR_ERR 0x0000000080000000ULL +#define TCAM_ERR_P_ECC 0x0000000040000000ULL +#define TCAM_ERR_MULT 0x0000000020000000ULL +#define TCAM_ERR_ADDR 0x0000000000ff0000ULL +#define TCAM_ERR_SYNDROME 0x000000000000ffffULL + +#define HASH_LOOKUP_ERR_LOG1 (FZC_FFLP + 0x200e0UL) +#define HASH_LOOKUP_ERR_LOG1_ERR 0x0000000000000008ULL +#define HASH_LOOKUP_ERR_LOG1_MULT_LK 0x0000000000000004ULL +#define HASH_LOOKUP_ERR_LOG1_CU 0x0000000000000002ULL +#define HASH_LOOKUP_ERR_LOG1_MULT_BIT 0x0000000000000001ULL + +#define HASH_LOOKUP_ERR_LOG2 (FZC_FFLP + 0x200e8UL) +#define HASH_LOOKUP_ERR_LOG2_H1 0x000000007ffff800ULL +#define HASH_LOOKUP_ERR_LOG2_SUBAREA 0x0000000000000700ULL +#define HASH_LOOKUP_ERR_LOG2_SYNDROME 0x00000000000000ffULL + +#define FFLP_CFG_1 (FZC_FFLP + 0x20100UL) +#define FFLP_CFG_1_TCAM_DIS 0x0000000004000000ULL +#define FFLP_CFG_1_PIO_DBG_SEL 0x0000000003800000ULL +#define FFLP_CFG_1_PIO_FIO_RST 0x0000000000400000ULL +#define FFLP_CFG_1_PIO_FIO_LAT 0x0000000000300000ULL +#define FFLP_CFG_1_CAMLAT 0x00000000000f0000ULL +#define FFLP_CFG_1_CAMLAT_SHIFT 16 +#define FFLP_CFG_1_CAMRATIO 0x000000000000f000ULL +#define FFLP_CFG_1_CAMRATIO_SHIFT 12 +#define FFLP_CFG_1_FCRAMRATIO 0x0000000000000f00ULL +#define FFLP_CFG_1_FCRAMRATIO_SHIFT 8 +#define FFLP_CFG_1_FCRAMOUTDR_MASK 0x00000000000000f0ULL +#define FFLP_CFG_1_FCRAMOUTDR_NORMAL 0x0000000000000000ULL +#define FFLP_CFG_1_FCRAMOUTDR_STRONG 0x0000000000000050ULL +#define FFLP_CFG_1_FCRAMOUTDR_WEAK 0x00000000000000a0ULL +#define FFLP_CFG_1_FCRAMQS 0x0000000000000008ULL +#define FFLP_CFG_1_ERRORDIS 0x0000000000000004ULL +#define FFLP_CFG_1_FFLPINITDONE 0x0000000000000002ULL +#define FFLP_CFG_1_LLCSNAP 0x0000000000000001ULL + +#define DEFAULT_FCRAMRATIO 10 + +#define DEFAULT_TCAM_LATENCY 4 +#define DEFAULT_TCAM_ACCESS_RATIO 10 + +#define TCP_CFLAG_MSK (FZC_FFLP + 0x20108UL) +#define TCP_CFLAG_MSK_MASK 0x0000000000000fffULL + +#define FCRAM_REF_TMR (FZC_FFLP + 0x20110UL) +#define FCRAM_REF_TMR_MAX 0x00000000ffff0000ULL +#define FCRAM_REF_TMR_MAX_SHIFT 16 +#define FCRAM_REF_TMR_MIN 0x000000000000ffffULL +#define FCRAM_REF_TMR_MIN_SHIFT 0 + +#define DEFAULT_FCRAM_REFRESH_MAX 512 +#define DEFAULT_FCRAM_REFRESH_MIN 512 + +#define FCRAM_FIO_ADDR (FZC_FFLP + 0x20118UL) +#define FCRAM_FIO_ADDR_ADDR 0x00000000000000ffULL + +#define FCRAM_FIO_DAT (FZC_FFLP + 0x20120UL) +#define FCRAM_FIO_DAT_DATA 0x000000000000ffffULL + +#define FCRAM_ERR_TST0 (FZC_FFLP + 0x20128UL) +#define FCRAM_ERR_TST0_SYND 0x00000000000000ffULL + +#define FCRAM_ERR_TST1 (FZC_FFLP + 0x20130UL) +#define FCRAM_ERR_TST1_DAT 0x00000000ffffffffULL + +#define FCRAM_ERR_TST2 (FZC_FFLP + 0x20138UL) +#define FCRAM_ERR_TST2_DAT 0x00000000ffffffffULL + +#define FFLP_ERR_MASK (FZC_FFLP + 0x20140UL) +#define FFLP_ERR_MASK_HSH_TBL_DAT 0x00000000000007f8ULL +#define FFLP_ERR_MASK_HSH_TBL_LKUP 0x0000000000000004ULL +#define FFLP_ERR_MASK_TCAM 0x0000000000000002ULL +#define FFLP_ERR_MASK_VLAN 0x0000000000000001ULL + +#define FFLP_DBG_TRAIN_VCT (FZC_FFLP + 0x20148UL) +#define FFLP_DBG_TRAIN_VCT_VECTOR 0x00000000ffffffffULL 
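
The FFLP/TCAM register words above are assembled from the field masks and *_SHIFT constants that accompany each register. As an illustration only (not part of this patch), the sketch below shows how a driver might pack the default FCRAM refresh counts into FCRAM_REF_TMR and compose a read command word for TCAM_CTL; the helper names are hypothetical, and u64 is assumed to come from <linux/types.h> as elsewhere in the kernel.

/*
 * Illustrative sketch only -- not taken from the original patch.
 * It combines the masks, shifts and defaults defined above into
 * full register values; the helper names are made up.
 */
static u64 fcram_ref_tmr_default(void)
{
	/* Pack the min/max refresh counts into FCRAM_REF_TMR. */
	return ((u64) DEFAULT_FCRAM_REFRESH_MAX << FCRAM_REF_TMR_MAX_SHIFT) |
	       ((u64) DEFAULT_FCRAM_REFRESH_MIN << FCRAM_REF_TMR_MIN_SHIFT);
}

static u64 tcam_read_cmd(unsigned int index)
{
	/* RWC selects the operation, LOC addresses the TCAM entry. */
	return TCAM_CTL_RWC_TCAM_READ | (index & TCAM_CTL_LOC);
}
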
+ +#define FCRAM_PHY_RD_LAT (FZC_FFLP + 0x20150UL) +#define FCRAM_PHY_RD_LAT_LAT 0x00000000000000ffULL + +/* Ethernet TCAM format */ +#define TCAM_ETHKEY0_RESV1 0xffffffffffffff00ULL +#define TCAM_ETHKEY0_CLASS_CODE 0x00000000000000f8ULL +#define TCAM_ETHKEY0_CLASS_CODE_SHIFT 3 +#define TCAM_ETHKEY0_RESV2 0x0000000000000007ULL +#define TCAM_ETHKEY1_FRAME_BYTE0_7(NUM) (0xff << ((7 - NUM) * 8)) +#define TCAM_ETHKEY2_FRAME_BYTE8 0xff00000000000000ULL +#define TCAM_ETHKEY2_FRAME_BYTE8_SHIFT 56 +#define TCAM_ETHKEY2_FRAME_BYTE9 0x00ff000000000000ULL +#define TCAM_ETHKEY2_FRAME_BYTE9_SHIFT 48 +#define TCAM_ETHKEY2_FRAME_BYTE10 0x0000ff0000000000ULL +#define TCAM_ETHKEY2_FRAME_BYTE10_SHIFT 40 +#define TCAM_ETHKEY2_FRAME_RESV 0x000000ffffffffffULL +#define TCAM_ETHKEY3_FRAME_RESV 0xffffffffffffffffULL + +/* IPV4 TCAM format */ +#define TCAM_V4KEY0_RESV1 0xffffffffffffff00ULL +#define TCAM_V4KEY0_CLASS_CODE 0x00000000000000f8ULL +#define TCAM_V4KEY0_CLASS_CODE_SHIFT 3 +#define TCAM_V4KEY0_RESV2 0x0000000000000007ULL +#define TCAM_V4KEY1_L2RDCNUM 0xf800000000000000ULL +#define TCAM_V4KEY1_L2RDCNUM_SHIFT 59 +#define TCAM_V4KEY1_NOPORT 0x0400000000000000ULL +#define TCAM_V4KEY1_RESV 0x03ffffffffffffffULL +#define TCAM_V4KEY2_RESV 0xffff000000000000ULL +#define TCAM_V4KEY2_TOS 0x0000ff0000000000ULL +#define TCAM_V4KEY2_TOS_SHIFT 40 +#define TCAM_V4KEY2_PROTO 0x000000ff00000000ULL +#define TCAM_V4KEY2_PROTO_SHIFT 32 +#define TCAM_V4KEY2_PORT_SPI 0x00000000ffffffffULL +#define TCAM_V4KEY2_PORT_SPI_SHIFT 0 +#define TCAM_V4KEY3_SADDR 0xffffffff00000000ULL +#define TCAM_V4KEY3_SADDR_SHIFT 32 +#define TCAM_V4KEY3_DADDR 0x00000000ffffffffULL +#define TCAM_V4KEY3_DADDR_SHIFT 0 + +/* IPV6 TCAM format */ +#define TCAM_V6KEY0_RESV1 0xffffffffffffff00ULL +#define TCAM_V6KEY0_CLASS_CODE 0x00000000000000f8ULL +#define TCAM_V6KEY0_CLASS_CODE_SHIFT 3 +#define TCAM_V6KEY0_RESV2 0x0000000000000007ULL +#define TCAM_V6KEY1_L2RDCNUM 0xf800000000000000ULL +#define TCAM_V6KEY1_L2RDCNUM_SHIFT 59 +#define TCAM_V6KEY1_NOPORT 0x0400000000000000ULL +#define TCAM_V6KEY1_RESV 0x03ff000000000000ULL +#define TCAM_V6KEY1_TOS 0x0000ff0000000000ULL +#define TCAM_V6KEY1_TOS_SHIFT 40 +#define TCAM_V6KEY1_NEXT_HDR 0x000000ff00000000ULL +#define TCAM_V6KEY1_NEXT_HDR_SHIFT 32 +#define TCAM_V6KEY1_PORT_SPI 0x00000000ffffffffULL +#define TCAM_V6KEY1_PORT_SPI_SHIFT 0 +#define TCAM_V6KEY2_ADDR_HIGH 0xffffffffffffffffULL +#define TCAM_V6KEY3_ADDR_LOW 0xffffffffffffffffULL + +#define TCAM_ASSOCDATA_SYNDROME 0x000003fffc000000ULL +#define TCAM_ASSOCDATA_SYNDROME_SHIFT 26 +#define TCAM_ASSOCDATA_ZFID 0x0000000003ffc000ULL +#define TCAM_ASSOCDATA_ZFID_SHIFT 14 +#define TCAM_ASSOCDATA_V4_ECC_OK 0x0000000000002000ULL +#define TCAM_ASSOCDATA_DISC 0x0000000000001000ULL +#define TCAM_ASSOCDATA_TRES_MASK 0x0000000000000c00ULL +#define TCAM_ASSOCDATA_TRES_USE_L2RDC 0x0000000000000000ULL +#define TCAM_ASSOCDATA_TRES_USE_OFFSET 0x0000000000000400ULL +#define TCAM_ASSOCDATA_TRES_OVR_RDC 0x0000000000000800ULL +#define TCAM_ASSOCDATA_TRES_OVR_RDC_OFF 0x0000000000000c00ULL +#define TCAM_ASSOCDATA_RDCTBL 0x0000000000000380ULL +#define TCAM_ASSOCDATA_RDCTBL_SHIFT 7 +#define TCAM_ASSOCDATA_OFFSET 0x000000000000007cULL +#define TCAM_ASSOCDATA_OFFSET_SHIFT 2 +#define TCAM_ASSOCDATA_ZFVLD 0x0000000000000002ULL +#define TCAM_ASSOCDATA_AGE 0x0000000000000001ULL + +#define FLOW_KEY(IDX) (FZC_FFLP + 0x40000UL + (IDX) * 8UL) +#define FLOW_KEY_PORT 0x0000000000000200ULL +#define FLOW_KEY_L2DA 0x0000000000000100ULL +#define FLOW_KEY_VLAN 0x0000000000000080ULL +#define 
FLOW_KEY_IPSA 0x0000000000000040ULL +#define FLOW_KEY_IPDA 0x0000000000000020ULL +#define FLOW_KEY_PROTO 0x0000000000000010ULL +#define FLOW_KEY_L4_0 0x000000000000000cULL +#define FLOW_KEY_L4_0_SHIFT 2 +#define FLOW_KEY_L4_1 0x0000000000000003ULL +#define FLOW_KEY_L4_1_SHIFT 0 + +#define FLOW_KEY_L4_NONE 0x0 +#define FLOW_KEY_L4_RESV 0x1 +#define FLOW_KEY_L4_BYTE12 0x2 +#define FLOW_KEY_L4_BYTE56 0x3 + +#define H1POLY (FZC_FFLP + 0x40060UL) +#define H1POLY_INITVAL 0x00000000ffffffffULL + +#define H2POLY (FZC_FFLP + 0x40068UL) +#define H2POLY_INITVAL 0x000000000000ffffULL + +#define FLW_PRT_SEL(IDX) (FZC_FFLP + 0x40070UL + (IDX) * 8UL) +#define FLW_PRT_SEL_EXT 0x0000000000010000ULL +#define FLW_PRT_SEL_MASK 0x0000000000001f00ULL +#define FLW_PRT_SEL_MASK_SHIFT 8 +#define FLW_PRT_SEL_BASE 0x000000000000001fULL +#define FLW_PRT_SEL_BASE_SHIFT 0 + +#define HASH_TBL_ADDR(IDX) (FFLP + 0x00000UL + (IDX) * 8192UL) +#define HASH_TBL_ADDR_AUTOINC 0x0000000000800000ULL +#define HASH_TBL_ADDR_ADDR 0x00000000007fffffULL + +#define HASH_TBL_DATA(IDX) (FFLP + 0x00008UL + (IDX) * 8192UL) +#define HASH_TBL_DATA_DATA 0xffffffffffffffffULL + +/* FCRAM hash table entries are up to 8 64-bit words in size. + * The layout of each entry is determined by the settings in the + * first word, which is the header. + * + * The indexing is controllable per partition (there is one partition + * per RDC group, thus a total of eight) using the BASE and MASK fields + * of FLW_PRT_SEL above. + */ +#define FCRAM_SIZE 0x800000 +#define FCRAM_NUM_PARTITIONS 8 + +/* Generic HASH entry header, used for all non-optimized formats. */ +#define HASH_HEADER_FMT 0x8000000000000000ULL +#define HASH_HEADER_EXT 0x4000000000000000ULL +#define HASH_HEADER_VALID 0x2000000000000000ULL +#define HASH_HEADER_RESVD 0x1000000000000000ULL +#define HASH_HEADER_L2_DADDR 0x0ffffffffffff000ULL +#define HASH_HEADER_L2_DADDR_SHIFT 12 +#define HASH_HEADER_VLAN 0x0000000000000fffULL +#define HASH_HEADER_VLAN_SHIFT 0 + +/* Optimized format, just a header with a special layout defined below. + * Set FMT and EXT both to zero to indicate this layout is being used. + */ +#define HASH_OPT_HEADER_FMT 0x8000000000000000ULL +#define HASH_OPT_HEADER_EXT 0x4000000000000000ULL +#define HASH_OPT_HEADER_VALID 0x2000000000000000ULL +#define HASH_OPT_HEADER_RDCOFF 0x1f00000000000000ULL +#define HASH_OPT_HEADER_RDCOFF_SHIFT 56 +#define HASH_OPT_HEADER_HASH2 0x00ffff0000000000ULL +#define HASH_OPT_HEADER_HASH2_SHIFT 40 +#define HASH_OPT_HEADER_RESVD 0x000000ff00000000ULL +#define HASH_OPT_HEADER_USERINFO 0x00000000ffffffffULL +#define HASH_OPT_HEADER_USERINFO_SHIFT 0 + +/* Port and protocol word used for ipv4 and ipv6 layouts. */ +#define HASH_PORT_DPORT 0xffff000000000000ULL +#define HASH_PORT_DPORT_SHIFT 48 +#define HASH_PORT_SPORT 0x0000ffff00000000ULL +#define HASH_PORT_SPORT_SHIFT 32 +#define HASH_PORT_PROTO 0x00000000ff000000ULL +#define HASH_PORT_PROTO_SHIFT 24 +#define HASH_PORT_PORT_OFF 0x0000000000c00000ULL +#define HASH_PORT_PORT_OFF_SHIFT 22 +#define HASH_PORT_PORT_RESV 0x00000000003fffffULL + +/* Action word used for ipv4 and ipv6 layouts. 
*/ +#define HASH_ACTION_RESV1 0xe000000000000000ULL +#define HASH_ACTION_RDCOFF 0x1f00000000000000ULL +#define HASH_ACTION_RDCOFF_SHIFT 56 +#define HASH_ACTION_ZFVALID 0x0080000000000000ULL +#define HASH_ACTION_RESV2 0x0070000000000000ULL +#define HASH_ACTION_ZFID 0x000fff0000000000ULL +#define HASH_ACTION_ZFID_SHIFT 40 +#define HASH_ACTION_RESV3 0x000000ff00000000ULL +#define HASH_ACTION_USERINFO 0x00000000ffffffffULL +#define HASH_ACTION_USERINFO_SHIFT 0 + +/* IPV4 address word. Addresses are in network endian. */ +#define HASH_IP4ADDR_SADDR 0xffffffff00000000ULL +#define HASH_IP4ADDR_SADDR_SHIFT 32 +#define HASH_IP4ADDR_DADDR 0x00000000ffffffffULL +#define HASH_IP4ADDR_DADDR_SHIFT 0 + +/* IPV6 address layout is 4 words, first two are saddr, next two + * are daddr. Addresses are in network endian. + */ + +struct fcram_hash_opt { + u64 header; +}; + +/* EXT=1, FMT=0 */ +struct fcram_hash_ipv4 { + u64 header; + u64 addrs; + u64 ports; + u64 action; +}; + +/* EXT=1, FMT=1 */ +struct fcram_hash_ipv6 { + u64 header; + u64 addrs[4]; + u64 ports; + u64 action; +}; + +#define HASH_TBL_DATA_LOG(IDX) (FFLP + 0x00010UL + (IDX) * 8192UL) +#define HASH_TBL_DATA_LOG_ERR 0x0000000080000000ULL +#define HASH_TBL_DATA_LOG_ADDR 0x000000007fffff00ULL +#define HASH_TBL_DATA_LOG_SYNDROME 0x00000000000000ffULL + +#define RX_DMA_CK_DIV (FZC_DMC + 0x00000UL) +#define RX_DMA_CK_DIV_CNT 0x000000000000ffffULL + +#define DEF_RDC(IDX) (FZC_DMC + 0x00008UL + (IDX) * 0x8UL) +#define DEF_RDC_VAL 0x000000000000001fULL + +#define PT_DRR_WT(IDX) (FZC_DMC + 0x00028UL + (IDX) * 0x8UL) +#define PT_DRR_WT_VAL 0x000000000000ffffULL + +#define PT_DRR_WEIGHT_DEFAULT_10G 0x0400 +#define PT_DRR_WEIGHT_DEFAULT_1G 0x0066 + +#define PT_USE(IDX) (FZC_DMC + 0x00048UL + (IDX) * 0x8UL) +#define PT_USE_CNT 0x00000000000fffffULL + +#define RED_RAN_INIT (FZC_DMC + 0x00068UL) +#define RED_RAN_INIT_OPMODE 0x0000000000010000ULL +#define RED_RAN_INIT_VAL 0x000000000000ffffULL + +#define RX_ADDR_MD (FZC_DMC + 0x00070UL) +#define RX_ADDR_MD_DBG_PT_MUX_SEL 0x000000000000000cULL +#define RX_ADDR_MD_RAM_ACC 0x0000000000000002ULL +#define RX_ADDR_MD_MODE32 0x0000000000000001ULL + +#define RDMC_PRE_PAR_ERR (FZC_DMC + 0x00078UL) +#define RDMC_PRE_PAR_ERR_ERR 0x0000000000008000ULL +#define RDMC_PRE_PAR_ERR_MERR 0x0000000000004000ULL +#define RDMC_PRE_PAR_ERR_ADDR 0x00000000000000ffULL + +#define RDMC_SHA_PAR_ERR (FZC_DMC + 0x00080UL) +#define RDMC_SHA_PAR_ERR_ERR 0x0000000000008000ULL +#define RDMC_SHA_PAR_ERR_MERR 0x0000000000004000ULL +#define RDMC_SHA_PAR_ERR_ADDR 0x00000000000000ffULL + +#define RDMC_MEM_ADDR (FZC_DMC + 0x00088UL) +#define RDMC_MEM_ADDR_PRE_SHAD 0x0000000000000100ULL +#define RDMC_MEM_ADDR_ADDR 0x00000000000000ffULL + +#define RDMC_MEM_DAT0 (FZC_DMC + 0x00090UL) +#define RDMC_MEM_DAT0_DATA 0x00000000ffffffffULL /* bits 31:0 */ + +#define RDMC_MEM_DAT1 (FZC_DMC + 0x00098UL) +#define RDMC_MEM_DAT1_DATA 0x00000000ffffffffULL /* bits 63:32 */ + +#define RDMC_MEM_DAT2 (FZC_DMC + 0x000a0UL) +#define RDMC_MEM_DAT2_DATA 0x00000000ffffffffULL /* bits 95:64 */ + +#define RDMC_MEM_DAT3 (FZC_DMC + 0x000a8UL) +#define RDMC_MEM_DAT3_DATA 0x00000000ffffffffULL /* bits 127:96 */ + +#define RDMC_MEM_DAT4 (FZC_DMC + 0x000b0UL) +#define RDMC_MEM_DAT4_DATA 0x00000000000fffffULL /* bits 147:128 */ + +#define RX_CTL_DAT_FIFO_STAT (FZC_DMC + 0x000b8UL) +#define RX_CTL_DAT_FIFO_STAT_ID_MISMATCH 0x0000000000000100ULL +#define RX_CTL_DAT_FIFO_STAT_ZCP_EOP_ERR 0x00000000000000f0ULL +#define RX_CTL_DAT_FIFO_STAT_IPP_EOP_ERR 0x000000000000000fULL + +#define 
RX_CTL_DAT_FIFO_MASK (FZC_DMC + 0x000c0UL) +#define RX_CTL_DAT_FIFO_MASK_ID_MISMATCH 0x0000000000000100ULL +#define RX_CTL_DAT_FIFO_MASK_ZCP_EOP_ERR 0x00000000000000f0ULL +#define RX_CTL_DAT_FIFO_MASK_IPP_EOP_ERR 0x000000000000000fULL + +#define RDMC_TRAINING_VECTOR (FZC_DMC + 0x000c8UL) +#define RDMC_TRAINING_VECTOR_TRAINING_VECTOR 0x00000000ffffffffULL + +#define RX_CTL_DAT_FIFO_STAT_DBG (FZC_DMC + 0x000d0UL) +#define RX_CTL_DAT_FIFO_STAT_DBG_ID_MISMATCH 0x0000000000000100ULL +#define RX_CTL_DAT_FIFO_STAT_DBG_ZCP_EOP_ERR 0x00000000000000f0ULL +#define RX_CTL_DAT_FIFO_STAT_DBG_IPP_EOP_ERR 0x000000000000000fULL + +#define RDC_TBL(TBL,SLOT) (FZC_ZCP + 0x10000UL + \ + (TBL) * (8UL * 16UL) + \ + (SLOT) * 8UL) +#define RDC_TBL_RDC 0x000000000000000fULL + +#define RX_LOG_PAGE_VLD(IDX) (FZC_DMC + 0x20000UL + (IDX) * 0x40UL) +#define RX_LOG_PAGE_VLD_FUNC 0x000000000000000cULL +#define RX_LOG_PAGE_VLD_FUNC_SHIFT 2 +#define RX_LOG_PAGE_VLD_PAGE1 0x0000000000000002ULL +#define RX_LOG_PAGE_VLD_PAGE0 0x0000000000000001ULL + +#define RX_LOG_MASK1(IDX) (FZC_DMC + 0x20008UL + (IDX) * 0x40UL) +#define RX_LOG_MASK1_MASK 0x00000000ffffffffULL + +#define RX_LOG_VAL1(IDX) (FZC_DMC + 0x20010UL + (IDX) * 0x40UL) +#define RX_LOG_VAL1_VALUE 0x00000000ffffffffULL + +#define RX_LOG_MASK2(IDX) (FZC_DMC + 0x20018UL + (IDX) * 0x40UL) +#define RX_LOG_MASK2_MASK 0x00000000ffffffffULL + +#define RX_LOG_VAL2(IDX) (FZC_DMC + 0x20020UL + (IDX) * 0x40UL) +#define RX_LOG_VAL2_VALUE 0x00000000ffffffffULL + +#define RX_LOG_PAGE_RELO1(IDX) (FZC_DMC + 0x20028UL + (IDX) * 0x40UL) +#define RX_LOG_PAGE_RELO1_RELO 0x00000000ffffffffULL + +#define RX_LOG_PAGE_RELO2(IDX) (FZC_DMC + 0x20030UL + (IDX) * 0x40UL) +#define RX_LOG_PAGE_RELO2_RELO 0x00000000ffffffffULL + +#define RX_LOG_PAGE_HDL(IDX) (FZC_DMC + 0x20038UL + (IDX) * 0x40UL) +#define RX_LOG_PAGE_HDL_HANDLE 0x00000000000fffffULL + +#define TX_LOG_PAGE_VLD(IDX) (FZC_DMC + 0x40000UL + (IDX) * 0x200UL) +#define TX_LOG_PAGE_VLD_FUNC 0x000000000000000cULL +#define TX_LOG_PAGE_VLD_FUNC_SHIFT 2 +#define TX_LOG_PAGE_VLD_PAGE1 0x0000000000000002ULL +#define TX_LOG_PAGE_VLD_PAGE0 0x0000000000000001ULL + +#define TX_LOG_MASK1(IDX) (FZC_DMC + 0x40008UL + (IDX) * 0x200UL) +#define TX_LOG_MASK1_MASK 0x00000000ffffffffULL + +#define TX_LOG_VAL1(IDX) (FZC_DMC + 0x40010UL + (IDX) * 0x200UL) +#define TX_LOG_VAL1_VALUE 0x00000000ffffffffULL + +#define TX_LOG_MASK2(IDX) (FZC_DMC + 0x40018UL + (IDX) * 0x200UL) +#define TX_LOG_MASK2_MASK 0x00000000ffffffffULL + +#define TX_LOG_VAL2(IDX) (FZC_DMC + 0x40020UL + (IDX) * 0x200UL) +#define TX_LOG_VAL2_VALUE 0x00000000ffffffffULL + +#define TX_LOG_PAGE_RELO1(IDX) (FZC_DMC + 0x40028UL + (IDX) * 0x200UL) +#define TX_LOG_PAGE_RELO1_RELO 0x00000000ffffffffULL + +#define TX_LOG_PAGE_RELO2(IDX) (FZC_DMC + 0x40030UL + (IDX) * 0x200UL) +#define TX_LOG_PAGE_RELO2_RELO 0x00000000ffffffffULL + +#define TX_LOG_PAGE_HDL(IDX) (FZC_DMC + 0x40038UL + (IDX) * 0x200UL) +#define TX_LOG_PAGE_HDL_HANDLE 0x00000000000fffffULL + +#define TX_ADDR_MD (FZC_DMC + 0x45000UL) +#define TX_ADDR_MD_MODE32 0x0000000000000001ULL + +#define RDC_RED_PARA(IDX) (FZC_DMC + 0x30000UL + (IDX) * 0x40UL) +#define RDC_RED_PARA_THRE_SYN 0x00000000fff00000ULL +#define RDC_RED_PARA_THRE_SYN_SHIFT 20 +#define RDC_RED_PARA_WIN_SYN 0x00000000000f0000ULL +#define RDC_RED_PARA_WIN_SYN_SHIFT 16 +#define RDC_RED_PARA_THRE 0x000000000000fff0ULL +#define RDC_RED_PARA_THRE_SHIFT 4 +#define RDC_RED_PARA_WIN 0x000000000000000fULL +#define RDC_RED_PARA_WIN_SHIFT 0 + +#define RED_DIS_CNT(IDX) (FZC_DMC + 0x30008UL + 
(IDX) * 0x40UL) +#define RED_DIS_CNT_OFLOW 0x0000000000010000ULL +#define RED_DIS_CNT_COUNT 0x000000000000ffffULL + +#define IPP_CFIG (FZC_IPP + 0x00000UL) +#define IPP_CFIG_SOFT_RST 0x0000000080000000ULL +#define IPP_CFIG_IP_MAX_PKT 0x0000000001ffff00ULL +#define IPP_CFIG_IP_MAX_PKT_SHIFT 8 +#define IPP_CFIG_FFLP_CS_PIO_W 0x0000000000000080ULL +#define IPP_CFIG_PFIFO_PIO_W 0x0000000000000040ULL +#define IPP_CFIG_DFIFO_PIO_W 0x0000000000000020ULL +#define IPP_CFIG_CKSUM_EN 0x0000000000000010ULL +#define IPP_CFIG_DROP_BAD_CRC 0x0000000000000008ULL +#define IPP_CFIG_DFIFO_ECC_EN 0x0000000000000004ULL +#define IPP_CFIG_DEBUG_BUS_OUT_EN 0x0000000000000002ULL +#define IPP_CFIG_IPP_ENABLE 0x0000000000000001ULL + +#define IPP_PKT_DIS (FZC_IPP + 0x00020UL) +#define IPP_PKT_DIS_COUNT 0x0000000000003fffULL + +#define IPP_BAD_CS_CNT (FZC_IPP + 0x00028UL) +#define IPP_BAD_CS_CNT_COUNT 0x0000000000003fffULL + +#define IPP_ECC (FZC_IPP + 0x00030UL) +#define IPP_ECC_COUNT 0x00000000000000ffULL + +#define IPP_INT_STAT (FZC_IPP + 0x00040UL) +#define IPP_INT_STAT_SOP_MISS 0x0000000080000000ULL +#define IPP_INT_STAT_EOP_MISS 0x0000000040000000ULL +#define IPP_INT_STAT_DFIFO_UE 0x0000000030000000ULL +#define IPP_INT_STAT_DFIFO_CE 0x000000000c000000ULL +#define IPP_INT_STAT_DFIFO_ECC 0x0000000003000000ULL +#define IPP_INT_STAT_DFIFO_ECC_IDX 0x00000000007ff000ULL +#define IPP_INT_STAT_PFIFO_PERR 0x0000000000000800ULL +#define IPP_INT_STAT_ECC_ERR_MAX 0x0000000000000400ULL +#define IPP_INT_STAT_PFIFO_ERR_IDX 0x00000000000003f0ULL +#define IPP_INT_STAT_PFIFO_OVER 0x0000000000000008ULL +#define IPP_INT_STAT_PFIFO_UND 0x0000000000000004ULL +#define IPP_INT_STAT_BAD_CS_MX 0x0000000000000002ULL +#define IPP_INT_STAT_PKT_DIS_MX 0x0000000000000001ULL +#define IPP_INT_STAT_ALL 0x00000000ff7fffffULL + +#define IPP_MSK (FZC_IPP + 0x00048UL) +#define IPP_MSK_ECC_ERR_MX 0x0000000000000080ULL +#define IPP_MSK_DFIFO_EOP_SOP 0x0000000000000040ULL +#define IPP_MSK_DFIFO_UC 0x0000000000000020ULL +#define IPP_MSK_PFIFO_PAR 0x0000000000000010ULL +#define IPP_MSK_PFIFO_OVER 0x0000000000000008ULL +#define IPP_MSK_PFIFO_UND 0x0000000000000004ULL +#define IPP_MSK_BAD_CS 0x0000000000000002ULL +#define IPP_MSK_PKT_DIS_CNT 0x0000000000000001ULL +#define IPP_MSK_ALL 0x00000000000000ffULL + +#define IPP_PFIFO_RD0 (FZC_IPP + 0x00060UL) +#define IPP_PFIFO_RD0_DATA 0x00000000ffffffffULL /* bits 31:0 */ + +#define IPP_PFIFO_RD1 (FZC_IPP + 0x00068UL) +#define IPP_PFIFO_RD1_DATA 0x00000000ffffffffULL /* bits 63:32 */ + +#define IPP_PFIFO_RD2 (FZC_IPP + 0x00070UL) +#define IPP_PFIFO_RD2_DATA 0x00000000ffffffffULL /* bits 95:64 */ + +#define IPP_PFIFO_RD3 (FZC_IPP + 0x00078UL) +#define IPP_PFIFO_RD3_DATA 0x00000000ffffffffULL /* bits 127:96 */ + +#define IPP_PFIFO_RD4 (FZC_IPP + 0x00080UL) +#define IPP_PFIFO_RD4_DATA 0x00000000ffffffffULL /* bits 145:128 */ + +#define IPP_PFIFO_WR0 (FZC_IPP + 0x00088UL) +#define IPP_PFIFO_WR0_DATA 0x00000000ffffffffULL /* bits 31:0 */ + +#define IPP_PFIFO_WR1 (FZC_IPP + 0x00090UL) +#define IPP_PFIFO_WR1_DATA 0x00000000ffffffffULL /* bits 63:32 */ + +#define IPP_PFIFO_WR2 (FZC_IPP + 0x00098UL) +#define IPP_PFIFO_WR2_DATA 0x00000000ffffffffULL /* bits 95:64 */ + +#define IPP_PFIFO_WR3 (FZC_IPP + 0x000a0UL) +#define IPP_PFIFO_WR3_DATA 0x00000000ffffffffULL /* bits 127:96 */ + +#define IPP_PFIFO_WR4 (FZC_IPP + 0x000a8UL) +#define IPP_PFIFO_WR4_DATA 0x00000000ffffffffULL /* bits 145:128 */ + +#define IPP_PFIFO_RD_PTR (FZC_IPP + 0x000b0UL) +#define IPP_PFIFO_RD_PTR_PTR 0x000000000000003fULL + +#define IPP_PFIFO_WR_PTR 
(FZC_IPP + 0x000b8UL) +#define IPP_PFIFO_WR_PTR_PTR 0x000000000000007fULL + +#define IPP_DFIFO_RD0 (FZC_IPP + 0x000c0UL) +#define IPP_DFIFO_RD0_DATA 0x00000000ffffffffULL /* bits 31:0 */ + +#define IPP_DFIFO_RD1 (FZC_IPP + 0x000c8UL) +#define IPP_DFIFO_RD1_DATA 0x00000000ffffffffULL /* bits 63:32 */ + +#define IPP_DFIFO_RD2 (FZC_IPP + 0x000d0UL) +#define IPP_DFIFO_RD2_DATA 0x00000000ffffffffULL /* bits 95:64 */ + +#define IPP_DFIFO_RD3 (FZC_IPP + 0x000d8UL) +#define IPP_DFIFO_RD3_DATA 0x00000000ffffffffULL /* bits 127:96 */ + +#define IPP_DFIFO_RD4 (FZC_IPP + 0x000e0UL) +#define IPP_DFIFO_RD4_DATA 0x00000000ffffffffULL /* bits 145:128 */ + +#define IPP_DFIFO_WR0 (FZC_IPP + 0x000e8UL) +#define IPP_DFIFO_WR0_DATA 0x00000000ffffffffULL /* bits 31:0 */ + +#define IPP_DFIFO_WR1 (FZC_IPP + 0x000f0UL) +#define IPP_DFIFO_WR1_DATA 0x00000000ffffffffULL /* bits 63:32 */ + +#define IPP_DFIFO_WR2 (FZC_IPP + 0x000f8UL) +#define IPP_DFIFO_WR2_DATA 0x00000000ffffffffULL /* bits 95:64 */ + +#define IPP_DFIFO_WR3 (FZC_IPP + 0x00100UL) +#define IPP_DFIFO_WR3_DATA 0x00000000ffffffffULL /* bits 127:96 */ + +#define IPP_DFIFO_WR4 (FZC_IPP + 0x00108UL) +#define IPP_DFIFO_WR4_DATA 0x00000000ffffffffULL /* bits 145:128 */ + +#define IPP_DFIFO_RD_PTR (FZC_IPP + 0x00110UL) +#define IPP_DFIFO_RD_PTR_PTR 0x0000000000000fffULL + +#define IPP_DFIFO_WR_PTR (FZC_IPP + 0x00118UL) +#define IPP_DFIFO_WR_PTR_PTR 0x0000000000000fffULL + +#define IPP_SM (FZC_IPP + 0x00120UL) +#define IPP_SM_SM 0x00000000ffffffffULL + +#define IPP_CS_STAT (FZC_IPP + 0x00128UL) +#define IPP_CS_STAT_BCYC_CNT 0x00000000ff000000ULL +#define IPP_CS_STAT_IP_LEN 0x0000000000fff000ULL +#define IPP_CS_STAT_CS_FAIL 0x0000000000000800ULL +#define IPP_CS_STAT_TERM 0x0000000000000400ULL +#define IPP_CS_STAT_BAD_NUM 0x0000000000000200ULL +#define IPP_CS_STAT_CS_STATE 0x00000000000001ffULL + +#define IPP_FFLP_CS_INFO (FZC_IPP + 0x00130UL) +#define IPP_FFLP_CS_INFO_PKT_ID 0x0000000000003c00ULL +#define IPP_FFLP_CS_INFO_L4_PROTO 0x0000000000000300ULL +#define IPP_FFLP_CS_INFO_V4_HD_LEN 0x00000000000000f0ULL +#define IPP_FFLP_CS_INFO_L3_VER 0x000000000000000cULL +#define IPP_FFLP_CS_INFO_L2_OP 0x0000000000000003ULL + +#define IPP_DBG_SEL (FZC_IPP + 0x00138UL) +#define IPP_DBG_SEL_SEL 0x000000000000000fULL + +#define IPP_DFIFO_ECC_SYND (FZC_IPP + 0x00140UL) +#define IPP_DFIFO_ECC_SYND_SYND 0x000000000000ffffULL + +#define IPP_DFIFO_EOP_RD_PTR (FZC_IPP + 0x00148UL) +#define IPP_DFIFO_EOP_RD_PTR_PTR 0x0000000000000fffULL + +#define IPP_ECC_CTL (FZC_IPP + 0x00150UL) +#define IPP_ECC_CTL_DIS_DBL 0x0000000080000000ULL +#define IPP_ECC_CTL_COR_DBL 0x0000000000020000ULL +#define IPP_ECC_CTL_COR_SNG 0x0000000000010000ULL +#define IPP_ECC_CTL_COR_ALL 0x0000000000000400ULL +#define IPP_ECC_CTL_COR_1 0x0000000000000100ULL +#define IPP_ECC_CTL_COR_LST 0x0000000000000004ULL +#define IPP_ECC_CTL_COR_SND 0x0000000000000002ULL +#define IPP_ECC_CTL_COR_FSR 0x0000000000000001ULL + +#define NIU_DFIFO_ENTRIES 1024 +#define ATLAS_P0_P1_DFIFO_ENTRIES 2048 +#define ATLAS_P2_P3_DFIFO_ENTRIES 1024 + +#define ZCP_CFIG (FZC_ZCP + 0x00000UL) +#define ZCP_CFIG_ZCP_32BIT_MODE 0x0000000001000000ULL +#define ZCP_CFIG_ZCP_DEBUG_SEL 0x0000000000ff0000ULL +#define ZCP_CFIG_DMA_TH 0x000000000000ffe0ULL +#define ZCP_CFIG_ECC_CHK_DIS 0x0000000000000010ULL +#define ZCP_CFIG_PAR_CHK_DIS 0x0000000000000008ULL +#define ZCP_CFIG_DIS_BUFF_RSP_IF 0x0000000000000004ULL +#define ZCP_CFIG_DIS_BUFF_REQ_IF 0x0000000000000002ULL +#define ZCP_CFIG_ZC_ENABLE 0x0000000000000001ULL + +#define ZCP_INT_STAT (FZC_ZCP + 
0x00008UL) +#define ZCP_INT_STAT_RRFIFO_UNDERRUN 0x0000000000008000ULL +#define ZCP_INT_STAT_RRFIFO_OVERRUN 0x0000000000004000ULL +#define ZCP_INT_STAT_RSPFIFO_UNCOR_ERR 0x0000000000001000ULL +#define ZCP_INT_STAT_BUFFER_OVERFLOW 0x0000000000000800ULL +#define ZCP_INT_STAT_STAT_TBL_PERR 0x0000000000000400ULL +#define ZCP_INT_STAT_DYN_TBL_PERR 0x0000000000000200ULL +#define ZCP_INT_STAT_BUF_TBL_PERR 0x0000000000000100ULL +#define ZCP_INT_STAT_TT_PROGRAM_ERR 0x0000000000000080ULL +#define ZCP_INT_STAT_RSP_TT_INDEX_ERR 0x0000000000000040ULL +#define ZCP_INT_STAT_SLV_TT_INDEX_ERR 0x0000000000000020ULL +#define ZCP_INT_STAT_ZCP_TT_INDEX_ERR 0x0000000000000010ULL +#define ZCP_INT_STAT_CFIFO_ECC3 0x0000000000000008ULL +#define ZCP_INT_STAT_CFIFO_ECC2 0x0000000000000004ULL +#define ZCP_INT_STAT_CFIFO_ECC1 0x0000000000000002ULL +#define ZCP_INT_STAT_CFIFO_ECC0 0x0000000000000001ULL +#define ZCP_INT_STAT_ALL 0x000000000000ffffULL + +#define ZCP_INT_MASK (FZC_ZCP + 0x00010UL) +#define ZCP_INT_MASK_RRFIFO_UNDERRUN 0x0000000000008000ULL +#define ZCP_INT_MASK_RRFIFO_OVERRUN 0x0000000000004000ULL +#define ZCP_INT_MASK_LOJ 0x0000000000002000ULL +#define ZCP_INT_MASK_RSPFIFO_UNCOR_ERR 0x0000000000001000ULL +#define ZCP_INT_MASK_BUFFER_OVERFLOW 0x0000000000000800ULL +#define ZCP_INT_MASK_STAT_TBL_PERR 0x0000000000000400ULL +#define ZCP_INT_MASK_DYN_TBL_PERR 0x0000000000000200ULL +#define ZCP_INT_MASK_BUF_TBL_PERR 0x0000000000000100ULL +#define ZCP_INT_MASK_TT_PROGRAM_ERR 0x0000000000000080ULL +#define ZCP_INT_MASK_RSP_TT_INDEX_ERR 0x0000000000000040ULL +#define ZCP_INT_MASK_SLV_TT_INDEX_ERR 0x0000000000000020ULL +#define ZCP_INT_MASK_ZCP_TT_INDEX_ERR 0x0000000000000010ULL +#define ZCP_INT_MASK_CFIFO_ECC3 0x0000000000000008ULL +#define ZCP_INT_MASK_CFIFO_ECC2 0x0000000000000004ULL +#define ZCP_INT_MASK_CFIFO_ECC1 0x0000000000000002ULL +#define ZCP_INT_MASK_CFIFO_ECC0 0x0000000000000001ULL +#define ZCP_INT_MASK_ALL 0x000000000000ffffULL + +#define BAM4BUF (FZC_ZCP + 0x00018UL) +#define BAM4BUF_LOJ 0x0000000080000000ULL +#define BAM4BUF_EN_CK 0x0000000040000000ULL +#define BAM4BUF_IDX_END0 0x000000003ff00000ULL +#define BAM4BUF_IDX_ST0 0x00000000000ffc00ULL +#define BAM4BUF_OFFSET0 0x00000000000003ffULL + +#define BAM8BUF (FZC_ZCP + 0x00020UL) +#define BAM8BUF_LOJ 0x0000000080000000ULL +#define BAM8BUF_EN_CK 0x0000000040000000ULL +#define BAM8BUF_IDX_END1 0x000000003ff00000ULL +#define BAM8BUF_IDX_ST1 0x00000000000ffc00ULL +#define BAM8BUF_OFFSET1 0x00000000000003ffULL + +#define BAM16BUF (FZC_ZCP + 0x00028UL) +#define BAM16BUF_LOJ 0x0000000080000000ULL +#define BAM16BUF_EN_CK 0x0000000040000000ULL +#define BAM16BUF_IDX_END2 0x000000003ff00000ULL +#define BAM16BUF_IDX_ST2 0x00000000000ffc00ULL +#define BAM16BUF_OFFSET2 0x00000000000003ffULL + +#define BAM32BUF (FZC_ZCP + 0x00030UL) +#define BAM32BUF_LOJ 0x0000000080000000ULL +#define BAM32BUF_EN_CK 0x0000000040000000ULL +#define BAM32BUF_IDX_END3 0x000000003ff00000ULL +#define BAM32BUF_IDX_ST3 0x00000000000ffc00ULL +#define BAM32BUF_OFFSET3 0x00000000000003ffULL + +#define DST4BUF (FZC_ZCP + 0x00038UL) +#define DST4BUF_DS_OFFSET0 0x00000000000003ffULL + +#define DST8BUF (FZC_ZCP + 0x00040UL) +#define DST8BUF_DS_OFFSET1 0x00000000000003ffULL + +#define DST16BUF (FZC_ZCP + 0x00048UL) +#define DST16BUF_DS_OFFSET2 0x00000000000003ffULL + +#define DST32BUF (FZC_ZCP + 0x00050UL) +#define DST32BUF_DS_OFFSET3 0x00000000000003ffULL + +#define ZCP_RAM_DATA0 (FZC_ZCP + 0x00058UL) +#define ZCP_RAM_DATA0_DAT0 0x00000000ffffffffULL + +#define ZCP_RAM_DATA1 (FZC_ZCP + 
0x00060UL) +#define ZCP_RAM_DAT10_DAT1 0x00000000ffffffffULL + +#define ZCP_RAM_DATA2 (FZC_ZCP + 0x00068UL) +#define ZCP_RAM_DATA2_DAT2 0x00000000ffffffffULL + +#define ZCP_RAM_DATA3 (FZC_ZCP + 0x00070UL) +#define ZCP_RAM_DATA3_DAT3 0x00000000ffffffffULL + +#define ZCP_RAM_DATA4 (FZC_ZCP + 0x00078UL) +#define ZCP_RAM_DATA4_DAT4 0x00000000000000ffULL + +#define ZCP_RAM_BE (FZC_ZCP + 0x00080UL) +#define ZCP_RAM_BE_VAL 0x000000000001ffffULL + +#define ZCP_RAM_ACC (FZC_ZCP + 0x00088UL) +#define ZCP_RAM_ACC_BUSY 0x0000000080000000ULL +#define ZCP_RAM_ACC_READ 0x0000000040000000ULL +#define ZCP_RAM_ACC_WRITE 0x0000000000000000ULL +#define ZCP_RAM_ACC_LOJ 0x0000000020000000ULL +#define ZCP_RAM_ACC_ZFCID 0x000000001ffe0000ULL +#define ZCP_RAM_ACC_ZFCID_SHIFT 17 +#define ZCP_RAM_ACC_RAM_SEL 0x000000000001f000ULL +#define ZCP_RAM_ACC_RAM_SEL_SHIFT 12 +#define ZCP_RAM_ACC_CFIFOADDR 0x0000000000000fffULL +#define ZCP_RAM_ACC_CFIFOADDR_SHIFT 0 + +#define ZCP_RAM_SEL_BAM(INDEX) (0x00 + (INDEX)) +#define ZCP_RAM_SEL_TT_STATIC 0x08 +#define ZCP_RAM_SEL_TT_DYNAMIC 0x09 +#define ZCP_RAM_SEL_CFIFO(PORT) (0x10 + (PORT)) + +#define NIU_CFIFO_ENTRIES 1024 +#define ATLAS_P0_P1_CFIFO_ENTRIES 2048 +#define ATLAS_P2_P3_CFIFO_ENTRIES 1024 + +#define CHK_BIT_DATA (FZC_ZCP + 0x00090UL) +#define CHK_BIT_DATA_DATA 0x000000000000ffffULL + +#define RESET_CFIFO (FZC_ZCP + 0x00098UL) +#define RESET_CFIFO_RST(PORT) (0x1 << (PORT)) + +#define CFIFO_ECC(PORT) (FZC_ZCP + 0x000a0UL + (PORT) * 8UL) +#define CFIFO_ECC_DIS_DBLBIT_ERR 0x0000000080000000ULL +#define CFIFO_ECC_DBLBIT_ERR 0x0000000000020000ULL +#define CFIFO_ECC_SINGLEBIT_ERR 0x0000000000010000ULL +#define CFIFO_ECC_ALL_PKT 0x0000000000000400ULL +#define CFIFO_ECC_LAST_LINE 0x0000000000000004ULL +#define CFIFO_ECC_2ND_LINE 0x0000000000000002ULL +#define CFIFO_ECC_1ST_LINE 0x0000000000000001ULL + +#define ZCP_TRAINING_VECTOR (FZC_ZCP + 0x000c0UL) +#define ZCP_TRAINING_VECTOR_VECTOR 0x00000000ffffffffULL + +#define ZCP_STATE_MACHINE (FZC_ZCP + 0x000c8UL) +#define ZCP_STATE_MACHINE_SM 0x00000000ffffffffULL + +/* Same bits as ZCP_INT_STAT */ +#define ZCP_INT_STAT_TEST (FZC_ZCP + 0x00108UL) + +#define RXDMA_CFIG1(IDX) (DMC + 0x00000UL + (IDX) * 0x200UL) +#define RXDMA_CFIG1_EN 0x0000000080000000ULL +#define RXDMA_CFIG1_RST 0x0000000040000000ULL +#define RXDMA_CFIG1_QST 0x0000000020000000ULL +#define RXDMA_CFIG1_MBADDR_H 0x0000000000000fffULL /* mboxaddr 43:32 */ + +#define RXDMA_CFIG2(IDX) (DMC + 0x00008UL + (IDX) * 0x200UL) +#define RXDMA_CFIG2_MBADDR_L 0x00000000ffffffc0ULL /* mboxaddr 31:6 */ +#define RXDMA_CFIG2_OFFSET 0x0000000000000006ULL +#define RXDMA_CFIG2_OFFSET_SHIFT 1 +#define RXDMA_CFIG2_FULL_HDR 0x0000000000000001ULL + +#define RBR_CFIG_A(IDX) (DMC + 0x00010UL + (IDX) * 0x200UL) +#define RBR_CFIG_A_LEN 0xffff000000000000ULL +#define RBR_CFIG_A_LEN_SHIFT 48 +#define RBR_CFIG_A_STADDR_BASE 0x00000ffffffc0000ULL +#define RBR_CFIG_A_STADDR 0x000000000003ffc0ULL + +#define RBR_CFIG_B(IDX) (DMC + 0x00018UL + (IDX) * 0x200UL) +#define RBR_CFIG_B_BLKSIZE 0x0000000003000000ULL +#define RBR_CFIG_B_BLKSIZE_SHIFT 24 +#define RBR_CFIG_B_VLD2 0x0000000000800000ULL +#define RBR_CFIG_B_BUFSZ2 0x0000000000030000ULL +#define RBR_CFIG_B_BUFSZ2_SHIFT 16 +#define RBR_CFIG_B_VLD1 0x0000000000008000ULL +#define RBR_CFIG_B_BUFSZ1 0x0000000000000300ULL +#define RBR_CFIG_B_BUFSZ1_SHIFT 8 +#define RBR_CFIG_B_VLD0 0x0000000000000080ULL +#define RBR_CFIG_B_BUFSZ0 0x0000000000000003ULL +#define RBR_CFIG_B_BUFSZ0_SHIFT 0 + +#define RBR_BLKSIZE_4K 0x0 +#define RBR_BLKSIZE_8K 0x1 +#define 
RBR_BLKSIZE_16K 0x2 +#define RBR_BLKSIZE_32K 0x3 +#define RBR_BUFSZ2_2K 0x0 +#define RBR_BUFSZ2_4K 0x1 +#define RBR_BUFSZ2_8K 0x2 +#define RBR_BUFSZ2_16K 0x3 +#define RBR_BUFSZ1_1K 0x0 +#define RBR_BUFSZ1_2K 0x1 +#define RBR_BUFSZ1_4K 0x2 +#define RBR_BUFSZ1_8K 0x3 +#define RBR_BUFSZ0_256 0x0 +#define RBR_BUFSZ0_512 0x1 +#define RBR_BUFSZ0_1K 0x2 +#define RBR_BUFSZ0_2K 0x3 + +#define RBR_KICK(IDX) (DMC + 0x00020UL + (IDX) * 0x200UL) +#define RBR_KICK_BKADD 0x000000000000ffffULL + +#define RBR_STAT(IDX) (DMC + 0x00028UL + (IDX) * 0x200UL) +#define RBR_STAT_QLEN 0x000000000000ffffULL + +#define RBR_HDH(IDX) (DMC + 0x00030UL + (IDX) * 0x200UL) +#define RBR_HDH_HEAD_H 0x0000000000000fffULL + +#define RBR_HDL(IDX) (DMC + 0x00038UL + (IDX) * 0x200UL) +#define RBR_HDL_HEAD_L 0x00000000fffffffcULL + +#define RCRCFIG_A(IDX) (DMC + 0x00040UL + (IDX) * 0x200UL) +#define RCRCFIG_A_LEN 0xffff000000000000ULL +#define RCRCFIG_A_LEN_SHIFT 48 +#define RCRCFIG_A_STADDR_BASE 0x00000ffffff80000ULL +#define RCRCFIG_A_STADDR 0x000000000007ffc0ULL + +#define RCRCFIG_B(IDX) (DMC + 0x00048UL + (IDX) * 0x200UL) +#define RCRCFIG_B_PTHRES 0x00000000ffff0000ULL +#define RCRCFIG_B_PTHRES_SHIFT 16 +#define RCRCFIG_B_ENTOUT 0x0000000000008000ULL +#define RCRCFIG_B_TIMEOUT 0x000000000000003fULL +#define RCRCFIG_B_TIMEOUT_SHIFT 0 + +#define RCRSTAT_A(IDX) (DMC + 0x00050UL + (IDX) * 0x200UL) +#define RCRSTAT_A_QLEN 0x000000000000ffffULL + +#define RCRSTAT_B(IDX) (DMC + 0x00058UL + (IDX) * 0x200UL) +#define RCRSTAT_B_TIPTR_H 0x0000000000000fffULL + +#define RCRSTAT_C(IDX) (DMC + 0x00060UL + (IDX) * 0x200UL) +#define RCRSTAT_C_TIPTR_L 0x00000000fffffff8ULL + +#define RX_DMA_CTL_STAT(IDX) (DMC + 0x00070UL + (IDX) * 0x200UL) +#define RX_DMA_CTL_STAT_RBR_TMOUT 0x0020000000000000ULL +#define RX_DMA_CTL_STAT_RSP_CNT_ERR 0x0010000000000000ULL +#define RX_DMA_CTL_STAT_BYTE_EN_BUS 0x0008000000000000ULL +#define RX_DMA_CTL_STAT_RSP_DAT_ERR 0x0004000000000000ULL +#define RX_DMA_CTL_STAT_RCR_ACK_ERR 0x0002000000000000ULL +#define RX_DMA_CTL_STAT_DC_FIFO_ERR 0x0001000000000000ULL +#define RX_DMA_CTL_STAT_MEX 0x0000800000000000ULL +#define RX_DMA_CTL_STAT_RCRTHRES 0x0000400000000000ULL +#define RX_DMA_CTL_STAT_RCRTO 0x0000200000000000ULL +#define RX_DMA_CTL_STAT_RCR_SHA_PAR 0x0000100000000000ULL +#define RX_DMA_CTL_STAT_RBR_PRE_PAR 0x0000080000000000ULL +#define RX_DMA_CTL_STAT_PORT_DROP_PKT 0x0000040000000000ULL +#define RX_DMA_CTL_STAT_WRED_DROP 0x0000020000000000ULL +#define RX_DMA_CTL_STAT_RBR_PRE_EMTY 0x0000010000000000ULL +#define RX_DMA_CTL_STAT_RCRSHADOW_FULL 0x0000008000000000ULL +#define RX_DMA_CTL_STAT_CONFIG_ERR 0x0000004000000000ULL +#define RX_DMA_CTL_STAT_RCRINCON 0x0000002000000000ULL +#define RX_DMA_CTL_STAT_RCRFULL 0x0000001000000000ULL +#define RX_DMA_CTL_STAT_RBR_EMPTY 0x0000000800000000ULL +#define RX_DMA_CTL_STAT_RBRFULL 0x0000000400000000ULL +#define RX_DMA_CTL_STAT_RBRLOGPAGE 0x0000000200000000ULL +#define RX_DMA_CTL_STAT_CFIGLOGPAGE 0x0000000100000000ULL +#define RX_DMA_CTL_STAT_PTRREAD 0x00000000ffff0000ULL +#define RX_DMA_CTL_STAT_PTRREAD_SHIFT 16 +#define RX_DMA_CTL_STAT_PKTREAD 0x000000000000ffffULL +#define RX_DMA_CTL_STAT_PKTREAD_SHIFT 0 + +#define RX_DMA_CTL_STAT_CHAN_FATAL (RX_DMA_CTL_STAT_RBR_TMOUT | \ + RX_DMA_CTL_STAT_RSP_CNT_ERR | \ + RX_DMA_CTL_STAT_BYTE_EN_BUS | \ + RX_DMA_CTL_STAT_RSP_DAT_ERR | \ + RX_DMA_CTL_STAT_RCR_ACK_ERR | \ + RX_DMA_CTL_STAT_RCR_SHA_PAR | \ + RX_DMA_CTL_STAT_RBR_PRE_PAR | \ + RX_DMA_CTL_STAT_CONFIG_ERR | \ + RX_DMA_CTL_STAT_RCRINCON | \ + RX_DMA_CTL_STAT_RCRFULL | \ + 
RX_DMA_CTL_STAT_RBRFULL | \ + RX_DMA_CTL_STAT_RBRLOGPAGE | \ + RX_DMA_CTL_STAT_CFIGLOGPAGE) + +#define RX_DMA_CTL_STAT_PORT_FATAL (RX_DMA_CTL_STAT_DC_FIFO_ERR) + +#define RX_DMA_CTL_WRITE_CLEAR_ERRS (RX_DMA_CTL_STAT_RBR_EMPTY | \ + RX_DMA_CTL_STAT_RCRSHADOW_FULL | \ + RX_DMA_CTL_STAT_RBR_PRE_EMTY | \ + RX_DMA_CTL_STAT_WRED_DROP | \ + RX_DMA_CTL_STAT_PORT_DROP_PKT | \ + RX_DMA_CTL_STAT_RCRTO | \ + RX_DMA_CTL_STAT_RCRTHRES | \ + RX_DMA_CTL_STAT_DC_FIFO_ERR) + +#define RCR_FLSH(IDX) (DMC + 0x00078UL + (IDX) * 0x200UL) +#define RCR_FLSH_FLSH 0x0000000000000001ULL + +#define RXMISC(IDX) (DMC + 0x00090UL + (IDX) * 0x200UL) +#define RXMISC_OFLOW 0x0000000000010000ULL +#define RXMISC_COUNT 0x000000000000ffffULL + +#define RX_DMA_CTL_STAT_DBG(IDX) (DMC + 0x00098UL + (IDX) * 0x200UL) +#define RX_DMA_CTL_STAT_DBG_RBR_TMOUT 0x0020000000000000ULL +#define RX_DMA_CTL_STAT_DBG_RSP_CNT_ERR 0x0010000000000000ULL +#define RX_DMA_CTL_STAT_DBG_BYTE_EN_BUS 0x0008000000000000ULL +#define RX_DMA_CTL_STAT_DBG_RSP_DAT_ERR 0x0004000000000000ULL +#define RX_DMA_CTL_STAT_DBG_RCR_ACK_ERR 0x0002000000000000ULL +#define RX_DMA_CTL_STAT_DBG_DC_FIFO_ERR 0x0001000000000000ULL +#define RX_DMA_CTL_STAT_DBG_MEX 0x0000800000000000ULL +#define RX_DMA_CTL_STAT_DBG_RCRTHRES 0x0000400000000000ULL +#define RX_DMA_CTL_STAT_DBG_RCRTO 0x0000200000000000ULL +#define RX_DMA_CTL_STAT_DBG_RCR_SHA_PAR 0x0000100000000000ULL +#define RX_DMA_CTL_STAT_DBG_RBR_PRE_PAR 0x0000080000000000ULL +#define RX_DMA_CTL_STAT_DBG_PORT_DROP_PKT 0x0000040000000000ULL +#define RX_DMA_CTL_STAT_DBG_WRED_DROP 0x0000020000000000ULL +#define RX_DMA_CTL_STAT_DBG_RBR_PRE_EMTY 0x0000010000000000ULL +#define RX_DMA_CTL_STAT_DBG_RCRSHADOW_FULL 0x0000008000000000ULL +#define RX_DMA_CTL_STAT_DBG_CONFIG_ERR 0x0000004000000000ULL +#define RX_DMA_CTL_STAT_DBG_RCRINCON 0x0000002000000000ULL +#define RX_DMA_CTL_STAT_DBG_RCRFULL 0x0000001000000000ULL +#define RX_DMA_CTL_STAT_DBG_RBR_EMPTY 0x0000000800000000ULL +#define RX_DMA_CTL_STAT_DBG_RBRFULL 0x0000000400000000ULL +#define RX_DMA_CTL_STAT_DBG_RBRLOGPAGE 0x0000000200000000ULL +#define RX_DMA_CTL_STAT_DBG_CFIGLOGPAGE 0x0000000100000000ULL +#define RX_DMA_CTL_STAT_DBG_PTRREAD 0x00000000ffff0000ULL +#define RX_DMA_CTL_STAT_DBG_PKTREAD 0x000000000000ffffULL + +#define RX_DMA_ENT_MSK(IDX) (DMC + 0x00068UL + (IDX) * 0x200UL) +#define RX_DMA_ENT_MSK_RBR_TMOUT 0x0000000000200000ULL +#define RX_DMA_ENT_MSK_RSP_CNT_ERR 0x0000000000100000ULL +#define RX_DMA_ENT_MSK_BYTE_EN_BUS 0x0000000000080000ULL +#define RX_DMA_ENT_MSK_RSP_DAT_ERR 0x0000000000040000ULL +#define RX_DMA_ENT_MSK_RCR_ACK_ERR 0x0000000000020000ULL +#define RX_DMA_ENT_MSK_DC_FIFO_ERR 0x0000000000010000ULL +#define RX_DMA_ENT_MSK_RCRTHRES 0x0000000000004000ULL +#define RX_DMA_ENT_MSK_RCRTO 0x0000000000002000ULL +#define RX_DMA_ENT_MSK_RCR_SHA_PAR 0x0000000000001000ULL +#define RX_DMA_ENT_MSK_RBR_PRE_PAR 0x0000000000000800ULL +#define RX_DMA_ENT_MSK_PORT_DROP_PKT 0x0000000000000400ULL +#define RX_DMA_ENT_MSK_WRED_DROP 0x0000000000000200ULL +#define RX_DMA_ENT_MSK_RBR_PRE_EMTY 0x0000000000000100ULL +#define RX_DMA_ENT_MSK_RCR_SHADOW_FULL 0x0000000000000080ULL +#define RX_DMA_ENT_MSK_CONFIG_ERR 0x0000000000000040ULL +#define RX_DMA_ENT_MSK_RCRINCON 0x0000000000000020ULL +#define RX_DMA_ENT_MSK_RCRFULL 0x0000000000000010ULL +#define RX_DMA_ENT_MSK_RBR_EMPTY 0x0000000000000008ULL +#define RX_DMA_ENT_MSK_RBRFULL 0x0000000000000004ULL +#define RX_DMA_ENT_MSK_RBRLOGPAGE 0x0000000000000002ULL +#define RX_DMA_ENT_MSK_CFIGLOGPAGE 0x0000000000000001ULL +#define RX_DMA_ENT_MSK_ALL 
0x00000000003f7fffULL + +#define TX_RNG_CFIG(IDX) (DMC + 0x40000UL + (IDX) * 0x200UL) +#define TX_RNG_CFIG_LEN 0x1fff000000000000ULL +#define TX_RNG_CFIG_LEN_SHIFT 48 +#define TX_RNG_CFIG_STADDR_BASE 0x00000ffffff80000ULL +#define TX_RNG_CFIG_STADDR 0x000000000007ffc0ULL + +#define TX_RING_HDL(IDX) (DMC + 0x40010UL + (IDX) * 0x200UL) +#define TX_RING_HDL_WRAP 0x0000000000080000ULL +#define TX_RING_HDL_HEAD 0x000000000007fff8ULL +#define TX_RING_HDL_HEAD_SHIFT 3 + +#define TX_RING_KICK(IDX) (DMC + 0x40018UL + (IDX) * 0x200UL) +#define TX_RING_KICK_WRAP 0x0000000000080000ULL +#define TX_RING_KICK_TAIL 0x000000000007fff8ULL + +#define TX_ENT_MSK(IDX) (DMC + 0x40020UL + (IDX) * 0x200UL) +#define TX_ENT_MSK_MK 0x0000000000008000ULL +#define TX_ENT_MSK_MBOX_ERR 0x0000000000000080ULL +#define TX_ENT_MSK_PKT_SIZE_ERR 0x0000000000000040ULL +#define TX_ENT_MSK_TX_RING_OFLOW 0x0000000000000020ULL +#define TX_ENT_MSK_PREF_BUF_ECC_ERR 0x0000000000000010ULL +#define TX_ENT_MSK_NACK_PREF 0x0000000000000008ULL +#define TX_ENT_MSK_NACK_PKT_RD 0x0000000000000004ULL +#define TX_ENT_MSK_CONF_PART_ERR 0x0000000000000002ULL +#define TX_ENT_MSK_PKT_PRT_ERR 0x0000000000000001ULL + +#define TX_CS(IDX) (DMC + 0x40028UL + (IDX)*0x200UL) +#define TX_CS_PKT_CNT 0x0fff000000000000ULL +#define TX_CS_PKT_CNT_SHIFT 48 +#define TX_CS_LASTMARK 0x00000fff00000000ULL +#define TX_CS_LASTMARK_SHIFT 32 +#define TX_CS_RST 0x0000000080000000ULL +#define TX_CS_RST_STATE 0x0000000040000000ULL +#define TX_CS_MB 0x0000000020000000ULL +#define TX_CS_STOP_N_GO 0x0000000010000000ULL +#define TX_CS_SNG_STATE 0x0000000008000000ULL +#define TX_CS_MK 0x0000000000008000ULL +#define TX_CS_MMK 0x0000000000004000ULL +#define TX_CS_MBOX_ERR 0x0000000000000080ULL +#define TX_CS_PKT_SIZE_ERR 0x0000000000000040ULL +#define TX_CS_TX_RING_OFLOW 0x0000000000000020ULL +#define TX_CS_PREF_BUF_PAR_ERR 0x0000000000000010ULL +#define TX_CS_NACK_PREF 0x0000000000000008ULL +#define TX_CS_NACK_PKT_RD 0x0000000000000004ULL +#define TX_CS_CONF_PART_ERR 0x0000000000000002ULL +#define TX_CS_PKT_PRT_ERR 0x0000000000000001ULL + +#define TXDMA_MBH(IDX) (DMC + 0x40030UL + (IDX) * 0x200UL) +#define TXDMA_MBH_MBADDR 0x0000000000000fffULL + +#define TXDMA_MBL(IDX) (DMC + 0x40038UL + (IDX) * 0x200UL) +#define TXDMA_MBL_MBADDR 0x00000000ffffffc0ULL + +#define TX_DMA_PRE_ST(IDX) (DMC + 0x40040UL + (IDX) * 0x200UL) +#define TX_DMA_PRE_ST_SHADOW_HD 0x000000000007ffffULL + +#define TX_RNG_ERR_LOGH(IDX) (DMC + 0x40048UL + (IDX) * 0x200UL) +#define TX_RNG_ERR_LOGH_ERR 0x0000000080000000ULL +#define TX_RNG_ERR_LOGH_MERR 0x0000000040000000ULL +#define TX_RNG_ERR_LOGH_ERRCODE 0x0000000038000000ULL +#define TX_RNG_ERR_LOGH_ERRADDR 0x0000000000000fffULL + +#define TX_RNG_ERR_LOGL(IDX) (DMC + 0x40050UL + (IDX) * 0x200UL) +#define TX_RNG_ERR_LOGL_ERRADDR 0x00000000ffffffffULL + +#define TDMC_INTR_DBG(IDX) (DMC + 0x40060UL + (IDX) * 0x200UL) +#define TDMC_INTR_DBG_MK 0x0000000000008000ULL +#define TDMC_INTR_DBG_MBOX_ERR 0x0000000000000080ULL +#define TDMC_INTR_DBG_PKT_SIZE_ERR 0x0000000000000040ULL +#define TDMC_INTR_DBG_TX_RING_OFLOW 0x0000000000000020ULL +#define TDMC_INTR_DBG_PREF_BUF_PAR_ERR 0x0000000000000010ULL +#define TDMC_INTR_DBG_NACK_PREF 0x0000000000000008ULL +#define TDMC_INTR_DBG_NACK_PKT_RD 0x0000000000000004ULL +#define TDMC_INTR_DBG_CONF_PART_ERR 0x0000000000000002ULL +#define TDMC_INTR_DBG_PKT_PART_ERR 0x0000000000000001ULL + +#define TX_CS_DBG(IDX) (DMC + 0x40068UL + (IDX) * 0x200UL) +#define TX_CS_DBG_PKT_CNT 0x0fff000000000000ULL + +#define TDMC_INJ_PAR_ERR(IDX) 
(DMC + 0x45040UL + (IDX) * 0x200UL) +#define TDMC_INJ_PAR_ERR_VAL 0x000000000000ffffULL + +#define TDMC_DBG_SEL(IDX) (DMC + 0x45080UL + (IDX) * 0x200UL) +#define TDMC_DBG_SEL_DBG_SEL 0x000000000000003fULL + +#define TDMC_TRAINING_VECTOR(IDX) (DMC + 0x45088UL + (IDX) * 0x200UL) +#define TDMC_TRAINING_VECTOR_VEC 0x00000000ffffffffULL + +#define TXC_DMA_MAX(CHAN) (FZC_TXC + 0x00000UL + (CHAN)*0x1000UL) +#define TXC_DMA_MAX_LEN(CHAN) (FZC_TXC + 0x00008UL + (CHAN)*0x1000UL) + +#define TXC_CONTROL (FZC_TXC + 0x20000UL) +#define TXC_CONTROL_ENABLE 0x0000000000000010ULL +#define TXC_CONTROL_PORT_ENABLE(X) (1 << (X)) + +#define TXC_TRAINING_VEC (FZC_TXC + 0x20008UL) +#define TXC_TRAINING_VEC_MASK 0x00000000ffffffffULL + +#define TXC_DEBUG (FZC_TXC + 0x20010UL) +#define TXC_DEBUG_SELECT 0x000000000000003fULL + +#define TXC_MAX_REORDER (FZC_TXC + 0x20018UL) +#define TXC_MAX_REORDER_PORT3 0x000000000f000000ULL +#define TXC_MAX_REORDER_PORT2 0x00000000000f0000ULL +#define TXC_MAX_REORDER_PORT1 0x0000000000000f00ULL +#define TXC_MAX_REORDER_PORT0 0x000000000000000fULL + +#define TXC_PORT_CTL(PORT) (FZC_TXC + 0x20020UL + (PORT)*0x100UL) +#define TXC_PORT_CTL_CLR_ALL_STAT 0x0000000000000001ULL + +#define TXC_PKT_STUFFED(PORT) (FZC_TXC + 0x20030UL + (PORT)*0x100UL) +#define TXC_PKT_STUFFED_PP_REORDER 0x00000000ffff0000ULL +#define TXC_PKT_STUFFED_PP_PACKETASSY 0x000000000000ffffULL + +#define TXC_PKT_XMIT(PORT) (FZC_TXC + 0x20038UL + (PORT)*0x100UL) +#define TXC_PKT_XMIT_BYTES 0x00000000ffff0000ULL +#define TXC_PKT_XMIT_PKTS 0x000000000000ffffULL + +#define TXC_ROECC_CTL(PORT) (FZC_TXC + 0x20040UL + (PORT)*0x100UL) +#define TXC_ROECC_CTL_DISABLE_UE 0x0000000080000000ULL +#define TXC_ROECC_CTL_DBL_BIT_ERR 0x0000000000020000ULL +#define TXC_ROECC_CTL_SNGL_BIT_ERR 0x0000000000010000ULL +#define TXC_ROECC_CTL_ALL_PKTS 0x0000000000000400ULL +#define TXC_ROECC_CTL_ALT_PKTS 0x0000000000000200ULL +#define TXC_ROECC_CTL_ONE_PKT_ONLY 0x0000000000000100ULL +#define TXC_ROECC_CTL_LST_PKT_LINE 0x0000000000000004ULL +#define TXC_ROECC_CTL_2ND_PKT_LINE 0x0000000000000002ULL +#define TXC_ROECC_CTL_1ST_PKT_LINE 0x0000000000000001ULL + +#define TXC_ROECC_ST(PORT) (FZC_TXC + 0x20048UL + (PORT)*0x100UL) +#define TXC_ROECC_CLR_ST 0x0000000080000000ULL +#define TXC_ROECC_CE 0x0000000000020000ULL +#define TXC_ROECC_UE 0x0000000000010000ULL +#define TXC_ROECC_ST_ECC_ADDR 0x00000000000003ffULL + +#define TXC_RO_DATA0(PORT) (FZC_TXC + 0x20050UL + (PORT)*0x100UL) +#define TXC_RO_DATA0_DATA0 0x00000000ffffffffULL /* bits 31:0 */ + +#define TXC_RO_DATA1(PORT) (FZC_TXC + 0x20058UL + (PORT)*0x100UL) +#define TXC_RO_DATA1_DATA1 0x00000000ffffffffULL /* bits 63:32 */ + +#define TXC_RO_DATA2(PORT) (FZC_TXC + 0x20060UL + (PORT)*0x100UL) +#define TXC_RO_DATA2_DATA2 0x00000000ffffffffULL /* bits 95:64 */ + +#define TXC_RO_DATA3(PORT) (FZC_TXC + 0x20068UL + (PORT)*0x100UL) +#define TXC_RO_DATA3_DATA3 0x00000000ffffffffULL /* bits 127:96 */ + +#define TXC_RO_DATA4(PORT) (FZC_TXC + 0x20070UL + (PORT)*0x100UL) +#define TXC_RO_DATA4_DATA4 0x0000000000ffffffULL /* bits 151:128 */ + +#define TXC_SFECC_CTL(PORT) (FZC_TXC + 0x20078UL + (PORT)*0x100UL) +#define TXC_SFECC_CTL_DISABLE_UE 0x0000000080000000ULL +#define TXC_SFECC_CTL_DBL_BIT_ERR 0x0000000000020000ULL +#define TXC_SFECC_CTL_SNGL_BIT_ERR 0x0000000000010000ULL +#define TXC_SFECC_CTL_ALL_PKTS 0x0000000000000400ULL +#define TXC_SFECC_CTL_ALT_PKTS 0x0000000000000200ULL +#define TXC_SFECC_CTL_ONE_PKT_ONLY 0x0000000000000100ULL +#define TXC_SFECC_CTL_LST_PKT_LINE 0x0000000000000004ULL +#define 
TXC_SFECC_CTL_2ND_PKT_LINE 0x0000000000000002ULL +#define TXC_SFECC_CTL_1ST_PKT_LINE 0x0000000000000001ULL + +#define TXC_SFECC_ST(PORT) (FZC_TXC + 0x20080UL + (PORT)*0x100UL) +#define TXC_SFECC_ST_CLR_ST 0x0000000080000000ULL +#define TXC_SFECC_ST_CE 0x0000000000020000ULL +#define TXC_SFECC_ST_UE 0x0000000000010000ULL +#define TXC_SFECC_ST_ECC_ADDR 0x00000000000003ffULL + +#define TXC_SF_DATA0(PORT) (FZC_TXC + 0x20088UL + (PORT)*0x100UL) +#define TXC_SF_DATA0_DATA0 0x00000000ffffffffULL /* bits 31:0 */ + +#define TXC_SF_DATA1(PORT) (FZC_TXC + 0x20090UL + (PORT)*0x100UL) +#define TXC_SF_DATA1_DATA1 0x00000000ffffffffULL /* bits 63:32 */ + +#define TXC_SF_DATA2(PORT) (FZC_TXC + 0x20098UL + (PORT)*0x100UL) +#define TXC_SF_DATA2_DATA2 0x00000000ffffffffULL /* bits 95:64 */ + +#define TXC_SF_DATA3(PORT) (FZC_TXC + 0x200a0UL + (PORT)*0x100UL) +#define TXC_SF_DATA3_DATA3 0x00000000ffffffffULL /* bits 127:96 */ + +#define TXC_SF_DATA4(PORT) (FZC_TXC + 0x200a8UL + (PORT)*0x100UL) +#define TXC_SF_DATA4_DATA4 0x0000000000ffffffULL /* bits 151:128 */ + +#define TXC_RO_TIDS(PORT) (FZC_TXC + 0x200b0UL + (PORT)*0x100UL) +#define TXC_RO_TIDS_IN_USE 0x00000000ffffffffULL + +#define TXC_RO_STATE0(PORT) (FZC_TXC + 0x200b8UL + (PORT)*0x100UL) +#define TXC_RO_STATE0_DUPLICATE_TID 0x00000000ffffffffULL + +#define TXC_RO_STATE1(PORT) (FZC_TXC + 0x200c0UL + (PORT)*0x100UL) +#define TXC_RO_STATE1_UNUSED_TID 0x00000000ffffffffULL + +#define TXC_RO_STATE2(PORT) (FZC_TXC + 0x200c8UL + (PORT)*0x100UL) +#define TXC_RO_STATE2_TRANS_TIMEOUT 0x00000000ffffffffULL + +#define TXC_RO_STATE3(PORT) (FZC_TXC + 0x200d0UL + (PORT)*0x100UL) +#define TXC_RO_STATE3_ENAB_SPC_WMARK 0x0000000080000000ULL +#define TXC_RO_STATE3_RO_SPC_WMARK 0x000000007fe00000ULL +#define TXC_RO_STATE3_ROFIFO_SPC_AVAIL 0x00000000001ff800ULL +#define TXC_RO_STATE3_ENAB_RO_WMARK 0x0000000000000100ULL +#define TXC_RO_STATE3_HIGH_RO_USED 0x00000000000000f0ULL +#define TXC_RO_STATE3_NUM_RO_USED 0x000000000000000fULL + +#define TXC_RO_CTL(PORT) (FZC_TXC + 0x200d8UL + (PORT)*0x100UL) +#define TXC_RO_CTL_CLR_FAIL_STATE 0x0000000080000000ULL +#define TXC_RO_CTL_RO_ADDR 0x000000000f000000ULL +#define TXC_RO_CTL_ADDR_FAILED 0x0000000000400000ULL +#define TXC_RO_CTL_DMA_FAILED 0x0000000000200000ULL +#define TXC_RO_CTL_LEN_FAILED 0x0000000000100000ULL +#define TXC_RO_CTL_CAPT_ADDR_FAILED 0x0000000000040000ULL +#define TXC_RO_CTL_CAPT_DMA_FAILED 0x0000000000020000ULL +#define TXC_RO_CTL_CAPT_LEN_FAILED 0x0000000000010000ULL +#define TXC_RO_CTL_RO_STATE_RD_DONE 0x0000000000000080ULL +#define TXC_RO_CTL_RO_STATE_WR_DONE 0x0000000000000040ULL +#define TXC_RO_CTL_RO_STATE_RD 0x0000000000000020ULL +#define TXC_RO_CTL_RO_STATE_WR 0x0000000000000010ULL +#define TXC_RO_CTL_RO_STATE_ADDR 0x000000000000000fULL + +#define TXC_RO_ST_DATA0(PORT) (FZC_TXC + 0x200e0UL + (PORT)*0x100UL) +#define TXC_RO_ST_DATA0_DATA0 0x00000000ffffffffULL + +#define TXC_RO_ST_DATA1(PORT) (FZC_TXC + 0x200e8UL + (PORT)*0x100UL) +#define TXC_RO_ST_DATA1_DATA1 0x00000000ffffffffULL + +#define TXC_RO_ST_DATA2(PORT) (FZC_TXC + 0x200f0UL + (PORT)*0x100UL) +#define TXC_RO_ST_DATA2_DATA2 0x00000000ffffffffULL + +#define TXC_RO_ST_DATA3(PORT) (FZC_TXC + 0x200f8UL + (PORT)*0x100UL) +#define TXC_RO_ST_DATA3_DATA3 0x00000000ffffffffULL + +#define TXC_PORT_PACKET_REQ(PORT) (FZC_TXC + 0x20100UL + (PORT)*0x100UL) +#define TXC_PORT_PACKET_REQ_GATHER_REQ 0x00000000f0000000ULL +#define TXC_PORT_PACKET_REQ_PKT_REQ 0x000000000fff0000ULL +#define TXC_PORT_PACKET_REQ_PERR_ABRT 0x000000000000ffffULL + + /* bits are same as 
TXC_INT_STAT */ +#define TXC_INT_STAT_DBG (FZC_TXC + 0x20420UL) + +#define TXC_INT_STAT (FZC_TXC + 0x20428UL) +#define TXC_INT_STAT_VAL_SHIFT(PORT) ((PORT) * 8) +#define TXC_INT_STAT_VAL(PORT) (0x3f << TXC_INT_STAT_VAL_SHIFT(PORT)) +#define TXC_INT_STAT_SF_CE(PORT) (0x01 << TXC_INT_STAT_VAL_SHIFT(PORT)) +#define TXC_INT_STAT_SF_UE(PORT) (0x02 << TXC_INT_STAT_VAL_SHIFT(PORT)) +#define TXC_INT_STAT_RO_CE(PORT) (0x04 << TXC_INT_STAT_VAL_SHIFT(PORT)) +#define TXC_INT_STAT_RO_UE(PORT) (0x08 << TXC_INT_STAT_VAL_SHIFT(PORT)) +#define TXC_INT_STAT_REORDER_ERR(PORT) (0x10 << TXC_INT_STAT_VAL_SHIFT(PORT)) +#define TXC_INT_STAT_PKTASM_DEAD(PORT) (0x20 << TXC_INT_STAT_VAL_SHIFT(PORT)) + +#define TXC_INT_MASK (FZC_TXC + 0x20430UL) +#define TXC_INT_MASK_VAL_SHIFT(PORT) ((PORT) * 8) +#define TXC_INT_MASK_VAL(PORT) (0x3f << TXC_INT_STAT_VAL_SHIFT(PORT)) + +#define TXC_INT_MASK_SF_CE 0x01 +#define TXC_INT_MASK_SF_UE 0x02 +#define TXC_INT_MASK_RO_CE 0x04 +#define TXC_INT_MASK_RO_UE 0x08 +#define TXC_INT_MASK_REORDER_ERR 0x10 +#define TXC_INT_MASK_PKTASM_DEAD 0x20 +#define TXC_INT_MASK_ALL 0x3f + +#define TXC_PORT_DMA(IDX) (FZC_TXC + 0x20028UL + (IDX)*0x100UL) + +#define ESPC_PIO_EN (FZC_PROM + 0x40000UL) +#define ESPC_PIO_EN_ENABLE 0x0000000000000001ULL + +#define ESPC_PIO_STAT (FZC_PROM + 0x40008UL) +#define ESPC_PIO_STAT_READ_START 0x0000000080000000ULL +#define ESPC_PIO_STAT_READ_END 0x0000000040000000ULL +#define ESPC_PIO_STAT_WRITE_INIT 0x0000000020000000ULL +#define ESPC_PIO_STAT_WRITE_END 0x0000000010000000ULL +#define ESPC_PIO_STAT_ADDR 0x0000000003ffff00ULL +#define ESPC_PIO_STAT_ADDR_SHIFT 8 +#define ESPC_PIO_STAT_DATA 0x00000000000000ffULL +#define ESPC_PIO_STAT_DATA_SHIFT 0 + +#define ESPC_NCR(IDX) (FZC_PROM + 0x40020UL + (IDX)*0x8UL) +#define ESPC_NCR_VAL 0x00000000ffffffffULL + +#define ESPC_MAC_ADDR0 ESPC_NCR(0) +#define ESPC_MAC_ADDR1 ESPC_NCR(1) +#define ESPC_NUM_PORTS_MACS ESPC_NCR(2) +#define ESPC_NUM_PORTS_MACS_VAL 0x00000000000000ffULL +#define ESPC_MOD_STR_LEN ESPC_NCR(4) +#define ESPC_MOD_STR_1 ESPC_NCR(5) +#define ESPC_MOD_STR_2 ESPC_NCR(6) +#define ESPC_MOD_STR_3 ESPC_NCR(7) +#define ESPC_MOD_STR_4 ESPC_NCR(8) +#define ESPC_MOD_STR_5 ESPC_NCR(9) +#define ESPC_MOD_STR_6 ESPC_NCR(10) +#define ESPC_MOD_STR_7 ESPC_NCR(11) +#define ESPC_MOD_STR_8 ESPC_NCR(12) +#define ESPC_BD_MOD_STR_LEN ESPC_NCR(13) +#define ESPC_BD_MOD_STR_1 ESPC_NCR(14) +#define ESPC_BD_MOD_STR_2 ESPC_NCR(15) +#define ESPC_BD_MOD_STR_3 ESPC_NCR(16) +#define ESPC_BD_MOD_STR_4 ESPC_NCR(17) + +#define ESPC_PHY_TYPE ESPC_NCR(18) +#define ESPC_PHY_TYPE_PORT0 0x00000000ff000000ULL +#define ESPC_PHY_TYPE_PORT0_SHIFT 24 +#define ESPC_PHY_TYPE_PORT1 0x0000000000ff0000ULL +#define ESPC_PHY_TYPE_PORT1_SHIFT 16 +#define ESPC_PHY_TYPE_PORT2 0x000000000000ff00ULL +#define ESPC_PHY_TYPE_PORT2_SHIFT 8 +#define ESPC_PHY_TYPE_PORT3 0x00000000000000ffULL +#define ESPC_PHY_TYPE_PORT3_SHIFT 0 + +#define ESPC_PHY_TYPE_1G_COPPER 3 +#define ESPC_PHY_TYPE_1G_FIBER 2 +#define ESPC_PHY_TYPE_10G_COPPER 1 +#define ESPC_PHY_TYPE_10G_FIBER 0 + +#define ESPC_MAX_FM_SZ ESPC_NCR(19) + +#define ESPC_INTR_NUM ESPC_NCR(20) +#define ESPC_INTR_NUM_PORT0 0x00000000ff000000ULL +#define ESPC_INTR_NUM_PORT1 0x0000000000ff0000ULL +#define ESPC_INTR_NUM_PORT2 0x000000000000ff00ULL +#define ESPC_INTR_NUM_PORT3 0x00000000000000ffULL + +#define ESPC_VER_IMGSZ ESPC_NCR(21) +#define ESPC_VER_IMGSZ_IMGSZ 0x00000000ffff0000ULL +#define ESPC_VER_IMGSZ_IMGSZ_SHIFT 16 +#define ESPC_VER_IMGSZ_VER 0x000000000000ffffULL +#define ESPC_VER_IMGSZ_VER_SHIFT 0 + +#define 
ESPC_CHKSUM ESPC_NCR(22) +#define ESPC_CHKSUM_SUM 0x00000000000000ffULL + +#define ESPC_EEPROM_SIZE 0x100000 + +#define CLASS_CODE_UNRECOG 0x00 +#define CLASS_CODE_DUMMY1 0x01 +#define CLASS_CODE_ETHERTYPE1 0x02 +#define CLASS_CODE_ETHERTYPE2 0x03 +#define CLASS_CODE_USER_PROG1 0x04 +#define CLASS_CODE_USER_PROG2 0x05 +#define CLASS_CODE_USER_PROG3 0x06 +#define CLASS_CODE_USER_PROG4 0x07 +#define CLASS_CODE_TCP_IPV4 0x08 +#define CLASS_CODE_UDP_IPV4 0x09 +#define CLASS_CODE_AH_ESP_IPV4 0x0a +#define CLASS_CODE_SCTP_IPV4 0x0b +#define CLASS_CODE_TCP_IPV6 0x0c +#define CLASS_CODE_UDP_IPV6 0x0d +#define CLASS_CODE_AH_ESP_IPV6 0x0e +#define CLASS_CODE_SCTP_IPV6 0x0f +#define CLASS_CODE_ARP 0x10 +#define CLASS_CODE_RARP 0x11 +#define CLASS_CODE_DUMMY2 0x12 +#define CLASS_CODE_DUMMY3 0x13 +#define CLASS_CODE_DUMMY4 0x14 +#define CLASS_CODE_DUMMY5 0x15 +#define CLASS_CODE_DUMMY6 0x16 +#define CLASS_CODE_DUMMY7 0x17 +#define CLASS_CODE_DUMMY8 0x18 +#define CLASS_CODE_DUMMY9 0x19 +#define CLASS_CODE_DUMMY10 0x1a +#define CLASS_CODE_DUMMY11 0x1b +#define CLASS_CODE_DUMMY12 0x1c +#define CLASS_CODE_DUMMY13 0x1d +#define CLASS_CODE_DUMMY14 0x1e +#define CLASS_CODE_DUMMY15 0x1f + +/* Logical devices and device groups */ +#define LDN_RXDMA(CHAN) (0 + (CHAN)) +#define LDN_RESV1(OFF) (16 + (OFF)) +#define LDN_TXDMA(CHAN) (32 + (CHAN)) +#define LDN_RESV2(OFF) (56 + (OFF)) +#define LDN_MIF 63 +#define LDN_MAC(PORT) (64 + (PORT)) +#define LDN_DEVICE_ERROR 68 +#define LDN_MAX LDN_DEVICE_ERROR + +#define NIU_LDG_MIN 0 +#define NIU_LDG_MAX 63 +#define NIU_NUM_LDG 64 +#define LDG_INVALID 0xff + +/* PHY stuff */ +#define NIU_PMA_PMD_DEV_ADDR 1 +#define NIU_PCS_DEV_ADDR 3 + +#define NIU_PHY_ID_MASK 0xfffff0f0 +#define NIU_PHY_ID_BCM8704 0x00206030 +#define NIU_PHY_ID_BCM5464R 0x002060b0 + +#define BCM8704_PMA_PMD_DEV_ADDR 1 +#define BCM8704_PCS_DEV_ADDR 2 +#define BCM8704_USER_DEV3_ADDR 3 +#define BCM8704_PHYXS_DEV_ADDR 4 +#define BCM8704_USER_DEV4_ADDR 4 + +#define BCM8704_PMD_RCV_SIGDET 0x000a +#define PMD_RCV_SIGDET_LANE3 0x0010 +#define PMD_RCV_SIGDET_LANE2 0x0008 +#define PMD_RCV_SIGDET_LANE1 0x0004 +#define PMD_RCV_SIGDET_LANE0 0x0002 +#define PMD_RCV_SIGDET_GLOBAL 0x0001 + +#define BCM8704_PCS_10G_R_STATUS 0x0020 +#define PCS_10G_R_STATUS_LINKSTAT 0x1000 +#define PCS_10G_R_STATUS_PRBS31_ABLE 0x0004 +#define PCS_10G_R_STATUS_HI_BER 0x0002 +#define PCS_10G_R_STATUS_BLK_LOCK 0x0001 + +#define BCM8704_USER_CONTROL 0xc800 +#define USER_CONTROL_OPTXENB_LVL 0x8000 +#define USER_CONTROL_OPTXRST_LVL 0x4000 +#define USER_CONTROL_OPBIASFLT_LVL 0x2000 +#define USER_CONTROL_OBTMPFLT_LVL 0x1000 +#define USER_CONTROL_OPPRFLT_LVL 0x0800 +#define USER_CONTROL_OPTXFLT_LVL 0x0400 +#define USER_CONTROL_OPRXLOS_LVL 0x0200 +#define USER_CONTROL_OPRXFLT_LVL 0x0100 +#define USER_CONTROL_OPTXON_LVL 0x0080 +#define USER_CONTROL_RES1 0x007f +#define USER_CONTROL_RES1_SHIFT 0 + +#define BCM8704_USER_ANALOG_CLK 0xc801 +#define BCM8704_USER_PMD_RX_CONTROL 0xc802 + +#define BCM8704_USER_PMD_TX_CONTROL 0xc803 +#define USER_PMD_TX_CTL_RES1 0xfe00 +#define USER_PMD_TX_CTL_XFP_CLKEN 0x0100 +#define USER_PMD_TX_CTL_TX_DAC_TXD 0x00c0 +#define USER_PMD_TX_CTL_TX_DAC_TXD_SH 6 +#define USER_PMD_TX_CTL_TX_DAC_TXCK 0x0030 +#define USER_PMD_TX_CTL_TX_DAC_TXCK_SH 4 +#define USER_PMD_TX_CTL_TSD_LPWREN 0x0008 +#define USER_PMD_TX_CTL_TSCK_LPWREN 0x0004 +#define USER_PMD_TX_CTL_CMU_LPWREN 0x0002 +#define USER_PMD_TX_CTL_SFIFORST 0x0001 + +#define BCM8704_USER_ANALOG_STATUS0 0xc804 +#define BCM8704_USER_OPT_DIGITAL_CTRL 0xc808 +#define 
BCM8704_USER_TX_ALARM_STATUS 0x9004 + +#define USER_ODIG_CTRL_FMODE 0x8000 +#define USER_ODIG_CTRL_TX_PDOWN 0x4000 +#define USER_ODIG_CTRL_RX_PDOWN 0x2000 +#define USER_ODIG_CTRL_EFILT_EN 0x1000 +#define USER_ODIG_CTRL_OPT_RST 0x0800 +#define USER_ODIG_CTRL_PCS_TIB 0x0400 +#define USER_ODIG_CTRL_PCS_RI 0x0200 +#define USER_ODIG_CTRL_RESV1 0x0180 +#define USER_ODIG_CTRL_GPIOS 0x0060 +#define USER_ODIG_CTRL_GPIOS_SHIFT 5 +#define USER_ODIG_CTRL_RESV2 0x0010 +#define USER_ODIG_CTRL_LB_ERR_DIS 0x0008 +#define USER_ODIG_CTRL_RESV3 0x0006 +#define USER_ODIG_CTRL_TXONOFF_PD_DIS 0x0001 + +#define BCM8704_PHYXS_XGXS_LANE_STAT 0x0018 +#define PHYXS_XGXS_LANE_STAT_ALINGED 0x1000 +#define PHYXS_XGXS_LANE_STAT_PATTEST 0x0800 +#define PHYXS_XGXS_LANE_STAT_MAGIC 0x0400 +#define PHYXS_XGXS_LANE_STAT_LANE3 0x0008 +#define PHYXS_XGXS_LANE_STAT_LANE2 0x0004 +#define PHYXS_XGXS_LANE_STAT_LANE1 0x0002 +#define PHYXS_XGXS_LANE_STAT_LANE0 0x0001 + +#define BCM5464R_AUX_CTL 24 +#define BCM5464R_AUX_CTL_EXT_LB 0x8000 +#define BCM5464R_AUX_CTL_EXT_PLEN 0x4000 +#define BCM5464R_AUX_CTL_ER1000 0x3000 +#define BCM5464R_AUX_CTL_ER1000_SHIFT 12 +#define BCM5464R_AUX_CTL_RESV1 0x0800 +#define BCM5464R_AUX_CTL_WRITE_1 0x0400 +#define BCM5464R_AUX_CTL_RESV2 0x0300 +#define BCM5464R_AUX_CTL_PRESP_DIS 0x0080 +#define BCM5464R_AUX_CTL_RESV3 0x0040 +#define BCM5464R_AUX_CTL_ER100 0x0030 +#define BCM5464R_AUX_CTL_ER100_SHIFT 4 +#define BCM5464R_AUX_CTL_DIAG_MODE 0x0008 +#define BCM5464R_AUX_CTL_SR_SEL 0x0007 +#define BCM5464R_AUX_CTL_SR_SEL_SHIFT 0 + +#define BCM5464R_CTRL1000_AS_MASTER 0x0800 +#define BCM5464R_CTRL1000_ENABLE_AS_MASTER 0x1000 + +#define RCR_ENTRY_MULTI 0x8000000000000000ULL +#define RCR_ENTRY_PKT_TYPE 0x6000000000000000ULL +#define RCR_ENTRY_PKT_TYPE_SHIFT 61 +#define RCR_ENTRY_ZERO_COPY 0x1000000000000000ULL +#define RCR_ENTRY_NOPORT 0x0800000000000000ULL +#define RCR_ENTRY_PROMISC 0x0400000000000000ULL +#define RCR_ENTRY_ERROR 0x0380000000000000ULL +#define RCR_ENTRY_DCF_ERR 0x0040000000000000ULL +#define RCR_ENTRY_L2_LEN 0x003fff0000000000ULL +#define RCR_ENTRY_L2_LEN_SHIFT 40 +#define RCR_ENTRY_PKTBUFSZ 0x000000c000000000ULL +#define RCR_ENTRY_PKTBUFSZ_SHIFT 38 +#define RCR_ENTRY_PKT_BUF_ADDR 0x0000003fffffffffULL /* bits 43:6 */ +#define RCR_ENTRY_PKT_BUF_ADDR_SHIFT 6 + +#define RCR_PKT_TYPE_OTHER 0x0 +#define RCR_PKT_TYPE_TCP 0x1 +#define RCR_PKT_TYPE_UDP 0x2 +#define RCR_PKT_TYPE_SCTP 0x3 + +#define NIU_RXPULL_MAX ETH_HLEN + +struct rx_pkt_hdr0 { +#if defined(__LITTLE_ENDIAN_BITFIELD) + u8 inputport:2, + maccheck:1, + class:4; + u8 vlan:1, + llcsnap:1, + noport:1, + badip:1, + tcamhit:1, + tres:2, + tzfvld:1; +#elif defined(__BIG_ENDIAN_BITFIELD) + u8 class:4, + maccheck:1, + inputport:2; + u8 tzfvld:1, + tres:2, + tcamhit:1, + badip:1, + noport:1, + llcsnap:1, + vlan:1; +#endif +}; + +struct rx_pkt_hdr1 { + u8 hwrsvd1; + u8 tcammatch; +#if defined(__LITTLE_ENDIAN_BITFIELD) + u8 hwrsvd2:2, + hashit:1, + exact:1, + hzfvld:1, + hashsidx:3; +#elif defined(__BIG_ENDIAN_BITFIELD) + u8 hashsidx:3, + hzfvld:1, + exact:1, + hashit:1, + hwrsvd2:2; +#endif + u8 zcrsvd; + + /* Bits 11:8 of zero copy flow ID. */ +#if defined(__LITTLE_ENDIAN_BITFIELD) + u8 hwrsvd3:4, zflowid0:4; +#elif defined(__BIG_ENDIAN_BITFIELD) + u8 zflowid0:4, hwrsvd3:4; +#endif + + /* Bits 7:0 of zero copy flow ID. */ + u8 zflowid1; + + /* Bits 15:8 of hash value, H2. */ + u8 hashval2_0; + + /* Bits 7:0 of hash value, H2. */ + u8 hashval2_1; + + /* Bits 19:16 of hash value, H1. 
*/ +#if defined(__LITTLE_ENDIAN_BITFIELD) + u8 hwrsvd4:4, hashval1_0:4; +#elif defined(__BIG_ENDIAN_BITFIELD) + u8 hashval1_0:4, hwrsvd4:4; +#endif + + /* Bits 15:8 of hash value, H1. */ + u8 hashval1_1; + + /* Bits 7:0 of hash value, H1. */ + u8 hashval1_2; + + u8 usrdata_0; /* Bits 39:32 of user data. */ + u8 usrdata_1; /* Bits 31:24 of user data. */ + u8 usrdata_2; /* Bits 23:16 of user data. */ + u8 usrdata_3; /* Bits 15:8 of user data. */ + u8 usrdata_4; /* Bits 7:0 of user data. */ +}; + +struct tx_dma_mbox { + u64 tx_dma_pre_st; + u64 tx_cs; + u64 tx_ring_kick; + u64 tx_ring_hdl; + u64 resv1; + u32 tx_rng_err_logl; + u32 tx_rng_err_logh; + u64 resv2; + u64 resv3; +}; + +struct tx_pkt_hdr { + __le64 flags; +#define TXHDR_PAD 0x0000000000000007ULL +#define TXHDR_PAD_SHIFT 0 +#define TXHDR_LEN 0x000000003fff0000ULL +#define TXHDR_LEN_SHIFT 16 +#define TXHDR_L4STUFF 0x0000003f00000000ULL +#define TXHDR_L4STUFF_SHIFT 32 +#define TXHDR_L4START 0x00003f0000000000ULL +#define TXHDR_L4START_SHIFT 40 +#define TXHDR_L3START 0x000f000000000000ULL +#define TXHDR_L3START_SHIFT 48 +#define TXHDR_IHL 0x00f0000000000000ULL +#define TXHDR_IHL_SHIFT 52 +#define TXHDR_VLAN 0x0100000000000000ULL +#define TXHDR_LLC 0x0200000000000000ULL +#define TXHDR_IP_VER 0x2000000000000000ULL +#define TXHDR_CSUM_NONE 0x0000000000000000ULL +#define TXHDR_CSUM_TCP 0x4000000000000000ULL +#define TXHDR_CSUM_UDP 0x8000000000000000ULL +#define TXHDR_CSUM_SCTP 0xc000000000000000ULL + __le64 resv; +}; + +#define TX_DESC_SOP 0x8000000000000000ULL +#define TX_DESC_MARK 0x4000000000000000ULL +#define TX_DESC_NUM_PTR 0x3c00000000000000ULL +#define TX_DESC_NUM_PTR_SHIFT 58 +#define TX_DESC_TR_LEN 0x01fff00000000000ULL +#define TX_DESC_TR_LEN_SHIFT 44 +#define TX_DESC_SAD 0x00000fffffffffffULL +#define TX_DESC_SAD_SHIFT 0 + +struct tx_buff_info { + struct sk_buff *skb; + u64 mapping; +}; + +struct txdma_mailbox { + __le64 tx_dma_pre_st; + __le64 tx_cs; + __le64 tx_ring_kick; + __le64 tx_ring_hdl; + __le64 resv1; + __le32 tx_rng_err_logl; + __le32 tx_rng_err_logh; + __le64 resv2[2]; +} __attribute__((aligned(64))); + +#define MAX_TX_RING_SIZE 256 +#define MAX_TX_DESC_LEN 4076 + +struct tx_ring_info { + struct tx_buff_info tx_buffs[MAX_TX_RING_SIZE]; + struct niu *np; + u64 tx_cs; + int pending; + int prod; + int cons; + int wrap_bit; + u16 last_pkt_cnt; + u16 tx_channel; + u16 mark_counter; + u16 mark_freq; + u16 mark_pending; + u16 __pad; + struct txdma_mailbox *mbox; + __le64 *descr; + + u64 tx_packets; + u64 tx_bytes; + u64 tx_errors; + + u64 mbox_dma; + u64 descr_dma; + int max_burst; +}; + +#define NEXT_TX(tp, index) \ + (((index) + 1) < (tp)->pending ? 
((index) + 1) : 0) + +static inline u32 niu_tx_avail(struct tx_ring_info *tp) +{ + return (tp->pending - + ((tp->prod - tp->cons) & (MAX_TX_RING_SIZE - 1))); +} + +struct rxdma_mailbox { + __le64 rx_dma_ctl_stat; + __le64 rbr_stat; + __le32 rbr_hdl; + __le32 rbr_hdh; + __le64 resv1; + __le32 rcrstat_c; + __le32 rcrstat_b; + __le64 rcrstat_a; + __le64 resv2[2]; +} __attribute__((aligned(64))); + +#define MAX_RBR_RING_SIZE 128 +#define MAX_RCR_RING_SIZE (MAX_RBR_RING_SIZE * 2) + +#define RBR_REFILL_MIN 16 + +#define RX_SKB_ALLOC_SIZE 128 + NET_IP_ALIGN + +struct rx_ring_info { + struct niu *np; + int rx_channel; + u16 rbr_block_size; + u16 rbr_blocks_per_page; + u16 rbr_sizes[4]; + unsigned int rcr_index; + unsigned int rcr_table_size; + unsigned int rbr_index; + unsigned int rbr_pending; + unsigned int rbr_refill_pending; + unsigned int rbr_kick_thresh; + unsigned int rbr_table_size; + struct page **rxhash; + struct rxdma_mailbox *mbox; + __le64 *rcr; + __le32 *rbr; +#define RBR_DESCR_ADDR_SHIFT 12 + + u64 rx_packets; + u64 rx_bytes; + u64 rx_dropped; + u64 rx_errors; + + u64 mbox_dma; + u64 rcr_dma; + u64 rbr_dma; + + /* WRED */ + int nonsyn_window; + int nonsyn_threshold; + int syn_window; + int syn_threshold; + + /* interrupt mitigation */ + int rcr_pkt_threshold; + int rcr_timeout; +}; + +#define NEXT_RCR(rp, index) \ + (((index) + 1) < (rp)->rcr_table_size ? ((index) + 1) : 0) +#define NEXT_RBR(rp, index) \ + (((index) + 1) < (rp)->rbr_table_size ? ((index) + 1) : 0) + +#define NIU_MAX_PORTS 4 +#define NIU_NUM_RXCHAN 16 +#define NIU_NUM_TXCHAN 24 +#define MAC_NUM_HASH 16 + +#define NIU_MAX_MTU 9216 + +#define NIU_VPD_MIN_MAJOR 3 +#define NIU_VPD_MIN_MINOR 4 + +#define NIU_VPD_MODEL_MAX 32 +#define NIU_VPD_BD_MODEL_MAX 16 +#define NIU_VPD_VERSION_MAX 64 +#define NIU_VPD_PHY_TYPE_MAX 8 + +struct niu_vpd { + char model[NIU_VPD_MODEL_MAX]; + char board_model[NIU_VPD_BD_MODEL_MAX]; + char version[NIU_VPD_VERSION_MAX]; + char phy_type[NIU_VPD_PHY_TYPE_MAX]; + u8 mac_num; + u8 __pad; + u8 local_mac[6]; + int fcode_major; + int fcode_minor; +}; + +struct niu_altmac_rdc { + u8 alt_mac_num; + u8 rdc_num; + u8 mac_pref; +}; + +struct niu_vlan_rdc { + u8 rdc_num; + u8 vlan_pref; +}; + +struct niu_classifier { + struct niu_altmac_rdc alt_mac_mappings[16]; + struct niu_vlan_rdc vlan_mappings[ENET_VLAN_TBL_NUM_ENTRIES]; + + u16 tcam_index; + u16 num_alt_mac_mappings; + + u32 h1_init; + u16 h2_init; +}; + +#define NIU_NUM_RDC_TABLES 8 +#define NIU_RDC_TABLE_SLOTS 16 + +struct rdc_table { + u8 rxdma_channel[NIU_RDC_TABLE_SLOTS]; +}; + +struct niu_rdc_tables { + struct rdc_table tables[NIU_NUM_RDC_TABLES]; + int first_table_num; + int num_tables; +}; + +#define PHY_TYPE_PMA_PMD 0 +#define PHY_TYPE_PCS 1 +#define PHY_TYPE_MII 2 +#define PHY_TYPE_MAX 3 + +struct phy_probe_info { + u32 phy_id[PHY_TYPE_MAX][NIU_MAX_PORTS]; + u8 phy_port[PHY_TYPE_MAX][NIU_MAX_PORTS]; + u8 cur[PHY_TYPE_MAX]; + + struct device_attribute phy_port_attrs[PHY_TYPE_MAX * NIU_MAX_PORTS]; + struct device_attribute phy_type_attrs[PHY_TYPE_MAX * NIU_MAX_PORTS]; + struct device_attribute phy_id_attrs[PHY_TYPE_MAX * NIU_MAX_PORTS]; +}; + +struct niu_tcam_entry { + u64 key[4]; + u64 key_mask[4]; + u64 assoc_data; +}; + +struct device_node; +union niu_parent_id { + struct { + int domain; + int bus; + int device; + } pci; + struct device_node *of; +}; + +struct niu; +struct niu_parent { + struct platform_device *plat_dev; + int index; + + union niu_parent_id id; + + struct niu *ports[NIU_MAX_PORTS]; + + atomic_t refcnt; + struct list_head 
list; + + spinlock_t lock; + + u32 flags; +#define PARENT_FLGS_CLS_HWINIT 0x00000001 + + u32 port_phy; +#define PORT_PHY_UNKNOWN 0x00000000 +#define PORT_PHY_INVALID 0xffffffff +#define PORT_TYPE_10G 0x01 +#define PORT_TYPE_1G 0x02 +#define PORT_TYPE_MASK 0x03 + + u8 rxchan_per_port[NIU_MAX_PORTS]; + u8 txchan_per_port[NIU_MAX_PORTS]; + + struct niu_rdc_tables rdc_group_cfg[NIU_MAX_PORTS]; + u8 rdc_default[NIU_MAX_PORTS]; + + u8 ldg_map[LDN_MAX + 1]; + + u8 plat_type; +#define PLAT_TYPE_INVALID 0x00 +#define PLAT_TYPE_ATLAS 0x01 +#define PLAT_TYPE_NIU 0x02 +#define PLAT_TYPE_VF_P0 0x03 +#define PLAT_TYPE_VF_P1 0x04 + + u8 num_ports; + + u16 tcam_num_entries; +#define NIU_PCI_TCAM_ENTRIES 256 +#define NIU_NONPCI_TCAM_ENTRIES 128 +#define NIU_TCAM_ENTRIES_MAX 256 + + int rxdma_clock_divider; + + struct phy_probe_info phy_probe_info; + + struct niu_tcam_entry tcam[NIU_TCAM_ENTRIES_MAX]; + u64 l2_cls[2]; + u64 l3_cls[4]; + u64 tcam_key[12]; + u64 flow_key[12]; +}; + +struct niu_ops { + void *(*alloc_coherent)(struct device *dev, size_t size, + u64 *handle, gfp_t flag); + void (*free_coherent)(struct device *dev, size_t size, + void *cpu_addr, u64 handle); + u64 (*map_page)(struct device *dev, struct page *page, + unsigned long offset, size_t size, + enum dma_data_direction direction); + void (*unmap_page)(struct device *dev, u64 dma_address, + size_t size, enum dma_data_direction direction); + u64 (*map_single)(struct device *dev, void *cpu_addr, + size_t size, + enum dma_data_direction direction); + void (*unmap_single)(struct device *dev, u64 dma_address, + size_t size, enum dma_data_direction direction); +}; + +struct niu_link_config { + /* Describes what we're trying to get. */ + u32 advertising; + u32 supported; + u16 speed; + u8 duplex; + u8 autoneg; + + /* Describes what we actually have. 
*/ + u16 active_speed; + u8 active_duplex; +#define SPEED_INVALID 0xffff +#define DUPLEX_INVALID 0xff +#define AUTONEG_INVALID 0xff + + u8 loopback_mode; +#define LOOPBACK_DISABLED 0x00 +#define LOOPBACK_PHY 0x01 +#define LOOPBACK_MAC 0x02 +}; + +struct niu_ldg { + struct napi_struct napi; + struct niu *np; + u8 ldg_num; + u8 timer; + u64 v0, v1, v2; + unsigned int irq; +}; + +struct niu_xmac_stats { + u64 tx_frames; + u64 tx_bytes; + u64 tx_fifo_errors; + u64 tx_overflow_errors; + u64 tx_max_pkt_size_errors; + u64 tx_underflow_errors; + + u64 rx_local_faults; + u64 rx_remote_faults; + u64 rx_link_faults; + u64 rx_align_errors; + u64 rx_frags; + u64 rx_mcasts; + u64 rx_bcasts; + u64 rx_hist_cnt1; + u64 rx_hist_cnt2; + u64 rx_hist_cnt3; + u64 rx_hist_cnt4; + u64 rx_hist_cnt5; + u64 rx_hist_cnt6; + u64 rx_hist_cnt7; + u64 rx_octets; + u64 rx_code_violations; + u64 rx_len_errors; + u64 rx_crc_errors; + u64 rx_underflows; + u64 rx_overflows; + + u64 pause_off_state; + u64 pause_on_state; + u64 pause_received; +}; + +struct niu_bmac_stats { + u64 tx_underflow_errors; + u64 tx_max_pkt_size_errors; + u64 tx_bytes; + u64 tx_frames; + + u64 rx_overflows; + u64 rx_frames; + u64 rx_align_errors; + u64 rx_crc_errors; + u64 rx_len_errors; + + u64 pause_off_state; + u64 pause_on_state; + u64 pause_received; +}; + +union niu_mac_stats { + struct niu_xmac_stats xmac; + struct niu_bmac_stats bmac; +}; + +struct niu_phy_ops { + int (*serdes_init)(struct niu *np); + int (*xcvr_init)(struct niu *np); + int (*link_status)(struct niu *np, int *); +}; + +struct of_device; +struct niu { + void __iomem *regs; + struct net_device *dev; + struct pci_dev *pdev; + struct device *device; + struct niu_parent *parent; + + u32 flags; +#define NIU_FLAGS_MSIX 0x00400000 /* MSI-X in use */ +#define NIU_FLAGS_MCAST 0x00200000 /* multicast filter enabled */ +#define NIU_FLAGS_PROMISC 0x00100000 /* PROMISC enabled */ +#define NIU_FLAGS_VPD_VALID 0x00080000 /* VPD has valid version */ +#define NIU_FLAGS_10G 0x00040000 /* 0=1G 1=10G */ +#define NIU_FLAGS_FIBER 0x00020000 /* 0=COPPER 1=FIBER */ +#define NIU_FLAGS_XMAC 0x00010000 /* 0=BMAC 1=XMAC */ + + u32 msg_enable; + + /* Protects hw programming, and ring state. */ + spinlock_t lock; + + const struct niu_ops *ops; + struct net_device_stats net_stats; + union niu_mac_stats mac_stats; + + struct rx_ring_info *rx_rings; + struct tx_ring_info *tx_rings; + int num_rx_rings; + int num_tx_rings; + + struct niu_ldg ldg[NIU_NUM_LDG]; + int num_ldg; + + void __iomem *mac_regs; + unsigned long ipp_off; + unsigned long pcs_off; + unsigned long xpcs_off; + + struct timer_list timer; + const struct niu_phy_ops *phy_ops; + int phy_addr; + + struct niu_link_config link_config; + + struct work_struct reset_task; + + u8 port; + u8 mac_xcvr; +#define MAC_XCVR_MII 1 +#define MAC_XCVR_PCS 2 +#define MAC_XCVR_XPCS 3 + + struct niu_classifier clas; + + struct niu_vpd vpd; + u32 eeprom_len; + + struct of_device *op; + void __iomem *vir_regs_1; + void __iomem *vir_regs_2; +}; + +#endif /* _NIU_H */ diff --git a/drivers/net/ns83820.c b/drivers/net/ns83820.c index ea80e6cb3dec1600ba4b5baecba4f4d56c612bba..ea71f6d82661b1aebc3a481d9d51806dbf77420d 100644 --- a/drivers/net/ns83820.c +++ b/drivers/net/ns83820.c @@ -1,4 +1,4 @@ -#define VERSION "0.22" +#define VERSION "0.23" /* ns83820.c by Benjamin LaHaise with contributions. * * Questions/comments/discussion to linux-ns83820@kvack.org. 
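The niu.h hunk above ends with the driver's ring bookkeeping helpers (NEXT_TX(), niu_tx_avail()) and the RCR_ENTRY_* field masks, which combine in the usual producer/consumer and mask-and-shift fashion. The standalone sketch below is illustrative only: the RCR_ENTRY_* defines are copied from the header in the patch, while DEMO_RING_SIZE, the demo_*() helpers and the sample entry value are made up for the example and are not part of the patch or of the kernel driver.

/*
 * Illustrative userspace sketch (not part of the patch): how the
 * RCR_ENTRY_* masks and the NEXT_TX()/niu_tx_avail()-style index
 * arithmetic from the niu.h hunk are intended to be used.
 */
#include <stdio.h>
#include <stdint.h>

/* Copied from the niu.h hunk above. */
#define RCR_ENTRY_MULTI			0x8000000000000000ULL
#define RCR_ENTRY_L2_LEN		0x003fff0000000000ULL
#define RCR_ENTRY_L2_LEN_SHIFT		40
#define RCR_ENTRY_PKTBUFSZ		0x000000c000000000ULL
#define RCR_ENTRY_PKTBUFSZ_SHIFT	38
#define RCR_ENTRY_PKT_BUF_ADDR		0x0000003fffffffffULL	/* bits 43:6 */
#define RCR_ENTRY_PKT_BUF_ADDR_SHIFT	6

/* Hypothetical ring size; same shape as the NEXT_TX()/NEXT_RCR() macros. */
#define DEMO_RING_SIZE	256
#define DEMO_NEXT(idx)	(((idx) + 1) < DEMO_RING_SIZE ? ((idx) + 1) : 0)

/* Mirrors niu_tx_avail(): free slots = pending - in-flight descriptors. */
static unsigned int demo_tx_avail(unsigned int pending,
				  unsigned int prod, unsigned int cons)
{
	return pending - ((prod - cons) & (DEMO_RING_SIZE - 1));
}

/* Decode a receive completion entry with the published masks/shifts. */
static void demo_decode_rcr(uint64_t entry)
{
	unsigned int l2_len = (entry & RCR_ENTRY_L2_LEN) >> RCR_ENTRY_L2_LEN_SHIFT;
	unsigned int bufsz  = (entry & RCR_ENTRY_PKTBUFSZ) >> RCR_ENTRY_PKTBUFSZ_SHIFT;
	/* The address field holds bits 43:6 of the DMA address. */
	uint64_t dma = (entry & RCR_ENTRY_PKT_BUF_ADDR) << RCR_ENTRY_PKT_BUF_ADDR_SHIFT;

	printf("multi=%d l2_len=%u bufsz_code=%u dma=0x%llx\n",
	       (entry & RCR_ENTRY_MULTI) ? 1 : 0, l2_len, bufsz,
	       (unsigned long long)dma);
}

int main(void)
{
	unsigned int idx = DEMO_RING_SIZE - 1;

	demo_decode_rcr(0x0000640000001000ULL);	/* made-up entry value */
	printf("tx slots free: %u\n", demo_tx_avail(DEMO_RING_SIZE, 10, 2));
	printf("index wrap: %u -> %u\n", idx, DEMO_NEXT(idx));
	return 0;
}

As in niu_tx_avail(), the producer/consumer difference is masked with (ring size - 1) so the subtraction stays correct after the producer index wraps, and because the RCR address field carries bits 43:6 of the DMA address it is shifted left by RCR_ENTRY_PKT_BUF_ADDR_SHIFT when the buffer address is reconstructed.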
@@ -1247,6 +1247,149 @@ static struct net_device_stats *ns83820_get_stats(struct net_device *ndev) return &dev->stats; } +/* Let ethtool retrieve info */ +static int ns83820_get_settings(struct net_device *ndev, + struct ethtool_cmd *cmd) +{ + struct ns83820 *dev = PRIV(ndev); + u32 cfg, tanar, tbicr; + int have_optical = 0; + int fullduplex = 0; + + /* + * Here's the list of available ethtool commands from other drivers: + * cmd->advertising = + * cmd->speed = + * cmd->duplex = + * cmd->port = 0; + * cmd->phy_address = + * cmd->transceiver = 0; + * cmd->autoneg = + * cmd->maxtxpkt = 0; + * cmd->maxrxpkt = 0; + */ + + /* read current configuration */ + cfg = readl(dev->base + CFG) ^ SPDSTS_POLARITY; + tanar = readl(dev->base + TANAR); + tbicr = readl(dev->base + TBICR); + + if (dev->CFG_cache & CFG_TBI_EN) { + /* we have an optical interface */ + have_optical = 1; + fullduplex = (cfg & CFG_DUPSTS) ? 1 : 0; + + } else { + /* We have copper */ + fullduplex = (cfg & CFG_DUPSTS) ? 1 : 0; + } + + cmd->supported = SUPPORTED_Autoneg; + + /* we have optical interface */ + if (dev->CFG_cache & CFG_TBI_EN) { + cmd->supported |= SUPPORTED_1000baseT_Half | + SUPPORTED_1000baseT_Full | + SUPPORTED_FIBRE; + cmd->port = PORT_FIBRE; + } /* TODO: else copper related support */ + + cmd->duplex = fullduplex ? DUPLEX_FULL : DUPLEX_HALF; + switch (cfg / CFG_SPDSTS0 & 3) { + case 2: + cmd->speed = SPEED_1000; + break; + case 1: + cmd->speed = SPEED_100; + break; + default: + cmd->speed = SPEED_10; + break; + } + cmd->autoneg = (tbicr & TBICR_MR_AN_ENABLE) ? 1: 0; + return 0; +} + +/* Let ethool change settings*/ +static int ns83820_set_settings(struct net_device *ndev, + struct ethtool_cmd *cmd) +{ + struct ns83820 *dev = PRIV(ndev); + u32 cfg, tanar; + int have_optical = 0; + int fullduplex = 0; + + /* read current configuration */ + cfg = readl(dev->base + CFG) ^ SPDSTS_POLARITY; + tanar = readl(dev->base + TANAR); + + if (dev->CFG_cache & CFG_TBI_EN) { + /* we have optical */ + have_optical = 1; + fullduplex = (tanar & TANAR_FULL_DUP); + + } else { + /* we have copper */ + fullduplex = cfg & CFG_DUPSTS; + } + + spin_lock_irq(&dev->misc_lock); + spin_lock(&dev->tx_lock); + + /* Set duplex */ + if (cmd->duplex != fullduplex) { + if (have_optical) { + /*set full duplex*/ + if (cmd->duplex == DUPLEX_FULL) { + /* force full duplex */ + writel(readl(dev->base + TXCFG) + | TXCFG_CSI | TXCFG_HBI | TXCFG_ATP, + dev->base + TXCFG); + writel(readl(dev->base + RXCFG) | RXCFG_RX_FD, + dev->base + RXCFG); + /* Light up full duplex LED */ + writel(readl(dev->base + GPIOR) | GPIOR_GP1_OUT, + dev->base + GPIOR); + } else { + /*TODO: set half duplex */ + } + + } else { + /*we have copper*/ + /* TODO: Set duplex for copper cards */ + } + printk(KERN_INFO "%s: Duplex set via ethtool\n", + ndev->name); + } + + /* Set autonegotiation */ + if (1) { + if (cmd->autoneg == AUTONEG_ENABLE) { + /* restart auto negotiation */ + writel(TBICR_MR_AN_ENABLE | TBICR_MR_RESTART_AN, + dev->base + TBICR); + writel(TBICR_MR_AN_ENABLE, dev->base + TBICR); + dev->linkstate = LINK_AUTONEGOTIATE; + + printk(KERN_INFO "%s: autoneg enabled via ethtool\n", + ndev->name); + } else { + /* disable auto negotiation */ + writel(0x00000000, dev->base + TBICR); + } + + printk(KERN_INFO "%s: autoneg %s via ethtool\n", ndev->name, + cmd->autoneg ? 
"ENABLED" : "DISABLED"); + } + + phy_intr(ndev); + spin_unlock(&dev->tx_lock); + spin_unlock_irq(&dev->misc_lock); + + return 0; +} +/* end ethtool get/set support -df */ + static void ns83820_get_drvinfo(struct net_device *ndev, struct ethtool_drvinfo *info) { struct ns83820 *dev = PRIV(ndev); @@ -1263,8 +1406,10 @@ static u32 ns83820_get_link(struct net_device *ndev) } static const struct ethtool_ops ops = { - .get_drvinfo = ns83820_get_drvinfo, - .get_link = ns83820_get_link + .get_settings = ns83820_get_settings, + .set_settings = ns83820_set_settings, + .get_drvinfo = ns83820_get_drvinfo, + .get_link = ns83820_get_link }; /* this function is called in irq context from the ISR */ @@ -1817,6 +1962,7 @@ static int __devinit ns83820_init_one(struct pci_dev *pci_dev, const struct pci_ long addr; int err; int using_dac = 0; + DECLARE_MAC_BUF(mac); /* See if we can set the dma mask early on; failure is fatal. */ if (sizeof(dma_addr_t) == 8 && @@ -1843,7 +1989,6 @@ static int __devinit ns83820_init_one(struct pci_dev *pci_dev, const struct pci_ spin_lock_init(&dev->misc_lock); dev->pci_dev = pci_dev; - SET_MODULE_OWNER(ndev); SET_NETDEV_DEV(ndev, &pci_dev->dev); INIT_WORK(&dev->tq_refill, queue_refill); @@ -2082,13 +2227,11 @@ static int __devinit ns83820_init_one(struct pci_dev *pci_dev, const struct pci_ ndev->features |= NETIF_F_HIGHDMA; } - printk(KERN_INFO "%s: ns83820 v" VERSION ": DP83820 v%u.%u: %02x:%02x:%02x:%02x:%02x:%02x io=0x%08lx irq=%d f=%s\n", + printk(KERN_INFO "%s: ns83820 v" VERSION ": DP83820 v%u.%u: %s io=0x%08lx irq=%d f=%s\n", ndev->name, (unsigned)readl(dev->base + SRR) >> 8, (unsigned)readl(dev->base + SRR) & 0xff, - ndev->dev_addr[0], ndev->dev_addr[1], - ndev->dev_addr[2], ndev->dev_addr[3], - ndev->dev_addr[4], ndev->dev_addr[5], + print_mac(mac, ndev->dev_addr), addr, pci_dev->irq, (ndev->features & NETIF_F_HIGHDMA) ? "h,sg" : "sg" ); diff --git a/drivers/net/pasemi_mac.c b/drivers/net/pasemi_mac.c index 0b3066a6fe405217250c2a97e67fdbadc926cbdc..9f9a421c99b33fa4a107d4aa7f3f19160a3b51b8 100644 --- a/drivers/net/pasemi_mac.c +++ b/drivers/net/pasemi_mac.c @@ -34,24 +34,29 @@ #include #include +#include #include "pasemi_mac.h" +/* We have our own align, since ppc64 in general has it at 0 because + * of design flaws in some of the server bridge chips. However, for + * PWRficient doing the unaligned copies is more expensive than doing + * unaligned DMA, so make sure the data is aligned instead. 
+ */ +#define LOCAL_SKB_ALIGN 2 /* TODO list * - * - Get rid of pci_{read,write}_config(), map registers with ioremap - * for performance - * - PHY support * - Multicast support * - Large MTU support - * - Other performance improvements + * - SW LRO + * - Multiqueue RX/TX */ /* Must be a power of two */ -#define RX_RING_SIZE 512 -#define TX_RING_SIZE 512 +#define RX_RING_SIZE 4096 +#define TX_RING_SIZE 4096 #define DEFAULT_MSG_ENABLE \ (NETIF_MSG_DRV | \ @@ -63,12 +68,16 @@ NETIF_MSG_RX_ERR | \ NETIF_MSG_TX_ERR) -#define TX_DESC(mac, num) ((mac)->tx->desc[(num) & (TX_RING_SIZE-1)]) -#define TX_DESC_INFO(mac, num) ((mac)->tx->desc_info[(num) & (TX_RING_SIZE-1)]) -#define RX_DESC(mac, num) ((mac)->rx->desc[(num) & (RX_RING_SIZE-1)]) -#define RX_DESC_INFO(mac, num) ((mac)->rx->desc_info[(num) & (RX_RING_SIZE-1)]) +#define TX_RING(mac, num) ((mac)->tx->ring[(num) & (TX_RING_SIZE-1)]) +#define TX_RING_INFO(mac, num) ((mac)->tx->ring_info[(num) & (TX_RING_SIZE-1)]) +#define RX_RING(mac, num) ((mac)->rx->ring[(num) & (RX_RING_SIZE-1)]) +#define RX_RING_INFO(mac, num) ((mac)->rx->ring_info[(num) & (RX_RING_SIZE-1)]) #define RX_BUFF(mac, num) ((mac)->rx->buffers[(num) & (RX_RING_SIZE-1)]) +#define RING_USED(ring) (((ring)->next_to_fill - (ring)->next_to_clean) \ + & ((ring)->size - 1)) +#define RING_AVAIL(ring) ((ring->size) - RING_USED(ring)) + #define BUF_SIZE 1646 /* 1500 MTU + ETH_HLEN + VLAN_HLEN + 2 64B cachelines */ MODULE_LICENSE("GPL"); @@ -81,6 +90,43 @@ MODULE_PARM_DESC(debug, "PA Semi MAC bitmapped debugging message enable value"); static struct pasdma_status *dma_status; +static int translation_enabled(void) +{ +#if defined(CONFIG_PPC_PASEMI_IOMMU_DMA_FORCE) + return 1; +#else + return firmware_has_feature(FW_FEATURE_LPAR); +#endif +} + +static void write_iob_reg(struct pasemi_mac *mac, unsigned int reg, + unsigned int val) +{ + out_le32(mac->iob_regs+reg, val); +} + +static unsigned int read_mac_reg(struct pasemi_mac *mac, unsigned int reg) +{ + return in_le32(mac->regs+reg); +} + +static void write_mac_reg(struct pasemi_mac *mac, unsigned int reg, + unsigned int val) +{ + out_le32(mac->regs+reg, val); +} + +static unsigned int read_dma_reg(struct pasemi_mac *mac, unsigned int reg) +{ + return in_le32(mac->dma_regs+reg); +} + +static void write_dma_reg(struct pasemi_mac *mac, unsigned int reg, + unsigned int val) +{ + out_le32(mac->dma_regs+reg, val); +} + static int pasemi_get_mac_addr(struct pasemi_mac *mac) { struct pci_dev *pdev = mac->pdev; @@ -128,11 +174,36 @@ static int pasemi_get_mac_addr(struct pasemi_mac *mac) return 0; } +static int pasemi_mac_unmap_tx_skb(struct pasemi_mac *mac, + struct sk_buff *skb, + dma_addr_t *dmas) +{ + int f; + int nfrags = skb_shinfo(skb)->nr_frags; + + pci_unmap_single(mac->dma_pdev, dmas[0], skb_headlen(skb), + PCI_DMA_TODEVICE); + + for (f = 0; f < nfrags; f++) { + skb_frag_t *frag = &skb_shinfo(skb)->frags[f]; + + pci_unmap_page(mac->dma_pdev, dmas[f+1], frag->size, + PCI_DMA_TODEVICE); + } + dev_kfree_skb_irq(skb); + + /* Freed descriptor slot + main SKB ptr + nfrags additional ptrs, + * aligned up to a power of 2 + */ + return (nfrags + 3) & ~1; +} + static int pasemi_mac_setup_rx_resources(struct net_device *dev) { struct pasemi_mac_rxring *ring; struct pasemi_mac *mac = netdev_priv(dev); int chan_id = mac->dma_rxch; + unsigned int cfg; ring = kzalloc(sizeof(*ring), GFP_KERNEL); @@ -141,22 +212,22 @@ static int pasemi_mac_setup_rx_resources(struct net_device *dev) spin_lock_init(&ring->lock); - ring->desc_info = kzalloc(sizeof(struct 
pasemi_mac_buffer) * + ring->size = RX_RING_SIZE; + ring->ring_info = kzalloc(sizeof(struct pasemi_mac_buffer) * RX_RING_SIZE, GFP_KERNEL); - if (!ring->desc_info) - goto out_desc_info; + if (!ring->ring_info) + goto out_ring_info; /* Allocate descriptors */ - ring->desc = dma_alloc_coherent(&mac->dma_pdev->dev, - RX_RING_SIZE * - sizeof(struct pas_dma_xct_descr), + ring->ring = dma_alloc_coherent(&mac->dma_pdev->dev, + RX_RING_SIZE * sizeof(u64), &ring->dma, GFP_KERNEL); - if (!ring->desc) - goto out_desc; + if (!ring->ring) + goto out_ring_desc; - memset(ring->desc, 0, RX_RING_SIZE * sizeof(struct pas_dma_xct_descr)); + memset(ring->ring, 0, RX_RING_SIZE * sizeof(u64)); ring->buffers = dma_alloc_coherent(&mac->dma_pdev->dev, RX_RING_SIZE * sizeof(u64), @@ -166,22 +237,34 @@ static int pasemi_mac_setup_rx_resources(struct net_device *dev) memset(ring->buffers, 0, RX_RING_SIZE * sizeof(u64)); - pci_write_config_dword(mac->dma_pdev, PAS_DMA_RXCHAN_BASEL(chan_id), - PAS_DMA_RXCHAN_BASEL_BRBL(ring->dma)); + write_dma_reg(mac, PAS_DMA_RXCHAN_BASEL(chan_id), PAS_DMA_RXCHAN_BASEL_BRBL(ring->dma)); + + write_dma_reg(mac, PAS_DMA_RXCHAN_BASEU(chan_id), + PAS_DMA_RXCHAN_BASEU_BRBH(ring->dma >> 32) | + PAS_DMA_RXCHAN_BASEU_SIZ(RX_RING_SIZE >> 3)); + + cfg = PAS_DMA_RXCHAN_CFG_HBU(2); + + if (translation_enabled()) + cfg |= PAS_DMA_RXCHAN_CFG_CTR; - pci_write_config_dword(mac->dma_pdev, PAS_DMA_RXCHAN_BASEU(chan_id), - PAS_DMA_RXCHAN_BASEU_BRBH(ring->dma >> 32) | - PAS_DMA_RXCHAN_BASEU_SIZ(RX_RING_SIZE >> 2)); + write_dma_reg(mac, PAS_DMA_RXCHAN_CFG(chan_id), cfg); - pci_write_config_dword(mac->dma_pdev, PAS_DMA_RXCHAN_CFG(chan_id), - PAS_DMA_RXCHAN_CFG_HBU(1)); + write_dma_reg(mac, PAS_DMA_RXINT_BASEL(mac->dma_if), + PAS_DMA_RXINT_BASEL_BRBL(ring->buf_dma)); - pci_write_config_dword(mac->dma_pdev, PAS_DMA_RXINT_BASEL(mac->dma_if), - PAS_DMA_RXINT_BASEL_BRBL(__pa(ring->buffers))); + write_dma_reg(mac, PAS_DMA_RXINT_BASEU(mac->dma_if), + PAS_DMA_RXINT_BASEU_BRBH(ring->buf_dma >> 32) | + PAS_DMA_RXINT_BASEU_SIZ(RX_RING_SIZE >> 3)); - pci_write_config_dword(mac->dma_pdev, PAS_DMA_RXINT_BASEU(mac->dma_if), - PAS_DMA_RXINT_BASEU_BRBH(__pa(ring->buffers) >> 32) | - PAS_DMA_RXINT_BASEU_SIZ(RX_RING_SIZE >> 3)); + cfg = PAS_DMA_RXINT_CFG_DHL(3) | PAS_DMA_RXINT_CFG_L2 | + PAS_DMA_RXINT_CFG_LW | PAS_DMA_RXINT_CFG_RBP | + PAS_DMA_RXINT_CFG_HEN; + + if (translation_enabled()) + cfg |= PAS_DMA_RXINT_CFG_ITRR | PAS_DMA_RXINT_CFG_ITR; + + write_dma_reg(mac, PAS_DMA_RXINT_CFG(mac->dma_if), cfg); ring->next_to_fill = 0; ring->next_to_clean = 0; @@ -194,11 +277,11 @@ static int pasemi_mac_setup_rx_resources(struct net_device *dev) out_buffers: dma_free_coherent(&mac->dma_pdev->dev, - RX_RING_SIZE * sizeof(struct pas_dma_xct_descr), - mac->rx->desc, mac->rx->dma); -out_desc: - kfree(ring->desc_info); -out_desc_info: + RX_RING_SIZE * sizeof(u64), + mac->rx->ring, mac->rx->dma); +out_ring_desc: + kfree(ring->ring_info); +out_ring_info: kfree(ring); out_ring: return -ENOMEM; @@ -211,6 +294,7 @@ static int pasemi_mac_setup_tx_resources(struct net_device *dev) u32 val; int chan_id = mac->dma_txch; struct pasemi_mac_txring *ring; + unsigned int cfg; ring = kzalloc(sizeof(*ring), GFP_KERNEL); if (!ring) @@ -218,35 +302,39 @@ static int pasemi_mac_setup_tx_resources(struct net_device *dev) spin_lock_init(&ring->lock); - ring->desc_info = kzalloc(sizeof(struct pasemi_mac_buffer) * + ring->size = TX_RING_SIZE; + ring->ring_info = kzalloc(sizeof(struct pasemi_mac_buffer) * TX_RING_SIZE, GFP_KERNEL); - if (!ring->desc_info) - goto 
out_desc_info; + if (!ring->ring_info) + goto out_ring_info; /* Allocate descriptors */ - ring->desc = dma_alloc_coherent(&mac->dma_pdev->dev, - TX_RING_SIZE * - sizeof(struct pas_dma_xct_descr), + ring->ring = dma_alloc_coherent(&mac->dma_pdev->dev, + TX_RING_SIZE * sizeof(u64), &ring->dma, GFP_KERNEL); - if (!ring->desc) - goto out_desc; + if (!ring->ring) + goto out_ring_desc; - memset(ring->desc, 0, TX_RING_SIZE * sizeof(struct pas_dma_xct_descr)); + memset(ring->ring, 0, TX_RING_SIZE * sizeof(u64)); - pci_write_config_dword(mac->dma_pdev, PAS_DMA_TXCHAN_BASEL(chan_id), - PAS_DMA_TXCHAN_BASEL_BRBL(ring->dma)); + write_dma_reg(mac, PAS_DMA_TXCHAN_BASEL(chan_id), + PAS_DMA_TXCHAN_BASEL_BRBL(ring->dma)); val = PAS_DMA_TXCHAN_BASEU_BRBH(ring->dma >> 32); - val |= PAS_DMA_TXCHAN_BASEU_SIZ(TX_RING_SIZE >> 2); + val |= PAS_DMA_TXCHAN_BASEU_SIZ(TX_RING_SIZE >> 3); + + write_dma_reg(mac, PAS_DMA_TXCHAN_BASEU(chan_id), val); - pci_write_config_dword(mac->dma_pdev, PAS_DMA_TXCHAN_BASEU(chan_id), val); + cfg = PAS_DMA_TXCHAN_CFG_TY_IFACE | + PAS_DMA_TXCHAN_CFG_TATTR(mac->dma_if) | + PAS_DMA_TXCHAN_CFG_UP | + PAS_DMA_TXCHAN_CFG_WT(2); - pci_write_config_dword(mac->dma_pdev, PAS_DMA_TXCHAN_CFG(chan_id), - PAS_DMA_TXCHAN_CFG_TY_IFACE | - PAS_DMA_TXCHAN_CFG_TATTR(mac->dma_if) | - PAS_DMA_TXCHAN_CFG_UP | - PAS_DMA_TXCHAN_CFG_WT(2)); + if (translation_enabled()) + cfg |= PAS_DMA_TXCHAN_CFG_TRD | PAS_DMA_TXCHAN_CFG_TRR; - ring->next_to_use = 0; + write_dma_reg(mac, PAS_DMA_TXCHAN_CFG(chan_id), cfg); + + ring->next_to_fill = 0; ring->next_to_clean = 0; snprintf(ring->irq_name, sizeof(ring->irq_name), @@ -255,9 +343,9 @@ static int pasemi_mac_setup_tx_resources(struct net_device *dev) return 0; -out_desc: - kfree(ring->desc_info); -out_desc_info: +out_ring_desc: + kfree(ring->ring_info); +out_ring_info: kfree(ring); out_ring: return -ENOMEM; @@ -266,33 +354,37 @@ static int pasemi_mac_setup_tx_resources(struct net_device *dev) static void pasemi_mac_free_tx_resources(struct net_device *dev) { struct pasemi_mac *mac = netdev_priv(dev); - unsigned int i; + unsigned int i, j; struct pasemi_mac_buffer *info; - struct pas_dma_xct_descr *dp; - - for (i = 0; i < TX_RING_SIZE; i++) { - info = &TX_DESC_INFO(mac, i); - dp = &TX_DESC(mac, i); - if (info->dma) { - if (info->skb) { - pci_unmap_single(mac->dma_pdev, - info->dma, - info->skb->len, - PCI_DMA_TODEVICE); - dev_kfree_skb_any(info->skb); - } - info->dma = 0; - info->skb = NULL; - dp->mactx = 0; - dp->ptr = 0; - } + dma_addr_t dmas[MAX_SKB_FRAGS+1]; + int freed; + int start, limit; + + start = mac->tx->next_to_clean; + limit = mac->tx->next_to_fill; + + /* Compensate for when fill has wrapped and clean has not */ + if (start > limit) + limit += TX_RING_SIZE; + + for (i = start; i < limit; i += freed) { + info = &TX_RING_INFO(mac, i+1); + if (info->dma && info->skb) { + for (j = 0; j <= skb_shinfo(info->skb)->nr_frags; j++) + dmas[j] = TX_RING_INFO(mac, i+1+j).dma; + freed = pasemi_mac_unmap_tx_skb(mac, info->skb, dmas); + } else + freed = 2; } + for (i = 0; i < TX_RING_SIZE; i++) + TX_RING(mac, i) = 0; + dma_free_coherent(&mac->dma_pdev->dev, - TX_RING_SIZE * sizeof(struct pas_dma_xct_descr), - mac->tx->desc, mac->tx->dma); + TX_RING_SIZE * sizeof(u64), + mac->tx->ring, mac->tx->dma); - kfree(mac->tx->desc_info); + kfree(mac->tx->ring_info); kfree(mac->tx); mac->tx = NULL; } @@ -302,72 +394,66 @@ static void pasemi_mac_free_rx_resources(struct net_device *dev) struct pasemi_mac *mac = netdev_priv(dev); unsigned int i; struct pasemi_mac_buffer *info; - struct 
pas_dma_xct_descr *dp; for (i = 0; i < RX_RING_SIZE; i++) { - info = &RX_DESC_INFO(mac, i); - dp = &RX_DESC(mac, i); - if (info->skb) { - if (info->dma) { - pci_unmap_single(mac->dma_pdev, - info->dma, - info->skb->len, - PCI_DMA_FROMDEVICE); - dev_kfree_skb_any(info->skb); - } - info->dma = 0; - info->skb = NULL; - dp->macrx = 0; - dp->ptr = 0; + info = &RX_RING_INFO(mac, i); + if (info->skb && info->dma) { + pci_unmap_single(mac->dma_pdev, + info->dma, + info->skb->len, + PCI_DMA_FROMDEVICE); + dev_kfree_skb_any(info->skb); } + info->dma = 0; + info->skb = NULL; } + for (i = 0; i < RX_RING_SIZE; i++) + RX_RING(mac, i) = 0; + dma_free_coherent(&mac->dma_pdev->dev, - RX_RING_SIZE * sizeof(struct pas_dma_xct_descr), - mac->rx->desc, mac->rx->dma); + RX_RING_SIZE * sizeof(u64), + mac->rx->ring, mac->rx->dma); dma_free_coherent(&mac->dma_pdev->dev, RX_RING_SIZE * sizeof(u64), mac->rx->buffers, mac->rx->buf_dma); - kfree(mac->rx->desc_info); + kfree(mac->rx->ring_info); kfree(mac->rx); mac->rx = NULL; } -static void pasemi_mac_replenish_rx_ring(struct net_device *dev) +static void pasemi_mac_replenish_rx_ring(struct net_device *dev, int limit) { struct pasemi_mac *mac = netdev_priv(dev); - unsigned int i; - int start = mac->rx->next_to_fill; - unsigned int limit, count; - - limit = (mac->rx->next_to_clean + RX_RING_SIZE - - mac->rx->next_to_fill) & (RX_RING_SIZE - 1); - - /* Check to see if we're doing first-time setup */ - if (unlikely(mac->rx->next_to_clean == 0 && mac->rx->next_to_fill == 0)) - limit = RX_RING_SIZE; + int fill, count; if (limit <= 0) return; - i = start; - for (count = limit; count; count--) { - struct pasemi_mac_buffer *info = &RX_DESC_INFO(mac, i); - u64 *buff = &RX_BUFF(mac, i); + fill = mac->rx->next_to_fill; + for (count = 0; count < limit; count++) { + struct pasemi_mac_buffer *info = &RX_RING_INFO(mac, fill); + u64 *buff = &RX_BUFF(mac, fill); struct sk_buff *skb; dma_addr_t dma; + /* Entry in use? 
*/ + WARN_ON(*buff); + /* skb might still be in there for recycle on short receives */ if (info->skb) skb = info->skb; - else + else { skb = dev_alloc_skb(BUF_SIZE); + skb_reserve(skb, LOCAL_SKB_ALIGN); + } if (unlikely(!skb)) break; - dma = pci_map_single(mac->dma_pdev, skb->data, skb->len, + dma = pci_map_single(mac->dma_pdev, skb->data, + BUF_SIZE - LOCAL_SKB_ALIGN, PCI_DMA_FROMDEVICE); if (unlikely(dma_mapping_error(dma))) { @@ -378,19 +464,15 @@ static void pasemi_mac_replenish_rx_ring(struct net_device *dev) info->skb = skb; info->dma = dma; *buff = XCT_RXB_LEN(BUF_SIZE) | XCT_RXB_ADDR(dma); - i++; + fill++; } wmb(); - pci_write_config_dword(mac->dma_pdev, - PAS_DMA_RXCHAN_INCR(mac->dma_rxch), - limit - count); - pci_write_config_dword(mac->dma_pdev, - PAS_DMA_RXINT_INCR(mac->dma_if), - limit - count); + write_dma_reg(mac, PAS_DMA_RXINT_INCR(mac->dma_if), count); - mac->rx->next_to_fill += limit - count; + mac->rx->next_to_fill = (mac->rx->next_to_fill + count) & + (RX_RING_SIZE - 1); } static void pasemi_mac_restart_rx_intr(struct pasemi_mac *mac) @@ -404,9 +486,7 @@ static void pasemi_mac_restart_rx_intr(struct pasemi_mac *mac) reg = PAS_IOB_DMA_RXCH_RESET_PCNT(pcnt) | PAS_IOB_DMA_RXCH_RESET_PINTC; - pci_write_config_dword(mac->iob_pdev, - PAS_IOB_DMA_RXCH_RESET(mac->dma_rxch), - reg); + write_iob_reg(mac, PAS_IOB_DMA_RXCH_RESET(mac->dma_rxch), reg); } static void pasemi_mac_restart_tx_intr(struct pasemi_mac *mac) @@ -418,69 +498,96 @@ static void pasemi_mac_restart_tx_intr(struct pasemi_mac *mac) reg = PAS_IOB_DMA_TXCH_RESET_PCNT(pcnt) | PAS_IOB_DMA_TXCH_RESET_PINTC; - pci_write_config_dword(mac->iob_pdev, - PAS_IOB_DMA_TXCH_RESET(mac->dma_txch), reg); + write_iob_reg(mac, PAS_IOB_DMA_TXCH_RESET(mac->dma_txch), reg); } +static inline void pasemi_mac_rx_error(struct pasemi_mac *mac, u64 macrx) +{ + unsigned int rcmdsta, ccmdsta; + + if (!netif_msg_rx_err(mac)) + return; + + rcmdsta = read_dma_reg(mac, PAS_DMA_RXINT_RCMDSTA(mac->dma_if)); + ccmdsta = read_dma_reg(mac, PAS_DMA_RXCHAN_CCMDSTA(mac->dma_rxch)); + + printk(KERN_ERR "pasemi_mac: rx error. macrx %016lx, rx status %lx\n", + macrx, *mac->rx_status); + + printk(KERN_ERR "pasemi_mac: rcmdsta %08x ccmdsta %08x\n", + rcmdsta, ccmdsta); +} + +static inline void pasemi_mac_tx_error(struct pasemi_mac *mac, u64 mactx) +{ + unsigned int cmdsta; + + if (!netif_msg_tx_err(mac)) + return; + + cmdsta = read_dma_reg(mac, PAS_DMA_TXCHAN_TCMDSTA(mac->dma_txch)); + + printk(KERN_ERR "pasemi_mac: tx error. mactx 0x%016lx, "\ + "tx status 0x%016lx\n", mactx, *mac->tx_status); + + printk(KERN_ERR "pasemi_mac: tcmdsta 0x%08x\n", cmdsta); +} + static int pasemi_mac_clean_rx(struct pasemi_mac *mac, int limit) { unsigned int n; int count; - struct pas_dma_xct_descr *dp; struct pasemi_mac_buffer *info; struct sk_buff *skb; - unsigned int i, len; + unsigned int len; u64 macrx; dma_addr_t dma; + int buf_index; + u64 eval; spin_lock(&mac->rx->lock); n = mac->rx->next_to_clean; - for (count = limit; count; count--) { + prefetch(RX_RING(mac, n)); - rmb(); + for (count = 0; count < limit; count++) { + macrx = RX_RING(mac, n); - dp = &RX_DESC(mac, n); - macrx = dp->macrx; + if ((macrx & XCT_MACRX_E) || + (*mac->rx_status & PAS_STATUS_ERROR)) + pasemi_mac_rx_error(mac, macrx); if (!(macrx & XCT_MACRX_O)) break; - info = NULL; - /* We have to scan for our skb since there's no way - * to back-map them from the descriptor, and if we - * have several receive channels then they might not - * show up in the same order as they were put on the - * interface ring. 
- */ + BUG_ON(!(macrx & XCT_MACRX_RR_8BRES)); - dma = (dp->ptr & XCT_PTR_ADDR_M); - for (i = n; i < (n + RX_RING_SIZE); i++) { - info = &RX_DESC_INFO(mac, i); - if (info->dma == dma) - break; - } + eval = (RX_RING(mac, n+1) & XCT_RXRES_8B_EVAL_M) >> + XCT_RXRES_8B_EVAL_S; + buf_index = eval-1; + + dma = (RX_RING(mac, n+2) & XCT_PTR_ADDR_M); + info = &RX_RING_INFO(mac, buf_index); skb = info->skb; - info->dma = 0; - pci_unmap_single(mac->dma_pdev, dma, skb->len, - PCI_DMA_FROMDEVICE); + prefetch(skb); + prefetch(&skb->data_len); len = (macrx & XCT_MACRX_LLEN_M) >> XCT_MACRX_LLEN_S; if (len < 256) { - struct sk_buff *new_skb = - netdev_alloc_skb(mac->netdev, len + NET_IP_ALIGN); + struct sk_buff *new_skb; + + new_skb = netdev_alloc_skb(mac->netdev, + len + LOCAL_SKB_ALIGN); if (new_skb) { - skb_reserve(new_skb, NET_IP_ALIGN); - memcpy(new_skb->data - NET_IP_ALIGN, - skb->data - NET_IP_ALIGN, - len + NET_IP_ALIGN); + skb_reserve(new_skb, LOCAL_SKB_ALIGN); + memcpy(new_skb->data, skb->data, len); /* save the skb in buffer_info as good */ skb = new_skb; } @@ -488,73 +595,131 @@ static int pasemi_mac_clean_rx(struct pasemi_mac *mac, int limit) } else info->skb = NULL; - skb_put(skb, len); + pci_unmap_single(mac->dma_pdev, dma, len, PCI_DMA_FROMDEVICE); - skb->protocol = eth_type_trans(skb, mac->netdev); + info->dma = 0; + + skb_put(skb, len); - if ((macrx & XCT_MACRX_HTY_M) == XCT_MACRX_HTY_IPV4_OK) { - skb->ip_summed = CHECKSUM_COMPLETE; + if (likely((macrx & XCT_MACRX_HTY_M) == XCT_MACRX_HTY_IPV4_OK)) { + skb->ip_summed = CHECKSUM_UNNECESSARY; skb->csum = (macrx & XCT_MACRX_CSUM_M) >> XCT_MACRX_CSUM_S; } else skb->ip_summed = CHECKSUM_NONE; - mac->stats.rx_bytes += len; - mac->stats.rx_packets++; + mac->netdev->stats.rx_bytes += len; + mac->netdev->stats.rx_packets++; + skb->protocol = eth_type_trans(skb, mac->netdev); netif_receive_skb(skb); - dp->ptr = 0; - dp->macrx = 0; + RX_RING(mac, n) = 0; + RX_RING(mac, n+1) = 0; + + /* Need to zero it out since hardware doesn't, since the + * replenish loop uses it to tell when it's done. + */ + RX_BUFF(mac, buf_index) = 0; + + n += 4; + } - n++; + if (n > RX_RING_SIZE) { + /* Errata 5971 workaround: L2 target of headers */ + write_iob_reg(mac, PAS_IOB_COM_PKTHDRCNT, 0); + n &= (RX_RING_SIZE-1); } - mac->rx->next_to_clean += limit - count; - pasemi_mac_replenish_rx_ring(mac->netdev); + mac->rx->next_to_clean = n; + + /* Increase is in number of 16-byte entries, and since each descriptor + * with an 8BRES takes up 3x8 bytes (padded to 4x8), increase with + * count*2. 
+ */ + write_dma_reg(mac, PAS_DMA_RXCHAN_INCR(mac->dma_rxch), count << 1); + + pasemi_mac_replenish_rx_ring(mac->netdev, count); spin_unlock(&mac->rx->lock); return count; } +/* Can't make this too large or we blow the kernel stack limits */ +#define TX_CLEAN_BATCHSIZE (128/MAX_SKB_FRAGS) + static int pasemi_mac_clean_tx(struct pasemi_mac *mac) { - int i; - struct pasemi_mac_buffer *info; - struct pas_dma_xct_descr *dp; - int start, count; - int flags; - + int i, j; + unsigned int start, descr_count, buf_count, batch_limit; + unsigned int ring_limit; + unsigned int total_count; + unsigned long flags; + struct sk_buff *skbs[TX_CLEAN_BATCHSIZE]; + dma_addr_t dmas[TX_CLEAN_BATCHSIZE][MAX_SKB_FRAGS+1]; + + total_count = 0; + batch_limit = TX_CLEAN_BATCHSIZE; +restart: spin_lock_irqsave(&mac->tx->lock, flags); start = mac->tx->next_to_clean; - count = 0; + ring_limit = mac->tx->next_to_fill; - for (i = start; i < mac->tx->next_to_use; i++) { - dp = &TX_DESC(mac, i); - if (!dp || (dp->mactx & XCT_MACTX_O)) + /* Compensate for when fill has wrapped but clean has not */ + if (start > ring_limit) + ring_limit += TX_RING_SIZE; + + buf_count = 0; + descr_count = 0; + + for (i = start; + descr_count < batch_limit && i < ring_limit; + i += buf_count) { + u64 mactx = TX_RING(mac, i); + struct sk_buff *skb; + + if ((mactx & XCT_MACTX_E) || + (*mac->tx_status & PAS_STATUS_ERROR)) + pasemi_mac_tx_error(mac, mactx); + + if (unlikely(mactx & XCT_MACTX_O)) + /* Not yet transmitted */ break; - count++; + skb = TX_RING_INFO(mac, i+1).skb; + skbs[descr_count] = skb; - info = &TX_DESC_INFO(mac, i); + buf_count = 2 + skb_shinfo(skb)->nr_frags; + for (j = 0; j <= skb_shinfo(skb)->nr_frags; j++) + dmas[descr_count][j] = TX_RING_INFO(mac, i+1+j).dma; - pci_unmap_single(mac->dma_pdev, info->dma, - info->skb->len, PCI_DMA_TODEVICE); - dev_kfree_skb_irq(info->skb); + TX_RING(mac, i) = 0; + TX_RING(mac, i+1) = 0; - info->skb = NULL; - info->dma = 0; - dp->mactx = 0; - dp->ptr = 0; + /* Since we always fill with an even number of entries, make + * sure we skip any unused one at the end as well. + */ + if (buf_count & 1) + buf_count++; + descr_count++; } - mac->tx->next_to_clean += count; - spin_unlock_irqrestore(&mac->tx->lock, flags); + mac->tx->next_to_clean = i & (TX_RING_SIZE-1); + spin_unlock_irqrestore(&mac->tx->lock, flags); netif_wake_queue(mac->netdev); - return count; + for (i = 0; i < descr_count; i++) + pasemi_mac_unmap_tx_skb(mac, skbs[i], dmas[i]); + + total_count += descr_count; + + /* If the batch was full, try to clean more */ + if (descr_count == batch_limit) + goto restart; + + return total_count; } @@ -567,15 +732,10 @@ static irqreturn_t pasemi_mac_rx_intr(int irq, void *data) if (!(*mac->rx_status & PAS_STATUS_CAUSE_M)) return IRQ_NONE; - if (*mac->rx_status & PAS_STATUS_ERROR) - printk("rx_status reported error\n"); - /* Don't reset packet count so it won't fire again but clear * all others. 
*/ - pci_read_config_dword(mac->dma_pdev, PAS_DMA_RXINT_RCMDSTA(mac->dma_if), ®); - reg = 0; if (*mac->rx_status & PAS_STATUS_SOFT) reg |= PAS_IOB_DMA_RXCH_RESET_SINTC; @@ -584,11 +744,9 @@ static irqreturn_t pasemi_mac_rx_intr(int irq, void *data) if (*mac->rx_status & PAS_STATUS_TIMER) reg |= PAS_IOB_DMA_RXCH_RESET_TINTC; - netif_rx_schedule(dev); - - pci_write_config_dword(mac->iob_pdev, - PAS_IOB_DMA_RXCH_RESET(mac->dma_rxch), reg); + netif_rx_schedule(dev, &mac->napi); + write_iob_reg(mac, PAS_IOB_DMA_RXCH_RESET(mac->dma_rxch), reg); return IRQ_HANDLED; } @@ -613,9 +771,7 @@ static irqreturn_t pasemi_mac_tx_intr(int irq, void *data) if (*mac->tx_status & PAS_STATUS_ERROR) reg |= PAS_IOB_DMA_TXCH_RESET_DINTC; - pci_write_config_dword(mac->iob_pdev, - PAS_IOB_DMA_TXCH_RESET(mac->dma_txch), - reg); + write_iob_reg(mac, PAS_IOB_DMA_TXCH_RESET(mac->dma_txch), reg); return IRQ_HANDLED; } @@ -641,7 +797,7 @@ static void pasemi_adjust_link(struct net_device *dev) } else netif_carrier_on(dev); - pci_read_config_dword(mac->pdev, PAS_MAC_CFG_PCFG, &flags); + flags = read_mac_reg(mac, PAS_MAC_CFG_PCFG); new_flags = flags & ~(PAS_MAC_CFG_PCFG_HD | PAS_MAC_CFG_PCFG_SPD_M | PAS_MAC_CFG_PCFG_TSR_M); @@ -673,7 +829,7 @@ static void pasemi_adjust_link(struct net_device *dev) mac->link = mac->phydev->link; if (new_flags != flags) - pci_write_config_dword(mac->pdev, PAS_MAC_CFG_PCFG, new_flags); + write_mac_reg(mac, PAS_MAC_CFG_PCFG, new_flags); if (msg && netif_msg_link(mac)) printk(KERN_INFO "%s: Link is up at %d Mbps, %s duplex.\n", @@ -736,39 +892,30 @@ static int pasemi_mac_open(struct net_device *dev) int ret; /* enable rx section */ - pci_write_config_dword(mac->dma_pdev, PAS_DMA_COM_RXCMD, - PAS_DMA_COM_RXCMD_EN); + write_dma_reg(mac, PAS_DMA_COM_RXCMD, PAS_DMA_COM_RXCMD_EN); /* enable tx section */ - pci_write_config_dword(mac->dma_pdev, PAS_DMA_COM_TXCMD, - PAS_DMA_COM_TXCMD_EN); + write_dma_reg(mac, PAS_DMA_COM_TXCMD, PAS_DMA_COM_TXCMD_EN); flags = PAS_MAC_CFG_TXP_FCE | PAS_MAC_CFG_TXP_FPC(3) | PAS_MAC_CFG_TXP_SL(3) | PAS_MAC_CFG_TXP_COB(0xf) | PAS_MAC_CFG_TXP_TIFT(8) | PAS_MAC_CFG_TXP_TIFG(12); - pci_write_config_dword(mac->pdev, PAS_MAC_CFG_TXP, flags); + write_mac_reg(mac, PAS_MAC_CFG_TXP, flags); - flags = PAS_MAC_CFG_PCFG_S1 | PAS_MAC_CFG_PCFG_PE | - PAS_MAC_CFG_PCFG_PR | PAS_MAC_CFG_PCFG_CE; - - flags |= PAS_MAC_CFG_PCFG_TSR_1G | PAS_MAC_CFG_PCFG_SPD_1G; - - pci_write_config_dword(mac->iob_pdev, PAS_IOB_DMA_RXCH_CFG(mac->dma_rxch), - PAS_IOB_DMA_RXCH_CFG_CNTTH(0)); + write_iob_reg(mac, PAS_IOB_DMA_RXCH_CFG(mac->dma_rxch), + PAS_IOB_DMA_RXCH_CFG_CNTTH(0)); - pci_write_config_dword(mac->iob_pdev, PAS_IOB_DMA_TXCH_CFG(mac->dma_txch), - PAS_IOB_DMA_TXCH_CFG_CNTTH(32)); + write_iob_reg(mac, PAS_IOB_DMA_TXCH_CFG(mac->dma_txch), + PAS_IOB_DMA_TXCH_CFG_CNTTH(128)); /* Clear out any residual packet count state from firmware */ pasemi_mac_restart_rx_intr(mac); pasemi_mac_restart_tx_intr(mac); /* 0xffffff is max value, about 16ms */ - pci_write_config_dword(mac->iob_pdev, PAS_IOB_DMA_COM_TIMEOUTCFG, - PAS_IOB_DMA_COM_TIMEOUTCFG_TCNT(0xffffff)); - - pci_write_config_dword(mac->pdev, PAS_MAC_CFG_PCFG, flags); + write_iob_reg(mac, PAS_IOB_DMA_COM_TIMEOUTCFG, + PAS_IOB_DMA_COM_TIMEOUTCFG_TCNT(0xffffff)); ret = pasemi_mac_setup_rx_resources(dev); if (ret) @@ -778,27 +925,48 @@ static int pasemi_mac_open(struct net_device *dev) if (ret) goto out_tx_resources; - pci_write_config_dword(mac->pdev, PAS_MAC_IPC_CHNL, - PAS_MAC_IPC_CHNL_DCHNO(mac->dma_rxch) | - PAS_MAC_IPC_CHNL_BCH(mac->dma_rxch)); + 
write_mac_reg(mac, PAS_MAC_IPC_CHNL, + PAS_MAC_IPC_CHNL_DCHNO(mac->dma_rxch) | + PAS_MAC_IPC_CHNL_BCH(mac->dma_rxch)); /* enable rx if */ - pci_write_config_dword(mac->dma_pdev, - PAS_DMA_RXINT_RCMDSTA(mac->dma_if), - PAS_DMA_RXINT_RCMDSTA_EN); + write_dma_reg(mac, PAS_DMA_RXINT_RCMDSTA(mac->dma_if), + PAS_DMA_RXINT_RCMDSTA_EN | + PAS_DMA_RXINT_RCMDSTA_DROPS_M | + PAS_DMA_RXINT_RCMDSTA_BP | + PAS_DMA_RXINT_RCMDSTA_OO | + PAS_DMA_RXINT_RCMDSTA_BT); /* enable rx channel */ - pci_write_config_dword(mac->dma_pdev, - PAS_DMA_RXCHAN_CCMDSTA(mac->dma_rxch), - PAS_DMA_RXCHAN_CCMDSTA_EN | - PAS_DMA_RXCHAN_CCMDSTA_DU); + write_dma_reg(mac, PAS_DMA_RXCHAN_CCMDSTA(mac->dma_rxch), + PAS_DMA_RXCHAN_CCMDSTA_EN | + PAS_DMA_RXCHAN_CCMDSTA_DU | + PAS_DMA_RXCHAN_CCMDSTA_OD | + PAS_DMA_RXCHAN_CCMDSTA_FD | + PAS_DMA_RXCHAN_CCMDSTA_DT); /* enable tx channel */ - pci_write_config_dword(mac->dma_pdev, - PAS_DMA_TXCHAN_TCMDSTA(mac->dma_txch), - PAS_DMA_TXCHAN_TCMDSTA_EN); + write_dma_reg(mac, PAS_DMA_TXCHAN_TCMDSTA(mac->dma_txch), + PAS_DMA_TXCHAN_TCMDSTA_EN | + PAS_DMA_TXCHAN_TCMDSTA_SZ | + PAS_DMA_TXCHAN_TCMDSTA_DB | + PAS_DMA_TXCHAN_TCMDSTA_DE | + PAS_DMA_TXCHAN_TCMDSTA_DA); + + pasemi_mac_replenish_rx_ring(dev, RX_RING_SIZE); + + write_dma_reg(mac, PAS_DMA_RXCHAN_INCR(mac->dma_rxch), RX_RING_SIZE>>1); - pasemi_mac_replenish_rx_ring(dev); + flags = PAS_MAC_CFG_PCFG_S1 | PAS_MAC_CFG_PCFG_PE | + PAS_MAC_CFG_PCFG_PR | PAS_MAC_CFG_PCFG_CE; + + if (mac->type == MAC_TYPE_GMAC) + flags |= PAS_MAC_CFG_PCFG_TSR_1G | PAS_MAC_CFG_PCFG_SPD_1G; + else + flags |= PAS_MAC_CFG_PCFG_TSR_10G | PAS_MAC_CFG_PCFG_SPD_10G; + + /* Enable interface in MAC */ + write_mac_reg(mac, PAS_MAC_CFG_PCFG, flags); ret = pasemi_mac_phy_init(dev); /* Some configs don't have PHYs (XAUI etc), so don't complain about @@ -808,7 +976,7 @@ static int pasemi_mac_open(struct net_device *dev) dev_warn(&mac->pdev->dev, "phy init failed: %d\n", ret); netif_start_queue(dev); - netif_poll_enable(dev); + napi_enable(&mac->napi); /* Interrupts are a bit different for our DMA controller: While * it's got one a regular PCI device header, the interrupt there @@ -845,7 +1013,7 @@ static int pasemi_mac_open(struct net_device *dev) out_rx_int: free_irq(mac->tx_irq, dev); out_tx_int: - netif_poll_disable(dev); + napi_disable(&mac->napi); netif_stop_queue(dev); pasemi_mac_free_tx_resources(dev); out_tx_resources: @@ -860,7 +1028,7 @@ static int pasemi_mac_open(struct net_device *dev) static int pasemi_mac_close(struct net_device *dev) { struct pasemi_mac *mac = netdev_priv(dev); - unsigned int stat; + unsigned int sta; int retries; if (mac->phydev) { @@ -869,68 +1037,74 @@ static int pasemi_mac_close(struct net_device *dev) } netif_stop_queue(dev); + napi_disable(&mac->napi); + + sta = read_dma_reg(mac, PAS_DMA_RXINT_RCMDSTA(mac->dma_if)); + if (sta & (PAS_DMA_RXINT_RCMDSTA_BP | + PAS_DMA_RXINT_RCMDSTA_OO | + PAS_DMA_RXINT_RCMDSTA_BT)) + printk(KERN_DEBUG "pasemi_mac: rcmdsta error: 0x%08x\n", sta); + + sta = read_dma_reg(mac, PAS_DMA_RXCHAN_CCMDSTA(mac->dma_rxch)); + if (sta & (PAS_DMA_RXCHAN_CCMDSTA_DU | + PAS_DMA_RXCHAN_CCMDSTA_OD | + PAS_DMA_RXCHAN_CCMDSTA_FD | + PAS_DMA_RXCHAN_CCMDSTA_DT)) + printk(KERN_DEBUG "pasemi_mac: ccmdsta error: 0x%08x\n", sta); + + sta = read_dma_reg(mac, PAS_DMA_TXCHAN_TCMDSTA(mac->dma_txch)); + if (sta & (PAS_DMA_TXCHAN_TCMDSTA_SZ | + PAS_DMA_TXCHAN_TCMDSTA_DB | + PAS_DMA_TXCHAN_TCMDSTA_DE | + PAS_DMA_TXCHAN_TCMDSTA_DA)) + printk(KERN_DEBUG "pasemi_mac: tcmdsta error: 0x%08x\n", sta); /* Clean out any pending buffers */ 
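The read_dma_reg()/write_dma_reg()/write_iob_reg()/write_mac_reg() calls that replace the pci_*_config_dword() accesses in these hunks are not shown here; they are presumably thin MMIO wrappers over the ioremapped register windows added to struct pasemi_mac. A minimal sketch, assuming the usual powerpc out_le32()/in_le32() helpers and the regs/dma_regs/iob_regs fields introduced in pasemi_mac.h:

static inline unsigned int read_dma_reg(struct pasemi_mac *mac, unsigned int reg)
{
	/* dma_regs is the ioremapped window of the DMA controller (sketch) */
	return in_le32(mac->dma_regs + reg);
}

static inline void write_dma_reg(struct pasemi_mac *mac, unsigned int reg,
				 unsigned int val)
{
	out_le32(mac->dma_regs + reg, val);
}

static inline void write_iob_reg(struct pasemi_mac *mac, unsigned int reg,
				 unsigned int val)
{
	/* the I/O bridge and MAC windows would follow the same pattern */
	out_le32(mac->iob_regs + reg, val);
}

Moving from config-space accesses to memory-mapped ones avoids the comparatively expensive config read/write cycles on the hot RX/TX paths.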
pasemi_mac_clean_tx(mac); pasemi_mac_clean_rx(mac, RX_RING_SIZE); /* Disable interface */ - pci_write_config_dword(mac->dma_pdev, - PAS_DMA_TXCHAN_TCMDSTA(mac->dma_txch), - PAS_DMA_TXCHAN_TCMDSTA_ST); - pci_write_config_dword(mac->dma_pdev, - PAS_DMA_RXINT_RCMDSTA(mac->dma_if), - PAS_DMA_RXINT_RCMDSTA_ST); - pci_write_config_dword(mac->dma_pdev, - PAS_DMA_RXCHAN_CCMDSTA(mac->dma_rxch), - PAS_DMA_RXCHAN_CCMDSTA_ST); + write_dma_reg(mac, PAS_DMA_TXCHAN_TCMDSTA(mac->dma_txch), PAS_DMA_TXCHAN_TCMDSTA_ST); + write_dma_reg(mac, PAS_DMA_RXINT_RCMDSTA(mac->dma_if), PAS_DMA_RXINT_RCMDSTA_ST); + write_dma_reg(mac, PAS_DMA_RXCHAN_CCMDSTA(mac->dma_rxch), PAS_DMA_RXCHAN_CCMDSTA_ST); for (retries = 0; retries < MAX_RETRIES; retries++) { - pci_read_config_dword(mac->dma_pdev, - PAS_DMA_TXCHAN_TCMDSTA(mac->dma_txch), - &stat); - if (!(stat & PAS_DMA_TXCHAN_TCMDSTA_ACT)) + sta = read_dma_reg(mac, PAS_DMA_TXCHAN_TCMDSTA(mac->dma_txch)); + if (!(sta & PAS_DMA_TXCHAN_TCMDSTA_ACT)) break; cond_resched(); } - if (stat & PAS_DMA_TXCHAN_TCMDSTA_ACT) + if (sta & PAS_DMA_TXCHAN_TCMDSTA_ACT) dev_err(&mac->dma_pdev->dev, "Failed to stop tx channel\n"); for (retries = 0; retries < MAX_RETRIES; retries++) { - pci_read_config_dword(mac->dma_pdev, - PAS_DMA_RXCHAN_CCMDSTA(mac->dma_rxch), - &stat); - if (!(stat & PAS_DMA_RXCHAN_CCMDSTA_ACT)) + sta = read_dma_reg(mac, PAS_DMA_RXCHAN_CCMDSTA(mac->dma_rxch)); + if (!(sta & PAS_DMA_RXCHAN_CCMDSTA_ACT)) break; cond_resched(); } - if (stat & PAS_DMA_RXCHAN_CCMDSTA_ACT) + if (sta & PAS_DMA_RXCHAN_CCMDSTA_ACT) dev_err(&mac->dma_pdev->dev, "Failed to stop rx channel\n"); for (retries = 0; retries < MAX_RETRIES; retries++) { - pci_read_config_dword(mac->dma_pdev, - PAS_DMA_RXINT_RCMDSTA(mac->dma_if), - &stat); - if (!(stat & PAS_DMA_RXINT_RCMDSTA_ACT)) + sta = read_dma_reg(mac, PAS_DMA_RXINT_RCMDSTA(mac->dma_if)); + if (!(sta & PAS_DMA_RXINT_RCMDSTA_ACT)) break; cond_resched(); } - if (stat & PAS_DMA_RXINT_RCMDSTA_ACT) + if (sta & PAS_DMA_RXINT_RCMDSTA_ACT) dev_err(&mac->dma_pdev->dev, "Failed to stop rx interface\n"); /* Then, disable the channel. This must be done separately from * stopping, since you can't disable when active. 
*/ - pci_write_config_dword(mac->dma_pdev, - PAS_DMA_TXCHAN_TCMDSTA(mac->dma_txch), 0); - pci_write_config_dword(mac->dma_pdev, - PAS_DMA_RXCHAN_CCMDSTA(mac->dma_rxch), 0); - pci_write_config_dword(mac->dma_pdev, - PAS_DMA_RXINT_RCMDSTA(mac->dma_if), 0); + write_dma_reg(mac, PAS_DMA_TXCHAN_TCMDSTA(mac->dma_txch), 0); + write_dma_reg(mac, PAS_DMA_RXCHAN_CCMDSTA(mac->dma_rxch), 0); + write_dma_reg(mac, PAS_DMA_RXINT_RCMDSTA(mac->dma_if), 0); free_irq(mac->tx_irq, dev); free_irq(mac->rx_irq, dev); @@ -946,11 +1120,11 @@ static int pasemi_mac_start_tx(struct sk_buff *skb, struct net_device *dev) { struct pasemi_mac *mac = netdev_priv(dev); struct pasemi_mac_txring *txring; - struct pasemi_mac_buffer *info; - struct pas_dma_xct_descr *dp; - u64 dflags; - dma_addr_t map; - int flags; + u64 dflags, mactx; + dma_addr_t map[MAX_SKB_FRAGS+1]; + unsigned int map_size[MAX_SKB_FRAGS+1]; + unsigned long flags; + int i, nfrags; dflags = XCT_MACTX_O | XCT_MACTX_ST | XCT_MACTX_SS | XCT_MACTX_CRC_PAD; @@ -971,71 +1145,87 @@ static int pasemi_mac_start_tx(struct sk_buff *skb, struct net_device *dev) } } - map = pci_map_single(mac->dma_pdev, skb->data, skb->len, PCI_DMA_TODEVICE); + nfrags = skb_shinfo(skb)->nr_frags; - if (dma_mapping_error(map)) - return NETDEV_TX_BUSY; + map[0] = pci_map_single(mac->dma_pdev, skb->data, skb_headlen(skb), + PCI_DMA_TODEVICE); + map_size[0] = skb_headlen(skb); + if (dma_mapping_error(map[0])) + goto out_err_nolock; + + for (i = 0; i < nfrags; i++) { + skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; + + map[i+1] = pci_map_page(mac->dma_pdev, frag->page, + frag->page_offset, frag->size, + PCI_DMA_TODEVICE); + map_size[i+1] = frag->size; + if (dma_mapping_error(map[i+1])) { + nfrags = i; + goto out_err_nolock; + } + } + + mactx = dflags | XCT_MACTX_LLEN(skb->len); txring = mac->tx; spin_lock_irqsave(&txring->lock, flags); - if (txring->next_to_clean - txring->next_to_use == TX_RING_SIZE) { - spin_unlock_irqrestore(&txring->lock, flags); - pasemi_mac_clean_tx(mac); - pasemi_mac_restart_tx_intr(mac); - spin_lock_irqsave(&txring->lock, flags); - - if (txring->next_to_clean - txring->next_to_use == - TX_RING_SIZE) { - /* Still no room -- stop the queue and wait for tx - * intr when there's room. - */ - netif_stop_queue(dev); - goto out_err; - } + /* Avoid stepping on the same cache line that the DMA controller + * is currently about to send, so leave at least 8 words available. + * Total free space needed is mactx + fragments + 8 + */ + if (RING_AVAIL(txring) < nfrags + 10) { + /* no room -- stop the queue and wait for tx intr */ + netif_stop_queue(dev); + goto out_err; } + TX_RING(mac, txring->next_to_fill) = mactx; + txring->next_to_fill++; + TX_RING_INFO(mac, txring->next_to_fill).skb = skb; + for (i = 0; i <= nfrags; i++) { + TX_RING(mac, txring->next_to_fill+i) = + XCT_PTR_LEN(map_size[i]) | XCT_PTR_ADDR(map[i]); + TX_RING_INFO(mac, txring->next_to_fill+i).dma = map[i]; + } - dp = &TX_DESC(mac, txring->next_to_use); - info = &TX_DESC_INFO(mac, txring->next_to_use); + /* We have to add an even number of 8-byte entries to the ring + * even if the last one is unused. That means always an odd number + * of pointers + one mactx descriptor. 
+ */ + if (nfrags & 1) + nfrags++; - dp->mactx = dflags | XCT_MACTX_LLEN(skb->len); - dp->ptr = XCT_PTR_LEN(skb->len) | XCT_PTR_ADDR(map); - info->dma = map; - info->skb = skb; + txring->next_to_fill = (txring->next_to_fill + nfrags + 1) & + (TX_RING_SIZE-1); - txring->next_to_use++; - mac->stats.tx_packets++; - mac->stats.tx_bytes += skb->len; + dev->stats.tx_packets++; + dev->stats.tx_bytes += skb->len; spin_unlock_irqrestore(&txring->lock, flags); - pci_write_config_dword(mac->dma_pdev, - PAS_DMA_TXCHAN_INCR(mac->dma_txch), 1); + write_dma_reg(mac, PAS_DMA_TXCHAN_INCR(mac->dma_txch), (nfrags+2) >> 1); return NETDEV_TX_OK; out_err: spin_unlock_irqrestore(&txring->lock, flags); - pci_unmap_single(mac->dma_pdev, map, skb->len, PCI_DMA_TODEVICE); - return NETDEV_TX_BUSY; -} +out_err_nolock: + while (nfrags--) + pci_unmap_single(mac->dma_pdev, map[nfrags], map_size[nfrags], + PCI_DMA_TODEVICE); -static struct net_device_stats *pasemi_mac_get_stats(struct net_device *dev) -{ - struct pasemi_mac *mac = netdev_priv(dev); - - return &mac->stats; + return NETDEV_TX_BUSY; } - static void pasemi_mac_set_rx_mode(struct net_device *dev) { struct pasemi_mac *mac = netdev_priv(dev); unsigned int flags; - pci_read_config_dword(mac->pdev, PAS_MAC_CFG_PCFG, &flags); + flags = read_mac_reg(mac, PAS_MAC_CFG_PCFG); /* Set promiscuous */ if (dev->flags & IFF_PROMISC) @@ -1043,30 +1233,92 @@ static void pasemi_mac_set_rx_mode(struct net_device *dev) else flags &= ~PAS_MAC_CFG_PCFG_PR; - pci_write_config_dword(mac->pdev, PAS_MAC_CFG_PCFG, flags); + write_mac_reg(mac, PAS_MAC_CFG_PCFG, flags); } -static int pasemi_mac_poll(struct net_device *dev, int *budget) +static int pasemi_mac_poll(struct napi_struct *napi, int budget) { - int pkts, limit = min(*budget, dev->quota); - struct pasemi_mac *mac = netdev_priv(dev); - - pkts = pasemi_mac_clean_rx(mac, limit); - - dev->quota -= pkts; - *budget -= pkts; + struct pasemi_mac *mac = container_of(napi, struct pasemi_mac, napi); + struct net_device *dev = mac->netdev; + int pkts; - if (pkts < limit) { + pasemi_mac_clean_tx(mac); + pkts = pasemi_mac_clean_rx(mac, budget); + if (pkts < budget) { /* all done, no more packets present */ - netif_rx_complete(dev); + netif_rx_complete(dev, napi); pasemi_mac_restart_rx_intr(mac); - return 0; - } else { - /* used up our quantum, so reschedule */ - return 1; } + return pkts; +} + +static void __iomem * __devinit map_onedev(struct pci_dev *p, int index) +{ + struct device_node *dn; + void __iomem *ret; + + dn = pci_device_to_OF_node(p); + if (!dn) + goto fallback; + + ret = of_iomap(dn, index); + if (!ret) + goto fallback; + + return ret; +fallback: + /* This is hardcoded and ugly, but we have some firmware versions + * that don't provide the register space in the device tree. Luckily + * they are at well-known locations so we can just do the math here. 
+ */ + return ioremap(0xe0000000 + (p->devfn << 12), 0x2000); +} + +static int __devinit pasemi_mac_map_regs(struct pasemi_mac *mac) +{ + struct resource res; + struct device_node *dn; + int err; + + mac->dma_pdev = pci_get_device(PCI_VENDOR_ID_PASEMI, 0xa007, NULL); + if (!mac->dma_pdev) { + dev_err(&mac->pdev->dev, "Can't find DMA Controller\n"); + return -ENODEV; + } + + mac->iob_pdev = pci_get_device(PCI_VENDOR_ID_PASEMI, 0xa001, NULL); + if (!mac->iob_pdev) { + dev_err(&mac->pdev->dev, "Can't find I/O Bridge\n"); + return -ENODEV; + } + + mac->regs = map_onedev(mac->pdev, 0); + mac->dma_regs = map_onedev(mac->dma_pdev, 0); + mac->iob_regs = map_onedev(mac->iob_pdev, 0); + + if (!mac->regs || !mac->dma_regs || !mac->iob_regs) { + dev_err(&mac->pdev->dev, "Can't map registers\n"); + return -ENODEV; + } + + /* The dma status structure is located in the I/O bridge, and + * is cache coherent. + */ + if (!dma_status) { + dn = pci_device_to_OF_node(mac->iob_pdev); + if (dn) + err = of_address_to_resource(dn, 1, &res); + if (!dn || err) { + /* Fallback for old firmware */ + res.start = 0xfd800000; + res.end = res.start + 0x1000; + } + dma_status = __ioremap(res.start, res.end-res.start, 0); + } + + return 0; } static int __devinit @@ -1076,6 +1328,7 @@ pasemi_mac_probe(struct pci_dev *pdev, const struct pci_device_id *ent) struct net_device *dev; struct pasemi_mac *mac; int err; + DECLARE_MAC_BUF(mac_buf); err = pci_enable_device(pdev); if (err) @@ -1089,7 +1342,6 @@ pasemi_mac_probe(struct pci_dev *pdev, const struct pci_device_id *ent) goto out_disable_device; } - SET_MODULE_OWNER(dev); pci_set_drvdata(pdev, dev); SET_NETDEV_DEV(dev, &pdev->dev); @@ -1097,21 +1349,10 @@ pasemi_mac_probe(struct pci_dev *pdev, const struct pci_device_id *ent) mac->pdev = pdev; mac->netdev = dev; - mac->dma_pdev = pci_get_device(PCI_VENDOR_ID_PASEMI, 0xa007, NULL); - if (!mac->dma_pdev) { - dev_err(&pdev->dev, "Can't find DMA Controller\n"); - err = -ENODEV; - goto out_free_netdev; - } + netif_napi_add(dev, &mac->napi, pasemi_mac_poll, 64); - mac->iob_pdev = pci_get_device(PCI_VENDOR_ID_PASEMI, 0xa001, NULL); - - if (!mac->iob_pdev) { - dev_err(&pdev->dev, "Can't find I/O Bridge\n"); - err = -ENODEV; - goto out_put_dma_pdev; - } + dev->features = NETIF_F_HW_CSUM | NETIF_F_LLTX | NETIF_F_SG; /* These should come out of the device tree eventually */ mac->dma_txch = index; @@ -1148,18 +1389,11 @@ pasemi_mac_probe(struct pci_dev *pdev, const struct pci_device_id *ent) dev->open = pasemi_mac_open; dev->stop = pasemi_mac_close; dev->hard_start_xmit = pasemi_mac_start_tx; - dev->get_stats = pasemi_mac_get_stats; dev->set_multicast_list = pasemi_mac_set_rx_mode; - dev->weight = 64; - dev->poll = pasemi_mac_poll; - dev->features = NETIF_F_HW_CSUM; - /* The dma status structure is located in the I/O bridge, and - * is cache coherent. - */ - if (!dma_status) - /* XXXOJN This should come from the device tree */ - dma_status = __ioremap(0xfd800000, 0x1000, 0); + err = pasemi_mac_map_regs(mac); + if (err) + goto out; mac->rx_status = &dma_status->rx_sta[mac->dma_rxch]; mac->tx_status = &dma_status->tx_sta[mac->dma_txch]; @@ -1175,21 +1409,27 @@ pasemi_mac_probe(struct pci_dev *pdev, const struct pci_device_id *ent) dev_err(&mac->pdev->dev, "register_netdev failed with error %d\n", err); goto out; - } else + } else if netif_msg_probe(mac) printk(KERN_INFO "%s: PA Semi %s: intf %d, txch %d, rxch %d, " - "hw addr %02x:%02x:%02x:%02x:%02x:%02x\n", + "hw addr %s\n", dev->name, mac->type == MAC_TYPE_GMAC ? 
"GMAC" : "XAUI", mac->dma_if, mac->dma_txch, mac->dma_rxch, - dev->dev_addr[0], dev->dev_addr[1], dev->dev_addr[2], - dev->dev_addr[3], dev->dev_addr[4], dev->dev_addr[5]); + print_mac(mac_buf, dev->dev_addr)); return err; out: - pci_dev_put(mac->iob_pdev); -out_put_dma_pdev: - pci_dev_put(mac->dma_pdev); -out_free_netdev: + if (mac->iob_pdev) + pci_dev_put(mac->iob_pdev); + if (mac->dma_pdev) + pci_dev_put(mac->dma_pdev); + if (mac->dma_regs) + iounmap(mac->dma_regs); + if (mac->iob_regs) + iounmap(mac->iob_regs); + if (mac->regs) + iounmap(mac->regs); + free_netdev(dev); out_disable_device: pci_disable_device(pdev); @@ -1213,6 +1453,10 @@ static void __devexit pasemi_mac_remove(struct pci_dev *pdev) pci_dev_put(mac->dma_pdev); pci_dev_put(mac->iob_pdev); + iounmap(mac->regs); + iounmap(mac->dma_regs); + iounmap(mac->iob_regs); + pci_set_drvdata(pdev, NULL); free_netdev(netdev); } diff --git a/drivers/net/pasemi_mac.h b/drivers/net/pasemi_mac.h index c29ee159c33d67178f546f1aadb61a53b0a8f985..60368df72634cc3931399b9b6c57e38b82a279a7 100644 --- a/drivers/net/pasemi_mac.h +++ b/drivers/net/pasemi_mac.h @@ -28,35 +28,38 @@ struct pasemi_mac_txring { spinlock_t lock; - struct pas_dma_xct_descr *desc; + u64 *ring; dma_addr_t dma; unsigned int size; - unsigned int next_to_use; + unsigned int next_to_fill; unsigned int next_to_clean; - struct pasemi_mac_buffer *desc_info; + struct pasemi_mac_buffer *ring_info; char irq_name[10]; /* "eth%d tx" */ }; struct pasemi_mac_rxring { spinlock_t lock; - struct pas_dma_xct_descr *desc; /* RX channel descriptor ring */ + u64 *ring; /* RX channel descriptor ring */ dma_addr_t dma; u64 *buffers; /* RX interface buffer ring */ dma_addr_t buf_dma; unsigned int size; unsigned int next_to_fill; unsigned int next_to_clean; - struct pasemi_mac_buffer *desc_info; + struct pasemi_mac_buffer *ring_info; char irq_name[10]; /* "eth%d rx" */ }; struct pasemi_mac { struct net_device *netdev; + void __iomem *regs; + void __iomem *dma_regs; + void __iomem *iob_regs; struct pci_dev *pdev; struct pci_dev *dma_pdev; struct pci_dev *iob_pdev; struct phy_device *phydev; - struct net_device_stats stats; + struct napi_struct napi; /* Pointer to the cacheable per-channel status registers */ u64 *rx_status; @@ -85,7 +88,7 @@ struct pasemi_mac { char phy_id[BUS_ID_SIZE]; }; -/* Software status descriptor (desc_info) */ +/* Software status descriptor (ring_info) */ struct pasemi_mac_buffer { struct sk_buff *skb; dma_addr_t dma; @@ -98,20 +101,7 @@ struct pasdma_status { u64 tx_sta[20]; }; -/* descriptor structure */ -struct pas_dma_xct_descr { - union { - u64 mactx; - u64 macrx; - }; - union { - u64 ptr; - u64 rxb; - }; -}; - /* MAC CFG register offsets */ - enum { PAS_MAC_CFG_PCFG = 0x80, PAS_MAC_CFG_TXP = 0x98, @@ -215,6 +205,20 @@ enum { #define PAS_DMA_RXINT_RCMDSTA_ACT 0x00010000 #define PAS_DMA_RXINT_RCMDSTA_DROPS_M 0xfffe0000 #define PAS_DMA_RXINT_RCMDSTA_DROPS_S 17 +#define PAS_DMA_RXINT_CFG(i) (0x204+(i)*_PAS_DMA_RXINT_STRIDE) +#define PAS_DMA_RXINT_CFG_RBP 0x80000000 +#define PAS_DMA_RXINT_CFG_ITRR 0x40000000 +#define PAS_DMA_RXINT_CFG_DHL_M 0x07000000 +#define PAS_DMA_RXINT_CFG_DHL_S 24 +#define PAS_DMA_RXINT_CFG_DHL(x) (((x) << PAS_DMA_RXINT_CFG_DHL_S) & \ + PAS_DMA_RXINT_CFG_DHL_M) +#define PAS_DMA_RXINT_CFG_ITR 0x00400000 +#define PAS_DMA_RXINT_CFG_LW 0x00200000 +#define PAS_DMA_RXINT_CFG_L2 0x00100000 +#define PAS_DMA_RXINT_CFG_HEN 0x00080000 +#define PAS_DMA_RXINT_CFG_WIF 0x00000002 +#define PAS_DMA_RXINT_CFG_WIL 0x00000001 + #define PAS_DMA_RXINT_INCR(i) 
(0x210+(i)*_PAS_DMA_RXINT_STRIDE) #define PAS_DMA_RXINT_INCR_INCR_M 0x0000ffff #define PAS_DMA_RXINT_INCR_INCR_S 0 @@ -241,6 +245,10 @@ enum { #define PAS_DMA_TXCHAN_TCMDSTA_EN 0x00000001 /* Enabled */ #define PAS_DMA_TXCHAN_TCMDSTA_ST 0x00000002 /* Stop interface */ #define PAS_DMA_TXCHAN_TCMDSTA_ACT 0x00010000 /* Active */ +#define PAS_DMA_TXCHAN_TCMDSTA_SZ 0x00000800 +#define PAS_DMA_TXCHAN_TCMDSTA_DB 0x00000400 +#define PAS_DMA_TXCHAN_TCMDSTA_DE 0x00000200 +#define PAS_DMA_TXCHAN_TCMDSTA_DA 0x00000100 #define PAS_DMA_TXCHAN_CFG(c) (0x304+(c)*_PAS_DMA_TXCHAN_STRIDE) #define PAS_DMA_TXCHAN_CFG_TY_IFACE 0x00000000 /* Type = interface */ #define PAS_DMA_TXCHAN_CFG_TATTR_M 0x0000003c @@ -251,9 +259,11 @@ enum { #define PAS_DMA_TXCHAN_CFG_WT_S 6 #define PAS_DMA_TXCHAN_CFG_WT(x) (((x) << PAS_DMA_TXCHAN_CFG_WT_S) & \ PAS_DMA_TXCHAN_CFG_WT_M) -#define PAS_DMA_TXCHAN_CFG_CF 0x00001000 /* Clean first line */ -#define PAS_DMA_TXCHAN_CFG_CL 0x00002000 /* Clean last line */ +#define PAS_DMA_TXCHAN_CFG_TRD 0x00010000 /* translate data */ +#define PAS_DMA_TXCHAN_CFG_TRR 0x00008000 /* translate rings */ #define PAS_DMA_TXCHAN_CFG_UP 0x00004000 /* update tx descr when sent */ +#define PAS_DMA_TXCHAN_CFG_CL 0x00002000 /* Clean last line */ +#define PAS_DMA_TXCHAN_CFG_CF 0x00001000 /* Clean first line */ #define PAS_DMA_TXCHAN_INCR(c) (0x310+(c)*_PAS_DMA_TXCHAN_STRIDE) #define PAS_DMA_TXCHAN_BASEL(c) (0x318+(c)*_PAS_DMA_TXCHAN_STRIDE) #define PAS_DMA_TXCHAN_BASEL_BRBL_M 0xffffffc0 @@ -283,7 +293,11 @@ enum { #define PAS_DMA_RXCHAN_CCMDSTA_ST 0x00000002 /* Stop interface */ #define PAS_DMA_RXCHAN_CCMDSTA_ACT 0x00010000 /* Active */ #define PAS_DMA_RXCHAN_CCMDSTA_DU 0x00020000 +#define PAS_DMA_RXCHAN_CCMDSTA_OD 0x00002000 +#define PAS_DMA_RXCHAN_CCMDSTA_FD 0x00001000 +#define PAS_DMA_RXCHAN_CCMDSTA_DT 0x00000800 #define PAS_DMA_RXCHAN_CFG(c) (0x804+(c)*_PAS_DMA_RXCHAN_STRIDE) +#define PAS_DMA_RXCHAN_CFG_CTR 0x00000400 #define PAS_DMA_RXCHAN_CFG_HBU_M 0x00000380 #define PAS_DMA_RXCHAN_CFG_HBU_S 7 #define PAS_DMA_RXCHAN_CFG_HBU(x) (((x) << PAS_DMA_RXCHAN_CFG_HBU_S) & \ @@ -317,6 +331,12 @@ enum { #define PAS_STATUS_SOFT 0x4000000000000000ull #define PAS_STATUS_INT 0x8000000000000000ull +#define PAS_IOB_COM_PKTHDRCNT 0x120 +#define PAS_IOB_COM_PKTHDRCNT_PKTHDR1_M 0x0fff0000 +#define PAS_IOB_COM_PKTHDRCNT_PKTHDR1_S 16 +#define PAS_IOB_COM_PKTHDRCNT_PKTHDR0_M 0x00000fff +#define PAS_IOB_COM_PKTHDRCNT_PKTHDR0_S 0 + #define PAS_IOB_DMA_RXCH_CFG(i) (0x1100 + (i)*4) #define PAS_IOB_DMA_RXCH_CFG_CNTTH_M 0x00000fff #define PAS_IOB_DMA_RXCH_CFG_CNTTH_S 0 @@ -412,10 +432,9 @@ enum { /* Receive descriptor fields */ #define XCT_MACRX_T 0x8000000000000000ull #define XCT_MACRX_ST 0x4000000000000000ull -#define XCT_MACRX_NORES 0x0000000000000000ull -#define XCT_MACRX_8BRES 0x1000000000000000ull -#define XCT_MACRX_24BRES 0x2000000000000000ull -#define XCT_MACRX_40BRES 0x3000000000000000ull +#define XCT_MACRX_RR_M 0x3000000000000000ull +#define XCT_MACRX_RR_NORES 0x0000000000000000ull +#define XCT_MACRX_RR_8BRES 0x1000000000000000ull #define XCT_MACRX_O 0x0400000000000000ull #define XCT_MACRX_E 0x0200000000000000ull #define XCT_MACRX_FF 0x0100000000000000ull @@ -463,6 +482,17 @@ enum { #define XCT_PTR_ADDR(x) ((((long)(x)) << XCT_PTR_ADDR_S) & \ XCT_PTR_ADDR_M) +/* Receive interface 8byte result fields */ +#define XCT_RXRES_8B_L4O_M 0xff00000000000000ull +#define XCT_RXRES_8B_L4O_S 56 +#define XCT_RXRES_8B_RULE_M 0x00ffff0000000000ull +#define XCT_RXRES_8B_RULE_S 40 +#define XCT_RXRES_8B_EVAL_M 0x000000ffff000000ull +#define 
XCT_RXRES_8B_EVAL_S 24 +#define XCT_RXRES_8B_HTYPE_M 0x0000000000f00000ull +#define XCT_RXRES_8B_HASH_M 0x00000000000fffffull +#define XCT_RXRES_8B_HASH_S 0 + /* Receive interface buffer fields */ #define XCT_RXB_LEN_M 0x0ffff00000000000ull #define XCT_RXB_LEN_S 44 diff --git a/drivers/net/pci-skeleton.c b/drivers/net/pci-skeleton.c index 3cdbe118200be40ffcc4d4c5527467fcc4bfac84..ed402e00e730e1c0c2eadfa1ac13a5cfe3cb9aa9 100644 --- a/drivers/net/pci-skeleton.c +++ b/drivers/net/pci-skeleton.c @@ -457,7 +457,6 @@ struct netdrv_private { void *mmio_addr; int drv_flags; struct pci_dev *pci_dev; - struct net_device_stats stats; struct timer_list timer; /* Media selection timer. */ unsigned char *rx_ring; unsigned int cur_rx; /* Index into the Rx buffer of next Rx pkt. */ @@ -505,7 +504,6 @@ static int netdrv_start_xmit (struct sk_buff *skb, static irqreturn_t netdrv_interrupt (int irq, void *dev_instance); static int netdrv_close (struct net_device *dev); static int netdrv_ioctl (struct net_device *dev, struct ifreq *rq, int cmd); -static struct net_device_stats *netdrv_get_stats (struct net_device *dev); static void netdrv_set_rx_mode (struct net_device *dev); static void netdrv_hw_start (struct net_device *dev); @@ -604,7 +602,6 @@ static int __devinit netdrv_init_board (struct pci_dev *pdev, DPRINTK ("EXIT, returning -ENOMEM\n"); return -ENOMEM; } - SET_MODULE_OWNER(dev); SET_NETDEV_DEV(dev, &pdev->dev); tp = dev->priv; @@ -740,6 +737,7 @@ static int __devinit netdrv_init_one (struct pci_dev *pdev, int i, addr_len, option; void *ioaddr = NULL; static int board_idx = -1; + DECLARE_MAC_BUF(mac); /* when built into the kernel, we only print version if device is found */ #ifndef MODULE @@ -776,7 +774,6 @@ static int __devinit netdrv_init_one (struct pci_dev *pdev, dev->open = netdrv_open; dev->hard_start_xmit = netdrv_start_xmit; dev->stop = netdrv_close; - dev->get_stats = netdrv_get_stats; dev->set_multicast_list = netdrv_set_rx_mode; dev->do_ioctl = netdrv_ioctl; dev->tx_timeout = netdrv_tx_timeout; @@ -800,15 +797,11 @@ static int __devinit netdrv_init_one (struct pci_dev *pdev, tp->phys[0] = 32; - printk (KERN_INFO "%s: %s at 0x%lx, " - "%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x, " - "IRQ %d\n", + printk (KERN_INFO "%s: %s at 0x%lx, %sIRQ %d\n", dev->name, board_info[ent->driver_data].name, dev->base_addr, - dev->dev_addr[0], dev->dev_addr[1], - dev->dev_addr[2], dev->dev_addr[3], - dev->dev_addr[4], dev->dev_addr[5], + print_mac(mac, dev->dev_addr), dev->irq); printk (KERN_DEBUG "%s: Identified 8139 chip type '%s'\n", @@ -1277,7 +1270,7 @@ static void netdrv_tx_clear (struct netdrv_private *tp) if (rp->skb) { dev_kfree_skb (rp->skb); rp->skb = NULL; - tp->stats.tx_dropped++; + dev->stats.tx_dropped++; } } } @@ -1390,25 +1383,25 @@ static void netdrv_tx_interrupt (struct net_device *dev, /* There was an major error, log it. */ DPRINTK ("%s: Transmit error, Tx status %8.8x.\n", dev->name, txstatus); - tp->stats.tx_errors++; + dev->stats.tx_errors++; if (txstatus & TxAborted) { - tp->stats.tx_aborted_errors++; + dev->stats.tx_aborted_errors++; NETDRV_W32 (TxConfig, TxClearAbt | (TX_DMA_BURST << TxDMAShift)); } if (txstatus & TxCarrierLost) - tp->stats.tx_carrier_errors++; + dev->stats.tx_carrier_errors++; if (txstatus & TxOutOfWindow) - tp->stats.tx_window_errors++; + dev->stats.tx_window_errors++; } else { if (txstatus & TxUnderrun) { /* Add 64 to the Tx FIFO threshold. 
*/ if (tp->tx_flag < 0x00300000) tp->tx_flag += 0x00020000; - tp->stats.tx_fifo_errors++; + dev->stats.tx_fifo_errors++; } - tp->stats.collisions += (txstatus >> 24) & 15; - tp->stats.tx_bytes += txstatus & 0x7ff; - tp->stats.tx_packets++; + dev->stats.collisions += (txstatus >> 24) & 15; + dev->stats.tx_bytes += txstatus & 0x7ff; + dev->stats.tx_packets++; } /* Free the original skb. */ @@ -1461,13 +1454,13 @@ static void netdrv_rx_err (u32 rx_status, struct net_device *dev, dev->name, rx_status); /* A.C.: The chip hangs here. */ } - tp->stats.rx_errors++; + dev->stats.rx_errors++; if (rx_status & (RxBadSymbol | RxBadAlign)) - tp->stats.rx_frame_errors++; + dev->stats.rx_frame_errors++; if (rx_status & (RxRunt | RxTooLong)) - tp->stats.rx_length_errors++; + dev->stats.rx_length_errors++; if (rx_status & RxCRCErr) - tp->stats.rx_crc_errors++; + dev->stats.rx_crc_errors++; /* Reset the receiver, based on RealTek recommendation. (Bug?) */ tp->cur_rx = 0; @@ -1573,13 +1566,13 @@ static void netdrv_rx_interrupt (struct net_device *dev, skb->protocol = eth_type_trans (skb, dev); netif_rx (skb); dev->last_rx = jiffies; - tp->stats.rx_bytes += pkt_size; - tp->stats.rx_packets++; + dev->stats.rx_bytes += pkt_size; + dev->stats.rx_packets++; } else { printk (KERN_WARNING "%s: Memory squeeze, dropping packet.\n", dev->name); - tp->stats.rx_dropped++; + dev->stats.rx_dropped++; } cur_rx = (cur_rx + rx_size + 4 + 3) & ~3; @@ -1608,7 +1601,7 @@ static void netdrv_weird_interrupt (struct net_device *dev, assert (ioaddr != NULL); /* Update the error count. */ - tp->stats.rx_missed_errors += NETDRV_R32 (RxMissed); + dev->stats.rx_missed_errors += NETDRV_R32 (RxMissed); NETDRV_W32 (RxMissed, 0); if ((status & RxUnderrun) && link_changed && @@ -1629,14 +1622,14 @@ static void netdrv_weird_interrupt (struct net_device *dev, /* XXX along with netdrv_rx_err, are we double-counting errors? */ if (status & (RxUnderrun | RxOverflow | RxErr | RxFIFOOver)) - tp->stats.rx_errors++; + dev->stats.rx_errors++; if (status & (PCSTimeout)) - tp->stats.rx_length_errors++; + dev->stats.rx_length_errors++; if (status & (RxUnderrun | RxFIFOOver)) - tp->stats.rx_fifo_errors++; + dev->stats.rx_fifo_errors++; if (status & RxOverflow) { - tp->stats.rx_over_errors++; + dev->stats.rx_over_errors++; tp->cur_rx = NETDRV_R16 (RxBufAddr) % RX_BUF_LEN; NETDRV_W16_F (RxBufPtr, tp->cur_rx - 16); } @@ -1740,7 +1733,7 @@ static int netdrv_close (struct net_device *dev) NETDRV_W16 (IntrMask, 0x0000); /* Update the error counts. */ - tp->stats.rx_missed_errors += NETDRV_R32 (RxMissed); + dev->stats.rx_missed_errors += NETDRV_R32 (RxMissed); NETDRV_W32 (RxMissed, 0); spin_unlock_irqrestore (&tp->lock, flags); @@ -1807,31 +1800,6 @@ static int netdrv_ioctl (struct net_device *dev, struct ifreq *rq, int cmd) return rc; } - -static struct net_device_stats *netdrv_get_stats (struct net_device *dev) -{ - struct netdrv_private *tp = dev->priv; - void *ioaddr = tp->mmio_addr; - - DPRINTK ("ENTER\n"); - - assert (tp != NULL); - - if (netif_running(dev)) { - unsigned long flags; - - spin_lock_irqsave (&tp->lock, flags); - - tp->stats.rx_missed_errors += NETDRV_R32 (RxMissed); - NETDRV_W32 (RxMissed, 0); - - spin_unlock_irqrestore (&tp->lock, flags); - } - - DPRINTK ("EXIT\n"); - return &tp->stats; -} - /* Set or clear the multicast filter for this adaptor. This routine is not state sensitive and need not be SMP locked. 
*/ @@ -1909,7 +1877,7 @@ static int netdrv_suspend (struct pci_dev *pdev, pm_message_t state) NETDRV_W8 (ChipCmd, (NETDRV_R8 (ChipCmd) & ChipCmdClear)); /* Update the error counts. */ - tp->stats.rx_missed_errors += NETDRV_R32 (RxMissed); + dev->stats.rx_missed_errors += NETDRV_R32 (RxMissed); NETDRV_W32 (RxMissed, 0); spin_unlock_irqrestore (&tp->lock, flags); diff --git a/drivers/net/pcmcia/3c574_cs.c b/drivers/net/pcmcia/3c574_cs.c index 2b395ee21f75b7739a69abd9e2150c55f23134df..73dcbb7296dad2fa20d3b71cc5d3729c2a7d5724 100644 --- a/drivers/net/pcmcia/3c574_cs.c +++ b/drivers/net/pcmcia/3c574_cs.c @@ -343,6 +343,7 @@ static int tc574_config(struct pcmcia_device *link) u16 *phys_addr; char *cardname; union wn3_config config; + DECLARE_MAC_BUF(mac); phys_addr = (u16 *)dev->dev_addr; @@ -458,10 +459,10 @@ static int tc574_config(struct pcmcia_device *link) strcpy(lp->node.dev_name, dev->name); - printk(KERN_INFO "%s: %s at io %#3lx, irq %d, hw_addr ", - dev->name, cardname, dev->base_addr, dev->irq); - for (i = 0; i < 6; i++) - printk("%02X%s", dev->dev_addr[i], ((i<5) ? ":" : ".\n")); + printk(KERN_INFO "%s: %s at io %#3lx, irq %d, " + "hw_addr %s.\n", + dev->name, cardname, dev->base_addr, dev->irq, + print_mac(mac, dev->dev_addr)); printk(" %dK FIFO split %s Rx:Tx, %sMII interface.\n", 8 << config.u.ram_size, ram_split[config.u.ram_split], config.u.autoselect ? "autoselect " : ""); diff --git a/drivers/net/pcmcia/3c589_cs.c b/drivers/net/pcmcia/3c589_cs.c index 503f2685fb73c434ae4a2d6d98be4f03a3d4b7d9..32076ca6a9e1ff35fd32fd742b966540246600f4 100644 --- a/drivers/net/pcmcia/3c589_cs.c +++ b/drivers/net/pcmcia/3c589_cs.c @@ -197,7 +197,6 @@ static int tc589_probe(struct pcmcia_device *link) link->conf.ConfigIndex = 1; /* The EL3-specific entries in the device structure. */ - SET_MODULE_OWNER(dev); dev->hard_start_xmit = &el3_start_xmit; dev->set_config = &el3_config; dev->get_stats = &el3_get_stats; @@ -256,6 +255,7 @@ static int tc589_config(struct pcmcia_device *link) int last_fn, last_ret, i, j, multi = 0, fifo; kio_addr_t ioaddr; char *ram_split[] = {"5:3", "3:1", "1:1", "3:5"}; + DECLARE_MAC_BUF(mac); DEBUG(0, "3c589_config(0x%p)\n", link); @@ -331,11 +331,10 @@ static int tc589_config(struct pcmcia_device *link) strcpy(lp->node.dev_name, dev->name); - printk(KERN_INFO "%s: 3Com 3c%s, io %#3lx, irq %d, hw_addr ", - dev->name, (multi ? "562" : "589"), dev->base_addr, - dev->irq); - for (i = 0; i < 6; i++) - printk("%02X%s", dev->dev_addr[i], ((i<5) ? ":" : "\n")); + printk(KERN_INFO "%s: 3Com 3c%s, io %#3lx, irq %d, " + "hw_addr %s\n", + dev->name, (multi ? "562" : "589"), dev->base_addr, dev->irq, + print_mac(mac, dev->dev_addr)); printk(KERN_INFO " %dK FIFO split %s Rx:Tx, %s xcvr\n", (fifo & 7) ? 
32 : 8, ram_split[(fifo >> 16) & 3], if_names[dev->if_port]); diff --git a/drivers/net/pcmcia/axnet_cs.c b/drivers/net/pcmcia/axnet_cs.c index 50dff1b81d3408b5724f297083c7284488453fb8..a95a2cae6b23ef56d2a5ecf036f9713ff33d7271 100644 --- a/drivers/net/pcmcia/axnet_cs.c +++ b/drivers/net/pcmcia/axnet_cs.c @@ -232,7 +232,7 @@ static int get_prom(struct pcmcia_device *link) axnet_reset_8390(dev); mdelay(10); - for (i = 0; i < sizeof(program_seq)/sizeof(program_seq[0]); i++) + for (i = 0; i < ARRAY_SIZE(program_seq); i++) outb_p(program_seq[i].value, ioaddr + program_seq[i].offset); for (i = 0; i < 6; i += 2) { @@ -292,6 +292,7 @@ static int axnet_config(struct pcmcia_device *link) cisparse_t parse; int i, j, last_ret, last_fn; u_short buf[64]; + DECLARE_MAC_BUF(mac); DEBUG(0, "axnet_config(0x%p)\n", link); @@ -403,11 +404,11 @@ static int axnet_config(struct pcmcia_device *link) strcpy(info->node.dev_name, dev->name); - printk(KERN_INFO "%s: Asix AX88%d90: io %#3lx, irq %d, hw_addr ", + printk(KERN_INFO "%s: Asix AX88%d90: io %#3lx, irq %d, " + "hw_addr %s\n", dev->name, ((info->flags & IS_AX88790) ? 7 : 1), - dev->base_addr, dev->irq); - for (i = 0; i < 6; i++) - printk("%02X%s", dev->dev_addr[i], ((i<5) ? ":" : "\n")); + dev->base_addr, dev->irq, + print_mac(mac, dev->dev_addr)); if (info->phy_id != -1) { DEBUG(0, " MII transceiver at index %d, status %x.\n", info->phy_id, j); } else { @@ -771,6 +772,7 @@ static struct pcmcia_device_id axnet_ids[] = { PCMCIA_DEVICE_MANF_CARD(0x0274, 0x1106), PCMCIA_DEVICE_MANF_CARD(0x8a01, 0xc1ab), PCMCIA_DEVICE_MANF_CARD(0x021b, 0x0202), + PCMCIA_DEVICE_MANF_CARD(0xffff, 0x1090), PCMCIA_DEVICE_PROD_ID12("AmbiCom,Inc.", "Fast Ethernet PC Card(AMB8110)", 0x49b020a7, 0x119cc9fc), PCMCIA_DEVICE_PROD_ID124("Fast Ethernet", "16-bit PC Card", "AX88190", 0xb4be14e3, 0x9a12eb6a, 0xab9be5ef), PCMCIA_DEVICE_PROD_ID12("ASIX", "AX88190", 0x0959823b, 0xab9be5ef), @@ -1728,9 +1730,6 @@ static void axdev_setup(struct net_device *dev) if (ei_debug > 1) printk(version_8390); - SET_MODULE_OWNER(dev); - - ei_local = (struct ei_device *)netdev_priv(dev); spin_lock_init(&ei_local->page_lock); diff --git a/drivers/net/pcmcia/fmvj18x_cs.c b/drivers/net/pcmcia/fmvj18x_cs.c index 85d5f2ca4bb55d6e14e0e2ffc0684975d7b0e500..62844677c784d43fc0974cd6517cf5f41c52532c 100644 --- a/drivers/net/pcmcia/fmvj18x_cs.c +++ b/drivers/net/pcmcia/fmvj18x_cs.c @@ -259,7 +259,6 @@ static int fmvj18x_probe(struct pcmcia_device *link) link->conf.IntType = INT_MEMORY_AND_IO; /* The FMVJ18x specific entries in the device structure. */ - SET_MODULE_OWNER(dev); dev->hard_start_xmit = &fjn_start_xmit; dev->set_config = &fjn_config; dev->get_stats = &fjn_get_stats; @@ -347,6 +346,7 @@ static int fmvj18x_config(struct pcmcia_device *link) cardtype_t cardtype; char *card_name = "unknown"; u_char *node_id; + DECLARE_MAC_BUF(mac); DEBUG(0, "fmvj18x_config(0x%p)\n", link); @@ -534,11 +534,10 @@ static int fmvj18x_config(struct pcmcia_device *link) strcpy(lp->node.dev_name, dev->name); /* print current configuration */ - printk(KERN_INFO "%s: %s, sram %s, port %#3lx, irq %d, hw_addr ", + printk(KERN_INFO "%s: %s, sram %s, port %#3lx, irq %d, " + "hw_addr %s\n", dev->name, card_name, sram_config == 0 ? "4K TX*2" : "8K TX*2", - dev->base_addr, dev->irq); - for (i = 0; i < 6; i++) - printk("%02X%s", dev->dev_addr[i], ((i<5) ? 
":" : "\n")); + dev->base_addr, dev->irq, print_mac(mac, dev->dev_addr)); return 0; diff --git a/drivers/net/pcmcia/nmclan_cs.c b/drivers/net/pcmcia/nmclan_cs.c index 997c2d0c83bbdbe1f2d765fbc582178d02b84be7..a355a93b908b05222e97b992d4b46a04e55810f8 100644 --- a/drivers/net/pcmcia/nmclan_cs.c +++ b/drivers/net/pcmcia/nmclan_cs.c @@ -474,7 +474,6 @@ static int nmclan_probe(struct pcmcia_device *link) lp->tx_free_frames=AM2150_MAX_TX_FRAMES; - SET_MODULE_OWNER(dev); dev->hard_start_xmit = &mace_start_xmit; dev->set_config = &mace_config; dev->get_stats = &mace_get_stats; @@ -659,6 +658,7 @@ static int nmclan_config(struct pcmcia_device *link) u_char buf[64]; int i, last_ret, last_fn; kio_addr_t ioaddr; + DECLARE_MAC_BUF(mac); DEBUG(0, "nmclan_config(0x%p)\n", link); @@ -717,10 +717,10 @@ static int nmclan_config(struct pcmcia_device *link) strcpy(lp->node.dev_name, dev->name); - printk(KERN_INFO "%s: nmclan: port %#3lx, irq %d, %s port, hw_addr ", - dev->name, dev->base_addr, dev->irq, if_names[dev->if_port]); - for (i = 0; i < 6; i++) - printk("%02X%s", dev->dev_addr[i], ((i<5) ? ":" : "\n")); + printk(KERN_INFO "%s: nmclan: port %#3lx, irq %d, %s port," + " hw_addr %s\n", + dev->name, dev->base_addr, dev->irq, if_names[dev->if_port], + print_mac(mac, dev->dev_addr)); return 0; cs_failed: diff --git a/drivers/net/pcmcia/pcnet_cs.c b/drivers/net/pcmcia/pcnet_cs.c index 63de89e93b70cc5235cd11fe33fb32d024973722..9d45e9696e161534a9589a5e5c298dde7e35d9bc 100644 --- a/drivers/net/pcmcia/pcnet_cs.c +++ b/drivers/net/pcmcia/pcnet_cs.c @@ -38,7 +38,7 @@ #include #include #include -#include <../drivers/net/8390.h> +#include "../8390.h" #include #include @@ -207,7 +207,7 @@ static hw_info_t hw_info[] = { { /* PCMCIA Technology OEM */ 0x01c8, 0x00, 0xa0, 0x0c, 0 } }; -#define NR_INFO (sizeof(hw_info)/sizeof(hw_info_t)) +#define NR_INFO ARRAY_SIZE(hw_info) static hw_info_t default_info = { 0, 0, 0, 0, 0 }; static hw_info_t dl10019_info = { 0, 0, 0, 0, IS_DL10019|HAS_MII }; @@ -259,7 +259,6 @@ static int pcnet_probe(struct pcmcia_device *link) link->conf.Attributes = CONF_ENABLE_IRQ; link->conf.IntType = INT_MEMORY_AND_IO; - SET_MODULE_OWNER(dev); dev->open = &pcnet_open; dev->stop = &pcnet_close; dev->set_config = &set_config; @@ -375,7 +374,7 @@ static hw_info_t *get_prom(struct pcmcia_device *link) pcnet_reset_8390(dev); mdelay(10); - for (i = 0; i < sizeof(program_seq)/sizeof(program_seq[0]); i++) + for (i = 0; i < ARRAY_SIZE(program_seq); i++) outb_p(program_seq[i].value, ioaddr + program_seq[i].offset); for (i = 0; i < 32; i++) @@ -522,6 +521,7 @@ static int pcnet_config(struct pcmcia_device *link) int has_shmem = 0; u_short buf[64]; hw_info_t *hw_info; + DECLARE_MAC_BUF(mac); DEBUG(0, "pcnet_config(0x%p)\n", link); @@ -671,9 +671,7 @@ static int pcnet_config(struct pcmcia_device *link) printk (" mem %#5lx,", dev->mem_start); if (info->flags & HAS_MISC_REG) printk(" %s xcvr,", if_names[dev->if_port]); - printk(" hw_addr "); - for (i = 0; i < 6; i++) - printk("%02X%s", dev->dev_addr[i], ((i<5) ? 
":" : "\n")); + printk(" hw_addr %s\n", print_mac(mac, dev->dev_addr)); return 0; cs_failed: @@ -1664,6 +1662,7 @@ static struct pcmcia_device_id pcnet_ids[] = { PCMCIA_DEVICE_PROD_ID12("Laneed", "LD-CDF", 0x1b7827b2, 0xfec71e40), PCMCIA_DEVICE_PROD_ID12("Laneed", "LD-CDL/T", 0x1b7827b2, 0x79fba4f7), PCMCIA_DEVICE_PROD_ID12("Laneed", "LD-CDS", 0x1b7827b2, 0x931afaab), + PCMCIA_DEVICE_PROD_ID12("LEMEL", "LM-N89TX PRO", 0xbbefb52f, 0xd2897a97), PCMCIA_DEVICE_PROD_ID12("Linksys", "Combo PCMCIA EthernetCard (EC2T)", 0x0733cc81, 0x32ee8c78), PCMCIA_DEVICE_PROD_ID12("LINKSYS", "E-CARD", 0xf7cb0b07, 0x6701da11), PCMCIA_DEVICE_PROD_ID12("Linksys", "EtherFast 10/100 Integrated PC Card (PCM100)", 0x0733cc81, 0x453c3f9d), diff --git a/drivers/net/pcmcia/smc91c92_cs.c b/drivers/net/pcmcia/smc91c92_cs.c index af6728cb49c3134b6dfecce5f55b8ba3e6d90ceb..58d716fd17cfac318e7d87876137c966967012d6 100644 --- a/drivers/net/pcmcia/smc91c92_cs.c +++ b/drivers/net/pcmcia/smc91c92_cs.c @@ -336,7 +336,6 @@ static int smc91c92_probe(struct pcmcia_device *link) link->conf.IntType = INT_MEMORY_AND_IO; /* The SMC91c92-specific entries in the device structure. */ - SET_MODULE_OWNER(dev); dev->hard_start_xmit = &smc_start_xmit; dev->get_stats = &smc_get_stats; dev->set_config = &s9k_config; @@ -963,6 +962,7 @@ static int smc91c92_config(struct pcmcia_device *link) int i, j, rev; kio_addr_t ioaddr; u_long mir; + DECLARE_MAC_BUF(mac); DEBUG(0, "smc91c92_config(0x%p)\n", link); @@ -1075,10 +1075,9 @@ static int smc91c92_config(struct pcmcia_device *link) strcpy(smc->node.dev_name, dev->name); printk(KERN_INFO "%s: smc91c%s rev %d: io %#3lx, irq %d, " - "hw_addr ", dev->name, name, (rev & 0x0f), dev->base_addr, - dev->irq); - for (i = 0; i < 6; i++) - printk("%02X%s", dev->dev_addr[i], ((i<5) ? 
":" : "\n")); + "hw_addr %s\n", + dev->name, name, (rev & 0x0f), dev->base_addr, dev->irq, + print_mac(mac, dev->dev_addr)); if (rev > 0) { if (mir & 0x3ff) diff --git a/drivers/net/pcmcia/xirc2ps_cs.c b/drivers/net/pcmcia/xirc2ps_cs.c index 258d6f396186bcd0ca015993d49c0bb634e92633..c3b69602e275d7f90650f57aa69989ab4130f0c0 100644 --- a/drivers/net/pcmcia/xirc2ps_cs.c +++ b/drivers/net/pcmcia/xirc2ps_cs.c @@ -580,7 +580,6 @@ xirc2ps_probe(struct pcmcia_device *link) link->irq.Instance = dev; /* Fill in card specific entries */ - SET_MODULE_OWNER(dev); dev->hard_start_xmit = &do_start_xmit; dev->set_config = &do_config; dev->get_stats = &do_get_stats; @@ -732,6 +731,7 @@ xirc2ps_config(struct pcmcia_device * link) u_char buf[64]; cistpl_lan_node_id_t *node_id = (cistpl_lan_node_id_t*)parse.funce.data; cistpl_cftable_entry_t *cf = &parse.cftable_entry; + DECLARE_MAC_BUF(mac); local->dingo_ccr = NULL; @@ -1033,11 +1033,9 @@ xirc2ps_config(struct pcmcia_device * link) strcpy(local->node.dev_name, dev->name); /* give some infos about the hardware */ - printk(KERN_INFO "%s: %s: port %#3lx, irq %d, hwaddr", - dev->name, local->manf_str,(u_long)dev->base_addr, (int)dev->irq); - for (i = 0; i < 6; i++) - printk("%c%02X", i?':':' ', dev->dev_addr[i]); - printk("\n"); + printk(KERN_INFO "%s: %s: port %#3lx, irq %d, hwaddr %s\n", + dev->name, local->manf_str,(u_long)dev->base_addr, (int)dev->irq, + print_mac(mac, dev->dev_addr)); return 0; diff --git a/drivers/net/pcnet32.c b/drivers/net/pcnet32.c index e6a67531de99706ded46e208cf700d9eb545ebfc..5f994b5beda17b1d808372df248a70124ab07395 100644 --- a/drivers/net/pcnet32.c +++ b/drivers/net/pcnet32.c @@ -23,11 +23,11 @@ #define DRV_NAME "pcnet32" #ifdef CONFIG_PCNET32_NAPI -#define DRV_VERSION "1.33-NAPI" +#define DRV_VERSION "1.34-NAPI" #else -#define DRV_VERSION "1.33" +#define DRV_VERSION "1.34" #endif -#define DRV_RELDATE "27.Jun.2006" +#define DRV_RELDATE "14.Aug.2007" #define PFX DRV_NAME ": " static const char *const version = @@ -210,31 +210,31 @@ static int homepna[MAX_UNITS]; /* The PCNET32 Rx and Tx ring descriptors. */ struct pcnet32_rx_head { - u32 base; - s16 buf_length; /* two`s complement of length */ - s16 status; - u32 msg_length; - u32 reserved; + __le32 base; + __le16 buf_length; /* two`s complement of length */ + __le16 status; + __le32 msg_length; + __le32 reserved; }; struct pcnet32_tx_head { - u32 base; - s16 length; /* two`s complement of length */ - s16 status; - u32 misc; - u32 reserved; + __le32 base; + __le16 length; /* two`s complement of length */ + __le16 status; + __le32 misc; + __le32 reserved; }; /* The PCNET32 32-Bit initialization block, described in databook. */ struct pcnet32_init_block { - u16 mode; - u16 tlen_rlen; + __le16 mode; + __le16 tlen_rlen; u8 phys_addr[6]; - u16 reserved; - u32 filter[2]; + __le16 reserved; + __le32 filter[2]; /* Receive and transmit ring base, along with extra bits. */ - u32 rx_ring; - u32 tx_ring; + __le32 rx_ring; + __le32 tx_ring; }; /* PCnet32 access functions */ @@ -280,6 +280,8 @@ struct pcnet32_private { unsigned int dirty_rx, /* ring entries to be freed. 
*/ dirty_tx; + struct net_device *dev; + struct napi_struct napi; struct net_device_stats stats; char tx_full; char phycount; /* number of phys found */ @@ -440,15 +442,21 @@ static struct pcnet32_access pcnet32_dwio = { static void pcnet32_netif_stop(struct net_device *dev) { + struct pcnet32_private *lp = netdev_priv(dev); dev->trans_start = jiffies; - netif_poll_disable(dev); +#ifdef CONFIG_PCNET32_NAPI + napi_disable(&lp->napi); +#endif netif_tx_disable(dev); } static void pcnet32_netif_start(struct net_device *dev) { + struct pcnet32_private *lp = netdev_priv(dev); netif_wake_queue(dev); - netif_poll_enable(dev); +#ifdef CONFIG_PCNET32_NAPI + napi_enable(&lp->napi); +#endif } /* @@ -602,9 +610,9 @@ static void pcnet32_realloc_rx_ring(struct net_device *dev, new_dma_addr_list[new] = pci_map_single(lp->pci_dev, rx_skbuff->data, PKT_BUF_SZ - 2, PCI_DMA_FROMDEVICE); - new_rx_ring[new].base = (u32) le32_to_cpu(new_dma_addr_list[new]); - new_rx_ring[new].buf_length = le16_to_cpu(2 - PKT_BUF_SZ); - new_rx_ring[new].status = le16_to_cpu(0x8000); + new_rx_ring[new].base = cpu_to_le32(new_dma_addr_list[new]); + new_rx_ring[new].buf_length = cpu_to_le16(2 - PKT_BUF_SZ); + new_rx_ring[new].status = cpu_to_le16(0x8000); } /* and free any unneeded buffers */ for (; new < lp->rx_ring_size; new++) { @@ -816,7 +824,7 @@ static int pcnet32_set_ringparam(struct net_device *dev, if ((1 << i) != lp->rx_ring_size) pcnet32_realloc_rx_ring(dev, lp, i); - dev->weight = lp->rx_ring_size / 2; + lp->napi.weight = lp->rx_ring_size / 2; if (netif_running(dev)) { pcnet32_netif_start(dev); @@ -839,9 +847,14 @@ static void pcnet32_get_strings(struct net_device *dev, u32 stringset, memcpy(data, pcnet32_gstrings_test, sizeof(pcnet32_gstrings_test)); } -static int pcnet32_self_test_count(struct net_device *dev) +static int pcnet32_get_sset_count(struct net_device *dev, int sset) { - return PCNET32_TEST_LEN; + switch (sset) { + case ETH_SS_TEST: + return PCNET32_TEST_LEN; + default: + return -EOPNOTSUPP; + } } static void pcnet32_ethtool_test(struct net_device *dev, @@ -875,7 +888,7 @@ static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1) int x, i; /* counters */ int numbuffs = 4; /* number of TX/RX buffers and descs */ u16 status = 0x8300; /* TX ring status */ - u16 teststatus; /* test of ring status */ + __le16 teststatus; /* test of ring status */ int rc; /* return code */ int size; /* size of packets */ unsigned char *packet; /* source packet data */ @@ -922,7 +935,7 @@ static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1) packet = skb->data; skb_put(skb, size); /* create space for data */ lp->tx_skbuff[x] = skb; - lp->tx_ring[x].length = le16_to_cpu(-skb->len); + lp->tx_ring[x].length = cpu_to_le16(-skb->len); lp->tx_ring[x].misc = 0; /* put DA and SA into the skb */ @@ -942,10 +955,9 @@ static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1) lp->tx_dma_addr[x] = pci_map_single(lp->pci_dev, skb->data, skb->len, PCI_DMA_TODEVICE); - lp->tx_ring[x].base = - (u32) le32_to_cpu(lp->tx_dma_addr[x]); + lp->tx_ring[x].base = cpu_to_le32(lp->tx_dma_addr[x]); wmb(); /* Make sure owner changes after all others are visible */ - lp->tx_ring[x].status = le16_to_cpu(status); + lp->tx_ring[x].status = cpu_to_le16(status); } } @@ -956,7 +968,7 @@ static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1) x = a->read_csr(ioaddr, CSR15) & 0xfffc; lp->a.write_csr(ioaddr, CSR15, x | 0x0044); - teststatus = le16_to_cpu(0x8000); + teststatus = 
cpu_to_le16(0x8000); lp->a.write_csr(ioaddr, CSR0, CSR0_START); /* Set STRT bit */ /* Check status of descriptors */ @@ -1086,6 +1098,7 @@ static int pcnet32_phys_id(struct net_device *dev, u32 data) mod_timer(&lp->blink_timer, jiffies); set_current_state(TASK_INTERRUPTIBLE); + /* AV: the limit here makes no sense whatsoever */ if ((!data) || (data > (u32) (MAX_SCHEDULE_TIMEOUT / HZ))) data = (u32) (MAX_SCHEDULE_TIMEOUT / HZ); @@ -1211,7 +1224,7 @@ static void pcnet32_rx_entry(struct net_device *dev, newskb->data, PKT_BUF_SZ - 2, PCI_DMA_FROMDEVICE); - rxp->base = le32_to_cpu(lp->rx_dma_addr[entry]); + rxp->base = cpu_to_le32(lp->rx_dma_addr[entry]); rx_in_place = 1; } else skb = NULL; @@ -1255,7 +1268,7 @@ static void pcnet32_rx_entry(struct net_device *dev, return; } -static int pcnet32_rx(struct net_device *dev, int quota) +static int pcnet32_rx(struct net_device *dev, int budget) { struct pcnet32_private *lp = netdev_priv(dev); int entry = lp->cur_rx & lp->rx_mod_mask; @@ -1263,16 +1276,16 @@ static int pcnet32_rx(struct net_device *dev, int quota) int npackets = 0; /* If we own the next entry, it's a new packet. Send it up. */ - while (quota > npackets && (short)le16_to_cpu(rxp->status) >= 0) { + while (npackets < budget && (short)le16_to_cpu(rxp->status) >= 0) { pcnet32_rx_entry(dev, lp, rxp, entry); npackets += 1; /* * The docs say that the buffer length isn't touched, but Andrew * Boyd of QNX reports that some revs of the 79C965 clear it. */ - rxp->buf_length = le16_to_cpu(2 - PKT_BUF_SZ); + rxp->buf_length = cpu_to_le16(2 - PKT_BUF_SZ); wmb(); /* Make sure owner changes after others are visible */ - rxp->status = le16_to_cpu(0x8000); + rxp->status = cpu_to_le16(0x8000); entry = (++lp->cur_rx) & lp->rx_mod_mask; rxp = &lp->rx_ring[entry]; } @@ -1379,15 +1392,16 @@ static int pcnet32_tx(struct net_device *dev) } #ifdef CONFIG_PCNET32_NAPI -static int pcnet32_poll(struct net_device *dev, int *budget) +static int pcnet32_poll(struct napi_struct *napi, int budget) { - struct pcnet32_private *lp = netdev_priv(dev); - int quota = min(dev->quota, *budget); + struct pcnet32_private *lp = container_of(napi, struct pcnet32_private, napi); + struct net_device *dev = lp->dev; unsigned long ioaddr = dev->base_addr; unsigned long flags; + int work_done; u16 val; - quota = pcnet32_rx(dev, quota); + work_done = pcnet32_rx(dev, budget); spin_lock_irqsave(&lp->lock, flags); if (pcnet32_tx(dev)) { @@ -1399,28 +1413,22 @@ static int pcnet32_poll(struct net_device *dev, int *budget) } spin_unlock_irqrestore(&lp->lock, flags); - *budget -= quota; - dev->quota -= quota; - - if (dev->quota == 0) { - return 1; - } - - netif_rx_complete(dev); - - spin_lock_irqsave(&lp->lock, flags); + if (work_done < budget) { + spin_lock_irqsave(&lp->lock, flags); - /* clear interrupt masks */ - val = lp->a.read_csr(ioaddr, CSR3); - val &= 0x00ff; - lp->a.write_csr(ioaddr, CSR3, val); + __netif_rx_complete(dev, napi); - /* Set interrupt enable. */ - lp->a.write_csr(ioaddr, CSR0, CSR0_INTEN); - mmiowb(); - spin_unlock_irqrestore(&lp->lock, flags); + /* clear interrupt masks */ + val = lp->a.read_csr(ioaddr, CSR3); + val &= 0x00ff; + lp->a.write_csr(ioaddr, CSR3, val); - return 0; + /* Set interrupt enable. 
*/ + lp->a.write_csr(ioaddr, CSR0, CSR0_INTEN); + mmiowb(); + spin_unlock_irqrestore(&lp->lock, flags); + } + return work_done; } #endif @@ -1506,15 +1514,12 @@ static const struct ethtool_ops pcnet32_ethtool_ops = { .get_link = pcnet32_get_link, .get_ringparam = pcnet32_get_ringparam, .set_ringparam = pcnet32_set_ringparam, - .get_tx_csum = ethtool_op_get_tx_csum, - .get_sg = ethtool_op_get_sg, - .get_tso = ethtool_op_get_tso, .get_strings = pcnet32_get_strings, - .self_test_count = pcnet32_self_test_count, .self_test = pcnet32_ethtool_test, .phys_id = pcnet32_phys_id, .get_regs_len = pcnet32_get_regs_len, .get_regs = pcnet32_get_regs, + .get_sset_count = pcnet32_get_sset_count, }; /* only probes for non-PCI devices, the rest are handled by @@ -1815,9 +1820,10 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev) } lp->pci_dev = pdev; + lp->dev = dev; + spin_lock_init(&lp->lock); - SET_MODULE_OWNER(dev); SET_NETDEV_DEV(dev, &pdev->dev); lp->name = chipname; lp->shared_irq = shared; @@ -1843,6 +1849,10 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev) lp->mii_if.mdio_read = mdio_read; lp->mii_if.mdio_write = mdio_write; +#ifdef CONFIG_PCNET32_NAPI + netif_napi_add(dev, &lp->napi, pcnet32_poll, lp->rx_ring_size / 2); +#endif + if (fdx && !(lp->options & PCNET32_PORT_ASEL) && ((cards_found >= MAX_UNITS) || full_duplex[cards_found])) lp->options |= PCNET32_PORT_FD; @@ -1865,15 +1875,15 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev) && dev->dev_addr[2] == 0x75) lp->options = PCNET32_PORT_FD | PCNET32_PORT_GPSI; - lp->init_block->mode = le16_to_cpu(0x0003); /* Disable Rx and Tx. */ + lp->init_block->mode = cpu_to_le16(0x0003); /* Disable Rx and Tx. */ lp->init_block->tlen_rlen = - le16_to_cpu(lp->tx_len_bits | lp->rx_len_bits); + cpu_to_le16(lp->tx_len_bits | lp->rx_len_bits); for (i = 0; i < 6; i++) lp->init_block->phys_addr[i] = dev->dev_addr[i]; lp->init_block->filter[0] = 0x00000000; lp->init_block->filter[1] = 0x00000000; - lp->init_block->rx_ring = (u32) le32_to_cpu(lp->rx_ring_dma_addr); - lp->init_block->tx_ring = (u32) le32_to_cpu(lp->tx_ring_dma_addr); + lp->init_block->rx_ring = cpu_to_le32(lp->rx_ring_dma_addr); + lp->init_block->tx_ring = cpu_to_le32(lp->tx_ring_dma_addr); /* switch pcnet32 to 32bit mode */ a->write_bcr(ioaddr, 20, 2); @@ -1953,10 +1963,6 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev) dev->ethtool_ops = &pcnet32_ethtool_ops; dev->tx_timeout = pcnet32_tx_timeout; dev->watchdog_timeo = (5 * HZ); - dev->weight = lp->rx_ring_size / 2; -#ifdef CONFIG_PCNET32_NAPI - dev->poll = pcnet32_poll; -#endif #ifdef CONFIG_NET_POLL_CONTROLLER dev->poll_controller = pcnet32_poll_controller; @@ -2268,7 +2274,7 @@ static int pcnet32_open(struct net_device *dev) #endif lp->init_block->mode = - le16_to_cpu((lp->options & PCNET32_PORT_PORTSEL) << 7); + cpu_to_le16((lp->options & PCNET32_PORT_PORTSEL) << 7); pcnet32_load_multicast(dev); if (pcnet32_init_ring(dev)) { @@ -2276,6 +2282,10 @@ static int pcnet32_open(struct net_device *dev) goto err_free_ring; } +#ifdef CONFIG_PCNET32_NAPI + napi_enable(&lp->napi); +#endif + /* Re-initialize the PCNET32, and start it when done. 
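The pcnet32 hunks above move the driver from the old dev->poll/dev->weight interface to the per-device struct napi_struct API (netif_napi_add at probe, napi_enable/napi_disable at open/close, and a poll method that takes a budget and returns the work done). Reduced to its essentials, the pattern such a converted driver follows is sketched below; this is not pcnet32 code — the foo_* names and the foo_clean_rx()/foo_*_rx_irq() helpers are invented placeholders for the driver's own RX cleanup and interrupt-mask routines.

```c
#include <linux/netdevice.h>
#include <linux/interrupt.h>

struct foo_priv {
	struct net_device *dev;
	struct napi_struct napi;
};

/* assumed driver-specific helpers */
int foo_clean_rx(struct net_device *dev, int budget);
void foo_enable_rx_irq(struct net_device *dev);
void foo_disable_rx_irq(struct net_device *dev);

static int foo_poll(struct napi_struct *napi, int budget)
{
	struct foo_priv *priv = container_of(napi, struct foo_priv, napi);
	struct net_device *dev = priv->dev;
	int work_done = foo_clean_rx(dev, budget);	/* at most 'budget' packets */

	if (work_done < budget) {
		netif_rx_complete(dev, napi);	/* leave polled mode ... */
		foo_enable_rx_irq(dev);		/* ... and re-arm the RX interrupt */
	}
	return work_done;
}

static irqreturn_t foo_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct foo_priv *priv = netdev_priv(dev);

	if (netif_rx_schedule_prep(dev, &priv->napi)) {
		foo_disable_rx_irq(dev);
		__netif_rx_schedule(dev, &priv->napi);
	}
	return IRQ_HANDLED;
}

/*
 * probe:  netif_napi_add(dev, &priv->napi, foo_poll, 64);
 * open:   napi_enable(&priv->napi);
 * close:  napi_disable(&priv->napi);
 */
```

Returning work_done (rather than the old 0/1 convention) is what lets the core decide whether to keep the device on the poll list, which is exactly the if (work_done < budget) branch the patch adds to pcnet32_poll().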
*/ lp->a.write_csr(ioaddr, 1, (lp->init_dma_addr & 0xffff)); lp->a.write_csr(ioaddr, 2, (lp->init_dma_addr >> 16)); @@ -2391,10 +2401,10 @@ static int pcnet32_init_ring(struct net_device *dev) lp->rx_dma_addr[i] = pci_map_single(lp->pci_dev, rx_skbuff->data, PKT_BUF_SZ - 2, PCI_DMA_FROMDEVICE); - lp->rx_ring[i].base = (u32) le32_to_cpu(lp->rx_dma_addr[i]); - lp->rx_ring[i].buf_length = le16_to_cpu(2 - PKT_BUF_SZ); + lp->rx_ring[i].base = cpu_to_le32(lp->rx_dma_addr[i]); + lp->rx_ring[i].buf_length = cpu_to_le16(2 - PKT_BUF_SZ); wmb(); /* Make sure owner changes after all others are visible */ - lp->rx_ring[i].status = le16_to_cpu(0x8000); + lp->rx_ring[i].status = cpu_to_le16(0x8000); } /* The Tx buffer address is filled in as needed, but we do need to clear * the upper ownership bit. */ @@ -2406,11 +2416,11 @@ static int pcnet32_init_ring(struct net_device *dev) } lp->init_block->tlen_rlen = - le16_to_cpu(lp->tx_len_bits | lp->rx_len_bits); + cpu_to_le16(lp->tx_len_bits | lp->rx_len_bits); for (i = 0; i < 6; i++) lp->init_block->phys_addr[i] = dev->dev_addr[i]; - lp->init_block->rx_ring = (u32) le32_to_cpu(lp->rx_ring_dma_addr); - lp->init_block->tx_ring = (u32) le32_to_cpu(lp->tx_ring_dma_addr); + lp->init_block->rx_ring = cpu_to_le32(lp->rx_ring_dma_addr); + lp->init_block->tx_ring = cpu_to_le32(lp->tx_ring_dma_addr); wmb(); /* Make sure all changes are visible */ return 0; } @@ -2519,16 +2529,16 @@ static int pcnet32_start_xmit(struct sk_buff *skb, struct net_device *dev) /* Caution: the write order is important here, set the status * with the "ownership" bits last. */ - lp->tx_ring[entry].length = le16_to_cpu(-skb->len); + lp->tx_ring[entry].length = cpu_to_le16(-skb->len); lp->tx_ring[entry].misc = 0x00000000; lp->tx_skbuff[entry] = skb; lp->tx_dma_addr[entry] = pci_map_single(lp->pci_dev, skb->data, skb->len, PCI_DMA_TODEVICE); - lp->tx_ring[entry].base = (u32) le32_to_cpu(lp->tx_dma_addr[entry]); + lp->tx_ring[entry].base = cpu_to_le32(lp->tx_dma_addr[entry]); wmb(); /* Make sure owner changes after all others are visible */ - lp->tx_ring[entry].status = le16_to_cpu(status); + lp->tx_ring[entry].status = cpu_to_le16(status); lp->cur_tx++; lp->stats.tx_bytes += skb->len; @@ -2599,18 +2609,18 @@ pcnet32_interrupt(int irq, void *dev_id) /* unlike for the lance, there is no restart needed */ } #ifdef CONFIG_PCNET32_NAPI - if (netif_rx_schedule_prep(dev)) { + if (netif_rx_schedule_prep(dev, &lp->napi)) { u16 val; /* set interrupt masks */ val = lp->a.read_csr(ioaddr, CSR3); val |= 0x5f00; lp->a.write_csr(ioaddr, CSR3, val); mmiowb(); - __netif_rx_schedule(dev); + __netif_rx_schedule(dev, &lp->napi); break; } #else - pcnet32_rx(dev, dev->weight); + pcnet32_rx(dev, lp->napi.weight); if (pcnet32_tx(dev)) { /* reset the chip to clear the error condition, then restart */ lp->a.reset(ioaddr); @@ -2645,6 +2655,9 @@ static int pcnet32_close(struct net_device *dev) del_timer_sync(&lp->watchdog_timer); netif_stop_queue(dev); +#ifdef CONFIG_PCNET32_NAPI + napi_disable(&lp->napi); +#endif spin_lock_irqsave(&lp->lock, flags); @@ -2696,7 +2709,7 @@ static void pcnet32_load_multicast(struct net_device *dev) { struct pcnet32_private *lp = netdev_priv(dev); volatile struct pcnet32_init_block *ib = lp->init_block; - volatile u16 *mcast_table = (u16 *) & ib->filter; + volatile __le16 *mcast_table = (__le16 *)ib->filter; struct dev_mc_list *dmi = dev->mc_list; unsigned long ioaddr = dev->base_addr; char *addrs; @@ -2705,8 +2718,8 @@ static void pcnet32_load_multicast(struct net_device *dev) /* set all 
multicast bits */ if (dev->flags & IFF_ALLMULTI) { - ib->filter[0] = 0xffffffff; - ib->filter[1] = 0xffffffff; + ib->filter[0] = cpu_to_le32(~0U); + ib->filter[1] = cpu_to_le32(~0U); lp->a.write_csr(ioaddr, PCNET32_MC_FILTER, 0xffff); lp->a.write_csr(ioaddr, PCNET32_MC_FILTER+1, 0xffff); lp->a.write_csr(ioaddr, PCNET32_MC_FILTER+2, 0xffff); @@ -2728,9 +2741,7 @@ static void pcnet32_load_multicast(struct net_device *dev) crc = ether_crc_le(6, addrs); crc = crc >> 26; - mcast_table[crc >> 4] = - le16_to_cpu(le16_to_cpu(mcast_table[crc >> 4]) | - (1 << (crc & 0xf))); + mcast_table[crc >> 4] |= cpu_to_le16(1 << (crc & 0xf)); } for (i = 0; i < 4; i++) lp->a.write_csr(ioaddr, PCNET32_MC_FILTER + i, @@ -2756,12 +2767,12 @@ static void pcnet32_set_multicast_list(struct net_device *dev) printk(KERN_INFO "%s: Promiscuous mode enabled.\n", dev->name); lp->init_block->mode = - le16_to_cpu(0x8000 | (lp->options & PCNET32_PORT_PORTSEL) << + cpu_to_le16(0x8000 | (lp->options & PCNET32_PORT_PORTSEL) << 7); lp->a.write_csr(ioaddr, CSR15, csr15 | 0x8000); } else { lp->init_block->mode = - le16_to_cpu((lp->options & PCNET32_PORT_PORTSEL) << 7); + cpu_to_le16((lp->options & PCNET32_PORT_PORTSEL) << 7); lp->a.write_csr(ioaddr, CSR15, csr15 & 0x7fff); pcnet32_load_multicast(dev); } @@ -2943,6 +2954,33 @@ static void pcnet32_watchdog(struct net_device *dev) mod_timer(&(lp->watchdog_timer), PCNET32_WATCHDOG_TIMEOUT); } +static int pcnet32_pm_suspend(struct pci_dev *pdev, pm_message_t state) +{ + struct net_device *dev = pci_get_drvdata(pdev); + + if (netif_running(dev)) { + netif_device_detach(dev); + pcnet32_close(dev); + } + pci_save_state(pdev); + pci_set_power_state(pdev, pci_choose_state(pdev, state)); + return 0; +} + +static int pcnet32_pm_resume(struct pci_dev *pdev) +{ + struct net_device *dev = pci_get_drvdata(pdev); + + pci_set_power_state(pdev, PCI_D0); + pci_restore_state(pdev); + + if (netif_running(dev)) { + pcnet32_open(dev); + netif_device_attach(dev); + } + return 0; +} + static void __devexit pcnet32_remove_one(struct pci_dev *pdev) { struct net_device *dev = pci_get_drvdata(pdev); @@ -2966,6 +3004,8 @@ static struct pci_driver pcnet32_driver = { .probe = pcnet32_probe_pci, .remove = __devexit_p(pcnet32_remove_one), .id_table = pcnet32_pci_tbl, + .suspend = pcnet32_pm_suspend, + .resume = pcnet32_pm_resume, }; /* An additional parameter that may be passed in... */ diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig index dd09011c7ee5d9656aec4891860c3ee3a6537b74..54b2ba996640791fffa7b889d5fd8b57228d3679 100644 --- a/drivers/net/phy/Kconfig +++ b/drivers/net/phy/Kconfig @@ -76,4 +76,27 @@ config FIXED_MII_100_FDX bool "Emulation for 100M Fdx fixed PHY behavior" depends on FIXED_PHY +config FIXED_MII_1000_FDX + bool "Emulation for 1000M Fdx fixed PHY behavior" + depends on FIXED_PHY + +config FIXED_MII_AMNT + int "Number of emulated PHYs to allocate " + depends on FIXED_PHY + default "1" + ---help--- + Sometimes it is required to have several independent emulated + PHYs on the bus (in case of multi-eth but phy-less HW for instance). + This control will have specified number allocated for each fixed + PHY type enabled. + +config MDIO_BITBANG + tristate "Support for bitbanged MDIO buses" + help + This module implements the MDIO bus protocol in software, + for use by low level drivers that export the ability to + drive the relevant pins. + + If in doubt, say N. 
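The MDIO_BITBANG entry added here provides only the wire protocol; a low-level driver supplies the pin operations. Assuming the companion <linux/mdio-bitbang.h> header exposes struct mdiobb_ops and struct mdiobb_ctrl with exactly the hooks used by mdio-bitbang.c later in this patch (owner, set_mdc, set_mdio_dir, set_mdio_data, get_mdio_data), a GPIO-backed consumer could look roughly like the sketch below. The GPIO numbers and the gpio_* accessors are platform assumptions for illustration, not part of the patch.

```c
#include <linux/module.h>
#include <linux/phy.h>
#include <linux/mdio-bitbang.h>
#include <asm/gpio.h>

#define MDC_GPIO	100	/* assumed board-specific pin numbers */
#define MDIO_GPIO	101

static void gpio_set_mdc(struct mdiobb_ctrl *ctrl, int level)
{
	gpio_set_value(MDC_GPIO, level);
}

static void gpio_set_mdio_dir(struct mdiobb_ctrl *ctrl, int output)
{
	if (output)
		gpio_direction_output(MDIO_GPIO, 1);
	else
		gpio_direction_input(MDIO_GPIO);
}

static void gpio_set_mdio_data(struct mdiobb_ctrl *ctrl, int value)
{
	gpio_set_value(MDIO_GPIO, value);
}

static int gpio_get_mdio_data(struct mdiobb_ctrl *ctrl)
{
	return gpio_get_value(MDIO_GPIO);
}

static const struct mdiobb_ops gpio_mdiobb_ops = {
	.owner		= THIS_MODULE,
	.set_mdc	= gpio_set_mdc,
	.set_mdio_dir	= gpio_set_mdio_dir,
	.set_mdio_data	= gpio_set_mdio_data,
	.get_mdio_data	= gpio_get_mdio_data,
};

static struct mdiobb_ctrl gpio_mdio_ctrl = {
	.ops = &gpio_mdiobb_ops,
};

static struct mii_bus *gpio_mdio_bus;

static int __init gpio_mdio_init(void)
{
	gpio_mdio_bus = alloc_mdio_bitbang(&gpio_mdio_ctrl);
	if (!gpio_mdio_bus)
		return -ENOMEM;
	gpio_mdio_bus->name = "GPIO bitbanged MDIO";
	/* a real driver would also set ->id/->parent and call mdiobus_register() */
	return 0;
}

static void __exit gpio_mdio_exit(void)
{
	free_mdio_bitbang(gpio_mdio_bus);
}

module_init(gpio_mdio_init);
module_exit(gpio_mdio_exit);
MODULE_LICENSE("GPL");
```

The preamble, turnaround handling and MDC timing all live in mdio-bitbang.c, so the consumer only has to toggle and sample the two pins.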
+ endif # PHYLIB diff --git a/drivers/net/phy/Makefile b/drivers/net/phy/Makefile index 8885650647ffa8e8a6b80db85815a6272eadad4f..3d6cc7b67a800f209fc26b354a69c2eba3e8501c 100644 --- a/drivers/net/phy/Makefile +++ b/drivers/net/phy/Makefile @@ -13,3 +13,4 @@ obj-$(CONFIG_VITESSE_PHY) += vitesse.o obj-$(CONFIG_BROADCOM_PHY) += broadcom.o obj-$(CONFIG_ICPLUS_PHY) += icplus.o obj-$(CONFIG_FIXED_PHY) += fixed.o +obj-$(CONFIG_MDIO_BITBANG) += mdio-bitbang.o diff --git a/drivers/net/phy/fixed.c b/drivers/net/phy/fixed.c index bb966911a137a051593b1b4ff7f32cf2263cd65f..56191822fa26a74446a689bfa7911b1634d249e6 100644 --- a/drivers/net/phy/fixed.c +++ b/drivers/net/phy/fixed.c @@ -30,53 +30,31 @@ #include #include #include +#include #include #include #include -#define MII_REGS_NUM 7 - -/* - The idea is to emulate normal phy behavior by responding with - pre-defined values to mii BMCR read, so that read_status hook could - take all the needed info. -*/ - -struct fixed_phy_status { - u8 link; - u16 speed; - u8 duplex; -}; - -/*----------------------------------------------------------------------------- - * Private information hoder for mii_bus - *-----------------------------------------------------------------------------*/ -struct fixed_info { - u16 *regs; - u8 regs_num; - struct fixed_phy_status phy_status; - struct phy_device *phydev; /* pointer to the container */ - /* link & speed cb */ - int(*link_update)(struct net_device*, struct fixed_phy_status*); - -}; +/* we need to track the allocated pointers in order to free them on exit */ +static struct fixed_info *fixed_phy_ptrs[CONFIG_FIXED_MII_AMNT*MAX_PHY_AMNT]; /*----------------------------------------------------------------------------- * If something weird is required to be done with link/speed, * network driver is able to assign a function to implement this. * May be useful for PHY's that need to be software-driven. 
*-----------------------------------------------------------------------------*/ -int fixed_mdio_set_link_update(struct phy_device* phydev, - int(*link_update)(struct net_device*, struct fixed_phy_status*)) +int fixed_mdio_set_link_update(struct phy_device *phydev, + int (*link_update) (struct net_device *, + struct fixed_phy_status *)) { struct fixed_info *fixed; - if(link_update == NULL) + if (link_update == NULL) return -EINVAL; - if(phydev) { - if(phydev->bus) { + if (phydev) { + if (phydev->bus) { fixed = phydev->bus->priv; fixed->link_update = link_update; return 0; @@ -84,54 +62,64 @@ int fixed_mdio_set_link_update(struct phy_device* phydev, } return -EINVAL; } + EXPORT_SYMBOL(fixed_mdio_set_link_update); +struct fixed_info *fixed_mdio_get_phydev (int phydev_ind) +{ + if (phydev_ind >= MAX_PHY_AMNT) + return NULL; + return fixed_phy_ptrs[phydev_ind]; +} + +EXPORT_SYMBOL(fixed_mdio_get_phydev); + /*----------------------------------------------------------------------------- * This is used for updating internal mii regs from the status *-----------------------------------------------------------------------------*/ -#if defined(CONFIG_FIXED_MII_100_FDX) || defined(CONFIG_FIXED_MII_10_FDX) +#if defined(CONFIG_FIXED_MII_100_FDX) || defined(CONFIG_FIXED_MII_10_FDX) || defined(CONFIG_FIXED_MII_1000_FDX) static int fixed_mdio_update_regs(struct fixed_info *fixed) { u16 *regs = fixed->regs; u16 bmsr = 0; u16 bmcr = 0; - if(!regs) { + if (!regs) { printk(KERN_ERR "%s: regs not set up", __FUNCTION__); return -EINVAL; } - if(fixed->phy_status.link) + if (fixed->phy_status.link) bmsr |= BMSR_LSTATUS; - if(fixed->phy_status.duplex) { + if (fixed->phy_status.duplex) { bmcr |= BMCR_FULLDPLX; - switch ( fixed->phy_status.speed ) { + switch (fixed->phy_status.speed) { case 100: bmsr |= BMSR_100FULL; bmcr |= BMCR_SPEED100; - break; + break; case 10: bmsr |= BMSR_10FULL; - break; + break; } } else { - switch ( fixed->phy_status.speed ) { + switch (fixed->phy_status.speed) { case 100: bmsr |= BMSR_100HALF; bmcr |= BMCR_SPEED100; - break; + break; case 10: bmsr |= BMSR_100HALF; - break; + break; } } - regs[MII_BMCR] = bmcr; - regs[MII_BMSR] = bmsr | 0x800; /*we are always capable of 10 hdx*/ + regs[MII_BMCR] = bmcr; + regs[MII_BMSR] = bmsr | 0x800; /*we are always capable of 10 hdx */ return 0; } @@ -141,29 +129,30 @@ static int fixed_mii_read(struct mii_bus *bus, int phy_id, int location) struct fixed_info *fixed = bus->priv; /* if user has registered link update callback, use it */ - if(fixed->phydev) - if(fixed->phydev->attached_dev) { - if(fixed->link_update) { + if (fixed->phydev) + if (fixed->phydev->attached_dev) { + if (fixed->link_update) { fixed->link_update(fixed->phydev->attached_dev, - &fixed->phy_status); + &fixed->phy_status); fixed_mdio_update_regs(fixed); } - } + } if ((unsigned int)location >= fixed->regs_num) return -1; return fixed->regs[location]; } -static int fixed_mii_write(struct mii_bus *bus, int phy_id, int location, u16 val) +static int fixed_mii_write(struct mii_bus *bus, int phy_id, int location, + u16 val) { - /* do nothing for now*/ + /* do nothing for now */ return 0; } static int fixed_mii_reset(struct mii_bus *bus) { - /*nothing here - no way/need to reset it*/ + /*nothing here - no way/need to reset it */ return 0; } #endif @@ -171,8 +160,8 @@ static int fixed_mii_reset(struct mii_bus *bus) static int fixed_config_aneg(struct phy_device *phydev) { /* :TODO:03/13/2006 09:45:37 PM:: - The full autoneg funcionality can be emulated, - but no need to have anything here 
for now + The full autoneg funcionality can be emulated, + but no need to have anything here for now */ return 0; } @@ -182,59 +171,79 @@ static int fixed_config_aneg(struct phy_device *phydev) * match will never return true... *-----------------------------------------------------------------------------*/ static struct phy_driver fixed_mdio_driver = { - .name = "Fixed PHY", - .features = PHY_BASIC_FEATURES, - .config_aneg = fixed_config_aneg, - .read_status = genphy_read_status, - .driver = { .owner = THIS_MODULE,}, + .name = "Fixed PHY", +#ifdef CONFIG_FIXED_MII_1000_FDX + .features = PHY_GBIT_FEATURES, +#else + .features = PHY_BASIC_FEATURES, +#endif + .config_aneg = fixed_config_aneg, + .read_status = genphy_read_status, + .driver = { .owner = THIS_MODULE, }, }; +static void fixed_mdio_release(struct device *dev) +{ + struct phy_device *phydev = container_of(dev, struct phy_device, dev); + struct mii_bus *bus = phydev->bus; + struct fixed_info *fixed = bus->priv; + + kfree(phydev); + kfree(bus->dev); + kfree(bus); + kfree(fixed->regs); + kfree(fixed); +} + /*----------------------------------------------------------------------------- * This func is used to create all the necessary stuff, bind * the fixed phy driver and register all it on the mdio_bus_type. - * speed is either 10 or 100, duplex is boolean. + * speed is either 10 or 100 or 1000, duplex is boolean. * number is used to create multiple fixed PHYs, so that several devices can * utilize them simultaneously. + * + * The device on mdio bus will look like [bus_id]:[phy_id], + * bus_id = number + * phy_id = speed+duplex. *-----------------------------------------------------------------------------*/ -#if defined(CONFIG_FIXED_MII_100_FDX) || defined(CONFIG_FIXED_MII_10_FDX) -static int fixed_mdio_register_device(int number, int speed, int duplex) +#if defined(CONFIG_FIXED_MII_100_FDX) || defined(CONFIG_FIXED_MII_10_FDX) || defined(CONFIG_FIXED_MII_1000_FDX) +struct fixed_info *fixed_mdio_register_device( + int bus_id, int speed, int duplex, u8 phy_id) { struct mii_bus *new_bus; struct fixed_info *fixed; struct phy_device *phydev; - int err = 0; + int err; - struct device* dev = kzalloc(sizeof(struct device), GFP_KERNEL); + struct device *dev = kzalloc(sizeof(struct device), GFP_KERNEL); - if (NULL == dev) - return -ENOMEM; + if (dev == NULL) + goto err_dev_alloc; new_bus = kzalloc(sizeof(struct mii_bus), GFP_KERNEL); - if (NULL == new_bus) { - kfree(dev); - return -ENOMEM; - } + if (new_bus == NULL) + goto err_bus_alloc; + fixed = kzalloc(sizeof(struct fixed_info), GFP_KERNEL); - if (NULL == fixed) { - kfree(dev); - kfree(new_bus); - return -ENOMEM; - } + if (fixed == NULL) + goto err_fixed_alloc; + + fixed->regs = kzalloc(MII_REGS_NUM * sizeof(int), GFP_KERNEL); + if (NULL == fixed->regs) + goto err_fixed_regs_alloc; - fixed->regs = kzalloc(MII_REGS_NUM*sizeof(int), GFP_KERNEL); fixed->regs_num = MII_REGS_NUM; fixed->phy_status.speed = speed; fixed->phy_status.duplex = duplex; fixed->phy_status.link = 1; - new_bus->name = "Fixed MII Bus", - new_bus->read = &fixed_mii_read, - new_bus->write = &fixed_mii_write, - new_bus->reset = &fixed_mii_reset, - - /*set up workspace*/ + new_bus->name = "Fixed MII Bus"; + new_bus->read = &fixed_mii_read; + new_bus->write = &fixed_mii_write; + new_bus->reset = &fixed_mii_reset; + /*set up workspace */ fixed_mdio_update_regs(fixed); new_bus->priv = fixed; @@ -243,119 +252,110 @@ static int fixed_mdio_register_device(int number, int speed, int duplex) /* create phy_device and register it on the 
mdio bus */ phydev = phy_device_create(new_bus, 0, 0); + if (phydev == NULL) + goto err_phy_dev_create; /* - Put the phydev pointer into the fixed pack so that bus read/write code could - be able to access for instance attached netdev. Well it doesn't have to do - so, only in case of utilizing user-specified link-update... + * Put the phydev pointer into the fixed pack so that bus read/write + * code could be able to access for instance attached netdev. Well it + * doesn't have to do so, only in case of utilizing user-specified + * link-update... */ - fixed->phydev = phydev; - if(NULL == phydev) { - err = -ENOMEM; - goto device_create_fail; - } + fixed->phydev = phydev; + phydev->speed = speed; + phydev->duplex = duplex; phydev->irq = PHY_IGNORE_INTERRUPT; phydev->dev.bus = &mdio_bus_type; - if(number) - snprintf(phydev->dev.bus_id, BUS_ID_SIZE, - "fixed_%d@%d:%d", number, speed, duplex); - else - snprintf(phydev->dev.bus_id, BUS_ID_SIZE, - "fixed@%d:%d", speed, duplex); - phydev->bus = new_bus; + snprintf(phydev->dev.bus_id, BUS_ID_SIZE, + PHY_ID_FMT, bus_id, phy_id); - err = device_register(&phydev->dev); - if(err) { - printk(KERN_ERR "Phy %s failed to register\n", - phydev->dev.bus_id); - goto bus_register_fail; - } + phydev->bus = new_bus; - /* - the mdio bus has phy_id match... In order not to do it - artificially, we are binding the driver here by hand; - it will be the same for all the fixed phys anyway. - */ phydev->dev.driver = &fixed_mdio_driver.driver; - + phydev->dev.release = fixed_mdio_release; err = phydev->dev.driver->probe(&phydev->dev); - if(err < 0) { - printk(KERN_ERR "Phy %s: problems with fixed driver\n",phydev->dev.bus_id); - goto probe_fail; + if (err < 0) { + printk(KERN_ERR "Phy %s: problems with fixed driver\n", + phydev->dev.bus_id); + goto err_out; } + err = device_register(&phydev->dev); + if (err) { + printk(KERN_ERR "Phy %s failed to register\n", + phydev->dev.bus_id); + goto err_out; + } + //phydev->state = PHY_RUNNING; /* make phy go up quick, but in 10Mbit/HDX + return fixed; - err = device_bind_driver(&phydev->dev); - if (err) - goto probe_fail; - - return 0; - -probe_fail: - device_unregister(&phydev->dev); -bus_register_fail: +err_out: kfree(phydev); -device_create_fail: - kfree(dev); - kfree(new_bus); +err_phy_dev_create: + kfree(fixed->regs); +err_fixed_regs_alloc: kfree(fixed); +err_fixed_alloc: + kfree(new_bus); +err_bus_alloc: + kfree(dev); +err_dev_alloc: + + return NULL; - return err; } #endif - MODULE_DESCRIPTION("Fixed PHY device & driver for PAL"); MODULE_AUTHOR("Vitaly Bordug"); MODULE_LICENSE("GPL"); static int __init fixed_init(void) { -#if 0 - int ret; - int duplex = 0; -#endif - - /* register on the bus... Not expected to be matched with anything there... */ + int cnt = 0; + int i; +/* register on the bus... Not expected to be matched + * with anything there... + * + */ phy_driver_register(&fixed_mdio_driver); - /* So let the fun begin... - We will create several mdio devices here, and will bound the upper - driver to them. - - Then the external software can lookup the phy bus by searching - fixed@speed:duplex, e.g. fixed@100:1, to be connected to the - virtual 100M Fdx phy. - - In case several virtual PHYs required, the bus_id will be in form - fixed_@:, which make it able even to define - driver-specific link control callback, if for instance PHY is completely - SW-driven. - - */ - -#ifdef CONFIG_FIXED_MII_DUPLEX -#if 0 - duplex = 1; -#endif +/* We will create several mdio devices here, and will bound the upper + * driver to them. 
+ * + * Then the external software can lookup the phy bus by searching + * for 0:101, to be connected to the virtual 100M Fdx phy. + * + * In case several virtual PHYs required, the bus_id will be in form + * [num]:[duplex]+[speed], which make it able even to define + * driver-specific link control callback, if for instance PHY is + * completely SW-driven. + */ + for (i=1; i <= CONFIG_FIXED_MII_AMNT; i++) { +#ifdef CONFIG_FIXED_MII_1000_FDX + fixed_phy_ptrs[cnt++] = fixed_mdio_register_device(0, 1000, 1, i); #endif - #ifdef CONFIG_FIXED_MII_100_FDX - fixed_mdio_register_device(0, 100, 1); + fixed_phy_ptrs[cnt++] = fixed_mdio_register_device(1, 100, 1, i); #endif - #ifdef CONFIG_FIXED_MII_10_FDX - fixed_mdio_register_device(0, 10, 1); + fixed_phy_ptrs[cnt++] = fixed_mdio_register_device(2, 10, 1, i); #endif + } + return 0; } static void __exit fixed_exit(void) { + int i; + phy_driver_unregister(&fixed_mdio_driver); - /* :WARNING:02/18/2006 04:32:40 AM:: Cleanup all the created stuff */ + for (i=0; i < MAX_PHY_AMNT; i++) + if ( fixed_phy_ptrs[i] ) + device_unregister(&fixed_phy_ptrs[i]->phydev->dev); } module_init(fixed_init); diff --git a/drivers/net/phy/mdio-bitbang.c b/drivers/net/phy/mdio-bitbang.c new file mode 100644 index 0000000000000000000000000000000000000000..8cd243d92af3ceeee3e25d74c6f534de9cc853c5 --- /dev/null +++ b/drivers/net/phy/mdio-bitbang.c @@ -0,0 +1,187 @@ +/* + * Bitbanged MDIO support. + * + * Author: Scott Wood + * Copyright (c) 2007 Freescale Semiconductor + * + * Based on CPM2 MDIO code which is: + * + * Copyright (c) 2003 Intracom S.A. + * by Pantelis Antoniou + * + * 2005 (c) MontaVista Software, Inc. + * Vitaly Bordug + * + * This file is licensed under the terms of the GNU General Public License + * version 2. This program is licensed "as is" without any warranty of any + * kind, whether express or implied. + */ + +#include +#include +#include +#include +#include + +#define MDIO_READ 1 +#define MDIO_WRITE 0 + +#define MDIO_SETUP_TIME 10 +#define MDIO_HOLD_TIME 10 + +/* Minimum MDC period is 400 ns, plus some margin for error. MDIO_DELAY + * is done twice per period. + */ +#define MDIO_DELAY 250 + +/* The PHY may take up to 300 ns to produce data, plus some margin + * for error. + */ +#define MDIO_READ_DELAY 350 + +/* MDIO must already be configured as output. */ +static void mdiobb_send_bit(struct mdiobb_ctrl *ctrl, int val) +{ + const struct mdiobb_ops *ops = ctrl->ops; + + ops->set_mdio_data(ctrl, val); + ndelay(MDIO_DELAY); + ops->set_mdc(ctrl, 1); + ndelay(MDIO_DELAY); + ops->set_mdc(ctrl, 0); +} + +/* MDIO must already be configured as input. */ +static int mdiobb_get_bit(struct mdiobb_ctrl *ctrl) +{ + const struct mdiobb_ops *ops = ctrl->ops; + + ndelay(MDIO_DELAY); + ops->set_mdc(ctrl, 1); + ndelay(MDIO_READ_DELAY); + ops->set_mdc(ctrl, 0); + + return ops->get_mdio_data(ctrl); +} + +/* MDIO must already be configured as output. */ +static void mdiobb_send_num(struct mdiobb_ctrl *ctrl, u16 val, int bits) +{ + int i; + + for (i = bits - 1; i >= 0; i--) + mdiobb_send_bit(ctrl, (val >> i) & 1); +} + +/* MDIO must already be configured as input. */ +static u16 mdiobb_get_num(struct mdiobb_ctrl *ctrl, int bits) +{ + int i; + u16 ret = 0; + + for (i = bits - 1; i >= 0; i--) { + ret <<= 1; + ret |= mdiobb_get_bit(ctrl); + } + + return ret; +} + +/* Utility to send the preamble, address, and + * register (common to read and write). 
+ */ +static void mdiobb_cmd(struct mdiobb_ctrl *ctrl, int read, u8 phy, u8 reg) +{ + const struct mdiobb_ops *ops = ctrl->ops; + int i; + + ops->set_mdio_dir(ctrl, 1); + + /* + * Send a 32 bit preamble ('1's) with an extra '1' bit for good + * measure. The IEEE spec says this is a PHY optional + * requirement. The AMD 79C874 requires one after power up and + * one after a MII communications error. This means that we are + * doing more preambles than we need, but it is safer and will be + * much more robust. + */ + + for (i = 0; i < 32; i++) + mdiobb_send_bit(ctrl, 1); + + /* send the start bit (01) and the read opcode (10) or write (10) */ + mdiobb_send_bit(ctrl, 0); + mdiobb_send_bit(ctrl, 1); + mdiobb_send_bit(ctrl, read); + mdiobb_send_bit(ctrl, !read); + + mdiobb_send_num(ctrl, phy, 5); + mdiobb_send_num(ctrl, reg, 5); +} + + +static int mdiobb_read(struct mii_bus *bus, int phy, int reg) +{ + struct mdiobb_ctrl *ctrl = bus->priv; + int ret, i; + + mdiobb_cmd(ctrl, MDIO_READ, phy, reg); + ctrl->ops->set_mdio_dir(ctrl, 0); + + /* check the turnaround bit: the PHY should be driving it to zero */ + if (mdiobb_get_bit(ctrl) != 0) { + /* PHY didn't drive TA low -- flush any bits it + * may be trying to send. + */ + for (i = 0; i < 32; i++) + mdiobb_get_bit(ctrl); + + return 0xffff; + } + + ret = mdiobb_get_num(ctrl, 16); + mdiobb_get_bit(ctrl); + return ret; +} + +static int mdiobb_write(struct mii_bus *bus, int phy, int reg, u16 val) +{ + struct mdiobb_ctrl *ctrl = bus->priv; + + mdiobb_cmd(ctrl, MDIO_WRITE, phy, reg); + + /* send the turnaround (10) */ + mdiobb_send_bit(ctrl, 1); + mdiobb_send_bit(ctrl, 0); + + mdiobb_send_num(ctrl, val, 16); + + ctrl->ops->set_mdio_dir(ctrl, 0); + mdiobb_get_bit(ctrl); + return 0; +} + +struct mii_bus *alloc_mdio_bitbang(struct mdiobb_ctrl *ctrl) +{ + struct mii_bus *bus; + + bus = kzalloc(sizeof(struct mii_bus), GFP_KERNEL); + if (!bus) + return NULL; + + __module_get(ctrl->ops->owner); + + bus->read = mdiobb_read; + bus->write = mdiobb_write; + bus->priv = ctrl; + + return bus; +} + +void free_mdio_bitbang(struct mii_bus *bus) +{ + struct mdiobb_ctrl *ctrl = bus->priv; + + module_put(ctrl->ops->owner); + kfree(bus); +} diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c index cb230f44d6fc3418cd899c42a76552a0091e9ee4..9bc11773705b2beaf68064e6ba3166953c20a5f6 100644 --- a/drivers/net/phy/phy.c +++ b/drivers/net/phy/phy.c @@ -7,7 +7,7 @@ * Author: Andy Fleming * * Copyright (c) 2004 Freescale Semiconductor, Inc. - * Copyright (c) 2006 Maciej W. Rozycki + * Copyright (c) 2006, 2007 Maciej W. 
Rozycki * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the @@ -35,6 +35,7 @@ #include #include +#include #include #include #include @@ -204,7 +205,7 @@ static const struct phy_setting settings[] = { }, }; -#define MAX_NUM_SETTINGS (sizeof(settings)/sizeof(struct phy_setting)) +#define MAX_NUM_SETTINGS ARRAY_SIZE(settings) /** * phy_find_setting - find a PHY settings array entry that matches speed & duplex @@ -424,7 +425,7 @@ int phy_start_aneg(struct phy_device *phydev) { int err; - spin_lock(&phydev->lock); + spin_lock_bh(&phydev->lock); if (AUTONEG_DISABLE == phydev->autoneg) phy_sanitize_settings(phydev); @@ -445,7 +446,7 @@ int phy_start_aneg(struct phy_device *phydev) } out_unlock: - spin_unlock(&phydev->lock); + spin_unlock_bh(&phydev->lock); return err; } EXPORT_SYMBOL(phy_start_aneg); @@ -490,10 +491,10 @@ void phy_stop_machine(struct phy_device *phydev) { del_timer_sync(&phydev->phy_timer); - spin_lock(&phydev->lock); + spin_lock_bh(&phydev->lock); if (phydev->state > PHY_UP) phydev->state = PHY_UP; - spin_unlock(&phydev->lock); + spin_unlock_bh(&phydev->lock); phydev->adjust_state = NULL; } @@ -537,9 +538,9 @@ static void phy_force_reduction(struct phy_device *phydev) */ void phy_error(struct phy_device *phydev) { - spin_lock(&phydev->lock); + spin_lock_bh(&phydev->lock); phydev->state = PHY_HALTED; - spin_unlock(&phydev->lock); + spin_unlock_bh(&phydev->lock); } /** @@ -562,6 +563,7 @@ static irqreturn_t phy_interrupt(int irq, void *phy_dat) * queue will write the PHY to disable and clear the * interrupt, and then reenable the irq line. */ disable_irq_nosync(irq); + atomic_inc(&phydev->irq_disable); schedule_work(&phydev->phy_queue); @@ -632,6 +634,7 @@ int phy_start_interrupts(struct phy_device *phydev) INIT_WORK(&phydev->phy_queue, phy_change); + atomic_set(&phydev->irq_disable, 0); if (request_irq(phydev->irq, phy_interrupt, IRQF_SHARED, "phy_interrupt", @@ -662,13 +665,22 @@ int phy_stop_interrupts(struct phy_device *phydev) if (err) phy_error(phydev); + free_irq(phydev->irq, phydev); + /* - * Finish any pending work; we might have been scheduled to be called - * from keventd ourselves, but cancel_work_sync() handles that. + * Cannot call flush_scheduled_work() here as desired because + * of rtnl_lock(), but we do not really care about what would + * be done, except from enable_irq(), so cancel any work + * possibly pending and take care of the matter below. */ cancel_work_sync(&phydev->phy_queue); - - free_irq(phydev->irq, phydev); + /* + * If work indeed has been cancelled, disable_irq() will have + * been left unbalanced from phy_interrupt() and enable_irq() + * has to be called so that other devices on the line work. 
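The comment above, together with the atomic irq_disable counter threaded through phy_interrupt() and phy_change(), implements a general pattern for a shared interrupt line whose handling is deferred to a work item: every disable_irq_nosync() is recorded, and teardown rebalances whatever enable_irq() calls the cancelled work never made. Stripped of the phylib specifics, and with invented foo_* names, the idea is roughly:

```c
#include <linux/interrupt.h>
#include <linux/workqueue.h>
#include <asm/atomic.h>

struct foo_dev {
	int irq;
	atomic_t irq_disable;		/* enable_irq() calls still owed */
	struct work_struct irq_work;
};

static irqreturn_t foo_isr(int irq, void *data)
{
	struct foo_dev *fd = data;

	disable_irq_nosync(irq);	/* keep the shared line quiet until the work runs */
	atomic_inc(&fd->irq_disable);
	schedule_work(&fd->irq_work);
	return IRQ_HANDLED;
}

static void foo_work(struct work_struct *work)
{
	struct foo_dev *fd = container_of(work, struct foo_dev, irq_work);

	/* ... acknowledge and handle the device here ... */
	atomic_dec(&fd->irq_disable);
	enable_irq(fd->irq);
}

static void foo_teardown(struct foo_dev *fd)
{
	free_irq(fd->irq, fd);
	cancel_work_sync(&fd->irq_work);
	/* rebalance any disable_irq_nosync() whose work item was cancelled,
	 * so other devices sharing the line keep getting interrupts */
	while (atomic_dec_return(&fd->irq_disable) >= 0)
		enable_irq(fd->irq);
}
```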
+ */ + while (atomic_dec_return(&phydev->irq_disable) >= 0) + enable_irq(phydev->irq); return err; } @@ -690,11 +702,12 @@ static void phy_change(struct work_struct *work) if (err) goto phy_err; - spin_lock(&phydev->lock); + spin_lock_bh(&phydev->lock); if ((PHY_RUNNING == phydev->state) || (PHY_NOLINK == phydev->state)) phydev->state = PHY_CHANGELINK; - spin_unlock(&phydev->lock); + spin_unlock_bh(&phydev->lock); + atomic_dec(&phydev->irq_disable); enable_irq(phydev->irq); /* Reenable interrupts */ @@ -708,6 +721,7 @@ static void phy_change(struct work_struct *work) irq_enable_err: disable_irq(phydev->irq); + atomic_inc(&phydev->irq_disable); phy_err: phy_error(phydev); } @@ -718,13 +732,11 @@ static void phy_change(struct work_struct *work) */ void phy_stop(struct phy_device *phydev) { - spin_lock(&phydev->lock); + spin_lock_bh(&phydev->lock); if (PHY_HALTED == phydev->state) goto out_unlock; - phydev->state = PHY_HALTED; - if (phydev->irq != PHY_POLL) { /* Disable PHY Interrupts */ phy_config_interrupt(phydev, PHY_INTERRUPT_DISABLED); @@ -733,8 +745,10 @@ void phy_stop(struct phy_device *phydev) phy_clear_interrupt(phydev); } + phydev->state = PHY_HALTED; + out_unlock: - spin_unlock(&phydev->lock); + spin_unlock_bh(&phydev->lock); /* * Cannot call flush_scheduled_work() here as desired because @@ -782,7 +796,7 @@ static void phy_timer(unsigned long data) int needs_aneg = 0; int err = 0; - spin_lock(&phydev->lock); + spin_lock_bh(&phydev->lock); if (phydev->adjust_state) phydev->adjust_state(phydev->attached_dev); @@ -948,7 +962,7 @@ static void phy_timer(unsigned long data) break; } - spin_unlock(&phydev->lock); + spin_unlock_bh(&phydev->lock); if (needs_aneg) err = phy_start_aneg(phydev); diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c index 49328e0505055c554b5236839d72c1423c14d868..c0461217b1084e9ed1264ee82b357cc770ce6321 100644 --- a/drivers/net/phy/phy_device.c +++ b/drivers/net/phy/phy_device.c @@ -670,9 +670,9 @@ static int phy_remove(struct device *dev) phydev = to_phy_device(dev); - spin_lock(&phydev->lock); + spin_lock_bh(&phydev->lock); phydev->state = PHY_DOWN; - spin_unlock(&phydev->lock); + spin_unlock_bh(&phydev->lock); if (phydev->drv->remove) phydev->drv->remove(phydev); diff --git a/drivers/net/plip.c b/drivers/net/plip.c index 8754cf3356b03610d092275d8a457f274aad3c15..b5e9981d10605a57d53817fa8cfb3ee7b257146e 100644 --- a/drivers/net/plip.c +++ b/drivers/net/plip.c @@ -148,13 +148,12 @@ static void plip_interrupt(int irq, void *dev_id); /* Functions for DEV methods */ static int plip_tx_packet(struct sk_buff *skb, struct net_device *dev); static int plip_hard_header(struct sk_buff *skb, struct net_device *dev, - unsigned short type, void *daddr, - void *saddr, unsigned len); -static int plip_hard_header_cache(struct neighbour *neigh, + unsigned short type, const void *daddr, + const void *saddr, unsigned len); +static int plip_hard_header_cache(const struct neighbour *neigh, struct hh_cache *hh); static int plip_open(struct net_device *dev); static int plip_close(struct net_device *dev); -static struct net_device_stats *plip_get_stats(struct net_device *dev); static int plip_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd); static int plip_preempt(void *handle); static void plip_wakeup(void *handle); @@ -206,7 +205,6 @@ struct plip_local { }; struct net_local { - struct net_device_stats enet_stats; struct net_device *dev; struct work_struct immediate; struct delayed_work deferred; @@ -221,11 +219,6 @@ struct net_local { int is_deferred; 
int port_owner; int should_relinquish; - int (*orig_hard_header)(struct sk_buff *skb, struct net_device *dev, - unsigned short type, void *daddr, - void *saddr, unsigned len); - int (*orig_hard_header_cache)(struct neighbour *neigh, - struct hh_cache *hh); spinlock_t lock; atomic_t kill_timer; struct semaphore killed_timer_sem; @@ -267,6 +260,11 @@ static inline unsigned char read_status (struct net_device *dev) return port->ops->read_status (port); } +static const struct header_ops plip_header_ops = { + .create = plip_hard_header, + .cache = plip_hard_header_cache, +}; + /* Entry point of PLIP driver. Probe the hardware, and register/initialize the driver. @@ -285,19 +283,13 @@ plip_init_netdev(struct net_device *dev) dev->hard_start_xmit = plip_tx_packet; dev->open = plip_open; dev->stop = plip_close; - dev->get_stats = plip_get_stats; dev->do_ioctl = plip_ioctl; - dev->header_cache_update = NULL; + dev->tx_queue_len = 10; dev->flags = IFF_POINTOPOINT|IFF_NOARP; memset(dev->dev_addr, 0xfc, ETH_ALEN); - /* Set the private structure */ - nl->orig_hard_header = dev->hard_header; - dev->hard_header = plip_hard_header; - - nl->orig_hard_header_cache = dev->hard_header_cache; - dev->hard_header_cache = plip_hard_header_cache; + dev->header_ops = &plip_header_ops; nl->port_owner = 0; @@ -430,8 +422,8 @@ plip_bh_timeout_error(struct net_device *dev, struct net_local *nl, dev->name, snd->state, c0); } else error = HS_TIMEOUT; - nl->enet_stats.tx_errors++; - nl->enet_stats.tx_aborted_errors++; + dev->stats.tx_errors++; + dev->stats.tx_aborted_errors++; } else if (nl->connection == PLIP_CN_RECEIVE) { if (rcv->state == PLIP_PK_TRIGGER) { /* Transmission was interrupted. */ @@ -448,7 +440,7 @@ plip_bh_timeout_error(struct net_device *dev, struct net_local *nl, printk(KERN_WARNING "%s: receive timeout(%d,%02x)\n", dev->name, rcv->state, c0); } - nl->enet_stats.rx_dropped++; + dev->stats.rx_dropped++; } rcv->state = PLIP_PK_DONE; if (rcv->skb) { @@ -661,7 +653,7 @@ plip_receive_packet(struct net_device *dev, struct net_local *nl, &rcv->nibble, &rcv->data)) return TIMEOUT; if (rcv->data != rcv->checksum) { - nl->enet_stats.rx_crc_errors++; + dev->stats.rx_crc_errors++; if (net_debug) printk(KERN_DEBUG "%s: checksum error\n", dev->name); return ERROR; @@ -673,8 +665,8 @@ plip_receive_packet(struct net_device *dev, struct net_local *nl, rcv->skb->protocol=plip_type_trans(rcv->skb, dev); netif_rx(rcv->skb); dev->last_rx = jiffies; - nl->enet_stats.rx_bytes += rcv->length.h; - nl->enet_stats.rx_packets++; + dev->stats.rx_bytes += rcv->length.h; + dev->stats.rx_packets++; rcv->skb = NULL; if (net_debug > 2) printk(KERN_DEBUG "%s: receive end\n", dev->name); @@ -776,7 +768,7 @@ plip_send_packet(struct net_device *dev, struct net_local *nl, if (nl->connection == PLIP_CN_RECEIVE) { spin_unlock_irq(&nl->lock); /* Interrupted. */ - nl->enet_stats.collisions++; + dev->stats.collisions++; return OK; } c0 = read_status(dev); @@ -792,7 +784,7 @@ plip_send_packet(struct net_device *dev, struct net_local *nl, {enable,disable}_irq *counts* them. 
-- AV */ ENABLE(dev->irq); - nl->enet_stats.collisions++; + dev->stats.collisions++; return OK; } disable_parport_interrupts (dev); @@ -840,9 +832,9 @@ plip_send_packet(struct net_device *dev, struct net_local *nl, &snd->nibble, snd->checksum)) return TIMEOUT; - nl->enet_stats.tx_bytes += snd->skb->len; + dev->stats.tx_bytes += snd->skb->len; dev_kfree_skb(snd->skb); - nl->enet_stats.tx_packets++; + dev->stats.tx_packets++; snd->state = PLIP_PK_DONE; case PLIP_PK_DONE: @@ -996,14 +988,14 @@ plip_tx_packet(struct sk_buff *skb, struct net_device *dev) } static void -plip_rewrite_address(struct net_device *dev, struct ethhdr *eth) +plip_rewrite_address(const struct net_device *dev, struct ethhdr *eth) { - struct in_device *in_dev; + const struct in_device *in_dev = dev->ip_ptr; - if ((in_dev=dev->ip_ptr) != NULL) { + if (in_dev) { /* Any address will do - we take the first */ - struct in_ifaddr *ifa=in_dev->ifa_list; - if (ifa != NULL) { + const struct in_ifaddr *ifa = in_dev->ifa_list; + if (ifa) { memcpy(eth->h_source, dev->dev_addr, 6); memset(eth->h_dest, 0xfc, 2); memcpy(eth->h_dest+2, &ifa->ifa_address, 4); @@ -1013,26 +1005,25 @@ plip_rewrite_address(struct net_device *dev, struct ethhdr *eth) static int plip_hard_header(struct sk_buff *skb, struct net_device *dev, - unsigned short type, void *daddr, - void *saddr, unsigned len) + unsigned short type, const void *daddr, + const void *saddr, unsigned len) { - struct net_local *nl = netdev_priv(dev); int ret; - if ((ret = nl->orig_hard_header(skb, dev, type, daddr, saddr, len)) >= 0) + ret = eth_header(skb, dev, type, daddr, saddr, len); + if (ret >= 0) plip_rewrite_address (dev, (struct ethhdr *)skb->data); return ret; } -int plip_hard_header_cache(struct neighbour *neigh, +int plip_hard_header_cache(const struct neighbour *neigh, struct hh_cache *hh) { - struct net_local *nl = neigh->dev->priv; int ret; - if ((ret = nl->orig_hard_header_cache(neigh, hh)) == 0) - { + ret = eth_header_cache(neigh, hh); + if (ret == 0) { struct ethhdr *eth; eth = (struct ethhdr*)(((u8*)hh->hh_data) + @@ -1199,15 +1190,6 @@ plip_wakeup(void *handle) return; } -static struct net_device_stats * -plip_get_stats(struct net_device *dev) -{ - struct net_local *nl = netdev_priv(dev); - struct net_device_stats *r = &nl->enet_stats; - - return r; -} - static int plip_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) { @@ -1278,7 +1260,6 @@ static void plip_attach (struct parport *port) strcpy(dev->name, name); - SET_MODULE_OWNER(dev); dev->irq = port->irq; dev->base_addr = port->base; if (port->irq == -1) { diff --git a/drivers/net/pppoe.c b/drivers/net/pppoe.c index 9b30cd600a643af5ba54f6c47c9be9e15f6c8dbe..8936ed3469cf0744a68ace9e205ea6a804ce3b85 100644 --- a/drivers/net/pppoe.c +++ b/drivers/net/pppoe.c @@ -78,6 +78,7 @@ #include #include +#include #include #include @@ -102,7 +103,7 @@ static inline int cmp_2_addr(struct pppoe_addr *a, struct pppoe_addr *b) (memcmp(a->remote, b->remote, ETH_ALEN) == 0)); } -static inline int cmp_addr(struct pppoe_addr *a, unsigned long sid, char *addr) +static inline int cmp_addr(struct pppoe_addr *a, __be16 sid, char *addr) { return (a->sid == sid && (memcmp(a->remote,addr,ETH_ALEN) == 0)); @@ -112,7 +113,7 @@ static inline int cmp_addr(struct pppoe_addr *a, unsigned long sid, char *addr) #error 8 must be a multiple of PPPOE_HASH_BITS #endif -static int hash_item(unsigned int sid, unsigned char *addr) +static int hash_item(__be16 sid, unsigned char *addr) { unsigned char hash = 0; unsigned int i; @@ -121,7 +122,7 @@ 
static int hash_item(unsigned int sid, unsigned char *addr) hash ^= addr[i]; } for (i = 0 ; i < sizeof(sid_t)*8 ; i += 8 ){ - hash ^= sid>>i; + hash ^= (__force __u32)sid>>i; } for (i = 8 ; (i>>=1) >= PPPOE_HASH_BITS ; ) { hash ^= hash>>i; @@ -138,7 +139,7 @@ static struct pppox_sock *item_hash_table[PPPOE_HASH_SIZE]; * Set/get/delete/rehash items (internal versions) * **********************************************************************/ -static struct pppox_sock *__get_item(unsigned long sid, unsigned char *addr, int ifindex) +static struct pppox_sock *__get_item(__be16 sid, unsigned char *addr, int ifindex) { int hash = hash_item(sid, addr); struct pppox_sock *ret; @@ -170,7 +171,7 @@ static int __set_item(struct pppox_sock *po) return 0; } -static struct pppox_sock *__delete_item(unsigned long sid, char *addr, int ifindex) +static struct pppox_sock *__delete_item(__be16 sid, char *addr, int ifindex) { int hash = hash_item(sid, addr); struct pppox_sock *ret, **src; @@ -196,7 +197,7 @@ static struct pppox_sock *__delete_item(unsigned long sid, char *addr, int ifind * Set/get/delete/rehash items * **********************************************************************/ -static inline struct pppox_sock *get_item(unsigned long sid, +static inline struct pppox_sock *get_item(__be16 sid, unsigned char *addr, int ifindex) { struct pppox_sock *po; @@ -215,7 +216,7 @@ static inline struct pppox_sock *get_item_by_addr(struct sockaddr_pppox *sp) struct net_device *dev; int ifindex; - dev = dev_get_by_name(sp->sa_addr.pppoe.dev); + dev = dev_get_by_name(&init_net, sp->sa_addr.pppoe.dev); if(!dev) return NULL; ifindex = dev->ifindex; @@ -223,7 +224,7 @@ static inline struct pppox_sock *get_item_by_addr(struct sockaddr_pppox *sp) return get_item(sp->sa_addr.pppoe.sid, sp->sa_addr.pppoe.remote, ifindex); } -static inline struct pppox_sock *delete_item(unsigned long sid, char *addr, int ifindex) +static inline struct pppox_sock *delete_item(__be16 sid, char *addr, int ifindex) { struct pppox_sock *ret; @@ -300,6 +301,9 @@ static int pppoe_device_event(struct notifier_block *this, { struct net_device *dev = (struct net_device *) ptr; + if (dev->nd_net != &init_net) + return NOTIFY_DONE; + /* Only look at sockets that are using this specific device. */ switch (event) { case NETDEV_CHANGEMTU: @@ -388,12 +392,15 @@ static int pppoe_rcv(struct sk_buff *skb, if (!(skb = skb_share_check(skb, GFP_ATOMIC))) goto out; + if (dev->nd_net != &init_net) + goto drop; + if (!pskb_may_pull(skb, sizeof(struct pppoe_hdr))) goto drop; ph = pppoe_hdr(skb); - po = get_item((unsigned long) ph->sid, eth_hdr(skb)->h_source, dev->ifindex); + po = get_item(ph->sid, eth_hdr(skb)->h_source, dev->ifindex); if (po != NULL) return sk_receive_skb(sk_pppox(po), skb, 0); drop: @@ -417,6 +424,9 @@ static int pppoe_disc_rcv(struct sk_buff *skb, struct pppoe_hdr *ph; struct pppox_sock *po; + if (dev->nd_net != &init_net) + goto abort; + if (!pskb_may_pull(skb, sizeof(struct pppoe_hdr))) goto abort; @@ -427,7 +437,7 @@ static int pppoe_disc_rcv(struct sk_buff *skb, if (ph->code != PADT_CODE) goto abort; - po = get_item((unsigned long) ph->sid, eth_hdr(skb)->h_source, dev->ifindex); + po = get_item(ph->sid, eth_hdr(skb)->h_source, dev->ifindex); if (po) { struct sock *sk = sk_pppox(po); @@ -476,12 +486,12 @@ static struct proto pppoe_sk_proto = { * Initialize a new struct sock. 
* **********************************************************************/ -static int pppoe_create(struct socket *sock) +static int pppoe_create(struct net *net, struct socket *sock) { int error = -ENOMEM; struct sock *sk; - sk = sk_alloc(PF_PPPOX, GFP_KERNEL, &pppoe_sk_proto, 1); + sk = sk_alloc(net, PF_PPPOX, GFP_KERNEL, &pppoe_sk_proto, 1); if (!sk) goto out; @@ -593,7 +603,7 @@ static int pppoe_connect(struct socket *sock, struct sockaddr *uservaddr, /* Don't re-bind if sid==0 */ if (sp->sa_addr.pppoe.sid != 0) { - dev = dev_get_by_name(sp->sa_addr.pppoe.dev); + dev = dev_get_by_name(&init_net, sp->sa_addr.pppoe.dev); error = -ENODEV; if (!dev) @@ -824,8 +834,8 @@ static int pppoe_sendmsg(struct kiocb *iocb, struct socket *sock, } error = total_len; - dev->hard_header(skb, dev, ETH_P_PPP_SES, - po->pppoe_pa.remote, NULL, total_len); + dev_hard_header(skb, dev, ETH_P_PPP_SES, + po->pppoe_pa.remote, NULL, total_len); memcpy(ph, &hdr, sizeof(struct pppoe_hdr)); @@ -876,8 +886,8 @@ static int __pppoe_xmit(struct sock *sk, struct sk_buff *skb) skb->protocol = __constant_htons(ETH_P_PPP_SES); skb->dev = dev; - dev->hard_header(skb, dev, ETH_P_PPP_SES, - po->pppoe_pa.remote, NULL, data_len); + dev_hard_header(skb, dev, ETH_P_PPP_SES, + po->pppoe_pa.remote, NULL, data_len); dev_queue_xmit(skb); @@ -945,6 +955,7 @@ static int pppoe_seq_show(struct seq_file *seq, void *v) { struct pppox_sock *po; char *dev_name; + DECLARE_MAC_BUF(mac); if (v == SEQ_START_TOKEN) { seq_puts(seq, "Id Address Device\n"); @@ -954,11 +965,8 @@ static int pppoe_seq_show(struct seq_file *seq, void *v) po = v; dev_name = po->pppoe_pa.dev; - seq_printf(seq, "%08X %02X:%02X:%02X:%02X:%02X:%02X %8s\n", - po->pppoe_pa.sid, - po->pppoe_pa.remote[0], po->pppoe_pa.remote[1], - po->pppoe_pa.remote[2], po->pppoe_pa.remote[3], - po->pppoe_pa.remote[4], po->pppoe_pa.remote[5], dev_name); + seq_printf(seq, "%08X %s %8s\n", + po->pppoe_pa.sid, print_mac(mac, po->pppoe_pa.remote), dev_name); out: return 0; } @@ -1042,7 +1050,7 @@ static int __init pppoe_proc_init(void) { struct proc_dir_entry *p; - p = create_proc_entry("net/pppoe", S_IRUGO, NULL); + p = create_proc_entry("pppoe", S_IRUGO, init_net.proc_net); if (!p) return -ENOMEM; @@ -1113,7 +1121,7 @@ static void __exit pppoe_exit(void) dev_remove_pack(&pppoes_ptype); dev_remove_pack(&pppoed_ptype); unregister_netdevice_notifier(&pppoe_notifier); - remove_proc_entry("net/pppoe", NULL); + remove_proc_entry("pppoe", init_net.proc_net); proto_unregister(&pppoe_sk_proto); } diff --git a/drivers/net/pppol2tp.c b/drivers/net/pppol2tp.c index abe91cb595f4fe723a561282f90ed4a1090ec495..921d4ef6d14b66d1b66c0c4b606a571cbec82020 100644 --- a/drivers/net/pppol2tp.c +++ b/drivers/net/pppol2tp.c @@ -91,6 +91,7 @@ #include #include #include +#include #include #include #include @@ -1410,12 +1411,12 @@ static struct proto pppol2tp_sk_proto = { /* socket() handler. Initialize a new struct sock. 
*/ -static int pppol2tp_create(struct socket *sock) +static int pppol2tp_create(struct net *net, struct socket *sock) { int error = -ENOMEM; struct sock *sk; - sk = sk_alloc(PF_PPPOX, GFP_KERNEL, &pppol2tp_sk_proto, 1); + sk = sk_alloc(net, PF_PPPOX, GFP_KERNEL, &pppol2tp_sk_proto, 1); if (!sk) goto out; @@ -2444,7 +2445,7 @@ static int __init pppol2tp_init(void) goto out_unregister_pppol2tp_proto; #ifdef CONFIG_PROC_FS - pppol2tp_proc = create_proc_entry("pppol2tp", 0, proc_net); + pppol2tp_proc = create_proc_entry("pppol2tp", 0, init_net.proc_net); if (!pppol2tp_proc) { err = -ENOMEM; goto out_unregister_pppox_proto; @@ -2469,7 +2470,7 @@ static void __exit pppol2tp_exit(void) unregister_pppox_proto(PX_PROTO_OL2TP); #ifdef CONFIG_PROC_FS - remove_proc_entry("pppol2tp", proc_net); + remove_proc_entry("pppol2tp", init_net.proc_net); #endif proto_unregister(&pppol2tp_sk_proto); } diff --git a/drivers/net/pppox.c b/drivers/net/pppox.c index 25c52b55c38fb6cb11ddc369ce88f5b527f7cf20..c6898c1fc54d95de595fa990fc7b0977149c796a 100644 --- a/drivers/net/pppox.c +++ b/drivers/net/pppox.c @@ -104,10 +104,13 @@ int pppox_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) EXPORT_SYMBOL(pppox_ioctl); -static int pppox_create(struct socket *sock, int protocol) +static int pppox_create(struct net *net, struct socket *sock, int protocol) { int rc = -EPROTOTYPE; + if (net != &init_net) + return -EAFNOSUPPORT; + if (protocol < 0 || protocol > PX_MAX_PROTO) goto out; @@ -123,7 +126,7 @@ static int pppox_create(struct socket *sock, int protocol) !try_module_get(pppox_protos[protocol]->owner)) goto out; - rc = pppox_protos[protocol]->create(sock); + rc = pppox_protos[protocol]->create(net, sock); module_put(pppox_protos[protocol]->owner); out: diff --git a/drivers/net/ps3_gelic_net.c b/drivers/net/ps3_gelic_net.c index e565039184369b22b25dc486ff4f8a945c92e26e..0a42bf5174658fa71b050bef8225597a04e531b3 100644 --- a/drivers/net/ps3_gelic_net.c +++ b/drivers/net/ps3_gelic_net.c @@ -351,19 +351,13 @@ static int gelic_net_alloc_rx_skbs(struct gelic_net_card *card) static void gelic_net_release_tx_descr(struct gelic_net_card *card, struct gelic_net_descr *descr) { - struct sk_buff *skb; + struct sk_buff *skb = descr->skb; + BUG_ON(!(descr->data_status & (1 << GELIC_NET_TXDESC_TAIL))); - if (descr->data_status & (1 << GELIC_NET_TXDESC_TAIL)) { - /* 2nd descriptor */ - skb = descr->skb; - dma_unmap_single(ctodev(card), descr->buf_addr, skb->len, - DMA_TO_DEVICE); - dev_kfree_skb_any(skb); - } else { - dma_unmap_single(ctodev(card), descr->buf_addr, - descr->buf_size, DMA_TO_DEVICE); - } + dma_unmap_single(ctodev(card), descr->buf_addr, skb->len, + DMA_TO_DEVICE); + dev_kfree_skb_any(skb); descr->buf_addr = 0; descr->buf_size = 0; @@ -426,7 +420,7 @@ static void gelic_net_release_tx_chain(struct gelic_net_card *card, int stop) release ++; } out: - if (!stop && (2 < release)) + if (!stop && release) netif_wake_queue(card->netdev); } @@ -556,6 +550,7 @@ static int gelic_net_stop(struct net_device *netdev) { struct gelic_net_card *card = netdev_priv(netdev); + napi_disable(&card->napi); netif_stop_queue(netdev); /* turn off DMA, force end */ @@ -592,13 +587,10 @@ gelic_net_get_next_tx_descr(struct gelic_net_card *card) { if (!card->tx_chain.head) return NULL; - /* see if we can two consecutive free descrs */ + /* see if the next descriptor is free */ if (card->tx_chain.tail != card->tx_chain.head->next && gelic_net_get_descr_status(card->tx_chain.head) == - GELIC_NET_DESCR_NOT_IN_USE && - card->tx_chain.tail != 
card->tx_chain.head->next->next && - gelic_net_get_descr_status(card->tx_chain.head->next) == - GELIC_NET_DESCR_NOT_IN_USE ) + GELIC_NET_DESCR_NOT_IN_USE) return card->tx_chain.head; else return NULL; @@ -609,44 +601,66 @@ gelic_net_get_next_tx_descr(struct gelic_net_card *card) * gelic_net_set_txdescr_cmdstat - sets the tx descriptor command field * @descr: descriptor structure to fill out * @skb: packet to consider - * @middle: middle of frame * * fills out the command and status field of the descriptor structure, * depending on hardware checksum settings. This function assumes a wmb() * has executed before. */ static void gelic_net_set_txdescr_cmdstat(struct gelic_net_descr *descr, - struct sk_buff *skb, int middle) + struct sk_buff *skb) { - u32 eofr; - - if (middle) - eofr = 0; - else - eofr = GELIC_NET_DMAC_CMDSTAT_END_FRAME; - if (skb->ip_summed != CHECKSUM_PARTIAL) - descr->dmac_cmd_status = GELIC_NET_DMAC_CMDSTAT_NOCS | eofr; + descr->dmac_cmd_status = GELIC_NET_DMAC_CMDSTAT_NOCS | + GELIC_NET_DMAC_CMDSTAT_END_FRAME; else { /* is packet ip? * if yes: tcp? udp? */ if (skb->protocol == htons(ETH_P_IP)) { if (ip_hdr(skb)->protocol == IPPROTO_TCP) descr->dmac_cmd_status = - GELIC_NET_DMAC_CMDSTAT_TCPCS | eofr; + GELIC_NET_DMAC_CMDSTAT_TCPCS | + GELIC_NET_DMAC_CMDSTAT_END_FRAME; + else if (ip_hdr(skb)->protocol == IPPROTO_UDP) descr->dmac_cmd_status = - GELIC_NET_DMAC_CMDSTAT_UDPCS | eofr; + GELIC_NET_DMAC_CMDSTAT_UDPCS | + GELIC_NET_DMAC_CMDSTAT_END_FRAME; else /* * the stack should checksum non-tcp and non-udp * packets on his own: NETIF_F_IP_CSUM */ descr->dmac_cmd_status = - GELIC_NET_DMAC_CMDSTAT_NOCS | eofr; + GELIC_NET_DMAC_CMDSTAT_NOCS | + GELIC_NET_DMAC_CMDSTAT_END_FRAME; } } } +static inline struct sk_buff *gelic_put_vlan_tag(struct sk_buff *skb, + unsigned short tag) +{ + struct vlan_ethhdr *veth; + static unsigned int c; + + if (skb_headroom(skb) < VLAN_HLEN) { + struct sk_buff *sk_tmp = skb; + pr_debug("%s: hd=%d c=%ud\n", __func__, skb_headroom(skb), c); + skb = skb_realloc_headroom(sk_tmp, VLAN_HLEN); + if (!skb) + return NULL; + dev_kfree_skb_any(sk_tmp); + } + veth = (struct vlan_ethhdr *)skb_push(skb, VLAN_HLEN); + + /* Move the mac addresses to the top of buffer */ + memmove(skb->data, skb->data + VLAN_HLEN, 2 * ETH_ALEN); + + veth->h_vlan_proto = __constant_htons(ETH_P_8021Q); + veth->h_vlan_TCI = htons(tag); + + return skb; +} + /** * gelic_net_prepare_tx_descr_v - get dma address of skb_data * @card: card structure @@ -660,65 +674,35 @@ static int gelic_net_prepare_tx_descr_v(struct gelic_net_card *card, struct gelic_net_descr *descr, struct sk_buff *skb) { - dma_addr_t buf[2]; - unsigned int vlan_len; - struct gelic_net_descr *sec_descr = descr->next; + dma_addr_t buf; - if (skb->len < GELIC_NET_VLAN_POS) - return -EINVAL; - - vlan_len = GELIC_NET_VLAN_POS; - memcpy(&descr->vlan, skb->data, vlan_len); if (card->vlan_index != -1) { - /* internal vlan tag used */ - descr->vlan.h_vlan_proto = htons(ETH_P_8021Q); /* vlan 0x8100*/ - descr->vlan.h_vlan_TCI = htons(card->vlan_id[card->vlan_index]); - vlan_len += VLAN_HLEN; /* added for above two lines */ + struct sk_buff *skb_tmp; + skb_tmp = gelic_put_vlan_tag(skb, + card->vlan_id[card->vlan_index]); + if (!skb_tmp) + return -ENOMEM; + skb = skb_tmp; } - /* map data area */ - buf[0] = dma_map_single(ctodev(card), &descr->vlan, - vlan_len, DMA_TO_DEVICE); + buf = dma_map_single(ctodev(card), skb->data, skb->len, DMA_TO_DEVICE); - if (!buf[0]) { - dev_err(ctodev(card), - "dma map 1 failed (%p, %i). 
Dropping packet\n", - skb->data, vlan_len); - return -ENOMEM; - } - - buf[1] = dma_map_single(ctodev(card), skb->data + GELIC_NET_VLAN_POS, - skb->len - GELIC_NET_VLAN_POS, - DMA_TO_DEVICE); - - if (!buf[1]) { + if (!buf) { dev_err(ctodev(card), "dma map 2 failed (%p, %i). Dropping packet\n", - skb->data + GELIC_NET_VLAN_POS, - skb->len - GELIC_NET_VLAN_POS); - dma_unmap_single(ctodev(card), buf[0], vlan_len, - DMA_TO_DEVICE); + skb->data, skb->len); return -ENOMEM; } - /* first descr */ - descr->buf_addr = buf[0]; - descr->buf_size = vlan_len; - descr->skb = NULL; /* not used */ + descr->buf_addr = buf; + descr->buf_size = skb->len; + descr->skb = skb; descr->data_status = 0; - descr->next_descr_addr = descr->next->bus_addr; - gelic_net_set_txdescr_cmdstat(descr, skb, 1); /* not the frame end */ - - /* second descr */ - sec_descr->buf_addr = buf[1]; - sec_descr->buf_size = skb->len - GELIC_NET_VLAN_POS; - sec_descr->skb = skb; - sec_descr->data_status = 0; - sec_descr->next_descr_addr = 0; /* terminate hw descr */ - gelic_net_set_txdescr_cmdstat(sec_descr, skb, 0); + descr->next_descr_addr = 0; /* terminate hw descr */ + gelic_net_set_txdescr_cmdstat(descr, skb); /* bump free descriptor pointer */ - card->tx_chain.head = sec_descr->next; + card->tx_chain.head = descr->next; return 0; } @@ -732,25 +716,17 @@ static int gelic_net_kick_txdma(struct gelic_net_card *card, struct gelic_net_descr *descr) { int status = 0; - int count = 10; if (card->tx_dma_progress) return 0; if (gelic_net_get_descr_status(descr) == GELIC_NET_DESCR_CARDOWNED) { card->tx_dma_progress = 1; - /* sometimes we need retry here */ - while (count--) { - status = lv1_net_start_tx_dma(bus_id(card), - dev_id(card), - descr->bus_addr, 0); - if (!status) - break; - } - if (!count) + status = lv1_net_start_tx_dma(bus_id(card), dev_id(card), + descr->bus_addr, 0); + if (status) dev_info(ctodev(card), "lv1_net_start_txdma failed," \ - "status=%d %#lx\n", - status, card->irq_status); + "status=%d\n", status); } return status; } @@ -987,32 +963,24 @@ static int gelic_net_decode_one_descr(struct gelic_net_card *card) * if the quota is exceeded, but the driver has still packets. 
* */ -static int gelic_net_poll(struct net_device *netdev, int *budget) +static int gelic_net_poll(struct napi_struct *napi, int budget) { - struct gelic_net_card *card = netdev_priv(netdev); - int packets_to_do, packets_done = 0; - int no_more_packets = 0; - - packets_to_do = min(*budget, netdev->quota); + struct gelic_net_card *card = container_of(napi, struct gelic_net_card, napi); + struct net_device *netdev = card->netdev; + int packets_done = 0; - while (packets_to_do) { - if (gelic_net_decode_one_descr(card)) { - packets_done++; - packets_to_do--; - } else { - /* no more packets for the stack */ - no_more_packets = 1; + while (packets_done < budget) { + if (!gelic_net_decode_one_descr(card)) break; - } + + packets_done++; } - netdev->quota -= packets_done; - *budget -= packets_done; - if (no_more_packets) { - netif_rx_complete(netdev); + + if (packets_done < budget) { + netif_rx_complete(netdev, napi); gelic_net_rx_irq_on(card); - return 0; - } else - return 1; + } + return packets_done; } /** * gelic_net_change_mtu - changes the MTU of an interface @@ -1055,7 +1023,7 @@ static irqreturn_t gelic_net_interrupt(int irq, void *ptr) if (status & GELIC_NET_RXINT) { gelic_net_rx_irq_off(card); - netif_rx_schedule(netdev); + netif_rx_schedule(netdev, &card->napi); } if (status & GELIC_NET_TXINT) { @@ -1159,6 +1127,8 @@ static int gelic_net_open(struct net_device *netdev) if (gelic_net_alloc_rx_skbs(card)) goto alloc_skbs_failed; + napi_enable(&card->napi); + card->tx_dma_progress = 0; card->ghiintmask = GELIC_NET_RXINT | GELIC_NET_TXINT; @@ -1360,9 +1330,6 @@ static void gelic_net_setup_netdev_ops(struct net_device *netdev) /* tx watchdog */ netdev->tx_timeout = &gelic_net_tx_timeout; netdev->watchdog_timeo = GELIC_NET_WATCHDOG_TIMEOUT; - /* NAPI */ - netdev->poll = &gelic_net_poll; - netdev->weight = GELIC_NET_NAPI_WEIGHT; netdev->ethtool_ops = &gelic_net_ethtool_ops; } @@ -1381,8 +1348,8 @@ static int gelic_net_setup_netdev(struct gelic_net_card *card) unsigned int i; int status; u64 v1, v2; + DECLARE_MAC_BUF(mac); - SET_MODULE_OWNER(netdev); SET_NETDEV_DEV(netdev, &card->dev->core); spin_lock_init(&card->tx_dma_lock); @@ -1390,6 +1357,9 @@ static int gelic_net_setup_netdev(struct gelic_net_card *card) gelic_net_setup_netdev_ops(netdev); + netif_napi_add(netdev, &card->napi, + gelic_net_poll, GELIC_NET_NAPI_WEIGHT); + netdev->features = NETIF_F_IP_CSUM; status = lv1_net_control(bus_id(card), dev_id(card), @@ -1404,10 +1374,8 @@ static int gelic_net_setup_netdev(struct gelic_net_card *card) v1 <<= 16; memcpy(addr.sa_data, &v1, ETH_ALEN); memcpy(netdev->dev_addr, addr.sa_data, ETH_ALEN); - dev_info(ctodev(card), "MAC addr %02x:%02x:%02x:%02x:%02x:%02x\n", - netdev->dev_addr[0], netdev->dev_addr[1], - netdev->dev_addr[2], netdev->dev_addr[3], - netdev->dev_addr[4], netdev->dev_addr[5]); + dev_info(ctodev(card), "MAC addr %s\n", + print_mac(mac, netdev->dev_addr)); card->vlan_index = -1; /* no vlan */ for (i = 0; i < GELIC_NET_VLAN_MAX; i++) { @@ -1430,8 +1398,11 @@ static int gelic_net_setup_netdev(struct gelic_net_card *card) dev_dbg(ctodev(card), "vlan_id:%d, %lx\n", i, v1); } } - if (card->vlan_id[GELIC_NET_VLAN_WIRED - 1]) + + if (card->vlan_id[GELIC_NET_VLAN_WIRED - 1]) { card->vlan_index = GELIC_NET_VLAN_WIRED - 1; + netdev->hard_header_len += VLAN_HLEN; + } status = register_netdev(netdev); if (status) { diff --git a/drivers/net/ps3_gelic_net.h b/drivers/net/ps3_gelic_net.h index a9c4c4fc25471b26a99a3cb8efdf51783960b21d..968560269a3ba578ef5ee9021285085c13c0a2f3 100644 --- 
a/drivers/net/ps3_gelic_net.h +++ b/drivers/net/ps3_gelic_net.h @@ -194,6 +194,7 @@ struct gelic_net_descr_chain { struct gelic_net_card { struct net_device *netdev; + struct napi_struct napi; /* * hypervisor requires irq_status should be * 8 bytes aligned, but u64 member is diff --git a/drivers/net/qla3xxx.c b/drivers/net/qla3xxx.c old mode 100755 new mode 100644 index ea151315050c382ff096696d7ae2a3abd2a54949..30adf726743cba4993b2936e4f4d9ae2f092f182 --- a/drivers/net/qla3xxx.c +++ b/drivers/net/qla3xxx.c @@ -31,7 +31,6 @@ #include #include #include -#include #include #include @@ -82,7 +81,7 @@ typedef enum { } PHY_DEVICE_et; typedef struct { - PHY_DEVICE_et phyDevice; + PHY_DEVICE_et phyDevice; u32 phyIdOUI; u16 phyIdModel; char *name; @@ -331,7 +330,7 @@ static void ql_release_to_lrg_buf_free_list(struct ql3_adapter *qdev, PCI_DMA_FROMDEVICE); err = pci_dma_mapping_error(map); if(err) { - printk(KERN_ERR "%s: PCI mapping failed with error: %d\n", + printk(KERN_ERR "%s: PCI mapping failed with error: %d\n", qdev->ndev->name, err); dev_kfree_skb(lrg_buf_cb->skb); lrg_buf_cb->skb = NULL; @@ -885,14 +884,14 @@ static void ql_petbi_start_neg_ex(struct ql3_adapter *qdev) u16 reg; /* Enable Auto-negotiation sense */ - ql_mii_read_reg_ex(qdev, PETBI_TBI_CTRL, &reg, + ql_mii_read_reg_ex(qdev, PETBI_TBI_CTRL, &reg, PHYAddr[qdev->mac_index]); reg |= PETBI_TBI_AUTO_SENSE; - ql_mii_write_reg_ex(qdev, PETBI_TBI_CTRL, reg, + ql_mii_write_reg_ex(qdev, PETBI_TBI_CTRL, reg, PHYAddr[qdev->mac_index]); ql_mii_write_reg_ex(qdev, PETBI_NEG_ADVER, - PETBI_NEG_PAUSE | PETBI_NEG_DUPLEX, + PETBI_NEG_PAUSE | PETBI_NEG_DUPLEX, PHYAddr[qdev->mac_index]); ql_mii_write_reg_ex(qdev, PETBI_CONTROL_REG, @@ -946,7 +945,7 @@ static void phyAgereSpecificInit(struct ql3_adapter *qdev, u32 miiAddr) ql_mii_write_reg_ex(qdev, 0x10, 0x2806, miiAddr); /* Write new PHYAD w/bit 5 set */ ql_mii_write_reg_ex(qdev, 0x11, 0x0020 | (PHYAddr[qdev->mac_index] >> 8), miiAddr); - /* + /* * Disable diagnostic mode bit 2 = 0 * Power up device bit 11 = 0 * Link up (on) and activity (blink) @@ -956,18 +955,18 @@ ql_mii_write_reg(qdev, 0x1c, 0xfaf0); } -static PHY_DEVICE_et getPhyType (struct ql3_adapter *qdev, +static PHY_DEVICE_et getPhyType (struct ql3_adapter *qdev, u16 phyIdReg0, u16 phyIdReg1) { PHY_DEVICE_et result = PHY_TYPE_UNKNOWN; - u32 oui; + u32 oui; u16 model; - int i; + int i; if (phyIdReg0 == 0xffff) { return result; } - + if (phyIdReg1 == 0xffff) { return result; } @@ -985,7 +984,7 @@ static PHY_DEVICE_et getPhyType (struct ql3_adapter *qdev, printk(KERN_INFO "%s: Phy: %s\n", qdev->ndev->name, PHY_DEVICES[i].name); - + break; } } @@ -1034,7 +1033,7 @@ static int ql_is_full_dup(struct ql3_adapter *qdev) { if (ql_mii_read_reg(qdev, 0x1A, &reg)) return 0; - + return ((reg & 0x0080) && (reg & 0x1000)) != 0; } case PHY_VITESSE_VSC8211: @@ -1083,19 +1082,19 @@ static int PHY_Setup(struct ql3_adapter *qdev) /* Check if we have a Agere PHY */ if ((reg1 == 0xffff) || (reg2 == 0xffff)) { - /* Determine which MII address we should be using + /* Determine which MII address we should be using determined by the index of the card */ if (qdev->mac_index == 0) { miiAddr = MII_AGERE_ADDR_1; } else { miiAddr = MII_AGERE_ADDR_2; } - + err =ql_mii_read_reg_ex(qdev, PHY_ID_0_REG, &reg1, miiAddr); if(err != 0) { printk(KERN_ERR "%s: Could not read from reg PHY_ID_0_REG after Agere detected\n", qdev->ndev->name); - return err; + return err; } err = ql_mii_read_reg_ex(qdev, PHY_ID_1_REG, &reg2,
miiAddr); @@ -1104,9 +1103,9 @@ static int PHY_Setup(struct ql3_adapter *qdev) qdev->ndev->name); return err; } - + /* We need to remember to initialize the Agere PHY */ - agereAddrChangeNeeded = true; + agereAddrChangeNeeded = true; } /* Determine the particular PHY we have on board to apply @@ -1115,7 +1114,7 @@ if ((qdev->phyType == PHY_AGERE_ET1011C) && agereAddrChangeNeeded) { /* need this here so address gets changed */ - phyAgereSpecificInit(qdev, miiAddr); + phyAgereSpecificInit(qdev, miiAddr); } else if (qdev->phyType == PHY_TYPE_UNKNOWN) { printk(KERN_ERR "%s: PHY is unknown\n", qdev->ndev->name); return -EIO; @@ -1428,7 +1427,7 @@ static int ql_this_adapter_controls_port(struct ql3_adapter *qdev) static void ql_phy_reset_ex(struct ql3_adapter *qdev) { - ql_mii_write_reg_ex(qdev, CONTROL_REG, PHY_CTRL_SOFT_RESET, + ql_mii_write_reg_ex(qdev, CONTROL_REG, PHY_CTRL_SOFT_RESET, PHYAddr[qdev->mac_index]); } @@ -1439,7 +1438,7 @@ static void ql_phy_start_neg_ex(struct ql3_adapter *qdev) if(qdev->phyType == PHY_AGERE_ET1011C) { /* turn off external loopback */ - ql_mii_write_reg(qdev, 0x13, 0x0000); + ql_mii_write_reg(qdev, 0x13, 0x0000); } if(qdev->mac_index == 0) @@ -1453,23 +1452,23 @@ static void ql_phy_start_neg_ex(struct ql3_adapter *qdev) portConfiguration = PORT_CONFIG_DEFAULT; /* Set the 1000 advertisements */ - ql_mii_read_reg_ex(qdev, PHY_GIG_CONTROL, &reg, + ql_mii_read_reg_ex(qdev, PHY_GIG_CONTROL, &reg, PHYAddr[qdev->mac_index]); reg &= ~PHY_GIG_ALL_PARAMS; - if(portConfiguration & + if(portConfiguration & PORT_CONFIG_FULL_DUPLEX_ENABLED & PORT_CONFIG_1000MB_SPEED) { reg |= PHY_GIG_ADV_1000F; } - - if(portConfiguration & + + if(portConfiguration & PORT_CONFIG_HALF_DUPLEX_ENABLED & PORT_CONFIG_1000MB_SPEED) { reg |= PHY_GIG_ADV_1000H; } - ql_mii_write_reg_ex(qdev, PHY_GIG_CONTROL, reg, + ql_mii_write_reg_ex(qdev, PHY_GIG_CONTROL, reg, PHYAddr[qdev->mac_index]); /* Set the 10/100 & pause negotiation advertisements */ @@ -1483,7 +1482,7 @@ static void ql_phy_start_neg_ex(struct ql3_adapter *qdev) if(portConfiguration & PORT_CONFIG_FULL_DUPLEX_ENABLED) { if(portConfiguration & PORT_CONFIG_100MB_SPEED) reg |= PHY_NEG_ADV_100F; - + if(portConfiguration & PORT_CONFIG_10MB_SPEED) reg |= PHY_NEG_ADV_10F; } @@ -1491,22 +1490,22 @@ static void ql_phy_start_neg_ex(struct ql3_adapter *qdev) if(portConfiguration & PORT_CONFIG_HALF_DUPLEX_ENABLED) { if(portConfiguration & PORT_CONFIG_100MB_SPEED) reg |= PHY_NEG_ADV_100H; - + if(portConfiguration & PORT_CONFIG_10MB_SPEED) reg |= PHY_NEG_ADV_10H; } if(portConfiguration & PORT_CONFIG_1000MB_SPEED) { - reg |= 1; + reg |= 1; } - ql_mii_write_reg_ex(qdev, PHY_NEG_ADVER, reg, + ql_mii_write_reg_ex(qdev, PHY_NEG_ADVER, reg, PHYAddr[qdev->mac_index]); ql_mii_read_reg_ex(qdev, CONTROL_REG, &reg, PHYAddr[qdev->mac_index]); - - ql_mii_write_reg_ex(qdev, CONTROL_REG, + + ql_mii_write_reg_ex(qdev, CONTROL_REG, reg | PHY_CTRL_RESTART_NEG | PHY_CTRL_AUTO_NEG, PHYAddr[qdev->mac_index]); } @@ -1661,7 +1660,7 @@ static void ql_link_state_machine(struct ql3_adapter *qdev) "%s: Reset in progress, skip processing link " "state.\n", qdev->ndev->name); - spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); + spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); return; } @@ -1753,7 +1752,7 @@ static int ql_mii_setup(struct ql3_adapter *qdev) return -1; if (qdev->device_id == QL3032_DEVICE_ID) - ql_write_page0_reg(qdev, + ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg, 0x0f00000); /* Divide 125MHz clock by 28 to meet PHY
timing requirements */ @@ -1866,8 +1865,6 @@ static void ql_get_drvinfo(struct net_device *ndev, strncpy(drvinfo->version, ql3xxx_driver_version, 32); strncpy(drvinfo->fw_version, "N/A", 32); strncpy(drvinfo->bus_info, pci_name(qdev->pdev), 32); - drvinfo->n_stats = 0; - drvinfo->testinfo_len = 0; drvinfo->regdump_len = 0; drvinfo->eedump_len = 0; } @@ -1939,7 +1936,7 @@ static int ql_populate_free_queue(struct ql3_adapter *qdev) err = pci_dma_mapping_error(map); if(err) { - printk(KERN_ERR "%s: PCI mapping failed with error: %d\n", + printk(KERN_ERR "%s: PCI mapping failed with error: %d\n", qdev->ndev->name, err); dev_kfree_skb(lrg_buf_cb->skb); lrg_buf_cb->skb = NULL; @@ -2047,14 +2044,14 @@ static void ql_process_mac_tx_intr(struct ql3_adapter *qdev, if(mac_rsp->flags & OB_MAC_IOCB_RSP_S) { printk(KERN_WARNING "Frame short but, frame was padded and sent.\n"); } - + tx_cb = &qdev->tx_buf[mac_rsp->transaction_id]; /* Check the transmit response flags for any errors */ if(mac_rsp->flags & OB_MAC_IOCB_RSP_S) { printk(KERN_ERR "Frame too short to be legal, frame not sent.\n"); - qdev->stats.tx_errors++; + qdev->ndev->stats.tx_errors++; retval = -EIO; goto frame_not_sent; } @@ -2062,7 +2059,7 @@ static void ql_process_mac_tx_intr(struct ql3_adapter *qdev, if(tx_cb->seg_count == 0) { printk(KERN_ERR "tx_cb->seg_count == 0: %d\n", mac_rsp->transaction_id); - qdev->stats.tx_errors++; + qdev->ndev->stats.tx_errors++; retval = -EIO; goto invalid_seg_count; } @@ -2081,8 +2078,8 @@ static void ql_process_mac_tx_intr(struct ql3_adapter *qdev, PCI_DMA_TODEVICE); } } - qdev->stats.tx_packets++; - qdev->stats.tx_bytes += tx_cb->skb->len; + qdev->ndev->stats.tx_packets++; + qdev->ndev->stats.tx_bytes += tx_cb->skb->len; frame_not_sent: dev_kfree_skb_irq(tx_cb->skb); @@ -2111,13 +2108,13 @@ static struct ql_rcv_buf_cb *ql_get_lbuf(struct ql3_adapter *qdev) /* * The difference between 3022 and 3032 for inbound completions: - * 3022 uses two buffers per completion. The first buffer contains - * (some) header info, the second the remainder of the headers plus - * the data. For this chip we reserve some space at the top of the - * receive buffer so that the header info in buffer one can be - * prepended to the buffer two. Buffer two is the sent up while + * 3022 uses two buffers per completion. The first buffer contains + * (some) header info, the second the remainder of the headers plus + * the data. For this chip we reserve some space at the top of the + * receive buffer so that the header info in buffer one can be + * prepended to the buffer two. Buffer two is the sent up while * buffer one is returned to the hardware to be reused. - * 3032 receives all of it's data and headers in one buffer for a + * 3032 receives all of it's data and headers in one buffer for a * simpler process. 3032 also supports checksum verification as * can be seen in ql_process_macip_rx_intr(). 
*/ @@ -2141,8 +2138,8 @@ static void ql_process_mac_rx_intr(struct ql3_adapter *qdev, lrg_buf_cb2 = ql_get_lbuf(qdev); skb = lrg_buf_cb2->skb; - qdev->stats.rx_packets++; - qdev->stats.rx_bytes += length; + qdev->ndev->stats.rx_packets++; + qdev->ndev->stats.rx_bytes += length; skb_put(skb, length); pci_unmap_single(qdev->pdev, @@ -2208,13 +2205,13 @@ static void ql_process_macip_rx_intr(struct ql3_adapter *qdev, skb_push(skb2, size), size); } else { u16 checksum = le16_to_cpu(ib_ip_rsp_ptr->checksum); - if (checksum & - (IB_IP_IOCB_RSP_3032_ICE | - IB_IP_IOCB_RSP_3032_CE)) { + if (checksum & + (IB_IP_IOCB_RSP_3032_ICE | + IB_IP_IOCB_RSP_3032_CE)) { printk(KERN_ERR "%s: Bad checksum for this %s packet, checksum = %x.\n", __func__, - ((checksum & + ((checksum & IB_IP_IOCB_RSP_3032_TCP) ? "TCP" : "UDP"),checksum); } else if ((checksum & IB_IP_IOCB_RSP_3032_TCP) || @@ -2226,8 +2223,8 @@ static void ql_process_macip_rx_intr(struct ql3_adapter *qdev, skb2->protocol = eth_type_trans(skb2, qdev->ndev); netif_receive_skb(skb2); - qdev->stats.rx_packets++; - qdev->stats.rx_bytes += length; + ndev->stats.rx_packets++; + ndev->stats.rx_bytes += length; ndev->last_rx = jiffies; lrg_buf_cb2->skb = NULL; @@ -2310,10 +2307,10 @@ static int ql_tx_rx_clean(struct ql3_adapter *qdev, return work_done; } -static int ql_poll(struct net_device *ndev, int *budget) +static int ql_poll(struct napi_struct *napi, int budget) { - struct ql3_adapter *qdev = netdev_priv(ndev); - int work_to_do = min(*budget, ndev->quota); + struct ql3_adapter *qdev = container_of(napi, struct ql3_adapter, napi); + struct net_device *ndev = qdev->ndev; int rx_cleaned = 0, tx_cleaned = 0; unsigned long hw_flags; struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers; @@ -2321,16 +2318,13 @@ static int ql_poll(struct net_device *ndev, int *budget) if (!netif_carrier_ok(ndev)) goto quit_polling; - ql_tx_rx_clean(qdev, &tx_cleaned, &rx_cleaned, work_to_do); - *budget -= rx_cleaned; - ndev->quota -= rx_cleaned; + ql_tx_rx_clean(qdev, &tx_cleaned, &rx_cleaned, budget); - if( tx_cleaned + rx_cleaned != work_to_do || + if (tx_cleaned + rx_cleaned != budget || !netif_running(ndev)) { quit_polling: - netif_rx_complete(ndev); - spin_lock_irqsave(&qdev->hw_lock, hw_flags); + __netif_rx_complete(ndev, napi); ql_update_small_bufq_prod_index(qdev); ql_update_lrg_bufq_prod_index(qdev); writel(qdev->rsp_consumer_index, @@ -2338,9 +2332,8 @@ static int ql_poll(struct net_device *ndev, int *budget) spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); ql_enable_interrupts(qdev); - return 0; } - return 1; + return tx_cleaned + rx_cleaned; } static irqreturn_t ql3xxx_isr(int irq, void *dev_id) @@ -2390,8 +2383,8 @@ static irqreturn_t ql3xxx_isr(int irq, void *dev_id) spin_unlock(&qdev->adapter_lock); } else if (value & ISP_IMR_DISABLE_CMPL_INT) { ql_disable_interrupts(qdev); - if (likely(netif_rx_schedule_prep(ndev))) { - __netif_rx_schedule(ndev); + if (likely(netif_rx_schedule_prep(ndev, &qdev->napi))) { + __netif_rx_schedule(ndev, &qdev->napi); } } else { return IRQ_NONE; @@ -2401,12 +2394,12 @@ static irqreturn_t ql3xxx_isr(int irq, void *dev_id) } /* - * Get the total number of segments needed for the + * Get the total number of segments needed for the * given number of fragments. This is necessary because * outbound address lists (OAL) will be used when more than - * two frags are given. Each address list has 5 addr/len + * two frags are given. Each address list has 5 addr/len * pairs. 
The 5th pair in each AOL is used to point to - * the next AOL if more frags are coming. + * the next AOL if more frags are coming. * That is why the frags:segment count ratio is not linear. */ static int ql_get_seg_count(struct ql3_adapter *qdev, @@ -2483,12 +2476,12 @@ static int ql_send_map(struct ql3_adapter *qdev, err = pci_dma_mapping_error(map); if(err) { - printk(KERN_ERR "%s: PCI mapping failed with error: %d\n", + printk(KERN_ERR "%s: PCI mapping failed with error: %d\n", qdev->ndev->name, err); return NETDEV_TX_BUSY; } - + oal_entry = (struct oal_entry *)&mac_iocb_ptr->buf_addr0_low; oal_entry->dma_lo = cpu_to_le32(LS_64BITS(map)); oal_entry->dma_hi = cpu_to_le32(MS_64BITS(map)); @@ -2518,7 +2511,7 @@ static int ql_send_map(struct ql3_adapter *qdev, err = pci_dma_mapping_error(map); if(err) { - printk(KERN_ERR "%s: PCI mapping outbound address list with error: %d\n", + printk(KERN_ERR "%s: PCI mapping outbound address list with error: %d\n", qdev->ndev->name, err); goto map_error; } @@ -2544,7 +2537,7 @@ static int ql_send_map(struct ql3_adapter *qdev, err = pci_dma_mapping_error(map); if(err) { - printk(KERN_ERR "%s: PCI mapping frags failed with error: %d\n", + printk(KERN_ERR "%s: PCI mapping frags failed with error: %d\n", qdev->ndev->name, err); goto map_error; } @@ -2565,10 +2558,10 @@ static int ql_send_map(struct ql3_adapter *qdev, map_error: /* A PCI mapping failed and now we will need to back out - * We need to traverse through the oal's and associated pages which + * We need to traverse through the oal's and associated pages which * have been mapped and now we must unmap them to clean up properly */ - + seg = 1; oal_entry = (struct oal_entry *)&mac_iocb_ptr->buf_addr0_low; oal = tx_cb->oal; @@ -2606,11 +2599,11 @@ static int ql_send_map(struct ql3_adapter *qdev, * The difference between 3022 and 3032 sends: * 3022 only supports a simple single segment transmission. * 3032 supports checksumming and scatter/gather lists (fragments). - * The 3032 supports sglists by using the 3 addr/len pairs (ALP) - * in the IOCB plus a chain of outbound address lists (OAL) that - * each contain 5 ALPs. The last ALP of the IOCB (3rd) or OAL (5th) - * will used to point to an OAL when more ALP entries are required. - * The IOCB is always the top of the chain followed by one or more + * The 3032 supports sglists by using the 3 addr/len pairs (ALP) + * in the IOCB plus a chain of outbound address lists (OAL) that + * each contain 5 ALPs. The last ALP of the IOCB (3rd) or OAL (5th) + * will used to point to an OAL when more ALP entries are required. + * The IOCB is always the top of the chain followed by one or more * OALs (when necessary). 
*/ static int ql3xxx_send(struct sk_buff *skb, struct net_device *ndev) @@ -2624,14 +2617,14 @@ static int ql3xxx_send(struct sk_buff *skb, struct net_device *ndev) if (unlikely(atomic_read(&qdev->tx_count) < 2)) { return NETDEV_TX_BUSY; } - + tx_cb = &qdev->tx_buf[qdev->req_producer_index] ; if((tx_cb->seg_count = ql_get_seg_count(qdev, (skb_shinfo(skb)->nr_frags))) == -1) { printk(KERN_ERR PFX"%s: invalid segment count!\n",__func__); return NETDEV_TX_OK; } - + mac_iocb_ptr = tx_cb->queue_entry; memset((void *)mac_iocb_ptr, 0, sizeof(struct ob_mac_iocb_req)); mac_iocb_ptr->opcode = qdev->mac_ob_opcode; @@ -2643,12 +2636,12 @@ static int ql3xxx_send(struct sk_buff *skb, struct net_device *ndev) if (qdev->device_id == QL3032_DEVICE_ID && skb->ip_summed == CHECKSUM_PARTIAL) ql_hw_csum_setup(skb, mac_iocb_ptr); - + if(ql_send_map(qdev,mac_iocb_ptr,tx_cb,skb) != NETDEV_TX_OK) { printk(KERN_ERR PFX"%s: Could not map the segments!\n",__func__); return NETDEV_TX_BUSY; } - + wmb(); qdev->req_producer_index++; if (qdev->req_producer_index == NUM_REQ_Q_ENTRIES) @@ -2746,7 +2739,7 @@ static int ql_alloc_buffer_queues(struct ql3_adapter *qdev) "%s: qdev->lrg_buf alloc failed.\n", qdev->ndev->name); return -ENOMEM; } - + qdev->lrg_buf_q_alloc_virt_addr = pci_alloc_consistent(qdev->pdev, qdev->lrg_buf_q_alloc_size, @@ -3562,6 +3555,7 @@ static void ql_display_dev_info(struct net_device *ndev) { struct ql3_adapter *qdev = (struct ql3_adapter *)netdev_priv(ndev); struct pci_dev *pdev = qdev->pdev; + DECLARE_MAC_BUF(mac); printk(KERN_INFO PFX "\n%s Adapter %d RevisionID %d found %s on PCI slot %d.\n", @@ -3587,10 +3581,8 @@ static void ql_display_dev_info(struct net_device *ndev) if (netif_msg_probe(qdev)) printk(KERN_INFO PFX - "%s: MAC address %02x:%02x:%02x:%02x:%02x:%02x\n", - ndev->name, ndev->dev_addr[0], ndev->dev_addr[1], - ndev->dev_addr[2], ndev->dev_addr[3], ndev->dev_addr[4], - ndev->dev_addr[5]); + "%s: MAC address %s\n", + ndev->name, print_mac(mac, ndev->dev_addr)); } static int ql_adapter_down(struct ql3_adapter *qdev, int do_reset) @@ -3617,7 +3609,7 @@ static int ql_adapter_down(struct ql3_adapter *qdev, int do_reset) del_timer_sync(&qdev->adapter_timer); - netif_poll_disable(ndev); + napi_disable(&qdev->napi); if (do_reset) { int soft_reset; @@ -3705,7 +3697,7 @@ static int ql_adapter_up(struct ql3_adapter *qdev) mod_timer(&qdev->adapter_timer, jiffies + HZ * 1); - netif_poll_enable(ndev); + napi_enable(&qdev->napi); ql_enable_interrupts(qdev); return 0; @@ -3758,12 +3750,6 @@ static int ql3xxx_open(struct net_device *ndev) return (ql_adapter_up(qdev)); } -static struct net_device_stats *ql3xxx_get_stats(struct net_device *dev) -{ - struct ql3_adapter *qdev = (struct ql3_adapter *)dev->priv; - return &qdev->stats; -} - static void ql3xxx_set_multicast_list(struct net_device *ndev) { /* @@ -4016,7 +4002,6 @@ static int __devinit ql3xxx_probe(struct pci_dev *pdev, goto err_out_free_regions; } - SET_MODULE_OWNER(ndev); SET_NETDEV_DEV(ndev, &pdev->dev); pci_set_drvdata(pdev, ndev); @@ -4054,15 +4039,13 @@ static int __devinit ql3xxx_probe(struct pci_dev *pdev, ndev->open = ql3xxx_open; ndev->hard_start_xmit = ql3xxx_send; ndev->stop = ql3xxx_close; - ndev->get_stats = ql3xxx_get_stats; ndev->set_multicast_list = ql3xxx_set_multicast_list; SET_ETHTOOL_OPS(ndev, &ql3xxx_ethtool_ops); ndev->set_mac_address = ql3xxx_set_mac_address; ndev->tx_timeout = ql3xxx_tx_timeout; ndev->watchdog_timeo = 5 * HZ; - ndev->poll = &ql_poll; - ndev->weight = 64; + netif_napi_add(ndev, &qdev->napi, ql_poll, 64); 
ndev->irq = pdev->irq; diff --git a/drivers/net/qla3xxx.h b/drivers/net/qla3xxx.h old mode 100755 new mode 100644 index 4a832c46c274813541731937374389caad3efbe9..fbcb0b9496390247b20e3f626e611b5a9b086c3b --- a/drivers/net/qla3xxx.h +++ b/drivers/net/qla3xxx.h @@ -556,7 +556,7 @@ enum { IP_ADDR_INDEX_REG_FUNC_3_SEC = 0x0007, IP_ADDR_INDEX_REG_6 = 0x0008, IP_ADDR_INDEX_REG_OFFSET_MASK = 0x0030, - IP_ADDR_INDEX_REG_E = 0x0040, + IP_ADDR_INDEX_REG_E = 0x0040, }; enum { QL3032_PORT_CONTROL_DS = 0x0001, @@ -1112,7 +1112,7 @@ struct ql_rcv_buf_cb { * OAL has 5 entries: * 1 thru 4 point to frags * fifth points to next oal. - */ + */ #define MAX_OAL_CNT ((MAX_SKB_FRAGS-1)/4 + 1) struct oal_entry { @@ -1137,7 +1137,7 @@ struct ql_tx_buf_cb { struct ob_mac_iocb_req *queue_entry ; int seg_count; struct oal *oal; - struct map_list map[MAX_SKB_FRAGS+1]; + struct map_list map[MAX_SKB_FRAGS+1]; }; /* definitions for type field */ @@ -1175,6 +1175,8 @@ struct ql3_adapter { struct pci_dev *pdev; struct net_device *ndev; /* Parent NET device */ + struct napi_struct napi; + /* Hardware information */ u8 chip_rev_id; u8 pci_slot; @@ -1281,7 +1283,6 @@ struct ql3_adapter { u32 update_ob_opcode; /* Opcode to use for updating NCB */ u32 mb_bit_mask; /* MA Bits mask to use on transmission */ u32 numPorts; - struct net_device_stats stats; struct workqueue_struct *workqueue; struct delayed_work reset_work; struct delayed_work tx_timeout_work; diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c index c76dd29c8e9a1ef8f17d60067c08a2c970d788e8..419c00cbe6e962bac5e1ea095848628415566e51 100644 --- a/drivers/net/r8169.c +++ b/drivers/net/r8169.c @@ -384,6 +384,7 @@ struct rtl8169_private { void __iomem *mmio_addr; /* memory map physical address */ struct pci_dev *pci_dev; /* Index of PCI device */ struct net_device *dev; + struct napi_struct napi; struct net_device_stats stats; /* statistics of net device */ spinlock_t lock; /* spin lock flag */ u32 msg_enable; @@ -443,13 +444,13 @@ static void rtl_set_rx_mode(struct net_device *dev); static void rtl8169_tx_timeout(struct net_device *dev); static struct net_device_stats *rtl8169_get_stats(struct net_device *dev); static int rtl8169_rx_interrupt(struct net_device *, struct rtl8169_private *, - void __iomem *); + void __iomem *, u32 budget); static int rtl8169_change_mtu(struct net_device *dev, int new_mtu); static void rtl8169_down(struct net_device *dev); static void rtl8169_rx_clear(struct rtl8169_private *tp); #ifdef CONFIG_R8169_NAPI -static int rtl8169_poll(struct net_device *dev, int *budget); +static int rtl8169_poll(struct napi_struct *napi, int budget); #endif static const unsigned int rtl8169_rx_config = @@ -976,24 +977,29 @@ static const char rtl8169_gstrings[][ETH_GSTRING_LEN] = { }; struct rtl8169_counters { - u64 tx_packets; - u64 rx_packets; - u64 tx_errors; - u32 rx_errors; - u16 rx_missed; - u16 align_errors; - u32 tx_one_collision; - u32 tx_multi_collision; - u64 rx_unicast; - u64 rx_broadcast; - u32 rx_multicast; - u16 tx_aborted; - u16 tx_underun; + __le64 tx_packets; + __le64 rx_packets; + __le64 tx_errors; + __le32 rx_errors; + __le16 rx_missed; + __le16 align_errors; + __le32 tx_one_collision; + __le32 tx_multi_collision; + __le64 rx_unicast; + __le64 rx_broadcast; + __le32 rx_multicast; + __le16 tx_aborted; + __le16 tx_underun; }; -static int rtl8169_get_stats_count(struct net_device *dev) +static int rtl8169_get_sset_count(struct net_device *dev, int sset) { - return ARRAY_SIZE(rtl8169_gstrings); + switch (sset) { + case ETH_SS_STATS: + return 
ARRAY_SIZE(rtl8169_gstrings); + default: + return -EOPNOTSUPP; + } } static void rtl8169_get_ethtool_stats(struct net_device *dev, @@ -1060,17 +1066,14 @@ static const struct ethtool_ops rtl8169_ethtool_ops = { .set_msglevel = rtl8169_set_msglevel, .get_rx_csum = rtl8169_get_rx_csum, .set_rx_csum = rtl8169_set_rx_csum, - .get_tx_csum = ethtool_op_get_tx_csum, .set_tx_csum = ethtool_op_set_tx_csum, - .get_sg = ethtool_op_get_sg, .set_sg = ethtool_op_set_sg, - .get_tso = ethtool_op_get_tso, .set_tso = ethtool_op_set_tso, .get_regs = rtl8169_get_regs, .get_wol = rtl8169_get_wol, .set_wol = rtl8169_set_wol, .get_strings = rtl8169_get_strings, - .get_stats_count = rtl8169_get_stats_count, + .get_sset_count = rtl8169_get_sset_count, .get_ethtool_stats = rtl8169_get_ethtool_stats, }; @@ -1505,7 +1508,6 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) goto out; } - SET_MODULE_OWNER(dev); SET_NETDEV_DEV(dev, &pdev->dev); tp = netdev_priv(dev); tp->dev = dev; @@ -1656,8 +1658,7 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) dev->set_mac_address = rtl_set_mac_address; #ifdef CONFIG_R8169_NAPI - dev->poll = rtl8169_poll; - dev->weight = R8169_NAPI_WEIGHT; + netif_napi_add(dev, &tp->napi, rtl8169_poll, R8169_NAPI_WEIGHT); #endif #ifdef CONFIG_R8169_VLAN @@ -1777,6 +1778,10 @@ static int rtl8169_open(struct net_device *dev) if (retval < 0) goto err_release_ring_2; +#ifdef CONFIG_R8169_NAPI + napi_enable(&tp->napi); +#endif + rtl_hw_start(dev); rtl8169_request_timer(dev); @@ -2082,7 +2087,9 @@ static int rtl8169_change_mtu(struct net_device *dev, int new_mtu) if (ret < 0) goto out; - netif_poll_enable(dev); +#ifdef CONFIG_R8169_NAPI + napi_enable(&tp->napi); +#endif rtl_hw_start(dev); @@ -2274,11 +2281,15 @@ static void rtl8169_wait_for_quiescence(struct net_device *dev) synchronize_irq(dev->irq); /* Wait for any pending NAPI task to complete */ - netif_poll_disable(dev); +#ifdef CONFIG_R8169_NAPI + napi_disable(&tp->napi); +#endif rtl8169_irq_mask_and_ack(ioaddr); - netif_poll_enable(dev); +#ifdef CONFIG_R8169_NAPI + napi_enable(&tp->napi); +#endif } static void rtl8169_reinit_task(struct work_struct *work) @@ -2322,7 +2333,7 @@ static void rtl8169_reset_task(struct work_struct *work) rtl8169_wait_for_quiescence(dev); - rtl8169_rx_interrupt(dev, tp, tp->mmio_addr); + rtl8169_rx_interrupt(dev, tp, tp->mmio_addr, ~(u32)0); rtl8169_tx_clear(tp); if (tp->dirty_rx == tp->cur_rx) { @@ -2636,14 +2647,14 @@ static inline bool rtl8169_try_rx_copy(struct sk_buff **sk_buff, static int rtl8169_rx_interrupt(struct net_device *dev, struct rtl8169_private *tp, - void __iomem *ioaddr) + void __iomem *ioaddr, u32 budget) { unsigned int cur_rx, rx_left; unsigned int delta, count; cur_rx = tp->cur_rx; rx_left = NUM_RX_DESC + tp->dirty_rx - cur_rx; - rx_left = rtl8169_rx_quota(rx_left, (u32) dev->quota); + rx_left = rtl8169_rx_quota(rx_left, budget); for (; rx_left > 0; rx_left--, cur_rx++) { unsigned int entry = cur_rx % NUM_RX_DESC; @@ -2792,8 +2803,8 @@ static irqreturn_t rtl8169_interrupt(int irq, void *dev_instance) RTL_W16(IntrMask, tp->intr_event & ~tp->napi_event); tp->intr_mask = ~tp->napi_event; - if (likely(netif_rx_schedule_prep(dev))) - __netif_rx_schedule(dev); + if (likely(netif_rx_schedule_prep(dev, &tp->napi))) + __netif_rx_schedule(dev, &tp->napi); else if (netif_msg_intr(tp)) { printk(KERN_INFO "%s: interrupt %04x in poll\n", dev->name, status); @@ -2803,7 +2814,7 @@ static irqreturn_t rtl8169_interrupt(int irq, void *dev_instance) #else /* Rx 
interrupt */ if (status & (RxOK | RxOverflow | RxFIFOOver)) - rtl8169_rx_interrupt(dev, tp, ioaddr); + rtl8169_rx_interrupt(dev, tp, ioaddr, ~(u32)0); /* Tx interrupt */ if (status & (TxOK | TxErr)) @@ -2826,20 +2837,18 @@ static irqreturn_t rtl8169_interrupt(int irq, void *dev_instance) } #ifdef CONFIG_R8169_NAPI -static int rtl8169_poll(struct net_device *dev, int *budget) +static int rtl8169_poll(struct napi_struct *napi, int budget) { - unsigned int work_done, work_to_do = min(*budget, dev->quota); - struct rtl8169_private *tp = netdev_priv(dev); + struct rtl8169_private *tp = container_of(napi, struct rtl8169_private, napi); + struct net_device *dev = tp->dev; void __iomem *ioaddr = tp->mmio_addr; + int work_done; - work_done = rtl8169_rx_interrupt(dev, tp, ioaddr); + work_done = rtl8169_rx_interrupt(dev, tp, ioaddr, (u32) budget); rtl8169_tx_interrupt(dev, tp, ioaddr); - *budget -= work_done; - dev->quota -= work_done; - - if (work_done < work_to_do) { - netif_rx_complete(dev); + if (work_done < budget) { + netif_rx_complete(dev, napi); tp->intr_mask = 0xffff; /* * 20040426: the barrier is not strictly required but the @@ -2851,7 +2860,7 @@ static int rtl8169_poll(struct net_device *dev, int *budget) RTL_W16(IntrMask, tp->intr_event); } - return (work_done >= work_to_do); + return work_done; } #endif @@ -2880,7 +2889,7 @@ static void rtl8169_down(struct net_device *dev) synchronize_irq(dev->irq); if (!poll_locked) { - netif_poll_disable(dev); + napi_disable(&tp->napi); poll_locked++; } @@ -2918,8 +2927,6 @@ static int rtl8169_close(struct net_device *dev) free_irq(dev->irq, dev); - netif_poll_enable(dev); - pci_free_consistent(pdev, R8169_RX_RING_BYTES, tp->RxDescArray, tp->RxPhyAddr); pci_free_consistent(pdev, R8169_TX_RING_BYTES, tp->TxDescArray, diff --git a/drivers/net/rionet.c b/drivers/net/rionet.c index df6b73872fdbfca2aab338760695897c1fa3879d..e7fd08adbbacbfe307d81a47623cf5d627b7053b 100644 --- a/drivers/net/rionet.c +++ b/drivers/net/rionet.c @@ -53,7 +53,6 @@ struct rionet_private { struct rio_mport *mport; struct sk_buff *rx_skb[RIONET_RX_RING_SIZE]; struct sk_buff *tx_skb[RIONET_TX_RING_SIZE]; - struct net_device_stats stats; int rx_slot; int tx_slot; int tx_cnt; @@ -91,12 +90,6 @@ static struct rio_dev *rionet_active[RIO_MAX_ROUTE_ENTRIES]; #define RIONET_MAC_MATCH(x) (*(u32 *)x == 0x00010001) #define RIONET_GET_DESTID(x) (*(u16 *)(x + 4)) -static struct net_device_stats *rionet_stats(struct net_device *ndev) -{ - struct rionet_private *rnet = ndev->priv; - return &rnet->stats; -} - static int rionet_rx_clean(struct net_device *ndev) { int i; @@ -120,15 +113,15 @@ static int rionet_rx_clean(struct net_device *ndev) error = netif_rx(rnet->rx_skb[i]); if (error == NET_RX_DROP) { - rnet->stats.rx_dropped++; + ndev->stats.rx_dropped++; } else if (error == NET_RX_BAD) { if (netif_msg_rx_err(rnet)) printk(KERN_WARNING "%s: bad rx packet\n", DRV_NAME); - rnet->stats.rx_errors++; + ndev->stats.rx_errors++; } else { - rnet->stats.rx_packets++; - rnet->stats.rx_bytes += RIO_MAX_MSG_SIZE; + ndev->stats.rx_packets++; + ndev->stats.rx_bytes += RIO_MAX_MSG_SIZE; } } while ((i = (i + 1) % RIONET_RX_RING_SIZE) != rnet->rx_slot); @@ -163,8 +156,8 @@ static int rionet_queue_tx_msg(struct sk_buff *skb, struct net_device *ndev, rio_add_outb_message(rnet->mport, rdev, 0, skb->data, skb->len); rnet->tx_skb[rnet->tx_slot] = skb; - rnet->stats.tx_packets++; - rnet->stats.tx_bytes += skb->len; + ndev->stats.tx_packets++; + ndev->stats.tx_bytes += skb->len; if (++rnet->tx_cnt == 
RIONET_TX_RING_SIZE) netif_stop_queue(ndev); @@ -439,6 +432,7 @@ static int rionet_setup_netdev(struct rio_mport *mport) struct net_device *ndev = NULL; struct rionet_private *rnet; u16 device_id; + DECLARE_MAC_BUF(mac); /* Allocate our net_device structure */ ndev = alloc_etherdev(sizeof(struct rionet_private)); @@ -466,13 +460,10 @@ static int rionet_setup_netdev(struct rio_mport *mport) ndev->open = &rionet_open; ndev->hard_start_xmit = &rionet_start_xmit; ndev->stop = &rionet_close; - ndev->get_stats = &rionet_stats; ndev->mtu = RIO_MAX_MSG_SIZE - 14; ndev->features = NETIF_F_LLTX; SET_ETHTOOL_OPS(ndev, &rionet_ethtool_ops); - SET_MODULE_OWNER(ndev); - spin_lock_init(&rnet->lock); spin_lock_init(&rnet->tx_lock); @@ -482,13 +473,12 @@ static int rionet_setup_netdev(struct rio_mport *mport) if (rc != 0) goto out; - printk("%s: %s %s Version %s, MAC %02x:%02x:%02x:%02x:%02x:%02x\n", + printk("%s: %s %s Version %s, MAC %s\n", ndev->name, DRV_NAME, DRV_DESC, DRV_VERSION, - ndev->dev_addr[0], ndev->dev_addr[1], ndev->dev_addr[2], - ndev->dev_addr[3], ndev->dev_addr[4], ndev->dev_addr[5]); + print_mac(mac, ndev->dev_addr)); out: return rc; diff --git a/drivers/net/rrunner.c b/drivers/net/rrunner.c index 5c2e41fac6d2d8bee3d57330cf73149f71b3dba1..19152f54ef2b4f80a357b4b2ba59393a3ab721aa 100644 --- a/drivers/net/rrunner.c +++ b/drivers/net/rrunner.c @@ -109,7 +109,6 @@ static int __devinit rr_init_one(struct pci_dev *pdev, rrpriv = netdev_priv(dev); - SET_MODULE_OWNER(dev); SET_NETDEV_DEV(dev, &pdev->dev); if (pci_request_regions(pdev, "rrunner")) { @@ -127,7 +126,6 @@ static int __devinit rr_init_one(struct pci_dev *pdev, dev->open = &rr_open; dev->hard_start_xmit = &rr_start_xmit; dev->stop = &rr_close; - dev->get_stats = &rr_get_stats; dev->do_ioctl = &rr_ioctl; dev->base_addr = pci_resource_start(pdev, 0); @@ -522,7 +520,7 @@ static int __devinit rr_init(struct net_device *dev) struct rr_regs __iomem *regs; struct eeprom *hw = NULL; u32 sram_size, rev; - int i; + DECLARE_MAC_BUF(mac); rrpriv = netdev_priv(dev); regs = rrpriv->regs; @@ -560,11 +558,7 @@ static int __devinit rr_init(struct net_device *dev) *(u32 *)(dev->dev_addr+2) = htonl(rr_read_eeprom_word(rrpriv, &hw->manf.BoardULA[4])); - printk(" MAC: "); - - for (i = 0; i < 5; i++) - printk("%2.2x:", dev->dev_addr[i]); - printk("%2.2x\n", dev->dev_addr[i]); + printk(" MAC: %s\n", print_mac(mac, dev->dev_addr)); sram_size = rr_read_eeprom_word(rrpriv, (void *)8); printk(" SRAM size 0x%06x\n", sram_size); @@ -809,7 +803,7 @@ static u32 rr_handle_event(struct net_device *dev, u32 prodidx, u32 eidx) case E_CON_REJ: printk(KERN_WARNING "%s: Connection rejected\n", dev->name); - rrpriv->stats.tx_aborted_errors++; + dev->stats.tx_aborted_errors++; break; case E_CON_TMOUT: printk(KERN_WARNING "%s: Connection timeout\n", @@ -818,7 +812,7 @@ static u32 rr_handle_event(struct net_device *dev, u32 prodidx, u32 eidx) case E_DISC_ERR: printk(KERN_WARNING "%s: HIPPI disconnect error\n", dev->name); - rrpriv->stats.tx_aborted_errors++; + dev->stats.tx_aborted_errors++; break; case E_INT_PRTY: printk(KERN_ERR "%s: HIPPI Internal Parity error\n", @@ -834,7 +828,7 @@ static u32 rr_handle_event(struct net_device *dev, u32 prodidx, u32 eidx) case E_TX_LINK_DROP: printk(KERN_WARNING "%s: Link lost during transmit\n", dev->name); - rrpriv->stats.tx_aborted_errors++; + dev->stats.tx_aborted_errors++; writel(readl(&regs->HostCtrl)|HALT_NIC|RR_CLEAR_INT, &regs->HostCtrl); wmb(); @@ -974,7 +968,7 @@ static void rx_int(struct net_device *dev, u32 rxlimit, u32 index)
printk("len %x, mode %x\n", pkt_len, desc->mode); #endif if ( (rrpriv->rx_ring[index].mode & PACKET_BAD) == PACKET_BAD){ - rrpriv->stats.rx_dropped++; + dev->stats.rx_dropped++; goto defer; } @@ -987,7 +981,7 @@ static void rx_int(struct net_device *dev, u32 rxlimit, u32 index) skb = alloc_skb(pkt_len, GFP_ATOMIC); if (skb == NULL){ printk(KERN_WARNING "%s: Unable to allocate skb (%i bytes), deferring packet\n", dev->name, pkt_len); - rrpriv->stats.rx_dropped++; + dev->stats.rx_dropped++; goto defer; } else { pci_dma_sync_single_for_cpu(rrpriv->pci_dev, @@ -1025,7 +1019,7 @@ static void rx_int(struct net_device *dev, u32 rxlimit, u32 index) } else { printk("%s: Out of memory, deferring " "packet\n", dev->name); - rrpriv->stats.rx_dropped++; + dev->stats.rx_dropped++; goto defer; } } @@ -1034,8 +1028,8 @@ static void rx_int(struct net_device *dev, u32 rxlimit, u32 index) netif_rx(skb); /* send it up */ dev->last_rx = jiffies; - rrpriv->stats.rx_packets++; - rrpriv->stats.rx_bytes += pkt_len; + dev->stats.rx_packets++; + dev->stats.rx_bytes += pkt_len; } defer: desc->mode = 0; @@ -1103,8 +1097,8 @@ static irqreturn_t rr_interrupt(int irq, void *dev_id) desc = &(rrpriv->tx_ring[txcon]); skb = rrpriv->tx_skbuff[txcon]; - rrpriv->stats.tx_packets++; - rrpriv->stats.tx_bytes += skb->len; + dev->stats.tx_packets++; + dev->stats.tx_bytes += skb->len; pci_unmap_single(rrpriv->pci_dev, desc->addr.addrlo, skb->len, @@ -1492,16 +1486,6 @@ static int rr_start_xmit(struct sk_buff *skb, struct net_device *dev) } -static struct net_device_stats *rr_get_stats(struct net_device *dev) -{ - struct rr_private *rrpriv; - - rrpriv = netdev_priv(dev); - - return(&rrpriv->stats); -} - - /* * Read the firmware out of the EEPROM and put it into the SRAM * (or from user space - later) diff --git a/drivers/net/rrunner.h b/drivers/net/rrunner.h index 9f3e050c4dc6dc27b82c0290b239bcea6029781a..6a79825bc8cf6b6059efbe8297f547242573eec1 100644 --- a/drivers/net/rrunner.h +++ b/drivers/net/rrunner.h @@ -819,7 +819,6 @@ struct rr_private u32 tx_full; u32 fw_rev; volatile short fw_running; - struct net_device_stats stats; struct pci_dev *pci_dev; }; @@ -834,7 +833,6 @@ static irqreturn_t rr_interrupt(int irq, void *dev_id); static int rr_open(struct net_device *dev); static int rr_start_xmit(struct sk_buff *skb, struct net_device *dev); static int rr_close(struct net_device *dev); -static struct net_device_stats *rr_get_stats(struct net_device *dev); static int rr_ioctl(struct net_device *dev, struct ifreq *rq, int cmd); static unsigned int rr_read_eeprom(struct rr_private *rrpriv, unsigned long offset, diff --git a/drivers/net/s2io-regs.h b/drivers/net/s2io-regs.h index cfa267914476158a9cea058e203f3e5881f46925..aef66e2d98d2d36ba3d559f860efba434cb9c200 100644 --- a/drivers/net/s2io-regs.h +++ b/drivers/net/s2io-regs.h @@ -220,7 +220,7 @@ struct XENA_dev_config { u64 scheduled_int_ctrl; #define SCHED_INT_CTRL_TIMER_EN BIT(0) #define SCHED_INT_CTRL_ONE_SHOT BIT(1) -#define SCHED_INT_CTRL_INT2MSI TBD +#define SCHED_INT_CTRL_INT2MSI(val) vBIT(val,10,6) #define SCHED_INT_PERIOD TBD u64 txreqtimeout; @@ -325,33 +325,66 @@ struct XENA_dev_config { #define TXDMA_TPA_INT BIT(5) #define TXDMA_SM_INT BIT(6) u64 pfc_err_reg; +#define PFC_ECC_SG_ERR BIT(7) +#define PFC_ECC_DB_ERR BIT(15) +#define PFC_SM_ERR_ALARM BIT(23) +#define PFC_MISC_0_ERR BIT(31) +#define PFC_MISC_1_ERR BIT(32) +#define PFC_PCIX_ERR BIT(39) u64 pfc_err_mask; u64 pfc_err_alarm; u64 tda_err_reg; +#define TDA_Fn_ECC_SG_ERR vBIT(0xff,0,8) +#define TDA_Fn_ECC_DB_ERR 
vBIT(0xff,8,8) +#define TDA_SM0_ERR_ALARM BIT(22) +#define TDA_SM1_ERR_ALARM BIT(23) +#define TDA_PCIX_ERR BIT(39) u64 tda_err_mask; u64 tda_err_alarm; u64 pcc_err_reg; -#define PCC_FB_ECC_DB_ERR vBIT(0xFF, 16, 8) +#define PCC_FB_ECC_SG_ERR vBIT(0xFF,0,8) +#define PCC_TXB_ECC_SG_ERR vBIT(0xFF,8,8) +#define PCC_FB_ECC_DB_ERR vBIT(0xFF,16, 8) +#define PCC_TXB_ECC_DB_ERR vBIT(0xff,24,8) +#define PCC_SM_ERR_ALARM vBIT(0xff,32,8) +#define PCC_WR_ERR_ALARM vBIT(0xff,40,8) +#define PCC_N_SERR vBIT(0xff,48,8) +#define PCC_6_COF_OV_ERR BIT(56) +#define PCC_7_COF_OV_ERR BIT(57) +#define PCC_6_LSO_OV_ERR BIT(58) +#define PCC_7_LSO_OV_ERR BIT(59) #define PCC_ENABLE_FOUR vBIT(0x0F,0,8) - u64 pcc_err_mask; u64 pcc_err_alarm; u64 tti_err_reg; +#define TTI_ECC_SG_ERR BIT(7) +#define TTI_ECC_DB_ERR BIT(15) +#define TTI_SM_ERR_ALARM BIT(23) u64 tti_err_mask; u64 tti_err_alarm; u64 lso_err_reg; +#define LSO6_SEND_OFLOW BIT(12) +#define LSO7_SEND_OFLOW BIT(13) +#define LSO6_ABORT BIT(14) +#define LSO7_ABORT BIT(15) +#define LSO6_SM_ERR_ALARM BIT(22) +#define LSO7_SM_ERR_ALARM BIT(23) u64 lso_err_mask; u64 lso_err_alarm; u64 tpa_err_reg; +#define TPA_TX_FRM_DROP BIT(7) +#define TPA_SM_ERR_ALARM BIT(23) + u64 tpa_err_mask; u64 tpa_err_alarm; u64 sm_err_reg; +#define SM_SM_ERR_ALARM BIT(15) u64 sm_err_mask; u64 sm_err_alarm; @@ -450,22 +483,52 @@ struct XENA_dev_config { #define RXDMA_INT_RTI_INT_M BIT(3) u64 rda_err_reg; +#define RDA_RXDn_ECC_SG_ERR vBIT(0xFF,0,8) +#define RDA_RXDn_ECC_DB_ERR vBIT(0xFF,8,8) +#define RDA_FRM_ECC_SG_ERR BIT(23) +#define RDA_FRM_ECC_DB_N_AERR BIT(31) +#define RDA_SM1_ERR_ALARM BIT(38) +#define RDA_SM0_ERR_ALARM BIT(39) +#define RDA_MISC_ERR BIT(47) +#define RDA_PCIX_ERR BIT(55) +#define RDA_RXD_ECC_DB_SERR BIT(63) u64 rda_err_mask; u64 rda_err_alarm; u64 rc_err_reg; +#define RC_PRCn_ECC_SG_ERR vBIT(0xFF,0,8) +#define RC_PRCn_ECC_DB_ERR vBIT(0xFF,8,8) +#define RC_FTC_ECC_SG_ERR BIT(23) +#define RC_FTC_ECC_DB_ERR BIT(31) +#define RC_PRCn_SM_ERR_ALARM vBIT(0xFF,32,8) +#define RC_FTC_SM_ERR_ALARM BIT(47) +#define RC_RDA_FAIL_WR_Rn vBIT(0xFF,48,8) u64 rc_err_mask; u64 rc_err_alarm; u64 prc_pcix_err_reg; +#define PRC_PCI_AB_RD_Rn vBIT(0xFF,0,8) +#define PRC_PCI_DP_RD_Rn vBIT(0xFF,8,8) +#define PRC_PCI_AB_WR_Rn vBIT(0xFF,16,8) +#define PRC_PCI_DP_WR_Rn vBIT(0xFF,24,8) +#define PRC_PCI_AB_F_WR_Rn vBIT(0xFF,32,8) +#define PRC_PCI_DP_F_WR_Rn vBIT(0xFF,40,8) u64 prc_pcix_err_mask; u64 prc_pcix_err_alarm; u64 rpa_err_reg; +#define RPA_ECC_SG_ERR BIT(7) +#define RPA_ECC_DB_ERR BIT(15) +#define RPA_FLUSH_REQUEST BIT(22) +#define RPA_SM_ERR_ALARM BIT(23) +#define RPA_CREDIT_ERR BIT(31) u64 rpa_err_mask; u64 rpa_err_alarm; u64 rti_err_reg; +#define RTI_ECC_SG_ERR BIT(7) +#define RTI_ECC_DB_ERR BIT(15) +#define RTI_SM_ERR_ALARM BIT(23) u64 rti_err_mask; u64 rti_err_alarm; @@ -582,17 +645,43 @@ struct XENA_dev_config { #define MAC_INT_STATUS_RMAC_INT BIT(1) u64 mac_tmac_err_reg; -#define TMAC_ERR_REG_TMAC_ECC_DB_ERR BIT(15) -#define TMAC_ERR_REG_TMAC_TX_BUF_OVRN BIT(23) -#define TMAC_ERR_REG_TMAC_TX_CRI_ERR BIT(31) +#define TMAC_ECC_SG_ERR BIT(7) +#define TMAC_ECC_DB_ERR BIT(15) +#define TMAC_TX_BUF_OVRN BIT(23) +#define TMAC_TX_CRI_ERR BIT(31) +#define TMAC_TX_SM_ERR BIT(39) +#define TMAC_DESC_ECC_SG_ERR BIT(47) +#define TMAC_DESC_ECC_DB_ERR BIT(55) + u64 mac_tmac_err_mask; u64 mac_tmac_err_alarm; u64 mac_rmac_err_reg; -#define RMAC_ERR_REG_RX_BUFF_OVRN BIT(0) -#define RMAC_ERR_REG_RTS_ECC_DB_ERR BIT(14) -#define RMAC_ERR_REG_ECC_DB_ERR BIT(15) -#define RMAC_LINK_STATE_CHANGE_INT BIT(31) +#define 
RMAC_RX_BUFF_OVRN BIT(0) +#define RMAC_FRM_RCVD_INT BIT(1) +#define RMAC_UNUSED_INT BIT(2) +#define RMAC_RTS_PNUM_ECC_SG_ERR BIT(5) +#define RMAC_RTS_DS_ECC_SG_ERR BIT(6) +#define RMAC_RD_BUF_ECC_SG_ERR BIT(7) +#define RMAC_RTH_MAP_ECC_SG_ERR BIT(8) +#define RMAC_RTH_SPDM_ECC_SG_ERR BIT(9) +#define RMAC_RTS_VID_ECC_SG_ERR BIT(10) +#define RMAC_DA_SHADOW_ECC_SG_ERR BIT(11) +#define RMAC_RTS_PNUM_ECC_DB_ERR BIT(13) +#define RMAC_RTS_DS_ECC_DB_ERR BIT(14) +#define RMAC_RD_BUF_ECC_DB_ERR BIT(15) +#define RMAC_RTH_MAP_ECC_DB_ERR BIT(16) +#define RMAC_RTH_SPDM_ECC_DB_ERR BIT(17) +#define RMAC_RTS_VID_ECC_DB_ERR BIT(18) +#define RMAC_DA_SHADOW_ECC_DB_ERR BIT(19) +#define RMAC_LINK_STATE_CHANGE_INT BIT(31) +#define RMAC_RX_SM_ERR BIT(39) +#define RMAC_SINGLE_ECC_ERR (BIT(5) | BIT(6) | BIT(7) |\ + BIT(8) | BIT(9) | BIT(10)|\ + BIT(11)) +#define RMAC_DOUBLE_ECC_ERR (BIT(13) | BIT(14) | BIT(15) |\ + BIT(16) | BIT(17) | BIT(18)|\ + BIT(19)) u64 mac_rmac_err_mask; u64 mac_rmac_err_alarm; @@ -750,6 +839,7 @@ struct XENA_dev_config { BIT(17) | BIT(19)) #define MC_ERR_REG_ECC_ALL_DBL (BIT(10) | BIT(11) | BIT(12) |\ BIT(13) | BIT(18) | BIT(20)) +#define PLL_LOCK_N BIT(39) u64 mc_err_mask; u64 mc_err_alarm; @@ -823,11 +913,17 @@ struct XENA_dev_config { #define XGXS_INT_MASK_RXGXS BIT(1) u64 xgxs_txgxs_err_reg; -#define TXGXS_ECC_DB_ERR BIT(15) +#define TXGXS_ECC_SG_ERR BIT(7) +#define TXGXS_ECC_DB_ERR BIT(15) +#define TXGXS_ESTORE_UFLOW BIT(31) +#define TXGXS_TX_SM_ERR BIT(39) + u64 xgxs_txgxs_err_mask; u64 xgxs_txgxs_err_alarm; u64 xgxs_rxgxs_err_reg; +#define RXGXS_ESTORE_OFLOW BIT(7) +#define RXGXS_RX_SM_ERR BIT(39) u64 xgxs_rxgxs_err_mask; u64 xgxs_rxgxs_err_alarm; diff --git a/drivers/net/s2io.c b/drivers/net/s2io.c index 24feb00600eea835b1c50da642f610157a9e5f62..22e4054d4fcb249feac3331eab47b544f85920a8 100644 --- a/drivers/net/s2io.c +++ b/drivers/net/s2io.c @@ -37,8 +37,8 @@ * tx_fifo_len: This too is an array of 8. Each element defines the number of * Tx descriptors that can be associated with each corresponding FIFO. * intr_type: This defines the type of interrupt. The values can be 0(INTA), - * 2(MSI_X). Default value is '0(INTA)' - * lro: Specifies whether to enable Large Receive Offload (LRO) or not. + * 2(MSI_X). Default value is '2(MSI_X)' + * lro_enable: Specifies whether to enable Large Receive Offload (LRO) or not. * Possible values '1' for enable '0' for disable. Default is '0' * lro_max_pkts: This parameter defines maximum number of packets can be * aggregated as a single large packet @@ -84,7 +84,7 @@ #include "s2io.h" #include "s2io-regs.h" -#define DRV_VERSION "2.0.25.1" +#define DRV_VERSION "2.0.26.5" /* S2io Driver name & version. */ static char s2io_driver_name[] = "Neterion"; @@ -130,6 +130,11 @@ static inline int rx_buffer_level(struct s2io_nic * sp, int rxb_size, int ring) return 0; } +static inline int is_s2io_card_up(const struct s2io_nic * sp) +{ + return test_bit(__S2IO_STATE_CARD_UP, &sp->state); +} + /* Ethtool related variables and Macros. 
*/ static char s2io_gstrings[][ETH_GSTRING_LEN] = { "Register test\t(offline)", @@ -263,47 +268,71 @@ static char ethtool_driver_stats_keys[][ETH_GSTRING_LEN] = { {"serious_err_cnt"}, {"soft_reset_cnt"}, {"fifo_full_cnt"}, - {"ring_full_cnt"}, - ("alarm_transceiver_temp_high"), - ("alarm_transceiver_temp_low"), - ("alarm_laser_bias_current_high"), - ("alarm_laser_bias_current_low"), - ("alarm_laser_output_power_high"), - ("alarm_laser_output_power_low"), - ("warn_transceiver_temp_high"), - ("warn_transceiver_temp_low"), - ("warn_laser_bias_current_high"), - ("warn_laser_bias_current_low"), - ("warn_laser_output_power_high"), - ("warn_laser_output_power_low"), - ("lro_aggregated_pkts"), - ("lro_flush_both_count"), - ("lro_out_of_sequence_pkts"), - ("lro_flush_due_to_max_pkts"), - ("lro_avg_aggr_pkts"), - ("mem_alloc_fail_cnt"), - ("pci_map_fail_cnt"), - ("watchdog_timer_cnt"), - ("mem_allocated"), - ("mem_freed"), - ("link_up_cnt"), - ("link_down_cnt"), - ("link_up_time"), - ("link_down_time"), - ("tx_tcode_buf_abort_cnt"), - ("tx_tcode_desc_abort_cnt"), - ("tx_tcode_parity_err_cnt"), - ("tx_tcode_link_loss_cnt"), - ("tx_tcode_list_proc_err_cnt"), - ("rx_tcode_parity_err_cnt"), - ("rx_tcode_abort_cnt"), - ("rx_tcode_parity_abort_cnt"), - ("rx_tcode_rda_fail_cnt"), - ("rx_tcode_unkn_prot_cnt"), - ("rx_tcode_fcs_err_cnt"), - ("rx_tcode_buf_size_err_cnt"), - ("rx_tcode_rxd_corrupt_cnt"), - ("rx_tcode_unkn_err_cnt") + {"ring_0_full_cnt"}, + {"ring_1_full_cnt"}, + {"ring_2_full_cnt"}, + {"ring_3_full_cnt"}, + {"ring_4_full_cnt"}, + {"ring_5_full_cnt"}, + {"ring_6_full_cnt"}, + {"ring_7_full_cnt"}, + {"alarm_transceiver_temp_high"}, + {"alarm_transceiver_temp_low"}, + {"alarm_laser_bias_current_high"}, + {"alarm_laser_bias_current_low"}, + {"alarm_laser_output_power_high"}, + {"alarm_laser_output_power_low"}, + {"warn_transceiver_temp_high"}, + {"warn_transceiver_temp_low"}, + {"warn_laser_bias_current_high"}, + {"warn_laser_bias_current_low"}, + {"warn_laser_output_power_high"}, + {"warn_laser_output_power_low"}, + {"lro_aggregated_pkts"}, + {"lro_flush_both_count"}, + {"lro_out_of_sequence_pkts"}, + {"lro_flush_due_to_max_pkts"}, + {"lro_avg_aggr_pkts"}, + {"mem_alloc_fail_cnt"}, + {"pci_map_fail_cnt"}, + {"watchdog_timer_cnt"}, + {"mem_allocated"}, + {"mem_freed"}, + {"link_up_cnt"}, + {"link_down_cnt"}, + {"link_up_time"}, + {"link_down_time"}, + {"tx_tcode_buf_abort_cnt"}, + {"tx_tcode_desc_abort_cnt"}, + {"tx_tcode_parity_err_cnt"}, + {"tx_tcode_link_loss_cnt"}, + {"tx_tcode_list_proc_err_cnt"}, + {"rx_tcode_parity_err_cnt"}, + {"rx_tcode_abort_cnt"}, + {"rx_tcode_parity_abort_cnt"}, + {"rx_tcode_rda_fail_cnt"}, + {"rx_tcode_unkn_prot_cnt"}, + {"rx_tcode_fcs_err_cnt"}, + {"rx_tcode_buf_size_err_cnt"}, + {"rx_tcode_rxd_corrupt_cnt"}, + {"rx_tcode_unkn_err_cnt"}, + {"tda_err_cnt"}, + {"pfc_err_cnt"}, + {"pcc_err_cnt"}, + {"tti_err_cnt"}, + {"tpa_err_cnt"}, + {"sm_err_cnt"}, + {"lso_err_cnt"}, + {"mac_tmac_err_cnt"}, + {"mac_rmac_err_cnt"}, + {"xgxs_txgxs_err_cnt"}, + {"xgxs_rxgxs_err_cnt"}, + {"rc_err_cnt"}, + {"prc_pcix_err_cnt"}, + {"rpa_err_cnt"}, + {"rda_err_cnt"}, + {"rti_err_cnt"}, + {"mc_err_cnt"} }; #define S2IO_XENA_STAT_LEN sizeof(ethtool_xena_stats_keys)/ ETH_GSTRING_LEN @@ -326,6 +355,16 @@ static char ethtool_driver_stats_keys[][ETH_GSTRING_LEN] = { timer.data = (unsigned long) arg; \ mod_timer(&timer, (jiffies + exp)) \ +/* copy mac addr to def_mac_addr array */ +static void do_s2io_copy_mac_addr(struct s2io_nic *sp, int offset, u64 mac_addr) +{ + 
sp->def_mac_addr[offset].mac_addr[5] = (u8) (mac_addr); + sp->def_mac_addr[offset].mac_addr[4] = (u8) (mac_addr >> 8); + sp->def_mac_addr[offset].mac_addr[3] = (u8) (mac_addr >> 16); + sp->def_mac_addr[offset].mac_addr[2] = (u8) (mac_addr >> 24); + sp->def_mac_addr[offset].mac_addr[1] = (u8) (mac_addr >> 32); + sp->def_mac_addr[offset].mac_addr[0] = (u8) (mac_addr >> 40); +} /* Add the vlan */ static void s2io_vlan_rx_register(struct net_device *dev, struct vlan_group *grp) @@ -423,14 +462,15 @@ S2IO_PARM_INT(mc_pause_threshold_q4q7, 187); S2IO_PARM_INT(shared_splits, 0); S2IO_PARM_INT(tmac_util_period, 5); S2IO_PARM_INT(rmac_util_period, 5); -S2IO_PARM_INT(bimodal, 0); S2IO_PARM_INT(l3l4hdr_size, 128); /* Frequency of Rx desc syncs expressed as power of 2 */ S2IO_PARM_INT(rxsync_frequency, 3); /* Interrupt type. Values can be 0(INTA), 2(MSI_X) */ -S2IO_PARM_INT(intr_type, 0); +S2IO_PARM_INT(intr_type, 2); /* Large receive offload feature */ -S2IO_PARM_INT(lro, 0); +static unsigned int lro_enable; +module_param_named(lro, lro_enable, uint, 0); + /* Max pkts to be aggregated by LRO at one time. If not specified, * aggregation happens until we hit max IP pkt size(64K) */ @@ -532,7 +572,7 @@ static int init_shared_mem(struct s2io_nic *nic) for (i = 0; i < config->tx_fifo_num; i++) { int fifo_len = config->tx_cfg[i].fifo_len; int list_holder_size = fifo_len * sizeof(struct list_info_hold); - mac_control->fifos[i].list_info = kmalloc(list_holder_size, + mac_control->fifos[i].list_info = kzalloc(list_holder_size, GFP_KERNEL); if (!mac_control->fifos[i].list_info) { DBG_PRINT(INFO_DBG, @@ -540,7 +580,6 @@ static int init_shared_mem(struct s2io_nic *nic) return -ENOMEM; } mem_allocated += list_holder_size; - memset(mac_control->fifos[i].list_info, 0, list_holder_size); } for (i = 0; i < config->tx_fifo_num; i++) { int page_num = TXD_MEM_PAGE_CNT(config->tx_cfg[i].fifo_len, @@ -671,7 +710,7 @@ static int init_shared_mem(struct s2io_nic *nic) GFP_KERNEL); if (!rx_blocks->rxds) return -ENOMEM; - mem_allocated += + mem_allocated += (sizeof(struct rxd_info)* rxd_count[nic->rxd_mode]); for (l=0; l<rxd_count[nic->rxd_mode];l++) { rx_blocks->rxds[l].virt_addr = @@ -733,7 +772,7 @@ static int init_shared_mem(struct s2io_nic *nic) (BUF0_LEN + ALIGN_SIZE, GFP_KERNEL); if (!ba->ba_0_org) return -ENOMEM; - mem_allocated += + mem_allocated += (BUF0_LEN + ALIGN_SIZE); tmp = (unsigned long)ba->ba_0_org; tmp += ALIGN_SIZE; @@ -744,7 +783,7 @@ static int init_shared_mem(struct s2io_nic *nic) (BUF1_LEN + ALIGN_SIZE, GFP_KERNEL); if (!ba->ba_1_org) return -ENOMEM; - mem_allocated + mem_allocated += (BUF1_LEN + ALIGN_SIZE); tmp = (unsigned long) ba->ba_1_org; tmp += ALIGN_SIZE; @@ -829,7 +868,7 @@ static void free_shared_mem(struct s2io_nic *nic) mac_control->fifos[i]. list_info[mem_blks].
list_phy_addr); - nic->mac_control.stats_info->sw_stat.mem_freed + nic->mac_control.stats_info->sw_stat.mem_freed += PAGE_SIZE; } /* If we got a zero DMA address during allocation, @@ -844,11 +883,11 @@ static void free_shared_mem(struct s2io_nic *nic) dev->name); DBG_PRINT(INIT_DBG, "Virtual address %p\n", mac_control->zerodma_virt_addr); - nic->mac_control.stats_info->sw_stat.mem_freed + nic->mac_control.stats_info->sw_stat.mem_freed += PAGE_SIZE; } kfree(mac_control->fifos[i].list_info); - nic->mac_control.stats_info->sw_stat.mem_freed += + nic->mac_control.stats_info->sw_stat.mem_freed += (nic->config.tx_cfg[i].fifo_len *sizeof(struct list_info_hold)); } @@ -866,7 +905,7 @@ static void free_shared_mem(struct s2io_nic *nic) tmp_v_addr, tmp_p_addr); nic->mac_control.stats_info->sw_stat.mem_freed += size; kfree(mac_control->rings[i].rx_blocks[j].rxds); - nic->mac_control.stats_info->sw_stat.mem_freed += + nic->mac_control.stats_info->sw_stat.mem_freed += ( sizeof(struct rxd_info)* rxd_count[nic->rxd_mode]); } } @@ -892,11 +931,12 @@ static void free_shared_mem(struct s2io_nic *nic) k++; } kfree(mac_control->rings[i].ba[j]); - nic->mac_control.stats_info->sw_stat.mem_freed += (sizeof(struct buffAdd) * - (rxd_count[nic->rxd_mode] + 1)); + nic->mac_control.stats_info->sw_stat.mem_freed += + (sizeof(struct buffAdd) * + (rxd_count[nic->rxd_mode] + 1)); } kfree(mac_control->rings[i].ba); - nic->mac_control.stats_info->sw_stat.mem_freed += + nic->mac_control.stats_info->sw_stat.mem_freed += (sizeof(struct buffAdd *) * blk_cnt); } } @@ -906,12 +946,12 @@ static void free_shared_mem(struct s2io_nic *nic) mac_control->stats_mem_sz, mac_control->stats_mem, mac_control->stats_mem_phy); - nic->mac_control.stats_info->sw_stat.mem_freed += + nic->mac_control.stats_info->sw_stat.mem_freed += mac_control->stats_mem_sz; } if (nic->ufo_in_band_v) { kfree(nic->ufo_in_band_v); - nic->mac_control.stats_info->sw_stat.mem_freed + nic->mac_control.stats_info->sw_stat.mem_freed += (ufo_size * sizeof(u64)); } } @@ -1456,7 +1496,7 @@ static int init_nic(struct s2io_nic *nic) &bar0->rts_frm_len_n[i]); } } - + /* Disable differentiated services steering logic */ for (i = 0; i < 64; i++) { if (rts_ds_steer(nic, i, 0) == FAILURE) { @@ -1536,90 +1576,57 @@ static int init_nic(struct s2io_nic *nic) time++; } - if (nic->config.bimodal) { - int k = 0; - for (k = 0; k < config->rx_ring_num; k++) { - val64 = TTI_CMD_MEM_WE | TTI_CMD_MEM_STROBE_NEW_CMD; - val64 |= TTI_CMD_MEM_OFFSET(0x38+k); - writeq(val64, &bar0->tti_command_mem); - + /* RTI Initialization */ + if (nic->device_type == XFRAME_II_DEVICE) { /* - * Once the operation completes, the Strobe bit of the command - * register will be reset. We poll for this particular condition - * We wait for a maximum of 500ms for the operation to complete, - * if it's not complete by then we return error. 
- */ - time = 0; - while (TRUE) { - val64 = readq(&bar0->tti_command_mem); - if (!(val64 & TTI_CMD_MEM_STROBE_NEW_CMD)) { - break; - } - if (time > 10) { - DBG_PRINT(ERR_DBG, - "%s: TTI init Failed\n", - dev->name); - return -1; - } - time++; - msleep(50); - } - } - } else { - - /* RTI Initialization */ - if (nic->device_type == XFRAME_II_DEVICE) { - /* - * Programmed to generate Apprx 500 Intrs per - * second - */ - int count = (nic->config.bus_speed * 125)/4; - val64 = RTI_DATA1_MEM_RX_TIMER_VAL(count); - } else { - val64 = RTI_DATA1_MEM_RX_TIMER_VAL(0xFFF); - } - val64 |= RTI_DATA1_MEM_RX_URNG_A(0xA) | - RTI_DATA1_MEM_RX_URNG_B(0x10) | - RTI_DATA1_MEM_RX_URNG_C(0x30) | RTI_DATA1_MEM_RX_TIMER_AC_EN; - - writeq(val64, &bar0->rti_data1_mem); + * Programmed to generate Apprx 500 Intrs per + * second + */ + int count = (nic->config.bus_speed * 125)/4; + val64 = RTI_DATA1_MEM_RX_TIMER_VAL(count); + } else + val64 = RTI_DATA1_MEM_RX_TIMER_VAL(0xFFF); + val64 |= RTI_DATA1_MEM_RX_URNG_A(0xA) | + RTI_DATA1_MEM_RX_URNG_B(0x10) | + RTI_DATA1_MEM_RX_URNG_C(0x30) | RTI_DATA1_MEM_RX_TIMER_AC_EN; + + writeq(val64, &bar0->rti_data1_mem); + + val64 = RTI_DATA2_MEM_RX_UFC_A(0x1) | + RTI_DATA2_MEM_RX_UFC_B(0x2) ; + if (nic->config.intr_type == MSI_X) + val64 |= (RTI_DATA2_MEM_RX_UFC_C(0x20) | \ + RTI_DATA2_MEM_RX_UFC_D(0x40)); + else + val64 |= (RTI_DATA2_MEM_RX_UFC_C(0x40) | \ + RTI_DATA2_MEM_RX_UFC_D(0x80)); + writeq(val64, &bar0->rti_data2_mem); - val64 = RTI_DATA2_MEM_RX_UFC_A(0x1) | - RTI_DATA2_MEM_RX_UFC_B(0x2) ; - if (nic->intr_type == MSI_X) - val64 |= (RTI_DATA2_MEM_RX_UFC_C(0x20) | \ - RTI_DATA2_MEM_RX_UFC_D(0x40)); - else - val64 |= (RTI_DATA2_MEM_RX_UFC_C(0x40) | \ - RTI_DATA2_MEM_RX_UFC_D(0x80)); - writeq(val64, &bar0->rti_data2_mem); + for (i = 0; i < config->rx_ring_num; i++) { + val64 = RTI_CMD_MEM_WE | RTI_CMD_MEM_STROBE_NEW_CMD + | RTI_CMD_MEM_OFFSET(i); + writeq(val64, &bar0->rti_command_mem); - for (i = 0; i < config->rx_ring_num; i++) { - val64 = RTI_CMD_MEM_WE | RTI_CMD_MEM_STROBE_NEW_CMD - | RTI_CMD_MEM_OFFSET(i); - writeq(val64, &bar0->rti_command_mem); + /* + * Once the operation completes, the Strobe bit of the + * command register will be reset. We poll for this + * particular condition. We wait for a maximum of 500ms + * for the operation to complete, if it's not complete + * by then we return error. + */ + time = 0; + while (TRUE) { + val64 = readq(&bar0->rti_command_mem); + if (!(val64 & RTI_CMD_MEM_STROBE_NEW_CMD)) + break; - /* - * Once the operation completes, the Strobe bit of the - * command register will be reset. We poll for this - * particular condition. We wait for a maximum of 500ms - * for the operation to complete, if it's not complete - * by then we return error. 
- */ - time = 0; - while (TRUE) { - val64 = readq(&bar0->rti_command_mem); - if (!(val64 & RTI_CMD_MEM_STROBE_NEW_CMD)) { - break; - } - if (time > 10) { - DBG_PRINT(ERR_DBG, "%s: RTI init Failed\n", - dev->name); - return -1; - } - time++; - msleep(50); + if (time > 10) { + DBG_PRINT(ERR_DBG, "%s: RTI init Failed\n", + dev->name); + return -1; } + time++; + msleep(50); } } @@ -1724,7 +1731,7 @@ static int init_nic(struct s2io_nic *nic) static int s2io_link_fault_indication(struct s2io_nic *nic) { - if (nic->intr_type != INTA) + if (nic->config.intr_type != INTA) return MAC_RMAC_ERR_TIMER; if (nic->device_type == XFRAME_II_DEVICE) return LINK_UP_DOWN_INTERRUPT; @@ -1732,6 +1739,150 @@ static int s2io_link_fault_indication(struct s2io_nic *nic) return MAC_RMAC_ERR_TIMER; } +/** + * do_s2io_write_bits - update alarm bits in alarm register + * @value: alarm bits + * @flag: interrupt status + * @addr: address value + * Description: update alarm bits in alarm register + * Return Value: + * NONE. + */ +static void do_s2io_write_bits(u64 value, int flag, void __iomem *addr) +{ + u64 temp64; + + temp64 = readq(addr); + + if(flag == ENABLE_INTRS) + temp64 &= ~((u64) value); + else + temp64 |= ((u64) value); + writeq(temp64, addr); +} + +static void en_dis_err_alarms(struct s2io_nic *nic, u16 mask, int flag) +{ + struct XENA_dev_config __iomem *bar0 = nic->bar0; + register u64 gen_int_mask = 0; + + if (mask & TX_DMA_INTR) { + + gen_int_mask |= TXDMA_INT_M; + + do_s2io_write_bits(TXDMA_TDA_INT | TXDMA_PFC_INT | + TXDMA_PCC_INT | TXDMA_TTI_INT | + TXDMA_LSO_INT | TXDMA_TPA_INT | + TXDMA_SM_INT, flag, &bar0->txdma_int_mask); + + do_s2io_write_bits(PFC_ECC_DB_ERR | PFC_SM_ERR_ALARM | + PFC_MISC_0_ERR | PFC_MISC_1_ERR | + PFC_PCIX_ERR | PFC_ECC_SG_ERR, flag, + &bar0->pfc_err_mask); + + do_s2io_write_bits(TDA_Fn_ECC_DB_ERR | TDA_SM0_ERR_ALARM | + TDA_SM1_ERR_ALARM | TDA_Fn_ECC_SG_ERR | + TDA_PCIX_ERR, flag, &bar0->tda_err_mask); + + do_s2io_write_bits(PCC_FB_ECC_DB_ERR | PCC_TXB_ECC_DB_ERR | + PCC_SM_ERR_ALARM | PCC_WR_ERR_ALARM | + PCC_N_SERR | PCC_6_COF_OV_ERR | + PCC_7_COF_OV_ERR | PCC_6_LSO_OV_ERR | + PCC_7_LSO_OV_ERR | PCC_FB_ECC_SG_ERR | + PCC_TXB_ECC_SG_ERR, flag, &bar0->pcc_err_mask); + + do_s2io_write_bits(TTI_SM_ERR_ALARM | TTI_ECC_SG_ERR | + TTI_ECC_DB_ERR, flag, &bar0->tti_err_mask); + + do_s2io_write_bits(LSO6_ABORT | LSO7_ABORT | + LSO6_SM_ERR_ALARM | LSO7_SM_ERR_ALARM | + LSO6_SEND_OFLOW | LSO7_SEND_OFLOW, + flag, &bar0->lso_err_mask); + + do_s2io_write_bits(TPA_SM_ERR_ALARM | TPA_TX_FRM_DROP, + flag, &bar0->tpa_err_mask); + + do_s2io_write_bits(SM_SM_ERR_ALARM, flag, &bar0->sm_err_mask); + + } + + if (mask & TX_MAC_INTR) { + gen_int_mask |= TXMAC_INT_M; + do_s2io_write_bits(MAC_INT_STATUS_TMAC_INT, flag, + &bar0->mac_int_mask); + do_s2io_write_bits(TMAC_TX_BUF_OVRN | TMAC_TX_SM_ERR | + TMAC_ECC_SG_ERR | TMAC_ECC_DB_ERR | + TMAC_DESC_ECC_SG_ERR | TMAC_DESC_ECC_DB_ERR, + flag, &bar0->mac_tmac_err_mask); + } + + if (mask & TX_XGXS_INTR) { + gen_int_mask |= TXXGXS_INT_M; + do_s2io_write_bits(XGXS_INT_STATUS_TXGXS, flag, + &bar0->xgxs_int_mask); + do_s2io_write_bits(TXGXS_ESTORE_UFLOW | TXGXS_TX_SM_ERR | + TXGXS_ECC_SG_ERR | TXGXS_ECC_DB_ERR, + flag, &bar0->xgxs_txgxs_err_mask); + } + + if (mask & RX_DMA_INTR) { + gen_int_mask |= RXDMA_INT_M; + do_s2io_write_bits(RXDMA_INT_RC_INT_M | RXDMA_INT_RPA_INT_M | + RXDMA_INT_RDA_INT_M | RXDMA_INT_RTI_INT_M, + flag, &bar0->rxdma_int_mask); + do_s2io_write_bits(RC_PRCn_ECC_DB_ERR | RC_FTC_ECC_DB_ERR | + RC_PRCn_SM_ERR_ALARM | RC_FTC_SM_ERR_ALARM | + 
RC_PRCn_ECC_SG_ERR | RC_FTC_ECC_SG_ERR | + RC_RDA_FAIL_WR_Rn, flag, &bar0->rc_err_mask); + do_s2io_write_bits(PRC_PCI_AB_RD_Rn | PRC_PCI_AB_WR_Rn | + PRC_PCI_AB_F_WR_Rn | PRC_PCI_DP_RD_Rn | + PRC_PCI_DP_WR_Rn | PRC_PCI_DP_F_WR_Rn, flag, + &bar0->prc_pcix_err_mask); + do_s2io_write_bits(RPA_SM_ERR_ALARM | RPA_CREDIT_ERR | + RPA_ECC_SG_ERR | RPA_ECC_DB_ERR, flag, + &bar0->rpa_err_mask); + do_s2io_write_bits(RDA_RXDn_ECC_DB_ERR | RDA_FRM_ECC_DB_N_AERR | + RDA_SM1_ERR_ALARM | RDA_SM0_ERR_ALARM | + RDA_RXD_ECC_DB_SERR | RDA_RXDn_ECC_SG_ERR | + RDA_FRM_ECC_SG_ERR | RDA_MISC_ERR|RDA_PCIX_ERR, + flag, &bar0->rda_err_mask); + do_s2io_write_bits(RTI_SM_ERR_ALARM | + RTI_ECC_SG_ERR | RTI_ECC_DB_ERR, + flag, &bar0->rti_err_mask); + } + + if (mask & RX_MAC_INTR) { + gen_int_mask |= RXMAC_INT_M; + do_s2io_write_bits(MAC_INT_STATUS_RMAC_INT, flag, + &bar0->mac_int_mask); + do_s2io_write_bits(RMAC_RX_BUFF_OVRN | RMAC_RX_SM_ERR | + RMAC_UNUSED_INT | RMAC_SINGLE_ECC_ERR | + RMAC_DOUBLE_ECC_ERR | + RMAC_LINK_STATE_CHANGE_INT, + flag, &bar0->mac_rmac_err_mask); + } + + if (mask & RX_XGXS_INTR) + { + gen_int_mask |= RXXGXS_INT_M; + do_s2io_write_bits(XGXS_INT_STATUS_RXGXS, flag, + &bar0->xgxs_int_mask); + do_s2io_write_bits(RXGXS_ESTORE_OFLOW | RXGXS_RX_SM_ERR, flag, + &bar0->xgxs_rxgxs_err_mask); + } + + if (mask & MC_INTR) { + gen_int_mask |= MC_INT_M; + do_s2io_write_bits(MC_INT_MASK_MC_INT, flag, &bar0->mc_int_mask); + do_s2io_write_bits(MC_ERR_REG_SM_ERR | MC_ERR_REG_ECC_ALL_SNG | + MC_ERR_REG_ECC_ALL_DBL | PLL_LOCK_N, flag, + &bar0->mc_err_mask); + } + nic->general_int_mask = gen_int_mask; + + /* Remove this line when alarm interrupts are enabled */ + nic->general_int_mask = 0; +} /** * en_dis_able_nic_intrs - Enable or Disable the interrupts * @nic: device private variable, @@ -1746,17 +1897,16 @@ static int s2io_link_fault_indication(struct s2io_nic *nic) static void en_dis_able_nic_intrs(struct s2io_nic *nic, u16 mask, int flag) { struct XENA_dev_config __iomem *bar0 = nic->bar0; - register u64 val64 = 0, temp64 = 0; + register u64 temp64 = 0, intr_mask = 0; + + intr_mask = nic->general_int_mask; /* Top level interrupt classification */ /* PIC Interrupts */ - if ((mask & (TX_PIC_INTR | RX_PIC_INTR))) { + if (mask & TX_PIC_INTR) { /* Enable PIC Intrs in the general intr mask register */ - val64 = TXPIC_INT_M; + intr_mask |= TXPIC_INT_M; if (flag == ENABLE_INTRS) { - temp64 = readq(&bar0->general_int_mask); - temp64 &= ~((u64) val64); - writeq(temp64, &bar0->general_int_mask); /* * If Hercules adapter enable GPIO otherwise * disable all PCIX, Flash, MDIO, IIC and GPIO @@ -1765,64 +1915,25 @@ static void en_dis_able_nic_intrs(struct s2io_nic *nic, u16 mask, int flag) */ if (s2io_link_fault_indication(nic) == LINK_UP_DOWN_INTERRUPT ) { - temp64 = readq(&bar0->pic_int_mask); - temp64 &= ~((u64) PIC_INT_GPIO); - writeq(temp64, &bar0->pic_int_mask); - temp64 = readq(&bar0->gpio_int_mask); - temp64 &= ~((u64) GPIO_INT_MASK_LINK_UP); - writeq(temp64, &bar0->gpio_int_mask); - } else { + do_s2io_write_bits(PIC_INT_GPIO, flag, + &bar0->pic_int_mask); + do_s2io_write_bits(GPIO_INT_MASK_LINK_UP, flag, + &bar0->gpio_int_mask); + } else writeq(DISABLE_ALL_INTRS, &bar0->pic_int_mask); - } - /* - * No MSI Support is available presently, so TTI and - * RTI interrupts are also disabled. 
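do_s2io_write_bits() above centralizes the read-modify-write on each error-mask register: ENABLE_INTRS clears the requested bits, unmasking them, while any other flag sets them. The same shape reduced to a self-contained sketch with a plain bool:

#include <linux/io.h>
#include <linux/types.h>

/* Sketch of the mask-update helper: clear bits to enable, set bits to mask off. */
static void example_write_mask_bits(u64 bits, bool enable, void __iomem *mask_reg)
{
        u64 cur = readq(mask_reg);

        if (enable)
                cur &= ~bits;
        else
                cur |= bits;
        writeq(cur, mask_reg);
}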
- */ } else if (flag == DISABLE_INTRS) { /* * Disable PIC Intrs in the general * intr mask register */ writeq(DISABLE_ALL_INTRS, &bar0->pic_int_mask); - temp64 = readq(&bar0->general_int_mask); - val64 |= temp64; - writeq(val64, &bar0->general_int_mask); - } - } - - /* MAC Interrupts */ - /* Enabling/Disabling MAC interrupts */ - if (mask & (TX_MAC_INTR | RX_MAC_INTR)) { - val64 = TXMAC_INT_M | RXMAC_INT_M; - if (flag == ENABLE_INTRS) { - temp64 = readq(&bar0->general_int_mask); - temp64 &= ~((u64) val64); - writeq(temp64, &bar0->general_int_mask); - /* - * All MAC block error interrupts are disabled for now - * TODO - */ - } else if (flag == DISABLE_INTRS) { - /* - * Disable MAC Intrs in the general intr mask register - */ - writeq(DISABLE_ALL_INTRS, &bar0->mac_int_mask); - writeq(DISABLE_ALL_INTRS, - &bar0->mac_rmac_err_mask); - - temp64 = readq(&bar0->general_int_mask); - val64 |= temp64; - writeq(val64, &bar0->general_int_mask); } } /* Tx traffic interrupts */ if (mask & TX_TRAFFIC_INTR) { - val64 = TXTRAFFIC_INT_M; + intr_mask |= TXTRAFFIC_INT_M; if (flag == ENABLE_INTRS) { - temp64 = readq(&bar0->general_int_mask); - temp64 &= ~((u64) val64); - writeq(temp64, &bar0->general_int_mask); /* * Enable all the Tx side interrupts * writing 0 Enables all 64 TX interrupt levels @@ -1834,19 +1945,13 @@ static void en_dis_able_nic_intrs(struct s2io_nic *nic, u16 mask, int flag) * register. */ writeq(DISABLE_ALL_INTRS, &bar0->tx_traffic_mask); - temp64 = readq(&bar0->general_int_mask); - val64 |= temp64; - writeq(val64, &bar0->general_int_mask); } } /* Rx traffic interrupts */ if (mask & RX_TRAFFIC_INTR) { - val64 = RXTRAFFIC_INT_M; + intr_mask |= RXTRAFFIC_INT_M; if (flag == ENABLE_INTRS) { - temp64 = readq(&bar0->general_int_mask); - temp64 &= ~((u64) val64); - writeq(temp64, &bar0->general_int_mask); /* writing 0 Enables all 8 RX interrupt levels */ writeq(0x0, &bar0->rx_traffic_mask); } else if (flag == DISABLE_INTRS) { @@ -1855,11 +1960,17 @@ static void en_dis_able_nic_intrs(struct s2io_nic *nic, u16 mask, int flag) * register. */ writeq(DISABLE_ALL_INTRS, &bar0->rx_traffic_mask); - temp64 = readq(&bar0->general_int_mask); - val64 |= temp64; - writeq(val64, &bar0->general_int_mask); } } + + temp64 = readq(&bar0->general_int_mask); + if (flag == ENABLE_INTRS) + temp64 &= ~((u64) intr_mask); + else + temp64 = DISABLE_ALL_INTRS; + writeq(temp64, &bar0->general_int_mask); + + nic->general_int_mask = readq(&bar0->general_int_mask); } /** @@ -1872,7 +1983,7 @@ static int verify_pcc_quiescent(struct s2io_nic *sp, int flag) int ret = 0, herc; struct XENA_dev_config __iomem *bar0 = sp->bar0; u64 val64 = readq(&bar0->adapter_status); - + herc = (sp->device_type == XFRAME_II_DEVICE); if (flag == FALSE) { @@ -2018,8 +2129,6 @@ static int start_nic(struct s2io_nic *nic) &bar0->prc_rxd0_n[i]); val64 = readq(&bar0->prc_ctrl_n[i]); - if (nic->config.bimodal) - val64 |= PRC_CTRL_BIMODAL_INTERRUPT; if (nic->rxd_mode == RXD_MODE_1) val64 |= PRC_CTRL_RC_ENABLED; else @@ -2062,14 +2171,6 @@ static int start_nic(struct s2io_nic *nic) val64 &= ~ADAPTER_ECC_EN; writeq(val64, &bar0->adapter_control); - /* - * Clearing any possible Link state change interrupts that - * could have popped up just before Enabling the card. - */ - val64 = readq(&bar0->mac_rmac_err_reg); - if (val64) - writeq(val64, &bar0->mac_rmac_err_reg); - /* * Verify if the device is ready to be enabled, if so enable * it. 
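With the per-block masks handled by do_s2io_write_bits(), en_dis_able_nic_intrs() now only collects the top-level bits (TXPIC_INT_M, TXTRAFFIC_INT_M, RXTRAFFIC_INT_M) into a local mask and touches general_int_mask once at the end: collected bits are cleared to enable, or DISABLE_ALL_INTRS is written to mask everything. A sketch of that collect-then-commit flow, with made-up bit names:

#include <linux/io.h>
#include <linux/types.h>

#define EXAMPLE_TX_BIT          (1ULL << 8)     /* stand-ins for TXTRAFFIC_INT_M etc. */
#define EXAMPLE_RX_BIT          (1ULL << 9)
#define EXAMPLE_DISABLE_ALL     (~0ULL)         /* stand-in for DISABLE_ALL_INTRS */

/* Sketch: collect the wanted top-level bits, then write the mask register once. */
static void example_set_general_mask(void __iomem *general_mask_reg,
                                     bool want_tx, bool want_rx, bool enable)
{
        u64 collected = 0, cur;

        if (want_tx)
                collected |= EXAMPLE_TX_BIT;
        if (want_rx)
                collected |= EXAMPLE_RX_BIT;

        cur = readq(general_mask_reg);
        if (enable)
                cur &= ~collected;              /* unmask only what was collected */
        else
                cur = EXAMPLE_DISABLE_ALL;      /* the patch masks everything on disable */
        writeq(cur, general_mask_reg);
}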
@@ -2187,7 +2288,7 @@ static void free_tx_buffers(struct s2io_nic *nic) mac_control->fifos[i].list_info[j].list_virt_addr; skb = s2io_txdl_getskb(&mac_control->fifos[i], txdp, j); if (skb) { - nic->mac_control.stats_info->sw_stat.mem_freed + nic->mac_control.stats_info->sw_stat.mem_freed += skb->truesize; dev_kfree_skb(skb); cnt++; @@ -2223,9 +2324,9 @@ static void stop_nic(struct s2io_nic *nic) config = &nic->config; /* Disable all interrupts */ + en_dis_err_alarms(nic, ENA_ALL_INTRS, DISABLE_INTRS); interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR; - interruptible |= TX_PIC_INTR | RX_PIC_INTR; - interruptible |= TX_MAC_INTR | RX_MAC_INTR; + interruptible |= TX_PIC_INTR; en_dis_able_nic_intrs(nic, interruptible, DISABLE_INTRS); /* Clearing Adapter_En bit of ADAPTER_CONTROL Register */ @@ -2352,7 +2453,7 @@ static int fill_rx_buffers(struct s2io_nic *nic, int ring_no) mem_alloc_fail_cnt++; return -ENOMEM ; } - nic->mac_control.stats_info->sw_stat.mem_allocated + nic->mac_control.stats_info->sw_stat.mem_allocated += skb->truesize; if (nic->rxd_mode == RXD_MODE_1) { /* 1 buffer mode - normal operation mode */ @@ -2367,7 +2468,7 @@ static int fill_rx_buffers(struct s2io_nic *nic, int ring_no) DMA_ERROR_CODE)) goto pci_map_failed; - rxdp->Control_2 = + rxdp->Control_2 = SET_BUFFER0_SIZE_1(size - NET_IP_ALIGN); } else if (nic->rxd_mode == RXD_MODE_3B) { @@ -2568,7 +2669,7 @@ static void free_rx_buffers(struct s2io_nic *sp) /** * s2io_poll - Rx interrupt handler for NAPI support - * @dev : pointer to the device structure. + * @napi : pointer to the napi structure. * @budget : The number of packets that were budgeted to be processed * during one pass through the 'Poll" function. * Description: @@ -2579,22 +2680,23 @@ static void free_rx_buffers(struct s2io_nic *sp) * 0 on success and 1 if there are No Rx packets to be processed. */ -static int s2io_poll(struct net_device *dev, int *budget) +static int s2io_poll(struct napi_struct *napi, int budget) { - struct s2io_nic *nic = dev->priv; + struct s2io_nic *nic = container_of(napi, struct s2io_nic, napi); + struct net_device *dev = nic->dev; int pkt_cnt = 0, org_pkts_to_process; struct mac_info *mac_control; struct config_param *config; struct XENA_dev_config __iomem *bar0 = nic->bar0; int i; - atomic_inc(&nic->isr_cnt); + if (!is_s2io_card_up(nic)) + return 0; + mac_control = &nic->mac_control; config = &nic->config; - nic->pkts_to_process = *budget; - if (nic->pkts_to_process > dev->quota) - nic->pkts_to_process = dev->quota; + nic->pkts_to_process = budget; org_pkts_to_process = nic->pkts_to_process; writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_int); @@ -2608,12 +2710,8 @@ static int s2io_poll(struct net_device *dev, int *budget) goto no_rx; } } - if (!pkt_cnt) - pkt_cnt = 1; - dev->quota -= pkt_cnt; - *budget -= pkt_cnt; - netif_rx_complete(dev); + netif_rx_complete(dev, napi); for (i = 0; i < config->rx_ring_num; i++) { if (fill_rx_buffers(nic, i) == -ENOMEM) { @@ -2625,13 +2723,9 @@ static int s2io_poll(struct net_device *dev, int *budget) /* Re enable the Rx interrupts. 
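s2io_poll() above moves from the old dev->poll()/quota interface to the napi_struct API: the handler receives an explicit budget, returns the number of packets it processed, and calls netif_rx_complete(dev, napi) before re-enabling Rx interrupts once it finishes under budget. The canonical shape of such a handler, sketched against a hypothetical device; the netif_rx_complete() signature matches the API generation this patch targets, and the Rx helpers are placeholders:

#include <linux/netdevice.h>

struct example_priv {
        struct net_device *dev;
        struct napi_struct napi;
};

/* Placeholders for driver-specific Rx work and interrupt re-enabling. */
static bool example_process_one_rx(struct example_priv *priv) { return false; }
static void example_reenable_rx_irq(struct example_priv *priv) { }

/* Sketch of a budget-driven poll handler in the shape the patch adopts. */
static int example_poll(struct napi_struct *napi, int budget)
{
        struct example_priv *priv = container_of(napi, struct example_priv, napi);
        int done = 0;

        while (done < budget && example_process_one_rx(priv))
                done++;

        if (done < budget) {
                netif_rx_complete(priv->dev, napi);
                example_reenable_rx_irq(priv);
        }
        return done;
}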
*/ writeq(0x0, &bar0->rx_traffic_mask); readl(&bar0->rx_traffic_mask); - atomic_dec(&nic->isr_cnt); - return 0; + return pkt_cnt; no_rx: - dev->quota -= pkt_cnt; - *budget -= pkt_cnt; - for (i = 0; i < config->rx_ring_num; i++) { if (fill_rx_buffers(nic, i) == -ENOMEM) { DBG_PRINT(INFO_DBG, "%s:Out of memory", dev->name); @@ -2639,8 +2733,7 @@ static int s2io_poll(struct net_device *dev, int *budget) break; } } - atomic_dec(&nic->isr_cnt); - return 1; + return pkt_cnt; } #ifdef CONFIG_NET_POLL_CONTROLLER @@ -2667,7 +2760,6 @@ static void s2io_netpoll(struct net_device *dev) disable_irq(dev->irq); - atomic_inc(&nic->isr_cnt); mac_control = &nic->mac_control; config = &nic->config; @@ -2692,7 +2784,6 @@ static void s2io_netpoll(struct net_device *dev) break; } } - atomic_dec(&nic->isr_cnt); enable_irq(dev->irq); return; } @@ -2724,12 +2815,6 @@ static void rx_intr_handler(struct ring_info *ring_data) struct RxD3* rxdp3; spin_lock(&nic->rx_lock); - if (atomic_read(&nic->card_state) == CARD_DOWN) { - DBG_PRINT(INTR_DBG, "%s: %s going down for reset\n", - __FUNCTION__, dev->name); - spin_unlock(&nic->rx_lock); - return; - } get_info = ring_data->rx_curr_get_info; get_block = get_info.block_index; @@ -3163,135 +3248,6 @@ static void s2io_updt_xpak_counter(struct net_device *dev) stat_info->xpak_stat.warn_laser_output_power_low++; } -/** - * alarm_intr_handler - Alarm Interrrupt handler - * @nic: device private variable - * Description: If the interrupt was neither because of Rx packet or Tx - * complete, this function is called. If the interrupt was to indicate - * a loss of link, the OSM link status handler is invoked for any other - * alarm interrupt the block that raised the interrupt is displayed - * and a H/W reset is issued. - * Return Value: - * NONE -*/ - -static void alarm_intr_handler(struct s2io_nic *nic) -{ - struct net_device *dev = (struct net_device *) nic->dev; - struct XENA_dev_config __iomem *bar0 = nic->bar0; - register u64 val64 = 0, err_reg = 0; - u64 cnt; - int i; - if (atomic_read(&nic->card_state) == CARD_DOWN) - return; - if (pci_channel_offline(nic->pdev)) - return; - nic->mac_control.stats_info->sw_stat.ring_full_cnt = 0; - /* Handling the XPAK counters update */ - if(nic->mac_control.stats_info->xpak_stat.xpak_timer_count < 72000) { - /* waiting for an hour */ - nic->mac_control.stats_info->xpak_stat.xpak_timer_count++; - } else { - s2io_updt_xpak_counter(dev); - /* reset the count to zero */ - nic->mac_control.stats_info->xpak_stat.xpak_timer_count = 0; - } - - /* Handling link status change error Intr */ - if (s2io_link_fault_indication(nic) == MAC_RMAC_ERR_TIMER) { - err_reg = readq(&bar0->mac_rmac_err_reg); - writeq(err_reg, &bar0->mac_rmac_err_reg); - if (err_reg & RMAC_LINK_STATE_CHANGE_INT) { - schedule_work(&nic->set_link_task); - } - } - - /* Handling Ecc errors */ - val64 = readq(&bar0->mc_err_reg); - writeq(val64, &bar0->mc_err_reg); - if (val64 & (MC_ERR_REG_ECC_ALL_SNG | MC_ERR_REG_ECC_ALL_DBL)) { - if (val64 & MC_ERR_REG_ECC_ALL_DBL) { - nic->mac_control.stats_info->sw_stat. - double_ecc_errs++; - DBG_PRINT(INIT_DBG, "%s: Device indicates ", - dev->name); - DBG_PRINT(INIT_DBG, "double ECC error!!\n"); - if (nic->device_type != XFRAME_II_DEVICE) { - /* Reset XframeI only if critical error */ - if (val64 & (MC_ERR_REG_MIRI_ECC_DB_ERR_0 | - MC_ERR_REG_MIRI_ECC_DB_ERR_1)) { - netif_stop_queue(dev); - schedule_work(&nic->rst_timer_task); - nic->mac_control.stats_info->sw_stat. - soft_reset_cnt++; - } - } - } else { - nic->mac_control.stats_info->sw_stat. 
- single_ecc_errs++; - } - } - - /* In case of a serious error, the device will be Reset. */ - val64 = readq(&bar0->serr_source); - if (val64 & SERR_SOURCE_ANY) { - nic->mac_control.stats_info->sw_stat.serious_err_cnt++; - DBG_PRINT(ERR_DBG, "%s: Device indicates ", dev->name); - DBG_PRINT(ERR_DBG, "serious error %llx!!\n", - (unsigned long long)val64); - netif_stop_queue(dev); - schedule_work(&nic->rst_timer_task); - nic->mac_control.stats_info->sw_stat.soft_reset_cnt++; - } - - /* - * Also as mentioned in the latest Errata sheets if the PCC_FB_ECC - * Error occurs, the adapter will be recycled by disabling the - * adapter enable bit and enabling it again after the device - * becomes Quiescent. - */ - val64 = readq(&bar0->pcc_err_reg); - writeq(val64, &bar0->pcc_err_reg); - if (val64 & PCC_FB_ECC_DB_ERR) { - u64 ac = readq(&bar0->adapter_control); - ac &= ~(ADAPTER_CNTL_EN); - writeq(ac, &bar0->adapter_control); - ac = readq(&bar0->adapter_control); - schedule_work(&nic->set_link_task); - } - /* Check for data parity error */ - val64 = readq(&bar0->pic_int_status); - if (val64 & PIC_INT_GPIO) { - val64 = readq(&bar0->gpio_int_reg); - if (val64 & GPIO_INT_REG_DP_ERR_INT) { - nic->mac_control.stats_info->sw_stat.parity_err_cnt++; - schedule_work(&nic->rst_timer_task); - nic->mac_control.stats_info->sw_stat.soft_reset_cnt++; - } - } - - /* Check for ring full counter */ - if (nic->device_type & XFRAME_II_DEVICE) { - val64 = readq(&bar0->ring_bump_counter1); - for (i=0; i<4; i++) { - cnt = ( val64 & vBIT(0xFFFF,(i*16),16)); - cnt >>= 64 - ((i+1)*16); - nic->mac_control.stats_info->sw_stat.ring_full_cnt - += cnt; - } - - val64 = readq(&bar0->ring_bump_counter2); - for (i=0; i<4; i++) { - cnt = ( val64 & vBIT(0xFFFF,(i*16),16)); - cnt >>= 64 - ((i+1)*16); - nic->mac_control.stats_info->sw_stat.ring_full_cnt - += cnt; - } - } - - /* Other type of interrupts are not being handled now, TODO */ -} - /** * wait_for_cmd_complete - waits for a command to complete. 
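Both the removed alarm_intr_handler() above and the s2io_handle_errors() that replaces it later in this patch accumulate the ring-full statistics by slicing four 16-bit fields out of each 64-bit ring_bump_counter register, via vBIT(0xFFFF, i*16, 16) and a right shift. The same unpacking written out in plain C, for illustration only:

#include <linux/types.h>

/* Sketch: accumulate four MSB-first 16-bit counters from one 64-bit register. */
static void example_accumulate_ring_counters(u64 reg, unsigned long long cnt[4])
{
        int i;

        for (i = 0; i < 4; i++)
                cnt[i] += (reg >> (64 - (i + 1) * 16)) & 0xFFFFULL;
}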
* @sp : private member of the device structure, which is a pointer to the @@ -3426,7 +3382,7 @@ static void s2io_reset(struct s2io_nic * sp) /* Reset device statistics maintained by OS */ memset(&sp->stats, 0, sizeof (struct net_device_stats)); - + up_cnt = sp->mac_control.stats_info->sw_stat.link_up_cnt; down_cnt = sp->mac_control.stats_info->sw_stat.link_down_cnt; up_time = sp->mac_control.stats_info->sw_stat.link_up_time; @@ -3468,7 +3424,7 @@ static void s2io_reset(struct s2io_nic * sp) } /* restore the previously assigned mac address */ - s2io_set_mac_addr(sp->dev, (u8 *)&sp->def_mac_addr[0].mac_addr); + do_s2io_prog_unicast(sp->dev, (u8 *)&sp->def_mac_addr[0].mac_addr); sp->device_enabled_once = FALSE; } @@ -3565,7 +3521,7 @@ static int s2io_set_swapper(struct s2io_nic * sp) SWAPPER_CTRL_RXF_W_FE | SWAPPER_CTRL_XMSI_FE | SWAPPER_CTRL_STATS_FE | SWAPPER_CTRL_STATS_SE); - if (sp->intr_type == INTA) + if (sp->config.intr_type == INTA) val64 |= SWAPPER_CTRL_XMSI_SE; writeq(val64, &bar0->swapper_ctrl); #else @@ -3588,7 +3544,7 @@ static int s2io_set_swapper(struct s2io_nic * sp) SWAPPER_CTRL_RXF_W_FE | SWAPPER_CTRL_XMSI_FE | SWAPPER_CTRL_STATS_FE | SWAPPER_CTRL_STATS_SE); - if (sp->intr_type == INTA) + if (sp->config.intr_type == INTA) val64 |= SWAPPER_CTRL_XMSI_SE; writeq(val64, &bar0->swapper_ctrl); #endif @@ -3680,34 +3636,31 @@ static int s2io_enable_msi_x(struct s2io_nic *nic) u16 msi_control; /* Temp variable */ int ret, i, j, msix_indx = 1; - nic->entries = kmalloc(MAX_REQUESTED_MSI_X * sizeof(struct msix_entry), + nic->entries = kcalloc(MAX_REQUESTED_MSI_X, sizeof(struct msix_entry), GFP_KERNEL); - if (nic->entries == NULL) { + if (!nic->entries) { DBG_PRINT(INFO_DBG, "%s: Memory allocation failed\n", \ __FUNCTION__); nic->mac_control.stats_info->sw_stat.mem_alloc_fail_cnt++; return -ENOMEM; } - nic->mac_control.stats_info->sw_stat.mem_allocated + nic->mac_control.stats_info->sw_stat.mem_allocated += (MAX_REQUESTED_MSI_X * sizeof(struct msix_entry)); - memset(nic->entries, 0,MAX_REQUESTED_MSI_X * sizeof(struct msix_entry)); nic->s2io_entries = - kmalloc(MAX_REQUESTED_MSI_X * sizeof(struct s2io_msix_entry), + kcalloc(MAX_REQUESTED_MSI_X, sizeof(struct s2io_msix_entry), GFP_KERNEL); - if (nic->s2io_entries == NULL) { - DBG_PRINT(INFO_DBG, "%s: Memory allocation failed\n", + if (!nic->s2io_entries) { + DBG_PRINT(INFO_DBG, "%s: Memory allocation failed\n", __FUNCTION__); nic->mac_control.stats_info->sw_stat.mem_alloc_fail_cnt++; kfree(nic->entries); - nic->mac_control.stats_info->sw_stat.mem_freed + nic->mac_control.stats_info->sw_stat.mem_freed += (MAX_REQUESTED_MSI_X * sizeof(struct msix_entry)); return -ENOMEM; } - nic->mac_control.stats_info->sw_stat.mem_allocated + nic->mac_control.stats_info->sw_stat.mem_allocated += (MAX_REQUESTED_MSI_X * sizeof(struct s2io_msix_entry)); - memset(nic->s2io_entries, 0, - MAX_REQUESTED_MSI_X * sizeof(struct s2io_msix_entry)); for (i=0; i< MAX_REQUESTED_MSI_X; i++) { nic->entries[i].entry = i; @@ -3725,27 +3678,15 @@ static int s2io_enable_msi_x(struct s2io_nic *nic) } writeq(tx_mat, &bar0->tx_mat0_n[0]); - if (!nic->config.bimodal) { - rx_mat = readq(&bar0->rx_mat); - for (j=0; jconfig.rx_ring_num; j++, msix_indx++) { - rx_mat |= RX_MAT_SET(j, msix_indx); - nic->s2io_entries[msix_indx].arg - = &nic->mac_control.rings[j]; - nic->s2io_entries[msix_indx].type = MSIX_RING_TYPE; - nic->s2io_entries[msix_indx].in_use = MSIX_FLG; - } - writeq(rx_mat, &bar0->rx_mat); - } else { - tx_mat = readq(&bar0->tx_mat0_n[7]); - for (j=0; jconfig.rx_ring_num; j++, 
msix_indx++) { - tx_mat |= TX_MAT_SET(i, msix_indx); - nic->s2io_entries[msix_indx].arg - = &nic->mac_control.rings[j]; - nic->s2io_entries[msix_indx].type = MSIX_RING_TYPE; - nic->s2io_entries[msix_indx].in_use = MSIX_FLG; - } - writeq(tx_mat, &bar0->tx_mat0_n[7]); + rx_mat = readq(&bar0->rx_mat); + for (j = 0; j < nic->config.rx_ring_num; j++, msix_indx++) { + rx_mat |= RX_MAT_SET(j, msix_indx); + nic->s2io_entries[msix_indx].arg + = &nic->mac_control.rings[j]; + nic->s2io_entries[msix_indx].type = MSIX_RING_TYPE; + nic->s2io_entries[msix_indx].in_use = MSIX_FLG; } + writeq(rx_mat, &bar0->rx_mat); nic->avail_msix_vectors = 0; ret = pci_enable_msix(nic->pdev, nic->entries, MAX_REQUESTED_MSI_X); @@ -3757,10 +3698,10 @@ static int s2io_enable_msi_x(struct s2io_nic *nic) if (ret) { DBG_PRINT(ERR_DBG, "%s: Enabling MSIX failed\n", nic->dev->name); kfree(nic->entries); - nic->mac_control.stats_info->sw_stat.mem_freed + nic->mac_control.stats_info->sw_stat.mem_freed += (MAX_REQUESTED_MSI_X * sizeof(struct msix_entry)); kfree(nic->s2io_entries); - nic->mac_control.stats_info->sw_stat.mem_freed + nic->mac_control.stats_info->sw_stat.mem_freed += (MAX_REQUESTED_MSI_X * sizeof(struct s2io_msix_entry)); nic->entries = NULL; nic->s2io_entries = NULL; @@ -3781,6 +3722,59 @@ static int s2io_enable_msi_x(struct s2io_nic *nic) return 0; } +/* Handle software interrupt used during MSI(X) test */ +static irqreturn_t __devinit s2io_test_intr(int irq, void *dev_id) +{ + struct s2io_nic *sp = dev_id; + + sp->msi_detected = 1; + wake_up(&sp->msi_wait); + + return IRQ_HANDLED; +} + +/* Test interrupt path by forcing a a software IRQ */ +static int __devinit s2io_test_msi(struct s2io_nic *sp) +{ + struct pci_dev *pdev = sp->pdev; + struct XENA_dev_config __iomem *bar0 = sp->bar0; + int err; + u64 val64, saved64; + + err = request_irq(sp->entries[1].vector, s2io_test_intr, 0, + sp->name, sp); + if (err) { + DBG_PRINT(ERR_DBG, "%s: PCI %s: cannot assign irq %d\n", + sp->dev->name, pci_name(pdev), pdev->irq); + return err; + } + + init_waitqueue_head (&sp->msi_wait); + sp->msi_detected = 0; + + saved64 = val64 = readq(&bar0->scheduled_int_ctrl); + val64 |= SCHED_INT_CTRL_ONE_SHOT; + val64 |= SCHED_INT_CTRL_TIMER_EN; + val64 |= SCHED_INT_CTRL_INT2MSI(1); + writeq(val64, &bar0->scheduled_int_ctrl); + + wait_event_timeout(sp->msi_wait, sp->msi_detected, HZ/10); + + if (!sp->msi_detected) { + /* MSI(X) test failed, go back to INTx mode */ + DBG_PRINT(ERR_DBG, "%s: PCI %s: No interrupt was generated" + "using MSI(X) during test\n", sp->dev->name, + pci_name(pdev)); + + err = -EOPNOTSUPP; + } + + free_irq(sp->entries[1].vector, sp); + + writeq(saved64, &bar0->scheduled_int_ctrl); + + return err; +} /* ********************************************************* * * Functions defined below concern the OS part of the driver * * ********************************************************* */ @@ -3809,6 +3803,50 @@ static int s2io_open(struct net_device *dev) netif_carrier_off(dev); sp->last_link_state = 0; + napi_enable(&sp->napi); + + if (sp->config.intr_type == MSI_X) { + int ret = s2io_enable_msi_x(sp); + + if (!ret) { + u16 msi_control; + + ret = s2io_test_msi(sp); + + /* rollback MSI-X, will re-enable during add_isr() */ + kfree(sp->entries); + sp->mac_control.stats_info->sw_stat.mem_freed += + (MAX_REQUESTED_MSI_X * + sizeof(struct msix_entry)); + kfree(sp->s2io_entries); + sp->mac_control.stats_info->sw_stat.mem_freed += + (MAX_REQUESTED_MSI_X * + sizeof(struct s2io_msix_entry)); + sp->entries = NULL; + sp->s2io_entries = 
NULL; + + pci_read_config_word(sp->pdev, 0x42, &msi_control); + msi_control &= 0xFFFE; /* Disable MSI */ + pci_write_config_word(sp->pdev, 0x42, msi_control); + + pci_disable_msix(sp->pdev); + + } + if (ret) { + + DBG_PRINT(ERR_DBG, + "%s: MSI-X requested but failed to enable\n", + dev->name); + sp->config.intr_type = INTA; + } + } + + /* NAPI doesn't work well with MSI(X) */ + if (sp->config.intr_type != INTA) { + if(sp->config.napi) + sp->config.napi = 0; + } + /* Initialize H/W and enable interrupts */ err = s2io_card_up(sp); if (err) { @@ -3817,7 +3855,7 @@ static int s2io_open(struct net_device *dev) goto hw_init_failed; } - if (s2io_set_mac_addr(dev, dev->dev_addr) == FAILURE) { + if (do_s2io_prog_unicast(dev, dev->dev_addr) == FAILURE) { DBG_PRINT(ERR_DBG, "Set Mac Address Failed\n"); s2io_card_down(sp); err = -ENODEV; @@ -3828,15 +3866,16 @@ static int s2io_open(struct net_device *dev) return 0; hw_init_failed: - if (sp->intr_type == MSI_X) { + napi_disable(&sp->napi); + if (sp->config.intr_type == MSI_X) { if (sp->entries) { kfree(sp->entries); - sp->mac_control.stats_info->sw_stat.mem_freed + sp->mac_control.stats_info->sw_stat.mem_freed += (MAX_REQUESTED_MSI_X * sizeof(struct msix_entry)); } if (sp->s2io_entries) { kfree(sp->s2io_entries); - sp->mac_control.stats_info->sw_stat.mem_freed + sp->mac_control.stats_info->sw_stat.mem_freed += (MAX_REQUESTED_MSI_X * sizeof(struct s2io_msix_entry)); } } @@ -3861,6 +3900,7 @@ static int s2io_close(struct net_device *dev) struct s2io_nic *sp = dev->priv; netif_stop_queue(dev); + napi_disable(&sp->napi); /* Reset card, kill tasklet and free Tx and Rx buffers. */ s2io_card_down(sp); @@ -3907,7 +3947,7 @@ static int s2io_xmit(struct sk_buff *skb, struct net_device *dev) } spin_lock_irqsave(&sp->tx_lock, flags); - if (atomic_read(&sp->card_state) == CARD_DOWN) { + if (!is_s2io_card_up(sp)) { DBG_PRINT(TX_DBG, "%s: Card going down for reset\n", dev->name); spin_unlock_irqrestore(&sp->tx_lock, flags); @@ -4059,8 +4099,9 @@ static void s2io_alarm_handle(unsigned long data) { struct s2io_nic *sp = (struct s2io_nic *)data; + struct net_device *dev = sp->dev; - alarm_intr_handler(sp); + s2io_handle_errors(dev); mod_timer(&sp->alarm_timer, jiffies + HZ / 2); } @@ -4098,12 +4139,12 @@ static irqreturn_t s2io_msix_ring_handle(int irq, void *dev_id) struct ring_info *ring = (struct ring_info *)dev_id; struct s2io_nic *sp = ring->nic; - atomic_inc(&sp->isr_cnt); + if (!is_s2io_card_up(sp)) + return IRQ_HANDLED; rx_intr_handler(ring); s2io_chk_rx_buffers(sp, ring->ring_no); - atomic_dec(&sp->isr_cnt); return IRQ_HANDLED; } @@ -4112,9 +4153,10 @@ static irqreturn_t s2io_msix_fifo_handle(int irq, void *dev_id) struct fifo_info *fifo = (struct fifo_info *)dev_id; struct s2io_nic *sp = fifo->nic; - atomic_inc(&sp->isr_cnt); + if (!is_s2io_card_up(sp)) + return IRQ_HANDLED; + tx_intr_handler(fifo); - atomic_dec(&sp->isr_cnt); return IRQ_HANDLED; } static void s2io_txpic_intr_handle(struct s2io_nic *sp) @@ -4178,6 +4220,292 @@ static void s2io_txpic_intr_handle(struct s2io_nic *sp) val64 = readq(&bar0->gpio_int_mask); } +/** + * do_s2io_chk_alarm_bit - Check for alarm and incrment the counter + * @value: alarm bits + * @addr: address value + * @cnt: counter variable + * Description: Check for alarm and increment the counter + * Return Value: + * 1 - if alarm bit set + * 0 - if alarm bit is not set + */ +static int do_s2io_chk_alarm_bit(u64 value, void __iomem * addr, + unsigned long long *cnt) +{ + u64 val64; + val64 = readq(addr); + if ( val64 & value ) { + 
writeq(val64, addr); + (*cnt)++; + return 1; + } + return 0; + +} + +/** + * s2io_handle_errors - Xframe error indication handler + * @nic: device private variable + * Description: Handle alarms such as loss of link, single or + * double ECC errors, critical and serious errors. + * Return Value: + * NONE + */ +static void s2io_handle_errors(void * dev_id) +{ + struct net_device *dev = (struct net_device *) dev_id; + struct s2io_nic *sp = dev->priv; + struct XENA_dev_config __iomem *bar0 = sp->bar0; + u64 temp64 = 0,val64=0; + int i = 0; + + struct swStat *sw_stat = &sp->mac_control.stats_info->sw_stat; + struct xpakStat *stats = &sp->mac_control.stats_info->xpak_stat; + + if (!is_s2io_card_up(sp)) + return; + + if (pci_channel_offline(sp->pdev)) + return; + + memset(&sw_stat->ring_full_cnt, 0, + sizeof(sw_stat->ring_full_cnt)); + + /* Handling the XPAK counters update */ + if(stats->xpak_timer_count < 72000) { + /* waiting for an hour */ + stats->xpak_timer_count++; + } else { + s2io_updt_xpak_counter(dev); + /* reset the count to zero */ + stats->xpak_timer_count = 0; + } + + /* Handling link status change error Intr */ + if (s2io_link_fault_indication(sp) == MAC_RMAC_ERR_TIMER) { + val64 = readq(&bar0->mac_rmac_err_reg); + writeq(val64, &bar0->mac_rmac_err_reg); + if (val64 & RMAC_LINK_STATE_CHANGE_INT) + schedule_work(&sp->set_link_task); + } + + /* In case of a serious error, the device will be Reset. */ + if (do_s2io_chk_alarm_bit(SERR_SOURCE_ANY, &bar0->serr_source, + &sw_stat->serious_err_cnt)) + goto reset; + + /* Check for data parity error */ + if (do_s2io_chk_alarm_bit(GPIO_INT_REG_DP_ERR_INT, &bar0->gpio_int_reg, + &sw_stat->parity_err_cnt)) + goto reset; + + /* Check for ring full counter */ + if (sp->device_type == XFRAME_II_DEVICE) { + val64 = readq(&bar0->ring_bump_counter1); + for (i=0; i<4; i++) { + temp64 = ( val64 & vBIT(0xFFFF,(i*16),16)); + temp64 >>= 64 - ((i+1)*16); + sw_stat->ring_full_cnt[i] += temp64; + } + + val64 = readq(&bar0->ring_bump_counter2); + for (i=0; i<4; i++) { + temp64 = ( val64 & vBIT(0xFFFF,(i*16),16)); + temp64 >>= 64 - ((i+1)*16); + sw_stat->ring_full_cnt[i+4] += temp64; + } + } + + val64 = readq(&bar0->txdma_int_status); + /*check for pfc_err*/ + if (val64 & TXDMA_PFC_INT) { + if (do_s2io_chk_alarm_bit(PFC_ECC_DB_ERR | PFC_SM_ERR_ALARM| + PFC_MISC_0_ERR | PFC_MISC_1_ERR| + PFC_PCIX_ERR, &bar0->pfc_err_reg, + &sw_stat->pfc_err_cnt)) + goto reset; + do_s2io_chk_alarm_bit(PFC_ECC_SG_ERR, &bar0->pfc_err_reg, + &sw_stat->pfc_err_cnt); + } + + /*check for tda_err*/ + if (val64 & TXDMA_TDA_INT) { + if(do_s2io_chk_alarm_bit(TDA_Fn_ECC_DB_ERR | TDA_SM0_ERR_ALARM | + TDA_SM1_ERR_ALARM, &bar0->tda_err_reg, + &sw_stat->tda_err_cnt)) + goto reset; + do_s2io_chk_alarm_bit(TDA_Fn_ECC_SG_ERR | TDA_PCIX_ERR, + &bar0->tda_err_reg, &sw_stat->tda_err_cnt); + } + /*check for pcc_err*/ + if (val64 & TXDMA_PCC_INT) { + if (do_s2io_chk_alarm_bit(PCC_SM_ERR_ALARM | PCC_WR_ERR_ALARM + | PCC_N_SERR | PCC_6_COF_OV_ERR + | PCC_7_COF_OV_ERR | PCC_6_LSO_OV_ERR + | PCC_7_LSO_OV_ERR | PCC_FB_ECC_DB_ERR + | PCC_TXB_ECC_DB_ERR, &bar0->pcc_err_reg, + &sw_stat->pcc_err_cnt)) + goto reset; + do_s2io_chk_alarm_bit(PCC_FB_ECC_SG_ERR | PCC_TXB_ECC_SG_ERR, + &bar0->pcc_err_reg, &sw_stat->pcc_err_cnt); + } + + /*check for tti_err*/ + if (val64 & TXDMA_TTI_INT) { + if (do_s2io_chk_alarm_bit(TTI_SM_ERR_ALARM, &bar0->tti_err_reg, + &sw_stat->tti_err_cnt)) + goto reset; + do_s2io_chk_alarm_bit(TTI_ECC_SG_ERR | TTI_ECC_DB_ERR, + &bar0->tti_err_reg, &sw_stat->tti_err_cnt); + } + + /*check for 
lso_err*/ + if (val64 & TXDMA_LSO_INT) { + if (do_s2io_chk_alarm_bit(LSO6_ABORT | LSO7_ABORT + | LSO6_SM_ERR_ALARM | LSO7_SM_ERR_ALARM, + &bar0->lso_err_reg, &sw_stat->lso_err_cnt)) + goto reset; + do_s2io_chk_alarm_bit(LSO6_SEND_OFLOW | LSO7_SEND_OFLOW, + &bar0->lso_err_reg, &sw_stat->lso_err_cnt); + } + + /*check for tpa_err*/ + if (val64 & TXDMA_TPA_INT) { + if (do_s2io_chk_alarm_bit(TPA_SM_ERR_ALARM, &bar0->tpa_err_reg, + &sw_stat->tpa_err_cnt)) + goto reset; + do_s2io_chk_alarm_bit(TPA_TX_FRM_DROP, &bar0->tpa_err_reg, + &sw_stat->tpa_err_cnt); + } + + /*check for sm_err*/ + if (val64 & TXDMA_SM_INT) { + if (do_s2io_chk_alarm_bit(SM_SM_ERR_ALARM, &bar0->sm_err_reg, + &sw_stat->sm_err_cnt)) + goto reset; + } + + val64 = readq(&bar0->mac_int_status); + if (val64 & MAC_INT_STATUS_TMAC_INT) { + if (do_s2io_chk_alarm_bit(TMAC_TX_BUF_OVRN | TMAC_TX_SM_ERR, + &bar0->mac_tmac_err_reg, + &sw_stat->mac_tmac_err_cnt)) + goto reset; + do_s2io_chk_alarm_bit(TMAC_ECC_SG_ERR | TMAC_ECC_DB_ERR + | TMAC_DESC_ECC_SG_ERR | TMAC_DESC_ECC_DB_ERR, + &bar0->mac_tmac_err_reg, + &sw_stat->mac_tmac_err_cnt); + } + + val64 = readq(&bar0->xgxs_int_status); + if (val64 & XGXS_INT_STATUS_TXGXS) { + if (do_s2io_chk_alarm_bit(TXGXS_ESTORE_UFLOW | TXGXS_TX_SM_ERR, + &bar0->xgxs_txgxs_err_reg, + &sw_stat->xgxs_txgxs_err_cnt)) + goto reset; + do_s2io_chk_alarm_bit(TXGXS_ECC_SG_ERR | TXGXS_ECC_DB_ERR, + &bar0->xgxs_txgxs_err_reg, + &sw_stat->xgxs_txgxs_err_cnt); + } + + val64 = readq(&bar0->rxdma_int_status); + if (val64 & RXDMA_INT_RC_INT_M) { + if (do_s2io_chk_alarm_bit(RC_PRCn_ECC_DB_ERR | RC_FTC_ECC_DB_ERR + | RC_PRCn_SM_ERR_ALARM |RC_FTC_SM_ERR_ALARM, + &bar0->rc_err_reg, &sw_stat->rc_err_cnt)) + goto reset; + do_s2io_chk_alarm_bit(RC_PRCn_ECC_SG_ERR | RC_FTC_ECC_SG_ERR + | RC_RDA_FAIL_WR_Rn, &bar0->rc_err_reg, + &sw_stat->rc_err_cnt); + if (do_s2io_chk_alarm_bit(PRC_PCI_AB_RD_Rn | PRC_PCI_AB_WR_Rn + | PRC_PCI_AB_F_WR_Rn, &bar0->prc_pcix_err_reg, + &sw_stat->prc_pcix_err_cnt)) + goto reset; + do_s2io_chk_alarm_bit(PRC_PCI_DP_RD_Rn | PRC_PCI_DP_WR_Rn + | PRC_PCI_DP_F_WR_Rn, &bar0->prc_pcix_err_reg, + &sw_stat->prc_pcix_err_cnt); + } + + if (val64 & RXDMA_INT_RPA_INT_M) { + if (do_s2io_chk_alarm_bit(RPA_SM_ERR_ALARM | RPA_CREDIT_ERR, + &bar0->rpa_err_reg, &sw_stat->rpa_err_cnt)) + goto reset; + do_s2io_chk_alarm_bit(RPA_ECC_SG_ERR | RPA_ECC_DB_ERR, + &bar0->rpa_err_reg, &sw_stat->rpa_err_cnt); + } + + if (val64 & RXDMA_INT_RDA_INT_M) { + if (do_s2io_chk_alarm_bit(RDA_RXDn_ECC_DB_ERR + | RDA_FRM_ECC_DB_N_AERR | RDA_SM1_ERR_ALARM + | RDA_SM0_ERR_ALARM | RDA_RXD_ECC_DB_SERR, + &bar0->rda_err_reg, &sw_stat->rda_err_cnt)) + goto reset; + do_s2io_chk_alarm_bit(RDA_RXDn_ECC_SG_ERR | RDA_FRM_ECC_SG_ERR + | RDA_MISC_ERR | RDA_PCIX_ERR, + &bar0->rda_err_reg, &sw_stat->rda_err_cnt); + } + + if (val64 & RXDMA_INT_RTI_INT_M) { + if (do_s2io_chk_alarm_bit(RTI_SM_ERR_ALARM, &bar0->rti_err_reg, + &sw_stat->rti_err_cnt)) + goto reset; + do_s2io_chk_alarm_bit(RTI_ECC_SG_ERR | RTI_ECC_DB_ERR, + &bar0->rti_err_reg, &sw_stat->rti_err_cnt); + } + + val64 = readq(&bar0->mac_int_status); + if (val64 & MAC_INT_STATUS_RMAC_INT) { + if (do_s2io_chk_alarm_bit(RMAC_RX_BUFF_OVRN | RMAC_RX_SM_ERR, + &bar0->mac_rmac_err_reg, + &sw_stat->mac_rmac_err_cnt)) + goto reset; + do_s2io_chk_alarm_bit(RMAC_UNUSED_INT|RMAC_SINGLE_ECC_ERR| + RMAC_DOUBLE_ECC_ERR, &bar0->mac_rmac_err_reg, + &sw_stat->mac_rmac_err_cnt); + } + + val64 = readq(&bar0->xgxs_int_status); + if (val64 & XGXS_INT_STATUS_RXGXS) { + if (do_s2io_chk_alarm_bit(RXGXS_ESTORE_OFLOW | 
RXGXS_RX_SM_ERR, + &bar0->xgxs_rxgxs_err_reg, + &sw_stat->xgxs_rxgxs_err_cnt)) + goto reset; + } + + val64 = readq(&bar0->mc_int_status); + if(val64 & MC_INT_STATUS_MC_INT) { + if (do_s2io_chk_alarm_bit(MC_ERR_REG_SM_ERR, &bar0->mc_err_reg, + &sw_stat->mc_err_cnt)) + goto reset; + + /* Handling Ecc errors */ + if (val64 & (MC_ERR_REG_ECC_ALL_SNG | MC_ERR_REG_ECC_ALL_DBL)) { + writeq(val64, &bar0->mc_err_reg); + if (val64 & MC_ERR_REG_ECC_ALL_DBL) { + sw_stat->double_ecc_errs++; + if (sp->device_type != XFRAME_II_DEVICE) { + /* + * Reset XframeI only if critical error + */ + if (val64 & + (MC_ERR_REG_MIRI_ECC_DB_ERR_0 | + MC_ERR_REG_MIRI_ECC_DB_ERR_1)) + goto reset; + } + } else + sw_stat->single_ecc_errs++; + } + } + return; + +reset: + netif_stop_queue(dev); + schedule_work(&sp->rst_timer_task); + sw_stat->soft_reset_cnt++; + return; +} + /** * s2io_isr - ISR handler of the device . * @irq: the irq of the device. @@ -4205,7 +4533,9 @@ static irqreturn_t s2io_isr(int irq, void *dev_id) if (pci_channel_offline(sp->pdev)) return IRQ_NONE; - atomic_inc(&sp->isr_cnt); + if (!is_s2io_card_up(sp)) + return IRQ_NONE; + mac_control = &sp->mac_control; config = &sp->config; @@ -4215,73 +4545,75 @@ static irqreturn_t s2io_isr(int irq, void *dev_id) * 1. Rx of packet. * 2. Tx complete. * 3. Link down. - * 4. Error in any functional blocks of the NIC. */ reason = readq(&bar0->general_int_status); - if (!reason) { - /* The interrupt was not raised by us. */ - atomic_dec(&sp->isr_cnt); - return IRQ_NONE; - } - else if (unlikely(reason == S2IO_MINUS_ONE) ) { - /* Disable device and get out */ - atomic_dec(&sp->isr_cnt); - return IRQ_NONE; + if (unlikely(reason == S2IO_MINUS_ONE) ) { + /* Nothing much can be done. Get out */ + return IRQ_HANDLED; } - if (napi) { - if (reason & GEN_INTR_RXTRAFFIC) { - if ( likely ( netif_rx_schedule_prep(dev)) ) { - __netif_rx_schedule(dev); - writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_mask); + if (reason & (GEN_INTR_RXTRAFFIC | + GEN_INTR_TXTRAFFIC | GEN_INTR_TXPIC)) + { + writeq(S2IO_MINUS_ONE, &bar0->general_int_mask); + + if (config->napi) { + if (reason & GEN_INTR_RXTRAFFIC) { + if (likely(netif_rx_schedule_prep(dev, + &sp->napi))) { + __netif_rx_schedule(dev, &sp->napi); + writeq(S2IO_MINUS_ONE, + &bar0->rx_traffic_mask); + } else + writeq(S2IO_MINUS_ONE, + &bar0->rx_traffic_int); } - else + } else { + /* + * rx_traffic_int reg is an R1 register, writing all 1's + * will ensure that the actual interrupt causing bit + * get's cleared and hence a read can be avoided. + */ + if (reason & GEN_INTR_RXTRAFFIC) writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_int); + + for (i = 0; i < config->rx_ring_num; i++) + rx_intr_handler(&mac_control->rings[i]); } - } else { + /* - * Rx handler is called by default, without checking for the - * cause of interrupt. - * rx_traffic_int reg is an R1 register, writing all 1's + * tx_traffic_int reg is an R1 register, writing all 1's * will ensure that the actual interrupt causing bit get's * cleared and hence a read can be avoided. */ - if (reason & GEN_INTR_RXTRAFFIC) - writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_int); + if (reason & GEN_INTR_TXTRAFFIC) + writeq(S2IO_MINUS_ONE, &bar0->tx_traffic_int); - for (i = 0; i < config->rx_ring_num; i++) { - rx_intr_handler(&mac_control->rings[i]); - } - } + for (i = 0; i < config->tx_fifo_num; i++) + tx_intr_handler(&mac_control->fifos[i]); - /* - * tx_traffic_int reg is an R1 register, writing all 1's - * will ensure that the actual interrupt causing bit get's - * cleared and hence a read can be avoided. 
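do_s2io_chk_alarm_bit() and the R1-register comments above lean on the same hardware property: these registers are write-1-to-clear, so writing back the value just read acknowledges exactly the events that fired and saves a second read. A self-contained sketch of the check, clear and count helper:

#include <linux/io.h>
#include <linux/types.h>

/* Sketch: W1C alarm check; clear whatever fired and bump the matching counter. */
static bool example_check_alarm(u64 bits, void __iomem *alarm_reg,
                                unsigned long long *cnt)
{
        u64 val = readq(alarm_reg);

        if (val & bits) {
                writeq(val, alarm_reg); /* write-1-to-clear acknowledge */
                (*cnt)++;
                return true;            /* caller may escalate, e.g. schedule a reset */
        }
        return false;
}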
- */ - if (reason & GEN_INTR_TXTRAFFIC) - writeq(S2IO_MINUS_ONE, &bar0->tx_traffic_int); + if (reason & GEN_INTR_TXPIC) + s2io_txpic_intr_handle(sp); - for (i = 0; i < config->tx_fifo_num; i++) - tx_intr_handler(&mac_control->fifos[i]); + /* + * Reallocate the buffers from the interrupt handler itself. + */ + if (!config->napi) { + for (i = 0; i < config->rx_ring_num; i++) + s2io_chk_rx_buffers(sp, i); + } + writeq(sp->general_int_mask, &bar0->general_int_mask); + readl(&bar0->general_int_status); - if (reason & GEN_INTR_TXPIC) - s2io_txpic_intr_handle(sp); - /* - * If the Rx buffer count is below the panic threshold then - * reallocate the buffers from the interrupt handler itself, - * else schedule a tasklet to reallocate the buffers. - */ - if (!napi) { - for (i = 0; i < config->rx_ring_num; i++) - s2io_chk_rx_buffers(sp, i); - } + return IRQ_HANDLED; - writeq(0, &bar0->general_int_mask); - readl(&bar0->general_int_status); + } + else if (!reason) { + /* The interrupt was not raised by us */ + return IRQ_NONE; + } - atomic_dec(&sp->isr_cnt); return IRQ_HANDLED; } @@ -4294,7 +4626,7 @@ static void s2io_updt_stats(struct s2io_nic *sp) u64 val64; int cnt = 0; - if (atomic_read(&sp->card_state) == CARD_UP) { + if (is_s2io_card_up(sp)) { /* Apprx 30us on a 133 MHz bus */ val64 = SET_UPDT_CLICKS(10) | STAT_CFG_ONE_SHOT_EN | STAT_CFG_STAT_EN; @@ -4308,7 +4640,7 @@ static void s2io_updt_stats(struct s2io_nic *sp) if (cnt == 5) break; /* Updt failed */ } while(1); - } + } } /** @@ -4525,8 +4857,48 @@ static void s2io_set_multicast(struct net_device *dev) } } +/* add unicast MAC address to CAM */ +static int do_s2io_add_unicast(struct s2io_nic *sp, u64 addr, int off) +{ + u64 val64; + struct XENA_dev_config __iomem *bar0 = sp->bar0; + + writeq(RMAC_ADDR_DATA0_MEM_ADDR(addr), + &bar0->rmac_addr_data0_mem); + + val64 = + RMAC_ADDR_CMD_MEM_WE | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD | + RMAC_ADDR_CMD_MEM_OFFSET(off); + writeq(val64, &bar0->rmac_addr_cmd_mem); + + /* Wait till command completes */ + if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem, + RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING, + S2IO_BIT_RESET)) { + DBG_PRINT(INFO_DBG, "add_mac_addr failed\n"); + return FAILURE; + } + return SUCCESS; +} + +/** + * s2io_set_mac_addr driver entry point + */ +static int s2io_set_mac_addr(struct net_device *dev, void *p) +{ + struct sockaddr *addr = p; + + if (!is_valid_ether_addr(addr->sa_data)) + return -EINVAL; + + memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); + + /* store the MAC address in CAM */ + return (do_s2io_prog_unicast(dev, dev->dev_addr)); +} + /** - * s2io_set_mac_addr - Programs the Xframe mac address + * do_s2io_prog_unicast - Programs the Xframe mac address * @dev : pointer to the device structure. * @addr: a uchar pointer to the new mac address which is to be set. * Description : This procedure will program the Xframe to receive @@ -4534,56 +4906,31 @@ static void s2io_set_multicast(struct net_device *dev) * Return value: SUCCESS on success and an appropriate (-)ve integer * as defined in errno.h file on failure. */ - -static int s2io_set_mac_addr(struct net_device *dev, u8 * addr) +static int do_s2io_prog_unicast(struct net_device *dev, u8 *addr) { struct s2io_nic *sp = dev->priv; - struct XENA_dev_config __iomem *bar0 = sp->bar0; - register u64 val64, mac_addr = 0; + register u64 mac_addr = 0, perm_addr = 0; int i; - u64 old_mac_addr = 0; /* - * Set the new MAC address as the new unicast filter and reflect this - * change on the device address registered with the OS. 
It will be - * at offset 0. - */ + * Set the new MAC address as the new unicast filter and reflect this + * change on the device address registered with the OS. It will be + * at offset 0. + */ for (i = 0; i < ETH_ALEN; i++) { mac_addr <<= 8; mac_addr |= addr[i]; - old_mac_addr <<= 8; - old_mac_addr |= sp->def_mac_addr[0].mac_addr[i]; + perm_addr <<= 8; + perm_addr |= sp->def_mac_addr[0].mac_addr[i]; } - if(0 == mac_addr) + /* check if the dev_addr is different than perm_addr */ + if (mac_addr == perm_addr) return SUCCESS; /* Update the internal structure with this new mac address */ - if(mac_addr != old_mac_addr) { - memset(sp->def_mac_addr[0].mac_addr, 0, sizeof(ETH_ALEN)); - sp->def_mac_addr[0].mac_addr[5] = (u8) (mac_addr); - sp->def_mac_addr[0].mac_addr[4] = (u8) (mac_addr >> 8); - sp->def_mac_addr[0].mac_addr[3] = (u8) (mac_addr >> 16); - sp->def_mac_addr[0].mac_addr[2] = (u8) (mac_addr >> 24); - sp->def_mac_addr[0].mac_addr[1] = (u8) (mac_addr >> 32); - sp->def_mac_addr[0].mac_addr[0] = (u8) (mac_addr >> 40); - } - - writeq(RMAC_ADDR_DATA0_MEM_ADDR(mac_addr), - &bar0->rmac_addr_data0_mem); - - val64 = - RMAC_ADDR_CMD_MEM_WE | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD | - RMAC_ADDR_CMD_MEM_OFFSET(0); - writeq(val64, &bar0->rmac_addr_cmd_mem); - /* Wait till command completes */ - if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem, - RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING, S2IO_BIT_RESET)) { - DBG_PRINT(ERR_DBG, "%s: set_mac_addr failed\n", dev->name); - return FAILURE; - } - - return SUCCESS; + do_s2io_copy_mac_addr(sp, 0, mac_addr); + return (do_s2io_add_unicast(sp, mac_addr, 0)); } /** @@ -4631,7 +4978,9 @@ static int s2io_ethtool_gset(struct net_device *dev, struct ethtool_cmd *info) info->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE); info->advertising = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE); info->port = PORT_FIBRE; - /* info->transceiver?? 
TODO */ + + /* info->transceiver */ + info->transceiver = XCVR_EXTERNAL; if (netif_carrier_ok(sp->dev)) { info->speed = 10000; @@ -4668,12 +5017,6 @@ static void s2io_ethtool_gdrvinfo(struct net_device *dev, strncpy(info->bus_info, pci_name(sp->pdev), sizeof(info->bus_info)); info->regdump_len = XENA_REG_SPACE; info->eedump_len = XENA_EEPROM_SPACE; - info->testinfo_len = S2IO_TEST_LEN; - - if (sp->device_type == XFRAME_I_DEVICE) - info->n_stats = XFRAME_I_STAT_LEN; - else - info->n_stats = XFRAME_II_STAT_LEN; } /** @@ -4803,13 +5146,13 @@ static void s2io_ethtool_gringparam(struct net_device *dev, ering->rx_max_pending = MAX_RX_DESC_2; ering->tx_max_pending = MAX_TX_DESC; - for (i = 0 ; i < sp->config.tx_fifo_num ; i++) + for (i = 0 ; i < sp->config.tx_fifo_num ; i++) tx_desc_count += sp->config.tx_cfg[i].fifo_len; - + DBG_PRINT(INFO_DBG,"\nmax txds : %d\n",sp->config.max_txds); ering->tx_pending = tx_desc_count; rx_desc_count = 0; - for (i = 0 ; i < sp->config.rx_ring_num ; i++) + for (i = 0 ; i < sp->config.rx_ring_num ; i++) rx_desc_count += sp->config.rx_cfg[i].num_rxd; ering->rx_pending = rx_desc_count; @@ -5567,7 +5910,7 @@ static void s2io_get_ethtool_stats(struct net_device *dev, struct ethtool_stats *estats, u64 * tmp_stats) { - int i = 0; + int i = 0, k; struct s2io_nic *sp = dev->priv; struct stat_block *stat_info = sp->mac_control.stats_info; @@ -5762,7 +6105,8 @@ static void s2io_get_ethtool_stats(struct net_device *dev, tmp_stats[i++] = stat_info->sw_stat.serious_err_cnt; tmp_stats[i++] = stat_info->sw_stat.soft_reset_cnt; tmp_stats[i++] = stat_info->sw_stat.fifo_full_cnt; - tmp_stats[i++] = stat_info->sw_stat.ring_full_cnt; + for (k = 0; k < MAX_RX_RINGS; k++) + tmp_stats[i++] = stat_info->sw_stat.ring_full_cnt[k]; tmp_stats[i++] = stat_info->xpak_stat.alarm_transceiver_temp_high; tmp_stats[i++] = stat_info->xpak_stat.alarm_transceiver_temp_low; tmp_stats[i++] = stat_info->xpak_stat.alarm_laser_bias_current_high; @@ -5819,6 +6163,23 @@ static void s2io_get_ethtool_stats(struct net_device *dev, tmp_stats[i++] = stat_info->sw_stat.rx_buf_size_err_cnt; tmp_stats[i++] = stat_info->sw_stat.rx_rxd_corrupt_cnt; tmp_stats[i++] = stat_info->sw_stat.rx_unkn_err_cnt; + tmp_stats[i++] = stat_info->sw_stat.tda_err_cnt; + tmp_stats[i++] = stat_info->sw_stat.pfc_err_cnt; + tmp_stats[i++] = stat_info->sw_stat.pcc_err_cnt; + tmp_stats[i++] = stat_info->sw_stat.tti_err_cnt; + tmp_stats[i++] = stat_info->sw_stat.tpa_err_cnt; + tmp_stats[i++] = stat_info->sw_stat.sm_err_cnt; + tmp_stats[i++] = stat_info->sw_stat.lso_err_cnt; + tmp_stats[i++] = stat_info->sw_stat.mac_tmac_err_cnt; + tmp_stats[i++] = stat_info->sw_stat.mac_rmac_err_cnt; + tmp_stats[i++] = stat_info->sw_stat.xgxs_txgxs_err_cnt; + tmp_stats[i++] = stat_info->sw_stat.xgxs_rxgxs_err_cnt; + tmp_stats[i++] = stat_info->sw_stat.rc_err_cnt; + tmp_stats[i++] = stat_info->sw_stat.prc_pcix_err_cnt; + tmp_stats[i++] = stat_info->sw_stat.rpa_err_cnt; + tmp_stats[i++] = stat_info->sw_stat.rda_err_cnt; + tmp_stats[i++] = stat_info->sw_stat.rti_err_cnt; + tmp_stats[i++] = stat_info->sw_stat.mc_err_cnt; } static int s2io_ethtool_get_regs_len(struct net_device *dev) @@ -5851,9 +6212,25 @@ static int s2io_get_eeprom_len(struct net_device *dev) return (XENA_EEPROM_SPACE); } -static int s2io_ethtool_self_test_count(struct net_device *dev) +static int s2io_get_sset_count(struct net_device *dev, int sset) { - return (S2IO_TEST_LEN); + struct s2io_nic *sp = dev->priv; + + switch (sset) { + case ETH_SS_TEST: + return S2IO_TEST_LEN; + case ETH_SS_STATS: + 
switch(sp->device_type) { + case XFRAME_I_DEVICE: + return XFRAME_I_STAT_LEN; + case XFRAME_II_DEVICE: + return XFRAME_II_STAT_LEN; + default: + return 0; + } + default: + return -EOPNOTSUPP; + } } static void s2io_ethtool_get_strings(struct net_device *dev, @@ -5880,22 +6257,6 @@ static void s2io_ethtool_get_strings(struct net_device *dev, sizeof(ethtool_driver_stats_keys)); } } -static int s2io_ethtool_get_stats_count(struct net_device *dev) -{ - struct s2io_nic *sp = dev->priv; - int stat_count = 0; - switch(sp->device_type) { - case XFRAME_I_DEVICE: - stat_count = XFRAME_I_STAT_LEN; - break; - - case XFRAME_II_DEVICE: - stat_count = XFRAME_II_STAT_LEN; - break; - } - - return stat_count; -} static int s2io_ethtool_op_set_tx_csum(struct net_device *dev, u32 data) { @@ -5936,20 +6297,16 @@ static const struct ethtool_ops netdev_ethtool_ops = { .set_pauseparam = s2io_ethtool_setpause_data, .get_rx_csum = s2io_ethtool_get_rx_csum, .set_rx_csum = s2io_ethtool_set_rx_csum, - .get_tx_csum = ethtool_op_get_tx_csum, .set_tx_csum = s2io_ethtool_op_set_tx_csum, - .get_sg = ethtool_op_get_sg, .set_sg = ethtool_op_set_sg, .get_tso = s2io_ethtool_op_get_tso, .set_tso = s2io_ethtool_op_set_tso, - .get_ufo = ethtool_op_get_ufo, .set_ufo = ethtool_op_set_ufo, - .self_test_count = s2io_ethtool_self_test_count, .self_test = s2io_ethtool_test, .get_strings = s2io_ethtool_get_strings, .phys_id = s2io_ethtool_idnic, - .get_stats_count = s2io_ethtool_get_stats_count, - .get_ethtool_stats = s2io_get_ethtool_stats + .get_ethtool_stats = s2io_get_ethtool_stats, + .get_sset_count = s2io_get_sset_count, }; /** @@ -6072,7 +6429,7 @@ static void s2io_set_link(struct work_struct *work) if (!netif_running(dev)) goto out_unlock; - if (test_and_set_bit(0, &(nic->link_state))) { + if (test_and_set_bit(__S2IO_STATE_LINK_TASK, &(nic->state))) { /* The card is being reset, no point doing anything */ goto out_unlock; } @@ -6110,13 +6467,10 @@ static void s2io_set_link(struct work_struct *work) netif_stop_queue(dev); } } - val64 = readq(&bar0->adapter_status); - if (!LINK_IS_UP(val64)) { - DBG_PRINT(ERR_DBG, "%s:", dev->name); - DBG_PRINT(ERR_DBG, " Link down after enabling "); - DBG_PRINT(ERR_DBG, "device \n"); - } else - s2io_link(nic, LINK_UP); + val64 = readq(&bar0->adapter_control); + val64 |= ADAPTER_LED_ON; + writeq(val64, &bar0->adapter_control); + s2io_link(nic, LINK_UP); } else { if (CARDS_WITH_FAULTY_LINK_INDICATORS(nic->device_type, subid)) { @@ -6125,9 +6479,13 @@ static void s2io_set_link(struct work_struct *work) writeq(val64, &bar0->gpio_control); val64 = readq(&bar0->gpio_control); } + /* turn off LED */ + val64 = readq(&bar0->adapter_control); + val64 = val64 &(~ADAPTER_LED_ON); + writeq(val64, &bar0->adapter_control); s2io_link(nic, LINK_DOWN); } - clear_bit(0, &(nic->link_state)); + clear_bit(__S2IO_STATE_LINK_TASK, &(nic->state)); out_unlock: rtnl_unlock(); @@ -6162,7 +6520,7 @@ static int set_rxd_buffer_pointer(struct s2io_nic *sp, struct RxD_t *rxdp, mem_alloc_fail_cnt++; return -ENOMEM ; } - sp->mac_control.stats_info->sw_stat.mem_allocated + sp->mac_control.stats_info->sw_stat.mem_allocated += (*skb)->truesize; /* storing the mapped addr in a temp variable * such it will be used for next rxd whose @@ -6195,7 +6553,7 @@ static int set_rxd_buffer_pointer(struct s2io_nic *sp, struct RxD_t *rxdp, mem_alloc_fail_cnt++; return -ENOMEM; } - sp->mac_control.stats_info->sw_stat.mem_allocated + sp->mac_control.stats_info->sw_stat.mem_allocated += (*skb)->truesize; rxdp3->Buffer2_ptr = *temp2 = 
pci_map_single(sp->pdev, (*skb)->data, @@ -6308,18 +6666,18 @@ static int s2io_add_isr(struct s2io_nic * sp) struct net_device *dev = sp->dev; int err = 0; - if (sp->intr_type == MSI_X) + if (sp->config.intr_type == MSI_X) ret = s2io_enable_msi_x(sp); if (ret) { DBG_PRINT(ERR_DBG, "%s: Defaulting to INTA\n", dev->name); - sp->intr_type = INTA; + sp->config.intr_type = INTA; } /* Store the values of the MSIX table in the struct s2io_nic structure */ store_xmsi_data(sp); /* After proper initialization of H/W, register ISR */ - if (sp->intr_type == MSI_X) { + if (sp->config.intr_type == MSI_X) { int i, msix_tx_cnt=0,msix_rx_cnt=0; for (i=1; (sp->s2io_entries[i].in_use == MSIX_FLG); i++) { @@ -6371,7 +6729,7 @@ static int s2io_add_isr(struct s2io_nic * sp) printk("MSI-X-TX %d entries enabled\n",msix_tx_cnt); printk("MSI-X-RX %d entries enabled\n",msix_rx_cnt); } - if (sp->intr_type == INTA) { + if (sp->config.intr_type == INTA) { err = request_irq((int) sp->pdev->irq, s2io_isr, IRQF_SHARED, sp->name, dev); if (err) { @@ -6384,10 +6742,10 @@ static int s2io_add_isr(struct s2io_nic * sp) } static void s2io_rem_isr(struct s2io_nic * sp) { - int cnt = 0; struct net_device *dev = sp->dev; + struct swStat *stats = &sp->mac_control.stats_info->sw_stat; - if (sp->intr_type == MSI_X) { + if (sp->config.intr_type == MSI_X) { int i; u16 msi_control; @@ -6396,24 +6754,28 @@ static void s2io_rem_isr(struct s2io_nic * sp) int vector = sp->entries[i].vector; void *arg = sp->s2io_entries[i].arg; + synchronize_irq(vector); free_irq(vector, arg); } + + kfree(sp->entries); + stats->mem_freed += + (MAX_REQUESTED_MSI_X * sizeof(struct msix_entry)); + kfree(sp->s2io_entries); + stats->mem_freed += + (MAX_REQUESTED_MSI_X * sizeof(struct s2io_msix_entry)); + sp->entries = NULL; + sp->s2io_entries = NULL; + pci_read_config_word(sp->pdev, 0x42, &msi_control); msi_control &= 0xFFFE; /* Disable MSI */ pci_write_config_word(sp->pdev, 0x42, msi_control); pci_disable_msix(sp->pdev); } else { + synchronize_irq(sp->pdev->irq); free_irq(sp->pdev->irq, dev); } - /* Waiting till all Interrupt handlers are complete */ - cnt = 0; - do { - msleep(10); - if (!atomic_read(&sp->isr_cnt)) - break; - cnt++; - } while(cnt < 5); } static void do_s2io_card_down(struct s2io_nic * sp, int do_io) @@ -6425,10 +6787,10 @@ static void do_s2io_card_down(struct s2io_nic * sp, int do_io) del_timer_sync(&sp->alarm_timer); /* If s2io_set_link task is executing, wait till it completes. 
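s2io_rem_isr() above drops the old poll-until-isr_cnt-is-zero loop in favour of synchronize_irq() before free_irq(); combined with the is_s2io_card_up() early returns added to each handler, that is enough to ensure no handler is still running when the vector is released. The ordering, reduced to a sketch for a single IRQ line:

#include <linux/bitops.h>
#include <linux/interrupt.h>

#define EXAMPLE_STATE_UP        0       /* stand-in for __S2IO_STATE_CARD_UP */

/* Sketch: quiesce and release one IRQ line during teardown. */
static void example_quiesce_irq(unsigned long *state, unsigned int irq,
                                void *dev_id)
{
        clear_bit(EXAMPLE_STATE_UP, state);     /* new handler invocations bail out early */
        synchronize_irq(irq);                   /* wait for any handler already in flight */
        free_irq(irq, dev_id);                  /* safe to release the vector now */
}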
*/ - while (test_and_set_bit(0, &(sp->link_state))) { + while (test_and_set_bit(__S2IO_STATE_LINK_TASK, &(sp->state))) { msleep(50); } - atomic_set(&sp->card_state, CARD_DOWN); + clear_bit(__S2IO_STATE_CARD_UP, &sp->state); /* disable Tx and Rx traffic on the NIC */ if (do_io) @@ -6479,7 +6841,7 @@ static void do_s2io_card_down(struct s2io_nic * sp, int do_io) free_rx_buffers(sp); spin_unlock_irqrestore(&sp->rx_lock, flags); - clear_bit(0, &(sp->link_state)); + clear_bit(__S2IO_STATE_LINK_TASK, &(sp->state)); } static void s2io_card_down(struct s2io_nic * sp) @@ -6550,7 +6912,7 @@ static int s2io_card_up(struct s2io_nic * sp) /* Add interrupt service routine */ if (s2io_add_isr(sp) != 0) { - if (sp->intr_type == MSI_X) + if (sp->config.intr_type == MSI_X) s2io_rem_isr(sp); s2io_reset(sp); free_rx_buffers(sp); @@ -6563,17 +6925,16 @@ static int s2io_card_up(struct s2io_nic * sp) tasklet_init(&sp->task, s2io_tasklet, (unsigned long) dev); /* Enable select interrupts */ - if (sp->intr_type != INTA) + en_dis_err_alarms(sp, ENA_ALL_INTRS, ENABLE_INTRS); + if (sp->config.intr_type != INTA) en_dis_able_nic_intrs(sp, ENA_ALL_INTRS, DISABLE_INTRS); else { interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR; - interruptible |= TX_PIC_INTR | RX_PIC_INTR; - interruptible |= TX_MAC_INTR | RX_MAC_INTR; + interruptible |= TX_PIC_INTR; en_dis_able_nic_intrs(sp, interruptible, ENABLE_INTRS); } - - atomic_set(&sp->card_state, CARD_UP); + set_bit(__S2IO_STATE_CARD_UP, &sp->state); return 0; } @@ -6727,7 +7088,7 @@ static int rx_osm_handler(struct ring_info *ring_data, struct RxD_t * rxdp) DBG_PRINT(ERR_DBG, "%s: Rx error Value: 0x%x\n", dev->name, err_mask); sp->stats.rx_crc_errors++; - sp->mac_control.stats_info->sw_stat.mem_freed + sp->mac_control.stats_info->sw_stat.mem_freed += skb->truesize; dev_kfree_skb(skb); atomic_dec(&sp->rx_bufs_left[ring_no]); @@ -6776,7 +7137,8 @@ static int rx_osm_handler(struct ring_info *ring_data, struct RxD_t * rxdp) int ret = 0; ret = s2io_club_tcp_session(skb->data, &tcp, - &tcp_len, &lro, rxdp, sp); + &tcp_len, &lro, + rxdp, sp); switch (ret) { case 3: /* Begin anew */ lro->parent = skb; @@ -6881,13 +7243,13 @@ static void s2io_link(struct s2io_nic * sp, int link) DBG_PRINT(ERR_DBG, "%s: Link down\n", dev->name); netif_carrier_off(dev); if(sp->mac_control.stats_info->sw_stat.link_up_cnt) - sp->mac_control.stats_info->sw_stat.link_up_time = + sp->mac_control.stats_info->sw_stat.link_up_time = jiffies - sp->start_time; sp->mac_control.stats_info->sw_stat.link_down_cnt++; } else { DBG_PRINT(ERR_DBG, "%s: Link Up\n", dev->name); if (sp->mac_control.stats_info->sw_stat.link_down_cnt) - sp->mac_control.stats_info->sw_stat.link_down_time = + sp->mac_control.stats_info->sw_stat.link_down_time = jiffies - sp->start_time; sp->mac_control.stats_info->sw_stat.link_up_cnt++; netif_carrier_on(dev); @@ -6944,19 +7306,12 @@ static int s2io_verify_parm(struct pci_dev *pdev, u8 *dev_intr_type) if (*dev_intr_type != INTA) napi = 0; -#ifndef CONFIG_PCI_MSI - if (*dev_intr_type != INTA) { - DBG_PRINT(ERR_DBG, "s2io: This kernel does not support" - "MSI/MSI-X. Defaulting to INTA\n"); - *dev_intr_type = INTA; - } -#else if ((*dev_intr_type != INTA) && (*dev_intr_type != MSI_X)) { DBG_PRINT(ERR_DBG, "s2io: Wrong intr_type requested. 
" "Defaulting to INTA\n"); *dev_intr_type = INTA; } -#endif + if ((*dev_intr_type == MSI_X) && ((pdev->device != PCI_DEVICE_ID_HERC_WIN) && (pdev->device != PCI_DEVICE_ID_HERC_UNI))) { @@ -7033,6 +7388,7 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre) struct config_param *config; int mode; u8 dev_intr_type = intr_type; + DECLARE_MAC_BUF(mac); if ((ret = s2io_verify_parm(pdev, &dev_intr_type))) return ret; @@ -7076,7 +7432,6 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre) pci_set_master(pdev); pci_set_drvdata(pdev, dev); - SET_MODULE_OWNER(dev); SET_NETDEV_DEV(dev, &pdev->dev); /* Private member variable initialized to s2io NIC structure */ @@ -7091,7 +7446,7 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre) if (rx_ring_mode == 2) sp->rxd_mode = RXD_MODE_3B; - sp->intr_type = dev_intr_type; + sp->config.intr_type = dev_intr_type; if ((pdev->device == PCI_DEVICE_ID_HERC_WIN) || (pdev->device == PCI_DEVICE_ID_HERC_UNI)) @@ -7099,7 +7454,7 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre) else sp->device_type = XFRAME_I_DEVICE; - sp->lro = lro; + sp->lro = lro_enable; /* Initialize some PCI/PCI-X fields of the NIC. */ s2io_init_pci(sp); @@ -7114,6 +7469,8 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre) mac_control = &sp->mac_control; config = &sp->config; + config->napi = napi; + /* Tx side parameters. */ config->tx_fifo_num = tx_fifo_num; for (i = 0; i < MAX_TX_FIFOS; i++) { @@ -7161,9 +7518,6 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre) for (i = 0; i < config->rx_ring_num; i++) atomic_set(&sp->rx_bufs_left[i], 0); - /* Initialize the number of ISRs currently running */ - atomic_set(&sp->isr_cnt, 0); - /* initialize the shared memory used by the NIC and the host */ if (init_shared_mem(sp)) { DBG_PRINT(ERR_DBG, "%s: Memory allocation failed\n", @@ -7206,6 +7560,7 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre) dev->get_stats = &s2io_get_stats; dev->set_multicast_list = &s2io_set_multicast; dev->do_ioctl = &s2io_ioctl; + dev->set_mac_address = &s2io_set_mac_addr; dev->change_mtu = &s2io_change_mtu; SET_ETHTOOL_OPS(dev, &netdev_ethtool_ops); dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX; @@ -7215,8 +7570,7 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre) * will use eth_mac_addr() for dev->set_mac_address * mac address will be set every time dev->open() is called */ - dev->poll = s2io_poll; - dev->weight = 32; + netif_napi_add(dev, &sp->napi, s2io_poll, 32); #ifdef CONFIG_NET_POLL_CONTROLLER dev->poll_controller = s2io_netpoll; @@ -7292,7 +7646,10 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre) /* Set the factory defined MAC address initially */ dev->addr_len = ETH_ALEN; memcpy(dev->dev_addr, sp->def_mac_addr, ETH_ALEN); + memcpy(dev->perm_addr, dev->dev_addr, ETH_ALEN); + /* Store the values of the MSIX table in the s2io_nic structure */ + store_xmsi_data(sp); /* reset Nic and bring it to known state */ s2io_reset(sp); @@ -7300,9 +7657,8 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre) * Initialize the tasklet status and link state flags * and the card state parameter */ - atomic_set(&(sp->card_state), 0); sp->tasklet_status = 0; - sp->link_state = 0; + sp->state = 0; /* Initialize spinlocks */ spin_lock_init(&sp->tx_lock); @@ -7338,14 +7694,8 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre) sp->product_name, pdev->revision); 
DBG_PRINT(ERR_DBG, "%s: Driver version %s\n", dev->name, s2io_driver_version); - DBG_PRINT(ERR_DBG, "%s: MAC ADDR: " - "%02x:%02x:%02x:%02x:%02x:%02x", dev->name, - sp->def_mac_addr[0].mac_addr[0], - sp->def_mac_addr[0].mac_addr[1], - sp->def_mac_addr[0].mac_addr[2], - sp->def_mac_addr[0].mac_addr[3], - sp->def_mac_addr[0].mac_addr[4], - sp->def_mac_addr[0].mac_addr[5]); + DBG_PRINT(ERR_DBG, "%s: MAC ADDR: %s\n", + dev->name, print_mac(mac, dev->dev_addr)); DBG_PRINT(ERR_DBG, "SERIAL NUMBER: %s\n", sp->serial_num); if (sp->device_type & XFRAME_II_DEVICE) { mode = s2io_print_pci_mode(sp); @@ -7369,7 +7719,7 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre) if (napi) DBG_PRINT(ERR_DBG, "%s: NAPI enabled\n", dev->name); - switch(sp->intr_type) { + switch(sp->config.intr_type) { case INTA: DBG_PRINT(ERR_DBG, "%s: Interrupt type INTA\n", dev->name); break; @@ -7386,14 +7736,6 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre) /* Initialize device name */ sprintf(sp->name, "%s Neterion %s", dev->name, sp->product_name); - /* Initialize bimodal Interrupts */ - sp->config.bimodal = bimodal; - if (!(sp->device_type & XFRAME_II_DEVICE) && bimodal) { - sp->config.bimodal = 0; - DBG_PRINT(ERR_DBG,"%s:Bimodal intr not supported by Xframe I\n", - dev->name); - } - /* * Make Link state as off at this point, when the Link change * interrupt comes the state will be automatically changed to @@ -7459,7 +7801,7 @@ static void __devexit s2io_rem_nic(struct pci_dev *pdev) * the module loadable parameters and initializes PCI configuration space. */ -int __init s2io_starter(void) +static int __init s2io_starter(void) { return pci_register_driver(&s2io_driver); } diff --git a/drivers/net/s2io.h b/drivers/net/s2io.h index 92983ee7df8c35bd01cb6b7872ad430fe0199304..f6b45565304f07c2bf3b9f3b5b49d50bb50a9a12 100644 --- a/drivers/net/s2io.h +++ b/drivers/net/s2io.h @@ -91,7 +91,7 @@ struct swStat { unsigned long long serious_err_cnt; unsigned long long soft_reset_cnt; unsigned long long fifo_full_cnt; - unsigned long long ring_full_cnt; + unsigned long long ring_full_cnt[8]; /* LRO statistics */ unsigned long long clubbed_frms_cnt; unsigned long long sending_both; @@ -126,6 +126,26 @@ struct swStat { unsigned long long rx_buf_size_err_cnt; unsigned long long rx_rxd_corrupt_cnt; unsigned long long rx_unkn_err_cnt; + + /* Error/alarm statistics*/ + unsigned long long tda_err_cnt; + unsigned long long pfc_err_cnt; + unsigned long long pcc_err_cnt; + unsigned long long tti_err_cnt; + unsigned long long lso_err_cnt; + unsigned long long tpa_err_cnt; + unsigned long long sm_err_cnt; + unsigned long long mac_tmac_err_cnt; + unsigned long long mac_rmac_err_cnt; + unsigned long long xgxs_txgxs_err_cnt; + unsigned long long xgxs_rxgxs_err_cnt; + unsigned long long rc_err_cnt; + unsigned long long prc_pcix_err_cnt; + unsigned long long rpa_err_cnt; + unsigned long long rda_err_cnt; + unsigned long long rti_err_cnt; + unsigned long long mc_err_cnt; + }; /* Xpak releated alarm and warnings */ @@ -412,6 +432,11 @@ struct config_param { struct tx_fifo_config tx_cfg[MAX_TX_FIFOS]; /*Per-Tx FIFO config */ u32 max_txds; /*Max no. of Tx buffer descriptor per TxDL */ u64 tx_intr_type; +#define INTA 0 +#define MSI_X 2 + u8 intr_type; + u8 napi; + /* Specifies if Tx Intr is UTILZ or PER_LIST type. 
*/ /* Rx Side */ @@ -419,7 +444,6 @@ struct config_param { #define MAX_RX_BLOCKS_PER_RING 150 struct rx_ring_config rx_cfg[MAX_RX_RINGS]; /*Per-Rx Ring config */ - u8 bimodal; /*Flag for setting bimodal interrupts*/ #define HEADER_ETHERNET_II_802_3_SIZE 14 #define HEADER_802_2_SIZE 3 @@ -777,6 +801,13 @@ struct lro { u8 saw_ts; }; +/* These flags represent the devices temporary state */ +enum s2io_device_state_t +{ + __S2IO_STATE_LINK_TASK=0, + __S2IO_STATE_CARD_UP +}; + /* Structure representing one instance of the NIC */ struct s2io_nic { int rxd_mode; @@ -786,6 +817,7 @@ struct s2io_nic { */ int pkts_to_process; struct net_device *dev; + struct napi_struct napi; struct mac_info mac_control; struct config_param config; struct pci_dev *pdev; @@ -854,13 +886,11 @@ struct s2io_nic { int task_flag; unsigned long long start_time; -#define CARD_DOWN 1 -#define CARD_UP 2 - atomic_t card_state; - volatile unsigned long link_state; struct vlan_group *vlgrp; #define MSIX_FLG 0xA5 struct msix_entry *entries; + int msi_detected; + wait_queue_head_t msi_wait; struct s2io_msix_entry *s2io_entries; char desc[MAX_REQUESTED_MSI_X][25]; @@ -878,13 +908,9 @@ struct s2io_nic { unsigned long sending_both; u8 lro; u16 lro_max_aggr_per_sess; - -#define INTA 0 -#define MSI_X 2 - u8 intr_type; - + volatile unsigned long state; spinlock_t rx_lock; - atomic_t isr_cnt; + u64 general_int_mask; u64 *ufo_in_band_v; #define VPD_STRING_LEN 80 u8 product_name[VPD_STRING_LEN]; @@ -1009,7 +1035,7 @@ static void free_shared_mem(struct s2io_nic *sp); static int init_nic(struct s2io_nic *nic); static void rx_intr_handler(struct ring_info *ring_data); static void tx_intr_handler(struct fifo_info *fifo_data); -static void alarm_intr_handler(struct s2io_nic *sp); +static void s2io_handle_errors(void * dev_id); static int s2io_starter(void); static void s2io_closer(void); @@ -1019,9 +1045,9 @@ static void s2io_set_multicast(struct net_device *dev); static int rx_osm_handler(struct ring_info *ring_data, struct RxD_t * rxdp); static void s2io_link(struct s2io_nic * sp, int link); static void s2io_reset(struct s2io_nic * sp); -static int s2io_poll(struct net_device *dev, int *budget); +static int s2io_poll(struct napi_struct *napi, int budget); static void s2io_init_pci(struct s2io_nic * sp); -static int s2io_set_mac_addr(struct net_device *dev, u8 * addr); +static int do_s2io_prog_unicast(struct net_device *dev, u8 *addr); static void s2io_alarm_handle(unsigned long data); static irqreturn_t s2io_msix_ring_handle(int irq, void *dev_id); diff --git a/drivers/net/saa9730.c b/drivers/net/saa9730.c index 7dae4d404978e407c67856736825a369098fd56c..14361e885415ca583c7bcc2eddd8a113120702ab 100644 --- a/drivers/net/saa9730.c +++ b/drivers/net/saa9730.c @@ -151,30 +151,30 @@ static void __attribute_used__ show_saa9730_regs(struct lan_saa9730_private *lp) printk("lp->lan_saa9730_regs->CamData = %x\n", readl(&lp->lan_saa9730_regs->CamData)); } - printk("lp->stats.tx_packets = %lx\n", lp->stats.tx_packets); - printk("lp->stats.tx_errors = %lx\n", lp->stats.tx_errors); - printk("lp->stats.tx_aborted_errors = %lx\n", - lp->stats.tx_aborted_errors); - printk("lp->stats.tx_window_errors = %lx\n", - lp->stats.tx_window_errors); - printk("lp->stats.tx_carrier_errors = %lx\n", - lp->stats.tx_carrier_errors); - printk("lp->stats.tx_fifo_errors = %lx\n", - lp->stats.tx_fifo_errors); - printk("lp->stats.tx_heartbeat_errors = %lx\n", - lp->stats.tx_heartbeat_errors); - printk("lp->stats.collisions = %lx\n", lp->stats.collisions); - - 
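/*
 * Illustrative sketch, not part of the patch: the saa9730/sb1000 changes
 * around this point switch from a driver-private struct net_device_stats to
 * the instance embedded in struct net_device, so counters are bumped directly
 * on dev->stats and the private copy plus the get_stats hook can be deleted
 * (the core hands out &dev->stats when no get_stats method is set).  The name
 * my_start_xmit and the elided TX path are hypothetical.
 */
static int my_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
        /* ... hand the frame to the hardware here ... */

        dev->stats.tx_packets++;        /* count straight into dev->stats */
        dev->stats.tx_bytes += skb->len;
        return 0;                       /* NETDEV_TX_OK */
}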
printk("lp->stats.rx_packets = %lx\n", lp->stats.rx_packets); - printk("lp->stats.rx_errors = %lx\n", lp->stats.rx_errors); - printk("lp->stats.rx_dropped = %lx\n", lp->stats.rx_dropped); - printk("lp->stats.rx_crc_errors = %lx\n", lp->stats.rx_crc_errors); - printk("lp->stats.rx_frame_errors = %lx\n", - lp->stats.rx_frame_errors); - printk("lp->stats.rx_fifo_errors = %lx\n", - lp->stats.rx_fifo_errors); - printk("lp->stats.rx_length_errors = %lx\n", - lp->stats.rx_length_errors); + printk("dev->stats.tx_packets = %lx\n", dev->stats.tx_packets); + printk("dev->stats.tx_errors = %lx\n", dev->stats.tx_errors); + printk("dev->stats.tx_aborted_errors = %lx\n", + dev->stats.tx_aborted_errors); + printk("dev->stats.tx_window_errors = %lx\n", + dev->stats.tx_window_errors); + printk("dev->stats.tx_carrier_errors = %lx\n", + dev->stats.tx_carrier_errors); + printk("dev->stats.tx_fifo_errors = %lx\n", + dev->stats.tx_fifo_errors); + printk("dev->stats.tx_heartbeat_errors = %lx\n", + dev->stats.tx_heartbeat_errors); + printk("dev->stats.collisions = %lx\n", dev->stats.collisions); + + printk("dev->stats.rx_packets = %lx\n", dev->stats.rx_packets); + printk("dev->stats.rx_errors = %lx\n", dev->stats.rx_errors); + printk("dev->stats.rx_dropped = %lx\n", dev->stats.rx_dropped); + printk("dev->stats.rx_crc_errors = %lx\n", dev->stats.rx_crc_errors); + printk("dev->stats.rx_frame_errors = %lx\n", + dev->stats.rx_frame_errors); + printk("dev->stats.rx_fifo_errors = %lx\n", + dev->stats.rx_fifo_errors); + printk("dev->stats.rx_length_errors = %lx\n", + dev->stats.rx_length_errors); printk("lp->lan_saa9730_regs->DebugPCIMasterAddr = %x\n", readl(&lp->lan_saa9730_regs->DebugPCIMasterAddr)); @@ -605,24 +605,24 @@ static int lan_saa9730_tx(struct net_device *dev) printk("lan_saa9730_tx: tx error = %x\n", tx_status); - lp->stats.tx_errors++; + dev->stats.tx_errors++; if (tx_status & (TX_STATUS_EX_COLL << TX_STAT_CTL_STATUS_SHF)) - lp->stats.tx_aborted_errors++; + dev->stats.tx_aborted_errors++; if (tx_status & (TX_STATUS_LATE_COLL << TX_STAT_CTL_STATUS_SHF)) - lp->stats.tx_window_errors++; + dev->stats.tx_window_errors++; if (tx_status & (TX_STATUS_L_CARR << TX_STAT_CTL_STATUS_SHF)) - lp->stats.tx_carrier_errors++; + dev->stats.tx_carrier_errors++; if (tx_status & (TX_STATUS_UNDER << TX_STAT_CTL_STATUS_SHF)) - lp->stats.tx_fifo_errors++; + dev->stats.tx_fifo_errors++; if (tx_status & (TX_STATUS_SQ_ERR << TX_STAT_CTL_STATUS_SHF)) - lp->stats.tx_heartbeat_errors++; + dev->stats.tx_heartbeat_errors++; - lp->stats.collisions += + dev->stats.collisions += tx_status & TX_STATUS_TX_COLL_MSK; } @@ -684,10 +684,10 @@ static int lan_saa9730_rx(struct net_device *dev) printk ("%s: Memory squeeze, deferring packet.\n", dev->name); - lp->stats.rx_dropped++; + dev->stats.rx_dropped++; } else { - lp->stats.rx_bytes += len; - lp->stats.rx_packets++; + dev->stats.rx_bytes += len; + dev->stats.rx_packets++; skb_reserve(skb, 2); /* 16 byte align */ skb_put(skb, len); /* make room */ skb_copy_to_linear_data(skb, @@ -704,19 +704,19 @@ static int lan_saa9730_rx(struct net_device *dev) ("lan_saa9730_rx: We got an error packet = %x\n", rx_status); - lp->stats.rx_errors++; + dev->stats.rx_errors++; if (rx_status & (RX_STATUS_CRC_ERR << RX_STAT_CTL_STATUS_SHF)) - lp->stats.rx_crc_errors++; + dev->stats.rx_crc_errors++; if (rx_status & (RX_STATUS_ALIGN_ERR << RX_STAT_CTL_STATUS_SHF)) - lp->stats.rx_frame_errors++; + dev->stats.rx_frame_errors++; if (rx_status & (RX_STATUS_OVERFLOW << RX_STAT_CTL_STATUS_SHF)) - lp->stats.rx_fifo_errors++; 
+ dev->stats.rx_fifo_errors++; if (rx_status & (RX_STATUS_LONG_ERR << RX_STAT_CTL_STATUS_SHF)) - lp->stats.rx_length_errors++; + dev->stats.rx_length_errors++; } /* Indicate we have processed the buffer. */ @@ -853,7 +853,7 @@ static void lan_saa9730_tx_timeout(struct net_device *dev) struct lan_saa9730_private *lp = netdev_priv(dev); /* Transmitter timeout, serious problems */ - lp->stats.tx_errors++; + dev->stats.tx_errors++; printk("%s: transmit timed out, reset\n", dev->name); /*show_saa9730_regs(lp); */ lan_saa9730_restart(lp); @@ -886,8 +886,8 @@ static int lan_saa9730_start_xmit(struct sk_buff *skb, return -1; } - lp->stats.tx_bytes += len; - lp->stats.tx_packets++; + dev->stats.tx_bytes += len; + dev->stats.tx_packets++; dev->trans_start = jiffies; netif_wake_queue(dev); @@ -919,14 +919,6 @@ static int lan_saa9730_close(struct net_device *dev) return 0; } -static struct net_device_stats *lan_saa9730_get_stats(struct net_device - *dev) -{ - struct lan_saa9730_private *lp = netdev_priv(dev); - - return &lp->stats; -} - static void lan_saa9730_set_multicast(struct net_device *dev) { struct lan_saa9730_private *lp = netdev_priv(dev); @@ -1040,7 +1032,6 @@ static int lan_saa9730_init(struct net_device *dev, struct pci_dev *pdev, dev->open = lan_saa9730_open; dev->hard_start_xmit = lan_saa9730_start_xmit; dev->stop = lan_saa9730_close; - dev->get_stats = lan_saa9730_get_stats; dev->set_multicast_list = lan_saa9730_set_multicast; dev->tx_timeout = lan_saa9730_tx_timeout; dev->watchdog_timeo = (HZ >> 1); diff --git a/drivers/net/saa9730.h b/drivers/net/saa9730.h index f656f2f40bb85cdbbdc1ddd3ecc248215590faa0..010a120ea938f040676054f14cbb9234760bffcf 100644 --- a/drivers/net/saa9730.h +++ b/drivers/net/saa9730.h @@ -378,7 +378,6 @@ struct lan_saa9730_private { unsigned char PhysicalAddress[LAN_SAA9730_CAM_ENTRIES][6]; - struct net_device_stats stats; spinlock_t lock; }; diff --git a/drivers/net/sb1000.c b/drivers/net/sb1000.c index 1de3eec1a792f13312cf809c0b6ab528c3103ba8..487f9d2ac5b4750fbabc83008967dd1d0dc150fb 100644 --- a/drivers/net/sb1000.c +++ b/drivers/net/sb1000.c @@ -76,7 +76,6 @@ struct sb1000_private { unsigned char rx_session_id[NPIDS]; unsigned char rx_frame_id[NPIDS]; unsigned char rx_pkt_type[NPIDS]; - struct net_device_stats stats; }; /* prototypes for Linux interface */ @@ -85,7 +84,6 @@ static int sb1000_open(struct net_device *dev); static int sb1000_dev_ioctl (struct net_device *dev, struct ifreq *ifr, int cmd); static int sb1000_start_xmit(struct sk_buff *skb, struct net_device *dev); static irqreturn_t sb1000_interrupt(int irq, void *dev_id); -static struct net_device_stats *sb1000_stats(struct net_device *dev); static int sb1000_close(struct net_device *dev); @@ -189,7 +187,6 @@ sb1000_probe_one(struct pnp_dev *pdev, const struct pnp_device_id *id) */ dev->flags = IFF_POINTOPOINT|IFF_NOARP; - SET_MODULE_OWNER(dev); SET_NETDEV_DEV(dev, &pdev->dev); if (sb1000_debug > 0) @@ -200,7 +197,6 @@ sb1000_probe_one(struct pnp_dev *pdev, const struct pnp_device_id *id) dev->do_ioctl = sb1000_dev_ioctl; dev->hard_start_xmit = sb1000_start_xmit; dev->stop = sb1000_close; - dev->get_stats = sb1000_stats; /* hardware address is 0:0:serial_number */ dev->dev_addr[2] = serial_number >> 24 & 0xff; @@ -740,7 +736,7 @@ sb1000_rx(struct net_device *dev) unsigned int skbsize; struct sk_buff *skb; struct sb1000_private *lp = netdev_priv(dev); - struct net_device_stats *stats = &lp->stats; + struct net_device_stats *stats = &dev->stats; /* SB1000 frame constants */ const int FrameSize = 
FRAMESIZE; @@ -1003,11 +999,11 @@ static int sb1000_dev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) switch (cmd) { case SIOCGCMSTATS: /* get statistics */ - stats[0] = lp->stats.rx_bytes; + stats[0] = dev->stats.rx_bytes; stats[1] = lp->rx_frames; - stats[2] = lp->stats.rx_packets; - stats[3] = lp->stats.rx_errors; - stats[4] = lp->stats.rx_dropped; + stats[2] = dev->stats.rx_packets; + stats[3] = dev->stats.rx_errors; + stats[4] = dev->stats.rx_dropped; if(copy_to_user(ifr->ifr_data, stats, sizeof(stats))) return -EFAULT; status = 0; @@ -1133,12 +1129,6 @@ static irqreturn_t sb1000_interrupt(int irq, void *dev_id) return IRQ_HANDLED; } -static struct net_device_stats *sb1000_stats(struct net_device *dev) -{ - struct sb1000_private *lp = netdev_priv(dev); - return &lp->stats; -} - static int sb1000_close(struct net_device *dev) { int i; diff --git a/drivers/net/sb1250-mac.c b/drivers/net/sb1250-mac.c index e7fdcf15b5a72f88cf9961843afca4cbab4a3ecc..7b53d658e33712fa65d11e4932e260a6a8c3beca 100644 --- a/drivers/net/sb1250-mac.c +++ b/drivers/net/sb1250-mac.c @@ -1,5 +1,6 @@ /* * Copyright (C) 2001,2002,2003,2004 Broadcom Corporation + * Copyright (c) 2006, 2007 Maciej W. Rozycki * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License @@ -18,7 +19,12 @@ * * This driver is designed for the Broadcom SiByte SOC built-in * Ethernet controllers. Written by Mitch Lichtenberg at Broadcom Corp. + * + * Updated to the driver model and the PHY abstraction layer + * by Maciej W. Rozycki. */ + +#include #include #include #include @@ -32,9 +38,15 @@ #include #include #include -#include /* Processor type for cache alignment. */ -#include +#include +#include +#include +#include +#include + #include +#include +#include /* Processor type for cache alignment. */ /* This is only here until the firmware is ready. In that case, the firmware leaves the ethernet address in the register for us. */ @@ -48,7 +60,7 @@ /* These identify the driver base version and may not be removed. */ #if 0 -static char version1[] __devinitdata = +static char version1[] __initdata = "sb1250-mac.c:1.00 1/11/2001 Written by Mitch Lichtenberg\n"; #endif @@ -57,8 +69,6 @@ static char version1[] __devinitdata = #define CONFIG_SBMAC_COALESCE -#define MAX_UNITS 4 /* More are supported, limit only on options */ - /* Time in jiffies before concluding the transmitter is hung. */ #define TX_TIMEOUT (2*HZ) @@ -74,26 +84,6 @@ static int debug = 1; module_param(debug, int, S_IRUGO); MODULE_PARM_DESC(debug, "Debug messages"); -/* mii status msgs */ -static int noisy_mii = 1; -module_param(noisy_mii, int, S_IRUGO); -MODULE_PARM_DESC(noisy_mii, "MII status messages"); - -/* Used to pass the media type, etc. - Both 'options[]' and 'full_duplex[]' should exist for driver - interoperability. - The media type is usually passed in 'options[]'. 
-*/ -#ifdef MODULE -static int options[MAX_UNITS] = {-1, -1, -1, -1}; -module_param_array(options, int, NULL, S_IRUGO); -MODULE_PARM_DESC(options, "1-" __MODULE_STRING(MAX_UNITS)); - -static int full_duplex[MAX_UNITS] = {-1, -1, -1, -1}; -module_param_array(full_duplex, int, NULL, S_IRUGO); -MODULE_PARM_DESC(full_duplex, "1-" __MODULE_STRING(MAX_UNITS)); -#endif - #ifdef CONFIG_SBMAC_COALESCE static int int_pktcnt_tx = 255; module_param(int_pktcnt_tx, int, S_IRUGO); @@ -112,6 +102,7 @@ module_param(int_timeout_rx, int, S_IRUGO); MODULE_PARM_DESC(int_timeout_rx, "RX timeout value"); #endif +#include #include #if defined(CONFIG_SIBYTE_BCM1x55) || defined(CONFIG_SIBYTE_BCM1x80) #include @@ -135,22 +126,43 @@ MODULE_PARM_DESC(int_timeout_rx, "RX timeout value"); #error invalid SiByte MAC configuation #endif +#ifdef K_INT_PHY +#define SBMAC_PHY_INT K_INT_PHY +#else +#define SBMAC_PHY_INT PHY_POLL +#endif + /********************************************************************** * Simple types ********************************************************************* */ +enum sbmac_speed { + sbmac_speed_none = 0, + sbmac_speed_10 = SPEED_10, + sbmac_speed_100 = SPEED_100, + sbmac_speed_1000 = SPEED_1000, +}; -typedef enum { sbmac_speed_auto, sbmac_speed_10, - sbmac_speed_100, sbmac_speed_1000 } sbmac_speed_t; - -typedef enum { sbmac_duplex_auto, sbmac_duplex_half, - sbmac_duplex_full } sbmac_duplex_t; +enum sbmac_duplex { + sbmac_duplex_none = -1, + sbmac_duplex_half = DUPLEX_HALF, + sbmac_duplex_full = DUPLEX_FULL, +}; -typedef enum { sbmac_fc_auto, sbmac_fc_disabled, sbmac_fc_frame, - sbmac_fc_collision, sbmac_fc_carrier } sbmac_fc_t; +enum sbmac_fc { + sbmac_fc_none, + sbmac_fc_disabled, + sbmac_fc_frame, + sbmac_fc_collision, + sbmac_fc_carrier, +}; -typedef enum { sbmac_state_uninit, sbmac_state_off, sbmac_state_on, - sbmac_state_broken } sbmac_state_t; +enum sbmac_state { + sbmac_state_uninit, + sbmac_state_off, + sbmac_state_on, + sbmac_state_broken, +}; /********************************************************************** @@ -176,55 +188,61 @@ typedef enum { sbmac_state_uninit, sbmac_state_off, sbmac_state_on, * DMA Descriptor structure ********************************************************************* */ -typedef struct sbdmadscr_s { +struct sbdmadscr { uint64_t dscr_a; uint64_t dscr_b; -} sbdmadscr_t; - -typedef unsigned long paddr_t; +}; /********************************************************************** * DMA Controller structure ********************************************************************* */ -typedef struct sbmacdma_s { +struct sbmacdma { /* * This stuff is used to identify the channel and the registers * associated with it. 
*/ - - struct sbmac_softc *sbdma_eth; /* back pointer to associated MAC */ - int sbdma_channel; /* channel number */ - int sbdma_txdir; /* direction (1=transmit) */ - int sbdma_maxdescr; /* total # of descriptors in ring */ + struct sbmac_softc *sbdma_eth; /* back pointer to associated + MAC */ + int sbdma_channel; /* channel number */ + int sbdma_txdir; /* direction (1=transmit) */ + int sbdma_maxdescr; /* total # of descriptors + in ring */ #ifdef CONFIG_SBMAC_COALESCE - int sbdma_int_pktcnt; /* # descriptors rx/tx before interrupt*/ - int sbdma_int_timeout; /* # usec rx/tx interrupt */ + int sbdma_int_pktcnt; + /* # descriptors rx/tx + before interrupt */ + int sbdma_int_timeout; + /* # usec rx/tx interrupt */ #endif - - volatile void __iomem *sbdma_config0; /* DMA config register 0 */ - volatile void __iomem *sbdma_config1; /* DMA config register 1 */ - volatile void __iomem *sbdma_dscrbase; /* Descriptor base address */ - volatile void __iomem *sbdma_dscrcnt; /* Descriptor count register */ - volatile void __iomem *sbdma_curdscr; /* current descriptor address */ - volatile void __iomem *sbdma_oodpktlost;/* pkt drop (rx only) */ - + void __iomem *sbdma_config0; /* DMA config register 0 */ + void __iomem *sbdma_config1; /* DMA config register 1 */ + void __iomem *sbdma_dscrbase; + /* descriptor base address */ + void __iomem *sbdma_dscrcnt; /* descriptor count register */ + void __iomem *sbdma_curdscr; /* current descriptor + address */ + void __iomem *sbdma_oodpktlost; + /* pkt drop (rx only) */ /* * This stuff is for maintenance of the ring */ - - sbdmadscr_t *sbdma_dscrtable_unaligned; - sbdmadscr_t *sbdma_dscrtable; /* base of descriptor table */ - sbdmadscr_t *sbdma_dscrtable_end; /* end of descriptor table */ - - struct sk_buff **sbdma_ctxtable; /* context table, one per descr */ - - paddr_t sbdma_dscrtable_phys; /* and also the phys addr */ - sbdmadscr_t *sbdma_addptr; /* next dscr for sw to add */ - sbdmadscr_t *sbdma_remptr; /* next dscr for sw to remove */ -} sbmacdma_t; + void *sbdma_dscrtable_unaligned; + struct sbdmadscr *sbdma_dscrtable; + /* base of descriptor table */ + struct sbdmadscr *sbdma_dscrtable_end; + /* end of descriptor table */ + struct sk_buff **sbdma_ctxtable; + /* context table, one + per descr */ + dma_addr_t sbdma_dscrtable_phys; + /* and also the phys addr */ + struct sbdmadscr *sbdma_addptr; /* next dscr for sw to add */ + struct sbdmadscr *sbdma_remptr; /* next dscr for sw + to remove */ +}; /********************************************************************** @@ -236,47 +254,43 @@ struct sbmac_softc { /* * Linux-specific things */ + struct net_device *sbm_dev; /* pointer to linux device */ + struct napi_struct napi; + struct phy_device *phy_dev; /* the associated PHY device */ + struct mii_bus mii_bus; /* the MII bus */ + int phy_irq[PHY_MAX_ADDR]; + spinlock_t sbm_lock; /* spin lock */ + int sbm_devflags; /* current device flags */ - struct net_device *sbm_dev; /* pointer to linux device */ - spinlock_t sbm_lock; /* spin lock */ - struct timer_list sbm_timer; /* for monitoring MII */ - struct net_device_stats sbm_stats; - int sbm_devflags; /* current device flags */ - - int sbm_phy_oldbmsr; - int sbm_phy_oldanlpar; - int sbm_phy_oldk1stsr; - int sbm_phy_oldlinkstat; - int sbm_buffersize; - - unsigned char sbm_phys[2]; + int sbm_buffersize; /* * Controller-specific things */ - - void __iomem *sbm_base; /* MAC's base address */ - sbmac_state_t sbm_state; /* current state */ - - volatile void __iomem *sbm_macenable; /* MAC Enable Register */ - 
volatile void __iomem *sbm_maccfg; /* MAC Configuration Register */ - volatile void __iomem *sbm_fifocfg; /* FIFO configuration register */ - volatile void __iomem *sbm_framecfg; /* Frame configuration register */ - volatile void __iomem *sbm_rxfilter; /* receive filter register */ - volatile void __iomem *sbm_isr; /* Interrupt status register */ - volatile void __iomem *sbm_imr; /* Interrupt mask register */ - volatile void __iomem *sbm_mdio; /* MDIO register */ - - sbmac_speed_t sbm_speed; /* current speed */ - sbmac_duplex_t sbm_duplex; /* current duplex */ - sbmac_fc_t sbm_fc; /* current flow control setting */ - - unsigned char sbm_hwaddr[ETHER_ADDR_LEN]; - - sbmacdma_t sbm_txdma; /* for now, only use channel 0 */ - sbmacdma_t sbm_rxdma; - int rx_hw_checksum; - int sbe_idx; + void __iomem *sbm_base; /* MAC's base address */ + enum sbmac_state sbm_state; /* current state */ + + void __iomem *sbm_macenable; /* MAC Enable Register */ + void __iomem *sbm_maccfg; /* MAC Config Register */ + void __iomem *sbm_fifocfg; /* FIFO Config Register */ + void __iomem *sbm_framecfg; /* Frame Config Register */ + void __iomem *sbm_rxfilter; /* Receive Filter Register */ + void __iomem *sbm_isr; /* Interrupt Status Register */ + void __iomem *sbm_imr; /* Interrupt Mask Register */ + void __iomem *sbm_mdio; /* MDIO Register */ + + enum sbmac_speed sbm_speed; /* current speed */ + enum sbmac_duplex sbm_duplex; /* current duplex */ + enum sbmac_fc sbm_fc; /* cur. flow control setting */ + int sbm_pause; /* current pause setting */ + int sbm_link; /* current link state */ + + unsigned char sbm_hwaddr[ETHER_ADDR_LEN]; + + struct sbmacdma sbm_txdma; /* only channel 0 for now */ + struct sbmacdma sbm_rxdma; + int rx_hw_checksum; + int sbe_idx; }; @@ -288,55 +302,58 @@ struct sbmac_softc { * Prototypes ********************************************************************* */ -static void sbdma_initctx(sbmacdma_t *d, - struct sbmac_softc *s, - int chan, - int txrx, - int maxdescr); -static void sbdma_channel_start(sbmacdma_t *d, int rxtx); -static int sbdma_add_rcvbuffer(sbmacdma_t *d,struct sk_buff *m); -static int sbdma_add_txbuffer(sbmacdma_t *d,struct sk_buff *m); -static void sbdma_emptyring(sbmacdma_t *d); -static void sbdma_fillring(sbmacdma_t *d); -static int sbdma_rx_process(struct sbmac_softc *sc,sbmacdma_t *d, int work_to_do, int poll); -static void sbdma_tx_process(struct sbmac_softc *sc,sbmacdma_t *d, int poll); +static void sbdma_initctx(struct sbmacdma *d, struct sbmac_softc *s, int chan, + int txrx, int maxdescr); +static void sbdma_channel_start(struct sbmacdma *d, int rxtx); +static int sbdma_add_rcvbuffer(struct sbmacdma *d, struct sk_buff *m); +static int sbdma_add_txbuffer(struct sbmacdma *d, struct sk_buff *m); +static void sbdma_emptyring(struct sbmacdma *d); +static void sbdma_fillring(struct sbmacdma *d); +static int sbdma_rx_process(struct sbmac_softc *sc, struct sbmacdma *d, + int work_to_do, int poll); +static void sbdma_tx_process(struct sbmac_softc *sc, struct sbmacdma *d, + int poll); static int sbmac_initctx(struct sbmac_softc *s); static void sbmac_channel_start(struct sbmac_softc *s); static void sbmac_channel_stop(struct sbmac_softc *s); -static sbmac_state_t sbmac_set_channel_state(struct sbmac_softc *,sbmac_state_t); -static void sbmac_promiscuous_mode(struct sbmac_softc *sc,int onoff); +static enum sbmac_state sbmac_set_channel_state(struct sbmac_softc *, + enum sbmac_state); +static void sbmac_promiscuous_mode(struct sbmac_softc *sc, int onoff); static uint64_t 
sbmac_addr2reg(unsigned char *ptr); -static irqreturn_t sbmac_intr(int irq,void *dev_instance); +static irqreturn_t sbmac_intr(int irq, void *dev_instance); static int sbmac_start_tx(struct sk_buff *skb, struct net_device *dev); static void sbmac_setmulti(struct sbmac_softc *sc); -static int sbmac_init(struct net_device *dev, int idx); -static int sbmac_set_speed(struct sbmac_softc *s,sbmac_speed_t speed); -static int sbmac_set_duplex(struct sbmac_softc *s,sbmac_duplex_t duplex,sbmac_fc_t fc); +static int sbmac_init(struct platform_device *pldev, long long base); +static int sbmac_set_speed(struct sbmac_softc *s, enum sbmac_speed speed); +static int sbmac_set_duplex(struct sbmac_softc *s, enum sbmac_duplex duplex, + enum sbmac_fc fc); static int sbmac_open(struct net_device *dev); -static void sbmac_timer(unsigned long data); static void sbmac_tx_timeout (struct net_device *dev); -static struct net_device_stats *sbmac_get_stats(struct net_device *dev); static void sbmac_set_rx_mode(struct net_device *dev); static int sbmac_mii_ioctl(struct net_device *dev, struct ifreq *rq, int cmd); static int sbmac_close(struct net_device *dev); -static int sbmac_poll(struct net_device *poll_dev, int *budget); +static int sbmac_poll(struct napi_struct *napi, int budget); -static int sbmac_mii_poll(struct sbmac_softc *s,int noisy); +static void sbmac_mii_poll(struct net_device *dev); static int sbmac_mii_probe(struct net_device *dev); -static void sbmac_mii_sync(struct sbmac_softc *s); -static void sbmac_mii_senddata(struct sbmac_softc *s,unsigned int data, int bitcnt); -static unsigned int sbmac_mii_read(struct sbmac_softc *s,int phyaddr,int regidx); -static void sbmac_mii_write(struct sbmac_softc *s,int phyaddr,int regidx, - unsigned int regval); +static void sbmac_mii_sync(void __iomem *sbm_mdio); +static void sbmac_mii_senddata(void __iomem *sbm_mdio, unsigned int data, + int bitcnt); +static int sbmac_mii_read(struct mii_bus *bus, int phyaddr, int regidx); +static int sbmac_mii_write(struct mii_bus *bus, int phyaddr, int regidx, + u16 val); /********************************************************************** * Globals ********************************************************************* */ -static uint64_t sbmac_orig_hwaddr[MAX_UNITS]; +static char sbmac_string[] = "sb1250-mac"; +static char sbmac_pretty[] = "SB1250 MAC"; + +static char sbmac_mdio_string[] = "sb1250-mac-mdio"; /********************************************************************** @@ -348,185 +365,66 @@ static uint64_t sbmac_orig_hwaddr[MAX_UNITS]; #define MII_COMMAND_WRITE 0x01 #define MII_COMMAND_ACK 0x02 -#define BMCR_RESET 0x8000 -#define BMCR_LOOPBACK 0x4000 -#define BMCR_SPEED0 0x2000 -#define BMCR_ANENABLE 0x1000 -#define BMCR_POWERDOWN 0x0800 -#define BMCR_ISOLATE 0x0400 -#define BMCR_RESTARTAN 0x0200 -#define BMCR_DUPLEX 0x0100 -#define BMCR_COLTEST 0x0080 -#define BMCR_SPEED1 0x0040 -#define BMCR_SPEED1000 BMCR_SPEED1 -#define BMCR_SPEED100 BMCR_SPEED0 -#define BMCR_SPEED10 0 - -#define BMSR_100BT4 0x8000 -#define BMSR_100BT_FDX 0x4000 -#define BMSR_100BT_HDX 0x2000 -#define BMSR_10BT_FDX 0x1000 -#define BMSR_10BT_HDX 0x0800 -#define BMSR_100BT2_FDX 0x0400 -#define BMSR_100BT2_HDX 0x0200 -#define BMSR_1000BT_XSR 0x0100 -#define BMSR_PRESUP 0x0040 -#define BMSR_ANCOMPLT 0x0020 -#define BMSR_REMFAULT 0x0010 -#define BMSR_AUTONEG 0x0008 -#define BMSR_LINKSTAT 0x0004 -#define BMSR_JABDETECT 0x0002 -#define BMSR_EXTCAPAB 0x0001 - -#define PHYIDR1 0x2000 -#define PHYIDR2 0x5C60 - -#define ANAR_NP 0x8000 -#define ANAR_RF 0x2000 
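/*
 * Illustrative sketch, not part of the patch: the driver-local MII register
 * definitions being deleted here duplicate the generic ones in <linux/mii.h>
 * (MII_BMCR, MII_BMSR, MII_PHYSID1, BMSR_LSTATUS, ...), which is what the
 * phylib-based code relies on instead.  Reading the PHY ID through the new
 * mii_bus accessor would look roughly like this; my_read_phy_id is a
 * hypothetical helper, and bus->read is sbmac_mii_read() in this driver.
 */
static u32 my_read_phy_id(struct mii_bus *bus, int phyaddr)
{
        u32 id;

        id  = ((u32)bus->read(bus, phyaddr, MII_PHYSID1)) << 16;
        id |= bus->read(bus, phyaddr, MII_PHYSID2) & 0xffff;
        return id;      /* 0xffffffff: no PHY answered (sbmac_mii_read returns 0xffff on error) */
}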
-#define ANAR_ASYPAUSE 0x0800 -#define ANAR_PAUSE 0x0400 -#define ANAR_T4 0x0200 -#define ANAR_TXFD 0x0100 -#define ANAR_TXHD 0x0080 -#define ANAR_10FD 0x0040 -#define ANAR_10HD 0x0020 -#define ANAR_PSB 0x0001 - -#define ANLPAR_NP 0x8000 -#define ANLPAR_ACK 0x4000 -#define ANLPAR_RF 0x2000 -#define ANLPAR_ASYPAUSE 0x0800 -#define ANLPAR_PAUSE 0x0400 -#define ANLPAR_T4 0x0200 -#define ANLPAR_TXFD 0x0100 -#define ANLPAR_TXHD 0x0080 -#define ANLPAR_10FD 0x0040 -#define ANLPAR_10HD 0x0020 -#define ANLPAR_PSB 0x0001 /* 802.3 */ - -#define ANER_PDF 0x0010 -#define ANER_LPNPABLE 0x0008 -#define ANER_NPABLE 0x0004 -#define ANER_PAGERX 0x0002 -#define ANER_LPANABLE 0x0001 - -#define ANNPTR_NP 0x8000 -#define ANNPTR_MP 0x2000 -#define ANNPTR_ACK2 0x1000 -#define ANNPTR_TOGTX 0x0800 -#define ANNPTR_CODE 0x0008 - -#define ANNPRR_NP 0x8000 -#define ANNPRR_MP 0x2000 -#define ANNPRR_ACK3 0x1000 -#define ANNPRR_TOGTX 0x0800 -#define ANNPRR_CODE 0x0008 - -#define K1TCR_TESTMODE 0x0000 -#define K1TCR_MSMCE 0x1000 -#define K1TCR_MSCV 0x0800 -#define K1TCR_RPTR 0x0400 -#define K1TCR_1000BT_FDX 0x200 -#define K1TCR_1000BT_HDX 0x100 - -#define K1STSR_MSMCFLT 0x8000 -#define K1STSR_MSCFGRES 0x4000 -#define K1STSR_LRSTAT 0x2000 -#define K1STSR_RRSTAT 0x1000 -#define K1STSR_LP1KFD 0x0800 -#define K1STSR_LP1KHD 0x0400 -#define K1STSR_LPASMDIR 0x0200 - -#define K1SCR_1KX_FDX 0x8000 -#define K1SCR_1KX_HDX 0x4000 -#define K1SCR_1KT_FDX 0x2000 -#define K1SCR_1KT_HDX 0x1000 - -#define STRAP_PHY1 0x0800 -#define STRAP_NCMODE 0x0400 -#define STRAP_MANMSCFG 0x0200 -#define STRAP_ANENABLE 0x0100 -#define STRAP_MSVAL 0x0080 -#define STRAP_1KHDXADV 0x0010 -#define STRAP_1KFDXADV 0x0008 -#define STRAP_100ADV 0x0004 -#define STRAP_SPEEDSEL 0x0000 -#define STRAP_SPEED100 0x0001 - -#define PHYSUP_SPEED1000 0x10 -#define PHYSUP_SPEED100 0x08 -#define PHYSUP_SPEED10 0x00 -#define PHYSUP_LINKUP 0x04 -#define PHYSUP_FDX 0x02 - -#define MII_BMCR 0x00 /* Basic mode control register (rw) */ -#define MII_BMSR 0x01 /* Basic mode status register (ro) */ -#define MII_PHYIDR1 0x02 -#define MII_PHYIDR2 0x03 - -#define MII_K1STSR 0x0A /* 1K Status Register (ro) */ -#define MII_ANLPAR 0x05 /* Autonegotiation lnk partner abilities (rw) */ - - #define M_MAC_MDIO_DIR_OUTPUT 0 /* for clarity */ #define ENABLE 1 #define DISABLE 0 /********************************************************************** - * SBMAC_MII_SYNC(s) + * SBMAC_MII_SYNC(sbm_mdio) * * Synchronize with the MII - send a pattern of bits to the MII * that will guarantee that it is ready to accept a command. 
* * Input parameters: - * s - sbmac structure + * sbm_mdio - address of the MAC's MDIO register * * Return value: * nothing ********************************************************************* */ -static void sbmac_mii_sync(struct sbmac_softc *s) +static void sbmac_mii_sync(void __iomem *sbm_mdio) { int cnt; uint64_t bits; int mac_mdio_genc; - mac_mdio_genc = __raw_readq(s->sbm_mdio) & M_MAC_GENC; + mac_mdio_genc = __raw_readq(sbm_mdio) & M_MAC_GENC; bits = M_MAC_MDIO_DIR_OUTPUT | M_MAC_MDIO_OUT; - __raw_writeq(bits | mac_mdio_genc, s->sbm_mdio); + __raw_writeq(bits | mac_mdio_genc, sbm_mdio); for (cnt = 0; cnt < 32; cnt++) { - __raw_writeq(bits | M_MAC_MDC | mac_mdio_genc, s->sbm_mdio); - __raw_writeq(bits | mac_mdio_genc, s->sbm_mdio); + __raw_writeq(bits | M_MAC_MDC | mac_mdio_genc, sbm_mdio); + __raw_writeq(bits | mac_mdio_genc, sbm_mdio); } } /********************************************************************** - * SBMAC_MII_SENDDATA(s,data,bitcnt) + * SBMAC_MII_SENDDATA(sbm_mdio, data, bitcnt) * * Send some bits to the MII. The bits to be sent are right- * justified in the 'data' parameter. * * Input parameters: - * s - sbmac structure - * data - data to send - * bitcnt - number of bits to send + * sbm_mdio - address of the MAC's MDIO register + * data - data to send + * bitcnt - number of bits to send ********************************************************************* */ -static void sbmac_mii_senddata(struct sbmac_softc *s,unsigned int data, int bitcnt) +static void sbmac_mii_senddata(void __iomem *sbm_mdio, unsigned int data, + int bitcnt) { int i; uint64_t bits; unsigned int curmask; int mac_mdio_genc; - mac_mdio_genc = __raw_readq(s->sbm_mdio) & M_MAC_GENC; + mac_mdio_genc = __raw_readq(sbm_mdio) & M_MAC_GENC; bits = M_MAC_MDIO_DIR_OUTPUT; - __raw_writeq(bits | mac_mdio_genc, s->sbm_mdio); + __raw_writeq(bits | mac_mdio_genc, sbm_mdio); curmask = 1 << (bitcnt - 1); @@ -534,9 +432,9 @@ static void sbmac_mii_senddata(struct sbmac_softc *s,unsigned int data, int bitc if (data & curmask) bits |= M_MAC_MDIO_OUT; else bits &= ~M_MAC_MDIO_OUT; - __raw_writeq(bits | mac_mdio_genc, s->sbm_mdio); - __raw_writeq(bits | M_MAC_MDC | mac_mdio_genc, s->sbm_mdio); - __raw_writeq(bits | mac_mdio_genc, s->sbm_mdio); + __raw_writeq(bits | mac_mdio_genc, sbm_mdio); + __raw_writeq(bits | M_MAC_MDC | mac_mdio_genc, sbm_mdio); + __raw_writeq(bits | mac_mdio_genc, sbm_mdio); curmask >>= 1; } } @@ -544,21 +442,22 @@ static void sbmac_mii_senddata(struct sbmac_softc *s,unsigned int data, int bitc /********************************************************************** - * SBMAC_MII_READ(s,phyaddr,regidx) - * + * SBMAC_MII_READ(bus, phyaddr, regidx) * Read a PHY register. * * Input parameters: - * s - sbmac structure + * bus - MDIO bus handle * phyaddr - PHY's address - * regidx = index of register to read + * regnum - index of register to read * * Return value: - * value read, or 0 if an error occurred. + * value read, or 0xffff if an error occurred. 
********************************************************************* */ -static unsigned int sbmac_mii_read(struct sbmac_softc *s,int phyaddr,int regidx) +static int sbmac_mii_read(struct mii_bus *bus, int phyaddr, int regidx) { + struct sbmac_softc *sc = (struct sbmac_softc *)bus->priv; + void __iomem *sbm_mdio = sc->sbm_mdio; int idx; int error; int regval; @@ -568,8 +467,7 @@ static unsigned int sbmac_mii_read(struct sbmac_softc *s,int phyaddr,int regidx) * Synchronize ourselves so that the PHY knows the next * thing coming down is a command */ - - sbmac_mii_sync(s); + sbmac_mii_sync(sbm_mdio); /* * Send the data to the PHY. The sequence is @@ -578,37 +476,37 @@ static unsigned int sbmac_mii_read(struct sbmac_softc *s,int phyaddr,int regidx) * the PHY addr (5 bits) * the register index (5 bits) */ + sbmac_mii_senddata(sbm_mdio, MII_COMMAND_START, 2); + sbmac_mii_senddata(sbm_mdio, MII_COMMAND_READ, 2); + sbmac_mii_senddata(sbm_mdio, phyaddr, 5); + sbmac_mii_senddata(sbm_mdio, regidx, 5); - sbmac_mii_senddata(s,MII_COMMAND_START, 2); - sbmac_mii_senddata(s,MII_COMMAND_READ, 2); - sbmac_mii_senddata(s,phyaddr, 5); - sbmac_mii_senddata(s,regidx, 5); - - mac_mdio_genc = __raw_readq(s->sbm_mdio) & M_MAC_GENC; + mac_mdio_genc = __raw_readq(sbm_mdio) & M_MAC_GENC; /* * Switch the port around without a clock transition. */ - __raw_writeq(M_MAC_MDIO_DIR_INPUT | mac_mdio_genc, s->sbm_mdio); + __raw_writeq(M_MAC_MDIO_DIR_INPUT | mac_mdio_genc, sbm_mdio); /* * Send out a clock pulse to signal we want the status */ - - __raw_writeq(M_MAC_MDIO_DIR_INPUT | M_MAC_MDC | mac_mdio_genc, s->sbm_mdio); - __raw_writeq(M_MAC_MDIO_DIR_INPUT | mac_mdio_genc, s->sbm_mdio); + __raw_writeq(M_MAC_MDIO_DIR_INPUT | M_MAC_MDC | mac_mdio_genc, + sbm_mdio); + __raw_writeq(M_MAC_MDIO_DIR_INPUT | mac_mdio_genc, sbm_mdio); /* * If an error occurred, the PHY will signal '1' back */ - error = __raw_readq(s->sbm_mdio) & M_MAC_MDIO_IN; + error = __raw_readq(sbm_mdio) & M_MAC_MDIO_IN; /* * Issue an 'idle' clock pulse, but keep the direction * the same. */ - __raw_writeq(M_MAC_MDIO_DIR_INPUT | M_MAC_MDC | mac_mdio_genc, s->sbm_mdio); - __raw_writeq(M_MAC_MDIO_DIR_INPUT | mac_mdio_genc, s->sbm_mdio); + __raw_writeq(M_MAC_MDIO_DIR_INPUT | M_MAC_MDC | mac_mdio_genc, + sbm_mdio); + __raw_writeq(M_MAC_MDIO_DIR_INPUT | mac_mdio_genc, sbm_mdio); regval = 0; @@ -616,55 +514,60 @@ static unsigned int sbmac_mii_read(struct sbmac_softc *s,int phyaddr,int regidx) regval <<= 1; if (error == 0) { - if (__raw_readq(s->sbm_mdio) & M_MAC_MDIO_IN) + if (__raw_readq(sbm_mdio) & M_MAC_MDIO_IN) regval |= 1; } - __raw_writeq(M_MAC_MDIO_DIR_INPUT|M_MAC_MDC | mac_mdio_genc, s->sbm_mdio); - __raw_writeq(M_MAC_MDIO_DIR_INPUT | mac_mdio_genc, s->sbm_mdio); + __raw_writeq(M_MAC_MDIO_DIR_INPUT | M_MAC_MDC | mac_mdio_genc, + sbm_mdio); + __raw_writeq(M_MAC_MDIO_DIR_INPUT | mac_mdio_genc, sbm_mdio); } /* Switch back to output */ - __raw_writeq(M_MAC_MDIO_DIR_OUTPUT | mac_mdio_genc, s->sbm_mdio); + __raw_writeq(M_MAC_MDIO_DIR_OUTPUT | mac_mdio_genc, sbm_mdio); if (error == 0) return regval; - return 0; + return 0xffff; } /********************************************************************** - * SBMAC_MII_WRITE(s,phyaddr,regidx,regval) + * SBMAC_MII_WRITE(bus, phyaddr, regidx, regval) * * Write a value to a PHY register. 
* * Input parameters: - * s - sbmac structure + * bus - MDIO bus handle * phyaddr - PHY to use - * regidx - register within the PHY - * regval - data to write to register + * regidx - register within the PHY + * regval - data to write to register * * Return value: - * nothing + * 0 for success ********************************************************************* */ -static void sbmac_mii_write(struct sbmac_softc *s,int phyaddr,int regidx, - unsigned int regval) +static int sbmac_mii_write(struct mii_bus *bus, int phyaddr, int regidx, + u16 regval) { + struct sbmac_softc *sc = (struct sbmac_softc *)bus->priv; + void __iomem *sbm_mdio = sc->sbm_mdio; int mac_mdio_genc; - sbmac_mii_sync(s); + sbmac_mii_sync(sbm_mdio); + + sbmac_mii_senddata(sbm_mdio, MII_COMMAND_START, 2); + sbmac_mii_senddata(sbm_mdio, MII_COMMAND_WRITE, 2); + sbmac_mii_senddata(sbm_mdio, phyaddr, 5); + sbmac_mii_senddata(sbm_mdio, regidx, 5); + sbmac_mii_senddata(sbm_mdio, MII_COMMAND_ACK, 2); + sbmac_mii_senddata(sbm_mdio, regval, 16); - sbmac_mii_senddata(s,MII_COMMAND_START,2); - sbmac_mii_senddata(s,MII_COMMAND_WRITE,2); - sbmac_mii_senddata(s,phyaddr, 5); - sbmac_mii_senddata(s,regidx, 5); - sbmac_mii_senddata(s,MII_COMMAND_ACK,2); - sbmac_mii_senddata(s,regval,16); + mac_mdio_genc = __raw_readq(sbm_mdio) & M_MAC_GENC; - mac_mdio_genc = __raw_readq(s->sbm_mdio) & M_MAC_GENC; + __raw_writeq(M_MAC_MDIO_DIR_OUTPUT | mac_mdio_genc, sbm_mdio); - __raw_writeq(M_MAC_MDIO_DIR_OUTPUT | mac_mdio_genc, s->sbm_mdio); + return 0; } @@ -677,8 +580,8 @@ static void sbmac_mii_write(struct sbmac_softc *s,int phyaddr,int regidx, * way. * * Input parameters: - * d - sbmacdma_t structure (DMA channel context) - * s - sbmac_softc structure (pointer to a MAC) + * d - struct sbmacdma (DMA channel context) + * s - struct sbmac_softc (pointer to a MAC) * chan - channel number (0..1 right now) * txrx - Identifies DMA_TX or DMA_RX for channel direction * maxdescr - number of descriptors @@ -687,11 +590,8 @@ static void sbmac_mii_write(struct sbmac_softc *s,int phyaddr,int regidx, * nothing ********************************************************************* */ -static void sbdma_initctx(sbmacdma_t *d, - struct sbmac_softc *s, - int chan, - int txrx, - int maxdescr) +static void sbdma_initctx(struct sbmacdma *d, struct sbmac_softc *s, int chan, + int txrx, int maxdescr) { #ifdef CONFIG_SBMAC_COALESCE int int_pktcnt, int_timeout; @@ -710,27 +610,27 @@ static void sbdma_initctx(sbmacdma_t *d, s->sbe_idx =(s->sbm_base - A_MAC_BASE_0)/MAC_SPACING; #endif - __raw_writeq(0, IOADDR(A_MAC_REGISTER(s->sbe_idx, R_MAC_RMON_TX_BYTES))); - __raw_writeq(0, IOADDR(A_MAC_REGISTER(s->sbe_idx, R_MAC_RMON_COLLISIONS))); - __raw_writeq(0, IOADDR(A_MAC_REGISTER(s->sbe_idx, R_MAC_RMON_LATE_COL))); - __raw_writeq(0, IOADDR(A_MAC_REGISTER(s->sbe_idx, R_MAC_RMON_EX_COL))); - __raw_writeq(0, IOADDR(A_MAC_REGISTER(s->sbe_idx, R_MAC_RMON_FCS_ERROR))); - __raw_writeq(0, IOADDR(A_MAC_REGISTER(s->sbe_idx, R_MAC_RMON_TX_ABORT))); - __raw_writeq(0, IOADDR(A_MAC_REGISTER(s->sbe_idx, R_MAC_RMON_TX_BAD))); - __raw_writeq(0, IOADDR(A_MAC_REGISTER(s->sbe_idx, R_MAC_RMON_TX_GOOD))); - __raw_writeq(0, IOADDR(A_MAC_REGISTER(s->sbe_idx, R_MAC_RMON_TX_RUNT))); - __raw_writeq(0, IOADDR(A_MAC_REGISTER(s->sbe_idx, R_MAC_RMON_TX_OVERSIZE))); - __raw_writeq(0, IOADDR(A_MAC_REGISTER(s->sbe_idx, R_MAC_RMON_RX_BYTES))); - __raw_writeq(0, IOADDR(A_MAC_REGISTER(s->sbe_idx, R_MAC_RMON_RX_MCAST))); - __raw_writeq(0, IOADDR(A_MAC_REGISTER(s->sbe_idx, R_MAC_RMON_RX_BCAST))); - __raw_writeq(0, 
IOADDR(A_MAC_REGISTER(s->sbe_idx, R_MAC_RMON_RX_BAD))); - __raw_writeq(0, IOADDR(A_MAC_REGISTER(s->sbe_idx, R_MAC_RMON_RX_GOOD))); - __raw_writeq(0, IOADDR(A_MAC_REGISTER(s->sbe_idx, R_MAC_RMON_RX_RUNT))); - __raw_writeq(0, IOADDR(A_MAC_REGISTER(s->sbe_idx, R_MAC_RMON_RX_OVERSIZE))); - __raw_writeq(0, IOADDR(A_MAC_REGISTER(s->sbe_idx, R_MAC_RMON_RX_FCS_ERROR))); - __raw_writeq(0, IOADDR(A_MAC_REGISTER(s->sbe_idx, R_MAC_RMON_RX_LENGTH_ERROR))); - __raw_writeq(0, IOADDR(A_MAC_REGISTER(s->sbe_idx, R_MAC_RMON_RX_CODE_ERROR))); - __raw_writeq(0, IOADDR(A_MAC_REGISTER(s->sbe_idx, R_MAC_RMON_RX_ALIGN_ERROR))); + __raw_writeq(0, s->sbm_base + R_MAC_RMON_TX_BYTES); + __raw_writeq(0, s->sbm_base + R_MAC_RMON_COLLISIONS); + __raw_writeq(0, s->sbm_base + R_MAC_RMON_LATE_COL); + __raw_writeq(0, s->sbm_base + R_MAC_RMON_EX_COL); + __raw_writeq(0, s->sbm_base + R_MAC_RMON_FCS_ERROR); + __raw_writeq(0, s->sbm_base + R_MAC_RMON_TX_ABORT); + __raw_writeq(0, s->sbm_base + R_MAC_RMON_TX_BAD); + __raw_writeq(0, s->sbm_base + R_MAC_RMON_TX_GOOD); + __raw_writeq(0, s->sbm_base + R_MAC_RMON_TX_RUNT); + __raw_writeq(0, s->sbm_base + R_MAC_RMON_TX_OVERSIZE); + __raw_writeq(0, s->sbm_base + R_MAC_RMON_RX_BYTES); + __raw_writeq(0, s->sbm_base + R_MAC_RMON_RX_MCAST); + __raw_writeq(0, s->sbm_base + R_MAC_RMON_RX_BCAST); + __raw_writeq(0, s->sbm_base + R_MAC_RMON_RX_BAD); + __raw_writeq(0, s->sbm_base + R_MAC_RMON_RX_GOOD); + __raw_writeq(0, s->sbm_base + R_MAC_RMON_RX_RUNT); + __raw_writeq(0, s->sbm_base + R_MAC_RMON_RX_OVERSIZE); + __raw_writeq(0, s->sbm_base + R_MAC_RMON_RX_FCS_ERROR); + __raw_writeq(0, s->sbm_base + R_MAC_RMON_RX_LENGTH_ERROR); + __raw_writeq(0, s->sbm_base + R_MAC_RMON_RX_CODE_ERROR); + __raw_writeq(0, s->sbm_base + R_MAC_RMON_RX_ALIGN_ERROR); /* * initialize register pointers @@ -758,18 +658,17 @@ static void sbdma_initctx(sbmacdma_t *d, d->sbdma_maxdescr = maxdescr; - d->sbdma_dscrtable_unaligned = - d->sbdma_dscrtable = (sbdmadscr_t *) - kmalloc((d->sbdma_maxdescr+1)*sizeof(sbdmadscr_t), GFP_KERNEL); + d->sbdma_dscrtable_unaligned = kcalloc(d->sbdma_maxdescr + 1, + sizeof(*d->sbdma_dscrtable), + GFP_KERNEL); /* * The descriptor table must be aligned to at least 16 bytes or the * MAC will corrupt it. 
*/ - d->sbdma_dscrtable = (sbdmadscr_t *) - ALIGN((unsigned long)d->sbdma_dscrtable, sizeof(sbdmadscr_t)); - - memset(d->sbdma_dscrtable,0,d->sbdma_maxdescr*sizeof(sbdmadscr_t)); + d->sbdma_dscrtable = (struct sbdmadscr *) + ALIGN((unsigned long)d->sbdma_dscrtable_unaligned, + sizeof(*d->sbdma_dscrtable)); d->sbdma_dscrtable_end = d->sbdma_dscrtable + d->sbdma_maxdescr; @@ -779,10 +678,8 @@ static void sbdma_initctx(sbmacdma_t *d, * And context table */ - d->sbdma_ctxtable = (struct sk_buff **) - kmalloc(d->sbdma_maxdescr*sizeof(struct sk_buff *), GFP_KERNEL); - - memset(d->sbdma_ctxtable,0,d->sbdma_maxdescr*sizeof(struct sk_buff *)); + d->sbdma_ctxtable = kcalloc(d->sbdma_maxdescr, + sizeof(*d->sbdma_ctxtable), GFP_KERNEL); #ifdef CONFIG_SBMAC_COALESCE /* @@ -819,7 +716,7 @@ static void sbdma_initctx(sbmacdma_t *d, * nothing ********************************************************************* */ -static void sbdma_channel_start(sbmacdma_t *d, int rxtx ) +static void sbdma_channel_start(struct sbmacdma *d, int rxtx) { /* * Turn on the DMA channel @@ -860,7 +757,7 @@ static void sbdma_channel_start(sbmacdma_t *d, int rxtx ) * nothing ********************************************************************* */ -static void sbdma_channel_stop(sbmacdma_t *d) +static void sbdma_channel_stop(struct sbmacdma *d) { /* * Turn off the DMA channel @@ -909,10 +806,10 @@ static void sbdma_align_skb(struct sk_buff *skb,int power2,int offset) ********************************************************************* */ -static int sbdma_add_rcvbuffer(sbmacdma_t *d,struct sk_buff *sb) +static int sbdma_add_rcvbuffer(struct sbmacdma *d, struct sk_buff *sb) { - sbdmadscr_t *dsc; - sbdmadscr_t *nextdsc; + struct sbdmadscr *dsc; + struct sbdmadscr *nextdsc; struct sk_buff *sb_new = NULL; int pktsize = ENET_PACKET_SIZE; @@ -953,7 +850,7 @@ static int sbdma_add_rcvbuffer(sbmacdma_t *d,struct sk_buff *sb) if (sb == NULL) { sb_new = dev_alloc_skb(ENET_PACKET_SIZE + SMP_CACHE_BYTES * 2 + ETHER_ALIGN); if (sb_new == NULL) { - printk(KERN_INFO "%s: sk_buff allocation failed\n", + pr_info("%s: sk_buff allocation failed\n", d->sbdma_eth->sbm_dev->name); return -ENOBUFS; } @@ -1024,10 +921,10 @@ static int sbdma_add_rcvbuffer(sbmacdma_t *d,struct sk_buff *sb) ********************************************************************* */ -static int sbdma_add_txbuffer(sbmacdma_t *d,struct sk_buff *sb) +static int sbdma_add_txbuffer(struct sbmacdma *d, struct sk_buff *sb) { - sbdmadscr_t *dsc; - sbdmadscr_t *nextdsc; + struct sbdmadscr *dsc; + struct sbdmadscr *nextdsc; uint64_t phys; uint64_t ncb; int length; @@ -1113,7 +1010,7 @@ static int sbdma_add_txbuffer(sbmacdma_t *d,struct sk_buff *sb) * nothing ********************************************************************* */ -static void sbdma_emptyring(sbmacdma_t *d) +static void sbdma_emptyring(struct sbmacdma *d) { int idx; struct sk_buff *sb; @@ -1141,7 +1038,7 @@ static void sbdma_emptyring(sbmacdma_t *d) * nothing ********************************************************************* */ -static void sbdma_fillring(sbmacdma_t *d) +static void sbdma_fillring(struct sbmacdma *d) { int idx; @@ -1188,12 +1085,13 @@ static void sbmac_netpoll(struct net_device *netdev) * nothing ********************************************************************* */ -static int sbdma_rx_process(struct sbmac_softc *sc,sbmacdma_t *d, - int work_to_do, int poll) +static int sbdma_rx_process(struct sbmac_softc *sc, struct sbmacdma *d, + int work_to_do, int poll) { + struct net_device *dev = sc->sbm_dev; 
int curidx; int hwidx; - sbdmadscr_t *dsc; + struct sbdmadscr *dsc; struct sk_buff *sb; int len; int work_done = 0; @@ -1203,7 +1101,7 @@ static int sbdma_rx_process(struct sbmac_softc *sc,sbmacdma_t *d, again: /* Check if the HW dropped any frames */ - sc->sbm_stats.rx_fifo_errors + dev->stats.rx_fifo_errors += __raw_readq(sc->sbm_rxdma.sbdma_oodpktlost) & 0xffff; __raw_writeq(0, sc->sbm_rxdma.sbdma_oodpktlost); @@ -1225,8 +1123,9 @@ static int sbdma_rx_process(struct sbmac_softc *sc,sbmacdma_t *d, prefetch(dsc); prefetch(&d->sbdma_ctxtable[curidx]); - hwidx = (int) (((__raw_readq(d->sbdma_curdscr) & M_DMA_CURDSCR_ADDR) - - d->sbdma_dscrtable_phys) / sizeof(sbdmadscr_t)); + hwidx = ((__raw_readq(d->sbdma_curdscr) & M_DMA_CURDSCR_ADDR) - + d->sbdma_dscrtable_phys) / + sizeof(*d->sbdma_dscrtable); /* * If they're the same, that means we've processed all @@ -1262,7 +1161,7 @@ static int sbdma_rx_process(struct sbmac_softc *sc,sbmacdma_t *d, if (unlikely (sbdma_add_rcvbuffer(d,NULL) == -ENOBUFS)) { - sc->sbm_stats.rx_dropped++; + dev->stats.rx_dropped++; sbdma_add_rcvbuffer(d,sb); /* re-add old buffer */ /* No point in continuing at the moment */ printk(KERN_ERR "dropped packet (1)\n"); @@ -1298,13 +1197,13 @@ static int sbdma_rx_process(struct sbmac_softc *sc,sbmacdma_t *d, dropped = netif_rx(sb); if (dropped == NET_RX_DROP) { - sc->sbm_stats.rx_dropped++; + dev->stats.rx_dropped++; d->sbdma_remptr = SBDMA_NEXTBUF(d,sbdma_remptr); goto done; } else { - sc->sbm_stats.rx_bytes += len; - sc->sbm_stats.rx_packets++; + dev->stats.rx_bytes += len; + dev->stats.rx_packets++; } } } else { @@ -1312,7 +1211,7 @@ static int sbdma_rx_process(struct sbmac_softc *sc,sbmacdma_t *d, * Packet was mangled somehow. Just drop it and * put it back on the receive ring. */ - sc->sbm_stats.rx_errors++; + dev->stats.rx_errors++; sbdma_add_rcvbuffer(d,sb); } @@ -1350,11 +1249,13 @@ static int sbdma_rx_process(struct sbmac_softc *sc,sbmacdma_t *d, * nothing ********************************************************************* */ -static void sbdma_tx_process(struct sbmac_softc *sc,sbmacdma_t *d, int poll) +static void sbdma_tx_process(struct sbmac_softc *sc, struct sbmacdma *d, + int poll) { + struct net_device *dev = sc->sbm_dev; int curidx; int hwidx; - sbdmadscr_t *dsc; + struct sbdmadscr *dsc; struct sk_buff *sb; unsigned long flags; int packets_handled = 0; @@ -1364,8 +1265,8 @@ static void sbdma_tx_process(struct sbmac_softc *sc,sbmacdma_t *d, int poll) if (d->sbdma_remptr == d->sbdma_addptr) goto end_unlock; - hwidx = (int) (((__raw_readq(d->sbdma_curdscr) & M_DMA_CURDSCR_ADDR) - - d->sbdma_dscrtable_phys) / sizeof(sbdmadscr_t)); + hwidx = ((__raw_readq(d->sbdma_curdscr) & M_DMA_CURDSCR_ADDR) - + d->sbdma_dscrtable_phys) / sizeof(*d->sbdma_dscrtable); for (;;) { /* @@ -1402,8 +1303,8 @@ static void sbdma_tx_process(struct sbmac_softc *sc,sbmacdma_t *d, int poll) * Stats */ - sc->sbm_stats.tx_bytes += sb->len; - sc->sbm_stats.tx_packets++; + dev->stats.tx_bytes += sb->len; + dev->stats.tx_packets++; /* * for transmits, we just free buffers. @@ -1468,14 +1369,6 @@ static int sbmac_initctx(struct sbmac_softc *s) s->sbm_imr = s->sbm_base + R_MAC_INT_MASK; s->sbm_mdio = s->sbm_base + R_MAC_MDIO; - s->sbm_phys[0] = 1; - s->sbm_phys[1] = 0; - - s->sbm_phy_oldbmsr = 0; - s->sbm_phy_oldanlpar = 0; - s->sbm_phy_oldk1stsr = 0; - s->sbm_phy_oldlinkstat = 0; - /* * Initialize the DMA channels. Right now, only one per MAC is used * Note: Only do this _once_, as it allocates memory from the kernel! 
@@ -1490,19 +1383,11 @@ static int sbmac_initctx(struct sbmac_softc *s) s->sbm_state = sbmac_state_off; - /* - * Initial speed is (XXX TEMP) 10MBit/s HDX no FC - */ - - s->sbm_speed = sbmac_speed_10; - s->sbm_duplex = sbmac_duplex_half; - s->sbm_fc = sbmac_fc_disabled; - return 0; } -static void sbdma_uninitctx(struct sbmacdma_s *d) +static void sbdma_uninitctx(struct sbmacdma *d) { if (d->sbdma_dscrtable_unaligned) { kfree(d->sbdma_dscrtable_unaligned); @@ -1538,7 +1423,7 @@ static void sbmac_uninitctx(struct sbmac_softc *sc) static void sbmac_channel_start(struct sbmac_softc *s) { uint64_t reg; - volatile void __iomem *port; + void __iomem *port; uint64_t cfg,fifo,framecfg; int idx, th_value; @@ -1801,10 +1686,10 @@ static void sbmac_channel_stop(struct sbmac_softc *s) * Return value: * old state ********************************************************************* */ -static sbmac_state_t sbmac_set_channel_state(struct sbmac_softc *sc, - sbmac_state_t state) +static enum sbmac_state sbmac_set_channel_state(struct sbmac_softc *sc, + enum sbmac_state state) { - sbmac_state_t oldstate = sc->sbm_state; + enum sbmac_state oldstate = sc->sbm_state; /* * If same as previous state, return @@ -1939,14 +1824,14 @@ static uint64_t sbmac_addr2reg(unsigned char *ptr) * * Input parameters: * s - sbmac structure - * speed - speed to set MAC to (see sbmac_speed_t enum) + * speed - speed to set MAC to (see enum sbmac_speed) * * Return value: * 1 if successful * 0 indicates invalid parameters ********************************************************************* */ -static int sbmac_set_speed(struct sbmac_softc *s,sbmac_speed_t speed) +static int sbmac_set_speed(struct sbmac_softc *s, enum sbmac_speed speed) { uint64_t cfg; uint64_t framecfg; @@ -2004,8 +1889,6 @@ static int sbmac_set_speed(struct sbmac_softc *s,sbmac_speed_t speed) cfg |= V_MAC_SPEED_SEL_1000MBPS | M_MAC_BURST_EN; break; - case sbmac_speed_auto: /* XXX not implemented */ - /* fall through */ default: return 0; } @@ -2028,15 +1911,16 @@ static int sbmac_set_speed(struct sbmac_softc *s,sbmac_speed_t speed) * * Input parameters: * s - sbmac structure - * duplex - duplex setting (see sbmac_duplex_t) - * fc - flow control setting (see sbmac_fc_t) + * duplex - duplex setting (see enum sbmac_duplex) + * fc - flow control setting (see enum sbmac_fc) * * Return value: * 1 if ok * 0 if an invalid parameter combination was specified ********************************************************************* */ -static int sbmac_set_duplex(struct sbmac_softc *s,sbmac_duplex_t duplex,sbmac_fc_t fc) +static int sbmac_set_duplex(struct sbmac_softc *s, enum sbmac_duplex duplex, + enum sbmac_fc fc) { uint64_t cfg; @@ -2078,8 +1962,6 @@ static int sbmac_set_duplex(struct sbmac_softc *s,sbmac_duplex_t duplex,sbmac_fc cfg |= M_MAC_HDX_EN | V_MAC_FC_CMD_ENAB_FALSECARR; break; - case sbmac_fc_auto: /* XXX not implemented */ - /* fall through */ case sbmac_fc_frame: /* not valid in half duplex */ default: /* invalid selection */ return 0; @@ -2098,15 +1980,12 @@ static int sbmac_set_duplex(struct sbmac_softc *s,sbmac_duplex_t duplex,sbmac_fc case sbmac_fc_collision: /* not valid in full duplex */ case sbmac_fc_carrier: /* not valid in full duplex */ - case sbmac_fc_auto: /* XXX not implemented */ - /* fall through */ default: return 0; } break; - case sbmac_duplex_auto: - /* XXX not implemented */ - break; + default: + return 0; } /* @@ -2154,20 +2033,13 @@ static irqreturn_t sbmac_intr(int irq,void *dev_instance) * Transmits on channel 0 */ - if (isr & 
(M_MAC_INT_CHANNEL << S_MAC_TX_CH0)) { + if (isr & (M_MAC_INT_CHANNEL << S_MAC_TX_CH0)) sbdma_tx_process(sc,&(sc->sbm_txdma), 0); -#ifdef CONFIG_NETPOLL_TRAP - if (netpoll_trap()) { - if (test_and_clear_bit(__LINK_STATE_XOFF, &dev->state)) - __netif_schedule(dev); - } -#endif - } if (isr & (M_MAC_INT_CHANNEL << S_MAC_RX_CH0)) { - if (netif_rx_schedule_prep(dev)) { + if (netif_rx_schedule_prep(dev, &sc->napi)) { __raw_writeq(0, sc->sbm_imr); - __netif_rx_schedule(dev); + __netif_rx_schedule(dev, &sc->napi); /* Depend on the exit from poll to reenable intr */ } else { @@ -2236,7 +2108,7 @@ static int sbmac_start_tx(struct sk_buff *skb, struct net_device *dev) static void sbmac_setmulti(struct sbmac_softc *sc) { uint64_t reg; - volatile void __iomem *port; + void __iomem *port; int idx; struct dev_mc_list *mclist; struct net_device *dev = sc->sbm_dev; @@ -2392,7 +2264,7 @@ static int sb1250_change_mtu(struct net_device *_dev, int new_mtu) if (new_mtu > ENET_PACKET_SIZE) return -EINVAL; _dev->mtu = new_mtu; - printk(KERN_INFO "changing the mtu to %d\n", new_mtu); + pr_info("changing the mtu to %d\n", new_mtu); return 0; } @@ -2408,19 +2280,17 @@ static int sb1250_change_mtu(struct net_device *_dev, int new_mtu) * status ********************************************************************* */ -static int sbmac_init(struct net_device *dev, int idx) +static int sbmac_init(struct platform_device *pldev, long long base) { - struct sbmac_softc *sc; + struct net_device *dev = pldev->dev.driver_data; + int idx = pldev->id; + struct sbmac_softc *sc = netdev_priv(dev); unsigned char *eaddr; uint64_t ea_reg; int i; int err; + DECLARE_MAC_BUF(mac); - sc = netdev_priv(dev); - - /* Determine controller base address */ - - sc->sbm_base = IOADDR(dev->base_addr); sc->sbm_dev = dev; sc->sbe_idx = idx; @@ -2465,58 +2335,67 @@ static int sbmac_init(struct net_device *dev, int idx) dev->open = sbmac_open; dev->hard_start_xmit = sbmac_start_tx; dev->stop = sbmac_close; - dev->get_stats = sbmac_get_stats; dev->set_multicast_list = sbmac_set_rx_mode; dev->do_ioctl = sbmac_mii_ioctl; dev->tx_timeout = sbmac_tx_timeout; dev->watchdog_timeo = TX_TIMEOUT; - dev->poll = sbmac_poll; - dev->weight = 16; + + netif_napi_add(dev, &sc->napi, sbmac_poll, 16); dev->change_mtu = sb1250_change_mtu; #ifdef CONFIG_NET_POLL_CONTROLLER dev->poll_controller = sbmac_netpoll; #endif + dev->irq = UNIT_INT(idx); + /* This is needed for PASS2 for Rx H/W checksum feature */ sbmac_set_iphdr_offset(sc); err = register_netdev(dev); - if (err) - goto out_uninit; - - if (sc->rx_hw_checksum == ENABLE) { - printk(KERN_INFO "%s: enabling TCP rcv checksum\n", - sc->sbm_dev->name); + if (err) { + printk(KERN_ERR "%s.%d: unable to register netdev\n", + sbmac_string, idx); + sbmac_uninitctx(sc); + return err; } + pr_info("%s.%d: registered as %s\n", sbmac_string, idx, dev->name); + + if (sc->rx_hw_checksum == ENABLE) + pr_info("%s: enabling TCP rcv checksum\n", dev->name); + /* * Display Ethernet address (this is called during the config * process so we need to finish off the config message that * was being displayed) */ - printk(KERN_INFO - "%s: SiByte Ethernet at 0x%08lX, address: %02X:%02X:%02X:%02X:%02X:%02X\n", - dev->name, dev->base_addr, - eaddr[0],eaddr[1],eaddr[2],eaddr[3],eaddr[4],eaddr[5]); - + pr_info("%s: SiByte Ethernet at 0x%08Lx, address: %s\n", + dev->name, base, print_mac(mac, eaddr)); - return 0; + sc->mii_bus.name = sbmac_mdio_string; + sc->mii_bus.id = idx; + sc->mii_bus.priv = sc; + sc->mii_bus.read = sbmac_mii_read; + 
sc->mii_bus.write = sbmac_mii_write; + sc->mii_bus.irq = sc->phy_irq; + for (i = 0; i < PHY_MAX_ADDR; ++i) + sc->mii_bus.irq[i] = SBMAC_PHY_INT; -out_uninit: - sbmac_uninitctx(sc); + sc->mii_bus.dev = &pldev->dev; + dev_set_drvdata(&pldev->dev, &sc->mii_bus); - return err; + return 0; } static int sbmac_open(struct net_device *dev) { struct sbmac_softc *sc = netdev_priv(dev); + int err; - if (debug > 1) { - printk(KERN_DEBUG "%s: sbmac_open() irq %d.\n", dev->name, dev->irq); - } + if (debug > 1) + pr_debug("%s: sbmac_open() irq %d.\n", dev->name, dev->irq); /* * map/route interrupt (clear status first, in case something @@ -2525,23 +2404,35 @@ static int sbmac_open(struct net_device *dev) */ __raw_readq(sc->sbm_isr); - if (request_irq(dev->irq, &sbmac_intr, IRQF_SHARED, dev->name, dev)) - return -EBUSY; + err = request_irq(dev->irq, &sbmac_intr, IRQF_SHARED, dev->name, dev); + if (err) { + printk(KERN_ERR "%s: unable to get IRQ %d\n", dev->name, + dev->irq); + goto out_err; + } /* - * Probe phy address + * Probe PHY address */ - - if(sbmac_mii_probe(dev) == -1) { - printk("%s: failed to probe PHY.\n", dev->name); - return -EINVAL; + err = mdiobus_register(&sc->mii_bus); + if (err) { + printk(KERN_ERR "%s: unable to register MDIO bus\n", + dev->name); + goto out_unirq; } + sc->sbm_speed = sbmac_speed_none; + sc->sbm_duplex = sbmac_duplex_none; + sc->sbm_fc = sbmac_fc_none; + sc->sbm_pause = -1; + sc->sbm_link = 0; + /* - * Configure default speed + * Attach to the PHY */ - - sbmac_mii_poll(sc,noisy_mii); + err = sbmac_mii_probe(dev); + if (err) + goto out_unregister; /* * Turn on the channel @@ -2549,200 +2440,133 @@ static int sbmac_open(struct net_device *dev) sbmac_set_channel_state(sc,sbmac_state_on); - /* - * XXX Station address is in dev->dev_addr - */ - - if (dev->if_port == 0) - dev->if_port = 0; - netif_start_queue(dev); sbmac_set_rx_mode(dev); - /* Set the timer to check for link beat. */ - init_timer(&sc->sbm_timer); - sc->sbm_timer.expires = jiffies + 2 * HZ/100; - sc->sbm_timer.data = (unsigned long)dev; - sc->sbm_timer.function = &sbmac_timer; - add_timer(&sc->sbm_timer); + phy_start(sc->phy_dev); + + napi_enable(&sc->napi); return 0; + +out_unregister: + mdiobus_unregister(&sc->mii_bus); + +out_unirq: + free_irq(dev->irq, dev); + +out_err: + return err; } static int sbmac_mii_probe(struct net_device *dev) { + struct sbmac_softc *sc = netdev_priv(dev); + struct phy_device *phy_dev; int i; - struct sbmac_softc *s = netdev_priv(dev); - u16 bmsr, id1, id2; - u32 vendor, device; - - for (i=1; i<31; i++) { - bmsr = sbmac_mii_read(s, i, MII_BMSR); - if (bmsr != 0) { - s->sbm_phys[0] = i; - id1 = sbmac_mii_read(s, i, MII_PHYIDR1); - id2 = sbmac_mii_read(s, i, MII_PHYIDR2); - vendor = ((u32)id1 << 6) | ((id2 >> 10) & 0x3f); - device = (id2 >> 4) & 0x3f; - - printk(KERN_INFO "%s: found phy %d, vendor %06x part %02x\n", - dev->name, i, vendor, device); - return i; - } - } - return -1; -} - - -static int sbmac_mii_poll(struct sbmac_softc *s,int noisy) -{ - int bmsr,bmcr,k1stsr,anlpar; - int chg; - char buffer[100]; - char *p = buffer; - /* Read the mode status and mode control registers. 
*/ - bmsr = sbmac_mii_read(s,s->sbm_phys[0],MII_BMSR); - bmcr = sbmac_mii_read(s,s->sbm_phys[0],MII_BMCR); - - /* get the link partner status */ - anlpar = sbmac_mii_read(s,s->sbm_phys[0],MII_ANLPAR); - - /* if supported, read the 1000baseT register */ - if (bmsr & BMSR_1000BT_XSR) { - k1stsr = sbmac_mii_read(s,s->sbm_phys[0],MII_K1STSR); - } - else { - k1stsr = 0; + for (i = 0; i < PHY_MAX_ADDR; i++) { + phy_dev = sc->mii_bus.phy_map[i]; + if (phy_dev) + break; } - - chg = 0; - - if ((bmsr & BMSR_LINKSTAT) == 0) { - /* - * If link status is down, clear out old info so that when - * it comes back up it will force us to reconfigure speed - */ - s->sbm_phy_oldbmsr = 0; - s->sbm_phy_oldanlpar = 0; - s->sbm_phy_oldk1stsr = 0; - return 0; + if (!phy_dev) { + printk(KERN_ERR "%s: no PHY found\n", dev->name); + return -ENXIO; } - if ((s->sbm_phy_oldbmsr != bmsr) || - (s->sbm_phy_oldanlpar != anlpar) || - (s->sbm_phy_oldk1stsr != k1stsr)) { - if (debug > 1) { - printk(KERN_DEBUG "%s: bmsr:%x/%x anlpar:%x/%x k1stsr:%x/%x\n", - s->sbm_dev->name, - s->sbm_phy_oldbmsr,bmsr, - s->sbm_phy_oldanlpar,anlpar, - s->sbm_phy_oldk1stsr,k1stsr); - } - s->sbm_phy_oldbmsr = bmsr; - s->sbm_phy_oldanlpar = anlpar; - s->sbm_phy_oldk1stsr = k1stsr; - chg = 1; + phy_dev = phy_connect(dev, phy_dev->dev.bus_id, &sbmac_mii_poll, 0, + PHY_INTERFACE_MODE_GMII); + if (IS_ERR(phy_dev)) { + printk(KERN_ERR "%s: could not attach to PHY\n", dev->name); + return PTR_ERR(phy_dev); } - if (chg == 0) - return 0; + /* Remove any features not supported by the controller */ + phy_dev->supported &= SUPPORTED_10baseT_Half | + SUPPORTED_10baseT_Full | + SUPPORTED_100baseT_Half | + SUPPORTED_100baseT_Full | + SUPPORTED_1000baseT_Half | + SUPPORTED_1000baseT_Full | + SUPPORTED_Autoneg | + SUPPORTED_MII | + SUPPORTED_Pause | + SUPPORTED_Asym_Pause; + phy_dev->advertising = phy_dev->supported; - p += sprintf(p,"Link speed: "); - - if (k1stsr & K1STSR_LP1KFD) { - s->sbm_speed = sbmac_speed_1000; - s->sbm_duplex = sbmac_duplex_full; - s->sbm_fc = sbmac_fc_frame; - p += sprintf(p,"1000BaseT FDX"); - } - else if (k1stsr & K1STSR_LP1KHD) { - s->sbm_speed = sbmac_speed_1000; - s->sbm_duplex = sbmac_duplex_half; - s->sbm_fc = sbmac_fc_disabled; - p += sprintf(p,"1000BaseT HDX"); - } - else if (anlpar & ANLPAR_TXFD) { - s->sbm_speed = sbmac_speed_100; - s->sbm_duplex = sbmac_duplex_full; - s->sbm_fc = (anlpar & ANLPAR_PAUSE) ? 
sbmac_fc_frame : sbmac_fc_disabled; - p += sprintf(p,"100BaseT FDX"); - } - else if (anlpar & ANLPAR_TXHD) { - s->sbm_speed = sbmac_speed_100; - s->sbm_duplex = sbmac_duplex_half; - s->sbm_fc = sbmac_fc_disabled; - p += sprintf(p,"100BaseT HDX"); - } - else if (anlpar & ANLPAR_10FD) { - s->sbm_speed = sbmac_speed_10; - s->sbm_duplex = sbmac_duplex_full; - s->sbm_fc = sbmac_fc_frame; - p += sprintf(p,"10BaseT FDX"); - } - else if (anlpar & ANLPAR_10HD) { - s->sbm_speed = sbmac_speed_10; - s->sbm_duplex = sbmac_duplex_half; - s->sbm_fc = sbmac_fc_collision; - p += sprintf(p,"10BaseT HDX"); - } - else { - p += sprintf(p,"Unknown"); - } + pr_info("%s: attached PHY driver [%s] (mii_bus:phy_addr=%s, irq=%d)\n", + dev->name, phy_dev->drv->name, + phy_dev->dev.bus_id, phy_dev->irq); - if (noisy) { - printk(KERN_INFO "%s: %s\n",s->sbm_dev->name,buffer); - } + sc->phy_dev = phy_dev; - return 1; + return 0; } -static void sbmac_timer(unsigned long data) +static void sbmac_mii_poll(struct net_device *dev) { - struct net_device *dev = (struct net_device *)data; struct sbmac_softc *sc = netdev_priv(dev); - int next_tick = HZ; - int mii_status; + struct phy_device *phy_dev = sc->phy_dev; + unsigned long flags; + enum sbmac_fc fc; + int link_chg, speed_chg, duplex_chg, pause_chg, fc_chg; + + link_chg = (sc->sbm_link != phy_dev->link); + speed_chg = (sc->sbm_speed != phy_dev->speed); + duplex_chg = (sc->sbm_duplex != phy_dev->duplex); + pause_chg = (sc->sbm_pause != phy_dev->pause); + + if (!link_chg && !speed_chg && !duplex_chg && !pause_chg) + return; /* Hmmm... */ + + if (!phy_dev->link) { + if (link_chg) { + sc->sbm_link = phy_dev->link; + sc->sbm_speed = sbmac_speed_none; + sc->sbm_duplex = sbmac_duplex_none; + sc->sbm_fc = sbmac_fc_disabled; + sc->sbm_pause = -1; + pr_info("%s: link unavailable\n", dev->name); + } + return; + } - spin_lock_irq (&sc->sbm_lock); + if (phy_dev->duplex == DUPLEX_FULL) { + if (phy_dev->pause) + fc = sbmac_fc_frame; + else + fc = sbmac_fc_disabled; + } else + fc = sbmac_fc_collision; + fc_chg = (sc->sbm_fc != fc); - /* make IFF_RUNNING follow the MII status bit "Link established" */ - mii_status = sbmac_mii_read(sc, sc->sbm_phys[0], MII_BMSR); + pr_info("%s: link available: %dbase-%cD\n", dev->name, phy_dev->speed, + phy_dev->duplex == DUPLEX_FULL ? 
'F' : 'H'); - if ( (mii_status & BMSR_LINKSTAT) != (sc->sbm_phy_oldlinkstat) ) { - sc->sbm_phy_oldlinkstat = mii_status & BMSR_LINKSTAT; - if (mii_status & BMSR_LINKSTAT) { - netif_carrier_on(dev); - } - else { - netif_carrier_off(dev); - } - } + spin_lock_irqsave(&sc->sbm_lock, flags); - /* - * Poll the PHY to see what speed we should be running at - */ + sc->sbm_speed = phy_dev->speed; + sc->sbm_duplex = phy_dev->duplex; + sc->sbm_fc = fc; + sc->sbm_pause = phy_dev->pause; + sc->sbm_link = phy_dev->link; - if (sbmac_mii_poll(sc,noisy_mii)) { - if (sc->sbm_state != sbmac_state_off) { - /* - * something changed, restart the channel - */ - if (debug > 1) { - printk("%s: restarting channel because speed changed\n", - sc->sbm_dev->name); - } - sbmac_channel_stop(sc); - sbmac_channel_start(sc); - } + if ((speed_chg || duplex_chg || fc_chg) && + sc->sbm_state != sbmac_state_off) { + /* + * something changed, restart the channel + */ + if (debug > 1) + pr_debug("%s: restarting channel " + "because PHY state changed\n", dev->name); + sbmac_channel_stop(sc); + sbmac_channel_start(sc); } - spin_unlock_irq (&sc->sbm_lock); - - sc->sbm_timer.expires = jiffies + next_tick; - add_timer(&sc->sbm_timer); + spin_unlock_irqrestore(&sc->sbm_lock, flags); } @@ -2754,7 +2578,7 @@ static void sbmac_tx_timeout (struct net_device *dev) dev->trans_start = jiffies; - sc->sbm_stats.tx_errors++; + dev->stats.tx_errors++; spin_unlock_irq (&sc->sbm_lock); @@ -2764,22 +2588,6 @@ static void sbmac_tx_timeout (struct net_device *dev) -static struct net_device_stats *sbmac_get_stats(struct net_device *dev) -{ - struct sbmac_softc *sc = netdev_priv(dev); - unsigned long flags; - - spin_lock_irqsave(&sc->sbm_lock, flags); - - /* XXX update other stats here */ - - spin_unlock_irqrestore(&sc->sbm_lock, flags); - - return &sc->sbm_stats; -} - - - static void sbmac_set_rx_mode(struct net_device *dev) { unsigned long flags; @@ -2811,62 +2619,34 @@ static void sbmac_set_rx_mode(struct net_device *dev) static int sbmac_mii_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) { struct sbmac_softc *sc = netdev_priv(dev); - u16 *data = (u16 *)&rq->ifr_ifru; - unsigned long flags; - int retval; - spin_lock_irqsave(&sc->sbm_lock, flags); - retval = 0; - - switch(cmd) { - case SIOCDEVPRIVATE: /* Get the address of the PHY in use. */ - data[0] = sc->sbm_phys[0] & 0x1f; - /* Fall Through */ - case SIOCDEVPRIVATE+1: /* Read the specified MII register. 
*/ - data[3] = sbmac_mii_read(sc, data[0] & 0x1f, data[1] & 0x1f); - break; - case SIOCDEVPRIVATE+2: /* Write the specified MII register */ - if (!capable(CAP_NET_ADMIN)) { - retval = -EPERM; - break; - } - if (debug > 1) { - printk(KERN_DEBUG "%s: sbmac_mii_ioctl: write %02X %02X %02X\n",dev->name, - data[0],data[1],data[2]); - } - sbmac_mii_write(sc, data[0] & 0x1f, data[1] & 0x1f, data[2]); - break; - default: - retval = -EOPNOTSUPP; - } + if (!netif_running(dev) || !sc->phy_dev) + return -EINVAL; - spin_unlock_irqrestore(&sc->sbm_lock, flags); - return retval; + return phy_mii_ioctl(sc->phy_dev, if_mii(rq), cmd); } static int sbmac_close(struct net_device *dev) { struct sbmac_softc *sc = netdev_priv(dev); - unsigned long flags; - int irq; - sbmac_set_channel_state(sc,sbmac_state_off); + napi_disable(&sc->napi); - del_timer_sync(&sc->sbm_timer); + phy_stop(sc->phy_dev); - spin_lock_irqsave(&sc->sbm_lock, flags); + sbmac_set_channel_state(sc, sbmac_state_off); netif_stop_queue(dev); - if (debug > 1) { - printk(KERN_DEBUG "%s: Shutting down ethercard\n",dev->name); - } + if (debug > 1) + pr_debug("%s: Shutting down ethercard\n", dev->name); - spin_unlock_irqrestore(&sc->sbm_lock, flags); + phy_disconnect(sc->phy_dev); + sc->phy_dev = NULL; + + mdiobus_unregister(&sc->mii_bus); - irq = dev->irq; - synchronize_irq(irq); - free_irq(irq, dev); + free_irq(dev->irq, dev); sbdma_emptyring(&(sc->sbm_txdma)); sbdma_emptyring(&(sc->sbm_rxdma)); @@ -2874,26 +2654,17 @@ static int sbmac_close(struct net_device *dev) return 0; } -static int sbmac_poll(struct net_device *dev, int *budget) +static int sbmac_poll(struct napi_struct *napi, int budget) { - int work_to_do; + struct sbmac_softc *sc = container_of(napi, struct sbmac_softc, napi); + struct net_device *dev = sc->sbm_dev; int work_done; - struct sbmac_softc *sc = netdev_priv(dev); - - work_to_do = min(*budget, dev->quota); - work_done = sbdma_rx_process(sc, &(sc->sbm_rxdma), work_to_do, 1); - - if (work_done > work_to_do) - printk(KERN_ERR "%s exceeded work_to_do budget=%d quota=%d work-done=%d\n", - sc->sbm_dev->name, *budget, dev->quota, work_done); + work_done = sbdma_rx_process(sc, &(sc->sbm_rxdma), budget, 1); sbdma_tx_process(sc, &(sc->sbm_txdma), 1); - *budget -= work_done; - dev->quota -= work_done; - - if (work_done < work_to_do) { - netif_rx_complete(dev); + if (work_done < budget) { + netif_rx_complete(dev, napi); #ifdef CONFIG_SBMAC_COALESCE __raw_writeq(((M_MAC_INT_EOP_COUNT | M_MAC_INT_EOP_TIMER) << S_MAC_TX_CH0) | @@ -2905,57 +2676,198 @@ static int sbmac_poll(struct net_device *dev, int *budget) #endif } - return (work_done >= work_to_do); + return work_done; +} + + +static int __init sbmac_probe(struct platform_device *pldev) +{ + struct net_device *dev; + struct sbmac_softc *sc; + void __iomem *sbm_base; + struct resource *res; + u64 sbmac_orig_hwaddr; + int err; + + res = platform_get_resource(pldev, IORESOURCE_MEM, 0); + BUG_ON(!res); + sbm_base = ioremap_nocache(res->start, res->end - res->start + 1); + if (!sbm_base) { + printk(KERN_ERR "%s: unable to map device registers\n", + pldev->dev.bus_id); + err = -ENOMEM; + goto out_out; + } + + /* + * The R_MAC_ETHERNET_ADDR register will be set to some nonzero + * value for us by the firmware if we're going to use this MAC. + * If we find a zero, skip this MAC. + */ + sbmac_orig_hwaddr = __raw_readq(sbm_base + R_MAC_ETHERNET_ADDR); + pr_debug("%s: %sconfiguring MAC at 0x%08Lx\n", pldev->dev.bus_id, + sbmac_orig_hwaddr ? 
"" : "not ", (long long)res->start); + if (sbmac_orig_hwaddr == 0) { + err = 0; + goto out_unmap; + } + + /* + * Okay, cool. Initialize this MAC. + */ + dev = alloc_etherdev(sizeof(struct sbmac_softc)); + if (!dev) { + printk(KERN_ERR "%s: unable to allocate etherdev\n", + pldev->dev.bus_id); + err = -ENOMEM; + goto out_unmap; + } + + pldev->dev.driver_data = dev; + SET_NETDEV_DEV(dev, &pldev->dev); + + sc = netdev_priv(dev); + sc->sbm_base = sbm_base; + + err = sbmac_init(pldev, res->start); + if (err) + goto out_kfree; + + return 0; + +out_kfree: + free_netdev(dev); + __raw_writeq(sbmac_orig_hwaddr, sbm_base + R_MAC_ETHERNET_ADDR); + +out_unmap: + iounmap(sbm_base); + +out_out: + return err; +} + +static int __exit sbmac_remove(struct platform_device *pldev) +{ + struct net_device *dev = pldev->dev.driver_data; + struct sbmac_softc *sc = netdev_priv(dev); + + unregister_netdev(dev); + sbmac_uninitctx(sc); + iounmap(sc->sbm_base); + free_netdev(dev); + + return 0; } + +static struct platform_device **sbmac_pldev; +static int sbmac_max_units; + #if defined(SBMAC_ETH0_HWADDR) || defined(SBMAC_ETH1_HWADDR) || defined(SBMAC_ETH2_HWADDR) || defined(SBMAC_ETH3_HWADDR) -static void -sbmac_setup_hwaddr(int chan,char *addr) +static void __init sbmac_setup_hwaddr(int idx, char *addr) { + void __iomem *sbm_base; + unsigned long start, end; uint8_t eaddr[6]; uint64_t val; - unsigned long port; - port = A_MAC_CHANNEL_BASE(chan); - sbmac_parse_hwaddr(addr,eaddr); + if (idx >= sbmac_max_units) + return; + + start = A_MAC_CHANNEL_BASE(idx); + end = A_MAC_CHANNEL_BASE(idx + 1) - 1; + + sbm_base = ioremap_nocache(start, end - start + 1); + if (!sbm_base) { + printk(KERN_ERR "%s: unable to map device registers\n", + sbmac_string); + return; + } + + sbmac_parse_hwaddr(addr, eaddr); val = sbmac_addr2reg(eaddr); - __raw_writeq(val, IOADDR(port+R_MAC_ETHERNET_ADDR)); - val = __raw_readq(IOADDR(port+R_MAC_ETHERNET_ADDR)); + __raw_writeq(val, sbm_base + R_MAC_ETHERNET_ADDR); + val = __raw_readq(sbm_base + R_MAC_ETHERNET_ADDR); + + iounmap(sbm_base); } #endif -static struct net_device *dev_sbmac[MAX_UNITS]; +static int __init sbmac_platform_probe_one(int idx) +{ + struct platform_device *pldev; + struct { + struct resource r; + char name[strlen(sbmac_pretty) + 4]; + } *res; + int err; + + res = kzalloc(sizeof(*res), GFP_KERNEL); + if (!res) { + printk(KERN_ERR "%s.%d: unable to allocate memory\n", + sbmac_string, idx); + err = -ENOMEM; + goto out_err; + } + + /* + * This is the base address of the MAC. + */ + snprintf(res->name, sizeof(res->name), "%s %d", sbmac_pretty, idx); + res->r.name = res->name; + res->r.flags = IORESOURCE_MEM; + res->r.start = A_MAC_CHANNEL_BASE(idx); + res->r.end = A_MAC_CHANNEL_BASE(idx + 1) - 1; + + pldev = platform_device_register_simple(sbmac_string, idx, &res->r, 1); + if (IS_ERR(pldev)) { + printk(KERN_ERR "%s.%d: unable to register platform device\n", + sbmac_string, idx); + err = PTR_ERR(pldev); + goto out_kfree; + } + + if (!pldev->dev.driver) { + err = 0; /* No hardware at this address. */ + goto out_unregister; + } + + sbmac_pldev[idx] = pldev; + return 0; + +out_unregister: + platform_device_unregister(pldev); + +out_kfree: + kfree(res); + +out_err: + return err; +} -static int __init -sbmac_init_module(void) +static void __init sbmac_platform_probe(void) { - int idx; - struct net_device *dev; - unsigned long port; - int chip_max_units; + int i; /* Set the number of available units based on the SOC type. 
*/ switch (soc_type) { case K_SYS_SOC_TYPE_BCM1250: case K_SYS_SOC_TYPE_BCM1250_ALT: - chip_max_units = 3; + sbmac_max_units = 3; break; case K_SYS_SOC_TYPE_BCM1120: case K_SYS_SOC_TYPE_BCM1125: case K_SYS_SOC_TYPE_BCM1125H: - case K_SYS_SOC_TYPE_BCM1250_ALT2: /* Hybrid */ - chip_max_units = 2; + case K_SYS_SOC_TYPE_BCM1250_ALT2: /* Hybrid */ + sbmac_max_units = 2; break; case K_SYS_SOC_TYPE_BCM1x55: case K_SYS_SOC_TYPE_BCM1x80: - chip_max_units = 4; + sbmac_max_units = 4; break; default: - chip_max_units = 0; - break; + return; /* none */ } - if (chip_max_units > MAX_UNITS) - chip_max_units = MAX_UNITS; /* * For bringup when not using the firmware, we can pre-fill @@ -2963,89 +2875,71 @@ sbmac_init_module(void) * specified in this file (or maybe from the config file?) */ #ifdef SBMAC_ETH0_HWADDR - if (chip_max_units > 0) - sbmac_setup_hwaddr(0,SBMAC_ETH0_HWADDR); + sbmac_setup_hwaddr(0, SBMAC_ETH0_HWADDR); #endif #ifdef SBMAC_ETH1_HWADDR - if (chip_max_units > 1) - sbmac_setup_hwaddr(1,SBMAC_ETH1_HWADDR); + sbmac_setup_hwaddr(1, SBMAC_ETH1_HWADDR); #endif #ifdef SBMAC_ETH2_HWADDR - if (chip_max_units > 2) - sbmac_setup_hwaddr(2,SBMAC_ETH2_HWADDR); + sbmac_setup_hwaddr(2, SBMAC_ETH2_HWADDR); #endif #ifdef SBMAC_ETH3_HWADDR - if (chip_max_units > 3) - sbmac_setup_hwaddr(3,SBMAC_ETH3_HWADDR); + sbmac_setup_hwaddr(3, SBMAC_ETH3_HWADDR); #endif + sbmac_pldev = kcalloc(sbmac_max_units, sizeof(*sbmac_pldev), + GFP_KERNEL); + if (!sbmac_pldev) { + printk(KERN_ERR "%s: unable to allocate memory\n", + sbmac_string); + return; + } + /* * Walk through the Ethernet controllers and find * those who have their MAC addresses set. */ - for (idx = 0; idx < chip_max_units; idx++) { + for (i = 0; i < sbmac_max_units; i++) + if (sbmac_platform_probe_one(i)) + break; +} - /* - * This is the base address of the MAC. - */ - port = A_MAC_CHANNEL_BASE(idx); +static void __exit sbmac_platform_cleanup(void) +{ + int i; - /* - * The R_MAC_ETHERNET_ADDR register will be set to some nonzero - * value for us by the firmware if we are going to use this MAC. - * If we find a zero, skip this MAC. - */ + for (i = 0; i < sbmac_max_units; i++) + platform_device_unregister(sbmac_pldev[i]); + kfree(sbmac_pldev); +} - sbmac_orig_hwaddr[idx] = __raw_readq(IOADDR(port+R_MAC_ETHERNET_ADDR)); - if (sbmac_orig_hwaddr[idx] == 0) { - printk(KERN_DEBUG "sbmac: not configuring MAC at " - "%lx\n", port); - continue; - } - /* - * Okay, cool. Initialize this MAC. 
- */ +static struct platform_driver sbmac_driver = { + .probe = sbmac_probe, + .remove = __exit_p(sbmac_remove), + .driver = { + .name = sbmac_string, + }, +}; - dev = alloc_etherdev(sizeof(struct sbmac_softc)); - if (!dev) - return -ENOMEM; +static int __init sbmac_init_module(void) +{ + int err; - printk(KERN_DEBUG "sbmac: configuring MAC at %lx\n", port); + err = platform_driver_register(&sbmac_driver); + if (err) + return err; - dev->irq = UNIT_INT(idx); - dev->base_addr = port; - dev->mem_end = 0; - if (sbmac_init(dev, idx)) { - port = A_MAC_CHANNEL_BASE(idx); - __raw_writeq(sbmac_orig_hwaddr[idx], IOADDR(port+R_MAC_ETHERNET_ADDR)); - free_netdev(dev); - continue; - } - dev_sbmac[idx] = dev; - } - return 0; -} + sbmac_platform_probe(); + return err; +} -static void __exit -sbmac_cleanup_module(void) +static void __exit sbmac_cleanup_module(void) { - struct net_device *dev; - int idx; - - for (idx = 0; idx < MAX_UNITS; idx++) { - struct sbmac_softc *sc; - dev = dev_sbmac[idx]; - if (!dev) - continue; - - sc = netdev_priv(dev); - unregister_netdev(dev); - sbmac_uninitctx(sc); - free_netdev(dev); - } + sbmac_platform_cleanup(); + platform_driver_unregister(&sbmac_driver); } module_init(sbmac_init_module); diff --git a/drivers/net/sc92031.c b/drivers/net/sc92031.c index 872cb1cc9c41c05d2b12048450a5b187b782608f..37b42394560dfc239377096ee30da8e5887aaff3 100644 --- a/drivers/net/sc92031.c +++ b/drivers/net/sc92031.c @@ -1372,9 +1372,14 @@ static void sc92031_ethtool_get_strings(struct net_device *dev, SILAN_STATS_NUM * ETH_GSTRING_LEN); } -static int sc92031_ethtool_get_stats_count(struct net_device *dev) +static int sc92031_ethtool_get_sset_count(struct net_device *dev, int sset) { - return SILAN_STATS_NUM; + switch (sset) { + case ETH_SS_STATS: + return SILAN_STATS_NUM; + default: + return -EOPNOTSUPP; + } } static void sc92031_ethtool_get_ethtool_stats(struct net_device *dev, @@ -1396,13 +1401,9 @@ static struct ethtool_ops sc92031_ethtool_ops = { .set_wol = sc92031_ethtool_set_wol, .nway_reset = sc92031_ethtool_nway_reset, .get_link = ethtool_op_get_link, - .get_tx_csum = ethtool_op_get_tx_csum, - .get_sg = ethtool_op_get_sg, - .get_tso = ethtool_op_get_tso, .get_strings = sc92031_ethtool_get_strings, - .get_stats_count = sc92031_ethtool_get_stats_count, + .get_sset_count = sc92031_ethtool_get_sset_count, .get_ethtool_stats = sc92031_ethtool_get_ethtool_stats, - .get_ufo = ethtool_op_get_ufo, }; static int __devinit sc92031_probe(struct pci_dev *pdev, diff --git a/drivers/net/seeq8005.c b/drivers/net/seeq8005.c index 4bce7c4f373ce8754964818a7a3c3f53ee535823..48c64fb20eecf164b9022aae727e6dc86e8f5290 100644 --- a/drivers/net/seeq8005.c +++ b/drivers/net/seeq8005.c @@ -67,7 +67,6 @@ static unsigned int net_debug = NET_DEBUG; /* Information that need to be kept for each board. */ struct net_local { - struct net_device_stats stats; unsigned short receive_ptr; /* What address in packet memory do we expect a recv_pkt_header? */ long open_time; /* Useless example local info. */ }; @@ -86,7 +85,6 @@ static int seeq8005_send_packet(struct sk_buff *skb, struct net_device *dev); static irqreturn_t seeq8005_interrupt(int irq, void *dev_id); static void seeq8005_rx(struct net_device *dev); static int seeq8005_close(struct net_device *dev); -static struct net_device_stats *seeq8005_get_stats(struct net_device *dev); static void set_multicast_list(struct net_device *dev); /* Example routines you must write ;->. 
*/ @@ -160,6 +158,7 @@ static int __init seeq8005_probe1(struct net_device *dev, int ioaddr) int old_dmaar; int old_rear; int retval; + DECLARE_MAC_BUF(mac); if (!request_region(ioaddr, SEEQ8005_IO_EXTENT, "seeq8005")) return -ENODEV; @@ -303,7 +302,8 @@ static int __init seeq8005_probe1(struct net_device *dev, int ioaddr) /* Retrieve and print the ethernet address. */ for (i = 0; i < 6; i++) - printk(" %2.2x", dev->dev_addr[i] = SA_prom[i+6]); + dev->dev_addr[i] = SA_prom[i+6]; + printk("%s", print_mac(mac, dev->dev_addr)); if (dev->irq == 0xff) ; /* Do nothing: a user-level program will set it. */ @@ -338,7 +338,6 @@ static int __init seeq8005_probe1(struct net_device *dev, int ioaddr) dev->hard_start_xmit = seeq8005_send_packet; dev->tx_timeout = seeq8005_timeout; dev->watchdog_timeo = HZ/20; - dev->get_stats = seeq8005_get_stats; dev->set_multicast_list = set_multicast_list; dev->flags &= ~IFF_MULTICAST; @@ -391,7 +390,6 @@ static void seeq8005_timeout(struct net_device *dev) static int seeq8005_send_packet(struct sk_buff *skb, struct net_device *dev) { - struct net_local *lp = netdev_priv(dev); short length = skb->len; unsigned char *buf; @@ -407,7 +405,7 @@ static int seeq8005_send_packet(struct sk_buff *skb, struct net_device *dev) hardware_send_packet(dev, buf, length); dev->trans_start = jiffies; - lp->stats.tx_bytes += length; + dev->stats.tx_bytes += length; dev_kfree_skb (skb); /* You might need to clean up and record Tx statistics here. */ @@ -463,7 +461,7 @@ static irqreturn_t seeq8005_interrupt(int irq, void *dev_id) if (status & SEEQSTAT_TX_INT) { handled = 1; outw( SEEQCMD_TX_INT_ACK | (status & SEEQCMD_INT_MASK), SEEQ_CMD); - lp->stats.tx_packets++; + dev->stats.tx_packets++; netif_wake_queue(dev); /* Inform upper layers. */ } if (status & SEEQSTAT_RX_INT) { @@ -531,11 +529,11 @@ static void seeq8005_rx(struct net_device *dev) } if (pkt_hdr & SEEQPKTS_ANY_ERROR) { /* There was an error. */ - lp->stats.rx_errors++; - if (pkt_hdr & SEEQPKTS_SHORT) lp->stats.rx_frame_errors++; - if (pkt_hdr & SEEQPKTS_DRIB) lp->stats.rx_frame_errors++; - if (pkt_hdr & SEEQPKTS_OVERSIZE) lp->stats.rx_over_errors++; - if (pkt_hdr & SEEQPKTS_CRC_ERR) lp->stats.rx_crc_errors++; + dev->stats.rx_errors++; + if (pkt_hdr & SEEQPKTS_SHORT) dev->stats.rx_frame_errors++; + if (pkt_hdr & SEEQPKTS_DRIB) dev->stats.rx_frame_errors++; + if (pkt_hdr & SEEQPKTS_OVERSIZE) dev->stats.rx_over_errors++; + if (pkt_hdr & SEEQPKTS_CRC_ERR) dev->stats.rx_crc_errors++; /* skip over this packet */ outw( SEEQCMD_FIFO_WRITE | SEEQCMD_DMA_INT_ACK | (status & SEEQCMD_INT_MASK), SEEQ_CMD); outw( (lp->receive_ptr & 0xff00)>>8, SEEQ_REA); @@ -547,7 +545,7 @@ static void seeq8005_rx(struct net_device *dev) skb = dev_alloc_skb(pkt_len); if (skb == NULL) { printk("%s: Memory squeeze, dropping packet.\n", dev->name); - lp->stats.rx_dropped++; + dev->stats.rx_dropped++; break; } skb_reserve(skb, 2); /* align data on 16 byte */ @@ -567,8 +565,8 @@ static void seeq8005_rx(struct net_device *dev) skb->protocol=eth_type_trans(skb,dev); netif_rx(skb); dev->last_rx = jiffies; - lp->stats.rx_packets++; - lp->stats.rx_bytes += pkt_len; + dev->stats.rx_packets++; + dev->stats.rx_bytes += pkt_len; } } while ((--boguscount) && (pkt_hdr & SEEQPKTH_CHAIN)); @@ -599,15 +597,6 @@ static int seeq8005_close(struct net_device *dev) } -/* Get the current statistics. This may be called with the card open or - closed. 
*/ -static struct net_device_stats *seeq8005_get_stats(struct net_device *dev) -{ - struct net_local *lp = netdev_priv(dev); - - return &lp->stats; -} - /* Set or clear the multicast filter for this adaptor. num_addrs == -1 Promiscuous mode, receive all packets num_addrs == 0 Normal mode, clear multicast list diff --git a/drivers/net/sgiseeq.c b/drivers/net/sgiseeq.c index 0fb74cb51c4bed2eaccd46287eaa7f02181b873e..ff4056310356a8e04b2b33634d1098e474347b2c 100644 --- a/drivers/net/sgiseeq.c +++ b/drivers/net/sgiseeq.c @@ -75,6 +75,7 @@ struct sgiseeq_init_block { /* Note the name ;-) */ struct sgiseeq_private { struct sgiseeq_init_block *srings; + dma_addr_t srings_dma; /* Ptrs to the descriptors in uncached space. */ struct sgiseeq_rx_desc *rx_desc; @@ -92,8 +93,6 @@ struct sgiseeq_private { unsigned char control; unsigned char mode; - struct net_device_stats stats; - spinlock_t tx_lock; }; @@ -266,18 +265,17 @@ static int init_seeq(struct net_device *dev, struct sgiseeq_private *sp, return 0; } -static inline void record_rx_errors(struct sgiseeq_private *sp, - unsigned char status) +static void record_rx_errors(struct net_device *dev, unsigned char status) { if (status & SEEQ_RSTAT_OVERF || status & SEEQ_RSTAT_SFRAME) - sp->stats.rx_over_errors++; + dev->stats.rx_over_errors++; if (status & SEEQ_RSTAT_CERROR) - sp->stats.rx_crc_errors++; + dev->stats.rx_crc_errors++; if (status & SEEQ_RSTAT_DERROR) - sp->stats.rx_frame_errors++; + dev->stats.rx_frame_errors++; if (status & SEEQ_RSTAT_REOF) - sp->stats.rx_errors++; + dev->stats.rx_errors++; } static inline void rx_maybe_restart(struct sgiseeq_private *sp, @@ -327,8 +325,8 @@ static inline void sgiseeq_rx(struct net_device *dev, struct sgiseeq_private *sp if (memcmp(eth_hdr(skb)->h_source, dev->dev_addr, ETH_ALEN)) { netif_rx(skb); dev->last_rx = jiffies; - sp->stats.rx_packets++; - sp->stats.rx_bytes += len; + dev->stats.rx_packets++; + dev->stats.rx_bytes += len; } else { /* Silently drop my own packets */ dev_kfree_skb_irq(skb); @@ -336,10 +334,10 @@ static inline void sgiseeq_rx(struct net_device *dev, struct sgiseeq_private *sp } else { printk (KERN_NOTICE "%s: Memory squeeze, deferring packet.\n", dev->name); - sp->stats.rx_dropped++; + dev->stats.rx_dropped++; } } else { - record_rx_errors(sp, pkt_status); + record_rx_errors(dev, pkt_status); } /* Return the entry to the ring pool. */ @@ -391,11 +389,11 @@ static inline void sgiseeq_tx(struct net_device *dev, struct sgiseeq_private *sp if (!(status & (HPC3_ETXCTRL_ACTIVE | SEEQ_TSTAT_PTRANS))) { /* Oops, HPC detected some sort of error. */ if (status & SEEQ_TSTAT_R16) - sp->stats.tx_aborted_errors++; + dev->stats.tx_aborted_errors++; if (status & SEEQ_TSTAT_UFLOW) - sp->stats.tx_fifo_errors++; + dev->stats.tx_fifo_errors++; if (status & SEEQ_TSTAT_LCLS) - sp->stats.collisions++; + dev->stats.collisions++; } /* Ack 'em... */ @@ -411,7 +409,7 @@ static inline void sgiseeq_tx(struct net_device *dev, struct sgiseeq_private *sp } break; } - sp->stats.tx_packets++; + dev->stats.tx_packets++; sp->tx_old = NEXT_TX(sp->tx_old); td->tdma.cntinfo &= ~(HPCDMA_XIU | HPCDMA_XIE); td->tdma.cntinfo |= HPCDMA_EOX; @@ -515,7 +513,7 @@ static int sgiseeq_start_xmit(struct sk_buff *skb, struct net_device *dev) /* Setup... */ skblen = skb->len; len = (skblen <= ETH_ZLEN) ? 
ETH_ZLEN : skblen; - sp->stats.tx_bytes += len; + dev->stats.tx_bytes += len; entry = sp->tx_new; td = &sp->tx_desc[entry]; @@ -568,13 +566,6 @@ static void timeout(struct net_device *dev) netif_wake_queue(dev); } -static struct net_device_stats *sgiseeq_get_stats(struct net_device *dev) -{ - struct sgiseeq_private *sp = netdev_priv(dev); - - return &sp->stats; -} - static void sgiseeq_set_multicast(struct net_device *dev) { struct sgiseeq_private *sp = (struct sgiseeq_private *) dev->priv; @@ -631,6 +622,7 @@ static int __init sgiseeq_probe(struct platform_device *pdev) struct sgiseeq_private *sp; struct net_device *dev; int err, i; + DECLARE_MAC_BUF(mac); dev = alloc_etherdev(sizeof (struct sgiseeq_private)); if (!dev) { @@ -643,13 +635,20 @@ static int __init sgiseeq_probe(struct platform_device *pdev) sp = netdev_priv(dev); /* Make private data page aligned */ - sr = (struct sgiseeq_init_block *) get_zeroed_page(GFP_KERNEL); + sr = dma_alloc_coherent(&pdev->dev, sizeof(*sp->srings), + &sp->srings_dma, GFP_KERNEL); if (!sr) { printk(KERN_ERR "Sgiseeq: Page alloc failed, aborting.\n"); err = -ENOMEM; goto err_out_free_dev; } sp->srings = sr; + sp->rx_desc = sp->srings->rxvector; + sp->tx_desc = sp->srings->txvector; + + /* A couple calculations now, saves many cycles later. */ + setup_rx_ring(sp->rx_desc, SEEQ_RX_BUFFERS); + setup_tx_ring(sp->tx_desc, SEEQ_TX_BUFFERS); memcpy(dev->dev_addr, pd->mac, ETH_ALEN); @@ -662,19 +661,6 @@ static int __init sgiseeq_probe(struct platform_device *pdev) sp->name = sgiseeqstr; sp->mode = SEEQ_RCMD_RBCAST; - sp->rx_desc = (struct sgiseeq_rx_desc *) - CKSEG1ADDR(ALIGNED(&sp->srings->rxvector[0])); - dma_cache_wback_inv((unsigned long)&sp->srings->rxvector, - sizeof(sp->srings->rxvector)); - sp->tx_desc = (struct sgiseeq_tx_desc *) - CKSEG1ADDR(ALIGNED(&sp->srings->txvector[0])); - dma_cache_wback_inv((unsigned long)&sp->srings->txvector, - sizeof(sp->srings->txvector)); - - /* A couple calculations now, saves many cycles later. */ - setup_rx_ring(sp->rx_desc, SEEQ_RX_BUFFERS); - setup_tx_ring(sp->tx_desc, SEEQ_TX_BUFFERS); - /* Setup PIO and DMA transfer timing */ sp->hregs->pconfig = 0x161; sp->hregs->dconfig = HPC3_EDCFG_FIRQ | HPC3_EDCFG_FEOP | @@ -699,7 +685,6 @@ static int __init sgiseeq_probe(struct platform_device *pdev) dev->hard_start_xmit = sgiseeq_start_xmit; dev->tx_timeout = timeout; dev->watchdog_timeo = (200 * HZ) / 1000; - dev->get_stats = sgiseeq_get_stats; dev->set_multicast_list = sgiseeq_set_multicast; dev->set_mac_address = sgiseeq_set_mac_address; dev->irq = irq; @@ -711,9 +696,8 @@ static int __init sgiseeq_probe(struct platform_device *pdev) goto err_out_free_page; } - printk(KERN_INFO "%s: %s ", dev->name, sgiseeqstr); - for (i = 0; i < 6; i++) - printk("%2.2x%c", dev->dev_addr[i], i == 5 ? 
'\n' : ':'); + printk(KERN_INFO "%s: %s %s\n", + dev->name, sgiseeqstr, print_mac(mac, dev->dev_addr)); return 0; @@ -732,7 +716,8 @@ static int __exit sgiseeq_remove(struct platform_device *pdev) struct sgiseeq_private *sp = netdev_priv(dev); unregister_netdev(dev); - free_page((unsigned long) sp->srings); + dma_free_coherent(&pdev->dev, sizeof(*sp->srings), sp->srings, + sp->srings_dma); free_netdev(dev); platform_set_drvdata(pdev, NULL); diff --git a/drivers/net/shaper.c b/drivers/net/shaper.c index 4c3d98ff4cd48bab194c6f521e94b1522cd71203..228f650250f6f0b6ebeacc2f63dd3f6addcc1d58 100644 --- a/drivers/net/shaper.c +++ b/drivers/net/shaper.c @@ -86,6 +86,7 @@ #include #include +#include struct shaper_cb { unsigned long shapeclock; /* Time it should go out */ @@ -170,7 +171,7 @@ static int shaper_start_xmit(struct sk_buff *skb, struct net_device *dev) */ if(time_after(SHAPERCB(skb)->shapeclock,jiffies + SHAPER_LATENCY)) { dev_kfree_skb(skb); - shaper->stats.tx_dropped++; + dev->stats.tx_dropped++; } else skb_queue_tail(&shaper->sendq, skb); } @@ -181,7 +182,7 @@ static int shaper_start_xmit(struct sk_buff *skb, struct net_device *dev) { ptr=skb_dequeue(&shaper->sendq); dev_kfree_skb(ptr); - shaper->stats.collisions++; + dev->stats.collisions++; } shaper_kick(shaper); spin_unlock(&shaper->lock); @@ -206,8 +207,8 @@ static void shaper_queue_xmit(struct shaper *shaper, struct sk_buff *skb) shaper->dev->name,newskb->priority); dev_queue_xmit(newskb); - shaper->stats.tx_bytes += skb->len; - shaper->stats.tx_packets++; + shaper->dev->stats.tx_bytes += skb->len; + shaper->dev->stats.tx_packets++; if(sh_debug) printk("Kicked new frame out.\n"); @@ -329,22 +330,17 @@ static int shaper_close(struct net_device *dev) * ARP and other resolutions and not before. 
*/ -static struct net_device_stats *shaper_get_stats(struct net_device *dev) -{ - struct shaper *sh=dev->priv; - return &sh->stats; -} - static int shaper_header(struct sk_buff *skb, struct net_device *dev, - unsigned short type, void *daddr, void *saddr, unsigned len) + unsigned short type, + const void *daddr, const void *saddr, unsigned len) { struct shaper *sh=dev->priv; int v; if(sh_debug) printk("Shaper header\n"); - skb->dev=sh->dev; - v=sh->hard_header(skb,sh->dev,type,daddr,saddr,len); - skb->dev=dev; + skb->dev = sh->dev; + v = dev_hard_header(skb, sh->dev, type, daddr, saddr, len); + skb->dev = dev; return v; } @@ -356,7 +352,7 @@ static int shaper_rebuild_header(struct sk_buff *skb) if(sh_debug) printk("Shaper rebuild header\n"); skb->dev=sh->dev; - v=sh->rebuild_header(skb); + v = sh->dev->header_ops->rebuild(skb); skb->dev=dev; return v; } @@ -420,51 +416,17 @@ static int shaper_neigh_setup_dev(struct net_device *dev, struct neigh_parms *p) #endif +static const struct header_ops shaper_ops = { + .create = shaper_header, + .rebuild = shaper_rebuild_header, +}; + static int shaper_attach(struct net_device *shdev, struct shaper *sh, struct net_device *dev) { sh->dev = dev; - sh->hard_start_xmit=dev->hard_start_xmit; sh->get_stats=dev->get_stats; - if(dev->hard_header) - { - sh->hard_header=dev->hard_header; - shdev->hard_header = shaper_header; - } - else - shdev->hard_header = NULL; - - if(dev->rebuild_header) - { - sh->rebuild_header = dev->rebuild_header; - shdev->rebuild_header = shaper_rebuild_header; - } - else - shdev->rebuild_header = NULL; -#if 0 - if(dev->hard_header_cache) - { - sh->hard_header_cache = dev->hard_header_cache; - shdev->hard_header_cache= shaper_cache; - } - else - { - shdev->hard_header_cache= NULL; - } - - if(dev->header_cache_update) - { - sh->header_cache_update = dev->header_cache_update; - shdev->header_cache_update = shaper_cache_update; - } - else - shdev->header_cache_update= NULL; -#else - shdev->header_cache_update = NULL; - shdev->hard_header_cache = NULL; -#endif shdev->neigh_setup = shaper_neigh_setup_dev; - shdev->hard_header_len=dev->hard_header_len; shdev->type=dev->type; shdev->addr_len=dev->addr_len; @@ -488,7 +450,7 @@ static int shaper_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) { case SHAPER_SET_DEV: { - struct net_device *them=__dev_get_by_name(ss->ss_name); + struct net_device *them=__dev_get_by_name(&init_net, ss->ss_name); if(them==NULL) return -ENODEV; if(sh->dev) @@ -532,14 +494,11 @@ static void __init shaper_setup(struct net_device *dev) * Set up the shaper. */ - SET_MODULE_OWNER(dev); - shaper_init_priv(dev); dev->open = shaper_open; dev->stop = shaper_close; dev->hard_start_xmit = shaper_start_xmit; - dev->get_stats = shaper_get_stats; dev->set_multicast_list = NULL; /* @@ -550,12 +509,6 @@ static void __init shaper_setup(struct net_device *dev) * Handlers for when we attach to a device. 
*/ - dev->hard_header = shaper_header; - dev->rebuild_header = shaper_rebuild_header; -#if 0 - dev->hard_header_cache = shaper_cache; - dev->header_cache_update= shaper_cache_update; -#endif dev->neigh_setup = shaper_neigh_setup_dev; dev->do_ioctl = shaper_ioctl; dev->hard_header_len = 0; diff --git a/drivers/net/sis190.c b/drivers/net/sis190.c index d470b19c08107a9f603686f1c45ffd48d7159366..720088396bb917a89552bedf521487c425649b58 100644 --- a/drivers/net/sis190.c +++ b/drivers/net/sis190.c @@ -47,24 +47,13 @@ #define PHY_ID_ANY 0x1f #define MII_REG_ANY 0x1f -#ifdef CONFIG_SIS190_NAPI -#define NAPI_SUFFIX "-NAPI" -#else -#define NAPI_SUFFIX "" -#endif - -#define DRV_VERSION "1.2" NAPI_SUFFIX +#define DRV_VERSION "1.2" #define DRV_NAME "sis190" #define SIS190_DRIVER_NAME DRV_NAME " Gigabit Ethernet driver " DRV_VERSION #define PFX DRV_NAME ": " -#ifdef CONFIG_SIS190_NAPI -#define sis190_rx_skb netif_receive_skb -#define sis190_rx_quota(count, quota) min(count, quota) -#else #define sis190_rx_skb netif_rx #define sis190_rx_quota(count, quota) count -#endif #define MAC_ADDR_LEN 6 @@ -281,7 +270,6 @@ struct sis190_private { void __iomem *mmio_addr; struct pci_dev *pci_dev; struct net_device *dev; - struct net_device_stats stats; spinlock_t lock; u32 rx_buf_sz; u32 cur_rx; @@ -580,7 +568,7 @@ static inline int sis190_rx_pkt_err(u32 status, struct net_device_stats *stats) static int sis190_rx_interrupt(struct net_device *dev, struct sis190_private *tp, void __iomem *ioaddr) { - struct net_device_stats *stats = &tp->stats; + struct net_device_stats *stats = &dev->stats; u32 rx_left, cur_rx = tp->cur_rx; u32 delta, count; @@ -694,8 +682,8 @@ static void sis190_tx_interrupt(struct net_device *dev, skb = tp->Tx_skbuff[entry]; - tp->stats.tx_packets++; - tp->stats.tx_bytes += skb->len; + dev->stats.tx_packets++; + dev->stats.tx_bytes += skb->len; sis190_unmap_tx_skb(tp->pci_dev, skb, txd); tp->Tx_skbuff[entry] = NULL; @@ -1091,7 +1079,7 @@ static void sis190_tx_clear(struct sis190_private *tp) tp->Tx_skbuff[i] = NULL; dev_kfree_skb(skb); - tp->stats.tx_dropped++; + tp->dev->stats.tx_dropped++; } tp->cur_tx = tp->dirty_tx = 0; } @@ -1115,10 +1103,8 @@ static void sis190_down(struct net_device *dev) synchronize_irq(dev->irq); - if (!poll_locked) { - netif_poll_disable(dev); + if (!poll_locked) poll_locked++; - } synchronize_sched(); @@ -1137,8 +1123,6 @@ static int sis190_close(struct net_device *dev) free_irq(dev->irq, dev); - netif_poll_enable(dev); - pci_free_consistent(pdev, TX_RING_BYTES, tp->TxDescRing, tp->tx_dma); pci_free_consistent(pdev, RX_RING_BYTES, tp->RxDescRing, tp->rx_dma); @@ -1158,7 +1142,7 @@ static int sis190_start_xmit(struct sk_buff *skb, struct net_device *dev) if (unlikely(skb->len < ETH_ZLEN)) { if (skb_padto(skb, ETH_ZLEN)) { - tp->stats.tx_dropped++; + dev->stats.tx_dropped++; goto out; } len = ETH_ZLEN; @@ -1211,13 +1195,6 @@ static int sis190_start_xmit(struct sk_buff *skb, struct net_device *dev) return NETDEV_TX_OK; } -static struct net_device_stats *sis190_get_stats(struct net_device *dev) -{ - struct sis190_private *tp = netdev_priv(dev); - - return &tp->stats; -} - static void sis190_free_phy(struct list_head *first_phy) { struct sis190_phy *cur, *next; @@ -1436,7 +1413,6 @@ static struct net_device * __devinit sis190_init_board(struct pci_dev *pdev) goto err_out_0; } - SET_MODULE_OWNER(dev); SET_NETDEV_DEV(dev, &pdev->dev); tp = netdev_priv(dev); @@ -1783,6 +1759,7 @@ static int __devinit sis190_init_one(struct pci_dev *pdev, struct net_device *dev; void __iomem 
*ioaddr; int rc; + DECLARE_MAC_BUF(mac); if (!printed_version) { net_drv(&debug, KERN_INFO SIS190_DRIVER_NAME " loaded.\n"); @@ -1811,7 +1788,6 @@ static int __devinit sis190_init_one(struct pci_dev *pdev, dev->open = sis190_open; dev->stop = sis190_close; dev->do_ioctl = sis190_ioctl; - dev->get_stats = sis190_get_stats; dev->tx_timeout = sis190_tx_timeout; dev->watchdog_timeo = SIS190_TX_TIMEOUT; dev->hard_start_xmit = sis190_start_xmit; @@ -1834,12 +1810,9 @@ static int __devinit sis190_init_one(struct pci_dev *pdev, goto err_remove_mii; net_probe(tp, KERN_INFO "%s: %s at %p (IRQ: %d), " - "%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x\n", - pci_name(pdev), sis_chip_info[ent->driver_data].name, - ioaddr, dev->irq, - dev->dev_addr[0], dev->dev_addr[1], - dev->dev_addr[2], dev->dev_addr[3], - dev->dev_addr[4], dev->dev_addr[5]); + "%s\n", + pci_name(pdev), sis_chip_info[ent->driver_data].name, + ioaddr, dev->irq, print_mac(mac, dev->dev_addr)); net_probe(tp, KERN_INFO "%s: %s mode.\n", dev->name, (tp->features & F_HAS_RGMII) ? "RGMII" : "GMII"); diff --git a/drivers/net/sis900.c b/drivers/net/sis900.c index 7c6e4808399a047501f0fddbe0a4462f2f4fab13..0857d2c88aa08fd963f26e23fe1b8f5fe1a7bd94 100644 --- a/drivers/net/sis900.c +++ b/drivers/net/sis900.c @@ -158,7 +158,6 @@ typedef struct _BufferDesc { } BufferDesc; struct sis900_private { - struct net_device_stats stats; struct pci_dev * pci_dev; spinlock_t lock; @@ -221,7 +220,6 @@ static void sis900_finish_xmit (struct net_device *net_dev); static irqreturn_t sis900_interrupt(int irq, void *dev_instance); static int sis900_close(struct net_device *net_dev); static int mii_ioctl(struct net_device *net_dev, struct ifreq *rq, int cmd); -static struct net_device_stats *sis900_get_stats(struct net_device *net_dev); static u16 sis900_mcast_bitnr(u8 *addr, u8 revision); static void set_rx_mode(struct net_device *net_dev); static void sis900_reset(struct net_device *net_dev); @@ -406,6 +404,7 @@ static int __devinit sis900_probe(struct pci_dev *pci_dev, int i, ret; const char *card_name = card_names[pci_id->driver_data]; const char *dev_name = pci_name(pci_dev); + DECLARE_MAC_BUF(mac); /* when built into the kernel, we only print version if device is found */ #ifndef MODULE @@ -430,7 +429,6 @@ static int __devinit sis900_probe(struct pci_dev *pci_dev, net_dev = alloc_etherdev(sizeof(struct sis900_private)); if (!net_dev) return -ENOMEM; - SET_MODULE_OWNER(net_dev); SET_NETDEV_DEV(net_dev, &pci_dev->dev); /* We do a request_region() to register /proc/ioports info. 
*/ @@ -467,7 +465,6 @@ static int __devinit sis900_probe(struct pci_dev *pci_dev, net_dev->open = &sis900_open; net_dev->hard_start_xmit = &sis900_start_xmit; net_dev->stop = &sis900_close; - net_dev->get_stats = &sis900_get_stats; net_dev->set_config = &sis900_set_config; net_dev->set_multicast_list = &set_rx_mode; net_dev->do_ioctl = &mii_ioctl; @@ -537,11 +534,9 @@ static int __devinit sis900_probe(struct pci_dev *pci_dev, goto err_unmap_rx; /* print some information about our NIC */ - printk(KERN_INFO "%s: %s at %#lx, IRQ %d, ", net_dev->name, - card_name, ioaddr, net_dev->irq); - for (i = 0; i < 5; i++) - printk("%2.2x:", (u8)net_dev->dev_addr[i]); - printk("%2.2x.\n", net_dev->dev_addr[i]); + printk(KERN_INFO "%s: %s at %#lx, IRQ %d, %s\n", + net_dev->name, card_name, ioaddr, net_dev->irq, + print_mac(mac, net_dev->dev_addr)); /* Detect Wake on Lan support */ ret = (inl(net_dev->base_addr + CFGPMC) & PMESP) >> 27; @@ -1543,7 +1538,7 @@ static void sis900_tx_timeout(struct net_device *net_dev) sis_priv->tx_skbuff[i] = NULL; sis_priv->tx_ring[i].cmdsts = 0; sis_priv->tx_ring[i].bufptr = 0; - sis_priv->stats.tx_dropped++; + net_dev->stats.tx_dropped++; } } sis_priv->tx_full = 0; @@ -1740,15 +1735,15 @@ static int sis900_rx(struct net_device *net_dev) printk(KERN_DEBUG "%s: Corrupted packet " "received, buffer status = 0x%8.8x/%d.\n", net_dev->name, rx_status, data_size); - sis_priv->stats.rx_errors++; + net_dev->stats.rx_errors++; if (rx_status & OVERRUN) - sis_priv->stats.rx_over_errors++; + net_dev->stats.rx_over_errors++; if (rx_status & (TOOLONG|RUNT)) - sis_priv->stats.rx_length_errors++; + net_dev->stats.rx_length_errors++; if (rx_status & (RXISERR | FAERR)) - sis_priv->stats.rx_frame_errors++; + net_dev->stats.rx_frame_errors++; if (rx_status & CRCERR) - sis_priv->stats.rx_crc_errors++; + net_dev->stats.rx_crc_errors++; /* reset buffer descriptor state */ sis_priv->rx_ring[entry].cmdsts = RX_BUF_SIZE; } else { @@ -1769,7 +1764,7 @@ static int sis900_rx(struct net_device *net_dev) * in the rx ring */ skb = sis_priv->rx_skbuff[entry]; - sis_priv->stats.rx_dropped++; + net_dev->stats.rx_dropped++; goto refill_rx_ring; } @@ -1794,10 +1789,10 @@ static int sis900_rx(struct net_device *net_dev) /* some network statistics */ if ((rx_status & BCAST) == MCAST) - sis_priv->stats.multicast++; + net_dev->stats.multicast++; net_dev->last_rx = jiffies; - sis_priv->stats.rx_bytes += rx_size; - sis_priv->stats.rx_packets++; + net_dev->stats.rx_bytes += rx_size; + net_dev->stats.rx_packets++; sis_priv->dirty_rx++; refill_rx_ring: sis_priv->rx_skbuff[entry] = skb; @@ -1828,7 +1823,7 @@ static int sis900_rx(struct net_device *net_dev) printk(KERN_INFO "%s: Memory squeeze," "deferring packet.\n", net_dev->name); - sis_priv->stats.rx_dropped++; + net_dev->stats.rx_dropped++; break; } sis_priv->rx_skbuff[entry] = skb; @@ -1879,20 +1874,20 @@ static void sis900_finish_xmit (struct net_device *net_dev) printk(KERN_DEBUG "%s: Transmit " "error, Tx status %8.8x.\n", net_dev->name, tx_status); - sis_priv->stats.tx_errors++; + net_dev->stats.tx_errors++; if (tx_status & UNDERRUN) - sis_priv->stats.tx_fifo_errors++; + net_dev->stats.tx_fifo_errors++; if (tx_status & ABORT) - sis_priv->stats.tx_aborted_errors++; + net_dev->stats.tx_aborted_errors++; if (tx_status & NOCARRIER) - sis_priv->stats.tx_carrier_errors++; + net_dev->stats.tx_carrier_errors++; if (tx_status & OWCOLL) - sis_priv->stats.tx_window_errors++; + net_dev->stats.tx_window_errors++; } else { /* packet successfully transmitted */ - 
sis_priv->stats.collisions += (tx_status & COLCNT) >> 16; - sis_priv->stats.tx_bytes += tx_status & DSIZE; - sis_priv->stats.tx_packets++; + net_dev->stats.collisions += (tx_status & COLCNT) >> 16; + net_dev->stats.tx_bytes += tx_status & DSIZE; + net_dev->stats.tx_packets++; } /* Free the original skb. */ skb = sis_priv->tx_skbuff[entry]; @@ -2138,21 +2133,6 @@ static int mii_ioctl(struct net_device *net_dev, struct ifreq *rq, int cmd) } } -/** - * sis900_get_stats - Get sis900 read/write statistics - * @net_dev: the net device to get statistics for - * - * get tx/rx statistics for sis900 - */ - -static struct net_device_stats * -sis900_get_stats(struct net_device *net_dev) -{ - struct sis900_private *sis_priv = net_dev->priv; - - return &sis_priv->stats; -} - /** * sis900_set_config - Set media type by net_device.set_config * @dev: the net device for media type change diff --git a/drivers/net/sk98lin/skge.c b/drivers/net/sk98lin/skge.c index 7dc9c9ebf5e7167aff8bc99e36cb26ac8699e974..20890e44f99a5be1d31b06713122b4f8f3267f01 100644 --- a/drivers/net/sk98lin/skge.c +++ b/drivers/net/sk98lin/skge.c @@ -4877,7 +4877,6 @@ static int __devinit skge_probe_one(struct pci_dev *pdev, goto out_free_netdev; } - SET_MODULE_OWNER(dev); dev->open = &SkGeOpen; dev->stop = &SkGeClose; dev->hard_start_xmit = &SkGeXmit; diff --git a/drivers/net/skfp/drvfbi.c b/drivers/net/skfp/drvfbi.c index 4fe624b0dd251907522f74d9aecd97554f3f3040..c77cc14b322714937b3d39185fa32bc1dbb3c0e6 100644 --- a/drivers/net/skfp/drvfbi.c +++ b/drivers/net/skfp/drvfbi.c @@ -43,25 +43,6 @@ static const char ID_sccs[] = "@(#)drvfbi.c 1.63 99/02/11 (C) SK " ; /* * valid configuration values are: */ -#ifdef ISA -const int opt_ints[] = {8, 3, 4, 5, 9, 10, 11, 12, 15} ; -const int opt_iops[] = {8, - 0x100, 0x120, 0x180, 0x1a0, 0x220, 0x240, 0x320, 0x340}; -const int opt_dmas[] = {4, 3, 5, 6, 7} ; -const int opt_eproms[] = {15, 0xc0, 0xc2, 0xc4, 0xc6, 0xc8, 0xca, 0xcc, 0xce, - 0xd0, 0xd2, 0xd4, 0xd6, 0xd8, 0xda, 0xdc} ; -#endif -#ifdef EISA -const int opt_ints[] = {5, 9, 10, 11} ; -const int opt_dmas[] = {0, 5, 6, 7} ; -const int opt_eproms[] = {0xc0, 0xc2, 0xc4, 0xc6, 0xc8, 0xca, 0xcc, 0xce, - 0xd0, 0xd2, 0xd4, 0xd6, 0xd8, 0xda, 0xdc} ; -#endif - -#ifdef MCA -int opt_ints[] = {3, 11, 10, 9} ; /* FM1 */ -int opt_eproms[] = {0, 0xc4, 0xc8, 0xcc, 0xd0, 0xd4, 0xd8, 0xdc} ; -#endif /* MCA */ /* * xPOS_ID:xxxx @@ -78,17 +59,9 @@ int opt_eproms[] = {0, 0xc4, 0xc8, 0xcc, 0xd0, 0xd4, 0xd8, 0xdc} ; */ #ifndef MULT_OEM #ifndef OEM_CONCEPT -#ifndef MCA const u_char oem_id[] = "xPOS_ID:xxxx" ; -#else -const u_char oem_id[] = "xPOSID1:xxxx" ; /* FM1 card id. */ -#endif #else /* OEM_CONCEPT */ -#ifndef MCA const u_char oem_id[] = OEM_ID ; -#else -const u_char oem_id[] = OEM_ID1 ; /* FM1 card id. */ -#endif /* MCA */ #endif /* OEM_CONCEPT */ #define ID_BYTE0 8 #define OEMID(smc,i) oem_id[ID_BYTE0 + i] @@ -109,23 +82,6 @@ extern int AIX_vpdReadByte() ; /* Prototype of a local function. 
*/ static void smt_stop_watchdog(struct s_smc *smc); -#ifdef MCA -static int read_card_id() ; -static void DisableSlotAccess() ; -static void EnableSlotAccess() ; -#ifdef AIX -extern int attach_POS_addr() ; -extern int detach_POS_addr() ; -extern u_char read_POS() ; -extern void write_POS() ; -extern int AIX_vpdReadByte() ; -#else -#define read_POS(smc,a1,a2) ((u_char) inp(a1)) -#define write_POS(smc,a1,a2,a3) outp((a1),(a3)) -#endif -#endif /* MCA */ - - /* * FDDI card reset */ @@ -139,51 +95,6 @@ static void card_start(struct s_smc *smc) smt_stop_watchdog(smc) ; -#ifdef ISA - outpw(CSR_A,0) ; /* reset for all chips */ - for (i = 10 ; i ; i--) /* delay for PLC's */ - (void)inpw(ISR_A) ; - OUT_82c54_TIMER(3,COUNT(2) | RW_OP(3) | TMODE(2)) ; - /* counter 2, mode 2 */ - OUT_82c54_TIMER(2,97) ; /* LSB */ - OUT_82c54_TIMER(2,0) ; /* MSB ( 15.6 us ) */ - outpw(CSR_A,CS_CRESET) ; -#endif -#ifdef EISA - outpw(CSR_A,0) ; /* reset for all chips */ - for (i = 10 ; i ; i--) /* delay for PLC's */ - (void)inpw(ISR_A) ; - outpw(CSR_A,CS_CRESET) ; - smc->hw.led = (2<<6) ; - outpw(CSR_A,CS_CRESET | smc->hw.led) ; -#endif -#ifdef MCA - outp(ADDR(CARD_DIS),0) ; /* reset for all chips */ - for (i = 10 ; i ; i--) /* delay for PLC's */ - (void)inpw(ISR_A) ; - outp(ADDR(CARD_EN),0) ; - /* first I/O after reset must not be a access to FORMAC or PLC */ - - /* - * bus timeout (MCA) - */ - OUT_82c54_TIMER(3,COUNT(2) | RW_OP(3) | TMODE(3)) ; - /* counter 2, mode 3 */ - OUT_82c54_TIMER(2,(2*24)) ; /* 3.9 us * 2 square wave */ - OUT_82c54_TIMER(2,0) ; /* MSB */ - - /* POS 102 indicated an activ Check Line or Buss Error monitoring */ - if (inpw(CSA_A) & (POS_EN_CHKINT | POS_EN_BUS_ERR)) { - outp(ADDR(IRQ_CHCK_EN),0) ; - } - - if (!((i = inpw(CSR_A)) & CS_SAS)) { - if (!(i & CS_BYSTAT)) { - outp(ADDR(BYPASS(STAT_INS)),0) ;/* insert station */ - } - } - outpw(LEDR_A,LED_1) ; /* yellow */ -#endif /* MCA */ #ifdef PCI /* * make sure no transfer activity is pending @@ -253,15 +164,7 @@ void card_stop(struct s_smc *smc) { smt_stop_watchdog(smc) ; smc->hw.mac_ring_is_up = 0 ; /* ring down */ -#ifdef ISA - outpw(CSR_A,0) ; /* reset for all chips */ -#endif -#ifdef EISA - outpw(CSR_A,0) ; /* reset for all chips */ -#endif -#ifdef MCA - outp(ADDR(CARD_DIS),0) ; /* reset for all chips */ -#endif + #ifdef PCI /* * make sure no transfer activity is pending @@ -284,60 +187,6 @@ void mac1_irq(struct s_smc *smc, u_short stu, u_short stl) { int restart_tx = 0 ; again: -#ifndef PCI -#ifndef ISA -/* - * FORMAC+ bug modified the queue pointer if many read/write accesses happens!? - */ - if (stl & (FM_SPCEPDS | /* parit/coding err. syn.q.*/ - FM_SPCEPDA0 | /* parit/coding err. a.q.0 */ - FM_SPCEPDA1 | /* parit/coding err. a.q.1 */ - FM_SPCEPDA2)) { /* parit/coding err. a.q.2 */ - SMT_PANIC(smc,SMT_E0132, SMT_E0132_MSG) ; - } - if (stl & (FM_STBURS | /* tx buffer underrun syn.q.*/ - FM_STBURA0 | /* tx buffer underrun a.q.0 */ - FM_STBURA1 | /* tx buffer underrun a.q.1 */ - FM_STBURA2)) { /* tx buffer underrun a.q.2 */ - SMT_PANIC(smc,SMT_E0133, SMT_E0133_MSG) ; - } -#endif - if ( (stu & (FM_SXMTABT | /* transmit abort */ -#ifdef SYNC - FM_STXABRS | /* syn. tx abort */ -#endif /* SYNC */ - FM_STXABRA0)) || /* asyn. tx abort */ - (stl & (FM_SQLCKS | /* lock for syn. q. */ - FM_SQLCKA0)) ) { /* lock for asyn. q. 
*/ - formac_tx_restart(smc) ; /* init tx */ - restart_tx = 1 ; - stu = inpw(FM_A(FM_ST1U)) ; - stl = inpw(FM_A(FM_ST1L)) ; - stu &= ~ (FM_STECFRMA0 | FM_STEFRMA0 | FM_STEFRMS) ; - if (stu || stl) - goto again ; - } - -#ifndef SYNC - if (stu & (FM_STECFRMA0 | /* end of chain asyn tx */ - FM_STEFRMA0)) { /* end of frame asyn tx */ - /* free tx_queue */ - smc->hw.n_a_send = 0 ; - if (++smc->hw.fp.tx_free < smc->hw.fp.tx_max) { - start_next_send(smc); - } - restart_tx = 1 ; - } -#else /* SYNC */ - if (stu & (FM_STEFRMA0 | /* end of asyn tx */ - FM_STEFRMS)) { /* end of sync tx */ - restart_tx = 1 ; - } -#endif /* SYNC */ - if (restart_tx) - llc_restart_tx(smc) ; -} -#else /* PCI */ /* * parity error: note encoding error is not possible in tag mode @@ -378,7 +227,7 @@ void mac1_irq(struct s_smc *smc, u_short stu, u_short stl) if (restart_tx) llc_restart_tx(smc) ; } -#endif /* PCI */ + /* * interrupt source= plc1 * this function is called in nwfbisr.asm @@ -387,10 +236,6 @@ void plc1_irq(struct s_smc *smc) { u_short st = inpw(PLC(PB,PL_INTR_EVENT)) ; -#if (defined(ISA) || defined(EISA)) - /* reset PLC Int. bits */ - outpw(PLC1_I,inpw(PLC1_I)) ; -#endif plc_irq(smc,PB,st) ; } @@ -402,10 +247,6 @@ void plc2_irq(struct s_smc *smc) { u_short st = inpw(PLC(PA,PL_INTR_EVENT)) ; -#if (defined(ISA) || defined(EISA)) - /* reset PLC Int. bits */ - outpw(PLC2_I,inpw(PLC2_I)) ; -#endif plc_irq(smc,PA,st) ; } @@ -446,43 +287,15 @@ void read_address(struct s_smc *smc, u_char *mac_addr) char PmdType ; int i ; -#if (defined(ISA) || defined(MCA)) - for (i = 0; i < 4 ;i++) { /* read mac address from board */ - smc->hw.fddi_phys_addr.a[i] = - bitrev8(inpw(PR_A(i+SA_MAC))); - } - for (i = 4; i < 6; i++) { - smc->hw.fddi_phys_addr.a[i] = - bitrev8(inpw(PR_A(i+SA_MAC+PRA_OFF))); - } -#endif -#ifdef EISA - /* - * Note: We get trouble on an Alpha machine if we make a inpw() - * instead of inp() - */ - for (i = 0; i < 4 ;i++) { /* read mac address from board */ - smc->hw.fddi_phys_addr.a[i] = - bitrev8(inp(PR_A(i+SA_MAC))); - } - for (i = 4; i < 6; i++) { - smc->hw.fddi_phys_addr.a[i] = - bitrev8(inp(PR_A(i+SA_MAC+PRA_OFF))); - } -#endif #ifdef PCI for (i = 0; i < 6; i++) { /* read mac address from board */ smc->hw.fddi_phys_addr.a[i] = bitrev8(inp(ADDR(B2_MAC_0+i))); } #endif -#ifndef PCI - ConnectorType = inpw(PR_A(SA_PMD_TYPE)) & 0xff ; - PmdType = inpw(PR_A(SA_PMD_TYPE+1)) & 0xff ; -#else + ConnectorType = inp(ADDR(B2_CONN_TYP)) ; PmdType = inp(ADDR(B2_PMD_TYP)) ; -#endif smc->y[PA].pmd_type[PMD_SK_CONN] = smc->y[PB].pmd_type[PMD_SK_CONN] = ConnectorType ; @@ -512,20 +325,12 @@ void init_board(struct s_smc *smc, u_char *mac_addr) card_start(smc) ; read_address(smc,mac_addr) ; -#ifndef PCI - if (inpw(CSR_A) & CS_SAS) -#else if (!(inp(ADDR(B0_DAS)) & DAS_AVAIL)) -#endif smc->s.sas = SMT_SAS ; /* Single att. station */ else smc->s.sas = SMT_DAS ; /* Dual att. station */ -#ifndef PCI - if (inpw(CSR_A) & CS_BYSTAT) -#else if (!(inp(ADDR(B0_DAS)) & DAS_BYP_ST)) -#endif smc->mib.fddiSMTBypassPresent = 0 ; /* without opt. bypass */ else @@ -538,42 +343,12 @@ void init_board(struct s_smc *smc, u_char *mac_addr) */ void sm_pm_bypass_req(struct s_smc *smc, int mode) { -#if (defined(ISA) || defined(EISA)) - int csra_v ; -#endif - DB_ECMN(1,"ECM : sm_pm_bypass_req(%s)\n",(mode == BP_INSERT) ? 
"BP_INSERT" : "BP_DEINSERT",0) ; if (smc->s.sas != SMT_DAS) return ; -#if (defined(ISA) || defined(EISA)) - - csra_v = inpw(CSR_A) & ~CS_BYPASS ; -#ifdef EISA - csra_v |= smc->hw.led ; -#endif - - switch(mode) { - case BP_INSERT : - outpw(CSR_A,csra_v | CS_BYPASS) ; - break ; - case BP_DEINSERT : - outpw(CSR_A,csra_v) ; - break ; - } -#endif /* ISA / EISA */ -#ifdef MCA - switch(mode) { - case BP_INSERT : - outp(ADDR(BYPASS(STAT_INS)),0) ;/* insert station */ - break ; - case BP_DEINSERT : - outp(ADDR(BYPASS(STAT_BYP)),0) ; /* bypass station */ - break ; - } -#endif #ifdef PCI switch(mode) { case BP_INSERT : @@ -591,31 +366,14 @@ void sm_pm_bypass_req(struct s_smc *smc, int mode) */ int sm_pm_bypass_present(struct s_smc *smc) { -#ifndef PCI - return( (inpw(CSR_A) & CS_BYSTAT) ? FALSE : TRUE ) ; -#else return( (inp(ADDR(B0_DAS)) & DAS_BYP_ST) ? TRUE: FALSE) ; -#endif } void plc_clear_irq(struct s_smc *smc, int p) { SK_UNUSED(p) ; -#if (defined(ISA) || defined(EISA)) - switch (p) { - case PA : - /* reset PLC Int. bits */ - outpw(PLC2_I,inpw(PLC2_I)) ; - break ; - case PB : - /* reset PLC Int. bits */ - outpw(PLC1_I,inpw(PLC1_I)) ; - break ; - } -#else SK_UNUSED(smc) ; -#endif } @@ -645,51 +403,6 @@ static void led_indication(struct s_smc *smc, int led_event) phy = &smc->y[PB] ; mib_b = phy->mib ; -#ifdef EISA - /* Ring up = yellow led OFF*/ - if (led_event == LED_Y_ON) { - smc->hw.led |= CS_LED_1 ; - } - else if (led_event == LED_Y_OFF) { - smc->hw.led &= ~CS_LED_1 ; - } - else { - /* Link at Port A or B = green led ON */ - if (mib_a->fddiPORTPCMState == PC8_ACTIVE || - mib_b->fddiPORTPCMState == PC8_ACTIVE) { - smc->hw.led |= CS_LED_0 ; - } - else { - smc->hw.led &= ~CS_LED_0 ; - } - } -#endif -#ifdef MCA - led_state = inpw(LEDR_A) ; - - /* Ring up = yellow led OFF*/ - if (led_event == LED_Y_ON) { - led_state |= LED_1 ; - } - else if (led_event == LED_Y_OFF) { - led_state &= ~LED_1 ; - } - else { - led_state &= ~(LED_2|LED_0) ; - - /* Link at Port A = green led A ON */ - if (mib_a->fddiPORTPCMState == PC8_ACTIVE) { - led_state |= LED_2 ; - } - - /* Link at Port B/S = green led B ON */ - if (mib_b->fddiPORTPCMState == PC8_ACTIVE) { - led_state |= LED_0 ; - } - } - - outpw(LEDR_A, led_state) ; -#endif /* MCA */ #ifdef PCI led_state = 0 ; @@ -824,446 +537,6 @@ int set_oi_id_def(struct s_smc *smc) } #endif /* MULT_OEM */ - -#ifdef MCA -/************************ - * - * BEGIN_MANUAL_ENTRY() - * - * exist_board - * - * Check if an MCA board is present in the specified slot. - * - * int exist_board( - * struct s_smc *smc, - * int slot) ; - * In - * smc - A pointer to the SMT Context struct. - * - * slot - The number of the slot to inspect. - * Out - * 0 = No adapter present. - * 1 = Found FM1 adapter. - * - * Pseudo - * Read MCA ID - * for all valid OEM_IDs - * compare with ID read - * if equal, return 1 - * return(0 - * - * Note - * The smc pointer must be valid now. - * - * END_MANUAL_ENTRY() - * - ************************/ -#define LONG_CARD_ID(lo, hi) ((((hi) & 0xff) << 8) | ((lo) & 0xff)) -int exist_board(struct s_smc *smc, int slot) -{ -#ifdef MULT_OEM - SK_LOC_DECL(u_char,id[2]) ; - int idi ; -#endif /* MULT_OEM */ - - /* No longer valid. */ - if (smc == NULL) - return(0) ; - -#ifndef MULT_OEM - if (read_card_id(smc, slot) - == LONG_CARD_ID(OEMID(smc,0), OEMID(smc,1))) - return (1) ; /* Found FM adapter. 
*/ - -#else /* MULT_OEM */ - idi = read_card_id(smc, slot) ; - id[0] = idi & 0xff ; - id[1] = idi >> 8 ; - - smc->hw.oem_id = (struct s_oem_ids *) &oem_ids[0] ; - for (; smc->hw.oem_id->oi_status != OI_STAT_LAST; smc->hw.oem_id++) { - if (smc->hw.oem_id->oi_status < smc->hw.oem_min_status) - continue ; - - if (is_equal_num(&id[0],&OEMID(smc,0),2)) - return (1) ; - } -#endif /* MULT_OEM */ - return (0) ; /* No adapter found. */ -} - -/************************ - * - * read_card_id - * - * Read the MCA card id from the specified slot. - * In - * smc - A pointer to the SMT Context struct. - * CAVEAT: This pointer may be NULL and *must not* be used within this - * function. It's only purpose is for drivers that need some information - * for the inp() and outp() macros. - * - * slot - The number of the slot for which the card id is returned. - * Out - * Returns the card id read from the specified slot. If an illegal slot - * number is specified, the function returns zero. - * - ************************/ -static int read_card_id(struct s_smc *smc, int slot) -/* struct s_smc *smc ; Do not use. */ -{ - int card_id ; - - SK_UNUSED(smc) ; /* Make LINT happy. */ - if ((slot < 1) || (slot > 15)) /* max 16 slots, 0 = motherboard */ - return (0) ; /* Illegal slot number specified. */ - - EnableSlotAccess(smc, slot) ; - - card_id = ((read_POS(smc,POS_ID_HIGH,slot - 1) & 0xff) << 8) | - (read_POS(smc,POS_ID_LOW,slot - 1) & 0xff) ; - - DisableSlotAccess(smc) ; - - return (card_id) ; -} - -/************************ - * - * BEGIN_MANUAL_ENTRY() - * - * get_board_para - * - * Get adapter configuration information. Fill all board specific - * parameters within the 'smc' structure. - * - * int get_board_para( - * struct s_smc *smc, - * int slot) ; - * In - * smc - A pointer to the SMT Context struct, to which this function will - * write some adapter configuration data. - * - * slot - The number of the slot, in which the adapter is installed. - * Out - * 0 = No adapter present. - * 1 = Ok. - * 2 = Adapter present, but card enable bit not set. - * - * END_MANUAL_ENTRY() - * - ************************/ -int get_board_para(struct s_smc *smc, int slot) -{ - int val ; - int i ; - - /* Check if adapter present & get type of adapter. */ - switch (exist_board(smc, slot)) { - case 0: /* Adapter not present. */ - return (0) ; - case 1: /* FM Rev. 1 */ - smc->hw.rev = FM1_REV ; - smc->hw.VFullRead = 0x0a ; - smc->hw.VFullWrite = 0x05 ; - smc->hw.DmaWriteExtraBytes = 8 ; /* 2 extra words. */ - break ; - } - smc->hw.slot = slot ; - - EnableSlotAccess(smc, slot) ; - - if (!(read_POS(smc,POS_102, slot - 1) & POS_CARD_EN)) { - DisableSlotAccess(smc) ; - return (2) ; /* Card enable bit not set. */ - } - - val = read_POS(smc,POS_104, slot - 1) ; /* I/O, IRQ */ - -#ifndef MEM_MAPPED_IO /* is defined by the operating system */ - i = val & POS_IOSEL ; /* I/O base addr. (0x0200 .. 0xfe00) */ - smc->hw.iop = (i + 1) * 0x0400 - 0x200 ; -#endif - i = ((val & POS_IRQSEL) >> 6) & 0x03 ; /* IRQ <0, 1> */ - smc->hw.irq = opt_ints[i] ; - - /* FPROM base addr. */ - i = ((read_POS(smc,POS_103, slot - 1) & POS_MSEL) >> 4) & 0x07 ; - smc->hw.eprom = opt_eproms[i] ; - - DisableSlotAccess(smc) ; - - /* before this, the smc->hw.iop must be set !!! */ - smc->hw.slot_32 = inpw(CSF_A) & SLOT_32 ; - - return (1) ; -} - -/* Enable access to specified MCA slot. */ -static void EnableSlotAccess(struct s_smc *smc, int slot) -{ - SK_UNUSED(slot) ; - -#ifndef AIX - SK_UNUSED(smc) ; - - /* System mode. 
*/ - outp(POS_SYS_SETUP, POS_SYSTEM) ; - - /* Select slot. */ - outp(POS_CHANNEL_POS, POS_CHANNEL_BIT | (slot-1)) ; -#else - attach_POS_addr (smc) ; -#endif -} - -/* Disable access to MCA slot formerly enabled via EnableSlotAccess(). */ -static void DisableSlotAccess(struct s_smc *smc) -{ -#ifndef AIX - SK_UNUSED(smc) ; - - outp(POS_CHANNEL_POS, 0) ; -#else - detach_POS_addr (smc) ; -#endif -} -#endif /* MCA */ - -#ifdef EISA -#ifndef MEM_MAPPED_IO -#define SADDR(slot) (((slot)<<12)&0xf000) -#else /* MEM_MAPPED_IO */ -#define SADDR(slot) (smc->hw.iop) -#endif /* MEM_MAPPED_IO */ - -/************************ - * - * BEGIN_MANUAL_ENTRY() - * - * exist_board - * - * Check if an EISA board is present in the specified slot. - * - * int exist_board( - * struct s_smc *smc, - * int slot) ; - * In - * smc - A pointer to the SMT Context struct. - * - * slot - The number of the slot to inspect. - * Out - * 0 = No adapter present. - * 1 = Found adapter. - * - * Pseudo - * Read EISA ID - * for all valid OEM_IDs - * compare with ID read - * if equal, return 1 - * return(0 - * - * Note - * The smc pointer must be valid now. - * - ************************/ -int exist_board(struct s_smc *smc, int slot) -{ - int i ; -#ifdef MULT_OEM - SK_LOC_DECL(u_char,id[4]) ; -#endif /* MULT_OEM */ - - /* No longer valid. */ - if (smc == NULL) - return(0); - - SK_UNUSED(slot) ; - -#ifndef MULT_OEM - for (i = 0 ; i < 4 ; i++) { - if (inp(SADDR(slot)+PRA(i)) != OEMID(smc,i)) - return(0) ; - } - return(1) ; -#else /* MULT_OEM */ - for (i = 0 ; i < 4 ; i++) - id[i] = inp(SADDR(slot)+PRA(i)) ; - - smc->hw.oem_id = (struct s_oem_ids *) &oem_ids[0] ; - - for (; smc->hw.oem_id->oi_status != OI_STAT_LAST; smc->hw.oem_id++) { - if (smc->hw.oem_id->oi_status < smc->hw.oem_min_status) - continue ; - - if (is_equal_num(&id[0],&OEMID(smc,0),4)) - return (1) ; - } - return (0) ; /* No adapter found. */ -#endif /* MULT_OEM */ -} - - -int get_board_para(struct s_smc *smc, int slot) -{ - int i ; - - if (!exist_board(smc,slot)) - return(0) ; - - smc->hw.slot = slot ; -#ifndef MEM_MAPPED_IO /* if defined by the operating system */ - smc->hw.iop = SADDR(slot) ; -#endif - - if (!(inp(C0_A(0))&CFG_CARD_EN)) { - return(2) ; /* CFG_CARD_EN bit not set! */ - } - - smc->hw.irq = opt_ints[(inp(C1_A(0)) & CFG_IRQ_SEL)] ; - smc->hw.dma = opt_dmas[((inp(C1_A(0)) & CFG_DRQ_SEL)>>3)] ; - - if ((i = inp(C2_A(0)) & CFG_EPROM_SEL) != 0x0f) - smc->hw.eprom = opt_eproms[i] ; - else - smc->hw.eprom = 0 ; - - smc->hw.DmaWriteExtraBytes = 8 ; - - return(1) ; -} -#endif /* EISA */ - -#ifdef ISA -#ifndef MULT_OEM -const u_char sklogo[6] = SKLOGO_STR ; -#define SIZE_SKLOGO(smc) sizeof(sklogo) -#define SKLOGO(smc,i) sklogo[i] -#else /* MULT_OEM */ -#define SIZE_SKLOGO(smc) smc->hw.oem_id->oi_logo_len -#define SKLOGO(smc,i) smc->hw.oem_id->oi_logo[i] -#endif /* MULT_OEM */ - - -int exist_board(struct s_smc *smc, HW_PTR port) -{ - int i ; -#ifdef MULT_OEM - int bytes_read ; - u_char board_logo[15] ; - SK_LOC_DECL(u_char,id[4]) ; -#endif /* MULT_OEM */ - - /* No longer valid. 
*/ - if (smc == NULL) - return(0); - - SK_UNUSED(smc) ; -#ifndef MULT_OEM - for (i = SADDRL ; i < (signed) (SADDRL+SIZE_SKLOGO(smc)) ; i++) { - if ((u_char)inpw((PRA(i)+port)) != SKLOGO(smc,i-SADDRL)) { - return(0) ; - } - } - - /* check MAC address (S&K or other) */ - for (i = 0 ; i < 3 ; i++) { - if ((u_char)inpw((PRA(i)+port)) != OEMID(smc,i)) - return(0) ; - } - return(1) ; -#else /* MULT_OEM */ - smc->hw.oem_id = (struct s_oem_ids *) &oem_ids[0] ; - board_logo[0] = (u_char)inpw((PRA(SADDRL)+port)) ; - bytes_read = 1 ; - - for (; smc->hw.oem_id->oi_status != OI_STAT_LAST; smc->hw.oem_id++) { - if (smc->hw.oem_id->oi_status < smc->hw.oem_min_status) - continue ; - - /* Test all read bytes with current OEM_entry */ - /* for (i=0; (ihw.oem_id = (struct s_oem_ids *) &oem_ids[0] ; - for (; smc->hw.oem_id->oi_status != OI_STAT_LAST; smc->hw.oem_id++) { - if (smc->hw.oem_id->oi_status < smc->hw.oem_min_status) - continue ; -#endif - ven_id = OEMID(smc,0) + (OEMID(smc,1) << 8) ; - dev_id = OEMID(smc,2) + (OEMID(smc,3) << 8) ; - for (i = 0; i < slot; i++) { - if (pci_find_device(i,&smc->hw.pci_handle, - dev_id,ven_id) != 0) { - - found = FALSE ; - } else { - found = TRUE ; - } - } - if (found) { - return(1) ; /* adapter was found */ - } -#ifdef MULT_OEM - } -#endif - return(0) ; /* adapter was not found */ -} -#endif /* PCI */ -#endif /* USE_BIOS_FUNC */ - void driver_get_bia(struct s_smc *smc, struct fddi_addr *bia_addr) { int i ; diff --git a/drivers/net/skfp/h/mbuf.h b/drivers/net/skfp/h/mbuf.h index b339d1f2e0e5c53e548ffd9db7224a40db80c229..f2aadcda9e7faa697c48cf7f2f732e72542a7355 100644 --- a/drivers/net/skfp/h/mbuf.h +++ b/drivers/net/skfp/h/mbuf.h @@ -15,11 +15,7 @@ #ifndef _MBUF_ #define _MBUF_ -#ifndef PCI -#define M_SIZE 4550 -#else #define M_SIZE 4504 -#endif #ifndef MAX_MBUF #define MAX_MBUF 4 diff --git a/drivers/net/skfp/h/skfbi.h b/drivers/net/skfp/h/skfbi.h index ba347d6910f160daad9e18398cf53b5659185e34..c1ba26c06d731252771dfe7f2273b724bc675068 100644 --- a/drivers/net/skfp/h/skfbi.h +++ b/drivers/net/skfp/h/skfbi.h @@ -15,797 +15,11 @@ #ifndef _SKFBI_H_ #define _SKFBI_H_ -#ifdef SYNC -#define exist_board_far exist_board -#define get_board_para_far get_board_para -#endif - -/* - * physical address offset + IO-Port base address - */ -#ifndef PCI -#define ADDR(a) ((a)+smc->hw.iop) -#define ADDRS(smc,a) ((a)+(smc)->hw.iop) -#endif - /* - * FDDI-Fx (x := {I(SA), E(ISA), M(CA), P(CI)}) + * FDDI-Fx (x := {I(SA), P(CI)}) * address calculation & function defines */ -#ifdef EISA - -/* - * Configuration PROM: !! all 8-Bit IO's !! - * |<- MAC-Address ->| - * /-+--+--+--+--+-//-+--+--+--+--+-//-+--+--+--+--+-//-+--+--+--+--+-/ - * val: |PROD_ID0..3| | free | |00|00|5A|40| |nn|mm|00|00| - * /-+--+--+--+--+-//-+--+--+--+--+-//-+--+--+--+--+-//-+--+--+--+--+-/ - * IO- ^ ^ ^ ^ ^ - * port 0C80 0C83 0C88 0C90 0C98 - * | \ - * | \ - * | \______________________________________________ - * EISA Expansion Board Product ID: \ - * BIT: |7 6 5 4 3 2 1 0| \ - * | PROD_ID0 | PROD_ID1 | PROD_ID2 | PROD_ID3 | - * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ - * |0| MAN_C0 | MAN_C1 | MAN_C2 | PROD1 | PROD0 | REV1 | REV0 | - * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ - * ^=reserved | product numb. | revision numb | - * MAN_Cx = compressed manufacterer code (x:=0..2) - * ASCII : 'A'..'Z' : 0x41..0x5A -> compr.(c-0x40) : 0x01..0x1A (5Bits!) 
- */ - -#ifndef MULT_OEM -#ifndef OEM_CONCEPT -#define MAN_C0 ('S'-0x40) -#define MAN_C1 ('K'-0x40) -#define MAN_C2 ('D'-0x40) -#define PROD_ID0 (u_char)((MAN_C0<<2) | (MAN_C1>>3)) -#define PROD_ID1 (u_char)(((MAN_C1<<5) & 0xff) | MAN_C2) -#define PROD_ID2 (u_char)(1) /* prod. nr. */ -#define PROD_ID3 (u_char)(0) /* rev. nr. */ - -#ifndef OEM_USER_DATA -#define OEM_USER_DATA "SK-NET FDDI V2.0 Userdata" -#endif -#else /* OEM_CONCEPT */ - -/* MAN_C(0|1|2) no longer present (ra). */ -#define PROD_ID0 (u_char)OEM_PROD_ID0 -#define PROD_ID1 (u_char)OEM_PROD_ID1 -#define PROD_ID2 (u_char)OEM_PROD_ID2 -#define PROD_ID3 (u_char)OEM_PROD_ID3 -#endif /* OEM_CONCEPT */ - -#define SKLOGO PROD_ID0, PROD_ID1, PROD_ID2, PROD_ID3 -#endif /* MULT_OEM */ - -#define SADDRL (0) /* start address SKLOGO */ -#define SA_MAC (0x10) /* start addr. MAC_AD within the PROM */ -#define PRA_OFF (4) -#define SA_PMD_TYPE (8) /* start addr. PMD-Type */ - -#define SKFDDI_PSZ 32 /* address PROM size */ - -/* - * address transmission from logical to physical offset address on board - */ -#define FMA(a) (0x0400|((a)<<1)) /* FORMAC+ (r/w) */ -#define P1A(a) (0x0800|((a)<<1)) /* PLC1 (r/w) */ -#define P2A(a) (0x0840|((a)<<1)) /* PLC2 (r/w) */ -#define TIA(a) (0x0880|((a)<<1)) /* Timer (r/w) */ -#define PRA(a) (0x0c80| (a)) /* configuration PROM */ -#define C0A(a) (0x0c84| (a)) /* config. RAM */ -#define C1A(a) (0x0ca0| (a)) /* IRQ-, DMA-nr., EPROM type */ -#define C2A(a) (0x0ca4| (a)) /* EPROM and PAGE selector */ - -#define CONF C0A(0) /* config RAM (card enable bit port) */ -#define PGRA C2A(0) /* Flash page register */ -#define CDID PRA(0) /* Card ID I/O port addr. offset */ - - -/* - * physical address offset + slot specific IO-Port base address - */ -#define FM_A(a) (FMA(a)+smc->hw.iop) /* FORMAC Plus physical addr */ -#define P1_A(a) (P1A(a)+smc->hw.iop) /* PLC1 (r/w) */ -#define P2_A(a) (P2A(a)+smc->hw.iop) /* PLC2 (r/w) */ -#define TI_A(a) (TIA(a)+smc->hw.iop) /* Timer (r/w) */ -#define PR_A(a) (PRA(a)+smc->hw.iop) /* config. PROM */ -#define C0_A(a) (C0A(a)+smc->hw.iop) /* config. RAM */ -#define C1_A(a) (C1A(a)+smc->hw.iop) /* config. RAM */ -#define C2_A(a) (C2A(a)+smc->hw.iop) /* config. RAM */ - - -#define CSRA 0x0008 /* control/status register address (r/w) */ -#define ISRA 0x0008 /* int. source register address (upper 8Bits) */ -#define PLC1I 0x001a /* clear PLC1 interrupt (write only) */ -#define PLC2I 0x0020 /* clear PLC2 interrupt (write only) */ -#define CSFA 0x001c /* control/status FIFO BUSY flags (read only) */ -#define RQAA 0x001c /* Request reg. (write only) */ -#define WCTA 0x001e /* word counter (r/w) */ -#define FFLAG 0x005e /* FLAG/V_FULL (FIFO almost full, write only)*/ - -#define CSR_A (CSRA+smc->hw.iop) /* control/status register address (r/w) */ -#ifdef UNIX -#define CSR_AS(smc) (CSRA+(smc)->hw.iop) /* control/status register address (r/w) */ -#endif -#define ISR_A (ISRA+smc->hw.iop) /* int. source register address (upper 8Bits) */ -#define PLC1_I (PLC1I+smc->hw.iop) /* clear PLC1 internupt (write only) */ -#define PLC2_I (PLC2I+smc->hw.iop) /* clear PLC2 interrupt (write only) */ -#define CSF_A (CSFA+smc->hw.iop) /* control/status FIFO BUSY flags (r/w) */ -#define RQA_A (RQAA+smc->hw.iop) /* Request reg. 
(write only) */ -#define WCT_A (WCTA+smc->hw.iop) /* word counter (r/w) */ -#define FFLAG_A (FFLAG+smc->hw.iop) /* FLAG/V_FULL (FIFO almost full, write only)*/ - -/* - * control/status register CSRA bits - */ -/* write */ -#define CS_CRESET 0x01 /* Card reset (0=reset) */ -#define CS_RESET_FIFO 0x02 /* FIFO reset (0=reset) */ -#define CS_IMSK 0x04 /* enable IRQ (1=enable, 0=disable) */ -#define CS_EN_IRQ_TC 0x08 /* enable IRQ from transfer counter */ -#define CS_BYPASS 0x20 /* bypass switch (0=remove, 1=insert)*/ -#define CS_LED_0 0x40 /* switch LED 0 */ -#define CS_LED_1 0x80 /* switch LED 1 */ -/* read */ -#define CS_BYSTAT 0x40 /* 0=Bypass exist, 1= ..not */ -#define CS_SAS 0x80 /* single attachement station (=1) */ - -/* - * control/status register CSFA bits (FIFO) - */ -#define CSF_MUX0 0x01 -#define CSF_MUX1 0x02 -#define CSF_HSREQ0 0x04 -#define CSF_HSREQ1 0x08 -#define CSF_HSREQ2 0x10 -#define CSF_BUSY_DMA 0x40 -#define CSF_BUSY_FIFO 0x80 - -/* - * Interrupt source register ISRA (upper 8 data bits) read only & low activ. - */ -#define IS_MINTR1 0x0100 /* FORMAC ST1U/L & ~IMSK1U/L*/ -#define IS_MINTR2 0x0200 /* FORMAC ST2U/L & ~IMSK2U/L*/ -#define IS_PLINT1 0x0400 /* PLC1 */ -#define IS_PLINT2 0x0800 /* PLC2 */ -#define IS_TIMINT 0x1000 /* Timer 82C54-2 */ -#define IS_TC 0x2000 /* transf. counter */ - -#define ALL_IRSR (IS_MINTR1|IS_MINTR2|IS_PLINT1|IS_PLINT2|IS_TIMINT|IS_TC) - -/* - * CONFIG<0> RAM (C0_A()) - */ -#define CFG_CARD_EN 0x01 /* card enable */ - -/* - * CONFIG<1> RAM (C1_A()) - */ -#define CFG_IRQ_SEL 0x03 /* IRQ select (4 nr.) */ -#define CFG_IRQ_TT 0x04 /* IRQ trigger type (LEVEL/EDGE) */ -#define CFG_DRQ_SEL 0x18 /* DMA requ. (4 nr.) */ -#define CFG_BOOT_EN 0x20 /* 0=BOOT-, 1=Application Software */ -#define CFG_PROG_EN 0x40 /* V_Prog for FLASH_PROM (1=on) */ - -/* - * CONFIG<2> RAM (C2_A()) - */ -#define CFG_EPROM_SEL 0x0f /* FPROM start address selection */ -#define CFG_PAGE 0xf0 /* FPROM page selection */ - - -#define READ_PROM(a) ((u_char)inp(a)) -#define GET_PAGE(i) outp(C2_A(0),((int)(i)<<4) | (inp(C2_A(0)) & ~CFG_PAGE)) -#define FPROM_SW() (inp(C1_A(0)) & CFG_BOOT_EN) - -#define MAX_PAGES 16 /* 16 pages */ -#define MAX_FADDR 0x2000 /* 8K per page */ -#define VPP_ON() outp(C1_A(0),inp(C1_A(0)) | CFG_PROG_EN) -#define VPP_OFF() outp(C1_A(0),inp(C1_A(0)) & ~CFG_PROG_EN) - -#define DMA_BUSY() (inpw(CSF_A) & CSF_BUSY_DMA) -#define FIFO_BUSY() (inpw(CSF_A) & CSF_BUSY_FIFO) -#define DMA_FIFO_BUSY() (inpw(CSF_A) & (CSF_BUSY_DMA | CSF_BUSY_FIFO)) -#define BUS_CHECK() - -#ifdef UNISYS -/* For UNISYS use another macro with drv_usecewait function */ -#define CHECK_DMA() {u_long k = 1000000; \ - while (k && (DMA_BUSY())) { k--; drv_usecwait(20); } \ - if (!k) SMT_PANIC(smc,HWM_E0003,HWM_E0003_MSG) ; } -#else -#define CHECK_DMA() {u_long k = 1000000 ;\ - while (k && (DMA_BUSY())) k-- ;\ - if (!k) SMT_PANIC(smc,HWM_E0003,HWM_E0003_MSG) ; } -#endif - -#define CHECK_FIFO() {u_long k = 1000000 ;\ - while (k && (FIFO_BUSY())) k-- ;\ - if (!k) SMT_PANIC(smc,HWM_E0019,HWM_E0019_MSG) ; } - -#define CHECK_DMA_FIFO() {u_long k = 1000000 ;\ - while (k && (DMA_FIFO_BUSY())) k-- ;\ - if (!k) SMT_PANIC(smc,HWM_E0004,HWM_E0004_MSG) ; } - -#define GET_ISR() ~inpw(ISR_A) -#define CHECK_ISR() ~inpw(ISR_A) - -#ifndef UNIX -#ifndef WINNT -#define CLI_FBI() outpw(CSR_A,(inpw(CSR_A)&\ - (CS_CRESET|CS_BYPASS))|CS_RESET_FIFO|smc->hw.led) -#else /* WINNT */ -#define CLI_FBI() outpw(CSR_A,(l_inpw(CSR_A)&\ - (CS_CRESET|CS_BYPASS))|CS_RESET_FIFO|smc->hw.led) -#endif /* WINNT */ -#else /* UNIX */ 
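The CHECK_DMA()/CHECK_FIFO() macros being deleted above implement a bounded busy-wait: spin on a BUSY bit in a status register for a fixed number of iterations and treat an exhausted counter as a fatal hardware condition (SMT_PANIC). A minimal sketch of that idiom in plain C, purely for illustration -- poll_until_clear(), read_status and busy_bit are hypothetical stand-ins for inpw(CSF_A) and CSF_BUSY_DMA, and are not part of this patch:

#include <stdbool.h>

#define POLL_LIMIT 1000000UL	/* same order of magnitude as the deleted macros */

/*
 * Spin until (read_status() & busy_bit) clears or the iteration budget runs
 * out.  Returns true if the hardware went idle, false on timeout; the caller
 * treats false as a fatal condition, much as SMT_PANIC() did in the macros
 * above.
 */
static bool poll_until_clear(unsigned int (*read_status)(void),
			     unsigned int busy_bit)
{
	unsigned long k = POLL_LIMIT;

	while (k && (read_status() & busy_bit))
		k--;

	return k != 0;
}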
-#define CLI_FBI(smc) outpw(CSR_AS(smc),(inpw(CSR_AS(smc))&\ - (CS_CRESET|CS_BYPASS))|CS_RESET_FIFO|(smc)->hw.led) -#endif - -#ifndef UNIX -#define STI_FBI() outpw(CSR_A,(inpw(CSR_A)&\ - (CS_CRESET|CS_BYPASS|CS_RESET_FIFO))|CS_IMSK|smc->hw.led) -#else -#define STI_FBI(smc) outpw(CSR_AS(smc),(inpw(CSR_AS(smc))&\ - (CS_CRESET|CS_BYPASS|CS_RESET_FIFO))|CS_IMSK|(smc)->hw.led) -#endif - -/* EISA DMA Controller */ -#define DMA_WRITE_SINGLE_MASK_BIT_M 0x0a /* Master DMA Controller */ -#define DMA_WRITE_SINGLE_MASK_BIT_S 0xd4 /* Slave DMA Controller */ -#define DMA_CLEAR_BYTE_POINTER_M 0x0c -#define DMA_CLEAR_BYTE_POINTER_S 0xd8 - -#endif /* EISA */ - -#ifdef MCA - -/* - * POS Register: !! all I/O's are 8-Bit !! - */ -#define POS_SYS_SETUP 0x94 /* system setup register */ -#define POS_SYSTEM 0xff /* system mode */ - -#define POS_CHANNEL_POS 0x96 /* register slot ID */ -#define POS_CHANNEL_BIT 0x08 /* mask for -"- */ - -#define POS_BASE 0x100 /* POS base address */ -#define POS_ID_LOW POS_BASE /* card ID low */ -#define POS_ID_HIGH (POS_BASE+1) /* card ID high */ -#define POS_102 (POS_BASE+2) /* card en., arbitration level .. */ -#define POS_103 (POS_BASE+3) /* FPROM addr, page */ -#define POS_104 (POS_BASE+4) /* I/O, IRQ */ -#define POS_105 (POS_BASE+5) /* POS_CHCK */ -#define POS_106 (POS_BASE+6) /* to read VPD */ -#define POS_107 (POS_BASE+7) /* added without function */ - -/* FM1 card IDs */ -#define FM1_CARD_ID0 0x83 -#define FM1_CARD_ID1 0 - -#define FM1_IBM_ID0 0x9c -#define FM1_IBM_ID1 0x8f - - -/* FM2 card IDs */ -#define FM2_CARD_ID0 0xab -#define FM2_CARD_ID1 0 - -#define FM2_IBM_ID0 0x7e -#define FM2_IBM_ID1 0x8f - -/* Board revision. */ -#define FM1_REV 0 -#define FM2_REV 1 - -#define MAX_SLOT 8 - -/* - * POS_102 - */ -#define POS_CARD_EN 0x01 /* card enable =1 */ -#define POS_SDAT_EN 0x02 /* enable 32-bit streaming data mode */ -#define POS_EN_CHKINT 0x04 /* enable int. from check line asserted */ -#define POS_EN_BUS_ERR 0x08 /* enable int. on invalid busmaster transf. */ -#define POS_FAIRNESS 0x10 /* fairnes on =1 */ -/* attention: arbitration level used with bit 0 POS 105 */ -#define POS_LARBIT 0xe0 /* arbitration level (0,0,0)->level = 0x8 - (1,1,1)->level = 0xf */ -/* - * POS_103 - */ -#define POS_PAGE 0x07 /* FPROM page selection */ -#define POS_BOOT_EN 0x08 /* boot PROM enable =1 */ -#define POS_MSEL 0x70 /* memory start address for FPROM mapping */ -#define PROG_EN 0x80 /* FM1: Vpp prog on/off */ -#define POS_SDR 0x80 /* FM2: Streaming data bit */ - -/* - * POS_104 - */ -#define POS_IOSEL 0x3f /* selected I/O base address */ -#define POS_IRQSEL 0xc0 /* selected interrupt */ - -/* - * POS_105 - */ -#define POS_CHCK 0x80 -#define POS_SYNC_ERR 0x20 /* FM2: synchronous error reporting */ -#define POS_PAR_DATA 0x10 /* FM2: data parity enable bit */ -#define POS_PAR_ADDR 0x08 /* FM2: address parity enable bit */ -#define POS_IRQHSEL 0x02 /* FM2: Highest bit for IRQ_selection */ -#define POS_HARBIT 0x01 /* Highest bit in Bus arbitration selection */ - -#define SA_MAC (0) /* start addr. MAC_AD within the PROM */ -#define PRA_OFF (0) -#define SA_PMD_TYPE (8) /* start addr. 
PMD-Type */ - -/* - * address transmission from logical to physical offset address on board - */ -#define FMA(a) (0x0100|((a)<<1)) /* FORMAC+ (r/w) */ -#define P2(a) (0x00c0|((a)<<1)) /* PLC2 (r/w) (DAS) */ -#define P1(a) (0x0080|((a)<<1)) /* PLC1 (r/w) */ -#define TI(a) (0x0060|((a)<<1)) /* Timer (r/w) */ -#define PR(a) (0x0040|((a)<<1)) /* configuration PROM */ -#define CS(a) (0x0020| (a)) /* control/status */ -#define FF(a) (0x0010|((a)<<1)) /* FIFO ASIC */ -#define CT(a) (0x0000|((a)<<1)) /* counter */ - -/* - * counter - */ -#define ACLA CT(0) /* address counter low */ -#define ACHA CT(1) /* address counter high */ -#define BCN CT(2) /* byte counter */ -#define MUX CT(3) /* MUX-register */ -#define WCN CT(0x08) /* word counter */ -#define FFLG CT(0x09) /* FIFO Flags */ - -/* - * test/control register (FM2 only) - */ -#define CNT_TST 0x018 /* Counter test control register */ -#define CNT_STP 0x01a /* Counter test step reg. (8 Bit) */ - -/* - * CS register (read only) - */ -#define CSRA CS(0) /* control/status register address */ -#define CSFA CS(2) /* control/status FIFO BUSY ... */ -#define ISRA CS(4) /* first int. source register address */ -#define ISR2 CS(6) /* second int. source register address */ -#define LEDR CS(0x0c) /* LED register r/w */ -#define CSIL CS(0x10) /* I/O mapped POS_ID_low (100) */ -#define CSIH CS(0x12) /* - " - POS_ID_HIGH (101) */ -#define CSA CS(0x14) /* - " - POS_102 */ -#define CSM CS(0x0e) /* - " - POS_103 */ -#define CSM_FM1 CS(0x16) /* - " - POS_103 (copy in FM1) */ -#define CSI CS(0x18) /* - " - POS_104 */ -#define CSS CS(0x1a) /* - " - POS_105 */ -#define CSP_06 CS(0x1c) /* - " - POS_106 */ -#define WDOG_ST 0x1c /* Watchdog status (FM2 only) */ -#define WDOG_EN 0x1c /* Watchdog enabling (FM2 only, 8Bit) */ -#define WDOG_DIS 0x1e /* Watchdog disabling (FM2 only, 8Bit) */ - -#define PGRA CSM /* Flash page register */ - - -#define WCTA FF(0) /* word counter */ -#define FFLAG FF(1) /* FLAG/V_FULL (FIFO almost full, write only)*/ - -/* - * Timer register (FM2 only) - */ -#define RTM_CNT 0x28 /* RTM Counter */ -#define TI_DIV 0x60 /* Timer Prescaler */ -#define TI_CH1 0x62 /* Timer channel 1 counter */ -#define TI_STOP 0x64 /* Stop timer on channel 1 */ -#define TI_STRT 0x66 /* Start timer on channel 1 */ -#define TI_INI2 0x68 /* Timer: Bus master preemption */ -#define TI_CNT2 0x6a /* Timer */ -#define TI_INI3 0x6c /* Timer: Streaming data */ -#define TI_CNT3 0x6e /* Timer */ -#define WDOG_LO 0x70 /* Watchdog counter low */ -#define WDOG_HI 0x72 /* Watchdog counter high */ -#define RTM_PRE 0x74 /* restr. token prescaler */ -#define RTM_TIM 0x76 /* restr. token timer */ - -/* - * Recommended Timeout values (for FM2 timer only) - */ -#define TOUT_BM_PRE 188 /* 3.76 usec */ -#define TOUT_S_DAT 374 /* 7.48 usec */ - -/* - * CS register (write only) - */ -#define HSR(p) CS(0x18|(p)) /* Host request register */ - -#define RTM_PUT 0x36 /* restr. 
token counter write */ -#define RTM_GET 0x28 /* - " - clear */ -#define RTM_CLEAR 0x34 /* - " - read */ - -/* - * BCN Bit definitions - */ -#define BCN_BUSY 0x8000 /* DMA Busy flag */ -#define BCN_AZERO 0x4000 /* Almost zero flag (BCN < 4) */ -#define BCN_STREAM 0x2000 /* Allow streaming data (BCN >= 8) */ - -/* - * WCN Bit definitions - */ -#define WCN_ZERO 0x2000 /* Zero flag (counted to zero) */ -#define WCN_AZERO 0x1000 /* Almost zero flag (BCN < 4) */ - -/* - * CNT_TST Bit definitions - */ -#define CNT_MODE 0x01 /* Go into test mode */ -#define CNT_D32 0x02 /* 16/32 BIT test mode */ - -/* - * FIFO Flag FIFO Flags/Vfull register - */ -#define FF_VFULL 0x003f /* V_full value mask */ -#define FFLG_FULL 0x2000 /* FULL flag */ -#define FFLG_A_FULL 0x1000 /* Almost full flag */ -#define FFLG_VFULL 0x0800 /* V_full Flag */ -#define FFLG_A_EMP 0x0400 /* almost empty flag */ -#define FFLG_EMP 0x0200 /* empty flag */ -#define FFLG_T_EMP 0x0100 /* totally empty flag */ - -/* - * WDOG Watchdog status register - */ -#define WDOG_ALM 0x01 /* Watchdog alarm Bit */ -#define WDOG_ACT 0x02 /* Watchdog active Bit */ - -/* - * CS(0) CONTROLS - */ -#define CS_CRESET 0x0001 -#define FIFO_RST 0x0002 -#define CS_IMSK 0x0004 -#define EN_IRQ_CHCK 0x0008 -#define EN_IRQ_TOKEN 0x0010 -#define EN_IRQ_TC 0x0020 -#define TOKEN_STATUS 0x0040 -#define RTM_CHANGE 0x0080 - -#define CS_SAS 0x0100 -#define CS_BYSTAT 0x0200 /* bypass connected (0=conn.) */ -#define CS_BYPASS 0x0400 /* bypass on/off indication */ - -/* - * CS(2) FIFOSTAT - */ -#define HSREQ 0x0007 -#define BIGDIR 0x0008 -#define CSF_BUSY_FIFO 0x0010 -#define CSF_BUSY_DMA 0x0020 -#define SLOT_32 0x0040 - -#define LED_0 0x0001 -#define LED_1 0x0002 -#define LED_2 0x0100 - -#define MAX_PAGES 8 /* pages */ -#define MAX_FADDR 0x4000 /* 16K per page */ - -/* - * IRQ = ISRA || ISR2 ; - * - * ISRA = IRQ_OTH_EN && (IS_LAN | IS_BUS) ; - * ISR2 = IRQ_TC_EN && IS_TC ; - * - * IS_LAN = (IS_MINTR1 | IS_MINTR2 | IS_PLINT1 | IS_PLINT2 | IS_TIMINT) || - * (IRQ_EN_TOKEN && IS_TOKEN) ; - * IS_BUS = IRQ_CHCK_EN && (IS_BUSERR | IS_CHCK_L) ; - */ -/* - * ISRA !!! activ high !!! - */ -#define IS_MINTR1 0x0001 /* FORMAC ST1U/L & ~IMSK1U/L*/ -#define IS_MINTR2 0x0002 /* FORMAC ST2U/L & ~IMSK2U/L*/ -#define IS_PLINT1 0x0004 /* PLC1 */ -#define IS_PLINT2 0x0008 /* PLC2 */ -#define IS_TIMINT 0x0010 /* Timer 82C54-2 */ -#define IS_TOKEN 0x0020 /* restrictet token monitoring */ -#define IS_CHCK_L 0x0040 /* check line asserted */ -#define IS_BUSERR 0x0080 /* bus error */ -/* - * ISR2 - */ -#define IS_TC 0x0001 /* terminal count irq */ -#define IS_SFDBKRTN 0x0002 /* selected feedback return */ -#define IS_D16 0x0004 /* DS16 */ -#define IS_D32 0x0008 /* DS32 */ -#define IS_DPEI 0x0010 /* Data Parity Indication */ - -#define ALL_IRSR 0x00ff - -#define FM_A(a) ADDR(FMA(a)) /* FORMAC Plus physical addr */ -#define P1_A(a) ADDR(P1(a)) /* PLC1 (r/w) */ -#define P2_A(a) ADDR(P2(a)) /* PLC2 (r/w) (DAS) */ -#define TI_A(a) ADDR(TI(a)) /* Timer (r/w) FM1 only! */ -#define PR_A(a) ADDR(PR(a)) /* config. PROM */ -#define CS_A(a) ADDR(CS(a)) /* control/status */ - -#define ISR1_A ADDR(ISRA) /* first int. 
source register address */ -#define ISR2_A ADDR(ISR2) /* second -"- */ -#define CSR_A ADDR(CSRA) /* control/status register address */ -#define CSF_A ADDR(CSFA) /* control/status FIFO BUSY flags (r/w) */ - -#define CSIL_A ADDR(CSIL) /* I/O mapped POS_ID_low (102) */ -#define CSIH_A ADDR(CSIH) /* - " - POS_ID_HIGH (101) */ -#define CSA_A ADDR(CSA) /* - " - POS_102 */ -#define CSI_A ADDR(CSI) /* - " - POS_104 */ -#define CSM_A ADDR(CSM) /* - " - POS_103 */ -#define CSM_FM1_A ADDR(CSM_FM1) /* - " - POS_103 (2nd copy, FM1) */ -#define CSP_06_A ADDR(CSP_06) /* - " - POS_106 */ - -#define WCT_A ADDR(WCTA) /* word counter (r/w) */ -#define FFLAG_A ADDR(FFLAG) /* FLAG/V_FULL (FIFO almost full, write only)*/ - -#define ACL_A ADDR(ACLA) /* address counter low */ -#define ACH_A ADDR(ACHA) /* address counter high */ -#define BCN_A ADDR(BCN) /* byte counter */ -#define MUX_A ADDR(MUX) /* MUX-register */ - -#define ISR_A ADDR(ISRA) /* Interrupt Source Register */ -#define FIFO_RESET_A ADDR(FIFO_RESET) /* reset the FIFO */ -#define FIFO_EN_A ADDR(FIFO_EN) /* enable the FIFO */ - -#define WDOG_EN_A ADDR(WDOG_EN) /* reset and start the WDOG */ -#define WDOG_DIS_A ADDR(WDOG_DIS) /* disable the WDOG */ -/* - * all control reg. (read!) are 8 bit (except PAGE_RG_A and LEDR_A) - */ -#define HSR_A(p) ADDR(HSR(p)) /* Host request register */ - -#define STAT_BYP 0 /* bypass station */ -#define STAT_INS 2 /* insert station */ -#define BYPASS(o) CS(0x10|(o)) /* o=STAT_BYP || STAT_INS */ - -#define IRQ_TC_EN CS(0x0b) /* enable/disable IRQ on TC */ -#define IRQ_TC_DIS CS(0x0a) -#define IRQ_TOKEN_EN CS(9) /* enable/disable IRQ on restr. Token */ -#define IRQ_TOKEN_DIS CS(8) -#define IRQ_CHCK_EN CS(7) /* -"- IRQ after CHCK line */ -#define IRQ_CHCK_DIS CS(6) -#define IRQ_OTH_EN CS(5) /* -"- other IRQ's */ -#define IRQ_OTH_DIS CS(4) -#define FIFO_EN CS(3) /* disable (reset), enable FIFO */ -#define FIFO_RESET CS(2) -#define CARD_EN CS(1) /* disable (reset), enable card */ -#define CARD_DIS CS(0) - -#define LEDR_A ADDR(LEDR) /* D0=green, D1=yellow, D8=L2 */ -#define PAGE_RG_A ADDR(CSM) /* D<2..0> */ -#define IRQ_CHCK_EN_A ADDR(IRQ_CHCK_EN) -#define IRQ_CHCK_DIS_A ADDR(IRQ_CHCK_DIS) - -#define GET_PAGE(bank) outpw(PAGE_RG_A,(inpw(PAGE_RG_A) &\ - (~POS_PAGE)) |(int) (bank)) -#define VPP_ON() if (smc->hw.rev == FM1_REV) { \ - outpw(PAGE_RG_A, \ - (inpw(PAGE_RG_A) & POS_PAGE) | PROG_EN); \ - } -#define VPP_OFF() if (smc->hw.rev == FM1_REV) { \ - outpw(PAGE_RG_A,(inpw(PAGE_RG_A) & POS_PAGE)); \ - } - -#define SKFDDI_PSZ 16 /* address PROM size */ - -#define READ_PROM(a) ((u_char)inp(a)) - -#define GET_ISR() ~inpw(ISR1_A) -#ifndef TCI -#define CHECK_ISR() ~inpw(ISR1_A) -#define CHECK_ISR_SMP(iop) ~inpw((iop)+ISRA) -#else -#define CHECK_ISR() (~inpw(ISR1_A) | ~inpw(ISR2_A)) -#define CHECK_ISR_SMP(iop) (~inpw((iop)+ISRA) | ~inpw((iop)+ISR2)) -#endif - -#define DMA_BUSY() (inpw(CSF_A) & CSF_BUSY_DMA) -#define FIFO_BUSY() (inpw(CSF_A) & CSF_BUSY_FIFO) -#define DMA_FIFO_BUSY() (inpw(CSF_A) & (CSF_BUSY_DMA | CSF_BUSY_FIFO)) -#define BUS_CHECK() { int i ; \ - if ((i = GET_ISR()) & IS_BUSERR) \ - SMT_PANIC(smc,HWM_E0020,HWM_E0020_MSG) ; \ - if (i & IS_CHCK_L) \ - SMT_PANIC(smc,HWM_E0014,HWM_E0014_MSG) ; \ - } - -#define CHECK_DMA() { u_long k = 10000 ; \ - while (k && (DMA_BUSY())) { \ - k-- ; \ - BUS_CHECK() ; \ - } \ - if (!k) SMT_PANIC(smc,HWM_E0003,HWM_E0003_MSG) ; } - -#define CHECK_FIFO() {u_long k = 1000000 ;\ - while (k && (FIFO_BUSY())) k-- ;\ - if (!k) SMT_PANIC(smc,HWM_E0019,HWM_E0019_MSG) ; } - -#define CHECK_DMA_FIFO() 
{u_long k = 1000000 ;\ - while (k && (DMA_FIFO_BUSY())) { \ - k-- ;\ - BUS_CHECK() ; \ - } \ - if (!k) SMT_PANIC(smc,HWM_E0004,HWM_E0004_MSG) ; } - -#ifndef UNIX -#define CLI_FBI() outp(ADDR(IRQ_OTH_DIS),0) -#else -#define CLI_FBI(smc) outp(ADDRS((smc),IRQ_OTH_DIS),0) -#endif - -#ifndef TCI -#define CLI_FBI_SMP(iop) outp((iop)+IRQ_OTH_DIS,0) -#else -#define CLI_FBI_SMP(iop) outp((iop)+IRQ_OTH_DIS,0) ;\ - outp((iop)+IRQ_TC_DIS,0) -#endif - -#ifndef UNIX -#define STI_FBI() outp(ADDR(IRQ_OTH_EN),0) -#else -#define STI_FBI(smc) outp(ADDRS((smc),IRQ_OTH_EN),0) -#endif - -/* - * Terminal count primitives - */ -#define CLI_TCI(smc) outp(ADDRS((smc),IRQ_TC_DIS),0) -#define STI_TCI(smc) outp(ADDRS((smc),IRQ_TC_EN),0) -#define CHECK_TC(smc,k) {(k) = 10000 ;\ - while ((k) && (~inpw(ISR2_A) & IS_TC)) (k)-- ;\ - if (!k) SMT_PANIC(smc,HWM_E0018,HWM_E0018_MSG) ; } - -#endif /* MCA */ - -#ifdef ISA - -/* - * address transmission from logic NPADDR6-0 to physical offset address on board - */ -#define FMA(a) (0x8000|(((a)&0x07)<<1)|(((a)&0x78)<<7)) /* FORMAC+ (r/w) */ -#define PRA(a) (0x1000|(((a)&0x07)<<1)|(((a)&0x18)<<7)) /* PROM (read only)*/ -#define P1A(a) (0x4000|(((a)&0x07)<<1)|(((a)&0x18)<<7)) /* PLC1 (r/w) */ -#define P2A(a) (0x5000|(((a)&0x07)<<1)|(((a)&0x18)<<7)) /* PLC2 (r/w) */ -#define TIA(a) (0x6000|(((a)&0x03)<<1)) /* Timer (r/w) */ - -#define ISRA 0x0000 /* int. source register address (read only) */ -#define ACLA 0x0000 /* address counter low address (write only) */ -#define ACHA 0x0002 /* address counter high address (write only) */ -#define TRCA 0x0004 /* transfer counter address (write only) */ -#define PGRA 0x0006 /* page register address (write only) */ -#define RQAA 0x2000 /* Request reg. (write only) */ -#define CSRA 0x3000 /* control/status register address (r/w) */ - -/* - * physical address offset + IO-Port base address - */ -#define FM_A(a) (FMA(a)+smc->hw.iop) /* FORMAC Plus physical addr */ -#define PR_A(a) (PRA(a)+smc->hw.iop) /* PROM (read only)*/ -#define P1_A(a) (P1A(a)+smc->hw.iop) /* PLC1 (r/w) */ -#define P2_A(a) (P2A(a)+smc->hw.iop) /* PLC2 (r/w) */ -#define TI_A(a) (TIA(a)+smc->hw.iop) /* Timer (r/w) */ - -#define ISR_A (0x0000+smc->hw.iop) /* int. source register address (read only) */ -#define ACL_A (0x0000+smc->hw.iop) /* address counter low address (write only) */ -#define ACH_A (0x0002+smc->hw.iop) /* address counter high address (write only)*/ -#define TRC_A (0x0004+smc->hw.iop) /* transfer counter address (write only) */ -#define PGR_A (0x0006+smc->hw.iop) /* page register address (write only) */ -#define RQA_A (0x2000+smc->hw.iop) /* Request reg. (write only) */ -#define CSR_A (0x3000+smc->hw.iop) /* control/status register address (r/w) */ -#ifdef UNIX -#define CSR_AS(smc) (0x3000+(smc)->hw.iop) /* control/status register address */ -#endif -#define PLC1_I (0x3400+smc->hw.iop) /* clear PLC1 interrupt bit */ -#define PLC2_I (0x3800+smc->hw.iop) /* clear PLC2 interrupt bit */ - -#ifndef MULT_OEM -#ifndef OEM_CONCEPT -#define SKLOGO_STR "SKFDDI" -#else /* OEM_CONCEPT */ -#define SKLOGO_STR OEM_FDDI_LOGO -#endif /* OEM_CONCEPT */ -#endif /* MULT_OEM */ -#define SADDRL (24) /* start address SKLOGO */ -#define SA_MAC (0) /* start addr. MAC_AD within the PROM */ -#define PRA_OFF (0) -#define SA_PMD_TYPE (8) /* start addr. PMD-Type */ - -#define CDID (PRA(SADDRL)) /* Card ID int/O port addr. 
offset */ -#define NEXT_CDID ((PRA(SADDRL+1)) - CDID) - -#define SKFDDI_PSZ 32 /* address PROM size */ - -#define READ_PROM(a) ((u_char)inpw(a)) -#define GET_PAGE(i) outpw(PGR_A,(int)(i)) - -#define MAX_PAGES 16 /* 16 pages */ -#define MAX_FADDR 0x2000 /* 8K per page */ -#define VPP_OFF() outpw(CSR_A,(inpw(CSR_A) & (CS_CRESET|CS_BYPASS))) -#define VPP_ON() outpw(CSR_A,(inpw(CSR_A) & (CS_CRESET|CS_BYPASS)) | \ - CS_VPPSW) - -/* - * control/status register CSRA bits (log. addr: 0x3000) - */ -/* write */ -#define CS_CRESET 0x01 /* Card reset (0=reset) */ -#define CS_IMSK 0x02 /* enable IRQ (1=enable, 0=disable) */ -#define CS_RESINT1 0x04 /* PLINT1 reset */ -#define CS_VPPSW 0x10 /* 12V power switch (0=off, 1=on) */ -#define CS_BYPASS 0x20 /* bypass switch (0=remove, 1=insert)*/ -#define CS_RESINT2 0x40 /* PLINT2 reset */ -/* read */ -#define CS_BUSY 0x04 /* master transfer activ (=1) */ -#define CS_SW_EPROM 0x08 /* 0=Application Soft. 1=BOOT-EPROM */ -#define CS_BYSTAT 0x40 /* 0=Bypass exist, 1= ..not */ -#define CS_SAS 0x80 /* single attachement station (=1) */ - -/* - * Interrupt source register ISRA (log. addr: 0x0000) read only & low activ. - */ -#define IS_MINTR1 0x01 /* FORMAC ST1U/L && ~IMSK1U/L*/ -#define IS_MINTR2 0x02 /* FORMAC ST2U/L && ~IMSK2U/L*/ -#define IS_PLINT1 0x04 /* PLC1 */ -#define IS_PLINT2 0x08 /* PLC2 */ -#define IS_TIMINT 0x10 /* Timer 82C54-2 */ - -#define ALL_IRSR (IS_MINTR1|IS_MINTR2|IS_PLINT1|IS_PLINT2|IS_TIMINT) - -#define FPROM_SW() (inpw(CSR_A)&CS_SW_EPROM) -#define DMA_BUSY() (inpw(CSR_A)&CS_BUSY) -#define CHECK_FIFO() -#define BUS_CHECK() - -/* - * set Host Request register (wr.) - */ -#define SET_HRQ(qup) outpw(RQA_A+((qup)<<1),0) - -#ifndef UNIX -#ifndef WINNT -#define CLI_FBI() outpw(CSR_A,(inpw(CSR_A)&(CS_CRESET|CS_BYPASS|CS_VPPSW))) -#else -#define CLI_FBI() outpw(CSR_A,(l_inpw(CSR_A) & \ - (CS_CRESET|CS_BYPASS|CS_VPPSW))) -#endif -#else -#define CLI_FBI(smc) outpw(CSR_AS(smc),(inpw(CSR_AS(smc))& \ - (CS_CRESET|CS_BYPASS|CS_VPPSW))) -#endif - -#ifndef UNIX -#define STI_FBI() outpw(CSR_A,(inpw(CSR_A) & \ - (CS_CRESET|CS_BYPASS|CS_VPPSW)) | CS_IMSK) -#else -#define STI_FBI(smc) outpw(CSR_AS(smc),(inpw(CSR_AS(smc)) & \ - (CS_CRESET|CS_BYPASS|CS_VPPSW)) | CS_IMSK) -#endif - -#define CHECK_DMA() {unsigned k = 10000 ;\ - while (k && (DMA_BUSY())) k-- ;\ - if (!k) SMT_PANIC(smc,HWM_E0003,HWM_E0003_MSG) ; } - -#define GET_ISR() ~inpw(ISR_A) - -#endif /* ISA */ - /*--------------------------------------------------------------------------*/ #ifdef PCI diff --git a/drivers/net/skfp/h/skfbiinc.h b/drivers/net/skfp/h/skfbiinc.h index 79d55ad2cd2a7818d85fec6ad2fc5f5f6253a391..ac2d7192f1ca2f9be347b47b866cca515181df91 100644 --- a/drivers/net/skfp/h/skfbiinc.h +++ b/drivers/net/skfp/h/skfbiinc.h @@ -22,32 +22,6 @@ */ #define ERR_FLAGS (FS_MSRABT | FS_SEAC2 | FS_SFRMERR | FS_SFRMTY1) -#ifdef ISA -#define DMA_BUSY_CHECK CSRA -#define IMASK_FAST (IS_PLINT1 | IS_PLINT2 | IS_TIMINT) -#define HRQR (RQAA+(RQ_RRQ<<1)) -#define HRQW (RQAA+(RQ_WA2<<1)) -#define HRQA0 (RQAA+(RQ_WA0<<1)) -#define HRQSQ (RQAA+(RQ_WSQ<<1)) -#endif - -#ifdef EISA -#define DMA_BUSY_CHECK CSRA -#define DMA_HIGH_WORD 0x0400 -#define DMA_MASK_M 0x0a -#define DMA_MODE_M 0x0b -#define DMA_BYTE_PTR_M 0x0c -#define DMA_MASK_S 0x0d4 -#define DMA_MODE_S 0x0d6 -#define DMA_BYTE_PTR_S 0x0d8 -#define IMASK_FAST (IS_PLINT1 | IS_PLINT2 | IS_TIMINT | IS_TC) -#endif /* EISA */ - -#ifdef MCA -#define IMASK_FAST (IS_PLINT1 | IS_PLINT2 | IS_TIMINT | IS_TOKEN | \ - IS_CHCK_L | IS_BUSERR) -#endif - #ifdef PCI #define 
IMASK_FAST (IS_PLINT1 | IS_PLINT2 | IS_TIMINT | IS_TOKEN | \ IS_MINTR2 | IS_MINTR3 | IS_R1_P | \ diff --git a/drivers/net/skfp/h/targethw.h b/drivers/net/skfp/h/targethw.h index 22c4923241d38e3063c1072e23cc76b5e2ae672d..626dc7263591326d9ad9b916fe1a9010aea2dbb3 100644 --- a/drivers/net/skfp/h/targethw.h +++ b/drivers/net/skfp/h/targethw.h @@ -53,11 +53,6 @@ struct s_oem_ids { u_char oi_sub_id[4] ; /* sub id bytes, representation as */ /* defined by hardware, */ #endif -#ifdef ISA - u_char oi_logo_len ; /* the length of the adapter logo */ - u_char oi_logo[6] ; /* the adapter logo */ - u_char oi_reserved1 ; -#endif /* ISA */ } ; #endif /* MULT_OEM */ @@ -70,43 +65,17 @@ struct s_smt_hw { short dma ; /* DMA channel */ short irq ; /* IRQ level */ short eprom ; /* FLASH prom */ -#ifndef PCI - short DmaWriteExtraBytes ; /* add bytes for DMA write */ -#endif #ifndef SYNC u_short n_a_send ; /* pending send requests */ #endif -#if (defined(EISA) || defined(MCA) || defined(PCI)) +#if defined(PCI) short slot ; /* slot number */ short max_slots ; /* maximum number of slots */ -#endif - -#if (defined(PCI) || defined(MCA)) short wdog_used ; /* TRUE if the watch dog is used */ #endif -#ifdef MCA - short slot_32 ; /* 32bit slot (1) or 16bit slot (0) */ - short rev ; /* Board revision (FMx_REV). */ - short VFullRead ; /* V_full value for DMA read */ - short VFullWrite ; /* V_full value for DMA write */ -#endif - -#ifdef EISA - short led ; /* LED for FE card */ - - short dma_rmode ; /* read mode */ - short dma_wmode ; /* write mode */ - short dma_emode ; /* extend mode */ - - /* DMA controller channel dependent io addresses */ - u_short dma_base_word_count ; - u_short dma_base_address ; - u_short dma_base_address_page ; -#endif - #ifdef PCI u_short pci_handle ; /* handle to access the BIOS func */ u_long is_imask ; /* int maske for the int source reg */ diff --git a/drivers/net/skfp/hwt.c b/drivers/net/skfp/hwt.c index e01f8a0f35c6341679b9a1ded6370cadb5808837..053151468f93af7947d0bc9dc3da41a409a3971f 100644 --- a/drivers/net/skfp/hwt.c +++ b/drivers/net/skfp/hwt.c @@ -77,25 +77,10 @@ void hwt_start(struct s_smc *smc, u_long time) */ if (!cnt) cnt++ ; -#ifndef PCI - /* - * 6.25MHz -> CLK0 : T0 (cnt0 = 16us) -> OUT0 - * OUT0 -> CLK1 : T1 (cnt1) OUT1 -> ISRA(IS_TIMINT) - */ - OUT_82c54_TIMER(3,1<<6 | 3<<4 | 0<<1) ; /* counter 1, mode 0 */ - OUT_82c54_TIMER(1,cnt & 0xff) ; /* LSB */ - OUT_82c54_TIMER(1,(cnt>>8) & 0xff) ; /* MSB */ - /* - * start timer by switching counter 0 to mode 3 - * T0 resolution 16 us (CLK0=0.16us) - */ - OUT_82c54_TIMER(3,0<<6 | 3<<4 | 3<<1) ; /* counter 0, mode 3 */ - OUT_82c54_TIMER(0,100) ; /* LSB */ - OUT_82c54_TIMER(0,0) ; /* MSB */ -#else /* PCI */ + outpd(ADDR(B2_TI_INI), (u_long) cnt * 200) ; /* Load timer value. */ outpw(ADDR(B2_TI_CRTL), TIM_START) ; /* Start timer. 
*/ -#endif /* PCI */ + smc->hw.timer_activ = TRUE ; } @@ -115,15 +100,8 @@ void hwt_start(struct s_smc *smc, u_long time) ************************/ void hwt_stop(struct s_smc *smc) { -#ifndef PCI - /* stop counter 0 by switching to mode 0 */ - OUT_82c54_TIMER(3,0<<6 | 3<<4 | 0<<1) ; /* counter 0, mode 0 */ - OUT_82c54_TIMER(0,0) ; /* LSB */ - OUT_82c54_TIMER(0,0) ; /* MSB */ -#else /* PCI */ outpw(ADDR(B2_TI_CRTL), TIM_STOP) ; outpw(ADDR(B2_TI_CRTL), TIM_CL_IRQ) ; -#endif /* PCI */ smc->hw.timer_activ = FALSE ; } @@ -168,11 +146,6 @@ void hwt_init(struct s_smc *smc) void hwt_restart(struct s_smc *smc) { hwt_stop(smc) ; -#ifndef PCI - OUT_82c54_TIMER(3,1<<6 | 3<<4 | 0<<1) ; /* counter 1, mode 0 */ - OUT_82c54_TIMER(1,1 ) ; /* LSB */ - OUT_82c54_TIMER(1,0 ) ; /* MSB */ -#endif } /************************ @@ -191,21 +164,12 @@ void hwt_restart(struct s_smc *smc) u_long hwt_read(struct s_smc *smc) { u_short tr ; -#ifndef PCI - u_short is ; -#else u_long is ; -#endif if (smc->hw.timer_activ) { hwt_stop(smc) ; -#ifndef PCI - OUT_82c54_TIMER(3,1<<6) ; /* latch command */ - tr = IN_82c54_TIMER(1) & 0xff ; - tr += (IN_82c54_TIMER(1) & 0xff)<<8 ; -#else /* PCI */ tr = (u_short)((inpd(ADDR(B2_TI_VAL))/200) & 0xffff) ; -#endif /* PCI */ + is = GET_ISR() ; /* Check if timer expired (or wraparound). */ if ((tr > smc->hw.t_start) || (is & IS_TIMINT)) { diff --git a/drivers/net/skfp/skfddi.c b/drivers/net/skfp/skfddi.c index a7ef6c8b7721b00e40148388784e5b70279902f0..7cf9b9f35dee4defb5277648ff4f3f50426c5d07 100644 --- a/drivers/net/skfp/skfddi.c +++ b/drivers/net/skfp/skfddi.c @@ -260,9 +260,7 @@ static int skfp_init_one(struct pci_dev *pdev, dev->set_multicast_list = &skfp_ctl_set_multicast_list; dev->set_mac_address = &skfp_ctl_set_mac_address; dev->do_ioctl = &skfp_ioctl; - dev->header_cache_update = NULL; /* not supported */ - SET_MODULE_OWNER(dev); SET_NETDEV_DEV(dev, &pdev->dev); /* Initialize board structure with bus-specific info */ diff --git a/drivers/net/skfp/smt.c b/drivers/net/skfp/smt.c index 75afc1f07ba0ab2b70d263738759cb30c9b52fa9..ced2c8f76be711f773881d1718405a9367752762 100644 --- a/drivers/net/skfp/smt.c +++ b/drivers/net/skfp/smt.c @@ -1654,7 +1654,7 @@ static const struct smt_pdef { { SMT_P4053, 0, SWAP_SMT_P4053 } , } ; -#define N_SMT_PLEN (sizeof(smt_pdef)/sizeof(smt_pdef[0])) +#define N_SMT_PLEN ARRAY_SIZE(smt_pdef) int smt_check_para(struct s_smc *smc, struct smt_header *sm, const u_short list[]) diff --git a/drivers/net/skfp/srf.c b/drivers/net/skfp/srf.c index 16573aca8b6268dda579c96117439f3475fcda70..6caf713b744c59aab2c81d724f99ca0e8096178e 100644 --- a/drivers/net/skfp/srf.c +++ b/drivers/net/skfp/srf.c @@ -43,7 +43,7 @@ static void clear_reported(struct s_smc *smc); static void smt_send_srf(struct s_smc *smc); static struct s_srf_evc *smt_get_evc(struct s_smc *smc, int code, int index); -#define MAX_EVCS (sizeof(smc->evcs)/sizeof(smc->evcs[0])) +#define MAX_EVCS ARRAY_SIZE(smc->evcs) struct evc_init { u_char code ; @@ -67,7 +67,7 @@ static const struct evc_init evc_inits[] = { { SMT_EVENT_PORT_PATH_CHANGE, INDEX_PORT,NUMPHYS,SMT_P4053 } , } ; -#define MAX_INIT_EVC (sizeof(evc_inits)/sizeof(evc_inits[0])) +#define MAX_INIT_EVC ARRAY_SIZE(evc_inits) void smt_init_evc(struct s_smc *smc) { diff --git a/drivers/net/skge.c b/drivers/net/skge.c index e3d8520209b85e1bc5e3bccd60e58c1b1d03b0aa..2aae9fe38c5a9c250ae79f648ef081645cddf9e4 100644 --- a/drivers/net/skge.c +++ b/drivers/net/skge.c @@ -410,9 +410,14 @@ static const struct skge_stat { { "rx_fcs_error", XM_RXF_FCS_ERR, 
GM_RXF_FCS_ERR },
 };
 
-static int skge_get_stats_count(struct net_device *dev)
+static int skge_get_sset_count(struct net_device *dev, int sset)
 {
-	return ARRAY_SIZE(skge_stats);
+	switch (sset) {
+	case ETH_SS_STATS:
+		return ARRAY_SIZE(skge_stats);
+	default:
+		return -EOPNOTSUPP;
+	}
 }
 
 static void skge_get_ethtool_stats(struct net_device *dev,
@@ -811,15 +816,13 @@ static const struct ethtool_ops skge_ethtool_ops = {
 	.set_pauseparam = skge_set_pauseparam,
 	.get_coalesce = skge_get_coalesce,
 	.set_coalesce = skge_set_coalesce,
-	.get_sg = ethtool_op_get_sg,
 	.set_sg = skge_set_sg,
-	.get_tx_csum = ethtool_op_get_tx_csum,
 	.set_tx_csum = skge_set_tx_csum,
 	.get_rx_csum = skge_get_rx_csum,
 	.set_rx_csum = skge_set_rx_csum,
 	.get_strings = skge_get_strings,
 	.phys_id = skge_phys_id,
-	.get_stats_count = skge_get_stats_count,
+	.get_sset_count = skge_get_sset_count,
 	.get_ethtool_stats = skge_get_ethtool_stats,
 };
 
@@ -2528,7 +2531,7 @@ static int skge_up(struct net_device *dev)
 	skge_write32(hw, B0_IMSK, hw->intr_mask);
 	spin_unlock_irq(&hw->hw_lock);
 
-	netif_poll_enable(dev);
+	napi_enable(&skge->napi);
 	return 0;
 
 free_rx_ring:
@@ -2558,7 +2561,7 @@ static int skge_down(struct net_device *dev)
 	if (hw->chip_id == CHIP_ID_GENESIS && hw->phy_type == SK_PHY_XMAC)
 		del_timer_sync(&skge->link_timer);
 
-	netif_poll_disable(dev);
+	napi_disable(&skge->napi);
 	netif_carrier_off(dev);
 
 	spin_lock_irq(&hw->hw_lock);
@@ -3044,14 +3047,13 @@ static void skge_tx_done(struct net_device *dev)
 	}
 }
 
-static int skge_poll(struct net_device *dev, int *budget)
+static int skge_poll(struct napi_struct *napi, int to_do)
 {
-	struct skge_port *skge = netdev_priv(dev);
+	struct skge_port *skge = container_of(napi, struct skge_port, napi);
+	struct net_device *dev = skge->netdev;
 	struct skge_hw *hw = skge->hw;
 	struct skge_ring *ring = &skge->rx_ring;
 	struct skge_element *e;
-	unsigned long flags;
-	int to_do = min(dev->quota, *budget);
 	int work_done = 0;
 
 	skge_tx_done(dev);
@@ -3082,20 +3084,16 @@ static int skge_poll(struct net_device *dev, int *budget)
 	wmb();
 	skge_write8(hw, Q_ADDR(rxqaddr[skge->port], Q_CSR), CSR_START);
 
-	*budget -= work_done;
-	dev->quota -= work_done;
-
-	if (work_done >= to_do)
-		return 1; /* not done */
-
-	spin_lock_irqsave(&hw->hw_lock, flags);
-	__netif_rx_complete(dev);
-	hw->intr_mask |= napimask[skge->port];
-	skge_write32(hw, B0_IMSK, hw->intr_mask);
-	skge_read32(hw, B0_IMSK);
-	spin_unlock_irqrestore(&hw->hw_lock, flags);
+	if (work_done < to_do) {
+		spin_lock_irq(&hw->hw_lock);
+		__netif_rx_complete(dev, napi);
+		hw->intr_mask |= napimask[skge->port];
+		skge_write32(hw, B0_IMSK, hw->intr_mask);
+		skge_read32(hw, B0_IMSK);
+		spin_unlock_irq(&hw->hw_lock);
+	}
 
-	return 0;
+	return work_done;
 }
 
 /* Parity errors seem to happen when Genesis is connected to a switch
@@ -3252,8 +3250,9 @@ static irqreturn_t skge_intr(int irq, void *dev_id)
 	}
 
 	if (status & (IS_XA1_F|IS_R1_F)) {
+		struct skge_port *skge = netdev_priv(hw->dev[0]);
 		hw->intr_mask &= ~(IS_XA1_F|IS_R1_F);
-		netif_rx_schedule(hw->dev[0]);
+		netif_rx_schedule(hw->dev[0], &skge->napi);
 	}
 
 	if (status & IS_PA_TO_TX1)
@@ -3271,13 +3270,14 @@ static irqreturn_t skge_intr(int irq, void *dev_id)
 		skge_mac_intr(hw, 0);
 
 	if (hw->dev[1]) {
+		struct skge_port *skge = netdev_priv(hw->dev[1]);
+
 		if (status & (IS_XA2_F|IS_R2_F)) {
 			hw->intr_mask &= ~(IS_XA2_F|IS_R2_F);
-			netif_rx_schedule(hw->dev[1]);
+			netif_rx_schedule(hw->dev[1], &skge->napi);
 		}
 
 		if (status & IS_PA_TO_RX2) {
-			struct skge_port *skge = netdev_priv(hw->dev[1]);
 			++skge->net_stats.rx_over_errors;
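The skge hunks above show the new NAPI conversion pattern used throughout this patch: the per-port private structure gains a struct napi_struct, the driver registers its poll routine with netif_napi_add(), open/stop call napi_enable()/napi_disable(), the interrupt handler schedules polling against that napi_struct, and the poll routine takes (napi, budget) and returns the work done instead of adjusting dev->quota. A minimal sketch of that shape follows; foo_priv and foo_poll are hypothetical, not code from this patch, and completion is shown with napi_complete() as the sky2 hunks below use it (skge itself uses __netif_rx_complete(dev, napi) under its IRQ lock):

#include <linux/netdevice.h>

struct foo_priv {
	struct net_device *netdev;
	struct napi_struct napi;
	/* ... rings, registers, locks ... */
};

/* New-style poll: receives the napi_struct and a budget, returns work done. */
static int foo_poll(struct napi_struct *napi, int budget)
{
	struct foo_priv *priv = container_of(napi, struct foo_priv, napi);
	int work_done = 0;

	/* ... clean the RX ring here, incrementing work_done per packet ... */

	if (work_done < budget) {
		/* All pending work handled: leave polling mode; the interrupt
		 * handler will call napi_schedule() again when needed. */
		napi_complete(napi);
		/* re-enable the device's RX interrupt here */
	}
	return work_done;
}

/* probe:             netif_napi_add(dev, &priv->napi, foo_poll, 64);          */
/* ndo open/stop:     napi_enable(&priv->napi) / napi_disable(&priv->napi);    */
/* interrupt handler: if RX work is pending, napi_schedule(&priv->napi);       */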
skge_write16(hw, B3_PA_CTRL, PA_CLR_TO_RX2); } @@ -3552,7 +3552,6 @@ static struct net_device *skge_devinit(struct skge_hw *hw, int port, return NULL; } - SET_MODULE_OWNER(dev); SET_NETDEV_DEV(dev, &hw->pdev->dev); dev->open = skge_up; dev->stop = skge_down; @@ -3569,8 +3568,6 @@ static struct net_device *skge_devinit(struct skge_hw *hw, int port, SET_ETHTOOL_OPS(dev, &skge_ethtool_ops); dev->tx_timeout = skge_tx_timeout; dev->watchdog_timeo = TX_WATCHDOG; - dev->poll = skge_poll; - dev->weight = NAPI_WEIGHT; #ifdef CONFIG_NET_POLL_CONTROLLER dev->poll_controller = skge_netpoll; #endif @@ -3580,6 +3577,7 @@ static struct net_device *skge_devinit(struct skge_hw *hw, int port, dev->features |= NETIF_F_HIGHDMA; skge = netdev_priv(dev); + netif_napi_add(dev, &skge->napi, skge_poll, NAPI_WEIGHT); skge->netdev = dev; skge->hw = hw; skge->msg_enable = netif_msg_init(debug, default_msg); @@ -3623,12 +3621,11 @@ static struct net_device *skge_devinit(struct skge_hw *hw, int port, static void __devinit skge_show_addr(struct net_device *dev) { const struct skge_port *skge = netdev_priv(dev); + DECLARE_MAC_BUF(mac); if (netif_msg_probe(skge)) - printk(KERN_INFO PFX "%s: addr %02x:%02x:%02x:%02x:%02x:%02x\n", - dev->name, - dev->dev_addr[0], dev->dev_addr[1], dev->dev_addr[2], - dev->dev_addr[3], dev->dev_addr[4], dev->dev_addr[5]); + printk(KERN_INFO PFX "%s: addr %s\n", + dev->name, print_mac(mac, dev->dev_addr)); } static int __devinit skge_probe(struct pci_dev *pdev, diff --git a/drivers/net/skge.h b/drivers/net/skge.h index edd71468220ca0e5a19377db04ed312f773181fd..1a57bdd1ddf158323a39905a73058ac3c214b944 100644 --- a/drivers/net/skge.h +++ b/drivers/net/skge.h @@ -1351,8 +1351,6 @@ enum { PHY_M_PC_EN_DET_PLUS = 3<<8, /* Energy Detect Plus (Mode 2) */ }; -#define PHY_M_PC_MDI_XMODE(x) ((((u16)(x)<<5) & PHY_M_PC_MDIX_MSK) - enum { PHY_M_PC_MAN_MDI = 0, /* 00 = Manual MDI configuration */ PHY_M_PC_MAN_MDIX = 1, /* 01 = Manual MDIX configuration */ @@ -2448,6 +2446,7 @@ enum pause_status { struct skge_port { struct skge_hw *hw; struct net_device *netdev; + struct napi_struct napi; int port; u32 msg_enable; diff --git a/drivers/net/sky2.c b/drivers/net/sky2.c index ea117fc3d5e390c6da6b763a61f6d94a071c3c86..0a3203465415fafda51abd5be2ecd19d436412c1 100644 --- a/drivers/net/sky2.c +++ b/drivers/net/sky2.c @@ -31,6 +31,7 @@ #include #include #include +#include #include #include #include @@ -222,21 +223,22 @@ static void sky2_power_on(struct sky2_hw *hw) sky2_write8(hw, B2_Y2_CLK_GATE, 0); if (hw->flags & SKY2_HW_ADV_POWER_CTL) { + struct pci_dev *pdev = hw->pdev; u32 reg; - sky2_pci_write32(hw, PCI_DEV_REG3, 0); + pci_write_config_dword(pdev, PCI_DEV_REG3, 0); - reg = sky2_pci_read32(hw, PCI_DEV_REG4); + pci_read_config_dword(pdev, PCI_DEV_REG4, ®); /* set all bits to 0 except bits 15..12 and 8 */ reg &= P_ASPM_CONTROL_MSK; - sky2_pci_write32(hw, PCI_DEV_REG4, reg); + pci_write_config_dword(pdev, PCI_DEV_REG4, reg); - reg = sky2_pci_read32(hw, PCI_DEV_REG5); + pci_read_config_dword(pdev, PCI_DEV_REG5, ®); /* set all bits to 0 except bits 28 & 27 */ reg &= P_CTL_TIM_VMAIN_AV_MSK; - sky2_pci_write32(hw, PCI_DEV_REG5, reg); + pci_write_config_dword(pdev, PCI_DEV_REG5, reg); - sky2_pci_write32(hw, PCI_CFG_REG_1, 0); + pci_write_config_dword(pdev, PCI_CFG_REG_1, 0); /* Enable workaround for dev 4.107 on Yukon-Ultra & Extreme */ reg = sky2_read32(hw, B2_GP_IO); @@ -602,6 +604,7 @@ static void sky2_phy_init(struct sky2_hw *hw, unsigned port) static void sky2_phy_power(struct sky2_hw *hw, unsigned port, int 
onoff) { + struct pci_dev *pdev = hw->pdev; u32 reg1; static const u32 phy_power[] = { PCI_Y2_PHY1_POWD, PCI_Y2_PHY2_POWD }; @@ -610,17 +613,16 @@ static void sky2_phy_power(struct sky2_hw *hw, unsigned port, int onoff) if (hw->chip_id == CHIP_ID_YUKON_XL && hw->chip_rev > 1) onoff = !onoff; - sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON); - reg1 = sky2_pci_read32(hw, PCI_DEV_REG1); + pci_read_config_dword(pdev, PCI_DEV_REG1, ®1); if (onoff) /* Turn off phy power saving */ reg1 &= ~phy_power[port]; else reg1 |= phy_power[port]; - sky2_pci_write32(hw, PCI_DEV_REG1, reg1); - sky2_pci_read32(hw, PCI_DEV_REG1); - sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF); + pci_write_config_dword(pdev, PCI_DEV_REG1, reg1); + pci_read_config_dword(pdev, PCI_DEV_REG1, ®1); + udelay(100); } @@ -688,11 +690,9 @@ static void sky2_wol_init(struct sky2_port *sky2) sky2_write16(hw, WOL_REGS(port, WOL_CTRL_STAT), ctrl); /* Turn on legacy PCI-Express PME mode */ - sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON); - reg1 = sky2_pci_read32(hw, PCI_DEV_REG1); + pci_read_config_dword(hw->pdev, PCI_DEV_REG1, ®1); reg1 |= PCI_Y2_PME_LEGACY; - sky2_pci_write32(hw, PCI_DEV_REG1, reg1); - sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF); + pci_write_config_dword(hw->pdev, PCI_DEV_REG1, reg1); /* block receiver */ sky2_write8(hw, SK_REG(port, RX_GMF_CTRL_T), GMF_RST_SET); @@ -1130,7 +1130,7 @@ static void sky2_vlan_rx_register(struct net_device *dev, struct vlan_group *grp u16 port = sky2->port; netif_tx_lock_bh(dev); - netif_poll_disable(sky2->hw->dev[0]); + napi_disable(&hw->napi); sky2->vlgrp = grp; if (grp) { @@ -1145,7 +1145,7 @@ static void sky2_vlan_rx_register(struct net_device *dev, struct vlan_group *grp TX_VLAN_TAG_OFF); } - netif_poll_enable(sky2->hw->dev[0]); + napi_enable(&hw->napi); netif_tx_unlock_bh(dev); } #endif @@ -1309,9 +1309,9 @@ static int sky2_up(struct net_device *dev) struct sky2_port *osky2 = netdev_priv(otherdev); u16 cmd; - cmd = sky2_pci_read16(hw, cap + PCI_X_CMD); + pci_read_config_word(hw->pdev, cap + PCI_X_CMD, &cmd); cmd &= ~PCI_X_CMD_MAX_SPLIT; - sky2_pci_write16(hw, cap + PCI_X_CMD, cmd); + pci_write_config_word(hw->pdev, cap + PCI_X_CMD, cmd); sky2->rx_csum = 0; osky2->rx_csum = 0; @@ -1385,9 +1385,13 @@ static int sky2_up(struct net_device *dev) sky2_prefetch_init(hw, txqaddr[port], sky2->tx_le_map, TX_RING_SIZE - 1); + napi_enable(&hw->napi); + err = sky2_rx_start(sky2); - if (err) + if (err) { + napi_disable(&hw->napi); goto err_out; + } /* Enable interrupts from phy/mac for port */ imask = sky2_read32(hw, B0_IMSK); @@ -1676,6 +1680,8 @@ static int sky2_down(struct net_device *dev) /* Stop more packets from being queued */ netif_stop_queue(dev); + napi_disable(&hw->napi); + /* Disable port IRQ */ imask = sky2_read32(hw, B0_IMSK); imask &= ~portirq_msk[port]; @@ -2016,7 +2022,7 @@ static int sky2_change_mtu(struct net_device *dev, int new_mtu) dev->trans_start = jiffies; /* prevent tx timeout */ netif_stop_queue(dev); - netif_poll_disable(hw->dev[0]); + napi_disable(&hw->napi); synchronize_irq(hw->pdev->irq); @@ -2043,12 +2049,16 @@ static int sky2_change_mtu(struct net_device *dev, int new_mtu) err = sky2_rx_start(sky2); sky2_write32(hw, B0_IMSK, imask); + /* Unconditionally re-enable NAPI because even if we + * call dev_close() that will do a napi_disable(). 
+ */ + napi_enable(&hw->napi); + if (err) dev_close(dev); else { gma_write16(hw, port, GM_GP_CTRL, ctl); - netif_poll_enable(hw->dev[0]); netif_wake_queue(dev); } @@ -2235,15 +2245,13 @@ static inline void sky2_tx_done(struct net_device *dev, u16 last) } /* Process status response ring */ -static int sky2_status_intr(struct sky2_hw *hw, int to_do) +static int sky2_status_intr(struct sky2_hw *hw, int to_do, u16 idx) { int work_done = 0; unsigned rx[2] = { 0, 0 }; - u16 hwidx = sky2_read16(hw, STAT_PUT_IDX); rmb(); - - while (hw->st_idx != hwidx) { + do { struct sky2_port *sky2; struct sky2_status_le *le = hw->st_le + hw->st_idx; unsigned port = le->css & CSS_LINK_BIT; @@ -2354,7 +2362,7 @@ static int sky2_status_intr(struct sky2_hw *hw, int to_do) printk(KERN_WARNING PFX "unknown status opcode 0x%x\n", le->opcode); } - } + } while (hw->st_idx != idx); /* Fully processed status ring so clear irq */ sky2_write32(hw, STAT_CTRL, SC_STAT_CLR_IRQ); @@ -2415,7 +2423,11 @@ static void sky2_hw_error(struct sky2_hw *hw, unsigned port, u32 status) static void sky2_hw_intr(struct sky2_hw *hw) { + struct pci_dev *pdev = hw->pdev; u32 status = sky2_read32(hw, B0_HWE_ISRC); + u32 hwmsk = sky2_read32(hw, B0_HWE_IMSK); + + status &= hwmsk; if (status & Y2_IS_TIST_OV) sky2_write8(hw, GMAC_TI_ST_CTRL, GMT_ST_CLR_IRQ); @@ -2423,38 +2435,24 @@ static void sky2_hw_intr(struct sky2_hw *hw) if (status & (Y2_IS_MST_ERR | Y2_IS_IRQ_STAT)) { u16 pci_err; - pci_err = sky2_pci_read16(hw, PCI_STATUS); + pci_read_config_word(pdev, PCI_STATUS, &pci_err); if (net_ratelimit()) - dev_err(&hw->pdev->dev, "PCI hardware error (0x%x)\n", + dev_err(&pdev->dev, "PCI hardware error (0x%x)\n", pci_err); - sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON); - sky2_pci_write16(hw, PCI_STATUS, - pci_err | PCI_STATUS_ERROR_BITS); - sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF); + pci_write_config_word(pdev, PCI_STATUS, + pci_err | PCI_STATUS_ERROR_BITS); } if (status & Y2_IS_PCI_EXP) { /* PCI-Express uncorrectable Error occurred */ - u32 pex_err; - - pex_err = sky2_pci_read32(hw, PEX_UNC_ERR_STAT); + int pos = pci_find_aer_capability(hw->pdev); + u32 err; + pci_read_config_dword(pdev, pos + PCI_ERR_UNCOR_STATUS, &err); if (net_ratelimit()) - dev_err(&hw->pdev->dev, "PCI Express error (0x%x)\n", - pex_err); - - /* clear the interrupt */ - sky2_write32(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON); - sky2_pci_write32(hw, PEX_UNC_ERR_STAT, - 0xffffffffUL); - sky2_write32(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF); - - if (pex_err & PEX_FATAL_ERRORS) { - u32 hwmsk = sky2_read32(hw, B0_HWE_IMSK); - hwmsk &= ~Y2_IS_PCI_EXP; - sky2_write32(hw, B0_HWE_IMSK, hwmsk); - } + dev_err(&pdev->dev, "PCI Express error (0x%x)\n", err); + pci_cleanup_aer_uncorrect_error_status(pdev); } if (status & Y2_HWE_L1_MASK) @@ -2544,18 +2542,15 @@ static int sky2_rx_hung(struct net_device *dev) static void sky2_watchdog(unsigned long arg) { struct sky2_hw *hw = (struct sky2_hw *) arg; - struct net_device *dev; /* Check for lost IRQ once a second */ if (sky2_read32(hw, B0_ISRC)) { - dev = hw->dev[0]; - if (__netif_rx_schedule_prep(dev)) - __netif_rx_schedule(dev); + napi_schedule(&hw->napi); } else { int i, active = 0; for (i = 0; i < hw->ports; i++) { - dev = hw->dev[i]; + struct net_device *dev = hw->dev[i]; if (!netif_running(dev)) continue; ++active; @@ -2605,11 +2600,12 @@ static void sky2_err_intr(struct sky2_hw *hw, u32 status) sky2_le_error(hw, 1, Q_XA2, TX_RING_SIZE); } -static int sky2_poll(struct net_device *dev0, int *budget) +static int sky2_poll(struct napi_struct 
*napi, int work_limit) { - struct sky2_hw *hw = ((struct sky2_port *) netdev_priv(dev0))->hw; - int work_done; + struct sky2_hw *hw = container_of(napi, struct sky2_hw, napi); u32 status = sky2_read32(hw, B0_Y2_SP_EISR); + int work_done = 0; + u16 idx; if (unlikely(status & Y2_IS_ERROR)) sky2_err_intr(hw, status); @@ -2620,13 +2616,12 @@ static int sky2_poll(struct net_device *dev0, int *budget) if (status & Y2_IS_IRQ_PHY2) sky2_phy_intr(hw, 1); - work_done = sky2_status_intr(hw, min(dev0->quota, *budget)); - *budget -= work_done; - dev0->quota -= work_done; + while ((idx = sky2_read16(hw, STAT_PUT_IDX)) != hw->st_idx) { + work_done += sky2_status_intr(hw, work_limit - work_done, idx); - /* More work? */ - if (hw->st_idx != sky2_read16(hw, STAT_PUT_IDX)) - return 1; + if (work_done >= work_limit) + goto done; + } /* Bug/Errata workaround? * Need to kick the TX irq moderation timer. @@ -2635,16 +2630,16 @@ static int sky2_poll(struct net_device *dev0, int *budget) sky2_write8(hw, STAT_TX_TIMER_CTRL, TIM_STOP); sky2_write8(hw, STAT_TX_TIMER_CTRL, TIM_START); } - netif_rx_complete(dev0); - + napi_complete(napi); sky2_read32(hw, B0_Y2_SP_LISR); - return 0; +done: + + return work_done; } static irqreturn_t sky2_intr(int irq, void *dev_id) { struct sky2_hw *hw = dev_id; - struct net_device *dev0 = hw->dev[0]; u32 status; /* Reading this mask interrupts as side effect */ @@ -2653,8 +2648,8 @@ static irqreturn_t sky2_intr(int irq, void *dev_id) return IRQ_NONE; prefetch(&hw->st_le[hw->st_idx]); - if (likely(__netif_rx_schedule_prep(dev0))) - __netif_rx_schedule(dev0); + + napi_schedule(&hw->napi); return IRQ_HANDLED; } @@ -2663,10 +2658,8 @@ static irqreturn_t sky2_intr(int irq, void *dev_id) static void sky2_netpoll(struct net_device *dev) { struct sky2_port *sky2 = netdev_priv(dev); - struct net_device *dev0 = sky2->hw->dev[0]; - if (netif_running(dev) && __netif_rx_schedule_prep(dev0)) - __netif_rx_schedule(dev0); + napi_schedule(&sky2->hw->napi); } #endif @@ -2706,10 +2699,13 @@ static inline u32 sky2_clk2us(const struct sky2_hw *hw, u32 clk) static int __devinit sky2_init(struct sky2_hw *hw) { + int rc; u8 t8; - /* Enable all clocks */ - sky2_pci_write32(hw, PCI_DEV_REG3, 0); + /* Enable all clocks and check for bad PCI access */ + rc = pci_write_config_dword(hw->pdev, PCI_DEV_REG3, 0); + if (rc) + return rc; sky2_write8(hw, B0_CTST, CS_RST_CLR); @@ -2783,8 +2779,10 @@ static int __devinit sky2_init(struct sky2_hw *hw) static void sky2_reset(struct sky2_hw *hw) { + struct pci_dev *pdev = hw->pdev; u16 status; - int i; + int i, cap; + u32 hwe_mask = Y2_HWE_ALL_MASK; /* disable ASF */ if (hw->chip_id == CHIP_ID_YUKON_EX) { @@ -2801,18 +2799,25 @@ static void sky2_reset(struct sky2_hw *hw) sky2_write8(hw, B0_CTST, CS_RST_CLR); /* clear PCI errors, if any */ - status = sky2_pci_read16(hw, PCI_STATUS); - - sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON); - sky2_pci_write16(hw, PCI_STATUS, status | PCI_STATUS_ERROR_BITS); - + pci_read_config_word(pdev, PCI_STATUS, &status); + status |= PCI_STATUS_ERROR_BITS; + pci_write_config_word(pdev, PCI_STATUS, status); sky2_write8(hw, B0_CTST, CS_MRST_CLR); - /* clear any PEX errors */ - if (pci_find_capability(hw->pdev, PCI_CAP_ID_EXP)) - sky2_pci_write32(hw, PEX_UNC_ERR_STAT, 0xffffffffUL); + cap = pci_find_capability(pdev, PCI_CAP_ID_EXP); + if (cap) { + /* Check for advanced error reporting */ + pci_cleanup_aer_uncorrect_error_status(pdev); + pci_cleanup_aer_correct_error_status(pdev); + + /* If error bit is stuck on ignore it */ + if (sky2_read32(hw, 
B0_HWE_ISRC) & Y2_IS_PCI_EXP) + dev_info(&pdev->dev, "ignoring stuck error report bit\n"); + else if (pci_enable_pcie_error_reporting(pdev)) + hwe_mask |= Y2_IS_PCI_EXP; + } sky2_power_on(hw); @@ -2826,8 +2831,6 @@ static void sky2_reset(struct sky2_hw *hw) | GMC_BYP_RETR_ON); } - sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF); - /* Clear I2C IRQ noise */ sky2_write32(hw, B2_I2C_IRQ, 1); @@ -2866,7 +2869,7 @@ static void sky2_reset(struct sky2_hw *hw) sky2_write8(hw, RAM_BUFFER(i, B3_RI_RTO_XS2), SK_RI_TO_53); } - sky2_write32(hw, B0_HWE_IMSK, Y2_HWE_ALL_MASK); + sky2_write32(hw, B0_HWE_IMSK, hwe_mask); for (i = 0; i < hw->ports; i++) sky2_gmac_reset(hw, i); @@ -2914,8 +2917,6 @@ static void sky2_restart(struct work_struct *work) sky2_write32(hw, B0_IMSK, 0); sky2_read32(hw, B0_IMSK); - netif_poll_disable(hw->dev[0]); - for (i = 0; i < hw->ports; i++) { dev = hw->dev[i]; if (netif_running(dev)) @@ -2924,7 +2925,6 @@ static void sky2_restart(struct work_struct *work) sky2_reset(hw); sky2_write32(hw, B0_IMSK, Y2_IS_BASE); - netif_poll_enable(hw->dev[0]); for (i = 0; i < hw->ports; i++) { dev = hw->dev[i]; @@ -3192,9 +3192,14 @@ static void sky2_set_msglevel(struct net_device *netdev, u32 value) sky2->msg_enable = value; } -static int sky2_get_stats_count(struct net_device *dev) +static int sky2_get_sset_count(struct net_device *dev, int sset) { - return ARRAY_SIZE(sky2_stats); + switch (sset) { + case ETH_SS_STATS: + return ARRAY_SIZE(sky2_stats); + default: + return -EOPNOTSUPP; + } } static void sky2_get_ethtool_stats(struct net_device *dev, @@ -3613,26 +3618,31 @@ static int sky2_get_eeprom_len(struct net_device *dev) struct sky2_port *sky2 = netdev_priv(dev); u16 reg2; - reg2 = sky2_pci_read32(sky2->hw, PCI_DEV_REG2); + pci_read_config_word(sky2->hw->pdev, PCI_DEV_REG2, ®2); return 1 << ( ((reg2 & PCI_VPD_ROM_SZ) >> 14) + 8); } -static u32 sky2_vpd_read(struct sky2_hw *hw, int cap, u16 offset) +static u32 sky2_vpd_read(struct pci_dev *pdev, int cap, u16 offset) { - sky2_pci_write16(hw, cap + PCI_VPD_ADDR, offset); + u32 val; + + pci_write_config_word(pdev, cap + PCI_VPD_ADDR, offset); - while (!(sky2_pci_read16(hw, cap + PCI_VPD_ADDR) & PCI_VPD_ADDR_F)) - cpu_relax(); - return sky2_pci_read32(hw, cap + PCI_VPD_DATA); + do { + pci_read_config_word(pdev, cap + PCI_VPD_ADDR, &offset); + } while (!(offset & PCI_VPD_ADDR_F)); + + pci_read_config_dword(pdev, cap + PCI_VPD_DATA, &val); + return val; } -static void sky2_vpd_write(struct sky2_hw *hw, int cap, u16 offset, u32 val) +static void sky2_vpd_write(struct pci_dev *pdev, int cap, u16 offset, u32 val) { - sky2_pci_write32(hw, cap + PCI_VPD_DATA, val); - sky2_pci_write16(hw, cap + PCI_VPD_ADDR, offset | PCI_VPD_ADDR_F); + pci_write_config_word(pdev, cap + PCI_VPD_DATA, val); + pci_write_config_dword(pdev, cap + PCI_VPD_ADDR, offset | PCI_VPD_ADDR_F); do { - cpu_relax(); - } while (sky2_pci_read16(hw, cap + PCI_VPD_ADDR) & PCI_VPD_ADDR_F); + pci_read_config_word(pdev, cap + PCI_VPD_ADDR, &offset); + } while (offset & PCI_VPD_ADDR_F); } static int sky2_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, @@ -3649,7 +3659,7 @@ static int sky2_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom eeprom->magic = SKY2_EEPROM_MAGIC; while (length > 0) { - u32 val = sky2_vpd_read(sky2->hw, cap, offset); + u32 val = sky2_vpd_read(sky2->hw->pdev, cap, offset); int n = min_t(int, length, sizeof(val)); memcpy(data, &val, n); @@ -3679,10 +3689,10 @@ static int sky2_set_eeprom(struct net_device *dev, struct ethtool_eeprom 
*eeprom int n = min_t(int, length, sizeof(val)); if (n < sizeof(val)) - val = sky2_vpd_read(sky2->hw, cap, offset); + val = sky2_vpd_read(sky2->hw->pdev, cap, offset); memcpy(&val, data, n); - sky2_vpd_write(sky2->hw, cap, offset, val); + sky2_vpd_write(sky2->hw->pdev, cap, offset, val); length -= n; data += n; @@ -3707,11 +3717,8 @@ static const struct ethtool_ops sky2_ethtool_ops = { .get_eeprom_len = sky2_get_eeprom_len, .get_eeprom = sky2_get_eeprom, .set_eeprom = sky2_set_eeprom, - .get_sg = ethtool_op_get_sg, .set_sg = ethtool_op_set_sg, - .get_tx_csum = ethtool_op_get_tx_csum, .set_tx_csum = sky2_set_tx_csum, - .get_tso = ethtool_op_get_tso, .set_tso = sky2_set_tso, .get_rx_csum = sky2_get_rx_csum, .set_rx_csum = sky2_set_rx_csum, @@ -3723,7 +3730,7 @@ static const struct ethtool_ops sky2_ethtool_ops = { .get_pauseparam = sky2_get_pauseparam, .set_pauseparam = sky2_set_pauseparam, .phys_id = sky2_phys_id, - .get_stats_count = sky2_get_stats_count, + .get_sset_count = sky2_get_sset_count, .get_ethtool_stats = sky2_get_ethtool_stats, }; @@ -3735,7 +3742,7 @@ static int sky2_debug_show(struct seq_file *seq, void *v) { struct net_device *dev = seq->private; const struct sky2_port *sky2 = netdev_priv(dev); - const struct sky2_hw *hw = sky2->hw; + struct sky2_hw *hw = sky2->hw; unsigned port = sky2->port; unsigned idx, last; int sop; @@ -3748,7 +3755,7 @@ static int sky2_debug_show(struct seq_file *seq, void *v) sky2_read32(hw, B0_IMSK), sky2_read32(hw, B0_Y2_SP_ICR)); - netif_poll_disable(hw->dev[0]); + napi_disable(&hw->napi); last = sky2_read16(hw, STAT_PUT_IDX); if (hw->st_idx == last) @@ -3818,7 +3825,7 @@ static int sky2_debug_show(struct seq_file *seq, void *v) last = sky2_read16(hw, Y2_QADDR(rxqaddr[port], PREF_UNIT_PUT_IDX)), sky2_read16(hw, Y2_QADDR(rxqaddr[port], PREF_UNIT_LAST_IDX))); - netif_poll_enable(hw->dev[0]); + napi_enable(&hw->napi); return 0; } @@ -3843,42 +3850,34 @@ static int sky2_device_event(struct notifier_block *unused, unsigned long event, void *ptr) { struct net_device *dev = ptr; + struct sky2_port *sky2 = netdev_priv(dev); - if (dev->open == sky2_up) { - struct sky2_port *sky2 = netdev_priv(dev); + if (dev->open != sky2_up || !sky2_debug) + return NOTIFY_DONE; - switch(event) { - case NETDEV_CHANGENAME: - if (!netif_running(dev)) - break; - /* fallthrough */ - case NETDEV_DOWN: - case NETDEV_GOING_DOWN: - if (sky2->debugfs) { - printk(KERN_DEBUG PFX "%s: remove debugfs\n", - dev->name); - debugfs_remove(sky2->debugfs); - sky2->debugfs = NULL; - } + switch(event) { + case NETDEV_CHANGENAME: + if (sky2->debugfs) { + sky2->debugfs = debugfs_rename(sky2_debug, sky2->debugfs, + sky2_debug, dev->name); + } + break; - if (event != NETDEV_CHANGENAME) - break; - /* fallthrough for changename */ - case NETDEV_UP: - if (sky2_debug) { - struct dentry *d; - d = debugfs_create_file(dev->name, S_IRUGO, - sky2_debug, dev, - &sky2_debug_fops); - if (d == NULL || IS_ERR(d)) - printk(KERN_INFO PFX - "%s: debugfs create failed\n", - dev->name); - else - sky2->debugfs = d; - } - break; + case NETDEV_GOING_DOWN: + if (sky2->debugfs) { + printk(KERN_DEBUG PFX "%s: remove debugfs\n", + dev->name); + debugfs_remove(sky2->debugfs); + sky2->debugfs = NULL; } + break; + + case NETDEV_UP: + sky2->debugfs = debugfs_create_file(dev->name, S_IRUGO, + sky2_debug, dev, + &sky2_debug_fops); + if (IS_ERR(sky2->debugfs)) + sky2->debugfs = NULL; } return NOTIFY_DONE; @@ -3929,7 +3928,6 @@ static __devinit struct net_device *sky2_init_netdev(struct sky2_hw *hw, return NULL; } - 
SET_MODULE_OWNER(dev); SET_NETDEV_DEV(dev, &hw->pdev->dev); dev->irq = hw->pdev->irq; dev->open = sky2_up; @@ -3943,15 +3941,8 @@ static __devinit struct net_device *sky2_init_netdev(struct sky2_hw *hw, SET_ETHTOOL_OPS(dev, &sky2_ethtool_ops); dev->tx_timeout = sky2_tx_timeout; dev->watchdog_timeo = TX_WATCHDOG; - if (port == 0) - dev->poll = sky2_poll; - dev->weight = NAPI_WEIGHT; #ifdef CONFIG_NET_POLL_CONTROLLER - /* Network console (only works on port 0) - * because netpoll makes assumptions about NAPI - */ - if (port == 0) - dev->poll_controller = sky2_netpoll; + dev->poll_controller = sky2_netpoll; #endif sky2 = netdev_priv(dev); @@ -4000,12 +3991,11 @@ static __devinit struct net_device *sky2_init_netdev(struct sky2_hw *hw, static void __devinit sky2_show_addr(struct net_device *dev) { const struct sky2_port *sky2 = netdev_priv(dev); + DECLARE_MAC_BUF(mac); if (netif_msg_probe(sky2)) - printk(KERN_INFO PFX "%s: addr %02x:%02x:%02x:%02x:%02x:%02x\n", - dev->name, - dev->dev_addr[0], dev->dev_addr[1], dev->dev_addr[2], - dev->dev_addr[3], dev->dev_addr[4], dev->dev_addr[5]); + printk(KERN_INFO PFX "%s: addr %s\n", + dev->name, print_mac(mac, dev->dev_addr)); } /* Handle software interrupt used during MSI test */ @@ -4138,15 +4128,14 @@ static int __devinit sky2_probe(struct pci_dev *pdev, */ { u32 reg; - reg = sky2_pci_read32(hw, PCI_DEV_REG2); + pci_read_config_dword(pdev,PCI_DEV_REG2, ®); reg &= ~PCI_REV_DESC; - sky2_pci_write32(hw, PCI_DEV_REG2, reg); + pci_write_config_dword(pdev, PCI_DEV_REG2, reg); } #endif /* ring for status responses */ - hw->st_le = pci_alloc_consistent(hw->pdev, STATUS_LE_BYTES, - &hw->st_dma); + hw->st_le = pci_alloc_consistent(pdev, STATUS_LE_BYTES, &hw->st_dma); if (!hw->st_le) goto err_out_iounmap; @@ -4166,6 +4155,7 @@ static int __devinit sky2_probe(struct pci_dev *pdev, err = -ENOMEM; goto err_out_free_pci; } + netif_napi_add(dev, &hw->napi, sky2_poll, NAPI_WEIGHT); if (!disable_msi && pci_enable_msi(pdev) == 0) { err = sky2_test_msi(hw); @@ -4222,7 +4212,7 @@ static int __devinit sky2_probe(struct pci_dev *pdev, free_netdev(dev); err_out_free_pci: sky2_write8(hw, B0_CTST, CS_RST_SET); - pci_free_consistent(hw->pdev, STATUS_LE_BYTES, hw->st_le, hw->st_dma); + pci_free_consistent(pdev, STATUS_LE_BYTES, hw->st_le, hw->st_dma); err_out_iounmap: iounmap(hw->regs); err_out_free_hw: @@ -4288,8 +4278,6 @@ static int sky2_suspend(struct pci_dev *pdev, pm_message_t state) if (!hw) return 0; - netif_poll_disable(hw->dev[0]); - for (i = 0; i < hw->ports; i++) { struct net_device *dev = hw->dev[i]; struct sky2_port *sky2 = netdev_priv(dev); @@ -4335,7 +4323,7 @@ static int sky2_resume(struct pci_dev *pdev) if (hw->chip_id == CHIP_ID_YUKON_EX || hw->chip_id == CHIP_ID_YUKON_EC_U || hw->chip_id == CHIP_ID_YUKON_FE_P) - sky2_pci_write32(hw, PCI_DEV_REG3, 0); + pci_write_config_dword(pdev, PCI_DEV_REG3, 0); sky2_reset(hw); @@ -4356,8 +4344,6 @@ static int sky2_resume(struct pci_dev *pdev) } } - netif_poll_enable(hw->dev[0]); - return 0; out: dev_err(&pdev->dev, "resume failed (%d)\n", err); @@ -4374,7 +4360,7 @@ static void sky2_shutdown(struct pci_dev *pdev) if (!hw) return; - netif_poll_disable(hw->dev[0]); + napi_disable(&hw->napi); for (i = 0; i < hw->ports; i++) { struct net_device *dev = hw->dev[i]; diff --git a/drivers/net/sky2.h b/drivers/net/sky2.h index 8bc5c54e3efacd2cdc63e12aabd31a5ce37b0051..f4a3c2f403e578dca936a8d3e92ae5f0cfdb1dfc 100644 --- a/drivers/net/sky2.h +++ b/drivers/net/sky2.h @@ -18,14 +18,6 @@ enum { PCI_CFG_REG_1 = 0x94, }; -enum { - 
PEX_DEV_CAP = 0xe4, - PEX_DEV_CTRL = 0xe8, - PEX_DEV_STA = 0xea, - PEX_LNK_STAT = 0xf2, - PEX_UNC_ERR_STAT= 0x104, -}; - /* Yukon-2 */ enum pci_dev_reg_1 { PCI_Y2_PIG_ENA = 1<<31, /* Enable Plug-in-Go (YUKON-2) */ @@ -151,38 +143,6 @@ enum pci_cfg_reg1 { PCI_STATUS_REC_TARGET_ABORT | \ PCI_STATUS_PARITY) -enum pex_dev_ctrl { - PEX_DC_MAX_RRS_MSK = 7<<12, /* Bit 14..12: Max. Read Request Size */ - PEX_DC_EN_NO_SNOOP = 1<<11,/* Enable No Snoop */ - PEX_DC_EN_AUX_POW = 1<<10,/* Enable AUX Power */ - PEX_DC_EN_PHANTOM = 1<<9, /* Enable Phantom Functions */ - PEX_DC_EN_EXT_TAG = 1<<8, /* Enable Extended Tag Field */ - PEX_DC_MAX_PLS_MSK = 7<<5, /* Bit 7.. 5: Max. Payload Size Mask */ - PEX_DC_EN_REL_ORD = 1<<4, /* Enable Relaxed Ordering */ - PEX_DC_EN_UNS_RQ_RP = 1<<3, /* Enable Unsupported Request Reporting */ - PEX_DC_EN_FAT_ER_RP = 1<<2, /* Enable Fatal Error Reporting */ - PEX_DC_EN_NFA_ER_RP = 1<<1, /* Enable Non-Fatal Error Reporting */ - PEX_DC_EN_COR_ER_RP = 1<<0, /* Enable Correctable Error Reporting */ -}; -#define PEX_DC_MAX_RD_RQ_SIZE(x) (((x)<<12) & PEX_DC_MAX_RRS_MSK) - -/* PEX_UNC_ERR_STAT PEX Uncorrectable Errors Status Register (Yukon-2) */ -enum pex_err { - PEX_UNSUP_REQ = 1<<20, /* Unsupported Request Error */ - - PEX_MALFOR_TLP = 1<<18, /* Malformed TLP */ - - PEX_UNEXP_COMP = 1<<16, /* Unexpected Completion */ - - PEX_COMP_TO = 1<<14, /* Completion Timeout */ - PEX_FLOW_CTRL_P = 1<<13, /* Flow Control Protocol Error */ - PEX_POIS_TLP = 1<<12, /* Poisoned TLP */ - - PEX_DATA_LINK_P = 1<<4, /* Data Link Protocol Error */ - PEX_FATAL_ERRORS= (PEX_MALFOR_TLP | PEX_FLOW_CTRL_P | PEX_DATA_LINK_P), -}; - - enum csr_regs { B0_RAP = 0x0000, B0_CTST = 0x0004, @@ -419,7 +379,6 @@ enum { Y2_IS_PAR_RX2 | Y2_IS_TCP_TXS2| Y2_IS_TCP_TXA2, Y2_HWE_ALL_MASK = Y2_IS_TIST_OV | Y2_IS_MST_ERR | Y2_IS_IRQ_STAT | - Y2_IS_PCI_EXP | Y2_HWE_L1_MASK | Y2_HWE_L2_MASK, }; @@ -1850,6 +1809,28 @@ enum { /* GPHY_CTRL 32 bit GPHY Control Reg (YUKON only) */ enum { + GPC_TX_PAUSE = 1<<30, /* Tx pause enabled (ro) */ + GPC_RX_PAUSE = 1<<29, /* Rx pause enabled (ro) */ + GPC_SPEED = 3<<27, /* PHY speed (ro) */ + GPC_LINK = 1<<26, /* Link up (ro) */ + GPC_DUPLEX = 1<<25, /* Duplex (ro) */ + GPC_CLOCK = 1<<24, /* 125Mhz clock stable (ro) */ + + GPC_PDOWN = 1<<23, /* Internal regulator 2.5 power down */ + GPC_TSTMODE = 1<<22, /* Test mode */ + GPC_REG18 = 1<<21, /* Reg18 Power down */ + GPC_REG12SEL = 3<<19, /* Reg12 power setting */ + GPC_REG18SEL = 3<<17, /* Reg18 power setting */ + GPC_SPILOCK = 1<<16, /* SPI lock (ASF) */ + + GPC_LEDMUX = 3<<14, /* LED Mux */ + GPC_INTPOL = 1<<13, /* Interrupt polarity */ + GPC_DETECT = 1<<12, /* Energy detect */ + GPC_1000HD = 1<<11, /* Enable 1000Mbit HD */ + GPC_SLAVE = 1<<10, /* Slave mode */ + GPC_PAUSE = 1<<9, /* Pause enable */ + GPC_LEDCTL = 3<<6, /* GPHY Leds */ + GPC_RST_CLR = 1<<1, /* Clear GPHY Reset */ GPC_RST_SET = 1<<0, /* Set GPHY Reset */ }; @@ -2057,6 +2038,7 @@ struct sky2_port { struct sky2_hw { void __iomem *regs; struct pci_dev *pdev; + struct napi_struct napi; struct net_device *dev[2]; unsigned long flags; #define SKY2_HW_USE_MSI 0x00000001 @@ -2147,25 +2129,4 @@ static inline void gma_set_addr(struct sky2_hw *hw, unsigned port, unsigned reg, gma_write16(hw, port, reg+4,(u16) addr[2] | ((u16) addr[3] << 8)); gma_write16(hw, port, reg+8,(u16) addr[4] | ((u16) addr[5] << 8)); } - -/* PCI config space access */ -static inline u32 sky2_pci_read32(const struct sky2_hw *hw, unsigned reg) -{ - return sky2_read32(hw, Y2_CFG_SPC + reg); -} - -static inline u16 
sky2_pci_read16(const struct sky2_hw *hw, unsigned reg) -{ - return sky2_read16(hw, Y2_CFG_SPC + reg); -} - -static inline void sky2_pci_write32(struct sky2_hw *hw, unsigned reg, u32 val) -{ - sky2_write32(hw, Y2_CFG_SPC + reg, val); -} - -static inline void sky2_pci_write16(struct sky2_hw *hw, unsigned reg, u16 val) -{ - sky2_write16(hw, Y2_CFG_SPC + reg, val); -} #endif diff --git a/drivers/net/slip.c b/drivers/net/slip.c index 3fd4735006f558f206135fb7a5546af0f3a8411f..335b7cc80ebaf3aaf430af776f08d716dd215dbf 100644 --- a/drivers/net/slip.c +++ b/drivers/net/slip.c @@ -639,8 +639,6 @@ static void sl_setup(struct net_device *dev) dev->addr_len = 0; dev->tx_queue_len = 10; - SET_MODULE_OWNER(dev); - /* New-style flags. */ dev->flags = IFF_NOARP|IFF_POINTOPOINT|IFF_MULTICAST; } diff --git a/drivers/net/smc-mca.c b/drivers/net/smc-mca.c index ae1ae343beedfd13981e10cb80e3854558086977..d6abb68e6e2f20eb8b238c85f710ccd7a75f913d 100644 --- a/drivers/net/smc-mca.c +++ b/drivers/net/smc-mca.c @@ -196,6 +196,7 @@ static int __init ultramca_probe(struct device *gen_dev) int tirq = 0; int base_addr = ultra_io[ultra_found]; int irq = ultra_irq[ultra_found]; + DECLARE_MAC_BUF(mac); if (base_addr || irq) { printk(KERN_INFO "Probing for SMC MCA adapter"); @@ -264,7 +265,6 @@ static int __init ultramca_probe(struct device *gen_dev) if(!dev) return -ENODEV; - SET_MODULE_OWNER(dev); SET_NETDEV_DEV(dev, gen_dev); mca_device_set_name(mca_dev, smc_mca_adapter_names[adapter]); mca_device_set_claim(mca_dev, 1); @@ -331,10 +331,11 @@ static int __init ultramca_probe(struct device *gen_dev) reg4 = inb(ioaddr + 4) & 0x7f; outb(reg4, ioaddr + 4); - printk(KERN_INFO "smc_mca[%d]: Parameters: %#3x,", slot + 1, ioaddr); - for (i = 0; i < 6; i++) - printk(" %2.2X", dev->dev_addr[i] = inb(ioaddr + 8 + i)); + dev->dev_addr[i] = inb(ioaddr + 8 + i); + + printk(KERN_INFO "smc_mca[%d]: Parameters: %#3x, %s", + slot + 1, ioaddr, print_mac(mac, dev->dev_addr)); /* Switch from the station address to the alternate register set * and read the useful registers there. diff --git a/drivers/net/smc-ultra.c b/drivers/net/smc-ultra.c index a52b22d7db657776700a3f9029fd7b43e7d144f9..00d6cf1af4843a929f5856a40c8bf3af9b0b0c9b 100644 --- a/drivers/net/smc-ultra.c +++ b/drivers/net/smc-ultra.c @@ -142,8 +142,6 @@ static int __init do_ultra_probe(struct net_device *dev) int base_addr = dev->base_addr; int irq = dev->irq; - SET_MODULE_OWNER(dev); - #ifdef CONFIG_NET_POLL_CONTROLLER dev->poll_controller = &ultra_poll; #endif @@ -200,6 +198,7 @@ static int __init ultra_probe1(struct net_device *dev, int ioaddr) unsigned char num_pages, irqreg, addr, piomode; unsigned char idreg = inb(ioaddr + 7); unsigned char reg4 = inb(ioaddr + 4) & 0x7f; + DECLARE_MAC_BUF(mac); if (!request_region(ioaddr, ULTRA_IO_EXTENT, DRV_NAME)) return -EBUSY; @@ -226,10 +225,11 @@ static int __init ultra_probe1(struct net_device *dev, int ioaddr) model_name = (idreg & 0xF0) == 0x20 ? "SMC Ultra" : "SMC EtherEZ"; - printk("%s: %s at %#3x,", dev->name, model_name, ioaddr); - for (i = 0; i < 6; i++) - printk(" %2.2X", dev->dev_addr[i] = inb(ioaddr + 8 + i)); + dev->dev_addr[i] = inb(ioaddr + 8 + i); + + printk("%s: %s at %#3x, %s", dev->name, model_name, + ioaddr, print_mac(mac, dev->dev_addr)); /* Switch from the station address to the alternate register set and read the useful registers there. 
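
The address-printing conversions in the smc-mca and smc-ultra probes above (and in several drivers below) rely on the DECLARE_MAC_BUF()/print_mac() helpers from linux/if_ether.h. A minimal sketch of how they combine, with example_show_addr standing in for the drivers' real probe code:

#include <linux/kernel.h>
#include <linux/if_ether.h>
#include <linux/netdevice.h>

/* DECLARE_MAC_BUF() reserves a stack buffer big enough for
 * "xx:xx:xx:xx:xx:xx" plus the terminating NUL; print_mac() formats
 * the address into it and returns the buffer, so the result can be
 * used directly as a printk argument. */
static void example_show_addr(const struct net_device *dev)
{
	DECLARE_MAC_BUF(mac);

	printk(KERN_INFO "%s: addr %s\n",
	       dev->name, print_mac(mac, dev->dev_addr));
}
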
*/ diff --git a/drivers/net/smc-ultra32.c b/drivers/net/smc-ultra32.c index 88a30e56c64ce50c0d62057dc3e9560cf0e4725d..a5a91ace28ccec743507e38210164a67d5dc58c4 100644 --- a/drivers/net/smc-ultra32.c +++ b/drivers/net/smc-ultra32.c @@ -132,8 +132,6 @@ struct net_device * __init ultra32_probe(int unit) netdev_boot_setup_check(dev); } - SET_MODULE_OWNER(dev); - irq = dev->irq; /* EISA spec allows for up to 16 slots, but 8 is typical. */ @@ -165,6 +163,7 @@ static int __init ultra32_probe1(struct net_device *dev, int ioaddr) unsigned char idreg; unsigned char reg4; const char *ifmap[] = {"UTP No Link", "", "UTP/AUI", "UTP/BNC"}; + DECLARE_MAC_BUF(mac); if (!request_region(ioaddr, ULTRA32_IO_EXTENT, DRV_NAME)) return -EBUSY; @@ -205,10 +204,11 @@ static int __init ultra32_probe1(struct net_device *dev, int ioaddr) model_name = "SMC Ultra32"; - printk("%s: %s at 0x%X,", dev->name, model_name, ioaddr); - for (i = 0; i < 6; i++) - printk(" %2.2X", dev->dev_addr[i] = inb(ioaddr + 8 + i)); + dev->dev_addr[i] = inb(ioaddr + 8 + i); + + printk("%s: %s at 0x%X, %s", + dev->name, model_name, ioaddr, print_mac(mac, dev->dev_addr)); /* Switch from the station address to the alternate register set and read the useful registers there. */ diff --git a/drivers/net/smc911x.c b/drivers/net/smc911x.c index db43e42bee3599483389d3b1d9612e2116149702..7c60df46fc65c99208d3e539abaeecc7d409fecf 100644 --- a/drivers/net/smc911x.c +++ b/drivers/net/smc911x.c @@ -115,13 +115,6 @@ struct smc911x_local { */ struct sk_buff *pending_tx_skb; - /* - * these are things that the kernel wants me to keep, so users - * can find out semi-useless statistics of how well the card is - * performing - */ - struct net_device_stats stats; - /* version/revision of the SMC911x chip */ u16 version; u16 revision; @@ -315,8 +308,8 @@ static void smc911x_reset(struct net_device *dev) if (lp->pending_tx_skb != NULL) { dev_kfree_skb (lp->pending_tx_skb); lp->pending_tx_skb = NULL; - lp->stats.tx_errors++; - lp->stats.tx_aborted_errors++; + dev->stats.tx_errors++; + dev->stats.tx_aborted_errors++; } } @@ -449,14 +442,14 @@ static inline void smc911x_rcv(struct net_device *dev) pkt_len = (status & RX_STS_PKT_LEN_) >> 16; if (status & RX_STS_ES_) { /* Deal with a bad packet */ - lp->stats.rx_errors++; + dev->stats.rx_errors++; if (status & RX_STS_CRC_ERR_) - lp->stats.rx_crc_errors++; + dev->stats.rx_crc_errors++; else { if (status & RX_STS_LEN_ERR_) - lp->stats.rx_length_errors++; + dev->stats.rx_length_errors++; if (status & RX_STS_MCAST_) - lp->stats.multicast++; + dev->stats.multicast++; } /* Remove the bad packet data from the RX FIFO */ smc911x_drop_pkt(dev); @@ -467,7 +460,7 @@ static inline void smc911x_rcv(struct net_device *dev) if (unlikely(skb == NULL)) { PRINTK( "%s: Low memory, rcvd packet dropped.\n", dev->name); - lp->stats.rx_dropped++; + dev->stats.rx_dropped++; smc911x_drop_pkt(dev); return; } @@ -503,8 +496,8 @@ static inline void smc911x_rcv(struct net_device *dev) dev->last_rx = jiffies; skb->protocol = eth_type_trans(skb, dev); netif_rx(skb); - lp->stats.rx_packets++; - lp->stats.rx_bytes += pkt_len-4; + dev->stats.rx_packets++; + dev->stats.rx_bytes += pkt_len-4; #endif } } @@ -616,8 +609,8 @@ static int smc911x_hard_start_xmit(struct sk_buff *skb, struct net_device *dev) printk("%s: No Tx free space %d < %d\n", dev->name, free, skb->len); lp->pending_tx_skb = NULL; - lp->stats.tx_errors++; - lp->stats.tx_dropped++; + dev->stats.tx_errors++; + dev->stats.tx_dropped++; dev_kfree_skb(skb); return 0; } @@ -667,8 +660,8 @@ static 
void smc911x_tx(struct net_device *dev) dev->name, (SMC_GET_TX_FIFO_INF() & TX_FIFO_INF_TSUSED_) >> 16); tx_status = SMC_GET_TX_STS_FIFO(); - lp->stats.tx_packets++; - lp->stats.tx_bytes+=tx_status>>16; + dev->stats.tx_packets++; + dev->stats.tx_bytes+=tx_status>>16; DBG(SMC_DEBUG_TX, "%s: Tx FIFO tag 0x%04x status 0x%04x\n", dev->name, (tx_status & 0xffff0000) >> 16, tx_status & 0x0000ffff); @@ -676,22 +669,22 @@ static void smc911x_tx(struct net_device *dev) * full-duplex mode */ if ((tx_status & TX_STS_ES_) && !(lp->ctl_rfduplx && !(tx_status & 0x00000306))) { - lp->stats.tx_errors++; + dev->stats.tx_errors++; } if (tx_status & TX_STS_MANY_COLL_) { - lp->stats.collisions+=16; - lp->stats.tx_aborted_errors++; + dev->stats.collisions+=16; + dev->stats.tx_aborted_errors++; } else { - lp->stats.collisions+=(tx_status & TX_STS_COLL_CNT_) >> 3; + dev->stats.collisions+=(tx_status & TX_STS_COLL_CNT_) >> 3; } /* carrier error only has meaning for half-duplex communication */ if ((tx_status & (TX_STS_LOC_ | TX_STS_NO_CARR_)) && !lp->ctl_rfduplx) { - lp->stats.tx_carrier_errors++; + dev->stats.tx_carrier_errors++; } if (tx_status & TX_STS_LATE_COLL_) { - lp->stats.collisions++; - lp->stats.tx_aborted_errors++; + dev->stats.collisions++; + dev->stats.tx_aborted_errors++; } } } @@ -1121,11 +1114,11 @@ static irqreturn_t smc911x_interrupt(int irq, void *dev_id) /* Handle various error conditions */ if (status & INT_STS_RXE_) { SMC_ACK_INT(INT_STS_RXE_); - lp->stats.rx_errors++; + dev->stats.rx_errors++; } if (status & INT_STS_RXDFH_INT_) { SMC_ACK_INT(INT_STS_RXDFH_INT_); - lp->stats.rx_dropped+=SMC_GET_RX_DROP(); + dev->stats.rx_dropped+=SMC_GET_RX_DROP(); } /* Undocumented interrupt-what is the right thing to do here? */ if (status & INT_STS_RXDF_INT_) { @@ -1140,8 +1133,8 @@ static irqreturn_t smc911x_interrupt(int irq, void *dev_id) cr &= ~MAC_CR_RXEN_; SMC_SET_MAC_CR(cr); DBG(SMC_DEBUG_RX, "%s: RX overrun\n", dev->name); - lp->stats.rx_errors++; - lp->stats.rx_fifo_errors++; + dev->stats.rx_errors++; + dev->stats.rx_fifo_errors++; } SMC_ACK_INT(INT_STS_RDFL_); } @@ -1152,8 +1145,8 @@ static irqreturn_t smc911x_interrupt(int irq, void *dev_id) SMC_SET_MAC_CR(cr); rx_overrun=1; DBG(SMC_DEBUG_RX, "%s: RX overrun\n", dev->name); - lp->stats.rx_errors++; - lp->stats.rx_fifo_errors++; + dev->stats.rx_errors++; + dev->stats.rx_fifo_errors++; } SMC_ACK_INT(INT_STS_RDFO_); } @@ -1307,8 +1300,8 @@ smc911x_rx_dma_irq(int dma, void *data) dev->last_rx = jiffies; skb->protocol = eth_type_trans(skb, dev); netif_rx(skb); - lp->stats.rx_packets++; - lp->stats.rx_bytes += skb->len; + dev->stats.rx_packets++; + dev->stats.rx_bytes += skb->len; spin_lock_irqsave(&lp->lock, flags); pkts = (SMC_GET_RX_FIFO_INF() & RX_FIFO_INF_RXSUSED_) >> 16; @@ -1567,19 +1560,6 @@ static int smc911x_close(struct net_device *dev) return 0; } -/* - * Get the current statistics. - * This may be called with the card open or closed. 
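
The smc911x changes above, and the smc9194/smc91x changes below, drop each driver's private struct net_device_stats in favour of the counters embedded in struct net_device, which is why the get_stats hooks can be deleted as well. A minimal sketch of the resulting pattern, with the example_* functions standing in for a driver's real receive path:

#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Counters live in dev->stats, so there is no private copy to keep in
 * sync and no get_stats method to register; the core's default
 * get_stats simply returns this embedded structure. */
static void example_count_rx(struct net_device *dev, const struct sk_buff *skb)
{
	dev->stats.rx_packets++;
	dev->stats.rx_bytes += skb->len;
}

static void example_count_rx_error(struct net_device *dev, bool crc_error)
{
	dev->stats.rx_errors++;
	if (crc_error)
		dev->stats.rx_crc_errors++;
}
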
- */ -static struct net_device_stats *smc911x_query_statistics(struct net_device *dev) -{ - struct smc911x_local *lp = netdev_priv(dev); - DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __FUNCTION__); - - - return &lp->stats; -} - /* * Ethtool support */ @@ -2056,7 +2036,6 @@ static int __init smc911x_probe(struct net_device *dev, unsigned long ioaddr) dev->hard_start_xmit = smc911x_hard_start_xmit; dev->tx_timeout = smc911x_timeout; dev->watchdog_timeo = msecs_to_jiffies(watchdog); - dev->get_stats = smc911x_query_statistics; dev->set_multicast_list = smc911x_set_multicast_list; dev->ethtool_ops = &smc911x_ethtool_ops; #ifdef CONFIG_NET_POLL_CONTROLLER @@ -2084,7 +2063,7 @@ static int __init smc911x_probe(struct net_device *dev, unsigned long ioaddr) /* Grab the IRQ */ retval = request_irq(dev->irq, &smc911x_interrupt, - IRQF_SHARED | IRQF_TRIGGER_FALLING, dev->name, dev); + IRQF_SHARED | SMC_IRQ_SENSE, dev->name, dev); if (retval) goto err_out; @@ -2181,7 +2160,6 @@ static int smc911x_drv_probe(struct platform_device *pdev) ret = -ENOMEM; goto release_1; } - SET_MODULE_OWNER(ndev); SET_NETDEV_DEV(ndev, &pdev->dev); ndev->dma = (unsigned char)-1; diff --git a/drivers/net/smc911x.h b/drivers/net/smc911x.h index 962a710459fc6ee111d75c1a2f77ba202f772a37..16a0edc078fd370b7995d98cbe331aff8cc26785 100644 --- a/drivers/net/smc911x.h +++ b/drivers/net/smc911x.h @@ -36,6 +36,12 @@ #define SMC_USE_PXA_DMA 1 #define SMC_USE_16BIT 0 #define SMC_USE_32BIT 1 + #define SMC_IRQ_SENSE IRQF_TRIGGER_FALLING +#elif CONFIG_SH_MAGIC_PANEL_R2 + #define SMC_USE_SH_DMA 0 + #define SMC_USE_16BIT 0 + #define SMC_USE_32BIT 1 + #define SMC_IRQ_SENSE IRQF_TRIGGER_LOW #endif diff --git a/drivers/net/smc9194.c b/drivers/net/smc9194.c index 36c1ebadbf20a4d5edf3ca1a30b52cc09d3ae702..cb2698de51909ecff7efa491a267547874860a3c 100644 --- a/drivers/net/smc9194.c +++ b/drivers/net/smc9194.c @@ -190,13 +190,6 @@ static struct devlist smc_devlist[] __initdata = { /* store this information for the driver.. */ struct smc_local { - /* - these are things that the kernel wants me to keep, so users - can find out semi-useless statistics of how well the card is - performing - */ - struct net_device_stats stats; - /* If I have to wait until memory is available to send a packet, I will store the skbuff here, until I get the @@ -248,12 +241,6 @@ static void smc_timeout(struct net_device *dev); */ static int smc_close(struct net_device *dev); -/* - . This routine allows the proc file system to query the driver's - . statistics. -*/ -static struct net_device_stats * smc_query_statistics( struct net_device *dev); - /* . Finally, a call to set promiscuous mode ( for TCPDUMP and related . programs ) and multicast modes. @@ -514,7 +501,7 @@ static int smc_wait_to_send_packet( struct sk_buff * skb, struct net_device * de if ( lp->saved_skb) { /* THIS SHOULD NEVER HAPPEN. */ - lp->stats.tx_aborted_errors++; + dev->stats.tx_aborted_errors++; printk(CARDNAME": Bad Craziness - sent packet while busy.\n" ); return 1; } @@ -744,8 +731,6 @@ struct net_device * __init smc_init(int unit) irq = dev->irq; } - SET_MODULE_OWNER(dev); - if (io > 0x1ff) { /* Check a single specified location. */ err = smc_probe(dev, io); } else if (io != 0) { /* Don't probe at all. */ @@ -891,6 +876,8 @@ static int __init smc_probe(struct net_device *dev, int ioaddr) word memory_info_register; word memory_cfg_register; + DECLARE_MAC_BUF(mac); + /* Grab the region so that no one else tries to probe our ioports. 
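
The smc911x interrupt request above no longer hard-codes IRQF_TRIGGER_FALLING; smc911x.h now supplies a per-board SMC_IRQ_SENSE value instead. A short sketch of that idea, with EXAMPLE_IRQ_SENSE and the handler body being illustrative placeholders rather than the driver's real code:

#include <linux/interrupt.h>
#include <linux/netdevice.h>

/* Board-specific interrupt sense; IRQF_TRIGGER_FALLING is only an
 * example value, a platform header would normally select it. */
#define EXAMPLE_IRQ_SENSE	IRQF_TRIGGER_FALLING

static irqreturn_t example_interrupt(int irq, void *dev_id)
{
	/* ... read the chip status, return IRQ_NONE if it is idle ... */
	return IRQ_HANDLED;
}

static int example_request_irq(struct net_device *dev)
{
	/* The trigger type is ORed into the flags, so the same driver
	 * core works on boards that wire the interrupt line differently. */
	return request_irq(dev->irq, example_interrupt,
			   IRQF_SHARED | EXAMPLE_IRQ_SENSE, dev->name, dev);
}
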
*/ if (!request_region(ioaddr, SMC_IO_EXTENT, DRV_NAME)) return -EBUSY; @@ -1046,10 +1033,7 @@ static int __init smc_probe(struct net_device *dev, int ioaddr) /* . Print the Ethernet address */ - printk("ADDR: "); - for (i = 0; i < 5; i++) - printk("%2.2x:", dev->dev_addr[i] ); - printk("%2.2x \n", dev->dev_addr[5] ); + printk("ADDR: %s\n", print_mac(mac, dev->dev_addr)); /* set the private data to zero by default */ memset(dev->priv, 0, sizeof(struct smc_local)); @@ -1067,7 +1051,6 @@ static int __init smc_probe(struct net_device *dev, int ioaddr) dev->hard_start_xmit = smc_wait_to_send_packet; dev->tx_timeout = smc_timeout; dev->watchdog_timeo = HZ/20; - dev->get_stats = smc_query_statistics; dev->set_multicast_list = smc_set_multicast_list; return 0; @@ -1201,7 +1184,6 @@ static void smc_timeout(struct net_device *dev) */ static void smc_rcv(struct net_device *dev) { - struct smc_local *lp = netdev_priv(dev); int ioaddr = dev->base_addr; int packet_number; word status; @@ -1245,13 +1227,13 @@ static void smc_rcv(struct net_device *dev) /* set multicast stats */ if ( status & RS_MULTICAST ) - lp->stats.multicast++; + dev->stats.multicast++; skb = dev_alloc_skb( packet_length + 5); if ( skb == NULL ) { printk(KERN_NOTICE CARDNAME ": Low memory, packet dropped.\n"); - lp->stats.rx_dropped++; + dev->stats.rx_dropped++; goto done; } @@ -1291,16 +1273,16 @@ static void smc_rcv(struct net_device *dev) skb->protocol = eth_type_trans(skb, dev ); netif_rx(skb); dev->last_rx = jiffies; - lp->stats.rx_packets++; - lp->stats.rx_bytes += packet_length; + dev->stats.rx_packets++; + dev->stats.rx_bytes += packet_length; } else { /* error ... */ - lp->stats.rx_errors++; + dev->stats.rx_errors++; - if ( status & RS_ALGNERR ) lp->stats.rx_frame_errors++; + if ( status & RS_ALGNERR ) dev->stats.rx_frame_errors++; if ( status & (RS_TOOSHORT | RS_TOOLONG ) ) - lp->stats.rx_length_errors++; - if ( status & RS_BADCRC) lp->stats.rx_crc_errors++; + dev->stats.rx_length_errors++; + if ( status & RS_BADCRC) dev->stats.rx_crc_errors++; } done: @@ -1348,12 +1330,12 @@ static void smc_tx( struct net_device * dev ) tx_status = inw( ioaddr + DATA_1 ); PRINTK3((CARDNAME": TX DONE STATUS: %4x \n", tx_status )); - lp->stats.tx_errors++; - if ( tx_status & TS_LOSTCAR ) lp->stats.tx_carrier_errors++; + dev->stats.tx_errors++; + if ( tx_status & TS_LOSTCAR ) dev->stats.tx_carrier_errors++; if ( tx_status & TS_LATCOL ) { printk(KERN_DEBUG CARDNAME ": Late collision occurred on last xmit.\n"); - lp->stats.tx_window_errors++; + dev->stats.tx_window_errors++; } #if 0 if ( tx_status & TS_16COL ) { ... 
} @@ -1448,10 +1430,10 @@ static irqreturn_t smc_interrupt(int irq, void * dev_id) SMC_SELECT_BANK( 0 ); card_stats = inw( ioaddr + COUNTER ); /* single collisions */ - lp->stats.collisions += card_stats & 0xF; + dev->stats.collisions += card_stats & 0xF; card_stats >>= 4; /* multiple collisions */ - lp->stats.collisions += card_stats & 0xF; + dev->stats.collisions += card_stats & 0xF; /* these are for when linux supports these statistics */ @@ -1460,7 +1442,7 @@ static irqreturn_t smc_interrupt(int irq, void * dev_id) ": TX_BUFFER_EMPTY handled\n")); outb( IM_TX_EMPTY_INT, ioaddr + INTERRUPT ); mask &= ~IM_TX_EMPTY_INT; - lp->stats.tx_packets += lp->packets_waiting; + dev->stats.tx_packets += lp->packets_waiting; lp->packets_waiting = 0; } else if (status & IM_ALLOC_INT ) { @@ -1479,8 +1461,8 @@ static irqreturn_t smc_interrupt(int irq, void * dev_id) PRINTK2((CARDNAME": Handoff done successfully.\n")); } else if (status & IM_RX_OVRN_INT ) { - lp->stats.rx_errors++; - lp->stats.rx_fifo_errors++; + dev->stats.rx_errors++; + dev->stats.rx_fifo_errors++; outb( IM_RX_OVRN_INT, ioaddr + INTERRUPT ); } else if (status & IM_EPH_INT ) { PRINTK((CARDNAME ": UNSUPPORTED: EPH INTERRUPT \n")); @@ -1523,16 +1505,6 @@ static int smc_close(struct net_device *dev) return 0; } -/*------------------------------------------------------------ - . Get the current statistics. - . This may be called with the card open or closed. - .-------------------------------------------------------------*/ -static struct net_device_stats* smc_query_statistics(struct net_device *dev) { - struct smc_local *lp = netdev_priv(dev); - - return &lp->stats; -} - /*----------------------------------------------------------- . smc_set_multicast_list . diff --git a/drivers/net/smc91x.c b/drivers/net/smc91x.c index 01cc3c742c38fb85a0ccc11ca75eea1b2d758cc8..24e610e711e8302c6f61e7bf09acc38814aabb34 100644 --- a/drivers/net/smc91x.c +++ b/drivers/net/smc91x.c @@ -183,13 +183,6 @@ struct smc_local { struct sk_buff *pending_tx_skb; struct tasklet_struct tx_task; - /* - * these are things that the kernel wants me to keep, so users - * can find out semi-useless statistics of how well the card is - * performing - */ - struct net_device_stats stats; - /* version/revision of the SMC91x chip */ int version; @@ -332,8 +325,8 @@ static void smc_reset(struct net_device *dev) /* free any pending tx skb */ if (pending_skb) { dev_kfree_skb(pending_skb); - lp->stats.tx_errors++; - lp->stats.tx_aborted_errors++; + dev->stats.tx_errors++; + dev->stats.tx_aborted_errors++; } /* @@ -512,13 +505,13 @@ static inline void smc_rcv(struct net_device *dev) } SMC_WAIT_MMU_BUSY(); SMC_SET_MMU_CMD(MC_RELEASE); - lp->stats.rx_errors++; + dev->stats.rx_errors++; if (status & RS_ALGNERR) - lp->stats.rx_frame_errors++; + dev->stats.rx_frame_errors++; if (status & (RS_TOOSHORT | RS_TOOLONG)) - lp->stats.rx_length_errors++; + dev->stats.rx_length_errors++; if (status & RS_BADCRC) - lp->stats.rx_crc_errors++; + dev->stats.rx_crc_errors++; } else { struct sk_buff *skb; unsigned char *data; @@ -526,7 +519,7 @@ static inline void smc_rcv(struct net_device *dev) /* set multicast stats */ if (status & RS_MULTICAST) - lp->stats.multicast++; + dev->stats.multicast++; /* * Actual payload is packet_len - 6 (or 5 if odd byte). 
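
Elsewhere in this patch the sky2 and spider_net ethtool code replaces the old get_stats_count hook with get_sset_count, which is queried once per string set. A minimal sketch of the new hook, with an invented example_stat_keys table sized with ARRAY_SIZE() the same way those drivers size theirs:

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/ethtool.h>
#include <linux/netdevice.h>

/* Invented statistics-name table; real drivers expose their own keys
 * through the matching get_strings callback. */
static const char example_stat_keys[][ETH_GSTRING_LEN] = {
	"rx_packets", "tx_packets", "rx_errors",
};

/* One callback answers for every string set (statistics, self-test,
 * ...) instead of a separate get_stats_count()/self_test_count(). */
static int example_get_sset_count(struct net_device *dev, int sset)
{
	switch (sset) {
	case ETH_SS_STATS:
		return ARRAY_SIZE(example_stat_keys);
	default:
		return -EOPNOTSUPP;
	}
}
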
@@ -542,7 +535,7 @@ static inline void smc_rcv(struct net_device *dev) dev->name); SMC_WAIT_MMU_BUSY(); SMC_SET_MMU_CMD(MC_RELEASE); - lp->stats.rx_dropped++; + dev->stats.rx_dropped++; return; } @@ -570,8 +563,8 @@ static inline void smc_rcv(struct net_device *dev) dev->last_rx = jiffies; skb->protocol = eth_type_trans(skb, dev); netif_rx(skb); - lp->stats.rx_packets++; - lp->stats.rx_bytes += data_len; + dev->stats.rx_packets++; + dev->stats.rx_bytes += data_len; } } @@ -644,8 +637,8 @@ static void smc_hardware_send_pkt(unsigned long data) packet_no = SMC_GET_AR(); if (unlikely(packet_no & AR_FAILED)) { printk("%s: Memory allocation failed.\n", dev->name); - lp->stats.tx_errors++; - lp->stats.tx_fifo_errors++; + dev->stats.tx_errors++; + dev->stats.tx_fifo_errors++; smc_special_unlock(&lp->lock); goto done; } @@ -688,8 +681,8 @@ static void smc_hardware_send_pkt(unsigned long data) smc_special_unlock(&lp->lock); dev->trans_start = jiffies; - lp->stats.tx_packets++; - lp->stats.tx_bytes += len; + dev->stats.tx_packets++; + dev->stats.tx_bytes += len; SMC_ENABLE_INT(IM_TX_INT | IM_TX_EMPTY_INT); @@ -729,8 +722,8 @@ static int smc_hard_start_xmit(struct sk_buff *skb, struct net_device *dev) numPages = ((skb->len & ~1) + (6 - 1)) >> 8; if (unlikely(numPages > 7)) { printk("%s: Far too big packet error.\n", dev->name); - lp->stats.tx_errors++; - lp->stats.tx_dropped++; + dev->stats.tx_errors++; + dev->stats.tx_dropped++; dev_kfree_skb(skb); return 0; } @@ -803,17 +796,17 @@ static void smc_tx(struct net_device *dev) dev->name, tx_status, packet_no); if (!(tx_status & ES_TX_SUC)) - lp->stats.tx_errors++; + dev->stats.tx_errors++; if (tx_status & ES_LOSTCARR) - lp->stats.tx_carrier_errors++; + dev->stats.tx_carrier_errors++; if (tx_status & (ES_LATCOL | ES_16COL)) { PRINTK("%s: %s occurred on last xmit\n", dev->name, (tx_status & ES_LATCOL) ? "late collision" : "too many collisions"); - lp->stats.tx_window_errors++; - if (!(lp->stats.tx_window_errors & 63) && net_ratelimit()) { + dev->stats.tx_window_errors++; + if (!(dev->stats.tx_window_errors & 63) && net_ratelimit()) { printk(KERN_INFO "%s: unexpectedly large number of " "bad collisions. Please check duplex " "setting.\n", dev->name); @@ -1347,19 +1340,19 @@ static irqreturn_t smc_interrupt(int irq, void *dev_id) SMC_SELECT_BANK(2); /* single collisions */ - lp->stats.collisions += card_stats & 0xF; + dev->stats.collisions += card_stats & 0xF; card_stats >>= 4; /* multiple collisions */ - lp->stats.collisions += card_stats & 0xF; + dev->stats.collisions += card_stats & 0xF; } else if (status & IM_RX_OVRN_INT) { DBG(1, "%s: RX overrun (EPH_ST 0x%04x)\n", dev->name, ({ int eph_st; SMC_SELECT_BANK(0); eph_st = SMC_GET_EPH_STATUS(); SMC_SELECT_BANK(2); eph_st; }) ); SMC_ACK_INT(IM_RX_OVRN_INT); - lp->stats.rx_errors++; - lp->stats.rx_fifo_errors++; + dev->stats.rx_errors++; + dev->stats.rx_fifo_errors++; } else if (status & IM_EPH_INT) { smc_eph_interrupt(dev); } else if (status & IM_MDINT) { @@ -1627,19 +1620,6 @@ static int smc_close(struct net_device *dev) return 0; } -/* - * Get the current statistics. - * This may be called with the card open or closed. 
- */ -static struct net_device_stats *smc_query_statistics(struct net_device *dev) -{ - struct smc_local *lp = netdev_priv(dev); - - DBG(2, "%s: %s\n", dev->name, __FUNCTION__); - - return &lp->stats; -} - /* * Ethtool support */ @@ -1842,9 +1822,10 @@ static int __init smc_probe(struct net_device *dev, void __iomem *ioaddr) { struct smc_local *lp = netdev_priv(dev); static int version_printed = 0; - int i, retval; + int retval; unsigned int val, revision_register; const char *version_string; + DECLARE_MAC_BUF(mac); DBG(2, "%s: %s\n", CARDNAME, __FUNCTION__); @@ -1965,7 +1946,6 @@ static int __init smc_probe(struct net_device *dev, void __iomem *ioaddr) dev->hard_start_xmit = smc_hard_start_xmit; dev->tx_timeout = smc_timeout; dev->watchdog_timeo = msecs_to_jiffies(watchdog); - dev->get_stats = smc_query_statistics; dev->set_multicast_list = smc_set_multicast_list; dev->ethtool_ops = &smc_ethtool_ops; #ifdef CONFIG_NET_POLL_CONTROLLER @@ -2035,10 +2015,8 @@ static int __init smc_probe(struct net_device *dev, void __iomem *ioaddr) "set using ifconfig\n", dev->name); } else { /* Print the Ethernet address */ - printk("%s: Ethernet addr: ", dev->name); - for (i = 0; i < 5; i++) - printk("%2.2x:", dev->dev_addr[i]); - printk("%2.2x\n", dev->dev_addr[5]); + printk("%s: Ethernet addr: %s\n", + dev->name, print_mac(mac, dev->dev_addr)); } if (lp->phy_type == 0) { @@ -2212,7 +2190,6 @@ static int smc_drv_probe(struct platform_device *pdev) ret = -ENOMEM; goto out_release_io; } - SET_MODULE_OWNER(ndev); SET_NETDEV_DEV(ndev, &pdev->dev); ndev->dma = (unsigned char)-1; diff --git a/drivers/net/smc91x.h b/drivers/net/smc91x.h index 6ff3a1627af87112ea87cca3f7ca7dd952c3a2e7..af9e6bf595522d51fa14225a123b0549c4f17c67 100644 --- a/drivers/net/smc91x.h +++ b/drivers/net/smc91x.h @@ -284,6 +284,7 @@ SMC_outw(u16 val, void __iomem *ioaddr, int reg) #elif defined(CONFIG_SUPERH) #ifdef CONFIG_SOLUTION_ENGINE +#define SMC_IRQ_FLAGS (0) #define SMC_CAN_USE_8BIT 0 #define SMC_CAN_USE_16BIT 1 #define SMC_CAN_USE_32BIT 0 diff --git a/drivers/net/spider_net.c b/drivers/net/spider_net.c index 82d837ab4db9bd54840d548d694fea291e5791ae..fab055ffcc9037070442469a34cc4d0fa1675237 100644 --- a/drivers/net/spider_net.c +++ b/drivers/net/spider_net.c @@ -795,6 +795,7 @@ spider_net_set_low_watermark(struct spider_net_card *card) static int spider_net_release_tx_chain(struct spider_net_card *card, int brutal) { + struct net_device *dev = card->netdev; struct spider_net_descr_chain *chain = &card->tx_chain; struct spider_net_descr *descr; struct spider_net_hw_descr *hwdescr; @@ -815,8 +816,8 @@ spider_net_release_tx_chain(struct spider_net_card *card, int brutal) status = spider_net_get_descr_status(hwdescr); switch (status) { case SPIDER_NET_DESCR_COMPLETE: - card->netdev_stats.tx_packets++; - card->netdev_stats.tx_bytes += descr->skb->len; + dev->stats.tx_packets++; + dev->stats.tx_bytes += descr->skb->len; break; case SPIDER_NET_DESCR_CARDOWNED: @@ -835,11 +836,11 @@ spider_net_release_tx_chain(struct spider_net_card *card, int brutal) if (netif_msg_tx_err(card)) dev_err(&card->netdev->dev, "forcing end of tx descriptor " "with status x%02x\n", status); - card->netdev_stats.tx_errors++; + dev->stats.tx_errors++; break; default: - card->netdev_stats.tx_dropped++; + dev->stats.tx_dropped++; if (!brutal) { spin_unlock_irqrestore(&chain->lock, flags); return 1; @@ -919,7 +920,7 @@ spider_net_xmit(struct sk_buff *skb, struct net_device *netdev) spider_net_release_tx_chain(card, 0); if (spider_net_prepare_tx_descr(card, skb) != 0) { 
- card->netdev_stats.tx_dropped++; + netdev->stats.tx_dropped++; netif_stop_queue(netdev); return NETDEV_TX_BUSY; } @@ -979,16 +980,12 @@ static void spider_net_pass_skb_up(struct spider_net_descr *descr, struct spider_net_card *card) { - struct spider_net_hw_descr *hwdescr= descr->hwdescr; - struct sk_buff *skb; - struct net_device *netdev; - u32 data_status, data_error; - - data_status = hwdescr->data_status; - data_error = hwdescr->data_error; - netdev = card->netdev; + struct spider_net_hw_descr *hwdescr = descr->hwdescr; + struct sk_buff *skb = descr->skb; + struct net_device *netdev = card->netdev; + u32 data_status = hwdescr->data_status; + u32 data_error = hwdescr->data_error; - skb = descr->skb; skb_put(skb, hwdescr->valid_size); /* the card seems to add 2 bytes of junk in front @@ -1015,8 +1012,8 @@ spider_net_pass_skb_up(struct spider_net_descr *descr, } /* update netdevice statistics */ - card->netdev_stats.rx_packets++; - card->netdev_stats.rx_bytes += skb->len; + netdev->stats.rx_packets++; + netdev->stats.rx_bytes += skb->len; /* pass skb up to stack */ netif_receive_skb(skb); @@ -1184,6 +1181,7 @@ static int spider_net_resync_tail_ptr(struct spider_net_card *card) static int spider_net_decode_one_descr(struct spider_net_card *card) { + struct net_device *dev = card->netdev; struct spider_net_descr_chain *chain = &card->rx_chain; struct spider_net_descr *descr = chain->tail; struct spider_net_hw_descr *hwdescr = descr->hwdescr; @@ -1210,9 +1208,9 @@ spider_net_decode_one_descr(struct spider_net_card *card) (status == SPIDER_NET_DESCR_PROTECTION_ERROR) || (status == SPIDER_NET_DESCR_FORCE_END) ) { if (netif_msg_rx_err(card)) - dev_err(&card->netdev->dev, + dev_err(&dev->dev, "dropping RX descriptor with state %d\n", status); - card->netdev_stats.rx_dropped++; + dev->stats.rx_dropped++; goto bad_desc; } @@ -1278,34 +1276,26 @@ spider_net_decode_one_descr(struct spider_net_card *card) * (using netif_receive_skb). If all/enough packets are up, the driver * reenables interrupts and returns 0. If not, 1 is returned. 
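
The kerneldoc comment above still describes the old interface, where poll() consumed a quota and returned 0 or 1; the spider_net conversion that follows, like the skge and sky2 ones earlier, switches to the napi_struct interface, where poll() receives a budget, returns the work done, and completes NAPI itself. A compact sketch of the whole pattern; example_priv and the example_* ring/IRQ helpers are invented stand-ins for a driver's real state, not code from this patch.

#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/interrupt.h>

/* Invented private structure embedding the napi_struct, in the same
 * way sky2_hw and spider_net_card now do. */
struct example_priv {
	struct napi_struct napi;
	struct net_device *netdev;
};

/* Trivial stand-ins for the driver's real ring and IRQ-mask code. */
static bool example_rx_one(struct example_priv *priv) { return false; }
static void example_rx_irq_on(struct example_priv *priv) { }
static void example_rx_irq_off(struct example_priv *priv) { }

/* New-style poll: the budget arrives as an argument, the return value
 * is the number of packets processed, and the handler re-arms the
 * device interrupt once the ring is empty. */
static int example_poll(struct napi_struct *napi, int budget)
{
	struct example_priv *priv = container_of(napi, struct example_priv, napi);
	int work_done = 0;

	while (work_done < budget && example_rx_one(priv))
		work_done++;

	if (work_done < budget) {
		napi_complete(napi);
		example_rx_irq_on(priv);
	}
	return work_done;
}

static irqreturn_t example_isr(int irq, void *dev_id)
{
	struct example_priv *priv = dev_id;

	example_rx_irq_off(priv);
	napi_schedule(&priv->napi);	/* no-op if already scheduled */
	return IRQ_HANDLED;
}

/* Registered once at probe time, for example:
 *	netif_napi_add(netdev, &priv->napi, example_poll, 64);
 * and bracketed by napi_enable()/napi_disable() in open/stop. */
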
*/ -static int -spider_net_poll(struct net_device *netdev, int *budget) +static int spider_net_poll(struct napi_struct *napi, int budget) { - struct spider_net_card *card = netdev_priv(netdev); - int packets_to_do, packets_done = 0; - int no_more_packets = 0; - - packets_to_do = min(*budget, netdev->quota); - - while (packets_to_do) { - if (spider_net_decode_one_descr(card)) { - packets_done++; - packets_to_do--; - } else { - /* no more packets for the stack */ - no_more_packets = 1; + struct spider_net_card *card = container_of(napi, struct spider_net_card, napi); + struct net_device *netdev = card->netdev; + int packets_done = 0; + + while (packets_done < budget) { + if (!spider_net_decode_one_descr(card)) break; - } + + packets_done++; } if ((packets_done == 0) && (card->num_rx_ints != 0)) { - no_more_packets = spider_net_resync_tail_ptr(card); + if (!spider_net_resync_tail_ptr(card)) + packets_done = budget; spider_net_resync_head_ptr(card); } card->num_rx_ints = 0; - netdev->quota -= packets_done; - *budget -= packets_done; spider_net_refill_rx_chain(card); spider_net_enable_rxdmac(card); @@ -1313,28 +1303,13 @@ spider_net_poll(struct net_device *netdev, int *budget) /* if all packets are in the stack, enable interrupts and return 0 */ /* if not, return 1 */ - if (no_more_packets) { - netif_rx_complete(netdev); + if (packets_done < budget) { + netif_rx_complete(netdev, napi); spider_net_rx_irq_on(card); card->ignore_rx_ramfull = 0; - return 0; } - return 1; -} - -/** - * spider_net_get_stats - get interface statistics - * @netdev: interface device structure - * - * returns the interface statistics residing in the spider_net_card struct - */ -static struct net_device_stats * -spider_net_get_stats(struct net_device *netdev) -{ - struct spider_net_card *card = netdev_priv(netdev); - struct net_device_stats *stats = &card->netdev_stats; - return stats; + return packets_done; } /** @@ -1560,7 +1535,8 @@ spider_net_handle_error_irq(struct spider_net_card *card, u32 status_reg) spider_net_refill_rx_chain(card); spider_net_enable_rxdmac(card); card->num_rx_ints ++; - netif_rx_schedule(card->netdev); + netif_rx_schedule(card->netdev, + &card->napi); } show_error = 0; break; @@ -1580,7 +1556,8 @@ spider_net_handle_error_irq(struct spider_net_card *card, u32 status_reg) spider_net_refill_rx_chain(card); spider_net_enable_rxdmac(card); card->num_rx_ints ++; - netif_rx_schedule(card->netdev); + netif_rx_schedule(card->netdev, + &card->napi); show_error = 0; break; @@ -1594,7 +1571,8 @@ spider_net_handle_error_irq(struct spider_net_card *card, u32 status_reg) spider_net_refill_rx_chain(card); spider_net_enable_rxdmac(card); card->num_rx_ints ++; - netif_rx_schedule(card->netdev); + netif_rx_schedule(card->netdev, + &card->napi); show_error = 0; break; @@ -1686,11 +1664,11 @@ spider_net_interrupt(int irq, void *ptr) if (status_reg & SPIDER_NET_RXINT ) { spider_net_rx_irq_off(card); - netif_rx_schedule(netdev); + netif_rx_schedule(netdev, &card->napi); card->num_rx_ints ++; } if (status_reg & SPIDER_NET_TXINT) - netif_rx_schedule(netdev); + netif_rx_schedule(netdev, &card->napi); if (status_reg & SPIDER_NET_LINKINT) spider_net_link_reset(netdev); @@ -2034,7 +2012,7 @@ spider_net_open(struct net_device *netdev) netif_start_queue(netdev); netif_carrier_on(netdev); - netif_poll_enable(netdev); + napi_enable(&card->napi); spider_net_enable_interrupts(card); @@ -2204,7 +2182,7 @@ spider_net_stop(struct net_device *netdev) { struct spider_net_card *card = netdev_priv(netdev); - netif_poll_disable(netdev); 
+ napi_disable(&card->napi); netif_carrier_off(netdev); netif_stop_queue(netdev); del_timer_sync(&card->tx_timer); @@ -2296,7 +2274,6 @@ spider_net_setup_netdev_ops(struct net_device *netdev) netdev->open = &spider_net_open; netdev->stop = &spider_net_stop; netdev->hard_start_xmit = &spider_net_xmit; - netdev->get_stats = &spider_net_get_stats; netdev->set_multicast_list = &spider_net_set_multi; netdev->set_mac_address = &spider_net_set_mac; netdev->change_mtu = &spider_net_change_mtu; @@ -2304,9 +2281,6 @@ spider_net_setup_netdev_ops(struct net_device *netdev) /* tx watchdog */ netdev->tx_timeout = &spider_net_tx_timeout; netdev->watchdog_timeo = SPIDER_NET_WATCHDOG_TIMEOUT; - /* NAPI */ - netdev->poll = &spider_net_poll; - netdev->weight = SPIDER_NET_NAPI_WEIGHT; /* HW VLAN */ #ifdef CONFIG_NET_POLL_CONTROLLER /* poll controller */ @@ -2333,7 +2307,6 @@ spider_net_setup_netdev(struct spider_net_card *card) struct sockaddr addr; const u8 *mac; - SET_MODULE_OWNER(netdev); SET_NETDEV_DEV(netdev, &card->pdev->dev); pci_set_drvdata(card->pdev, netdev); @@ -2351,6 +2324,9 @@ spider_net_setup_netdev(struct spider_net_card *card) card->options.rx_csum = SPIDER_NET_RX_CSUM_DEFAULT; + netif_napi_add(netdev, &card->napi, + spider_net_poll, SPIDER_NET_NAPI_WEIGHT); + spider_net_setup_netdev_ops(netdev); netdev->features = NETIF_F_IP_CSUM | NETIF_F_LLTX; diff --git a/drivers/net/spider_net.h b/drivers/net/spider_net.h index dbbdb8cee3c639c0be5480a161d92b3fb16174e9..a897beee7d5dce748ece6f8e055b413c91828844 100644 --- a/drivers/net/spider_net.h +++ b/drivers/net/spider_net.h @@ -466,6 +466,8 @@ struct spider_net_card { struct pci_dev *pdev; struct mii_phy phy; + struct napi_struct napi; + int medium; void __iomem *regs; @@ -485,7 +487,6 @@ struct spider_net_card { /* for ethtool */ int msg_enable; - struct net_device_stats netdev_stats; struct spider_net_extra_stats spider_stats; struct spider_net_options options; diff --git a/drivers/net/spider_net_ethtool.c b/drivers/net/spider_net_ethtool.c index d940474e024a19b0eed798443b5f92a7feec2adc..85691d2a0be26a454e65550d867d4e59a0713119 100644 --- a/drivers/net/spider_net_ethtool.c +++ b/drivers/net/spider_net_ethtool.c @@ -28,8 +28,6 @@ #include "spider_net.h" -#define SPIDER_NET_NUM_STATS 13 - static struct { const char str[ETH_GSTRING_LEN]; } ethtool_stats_keys[] = { @@ -147,9 +145,14 @@ spider_net_ethtool_get_ringparam(struct net_device *netdev, ering->rx_pending = card->rx_chain.num_desc; } -static int spider_net_get_stats_count(struct net_device *netdev) +static int spider_net_get_sset_count(struct net_device *netdev, int sset) { - return SPIDER_NET_NUM_STATS; + switch (sset) { + case ETH_SS_STATS: + return ARRAY_SIZE(ethtool_stats_keys); + default: + return -EOPNOTSUPP; + } } static void spider_net_get_ethtool_stats(struct net_device *netdev, @@ -157,13 +160,13 @@ static void spider_net_get_ethtool_stats(struct net_device *netdev, { struct spider_net_card *card = netdev->priv; - data[0] = card->netdev_stats.tx_packets; - data[1] = card->netdev_stats.tx_bytes; - data[2] = card->netdev_stats.rx_packets; - data[3] = card->netdev_stats.rx_bytes; - data[4] = card->netdev_stats.tx_errors; - data[5] = card->netdev_stats.tx_dropped; - data[6] = card->netdev_stats.rx_dropped; + data[0] = netdev->stats.tx_packets; + data[1] = netdev->stats.tx_bytes; + data[2] = netdev->stats.rx_packets; + data[3] = netdev->stats.rx_bytes; + data[4] = netdev->stats.tx_errors; + data[5] = netdev->stats.tx_dropped; + data[6] = netdev->stats.rx_dropped; data[7] = 
card->spider_stats.rx_desc_error; data[8] = card->spider_stats.tx_timeouts; data[9] = card->spider_stats.alloc_rx_skb_error; @@ -188,11 +191,10 @@ const struct ethtool_ops spider_net_ethtool_ops = { .nway_reset = spider_net_ethtool_nway_reset, .get_rx_csum = spider_net_ethtool_get_rx_csum, .set_rx_csum = spider_net_ethtool_set_rx_csum, - .get_tx_csum = ethtool_op_get_tx_csum, .set_tx_csum = ethtool_op_set_tx_csum, .get_ringparam = spider_net_ethtool_get_ringparam, .get_strings = spider_net_get_strings, - .get_stats_count = spider_net_get_stats_count, + .get_sset_count = spider_net_get_sset_count, .get_ethtool_stats = spider_net_get_ethtool_stats, }; diff --git a/drivers/net/starfire.c b/drivers/net/starfire.c index 8b6478663a56b029c9322cb6cb239f00328cdc7f..bcc430bd9e493b0f89d0f0c85cae54debfcb9d86 100644 --- a/drivers/net/starfire.c +++ b/drivers/net/starfire.c @@ -155,7 +155,7 @@ static int full_duplex[MAX_UNITS] = {0, }; #if (defined(__i386__) && defined(CONFIG_HIGHMEM64G)) || defined(__x86_64__) || defined (__ia64__) || defined(__alpha__) || defined(__mips64__) || (defined(__mips__) && defined(CONFIG_HIGHMEM) && defined(CONFIG_64BIT_PHYS_ADDR)) /* 64-bit dma_addr_t */ #define ADDR_64BITS /* This chip uses 64 bit addresses. */ -#define netdrv_addr_t u64 +#define netdrv_addr_t __le64 #define cpu_to_dma(x) cpu_to_le64(x) #define dma_to_cpu(x) le64_to_cpu(x) #define RX_DESC_Q_ADDR_SIZE RxDescQAddr64bit @@ -164,7 +164,7 @@ static int full_duplex[MAX_UNITS] = {0, }; #define TX_COMPL_Q_ADDR_SIZE TxComplQAddr64bit #define RX_DESC_ADDR_SIZE RxDescAddr64bit #else /* 32-bit dma_addr_t */ -#define netdrv_addr_t u32 +#define netdrv_addr_t __le32 #define cpu_to_dma(x) cpu_to_le32(x) #define dma_to_cpu(x) le32_to_cpu(x) #define RX_DESC_Q_ADDR_SIZE RxDescQAddr32bit @@ -178,16 +178,13 @@ static int full_duplex[MAX_UNITS] = {0, }; #define skb_num_frags(skb) (skb_shinfo(skb)->nr_frags + 1) #ifdef HAVE_NETDEV_POLL -#define init_poll(dev) \ -do { \ - dev->poll = &netdev_poll; \ - dev->weight = max_interrupt_work; \ -} while (0) -#define netdev_rx(dev, ioaddr) \ +#define init_poll(dev, np) \ + netif_napi_add(dev, &np->napi, netdev_poll, max_interrupt_work) +#define netdev_rx(dev, np, ioaddr) \ do { \ u32 intr_enable; \ - if (netif_rx_schedule_prep(dev)) { \ - __netif_rx_schedule(dev); \ + if (netif_rx_schedule_prep(dev, &np->napi)) { \ + __netif_rx_schedule(dev, &np->napi); \ intr_enable = readl(ioaddr + IntrEnable); \ intr_enable &= ~(IntrRxDone | IntrRxEmpty); \ writel(intr_enable, ioaddr + IntrEnable); \ @@ -204,12 +201,12 @@ do { \ } while (0) #define netdev_receive_skb(skb) netif_receive_skb(skb) #define vlan_netdev_receive_skb(skb, vlgrp, vlid) vlan_hwaccel_receive_skb(skb, vlgrp, vlid) -static int netdev_poll(struct net_device *dev, int *budget); +static int netdev_poll(struct napi_struct *napi, int budget); #else /* not HAVE_NETDEV_POLL */ -#define init_poll(dev) +#define init_poll(dev, np) #define netdev_receive_skb(skb) netif_rx(skb) #define vlan_netdev_receive_skb(skb, vlgrp, vlid) vlan_hwaccel_rx(skb, vlgrp, vlid) -#define netdev_rx(dev, ioaddr) \ +#define netdev_rx(dev, np, ioaddr) \ do { \ int quota = np->dirty_rx + RX_RING_SIZE - np->cur_rx; \ __netdev_rx(dev, "a);\ @@ -497,7 +494,7 @@ enum intr_ctrl_bits { /* The Rx and Tx buffer descriptors. */ struct starfire_rx_desc { - dma_addr_t rxaddr; + netdrv_addr_t rxaddr; }; enum rx_desc_bits { RxDescValid=1, RxDescEndRing=2, @@ -505,25 +502,25 @@ enum rx_desc_bits { /* Completion queue entry. 
*/ struct short_rx_done_desc { - u32 status; /* Low 16 bits is length. */ + __le32 status; /* Low 16 bits is length. */ }; struct basic_rx_done_desc { - u32 status; /* Low 16 bits is length. */ - u16 vlanid; - u16 status2; + __le32 status; /* Low 16 bits is length. */ + __le16 vlanid; + __le16 status2; }; struct csum_rx_done_desc { - u32 status; /* Low 16 bits is length. */ - u16 csum; /* Partial checksum */ - u16 status2; + __le32 status; /* Low 16 bits is length. */ + __le16 csum; /* Partial checksum */ + __le16 status2; }; struct full_rx_done_desc { - u32 status; /* Low 16 bits is length. */ - u16 status3; - u16 status2; - u16 vlanid; - u16 csum; /* partial checksum */ - u32 timestamp; + __le32 status; /* Low 16 bits is length. */ + __le16 status3; + __le16 status2; + __le16 vlanid; + __le16 csum; /* partial checksum */ + __le32 timestamp; }; /* XXX: this is ugly and I'm not sure it's worth the trouble -Ion */ #ifdef VLAN_SUPPORT @@ -540,15 +537,15 @@ enum rx_done_bits { /* Type 1 Tx descriptor. */ struct starfire_tx_desc_1 { - u32 status; /* Upper bits are status, lower 16 length. */ - u32 addr; + __le32 status; /* Upper bits are status, lower 16 length. */ + __le32 addr; }; /* Type 2 Tx descriptor. */ struct starfire_tx_desc_2 { - u32 status; /* Upper bits are status, lower 16 length. */ - u32 reserved; - u64 addr; + __le32 status; /* Upper bits are status, lower 16 length. */ + __le32 reserved; + __le64 addr; }; #ifdef ADDR_64BITS @@ -566,9 +563,9 @@ enum tx_desc_bits { TxRingWrap=0x04000000, TxCalTCP=0x02000000, }; struct tx_done_desc { - u32 status; /* timestamp, index. */ + __le32 status; /* timestamp, index. */ #if 0 - u32 intrstatus; /* interrupt status */ + __le32 intrstatus; /* interrupt status */ #endif }; @@ -599,6 +596,8 @@ struct netdev_private { struct tx_done_desc *tx_done_q; dma_addr_t tx_done_q_dma; unsigned int tx_done; + struct napi_struct napi; + struct net_device *dev; struct net_device_stats stats; struct pci_dev *pci_dev; #ifdef VLAN_SUPPORT @@ -695,6 +694,7 @@ static int __devinit starfire_init_one(struct pci_dev *pdev, void __iomem *base; int drv_flags, io_size; int boguscnt; + DECLARE_MAC_BUF(mac); /* when built into the kernel, we only print version if device is found */ #ifndef MODULE @@ -720,7 +720,6 @@ static int __devinit starfire_init_one(struct pci_dev *pdev, printk(KERN_ERR DRV_NAME " %d: cannot alloc etherdev, aborting\n", card_idx); return -ENOMEM; } - SET_MODULE_OWNER(dev); SET_NETDEV_DEV(dev, &pdev->dev); irq = pdev->irq; @@ -791,6 +790,7 @@ static int __devinit starfire_init_one(struct pci_dev *pdev, dev->irq = irq; np = netdev_priv(dev); + np->dev = dev; np->base = base; spin_lock_init(&np->lock); pci_set_drvdata(pdev, dev); @@ -851,7 +851,7 @@ static int __devinit starfire_init_one(struct pci_dev *pdev, dev->hard_start_xmit = &start_tx; dev->tx_timeout = tx_timeout; dev->watchdog_timeo = TX_TIMEOUT; - init_poll(dev); + init_poll(dev, np); dev->stop = &netdev_close; dev->get_stats = &get_stats; dev->set_multicast_list = &set_rx_mode; @@ -864,11 +864,9 @@ static int __devinit starfire_init_one(struct pci_dev *pdev, if (register_netdev(dev)) goto err_out_cleardev; - printk(KERN_INFO "%s: %s at %p, ", - dev->name, netdrv_tbl[chip_idx].name, base); - for (i = 0; i < 5; i++) - printk("%2.2x:", dev->dev_addr[i]); - printk("%2.2x, IRQ %d.\n", dev->dev_addr[i], irq); + printk(KERN_INFO "%s: %s at %p, %s, IRQ %d.\n", + dev->name, netdrv_tbl[chip_idx].name, base, + print_mac(mac, dev->dev_addr), irq); if (drv_flags & CanHaveMII) { int phy, phy_idx = 0; @@ 
-965,7 +963,7 @@ static int netdev_open(struct net_device *dev) dev->name, dev->irq); /* Allocate the various queues. */ - if (np->queue_mem == 0) { + if (!np->queue_mem) { tx_done_q_size = ((sizeof(struct tx_done_desc) * DONE_Q_SIZE + QUEUE_ALIGN - 1) / QUEUE_ALIGN) * QUEUE_ALIGN; rx_done_q_size = ((sizeof(rx_done_desc) * DONE_Q_SIZE + QUEUE_ALIGN - 1) / QUEUE_ALIGN) * QUEUE_ALIGN; tx_ring_size = ((sizeof(starfire_tx_desc) * TX_RING_SIZE + QUEUE_ALIGN - 1) / QUEUE_ALIGN) * QUEUE_ALIGN; @@ -1038,11 +1036,11 @@ static int netdev_open(struct net_device *dev) writew(0, ioaddr + PerfFilterTable + 4); writew(0, ioaddr + PerfFilterTable + 8); for (i = 1; i < 16; i++) { - u16 *eaddrs = (u16 *)dev->dev_addr; + __be16 *eaddrs = (__be16 *)dev->dev_addr; void __iomem *setup_frm = ioaddr + PerfFilterTable + i * 16; - writew(cpu_to_be16(eaddrs[2]), setup_frm); setup_frm += 4; - writew(cpu_to_be16(eaddrs[1]), setup_frm); setup_frm += 4; - writew(cpu_to_be16(eaddrs[0]), setup_frm); setup_frm += 8; + writew(be16_to_cpu(eaddrs[2]), setup_frm); setup_frm += 4; + writew(be16_to_cpu(eaddrs[1]), setup_frm); setup_frm += 4; + writew(be16_to_cpu(eaddrs[0]), setup_frm); setup_frm += 8; } /* Initialize other registers. */ @@ -1056,6 +1054,9 @@ static int netdev_open(struct net_device *dev) writel(np->intr_timer_ctrl, ioaddr + IntrTimerCtrl); +#ifdef HAVE_NETDEV_POLL + napi_enable(&np->napi); +#endif netif_start_queue(dev); if (debug > 1) @@ -1330,7 +1331,7 @@ static irqreturn_t intr_handler(int irq, void *dev_instance) handled = 1; if (intr_status & (IntrRxDone | IntrRxEmpty)) - netdev_rx(dev, ioaddr); + netdev_rx(dev, np, ioaddr); /* Scavenge the skbuff list based on the Tx-done queue. There are redundant checks here that may be cleaned up @@ -1470,13 +1471,16 @@ static int __netdev_rx(struct net_device *dev, int *quota) } #ifndef final_version /* Remove after testing. */ /* You will want this info for the initial debug. 
*/ - if (debug > 5) - printk(KERN_DEBUG " Rx data %2.2x:%2.2x:%2.2x:%2.2x:%2.2x:" - "%2.2x %2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x %2.2x%2.2x.\n", - skb->data[0], skb->data[1], skb->data[2], skb->data[3], - skb->data[4], skb->data[5], skb->data[6], skb->data[7], - skb->data[8], skb->data[9], skb->data[10], - skb->data[11], skb->data[12], skb->data[13]); + if (debug > 5) { + DECLARE_MAC_BUF(mac); + DECLARE_MAC_BUF(mac2); + + printk(KERN_DEBUG " Rx data %s %s" + " %2.2x%2.2x.\n", + print_mac(mac, &skb->data[0]), + print_mac(mac2, &skb->data[6]), + skb->data[12], skb->data[13]); + } #endif skb->protocol = eth_type_trans(skb, dev); @@ -1531,36 +1535,35 @@ static int __netdev_rx(struct net_device *dev, int *quota) #ifdef HAVE_NETDEV_POLL -static int netdev_poll(struct net_device *dev, int *budget) +static int netdev_poll(struct napi_struct *napi, int budget) { + struct netdev_private *np = container_of(napi, struct netdev_private, napi); + struct net_device *dev = np->dev; u32 intr_status; - struct netdev_private *np = netdev_priv(dev); void __iomem *ioaddr = np->base; - int retcode = 0, quota = dev->quota; + int quota = budget; do { writel(IntrRxDone | IntrRxEmpty, ioaddr + IntrClear); - retcode = __netdev_rx(dev, "a); - *budget -= (dev->quota - quota); - dev->quota = quota; - if (retcode) + if (__netdev_rx(dev, "a)) goto out; intr_status = readl(ioaddr + IntrStatus); } while (intr_status & (IntrRxDone | IntrRxEmpty)); - netif_rx_complete(dev); + netif_rx_complete(dev, napi); intr_status = readl(ioaddr + IntrEnable); intr_status |= IntrRxDone | IntrRxEmpty; writel(intr_status, ioaddr + IntrEnable); out: if (debug > 5) - printk(KERN_DEBUG " exiting netdev_poll(): %d.\n", retcode); + printk(KERN_DEBUG " exiting netdev_poll(): %d.\n", + budget - quota); /* Restart Rx engine if stopped. */ - return retcode; + return budget - quota; } #endif /* HAVE_NETDEV_POLL */ @@ -1764,26 +1767,26 @@ static void set_rx_mode(struct net_device *dev) } else if (dev->mc_count <= 14) { /* Use the 16 element perfect filter, skip first two entries. */ void __iomem *filter_addr = ioaddr + PerfFilterTable + 2 * 16; - u16 *eaddrs; + __be16 *eaddrs; for (i = 2, mclist = dev->mc_list; mclist && i < dev->mc_count + 2; i++, mclist = mclist->next) { - eaddrs = (u16 *)mclist->dmi_addr; - writew(cpu_to_be16(eaddrs[2]), filter_addr); filter_addr += 4; - writew(cpu_to_be16(eaddrs[1]), filter_addr); filter_addr += 4; - writew(cpu_to_be16(eaddrs[0]), filter_addr); filter_addr += 8; + eaddrs = (__be16 *)mclist->dmi_addr; + writew(be16_to_cpu(eaddrs[2]), filter_addr); filter_addr += 4; + writew(be16_to_cpu(eaddrs[1]), filter_addr); filter_addr += 4; + writew(be16_to_cpu(eaddrs[0]), filter_addr); filter_addr += 8; } - eaddrs = (u16 *)dev->dev_addr; + eaddrs = (__be16 *)dev->dev_addr; while (i++ < 16) { - writew(cpu_to_be16(eaddrs[0]), filter_addr); filter_addr += 4; - writew(cpu_to_be16(eaddrs[1]), filter_addr); filter_addr += 4; - writew(cpu_to_be16(eaddrs[2]), filter_addr); filter_addr += 8; + writew(be16_to_cpu(eaddrs[0]), filter_addr); filter_addr += 4; + writew(be16_to_cpu(eaddrs[1]), filter_addr); filter_addr += 4; + writew(be16_to_cpu(eaddrs[2]), filter_addr); filter_addr += 8; } rx_mode |= AcceptBroadcast|PerfectFilter; } else { /* Must use a multicast hash table. 
*/ void __iomem *filter_addr; - u16 *eaddrs; - u16 mc_filter[32] __attribute__ ((aligned(sizeof(long)))); /* Multicast hash filter */ + __be16 *eaddrs; + __le16 mc_filter[32] __attribute__ ((aligned(sizeof(long)))); /* Multicast hash filter */ memset(mc_filter, 0, sizeof(mc_filter)); for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count; @@ -1791,17 +1794,17 @@ static void set_rx_mode(struct net_device *dev) /* The chip uses the upper 9 CRC bits as index into the hash table */ int bit_nr = ether_crc_le(ETH_ALEN, mclist->dmi_addr) >> 23; - __u32 *fptr = (__u32 *) &mc_filter[(bit_nr >> 4) & ~1]; + __le32 *fptr = (__le32 *) &mc_filter[(bit_nr >> 4) & ~1]; *fptr |= cpu_to_le32(1 << (bit_nr & 31)); } /* Clear the perfect filter list, skip first two entries. */ filter_addr = ioaddr + PerfFilterTable + 2 * 16; - eaddrs = (u16 *)dev->dev_addr; + eaddrs = (__be16 *)dev->dev_addr; for (i = 2; i < 16; i++) { - writew(cpu_to_be16(eaddrs[0]), filter_addr); filter_addr += 4; - writew(cpu_to_be16(eaddrs[1]), filter_addr); filter_addr += 4; - writew(cpu_to_be16(eaddrs[2]), filter_addr); filter_addr += 8; + writew(be16_to_cpu(eaddrs[0]), filter_addr); filter_addr += 4; + writew(be16_to_cpu(eaddrs[1]), filter_addr); filter_addr += 4; + writew(be16_to_cpu(eaddrs[2]), filter_addr); filter_addr += 8; } for (filter_addr = ioaddr + HashTable, i = 0; i < 32; filter_addr+= 16, i++) writew(mc_filter[i], filter_addr); @@ -1904,6 +1907,9 @@ static int netdev_close(struct net_device *dev) int i; netif_stop_queue(dev); +#ifdef HAVE_NETDEV_POLL + napi_disable(&np->napi); +#endif if (debug > 1) { printk(KERN_DEBUG "%s: Shutting down ethercard, Intr status %#8.8x.\n", diff --git a/drivers/net/stnic.c b/drivers/net/stnic.c index e6f90427160cf162d82682a409af375eb6e4903b..b65be5d70fecfee4d21f8d9d66505e411ed08164 100644 --- a/drivers/net/stnic.c +++ b/drivers/net/stnic.c @@ -112,7 +112,6 @@ static int __init stnic_probe(void) dev = alloc_ei_netdev(); if (!dev) return -ENOMEM; - SET_MODULE_OWNER(dev); #ifdef CONFIG_SH_STANDARD_BIOS sh_bios_get_node_addr (stnic_eadr); diff --git a/drivers/net/sun3_82586.c b/drivers/net/sun3_82586.c index b77ab6e8fd3576eb7369f14d3981872b4e8ef40c..9b2a7f7bb25863f1c2e68e14865bc3f1f4ff85c7 100644 --- a/drivers/net/sun3_82586.c +++ b/drivers/net/sun3_82586.c @@ -311,7 +311,6 @@ struct net_device * __init sun3_82586_probe(int unit) sprintf(dev->name, "eth%d", unit); netdev_boot_setup_check(dev); } - SET_MODULE_OWNER(dev); dev->irq = IE_IRQ; dev->base_addr = ioaddr; diff --git a/drivers/net/sun3lance.c b/drivers/net/sun3lance.c index f1548c033327d9a3ae3d9c50438de721d7663906..f8d46134dacaeaa16f66a2d98b8ce2faad4616f2 100644 --- a/drivers/net/sun3lance.c +++ b/drivers/net/sun3lance.c @@ -152,7 +152,6 @@ struct lance_private { struct lance_memory *mem; int new_rx, new_tx; /* The next free ring entry */ int old_tx, old_rx; /* ring entry to be processed */ - struct net_device_stats stats; /* These two must be longs for set_bit() */ long tx_full; long lock; @@ -241,7 +240,6 @@ static int lance_start_xmit( struct sk_buff *skb, struct net_device *dev ); static irqreturn_t lance_interrupt( int irq, void *dev_id); static int lance_rx( struct net_device *dev ); static int lance_close( struct net_device *dev ); -static struct net_device_stats *lance_get_stats( struct net_device *dev ); static void set_multicast_list( struct net_device *dev ); /************************* End of Prototypes **************************/ @@ -274,7 +272,6 @@ struct net_device * __init sun3lance_probe(int unit) sprintf(dev->name, 
"eth%d", unit); netdev_boot_setup_check(dev); } - SET_MODULE_OWNER(dev); if (!lance_probe(dev)) goto out; @@ -303,6 +300,7 @@ static int __init lance_probe( struct net_device *dev) static int did_version; volatile unsigned short *ioaddr_probe; unsigned short tmp1, tmp2; + DECLARE_MAC_BUF(mac); #ifdef CONFIG_SUN3 ioaddr = (unsigned long)ioremap(LANCE_OBIO, PAGE_SIZE); @@ -378,8 +376,7 @@ static int __init lance_probe( struct net_device *dev) MEM->init.hwaddr[4] = dev->dev_addr[5]; MEM->init.hwaddr[5] = dev->dev_addr[4]; - for( i = 0; i < 6; ++i ) - printk( "%02x%s", dev->dev_addr[i], (i < 5) ? ":" : "\n" ); + printk("%s\n", print_mac(mac, dev->dev_addr)); MEM->init.mode = 0x0000; MEM->init.filter[0] = 0x00000000; @@ -402,15 +399,12 @@ static int __init lance_probe( struct net_device *dev) dev->open = &lance_open; dev->hard_start_xmit = &lance_start_xmit; dev->stop = &lance_close; - dev->get_stats = &lance_get_stats; dev->set_multicast_list = &set_multicast_list; dev->set_mac_address = NULL; // KLUDGE -- REMOVE ME set_bit(__LINK_STATE_PRESENT, &dev->state); - memset( &lp->stats, 0, sizeof(lp->stats) ); - return 1; } @@ -535,7 +529,7 @@ static int lance_start_xmit( struct sk_buff *skb, struct net_device *dev ) * little endian mode. */ REGA(CSR3) = CSR3_BSWP; - lp->stats.tx_errors++; + dev->stats.tx_errors++; if(lance_debug >= 2) { int i; @@ -596,17 +590,12 @@ static int lance_start_xmit( struct sk_buff *skb, struct net_device *dev ) /* Fill in a Tx ring entry */ #if 0 if (lance_debug >= 2) { - u_char *p; - int i; - printk( "%s: TX pkt %d type 0x%04x from ", dev->name, - lp->new_tx, ((u_short *)skb->data)[6]); - for( p = &((u_char *)skb->data)[6], i = 0; i < 6; i++ ) - printk("%02x%s", *p++, i != 5 ? ":" : "" ); - printk(" to "); - for( p = (u_char *)skb->data, i = 0; i < 6; i++ ) - printk("%02x%s", *p++, i != 5 ? ":" : "" ); - printk(" data at 0x%08x len %d\n", (int)skb->data, - (int)skb->len ); + printk( "%s: TX pkt %d type 0x%04x" + " from %s to %s" + " data at 0x%08x len %d\n", + dev->name, lp->new_tx, ((u_short *)skb->data)[6], + DEV_ADDR(&skb->data[6]), DEV_ADDR(skb->data), + (int)skb->data, (int)skb->len ); } #endif /* We're not prepared for the int until the last flags are set/reset. @@ -635,7 +624,7 @@ static int lance_start_xmit( struct sk_buff *skb, struct net_device *dev ) head->flag = TMD1_OWN_CHIP | TMD1_ENP | TMD1_STP; lp->new_tx = (lp->new_tx + 1) & TX_RING_MOD_MASK; - lp->stats.tx_bytes += skb->len; + dev->stats.tx_bytes += skb->len; /* Trigger an immediate send poll. 
*/ REGA(CSR0) = CSR0_INEA | CSR0_TDMD | CSR0_STRT; @@ -713,12 +702,12 @@ static irqreturn_t lance_interrupt( int irq, void *dev_id) if (head->flag & TMD1_ERR) { int status = head->misc; - lp->stats.tx_errors++; - if (status & TMD3_RTRY) lp->stats.tx_aborted_errors++; - if (status & TMD3_LCAR) lp->stats.tx_carrier_errors++; - if (status & TMD3_LCOL) lp->stats.tx_window_errors++; + dev->stats.tx_errors++; + if (status & TMD3_RTRY) dev->stats.tx_aborted_errors++; + if (status & TMD3_LCAR) dev->stats.tx_carrier_errors++; + if (status & TMD3_LCOL) dev->stats.tx_window_errors++; if (status & (TMD3_UFLO | TMD3_BUFF)) { - lp->stats.tx_fifo_errors++; + dev->stats.tx_fifo_errors++; printk("%s: Tx FIFO error\n", dev->name); REGA(CSR0) = CSR0_STOP; @@ -731,9 +720,9 @@ static irqreturn_t lance_interrupt( int irq, void *dev_id) head->flag &= ~(TMD1_ENP | TMD1_STP); if(head->flag & (TMD1_ONE | TMD1_MORE)) - lp->stats.collisions++; + dev->stats.collisions++; - lp->stats.tx_packets++; + dev->stats.tx_packets++; DPRINTK(3, ("cleared tx ring %d\n", old_tx)); } old_tx = (old_tx +1) & TX_RING_MOD_MASK; @@ -753,8 +742,8 @@ static irqreturn_t lance_interrupt( int irq, void *dev_id) lance_rx( dev ); /* Log misc errors. */ - if (csr0 & CSR0_BABL) lp->stats.tx_errors++; /* Tx babble. */ - if (csr0 & CSR0_MISS) lp->stats.rx_errors++; /* Missed a Rx frame. */ + if (csr0 & CSR0_BABL) dev->stats.tx_errors++; /* Tx babble. */ + if (csr0 & CSR0_MISS) dev->stats.rx_errors++; /* Missed a Rx frame. */ if (csr0 & CSR0_MERR) { DPRINTK( 1, ( "%s: Bus master arbitration failure (?!?), " "status %04x.\n", dev->name, csr0 )); @@ -800,11 +789,11 @@ static int lance_rx( struct net_device *dev ) full-sized buffers it's possible for a jabber packet to use two buffers, with only the last correctly noting the error. */ if (status & RMD1_ENP) /* Only count a general error at the */ - lp->stats.rx_errors++; /* end of a packet.*/ - if (status & RMD1_FRAM) lp->stats.rx_frame_errors++; - if (status & RMD1_OFLO) lp->stats.rx_over_errors++; - if (status & RMD1_CRC) lp->stats.rx_crc_errors++; - if (status & RMD1_BUFF) lp->stats.rx_fifo_errors++; + dev->stats.rx_errors++; /* end of a packet.*/ + if (status & RMD1_FRAM) dev->stats.rx_frame_errors++; + if (status & RMD1_OFLO) dev->stats.rx_over_errors++; + if (status & RMD1_CRC) dev->stats.rx_crc_errors++; + if (status & RMD1_BUFF) dev->stats.rx_fifo_errors++; head->flag &= (RMD1_ENP|RMD1_STP); } else { /* Malloc up new buffer, compatible with net-3. */ @@ -814,7 +803,7 @@ static int lance_rx( struct net_device *dev ) if (pkt_len < 60) { printk( "%s: Runt packet!\n", dev->name ); - lp->stats.rx_errors++; + dev->stats.rx_errors++; } else { skb = dev_alloc_skb( pkt_len+2 ); @@ -822,7 +811,7 @@ static int lance_rx( struct net_device *dev ) DPRINTK( 1, ( "%s: Memory squeeze, deferring packet.\n", dev->name )); - lp->stats.rx_dropped++; + dev->stats.rx_dropped++; head->msg_length = 0; head->flag |= RMD1_OWN_CHIP; lp->new_rx = (lp->new_rx+1) & @@ -831,13 +820,14 @@ static int lance_rx( struct net_device *dev ) #if 0 if (lance_debug >= 3) { - u_char *data = PKTBUF_ADDR(head), *p; - printk( "%s: RX pkt %d type 0x%04x from ", dev->name, entry, ((u_short *)data)[6]); - for( p = &data[6], i = 0; i < 6; i++ ) - printk("%02x%s", *p++, i != 5 ? ":" : "" ); - printk(" to "); - for( p = data, i = 0; i < 6; i++ ) - printk("%02x%s", *p++, i != 5 ? 
":" : "" ); + u_char *data = PKTBUF_ADDR(head); + DECLARE_MAC_BUF(mac); + DECLARE_MAC_BUF(mac2) + printk("%s: RX pkt %d type 0x%04x" + " from %s to %s", + dev->name, lp->new_tx, ((u_short *)data)[6], + print_mac(mac, &data[6]), print_mac(mac2, data)); + printk(" data %02x %02x %02x %02x %02x %02x %02x %02x " "len %d at %08x\n", data[15], data[16], data[17], data[18], @@ -860,8 +850,8 @@ static int lance_rx( struct net_device *dev ) skb->protocol = eth_type_trans( skb, dev ); netif_rx( skb ); dev->last_rx = jiffies; - lp->stats.rx_packets++; - lp->stats.rx_bytes += pkt_len; + dev->stats.rx_packets++; + dev->stats.rx_bytes += pkt_len; } } @@ -898,14 +888,6 @@ static int lance_close( struct net_device *dev ) } -static struct net_device_stats *lance_get_stats( struct net_device *dev ) -{ - struct lance_private *lp = netdev_priv(dev); - - return &lp->stats; -} - - /* Set or clear the multicast filter for this adaptor. num_addrs == -1 Promiscuous mode, receive all packets num_addrs == 0 Normal mode, clear multicast list diff --git a/drivers/net/sunbmac.c b/drivers/net/sunbmac.c index b3e0158def4ff9f6021b72b4d25000e61176a060..fe3ac6f9ae89cd77d26328f50654ba95b180ddd9 100644 --- a/drivers/net/sunbmac.c +++ b/drivers/net/sunbmac.c @@ -1082,12 +1082,12 @@ static int __init bigmac_ether_init(struct sbus_dev *qec_sdev) struct bigmac *bp; u8 bsizes, bsizes_more; int i; + DECLARE_MAC_BUF(mac); /* Get a new device struct for this interface. */ dev = alloc_etherdev(sizeof(struct bigmac)); if (!dev) return -ENOMEM; - SET_MODULE_OWNER(dev); if (version_printed++ == 0) printk(KERN_INFO "%s", version); @@ -1227,11 +1227,8 @@ static int __init bigmac_ether_init(struct sbus_dev *qec_sdev) dev_set_drvdata(&bp->bigmac_sdev->ofdev.dev, bp); - printk(KERN_INFO "%s: BigMAC 100baseT Ethernet ", dev->name); - for (i = 0; i < 6; i++) - printk("%2.2x%c", dev->dev_addr[i], - i == 5 ? ' ' : ':'); - printk("\n"); + printk(KERN_INFO "%s: BigMAC 100baseT Ethernet %s\n", + dev->name, print_mac(mac, dev->dev_addr)); return 0; diff --git a/drivers/net/sundance.c b/drivers/net/sundance.c index a8f2af8f778a7d5c51d46af333f3a0a178a5b669..ff98f5d597f117294ec870cce330bfd339a63bca 100644 --- a/drivers/net/sundance.c +++ b/drivers/net/sundance.c @@ -466,8 +466,8 @@ static int __devinit sundance_probe1 (struct pci_dev *pdev, #else int bar = 1; #endif - int phy, phy_idx = 0; - + int phy, phy_end, phy_idx = 0; + DECLARE_MAC_BUF(mac); /* when built into the kernel, we only print version if device is found */ #ifndef MODULE @@ -485,7 +485,6 @@ static int __devinit sundance_probe1 (struct pci_dev *pdev, dev = alloc_etherdev(sizeof(*np)); if (!dev) return -ENOMEM; - SET_MODULE_OWNER(dev); SET_NETDEV_DEV(dev, &pdev->dev); if (pci_request_regions(pdev, DRV_NAME)) @@ -547,19 +546,25 @@ static int __devinit sundance_probe1 (struct pci_dev *pdev, if (i) goto err_out_unmap_rx; - printk(KERN_INFO "%s: %s at %p, ", - dev->name, pci_id_tbl[chip_idx].name, ioaddr); - for (i = 0; i < 5; i++) - printk("%2.2x:", dev->dev_addr[i]); - printk("%2.2x, IRQ %d.\n", dev->dev_addr[i], irq); + printk(KERN_INFO "%s: %s at %p, %s, IRQ %d.\n", + dev->name, pci_id_tbl[chip_idx].name, ioaddr, + print_mac(mac, dev->dev_addr), irq); np->phys[0] = 1; /* Default setting */ np->mii_preamble_required++; + /* * It seems some phys doesn't deal well with address 0 being accessed - * first, so leave address zero to the end of the loop (32 & 31). 
+ * first */ - for (phy = 1; phy <= 32 && phy_idx < MII_CNT; phy++) { + if (sundance_pci_tbl[np->chip_id].device == 0x0200) { + phy = 0; + phy_end = 31; + } else { + phy = 1; + phy_end = 32; /* wraps to zero, due to 'phy & 0x1f' */ + } + for (; phy <= phy_end && phy_idx < MII_CNT; phy++) { int phyx = phy & 0x1f; int mii_status = mdio_read(dev, phyx, MII_BMSR); if (mii_status != 0xffff && mii_status != 0x0000) { diff --git a/drivers/net/sungem.c b/drivers/net/sungem.c index 4328038550344733f910f50f2f8e87e31a7fbf11..53b8344a68effd0766ec73c262d0aab7bf3c4f61 100644 --- a/drivers/net/sungem.c +++ b/drivers/net/sungem.c @@ -19,7 +19,7 @@ * * gem_change_mtu() and gem_set_multicast() are called with a read_lock() * help by net/core/dev.c, thus they can't schedule. That means they can't - * call netif_poll_disable() neither, thus force gem_poll() to keep a spinlock + * call napi_disable() neither, thus force gem_poll() to keep a spinlock * where it could have been dropped. change_mtu especially would love also to * be able to msleep instead of horrid locked delays when resetting the HW, * but that read_lock() makes it impossible, unless I defer it's action to @@ -878,19 +878,20 @@ static int gem_rx(struct gem *gp, int work_to_do) return work_done; } -static int gem_poll(struct net_device *dev, int *budget) +static int gem_poll(struct napi_struct *napi, int budget) { - struct gem *gp = dev->priv; + struct gem *gp = container_of(napi, struct gem, napi); + struct net_device *dev = gp->dev; unsigned long flags; + int work_done; /* * NAPI locking nightmare: See comment at head of driver */ spin_lock_irqsave(&gp->lock, flags); + work_done = 0; do { - int work_to_do, work_done; - /* Handle anomalies */ if (gp->status & GREG_STAT_ABNORMAL) { if (gem_abnormal_irq(dev, gp, gp->status)) @@ -906,29 +907,25 @@ static int gem_poll(struct net_device *dev, int *budget) /* Run RX thread. We don't use any locking here, * code willing to do bad things - like cleaning the - * rx ring - must call netif_poll_disable(), which + * rx ring - must call napi_disable(), which * schedule_timeout()'s if polling is already disabled. 
*/ - work_to_do = min(*budget, dev->quota); - - work_done = gem_rx(gp, work_to_do); - - *budget -= work_done; - dev->quota -= work_done; + work_done += gem_rx(gp, budget); - if (work_done >= work_to_do) - return 1; + if (work_done >= budget) + return work_done; spin_lock_irqsave(&gp->lock, flags); gp->status = readl(gp->regs + GREG_STAT); } while (gp->status & GREG_STAT_NAPI); - __netif_rx_complete(dev); + __netif_rx_complete(dev, napi); gem_enable_ints(gp); spin_unlock_irqrestore(&gp->lock, flags); - return 0; + + return work_done; } static irqreturn_t gem_interrupt(int irq, void *dev_id) @@ -946,17 +943,17 @@ static irqreturn_t gem_interrupt(int irq, void *dev_id) spin_lock_irqsave(&gp->lock, flags); - if (netif_rx_schedule_prep(dev)) { + if (netif_rx_schedule_prep(dev, &gp->napi)) { u32 gem_status = readl(gp->regs + GREG_STAT); if (gem_status == 0) { - netif_poll_enable(dev); + napi_enable(&gp->napi); spin_unlock_irqrestore(&gp->lock, flags); return IRQ_NONE; } gp->status = gem_status; gem_disable_ints(gp); - __netif_rx_schedule(dev); + __netif_rx_schedule(dev, &gp->napi); } spin_unlock_irqrestore(&gp->lock, flags); @@ -2284,7 +2281,7 @@ static void gem_reset_task(struct work_struct *work) mutex_lock(&gp->pm_mutex); - netif_poll_disable(gp->dev); + napi_disable(&gp->napi); spin_lock_irq(&gp->lock); spin_lock(&gp->tx_lock); @@ -2307,7 +2304,7 @@ static void gem_reset_task(struct work_struct *work) spin_unlock(&gp->tx_lock); spin_unlock_irq(&gp->lock); - netif_poll_enable(gp->dev); + napi_enable(&gp->napi); mutex_unlock(&gp->pm_mutex); } @@ -2324,6 +2321,8 @@ static int gem_open(struct net_device *dev) if (!gp->asleep) rc = gem_do_start(dev); gp->opened = (rc == 0); + if (gp->opened) + napi_enable(&gp->napi); mutex_unlock(&gp->pm_mutex); @@ -2334,9 +2333,7 @@ static int gem_close(struct net_device *dev) { struct gem *gp = dev->priv; - /* Note: we don't need to call netif_poll_disable() here because - * our caller (dev_close) already did it for us - */ + napi_disable(&gp->napi); mutex_lock(&gp->pm_mutex); @@ -2358,7 +2355,7 @@ static int gem_suspend(struct pci_dev *pdev, pm_message_t state) mutex_lock(&gp->pm_mutex); - netif_poll_disable(dev); + napi_disable(&gp->napi); printk(KERN_INFO "%s: suspending, WakeOnLan %s\n", dev->name, @@ -2482,7 +2479,7 @@ static int gem_resume(struct pci_dev *pdev) spin_unlock(&gp->tx_lock); spin_unlock_irqrestore(&gp->lock, flags); - netif_poll_enable(dev); + napi_enable(&gp->napi); mutex_unlock(&gp->pm_mutex); @@ -2968,7 +2965,8 @@ static int __devinit gem_init_one(struct pci_dev *pdev, unsigned long gemreg_base, gemreg_len; struct net_device *dev; struct gem *gp; - int i, err, pci_using_dac; + int err, pci_using_dac; + DECLARE_MAC_BUF(mac); if (gem_version_printed++ == 0) printk(KERN_INFO "%s", version); @@ -3026,7 +3024,6 @@ static int __devinit gem_init_one(struct pci_dev *pdev, err = -ENOMEM; goto err_disable_device; } - SET_MODULE_OWNER(dev); SET_NETDEV_DEV(dev, &pdev->dev); gp = dev->priv; @@ -3121,8 +3118,7 @@ static int __devinit gem_init_one(struct pci_dev *pdev, dev->get_stats = gem_get_stats; dev->set_multicast_list = gem_set_multicast; dev->do_ioctl = gem_ioctl; - dev->poll = gem_poll; - dev->weight = 64; + netif_napi_add(dev, &gp->napi, gem_poll, 64); dev->ethtool_ops = &gem_ethtool_ops; dev->tx_timeout = gem_tx_timeout; dev->watchdog_timeo = 5 * HZ; @@ -3154,12 +3150,9 @@ static int __devinit gem_init_one(struct pci_dev *pdev, goto err_out_free_consistent; } - printk(KERN_INFO "%s: Sun GEM (PCI) 10/100/1000BaseT Ethernet ", - dev->name); - for 
(i = 0; i < 6; i++) - printk("%2.2x%c", dev->dev_addr[i], - i == 5 ? ' ' : ':'); - printk("\n"); + printk(KERN_INFO "%s: Sun GEM (PCI) 10/100/1000BaseT Ethernet " + "%s\n", + dev->name, print_mac(mac, dev->dev_addr)); if (gp->phy_type == phy_mii_mdio0 || gp->phy_type == phy_mii_mdio1) diff --git a/drivers/net/sungem.h b/drivers/net/sungem.h index 58cf87c5751e44b63adf331019cf309b039bb986..76d760acc9e27e737bb9ed6a2f887e1e42005bc3 100644 --- a/drivers/net/sungem.h +++ b/drivers/net/sungem.h @@ -993,6 +993,7 @@ struct gem { u32 msg_enable; u32 status; + struct napi_struct napi; struct net_device_stats net_stats; int tx_fifo_sz; diff --git a/drivers/net/sunhme.c b/drivers/net/sunhme.c index 8b35f13318ea4b9e02b87644ed955a897985219d..120c8affe83dba36f29f972657fc05954af6284d 100644 --- a/drivers/net/sunhme.c +++ b/drivers/net/sunhme.c @@ -2664,6 +2664,7 @@ static int __devinit happy_meal_sbus_probe_one(struct sbus_dev *sdev, int is_qfe struct net_device *dev; int i, qfe_slot = -1; int err = -ENODEV; + DECLARE_MAC_BUF(mac); if (is_qfe) { qp = quattro_sbus_find(sdev); @@ -2680,7 +2681,6 @@ static int __devinit happy_meal_sbus_probe_one(struct sbus_dev *sdev, int is_qfe dev = alloc_etherdev(sizeof(struct happy_meal)); if (!dev) goto err_out; - SET_MODULE_OWNER(dev); SET_NETDEV_DEV(dev, &sdev->ofdev.dev); if (hme_version_printed++ == 0) @@ -2851,10 +2851,7 @@ static int __devinit happy_meal_sbus_probe_one(struct sbus_dev *sdev, int is_qfe printk(KERN_INFO "%s: HAPPY MEAL (SBUS) 10/100baseT Ethernet ", dev->name); - for (i = 0; i < 6; i++) - printk("%2.2x%c", - dev->dev_addr[i], i == 5 ? ' ' : ':'); - printk("\n"); + printk("%s\n", print_mac(mac, dev->dev_addr)); return 0; @@ -2989,6 +2986,7 @@ static int __devinit happy_meal_pci_probe(struct pci_dev *pdev, int i, qfe_slot = -1; char prom_name[64]; int err; + DECLARE_MAC_BUF(mac); /* Now make sure pci_dev cookie is there. */ #ifdef CONFIG_SPARC @@ -3022,7 +3020,6 @@ static int __devinit happy_meal_pci_probe(struct pci_dev *pdev, err = -ENOMEM; if (!dev) goto err_out; - SET_MODULE_OWNER(dev); SET_NETDEV_DEV(dev, &pdev->dev); if (hme_version_printed++ == 0) @@ -3203,10 +3200,7 @@ static int __devinit happy_meal_pci_probe(struct pci_dev *pdev, printk(KERN_INFO "%s: HAPPY MEAL (PCI/CheerIO) 10/100BaseT Ethernet ", dev->name); - for (i = 0; i < 6; i++) - printk("%2.2x%c", dev->dev_addr[i], i == 5 ? ' ' : ':'); - - printk("\n"); + printk("%s\n", print_mac(mac, dev->dev_addr)); return 0; diff --git a/drivers/net/sunlance.c b/drivers/net/sunlance.c index 68e4f660367d5e1795ffbef7a4fb2fe6e75339ac..26ade68aeabf29cc342baf7eb4dbe278964e59ae 100644 --- a/drivers/net/sunlance.c +++ b/drivers/net/sunlance.c @@ -248,7 +248,6 @@ struct lance_private { int rx_new, tx_new; int rx_old, tx_old; - struct net_device_stats stats; struct sbus_dma *ledma; /* If set this points to ledma */ char tpe; /* cable-selection is TPE */ char auto_select; /* cable-selection by carrier */ @@ -519,17 +518,17 @@ static void lance_rx_dvma(struct net_device *dev) /* We got an incomplete frame? 
*/ if ((bits & LE_R1_POK) != LE_R1_POK) { - lp->stats.rx_over_errors++; - lp->stats.rx_errors++; + dev->stats.rx_over_errors++; + dev->stats.rx_errors++; } else if (bits & LE_R1_ERR) { /* Count only the end frame as a rx error, * not the beginning */ - if (bits & LE_R1_BUF) lp->stats.rx_fifo_errors++; - if (bits & LE_R1_CRC) lp->stats.rx_crc_errors++; - if (bits & LE_R1_OFL) lp->stats.rx_over_errors++; - if (bits & LE_R1_FRA) lp->stats.rx_frame_errors++; - if (bits & LE_R1_EOP) lp->stats.rx_errors++; + if (bits & LE_R1_BUF) dev->stats.rx_fifo_errors++; + if (bits & LE_R1_CRC) dev->stats.rx_crc_errors++; + if (bits & LE_R1_OFL) dev->stats.rx_over_errors++; + if (bits & LE_R1_FRA) dev->stats.rx_frame_errors++; + if (bits & LE_R1_EOP) dev->stats.rx_errors++; } else { len = (rd->mblength & 0xfff) - 4; skb = dev_alloc_skb(len + 2); @@ -537,14 +536,14 @@ static void lance_rx_dvma(struct net_device *dev) if (skb == NULL) { printk(KERN_INFO "%s: Memory squeeze, deferring packet.\n", dev->name); - lp->stats.rx_dropped++; + dev->stats.rx_dropped++; rd->mblength = 0; rd->rmd1_bits = LE_R1_OWN; lp->rx_new = RX_NEXT(entry); return; } - lp->stats.rx_bytes += len; + dev->stats.rx_bytes += len; skb_reserve(skb, 2); /* 16 byte align */ skb_put(skb, len); /* make room */ @@ -554,7 +553,7 @@ static void lance_rx_dvma(struct net_device *dev) skb->protocol = eth_type_trans(skb, dev); netif_rx(skb); dev->last_rx = jiffies; - lp->stats.rx_packets++; + dev->stats.rx_packets++; } /* Return the packet to the pool */ @@ -586,12 +585,12 @@ static void lance_tx_dvma(struct net_device *dev) if (bits & LE_T1_ERR) { u16 status = td->misc; - lp->stats.tx_errors++; - if (status & LE_T3_RTY) lp->stats.tx_aborted_errors++; - if (status & LE_T3_LCOL) lp->stats.tx_window_errors++; + dev->stats.tx_errors++; + if (status & LE_T3_RTY) dev->stats.tx_aborted_errors++; + if (status & LE_T3_LCOL) dev->stats.tx_window_errors++; if (status & LE_T3_CLOS) { - lp->stats.tx_carrier_errors++; + dev->stats.tx_carrier_errors++; if (lp->auto_select) { lp->tpe = 1 - lp->tpe; printk(KERN_NOTICE "%s: Carrier Lost, trying %s\n", @@ -608,7 +607,7 @@ static void lance_tx_dvma(struct net_device *dev) * transmitter, restart the adapter. */ if (status & (LE_T3_BUF|LE_T3_UFL)) { - lp->stats.tx_fifo_errors++; + dev->stats.tx_fifo_errors++; printk(KERN_ERR "%s: Tx: ERR_BUF|ERR_UFL, restarting\n", dev->name); @@ -626,13 +625,13 @@ static void lance_tx_dvma(struct net_device *dev) /* One collision before packet was sent. */ if (bits & LE_T1_EONE) - lp->stats.collisions++; + dev->stats.collisions++; /* More than one collision, be optimistic. */ if (bits & LE_T1_EMORE) - lp->stats.collisions += 2; + dev->stats.collisions += 2; - lp->stats.tx_packets++; + dev->stats.tx_packets++; } j = TX_NEXT(j); @@ -692,17 +691,17 @@ static void lance_rx_pio(struct net_device *dev) /* We got an incomplete frame? 
*/ if ((bits & LE_R1_POK) != LE_R1_POK) { - lp->stats.rx_over_errors++; - lp->stats.rx_errors++; + dev->stats.rx_over_errors++; + dev->stats.rx_errors++; } else if (bits & LE_R1_ERR) { /* Count only the end frame as a rx error, * not the beginning */ - if (bits & LE_R1_BUF) lp->stats.rx_fifo_errors++; - if (bits & LE_R1_CRC) lp->stats.rx_crc_errors++; - if (bits & LE_R1_OFL) lp->stats.rx_over_errors++; - if (bits & LE_R1_FRA) lp->stats.rx_frame_errors++; - if (bits & LE_R1_EOP) lp->stats.rx_errors++; + if (bits & LE_R1_BUF) dev->stats.rx_fifo_errors++; + if (bits & LE_R1_CRC) dev->stats.rx_crc_errors++; + if (bits & LE_R1_OFL) dev->stats.rx_over_errors++; + if (bits & LE_R1_FRA) dev->stats.rx_frame_errors++; + if (bits & LE_R1_EOP) dev->stats.rx_errors++; } else { len = (sbus_readw(&rd->mblength) & 0xfff) - 4; skb = dev_alloc_skb(len + 2); @@ -710,14 +709,14 @@ static void lance_rx_pio(struct net_device *dev) if (skb == NULL) { printk(KERN_INFO "%s: Memory squeeze, deferring packet.\n", dev->name); - lp->stats.rx_dropped++; + dev->stats.rx_dropped++; sbus_writew(0, &rd->mblength); sbus_writeb(LE_R1_OWN, &rd->rmd1_bits); lp->rx_new = RX_NEXT(entry); return; } - lp->stats.rx_bytes += len; + dev->stats.rx_bytes += len; skb_reserve (skb, 2); /* 16 byte align */ skb_put(skb, len); /* make room */ @@ -725,7 +724,7 @@ static void lance_rx_pio(struct net_device *dev) skb->protocol = eth_type_trans(skb, dev); netif_rx(skb); dev->last_rx = jiffies; - lp->stats.rx_packets++; + dev->stats.rx_packets++; } /* Return the packet to the pool */ @@ -757,12 +756,12 @@ static void lance_tx_pio(struct net_device *dev) if (bits & LE_T1_ERR) { u16 status = sbus_readw(&td->misc); - lp->stats.tx_errors++; - if (status & LE_T3_RTY) lp->stats.tx_aborted_errors++; - if (status & LE_T3_LCOL) lp->stats.tx_window_errors++; + dev->stats.tx_errors++; + if (status & LE_T3_RTY) dev->stats.tx_aborted_errors++; + if (status & LE_T3_LCOL) dev->stats.tx_window_errors++; if (status & LE_T3_CLOS) { - lp->stats.tx_carrier_errors++; + dev->stats.tx_carrier_errors++; if (lp->auto_select) { lp->tpe = 1 - lp->tpe; printk(KERN_NOTICE "%s: Carrier Lost, trying %s\n", @@ -779,7 +778,7 @@ static void lance_tx_pio(struct net_device *dev) * transmitter, restart the adapter. */ if (status & (LE_T3_BUF|LE_T3_UFL)) { - lp->stats.tx_fifo_errors++; + dev->stats.tx_fifo_errors++; printk(KERN_ERR "%s: Tx: ERR_BUF|ERR_UFL, restarting\n", dev->name); @@ -797,13 +796,13 @@ static void lance_tx_pio(struct net_device *dev) /* One collision before packet was sent. */ if (bits & LE_T1_EONE) - lp->stats.collisions++; + dev->stats.collisions++; /* More than one collision, be optimistic. 
*/ if (bits & LE_T1_EMORE) - lp->stats.collisions += 2; + dev->stats.collisions += 2; - lp->stats.tx_packets++; + dev->stats.tx_packets++; } j = TX_NEXT(j); @@ -844,10 +843,10 @@ static irqreturn_t lance_interrupt(int irq, void *dev_id) lp->tx(dev); if (csr0 & LE_C0_BABL) - lp->stats.tx_errors++; + dev->stats.tx_errors++; if (csr0 & LE_C0_MISS) - lp->stats.rx_errors++; + dev->stats.rx_errors++; if (csr0 & LE_C0_MERR) { if (lp->dregs) { @@ -1127,7 +1126,7 @@ static int lance_start_xmit(struct sk_buff *skb, struct net_device *dev) spin_lock_irq(&lp->lock); - lp->stats.tx_bytes += len; + dev->stats.tx_bytes += len; entry = lp->tx_new & TX_RING_MOD_MASK; if (lp->pio_buffer) { @@ -1170,13 +1169,6 @@ static int lance_start_xmit(struct sk_buff *skb, struct net_device *dev) return 0; } -static struct net_device_stats *lance_get_stats(struct net_device *dev) -{ - struct lance_private *lp = netdev_priv(dev); - - return &lp->stats; -} - /* taken from the depca driver */ static void lance_load_multicast(struct net_device *dev) { @@ -1329,13 +1321,13 @@ static int __devinit sparc_lance_probe_one(struct sbus_dev *sdev, struct net_device *dev; struct lance_private *lp; int i; + DECLARE_MAC_BUF(mac); dev = alloc_etherdev(sizeof(struct lance_private) + 8); if (!dev) return -ENOMEM; lp = netdev_priv(dev); - memset(lp, 0, sizeof(*lp)); if (sparc_lance_debug && version_printed++ == 0) printk (KERN_INFO "%s", version); @@ -1458,14 +1450,12 @@ static int __devinit sparc_lance_probe_one(struct sbus_dev *sdev, lp->dregs = NULL; lp->dev = dev; - SET_MODULE_OWNER(dev); SET_NETDEV_DEV(dev, &sdev->ofdev.dev); dev->open = &lance_open; dev->stop = &lance_close; dev->hard_start_xmit = &lance_start_xmit; dev->tx_timeout = &lance_tx_timeout; dev->watchdog_timeo = 5*HZ; - dev->get_stats = &lance_get_stats; dev->set_multicast_list = &lance_set_multicast; dev->ethtool_ops = &sparc_lance_ethtool_ops; @@ -1489,12 +1479,8 @@ static int __devinit sparc_lance_probe_one(struct sbus_dev *sdev, dev_set_drvdata(&sdev->ofdev.dev, lp); - printk(KERN_INFO "%s: LANCE ", dev->name); - - for (i = 0; i < 6; i++) - printk("%2.2x%c", dev->dev_addr[i], - i == 5 ? 
' ': ':'); - printk("\n"); + printk(KERN_INFO "%s: LANCE %s\n", + dev->name, print_mac(mac, dev->dev_addr)); return 0; diff --git a/drivers/net/sunqe.c b/drivers/net/sunqe.c index 1b65ae8a1c7c6e3ef778d727fc6f0be83c40c9b3..ff23c6489efd33033d4cfd3f287cdd343805f0a7 100644 --- a/drivers/net/sunqe.c +++ b/drivers/net/sunqe.c @@ -260,31 +260,31 @@ static int qe_is_bolixed(struct sunqe *qep, u32 qe_status) if (qe_status & CREG_STAT_EDEFER) { printk(KERN_ERR "%s: Excessive transmit defers.\n", dev->name); - qep->net_stats.tx_errors++; + dev->stats.tx_errors++; } if (qe_status & CREG_STAT_CLOSS) { printk(KERN_ERR "%s: Carrier lost, link down?\n", dev->name); - qep->net_stats.tx_errors++; - qep->net_stats.tx_carrier_errors++; + dev->stats.tx_errors++; + dev->stats.tx_carrier_errors++; } if (qe_status & CREG_STAT_ERETRIES) { printk(KERN_ERR "%s: Excessive transmit retries (more than 16).\n", dev->name); - qep->net_stats.tx_errors++; + dev->stats.tx_errors++; mace_hwbug_workaround = 1; } if (qe_status & CREG_STAT_LCOLL) { printk(KERN_ERR "%s: Late transmit collision.\n", dev->name); - qep->net_stats.tx_errors++; - qep->net_stats.collisions++; + dev->stats.tx_errors++; + dev->stats.collisions++; mace_hwbug_workaround = 1; } if (qe_status & CREG_STAT_FUFLOW) { printk(KERN_ERR "%s: Transmit fifo underflow, driver bug.\n", dev->name); - qep->net_stats.tx_errors++; + dev->stats.tx_errors++; mace_hwbug_workaround = 1; } @@ -297,104 +297,104 @@ static int qe_is_bolixed(struct sunqe *qep, u32 qe_status) } if (qe_status & CREG_STAT_CCOFLOW) { - qep->net_stats.tx_errors += 256; - qep->net_stats.collisions += 256; + dev->stats.tx_errors += 256; + dev->stats.collisions += 256; } if (qe_status & CREG_STAT_TXDERROR) { printk(KERN_ERR "%s: Transmit descriptor is bogus, driver bug.\n", dev->name); - qep->net_stats.tx_errors++; - qep->net_stats.tx_aborted_errors++; + dev->stats.tx_errors++; + dev->stats.tx_aborted_errors++; mace_hwbug_workaround = 1; } if (qe_status & CREG_STAT_TXLERR) { printk(KERN_ERR "%s: Transmit late error.\n", dev->name); - qep->net_stats.tx_errors++; + dev->stats.tx_errors++; mace_hwbug_workaround = 1; } if (qe_status & CREG_STAT_TXPERR) { printk(KERN_ERR "%s: Transmit DMA parity error.\n", dev->name); - qep->net_stats.tx_errors++; - qep->net_stats.tx_aborted_errors++; + dev->stats.tx_errors++; + dev->stats.tx_aborted_errors++; mace_hwbug_workaround = 1; } if (qe_status & CREG_STAT_TXSERR) { printk(KERN_ERR "%s: Transmit DMA sbus error ack.\n", dev->name); - qep->net_stats.tx_errors++; - qep->net_stats.tx_aborted_errors++; + dev->stats.tx_errors++; + dev->stats.tx_aborted_errors++; mace_hwbug_workaround = 1; } if (qe_status & CREG_STAT_RCCOFLOW) { - qep->net_stats.rx_errors += 256; - qep->net_stats.collisions += 256; + dev->stats.rx_errors += 256; + dev->stats.collisions += 256; } if (qe_status & CREG_STAT_RUOFLOW) { - qep->net_stats.rx_errors += 256; - qep->net_stats.rx_over_errors += 256; + dev->stats.rx_errors += 256; + dev->stats.rx_over_errors += 256; } if (qe_status & CREG_STAT_MCOFLOW) { - qep->net_stats.rx_errors += 256; - qep->net_stats.rx_missed_errors += 256; + dev->stats.rx_errors += 256; + dev->stats.rx_missed_errors += 256; } if (qe_status & CREG_STAT_RXFOFLOW) { printk(KERN_ERR "%s: Receive fifo overflow.\n", dev->name); - qep->net_stats.rx_errors++; - qep->net_stats.rx_over_errors++; + dev->stats.rx_errors++; + dev->stats.rx_over_errors++; } if (qe_status & CREG_STAT_RLCOLL) { printk(KERN_ERR "%s: Late receive collision.\n", dev->name); - qep->net_stats.rx_errors++; - 
qep->net_stats.collisions++; + dev->stats.rx_errors++; + dev->stats.collisions++; } if (qe_status & CREG_STAT_FCOFLOW) { - qep->net_stats.rx_errors += 256; - qep->net_stats.rx_frame_errors += 256; + dev->stats.rx_errors += 256; + dev->stats.rx_frame_errors += 256; } if (qe_status & CREG_STAT_CECOFLOW) { - qep->net_stats.rx_errors += 256; - qep->net_stats.rx_crc_errors += 256; + dev->stats.rx_errors += 256; + dev->stats.rx_crc_errors += 256; } if (qe_status & CREG_STAT_RXDROP) { printk(KERN_ERR "%s: Receive packet dropped.\n", dev->name); - qep->net_stats.rx_errors++; - qep->net_stats.rx_dropped++; - qep->net_stats.rx_missed_errors++; + dev->stats.rx_errors++; + dev->stats.rx_dropped++; + dev->stats.rx_missed_errors++; } if (qe_status & CREG_STAT_RXSMALL) { printk(KERN_ERR "%s: Receive buffer too small, driver bug.\n", dev->name); - qep->net_stats.rx_errors++; - qep->net_stats.rx_length_errors++; + dev->stats.rx_errors++; + dev->stats.rx_length_errors++; } if (qe_status & CREG_STAT_RXLERR) { printk(KERN_ERR "%s: Receive late error.\n", dev->name); - qep->net_stats.rx_errors++; + dev->stats.rx_errors++; mace_hwbug_workaround = 1; } if (qe_status & CREG_STAT_RXPERR) { printk(KERN_ERR "%s: Receive DMA parity error.\n", dev->name); - qep->net_stats.rx_errors++; - qep->net_stats.rx_missed_errors++; + dev->stats.rx_errors++; + dev->stats.rx_missed_errors++; mace_hwbug_workaround = 1; } if (qe_status & CREG_STAT_RXSERR) { printk(KERN_ERR "%s: Receive DMA sbus error ack.\n", dev->name); - qep->net_stats.rx_errors++; - qep->net_stats.rx_missed_errors++; + dev->stats.rx_errors++; + dev->stats.rx_missed_errors++; mace_hwbug_workaround = 1; } @@ -409,6 +409,7 @@ static int qe_is_bolixed(struct sunqe *qep, u32 qe_status) static void qe_rx(struct sunqe *qep) { struct qe_rxd *rxbase = &qep->qe_block->qe_rxd[0]; + struct net_device *dev = qep->dev; struct qe_rxd *this; struct sunqe_buffers *qbufs = qep->buffers; __u32 qbufs_dvma = qep->buffers_dvma; @@ -428,14 +429,14 @@ static void qe_rx(struct sunqe *qep) /* Check for errors. */ if (len < ETH_ZLEN) { - qep->net_stats.rx_errors++; - qep->net_stats.rx_length_errors++; - qep->net_stats.rx_dropped++; + dev->stats.rx_errors++; + dev->stats.rx_length_errors++; + dev->stats.rx_dropped++; } else { skb = dev_alloc_skb(len + 2); if (skb == NULL) { drops++; - qep->net_stats.rx_dropped++; + dev->stats.rx_dropped++; } else { skb_reserve(skb, 2); skb_put(skb, len); @@ -444,8 +445,8 @@ static void qe_rx(struct sunqe *qep) skb->protocol = eth_type_trans(skb, qep->dev); netif_rx(skb); qep->dev->last_rx = jiffies; - qep->net_stats.rx_packets++; - qep->net_stats.rx_bytes += len; + dev->stats.rx_packets++; + dev->stats.rx_bytes += len; } } end_rxd->rx_addr = this_qbuf_dvma; @@ -603,8 +604,8 @@ static int qe_start_xmit(struct sk_buff *skb, struct net_device *dev) dev->trans_start = jiffies; sbus_writel(CREG_CTRL_TWAKEUP, qep->qcregs + CREG_CTRL); - qep->net_stats.tx_packets++; - qep->net_stats.tx_bytes += len; + dev->stats.tx_packets++; + dev->stats.tx_bytes += len; if (TX_BUFFS_AVAIL(qep) <= 0) { /* Halt the net queue and enable tx interrupts. 
@@ -622,13 +623,6 @@ static int qe_start_xmit(struct sk_buff *skb, struct net_device *dev) return 0; } -static struct net_device_stats *qe_get_stats(struct net_device *dev) -{ - struct sunqe *qep = (struct sunqe *) dev->priv; - - return &qep->net_stats; -} - static void qe_set_multicast(struct net_device *dev) { struct sunqe *qep = (struct sunqe *) dev->priv; @@ -898,13 +892,11 @@ static int __init qec_ether_init(struct sbus_dev *sdev) /* Stop this QE. */ qe_stop(qe); - SET_MODULE_OWNER(dev); SET_NETDEV_DEV(dev, &sdev->ofdev.dev); dev->open = qe_open; dev->stop = qe_close; dev->hard_start_xmit = qe_start_xmit; - dev->get_stats = qe_get_stats; dev->set_multicast_list = qe_set_multicast; dev->tx_timeout = qe_tx_timeout; dev->watchdog_timeo = 5*HZ; diff --git a/drivers/net/sunqe.h b/drivers/net/sunqe.h index af34f36111edef7f64c31e402e26feb9a9550f5e..347c8ddc1592a447109c8a4f94bf3e8d45d2e432 100644 --- a/drivers/net/sunqe.h +++ b/drivers/net/sunqe.h @@ -342,7 +342,6 @@ struct sunqe { __u32 buffers_dvma; /* DVMA visible address. */ struct sunqec *parent; u8 mconfig; /* Base MACE mconfig value */ - struct net_device_stats net_stats; /* Statistical counters */ struct sbus_dev *qe_sdev; /* QE's SBUS device struct */ struct net_device *dev; /* QE's netdevice struct */ int channel; /* Who am I? */ diff --git a/drivers/net/tc35815.c b/drivers/net/tc35815.c index ec41469eee82d569391780c5263d1e4e9584369b..a679f4310ce1ae0ef64ed1ea8b7d8bb11f7f299a 100644 --- a/drivers/net/tc35815.c +++ b/drivers/net/tc35815.c @@ -414,6 +414,9 @@ enum tc35815_timer_state { struct tc35815_local { struct pci_dev *pci_dev; + struct net_device *dev; + struct napi_struct napi; + /* statistics */ struct net_device_stats stats; struct { @@ -566,7 +569,7 @@ static int tc35815_send_packet(struct sk_buff *skb, struct net_device *dev); static irqreturn_t tc35815_interrupt(int irq, void *dev_id); #ifdef TC35815_NAPI static int tc35815_rx(struct net_device *dev, int limit); -static int tc35815_poll(struct net_device *dev, int *budget); +static int tc35815_poll(struct napi_struct *napi, int budget); #else static void tc35815_rx(struct net_device *dev); #endif @@ -682,9 +685,9 @@ static int __devinit tc35815_init_one (struct pci_dev *pdev, dev_err(&pdev->dev, "unable to alloc new ethernet\n"); return -ENOMEM; } - SET_MODULE_OWNER(dev); SET_NETDEV_DEV(dev, &pdev->dev); lp = dev->priv; + lp->dev = dev; /* enable device (incl. PCI PM wakeup), and bus-mastering */ rc = pci_enable_device (pdev); @@ -738,8 +741,7 @@ static int __devinit tc35815_init_one (struct pci_dev *pdev, dev->tx_timeout = tc35815_tx_timeout; dev->watchdog_timeo = TC35815_TX_TIMEOUT; #ifdef TC35815_NAPI - dev->poll = tc35815_poll; - dev->weight = NAPI_WEIGHT; + netif_napi_add(dev, &lp->napi, tc35815_poll, NAPI_WEIGHT); #endif #ifdef CONFIG_NET_POLL_CONTROLLER dev->poll_controller = tc35815_poll_controller; @@ -748,8 +750,6 @@ static int __devinit tc35815_init_one (struct pci_dev *pdev, dev->irq = pdev->irq; dev->base_addr = (unsigned long) ioaddr; - /* dev->priv/lp zeroed and aligned in alloc_etherdev */ - lp = dev->priv; spin_lock_init(&lp->lock); lp->pci_dev = pdev; lp->boardtype = ent->driver_data; @@ -1237,6 +1237,10 @@ tc35815_open(struct net_device *dev) return -EAGAIN; } +#ifdef TC35815_NAPI + napi_enable(&lp->napi); +#endif + /* Reset the hardware here. Don't forget to set the station address. 
*/ spin_lock_irq(&lp->lock); tc35815_chip_init(dev); @@ -1436,6 +1440,7 @@ static int tc35815_do_interrupt(struct net_device *dev, u32 status) static irqreturn_t tc35815_interrupt(int irq, void *dev_id) { struct net_device *dev = dev_id; + struct tc35815_local *lp = netdev_priv(dev); struct tc35815_regs __iomem *tr = (struct tc35815_regs __iomem *)dev->base_addr; #ifdef TC35815_NAPI @@ -1444,8 +1449,8 @@ static irqreturn_t tc35815_interrupt(int irq, void *dev_id) if (!(dmactl & DMA_IntMask)) { /* disable interrupts */ tc_writel(dmactl | DMA_IntMask, &tr->DMA_Ctl); - if (netif_rx_schedule_prep(dev)) - __netif_rx_schedule(dev); + if (netif_rx_schedule_prep(dev, &lp->napi)) + __netif_rx_schedule(dev, &lp->napi); else { printk(KERN_ERR "%s: interrupt taken in poll\n", dev->name); @@ -1726,13 +1731,12 @@ tc35815_rx(struct net_device *dev) } #ifdef TC35815_NAPI -static int -tc35815_poll(struct net_device *dev, int *budget) +static int tc35815_poll(struct napi_struct *napi, int budget) { - struct tc35815_local *lp = dev->priv; + struct tc35815_local *lp = container_of(napi, struct tc35815_local, napi); + struct net_device *dev = lp->dev; struct tc35815_regs __iomem *tr = (struct tc35815_regs __iomem *)dev->base_addr; - int limit = min(*budget, dev->quota); int received = 0, handled; u32 status; @@ -1744,23 +1748,19 @@ tc35815_poll(struct net_device *dev, int *budget) handled = tc35815_do_interrupt(dev, status, limit); if (handled >= 0) { received += handled; - limit -= handled; - if (limit <= 0) + if (received >= budget) break; } status = tc_readl(&tr->Int_Src); } while (status); spin_unlock(&lp->lock); - dev->quota -= received; - *budget -= received; - if (limit <= 0) - return 1; - - netif_rx_complete(dev); - /* enable interrupts */ - tc_writel(tc_readl(&tr->DMA_Ctl) & ~DMA_IntMask, &tr->DMA_Ctl); - return 0; + if (received < budget) { + netif_rx_complete(dev, napi); + /* enable interrupts */ + tc_writel(tc_readl(&tr->DMA_Ctl) & ~DMA_IntMask, &tr->DMA_Ctl); + } + return received; } #endif @@ -1949,7 +1949,11 @@ static int tc35815_close(struct net_device *dev) { struct tc35815_local *lp = dev->priv; + netif_stop_queue(dev); +#ifdef TC35815_NAPI + napi_disable(&lp->napi); +#endif /* Flush the Tx and disable Rx here. */ @@ -2158,10 +2162,16 @@ static void tc35815_set_msglevel(struct net_device *dev, u32 datum) lp->msg_enable = datum; } -static int tc35815_get_stats_count(struct net_device *dev) +static int tc35815_get_sset_count(struct net_device *dev, int sset) { struct tc35815_local *lp = dev->priv; - return sizeof(lp->lstats) / sizeof(int); + + switch (sset) { + case ETH_SS_STATS: + return sizeof(lp->lstats) / sizeof(int); + default: + return -EOPNOTSUPP; + } } static void tc35815_get_ethtool_stats(struct net_device *dev, struct ethtool_stats *stats, u64 *data) @@ -2196,7 +2206,7 @@ static const struct ethtool_ops tc35815_ethtool_ops = { .get_msglevel = tc35815_get_msglevel, .set_msglevel = tc35815_set_msglevel, .get_strings = tc35815_get_strings, - .get_stats_count = tc35815_get_stats_count, + .get_sset_count = tc35815_get_sset_count, .get_ethtool_stats = tc35815_get_ethtool_stats, }; diff --git a/drivers/net/tehuti.c b/drivers/net/tehuti.c new file mode 100644 index 0000000000000000000000000000000000000000..8d04654f0c59ccb461db144ca79e2780cc2f75a7 --- /dev/null +++ b/drivers/net/tehuti.c @@ -0,0 +1,2506 @@ +/* + * Tehuti Networks(R) Network Driver + * ethtool interface implementation + * Copyright (C) 2007 Tehuti Networks Ltd. 
All rights reserved
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+/*
+ * RX HW/SW interaction overview
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ * There are 2 types of RX communication channels between driver and NIC.
+ * 1) RX Free Fifo - RXF - holds descriptors of empty buffers to accept incoming
+ * traffic. This Fifo is filled by SW and is read by HW. Each descriptor holds
+ * info about buffer's location, size and ID. An ID field is used to identify a
+ * buffer when it's returned with data via RXD Fifo (see below).
+ * 2) RX Data Fifo - RXD - holds descriptors of full buffers. This Fifo is
+ * filled by HW and is read by SW. Each descriptor holds status and ID.
+ * HW pops a descriptor from the RXF Fifo, stores its ID, fills the buffer with
+ * incoming data, moves it into host memory via DMA, builds a new RXD descriptor
+ * with the same ID, pushes it into the RXD Fifo and raises an interrupt to
+ * indicate new RX data.
+ *
+ * Current NIC configuration (registers + firmware) makes NIC use 2 RXF Fifos.
+ * One holds 1.5K packets and the other 26K packets. Depending on incoming
+ * packet size, HW decides on a RXF Fifo to pop a buffer from. When the packet
+ * is filled with data, HW builds a new RXD descriptor for it and pushes it into
+ * the single RXD Fifo.
+ *
+ * RX SW Data Structures
+ * ~~~~~~~~~~~~~~~~~~~~~
+ * skb db - used to keep track of all skbs owned by SW and their dma addresses.
+ * For the RX case, ownership lasts from allocating a new empty skb for RXF until
+ * accepting the full skb from RXD and passing it to the OS. Each RXF Fifo has
+ * its own skb db. Implemented as an array with a bitmask.
+ * fifo - keeps info about fifo's size and location, relevant HW registers,
+ * usage and skb db. Each RXD and RXF Fifo has its own fifo structure.
+ * Implemented as a simple struct.
+ *
+ * RX SW Execution Flow
+ * ~~~~~~~~~~~~~~~~~~~~
+ * Upon initialization (ifconfig up) the driver creates RX fifos and initializes
+ * relevant registers. At the end of the init phase, the driver enables
+ * interrupts. The NIC sees that there are no RXF buffers and raises the
+ * RD_INTR interrupt, the isr fills skbs and Rx begins.
+ * The driver has two receive operation modes:
+ * NAPI - interrupt-driven mixed with polling
+ * interrupt-driven only
+ *
+ * The interrupt-driven only flow is as follows. When a buffer is ready, HW
+ * raises an interrupt and the isr is called. The isr collects all available
+ * packets (bdx_rx_receive), refills skbs (bdx_rx_alloc_skbs) and exits.
+
+ * Rx buffer allocation note
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~
+ * The driver takes care to feed only as many RxF descriptors as will not let
+ * the corresponding RxD descriptors fill the entire RxD fifo. The main reason
+ * is the lack of an overflow check in Bordeaux for the RxD fifo free/used size.
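The RXF/RXD handshake described in this header comment reduces to a short receive loop. The sketch below is illustrative only and is not part of the patch: struct rxd_desc, rxd_pop(), skb_db_get() and rxf_refill() are placeholder names standing in for the real bdx_rx_receive()/bdx_rx_alloc_skbs() paths that appear later in tehuti.c; only struct bdx_priv and netif_receive_skb() come from the code itself.

	static int bdx_rx_sketch(struct bdx_priv *priv, int budget)
	{
		int done = 0;

		while (done < budget) {
			/* RXD fifo: descriptor written back by HW, carrying the
			 * ID of the buffer it filled */
			struct rxd_desc *rxd = rxd_pop(priv);

			if (!rxd)
				break;

			/* the ID maps the RXD entry back to the skb handed out
			 * through the RXF fifo (the "skb db" above) */
			struct sk_buff *skb = skb_db_get(priv, rxd->id);

			netif_receive_skb(skb);
			done++;

			/* replace the consumed buffer: push a fresh empty skb
			 * descriptor into the RXF fifo for HW to reuse */
			rxf_refill(priv);
		}
		return done;
	}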
+ * FIXME: this is NOT fully implemented, more work should be done + * + */ + +#include "tehuti.h" +#include "tehuti_fw.h" + +static struct pci_device_id __devinitdata bdx_pci_tbl[] = { + {0x1FC9, 0x3009, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, + {0x1FC9, 0x3010, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, + {0x1FC9, 0x3014, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, + {0} +}; + +MODULE_DEVICE_TABLE(pci, bdx_pci_tbl); + +/* Definitions needed by ISR or NAPI functions */ +static void bdx_rx_alloc_skbs(struct bdx_priv *priv, struct rxf_fifo *f); +static void bdx_tx_cleanup(struct bdx_priv *priv); +static int bdx_rx_receive(struct bdx_priv *priv, struct rxd_fifo *f, int budget); + +/* Definitions needed by FW loading */ +static void bdx_tx_push_desc_safe(struct bdx_priv *priv, void *data, int size); + +/* Definitions needed by hw_start */ +static int bdx_tx_init(struct bdx_priv *priv); +static int bdx_rx_init(struct bdx_priv *priv); + +/* Definitions needed by bdx_close */ +static void bdx_rx_free(struct bdx_priv *priv); +static void bdx_tx_free(struct bdx_priv *priv); + +/* Definitions needed by bdx_probe */ +static void bdx_ethtool_ops(struct net_device *netdev); + +/************************************************************************* + * Print Info * + *************************************************************************/ + +static void print_hw_id(struct pci_dev *pdev) +{ + struct pci_nic *nic = pci_get_drvdata(pdev); + u16 pci_link_status = 0; + u16 pci_ctrl = 0; + + pci_read_config_word(pdev, PCI_LINK_STATUS_REG, &pci_link_status); + pci_read_config_word(pdev, PCI_DEV_CTRL_REG, &pci_ctrl); + + printk(KERN_INFO "tehuti: %s%s\n", BDX_NIC_NAME, + nic->port_num == 1 ? "" : ", 2-Port"); + printk(KERN_INFO + "tehuti: srom 0x%x fpga %d build %u lane# %d" + " max_pl 0x%x mrrs 0x%x\n", + readl(nic->regs + SROM_VER), readl(nic->regs + FPGA_VER) & 0xFFF, + readl(nic->regs + FPGA_SEED), + GET_LINK_STATUS_LANES(pci_link_status), + GET_DEV_CTRL_MAXPL(pci_ctrl), GET_DEV_CTRL_MRRS(pci_ctrl)); +} + +static void print_fw_id(struct pci_nic *nic) +{ + printk(KERN_INFO "tehuti: fw 0x%x\n", readl(nic->regs + FW_VER)); +} + +static void print_eth_id(struct net_device *ndev) +{ + printk(KERN_INFO "%s: %s, Port %c\n", ndev->name, BDX_NIC_NAME, + (ndev->if_port == 0) ? 'A' : 'B'); + +} + +/************************************************************************* + * Code * + *************************************************************************/ + +#define bdx_enable_interrupts(priv) \ + do { WRITE_REG(priv, regIMR, IR_RUN); } while (0) +#define bdx_disable_interrupts(priv) \ + do { WRITE_REG(priv, regIMR, 0); } while (0) + +/* bdx_fifo_init + * create TX/RX descriptor fifo for host-NIC communication. 
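As an aside on the probe plumbing: the bdx_pci_tbl[] initializers near the top of this file spell out every pci_device_id field by position. The same table could be written with the PCI_DEVICE() helper from <linux/pci.h>, which fills in PCI_ANY_ID for the subsystem IDs and leaves the remaining fields zero; this is shown only for comparison, the patch keeps the explicit form.

	static struct pci_device_id __devinitdata bdx_pci_tbl[] = {
		{ PCI_DEVICE(0x1FC9, 0x3009) },	/* vendor 0x1FC9 = Tehuti Networks */
		{ PCI_DEVICE(0x1FC9, 0x3010) },
		{ PCI_DEVICE(0x1FC9, 0x3014) },
		{ 0 }
	};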
+ * 1K extra space is allocated at the end of the fifo to simplify + * processing of descriptors that wraps around fifo's end + * @priv - NIC private structure + * @f - fifo to initialize + * @fsz_type - fifo size type: 0-4KB, 1-8KB, 2-16KB, 3-32KB + * @reg_XXX - offsets of registers relative to base address + * + * Returns 0 on success, negative value on failure + * + */ +static int +bdx_fifo_init(struct bdx_priv *priv, struct fifo *f, int fsz_type, + u16 reg_CFG0, u16 reg_CFG1, u16 reg_RPTR, u16 reg_WPTR) +{ + u16 memsz = FIFO_SIZE * (1 << fsz_type); + + memset(f, 0, sizeof(struct fifo)); + /* pci_alloc_consistent gives us 4k-aligned memory */ + f->va = pci_alloc_consistent(priv->pdev, + memsz + FIFO_EXTRA_SPACE, &f->da); + if (!f->va) { + ERR("pci_alloc_consistent failed\n"); + RET(-ENOMEM); + } + f->reg_CFG0 = reg_CFG0; + f->reg_CFG1 = reg_CFG1; + f->reg_RPTR = reg_RPTR; + f->reg_WPTR = reg_WPTR; + f->rptr = 0; + f->wptr = 0; + f->memsz = memsz; + f->size_mask = memsz - 1; + WRITE_REG(priv, reg_CFG0, (u32) ((f->da & TX_RX_CFG0_BASE) | fsz_type)); + WRITE_REG(priv, reg_CFG1, H32_64(f->da)); + + RET(0); +} + +/* bdx_fifo_free - free all resources used by fifo + * @priv - NIC private structure + * @f - fifo to release + */ +static void bdx_fifo_free(struct bdx_priv *priv, struct fifo *f) +{ + ENTER; + if (f->va) { + pci_free_consistent(priv->pdev, + f->memsz + FIFO_EXTRA_SPACE, f->va, f->da); + f->va = NULL; + } + RET(); +} + +/* + * bdx_link_changed - notifies OS about hw link state. + * @bdx_priv - hw adapter structure + */ +static void bdx_link_changed(struct bdx_priv *priv) +{ + u32 link = READ_REG(priv, regMAC_LNK_STAT) & MAC_LINK_STAT; + + if (!link) { + if (netif_carrier_ok(priv->ndev)) { + netif_stop_queue(priv->ndev); + netif_carrier_off(priv->ndev); + ERR("%s: Link Down\n", priv->ndev->name); + } + } else { + if (!netif_carrier_ok(priv->ndev)) { + netif_wake_queue(priv->ndev); + netif_carrier_on(priv->ndev); + ERR("%s: Link Up\n", priv->ndev->name); + } + } +} + +static void bdx_isr_extra(struct bdx_priv *priv, u32 isr) +{ + if (isr & IR_RX_FREE_0) { + bdx_rx_alloc_skbs(priv, &priv->rxf_fifo0); + DBG("RX_FREE_0\n"); + } + + if (isr & IR_LNKCHG0) + bdx_link_changed(priv); + + if (isr & IR_PCIE_LINK) + ERR("%s: PCI-E Link Fault\n", priv->ndev->name); + + if (isr & IR_PCIE_TOUT) + ERR("%s: PCI-E Time Out\n", priv->ndev->name); + +} + +/* bdx_isr - Interrupt Service Routine for Bordeaux NIC + * @irq - interrupt number + * @ndev - network device + * @regs - CPU registers + * + * Return IRQ_NONE if it was not our interrupt, IRQ_HANDLED - otherwise + * + * It reads ISR register to know interrupt reasons, and proceed them one by one. 
+ * Reasons of interest are: + * RX_DESC - new packet has arrived and RXD fifo holds its descriptor + * RX_FREE - number of free Rx buffers in RXF fifo gets low + * TX_FREE - packet was transmited and RXF fifo holds its descriptor + */ + +static irqreturn_t bdx_isr_napi(int irq, void *dev) +{ + struct net_device *ndev = dev; + struct bdx_priv *priv = ndev->priv; + u32 isr; + + ENTER; + isr = (READ_REG(priv, regISR) & IR_RUN); + if (unlikely(!isr)) { + bdx_enable_interrupts(priv); + return IRQ_NONE; /* Not our interrupt */ + } + + if (isr & IR_EXTRA) + bdx_isr_extra(priv, isr); + + if (isr & (IR_RX_DESC_0 | IR_TX_FREE_0)) { + if (likely(netif_rx_schedule_prep(ndev, &priv->napi))) { + __netif_rx_schedule(ndev, &priv->napi); + RET(IRQ_HANDLED); + } else { + /* NOTE: we get here if intr has slipped into window + * between these lines in bdx_poll: + * bdx_enable_interrupts(priv); + * return 0; + * currently intrs are disabled (since we read ISR), + * and we have failed to register next poll. + * so we read the regs to trigger chip + * and allow further interupts. */ + READ_REG(priv, regTXF_WPTR_0); + READ_REG(priv, regRXD_WPTR_0); + } + } + + bdx_enable_interrupts(priv); + RET(IRQ_HANDLED); +} + +static int bdx_poll(struct napi_struct *napi, int budget) +{ + struct bdx_priv *priv = container_of(napi, struct bdx_priv, napi); + struct net_device *dev = priv->ndev; + int work_done; + + ENTER; + bdx_tx_cleanup(priv); + work_done = bdx_rx_receive(priv, &priv->rxd_fifo0, budget); + if ((work_done < budget) || + (priv->napi_stop++ >= 30)) { + DBG("rx poll is done. backing to isr-driven\n"); + + /* from time to time we exit to let NAPI layer release + * device lock and allow waiting tasks (eg rmmod) to advance) */ + priv->napi_stop = 0; + + netif_rx_complete(dev, napi); + bdx_enable_interrupts(priv); + } + return work_done; +} + +/* bdx_fw_load - loads firmware to NIC + * @priv - NIC private structure + * Firmware is loaded via TXD fifo, so it must be initialized first. + * Firware must be loaded once per NIC not per PCI device provided by NIC (NIC + * can have few of them). So all drivers use semaphore register to choose one + * that will actually load FW to NIC. 
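+ *
+ * A rough sketch of that election, mirroring the code below:
+ *
+ *	master = READ_REG(priv, regINIT_SEMAPHORE)	(non-zero: we load)
+ *	if (!READ_REG(priv, regINIT_STATUS) && master)
+ *		push the firmware image through the TXD fifo
+ *	poll regINIT_STATUS until it becomes non-zero (or give up)
+ *	if (master)
+ *		WRITE_REG(priv, regINIT_SEMAPHORE, 1)	(release)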
+ */ + +static int bdx_fw_load(struct bdx_priv *priv) +{ + int master, i; + + ENTER; + master = READ_REG(priv, regINIT_SEMAPHORE); + if (!READ_REG(priv, regINIT_STATUS) && master) { + bdx_tx_push_desc_safe(priv, s_firmLoad, sizeof(s_firmLoad)); + mdelay(100); + } + for (i = 0; i < 200; i++) { + if (READ_REG(priv, regINIT_STATUS)) + break; + mdelay(2); + } + if (master) + WRITE_REG(priv, regINIT_SEMAPHORE, 1); + + if (i == 200) { + ERR("%s: firmware loading failed\n", priv->ndev->name); + DBG("VPC = 0x%x VIC = 0x%x INIT_STATUS = 0x%x i=%d\n", + READ_REG(priv, regVPC), + READ_REG(priv, regVIC), READ_REG(priv, regINIT_STATUS), i); + RET(-EIO); + } else { + DBG("%s: firmware loading success\n", priv->ndev->name); + RET(0); + } +} + +static void bdx_restore_mac(struct net_device *ndev, struct bdx_priv *priv) +{ + u32 val; + + ENTER; + DBG("mac0=%x mac1=%x mac2=%x\n", + READ_REG(priv, regUNC_MAC0_A), + READ_REG(priv, regUNC_MAC1_A), READ_REG(priv, regUNC_MAC2_A)); + + val = (ndev->dev_addr[0] << 8) | (ndev->dev_addr[1]); + WRITE_REG(priv, regUNC_MAC2_A, val); + val = (ndev->dev_addr[2] << 8) | (ndev->dev_addr[3]); + WRITE_REG(priv, regUNC_MAC1_A, val); + val = (ndev->dev_addr[4] << 8) | (ndev->dev_addr[5]); + WRITE_REG(priv, regUNC_MAC0_A, val); + + DBG("mac0=%x mac1=%x mac2=%x\n", + READ_REG(priv, regUNC_MAC0_A), + READ_REG(priv, regUNC_MAC1_A), READ_REG(priv, regUNC_MAC2_A)); + RET(); +} + +/* bdx_hw_start - inits registers and starts HW's Rx and Tx engines + * @priv - NIC private structure + */ +static int bdx_hw_start(struct bdx_priv *priv) +{ + int rc = -EIO; + struct net_device *ndev = priv->ndev; + + ENTER; + bdx_link_changed(priv); + + /* 10G overall max length (vlan, eth&ip header, ip payload, crc) */ + WRITE_REG(priv, regFRM_LENGTH, 0X3FE0); + WRITE_REG(priv, regPAUSE_QUANT, 0x96); + WRITE_REG(priv, regRX_FIFO_SECTION, 0x800010); + WRITE_REG(priv, regTX_FIFO_SECTION, 0xE00010); + WRITE_REG(priv, regRX_FULLNESS, 0); + WRITE_REG(priv, regTX_FULLNESS, 0); + WRITE_REG(priv, regCTRLST, + regCTRLST_BASE | regCTRLST_RX_ENA | regCTRLST_TX_ENA); + + WRITE_REG(priv, regVGLB, 0); + WRITE_REG(priv, regMAX_FRAME_A, + priv->rxf_fifo0.m.pktsz & MAX_FRAME_AB_VAL); + + DBG("RDINTCM=%08x\n", priv->rdintcm); /*NOTE: test script uses this */ + WRITE_REG(priv, regRDINTCM0, priv->rdintcm); + WRITE_REG(priv, regRDINTCM2, 0); /*cpu_to_le32(rcm.val)); */ + + DBG("TDINTCM=%08x\n", priv->tdintcm); /*NOTE: test script uses this */ + WRITE_REG(priv, regTDINTCM0, priv->tdintcm); /* old val = 0x300064 */ + + /* Enable timer interrupt once in 2 secs. 
*/ + /*WRITE_REG(priv, regGTMR0, ((GTMR_SEC * 2) & GTMR_DATA)); */ + bdx_restore_mac(priv->ndev, priv); + + WRITE_REG(priv, regGMAC_RXF_A, GMAC_RX_FILTER_OSEN | + GMAC_RX_FILTER_AM | GMAC_RX_FILTER_AB); + +#define BDX_IRQ_TYPE ((priv->nic->irq_type == IRQ_MSI)?0:IRQF_SHARED) + if ((rc = request_irq(priv->pdev->irq, &bdx_isr_napi, BDX_IRQ_TYPE, + ndev->name, ndev))) + goto err_irq; + bdx_enable_interrupts(priv); + + RET(0); + +err_irq: + RET(rc); +} + +static void bdx_hw_stop(struct bdx_priv *priv) +{ + ENTER; + bdx_disable_interrupts(priv); + free_irq(priv->pdev->irq, priv->ndev); + + netif_carrier_off(priv->ndev); + netif_stop_queue(priv->ndev); + + RET(); +} + +static int bdx_hw_reset_direct(void __iomem *regs) +{ + u32 val, i; + ENTER; + + /* reset sequences: read, write 1, read, write 0 */ + val = readl(regs + regCLKPLL); + writel((val | CLKPLL_SFTRST) + 0x8, regs + regCLKPLL); + udelay(50); + val = readl(regs + regCLKPLL); + writel(val & ~CLKPLL_SFTRST, regs + regCLKPLL); + + /* check that the PLLs are locked and reset ended */ + for (i = 0; i < 70; i++, mdelay(10)) + if ((readl(regs + regCLKPLL) & CLKPLL_LKD) == CLKPLL_LKD) { + /* do any PCI-E read transaction */ + readl(regs + regRXD_CFG0_0); + return 0; + } + ERR("tehuti: HW reset failed\n"); + return 1; /* failure */ +} + +static int bdx_hw_reset(struct bdx_priv *priv) +{ + u32 val, i; + ENTER; + + if (priv->port == 0) { + /* reset sequences: read, write 1, read, write 0 */ + val = READ_REG(priv, regCLKPLL); + WRITE_REG(priv, regCLKPLL, (val | CLKPLL_SFTRST) + 0x8); + udelay(50); + val = READ_REG(priv, regCLKPLL); + WRITE_REG(priv, regCLKPLL, val & ~CLKPLL_SFTRST); + } + /* check that the PLLs are locked and reset ended */ + for (i = 0; i < 70; i++, mdelay(10)) + if ((READ_REG(priv, regCLKPLL) & CLKPLL_LKD) == CLKPLL_LKD) { + /* do any PCI-E read transaction */ + READ_REG(priv, regRXD_CFG0_0); + return 0; + } + ERR("tehuti: HW reset failed\n"); + return 1; /* failure */ +} + +static int bdx_sw_reset(struct bdx_priv *priv) +{ + int i; + + ENTER; + /* 1. load MAC (obsolete) */ + /* 2. disable Rx (and Tx) */ + WRITE_REG(priv, regGMAC_RXF_A, 0); + mdelay(100); + /* 3. disable port */ + WRITE_REG(priv, regDIS_PORT, 1); + /* 4. disable queue */ + WRITE_REG(priv, regDIS_QU, 1); + /* 5. wait until hw is disabled */ + for (i = 0; i < 50; i++) { + if (READ_REG(priv, regRST_PORT) & 1) + break; + mdelay(10); + } + if (i == 50) + ERR("%s: SW reset timeout. continuing anyway\n", + priv->ndev->name); + + /* 6. disable intrs */ + WRITE_REG(priv, regRDINTCM0, 0); + WRITE_REG(priv, regTDINTCM0, 0); + WRITE_REG(priv, regIMR, 0); + READ_REG(priv, regISR); + + /* 7. reset queue */ + WRITE_REG(priv, regRST_QU, 1); + /* 8. reset port */ + WRITE_REG(priv, regRST_PORT, 1); + /* 9. zero all read and write pointers */ + for (i = regTXD_WPTR_0; i <= regTXF_RPTR_3; i += 0x10) + DBG("%x = %x\n", i, READ_REG(priv, i) & TXF_WPTR_WR_PTR); + for (i = regTXD_WPTR_0; i <= regTXF_RPTR_3; i += 0x10) + WRITE_REG(priv, i, 0); + /* 10. unseet port disable */ + WRITE_REG(priv, regDIS_PORT, 0); + /* 11. unset queue disable */ + WRITE_REG(priv, regDIS_QU, 0); + /* 12. unset queue reset */ + WRITE_REG(priv, regRST_QU, 0); + /* 13. unset port reset */ + WRITE_REG(priv, regRST_PORT, 0); + /* 14. enable Rx */ + /* skiped. will be done later */ + /* 15. 
save MAC (obsolete) */ + for (i = regTXD_WPTR_0; i <= regTXF_RPTR_3; i += 0x10) + DBG("%x = %x\n", i, READ_REG(priv, i) & TXF_WPTR_WR_PTR); + + RET(0); +} + +/* bdx_reset - performs right type of reset depending on hw type */ +static int bdx_reset(struct bdx_priv *priv) +{ + ENTER; + RET((priv->pdev->device == 0x3009) + ? bdx_hw_reset(priv) + : bdx_sw_reset(priv)); +} + +/** + * bdx_close - Disables a network interface + * @netdev: network interface device structure + * + * Returns 0, this is not allowed to fail + * + * The close entry point is called when an interface is de-activated + * by the OS. The hardware is still under the drivers control, but + * needs to be disabled. A global MAC reset is issued to stop the + * hardware, and all transmit and receive resources are freed. + **/ +static int bdx_close(struct net_device *ndev) +{ + struct bdx_priv *priv = NULL; + + ENTER; + priv = ndev->priv; + + napi_disable(&priv->napi); + + bdx_reset(priv); + bdx_hw_stop(priv); + bdx_rx_free(priv); + bdx_tx_free(priv); + RET(0); +} + +/** + * bdx_open - Called when a network interface is made active + * @netdev: network interface device structure + * + * Returns 0 on success, negative value on failure + * + * The open entry point is called when a network interface is made + * active by the system (IFF_UP). At this point all resources needed + * for transmit and receive operations are allocated, the interrupt + * handler is registered with the OS, the watchdog timer is started, + * and the stack is notified that the interface is ready. + **/ +static int bdx_open(struct net_device *ndev) +{ + struct bdx_priv *priv; + int rc; + + ENTER; + priv = ndev->priv; + bdx_reset(priv); + if (netif_running(ndev)) + netif_stop_queue(priv->ndev); + + if ((rc = bdx_tx_init(priv))) + goto err; + + if ((rc = bdx_rx_init(priv))) + goto err; + + if ((rc = bdx_fw_load(priv))) + goto err; + + bdx_rx_alloc_skbs(priv, &priv->rxf_fifo0); + + if ((rc = bdx_hw_start(priv))) + goto err; + + napi_enable(&priv->napi); + + print_fw_id(priv->nic); + + RET(0); + +err: + bdx_close(ndev); + RET(rc); +} + +static void __init bdx_firmware_endianess(void) +{ + int i; + for (i = 0; i < sizeof(s_firmLoad) / sizeof(u32); i++) + s_firmLoad[i] = CPU_CHIP_SWAP32(s_firmLoad[i]); +} + +static int bdx_ioctl_priv(struct net_device *ndev, struct ifreq *ifr, int cmd) +{ + struct bdx_priv *priv = ndev->priv; + u32 data[3]; + int error; + + ENTER; + + DBG("jiffies=%ld cmd=%d\n", jiffies, cmd); + if (cmd != SIOCDEVPRIVATE) { + error = copy_from_user(data, ifr->ifr_data, sizeof(data)); + if (error) { + ERR("cant copy from user\n"); + RET(error); + } + DBG("%d 0x%x 0x%x\n", data[0], data[1], data[2]); + } + + switch (data[0]) { + + case BDX_OP_READ: + data[2] = READ_REG(priv, data[1]); + DBG("read_reg(0x%x)=0x%x (dec %d)\n", data[1], data[2], + data[2]); + error = copy_to_user(ifr->ifr_data, data, sizeof(data)); + if (error) + RET(error); + break; + + case BDX_OP_WRITE: + WRITE_REG(priv, data[1], data[2]); + DBG("write_reg(0x%x, 0x%x)\n", data[1], data[2]); + break; + + default: + RET(-EOPNOTSUPP); + } + return 0; +} + +static int bdx_ioctl(struct net_device *ndev, struct ifreq *ifr, int cmd) +{ + ENTER; + if (cmd >= SIOCDEVPRIVATE && cmd <= (SIOCDEVPRIVATE + 15)) + RET(bdx_ioctl_priv(ndev, ifr, cmd)); + else + RET(-EOPNOTSUPP); +} + +/* + * __bdx_vlan_rx_vid - private helper for adding/killing VLAN vid + * by passing VLAN filter table to hardware + * @ndev network device + * @vid VLAN vid + * @op add or kill operation + */ +static void 
__bdx_vlan_rx_vid(struct net_device *ndev, uint16_t vid, int enable) +{ + struct bdx_priv *priv = ndev->priv; + u32 reg, bit, val; + + ENTER; + DBG2("vid=%d value=%d\n", (int)vid, enable); + if (unlikely(vid >= 4096)) { + ERR("tehuti: invalid VID: %u (> 4096)\n", vid); + RET(); + } + reg = regVLAN_0 + (vid / 32) * 4; + bit = 1 << vid % 32; + val = READ_REG(priv, reg); + DBG2("reg=%x, val=%x, bit=%d\n", reg, val, bit); + if (enable) + val |= bit; + else + val &= ~bit; + DBG2("new val %x\n", val); + WRITE_REG(priv, reg, val); + RET(); +} + +/* + * bdx_vlan_rx_add_vid - kernel hook for adding VLAN vid to hw filtering table + * @ndev network device + * @vid VLAN vid to add + */ +static void bdx_vlan_rx_add_vid(struct net_device *ndev, uint16_t vid) +{ + __bdx_vlan_rx_vid(ndev, vid, 1); +} + +/* + * bdx_vlan_rx_kill_vid - kernel hook for killing VLAN vid in hw filtering table + * @ndev network device + * @vid VLAN vid to kill + */ +static void bdx_vlan_rx_kill_vid(struct net_device *ndev, unsigned short vid) +{ + __bdx_vlan_rx_vid(ndev, vid, 0); +} + +/* + * bdx_vlan_rx_register - kernel hook for adding VLAN group + * @ndev network device + * @grp VLAN group + */ +static void +bdx_vlan_rx_register(struct net_device *ndev, struct vlan_group *grp) +{ + struct bdx_priv *priv = ndev->priv; + + ENTER; + DBG("device='%s', group='%p'\n", ndev->name, grp); + priv->vlgrp = grp; + RET(); +} + +/** + * bdx_change_mtu - Change the Maximum Transfer Unit + * @netdev: network interface device structure + * @new_mtu: new value for maximum frame size + * + * Returns 0 on success, negative on failure + */ +static int bdx_change_mtu(struct net_device *ndev, int new_mtu) +{ + ENTER; + + if (new_mtu == ndev->mtu) + RET(0); + + /* enforce minimum frame size */ + if (new_mtu < ETH_ZLEN) { + ERR("%s: %s mtu %d is less then minimal %d\n", + BDX_DRV_NAME, ndev->name, new_mtu, ETH_ZLEN); + RET(-EINVAL); + } + + ndev->mtu = new_mtu; + if (netif_running(ndev)) { + bdx_close(ndev); + bdx_open(ndev); + } + RET(0); +} + +static void bdx_setmulti(struct net_device *ndev) +{ + struct bdx_priv *priv = ndev->priv; + + u32 rxf_val = + GMAC_RX_FILTER_AM | GMAC_RX_FILTER_AB | GMAC_RX_FILTER_OSEN; + int i; + + ENTER; + /* IMF - imperfect (hash) rx multicat filter */ + /* PMF - perfect rx multicat filter */ + + /* FIXME: RXE(OFF) */ + if (ndev->flags & IFF_PROMISC) { + rxf_val |= GMAC_RX_FILTER_PRM; + } else if (ndev->flags & IFF_ALLMULTI) { + /* set IMF to accept all multicast frmaes */ + for (i = 0; i < MAC_MCST_HASH_NUM; i++) + WRITE_REG(priv, regRX_MCST_HASH0 + i * 4, ~0); + } else if (ndev->mc_count) { + u8 hash; + struct dev_mc_list *mclist; + u32 reg, val; + + /* set IMF to deny all multicast frames */ + for (i = 0; i < MAC_MCST_HASH_NUM; i++) + WRITE_REG(priv, regRX_MCST_HASH0 + i * 4, 0); + /* set PMF to deny all multicast frames */ + for (i = 0; i < MAC_MCST_NUM; i++) { + WRITE_REG(priv, regRX_MAC_MCST0 + i * 8, 0); + WRITE_REG(priv, regRX_MAC_MCST1 + i * 8, 0); + } + + /* use PMF to accept first MAC_MCST_NUM (15) addresses */ + /* TBD: sort addreses and write them in ascending order + * into RX_MAC_MCST regs. 
we skip this phase now and accept ALL + * multicast frames throu IMF */ + mclist = ndev->mc_list; + + /* accept the rest of addresses throu IMF */ + for (; mclist; mclist = mclist->next) { + hash = 0; + for (i = 0; i < ETH_ALEN; i++) + hash ^= mclist->dmi_addr[i]; + reg = regRX_MCST_HASH0 + ((hash >> 5) << 2); + val = READ_REG(priv, reg); + val |= (1 << (hash % 32)); + WRITE_REG(priv, reg, val); + } + + } else { + DBG("only own mac %d\n", ndev->mc_count); + rxf_val |= GMAC_RX_FILTER_AB; + } + WRITE_REG(priv, regGMAC_RXF_A, rxf_val); + /* enable RX */ + /* FIXME: RXE(ON) */ + RET(); +} + +static int bdx_set_mac(struct net_device *ndev, void *p) +{ + struct bdx_priv *priv = ndev->priv; + struct sockaddr *addr = p; + + ENTER; + /* + if (netif_running(dev)) + return -EBUSY + */ + memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len); + bdx_restore_mac(ndev, priv); + RET(0); +} + +static int bdx_read_mac(struct bdx_priv *priv) +{ + u16 macAddress[3], i; + ENTER; + + macAddress[2] = READ_REG(priv, regUNC_MAC0_A); + macAddress[2] = READ_REG(priv, regUNC_MAC0_A); + macAddress[1] = READ_REG(priv, regUNC_MAC1_A); + macAddress[1] = READ_REG(priv, regUNC_MAC1_A); + macAddress[0] = READ_REG(priv, regUNC_MAC2_A); + macAddress[0] = READ_REG(priv, regUNC_MAC2_A); + for (i = 0; i < 3; i++) { + priv->ndev->dev_addr[i * 2 + 1] = macAddress[i]; + priv->ndev->dev_addr[i * 2] = macAddress[i] >> 8; + } + RET(0); +} + +static u64 bdx_read_l2stat(struct bdx_priv *priv, int reg) +{ + u64 val; + + val = READ_REG(priv, reg); + val |= ((u64) READ_REG(priv, reg + 8)) << 32; + return val; +} + +/*Do the statistics-update work*/ +static void bdx_update_stats(struct bdx_priv *priv) +{ + struct bdx_stats *stats = &priv->hw_stats; + u64 *stats_vector = (u64 *) stats; + int i; + int addr; + + /*Fill HW structure */ + addr = 0x7200; + /*First 12 statistics - 0x7200 - 0x72B0 */ + for (i = 0; i < 12; i++) { + stats_vector[i] = bdx_read_l2stat(priv, addr); + addr += 0x10; + } + BDX_ASSERT(addr != 0x72C0); + /* 0x72C0-0x72E0 RSRV */ + addr = 0x72F0; + for (; i < 16; i++) { + stats_vector[i] = bdx_read_l2stat(priv, addr); + addr += 0x10; + } + BDX_ASSERT(addr != 0x7330); + /* 0x7330-0x7360 RSRV */ + addr = 0x7370; + for (; i < 19; i++) { + stats_vector[i] = bdx_read_l2stat(priv, addr); + addr += 0x10; + } + BDX_ASSERT(addr != 0x73A0); + /* 0x73A0-0x73B0 RSRV */ + addr = 0x73C0; + for (; i < 23; i++) { + stats_vector[i] = bdx_read_l2stat(priv, addr); + addr += 0x10; + } + BDX_ASSERT(addr != 0x7400); + BDX_ASSERT((sizeof(struct bdx_stats) / sizeof(u64)) != i); +} + +static struct net_device_stats *bdx_get_stats(struct net_device *ndev) +{ + struct bdx_priv *priv = ndev->priv; + struct net_device_stats *net_stat = &priv->net_stats; + return net_stat; +} + +static void print_rxdd(struct rxd_desc *rxdd, u32 rxd_val1, u16 len, + u16 rxd_vlan); +static void print_rxfd(struct rxf_desc *rxfd); + +/************************************************************************* + * Rx DB * + *************************************************************************/ + +static void bdx_rxdb_destroy(struct rxdb *db) +{ + if (db) + vfree(db); +} + +static struct rxdb *bdx_rxdb_create(int nelem) +{ + struct rxdb *db; + int i; + + db = vmalloc(sizeof(struct rxdb) + + (nelem * sizeof(int)) + + (nelem * sizeof(struct rx_map))); + if (likely(db != NULL)) { + db->stack = (int *)(db + 1); + db->elems = (void *)(db->stack + nelem); + db->nelem = nelem; + db->top = nelem; + for (i = 0; i < nelem; i++) + db->stack[i] = nelem - i - 1; /* to make first allocs + 
close to db struct*/ + } + + return db; +} + +static inline int bdx_rxdb_alloc_elem(struct rxdb *db) +{ + BDX_ASSERT(db->top <= 0); + return db->stack[--(db->top)]; +} + +static inline void *bdx_rxdb_addr_elem(struct rxdb *db, int n) +{ + BDX_ASSERT((n < 0) || (n >= db->nelem)); + return db->elems + n; +} + +static inline int bdx_rxdb_available(struct rxdb *db) +{ + return db->top; +} + +static inline void bdx_rxdb_free_elem(struct rxdb *db, int n) +{ + BDX_ASSERT((n >= db->nelem) || (n < 0)); + db->stack[(db->top)++] = n; +} + +/************************************************************************* + * Rx Init * + *************************************************************************/ + +/* bdx_rx_init - initialize RX all related HW and SW resources + * @priv - NIC private structure + * + * Returns 0 on success, negative value on failure + * + * It creates rxf and rxd fifos, update relevant HW registers, preallocate + * skb for rx. It assumes that Rx is desabled in HW + * funcs are grouped for better cache usage + * + * RxD fifo is smaller then RxF fifo by design. Upon high load, RxD will be + * filled and packets will be dropped by nic without getting into host or + * cousing interrupt. Anyway, in that condition, host has no chance to proccess + * all packets, but dropping in nic is cheaper, since it takes 0 cpu cycles + */ + +/* TBD: ensure proper packet size */ + +static int bdx_rx_init(struct bdx_priv *priv) +{ + ENTER; + + if (bdx_fifo_init(priv, &priv->rxd_fifo0.m, priv->rxd_size, + regRXD_CFG0_0, regRXD_CFG1_0, + regRXD_RPTR_0, regRXD_WPTR_0)) + goto err_mem; + if (bdx_fifo_init(priv, &priv->rxf_fifo0.m, priv->rxf_size, + regRXF_CFG0_0, regRXF_CFG1_0, + regRXF_RPTR_0, regRXF_WPTR_0)) + goto err_mem; + if (! + (priv->rxdb = + bdx_rxdb_create(priv->rxf_fifo0.m.memsz / + sizeof(struct rxf_desc)))) + goto err_mem; + + priv->rxf_fifo0.m.pktsz = priv->ndev->mtu + VLAN_ETH_HLEN; + return 0; + +err_mem: + ERR("%s: %s: Rx init failed\n", BDX_DRV_NAME, priv->ndev->name); + return -ENOMEM; +} + +/* bdx_rx_free_skbs - frees and unmaps all skbs allocated for the fifo + * @priv - NIC private structure + * @f - RXF fifo + */ +static void bdx_rx_free_skbs(struct bdx_priv *priv, struct rxf_fifo *f) +{ + struct rx_map *dm; + struct rxdb *db = priv->rxdb; + u16 i; + + ENTER; + DBG("total=%d free=%d busy=%d\n", db->nelem, bdx_rxdb_available(db), + db->nelem - bdx_rxdb_available(db)); + while (bdx_rxdb_available(db) > 0) { + i = bdx_rxdb_alloc_elem(db); + dm = bdx_rxdb_addr_elem(db, i); + dm->dma = 0; + } + for (i = 0; i < db->nelem; i++) { + dm = bdx_rxdb_addr_elem(db, i); + if (dm->dma) { + pci_unmap_single(priv->pdev, + dm->dma, f->m.pktsz, + PCI_DMA_FROMDEVICE); + dev_kfree_skb(dm->skb); + } + } +} + +/* bdx_rx_free - release all Rx resources + * @priv - NIC private structure + * It assumes that Rx is desabled in HW + */ +static void bdx_rx_free(struct bdx_priv *priv) +{ + ENTER; + if (priv->rxdb) { + bdx_rx_free_skbs(priv, &priv->rxf_fifo0); + bdx_rxdb_destroy(priv->rxdb); + priv->rxdb = NULL; + } + bdx_fifo_free(priv, &priv->rxf_fifo0.m); + bdx_fifo_free(priv, &priv->rxd_fifo0.m); + + RET(); +} + +/************************************************************************* + * Rx Engine * + *************************************************************************/ + +/* bdx_rx_alloc_skbs - fill rxf fifo with new skbs + * @priv - nic's private structure + * @f - RXF fifo that needs skbs + * It allocates skbs, build rxf descs and push it (rxf descr) into rxf fifo. 
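+ * Each pushed RxF descriptor looks roughly like this (sketch; 0x10003 is the
+ * INFO=1 BC=3 pattern used by the code below):
+ *
+ *	rxfd->info        = 0x10003		(INFO=1 BC=3)
+ *	rxfd->va_lo       = index into the skb db, returned in the RxD
+ *	rxfd->pa_lo/pa_hi = dma address of skb->data
+ *	rxfd->len         = fifo packet size (f->m.pktsz)
+ *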
+ * skb's virtual and physical addresses are stored in skb db. + * To calculate free space, func uses cached values of RPTR and WPTR + * When needed, it also updates RPTR and WPTR. + */ + +/* TBD: do not update WPTR if no desc were written */ + +static void bdx_rx_alloc_skbs(struct bdx_priv *priv, struct rxf_fifo *f) +{ + struct sk_buff *skb; + struct rxf_desc *rxfd; + struct rx_map *dm; + int dno, delta, idx; + struct rxdb *db = priv->rxdb; + + ENTER; + dno = bdx_rxdb_available(db) - 1; + while (dno > 0) { + if (!(skb = dev_alloc_skb(f->m.pktsz + NET_IP_ALIGN))) { + ERR("NO MEM: dev_alloc_skb failed\n"); + break; + } + skb->dev = priv->ndev; + skb_reserve(skb, NET_IP_ALIGN); + + idx = bdx_rxdb_alloc_elem(db); + dm = bdx_rxdb_addr_elem(db, idx); + dm->dma = pci_map_single(priv->pdev, + skb->data, f->m.pktsz, + PCI_DMA_FROMDEVICE); + dm->skb = skb; + rxfd = (struct rxf_desc *)(f->m.va + f->m.wptr); + rxfd->info = CPU_CHIP_SWAP32(0x10003); /* INFO=1 BC=3 */ + rxfd->va_lo = idx; + rxfd->pa_lo = CPU_CHIP_SWAP32(L32_64(dm->dma)); + rxfd->pa_hi = CPU_CHIP_SWAP32(H32_64(dm->dma)); + rxfd->len = CPU_CHIP_SWAP32(f->m.pktsz); + print_rxfd(rxfd); + + f->m.wptr += sizeof(struct rxf_desc); + delta = f->m.wptr - f->m.memsz; + if (unlikely(delta >= 0)) { + f->m.wptr = delta; + if (delta > 0) { + memcpy(f->m.va, f->m.va + f->m.memsz, delta); + DBG("wrapped descriptor\n"); + } + } + dno--; + } + /*TBD: to do - delayed rxf wptr like in txd */ + WRITE_REG(priv, f->m.reg_WPTR, f->m.wptr & TXF_WPTR_WR_PTR); + RET(); +} + +static inline void +NETIF_RX_MUX(struct bdx_priv *priv, u32 rxd_val1, u16 rxd_vlan, + struct sk_buff *skb) +{ + ENTER; + DBG("rxdd->flags.bits.vtag=%d vlgrp=%p\n", GET_RXD_VTAG(rxd_val1), + priv->vlgrp); + if (priv->vlgrp && GET_RXD_VTAG(rxd_val1)) { + DBG("%s: vlan rcv vlan '%x' vtag '%x', device name '%s'\n", + priv->ndev->name, + GET_RXD_VLAN_ID(rxd_vlan), + GET_RXD_VTAG(rxd_val1), + vlan_group_get_device(priv->vlgrp, + GET_RXD_VLAN_ID(rxd_vlan))->name); + /* NAPI variant of receive functions */ + vlan_hwaccel_receive_skb(skb, priv->vlgrp, + GET_RXD_VLAN_ID(rxd_vlan)); + } else { + netif_receive_skb(skb); + } +} + +static void bdx_recycle_skb(struct bdx_priv *priv, struct rxd_desc *rxdd) +{ + struct rxf_desc *rxfd; + struct rx_map *dm; + struct rxf_fifo *f; + struct rxdb *db; + struct sk_buff *skb; + int delta; + + ENTER; + DBG("priv=%p rxdd=%p\n", priv, rxdd); + f = &priv->rxf_fifo0; + db = priv->rxdb; + DBG("db=%p f=%p\n", db, f); + dm = bdx_rxdb_addr_elem(db, rxdd->va_lo); + DBG("dm=%p\n", dm); + skb = dm->skb; + rxfd = (struct rxf_desc *)(f->m.va + f->m.wptr); + rxfd->info = CPU_CHIP_SWAP32(0x10003); /* INFO=1 BC=3 */ + rxfd->va_lo = rxdd->va_lo; + rxfd->pa_lo = CPU_CHIP_SWAP32(L32_64(dm->dma)); + rxfd->pa_hi = CPU_CHIP_SWAP32(H32_64(dm->dma)); + rxfd->len = CPU_CHIP_SWAP32(f->m.pktsz); + print_rxfd(rxfd); + + f->m.wptr += sizeof(struct rxf_desc); + delta = f->m.wptr - f->m.memsz; + if (unlikely(delta >= 0)) { + f->m.wptr = delta; + if (delta > 0) { + memcpy(f->m.va, f->m.va + f->m.memsz, delta); + DBG("wrapped descriptor\n"); + } + } + RET(); +} + +/* bdx_rx_receive - recieves full packets from RXD fifo and pass them to OS + * NOTE: a special treatment is given to non-continous descriptors + * that start near the end, wraps around and continue at the beginning. a second + * part is copied right after the first, and then descriptor is interpreted as + * normal. 
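+ * In code terms, the wrap handling below boils down to (sketch):
+ *
+ *	tmp_len = f->m.rptr - f->m.memsz;
+ *	if (tmp_len >= 0) {
+ *		f->m.rptr = tmp_len;	(wrap the read pointer)
+ *		memcpy(f->m.va + f->m.memsz, f->m.va, tmp_len);
+ *	}
+ *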
fifo has an extra space to allow such operations + * @priv - nic's private structure + * @f - RXF fifo that needs skbs + */ + +/* TBD: replace memcpy func call by explicite inline asm */ + +static int bdx_rx_receive(struct bdx_priv *priv, struct rxd_fifo *f, int budget) +{ + struct sk_buff *skb, *skb2; + struct rxd_desc *rxdd; + struct rx_map *dm; + struct rxf_fifo *rxf_fifo; + int tmp_len, size; + int done = 0; + int max_done = BDX_MAX_RX_DONE; + struct rxdb *db = NULL; + /* Unmarshalled descriptor - copy of descriptor in host order */ + u32 rxd_val1; + u16 len; + u16 rxd_vlan; + + ENTER; + max_done = budget; + + priv->ndev->last_rx = jiffies; + f->m.wptr = READ_REG(priv, f->m.reg_WPTR) & TXF_WPTR_WR_PTR; + + size = f->m.wptr - f->m.rptr; + if (size < 0) + size = f->m.memsz + size; /* size is negative :-) */ + + while (size > 0) { + + rxdd = (struct rxd_desc *)(f->m.va + f->m.rptr); + rxd_val1 = CPU_CHIP_SWAP32(rxdd->rxd_val1); + + len = CPU_CHIP_SWAP16(rxdd->len); + + rxd_vlan = CPU_CHIP_SWAP16(rxdd->rxd_vlan); + + print_rxdd(rxdd, rxd_val1, len, rxd_vlan); + + tmp_len = GET_RXD_BC(rxd_val1) << 3; + BDX_ASSERT(tmp_len <= 0); + size -= tmp_len; + if (size < 0) /* test for partially arrived descriptor */ + break; + + f->m.rptr += tmp_len; + + tmp_len = f->m.rptr - f->m.memsz; + if (unlikely(tmp_len >= 0)) { + f->m.rptr = tmp_len; + if (tmp_len > 0) { + DBG("wrapped desc rptr=%d tmp_len=%d\n", + f->m.rptr, tmp_len); + memcpy(f->m.va + f->m.memsz, f->m.va, tmp_len); + } + } + + if (unlikely(GET_RXD_ERR(rxd_val1))) { + DBG("rxd_err = 0x%x\n", GET_RXD_ERR(rxd_val1)); + priv->net_stats.rx_errors++; + bdx_recycle_skb(priv, rxdd); + continue; + } + + rxf_fifo = &priv->rxf_fifo0; + db = priv->rxdb; + dm = bdx_rxdb_addr_elem(db, rxdd->va_lo); + skb = dm->skb; + + if (len < BDX_COPYBREAK && + (skb2 = dev_alloc_skb(len + NET_IP_ALIGN))) { + skb_reserve(skb2, NET_IP_ALIGN); + /*skb_put(skb2, len); */ + pci_dma_sync_single_for_cpu(priv->pdev, + dm->dma, rxf_fifo->m.pktsz, + PCI_DMA_FROMDEVICE); + memcpy(skb2->data, skb->data, len); + bdx_recycle_skb(priv, rxdd); + skb = skb2; + } else { + pci_unmap_single(priv->pdev, + dm->dma, rxf_fifo->m.pktsz, + PCI_DMA_FROMDEVICE); + bdx_rxdb_free_elem(db, rxdd->va_lo); + } + + priv->net_stats.rx_bytes += len; + + skb_put(skb, len); + skb->dev = priv->ndev; + skb->ip_summed = CHECKSUM_UNNECESSARY; + skb->protocol = eth_type_trans(skb, priv->ndev); + + /* Non-IP packets aren't checksum-offloaded */ + if (GET_RXD_PKT_ID(rxd_val1) == 0) + skb->ip_summed = CHECKSUM_NONE; + + NETIF_RX_MUX(priv, rxd_val1, rxd_vlan, skb); + + if (++done >= max_done) + break; + } + + priv->net_stats.rx_packets += done; + + /* FIXME: do smth to minimize pci accesses */ + WRITE_REG(priv, f->m.reg_RPTR, f->m.rptr & TXF_WPTR_WR_PTR); + + bdx_rx_alloc_skbs(priv, &priv->rxf_fifo0); + + RET(done); +} + +/************************************************************************* + * Debug / Temprorary Code * + *************************************************************************/ +static void print_rxdd(struct rxd_desc *rxdd, u32 rxd_val1, u16 len, + u16 rxd_vlan) +{ + DBG("ERROR: rxdd bc %d rxfq %d to %d type %d err %d rxp %d " + "pkt_id %d vtag %d len %d vlan_id %d cfi %d prio %d " + "va_lo %d va_hi %d\n", + GET_RXD_BC(rxd_val1), GET_RXD_RXFQ(rxd_val1), GET_RXD_TO(rxd_val1), + GET_RXD_TYPE(rxd_val1), GET_RXD_ERR(rxd_val1), + GET_RXD_RXP(rxd_val1), GET_RXD_PKT_ID(rxd_val1), + GET_RXD_VTAG(rxd_val1), len, GET_RXD_VLAN_ID(rxd_vlan), + GET_RXD_CFI(rxd_vlan), GET_RXD_PRIO(rxd_vlan), rxdd->va_lo, + 
rxdd->va_hi);
+}
+
+static void print_rxfd(struct rxf_desc *rxfd)
+{
+	DBG("=== RxF desc CHIP ORDER/ENDIANNESS =============\n"
+	    "info 0x%x va_lo %u pa_lo 0x%x pa_hi 0x%x len 0x%x\n",
+	    rxfd->info, rxfd->va_lo, rxfd->pa_lo, rxfd->pa_hi, rxfd->len);
+}
+
+/*
+ * TX HW/SW interaction overview
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ * There are 2 types of TX communication channels between driver and NIC.
+ * 1) TX Free Fifo - TXF - holds ack descriptors for sent packets
+ * 2) TX Data Fifo - TXD - holds descriptors of full buffers.
+ *
+ * Currently the NIC supports TSO, checksumming and gather DMA;
+ * UFO and IP fragmentation are on the way.
+ *
+ * TX SW Data Structures
+ * ~~~~~~~~~~~~~~~~~~~~~
+ * txdb - used to keep track of all skbs owned by SW and their dma addresses.
+ * For the TX case, ownership lasts from getting the packet via hard_xmit until
+ * HW acknowledges the send via TXF descriptors.
+ * Implemented as a cyclic buffer.
+ * fifo - keeps info about the fifo's size and location, relevant HW registers,
+ * usage and skb db. Each TXD and TXF Fifo has its own fifo structure.
+ * Implemented as a simple struct.
+ *
+ * TX SW Execution Flow
+ * ~~~~~~~~~~~~~~~~~~~~
+ * The OS calls the driver's hard_xmit method with a packet to send.
+ * The driver creates DMA mappings, builds TXD descriptors and kicks HW
+ * by updating the TXD WPTR.
+ * When the packet is sent, HW writes a TXF descriptor for us and SW frees the
+ * original skb.
+ * To prevent TXD fifo overflow without reading HW registers every time,
+ * SW deploys the "tx level" technique.
+ * Upon start-up, tx level is initialized to the TXD fifo length.
+ * For every sent packet, SW gets its TXD descriptor size
+ * (from a precalculated array) and subtracts it from tx level.
+ * The size is also stored in txdb. When a TXF ack arrives, SW fetches the size
+ * of the original TXD descriptor from txdb and adds it back to tx level.
+ * When tx level drops below a predefined threshold, the driver
+ * stops the TX queue. When tx level rises above that threshold,
+ * the TX queue is enabled again.
+ *
+ * This technique avoids excessive reading of the RPTR and WPTR registers.
+ * As our benchmarks show, it adds 1.5 Gbit/sec to the NIC's throughput.
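+ *
+ * A minimal sketch of that accounting, using the names from the code below:
+ *
+ *	xmit:    tx_level -= txd_sizes[nr_frags].bytes;
+ *	         if (tx_level < BDX_MIN_TX_LEVEL)
+ *	                 netif_stop_queue(ndev);
+ *	cleanup: tx_level += the size saved in txdb for that packet;
+ *	         if (queue stopped && tx_level >= BDX_MIN_TX_LEVEL)
+ *	                 netif_wake_queue(ndev);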
+ */ + +/************************************************************************* + * Tx DB * + *************************************************************************/ +static inline int bdx_tx_db_size(struct txdb *db) +{ + int taken = db->wptr - db->rptr; + if (taken < 0) + taken = db->size + 1 + taken; /* (size + 1) equals memsz */ + + return db->size - taken; +} + +/* __bdx_tx_ptr_next - helper function, increment read/write pointer + wrap + * @d - tx data base + * @ptr - read or write pointer + */ +static inline void __bdx_tx_db_ptr_next(struct txdb *db, struct tx_map **pptr) +{ + BDX_ASSERT(db == NULL || pptr == NULL); /* sanity */ + + BDX_ASSERT(*pptr != db->rptr && /* expect either read */ + *pptr != db->wptr); /* or write pointer */ + + BDX_ASSERT(*pptr < db->start || /* pointer has to be */ + *pptr >= db->end); /* in range */ + + ++*pptr; + if (unlikely(*pptr == db->end)) + *pptr = db->start; +} + +/* bdx_tx_db_inc_rptr - increment read pointer + * @d - tx data base + */ +static inline void bdx_tx_db_inc_rptr(struct txdb *db) +{ + BDX_ASSERT(db->rptr == db->wptr); /* can't read from empty db */ + __bdx_tx_db_ptr_next(db, &db->rptr); +} + +/* bdx_tx_db_inc_rptr - increment write pointer + * @d - tx data base + */ +static inline void bdx_tx_db_inc_wptr(struct txdb *db) +{ + __bdx_tx_db_ptr_next(db, &db->wptr); + BDX_ASSERT(db->rptr == db->wptr); /* we can not get empty db as + a result of write */ +} + +/* bdx_tx_db_init - creates and initializes tx db + * @d - tx data base + * @sz_type - size of tx fifo + * Returns 0 on success, error code otherwise + */ +static int bdx_tx_db_init(struct txdb *d, int sz_type) +{ + int memsz = FIFO_SIZE * (1 << (sz_type + 1)); + + d->start = vmalloc(memsz); + if (!d->start) + return -ENOMEM; + + /* + * In order to differentiate between db is empty and db is full + * states at least one element should always be empty in order to + * avoid rptr == wptr which means db is empty + */ + d->size = memsz / sizeof(struct tx_map) - 1; + d->end = d->start + d->size + 1; /* just after last element */ + + /* all dbs are created equally empty */ + d->rptr = d->start; + d->wptr = d->start; + + return 0; +} + +/* bdx_tx_db_close - closes tx db and frees all memory + * @d - tx data base + */ +static void bdx_tx_db_close(struct txdb *d) +{ + BDX_ASSERT(d == NULL); + + if (d->start) { + vfree(d->start); + d->start = NULL; + } +} + +/************************************************************************* + * Tx Engine * + *************************************************************************/ + +/* sizes of tx desc (including padding if needed) as function + * of skb's frag number */ +static struct { + u16 bytes; + u16 qwords; /* qword = 64 bit */ +} txd_sizes[MAX_SKB_FRAGS + 1]; + +/* txdb_map_skb - creates and stores dma mappings for skb's data blocks + * @priv - NIC private structure + * @skb - socket buffer to map + * + * It makes dma mappings for skb's data blocks and writes them to PBL of + * new tx descriptor. It also stores them in the tx db, so they could be + * unmaped after data was sent. It is reponsibility of a caller to make + * sure that there is enough space in the tx db. 
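+ * For example, an skb with two page fragments produces, in order (sketch):
+ *
+ *	{ len = skb->len - skb->data_len, addr.dma = mapping of skb->data }
+ *	{ len = frag 0 size,              addr.dma = mapping of frag 0    }
+ *	{ len = frag 1 size,              addr.dma = mapping of frag 1    }
+ *	{ len = non-positive marker,      addr.skb = skb (clean-up entry) }
+ *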
Last element holds pointer + * to skb itself and marked with zero length + */ +static inline void +bdx_tx_map_skb(struct bdx_priv *priv, struct sk_buff *skb, + struct txd_desc *txdd) +{ + struct txdb *db = &priv->txdb; + struct pbl *pbl = &txdd->pbl[0]; + int nr_frags = skb_shinfo(skb)->nr_frags; + int i; + + db->wptr->len = skb->len - skb->data_len; + db->wptr->addr.dma = pci_map_single(priv->pdev, skb->data, + db->wptr->len, PCI_DMA_TODEVICE); + pbl->len = CPU_CHIP_SWAP32(db->wptr->len); + pbl->pa_lo = CPU_CHIP_SWAP32(L32_64(db->wptr->addr.dma)); + pbl->pa_hi = CPU_CHIP_SWAP32(H32_64(db->wptr->addr.dma)); + DBG("=== pbl len: 0x%x ================\n", pbl->len); + DBG("=== pbl pa_lo: 0x%x ================\n", pbl->pa_lo); + DBG("=== pbl pa_hi: 0x%x ================\n", pbl->pa_hi); + bdx_tx_db_inc_wptr(db); + + for (i = 0; i < nr_frags; i++) { + struct skb_frag_struct *frag; + + frag = &skb_shinfo(skb)->frags[i]; + db->wptr->len = frag->size; + db->wptr->addr.dma = + pci_map_page(priv->pdev, frag->page, frag->page_offset, + frag->size, PCI_DMA_TODEVICE); + + pbl++; + pbl->len = CPU_CHIP_SWAP32(db->wptr->len); + pbl->pa_lo = CPU_CHIP_SWAP32(L32_64(db->wptr->addr.dma)); + pbl->pa_hi = CPU_CHIP_SWAP32(H32_64(db->wptr->addr.dma)); + bdx_tx_db_inc_wptr(db); + } + + /* add skb clean up info. */ + db->wptr->len = -txd_sizes[nr_frags].bytes; + db->wptr->addr.skb = skb; + bdx_tx_db_inc_wptr(db); +} + +/* init_txd_sizes - precalculate sizes of descriptors for skbs up to 16 frags + * number of frags is used as index to fetch correct descriptors size, + * instead of calculating it each time */ +static void __init init_txd_sizes(void) +{ + int i, lwords; + + /* 7 - is number of lwords in txd with one phys buffer + * 3 - is number of lwords used for every additional phys buffer */ + for (i = 0; i < MAX_SKB_FRAGS + 1; i++) { + lwords = 7 + (i * 3); + if (lwords & 1) + lwords++; /* pad it with 1 lword */ + txd_sizes[i].qwords = lwords >> 1; + txd_sizes[i].bytes = lwords << 2; + } +} + +/* bdx_tx_init - initialize all Tx related stuff. + * Namely, TXD and TXF fifos, database etc */ +static int bdx_tx_init(struct bdx_priv *priv) +{ + if (bdx_fifo_init(priv, &priv->txd_fifo0.m, priv->txd_size, + regTXD_CFG0_0, + regTXD_CFG1_0, regTXD_RPTR_0, regTXD_WPTR_0)) + goto err_mem; + if (bdx_fifo_init(priv, &priv->txf_fifo0.m, priv->txf_size, + regTXF_CFG0_0, + regTXF_CFG1_0, regTXF_RPTR_0, regTXF_WPTR_0)) + goto err_mem; + + /* The TX db has to keep mappings for all packets sent (on TxD) + * and not yet reclaimed (on TxF) */ + if (bdx_tx_db_init(&priv->txdb, max(priv->txd_size, priv->txf_size))) + goto err_mem; + + priv->tx_level = BDX_MAX_TX_LEVEL; +#ifdef BDX_DELAY_WPTR + priv->tx_update_mark = priv->tx_level - 1024; +#endif + return 0; + +err_mem: + ERR("tehuti: %s: Tx init failed\n", priv->ndev->name); + return -ENOMEM; +} + +/* + * bdx_tx_space - calculates avalable space in TX fifo + * @priv - NIC private structure + * Returns avaliable space in TX fifo in bytes + */ +static inline int bdx_tx_space(struct bdx_priv *priv) +{ + struct txd_fifo *f = &priv->txd_fifo0; + int fsize; + + f->m.rptr = READ_REG(priv, f->m.reg_RPTR) & TXF_WPTR_WR_PTR; + fsize = f->m.rptr - f->m.wptr; + if (fsize <= 0) + fsize = f->m.memsz + fsize; + return (fsize); +} + +/* bdx_tx_transmit - send packet to NIC + * @skb - packet to send + * ndev - network device assigned to NIC + * Return codes: + * o NETDEV_TX_OK everything ok. 
+ * o NETDEV_TX_BUSY Cannot transmit packet, try later + * Usually a bug, means queue start/stop flow control is broken in + * the driver. Note: the driver must NOT put the skb in its DMA ring. + * o NETDEV_TX_LOCKED Locking failed, please retry quickly. + */ +static int bdx_tx_transmit(struct sk_buff *skb, struct net_device *ndev) +{ + struct bdx_priv *priv = ndev->priv; + struct txd_fifo *f = &priv->txd_fifo0; + int txd_checksum = 7; /* full checksum */ + int txd_lgsnd = 0; + int txd_vlan_id = 0; + int txd_vtag = 0; + int txd_mss = 0; + + int nr_frags = skb_shinfo(skb)->nr_frags; + struct txd_desc *txdd; + int len; + unsigned long flags; + + ENTER; + local_irq_save(flags); + if (!spin_trylock(&priv->tx_lock)) { + local_irq_restore(flags); + DBG("%s[%s]: TX locked, returning NETDEV_TX_LOCKED\n", + BDX_DRV_NAME, ndev->name); + return NETDEV_TX_LOCKED; + } + + /* build tx descriptor */ + BDX_ASSERT(f->m.wptr >= f->m.memsz); /* started with valid wptr */ + txdd = (struct txd_desc *)(f->m.va + f->m.wptr); + if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) + txd_checksum = 0; + + if (skb_shinfo(skb)->gso_size) { + txd_mss = skb_shinfo(skb)->gso_size; + txd_lgsnd = 1; + DBG("skb %p skb len %d gso size = %d\n", skb, skb->len, + txd_mss); + } + + if (vlan_tx_tag_present(skb)) { + /*Cut VLAN ID to 12 bits */ + txd_vlan_id = vlan_tx_tag_get(skb) & BITS_MASK(12); + txd_vtag = 1; + } + + txdd->length = CPU_CHIP_SWAP16(skb->len); + txdd->mss = CPU_CHIP_SWAP16(txd_mss); + txdd->txd_val1 = + CPU_CHIP_SWAP32(TXD_W1_VAL + (txd_sizes[nr_frags].qwords, txd_checksum, txd_vtag, + txd_lgsnd, txd_vlan_id)); + DBG("=== TxD desc =====================\n"); + DBG("=== w1: 0x%x ================\n", txdd->txd_val1); + DBG("=== w2: mss 0x%x len 0x%x\n", txdd->mss, txdd->length); + + bdx_tx_map_skb(priv, skb, txdd); + + /* increment TXD write pointer. In case of + fifo wrapping copy reminder of the descriptor + to the beginning */ + f->m.wptr += txd_sizes[nr_frags].bytes; + len = f->m.wptr - f->m.memsz; + if (unlikely(len >= 0)) { + f->m.wptr = len; + if (len > 0) { + BDX_ASSERT(len > f->m.memsz); + memcpy(f->m.va, f->m.va + f->m.memsz, len); + } + } + BDX_ASSERT(f->m.wptr >= f->m.memsz); /* finished with valid wptr */ + + priv->tx_level -= txd_sizes[nr_frags].bytes; + BDX_ASSERT(priv->tx_level <= 0 || priv->tx_level > BDX_MAX_TX_LEVEL); +#ifdef BDX_DELAY_WPTR + if (priv->tx_level > priv->tx_update_mark) { + /* Force memory writes to complete before letting h/w + know there are new descriptors to fetch. + (might be needed on platforms like IA64) + wmb(); */ + WRITE_REG(priv, f->m.reg_WPTR, f->m.wptr & TXF_WPTR_WR_PTR); + } else { + if (priv->tx_noupd++ > BDX_NO_UPD_PACKETS) { + priv->tx_noupd = 0; + WRITE_REG(priv, f->m.reg_WPTR, + f->m.wptr & TXF_WPTR_WR_PTR); + } + } +#else + /* Force memory writes to complete before letting h/w + know there are new descriptors to fetch. + (might be needed on platforms like IA64) + wmb(); */ + WRITE_REG(priv, f->m.reg_WPTR, f->m.wptr & TXF_WPTR_WR_PTR); + +#endif + ndev->trans_start = jiffies; + + priv->net_stats.tx_packets++; + priv->net_stats.tx_bytes += skb->len; + + if (priv->tx_level < BDX_MIN_TX_LEVEL) { + DBG("%s: %s: TX Q STOP level %d\n", + BDX_DRV_NAME, ndev->name, priv->tx_level); + netif_stop_queue(ndev); + } + + spin_unlock_irqrestore(&priv->tx_lock, flags); + return NETDEV_TX_OK; +} + +/* bdx_tx_cleanup - clean TXF fifo, run in the context of IRQ. 
+ * @priv - bdx adapter + * It scans TXF fifo for descriptors, frees DMA mappings and reports to OS + * that those packets were sent + */ +static void bdx_tx_cleanup(struct bdx_priv *priv) +{ + struct txf_fifo *f = &priv->txf_fifo0; + struct txdb *db = &priv->txdb; + int tx_level = 0; + + ENTER; + f->m.wptr = READ_REG(priv, f->m.reg_WPTR) & TXF_WPTR_MASK; + BDX_ASSERT(f->m.rptr >= f->m.memsz); /* started with valid rptr */ + + while (f->m.wptr != f->m.rptr) { + f->m.rptr += BDX_TXF_DESC_SZ; + f->m.rptr &= f->m.size_mask; + + /* unmap all the fragments */ + /* first has to come tx_maps containing dma */ + BDX_ASSERT(db->rptr->len == 0); + do { + BDX_ASSERT(db->rptr->addr.dma == 0); + pci_unmap_page(priv->pdev, db->rptr->addr.dma, + db->rptr->len, PCI_DMA_TODEVICE); + bdx_tx_db_inc_rptr(db); + } while (db->rptr->len > 0); + tx_level -= db->rptr->len; /* '-' koz len is negative */ + + /* now should come skb pointer - free it */ + dev_kfree_skb_irq(db->rptr->addr.skb); + bdx_tx_db_inc_rptr(db); + } + + /* let h/w know which TXF descriptors were cleaned */ + BDX_ASSERT((f->m.wptr & TXF_WPTR_WR_PTR) >= f->m.memsz); + WRITE_REG(priv, f->m.reg_RPTR, f->m.rptr & TXF_WPTR_WR_PTR); + + /* We reclaimed resources, so in case the Q is stopped by xmit callback, + * we resume the transmition and use tx_lock to synchronize with xmit.*/ + spin_lock(&priv->tx_lock); + priv->tx_level += tx_level; + BDX_ASSERT(priv->tx_level <= 0 || priv->tx_level > BDX_MAX_TX_LEVEL); +#ifdef BDX_DELAY_WPTR + if (priv->tx_noupd) { + priv->tx_noupd = 0; + WRITE_REG(priv, priv->txd_fifo0.m.reg_WPTR, + priv->txd_fifo0.m.wptr & TXF_WPTR_WR_PTR); + } +#endif + + if (unlikely(netif_queue_stopped(priv->ndev) + && netif_carrier_ok(priv->ndev) + && (priv->tx_level >= BDX_MIN_TX_LEVEL))) { + DBG("%s: %s: TX Q WAKE level %d\n", + BDX_DRV_NAME, priv->ndev->name, priv->tx_level); + netif_wake_queue(priv->ndev); + } + spin_unlock(&priv->tx_lock); +} + +/* bdx_tx_free_skbs - frees all skbs from TXD fifo. + * It gets called when OS stops this dev, eg upon "ifconfig down" or rmmod + */ +static void bdx_tx_free_skbs(struct bdx_priv *priv) +{ + struct txdb *db = &priv->txdb; + + ENTER; + while (db->rptr != db->wptr) { + if (likely(db->rptr->len)) + pci_unmap_page(priv->pdev, db->rptr->addr.dma, + db->rptr->len, PCI_DMA_TODEVICE); + else + dev_kfree_skb(db->rptr->addr.skb); + bdx_tx_db_inc_rptr(db); + } + RET(); +} + +/* bdx_tx_free - frees all Tx resources */ +static void bdx_tx_free(struct bdx_priv *priv) +{ + ENTER; + bdx_tx_free_skbs(priv); + bdx_fifo_free(priv, &priv->txd_fifo0.m); + bdx_fifo_free(priv, &priv->txf_fifo0.m); + bdx_tx_db_close(&priv->txdb); +} + +/* bdx_tx_push_desc - push descriptor to TxD fifo + * @priv - NIC private structure + * @data - desc's data + * @size - desc's size + * + * Pushes desc to TxD fifo and overlaps it if needed. + * NOTE: this func does not check for available space. this is responsibility + * of the caller. Neither does it check that data size is smaller then + * fifo size. 
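+ *
+ * Illustration with hypothetical numbers: with 100 bytes left before the end
+ * of the fifo and a 160 byte descriptor to push, the code below does:
+ *
+ *	memcpy(f->m.va + f->m.wptr, data, 100);		(fill the tail)
+ *	f->m.wptr = 60;
+ *	memcpy(f->m.va, data + 100, 60);		(wrap to the start)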
+ */ +static void bdx_tx_push_desc(struct bdx_priv *priv, void *data, int size) +{ + struct txd_fifo *f = &priv->txd_fifo0; + int i = f->m.memsz - f->m.wptr; + + if (size == 0) + return; + + if (i > size) { + memcpy(f->m.va + f->m.wptr, data, size); + f->m.wptr += size; + } else { + memcpy(f->m.va + f->m.wptr, data, i); + f->m.wptr = size - i; + memcpy(f->m.va, data + i, f->m.wptr); + } + WRITE_REG(priv, f->m.reg_WPTR, f->m.wptr & TXF_WPTR_WR_PTR); +} + +/* bdx_tx_push_desc_safe - push descriptor to TxD fifo in a safe way + * @priv - NIC private structure + * @data - desc's data + * @size - desc's size + * + * NOTE: this func does check for available space and, if neccessary, waits for + * NIC to read existing data before writing new one. + */ +static void bdx_tx_push_desc_safe(struct bdx_priv *priv, void *data, int size) +{ + int timer = 0; + ENTER; + + while (size > 0) { + /* we substruct 8 because when fifo is full rptr == wptr + which also means that fifo is empty, we can understand + the difference, but could hw do the same ??? :) */ + int avail = bdx_tx_space(priv) - 8; + if (avail <= 0) { + if (timer++ > 300) { /* prevent endless loop */ + DBG("timeout while writing desc to TxD fifo\n"); + break; + } + udelay(50); /* give hw a chance to clean fifo */ + continue; + } + avail = MIN(avail, size); + DBG("about to push %d bytes starting %p size %d\n", avail, + data, size); + bdx_tx_push_desc(priv, data, avail); + size -= avail; + data += avail; + } + RET(); +} + +/** + * bdx_probe - Device Initialization Routine + * @pdev: PCI device information struct + * @ent: entry in bdx_pci_tbl + * + * Returns 0 on success, negative on failure + * + * bdx_probe initializes an adapter identified by a pci_dev structure. + * The OS initialization, configuring of the adapter private structure, + * and a hardware reset occur. + * + * functions and their order used as explained in + * /usr/src/linux/Documentation/DMA-{API,mapping}.txt + * + */ + +/* TBD: netif_msg should be checked and implemented. I disable it for now */ +static int __devinit +bdx_probe(struct pci_dev *pdev, const struct pci_device_id *ent) +{ + struct net_device *ndev; + struct bdx_priv *priv; + int err, pci_using_dac, port; + unsigned long pciaddr; + u32 regionSize; + struct pci_nic *nic; + + ENTER; + + nic = vmalloc(sizeof(*nic)); + if (!nic) + RET(-ENOMEM); + + /************** pci *****************/ + if ((err = pci_enable_device(pdev))) /* it trigers interrupt, dunno why. 
*/ + RET(err); /* it's not a problem though */ + + if (!(err = pci_set_dma_mask(pdev, DMA_64BIT_MASK)) && + !(err = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK))) { + pci_using_dac = 1; + } else { + if ((err = pci_set_dma_mask(pdev, DMA_32BIT_MASK)) || + (err = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK))) { + printk(KERN_ERR "tehuti: No usable DMA configuration" + ", aborting\n"); + goto err_dma; + } + pci_using_dac = 0; + } + + if ((err = pci_request_regions(pdev, BDX_DRV_NAME))) + goto err_dma; + + pci_set_master(pdev); + + pciaddr = pci_resource_start(pdev, 0); + if (!pciaddr) { + err = -EIO; + ERR("tehuti: no MMIO resource\n"); + goto err_out_res; + } + if ((regionSize = pci_resource_len(pdev, 0)) < BDX_REGS_SIZE) { + err = -EIO; + ERR("tehuti: MMIO resource (%x) too small\n", regionSize); + goto err_out_res; + } + + nic->regs = ioremap(pciaddr, regionSize); + if (!nic->regs) { + err = -EIO; + ERR("tehuti: ioremap failed\n"); + goto err_out_res; + } + + if (pdev->irq < 2) { + err = -EIO; + ERR("tehuti: invalid irq (%d)\n", pdev->irq); + goto err_out_iomap; + } + pci_set_drvdata(pdev, nic); + + if (pdev->device == 0x3014) + nic->port_num = 2; + else + nic->port_num = 1; + + print_hw_id(pdev); + + bdx_hw_reset_direct(nic->regs); + + nic->irq_type = IRQ_INTX; +#ifdef BDX_MSI + if ((readl(nic->regs + FPGA_VER) & 0xFFF) >= 378) { + if ((err = pci_enable_msi(pdev))) + ERR("Tehuti: Can't eneble msi. error is %d\n", err); + else + nic->irq_type = IRQ_MSI; + } else + DBG("HW does not support MSI\n"); +#endif + + /************** netdev **************/ + for (port = 0; port < nic->port_num; port++) { + if (!(ndev = alloc_etherdev(sizeof(struct bdx_priv)))) { + err = -ENOMEM; + printk(KERN_ERR "tehuti: alloc_etherdev failed\n"); + goto err_out_iomap; + } + + ndev->open = bdx_open; + ndev->stop = bdx_close; + ndev->hard_start_xmit = bdx_tx_transmit; + ndev->do_ioctl = bdx_ioctl; + ndev->set_multicast_list = bdx_setmulti; + ndev->get_stats = bdx_get_stats; + ndev->change_mtu = bdx_change_mtu; + ndev->set_mac_address = bdx_set_mac; + ndev->tx_queue_len = BDX_NDEV_TXQ_LEN; + ndev->vlan_rx_register = bdx_vlan_rx_register; + ndev->vlan_rx_add_vid = bdx_vlan_rx_add_vid; + ndev->vlan_rx_kill_vid = bdx_vlan_rx_kill_vid; + + bdx_ethtool_ops(ndev); /* ethtool interface */ + + /* these fields are used for info purposes only + * so we can have them same for all ports of the board */ + ndev->if_port = port; + ndev->base_addr = pciaddr; + ndev->mem_start = pciaddr; + ndev->mem_end = pciaddr + regionSize; + ndev->irq = pdev->irq; + ndev->features = NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_TSO + | NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX | + NETIF_F_HW_VLAN_FILTER + /*| NETIF_F_FRAGLIST */ + ; + + if (pci_using_dac) + ndev->features |= NETIF_F_HIGHDMA; + + /************** priv ****************/ + priv = nic->priv[port] = ndev->priv; + + memset(priv, 0, sizeof(struct bdx_priv)); + priv->pBdxRegs = nic->regs + port * 0x8000; + priv->port = port; + priv->pdev = pdev; + priv->ndev = ndev; + priv->nic = nic; + priv->msg_enable = BDX_DEF_MSG_ENABLE; + + netif_napi_add(ndev, &priv->napi, bdx_poll, 64); + + if ((readl(nic->regs + FPGA_VER) & 0xFFF) == 308) { + DBG("HW statistics not supported\n"); + priv->stats_flag = 0; + } else { + priv->stats_flag = 1; + } + + /* Initialize fifo sizes. */ + priv->txd_size = 2; + priv->txf_size = 2; + priv->rxd_size = 2; + priv->rxf_size = 3; + + /* Initialize the initial coalescing registers. 
*/ + priv->rdintcm = INT_REG_VAL(0x20, 1, 4, 12); + priv->tdintcm = INT_REG_VAL(0x20, 1, 0, 12); + + /* ndev->xmit_lock spinlock is not used. + * Private priv->tx_lock is used for synchronization + * between transmit and TX irq cleanup. In addition + * set multicast list callback has to use priv->tx_lock. + */ +#ifdef BDX_LLTX + ndev->features |= NETIF_F_LLTX; +#endif + spin_lock_init(&priv->tx_lock); + + /*bdx_hw_reset(priv); */ + if (bdx_read_mac(priv)) { + printk(KERN_ERR "tehuti: load MAC address failed\n"); + goto err_out_iomap; + } + SET_NETDEV_DEV(ndev, &pdev->dev); + if ((err = register_netdev(ndev))) { + printk(KERN_ERR "tehuti: register_netdev failed\n"); + goto err_out_free; + } + netif_carrier_off(ndev); + netif_stop_queue(ndev); + + print_eth_id(ndev); + } + RET(0); + +err_out_free: + free_netdev(ndev); +err_out_iomap: + iounmap(nic->regs); +err_out_res: + pci_release_regions(pdev); +err_dma: + pci_disable_device(pdev); + vfree(nic); + + RET(err); +} + +/****************** Ethtool interface *********************/ +/* get strings for tests */ +static const char + bdx_test_names[][ETH_GSTRING_LEN] = { + "No tests defined" +}; + +/* get strings for statistics counters */ +static const char + bdx_stat_names[][ETH_GSTRING_LEN] = { + "InUCast", /* 0x7200 */ + "InMCast", /* 0x7210 */ + "InBCast", /* 0x7220 */ + "InPkts", /* 0x7230 */ + "InErrors", /* 0x7240 */ + "InDropped", /* 0x7250 */ + "FrameTooLong", /* 0x7260 */ + "FrameSequenceErrors", /* 0x7270 */ + "InVLAN", /* 0x7280 */ + "InDroppedDFE", /* 0x7290 */ + "InDroppedIntFull", /* 0x72A0 */ + "InFrameAlignErrors", /* 0x72B0 */ + + /* 0x72C0-0x72E0 RSRV */ + + "OutUCast", /* 0x72F0 */ + "OutMCast", /* 0x7300 */ + "OutBCast", /* 0x7310 */ + "OutPkts", /* 0x7320 */ + + /* 0x7330-0x7360 RSRV */ + + "OutVLAN", /* 0x7370 */ + "InUCastOctects", /* 0x7380 */ + "OutUCastOctects", /* 0x7390 */ + + /* 0x73A0-0x73B0 RSRV */ + + "InBCastOctects", /* 0x73C0 */ + "OutBCastOctects", /* 0x73D0 */ + "InOctects", /* 0x73E0 */ + "OutOctects", /* 0x73F0 */ +}; + +/* + * bdx_get_settings - get device-specific settings + * @netdev + * @ecmd + */ +static int bdx_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd) +{ + u32 rdintcm; + u32 tdintcm; + struct bdx_priv *priv = netdev->priv; + + rdintcm = priv->rdintcm; + tdintcm = priv->tdintcm; + + ecmd->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE); + ecmd->advertising = (ADVERTISED_10000baseT_Full | ADVERTISED_FIBRE); + ecmd->speed = SPEED_10000; + ecmd->duplex = DUPLEX_FULL; + ecmd->port = PORT_FIBRE; + ecmd->transceiver = XCVR_EXTERNAL; /* what does it mean? */ + ecmd->autoneg = AUTONEG_DISABLE; + + /* PCK_TH measures in multiples of FIFO bytes + We translate to packets */ + ecmd->maxtxpkt = + ((GET_PCK_TH(tdintcm) * PCK_TH_MULT) / BDX_TXF_DESC_SZ); + ecmd->maxrxpkt = + ((GET_PCK_TH(rdintcm) * PCK_TH_MULT) / sizeof(struct rxf_desc)); + + return 0; +} + +/* + * bdx_get_drvinfo - report driver information + * @netdev + * @drvinfo + */ +static void +bdx_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo) +{ + struct bdx_priv *priv = netdev->priv; + + strncat(drvinfo->driver, BDX_DRV_NAME, sizeof(drvinfo->driver)); + strncat(drvinfo->version, BDX_DRV_VERSION, sizeof(drvinfo->version)); + strncat(drvinfo->fw_version, "N/A", sizeof(drvinfo->fw_version)); + strncat(drvinfo->bus_info, pci_name(priv->pdev), + sizeof(drvinfo->bus_info)); + + drvinfo->n_stats = ((priv->stats_flag) ? 
+ (sizeof(bdx_stat_names) / ETH_GSTRING_LEN) : 0); + drvinfo->testinfo_len = 0; + drvinfo->regdump_len = 0; + drvinfo->eedump_len = 0; +} + +/* + * bdx_get_rx_csum - report whether receive checksums are turned on or off + * @netdev + */ +static u32 bdx_get_rx_csum(struct net_device *netdev) +{ + return 1; /* always on */ +} + +/* + * bdx_get_tx_csum - report whether transmit checksums are turned on or off + * @netdev + */ +static u32 bdx_get_tx_csum(struct net_device *netdev) +{ + return (netdev->features & NETIF_F_IP_CSUM) != 0; +} + +/* + * bdx_get_coalesce - get interrupt coalescing parameters + * @netdev + * @ecoal + */ +static int +bdx_get_coalesce(struct net_device *netdev, struct ethtool_coalesce *ecoal) +{ + u32 rdintcm; + u32 tdintcm; + struct bdx_priv *priv = netdev->priv; + + rdintcm = priv->rdintcm; + tdintcm = priv->tdintcm; + + /* PCK_TH measures in multiples of FIFO bytes + We translate to packets */ + ecoal->rx_coalesce_usecs = GET_INT_COAL(rdintcm) * INT_COAL_MULT; + ecoal->rx_max_coalesced_frames = + ((GET_PCK_TH(rdintcm) * PCK_TH_MULT) / sizeof(struct rxf_desc)); + + ecoal->tx_coalesce_usecs = GET_INT_COAL(tdintcm) * INT_COAL_MULT; + ecoal->tx_max_coalesced_frames = + ((GET_PCK_TH(tdintcm) * PCK_TH_MULT) / BDX_TXF_DESC_SZ); + + /* adaptive parameters ignored */ + return 0; +} + +/* + * bdx_set_coalesce - set interrupt coalescing parameters + * @netdev + * @ecoal + */ +static int +bdx_set_coalesce(struct net_device *netdev, struct ethtool_coalesce *ecoal) +{ + u32 rdintcm; + u32 tdintcm; + struct bdx_priv *priv = netdev->priv; + int rx_coal; + int tx_coal; + int rx_max_coal; + int tx_max_coal; + + /* Check for valid input */ + rx_coal = ecoal->rx_coalesce_usecs / INT_COAL_MULT; + tx_coal = ecoal->tx_coalesce_usecs / INT_COAL_MULT; + rx_max_coal = ecoal->rx_max_coalesced_frames; + tx_max_coal = ecoal->tx_max_coalesced_frames; + + /* Translate from packets to multiples of FIFO bytes */ + rx_max_coal = + (((rx_max_coal * sizeof(struct rxf_desc)) + PCK_TH_MULT - 1) + / PCK_TH_MULT); + tx_max_coal = + (((tx_max_coal * BDX_TXF_DESC_SZ) + PCK_TH_MULT - 1) + / PCK_TH_MULT); + + if ((rx_coal > 0x7FFF) || (tx_coal > 0x7FFF) + || (rx_max_coal > 0xF) || (tx_max_coal > 0xF)) + return -EINVAL; + + rdintcm = INT_REG_VAL(rx_coal, GET_INT_COAL_RC(priv->rdintcm), + GET_RXF_TH(priv->rdintcm), rx_max_coal); + tdintcm = INT_REG_VAL(tx_coal, GET_INT_COAL_RC(priv->tdintcm), 0, + tx_max_coal); + + priv->rdintcm = rdintcm; + priv->tdintcm = tdintcm; + + WRITE_REG(priv, regRDINTCM0, rdintcm); + WRITE_REG(priv, regTDINTCM0, tdintcm); + + return 0; +} + +/* Convert RX fifo size to number of pending packets */ +static inline int bdx_rx_fifo_size_to_packets(int rx_size) +{ + return ((FIFO_SIZE * (1 << rx_size)) / sizeof(struct rxf_desc)); +} + +/* Convert TX fifo size to number of pending packets */ +static inline int bdx_tx_fifo_size_to_packets(int tx_size) +{ + return ((FIFO_SIZE * (1 << tx_size)) / BDX_TXF_DESC_SZ); +} + +/* + * bdx_get_ringparam - report ring sizes + * @netdev + * @ring + */ +static void +bdx_get_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring) +{ + struct bdx_priv *priv = netdev->priv; + + /*max_pending - the maximum-sized FIFO we allow */ + ring->rx_max_pending = bdx_rx_fifo_size_to_packets(3); + ring->tx_max_pending = bdx_tx_fifo_size_to_packets(3); + ring->rx_pending = bdx_rx_fifo_size_to_packets(priv->rxf_size); + ring->tx_pending = bdx_tx_fifo_size_to_packets(priv->txd_size); +} + +/* + * bdx_set_ringparam - set ring sizes + * @netdev + * @ring + */ 
+static int +bdx_set_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring) +{ + struct bdx_priv *priv = netdev->priv; + int rx_size = 0; + int tx_size = 0; + + for (; rx_size < 4; rx_size++) { + if (bdx_rx_fifo_size_to_packets(rx_size) >= ring->rx_pending) + break; + } + if (rx_size == 4) + rx_size = 3; + + for (; tx_size < 4; tx_size++) { + if (bdx_tx_fifo_size_to_packets(tx_size) >= ring->tx_pending) + break; + } + if (tx_size == 4) + tx_size = 3; + + /*Is there anything to do? */ + if ((rx_size == priv->rxf_size) + && (tx_size == priv->txd_size)) + return 0; + + priv->rxf_size = rx_size; + if (rx_size > 1) + priv->rxd_size = rx_size - 1; + else + priv->rxd_size = rx_size; + + priv->txf_size = priv->txd_size = tx_size; + + if (netif_running(netdev)) { + bdx_close(netdev); + bdx_open(netdev); + } + return 0; +} + +/* + * bdx_get_strings - return a set of strings that describe the requested objects + * @netdev + * @data + */ +static void bdx_get_strings(struct net_device *netdev, u32 stringset, u8 *data) +{ + switch (stringset) { + case ETH_SS_TEST: + memcpy(data, *bdx_test_names, sizeof(bdx_test_names)); + break; + case ETH_SS_STATS: + memcpy(data, *bdx_stat_names, sizeof(bdx_stat_names)); + break; + } +} + +/* + * bdx_get_stats_count - return number of 64bit statistics counters + * @netdev + */ +static int bdx_get_stats_count(struct net_device *netdev) +{ + struct bdx_priv *priv = netdev->priv; + BDX_ASSERT(sizeof(bdx_stat_names) / ETH_GSTRING_LEN + != sizeof(struct bdx_stats) / sizeof(u64)); + return ((priv->stats_flag) ? (sizeof(bdx_stat_names) / ETH_GSTRING_LEN) + : 0); +} + +/* + * bdx_get_ethtool_stats - return device's hardware L2 statistics + * @netdev + * @stats + * @data + */ +static void bdx_get_ethtool_stats(struct net_device *netdev, + struct ethtool_stats *stats, u64 *data) +{ + struct bdx_priv *priv = netdev->priv; + + if (priv->stats_flag) { + + /* Update stats from HW */ + bdx_update_stats(priv); + + /* Copy data to user buffer */ + memcpy(data, &priv->hw_stats, sizeof(priv->hw_stats)); + } +} + +/* + * bdx_ethtool_ops - ethtool interface implementation + * @netdev + */ +static void bdx_ethtool_ops(struct net_device *netdev) +{ + static struct ethtool_ops bdx_ethtool_ops = { + .get_settings = bdx_get_settings, + .get_drvinfo = bdx_get_drvinfo, + .get_link = ethtool_op_get_link, + .get_coalesce = bdx_get_coalesce, + .set_coalesce = bdx_set_coalesce, + .get_ringparam = bdx_get_ringparam, + .set_ringparam = bdx_set_ringparam, + .get_rx_csum = bdx_get_rx_csum, + .get_tx_csum = bdx_get_tx_csum, + .get_sg = ethtool_op_get_sg, + .get_tso = ethtool_op_get_tso, + .get_strings = bdx_get_strings, + .get_stats_count = bdx_get_stats_count, + .get_ethtool_stats = bdx_get_ethtool_stats, + }; + + SET_ETHTOOL_OPS(netdev, &bdx_ethtool_ops); +} + +/** + * bdx_remove - Device Removal Routine + * @pdev: PCI device information struct + * + * bdx_remove is called by the PCI subsystem to alert the driver + * that it should release a PCI device. The could be caused by a + * Hot-Plug event, or because the driver is going to be removed from + * memory. 
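+ * Each port's net_device is unregistered and freed, MSI is disabled if it was in use, and the iomapped registers and PCI resources are released.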
+ **/ +static void __devexit bdx_remove(struct pci_dev *pdev) +{ + struct pci_nic *nic = pci_get_drvdata(pdev); + struct net_device *ndev; + int port; + + for (port = 0; port < nic->port_num; port++) { + ndev = nic->priv[port]->ndev; + unregister_netdev(ndev); + free_netdev(ndev); + } + + /*bdx_hw_reset_direct(nic->regs); */ +#ifdef BDX_MSI + if (nic->irq_type == IRQ_MSI) + pci_disable_msi(pdev); +#endif + + iounmap(nic->regs); + pci_release_regions(pdev); + pci_disable_device(pdev); + pci_set_drvdata(pdev, NULL); + vfree(nic); + + RET(); +} + +static struct pci_driver bdx_pci_driver = { + .name = BDX_DRV_NAME, + .id_table = bdx_pci_tbl, + .probe = bdx_probe, + .remove = __devexit_p(bdx_remove), +}; + +/* + * print_driver_id - print parameters of the driver build + */ +static void __init print_driver_id(void) +{ + printk(KERN_INFO "%s: %s, %s\n", BDX_DRV_NAME, BDX_DRV_DESC, + BDX_DRV_VERSION); + printk(KERN_INFO "%s: Options: hw_csum %s\n", BDX_DRV_NAME, + BDX_MSI_STRING); +} + +static int __init bdx_module_init(void) +{ + ENTER; + bdx_firmware_endianess(); + init_txd_sizes(); + print_driver_id(); + RET(pci_register_driver(&bdx_pci_driver)); +} + +module_init(bdx_module_init); + +static void __exit bdx_module_exit(void) +{ + ENTER; + pci_unregister_driver(&bdx_pci_driver); + RET(); +} + +module_exit(bdx_module_exit); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR(DRIVER_AUTHOR); +MODULE_DESCRIPTION(BDX_DRV_DESC); diff --git a/drivers/net/tehuti.h b/drivers/net/tehuti.h new file mode 100644 index 0000000000000000000000000000000000000000..efd170f451b45161cbb621926c2a6820de3672a7 --- /dev/null +++ b/drivers/net/tehuti.h @@ -0,0 +1,564 @@ +/* + * Tehuti Networks(R) Network Driver + * Copyright (C) 2007 Tehuti Networks Ltd. All rights reserved + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + */ + +#ifndef _TEHUTI_H +#define _TEHUTI_H + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* Compile Time Switches */ +/* start */ +#define BDX_TSO +#define BDX_LLTX +#define BDX_DELAY_WPTR +/* #define BDX_MSI */ +/* end */ + +#if !defined CONFIG_PCI_MSI +# undef BDX_MSI +#endif + +#define BDX_DEF_MSG_ENABLE (NETIF_MSG_DRV | \ + NETIF_MSG_PROBE | \ + NETIF_MSG_LINK) + +/* ioctl ops */ +#define BDX_OP_READ 1 +#define BDX_OP_WRITE 2 + +/* RX copy break size */ +#define BDX_COPYBREAK 257 + +#define DRIVER_AUTHOR "Tehuti Networks(R)" +#define BDX_DRV_DESC "Tehuti Networks(R) Network Driver" +#define BDX_DRV_NAME "tehuti" +#define BDX_NIC_NAME "Tehuti 10 Giga TOE SmartNIC" +#define BDX_NIC2PORT_NAME "Tehuti 2-Port 10 Giga TOE SmartNIC" +#define BDX_DRV_VERSION "7.29.3" + +#ifdef BDX_MSI +# define BDX_MSI_STRING "msi " +#else +# define BDX_MSI_STRING "" +#endif + +/* netdev tx queue len for Luxor. default value is, btw, 1000 + * ifcontig eth1 txqueuelen 3000 - to change it at runtime */ +#define BDX_NDEV_TXQ_LEN 3000 + +#define FIFO_SIZE 4096 +#define FIFO_EXTRA_SPACE 1024 + +#define MIN(x, y) ((x) < (y) ? (x) : (y)) + +#if BITS_PER_LONG == 64 +# define H32_64(x) (u32) ((u64)(x) >> 32) +# define L32_64(x) (u32) ((u64)(x) & 0xffffffff) +#elif BITS_PER_LONG == 32 +# define H32_64(x) 0 +# define L32_64(x) ((u32) (x)) +#else /* BITS_PER_LONG == ?? 
*/ +# error BITS_PER_LONG is undefined. Must be 64 or 32 +#endif /* BITS_PER_LONG */ + +#ifdef __BIG_ENDIAN +# define CPU_CHIP_SWAP32(x) swab32(x) +# define CPU_CHIP_SWAP16(x) swab16(x) +#else +# define CPU_CHIP_SWAP32(x) (x) +# define CPU_CHIP_SWAP16(x) (x) +#endif + +#define READ_REG(pp, reg) readl(pp->pBdxRegs + reg) +#define WRITE_REG(pp, reg, val) writel(val, pp->pBdxRegs + reg) + +#ifndef DMA_64BIT_MASK +# define DMA_64BIT_MASK 0xffffffffffffffffULL +#endif + +#ifndef DMA_32BIT_MASK +# define DMA_32BIT_MASK 0x00000000ffffffffULL +#endif + +#ifndef NET_IP_ALIGN +# define NET_IP_ALIGN 2 +#endif + +#ifndef NETDEV_TX_OK +# define NETDEV_TX_OK 0 +#endif + +#define LUXOR_MAX_PORT 2 +#define BDX_MAX_RX_DONE 150 +#define BDX_TXF_DESC_SZ 16 +#define BDX_MAX_TX_LEVEL (priv->txd_fifo0.m.memsz - 16) +#define BDX_MIN_TX_LEVEL 256 +#define BDX_NO_UPD_PACKETS 40 + +struct pci_nic { + int port_num; + void __iomem *regs; + int irq_type; + struct bdx_priv *priv[LUXOR_MAX_PORT]; +}; + +enum { IRQ_INTX, IRQ_MSI, IRQ_MSIX }; + +#define PCK_TH_MULT 128 +#define INT_COAL_MULT 2 + +#define BITS_MASK(nbits) ((1<>nshift)&BITS_MASK(nbits)) +#define BITS_SHIFT_MASK(nbits, nshift) (BITS_MASK(nbits)<regs + off)); } +static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val) +{ + writel(val, tp->aperegs + off); +} + +static u32 tg3_ape_read32(struct tg3 *tp, u32 off) +{ + return (readl(tp->aperegs + off)); +} + static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val) { unsigned long flags; @@ -498,6 +512,73 @@ static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val) spin_unlock_irqrestore(&tp->indirect_lock, flags); } +static void tg3_ape_lock_init(struct tg3 *tp) +{ + int i; + + /* Make sure the driver hasn't any stale locks. */ + for (i = 0; i < 8; i++) + tg3_ape_write32(tp, TG3_APE_LOCK_GRANT + 4 * i, + APE_LOCK_GRANT_DRIVER); +} + +static int tg3_ape_lock(struct tg3 *tp, int locknum) +{ + int i, off; + int ret = 0; + u32 status; + + if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)) + return 0; + + switch (locknum) { + case TG3_APE_LOCK_MEM: + break; + default: + return -EINVAL; + } + + off = 4 * locknum; + + tg3_ape_write32(tp, TG3_APE_LOCK_REQ + off, APE_LOCK_REQ_DRIVER); + + /* Wait for up to 1 millisecond to acquire lock. */ + for (i = 0; i < 100; i++) { + status = tg3_ape_read32(tp, TG3_APE_LOCK_GRANT + off); + if (status == APE_LOCK_GRANT_DRIVER) + break; + udelay(10); + } + + if (status != APE_LOCK_GRANT_DRIVER) { + /* Revoke the lock request. 
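+ * This is done by writing our bit back to the GRANT register; the caller then sees -EBUSY.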
*/ + tg3_ape_write32(tp, TG3_APE_LOCK_GRANT + off, + APE_LOCK_GRANT_DRIVER); + + ret = -EBUSY; + } + + return ret; +} + +static void tg3_ape_unlock(struct tg3 *tp, int locknum) +{ + int off; + + if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)) + return; + + switch (locknum) { + case TG3_APE_LOCK_MEM: + break; + default: + return; + } + + off = 4 * locknum; + tg3_ape_write32(tp, TG3_APE_LOCK_GRANT + off, APE_LOCK_GRANT_DRIVER); +} + static void tg3_disable_ints(struct tg3 *tp) { tw32(TG3PCI_MISC_HOST_CTRL, @@ -574,7 +655,7 @@ static void tg3_restart_ints(struct tg3 *tp) static inline void tg3_netif_stop(struct tg3 *tp) { tp->dev->trans_start = jiffies; /* prevent tx timeout */ - netif_poll_disable(tp->dev); + napi_disable(&tp->napi); netif_tx_disable(tp->dev); } @@ -585,7 +666,7 @@ static inline void tg3_netif_start(struct tg3 *tp) * so long as all callers are assured to have free tx slots * (such as after tg3_init_hw) */ - netif_poll_enable(tp->dev); + napi_enable(&tp->napi); tp->hw_status->status |= SD_STATUS_UPDATED; tg3_enable_ints(tp); } @@ -595,7 +676,8 @@ static void tg3_switch_clocks(struct tg3 *tp) u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL); u32 orig_clock_ctrl; - if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) + if ((tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) || + (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) return; orig_clock_ctrl = clock_ctrl; @@ -1400,6 +1482,7 @@ static int tg3_set_power_state(struct tg3 *tp, pci_power_t state) tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK | CLOCK_CTRL_PWRDOWN_PLL133, 40); } else if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) || + (tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) || (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)) { /* do nothing */ } else if (!((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) && @@ -1444,7 +1527,8 @@ static int tg3_set_power_state(struct tg3 *tp, pci_power_t state) } if (!(tp->tg3_flags & TG3_FLAG_WOL_ENABLE) && - !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) + !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF) && + !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)) tg3_power_down_phy(tp); tg3_frob_aux_power(tp); @@ -3471,11 +3555,9 @@ static int tg3_rx(struct tg3 *tp, int budget) return received; } -static int tg3_poll(struct net_device *netdev, int *budget) +static int tg3_poll_work(struct tg3 *tp, int work_done, int budget) { - struct tg3 *tp = netdev_priv(netdev); struct tg3_hw_status *sblk = tp->hw_status; - int done; /* handle link change and other phy events */ if (!(tp->tg3_flags & @@ -3493,44 +3575,55 @@ static int tg3_poll(struct net_device *netdev, int *budget) /* run TX completion thread */ if (sblk->idx[0].tx_consumer != tp->tx_cons) { tg3_tx(tp); - if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING)) { - netif_rx_complete(netdev); - schedule_work(&tp->reset_task); + if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING)) return 0; - } } /* run RX thread, within the bounds set by NAPI. 
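+ * tg3_rx() is handed only the unused part of the budget (budget - work_done), so the total work reported never exceeds the NAPI budget.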
* All RX "locking" is done by ensuring outside - * code synchronizes with dev->poll() + * code synchronizes with tg3->napi.poll() */ - if (sblk->idx[0].rx_producer != tp->rx_rcb_ptr) { - int orig_budget = *budget; - int work_done; + if (sblk->idx[0].rx_producer != tp->rx_rcb_ptr) + work_done += tg3_rx(tp, budget - work_done); - if (orig_budget > netdev->quota) - orig_budget = netdev->quota; + return work_done; +} - work_done = tg3_rx(tp, orig_budget); +static int tg3_poll(struct napi_struct *napi, int budget) +{ + struct tg3 *tp = container_of(napi, struct tg3, napi); + int work_done = 0; - *budget -= work_done; - netdev->quota -= work_done; - } + while (1) { + work_done = tg3_poll_work(tp, work_done, budget); - if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) { - tp->last_tag = sblk->status_tag; - rmb(); - } else - sblk->status &= ~SD_STATUS_UPDATED; + if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING)) + goto tx_recovery; + + if (unlikely(work_done >= budget)) + break; + + if (likely(!tg3_has_work(tp))) { + struct tg3_hw_status *sblk = tp->hw_status; - /* if no more work, tell net stack and NIC we're done */ - done = !tg3_has_work(tp); - if (done) { - netif_rx_complete(netdev); - tg3_restart_ints(tp); + if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) { + tp->last_tag = sblk->status_tag; + rmb(); + } else + sblk->status &= ~SD_STATUS_UPDATED; + + netif_rx_complete(tp->dev, napi); + tg3_restart_ints(tp); + break; + } } - return (done ? 0 : 1); + return work_done; + +tx_recovery: + netif_rx_complete(tp->dev, napi); + schedule_work(&tp->reset_task); + return 0; } static void tg3_irq_quiesce(struct tg3 *tp) @@ -3577,7 +3670,7 @@ static irqreturn_t tg3_msi_1shot(int irq, void *dev_id) prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]); if (likely(!tg3_irq_sync(tp))) - netif_rx_schedule(dev); /* schedule NAPI poll */ + netif_rx_schedule(dev, &tp->napi); return IRQ_HANDLED; } @@ -3602,7 +3695,7 @@ static irqreturn_t tg3_msi(int irq, void *dev_id) */ tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001); if (likely(!tg3_irq_sync(tp))) - netif_rx_schedule(dev); /* schedule NAPI poll */ + netif_rx_schedule(dev, &tp->napi); return IRQ_RETVAL(1); } @@ -3644,7 +3737,7 @@ static irqreturn_t tg3_interrupt(int irq, void *dev_id) sblk->status &= ~SD_STATUS_UPDATED; if (likely(tg3_has_work(tp))) { prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]); - netif_rx_schedule(dev); /* schedule NAPI poll */ + netif_rx_schedule(dev, &tp->napi); } else { /* No work, shared interrupt perhaps? re-enable * interrupts, and flush that PCI write @@ -3690,7 +3783,7 @@ static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id) tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001); if (tg3_irq_sync(tp)) goto out; - if (netif_rx_schedule_prep(dev)) { + if (netif_rx_schedule_prep(dev, &tp->napi)) { prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]); /* Update last_tag to mark that this status has been * seen. Because interrupt may be shared, we may be @@ -3698,7 +3791,7 @@ static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id) * if tg3_poll() is not scheduled. 
*/ tp->last_tag = sblk->status_tag; - __netif_rx_schedule(dev); + __netif_rx_schedule(dev, &tp->napi); } out: return IRQ_RETVAL(handled); @@ -3737,7 +3830,7 @@ static int tg3_restart_hw(struct tg3 *tp, int reset_phy) tg3_full_unlock(tp); del_timer_sync(&tp->timer); tp->irq_sync = 0; - netif_poll_enable(tp->dev); + napi_enable(&tp->napi); dev_close(tp->dev); tg3_full_lock(tp, 0); } @@ -3932,7 +4025,7 @@ static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev) len = skb_headlen(skb); /* We are running in BH disabled context with netif_tx_lock - * and TX reclaim runs via tp->poll inside of a software + * and TX reclaim runs via tp->napi.poll inside of a software * interrupt. Furthermore, IRQ processing runs lockless so we have * no IRQ context deadlocks to worry about either. Rejoice! */ @@ -4087,7 +4180,7 @@ static int tg3_start_xmit_dma_bug(struct sk_buff *skb, struct net_device *dev) len = skb_headlen(skb); /* We are running in BH disabled context with netif_tx_lock - * and TX reclaim runs via tp->poll inside of a software + * and TX reclaim runs via tp->napi.poll inside of a software * interrupt. Furthermore, IRQ processing runs lockless so we have * no IRQ context deadlocks to worry about either. Rejoice! */ @@ -4732,6 +4825,80 @@ static void tg3_disable_nvram_access(struct tg3 *tp) } } +static void tg3_ape_send_event(struct tg3 *tp, u32 event) +{ + int i; + u32 apedata; + + apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG); + if (apedata != APE_SEG_SIG_MAGIC) + return; + + apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS); + if (apedata != APE_FW_STATUS_READY) + return; + + /* Wait for up to 1 millisecond for APE to service previous event. */ + for (i = 0; i < 10; i++) { + if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM)) + return; + + apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS); + + if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING)) + tg3_ape_write32(tp, TG3_APE_EVENT_STATUS, + event | APE_EVENT_STATUS_EVENT_PENDING); + + tg3_ape_unlock(tp, TG3_APE_LOCK_MEM); + + if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING)) + break; + + udelay(100); + } + + if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING)) + tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1); +} + +static void tg3_ape_driver_state_change(struct tg3 *tp, int kind) +{ + u32 event; + u32 apedata; + + if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)) + return; + + switch (kind) { + case RESET_KIND_INIT: + tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, + APE_HOST_SEG_SIG_MAGIC); + tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN, + APE_HOST_SEG_LEN_MAGIC); + apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT); + tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata); + tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID, + APE_HOST_DRIVER_ID_MAGIC); + tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR, + APE_HOST_BEHAV_NO_PHYLOCK); + + event = APE_EVENT_STATUS_STATE_START; + break; + case RESET_KIND_SHUTDOWN: + event = APE_EVENT_STATUS_STATE_UNLOAD; + break; + case RESET_KIND_SUSPEND: + event = APE_EVENT_STATUS_STATE_SUSPEND; + break; + default: + return; + } + + event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE; + + tg3_ape_send_event(tp, event); +} + /* tp->lock is held. */ static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind) { @@ -4759,6 +4926,10 @@ static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind) break; }; } + + if (kind == RESET_KIND_INIT || + kind == RESET_KIND_SUSPEND) + tg3_ape_driver_state_change(tp, kind); } /* tp->lock is held. 
*/ @@ -4780,6 +4951,9 @@ static void tg3_write_sig_post_reset(struct tg3 *tp, int kind) break; }; } + + if (kind == RESET_KIND_SHUTDOWN) + tg3_ape_driver_state_change(tp, kind); } /* tp->lock is held. */ @@ -4870,17 +5044,26 @@ static void tg3_restore_pci_state(struct tg3 *tp) if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 && (tp->tg3_flags & TG3_FLAG_PCIX_MODE)) val |= PCISTATE_RETRY_SAME_DMA; + /* Allow reads and writes to the APE register and memory space. */ + if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) + val |= PCISTATE_ALLOW_APE_CTLSPC_WR | + PCISTATE_ALLOW_APE_SHMEM_WR; pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val); pci_write_config_dword(tp->pdev, TG3PCI_COMMAND, tp->pci_cmd); /* Make sure PCI-X relaxed ordering bit is clear. */ - pci_read_config_dword(tp->pdev, TG3PCI_X_CAPS, &val); - val &= ~PCIX_CAPS_RELAXED_ORDERING; - pci_write_config_dword(tp->pdev, TG3PCI_X_CAPS, val); + if (tp->pcix_cap) { + u16 pcix_cmd; + + pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD, + &pcix_cmd); + pcix_cmd &= ~PCI_X_CMD_ERO; + pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD, + pcix_cmd); + } if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) { - u32 val; /* Chip reset on 5780 will reset MSI enable bit, * so need to restore it. @@ -4924,7 +5107,9 @@ static int tg3_chip_reset(struct tg3 *tp) if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 || GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 || - GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787) + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) tw32(GRC_FASTBOOT_PC, 0); /* @@ -5037,7 +5222,7 @@ static int tg3_chip_reset(struct tg3 *tp) tw32(GRC_MODE, tp->grc_mode); if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) { - u32 val = tr32(0xc4); + val = tr32(0xc4); tw32(0xc4, val | (1 << 15)); } @@ -5066,7 +5251,7 @@ static int tg3_chip_reset(struct tg3 *tp) if ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) && tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) { - u32 val = tr32(0x7c00); + val = tr32(0x7c00); tw32(0x7c00, val | (1 << 25)); } @@ -5092,7 +5277,8 @@ static int tg3_chip_reset(struct tg3 *tp) /* tp->lock is held. */ static void tg3_stop_fw(struct tg3 *tp) { - if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) { + if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) && + !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)) { u32 val; int i; @@ -6149,14 +6335,22 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy) tg3_write_sig_legacy(tp, RESET_KIND_INIT); + if (tp->pci_chip_rev_id == CHIPREV_ID_5784_A0) { + val = tr32(TG3_CPMU_CTRL); + val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE); + tw32(TG3_CPMU_CTRL, val); + } + /* This works around an issue with Athlon chipsets on * B3 tigon3 silicon. This bit has no effect on any * other revision. But do not set this on PCI Express - * chips. + * chips and don't even touch the clocks if the CPMU is present. 
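+ * (TG3_FLAG_CPMU_PRESENT is set for the 5784/5761 ASIC revisions.)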
*/ - if (!(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) - tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT; - tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl); + if (!(tp->tg3_flags & TG3_FLAG_CPMU_PRESENT)) { + if (!(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) + tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT; + tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl); + } if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 && (tp->tg3_flags & TG3_FLAG_PCIX_MODE)) { @@ -6165,6 +6359,16 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy) tw32(TG3PCI_PCISTATE, val); } + if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) { + /* Allow reads and writes to the + * APE register and memory space. + */ + val = tr32(TG3PCI_PCISTATE); + val |= PCISTATE_ALLOW_APE_CTLSPC_WR | + PCISTATE_ALLOW_APE_SHMEM_WR; + tw32(TG3PCI_PCISTATE, val); + } + if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) { /* Enable some hw fixes. */ val = tr32(TG3PCI_MSI_DATA); @@ -6181,10 +6385,13 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy) if (err) return err; - /* This value is determined during the probe time DMA - * engine test, tg3_test_dma. - */ - tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl); + if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 && + GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761) { + /* This value is determined during the probe time DMA + * engine test, tg3_test_dma. + */ + tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl); + } tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS | GRC_MODE_4X_NIC_SEND_RINGS | @@ -6418,6 +6625,11 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy) RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB | RDMAC_MODE_LNGREAD_ENAB); + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784) + rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB | + RDMAC_MODE_MBUF_RBD_CRPT_ENAB | + RDMAC_MODE_MBUF_SBD_CRPT_ENAB; + /* If statement applies to 5705 and 5750 PCI devices only */ if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 && tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) || @@ -6579,22 +6791,28 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy) /* Enable host coalescing bug fix */ if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755) || - (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)) + (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787) || + (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784) || + (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)) val |= (1 << 29); tw32_f(WDMAC_MODE, val); udelay(40); - if ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0) { - val = tr32(TG3PCI_X_CAPS); + if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) { + u16 pcix_cmd; + + pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD, + &pcix_cmd); if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) { - val &= ~PCIX_CAPS_BURST_MASK; - val |= (PCIX_CAPS_MAX_BURST_CPIOB << PCIX_CAPS_BURST_SHIFT); + pcix_cmd &= ~PCI_X_CMD_MAX_READ; + pcix_cmd |= PCI_X_CMD_READ_2K; } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) { - val &= ~(PCIX_CAPS_SPLIT_MASK | PCIX_CAPS_BURST_MASK); - val |= (PCIX_CAPS_MAX_BURST_CPIOB << PCIX_CAPS_BURST_SHIFT); + pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ); + pcix_cmd |= PCI_X_CMD_READ_2K; } - tw32(TG3PCI_X_CAPS, val); + pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD, + pcix_cmd); } tw32_f(RDMAC_MODE, rdmac_mode); @@ -6603,7 +6821,13 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy) tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE); if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) tw32(MBFREE_MODE, MBFREE_MODE_ENABLE); - tw32(SNDDATAC_MODE, 
SNDDATAC_MODE_ENABLE); + + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) + tw32(SNDDATAC_MODE, + SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY); + else + tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE); + tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE); tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB); tw32(RCVDBDI_MODE, RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ); @@ -6630,7 +6854,8 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy) udelay(100); tp->rx_mode = RX_MODE_ENABLE; - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755) + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE; tw32_f(MAC_RX_MODE, tp->rx_mode); @@ -6760,6 +6985,10 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy) break; }; + /* Write our heartbeat update interval to APE. */ + tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS, + APE_HOST_HEARTBEAT_INT_DISABLE); + tg3_write_sig_post_reset(tp, RESET_KIND_INIT); return 0; @@ -7147,6 +7376,8 @@ static int tg3_open(struct net_device *dev) return err; } + napi_enable(&tp->napi); + tg3_full_lock(tp, 0); err = tg3_init_hw(tp, 1); @@ -7174,6 +7405,7 @@ static int tg3_open(struct net_device *dev) tg3_full_unlock(tp); if (err) { + napi_disable(&tp->napi); free_irq(tp->pdev->irq, dev); if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) { pci_disable_msi(tp->pdev); @@ -7199,6 +7431,8 @@ static int tg3_open(struct net_device *dev) tg3_full_unlock(tp); + napi_disable(&tp->napi); + return err; } @@ -7460,6 +7694,7 @@ static int tg3_close(struct net_device *dev) { struct tg3 *tp = netdev_priv(dev); + napi_disable(&tp->napi); cancel_work_sync(&tp->reset_task); netif_stop_queue(dev); @@ -7995,7 +8230,7 @@ static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, buf = data; if (b_offset || odd_len) { buf = kmalloc(len, GFP_KERNEL); - if (buf == 0) + if (!buf) return -ENOMEM; if (b_offset) memcpy(buf, &start, 4); @@ -8075,7 +8310,8 @@ static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) tp->link_config.autoneg = cmd->autoneg; if (cmd->autoneg == AUTONEG_ENABLE) { - tp->link_config.advertising = cmd->advertising; + tp->link_config.advertising = (cmd->advertising | + ADVERTISED_Autoneg); tp->link_config.speed = SPEED_INVALID; tp->link_config.duplex = DUPLEX_INVALID; } else { @@ -8163,10 +8399,12 @@ static int tg3_set_tso(struct net_device *dev, u32 value) } if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_2) && (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906)) { - if (value) + if (value) { dev->features |= NETIF_F_TSO6; - else - dev->features &= ~NETIF_F_TSO6; + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) + dev->features |= NETIF_F_TSO_ECN; + } else + dev->features &= ~(NETIF_F_TSO6 | NETIF_F_TSO_ECN); } return ethtool_op_set_tso(dev, value); } @@ -8344,7 +8582,9 @@ static int tg3_set_tx_csum(struct net_device *dev, u32 data) } if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 || - GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787) + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) ethtool_op_set_tx_ipv6_csum(dev, data); else ethtool_op_set_tx_csum(dev, data); @@ -8352,14 +8592,16 @@ static int tg3_set_tx_csum(struct net_device *dev, u32 data) return 0; } -static int tg3_get_stats_count (struct net_device *dev) +static int tg3_get_sset_count (struct net_device *dev, int sset) 
{ - return TG3_NUM_STATS; -} - -static int tg3_get_test_count (struct net_device *dev) -{ - return TG3_NUM_TEST; + switch (sset) { + case ETH_SS_TEST: + return TG3_NUM_TEST; + case ETH_SS_STATS: + return TG3_NUM_STATS; + default: + return -EOPNOTSUPP; + } } static void tg3_get_strings (struct net_device *dev, u32 stringset, u8 *buf) @@ -8424,7 +8666,7 @@ static void tg3_get_ethtool_stats (struct net_device *dev, static int tg3_test_nvram(struct tg3 *tp) { u32 *buf, csum, magic; - int i, j, err = 0, size; + int i, j, k, err = 0, size; if (tg3_nvram_read_swab(tp, 0, &magic) != 0) return -EIO; @@ -8478,7 +8720,6 @@ static int tg3_test_nvram(struct tg3 *tp) u8 data[NVRAM_SELFBOOT_DATA_SIZE]; u8 parity[NVRAM_SELFBOOT_DATA_SIZE]; u8 *buf8 = (u8 *) buf; - int j, k; /* Separate the parity bits and the data bytes. */ for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) { @@ -8839,7 +9080,9 @@ static int tg3_test_memory(struct tg3 *tp) if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) { if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 || - GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787) + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) mem_tbl = mem_tbl_5755; else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) mem_tbl = mem_tbl_5906; @@ -9036,6 +9279,7 @@ static int tg3_run_loopback(struct tg3 *tp, int loopback_mode) static int tg3_test_loopback(struct tg3 *tp) { int err = 0; + u32 cpmuctrl = 0; if (!netif_running(tp->dev)) return TG3_LOOPBACK_FAILED; @@ -9044,8 +9288,40 @@ static int tg3_test_loopback(struct tg3 *tp) if (err) return TG3_LOOPBACK_FAILED; + if (tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) { + int i; + u32 status; + + tw32(TG3_CPMU_MUTEX_REQ, CPMU_MUTEX_REQ_DRIVER); + + /* Wait for up to 40 microseconds to acquire lock. */ + for (i = 0; i < 4; i++) { + status = tr32(TG3_CPMU_MUTEX_GNT); + if (status == CPMU_MUTEX_GNT_DRIVER) + break; + udelay(10); + } + + if (status != CPMU_MUTEX_GNT_DRIVER) + return TG3_LOOPBACK_FAILED; + + cpmuctrl = tr32(TG3_CPMU_CTRL); + + /* Turn off power management based on link speed. 
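+ * The saved CPMU_CTRL value is restored and the CPMU mutex released once the MAC loopback test has run.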
*/ + tw32(TG3_CPMU_CTRL, + cpmuctrl & ~CPMU_CTRL_LINK_SPEED_MODE); + } + if (tg3_run_loopback(tp, TG3_MAC_LOOPBACK)) err |= TG3_MAC_LOOPBACK_FAILED; + + if (tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) { + tw32(TG3_CPMU_CTRL, cpmuctrl); + + /* Release the mutex */ + tw32(TG3_CPMU_MUTEX_GNT, CPMU_MUTEX_GNT_DRIVER); + } + if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) { if (tg3_run_loopback(tp, TG3_PHY_LOOPBACK)) err |= TG3_PHY_LOOPBACK_FAILED; @@ -9284,20 +9560,16 @@ static const struct ethtool_ops tg3_ethtool_ops = { .set_pauseparam = tg3_set_pauseparam, .get_rx_csum = tg3_get_rx_csum, .set_rx_csum = tg3_set_rx_csum, - .get_tx_csum = ethtool_op_get_tx_csum, .set_tx_csum = tg3_set_tx_csum, - .get_sg = ethtool_op_get_sg, .set_sg = ethtool_op_set_sg, - .get_tso = ethtool_op_get_tso, .set_tso = tg3_set_tso, - .self_test_count = tg3_get_test_count, .self_test = tg3_self_test, .get_strings = tg3_get_strings, .phys_id = tg3_phys_id, - .get_stats_count = tg3_get_stats_count, .get_ethtool_stats = tg3_get_ethtool_stats, .get_coalesce = tg3_get_coalesce, .set_coalesce = tg3_set_coalesce, + .get_sset_count = tg3_get_sset_count, }; static void __devinit tg3_get_eeprom_size(struct tg3 *tp) @@ -9555,6 +9827,81 @@ static void __devinit tg3_get_5787_nvram_info(struct tg3 *tp) } } +static void __devinit tg3_get_5761_nvram_info(struct tg3 *tp) +{ + u32 nvcfg1, protect = 0; + + nvcfg1 = tr32(NVRAM_CFG1); + + /* NVRAM protection for TPM */ + if (nvcfg1 & (1 << 27)) { + tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM; + protect = 1; + } + + nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK; + switch (nvcfg1) { + case FLASH_5761VENDOR_ATMEL_ADB021D: + case FLASH_5761VENDOR_ATMEL_ADB041D: + case FLASH_5761VENDOR_ATMEL_ADB081D: + case FLASH_5761VENDOR_ATMEL_ADB161D: + case FLASH_5761VENDOR_ATMEL_MDB021D: + case FLASH_5761VENDOR_ATMEL_MDB041D: + case FLASH_5761VENDOR_ATMEL_MDB081D: + case FLASH_5761VENDOR_ATMEL_MDB161D: + tp->nvram_jedecnum = JEDEC_ATMEL; + tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED; + tp->tg3_flags2 |= TG3_FLG2_FLASH; + tp->tg3_flags3 |= TG3_FLG3_NO_NVRAM_ADDR_TRANS; + tp->nvram_pagesize = 256; + break; + case FLASH_5761VENDOR_ST_A_M45PE20: + case FLASH_5761VENDOR_ST_A_M45PE40: + case FLASH_5761VENDOR_ST_A_M45PE80: + case FLASH_5761VENDOR_ST_A_M45PE16: + case FLASH_5761VENDOR_ST_M_M45PE20: + case FLASH_5761VENDOR_ST_M_M45PE40: + case FLASH_5761VENDOR_ST_M_M45PE80: + case FLASH_5761VENDOR_ST_M_M45PE16: + tp->nvram_jedecnum = JEDEC_ST; + tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED; + tp->tg3_flags2 |= TG3_FLG2_FLASH; + tp->nvram_pagesize = 256; + break; + } + + if (protect) { + tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT); + } else { + switch (nvcfg1) { + case FLASH_5761VENDOR_ATMEL_ADB161D: + case FLASH_5761VENDOR_ATMEL_MDB161D: + case FLASH_5761VENDOR_ST_A_M45PE16: + case FLASH_5761VENDOR_ST_M_M45PE16: + tp->nvram_size = 0x100000; + break; + case FLASH_5761VENDOR_ATMEL_ADB081D: + case FLASH_5761VENDOR_ATMEL_MDB081D: + case FLASH_5761VENDOR_ST_A_M45PE80: + case FLASH_5761VENDOR_ST_M_M45PE80: + tp->nvram_size = 0x80000; + break; + case FLASH_5761VENDOR_ATMEL_ADB041D: + case FLASH_5761VENDOR_ATMEL_MDB041D: + case FLASH_5761VENDOR_ST_A_M45PE40: + case FLASH_5761VENDOR_ST_M_M45PE40: + tp->nvram_size = 0x40000; + break; + case FLASH_5761VENDOR_ATMEL_ADB021D: + case FLASH_5761VENDOR_ATMEL_MDB021D: + case FLASH_5761VENDOR_ST_A_M45PE20: + case FLASH_5761VENDOR_ST_M_M45PE20: + tp->nvram_size = 0x20000; + break; + } + } +} + static void __devinit tg3_get_5906_nvram_info(struct tg3 *tp) { tp->nvram_jedecnum = JEDEC_ATMEL; @@ -9594,8 +9941,11 
@@ static void __devinit tg3_nvram_init(struct tg3 *tp) tg3_get_5752_nvram_info(tp); else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755) tg3_get_5755_nvram_info(tp); - else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787) + else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784) tg3_get_5787_nvram_info(tp); + else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) + tg3_get_5761_nvram_info(tp); else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) tg3_get_5906_nvram_info(tp); else @@ -9673,6 +10023,7 @@ static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr) if ((tp->tg3_flags & TG3_FLAG_NVRAM) && (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) && (tp->tg3_flags2 & TG3_FLG2_FLASH) && + !(tp->tg3_flags3 & TG3_FLG3_NO_NVRAM_ADDR_TRANS) && (tp->nvram_jedecnum == JEDEC_ATMEL)) addr = ((addr / tp->nvram_pagesize) << @@ -9687,6 +10038,7 @@ static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr) if ((tp->tg3_flags & TG3_FLAG_NVRAM) && (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) && (tp->tg3_flags2 & TG3_FLG2_FLASH) && + !(tp->tg3_flags3 & TG3_FLG3_NO_NVRAM_ADDR_TRANS) && (tp->nvram_jedecnum == JEDEC_ATMEL)) addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) * @@ -9907,6 +10259,8 @@ static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len, if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752) && (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5755) && (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5787) && + (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784) && + (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761) && (tp->nvram_jedecnum == JEDEC_ST) && (nvram_cmd & NVRAM_CMD_FIRST)) { @@ -10077,8 +10431,12 @@ static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp) tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT; tp->tg3_flags2 |= TG3_FLG2_IS_NIC; } - if (tr32(VCPU_CFGSHDW) & VCPU_CFGSHDW_ASPM_DBNC) + val = tr32(VCPU_CFGSHDW); + if (val & VCPU_CFGSHDW_ASPM_DBNC) tp->tg3_flags |= TG3_FLAG_ASPM_WORKAROUND; + if ((val & VCPU_CFGSHDW_WOL_ENABLE) && + (val & VCPU_CFGSHDW_WOL_MAGPKT)) + tp->tg3_flags |= TG3_FLAG_WOL_ENABLE; return; } @@ -10195,10 +10553,16 @@ static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp) if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE; } + if (nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) + tp->tg3_flags3 |= TG3_FLG3_ENABLE_APE; if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES && !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL)) tp->tg3_flags &= ~TG3_FLAG_WOL_CAP; + if (tp->tg3_flags & TG3_FLAG_WOL_CAP && + nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE) + tp->tg3_flags |= TG3_FLAG_WOL_ENABLE; + if (cfg2 & (1 << 17)) tp->tg3_flags2 |= TG3_FLG2_CAPACITIVE_COUPLING; @@ -10227,7 +10591,8 @@ static int __devinit tg3_phy_probe(struct tg3 *tp) * firwmare access to the PHY hardware. 
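+ * The same applies when the APE is enabled, so the hardware PHY ID read is skipped in that case as well.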
*/ err = 0; - if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) { + if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) || + (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)) { hw_phy_id = hw_phy_id_masked = PHY_ID_INVALID; } else { /* Now read the physical PHY_ID from the chip and verify @@ -10274,6 +10639,7 @@ static int __devinit tg3_phy_probe(struct tg3 *tp) } if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) && + !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) && !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) { u32 bmsr, adv_reg, tg3_ctrl, mask; @@ -10525,6 +10891,13 @@ static int __devinit tg3_get_invariants(struct tg3 *tp) tp->pci_chip_rev_id = (misc_ctrl_reg >> MISC_HOST_CTRL_CHIPREV_SHIFT); + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_USE_PROD_ID_REG) { + u32 prod_id_asic_rev; + + pci_read_config_dword(tp->pdev, TG3PCI_PRODID_ASICREV, + &prod_id_asic_rev); + tp->pci_chip_rev_id = prod_id_asic_rev & PROD_ID_ASIC_REV_MASK; + } /* Wrong chip ID in 5752 A0. This code can be removed later * as A0 is not in production. @@ -10644,6 +11017,8 @@ static int __devinit tg3_get_invariants(struct tg3 *tp) GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 || GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 || GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 || GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 || (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) tp->tg3_flags2 |= TG3_FLG2_5750_PLUS; @@ -10663,6 +11038,8 @@ static int __devinit tg3_get_invariants(struct tg3 *tp) if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 || GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 || GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) { tp->tg3_flags2 |= TG3_FLG2_HW_TSO_2; tp->tg3_flags2 |= TG3_FLG2_1SHOT_MSI; @@ -10680,6 +11057,8 @@ static int __devinit tg3_get_invariants(struct tg3 *tp) GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752 && GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5755 && GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5787 && + GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 && + GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761 && GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906) tp->tg3_flags2 |= TG3_FLG2_JUMBO_CAPABLE; @@ -10720,10 +11099,20 @@ static int __devinit tg3_get_invariants(struct tg3 *tp) cacheline_sz_reg); } + if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) || + (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) { + tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX); + if (!tp->pcix_cap) { + printk(KERN_ERR PFX "Cannot find PCI-X " + "capability, aborting.\n"); + return -EIO; + } + } + pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE, &pci_state_reg); - if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0) { + if (tp->pcix_cap && (pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0) { tp->tg3_flags |= TG3_FLAG_PCIX_MODE; /* If this is a 5700 BX chipset, and we are in PCI-X @@ -10734,7 +11123,6 @@ static int __devinit tg3_get_invariants(struct tg3 *tp) */ if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) { u32 pm_reg; - u16 pci_cmd; tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG; @@ -10742,11 +11130,13 @@ static int __devinit tg3_get_invariants(struct tg3 *tp) * space registers clobbered due to this bug. * So explicitly force the chip into D0 here. 
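+ * The PM control/status register is reached through the PCI PM capability (tp->pm_cap + PCI_PM_CTRL).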
*/ - pci_read_config_dword(tp->pdev, TG3PCI_PM_CTRL_STAT, + pci_read_config_dword(tp->pdev, + tp->pm_cap + PCI_PM_CTRL, &pm_reg); pm_reg &= ~PCI_PM_CTRL_STATE_MASK; pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */; - pci_write_config_dword(tp->pdev, TG3PCI_PM_CTRL_STAT, + pci_write_config_dword(tp->pdev, + tp->pm_cap + PCI_PM_CTRL, pm_reg); /* Also, force SERR#/PERR# in PCI command. */ @@ -10844,6 +11234,20 @@ static int __devinit tg3_get_invariants(struct tg3 *tp) */ tg3_get_eeprom_hw_cfg(tp); + if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) { + /* Allow reads and writes to the + * APE register and memory space. + */ + pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR | + PCISTATE_ALLOW_APE_SHMEM_WR; + pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, + pci_state_reg); + } + + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) + tp->tg3_flags |= TG3_FLAG_CPMU_PRESENT; + /* Set up tp->grc_local_ctrl before calling tg3_set_power_state(). * GPIO1 driven high will bring 5700's external PHY out of reset. * It is also used as eeprom write protect on LOMs. @@ -10910,7 +11314,9 @@ static int __devinit tg3_get_invariants(struct tg3 *tp) if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) { if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 || - GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787) { + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) { if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 && tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722) tp->tg3_flags2 |= TG3_FLG2_PHY_JITTER_BUG; @@ -11053,6 +11459,8 @@ static int __devinit tg3_get_invariants(struct tg3 *tp) */ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 || GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 || GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) tp->dev->hard_start_xmit = tg3_start_xmit; else @@ -11073,11 +11481,6 @@ static int __devinit tg3_get_invariants(struct tg3 *tp) GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755) tp->rx_std_max_post = 8; - /* By default, disable wake-on-lan. User can change this - * using ETHTOOL_SWOL. 
- */ - tp->tg3_flags &= ~TG3_FLAG_WOL_ENABLE; - if (tp->tg3_flags & TG3_FLAG_ASPM_WORKAROUND) tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) & PCIE_PWR_MGMT_L1_THRESH_MSK; @@ -11674,8 +12077,10 @@ static char * __devinit tg3_phy_string(struct tg3 *tp) case PHY_ID_BCM5780: return "5780"; case PHY_ID_BCM5755: return "5755"; case PHY_ID_BCM5787: return "5787"; + case PHY_ID_BCM5784: return "5784"; case PHY_ID_BCM5756: return "5722/5756"; case PHY_ID_BCM5906: return "5906"; + case PHY_ID_BCM5761: return "5761"; case PHY_ID_BCM8002: return "8002/serdes"; case 0: return "serdes"; default: return "unknown"; @@ -11833,7 +12238,6 @@ static int __devinit tg3_init_one(struct pci_dev *pdev, goto err_out_free_res; } - SET_MODULE_OWNER(dev); SET_NETDEV_DEV(dev, &pdev->dev); #if TG3_VLAN_TAG_USED @@ -11880,7 +12284,7 @@ static int __devinit tg3_init_one(struct pci_dev *pdev, INIT_WORK(&tp->reset_task, tg3_reset_task); tp->regs = ioremap_nocache(tg3reg_base, tg3reg_len); - if (tp->regs == 0UL) { + if (!tp->regs) { printk(KERN_ERR PFX "Cannot map device registers, " "aborting.\n"); err = -ENOMEM; @@ -11900,9 +12304,8 @@ static int __devinit tg3_init_one(struct pci_dev *pdev, dev->set_mac_address = tg3_set_mac_addr; dev->do_ioctl = tg3_ioctl; dev->tx_timeout = tg3_tx_timeout; - dev->poll = tg3_poll; + netif_napi_add(dev, &tp->napi, tg3_poll, 64); dev->ethtool_ops = &tg3_ethtool_ops; - dev->weight = 64; dev->watchdog_timeo = TG3_TX_TIMEOUT; dev->change_mtu = tg3_change_mtu; dev->irq = pdev->irq; @@ -11980,6 +12383,8 @@ static int __devinit tg3_init_one(struct pci_dev *pdev, if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_2) && (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906)) dev->features |= NETIF_F_TSO6; + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) + dev->features |= NETIF_F_TSO_ECN; } @@ -12020,7 +12425,9 @@ static int __devinit tg3_init_one(struct pci_dev *pdev, if ((tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) == 0) { dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG; if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 || - GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787) + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) dev->features |= NETIF_F_IPV6_CSUM; tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS; @@ -12032,13 +12439,35 @@ static int __devinit tg3_init_one(struct pci_dev *pdev, tg3_init_coal(tp); + if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) { + if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) { + printk(KERN_ERR PFX "Cannot find proper PCI device " + "base address for APE, aborting.\n"); + err = -ENODEV; + goto err_out_iounmap; + } + + tg3reg_base = pci_resource_start(pdev, 2); + tg3reg_len = pci_resource_len(pdev, 2); + + tp->aperegs = ioremap_nocache(tg3reg_base, tg3reg_len); + if (tp->aperegs == 0UL) { + printk(KERN_ERR PFX "Cannot map APE registers, " + "aborting.\n"); + err = -ENOMEM; + goto err_out_iounmap; + } + + tg3_ape_lock_init(tp); + } + pci_set_drvdata(pdev, dev); err = register_netdev(dev); if (err) { printk(KERN_ERR PFX "Cannot register net device, " "aborting.\n"); - goto err_out_iounmap; + goto err_out_apeunmap; } printk(KERN_INFO "%s: Tigon3 [partno(%s) rev %04x PHY(%s)] (%s) %s Ethernet ", @@ -12071,6 +12500,12 @@ static int __devinit tg3_init_one(struct pci_dev *pdev, return 0; +err_out_apeunmap: + if (tp->aperegs) { + iounmap(tp->aperegs); + tp->aperegs = NULL; + } + err_out_iounmap: if (tp->regs) { iounmap(tp->regs); @@ -12098,6 +12533,10 @@ static void 
__devexit tg3_remove_one(struct pci_dev *pdev) flush_scheduled_work(); unregister_netdev(dev); + if (tp->aperegs) { + iounmap(tp->aperegs); + tp->aperegs = NULL; + } if (tp->regs) { iounmap(tp->regs); tp->regs = NULL; diff --git a/drivers/net/tg3.h b/drivers/net/tg3.h index 5c21f49026c9929507aab64083d6eb233e54d585..6dbdad2b8f88693ffe9f31a9885f9e49059f19bc 100644 --- a/drivers/net/tg3.h +++ b/drivers/net/tg3.h @@ -57,32 +57,7 @@ #define TG3PCI_IRQ_PIN 0x0000003d #define TG3PCI_MIN_GNT 0x0000003e #define TG3PCI_MAX_LAT 0x0000003f -#define TG3PCI_X_CAPS 0x00000040 -#define PCIX_CAPS_RELAXED_ORDERING 0x00020000 -#define PCIX_CAPS_SPLIT_MASK 0x00700000 -#define PCIX_CAPS_SPLIT_SHIFT 20 -#define PCIX_CAPS_BURST_MASK 0x000c0000 -#define PCIX_CAPS_BURST_SHIFT 18 -#define PCIX_CAPS_MAX_BURST_CPIOB 2 -#define TG3PCI_PM_CAP_PTR 0x00000041 -#define TG3PCI_X_COMMAND 0x00000042 -#define TG3PCI_X_STATUS 0x00000044 -#define TG3PCI_PM_CAP_ID 0x00000048 -#define TG3PCI_VPD_CAP_PTR 0x00000049 -#define TG3PCI_PM_CAPS 0x0000004a -#define TG3PCI_PM_CTRL_STAT 0x0000004c -#define TG3PCI_BR_SUPP_EXT 0x0000004e -#define TG3PCI_PM_DATA 0x0000004f -#define TG3PCI_VPD_CAP_ID 0x00000050 -#define TG3PCI_MSI_CAP_PTR 0x00000051 -#define TG3PCI_VPD_ADDR_FLAG 0x00000052 -#define VPD_ADDR_FLAG_WRITE 0x00008000 -#define TG3PCI_VPD_DATA 0x00000054 -#define TG3PCI_MSI_CAP_ID 0x00000058 -#define TG3PCI_NXT_CAP_PTR 0x00000059 -#define TG3PCI_MSI_CTRL 0x0000005a -#define TG3PCI_MSI_ADDR_LOW 0x0000005c -#define TG3PCI_MSI_ADDR_HIGH 0x00000060 +/* 0x40 --> 0x64 unused */ #define TG3PCI_MSI_DATA 0x00000064 /* 0x66 --> 0x68 unused */ #define TG3PCI_MISC_HOST_CTRL 0x00000068 @@ -133,6 +108,7 @@ #define CHIPREV_ID_5752_A1 0x6001 #define CHIPREV_ID_5714_A2 0x9002 #define CHIPREV_ID_5906_A1 0xc001 +#define CHIPREV_ID_5784_A0 0x5784000 #define GET_ASIC_REV(CHIP_REV_ID) ((CHIP_REV_ID) >> 12) #define ASIC_REV_5700 0x07 #define ASIC_REV_5701 0x00 @@ -146,6 +122,9 @@ #define ASIC_REV_5755 0x0a #define ASIC_REV_5787 0x0b #define ASIC_REV_5906 0x0c +#define ASIC_REV_USE_PROD_ID_REG 0x0f +#define ASIC_REV_5784 0x5784 +#define ASIC_REV_5761 0x5761 #define GET_CHIP_REV(CHIP_REV_ID) ((CHIP_REV_ID) >> 8) #define CHIPREV_5700_AX 0x70 #define CHIPREV_5700_BX 0x71 @@ -213,6 +192,8 @@ #define PCISTATE_ROM_RETRY_ENABLE 0x00000040 #define PCISTATE_FLAT_VIEW 0x00000100 #define PCISTATE_RETRY_SAME_DMA 0x00002000 +#define PCISTATE_ALLOW_APE_CTLSPC_WR 0x00010000 +#define PCISTATE_ALLOW_APE_SHMEM_WR 0x00020000 #define TG3PCI_CLOCK_CTRL 0x00000074 #define CLOCK_CTRL_CORECLK_DISABLE 0x00000200 #define CLOCK_CTRL_RXCLK_DISABLE 0x00000400 @@ -239,7 +220,9 @@ #define TG3PCI_DUAL_MAC_CTRL 0x000000b8 #define DUAL_MAC_CTRL_CH_MASK 0x00000003 #define DUAL_MAC_CTRL_ID 0x00000004 -/* 0xbc --> 0x100 unused */ +#define TG3PCI_PRODID_ASICREV 0x000000bc +#define PROD_ID_ASIC_REV_MASK 0x0fffffff +/* 0xc0 --> 0x100 unused */ /* 0x100 --> 0x200 unused */ @@ -683,6 +666,7 @@ #define SNDDATAC_MODE 0x00001000 #define SNDDATAC_MODE_RESET 0x00000001 #define SNDDATAC_MODE_ENABLE 0x00000002 +#define SNDDATAC_MODE_CDELAY 0x00000010 /* 0x1004 --> 0x1400 unused */ /* Send BD ring selector */ @@ -865,7 +849,20 @@ #define RCVLSC_MODE_ATTN_ENABLE 0x00000004 #define RCVLSC_STATUS 0x00003404 #define RCVLSC_STATUS_ERROR_ATTN 0x00000004 -/* 0x3408 --> 0x3800 unused */ +/* 0x3408 --> 0x3600 unused */ + +/* CPMU registers */ +#define TG3_CPMU_CTRL 0x00003600 +#define CPMU_CTRL_LINK_IDLE_MODE 0x00000200 +#define CPMU_CTRL_LINK_AWARE_MODE 0x00000400 +#define CPMU_CTRL_LINK_SPEED_MODE 0x00004000 +/* 
0x3604 --> 0x365c unused */ + +#define TG3_CPMU_MUTEX_REQ 0x0000365c +#define CPMU_MUTEX_REQ_DRIVER 0x00001000 +#define TG3_CPMU_MUTEX_GNT 0x00003660 +#define CPMU_MUTEX_GNT_DRIVER 0x00001000 +/* 0x3664 --> 0x3800 unused */ /* Mbuf cluster free registers */ #define MBFREE_MODE 0x00003800 @@ -1045,7 +1042,10 @@ #define RDMAC_MODE_FIFOOREAD_ENAB 0x00000100 #define RDMAC_MODE_LNGREAD_ENAB 0x00000200 #define RDMAC_MODE_SPLIT_ENABLE 0x00000800 +#define RDMAC_MODE_BD_SBD_CRPT_ENAB 0x00000800 #define RDMAC_MODE_SPLIT_RESET 0x00001000 +#define RDMAC_MODE_MBUF_RBD_CRPT_ENAB 0x00001000 +#define RDMAC_MODE_MBUF_SBD_CRPT_ENAB 0x00002000 #define RDMAC_MODE_FIFO_SIZE_128 0x00020000 #define RDMAC_MODE_FIFO_LONG_BURST 0x00030000 #define RDMAC_STATUS 0x00004804 @@ -1151,6 +1151,8 @@ #define VCPU_STATUS_DRV_RESET 0x08000000 #define VCPU_CFGSHDW 0x00005104 +#define VCPU_CFGSHDW_WOL_ENABLE 0x00000001 +#define VCPU_CFGSHDW_WOL_MAGPKT 0x00000004 #define VCPU_CFGSHDW_ASPM_DBNC 0x00001000 /* Mailboxes */ @@ -1474,6 +1476,22 @@ #define FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ 0x03000002 #define FLASH_5787VENDOR_MICRO_EEPROM_64KHZ 0x03000000 #define FLASH_5787VENDOR_MICRO_EEPROM_376KHZ 0x02000000 +#define FLASH_5761VENDOR_ATMEL_MDB021D 0x00800003 +#define FLASH_5761VENDOR_ATMEL_MDB041D 0x00800000 +#define FLASH_5761VENDOR_ATMEL_MDB081D 0x00800002 +#define FLASH_5761VENDOR_ATMEL_MDB161D 0x00800001 +#define FLASH_5761VENDOR_ATMEL_ADB021D 0x00000003 +#define FLASH_5761VENDOR_ATMEL_ADB041D 0x00000000 +#define FLASH_5761VENDOR_ATMEL_ADB081D 0x00000002 +#define FLASH_5761VENDOR_ATMEL_ADB161D 0x00000001 +#define FLASH_5761VENDOR_ST_M_M45PE20 0x02800001 +#define FLASH_5761VENDOR_ST_M_M45PE40 0x02800000 +#define FLASH_5761VENDOR_ST_M_M45PE80 0x02800002 +#define FLASH_5761VENDOR_ST_M_M45PE16 0x02800003 +#define FLASH_5761VENDOR_ST_A_M45PE20 0x02000001 +#define FLASH_5761VENDOR_ST_A_M45PE40 0x02000000 +#define FLASH_5761VENDOR_ST_A_M45PE80 0x02000002 +#define FLASH_5761VENDOR_ST_A_M45PE16 0x02000003 #define NVRAM_CFG1_5752PAGE_SIZE_MASK 0x70000000 #define FLASH_5752PAGE_SIZE_256 0x00000000 #define FLASH_5752PAGE_SIZE_512 0x10000000 @@ -1504,9 +1522,11 @@ #define ACCESS_ENABLE 0x00000001 #define ACCESS_WR_ENABLE 0x00000002 #define NVRAM_WRITE1 0x00007028 -/* 0x702c --> 0x7400 unused */ +/* 0x702c unused */ + +#define NVRAM_ADDR_LOCKOUT 0x00007030 +/* 0x7034 --> 0x7c00 unused */ -/* 0x7400 --> 0x7c00 unused */ #define PCIE_TRANSACTION_CFG 0x00007c04 #define PCIE_TRANS_CFG_1SHOT_MSI 0x20000000 #define PCIE_TRANS_CFG_LOM 0x00000020 @@ -1552,6 +1572,7 @@ #define NIC_SRAM_DATA_CFG_MINI_PCI 0x00001000 #define NIC_SRAM_DATA_CFG_FIBER_WOL 0x00004000 #define NIC_SRAM_DATA_CFG_NO_GPIO2 0x00100000 +#define NIC_SRAM_DATA_CFG_APE_ENABLE 0x00200000 #define NIC_SRAM_DATA_VER 0x00000b5c #define NIC_SRAM_DATA_VER_SHIFT 16 @@ -1680,6 +1701,47 @@ #define MII_TG3_TEST1_TRIM_EN 0x0010 #define MII_TG3_TEST1_CRC_EN 0x8000 +/* APE registers. Accessible through BAR1 */ +#define TG3_APE_EVENT 0x000c +#define APE_EVENT_1 0x00000001 +#define TG3_APE_LOCK_REQ 0x002c +#define APE_LOCK_REQ_DRIVER 0x00001000 +#define TG3_APE_LOCK_GRANT 0x004c +#define APE_LOCK_GRANT_DRIVER 0x00001000 +#define TG3_APE_SEG_SIG 0x4000 +#define APE_SEG_SIG_MAGIC 0x41504521 + +/* APE shared memory. 
Accessible through BAR1 */ +#define TG3_APE_FW_STATUS 0x400c +#define APE_FW_STATUS_READY 0x00000100 +#define TG3_APE_HOST_SEG_SIG 0x4200 +#define APE_HOST_SEG_SIG_MAGIC 0x484f5354 +#define TG3_APE_HOST_SEG_LEN 0x4204 +#define APE_HOST_SEG_LEN_MAGIC 0x0000001c +#define TG3_APE_HOST_INIT_COUNT 0x4208 +#define TG3_APE_HOST_DRIVER_ID 0x420c +#define APE_HOST_DRIVER_ID_MAGIC 0xf0035100 +#define TG3_APE_HOST_BEHAVIOR 0x4210 +#define APE_HOST_BEHAV_NO_PHYLOCK 0x00000001 +#define TG3_APE_HOST_HEARTBEAT_INT_MS 0x4214 +#define APE_HOST_HEARTBEAT_INT_DISABLE 0 +#define APE_HOST_HEARTBEAT_INT_5SEC 5000 +#define TG3_APE_HOST_HEARTBEAT_COUNT 0x4218 + +#define TG3_APE_EVENT_STATUS 0x4300 + +#define APE_EVENT_STATUS_DRIVER_EVNT 0x00000010 +#define APE_EVENT_STATUS_STATE_CHNGE 0x00000500 +#define APE_EVENT_STATUS_STATE_START 0x00010000 +#define APE_EVENT_STATUS_STATE_UNLOAD 0x00020000 +#define APE_EVENT_STATUS_STATE_WOL 0x00030000 +#define APE_EVENT_STATUS_STATE_SUSPEND 0x00040000 +#define APE_EVENT_STATUS_EVENT_PENDING 0x80000000 + +/* APE convenience enumerations. */ +#define TG3_APE_LOCK_MEM 4 + + /* There are two ways to manage the TX descriptors on the tigon3. * Either the descriptors are in host DMA'able memory, or they * exist only in the cards on-chip SRAM. All 16 send bds are under @@ -2155,6 +2217,7 @@ struct tg3 { void (*write32_mbox) (struct tg3 *, u32, u32); void __iomem *regs; + void __iomem *aperegs; struct net_device *dev; struct pci_dev *pdev; @@ -2176,6 +2239,7 @@ struct tg3 { dma_addr_t tx_desc_mapping; /* begin "rx thread" cacheline section */ + struct napi_struct napi; void (*write32_rx_mbox) (struct tg3 *, u32, u32); u32 rx_rcb_ptr; @@ -2237,7 +2301,7 @@ struct tg3 { #define TG3_FLAG_JUMBO_RING_ENABLE 0x00800000 #define TG3_FLAG_10_100_ONLY 0x01000000 #define TG3_FLAG_PAUSE_AUTONEG 0x02000000 - +#define TG3_FLAG_CPMU_PRESENT 0x04000000 #define TG3_FLAG_40BIT_DMA_BUG 0x08000000 #define TG3_FLAG_BROKEN_CHECKSUMS 0x10000000 #define TG3_FLAG_SUPPORT_MSI 0x20000000 @@ -2279,6 +2343,9 @@ struct tg3 { #define TG3_FLG2_PHY_JITTER_BUG 0x20000000 #define TG3_FLG2_NO_FWARE_REPORTED 0x40000000 #define TG3_FLG2_PHY_ADJUST_TRIM 0x80000000 + u32 tg3_flags3; +#define TG3_FLG3_NO_NVRAM_ADDR_TRANS 0x00000001 +#define TG3_FLG3_ENABLE_APE 0x00000002 struct timer_list timer; u16 timer_counter; @@ -2309,7 +2376,7 @@ struct tg3 { u32 pwrmgmt_thresh; /* PCI block */ - u16 pci_chip_rev_id; + u32 pci_chip_rev_id; u8 pci_cacheline_sz; u8 pci_lat_timer; u8 pci_hdr_type; @@ -2317,6 +2384,7 @@ struct tg3 { int pm_cap; int msi_cap; + int pcix_cap; /* PHY info */ u32 phy_id; @@ -2335,6 +2403,8 @@ struct tg3 { #define PHY_ID_BCM5755 0xbc050cc0 #define PHY_ID_BCM5787 0xbc050ce0 #define PHY_ID_BCM5756 0xbc050ed0 +#define PHY_ID_BCM5784 0xbc050fa0 +#define PHY_ID_BCM5761 0xbc050fd0 #define PHY_ID_BCM5906 0xdc00ac40 #define PHY_ID_BCM8002 0x60010140 #define PHY_ID_INVALID 0xffffffff @@ -2364,7 +2434,8 @@ struct tg3 { (X) == PHY_ID_BCM5752 || (X) == PHY_ID_BCM5714 || \ (X) == PHY_ID_BCM5780 || (X) == PHY_ID_BCM5787 || \ (X) == PHY_ID_BCM5755 || (X) == PHY_ID_BCM5756 || \ - (X) == PHY_ID_BCM5906 || (X) == PHY_ID_BCM8002) + (X) == PHY_ID_BCM5906 || (X) == PHY_ID_BCM5761 || \ + (X) == PHY_ID_BCM8002) struct tg3_hw_stats *hw_stats; dma_addr_t stats_mapping; diff --git a/drivers/net/tlan.c b/drivers/net/tlan.c index 74eb12107e6874c0ba101993a48f1f44094dc34a..c99ce74a7aff7251c3821314fab0e43be38ffd2f 100644 --- a/drivers/net/tlan.c +++ b/drivers/net/tlan.c @@ -556,7 +556,6 @@ static int __devinit TLan_probe1(struct pci_dev 
*pdev, rc = -ENOMEM; goto err_out_regions; } - SET_MODULE_OWNER(dev); SET_NETDEV_DEV(dev, &pdev->dev); priv = netdev_priv(dev); diff --git a/drivers/net/tokenring/3c359.c b/drivers/net/tokenring/3c359.c index 9f1b6ab9c228485d5d5cdc3e1b62461e597e9bdc..7224d368b2a71ed2b4f986a434ff8eb52a75223b 100644 --- a/drivers/net/tokenring/3c359.c +++ b/drivers/net/tokenring/3c359.c @@ -156,7 +156,7 @@ static void print_rx_state(struct net_device *dev) ; static void print_tx_state(struct net_device *dev) { - struct xl_private *xl_priv = (struct xl_private *)dev->priv ; + struct xl_private *xl_priv = netdev_priv(dev); struct xl_tx_desc *txd ; u8 __iomem *xl_mmio = xl_priv->xl_mmio ; int i ; @@ -179,7 +179,7 @@ static void print_tx_state(struct net_device *dev) static void print_rx_state(struct net_device *dev) { - struct xl_private *xl_priv = (struct xl_private *)dev->priv ; + struct xl_private *xl_priv = netdev_priv(dev); struct xl_rx_desc *rxd ; u8 __iomem *xl_mmio = xl_priv->xl_mmio ; int i ; @@ -213,7 +213,7 @@ static void print_rx_state(struct net_device *dev) static u16 xl_ee_read(struct net_device *dev, int ee_addr) { - struct xl_private *xl_priv = (struct xl_private *)dev->priv ; + struct xl_private *xl_priv = netdev_priv(dev); u8 __iomem *xl_mmio = xl_priv->xl_mmio ; /* Wait for EEProm to not be busy */ @@ -245,7 +245,7 @@ static u16 xl_ee_read(struct net_device *dev, int ee_addr) static void xl_ee_write(struct net_device *dev, int ee_addr, u16 ee_value) { - struct xl_private *xl_priv = (struct xl_private *)dev->priv ; + struct xl_private *xl_priv = netdev_priv(dev); u8 __iomem *xl_mmio = xl_priv->xl_mmio ; /* Wait for EEProm to not be busy */ @@ -305,11 +305,11 @@ static int __devinit xl_probe(struct pci_dev *pdev, pci_release_regions(pdev) ; return -ENOMEM ; } - xl_priv = dev->priv ; + xl_priv = netdev_priv(dev); #if XL_DEBUG printk("pci_device: %p, dev:%p, dev->priv: %p, ba[0]: %10x, ba[1]:%10x\n", - pdev, dev, dev->priv, (unsigned int)pdev->resource[0].start, (unsigned int)pdev->resource[1].start) ; + pdev, dev, netdev_priv(dev), (unsigned int)pdev->resource[0].start, (unsigned int)pdev->resource[1].start); #endif dev->irq=pdev->irq; @@ -344,7 +344,6 @@ static int __devinit xl_probe(struct pci_dev *pdev, dev->set_multicast_list=&xl_set_rx_mode; dev->get_stats=&xl_get_stats ; dev->set_mac_address=&xl_set_mac_address ; - SET_MODULE_OWNER(dev); SET_NETDEV_DEV(dev, &pdev->dev); pci_set_drvdata(pdev,dev) ; @@ -365,7 +364,7 @@ static int __devinit xl_probe(struct pci_dev *pdev, static int __devinit xl_init(struct net_device *dev) { - struct xl_private *xl_priv = (struct xl_private *)dev->priv ; + struct xl_private *xl_priv = netdev_priv(dev); printk(KERN_INFO "%s \n", version); printk(KERN_INFO "%s: I/O at %hx, MMIO at %p, using irq %d\n", @@ -385,7 +384,7 @@ static int __devinit xl_init(struct net_device *dev) static int xl_hw_reset(struct net_device *dev) { - struct xl_private *xl_priv = (struct xl_private *)dev->priv ; + struct xl_private *xl_priv = netdev_priv(dev); u8 __iomem *xl_mmio = xl_priv->xl_mmio ; unsigned long t ; u16 i ; @@ -568,7 +567,7 @@ static int xl_hw_reset(struct net_device *dev) static int xl_open(struct net_device *dev) { - struct xl_private *xl_priv=(struct xl_private *)dev->priv; + struct xl_private *xl_priv=netdev_priv(dev); u8 __iomem *xl_mmio = xl_priv->xl_mmio ; u8 i ; u16 hwaddr[3] ; /* Should be u8[6] but we get word return values */ @@ -641,14 +640,14 @@ static int xl_open(struct net_device *dev) * Now to set up the Rx and Tx buffer structures */ /* These MUST be on 
8 byte boundaries */ - xl_priv->xl_tx_ring = kmalloc((sizeof(struct xl_tx_desc) * XL_TX_RING_SIZE) + 7, GFP_DMA | GFP_KERNEL) ; + xl_priv->xl_tx_ring = kzalloc((sizeof(struct xl_tx_desc) * XL_TX_RING_SIZE) + 7, GFP_DMA | GFP_KERNEL); if (xl_priv->xl_tx_ring == NULL) { printk(KERN_WARNING "%s: Not enough memory to allocate rx buffers.\n", dev->name); free_irq(dev->irq,dev); return -ENOMEM; } - xl_priv->xl_rx_ring = kmalloc((sizeof(struct xl_rx_desc) * XL_RX_RING_SIZE) +7, GFP_DMA | GFP_KERNEL) ; + xl_priv->xl_rx_ring = kzalloc((sizeof(struct xl_rx_desc) * XL_RX_RING_SIZE) +7, GFP_DMA | GFP_KERNEL); if (xl_priv->xl_tx_ring == NULL) { printk(KERN_WARNING "%s: Not enough memory to allocate rx buffers.\n", dev->name); @@ -656,8 +655,6 @@ static int xl_open(struct net_device *dev) kfree(xl_priv->xl_tx_ring); return -ENOMEM; } - memset(xl_priv->xl_tx_ring,0,sizeof(struct xl_tx_desc) * XL_TX_RING_SIZE) ; - memset(xl_priv->xl_rx_ring,0,sizeof(struct xl_rx_desc) * XL_RX_RING_SIZE) ; /* Setup Rx Ring */ for (i=0 ; i < XL_RX_RING_SIZE ; i++) { @@ -726,7 +723,7 @@ static int xl_open(struct net_device *dev) static int xl_open_hw(struct net_device *dev) { - struct xl_private *xl_priv=(struct xl_private *)dev->priv; + struct xl_private *xl_priv=netdev_priv(dev); u8 __iomem *xl_mmio = xl_priv->xl_mmio ; u16 vsoff ; char ver_str[33]; @@ -875,7 +872,7 @@ static int xl_open_hw(struct net_device *dev) static void adv_rx_ring(struct net_device *dev) /* Advance rx_ring, cut down on bloat in xl_rx */ { - struct xl_private *xl_priv=(struct xl_private *)dev->priv; + struct xl_private *xl_priv=netdev_priv(dev); int prev_ring_loc ; prev_ring_loc = (xl_priv->rx_ring_tail + XL_RX_RING_SIZE - 1) & (XL_RX_RING_SIZE - 1); @@ -890,7 +887,7 @@ static void adv_rx_ring(struct net_device *dev) /* Advance rx_ring, cut down on static void xl_rx(struct net_device *dev) { - struct xl_private *xl_priv=(struct xl_private *)dev->priv; + struct xl_private *xl_priv=netdev_priv(dev); u8 __iomem * xl_mmio = xl_priv->xl_mmio ; struct sk_buff *skb, *skb2 ; int frame_length = 0, copy_len = 0 ; @@ -997,7 +994,7 @@ static void xl_rx(struct net_device *dev) static void xl_reset(struct net_device *dev) { - struct xl_private *xl_priv=(struct xl_private *)dev->priv; + struct xl_private *xl_priv=netdev_priv(dev); u8 __iomem * xl_mmio = xl_priv->xl_mmio ; unsigned long t; @@ -1020,7 +1017,7 @@ static void xl_reset(struct net_device *dev) static void xl_freemem(struct net_device *dev) { - struct xl_private *xl_priv=(struct xl_private *)dev->priv ; + struct xl_private *xl_priv=netdev_priv(dev); int i ; for (i=0;ipriv; + struct xl_private *xl_priv =netdev_priv(dev); u8 __iomem * xl_mmio = xl_priv->xl_mmio ; u16 intstatus, macstatus ; - if (!dev) { - printk(KERN_WARNING "Device structure dead, aaahhhh !\n") ; - return IRQ_NONE; - } - intstatus = readw(xl_mmio + MMIO_INTSTATUS) ; if (!(intstatus & 1)) /* We didn't generate the interrupt */ @@ -1171,7 +1163,7 @@ static irqreturn_t xl_interrupt(int irq, void *dev_id) static int xl_xmit(struct sk_buff *skb, struct net_device *dev) { - struct xl_private *xl_priv=(struct xl_private *)dev->priv; + struct xl_private *xl_priv=netdev_priv(dev); struct xl_tx_desc *txd ; int tx_head, tx_tail, tx_prev ; unsigned long flags ; @@ -1232,7 +1224,7 @@ static int xl_xmit(struct sk_buff *skb, struct net_device *dev) static void xl_dn_comp(struct net_device *dev) { - struct xl_private *xl_priv=(struct xl_private *)dev->priv; + struct xl_private *xl_priv=netdev_priv(dev); u8 __iomem * xl_mmio = xl_priv->xl_mmio ; struct 
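(Editor's sketch, not part of the patch: the kmalloc()+memset() pairs above collapse into kzalloc(), which returns already-zeroed memory under the same GFP flags. A minimal illustration of the pattern; demo_desc and DEMO_RING_SIZE are illustrative names, not the driver's own.)

#include <linux/slab.h>
#include <linux/types.h>

struct demo_desc {		/* illustrative descriptor layout */
	u32 addr;
	u32 len;
};
#define DEMO_RING_SIZE	16

static struct demo_desc *demo_alloc_ring(void)
{
	/* kzalloc(n, flags) == kmalloc(n, flags) followed by memset(p, 0, n) */
	return kzalloc(sizeof(struct demo_desc) * DEMO_RING_SIZE,
		       GFP_DMA | GFP_KERNEL);
}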
xl_tx_desc *txd ; @@ -1268,7 +1260,7 @@ static void xl_dn_comp(struct net_device *dev) static int xl_close(struct net_device *dev) { - struct xl_private *xl_priv = (struct xl_private *) dev->priv ; + struct xl_private *xl_priv = netdev_priv(dev); u8 __iomem * xl_mmio = xl_priv->xl_mmio ; unsigned long t ; @@ -1366,7 +1358,7 @@ static int xl_close(struct net_device *dev) static void xl_set_rx_mode(struct net_device *dev) { - struct xl_private *xl_priv = (struct xl_private *) dev->priv ; + struct xl_private *xl_priv = netdev_priv(dev); struct dev_mc_list *dmi ; unsigned char dev_mc_address[4] ; u16 options ; @@ -1407,7 +1399,7 @@ static void xl_set_rx_mode(struct net_device *dev) static void xl_srb_bh(struct net_device *dev) { - struct xl_private *xl_priv = (struct xl_private *) dev->priv ; + struct xl_private *xl_priv = netdev_priv(dev); u8 __iomem * xl_mmio = xl_priv->xl_mmio ; u8 srb_cmd, ret_code ; int i ; @@ -1476,14 +1468,14 @@ static void xl_srb_bh(struct net_device *dev) static struct net_device_stats * xl_get_stats(struct net_device *dev) { - struct xl_private *xl_priv = (struct xl_private *) dev->priv ; + struct xl_private *xl_priv = netdev_priv(dev); return (struct net_device_stats *) &xl_priv->xl_stats; } static int xl_set_mac_address (struct net_device *dev, void *addr) { struct sockaddr *saddr = addr ; - struct xl_private *xl_priv = (struct xl_private *)dev->priv ; + struct xl_private *xl_priv = netdev_priv(dev); if (netif_running(dev)) { printk(KERN_WARNING "%s: Cannot set mac/laa address while card is open\n", dev->name) ; @@ -1504,7 +1496,7 @@ static int xl_set_mac_address (struct net_device *dev, void *addr) static void xl_arb_cmd(struct net_device *dev) { - struct xl_private *xl_priv = (struct xl_private *) dev->priv; + struct xl_private *xl_priv = netdev_priv(dev); u8 __iomem * xl_mmio = xl_priv->xl_mmio ; u8 arb_cmd ; u16 lan_status, lan_status_diff ; @@ -1632,7 +1624,7 @@ static void xl_arb_cmd(struct net_device *dev) static void xl_asb_cmd(struct net_device *dev) { - struct xl_private *xl_priv = (struct xl_private *) dev->priv ; + struct xl_private *xl_priv = netdev_priv(dev); u8 __iomem * xl_mmio = xl_priv->xl_mmio ; if (xl_priv->asb_queued == 1) @@ -1663,7 +1655,7 @@ static void xl_asb_cmd(struct net_device *dev) */ static void xl_asb_bh(struct net_device *dev) { - struct xl_private *xl_priv = (struct xl_private *) dev->priv ; + struct xl_private *xl_priv = netdev_priv(dev); u8 __iomem * xl_mmio = xl_priv->xl_mmio ; u8 ret_code ; @@ -1691,7 +1683,7 @@ static void xl_asb_bh(struct net_device *dev) static void xl_srb_cmd(struct net_device *dev, int srb_cmd) { - struct xl_private *xl_priv = (struct xl_private *) dev->priv ; + struct xl_private *xl_priv = netdev_priv(dev); u8 __iomem * xl_mmio = xl_priv->xl_mmio ; switch (srb_cmd) { @@ -1748,7 +1740,7 @@ static void xl_srb_cmd(struct net_device *dev, int srb_cmd) static void xl_wait_misr_flags(struct net_device *dev) { - struct xl_private *xl_priv = (struct xl_private *) dev->priv ; + struct xl_private *xl_priv = netdev_priv(dev); u8 __iomem * xl_mmio = xl_priv->xl_mmio ; int i ; @@ -1773,7 +1765,7 @@ static void xl_wait_misr_flags(struct net_device *dev) static int xl_change_mtu(struct net_device *dev, int mtu) { - struct xl_private *xl_priv = (struct xl_private *) dev->priv; + struct xl_private *xl_priv = netdev_priv(dev); u16 max_mtu ; if (xl_priv->xl_ring_speed == 4) @@ -1795,7 +1787,7 @@ static int xl_change_mtu(struct net_device *dev, int mtu) static void __devexit xl_remove_one (struct pci_dev *pdev) { struct 
net_device *dev = pci_get_drvdata(pdev); - struct xl_private *xl_priv=(struct xl_private *)dev->priv; + struct xl_private *xl_priv=netdev_priv(dev); unregister_netdev(dev); iounmap(xl_priv->xl_mmio) ; diff --git a/drivers/net/tokenring/abyss.c b/drivers/net/tokenring/abyss.c index 1bdd3beefbe54cf17121d1296e5d18dbfed3ac04..124cfd4fbcf4f7004941dadefff88345c9320a11 100644 --- a/drivers/net/tokenring/abyss.c +++ b/drivers/net/tokenring/abyss.c @@ -97,8 +97,9 @@ static int __devinit abyss_attach(struct pci_dev *pdev, const struct pci_device_ static int versionprinted; struct net_device *dev; struct net_local *tp; - int i, ret, pci_irq_line; + int ret, pci_irq_line; unsigned long pci_ioaddr; + DECLARE_MAC_BUF(mac); if (versionprinted++ == 0) printk("%s", version); @@ -116,8 +117,6 @@ static int __devinit abyss_attach(struct pci_dev *pdev, const struct pci_device_ if (!dev) return -ENOMEM; - SET_MODULE_OWNER(dev); - if (!request_region(pci_ioaddr, ABYSS_IO_EXTENT, dev->name)) { ret = -EBUSY; goto err_out_trdev; @@ -147,12 +146,9 @@ static int __devinit abyss_attach(struct pci_dev *pdev, const struct pci_device_ } abyss_read_eeprom(dev); - - printk("%s: Ring Station Address: ", dev->name); - printk("%2.2x", dev->dev_addr[0]); - for (i = 1; i < 6; i++) - printk(":%2.2x", dev->dev_addr[i]); - printk("\n"); + + printk("%s: Ring Station Address: %s\n", + dev->name, print_mac(mac, dev->dev_addr)); tp = netdev_priv(dev); tp->setnselout = abyss_setnselout_pins; diff --git a/drivers/net/tokenring/ibmtr.c b/drivers/net/tokenring/ibmtr.c index 1e8958ee2d0ab5905897c088d493f077ed732886..e494c63bfbd9643088c0c30680fdc956becf8b60 100644 --- a/drivers/net/tokenring/ibmtr.c +++ b/drivers/net/tokenring/ibmtr.c @@ -116,9 +116,6 @@ in the event that chatty debug messages are desired - jjs 12/30/98 */ #define ENABLE_PAGING 1 #endif -#define FALSE 0 -#define TRUE (!FALSE) - /* changes the output format of driver initialization */ #define TR_VERBOSE 0 @@ -327,7 +324,7 @@ static void ibmtr_cleanup_card(struct net_device *dev) release_region(dev->base_addr, IBMTR_IO_EXTENT); { - struct tok_info *ti = (struct tok_info *) dev->priv; + struct tok_info *ti = netdev_priv(dev); iounmap(ti->mmio); iounmap(ti->sram_virt); } @@ -384,7 +381,7 @@ static int __devinit ibmtr_probe1(struct net_device *dev, int PIOaddr) unsigned char segment, intr=0, irq=0, i, j, cardpresent=NOTOK, temp=0; void __iomem * t_mmio = NULL; - struct tok_info *ti = dev->priv; + struct tok_info *ti = netdev_priv(dev); void __iomem *cd_chanid; unsigned char *tchanid, ctemp; #ifndef PCMCIA @@ -392,6 +389,7 @@ static int __devinit ibmtr_probe1(struct net_device *dev, int PIOaddr) unsigned long timeout; static int version_printed; #endif + DECLARE_MAC_BUF(mac); /* Query the adapter PIO base port which will return * indication of where MMIO was placed. We also have a @@ -705,9 +703,8 @@ static int __devinit ibmtr_probe1(struct net_device *dev, int PIOaddr) channel_def[cardpresent - 1], adapter_def(ti->adapter_type)); DPRINTK("using irq %d, PIOaddr %hx, %dK shared RAM.\n", irq, PIOaddr, ti->mapped_ram_size / 2); - DPRINTK("Hardware address : %02X:%02X:%02X:%02X:%02X:%02X\n", - dev->dev_addr[0], dev->dev_addr[1], dev->dev_addr[2], - dev->dev_addr[3], dev->dev_addr[4], dev->dev_addr[5]); + DPRINTK("Hardware address : %s\n", + print_mac(mac, dev->dev_addr)); if (ti->page_mask) DPRINTK("Shared RAM paging enabled. 
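(Editor's sketch, not part of the patch: the bulk of these token-ring hunks are one mechanical substitution; the private area is allocated together with the net_device by alloc_trdev(sizeof(priv)) and reached through netdev_priv(dev) instead of the removed dev->priv pointer. A hedged illustration of that idiom; demo_priv and the functions are illustrative names.)

#include <linux/netdevice.h>
#include <linux/trdevice.h>

struct demo_priv {		/* illustrative per-device state */
	int ring_speed;
};

static int demo_open(struct net_device *dev)
{
	/* netdev_priv() points at the area reserved behind struct net_device */
	struct demo_priv *priv = netdev_priv(dev);

	priv->ring_speed = 16;
	return 0;
}

static struct net_device *demo_alloc(void)
{
	/* alloc_trdev() sizes that area; no separate dev->priv assignment */
	return alloc_trdev(sizeof(struct demo_priv));
}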
" "Page size: %uK Shared Ram size %dK\n", @@ -823,7 +820,7 @@ static unsigned char __devinit get_sram_size(struct tok_info *adapt_info) static int __devinit trdev_init(struct net_device *dev) { - struct tok_info *ti = (struct tok_info *) dev->priv; + struct tok_info *ti = netdev_priv(dev); SET_PAGE(ti->srb_page); ti->open_failure = NO ; @@ -846,7 +843,7 @@ static int tok_init_card(struct net_device *dev) unsigned long i; PIOaddr = dev->base_addr; - ti = (struct tok_info *) dev->priv; + ti = netdev_priv(dev); /* Special processing for first interrupt after reset */ ti->do_tok_int = FIRST_INT; /* Reset adapter */ @@ -868,7 +865,7 @@ static int tok_init_card(struct net_device *dev) /*****************************************************************************/ static int tok_open(struct net_device *dev) { - struct tok_info *ti = (struct tok_info *) dev->priv; + struct tok_info *ti = netdev_priv(dev); int i; /*the case we were left in a failure state during a previous open */ @@ -927,7 +924,7 @@ static void tok_open_adapter(unsigned long dev_addr) struct tok_info *ti; int i; - ti = (struct tok_info *) dev->priv; + ti = netdev_priv(dev); SET_PAGE(ti->init_srb_page); writeb(~SRB_RESP_INT, ti->mmio + ACA_OFFSET + ACA_RESET + ISRP_ODD); for (i = 0; i < sizeof(struct dir_open_adapter); i++) @@ -962,7 +959,7 @@ static void tok_open_adapter(unsigned long dev_addr) static void open_sap(unsigned char type, struct net_device *dev) { int i; - struct tok_info *ti = (struct tok_info *) dev->priv; + struct tok_info *ti = netdev_priv(dev); SET_PAGE(ti->srb_page); for (i = 0; i < sizeof(struct dlc_open_sap); i++) @@ -986,7 +983,7 @@ static void open_sap(unsigned char type, struct net_device *dev) static void tok_set_multicast_list(struct net_device *dev) { - struct tok_info *ti = (struct tok_info *) dev->priv; + struct tok_info *ti = netdev_priv(dev); struct dev_mc_list *mclist; unsigned char address[4]; @@ -1029,7 +1026,7 @@ static int tok_send_packet(struct sk_buff *skb, struct net_device *dev) { struct tok_info *ti; unsigned long flags; - ti = (struct tok_info *) dev->priv; + ti = netdev_priv(dev); netif_stop_queue(dev); @@ -1051,7 +1048,7 @@ static int tok_send_packet(struct sk_buff *skb, struct net_device *dev) static int tok_close(struct net_device *dev) { - struct tok_info *ti = (struct tok_info *) dev->priv; + struct tok_info *ti = netdev_priv(dev); /* Important for PCMCIA hot unplug, otherwise, we'll pull the card, */ /* unloading the module from memory, and then if a timer pops, ouch */ @@ -1094,7 +1091,7 @@ static void __iomem *map_address(struct tok_info *ti, unsigned index, __u8 *page static void dir_open_adapter (struct net_device *dev) { - struct tok_info *ti = (struct tok_info *) dev->priv; + struct tok_info *ti = netdev_priv(dev); unsigned char ret_code; __u16 err; @@ -1179,7 +1176,7 @@ static irqreturn_t tok_interrupt(int irq, void *dev_id) #if TR_VERBOSE DPRINTK("Int from tok_driver, dev : %p irq%d\n", dev,irq); #endif - ti = (struct tok_info *) dev->priv; + ti = netdev_priv(dev); if (ti->sram_phys & 1) return IRQ_NONE; /* PCMCIA card extraction flag */ spin_lock(&(ti->lock)); @@ -1498,7 +1495,7 @@ static void initial_tok_int(struct net_device *dev) struct tok_info *ti; unsigned char init_status; /*BMS 12/2000*/ - ti = (struct tok_info *) dev->priv; + ti = netdev_priv(dev); ti->do_tok_int = NOT_FIRST; @@ -1542,7 +1539,7 @@ static void initial_tok_int(struct net_device *dev) ti->ring_speed = init_status & 0x01 ? 
16 : 4; DPRINTK("Initial interrupt : %d Mbps, shared RAM base %08x.\n", ti->ring_speed, (unsigned int)dev->mem_start); - ti->auto_speedsave=readb(ti->init_srb+INIT_STATUS_2_OFST)&4?TRUE:FALSE; + ti->auto_speedsave = (readb(ti->init_srb+INIT_STATUS_2_OFST) & 4) != 0; if (ti->open_mode == MANUAL) wake_up(&ti->wait_for_reset); else tok_open_adapter((unsigned long)dev); @@ -1560,7 +1557,7 @@ static void initial_tok_int(struct net_device *dev) static void tr_tx(struct net_device *dev) { - struct tok_info *ti = (struct tok_info *) dev->priv; + struct tok_info *ti = netdev_priv(dev); struct trh_hdr *trhdr = (struct trh_hdr *) ti->current_skb->data; unsigned int hdr_len; __u32 dhb=0,dhb_base; @@ -1674,7 +1671,7 @@ static void tr_tx(struct net_device *dev) static void tr_rx(struct net_device *dev) { - struct tok_info *ti = (struct tok_info *) dev->priv; + struct tok_info *ti = netdev_priv(dev); __u32 rbuffer; void __iomem *rbuf, *rbufdata, *llc; __u8 rbuffer_page = 0; @@ -1742,18 +1739,20 @@ static void tr_rx(struct net_device *dev) if (!IPv4_p) { void __iomem *trhhdr = rbuf + offsetof(struct rec_buf, data); - + u8 saddr[6]; + u8 daddr[6]; + DECLARE_MAC_BUF(mac); + DECLARE_MAC_BUF(mac2); + int i; + for (i = 0 ; i < 6 ; i++) + saddr[i] = readb(trhhdr + SADDR_OFST + i); + for (i = 0 ; i < 6 ; i++) + daddr[i] = readb(trhhdr + DADDR_OFST + i); DPRINTK("Probably non-IP frame received.\n"); DPRINTK("ssap: %02X dsap: %02X " - "saddr: %02X:%02X:%02X:%02X:%02X:%02X " - "daddr: %02X:%02X:%02X:%02X:%02X:%02X\n", + "saddr: %s daddr: %s\n", readb(llc + SSAP_OFST), readb(llc + DSAP_OFST), - readb(trhhdr+SADDR_OFST), readb(trhhdr+ SADDR_OFST+1), - readb(trhhdr+SADDR_OFST+2), readb(trhhdr+SADDR_OFST+3), - readb(trhhdr+SADDR_OFST+4), readb(trhhdr+SADDR_OFST+5), - readb(trhhdr+DADDR_OFST), readb(trhhdr+DADDR_OFST + 1), - readb(trhhdr+DADDR_OFST+2), readb(trhhdr+DADDR_OFST+3), - readb(trhhdr+DADDR_OFST+4), readb(trhhdr+DADDR_OFST+5)); + print_mac(mac, saddr), print_mac(mac2, daddr)); } #endif @@ -1846,7 +1845,7 @@ static void ibmtr_reset_timer(struct timer_list *tmr, struct net_device *dev) void tok_rerun(unsigned long dev_addr){ struct net_device *dev = (struct net_device *)dev_addr; - struct tok_info *ti = (struct tok_info *) dev->priv; + struct tok_info *ti = netdev_priv(dev); if ( ti->open_action == RESTART){ ti->do_tok_int = FIRST_INT; @@ -1868,7 +1867,7 @@ static void ibmtr_readlog(struct net_device *dev) { struct tok_info *ti; - ti = (struct tok_info *) dev->priv; + ti = netdev_priv(dev); ti->readlog_pending = 0; SET_PAGE(ti->srb_page); @@ -1891,7 +1890,7 @@ static struct net_device_stats *tok_get_stats(struct net_device *dev) { struct tok_info *toki; - toki = (struct tok_info *) dev->priv; + toki = netdev_priv(dev); return (struct net_device_stats *) &toki->tr_stats; } @@ -1899,7 +1898,7 @@ static struct net_device_stats *tok_get_stats(struct net_device *dev) static int ibmtr_change_mtu(struct net_device *dev, int mtu) { - struct tok_info *ti = (struct tok_info *) dev->priv; + struct tok_info *ti = netdev_priv(dev); if (ti->ring_speed == 16 && mtu > ti->maxmtu16) return -EINVAL; diff --git a/drivers/net/tokenring/lanstreamer.c b/drivers/net/tokenring/lanstreamer.c index 5d849c089a3b62776df3a8a54cd84f997ebaed4b..47d84cd2809770b3996a3a0fa65dd6f8eb39c3c0 100644 --- a/drivers/net/tokenring/lanstreamer.c +++ b/drivers/net/tokenring/lanstreamer.c @@ -123,6 +123,7 @@ #include #include +#include #include #include @@ -244,13 +245,12 @@ static int __devinit streamer_init_one(struct pci_dev *pdev, return -ENOMEM; } -
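(Editor's sketch, not part of the patch: the MAC-address printk rewrites throughout this series share one shape; a DECLARE_MAC_BUF() scratch buffer plus print_mac(), which formats the six bytes as xx:xx:xx:xx:xx:xx. A small illustration under that 2.6.24-era API; demo_show_addr is an illustrative helper.)

#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/if_ether.h>

static void demo_show_addr(struct net_device *dev)
{
	DECLARE_MAC_BUF(mac);	/* char[18] scratch space for the formatted string */

	printk(KERN_INFO "%s: station address %s\n",
	       dev->name, print_mac(mac, dev->dev_addr));
}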
SET_MODULE_OWNER(dev); - streamer_priv = dev->priv; + streamer_priv = netdev_priv(dev); #if STREAMER_NETWORK_MONITOR #ifdef CONFIG_PROC_FS if (!dev_streamer) - create_proc_read_entry("net/streamer_tr", 0, 0, + create_proc_read_entry("streamer_tr", 0, init_net.proc_net, streamer_proc_info, NULL); streamer_priv->next = dev_streamer; dev_streamer = streamer_priv; @@ -404,7 +404,7 @@ static void __devexit streamer_remove_one(struct pci_dev *pdev) return; } - streamer_priv=dev->priv; + streamer_priv=netdev_priv(dev); if (streamer_priv == NULL) { printk(KERN_ERR "lanstreamer::streamer_remove_one, ERROR dev->priv is NULL\n"); return; @@ -423,7 +423,7 @@ static void __devexit streamer_remove_one(struct pci_dev *pdev) } } if (!dev_streamer) - remove_proc_entry("net/streamer_tr", NULL); + remove_proc_entry("streamer_tr", init_net.proc_net); } #endif #endif @@ -447,8 +447,11 @@ static int streamer_reset(struct net_device *dev) unsigned int uaa_addr; struct sk_buff *skb = NULL; __u16 misr; +#if STREAMER_DEBUG + DECLARE_MAC_BUF(mac); +#endif - streamer_priv = (struct streamer_private *) dev->priv; + streamer_priv = netdev_priv(dev); streamer_mmio = streamer_priv->streamer_mmio; writew(readw(streamer_mmio + BCTL) | BCTL_SOFTRESET, streamer_mmio + BCTL); @@ -575,11 +578,8 @@ static int streamer_reset(struct net_device *dev) dev->dev_addr[i+1]= addr & 0xff; } #if STREAMER_DEBUG - printk("Adapter address: "); - for (i = 0; i < 6; i++) { - printk("%02x:", dev->dev_addr[i]); - } - printk("\n"); + printk("Adapter address: %s\n", + print_mac(mac, dev->dev_addr)); #endif } return 0; @@ -587,7 +587,7 @@ static int streamer_reset(struct net_device *dev) static int streamer_open(struct net_device *dev) { - struct streamer_private *streamer_priv = (struct streamer_private *) dev->priv; + struct streamer_private *streamer_priv = netdev_priv(dev); __u8 __iomem *streamer_mmio = streamer_priv->streamer_mmio; unsigned long flags; char open_error[255]; @@ -904,7 +904,7 @@ static int streamer_open(struct net_device *dev) static void streamer_rx(struct net_device *dev) { struct streamer_private *streamer_priv = - (struct streamer_private *) dev->priv; + netdev_priv(dev); __u8 __iomem *streamer_mmio = streamer_priv->streamer_mmio; struct streamer_rx_desc *rx_desc; int rx_ring_last_received, length, frame_length, buffer_cnt = 0; @@ -1029,7 +1029,7 @@ static irqreturn_t streamer_interrupt(int irq, void *dev_id) { struct net_device *dev = (struct net_device *) dev_id; struct streamer_private *streamer_priv = - (struct streamer_private *) dev->priv; + netdev_priv(dev); __u8 __iomem *streamer_mmio = streamer_priv->streamer_mmio; __u16 sisr; __u16 misr; @@ -1152,7 +1152,7 @@ static irqreturn_t streamer_interrupt(int irq, void *dev_id) static int streamer_xmit(struct sk_buff *skb, struct net_device *dev) { struct streamer_private *streamer_priv = - (struct streamer_private *) dev->priv; + netdev_priv(dev); __u8 __iomem *streamer_mmio = streamer_priv->streamer_mmio; unsigned long flags ; @@ -1203,7 +1203,7 @@ static int streamer_xmit(struct sk_buff *skb, struct net_device *dev) static int streamer_close(struct net_device *dev) { struct streamer_private *streamer_priv = - (struct streamer_private *) dev->priv; + netdev_priv(dev); __u8 __iomem *streamer_mmio = streamer_priv->streamer_mmio; unsigned long flags; int i; @@ -1269,7 +1269,7 @@ static int streamer_close(struct net_device *dev) static void streamer_set_rx_mode(struct net_device *dev) { struct streamer_private *streamer_priv = - (struct streamer_private *) dev->priv; + 
netdev_priv(dev); __u8 __iomem *streamer_mmio = streamer_priv->streamer_mmio; __u8 options = 0; struct dev_mc_list *dmi; @@ -1328,7 +1328,7 @@ static void streamer_set_rx_mode(struct net_device *dev) static void streamer_srb_bh(struct net_device *dev) { - struct streamer_private *streamer_priv = (struct streamer_private *) dev->priv; + struct streamer_private *streamer_priv = netdev_priv(dev); __u8 __iomem *streamer_mmio = streamer_priv->streamer_mmio; __u16 srb_word; @@ -1493,14 +1493,14 @@ static void streamer_srb_bh(struct net_device *dev) static struct net_device_stats *streamer_get_stats(struct net_device *dev) { struct streamer_private *streamer_priv; - streamer_priv = (struct streamer_private *) dev->priv; + streamer_priv = netdev_priv(dev); return (struct net_device_stats *) &streamer_priv->streamer_stats; } static int streamer_set_mac_address(struct net_device *dev, void *addr) { struct sockaddr *saddr = addr; - struct streamer_private *streamer_priv = (struct streamer_private *) dev->priv; + struct streamer_private *streamer_priv = netdev_priv(dev); if (netif_running(dev)) { @@ -1525,7 +1525,7 @@ static int streamer_set_mac_address(struct net_device *dev, void *addr) static void streamer_arb_cmd(struct net_device *dev) { struct streamer_private *streamer_priv = - (struct streamer_private *) dev->priv; + netdev_priv(dev); __u8 __iomem *streamer_mmio = streamer_priv->streamer_mmio; __u8 header_len; __u16 frame_len, buffer_len; @@ -1539,6 +1539,7 @@ static void streamer_arb_cmd(struct net_device *dev) #if STREAMER_NETWORK_MONITOR struct trh_hdr *mac_hdr; + DECLARE_MAC_BUF(mac); #endif writew(streamer_priv->arb, streamer_mmio + LAPA); @@ -1611,15 +1612,11 @@ static void streamer_arb_cmd(struct net_device *dev) dev->name); mac_hdr = tr_hdr(mac_frame); printk(KERN_WARNING - "%s: MAC Frame Dest. Addr: %02x:%02x:%02x:%02x:%02x:%02x \n", - dev->name, mac_hdr->daddr[0], mac_hdr->daddr[1], - mac_hdr->daddr[2], mac_hdr->daddr[3], - mac_hdr->daddr[4], mac_hdr->daddr[5]); + "%s: MAC Frame Dest. Addr: %s\n", + dev->name, print_mac(mac, mac_hdr->daddr)); printk(KERN_WARNING - "%s: MAC Frame Srce. Addr: %02x:%02x:%02x:%02x:%02x:%02x \n", - dev->name, mac_hdr->saddr[0], mac_hdr->saddr[1], - mac_hdr->saddr[2], mac_hdr->saddr[3], - mac_hdr->saddr[4], mac_hdr->saddr[5]); + "%s: MAC Frame Srce. 
Addr: %s\n", + dev->name, print_mac(mac, mac_hdr->saddr)); #endif netif_rx(mac_frame); @@ -1740,7 +1737,7 @@ static void streamer_arb_cmd(struct net_device *dev) static void streamer_asb_bh(struct net_device *dev) { struct streamer_private *streamer_priv = - (struct streamer_private *) dev->priv; + netdev_priv(dev); __u8 __iomem *streamer_mmio = streamer_priv->streamer_mmio; if (streamer_priv->asb_queued == 1) @@ -1784,7 +1781,7 @@ static void streamer_asb_bh(struct net_device *dev) static int streamer_change_mtu(struct net_device *dev, int mtu) { struct streamer_private *streamer_priv = - (struct streamer_private *) dev->priv; + netdev_priv(dev); __u16 max_mtu; if (streamer_priv->streamer_ring_speed == 4) @@ -1848,12 +1845,14 @@ static int streamer_proc_info(char *buffer, char **start, off_t offset, static int sprintf_info(char *buffer, struct net_device *dev) { struct streamer_private *streamer_priv = - (struct streamer_private *) dev->priv; + netdev_priv(dev); __u8 __iomem *streamer_mmio = streamer_priv->streamer_mmio; struct streamer_adapter_addr_table sat; struct streamer_parameters_table spt; int size = 0; int i; + DECLARE_MAC_BUF(mac); + DECLARE_MAC_BUF(mac2); writew(streamer_priv->streamer_addr_table_addr, streamer_mmio + LAPA); for (i = 0; i < 14; i += 2) { @@ -1875,37 +1874,30 @@ static int sprintf_info(char *buffer, struct net_device *dev) size = sprintf(buffer, "\n%6s: Adapter Address : Node Address : Functional Addr\n", dev->name); size += sprintf(buffer + size, - "%6s: %02x:%02x:%02x:%02x:%02x:%02x : %02x:%02x:%02x:%02x:%02x:%02x : %02x:%02x:%02x:%02x\n", - dev->name, dev->dev_addr[0], dev->dev_addr[1], - dev->dev_addr[2], dev->dev_addr[3], dev->dev_addr[4], - dev->dev_addr[5], sat.node_addr[0], sat.node_addr[1], - sat.node_addr[2], sat.node_addr[3], sat.node_addr[4], - sat.node_addr[5], sat.func_addr[0], sat.func_addr[1], - sat.func_addr[2], sat.func_addr[3]); + "%6s: %s : %s : %02x:%02x:%02x:%02x\n", + dev->name, print_mac(mac, dev->dev_addr), + print_mac(mac2, sat.node_addr), + sat.func_addr[0], sat.func_addr[1], + sat.func_addr[2], sat.func_addr[3]); size += sprintf(buffer + size, "\n%6s: Token Ring Parameters Table:\n", dev->name); size += sprintf(buffer + size, "%6s: Physical Addr : Up Node Address : Poll Address : AccPri : Auth Src : Att Code :\n", dev->name); size += sprintf(buffer + size, - "%6s: %02x:%02x:%02x:%02x : %02x:%02x:%02x:%02x:%02x:%02x : %02x:%02x:%02x:%02x:%02x:%02x : %04x : %04x : %04x :\n", + "%6s: %02x:%02x:%02x:%02x : %s : %s : %04x : %04x : %04x :\n", dev->name, spt.phys_addr[0], spt.phys_addr[1], spt.phys_addr[2], spt.phys_addr[3], - spt.up_node_addr[0], spt.up_node_addr[1], - spt.up_node_addr[2], spt.up_node_addr[3], - spt.up_node_addr[4], spt.up_node_addr[4], - spt.poll_addr[0], spt.poll_addr[1], spt.poll_addr[2], - spt.poll_addr[3], spt.poll_addr[4], spt.poll_addr[5], + print_mac(mac, spt.up_node_addr), + print_mac(mac2, spt.poll_addr), ntohs(spt.acc_priority), ntohs(spt.auth_source_class), ntohs(spt.att_code)); size += sprintf(buffer + size, "%6s: Source Address : Bcn T : Maj.
V : Lan St : Lcl Rg : Mon Err : Frame Correl : \n", dev->name); size += sprintf(buffer + size, - "%6s: %02x:%02x:%02x:%02x:%02x:%02x : %04x : %04x : %04x : %04x : %04x : %04x : \n", - dev->name, spt.source_addr[0], spt.source_addr[1], - spt.source_addr[2], spt.source_addr[3], - spt.source_addr[4], spt.source_addr[5], + "%6s: %s : %04x : %04x : %04x : %04x : %04x : %04x : \n", + dev->name, print_mac(mac, spt.source_addr), ntohs(spt.beacon_type), ntohs(spt.major_vector), ntohs(spt.lan_status), ntohs(spt.local_ring), ntohs(spt.mon_error), ntohs(spt.frame_correl)); @@ -1914,14 +1906,12 @@ static int sprintf_info(char *buffer, struct net_device *dev) dev->name); size += sprintf(buffer + size, - "%6s: : %02x : %02x : %02x:%02x:%02x:%02x:%02x:%02x : %02x:%02x:%02x:%02x : \n", + "%6s: : %02x : %02x : %s : %02x:%02x:%02x:%02x : \n", dev->name, ntohs(spt.beacon_transmit), - ntohs(spt.beacon_receive), spt.beacon_naun[0], - spt.beacon_naun[1], spt.beacon_naun[2], - spt.beacon_naun[3], spt.beacon_naun[4], - spt.beacon_naun[5], spt.beacon_phys[0], - spt.beacon_phys[1], spt.beacon_phys[2], - spt.beacon_phys[3]); + ntohs(spt.beacon_receive), + print_mac(mac, spt.beacon_naun), + spt.beacon_phys[0], spt.beacon_phys[1], + spt.beacon_phys[2], spt.beacon_phys[3]); return size; } #endif diff --git a/drivers/net/tokenring/madgemc.c b/drivers/net/tokenring/madgemc.c index f8f4d74f01f1e4959640d8dc0ad35fcd15afe4dd..5a4151362fc064034389861794e2cee6cc0e0ddb 100644 --- a/drivers/net/tokenring/madgemc.c +++ b/drivers/net/tokenring/madgemc.c @@ -151,7 +151,8 @@ static int __devinit madgemc_probe(struct device *device) struct net_local *tp; struct card_info *card; struct mca_device *mdev = to_mca_device(device); - int ret = 0, i = 0; + int ret = 0; + DECLARE_MAC_BUF(mac); if (versionprinted++ == 0) printk("%s", version); @@ -168,7 +169,6 @@ static int __devinit madgemc_probe(struct device *device) goto getout; } - SET_MODULE_OWNER(dev); dev->dma = 0; card = kmalloc(sizeof(struct card_info), GFP_KERNEL); @@ -323,11 +323,8 @@ static int __devinit madgemc_probe(struct device *device) mca_device_set_name(mdev, (card->cardtype == 0x08)?MADGEMC16_CARDNAME:MADGEMC32_CARDNAME); mca_set_adapter_procfn(mdev->slot, madgemc_mcaproc, dev); - printk("%s: Ring Station Address: ", dev->name); - printk("%2.2x", dev->dev_addr[0]); - for (i = 1; i < 6; i++) - printk(":%2.2x", dev->dev_addr[i]); - printk("\n"); + printk("%s: Ring Station Address: %s\n", + dev->name, print_mac(mac, dev->dev_addr)); if (tmsdev_init(dev, device)) { printk("%s: unable to get memory for dev->priv.\n", @@ -690,14 +687,14 @@ static int madgemc_close(struct net_device *dev) static int madgemc_mcaproc(char *buf, int slot, void *d) { struct net_device *dev = (struct net_device *)d; - struct net_local *tp = dev->priv; + struct net_local *tp = netdev_priv(dev); struct card_info *curcard = tp->tmspriv; int len = 0; + DECLARE_MAC_BUF(mac); len += sprintf(buf+len, "-------\n"); if (curcard) { struct net_local *tp = netdev_priv(dev); - int i; len += sprintf(buf+len, "Card Revision: %d\n", curcard->cardrev); len += sprintf(buf+len, "RAM Size: %dkb\n", curcard->ramsize); @@ -717,11 +714,8 @@ static int madgemc_mcaproc(char *buf, int slot, void *d) } len += sprintf(buf+len, " (%s)\n", (curcard->fairness)?"Unfair":"Fair"); - len += sprintf(buf+len, "Ring Station Address: "); - len += sprintf(buf+len, "%2.2x", dev->dev_addr[0]); - for (i = 1; i < 6; i++) - len += sprintf(buf+len, " %2.2x", dev->dev_addr[i]); - len += sprintf(buf+len, "\n"); + len += sprintf(buf+len, "Ring 
Station Address: %s\n", + print_mac(mac, dev->dev_addr)); } else len += sprintf(buf+len, "Card not configured\n"); @@ -736,7 +730,7 @@ static int __devexit madgemc_remove(struct device *device) BUG_ON(!dev); - tp = dev->priv; + tp = netdev_priv(dev); card = tp->tmspriv; kfree(card); tp->tmspriv = NULL; diff --git a/drivers/net/tokenring/olympic.c b/drivers/net/tokenring/olympic.c index 09b3cfb8e809933a30af6f7b58c487f2e0af3eaf..74c1f0f189f54f227f760661e3798c9e8cd5b137 100644 --- a/drivers/net/tokenring/olympic.c +++ b/drivers/net/tokenring/olympic.c @@ -102,6 +102,7 @@ #include #include +#include #include #include @@ -219,14 +220,14 @@ static int __devinit olympic_probe(struct pci_dev *pdev, const struct pci_device goto op_release_dev; } - olympic_priv = dev->priv ; + olympic_priv = netdev_priv(dev) ; spin_lock_init(&olympic_priv->olympic_lock) ; init_waitqueue_head(&olympic_priv->srb_wait); init_waitqueue_head(&olympic_priv->trb_wait); #if OLYMPIC_DEBUG - printk(KERN_INFO "pci_device: %p, dev:%p, dev->priv: %p\n", pdev, dev, dev->priv); + printk(KERN_INFO "pci_device: %p, dev:%p, dev->priv: %p\n", pdev, dev, netdev_priv(dev)); #endif dev->irq=pdev->irq; dev->base_addr=pci_resource_start(pdev, 0); @@ -260,7 +261,6 @@ static int __devinit olympic_probe(struct pci_dev *pdev, const struct pci_device dev->set_multicast_list=&olympic_set_rx_mode; dev->get_stats=&olympic_get_stats ; dev->set_mac_address=&olympic_set_mac_address ; - SET_MODULE_OWNER(dev) ; SET_NETDEV_DEV(dev, &pdev->dev); pci_set_drvdata(pdev,dev) ; @@ -268,9 +268,9 @@ static int __devinit olympic_probe(struct pci_dev *pdev, const struct pci_device printk("Olympic: %s registered as: %s\n",olympic_priv->olympic_card_name,dev->name); if (olympic_priv->olympic_network_monitor) { /* Must go after register_netdev as we need the device name */ char proc_name[20] ; - strcpy(proc_name,"net/olympic_") ; + strcpy(proc_name,"olympic_") ; strcat(proc_name,dev->name) ; - create_proc_read_entry(proc_name,0,NULL,olympic_proc_info,(void *)dev) ; + create_proc_read_entry(proc_name,0,init_net.proc_net,olympic_proc_info,(void *)dev) ; printk("Olympic: Network Monitor information: /proc/%s\n",proc_name); } return 0 ; @@ -297,7 +297,7 @@ static int __devinit olympic_init(struct net_device *dev) unsigned long t; unsigned int uaa_addr; - olympic_priv=(struct olympic_private *)dev->priv; + olympic_priv=netdev_priv(dev); olympic_mmio=olympic_priv->olympic_mmio; printk("%s \n", version); @@ -418,14 +418,15 @@ static int __devinit olympic_init(struct net_device *dev) writel(uaa_addr,olympic_mmio+LAPA); adapter_addr=olympic_priv->olympic_lap + (uaa_addr & (~0xf800)); + memcpy_fromio(&dev->dev_addr[0], adapter_addr,6); + #if OLYMPIC_DEBUG - printk("adapter address: %02x:%02x:%02x:%02x:%02x:%02x\n", - readb(adapter_addr), readb(adapter_addr+1),readb(adapter_addr+2), - readb(adapter_addr+3),readb(adapter_addr+4),readb(adapter_addr+5)); + { + DECLARE_MAC_BUF(mac); + printk("adapter address: %s\n", print_mac(mac, dev->dev_addr)); + } #endif - memcpy_fromio(&dev->dev_addr[0], adapter_addr,6); - olympic_priv->olympic_addr_table_addr = swab16(readw(init_srb + 12)); olympic_priv->olympic_parms_addr = swab16(readw(init_srb + 14)); @@ -435,11 +436,12 @@ static int __devinit olympic_init(struct net_device *dev) static int olympic_open(struct net_device *dev) { - struct olympic_private *olympic_priv=(struct olympic_private *)dev->priv; + struct olympic_private *olympic_priv=netdev_priv(dev); u8 __iomem *olympic_mmio=olympic_priv->olympic_mmio,*init_srb; unsigned long 
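(Editor's sketch, not part of the patch: the /proc changes in the lanstreamer and olympic hunks drop the hand-built "net/" prefix and register directly under the per-namespace root, init_net.proc_net. A minimal illustration of that registration/removal pair; the entry name and read_proc callback are illustrative.)

#include <linux/proc_fs.h>
#include <net/net_namespace.h>

static int demo_proc_info(char *buf, char **start, off_t off,
			  int count, int *eof, void *data)
{
	*eof = 1;
	return sprintf(buf, "demo\n");	/* illustrative read_proc body */
}

static void demo_proc_register(void)
{
	/* shows up as /proc/net/demo_tr in the initial namespace */
	create_proc_read_entry("demo_tr", 0, init_net.proc_net,
			       demo_proc_info, NULL);
}

static void demo_proc_unregister(void)
{
	remove_proc_entry("demo_tr", init_net.proc_net);
}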
flags, t; int i, open_finished = 1 ; u8 resp, err; + DECLARE_MAC_BUF(mac); DECLARE_WAITQUEUE(wait,current) ; @@ -567,14 +569,8 @@ static int olympic_open(struct net_device *dev) goto out; case 0x32: - printk(KERN_WARNING "%s: Invalid LAA: %02x:%02x:%02x:%02x:%02x:%02x\n", - dev->name, - olympic_priv->olympic_laa[0], - olympic_priv->olympic_laa[1], - olympic_priv->olympic_laa[2], - olympic_priv->olympic_laa[3], - olympic_priv->olympic_laa[4], - olympic_priv->olympic_laa[5]) ; + printk(KERN_WARNING "%s: Invalid LAA: %s\n", + dev->name, print_mac(mac, olympic_priv->olympic_laa)); goto out; default: @@ -704,30 +700,26 @@ static int olympic_open(struct net_device *dev) #endif if (olympic_priv->olympic_network_monitor) { - u8 __iomem *oat ; - u8 __iomem *opt ; - oat = (olympic_priv->olympic_lap + olympic_priv->olympic_addr_table_addr) ; - opt = (olympic_priv->olympic_lap + olympic_priv->olympic_parms_addr) ; - - printk("%s: Node Address: %02x:%02x:%02x:%02x:%02x:%02x\n",dev->name, - readb(oat+offsetof(struct olympic_adapter_addr_table,node_addr)), - readb(oat+offsetof(struct olympic_adapter_addr_table,node_addr)+1), - readb(oat+offsetof(struct olympic_adapter_addr_table,node_addr)+2), - readb(oat+offsetof(struct olympic_adapter_addr_table,node_addr)+3), - readb(oat+offsetof(struct olympic_adapter_addr_table,node_addr)+4), - readb(oat+offsetof(struct olympic_adapter_addr_table,node_addr)+5)); + u8 __iomem *oat; + u8 __iomem *opt; + int i; + u8 addr[6]; + DECLARE_MAC_BUF(mac); + oat = (olympic_priv->olympic_lap + olympic_priv->olympic_addr_table_addr); + opt = (olympic_priv->olympic_lap + olympic_priv->olympic_parms_addr); + + for (i = 0; i < 6; i++) + addr[i] = readb(oat+offsetof(struct olympic_adapter_addr_table,node_addr)+i); + printk("%s: Node Address: %s\n",dev->name, print_mac(mac, addr)); printk("%s: Functional Address: %02x:%02x:%02x:%02x\n",dev->name, readb(oat+offsetof(struct olympic_adapter_addr_table,func_addr)), readb(oat+offsetof(struct olympic_adapter_addr_table,func_addr)+1), readb(oat+offsetof(struct olympic_adapter_addr_table,func_addr)+2), readb(oat+offsetof(struct olympic_adapter_addr_table,func_addr)+3)); - printk("%s: NAUN Address: %02x:%02x:%02x:%02x:%02x:%02x\n",dev->name, - readb(opt+offsetof(struct olympic_parameters_table, up_node_addr)), - readb(opt+offsetof(struct olympic_parameters_table, up_node_addr)+1), - readb(opt+offsetof(struct olympic_parameters_table, up_node_addr)+2), - readb(opt+offsetof(struct olympic_parameters_table, up_node_addr)+3), - readb(opt+offsetof(struct olympic_parameters_table, up_node_addr)+4), - readb(opt+offsetof(struct olympic_parameters_table, up_node_addr)+5)); + + for (i = 0; i < 6; i++) + addr[i] = readb(opt+offsetof(struct olympic_parameters_table, up_node_addr)+i); + printk("%s: NAUN Address: %s\n",dev->name, print_mac(mac, addr)); } netif_start_queue(dev); @@ -755,7 +747,7 @@ static int olympic_open(struct net_device *dev) */ static void olympic_rx(struct net_device *dev) { - struct olympic_private *olympic_priv=(struct olympic_private *)dev->priv; + struct olympic_private *olympic_priv=netdev_priv(dev); u8 __iomem *olympic_mmio=olympic_priv->olympic_mmio; struct olympic_rx_status *rx_status; struct olympic_rx_desc *rx_desc ; @@ -897,7 +889,7 @@ static void olympic_rx(struct net_device *dev) static void olympic_freemem(struct net_device *dev) { - struct olympic_private *olympic_priv=(struct olympic_private *)dev->priv; + struct olympic_private *olympic_priv=netdev_priv(dev); int i; for(i=0;ipriv; + struct olympic_private 
*olympic_priv=netdev_priv(dev); u8 __iomem *olympic_mmio=olympic_priv->olympic_mmio; u32 sisr; u8 __iomem *adapter_check_area ; @@ -1046,7 +1038,7 @@ static irqreturn_t olympic_interrupt(int irq, void *dev_id) static int olympic_xmit(struct sk_buff *skb, struct net_device *dev) { - struct olympic_private *olympic_priv=(struct olympic_private *)dev->priv; + struct olympic_private *olympic_priv=netdev_priv(dev); u8 __iomem *olympic_mmio=olympic_priv->olympic_mmio; unsigned long flags ; @@ -1077,7 +1069,7 @@ static int olympic_xmit(struct sk_buff *skb, struct net_device *dev) static int olympic_close(struct net_device *dev) { - struct olympic_private *olympic_priv=(struct olympic_private *)dev->priv; + struct olympic_private *olympic_priv=netdev_priv(dev); u8 __iomem *olympic_mmio=olympic_priv->olympic_mmio,*srb; unsigned long t,flags; @@ -1147,7 +1139,7 @@ static int olympic_close(struct net_device *dev) static void olympic_set_rx_mode(struct net_device *dev) { - struct olympic_private *olympic_priv = (struct olympic_private *) dev->priv ; + struct olympic_private *olympic_priv = netdev_priv(dev); u8 __iomem *olympic_mmio = olympic_priv->olympic_mmio ; u8 options = 0; u8 __iomem *srb; @@ -1215,7 +1207,7 @@ static void olympic_set_rx_mode(struct net_device *dev) static void olympic_srb_bh(struct net_device *dev) { - struct olympic_private *olympic_priv = (struct olympic_private *) dev->priv ; + struct olympic_private *olympic_priv = netdev_priv(dev); u8 __iomem *olympic_mmio = olympic_priv->olympic_mmio ; u8 __iomem *srb; @@ -1361,14 +1353,14 @@ static void olympic_srb_bh(struct net_device *dev) static struct net_device_stats * olympic_get_stats(struct net_device *dev) { struct olympic_private *olympic_priv ; - olympic_priv=(struct olympic_private *) dev->priv; + olympic_priv=netdev_priv(dev); return (struct net_device_stats *) &olympic_priv->olympic_stats; } static int olympic_set_mac_address (struct net_device *dev, void *addr) { struct sockaddr *saddr = addr ; - struct olympic_private *olympic_priv = (struct olympic_private *)dev->priv ; + struct olympic_private *olympic_priv = netdev_priv(dev); if (netif_running(dev)) { printk(KERN_WARNING "%s: Cannot set mac/laa address while card is open\n", dev->name) ; @@ -1389,7 +1381,7 @@ static int olympic_set_mac_address (struct net_device *dev, void *addr) static void olympic_arb_cmd(struct net_device *dev) { - struct olympic_private *olympic_priv = (struct olympic_private *) dev->priv; + struct olympic_private *olympic_priv = netdev_priv(dev); u8 __iomem *olympic_mmio=olympic_priv->olympic_mmio; u8 __iomem *arb_block, *asb_block, *srb ; u8 header_len ; @@ -1445,11 +1437,14 @@ static void olympic_arb_cmd(struct net_device *dev) mac_frame->protocol = tr_type_trans(mac_frame, dev); if (olympic_priv->olympic_network_monitor) { - struct trh_hdr *mac_hdr ; - printk(KERN_WARNING "%s: Received MAC Frame, details: \n",dev->name) ; + struct trh_hdr *mac_hdr; + DECLARE_MAC_BUF(mac); + printk(KERN_WARNING "%s: Received MAC Frame, details: \n",dev->name); mac_hdr = tr_hdr(mac_frame); - printk(KERN_WARNING "%s: MAC Frame Dest. Addr: %02x:%02x:%02x:%02x:%02x:%02x \n", dev->name , mac_hdr->daddr[0], mac_hdr->daddr[1], mac_hdr->daddr[2], mac_hdr->daddr[3], mac_hdr->daddr[4], mac_hdr->daddr[5]) ; - printk(KERN_WARNING "%s: MAC Frame Srce. Addr: %02x:%02x:%02x:%02x:%02x:%02x \n", dev->name , mac_hdr->saddr[0], mac_hdr->saddr[1], mac_hdr->saddr[2], mac_hdr->saddr[3], mac_hdr->saddr[4], mac_hdr->saddr[5]) ; + printk(KERN_WARNING "%s: MAC Frame Dest. 
Addr: %s\n", + dev->name, print_mac(mac, mac_hdr->daddr)); + printk(KERN_WARNING "%s: MAC Frame Srce. Addr: %s\n", + dev->name, print_mac(mac, mac_hdr->saddr)); } netif_rx(mac_frame); dev->last_rx = jiffies; @@ -1575,7 +1570,7 @@ static void olympic_arb_cmd(struct net_device *dev) static void olympic_asb_bh(struct net_device *dev) { - struct olympic_private *olympic_priv = (struct olympic_private *) dev->priv ; + struct olympic_private *olympic_priv = netdev_priv(dev); u8 __iomem *arb_block, *asb_block ; arb_block = (olympic_priv->olympic_lap + olympic_priv->arb) ; @@ -1615,7 +1610,7 @@ static void olympic_asb_bh(struct net_device *dev) static int olympic_change_mtu(struct net_device *dev, int mtu) { - struct olympic_private *olympic_priv = (struct olympic_private *) dev->priv; + struct olympic_private *olympic_priv = netdev_priv(dev); u16 max_mtu ; if (olympic_priv->olympic_ring_speed == 4) @@ -1637,33 +1632,31 @@ static int olympic_change_mtu(struct net_device *dev, int mtu) static int olympic_proc_info(char *buffer, char **start, off_t offset, int length, int *eof, void *data) { struct net_device *dev = (struct net_device *)data ; - struct olympic_private *olympic_priv=(struct olympic_private *)dev->priv; + struct olympic_private *olympic_priv=netdev_priv(dev); u8 __iomem *oat = (olympic_priv->olympic_lap + olympic_priv->olympic_addr_table_addr) ; u8 __iomem *opt = (olympic_priv->olympic_lap + olympic_priv->olympic_parms_addr) ; int size = 0 ; int len=0; off_t begin=0; off_t pos=0; - + u8 addr[6]; + u8 addr2[6]; + int i; + DECLARE_MAC_BUF(mac); + DECLARE_MAC_BUF(mac2); + size = sprintf(buffer, "IBM Pit/Pit-Phy/Olympic Chipset Token Ring Adapter %s\n",dev->name); size += sprintf(buffer+size, "\n%6s: Adapter Address : Node Address : Functional Addr\n", dev->name); - size += sprintf(buffer+size, "%6s: %02x:%02x:%02x:%02x:%02x:%02x : %02x:%02x:%02x:%02x:%02x:%02x : %02x:%02x:%02x:%02x\n", + for (i = 0 ; i < 6 ; i++) + addr[i] = readb(oat+offsetof(struct olympic_adapter_addr_table,node_addr) + i); + + size += sprintf(buffer+size, "%6s: %s : %s : %02x:%02x:%02x:%02x\n", dev->name, - dev->dev_addr[0], - dev->dev_addr[1], - dev->dev_addr[2], - dev->dev_addr[3], - dev->dev_addr[4], - dev->dev_addr[5], - readb(oat+offsetof(struct olympic_adapter_addr_table,node_addr)), - readb(oat+offsetof(struct olympic_adapter_addr_table,node_addr)+1), - readb(oat+offsetof(struct olympic_adapter_addr_table,node_addr)+2), - readb(oat+offsetof(struct olympic_adapter_addr_table,node_addr)+3), - readb(oat+offsetof(struct olympic_adapter_addr_table,node_addr)+4), - readb(oat+offsetof(struct olympic_adapter_addr_table,node_addr)+5), + print_mac(mac, dev->dev_addr), + print_mac(mac2, addr), readb(oat+offsetof(struct olympic_adapter_addr_table,func_addr)), readb(oat+offsetof(struct olympic_adapter_addr_table,func_addr)+1), readb(oat+offsetof(struct olympic_adapter_addr_table,func_addr)+2), @@ -1673,25 +1666,20 @@ static int olympic_proc_info(char *buffer, char **start, off_t offset, int lengt size += sprintf(buffer+size, "%6s: Physical Addr : Up Node Address : Poll Address : AccPri : Auth Src : Att Code :\n", dev->name) ; - - size += sprintf(buffer+size, "%6s: %02x:%02x:%02x:%02x : %02x:%02x:%02x:%02x:%02x:%02x : %02x:%02x:%02x:%02x:%02x:%02x : %04x : %04x : %04x :\n", + + for (i = 0 ; i < 6 ; i++) + addr[i] = readb(opt+offsetof(struct olympic_parameters_table, up_node_addr) + i); + for (i = 0 ; i < 6 ; i++) + addr2[i] = readb(opt+offsetof(struct olympic_parameters_table, poll_addr) + i); + + size += 
sprintf(buffer+size, "%6s: %02x:%02x:%02x:%02x : %s : %s : %04x : %04x : %04x :\n", dev->name, readb(opt+offsetof(struct olympic_parameters_table, phys_addr)), readb(opt+offsetof(struct olympic_parameters_table, phys_addr)+1), readb(opt+offsetof(struct olympic_parameters_table, phys_addr)+2), readb(opt+offsetof(struct olympic_parameters_table, phys_addr)+3), - readb(opt+offsetof(struct olympic_parameters_table, up_node_addr)), - readb(opt+offsetof(struct olympic_parameters_table, up_node_addr)+1), - readb(opt+offsetof(struct olympic_parameters_table, up_node_addr)+2), - readb(opt+offsetof(struct olympic_parameters_table, up_node_addr)+3), - readb(opt+offsetof(struct olympic_parameters_table, up_node_addr)+4), - readb(opt+offsetof(struct olympic_parameters_table, up_node_addr)+5), - readb(opt+offsetof(struct olympic_parameters_table, poll_addr)), - readb(opt+offsetof(struct olympic_parameters_table, poll_addr)+1), - readb(opt+offsetof(struct olympic_parameters_table, poll_addr)+2), - readb(opt+offsetof(struct olympic_parameters_table, poll_addr)+3), - readb(opt+offsetof(struct olympic_parameters_table, poll_addr)+4), - readb(opt+offsetof(struct olympic_parameters_table, poll_addr)+5), + print_mac(mac, addr), + print_mac(mac2, addr2), swab16(readw(opt+offsetof(struct olympic_parameters_table, acc_priority))), swab16(readw(opt+offsetof(struct olympic_parameters_table, auth_source_class))), swab16(readw(opt+offsetof(struct olympic_parameters_table, att_code)))); @@ -1699,14 +1687,11 @@ static int olympic_proc_info(char *buffer, char **start, off_t offset, int lengt size += sprintf(buffer+size, "%6s: Source Address : Bcn T : Maj. V : Lan St : Lcl Rg : Mon Err : Frame Correl : \n", dev->name) ; - size += sprintf(buffer+size, "%6s: %02x:%02x:%02x:%02x:%02x:%02x : %04x : %04x : %04x : %04x : %04x : %04x : \n", + for (i = 0 ; i < 6 ; i++) + addr[i] = readb(opt+offsetof(struct olympic_parameters_table, source_addr) + i); + size += sprintf(buffer+size, "%6s: %s : %04x : %04x : %04x : %04x : %04x : %04x : \n", dev->name, - readb(opt+offsetof(struct olympic_parameters_table, source_addr)), - readb(opt+offsetof(struct olympic_parameters_table, source_addr)+1), - readb(opt+offsetof(struct olympic_parameters_table, source_addr)+2), - readb(opt+offsetof(struct olympic_parameters_table, source_addr)+3), - readb(opt+offsetof(struct olympic_parameters_table, source_addr)+4), - readb(opt+offsetof(struct olympic_parameters_table, source_addr)+5), + print_mac(mac, addr), swab16(readw(opt+offsetof(struct olympic_parameters_table, beacon_type))), swab16(readw(opt+offsetof(struct olympic_parameters_table, major_vector))), swab16(readw(opt+offsetof(struct olympic_parameters_table, lan_status))), @@ -1717,16 +1702,13 @@ static int olympic_proc_info(char *buffer, char **start, off_t offset, int lengt size += sprintf(buffer+size, "%6s: Beacon Details : Tx : Rx : NAUN Node Address : NAUN Node Phys : \n", dev->name) ; - size += sprintf(buffer+size, "%6s: : %02x : %02x : %02x:%02x:%02x:%02x:%02x:%02x : %02x:%02x:%02x:%02x : \n", + for (i = 0 ; i < 6 ; i++) + addr[i] = readb(opt+offsetof(struct olympic_parameters_table, beacon_naun) + i); + size += sprintf(buffer+size, "%6s: : %02x : %02x : %s : %02x:%02x:%02x:%02x : \n", dev->name, swab16(readw(opt+offsetof(struct olympic_parameters_table, beacon_transmit))), swab16(readw(opt+offsetof(struct olympic_parameters_table, beacon_receive))), - readb(opt+offsetof(struct olympic_parameters_table, beacon_naun)), - readb(opt+offsetof(struct olympic_parameters_table, 
beacon_naun)+1), - readb(opt+offsetof(struct olympic_parameters_table, beacon_naun)+2), - readb(opt+offsetof(struct olympic_parameters_table, beacon_naun)+3), - readb(opt+offsetof(struct olympic_parameters_table, beacon_naun)+4), - readb(opt+offsetof(struct olympic_parameters_table, beacon_naun)+5), + print_mac(mac, addr), readb(opt+offsetof(struct olympic_parameters_table, beacon_phys)), readb(opt+offsetof(struct olympic_parameters_table, beacon_phys)+1), readb(opt+offsetof(struct olympic_parameters_table, beacon_phys)+2), @@ -1748,13 +1730,13 @@ static int olympic_proc_info(char *buffer, char **start, off_t offset, int lengt static void __devexit olympic_remove_one(struct pci_dev *pdev) { struct net_device *dev = pci_get_drvdata(pdev) ; - struct olympic_private *olympic_priv=(struct olympic_private *)dev->priv; + struct olympic_private *olympic_priv=netdev_priv(dev); if (olympic_priv->olympic_network_monitor) { char proc_name[20] ; - strcpy(proc_name,"net/olympic_") ; + strcpy(proc_name,"olympic_") ; strcat(proc_name,dev->name) ; - remove_proc_entry(proc_name,NULL); + remove_proc_entry(proc_name,init_net.proc_net); } unregister_netdev(dev) ; iounmap(olympic_priv->olympic_mmio) ; diff --git a/drivers/net/tokenring/proteon.c b/drivers/net/tokenring/proteon.c index cb7dbb63c9d9da6b6794dc30045943f4cac4eced..ca6b65919b3d0bf00806b64349cef355a7edb919 100644 --- a/drivers/net/tokenring/proteon.c +++ b/drivers/net/tokenring/proteon.c @@ -122,11 +122,11 @@ static int __init setup_card(struct net_device *dev, struct device *pdev) static int versionprinted; const unsigned *port; int j,err = 0; + DECLARE_MAC_BUF(mac); if (!dev) return -ENOMEM; - SET_MODULE_OWNER(dev); if (dev->base_addr) /* probe specific location */ err = proteon_probe1(dev, dev->base_addr); else { @@ -153,11 +153,8 @@ static int __init setup_card(struct net_device *dev, struct device *pdev) proteon_read_eeprom(dev); - printk(KERN_DEBUG "proteon.c: Ring Station Address: "); - printk("%2.2x", dev->dev_addr[0]); - for (j = 1; j < 6; j++) - printk(":%2.2x", dev->dev_addr[j]); - printk("\n"); + printk(KERN_DEBUG "proteon.c: Ring Station Address: %s\n", + print_mac(mac, dev->dev_addr)); tp = netdev_priv(dev); tp->setnselout = proteon_setnselout_pins; diff --git a/drivers/net/tokenring/skisa.c b/drivers/net/tokenring/skisa.c index 33afea31d87b93b591a640cc3a4771923f7184d2..32e8d5a9f958fa354067b98da188dbebe5a23b57 100644 --- a/drivers/net/tokenring/skisa.c +++ b/drivers/net/tokenring/skisa.c @@ -139,11 +139,11 @@ static int __init setup_card(struct net_device *dev, struct device *pdev) static int versionprinted; const unsigned *port; int j, err = 0; + DECLARE_MAC_BUF(mac); if (!dev) return -ENOMEM; - SET_MODULE_OWNER(dev); if (dev->base_addr) /* probe specific location */ err = sk_isa_probe1(dev, dev->base_addr); else { @@ -170,11 +170,8 @@ static int __init setup_card(struct net_device *dev, struct device *pdev) sk_isa_read_eeprom(dev); - printk(KERN_DEBUG "skisa.c: Ring Station Address: "); - printk("%2.2x", dev->dev_addr[0]); - for (j = 1; j < 6; j++) - printk(":%2.2x", dev->dev_addr[j]); - printk("\n"); + printk(KERN_DEBUG "skisa.c: Ring Station Address: %s\n", + print_mac(mac, dev->dev_addr)); tp = netdev_priv(dev); tp->setnselout = sk_isa_setnselout_pins; diff --git a/drivers/net/tokenring/smctr.c b/drivers/net/tokenring/smctr.c index f83bb5cb0d3d84e42e428365ee8e98da26369d80..93da3a36cde8915922579b2ba26f91a571361388 100644 --- a/drivers/net/tokenring/smctr.c +++ b/drivers/net/tokenring/smctr.c @@ -3583,8 +3583,6 @@ struct net_device 
__init *smctr_probe(int unit) if (!dev) return ERR_PTR(-ENOMEM); - SET_MODULE_OWNER(dev); - if (unit >= 0) { sprintf(dev->name, "tr%d", unit); netdev_boot_setup_check(dev); diff --git a/drivers/net/tokenring/tms380tr.c b/drivers/net/tokenring/tms380tr.c index 12bd294045a75a284c887c49ff7fed53cfaa02eb..d5fa36d3651591d62b8f1fb3cb305500b187d551 100644 --- a/drivers/net/tokenring/tms380tr.c +++ b/drivers/net/tokenring/tms380tr.c @@ -2124,7 +2124,7 @@ static void tms380tr_rcv_status_irq(struct net_device *dev) /* Get the frame size (Byte swap for Intel). * Do this early (see workaround comment below) */ - Length = be16_to_cpu((unsigned short)rpl->FrameSize); + Length = be16_to_cpu(rpl->FrameSize); /* Check if the Frame_Start, Frame_End and * Frame_Complete bits are set. @@ -2140,7 +2140,7 @@ static void tms380tr_rcv_status_irq(struct net_device *dev) * Length2 is there because there have also been * cases where the FrameSize was partially written */ - Length2 = be16_to_cpu((unsigned short)rpl->FrameSize); + Length2 = be16_to_cpu(rpl->FrameSize); if(Length == 0 || Length != Length2) { diff --git a/drivers/net/tokenring/tms380tr.h b/drivers/net/tokenring/tms380tr.h index 2a16078ac3fdc6ecde5d685cf9e6ea33e396e516..7daf74e31ccd58a2426c5c33512e93ecec2f2e7e 100644 --- a/drivers/net/tokenring/tms380tr.h +++ b/drivers/net/tokenring/tms380tr.h @@ -476,13 +476,13 @@ typedef struct { * bytes = 0xC000 */ u_int32_t FunctAddr; /* High order bytes = 0xC000 */ - u_int16_t RxListSize; /* RPL size: 0 (=26), 14, 20 or + __be16 RxListSize; /* RPL size: 0 (=26), 14, 20 or * 26 bytes read by the adapter. * (Depending on the number of * fragments/list) */ - u_int16_t TxListSize; /* TPL size */ - u_int16_t BufSize; /* Is automatically rounded up to the + __be16 TxListSize; /* TPL size */ + __be16 BufSize; /* Is automatically rounded up to the * nearest nK boundary. */ u_int16_t FullDuplex; @@ -580,14 +580,14 @@ typedef struct { /*--------------------- Send and Receive definitions -------------------*/ #pragma pack(1) typedef struct { - u_int16_t DataCount; /* Value 0, even and odd values are + __be16 DataCount; /* Value 0, even and odd values are * permitted; value is unaltered most * significant bit set: following * fragments last fragment: most * significant bit is not evaluated. * (???) */ - u_int32_t DataAddr; /* Pointer to frame data fragment; + __be32 DataAddr; /* Pointer to frame data fragment; * even or odd. */ } Fragment; @@ -679,7 +679,7 @@ typedef struct { typedef struct s_TPL TPL; struct s_TPL { /* Transmit Parameter List (align on even word boundaries) */ - u_int32_t NextTPLAddr; /* Pointer to next TPL in chain; if + __be32 NextTPLAddr; /* Pointer to next TPL in chain; if * pointer is odd: this is the last * TPL. Pointing to itself can cause * problems! @@ -689,7 +689,7 @@ struct s_TPL { /* Transmit Parameter List (align on even word boundaries) */ * significant bit first! Set by the * adapter: CSTAT_COMPLETE status. */ - u_int16_t FrameSize; /* Number of bytes to be transmitted + __be16 FrameSize; /* Number of bytes to be transmitted * as a frame including AC/FC, * Destination, Source, Routing field * not including CRC, FS, End Delimiter @@ -1020,7 +1020,7 @@ enum SKB_STAT { #pragma pack(1) typedef struct s_RPL RPL; struct s_RPL { /* Receive Parameter List */ - u_int32_t NextRPLAddr; /* Pointer to next RPL in chain + __be32 NextRPLAddr; /* Pointer to next RPL in chain * (normalized = physical 32 bit * address) if pointer is odd: this * is last RPL. 
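(Editor's sketch, not part of the patch: the tms380tr.h hunks change only types; fields the adapter stores big-endian become __be16/__be32, so sparse can check that every access goes through be16_to_cpu()/be32_to_cpu() and the old (unsigned short) casts can be dropped. An illustration of the idea with a made-up structure, not the driver's real layout.)

#include <linux/types.h>
#include <asm/byteorder.h>

struct demo_rpl {		/* illustrative big-endian descriptor */
	__be32 next;		/* adapter writes these big-endian */
	__be16 frame_size;
};

static u16 demo_frame_size(const struct demo_rpl *rpl)
{
	/* typed as __be16, so no cast is needed around be16_to_cpu() */
	return be16_to_cpu(rpl->frame_size);
}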
Pointing to itself can @@ -1031,7 +1031,7 @@ struct s_RPL { /* Receive Parameter List */ * adapter in lists that start or end * a frame. */ - volatile u_int16_t FrameSize; /* Number of bytes received as a + volatile __be16 FrameSize; /* Number of bytes received as a * frame including AC/FC, Destination, * Source, Routing field not including * CRC, FS (Frame Status), End Delimiter diff --git a/drivers/net/tokenring/tmspci.c b/drivers/net/tokenring/tmspci.c index 3b2f00b9b7bd89a717734e16fa065a1e6991cd81..1c18f782f522225fb0467b8fcb460a4aa33afa98 100644 --- a/drivers/net/tokenring/tmspci.c +++ b/drivers/net/tokenring/tmspci.c @@ -96,10 +96,11 @@ static int __devinit tms_pci_attach(struct pci_dev *pdev, const struct pci_devic static int versionprinted; struct net_device *dev; struct net_local *tp; - int i, ret; + int ret; unsigned int pci_irq_line; unsigned long pci_ioaddr; struct card_info *cardinfo = &card_info_table[ent->driver_data]; + DECLARE_MAC_BUF(mac); if (versionprinted++ == 0) printk("%s", version); @@ -115,7 +116,6 @@ static int __devinit tms_pci_attach(struct pci_dev *pdev, const struct pci_devic dev = alloc_trdev(sizeof(struct net_local)); if (!dev) return -ENOMEM; - SET_MODULE_OWNER(dev); if (!request_region(pci_ioaddr, TMS_PCI_IO_EXTENT, dev->name)) { ret = -EBUSY; @@ -137,11 +137,8 @@ static int __devinit tms_pci_attach(struct pci_dev *pdev, const struct pci_devic tms_pci_read_eeprom(dev); - printk("%s: Ring Station Address: ", dev->name); - printk("%2.2x", dev->dev_addr[0]); - for (i = 1; i < 6; i++) - printk(":%2.2x", dev->dev_addr[i]); - printk("\n"); + printk("%s: Ring Station Address: %s\n", + dev->name, print_mac(mac, dev->dev_addr)); ret = tmsdev_init(dev, &pdev->dev); if (ret) { @@ -149,7 +146,7 @@ static int __devinit tms_pci_attach(struct pci_dev *pdev, const struct pci_devic goto err_out_irq; } - tp = dev->priv; + tp = netdev_priv(dev); tp->setnselout = tms_pci_setnselout_pins; tp->sifreadb = tms_pci_sifreadb; @@ -210,7 +207,7 @@ static void tms_pci_read_eeprom(struct net_device *dev) static unsigned short tms_pci_setnselout_pins(struct net_device *dev) { unsigned short val = 0; - struct net_local *tp = dev->priv; + struct net_local *tp = netdev_priv(dev); struct card_info *cardinfo = tp->tmspriv; if(tp->DataRate == SPEED_4) diff --git a/drivers/net/tsi108_eth.c b/drivers/net/tsi108_eth.c index 1aabc91f6458e3d5c3a2b880127002a05d45ee01..df10af7df7b86a1d1aa708954256d3048d58da6b 100644 --- a/drivers/net/tsi108_eth.c +++ b/drivers/net/tsi108_eth.c @@ -47,7 +47,6 @@ #include #include #include -#include #include #include @@ -79,6 +78,9 @@ struct tsi108_prv_data { void __iomem *regs; /* Base of normal regs */ void __iomem *phyregs; /* Base of register bank used for PHY access */ + struct net_device *dev; + struct napi_struct napi; + unsigned int phy; /* Index of PHY for this interface */ unsigned int irq_num; unsigned int id; @@ -837,13 +839,13 @@ static int tsi108_refill_rx(struct net_device *dev, int budget) return done; } -static int tsi108_poll(struct net_device *dev, int *budget) +static int tsi108_poll(struct napi_struct *napi, int budget) { - struct tsi108_prv_data *data = netdev_priv(dev); + struct tsi108_prv_data *data = container_of(napi, struct tsi108_prv_data, napi); + struct net_device *dev = data->dev; u32 estat = TSI_READ(TSI108_EC_RXESTAT); u32 intstat = TSI_READ(TSI108_EC_INTSTAT); - int total_budget = min(*budget, dev->quota); - int num_received = 0, num_filled = 0, budget_used; + int num_received = 0, num_filled = 0; intstat &= TSI108_INT_RXQUEUE0 | 
TSI108_INT_RXTHRESH | TSI108_INT_RXOVERRUN | TSI108_INT_RXERROR | TSI108_INT_RXWAIT; @@ -852,7 +854,7 @@ static int tsi108_poll(struct net_device *dev, int *budget) TSI_WRITE(TSI108_EC_INTSTAT, intstat); if (data->rxpending || (estat & TSI108_EC_RXESTAT_Q0_DESCINT)) - num_received = tsi108_complete_rx(dev, total_budget); + num_received = tsi108_complete_rx(dev, budget); /* This should normally fill no more slots than the number of * packets received in tsi108_complete_rx(). The exception @@ -867,7 +869,7 @@ static int tsi108_poll(struct net_device *dev, int *budget) */ if (data->rxfree < TSI108_RXRING_LEN) - num_filled = tsi108_refill_rx(dev, total_budget * 2); + num_filled = tsi108_refill_rx(dev, budget * 2); if (intstat & TSI108_INT_RXERROR) { u32 err = TSI_READ(TSI108_EC_RXERR); @@ -890,14 +892,9 @@ static int tsi108_poll(struct net_device *dev, int *budget) spin_unlock_irq(&data->misclock); } - budget_used = max(num_received, num_filled / 2); - - *budget -= budget_used; - dev->quota -= budget_used; - - if (budget_used != total_budget) { + if (num_received < budget) { data->rxpending = 0; - netif_rx_complete(dev); + netif_rx_complete(dev, napi); TSI_WRITE(TSI108_EC_INTMASK, TSI_READ(TSI108_EC_INTMASK) @@ -906,14 +903,11 @@ static int tsi108_poll(struct net_device *dev, int *budget) TSI108_INT_RXOVERRUN | TSI108_INT_RXERROR | TSI108_INT_RXWAIT)); - - /* IRQs are level-triggered, so no need to re-check */ - return 0; } else { data->rxpending = 1; } - return 1; + return num_received; } static void tsi108_rx_int(struct net_device *dev) @@ -931,7 +925,7 @@ static void tsi108_rx_int(struct net_device *dev) * from tsi108_check_rxring(). */ - if (netif_rx_schedule_prep(dev)) { + if (netif_rx_schedule_prep(dev, &data->napi)) { /* Mask, rather than ack, the receive interrupts. The ack * will happen in tsi108_poll(). 
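
The tsi108_poll() rewrite above shows the core of the NAPI API conversion this patch applies throughout: the poll routine now receives the napi_struct and a budget instead of a net_device and a quota pointer, recovers its private data with container_of(), and returns how much work it did, calling netif_rx_complete() only when it stayed under budget. A minimal sketch of the same shape; the foo_* names and helpers are placeholders, not code from this patch:

struct foo_priv {
	struct net_device *dev;
	struct napi_struct napi;
	/* ... rings, locks ... */
};

static int foo_poll(struct napi_struct *napi, int budget)
{
	struct foo_priv *priv = container_of(napi, struct foo_priv, napi);
	struct net_device *dev = priv->dev;
	int work_done;

	/* foo_clean_rx_ring()/foo_unmask_rx_irq() are hypothetical
	 * driver helpers standing in for the real RX processing. */
	work_done = foo_clean_rx_ring(dev, budget);

	if (work_done < budget) {
		/* all pending work done: leave the poll list and
		 * re-enable receive interrupts */
		netif_rx_complete(dev, napi);
		foo_unmask_rx_irq(priv);
	}
	return work_done;
}
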
*/ @@ -942,7 +936,7 @@ static void tsi108_rx_int(struct net_device *dev) | TSI108_INT_RXTHRESH | TSI108_INT_RXOVERRUN | TSI108_INT_RXERROR | TSI108_INT_RXWAIT); - __netif_rx_schedule(dev); + __netif_rx_schedule(dev, &data->napi); } else { if (!netif_running(dev)) { /* This can happen if an interrupt occurs while the @@ -1401,6 +1395,8 @@ static int tsi108_open(struct net_device *dev) TSI_WRITE(TSI108_EC_TXQ_PTRLOW, data->txdma); tsi108_init_phy(dev); + napi_enable(&data->napi); + setup_timer(&data->timer, tsi108_timed_checker, (unsigned long)dev); mod_timer(&data->timer, jiffies + 1); @@ -1425,6 +1421,7 @@ static int tsi108_close(struct net_device *dev) struct tsi108_prv_data *data = netdev_priv(dev); netif_stop_queue(dev); + napi_disable(&data->napi); del_timer_sync(&data->timer); @@ -1543,6 +1540,7 @@ tsi108_init_one(struct platform_device *pdev) struct tsi108_prv_data *data = NULL; hw_info *einfo; int err = 0; + DECLARE_MAC_BUF(mac); einfo = pdev->dev.platform_data; @@ -1562,6 +1560,7 @@ tsi108_init_one(struct platform_device *pdev) printk("tsi108_eth%d: probe...\n", pdev->id); data = netdev_priv(dev); + data->dev = dev; pr_debug("tsi108_eth%d:regs:phyresgs:phy:irq_num=0x%x:0x%x:0x%x:0x%x\n", pdev->id, einfo->regs, einfo->phyregs, @@ -1597,9 +1596,8 @@ tsi108_init_one(struct platform_device *pdev) dev->set_mac_address = tsi108_set_mac; dev->set_multicast_list = tsi108_set_rx_mode; dev->get_stats = tsi108_get_stats; - dev->poll = tsi108_poll; + netif_napi_add(dev, &data->napi, tsi108_poll, 64); dev->do_ioctl = tsi108_do_ioctl; - dev->weight = 64; /* 64 is more suitable for GigE interface - klai */ /* Apparently, the Linux networking code won't use scatter-gather * if the hardware doesn't do checksums. However, it's faster @@ -1610,7 +1608,6 @@ tsi108_init_one(struct platform_device *pdev) */ dev->features = NETIF_F_HIGHDMA; - SET_MODULE_OWNER(dev); spin_lock_init(&data->txlock); spin_lock_init(&data->misclock); @@ -1632,10 +1629,8 @@ tsi108_init_one(struct platform_device *pdev) goto register_fail; } - printk(KERN_INFO "%s: Tsi108 Gigabit Ethernet, MAC: " - "%02x:%02x:%02x:%02x:%02x:%02x\n", dev->name, - dev->dev_addr[0], dev->dev_addr[1], dev->dev_addr[2], - dev->dev_addr[3], dev->dev_addr[4], dev->dev_addr[5]); + printk(KERN_INFO "%s: Tsi108 Gigabit Ethernet, MAC: %s\n" + dev->name, print_mac(mac, dev->dev_addr)); #ifdef DEBUG data->msg_enable = DEBUG; dump_eth_one(dev); diff --git a/drivers/net/tulip/de2104x.c b/drivers/net/tulip/de2104x.c index d380e0b3f05a1fe0925eaee771b37e324dc08d70..77d9dd7ea34f647f77e04d1c30bc20257146de55 100644 --- a/drivers/net/tulip/de2104x.c +++ b/drivers/net/tulip/de2104x.c @@ -264,10 +264,10 @@ struct de_srom_info_leaf { } __attribute__((packed)); struct de_desc { - u32 opts1; - u32 opts2; - u32 addr1; - u32 addr2; + __le32 opts1; + __le32 opts2; + __le32 addr1; + __le32 addr2; }; struct media_info { @@ -1670,8 +1670,6 @@ static void de_get_regs(struct net_device *dev, struct ethtool_regs *regs, static const struct ethtool_ops de_ethtool_ops = { .get_link = ethtool_op_get_link, - .get_tx_csum = ethtool_op_get_tx_csum, - .get_sg = ethtool_op_get_sg, .get_drvinfo = de_get_drvinfo, .get_regs_len = de_get_regs_len, .get_settings = de_get_settings, @@ -1773,8 +1771,8 @@ static void __devinit de21041_get_srom_info (struct de_private *de) /* download entire eeprom */ for (i = 0; i < DE_EEPROM_WORDS; i++) - ((u16 *)ee_data)[i] = - le16_to_cpu(tulip_read_eeprom(de->regs, i, ee_addr_size)); + ((__le16 *)ee_data)[i] = + cpu_to_le16(tulip_read_eeprom(de->regs, i, 
ee_addr_size)); /* DEC now has a specification but early board makers just put the address in the first EEPROM locations. */ @@ -1931,6 +1929,7 @@ static int __devinit de_init_one (struct pci_dev *pdev, void __iomem *regs; unsigned long pciaddr; static int board_idx = -1; + DECLARE_MAC_BUF(mac); board_idx++; @@ -1944,7 +1943,6 @@ static int __devinit de_init_one (struct pci_dev *pdev, if (!dev) return -ENOMEM; - SET_MODULE_OWNER(dev); SET_NETDEV_DEV(dev, &pdev->dev); dev->open = de_open; dev->stop = de_close; @@ -2045,15 +2043,11 @@ static int __devinit de_init_one (struct pci_dev *pdev, goto err_out_iomap; /* print info about board and interface just registered */ - printk (KERN_INFO "%s: %s at 0x%lx, " - "%02x:%02x:%02x:%02x:%02x:%02x, " - "IRQ %d\n", + printk (KERN_INFO "%s: %s at 0x%lx, %s, IRQ %d\n", dev->name, de->de21040 ? "21040" : "21041", dev->base_addr, - dev->dev_addr[0], dev->dev_addr[1], - dev->dev_addr[2], dev->dev_addr[3], - dev->dev_addr[4], dev->dev_addr[5], + print_mac(mac, dev->dev_addr), dev->irq); pci_set_drvdata(pdev, dev); diff --git a/drivers/net/tulip/de4x5.c b/drivers/net/tulip/de4x5.c index 09902891a6e6549541a3a0d1e904cfe9df51e53f..9b9cd83fb8b660dbfeb8d44e330e851221412435 100644 --- a/drivers/net/tulip/de4x5.c +++ b/drivers/net/tulip/de4x5.c @@ -482,7 +482,7 @@ static char version[] __devinitdata = "de4x5.c:V0.546 2001/02/22 davies@maniac.ultranet.com\n"; #define c_char const char -#define TWIDDLE(a) (u_short)le16_to_cpu(get_unaligned((u_short *)(a))) +#define TWIDDLE(a) (u_short)le16_to_cpu(get_unaligned((__le16 *)(a))) /* ** MII Information @@ -756,10 +756,10 @@ struct de4x5_srom { /* Multiple of 4 for DC21040 */ /* Allows 512 byte alignment */ struct de4x5_desc { - volatile s32 status; - u32 des1; - u32 buf; - u32 next; + volatile __le32 status; + __le32 des1; + __le32 buf; + __le32 next; DESC_ALIGN }; @@ -1088,6 +1088,7 @@ de4x5_hw_init(struct net_device *dev, u_long iobase, struct device *gendev) struct de4x5_private *lp = netdev_priv(dev); struct pci_dev *pdev = NULL; int i, status=0; + DECLARE_MAC_BUF(mac); gendev->driver_data = dev; @@ -1123,12 +1124,8 @@ de4x5_hw_init(struct net_device *dev, u_long iobase, struct device *gendev) dev->base_addr = iobase; printk ("%s: %s at 0x%04lx", gendev->bus_id, name, iobase); - printk(", h/w address "); status = get_hw_addr(dev); - for (i = 0; i < ETH_ALEN - 1; i++) { /* get the ethernet addr. */ - printk("%2.2x:", dev->dev_addr[i]); - } - printk("%2.2x,\n", dev->dev_addr[i]); + printk(", h/w address %s\n", print_mac(mac, dev->dev_addr)); if (status != 0) { printk(" which has an Ethernet PROM CRC error.\n"); @@ -1261,7 +1258,6 @@ de4x5_hw_init(struct net_device *dev, u_long iobase, struct device *gendev) } /* The DE4X5-specific entries in the device structure. 
*/ - SET_MODULE_OWNER(dev); SET_NETDEV_DEV(dev, gendev); dev->open = &de4x5_open; dev->hard_start_xmit = &de4x5_queue_pkt; @@ -3946,7 +3942,7 @@ create_packet(struct net_device *dev, char *frame, int len) static int EISA_signature(char *name, struct device *device) { - int i, status = 0, siglen = sizeof(de4x5_signatures)/sizeof(c_char *); + int i, status = 0, siglen = ARRAY_SIZE(de4x5_signatures); struct eisa_device *edev; *name = '\0'; @@ -3967,7 +3963,7 @@ EISA_signature(char *name, struct device *device) static int PCI_signature(char *name, struct de4x5_private *lp) { - int i, status = 0, siglen = sizeof(de4x5_signatures)/sizeof(c_char *); + int i, status = 0, siglen = ARRAY_SIZE(de4x5_signatures); if (lp->chipset == DC21040) { strcpy(name, "DE434/5"); @@ -5073,7 +5069,7 @@ mii_get_phy(struct net_device *dev) { struct de4x5_private *lp = netdev_priv(dev); u_long iobase = dev->base_addr; - int i, j, k, n, limit=sizeof(phy_info)/sizeof(struct phy_table); + int i, j, k, n, limit=ARRAY_SIZE(phy_info); int id; lp->active = 0; @@ -5469,19 +5465,16 @@ static void de4x5_dbg_srom(struct de4x5_srom *p) { int i; + DECLARE_MAC_BUF(mac); if (de4x5_debug & DEBUG_SROM) { printk("Sub-system Vendor ID: %04x\n", *((u_short *)p->sub_vendor_id)); printk("Sub-system ID: %04x\n", *((u_short *)p->sub_system_id)); printk("ID Block CRC: %02x\n", (u_char)(p->id_block_crc)); printk("SROM version: %02x\n", (u_char)(p->version)); - printk("# controllers: %02x\n", (u_char)(p->num_controllers)); + printk("# controllers: %02x\n", (u_char)(p->num_controllers)); - printk("Hardware Address: "); - for (i=0;iieee_addr+i)); - } - printk("%02x\n", (u_char)*(p->ieee_addr+i)); + printk("Hardware Address: %s\n", print_mac(mac, p->ieee_addr)); printk("CRC checksum: %04x\n", (u_short)(p->chksum)); for (i=0; i<64; i++) { printk("%3d %04x\n", i<<1, (u_short)*((u_short *)p+i)); @@ -5495,21 +5488,12 @@ static void de4x5_dbg_rx(struct sk_buff *skb, int len) { int i, j; + DECLARE_MAC_BUF(mac); + DECLARE_MAC_BUF(mac2); if (de4x5_debug & DEBUG_RX) { - printk("R: %02x:%02x:%02x:%02x:%02x:%02x <- %02x:%02x:%02x:%02x:%02x:%02x len/SAP:%02x%02x [%d]\n", - (u_char)skb->data[0], - (u_char)skb->data[1], - (u_char)skb->data[2], - (u_char)skb->data[3], - (u_char)skb->data[4], - (u_char)skb->data[5], - (u_char)skb->data[6], - (u_char)skb->data[7], - (u_char)skb->data[8], - (u_char)skb->data[9], - (u_char)skb->data[10], - (u_char)skb->data[11], + printk("R: %s <- %s len/SAP:%02x%02x [%d]\n", + print_mac(mac, skb->data), print_mac(mac2, &skb->data[6]), (u_char)skb->data[12], (u_char)skb->data[13], len); diff --git a/drivers/net/tulip/dmfe.c b/drivers/net/tulip/dmfe.c index dab74feb44bcf125937d66d674a87c806592f64b..ca90566d5bcd4f5a0f8cb1e06de329103280ffbe 100644 --- a/drivers/net/tulip/dmfe.c +++ b/drivers/net/tulip/dmfe.c @@ -362,6 +362,7 @@ static int __devinit dmfe_init_one (struct pci_dev *pdev, struct net_device *dev; u32 pci_pmr; int i, err; + DECLARE_MAC_BUF(mac); DMFE_DBUG(0, "dmfe_init_one()", 0); @@ -372,7 +373,6 @@ static int __devinit dmfe_init_one (struct pci_dev *pdev, dev = alloc_etherdev(sizeof(*db)); if (dev == NULL) return -ENOMEM; - SET_MODULE_OWNER(dev); SET_NETDEV_DEV(dev, &pdev->dev); if (pci_set_dma_mask(pdev, DMA_32BIT_MASK)) { @@ -471,13 +471,13 @@ static int __devinit dmfe_init_one (struct pci_dev *pdev, if (err) goto err_out_res; - printk(KERN_INFO "%s: Davicom DM%04lx at pci%s,", - dev->name, - ent->driver_data >> 16, - pci_name(pdev)); - for (i = 0; i < 6; i++) - printk("%c%02x", i ? 
':' : ' ', dev->dev_addr[i]); - printk(", irq %d.\n", dev->irq); + printk(KERN_INFO "%s: Davicom DM%04lx at pci%s, " + "%s, irq %d.\n", + dev->name, + ent->driver_data >> 16, + pci_name(pdev), + print_mac(mac, dev->dev_addr), + dev->irq); pci_set_master(pdev); diff --git a/drivers/net/tulip/interrupt.c b/drivers/net/tulip/interrupt.c index 53efd6694e755e80e64504e21fd90d0666bccec5..365331446387395ca7e878adb45dd22d0065c156 100644 --- a/drivers/net/tulip/interrupt.c +++ b/drivers/net/tulip/interrupt.c @@ -103,28 +103,29 @@ int tulip_refill_rx(struct net_device *dev) void oom_timer(unsigned long data) { struct net_device *dev = (struct net_device *)data; - netif_rx_schedule(dev); + struct tulip_private *tp = netdev_priv(dev); + netif_rx_schedule(dev, &tp->napi); } -int tulip_poll(struct net_device *dev, int *budget) +int tulip_poll(struct napi_struct *napi, int budget) { - struct tulip_private *tp = netdev_priv(dev); + struct tulip_private *tp = container_of(napi, struct tulip_private, napi); + struct net_device *dev = tp->dev; int entry = tp->cur_rx % RX_RING_SIZE; - int rx_work_limit = *budget; + int work_done = 0; +#ifdef CONFIG_TULIP_NAPI_HW_MITIGATION int received = 0; +#endif if (!netif_running(dev)) goto done; - if (rx_work_limit > dev->quota) - rx_work_limit = dev->quota; - #ifdef CONFIG_TULIP_NAPI_HW_MITIGATION /* that one buffer is needed for mit activation; or might be a bug in the ring buffer code; check later -- JHS*/ - if (rx_work_limit >=RX_RING_SIZE) rx_work_limit--; + if (budget >=RX_RING_SIZE) budget--; #endif if (tulip_debug > 4) @@ -144,14 +145,13 @@ int tulip_poll(struct net_device *dev, int *budget) while ( ! (tp->rx_ring[entry].status & cpu_to_le32(DescOwned))) { s32 status = le32_to_cpu(tp->rx_ring[entry].status); - if (tp->dirty_rx + RX_RING_SIZE == tp->cur_rx) break; if (tulip_debug > 5) printk(KERN_DEBUG "%s: In tulip_rx(), entry %d %8.8x.\n", dev->name, entry, status); - if (--rx_work_limit < 0) + if (work_done++ >= budget) goto not_done; if ((status & 0x38008300) != 0x0300) { @@ -238,7 +238,9 @@ int tulip_poll(struct net_device *dev, int *budget) tp->stats.rx_packets++; tp->stats.rx_bytes += pkt_len; } - received++; +#ifdef CONFIG_TULIP_NAPI_HW_MITIGATION + received++; +#endif entry = (++tp->cur_rx) % RX_RING_SIZE; if (tp->cur_rx - tp->dirty_rx > RX_RING_SIZE/4) @@ -296,17 +298,15 @@ int tulip_poll(struct net_device *dev, int *budget) #endif /* CONFIG_TULIP_NAPI_HW_MITIGATION */ - dev->quota -= received; - *budget -= received; - tulip_refill_rx(dev); /* If RX ring is not full we are out of memory. */ - if (tp->rx_buffers[tp->dirty_rx % RX_RING_SIZE].skb == NULL) goto oom; + if (tp->rx_buffers[tp->dirty_rx % RX_RING_SIZE].skb == NULL) + goto oom; /* Remove us from polling list and enable RX intr. */ - netif_rx_complete(dev); + netif_rx_complete(dev, napi); iowrite32(tulip_tbl[tp->chip_id].valid_intrs, tp->base_addr+CSR7); /* The last op happens after poll completion. Which means the following: @@ -320,28 +320,20 @@ int tulip_poll(struct net_device *dev, int *budget) * processed irqs. But it must not result in losing events. 
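
Most of the printk changes in this patch (de4x5, dmfe, tulip_core, winbond-840, tun, typhoon, ...) are the same substitution: hand-rolled "%02x:%02x:..." loops become DECLARE_MAC_BUF() plus print_mac(), which formats the six address bytes into an on-stack buffer and returns that buffer. A sketch of the idiom as used here:

#include <linux/netdevice.h>

static void example_print_station_addr(struct net_device *dev)
{
	DECLARE_MAC_BUF(mac);		/* an 18-byte char buffer */

	printk(KERN_INFO "%s: station address %s\n",
	       dev->name, print_mac(mac, dev->dev_addr));
}
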
*/ - return 0; + return work_done; not_done: - if (!received) { - - received = dev->quota; /* Not to happen */ - } - dev->quota -= received; - *budget -= received; - if (tp->cur_rx - tp->dirty_rx > RX_RING_SIZE/2 || tp->rx_buffers[tp->dirty_rx % RX_RING_SIZE].skb == NULL) tulip_refill_rx(dev); - if (tp->rx_buffers[tp->dirty_rx % RX_RING_SIZE].skb == NULL) goto oom; - - return 1; + if (tp->rx_buffers[tp->dirty_rx % RX_RING_SIZE].skb == NULL) + goto oom; + return work_done; oom: /* Executed with RX ints disabled */ - /* Start timer, stop polling, but do not enable rx interrupts. */ mod_timer(&tp->oom_timer, jiffies+1); @@ -350,9 +342,9 @@ int tulip_poll(struct net_device *dev, int *budget) * before we did netif_rx_complete(). See? We would lose it. */ /* remove ourselves from the polling list */ - netif_rx_complete(dev); + netif_rx_complete(dev, napi); - return 0; + return work_done; } #else /* CONFIG_TULIP_NAPI */ @@ -534,7 +526,7 @@ irqreturn_t tulip_interrupt(int irq, void *dev_instance) rxd++; /* Mask RX intrs and add the device to poll list. */ iowrite32(tulip_tbl[tp->chip_id].valid_intrs&~RxPollInt, ioaddr + CSR7); - netif_rx_schedule(dev); + netif_rx_schedule(dev, &tp->napi); if (!(csr5&~(AbnormalIntr|NormalIntr|RxPollInt|TPLnkPass))) break; diff --git a/drivers/net/tulip/tulip.h b/drivers/net/tulip/tulip.h index 16f26a8364f074c7c5e5b8c33c333b1e123a6ed5..3f69f53d7768f44e90385ce43dd9a22830ac7a97 100644 --- a/drivers/net/tulip/tulip.h +++ b/drivers/net/tulip/tulip.h @@ -178,18 +178,18 @@ enum tulip_busconfig_bits { /* The Tulip Rx and Tx buffer descriptors. */ struct tulip_rx_desc { - s32 status; - s32 length; - u32 buffer1; - u32 buffer2; + __le32 status; + __le32 length; + __le32 buffer1; + __le32 buffer2; }; struct tulip_tx_desc { - s32 status; - s32 length; - u32 buffer1; - u32 buffer2; /* We use only buffer 1. */ + __le32 status; + __le32 length; + __le32 buffer1; + __le32 buffer2; /* We use only buffer 1. */ }; @@ -353,6 +353,7 @@ struct tulip_private { int chip_id; int revision; int flags; + struct napi_struct napi; struct net_device_stats stats; struct timer_list timer; /* Media selection timer. */ struct timer_list oom_timer; /* Out of memory timer. */ @@ -429,7 +430,7 @@ extern int tulip_rx_copybreak; irqreturn_t tulip_interrupt(int irq, void *dev_instance); int tulip_refill_rx(struct net_device *dev); #ifdef CONFIG_TULIP_NAPI -int tulip_poll(struct net_device *dev, int *budget); +int tulip_poll(struct napi_struct *napi, int budget); #endif diff --git a/drivers/net/tulip/tulip_core.c b/drivers/net/tulip/tulip_core.c index eca984f89bbf6f419c156e3023ec358bc4fcb1b5..ee08292bcf8579f3f23bcb748702aad8ae1761db 100644 --- a/drivers/net/tulip/tulip_core.c +++ b/drivers/net/tulip/tulip_core.c @@ -294,6 +294,10 @@ static void tulip_up(struct net_device *dev) int next_tick = 3*HZ; int i; +#ifdef CONFIG_TULIP_NAPI + napi_enable(&tp->napi); +#endif + /* Wake the chip from sleep/snooze mode. 
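
The interrupt-handler side of the conversion is symmetric: tulip_interrupt() above (and tsi108_rx_int() earlier) now hand the napi_struct to netif_rx_schedule_prep()/__netif_rx_schedule(), masking further RX interrupts before the poll routine runs. Sketched with the same placeholder foo_* names as before:

static irqreturn_t foo_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct foo_priv *priv = netdev_priv(dev);

	if (!foo_rx_event_pending(priv))	/* hypothetical status check */
		return IRQ_NONE;

	if (netif_rx_schedule_prep(dev, &priv->napi)) {
		foo_mask_rx_irq(priv);		/* ack/unmask happens in foo_poll() */
		__netif_rx_schedule(dev, &priv->napi);
	}
	return IRQ_HANDLED;
}
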
*/ tulip_set_power_state (tp, 0, 0); @@ -322,8 +326,8 @@ static void tulip_up(struct net_device *dev) tp->dirty_rx = tp->dirty_tx = 0; if (tp->flags & MC_HASH_ONLY) { - u32 addr_low = le32_to_cpu(get_unaligned((u32 *)dev->dev_addr)); - u32 addr_high = le16_to_cpu(get_unaligned((u16 *)(dev->dev_addr+4))); + u32 addr_low = le32_to_cpu(get_unaligned((__le32 *)dev->dev_addr)); + u32 addr_high = le16_to_cpu(get_unaligned((__le16 *)(dev->dev_addr+4))); if (tp->chip_id == AX88140) { iowrite32(0, ioaddr + CSR13); iowrite32(addr_low, ioaddr + CSR14); @@ -728,6 +732,10 @@ static void tulip_down (struct net_device *dev) flush_scheduled_work(); +#ifdef CONFIG_TULIP_NAPI + napi_disable(&tp->napi); +#endif + del_timer_sync (&tp->timer); #ifdef CONFIG_TULIP_NAPI del_timer_sync (&tp->oom_timer); @@ -1043,12 +1051,11 @@ static void set_rx_mode(struct net_device *dev) filterbit &= 0x3f; mc_filter[filterbit >> 5] |= 1 << (filterbit & 31); if (tulip_debug > 2) { - printk(KERN_INFO "%s: Added filter for %2.2x:%2.2x:%2.2x:" - "%2.2x:%2.2x:%2.2x %8.8x bit %d.\n", dev->name, - mclist->dmi_addr[0], mclist->dmi_addr[1], - mclist->dmi_addr[2], mclist->dmi_addr[3], - mclist->dmi_addr[4], mclist->dmi_addr[5], - ether_crc(ETH_ALEN, mclist->dmi_addr), filterbit); + DECLARE_MAC_BUF(mac); + printk(KERN_INFO "%s: Added filter for %s" + " %8.8x bit %d.\n", + dev->name, print_mac(mac, mclist->dmi_addr), + ether_crc(ETH_ALEN, mclist->dmi_addr), filterbit); } } if (mc_filter[0] == tp->mc_filter[0] && @@ -1248,6 +1255,7 @@ static int __devinit tulip_init_one (struct pci_dev *pdev, const char *chip_name = tulip_tbl[chip_idx].chip_name; unsigned int eeprom_missing = 0; unsigned int force_csr0 = 0; + DECLARE_MAC_BUF(mac); #ifndef MODULE static int did_version; /* Already printed version info. */ @@ -1337,7 +1345,6 @@ static int __devinit tulip_init_one (struct pci_dev *pdev, return -ENOMEM; } - SET_MODULE_OWNER(dev); SET_NETDEV_DEV(dev, &pdev->dev); if (pci_resource_len (pdev, 0) < tulip_tbl[chip_idx].io_size) { printk (KERN_ERR PFX "%s: I/O region (0x%llx@0x%llx) too small, " @@ -1436,13 +1443,13 @@ static int __devinit tulip_init_one (struct pci_dev *pdev, do value = ioread32(ioaddr + CSR9); while (value < 0 && --boguscnt > 0); - put_unaligned(le16_to_cpu(value), ((u16*)dev->dev_addr) + i); + put_unaligned(cpu_to_le16(value), ((__le16*)dev->dev_addr) + i); sum += value & 0xffff; } } else if (chip_idx == COMET) { /* No need to read the EEPROM. */ - put_unaligned(cpu_to_le32(ioread32(ioaddr + 0xA4)), (u32 *)dev->dev_addr); - put_unaligned(cpu_to_le16(ioread32(ioaddr + 0xA8)), (u16 *)(dev->dev_addr + 4)); + put_unaligned(cpu_to_le32(ioread32(ioaddr + 0xA4)), (__le32 *)dev->dev_addr); + put_unaligned(cpu_to_le16(ioread32(ioaddr + 0xA8)), (__le16 *)(dev->dev_addr + 4)); for (i = 0; i < 6; i ++) sum += dev->dev_addr[i]; } else { @@ -1606,8 +1613,7 @@ static int __devinit tulip_init_one (struct pci_dev *pdev, dev->tx_timeout = tulip_tx_timeout; dev->watchdog_timeo = TX_TIMEOUT; #ifdef CONFIG_TULIP_NAPI - dev->poll = tulip_poll; - dev->weight = 16; + netif_napi_add(dev, &tp->napi, tulip_poll, 16); #endif dev->stop = tulip_close; dev->get_stats = tulip_get_stats; @@ -1633,8 +1639,7 @@ static int __devinit tulip_init_one (struct pci_dev *pdev, if (eeprom_missing) printk(" EEPROM not present,"); - for (i = 0; i < 6; i++) - printk("%c%2.2X", i ? 
':' : ' ', dev->dev_addr[i]); + printk(" %s", print_mac(mac, dev->dev_addr)); printk(", IRQ %d.\n", irq); if (tp->chip_id == PNIC2) diff --git a/drivers/net/tulip/uli526x.c b/drivers/net/tulip/uli526x.c index ca2548eb7d63196f1edd1bd02ee5830d6c007c53..76e55612430bae38ffc8c352d798bb112fcf61b7 100644 --- a/drivers/net/tulip/uli526x.c +++ b/drivers/net/tulip/uli526x.c @@ -112,13 +112,13 @@ /* Structure/enum declaration ------------------------------- */ struct tx_desc { - u32 tdes0, tdes1, tdes2, tdes3; /* Data for the card */ + __le32 tdes0, tdes1, tdes2, tdes3; /* Data for the card */ char *tx_buf_ptr; /* Data for us */ struct tx_desc *next_tx_desc; } __attribute__(( aligned(32) )); struct rx_desc { - u32 rdes0, rdes1, rdes2, rdes3; /* Data for the card */ + __le32 rdes0, rdes1, rdes2, rdes3; /* Data for the card */ struct sk_buff *rx_skb_ptr; /* Data for us */ struct rx_desc *next_rx_desc; } __attribute__(( aligned(32) )); @@ -258,6 +258,7 @@ static int __devinit uli526x_init_one (struct pci_dev *pdev, struct uli526x_board_info *db; /* board information structure */ struct net_device *dev; int i, err; + DECLARE_MAC_BUF(mac); ULI526X_DBUG(0, "uli526x_init_one()", 0); @@ -268,7 +269,6 @@ static int __devinit uli526x_init_one (struct pci_dev *pdev, dev = alloc_etherdev(sizeof(*db)); if (dev == NULL) return -ENOMEM; - SET_MODULE_OWNER(dev); SET_NETDEV_DEV(dev, &pdev->dev); if (pci_set_dma_mask(pdev, DMA_32BIT_MASK)) { @@ -344,7 +344,7 @@ static int __devinit uli526x_init_one (struct pci_dev *pdev, /* read 64 word srom data */ for (i = 0; i < 64; i++) - ((u16 *) db->srom)[i] = cpu_to_le16(read_srom_word(db->ioaddr, i)); + ((__le16 *) db->srom)[i] = cpu_to_le16(read_srom_word(db->ioaddr, i)); /* Set Node address */ if(((u16 *) db->srom)[0] == 0xffff || ((u16 *) db->srom)[0] == 0) /* SROM absent, so read MAC address from ID Table */ @@ -373,11 +373,9 @@ static int __devinit uli526x_init_one (struct pci_dev *pdev, if (err) goto err_out_res; - printk(KERN_INFO "%s: ULi M%04lx at pci%s,",dev->name,ent->driver_data >> 16,pci_name(pdev)); - - for (i = 0; i < 6; i++) - printk("%c%02x", i ? 
':' : ' ', dev->dev_addr[i]); - printk(", irq %d.\n", dev->irq); + printk(KERN_INFO "%s: ULi M%04lx at pci%s, %s, irq %d.\n", + dev->name,ent->driver_data >> 16,pci_name(pdev), + print_mac(mac, dev->dev_addr), dev->irq); pci_set_master(pdev); @@ -666,11 +664,6 @@ static irqreturn_t uli526x_interrupt(int irq, void *dev_id) unsigned long ioaddr = dev->base_addr; unsigned long flags; - if (!dev) { - ULI526X_DBUG(1, "uli526x_interrupt() without DEVICE arg", 0); - return IRQ_NONE; - } - spin_lock_irqsave(&db->lock, flags); outl(0, ioaddr + DCR7); @@ -1110,19 +1103,15 @@ static void uli526x_timer(unsigned long data) /* - * Dynamic reset the ULI526X board * Stop ULI526X board * Free Tx/Rx allocated memory - * Reset ULI526X board - * Re-initialize ULI526X board + * Init system variable */ -static void uli526x_dynamic_reset(struct net_device *dev) +static void uli526x_reset_prepare(struct net_device *dev) { struct uli526x_board_info *db = netdev_priv(dev); - ULI526X_DBUG(0, "uli526x_dynamic_reset()", 0); - /* Sopt MAC controller */ db->cr6_data &= ~(CR6_RXSC | CR6_TXSC); /* Disable Tx/Rx */ update_cr6(db->cr6_data, dev->base_addr); @@ -1141,6 +1130,22 @@ static void uli526x_dynamic_reset(struct net_device *dev) db->link_failed = 1; db->init=1; db->wait_reset = 0; +} + + +/* + * Dynamic reset the ULI526X board + * Stop ULI526X board + * Free Tx/Rx allocated memory + * Reset ULI526X board + * Re-initialize ULI526X board + */ + +static void uli526x_dynamic_reset(struct net_device *dev) +{ + ULI526X_DBUG(0, "uli526x_dynamic_reset()", 0); + + uli526x_reset_prepare(dev); /* Re-initialize ULI526X board */ uli526x_init(dev); @@ -1150,6 +1155,88 @@ static void uli526x_dynamic_reset(struct net_device *dev) } +#ifdef CONFIG_PM + +/* + * Suspend the interface. + */ + +static int uli526x_suspend(struct pci_dev *pdev, pm_message_t state) +{ + struct net_device *dev = pci_get_drvdata(pdev); + pci_power_t power_state; + int err; + + ULI526X_DBUG(0, "uli526x_suspend", 0); + + if (!netdev_priv(dev)) + return 0; + + pci_save_state(pdev); + + if (!netif_running(dev)) + return 0; + + netif_device_detach(dev); + uli526x_reset_prepare(dev); + + power_state = pci_choose_state(pdev, state); + pci_enable_wake(pdev, power_state, 0); + err = pci_set_power_state(pdev, power_state); + if (err) { + netif_device_attach(dev); + /* Re-initialize ULI526X board */ + uli526x_init(dev); + /* Restart upper layer interface */ + netif_wake_queue(dev); + } + + return err; +} + +/* + * Resume the interface. 
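
The uli526x_suspend() added above, together with the uli526x_resume() that follows, is the standard pci_driver power-management shape: save PCI config space, quiesce and detach the interface if it is running, then enter the target D-state; resume reverses those steps before re-initializing the chip. A condensed sketch of that shape (error handling trimmed, foo_* helpers hypothetical):

static int foo_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);

	pci_save_state(pdev);
	if (netif_running(dev)) {
		netif_device_detach(dev);
		foo_stop_hw(dev);		/* driver-specific quiesce */
	}
	return pci_set_power_state(pdev, pci_choose_state(pdev, state));
}

static int foo_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	if (netif_running(dev)) {
		foo_reinit_hw(dev);		/* driver-specific re-init */
		netif_device_attach(dev);
	}
	return 0;
}
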
+ */ + +static int uli526x_resume(struct pci_dev *pdev) +{ + struct net_device *dev = pci_get_drvdata(pdev); + int err; + + ULI526X_DBUG(0, "uli526x_resume", 0); + + if (!netdev_priv(dev)) + return 0; + + pci_restore_state(pdev); + + if (!netif_running(dev)) + return 0; + + err = pci_set_power_state(pdev, PCI_D0); + if (err) { + printk(KERN_WARNING "%s: Could not put device into D0\n", + dev->name); + return err; + } + + netif_device_attach(dev); + /* Re-initialize ULI526X board */ + uli526x_init(dev); + /* Restart upper layer interface */ + netif_wake_queue(dev); + + return 0; +} + +#else /* !CONFIG_PM */ + +#define uli526x_suspend NULL +#define uli526x_resume NULL + +#endif /* !CONFIG_PM */ + + /* * free all allocated rx buffer */ @@ -1512,7 +1599,6 @@ static void uli526x_process_mode(struct uli526x_board_info *db) case ULI526X_100MFD: phy_reg = 0x2100; break; } phy_write(db->ioaddr, db->phy_addr, 0, phy_reg, db->chip_id); - phy_write(db->ioaddr, db->phy_addr, 0, phy_reg, db->chip_id); } } } @@ -1689,6 +1775,8 @@ static struct pci_driver uli526x_driver = { .id_table = uli526x_pci_tbl, .probe = uli526x_init_one, .remove = __devexit_p(uli526x_remove_one), + .suspend = uli526x_suspend, + .resume = uli526x_resume, }; MODULE_AUTHOR("Peer Chen, peer.chen@uli.com.tw"); diff --git a/drivers/net/tulip/winbond-840.c b/drivers/net/tulip/winbond-840.c index 5824f6a354950380c8e1116b0a5b537b6ecb8eff..3c8e3b63be07a74d6d4f807317b9a427f7b27aa0 100644 --- a/drivers/net/tulip/winbond-840.c +++ b/drivers/net/tulip/winbond-840.c @@ -354,6 +354,7 @@ static int __devinit w840_probe1 (struct pci_dev *pdev, int irq; int i, option = find_cnt < MAX_UNITS ? options[find_cnt] : 0; void __iomem *ioaddr; + DECLARE_MAC_BUF(mac); i = pci_enable_device(pdev); if (i) return i; @@ -370,7 +371,6 @@ static int __devinit w840_probe1 (struct pci_dev *pdev, dev = alloc_etherdev(sizeof(*np)); if (!dev) return -ENOMEM; - SET_MODULE_OWNER(dev); SET_NETDEV_DEV(dev, &pdev->dev); if (pci_request_regions(pdev, DRV_NAME)) @@ -381,7 +381,7 @@ static int __devinit w840_probe1 (struct pci_dev *pdev, goto err_out_free_res; for (i = 0; i < 3; i++) - ((u16 *)dev->dev_addr)[i] = le16_to_cpu(eeprom_read(ioaddr, i)); + ((__le16 *)dev->dev_addr)[i] = cpu_to_le16(eeprom_read(ioaddr, i)); /* Reset the chip to erase previous misconfiguration. No hold time required! */ @@ -434,11 +434,9 @@ static int __devinit w840_probe1 (struct pci_dev *pdev, if (i) goto err_out_cleardev; - printk(KERN_INFO "%s: %s at %p, ", - dev->name, pci_id_tbl[chip_idx].name, ioaddr); - for (i = 0; i < 5; i++) - printk("%2.2x:", dev->dev_addr[i]); - printk("%2.2x, IRQ %d.\n", dev->dev_addr[i], irq); + printk(KERN_INFO "%s: %s at %p, %s, IRQ %d.\n", + dev->name, pci_id_tbl[chip_idx].name, ioaddr, + print_mac(mac, dev->dev_addr), irq); if (np->drv_flags & CanHaveMII) { int phy, phy_idx = 0; @@ -1246,16 +1244,16 @@ static int netdev_rx(struct net_device *dev) } #ifndef final_version /* Remove after testing. */ /* You will want this info for the initial debug. 
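
Note the direction chosen in the EEPROM/SROM hunks of this patch (de2104x, tulip_core, winbond-840): when the destination array is little-endian storage, the host-order value returned by the read helper is stored with cpu_to_le16(), not le16_to_cpu(). On any one architecture both expand to the same byte swap (or no-op), but only the cpu_to_le16() form states the intent and passes the sparse __le16 type check. In isolation:

#include <linux/types.h>
#include <asm/byteorder.h>

u16 example_read_word(int index);	/* stand-in for the driver's EEPROM accessor */

/* ee_data[] mirrors the on-chip EEPROM, i.e. little-endian storage */
static void example_fill_eeprom(__le16 *ee_data, int words)
{
	int i;

	for (i = 0; i < words; i++)
		ee_data[i] = cpu_to_le16(example_read_word(i));
}
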
*/ - if (debug > 5) - printk(KERN_DEBUG " Rx data %2.2x:%2.2x:%2.2x:%2.2x:%2.2x:" - "%2.2x %2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x %2.2x%2.2x " - "%d.%d.%d.%d.\n", - skb->data[0], skb->data[1], skb->data[2], skb->data[3], - skb->data[4], skb->data[5], skb->data[6], skb->data[7], - skb->data[8], skb->data[9], skb->data[10], - skb->data[11], skb->data[12], skb->data[13], - skb->data[14], skb->data[15], skb->data[16], - skb->data[17]); + if (debug > 5) { + DECLARE_MAC_BUF(mac); + DECLARE_MAC_BUF(mac2); + + printk(KERN_DEBUG " Rx data %s %s" + " %2.2x%2.2x %d.%d.%d.%d.\n", + print_mac(mac, &skb->data[0]), print_mac(mac2, &skb->data[6]), + skb->data[12], skb->data[13], + skb->data[14], skb->data[15], skb->data[16], skb->data[17]); + } #endif skb->protocol = eth_type_trans(skb, dev); netif_rx(skb); @@ -1452,8 +1450,6 @@ static const struct ethtool_ops netdev_ethtool_ops = { .get_link = netdev_get_link, .get_msglevel = netdev_get_msglevel, .set_msglevel = netdev_set_msglevel, - .get_sg = ethtool_op_get_sg, - .get_tx_csum = ethtool_op_get_tx_csum, }; static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) diff --git a/drivers/net/tulip/xircom_cb.c b/drivers/net/tulip/xircom_cb.c index 16a54e6b8d4f002b31e1a2e967785a1fac538e91..70befe33e454c63d1a8e4dd4ba2fab0fb728d4a3 100644 --- a/drivers/net/tulip/xircom_cb.c +++ b/drivers/net/tulip/xircom_cb.c @@ -252,7 +252,6 @@ static int __devinit xircom_probe(struct pci_dev *pdev, const struct pci_device_ goto tx_buf_fail; } - SET_MODULE_OWNER(dev); SET_NETDEV_DEV(dev, &pdev->dev); @@ -271,7 +270,6 @@ static int __devinit xircom_probe(struct pci_dev *pdev, const struct pci_device_ dev->hard_start_xmit = &xircom_start_xmit; dev->stop = &xircom_close; dev->get_stats = &xircom_get_stats; - dev->priv = private; #ifdef CONFIG_NET_POLL_CONTROLLER dev->poll_controller = &xircom_poll_controller; #endif @@ -1076,6 +1074,7 @@ static void read_mac_address(struct xircom_private *card) unsigned char j, tuple, link, data_id, data_count; unsigned long flags; int i; + DECLARE_MAC_BUF(mac); enter("read_mac_address"); @@ -1105,11 +1104,7 @@ static void read_mac_address(struct xircom_private *card) } } spin_unlock_irqrestore(&card->lock, flags); -#ifdef DEBUG - for (i = 0; i < 6; i++) - printk("%c%2.2X", i ? ':' : ' ', card->dev->dev_addr[i]); - printk("\n"); -#endif + pr_debug(" %s\n", print_mac(mac, card->dev->dev_addr)); leave("read_mac_address"); } diff --git a/drivers/net/tulip/xircom_tulip_cb.c b/drivers/net/tulip/xircom_tulip_cb.c index fc439f333350ded1eae790db6268e035e597a876..c3f8e303c6c7690b8ec7794cdcfa24798ee4dd03 100644 --- a/drivers/net/tulip/xircom_tulip_cb.c +++ b/drivers/net/tulip/xircom_tulip_cb.c @@ -547,7 +547,6 @@ static int __devinit xircom_init_one(struct pci_dev *pdev, const struct pci_devi printk (KERN_ERR DRV_NAME "%d: cannot alloc etherdev, aborting\n", board_idx); return -ENOMEM; } - SET_MODULE_OWNER(dev); SET_NETDEV_DEV(dev, &pdev->dev); dev->base_addr = ioaddr; diff --git a/drivers/net/tun.c b/drivers/net/tun.c index 62b2b3005019c90865ef0f019f16d8d2607089f0..1f76446959765ef577382517e7039c0ab044d5c3 100644 --- a/drivers/net/tun.c +++ b/drivers/net/tun.c @@ -62,6 +62,7 @@ #include #include #include +#include #include #include @@ -109,7 +110,7 @@ static int tun_net_xmit(struct sk_buff *skb, struct net_device *dev) /* We won't see all dropped packets individually, so overrun * error is more appropriate. */ - tun->stats.tx_fifo_errors++; + dev->stats.tx_fifo_errors++; } else { /* Single queue mode. 
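
The tun changes above (and the ucc_geth ones later in this patch) drop the driver-private struct net_device_stats together with the get_stats method and account directly into the stats member embedded in struct net_device; with no get_stats hook installed, the core hands out that embedded structure itself. A minimal transmit-path sketch with a placeholder name:

#include <linux/netdevice.h>
#include <linux/skbuff.h>

static int foo_xmit(struct sk_buff *skb, struct net_device *dev)
{
	unsigned int len = skb->len;

	/* ... hand the skb to the hardware or queue it (driver-specific) ... */

	dev->stats.tx_packets++;
	dev->stats.tx_bytes += len;
	return 0;			/* NETDEV_TX_OK */
}
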
* Driver handles dropping of all packets itself. */ @@ -128,7 +129,7 @@ static int tun_net_xmit(struct sk_buff *skb, struct net_device *dev) return 0; drop: - tun->stats.tx_dropped++; + dev->stats.tx_dropped++; kfree_skb(skb); return 0; } @@ -158,23 +159,28 @@ tun_net_mclist(struct net_device *dev) struct tun_struct *tun = netdev_priv(dev); const struct dev_mc_list *mclist; int i; + DECLARE_MAC_BUF(mac); DBG(KERN_DEBUG "%s: tun_net_mclist: mc_count %d\n", dev->name, dev->mc_count); memset(tun->chr_filter, 0, sizeof tun->chr_filter); for (i = 0, mclist = dev->mc_list; i < dev->mc_count && mclist != NULL; i++, mclist = mclist->next) { add_multi(tun->net_filter, mclist->dmi_addr); - DBG(KERN_DEBUG "%s: tun_net_mclist: %x:%x:%x:%x:%x:%x\n", - dev->name, - mclist->dmi_addr[0], mclist->dmi_addr[1], mclist->dmi_addr[2], - mclist->dmi_addr[3], mclist->dmi_addr[4], mclist->dmi_addr[5]); + DBG(KERN_DEBUG "%s: tun_net_mclist: %s\n", + dev->name, print_mac(mac, mclist->dmi_addr)); } } -static struct net_device_stats *tun_net_stats(struct net_device *dev) +#define MIN_MTU 68 +#define MAX_MTU 65535 + +static int +tun_net_change_mtu(struct net_device *dev, int new_mtu) { - struct tun_struct *tun = netdev_priv(dev); - return &tun->stats; + if (new_mtu < MIN_MTU || new_mtu + dev->hard_header_len > MAX_MTU) + return -EINVAL; + dev->mtu = new_mtu; + return 0; } /* Initialize net device. */ @@ -188,6 +194,7 @@ static void tun_net_init(struct net_device *dev) dev->hard_header_len = 0; dev->addr_len = 0; dev->mtu = 1500; + dev->change_mtu = tun_net_change_mtu; /* Zero header length */ dev->type = ARPHRD_NONE; @@ -200,6 +207,7 @@ static void tun_net_init(struct net_device *dev) dev->set_multicast_list = tun_net_mclist; ether_setup(dev); + dev->change_mtu = tun_net_change_mtu; /* random address already created for us by tun_set_iff, use it */ memcpy(dev->dev_addr, tun->dev_addr, min(sizeof(tun->dev_addr), sizeof(dev->dev_addr)) ); @@ -249,14 +257,14 @@ static __inline__ ssize_t tun_get_user(struct tun_struct *tun, struct iovec *iv, align = NET_IP_ALIGN; if (!(skb = alloc_skb(len + align, GFP_KERNEL))) { - tun->stats.rx_dropped++; + tun->dev->stats.rx_dropped++; return -ENOMEM; } if (align) skb_reserve(skb, align); if (memcpy_fromiovec(skb_put(skb, len), iv, len)) { - tun->stats.rx_dropped++; + tun->dev->stats.rx_dropped++; kfree_skb(skb); return -EFAULT; } @@ -278,8 +286,8 @@ static __inline__ ssize_t tun_get_user(struct tun_struct *tun, struct iovec *iv, netif_rx_ni(skb); tun->dev->last_rx = jiffies; - tun->stats.rx_packets++; - tun->stats.rx_bytes += len; + tun->dev->stats.rx_packets++; + tun->dev->stats.rx_bytes += len; return count; } @@ -335,8 +343,8 @@ static __inline__ ssize_t tun_put_user(struct tun_struct *tun, skb_copy_datagram_iovec(skb, 0, iv, len); total += len; - tun->stats.tx_packets++; - tun->stats.tx_bytes += len; + tun->dev->stats.tx_packets++; + tun->dev->stats.tx_bytes += len; return total; } @@ -349,6 +357,7 @@ static ssize_t tun_chr_aio_read(struct kiocb *iocb, const struct iovec *iv, DECLARE_WAITQUEUE(wait, current); struct sk_buff *skb; ssize_t len, ret = 0; + DECLARE_MAC_BUF(mac); if (!tun) return -EBADFD; @@ -403,16 +412,14 @@ static ssize_t tun_chr_aio_read(struct kiocb *iocb, const struct iovec *iv, (addr[0] == 0x33 && addr[1] == 0x33)) && ((tun->if_flags & IFF_ALLMULTI) || (tun->chr_filter[bit_nr >> 5] & (1 << (bit_nr & 31)))))) { - DBG(KERN_DEBUG "%s: tun_chr_readv: accepted: %x:%x:%x:%x:%x:%x\n", - tun->dev->name, addr[0], addr[1], addr[2], - addr[3], addr[4], addr[5]); + 
DBG(KERN_DEBUG "%s: tun_chr_readv: accepted: %s\n", + tun->dev->name, print_mac(mac, addr)); ret = tun_put_user(tun, skb, (struct iovec *) iv, len); kfree_skb(skb); break; } else { - DBG(KERN_DEBUG "%s: tun_chr_readv: rejected: %x:%x:%x:%x:%x:%x\n", - tun->dev->name, addr[0], addr[1], addr[2], - addr[3], addr[4], addr[5]); + DBG(KERN_DEBUG "%s: tun_chr_readv: rejected: %s\n", + tun->dev->name, print_mac(mac, addr)); kfree_skb(skb); continue; } @@ -434,11 +441,9 @@ static void tun_setup(struct net_device *dev) tun->owner = -1; tun->group = -1; - SET_MODULE_OWNER(dev); dev->open = tun_net_open; dev->hard_start_xmit = tun_net_xmit; dev->stop = tun_net_close; - dev->get_stats = tun_net_stats; dev->ethtool_ops = &tun_ethtool_ops; dev->destructor = free_netdev; } @@ -475,7 +480,7 @@ static int tun_set_iff(struct file *file, struct ifreq *ifr) !capable(CAP_NET_ADMIN)) return -EPERM; } - else if (__dev_get_by_name(ifr->ifr_name)) + else if (__dev_get_by_name(&init_net, ifr->ifr_name)) return -EINVAL; else { char *name; @@ -557,6 +562,7 @@ static int tun_chr_ioctl(struct inode *inode, struct file *file, struct tun_struct *tun = file->private_data; void __user* argp = (void __user*)arg; struct ifreq ifr; + DECLARE_MAC_BUF(mac); if (cmd == TUNSETIFF || _IOC_TYPE(cmd) == 0x89) if (copy_from_user(&ifr, argp, sizeof ifr)) @@ -685,22 +691,16 @@ static int tun_chr_ioctl(struct inode *inode, struct file *file, /** Add the specified group to the character device's multicast filter * list. */ add_multi(tun->chr_filter, ifr.ifr_hwaddr.sa_data); - DBG(KERN_DEBUG "%s: add multi: %x:%x:%x:%x:%x:%x\n", - tun->dev->name, - (u8)ifr.ifr_hwaddr.sa_data[0], (u8)ifr.ifr_hwaddr.sa_data[1], - (u8)ifr.ifr_hwaddr.sa_data[2], (u8)ifr.ifr_hwaddr.sa_data[3], - (u8)ifr.ifr_hwaddr.sa_data[4], (u8)ifr.ifr_hwaddr.sa_data[5]); + DBG(KERN_DEBUG "%s: add multi: %s\n", + tun->dev->name, print_mac(mac, ifr.ifr_hwaddr.sa_data)); return 0; case SIOCDELMULTI: /** Remove the specified group from the character device's multicast * filter list. 
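
tun_net_change_mtu() earlier in this file is the usual shape of a change_mtu hook: check the requested size against the device's limits, then update dev->mtu. It is assigned separately in both tun_net_init() branches because ether_setup() installs its own eth_change_mtu handler in the TAP case. The hook in isolation, with placeholder limits:

#include <linux/netdevice.h>
#include <linux/errno.h>

#define FOO_MIN_MTU	68		/* minimum IPv4 MTU */
#define FOO_MAX_MTU	65535

static int foo_change_mtu(struct net_device *dev, int new_mtu)
{
	if (new_mtu < FOO_MIN_MTU ||
	    new_mtu + dev->hard_header_len > FOO_MAX_MTU)
		return -EINVAL;
	dev->mtu = new_mtu;
	return 0;
}
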
*/ del_multi(tun->chr_filter, ifr.ifr_hwaddr.sa_data); - DBG(KERN_DEBUG "%s: del multi: %x:%x:%x:%x:%x:%x\n", - tun->dev->name, - (u8)ifr.ifr_hwaddr.sa_data[0], (u8)ifr.ifr_hwaddr.sa_data[1], - (u8)ifr.ifr_hwaddr.sa_data[2], (u8)ifr.ifr_hwaddr.sa_data[3], - (u8)ifr.ifr_hwaddr.sa_data[4], (u8)ifr.ifr_hwaddr.sa_data[5]); + DBG(KERN_DEBUG "%s: del multi: %s\n", + tun->dev->name, print_mac(mac, ifr.ifr_hwaddr.sa_data)); return 0; default: diff --git a/drivers/net/typhoon.c b/drivers/net/typhoon.c index 03587205546e71260ad81c10c76dee6d0674d719..72e5e9be7e995ec51c0bb779ee1699d509e09621 100644 --- a/drivers/net/typhoon.c +++ b/drivers/net/typhoon.c @@ -284,6 +284,7 @@ struct typhoon { struct basic_ring rxLoRing; struct pci_dev * pdev; struct net_device * dev; + struct napi_struct napi; spinlock_t state_lock; struct vlan_group * vlgrp; struct basic_ring rxHiRing; @@ -299,9 +300,9 @@ struct typhoon { const char * name; struct typhoon_shared * shared; dma_addr_t shared_dma; - u16 xcvr_select; - u16 wol_events; - u32 offload; + __le16 xcvr_select; + __le16 wol_events; + __le32 offload; /* unused stuff (future use) */ int capabilities; @@ -827,7 +828,7 @@ typhoon_start_tx(struct sk_buff *skb, struct net_device *dev) first_txd->processFlags |= TYPHOON_TX_PF_INSERT_VLAN | TYPHOON_TX_PF_VLAN_PRIORITY; first_txd->processFlags |= - cpu_to_le32(htons(vlan_tx_tag_get(skb)) << + cpu_to_le32(ntohs(vlan_tx_tag_get(skb)) << TYPHOON_TX_PF_VLAN_TAG_SHIFT); } @@ -919,7 +920,7 @@ typhoon_set_rx_mode(struct net_device *dev) struct typhoon *tp = netdev_priv(dev); struct cmd_desc xp_cmd; u32 mc_filter[2]; - u16 filter; + __le16 filter; filter = TYPHOON_RX_FILTER_DIRECTED | TYPHOON_RX_FILTER_BROADCAST; if(dev->flags & IFF_PROMISC) { @@ -1130,7 +1131,7 @@ typhoon_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) { struct typhoon *tp = netdev_priv(dev); struct cmd_desc xp_cmd; - int xcvr; + __le16 xcvr; int err; err = -EINVAL; @@ -1235,11 +1236,8 @@ static const struct ethtool_ops typhoon_ethtool_ops = { .set_wol = typhoon_set_wol, .get_link = ethtool_op_get_link, .get_rx_csum = typhoon_get_rx_csum, - .get_tx_csum = ethtool_op_get_tx_csum, .set_tx_csum = ethtool_op_set_tx_csum, - .get_sg = ethtool_op_get_sg, .set_sg = ethtool_op_set_sg, - .get_tso = ethtool_op_get_tso, .set_tso = ethtool_op_set_tso, .get_ringparam = typhoon_get_ringparam, }; @@ -1538,7 +1536,7 @@ typhoon_boot_3XP(struct typhoon *tp, u32 initial_status) static u32 typhoon_clean_tx(struct typhoon *tp, struct transmit_ring *txRing, - volatile u32 * index) + volatile __le32 * index) { u32 lastRead = txRing->lastRead; struct tx_desc *tx; @@ -1574,7 +1572,7 @@ typhoon_clean_tx(struct typhoon *tp, struct transmit_ring *txRing, static void typhoon_tx_complete(struct typhoon *tp, struct transmit_ring *txRing, - volatile u32 * index) + volatile __le32 * index) { u32 lastRead; int numDesc = MAX_SKB_FRAGS + 1; @@ -1664,8 +1662,8 @@ typhoon_alloc_rx_skb(struct typhoon *tp, u32 idx) } static int -typhoon_rx(struct typhoon *tp, struct basic_ring *rxRing, volatile u32 * ready, - volatile u32 * cleared, int budget) +typhoon_rx(struct typhoon *tp, struct basic_ring *rxRing, volatile __le32 * ready, + volatile __le32 * cleared, int budget) { struct rx_desc *rx; struct sk_buff *skb, *new_skb; @@ -1675,7 +1673,7 @@ typhoon_rx(struct typhoon *tp, struct basic_ring *rxRing, volatile u32 * ready, u32 rxaddr; int pkt_len; u32 idx; - u32 csum_bits; + __le32 csum_bits; int received; received = 0; @@ -1759,12 +1757,12 @@ typhoon_fill_free_ring(struct typhoon *tp) } 
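
Registration follows the same recipe in every driver converted here (tsi108, tulip, typhoon just below, ucc_geth): the old dev->poll/dev->weight pair becomes one netif_napi_add() call at probe time, napi_enable() is added to the open path before receive interrupts can fire, and napi_disable() to the close and error paths. A sketch of that glue, reusing the placeholder foo_priv/foo_poll from the earlier sketches:

static int foo_register(struct net_device *dev)
{
	struct foo_priv *priv = netdev_priv(dev);

	netif_napi_add(dev, &priv->napi, foo_poll, 16);	/* 16 = old dev->weight */
	return register_netdev(dev);
}

static int foo_open(struct net_device *dev)
{
	struct foo_priv *priv = netdev_priv(dev);

	napi_enable(&priv->napi);
	/* ... request IRQ, start the hardware ... */
	netif_start_queue(dev);
	return 0;
}

static int foo_close(struct net_device *dev)
{
	struct foo_priv *priv = netdev_priv(dev);

	netif_stop_queue(dev);
	napi_disable(&priv->napi);	/* waits for a poll in progress */
	/* ... stop the hardware, free the IRQ ... */
	return 0;
}
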
static int -typhoon_poll(struct net_device *dev, int *total_budget) +typhoon_poll(struct napi_struct *napi, int budget) { - struct typhoon *tp = netdev_priv(dev); + struct typhoon *tp = container_of(napi, struct typhoon, napi); + struct net_device *dev = tp->dev; struct typhoon_indexes *indexes = tp->indexes; - int orig_budget = *total_budget; - int budget, work_done, done; + int work_done; rmb(); if(!tp->awaiting_resp && indexes->respReady != indexes->respCleared) @@ -1773,30 +1771,16 @@ typhoon_poll(struct net_device *dev, int *total_budget) if(le32_to_cpu(indexes->txLoCleared) != tp->txLoRing.lastRead) typhoon_tx_complete(tp, &tp->txLoRing, &indexes->txLoCleared); - if(orig_budget > dev->quota) - orig_budget = dev->quota; - - budget = orig_budget; work_done = 0; - done = 1; if(indexes->rxHiCleared != indexes->rxHiReady) { - work_done = typhoon_rx(tp, &tp->rxHiRing, &indexes->rxHiReady, + work_done += typhoon_rx(tp, &tp->rxHiRing, &indexes->rxHiReady, &indexes->rxHiCleared, budget); - budget -= work_done; } if(indexes->rxLoCleared != indexes->rxLoReady) { work_done += typhoon_rx(tp, &tp->rxLoRing, &indexes->rxLoReady, - &indexes->rxLoCleared, budget); - } - - if(work_done) { - *total_budget -= work_done; - dev->quota -= work_done; - - if(work_done >= orig_budget) - done = 0; + &indexes->rxLoCleared, budget - work_done); } if(le32_to_cpu(indexes->rxBuffCleared) == tp->rxBuffRing.lastWrite) { @@ -1804,14 +1788,14 @@ typhoon_poll(struct net_device *dev, int *total_budget) typhoon_fill_free_ring(tp); } - if(done) { - netif_rx_complete(dev); + if (work_done < budget) { + netif_rx_complete(dev, napi); iowrite32(TYPHOON_INTR_NONE, tp->ioaddr + TYPHOON_REG_INTR_MASK); typhoon_post_pci_writes(tp->ioaddr); } - return (done ? 0 : 1); + return work_done; } static irqreturn_t @@ -1828,10 +1812,10 @@ typhoon_interrupt(int irq, void *dev_instance) iowrite32(intr_status, ioaddr + TYPHOON_REG_INTR_STATUS); - if(netif_rx_schedule_prep(dev)) { + if (netif_rx_schedule_prep(dev, &tp->napi)) { iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_MASK); typhoon_post_pci_writes(ioaddr); - __netif_rx_schedule(dev); + __netif_rx_schedule(dev, &tp->napi); } else { printk(KERN_ERR "%s: Error, poll already scheduled\n", dev->name); @@ -1856,7 +1840,7 @@ typhoon_free_rx_rings(struct typhoon *tp) } static int -typhoon_sleep(struct typhoon *tp, pci_power_t state, u16 events) +typhoon_sleep(struct typhoon *tp, pci_power_t state, __le16 events) { struct pci_dev *pdev = tp->pdev; void __iomem *ioaddr = tp->ioaddr; @@ -1944,8 +1928,8 @@ typhoon_start_runtime(struct typhoon *tp) goto error_out; INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_SET_MAC_ADDRESS); - xp_cmd.parm1 = cpu_to_le16(ntohs(*(u16 *)&dev->dev_addr[0])); - xp_cmd.parm2 = cpu_to_le32(ntohl(*(u32 *)&dev->dev_addr[2])); + xp_cmd.parm1 = cpu_to_le16(ntohs(*(__be16 *)&dev->dev_addr[0])); + xp_cmd.parm2 = cpu_to_le32(ntohl(*(__be32 *)&dev->dev_addr[2])); err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL); if(err < 0) goto error_out; @@ -2119,9 +2103,13 @@ typhoon_open(struct net_device *dev) if(err < 0) goto out_sleep; + napi_enable(&tp->napi); + err = typhoon_start_runtime(tp); - if(err < 0) + if(err < 0) { + napi_disable(&tp->napi); goto out_irq; + } netif_start_queue(dev); return 0; @@ -2150,6 +2138,7 @@ typhoon_close(struct net_device *dev) struct typhoon *tp = netdev_priv(dev); netif_stop_queue(dev); + napi_disable(&tp->napi); if(typhoon_stop_runtime(tp, WaitSleep) < 0) printk(KERN_ERR "%s: unable to stop runtime\n", dev->name); @@ -2240,8 +2229,8 @@ 
typhoon_suspend(struct pci_dev *pdev, pm_message_t state) } INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_SET_MAC_ADDRESS); - xp_cmd.parm1 = cpu_to_le16(ntohs(*(u16 *)&dev->dev_addr[0])); - xp_cmd.parm2 = cpu_to_le32(ntohl(*(u32 *)&dev->dev_addr[2])); + xp_cmd.parm1 = cpu_to_le16(ntohs(*(__be16 *)&dev->dev_addr[0])); + xp_cmd.parm2 = cpu_to_le32(ntohl(*(__be32 *)&dev->dev_addr[2])); if(typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL) < 0) { printk(KERN_ERR "%s: unable to set mac address in suspend\n", dev->name); @@ -2327,8 +2316,8 @@ typhoon_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) dma_addr_t shared_dma; struct cmd_desc xp_cmd; struct resp_desc xp_resp[3]; - int i; int err = 0; + DECLARE_MAC_BUF(mac); if(!did_version++) printk(KERN_INFO "%s", version); @@ -2340,7 +2329,6 @@ typhoon_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) err = -ENOMEM; goto error_out; } - SET_MODULE_OWNER(dev); SET_NETDEV_DEV(dev, &pdev->dev); err = pci_enable_device(pdev); @@ -2477,8 +2465,8 @@ typhoon_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) goto error_out_reset; } - *(u16 *)&dev->dev_addr[0] = htons(le16_to_cpu(xp_resp[0].parm1)); - *(u32 *)&dev->dev_addr[2] = htonl(le32_to_cpu(xp_resp[0].parm2)); + *(__be16 *)&dev->dev_addr[0] = htons(le16_to_cpu(xp_resp[0].parm1)); + *(__be32 *)&dev->dev_addr[2] = htonl(le32_to_cpu(xp_resp[0].parm2)); if(!is_valid_ether_addr(dev->dev_addr)) { printk(ERR_PFX "%s: Could not obtain valid ethernet address, " @@ -2521,8 +2509,7 @@ typhoon_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) dev->stop = typhoon_close; dev->set_multicast_list = typhoon_set_rx_mode; dev->tx_timeout = typhoon_tx_timeout; - dev->poll = typhoon_poll; - dev->weight = 16; + netif_napi_add(dev, &tp->napi, typhoon_poll, 16); dev->watchdog_timeo = TX_TIMEOUT; dev->get_stats = typhoon_get_stats; dev->set_mac_address = typhoon_set_mac_address; @@ -2545,13 +2532,11 @@ typhoon_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) pci_set_drvdata(pdev, dev); - printk(KERN_INFO "%s: %s at %s 0x%llx, ", + printk(KERN_INFO "%s: %s at %s 0x%llx, %s\n", dev->name, typhoon_card_info[card_id].name, use_mmio ? "MMIO" : "IO", - (unsigned long long)pci_resource_start(pdev, use_mmio)); - for(i = 0; i < 5; i++) - printk("%2.2x:", dev->dev_addr[i]); - printk("%2.2x\n", dev->dev_addr[i]); + (unsigned long long)pci_resource_start(pdev, use_mmio), + print_mac(mac, dev->dev_addr)); /* xp_resp still contains the response to the READ_VERSIONS command. * For debugging, let the user know what version he has. 
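
The typhoon MAC-address hunks above are a compact example of why the annotations matter: the command-response parameters are little-endian, while dev_addr[] must hold the bytes in wire (big-endian) order, so the value is converted le->cpu and then cpu->be before being stored through a __be16/__be32 pointer, and the SET_MAC_ADDRESS command in typhoon_start_runtime()/typhoon_suspend() runs the same chain in reverse. In isolation, with hypothetical parameter names:

#include <linux/types.h>
#include <asm/byteorder.h>

/* parm1/parm2 stand for little-endian fields of a command response;
 * addr[] receives the six address bytes in wire order. */
static void example_store_station_addr(u8 *addr, __le16 parm1, __le32 parm2)
{
	*(__be16 *)&addr[0] = htons(le16_to_cpu(parm1));
	*(__be32 *)&addr[2] = htonl(le32_to_cpu(parm2));
}
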
diff --git a/drivers/net/typhoon.h b/drivers/net/typhoon.h index 2f14a050051ba639054cdd18f36c51c773de18bf..19df20889b820a14c6a4039928d96fa2386db44a 100644 --- a/drivers/net/typhoon.h +++ b/drivers/net/typhoon.h @@ -64,19 +64,19 @@ struct transmit_ring { */ struct typhoon_indexes { /* The first four are written by the host, and read by the NIC */ - volatile u32 rxHiCleared; - volatile u32 rxLoCleared; - volatile u32 rxBuffReady; - volatile u32 respCleared; + volatile __le32 rxHiCleared; + volatile __le32 rxLoCleared; + volatile __le32 rxBuffReady; + volatile __le32 respCleared; /* The remaining are written by the NIC, and read by the host */ - volatile u32 txLoCleared; - volatile u32 txHiCleared; - volatile u32 rxLoReady; - volatile u32 rxBuffCleared; - volatile u32 cmdCleared; - volatile u32 respReady; - volatile u32 rxHiReady; + volatile __le32 txLoCleared; + volatile __le32 txHiCleared; + volatile __le32 rxLoReady; + volatile __u32 rxBuffCleared; /* AV: really? */ + volatile __le32 cmdCleared; + volatile __le32 respReady; + volatile __le32 rxHiReady; } __attribute__ ((packed)); /* The host<->Typhoon interface @@ -100,31 +100,31 @@ struct typhoon_indexes { * be zero. */ struct typhoon_interface { - u32 ringIndex; - u32 ringIndexHi; - u32 txLoAddr; - u32 txLoAddrHi; - u32 txLoSize; - u32 txHiAddr; - u32 txHiAddrHi; - u32 txHiSize; - u32 rxLoAddr; - u32 rxLoAddrHi; - u32 rxLoSize; - u32 rxBuffAddr; - u32 rxBuffAddrHi; - u32 rxBuffSize; - u32 cmdAddr; - u32 cmdAddrHi; - u32 cmdSize; - u32 respAddr; - u32 respAddrHi; - u32 respSize; - u32 zeroAddr; - u32 zeroAddrHi; - u32 rxHiAddr; - u32 rxHiAddrHi; - u32 rxHiSize; + __le32 ringIndex; + __le32 ringIndexHi; + __le32 txLoAddr; + __le32 txLoAddrHi; + __le32 txLoSize; + __le32 txHiAddr; + __le32 txHiAddrHi; + __le32 txHiSize; + __le32 rxLoAddr; + __le32 rxLoAddrHi; + __le32 rxLoSize; + __le32 rxBuffAddr; + __le32 rxBuffAddrHi; + __le32 rxBuffSize; + __le32 cmdAddr; + __le32 cmdAddrHi; + __le32 cmdSize; + __le32 respAddr; + __le32 respAddrHi; + __le32 respSize; + __le32 zeroAddr; + __le32 zeroAddrHi; + __le32 rxHiAddr; + __le32 rxHiAddrHi; + __le32 rxHiSize; } __attribute__ ((packed)); /* The Typhoon transmit/fragment descriptor @@ -165,10 +165,10 @@ struct tx_desc { #define TYPHOON_RX_ERROR 0x40 #define TYPHOON_DESC_VALID 0x80 u8 numDesc; - u16 len; + __le16 len; u32 addr; u32 addrHi; - u32 processFlags; + __le32 processFlags; #define TYPHOON_TX_PF_NO_CRC __constant_cpu_to_le32(0x00000001) #define TYPHOON_TX_PF_IP_CHKSUM __constant_cpu_to_le32(0x00000002) #define TYPHOON_TX_PF_TCP_CHKSUM __constant_cpu_to_le32(0x00000004) @@ -197,12 +197,12 @@ struct tx_desc { struct tcpopt_desc { u8 flags; u8 numDesc; - u16 mss_flags; + __le16 mss_flags; #define TYPHOON_TSO_FIRST __constant_cpu_to_le16(0x1000) #define TYPHOON_TSO_LAST __constant_cpu_to_le16(0x2000) - u32 respAddrLo; - u32 bytesTx; - u32 status; + __le32 respAddrLo; + __le32 bytesTx; + __le32 status; } __attribute__ ((packed)); /* The IPSEC Offload descriptor @@ -216,12 +216,12 @@ struct tcpopt_desc { struct ipsec_desc { u8 flags; u8 numDesc; - u16 ipsecFlags; + __le16 ipsecFlags; #define TYPHOON_IPSEC_GEN_IV __constant_cpu_to_le16(0x0000) #define TYPHOON_IPSEC_USE_IV __constant_cpu_to_le16(0x0001) - u32 sa1; - u32 sa2; - u32 reserved; + __le32 sa1; + __le32 sa2; + __le32 reserved; } __attribute__ ((packed)); /* The Typhoon receive descriptor (Updated by NIC) @@ -239,10 +239,10 @@ struct ipsec_desc { struct rx_desc { u8 flags; u8 numDesc; - u16 frameLen; + __le16 frameLen; u32 addr; u32 addrHi; - 
u32 rxStatus; + __le32 rxStatus; #define TYPHOON_RX_ERR_INTERNAL __constant_cpu_to_le32(0x00000000) #define TYPHOON_RX_ERR_FIFO_UNDERRUN __constant_cpu_to_le32(0x00000001) #define TYPHOON_RX_ERR_BAD_SSD __constant_cpu_to_le32(0x00000002) @@ -264,10 +264,10 @@ struct rx_desc { #define TYPHOON_RX_IP_CHK_GOOD __constant_cpu_to_le32(0x00000100) #define TYPHOON_RX_TCP_CHK_GOOD __constant_cpu_to_le32(0x00000200) #define TYPHOON_RX_UDP_CHK_GOOD __constant_cpu_to_le32(0x00000400) - u16 filterResults; + __le16 filterResults; #define TYPHOON_RX_FILTER_MASK __constant_cpu_to_le16(0x7fff) #define TYPHOON_RX_FILTERED __constant_cpu_to_le16(0x8000) - u16 ipsecResults; + __le16 ipsecResults; #define TYPHOON_RX_OUTER_AH_GOOD __constant_cpu_to_le16(0x0001) #define TYPHOON_RX_OUTER_ESP_GOOD __constant_cpu_to_le16(0x0002) #define TYPHOON_RX_INNER_AH_GOOD __constant_cpu_to_le16(0x0004) @@ -278,7 +278,7 @@ struct rx_desc { #define TYPHOON_RX_INNER_ESP_FAIL __constant_cpu_to_le16(0x0080) #define TYPHOON_RX_UNKNOWN_SA __constant_cpu_to_le16(0x0100) #define TYPHOON_RX_ESP_FORMAT_ERR __constant_cpu_to_le16(0x0200) - u32 vlanTag; + __be32 vlanTag; } __attribute__ ((packed)); /* The Typhoon free buffer descriptor, used to give a buffer to the NIC @@ -292,8 +292,8 @@ struct rx_desc { * from the NIC */ struct rx_free { - u32 physAddr; - u32 physAddrHi; + __le32 physAddr; + __le32 physAddrHi; u32 virtAddr; u32 virtAddrHi; } __attribute__ ((packed)); @@ -312,7 +312,7 @@ struct rx_free { struct cmd_desc { u8 flags; u8 numDesc; - u16 cmd; + __le16 cmd; #define TYPHOON_CMD_TX_ENABLE __constant_cpu_to_le16(0x0001) #define TYPHOON_CMD_TX_DISABLE __constant_cpu_to_le16(0x0002) #define TYPHOON_CMD_RX_ENABLE __constant_cpu_to_le16(0x0003) @@ -339,9 +339,9 @@ struct cmd_desc { #define TYPHOON_CMD_GET_IPSEC_ENABLE __constant_cpu_to_le16(0x0067) #define TYPHOON_CMD_GET_CMD_LVL __constant_cpu_to_le16(0x0069) u16 seqNo; - u16 parm1; - u32 parm2; - u32 parm3; + __le16 parm1; + __le32 parm2; + __le32 parm3; } __attribute__ ((packed)); /* The Typhoon response descriptor, see command descriptor for details @@ -349,11 +349,11 @@ struct cmd_desc { struct resp_desc { u8 flags; u8 numDesc; - u16 cmd; - u16 seqNo; - u16 parm1; - u32 parm2; - u32 parm3; + __le16 cmd; + __le16 seqNo; + __le16 parm1; + __le32 parm2; + __le32 parm3; } __attribute__ ((packed)); #define INIT_COMMAND_NO_RESPONSE(x, command) \ @@ -386,31 +386,31 @@ struct resp_desc { struct stats_resp { u8 flags; u8 numDesc; - u16 cmd; - u16 seqNo; - u16 unused; - u32 txPackets; - u64 txBytes; - u32 txDeferred; - u32 txLateCollisions; - u32 txCollisions; - u32 txCarrierLost; - u32 txMultipleCollisions; - u32 txExcessiveCollisions; - u32 txFifoUnderruns; - u32 txMulticastTxOverflows; - u32 txFiltered; - u32 rxPacketsGood; - u64 rxBytesGood; - u32 rxFifoOverruns; - u32 BadSSD; - u32 rxCrcErrors; - u32 rxOversized; - u32 rxBroadcast; - u32 rxMulticast; - u32 rxOverflow; - u32 rxFiltered; - u32 linkStatus; + __le16 cmd; + __le16 seqNo; + __le16 unused; + __le32 txPackets; + __le64 txBytes; + __le32 txDeferred; + __le32 txLateCollisions; + __le32 txCollisions; + __le32 txCarrierLost; + __le32 txMultipleCollisions; + __le32 txExcessiveCollisions; + __le32 txFifoUnderruns; + __le32 txMulticastTxOverflows; + __le32 txFiltered; + __le32 rxPacketsGood; + __le64 rxBytesGood; + __le32 rxFifoOverruns; + __le32 BadSSD; + __le32 rxCrcErrors; + __le32 rxOversized; + __le32 rxBroadcast; + __le32 rxMulticast; + __le32 rxOverflow; + __le32 rxFiltered; + __le32 linkStatus; #define 
TYPHOON_LINK_STAT_MASK __constant_cpu_to_le32(0x00000001) #define TYPHOON_LINK_GOOD __constant_cpu_to_le32(0x00000001) #define TYPHOON_LINK_BAD __constant_cpu_to_le32(0x00000000) @@ -420,8 +420,8 @@ struct stats_resp { #define TYPHOON_LINK_DUPLEX_MASK __constant_cpu_to_le32(0x00000004) #define TYPHOON_LINK_FULL_DUPLEX __constant_cpu_to_le32(0x00000004) #define TYPHOON_LINK_HALF_DUPLEX __constant_cpu_to_le32(0x00000000) - u32 unused2; - u32 unused3; + __le32 unused2; + __le32 unused3; } __attribute__ ((packed)); /* TYPHOON_CMD_XCVR_SELECT xcvr values (resp.parm1) @@ -509,17 +509,17 @@ struct sa_descriptor { */ struct typhoon_file_header { u8 tag[8]; - u32 version; - u32 numSections; - u32 startAddr; - u32 hmacDigest[5]; + __le32 version; + __le32 numSections; + __le32 startAddr; + __le32 hmacDigest[5]; } __attribute__ ((packed)); struct typhoon_section_header { - u32 len; + __le32 len; u16 checksum; u16 reserved; - u32 startAddr; + __le32 startAddr; } __attribute__ ((packed)); /* The Typhoon Register offsets diff --git a/drivers/net/ucc_geth.c b/drivers/net/ucc_geth.c index 9a38dfe45f8f2c9cc316a59901a6627e6275b0e9..9667dac383f04bd6e9e959852c744cac6f0fa720 100644 --- a/drivers/net/ucc_geth.c +++ b/drivers/net/ucc_geth.c @@ -3350,14 +3350,6 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth) return 0; } -/* returns a net_device_stats structure pointer */ -static struct net_device_stats *ucc_geth_get_stats(struct net_device *dev) -{ - struct ucc_geth_private *ugeth = netdev_priv(dev); - - return &(ugeth->stats); -} - /* ucc_geth_timeout gets called when a packet has not been * transmitted after a set amount of time. * For now, assume that clearing out all the structures, and @@ -3368,7 +3360,7 @@ static void ucc_geth_timeout(struct net_device *dev) ugeth_vdbg("%s: IN", __FUNCTION__); - ugeth->stats.tx_errors++; + dev->stats.tx_errors++; ugeth_dump_regs(ugeth); @@ -3396,7 +3388,7 @@ static int ucc_geth_start_xmit(struct sk_buff *skb, struct net_device *dev) spin_lock_irq(&ugeth->lock); - ugeth->stats.tx_bytes += skb->len; + dev->stats.tx_bytes += skb->len; /* Start from the next BD that should be filled */ bd = ugeth->txBd[txQ]; @@ -3488,9 +3480,9 @@ static int ucc_geth_rx(struct ucc_geth_private *ugeth, u8 rxQ, int rx_work_limit dev_kfree_skb_any(skb); ugeth->rx_skbuff[rxQ][ugeth->skb_currx[rxQ]] = NULL; - ugeth->stats.rx_dropped++; + dev->stats.rx_dropped++; } else { - ugeth->stats.rx_packets++; + dev->stats.rx_packets++; howmany++; /* Prep the skb for the packet */ @@ -3499,7 +3491,7 @@ static int ucc_geth_rx(struct ucc_geth_private *ugeth, u8 rxQ, int rx_work_limit /* Tell the skb what kind of packet this is */ skb->protocol = eth_type_trans(skb, ugeth->dev); - ugeth->stats.rx_bytes += length; + dev->stats.rx_bytes += length; /* Send the packet up the stack */ #ifdef CONFIG_UGETH_NAPI netif_receive_skb(skb); @@ -3514,7 +3506,7 @@ static int ucc_geth_rx(struct ucc_geth_private *ugeth, u8 rxQ, int rx_work_limit if (!skb) { if (netif_msg_rx_err(ugeth)) ugeth_warn("%s: No Rx Data Buffer", __FUNCTION__); - ugeth->stats.rx_dropped++; + dev->stats.rx_dropped++; break; } @@ -3556,7 +3548,7 @@ static int ucc_geth_tx(struct net_device *dev, u8 txQ) if ((bd == ugeth->txBd[txQ]) && (netif_queue_stopped(dev) == 0)) break; - ugeth->stats.tx_packets++; + dev->stats.tx_packets++; /* Free the sk buffer associated with this TxBD */ dev_kfree_skb_irq(ugeth-> @@ -3582,41 +3574,31 @@ static int ucc_geth_tx(struct net_device *dev, u8 txQ) } #ifdef CONFIG_UGETH_NAPI -static int ucc_geth_poll(struct 
net_device *dev, int *budget) +static int ucc_geth_poll(struct napi_struct *napi, int budget) { - struct ucc_geth_private *ugeth = netdev_priv(dev); + struct ucc_geth_private *ugeth = container_of(napi, struct ucc_geth_private, napi); + struct net_device *dev = ugeth->dev; struct ucc_geth_info *ug_info; - struct ucc_fast_private *uccf; - int howmany; - u8 i; - int rx_work_limit; - register u32 uccm; + int howmany, i; ug_info = ugeth->ug_info; - rx_work_limit = *budget; - if (rx_work_limit > dev->quota) - rx_work_limit = dev->quota; - howmany = 0; + for (i = 0; i < ug_info->numQueuesRx; i++) + howmany += ucc_geth_rx(ugeth, i, budget - howmany); - for (i = 0; i < ug_info->numQueuesRx; i++) { - howmany += ucc_geth_rx(ugeth, i, rx_work_limit); - } - - dev->quota -= howmany; - rx_work_limit -= howmany; - *budget -= howmany; + if (howmany < budget) { + struct ucc_fast_private *uccf; + u32 uccm; - if (rx_work_limit > 0) { - netif_rx_complete(dev); + netif_rx_complete(dev, napi); uccf = ugeth->uccf; uccm = in_be32(uccf->p_uccm); uccm |= UCCE_RX_EVENTS; out_be32(uccf->p_uccm, uccm); } - return (rx_work_limit > 0) ? 0 : 1; + return howmany; } #endif /* CONFIG_UGETH_NAPI */ @@ -3651,10 +3633,10 @@ static irqreturn_t ucc_geth_irq_handler(int irq, void *info) /* check for receive events that require processing */ if (ucce & UCCE_RX_EVENTS) { #ifdef CONFIG_UGETH_NAPI - if (netif_rx_schedule_prep(dev)) { - uccm &= ~UCCE_RX_EVENTS; + if (netif_rx_schedule_prep(dev, &ugeth->napi)) { + uccm &= ~UCCE_RX_EVENTS; out_be32(uccf->p_uccm, uccm); - __netif_rx_schedule(dev); + __netif_rx_schedule(dev, &ugeth->napi); } #else rx_mask = UCCE_RXBF_SINGLE_MASK; @@ -3683,10 +3665,10 @@ static irqreturn_t ucc_geth_irq_handler(int irq, void *info) /* Errors and other events */ if (ucce & UCCE_OTHER) { if (ucce & UCCE_BSY) { - ugeth->stats.rx_errors++; + dev->stats.rx_errors++; } if (ucce & UCCE_TXE) { - ugeth->stats.tx_errors++; + dev->stats.tx_errors++; } } @@ -3717,12 +3699,15 @@ static int ucc_geth_open(struct net_device *dev) return err; } +#ifdef CONFIG_UGETH_NAPI + napi_enable(&ugeth->napi); +#endif err = ucc_geth_startup(ugeth); if (err) { if (netif_msg_ifup(ugeth)) ugeth_err("%s: Cannot configure net device, aborting.", dev->name); - return err; + goto out_err; } err = adjust_enet_interface(ugeth); @@ -3730,7 +3715,7 @@ static int ucc_geth_open(struct net_device *dev) if (netif_msg_ifup(ugeth)) ugeth_err("%s: Cannot configure net device, aborting.", dev->name); - return err; + goto out_err; } /* Set MACSTNADDR1, MACSTNADDR2 */ @@ -3748,7 +3733,7 @@ static int ucc_geth_open(struct net_device *dev) if (err) { if (netif_msg_ifup(ugeth)) ugeth_err("%s: Cannot initialize PHY, aborting.", dev->name); - return err; + goto out_err; } phy_start(ugeth->phydev); @@ -3761,7 +3746,7 @@ static int ucc_geth_open(struct net_device *dev) ugeth_err("%s: Cannot get IRQ for net device, aborting.", dev->name); ucc_geth_stop(ugeth); - return err; + goto out_err; } err = ugeth_enable(ugeth, COMM_DIR_RX_AND_TX); @@ -3769,12 +3754,18 @@ static int ucc_geth_open(struct net_device *dev) if (netif_msg_ifup(ugeth)) ugeth_err("%s: Cannot enable net device, aborting.", dev->name); ucc_geth_stop(ugeth); - return err; + goto out_err; } netif_start_queue(dev); return err; + +out_err: +#ifdef CONFIG_UGETH_NAPI + napi_disable(&ugeth->napi); +#endif + return err; } /* Stops the kernel queue, and halts the controller */ @@ -3784,6 +3775,10 @@ static int ucc_geth_close(struct net_device *dev) ugeth_vdbg("%s: IN", __FUNCTION__); +#ifdef CONFIG_UGETH_NAPI 
+ napi_disable(&ugeth->napi); +#endif + ucc_geth_stop(ugeth); phy_disconnect(ugeth->phydev); @@ -3954,7 +3949,6 @@ static int ucc_geth_probe(struct of_device* ofdev, const struct of_device_id *ma /* Set the dev->base_addr to the gfar reg region */ dev->base_addr = (unsigned long)(ug_info->uf_info.regs); - SET_MODULE_OWNER(dev); SET_NETDEV_DEV(dev, device); /* Fill in the dev structure */ @@ -3964,11 +3958,9 @@ static int ucc_geth_probe(struct of_device* ofdev, const struct of_device_id *ma dev->tx_timeout = ucc_geth_timeout; dev->watchdog_timeo = TX_TIMEOUT; #ifdef CONFIG_UGETH_NAPI - dev->poll = ucc_geth_poll; - dev->weight = UCC_GETH_DEV_WEIGHT; + netif_napi_add(dev, &ugeth->napi, ucc_geth_poll, UCC_GETH_DEV_WEIGHT); #endif /* CONFIG_UGETH_NAPI */ dev->stop = ucc_geth_close; - dev->get_stats = ucc_geth_get_stats; // dev->change_mtu = ucc_geth_change_mtu; dev->mtu = 1500; dev->set_multicast_list = ucc_geth_set_multi; diff --git a/drivers/net/ucc_geth.h b/drivers/net/ucc_geth.h index bb4dac8c0c654003e4e778ad2fffa0503a884983..aaeb9487798750469e1ea85271cc025559326d8e 100644 --- a/drivers/net/ucc_geth.h +++ b/drivers/net/ucc_geth.h @@ -1184,7 +1184,7 @@ struct ucc_geth_private { struct ucc_geth_info *ug_info; struct ucc_fast_private *uccf; struct net_device *dev; - struct net_device_stats stats; /* linux network statistics */ + struct napi_struct napi; struct ucc_geth *ug_regs; struct ucc_geth_init_pram *p_init_enet_param_shadow; struct ucc_geth_exf_global_pram *p_exf_glbl_param; diff --git a/drivers/net/ucc_geth_ethtool.c b/drivers/net/ucc_geth_ethtool.c index 64bef7c123654529b5a46864f872717fdde8623f..9a9622c13e2b8d578fceec7563de0a703e697d9a 100644 --- a/drivers/net/ucc_geth_ethtool.c +++ b/drivers/net/ucc_geth_ethtool.c @@ -276,20 +276,26 @@ uec_set_ringparam(struct net_device *netdev, return ret; } -static int uec_get_stats_count(struct net_device *netdev) +static int uec_get_sset_count(struct net_device *netdev, int sset) { struct ucc_geth_private *ugeth = netdev_priv(netdev); u32 stats_mode = ugeth->ug_info->statisticsMode; int len = 0; - if (stats_mode & UCC_GETH_STATISTICS_GATHERING_MODE_HARDWARE) - len += UEC_HW_STATS_LEN; - if (stats_mode & UCC_GETH_STATISTICS_GATHERING_MODE_FIRMWARE_TX) - len += UEC_TX_FW_STATS_LEN; - if (stats_mode & UCC_GETH_STATISTICS_GATHERING_MODE_FIRMWARE_RX) - len += UEC_RX_FW_STATS_LEN; + switch (sset) { + case ETH_SS_STATS: + if (stats_mode & UCC_GETH_STATISTICS_GATHERING_MODE_HARDWARE) + len += UEC_HW_STATS_LEN; + if (stats_mode & UCC_GETH_STATISTICS_GATHERING_MODE_FIRMWARE_TX) + len += UEC_TX_FW_STATS_LEN; + if (stats_mode & UCC_GETH_STATISTICS_GATHERING_MODE_FIRMWARE_RX) + len += UEC_RX_FW_STATS_LEN; + + return len; - return len; + default: + return -EOPNOTSUPP; + } } static void uec_get_strings(struct net_device *netdev, u32 stringset, u8 *buf) @@ -353,8 +359,6 @@ uec_get_drvinfo(struct net_device *netdev, strncpy(drvinfo->version, DRV_VERSION, 32); strncpy(drvinfo->fw_version, "N/A", 32); strncpy(drvinfo->bus_info, "QUICC ENGINE", 32); - drvinfo->n_stats = uec_get_stats_count(netdev); - drvinfo->testinfo_len = 0; drvinfo->eedump_len = 0; drvinfo->regdump_len = uec_get_regs_len(netdev); } @@ -373,10 +377,8 @@ static const struct ethtool_ops uec_ethtool_ops = { .set_ringparam = uec_set_ringparam, .get_pauseparam = uec_get_pauseparam, .set_pauseparam = uec_set_pauseparam, - .get_sg = ethtool_op_get_sg, .set_sg = ethtool_op_set_sg, - .get_tso = ethtool_op_get_tso, - .get_stats_count = uec_get_stats_count, + .get_sset_count = uec_get_sset_count, 
.get_strings = uec_get_strings, .get_ethtool_stats = uec_get_ethtool_stats, }; diff --git a/drivers/net/usb/asix.c b/drivers/net/usb/asix.c index 6d95cacd5284dec9642d41d2e4dfce146eb65189..61daa096de665025d7443783f06cdbc3d4be6c21 100644 --- a/drivers/net/usb/asix.c +++ b/drivers/net/usb/asix.c @@ -1474,6 +1474,7 @@ static struct usb_driver asix_driver = { .suspend = usbnet_suspend, .resume = usbnet_resume, .disconnect = usbnet_disconnect, + .supports_autosuspend = 1, }; static int __init asix_init(void) diff --git a/drivers/net/usb/kaweth.c b/drivers/net/usb/kaweth.c index 524dc5f5e46ddfd197cea239e60c59caffd93374..58a53a641754ba74fefd04fa048c649cc6f4faee 100644 --- a/drivers/net/usb/kaweth.c +++ b/drivers/net/usb/kaweth.c @@ -1152,8 +1152,6 @@ static int kaweth_probe( INIT_DELAYED_WORK(&kaweth->lowmem_work, kaweth_resubmit_tl); - SET_MODULE_OWNER(netdev); - usb_set_intfdata(intf, kaweth); #if 0 diff --git a/drivers/net/usb/pegasus.c b/drivers/net/usb/pegasus.c index 04cba6bf3d5477ffb6678e53187cde36e724f624..d1ed68a11e70192ef320c1c15c52c060b952cd4b 100644 --- a/drivers/net/usb/pegasus.c +++ b/drivers/net/usb/pegasus.c @@ -1297,6 +1297,7 @@ static int pegasus_probe(struct usb_interface *intf, pegasus_t *pegasus; int dev_index = id - pegasus_ids; int res = -ENOMEM; + DECLARE_MAC_BUF(mac); usb_get_dev(dev); net = alloc_etherdev(sizeof(struct pegasus)); @@ -1306,7 +1307,6 @@ static int pegasus_probe(struct usb_interface *intf, } pegasus = netdev_priv(net); - memset(pegasus, 0, sizeof (struct pegasus)); pegasus->dev_index = dev_index; init_waitqueue_head(&pegasus->ctrl_wait); @@ -1322,7 +1322,6 @@ static int pegasus_probe(struct usb_interface *intf, pegasus->intf = intf; pegasus->usb = dev; pegasus->net = net; - SET_MODULE_OWNER(net); net->open = pegasus_open; net->stop = pegasus_close; net->watchdog_timeo = PEGASUS_TX_TIMEOUT; @@ -1369,12 +1368,10 @@ static int pegasus_probe(struct usb_interface *intf, queue_delayed_work(pegasus_workqueue, &pegasus->carrier_check, CARRIER_CHECK_DELAY); - dev_info(&intf->dev, "%s, %s, %02x:%02x:%02x:%02x:%02x:%02x\n", - net->name, - usb_dev_id[dev_index].name, - net->dev_addr [0], net->dev_addr [1], - net->dev_addr [2], net->dev_addr [3], - net->dev_addr [4], net->dev_addr [5]); + dev_info(&intf->dev, "%s, %s, %s\n", + net->name, + usb_dev_id[dev_index].name, + print_mac(mac, net->dev_addr)); return 0; out3: diff --git a/drivers/net/usb/rtl8150.c b/drivers/net/usb/rtl8150.c index fa598f0340cf90f5a7d9b5cca48cf0b374a988b1..33cbc306226c67a9a98019d34c4b7eb1ba52b0d5 100644 --- a/drivers/net/usb/rtl8150.c +++ b/drivers/net/usb/rtl8150.c @@ -905,7 +905,6 @@ static int rtl8150_probe(struct usb_interface *intf, } dev = netdev_priv(netdev); - memset(dev, 0, sizeof(rtl8150_t)); dev->intr_buff = kmalloc(INTBUFSIZE, GFP_KERNEL); if (!dev->intr_buff) { @@ -918,7 +917,6 @@ static int rtl8150_probe(struct usb_interface *intf, dev->udev = udev; dev->netdev = netdev; - SET_MODULE_OWNER(netdev); netdev->open = rtl8150_open; netdev->stop = rtl8150_close; netdev->do_ioctl = rtl8150_ioctl; diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c index 37bf4f2c0a44c07204dcdc991cecee505462dcf1..acd5f1c0e63a4c79eb4057d39a559ff20166b56a 100644 --- a/drivers/net/usb/usbnet.c +++ b/drivers/net/usb/usbnet.c @@ -590,6 +590,7 @@ static int usbnet_stop (struct net_device *net) dev->flags = 0; del_timer_sync (&dev->delay); tasklet_kill (&dev->bh); + usb_autopm_put_interface(dev->intf); return 0; } @@ -603,9 +604,19 @@ static int usbnet_stop (struct net_device *net) static int usbnet_open 
(struct net_device *net) { struct usbnet *dev = netdev_priv(net); - int retval = 0; + int retval; struct driver_info *info = dev->driver_info; + if ((retval = usb_autopm_get_interface(dev->intf)) < 0) { + if (netif_msg_ifup (dev)) + devinfo (dev, + "resumption fail (%d) usbnet usb-%s-%s, %s", + retval, + dev->udev->bus->bus_name, dev->udev->devpath, + info->description); + goto done_nopm; + } + // put into "known safe" state if (info->reset && (retval = info->reset (dev)) < 0) { if (netif_msg_ifup (dev)) @@ -659,7 +670,10 @@ static int usbnet_open (struct net_device *net) // delay posting reads until we're fully open tasklet_schedule (&dev->bh); + return retval; done: + usb_autopm_put_interface(dev->intf); +done_nopm: return retval; } @@ -1120,6 +1134,7 @@ usbnet_probe (struct usb_interface *udev, const struct usb_device_id *prod) struct usb_device *xdev; int status; const char *name; + DECLARE_MAC_BUF(mac); name = udev->dev.driver->name; info = (struct driver_info *) prod->driver_info; @@ -1143,6 +1158,7 @@ usbnet_probe (struct usb_interface *udev, const struct usb_device_id *prod) dev = netdev_priv(net); dev->udev = xdev; + dev->intf = udev; dev->driver_info = info; dev->driver_name = name; dev->msg_enable = netif_msg_init (msg_level, NETIF_MSG_DRV @@ -1158,7 +1174,6 @@ usbnet_probe (struct usb_interface *udev, const struct usb_device_id *prod) init_timer (&dev->delay); mutex_init (&dev->phy_mutex); - SET_MODULE_OWNER (net); dev->net = net; strcpy (net->name, "usb%d"); memcpy (net->dev_addr, node_id, sizeof node_id); @@ -1227,14 +1242,11 @@ usbnet_probe (struct usb_interface *udev, const struct usb_device_id *prod) if (status) goto out3; if (netif_msg_probe (dev)) - devinfo (dev, "register '%s' at usb-%s-%s, %s, " - "%02x:%02x:%02x:%02x:%02x:%02x", + devinfo (dev, "register '%s' at usb-%s-%s, %s, %s", udev->dev.driver->name, xdev->bus->bus_name, xdev->devpath, dev->driver_info->description, - net->dev_addr [0], net->dev_addr [1], - net->dev_addr [2], net->dev_addr [3], - net->dev_addr [4], net->dev_addr [5]); + print_mac(mac, net->dev_addr)); // ok, it's ready to go. usb_set_intfdata (udev, dev); @@ -1267,12 +1279,18 @@ int usbnet_suspend (struct usb_interface *intf, pm_message_t message) struct usbnet *dev = usb_get_intfdata(intf); if (!dev->suspend_count++) { - /* accelerate emptying of the rx and queues, to avoid + /* + * accelerate emptying of the rx and queues, to avoid * having everything error out. 
*/ netif_device_detach (dev->net); (void) unlink_urbs (dev, &dev->rxq); (void) unlink_urbs (dev, &dev->txq); + /* + * reattach so runtime management can use and + * wake the device + */ + netif_device_attach (dev->net); } return 0; } @@ -1282,10 +1300,9 @@ int usbnet_resume (struct usb_interface *intf) { struct usbnet *dev = usb_get_intfdata(intf); - if (!--dev->suspend_count) { - netif_device_attach (dev->net); + if (!--dev->suspend_count) tasklet_schedule (&dev->bh); - } + return 0; } EXPORT_SYMBOL_GPL(usbnet_resume); diff --git a/drivers/net/usb/usbnet.h b/drivers/net/usb/usbnet.h index a6c5820767de7f52920d1904ca1149b721c2f3f3..1fae4347e8316eef06f8e9319f618e413f51e32b 100644 --- a/drivers/net/usb/usbnet.h +++ b/drivers/net/usb/usbnet.h @@ -28,6 +28,7 @@ struct usbnet { /* housekeeping */ struct usb_device *udev; + struct usb_interface *intf; struct driver_info *driver_info; const char *driver_name; wait_queue_head_t *wait; diff --git a/drivers/net/veth.c b/drivers/net/veth.c new file mode 100644 index 0000000000000000000000000000000000000000..fdd1e034569d9c44fe49f1c9d678e7550de52537 --- /dev/null +++ b/drivers/net/veth.c @@ -0,0 +1,482 @@ +/* + * drivers/net/veth.c + * + * Copyright (C) 2007 OpenVZ http://openvz.org, SWsoft Inc + * + * Author: Pavel Emelianov + * Ethtool interface from: Eric W. Biederman + * + */ + +#include +#include +#include +#include + +#include +#include +#include + +#define DRV_NAME "veth" +#define DRV_VERSION "1.0" + +struct veth_net_stats { + unsigned long rx_packets; + unsigned long tx_packets; + unsigned long rx_bytes; + unsigned long tx_bytes; + unsigned long tx_dropped; +}; + +struct veth_priv { + struct net_device *peer; + struct net_device *dev; + struct list_head list; + struct veth_net_stats *stats; + unsigned ip_summed; +}; + +static LIST_HEAD(veth_list); + +/* + * ethtool interface + */ + +static struct { + const char string[ETH_GSTRING_LEN]; +} ethtool_stats_keys[] = { + { "peer_ifindex" }, +}; + +static int veth_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) +{ + cmd->supported = 0; + cmd->advertising = 0; + cmd->speed = SPEED_10000; + cmd->duplex = DUPLEX_FULL; + cmd->port = PORT_TP; + cmd->phy_address = 0; + cmd->transceiver = XCVR_INTERNAL; + cmd->autoneg = AUTONEG_DISABLE; + cmd->maxtxpkt = 0; + cmd->maxrxpkt = 0; + return 0; +} + +static void veth_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) +{ + strcpy(info->driver, DRV_NAME); + strcpy(info->version, DRV_VERSION); + strcpy(info->fw_version, "N/A"); +} + +static void veth_get_strings(struct net_device *dev, u32 stringset, u8 *buf) +{ + switch(stringset) { + case ETH_SS_STATS: + memcpy(buf, ðtool_stats_keys, sizeof(ethtool_stats_keys)); + break; + } +} + +static int veth_get_sset_count(struct net_device *dev, int sset) +{ + switch (sset) { + case ETH_SS_STATS: + return ARRAY_SIZE(ethtool_stats_keys); + default: + return -EOPNOTSUPP; + } +} + +static void veth_get_ethtool_stats(struct net_device *dev, + struct ethtool_stats *stats, u64 *data) +{ + struct veth_priv *priv; + + priv = netdev_priv(dev); + data[0] = priv->peer->ifindex; +} + +static u32 veth_get_rx_csum(struct net_device *dev) +{ + struct veth_priv *priv; + + priv = netdev_priv(dev); + return priv->ip_summed == CHECKSUM_UNNECESSARY; +} + +static int veth_set_rx_csum(struct net_device *dev, u32 data) +{ + struct veth_priv *priv; + + priv = netdev_priv(dev); + priv->ip_summed = data ? 
CHECKSUM_UNNECESSARY : CHECKSUM_NONE; + return 0; +} + +static u32 veth_get_tx_csum(struct net_device *dev) +{ + return (dev->features & NETIF_F_NO_CSUM) != 0; +} + +static int veth_set_tx_csum(struct net_device *dev, u32 data) +{ + if (data) + dev->features |= NETIF_F_NO_CSUM; + else + dev->features &= ~NETIF_F_NO_CSUM; + return 0; +} + +static struct ethtool_ops veth_ethtool_ops = { + .get_settings = veth_get_settings, + .get_drvinfo = veth_get_drvinfo, + .get_link = ethtool_op_get_link, + .get_rx_csum = veth_get_rx_csum, + .set_rx_csum = veth_set_rx_csum, + .get_tx_csum = veth_get_tx_csum, + .set_tx_csum = veth_set_tx_csum, + .get_sg = ethtool_op_get_sg, + .set_sg = ethtool_op_set_sg, + .get_strings = veth_get_strings, + .get_sset_count = veth_get_sset_count, + .get_ethtool_stats = veth_get_ethtool_stats, +}; + +/* + * xmit + */ + +static int veth_xmit(struct sk_buff *skb, struct net_device *dev) +{ + struct net_device *rcv = NULL; + struct veth_priv *priv, *rcv_priv; + struct veth_net_stats *stats; + int length, cpu; + + skb_orphan(skb); + + priv = netdev_priv(dev); + rcv = priv->peer; + rcv_priv = netdev_priv(rcv); + + cpu = smp_processor_id(); + stats = per_cpu_ptr(priv->stats, cpu); + + if (!(rcv->flags & IFF_UP)) + goto outf; + + skb->pkt_type = PACKET_HOST; + skb->protocol = eth_type_trans(skb, rcv); + if (dev->features & NETIF_F_NO_CSUM) + skb->ip_summed = rcv_priv->ip_summed; + + dst_release(skb->dst); + skb->dst = NULL; + skb->mark = 0; + secpath_reset(skb); + nf_reset(skb); + + length = skb->len; + + stats->tx_bytes += length; + stats->tx_packets++; + + stats = per_cpu_ptr(rcv_priv->stats, cpu); + stats->rx_bytes += length; + stats->rx_packets++; + + netif_rx(skb); + return 0; + +outf: + kfree_skb(skb); + stats->tx_dropped++; + return 0; +} + +/* + * general routines + */ + +static struct net_device_stats *veth_get_stats(struct net_device *dev) +{ + struct veth_priv *priv; + struct net_device_stats *dev_stats; + int cpu; + struct veth_net_stats *stats; + + priv = netdev_priv(dev); + dev_stats = &dev->stats; + + dev_stats->rx_packets = 0; + dev_stats->tx_packets = 0; + dev_stats->rx_bytes = 0; + dev_stats->tx_bytes = 0; + dev_stats->tx_dropped = 0; + + for_each_online_cpu(cpu) { + stats = per_cpu_ptr(priv->stats, cpu); + + dev_stats->rx_packets += stats->rx_packets; + dev_stats->tx_packets += stats->tx_packets; + dev_stats->rx_bytes += stats->rx_bytes; + dev_stats->tx_bytes += stats->tx_bytes; + dev_stats->tx_dropped += stats->tx_dropped; + } + + return dev_stats; +} + +static int veth_open(struct net_device *dev) +{ + struct veth_priv *priv; + + priv = netdev_priv(dev); + if (priv->peer == NULL) + return -ENOTCONN; + + if (priv->peer->flags & IFF_UP) { + netif_carrier_on(dev); + netif_carrier_on(priv->peer); + } + return 0; +} + +static int veth_close(struct net_device *dev) +{ + struct veth_priv *priv; + + if (netif_carrier_ok(dev)) { + priv = netdev_priv(dev); + netif_carrier_off(dev); + netif_carrier_off(priv->peer); + } + return 0; +} + +static int veth_dev_init(struct net_device *dev) +{ + struct veth_net_stats *stats; + struct veth_priv *priv; + + stats = alloc_percpu(struct veth_net_stats); + if (stats == NULL) + return -ENOMEM; + + priv = netdev_priv(dev); + priv->stats = stats; + return 0; +} + +static void veth_dev_free(struct net_device *dev) +{ + struct veth_priv *priv; + + priv = netdev_priv(dev); + free_percpu(priv->stats); + free_netdev(dev); +} + +static void veth_setup(struct net_device *dev) +{ + ether_setup(dev); + + dev->hard_start_xmit = veth_xmit; + 
dev->get_stats = veth_get_stats; + dev->open = veth_open; + dev->stop = veth_close; + dev->ethtool_ops = &veth_ethtool_ops; + dev->features |= NETIF_F_LLTX; + dev->init = veth_dev_init; + dev->destructor = veth_dev_free; +} + +/* + * netlink interface + */ + +static int veth_validate(struct nlattr *tb[], struct nlattr *data[]) +{ + if (tb[IFLA_ADDRESS]) { + if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN) + return -EINVAL; + if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS]))) + return -EADDRNOTAVAIL; + } + return 0; +} + +static struct rtnl_link_ops veth_link_ops; + +static int veth_newlink(struct net_device *dev, + struct nlattr *tb[], struct nlattr *data[]) +{ + int err; + struct net_device *peer; + struct veth_priv *priv; + char ifname[IFNAMSIZ]; + struct nlattr *peer_tb[IFLA_MAX + 1], **tbp; + + /* + * create and register peer first + * + * struct ifinfomsg is at the head of VETH_INFO_PEER, but we + * skip it since no info from it is useful yet + */ + + if (data != NULL && data[VETH_INFO_PEER] != NULL) { + struct nlattr *nla_peer; + + nla_peer = data[VETH_INFO_PEER]; + err = nla_parse(peer_tb, IFLA_MAX, + nla_data(nla_peer) + sizeof(struct ifinfomsg), + nla_len(nla_peer) - sizeof(struct ifinfomsg), + ifla_policy); + if (err < 0) + return err; + + err = veth_validate(peer_tb, NULL); + if (err < 0) + return err; + + tbp = peer_tb; + } else + tbp = tb; + + if (tbp[IFLA_IFNAME]) + nla_strlcpy(ifname, tbp[IFLA_IFNAME], IFNAMSIZ); + else + snprintf(ifname, IFNAMSIZ, DRV_NAME "%%d"); + + peer = rtnl_create_link(dev->nd_net, ifname, &veth_link_ops, tbp); + if (IS_ERR(peer)) + return PTR_ERR(peer); + + if (tbp[IFLA_ADDRESS] == NULL) + random_ether_addr(peer->dev_addr); + + err = register_netdevice(peer); + if (err < 0) + goto err_register_peer; + + netif_carrier_off(peer); + + /* + * register dev last + * + * note, that since we've registered new device the dev's name + * should be re-allocated + */ + + if (tb[IFLA_ADDRESS] == NULL) + random_ether_addr(dev->dev_addr); + + if (tb[IFLA_IFNAME]) + nla_strlcpy(dev->name, tb[IFLA_IFNAME], IFNAMSIZ); + else + snprintf(dev->name, IFNAMSIZ, DRV_NAME "%%d"); + + if (strchr(dev->name, '%')) { + err = dev_alloc_name(dev, dev->name); + if (err < 0) + goto err_alloc_name; + } + + err = register_netdevice(dev); + if (err < 0) + goto err_register_dev; + + netif_carrier_off(dev); + + /* + * tie the deviced together + */ + + priv = netdev_priv(dev); + priv->dev = dev; + priv->peer = peer; + list_add(&priv->list, &veth_list); + + priv = netdev_priv(peer); + priv->dev = peer; + priv->peer = dev; + INIT_LIST_HEAD(&priv->list); + return 0; + +err_register_dev: + /* nothing to do */ +err_alloc_name: + unregister_netdevice(peer); + return err; + +err_register_peer: + free_netdev(peer); + return err; +} + +static void veth_dellink(struct net_device *dev) +{ + struct veth_priv *priv; + struct net_device *peer; + + priv = netdev_priv(dev); + peer = priv->peer; + + if (!list_empty(&priv->list)) + list_del(&priv->list); + + priv = netdev_priv(peer); + if (!list_empty(&priv->list)) + list_del(&priv->list); + + unregister_netdevice(dev); + unregister_netdevice(peer); +} + +static const struct nla_policy veth_policy[VETH_INFO_MAX + 1]; + +static struct rtnl_link_ops veth_link_ops = { + .kind = DRV_NAME, + .priv_size = sizeof(struct veth_priv), + .setup = veth_setup, + .validate = veth_validate, + .newlink = veth_newlink, + .dellink = veth_dellink, + .policy = veth_policy, + .maxtype = VETH_INFO_MAX, +}; + +/* + * init/fini + */ + +static __init int veth_init(void) +{ + return 
rtnl_link_register(&veth_link_ops); +} + +static __exit void veth_exit(void) +{ + struct veth_priv *priv, *next; + + rtnl_lock(); + /* + * cannot trust __rtnl_link_unregister() to unregister all + * devices, as each ->dellink call will remove two devices + * from the list at once. + */ + list_for_each_entry_safe(priv, next, &veth_list, list) + veth_dellink(priv->dev); + + __rtnl_link_unregister(&veth_link_ops); + rtnl_unlock(); +} + +module_init(veth_init); +module_exit(veth_exit); + +MODULE_DESCRIPTION("Virtual Ethernet Tunnel"); +MODULE_LICENSE("GPL v2"); +MODULE_ALIAS_RTNL_LINK(DRV_NAME); diff --git a/drivers/net/via-rhine.c b/drivers/net/via-rhine.c index b56dff26772de6a1f194b446a481c286439b7d77..07263cd93f9cfdf57f95ecccb61511cdce683f1d 100644 --- a/drivers/net/via-rhine.c +++ b/drivers/net/via-rhine.c @@ -335,16 +335,16 @@ enum wol_bits { /* The Rx and Tx buffer descriptors. */ struct rx_desc { - s32 rx_status; - u32 desc_length; /* Chain flag, Buffer/frame length */ - u32 addr; - u32 next_desc; + __le32 rx_status; + __le32 desc_length; /* Chain flag, Buffer/frame length */ + __le32 addr; + __le32 next_desc; }; struct tx_desc { - s32 tx_status; - u32 desc_length; /* Chain flag, Tx Config, Frame length */ - u32 addr; - u32 next_desc; + __le32 tx_status; + __le32 desc_length; /* Chain flag, Tx Config, Frame length */ + __le32 addr; + __le32 next_desc; }; /* Initial value for tx_desc.desc_length, Buffer size goes to bits 0-10 */ @@ -389,6 +389,8 @@ struct rhine_private { struct pci_dev *pdev; long pioaddr; + struct net_device *dev; + struct napi_struct napi; struct net_device_stats stats; spinlock_t lock; @@ -582,28 +584,25 @@ static void rhine_poll(struct net_device *dev) #endif #ifdef CONFIG_VIA_RHINE_NAPI -static int rhine_napipoll(struct net_device *dev, int *budget) +static int rhine_napipoll(struct napi_struct *napi, int budget) { - struct rhine_private *rp = netdev_priv(dev); + struct rhine_private *rp = container_of(napi, struct rhine_private, napi); + struct net_device *dev = rp->dev; void __iomem *ioaddr = rp->base; - int done, limit = min(dev->quota, *budget); + int work_done; - done = rhine_rx(dev, limit); - *budget -= done; - dev->quota -= done; + work_done = rhine_rx(dev, budget); - if (done < limit) { - netif_rx_complete(dev); + if (work_done < budget) { + netif_rx_complete(dev, napi); iowrite16(IntrRxDone | IntrRxErr | IntrRxEmpty| IntrRxOverflow | IntrRxDropped | IntrRxNoBuf | IntrTxAborted | IntrTxDone | IntrTxError | IntrTxUnderrun | IntrPCIErr | IntrStatsMax | IntrLinkChange, ioaddr + IntrEnable); - return 0; } - else - return 1; + return work_done; } #endif @@ -639,6 +638,7 @@ static int __devinit rhine_init_one(struct pci_dev *pdev, #else int bar = 0; #endif + DECLARE_MAC_BUF(mac); /* when built into the kernel, we only print version if device is found */ #ifndef MODULE @@ -703,10 +703,10 @@ static int __devinit rhine_init_one(struct pci_dev *pdev, printk(KERN_ERR "alloc_etherdev failed\n"); goto err_out; } - SET_MODULE_OWNER(dev); SET_NETDEV_DEV(dev, &pdev->dev); rp = netdev_priv(dev); + rp->dev = dev; rp->quirks = quirks; rp->pioaddr = pioaddr; rp->pdev = pdev; @@ -785,8 +785,7 @@ static int __devinit rhine_init_one(struct pci_dev *pdev, dev->poll_controller = rhine_poll; #endif #ifdef CONFIG_VIA_RHINE_NAPI - dev->poll = rhine_napipoll; - dev->weight = 64; + netif_napi_add(dev, &rp->napi, rhine_napipoll, 64); #endif if (rp->quirks & rqRhineI) dev->features |= NETIF_F_SG|NETIF_F_HW_CSUM; @@ -796,18 +795,14 @@ static int __devinit rhine_init_one(struct pci_dev 
*pdev, if (rc) goto err_out_unmap; - printk(KERN_INFO "%s: VIA %s at 0x%lx, ", + printk(KERN_INFO "%s: VIA %s at 0x%lx, %s, IRQ %d.\n", dev->name, name, #ifdef USE_MMIO - memaddr + memaddr, #else - (long)ioaddr + (long)ioaddr, #endif - ); - - for (i = 0; i < 5; i++) - printk("%2.2x:", dev->dev_addr[i]); - printk("%2.2x, IRQ %d.\n", dev->dev_addr[i], pdev->irq); + print_mac(mac, dev->dev_addr), pdev->irq); pci_set_drvdata(pdev, dev); @@ -1061,7 +1056,9 @@ static void init_registers(struct net_device *dev) rhine_set_rx_mode(dev); - netif_poll_enable(dev); +#ifdef CONFIG_VIA_RHINE_NAPI + napi_enable(&rp->napi); +#endif /* Enable interrupts by setting the interrupt mask. */ iowrite16(IntrRxDone | IntrRxErr | IntrRxEmpty| IntrRxOverflow | @@ -1196,6 +1193,10 @@ static void rhine_tx_timeout(struct net_device *dev) /* protect against concurrent rx interrupts */ disable_irq(rp->pdev->irq); +#ifdef CONFIG_VIA_RHINE_NAPI + napi_disable(&rp->napi); +#endif + spin_lock(&rp->lock); /* clear all descriptors */ @@ -1324,7 +1325,7 @@ static irqreturn_t rhine_interrupt(int irq, void *dev_instance) IntrPCIErr | IntrStatsMax | IntrLinkChange, ioaddr + IntrEnable); - netif_rx_schedule(dev); + netif_rx_schedule(dev, &rp->napi); #else rhine_rx(dev, RX_RING_SIZE); #endif @@ -1809,8 +1810,6 @@ static const struct ethtool_ops netdev_ethtool_ops = { .set_msglevel = netdev_set_msglevel, .get_wol = rhine_get_wol, .set_wol = rhine_set_wol, - .get_sg = ethtool_op_get_sg, - .get_tx_csum = ethtool_op_get_tx_csum, }; static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) @@ -1837,7 +1836,9 @@ static int rhine_close(struct net_device *dev) spin_lock_irq(&rp->lock); netif_stop_queue(dev); - netif_poll_disable(dev); +#ifdef CONFIG_VIA_RHINE_NAPI + napi_disable(&rp->napi); +#endif if (debug > 1) printk(KERN_DEBUG "%s: Shutting down ethercard, " @@ -1936,6 +1937,9 @@ static int rhine_suspend(struct pci_dev *pdev, pm_message_t state) if (!netif_running(dev)) return 0; +#ifdef CONFIG_VIA_RHINE_NAPI + napi_disable(&rp->napi); +#endif netif_device_detach(dev); pci_save_state(pdev); diff --git a/drivers/net/via-velocity.c b/drivers/net/via-velocity.c index 93574add4063e6d17af9dec41e9ff57060e403c6..4ae05799ac447b571412990d6fd9c7f2bdfc2ac2 100644 --- a/drivers/net/via-velocity.c +++ b/drivers/net/via-velocity.c @@ -72,6 +72,7 @@ #include #include #include +#include #include #include #include @@ -84,6 +85,163 @@ static int velocity_nics = 0; static int msglevel = MSG_LEVEL_INFO; +/** + * mac_get_cam_mask - Read a CAM mask + * @regs: register block for this velocity + * @mask: buffer to store mask + * + * Fetch the mask bits of the selected CAM and store them into the + * provided mask buffer. 
+ */ + +static void mac_get_cam_mask(struct mac_regs __iomem * regs, u8 * mask) +{ + int i; + + /* Select CAM mask */ + BYTE_REG_BITS_SET(CAMCR_PS_CAM_MASK, CAMCR_PS1 | CAMCR_PS0, ®s->CAMCR); + + writeb(0, ®s->CAMADDR); + + /* read mask */ + for (i = 0; i < 8; i++) + *mask++ = readb(&(regs->MARCAM[i])); + + /* disable CAMEN */ + writeb(0, ®s->CAMADDR); + + /* Select mar */ + BYTE_REG_BITS_SET(CAMCR_PS_MAR, CAMCR_PS1 | CAMCR_PS0, ®s->CAMCR); + +} + + +/** + * mac_set_cam_mask - Set a CAM mask + * @regs: register block for this velocity + * @mask: CAM mask to load + * + * Store a new mask into a CAM + */ + +static void mac_set_cam_mask(struct mac_regs __iomem * regs, u8 * mask) +{ + int i; + /* Select CAM mask */ + BYTE_REG_BITS_SET(CAMCR_PS_CAM_MASK, CAMCR_PS1 | CAMCR_PS0, ®s->CAMCR); + + writeb(CAMADDR_CAMEN, ®s->CAMADDR); + + for (i = 0; i < 8; i++) { + writeb(*mask++, &(regs->MARCAM[i])); + } + /* disable CAMEN */ + writeb(0, ®s->CAMADDR); + + /* Select mar */ + BYTE_REG_BITS_SET(CAMCR_PS_MAR, CAMCR_PS1 | CAMCR_PS0, ®s->CAMCR); +} + +static void mac_set_vlan_cam_mask(struct mac_regs __iomem * regs, u8 * mask) +{ + int i; + /* Select CAM mask */ + BYTE_REG_BITS_SET(CAMCR_PS_CAM_MASK, CAMCR_PS1 | CAMCR_PS0, ®s->CAMCR); + + writeb(CAMADDR_CAMEN | CAMADDR_VCAMSL, ®s->CAMADDR); + + for (i = 0; i < 8; i++) { + writeb(*mask++, &(regs->MARCAM[i])); + } + /* disable CAMEN */ + writeb(0, ®s->CAMADDR); + + /* Select mar */ + BYTE_REG_BITS_SET(CAMCR_PS_MAR, CAMCR_PS1 | CAMCR_PS0, ®s->CAMCR); +} + +/** + * mac_set_cam - set CAM data + * @regs: register block of this velocity + * @idx: Cam index + * @addr: 2 or 6 bytes of CAM data + * + * Load an address or vlan tag into a CAM + */ + +static void mac_set_cam(struct mac_regs __iomem * regs, int idx, const u8 *addr) +{ + int i; + + /* Select CAM mask */ + BYTE_REG_BITS_SET(CAMCR_PS_CAM_DATA, CAMCR_PS1 | CAMCR_PS0, ®s->CAMCR); + + idx &= (64 - 1); + + writeb(CAMADDR_CAMEN | idx, ®s->CAMADDR); + + for (i = 0; i < 6; i++) { + writeb(*addr++, &(regs->MARCAM[i])); + } + BYTE_REG_BITS_ON(CAMCR_CAMWR, ®s->CAMCR); + + udelay(10); + + writeb(0, ®s->CAMADDR); + + /* Select mar */ + BYTE_REG_BITS_SET(CAMCR_PS_MAR, CAMCR_PS1 | CAMCR_PS0, ®s->CAMCR); +} + +static void mac_set_vlan_cam(struct mac_regs __iomem * regs, int idx, + const u8 *addr) +{ + + /* Select CAM mask */ + BYTE_REG_BITS_SET(CAMCR_PS_CAM_DATA, CAMCR_PS1 | CAMCR_PS0, ®s->CAMCR); + + idx &= (64 - 1); + + writeb(CAMADDR_CAMEN | CAMADDR_VCAMSL | idx, ®s->CAMADDR); + writew(*((u16 *) addr), ®s->MARCAM[0]); + + BYTE_REG_BITS_ON(CAMCR_CAMWR, ®s->CAMCR); + + udelay(10); + + writeb(0, ®s->CAMADDR); + + /* Select mar */ + BYTE_REG_BITS_SET(CAMCR_PS_MAR, CAMCR_PS1 | CAMCR_PS0, ®s->CAMCR); +} + + +/** + * mac_wol_reset - reset WOL after exiting low power + * @regs: register block of this velocity + * + * Called after we drop out of wake on lan mode in order to + * reset the Wake on lan features. 
This function doesn't restore + * the rest of the logic from the result of sleep/wakeup + */ + +static void mac_wol_reset(struct mac_regs __iomem * regs) +{ + + /* Turn off SWPTAG right after leaving power mode */ + BYTE_REG_BITS_OFF(STICKHW_SWPTAG, ®s->STICKHW); + /* clear sticky bits */ + BYTE_REG_BITS_OFF((STICKHW_DS1 | STICKHW_DS0), ®s->STICKHW); + + BYTE_REG_BITS_OFF(CHIPGCR_FCGMII, ®s->CHIPGCR); + BYTE_REG_BITS_OFF(CHIPGCR_FCMODE, ®s->CHIPGCR); + /* disable force PME-enable */ + writeb(WOLCFG_PMEOVR, ®s->WOLCFGClr); + /* disable power-event config bit */ + writew(0xFFFF, ®s->WOLCRClr); + /* clear power status */ + writew(0xFFFF, ®s->WOLSRClr); +} static int velocity_mii_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd); static const struct ethtool_ops velocity_ethtool_ops; @@ -111,15 +269,6 @@ VELOCITY_PARAM(RxDescriptors, "Number of receive descriptors"); #define TX_DESC_DEF 64 VELOCITY_PARAM(TxDescriptors, "Number of transmit descriptors"); -#define VLAN_ID_MIN 0 -#define VLAN_ID_MAX 4095 -#define VLAN_ID_DEF 0 -/* VID_setting[] is used for setting the VID of NIC. - 0: default VID. - 1-4094: other VIDs. -*/ -VELOCITY_PARAM(VID_setting, "802.1Q VLAN ID"); - #define RX_THRESH_MIN 0 #define RX_THRESH_MAX 3 #define RX_THRESH_DEF 0 @@ -147,13 +296,6 @@ VELOCITY_PARAM(rx_thresh, "Receive fifo threshold"); */ VELOCITY_PARAM(DMA_length, "DMA length"); -#define TAGGING_DEF 0 -/* enable_tagging[] is used for enabling 802.1Q VID tagging. - 0: disable VID seeting(default). - 1: enable VID setting. -*/ -VELOCITY_PARAM(enable_tagging, "Enable 802.1Q tagging"); - #define IP_ALIG_DEF 0 /* IP_byte_align[] is used for IP header DWORD byte aligned 0: indicate the IP header won't be DWORD byte aligned.(Default) . @@ -324,7 +466,7 @@ MODULE_DEVICE_TABLE(pci, velocity_id_table); * a pointer a static string valid while the driver is loaded. 
*/ -static char __devinit *get_chip_name(enum chip_type chip_id) +static const char __devinit *get_chip_name(enum chip_type chip_id) { int i; for (i = 0; chip_info_table[i].name != NULL; i++) @@ -442,8 +584,7 @@ static void __devinit velocity_get_options(struct velocity_opt *opts, int index, velocity_set_int_opt(&opts->DMA_length, DMA_length[index], DMA_LENGTH_MIN, DMA_LENGTH_MAX, DMA_LENGTH_DEF, "DMA_length", devname); velocity_set_int_opt(&opts->numrx, RxDescriptors[index], RX_DESC_MIN, RX_DESC_MAX, RX_DESC_DEF, "RxDescriptors", devname); velocity_set_int_opt(&opts->numtx, TxDescriptors[index], TX_DESC_MIN, TX_DESC_MAX, TX_DESC_DEF, "TxDescriptors", devname); - velocity_set_int_opt(&opts->vid, VID_setting[index], VLAN_ID_MIN, VLAN_ID_MAX, VLAN_ID_DEF, "VID_setting", devname); - velocity_set_bool_opt(&opts->flags, enable_tagging[index], TAGGING_DEF, VELOCITY_FLAGS_TAGGING, "enable_tagging", devname); + velocity_set_bool_opt(&opts->flags, txcsum_offload[index], TX_CSUM_DEF, VELOCITY_FLAGS_TX_CSUM, "txcsum_offload", devname); velocity_set_int_opt(&opts->flow_cntl, flow_control[index], FLOW_CNTL_MIN, FLOW_CNTL_MAX, FLOW_CNTL_DEF, "flow_control", devname); velocity_set_bool_opt(&opts->flags, IP_byte_align[index], IP_ALIG_DEF, VELOCITY_FLAGS_IP_ALIGN, "IP_byte_align", devname); @@ -465,6 +606,7 @@ static void __devinit velocity_get_options(struct velocity_opt *opts, int index, static void velocity_init_cam_filter(struct velocity_info *vptr) { struct mac_regs __iomem * regs = vptr->mac_regs; + unsigned short vid; /* Turn on MCFG_PQEN, turn off MCFG_RTGOPT */ WORD_REG_BITS_SET(MCFG_PQEN, MCFG_RTGOPT, ®s->MCFG); @@ -473,27 +615,52 @@ static void velocity_init_cam_filter(struct velocity_info *vptr) /* Disable all CAMs */ memset(vptr->vCAMmask, 0, sizeof(u8) * 8); memset(vptr->mCAMmask, 0, sizeof(u8) * 8); - mac_set_cam_mask(regs, vptr->vCAMmask, VELOCITY_VLAN_ID_CAM); - mac_set_cam_mask(regs, vptr->mCAMmask, VELOCITY_MULTICAST_CAM); + mac_set_vlan_cam_mask(regs, vptr->vCAMmask); + mac_set_cam_mask(regs, vptr->mCAMmask); /* Enable first VCAM */ - if (vptr->flags & VELOCITY_FLAGS_TAGGING) { - /* If Tagging option is enabled and VLAN ID is not zero, then - turn on MCFG_RTGOPT also */ - if (vptr->options.vid != 0) - WORD_REG_BITS_ON(MCFG_RTGOPT, ®s->MCFG); - - mac_set_cam(regs, 0, (u8 *) & (vptr->options.vid), VELOCITY_VLAN_ID_CAM); + if (vptr->vlgrp) { + for (vid = 0; vid < VLAN_VID_MASK; vid++) { + if (vlan_group_get_device(vptr->vlgrp, vid)) { + /* If Tagging option is enabled and + VLAN ID is not zero, then + turn on MCFG_RTGOPT also */ + if (vid != 0) + WORD_REG_BITS_ON(MCFG_RTGOPT, ®s->MCFG); + + mac_set_vlan_cam(regs, 0, (u8 *) &vid); + } + } vptr->vCAMmask[0] |= 1; - mac_set_cam_mask(regs, vptr->vCAMmask, VELOCITY_VLAN_ID_CAM); + mac_set_vlan_cam_mask(regs, vptr->vCAMmask); } else { u16 temp = 0; - mac_set_cam(regs, 0, (u8 *) &temp, VELOCITY_VLAN_ID_CAM); + mac_set_vlan_cam(regs, 0, (u8 *) &temp); temp = 1; - mac_set_cam_mask(regs, (u8 *) &temp, VELOCITY_VLAN_ID_CAM); + mac_set_vlan_cam_mask(regs, (u8 *) &temp); } } +static void velocity_vlan_rx_add_vid(struct net_device *dev, unsigned short vid) +{ + struct velocity_info *vptr = netdev_priv(dev); + + spin_lock_irq(&vptr->lock); + velocity_init_cam_filter(vptr); + spin_unlock_irq(&vptr->lock); +} + +static void velocity_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid) +{ + struct velocity_info *vptr = netdev_priv(dev); + + spin_lock_irq(&vptr->lock); + vlan_group_set_device(vptr->vlgrp, vid, NULL); + velocity_init_cam_filter(vptr); 
+ spin_unlock_irq(&vptr->lock); +} + + /** * velocity_rx_reset - handle a receive reset * @vptr: velocity we are resetting @@ -712,7 +879,6 @@ static int __devinit velocity_found1(struct pci_dev *pdev, const struct pci_devi /* Chain it all together */ - SET_MODULE_OWNER(dev); SET_NETDEV_DEV(dev, &pdev->dev); vptr = netdev_priv(dev); @@ -791,13 +957,17 @@ static int __devinit velocity_found1(struct pci_dev *pdev, const struct pci_devi dev->do_ioctl = velocity_ioctl; dev->ethtool_ops = &velocity_ethtool_ops; dev->change_mtu = velocity_change_mtu; + + dev->vlan_rx_add_vid = velocity_vlan_rx_add_vid; + dev->vlan_rx_kill_vid = velocity_vlan_rx_kill_vid; + #ifdef VELOCITY_ZERO_COPY_SUPPORT dev->features |= NETIF_F_SG; #endif + dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_FILTER; - if (vptr->flags & VELOCITY_FLAGS_TX_CSUM) { + if (vptr->flags & VELOCITY_FLAGS_TX_CSUM) dev->features |= NETIF_F_IP_CSUM; - } ret = register_netdev(dev); if (ret < 0) @@ -1071,14 +1241,12 @@ static int velocity_rx_refill(struct velocity_info *vptr) static int velocity_init_rd_ring(struct velocity_info *vptr) { - int ret = -ENOMEM; - unsigned int rsize = sizeof(struct velocity_rd_info) * - vptr->options.numrx; + int ret; - vptr->rd_info = kmalloc(rsize, GFP_KERNEL); - if(vptr->rd_info == NULL) - goto out; - memset(vptr->rd_info, 0, rsize); + vptr->rd_info = kcalloc(vptr->options.numrx, + sizeof(struct velocity_rd_info), GFP_KERNEL); + if (!vptr->rd_info) + return -ENOMEM; vptr->rd_filled = vptr->rd_dirty = vptr->rd_curr = 0; @@ -1088,7 +1256,7 @@ static int velocity_init_rd_ring(struct velocity_info *vptr) "%s: failed to allocate RX buffer.\n", vptr->dev->name); velocity_free_rd_ring(vptr); } -out: + return ret; } @@ -1142,21 +1310,19 @@ static int velocity_init_td_ring(struct velocity_info *vptr) dma_addr_t curr; struct tx_desc *td; struct velocity_td_info *td_info; - unsigned int tsize = sizeof(struct velocity_td_info) * - vptr->options.numtx; /* Init the TD ring entries */ for (j = 0; j < vptr->num_txq; j++) { curr = vptr->td_pool_dma[j]; - vptr->td_infos[j] = kmalloc(tsize, GFP_KERNEL); - if(vptr->td_infos[j] == NULL) - { + vptr->td_infos[j] = kcalloc(vptr->options.numtx, + sizeof(struct velocity_td_info), + GFP_KERNEL); + if (!vptr->td_infos[j]) { while(--j >= 0) kfree(vptr->td_infos[j]); return -ENOMEM; } - memset(vptr->td_infos[j], 0, tsize); for (i = 0; i < vptr->options.numtx; i++, curr += sizeof(struct tx_desc)) { td = &(vptr->td_rings[j][i]); @@ -1994,8 +2160,8 @@ static int velocity_xmit(struct sk_buff *skb, struct net_device *dev) td_ptr->tdesc1.CMDZ = 2; } - if (vptr->flags & VELOCITY_FLAGS_TAGGING) { - td_ptr->tdesc1.pqinf.VID = (vptr->options.vid & 0xfff); + if (vptr->vlgrp && vlan_tx_tag_present(skb)) { + td_ptr->tdesc1.pqinf.VID = vlan_tx_tag_get(skb); td_ptr->tdesc1.pqinf.priority = 0; td_ptr->tdesc1.pqinf.CFI = 0; td_ptr->tdesc1.TCR |= TCR0_VETAG; @@ -2121,14 +2287,14 @@ static void velocity_set_multi(struct net_device *dev) rx_mode = (RCR_AM | RCR_AB); } else { int offset = MCAM_SIZE - vptr->multicast_limit; - mac_get_cam_mask(regs, vptr->mCAMmask, VELOCITY_MULTICAST_CAM); + mac_get_cam_mask(regs, vptr->mCAMmask); for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count; i++, mclist = mclist->next) { - mac_set_cam(regs, i + offset, mclist->dmi_addr, VELOCITY_MULTICAST_CAM); + mac_set_cam(regs, i + offset, mclist->dmi_addr); vptr->mCAMmask[(offset + i) / 8] |= 1 << ((offset + i) & 7); } - mac_set_cam_mask(regs, vptr->mCAMmask, VELOCITY_MULTICAST_CAM); + mac_set_cam_mask(regs, 
vptr->mCAMmask); rx_mode = (RCR_AM | RCR_AB); } if (dev->mtu > 1500) diff --git a/drivers/net/via-velocity.h b/drivers/net/via-velocity.h index b9e114d36c0f67efc3a13fbfc0e55f6870e0eca6..aa9179623d9048957f50037cd0c7f7a285532342 100644 --- a/drivers/net/via-velocity.h +++ b/drivers/net/via-velocity.h @@ -1173,7 +1173,7 @@ enum chip_type { struct velocity_info_tbl { enum chip_type chip_id; - char *name; + const char *name; int txqueue; u32 flags; }; @@ -1194,14 +1194,6 @@ struct velocity_info_tbl { #define mac_disable_int(regs) writel(CR0_GINTMSK1,&((regs)->CR0Clr)) #define mac_enable_int(regs) writel(CR0_GINTMSK1,&((regs)->CR0Set)) -#define mac_hw_mibs_read(regs, MIBs) {\ - int i;\ - BYTE_REG_BITS_ON(MIBCR_MPTRINI,&((regs)->MIBCR));\ - for (i=0;iMIBData));\ - }\ -} - #define mac_set_dma_length(regs, n) {\ BYTE_REG_BITS_SET((n),0x07,&((regs)->DCFG));\ } @@ -1226,195 +1218,17 @@ struct velocity_info_tbl { writew(TRDCSR_WAK<<(n*4),&((regs)->TDCSRSet));\ } -#define mac_eeprom_reload(regs) {\ - int i=0;\ - BYTE_REG_BITS_ON(EECSR_RELOAD,&((regs)->EECSR));\ - do {\ - udelay(10);\ - if (i++>0x1000) {\ - break;\ - }\ - }while (BYTE_REG_BITS_IS_ON(EECSR_RELOAD,&((regs)->EECSR)));\ -} - -enum velocity_cam_type { - VELOCITY_VLAN_ID_CAM = 0, - VELOCITY_MULTICAST_CAM -}; - -/** - * mac_get_cam_mask - Read a CAM mask - * @regs: register block for this velocity - * @mask: buffer to store mask - * @cam_type: CAM to fetch - * - * Fetch the mask bits of the selected CAM and store them into the - * provided mask buffer. - */ - -static inline void mac_get_cam_mask(struct mac_regs __iomem * regs, u8 * mask, enum velocity_cam_type cam_type) -{ - int i; - /* Select CAM mask */ - BYTE_REG_BITS_SET(CAMCR_PS_CAM_MASK, CAMCR_PS1 | CAMCR_PS0, ®s->CAMCR); - - if (cam_type == VELOCITY_VLAN_ID_CAM) - writeb(CAMADDR_VCAMSL, ®s->CAMADDR); - else - writeb(0, ®s->CAMADDR); - - /* read mask */ - for (i = 0; i < 8; i++) - *mask++ = readb(&(regs->MARCAM[i])); - - /* disable CAMEN */ - writeb(0, ®s->CAMADDR); - - /* Select mar */ - BYTE_REG_BITS_SET(CAMCR_PS_MAR, CAMCR_PS1 | CAMCR_PS0, ®s->CAMCR); +static inline void mac_eeprom_reload(struct mac_regs __iomem * regs) { + int i=0; + BYTE_REG_BITS_ON(EECSR_RELOAD,&(regs->EECSR)); + do { + udelay(10); + if (i++>0x1000) + break; + } while (BYTE_REG_BITS_IS_ON(EECSR_RELOAD,&(regs->EECSR))); } -/** - * mac_set_cam_mask - Set a CAM mask - * @regs: register block for this velocity - * @mask: CAM mask to load - * @cam_type: CAM to store - * - * Store a new mask into a CAM - */ - -static inline void mac_set_cam_mask(struct mac_regs __iomem * regs, u8 * mask, enum velocity_cam_type cam_type) -{ - int i; - /* Select CAM mask */ - BYTE_REG_BITS_SET(CAMCR_PS_CAM_MASK, CAMCR_PS1 | CAMCR_PS0, ®s->CAMCR); - - if (cam_type == VELOCITY_VLAN_ID_CAM) - writeb(CAMADDR_CAMEN | CAMADDR_VCAMSL, ®s->CAMADDR); - else - writeb(CAMADDR_CAMEN, ®s->CAMADDR); - - for (i = 0; i < 8; i++) { - writeb(*mask++, &(regs->MARCAM[i])); - } - /* disable CAMEN */ - writeb(0, ®s->CAMADDR); - - /* Select mar */ - BYTE_REG_BITS_SET(CAMCR_PS_MAR, CAMCR_PS1 | CAMCR_PS0, ®s->CAMCR); -} - -/** - * mac_set_cam - set CAM data - * @regs: register block of this velocity - * @idx: Cam index - * @addr: 2 or 6 bytes of CAM data - * @cam_type: CAM to load - * - * Load an address or vlan tag into a CAM - */ - -static inline void mac_set_cam(struct mac_regs __iomem * regs, int idx, u8 *addr, enum velocity_cam_type cam_type) -{ - int i; - - /* Select CAM mask */ - BYTE_REG_BITS_SET(CAMCR_PS_CAM_DATA, CAMCR_PS1 | CAMCR_PS0, ®s->CAMCR); - - 
idx &= (64 - 1); - - if (cam_type == VELOCITY_VLAN_ID_CAM) - writeb(CAMADDR_CAMEN | CAMADDR_VCAMSL | idx, ®s->CAMADDR); - else - writeb(CAMADDR_CAMEN | idx, ®s->CAMADDR); - - if (cam_type == VELOCITY_VLAN_ID_CAM) - writew(*((u16 *) addr), ®s->MARCAM[0]); - else { - for (i = 0; i < 6; i++) { - writeb(*addr++, &(regs->MARCAM[i])); - } - } - BYTE_REG_BITS_ON(CAMCR_CAMWR, ®s->CAMCR); - - udelay(10); - - writeb(0, ®s->CAMADDR); - - /* Select mar */ - BYTE_REG_BITS_SET(CAMCR_PS_MAR, CAMCR_PS1 | CAMCR_PS0, ®s->CAMCR); -} - -/** - * mac_get_cam - fetch CAM data - * @regs: register block of this velocity - * @idx: Cam index - * @addr: buffer to hold up to 6 bytes of CAM data - * @cam_type: CAM to load - * - * Load an address or vlan tag from a CAM into the buffer provided by - * the caller. VLAN tags are 2 bytes the address cam entries are 6. - */ - -static inline void mac_get_cam(struct mac_regs __iomem * regs, int idx, u8 *addr, enum velocity_cam_type cam_type) -{ - int i; - - /* Select CAM mask */ - BYTE_REG_BITS_SET(CAMCR_PS_CAM_DATA, CAMCR_PS1 | CAMCR_PS0, ®s->CAMCR); - - idx &= (64 - 1); - - if (cam_type == VELOCITY_VLAN_ID_CAM) - writeb(CAMADDR_CAMEN | CAMADDR_VCAMSL | idx, ®s->CAMADDR); - else - writeb(CAMADDR_CAMEN | idx, ®s->CAMADDR); - - BYTE_REG_BITS_ON(CAMCR_CAMRD, ®s->CAMCR); - - udelay(10); - - if (cam_type == VELOCITY_VLAN_ID_CAM) - *((u16 *) addr) = readw(&(regs->MARCAM[0])); - else - for (i = 0; i < 6; i++, addr++) - *((u8 *) addr) = readb(&(regs->MARCAM[i])); - - writeb(0, ®s->CAMADDR); - - /* Select mar */ - BYTE_REG_BITS_SET(CAMCR_PS_MAR, CAMCR_PS1 | CAMCR_PS0, ®s->CAMCR); -} - -/** - * mac_wol_reset - reset WOL after exiting low power - * @regs: register block of this velocity - * - * Called after we drop out of wake on lan mode in order to - * reset the Wake on lan features. This function doesn't restore - * the rest of the logic from the result of sleep/wakeup - */ - -static inline void mac_wol_reset(struct mac_regs __iomem * regs) -{ - - /* Turn off SWPTAG right after leaving power mode */ - BYTE_REG_BITS_OFF(STICKHW_SWPTAG, ®s->STICKHW); - /* clear sticky bits */ - BYTE_REG_BITS_OFF((STICKHW_DS1 | STICKHW_DS0), ®s->STICKHW); - - BYTE_REG_BITS_OFF(CHIPGCR_FCGMII, ®s->CHIPGCR); - BYTE_REG_BITS_OFF(CHIPGCR_FCMODE, ®s->CHIPGCR); - /* disable force PME-enable */ - writeb(WOLCFG_PMEOVR, ®s->WOLCFGClr); - /* disable power-event config bit */ - writew(0xFFFF, ®s->WOLCRClr); - /* clear power status */ - writew(0xFFFF, ®s->WOLSRClr); -} - - /* * Header for WOL definitions. 
Used to compute hashes */ @@ -1701,7 +1515,7 @@ struct velocity_opt { int numrx; /* Number of RX descriptors */ int numtx; /* Number of TX descriptors */ enum speed_opt spd_dpx; /* Media link mode */ - int vid; /* vlan id */ + int DMA_length; /* DMA length */ int rx_thresh; /* RX_THRESH */ int flow_cntl; @@ -1727,6 +1541,7 @@ struct velocity_info { dma_addr_t tx_bufs_dma; u8 *tx_bufs; + struct vlan_group *vlgrp; u8 ip_addr[4]; enum chip_type chip_id; diff --git a/drivers/net/wan/c101.c b/drivers/net/wan/c101.c index 8ead774d14c87326498f5cd96a8933fde0795ba9..c4c8eab8574f79bb36fc3352648d8df09ab579fb 100644 --- a/drivers/net/wan/c101.c +++ b/drivers/net/wan/c101.c @@ -363,7 +363,6 @@ static int __init c101_run(unsigned long irq, unsigned long winbase) hdlc = dev_to_hdlc(dev); spin_lock_init(&card->lock); - SET_MODULE_OWNER(dev); dev->irq = irq; dev->mem_start = winbase; dev->mem_end = winbase + C101_MAPPED_RAM_SIZE - 1; diff --git a/drivers/net/wan/cycx_x25.c b/drivers/net/wan/cycx_x25.c index a8af28b273d3f162f2394698990891713b57cbab..8a1778cf98d1cc0bc7b282e2d43610f78e9c8104 100644 --- a/drivers/net/wan/cycx_x25.c +++ b/drivers/net/wan/cycx_x25.c @@ -131,14 +131,15 @@ static int cycx_wan_update(struct wan_device *wandev), cycx_wan_del_if(struct wan_device *wandev, struct net_device *dev); /* Network device interface */ -static int cycx_netdevice_init(struct net_device *dev), - cycx_netdevice_open(struct net_device *dev), - cycx_netdevice_stop(struct net_device *dev), - cycx_netdevice_hard_header(struct sk_buff *skb, - struct net_device *dev, u16 type, - void *daddr, void *saddr, unsigned len), - cycx_netdevice_rebuild_header(struct sk_buff *skb), - cycx_netdevice_hard_start_xmit(struct sk_buff *skb, +static int cycx_netdevice_init(struct net_device *dev); +static int cycx_netdevice_open(struct net_device *dev); +static int cycx_netdevice_stop(struct net_device *dev); +static int cycx_netdevice_hard_header(struct sk_buff *skb, + struct net_device *dev, u16 type, + const void *daddr, const void *saddr, + unsigned len); +static int cycx_netdevice_rebuild_header(struct sk_buff *skb); +static int cycx_netdevice_hard_start_xmit(struct sk_buff *skb, struct net_device *dev); static struct net_device_stats * @@ -468,7 +469,14 @@ static int cycx_wan_del_if(struct wan_device *wandev, struct net_device *dev) return 0; } + /* Network Device Interface */ + +static const struct header_ops cycx_header_ops = { + .create = cycx_netdevice_hard_header, + .rebuild = cycx_netdevice_rebuild_header, +}; + /* Initialize Linux network interface. * * This routine is called only once for each interface, during Linux network @@ -483,8 +491,8 @@ static int cycx_netdevice_init(struct net_device *dev) /* Initialize device driver entry points */ dev->open = cycx_netdevice_open; dev->stop = cycx_netdevice_stop; - dev->hard_header = cycx_netdevice_hard_header; - dev->rebuild_header = cycx_netdevice_rebuild_header; + dev->header_ops = &cycx_header_ops; + dev->hard_start_xmit = cycx_netdevice_hard_start_xmit; dev->get_stats = cycx_netdevice_get_stats; @@ -508,7 +516,6 @@ static int cycx_netdevice_init(struct net_device *dev) /* Set transmit buffer queue length */ dev->tx_queue_len = 10; - SET_MODULE_OWNER(dev); /* Initialize socket buffers */ cycx_x25_set_chan_state(dev, WAN_DISCONNECTED); @@ -555,7 +562,8 @@ static int cycx_netdevice_stop(struct net_device *dev) * Return: media header length. 
*/ static int cycx_netdevice_hard_header(struct sk_buff *skb, struct net_device *dev, u16 type, - void *daddr, void *saddr, unsigned len) + const void *daddr, const void *saddr, + unsigned len) { skb->protocol = type; diff --git a/drivers/net/wan/dlci.c b/drivers/net/wan/dlci.c index 66be20c292b6dcd8855ec84830a15e7850c70baa..96b232446c0b03533851be84da4b158e775f38a1 100644 --- a/drivers/net/wan/dlci.c +++ b/drivers/net/wan/dlci.c @@ -66,8 +66,8 @@ static void dlci_setup(struct net_device *); */ static int dlci_header(struct sk_buff *skb, struct net_device *dev, - unsigned short type, void *daddr, void *saddr, - unsigned len) + unsigned short type, const void *daddr, + const void *saddr, unsigned len) { struct frhdr hdr; struct dlci_local *dlp; @@ -361,7 +361,7 @@ static int dlci_add(struct dlci_add *dlci) /* validate slave device */ - slave = dev_get_by_name(dlci->devname); + slave = dev_get_by_name(&init_net, dlci->devname); if (!slave) return -ENODEV; @@ -427,7 +427,7 @@ static int dlci_del(struct dlci_add *dlci) int err; /* validate slave device */ - master = __dev_get_by_name(dlci->devname); + master = __dev_get_by_name(&init_net, dlci->devname); if (!master) return(-ENODEV); @@ -485,6 +485,10 @@ static int dlci_ioctl(unsigned int cmd, void __user *arg) return(err); } +static const struct header_ops dlci_header_ops = { + .create = dlci_header, +}; + static void dlci_setup(struct net_device *dev) { struct dlci_local *dlp = dev->priv; @@ -494,7 +498,7 @@ static void dlci_setup(struct net_device *dev) dev->stop = dlci_close; dev->do_ioctl = dlci_dev_ioctl; dev->hard_start_xmit = dlci_transmit; - dev->hard_header = dlci_header; + dev->header_ops = &dlci_header_ops; dev->get_stats = dlci_get_stats; dev->change_mtu = dlci_change_mtu; dev->destructor = free_netdev; @@ -513,6 +517,9 @@ static int dlci_dev_event(struct notifier_block *unused, { struct net_device *dev = (struct net_device *) ptr; + if (dev->nd_net != &init_net) + return NOTIFY_DONE; + if (event == NETDEV_UNREGISTER) { struct dlci_local *dlp; diff --git a/drivers/net/wan/dscc4.c b/drivers/net/wan/dscc4.c index 50d2f9108dca364806befcf9c1a068bf7609f4ef..33dc713b53017071acfcac25f7e5e9146534a4da 100644 --- a/drivers/net/wan/dscc4.c +++ b/drivers/net/wan/dscc4.c @@ -925,7 +925,6 @@ static int dscc4_found1(struct pci_dev *pdev, void __iomem *ioaddr) d->do_ioctl = dscc4_ioctl; d->tx_timeout = dscc4_tx_timeout; d->watchdog_timeo = TX_TIMEOUT; - SET_MODULE_OWNER(d); SET_NETDEV_DEV(d, &pdev->dev); dpriv->dev_id = i; diff --git a/drivers/net/wan/hdlc.c b/drivers/net/wan/hdlc.c index 65ad2e24caf071a5aa16a0a710a7b07c05c75aa3..d553e6f328513a56f633bf35d2e562c5d979f08f 100644 --- a/drivers/net/wan/hdlc.c +++ b/drivers/net/wan/hdlc.c @@ -36,6 +36,7 @@ #include #include #include +#include static const char* version = "HDLC support module revision 1.21"; @@ -66,6 +67,12 @@ static int hdlc_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *p, struct net_device *orig_dev) { struct hdlc_device_desc *desc = dev_to_desc(dev); + + if (dev->nd_net != &init_net) { + kfree_skb(skb); + return 0; + } + if (desc->netif_rx) return desc->netif_rx(skb); @@ -102,6 +109,9 @@ static int hdlc_device_event(struct notifier_block *this, unsigned long event, unsigned long flags; int on; + if (dev->nd_net != &init_net) + return NOTIFY_DONE; + if (dev->get_stats != hdlc_get_stats) return NOTIFY_DONE; /* not an HDLC device */ @@ -222,6 +232,8 @@ int hdlc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) return -EINVAL; } +static const struct 
header_ops hdlc_null_ops; + static void hdlc_setup_dev(struct net_device *dev) { /* Re-init all variables changed by HDLC protocol drivers, @@ -233,13 +245,9 @@ static void hdlc_setup_dev(struct net_device *dev) dev->type = ARPHRD_RAWHDLC; dev->hard_header_len = 16; dev->addr_len = 0; - dev->hard_header = NULL; - dev->rebuild_header = NULL; - dev->set_mac_address = NULL; - dev->hard_header_cache = NULL; - dev->header_cache_update = NULL; + dev->header_ops = &hdlc_null_ops; + dev->change_mtu = hdlc_change_mtu; - dev->hard_header_parse = NULL; } static void hdlc_setup(struct net_device *dev) diff --git a/drivers/net/wan/hdlc_cisco.c b/drivers/net/wan/hdlc_cisco.c index 9ec6cf2e510ee32510fd3002f762476efe183031..038a6e748bbffdcfdd39bca117709f65f5510bb0 100644 --- a/drivers/net/wan/hdlc_cisco.c +++ b/drivers/net/wan/hdlc_cisco.c @@ -74,7 +74,7 @@ static inline struct cisco_state * state(hdlc_device *hdlc) static int cisco_hard_header(struct sk_buff *skb, struct net_device *dev, - u16 type, void *daddr, void *saddr, + u16 type, const void *daddr, const void *saddr, unsigned int len) { struct hdlc_header *data; @@ -309,7 +309,6 @@ static void cisco_stop(struct net_device *dev) } - static struct hdlc_proto proto = { .start = cisco_start, .stop = cisco_stop, @@ -317,7 +316,10 @@ static struct hdlc_proto proto = { .ioctl = cisco_ioctl, .module = THIS_MODULE, }; - + +static const struct header_ops cisco_header_ops = { + .create = cisco_hard_header, +}; static int cisco_ioctl(struct net_device *dev, struct ifreq *ifr) { @@ -365,7 +367,7 @@ static int cisco_ioctl(struct net_device *dev, struct ifreq *ifr) memcpy(&state(hdlc)->settings, &new_settings, size); dev->hard_start_xmit = hdlc->xmit; - dev->hard_header = cisco_hard_header; + dev->header_ops = &cisco_header_ops; dev->type = ARPHRD_CISCO; netif_dormant_on(dev); return 0; diff --git a/drivers/net/wan/hdlc_ppp.c b/drivers/net/wan/hdlc_ppp.c index 4591437dd2f3dca75470b5d259bdaed8f1d65c5e..3caeb528eacea2f247820a6e2f65e181c54f9e2e 100644 --- a/drivers/net/wan/hdlc_ppp.c +++ b/drivers/net/wan/hdlc_ppp.c @@ -73,7 +73,7 @@ static void ppp_close(struct net_device *dev) sppp_close(dev); sppp_detach(dev); - dev->rebuild_header = NULL; + dev->change_mtu = state(hdlc)->old_change_mtu; dev->mtu = HDLC_MAX_MTU; dev->hard_header_len = 16; diff --git a/drivers/net/wan/hostess_sv11.c b/drivers/net/wan/hostess_sv11.c index bf5f8d9b5c831d941319016c9b9ce2d9348dd94b..83dbc924fcb577349018fdc8101d042c3e837c8e 100644 --- a/drivers/net/wan/hostess_sv11.c +++ b/drivers/net/wan/hostess_sv11.c @@ -241,8 +241,6 @@ static struct sv11_device *sv11_init(int iobase, int irq) if(!sv->netdev.dev) goto fail2; - SET_MODULE_OWNER(sv->netdev.dev); - dev=&sv->sync; /* diff --git a/drivers/net/wan/lapbether.c b/drivers/net/wan/lapbether.c index 6c302e9dbca2e2ee2aeabf570e0c37cca193c88b..fb37b809523150d2424710d7cdfdb5c1bff206e7 100644 --- a/drivers/net/wan/lapbether.c +++ b/drivers/net/wan/lapbether.c @@ -91,6 +91,9 @@ static int lapbeth_rcv(struct sk_buff *skb, struct net_device *dev, struct packe int len, err; struct lapbethdev *lapbeth; + if (dev->nd_net != &init_net) + goto drop; + if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL) return NET_RX_DROP; @@ -213,7 +216,7 @@ static void lapbeth_data_transmit(struct net_device *ndev, struct sk_buff *skb) skb->dev = dev = lapbeth->ethdev; - dev->hard_header(skb, dev, ETH_P_DEC, bcast_addr, NULL, 0); + dev_hard_header(skb, dev, ETH_P_DEC, bcast_addr, NULL, 0); dev_queue_xmit(skb); } @@ -326,7 +329,6 @@ static void lapbeth_setup(struct 
net_device *dev) dev->hard_header_len = 3; dev->mtu = 1000; dev->addr_len = 0; - SET_MODULE_OWNER(dev); } /* @@ -391,6 +393,9 @@ static int lapbeth_device_event(struct notifier_block *this, struct lapbethdev *lapbeth; struct net_device *dev = ptr; + if (dev->nd_net != &init_net) + return NOTIFY_DONE; + if (!dev_is_ethdev(dev)) return NOTIFY_DONE; diff --git a/drivers/net/wan/lmc/lmc_main.c b/drivers/net/wan/lmc/lmc_main.c index ae132c1c5459d0e0fe88ac354831bf306d613445..5ea877221f463c9dbb8c45b386a548a07464b435 100644 --- a/drivers/net/wan/lmc/lmc_main.c +++ b/drivers/net/wan/lmc/lmc_main.c @@ -883,7 +883,6 @@ static int __devinit lmc_init_one(struct pci_dev *pdev, dev->base_addr = pci_resource_start(pdev, 0); dev->irq = pdev->irq; - SET_MODULE_OWNER(dev); SET_NETDEV_DEV(dev, &pdev->dev); /* diff --git a/drivers/net/wan/lmc/lmc_proto.c b/drivers/net/wan/lmc/lmc_proto.c index 31e1799571add1818688bc23f8d326e38178b0c3..426c0678d9830c2725f2802009ab37d0e29ff405 100644 --- a/drivers/net/wan/lmc/lmc_proto.c +++ b/drivers/net/wan/lmc/lmc_proto.c @@ -111,7 +111,7 @@ void lmc_proto_attach(lmc_softc_t *sc) /*FOLD00*/ * They set a few basics because they don't use sync_ppp */ dev->flags |= IFF_POINTOPOINT; - dev->hard_header = NULL; + dev->hard_header_len = 0; dev->addr_len = 0; } diff --git a/drivers/net/wan/n2.c b/drivers/net/wan/n2.c index cbdf0b748bdede7d6a642aea211ec635cc9c742b..0a566b0daacb51d0d979cb00af07ae341db76625 100644 --- a/drivers/net/wan/n2.c +++ b/drivers/net/wan/n2.c @@ -459,7 +459,6 @@ static int __init n2_run(unsigned long io, unsigned long irq, port->log_node = 1; spin_lock_init(&port->lock); - SET_MODULE_OWNER(dev); dev->irq = irq; dev->mem_start = winbase; dev->mem_end = winbase + USE_WINDOWSIZE - 1; diff --git a/drivers/net/wan/pc300too.c b/drivers/net/wan/pc300too.c index 6353cb5c658d13703f64c4ee0d429be124b76800..bf1b01590429d00ee45f3947c632726c4078b4de 100644 --- a/drivers/net/wan/pc300too.c +++ b/drivers/net/wan/pc300too.c @@ -468,7 +468,6 @@ static int __devinit pc300_pci_init_one(struct pci_dev *pdev, port->phy_node = i; spin_lock_init(&port->lock); - SET_MODULE_OWNER(dev); dev->irq = card->irq; dev->mem_start = ramphys; dev->mem_end = ramphys + ramsize - 1; diff --git a/drivers/net/wan/pci200syn.c b/drivers/net/wan/pci200syn.c index 092e51d89036a555e8bd190662592e0b91d55d89..b595b64e753809205151ae658d61ca2014f46d43 100644 --- a/drivers/net/wan/pci200syn.c +++ b/drivers/net/wan/pci200syn.c @@ -415,7 +415,6 @@ static int __devinit pci200_pci_init_one(struct pci_dev *pdev, port->phy_node = i; spin_lock_init(&port->lock); - SET_MODULE_OWNER(dev); dev->irq = card->irq; dev->mem_start = ramphys; dev->mem_end = ramphys + ramsize - 1; diff --git a/drivers/net/wan/sbni.c b/drivers/net/wan/sbni.c index 1cc18e787a65e4c39dc8e0e1fff2eed027d1ab6c..76db40d200d8e3000c2c588bcbe86d4d275241b7 100644 --- a/drivers/net/wan/sbni.c +++ b/drivers/net/wan/sbni.c @@ -54,6 +54,7 @@ #include #include +#include #include #include @@ -215,8 +216,6 @@ static void __init sbni_devsetup(struct net_device *dev) dev->get_stats = &sbni_get_stats; dev->set_multicast_list = &set_multicast_list; dev->do_ioctl = &sbni_ioctl; - - SET_MODULE_OWNER( dev ); } int __init sbni_probe(int unit) @@ -1361,7 +1360,7 @@ sbni_ioctl( struct net_device *dev, struct ifreq *ifr, int cmd ) if (copy_from_user( slave_name, ifr->ifr_data, sizeof slave_name )) return -EFAULT; - slave_dev = dev_get_by_name( slave_name ); + slave_dev = dev_get_by_name(&init_net, slave_name ); if( !slave_dev || !(slave_dev->flags & IFF_UP) ) { printk( 
KERN_ERR "%s: trying to enslave non-active " "device %s\n", dev->name, slave_name ); diff --git a/drivers/net/wan/sdla.c b/drivers/net/wan/sdla.c index 792e588d7d61605ceb05233fca6d448cadce2fe2..b39a541b25093b8724bb72603a4cf712d40a04f6 100644 --- a/drivers/net/wan/sdla.c +++ b/drivers/net/wan/sdla.c @@ -1603,7 +1603,6 @@ static void setup_sdla(struct net_device *dev) netdev_boot_setup_check(dev); - SET_MODULE_OWNER(dev); dev->flags = 0; dev->type = 0xFFFF; dev->hard_header_len = 0; diff --git a/drivers/net/wan/syncppp.c b/drivers/net/wan/syncppp.c index 67fc67cfd452562768adb8b09bfb369ca0f1e360..232ecba5340fb1f703c20218e1c09c9dfa156a1f 100644 --- a/drivers/net/wan/syncppp.c +++ b/drivers/net/wan/syncppp.c @@ -51,6 +51,7 @@ #include #include +#include #include #include @@ -358,8 +359,10 @@ static void sppp_input (struct net_device *dev, struct sk_buff *skb) * Handle transmit packets. */ -static int sppp_hard_header(struct sk_buff *skb, struct net_device *dev, __u16 type, - void *daddr, void *saddr, unsigned int len) +static int sppp_hard_header(struct sk_buff *skb, + struct net_device *dev, __u16 type, + const void *daddr, const void *saddr, + unsigned int len) { struct sppp *sp = (struct sppp *)sppp_of(dev); struct ppp_header *h; @@ -391,10 +394,9 @@ static int sppp_hard_header(struct sk_buff *skb, struct net_device *dev, __u16 t return sizeof(struct ppp_header); } -static int sppp_rebuild_header(struct sk_buff *skb) -{ - return 0; -} +static const struct header_ops sppp_header_ops = { + .create = sppp_hard_header, +}; /* * Send keepalive packets, every 10 seconds. @@ -1097,8 +1099,8 @@ void sppp_attach(struct ppp_device *pd) * hard_start_xmit. */ - dev->hard_header = sppp_hard_header; - dev->rebuild_header = sppp_rebuild_header; + dev->header_ops = &sppp_header_ops; + dev->tx_queue_len = 10; dev->type = ARPHRD_HDLC; dev->addr_len = 0; @@ -1114,8 +1116,6 @@ void sppp_attach(struct ppp_device *pd) dev->stop = sppp_close; #endif dev->change_mtu = sppp_change_mtu; - dev->hard_header_cache = NULL; - dev->header_cache_update = NULL; dev->flags = IFF_MULTICAST|IFF_POINTOPOINT|IFF_NOARP; } @@ -1445,6 +1445,11 @@ static void sppp_print_bytes (u_char *p, u16 len) static int sppp_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *p, struct net_device *orig_dev) { + if (dev->nd_net != &init_net) { + kfree_skb(skb); + return 0; + } + if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL) return NET_RX_DROP; sppp_input(dev,skb); diff --git a/drivers/net/wan/wanxl.c b/drivers/net/wan/wanxl.c index 3c78f98563807524225bad9eb59e2c72dec43cab..8e320b76ae0ee49dee1b3598320081fd2af3e792 100644 --- a/drivers/net/wan/wanxl.c +++ b/drivers/net/wan/wanxl.c @@ -779,7 +779,6 @@ static int __devinit wanxl_pci_init_one(struct pci_dev *pdev, port->dev = dev; hdlc = dev_to_hdlc(dev); spin_lock_init(&port->lock); - SET_MODULE_OWNER(dev); dev->tx_queue_len = 50; dev->do_ioctl = wanxl_ioctl; dev->open = wanxl_open; diff --git a/drivers/net/wd.c b/drivers/net/wd.c index a0326818ff2f815853f29871bb3b735750995e6a..fa14255282afe96770d5b44cc887fbaba22814df 100644 --- a/drivers/net/wd.c +++ b/drivers/net/wd.c @@ -93,8 +93,6 @@ static int __init do_wd_probe(struct net_device *dev) int mem_start = dev->mem_start; int mem_end = dev->mem_end; - SET_MODULE_OWNER(dev); - if (base_addr > 0x1ff) { /* Check a user specified location. 
*/ r = request_region(base_addr, WD_IO_EXTENT, "wd-probe"); if ( r == NULL) @@ -158,6 +156,7 @@ static int __init wd_probe1(struct net_device *dev, int ioaddr) int word16 = 0; /* 0 = 8 bit, 1 = 16 bit */ const char *model_name; static unsigned version_printed; + DECLARE_MAC_BUF(mac); for (i = 0; i < 8; i++) checksum += inb(ioaddr + 8 + i); @@ -176,9 +175,11 @@ static int __init wd_probe1(struct net_device *dev, int ioaddr) if (ei_debug && version_printed++ == 0) printk(version); - printk("%s: WD80x3 at %#3x,", dev->name, ioaddr); for (i = 0; i < 6; i++) - printk(" %2.2X", dev->dev_addr[i] = inb(ioaddr + 8 + i)); + dev->dev_addr[i] = inb(ioaddr + 8 + i); + + printk("%s: WD80x3 at %#3x, %s", + dev->name, ioaddr, print_mac(mac, dev->dev_addr)); /* The following PureData probe code was contributed by Mike Jagdis . Puredata does software diff --git a/drivers/net/wireless/Kconfig b/drivers/net/wireless/Kconfig index ae27af0141c02ee5b6100aa7f512dfdd77c4e703..5a6fdfd0f1408375dbcceed450635937dfbb885c 100644 --- a/drivers/net/wireless/Kconfig +++ b/drivers/net/wireless/Kconfig @@ -63,11 +63,6 @@ config WAVELAN a Radio LAN (wireless Ethernet-like Local Area Network) using the radio frequencies 900 MHz and 2.4 GHz. - This driver support the ISA version of the WaveLAN card. A separate - driver for the PCMCIA (PC-card) hardware is available in David - Hinds' pcmcia-cs package (see the file - for location). - If you want to use an ISA WaveLAN card under Linux, say Y and read the Ethernet-HOWTO, available from . Some more specific @@ -280,6 +275,13 @@ config LIBERTAS_USB ---help--- A driver for Marvell Libertas 8388 USB devices. +config LIBERTAS_CS + tristate "Marvell Libertas 8385 CompactFlash 802.11b/g cards" + depends on LIBERTAS && PCMCIA && EXPERIMENTAL + select FW_LOADER + ---help--- + A driver for Marvell Libertas 8385 CompactFlash devices. + config LIBERTAS_DEBUG bool "Enable full debugging output in the Libertas module." depends on LIBERTAS @@ -379,30 +381,6 @@ config PCI_HERMES common. Some of the built-in wireless adaptors in laptops are of this variety. -config ATMEL - tristate "Atmel at76c50x chipset 802.11b support" - depends on (PCI || PCMCIA) && WLAN_80211 - select WIRELESS_EXT - select FW_LOADER - select CRC32 - ---help--- - A driver 802.11b wireless cards based on the Atmel fast-vnet - chips. This driver supports standard Linux wireless extensions. - - Many cards based on this chipset do not have flash memory - and need their firmware loaded at start-up. If yours is - one of these, you will need to provide a firmware image - to be loaded into the card by the driver. The Atmel - firmware package can be downloaded from - - -config PCI_ATMEL - tristate "Atmel at76c506 PCI cards" - depends on ATMEL && PCI - ---help--- - Enable support for PCI and mini-PCI cards containing the - Atmel at76c506 chip. - config PCMCIA_HERMES tristate "Hermes PCMCIA card support" depends on PCMCIA && HERMES @@ -414,12 +392,7 @@ config PCMCIA_HERMES such as the Linksys, D-Link and Farallon Skyline. It should also work on Symbol cards such as the 3Com AirConnect and Ericsson WLAN. - To use your PC-cards, you will need supporting software from David - Hinds' pcmcia-cs package (see the file - for location). You also want to check out the PCMCIA-HOWTO, - available from . - - You will also very likely also need the Wireless Tools in order to + You will very likely need the Wireless Tools in order to configure your card and that /etc/pcmcia/wireless.opts works: . 
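The WAN hunks above (dlci, Cisco HDLC, syncppp, lapbether) all follow the same conversion: the per-device dev->hard_header, dev->rebuild_header and header-cache hooks are replaced by a single const struct header_ops, and callers go through the dev_hard_header() helper instead of dereferencing dev->hard_header. The sketch below only illustrates that pattern; the driver name "foo" and its two-byte link header are invented, while struct header_ops, its .create method and dev_hard_header() are the interfaces these hunks actually switch to.

#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/if_ether.h>

/* Hypothetical driver "foo": old-style dev->hard_header hook rewritten
 * as a header_ops method.  Note the const daddr/saddr pointers, which is
 * why every driver's header-building function changes signature here. */
static int foo_header(struct sk_buff *skb, struct net_device *dev,
		      unsigned short type, const void *daddr,
		      const void *saddr, unsigned len)
{
	u8 *hdr = skb_push(skb, 2);	/* made-up 2-byte link header */

	hdr[0] = 0x42;			/* illustrative only */
	hdr[1] = type & 0xff;
	return 2;			/* bytes added in front of the data */
}

static const struct header_ops foo_header_ops = {
	.create = foo_header,		/* was: dev->hard_header = foo_header */
};

static void foo_setup(struct net_device *dev)
{
	dev->header_ops = &foo_header_ops;
}

/* Callers use the helper rather than dev->hard_header directly: */
static void foo_send(struct net_device *dev, struct sk_buff *skb,
		     const u8 *dest)
{
	dev_hard_header(skb, dev, ETH_P_802_2, dest, NULL, skb->len);
	dev_queue_xmit(skb);
}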
@@ -437,6 +410,40 @@ config PCMCIA_SPECTRUM for downloading Symbol firmware are available at +config ATMEL + tristate "Atmel at76c50x chipset 802.11b support" + depends on (PCI || PCMCIA) && WLAN_80211 + select WIRELESS_EXT + select FW_LOADER + select CRC32 + ---help--- + A driver for 802.11b wireless cards based on the Atmel fast-vnet + chips. This driver supports standard Linux wireless extensions. + + Many cards based on this chipset do not have flash memory + and need their firmware loaded at start-up. If yours is + one of these, you will need to provide a firmware image + to be loaded into the card by the driver. The Atmel + firmware package can be downloaded from + + +config PCI_ATMEL + tristate "Atmel at76c506 PCI cards" + depends on ATMEL && PCI + ---help--- + Enable support for PCI and mini-PCI cards containing the + Atmel at76c506 chip. + +config PCMCIA_ATMEL + tristate "Atmel at76c502/at76c504 PCMCIA cards" + depends on ATMEL && PCMCIA + select WIRELESS_EXT + select FW_LOADER + select CRC32 + ---help--- + Enable support for PCMCIA cards containing the + Atmel at76c502 and at76c504 chips. + config AIRO_CS tristate "Cisco/Aironet 34X/35X/4500/4800 PCMCIA cards" depends on PCMCIA && (BROKEN || !M32R) && WLAN_80211 @@ -457,21 +464,6 @@ config AIRO_CS and Cisco proprietary API, so both the Linux Wireless Tools and the Cisco Linux utilities can be used to configure the card. - To use your PC-cards, you will need supporting software from David - Hinds' pcmcia-cs package (see the file - for location). You also want to check out the PCMCIA-HOWTO, - available from . - -config PCMCIA_ATMEL - tristate "Atmel at76c502/at76c504 PCMCIA cards" - depends on ATMEL && PCMCIA - select WIRELESS_EXT - select FW_LOADER - select CRC32 - ---help--- - Enable support for PCMCIA cards containing the - Atmel at76c502 and at76c504 chips. - config PCMCIA_WL3501 tristate "Planet WL3501 PCMCIA cards" depends on EXPERIMENTAL && PCMCIA && WLAN_80211 @@ -558,8 +550,52 @@ config RTL8187 Thanks to Realtek for their support! +config ADM8211 + tristate "ADMtek ADM8211 support" + depends on MAC80211 && PCI && WLAN_80211 && EXPERIMENTAL + select CRC32 + select EEPROM_93CX6 + ---help--- + This driver is for ADM8211A, ADM8211B, and ADM8211C based cards. + These are PCI/mini-PCI/Cardbus 802.11b chips found in cards such as: + + Xterasys Cardbus XN-2411b + Blitz NetWave Point PC + TrendNet 221pc + Belkin F5D6001 + SMC 2635W + Linksys WPC11 v1 + Fiberline FL-WL-200X + 3com Office Connect (3CRSHPW796) + Corega WLPCIB-11 + SMC 2602W V2 EU + D-Link DWL-520 Revision C + + However, some of these cards have been replaced with other chips + like the RTL8180L (Xterasys Cardbus XN-2411b, Belkin F5D6001) or + the Ralink RT2400 (SMC2635W) without a model number change. + + Thanks to Infineon-ADMtek for their support of this driver.
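The ADM8211 entry above, like the P54, rt2x00, b43 and iwlwifi entries that follow, selects a mac80211-based driver. As background for the adm8211.c code later in this patch, here is a minimal, hypothetical sketch of how such a driver hands an ieee80211_ops table to the stack; the names foo_priv, foo_ops and foo_wlan_probe are invented, and the real adm8211 probe routine (not shown in this excerpt) does considerably more setup and error handling.

#include <linux/pci.h>
#include <net/mac80211.h>

struct foo_priv {
	spinlock_t lock;		/* per-device state would live here */
};

static const struct ieee80211_ops foo_ops = {
	/* .tx, .start, .stop, .config, ... as in adm8211_ops above;
	 * must be filled in before registering */
};

static int __devinit foo_wlan_probe(struct pci_dev *pdev,
				    const struct pci_device_id *id)
{
	struct ieee80211_hw *hw;
	int err;

	/* allocate ieee80211_hw plus driver-private data, bound to our ops */
	hw = ieee80211_alloc_hw(sizeof(struct foo_priv), &foo_ops);
	if (!hw)
		return -ENOMEM;

	SET_IEEE80211_DEV(hw, &pdev->dev);	/* parent device for sysfs */
	pci_set_drvdata(pdev, hw);

	/* ... enable the PCI device, map registers, read the MAC address,
	 * and fill in the hw capability fields before registering ... */

	err = ieee80211_register_hw(hw);	/* register with the mac80211 stack */
	if (err) {
		ieee80211_free_hw(hw);
		return err;
	}
	return 0;
}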
+ +config P54_COMMON + tristate "Softmac Prism54 support" + depends on MAC80211 && WLAN_80211 && FW_LOADER && EXPERIMENTAL + +config P54_USB + tristate "Prism54 USB support" + depends on P54_COMMON && USB + select CRC32 + +config P54_PCI + tristate "Prism54 PCI support" + depends on P54_COMMON && PCI + +source "drivers/net/wireless/iwlwifi/Kconfig" source "drivers/net/wireless/hostap/Kconfig" source "drivers/net/wireless/bcm43xx/Kconfig" +source "drivers/net/wireless/b43/Kconfig" +source "drivers/net/wireless/b43legacy/Kconfig" source "drivers/net/wireless/zd1211rw/Kconfig" +source "drivers/net/wireless/rt2x00/Kconfig" endmenu diff --git a/drivers/net/wireless/Makefile b/drivers/net/wireless/Makefile index 4eb6d9752881e7459a4bb0be66d959903c394e2d..6f32b53ee1283c5847aca504485bb6ac2a7e0977 100644 --- a/drivers/net/wireless/Makefile +++ b/drivers/net/wireless/Makefile @@ -36,6 +36,8 @@ obj-$(CONFIG_PRISM54) += prism54/ obj-$(CONFIG_HOSTAP) += hostap/ obj-$(CONFIG_BCM43XX) += bcm43xx/ +obj-$(CONFIG_B43) += b43/ +obj-$(CONFIG_B43LEGACY) += b43legacy/ obj-$(CONFIG_ZD1211RW) += zd1211rw/ # 16-bit wireless PCMCIA client drivers @@ -47,3 +49,12 @@ obj-$(CONFIG_LIBERTAS) += libertas/ rtl8187-objs := rtl8187_dev.o rtl8187_rtl8225.o obj-$(CONFIG_RTL8187) += rtl8187.o + +obj-$(CONFIG_ADM8211) += adm8211.o + +obj-$(CONFIG_IWLWIFI) += iwlwifi/ +obj-$(CONFIG_RT2X00) += rt2x00/ + +obj-$(CONFIG_P54_COMMON) += p54common.o +obj-$(CONFIG_P54_USB) += p54usb.o +obj-$(CONFIG_P54_PCI) += p54pci.o diff --git a/drivers/net/wireless/adm8211.c b/drivers/net/wireless/adm8211.c new file mode 100644 index 0000000000000000000000000000000000000000..5bf7913aadda051f59f76bd76768576b31f74d6f --- /dev/null +++ b/drivers/net/wireless/adm8211.c @@ -0,0 +1,2053 @@ + +/* + * Linux device driver for ADMtek ADM8211 (IEEE 802.11b MAC/BBP) + * + * Copyright (c) 2003, Jouni Malinen + * Copyright (c) 2004-2007, Michael Wu + * Some parts copyright (c) 2003 by David Young + * and used with permission. + * + * Much thanks to Infineon-ADMtek for their support of this driver. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. See README and COPYING for + * more details. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "adm8211.h" + +MODULE_AUTHOR("Michael Wu "); +MODULE_AUTHOR("Jouni Malinen "); +MODULE_DESCRIPTION("Driver for IEEE 802.11b wireless cards based on ADMtek ADM8211"); +MODULE_SUPPORTED_DEVICE("ADM8211"); +MODULE_LICENSE("GPL"); + +static unsigned int tx_ring_size __read_mostly = 16; +static unsigned int rx_ring_size __read_mostly = 16; + +module_param(tx_ring_size, uint, 0); +module_param(rx_ring_size, uint, 0); + +static struct pci_device_id adm8211_pci_id_table[] __devinitdata = { + /* ADMtek ADM8211 */ + { PCI_DEVICE(0x10B7, 0x6000) }, /* 3Com 3CRSHPW796 */ + { PCI_DEVICE(0x1200, 0x8201) }, /* ? 
*/ + { PCI_DEVICE(0x1317, 0x8201) }, /* ADM8211A */ + { PCI_DEVICE(0x1317, 0x8211) }, /* ADM8211B/C */ + { 0 } +}; + +static void adm8211_eeprom_register_read(struct eeprom_93cx6 *eeprom) +{ + struct adm8211_priv *priv = eeprom->data; + u32 reg = ADM8211_CSR_READ(SPR); + + eeprom->reg_data_in = reg & ADM8211_SPR_SDI; + eeprom->reg_data_out = reg & ADM8211_SPR_SDO; + eeprom->reg_data_clock = reg & ADM8211_SPR_SCLK; + eeprom->reg_chip_select = reg & ADM8211_SPR_SCS; +} + +static void adm8211_eeprom_register_write(struct eeprom_93cx6 *eeprom) +{ + struct adm8211_priv *priv = eeprom->data; + u32 reg = 0x4000 | ADM8211_SPR_SRS; + + if (eeprom->reg_data_in) + reg |= ADM8211_SPR_SDI; + if (eeprom->reg_data_out) + reg |= ADM8211_SPR_SDO; + if (eeprom->reg_data_clock) + reg |= ADM8211_SPR_SCLK; + if (eeprom->reg_chip_select) + reg |= ADM8211_SPR_SCS; + + ADM8211_CSR_WRITE(SPR, reg); + ADM8211_CSR_READ(SPR); /* eeprom_delay */ +} + +static int adm8211_read_eeprom(struct ieee80211_hw *dev) +{ + struct adm8211_priv *priv = dev->priv; + unsigned int words, i; + struct ieee80211_chan_range chan_range; + u16 cr49; + struct eeprom_93cx6 eeprom = { + .data = priv, + .register_read = adm8211_eeprom_register_read, + .register_write = adm8211_eeprom_register_write + }; + + if (ADM8211_CSR_READ(CSR_TEST0) & ADM8211_CSR_TEST0_EPTYP) { + /* 256 * 16-bit = 512 bytes */ + eeprom.width = PCI_EEPROM_WIDTH_93C66; + words = 256; + } else { + /* 64 * 16-bit = 128 bytes */ + eeprom.width = PCI_EEPROM_WIDTH_93C46; + words = 64; + } + + priv->eeprom_len = words * 2; + priv->eeprom = kmalloc(priv->eeprom_len, GFP_KERNEL); + if (!priv->eeprom) + return -ENOMEM; + + eeprom_93cx6_multiread(&eeprom, 0, (__le16 __force *)priv->eeprom, words); + + cr49 = le16_to_cpu(priv->eeprom->cr49); + priv->rf_type = (cr49 >> 3) & 0x7; + switch (priv->rf_type) { + case ADM8211_TYPE_INTERSIL: + case ADM8211_TYPE_RFMD: + case ADM8211_TYPE_MARVEL: + case ADM8211_TYPE_AIROHA: + case ADM8211_TYPE_ADMTEK: + break; + + default: + if (priv->pdev->revision < ADM8211_REV_CA) + priv->rf_type = ADM8211_TYPE_RFMD; + else + priv->rf_type = ADM8211_TYPE_AIROHA; + + printk(KERN_WARNING "%s (adm8211): Unknown RFtype %d\n", + pci_name(priv->pdev), (cr49 >> 3) & 0x7); + } + + priv->bbp_type = cr49 & 0x7; + switch (priv->bbp_type) { + case ADM8211_TYPE_INTERSIL: + case ADM8211_TYPE_RFMD: + case ADM8211_TYPE_MARVEL: + case ADM8211_TYPE_AIROHA: + case ADM8211_TYPE_ADMTEK: + break; + default: + if (priv->pdev->revision < ADM8211_REV_CA) + priv->bbp_type = ADM8211_TYPE_RFMD; + else + priv->bbp_type = ADM8211_TYPE_ADMTEK; + + printk(KERN_WARNING "%s (adm8211): Unknown BBPtype: %d\n", + pci_name(priv->pdev), cr49 >> 3); + } + + if (priv->eeprom->country_code >= ARRAY_SIZE(cranges)) { + printk(KERN_WARNING "%s (adm8211): Invalid country code (%d)\n", + pci_name(priv->pdev), priv->eeprom->country_code); + + chan_range = cranges[2]; + } else + chan_range = cranges[priv->eeprom->country_code]; + + printk(KERN_DEBUG "%s (adm8211): Channel range: %d - %d\n", + pci_name(priv->pdev), (int)chan_range.min, (int)chan_range.max); + + priv->modes[0].num_channels = chan_range.max - chan_range.min + 1; + priv->modes[0].channels = priv->channels; + + memcpy(priv->channels, adm8211_channels, sizeof(adm8211_channels)); + + for (i = 1; i <= ARRAY_SIZE(adm8211_channels); i++) + if (i >= chan_range.min && i <= chan_range.max) + priv->channels[i - 1].flag = + IEEE80211_CHAN_W_SCAN | + IEEE80211_CHAN_W_ACTIVE_SCAN | + IEEE80211_CHAN_W_IBSS; + + switch (priv->eeprom->specific_bbptype) { + 
case ADM8211_BBP_RFMD3000: + case ADM8211_BBP_RFMD3002: + case ADM8211_BBP_ADM8011: + priv->specific_bbptype = priv->eeprom->specific_bbptype; + break; + + default: + if (priv->pdev->revision < ADM8211_REV_CA) + priv->specific_bbptype = ADM8211_BBP_RFMD3000; + else + priv->specific_bbptype = ADM8211_BBP_ADM8011; + + printk(KERN_WARNING "%s (adm8211): Unknown specific BBP: %d\n", + pci_name(priv->pdev), priv->eeprom->specific_bbptype); + } + + switch (priv->eeprom->specific_rftype) { + case ADM8211_RFMD2948: + case ADM8211_RFMD2958: + case ADM8211_RFMD2958_RF3000_CONTROL_POWER: + case ADM8211_MAX2820: + case ADM8211_AL2210L: + priv->transceiver_type = priv->eeprom->specific_rftype; + break; + + default: + if (priv->pdev->revision == ADM8211_REV_BA) + priv->transceiver_type = ADM8211_RFMD2958_RF3000_CONTROL_POWER; + else if (priv->pdev->revision == ADM8211_REV_CA) + priv->transceiver_type = ADM8211_AL2210L; + else if (priv->pdev->revision == ADM8211_REV_AB) + priv->transceiver_type = ADM8211_RFMD2948; + + printk(KERN_WARNING "%s (adm8211): Unknown transceiver: %d\n", + pci_name(priv->pdev), priv->eeprom->specific_rftype); + + break; + } + + printk(KERN_DEBUG "%s (adm8211): RFtype=%d BBPtype=%d Specific BBP=%d " + "Transceiver=%d\n", pci_name(priv->pdev), priv->rf_type, + priv->bbp_type, priv->specific_bbptype, priv->transceiver_type); + + return 0; +} + +static inline void adm8211_write_sram(struct ieee80211_hw *dev, + u32 addr, u32 data) +{ + struct adm8211_priv *priv = dev->priv; + + ADM8211_CSR_WRITE(WEPCTL, addr | ADM8211_WEPCTL_TABLE_WR | + (priv->pdev->revision < ADM8211_REV_BA ? + 0 : ADM8211_WEPCTL_SEL_WEPTABLE )); + ADM8211_CSR_READ(WEPCTL); + msleep(1); + + ADM8211_CSR_WRITE(WESK, data); + ADM8211_CSR_READ(WESK); + msleep(1); +} + +static void adm8211_write_sram_bytes(struct ieee80211_hw *dev, + unsigned int addr, u8 *buf, + unsigned int len) +{ + struct adm8211_priv *priv = dev->priv; + u32 reg = ADM8211_CSR_READ(WEPCTL); + unsigned int i; + + if (priv->pdev->revision < ADM8211_REV_BA) { + for (i = 0; i < len; i += 2) { + u16 val = buf[i] | (buf[i + 1] << 8); + adm8211_write_sram(dev, addr + i / 2, val); + } + } else { + for (i = 0; i < len; i += 4) { + u32 val = (buf[i + 0] << 0 ) | (buf[i + 1] << 8 ) | + (buf[i + 2] << 16) | (buf[i + 3] << 24); + adm8211_write_sram(dev, addr + i / 4, val); + } + } + + ADM8211_CSR_WRITE(WEPCTL, reg); +} + +static void adm8211_clear_sram(struct ieee80211_hw *dev) +{ + struct adm8211_priv *priv = dev->priv; + u32 reg = ADM8211_CSR_READ(WEPCTL); + unsigned int addr; + + for (addr = 0; addr < ADM8211_SRAM_SIZE; addr++) + adm8211_write_sram(dev, addr, 0); + + ADM8211_CSR_WRITE(WEPCTL, reg); +} + +static int adm8211_get_stats(struct ieee80211_hw *dev, + struct ieee80211_low_level_stats *stats) +{ + struct adm8211_priv *priv = dev->priv; + + memcpy(stats, &priv->stats, sizeof(*stats)); + + return 0; +} + +static int adm8211_get_tx_stats(struct ieee80211_hw *dev, + struct ieee80211_tx_queue_stats *stats) +{ + struct adm8211_priv *priv = dev->priv; + struct ieee80211_tx_queue_stats_data *data = &stats->data[0]; + + data->len = priv->cur_tx - priv->dirty_tx; + data->limit = priv->tx_ring_size - 2; + data->count = priv->dirty_tx; + + return 0; +} + +static void adm8211_interrupt_tci(struct ieee80211_hw *dev) +{ + struct adm8211_priv *priv = dev->priv; + unsigned int dirty_tx; + + spin_lock(&priv->lock); + + for (dirty_tx = priv->dirty_tx; priv->cur_tx - dirty_tx; dirty_tx++) { + unsigned int entry = dirty_tx % priv->tx_ring_size; + u32 status = 
le32_to_cpu(priv->tx_ring[entry].status); + struct ieee80211_tx_status tx_status; + struct adm8211_tx_ring_info *info; + struct sk_buff *skb; + + if (status & TDES0_CONTROL_OWN || + !(status & TDES0_CONTROL_DONE)) + break; + + info = &priv->tx_buffers[entry]; + skb = info->skb; + + /* TODO: check TDES0_STATUS_TUF and TDES0_STATUS_TRO */ + + pci_unmap_single(priv->pdev, info->mapping, + info->skb->len, PCI_DMA_TODEVICE); + + memset(&tx_status, 0, sizeof(tx_status)); + skb_pull(skb, sizeof(struct adm8211_tx_hdr)); + memcpy(skb_push(skb, info->hdrlen), skb->cb, info->hdrlen); + memcpy(&tx_status.control, &info->tx_control, + sizeof(tx_status.control)); + if (!(tx_status.control.flags & IEEE80211_TXCTL_NO_ACK)) { + if (status & TDES0_STATUS_ES) + tx_status.excessive_retries = 1; + else + tx_status.flags |= IEEE80211_TX_STATUS_ACK; + } + ieee80211_tx_status_irqsafe(dev, skb, &tx_status); + + info->skb = NULL; + } + + if (priv->cur_tx - dirty_tx < priv->tx_ring_size - 2) + ieee80211_wake_queue(dev, 0); + + priv->dirty_tx = dirty_tx; + spin_unlock(&priv->lock); +} + + +static void adm8211_interrupt_rci(struct ieee80211_hw *dev) +{ + struct adm8211_priv *priv = dev->priv; + unsigned int entry = priv->cur_rx % priv->rx_ring_size; + u32 status; + unsigned int pktlen; + struct sk_buff *skb, *newskb; + unsigned int limit = priv->rx_ring_size; + static const u8 rate_tbl[] = {10, 20, 55, 110, 220}; + u8 rssi, rate; + + while (!(priv->rx_ring[entry].status & cpu_to_le32(RDES0_STATUS_OWN))) { + if (!limit--) + break; + + status = le32_to_cpu(priv->rx_ring[entry].status); + rate = (status & RDES0_STATUS_RXDR) >> 12; + rssi = le32_to_cpu(priv->rx_ring[entry].length) & + RDES1_STATUS_RSSI; + + pktlen = status & RDES0_STATUS_FL; + if (pktlen > RX_PKT_SIZE) { + if (net_ratelimit()) + printk(KERN_DEBUG "%s: frame too long (%d)\n", + wiphy_name(dev->wiphy), pktlen); + pktlen = RX_PKT_SIZE; + } + + if (!priv->soft_rx_crc && status & RDES0_STATUS_ES) { + skb = NULL; /* old buffer will be reused */ + /* TODO: update RX error stats */ + /* TODO: check RDES0_STATUS_CRC*E */ + } else if (pktlen < RX_COPY_BREAK) { + skb = dev_alloc_skb(pktlen); + if (skb) { + pci_dma_sync_single_for_cpu( + priv->pdev, + priv->rx_buffers[entry].mapping, + pktlen, PCI_DMA_FROMDEVICE); + memcpy(skb_put(skb, pktlen), + skb_tail_pointer(priv->rx_buffers[entry].skb), + pktlen); + pci_dma_sync_single_for_device( + priv->pdev, + priv->rx_buffers[entry].mapping, + RX_PKT_SIZE, PCI_DMA_FROMDEVICE); + } + } else { + newskb = dev_alloc_skb(RX_PKT_SIZE); + if (newskb) { + skb = priv->rx_buffers[entry].skb; + skb_put(skb, pktlen); + pci_unmap_single( + priv->pdev, + priv->rx_buffers[entry].mapping, + RX_PKT_SIZE, PCI_DMA_FROMDEVICE); + priv->rx_buffers[entry].skb = newskb; + priv->rx_buffers[entry].mapping = + pci_map_single(priv->pdev, + skb_tail_pointer(newskb), + RX_PKT_SIZE, + PCI_DMA_FROMDEVICE); + } else { + skb = NULL; + /* TODO: update rx dropped stats */ + } + + priv->rx_ring[entry].buffer1 = + cpu_to_le32(priv->rx_buffers[entry].mapping); + } + + priv->rx_ring[entry].status = cpu_to_le32(RDES0_STATUS_OWN | + RDES0_STATUS_SQL); + priv->rx_ring[entry].length = + cpu_to_le32(RX_PKT_SIZE | + (entry == priv->rx_ring_size - 1 ? 
+ RDES1_CONTROL_RER : 0)); + + if (skb) { + struct ieee80211_rx_status rx_status = {0}; + + if (priv->pdev->revision < ADM8211_REV_CA) + rx_status.ssi = rssi; + else + rx_status.ssi = 100 - rssi; + + if (rate <= 4) + rx_status.rate = rate_tbl[rate]; + + rx_status.channel = priv->channel; + rx_status.freq = adm8211_channels[priv->channel - 1].freq; + rx_status.phymode = MODE_IEEE80211B; + + ieee80211_rx_irqsafe(dev, skb, &rx_status); + } + + entry = (++priv->cur_rx) % priv->rx_ring_size; + } + + /* TODO: check LPC and update stats? */ +} + + +static irqreturn_t adm8211_interrupt(int irq, void *dev_id) +{ +#define ADM8211_INT(x) \ +do { \ + if (unlikely(stsr & ADM8211_STSR_ ## x)) \ + printk(KERN_DEBUG "%s: " #x "\n", wiphy_name(dev->wiphy)); \ +} while (0) + + struct ieee80211_hw *dev = dev_id; + struct adm8211_priv *priv = dev->priv; + u32 stsr = ADM8211_CSR_READ(STSR); + ADM8211_CSR_WRITE(STSR, stsr); + if (stsr == 0xffffffff) + return IRQ_HANDLED; + + if (!(stsr & (ADM8211_STSR_NISS | ADM8211_STSR_AISS))) + return IRQ_HANDLED; + + if (stsr & ADM8211_STSR_RCI) + adm8211_interrupt_rci(dev); + if (stsr & ADM8211_STSR_TCI) + adm8211_interrupt_tci(dev); + + /*ADM8211_INT(LinkOn);*/ + /*ADM8211_INT(LinkOff);*/ + + ADM8211_INT(PCF); + ADM8211_INT(BCNTC); + ADM8211_INT(GPINT); + ADM8211_INT(ATIMTC); + ADM8211_INT(TSFTF); + ADM8211_INT(TSCZ); + ADM8211_INT(SQL); + ADM8211_INT(WEPTD); + ADM8211_INT(ATIME); + /*ADM8211_INT(TBTT);*/ + ADM8211_INT(TEIS); + ADM8211_INT(FBE); + ADM8211_INT(REIS); + ADM8211_INT(GPTT); + ADM8211_INT(RPS); + ADM8211_INT(RDU); + ADM8211_INT(TUF); + /*ADM8211_INT(TRT);*/ + /*ADM8211_INT(TLT);*/ + /*ADM8211_INT(TDU);*/ + ADM8211_INT(TPS); + + return IRQ_HANDLED; + +#undef ADM8211_INT +} + +#define WRITE_SYN(name,v_mask,v_shift,a_mask,a_shift,bits,prewrite,postwrite)\ +static void adm8211_rf_write_syn_ ## name (struct ieee80211_hw *dev, \ + u16 addr, u32 value) { \ + struct adm8211_priv *priv = dev->priv; \ + unsigned int i; \ + u32 reg, bitbuf; \ + \ + value &= v_mask; \ + addr &= a_mask; \ + bitbuf = (value << v_shift) | (addr << a_shift); \ + \ + ADM8211_CSR_WRITE(SYNRF, ADM8211_SYNRF_IF_SELECT_1); \ + ADM8211_CSR_READ(SYNRF); \ + ADM8211_CSR_WRITE(SYNRF, ADM8211_SYNRF_IF_SELECT_0); \ + ADM8211_CSR_READ(SYNRF); \ + \ + if (prewrite) { \ + ADM8211_CSR_WRITE(SYNRF, ADM8211_SYNRF_WRITE_SYNDATA_0); \ + ADM8211_CSR_READ(SYNRF); \ + } \ + \ + for (i = 0; i <= bits; i++) { \ + if (bitbuf & (1 << (bits - i))) \ + reg = ADM8211_SYNRF_WRITE_SYNDATA_1; \ + else \ + reg = ADM8211_SYNRF_WRITE_SYNDATA_0; \ + \ + ADM8211_CSR_WRITE(SYNRF, reg); \ + ADM8211_CSR_READ(SYNRF); \ + \ + ADM8211_CSR_WRITE(SYNRF, reg | ADM8211_SYNRF_WRITE_CLOCK_1); \ + ADM8211_CSR_READ(SYNRF); \ + ADM8211_CSR_WRITE(SYNRF, reg | ADM8211_SYNRF_WRITE_CLOCK_0); \ + ADM8211_CSR_READ(SYNRF); \ + } \ + \ + if (postwrite == 1) { \ + ADM8211_CSR_WRITE(SYNRF, reg | ADM8211_SYNRF_IF_SELECT_0); \ + ADM8211_CSR_READ(SYNRF); \ + } \ + if (postwrite == 2) { \ + ADM8211_CSR_WRITE(SYNRF, reg | ADM8211_SYNRF_IF_SELECT_1); \ + ADM8211_CSR_READ(SYNRF); \ + } \ + \ + ADM8211_CSR_WRITE(SYNRF, 0); \ + ADM8211_CSR_READ(SYNRF); \ +} + +WRITE_SYN(max2820, 0x00FFF, 0, 0x0F, 12, 15, 1, 1) +WRITE_SYN(al2210l, 0xFFFFF, 4, 0x0F, 0, 23, 1, 1) +WRITE_SYN(rfmd2958, 0x3FFFF, 0, 0x1F, 18, 23, 0, 1) +WRITE_SYN(rfmd2948, 0x0FFFF, 4, 0x0F, 0, 21, 0, 2) + +#undef WRITE_SYN + +static int adm8211_write_bbp(struct ieee80211_hw *dev, u8 addr, u8 data) +{ + struct adm8211_priv *priv = dev->priv; + unsigned int timeout; + u32 reg; + + timeout = 10; + while 
(timeout > 0) { + reg = ADM8211_CSR_READ(BBPCTL); + if (!(reg & (ADM8211_BBPCTL_WR | ADM8211_BBPCTL_RD))) + break; + timeout--; + msleep(2); + } + + if (timeout == 0) { + printk(KERN_DEBUG "%s: adm8211_write_bbp(%d,%d) failed" + " prewrite (reg=0x%08x)\n", + wiphy_name(dev->wiphy), addr, data, reg); + return -ETIMEDOUT; + } + + switch (priv->bbp_type) { + case ADM8211_TYPE_INTERSIL: + reg = ADM8211_BBPCTL_MMISEL; /* three wire interface */ + break; + case ADM8211_TYPE_RFMD: + reg = (0x20 << 24) | ADM8211_BBPCTL_TXCE | ADM8211_BBPCTL_CCAP | + (0x01 << 18); + break; + case ADM8211_TYPE_ADMTEK: + reg = (0x20 << 24) | ADM8211_BBPCTL_TXCE | ADM8211_BBPCTL_CCAP | + (0x05 << 18); + break; + } + reg |= ADM8211_BBPCTL_WR | (addr << 8) | data; + + ADM8211_CSR_WRITE(BBPCTL, reg); + + timeout = 10; + while (timeout > 0) { + reg = ADM8211_CSR_READ(BBPCTL); + if (!(reg & ADM8211_BBPCTL_WR)) + break; + timeout--; + msleep(2); + } + + if (timeout == 0) { + ADM8211_CSR_WRITE(BBPCTL, ADM8211_CSR_READ(BBPCTL) & + ~ADM8211_BBPCTL_WR); + printk(KERN_DEBUG "%s: adm8211_write_bbp(%d,%d) failed" + " postwrite (reg=0x%08x)\n", + wiphy_name(dev->wiphy), addr, data, reg); + return -ETIMEDOUT; + } + + return 0; +} + +static int adm8211_rf_set_channel(struct ieee80211_hw *dev, unsigned int chan) +{ + static const u32 adm8211_rfmd2958_reg5[] = + {0x22BD, 0x22D2, 0x22E8, 0x22FE, 0x2314, 0x232A, 0x2340, + 0x2355, 0x236B, 0x2381, 0x2397, 0x23AD, 0x23C2, 0x23F7}; + static const u32 adm8211_rfmd2958_reg6[] = + {0x05D17, 0x3A2E8, 0x2E8BA, 0x22E8B, 0x1745D, 0x0BA2E, 0x00000, + 0x345D1, 0x28BA2, 0x1D174, 0x11745, 0x05D17, 0x3A2E8, 0x11745}; + + struct adm8211_priv *priv = dev->priv; + u8 ant_power = priv->ant_power > 0x3F ? + priv->eeprom->antenna_power[chan - 1] : priv->ant_power; + u8 tx_power = priv->tx_power > 0x3F ? + priv->eeprom->tx_power[chan - 1] : priv->tx_power; + u8 lpf_cutoff = priv->lpf_cutoff == 0xFF ? + priv->eeprom->lpf_cutoff[chan - 1] : priv->lpf_cutoff; + u8 lnags_thresh = priv->lnags_threshold == 0xFF ? + priv->eeprom->lnags_threshold[chan - 1] : priv->lnags_threshold; + u32 reg; + + ADM8211_IDLE(); + + /* Program synthesizer to new channel */ + switch (priv->transceiver_type) { + case ADM8211_RFMD2958: + case ADM8211_RFMD2958_RF3000_CONTROL_POWER: + adm8211_rf_write_syn_rfmd2958(dev, 0x00, 0x04007); + adm8211_rf_write_syn_rfmd2958(dev, 0x02, 0x00033); + + adm8211_rf_write_syn_rfmd2958(dev, 0x05, + adm8211_rfmd2958_reg5[chan - 1]); + adm8211_rf_write_syn_rfmd2958(dev, 0x06, + adm8211_rfmd2958_reg6[chan - 1]); + break; + + case ADM8211_RFMD2948: + adm8211_rf_write_syn_rfmd2948(dev, SI4126_MAIN_CONF, + SI4126_MAIN_XINDIV2); + adm8211_rf_write_syn_rfmd2948(dev, SI4126_POWERDOWN, + SI4126_POWERDOWN_PDIB | + SI4126_POWERDOWN_PDRB); + adm8211_rf_write_syn_rfmd2948(dev, SI4126_PHASE_DET_GAIN, 0); + adm8211_rf_write_syn_rfmd2948(dev, SI4126_RF2_N_DIV, + (chan == 14 ? + 2110 : (2033 + (chan * 5)))); + adm8211_rf_write_syn_rfmd2948(dev, SI4126_IF_N_DIV, 1496); + adm8211_rf_write_syn_rfmd2948(dev, SI4126_RF2_R_DIV, 44); + adm8211_rf_write_syn_rfmd2948(dev, SI4126_IF_R_DIV, 44); + break; + + case ADM8211_MAX2820: + adm8211_rf_write_syn_max2820(dev, 0x3, + (chan == 14 ? 0x054 : (0x7 + (chan * 5)))); + break; + + case ADM8211_AL2210L: + adm8211_rf_write_syn_al2210l(dev, 0x0, + (chan == 14 ? 
0x229B4 : (0x22967 + (chan * 5)))); + break; + + default: + printk(KERN_DEBUG "%s: unsupported transceiver type %d\n", + wiphy_name(dev->wiphy), priv->transceiver_type); + break; + } + + /* write BBP regs */ + if (priv->bbp_type == ADM8211_TYPE_RFMD) { + + /* SMC 2635W specific? adm8211b doesn't use the 2948 though.. */ + /* TODO: remove if SMC 2635W doesn't need this */ + if (priv->transceiver_type == ADM8211_RFMD2948) { + reg = ADM8211_CSR_READ(GPIO); + reg &= 0xfffc0000; + reg |= ADM8211_CSR_GPIO_EN0; + if (chan != 14) + reg |= ADM8211_CSR_GPIO_O0; + ADM8211_CSR_WRITE(GPIO, reg); + } + + if (priv->transceiver_type == ADM8211_RFMD2958) { + /* set PCNT2 */ + adm8211_rf_write_syn_rfmd2958(dev, 0x0B, 0x07100); + /* set PCNT1 P_DESIRED/MID_BIAS */ + reg = le16_to_cpu(priv->eeprom->cr49); + reg >>= 13; + reg <<= 15; + reg |= ant_power << 9; + adm8211_rf_write_syn_rfmd2958(dev, 0x0A, reg); + /* set TXRX TX_GAIN */ + adm8211_rf_write_syn_rfmd2958(dev, 0x09, 0x00050 | + (priv->pdev->revision < ADM8211_REV_CA ? tx_power : 0)); + } else { + reg = ADM8211_CSR_READ(PLCPHD); + reg &= 0xff00ffff; + reg |= tx_power << 18; + ADM8211_CSR_WRITE(PLCPHD, reg); + } + + ADM8211_CSR_WRITE(SYNRF, ADM8211_SYNRF_SELRF | + ADM8211_SYNRF_PE1 | ADM8211_SYNRF_PHYRST); + ADM8211_CSR_READ(SYNRF); + msleep(30); + + /* RF3000 BBP */ + if (priv->transceiver_type != ADM8211_RFMD2958) + adm8211_write_bbp(dev, RF3000_TX_VAR_GAIN__TX_LEN_EXT, + tx_power<<2); + adm8211_write_bbp(dev, RF3000_LOW_GAIN_CALIB, lpf_cutoff); + adm8211_write_bbp(dev, RF3000_HIGH_GAIN_CALIB, lnags_thresh); + adm8211_write_bbp(dev, 0x1c, priv->pdev->revision == ADM8211_REV_BA ? + priv->eeprom->cr28 : 0); + adm8211_write_bbp(dev, 0x1d, priv->eeprom->cr29); + + ADM8211_CSR_WRITE(SYNRF, 0); + + /* Nothing to do for ADMtek BBP */ + } else if (priv->bbp_type != ADM8211_TYPE_ADMTEK) + printk(KERN_DEBUG "%s: unsupported BBP type %d\n", + wiphy_name(dev->wiphy), priv->bbp_type); + + ADM8211_RESTORE(); + + /* update current channel for adhoc (and maybe AP mode) */ + reg = ADM8211_CSR_READ(CAP0); + reg &= ~0xF; + reg |= chan; + ADM8211_CSR_WRITE(CAP0, reg); + + return 0; +} + +static void adm8211_update_mode(struct ieee80211_hw *dev) +{ + struct adm8211_priv *priv = dev->priv; + + ADM8211_IDLE(); + + priv->soft_rx_crc = 0; + switch (priv->mode) { + case IEEE80211_IF_TYPE_STA: + priv->nar &= ~(ADM8211_NAR_PR | ADM8211_NAR_EA); + priv->nar |= ADM8211_NAR_ST | ADM8211_NAR_SR; + break; + case IEEE80211_IF_TYPE_IBSS: + priv->nar &= ~ADM8211_NAR_PR; + priv->nar |= ADM8211_NAR_EA | ADM8211_NAR_ST | ADM8211_NAR_SR; + + /* don't trust the error bits on rev 0x20 and up in adhoc */ + if (priv->pdev->revision >= ADM8211_REV_BA) + priv->soft_rx_crc = 1; + break; + case IEEE80211_IF_TYPE_MNTR: + priv->nar &= ~(ADM8211_NAR_EA | ADM8211_NAR_ST); + priv->nar |= ADM8211_NAR_PR | ADM8211_NAR_SR; + break; + } + + ADM8211_RESTORE(); +} + +static void adm8211_hw_init_syn(struct ieee80211_hw *dev) +{ + struct adm8211_priv *priv = dev->priv; + + switch (priv->transceiver_type) { + case ADM8211_RFMD2958: + case ADM8211_RFMD2958_RF3000_CONTROL_POWER: + /* comments taken from ADMtek vendor driver */ + + /* Reset RF2958 after power on */ + adm8211_rf_write_syn_rfmd2958(dev, 0x1F, 0x00000); + /* Initialize RF VCO Core Bias to maximum */ + adm8211_rf_write_syn_rfmd2958(dev, 0x0C, 0x3001F); + /* Initialize IF PLL */ + adm8211_rf_write_syn_rfmd2958(dev, 0x01, 0x29C03); + /* Initialize IF PLL Coarse Tuning */ + adm8211_rf_write_syn_rfmd2958(dev, 0x03, 0x1FF6F); + /* Initialize RF PLL */ + 
adm8211_rf_write_syn_rfmd2958(dev, 0x04, 0x29403); + /* Initialize RF PLL Coarse Tuning */ + adm8211_rf_write_syn_rfmd2958(dev, 0x07, 0x1456F); + /* Initialize TX gain and filter BW (R9) */ + adm8211_rf_write_syn_rfmd2958(dev, 0x09, + (priv->transceiver_type == ADM8211_RFMD2958 ? + 0x10050 : 0x00050)); + /* Initialize CAL register */ + adm8211_rf_write_syn_rfmd2958(dev, 0x08, 0x3FFF8); + break; + + case ADM8211_MAX2820: + adm8211_rf_write_syn_max2820(dev, 0x1, 0x01E); + adm8211_rf_write_syn_max2820(dev, 0x2, 0x001); + adm8211_rf_write_syn_max2820(dev, 0x3, 0x054); + adm8211_rf_write_syn_max2820(dev, 0x4, 0x310); + adm8211_rf_write_syn_max2820(dev, 0x5, 0x000); + break; + + case ADM8211_AL2210L: + adm8211_rf_write_syn_al2210l(dev, 0x0, 0x0196C); + adm8211_rf_write_syn_al2210l(dev, 0x1, 0x007CB); + adm8211_rf_write_syn_al2210l(dev, 0x2, 0x3582F); + adm8211_rf_write_syn_al2210l(dev, 0x3, 0x010A9); + adm8211_rf_write_syn_al2210l(dev, 0x4, 0x77280); + adm8211_rf_write_syn_al2210l(dev, 0x5, 0x45641); + adm8211_rf_write_syn_al2210l(dev, 0x6, 0xEA130); + adm8211_rf_write_syn_al2210l(dev, 0x7, 0x80000); + adm8211_rf_write_syn_al2210l(dev, 0x8, 0x7850F); + adm8211_rf_write_syn_al2210l(dev, 0x9, 0xF900C); + adm8211_rf_write_syn_al2210l(dev, 0xA, 0x00000); + adm8211_rf_write_syn_al2210l(dev, 0xB, 0x00000); + break; + + case ADM8211_RFMD2948: + default: + break; + } +} + +static int adm8211_hw_init_bbp(struct ieee80211_hw *dev) +{ + struct adm8211_priv *priv = dev->priv; + u32 reg; + + /* write addresses */ + if (priv->bbp_type == ADM8211_TYPE_INTERSIL) { + ADM8211_CSR_WRITE(MMIWA, 0x100E0C0A); + ADM8211_CSR_WRITE(MMIRD0, 0x00007C7E); + ADM8211_CSR_WRITE(MMIRD1, 0x00100000); + } else if (priv->bbp_type == ADM8211_TYPE_RFMD || + priv->bbp_type == ADM8211_TYPE_ADMTEK) { + /* check specific BBP type */ + switch (priv->specific_bbptype) { + case ADM8211_BBP_RFMD3000: + case ADM8211_BBP_RFMD3002: + ADM8211_CSR_WRITE(MMIWA, 0x00009101); + ADM8211_CSR_WRITE(MMIRD0, 0x00000301); + break; + + case ADM8211_BBP_ADM8011: + ADM8211_CSR_WRITE(MMIWA, 0x00008903); + ADM8211_CSR_WRITE(MMIRD0, 0x00001716); + + reg = ADM8211_CSR_READ(BBPCTL); + reg &= ~ADM8211_BBPCTL_TYPE; + reg |= 0x5 << 18; + ADM8211_CSR_WRITE(BBPCTL, reg); + break; + } + + switch (priv->pdev->revision) { + case ADM8211_REV_CA: + if (priv->transceiver_type == ADM8211_RFMD2958 || + priv->transceiver_type == ADM8211_RFMD2958_RF3000_CONTROL_POWER || + priv->transceiver_type == ADM8211_RFMD2948) + ADM8211_CSR_WRITE(SYNCTL, 0x1 << 22); + else if (priv->transceiver_type == ADM8211_MAX2820 || + priv->transceiver_type == ADM8211_AL2210L) + ADM8211_CSR_WRITE(SYNCTL, 0x3 << 22); + break; + + case ADM8211_REV_BA: + reg = ADM8211_CSR_READ(MMIRD1); + reg &= 0x0000FFFF; + reg |= 0x7e100000; + ADM8211_CSR_WRITE(MMIRD1, reg); + break; + + case ADM8211_REV_AB: + case ADM8211_REV_AF: + default: + ADM8211_CSR_WRITE(MMIRD1, 0x7e100000); + break; + } + + /* For RFMD */ + ADM8211_CSR_WRITE(MACTEST, 0x800); + } + + adm8211_hw_init_syn(dev); + + /* Set RF Power control IF pin to PE1+PHYRST# */ + ADM8211_CSR_WRITE(SYNRF, ADM8211_SYNRF_SELRF | + ADM8211_SYNRF_PE1 | ADM8211_SYNRF_PHYRST); + ADM8211_CSR_READ(SYNRF); + msleep(20); + + /* write BBP regs */ + if (priv->bbp_type == ADM8211_TYPE_RFMD) { + /* RF3000 BBP */ + /* another set: + * 11: c8 + * 14: 14 + * 15: 50 (chan 1..13; chan 14: d0) + * 1c: 00 + * 1d: 84 + */ + adm8211_write_bbp(dev, RF3000_CCA_CTRL, 0x80); + /* antenna selection: diversity */ + adm8211_write_bbp(dev, RF3000_DIVERSITY__RSSI, 0x80); + 
adm8211_write_bbp(dev, RF3000_TX_VAR_GAIN__TX_LEN_EXT, 0x74); + adm8211_write_bbp(dev, RF3000_LOW_GAIN_CALIB, 0x38); + adm8211_write_bbp(dev, RF3000_HIGH_GAIN_CALIB, 0x40); + + if (priv->eeprom->major_version < 2) { + adm8211_write_bbp(dev, 0x1c, 0x00); + adm8211_write_bbp(dev, 0x1d, 0x80); + } else { + if (priv->pdev->revision == ADM8211_REV_BA) + adm8211_write_bbp(dev, 0x1c, priv->eeprom->cr28); + else + adm8211_write_bbp(dev, 0x1c, 0x00); + + adm8211_write_bbp(dev, 0x1d, priv->eeprom->cr29); + } + } else if (priv->bbp_type == ADM8211_TYPE_ADMTEK) { + /* reset baseband */ + adm8211_write_bbp(dev, 0x00, 0xFF); + /* antenna selection: diversity */ + adm8211_write_bbp(dev, 0x07, 0x0A); + + /* TODO: find documentation for this */ + switch (priv->transceiver_type) { + case ADM8211_RFMD2958: + case ADM8211_RFMD2958_RF3000_CONTROL_POWER: + adm8211_write_bbp(dev, 0x00, 0x00); + adm8211_write_bbp(dev, 0x01, 0x00); + adm8211_write_bbp(dev, 0x02, 0x00); + adm8211_write_bbp(dev, 0x03, 0x00); + adm8211_write_bbp(dev, 0x06, 0x0f); + adm8211_write_bbp(dev, 0x09, 0x00); + adm8211_write_bbp(dev, 0x0a, 0x00); + adm8211_write_bbp(dev, 0x0b, 0x00); + adm8211_write_bbp(dev, 0x0c, 0x00); + adm8211_write_bbp(dev, 0x0f, 0xAA); + adm8211_write_bbp(dev, 0x10, 0x8c); + adm8211_write_bbp(dev, 0x11, 0x43); + adm8211_write_bbp(dev, 0x18, 0x40); + adm8211_write_bbp(dev, 0x20, 0x23); + adm8211_write_bbp(dev, 0x21, 0x02); + adm8211_write_bbp(dev, 0x22, 0x28); + adm8211_write_bbp(dev, 0x23, 0x30); + adm8211_write_bbp(dev, 0x24, 0x2d); + adm8211_write_bbp(dev, 0x28, 0x35); + adm8211_write_bbp(dev, 0x2a, 0x8c); + adm8211_write_bbp(dev, 0x2b, 0x81); + adm8211_write_bbp(dev, 0x2c, 0x44); + adm8211_write_bbp(dev, 0x2d, 0x0A); + adm8211_write_bbp(dev, 0x29, 0x40); + adm8211_write_bbp(dev, 0x60, 0x08); + adm8211_write_bbp(dev, 0x64, 0x01); + break; + + case ADM8211_MAX2820: + adm8211_write_bbp(dev, 0x00, 0x00); + adm8211_write_bbp(dev, 0x01, 0x00); + adm8211_write_bbp(dev, 0x02, 0x00); + adm8211_write_bbp(dev, 0x03, 0x00); + adm8211_write_bbp(dev, 0x06, 0x0f); + adm8211_write_bbp(dev, 0x09, 0x05); + adm8211_write_bbp(dev, 0x0a, 0x02); + adm8211_write_bbp(dev, 0x0b, 0x00); + adm8211_write_bbp(dev, 0x0c, 0x0f); + adm8211_write_bbp(dev, 0x0f, 0x55); + adm8211_write_bbp(dev, 0x10, 0x8d); + adm8211_write_bbp(dev, 0x11, 0x43); + adm8211_write_bbp(dev, 0x18, 0x4a); + adm8211_write_bbp(dev, 0x20, 0x20); + adm8211_write_bbp(dev, 0x21, 0x02); + adm8211_write_bbp(dev, 0x22, 0x23); + adm8211_write_bbp(dev, 0x23, 0x30); + adm8211_write_bbp(dev, 0x24, 0x2d); + adm8211_write_bbp(dev, 0x2a, 0x8c); + adm8211_write_bbp(dev, 0x2b, 0x81); + adm8211_write_bbp(dev, 0x2c, 0x44); + adm8211_write_bbp(dev, 0x29, 0x4a); + adm8211_write_bbp(dev, 0x60, 0x2b); + adm8211_write_bbp(dev, 0x64, 0x01); + break; + + case ADM8211_AL2210L: + adm8211_write_bbp(dev, 0x00, 0x00); + adm8211_write_bbp(dev, 0x01, 0x00); + adm8211_write_bbp(dev, 0x02, 0x00); + adm8211_write_bbp(dev, 0x03, 0x00); + adm8211_write_bbp(dev, 0x06, 0x0f); + adm8211_write_bbp(dev, 0x07, 0x05); + adm8211_write_bbp(dev, 0x08, 0x03); + adm8211_write_bbp(dev, 0x09, 0x00); + adm8211_write_bbp(dev, 0x0a, 0x00); + adm8211_write_bbp(dev, 0x0b, 0x00); + adm8211_write_bbp(dev, 0x0c, 0x10); + adm8211_write_bbp(dev, 0x0f, 0x55); + adm8211_write_bbp(dev, 0x10, 0x8d); + adm8211_write_bbp(dev, 0x11, 0x43); + adm8211_write_bbp(dev, 0x18, 0x4a); + adm8211_write_bbp(dev, 0x20, 0x20); + adm8211_write_bbp(dev, 0x21, 0x02); + adm8211_write_bbp(dev, 0x22, 0x23); + adm8211_write_bbp(dev, 0x23, 0x30); + 
adm8211_write_bbp(dev, 0x24, 0x2d); + adm8211_write_bbp(dev, 0x2a, 0xaa); + adm8211_write_bbp(dev, 0x2b, 0x81); + adm8211_write_bbp(dev, 0x2c, 0x44); + adm8211_write_bbp(dev, 0x29, 0xfa); + adm8211_write_bbp(dev, 0x60, 0x2d); + adm8211_write_bbp(dev, 0x64, 0x01); + break; + + case ADM8211_RFMD2948: + break; + + default: + printk(KERN_DEBUG "%s: unsupported transceiver %d\n", + wiphy_name(dev->wiphy), priv->transceiver_type); + break; + } + } else + printk(KERN_DEBUG "%s: unsupported BBP %d\n", + wiphy_name(dev->wiphy), priv->bbp_type); + + ADM8211_CSR_WRITE(SYNRF, 0); + + /* Set RF CAL control source to MAC control */ + reg = ADM8211_CSR_READ(SYNCTL); + reg |= ADM8211_SYNCTL_SELCAL; + ADM8211_CSR_WRITE(SYNCTL, reg); + + return 0; +} + +/* configures hw beacons/probe responses */ +static int adm8211_set_rate(struct ieee80211_hw *dev) +{ + struct adm8211_priv *priv = dev->priv; + u32 reg; + int i = 0; + u8 rate_buf[12] = {0}; + + /* write supported rates */ + if (priv->pdev->revision != ADM8211_REV_BA) { + rate_buf[0] = ARRAY_SIZE(adm8211_rates); + for (i = 0; i < ARRAY_SIZE(adm8211_rates); i++) + rate_buf[i + 1] = (adm8211_rates[i].rate / 5) | 0x80; + } else { + /* workaround for rev BA specific bug */ + rate_buf[0] = 0x04; + rate_buf[1] = 0x82; + rate_buf[2] = 0x04; + rate_buf[3] = 0x0b; + rate_buf[4] = 0x16; + } + + adm8211_write_sram_bytes(dev, ADM8211_SRAM_SUPP_RATE, rate_buf, + ARRAY_SIZE(adm8211_rates) + 1); + + reg = ADM8211_CSR_READ(PLCPHD) & 0x00FFFFFF; /* keep bits 0-23 */ + reg |= 1 << 15; /* short preamble */ + reg |= 110 << 24; + ADM8211_CSR_WRITE(PLCPHD, reg); + + /* MTMLT = 512 TU (max TX MSDU lifetime) + * BCNTSIG = plcp_signal (beacon, probe resp, and atim TX rate) + * SRTYLIM = 224 (short retry limit, TX header value is default) */ + ADM8211_CSR_WRITE(TXLMT, (512 << 16) | (110 << 8) | (224 << 0)); + + return 0; +} + +static void adm8211_hw_init(struct ieee80211_hw *dev) +{ + struct adm8211_priv *priv = dev->priv; + u32 reg; + u8 cline; + + reg = le32_to_cpu(ADM8211_CSR_READ(PAR)); + reg |= ADM8211_PAR_MRLE | ADM8211_PAR_MRME; + reg &= ~(ADM8211_PAR_BAR | ADM8211_PAR_CAL); + + if (!pci_set_mwi(priv->pdev)) { + reg |= 0x1 << 24; + pci_read_config_byte(priv->pdev, PCI_CACHE_LINE_SIZE, &cline); + + switch (cline) { + case 0x8: reg |= (0x1 << 14); + break; + case 0x16: reg |= (0x2 << 14); + break; + case 0x32: reg |= (0x3 << 14); + break; + default: reg |= (0x0 << 14); + break; + } + } + + ADM8211_CSR_WRITE(PAR, reg); + + reg = ADM8211_CSR_READ(CSR_TEST1); + reg &= ~(0xF << 28); + reg |= (1 << 28) | (1 << 31); + ADM8211_CSR_WRITE(CSR_TEST1, reg); + + /* lose link after 4 lost beacons */ + reg = (0x04 << 21) | ADM8211_WCSR_TSFTWE | ADM8211_WCSR_LSOE; + ADM8211_CSR_WRITE(WCSR, reg); + + /* Disable APM, enable receive FIFO threshold, and set drain receive + * threshold to store-and-forward */ + reg = ADM8211_CSR_READ(CMDR); + reg &= ~(ADM8211_CMDR_APM | ADM8211_CMDR_DRT); + reg |= ADM8211_CMDR_RTE | ADM8211_CMDR_DRT_SF; + ADM8211_CSR_WRITE(CMDR, reg); + + adm8211_set_rate(dev); + + /* 4-bit values: + * PWR1UP = 8 * 2 ms + * PWR0PAPE = 8 us or 5 us + * PWR1PAPE = 1 us or 3 us + * PWR0TRSW = 5 us + * PWR1TRSW = 12 us + * PWR0PE2 = 13 us + * PWR1PE2 = 1 us + * PWR0TXPE = 8 or 6 */ + if (priv->pdev->revision < ADM8211_REV_CA) + ADM8211_CSR_WRITE(TOFS2, 0x8815cd18); + else + ADM8211_CSR_WRITE(TOFS2, 0x8535cd16); + + /* Enable store and forward for transmit */ + priv->nar = ADM8211_NAR_SF | ADM8211_NAR_PB; + ADM8211_CSR_WRITE(NAR, priv->nar); + + /* Reset RF */ + 
ADM8211_CSR_WRITE(SYNRF, ADM8211_SYNRF_RADIO); + ADM8211_CSR_READ(SYNRF); + msleep(10); + ADM8211_CSR_WRITE(SYNRF, 0); + ADM8211_CSR_READ(SYNRF); + msleep(5); + + /* Set CFP Max Duration to 0x10 TU */ + reg = ADM8211_CSR_READ(CFPP); + reg &= ~(0xffff << 8); + reg |= 0x0010 << 8; + ADM8211_CSR_WRITE(CFPP, reg); + + /* USCNT = 0x16 (number of system clocks, 22 MHz, in 1us + * TUCNT = 0x3ff - Tu counter 1024 us */ + ADM8211_CSR_WRITE(TOFS0, (0x16 << 24) | 0x3ff); + + /* SLOT=20 us, SIFS=110 cycles of 22 MHz (5 us), + * DIFS=50 us, EIFS=100 us */ + if (priv->pdev->revision < ADM8211_REV_CA) + ADM8211_CSR_WRITE(IFST, (20 << 23) | (110 << 15) | + (50 << 9) | 100); + else + ADM8211_CSR_WRITE(IFST, (20 << 23) | (24 << 15) | + (50 << 9) | 100); + + /* PCNT = 1 (MAC idle time awake/sleep, unit S) + * RMRD = 2346 * 8 + 1 us (max RX duration) */ + ADM8211_CSR_WRITE(RMD, (1 << 16) | 18769); + + /* MART=65535 us, MIRT=256 us, TSFTOFST=0 us */ + ADM8211_CSR_WRITE(RSPT, 0xffffff00); + + /* Initialize BBP (and SYN) */ + adm8211_hw_init_bbp(dev); + + /* make sure interrupts are off */ + ADM8211_CSR_WRITE(IER, 0); + + /* ACK interrupts */ + ADM8211_CSR_WRITE(STSR, ADM8211_CSR_READ(STSR)); + + /* Setup WEP (turns it off for now) */ + reg = ADM8211_CSR_READ(MACTEST); + reg &= ~(7 << 20); + ADM8211_CSR_WRITE(MACTEST, reg); + + reg = ADM8211_CSR_READ(WEPCTL); + reg &= ~ADM8211_WEPCTL_WEPENABLE; + reg |= ADM8211_WEPCTL_WEPRXBYP; + ADM8211_CSR_WRITE(WEPCTL, reg); + + /* Clear the missed-packet counter. */ + ADM8211_CSR_READ(LPC); +} + +static int adm8211_hw_reset(struct ieee80211_hw *dev) +{ + struct adm8211_priv *priv = dev->priv; + u32 reg, tmp; + int timeout = 100; + + /* Power-on issue */ + /* TODO: check if this is necessary */ + ADM8211_CSR_WRITE(FRCTL, 0); + + /* Reset the chip */ + tmp = ADM8211_CSR_READ(PAR); + ADM8211_CSR_WRITE(PAR, ADM8211_PAR_SWR); + + while ((ADM8211_CSR_READ(PAR) & ADM8211_PAR_SWR) && timeout--) + msleep(50); + + if (timeout <= 0) + return -ETIMEDOUT; + + ADM8211_CSR_WRITE(PAR, tmp); + + if (priv->pdev->revision == ADM8211_REV_BA && + (priv->transceiver_type == ADM8211_RFMD2958_RF3000_CONTROL_POWER || + priv->transceiver_type == ADM8211_RFMD2958)) { + reg = ADM8211_CSR_READ(CSR_TEST1); + reg |= (1 << 4) | (1 << 5); + ADM8211_CSR_WRITE(CSR_TEST1, reg); + } else if (priv->pdev->revision == ADM8211_REV_CA) { + reg = ADM8211_CSR_READ(CSR_TEST1); + reg &= ~((1 << 4) | (1 << 5)); + ADM8211_CSR_WRITE(CSR_TEST1, reg); + } + + ADM8211_CSR_WRITE(FRCTL, 0); + + reg = ADM8211_CSR_READ(CSR_TEST0); + reg |= ADM8211_CSR_TEST0_EPRLD; /* EEPROM Recall */ + ADM8211_CSR_WRITE(CSR_TEST0, reg); + + adm8211_clear_sram(dev); + + return 0; +} + +static u64 adm8211_get_tsft(struct ieee80211_hw *dev) +{ + struct adm8211_priv *priv = dev->priv; + u32 tsftl; + u64 tsft; + + tsftl = ADM8211_CSR_READ(TSFTL); + tsft = ADM8211_CSR_READ(TSFTH); + tsft <<= 32; + tsft |= tsftl; + + return tsft; +} + +static void adm8211_set_interval(struct ieee80211_hw *dev, + unsigned short bi, unsigned short li) +{ + struct adm8211_priv *priv = dev->priv; + u32 reg; + + /* BP (beacon interval) = data->beacon_interval + * LI (listen interval) = data->listen_interval (in beacon intervals) */ + reg = (bi << 16) | li; + ADM8211_CSR_WRITE(BPLI, reg); +} + +static void adm8211_set_bssid(struct ieee80211_hw *dev, const u8 *bssid) +{ + struct adm8211_priv *priv = dev->priv; + u32 reg; + + ADM8211_CSR_WRITE(BSSID0, le32_to_cpu(*(__le32 *)bssid)); + reg = ADM8211_CSR_READ(ABDA1); + reg &= 0x0000ffff; + reg |= (bssid[4] << 16) | (bssid[5] << 
24); + ADM8211_CSR_WRITE(ABDA1, reg); +} + +static int adm8211_set_ssid(struct ieee80211_hw *dev, u8 *ssid, size_t ssid_len) +{ + struct adm8211_priv *priv = dev->priv; + u8 buf[36]; + + if (ssid_len > 32) + return -EINVAL; + + memset(buf, 0, sizeof(buf)); + buf[0] = ssid_len; + memcpy(buf + 1, ssid, ssid_len); + adm8211_write_sram_bytes(dev, ADM8211_SRAM_SSID, buf, 33); + /* TODO: configure beacon for adhoc? */ + return 0; +} + +static int adm8211_config(struct ieee80211_hw *dev, struct ieee80211_conf *conf) +{ + struct adm8211_priv *priv = dev->priv; + + if (conf->channel != priv->channel) { + priv->channel = conf->channel; + adm8211_rf_set_channel(dev, priv->channel); + } + + return 0; +} + +static int adm8211_config_interface(struct ieee80211_hw *dev, int if_id, + struct ieee80211_if_conf *conf) +{ + struct adm8211_priv *priv = dev->priv; + + if (memcmp(conf->bssid, priv->bssid, ETH_ALEN)) { + adm8211_set_bssid(dev, conf->bssid); + memcpy(priv->bssid, conf->bssid, ETH_ALEN); + } + + if (conf->ssid_len != priv->ssid_len || + memcmp(conf->ssid, priv->ssid, conf->ssid_len)) { + adm8211_set_ssid(dev, conf->ssid, conf->ssid_len); + priv->ssid_len = conf->ssid_len; + memcpy(priv->ssid, conf->ssid, conf->ssid_len); + } + + return 0; +} + +static void adm8211_configure_filter(struct ieee80211_hw *dev, + unsigned int changed_flags, + unsigned int *total_flags, + int mc_count, struct dev_mc_list *mclist) +{ + static const u8 bcast[ETH_ALEN] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF }; + struct adm8211_priv *priv = dev->priv; + unsigned int bit_nr, new_flags; + u32 mc_filter[2]; + int i; + + new_flags = 0; + + if (*total_flags & FIF_PROMISC_IN_BSS) { + new_flags |= FIF_PROMISC_IN_BSS; + priv->nar |= ADM8211_NAR_PR; + priv->nar &= ~ADM8211_NAR_MM; + mc_filter[1] = mc_filter[0] = ~0; + } else if ((*total_flags & FIF_ALLMULTI) || (mc_count > 32)) { + new_flags |= FIF_ALLMULTI; + priv->nar &= ~ADM8211_NAR_PR; + priv->nar |= ADM8211_NAR_MM; + mc_filter[1] = mc_filter[0] = ~0; + } else { + priv->nar &= ~(ADM8211_NAR_MM | ADM8211_NAR_PR); + mc_filter[1] = mc_filter[0] = 0; + for (i = 0; i < mc_count; i++) { + if (!mclist) + break; + bit_nr = ether_crc(ETH_ALEN, mclist->dmi_addr) >> 26; + + bit_nr &= 0x3F; + mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31); + mclist = mclist->next; + } + } + + ADM8211_IDLE_RX(); + + ADM8211_CSR_WRITE(MAR0, mc_filter[0]); + ADM8211_CSR_WRITE(MAR1, mc_filter[1]); + ADM8211_CSR_READ(NAR); + + if (priv->nar & ADM8211_NAR_PR) + dev->flags |= IEEE80211_HW_RX_INCLUDES_FCS; + else + dev->flags &= ~IEEE80211_HW_RX_INCLUDES_FCS; + + if (*total_flags & FIF_BCN_PRBRESP_PROMISC) + adm8211_set_bssid(dev, bcast); + else + adm8211_set_bssid(dev, priv->bssid); + + ADM8211_RESTORE(); + + *total_flags = new_flags; +} + +static int adm8211_add_interface(struct ieee80211_hw *dev, + struct ieee80211_if_init_conf *conf) +{ + struct adm8211_priv *priv = dev->priv; + if (priv->mode != IEEE80211_IF_TYPE_MNTR) + return -EOPNOTSUPP; + + switch (conf->type) { + case IEEE80211_IF_TYPE_STA: + priv->mode = conf->type; + break; + default: + return -EOPNOTSUPP; + } + + ADM8211_IDLE(); + + ADM8211_CSR_WRITE(PAR0, le32_to_cpu(*(__le32 *)conf->mac_addr)); + ADM8211_CSR_WRITE(PAR1, le16_to_cpu(*(__le16 *)(conf->mac_addr + 4))); + + adm8211_update_mode(dev); + + ADM8211_RESTORE(); + + return 0; +} + +static void adm8211_remove_interface(struct ieee80211_hw *dev, + struct ieee80211_if_init_conf *conf) +{ + struct adm8211_priv *priv = dev->priv; + priv->mode = IEEE80211_IF_TYPE_MNTR; +} + +static int 
adm8211_init_rings(struct ieee80211_hw *dev) +{ + struct adm8211_priv *priv = dev->priv; + struct adm8211_desc *desc = NULL; + struct adm8211_rx_ring_info *rx_info; + struct adm8211_tx_ring_info *tx_info; + unsigned int i; + + for (i = 0; i < priv->rx_ring_size; i++) { + desc = &priv->rx_ring[i]; + desc->status = 0; + desc->length = cpu_to_le32(RX_PKT_SIZE); + priv->rx_buffers[i].skb = NULL; + } + /* Mark the end of RX ring; hw returns to base address after this + * descriptor */ + desc->length |= cpu_to_le32(RDES1_CONTROL_RER); + + for (i = 0; i < priv->rx_ring_size; i++) { + desc = &priv->rx_ring[i]; + rx_info = &priv->rx_buffers[i]; + + rx_info->skb = dev_alloc_skb(RX_PKT_SIZE); + if (rx_info->skb == NULL) + break; + rx_info->mapping = pci_map_single(priv->pdev, + skb_tail_pointer(rx_info->skb), + RX_PKT_SIZE, + PCI_DMA_FROMDEVICE); + desc->buffer1 = cpu_to_le32(rx_info->mapping); + desc->status = cpu_to_le32(RDES0_STATUS_OWN | RDES0_STATUS_SQL); + } + + /* Setup TX ring. TX buffers descriptors will be filled in as needed */ + for (i = 0; i < priv->tx_ring_size; i++) { + desc = &priv->tx_ring[i]; + tx_info = &priv->tx_buffers[i]; + + tx_info->skb = NULL; + tx_info->mapping = 0; + desc->status = 0; + } + desc->length = cpu_to_le32(TDES1_CONTROL_TER); + + priv->cur_rx = priv->cur_tx = priv->dirty_tx = 0; + ADM8211_CSR_WRITE(RDB, priv->rx_ring_dma); + ADM8211_CSR_WRITE(TDBD, priv->tx_ring_dma); + + return 0; +} + +static void adm8211_free_rings(struct ieee80211_hw *dev) +{ + struct adm8211_priv *priv = dev->priv; + unsigned int i; + + for (i = 0; i < priv->rx_ring_size; i++) { + if (!priv->rx_buffers[i].skb) + continue; + + pci_unmap_single( + priv->pdev, + priv->rx_buffers[i].mapping, + RX_PKT_SIZE, PCI_DMA_FROMDEVICE); + + dev_kfree_skb(priv->rx_buffers[i].skb); + } + + for (i = 0; i < priv->tx_ring_size; i++) { + if (!priv->tx_buffers[i].skb) + continue; + + pci_unmap_single(priv->pdev, + priv->tx_buffers[i].mapping, + priv->tx_buffers[i].skb->len, + PCI_DMA_TODEVICE); + + dev_kfree_skb(priv->tx_buffers[i].skb); + } +} + +static int adm8211_start(struct ieee80211_hw *dev) +{ + struct adm8211_priv *priv = dev->priv; + int retval; + + /* Power up MAC and RF chips */ + retval = adm8211_hw_reset(dev); + if (retval) { + printk(KERN_ERR "%s: hardware reset failed\n", + wiphy_name(dev->wiphy)); + goto fail; + } + + retval = adm8211_init_rings(dev); + if (retval) { + printk(KERN_ERR "%s: failed to initialize rings\n", + wiphy_name(dev->wiphy)); + goto fail; + } + + /* Init hardware */ + adm8211_hw_init(dev); + adm8211_rf_set_channel(dev, priv->channel); + + retval = request_irq(priv->pdev->irq, &adm8211_interrupt, + IRQF_SHARED, "adm8211", dev); + if (retval) { + printk(KERN_ERR "%s: failed to register IRQ handler\n", + wiphy_name(dev->wiphy)); + goto fail; + } + + ADM8211_CSR_WRITE(IER, ADM8211_IER_NIE | ADM8211_IER_AIE | + ADM8211_IER_RCIE | ADM8211_IER_TCIE | + ADM8211_IER_TDUIE | ADM8211_IER_GPTIE); + priv->mode = IEEE80211_IF_TYPE_MNTR; + adm8211_update_mode(dev); + ADM8211_CSR_WRITE(RDR, 0); + + adm8211_set_interval(dev, 100, 10); + return 0; + +fail: + return retval; +} + +static void adm8211_stop(struct ieee80211_hw *dev) +{ + struct adm8211_priv *priv = dev->priv; + + priv->mode = IEEE80211_IF_TYPE_INVALID; + priv->nar = 0; + ADM8211_CSR_WRITE(NAR, 0); + ADM8211_CSR_WRITE(IER, 0); + ADM8211_CSR_READ(NAR); + + free_irq(priv->pdev->irq, dev); + + adm8211_free_rings(dev); +} + +static void adm8211_calc_durations(int *dur, int *plcp, size_t payload_len, int len, + int plcp_signal, int 
short_preamble) +{ + /* Alternative calculation from NetBSD: */ + +/* IEEE 802.11b durations for DSSS PHY in microseconds */ +#define IEEE80211_DUR_DS_LONG_PREAMBLE 144 +#define IEEE80211_DUR_DS_SHORT_PREAMBLE 72 +#define IEEE80211_DUR_DS_FAST_PLCPHDR 24 +#define IEEE80211_DUR_DS_SLOW_PLCPHDR 48 +#define IEEE80211_DUR_DS_SLOW_ACK 112 +#define IEEE80211_DUR_DS_FAST_ACK 56 +#define IEEE80211_DUR_DS_SLOW_CTS 112 +#define IEEE80211_DUR_DS_FAST_CTS 56 +#define IEEE80211_DUR_DS_SLOT 20 +#define IEEE80211_DUR_DS_SIFS 10 + + int remainder; + + *dur = (80 * (24 + payload_len) + plcp_signal - 1) + / plcp_signal; + + if (plcp_signal <= PLCP_SIGNAL_2M) + /* 1-2Mbps WLAN: send ACK/CTS at 1Mbps */ + *dur += 3 * (IEEE80211_DUR_DS_SIFS + + IEEE80211_DUR_DS_SHORT_PREAMBLE + + IEEE80211_DUR_DS_FAST_PLCPHDR) + + IEEE80211_DUR_DS_SLOW_CTS + IEEE80211_DUR_DS_SLOW_ACK; + else + /* 5-11Mbps WLAN: send ACK/CTS at 2Mbps */ + *dur += 3 * (IEEE80211_DUR_DS_SIFS + + IEEE80211_DUR_DS_SHORT_PREAMBLE + + IEEE80211_DUR_DS_FAST_PLCPHDR) + + IEEE80211_DUR_DS_FAST_CTS + IEEE80211_DUR_DS_FAST_ACK; + + /* lengthen duration if long preamble */ + if (!short_preamble) + *dur += 3 * (IEEE80211_DUR_DS_LONG_PREAMBLE - + IEEE80211_DUR_DS_SHORT_PREAMBLE) + + 3 * (IEEE80211_DUR_DS_SLOW_PLCPHDR - + IEEE80211_DUR_DS_FAST_PLCPHDR); + + + *plcp = (80 * len) / plcp_signal; + remainder = (80 * len) % plcp_signal; + if (plcp_signal == PLCP_SIGNAL_11M && + remainder <= 30 && remainder > 0) + *plcp = (*plcp | 0x8000) + 1; + else if (remainder) + (*plcp)++; +} + +/* Transmit skb w/adm8211_tx_hdr (802.11 header created by hardware) */ +static void adm8211_tx_raw(struct ieee80211_hw *dev, struct sk_buff *skb, + u16 plcp_signal, + struct ieee80211_tx_control *control, + size_t hdrlen) +{ + struct adm8211_priv *priv = dev->priv; + unsigned long flags; + dma_addr_t mapping; + unsigned int entry; + u32 flag; + + mapping = pci_map_single(priv->pdev, skb->data, skb->len, + PCI_DMA_TODEVICE); + + spin_lock_irqsave(&priv->lock, flags); + + if (priv->cur_tx - priv->dirty_tx == priv->tx_ring_size / 2) + flag = TDES1_CONTROL_IC | TDES1_CONTROL_LS | TDES1_CONTROL_FS; + else + flag = TDES1_CONTROL_LS | TDES1_CONTROL_FS; + + if (priv->cur_tx - priv->dirty_tx == priv->tx_ring_size - 2) + ieee80211_stop_queue(dev, 0); + + entry = priv->cur_tx % priv->tx_ring_size; + + priv->tx_buffers[entry].skb = skb; + priv->tx_buffers[entry].mapping = mapping; + memcpy(&priv->tx_buffers[entry].tx_control, control, sizeof(*control)); + priv->tx_buffers[entry].hdrlen = hdrlen; + priv->tx_ring[entry].buffer1 = cpu_to_le32(mapping); + + if (entry == priv->tx_ring_size - 1) + flag |= TDES1_CONTROL_TER; + priv->tx_ring[entry].length = cpu_to_le32(flag | skb->len); + + /* Set TX rate (SIGNAL field in PLCP PPDU format) */ + flag = TDES0_CONTROL_OWN | (plcp_signal << 20) | 8 /* ? 
*/; + priv->tx_ring[entry].status = cpu_to_le32(flag); + + priv->cur_tx++; + + spin_unlock_irqrestore(&priv->lock, flags); + + /* Trigger transmit poll */ + ADM8211_CSR_WRITE(TDR, 0); +} + +/* Put adm8211_tx_hdr on skb and transmit */ +static int adm8211_tx(struct ieee80211_hw *dev, struct sk_buff *skb, + struct ieee80211_tx_control *control) +{ + struct adm8211_tx_hdr *txhdr; + u16 fc; + size_t payload_len, hdrlen; + int plcp, dur, len, plcp_signal, short_preamble; + struct ieee80211_hdr *hdr; + + if (control->tx_rate < 0) { + short_preamble = 1; + plcp_signal = -control->tx_rate; + } else { + short_preamble = 0; + plcp_signal = control->tx_rate; + } + + hdr = (struct ieee80211_hdr *)skb->data; + fc = le16_to_cpu(hdr->frame_control) & ~IEEE80211_FCTL_PROTECTED; + hdrlen = ieee80211_get_hdrlen(fc); + memcpy(skb->cb, skb->data, hdrlen); + hdr = (struct ieee80211_hdr *)skb->cb; + skb_pull(skb, hdrlen); + payload_len = skb->len; + + txhdr = (struct adm8211_tx_hdr *) skb_push(skb, sizeof(*txhdr)); + memset(txhdr, 0, sizeof(*txhdr)); + memcpy(txhdr->da, ieee80211_get_DA(hdr), ETH_ALEN); + txhdr->signal = plcp_signal; + txhdr->frame_body_size = cpu_to_le16(payload_len); + txhdr->frame_control = hdr->frame_control; + + len = hdrlen + payload_len + FCS_LEN; + if (fc & IEEE80211_FCTL_PROTECTED) + len += 8; + + txhdr->frag = cpu_to_le16(0x0FFF); + adm8211_calc_durations(&dur, &plcp, payload_len, + len, plcp_signal, short_preamble); + txhdr->plcp_frag_head_len = cpu_to_le16(plcp); + txhdr->plcp_frag_tail_len = cpu_to_le16(plcp); + txhdr->dur_frag_head = cpu_to_le16(dur); + txhdr->dur_frag_tail = cpu_to_le16(dur); + + txhdr->header_control = cpu_to_le16(ADM8211_TXHDRCTL_ENABLE_EXTEND_HEADER); + + if (short_preamble) + txhdr->header_control |= cpu_to_le16(ADM8211_TXHDRCTL_SHORT_PREAMBLE); + + if (control->flags & IEEE80211_TXCTL_USE_RTS_CTS) + txhdr->header_control |= cpu_to_le16(ADM8211_TXHDRCTL_ENABLE_RTS); + + if (fc & IEEE80211_FCTL_PROTECTED) + txhdr->header_control |= cpu_to_le16(ADM8211_TXHDRCTL_ENABLE_WEP_ENGINE); + + txhdr->retry_limit = control->retry_limit; + + adm8211_tx_raw(dev, skb, plcp_signal, control, hdrlen); + + return NETDEV_TX_OK; +} + +static int adm8211_alloc_rings(struct ieee80211_hw *dev) +{ + struct adm8211_priv *priv = dev->priv; + unsigned int ring_size; + + priv->rx_buffers = kmalloc(sizeof(*priv->rx_buffers) * priv->rx_ring_size + + sizeof(*priv->tx_buffers) * priv->tx_ring_size, GFP_KERNEL); + if (!priv->rx_buffers) + return -ENOMEM; + + priv->tx_buffers = (void *)priv->rx_buffers + + sizeof(*priv->rx_buffers) * priv->rx_ring_size; + + /* Allocate TX/RX descriptors */ + ring_size = sizeof(struct adm8211_desc) * priv->rx_ring_size + + sizeof(struct adm8211_desc) * priv->tx_ring_size; + priv->rx_ring = pci_alloc_consistent(priv->pdev, ring_size, + &priv->rx_ring_dma); + + if (!priv->rx_ring) { + kfree(priv->rx_buffers); + priv->rx_buffers = NULL; + priv->tx_buffers = NULL; + return -ENOMEM; + } + + priv->tx_ring = (struct adm8211_desc *)(priv->rx_ring + + priv->rx_ring_size); + priv->tx_ring_dma = priv->rx_ring_dma + + sizeof(struct adm8211_desc) * priv->rx_ring_size; + + return 0; +} + +static const struct ieee80211_ops adm8211_ops = { + .tx = adm8211_tx, + .start = adm8211_start, + .stop = adm8211_stop, + .add_interface = adm8211_add_interface, + .remove_interface = adm8211_remove_interface, + .config = adm8211_config, + .config_interface = adm8211_config_interface, + .configure_filter = adm8211_configure_filter, + .get_stats = adm8211_get_stats, + .get_tx_stats = 
adm8211_get_tx_stats, + .get_tsf = adm8211_get_tsft +}; + +static int __devinit adm8211_probe(struct pci_dev *pdev, + const struct pci_device_id *id) +{ + struct ieee80211_hw *dev; + struct adm8211_priv *priv; + unsigned long mem_addr, mem_len; + unsigned int io_addr, io_len; + int err; + u32 reg; + u8 perm_addr[ETH_ALEN]; + DECLARE_MAC_BUF(mac); + + err = pci_enable_device(pdev); + if (err) { + printk(KERN_ERR "%s (adm8211): Cannot enable new PCI device\n", + pci_name(pdev)); + return err; + } + + io_addr = pci_resource_start(pdev, 0); + io_len = pci_resource_len(pdev, 0); + mem_addr = pci_resource_start(pdev, 1); + mem_len = pci_resource_len(pdev, 1); + if (io_len < 256 || mem_len < 1024) { + printk(KERN_ERR "%s (adm8211): Too short PCI resources\n", + pci_name(pdev)); + goto err_disable_pdev; + } + + + /* check signature */ + pci_read_config_dword(pdev, 0x80 /* CR32 */, ®); + if (reg != ADM8211_SIG1 && reg != ADM8211_SIG2) { + printk(KERN_ERR "%s (adm8211): Invalid signature (0x%x)\n", + pci_name(pdev), reg); + goto err_disable_pdev; + } + + err = pci_request_regions(pdev, "adm8211"); + if (err) { + printk(KERN_ERR "%s (adm8211): Cannot obtain PCI resources\n", + pci_name(pdev)); + return err; /* someone else grabbed it? don't disable it */ + } + + if (pci_set_dma_mask(pdev, DMA_32BIT_MASK) || + pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK)) { + printk(KERN_ERR "%s (adm8211): No suitable DMA available\n", + pci_name(pdev)); + goto err_free_reg; + } + + pci_set_master(pdev); + + dev = ieee80211_alloc_hw(sizeof(*priv), &adm8211_ops); + if (!dev) { + printk(KERN_ERR "%s (adm8211): ieee80211 alloc failed\n", + pci_name(pdev)); + err = -ENOMEM; + goto err_free_reg; + } + priv = dev->priv; + priv->pdev = pdev; + + spin_lock_init(&priv->lock); + + SET_IEEE80211_DEV(dev, &pdev->dev); + + pci_set_drvdata(pdev, dev); + + priv->map = pci_iomap(pdev, 1, mem_len); + if (!priv->map) + priv->map = pci_iomap(pdev, 0, io_len); + + if (!priv->map) { + printk(KERN_ERR "%s (adm8211): Cannot map device memory\n", + pci_name(pdev)); + goto err_free_dev; + } + + priv->rx_ring_size = rx_ring_size; + priv->tx_ring_size = tx_ring_size; + + if (adm8211_alloc_rings(dev)) { + printk(KERN_ERR "%s (adm8211): Cannot allocate TX/RX ring\n", + pci_name(pdev)); + goto err_iounmap; + } + + *(u32 *)perm_addr = le32_to_cpu((__force __le32)ADM8211_CSR_READ(PAR0)); + *(u16 *)&perm_addr[4] = + le16_to_cpu((__force __le16)ADM8211_CSR_READ(PAR1) & 0xFFFF); + + if (!is_valid_ether_addr(perm_addr)) { + printk(KERN_WARNING "%s (adm8211): Invalid hwaddr in EEPROM!\n", + pci_name(pdev)); + random_ether_addr(perm_addr); + } + SET_IEEE80211_PERM_ADDR(dev, perm_addr); + + dev->extra_tx_headroom = sizeof(struct adm8211_tx_hdr); + dev->flags = IEEE80211_HW_DEFAULT_REG_DOMAIN_CONFIGURED; + /* IEEE80211_HW_RX_INCLUDES_FCS in promisc mode */ + + dev->channel_change_time = 1000; + dev->max_rssi = 100; /* FIXME: find better value */ + + priv->modes[0].mode = MODE_IEEE80211B; + /* channel info filled in by adm8211_read_eeprom */ + memcpy(priv->rates, adm8211_rates, sizeof(adm8211_rates)); + priv->modes[0].num_rates = ARRAY_SIZE(adm8211_rates); + priv->modes[0].rates = priv->rates; + + dev->queues = 1; /* ADM8211C supports more, maybe ADM8211B too */ + + priv->retry_limit = 3; + priv->ant_power = 0x40; + priv->tx_power = 0x40; + priv->lpf_cutoff = 0xFF; + priv->lnags_threshold = 0xFF; + priv->mode = IEEE80211_IF_TYPE_INVALID; + + /* Power-on issue. 
EEPROM won't read correctly without */ + if (pdev->revision >= ADM8211_REV_BA) { + ADM8211_CSR_WRITE(FRCTL, 0); + ADM8211_CSR_READ(FRCTL); + ADM8211_CSR_WRITE(FRCTL, 1); + ADM8211_CSR_READ(FRCTL); + msleep(100); + } + + err = adm8211_read_eeprom(dev); + if (err) { + printk(KERN_ERR "%s (adm8211): Can't alloc eeprom buffer\n", + pci_name(pdev)); + goto err_free_desc; + } + + priv->channel = priv->modes[0].channels[0].chan; + + err = ieee80211_register_hwmode(dev, &priv->modes[0]); + if (err) { + printk(KERN_ERR "%s (adm8211): Can't register hwmode\n", + pci_name(pdev)); + goto err_free_desc; + } + + err = ieee80211_register_hw(dev); + if (err) { + printk(KERN_ERR "%s (adm8211): Cannot register device\n", + pci_name(pdev)); + goto err_free_desc; + } + + printk(KERN_INFO "%s: hwaddr %s, Rev 0x%02x\n", + wiphy_name(dev->wiphy), print_mac(mac, dev->wiphy->perm_addr), + pdev->revision); + + return 0; + + err_free_desc: + pci_free_consistent(pdev, + sizeof(struct adm8211_desc) * priv->rx_ring_size + + sizeof(struct adm8211_desc) * priv->tx_ring_size, + priv->rx_ring, priv->rx_ring_dma); + kfree(priv->rx_buffers); + + err_iounmap: + pci_iounmap(pdev, priv->map); + + err_free_dev: + pci_set_drvdata(pdev, NULL); + ieee80211_free_hw(dev); + + err_free_reg: + pci_release_regions(pdev); + + err_disable_pdev: + pci_disable_device(pdev); + return err; +} + + +static void __devexit adm8211_remove(struct pci_dev *pdev) +{ + struct ieee80211_hw *dev = pci_get_drvdata(pdev); + struct adm8211_priv *priv; + + if (!dev) + return; + + ieee80211_unregister_hw(dev); + + priv = dev->priv; + + pci_free_consistent(pdev, + sizeof(struct adm8211_desc) * priv->rx_ring_size + + sizeof(struct adm8211_desc) * priv->tx_ring_size, + priv->rx_ring, priv->rx_ring_dma); + + kfree(priv->rx_buffers); + kfree(priv->eeprom); + pci_iounmap(pdev, priv->map); + pci_release_regions(pdev); + pci_disable_device(pdev); + ieee80211_free_hw(dev); +} + + +#ifdef CONFIG_PM +static int adm8211_suspend(struct pci_dev *pdev, pm_message_t state) +{ + struct ieee80211_hw *dev = pci_get_drvdata(pdev); + struct adm8211_priv *priv = dev->priv; + + if (priv->mode != IEEE80211_IF_TYPE_INVALID) { + ieee80211_stop_queues(dev); + adm8211_stop(dev); + } + + pci_save_state(pdev); + pci_set_power_state(pdev, pci_choose_state(pdev, state)); + return 0; +} + +static int adm8211_resume(struct pci_dev *pdev) +{ + struct ieee80211_hw *dev = pci_get_drvdata(pdev); + struct adm8211_priv *priv = dev->priv; + + pci_set_power_state(pdev, PCI_D0); + pci_restore_state(pdev); + + if (priv->mode != IEEE80211_IF_TYPE_INVALID) { + adm8211_start(dev); + ieee80211_start_queues(dev); + } + + return 0; +} +#endif /* CONFIG_PM */ + + +MODULE_DEVICE_TABLE(pci, adm8211_pci_id_table); + +/* TODO: implement enable_wake */ +static struct pci_driver adm8211_driver = { + .name = "adm8211", + .id_table = adm8211_pci_id_table, + .probe = adm8211_probe, + .remove = __devexit_p(adm8211_remove), +#ifdef CONFIG_PM + .suspend = adm8211_suspend, + .resume = adm8211_resume, +#endif /* CONFIG_PM */ +}; + + + +static int __init adm8211_init(void) +{ + return pci_register_driver(&adm8211_driver); +} + + +static void __exit adm8211_exit(void) +{ + pci_unregister_driver(&adm8211_driver); +} + + +module_init(adm8211_init); +module_exit(adm8211_exit); diff --git a/drivers/net/wireless/adm8211.h b/drivers/net/wireless/adm8211.h new file mode 100644 index 0000000000000000000000000000000000000000..ef326fed42e4e8f9d7d4397585663b735365882f --- /dev/null +++ b/drivers/net/wireless/adm8211.h @@ -0,0 +1,656 
@@ +#ifndef ADM8211_H +#define ADM8211_H + +/* ADM8211 Registers */ + +/* CR32 (SIG) signature */ +#define ADM8211_SIG1 0x82011317 /* ADM8211A */ +#define ADM8211_SIG2 0x82111317 /* ADM8211B/ADM8211C */ + +#define ADM8211_CSR_READ(r) ioread32(&priv->map->r) +#define ADM8211_CSR_WRITE(r, val) iowrite32((val), &priv->map->r) + +/* CSR (Host Control and Status Registers) */ +struct adm8211_csr { + __le32 PAR; /* 0x00 CSR0 */ + __le32 FRCTL; /* 0x04 CSR0A */ + __le32 TDR; /* 0x08 CSR1 */ + __le32 WTDP; /* 0x0C CSR1A */ + __le32 RDR; /* 0x10 CSR2 */ + __le32 WRDP; /* 0x14 CSR2A */ + __le32 RDB; /* 0x18 CSR3 */ + __le32 TDBH; /* 0x1C CSR3A */ + __le32 TDBD; /* 0x20 CSR4 */ + __le32 TDBP; /* 0x24 CSR4A */ + __le32 STSR; /* 0x28 CSR5 */ + __le32 TDBB; /* 0x2C CSR5A */ + __le32 NAR; /* 0x30 CSR6 */ + __le32 CSR6A; /* reserved */ + __le32 IER; /* 0x38 CSR7 */ + __le32 TKIPSCEP; /* 0x3C CSR7A */ + __le32 LPC; /* 0x40 CSR8 */ + __le32 CSR_TEST1; /* 0x44 CSR8A */ + __le32 SPR; /* 0x48 CSR9 */ + __le32 CSR_TEST0; /* 0x4C CSR9A */ + __le32 WCSR; /* 0x50 CSR10 */ + __le32 WPDR; /* 0x54 CSR10A */ + __le32 GPTMR; /* 0x58 CSR11 */ + __le32 GPIO; /* 0x5C CSR11A */ + __le32 BBPCTL; /* 0x60 CSR12 */ + __le32 SYNCTL; /* 0x64 CSR12A */ + __le32 PLCPHD; /* 0x68 CSR13 */ + __le32 MMIWA; /* 0x6C CSR13A */ + __le32 MMIRD0; /* 0x70 CSR14 */ + __le32 MMIRD1; /* 0x74 CSR14A */ + __le32 TXBR; /* 0x78 CSR15 */ + __le32 SYNDATA; /* 0x7C CSR15A */ + __le32 ALCS; /* 0x80 CSR16 */ + __le32 TOFS2; /* 0x84 CSR17 */ + __le32 CMDR; /* 0x88 CSR18 */ + __le32 PCIC; /* 0x8C CSR19 */ + __le32 PMCSR; /* 0x90 CSR20 */ + __le32 PAR0; /* 0x94 CSR21 */ + __le32 PAR1; /* 0x98 CSR22 */ + __le32 MAR0; /* 0x9C CSR23 */ + __le32 MAR1; /* 0xA0 CSR24 */ + __le32 ATIMDA0; /* 0xA4 CSR25 */ + __le32 ABDA1; /* 0xA8 CSR26 */ + __le32 BSSID0; /* 0xAC CSR27 */ + __le32 TXLMT; /* 0xB0 CSR28 */ + __le32 MIBCNT; /* 0xB4 CSR29 */ + __le32 BCNT; /* 0xB8 CSR30 */ + __le32 TSFTH; /* 0xBC CSR31 */ + __le32 TSC; /* 0xC0 CSR32 */ + __le32 SYNRF; /* 0xC4 CSR33 */ + __le32 BPLI; /* 0xC8 CSR34 */ + __le32 CAP0; /* 0xCC CSR35 */ + __le32 CAP1; /* 0xD0 CSR36 */ + __le32 RMD; /* 0xD4 CSR37 */ + __le32 CFPP; /* 0xD8 CSR38 */ + __le32 TOFS0; /* 0xDC CSR39 */ + __le32 TOFS1; /* 0xE0 CSR40 */ + __le32 IFST; /* 0xE4 CSR41 */ + __le32 RSPT; /* 0xE8 CSR42 */ + __le32 TSFTL; /* 0xEC CSR43 */ + __le32 WEPCTL; /* 0xF0 CSR44 */ + __le32 WESK; /* 0xF4 CSR45 */ + __le32 WEPCNT; /* 0xF8 CSR46 */ + __le32 MACTEST; /* 0xFC CSR47 */ + __le32 FER; /* 0x100 */ + __le32 FEMR; /* 0x104 */ + __le32 FPSR; /* 0x108 */ + __le32 FFER; /* 0x10C */ +} __attribute__ ((packed)); + +/* CSR0 - PAR (PCI Address Register) */ +#define ADM8211_PAR_MWIE (1 << 24) +#define ADM8211_PAR_MRLE (1 << 23) +#define ADM8211_PAR_MRME (1 << 21) +#define ADM8211_PAR_RAP ((1 << 18) | (1 << 17)) +#define ADM8211_PAR_CAL ((1 << 15) | (1 << 14)) +#define ADM8211_PAR_PBL 0x00003f00 +#define ADM8211_PAR_BLE (1 << 7) +#define ADM8211_PAR_DSL 0x0000007c +#define ADM8211_PAR_BAR (1 << 1) +#define ADM8211_PAR_SWR (1 << 0) + +/* CSR1 - FRCTL (Frame Control Register) */ +#define ADM8211_FRCTL_PWRMGT (1 << 31) +#define ADM8211_FRCTL_MAXPSP (1 << 27) +#define ADM8211_FRCTL_DRVPRSP (1 << 26) +#define ADM8211_FRCTL_DRVBCON (1 << 25) +#define ADM8211_FRCTL_AID 0x0000ffff +#define ADM8211_FRCTL_AID_ON 0x0000c000 + +/* CSR5 - STSR (Status Register) */ +#define ADM8211_STSR_PCF (1 << 31) +#define ADM8211_STSR_BCNTC (1 << 30) +#define ADM8211_STSR_GPINT (1 << 29) +#define ADM8211_STSR_LinkOff (1 << 28) +#define ADM8211_STSR_ATIMTC (1 << 
27) +#define ADM8211_STSR_TSFTF (1 << 26) +#define ADM8211_STSR_TSCZ (1 << 25) +#define ADM8211_STSR_LinkOn (1 << 24) +#define ADM8211_STSR_SQL (1 << 23) +#define ADM8211_STSR_WEPTD (1 << 22) +#define ADM8211_STSR_ATIME (1 << 21) +#define ADM8211_STSR_TBTT (1 << 20) +#define ADM8211_STSR_NISS (1 << 16) +#define ADM8211_STSR_AISS (1 << 15) +#define ADM8211_STSR_TEIS (1 << 14) +#define ADM8211_STSR_FBE (1 << 13) +#define ADM8211_STSR_REIS (1 << 12) +#define ADM8211_STSR_GPTT (1 << 11) +#define ADM8211_STSR_RPS (1 << 8) +#define ADM8211_STSR_RDU (1 << 7) +#define ADM8211_STSR_RCI (1 << 6) +#define ADM8211_STSR_TUF (1 << 5) +#define ADM8211_STSR_TRT (1 << 4) +#define ADM8211_STSR_TLT (1 << 3) +#define ADM8211_STSR_TDU (1 << 2) +#define ADM8211_STSR_TPS (1 << 1) +#define ADM8211_STSR_TCI (1 << 0) + +/* CSR6 - NAR (Network Access Register) */ +#define ADM8211_NAR_TXCF (1 << 31) +#define ADM8211_NAR_HF (1 << 30) +#define ADM8211_NAR_UTR (1 << 29) +#define ADM8211_NAR_SQ (1 << 28) +#define ADM8211_NAR_CFP (1 << 27) +#define ADM8211_NAR_SF (1 << 21) +#define ADM8211_NAR_TR ((1 << 15) | (1 << 14)) +#define ADM8211_NAR_ST (1 << 13) +#define ADM8211_NAR_OM ((1 << 11) | (1 << 10)) +#define ADM8211_NAR_MM (1 << 7) +#define ADM8211_NAR_PR (1 << 6) +#define ADM8211_NAR_EA (1 << 5) +#define ADM8211_NAR_PB (1 << 3) +#define ADM8211_NAR_STPDMA (1 << 2) +#define ADM8211_NAR_SR (1 << 1) +#define ADM8211_NAR_CTX (1 << 0) + +#define ADM8211_IDLE() \ +do { \ + if (priv->nar & (ADM8211_NAR_SR | ADM8211_NAR_ST)) { \ + ADM8211_CSR_WRITE(NAR, priv->nar & \ + ~(ADM8211_NAR_SR | ADM8211_NAR_ST));\ + ADM8211_CSR_READ(NAR); \ + msleep(20); \ + } \ +} while (0) + +#define ADM8211_IDLE_RX() \ +do { \ + if (priv->nar & ADM8211_NAR_SR) { \ + ADM8211_CSR_WRITE(NAR, priv->nar & ~ADM8211_NAR_SR); \ + ADM8211_CSR_READ(NAR); \ + mdelay(20); \ + } \ +} while (0) + +#define ADM8211_RESTORE() \ +do { \ + if (priv->nar & (ADM8211_NAR_SR | ADM8211_NAR_ST)) \ + ADM8211_CSR_WRITE(NAR, priv->nar); \ +} while (0) + +/* CSR7 - IER (Interrupt Enable Register) */ +#define ADM8211_IER_PCFIE (1 << 31) +#define ADM8211_IER_BCNTCIE (1 << 30) +#define ADM8211_IER_GPIE (1 << 29) +#define ADM8211_IER_LinkOffIE (1 << 28) +#define ADM8211_IER_ATIMTCIE (1 << 27) +#define ADM8211_IER_TSFTFIE (1 << 26) +#define ADM8211_IER_TSCZE (1 << 25) +#define ADM8211_IER_LinkOnIE (1 << 24) +#define ADM8211_IER_SQLIE (1 << 23) +#define ADM8211_IER_WEPIE (1 << 22) +#define ADM8211_IER_ATIMEIE (1 << 21) +#define ADM8211_IER_TBTTIE (1 << 20) +#define ADM8211_IER_NIE (1 << 16) +#define ADM8211_IER_AIE (1 << 15) +#define ADM8211_IER_TEIE (1 << 14) +#define ADM8211_IER_FBEIE (1 << 13) +#define ADM8211_IER_REIE (1 << 12) +#define ADM8211_IER_GPTIE (1 << 11) +#define ADM8211_IER_RSIE (1 << 8) +#define ADM8211_IER_RUIE (1 << 7) +#define ADM8211_IER_RCIE (1 << 6) +#define ADM8211_IER_TUIE (1 << 5) +#define ADM8211_IER_TRTIE (1 << 4) +#define ADM8211_IER_TLTTIE (1 << 3) +#define ADM8211_IER_TDUIE (1 << 2) +#define ADM8211_IER_TPSIE (1 << 1) +#define ADM8211_IER_TCIE (1 << 0) + +/* CSR9 - SPR (Serial Port Register) */ +#define ADM8211_SPR_SRS (1 << 11) +#define ADM8211_SPR_SDO (1 << 3) +#define ADM8211_SPR_SDI (1 << 2) +#define ADM8211_SPR_SCLK (1 << 1) +#define ADM8211_SPR_SCS (1 << 0) + +/* CSR9A - CSR_TEST0 */ +#define ADM8211_CSR_TEST0_EPNE (1 << 18) +#define ADM8211_CSR_TEST0_EPSNM (1 << 17) +#define ADM8211_CSR_TEST0_EPTYP (1 << 16) +#define ADM8211_CSR_TEST0_EPRLD (1 << 15) + +/* CSR10 - WCSR (Wake-up Control/Status Register) */ +#define ADM8211_WCSR_CRCT (1 << 30) 
+#define ADM8211_WCSR_TSFTWE (1 << 20) +#define ADM8211_WCSR_TIMWE (1 << 19) +#define ADM8211_WCSR_ATIMWE (1 << 18) +#define ADM8211_WCSR_KEYWE (1 << 17) +#define ADM8211_WCSR_MPRE (1 << 9) +#define ADM8211_WCSR_LSOE (1 << 8) +#define ADM8211_WCSR_KEYUP (1 << 6) +#define ADM8211_WCSR_TSFTW (1 << 5) +#define ADM8211_WCSR_TIMW (1 << 4) +#define ADM8211_WCSR_ATIMW (1 << 3) +#define ADM8211_WCSR_MPR (1 << 1) +#define ADM8211_WCSR_LSO (1 << 0) + +/* CSR11A - GPIO */ +#define ADM8211_CSR_GPIO_EN5 (1 << 17) +#define ADM8211_CSR_GPIO_EN4 (1 << 16) +#define ADM8211_CSR_GPIO_EN3 (1 << 15) +#define ADM8211_CSR_GPIO_EN2 (1 << 14) +#define ADM8211_CSR_GPIO_EN1 (1 << 13) +#define ADM8211_CSR_GPIO_EN0 (1 << 12) +#define ADM8211_CSR_GPIO_O5 (1 << 11) +#define ADM8211_CSR_GPIO_O4 (1 << 10) +#define ADM8211_CSR_GPIO_O3 (1 << 9) +#define ADM8211_CSR_GPIO_O2 (1 << 8) +#define ADM8211_CSR_GPIO_O1 (1 << 7) +#define ADM8211_CSR_GPIO_O0 (1 << 6) +#define ADM8211_CSR_GPIO_IN 0x0000003f + +/* CSR12 - BBPCTL (BBP Control port) */ +#define ADM8211_BBPCTL_MMISEL (1 << 31) +#define ADM8211_BBPCTL_SPICADD (0x7F << 24) +#define ADM8211_BBPCTL_RF3000 (0x20 << 24) +#define ADM8211_BBPCTL_TXCE (1 << 23) +#define ADM8211_BBPCTL_RXCE (1 << 22) +#define ADM8211_BBPCTL_CCAP (1 << 21) +#define ADM8211_BBPCTL_TYPE 0x001c0000 +#define ADM8211_BBPCTL_WR (1 << 17) +#define ADM8211_BBPCTL_RD (1 << 16) +#define ADM8211_BBPCTL_ADDR 0x0000ff00 +#define ADM8211_BBPCTL_DATA 0x000000ff + +/* CSR12A - SYNCTL (Synthesizer Control port) */ +#define ADM8211_SYNCTL_WR (1 << 31) +#define ADM8211_SYNCTL_RD (1 << 30) +#define ADM8211_SYNCTL_CS0 (1 << 29) +#define ADM8211_SYNCTL_CS1 (1 << 28) +#define ADM8211_SYNCTL_CAL (1 << 27) +#define ADM8211_SYNCTL_SELCAL (1 << 26) +#define ADM8211_SYNCTL_RFtype ((1 << 24) || (1 << 23) || (1 << 22)) +#define ADM8211_SYNCTL_RFMD (1 << 22) +#define ADM8211_SYNCTL_GENERAL (0x7 << 22) +/* SYNCTL 21:0 Data (Si4126: 18-bit data, 4-bit address) */ + +/* CSR18 - CMDR (Command Register) */ +#define ADM8211_CMDR_PM (1 << 19) +#define ADM8211_CMDR_APM (1 << 18) +#define ADM8211_CMDR_RTE (1 << 4) +#define ADM8211_CMDR_DRT ((1 << 3) | (1 << 2)) +#define ADM8211_CMDR_DRT_8DW (0x0 << 2) +#define ADM8211_CMDR_DRT_16DW (0x1 << 2) +#define ADM8211_CMDR_DRT_SF (0x2 << 2) + +/* CSR33 - SYNRF (SYNRF direct control) */ +#define ADM8211_SYNRF_SELSYN (1 << 31) +#define ADM8211_SYNRF_SELRF (1 << 30) +#define ADM8211_SYNRF_LERF (1 << 29) +#define ADM8211_SYNRF_LEIF (1 << 28) +#define ADM8211_SYNRF_SYNCLK (1 << 27) +#define ADM8211_SYNRF_SYNDATA (1 << 26) +#define ADM8211_SYNRF_PE1 (1 << 25) +#define ADM8211_SYNRF_PE2 (1 << 24) +#define ADM8211_SYNRF_PA_PE (1 << 23) +#define ADM8211_SYNRF_TR_SW (1 << 22) +#define ADM8211_SYNRF_TR_SWN (1 << 21) +#define ADM8211_SYNRF_RADIO (1 << 20) +#define ADM8211_SYNRF_CAL_EN (1 << 19) +#define ADM8211_SYNRF_PHYRST (1 << 18) + +#define ADM8211_SYNRF_IF_SELECT_0 (1 << 31) +#define ADM8211_SYNRF_IF_SELECT_1 ((1 << 31) | (1 << 28)) +#define ADM8211_SYNRF_WRITE_SYNDATA_0 (1 << 31) +#define ADM8211_SYNRF_WRITE_SYNDATA_1 ((1 << 31) | (1 << 26)) +#define ADM8211_SYNRF_WRITE_CLOCK_0 (1 << 31) +#define ADM8211_SYNRF_WRITE_CLOCK_1 ((1 << 31) | (1 << 27)) + +/* CSR44 - WEPCTL (WEP Control) */ +#define ADM8211_WEPCTL_WEPENABLE (1 << 31) +#define ADM8211_WEPCTL_WPAENABLE (1 << 30) +#define ADM8211_WEPCTL_CURRENT_TABLE (1 << 29) +#define ADM8211_WEPCTL_TABLE_WR (1 << 28) +#define ADM8211_WEPCTL_TABLE_RD (1 << 27) +#define ADM8211_WEPCTL_WEPRXBYP (1 << 25) +#define ADM8211_WEPCTL_SEL_WEPTABLE (1 << 23) +#define 
ADM8211_WEPCTL_ADDR (0x000001ff) + +/* CSR45 - WESK (Data Entry for Share/Individual Key) */ +#define ADM8211_WESK_DATA (0x0000ffff) + +/* FER (Function Event Register) */ +#define ADM8211_FER_INTR_EV_ENT (1 << 15) + + +/* Si4126 RF Synthesizer - Control Registers */ +#define SI4126_MAIN_CONF 0 +#define SI4126_PHASE_DET_GAIN 1 +#define SI4126_POWERDOWN 2 +#define SI4126_RF1_N_DIV 3 /* only Si4136 */ +#define SI4126_RF2_N_DIV 4 +#define SI4126_IF_N_DIV 5 +#define SI4126_RF1_R_DIV 6 /* only Si4136 */ +#define SI4126_RF2_R_DIV 7 +#define SI4126_IF_R_DIV 8 + +/* Main Configuration */ +#define SI4126_MAIN_XINDIV2 (1 << 6) +#define SI4126_MAIN_IFDIV ((1 << 11) | (1 << 10)) +/* Powerdown */ +#define SI4126_POWERDOWN_PDIB (1 << 1) +#define SI4126_POWERDOWN_PDRB (1 << 0) + + +/* RF3000 BBP - Control Port Registers */ +/* 0x00 - reserved */ +#define RF3000_MODEM_CTRL__RX_STATUS 0x01 +#define RF3000_CCA_CTRL 0x02 +#define RF3000_DIVERSITY__RSSI 0x03 +#define RF3000_RX_SIGNAL_FIELD 0x04 +#define RF3000_RX_LEN_MSB 0x05 +#define RF3000_RX_LEN_LSB 0x06 +#define RF3000_RX_SERVICE_FIELD 0x07 +#define RF3000_TX_VAR_GAIN__TX_LEN_EXT 0x11 +#define RF3000_TX_LEN_MSB 0x12 +#define RF3000_TX_LEN_LSB 0x13 +#define RF3000_LOW_GAIN_CALIB 0x14 +#define RF3000_HIGH_GAIN_CALIB 0x15 + +/* ADM8211 revisions */ +#define ADM8211_REV_AB 0x11 +#define ADM8211_REV_AF 0x15 +#define ADM8211_REV_BA 0x20 +#define ADM8211_REV_CA 0x30 + +struct adm8211_desc { + __le32 status; + __le32 length; + __le32 buffer1; + __le32 buffer2; +}; + +#define RDES0_STATUS_OWN (1 << 31) +#define RDES0_STATUS_ES (1 << 30) +#define RDES0_STATUS_SQL (1 << 29) +#define RDES0_STATUS_DE (1 << 28) +#define RDES0_STATUS_FS (1 << 27) +#define RDES0_STATUS_LS (1 << 26) +#define RDES0_STATUS_PCF (1 << 25) +#define RDES0_STATUS_SFDE (1 << 24) +#define RDES0_STATUS_SIGE (1 << 23) +#define RDES0_STATUS_CRC16E (1 << 22) +#define RDES0_STATUS_RXTOE (1 << 21) +#define RDES0_STATUS_CRC32E (1 << 20) +#define RDES0_STATUS_ICVE (1 << 19) +#define RDES0_STATUS_DA1 (1 << 17) +#define RDES0_STATUS_DA0 (1 << 16) +#define RDES0_STATUS_RXDR ((1 << 15) | (1 << 14) | (1 << 13) | (1 << 12)) +#define RDES0_STATUS_FL (0x00000fff) + +#define RDES1_CONTROL_RER (1 << 25) +#define RDES1_CONTROL_RCH (1 << 24) +#define RDES1_CONTROL_RBS2 (0x00fff000) +#define RDES1_CONTROL_RBS1 (0x00000fff) + +#define RDES1_STATUS_RSSI (0x0000007f) + + +#define TDES0_CONTROL_OWN (1 << 31) +#define TDES0_CONTROL_DONE (1 << 30) +#define TDES0_CONTROL_TXDR (0x0ff00000) + +#define TDES0_STATUS_OWN (1 << 31) +#define TDES0_STATUS_DONE (1 << 30) +#define TDES0_STATUS_ES (1 << 29) +#define TDES0_STATUS_TLT (1 << 28) +#define TDES0_STATUS_TRT (1 << 27) +#define TDES0_STATUS_TUF (1 << 26) +#define TDES0_STATUS_TRO (1 << 25) +#define TDES0_STATUS_SOFBR (1 << 24) +#define TDES0_STATUS_ACR (0x00000fff) + +#define TDES1_CONTROL_IC (1 << 31) +#define TDES1_CONTROL_LS (1 << 30) +#define TDES1_CONTROL_FS (1 << 29) +#define TDES1_CONTROL_TER (1 << 25) +#define TDES1_CONTROL_TCH (1 << 24) +#define TDES1_CONTROL_RBS2 (0x00fff000) +#define TDES1_CONTROL_RBS1 (0x00000fff) + +/* SRAM offsets */ +#define ADM8211_SRAM(x) (priv->pdev->revision < ADM8211_REV_BA ? 
\ + ADM8211_SRAM_A_ ## x : ADM8211_SRAM_B_ ## x) + +#define ADM8211_SRAM_INDIV_KEY 0x0000 +#define ADM8211_SRAM_A_SHARE_KEY 0x0160 +#define ADM8211_SRAM_B_SHARE_KEY 0x00c0 + +#define ADM8211_SRAM_A_SSID 0x0180 +#define ADM8211_SRAM_B_SSID 0x00d4 +#define ADM8211_SRAM_SSID ADM8211_SRAM(SSID) + +#define ADM8211_SRAM_A_SUPP_RATE 0x0191 +#define ADM8211_SRAM_B_SUPP_RATE 0x00dd +#define ADM8211_SRAM_SUPP_RATE ADM8211_SRAM(SUPP_RATE) + +#define ADM8211_SRAM_A_SIZE 0x0200 +#define ADM8211_SRAM_B_SIZE 0x01c0 +#define ADM8211_SRAM_SIZE ADM8211_SRAM(SIZE) + +struct adm8211_rx_ring_info { + struct sk_buff *skb; + dma_addr_t mapping; +}; + +struct adm8211_tx_ring_info { + struct sk_buff *skb; + dma_addr_t mapping; + struct ieee80211_tx_control tx_control; + size_t hdrlen; +}; + +#define PLCP_SIGNAL_1M 0x0a +#define PLCP_SIGNAL_2M 0x14 +#define PLCP_SIGNAL_5M5 0x37 +#define PLCP_SIGNAL_11M 0x6e + +struct adm8211_tx_hdr { + u8 da[6]; + u8 signal; /* PLCP signal / TX rate in 100 Kbps */ + u8 service; + __le16 frame_body_size; + __le16 frame_control; + __le16 plcp_frag_tail_len; + __le16 plcp_frag_head_len; + __le16 dur_frag_tail; + __le16 dur_frag_head; + u8 addr4[6]; + +#define ADM8211_TXHDRCTL_SHORT_PREAMBLE (1 << 0) +#define ADM8211_TXHDRCTL_MORE_FRAG (1 << 1) +#define ADM8211_TXHDRCTL_MORE_DATA (1 << 2) +#define ADM8211_TXHDRCTL_FRAG_NO (1 << 3) /* ? */ +#define ADM8211_TXHDRCTL_ENABLE_RTS (1 << 4) +#define ADM8211_TXHDRCTL_ENABLE_WEP_ENGINE (1 << 5) +#define ADM8211_TXHDRCTL_ENABLE_EXTEND_HEADER (1 << 15) /* ? */ + __le16 header_control; + __le16 frag; + u8 reserved_0; + u8 retry_limit; + + u32 wep2key0; + u32 wep2key1; + u32 wep2key2; + u32 wep2key3; + + u8 keyid; + u8 entry_control; // huh?? + u16 reserved_1; + u32 reserved_2; +} __attribute__ ((packed)); + + +#define RX_COPY_BREAK 128 +#define RX_PKT_SIZE 2500 + +struct adm8211_eeprom { + __le16 signature; /* 0x00 */ + u8 major_version; /* 0x02 */ + u8 minor_version; /* 0x03 */ + u8 reserved_1[4]; /* 0x04 */ + u8 hwaddr[6]; /* 0x08 */ + u8 reserved_2[8]; /* 0x1E */ + __le16 cr49; /* 0x16 */ + u8 cr03; /* 0x18 */ + u8 cr28; /* 0x19 */ + u8 cr29; /* 0x1A */ + u8 country_code; /* 0x1B */ + +/* specific bbp types */ +#define ADM8211_BBP_RFMD3000 0x00 +#define ADM8211_BBP_RFMD3002 0x01 +#define ADM8211_BBP_ADM8011 0x04 + u8 specific_bbptype; /* 0x1C */ + u8 specific_rftype; /* 0x1D */ + u8 reserved_3[2]; /* 0x1E */ + __le16 device_id; /* 0x20 */ + __le16 vendor_id; /* 0x22 */ + __le16 subsystem_id; /* 0x24 */ + __le16 subsystem_vendor_id; /* 0x26 */ + u8 maxlat; /* 0x28 */ + u8 mingnt; /* 0x29 */ + __le16 cis_pointer_low; /* 0x2A */ + __le16 cis_pointer_high; /* 0x2C */ + __le16 csr18; /* 0x2E */ + u8 reserved_4[16]; /* 0x30 */ + u8 d1_pwrdara; /* 0x40 */ + u8 d0_pwrdara; /* 0x41 */ + u8 d3_pwrdara; /* 0x42 */ + u8 d2_pwrdara; /* 0x43 */ + u8 antenna_power[14]; /* 0x44 */ + __le16 cis_wordcnt; /* 0x52 */ + u8 tx_power[14]; /* 0x54 */ + u8 lpf_cutoff[14]; /* 0x62 */ + u8 lnags_threshold[14]; /* 0x70 */ + __le16 checksum; /* 0x7E */ + u8 cis_data[0]; /* 0x80, 384 bytes */ +} __attribute__ ((packed)); + +static const struct ieee80211_rate adm8211_rates[] = { + { .rate = 10, + .val = 10, + .val2 = -10, + .flags = IEEE80211_RATE_CCK_2 }, + { .rate = 20, + .val = 20, + .val2 = -20, + .flags = IEEE80211_RATE_CCK_2 }, + { .rate = 55, + .val = 55, + .val2 = -55, + .flags = IEEE80211_RATE_CCK_2 }, + { .rate = 110, + .val = 110, + .val2 = -110, + .flags = IEEE80211_RATE_CCK_2 } +}; + +struct ieee80211_chan_range { + u8 min; + u8 max; +}; + +static const struct 
ieee80211_channel adm8211_channels[] = { + { .chan = 1, + .freq = 2412}, + { .chan = 2, + .freq = 2417}, + { .chan = 3, + .freq = 2422}, + { .chan = 4, + .freq = 2427}, + { .chan = 5, + .freq = 2432}, + { .chan = 6, + .freq = 2437}, + { .chan = 7, + .freq = 2442}, + { .chan = 8, + .freq = 2447}, + { .chan = 9, + .freq = 2452}, + { .chan = 10, + .freq = 2457}, + { .chan = 11, + .freq = 2462}, + { .chan = 12, + .freq = 2467}, + { .chan = 13, + .freq = 2472}, + { .chan = 14, + .freq = 2484}, +}; + +struct adm8211_priv { + struct pci_dev *pdev; + spinlock_t lock; + struct adm8211_csr __iomem *map; + struct adm8211_desc *rx_ring; + struct adm8211_desc *tx_ring; + dma_addr_t rx_ring_dma; + dma_addr_t tx_ring_dma; + struct adm8211_rx_ring_info *rx_buffers; + struct adm8211_tx_ring_info *tx_buffers; + unsigned int rx_ring_size, tx_ring_size; + unsigned int cur_tx, dirty_tx, cur_rx; + + struct ieee80211_low_level_stats stats; + struct ieee80211_hw_mode modes[1]; + struct ieee80211_channel channels[ARRAY_SIZE(adm8211_channels)]; + struct ieee80211_rate rates[ARRAY_SIZE(adm8211_rates)]; + int mode; + + int channel; + u8 bssid[ETH_ALEN]; + u8 ssid[32]; + size_t ssid_len; + + u8 soft_rx_crc; + u8 retry_limit; + + u8 ant_power; + u8 tx_power; + u8 lpf_cutoff; + u8 lnags_threshold; + struct adm8211_eeprom *eeprom; + size_t eeprom_len; + + u32 nar; + +#define ADM8211_TYPE_INTERSIL 0x00 +#define ADM8211_TYPE_RFMD 0x01 +#define ADM8211_TYPE_MARVEL 0x02 +#define ADM8211_TYPE_AIROHA 0x03 +#define ADM8211_TYPE_ADMTEK 0x05 + unsigned int rf_type:3; + unsigned int bbp_type:3; + + u8 specific_bbptype; + enum { + ADM8211_RFMD2948 = 0x0, + ADM8211_RFMD2958 = 0x1, + ADM8211_RFMD2958_RF3000_CONTROL_POWER = 0x2, + ADM8211_MAX2820 = 0x8, + ADM8211_AL2210L = 0xC, /* Airoha */ + } transceiver_type; +}; + +static const struct ieee80211_chan_range cranges[] = { + {1, 11}, /* FCC */ + {1, 11}, /* IC */ + {1, 13}, /* ETSI */ + {10, 11}, /* SPAIN */ + {10, 13}, /* FRANCE */ + {14, 14}, /* MMK */ + {1, 14}, /* MMK2 */ +}; + +#endif /* ADM8211_H */ diff --git a/drivers/net/wireless/airo.c b/drivers/net/wireless/airo.c index ee1cc14db389f91d9c8a7d09898c1eee7887d494..074055e18c5c510d879fafc903e9f65b27fae752 100644 --- a/drivers/net/wireless/airo.c +++ b/drivers/net/wireless/airo.c @@ -241,8 +241,8 @@ static int proc_perm = 0644; MODULE_AUTHOR("Benjamin Reed"); MODULE_DESCRIPTION("Support for Cisco/Aironet 802.11 wireless ethernet \ - cards. Direct support for ISA/PCI/MPI cards and support \ - for PCMCIA when used with airo_cs."); +cards. 
Direct support for ISA/PCI/MPI cards and support \ +for PCMCIA when used with airo_cs."); MODULE_LICENSE("Dual BSD/GPL"); MODULE_SUPPORTED_DEVICE("Aironet 4500, 4800 and Cisco 340/350"); module_param_array(io, int, NULL, 0); @@ -2481,7 +2481,7 @@ void stop_airo_card( struct net_device *dev, int freeres ) EXPORT_SYMBOL(stop_airo_card); -static int wll_header_parse(struct sk_buff *skb, unsigned char *haddr) +static int wll_header_parse(const struct sk_buff *skb, unsigned char *haddr) { memcpy(haddr, skb_mac_header(skb) + 10, ETH_ALEN); return ETH_ALEN; @@ -2696,14 +2696,13 @@ static int mpi_map_card(struct airo_info *ai, struct pci_dev *pci) return rc; } +static const struct header_ops airo_header_ops = { + .parse = wll_header_parse, +}; + static void wifi_setup(struct net_device *dev) { - dev->hard_header = NULL; - dev->rebuild_header = NULL; - dev->hard_header_cache = NULL; - dev->header_cache_update= NULL; - - dev->hard_header_parse = wll_header_parse; + dev->header_ops = &airo_header_ops; dev->hard_start_xmit = &airo_start_xmit11; dev->get_stats = &airo_get_stats; dev->set_mac_address = &airo_set_mac_address; @@ -2821,6 +2820,7 @@ static struct net_device *_init_airo_card( unsigned short irq, int port, struct net_device *dev; struct airo_info *ai; int i, rc; + DECLARE_MAC_BUF(mac); /* Create the network device object. */ dev = alloc_netdev(sizeof(*ai), "", ether_setup); @@ -2870,7 +2870,6 @@ static struct net_device *_init_airo_card( unsigned short irq, int port, dev->base_addr = port; SET_NETDEV_DEV(dev, dmdev); - SET_MODULE_OWNER(dev); reset_card (dev, 1); msleep(400); @@ -2924,9 +2923,8 @@ static struct net_device *_init_airo_card( unsigned short irq, int port, goto err_out_reg; set_bit(FLAG_REGISTERED,&ai->flags); - airo_print_info(dev->name, "MAC enabled %x:%x:%x:%x:%x:%x", - dev->dev_addr[0], dev->dev_addr[1], dev->dev_addr[2], - dev->dev_addr[3], dev->dev_addr[4], dev->dev_addr[5] ); + airo_print_info(dev->name, "MAC enabled %s", + print_mac(mac, dev->dev_addr)); /* Allocate the transmit buffers */ if (probe && !test_bit(FLAG_MPI,&ai->flags)) @@ -2983,6 +2981,7 @@ int reset_airo_card( struct net_device *dev ) { int i; struct airo_info *ai = dev->priv; + DECLARE_MAC_BUF(mac); if (reset_card (dev, 1)) return -1; @@ -2991,9 +2990,8 @@ int reset_airo_card( struct net_device *dev ) airo_print_err(dev->name, "MAC could not be enabled"); return -1; } - airo_print_info(dev->name, "MAC enabled %x:%x:%x:%x:%x:%x", - dev->dev_addr[0], dev->dev_addr[1], dev->dev_addr[2], - dev->dev_addr[3], dev->dev_addr[4], dev->dev_addr[5]); + airo_print_info(dev->name, "MAC enabled %s", + print_mac(mac, dev->dev_addr)); /* Allocate the transmit buffers if needed */ if (!test_bit(FLAG_MPI,&ai->flags)) for( i = 0; i < MAX_FIDS; i++ ) @@ -5427,6 +5425,7 @@ static int proc_APList_open( struct inode *inode, struct file *file ) { int i; char *ptr; APListRid APList_rid; + DECLARE_MAC_BUF(mac); if ((file->private_data = kzalloc(sizeof(struct proc_data ), GFP_KERNEL)) == NULL) return -ENOMEM; @@ -5450,13 +5449,8 @@ static int proc_APList_open( struct inode *inode, struct file *file ) { // We end when we find a zero MAC if ( !*(int*)APList_rid.ap[i] && !*(int*)&APList_rid.ap[i][2]) break; - ptr += sprintf(ptr, "%02x:%02x:%02x:%02x:%02x:%02x\n", - (int)APList_rid.ap[i][0], - (int)APList_rid.ap[i][1], - (int)APList_rid.ap[i][2], - (int)APList_rid.ap[i][3], - (int)APList_rid.ap[i][4], - (int)APList_rid.ap[i][5]); + ptr += sprintf(ptr, "%s\n", + print_mac(mac, APList_rid.ap[i])); } if (i==0) ptr += sprintf(ptr, "Not 
using specific APs\n"); @@ -5475,6 +5469,7 @@ static int proc_BSSList_open( struct inode *inode, struct file *file ) { int rc; /* If doLoseSync is not 1, we won't do a Lose Sync */ int doLoseSync = -1; + DECLARE_MAC_BUF(mac); if ((file->private_data = kzalloc(sizeof(struct proc_data ), GFP_KERNEL)) == NULL) return -ENOMEM; @@ -5511,13 +5506,8 @@ static int proc_BSSList_open( struct inode *inode, struct file *file ) { we have to add a spin lock... */ rc = readBSSListRid(ai, doLoseSync, &BSSList_rid); while(rc == 0 && BSSList_rid.index != 0xffff) { - ptr += sprintf(ptr, "%02x:%02x:%02x:%02x:%02x:%02x %*s rssi = %d", - (int)BSSList_rid.bssid[0], - (int)BSSList_rid.bssid[1], - (int)BSSList_rid.bssid[2], - (int)BSSList_rid.bssid[3], - (int)BSSList_rid.bssid[4], - (int)BSSList_rid.bssid[5], + ptr += sprintf(ptr, "%s %*s rssi = %d", + print_mac(mac, BSSList_rid.bssid), (int)BSSList_rid.ssidLen, BSSList_rid.ssid, (int)BSSList_rid.dBm); @@ -7579,9 +7569,9 @@ static const iw_handler airo_private_handler[] = static const struct iw_handler_def airo_handler_def = { - .num_standard = sizeof(airo_handler)/sizeof(iw_handler), - .num_private = sizeof(airo_private_handler)/sizeof(iw_handler), - .num_private_args = sizeof(airo_private_args)/sizeof(struct iw_priv_args), + .num_standard = ARRAY_SIZE(airo_handler), + .num_private = ARRAY_SIZE(airo_private_handler), + .num_private_args = ARRAY_SIZE(airo_private_args), .standard = airo_handler, .private = airo_private_handler, .private_args = airo_private_args, diff --git a/drivers/net/wireless/airport.c b/drivers/net/wireless/airport.c index 7d5b8c2cc614ef3970ce34c9279cad6700758bfc..6f7eb9f59223b43ccdd25b4b6d60e7906a054eaa 100644 --- a/drivers/net/wireless/airport.c +++ b/drivers/net/wireless/airport.c @@ -197,7 +197,6 @@ airport_attach(struct macio_dev *mdev, const struct of_device_id *match) return -EBUSY; } - SET_MODULE_OWNER(dev); SET_NETDEV_DEV(dev, &mdev->ofdev.dev); macio_set_drvdata(mdev, dev); diff --git a/drivers/net/wireless/arlan-main.c b/drivers/net/wireless/arlan-main.c index 498e8486d125f450a0ee4140de312a567905636e..dbdfc9e39d20eec73f0e9424570010f9ac3022b0 100644 --- a/drivers/net/wireless/arlan-main.c +++ b/drivers/net/wireless/arlan-main.c @@ -1469,10 +1469,10 @@ static void arlan_rx_interrupt(struct net_device *dev, u_char rxStatus, u_short while (dmi) { if (dmi->dmi_addrlen == 6) { + DECLARE_MAC_BUF(mac); if (arlan_debug & ARLAN_DEBUG_HEADER_DUMP) - printk(KERN_ERR "%s mcl %2x:%2x:%2x:%2x:%2x:%2x \n", dev->name, - dmi->dmi_addr[0], dmi->dmi_addr[1], dmi->dmi_addr[2], - dmi->dmi_addr[3], dmi->dmi_addr[4], dmi->dmi_addr[5]); + printk(KERN_ERR "%s mcl %s\n", + dev->name, print_mac(mac, dmi->dmi_addr)); for (i = 0; i < 6; i++) if (dmi->dmi_addr[i] != hw_dst_addr[i]) break; @@ -1512,17 +1512,18 @@ static void arlan_rx_interrupt(struct net_device *dev, u_char rxStatus, u_short { char immedDestAddress[6]; char immedSrcAddress[6]; + DECLARE_MAC_BUF(mac); + DECLARE_MAC_BUF(mac2); + DECLARE_MAC_BUF(mac3); + DECLARE_MAC_BUF(mac4); memcpy_fromio(immedDestAddress, arlan->immedDestAddress, 6); memcpy_fromio(immedSrcAddress, arlan->immedSrcAddress, 6); - printk(KERN_WARNING "%s t %2x:%2x:%2x:%2x:%2x:%2x f %2x:%2x:%2x:%2x:%2x:%2x imd %2x:%2x:%2x:%2x:%2x:%2x ims %2x:%2x:%2x:%2x:%2x:%2x\n", dev->name, - (unsigned char) skbtmp[0], (unsigned char) skbtmp[1], (unsigned char) skbtmp[2], (unsigned char) skbtmp[3], - (unsigned char) skbtmp[4], (unsigned char) skbtmp[5], (unsigned char) skbtmp[6], (unsigned char) skbtmp[7], - (unsigned char) skbtmp[8], (unsigned char) 
skbtmp[9], (unsigned char) skbtmp[10], (unsigned char) skbtmp[11], - immedDestAddress[0], immedDestAddress[1], immedDestAddress[2], - immedDestAddress[3], immedDestAddress[4], immedDestAddress[5], - immedSrcAddress[0], immedSrcAddress[1], immedSrcAddress[2], - immedSrcAddress[3], immedSrcAddress[4], immedSrcAddress[5]); + printk(KERN_WARNING "%s t %s f %s imd %s ims %s\n", + dev->name, print_mac(mac, skbtmp), + print_mac(mac2, &skbtmp[6]), + print_mac(mac3, immedDestAddress), + print_mac(mac4, immedSrcAddress)); } skb->protocol = eth_type_trans(skb, dev); IFDEBUG(ARLAN_DEBUG_HEADER_DUMP) @@ -1792,8 +1793,6 @@ struct net_device * __init arlan_probe(int unit) if (!dev) return ERR_PTR(-ENOMEM); - SET_MODULE_OWNER(dev); - if (unit >= 0) { sprintf(dev->name, "eth%d", unit); netdev_boot_setup_check(dev); diff --git a/drivers/net/wireless/arlan-proc.c b/drivers/net/wireless/arlan-proc.c index 015abd928ab0b156ad49562b6107810e32f774b8..c6e70dbc5de824a9ab63286c82d76ed6480a40ff 100644 --- a/drivers/net/wireless/arlan-proc.c +++ b/drivers/net/wireless/arlan-proc.c @@ -435,7 +435,7 @@ static int arlan_sysctl_info(ctl_table * ctl, int write, struct file *filp, goto final; } else - priva = arlan_device[devnum]->priv; + priva = netdev_priv(arlan_device[devnum]); if (priva == NULL) { @@ -654,7 +654,7 @@ static int arlan_sysctl_info161719(ctl_table * ctl, int write, struct file *filp goto final; } else - priva = arlan_device[devnum]->priv; + priva = netdev_priv(arlan_device[devnum]); if (priva == NULL) { printk(KERN_WARNING " Could not find the device private in arlan procsys, bad\n "); @@ -688,7 +688,7 @@ static int arlan_sysctl_infotxRing(ctl_table * ctl, int write, struct file *filp goto final; } else - priva = arlan_device[devnum]->priv; + priva = netdev_priv(arlan_device[devnum]); if (priva == NULL) { printk(KERN_WARNING " Could not find the device private in arlan procsys, bad\n "); @@ -716,7 +716,7 @@ static int arlan_sysctl_inforxRing(ctl_table * ctl, int write, struct file *filp pos += sprintf(arlan_drive_info + pos, "No device found here \n"); goto final; } else - priva = arlan_device[devnum]->priv; + priva = netdev_priv(arlan_device[devnum]); if (priva == NULL) { printk(KERN_WARNING " Could not find the device private in arlan procsys, bad\n "); @@ -745,7 +745,7 @@ static int arlan_sysctl_info18(ctl_table * ctl, int write, struct file *filp, goto final; } else - priva = arlan_device[devnum]->priv; + priva = netdev_priv(arlan_device[devnum]); if (priva == NULL) { printk(KERN_WARNING " Could not find the device private in arlan procsys, bad\n "); @@ -780,7 +780,7 @@ static int arlan_configure(ctl_table * ctl, int write, struct file *filp, } else if (arlan_device[devnum] != NULL) { - priv = arlan_device[devnum]->priv; + priv = netdev_priv(arlan_device[devnum]); arlan_command(arlan_device[devnum], ARLAN_COMMAND_CLEAN_AND_CONF); } @@ -805,7 +805,7 @@ static int arlan_sysctl_reset(ctl_table * ctl, int write, struct file *filp, } else if (arlan_device[devnum] != NULL) { - priv = arlan_device[devnum]->priv; + priv = netdev_priv(arlan_device[devnum]); arlan_command(arlan_device[devnum], ARLAN_COMMAND_CLEAN_AND_RESET); } else diff --git a/drivers/net/wireless/atmel.c b/drivers/net/wireless/atmel.c index 51a7db53afa5f5c02229891dfcf49d77d4260024..059ce3f07dba1bafe91eccaf4a68a9be26f367eb 100644 --- a/drivers/net/wireless/atmel.c +++ b/drivers/net/wireless/atmel.c @@ -1484,6 +1484,7 @@ struct net_device *init_atmel_card(unsigned short irq, unsigned long port, struct net_device *dev; struct atmel_private *priv; 
int rc; + DECLARE_MAC_BUF(mac); /* Create the network device object. */ dev = alloc_etherdev(sizeof(*priv)); @@ -1598,12 +1599,9 @@ struct net_device *init_atmel_card(unsigned short irq, unsigned long port, if (!ent) printk(KERN_WARNING "atmel: unable to create /proc entry.\n"); - printk(KERN_INFO "%s: Atmel at76c50x. Version %d.%d. MAC %.2x:%.2x:%.2x:%.2x:%.2x:%.2x\n", - dev->name, DRIVER_MAJOR, DRIVER_MINOR, - dev->dev_addr[0], dev->dev_addr[1], dev->dev_addr[2], - dev->dev_addr[3], dev->dev_addr[4], dev->dev_addr[5] ); + printk(KERN_INFO "%s: Atmel at76c50x. Version %d.%d. MAC %s\n", + dev->name, DRIVER_MAJOR, DRIVER_MINOR, print_mac(mac, dev->dev_addr)); - SET_MODULE_OWNER(dev); return dev; err_out_res: diff --git a/drivers/net/wireless/b43/Kconfig b/drivers/net/wireless/b43/Kconfig new file mode 100644 index 0000000000000000000000000000000000000000..e3c573e56b6368b72cf25751df30016b1ece3a6b --- /dev/null +++ b/drivers/net/wireless/b43/Kconfig @@ -0,0 +1,131 @@ +config B43 + tristate "Broadcom 43xx wireless support (mac80211 stack)" + depends on SSB_POSSIBLE && MAC80211 && WLAN_80211 + select SSB + select FW_LOADER + select HW_RANDOM + ---help--- + b43 is a driver for the Broadcom 43xx series wireless devices. + + Check "lspci" for something like + "Broadcom Corporation BCM43XX 802.11 Wireless LAN Controller" + to determine whether you own such a device. + + This driver supports the new BCM43xx IEEE 802.11G devices, but not + the old IEEE 802.11B devices. Old devices are supported by + the b43legacy driver. + Note that this has nothing to do with the standard that your AccessPoint + supports (A, B, G or a combination). + IEEE 802.11G devices can talk to IEEE 802.11B AccessPoints. + + It is safe to include both b43 and b43legacy as the underlying glue + layer will automatically load the correct version for your device. + + This driver uses V4 firmware, which must be installed separately using + b43-fwcutter. + + This driver can be built as a module (recommended) that will be called "b43". + If unsure, say M. + +# Auto-select SSB PCI-HOST support, if possible +config B43_PCI_AUTOSELECT + bool + depends on B43 && SSB_PCIHOST_POSSIBLE + select SSB_PCIHOST + default y + +# Auto-select SSB PCICORE driver, if possible +config B43_PCICORE_AUTOSELECT + bool + depends on B43 && SSB_DRIVER_PCICORE_POSSIBLE + select SSB_DRIVER_PCICORE + default y + +config B43_PCMCIA + bool "Broadcom 43xx PCMCIA device support (EXPERIMENTAL)" + depends on B43 && SSB_PCMCIAHOST_POSSIBLE && EXPERIMENTAL + select SSB_PCMCIAHOST + ---help--- + Broadcom 43xx PCMCIA device support. + + Support for 16bit PCMCIA devices. + Please note that most PC-CARD devices are _NOT_ 16bit PCMCIA + devices, but 32bit CardBUS devices. CardBUS devices are supported + out of the box by b43. + + With this config option you can drive b43 cards in + CompactFlash formfactor in a PCMCIA adaptor. + CF b43 cards can sometimes be found in handheld PCs. + + It's safe to select Y here, even if you don't have a B43 PCMCIA device. + + If unsure, say N. + +# LED support +config B43_LEDS + bool + depends on B43 && MAC80211_LEDS + default y + +# RFKILL support +config B43_RFKILL + bool + depends on B43 && RFKILL && RFKILL_INPUT && INPUT_POLLDEV + default y + +config B43_DEBUG + bool "Broadcom 43xx debugging" + depends on B43 + ---help--- + Broadcom 43xx debugging messages. + + Say Y, if you want to find out why the driver does not + work for you. 
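The b43 Kconfig entry above selects FW_LOADER because the driver pulls its V4 firmware (cut out with b43-fwcutter) in at runtime instead of compiling it in. A minimal sketch of that request_firmware() pattern, for orientation only; the function name and firmware path here are illustrative and are not the driver's actual loader:

    #include <linux/device.h>
    #include <linux/firmware.h>

    /* Illustrative FW_LOADER usage: fetch one image from userspace
     * (typically /lib/firmware) and hand it back to the caller, who
     * must call release_firmware() when finished with it. */
    static int example_request_one_fw(struct device *dev,
                                      const struct firmware **fw)
    {
            int err;

            err = request_firmware(fw, "b43/example.fw", dev);
            if (err) {
                    dev_err(dev, "firmware request failed: %d\n", err);
                    return err;
            }

            /* (*fw)->data and (*fw)->size describe the loaded image. */
            return 0;
    }

The real driver requests several such images (microcode, PCM, initial values) and checks their headers before uploading them to the device.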
+ +config B43_DMA + bool + depends on B43 +config B43_PIO + bool + depends on B43 + +choice + prompt "Broadcom 43xx data transfer mode" + depends on B43 + default B43_DMA_AND_PIO_MODE + +config B43_DMA_AND_PIO_MODE + bool "DMA + PIO" + select B43_DMA + select B43_PIO + ---help--- + Include both, Direct Memory Access (DMA) and Programmed I/O (PIO) + data transfer modes. + The actually used mode is selectable through the module + parameter "pio". If the module parameter is pio=0, DMA is used. + Otherwise PIO is used. DMA is default. + + If unsure, choose this option. + +config B43_DMA_MODE + bool "DMA (Direct Memory Access) only" + select B43_DMA + ---help--- + Only include Direct Memory Access (DMA). + This reduces the size of the driver module, by omitting the PIO code. + +config B43_PIO_MODE + bool "PIO (Programmed I/O) only" + select B43_PIO + ---help--- + Only include Programmed I/O (PIO). + This reduces the size of the driver module, by omitting the DMA code. + Please note that PIO transfers are slow (compared to DMA). + + Also note that not all devices of the 43xx series support PIO. + The 4306 (Apple Airport Extreme and others) supports PIO, while + the 4318 is known to _not_ support PIO. + + Only use PIO, if DMA does not work for you. + +endchoice diff --git a/drivers/net/wireless/b43/Makefile b/drivers/net/wireless/b43/Makefile new file mode 100644 index 0000000000000000000000000000000000000000..485e59e2dfabc2c5104fbb61f7cddd19481b743b --- /dev/null +++ b/drivers/net/wireless/b43/Makefile @@ -0,0 +1,20 @@ +# b43 core +b43-y += main.o +b43-y += tables.o +b43-y += phy.o +b43-y += sysfs.o +b43-y += xmit.o +b43-y += lo.o +# b43 RFKILL button support +b43-$(CONFIG_B43_RFKILL) += rfkill.o +# b43 LED support +b43-$(CONFIG_B43_LEDS) += leds.o +# b43 PCMCIA support +b43-$(CONFIG_B43_PCMCIA) += pcmcia.o +# b43 debugging +b43-$(CONFIG_B43_DEBUG) += debugfs.o +# b43 DMA and PIO +b43-$(CONFIG_B43_DMA) += dma.o +b43-$(CONFIG_B43_PIO) += pio.o + +obj-$(CONFIG_B43) += b43.o diff --git a/drivers/net/wireless/b43/b43.h b/drivers/net/wireless/b43/b43.h new file mode 100644 index 0000000000000000000000000000000000000000..a28ad230d63e884fee652d2fe420f3137f18e2c7 --- /dev/null +++ b/drivers/net/wireless/b43/b43.h @@ -0,0 +1,854 @@ +#ifndef B43_H_ +#define B43_H_ + +#include +#include +#include +#include +#include +#include + +#include "debugfs.h" +#include "leds.h" +#include "rfkill.h" +#include "lo.h" +#include "phy.h" + +#ifdef CONFIG_B43_DEBUG +# define B43_DEBUG 1 +#else +# define B43_DEBUG 0 +#endif + +#define B43_RX_MAX_SSI 60 + +/* MMIO offsets */ +#define B43_MMIO_DMA0_REASON 0x20 +#define B43_MMIO_DMA0_IRQ_MASK 0x24 +#define B43_MMIO_DMA1_REASON 0x28 +#define B43_MMIO_DMA1_IRQ_MASK 0x2C +#define B43_MMIO_DMA2_REASON 0x30 +#define B43_MMIO_DMA2_IRQ_MASK 0x34 +#define B43_MMIO_DMA3_REASON 0x38 +#define B43_MMIO_DMA3_IRQ_MASK 0x3C +#define B43_MMIO_DMA4_REASON 0x40 +#define B43_MMIO_DMA4_IRQ_MASK 0x44 +#define B43_MMIO_DMA5_REASON 0x48 +#define B43_MMIO_DMA5_IRQ_MASK 0x4C +#define B43_MMIO_MACCTL 0x120 +#define B43_MMIO_STATUS2_BITFIELD 0x124 +#define B43_MMIO_GEN_IRQ_REASON 0x128 +#define B43_MMIO_GEN_IRQ_MASK 0x12C +#define B43_MMIO_RAM_CONTROL 0x130 +#define B43_MMIO_RAM_DATA 0x134 +#define B43_MMIO_PS_STATUS 0x140 +#define B43_MMIO_RADIO_HWENABLED_HI 0x158 +#define B43_MMIO_SHM_CONTROL 0x160 +#define B43_MMIO_SHM_DATA 0x164 +#define B43_MMIO_SHM_DATA_UNALIGNED 0x166 +#define B43_MMIO_XMITSTAT_0 0x170 +#define B43_MMIO_XMITSTAT_1 0x174 +#define B43_MMIO_REV3PLUS_TSF_LOW 0x180 /* core rev >= 3 
only */ +#define B43_MMIO_REV3PLUS_TSF_HIGH 0x184 /* core rev >= 3 only */ + +/* 32-bit DMA */ +#define B43_MMIO_DMA32_BASE0 0x200 +#define B43_MMIO_DMA32_BASE1 0x220 +#define B43_MMIO_DMA32_BASE2 0x240 +#define B43_MMIO_DMA32_BASE3 0x260 +#define B43_MMIO_DMA32_BASE4 0x280 +#define B43_MMIO_DMA32_BASE5 0x2A0 +/* 64-bit DMA */ +#define B43_MMIO_DMA64_BASE0 0x200 +#define B43_MMIO_DMA64_BASE1 0x240 +#define B43_MMIO_DMA64_BASE2 0x280 +#define B43_MMIO_DMA64_BASE3 0x2C0 +#define B43_MMIO_DMA64_BASE4 0x300 +#define B43_MMIO_DMA64_BASE5 0x340 +/* PIO */ +#define B43_MMIO_PIO1_BASE 0x300 +#define B43_MMIO_PIO2_BASE 0x310 +#define B43_MMIO_PIO3_BASE 0x320 +#define B43_MMIO_PIO4_BASE 0x330 + +#define B43_MMIO_PHY_VER 0x3E0 +#define B43_MMIO_PHY_RADIO 0x3E2 +#define B43_MMIO_PHY0 0x3E6 +#define B43_MMIO_ANTENNA 0x3E8 +#define B43_MMIO_CHANNEL 0x3F0 +#define B43_MMIO_CHANNEL_EXT 0x3F4 +#define B43_MMIO_RADIO_CONTROL 0x3F6 +#define B43_MMIO_RADIO_DATA_HIGH 0x3F8 +#define B43_MMIO_RADIO_DATA_LOW 0x3FA +#define B43_MMIO_PHY_CONTROL 0x3FC +#define B43_MMIO_PHY_DATA 0x3FE +#define B43_MMIO_MACFILTER_CONTROL 0x420 +#define B43_MMIO_MACFILTER_DATA 0x422 +#define B43_MMIO_RCMTA_COUNT 0x43C +#define B43_MMIO_RADIO_HWENABLED_LO 0x49A +#define B43_MMIO_GPIO_CONTROL 0x49C +#define B43_MMIO_GPIO_MASK 0x49E +#define B43_MMIO_TSF_0 0x632 /* core rev < 3 only */ +#define B43_MMIO_TSF_1 0x634 /* core rev < 3 only */ +#define B43_MMIO_TSF_2 0x636 /* core rev < 3 only */ +#define B43_MMIO_TSF_3 0x638 /* core rev < 3 only */ +#define B43_MMIO_RNG 0x65A +#define B43_MMIO_POWERUP_DELAY 0x6A8 + +/* SPROM boardflags_lo values */ +#define B43_BFL_BTCOEXIST 0x0001 /* implements Bluetooth coexistance */ +#define B43_BFL_PACTRL 0x0002 /* GPIO 9 controlling the PA */ +#define B43_BFL_AIRLINEMODE 0x0004 /* implements GPIO 13 radio disable indication */ +#define B43_BFL_RSSI 0x0008 /* software calculates nrssi slope. */ +#define B43_BFL_ENETSPI 0x0010 /* has ephy roboswitch spi */ +#define B43_BFL_XTAL_NOSLOW 0x0020 /* no slow clock available */ +#define B43_BFL_CCKHIPWR 0x0040 /* can do high power CCK transmission */ +#define B43_BFL_ENETADM 0x0080 /* has ADMtek switch */ +#define B43_BFL_ENETVLAN 0x0100 /* can do vlan */ +#define B43_BFL_AFTERBURNER 0x0200 /* supports Afterburner mode */ +#define B43_BFL_NOPCI 0x0400 /* leaves PCI floating */ +#define B43_BFL_FEM 0x0800 /* supports the Front End Module */ +#define B43_BFL_EXTLNA 0x1000 /* has an external LNA */ +#define B43_BFL_HGPA 0x2000 /* had high gain PA */ +#define B43_BFL_BTCMOD 0x4000 /* BFL_BTCOEXIST is given in alternate GPIOs */ +#define B43_BFL_ALTIQ 0x8000 /* alternate I/Q settings */ + +/* GPIO register offset, in both ChipCommon and PCI core. 
*/ +#define B43_GPIO_CONTROL 0x6c + +/* SHM Routing */ +enum { + B43_SHM_UCODE, /* Microcode memory */ + B43_SHM_SHARED, /* Shared memory */ + B43_SHM_SCRATCH, /* Scratch memory */ + B43_SHM_HW, /* Internal hardware register */ + B43_SHM_RCMTA, /* Receive match transmitter address (rev >= 5 only) */ +}; +/* SHM Routing modifiers */ +#define B43_SHM_AUTOINC_R 0x0200 /* Auto-increment address on read */ +#define B43_SHM_AUTOINC_W 0x0100 /* Auto-increment address on write */ +#define B43_SHM_AUTOINC_RW (B43_SHM_AUTOINC_R | \ + B43_SHM_AUTOINC_W) + +/* Misc SHM_SHARED offsets */ +#define B43_SHM_SH_WLCOREREV 0x0016 /* 802.11 core revision */ +#define B43_SHM_SH_PCTLWDPOS 0x0008 +#define B43_SHM_SH_RXPADOFF 0x0034 /* RX Padding data offset (PIO only) */ +#define B43_SHM_SH_PHYVER 0x0050 /* PHY version */ +#define B43_SHM_SH_PHYTYPE 0x0052 /* PHY type */ +#define B43_SHM_SH_ANTSWAP 0x005C /* Antenna swap threshold */ +#define B43_SHM_SH_HOSTFLO 0x005E /* Hostflags for ucode options (low) */ +#define B43_SHM_SH_HOSTFHI 0x0060 /* Hostflags for ucode options (high) */ +#define B43_SHM_SH_RFATT 0x0064 /* Current radio attenuation value */ +#define B43_SHM_SH_RADAR 0x0066 /* Radar register */ +#define B43_SHM_SH_PHYTXNOI 0x006E /* PHY noise directly after TX (lower 8bit only) */ +#define B43_SHM_SH_RFRXSP1 0x0072 /* RF RX SP Register 1 */ +#define B43_SHM_SH_CHAN 0x00A0 /* Current channel (low 8bit only) */ +#define B43_SHM_SH_CHAN_5GHZ 0x0100 /* Bit set, if 5Ghz channel */ +#define B43_SHM_SH_BCMCFIFOID 0x0108 /* Last posted cookie to the bcast/mcast FIFO */ +/* SHM_SHARED TX FIFO variables */ +#define B43_SHM_SH_SIZE01 0x0098 /* TX FIFO size for FIFO 0 (low) and 1 (high) */ +#define B43_SHM_SH_SIZE23 0x009A /* TX FIFO size for FIFO 2 and 3 */ +#define B43_SHM_SH_SIZE45 0x009C /* TX FIFO size for FIFO 4 and 5 */ +#define B43_SHM_SH_SIZE67 0x009E /* TX FIFO size for FIFO 6 and 7 */ +/* SHM_SHARED background noise */ +#define B43_SHM_SH_JSSI0 0x0088 /* Measure JSSI 0 */ +#define B43_SHM_SH_JSSI1 0x008A /* Measure JSSI 1 */ +#define B43_SHM_SH_JSSIAUX 0x008C /* Measure JSSI AUX */ +/* SHM_SHARED crypto engine */ +#define B43_SHM_SH_DEFAULTIV 0x003C /* Default IV location */ +#define B43_SHM_SH_NRRXTRANS 0x003E /* # of soft RX transmitter addresses (max 8) */ +#define B43_SHM_SH_KTP 0x0056 /* Key table pointer */ +#define B43_SHM_SH_TKIPTSCTTAK 0x0318 +#define B43_SHM_SH_KEYIDXBLOCK 0x05D4 /* Key index/algorithm block (v4 firmware) */ +#define B43_SHM_SH_PSM 0x05F4 /* PSM transmitter address match block (rev < 5) */ +/* SHM_SHARED WME variables */ +#define B43_SHM_SH_EDCFSTAT 0x000E /* EDCF status */ +#define B43_SHM_SH_TXFCUR 0x0030 /* TXF current index */ +#define B43_SHM_SH_EDCFQ 0x0240 /* EDCF Q info */ +/* SHM_SHARED powersave mode related */ +#define B43_SHM_SH_SLOTT 0x0010 /* Slot time */ +#define B43_SHM_SH_DTIMPER 0x0012 /* DTIM period */ +#define B43_SHM_SH_NOSLPZNATDTIM 0x004C /* NOSLPZNAT DTIM */ +/* SHM_SHARED beacon variables */ +#define B43_SHM_SH_BTL0 0x0018 /* Beacon template length 0 */ +#define B43_SHM_SH_BTL1 0x001A /* Beacon template length 1 */ +#define B43_SHM_SH_BTSFOFF 0x001C /* Beacon TSF offset */ +#define B43_SHM_SH_TIMBPOS 0x001E /* TIM B position in beacon */ +#define B43_SHM_SH_SFFBLIM 0x0044 /* Short frame fallback retry limit */ +#define B43_SHM_SH_LFFBLIM 0x0046 /* Long frame fallback retry limit */ +#define B43_SHM_SH_BEACPHYCTL 0x0054 /* Beacon PHY TX control word (see PHY TX control) */ +/* SHM_SHARED ACK/CTS control */ +#define B43_SHM_SH_ACKCTSPHYCTL 0x0022 /* 
ACK/CTS PHY control word (see PHY TX control) */ +/* SHM_SHARED probe response variables */ +#define B43_SHM_SH_PRSSID 0x0160 /* Probe Response SSID */ +#define B43_SHM_SH_PRSSIDLEN 0x0048 /* Probe Response SSID length */ +#define B43_SHM_SH_PRTLEN 0x004A /* Probe Response template length */ +#define B43_SHM_SH_PRMAXTIME 0x0074 /* Probe Response max time */ +#define B43_SHM_SH_PRPHYCTL 0x0188 /* Probe Response PHY TX control word */ +/* SHM_SHARED rate tables */ +#define B43_SHM_SH_OFDMDIRECT 0x01C0 /* Pointer to OFDM direct map */ +#define B43_SHM_SH_OFDMBASIC 0x01E0 /* Pointer to OFDM basic rate map */ +#define B43_SHM_SH_CCKDIRECT 0x0200 /* Pointer to CCK direct map */ +#define B43_SHM_SH_CCKBASIC 0x0220 /* Pointer to CCK basic rate map */ +/* SHM_SHARED microcode soft registers */ +#define B43_SHM_SH_UCODEREV 0x0000 /* Microcode revision */ +#define B43_SHM_SH_UCODEPATCH 0x0002 /* Microcode patchlevel */ +#define B43_SHM_SH_UCODEDATE 0x0004 /* Microcode date */ +#define B43_SHM_SH_UCODETIME 0x0006 /* Microcode time */ +#define B43_SHM_SH_UCODESTAT 0x0040 /* Microcode debug status code */ +#define B43_SHM_SH_UCODESTAT_INVALID 0 +#define B43_SHM_SH_UCODESTAT_INIT 1 +#define B43_SHM_SH_UCODESTAT_ACTIVE 2 +#define B43_SHM_SH_UCODESTAT_SUSP 3 /* suspended */ +#define B43_SHM_SH_UCODESTAT_SLEEP 4 /* asleep (PS) */ +#define B43_SHM_SH_MAXBFRAMES 0x0080 /* Maximum number of frames in a burst */ +#define B43_SHM_SH_SPUWKUP 0x0094 /* pre-wakeup for synth PU in us */ +#define B43_SHM_SH_PRETBTT 0x0096 /* pre-TBTT in us */ + +/* SHM_SCRATCH offsets */ +#define B43_SHM_SC_MINCONT 0x0003 /* Minimum contention window */ +#define B43_SHM_SC_MAXCONT 0x0004 /* Maximum contention window */ +#define B43_SHM_SC_CURCONT 0x0005 /* Current contention window */ +#define B43_SHM_SC_SRLIMIT 0x0006 /* Short retry count limit */ +#define B43_SHM_SC_LRLIMIT 0x0007 /* Long retry count limit */ +#define B43_SHM_SC_DTIMC 0x0008 /* Current DTIM count */ +#define B43_SHM_SC_BTL0LEN 0x0015 /* Beacon 0 template length */ +#define B43_SHM_SC_BTL1LEN 0x0016 /* Beacon 1 template length */ +#define B43_SHM_SC_SCFB 0x0017 /* Short frame transmit count threshold for rate fallback */ +#define B43_SHM_SC_LCFB 0x0018 /* Long frame transmit count threshold for rate fallback */ + +/* Hardware Radio Enable masks */ +#define B43_MMIO_RADIO_HWENABLED_HI_MASK (1 << 16) +#define B43_MMIO_RADIO_HWENABLED_LO_MASK (1 << 4) + +/* HostFlags. 
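+ * These flag bits are handed to the microcode through the SHM_SHARED
+ * hostflag words (B43_SHM_SH_HOSTFLO / B43_SHM_SH_HOSTFHI).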
See b43_hf_read/write() */ +#define B43_HF_ANTDIVHELP 0x00000001 /* ucode antenna div helper */ +#define B43_HF_SYMW 0x00000002 /* G-PHY SYM workaround */ +#define B43_HF_RXPULLW 0x00000004 /* RX pullup workaround */ +#define B43_HF_CCKBOOST 0x00000008 /* 4dB CCK power boost (exclusive with OFDM boost) */ +#define B43_HF_BTCOEX 0x00000010 /* Bluetooth coexistence */ +#define B43_HF_GDCW 0x00000020 /* G-PHY DC canceller filter bw workaround */ +#define B43_HF_OFDMPABOOST 0x00000040 /* Enable PA gain boost for OFDM */ +#define B43_HF_ACPR 0x00000080 /* Disable for Japan, channel 14 */ +#define B43_HF_EDCF 0x00000100 /* on if WME and MAC suspended */ +#define B43_HF_TSSIRPSMW 0x00000200 /* TSSI reset PSM ucode workaround */ +#define B43_HF_DSCRQ 0x00000400 /* Disable slow clock request in ucode */ +#define B43_HF_ACIW 0x00000800 /* ACI workaround: shift bits by 2 on PHY CRS */ +#define B43_HF_2060W 0x00001000 /* 2060 radio workaround */ +#define B43_HF_RADARW 0x00002000 /* Radar workaround */ +#define B43_HF_USEDEFKEYS 0x00004000 /* Enable use of default keys */ +#define B43_HF_BT4PRIOCOEX 0x00010000 /* Bluetooth 2-priority coexistence */ +#define B43_HF_FWKUP 0x00020000 /* Fast wake-up ucode */ +#define B43_HF_VCORECALC 0x00040000 /* Force VCO recalculation when powering up synthpu */ +#define B43_HF_PCISCW 0x00080000 /* PCI slow clock workaround */ +#define B43_HF_4318TSSI 0x00200000 /* 4318 TSSI */ +#define B43_HF_FBCMCFIFO 0x00400000 /* Flush bcast/mcast FIFO immediately */ +#define B43_HF_HWPCTL 0x00800000 /* Enable hardware power control */ +#define B43_HF_BTCOEXALT 0x01000000 /* Bluetooth coexistence in alternate pins */ +#define B43_HF_TXBTCHECK 0x02000000 /* Bluetooth check during transmission */ +#define B43_HF_SKCFPUP 0x04000000 /* Skip CFP update */ + +/* MacFilter offsets.
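+ * Each value selects a slot in the hardware MAC address filter: the slot
+ * index is written to B43_MMIO_MACFILTER_CONTROL and the address bytes are
+ * then fed in through B43_MMIO_MACFILTER_DATA.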
*/ +#define B43_MACFILTER_SELF 0x0000 +#define B43_MACFILTER_BSSID 0x0003 + +/* PowerControl */ +#define B43_PCTL_IN 0xB0 +#define B43_PCTL_OUT 0xB4 +#define B43_PCTL_OUTENABLE 0xB8 +#define B43_PCTL_XTAL_POWERUP 0x40 +#define B43_PCTL_PLL_POWERDOWN 0x80 + +/* PowerControl Clock Modes */ +#define B43_PCTL_CLK_FAST 0x00 +#define B43_PCTL_CLK_SLOW 0x01 +#define B43_PCTL_CLK_DYNAMIC 0x02 + +#define B43_PCTL_FORCE_SLOW 0x0800 +#define B43_PCTL_FORCE_PLL 0x1000 +#define B43_PCTL_DYN_XTAL 0x2000 + +/* PHYVersioning */ +#define B43_PHYTYPE_A 0x00 +#define B43_PHYTYPE_B 0x01 +#define B43_PHYTYPE_G 0x02 + +/* PHYRegisters */ +#define B43_PHY_ILT_A_CTRL 0x0072 +#define B43_PHY_ILT_A_DATA1 0x0073 +#define B43_PHY_ILT_A_DATA2 0x0074 +#define B43_PHY_G_LO_CONTROL 0x0810 +#define B43_PHY_ILT_G_CTRL 0x0472 +#define B43_PHY_ILT_G_DATA1 0x0473 +#define B43_PHY_ILT_G_DATA2 0x0474 +#define B43_PHY_A_PCTL 0x007B +#define B43_PHY_G_PCTL 0x0029 +#define B43_PHY_A_CRS 0x0029 +#define B43_PHY_RADIO_BITFIELD 0x0401 +#define B43_PHY_G_CRS 0x0429 +#define B43_PHY_NRSSILT_CTRL 0x0803 +#define B43_PHY_NRSSILT_DATA 0x0804 + +/* RadioRegisters */ +#define B43_RADIOCTL_ID 0x01 + +/* MAC Control bitfield */ +#define B43_MACCTL_ENABLED 0x00000001 /* MAC Enabled */ +#define B43_MACCTL_PSM_RUN 0x00000002 /* Run Microcode */ +#define B43_MACCTL_PSM_JMP0 0x00000004 /* Microcode jump to 0 */ +#define B43_MACCTL_SHM_ENABLED 0x00000100 /* SHM Enabled */ +#define B43_MACCTL_SHM_UPPER 0x00000200 /* SHM Upper */ +#define B43_MACCTL_IHR_ENABLED 0x00000400 /* IHR Region Enabled */ +#define B43_MACCTL_PSM_DBG 0x00002000 /* Microcode debugging enabled */ +#define B43_MACCTL_GPOUTSMSK 0x0000C000 /* GPOUT Select Mask */ +#define B43_MACCTL_BE 0x00010000 /* Big Endian mode */ +#define B43_MACCTL_INFRA 0x00020000 /* Infrastructure mode */ +#define B43_MACCTL_AP 0x00040000 /* AccessPoint mode */ +#define B43_MACCTL_RADIOLOCK 0x00080000 /* Radio lock */ +#define B43_MACCTL_BEACPROMISC 0x00100000 /* Beacon Promiscuous */ +#define B43_MACCTL_KEEP_BADPLCP 0x00200000 /* Keep frames with bad PLCP */ +#define B43_MACCTL_KEEP_CTL 0x00400000 /* Keep control frames */ +#define B43_MACCTL_KEEP_BAD 0x00800000 /* Keep bad frames (FCS) */ +#define B43_MACCTL_PROMISC 0x01000000 /* Promiscuous mode */ +#define B43_MACCTL_HWPS 0x02000000 /* Hardware Power Saving */ +#define B43_MACCTL_AWAKE 0x04000000 /* Device is awake */ +#define B43_MACCTL_CLOSEDNET 0x08000000 /* Closed net (no SSID bcast) */ +#define B43_MACCTL_TBTTHOLD 0x10000000 /* TBTT Hold */ +#define B43_MACCTL_DISCTXSTAT 0x20000000 /* Discard TX status */ +#define B43_MACCTL_DISCPMQ 0x40000000 /* Discard Power Management Queue */ +#define B43_MACCTL_GMODE 0x80000000 /* G Mode */ + +/* 802.11 core specific TM State Low flags */ +#define B43_TMSLOW_GMODE 0x20000000 /* G Mode Enable */ +#define B43_TMSLOW_PLLREFSEL 0x00200000 /* PLL Frequency Reference Select */ +#define B43_TMSLOW_MACPHYCLKEN 0x00100000 /* MAC PHY Clock Control Enable (rev >= 5) */ +#define B43_TMSLOW_PHYRESET 0x00080000 /* PHY Reset */ +#define B43_TMSLOW_PHYCLKEN 0x00040000 /* PHY Clock Enable */ + +/* 802.11 core specific TM State High flags */ +#define B43_TMSHIGH_FCLOCK 0x00040000 /* Fast Clock Available (rev >= 5) */ +#define B43_TMSHIGH_APHY 0x00020000 /* A-PHY available (rev >= 5) */ +#define B43_TMSHIGH_GPHY 0x00010000 /* G-PHY available (rev >= 5) */ + +/* Generic-Interrupt reasons. 
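+ * These bits are reported in the generic IRQ reason register and saved in
+ * b43_wldev->irq_reason by the interrupt handler.  B43_IRQ_MASKTEMPLATE below
+ * is the default set of interrupt sources the driver keeps enabled.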
*/ +#define B43_IRQ_MAC_SUSPENDED 0x00000001 +#define B43_IRQ_BEACON 0x00000002 +#define B43_IRQ_TBTT_INDI 0x00000004 +#define B43_IRQ_BEACON_TX_OK 0x00000008 +#define B43_IRQ_BEACON_CANCEL 0x00000010 +#define B43_IRQ_ATIM_END 0x00000020 +#define B43_IRQ_PMQ 0x00000040 +#define B43_IRQ_PIO_WORKAROUND 0x00000100 +#define B43_IRQ_MAC_TXERR 0x00000200 +#define B43_IRQ_PHY_TXERR 0x00000800 +#define B43_IRQ_PMEVENT 0x00001000 +#define B43_IRQ_TIMER0 0x00002000 +#define B43_IRQ_TIMER1 0x00004000 +#define B43_IRQ_DMA 0x00008000 +#define B43_IRQ_TXFIFO_FLUSH_OK 0x00010000 +#define B43_IRQ_CCA_MEASURE_OK 0x00020000 +#define B43_IRQ_NOISESAMPLE_OK 0x00040000 +#define B43_IRQ_UCODE_DEBUG 0x08000000 +#define B43_IRQ_RFKILL 0x10000000 +#define B43_IRQ_TX_OK 0x20000000 +#define B43_IRQ_PHY_G_CHANGED 0x40000000 +#define B43_IRQ_TIMEOUT 0x80000000 + +#define B43_IRQ_ALL 0xFFFFFFFF +#define B43_IRQ_MASKTEMPLATE (B43_IRQ_MAC_SUSPENDED | \ + B43_IRQ_BEACON | \ + B43_IRQ_TBTT_INDI | \ + B43_IRQ_ATIM_END | \ + B43_IRQ_PMQ | \ + B43_IRQ_MAC_TXERR | \ + B43_IRQ_PHY_TXERR | \ + B43_IRQ_DMA | \ + B43_IRQ_TXFIFO_FLUSH_OK | \ + B43_IRQ_NOISESAMPLE_OK | \ + B43_IRQ_UCODE_DEBUG | \ + B43_IRQ_RFKILL | \ + B43_IRQ_TX_OK) + +/* Device specific rate values. + * The actual values defined here are (rate_in_mbps * 2). + * Some code depends on this. Don't change it. */ +#define B43_CCK_RATE_1MB 0x02 +#define B43_CCK_RATE_2MB 0x04 +#define B43_CCK_RATE_5MB 0x0B +#define B43_CCK_RATE_11MB 0x16 +#define B43_OFDM_RATE_6MB 0x0C +#define B43_OFDM_RATE_9MB 0x12 +#define B43_OFDM_RATE_12MB 0x18 +#define B43_OFDM_RATE_18MB 0x24 +#define B43_OFDM_RATE_24MB 0x30 +#define B43_OFDM_RATE_36MB 0x48 +#define B43_OFDM_RATE_48MB 0x60 +#define B43_OFDM_RATE_54MB 0x6C +/* Convert a b43 rate value to a rate in 100kbps */ +#define B43_RATE_TO_BASE100KBPS(rate) (((rate) * 10) / 2) + +#define B43_DEFAULT_SHORT_RETRY_LIMIT 7 +#define B43_DEFAULT_LONG_RETRY_LIMIT 4 + +/* Max size of a security key */ +#define B43_SEC_KEYSIZE 16 +/* Security algorithms. */ +enum { + B43_SEC_ALGO_NONE = 0, /* unencrypted, as of TX header. */ + B43_SEC_ALGO_WEP40, + B43_SEC_ALGO_TKIP, + B43_SEC_ALGO_AES, + B43_SEC_ALGO_WEP104, + B43_SEC_ALGO_AES_LEGACY, +}; + +struct b43_dmaring; +struct b43_pioqueue; + +/* The firmware file header */ +#define B43_FW_TYPE_UCODE 'u' +#define B43_FW_TYPE_PCM 'p' +#define B43_FW_TYPE_IV 'i' +struct b43_fw_header { + /* File type */ + u8 type; + /* File format version */ + u8 ver; + u8 __padding[2]; + /* Size of the data. For ucode and PCM this is in bytes. + * For IV this is number-of-ivs. */ + __be32 size; +} __attribute__((__packed__)); + +/* Initial Value file format */ +#define B43_IV_OFFSET_MASK 0x7FFF +#define B43_IV_32BIT 0x8000 +struct b43_iv { + __be16 offset_size; + union { + __be16 d16; + __be32 d32; + } data __attribute__((__packed__)); +} __attribute__((__packed__)); + + +#define B43_PHYMODE(phytype) (1 << (phytype)) +#define B43_PHYMODE_A B43_PHYMODE(B43_PHYTYPE_A) +#define B43_PHYMODE_B B43_PHYMODE(B43_PHYTYPE_B) +#define B43_PHYMODE_G B43_PHYMODE(B43_PHYTYPE_G) + +struct b43_phy { + /* Possible PHYMODEs on this PHY */ + u8 possible_phymodes; + /* GMODE bit enabled? */ + bool gmode; + /* Possible ieee80211 subsystem hwmodes for this PHY. + * Which mode is selected, depends on thr GMODE enabled bit */ +#define B43_MAX_PHYHWMODES 2 + struct ieee80211_hw_mode hwmodes[B43_MAX_PHYHWMODES]; + + /* Analog Type */ + u8 analog; + /* B43_PHYTYPE_ */ + u8 type; + /* PHY revision number. 
*/ + u8 rev; + + /* Radio versioning */ + u16 radio_manuf; /* Radio manufacturer */ + u16 radio_ver; /* Radio version */ + u8 radio_rev; /* Radio revision */ + + bool locked; /* Only used in b43_phy_{un}lock() */ + bool dyn_tssi_tbl; /* tssi2dbm is kmalloc()ed. */ + + /* ACI (adjacent channel interference) flags. */ + bool aci_enable; + bool aci_wlan_automatic; + bool aci_hw_rssi; + + /* Radio switched on/off */ + bool radio_on; + struct { + /* Values saved when turning the radio off. + * They are needed when turning it on again. */ + bool valid; + u16 rfover; + u16 rfoverval; + } radio_off_context; + + u16 minlowsig[2]; + u16 minlowsigpos[2]; + + /* TSSI to dBm table in use */ + const s8 *tssi2dbm; + /* Target idle TSSI */ + int tgt_idle_tssi; + /* Current idle TSSI */ + int cur_idle_tssi; + + /* LocalOscillator control values. */ + struct b43_txpower_lo_control *lo_control; + /* Values from b43_calc_loopback_gain() */ + s16 max_lb_gain; /* Maximum Loopback gain in hdB */ + s16 trsw_rx_gain; /* TRSW RX gain in hdB */ + s16 lna_lod_gain; /* LNA lod */ + s16 lna_gain; /* LNA */ + s16 pga_gain; /* PGA */ + + /* PHY lock for core.rev < 3 + * This lock is only used by b43_phy_{un}lock() + */ + spinlock_t lock; + + /* Desired TX power level (in dBm). + * This is set by the user and adjusted in b43_phy_xmitpower(). */ + u8 power_level; + /* A-PHY TX Power control value. */ + u16 txpwr_offset; + + /* Current TX power level attenuation control values */ + struct b43_bbatt bbatt; + struct b43_rfatt rfatt; + u8 tx_control; /* B43_TXCTL_XXX */ +#ifdef CONFIG_B43_DEBUG + bool manual_txpower_control; /* Manual TX-power control enabled? */ +#endif + /* Hardware Power Control enabled? */ + bool hardware_power_control; + + /* Current Interference Mitigation mode */ + int interfmode; + /* Stack of saved values from the Interference Mitigation code. + * Each value in the stack is layed out as follows: + * bit 0-11: offset + * bit 12-15: register ID + * bit 16-32: value + * register ID is: 0x1 PHY, 0x2 Radio, 0x3 ILT + */ +#define B43_INTERFSTACK_SIZE 26 + u32 interfstack[B43_INTERFSTACK_SIZE]; //FIXME: use a data structure + + /* Saved values from the NRSSI Slope calculation */ + s16 nrssi[2]; + s32 nrssislope; + /* In memory nrssi lookup table. */ + s8 nrssi_lt[64]; + + /* current channel */ + u8 channel; + + u16 lofcal; + + u16 initval; //FIXME rename? +}; + +/* Data structures for DMA transmission, per 80211 core. */ +struct b43_dma { + struct b43_dmaring *tx_ring0; + struct b43_dmaring *tx_ring1; + struct b43_dmaring *tx_ring2; + struct b43_dmaring *tx_ring3; + struct b43_dmaring *tx_ring4; + struct b43_dmaring *tx_ring5; + + struct b43_dmaring *rx_ring0; + struct b43_dmaring *rx_ring3; /* only available on core.rev < 5 */ +}; + +/* Data structures for PIO transmission, per 80211 core. */ +struct b43_pio { + struct b43_pioqueue *queue0; + struct b43_pioqueue *queue1; + struct b43_pioqueue *queue2; + struct b43_pioqueue *queue3; +}; + +/* Context information for a noise calculation (Link Quality). */ +struct b43_noise_calculation { + u8 channel_at_start; + bool calculation_running; + u8 nr_samples; + s8 samples[8][4]; +}; + +struct b43_stats { + u8 link_noise; + /* Store the last TX/RX times here for updating the leds. */ + unsigned long last_tx; + unsigned long last_rx; +}; + +struct b43_key { + /* If keyconf is NULL, this key is disabled. + * keyconf is a cookie. Don't derefenrence it outside of the set_key + * path, because b43 doesn't own it. 
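+ * (mac80211 owns the keyconf object, so it is only guaranteed to be valid
+ * within the set_key callback.)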
*/ + struct ieee80211_key_conf *keyconf; + u8 algorithm; +}; + +struct b43_wldev; + +/* Data structure for the WLAN parts (802.11 cores) of the b43 chip. */ +struct b43_wl { + /* Pointer to the active wireless device on this chip */ + struct b43_wldev *current_dev; + /* Pointer to the ieee80211 hardware data structure */ + struct ieee80211_hw *hw; + + spinlock_t irq_lock; + struct mutex mutex; + spinlock_t leds_lock; + + /* We can only have one operating interface (802.11 core) + * at a time. General information about this interface follows. + */ + + /* Opaque ID of the operating interface from the ieee80211 + * subsystem. Do not modify. + */ + int if_id; + /* The MAC address of the operating interface. */ + u8 mac_addr[ETH_ALEN]; + /* Current BSSID */ + u8 bssid[ETH_ALEN]; + /* Interface type. (IEEE80211_IF_TYPE_XXX) */ + int if_type; + /* Is the card operating in AP, STA or IBSS mode? */ + bool operating; + /* filter flags */ + unsigned int filter_flags; + /* Stats about the wireless interface */ + struct ieee80211_low_level_stats ieee_stats; + + struct hwrng rng; + u8 rng_initialized; + char rng_name[30 + 1]; + + /* The RF-kill button */ + struct b43_rfkill rfkill; + + /* List of all wireless devices on this chip */ + struct list_head devlist; + u8 nr_devs; +}; + +/* Pointers to the firmware data and meta information about it. */ +struct b43_firmware { + /* Microcode */ + const struct firmware *ucode; + /* PCM code */ + const struct firmware *pcm; + /* Initial MMIO values for the firmware */ + const struct firmware *initvals; + /* Initial MMIO values for the firmware, band-specific */ + const struct firmware *initvals_band; + /* Firmware revision */ + u16 rev; + /* Firmware patchlevel */ + u16 patch; +}; + +/* Device (802.11 core) initialization status. */ +enum { + B43_STAT_UNINIT = 0, /* Uninitialized. */ + B43_STAT_INITIALIZED = 1, /* Initialized, but not started, yet. */ + B43_STAT_STARTED = 2, /* Up and running. */ +}; +#define b43_status(wldev) atomic_read(&(wldev)->__init_status) +#define b43_set_status(wldev, stat) do { \ + atomic_set(&(wldev)->__init_status, (stat)); \ + smp_wmb(); \ + } while (0) + +/* XXX--- HOW LOCKING WORKS IN B43 ---XXX + * + * You should always acquire both, wl->mutex and wl->irq_lock unless: + * - You don't need to acquire wl->irq_lock, if the interface is stopped. + * - You don't need to acquire wl->mutex in the IRQ handler, IRQ tasklet + * and packet TX path (and _ONLY_ there.) + */ + +/* Data structure for one wireless device (802.11 core) */ +struct b43_wldev { + struct ssb_device *dev; + struct b43_wl *wl; + + /* The device initialization status. + * Use b43_status() to query. */ + atomic_t __init_status; + /* Saved init status for handling suspend. */ + int suspend_init_status; + + bool __using_pio; /* Internal, use b43_using_pio(). */ + bool bad_frames_preempt; /* Use "Bad Frames Preemption" (default off) */ + bool reg124_set_0x4; /* Some variable to keep track of IRQ stuff. */ + bool short_preamble; /* TRUE, if short preamble is enabled. */ + bool short_slot; /* TRUE, if short slot timing is enabled. */ + bool radio_hw_enable; /* saved state of radio hardware enabled state */ + + /* PHY/Radio device. */ + struct b43_phy phy; + union { + /* DMA engines. */ + struct b43_dma dma; + /* PIO engines. */ + struct b43_pio pio; + }; + + /* Various statistics about the physical device. */ + struct b43_stats stats; + + /* The device LEDs. 
*/ + struct b43_led led_tx; + struct b43_led led_rx; + struct b43_led led_assoc; + struct b43_led led_radio; + + /* Reason code of the last interrupt. */ + u32 irq_reason; + u32 dma_reason[6]; + /* saved irq enable/disable state bitfield. */ + u32 irq_savedstate; + /* Link Quality calculation context. */ + struct b43_noise_calculation noisecalc; + /* if > 0 MAC is suspended. if == 0 MAC is enabled. */ + int mac_suspended; + + /* Interrupt Service Routine tasklet (bottom-half) */ + struct tasklet_struct isr_tasklet; + + /* Periodic tasks */ + struct delayed_work periodic_work; + unsigned int periodic_state; + + struct work_struct restart_work; + + /* encryption/decryption */ + u16 ktp; /* Key table pointer */ + u8 max_nr_keys; + struct b43_key key[58]; + + /* Cached beacon template while uploading the template. */ + struct sk_buff *cached_beacon; + + /* Firmware data */ + struct b43_firmware fw; + + /* Devicelist in struct b43_wl (all 802.11 cores) */ + struct list_head list; + + /* Debugging stuff follows. */ +#ifdef CONFIG_B43_DEBUG + struct b43_dfsentry *dfsentry; +#endif +}; + +static inline struct b43_wl *hw_to_b43_wl(struct ieee80211_hw *hw) +{ + return hw->priv; +} + +/* Helper function, which returns a boolean. + * TRUE, if PIO is used; FALSE, if DMA is used. + */ +#if defined(CONFIG_B43_DMA) && defined(CONFIG_B43_PIO) +static inline int b43_using_pio(struct b43_wldev *dev) +{ + return dev->__using_pio; +} +#elif defined(CONFIG_B43_DMA) +static inline int b43_using_pio(struct b43_wldev *dev) +{ + return 0; +} +#elif defined(CONFIG_B43_PIO) +static inline int b43_using_pio(struct b43_wldev *dev) +{ + return 1; +} +#else +# error "Using neither DMA nor PIO? Confused..." +#endif + +static inline struct b43_wldev *dev_to_b43_wldev(struct device *dev) +{ + struct ssb_device *ssb_dev = dev_to_ssb_dev(dev); + return ssb_get_drvdata(ssb_dev); +} + +/* Is the device operating in a specified mode (IEEE80211_IF_TYPE_XXX). */ +static inline int b43_is_mode(struct b43_wl *wl, int type) +{ + return (wl->operating && wl->if_type == type); +} + +static inline u16 b43_read16(struct b43_wldev *dev, u16 offset) +{ + return ssb_read16(dev->dev, offset); +} + +static inline void b43_write16(struct b43_wldev *dev, u16 offset, u16 value) +{ + ssb_write16(dev->dev, offset, value); +} + +static inline u32 b43_read32(struct b43_wldev *dev, u16 offset) +{ + return ssb_read32(dev->dev, offset); +} + +static inline void b43_write32(struct b43_wldev *dev, u16 offset, u32 value) +{ + ssb_write32(dev->dev, offset, value); +} + +/* Message printing */ +void b43info(struct b43_wl *wl, const char *fmt, ...) + __attribute__ ((format(printf, 2, 3))); +void b43err(struct b43_wl *wl, const char *fmt, ...) + __attribute__ ((format(printf, 2, 3))); +void b43warn(struct b43_wl *wl, const char *fmt, ...) + __attribute__ ((format(printf, 2, 3))); +#if B43_DEBUG +void b43dbg(struct b43_wl *wl, const char *fmt, ...) + __attribute__ ((format(printf, 2, 3))); +#else /* DEBUG */ +# define b43dbg(wl, fmt...) do { /* nothing */ } while (0) +#endif /* DEBUG */ + +/* A WARN_ON variant that vanishes when b43 debugging is disabled. + * This _also_ evaluates the arg with debugging disabled. 
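+ * For example, B43_WARN_ON(load_beacon_template(dev)) (an illustrative call)
+ * still invokes load_beacon_template() in non-debug builds; only the warning
+ * itself is compiled away.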
*/ +#if B43_DEBUG +# define B43_WARN_ON(x) WARN_ON(x) +#else +static inline bool __b43_warn_on_dummy(bool x) { return x; } +# define B43_WARN_ON(x) __b43_warn_on_dummy(unlikely(!!(x))) +#endif + +/** Limit a value between two limits */ +#ifdef limit_value +# undef limit_value +#endif +#define limit_value(value, min, max) \ + ({ \ + typeof(value) __value = (value); \ + typeof(value) __min = (min); \ + typeof(value) __max = (max); \ + if (__value < __min) \ + __value = __min; \ + else if (__value > __max) \ + __value = __max; \ + __value; \ + }) + +/* Convert an integer to a Q5.2 value */ +#define INT_TO_Q52(i) ((i) << 2) +/* Convert a Q5.2 value to an integer (precision loss!) */ +#define Q52_TO_INT(q52) ((q52) >> 2) +/* Macros for printing a value in Q5.2 format */ +#define Q52_FMT "%u.%u" +#define Q52_ARG(q52) Q52_TO_INT(q52), ((((q52) & 0x3) * 100) / 4) + +#endif /* B43_H_ */ diff --git a/drivers/net/wireless/b43/debugfs.c b/drivers/net/wireless/b43/debugfs.c new file mode 100644 index 0000000000000000000000000000000000000000..734e70e1a06d557411b0273d24cf3399d1c6f6be --- /dev/null +++ b/drivers/net/wireless/b43/debugfs.c @@ -0,0 +1,656 @@ +/* + + Broadcom B43 wireless driver + + debugfs driver debugging code + + Copyright (c) 2005-2007 Michael Buesch + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; see the file COPYING. If not, write to + the Free Software Foundation, Inc., 51 Franklin Steet, Fifth Floor, + Boston, MA 02110-1301, USA. + +*/ + +#include +#include +#include +#include +#include +#include + +#include "b43.h" +#include "main.h" +#include "debugfs.h" +#include "dma.h" +#include "pio.h" +#include "xmit.h" + + +/* The root directory. */ +static struct dentry *rootdir; + +struct b43_debugfs_fops { + ssize_t (*read)(struct b43_wldev *dev, char *buf, size_t bufsize); + int (*write)(struct b43_wldev *dev, const char *buf, size_t count); + struct file_operations fops; + /* Offset of struct b43_dfs_file in struct b43_dfsentry */ + size_t file_struct_offset; + /* Take wl->irq_lock before calling read/write? */ + bool take_irqlock; +}; + +static inline +struct b43_dfs_file * fops_to_dfs_file(struct b43_wldev *dev, + const struct b43_debugfs_fops *dfops) +{ + void *p; + + p = dev->dfsentry; + p += dfops->file_struct_offset; + + return p; +} + + +#define fappend(fmt, x...) 
\ + do { \ + if (bufsize - count) \ + count += snprintf(buf + count, \ + bufsize - count, \ + fmt , ##x); \ + else \ + printk(KERN_ERR "b43: fappend overflow\n"); \ + } while (0) + + +/* wl->irq_lock is locked */ +static ssize_t tsf_read_file(struct b43_wldev *dev, + char *buf, size_t bufsize) +{ + ssize_t count = 0; + u64 tsf; + + b43_tsf_read(dev, &tsf); + fappend("0x%08x%08x\n", + (unsigned int)((tsf & 0xFFFFFFFF00000000ULL) >> 32), + (unsigned int)(tsf & 0xFFFFFFFFULL)); + + return count; +} + +/* wl->irq_lock is locked */ +static int tsf_write_file(struct b43_wldev *dev, + const char *buf, size_t count) +{ + u64 tsf; + + if (sscanf(buf, "%llu", (unsigned long long *)(&tsf)) != 1) + return -EINVAL; + b43_tsf_write(dev, tsf); + + return 0; +} + +/* wl->irq_lock is locked */ +static ssize_t ucode_regs_read_file(struct b43_wldev *dev, + char *buf, size_t bufsize) +{ + ssize_t count = 0; + int i; + + for (i = 0; i < 64; i++) { + fappend("r%d = 0x%04x\n", i, + b43_shm_read16(dev, B43_SHM_SCRATCH, i)); + } + + return count; +} + +/* wl->irq_lock is locked */ +static ssize_t shm_read_file(struct b43_wldev *dev, + char *buf, size_t bufsize) +{ + ssize_t count = 0; + int i; + u16 tmp; + __le16 *le16buf = (__le16 *)buf; + + for (i = 0; i < 0x1000; i++) { + if (bufsize <= 0) + break; + tmp = b43_shm_read16(dev, B43_SHM_SHARED, 2 * i); + le16buf[i] = cpu_to_le16(tmp); + count += sizeof(tmp); + bufsize -= sizeof(tmp); + } + + return count; +} + +static ssize_t txstat_read_file(struct b43_wldev *dev, + char *buf, size_t bufsize) +{ + struct b43_txstatus_log *log = &dev->dfsentry->txstatlog; + ssize_t count = 0; + unsigned long flags; + int i, idx; + struct b43_txstatus *stat; + + spin_lock_irqsave(&log->lock, flags); + if (log->end < 0) { + fappend("Nothing transmitted, yet\n"); + goto out_unlock; + } + fappend("b43 TX status reports:\n\n" + "index | cookie | seq | phy_stat | frame_count | " + "rts_count | supp_reason | pm_indicated | " + "intermediate | for_ampdu | acked\n" "---\n"); + i = log->end + 1; + idx = 0; + while (1) { + if (i == B43_NR_LOGGED_TXSTATUS) + i = 0; + stat = &(log->log[i]); + if (stat->cookie) { + fappend("%03d | " + "0x%04X | 0x%04X | 0x%02X | " + "0x%X | 0x%X | " + "%u | %u | " + "%u | %u | %u\n", + idx, + stat->cookie, stat->seq, stat->phy_stat, + stat->frame_count, stat->rts_count, + stat->supp_reason, stat->pm_indicated, + stat->intermediate, stat->for_ampdu, + stat->acked); + idx++; + } + if (i == log->end) + break; + i++; + } +out_unlock: + spin_unlock_irqrestore(&log->lock, flags); + + return count; +} + +static ssize_t txpower_g_read_file(struct b43_wldev *dev, + char *buf, size_t bufsize) +{ + ssize_t count = 0; + + if (dev->phy.type != B43_PHYTYPE_G) { + fappend("Device is not a G-PHY\n"); + goto out; + } + fappend("Control: %s\n", dev->phy.manual_txpower_control ? + "MANUAL" : "AUTOMATIC"); + fappend("Baseband attenuation: %u\n", dev->phy.bbatt.att); + fappend("Radio attenuation: %u\n", dev->phy.rfatt.att); + fappend("TX Mixer Gain: %s\n", + (dev->phy.tx_control & B43_TXCTL_TXMIX) ? "ON" : "OFF"); + fappend("PA Gain 2dB: %s\n", + (dev->phy.tx_control & B43_TXCTL_PA2DB) ? "ON" : "OFF"); + fappend("PA Gain 3dB: %s\n", + (dev->phy.tx_control & B43_TXCTL_PA3DB) ? 
"ON" : "OFF"); + fappend("\n\n"); + fappend("You can write to this file:\n"); + fappend("Writing \"auto\" enables automatic txpower control.\n"); + fappend + ("Writing the attenuation values as \"bbatt rfatt txmix pa2db pa3db\" " + "enables manual txpower control.\n"); + fappend("Example: 5 4 0 0 1\n"); + fappend("Enables manual control with Baseband attenuation 5, " + "Radio attenuation 4, No TX Mixer Gain, " + "No PA Gain 2dB, With PA Gain 3dB.\n"); +out: + return count; +} + +static int txpower_g_write_file(struct b43_wldev *dev, + const char *buf, size_t count) +{ + unsigned long phy_flags; + + if (dev->phy.type != B43_PHYTYPE_G) + return -ENODEV; + if ((count >= 4) && (memcmp(buf, "auto", 4) == 0)) { + /* Automatic control */ + dev->phy.manual_txpower_control = 0; + b43_phy_xmitpower(dev); + } else { + int bbatt = 0, rfatt = 0, txmix = 0, pa2db = 0, pa3db = 0; + /* Manual control */ + if (sscanf(buf, "%d %d %d %d %d", &bbatt, &rfatt, + &txmix, &pa2db, &pa3db) != 5) + return -EINVAL; + b43_put_attenuation_into_ranges(dev, &bbatt, &rfatt); + dev->phy.manual_txpower_control = 1; + dev->phy.bbatt.att = bbatt; + dev->phy.rfatt.att = rfatt; + dev->phy.tx_control = 0; + if (txmix) + dev->phy.tx_control |= B43_TXCTL_TXMIX; + if (pa2db) + dev->phy.tx_control |= B43_TXCTL_PA2DB; + if (pa3db) + dev->phy.tx_control |= B43_TXCTL_PA3DB; + b43_phy_lock(dev, phy_flags); + b43_radio_lock(dev); + b43_set_txpower_g(dev, &dev->phy.bbatt, + &dev->phy.rfatt, dev->phy.tx_control); + b43_radio_unlock(dev); + b43_phy_unlock(dev, phy_flags); + } + + return 0; +} + +/* wl->irq_lock is locked */ +static int restart_write_file(struct b43_wldev *dev, + const char *buf, size_t count) +{ + int err = 0; + + if (count > 0 && buf[0] == '1') { + b43_controller_restart(dev, "manually restarted"); + } else + err = -EINVAL; + + return err; +} + +static ssize_t append_lo_table(ssize_t count, char *buf, const size_t bufsize, + struct b43_loctl table[B43_NR_BB][B43_NR_RF]) +{ + unsigned int i, j; + struct b43_loctl *ctl; + + for (i = 0; i < B43_NR_BB; i++) { + for (j = 0; j < B43_NR_RF; j++) { + ctl = &(table[i][j]); + fappend("(bbatt %2u, rfatt %2u) -> " + "(I %+3d, Q %+3d, Used: %d, Calibrated: %d)\n", + i, j, ctl->i, ctl->q, + ctl->used, + b43_loctl_is_calibrated(ctl)); + } + } + + return count; +} + +static ssize_t loctls_read_file(struct b43_wldev *dev, + char *buf, size_t bufsize) +{ + ssize_t count = 0; + struct b43_txpower_lo_control *lo; + int i, err = 0; + + if (dev->phy.type != B43_PHYTYPE_G) { + fappend("Device is not a G-PHY\n"); + err = -ENODEV; + goto out; + } + lo = dev->phy.lo_control; + fappend("-- Local Oscillator calibration data --\n\n"); + fappend("Measured: %d, Rebuild: %d, HW-power-control: %d\n", + lo->lo_measured, + lo->rebuild, + dev->phy.hardware_power_control); + fappend("TX Bias: 0x%02X, TX Magn: 0x%02X\n", + lo->tx_bias, lo->tx_magn); + fappend("Power Vector: 0x%08X%08X\n", + (unsigned int)((lo->power_vector & 0xFFFFFFFF00000000ULL) >> 32), + (unsigned int)(lo->power_vector & 0x00000000FFFFFFFFULL)); + fappend("\nControl table WITH PADMIX:\n"); + count = append_lo_table(count, buf, bufsize, lo->with_padmix); + fappend("\nControl table WITHOUT PADMIX:\n"); + count = append_lo_table(count, buf, bufsize, lo->no_padmix); + fappend("\nUsed RF attenuation values: Value(WithPadmix flag)\n"); + for (i = 0; i < lo->rfatt_list.len; i++) { + fappend("%u(%d), ", + lo->rfatt_list.list[i].att, + lo->rfatt_list.list[i].with_padmix); + } + fappend("\n"); + fappend("\nUsed Baseband attenuation values:\n"); + for 
(i = 0; i < lo->bbatt_list.len; i++) { + fappend("%u, ", + lo->bbatt_list.list[i].att); + } + fappend("\n"); + +out: + return err ? err : count; +} + +#undef fappend + +static int b43_debugfs_open(struct inode *inode, struct file *file) +{ + file->private_data = inode->i_private; + return 0; +} + +static ssize_t b43_debugfs_read(struct file *file, char __user *userbuf, + size_t count, loff_t *ppos) +{ + struct b43_wldev *dev; + struct b43_debugfs_fops *dfops; + struct b43_dfs_file *dfile; + ssize_t ret; + char *buf; + const size_t bufsize = 1024 * 128; + const size_t buforder = get_order(bufsize); + int err = 0; + + if (!count) + return 0; + dev = file->private_data; + if (!dev) + return -ENODEV; + + mutex_lock(&dev->wl->mutex); + if (b43_status(dev) < B43_STAT_INITIALIZED) { + err = -ENODEV; + goto out_unlock; + } + + dfops = container_of(file->f_op, struct b43_debugfs_fops, fops); + if (!dfops->read) { + err = -ENOSYS; + goto out_unlock; + } + dfile = fops_to_dfs_file(dev, dfops); + + if (!dfile->buffer) { + buf = (char *)__get_free_pages(GFP_KERNEL, buforder); + if (!buf) { + err = -ENOMEM; + goto out_unlock; + } + /* Sparse warns about the following memset, because it has a big + * size value. That warning is bogus, so I will ignore it. --mb */ + memset(buf, 0, bufsize); + if (dfops->take_irqlock) { + spin_lock_irq(&dev->wl->irq_lock); + ret = dfops->read(dev, buf, bufsize); + spin_unlock_irq(&dev->wl->irq_lock); + } else + ret = dfops->read(dev, buf, bufsize); + if (ret <= 0) { + free_pages((unsigned long)buf, buforder); + err = ret; + goto out_unlock; + } + dfile->data_len = ret; + dfile->buffer = buf; + } + + ret = simple_read_from_buffer(userbuf, count, ppos, + dfile->buffer, + dfile->data_len); + if (*ppos >= dfile->data_len) { + free_pages((unsigned long)dfile->buffer, buforder); + dfile->buffer = NULL; + dfile->data_len = 0; + } +out_unlock: + mutex_unlock(&dev->wl->mutex); + + return err ? err : ret; +} + +static ssize_t b43_debugfs_write(struct file *file, + const char __user *userbuf, + size_t count, loff_t *ppos) +{ + struct b43_wldev *dev; + struct b43_debugfs_fops *dfops; + char *buf; + int err = 0; + + if (!count) + return 0; + if (count > PAGE_SIZE) + return -E2BIG; + dev = file->private_data; + if (!dev) + return -ENODEV; + + mutex_lock(&dev->wl->mutex); + if (b43_status(dev) < B43_STAT_INITIALIZED) { + err = -ENODEV; + goto out_unlock; + } + + dfops = container_of(file->f_op, struct b43_debugfs_fops, fops); + if (!dfops->write) { + err = -ENOSYS; + goto out_unlock; + } + + buf = (char *)get_zeroed_page(GFP_KERNEL); + if (!buf) { + err = -ENOMEM; + goto out_unlock; + } + if (copy_from_user(buf, userbuf, count)) { + err = -EFAULT; + goto out_freepage; + } + if (dfops->take_irqlock) { + spin_lock_irq(&dev->wl->irq_lock); + err = dfops->write(dev, buf, count); + spin_unlock_irq(&dev->wl->irq_lock); + } else + err = dfops->write(dev, buf, count); + if (err) + goto out_freepage; + +out_freepage: + free_page((unsigned long)buf); +out_unlock: + mutex_unlock(&dev->wl->mutex); + + return err ? 
err : count; +} + + +#define B43_DEBUGFS_FOPS(name, _read, _write, _take_irqlock) \ + static struct b43_debugfs_fops fops_##name = { \ + .read = _read, \ + .write = _write, \ + .fops = { \ + .open = b43_debugfs_open, \ + .read = b43_debugfs_read, \ + .write = b43_debugfs_write, \ + }, \ + .file_struct_offset = offsetof(struct b43_dfsentry, \ + file_##name), \ + .take_irqlock = _take_irqlock, \ + } + +B43_DEBUGFS_FOPS(tsf, tsf_read_file, tsf_write_file, 1); +B43_DEBUGFS_FOPS(ucode_regs, ucode_regs_read_file, NULL, 1); +B43_DEBUGFS_FOPS(shm, shm_read_file, NULL, 1); +B43_DEBUGFS_FOPS(txstat, txstat_read_file, NULL, 0); +B43_DEBUGFS_FOPS(txpower_g, txpower_g_read_file, txpower_g_write_file, 0); +B43_DEBUGFS_FOPS(restart, NULL, restart_write_file, 1); +B43_DEBUGFS_FOPS(loctls, loctls_read_file, NULL, 0); + + +int b43_debug(struct b43_wldev *dev, enum b43_dyndbg feature) +{ + return !!(dev->dfsentry && dev->dfsentry->dyn_debug[feature]); +} + +static void b43_remove_dynamic_debug(struct b43_wldev *dev) +{ + struct b43_dfsentry *e = dev->dfsentry; + int i; + + for (i = 0; i < __B43_NR_DYNDBG; i++) + debugfs_remove(e->dyn_debug_dentries[i]); +} + +static void b43_add_dynamic_debug(struct b43_wldev *dev) +{ + struct b43_dfsentry *e = dev->dfsentry; + struct dentry *d; + +#define add_dyn_dbg(name, id, initstate) do { \ + e->dyn_debug[id] = (initstate); \ + d = debugfs_create_bool(name, 0600, e->subdir, \ + &(e->dyn_debug[id])); \ + if (!IS_ERR(d)) \ + e->dyn_debug_dentries[id] = d; \ + } while (0) + + add_dyn_dbg("debug_xmitpower", B43_DBG_XMITPOWER, 0); + add_dyn_dbg("debug_dmaoverflow", B43_DBG_DMAOVERFLOW, 0); + add_dyn_dbg("debug_dmaverbose", B43_DBG_DMAVERBOSE, 0); + add_dyn_dbg("debug_pwork_fast", B43_DBG_PWORK_FAST, 0); + add_dyn_dbg("debug_pwork_stop", B43_DBG_PWORK_STOP, 0); + +#undef add_dyn_dbg +} + +void b43_debugfs_add_device(struct b43_wldev *dev) +{ + struct b43_dfsentry *e; + struct b43_txstatus_log *log; + char devdir[16]; + + B43_WARN_ON(!dev); + e = kzalloc(sizeof(*e), GFP_KERNEL); + if (!e) { + b43err(dev->wl, "debugfs: add device OOM\n"); + return; + } + e->dev = dev; + log = &e->txstatlog; + log->log = kcalloc(B43_NR_LOGGED_TXSTATUS, + sizeof(struct b43_txstatus), GFP_KERNEL); + if (!log->log) { + b43err(dev->wl, "debugfs: add device txstatus OOM\n"); + kfree(e); + return; + } + log->end = -1; + spin_lock_init(&log->lock); + + dev->dfsentry = e; + + snprintf(devdir, sizeof(devdir), "%s", wiphy_name(dev->wl->hw->wiphy)); + e->subdir = debugfs_create_dir(devdir, rootdir); + if (!e->subdir || IS_ERR(e->subdir)) { + if (e->subdir == ERR_PTR(-ENODEV)) { + b43dbg(dev->wl, "DebugFS (CONFIG_DEBUG_FS) not " + "enabled in kernel config\n"); + } else { + b43err(dev->wl, "debugfs: cannot create %s directory\n", + devdir); + } + dev->dfsentry = NULL; + kfree(log->log); + kfree(e); + return; + } + +#define ADD_FILE(name, mode) \ + do { \ + struct dentry *d; \ + d = debugfs_create_file(__stringify(name), \ + mode, e->subdir, dev, \ + &fops_##name.fops); \ + e->file_##name.dentry = NULL; \ + if (!IS_ERR(d)) \ + e->file_##name.dentry = d; \ + } while (0) + + + ADD_FILE(tsf, 0600); + ADD_FILE(ucode_regs, 0400); + ADD_FILE(shm, 0400); + ADD_FILE(txstat, 0400); + ADD_FILE(txpower_g, 0600); + ADD_FILE(restart, 0200); + ADD_FILE(loctls, 0400); + +#undef ADD_FILE + + b43_add_dynamic_debug(dev); +} + +void b43_debugfs_remove_device(struct b43_wldev *dev) +{ + struct b43_dfsentry *e; + + if (!dev) + return; + e = dev->dfsentry; + if (!e) + return; + b43_remove_dynamic_debug(dev); + + 
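+ /* Remove the individual debugfs files before their parent subdir. */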
debugfs_remove(e->file_tsf.dentry); + debugfs_remove(e->file_ucode_regs.dentry); + debugfs_remove(e->file_shm.dentry); + debugfs_remove(e->file_txstat.dentry); + debugfs_remove(e->file_txpower_g.dentry); + debugfs_remove(e->file_restart.dentry); + debugfs_remove(e->file_loctls.dentry); + + debugfs_remove(e->subdir); + kfree(e->txstatlog.log); + kfree(e); +} + +void b43_debugfs_log_txstat(struct b43_wldev *dev, + const struct b43_txstatus *status) +{ + struct b43_dfsentry *e = dev->dfsentry; + struct b43_txstatus_log *log; + struct b43_txstatus *cur; + int i; + + if (!e) + return; + log = &e->txstatlog; + B43_WARN_ON(!irqs_disabled()); + spin_lock(&log->lock); + i = log->end + 1; + if (i == B43_NR_LOGGED_TXSTATUS) + i = 0; + log->end = i; + cur = &(log->log[i]); + memcpy(cur, status, sizeof(*cur)); + spin_unlock(&log->lock); +} + +void b43_debugfs_init(void) +{ + rootdir = debugfs_create_dir(KBUILD_MODNAME, NULL); + if (IS_ERR(rootdir)) + rootdir = NULL; +} + +void b43_debugfs_exit(void) +{ + debugfs_remove(rootdir); +} diff --git a/drivers/net/wireless/b43/debugfs.h b/drivers/net/wireless/b43/debugfs.h new file mode 100644 index 0000000000000000000000000000000000000000..6eebe858db5af265d51dbfb884ab0cf2e7110b57 --- /dev/null +++ b/drivers/net/wireless/b43/debugfs.h @@ -0,0 +1,89 @@ +#ifndef B43_DEBUGFS_H_ +#define B43_DEBUGFS_H_ + +struct b43_wldev; +struct b43_txstatus; + +enum b43_dyndbg { /* Dynamic debugging features */ + B43_DBG_XMITPOWER, + B43_DBG_DMAOVERFLOW, + B43_DBG_DMAVERBOSE, + B43_DBG_PWORK_FAST, + B43_DBG_PWORK_STOP, + __B43_NR_DYNDBG, +}; + +#ifdef CONFIG_B43_DEBUG + +struct dentry; + +#define B43_NR_LOGGED_TXSTATUS 100 + +struct b43_txstatus_log { + struct b43_txstatus *log; + int end; + spinlock_t lock; +}; + +struct b43_dfs_file { + struct dentry *dentry; + char *buffer; + size_t data_len; +}; + +struct b43_dfsentry { + struct b43_wldev *dev; + struct dentry *subdir; + + struct b43_dfs_file file_tsf; + struct b43_dfs_file file_ucode_regs; + struct b43_dfs_file file_shm; + struct b43_dfs_file file_txstat; + struct b43_dfs_file file_txpower_g; + struct b43_dfs_file file_restart; + struct b43_dfs_file file_loctls; + + struct b43_txstatus_log txstatlog; + + /* Enabled/Disabled list for the dynamic debugging features. */ + u32 dyn_debug[__B43_NR_DYNDBG]; + /* Dentries for the dynamic debugging entries. 
*/ + struct dentry *dyn_debug_dentries[__B43_NR_DYNDBG]; +}; + +int b43_debug(struct b43_wldev *dev, enum b43_dyndbg feature); + +void b43_debugfs_init(void); +void b43_debugfs_exit(void); +void b43_debugfs_add_device(struct b43_wldev *dev); +void b43_debugfs_remove_device(struct b43_wldev *dev); +void b43_debugfs_log_txstat(struct b43_wldev *dev, + const struct b43_txstatus *status); + +#else /* CONFIG_B43_DEBUG */ + +static inline int b43_debug(struct b43_wldev *dev, enum b43_dyndbg feature) +{ + return 0; +} + +static inline void b43_debugfs_init(void) +{ +} +static inline void b43_debugfs_exit(void) +{ +} +static inline void b43_debugfs_add_device(struct b43_wldev *dev) +{ +} +static inline void b43_debugfs_remove_device(struct b43_wldev *dev) +{ +} +static inline void b43_debugfs_log_txstat(struct b43_wldev *dev, + const struct b43_txstatus *status) +{ +} + +#endif /* CONFIG_B43_DEBUG */ + +#endif /* B43_DEBUGFS_H_ */ diff --git a/drivers/net/wireless/b43/dma.c b/drivers/net/wireless/b43/dma.c new file mode 100644 index 0000000000000000000000000000000000000000..5e8f8ac0f1dda1f2b38f26f13145fef370a95b00 --- /dev/null +++ b/drivers/net/wireless/b43/dma.c @@ -0,0 +1,1494 @@ +/* + + Broadcom B43 wireless driver + + DMA ringbuffer and descriptor allocation/management + + Copyright (c) 2005, 2006 Michael Buesch + + Some code in this file is derived from the b44.c driver + Copyright (C) 2002 David S. Miller + Copyright (C) Pekka Pietikainen + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; see the file COPYING. If not, write to + the Free Software Foundation, Inc., 51 Franklin Steet, Fifth Floor, + Boston, MA 02110-1301, USA. + +*/ + +#include "b43.h" +#include "dma.h" +#include "main.h" +#include "debugfs.h" +#include "xmit.h" + +#include +#include +#include +#include + +/* 32bit DMA ops. 
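+ * These routines implement the b43_dma_ops callbacks for the 32-bit
+ * descriptor layout; the 64-bit variants further below follow the same
+ * pattern.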
*/ +static +struct b43_dmadesc_generic *op32_idx2desc(struct b43_dmaring *ring, + int slot, + struct b43_dmadesc_meta **meta) +{ + struct b43_dmadesc32 *desc; + + *meta = &(ring->meta[slot]); + desc = ring->descbase; + desc = &(desc[slot]); + + return (struct b43_dmadesc_generic *)desc; +} + +static void op32_fill_descriptor(struct b43_dmaring *ring, + struct b43_dmadesc_generic *desc, + dma_addr_t dmaaddr, u16 bufsize, + int start, int end, int irq) +{ + struct b43_dmadesc32 *descbase = ring->descbase; + int slot; + u32 ctl; + u32 addr; + u32 addrext; + + slot = (int)(&(desc->dma32) - descbase); + B43_WARN_ON(!(slot >= 0 && slot < ring->nr_slots)); + + addr = (u32) (dmaaddr & ~SSB_DMA_TRANSLATION_MASK); + addrext = (u32) (dmaaddr & SSB_DMA_TRANSLATION_MASK) + >> SSB_DMA_TRANSLATION_SHIFT; + addr |= ssb_dma_translation(ring->dev->dev); + ctl = (bufsize - ring->frameoffset) + & B43_DMA32_DCTL_BYTECNT; + if (slot == ring->nr_slots - 1) + ctl |= B43_DMA32_DCTL_DTABLEEND; + if (start) + ctl |= B43_DMA32_DCTL_FRAMESTART; + if (end) + ctl |= B43_DMA32_DCTL_FRAMEEND; + if (irq) + ctl |= B43_DMA32_DCTL_IRQ; + ctl |= (addrext << B43_DMA32_DCTL_ADDREXT_SHIFT) + & B43_DMA32_DCTL_ADDREXT_MASK; + + desc->dma32.control = cpu_to_le32(ctl); + desc->dma32.address = cpu_to_le32(addr); +} + +static void op32_poke_tx(struct b43_dmaring *ring, int slot) +{ + b43_dma_write(ring, B43_DMA32_TXINDEX, + (u32) (slot * sizeof(struct b43_dmadesc32))); +} + +static void op32_tx_suspend(struct b43_dmaring *ring) +{ + b43_dma_write(ring, B43_DMA32_TXCTL, b43_dma_read(ring, B43_DMA32_TXCTL) + | B43_DMA32_TXSUSPEND); +} + +static void op32_tx_resume(struct b43_dmaring *ring) +{ + b43_dma_write(ring, B43_DMA32_TXCTL, b43_dma_read(ring, B43_DMA32_TXCTL) + & ~B43_DMA32_TXSUSPEND); +} + +static int op32_get_current_rxslot(struct b43_dmaring *ring) +{ + u32 val; + + val = b43_dma_read(ring, B43_DMA32_RXSTATUS); + val &= B43_DMA32_RXDPTR; + + return (val / sizeof(struct b43_dmadesc32)); +} + +static void op32_set_current_rxslot(struct b43_dmaring *ring, int slot) +{ + b43_dma_write(ring, B43_DMA32_RXINDEX, + (u32) (slot * sizeof(struct b43_dmadesc32))); +} + +static const struct b43_dma_ops dma32_ops = { + .idx2desc = op32_idx2desc, + .fill_descriptor = op32_fill_descriptor, + .poke_tx = op32_poke_tx, + .tx_suspend = op32_tx_suspend, + .tx_resume = op32_tx_resume, + .get_current_rxslot = op32_get_current_rxslot, + .set_current_rxslot = op32_set_current_rxslot, +}; + +/* 64bit DMA ops. 
*/ +static +struct b43_dmadesc_generic *op64_idx2desc(struct b43_dmaring *ring, + int slot, + struct b43_dmadesc_meta **meta) +{ + struct b43_dmadesc64 *desc; + + *meta = &(ring->meta[slot]); + desc = ring->descbase; + desc = &(desc[slot]); + + return (struct b43_dmadesc_generic *)desc; +} + +static void op64_fill_descriptor(struct b43_dmaring *ring, + struct b43_dmadesc_generic *desc, + dma_addr_t dmaaddr, u16 bufsize, + int start, int end, int irq) +{ + struct b43_dmadesc64 *descbase = ring->descbase; + int slot; + u32 ctl0 = 0, ctl1 = 0; + u32 addrlo, addrhi; + u32 addrext; + + slot = (int)(&(desc->dma64) - descbase); + B43_WARN_ON(!(slot >= 0 && slot < ring->nr_slots)); + + addrlo = (u32) (dmaaddr & 0xFFFFFFFF); + addrhi = (((u64) dmaaddr >> 32) & ~SSB_DMA_TRANSLATION_MASK); + addrext = (((u64) dmaaddr >> 32) & SSB_DMA_TRANSLATION_MASK) + >> SSB_DMA_TRANSLATION_SHIFT; + addrhi |= ssb_dma_translation(ring->dev->dev); + if (slot == ring->nr_slots - 1) + ctl0 |= B43_DMA64_DCTL0_DTABLEEND; + if (start) + ctl0 |= B43_DMA64_DCTL0_FRAMESTART; + if (end) + ctl0 |= B43_DMA64_DCTL0_FRAMEEND; + if (irq) + ctl0 |= B43_DMA64_DCTL0_IRQ; + ctl1 |= (bufsize - ring->frameoffset) + & B43_DMA64_DCTL1_BYTECNT; + ctl1 |= (addrext << B43_DMA64_DCTL1_ADDREXT_SHIFT) + & B43_DMA64_DCTL1_ADDREXT_MASK; + + desc->dma64.control0 = cpu_to_le32(ctl0); + desc->dma64.control1 = cpu_to_le32(ctl1); + desc->dma64.address_low = cpu_to_le32(addrlo); + desc->dma64.address_high = cpu_to_le32(addrhi); +} + +static void op64_poke_tx(struct b43_dmaring *ring, int slot) +{ + b43_dma_write(ring, B43_DMA64_TXINDEX, + (u32) (slot * sizeof(struct b43_dmadesc64))); +} + +static void op64_tx_suspend(struct b43_dmaring *ring) +{ + b43_dma_write(ring, B43_DMA64_TXCTL, b43_dma_read(ring, B43_DMA64_TXCTL) + | B43_DMA64_TXSUSPEND); +} + +static void op64_tx_resume(struct b43_dmaring *ring) +{ + b43_dma_write(ring, B43_DMA64_TXCTL, b43_dma_read(ring, B43_DMA64_TXCTL) + & ~B43_DMA64_TXSUSPEND); +} + +static int op64_get_current_rxslot(struct b43_dmaring *ring) +{ + u32 val; + + val = b43_dma_read(ring, B43_DMA64_RXSTATUS); + val &= B43_DMA64_RXSTATDPTR; + + return (val / sizeof(struct b43_dmadesc64)); +} + +static void op64_set_current_rxslot(struct b43_dmaring *ring, int slot) +{ + b43_dma_write(ring, B43_DMA64_RXINDEX, + (u32) (slot * sizeof(struct b43_dmadesc64))); +} + +static const struct b43_dma_ops dma64_ops = { + .idx2desc = op64_idx2desc, + .fill_descriptor = op64_fill_descriptor, + .poke_tx = op64_poke_tx, + .tx_suspend = op64_tx_suspend, + .tx_resume = op64_tx_resume, + .get_current_rxslot = op64_get_current_rxslot, + .set_current_rxslot = op64_set_current_rxslot, +}; + +static inline int free_slots(struct b43_dmaring *ring) +{ + return (ring->nr_slots - ring->used_slots); +} + +static inline int next_slot(struct b43_dmaring *ring, int slot) +{ + B43_WARN_ON(!(slot >= -1 && slot <= ring->nr_slots - 1)); + if (slot == ring->nr_slots - 1) + return 0; + return slot + 1; +} + +static inline int prev_slot(struct b43_dmaring *ring, int slot) +{ + B43_WARN_ON(!(slot >= 0 && slot <= ring->nr_slots - 1)); + if (slot == 0) + return ring->nr_slots - 1; + return slot - 1; +} + +#ifdef CONFIG_B43_DEBUG +static void update_max_used_slots(struct b43_dmaring *ring, + int current_used_slots) +{ + if (current_used_slots <= ring->max_used_slots) + return; + ring->max_used_slots = current_used_slots; + if (b43_debug(ring->dev, B43_DBG_DMAVERBOSE)) { + b43dbg(ring->dev->wl, + "max_used_slots increased to %d on %s ring %d\n", + ring->max_used_slots, + 
ring->tx ? "TX" : "RX", ring->index); + } +} +#else +static inline + void update_max_used_slots(struct b43_dmaring *ring, int current_used_slots) +{ +} +#endif /* DEBUG */ + +/* Request a slot for usage. */ +static inline int request_slot(struct b43_dmaring *ring) +{ + int slot; + + B43_WARN_ON(!ring->tx); + B43_WARN_ON(ring->stopped); + B43_WARN_ON(free_slots(ring) == 0); + + slot = next_slot(ring, ring->current_slot); + ring->current_slot = slot; + ring->used_slots++; + + update_max_used_slots(ring, ring->used_slots); + + return slot; +} + +/* Mac80211-queue to b43-ring mapping */ +static struct b43_dmaring *priority_to_txring(struct b43_wldev *dev, + int queue_priority) +{ + struct b43_dmaring *ring; + +/*FIXME: For now we always run on TX-ring-1 */ + return dev->dma.tx_ring1; + + /* 0 = highest priority */ + switch (queue_priority) { + default: + B43_WARN_ON(1); + /* fallthrough */ + case 0: + ring = dev->dma.tx_ring3; + break; + case 1: + ring = dev->dma.tx_ring2; + break; + case 2: + ring = dev->dma.tx_ring1; + break; + case 3: + ring = dev->dma.tx_ring0; + break; + case 4: + ring = dev->dma.tx_ring4; + break; + case 5: + ring = dev->dma.tx_ring5; + break; + } + + return ring; +} + +/* Bcm43xx-ring to mac80211-queue mapping */ +static inline int txring_to_priority(struct b43_dmaring *ring) +{ + static const u8 idx_to_prio[] = { 3, 2, 1, 0, 4, 5, }; + +/*FIXME: have only one queue, for now */ + return 0; + + return idx_to_prio[ring->index]; +} + +u16 b43_dmacontroller_base(int dma64bit, int controller_idx) +{ + static const u16 map64[] = { + B43_MMIO_DMA64_BASE0, + B43_MMIO_DMA64_BASE1, + B43_MMIO_DMA64_BASE2, + B43_MMIO_DMA64_BASE3, + B43_MMIO_DMA64_BASE4, + B43_MMIO_DMA64_BASE5, + }; + static const u16 map32[] = { + B43_MMIO_DMA32_BASE0, + B43_MMIO_DMA32_BASE1, + B43_MMIO_DMA32_BASE2, + B43_MMIO_DMA32_BASE3, + B43_MMIO_DMA32_BASE4, + B43_MMIO_DMA32_BASE5, + }; + + if (dma64bit) { + B43_WARN_ON(!(controller_idx >= 0 && + controller_idx < ARRAY_SIZE(map64))); + return map64[controller_idx]; + } + B43_WARN_ON(!(controller_idx >= 0 && + controller_idx < ARRAY_SIZE(map32))); + return map32[controller_idx]; +} + +static inline + dma_addr_t map_descbuffer(struct b43_dmaring *ring, + unsigned char *buf, size_t len, int tx) +{ + dma_addr_t dmaaddr; + + if (tx) { + dmaaddr = dma_map_single(ring->dev->dev->dev, + buf, len, DMA_TO_DEVICE); + } else { + dmaaddr = dma_map_single(ring->dev->dev->dev, + buf, len, DMA_FROM_DEVICE); + } + + return dmaaddr; +} + +static inline + void unmap_descbuffer(struct b43_dmaring *ring, + dma_addr_t addr, size_t len, int tx) +{ + if (tx) { + dma_unmap_single(ring->dev->dev->dev, addr, len, DMA_TO_DEVICE); + } else { + dma_unmap_single(ring->dev->dev->dev, + addr, len, DMA_FROM_DEVICE); + } +} + +static inline + void sync_descbuffer_for_cpu(struct b43_dmaring *ring, + dma_addr_t addr, size_t len) +{ + B43_WARN_ON(ring->tx); + dma_sync_single_for_cpu(ring->dev->dev->dev, + addr, len, DMA_FROM_DEVICE); +} + +static inline + void sync_descbuffer_for_device(struct b43_dmaring *ring, + dma_addr_t addr, size_t len) +{ + B43_WARN_ON(ring->tx); + dma_sync_single_for_device(ring->dev->dev->dev, + addr, len, DMA_FROM_DEVICE); +} + +static inline + void free_descriptor_buffer(struct b43_dmaring *ring, + struct b43_dmadesc_meta *meta) +{ + if (meta->skb) { + dev_kfree_skb_any(meta->skb); + meta->skb = NULL; + } +} + +static int alloc_ringmemory(struct b43_dmaring *ring) +{ + struct device *dev = ring->dev->dev->dev; + + ring->descbase = dma_alloc_coherent(dev, 
B43_DMA_RINGMEMSIZE, + &(ring->dmabase), GFP_KERNEL); + if (!ring->descbase) { + b43err(ring->dev->wl, "DMA ringmemory allocation failed\n"); + return -ENOMEM; + } + memset(ring->descbase, 0, B43_DMA_RINGMEMSIZE); + + return 0; +} + +static void free_ringmemory(struct b43_dmaring *ring) +{ + struct device *dev = ring->dev->dev->dev; + + dma_free_coherent(dev, B43_DMA_RINGMEMSIZE, + ring->descbase, ring->dmabase); +} + +/* Reset the RX DMA channel */ +int b43_dmacontroller_rx_reset(struct b43_wldev *dev, u16 mmio_base, int dma64) +{ + int i; + u32 value; + u16 offset; + + might_sleep(); + + offset = dma64 ? B43_DMA64_RXCTL : B43_DMA32_RXCTL; + b43_write32(dev, mmio_base + offset, 0); + for (i = 0; i < 10; i++) { + offset = dma64 ? B43_DMA64_RXSTATUS : B43_DMA32_RXSTATUS; + value = b43_read32(dev, mmio_base + offset); + if (dma64) { + value &= B43_DMA64_RXSTAT; + if (value == B43_DMA64_RXSTAT_DISABLED) { + i = -1; + break; + } + } else { + value &= B43_DMA32_RXSTATE; + if (value == B43_DMA32_RXSTAT_DISABLED) { + i = -1; + break; + } + } + msleep(1); + } + if (i != -1) { + b43err(dev->wl, "DMA RX reset timed out\n"); + return -ENODEV; + } + + return 0; +} + +/* Reset the RX DMA channel */ +int b43_dmacontroller_tx_reset(struct b43_wldev *dev, u16 mmio_base, int dma64) +{ + int i; + u32 value; + u16 offset; + + might_sleep(); + + for (i = 0; i < 10; i++) { + offset = dma64 ? B43_DMA64_TXSTATUS : B43_DMA32_TXSTATUS; + value = b43_read32(dev, mmio_base + offset); + if (dma64) { + value &= B43_DMA64_TXSTAT; + if (value == B43_DMA64_TXSTAT_DISABLED || + value == B43_DMA64_TXSTAT_IDLEWAIT || + value == B43_DMA64_TXSTAT_STOPPED) + break; + } else { + value &= B43_DMA32_TXSTATE; + if (value == B43_DMA32_TXSTAT_DISABLED || + value == B43_DMA32_TXSTAT_IDLEWAIT || + value == B43_DMA32_TXSTAT_STOPPED) + break; + } + msleep(1); + } + offset = dma64 ? B43_DMA64_TXCTL : B43_DMA32_TXCTL; + b43_write32(dev, mmio_base + offset, 0); + for (i = 0; i < 10; i++) { + offset = dma64 ? B43_DMA64_TXSTATUS : B43_DMA32_TXSTATUS; + value = b43_read32(dev, mmio_base + offset); + if (dma64) { + value &= B43_DMA64_TXSTAT; + if (value == B43_DMA64_TXSTAT_DISABLED) { + i = -1; + break; + } + } else { + value &= B43_DMA32_TXSTATE; + if (value == B43_DMA32_TXSTAT_DISABLED) { + i = -1; + break; + } + } + msleep(1); + } + if (i != -1) { + b43err(dev->wl, "DMA TX reset timed out\n"); + return -ENODEV; + } + /* ensure the reset is completed. */ + msleep(1); + + return 0; +} + +static int setup_rx_descbuffer(struct b43_dmaring *ring, + struct b43_dmadesc_generic *desc, + struct b43_dmadesc_meta *meta, gfp_t gfp_flags) +{ + struct b43_rxhdr_fw4 *rxhdr; + struct b43_hwtxstatus *txstat; + dma_addr_t dmaaddr; + struct sk_buff *skb; + + B43_WARN_ON(ring->tx); + + skb = __dev_alloc_skb(ring->rx_buffersize, gfp_flags); + if (unlikely(!skb)) + return -ENOMEM; + dmaaddr = map_descbuffer(ring, skb->data, ring->rx_buffersize, 0); + if (dma_mapping_error(dmaaddr)) { + /* ugh. 
try to realloc in zone_dma */ + gfp_flags |= GFP_DMA; + + dev_kfree_skb_any(skb); + + skb = __dev_alloc_skb(ring->rx_buffersize, gfp_flags); + if (unlikely(!skb)) + return -ENOMEM; + dmaaddr = map_descbuffer(ring, skb->data, + ring->rx_buffersize, 0); + } + + if (dma_mapping_error(dmaaddr)) { + dev_kfree_skb_any(skb); + return -EIO; + } + + meta->skb = skb; + meta->dmaaddr = dmaaddr; + ring->ops->fill_descriptor(ring, desc, dmaaddr, + ring->rx_buffersize, 0, 0, 0); + + rxhdr = (struct b43_rxhdr_fw4 *)(skb->data); + rxhdr->frame_len = 0; + txstat = (struct b43_hwtxstatus *)(skb->data); + txstat->cookie = 0; + + return 0; +} + +/* Allocate the initial descbuffers. + * This is used for an RX ring only. + */ +static int alloc_initial_descbuffers(struct b43_dmaring *ring) +{ + int i, err = -ENOMEM; + struct b43_dmadesc_generic *desc; + struct b43_dmadesc_meta *meta; + + for (i = 0; i < ring->nr_slots; i++) { + desc = ring->ops->idx2desc(ring, i, &meta); + + err = setup_rx_descbuffer(ring, desc, meta, GFP_KERNEL); + if (err) { + b43err(ring->dev->wl, + "Failed to allocate initial descbuffers\n"); + goto err_unwind; + } + } + mb(); + ring->used_slots = ring->nr_slots; + err = 0; + out: + return err; + + err_unwind: + for (i--; i >= 0; i--) { + desc = ring->ops->idx2desc(ring, i, &meta); + + unmap_descbuffer(ring, meta->dmaaddr, ring->rx_buffersize, 0); + dev_kfree_skb(meta->skb); + } + goto out; +} + +/* Do initial setup of the DMA controller. + * Reset the controller, write the ring busaddress + * and switch the "enable" bit on. + */ +static int dmacontroller_setup(struct b43_dmaring *ring) +{ + int err = 0; + u32 value; + u32 addrext; + u32 trans = ssb_dma_translation(ring->dev->dev); + + if (ring->tx) { + if (ring->dma64) { + u64 ringbase = (u64) (ring->dmabase); + + addrext = ((ringbase >> 32) & SSB_DMA_TRANSLATION_MASK) + >> SSB_DMA_TRANSLATION_SHIFT; + value = B43_DMA64_TXENABLE; + value |= (addrext << B43_DMA64_TXADDREXT_SHIFT) + & B43_DMA64_TXADDREXT_MASK; + b43_dma_write(ring, B43_DMA64_TXCTL, value); + b43_dma_write(ring, B43_DMA64_TXRINGLO, + (ringbase & 0xFFFFFFFF)); + b43_dma_write(ring, B43_DMA64_TXRINGHI, + ((ringbase >> 32) & + ~SSB_DMA_TRANSLATION_MASK) + | trans); + } else { + u32 ringbase = (u32) (ring->dmabase); + + addrext = (ringbase & SSB_DMA_TRANSLATION_MASK) + >> SSB_DMA_TRANSLATION_SHIFT; + value = B43_DMA32_TXENABLE; + value |= (addrext << B43_DMA32_TXADDREXT_SHIFT) + & B43_DMA32_TXADDREXT_MASK; + b43_dma_write(ring, B43_DMA32_TXCTL, value); + b43_dma_write(ring, B43_DMA32_TXRING, + (ringbase & ~SSB_DMA_TRANSLATION_MASK) + | trans); + } + } else { + err = alloc_initial_descbuffers(ring); + if (err) + goto out; + if (ring->dma64) { + u64 ringbase = (u64) (ring->dmabase); + + addrext = ((ringbase >> 32) & SSB_DMA_TRANSLATION_MASK) + >> SSB_DMA_TRANSLATION_SHIFT; + value = (ring->frameoffset << B43_DMA64_RXFROFF_SHIFT); + value |= B43_DMA64_RXENABLE; + value |= (addrext << B43_DMA64_RXADDREXT_SHIFT) + & B43_DMA64_RXADDREXT_MASK; + b43_dma_write(ring, B43_DMA64_RXCTL, value); + b43_dma_write(ring, B43_DMA64_RXRINGLO, + (ringbase & 0xFFFFFFFF)); + b43_dma_write(ring, B43_DMA64_RXRINGHI, + ((ringbase >> 32) & + ~SSB_DMA_TRANSLATION_MASK) + | trans); + b43_dma_write(ring, B43_DMA64_RXINDEX, 200); + } else { + u32 ringbase = (u32) (ring->dmabase); + + addrext = (ringbase & SSB_DMA_TRANSLATION_MASK) + >> SSB_DMA_TRANSLATION_SHIFT; + value = (ring->frameoffset << B43_DMA32_RXFROFF_SHIFT); + value |= B43_DMA32_RXENABLE; + value |= (addrext << B43_DMA32_RXADDREXT_SHIFT) + & 
B43_DMA32_RXADDREXT_MASK; + b43_dma_write(ring, B43_DMA32_RXCTL, value); + b43_dma_write(ring, B43_DMA32_RXRING, + (ringbase & ~SSB_DMA_TRANSLATION_MASK) + | trans); + b43_dma_write(ring, B43_DMA32_RXINDEX, 200); + } + } + + out: + return err; +} + +/* Shutdown the DMA controller. */ +static void dmacontroller_cleanup(struct b43_dmaring *ring) +{ + if (ring->tx) { + b43_dmacontroller_tx_reset(ring->dev, ring->mmio_base, + ring->dma64); + if (ring->dma64) { + b43_dma_write(ring, B43_DMA64_TXRINGLO, 0); + b43_dma_write(ring, B43_DMA64_TXRINGHI, 0); + } else + b43_dma_write(ring, B43_DMA32_TXRING, 0); + } else { + b43_dmacontroller_rx_reset(ring->dev, ring->mmio_base, + ring->dma64); + if (ring->dma64) { + b43_dma_write(ring, B43_DMA64_RXRINGLO, 0); + b43_dma_write(ring, B43_DMA64_RXRINGHI, 0); + } else + b43_dma_write(ring, B43_DMA32_RXRING, 0); + } +} + +static void free_all_descbuffers(struct b43_dmaring *ring) +{ + struct b43_dmadesc_generic *desc; + struct b43_dmadesc_meta *meta; + int i; + + if (!ring->used_slots) + return; + for (i = 0; i < ring->nr_slots; i++) { + desc = ring->ops->idx2desc(ring, i, &meta); + + if (!meta->skb) { + B43_WARN_ON(!ring->tx); + continue; + } + if (ring->tx) { + unmap_descbuffer(ring, meta->dmaaddr, + meta->skb->len, 1); + } else { + unmap_descbuffer(ring, meta->dmaaddr, + ring->rx_buffersize, 0); + } + free_descriptor_buffer(ring, meta); + } +} + +static u64 supported_dma_mask(struct b43_wldev *dev) +{ + u32 tmp; + u16 mmio_base; + + tmp = b43_read32(dev, SSB_TMSHIGH); + if (tmp & SSB_TMSHIGH_DMA64) + return DMA_64BIT_MASK; + mmio_base = b43_dmacontroller_base(0, 0); + b43_write32(dev, mmio_base + B43_DMA32_TXCTL, B43_DMA32_TXADDREXT_MASK); + tmp = b43_read32(dev, mmio_base + B43_DMA32_TXCTL); + if (tmp & B43_DMA32_TXADDREXT_MASK) + return DMA_32BIT_MASK; + + return DMA_30BIT_MASK; +} + +/* Main initialization function. 
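It allocates the ring descriptor memory, the per-slot meta data array and (for TX rings) the txhdr cache, verifies that the cache can be DMA-mapped, and finally programs the DMA controller via dmacontroller_setup().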
*/ +static +struct b43_dmaring *b43_setup_dmaring(struct b43_wldev *dev, + int controller_index, + int for_tx, int dma64) +{ + struct b43_dmaring *ring; + int err; + int nr_slots; + dma_addr_t dma_test; + + ring = kzalloc(sizeof(*ring), GFP_KERNEL); + if (!ring) + goto out; + + nr_slots = B43_RXRING_SLOTS; + if (for_tx) + nr_slots = B43_TXRING_SLOTS; + + ring->meta = kcalloc(nr_slots, sizeof(struct b43_dmadesc_meta), + GFP_KERNEL); + if (!ring->meta) + goto err_kfree_ring; + if (for_tx) { + ring->txhdr_cache = kcalloc(nr_slots, + sizeof(struct b43_txhdr_fw4), + GFP_KERNEL); + if (!ring->txhdr_cache) + goto err_kfree_meta; + + /* test for ability to dma to txhdr_cache */ + dma_test = dma_map_single(dev->dev->dev, + ring->txhdr_cache, + sizeof(struct b43_txhdr_fw4), + DMA_TO_DEVICE); + + if (dma_mapping_error(dma_test)) { + /* ugh realloc */ + kfree(ring->txhdr_cache); + ring->txhdr_cache = kcalloc(nr_slots, + sizeof(struct + b43_txhdr_fw4), + GFP_KERNEL | GFP_DMA); + if (!ring->txhdr_cache) + goto err_kfree_meta; + + dma_test = dma_map_single(dev->dev->dev, + ring->txhdr_cache, + sizeof(struct b43_txhdr_fw4), + DMA_TO_DEVICE); + + if (dma_mapping_error(dma_test)) + goto err_kfree_txhdr_cache; + } + + dma_unmap_single(dev->dev->dev, + dma_test, sizeof(struct b43_txhdr_fw4), + DMA_TO_DEVICE); + } + + ring->dev = dev; + ring->nr_slots = nr_slots; + ring->mmio_base = b43_dmacontroller_base(dma64, controller_index); + ring->index = controller_index; + ring->dma64 = !!dma64; + if (dma64) + ring->ops = &dma64_ops; + else + ring->ops = &dma32_ops; + if (for_tx) { + ring->tx = 1; + ring->current_slot = -1; + } else { + if (ring->index == 0) { + ring->rx_buffersize = B43_DMA0_RX_BUFFERSIZE; + ring->frameoffset = B43_DMA0_RX_FRAMEOFFSET; + } else if (ring->index == 3) { + ring->rx_buffersize = B43_DMA3_RX_BUFFERSIZE; + ring->frameoffset = B43_DMA3_RX_FRAMEOFFSET; + } else + B43_WARN_ON(1); + } + spin_lock_init(&ring->lock); +#ifdef CONFIG_B43_DEBUG + ring->last_injected_overflow = jiffies; +#endif + + err = alloc_ringmemory(ring); + if (err) + goto err_kfree_txhdr_cache; + err = dmacontroller_setup(ring); + if (err) + goto err_free_ringmemory; + + out: + return ring; + + err_free_ringmemory: + free_ringmemory(ring); + err_kfree_txhdr_cache: + kfree(ring->txhdr_cache); + err_kfree_meta: + kfree(ring->meta); + err_kfree_ring: + kfree(ring); + ring = NULL; + goto out; +} + +/* Main cleanup function. */ +static void b43_destroy_dmaring(struct b43_dmaring *ring) +{ + if (!ring) + return; + + b43dbg(ring->dev->wl, "DMA-%s 0x%04X (%s) max used slots: %d/%d\n", + (ring->dma64) ? "64" : "32", + ring->mmio_base, + (ring->tx) ? "TX" : "RX", ring->max_used_slots, ring->nr_slots); + /* Device IRQs are disabled prior entering this function, + * so no need to take care of concurrency with rx handler stuff. 
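+ * Note the teardown order below: the controller is reset first, so the
+ * hardware no longer touches the descriptor buffers that are freed afterwards.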
+ */ + dmacontroller_cleanup(ring); + free_all_descbuffers(ring); + free_ringmemory(ring); + + kfree(ring->txhdr_cache); + kfree(ring->meta); + kfree(ring); +} + +void b43_dma_free(struct b43_wldev *dev) +{ + struct b43_dma *dma; + + if (b43_using_pio(dev)) + return; + dma = &dev->dma; + + b43_destroy_dmaring(dma->rx_ring3); + dma->rx_ring3 = NULL; + b43_destroy_dmaring(dma->rx_ring0); + dma->rx_ring0 = NULL; + + b43_destroy_dmaring(dma->tx_ring5); + dma->tx_ring5 = NULL; + b43_destroy_dmaring(dma->tx_ring4); + dma->tx_ring4 = NULL; + b43_destroy_dmaring(dma->tx_ring3); + dma->tx_ring3 = NULL; + b43_destroy_dmaring(dma->tx_ring2); + dma->tx_ring2 = NULL; + b43_destroy_dmaring(dma->tx_ring1); + dma->tx_ring1 = NULL; + b43_destroy_dmaring(dma->tx_ring0); + dma->tx_ring0 = NULL; +} + +int b43_dma_init(struct b43_wldev *dev) +{ + struct b43_dma *dma = &dev->dma; + struct b43_dmaring *ring; + int err; + u64 dmamask; + int dma64 = 0; + + dmamask = supported_dma_mask(dev); + if (dmamask == DMA_64BIT_MASK) + dma64 = 1; + + err = ssb_dma_set_mask(dev->dev, dmamask); + if (err) { +#ifdef B43_PIO + b43warn(dev->wl, "DMA for this device not supported. " + "Falling back to PIO\n"); + dev->__using_pio = 1; + return -EAGAIN; +#else + b43err(dev->wl, "DMA for this device not supported and " + "no PIO support compiled in\n"); + return -EOPNOTSUPP; +#endif + } + + err = -ENOMEM; + /* setup TX DMA channels. */ + ring = b43_setup_dmaring(dev, 0, 1, dma64); + if (!ring) + goto out; + dma->tx_ring0 = ring; + + ring = b43_setup_dmaring(dev, 1, 1, dma64); + if (!ring) + goto err_destroy_tx0; + dma->tx_ring1 = ring; + + ring = b43_setup_dmaring(dev, 2, 1, dma64); + if (!ring) + goto err_destroy_tx1; + dma->tx_ring2 = ring; + + ring = b43_setup_dmaring(dev, 3, 1, dma64); + if (!ring) + goto err_destroy_tx2; + dma->tx_ring3 = ring; + + ring = b43_setup_dmaring(dev, 4, 1, dma64); + if (!ring) + goto err_destroy_tx3; + dma->tx_ring4 = ring; + + ring = b43_setup_dmaring(dev, 5, 1, dma64); + if (!ring) + goto err_destroy_tx4; + dma->tx_ring5 = ring; + + /* setup RX DMA channels. */ + ring = b43_setup_dmaring(dev, 0, 0, dma64); + if (!ring) + goto err_destroy_tx5; + dma->rx_ring0 = ring; + + if (dev->dev->id.revision < 5) { + ring = b43_setup_dmaring(dev, 3, 0, dma64); + if (!ring) + goto err_destroy_rx0; + dma->rx_ring3 = ring; + } + + b43dbg(dev->wl, "%d-bit DMA initialized\n", + (dmamask == DMA_64BIT_MASK) ? 64 : + (dmamask == DMA_32BIT_MASK) ? 32 : 30); + err = 0; + out: + return err; + + err_destroy_rx0: + b43_destroy_dmaring(dma->rx_ring0); + dma->rx_ring0 = NULL; + err_destroy_tx5: + b43_destroy_dmaring(dma->tx_ring5); + dma->tx_ring5 = NULL; + err_destroy_tx4: + b43_destroy_dmaring(dma->tx_ring4); + dma->tx_ring4 = NULL; + err_destroy_tx3: + b43_destroy_dmaring(dma->tx_ring3); + dma->tx_ring3 = NULL; + err_destroy_tx2: + b43_destroy_dmaring(dma->tx_ring2); + dma->tx_ring2 = NULL; + err_destroy_tx1: + b43_destroy_dmaring(dma->tx_ring1); + dma->tx_ring1 = NULL; + err_destroy_tx0: + b43_destroy_dmaring(dma->tx_ring0); + dma->tx_ring0 = NULL; + goto out; +} + +/* Generate a cookie for the TX header. */ +static u16 generate_cookie(struct b43_dmaring *ring, int slot) +{ + u16 cookie = 0x1000; + + /* Use the upper 4 bits of the cookie as + * DMA controller ID and store the slot number + * in the lower 12 bits. + * Note that the cookie must never be 0, as this + * is a special value used in RX path. 
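+ * For instance (hypothetical slot number): a frame queued on tx_ring2,
+ * slot 5 gets the cookie 0xC005, and parse_cookie() below maps 0xC005
+ * back to tx_ring2 with *slot == 5.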
+ */ + switch (ring->index) { + case 0: + cookie = 0xA000; + break; + case 1: + cookie = 0xB000; + break; + case 2: + cookie = 0xC000; + break; + case 3: + cookie = 0xD000; + break; + case 4: + cookie = 0xE000; + break; + case 5: + cookie = 0xF000; + break; + } + B43_WARN_ON(slot & ~0x0FFF); + cookie |= (u16) slot; + + return cookie; +} + +/* Inspect a cookie and find out to which controller/slot it belongs. */ +static +struct b43_dmaring *parse_cookie(struct b43_wldev *dev, u16 cookie, int *slot) +{ + struct b43_dma *dma = &dev->dma; + struct b43_dmaring *ring = NULL; + + switch (cookie & 0xF000) { + case 0xA000: + ring = dma->tx_ring0; + break; + case 0xB000: + ring = dma->tx_ring1; + break; + case 0xC000: + ring = dma->tx_ring2; + break; + case 0xD000: + ring = dma->tx_ring3; + break; + case 0xE000: + ring = dma->tx_ring4; + break; + case 0xF000: + ring = dma->tx_ring5; + break; + default: + B43_WARN_ON(1); + } + *slot = (cookie & 0x0FFF); + B43_WARN_ON(!(ring && *slot >= 0 && *slot < ring->nr_slots)); + + return ring; +} + +static int dma_tx_fragment(struct b43_dmaring *ring, + struct sk_buff *skb, + struct ieee80211_tx_control *ctl) +{ + const struct b43_dma_ops *ops = ring->ops; + u8 *header; + int slot; + int err; + struct b43_dmadesc_generic *desc; + struct b43_dmadesc_meta *meta; + struct b43_dmadesc_meta *meta_hdr; + struct sk_buff *bounce_skb; + +#define SLOTS_PER_PACKET 2 + B43_WARN_ON(skb_shinfo(skb)->nr_frags); + + /* Get a slot for the header. */ + slot = request_slot(ring); + desc = ops->idx2desc(ring, slot, &meta_hdr); + memset(meta_hdr, 0, sizeof(*meta_hdr)); + + header = &(ring->txhdr_cache[slot * sizeof(struct b43_txhdr_fw4)]); + b43_generate_txhdr(ring->dev, header, + skb->data, skb->len, ctl, + generate_cookie(ring, slot)); + + meta_hdr->dmaaddr = map_descbuffer(ring, (unsigned char *)header, + sizeof(struct b43_txhdr_fw4), 1); + if (dma_mapping_error(meta_hdr->dmaaddr)) + return -EIO; + ops->fill_descriptor(ring, desc, meta_hdr->dmaaddr, + sizeof(struct b43_txhdr_fw4), 1, 0, 0); + + /* Get a slot for the payload. */ + slot = request_slot(ring); + desc = ops->idx2desc(ring, slot, &meta); + memset(meta, 0, sizeof(*meta)); + + memcpy(&meta->txstat.control, ctl, sizeof(*ctl)); + meta->skb = skb; + meta->is_last_fragment = 1; + + meta->dmaaddr = map_descbuffer(ring, skb->data, skb->len, 1); + /* create a bounce buffer in zone_dma on mapping failure. */ + if (dma_mapping_error(meta->dmaaddr)) { + bounce_skb = __dev_alloc_skb(skb->len, GFP_ATOMIC | GFP_DMA); + if (!bounce_skb) { + err = -ENOMEM; + goto out_unmap_hdr; + } + + memcpy(skb_put(bounce_skb, skb->len), skb->data, skb->len); + dev_kfree_skb_any(skb); + skb = bounce_skb; + meta->skb = skb; + meta->dmaaddr = map_descbuffer(ring, skb->data, skb->len, 1); + if (dma_mapping_error(meta->dmaaddr)) { + err = -EIO; + goto out_free_bounce; + } + } + + ops->fill_descriptor(ring, desc, meta->dmaaddr, skb->len, 0, 1, 1); + + /* Now transfer the whole frame. */ + wmb(); + ops->poke_tx(ring, next_slot(ring, slot)); + return 0; + + out_free_bounce: + dev_kfree_skb_any(skb); + out_unmap_hdr: + unmap_descbuffer(ring, meta_hdr->dmaaddr, + sizeof(struct b43_txhdr_fw4), 1); + return err; +} + +static inline int should_inject_overflow(struct b43_dmaring *ring) +{ +#ifdef CONFIG_B43_DEBUG + if (unlikely(b43_debug(ring->dev, B43_DBG_DMAOVERFLOW))) { + /* Check if we should inject another ringbuffer overflow + * to test handling of this situation in the stack. 
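+ * Injection is rate-limited to roughly one overflow per second (HZ jiffies).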
*/ + unsigned long next_overflow; + + next_overflow = ring->last_injected_overflow + HZ; + if (time_after(jiffies, next_overflow)) { + ring->last_injected_overflow = jiffies; + b43dbg(ring->dev->wl, + "Injecting TX ring overflow on " + "DMA controller %d\n", ring->index); + return 1; + } + } +#endif /* CONFIG_B43_DEBUG */ + return 0; +} + +int b43_dma_tx(struct b43_wldev *dev, + struct sk_buff *skb, struct ieee80211_tx_control *ctl) +{ + struct b43_dmaring *ring; + int err = 0; + unsigned long flags; + + ring = priority_to_txring(dev, ctl->queue); + spin_lock_irqsave(&ring->lock, flags); + B43_WARN_ON(!ring->tx); + if (unlikely(free_slots(ring) < SLOTS_PER_PACKET)) { + b43warn(dev->wl, "DMA queue overflow\n"); + err = -ENOSPC; + goto out_unlock; + } + /* Check if the queue was stopped in mac80211, + * but we got called nevertheless. + * That would be a mac80211 bug. */ + B43_WARN_ON(ring->stopped); + + err = dma_tx_fragment(ring, skb, ctl); + if (unlikely(err)) { + b43err(dev->wl, "DMA tx mapping failure\n"); + goto out_unlock; + } + ring->nr_tx_packets++; + if ((free_slots(ring) < SLOTS_PER_PACKET) || + should_inject_overflow(ring)) { + /* This TX ring is full. */ + ieee80211_stop_queue(dev->wl->hw, txring_to_priority(ring)); + ring->stopped = 1; + if (b43_debug(dev, B43_DBG_DMAVERBOSE)) { + b43dbg(dev->wl, "Stopped TX ring %d\n", ring->index); + } + } + out_unlock: + spin_unlock_irqrestore(&ring->lock, flags); + + return err; +} + +void b43_dma_handle_txstatus(struct b43_wldev *dev, + const struct b43_txstatus *status) +{ + const struct b43_dma_ops *ops; + struct b43_dmaring *ring; + struct b43_dmadesc_generic *desc; + struct b43_dmadesc_meta *meta; + int slot; + + ring = parse_cookie(dev, status->cookie, &slot); + if (unlikely(!ring)) + return; + B43_WARN_ON(!irqs_disabled()); + spin_lock(&ring->lock); + + B43_WARN_ON(!ring->tx); + ops = ring->ops; + while (1) { + B43_WARN_ON(!(slot >= 0 && slot < ring->nr_slots)); + desc = ops->idx2desc(ring, slot, &meta); + + if (meta->skb) + unmap_descbuffer(ring, meta->dmaaddr, meta->skb->len, + 1); + else + unmap_descbuffer(ring, meta->dmaaddr, + sizeof(struct b43_txhdr_fw4), 1); + + if (meta->is_last_fragment) { + B43_WARN_ON(!meta->skb); + /* Call back to inform the ieee80211 subsystem about the + * status of the transmission. + * Some fields of txstat are already filled in dma_tx(). + */ + if (status->acked) { + meta->txstat.flags |= IEEE80211_TX_STATUS_ACK; + } else { + if (!(meta->txstat.control.flags + & IEEE80211_TXCTL_NO_ACK)) + meta->txstat.excessive_retries = 1; + } + if (status->frame_count == 0) { + /* The frame was not transmitted at all. */ + meta->txstat.retry_count = 0; + } else + meta->txstat.retry_count = status->frame_count - 1; + ieee80211_tx_status_irqsafe(dev->wl->hw, meta->skb, + &(meta->txstat)); + /* skb is freed by ieee80211_tx_status_irqsafe() */ + meta->skb = NULL; + } else { + /* No need to call free_descriptor_buffer here, as + * this is only the txhdr, which is not allocated. + */ + B43_WARN_ON(meta->skb); + } + + /* Everything unmapped and free'd. So it's not used anymore. 
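The slot is given back to the ring; the surrounding loop keeps walking slots until the frame's last fragment has been handled.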
*/ + ring->used_slots--; + + if (meta->is_last_fragment) + break; + slot = next_slot(ring, slot); + } + dev->stats.last_tx = jiffies; + if (ring->stopped) { + B43_WARN_ON(free_slots(ring) < SLOTS_PER_PACKET); + ieee80211_wake_queue(dev->wl->hw, txring_to_priority(ring)); + ring->stopped = 0; + if (b43_debug(dev, B43_DBG_DMAVERBOSE)) { + b43dbg(dev->wl, "Woke up TX ring %d\n", ring->index); + } + } + + spin_unlock(&ring->lock); +} + +void b43_dma_get_tx_stats(struct b43_wldev *dev, + struct ieee80211_tx_queue_stats *stats) +{ + const int nr_queues = dev->wl->hw->queues; + struct b43_dmaring *ring; + struct ieee80211_tx_queue_stats_data *data; + unsigned long flags; + int i; + + for (i = 0; i < nr_queues; i++) { + data = &(stats->data[i]); + ring = priority_to_txring(dev, i); + + spin_lock_irqsave(&ring->lock, flags); + data->len = ring->used_slots / SLOTS_PER_PACKET; + data->limit = ring->nr_slots / SLOTS_PER_PACKET; + data->count = ring->nr_tx_packets; + spin_unlock_irqrestore(&ring->lock, flags); + } +} + +static void dma_rx(struct b43_dmaring *ring, int *slot) +{ + const struct b43_dma_ops *ops = ring->ops; + struct b43_dmadesc_generic *desc; + struct b43_dmadesc_meta *meta; + struct b43_rxhdr_fw4 *rxhdr; + struct sk_buff *skb; + u16 len; + int err; + dma_addr_t dmaaddr; + + desc = ops->idx2desc(ring, *slot, &meta); + + sync_descbuffer_for_cpu(ring, meta->dmaaddr, ring->rx_buffersize); + skb = meta->skb; + + if (ring->index == 3) { + /* We received an xmit status. */ + struct b43_hwtxstatus *hw = (struct b43_hwtxstatus *)skb->data; + int i = 0; + + while (hw->cookie == 0) { + if (i > 100) + break; + i++; + udelay(2); + barrier(); + } + b43_handle_hwtxstatus(ring->dev, hw); + /* recycle the descriptor buffer. */ + sync_descbuffer_for_device(ring, meta->dmaaddr, + ring->rx_buffersize); + + return; + } + rxhdr = (struct b43_rxhdr_fw4 *)skb->data; + len = le16_to_cpu(rxhdr->frame_len); + if (len == 0) { + int i = 0; + + do { + udelay(2); + barrier(); + len = le16_to_cpu(rxhdr->frame_len); + } while (len == 0 && i++ < 5); + if (unlikely(len == 0)) { + /* recycle the descriptor buffer. */ + sync_descbuffer_for_device(ring, meta->dmaaddr, + ring->rx_buffersize); + goto drop; + } + } + if (unlikely(len > ring->rx_buffersize)) { + /* The data did not fit into one descriptor buffer + * and is split over multiple buffers. + * This should never happen, as we try to allocate buffers + * big enough. So simply ignore this packet. + */ + int cnt = 0; + s32 tmp = len; + + while (1) { + desc = ops->idx2desc(ring, *slot, &meta); + /* recycle the descriptor buffer. 
*/ + sync_descbuffer_for_device(ring, meta->dmaaddr, + ring->rx_buffersize); + *slot = next_slot(ring, *slot); + cnt++; + tmp -= ring->rx_buffersize; + if (tmp <= 0) + break; + } + b43err(ring->dev->wl, "DMA RX buffer too small " + "(len: %u, buffer: %u, nr-dropped: %d)\n", + len, ring->rx_buffersize, cnt); + goto drop; + } + + dmaaddr = meta->dmaaddr; + err = setup_rx_descbuffer(ring, desc, meta, GFP_ATOMIC); + if (unlikely(err)) { + b43dbg(ring->dev->wl, "DMA RX: setup_rx_descbuffer() failed\n"); + sync_descbuffer_for_device(ring, dmaaddr, ring->rx_buffersize); + goto drop; + } + + unmap_descbuffer(ring, dmaaddr, ring->rx_buffersize, 0); + skb_put(skb, len + ring->frameoffset); + skb_pull(skb, ring->frameoffset); + + b43_rx(ring->dev, skb, rxhdr); + drop: + return; +} + +void b43_dma_rx(struct b43_dmaring *ring) +{ + const struct b43_dma_ops *ops = ring->ops; + int slot, current_slot; + int used_slots = 0; + + B43_WARN_ON(ring->tx); + current_slot = ops->get_current_rxslot(ring); + B43_WARN_ON(!(current_slot >= 0 && current_slot < ring->nr_slots)); + + slot = ring->current_slot; + for (; slot != current_slot; slot = next_slot(ring, slot)) { + dma_rx(ring, &slot); + update_max_used_slots(ring, ++used_slots); + } + ops->set_current_rxslot(ring, slot); + ring->current_slot = slot; +} + +static void b43_dma_tx_suspend_ring(struct b43_dmaring *ring) +{ + unsigned long flags; + + spin_lock_irqsave(&ring->lock, flags); + B43_WARN_ON(!ring->tx); + ring->ops->tx_suspend(ring); + spin_unlock_irqrestore(&ring->lock, flags); +} + +static void b43_dma_tx_resume_ring(struct b43_dmaring *ring) +{ + unsigned long flags; + + spin_lock_irqsave(&ring->lock, flags); + B43_WARN_ON(!ring->tx); + ring->ops->tx_resume(ring); + spin_unlock_irqrestore(&ring->lock, flags); +} + +void b43_dma_tx_suspend(struct b43_wldev *dev) +{ + b43_power_saving_ctl_bits(dev, B43_PS_AWAKE); + b43_dma_tx_suspend_ring(dev->dma.tx_ring0); + b43_dma_tx_suspend_ring(dev->dma.tx_ring1); + b43_dma_tx_suspend_ring(dev->dma.tx_ring2); + b43_dma_tx_suspend_ring(dev->dma.tx_ring3); + b43_dma_tx_suspend_ring(dev->dma.tx_ring4); + b43_dma_tx_suspend_ring(dev->dma.tx_ring5); +} + +void b43_dma_tx_resume(struct b43_wldev *dev) +{ + b43_dma_tx_resume_ring(dev->dma.tx_ring5); + b43_dma_tx_resume_ring(dev->dma.tx_ring4); + b43_dma_tx_resume_ring(dev->dma.tx_ring3); + b43_dma_tx_resume_ring(dev->dma.tx_ring2); + b43_dma_tx_resume_ring(dev->dma.tx_ring1); + b43_dma_tx_resume_ring(dev->dma.tx_ring0); + b43_power_saving_ctl_bits(dev, 0); +} diff --git a/drivers/net/wireless/b43/dma.h b/drivers/net/wireless/b43/dma.h new file mode 100644 index 0000000000000000000000000000000000000000..3eed185be72552194cab5a1f0c6f8f13e21b1a16 --- /dev/null +++ b/drivers/net/wireless/b43/dma.h @@ -0,0 +1,337 @@ +#ifndef B43_DMA_H_ +#define B43_DMA_H_ + +#include +#include +#include +#include +#include + +#include "b43.h" + +/* DMA-Interrupt reasons. */ +#define B43_DMAIRQ_FATALMASK ((1 << 10) | (1 << 11) | (1 << 12) \ + | (1 << 14) | (1 << 15)) +#define B43_DMAIRQ_NONFATALMASK (1 << 13) +#define B43_DMAIRQ_RX_DONE (1 << 16) + +/*** 32-bit DMA Engine. ***/ + +/* 32-bit DMA controller registers. 
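All offsets below are relative to the controller's mmio_base and are accessed through b43_dma_read()/b43_dma_write().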
*/ +#define B43_DMA32_TXCTL 0x00 +#define B43_DMA32_TXENABLE 0x00000001 +#define B43_DMA32_TXSUSPEND 0x00000002 +#define B43_DMA32_TXLOOPBACK 0x00000004 +#define B43_DMA32_TXFLUSH 0x00000010 +#define B43_DMA32_TXADDREXT_MASK 0x00030000 +#define B43_DMA32_TXADDREXT_SHIFT 16 +#define B43_DMA32_TXRING 0x04 +#define B43_DMA32_TXINDEX 0x08 +#define B43_DMA32_TXSTATUS 0x0C +#define B43_DMA32_TXDPTR 0x00000FFF +#define B43_DMA32_TXSTATE 0x0000F000 +#define B43_DMA32_TXSTAT_DISABLED 0x00000000 +#define B43_DMA32_TXSTAT_ACTIVE 0x00001000 +#define B43_DMA32_TXSTAT_IDLEWAIT 0x00002000 +#define B43_DMA32_TXSTAT_STOPPED 0x00003000 +#define B43_DMA32_TXSTAT_SUSP 0x00004000 +#define B43_DMA32_TXERROR 0x000F0000 +#define B43_DMA32_TXERR_NOERR 0x00000000 +#define B43_DMA32_TXERR_PROT 0x00010000 +#define B43_DMA32_TXERR_UNDERRUN 0x00020000 +#define B43_DMA32_TXERR_BUFREAD 0x00030000 +#define B43_DMA32_TXERR_DESCREAD 0x00040000 +#define B43_DMA32_TXACTIVE 0xFFF00000 +#define B43_DMA32_RXCTL 0x10 +#define B43_DMA32_RXENABLE 0x00000001 +#define B43_DMA32_RXFROFF_MASK 0x000000FE +#define B43_DMA32_RXFROFF_SHIFT 1 +#define B43_DMA32_RXDIRECTFIFO 0x00000100 +#define B43_DMA32_RXADDREXT_MASK 0x00030000 +#define B43_DMA32_RXADDREXT_SHIFT 16 +#define B43_DMA32_RXRING 0x14 +#define B43_DMA32_RXINDEX 0x18 +#define B43_DMA32_RXSTATUS 0x1C +#define B43_DMA32_RXDPTR 0x00000FFF +#define B43_DMA32_RXSTATE 0x0000F000 +#define B43_DMA32_RXSTAT_DISABLED 0x00000000 +#define B43_DMA32_RXSTAT_ACTIVE 0x00001000 +#define B43_DMA32_RXSTAT_IDLEWAIT 0x00002000 +#define B43_DMA32_RXSTAT_STOPPED 0x00003000 +#define B43_DMA32_RXERROR 0x000F0000 +#define B43_DMA32_RXERR_NOERR 0x00000000 +#define B43_DMA32_RXERR_PROT 0x00010000 +#define B43_DMA32_RXERR_OVERFLOW 0x00020000 +#define B43_DMA32_RXERR_BUFWRITE 0x00030000 +#define B43_DMA32_RXERR_DESCREAD 0x00040000 +#define B43_DMA32_RXACTIVE 0xFFF00000 + +/* 32-bit DMA descriptor. */ +struct b43_dmadesc32 { + __le32 control; + __le32 address; +} __attribute__ ((__packed__)); +#define B43_DMA32_DCTL_BYTECNT 0x00001FFF +#define B43_DMA32_DCTL_ADDREXT_MASK 0x00030000 +#define B43_DMA32_DCTL_ADDREXT_SHIFT 16 +#define B43_DMA32_DCTL_DTABLEEND 0x10000000 +#define B43_DMA32_DCTL_IRQ 0x20000000 +#define B43_DMA32_DCTL_FRAMEEND 0x40000000 +#define B43_DMA32_DCTL_FRAMESTART 0x80000000 + +/*** 64-bit DMA Engine. ***/ + +/* 64-bit DMA controller registers. 
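Unlike the 32-bit engine, the ring base address is programmed through a RINGLO/RINGHI register pair and the state/error fields sit in the top nibble of their registers.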
*/ +#define B43_DMA64_TXCTL 0x00 +#define B43_DMA64_TXENABLE 0x00000001 +#define B43_DMA64_TXSUSPEND 0x00000002 +#define B43_DMA64_TXLOOPBACK 0x00000004 +#define B43_DMA64_TXFLUSH 0x00000010 +#define B43_DMA64_TXADDREXT_MASK 0x00030000 +#define B43_DMA64_TXADDREXT_SHIFT 16 +#define B43_DMA64_TXINDEX 0x04 +#define B43_DMA64_TXRINGLO 0x08 +#define B43_DMA64_TXRINGHI 0x0C +#define B43_DMA64_TXSTATUS 0x10 +#define B43_DMA64_TXSTATDPTR 0x00001FFF +#define B43_DMA64_TXSTAT 0xF0000000 +#define B43_DMA64_TXSTAT_DISABLED 0x00000000 +#define B43_DMA64_TXSTAT_ACTIVE 0x10000000 +#define B43_DMA64_TXSTAT_IDLEWAIT 0x20000000 +#define B43_DMA64_TXSTAT_STOPPED 0x30000000 +#define B43_DMA64_TXSTAT_SUSP 0x40000000 +#define B43_DMA64_TXERROR 0x14 +#define B43_DMA64_TXERRDPTR 0x0001FFFF +#define B43_DMA64_TXERR 0xF0000000 +#define B43_DMA64_TXERR_NOERR 0x00000000 +#define B43_DMA64_TXERR_PROT 0x10000000 +#define B43_DMA64_TXERR_UNDERRUN 0x20000000 +#define B43_DMA64_TXERR_TRANSFER 0x30000000 +#define B43_DMA64_TXERR_DESCREAD 0x40000000 +#define B43_DMA64_TXERR_CORE 0x50000000 +#define B43_DMA64_RXCTL 0x20 +#define B43_DMA64_RXENABLE 0x00000001 +#define B43_DMA64_RXFROFF_MASK 0x000000FE +#define B43_DMA64_RXFROFF_SHIFT 1 +#define B43_DMA64_RXDIRECTFIFO 0x00000100 +#define B43_DMA64_RXADDREXT_MASK 0x00030000 +#define B43_DMA64_RXADDREXT_SHIFT 16 +#define B43_DMA64_RXINDEX 0x24 +#define B43_DMA64_RXRINGLO 0x28 +#define B43_DMA64_RXRINGHI 0x2C +#define B43_DMA64_RXSTATUS 0x30 +#define B43_DMA64_RXSTATDPTR 0x00001FFF +#define B43_DMA64_RXSTAT 0xF0000000 +#define B43_DMA64_RXSTAT_DISABLED 0x00000000 +#define B43_DMA64_RXSTAT_ACTIVE 0x10000000 +#define B43_DMA64_RXSTAT_IDLEWAIT 0x20000000 +#define B43_DMA64_RXSTAT_STOPPED 0x30000000 +#define B43_DMA64_RXSTAT_SUSP 0x40000000 +#define B43_DMA64_RXERROR 0x34 +#define B43_DMA64_RXERRDPTR 0x0001FFFF +#define B43_DMA64_RXERR 0xF0000000 +#define B43_DMA64_RXERR_NOERR 0x00000000 +#define B43_DMA64_RXERR_PROT 0x10000000 +#define B43_DMA64_RXERR_UNDERRUN 0x20000000 +#define B43_DMA64_RXERR_TRANSFER 0x30000000 +#define B43_DMA64_RXERR_DESCREAD 0x40000000 +#define B43_DMA64_RXERR_CORE 0x50000000 + +/* 64-bit DMA descriptor. */ +struct b43_dmadesc64 { + __le32 control0; + __le32 control1; + __le32 address_low; + __le32 address_high; +} __attribute__ ((__packed__)); +#define B43_DMA64_DCTL0_DTABLEEND 0x10000000 +#define B43_DMA64_DCTL0_IRQ 0x20000000 +#define B43_DMA64_DCTL0_FRAMEEND 0x40000000 +#define B43_DMA64_DCTL0_FRAMESTART 0x80000000 +#define B43_DMA64_DCTL1_BYTECNT 0x00001FFF +#define B43_DMA64_DCTL1_ADDREXT_MASK 0x00030000 +#define B43_DMA64_DCTL1_ADDREXT_SHIFT 16 + +struct b43_dmadesc_generic { + union { + struct b43_dmadesc32 dma32; + struct b43_dmadesc64 dma64; + } __attribute__ ((__packed__)); +} __attribute__ ((__packed__)); + +/* Misc DMA constants */ +#define B43_DMA_RINGMEMSIZE PAGE_SIZE +#define B43_DMA0_RX_FRAMEOFFSET 30 +#define B43_DMA3_RX_FRAMEOFFSET 0 + +/* DMA engine tuning knobs */ +#define B43_TXRING_SLOTS 128 +#define B43_RXRING_SLOTS 64 +#define B43_DMA0_RX_BUFFERSIZE (2304 + 100) +#define B43_DMA3_RX_BUFFERSIZE 16 + +#ifdef CONFIG_B43_DMA + +struct sk_buff; +struct b43_private; +struct b43_txstatus; + +struct b43_dmadesc_meta { + /* The kernel DMA-able buffer. */ + struct sk_buff *skb; + /* DMA base bus-address of the descriptor buffer. */ + dma_addr_t dmaaddr; + /* ieee80211 TX status. Only used once per 802.11 frag. 
*/ + bool is_last_fragment; + struct ieee80211_tx_status txstat; +}; + +struct b43_dmaring; + +/* Lowlevel DMA operations that differ between 32bit and 64bit DMA. */ +struct b43_dma_ops { + struct b43_dmadesc_generic *(*idx2desc) (struct b43_dmaring * ring, + int slot, + struct b43_dmadesc_meta ** + meta); + void (*fill_descriptor) (struct b43_dmaring * ring, + struct b43_dmadesc_generic * desc, + dma_addr_t dmaaddr, u16 bufsize, int start, + int end, int irq); + void (*poke_tx) (struct b43_dmaring * ring, int slot); + void (*tx_suspend) (struct b43_dmaring * ring); + void (*tx_resume) (struct b43_dmaring * ring); + int (*get_current_rxslot) (struct b43_dmaring * ring); + void (*set_current_rxslot) (struct b43_dmaring * ring, int slot); +}; + +struct b43_dmaring { + /* Lowlevel DMA ops. */ + const struct b43_dma_ops *ops; + /* Kernel virtual base address of the ring memory. */ + void *descbase; + /* Meta data about all descriptors. */ + struct b43_dmadesc_meta *meta; + /* Cache of TX headers for each slot. + * This is to avoid an allocation on each TX. + * This is NULL for an RX ring. + */ + u8 *txhdr_cache; + /* (Unadjusted) DMA base bus-address of the ring memory. */ + dma_addr_t dmabase; + /* Number of descriptor slots in the ring. */ + int nr_slots; + /* Number of used descriptor slots. */ + int used_slots; + /* Currently used slot in the ring. */ + int current_slot; + /* Total number of packets sent. Statistics only. */ + unsigned int nr_tx_packets; + /* Frameoffset in octets. */ + u32 frameoffset; + /* Descriptor buffer size. */ + u16 rx_buffersize; + /* The MMIO base register of the DMA controller. */ + u16 mmio_base; + /* DMA controller index number (0-5). */ + int index; + /* Boolean. Is this a TX ring? */ + bool tx; + /* Boolean. 64bit DMA if true, 32bit DMA otherwise. */ + bool dma64; + /* Boolean. Is this ring stopped at ieee80211 level? */ + bool stopped; + /* Lock, only used for TX. */ + spinlock_t lock; + struct b43_wldev *dev; +#ifdef CONFIG_B43_DEBUG + /* Maximum number of used slots. */ + int max_used_slots; + /* Last time we injected a ring overflow. 
*/ + unsigned long last_injected_overflow; +#endif /* CONFIG_B43_DEBUG */ +}; + +static inline u32 b43_dma_read(struct b43_dmaring *ring, u16 offset) +{ + return b43_read32(ring->dev, ring->mmio_base + offset); +} + +static inline + void b43_dma_write(struct b43_dmaring *ring, u16 offset, u32 value) +{ + b43_write32(ring->dev, ring->mmio_base + offset, value); +} + +int b43_dma_init(struct b43_wldev *dev); +void b43_dma_free(struct b43_wldev *dev); + +int b43_dmacontroller_rx_reset(struct b43_wldev *dev, + u16 dmacontroller_mmio_base, int dma64); +int b43_dmacontroller_tx_reset(struct b43_wldev *dev, + u16 dmacontroller_mmio_base, int dma64); + +u16 b43_dmacontroller_base(int dma64bit, int dmacontroller_idx); + +void b43_dma_tx_suspend(struct b43_wldev *dev); +void b43_dma_tx_resume(struct b43_wldev *dev); + +void b43_dma_get_tx_stats(struct b43_wldev *dev, + struct ieee80211_tx_queue_stats *stats); + +int b43_dma_tx(struct b43_wldev *dev, + struct sk_buff *skb, struct ieee80211_tx_control *ctl); +void b43_dma_handle_txstatus(struct b43_wldev *dev, + const struct b43_txstatus *status); + +void b43_dma_rx(struct b43_dmaring *ring); + +#else /* CONFIG_B43_DMA */ + +static inline int b43_dma_init(struct b43_wldev *dev) +{ + return 0; +} +static inline void b43_dma_free(struct b43_wldev *dev) +{ +} +static inline + int b43_dmacontroller_rx_reset(struct b43_wldev *dev, + u16 dmacontroller_mmio_base, int dma64) +{ + return 0; +} +static inline + int b43_dmacontroller_tx_reset(struct b43_wldev *dev, + u16 dmacontroller_mmio_base, int dma64) +{ + return 0; +} +static inline + void b43_dma_get_tx_stats(struct b43_wldev *dev, + struct ieee80211_tx_queue_stats *stats) +{ +} +static inline + int b43_dma_tx(struct b43_wldev *dev, + struct sk_buff *skb, struct ieee80211_tx_control *ctl) +{ + return 0; +} +static inline + void b43_dma_handle_txstatus(struct b43_wldev *dev, + const struct b43_txstatus *status) +{ +} +static inline void b43_dma_rx(struct b43_dmaring *ring) +{ +} +static inline void b43_dma_tx_suspend(struct b43_wldev *dev) +{ +} +static inline void b43_dma_tx_resume(struct b43_wldev *dev) +{ +} + +#endif /* CONFIG_B43_DMA */ +#endif /* B43_DMA_H_ */ diff --git a/drivers/net/wireless/b43/leds.c b/drivers/net/wireless/b43/leds.c new file mode 100644 index 0000000000000000000000000000000000000000..19e588582c7c4d42ed22493afcdca825ca64e9c9 --- /dev/null +++ b/drivers/net/wireless/b43/leds.c @@ -0,0 +1,235 @@ +/* + + Broadcom B43 wireless driver + LED control + + Copyright (c) 2005 Martin Langer , + Copyright (c) 2005 Stefano Brivio + Copyright (c) 2005-2007 Michael Buesch + Copyright (c) 2005 Danny van Dyk + Copyright (c) 2005 Andreas Jaggi + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; see the file COPYING. If not, write to + the Free Software Foundation, Inc., 51 Franklin Steet, Fifth Floor, + Boston, MA 02110-1301, USA. 
+ +*/ + +#include "b43.h" +#include "leds.h" + + +static void b43_led_turn_on(struct b43_wldev *dev, u8 led_index, + bool activelow) +{ + struct b43_wl *wl = dev->wl; + unsigned long flags; + u16 ctl; + + spin_lock_irqsave(&wl->leds_lock, flags); + ctl = b43_read16(dev, B43_MMIO_GPIO_CONTROL); + if (activelow) + ctl &= ~(1 << led_index); + else + ctl |= (1 << led_index); + b43_write16(dev, B43_MMIO_GPIO_CONTROL, ctl); + spin_unlock_irqrestore(&wl->leds_lock, flags); +} + +static void b43_led_turn_off(struct b43_wldev *dev, u8 led_index, + bool activelow) +{ + struct b43_wl *wl = dev->wl; + unsigned long flags; + u16 ctl; + + spin_lock_irqsave(&wl->leds_lock, flags); + ctl = b43_read16(dev, B43_MMIO_GPIO_CONTROL); + if (activelow) + ctl |= (1 << led_index); + else + ctl &= ~(1 << led_index); + b43_write16(dev, B43_MMIO_GPIO_CONTROL, ctl); + spin_unlock_irqrestore(&wl->leds_lock, flags); +} + +/* Callback from the LED subsystem. */ +static void b43_led_brightness_set(struct led_classdev *led_dev, + enum led_brightness brightness) +{ + struct b43_led *led = container_of(led_dev, struct b43_led, led_dev); + struct b43_wldev *dev = led->dev; + bool radio_enabled; + + /* Checking the radio-enabled status here is slightly racy, + * but we want to avoid the locking overhead and we don't care + * whether the LED has the wrong state for a second. */ + radio_enabled = (dev->phy.radio_on && dev->radio_hw_enable); + + if (brightness == LED_OFF || !radio_enabled) + b43_led_turn_off(dev, led->index, led->activelow); + else + b43_led_turn_on(dev, led->index, led->activelow); +} + +static int b43_register_led(struct b43_wldev *dev, struct b43_led *led, + const char *name, char *default_trigger, + u8 led_index, bool activelow) +{ + int err; + + b43_led_turn_off(dev, led_index, activelow); + if (led->dev) + return -EEXIST; + if (!default_trigger) + return -EINVAL; + led->dev = dev; + led->index = led_index; + led->activelow = activelow; + strncpy(led->name, name, sizeof(led->name)); + + led->led_dev.name = led->name; + led->led_dev.default_trigger = default_trigger; + led->led_dev.brightness_set = b43_led_brightness_set; + + err = led_classdev_register(dev->dev->dev, &led->led_dev); + if (err) { + b43warn(dev->wl, "LEDs: Failed to register %s\n", name); + led->dev = NULL; + return err; + } + return 0; +} + +static void b43_unregister_led(struct b43_led *led) +{ + if (!led->dev) + return; + led_classdev_unregister(&led->led_dev); + b43_led_turn_off(led->dev, led->index, led->activelow); + led->dev = NULL; +} + +static void b43_map_led(struct b43_wldev *dev, + u8 led_index, + enum b43_led_behaviour behaviour, + bool activelow) +{ + struct ieee80211_hw *hw = dev->wl->hw; + char name[B43_LED_MAX_NAME_LEN + 1]; + + /* Map the b43 specific LED behaviour value to the + * generic LED triggers. 
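+ * Activity/transfer type LEDs are hooked to the mac80211 TX and RX
+ * triggers, radio type LEDs to the rfkill LED trigger, and the assoc
+ * type to the association trigger.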
*/ + switch (behaviour) { + case B43_LED_INACTIVE: + break; + case B43_LED_OFF: + b43_led_turn_off(dev, led_index, activelow); + break; + case B43_LED_ON: + b43_led_turn_on(dev, led_index, activelow); + break; + case B43_LED_ACTIVITY: + case B43_LED_TRANSFER: + case B43_LED_APTRANSFER: + snprintf(name, sizeof(name), + "b43-%s:tx", wiphy_name(hw->wiphy)); + b43_register_led(dev, &dev->led_tx, name, + ieee80211_get_tx_led_name(hw), + led_index, activelow); + snprintf(name, sizeof(name), + "b43-%s:rx", wiphy_name(hw->wiphy)); + b43_register_led(dev, &dev->led_rx, name, + ieee80211_get_rx_led_name(hw), + led_index, activelow); + break; + case B43_LED_RADIO_ALL: + case B43_LED_RADIO_A: + case B43_LED_RADIO_B: + case B43_LED_MODE_BG: + snprintf(name, sizeof(name), + "b43-%s:radio", wiphy_name(hw->wiphy)); + b43_register_led(dev, &dev->led_radio, name, + b43_rfkill_led_name(dev), + led_index, activelow); + break; + case B43_LED_WEIRD: + case B43_LED_ASSOC: + snprintf(name, sizeof(name), + "b43-%s:assoc", wiphy_name(hw->wiphy)); + b43_register_led(dev, &dev->led_assoc, name, + ieee80211_get_assoc_led_name(hw), + led_index, activelow); + break; + default: + b43warn(dev->wl, "LEDs: Unknown behaviour 0x%02X\n", + behaviour); + break; + } +} + +void b43_leds_init(struct b43_wldev *dev) +{ + struct ssb_bus *bus = dev->dev->bus; + u8 sprom[4]; + int i; + enum b43_led_behaviour behaviour; + bool activelow; + + sprom[0] = bus->sprom.r1.gpio0; + sprom[1] = bus->sprom.r1.gpio1; + sprom[2] = bus->sprom.r1.gpio2; + sprom[3] = bus->sprom.r1.gpio3; + + for (i = 0; i < 4; i++) { + if (sprom[i] == 0xFF) { + /* There is no LED information in the SPROM + * for this LED. Hardcode it here. */ + activelow = 0; + switch (i) { + case 0: + behaviour = B43_LED_ACTIVITY; + activelow = 1; + if (bus->boardinfo.vendor == PCI_VENDOR_ID_COMPAQ) + behaviour = B43_LED_RADIO_ALL; + break; + case 1: + behaviour = B43_LED_RADIO_B; + if (bus->boardinfo.vendor == PCI_VENDOR_ID_ASUSTEK) + behaviour = B43_LED_ASSOC; + break; + case 2: + behaviour = B43_LED_RADIO_A; + break; + case 3: + behaviour = B43_LED_OFF; + break; + default: + B43_WARN_ON(1); + return; + } + } else { + behaviour = sprom[i] & B43_LED_BEHAVIOUR; + activelow = !!(sprom[i] & B43_LED_ACTIVELOW); + } + b43_map_led(dev, i, behaviour, activelow); + } +} + +void b43_leds_exit(struct b43_wldev *dev) +{ + b43_unregister_led(&dev->led_tx); + b43_unregister_led(&dev->led_rx); + b43_unregister_led(&dev->led_assoc); +} diff --git a/drivers/net/wireless/b43/leds.h b/drivers/net/wireless/b43/leds.h new file mode 100644 index 0000000000000000000000000000000000000000..b8b1dd5212434d1afda597c4ab1d5624ee9dcfbe --- /dev/null +++ b/drivers/net/wireless/b43/leds.h @@ -0,0 +1,64 @@ +#ifndef B43_LEDS_H_ +#define B43_LEDS_H_ + +struct b43_wldev; + +#ifdef CONFIG_B43_LEDS + +#include +#include + + +#define B43_LED_MAX_NAME_LEN 31 + +struct b43_led { + struct b43_wldev *dev; + /* The LED class device */ + struct led_classdev led_dev; + /* The index number of the LED. */ + u8 index; + /* If activelow is true, the LED is ON if the + * bit is switched off. */ + bool activelow; + /* The unique name string for this LED device. 
*/ + char name[B43_LED_MAX_NAME_LEN + 1]; +}; + +#define B43_LED_BEHAVIOUR 0x7F +#define B43_LED_ACTIVELOW 0x80 +/* LED behaviour values */ +enum b43_led_behaviour { + B43_LED_OFF, + B43_LED_ON, + B43_LED_ACTIVITY, + B43_LED_RADIO_ALL, + B43_LED_RADIO_A, + B43_LED_RADIO_B, + B43_LED_MODE_BG, + B43_LED_TRANSFER, + B43_LED_APTRANSFER, + B43_LED_WEIRD, //FIXME + B43_LED_ASSOC, + B43_LED_INACTIVE, +}; + +void b43_leds_init(struct b43_wldev *dev); +void b43_leds_exit(struct b43_wldev *dev); + + +#else /* CONFIG_B43_LEDS */ +/* LED support disabled */ + +struct b43_led { + /* empty */ +}; + +static inline void b43_leds_init(struct b43_wldev *dev) +{ +} +static inline void b43_leds_exit(struct b43_wldev *dev) +{ +} +#endif /* CONFIG_B43_LEDS */ + +#endif /* B43_LEDS_H_ */ diff --git a/drivers/net/wireless/b43/lo.c b/drivers/net/wireless/b43/lo.c new file mode 100644 index 0000000000000000000000000000000000000000..b14a1753a0d7a7e1f0770d00aef2b3e80bbec26b --- /dev/null +++ b/drivers/net/wireless/b43/lo.c @@ -0,0 +1,1261 @@ +/* + + Broadcom B43 wireless driver + + G PHY LO (LocalOscillator) Measuring and Control routines + + Copyright (c) 2005 Martin Langer , + Copyright (c) 2005, 2006 Stefano Brivio + Copyright (c) 2005-2007 Michael Buesch + Copyright (c) 2005, 2006 Danny van Dyk + Copyright (c) 2005, 2006 Andreas Jaggi + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; see the file COPYING. If not, write to + the Free Software Foundation, Inc., 51 Franklin Steet, Fifth Floor, + Boston, MA 02110-1301, USA. + +*/ + +#include "b43.h" +#include "lo.h" +#include "phy.h" +#include "main.h" + +#include +#include + + +/* Define to 1 to always calibrate all possible LO control pairs. + * This is a workaround until we fix the partial LO calibration optimization. */ +#define B43_CALIB_ALL_LOCTLS 1 + + +/* Write the LocalOscillator Control (adjust) value-pair. */ +static void b43_lo_write(struct b43_wldev *dev, struct b43_loctl *control) +{ + struct b43_phy *phy = &dev->phy; + u16 value; + u16 reg; + + if (B43_DEBUG) { + if (unlikely(abs(control->i) > 16 || abs(control->q) > 16)) { + b43dbg(dev->wl, "Invalid LO control pair " + "(I: %d, Q: %d)\n", control->i, control->q); + dump_stack(); + return; + } + } + + value = (u8) (control->q); + value |= ((u8) (control->i)) << 8; + + reg = (phy->type == B43_PHYTYPE_B) ? 0x002F : B43_PHY_LO_CTL; + b43_phy_write(dev, reg, value); +} + +static int assert_rfatt_and_bbatt(const struct b43_rfatt *rfatt, + const struct b43_bbatt *bbatt, + struct b43_wldev *dev) +{ + int err = 0; + + /* Check the attenuation values against the LO control array sizes. 
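Out-of-range values would index past the with_padmix/no_padmix tables of the LO control structure.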
*/ + if (unlikely(rfatt->att >= B43_NR_RF)) { + b43err(dev->wl, "rfatt(%u) >= size of LO array\n", rfatt->att); + err = -EINVAL; + } + if (unlikely(bbatt->att >= B43_NR_BB)) { + b43err(dev->wl, "bbatt(%u) >= size of LO array\n", bbatt->att); + err = -EINVAL; + } + + return err; +} + +#if !B43_CALIB_ALL_LOCTLS +static +struct b43_loctl *b43_get_lo_g_ctl_nopadmix(struct b43_wldev *dev, + const struct b43_rfatt *rfatt, + const struct b43_bbatt *bbatt) +{ + struct b43_phy *phy = &dev->phy; + struct b43_txpower_lo_control *lo = phy->lo_control; + + if (assert_rfatt_and_bbatt(rfatt, bbatt, dev)) + return &(lo->no_padmix[0][0]); /* Just prevent a crash */ + return &(lo->no_padmix[bbatt->att][rfatt->att]); +} +#endif /* !B43_CALIB_ALL_LOCTLS */ + +struct b43_loctl *b43_get_lo_g_ctl(struct b43_wldev *dev, + const struct b43_rfatt *rfatt, + const struct b43_bbatt *bbatt) +{ + struct b43_phy *phy = &dev->phy; + struct b43_txpower_lo_control *lo = phy->lo_control; + + if (assert_rfatt_and_bbatt(rfatt, bbatt, dev)) + return &(lo->no_padmix[0][0]); /* Just prevent a crash */ + if (rfatt->with_padmix) + return &(lo->with_padmix[bbatt->att][rfatt->att]); + return &(lo->no_padmix[bbatt->att][rfatt->att]); +} + +/* Call a function for every possible LO control value-pair. */ +static void b43_call_for_each_loctl(struct b43_wldev *dev, + void (*func) (struct b43_wldev *, + struct b43_loctl *)) +{ + struct b43_phy *phy = &dev->phy; + struct b43_txpower_lo_control *ctl = phy->lo_control; + int i, j; + + for (i = 0; i < B43_NR_BB; i++) { + for (j = 0; j < B43_NR_RF; j++) + func(dev, &(ctl->with_padmix[i][j])); + } + for (i = 0; i < B43_NR_BB; i++) { + for (j = 0; j < B43_NR_RF; j++) + func(dev, &(ctl->no_padmix[i][j])); + } +} + +static u16 lo_b_r15_loop(struct b43_wldev *dev) +{ + int i; + u16 ret = 0; + + for (i = 0; i < 10; i++) { + b43_phy_write(dev, 0x0015, 0xAFA0); + udelay(1); + b43_phy_write(dev, 0x0015, 0xEFA0); + udelay(10); + b43_phy_write(dev, 0x0015, 0xFFA0); + udelay(40); + ret += b43_phy_read(dev, 0x002C); + } + + return ret; +} + +void b43_lo_b_measure(struct b43_wldev *dev) +{ + struct b43_phy *phy = &dev->phy; + u16 regstack[12] = { 0 }; + u16 mls; + u16 fval; + int i, j; + + regstack[0] = b43_phy_read(dev, 0x0015); + regstack[1] = b43_radio_read16(dev, 0x0052) & 0xFFF0; + + if (phy->radio_ver == 0x2053) { + regstack[2] = b43_phy_read(dev, 0x000A); + regstack[3] = b43_phy_read(dev, 0x002A); + regstack[4] = b43_phy_read(dev, 0x0035); + regstack[5] = b43_phy_read(dev, 0x0003); + regstack[6] = b43_phy_read(dev, 0x0001); + regstack[7] = b43_phy_read(dev, 0x0030); + + regstack[8] = b43_radio_read16(dev, 0x0043); + regstack[9] = b43_radio_read16(dev, 0x007A); + regstack[10] = b43_read16(dev, 0x03EC); + regstack[11] = b43_radio_read16(dev, 0x0052) & 0x00F0; + + b43_phy_write(dev, 0x0030, 0x00FF); + b43_write16(dev, 0x03EC, 0x3F3F); + b43_phy_write(dev, 0x0035, regstack[4] & 0xFF7F); + b43_radio_write16(dev, 0x007A, regstack[9] & 0xFFF0); + } + b43_phy_write(dev, 0x0015, 0xB000); + b43_phy_write(dev, 0x002B, 0x0004); + + if (phy->radio_ver == 0x2053) { + b43_phy_write(dev, 0x002B, 0x0203); + b43_phy_write(dev, 0x002A, 0x08A3); + } + + phy->minlowsig[0] = 0xFFFF; + + for (i = 0; i < 4; i++) { + b43_radio_write16(dev, 0x0052, regstack[1] | i); + lo_b_r15_loop(dev); + } + for (i = 0; i < 10; i++) { + b43_radio_write16(dev, 0x0052, regstack[1] | i); + mls = lo_b_r15_loop(dev) / 10; + if (mls < phy->minlowsig[0]) { + phy->minlowsig[0] = mls; + phy->minlowsigpos[0] = i; + } + } + b43_radio_write16(dev, 
0x0052, regstack[1] | phy->minlowsigpos[0]); + + phy->minlowsig[1] = 0xFFFF; + + for (i = -4; i < 5; i += 2) { + for (j = -4; j < 5; j += 2) { + if (j < 0) + fval = (0x0100 * i) + j + 0x0100; + else + fval = (0x0100 * i) + j; + b43_phy_write(dev, 0x002F, fval); + mls = lo_b_r15_loop(dev) / 10; + if (mls < phy->minlowsig[1]) { + phy->minlowsig[1] = mls; + phy->minlowsigpos[1] = fval; + } + } + } + phy->minlowsigpos[1] += 0x0101; + + b43_phy_write(dev, 0x002F, phy->minlowsigpos[1]); + if (phy->radio_ver == 0x2053) { + b43_phy_write(dev, 0x000A, regstack[2]); + b43_phy_write(dev, 0x002A, regstack[3]); + b43_phy_write(dev, 0x0035, regstack[4]); + b43_phy_write(dev, 0x0003, regstack[5]); + b43_phy_write(dev, 0x0001, regstack[6]); + b43_phy_write(dev, 0x0030, regstack[7]); + + b43_radio_write16(dev, 0x0043, regstack[8]); + b43_radio_write16(dev, 0x007A, regstack[9]); + + b43_radio_write16(dev, 0x0052, + (b43_radio_read16(dev, 0x0052) & 0x000F) + | regstack[11]); + + b43_write16(dev, 0x03EC, regstack[10]); + } + b43_phy_write(dev, 0x0015, regstack[0]); +} + +static u16 lo_measure_feedthrough(struct b43_wldev *dev, + u16 lna, u16 pga, u16 trsw_rx) +{ + struct b43_phy *phy = &dev->phy; + u16 rfover; + u16 feedthrough; + + if (phy->gmode) { + lna <<= B43_PHY_RFOVERVAL_LNA_SHIFT; + pga <<= B43_PHY_RFOVERVAL_PGA_SHIFT; + + B43_WARN_ON(lna & ~B43_PHY_RFOVERVAL_LNA); + B43_WARN_ON(pga & ~B43_PHY_RFOVERVAL_PGA); +/*FIXME This assertion fails B43_WARN_ON(trsw_rx & ~(B43_PHY_RFOVERVAL_TRSWRX | + B43_PHY_RFOVERVAL_BW)); +*/ + trsw_rx &= (B43_PHY_RFOVERVAL_TRSWRX | B43_PHY_RFOVERVAL_BW); + + /* Construct the RF Override Value */ + rfover = B43_PHY_RFOVERVAL_UNK; + rfover |= pga; + rfover |= lna; + rfover |= trsw_rx; + if ((dev->dev->bus->sprom.r1.boardflags_lo & B43_BFL_EXTLNA) && + phy->rev > 6) + rfover |= B43_PHY_RFOVERVAL_EXTLNA; + + b43_phy_write(dev, B43_PHY_PGACTL, 0xE300); + b43_phy_write(dev, B43_PHY_RFOVERVAL, rfover); + udelay(10); + rfover |= B43_PHY_RFOVERVAL_BW_LBW; + b43_phy_write(dev, B43_PHY_RFOVERVAL, rfover); + udelay(10); + rfover |= B43_PHY_RFOVERVAL_BW_LPF; + b43_phy_write(dev, B43_PHY_RFOVERVAL, rfover); + udelay(10); + b43_phy_write(dev, B43_PHY_PGACTL, 0xF300); + } else { + pga |= B43_PHY_PGACTL_UNKNOWN; + b43_phy_write(dev, B43_PHY_PGACTL, pga); + udelay(10); + pga |= B43_PHY_PGACTL_LOWBANDW; + b43_phy_write(dev, B43_PHY_PGACTL, pga); + udelay(10); + pga |= B43_PHY_PGACTL_LPF; + b43_phy_write(dev, B43_PHY_PGACTL, pga); + } + udelay(21); + feedthrough = b43_phy_read(dev, B43_PHY_LO_LEAKAGE); + + /* This is a good place to check if we need to relax a bit, + * as this is the main function called regularly + * in the LO calibration. */ + cond_resched(); + + return feedthrough; +} + +/* TXCTL Register and Value Table. + * Returns the "TXCTL Register". + * "value" is the "TXCTL Value". + * "pad_mix_gain" is the PAD Mixer Gain. 
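+ * For instance, a G-PHY of rev >= 2 with a rev 8 radio yields
+ * register 0x43, value 0x10 and a PAD mixer gain of 2 (see the
+ * table coded below).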
+ */ +static u16 lo_txctl_register_table(struct b43_wldev *dev, + u16 * value, u16 * pad_mix_gain) +{ + struct b43_phy *phy = &dev->phy; + u16 reg, v, padmix; + + if (phy->type == B43_PHYTYPE_B) { + v = 0x30; + if (phy->radio_rev <= 5) { + reg = 0x43; + padmix = 0; + } else { + reg = 0x52; + padmix = 5; + } + } else { + if (phy->rev >= 2 && phy->radio_rev == 8) { + reg = 0x43; + v = 0x10; + padmix = 2; + } else { + reg = 0x52; + v = 0x30; + padmix = 5; + } + } + if (value) + *value = v; + if (pad_mix_gain) + *pad_mix_gain = padmix; + + return reg; +} + +static void lo_measure_txctl_values(struct b43_wldev *dev) +{ + struct b43_phy *phy = &dev->phy; + struct b43_txpower_lo_control *lo = phy->lo_control; + u16 reg, mask; + u16 trsw_rx, pga; + u16 radio_pctl_reg; + + static const u8 tx_bias_values[] = { + 0x09, 0x08, 0x0A, 0x01, 0x00, + 0x02, 0x05, 0x04, 0x06, + }; + static const u8 tx_magn_values[] = { + 0x70, 0x40, + }; + + if (!has_loopback_gain(phy)) { + radio_pctl_reg = 6; + trsw_rx = 2; + pga = 0; + } else { + int lb_gain; /* Loopback gain (in dB) */ + + trsw_rx = 0; + lb_gain = phy->max_lb_gain / 2; + if (lb_gain > 10) { + radio_pctl_reg = 0; + pga = abs(10 - lb_gain) / 6; + pga = limit_value(pga, 0, 15); + } else { + int cmp_val; + int tmp; + + pga = 0; + cmp_val = 0x24; + if ((phy->rev >= 2) && + (phy->radio_ver == 0x2050) && (phy->radio_rev == 8)) + cmp_val = 0x3C; + tmp = lb_gain; + if ((10 - lb_gain) < cmp_val) + tmp = (10 - lb_gain); + if (tmp < 0) + tmp += 6; + else + tmp += 3; + cmp_val /= 4; + tmp /= 4; + if (tmp >= cmp_val) + radio_pctl_reg = cmp_val; + else + radio_pctl_reg = tmp; + } + } + b43_radio_write16(dev, 0x43, (b43_radio_read16(dev, 0x43) + & 0xFFF0) | radio_pctl_reg); + b43_phy_set_baseband_attenuation(dev, 2); + + reg = lo_txctl_register_table(dev, &mask, NULL); + mask = ~mask; + b43_radio_write16(dev, reg, b43_radio_read16(dev, reg) + & mask); + + if (has_tx_magnification(phy)) { + int i, j; + int feedthrough; + int min_feedth = 0xFFFF; + u8 tx_magn, tx_bias; + + for (i = 0; i < ARRAY_SIZE(tx_magn_values); i++) { + tx_magn = tx_magn_values[i]; + b43_radio_write16(dev, 0x52, + (b43_radio_read16(dev, 0x52) + & 0xFF0F) | tx_magn); + for (j = 0; j < ARRAY_SIZE(tx_bias_values); j++) { + tx_bias = tx_bias_values[j]; + b43_radio_write16(dev, 0x52, + (b43_radio_read16(dev, 0x52) + & 0xFFF0) | tx_bias); + feedthrough = + lo_measure_feedthrough(dev, 0, pga, + trsw_rx); + if (feedthrough < min_feedth) { + lo->tx_bias = tx_bias; + lo->tx_magn = tx_magn; + min_feedth = feedthrough; + } + if (lo->tx_bias == 0) + break; + } + b43_radio_write16(dev, 0x52, + (b43_radio_read16(dev, 0x52) + & 0xFF00) | lo->tx_bias | lo-> + tx_magn); + } + } else { + lo->tx_magn = 0; + lo->tx_bias = 0; + b43_radio_write16(dev, 0x52, b43_radio_read16(dev, 0x52) + & 0xFFF0); /* TX bias == 0 */ + } +} + +static void lo_read_power_vector(struct b43_wldev *dev) +{ + struct b43_phy *phy = &dev->phy; + struct b43_txpower_lo_control *lo = phy->lo_control; + u16 i; + u64 tmp; + u64 power_vector = 0; + int rf_offset, bb_offset; + struct b43_loctl *loctl; + + for (i = 0; i < 8; i += 2) { + tmp = b43_shm_read16(dev, B43_SHM_SHARED, 0x310 + i); + /* Clear the top byte. We get holes in the bitmap... */ + tmp &= 0xFF; + power_vector |= (tmp << (i * 8)); + /* Clear the vector on the device. 
*/ + b43_shm_write16(dev, B43_SHM_SHARED, 0x310 + i, 0); + } + + if (power_vector) + lo->power_vector = power_vector; + power_vector = lo->power_vector; + + for (i = 0; i < 64; i++) { + if (power_vector & ((u64) 1ULL << i)) { + /* Now figure out which b43_loctl corresponds + * to this bit. + */ + rf_offset = i / lo->rfatt_list.len; + bb_offset = i % lo->rfatt_list.len; //FIXME? + loctl = + b43_get_lo_g_ctl(dev, + &lo->rfatt_list.list[rf_offset], + &lo->bbatt_list.list[bb_offset]); + /* And mark it as "used", as the device told us + * through the bitmap it is using it. + */ + loctl->used = 1; + } + } +} + +/* 802.11/LO/GPHY/MeasuringGains */ +static void lo_measure_gain_values(struct b43_wldev *dev, + s16 max_rx_gain, int use_trsw_rx) +{ + struct b43_phy *phy = &dev->phy; + u16 tmp; + + if (max_rx_gain < 0) + max_rx_gain = 0; + + if (has_loopback_gain(phy)) { + int trsw_rx = 0; + int trsw_rx_gain; + + if (use_trsw_rx) { + trsw_rx_gain = phy->trsw_rx_gain / 2; + if (max_rx_gain >= trsw_rx_gain) { + trsw_rx_gain = max_rx_gain - trsw_rx_gain; + trsw_rx = 0x20; + } + } else + trsw_rx_gain = max_rx_gain; + if (trsw_rx_gain < 9) { + phy->lna_lod_gain = 0; + } else { + phy->lna_lod_gain = 1; + trsw_rx_gain -= 8; + } + trsw_rx_gain = limit_value(trsw_rx_gain, 0, 0x2D); + phy->pga_gain = trsw_rx_gain / 3; + if (phy->pga_gain >= 5) { + phy->pga_gain -= 5; + phy->lna_gain = 2; + } else + phy->lna_gain = 0; + } else { + phy->lna_gain = 0; + phy->trsw_rx_gain = 0x20; + if (max_rx_gain >= 0x14) { + phy->lna_lod_gain = 1; + phy->pga_gain = 2; + } else if (max_rx_gain >= 0x12) { + phy->lna_lod_gain = 1; + phy->pga_gain = 1; + } else if (max_rx_gain >= 0xF) { + phy->lna_lod_gain = 1; + phy->pga_gain = 0; + } else { + phy->lna_lod_gain = 0; + phy->pga_gain = 0; + } + } + + tmp = b43_radio_read16(dev, 0x7A); + if (phy->lna_lod_gain == 0) + tmp &= ~0x0008; + else + tmp |= 0x0008; + b43_radio_write16(dev, 0x7A, tmp); +} + +struct lo_g_saved_values { + u8 old_channel; + + /* Core registers */ + u16 reg_3F4; + u16 reg_3E2; + + /* PHY registers */ + u16 phy_lo_mask; + u16 phy_extg_01; + u16 phy_dacctl_hwpctl; + u16 phy_dacctl; + u16 phy_base_14; + u16 phy_hpwr_tssictl; + u16 phy_analogover; + u16 phy_analogoverval; + u16 phy_rfover; + u16 phy_rfoverval; + u16 phy_classctl; + u16 phy_base_3E; + u16 phy_crs0; + u16 phy_pgactl; + u16 phy_base_2A; + u16 phy_syncctl; + u16 phy_base_30; + u16 phy_base_06; + + /* Radio registers */ + u16 radio_43; + u16 radio_7A; + u16 radio_52; +}; + +static void lo_measure_setup(struct b43_wldev *dev, + struct lo_g_saved_values *sav) +{ + struct ssb_sprom *sprom = &dev->dev->bus->sprom; + struct b43_phy *phy = &dev->phy; + struct b43_txpower_lo_control *lo = phy->lo_control; + u16 tmp; + + if (b43_has_hardware_pctl(phy)) { + sav->phy_lo_mask = b43_phy_read(dev, B43_PHY_LO_MASK); + sav->phy_extg_01 = b43_phy_read(dev, B43_PHY_EXTG(0x01)); + sav->phy_dacctl_hwpctl = b43_phy_read(dev, B43_PHY_DACCTL); + sav->phy_base_14 = b43_phy_read(dev, B43_PHY_BASE(0x14)); + sav->phy_hpwr_tssictl = b43_phy_read(dev, B43_PHY_HPWR_TSSICTL); + + b43_phy_write(dev, B43_PHY_HPWR_TSSICTL, + b43_phy_read(dev, B43_PHY_HPWR_TSSICTL) + | 0x100); + b43_phy_write(dev, B43_PHY_EXTG(0x01), + b43_phy_read(dev, B43_PHY_EXTG(0x01)) + | 0x40); + b43_phy_write(dev, B43_PHY_DACCTL, + b43_phy_read(dev, B43_PHY_DACCTL) + | 0x40); + b43_phy_write(dev, B43_PHY_BASE(0x14), + b43_phy_read(dev, B43_PHY_BASE(0x14)) + | 0x200); + } + if (phy->type == B43_PHYTYPE_B && + phy->radio_ver == 0x2050 && phy->radio_rev < 6) { + 
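+ /* Extra baseband register setup for B-PHYs with an old (rev < 6) 2050 radio. */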
b43_phy_write(dev, B43_PHY_BASE(0x16), 0x410); + b43_phy_write(dev, B43_PHY_BASE(0x17), 0x820); + } + if (!lo->rebuild && b43_has_hardware_pctl(phy)) + lo_read_power_vector(dev); + if (phy->rev >= 2) { + sav->phy_analogover = b43_phy_read(dev, B43_PHY_ANALOGOVER); + sav->phy_analogoverval = + b43_phy_read(dev, B43_PHY_ANALOGOVERVAL); + sav->phy_rfover = b43_phy_read(dev, B43_PHY_RFOVER); + sav->phy_rfoverval = b43_phy_read(dev, B43_PHY_RFOVERVAL); + sav->phy_classctl = b43_phy_read(dev, B43_PHY_CLASSCTL); + sav->phy_base_3E = b43_phy_read(dev, B43_PHY_BASE(0x3E)); + sav->phy_crs0 = b43_phy_read(dev, B43_PHY_CRS0); + + b43_phy_write(dev, B43_PHY_CLASSCTL, + b43_phy_read(dev, B43_PHY_CLASSCTL) + & 0xFFFC); + b43_phy_write(dev, B43_PHY_CRS0, b43_phy_read(dev, B43_PHY_CRS0) + & 0x7FFF); + b43_phy_write(dev, B43_PHY_ANALOGOVER, + b43_phy_read(dev, B43_PHY_ANALOGOVER) + | 0x0003); + b43_phy_write(dev, B43_PHY_ANALOGOVERVAL, + b43_phy_read(dev, B43_PHY_ANALOGOVERVAL) + & 0xFFFC); + if (phy->type == B43_PHYTYPE_G) { + if ((phy->rev >= 7) && + (sprom->r1.boardflags_lo & B43_BFL_EXTLNA)) { + b43_phy_write(dev, B43_PHY_RFOVER, 0x933); + } else { + b43_phy_write(dev, B43_PHY_RFOVER, 0x133); + } + } else { + b43_phy_write(dev, B43_PHY_RFOVER, 0); + } + b43_phy_write(dev, B43_PHY_BASE(0x3E), 0); + } + sav->reg_3F4 = b43_read16(dev, 0x3F4); + sav->reg_3E2 = b43_read16(dev, 0x3E2); + sav->radio_43 = b43_radio_read16(dev, 0x43); + sav->radio_7A = b43_radio_read16(dev, 0x7A); + sav->phy_pgactl = b43_phy_read(dev, B43_PHY_PGACTL); + sav->phy_base_2A = b43_phy_read(dev, B43_PHY_BASE(0x2A)); + sav->phy_syncctl = b43_phy_read(dev, B43_PHY_SYNCCTL); + sav->phy_dacctl = b43_phy_read(dev, B43_PHY_DACCTL); + + if (!has_tx_magnification(phy)) { + sav->radio_52 = b43_radio_read16(dev, 0x52); + sav->radio_52 &= 0x00F0; + } + if (phy->type == B43_PHYTYPE_B) { + sav->phy_base_30 = b43_phy_read(dev, B43_PHY_BASE(0x30)); + sav->phy_base_06 = b43_phy_read(dev, B43_PHY_BASE(0x06)); + b43_phy_write(dev, B43_PHY_BASE(0x30), 0x00FF); + b43_phy_write(dev, B43_PHY_BASE(0x06), 0x3F3F); + } else { + b43_write16(dev, 0x3E2, b43_read16(dev, 0x3E2) + | 0x8000); + } + b43_write16(dev, 0x3F4, b43_read16(dev, 0x3F4) + & 0xF000); + + tmp = + (phy->type == B43_PHYTYPE_G) ? 
B43_PHY_LO_MASK : B43_PHY_BASE(0x2E); + b43_phy_write(dev, tmp, 0x007F); + + tmp = sav->phy_syncctl; + b43_phy_write(dev, B43_PHY_SYNCCTL, tmp & 0xFF7F); + tmp = sav->radio_7A; + b43_radio_write16(dev, 0x007A, tmp & 0xFFF0); + + b43_phy_write(dev, B43_PHY_BASE(0x2A), 0x8A3); + if (phy->type == B43_PHYTYPE_G || + (phy->type == B43_PHYTYPE_B && + phy->radio_ver == 0x2050 && phy->radio_rev >= 6)) { + b43_phy_write(dev, B43_PHY_BASE(0x2B), 0x1003); + } else + b43_phy_write(dev, B43_PHY_BASE(0x2B), 0x0802); + if (phy->rev >= 2) + b43_dummy_transmission(dev); + b43_radio_selectchannel(dev, 6, 0); + b43_radio_read16(dev, 0x51); /* dummy read */ + if (phy->type == B43_PHYTYPE_G) + b43_phy_write(dev, B43_PHY_BASE(0x2F), 0); + if (lo->rebuild) + lo_measure_txctl_values(dev); + if (phy->type == B43_PHYTYPE_G && phy->rev >= 3) { + b43_phy_write(dev, B43_PHY_LO_MASK, 0xC078); + } else { + if (phy->type == B43_PHYTYPE_B) + b43_phy_write(dev, B43_PHY_BASE(0x2E), 0x8078); + else + b43_phy_write(dev, B43_PHY_LO_MASK, 0x8078); + } +} + +static void lo_measure_restore(struct b43_wldev *dev, + struct lo_g_saved_values *sav) +{ + struct b43_phy *phy = &dev->phy; + struct b43_txpower_lo_control *lo = phy->lo_control; + u16 tmp; + + if (phy->rev >= 2) { + b43_phy_write(dev, B43_PHY_PGACTL, 0xE300); + tmp = (phy->pga_gain << 8); + b43_phy_write(dev, B43_PHY_RFOVERVAL, tmp | 0xA0); + udelay(5); + b43_phy_write(dev, B43_PHY_RFOVERVAL, tmp | 0xA2); + udelay(2); + b43_phy_write(dev, B43_PHY_RFOVERVAL, tmp | 0xA3); + } else { + tmp = (phy->pga_gain | 0xEFA0); + b43_phy_write(dev, B43_PHY_PGACTL, tmp); + } + if (b43_has_hardware_pctl(phy)) { + b43_gphy_dc_lt_init(dev); + } else { + if (lo->rebuild) + b43_lo_g_adjust_to(dev, 3, 2, 0); + else + b43_lo_g_adjust(dev); + } + if (phy->type == B43_PHYTYPE_G) { + if (phy->rev >= 3) + b43_phy_write(dev, B43_PHY_BASE(0x2E), 0xC078); + else + b43_phy_write(dev, B43_PHY_BASE(0x2E), 0x8078); + if (phy->rev >= 2) + b43_phy_write(dev, B43_PHY_BASE(0x2F), 0x0202); + else + b43_phy_write(dev, B43_PHY_BASE(0x2F), 0x0101); + } + b43_write16(dev, 0x3F4, sav->reg_3F4); + b43_phy_write(dev, B43_PHY_PGACTL, sav->phy_pgactl); + b43_phy_write(dev, B43_PHY_BASE(0x2A), sav->phy_base_2A); + b43_phy_write(dev, B43_PHY_SYNCCTL, sav->phy_syncctl); + b43_phy_write(dev, B43_PHY_DACCTL, sav->phy_dacctl); + b43_radio_write16(dev, 0x43, sav->radio_43); + b43_radio_write16(dev, 0x7A, sav->radio_7A); + if (!has_tx_magnification(phy)) { + tmp = sav->radio_52; + b43_radio_write16(dev, 0x52, (b43_radio_read16(dev, 0x52) + & 0xFF0F) | tmp); + } + b43_write16(dev, 0x3E2, sav->reg_3E2); + if (phy->type == B43_PHYTYPE_B && + phy->radio_ver == 0x2050 && phy->radio_rev <= 5) { + b43_phy_write(dev, B43_PHY_BASE(0x30), sav->phy_base_30); + b43_phy_write(dev, B43_PHY_BASE(0x06), sav->phy_base_06); + } + if (phy->rev >= 2) { + b43_phy_write(dev, B43_PHY_ANALOGOVER, sav->phy_analogover); + b43_phy_write(dev, B43_PHY_ANALOGOVERVAL, + sav->phy_analogoverval); + b43_phy_write(dev, B43_PHY_CLASSCTL, sav->phy_classctl); + b43_phy_write(dev, B43_PHY_RFOVER, sav->phy_rfover); + b43_phy_write(dev, B43_PHY_RFOVERVAL, sav->phy_rfoverval); + b43_phy_write(dev, B43_PHY_BASE(0x3E), sav->phy_base_3E); + b43_phy_write(dev, B43_PHY_CRS0, sav->phy_crs0); + } + if (b43_has_hardware_pctl(phy)) { + tmp = (sav->phy_lo_mask & 0xBFFF); + b43_phy_write(dev, B43_PHY_LO_MASK, tmp); + b43_phy_write(dev, B43_PHY_EXTG(0x01), sav->phy_extg_01); + b43_phy_write(dev, B43_PHY_DACCTL, sav->phy_dacctl_hwpctl); + b43_phy_write(dev, B43_PHY_BASE(0x14), 
sav->phy_base_14); + b43_phy_write(dev, B43_PHY_HPWR_TSSICTL, sav->phy_hpwr_tssictl); + } + b43_radio_selectchannel(dev, sav->old_channel, 1); +} + +struct b43_lo_g_statemachine { + int current_state; + int nr_measured; + int state_val_multiplier; + u16 lowest_feedth; + struct b43_loctl min_loctl; +}; + +/* Loop over each possible value in this state. */ +static int lo_probe_possible_loctls(struct b43_wldev *dev, + struct b43_loctl *probe_loctl, + struct b43_lo_g_statemachine *d) +{ + struct b43_phy *phy = &dev->phy; + struct b43_txpower_lo_control *lo = phy->lo_control; + struct b43_loctl test_loctl; + struct b43_loctl orig_loctl; + struct b43_loctl prev_loctl = { + .i = -100, + .q = -100, + }; + int i; + int begin, end; + int found_lower = 0; + u16 feedth; + + static const struct b43_loctl modifiers[] = { + {.i = 1,.q = 1,}, + {.i = 1,.q = 0,}, + {.i = 1,.q = -1,}, + {.i = 0,.q = -1,}, + {.i = -1,.q = -1,}, + {.i = -1,.q = 0,}, + {.i = -1,.q = 1,}, + {.i = 0,.q = 1,}, + }; + + if (d->current_state == 0) { + begin = 1; + end = 8; + } else if (d->current_state % 2 == 0) { + begin = d->current_state - 1; + end = d->current_state + 1; + } else { + begin = d->current_state - 2; + end = d->current_state + 2; + } + if (begin < 1) + begin += 8; + if (end > 8) + end -= 8; + + memcpy(&orig_loctl, probe_loctl, sizeof(struct b43_loctl)); + i = begin; + d->current_state = i; + while (1) { + B43_WARN_ON(!(i >= 1 && i <= 8)); + memcpy(&test_loctl, &orig_loctl, sizeof(struct b43_loctl)); + test_loctl.i += modifiers[i - 1].i * d->state_val_multiplier; + test_loctl.q += modifiers[i - 1].q * d->state_val_multiplier; + if ((test_loctl.i != prev_loctl.i || + test_loctl.q != prev_loctl.q) && + (abs(test_loctl.i) <= 16 && abs(test_loctl.q) <= 16)) { + b43_lo_write(dev, &test_loctl); + feedth = lo_measure_feedthrough(dev, phy->lna_gain, + phy->pga_gain, + phy->trsw_rx_gain); + if (feedth < d->lowest_feedth) { + memcpy(probe_loctl, &test_loctl, + sizeof(struct b43_loctl)); + found_lower = 1; + d->lowest_feedth = feedth; + if ((d->nr_measured < 2) && + (!has_loopback_gain(phy) || lo->rebuild)) + break; + } + } + memcpy(&prev_loctl, &test_loctl, sizeof(prev_loctl)); + if (i == end) + break; + if (i == 8) + i = 1; + else + i++; + d->current_state = i; + } + + return found_lower; +} + +static void lo_probe_loctls_statemachine(struct b43_wldev *dev, + struct b43_loctl *loctl, + int *max_rx_gain) +{ + struct b43_phy *phy = &dev->phy; + struct b43_txpower_lo_control *lo = phy->lo_control; + struct b43_lo_g_statemachine d; + u16 feedth; + int found_lower; + struct b43_loctl probe_loctl; + int max_repeat = 1, repeat_cnt = 0; + + d.nr_measured = 0; + d.state_val_multiplier = 1; + if (has_loopback_gain(phy) && !lo->rebuild) + d.state_val_multiplier = 3; + + memcpy(&d.min_loctl, loctl, sizeof(struct b43_loctl)); + if (has_loopback_gain(phy) && lo->rebuild) + max_repeat = 4; + do { + b43_lo_write(dev, &d.min_loctl); + feedth = lo_measure_feedthrough(dev, phy->lna_gain, + phy->pga_gain, + phy->trsw_rx_gain); + if (!lo->rebuild && feedth < 0x258) { + if (feedth >= 0x12C) + *max_rx_gain += 6; + else + *max_rx_gain += 3; + feedth = lo_measure_feedthrough(dev, phy->lna_gain, + phy->pga_gain, + phy->trsw_rx_gain); + } + d.lowest_feedth = feedth; + + d.current_state = 0; + do { + B43_WARN_ON(! 
+ (d.current_state >= 0 + && d.current_state <= 8)); + memcpy(&probe_loctl, &d.min_loctl, + sizeof(struct b43_loctl)); + found_lower = + lo_probe_possible_loctls(dev, &probe_loctl, &d); + if (!found_lower) + break; + if ((probe_loctl.i == d.min_loctl.i) && + (probe_loctl.q == d.min_loctl.q)) + break; + memcpy(&d.min_loctl, &probe_loctl, + sizeof(struct b43_loctl)); + d.nr_measured++; + } while (d.nr_measured < 24); + memcpy(loctl, &d.min_loctl, sizeof(struct b43_loctl)); + + if (has_loopback_gain(phy)) { + if (d.lowest_feedth > 0x1194) + *max_rx_gain -= 6; + else if (d.lowest_feedth < 0x5DC) + *max_rx_gain += 3; + if (repeat_cnt == 0) { + if (d.lowest_feedth <= 0x5DC) { + d.state_val_multiplier = 1; + repeat_cnt++; + } else + d.state_val_multiplier = 2; + } else if (repeat_cnt == 2) + d.state_val_multiplier = 1; + } + lo_measure_gain_values(dev, *max_rx_gain, + has_loopback_gain(phy)); + } while (++repeat_cnt < max_repeat); +} + +#if B43_CALIB_ALL_LOCTLS +static const struct b43_rfatt b43_full_rfatt_list_items[] = { + { .att = 0, .with_padmix = 0, }, + { .att = 1, .with_padmix = 0, }, + { .att = 2, .with_padmix = 0, }, + { .att = 3, .with_padmix = 0, }, + { .att = 4, .with_padmix = 0, }, + { .att = 5, .with_padmix = 0, }, + { .att = 6, .with_padmix = 0, }, + { .att = 7, .with_padmix = 0, }, + { .att = 8, .with_padmix = 0, }, + { .att = 9, .with_padmix = 0, }, + { .att = 10, .with_padmix = 0, }, + { .att = 11, .with_padmix = 0, }, + { .att = 12, .with_padmix = 0, }, + { .att = 13, .with_padmix = 0, }, + { .att = 14, .with_padmix = 0, }, + { .att = 15, .with_padmix = 0, }, + { .att = 0, .with_padmix = 1, }, + { .att = 1, .with_padmix = 1, }, + { .att = 2, .with_padmix = 1, }, + { .att = 3, .with_padmix = 1, }, + { .att = 4, .with_padmix = 1, }, + { .att = 5, .with_padmix = 1, }, + { .att = 6, .with_padmix = 1, }, + { .att = 7, .with_padmix = 1, }, + { .att = 8, .with_padmix = 1, }, + { .att = 9, .with_padmix = 1, }, + { .att = 10, .with_padmix = 1, }, + { .att = 11, .with_padmix = 1, }, + { .att = 12, .with_padmix = 1, }, + { .att = 13, .with_padmix = 1, }, + { .att = 14, .with_padmix = 1, }, + { .att = 15, .with_padmix = 1, }, +}; +static const struct b43_rfatt_list b43_full_rfatt_list = { + .list = b43_full_rfatt_list_items, + .len = ARRAY_SIZE(b43_full_rfatt_list_items), +}; + +static const struct b43_bbatt b43_full_bbatt_list_items[] = { + { .att = 0, }, + { .att = 1, }, + { .att = 2, }, + { .att = 3, }, + { .att = 4, }, + { .att = 5, }, + { .att = 6, }, + { .att = 7, }, + { .att = 8, }, + { .att = 9, }, + { .att = 10, }, + { .att = 11, }, +}; +static const struct b43_bbatt_list b43_full_bbatt_list = { + .list = b43_full_bbatt_list_items, + .len = ARRAY_SIZE(b43_full_bbatt_list_items), +}; +#endif /* B43_CALIB_ALL_LOCTLS */ + +static void lo_measure(struct b43_wldev *dev) +{ + struct b43_phy *phy = &dev->phy; + struct b43_txpower_lo_control *lo = phy->lo_control; + struct b43_loctl loctl = { + .i = 0, + .q = 0, + }; + struct b43_loctl *ploctl; + int max_rx_gain; + int rfidx, bbidx; + const struct b43_bbatt_list *bbatt_list; + const struct b43_rfatt_list *rfatt_list; + + /* Values from the "TXCTL Register and Value Table" */ + u16 txctl_reg; + u16 txctl_value; + u16 pad_mix_gain; + + bbatt_list = &lo->bbatt_list; + rfatt_list = &lo->rfatt_list; +#if B43_CALIB_ALL_LOCTLS + bbatt_list = &b43_full_bbatt_list; + rfatt_list = &b43_full_rfatt_list; +#endif + + txctl_reg = lo_txctl_register_table(dev, &txctl_value, &pad_mix_gain); + + for (rfidx = 0; rfidx < rfatt_list->len; rfidx++) { + + 
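+		/* Program this RF attenuation value into the low nibble of
+		 * radio register 0x43 and set or clear the TX-control (PAD
+		 * mixer) bit according to with_padmix, then walk all baseband
+		 * attenuation values for this RF setting. */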
b43_radio_write16(dev, 0x43, (b43_radio_read16(dev, 0x43) + & 0xFFF0) | + rfatt_list->list[rfidx].att); + b43_radio_write16(dev, txctl_reg, + (b43_radio_read16(dev, txctl_reg) + & ~txctl_value) + | (rfatt_list->list[rfidx].with_padmix ? + txctl_value : 0)); + + for (bbidx = 0; bbidx < bbatt_list->len; bbidx++) { + if (lo->rebuild) { +#if B43_CALIB_ALL_LOCTLS + ploctl = b43_get_lo_g_ctl(dev, + &rfatt_list->list[rfidx], + &bbatt_list->list[bbidx]); +#else + ploctl = b43_get_lo_g_ctl_nopadmix(dev, + &rfatt_list-> + list[rfidx], + &bbatt_list-> + list[bbidx]); +#endif + } else { + ploctl = b43_get_lo_g_ctl(dev, + &rfatt_list->list[rfidx], + &bbatt_list->list[bbidx]); + if (!ploctl->used) + continue; + } + memcpy(&loctl, ploctl, sizeof(loctl)); + loctl.i = 0; + loctl.q = 0; + + max_rx_gain = rfatt_list->list[rfidx].att * 2; + max_rx_gain += bbatt_list->list[bbidx].att / 2; + if (rfatt_list->list[rfidx].with_padmix) + max_rx_gain -= pad_mix_gain; + if (has_loopback_gain(phy)) + max_rx_gain += phy->max_lb_gain; + lo_measure_gain_values(dev, max_rx_gain, + has_loopback_gain(phy)); + + b43_phy_set_baseband_attenuation(dev, + bbatt_list->list[bbidx].att); + lo_probe_loctls_statemachine(dev, &loctl, &max_rx_gain); + if (phy->type == B43_PHYTYPE_B) { + loctl.i++; + loctl.q++; + } + b43_loctl_set_calibrated(&loctl, 1); + memcpy(ploctl, &loctl, sizeof(loctl)); + } + } +} + +#if B43_DEBUG +static void do_validate_loctl(struct b43_wldev *dev, struct b43_loctl *control) +{ + const int is_initializing = (b43_status(dev) == B43_STAT_UNINIT); + int i = control->i; + int q = control->q; + + if (b43_loctl_is_calibrated(control)) { + if ((abs(i) > 16) || (abs(q) > 16)) + goto error; + } else { + if (control->used) + goto error; + if (dev->phy.lo_control->rebuild) { + control->i = 0; + control->q = 0; + if ((i != B43_LOCTL_POISON) || + (q != B43_LOCTL_POISON)) + goto error; + } + } + if (is_initializing && control->used) + goto error; + + return; +error: + b43err(dev->wl, "LO control pair validation failed " + "(I: %d, Q: %d, used %u, calib: %u, initing: %d)\n", + i, q, control->used, + b43_loctl_is_calibrated(control), + is_initializing); +} + +static void validate_all_loctls(struct b43_wldev *dev) +{ + b43_call_for_each_loctl(dev, do_validate_loctl); +} + +static void do_reset_calib(struct b43_wldev *dev, struct b43_loctl *control) +{ + if (dev->phy.lo_control->rebuild || + control->used) { + b43_loctl_set_calibrated(control, 0); + control->i = B43_LOCTL_POISON; + control->q = B43_LOCTL_POISON; + } +} + +static void reset_all_loctl_calibration_states(struct b43_wldev *dev) +{ + b43_call_for_each_loctl(dev, do_reset_calib); +} + +#else /* B43_DEBUG */ +static inline void validate_all_loctls(struct b43_wldev *dev) { } +static inline void reset_all_loctl_calibration_states(struct b43_wldev *dev) { } +#endif /* B43_DEBUG */ + +void b43_lo_g_measure(struct b43_wldev *dev) +{ + struct b43_phy *phy = &dev->phy; + struct lo_g_saved_values uninitialized_var(sav); + + B43_WARN_ON((phy->type != B43_PHYTYPE_B) && + (phy->type != B43_PHYTYPE_G)); + + sav.old_channel = phy->channel; + lo_measure_setup(dev, &sav); + reset_all_loctl_calibration_states(dev); + lo_measure(dev); + lo_measure_restore(dev, &sav); + + validate_all_loctls(dev); + + phy->lo_control->lo_measured = 1; + phy->lo_control->rebuild = 0; +} + +#if B43_DEBUG +static void validate_loctl_calibration(struct b43_wldev *dev, + struct b43_loctl *loctl, + struct b43_rfatt *rfatt, + struct b43_bbatt *bbatt) +{ + if (b43_loctl_is_calibrated(loctl)) + return; + if 
(!dev->phy.lo_control->lo_measured) { + /* On init we set the attenuation values before we + * calibrated the LO. I guess that's OK. */ + return; + } + b43err(dev->wl, "Adjusting Local Oscillator to an uncalibrated " + "control pair: rfatt=%u,%spadmix bbatt=%u\n", + rfatt->att, + (rfatt->with_padmix) ? "" : "no-", + bbatt->att); +} +#else +static inline void validate_loctl_calibration(struct b43_wldev *dev, + struct b43_loctl *loctl, + struct b43_rfatt *rfatt, + struct b43_bbatt *bbatt) +{ +} +#endif + +static inline void fixup_rfatt_for_txcontrol(struct b43_rfatt *rf, + u8 tx_control) +{ + if (tx_control & B43_TXCTL_TXMIX) { + if (rf->att < 5) + rf->att = 4; + } +} + +void b43_lo_g_adjust(struct b43_wldev *dev) +{ + struct b43_phy *phy = &dev->phy; + struct b43_rfatt rf; + struct b43_loctl *loctl; + + memcpy(&rf, &phy->rfatt, sizeof(rf)); + fixup_rfatt_for_txcontrol(&rf, phy->tx_control); + + loctl = b43_get_lo_g_ctl(dev, &rf, &phy->bbatt); + validate_loctl_calibration(dev, loctl, &rf, &phy->bbatt); + b43_lo_write(dev, loctl); +} + +void b43_lo_g_adjust_to(struct b43_wldev *dev, + u16 rfatt, u16 bbatt, u16 tx_control) +{ + struct b43_rfatt rf; + struct b43_bbatt bb; + struct b43_loctl *loctl; + + memset(&rf, 0, sizeof(rf)); + memset(&bb, 0, sizeof(bb)); + rf.att = rfatt; + bb.att = bbatt; + fixup_rfatt_for_txcontrol(&rf, tx_control); + loctl = b43_get_lo_g_ctl(dev, &rf, &bb); + validate_loctl_calibration(dev, loctl, &rf, &bb); + b43_lo_write(dev, loctl); +} + +static void do_mark_unused(struct b43_wldev *dev, struct b43_loctl *control) +{ + control->used = 0; +} + +void b43_lo_g_ctl_mark_all_unused(struct b43_wldev *dev) +{ + struct b43_phy *phy = &dev->phy; + struct b43_txpower_lo_control *lo = phy->lo_control; + + b43_call_for_each_loctl(dev, do_mark_unused); + lo->rebuild = 1; +} + +void b43_lo_g_ctl_mark_cur_used(struct b43_wldev *dev) +{ + struct b43_phy *phy = &dev->phy; + struct b43_rfatt rf; + + memcpy(&rf, &phy->rfatt, sizeof(rf)); + fixup_rfatt_for_txcontrol(&rf, phy->tx_control); + + b43_get_lo_g_ctl(dev, &rf, &phy->bbatt)->used = 1; +} diff --git a/drivers/net/wireless/b43/lo.h b/drivers/net/wireless/b43/lo.h new file mode 100644 index 0000000000000000000000000000000000000000..455615d1f8c657698b77dfbfbd14af8862e07438 --- /dev/null +++ b/drivers/net/wireless/b43/lo.h @@ -0,0 +1,112 @@ +#ifndef B43_LO_H_ +#define B43_LO_H_ + +#include "phy.h" + +struct b43_wldev; + +/* Local Oscillator control value-pair. */ +struct b43_loctl { + /* Control values. */ + s8 i; + s8 q; + /* "Used by hardware" flag. */ + bool used; +#ifdef CONFIG_B43_DEBUG + /* Is this lo-control-array entry calibrated? */ + bool calibrated; +#endif +}; + +/* Debugging: Poison value for i and q values. */ +#define B43_LOCTL_POISON 111 + +/* loctl->calibrated debugging mechanism */ +#ifdef CONFIG_B43_DEBUG +static inline void b43_loctl_set_calibrated(struct b43_loctl *loctl, + bool calibrated) +{ + loctl->calibrated = calibrated; +} +static inline bool b43_loctl_is_calibrated(struct b43_loctl *loctl) +{ + return loctl->calibrated; +} +#else +static inline void b43_loctl_set_calibrated(struct b43_loctl *loctl, + bool calibrated) +{ +} +static inline bool b43_loctl_is_calibrated(struct b43_loctl *loctl) +{ + return 1; +} +#endif + +/* TX Power LO Control Array. + * Value-pairs to adjust the LocalOscillator are stored + * in this structure. + * There are two different set of values. One for "Flag is Set" + * and one for "Flag is Unset". + * By "Flag" the flag in struct b43_rfatt is meant. 
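+ * (This flag selects between the with_padmix and the
+ * no_padmix table below.)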
+ * The Value arrays are two-dimensional. The first index + * is the baseband attenuation and the second index + * is the radio attenuation. + * Use b43_get_lo_g_ctl() to retrieve a value from the lists. + */ +struct b43_txpower_lo_control { +#define B43_NR_BB 12 +#define B43_NR_RF 16 + /* LO Control values, with PAD Mixer */ + struct b43_loctl with_padmix[B43_NR_BB][B43_NR_RF]; + /* LO Control values, without PAD Mixer */ + struct b43_loctl no_padmix[B43_NR_BB][B43_NR_RF]; + + /* Flag to indicate a complete rebuild of the two tables above + * to the LO measuring code. */ + bool rebuild; + + /* Lists of valid RF and BB attenuation values for this device. */ + struct b43_rfatt_list rfatt_list; + struct b43_bbatt_list bbatt_list; + + /* Current TX Bias value */ + u8 tx_bias; + /* Current TX Magnification Value (if used by the device) */ + u8 tx_magn; + + /* GPHY LO is measured. */ + bool lo_measured; + + /* Saved device PowerVector */ + u64 power_vector; +}; + +/* Measure the BPHY Local Oscillator. */ +void b43_lo_b_measure(struct b43_wldev *dev); +/* Measure the BPHY/GPHY Local Oscillator. */ +void b43_lo_g_measure(struct b43_wldev *dev); + +/* Adjust the Local Oscillator to the saved attenuation + * and txctl values. + */ +void b43_lo_g_adjust(struct b43_wldev *dev); +/* Adjust to specific values. */ +void b43_lo_g_adjust_to(struct b43_wldev *dev, + u16 rfatt, u16 bbatt, u16 tx_control); + +/* Mark all possible b43_lo_g_ctl as "unused" */ +void b43_lo_g_ctl_mark_all_unused(struct b43_wldev *dev); +/* Mark the b43_lo_g_ctl corresponding to the current + * attenuation values as used. + */ +void b43_lo_g_ctl_mark_cur_used(struct b43_wldev *dev); + +/* Get a reference to a LO Control value pair in the + * TX Power LO Control Array. + */ +struct b43_loctl *b43_get_lo_g_ctl(struct b43_wldev *dev, + const struct b43_rfatt *rfatt, + const struct b43_bbatt *bbatt); + +#endif /* B43_LO_H_ */ diff --git a/drivers/net/wireless/b43/main.c b/drivers/net/wireless/b43/main.c new file mode 100644 index 0000000000000000000000000000000000000000..c141a264ac45bfb16e323ee8b1bbb2b08a727666 --- /dev/null +++ b/drivers/net/wireless/b43/main.c @@ -0,0 +1,4070 @@ +/* + + Broadcom B43 wireless driver + + Copyright (c) 2005 Martin Langer + Copyright (c) 2005 Stefano Brivio + Copyright (c) 2005, 2006 Michael Buesch + Copyright (c) 2005 Danny van Dyk + Copyright (c) 2005 Andreas Jaggi + + Some parts of the code in this file are derived from the ipw2200 + driver Copyright(c) 2003 - 2004 Intel Corporation. + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; see the file COPYING. If not, write to + the Free Software Foundation, Inc., 51 Franklin Steet, Fifth Floor, + Boston, MA 02110-1301, USA. 
+ +*/ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "b43.h" +#include "main.h" +#include "debugfs.h" +#include "phy.h" +#include "dma.h" +#include "pio.h" +#include "sysfs.h" +#include "xmit.h" +#include "sysfs.h" +#include "lo.h" +#include "pcmcia.h" + +MODULE_DESCRIPTION("Broadcom B43 wireless driver"); +MODULE_AUTHOR("Martin Langer"); +MODULE_AUTHOR("Stefano Brivio"); +MODULE_AUTHOR("Michael Buesch"); +MODULE_LICENSE("GPL"); + +extern char *nvram_get(char *name); + +#if defined(CONFIG_B43_DMA) && defined(CONFIG_B43_PIO) +static int modparam_pio; +module_param_named(pio, modparam_pio, int, 0444); +MODULE_PARM_DESC(pio, "enable(1) / disable(0) PIO mode"); +#elif defined(CONFIG_B43_DMA) +# define modparam_pio 0 +#elif defined(CONFIG_B43_PIO) +# define modparam_pio 1 +#endif + +static int modparam_bad_frames_preempt; +module_param_named(bad_frames_preempt, modparam_bad_frames_preempt, int, 0444); +MODULE_PARM_DESC(bad_frames_preempt, + "enable(1) / disable(0) Bad Frames Preemption"); + +static int modparam_short_retry = B43_DEFAULT_SHORT_RETRY_LIMIT; +module_param_named(short_retry, modparam_short_retry, int, 0444); +MODULE_PARM_DESC(short_retry, "Short-Retry-Limit (0 - 15)"); + +static int modparam_long_retry = B43_DEFAULT_LONG_RETRY_LIMIT; +module_param_named(long_retry, modparam_long_retry, int, 0444); +MODULE_PARM_DESC(long_retry, "Long-Retry-Limit (0 - 15)"); + +static char modparam_fwpostfix[16]; +module_param_string(fwpostfix, modparam_fwpostfix, 16, 0444); +MODULE_PARM_DESC(fwpostfix, "Postfix for the .fw files to load."); + +static int modparam_hwpctl; +module_param_named(hwpctl, modparam_hwpctl, int, 0444); +MODULE_PARM_DESC(hwpctl, "Enable hardware-side power control (default off)"); + +static int modparam_nohwcrypt; +module_param_named(nohwcrypt, modparam_nohwcrypt, int, 0444); +MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption."); + +static const struct ssb_device_id b43_ssb_tbl[] = { + SSB_DEVICE(SSB_VENDOR_BROADCOM, SSB_DEV_80211, 5), + SSB_DEVICE(SSB_VENDOR_BROADCOM, SSB_DEV_80211, 6), + SSB_DEVICE(SSB_VENDOR_BROADCOM, SSB_DEV_80211, 7), + SSB_DEVICE(SSB_VENDOR_BROADCOM, SSB_DEV_80211, 9), + SSB_DEVICE(SSB_VENDOR_BROADCOM, SSB_DEV_80211, 10), + SSB_DEVTABLE_END +}; + +MODULE_DEVICE_TABLE(ssb, b43_ssb_tbl); + +/* Channel and ratetables are shared for all devices. + * They can't be const, because ieee80211 puts some precalculated + * data in there. 
This data is the same for all devices, so we don't + * get concurrency issues */ +#define RATETAB_ENT(_rateid, _flags) \ + { \ + .rate = B43_RATE_TO_BASE100KBPS(_rateid), \ + .val = (_rateid), \ + .val2 = (_rateid), \ + .flags = (_flags), \ + } +static struct ieee80211_rate __b43_ratetable[] = { + RATETAB_ENT(B43_CCK_RATE_1MB, IEEE80211_RATE_CCK), + RATETAB_ENT(B43_CCK_RATE_2MB, IEEE80211_RATE_CCK_2), + RATETAB_ENT(B43_CCK_RATE_5MB, IEEE80211_RATE_CCK_2), + RATETAB_ENT(B43_CCK_RATE_11MB, IEEE80211_RATE_CCK_2), + RATETAB_ENT(B43_OFDM_RATE_6MB, IEEE80211_RATE_OFDM), + RATETAB_ENT(B43_OFDM_RATE_9MB, IEEE80211_RATE_OFDM), + RATETAB_ENT(B43_OFDM_RATE_12MB, IEEE80211_RATE_OFDM), + RATETAB_ENT(B43_OFDM_RATE_18MB, IEEE80211_RATE_OFDM), + RATETAB_ENT(B43_OFDM_RATE_24MB, IEEE80211_RATE_OFDM), + RATETAB_ENT(B43_OFDM_RATE_36MB, IEEE80211_RATE_OFDM), + RATETAB_ENT(B43_OFDM_RATE_48MB, IEEE80211_RATE_OFDM), + RATETAB_ENT(B43_OFDM_RATE_54MB, IEEE80211_RATE_OFDM), +}; + +#define b43_a_ratetable (__b43_ratetable + 4) +#define b43_a_ratetable_size 8 +#define b43_b_ratetable (__b43_ratetable + 0) +#define b43_b_ratetable_size 4 +#define b43_g_ratetable (__b43_ratetable + 0) +#define b43_g_ratetable_size 12 + +#define CHANTAB_ENT(_chanid, _freq) \ + { \ + .chan = (_chanid), \ + .freq = (_freq), \ + .val = (_chanid), \ + .flag = IEEE80211_CHAN_W_SCAN | \ + IEEE80211_CHAN_W_ACTIVE_SCAN | \ + IEEE80211_CHAN_W_IBSS, \ + .power_level = 0xFF, \ + .antenna_max = 0xFF, \ + } +static struct ieee80211_channel b43_bg_chantable[] = { + CHANTAB_ENT(1, 2412), + CHANTAB_ENT(2, 2417), + CHANTAB_ENT(3, 2422), + CHANTAB_ENT(4, 2427), + CHANTAB_ENT(5, 2432), + CHANTAB_ENT(6, 2437), + CHANTAB_ENT(7, 2442), + CHANTAB_ENT(8, 2447), + CHANTAB_ENT(9, 2452), + CHANTAB_ENT(10, 2457), + CHANTAB_ENT(11, 2462), + CHANTAB_ENT(12, 2467), + CHANTAB_ENT(13, 2472), + CHANTAB_ENT(14, 2484), +}; + +#define b43_bg_chantable_size ARRAY_SIZE(b43_bg_chantable) +static struct ieee80211_channel b43_a_chantable[] = { + CHANTAB_ENT(36, 5180), + CHANTAB_ENT(40, 5200), + CHANTAB_ENT(44, 5220), + CHANTAB_ENT(48, 5240), + CHANTAB_ENT(52, 5260), + CHANTAB_ENT(56, 5280), + CHANTAB_ENT(60, 5300), + CHANTAB_ENT(64, 5320), + CHANTAB_ENT(149, 5745), + CHANTAB_ENT(153, 5765), + CHANTAB_ENT(157, 5785), + CHANTAB_ENT(161, 5805), + CHANTAB_ENT(165, 5825), +}; + +#define b43_a_chantable_size ARRAY_SIZE(b43_a_chantable) + +static void b43_wireless_core_exit(struct b43_wldev *dev); +static int b43_wireless_core_init(struct b43_wldev *dev); +static void b43_wireless_core_stop(struct b43_wldev *dev); +static int b43_wireless_core_start(struct b43_wldev *dev); + +static int b43_ratelimit(struct b43_wl *wl) +{ + if (!wl || !wl->current_dev) + return 1; + if (b43_status(wl->current_dev) < B43_STAT_STARTED) + return 1; + /* We are up and running. + * Ratelimit the messages to avoid DoS over the net. */ + return net_ratelimit(); +} + +void b43info(struct b43_wl *wl, const char *fmt, ...) +{ + va_list args; + + if (!b43_ratelimit(wl)) + return; + va_start(args, fmt); + printk(KERN_INFO "b43-%s: ", + (wl && wl->hw) ? wiphy_name(wl->hw->wiphy) : "wlan"); + vprintk(fmt, args); + va_end(args); +} + +void b43err(struct b43_wl *wl, const char *fmt, ...) +{ + va_list args; + + if (!b43_ratelimit(wl)) + return; + va_start(args, fmt); + printk(KERN_ERR "b43-%s ERROR: ", + (wl && wl->hw) ? wiphy_name(wl->hw->wiphy) : "wlan"); + vprintk(fmt, args); + va_end(args); +} + +void b43warn(struct b43_wl *wl, const char *fmt, ...) 
+{ + va_list args; + + if (!b43_ratelimit(wl)) + return; + va_start(args, fmt); + printk(KERN_WARNING "b43-%s warning: ", + (wl && wl->hw) ? wiphy_name(wl->hw->wiphy) : "wlan"); + vprintk(fmt, args); + va_end(args); +} + +#if B43_DEBUG +void b43dbg(struct b43_wl *wl, const char *fmt, ...) +{ + va_list args; + + va_start(args, fmt); + printk(KERN_DEBUG "b43-%s debug: ", + (wl && wl->hw) ? wiphy_name(wl->hw->wiphy) : "wlan"); + vprintk(fmt, args); + va_end(args); +} +#endif /* DEBUG */ + +static void b43_ram_write(struct b43_wldev *dev, u16 offset, u32 val) +{ + u32 macctl; + + B43_WARN_ON(offset % 4 != 0); + + macctl = b43_read32(dev, B43_MMIO_MACCTL); + if (macctl & B43_MACCTL_BE) + val = swab32(val); + + b43_write32(dev, B43_MMIO_RAM_CONTROL, offset); + mmiowb(); + b43_write32(dev, B43_MMIO_RAM_DATA, val); +} + +static inline + void b43_shm_control_word(struct b43_wldev *dev, u16 routing, u16 offset) +{ + u32 control; + + /* "offset" is the WORD offset. */ + + control = routing; + control <<= 16; + control |= offset; + b43_write32(dev, B43_MMIO_SHM_CONTROL, control); +} + +u32 b43_shm_read32(struct b43_wldev *dev, u16 routing, u16 offset) +{ + u32 ret; + + if (routing == B43_SHM_SHARED) { + B43_WARN_ON(offset & 0x0001); + if (offset & 0x0003) { + /* Unaligned access */ + b43_shm_control_word(dev, routing, offset >> 2); + ret = b43_read16(dev, B43_MMIO_SHM_DATA_UNALIGNED); + ret <<= 16; + b43_shm_control_word(dev, routing, (offset >> 2) + 1); + ret |= b43_read16(dev, B43_MMIO_SHM_DATA); + + return ret; + } + offset >>= 2; + } + b43_shm_control_word(dev, routing, offset); + ret = b43_read32(dev, B43_MMIO_SHM_DATA); + + return ret; +} + +u16 b43_shm_read16(struct b43_wldev * dev, u16 routing, u16 offset) +{ + u16 ret; + + if (routing == B43_SHM_SHARED) { + B43_WARN_ON(offset & 0x0001); + if (offset & 0x0003) { + /* Unaligned access */ + b43_shm_control_word(dev, routing, offset >> 2); + ret = b43_read16(dev, B43_MMIO_SHM_DATA_UNALIGNED); + + return ret; + } + offset >>= 2; + } + b43_shm_control_word(dev, routing, offset); + ret = b43_read16(dev, B43_MMIO_SHM_DATA); + + return ret; +} + +void b43_shm_write32(struct b43_wldev *dev, u16 routing, u16 offset, u32 value) +{ + if (routing == B43_SHM_SHARED) { + B43_WARN_ON(offset & 0x0001); + if (offset & 0x0003) { + /* Unaligned access */ + b43_shm_control_word(dev, routing, offset >> 2); + mmiowb(); + b43_write16(dev, B43_MMIO_SHM_DATA_UNALIGNED, + (value >> 16) & 0xffff); + mmiowb(); + b43_shm_control_word(dev, routing, (offset >> 2) + 1); + mmiowb(); + b43_write16(dev, B43_MMIO_SHM_DATA, value & 0xffff); + return; + } + offset >>= 2; + } + b43_shm_control_word(dev, routing, offset); + mmiowb(); + b43_write32(dev, B43_MMIO_SHM_DATA, value); +} + +void b43_shm_write16(struct b43_wldev *dev, u16 routing, u16 offset, u16 value) +{ + if (routing == B43_SHM_SHARED) { + B43_WARN_ON(offset & 0x0001); + if (offset & 0x0003) { + /* Unaligned access */ + b43_shm_control_word(dev, routing, offset >> 2); + mmiowb(); + b43_write16(dev, B43_MMIO_SHM_DATA_UNALIGNED, value); + return; + } + offset >>= 2; + } + b43_shm_control_word(dev, routing, offset); + mmiowb(); + b43_write16(dev, B43_MMIO_SHM_DATA, value); +} + +/* Read HostFlags */ +u32 b43_hf_read(struct b43_wldev * dev) +{ + u32 ret; + + ret = b43_shm_read16(dev, B43_SHM_SHARED, B43_SHM_SH_HOSTFHI); + ret <<= 16; + ret |= b43_shm_read16(dev, B43_SHM_SHARED, B43_SHM_SH_HOSTFLO); + + return ret; +} + +/* Write HostFlags */ +void b43_hf_write(struct b43_wldev *dev, u32 value) +{ + b43_shm_write16(dev, 
B43_SHM_SHARED, + B43_SHM_SH_HOSTFLO, (value & 0x0000FFFF)); + b43_shm_write16(dev, B43_SHM_SHARED, + B43_SHM_SH_HOSTFHI, ((value & 0xFFFF0000) >> 16)); +} + +void b43_tsf_read(struct b43_wldev *dev, u64 * tsf) +{ + /* We need to be careful. As we read the TSF from multiple + * registers, we should take care of register overflows. + * In theory, the whole tsf read process should be atomic. + * We try to be atomic here, by restaring the read process, + * if any of the high registers changed (overflew). + */ + if (dev->dev->id.revision >= 3) { + u32 low, high, high2; + + do { + high = b43_read32(dev, B43_MMIO_REV3PLUS_TSF_HIGH); + low = b43_read32(dev, B43_MMIO_REV3PLUS_TSF_LOW); + high2 = b43_read32(dev, B43_MMIO_REV3PLUS_TSF_HIGH); + } while (unlikely(high != high2)); + + *tsf = high; + *tsf <<= 32; + *tsf |= low; + } else { + u64 tmp; + u16 v0, v1, v2, v3; + u16 test1, test2, test3; + + do { + v3 = b43_read16(dev, B43_MMIO_TSF_3); + v2 = b43_read16(dev, B43_MMIO_TSF_2); + v1 = b43_read16(dev, B43_MMIO_TSF_1); + v0 = b43_read16(dev, B43_MMIO_TSF_0); + + test3 = b43_read16(dev, B43_MMIO_TSF_3); + test2 = b43_read16(dev, B43_MMIO_TSF_2); + test1 = b43_read16(dev, B43_MMIO_TSF_1); + } while (v3 != test3 || v2 != test2 || v1 != test1); + + *tsf = v3; + *tsf <<= 48; + tmp = v2; + tmp <<= 32; + *tsf |= tmp; + tmp = v1; + tmp <<= 16; + *tsf |= tmp; + *tsf |= v0; + } +} + +static void b43_time_lock(struct b43_wldev *dev) +{ + u32 macctl; + + macctl = b43_read32(dev, B43_MMIO_MACCTL); + macctl |= B43_MACCTL_TBTTHOLD; + b43_write32(dev, B43_MMIO_MACCTL, macctl); + /* Commit the write */ + b43_read32(dev, B43_MMIO_MACCTL); +} + +static void b43_time_unlock(struct b43_wldev *dev) +{ + u32 macctl; + + macctl = b43_read32(dev, B43_MMIO_MACCTL); + macctl &= ~B43_MACCTL_TBTTHOLD; + b43_write32(dev, B43_MMIO_MACCTL, macctl); + /* Commit the write */ + b43_read32(dev, B43_MMIO_MACCTL); +} + +static void b43_tsf_write_locked(struct b43_wldev *dev, u64 tsf) +{ + /* Be careful with the in-progress timer. + * First zero out the low register, so we have a full + * register-overflow duration to complete the operation. 
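+	 * The TSF advances once per microsecond, so once the low register
+	 * has been cleared the next carry into the higher word(s) is a full
+	 * wrap of that register away (about 71 minutes for the 32-bit path,
+	 * about 65 ms for the 16-bit path), which leaves ample time for the
+	 * remaining writes below.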
+ */ + if (dev->dev->id.revision >= 3) { + u32 lo = (tsf & 0x00000000FFFFFFFFULL); + u32 hi = (tsf & 0xFFFFFFFF00000000ULL) >> 32; + + b43_write32(dev, B43_MMIO_REV3PLUS_TSF_LOW, 0); + mmiowb(); + b43_write32(dev, B43_MMIO_REV3PLUS_TSF_HIGH, hi); + mmiowb(); + b43_write32(dev, B43_MMIO_REV3PLUS_TSF_LOW, lo); + } else { + u16 v0 = (tsf & 0x000000000000FFFFULL); + u16 v1 = (tsf & 0x00000000FFFF0000ULL) >> 16; + u16 v2 = (tsf & 0x0000FFFF00000000ULL) >> 32; + u16 v3 = (tsf & 0xFFFF000000000000ULL) >> 48; + + b43_write16(dev, B43_MMIO_TSF_0, 0); + mmiowb(); + b43_write16(dev, B43_MMIO_TSF_3, v3); + mmiowb(); + b43_write16(dev, B43_MMIO_TSF_2, v2); + mmiowb(); + b43_write16(dev, B43_MMIO_TSF_1, v1); + mmiowb(); + b43_write16(dev, B43_MMIO_TSF_0, v0); + } +} + +void b43_tsf_write(struct b43_wldev *dev, u64 tsf) +{ + b43_time_lock(dev); + b43_tsf_write_locked(dev, tsf); + b43_time_unlock(dev); +} + +static +void b43_macfilter_set(struct b43_wldev *dev, u16 offset, const u8 * mac) +{ + static const u8 zero_addr[ETH_ALEN] = { 0 }; + u16 data; + + if (!mac) + mac = zero_addr; + + offset |= 0x0020; + b43_write16(dev, B43_MMIO_MACFILTER_CONTROL, offset); + + data = mac[0]; + data |= mac[1] << 8; + b43_write16(dev, B43_MMIO_MACFILTER_DATA, data); + data = mac[2]; + data |= mac[3] << 8; + b43_write16(dev, B43_MMIO_MACFILTER_DATA, data); + data = mac[4]; + data |= mac[5] << 8; + b43_write16(dev, B43_MMIO_MACFILTER_DATA, data); +} + +static void b43_write_mac_bssid_templates(struct b43_wldev *dev) +{ + const u8 *mac; + const u8 *bssid; + u8 mac_bssid[ETH_ALEN * 2]; + int i; + u32 tmp; + + bssid = dev->wl->bssid; + mac = dev->wl->mac_addr; + + b43_macfilter_set(dev, B43_MACFILTER_BSSID, bssid); + + memcpy(mac_bssid, mac, ETH_ALEN); + memcpy(mac_bssid + ETH_ALEN, bssid, ETH_ALEN); + + /* Write our MAC address and BSSID to template ram */ + for (i = 0; i < ARRAY_SIZE(mac_bssid); i += sizeof(u32)) { + tmp = (u32) (mac_bssid[i + 0]); + tmp |= (u32) (mac_bssid[i + 1]) << 8; + tmp |= (u32) (mac_bssid[i + 2]) << 16; + tmp |= (u32) (mac_bssid[i + 3]) << 24; + b43_ram_write(dev, 0x20 + i, tmp); + } +} + +static void b43_upload_card_macaddress(struct b43_wldev *dev) +{ + b43_write_mac_bssid_templates(dev); + b43_macfilter_set(dev, B43_MACFILTER_SELF, dev->wl->mac_addr); +} + +static void b43_set_slot_time(struct b43_wldev *dev, u16 slot_time) +{ + /* slot_time is in usec. */ + if (dev->phy.type != B43_PHYTYPE_G) + return; + b43_write16(dev, 0x684, 510 + slot_time); + b43_shm_write16(dev, B43_SHM_SHARED, 0x0010, slot_time); +} + +static void b43_short_slot_timing_enable(struct b43_wldev *dev) +{ + b43_set_slot_time(dev, 9); + dev->short_slot = 1; +} + +static void b43_short_slot_timing_disable(struct b43_wldev *dev) +{ + b43_set_slot_time(dev, 20); + dev->short_slot = 0; +} + +/* Enable a Generic IRQ. "mask" is the mask of which IRQs to enable. + * Returns the _previously_ enabled IRQ mask. + */ +static inline u32 b43_interrupt_enable(struct b43_wldev *dev, u32 mask) +{ + u32 old_mask; + + old_mask = b43_read32(dev, B43_MMIO_GEN_IRQ_MASK); + b43_write32(dev, B43_MMIO_GEN_IRQ_MASK, old_mask | mask); + + return old_mask; +} + +/* Disable a Generic IRQ. "mask" is the mask of which IRQs to disable. + * Returns the _previously_ enabled IRQ mask. + */ +static inline u32 b43_interrupt_disable(struct b43_wldev *dev, u32 mask) +{ + u32 old_mask; + + old_mask = b43_read32(dev, B43_MMIO_GEN_IRQ_MASK); + b43_write32(dev, B43_MMIO_GEN_IRQ_MASK, old_mask & ~mask); + + return old_mask; +} + +/* Synchronize IRQ top- and bottom-half. 
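+ * That is, wait for a hard-IRQ handler that may still be running
+ * and make sure the ISR tasklet has finished.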
+ * IRQs must be masked before calling this. + * This must not be called with the irq_lock held. + */ +static void b43_synchronize_irq(struct b43_wldev *dev) +{ + synchronize_irq(dev->dev->irq); + tasklet_kill(&dev->isr_tasklet); +} + +/* DummyTransmission function, as documented on + * http://bcm-specs.sipsolutions.net/DummyTransmission + */ +void b43_dummy_transmission(struct b43_wldev *dev) +{ + struct b43_phy *phy = &dev->phy; + unsigned int i, max_loop; + u16 value; + u32 buffer[5] = { + 0x00000000, + 0x00D40000, + 0x00000000, + 0x01000000, + 0x00000000, + }; + + switch (phy->type) { + case B43_PHYTYPE_A: + max_loop = 0x1E; + buffer[0] = 0x000201CC; + break; + case B43_PHYTYPE_B: + case B43_PHYTYPE_G: + max_loop = 0xFA; + buffer[0] = 0x000B846E; + break; + default: + B43_WARN_ON(1); + return; + } + + for (i = 0; i < 5; i++) + b43_ram_write(dev, i * 4, buffer[i]); + + /* Commit writes */ + b43_read32(dev, B43_MMIO_MACCTL); + + b43_write16(dev, 0x0568, 0x0000); + b43_write16(dev, 0x07C0, 0x0000); + value = ((phy->type == B43_PHYTYPE_A) ? 1 : 0); + b43_write16(dev, 0x050C, value); + b43_write16(dev, 0x0508, 0x0000); + b43_write16(dev, 0x050A, 0x0000); + b43_write16(dev, 0x054C, 0x0000); + b43_write16(dev, 0x056A, 0x0014); + b43_write16(dev, 0x0568, 0x0826); + b43_write16(dev, 0x0500, 0x0000); + b43_write16(dev, 0x0502, 0x0030); + + if (phy->radio_ver == 0x2050 && phy->radio_rev <= 0x5) + b43_radio_write16(dev, 0x0051, 0x0017); + for (i = 0x00; i < max_loop; i++) { + value = b43_read16(dev, 0x050E); + if (value & 0x0080) + break; + udelay(10); + } + for (i = 0x00; i < 0x0A; i++) { + value = b43_read16(dev, 0x050E); + if (value & 0x0400) + break; + udelay(10); + } + for (i = 0x00; i < 0x0A; i++) { + value = b43_read16(dev, 0x0690); + if (!(value & 0x0100)) + break; + udelay(10); + } + if (phy->radio_ver == 0x2050 && phy->radio_rev <= 0x5) + b43_radio_write16(dev, 0x0051, 0x0037); +} + +static void key_write(struct b43_wldev *dev, + u8 index, u8 algorithm, const u8 * key) +{ + unsigned int i; + u32 offset; + u16 value; + u16 kidx; + + /* Key index/algo block */ + kidx = b43_kidx_to_fw(dev, index); + value = ((kidx << 4) | algorithm); + b43_shm_write16(dev, B43_SHM_SHARED, + B43_SHM_SH_KEYIDXBLOCK + (kidx * 2), value); + + /* Write the key to the Key Table Pointer offset */ + offset = dev->ktp + (index * B43_SEC_KEYSIZE); + for (i = 0; i < B43_SEC_KEYSIZE; i += 2) { + value = key[i]; + value |= (u16) (key[i + 1]) << 8; + b43_shm_write16(dev, B43_SHM_SHARED, offset + i, value); + } +} + +static void keymac_write(struct b43_wldev *dev, u8 index, const u8 * addr) +{ + u32 addrtmp[2] = { 0, 0, }; + u8 per_sta_keys_start = 8; + + if (b43_new_kidx_api(dev)) + per_sta_keys_start = 4; + + B43_WARN_ON(index < per_sta_keys_start); + /* We have two default TX keys and possibly two default RX keys. + * Physical mac 0 is mapped to physical key 4 or 8, depending + * on the firmware version. + * So we must adjust the index here. 
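+	 * For example, with the new key index API (four default keys)
+	 * hardware key index 4 becomes per-station slot 0 and index 5
+	 * becomes slot 1.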
+ */ + index -= per_sta_keys_start; + + if (addr) { + addrtmp[0] = addr[0]; + addrtmp[0] |= ((u32) (addr[1]) << 8); + addrtmp[0] |= ((u32) (addr[2]) << 16); + addrtmp[0] |= ((u32) (addr[3]) << 24); + addrtmp[1] = addr[4]; + addrtmp[1] |= ((u32) (addr[5]) << 8); + } + + if (dev->dev->id.revision >= 5) { + /* Receive match transmitter address mechanism */ + b43_shm_write32(dev, B43_SHM_RCMTA, + (index * 2) + 0, addrtmp[0]); + b43_shm_write16(dev, B43_SHM_RCMTA, + (index * 2) + 1, addrtmp[1]); + } else { + /* RXE (Receive Engine) and + * PSM (Programmable State Machine) mechanism + */ + if (index < 8) { + /* TODO write to RCM 16, 19, 22 and 25 */ + } else { + b43_shm_write32(dev, B43_SHM_SHARED, + B43_SHM_SH_PSM + (index * 6) + 0, + addrtmp[0]); + b43_shm_write16(dev, B43_SHM_SHARED, + B43_SHM_SH_PSM + (index * 6) + 4, + addrtmp[1]); + } + } +} + +static void do_key_write(struct b43_wldev *dev, + u8 index, u8 algorithm, + const u8 * key, size_t key_len, const u8 * mac_addr) +{ + u8 buf[B43_SEC_KEYSIZE] = { 0, }; + u8 per_sta_keys_start = 8; + + if (b43_new_kidx_api(dev)) + per_sta_keys_start = 4; + + B43_WARN_ON(index >= dev->max_nr_keys); + B43_WARN_ON(key_len > B43_SEC_KEYSIZE); + + if (index >= per_sta_keys_start) + keymac_write(dev, index, NULL); /* First zero out mac. */ + if (key) + memcpy(buf, key, key_len); + key_write(dev, index, algorithm, buf); + if (index >= per_sta_keys_start) + keymac_write(dev, index, mac_addr); + + dev->key[index].algorithm = algorithm; +} + +static int b43_key_write(struct b43_wldev *dev, + int index, u8 algorithm, + const u8 * key, size_t key_len, + const u8 * mac_addr, + struct ieee80211_key_conf *keyconf) +{ + int i; + int sta_keys_start; + + if (key_len > B43_SEC_KEYSIZE) + return -EINVAL; + for (i = 0; i < dev->max_nr_keys; i++) { + /* Check that we don't already have this key. */ + B43_WARN_ON(dev->key[i].keyconf == keyconf); + } + if (index < 0) { + /* Either pairwise key or address is 00:00:00:00:00:00 + * for transmit-only keys. Search the index. 
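+		 * That is, scan the per-station slots, which start at index
+		 * 4 or 8 depending on the firmware key-index API, for the
+		 * first entry that has no keyconf attached.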
*/ + if (b43_new_kidx_api(dev)) + sta_keys_start = 4; + else + sta_keys_start = 8; + for (i = sta_keys_start; i < dev->max_nr_keys; i++) { + if (!dev->key[i].keyconf) { + /* found empty */ + index = i; + break; + } + } + if (index < 0) { + b43err(dev->wl, "Out of hardware key memory\n"); + return -ENOSPC; + } + } else + B43_WARN_ON(index > 3); + + do_key_write(dev, index, algorithm, key, key_len, mac_addr); + if ((index <= 3) && !b43_new_kidx_api(dev)) { + /* Default RX key */ + B43_WARN_ON(mac_addr); + do_key_write(dev, index + 4, algorithm, key, key_len, NULL); + } + keyconf->hw_key_idx = index; + dev->key[index].keyconf = keyconf; + + return 0; +} + +static int b43_key_clear(struct b43_wldev *dev, int index) +{ + if (B43_WARN_ON((index < 0) || (index >= dev->max_nr_keys))) + return -EINVAL; + do_key_write(dev, index, B43_SEC_ALGO_NONE, + NULL, B43_SEC_KEYSIZE, NULL); + if ((index <= 3) && !b43_new_kidx_api(dev)) { + do_key_write(dev, index + 4, B43_SEC_ALGO_NONE, + NULL, B43_SEC_KEYSIZE, NULL); + } + dev->key[index].keyconf = NULL; + + return 0; +} + +static void b43_clear_keys(struct b43_wldev *dev) +{ + int i; + + for (i = 0; i < dev->max_nr_keys; i++) + b43_key_clear(dev, i); +} + +void b43_power_saving_ctl_bits(struct b43_wldev *dev, unsigned int ps_flags) +{ + u32 macctl; + u16 ucstat; + bool hwps; + bool awake; + int i; + + B43_WARN_ON((ps_flags & B43_PS_ENABLED) && + (ps_flags & B43_PS_DISABLED)); + B43_WARN_ON((ps_flags & B43_PS_AWAKE) && (ps_flags & B43_PS_ASLEEP)); + + if (ps_flags & B43_PS_ENABLED) { + hwps = 1; + } else if (ps_flags & B43_PS_DISABLED) { + hwps = 0; + } else { + //TODO: If powersave is not off and FIXME is not set and we are not in adhoc + // and thus is not an AP and we are associated, set bit 25 + } + if (ps_flags & B43_PS_AWAKE) { + awake = 1; + } else if (ps_flags & B43_PS_ASLEEP) { + awake = 0; + } else { + //TODO: If the device is awake or this is an AP, or we are scanning, or FIXME, + // or we are associated, or FIXME, or the latest PS-Poll packet sent was + // successful, set bit26 + } + +/* FIXME: For now we force awake-on and hwps-off */ + hwps = 0; + awake = 1; + + macctl = b43_read32(dev, B43_MMIO_MACCTL); + if (hwps) + macctl |= B43_MACCTL_HWPS; + else + macctl &= ~B43_MACCTL_HWPS; + if (awake) + macctl |= B43_MACCTL_AWAKE; + else + macctl &= ~B43_MACCTL_AWAKE; + b43_write32(dev, B43_MMIO_MACCTL, macctl); + /* Commit write */ + b43_read32(dev, B43_MMIO_MACCTL); + if (awake && dev->dev->id.revision >= 5) { + /* Wait for the microcode to wake up. */ + for (i = 0; i < 100; i++) { + ucstat = b43_shm_read16(dev, B43_SHM_SHARED, + B43_SHM_SH_UCODESTAT); + if (ucstat != B43_SHM_SH_UCODESTAT_SLEEP) + break; + udelay(10); + } + } +} + +/* Turn the Analog ON/OFF */ +static void b43_switch_analog(struct b43_wldev *dev, int on) +{ + b43_write16(dev, B43_MMIO_PHY0, on ? 0 : 0xF4); +} + +void b43_wireless_core_reset(struct b43_wldev *dev, u32 flags) +{ + u32 tmslow; + u32 macctl; + + flags |= B43_TMSLOW_PHYCLKEN; + flags |= B43_TMSLOW_PHYRESET; + ssb_device_enable(dev->dev, flags); + msleep(2); /* Wait for the PLL to turn on. 
*/ + + /* Now take the PHY out of Reset again */ + tmslow = ssb_read32(dev->dev, SSB_TMSLOW); + tmslow |= SSB_TMSLOW_FGC; + tmslow &= ~B43_TMSLOW_PHYRESET; + ssb_write32(dev->dev, SSB_TMSLOW, tmslow); + ssb_read32(dev->dev, SSB_TMSLOW); /* flush */ + msleep(1); + tmslow &= ~SSB_TMSLOW_FGC; + ssb_write32(dev->dev, SSB_TMSLOW, tmslow); + ssb_read32(dev->dev, SSB_TMSLOW); /* flush */ + msleep(1); + + /* Turn Analog ON */ + b43_switch_analog(dev, 1); + + macctl = b43_read32(dev, B43_MMIO_MACCTL); + macctl &= ~B43_MACCTL_GMODE; + if (flags & B43_TMSLOW_GMODE) + macctl |= B43_MACCTL_GMODE; + macctl |= B43_MACCTL_IHR_ENABLED; + b43_write32(dev, B43_MMIO_MACCTL, macctl); +} + +static void handle_irq_transmit_status(struct b43_wldev *dev) +{ + u32 v0, v1; + u16 tmp; + struct b43_txstatus stat; + + while (1) { + v0 = b43_read32(dev, B43_MMIO_XMITSTAT_0); + if (!(v0 & 0x00000001)) + break; + v1 = b43_read32(dev, B43_MMIO_XMITSTAT_1); + + stat.cookie = (v0 >> 16); + stat.seq = (v1 & 0x0000FFFF); + stat.phy_stat = ((v1 & 0x00FF0000) >> 16); + tmp = (v0 & 0x0000FFFF); + stat.frame_count = ((tmp & 0xF000) >> 12); + stat.rts_count = ((tmp & 0x0F00) >> 8); + stat.supp_reason = ((tmp & 0x001C) >> 2); + stat.pm_indicated = !!(tmp & 0x0080); + stat.intermediate = !!(tmp & 0x0040); + stat.for_ampdu = !!(tmp & 0x0020); + stat.acked = !!(tmp & 0x0002); + + b43_handle_txstatus(dev, &stat); + } +} + +static void drain_txstatus_queue(struct b43_wldev *dev) +{ + u32 dummy; + + if (dev->dev->id.revision < 5) + return; + /* Read all entries from the microcode TXstatus FIFO + * and throw them away. + */ + while (1) { + dummy = b43_read32(dev, B43_MMIO_XMITSTAT_0); + if (!(dummy & 0x00000001)) + break; + dummy = b43_read32(dev, B43_MMIO_XMITSTAT_1); + } +} + +static u32 b43_jssi_read(struct b43_wldev *dev) +{ + u32 val = 0; + + val = b43_shm_read16(dev, B43_SHM_SHARED, 0x08A); + val <<= 16; + val |= b43_shm_read16(dev, B43_SHM_SHARED, 0x088); + + return val; +} + +static void b43_jssi_write(struct b43_wldev *dev, u32 jssi) +{ + b43_shm_write16(dev, B43_SHM_SHARED, 0x088, (jssi & 0x0000FFFF)); + b43_shm_write16(dev, B43_SHM_SHARED, 0x08A, (jssi & 0xFFFF0000) >> 16); +} + +static void b43_generate_noise_sample(struct b43_wldev *dev) +{ + b43_jssi_write(dev, 0x7F7F7F7F); + b43_write32(dev, B43_MMIO_STATUS2_BITFIELD, + b43_read32(dev, B43_MMIO_STATUS2_BITFIELD) + | (1 << 4)); + B43_WARN_ON(dev->noisecalc.channel_at_start != dev->phy.channel); +} + +static void b43_calculate_link_quality(struct b43_wldev *dev) +{ + /* Top half of Link Quality calculation. */ + + if (dev->noisecalc.calculation_running) + return; + dev->noisecalc.channel_at_start = dev->phy.channel; + dev->noisecalc.calculation_running = 1; + dev->noisecalc.nr_samples = 0; + + b43_generate_noise_sample(dev); +} + +static void handle_irq_noise(struct b43_wldev *dev) +{ + struct b43_phy *phy = &dev->phy; + u16 tmp; + u8 noise[4]; + u8 i, j; + s32 average; + + /* Bottom half of Link Quality calculation. */ + + B43_WARN_ON(!dev->noisecalc.calculation_running); + if (dev->noisecalc.channel_at_start != phy->channel) + goto drop_calculation; + *((__le32 *)noise) = cpu_to_le32(b43_jssi_read(dev)); + if (noise[0] == 0x7F || noise[1] == 0x7F || + noise[2] == 0x7F || noise[3] == 0x7F) + goto generate_new; + + /* Get the noise samples. 
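+	 * Each of the four JSSI bytes is clamped to the size of the
+	 * nrssi_lt lookup table and translated through it; after eight
+	 * batches have been collected, the 32 samples are averaged and
+	 * rescaled into the link noise estimate below.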
*/ + B43_WARN_ON(dev->noisecalc.nr_samples >= 8); + i = dev->noisecalc.nr_samples; + noise[0] = limit_value(noise[0], 0, ARRAY_SIZE(phy->nrssi_lt) - 1); + noise[1] = limit_value(noise[1], 0, ARRAY_SIZE(phy->nrssi_lt) - 1); + noise[2] = limit_value(noise[2], 0, ARRAY_SIZE(phy->nrssi_lt) - 1); + noise[3] = limit_value(noise[3], 0, ARRAY_SIZE(phy->nrssi_lt) - 1); + dev->noisecalc.samples[i][0] = phy->nrssi_lt[noise[0]]; + dev->noisecalc.samples[i][1] = phy->nrssi_lt[noise[1]]; + dev->noisecalc.samples[i][2] = phy->nrssi_lt[noise[2]]; + dev->noisecalc.samples[i][3] = phy->nrssi_lt[noise[3]]; + dev->noisecalc.nr_samples++; + if (dev->noisecalc.nr_samples == 8) { + /* Calculate the Link Quality by the noise samples. */ + average = 0; + for (i = 0; i < 8; i++) { + for (j = 0; j < 4; j++) + average += dev->noisecalc.samples[i][j]; + } + average /= (8 * 4); + average *= 125; + average += 64; + average /= 128; + tmp = b43_shm_read16(dev, B43_SHM_SHARED, 0x40C); + tmp = (tmp / 128) & 0x1F; + if (tmp >= 8) + average += 2; + else + average -= 25; + if (tmp == 8) + average -= 72; + else + average -= 48; + + dev->stats.link_noise = average; + drop_calculation: + dev->noisecalc.calculation_running = 0; + return; + } + generate_new: + b43_generate_noise_sample(dev); +} + +static void handle_irq_tbtt_indication(struct b43_wldev *dev) +{ + if (b43_is_mode(dev->wl, IEEE80211_IF_TYPE_AP)) { + ///TODO: PS TBTT + } else { + if (1 /*FIXME: the last PSpoll frame was sent successfully */ ) + b43_power_saving_ctl_bits(dev, 0); + } + dev->reg124_set_0x4 = 0; + if (b43_is_mode(dev->wl, IEEE80211_IF_TYPE_IBSS)) + dev->reg124_set_0x4 = 1; +} + +static void handle_irq_atim_end(struct b43_wldev *dev) +{ + if (!dev->reg124_set_0x4 /*FIXME rename this variable */ ) + return; + b43_write32(dev, B43_MMIO_STATUS2_BITFIELD, + b43_read32(dev, B43_MMIO_STATUS2_BITFIELD) + | 0x4); +} + +static void handle_irq_pmq(struct b43_wldev *dev) +{ + u32 tmp; + + //TODO: AP mode. + + while (1) { + tmp = b43_read32(dev, B43_MMIO_PS_STATUS); + if (!(tmp & 0x00000008)) + break; + } + /* 16bit write is odd, but correct. */ + b43_write16(dev, B43_MMIO_PS_STATUS, 0x0002); +} + +static void b43_write_template_common(struct b43_wldev *dev, + const u8 * data, u16 size, + u16 ram_offset, + u16 shm_size_offset, u8 rate) +{ + u32 i, tmp; + struct b43_plcp_hdr4 plcp; + + plcp.data = 0; + b43_generate_plcp_hdr(&plcp, size + FCS_LEN, rate); + b43_ram_write(dev, ram_offset, le32_to_cpu(plcp.data)); + ram_offset += sizeof(u32); + /* The PLCP is 6 bytes long, but we only wrote 4 bytes, yet. + * So leave the first two bytes of the next write blank. 
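+	 * In other words, the second template word keeps its low 16 bits
+	 * zero (the two missing PLCP bytes) and carries data[0] and data[1]
+	 * in its upper 16 bits; the remaining payload is then packed four
+	 * bytes per word, shifted by those two bytes.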
+ */ + tmp = (u32) (data[0]) << 16; + tmp |= (u32) (data[1]) << 24; + b43_ram_write(dev, ram_offset, tmp); + ram_offset += sizeof(u32); + for (i = 2; i < size; i += sizeof(u32)) { + tmp = (u32) (data[i + 0]); + if (i + 1 < size) + tmp |= (u32) (data[i + 1]) << 8; + if (i + 2 < size) + tmp |= (u32) (data[i + 2]) << 16; + if (i + 3 < size) + tmp |= (u32) (data[i + 3]) << 24; + b43_ram_write(dev, ram_offset + i - 2, tmp); + } + b43_shm_write16(dev, B43_SHM_SHARED, shm_size_offset, + size + sizeof(struct b43_plcp_hdr6)); +} + +static void b43_write_beacon_template(struct b43_wldev *dev, + u16 ram_offset, + u16 shm_size_offset, u8 rate) +{ + int len; + const u8 *data; + + B43_WARN_ON(!dev->cached_beacon); + len = min((size_t) dev->cached_beacon->len, + 0x200 - sizeof(struct b43_plcp_hdr6)); + data = (const u8 *)(dev->cached_beacon->data); + b43_write_template_common(dev, data, + len, ram_offset, shm_size_offset, rate); +} + +static void b43_write_probe_resp_plcp(struct b43_wldev *dev, + u16 shm_offset, u16 size, u8 rate) +{ + struct b43_plcp_hdr4 plcp; + u32 tmp; + __le16 dur; + + plcp.data = 0; + b43_generate_plcp_hdr(&plcp, size + FCS_LEN, rate); + dur = ieee80211_generic_frame_duration(dev->wl->hw, + dev->wl->if_id, size, + B43_RATE_TO_BASE100KBPS(rate)); + /* Write PLCP in two parts and timing for packet transfer */ + tmp = le32_to_cpu(plcp.data); + b43_shm_write16(dev, B43_SHM_SHARED, shm_offset, tmp & 0xFFFF); + b43_shm_write16(dev, B43_SHM_SHARED, shm_offset + 2, tmp >> 16); + b43_shm_write16(dev, B43_SHM_SHARED, shm_offset + 6, le16_to_cpu(dur)); +} + +/* Instead of using custom probe response template, this function + * just patches custom beacon template by: + * 1) Changing packet type + * 2) Patching duration field + * 3) Stripping TIM + */ +static u8 *b43_generate_probe_resp(struct b43_wldev *dev, + u16 * dest_size, u8 rate) +{ + const u8 *src_data; + u8 *dest_data; + u16 src_size, elem_size, src_pos, dest_pos; + __le16 dur; + struct ieee80211_hdr *hdr; + + B43_WARN_ON(!dev->cached_beacon); + src_size = dev->cached_beacon->len; + src_data = (const u8 *)dev->cached_beacon->data; + + if (unlikely(src_size < 0x24)) { + b43dbg(dev->wl, "b43_generate_probe_resp: " "invalid beacon\n"); + return NULL; + } + + dest_data = kmalloc(src_size, GFP_ATOMIC); + if (unlikely(!dest_data)) + return NULL; + + /* 0x24 is offset of first variable-len Information-Element + * in beacon frame. + */ + memcpy(dest_data, src_data, 0x24); + src_pos = dest_pos = 0x24; + for (; src_pos < src_size - 2; src_pos += elem_size) { + elem_size = src_data[src_pos + 1] + 2; + if (src_data[src_pos] != 0x05) { /* TIM */ + memcpy(dest_data + dest_pos, src_data + src_pos, + elem_size); + dest_pos += elem_size; + } + } + *dest_size = dest_pos; + hdr = (struct ieee80211_hdr *)dest_data; + + /* Set the frame control. 
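+	 * Retype the copied beacon as a management probe-response frame
+	 * and recompute its duration field for the given rate.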
*/ + hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT | + IEEE80211_STYPE_PROBE_RESP); + dur = ieee80211_generic_frame_duration(dev->wl->hw, + dev->wl->if_id, *dest_size, + B43_RATE_TO_BASE100KBPS(rate)); + hdr->duration_id = dur; + + return dest_data; +} + +static void b43_write_probe_resp_template(struct b43_wldev *dev, + u16 ram_offset, + u16 shm_size_offset, u8 rate) +{ + u8 *probe_resp_data; + u16 size; + + B43_WARN_ON(!dev->cached_beacon); + size = dev->cached_beacon->len; + probe_resp_data = b43_generate_probe_resp(dev, &size, rate); + if (unlikely(!probe_resp_data)) + return; + + /* Looks like PLCP headers plus packet timings are stored for + * all possible basic rates + */ + b43_write_probe_resp_plcp(dev, 0x31A, size, B43_CCK_RATE_1MB); + b43_write_probe_resp_plcp(dev, 0x32C, size, B43_CCK_RATE_2MB); + b43_write_probe_resp_plcp(dev, 0x33E, size, B43_CCK_RATE_5MB); + b43_write_probe_resp_plcp(dev, 0x350, size, B43_CCK_RATE_11MB); + + size = min((size_t) size, 0x200 - sizeof(struct b43_plcp_hdr6)); + b43_write_template_common(dev, probe_resp_data, + size, ram_offset, shm_size_offset, rate); + kfree(probe_resp_data); +} + +static int b43_refresh_cached_beacon(struct b43_wldev *dev, + struct sk_buff *beacon) +{ + if (dev->cached_beacon) + kfree_skb(dev->cached_beacon); + dev->cached_beacon = beacon; + + return 0; +} + +static void b43_update_templates(struct b43_wldev *dev) +{ + u32 status; + + B43_WARN_ON(!dev->cached_beacon); + + b43_write_beacon_template(dev, 0x68, 0x18, B43_CCK_RATE_1MB); + b43_write_beacon_template(dev, 0x468, 0x1A, B43_CCK_RATE_1MB); + b43_write_probe_resp_template(dev, 0x268, 0x4A, B43_CCK_RATE_11MB); + + status = b43_read32(dev, B43_MMIO_STATUS2_BITFIELD); + status |= 0x03; + b43_write32(dev, B43_MMIO_STATUS2_BITFIELD, status); +} + +static void b43_refresh_templates(struct b43_wldev *dev, struct sk_buff *beacon) +{ + int err; + + err = b43_refresh_cached_beacon(dev, beacon); + if (unlikely(err)) + return; + b43_update_templates(dev); +} + +static void b43_set_ssid(struct b43_wldev *dev, const u8 * ssid, u8 ssid_len) +{ + u32 tmp; + u16 i, len; + + len = min((u16) ssid_len, (u16) 0x100); + for (i = 0; i < len; i += sizeof(u32)) { + tmp = (u32) (ssid[i + 0]); + if (i + 1 < len) + tmp |= (u32) (ssid[i + 1]) << 8; + if (i + 2 < len) + tmp |= (u32) (ssid[i + 2]) << 16; + if (i + 3 < len) + tmp |= (u32) (ssid[i + 3]) << 24; + b43_shm_write32(dev, B43_SHM_SHARED, 0x380 + i, tmp); + } + b43_shm_write16(dev, B43_SHM_SHARED, 0x48, len); +} + +static void b43_set_beacon_int(struct b43_wldev *dev, u16 beacon_int) +{ + b43_time_lock(dev); + if (dev->dev->id.revision >= 3) { + b43_write32(dev, 0x188, (beacon_int << 16)); + } else { + b43_write16(dev, 0x606, (beacon_int >> 6)); + b43_write16(dev, 0x610, beacon_int); + } + b43_time_unlock(dev); +} + +static void handle_irq_beacon(struct b43_wldev *dev) +{ + u32 status; + + if (!b43_is_mode(dev->wl, IEEE80211_IF_TYPE_AP)) + return; + + dev->irq_savedstate &= ~B43_IRQ_BEACON; + status = b43_read32(dev, B43_MMIO_STATUS2_BITFIELD); + + if (!dev->cached_beacon || ((status & 0x1) && (status & 0x2))) { + /* ACK beacon IRQ. 
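+		 * Either no beacon is cached or both template slots (status
+		 * bits 0x1 and 0x2) are already loaded, so just acknowledge
+		 * the IRQ and drop the cached beacon.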
*/ + b43_write32(dev, B43_MMIO_GEN_IRQ_REASON, B43_IRQ_BEACON); + dev->irq_savedstate |= B43_IRQ_BEACON; + if (dev->cached_beacon) + kfree_skb(dev->cached_beacon); + dev->cached_beacon = NULL; + return; + } + if (!(status & 0x1)) { + b43_write_beacon_template(dev, 0x68, 0x18, B43_CCK_RATE_1MB); + status |= 0x1; + b43_write32(dev, B43_MMIO_STATUS2_BITFIELD, status); + } + if (!(status & 0x2)) { + b43_write_beacon_template(dev, 0x468, 0x1A, B43_CCK_RATE_1MB); + status |= 0x2; + b43_write32(dev, B43_MMIO_STATUS2_BITFIELD, status); + } +} + +static void handle_irq_ucode_debug(struct b43_wldev *dev) +{ + //TODO +} + +/* Interrupt handler bottom-half */ +static void b43_interrupt_tasklet(struct b43_wldev *dev) +{ + u32 reason; + u32 dma_reason[ARRAY_SIZE(dev->dma_reason)]; + u32 merged_dma_reason = 0; + int i; + unsigned long flags; + + spin_lock_irqsave(&dev->wl->irq_lock, flags); + + B43_WARN_ON(b43_status(dev) != B43_STAT_STARTED); + + reason = dev->irq_reason; + for (i = 0; i < ARRAY_SIZE(dma_reason); i++) { + dma_reason[i] = dev->dma_reason[i]; + merged_dma_reason |= dma_reason[i]; + } + + if (unlikely(reason & B43_IRQ_MAC_TXERR)) + b43err(dev->wl, "MAC transmission error\n"); + + if (unlikely(reason & B43_IRQ_PHY_TXERR)) + b43err(dev->wl, "PHY transmission error\n"); + + if (unlikely(merged_dma_reason & (B43_DMAIRQ_FATALMASK | + B43_DMAIRQ_NONFATALMASK))) { + if (merged_dma_reason & B43_DMAIRQ_FATALMASK) { + b43err(dev->wl, "Fatal DMA error: " + "0x%08X, 0x%08X, 0x%08X, " + "0x%08X, 0x%08X, 0x%08X\n", + dma_reason[0], dma_reason[1], + dma_reason[2], dma_reason[3], + dma_reason[4], dma_reason[5]); + b43_controller_restart(dev, "DMA error"); + mmiowb(); + spin_unlock_irqrestore(&dev->wl->irq_lock, flags); + return; + } + if (merged_dma_reason & B43_DMAIRQ_NONFATALMASK) { + b43err(dev->wl, "DMA error: " + "0x%08X, 0x%08X, 0x%08X, " + "0x%08X, 0x%08X, 0x%08X\n", + dma_reason[0], dma_reason[1], + dma_reason[2], dma_reason[3], + dma_reason[4], dma_reason[5]); + } + } + + if (unlikely(reason & B43_IRQ_UCODE_DEBUG)) + handle_irq_ucode_debug(dev); + if (reason & B43_IRQ_TBTT_INDI) + handle_irq_tbtt_indication(dev); + if (reason & B43_IRQ_ATIM_END) + handle_irq_atim_end(dev); + if (reason & B43_IRQ_BEACON) + handle_irq_beacon(dev); + if (reason & B43_IRQ_PMQ) + handle_irq_pmq(dev); + if (reason & B43_IRQ_TXFIFO_FLUSH_OK) + ;/* TODO */ + if (reason & B43_IRQ_NOISESAMPLE_OK) + handle_irq_noise(dev); + + /* Check the DMA reason registers for received data. 
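+	 * Only DMA queues 0 and 3 carry received frames; the RX_DONE bit
+	 * must never show up on the other queues, which is what the
+	 * WARN_ONs below check.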
*/ + if (dma_reason[0] & B43_DMAIRQ_RX_DONE) { + if (b43_using_pio(dev)) + b43_pio_rx(dev->pio.queue0); + else + b43_dma_rx(dev->dma.rx_ring0); + } + B43_WARN_ON(dma_reason[1] & B43_DMAIRQ_RX_DONE); + B43_WARN_ON(dma_reason[2] & B43_DMAIRQ_RX_DONE); + if (dma_reason[3] & B43_DMAIRQ_RX_DONE) { + if (b43_using_pio(dev)) + b43_pio_rx(dev->pio.queue3); + else + b43_dma_rx(dev->dma.rx_ring3); + } + B43_WARN_ON(dma_reason[4] & B43_DMAIRQ_RX_DONE); + B43_WARN_ON(dma_reason[5] & B43_DMAIRQ_RX_DONE); + + if (reason & B43_IRQ_TX_OK) + handle_irq_transmit_status(dev); + + b43_interrupt_enable(dev, dev->irq_savedstate); + mmiowb(); + spin_unlock_irqrestore(&dev->wl->irq_lock, flags); +} + +static void pio_irq_workaround(struct b43_wldev *dev, u16 base, int queueidx) +{ + u16 rxctl; + + rxctl = b43_read16(dev, base + B43_PIO_RXCTL); + if (rxctl & B43_PIO_RXCTL_DATAAVAILABLE) + dev->dma_reason[queueidx] |= B43_DMAIRQ_RX_DONE; + else + dev->dma_reason[queueidx] &= ~B43_DMAIRQ_RX_DONE; +} + +static void b43_interrupt_ack(struct b43_wldev *dev, u32 reason) +{ + if (b43_using_pio(dev) && + (dev->dev->id.revision < 3) && + (!(reason & B43_IRQ_PIO_WORKAROUND))) { + /* Apply a PIO specific workaround to the dma_reasons */ + pio_irq_workaround(dev, B43_MMIO_PIO1_BASE, 0); + pio_irq_workaround(dev, B43_MMIO_PIO2_BASE, 1); + pio_irq_workaround(dev, B43_MMIO_PIO3_BASE, 2); + pio_irq_workaround(dev, B43_MMIO_PIO4_BASE, 3); + } + + b43_write32(dev, B43_MMIO_GEN_IRQ_REASON, reason); + + b43_write32(dev, B43_MMIO_DMA0_REASON, dev->dma_reason[0]); + b43_write32(dev, B43_MMIO_DMA1_REASON, dev->dma_reason[1]); + b43_write32(dev, B43_MMIO_DMA2_REASON, dev->dma_reason[2]); + b43_write32(dev, B43_MMIO_DMA3_REASON, dev->dma_reason[3]); + b43_write32(dev, B43_MMIO_DMA4_REASON, dev->dma_reason[4]); + b43_write32(dev, B43_MMIO_DMA5_REASON, dev->dma_reason[5]); +} + +/* Interrupt handler top-half */ +static irqreturn_t b43_interrupt_handler(int irq, void *dev_id) +{ + irqreturn_t ret = IRQ_NONE; + struct b43_wldev *dev = dev_id; + u32 reason; + + if (!dev) + return IRQ_NONE; + + spin_lock(&dev->wl->irq_lock); + + if (b43_status(dev) < B43_STAT_STARTED) + goto out; + reason = b43_read32(dev, B43_MMIO_GEN_IRQ_REASON); + if (reason == 0xffffffff) /* shared IRQ */ + goto out; + ret = IRQ_HANDLED; + reason &= b43_read32(dev, B43_MMIO_GEN_IRQ_MASK); + if (!reason) + goto out; + + dev->dma_reason[0] = b43_read32(dev, B43_MMIO_DMA0_REASON) + & 0x0001DC00; + dev->dma_reason[1] = b43_read32(dev, B43_MMIO_DMA1_REASON) + & 0x0000DC00; + dev->dma_reason[2] = b43_read32(dev, B43_MMIO_DMA2_REASON) + & 0x0000DC00; + dev->dma_reason[3] = b43_read32(dev, B43_MMIO_DMA3_REASON) + & 0x0001DC00; + dev->dma_reason[4] = b43_read32(dev, B43_MMIO_DMA4_REASON) + & 0x0000DC00; + dev->dma_reason[5] = b43_read32(dev, B43_MMIO_DMA5_REASON) + & 0x0000DC00; + + b43_interrupt_ack(dev, reason); + /* disable all IRQs. They are enabled again in the bottom half. */ + dev->irq_savedstate = b43_interrupt_disable(dev, B43_IRQ_ALL); + /* save the reason code and call our bottom half. 
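+	 * The tasklet re-enables the interrupts saved in irq_savedstate
+	 * once it has handled all pending reasons.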
*/ + dev->irq_reason = reason; + tasklet_schedule(&dev->isr_tasklet); + out: + mmiowb(); + spin_unlock(&dev->wl->irq_lock); + + return ret; +} + +static void b43_release_firmware(struct b43_wldev *dev) +{ + release_firmware(dev->fw.ucode); + dev->fw.ucode = NULL; + release_firmware(dev->fw.pcm); + dev->fw.pcm = NULL; + release_firmware(dev->fw.initvals); + dev->fw.initvals = NULL; + release_firmware(dev->fw.initvals_band); + dev->fw.initvals_band = NULL; +} + +static void b43_print_fw_helptext(struct b43_wl *wl) +{ + b43err(wl, "You must go to " + "http://linuxwireless.org/en/users/Drivers/bcm43xx#devicefirmware " + "and download the correct firmware (version 4).\n"); +} + +static int do_request_fw(struct b43_wldev *dev, + const char *name, + const struct firmware **fw) +{ + char path[sizeof(modparam_fwpostfix) + 32]; + struct b43_fw_header *hdr; + u32 size; + int err; + + if (!name) + return 0; + + snprintf(path, ARRAY_SIZE(path), + "b43%s/%s.fw", + modparam_fwpostfix, name); + err = request_firmware(fw, path, dev->dev->dev); + if (err) { + b43err(dev->wl, "Firmware file \"%s\" not found " + "or load failed.\n", path); + return err; + } + if ((*fw)->size < sizeof(struct b43_fw_header)) + goto err_format; + hdr = (struct b43_fw_header *)((*fw)->data); + switch (hdr->type) { + case B43_FW_TYPE_UCODE: + case B43_FW_TYPE_PCM: + size = be32_to_cpu(hdr->size); + if (size != (*fw)->size - sizeof(struct b43_fw_header)) + goto err_format; + /* fallthrough */ + case B43_FW_TYPE_IV: + if (hdr->ver != 1) + goto err_format; + break; + default: + goto err_format; + } + + return err; + +err_format: + b43err(dev->wl, "Firmware file \"%s\" format error.\n", path); + return -EPROTO; +} + +static int b43_request_firmware(struct b43_wldev *dev) +{ + struct b43_firmware *fw = &dev->fw; + const u8 rev = dev->dev->id.revision; + const char *filename; + u32 tmshigh; + int err; + + tmshigh = ssb_read32(dev->dev, SSB_TMSHIGH); + if (!fw->ucode) { + if ((rev >= 5) && (rev <= 10)) + filename = "ucode5"; + else if ((rev >= 11) && (rev <= 12)) + filename = "ucode11"; + else if (rev >= 13) + filename = "ucode13"; + else + goto err_no_ucode; + err = do_request_fw(dev, filename, &fw->ucode); + if (err) + goto err_load; + } + if (!fw->pcm) { + if ((rev >= 5) && (rev <= 10)) + filename = "pcm5"; + else if (rev >= 11) + filename = NULL; + else + goto err_no_pcm; + err = do_request_fw(dev, filename, &fw->pcm); + if (err) + goto err_load; + } + if (!fw->initvals) { + switch (dev->phy.type) { + case B43_PHYTYPE_A: + if ((rev >= 5) && (rev <= 10)) { + if (tmshigh & B43_TMSHIGH_GPHY) + filename = "a0g1initvals5"; + else + filename = "a0g0initvals5"; + } else + goto err_no_initvals; + break; + case B43_PHYTYPE_G: + if ((rev >= 5) && (rev <= 10)) + filename = "b0g0initvals5"; + else if (rev >= 13) + filename = "lp0initvals13"; + else + goto err_no_initvals; + break; + default: + goto err_no_initvals; + } + err = do_request_fw(dev, filename, &fw->initvals); + if (err) + goto err_load; + } + if (!fw->initvals_band) { + switch (dev->phy.type) { + case B43_PHYTYPE_A: + if ((rev >= 5) && (rev <= 10)) { + if (tmshigh & B43_TMSHIGH_GPHY) + filename = "a0g1bsinitvals5"; + else + filename = "a0g0bsinitvals5"; + } else if (rev >= 11) + filename = NULL; + else + goto err_no_initvals; + break; + case B43_PHYTYPE_G: + if ((rev >= 5) && (rev <= 10)) + filename = "b0g0bsinitvals5"; + else if (rev >= 11) + filename = NULL; + else + goto err_no_initvals; + break; + default: + goto err_no_initvals; + } + err = do_request_fw(dev, filename, 
&fw->initvals_band); + if (err) + goto err_load; + } + + return 0; + +err_load: + b43_print_fw_helptext(dev->wl); + goto error; + +err_no_ucode: + err = -ENODEV; + b43err(dev->wl, "No microcode available for core rev %u\n", rev); + goto error; + +err_no_pcm: + err = -ENODEV; + b43err(dev->wl, "No PCM available for core rev %u\n", rev); + goto error; + +err_no_initvals: + err = -ENODEV; + b43err(dev->wl, "No Initial Values firmware file for PHY %u, " + "core rev %u\n", dev->phy.type, rev); + goto error; + +error: + b43_release_firmware(dev); + return err; +} + +static int b43_upload_microcode(struct b43_wldev *dev) +{ + const size_t hdr_len = sizeof(struct b43_fw_header); + const __be32 *data; + unsigned int i, len; + u16 fwrev, fwpatch, fwdate, fwtime; + u32 tmp; + int err = 0; + + /* Upload Microcode. */ + data = (__be32 *) (dev->fw.ucode->data + hdr_len); + len = (dev->fw.ucode->size - hdr_len) / sizeof(__be32); + b43_shm_control_word(dev, B43_SHM_UCODE | B43_SHM_AUTOINC_W, 0x0000); + for (i = 0; i < len; i++) { + b43_write32(dev, B43_MMIO_SHM_DATA, be32_to_cpu(data[i])); + udelay(10); + } + + if (dev->fw.pcm) { + /* Upload PCM data. */ + data = (__be32 *) (dev->fw.pcm->data + hdr_len); + len = (dev->fw.pcm->size - hdr_len) / sizeof(__be32); + b43_shm_control_word(dev, B43_SHM_HW, 0x01EA); + b43_write32(dev, B43_MMIO_SHM_DATA, 0x00004000); + /* No need for autoinc bit in SHM_HW */ + b43_shm_control_word(dev, B43_SHM_HW, 0x01EB); + for (i = 0; i < len; i++) { + b43_write32(dev, B43_MMIO_SHM_DATA, be32_to_cpu(data[i])); + udelay(10); + } + } + + b43_write32(dev, B43_MMIO_GEN_IRQ_REASON, B43_IRQ_ALL); + b43_write32(dev, B43_MMIO_MACCTL, + B43_MACCTL_PSM_RUN | + B43_MACCTL_IHR_ENABLED | B43_MACCTL_INFRA); + + /* Wait for the microcode to load and respond */ + i = 0; + while (1) { + tmp = b43_read32(dev, B43_MMIO_GEN_IRQ_REASON); + if (tmp == B43_IRQ_MAC_SUSPENDED) + break; + i++; + if (i >= 50) { + b43err(dev->wl, "Microcode not responding\n"); + b43_print_fw_helptext(dev->wl); + err = -ENODEV; + goto out; + } + udelay(10); + } + b43_read32(dev, B43_MMIO_GEN_IRQ_REASON); /* dummy read */ + + /* Get and check the revisions. */ + fwrev = b43_shm_read16(dev, B43_SHM_SHARED, B43_SHM_SH_UCODEREV); + fwpatch = b43_shm_read16(dev, B43_SHM_SHARED, B43_SHM_SH_UCODEPATCH); + fwdate = b43_shm_read16(dev, B43_SHM_SHARED, B43_SHM_SH_UCODEDATE); + fwtime = b43_shm_read16(dev, B43_SHM_SHARED, B43_SHM_SH_UCODETIME); + + if (fwrev <= 0x128) { + b43err(dev->wl, "YOUR FIRMWARE IS TOO OLD. Firmware from " + "binary drivers older than version 4.x is unsupported. 
" + "You must upgrade your firmware files.\n"); + b43_print_fw_helptext(dev->wl); + b43_write32(dev, B43_MMIO_MACCTL, 0); + err = -EOPNOTSUPP; + goto out; + } + b43dbg(dev->wl, "Loading firmware version %u.%u " + "(20%.2i-%.2i-%.2i %.2i:%.2i:%.2i)\n", + fwrev, fwpatch, + (fwdate >> 12) & 0xF, (fwdate >> 8) & 0xF, fwdate & 0xFF, + (fwtime >> 11) & 0x1F, (fwtime >> 5) & 0x3F, fwtime & 0x1F); + + dev->fw.rev = fwrev; + dev->fw.patch = fwpatch; + + out: + return err; +} + +static int b43_write_initvals(struct b43_wldev *dev, + const struct b43_iv *ivals, + size_t count, + size_t array_size) +{ + const struct b43_iv *iv; + u16 offset; + size_t i; + bool bit32; + + BUILD_BUG_ON(sizeof(struct b43_iv) != 6); + iv = ivals; + for (i = 0; i < count; i++) { + if (array_size < sizeof(iv->offset_size)) + goto err_format; + array_size -= sizeof(iv->offset_size); + offset = be16_to_cpu(iv->offset_size); + bit32 = !!(offset & B43_IV_32BIT); + offset &= B43_IV_OFFSET_MASK; + if (offset >= 0x1000) + goto err_format; + if (bit32) { + u32 value; + + if (array_size < sizeof(iv->data.d32)) + goto err_format; + array_size -= sizeof(iv->data.d32); + + value = be32_to_cpu(get_unaligned(&iv->data.d32)); + b43_write32(dev, offset, value); + + iv = (const struct b43_iv *)((const uint8_t *)iv + + sizeof(__be16) + + sizeof(__be32)); + } else { + u16 value; + + if (array_size < sizeof(iv->data.d16)) + goto err_format; + array_size -= sizeof(iv->data.d16); + + value = be16_to_cpu(iv->data.d16); + b43_write16(dev, offset, value); + + iv = (const struct b43_iv *)((const uint8_t *)iv + + sizeof(__be16) + + sizeof(__be16)); + } + } + if (array_size) + goto err_format; + + return 0; + +err_format: + b43err(dev->wl, "Initial Values Firmware file-format error.\n"); + b43_print_fw_helptext(dev->wl); + + return -EPROTO; +} + +static int b43_upload_initvals(struct b43_wldev *dev) +{ + const size_t hdr_len = sizeof(struct b43_fw_header); + const struct b43_fw_header *hdr; + struct b43_firmware *fw = &dev->fw; + const struct b43_iv *ivals; + size_t count; + int err; + + hdr = (const struct b43_fw_header *)(fw->initvals->data); + ivals = (const struct b43_iv *)(fw->initvals->data + hdr_len); + count = be32_to_cpu(hdr->size); + err = b43_write_initvals(dev, ivals, count, + fw->initvals->size - hdr_len); + if (err) + goto out; + if (fw->initvals_band) { + hdr = (const struct b43_fw_header *)(fw->initvals_band->data); + ivals = (const struct b43_iv *)(fw->initvals_band->data + hdr_len); + count = be32_to_cpu(hdr->size); + err = b43_write_initvals(dev, ivals, count, + fw->initvals_band->size - hdr_len); + if (err) + goto out; + } +out: + + return err; +} + +/* Initialize the GPIOs + * http://bcm-specs.sipsolutions.net/GPIO + */ +static int b43_gpio_init(struct b43_wldev *dev) +{ + struct ssb_bus *bus = dev->dev->bus; + struct ssb_device *gpiodev, *pcidev = NULL; + u32 mask, set; + + b43_write32(dev, B43_MMIO_MACCTL, b43_read32(dev, B43_MMIO_MACCTL) + & ~B43_MACCTL_GPOUTSMSK); + + b43_write16(dev, B43_MMIO_GPIO_MASK, b43_read16(dev, B43_MMIO_GPIO_MASK) + | 0x000F); + + mask = 0x0000001F; + set = 0x0000000F; + if (dev->dev->bus->chip_id == 0x4301) { + mask |= 0x0060; + set |= 0x0060; + } + if (0 /* FIXME: conditional unknown */ ) { + b43_write16(dev, B43_MMIO_GPIO_MASK, + b43_read16(dev, B43_MMIO_GPIO_MASK) + | 0x0100); + mask |= 0x0180; + set |= 0x0180; + } + if (dev->dev->bus->sprom.r1.boardflags_lo & B43_BFL_PACTRL) { + b43_write16(dev, B43_MMIO_GPIO_MASK, + b43_read16(dev, B43_MMIO_GPIO_MASK) + | 0x0200); + mask |= 0x0200; + set |= 
0x0200; + } + if (dev->dev->id.revision >= 2) + mask |= 0x0010; /* FIXME: This is redundant. */ + +#ifdef CONFIG_SSB_DRIVER_PCICORE + pcidev = bus->pcicore.dev; +#endif + gpiodev = bus->chipco.dev ? : pcidev; + if (!gpiodev) + return 0; + ssb_write32(gpiodev, B43_GPIO_CONTROL, + (ssb_read32(gpiodev, B43_GPIO_CONTROL) + & mask) | set); + + return 0; +} + +/* Turn off all GPIO stuff. Call this on module unload, for example. */ +static void b43_gpio_cleanup(struct b43_wldev *dev) +{ + struct ssb_bus *bus = dev->dev->bus; + struct ssb_device *gpiodev, *pcidev = NULL; + +#ifdef CONFIG_SSB_DRIVER_PCICORE + pcidev = bus->pcicore.dev; +#endif + gpiodev = bus->chipco.dev ? : pcidev; + if (!gpiodev) + return; + ssb_write32(gpiodev, B43_GPIO_CONTROL, 0); +} + +/* http://bcm-specs.sipsolutions.net/EnableMac */ +void b43_mac_enable(struct b43_wldev *dev) +{ + dev->mac_suspended--; + B43_WARN_ON(dev->mac_suspended < 0); + B43_WARN_ON(irqs_disabled()); + if (dev->mac_suspended == 0) { + b43_write32(dev, B43_MMIO_MACCTL, + b43_read32(dev, B43_MMIO_MACCTL) + | B43_MACCTL_ENABLED); + b43_write32(dev, B43_MMIO_GEN_IRQ_REASON, + B43_IRQ_MAC_SUSPENDED); + /* Commit writes */ + b43_read32(dev, B43_MMIO_MACCTL); + b43_read32(dev, B43_MMIO_GEN_IRQ_REASON); + b43_power_saving_ctl_bits(dev, 0); + + /* Re-enable IRQs. */ + spin_lock_irq(&dev->wl->irq_lock); + b43_interrupt_enable(dev, dev->irq_savedstate); + spin_unlock_irq(&dev->wl->irq_lock); + } +} + +/* http://bcm-specs.sipsolutions.net/SuspendMAC */ +void b43_mac_suspend(struct b43_wldev *dev) +{ + int i; + u32 tmp; + + might_sleep(); + B43_WARN_ON(irqs_disabled()); + B43_WARN_ON(dev->mac_suspended < 0); + + if (dev->mac_suspended == 0) { + /* Mask IRQs before suspending MAC. Otherwise + * the MAC stays busy and won't suspend. */ + spin_lock_irq(&dev->wl->irq_lock); + tmp = b43_interrupt_disable(dev, B43_IRQ_ALL); + spin_unlock_irq(&dev->wl->irq_lock); + b43_synchronize_irq(dev); + dev->irq_savedstate = tmp; + + b43_power_saving_ctl_bits(dev, B43_PS_AWAKE); + b43_write32(dev, B43_MMIO_MACCTL, + b43_read32(dev, B43_MMIO_MACCTL) + & ~B43_MACCTL_ENABLED); + /* force pci to flush the write */ + b43_read32(dev, B43_MMIO_MACCTL); + for (i = 40; i; i--) { + tmp = b43_read32(dev, B43_MMIO_GEN_IRQ_REASON); + if (tmp & B43_IRQ_MAC_SUSPENDED) + goto out; + msleep(1); + } + b43err(dev->wl, "MAC suspend failed\n"); + } +out: + dev->mac_suspended++; +} + +static void b43_adjust_opmode(struct b43_wldev *dev) +{ + struct b43_wl *wl = dev->wl; + u32 ctl; + u16 cfp_pretbtt; + + ctl = b43_read32(dev, B43_MMIO_MACCTL); + /* Reset status to STA infrastructure mode. */ + ctl &= ~B43_MACCTL_AP; + ctl &= ~B43_MACCTL_KEEP_CTL; + ctl &= ~B43_MACCTL_KEEP_BADPLCP; + ctl &= ~B43_MACCTL_KEEP_BAD; + ctl &= ~B43_MACCTL_PROMISC; + ctl &= ~B43_MACCTL_BEACPROMISC; + ctl |= B43_MACCTL_INFRA; + + if (b43_is_mode(wl, IEEE80211_IF_TYPE_AP)) + ctl |= B43_MACCTL_AP; + else if (b43_is_mode(wl, IEEE80211_IF_TYPE_IBSS)) + ctl &= ~B43_MACCTL_INFRA; + + if (wl->filter_flags & FIF_CONTROL) + ctl |= B43_MACCTL_KEEP_CTL; + if (wl->filter_flags & FIF_FCSFAIL) + ctl |= B43_MACCTL_KEEP_BAD; + if (wl->filter_flags & FIF_PLCPFAIL) + ctl |= B43_MACCTL_KEEP_BADPLCP; + if (wl->filter_flags & FIF_PROMISC_IN_BSS) + ctl |= B43_MACCTL_PROMISC; + if (wl->filter_flags & FIF_BCN_PRBRESP_PROMISC) + ctl |= B43_MACCTL_BEACPROMISC; + + /* Workaround: On old hardware the HW-MAC-address-filter + * doesn't work properly, so always run promisc and filter + * it in software.
*/ + if (dev->dev->id.revision <= 4) + ctl |= B43_MACCTL_PROMISC; + + b43_write32(dev, B43_MMIO_MACCTL, ctl); + + cfp_pretbtt = 2; + if ((ctl & B43_MACCTL_INFRA) && !(ctl & B43_MACCTL_AP)) { + if (dev->dev->bus->chip_id == 0x4306 && + dev->dev->bus->chip_rev == 3) + cfp_pretbtt = 100; + else + cfp_pretbtt = 50; + } + b43_write16(dev, 0x612, cfp_pretbtt); +} + +static void b43_rate_memory_write(struct b43_wldev *dev, u16 rate, int is_ofdm) +{ + u16 offset; + + if (is_ofdm) { + offset = 0x480; + offset += (b43_plcp_get_ratecode_ofdm(rate) & 0x000F) * 2; + } else { + offset = 0x4C0; + offset += (b43_plcp_get_ratecode_cck(rate) & 0x000F) * 2; + } + b43_shm_write16(dev, B43_SHM_SHARED, offset + 0x20, + b43_shm_read16(dev, B43_SHM_SHARED, offset)); +} + +static void b43_rate_memory_init(struct b43_wldev *dev) +{ + switch (dev->phy.type) { + case B43_PHYTYPE_A: + case B43_PHYTYPE_G: + b43_rate_memory_write(dev, B43_OFDM_RATE_6MB, 1); + b43_rate_memory_write(dev, B43_OFDM_RATE_12MB, 1); + b43_rate_memory_write(dev, B43_OFDM_RATE_18MB, 1); + b43_rate_memory_write(dev, B43_OFDM_RATE_24MB, 1); + b43_rate_memory_write(dev, B43_OFDM_RATE_36MB, 1); + b43_rate_memory_write(dev, B43_OFDM_RATE_48MB, 1); + b43_rate_memory_write(dev, B43_OFDM_RATE_54MB, 1); + if (dev->phy.type == B43_PHYTYPE_A) + break; + /* fallthrough */ + case B43_PHYTYPE_B: + b43_rate_memory_write(dev, B43_CCK_RATE_1MB, 0); + b43_rate_memory_write(dev, B43_CCK_RATE_2MB, 0); + b43_rate_memory_write(dev, B43_CCK_RATE_5MB, 0); + b43_rate_memory_write(dev, B43_CCK_RATE_11MB, 0); + break; + default: + B43_WARN_ON(1); + } +} + +/* Set the TX-Antenna for management frames sent by firmware. */ +static void b43_mgmtframe_txantenna(struct b43_wldev *dev, int antenna) +{ + u16 ant = 0; + u16 tmp; + + switch (antenna) { + case B43_ANTENNA0: + ant |= B43_TX4_PHY_ANT0; + break; + case B43_ANTENNA1: + ant |= B43_TX4_PHY_ANT1; + break; + case B43_ANTENNA_AUTO: + ant |= B43_TX4_PHY_ANTLAST; + break; + default: + B43_WARN_ON(1); + } + + /* FIXME We also need to set the other flags of the PHY control field somewhere. 
*/ + + /* For Beacons */ + tmp = b43_shm_read16(dev, B43_SHM_SHARED, B43_SHM_SH_BEACPHYCTL); + tmp = (tmp & ~B43_TX4_PHY_ANT) | ant; + b43_shm_write16(dev, B43_SHM_SHARED, B43_SHM_SH_BEACPHYCTL, tmp); + /* For ACK/CTS */ + tmp = b43_shm_read16(dev, B43_SHM_SHARED, B43_SHM_SH_ACKCTSPHYCTL); + tmp = (tmp & ~B43_TX4_PHY_ANT) | ant; + b43_shm_write16(dev, B43_SHM_SHARED, B43_SHM_SH_ACKCTSPHYCTL, tmp); + /* For Probe Resposes */ + tmp = b43_shm_read16(dev, B43_SHM_SHARED, B43_SHM_SH_PRPHYCTL); + tmp = (tmp & ~B43_TX4_PHY_ANT) | ant; + b43_shm_write16(dev, B43_SHM_SHARED, B43_SHM_SH_PRPHYCTL, tmp); +} + +/* This is the opposite of b43_chip_init() */ +static void b43_chip_exit(struct b43_wldev *dev) +{ + b43_radio_turn_off(dev, 1); + b43_leds_exit(dev); + b43_gpio_cleanup(dev); + /* firmware is released later */ +} + +/* Initialize the chip + * http://bcm-specs.sipsolutions.net/ChipInit + */ +static int b43_chip_init(struct b43_wldev *dev) +{ + struct b43_phy *phy = &dev->phy; + int err, tmp; + u32 value32; + u16 value16; + + b43_write32(dev, B43_MMIO_MACCTL, + B43_MACCTL_PSM_JMP0 | B43_MACCTL_IHR_ENABLED); + + err = b43_request_firmware(dev); + if (err) + goto out; + err = b43_upload_microcode(dev); + if (err) + goto out; /* firmware is released later */ + + err = b43_gpio_init(dev); + if (err) + goto out; /* firmware is released later */ + b43_leds_init(dev); + + err = b43_upload_initvals(dev); + if (err) + goto err_leds_exit; + b43_radio_turn_on(dev); + + b43_write16(dev, 0x03E6, 0x0000); + err = b43_phy_init(dev); + if (err) + goto err_radio_off; + + /* Select initial Interference Mitigation. */ + tmp = phy->interfmode; + phy->interfmode = B43_INTERFMODE_NONE; + b43_radio_set_interference_mitigation(dev, tmp); + + b43_set_rx_antenna(dev, B43_ANTENNA_DEFAULT); + b43_mgmtframe_txantenna(dev, B43_ANTENNA_DEFAULT); + + if (phy->type == B43_PHYTYPE_B) { + value16 = b43_read16(dev, 0x005E); + value16 |= 0x0004; + b43_write16(dev, 0x005E, value16); + } + b43_write32(dev, 0x0100, 0x01000000); + if (dev->dev->id.revision < 5) + b43_write32(dev, 0x010C, 0x01000000); + + b43_write32(dev, B43_MMIO_MACCTL, b43_read32(dev, B43_MMIO_MACCTL) + & ~B43_MACCTL_INFRA); + b43_write32(dev, B43_MMIO_MACCTL, b43_read32(dev, B43_MMIO_MACCTL) + | B43_MACCTL_INFRA); + + if (b43_using_pio(dev)) { + b43_write32(dev, 0x0210, 0x00000100); + b43_write32(dev, 0x0230, 0x00000100); + b43_write32(dev, 0x0250, 0x00000100); + b43_write32(dev, 0x0270, 0x00000100); + b43_shm_write16(dev, B43_SHM_SHARED, 0x0034, 0x0000); + } + + /* Probe Response Timeout value */ + /* FIXME: Default to 0, has to be set by ioctl probably... :-/ */ + b43_shm_write16(dev, B43_SHM_SHARED, 0x0074, 0x0000); + + /* Initially set the wireless operation mode. 
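+ * b43_adjust_opmode() is called again whenever the interface type or the RX filter flags change.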
*/ + b43_adjust_opmode(dev); + + if (dev->dev->id.revision < 3) { + b43_write16(dev, 0x060E, 0x0000); + b43_write16(dev, 0x0610, 0x8000); + b43_write16(dev, 0x0604, 0x0000); + b43_write16(dev, 0x0606, 0x0200); + } else { + b43_write32(dev, 0x0188, 0x80000000); + b43_write32(dev, 0x018C, 0x02000000); + } + b43_write32(dev, B43_MMIO_GEN_IRQ_REASON, 0x00004000); + b43_write32(dev, B43_MMIO_DMA0_IRQ_MASK, 0x0001DC00); + b43_write32(dev, B43_MMIO_DMA1_IRQ_MASK, 0x0000DC00); + b43_write32(dev, B43_MMIO_DMA2_IRQ_MASK, 0x0000DC00); + b43_write32(dev, B43_MMIO_DMA3_IRQ_MASK, 0x0001DC00); + b43_write32(dev, B43_MMIO_DMA4_IRQ_MASK, 0x0000DC00); + b43_write32(dev, B43_MMIO_DMA5_IRQ_MASK, 0x0000DC00); + + value32 = ssb_read32(dev->dev, SSB_TMSLOW); + value32 |= 0x00100000; + ssb_write32(dev->dev, SSB_TMSLOW, value32); + + b43_write16(dev, B43_MMIO_POWERUP_DELAY, + dev->dev->bus->chipco.fast_pwrup_delay); + + err = 0; + b43dbg(dev->wl, "Chip initialized\n"); +out: + return err; + +err_radio_off: + b43_radio_turn_off(dev, 1); +err_leds_exit: + b43_leds_exit(dev); + b43_gpio_cleanup(dev); + return err; +} + +static void b43_periodic_every120sec(struct b43_wldev *dev) +{ + struct b43_phy *phy = &dev->phy; + + if (phy->type != B43_PHYTYPE_G || phy->rev < 2) + return; + + b43_mac_suspend(dev); + b43_lo_g_measure(dev); + b43_mac_enable(dev); + if (b43_has_hardware_pctl(phy)) + b43_lo_g_ctl_mark_all_unused(dev); +} + +static void b43_periodic_every60sec(struct b43_wldev *dev) +{ + struct b43_phy *phy = &dev->phy; + + if (!b43_has_hardware_pctl(phy)) + b43_lo_g_ctl_mark_all_unused(dev); + if (dev->dev->bus->sprom.r1.boardflags_lo & B43_BFL_RSSI) { + b43_mac_suspend(dev); + b43_calc_nrssi_slope(dev); + if ((phy->radio_ver == 0x2050) && (phy->radio_rev == 8)) { + u8 old_chan = phy->channel; + + /* VCO Calibration */ + if (old_chan >= 8) + b43_radio_selectchannel(dev, 1, 0); + else + b43_radio_selectchannel(dev, 13, 0); + b43_radio_selectchannel(dev, old_chan, 0); + } + b43_mac_enable(dev); + } +} + +static void b43_periodic_every30sec(struct b43_wldev *dev) +{ + /* Update device statistics. */ + b43_calculate_link_quality(dev); +} + +static void b43_periodic_every15sec(struct b43_wldev *dev) +{ + struct b43_phy *phy = &dev->phy; + + if (phy->type == B43_PHYTYPE_G) { + //TODO: update_aci_moving_average + if (phy->aci_enable && phy->aci_wlan_automatic) { + b43_mac_suspend(dev); + if (!phy->aci_enable && 1 /*TODO: not scanning? */ ) { + if (0 /*TODO: bunch of conditions */ ) { + b43_radio_set_interference_mitigation + (dev, B43_INTERFMODE_MANUALWLAN); + } + } else if (1 /*TODO*/) { + /* + if ((aci_average > 1000) && !(b43_radio_aci_scan(dev))) { + b43_radio_set_interference_mitigation(dev, + B43_INTERFMODE_NONE); + } + */ + } + b43_mac_enable(dev); + } else if (phy->interfmode == B43_INTERFMODE_NONWLAN && + phy->rev == 1) { + //TODO: implement rev1 workaround + } + } + b43_phy_xmitpower(dev); //FIXME: unless scanning? + //TODO for APHY (temperature?) +} + +static void do_periodic_work(struct b43_wldev *dev) +{ + unsigned int state; + + state = dev->periodic_state; + if (state % 8 == 0) + b43_periodic_every120sec(dev); + if (state % 4 == 0) + b43_periodic_every60sec(dev); + if (state % 2 == 0) + b43_periodic_every30sec(dev); + b43_periodic_every15sec(dev); +} + +/* Periodic work locking policy: + * The whole periodic work handler is protected by + * wl->mutex. If another lock is needed somewhere in the + * pwork callchain, it's aquired in-place, where it's needed. 
+ */ +static void b43_periodic_work_handler(struct work_struct *work) +{ + struct b43_wldev *dev = container_of(work, struct b43_wldev, + periodic_work.work); + struct b43_wl *wl = dev->wl; + unsigned long delay; + + mutex_lock(&wl->mutex); + + if (unlikely(b43_status(dev) != B43_STAT_STARTED)) + goto out; + if (b43_debug(dev, B43_DBG_PWORK_STOP)) + goto out_requeue; + + do_periodic_work(dev); + + dev->periodic_state++; +out_requeue: + if (b43_debug(dev, B43_DBG_PWORK_FAST)) + delay = msecs_to_jiffies(50); + else + delay = round_jiffies(HZ * 15); + queue_delayed_work(wl->hw->workqueue, &dev->periodic_work, delay); +out: + mutex_unlock(&wl->mutex); +} + +static void b43_periodic_tasks_setup(struct b43_wldev *dev) +{ + struct delayed_work *work = &dev->periodic_work; + + dev->periodic_state = 0; + INIT_DELAYED_WORK(work, b43_periodic_work_handler); + queue_delayed_work(dev->wl->hw->workqueue, work, 0); +} + +/* Validate access to the chip (SHM) */ +static int b43_validate_chipaccess(struct b43_wldev *dev) +{ + u32 value; + u32 shm_backup; + + shm_backup = b43_shm_read32(dev, B43_SHM_SHARED, 0); + b43_shm_write32(dev, B43_SHM_SHARED, 0, 0xAA5555AA); + if (b43_shm_read32(dev, B43_SHM_SHARED, 0) != 0xAA5555AA) + goto error; + b43_shm_write32(dev, B43_SHM_SHARED, 0, 0x55AAAA55); + if (b43_shm_read32(dev, B43_SHM_SHARED, 0) != 0x55AAAA55) + goto error; + b43_shm_write32(dev, B43_SHM_SHARED, 0, shm_backup); + + value = b43_read32(dev, B43_MMIO_MACCTL); + if ((value | B43_MACCTL_GMODE) != + (B43_MACCTL_GMODE | B43_MACCTL_IHR_ENABLED)) + goto error; + + value = b43_read32(dev, B43_MMIO_GEN_IRQ_REASON); + if (value) + goto error; + + return 0; + error: + b43err(dev->wl, "Failed to validate the chipaccess\n"); + return -ENODEV; +} + +static void b43_security_init(struct b43_wldev *dev) +{ + dev->max_nr_keys = (dev->dev->id.revision >= 5) ? 58 : 20; + B43_WARN_ON(dev->max_nr_keys > ARRAY_SIZE(dev->key)); + dev->ktp = b43_shm_read16(dev, B43_SHM_SHARED, B43_SHM_SH_KTP); + /* KTP is a word address, but we address SHM bytewise. + * So multiply by two. + */ + dev->ktp *= 2; + if (dev->dev->id.revision >= 5) { + /* Number of RCMTA address slots */ + b43_write16(dev, B43_MMIO_RCMTA_COUNT, dev->max_nr_keys - 8); + } + b43_clear_keys(dev); +} + +static int b43_rng_read(struct hwrng *rng, u32 * data) +{ + struct b43_wl *wl = (struct b43_wl *)rng->priv; + unsigned long flags; + + /* Don't take wl->mutex here, as it could deadlock with + * hwrng internal locking. It's not needed to take + * wl->mutex here, anyway. 
*/ + + spin_lock_irqsave(&wl->irq_lock, flags); + *data = b43_read16(wl->current_dev, B43_MMIO_RNG); + spin_unlock_irqrestore(&wl->irq_lock, flags); + + return (sizeof(u16)); +} + +static void b43_rng_exit(struct b43_wl *wl) +{ + if (wl->rng_initialized) + hwrng_unregister(&wl->rng); +} + +static int b43_rng_init(struct b43_wl *wl) +{ + int err; + + snprintf(wl->rng_name, ARRAY_SIZE(wl->rng_name), + "%s_%s", KBUILD_MODNAME, wiphy_name(wl->hw->wiphy)); + wl->rng.name = wl->rng_name; + wl->rng.data_read = b43_rng_read; + wl->rng.priv = (unsigned long)wl; + wl->rng_initialized = 1; + err = hwrng_register(&wl->rng); + if (err) { + wl->rng_initialized = 0; + b43err(wl, "Failed to register the random " + "number generator (%d)\n", err); + } + + return err; +} + +static int b43_tx(struct ieee80211_hw *hw, + struct sk_buff *skb, struct ieee80211_tx_control *ctl) +{ + struct b43_wl *wl = hw_to_b43_wl(hw); + struct b43_wldev *dev = wl->current_dev; + int err = -ENODEV; + unsigned long flags; + + if (unlikely(!dev)) + goto out; + if (unlikely(b43_status(dev) < B43_STAT_STARTED)) + goto out; + /* DMA-TX is done without a global lock. */ + if (b43_using_pio(dev)) { + spin_lock_irqsave(&wl->irq_lock, flags); + err = b43_pio_tx(dev, skb, ctl); + spin_unlock_irqrestore(&wl->irq_lock, flags); + } else + err = b43_dma_tx(dev, skb, ctl); + out: + if (unlikely(err)) + return NETDEV_TX_BUSY; + return NETDEV_TX_OK; +} + +static int b43_conf_tx(struct ieee80211_hw *hw, + int queue, + const struct ieee80211_tx_queue_params *params) +{ + return 0; +} + +static int b43_get_tx_stats(struct ieee80211_hw *hw, + struct ieee80211_tx_queue_stats *stats) +{ + struct b43_wl *wl = hw_to_b43_wl(hw); + struct b43_wldev *dev = wl->current_dev; + unsigned long flags; + int err = -ENODEV; + + if (!dev) + goto out; + spin_lock_irqsave(&wl->irq_lock, flags); + if (likely(b43_status(dev) >= B43_STAT_STARTED)) { + if (b43_using_pio(dev)) + b43_pio_get_tx_stats(dev, stats); + else + b43_dma_get_tx_stats(dev, stats); + err = 0; + } + spin_unlock_irqrestore(&wl->irq_lock, flags); + out: + return err; +} + +static int b43_get_stats(struct ieee80211_hw *hw, + struct ieee80211_low_level_stats *stats) +{ + struct b43_wl *wl = hw_to_b43_wl(hw); + unsigned long flags; + + spin_lock_irqsave(&wl->irq_lock, flags); + memcpy(stats, &wl->ieee_stats, sizeof(*stats)); + spin_unlock_irqrestore(&wl->irq_lock, flags); + + return 0; +} + +static const char *phymode_to_string(unsigned int phymode) +{ + switch (phymode) { + case B43_PHYMODE_A: + return "A"; + case B43_PHYMODE_B: + return "B"; + case B43_PHYMODE_G: + return "G"; + default: + B43_WARN_ON(1); + } + return ""; +} + +static int find_wldev_for_phymode(struct b43_wl *wl, + unsigned int phymode, + struct b43_wldev **dev, bool * gmode) +{ + struct b43_wldev *d; + + list_for_each_entry(d, &wl->devlist, list) { + if (d->phy.possible_phymodes & phymode) { + /* Ok, this device supports the PHY-mode. + * Now figure out how the gmode bit has to be + * set to support it. 
*/ + if (phymode == B43_PHYMODE_A) + *gmode = 0; + else + *gmode = 1; + *dev = d; + + return 0; + } + } + + return -ESRCH; +} + +static void b43_put_phy_into_reset(struct b43_wldev *dev) +{ + struct ssb_device *sdev = dev->dev; + u32 tmslow; + + tmslow = ssb_read32(sdev, SSB_TMSLOW); + tmslow &= ~B43_TMSLOW_GMODE; + tmslow |= B43_TMSLOW_PHYRESET; + tmslow |= SSB_TMSLOW_FGC; + ssb_write32(sdev, SSB_TMSLOW, tmslow); + msleep(1); + + tmslow = ssb_read32(sdev, SSB_TMSLOW); + tmslow &= ~SSB_TMSLOW_FGC; + tmslow |= B43_TMSLOW_PHYRESET; + ssb_write32(sdev, SSB_TMSLOW, tmslow); + msleep(1); +} + +/* Expects wl->mutex locked */ +static int b43_switch_phymode(struct b43_wl *wl, unsigned int new_mode) +{ + struct b43_wldev *up_dev; + struct b43_wldev *down_dev; + int err; + bool gmode = 0; + int prev_status; + + err = find_wldev_for_phymode(wl, new_mode, &up_dev, &gmode); + if (err) { + b43err(wl, "Could not find a device for %s-PHY mode\n", + phymode_to_string(new_mode)); + return err; + } + if ((up_dev == wl->current_dev) && + (!!wl->current_dev->phy.gmode == !!gmode)) { + /* This device is already running. */ + return 0; + } + b43dbg(wl, "Reconfiguring PHYmode to %s-PHY\n", + phymode_to_string(new_mode)); + down_dev = wl->current_dev; + + prev_status = b43_status(down_dev); + /* Shutdown the currently running core. */ + if (prev_status >= B43_STAT_STARTED) + b43_wireless_core_stop(down_dev); + if (prev_status >= B43_STAT_INITIALIZED) + b43_wireless_core_exit(down_dev); + + if (down_dev != up_dev) { + /* We switch to a different core, so we put PHY into + * RESET on the old core. */ + b43_put_phy_into_reset(down_dev); + } + + /* Now start the new core. */ + up_dev->phy.gmode = gmode; + if (prev_status >= B43_STAT_INITIALIZED) { + err = b43_wireless_core_init(up_dev); + if (err) { + b43err(wl, "Fatal: Could not initialize device for " + "newly selected %s-PHY mode\n", + phymode_to_string(new_mode)); + goto init_failure; + } + } + if (prev_status >= B43_STAT_STARTED) { + err = b43_wireless_core_start(up_dev); + if (err) { + b43err(wl, "Fatal: Could not start device for " + "newly selected %s-PHY mode\n", + phymode_to_string(new_mode)); + b43_wireless_core_exit(up_dev); + goto init_failure; + } + } + B43_WARN_ON(b43_status(up_dev) != prev_status); + + wl->current_dev = up_dev; + + return 0; + init_failure: + /* Whoops, failed to init the new core. No core is operating now. */ + wl->current_dev = NULL; + return err; +} + +static int b43_antenna_from_ieee80211(u8 antenna) +{ + switch (antenna) { + case 0: /* default/diversity */ + return B43_ANTENNA_DEFAULT; + case 1: /* Antenna 0 */ + return B43_ANTENNA0; + case 2: /* Antenna 1 */ + return B43_ANTENNA1; + default: + return B43_ANTENNA_DEFAULT; + } +} + +static int b43_dev_config(struct ieee80211_hw *hw, struct ieee80211_conf *conf) +{ + struct b43_wl *wl = hw_to_b43_wl(hw); + struct b43_wldev *dev; + struct b43_phy *phy; + unsigned long flags; + unsigned int new_phymode = 0xFFFF; + int antenna_tx; + int antenna_rx; + int err = 0; + u32 savedirqs; + + antenna_tx = b43_antenna_from_ieee80211(conf->antenna_sel_tx); + antenna_rx = b43_antenna_from_ieee80211(conf->antenna_sel_rx); + + mutex_lock(&wl->mutex); + + /* Switch the PHY mode (if necessary).
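+ * This may hand wl->current_dev over to a different core.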
*/ + switch (conf->phymode) { + case MODE_IEEE80211A: + new_phymode = B43_PHYMODE_A; + break; + case MODE_IEEE80211B: + new_phymode = B43_PHYMODE_B; + break; + case MODE_IEEE80211G: + new_phymode = B43_PHYMODE_G; + break; + default: + B43_WARN_ON(1); + } + err = b43_switch_phymode(wl, new_phymode); + if (err) + goto out_unlock_mutex; + dev = wl->current_dev; + phy = &dev->phy; + + /* Disable IRQs while reconfiguring the device. + * This makes it possible to drop the spinlock throughout + * the reconfiguration process. */ + spin_lock_irqsave(&wl->irq_lock, flags); + if (b43_status(dev) < B43_STAT_STARTED) { + spin_unlock_irqrestore(&wl->irq_lock, flags); + goto out_unlock_mutex; + } + savedirqs = b43_interrupt_disable(dev, B43_IRQ_ALL); + spin_unlock_irqrestore(&wl->irq_lock, flags); + b43_synchronize_irq(dev); + + /* Switch to the requested channel. + * The firmware takes care of races with the TX handler. */ + if (conf->channel_val != phy->channel) + b43_radio_selectchannel(dev, conf->channel_val, 0); + + /* Enable/Disable ShortSlot timing. */ + if ((!!(conf->flags & IEEE80211_CONF_SHORT_SLOT_TIME)) != + dev->short_slot) { + B43_WARN_ON(phy->type != B43_PHYTYPE_G); + if (conf->flags & IEEE80211_CONF_SHORT_SLOT_TIME) + b43_short_slot_timing_enable(dev); + else + b43_short_slot_timing_disable(dev); + } + + /* Adjust the desired TX power level. */ + if (conf->power_level != 0) { + if (conf->power_level != phy->power_level) { + phy->power_level = conf->power_level; + b43_phy_xmitpower(dev); + } + } + + /* Antennas for RX and management frame TX. */ + b43_mgmtframe_txantenna(dev, antenna_tx); + b43_set_rx_antenna(dev, antenna_rx); + + /* Update templates for AP mode. */ + if (b43_is_mode(wl, IEEE80211_IF_TYPE_AP)) + b43_set_beacon_int(dev, conf->beacon_int); + + if (!!conf->radio_enabled != phy->radio_on) { + if (conf->radio_enabled) { + b43_radio_turn_on(dev); + b43info(dev->wl, "Radio turned on by software\n"); + if (!dev->radio_hw_enable) { + b43info(dev->wl, "The hardware RF-kill button " + "still turns the radio physically off. " + "Press the button to turn it on.\n"); + } + } else { + b43_radio_turn_off(dev, 0); + b43info(dev->wl, "Radio turned off by software\n"); + } + } + + spin_lock_irqsave(&wl->irq_lock, flags); + b43_interrupt_enable(dev, savedirqs); + mmiowb(); + spin_unlock_irqrestore(&wl->irq_lock, flags); + out_unlock_mutex: + mutex_unlock(&wl->mutex); + + return err; +} + +static int b43_dev_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, + const u8 *local_addr, const u8 *addr, + struct ieee80211_key_conf *key) +{ + struct b43_wl *wl = hw_to_b43_wl(hw); + struct b43_wldev *dev = wl->current_dev; + unsigned long flags; + u8 algorithm; + u8 index; + int err = -EINVAL; + DECLARE_MAC_BUF(mac); + + if (modparam_nohwcrypt) + return -ENOSPC; /* User disabled HW-crypto */ + + if (!dev) + return -ENODEV; + switch (key->alg) { + case ALG_WEP: + if (key->keylen == 5) + algorithm = B43_SEC_ALGO_WEP40; + else + algorithm = B43_SEC_ALGO_WEP104; + break; + case ALG_TKIP: + algorithm = B43_SEC_ALGO_TKIP; + break; + case ALG_CCMP: + algorithm = B43_SEC_ALGO_AES; + break; + default: + B43_WARN_ON(1); + goto out; + } + + index = (u8) (key->keyidx); + if (index > 3) + goto out; + + mutex_lock(&wl->mutex); + spin_lock_irqsave(&wl->irq_lock, flags); + + if (b43_status(dev) < B43_STAT_INITIALIZED) { + err = -ENODEV; + goto out_unlock; + } + + switch (cmd) { + case SET_KEY: + if (algorithm == B43_SEC_ALGO_TKIP) { + /* FIXME: No TKIP hardware encryption for now. 
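+ * Rejecting the key makes mac80211 fall back to software encryption.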
*/ + err = -EOPNOTSUPP; + goto out_unlock; + } + + if (is_broadcast_ether_addr(addr)) { + /* addr is FF:FF:FF:FF:FF:FF for default keys */ + err = b43_key_write(dev, index, algorithm, + key->key, key->keylen, NULL, key); + } else { + /* + * either pairwise key or address is 00:00:00:00:00:00 + * for transmit-only keys + */ + err = b43_key_write(dev, -1, algorithm, + key->key, key->keylen, addr, key); + } + if (err) + goto out_unlock; + + if (algorithm == B43_SEC_ALGO_WEP40 || + algorithm == B43_SEC_ALGO_WEP104) { + b43_hf_write(dev, b43_hf_read(dev) | B43_HF_USEDEFKEYS); + } else { + b43_hf_write(dev, + b43_hf_read(dev) & ~B43_HF_USEDEFKEYS); + } + key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV; + break; + case DISABLE_KEY: { + err = b43_key_clear(dev, key->hw_key_idx); + if (err) + goto out_unlock; + break; + } + default: + B43_WARN_ON(1); + } +out_unlock: + spin_unlock_irqrestore(&wl->irq_lock, flags); + mutex_unlock(&wl->mutex); +out: + if (!err) { + b43dbg(wl, "%s hardware based encryption for keyidx: %d, " + "mac: %s\n", + cmd == SET_KEY ? "Using" : "Disabling", key->keyidx, + print_mac(mac, addr)); + } + return err; +} + +static void b43_configure_filter(struct ieee80211_hw *hw, + unsigned int changed, unsigned int *fflags, + int mc_count, struct dev_addr_list *mc_list) +{ + struct b43_wl *wl = hw_to_b43_wl(hw); + struct b43_wldev *dev = wl->current_dev; + unsigned long flags; + + if (!dev) { + *fflags = 0; + return; + } + + spin_lock_irqsave(&wl->irq_lock, flags); + *fflags &= FIF_PROMISC_IN_BSS | + FIF_ALLMULTI | + FIF_FCSFAIL | + FIF_PLCPFAIL | + FIF_CONTROL | + FIF_OTHER_BSS | + FIF_BCN_PRBRESP_PROMISC; + + changed &= FIF_PROMISC_IN_BSS | + FIF_ALLMULTI | + FIF_FCSFAIL | + FIF_PLCPFAIL | + FIF_CONTROL | + FIF_OTHER_BSS | + FIF_BCN_PRBRESP_PROMISC; + + wl->filter_flags = *fflags; + + if (changed && b43_status(dev) >= B43_STAT_INITIALIZED) + b43_adjust_opmode(dev); + spin_unlock_irqrestore(&wl->irq_lock, flags); +} + +static int b43_config_interface(struct ieee80211_hw *hw, + int if_id, struct ieee80211_if_conf *conf) +{ + struct b43_wl *wl = hw_to_b43_wl(hw); + struct b43_wldev *dev = wl->current_dev; + unsigned long flags; + + if (!dev) + return -ENODEV; + mutex_lock(&wl->mutex); + spin_lock_irqsave(&wl->irq_lock, flags); + B43_WARN_ON(wl->if_id != if_id); + if (conf->bssid) + memcpy(wl->bssid, conf->bssid, ETH_ALEN); + else + memset(wl->bssid, 0, ETH_ALEN); + if (b43_status(dev) >= B43_STAT_INITIALIZED) { + if (b43_is_mode(wl, IEEE80211_IF_TYPE_AP)) { + B43_WARN_ON(conf->type != IEEE80211_IF_TYPE_AP); + b43_set_ssid(dev, conf->ssid, conf->ssid_len); + if (conf->beacon) + b43_refresh_templates(dev, conf->beacon); + } + b43_write_mac_bssid_templates(dev); + } + spin_unlock_irqrestore(&wl->irq_lock, flags); + mutex_unlock(&wl->mutex); + + return 0; +} + +/* Locking: wl->mutex */ +static void b43_wireless_core_stop(struct b43_wldev *dev) +{ + struct b43_wl *wl = dev->wl; + unsigned long flags; + + if (b43_status(dev) < B43_STAT_STARTED) + return; + b43_set_status(dev, B43_STAT_INITIALIZED); + + mutex_unlock(&wl->mutex); + /* Must unlock as it would otherwise deadlock. No races here. + * Cancel the possibly running self-rearming periodic work. */ + cancel_delayed_work_sync(&dev->periodic_work); + mutex_lock(&wl->mutex); + + ieee80211_stop_queues(wl->hw); //FIXME this could cause a deadlock, as mac80211 seems buggy. + + /* Disable and sync interrupts. 
*/ + spin_lock_irqsave(&wl->irq_lock, flags); + dev->irq_savedstate = b43_interrupt_disable(dev, B43_IRQ_ALL); + b43_read32(dev, B43_MMIO_GEN_IRQ_MASK); /* flush */ + spin_unlock_irqrestore(&wl->irq_lock, flags); + b43_synchronize_irq(dev); + + b43_mac_suspend(dev); + free_irq(dev->dev->irq, dev); + b43dbg(wl, "Wireless interface stopped\n"); +} + +/* Locking: wl->mutex */ +static int b43_wireless_core_start(struct b43_wldev *dev) +{ + int err; + + B43_WARN_ON(b43_status(dev) != B43_STAT_INITIALIZED); + + drain_txstatus_queue(dev); + err = request_irq(dev->dev->irq, b43_interrupt_handler, + IRQF_SHARED, KBUILD_MODNAME, dev); + if (err) { + b43err(dev->wl, "Cannot request IRQ-%d\n", dev->dev->irq); + goto out; + } + + /* We are ready to run. */ + b43_set_status(dev, B43_STAT_STARTED); + + /* Start data flow (TX/RX). */ + b43_mac_enable(dev); + b43_interrupt_enable(dev, dev->irq_savedstate); + ieee80211_start_queues(dev->wl->hw); + + /* Start maintenance work */ + b43_periodic_tasks_setup(dev); + + b43dbg(dev->wl, "Wireless interface started\n"); + out: + return err; +} + +/* Get PHY and RADIO versioning numbers */ +static int b43_phy_versioning(struct b43_wldev *dev) +{ + struct b43_phy *phy = &dev->phy; + u32 tmp; + u8 analog_type; + u8 phy_type; + u8 phy_rev; + u16 radio_manuf; + u16 radio_ver; + u16 radio_rev; + int unsupported = 0; + + /* Get PHY versioning */ + tmp = b43_read16(dev, B43_MMIO_PHY_VER); + analog_type = (tmp & B43_PHYVER_ANALOG) >> B43_PHYVER_ANALOG_SHIFT; + phy_type = (tmp & B43_PHYVER_TYPE) >> B43_PHYVER_TYPE_SHIFT; + phy_rev = (tmp & B43_PHYVER_VERSION); + switch (phy_type) { + case B43_PHYTYPE_A: + if (phy_rev >= 4) + unsupported = 1; + break; + case B43_PHYTYPE_B: + if (phy_rev != 2 && phy_rev != 4 && phy_rev != 6 + && phy_rev != 7) + unsupported = 1; + break; + case B43_PHYTYPE_G: + if (phy_rev > 8) + unsupported = 1; + break; + default: + unsupported = 1; + } + if (unsupported) { + b43err(dev->wl, "FOUND UNSUPPORTED PHY " + "(Analog %u, Type %u, Revision %u)\n", + analog_type, phy_type, phy_rev); + return -EOPNOTSUPP; + } + b43dbg(dev->wl, "Found PHY: Analog %u, Type %u, Revision %u\n", + analog_type, phy_type, phy_rev); + + /* Get RADIO versioning */ + if (dev->dev->bus->chip_id == 0x4317) { + if (dev->dev->bus->chip_rev == 0) + tmp = 0x3205017F; + else if (dev->dev->bus->chip_rev == 1) + tmp = 0x4205017F; + else + tmp = 0x5205017F; + } else { + b43_write16(dev, B43_MMIO_RADIO_CONTROL, B43_RADIOCTL_ID); + tmp = b43_read16(dev, B43_MMIO_RADIO_DATA_HIGH); + tmp <<= 16; + b43_write16(dev, B43_MMIO_RADIO_CONTROL, B43_RADIOCTL_ID); + tmp |= b43_read16(dev, B43_MMIO_RADIO_DATA_LOW); + } + radio_manuf = (tmp & 0x00000FFF); + radio_ver = (tmp & 0x0FFFF000) >> 12; + radio_rev = (tmp & 0xF0000000) >> 28; + switch (phy_type) { + case B43_PHYTYPE_A: + if (radio_ver != 0x2060) + unsupported = 1; + if (radio_rev != 1) + unsupported = 1; + if (radio_manuf != 0x17F) + unsupported = 1; + break; + case B43_PHYTYPE_B: + if ((radio_ver & 0xFFF0) != 0x2050) + unsupported = 1; + break; + case B43_PHYTYPE_G: + if (radio_ver != 0x2050) + unsupported = 1; + break; + default: + B43_WARN_ON(1); + } + if (unsupported) { + b43err(dev->wl, "FOUND UNSUPPORTED RADIO " + "(Manuf 0x%X, Version 0x%X, Revision %u)\n", + radio_manuf, radio_ver, radio_rev); + return -EOPNOTSUPP; + } + b43dbg(dev->wl, "Found Radio: Manuf 0x%X, Version 0x%X, Revision %u\n", + radio_manuf, radio_ver, radio_rev); + + phy->radio_manuf = radio_manuf; + phy->radio_ver = radio_ver; + phy->radio_rev = radio_rev; + + 
phy->analog = analog_type; + phy->type = phy_type; + phy->rev = phy_rev; + + return 0; +} + +static void setup_struct_phy_for_init(struct b43_wldev *dev, + struct b43_phy *phy) +{ + struct b43_txpower_lo_control *lo; + int i; + + memset(phy->minlowsig, 0xFF, sizeof(phy->minlowsig)); + memset(phy->minlowsigpos, 0, sizeof(phy->minlowsigpos)); + + /* Flags */ + phy->locked = 0; + + phy->aci_enable = 0; + phy->aci_wlan_automatic = 0; + phy->aci_hw_rssi = 0; + + phy->radio_off_context.valid = 0; + + lo = phy->lo_control; + if (lo) { + memset(lo, 0, sizeof(*(phy->lo_control))); + lo->rebuild = 1; + lo->tx_bias = 0xFF; + } + phy->max_lb_gain = 0; + phy->trsw_rx_gain = 0; + phy->txpwr_offset = 0; + + /* NRSSI */ + phy->nrssislope = 0; + for (i = 0; i < ARRAY_SIZE(phy->nrssi); i++) + phy->nrssi[i] = -1000; + for (i = 0; i < ARRAY_SIZE(phy->nrssi_lt); i++) + phy->nrssi_lt[i] = i; + + phy->lofcal = 0xFFFF; + phy->initval = 0xFFFF; + + spin_lock_init(&phy->lock); + phy->interfmode = B43_INTERFMODE_NONE; + phy->channel = 0xFF; + + phy->hardware_power_control = !!modparam_hwpctl; +} + +static void setup_struct_wldev_for_init(struct b43_wldev *dev) +{ + /* Flags */ + dev->reg124_set_0x4 = 0; + /* Assume the radio is enabled. If it's not enabled, the state will + * immediately get fixed on the first periodic work run. */ + dev->radio_hw_enable = 1; + + /* Stats */ + memset(&dev->stats, 0, sizeof(dev->stats)); + + setup_struct_phy_for_init(dev, &dev->phy); + + /* IRQ related flags */ + dev->irq_reason = 0; + memset(dev->dma_reason, 0, sizeof(dev->dma_reason)); + dev->irq_savedstate = B43_IRQ_MASKTEMPLATE; + + dev->mac_suspended = 1; + + /* Noise calculation context */ + memset(&dev->noisecalc, 0, sizeof(dev->noisecalc)); +} + +static void b43_bluetooth_coext_enable(struct b43_wldev *dev) +{ + struct ssb_sprom *sprom = &dev->dev->bus->sprom; + u32 hf; + + if (!(sprom->r1.boardflags_lo & B43_BFL_BTCOEXIST)) + return; + if (dev->phy.type != B43_PHYTYPE_B && !dev->phy.gmode) + return; + + hf = b43_hf_read(dev); + if (sprom->r1.boardflags_lo & B43_BFL_BTCMOD) + hf |= B43_HF_BTCOEXALT; + else + hf |= B43_HF_BTCOEX; + b43_hf_write(dev, hf); + //TODO +} + +static void b43_bluetooth_coext_disable(struct b43_wldev *dev) +{ //TODO +} + +static void b43_imcfglo_timeouts_workaround(struct b43_wldev *dev) +{ +#ifdef CONFIG_SSB_DRIVER_PCICORE + struct ssb_bus *bus = dev->dev->bus; + u32 tmp; + + if (bus->pcicore.dev && + bus->pcicore.dev->id.coreid == SSB_DEV_PCI && + bus->pcicore.dev->id.revision <= 5) { + /* IMCFGLO timeouts workaround. 
*/ + tmp = ssb_read32(dev->dev, SSB_IMCFGLO); + tmp &= ~SSB_IMCFGLO_REQTO; + tmp &= ~SSB_IMCFGLO_SERTO; + switch (bus->bustype) { + case SSB_BUSTYPE_PCI: + case SSB_BUSTYPE_PCMCIA: + tmp |= 0x32; + break; + case SSB_BUSTYPE_SSB: + tmp |= 0x53; + break; + } + ssb_write32(dev->dev, SSB_IMCFGLO, tmp); + } +#endif /* CONFIG_SSB_DRIVER_PCICORE */ +} + +/* Shutdown a wireless core */ +/* Locking: wl->mutex */ +static void b43_wireless_core_exit(struct b43_wldev *dev) +{ + struct b43_phy *phy = &dev->phy; + + B43_WARN_ON(b43_status(dev) > B43_STAT_INITIALIZED); + if (b43_status(dev) != B43_STAT_INITIALIZED) + return; + b43_set_status(dev, B43_STAT_UNINIT); + + mutex_unlock(&dev->wl->mutex); + b43_rfkill_exit(dev); + mutex_lock(&dev->wl->mutex); + + b43_rng_exit(dev->wl); + b43_pio_free(dev); + b43_dma_free(dev); + b43_chip_exit(dev); + b43_radio_turn_off(dev, 1); + b43_switch_analog(dev, 0); + if (phy->dyn_tssi_tbl) + kfree(phy->tssi2dbm); + kfree(phy->lo_control); + phy->lo_control = NULL; + ssb_device_disable(dev->dev, 0); + ssb_bus_may_powerdown(dev->dev->bus); +} + +/* Initialize a wireless core */ +static int b43_wireless_core_init(struct b43_wldev *dev) +{ + struct b43_wl *wl = dev->wl; + struct ssb_bus *bus = dev->dev->bus; + struct ssb_sprom *sprom = &bus->sprom; + struct b43_phy *phy = &dev->phy; + int err; + u32 hf, tmp; + + B43_WARN_ON(b43_status(dev) != B43_STAT_UNINIT); + + err = ssb_bus_powerup(bus, 0); + if (err) + goto out; + if (!ssb_device_is_enabled(dev->dev)) { + tmp = phy->gmode ? B43_TMSLOW_GMODE : 0; + b43_wireless_core_reset(dev, tmp); + } + + if ((phy->type == B43_PHYTYPE_B) || (phy->type == B43_PHYTYPE_G)) { + phy->lo_control = + kzalloc(sizeof(*(phy->lo_control)), GFP_KERNEL); + if (!phy->lo_control) { + err = -ENOMEM; + goto err_busdown; + } + } + setup_struct_wldev_for_init(dev); + + err = b43_phy_init_tssi2dbm_table(dev); + if (err) + goto err_kfree_lo_control; + + /* Enable IRQ routing to this device. */ + ssb_pcicore_dev_irqvecs_enable(&bus->pcicore, dev->dev); + + b43_imcfglo_timeouts_workaround(dev); + b43_bluetooth_coext_disable(dev); + b43_phy_early_init(dev); + err = b43_chip_init(dev); + if (err) + goto err_kfree_tssitbl; + b43_shm_write16(dev, B43_SHM_SHARED, + B43_SHM_SH_WLCOREREV, dev->dev->id.revision); + hf = b43_hf_read(dev); + if (phy->type == B43_PHYTYPE_G) { + hf |= B43_HF_SYMW; + if (phy->rev == 1) + hf |= B43_HF_GDCW; + if (sprom->r1.boardflags_lo & B43_BFL_PACTRL) + hf |= B43_HF_OFDMPABOOST; + } else if (phy->type == B43_PHYTYPE_B) { + hf |= B43_HF_SYMW; + if (phy->rev >= 2 && phy->radio_ver == 0x2050) + hf &= ~B43_HF_GDCW; + } + b43_hf_write(dev, hf); + + /* Short/Long Retry Limit. + * The retry-limit is a 4-bit counter. Enforce this to avoid overflowing + * the chip-internal counter. + */ + tmp = limit_value(modparam_short_retry, 0, 0xF); + b43_shm_write16(dev, B43_SHM_SCRATCH, B43_SHM_SC_SRLIMIT, tmp); + tmp = limit_value(modparam_long_retry, 0, 0xF); + b43_shm_write16(dev, B43_SHM_SCRATCH, B43_SHM_SC_LRLIMIT, tmp); + + b43_shm_write16(dev, B43_SHM_SHARED, B43_SHM_SH_SFFBLIM, 3); + b43_shm_write16(dev, B43_SHM_SHARED, B43_SHM_SH_LFFBLIM, 2); + + /* Disable sending probe responses from firmware. + * Setting the MaxTime to one usec will always trigger + * a timeout, so we never send any probe resp. + * A timeout of zero is infinite. 
*/ + b43_shm_write16(dev, B43_SHM_SHARED, B43_SHM_SH_PRMAXTIME, 1); + + b43_rate_memory_init(dev); + + /* Minimum Contention Window */ + if (phy->type == B43_PHYTYPE_B) { + b43_shm_write16(dev, B43_SHM_SCRATCH, B43_SHM_SC_MINCONT, 0x1F); + } else { + b43_shm_write16(dev, B43_SHM_SCRATCH, B43_SHM_SC_MINCONT, 0xF); + } + /* Maximum Contention Window */ + b43_shm_write16(dev, B43_SHM_SCRATCH, B43_SHM_SC_MAXCONT, 0x3FF); + + do { + if (b43_using_pio(dev)) { + err = b43_pio_init(dev); + } else { + err = b43_dma_init(dev); + if (!err) + b43_qos_init(dev); + } + } while (err == -EAGAIN); + if (err) + goto err_chip_exit; + +//FIXME +#if 1 + b43_write16(dev, 0x0612, 0x0050); + b43_shm_write16(dev, B43_SHM_SHARED, 0x0416, 0x0050); + b43_shm_write16(dev, B43_SHM_SHARED, 0x0414, 0x01F4); +#endif + + b43_bluetooth_coext_enable(dev); + + ssb_bus_powerup(bus, 1); /* Enable dynamic PCTL */ + memset(wl->bssid, 0, ETH_ALEN); + memset(wl->mac_addr, 0, ETH_ALEN); + b43_upload_card_macaddress(dev); + b43_security_init(dev); + b43_rfkill_init(dev); + b43_rng_init(wl); + + b43_set_status(dev, B43_STAT_INITIALIZED); + + out: + return err; + + err_chip_exit: + b43_chip_exit(dev); + err_kfree_tssitbl: + if (phy->dyn_tssi_tbl) + kfree(phy->tssi2dbm); + err_kfree_lo_control: + kfree(phy->lo_control); + phy->lo_control = NULL; + err_busdown: + ssb_bus_may_powerdown(bus); + B43_WARN_ON(b43_status(dev) != B43_STAT_UNINIT); + return err; +} + +static int b43_add_interface(struct ieee80211_hw *hw, + struct ieee80211_if_init_conf *conf) +{ + struct b43_wl *wl = hw_to_b43_wl(hw); + struct b43_wldev *dev; + unsigned long flags; + int err = -EOPNOTSUPP; + + /* TODO: allow WDS/AP devices to coexist */ + + if (conf->type != IEEE80211_IF_TYPE_AP && + conf->type != IEEE80211_IF_TYPE_STA && + conf->type != IEEE80211_IF_TYPE_WDS && + conf->type != IEEE80211_IF_TYPE_IBSS) + return -EOPNOTSUPP; + + mutex_lock(&wl->mutex); + if (wl->operating) + goto out_mutex_unlock; + + b43dbg(wl, "Adding Interface type %d\n", conf->type); + + dev = wl->current_dev; + wl->operating = 1; + wl->if_id = conf->if_id; + wl->if_type = conf->type; + memcpy(wl->mac_addr, conf->mac_addr, ETH_ALEN); + + spin_lock_irqsave(&wl->irq_lock, flags); + b43_adjust_opmode(dev); + b43_upload_card_macaddress(dev); + spin_unlock_irqrestore(&wl->irq_lock, flags); + + err = 0; + out_mutex_unlock: + mutex_unlock(&wl->mutex); + + return err; +} + +static void b43_remove_interface(struct ieee80211_hw *hw, + struct ieee80211_if_init_conf *conf) +{ + struct b43_wl *wl = hw_to_b43_wl(hw); + struct b43_wldev *dev = wl->current_dev; + unsigned long flags; + + b43dbg(wl, "Removing Interface type %d\n", conf->type); + + mutex_lock(&wl->mutex); + + B43_WARN_ON(!wl->operating); + B43_WARN_ON(wl->if_id != conf->if_id); + + wl->operating = 0; + + spin_lock_irqsave(&wl->irq_lock, flags); + b43_adjust_opmode(dev); + memset(wl->mac_addr, 0, ETH_ALEN); + b43_upload_card_macaddress(dev); + spin_unlock_irqrestore(&wl->irq_lock, flags); + + mutex_unlock(&wl->mutex); +} + +static int b43_start(struct ieee80211_hw *hw) +{ + struct b43_wl *wl = hw_to_b43_wl(hw); + struct b43_wldev *dev = wl->current_dev; + int did_init = 0; + int err; + + mutex_lock(&wl->mutex); + + if (b43_status(dev) < B43_STAT_INITIALIZED) { + err = b43_wireless_core_init(dev); + if (err) + goto out_mutex_unlock; + did_init = 1; + } + + if (b43_status(dev) < B43_STAT_STARTED) { + err = b43_wireless_core_start(dev); + if (err) { + if (did_init) + b43_wireless_core_exit(dev); + goto out_mutex_unlock; + } + } + + 
out_mutex_unlock: + mutex_unlock(&wl->mutex); + + return err; +} + +void b43_stop(struct ieee80211_hw *hw) +{ + struct b43_wl *wl = hw_to_b43_wl(hw); + struct b43_wldev *dev = wl->current_dev; + + mutex_lock(&wl->mutex); + if (b43_status(dev) >= B43_STAT_STARTED) + b43_wireless_core_stop(dev); + b43_wireless_core_exit(dev); + mutex_unlock(&wl->mutex); +} + +static const struct ieee80211_ops b43_hw_ops = { + .tx = b43_tx, + .conf_tx = b43_conf_tx, + .add_interface = b43_add_interface, + .remove_interface = b43_remove_interface, + .config = b43_dev_config, + .config_interface = b43_config_interface, + .configure_filter = b43_configure_filter, + .set_key = b43_dev_set_key, + .get_stats = b43_get_stats, + .get_tx_stats = b43_get_tx_stats, + .start = b43_start, + .stop = b43_stop, +}; + +/* Hard-reset the chip. Do not call this directly. + * Use b43_controller_restart() + */ +static void b43_chip_reset(struct work_struct *work) +{ + struct b43_wldev *dev = + container_of(work, struct b43_wldev, restart_work); + struct b43_wl *wl = dev->wl; + int err = 0; + int prev_status; + + mutex_lock(&wl->mutex); + + prev_status = b43_status(dev); + /* Bring the device down... */ + if (prev_status >= B43_STAT_STARTED) + b43_wireless_core_stop(dev); + if (prev_status >= B43_STAT_INITIALIZED) + b43_wireless_core_exit(dev); + + /* ...and up again. */ + if (prev_status >= B43_STAT_INITIALIZED) { + err = b43_wireless_core_init(dev); + if (err) + goto out; + } + if (prev_status >= B43_STAT_STARTED) { + err = b43_wireless_core_start(dev); + if (err) { + b43_wireless_core_exit(dev); + goto out; + } + } + out: + mutex_unlock(&wl->mutex); + if (err) + b43err(wl, "Controller restart FAILED\n"); + else + b43info(wl, "Controller restarted\n"); +} + +static int b43_setup_modes(struct b43_wldev *dev, + int have_aphy, int have_bphy, int have_gphy) +{ + struct ieee80211_hw *hw = dev->wl->hw; + struct ieee80211_hw_mode *mode; + struct b43_phy *phy = &dev->phy; + int cnt = 0; + int err; + +/*FIXME: Don't tell ieee80211 about an A-PHY, because we currently don't support A-PHY. 
*/ + have_aphy = 0; + + phy->possible_phymodes = 0; + for (; 1; cnt++) { + if (have_aphy) { + B43_WARN_ON(cnt >= B43_MAX_PHYHWMODES); + mode = &phy->hwmodes[cnt]; + + mode->mode = MODE_IEEE80211A; + mode->num_channels = b43_a_chantable_size; + mode->channels = b43_a_chantable; + mode->num_rates = b43_a_ratetable_size; + mode->rates = b43_a_ratetable; + err = ieee80211_register_hwmode(hw, mode); + if (err) + return err; + + phy->possible_phymodes |= B43_PHYMODE_A; + have_aphy = 0; + continue; + } + if (have_bphy) { + B43_WARN_ON(cnt >= B43_MAX_PHYHWMODES); + mode = &phy->hwmodes[cnt]; + + mode->mode = MODE_IEEE80211B; + mode->num_channels = b43_bg_chantable_size; + mode->channels = b43_bg_chantable; + mode->num_rates = b43_b_ratetable_size; + mode->rates = b43_b_ratetable; + err = ieee80211_register_hwmode(hw, mode); + if (err) + return err; + + phy->possible_phymodes |= B43_PHYMODE_B; + have_bphy = 0; + continue; + } + if (have_gphy) { + B43_WARN_ON(cnt >= B43_MAX_PHYHWMODES); + mode = &phy->hwmodes[cnt]; + + mode->mode = MODE_IEEE80211G; + mode->num_channels = b43_bg_chantable_size; + mode->channels = b43_bg_chantable; + mode->num_rates = b43_g_ratetable_size; + mode->rates = b43_g_ratetable; + err = ieee80211_register_hwmode(hw, mode); + if (err) + return err; + + phy->possible_phymodes |= B43_PHYMODE_G; + have_gphy = 0; + continue; + } + break; + } + + return 0; +} + +static void b43_wireless_core_detach(struct b43_wldev *dev) +{ + b43_rfkill_free(dev); + /* We release firmware that late to not be required to re-request + * is all the time when we reinit the core. */ + b43_release_firmware(dev); +} + +static int b43_wireless_core_attach(struct b43_wldev *dev) +{ + struct b43_wl *wl = dev->wl; + struct ssb_bus *bus = dev->dev->bus; + struct pci_dev *pdev = bus->host_pci; + int err; + int have_aphy = 0, have_bphy = 0, have_gphy = 0; + u32 tmp; + + /* Do NOT do any device initialization here. + * Do it in wireless_core_init() instead. + * This function is for gathering basic information about the HW, only. + * Also some structs may be set up here. But most likely you want to have + * that in core_init(), too. + */ + + err = ssb_bus_powerup(bus, 0); + if (err) { + b43err(wl, "Bus powerup failed\n"); + goto out; + } + /* Get the PHY type. */ + if (dev->dev->id.revision >= 5) { + u32 tmshigh; + + tmshigh = ssb_read32(dev->dev, SSB_TMSHIGH); + have_aphy = !!(tmshigh & B43_TMSHIGH_APHY); + have_gphy = !!(tmshigh & B43_TMSHIGH_GPHY); + if (!have_aphy && !have_gphy) + have_bphy = 1; + } else if (dev->dev->id.revision == 4) { + have_gphy = 1; + have_aphy = 1; + } else + have_bphy = 1; + + dev->phy.gmode = (have_gphy || have_bphy); + tmp = dev->phy.gmode ? B43_TMSLOW_GMODE : 0; + b43_wireless_core_reset(dev, tmp); + + err = b43_phy_versioning(dev); + if (err) + goto err_powerdown; + /* Check if this device supports multiband. */ + if (!pdev || + (pdev->device != 0x4312 && + pdev->device != 0x4319 && pdev->device != 0x4324)) { + /* No multiband support. */ + have_aphy = 0; + have_bphy = 0; + have_gphy = 0; + switch (dev->phy.type) { + case B43_PHYTYPE_A: + have_aphy = 1; + break; + case B43_PHYTYPE_B: + have_bphy = 1; + break; + case B43_PHYTYPE_G: + have_gphy = 1; + break; + default: + B43_WARN_ON(1); + } + } + dev->phy.gmode = (have_gphy || have_bphy); + tmp = dev->phy.gmode ? 
B43_TMSLOW_GMODE : 0; + b43_wireless_core_reset(dev, tmp); + + err = b43_validate_chipaccess(dev); + if (err) + goto err_powerdown; + err = b43_setup_modes(dev, have_aphy, have_bphy, have_gphy); + if (err) + goto err_powerdown; + + /* Now set some default "current_dev" */ + if (!wl->current_dev) + wl->current_dev = dev; + INIT_WORK(&dev->restart_work, b43_chip_reset); + b43_rfkill_alloc(dev); + + b43_radio_turn_off(dev, 1); + b43_switch_analog(dev, 0); + ssb_device_disable(dev->dev, 0); + ssb_bus_may_powerdown(bus); + +out: + return err; + +err_powerdown: + ssb_bus_may_powerdown(bus); + return err; +} + +static void b43_one_core_detach(struct ssb_device *dev) +{ + struct b43_wldev *wldev; + struct b43_wl *wl; + + wldev = ssb_get_drvdata(dev); + wl = wldev->wl; + cancel_work_sync(&wldev->restart_work); + b43_debugfs_remove_device(wldev); + b43_wireless_core_detach(wldev); + list_del(&wldev->list); + wl->nr_devs--; + ssb_set_drvdata(dev, NULL); + kfree(wldev); +} + +static int b43_one_core_attach(struct ssb_device *dev, struct b43_wl *wl) +{ + struct b43_wldev *wldev; + struct pci_dev *pdev; + int err = -ENOMEM; + + if (!list_empty(&wl->devlist)) { + /* We are not the first core on this chip. */ + pdev = dev->bus->host_pci; + /* Only special chips support more than one wireless + * core, although some of the other chips have more than + * one wireless core as well. Check for this and + * bail out early. + */ + if (!pdev || + ((pdev->device != 0x4321) && + (pdev->device != 0x4313) && (pdev->device != 0x431A))) { + b43dbg(wl, "Ignoring unconnected 802.11 core\n"); + return -ENODEV; + } + } + + wldev = kzalloc(sizeof(*wldev), GFP_KERNEL); + if (!wldev) + goto out; + + wldev->dev = dev; + wldev->wl = wl; + b43_set_status(wldev, B43_STAT_UNINIT); + wldev->bad_frames_preempt = modparam_bad_frames_preempt; + tasklet_init(&wldev->isr_tasklet, + (void (*)(unsigned long))b43_interrupt_tasklet, + (unsigned long)wldev); + if (modparam_pio) + wldev->__using_pio = 1; + INIT_LIST_HEAD(&wldev->list); + + err = b43_wireless_core_attach(wldev); + if (err) + goto err_kfree_wldev; + + list_add(&wldev->list, &wl->devlist); + wl->nr_devs++; + ssb_set_drvdata(dev, wldev); + b43_debugfs_add_device(wldev); + + out: + return err; + + err_kfree_wldev: + kfree(wldev); + return err; +} + +static void b43_sprom_fixup(struct ssb_bus *bus) +{ + /* boardflags workarounds */ + if (bus->boardinfo.vendor == SSB_BOARDVENDOR_DELL && + bus->chip_id == 0x4301 && bus->boardinfo.rev == 0x74) + bus->sprom.r1.boardflags_lo |= B43_BFL_BTCOEXIST; + if (bus->boardinfo.vendor == PCI_VENDOR_ID_APPLE && + bus->boardinfo.type == 0x4E && bus->boardinfo.rev > 0x40) + bus->sprom.r1.boardflags_lo |= B43_BFL_PACTRL; + + /* Handle case when gain is not set in sprom */ + if (bus->sprom.r1.antenna_gain_a == 0xFF) + bus->sprom.r1.antenna_gain_a = 2; + if (bus->sprom.r1.antenna_gain_bg == 0xFF) + bus->sprom.r1.antenna_gain_bg = 2; + + /* Convert Antennagain values to Q5.2 */ + bus->sprom.r1.antenna_gain_a <<= 2; + bus->sprom.r1.antenna_gain_bg <<= 2; +} + +static void b43_wireless_exit(struct ssb_device *dev, struct b43_wl *wl) +{ + struct ieee80211_hw *hw = wl->hw; + + ssb_set_devtypedata(dev, NULL); + ieee80211_free_hw(hw); +} + +static int b43_wireless_init(struct ssb_device *dev) +{ + struct ssb_sprom *sprom = &dev->bus->sprom; + struct ieee80211_hw *hw; + struct b43_wl *wl; + int err = -ENOMEM; + + b43_sprom_fixup(dev->bus); + + hw = ieee80211_alloc_hw(sizeof(*wl), &b43_hw_ops); + if (!hw) { + b43err(NULL, "Could not allocate ieee80211 
device\n"); + goto out; + } + + /* fill hw info */ + hw->flags = IEEE80211_HW_HOST_GEN_BEACON_TEMPLATE; + hw->max_signal = 100; + hw->max_rssi = -110; + hw->max_noise = -110; + hw->queues = 1; /* FIXME: hardware has more queues */ + SET_IEEE80211_DEV(hw, dev->dev); + if (is_valid_ether_addr(sprom->r1.et1mac)) + SET_IEEE80211_PERM_ADDR(hw, sprom->r1.et1mac); + else + SET_IEEE80211_PERM_ADDR(hw, sprom->r1.il0mac); + + /* Get and initialize struct b43_wl */ + wl = hw_to_b43_wl(hw); + memset(wl, 0, sizeof(*wl)); + wl->hw = hw; + spin_lock_init(&wl->irq_lock); + spin_lock_init(&wl->leds_lock); + mutex_init(&wl->mutex); + INIT_LIST_HEAD(&wl->devlist); + + ssb_set_devtypedata(dev, wl); + b43info(wl, "Broadcom %04X WLAN found\n", dev->bus->chip_id); + err = 0; + out: + return err; +} + +static int b43_probe(struct ssb_device *dev, const struct ssb_device_id *id) +{ + struct b43_wl *wl; + int err; + int first = 0; + + wl = ssb_get_devtypedata(dev); + if (!wl) { + /* Probing the first core. Must setup common struct b43_wl */ + first = 1; + err = b43_wireless_init(dev); + if (err) + goto out; + wl = ssb_get_devtypedata(dev); + B43_WARN_ON(!wl); + } + err = b43_one_core_attach(dev, wl); + if (err) + goto err_wireless_exit; + + if (first) { + err = ieee80211_register_hw(wl->hw); + if (err) + goto err_one_core_detach; + } + + out: + return err; + + err_one_core_detach: + b43_one_core_detach(dev); + err_wireless_exit: + if (first) + b43_wireless_exit(dev, wl); + return err; +} + +static void b43_remove(struct ssb_device *dev) +{ + struct b43_wl *wl = ssb_get_devtypedata(dev); + struct b43_wldev *wldev = ssb_get_drvdata(dev); + + B43_WARN_ON(!wl); + if (wl->current_dev == wldev) + ieee80211_unregister_hw(wl->hw); + + b43_one_core_detach(dev); + + if (list_empty(&wl->devlist)) { + /* Last core on the chip unregistered. + * We can destroy common struct b43_wl. + */ + b43_wireless_exit(dev, wl); + } +} + +/* Perform a hardware reset. This can be called from any context. */ +void b43_controller_restart(struct b43_wldev *dev, const char *reason) +{ + /* Must avoid requeueing, if we are in shutdown. 
*/ + if (b43_status(dev) < B43_STAT_INITIALIZED) + return; + b43info(dev->wl, "Controller RESET (%s) ...\n", reason); + queue_work(dev->wl->hw->workqueue, &dev->restart_work); +} + +#ifdef CONFIG_PM + +static int b43_suspend(struct ssb_device *dev, pm_message_t state) +{ + struct b43_wldev *wldev = ssb_get_drvdata(dev); + struct b43_wl *wl = wldev->wl; + + b43dbg(wl, "Suspending...\n"); + + mutex_lock(&wl->mutex); + wldev->suspend_init_status = b43_status(wldev); + if (wldev->suspend_init_status >= B43_STAT_STARTED) + b43_wireless_core_stop(wldev); + if (wldev->suspend_init_status >= B43_STAT_INITIALIZED) + b43_wireless_core_exit(wldev); + mutex_unlock(&wl->mutex); + + b43dbg(wl, "Device suspended.\n"); + + return 0; +} + +static int b43_resume(struct ssb_device *dev) +{ + struct b43_wldev *wldev = ssb_get_drvdata(dev); + struct b43_wl *wl = wldev->wl; + int err = 0; + + b43dbg(wl, "Resuming...\n"); + + mutex_lock(&wl->mutex); + if (wldev->suspend_init_status >= B43_STAT_INITIALIZED) { + err = b43_wireless_core_init(wldev); + if (err) { + b43err(wl, "Resume failed at core init\n"); + goto out; + } + } + if (wldev->suspend_init_status >= B43_STAT_STARTED) { + err = b43_wireless_core_start(wldev); + if (err) { + b43_wireless_core_exit(wldev); + b43err(wl, "Resume failed at core start\n"); + goto out; + } + } + mutex_unlock(&wl->mutex); + + b43dbg(wl, "Device resumed.\n"); + out: + return err; +} + +#else /* CONFIG_PM */ +# define b43_suspend NULL +# define b43_resume NULL +#endif /* CONFIG_PM */ + +static struct ssb_driver b43_ssb_driver = { + .name = KBUILD_MODNAME, + .id_table = b43_ssb_tbl, + .probe = b43_probe, + .remove = b43_remove, + .suspend = b43_suspend, + .resume = b43_resume, +}; + +static int __init b43_init(void) +{ + int err; + + b43_debugfs_init(); + err = b43_pcmcia_init(); + if (err) + goto err_dfs_exit; + err = ssb_driver_register(&b43_ssb_driver); + if (err) + goto err_pcmcia_exit; + + return err; + +err_pcmcia_exit: + b43_pcmcia_exit(); +err_dfs_exit: + b43_debugfs_exit(); + return err; +} + +static void __exit b43_exit(void) +{ + ssb_driver_unregister(&b43_ssb_driver); + b43_pcmcia_exit(); + b43_debugfs_exit(); +} + +module_init(b43_init) +module_exit(b43_exit) diff --git a/drivers/net/wireless/b43/main.h b/drivers/net/wireless/b43/main.h new file mode 100644 index 0000000000000000000000000000000000000000..284d17da17d1ae73c56deb79e389697cf69c9068 --- /dev/null +++ b/drivers/net/wireless/b43/main.h @@ -0,0 +1,125 @@ +/* + + Broadcom B43 wireless driver + + Copyright (c) 2005 Martin Langer , + Stefano Brivio + Michael Buesch + Danny van Dyk + Andreas Jaggi + + Some parts of the code in this file are derived from the ipw2200 + driver Copyright(c) 2003 - 2004 Intel Corporation. + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; see the file COPYING. If not, write to + the Free Software Foundation, Inc., 51 Franklin Steet, Fifth Floor, + Boston, MA 02110-1301, USA. 
+ +*/ + +#ifndef B43_MAIN_H_ +#define B43_MAIN_H_ + +#include "b43.h" + +#define P4D_BYT3S(magic, nr_bytes) u8 __p4dding##magic[nr_bytes] +#define P4D_BYTES(line, nr_bytes) P4D_BYT3S(line, nr_bytes) +/* Magic helper macro to pad structures. Ignore those above. It's magic. */ +#define PAD_BYTES(nr_bytes) P4D_BYTES( __LINE__ , (nr_bytes)) + +/* Lightweight function to convert a frequency (in Mhz) to a channel number. */ +static inline u8 b43_freq_to_channel_a(int freq) +{ + return ((freq - 5000) / 5); +} +static inline u8 b43_freq_to_channel_bg(int freq) +{ + u8 channel; + + if (freq == 2484) + channel = 14; + else + channel = (freq - 2407) / 5; + + return channel; +} +static inline u8 b43_freq_to_channel(struct b43_wldev *dev, int freq) +{ + if (dev->phy.type == B43_PHYTYPE_A) + return b43_freq_to_channel_a(freq); + return b43_freq_to_channel_bg(freq); +} + +/* Lightweight function to convert a channel number to a frequency (in Mhz). */ +static inline int b43_channel_to_freq_a(u8 channel) +{ + return (5000 + (5 * channel)); +} +static inline int b43_channel_to_freq_bg(u8 channel) +{ + int freq; + + if (channel == 14) + freq = 2484; + else + freq = 2407 + (5 * channel); + + return freq; +} +static inline int b43_channel_to_freq(struct b43_wldev *dev, u8 channel) +{ + if (dev->phy.type == B43_PHYTYPE_A) + return b43_channel_to_freq_a(channel); + return b43_channel_to_freq_bg(channel); +} + +static inline int b43_is_cck_rate(int rate) +{ + return (rate == B43_CCK_RATE_1MB || + rate == B43_CCK_RATE_2MB || + rate == B43_CCK_RATE_5MB || rate == B43_CCK_RATE_11MB); +} + +static inline int b43_is_ofdm_rate(int rate) +{ + return !b43_is_cck_rate(rate); +} + +void b43_tsf_read(struct b43_wldev *dev, u64 * tsf); +void b43_tsf_write(struct b43_wldev *dev, u64 tsf); + +u32 b43_shm_read32(struct b43_wldev *dev, u16 routing, u16 offset); +u16 b43_shm_read16(struct b43_wldev *dev, u16 routing, u16 offset); +void b43_shm_write32(struct b43_wldev *dev, u16 routing, u16 offset, u32 value); +void b43_shm_write16(struct b43_wldev *dev, u16 routing, u16 offset, u16 value); + +u32 b43_hf_read(struct b43_wldev *dev); +void b43_hf_write(struct b43_wldev *dev, u32 value); + +void b43_dummy_transmission(struct b43_wldev *dev); + +void b43_wireless_core_reset(struct b43_wldev *dev, u32 flags); + +void b43_mac_suspend(struct b43_wldev *dev); +void b43_mac_enable(struct b43_wldev *dev); + +void b43_controller_restart(struct b43_wldev *dev, const char *reason); + +#define B43_PS_ENABLED (1 << 0) /* Force enable hardware power saving */ +#define B43_PS_DISABLED (1 << 1) /* Force disable hardware power saving */ +#define B43_PS_AWAKE (1 << 2) /* Force device awake */ +#define B43_PS_ASLEEP (1 << 3) /* Force device asleep */ +void b43_power_saving_ctl_bits(struct b43_wldev *dev, unsigned int ps_flags); + +#endif /* B43_MAIN_H_ */ diff --git a/drivers/net/wireless/b43/pcmcia.c b/drivers/net/wireless/b43/pcmcia.c new file mode 100644 index 0000000000000000000000000000000000000000..b242a9a90dd2309d0f0f8b95de5dd64ab1c6e015 --- /dev/null +++ b/drivers/net/wireless/b43/pcmcia.c @@ -0,0 +1,160 @@ +/* + + Broadcom B43 wireless driver + + Copyright (c) 2007 Michael Buesch + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. 
+ + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; see the file COPYING. If not, write to + the Free Software Foundation, Inc., 51 Franklin Steet, Fifth Floor, + Boston, MA 02110-1301, USA. + +*/ + +#include "pcmcia.h" + +#include + +#include +#include +#include +#include +#include +#include + + +static /*const */ struct pcmcia_device_id b43_pcmcia_tbl[] = { + PCMCIA_DEVICE_MANF_CARD(0x2D0, 0x448), + PCMCIA_DEVICE_NULL, +}; + +MODULE_DEVICE_TABLE(pcmcia, b43_pcmcia_tbl); + +#ifdef CONFIG_PM +static int b43_pcmcia_suspend(struct pcmcia_device *dev) +{ + //TODO + return 0; +} + +static int b43_pcmcia_resume(struct pcmcia_device *dev) +{ + //TODO + return 0; +} +#else /* CONFIG_PM */ +# define b43_pcmcia_suspend NULL +# define b43_pcmcia_resume NULL +#endif /* CONFIG_PM */ + +static int __devinit b43_pcmcia_probe(struct pcmcia_device *dev) +{ + struct ssb_bus *ssb; + win_req_t win; + memreq_t mem; + tuple_t tuple; + cisparse_t parse; + int err = -ENOMEM; + int res; + unsigned char buf[64]; + + ssb = kzalloc(sizeof(*ssb), GFP_KERNEL); + if (!ssb) + goto out; + + err = -ENODEV; + tuple.DesiredTuple = CISTPL_CONFIG; + tuple.Attributes = 0; + tuple.TupleData = buf; + tuple.TupleDataMax = sizeof(buf); + tuple.TupleOffset = 0; + + res = pcmcia_get_first_tuple(dev, &tuple); + if (res != CS_SUCCESS) + goto err_kfree_ssb; + res = pcmcia_get_tuple_data(dev, &tuple); + if (res != CS_SUCCESS) + goto err_kfree_ssb; + res = pcmcia_parse_tuple(dev, &tuple, &parse); + if (res != CS_SUCCESS) + goto err_kfree_ssb; + + dev->conf.ConfigBase = parse.config.base; + dev->conf.Present = parse.config.rmask[0]; + + dev->io.BasePort2 = 0; + dev->io.NumPorts2 = 0; + dev->io.Attributes2 = 0; + + win.Attributes = WIN_MEMORY_TYPE_CM | WIN_ENABLE | WIN_USE_WAIT; + win.Base = 0; + win.Size = SSB_CORE_SIZE; + win.AccessSpeed = 1000; + res = pcmcia_request_window(&dev, &win, &dev->win); + if (res != CS_SUCCESS) + goto err_kfree_ssb; + + mem.CardOffset = 0; + mem.Page = 0; + res = pcmcia_map_mem_page(dev->win, &mem); + if (res != CS_SUCCESS) + goto err_kfree_ssb; + + res = pcmcia_request_configuration(dev, &dev->conf); + if (res != CS_SUCCESS) + goto err_disable; + + err = ssb_bus_pcmciabus_register(ssb, dev, win.Base); + dev->priv = ssb; + + out: + return err; + err_disable: + pcmcia_disable_device(dev); + err_kfree_ssb: + kfree(ssb); + return err; +} + +static void __devexit b43_pcmcia_remove(struct pcmcia_device *dev) +{ + struct ssb_bus *ssb = dev->priv; + + ssb_bus_unregister(ssb); + pcmcia_release_window(dev->win); + pcmcia_disable_device(dev); + kfree(ssb); + dev->priv = NULL; +} + +static struct pcmcia_driver b43_pcmcia_driver = { + .owner = THIS_MODULE, + .drv = { + .name = "b43-pcmcia", + }, + .id_table = b43_pcmcia_tbl, + .probe = b43_pcmcia_probe, + .remove = b43_pcmcia_remove, + .suspend = b43_pcmcia_suspend, + .resume = b43_pcmcia_resume, +}; + +int b43_pcmcia_init(void) +{ + return pcmcia_register_driver(&b43_pcmcia_driver); +} + +void b43_pcmcia_exit(void) +{ + pcmcia_unregister_driver(&b43_pcmcia_driver); +} diff --git a/drivers/net/wireless/b43/pcmcia.h b/drivers/net/wireless/b43/pcmcia.h new file mode 100644 index 0000000000000000000000000000000000000000..85f120a67cbe9265b302a4ed31eee6bc50b1e84c --- /dev/null +++ 
b/drivers/net/wireless/b43/pcmcia.h @@ -0,0 +1,20 @@ +#ifndef B43_PCMCIA_H_ +#define B43_PCMCIA_H_ + +#ifdef CONFIG_B43_PCMCIA + +int b43_pcmcia_init(void); +void b43_pcmcia_exit(void); + +#else /* CONFIG_B43_PCMCIA */ + +static inline int b43_pcmcia_init(void) +{ + return 0; +} +static inline void b43_pcmcia_exit(void) +{ +} + +#endif /* CONFIG_B43_PCMCIA */ +#endif /* B43_PCMCIA_H_ */ diff --git a/drivers/net/wireless/b43/phy.c b/drivers/net/wireless/b43/phy.c new file mode 100644 index 0000000000000000000000000000000000000000..5f7ffa0a76c0973ab6d27fc3ff68a8944a2366f8 --- /dev/null +++ b/drivers/net/wireless/b43/phy.c @@ -0,0 +1,4380 @@ +/* + + Broadcom B43 wireless driver + + Copyright (c) 2005 Martin Langer , + Copyright (c) 2005, 2006 Stefano Brivio + Copyright (c) 2005, 2006 Michael Buesch + Copyright (c) 2005, 2006 Danny van Dyk + Copyright (c) 2005, 2006 Andreas Jaggi + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; see the file COPYING. If not, write to + the Free Software Foundation, Inc., 51 Franklin Steet, Fifth Floor, + Boston, MA 02110-1301, USA. + +*/ + +#include +#include + +#include "b43.h" +#include "phy.h" +#include "main.h" +#include "tables.h" +#include "lo.h" + +static const s8 b43_tssi2dbm_b_table[] = { + 0x4D, 0x4C, 0x4B, 0x4A, + 0x4A, 0x49, 0x48, 0x47, + 0x47, 0x46, 0x45, 0x45, + 0x44, 0x43, 0x42, 0x42, + 0x41, 0x40, 0x3F, 0x3E, + 0x3D, 0x3C, 0x3B, 0x3A, + 0x39, 0x38, 0x37, 0x36, + 0x35, 0x34, 0x32, 0x31, + 0x30, 0x2F, 0x2D, 0x2C, + 0x2B, 0x29, 0x28, 0x26, + 0x25, 0x23, 0x21, 0x1F, + 0x1D, 0x1A, 0x17, 0x14, + 0x10, 0x0C, 0x06, 0x00, + -7, -7, -7, -7, + -7, -7, -7, -7, + -7, -7, -7, -7, +}; + +static const s8 b43_tssi2dbm_g_table[] = { + 77, 77, 77, 76, + 76, 76, 75, 75, + 74, 74, 73, 73, + 73, 72, 72, 71, + 71, 70, 70, 69, + 68, 68, 67, 67, + 66, 65, 65, 64, + 63, 63, 62, 61, + 60, 59, 58, 57, + 56, 55, 54, 53, + 52, 50, 49, 47, + 45, 43, 40, 37, + 33, 28, 22, 14, + 5, -7, -20, -20, + -20, -20, -20, -20, + -20, -20, -20, -20, +}; + +const u8 b43_radio_channel_codes_bg[] = { + 12, 17, 22, 27, + 32, 37, 42, 47, + 52, 57, 62, 67, + 72, 84, +}; + +static void b43_phy_initg(struct b43_wldev *dev); + +/* Reverse the bits of a 4bit value. 
+ * Example: 1101 is flipped 1011 + */ +static u16 flip_4bit(u16 value) +{ + u16 flipped = 0x0000; + + B43_WARN_ON(value & ~0x000F); + + flipped |= (value & 0x0001) << 3; + flipped |= (value & 0x0002) << 1; + flipped |= (value & 0x0004) >> 1; + flipped |= (value & 0x0008) >> 3; + + return flipped; +} + +static void generate_rfatt_list(struct b43_wldev *dev, + struct b43_rfatt_list *list) +{ + struct b43_phy *phy = &dev->phy; + + /* APHY.rev < 5 || GPHY.rev < 6 */ + static const struct b43_rfatt rfatt_0[] = { + {.att = 3,.with_padmix = 0,}, + {.att = 1,.with_padmix = 0,}, + {.att = 5,.with_padmix = 0,}, + {.att = 7,.with_padmix = 0,}, + {.att = 9,.with_padmix = 0,}, + {.att = 2,.with_padmix = 0,}, + {.att = 0,.with_padmix = 0,}, + {.att = 4,.with_padmix = 0,}, + {.att = 6,.with_padmix = 0,}, + {.att = 8,.with_padmix = 0,}, + {.att = 1,.with_padmix = 1,}, + {.att = 2,.with_padmix = 1,}, + {.att = 3,.with_padmix = 1,}, + {.att = 4,.with_padmix = 1,}, + }; + /* Radio.rev == 8 && Radio.version == 0x2050 */ + static const struct b43_rfatt rfatt_1[] = { + {.att = 2,.with_padmix = 1,}, + {.att = 4,.with_padmix = 1,}, + {.att = 6,.with_padmix = 1,}, + {.att = 8,.with_padmix = 1,}, + {.att = 10,.with_padmix = 1,}, + {.att = 12,.with_padmix = 1,}, + {.att = 14,.with_padmix = 1,}, + }; + /* Otherwise */ + static const struct b43_rfatt rfatt_2[] = { + {.att = 0,.with_padmix = 1,}, + {.att = 2,.with_padmix = 1,}, + {.att = 4,.with_padmix = 1,}, + {.att = 6,.with_padmix = 1,}, + {.att = 8,.with_padmix = 1,}, + {.att = 9,.with_padmix = 1,}, + {.att = 9,.with_padmix = 1,}, + }; + + if ((phy->type == B43_PHYTYPE_A && phy->rev < 5) || + (phy->type == B43_PHYTYPE_G && phy->rev < 6)) { + /* Software pctl */ + list->list = rfatt_0; + list->len = ARRAY_SIZE(rfatt_0); + list->min_val = 0; + list->max_val = 9; + return; + } + if (phy->radio_ver == 0x2050 && phy->radio_rev == 8) { + /* Hardware pctl */ + list->list = rfatt_1; + list->len = ARRAY_SIZE(rfatt_1); + list->min_val = 2; + list->max_val = 14; + return; + } + /* Hardware pctl */ + list->list = rfatt_2; + list->len = ARRAY_SIZE(rfatt_2); + list->min_val = 0; + list->max_val = 9; +} + +static void generate_bbatt_list(struct b43_wldev *dev, + struct b43_bbatt_list *list) +{ + static const struct b43_bbatt bbatt_0[] = { + {.att = 0,}, + {.att = 1,}, + {.att = 2,}, + {.att = 3,}, + {.att = 4,}, + {.att = 5,}, + {.att = 6,}, + {.att = 7,}, + {.att = 8,}, + }; + + list->list = bbatt_0; + list->len = ARRAY_SIZE(bbatt_0); + list->min_val = 0; + list->max_val = 8; +} + +bool b43_has_hardware_pctl(struct b43_phy *phy) +{ + if (!phy->hardware_power_control) + return 0; + switch (phy->type) { + case B43_PHYTYPE_A: + if (phy->rev >= 5) + return 1; + break; + case B43_PHYTYPE_G: + if (phy->rev >= 6) + return 1; + break; + default: + B43_WARN_ON(1); + } + return 0; +} + +static void b43_shm_clear_tssi(struct b43_wldev *dev) +{ + struct b43_phy *phy = &dev->phy; + + switch (phy->type) { + case B43_PHYTYPE_A: + b43_shm_write16(dev, B43_SHM_SHARED, 0x0068, 0x7F7F); + b43_shm_write16(dev, B43_SHM_SHARED, 0x006a, 0x7F7F); + break; + case B43_PHYTYPE_B: + case B43_PHYTYPE_G: + b43_shm_write16(dev, B43_SHM_SHARED, 0x0058, 0x7F7F); + b43_shm_write16(dev, B43_SHM_SHARED, 0x005a, 0x7F7F); + b43_shm_write16(dev, B43_SHM_SHARED, 0x0070, 0x7F7F); + b43_shm_write16(dev, B43_SHM_SHARED, 0x0072, 0x7F7F); + break; + } +} + +void b43_raw_phy_lock(struct b43_wldev *dev) +{ + struct b43_phy *phy = &dev->phy; + + B43_WARN_ON(!irqs_disabled()); + + /* We had a check for MACCTL==0 here, but I 
think that doesn't + * make sense, as MACCTL is never 0 when this is called. + * --mb */ + B43_WARN_ON(b43_read32(dev, B43_MMIO_MACCTL) == 0); + + if (dev->dev->id.revision < 3) { + b43_mac_suspend(dev); + spin_lock(&phy->lock); + } else { + if (!b43_is_mode(dev->wl, IEEE80211_IF_TYPE_AP)) + b43_power_saving_ctl_bits(dev, B43_PS_AWAKE); + } + phy->locked = 1; +} + +void b43_raw_phy_unlock(struct b43_wldev *dev) +{ + struct b43_phy *phy = &dev->phy; + + B43_WARN_ON(!irqs_disabled()); + if (dev->dev->id.revision < 3) { + if (phy->locked) { + spin_unlock(&phy->lock); + b43_mac_enable(dev); + } + } else { + if (!b43_is_mode(dev->wl, IEEE80211_IF_TYPE_AP)) + b43_power_saving_ctl_bits(dev, 0); + } + phy->locked = 0; +} + +/* Different PHYs require different register routing flags. + * This adjusts (and does sanity checks on) the routing flags. + */ +static inline u16 adjust_phyreg_for_phytype(struct b43_phy *phy, + u16 offset, struct b43_wldev *dev) +{ + if (phy->type == B43_PHYTYPE_A) { + /* OFDM registers are base-registers for the A-PHY. */ + offset &= ~B43_PHYROUTE_OFDM_GPHY; + } + if (offset & B43_PHYROUTE_EXT_GPHY) { + /* Ext-G registers are only available on G-PHYs */ + if (phy->type != B43_PHYTYPE_G) { + b43dbg(dev->wl, "EXT-G PHY access at " + "0x%04X on %u type PHY\n", offset, phy->type); + } + } + + return offset; +} + +u16 b43_phy_read(struct b43_wldev * dev, u16 offset) +{ + struct b43_phy *phy = &dev->phy; + + offset = adjust_phyreg_for_phytype(phy, offset, dev); + b43_write16(dev, B43_MMIO_PHY_CONTROL, offset); + return b43_read16(dev, B43_MMIO_PHY_DATA); +} + +void b43_phy_write(struct b43_wldev *dev, u16 offset, u16 val) +{ + struct b43_phy *phy = &dev->phy; + + offset = adjust_phyreg_for_phytype(phy, offset, dev); + b43_write16(dev, B43_MMIO_PHY_CONTROL, offset); + mmiowb(); + b43_write16(dev, B43_MMIO_PHY_DATA, val); +} + +static void b43_radio_set_txpower_a(struct b43_wldev *dev, u16 txpower); + +/* Adjust the transmission power output (G-PHY) */ +void b43_set_txpower_g(struct b43_wldev *dev, + const struct b43_bbatt *bbatt, + const struct b43_rfatt *rfatt, u8 tx_control) +{ + struct b43_phy *phy = &dev->phy; + struct b43_txpower_lo_control *lo = phy->lo_control; + u16 bb, rf; + u16 tx_bias, tx_magn; + + bb = bbatt->att; + rf = rfatt->att; + tx_bias = lo->tx_bias; + tx_magn = lo->tx_magn; + if (unlikely(tx_bias == 0xFF)) + tx_bias = 0; + + /* Save the values for later */ + phy->tx_control = tx_control; + memcpy(&phy->rfatt, rfatt, sizeof(*rfatt)); + memcpy(&phy->bbatt, bbatt, sizeof(*bbatt)); + + if (b43_debug(dev, B43_DBG_XMITPOWER)) { + b43dbg(dev->wl, "Tuning TX-power to bbatt(%u), " + "rfatt(%u), tx_control(0x%02X), " + "tx_bias(0x%02X), tx_magn(0x%02X)\n", + bb, rf, tx_control, tx_bias, tx_magn); + } + + b43_phy_set_baseband_attenuation(dev, bb); + b43_shm_write16(dev, B43_SHM_SHARED, B43_SHM_SH_RFATT, rf); + if (phy->radio_ver == 0x2050 && phy->radio_rev == 8) { + b43_radio_write16(dev, 0x43, + (rf & 0x000F) | (tx_control & 0x0070)); + } else { + b43_radio_write16(dev, 0x43, (b43_radio_read16(dev, 0x43) + & 0xFFF0) | (rf & 0x000F)); + b43_radio_write16(dev, 0x52, (b43_radio_read16(dev, 0x52) + & ~0x0070) | (tx_control & + 0x0070)); + } + if (has_tx_magnification(phy)) { + b43_radio_write16(dev, 0x52, tx_magn | tx_bias); + } else { + b43_radio_write16(dev, 0x52, (b43_radio_read16(dev, 0x52) + & 0xFFF0) | (tx_bias & 0x000F)); + } + if (phy->type == B43_PHYTYPE_G) + b43_lo_g_adjust(dev); +} + +static void default_baseband_attenuation(struct b43_wldev *dev, + struct 
b43_bbatt *bb) +{ + struct b43_phy *phy = &dev->phy; + + if (phy->radio_ver == 0x2050 && phy->radio_rev < 6) + bb->att = 0; + else + bb->att = 2; +} + +static void default_radio_attenuation(struct b43_wldev *dev, + struct b43_rfatt *rf) +{ + struct ssb_bus *bus = dev->dev->bus; + struct b43_phy *phy = &dev->phy; + + rf->with_padmix = 0; + + if (bus->boardinfo.vendor == SSB_BOARDVENDOR_BCM && + bus->boardinfo.type == SSB_BOARD_BCM4309G) { + if (bus->boardinfo.rev < 0x43) { + rf->att = 2; + return; + } else if (bus->boardinfo.rev < 0x51) { + rf->att = 3; + return; + } + } + + if (phy->type == B43_PHYTYPE_A) { + rf->att = 0x60; + return; + } + + switch (phy->radio_ver) { + case 0x2053: + switch (phy->radio_rev) { + case 1: + rf->att = 6; + return; + } + break; + case 0x2050: + switch (phy->radio_rev) { + case 0: + rf->att = 5; + return; + case 1: + if (phy->type == B43_PHYTYPE_G) { + if (bus->boardinfo.vendor == SSB_BOARDVENDOR_BCM + && bus->boardinfo.type == SSB_BOARD_BCM4309G + && bus->boardinfo.rev >= 30) + rf->att = 3; + else if (bus->boardinfo.vendor == + SSB_BOARDVENDOR_BCM + && bus->boardinfo.type == + SSB_BOARD_BU4306) + rf->att = 3; + else + rf->att = 1; + } else { + if (bus->boardinfo.vendor == SSB_BOARDVENDOR_BCM + && bus->boardinfo.type == SSB_BOARD_BCM4309G + && bus->boardinfo.rev >= 30) + rf->att = 7; + else + rf->att = 6; + } + return; + case 2: + if (phy->type == B43_PHYTYPE_G) { + if (bus->boardinfo.vendor == SSB_BOARDVENDOR_BCM + && bus->boardinfo.type == SSB_BOARD_BCM4309G + && bus->boardinfo.rev >= 30) + rf->att = 3; + else if (bus->boardinfo.vendor == + SSB_BOARDVENDOR_BCM + && bus->boardinfo.type == + SSB_BOARD_BU4306) + rf->att = 5; + else if (bus->chip_id == 0x4320) + rf->att = 4; + else + rf->att = 3; + } else + rf->att = 6; + return; + case 3: + rf->att = 5; + return; + case 4: + case 5: + rf->att = 1; + return; + case 6: + case 7: + rf->att = 5; + return; + case 8: + rf->att = 0xA; + rf->with_padmix = 1; + return; + case 9: + default: + rf->att = 5; + return; + } + } + rf->att = 5; +} + +static u16 default_tx_control(struct b43_wldev *dev) +{ + struct b43_phy *phy = &dev->phy; + + if (phy->radio_ver != 0x2050) + return 0; + if (phy->radio_rev == 1) + return B43_TXCTL_PA2DB | B43_TXCTL_TXMIX; + if (phy->radio_rev < 6) + return B43_TXCTL_PA2DB; + if (phy->radio_rev == 8) + return B43_TXCTL_TXMIX; + return 0; +} + +/* This func is called "PHY calibrate" in the specs... 
*/ +void b43_phy_early_init(struct b43_wldev *dev) +{ + struct b43_phy *phy = &dev->phy; + struct b43_txpower_lo_control *lo = phy->lo_control; + + default_baseband_attenuation(dev, &phy->bbatt); + default_radio_attenuation(dev, &phy->rfatt); + phy->tx_control = (default_tx_control(dev) << 4); + + /* Commit previous writes */ + b43_read32(dev, B43_MMIO_MACCTL); + + if (phy->type == B43_PHYTYPE_B || phy->type == B43_PHYTYPE_G) { + generate_rfatt_list(dev, &lo->rfatt_list); + generate_bbatt_list(dev, &lo->bbatt_list); + } + if (phy->type == B43_PHYTYPE_G && phy->rev == 1) { + /* Workaround: Temporarly disable gmode through the early init + * phase, as the gmode stuff is not needed for phy rev 1 */ + phy->gmode = 0; + b43_wireless_core_reset(dev, 0); + b43_phy_initg(dev); + phy->gmode = 1; + b43_wireless_core_reset(dev, B43_TMSLOW_GMODE); + } +} + +/* GPHY_TSSI_Power_Lookup_Table_Init */ +static void b43_gphy_tssi_power_lt_init(struct b43_wldev *dev) +{ + struct b43_phy *phy = &dev->phy; + int i; + u16 value; + + for (i = 0; i < 32; i++) + b43_ofdmtab_write16(dev, 0x3C20, i, phy->tssi2dbm[i]); + for (i = 32; i < 64; i++) + b43_ofdmtab_write16(dev, 0x3C00, i - 32, phy->tssi2dbm[i]); + for (i = 0; i < 64; i += 2) { + value = (u16) phy->tssi2dbm[i]; + value |= ((u16) phy->tssi2dbm[i + 1]) << 8; + b43_phy_write(dev, 0x380 + (i / 2), value); + } +} + +/* GPHY_Gain_Lookup_Table_Init */ +static void b43_gphy_gain_lt_init(struct b43_wldev *dev) +{ + struct b43_phy *phy = &dev->phy; + struct b43_txpower_lo_control *lo = phy->lo_control; + u16 nr_written = 0; + u16 tmp; + u8 rf, bb; + + if (!lo->lo_measured) { + b43_phy_write(dev, 0x3FF, 0); + return; + } + + for (rf = 0; rf < lo->rfatt_list.len; rf++) { + for (bb = 0; bb < lo->bbatt_list.len; bb++) { + if (nr_written >= 0x40) + return; + tmp = lo->bbatt_list.list[bb].att; + tmp <<= 8; + if (phy->radio_rev == 8) + tmp |= 0x50; + else + tmp |= 0x40; + tmp |= lo->rfatt_list.list[rf].att; + b43_phy_write(dev, 0x3C0 + nr_written, tmp); + nr_written++; + } + } +} + +/* GPHY_DC_Lookup_Table */ +void b43_gphy_dc_lt_init(struct b43_wldev *dev) +{ + struct b43_phy *phy = &dev->phy; + struct b43_txpower_lo_control *lo = phy->lo_control; + struct b43_loctl *loctl0; + struct b43_loctl *loctl1; + int i; + int rf_offset, bb_offset; + u16 tmp; + + for (i = 0; i < lo->rfatt_list.len + lo->bbatt_list.len; i += 2) { + rf_offset = i / lo->rfatt_list.len; + bb_offset = i % lo->rfatt_list.len; + + loctl0 = b43_get_lo_g_ctl(dev, &lo->rfatt_list.list[rf_offset], + &lo->bbatt_list.list[bb_offset]); + if (i + 1 < lo->rfatt_list.len * lo->bbatt_list.len) { + rf_offset = (i + 1) / lo->rfatt_list.len; + bb_offset = (i + 1) % lo->rfatt_list.len; + + loctl1 = + b43_get_lo_g_ctl(dev, + &lo->rfatt_list.list[rf_offset], + &lo->bbatt_list.list[bb_offset]); + } else + loctl1 = loctl0; + + tmp = ((u16) loctl0->q & 0xF); + tmp |= ((u16) loctl0->i & 0xF) << 4; + tmp |= ((u16) loctl1->q & 0xF) << 8; + tmp |= ((u16) loctl1->i & 0xF) << 12; //FIXME? 
+ b43_phy_write(dev, 0x3A0 + (i / 2), tmp); + } +} + +static void hardware_pctl_init_aphy(struct b43_wldev *dev) +{ + //TODO +} + +static void hardware_pctl_init_gphy(struct b43_wldev *dev) +{ + struct b43_phy *phy = &dev->phy; + + b43_phy_write(dev, 0x0036, (b43_phy_read(dev, 0x0036) & 0xFFC0) + | (phy->tgt_idle_tssi - phy->cur_idle_tssi)); + b43_phy_write(dev, 0x0478, (b43_phy_read(dev, 0x0478) & 0xFF00) + | (phy->tgt_idle_tssi - phy->cur_idle_tssi)); + b43_gphy_tssi_power_lt_init(dev); + b43_gphy_gain_lt_init(dev); + b43_phy_write(dev, 0x0060, b43_phy_read(dev, 0x0060) & 0xFFBF); + b43_phy_write(dev, 0x0014, 0x0000); + + B43_WARN_ON(phy->rev < 6); + b43_phy_write(dev, 0x0478, b43_phy_read(dev, 0x0478) + | 0x0800); + b43_phy_write(dev, 0x0478, b43_phy_read(dev, 0x0478) + & 0xFEFF); + b43_phy_write(dev, 0x0801, b43_phy_read(dev, 0x0801) + & 0xFFBF); + + b43_gphy_dc_lt_init(dev); +} + +/* HardwarePowerControl init for A and G PHY */ +static void b43_hardware_pctl_init(struct b43_wldev *dev) +{ + struct b43_phy *phy = &dev->phy; + + if (!b43_has_hardware_pctl(phy)) { + /* No hardware power control */ + b43_hf_write(dev, b43_hf_read(dev) & ~B43_HF_HWPCTL); + return; + } + /* Init the hwpctl related hardware */ + switch (phy->type) { + case B43_PHYTYPE_A: + hardware_pctl_init_aphy(dev); + break; + case B43_PHYTYPE_G: + hardware_pctl_init_gphy(dev); + break; + default: + B43_WARN_ON(1); + } + /* Enable hardware pctl in firmware. */ + b43_hf_write(dev, b43_hf_read(dev) | B43_HF_HWPCTL); +} + +static void b43_hardware_pctl_early_init(struct b43_wldev *dev) +{ + struct b43_phy *phy = &dev->phy; + + if (!b43_has_hardware_pctl(phy)) { + b43_phy_write(dev, 0x047A, 0xC111); + return; + } + + b43_phy_write(dev, 0x0036, b43_phy_read(dev, 0x0036) & 0xFEFF); + b43_phy_write(dev, 0x002F, 0x0202); + b43_phy_write(dev, 0x047C, b43_phy_read(dev, 0x047C) | 0x0002); + b43_phy_write(dev, 0x047A, b43_phy_read(dev, 0x047A) | 0xF000); + if (phy->radio_ver == 0x2050 && phy->radio_rev == 8) { + b43_phy_write(dev, 0x047A, (b43_phy_read(dev, 0x047A) + & 0xFF0F) | 0x0010); + b43_phy_write(dev, 0x005D, b43_phy_read(dev, 0x005D) + | 0x8000); + b43_phy_write(dev, 0x004E, (b43_phy_read(dev, 0x004E) + & 0xFFC0) | 0x0010); + b43_phy_write(dev, 0x002E, 0xC07F); + b43_phy_write(dev, 0x0036, b43_phy_read(dev, 0x0036) + | 0x0400); + } else { + b43_phy_write(dev, 0x0036, b43_phy_read(dev, 0x0036) + | 0x0200); + b43_phy_write(dev, 0x0036, b43_phy_read(dev, 0x0036) + | 0x0400); + b43_phy_write(dev, 0x005D, b43_phy_read(dev, 0x005D) + & 0x7FFF); + b43_phy_write(dev, 0x004F, b43_phy_read(dev, 0x004F) + & 0xFFFE); + b43_phy_write(dev, 0x004E, (b43_phy_read(dev, 0x004E) + & 0xFFC0) | 0x0010); + b43_phy_write(dev, 0x002E, 0xC07F); + b43_phy_write(dev, 0x047A, (b43_phy_read(dev, 0x047A) + & 0xFF0F) | 0x0010); + } +} + +/* Intialize B/G PHY power control + * as described in http://bcm-specs.sipsolutions.net/InitPowerControl + */ +static void b43_phy_init_pctl(struct b43_wldev *dev) +{ + struct ssb_bus *bus = dev->dev->bus; + struct b43_phy *phy = &dev->phy; + struct b43_rfatt old_rfatt; + struct b43_bbatt old_bbatt; + u8 old_tx_control = 0; + + if ((bus->boardinfo.vendor == SSB_BOARDVENDOR_BCM) && + (bus->boardinfo.type == SSB_BOARD_BU4306)) + return; + + b43_phy_write(dev, 0x0028, 0x8018); + + /* This does something with the Analog... 
*/ + b43_write16(dev, B43_MMIO_PHY0, b43_read16(dev, B43_MMIO_PHY0) + & 0xFFDF); + + if (phy->type == B43_PHYTYPE_G && !phy->gmode) + return; + b43_hardware_pctl_early_init(dev); + if (phy->cur_idle_tssi == 0) { + if (phy->radio_ver == 0x2050 && phy->analog == 0) { + b43_radio_write16(dev, 0x0076, + (b43_radio_read16(dev, 0x0076) + & 0x00F7) | 0x0084); + } else { + struct b43_rfatt rfatt; + struct b43_bbatt bbatt; + + memcpy(&old_rfatt, &phy->rfatt, sizeof(old_rfatt)); + memcpy(&old_bbatt, &phy->bbatt, sizeof(old_bbatt)); + old_tx_control = phy->tx_control; + + bbatt.att = 11; + if (phy->radio_rev == 8) { + rfatt.att = 15; + rfatt.with_padmix = 1; + } else { + rfatt.att = 9; + rfatt.with_padmix = 0; + } + b43_set_txpower_g(dev, &bbatt, &rfatt, 0); + } + b43_dummy_transmission(dev); + phy->cur_idle_tssi = b43_phy_read(dev, B43_PHY_ITSSI); + if (B43_DEBUG) { + /* Current-Idle-TSSI sanity check. */ + if (abs(phy->cur_idle_tssi - phy->tgt_idle_tssi) >= 20) { + b43dbg(dev->wl, + "!WARNING! Idle-TSSI phy->cur_idle_tssi " + "measuring failed. (cur=%d, tgt=%d). Disabling TX power " + "adjustment.\n", phy->cur_idle_tssi, + phy->tgt_idle_tssi); + phy->cur_idle_tssi = 0; + } + } + if (phy->radio_ver == 0x2050 && phy->analog == 0) { + b43_radio_write16(dev, 0x0076, + b43_radio_read16(dev, 0x0076) + & 0xFF7B); + } else { + b43_set_txpower_g(dev, &old_bbatt, + &old_rfatt, old_tx_control); + } + } + b43_hardware_pctl_init(dev); + b43_shm_clear_tssi(dev); +} + +static void b43_phy_agcsetup(struct b43_wldev *dev) +{ + struct b43_phy *phy = &dev->phy; + u16 offset = 0x0000; + + if (phy->rev == 1) + offset = 0x4C00; + + b43_ofdmtab_write16(dev, offset, 0, 0x00FE); + b43_ofdmtab_write16(dev, offset, 1, 0x000D); + b43_ofdmtab_write16(dev, offset, 2, 0x0013); + b43_ofdmtab_write16(dev, offset, 3, 0x0019); + + if (phy->rev == 1) { + b43_ofdmtab_write16(dev, 0x1800, 0, 0x2710); + b43_ofdmtab_write16(dev, 0x1801, 0, 0x9B83); + b43_ofdmtab_write16(dev, 0x1802, 0, 0x9B83); + b43_ofdmtab_write16(dev, 0x1803, 0, 0x0F8D); + b43_phy_write(dev, 0x0455, 0x0004); + } + + b43_phy_write(dev, 0x04A5, (b43_phy_read(dev, 0x04A5) + & 0x00FF) | 0x5700); + b43_phy_write(dev, 0x041A, (b43_phy_read(dev, 0x041A) + & 0xFF80) | 0x000F); + b43_phy_write(dev, 0x041A, (b43_phy_read(dev, 0x041A) + & 0xC07F) | 0x2B80); + b43_phy_write(dev, 0x048C, (b43_phy_read(dev, 0x048C) + & 0xF0FF) | 0x0300); + + b43_radio_write16(dev, 0x007A, b43_radio_read16(dev, 0x007A) + | 0x0008); + + b43_phy_write(dev, 0x04A0, (b43_phy_read(dev, 0x04A0) + & 0xFFF0) | 0x0008); + b43_phy_write(dev, 0x04A1, (b43_phy_read(dev, 0x04A1) + & 0xF0FF) | 0x0600); + b43_phy_write(dev, 0x04A2, (b43_phy_read(dev, 0x04A2) + & 0xF0FF) | 0x0700); + b43_phy_write(dev, 0x04A0, (b43_phy_read(dev, 0x04A0) + & 0xF0FF) | 0x0100); + + if (phy->rev == 1) { + b43_phy_write(dev, 0x04A2, (b43_phy_read(dev, 0x04A2) + & 0xFFF0) | 0x0007); + } + + b43_phy_write(dev, 0x0488, (b43_phy_read(dev, 0x0488) + & 0xFF00) | 0x001C); + b43_phy_write(dev, 0x0488, (b43_phy_read(dev, 0x0488) + & 0xC0FF) | 0x0200); + b43_phy_write(dev, 0x0496, (b43_phy_read(dev, 0x0496) + & 0xFF00) | 0x001C); + b43_phy_write(dev, 0x0489, (b43_phy_read(dev, 0x0489) + & 0xFF00) | 0x0020); + b43_phy_write(dev, 0x0489, (b43_phy_read(dev, 0x0489) + & 0xC0FF) | 0x0200); + b43_phy_write(dev, 0x0482, (b43_phy_read(dev, 0x0482) + & 0xFF00) | 0x002E); + b43_phy_write(dev, 0x0496, (b43_phy_read(dev, 0x0496) + & 0x00FF) | 0x1A00); + b43_phy_write(dev, 0x0481, (b43_phy_read(dev, 0x0481) + & 0xFF00) | 0x0028); + b43_phy_write(dev, 0x0481, 
(b43_phy_read(dev, 0x0481) + & 0x00FF) | 0x2C00); + + if (phy->rev == 1) { + b43_phy_write(dev, 0x0430, 0x092B); + b43_phy_write(dev, 0x041B, (b43_phy_read(dev, 0x041B) + & 0xFFE1) | 0x0002); + } else { + b43_phy_write(dev, 0x041B, b43_phy_read(dev, 0x041B) + & 0xFFE1); + b43_phy_write(dev, 0x041F, 0x287A); + b43_phy_write(dev, 0x0420, (b43_phy_read(dev, 0x0420) + & 0xFFF0) | 0x0004); + } + + if (phy->rev >= 6) { + b43_phy_write(dev, 0x0422, 0x287A); + b43_phy_write(dev, 0x0420, (b43_phy_read(dev, 0x0420) + & 0x0FFF) | 0x3000); + } + + b43_phy_write(dev, 0x04A8, (b43_phy_read(dev, 0x04A8) + & 0x8080) | 0x7874); + b43_phy_write(dev, 0x048E, 0x1C00); + + offset = 0x0800; + if (phy->rev == 1) { + offset = 0x5400; + b43_phy_write(dev, 0x04AB, (b43_phy_read(dev, 0x04AB) + & 0xF0FF) | 0x0600); + b43_phy_write(dev, 0x048B, 0x005E); + b43_phy_write(dev, 0x048C, (b43_phy_read(dev, 0x048C) + & 0xFF00) | 0x001E); + b43_phy_write(dev, 0x048D, 0x0002); + } + b43_ofdmtab_write16(dev, offset, 0, 0x00); + b43_ofdmtab_write16(dev, offset, 1, 0x07); + b43_ofdmtab_write16(dev, offset, 2, 0x10); + b43_ofdmtab_write16(dev, offset, 3, 0x1C); + + if (phy->rev >= 6) { + b43_phy_write(dev, 0x0426, b43_phy_read(dev, 0x0426) + & 0xFFFC); + b43_phy_write(dev, 0x0426, b43_phy_read(dev, 0x0426) + & 0xEFFF); + } +} + +static void b43_phy_setupg(struct b43_wldev *dev) +{ + struct ssb_bus *bus = dev->dev->bus; + struct b43_phy *phy = &dev->phy; + u16 i; + + B43_WARN_ON(phy->type != B43_PHYTYPE_G); + if (phy->rev == 1) { + b43_phy_write(dev, 0x0406, 0x4F19); + b43_phy_write(dev, B43_PHY_G_CRS, + (b43_phy_read(dev, B43_PHY_G_CRS) & 0xFC3F) | + 0x0340); + b43_phy_write(dev, 0x042C, 0x005A); + b43_phy_write(dev, 0x0427, 0x001A); + + for (i = 0; i < B43_TAB_FINEFREQG_SIZE; i++) + b43_ofdmtab_write16(dev, 0x5800, i, + b43_tab_finefreqg[i]); + for (i = 0; i < B43_TAB_NOISEG1_SIZE; i++) + b43_ofdmtab_write16(dev, 0x1800, i, b43_tab_noiseg1[i]); + for (i = 0; i < B43_TAB_ROTOR_SIZE; i++) + b43_ofdmtab_write16(dev, 0x2000, i, b43_tab_rotor[i]); + } else { + /* nrssi values are signed 6-bit values. Not sure why we write 0x7654 here... 
*/ + b43_nrssi_hw_write(dev, 0xBA98, (s16) 0x7654); + + if (phy->rev == 2) { + b43_phy_write(dev, 0x04C0, 0x1861); + b43_phy_write(dev, 0x04C1, 0x0271); + } else if (phy->rev > 2) { + b43_phy_write(dev, 0x04C0, 0x0098); + b43_phy_write(dev, 0x04C1, 0x0070); + b43_phy_write(dev, 0x04C9, 0x0080); + } + b43_phy_write(dev, 0x042B, b43_phy_read(dev, 0x042B) | 0x800); + + for (i = 0; i < 64; i++) + b43_ofdmtab_write16(dev, 0x4000, i, i); + for (i = 0; i < B43_TAB_NOISEG2_SIZE; i++) + b43_ofdmtab_write16(dev, 0x1800, i, b43_tab_noiseg2[i]); + } + + if (phy->rev <= 2) + for (i = 0; i < B43_TAB_NOISESCALEG_SIZE; i++) + b43_ofdmtab_write16(dev, 0x1400, i, + b43_tab_noisescaleg1[i]); + else if ((phy->rev >= 7) && (b43_phy_read(dev, 0x0449) & 0x0200)) + for (i = 0; i < B43_TAB_NOISESCALEG_SIZE; i++) + b43_ofdmtab_write16(dev, 0x1400, i, + b43_tab_noisescaleg3[i]); + else + for (i = 0; i < B43_TAB_NOISESCALEG_SIZE; i++) + b43_ofdmtab_write16(dev, 0x1400, i, + b43_tab_noisescaleg2[i]); + + if (phy->rev == 2) + for (i = 0; i < B43_TAB_SIGMASQR_SIZE; i++) + b43_ofdmtab_write16(dev, 0x5000, i, + b43_tab_sigmasqr1[i]); + else if ((phy->rev > 2) && (phy->rev <= 8)) + for (i = 0; i < B43_TAB_SIGMASQR_SIZE; i++) + b43_ofdmtab_write16(dev, 0x5000, i, + b43_tab_sigmasqr2[i]); + + if (phy->rev == 1) { + for (i = 0; i < B43_TAB_RETARD_SIZE; i++) + b43_ofdmtab_write32(dev, 0x2400, i, b43_tab_retard[i]); + for (i = 4; i < 20; i++) + b43_ofdmtab_write16(dev, 0x5400, i, 0x0020); + b43_phy_agcsetup(dev); + + if ((bus->boardinfo.vendor == SSB_BOARDVENDOR_BCM) && + (bus->boardinfo.type == SSB_BOARD_BU4306) && + (bus->boardinfo.rev == 0x17)) + return; + + b43_ofdmtab_write16(dev, 0x5001, 0, 0x0002); + b43_ofdmtab_write16(dev, 0x5002, 0, 0x0001); + } else { + for (i = 0; i < 0x20; i++) + b43_ofdmtab_write16(dev, 0x1000, i, 0x0820); + b43_phy_agcsetup(dev); + b43_phy_read(dev, 0x0400); /* dummy read */ + b43_phy_write(dev, 0x0403, 0x1000); + b43_ofdmtab_write16(dev, 0x3C02, 0, 0x000F); + b43_ofdmtab_write16(dev, 0x3C03, 0, 0x0014); + + if ((bus->boardinfo.vendor == SSB_BOARDVENDOR_BCM) && + (bus->boardinfo.type == SSB_BOARD_BU4306) && + (bus->boardinfo.rev == 0x17)) + return; + + b43_ofdmtab_write16(dev, 0x0401, 0, 0x0002); + b43_ofdmtab_write16(dev, 0x0402, 0, 0x0001); + } +} + +/* Initialize the noisescaletable for APHY */ +static void b43_phy_init_noisescaletbl(struct b43_wldev *dev) +{ + struct b43_phy *phy = &dev->phy; + int i; + + for (i = 0; i < 12; i++) { + if (phy->rev == 2) + b43_ofdmtab_write16(dev, 0x1400, i, 0x6767); + else + b43_ofdmtab_write16(dev, 0x1400, i, 0x2323); + } + if (phy->rev == 2) + b43_ofdmtab_write16(dev, 0x1400, i, 0x6700); + else + b43_ofdmtab_write16(dev, 0x1400, i, 0x2300); + for (i = 0; i < 11; i++) { + if (phy->rev == 2) + b43_ofdmtab_write16(dev, 0x1400, i, 0x6767); + else + b43_ofdmtab_write16(dev, 0x1400, i, 0x2323); + } + if (phy->rev == 2) + b43_ofdmtab_write16(dev, 0x1400, i, 0x0067); + else + b43_ofdmtab_write16(dev, 0x1400, i, 0x0023); +} + +static void b43_phy_setupa(struct b43_wldev *dev) +{ + struct b43_phy *phy = &dev->phy; + u16 i; + + B43_WARN_ON(phy->type != B43_PHYTYPE_A); + switch (phy->rev) { + case 2: + b43_phy_write(dev, 0x008E, 0x3800); + b43_phy_write(dev, 0x0035, 0x03FF); + b43_phy_write(dev, 0x0036, 0x0400); + + b43_ofdmtab_write16(dev, 0x3807, 0, 0x0051); + + b43_phy_write(dev, 0x001C, 0x0FF9); + b43_phy_write(dev, 0x0020, b43_phy_read(dev, 0x0020) & 0xFF0F); + b43_ofdmtab_write16(dev, 0x3C0C, 0, 0x07BF); + b43_radio_write16(dev, 0x0002, 0x07BF); + + 
b43_phy_write(dev, 0x0024, 0x4680); + b43_phy_write(dev, 0x0020, 0x0003); + b43_phy_write(dev, 0x001D, 0x0F40); + b43_phy_write(dev, 0x001F, 0x1C00); + + b43_phy_write(dev, 0x002A, (b43_phy_read(dev, 0x002A) + & 0x00FF) | 0x0400); + b43_phy_write(dev, 0x002B, b43_phy_read(dev, 0x002B) + & 0xFBFF); + b43_phy_write(dev, 0x008E, 0x58C1); + + b43_ofdmtab_write16(dev, 0x0803, 0, 0x000F); + b43_ofdmtab_write16(dev, 0x0804, 0, 0x001F); + b43_ofdmtab_write16(dev, 0x0805, 0, 0x002A); + b43_ofdmtab_write16(dev, 0x0805, 0, 0x0030); + b43_ofdmtab_write16(dev, 0x0807, 0, 0x003A); + + b43_ofdmtab_write16(dev, 0x0000, 0, 0x0013); + b43_ofdmtab_write16(dev, 0x0000, 1, 0x0013); + b43_ofdmtab_write16(dev, 0x0000, 2, 0x0013); + b43_ofdmtab_write16(dev, 0x0000, 3, 0x0013); + b43_ofdmtab_write16(dev, 0x0000, 4, 0x0015); + b43_ofdmtab_write16(dev, 0x0000, 5, 0x0015); + b43_ofdmtab_write16(dev, 0x0000, 6, 0x0019); + + b43_ofdmtab_write16(dev, 0x0404, 0, 0x0003); + b43_ofdmtab_write16(dev, 0x0405, 0, 0x0003); + b43_ofdmtab_write16(dev, 0x0406, 0, 0x0007); + + for (i = 0; i < 16; i++) + b43_ofdmtab_write16(dev, 0x4000, i, (0x8 + i) & 0x000F); + + b43_ofdmtab_write16(dev, 0x3003, 0, 0x1044); + b43_ofdmtab_write16(dev, 0x3004, 0, 0x7201); + b43_ofdmtab_write16(dev, 0x3006, 0, 0x0040); + b43_ofdmtab_write16(dev, 0x3001, 0, + (b43_ofdmtab_read16(dev, 0x3001, 0) & + 0x0010) | 0x0008); + + for (i = 0; i < B43_TAB_FINEFREQA_SIZE; i++) + b43_ofdmtab_write16(dev, 0x5800, i, + b43_tab_finefreqa[i]); + for (i = 0; i < B43_TAB_NOISEA2_SIZE; i++) + b43_ofdmtab_write16(dev, 0x1800, i, b43_tab_noisea2[i]); + for (i = 0; i < B43_TAB_ROTOR_SIZE; i++) + b43_ofdmtab_write32(dev, 0x2000, i, b43_tab_rotor[i]); + b43_phy_init_noisescaletbl(dev); + for (i = 0; i < B43_TAB_RETARD_SIZE; i++) + b43_ofdmtab_write32(dev, 0x2400, i, b43_tab_retard[i]); + break; + case 3: + for (i = 0; i < 64; i++) + b43_ofdmtab_write16(dev, 0x4000, i, i); + + b43_ofdmtab_write16(dev, 0x3807, 0, 0x0051); + + b43_phy_write(dev, 0x001C, 0x0FF9); + b43_phy_write(dev, 0x0020, b43_phy_read(dev, 0x0020) & 0xFF0F); + b43_radio_write16(dev, 0x0002, 0x07BF); + + b43_phy_write(dev, 0x0024, 0x4680); + b43_phy_write(dev, 0x0020, 0x0003); + b43_phy_write(dev, 0x001D, 0x0F40); + b43_phy_write(dev, 0x001F, 0x1C00); + b43_phy_write(dev, 0x002A, (b43_phy_read(dev, 0x002A) + & 0x00FF) | 0x0400); + + b43_ofdmtab_write16(dev, 0x3000, 1, + (b43_ofdmtab_read16(dev, 0x3000, 1) + & 0x0010) | 0x0008); + for (i = 0; i < B43_TAB_NOISEA3_SIZE; i++) { + b43_ofdmtab_write16(dev, 0x1800, i, b43_tab_noisea3[i]); + } + b43_phy_init_noisescaletbl(dev); + for (i = 0; i < B43_TAB_SIGMASQR_SIZE; i++) { + b43_ofdmtab_write16(dev, 0x5000, i, + b43_tab_sigmasqr1[i]); + } + + b43_phy_write(dev, 0x0003, 0x1808); + + b43_ofdmtab_write16(dev, 0x0803, 0, 0x000F); + b43_ofdmtab_write16(dev, 0x0804, 0, 0x001F); + b43_ofdmtab_write16(dev, 0x0805, 0, 0x002A); + b43_ofdmtab_write16(dev, 0x0805, 0, 0x0030); + b43_ofdmtab_write16(dev, 0x0807, 0, 0x003A); + + b43_ofdmtab_write16(dev, 0x0000, 0, 0x0013); + b43_ofdmtab_write16(dev, 0x0001, 0, 0x0013); + b43_ofdmtab_write16(dev, 0x0002, 0, 0x0013); + b43_ofdmtab_write16(dev, 0x0003, 0, 0x0013); + b43_ofdmtab_write16(dev, 0x0004, 0, 0x0015); + b43_ofdmtab_write16(dev, 0x0005, 0, 0x0015); + b43_ofdmtab_write16(dev, 0x0006, 0, 0x0019); + + b43_ofdmtab_write16(dev, 0x0404, 0, 0x0003); + b43_ofdmtab_write16(dev, 0x0405, 0, 0x0003); + b43_ofdmtab_write16(dev, 0x0406, 0, 0x0007); + + b43_ofdmtab_write16(dev, 0x3C02, 0, 0x000F); + b43_ofdmtab_write16(dev, 0x3C03, 0, 
0x0014); + break; + default: + B43_WARN_ON(1); + } +} + +/* Initialize APHY. This is also called for the GPHY in some cases. */ +static void b43_phy_inita(struct b43_wldev *dev) +{ + struct ssb_bus *bus = dev->dev->bus; + struct b43_phy *phy = &dev->phy; + u16 tval; + + might_sleep(); + + if (phy->type == B43_PHYTYPE_A) { + b43_phy_setupa(dev); + } else { + b43_phy_setupg(dev); + if (phy->gmode && + (dev->dev->bus->sprom.r1.boardflags_lo & B43_BFL_PACTRL)) + b43_phy_write(dev, 0x046E, 0x03CF); + return; + } + + b43_phy_write(dev, B43_PHY_A_CRS, + (b43_phy_read(dev, B43_PHY_A_CRS) & 0xF83C) | 0x0340); + b43_phy_write(dev, 0x0034, 0x0001); + + //TODO: RSSI AGC + b43_phy_write(dev, B43_PHY_A_CRS, + b43_phy_read(dev, B43_PHY_A_CRS) | (1 << 14)); + b43_radio_init2060(dev); + + if ((bus->boardinfo.vendor == SSB_BOARDVENDOR_BCM) && + ((bus->boardinfo.type == SSB_BOARD_BU4306) || + (bus->boardinfo.type == SSB_BOARD_BU4309))) { + if (phy->lofcal == 0xFFFF) { + //TODO: LOF Cal + b43_radio_set_tx_iq(dev); + } else + b43_radio_write16(dev, 0x001E, phy->lofcal); + } + + b43_phy_write(dev, 0x007A, 0xF111); + + if (phy->cur_idle_tssi == 0) { + b43_radio_write16(dev, 0x0019, 0x0000); + b43_radio_write16(dev, 0x0017, 0x0020); + + tval = b43_ofdmtab_read16(dev, 0x3001, 0); + if (phy->rev == 1) { + b43_ofdmtab_write16(dev, 0x3001, 0, + (b43_ofdmtab_read16(dev, 0x3001, 0) + & 0xFF87) + | 0x0058); + } else { + b43_ofdmtab_write16(dev, 0x3001, 0, + (b43_ofdmtab_read16(dev, 0x3001, 0) + & 0xFFC3) + | 0x002C); + } + b43_dummy_transmission(dev); + phy->cur_idle_tssi = b43_phy_read(dev, B43_PHY_A_PCTL); + b43_ofdmtab_write16(dev, 0x3001, 0, tval); + + b43_radio_set_txpower_a(dev, 0x0018); + } + b43_shm_clear_tssi(dev); +} + +static void b43_phy_initb2(struct b43_wldev *dev) +{ + struct b43_phy *phy = &dev->phy; + u16 offset, val; + + b43_write16(dev, 0x03EC, 0x3F22); + b43_phy_write(dev, 0x0020, 0x301C); + b43_phy_write(dev, 0x0026, 0x0000); + b43_phy_write(dev, 0x0030, 0x00C6); + b43_phy_write(dev, 0x0088, 0x3E00); + val = 0x3C3D; + for (offset = 0x0089; offset < 0x00A7; offset++) { + b43_phy_write(dev, offset, val); + val -= 0x0202; + } + b43_phy_write(dev, 0x03E4, 0x3000); + b43_radio_selectchannel(dev, phy->channel, 0); + if (phy->radio_ver != 0x2050) { + b43_radio_write16(dev, 0x0075, 0x0080); + b43_radio_write16(dev, 0x0079, 0x0081); + } + b43_radio_write16(dev, 0x0050, 0x0020); + b43_radio_write16(dev, 0x0050, 0x0023); + if (phy->radio_ver == 0x2050) { + b43_radio_write16(dev, 0x0050, 0x0020); + b43_radio_write16(dev, 0x005A, 0x0070); + b43_radio_write16(dev, 0x005B, 0x007B); + b43_radio_write16(dev, 0x005C, 0x00B0); + b43_radio_write16(dev, 0x007A, 0x000F); + b43_phy_write(dev, 0x0038, 0x0677); + b43_radio_init2050(dev); + } + b43_phy_write(dev, 0x0014, 0x0080); + b43_phy_write(dev, 0x0032, 0x00CA); + b43_phy_write(dev, 0x0032, 0x00CC); + b43_phy_write(dev, 0x0035, 0x07C2); + b43_lo_b_measure(dev); + b43_phy_write(dev, 0x0026, 0xCC00); + if (phy->radio_ver != 0x2050) + b43_phy_write(dev, 0x0026, 0xCE00); + b43_write16(dev, B43_MMIO_CHANNEL_EXT, 0x1000); + b43_phy_write(dev, 0x002A, 0x88A3); + if (phy->radio_ver != 0x2050) + b43_phy_write(dev, 0x002A, 0x88C2); + b43_set_txpower_g(dev, &phy->bbatt, &phy->rfatt, phy->tx_control); + b43_phy_init_pctl(dev); +} + +static void b43_phy_initb4(struct b43_wldev *dev) +{ + struct b43_phy *phy = &dev->phy; + u16 offset, val; + + b43_write16(dev, 0x03EC, 0x3F22); + b43_phy_write(dev, 0x0020, 0x301C); + b43_phy_write(dev, 0x0026, 0x0000); + b43_phy_write(dev, 0x0030, 
0x00C6); + b43_phy_write(dev, 0x0088, 0x3E00); + val = 0x3C3D; + for (offset = 0x0089; offset < 0x00A7; offset++) { + b43_phy_write(dev, offset, val); + val -= 0x0202; + } + b43_phy_write(dev, 0x03E4, 0x3000); + b43_radio_selectchannel(dev, phy->channel, 0); + if (phy->radio_ver != 0x2050) { + b43_radio_write16(dev, 0x0075, 0x0080); + b43_radio_write16(dev, 0x0079, 0x0081); + } + b43_radio_write16(dev, 0x0050, 0x0020); + b43_radio_write16(dev, 0x0050, 0x0023); + if (phy->radio_ver == 0x2050) { + b43_radio_write16(dev, 0x0050, 0x0020); + b43_radio_write16(dev, 0x005A, 0x0070); + b43_radio_write16(dev, 0x005B, 0x007B); + b43_radio_write16(dev, 0x005C, 0x00B0); + b43_radio_write16(dev, 0x007A, 0x000F); + b43_phy_write(dev, 0x0038, 0x0677); + b43_radio_init2050(dev); + } + b43_phy_write(dev, 0x0014, 0x0080); + b43_phy_write(dev, 0x0032, 0x00CA); + if (phy->radio_ver == 0x2050) + b43_phy_write(dev, 0x0032, 0x00E0); + b43_phy_write(dev, 0x0035, 0x07C2); + + b43_lo_b_measure(dev); + + b43_phy_write(dev, 0x0026, 0xCC00); + if (phy->radio_ver == 0x2050) + b43_phy_write(dev, 0x0026, 0xCE00); + b43_write16(dev, B43_MMIO_CHANNEL_EXT, 0x1100); + b43_phy_write(dev, 0x002A, 0x88A3); + if (phy->radio_ver == 0x2050) + b43_phy_write(dev, 0x002A, 0x88C2); + b43_set_txpower_g(dev, &phy->bbatt, &phy->rfatt, phy->tx_control); + if (dev->dev->bus->sprom.r1.boardflags_lo & B43_BFL_RSSI) { + b43_calc_nrssi_slope(dev); + b43_calc_nrssi_threshold(dev); + } + b43_phy_init_pctl(dev); +} + +static void b43_phy_initb5(struct b43_wldev *dev) +{ + struct ssb_bus *bus = dev->dev->bus; + struct b43_phy *phy = &dev->phy; + u16 offset, value; + u8 old_channel; + + if (phy->analog == 1) { + b43_radio_write16(dev, 0x007A, b43_radio_read16(dev, 0x007A) + | 0x0050); + } + if ((bus->boardinfo.vendor != SSB_BOARDVENDOR_BCM) && + (bus->boardinfo.type != SSB_BOARD_BU4306)) { + value = 0x2120; + for (offset = 0x00A8; offset < 0x00C7; offset++) { + b43_phy_write(dev, offset, value); + value += 0x202; + } + } + b43_phy_write(dev, 0x0035, (b43_phy_read(dev, 0x0035) & 0xF0FF) + | 0x0700); + if (phy->radio_ver == 0x2050) + b43_phy_write(dev, 0x0038, 0x0667); + + if (phy->gmode || phy->rev >= 2) { + if (phy->radio_ver == 0x2050) { + b43_radio_write16(dev, 0x007A, + b43_radio_read16(dev, 0x007A) + | 0x0020); + b43_radio_write16(dev, 0x0051, + b43_radio_read16(dev, 0x0051) + | 0x0004); + } + b43_write16(dev, B43_MMIO_PHY_RADIO, 0x0000); + + b43_phy_write(dev, 0x0802, b43_phy_read(dev, 0x0802) | 0x0100); + b43_phy_write(dev, 0x042B, b43_phy_read(dev, 0x042B) | 0x2000); + + b43_phy_write(dev, 0x001C, 0x186A); + + b43_phy_write(dev, 0x0013, + (b43_phy_read(dev, 0x0013) & 0x00FF) | 0x1900); + b43_phy_write(dev, 0x0035, + (b43_phy_read(dev, 0x0035) & 0xFFC0) | 0x0064); + b43_phy_write(dev, 0x005D, + (b43_phy_read(dev, 0x005D) & 0xFF80) | 0x000A); + } + + if (dev->bad_frames_preempt) { + b43_phy_write(dev, B43_PHY_RADIO_BITFIELD, + b43_phy_read(dev, + B43_PHY_RADIO_BITFIELD) | (1 << 11)); + } + + if (phy->analog == 1) { + b43_phy_write(dev, 0x0026, 0xCE00); + b43_phy_write(dev, 0x0021, 0x3763); + b43_phy_write(dev, 0x0022, 0x1BC3); + b43_phy_write(dev, 0x0023, 0x06F9); + b43_phy_write(dev, 0x0024, 0x037E); + } else + b43_phy_write(dev, 0x0026, 0xCC00); + b43_phy_write(dev, 0x0030, 0x00C6); + b43_write16(dev, 0x03EC, 0x3F22); + + if (phy->analog == 1) + b43_phy_write(dev, 0x0020, 0x3E1C); + else + b43_phy_write(dev, 0x0020, 0x301C); + + if (phy->analog == 0) + b43_write16(dev, 0x03E4, 0x3000); + + old_channel = phy->channel; + /* Force to channel 7, 
even if not supported. */ + b43_radio_selectchannel(dev, 7, 0); + + if (phy->radio_ver != 0x2050) { + b43_radio_write16(dev, 0x0075, 0x0080); + b43_radio_write16(dev, 0x0079, 0x0081); + } + + b43_radio_write16(dev, 0x0050, 0x0020); + b43_radio_write16(dev, 0x0050, 0x0023); + + if (phy->radio_ver == 0x2050) { + b43_radio_write16(dev, 0x0050, 0x0020); + b43_radio_write16(dev, 0x005A, 0x0070); + } + + b43_radio_write16(dev, 0x005B, 0x007B); + b43_radio_write16(dev, 0x005C, 0x00B0); + + b43_radio_write16(dev, 0x007A, b43_radio_read16(dev, 0x007A) | 0x0007); + + b43_radio_selectchannel(dev, old_channel, 0); + + b43_phy_write(dev, 0x0014, 0x0080); + b43_phy_write(dev, 0x0032, 0x00CA); + b43_phy_write(dev, 0x002A, 0x88A3); + + b43_set_txpower_g(dev, &phy->bbatt, &phy->rfatt, phy->tx_control); + + if (phy->radio_ver == 0x2050) + b43_radio_write16(dev, 0x005D, 0x000D); + + b43_write16(dev, 0x03E4, (b43_read16(dev, 0x03E4) & 0xFFC0) | 0x0004); +} + +static void b43_phy_initb6(struct b43_wldev *dev) +{ + struct b43_phy *phy = &dev->phy; + u16 offset, val; + u8 old_channel; + + b43_phy_write(dev, 0x003E, 0x817A); + b43_radio_write16(dev, 0x007A, + (b43_radio_read16(dev, 0x007A) | 0x0058)); + if (phy->radio_rev == 4 || phy->radio_rev == 5) { + b43_radio_write16(dev, 0x51, 0x37); + b43_radio_write16(dev, 0x52, 0x70); + b43_radio_write16(dev, 0x53, 0xB3); + b43_radio_write16(dev, 0x54, 0x9B); + b43_radio_write16(dev, 0x5A, 0x88); + b43_radio_write16(dev, 0x5B, 0x88); + b43_radio_write16(dev, 0x5D, 0x88); + b43_radio_write16(dev, 0x5E, 0x88); + b43_radio_write16(dev, 0x7D, 0x88); + b43_hf_write(dev, b43_hf_read(dev) + | B43_HF_TSSIRPSMW); + } + B43_WARN_ON(phy->radio_rev == 6 || phy->radio_rev == 7); /* We had code for these revs here... */ + if (phy->radio_rev == 8) { + b43_radio_write16(dev, 0x51, 0); + b43_radio_write16(dev, 0x52, 0x40); + b43_radio_write16(dev, 0x53, 0xB7); + b43_radio_write16(dev, 0x54, 0x98); + b43_radio_write16(dev, 0x5A, 0x88); + b43_radio_write16(dev, 0x5B, 0x6B); + b43_radio_write16(dev, 0x5C, 0x0F); + if (dev->dev->bus->sprom.r1.boardflags_lo & B43_BFL_ALTIQ) { + b43_radio_write16(dev, 0x5D, 0xFA); + b43_radio_write16(dev, 0x5E, 0xD8); + } else { + b43_radio_write16(dev, 0x5D, 0xF5); + b43_radio_write16(dev, 0x5E, 0xB8); + } + b43_radio_write16(dev, 0x0073, 0x0003); + b43_radio_write16(dev, 0x007D, 0x00A8); + b43_radio_write16(dev, 0x007C, 0x0001); + b43_radio_write16(dev, 0x007E, 0x0008); + } + val = 0x1E1F; + for (offset = 0x0088; offset < 0x0098; offset++) { + b43_phy_write(dev, offset, val); + val -= 0x0202; + } + val = 0x3E3F; + for (offset = 0x0098; offset < 0x00A8; offset++) { + b43_phy_write(dev, offset, val); + val -= 0x0202; + } + val = 0x2120; + for (offset = 0x00A8; offset < 0x00C8; offset++) { + b43_phy_write(dev, offset, (val & 0x3F3F)); + val += 0x0202; + } + if (phy->type == B43_PHYTYPE_G) { + b43_radio_write16(dev, 0x007A, + b43_radio_read16(dev, 0x007A) | 0x0020); + b43_radio_write16(dev, 0x0051, + b43_radio_read16(dev, 0x0051) | 0x0004); + b43_phy_write(dev, 0x0802, b43_phy_read(dev, 0x0802) | 0x0100); + b43_phy_write(dev, 0x042B, b43_phy_read(dev, 0x042B) | 0x2000); + b43_phy_write(dev, 0x5B, 0); + b43_phy_write(dev, 0x5C, 0); + } + + old_channel = phy->channel; + if (old_channel >= 8) + b43_radio_selectchannel(dev, 1, 0); + else + b43_radio_selectchannel(dev, 13, 0); + + b43_radio_write16(dev, 0x0050, 0x0020); + b43_radio_write16(dev, 0x0050, 0x0023); + udelay(40); + if (phy->radio_rev < 6 || phy->radio_rev == 8) { + b43_radio_write16(dev, 0x7C, 
(b43_radio_read16(dev, 0x7C) + | 0x0002)); + b43_radio_write16(dev, 0x50, 0x20); + } + if (phy->radio_rev <= 2) { + b43_radio_write16(dev, 0x7C, 0x20); + b43_radio_write16(dev, 0x5A, 0x70); + b43_radio_write16(dev, 0x5B, 0x7B); + b43_radio_write16(dev, 0x5C, 0xB0); + } + b43_radio_write16(dev, 0x007A, + (b43_radio_read16(dev, 0x007A) & 0x00F8) | 0x0007); + + b43_radio_selectchannel(dev, old_channel, 0); + + b43_phy_write(dev, 0x0014, 0x0200); + if (phy->radio_rev >= 6) + b43_phy_write(dev, 0x2A, 0x88C2); + else + b43_phy_write(dev, 0x2A, 0x8AC0); + b43_phy_write(dev, 0x0038, 0x0668); + b43_set_txpower_g(dev, &phy->bbatt, &phy->rfatt, phy->tx_control); + if (phy->radio_rev <= 5) { + b43_phy_write(dev, 0x5D, (b43_phy_read(dev, 0x5D) + & 0xFF80) | 0x0003); + } + if (phy->radio_rev <= 2) + b43_radio_write16(dev, 0x005D, 0x000D); + + if (phy->analog == 4) { + b43_write16(dev, 0x3E4, 9); + b43_phy_write(dev, 0x61, b43_phy_read(dev, 0x61) + & 0x0FFF); + } else { + b43_phy_write(dev, 0x0002, (b43_phy_read(dev, 0x0002) & 0xFFC0) + | 0x0004); + } + if (phy->type == B43_PHYTYPE_B) { + b43_write16(dev, 0x03E6, 0x8140); + b43_phy_write(dev, 0x0016, 0x0410); + b43_phy_write(dev, 0x0017, 0x0820); + b43_phy_write(dev, 0x0062, 0x0007); + b43_radio_init2050(dev); + b43_lo_g_measure(dev); + if (dev->dev->bus->sprom.r1.boardflags_lo & B43_BFL_RSSI) { + b43_calc_nrssi_slope(dev); + b43_calc_nrssi_threshold(dev); + } + b43_phy_init_pctl(dev); + } else if (phy->type == B43_PHYTYPE_G) + b43_write16(dev, 0x03E6, 0x0); +} + +static void b43_calc_loopback_gain(struct b43_wldev *dev) +{ + struct b43_phy *phy = &dev->phy; + u16 backup_phy[16] = { 0 }; + u16 backup_radio[3]; + u16 backup_bband; + u16 i, j, loop_i_max; + u16 trsw_rx; + u16 loop1_outer_done, loop1_inner_done; + + backup_phy[0] = b43_phy_read(dev, B43_PHY_CRS0); + backup_phy[1] = b43_phy_read(dev, B43_PHY_CCKBBANDCFG); + backup_phy[2] = b43_phy_read(dev, B43_PHY_RFOVER); + backup_phy[3] = b43_phy_read(dev, B43_PHY_RFOVERVAL); + if (phy->rev != 1) { /* Not in specs, but needed to prevent PPC machine check */ + backup_phy[4] = b43_phy_read(dev, B43_PHY_ANALOGOVER); + backup_phy[5] = b43_phy_read(dev, B43_PHY_ANALOGOVERVAL); + } + backup_phy[6] = b43_phy_read(dev, B43_PHY_BASE(0x5A)); + backup_phy[7] = b43_phy_read(dev, B43_PHY_BASE(0x59)); + backup_phy[8] = b43_phy_read(dev, B43_PHY_BASE(0x58)); + backup_phy[9] = b43_phy_read(dev, B43_PHY_BASE(0x0A)); + backup_phy[10] = b43_phy_read(dev, B43_PHY_BASE(0x03)); + backup_phy[11] = b43_phy_read(dev, B43_PHY_LO_MASK); + backup_phy[12] = b43_phy_read(dev, B43_PHY_LO_CTL); + backup_phy[13] = b43_phy_read(dev, B43_PHY_BASE(0x2B)); + backup_phy[14] = b43_phy_read(dev, B43_PHY_PGACTL); + backup_phy[15] = b43_phy_read(dev, B43_PHY_LO_LEAKAGE); + backup_bband = phy->bbatt.att; + backup_radio[0] = b43_radio_read16(dev, 0x52); + backup_radio[1] = b43_radio_read16(dev, 0x43); + backup_radio[2] = b43_radio_read16(dev, 0x7A); + + b43_phy_write(dev, B43_PHY_CRS0, + b43_phy_read(dev, B43_PHY_CRS0) & 0x3FFF); + b43_phy_write(dev, B43_PHY_CCKBBANDCFG, + b43_phy_read(dev, B43_PHY_CCKBBANDCFG) | 0x8000); + b43_phy_write(dev, B43_PHY_RFOVER, + b43_phy_read(dev, B43_PHY_RFOVER) | 0x0002); + b43_phy_write(dev, B43_PHY_RFOVERVAL, + b43_phy_read(dev, B43_PHY_RFOVERVAL) & 0xFFFD); + b43_phy_write(dev, B43_PHY_RFOVER, + b43_phy_read(dev, B43_PHY_RFOVER) | 0x0001); + b43_phy_write(dev, B43_PHY_RFOVERVAL, + b43_phy_read(dev, B43_PHY_RFOVERVAL) & 0xFFFE); + if (phy->rev != 1) { /* Not in specs, but needed to prevent PPC machine check */ + 
b43_phy_write(dev, B43_PHY_ANALOGOVER, + b43_phy_read(dev, B43_PHY_ANALOGOVER) | 0x0001); + b43_phy_write(dev, B43_PHY_ANALOGOVERVAL, + b43_phy_read(dev, + B43_PHY_ANALOGOVERVAL) & 0xFFFE); + b43_phy_write(dev, B43_PHY_ANALOGOVER, + b43_phy_read(dev, B43_PHY_ANALOGOVER) | 0x0002); + b43_phy_write(dev, B43_PHY_ANALOGOVERVAL, + b43_phy_read(dev, + B43_PHY_ANALOGOVERVAL) & 0xFFFD); + } + b43_phy_write(dev, B43_PHY_RFOVER, + b43_phy_read(dev, B43_PHY_RFOVER) | 0x000C); + b43_phy_write(dev, B43_PHY_RFOVERVAL, + b43_phy_read(dev, B43_PHY_RFOVERVAL) | 0x000C); + b43_phy_write(dev, B43_PHY_RFOVER, + b43_phy_read(dev, B43_PHY_RFOVER) | 0x0030); + b43_phy_write(dev, B43_PHY_RFOVERVAL, + (b43_phy_read(dev, B43_PHY_RFOVERVAL) + & 0xFFCF) | 0x10); + + b43_phy_write(dev, B43_PHY_BASE(0x5A), 0x0780); + b43_phy_write(dev, B43_PHY_BASE(0x59), 0xC810); + b43_phy_write(dev, B43_PHY_BASE(0x58), 0x000D); + + b43_phy_write(dev, B43_PHY_BASE(0x0A), + b43_phy_read(dev, B43_PHY_BASE(0x0A)) | 0x2000); + if (phy->rev != 1) { /* Not in specs, but needed to prevent PPC machine check */ + b43_phy_write(dev, B43_PHY_ANALOGOVER, + b43_phy_read(dev, B43_PHY_ANALOGOVER) | 0x0004); + b43_phy_write(dev, B43_PHY_ANALOGOVERVAL, + b43_phy_read(dev, + B43_PHY_ANALOGOVERVAL) & 0xFFFB); + } + b43_phy_write(dev, B43_PHY_BASE(0x03), + (b43_phy_read(dev, B43_PHY_BASE(0x03)) + & 0xFF9F) | 0x40); + + if (phy->radio_rev == 8) { + b43_radio_write16(dev, 0x43, 0x000F); + } else { + b43_radio_write16(dev, 0x52, 0); + b43_radio_write16(dev, 0x43, (b43_radio_read16(dev, 0x43) + & 0xFFF0) | 0x9); + } + b43_phy_set_baseband_attenuation(dev, 11); + + if (phy->rev >= 3) + b43_phy_write(dev, B43_PHY_LO_MASK, 0xC020); + else + b43_phy_write(dev, B43_PHY_LO_MASK, 0x8020); + b43_phy_write(dev, B43_PHY_LO_CTL, 0); + + b43_phy_write(dev, B43_PHY_BASE(0x2B), + (b43_phy_read(dev, B43_PHY_BASE(0x2B)) + & 0xFFC0) | 0x01); + b43_phy_write(dev, B43_PHY_BASE(0x2B), + (b43_phy_read(dev, B43_PHY_BASE(0x2B)) + & 0xC0FF) | 0x800); + + b43_phy_write(dev, B43_PHY_RFOVER, + b43_phy_read(dev, B43_PHY_RFOVER) | 0x0100); + b43_phy_write(dev, B43_PHY_RFOVERVAL, + b43_phy_read(dev, B43_PHY_RFOVERVAL) & 0xCFFF); + + if (dev->dev->bus->sprom.r1.boardflags_lo & B43_BFL_EXTLNA) { + if (phy->rev >= 7) { + b43_phy_write(dev, B43_PHY_RFOVER, + b43_phy_read(dev, B43_PHY_RFOVER) + | 0x0800); + b43_phy_write(dev, B43_PHY_RFOVERVAL, + b43_phy_read(dev, B43_PHY_RFOVERVAL) + | 0x8000); + } + } + b43_radio_write16(dev, 0x7A, b43_radio_read16(dev, 0x7A) + & 0x00F7); + + j = 0; + loop_i_max = (phy->radio_rev == 8) ? 
15 : 9; + for (i = 0; i < loop_i_max; i++) { + for (j = 0; j < 16; j++) { + b43_radio_write16(dev, 0x43, i); + b43_phy_write(dev, B43_PHY_RFOVERVAL, + (b43_phy_read(dev, B43_PHY_RFOVERVAL) + & 0xF0FF) | (j << 8)); + b43_phy_write(dev, B43_PHY_PGACTL, + (b43_phy_read(dev, B43_PHY_PGACTL) + & 0x0FFF) | 0xA000); + b43_phy_write(dev, B43_PHY_PGACTL, + b43_phy_read(dev, B43_PHY_PGACTL) + | 0xF000); + udelay(20); + if (b43_phy_read(dev, B43_PHY_LO_LEAKAGE) >= 0xDFC) + goto exit_loop1; + } + } + exit_loop1: + loop1_outer_done = i; + loop1_inner_done = j; + if (j >= 8) { + b43_phy_write(dev, B43_PHY_RFOVERVAL, + b43_phy_read(dev, B43_PHY_RFOVERVAL) + | 0x30); + trsw_rx = 0x1B; + for (j = j - 8; j < 16; j++) { + b43_phy_write(dev, B43_PHY_RFOVERVAL, + (b43_phy_read(dev, B43_PHY_RFOVERVAL) + & 0xF0FF) | (j << 8)); + b43_phy_write(dev, B43_PHY_PGACTL, + (b43_phy_read(dev, B43_PHY_PGACTL) + & 0x0FFF) | 0xA000); + b43_phy_write(dev, B43_PHY_PGACTL, + b43_phy_read(dev, B43_PHY_PGACTL) + | 0xF000); + udelay(20); + trsw_rx -= 3; + if (b43_phy_read(dev, B43_PHY_LO_LEAKAGE) >= 0xDFC) + goto exit_loop2; + } + } else + trsw_rx = 0x18; + exit_loop2: + + if (phy->rev != 1) { /* Not in specs, but needed to prevent PPC machine check */ + b43_phy_write(dev, B43_PHY_ANALOGOVER, backup_phy[4]); + b43_phy_write(dev, B43_PHY_ANALOGOVERVAL, backup_phy[5]); + } + b43_phy_write(dev, B43_PHY_BASE(0x5A), backup_phy[6]); + b43_phy_write(dev, B43_PHY_BASE(0x59), backup_phy[7]); + b43_phy_write(dev, B43_PHY_BASE(0x58), backup_phy[8]); + b43_phy_write(dev, B43_PHY_BASE(0x0A), backup_phy[9]); + b43_phy_write(dev, B43_PHY_BASE(0x03), backup_phy[10]); + b43_phy_write(dev, B43_PHY_LO_MASK, backup_phy[11]); + b43_phy_write(dev, B43_PHY_LO_CTL, backup_phy[12]); + b43_phy_write(dev, B43_PHY_BASE(0x2B), backup_phy[13]); + b43_phy_write(dev, B43_PHY_PGACTL, backup_phy[14]); + + b43_phy_set_baseband_attenuation(dev, backup_bband); + + b43_radio_write16(dev, 0x52, backup_radio[0]); + b43_radio_write16(dev, 0x43, backup_radio[1]); + b43_radio_write16(dev, 0x7A, backup_radio[2]); + + b43_phy_write(dev, B43_PHY_RFOVER, backup_phy[2] | 0x0003); + udelay(10); + b43_phy_write(dev, B43_PHY_RFOVER, backup_phy[2]); + b43_phy_write(dev, B43_PHY_RFOVERVAL, backup_phy[3]); + b43_phy_write(dev, B43_PHY_CRS0, backup_phy[0]); + b43_phy_write(dev, B43_PHY_CCKBBANDCFG, backup_phy[1]); + + phy->max_lb_gain = + ((loop1_inner_done * 6) - (loop1_outer_done * 4)) - 11; + phy->trsw_rx_gain = trsw_rx * 2; +} + +static void b43_phy_initg(struct b43_wldev *dev) +{ + struct b43_phy *phy = &dev->phy; + u16 tmp; + + if (phy->rev == 1) + b43_phy_initb5(dev); + else + b43_phy_initb6(dev); + + if (phy->rev >= 2 || phy->gmode) + b43_phy_inita(dev); + + if (phy->rev >= 2) { + b43_phy_write(dev, B43_PHY_ANALOGOVER, 0); + b43_phy_write(dev, B43_PHY_ANALOGOVERVAL, 0); + } + if (phy->rev == 2) { + b43_phy_write(dev, B43_PHY_RFOVER, 0); + b43_phy_write(dev, B43_PHY_PGACTL, 0xC0); + } + if (phy->rev > 5) { + b43_phy_write(dev, B43_PHY_RFOVER, 0x400); + b43_phy_write(dev, B43_PHY_PGACTL, 0xC0); + } + if (phy->gmode || phy->rev >= 2) { + tmp = b43_phy_read(dev, B43_PHY_VERSION_OFDM); + tmp &= B43_PHYVER_VERSION; + if (tmp == 3 || tmp == 5) { + b43_phy_write(dev, B43_PHY_OFDM(0xC2), 0x1816); + b43_phy_write(dev, B43_PHY_OFDM(0xC3), 0x8006); + } + if (tmp == 5) { + b43_phy_write(dev, B43_PHY_OFDM(0xCC), + (b43_phy_read(dev, B43_PHY_OFDM(0xCC)) + & 0x00FF) | 0x1F00); + } + } + if ((phy->rev <= 2 && phy->gmode) || phy->rev >= 2) + b43_phy_write(dev, B43_PHY_OFDM(0x7E), 0x78); + if 
(phy->radio_rev == 8) { + b43_phy_write(dev, B43_PHY_EXTG(0x01), + b43_phy_read(dev, B43_PHY_EXTG(0x01)) + | 0x80); + b43_phy_write(dev, B43_PHY_OFDM(0x3E), + b43_phy_read(dev, B43_PHY_OFDM(0x3E)) + | 0x4); + } + if (has_loopback_gain(phy)) + b43_calc_loopback_gain(dev); + + if (phy->radio_rev != 8) { + if (phy->initval == 0xFFFF) + phy->initval = b43_radio_init2050(dev); + else + b43_radio_write16(dev, 0x0078, phy->initval); + } + if (phy->lo_control->tx_bias == 0xFF) { + b43_lo_g_measure(dev); + } else { + if (has_tx_magnification(phy)) { + b43_radio_write16(dev, 0x52, + (b43_radio_read16(dev, 0x52) & 0xFF00) + | phy->lo_control->tx_bias | phy-> + lo_control->tx_magn); + } else { + b43_radio_write16(dev, 0x52, + (b43_radio_read16(dev, 0x52) & 0xFFF0) + | phy->lo_control->tx_bias); + } + if (phy->rev >= 6) { + b43_phy_write(dev, B43_PHY_BASE(0x36), + (b43_phy_read(dev, B43_PHY_BASE(0x36)) + & 0x0FFF) | (phy->lo_control-> + tx_bias << 12)); + } + if (dev->dev->bus->sprom.r1.boardflags_lo & B43_BFL_PACTRL) + b43_phy_write(dev, B43_PHY_BASE(0x2E), 0x8075); + else + b43_phy_write(dev, B43_PHY_BASE(0x2E), 0x807F); + if (phy->rev < 2) + b43_phy_write(dev, B43_PHY_BASE(0x2F), 0x101); + else + b43_phy_write(dev, B43_PHY_BASE(0x2F), 0x202); + } + if (phy->gmode || phy->rev >= 2) { + b43_lo_g_adjust(dev); + b43_phy_write(dev, B43_PHY_LO_MASK, 0x8078); + } + + if (!(dev->dev->bus->sprom.r1.boardflags_lo & B43_BFL_RSSI)) { + /* The specs state to update the NRSSI LT with + * the value 0x7FFFFFFF here. I think that is some weird + * compiler optimization in the original driver. + * Essentially, what we do here is resetting all NRSSI LT + * entries to -32 (see the limit_value() in nrssi_hw_update()) + */ + b43_nrssi_hw_update(dev, 0xFFFF); //FIXME? + b43_calc_nrssi_threshold(dev); + } else if (phy->gmode || phy->rev >= 2) { + if (phy->nrssi[0] == -1000) { + B43_WARN_ON(phy->nrssi[1] != -1000); + b43_calc_nrssi_slope(dev); + } else + b43_calc_nrssi_threshold(dev); + } + if (phy->radio_rev == 8) + b43_phy_write(dev, B43_PHY_EXTG(0x05), 0x3230); + b43_phy_init_pctl(dev); + /* FIXME: The spec says in the following if, the 0 should be replaced + 'if OFDM may not be used in the current locale' + but OFDM is legal everywhere */ + if ((dev->dev->bus->chip_id == 0x4306 + && dev->dev->bus->chip_package == 2) || 0) { + b43_phy_write(dev, B43_PHY_CRS0, b43_phy_read(dev, B43_PHY_CRS0) + & 0xBFFF); + b43_phy_write(dev, B43_PHY_OFDM(0xC3), + b43_phy_read(dev, B43_PHY_OFDM(0xC3)) + & 0x7FFF); + } +} + +/* Set the baseband attenuation value on chip. 
*/ +void b43_phy_set_baseband_attenuation(struct b43_wldev *dev, + u16 baseband_attenuation) +{ + struct b43_phy *phy = &dev->phy; + + if (phy->analog == 0) { + b43_write16(dev, B43_MMIO_PHY0, (b43_read16(dev, B43_MMIO_PHY0) + & 0xFFF0) | + baseband_attenuation); + } else if (phy->analog > 1) { + b43_phy_write(dev, B43_PHY_DACCTL, + (b43_phy_read(dev, B43_PHY_DACCTL) + & 0xFFC3) | (baseband_attenuation << 2)); + } else { + b43_phy_write(dev, B43_PHY_DACCTL, + (b43_phy_read(dev, B43_PHY_DACCTL) + & 0xFF87) | (baseband_attenuation << 3)); + } +} + +/* http://bcm-specs.sipsolutions.net/EstimatePowerOut + * This function converts a TSSI value to dBm in Q5.2 + */ +static s8 b43_phy_estimate_power_out(struct b43_wldev *dev, s8 tssi) +{ + struct b43_phy *phy = &dev->phy; + s8 dbm = 0; + s32 tmp; + + tmp = (phy->tgt_idle_tssi - phy->cur_idle_tssi + tssi); + + switch (phy->type) { + case B43_PHYTYPE_A: + tmp += 0x80; + tmp = limit_value(tmp, 0x00, 0xFF); + dbm = phy->tssi2dbm[tmp]; + //TODO: There's a FIXME on the specs + break; + case B43_PHYTYPE_B: + case B43_PHYTYPE_G: + tmp = limit_value(tmp, 0x00, 0x3F); + dbm = phy->tssi2dbm[tmp]; + break; + default: + B43_WARN_ON(1); + } + + return dbm; +} + +void b43_put_attenuation_into_ranges(struct b43_wldev *dev, + int *_bbatt, int *_rfatt) +{ + int rfatt = *_rfatt; + int bbatt = *_bbatt; + struct b43_txpower_lo_control *lo = dev->phy.lo_control; + + /* Get baseband and radio attenuation values into their permitted ranges. + * Radio attenuation affects power level 4 times as much as baseband. */ + + /* Range constants */ + const int rf_min = lo->rfatt_list.min_val; + const int rf_max = lo->rfatt_list.max_val; + const int bb_min = lo->bbatt_list.min_val; + const int bb_max = lo->bbatt_list.max_val; + + while (1) { + if (rfatt > rf_max && bbatt > bb_max - 4) + break; /* Can not get it into ranges */ + if (rfatt < rf_min && bbatt < bb_min + 4) + break; /* Can not get it into ranges */ + if (bbatt > bb_max && rfatt > rf_max - 1) + break; /* Can not get it into ranges */ + if (bbatt < bb_min && rfatt < rf_min + 1) + break; /* Can not get it into ranges */ + + if (bbatt > bb_max) { + bbatt -= 4; + rfatt += 1; + continue; + } + if (bbatt < bb_min) { + bbatt += 4; + rfatt -= 1; + continue; + } + if (rfatt > rf_max) { + rfatt -= 1; + bbatt += 4; + continue; + } + if (rfatt < rf_min) { + rfatt += 1; + bbatt -= 4; + continue; + } + break; + } + + *_rfatt = limit_value(rfatt, rf_min, rf_max); + *_bbatt = limit_value(bbatt, bb_min, bb_max); +} + +/* http://bcm-specs.sipsolutions.net/RecalculateTransmissionPower */ +void b43_phy_xmitpower(struct b43_wldev *dev) +{ + struct ssb_bus *bus = dev->dev->bus; + struct b43_phy *phy = &dev->phy; + + if (phy->cur_idle_tssi == 0) + return; + if ((bus->boardinfo.vendor == SSB_BOARDVENDOR_BCM) && + (bus->boardinfo.type == SSB_BOARD_BU4306)) + return; +#ifdef CONFIG_B43_DEBUG + if (phy->manual_txpower_control) + return; +#endif + + switch (phy->type) { + case B43_PHYTYPE_A:{ + + //TODO: Nothing for A PHYs yet :-/ + + break; + } + case B43_PHYTYPE_B: + case B43_PHYTYPE_G:{ + u16 tmp; + s8 v0, v1, v2, v3; + s8 average; + int max_pwr; + int desired_pwr, estimated_pwr, pwr_adjust; + int rfatt_delta, bbatt_delta; + int rfatt, bbatt; + u8 tx_control; + unsigned long phylock_flags; + + tmp = b43_shm_read16(dev, B43_SHM_SHARED, 0x0058); + v0 = (s8) (tmp & 0x00FF); + v1 = (s8) ((tmp & 0xFF00) >> 8); + tmp = b43_shm_read16(dev, B43_SHM_SHARED, 0x005A); + v2 = (s8) (tmp & 0x00FF); + v3 = (s8) ((tmp & 0xFF00) >> 8); + tmp = 0; + + if (v0 == 
0x7F || v1 == 0x7F || v2 == 0x7F + || v3 == 0x7F) { + tmp = + b43_shm_read16(dev, B43_SHM_SHARED, 0x0070); + v0 = (s8) (tmp & 0x00FF); + v1 = (s8) ((tmp & 0xFF00) >> 8); + tmp = + b43_shm_read16(dev, B43_SHM_SHARED, 0x0072); + v2 = (s8) (tmp & 0x00FF); + v3 = (s8) ((tmp & 0xFF00) >> 8); + if (v0 == 0x7F || v1 == 0x7F || v2 == 0x7F + || v3 == 0x7F) + return; + v0 = (v0 + 0x20) & 0x3F; + v1 = (v1 + 0x20) & 0x3F; + v2 = (v2 + 0x20) & 0x3F; + v3 = (v3 + 0x20) & 0x3F; + tmp = 1; + } + b43_shm_clear_tssi(dev); + + average = (v0 + v1 + v2 + v3 + 2) / 4; + + if (tmp + && (b43_shm_read16(dev, B43_SHM_SHARED, 0x005E) & + 0x8)) + average -= 13; + + estimated_pwr = + b43_phy_estimate_power_out(dev, average); + + max_pwr = dev->dev->bus->sprom.r1.maxpwr_bg; + if ((dev->dev->bus->sprom.r1. + boardflags_lo & B43_BFL_PACTRL) + && (phy->type == B43_PHYTYPE_G)) + max_pwr -= 0x3; + if (unlikely(max_pwr <= 0)) { + b43warn(dev->wl, + "Invalid max-TX-power value in SPROM.\n"); + max_pwr = 60; /* fake it */ + dev->dev->bus->sprom.r1.maxpwr_bg = max_pwr; + } + + /*TODO: + max_pwr = min(REG - dev->dev->bus->sprom.antennagain_bgphy - 0x6, max_pwr) + where REG is the max power as per the regulatory domain + */ + + /* Get desired power (in Q5.2) */ + desired_pwr = INT_TO_Q52(phy->power_level); + /* And limit it. max_pwr already is Q5.2 */ + desired_pwr = limit_value(desired_pwr, 0, max_pwr); + if (b43_debug(dev, B43_DBG_XMITPOWER)) { + b43dbg(dev->wl, + "Current TX power output: " Q52_FMT + " dBm, " "Desired TX power output: " + Q52_FMT " dBm\n", Q52_ARG(estimated_pwr), + Q52_ARG(desired_pwr)); + } + + /* Calculate the adjustment delta. */ + pwr_adjust = desired_pwr - estimated_pwr; + + /* RF attenuation delta. */ + rfatt_delta = ((pwr_adjust + 7) / 8); + /* Lower attenuation => Bigger power output. Negate it. */ + rfatt_delta = -rfatt_delta; + + /* Baseband attenuation delta. */ + bbatt_delta = pwr_adjust / 2; + /* Lower attenuation => Bigger power output. Negate it. */ + bbatt_delta = -bbatt_delta; + /* RF att affects power level 4 times as much as + * Baseband attenuation. Subtract it. */ + bbatt_delta -= 4 * rfatt_delta; + + /* So do we finally need to adjust something? */ + if ((rfatt_delta == 0) && (bbatt_delta == 0)) { + b43_lo_g_ctl_mark_cur_used(dev); + return; + } + + /* Calculate the new attenuation values. */ + bbatt = phy->bbatt.att; + bbatt += bbatt_delta; + rfatt = phy->rfatt.att; + rfatt += rfatt_delta; + + b43_put_attenuation_into_ranges(dev, &bbatt, &rfatt); + tx_control = phy->tx_control; + if ((phy->radio_ver == 0x2050) && (phy->radio_rev == 2)) { + if (rfatt <= 1) { + if (tx_control == 0) { + tx_control = + B43_TXCTL_PA2DB | + B43_TXCTL_TXMIX; + rfatt += 2; + bbatt += 2; + } else if (dev->dev->bus->sprom.r1.
+ boardflags_lo & + B43_BFL_PACTRL) { + bbatt += 4 * (rfatt - 2); + rfatt = 2; + } + } else if (rfatt > 4 && tx_control) { + tx_control = 0; + if (bbatt < 3) { + rfatt -= 3; + bbatt += 2; + } else { + rfatt -= 2; + bbatt -= 2; + } + } + } + /* Save the control values */ + phy->tx_control = tx_control; + b43_put_attenuation_into_ranges(dev, &bbatt, &rfatt); + phy->rfatt.att = rfatt; + phy->bbatt.att = bbatt; + + /* Adjust the hardware */ + b43_phy_lock(dev, phylock_flags); + b43_radio_lock(dev); + b43_set_txpower_g(dev, &phy->bbatt, &phy->rfatt, + phy->tx_control); + b43_lo_g_ctl_mark_cur_used(dev); + b43_radio_unlock(dev); + b43_phy_unlock(dev, phylock_flags); + break; + } + default: + B43_WARN_ON(1); + } +} + +static inline s32 b43_tssi2dbm_ad(s32 num, s32 den) +{ + if (num < 0) + return num / den; + else + return (num + den / 2) / den; +} + +static inline + s8 b43_tssi2dbm_entry(s8 entry[], u8 index, s16 pab0, s16 pab1, s16 pab2) +{ + s32 m1, m2, f = 256, q, delta; + s8 i = 0; + + m1 = b43_tssi2dbm_ad(16 * pab0 + index * pab1, 32); + m2 = max(b43_tssi2dbm_ad(32768 + index * pab2, 256), 1); + do { + if (i > 15) + return -EINVAL; + q = b43_tssi2dbm_ad(f * 4096 - + b43_tssi2dbm_ad(m2 * f, 16) * f, 2048); + delta = abs(q - f); + f = q; + i++; + } while (delta >= 2); + entry[index] = limit_value(b43_tssi2dbm_ad(m1 * f, 8192), -127, 128); + return 0; +} + +/* http://bcm-specs.sipsolutions.net/TSSI_to_DBM_Table */ +int b43_phy_init_tssi2dbm_table(struct b43_wldev *dev) +{ + struct b43_phy *phy = &dev->phy; + s16 pab0, pab1, pab2; + u8 idx; + s8 *dyn_tssi2dbm; + + if (phy->type == B43_PHYTYPE_A) { + pab0 = (s16) (dev->dev->bus->sprom.r1.pa1b0); + pab1 = (s16) (dev->dev->bus->sprom.r1.pa1b1); + pab2 = (s16) (dev->dev->bus->sprom.r1.pa1b2); + } else { + pab0 = (s16) (dev->dev->bus->sprom.r1.pa0b0); + pab1 = (s16) (dev->dev->bus->sprom.r1.pa0b1); + pab2 = (s16) (dev->dev->bus->sprom.r1.pa0b2); + } + + if ((dev->dev->bus->chip_id == 0x4301) && (phy->radio_ver != 0x2050)) { + phy->tgt_idle_tssi = 0x34; + phy->tssi2dbm = b43_tssi2dbm_b_table; + return 0; + } + + if (pab0 != 0 && pab1 != 0 && pab2 != 0 && + pab0 != -1 && pab1 != -1 && pab2 != -1) { + /* The pabX values are set in SPROM. Use them. */ + if (phy->type == B43_PHYTYPE_A) { + if ((s8) dev->dev->bus->sprom.r1.itssi_a != 0 && + (s8) dev->dev->bus->sprom.r1.itssi_a != -1) + phy->tgt_idle_tssi = + (s8) (dev->dev->bus->sprom.r1.itssi_a); + else + phy->tgt_idle_tssi = 62; + } else { + if ((s8) dev->dev->bus->sprom.r1.itssi_bg != 0 && + (s8) dev->dev->bus->sprom.r1.itssi_bg != -1) + phy->tgt_idle_tssi = + (s8) (dev->dev->bus->sprom.r1.itssi_bg); + else + phy->tgt_idle_tssi = 62; + } + dyn_tssi2dbm = kmalloc(64, GFP_KERNEL); + if (dyn_tssi2dbm == NULL) { + b43err(dev->wl, "Could not allocate memory " + "for tssi2dbm table\n"); + return -ENOMEM; + } + for (idx = 0; idx < 64; idx++) + if (b43_tssi2dbm_entry + (dyn_tssi2dbm, idx, pab0, pab1, pab2)) { + phy->tssi2dbm = NULL; + b43err(dev->wl, "Could not generate " + "tssi2dBm table\n"); + kfree(dyn_tssi2dbm); + return -ENODEV; + } + phy->tssi2dbm = dyn_tssi2dbm; + phy->dyn_tssi_tbl = 1; + } else { + /* pabX values not set in SPROM. */ + switch (phy->type) { + case B43_PHYTYPE_A: + /* APHY needs a generated table.
*/ + phy->tssi2dbm = NULL; + b43err(dev->wl, "Could not generate tssi2dBm " + "table (wrong SPROM info)!\n"); + return -ENODEV; + case B43_PHYTYPE_B: + phy->tgt_idle_tssi = 0x34; + phy->tssi2dbm = b43_tssi2dbm_b_table; + break; + case B43_PHYTYPE_G: + phy->tgt_idle_tssi = 0x34; + phy->tssi2dbm = b43_tssi2dbm_g_table; + break; + } + } + + return 0; +} + +int b43_phy_init(struct b43_wldev *dev) +{ + struct b43_phy *phy = &dev->phy; + int err = -ENODEV; + + switch (phy->type) { + case B43_PHYTYPE_A: + if (phy->rev == 2 || phy->rev == 3) { + b43_phy_inita(dev); + err = 0; + } + break; + case B43_PHYTYPE_B: + switch (phy->rev) { + case 2: + b43_phy_initb2(dev); + err = 0; + break; + case 4: + b43_phy_initb4(dev); + err = 0; + break; + case 5: + b43_phy_initb5(dev); + err = 0; + break; + case 6: + b43_phy_initb6(dev); + err = 0; + break; + } + break; + case B43_PHYTYPE_G: + b43_phy_initg(dev); + err = 0; + break; + } + if (err) + b43err(dev->wl, "Unknown PHYTYPE found\n"); + + return err; +} + +void b43_set_rx_antenna(struct b43_wldev *dev, int antenna) +{ + struct b43_phy *phy = &dev->phy; + u32 hf; + u16 tmp; + int autodiv = 0; + + if (antenna == B43_ANTENNA_AUTO0 || antenna == B43_ANTENNA_AUTO1) + autodiv = 1; + + hf = b43_hf_read(dev); + hf &= ~B43_HF_ANTDIVHELP; + b43_hf_write(dev, hf); + + switch (phy->type) { + case B43_PHYTYPE_A: + case B43_PHYTYPE_G: + tmp = b43_phy_read(dev, B43_PHY_BBANDCFG); + tmp &= ~B43_PHY_BBANDCFG_RXANT; + tmp |= (autodiv ? B43_ANTENNA_AUTO0 : antenna) + << B43_PHY_BBANDCFG_RXANT_SHIFT; + b43_phy_write(dev, B43_PHY_BBANDCFG, tmp); + + if (autodiv) { + tmp = b43_phy_read(dev, B43_PHY_ANTDWELL); + if (antenna == B43_ANTENNA_AUTO0) + tmp &= ~B43_PHY_ANTDWELL_AUTODIV1; + else + tmp |= B43_PHY_ANTDWELL_AUTODIV1; + b43_phy_write(dev, B43_PHY_ANTDWELL, tmp); + } + if (phy->type == B43_PHYTYPE_G) { + tmp = b43_phy_read(dev, B43_PHY_ANTWRSETT); + if (autodiv) + tmp |= B43_PHY_ANTWRSETT_ARXDIV; + else + tmp &= ~B43_PHY_ANTWRSETT_ARXDIV; + b43_phy_write(dev, B43_PHY_ANTWRSETT, tmp); + if (phy->rev >= 2) { + tmp = b43_phy_read(dev, B43_PHY_OFDM61); + tmp |= B43_PHY_OFDM61_10; + b43_phy_write(dev, B43_PHY_OFDM61, tmp); + + tmp = + b43_phy_read(dev, B43_PHY_DIVSRCHGAINBACK); + tmp = (tmp & 0xFF00) | 0x15; + b43_phy_write(dev, B43_PHY_DIVSRCHGAINBACK, + tmp); + + if (phy->rev == 2) { + b43_phy_write(dev, B43_PHY_ADIVRELATED, + 8); + } else { + tmp = + b43_phy_read(dev, + B43_PHY_ADIVRELATED); + tmp = (tmp & 0xFF00) | 8; + b43_phy_write(dev, B43_PHY_ADIVRELATED, + tmp); + } + } + if (phy->rev >= 6) + b43_phy_write(dev, B43_PHY_OFDM9B, 0xDC); + } else { + if (phy->rev < 3) { + tmp = b43_phy_read(dev, B43_PHY_ANTDWELL); + tmp = (tmp & 0xFF00) | 0x24; + b43_phy_write(dev, B43_PHY_ANTDWELL, tmp); + } else { + tmp = b43_phy_read(dev, B43_PHY_OFDM61); + tmp |= 0x10; + b43_phy_write(dev, B43_PHY_OFDM61, tmp); + if (phy->analog == 3) { + b43_phy_write(dev, B43_PHY_CLIPPWRDOWNT, + 0x1D); + b43_phy_write(dev, B43_PHY_ADIVRELATED, + 8); + } else { + b43_phy_write(dev, B43_PHY_CLIPPWRDOWNT, + 0x3A); + tmp = + b43_phy_read(dev, + B43_PHY_ADIVRELATED); + tmp = (tmp & 0xFF00) | 8; + b43_phy_write(dev, B43_PHY_ADIVRELATED, + tmp); + } + } + } + break; + case B43_PHYTYPE_B: + tmp = b43_phy_read(dev, B43_PHY_CCKBBANDCFG); + tmp &= ~B43_PHY_BBANDCFG_RXANT; + tmp |= (autodiv ? 
B43_ANTENNA_AUTO0 : antenna) + << B43_PHY_BBANDCFG_RXANT_SHIFT; + b43_phy_write(dev, B43_PHY_CCKBBANDCFG, tmp); + break; + default: + B43_WARN_ON(1); + } + + hf |= B43_HF_ANTDIVHELP; + b43_hf_write(dev, hf); +} + +/* Get the freq, as it has to be written to the device. */ +static inline u16 channel2freq_bg(u8 channel) +{ + B43_WARN_ON(!(channel >= 1 && channel <= 14)); + + return b43_radio_channel_codes_bg[channel - 1]; +} + +/* Get the freq, as it has to be written to the device. */ +static inline u16 channel2freq_a(u8 channel) +{ + B43_WARN_ON(channel > 200); + + return (5000 + 5 * channel); +} + +void b43_radio_lock(struct b43_wldev *dev) +{ + u32 macctl; + + macctl = b43_read32(dev, B43_MMIO_MACCTL); + macctl |= B43_MACCTL_RADIOLOCK; + b43_write32(dev, B43_MMIO_MACCTL, macctl); + /* Commit the write and wait for the device + * to exit any radio register access. */ + b43_read32(dev, B43_MMIO_MACCTL); + udelay(10); +} + +void b43_radio_unlock(struct b43_wldev *dev) +{ + u32 macctl; + + /* Commit any write */ + b43_read16(dev, B43_MMIO_PHY_VER); + /* unlock */ + macctl = b43_read32(dev, B43_MMIO_MACCTL); + macctl &= ~B43_MACCTL_RADIOLOCK; + b43_write32(dev, B43_MMIO_MACCTL, macctl); +} + +u16 b43_radio_read16(struct b43_wldev *dev, u16 offset) +{ + struct b43_phy *phy = &dev->phy; + + switch (phy->type) { + case B43_PHYTYPE_A: + offset |= 0x0040; + break; + case B43_PHYTYPE_B: + if (phy->radio_ver == 0x2053) { + if (offset < 0x70) + offset += 0x80; + else if (offset < 0x80) + offset += 0x70; + } else if (phy->radio_ver == 0x2050) { + offset |= 0x80; + } else + B43_WARN_ON(1); + break; + case B43_PHYTYPE_G: + offset |= 0x80; + break; + } + + b43_write16(dev, B43_MMIO_RADIO_CONTROL, offset); + return b43_read16(dev, B43_MMIO_RADIO_DATA_LOW); +} + +void b43_radio_write16(struct b43_wldev *dev, u16 offset, u16 val) +{ + b43_write16(dev, B43_MMIO_RADIO_CONTROL, offset); + mmiowb(); + b43_write16(dev, B43_MMIO_RADIO_DATA_LOW, val); +} + +static void b43_set_all_gains(struct b43_wldev *dev, + s16 first, s16 second, s16 third) +{ + struct b43_phy *phy = &dev->phy; + u16 i; + u16 start = 0x08, end = 0x18; + u16 tmp; + u16 table; + + if (phy->rev <= 1) { + start = 0x10; + end = 0x20; + } + + table = B43_OFDMTAB_GAINX; + if (phy->rev <= 1) + table = B43_OFDMTAB_GAINX_R1; + for (i = 0; i < 4; i++) + b43_ofdmtab_write16(dev, table, i, first); + + for (i = start; i < end; i++) + b43_ofdmtab_write16(dev, table, i, second); + + if (third != -1) { + tmp = ((u16) third << 14) | ((u16) third << 6); + b43_phy_write(dev, 0x04A0, + (b43_phy_read(dev, 0x04A0) & 0xBFBF) | tmp); + b43_phy_write(dev, 0x04A1, + (b43_phy_read(dev, 0x04A1) & 0xBFBF) | tmp); + b43_phy_write(dev, 0x04A2, + (b43_phy_read(dev, 0x04A2) & 0xBFBF) | tmp); + } + b43_dummy_transmission(dev); +} + +static void b43_set_original_gains(struct b43_wldev *dev) +{ + struct b43_phy *phy = &dev->phy; + u16 i, tmp; + u16 table; + u16 start = 0x0008, end = 0x0018; + + if (phy->rev <= 1) { + start = 0x0010; + end = 0x0020; + } + + table = B43_OFDMTAB_GAINX; + if (phy->rev <= 1) + table = B43_OFDMTAB_GAINX_R1; + for (i = 0; i < 4; i++) { + tmp = (i & 0xFFFC); + tmp |= (i & 0x0001) << 1; + tmp |= (i & 0x0002) >> 1; + + b43_ofdmtab_write16(dev, table, i, tmp); + } + + for (i = start; i < end; i++) + b43_ofdmtab_write16(dev, table, i, i - start); + + b43_phy_write(dev, 0x04A0, + (b43_phy_read(dev, 0x04A0) & 0xBFBF) | 0x4040); + b43_phy_write(dev, 0x04A1, + (b43_phy_read(dev, 0x04A1) & 0xBFBF) | 0x4040); + b43_phy_write(dev, 0x04A2, + (b43_phy_read(dev, 
0x04A2) & 0xBFBF) | 0x4000); + b43_dummy_transmission(dev); +} + +/* Synthetic PU workaround */ +static void b43_synth_pu_workaround(struct b43_wldev *dev, u8 channel) +{ + struct b43_phy *phy = &dev->phy; + + might_sleep(); + + if (phy->radio_ver != 0x2050 || phy->radio_rev >= 6) { + /* We do not need the workaround. */ + return; + } + + if (channel <= 10) { + b43_write16(dev, B43_MMIO_CHANNEL, + channel2freq_bg(channel + 4)); + } else { + b43_write16(dev, B43_MMIO_CHANNEL, channel2freq_bg(1)); + } + msleep(1); + b43_write16(dev, B43_MMIO_CHANNEL, channel2freq_bg(channel)); +} + +u8 b43_radio_aci_detect(struct b43_wldev *dev, u8 channel) +{ + struct b43_phy *phy = &dev->phy; + u8 ret = 0; + u16 saved, rssi, temp; + int i, j = 0; + + saved = b43_phy_read(dev, 0x0403); + b43_radio_selectchannel(dev, channel, 0); + b43_phy_write(dev, 0x0403, (saved & 0xFFF8) | 5); + if (phy->aci_hw_rssi) + rssi = b43_phy_read(dev, 0x048A) & 0x3F; + else + rssi = saved & 0x3F; + /* clamp temp to signed 5bit */ + if (rssi > 32) + rssi -= 64; + for (i = 0; i < 100; i++) { + temp = (b43_phy_read(dev, 0x047F) >> 8) & 0x3F; + if (temp > 32) + temp -= 64; + if (temp < rssi) + j++; + if (j >= 20) + ret = 1; + } + b43_phy_write(dev, 0x0403, saved); + + return ret; +} + +u8 b43_radio_aci_scan(struct b43_wldev * dev) +{ + struct b43_phy *phy = &dev->phy; + u8 ret[13]; + unsigned int channel = phy->channel; + unsigned int i, j, start, end; + unsigned long phylock_flags; + + if (!((phy->type == B43_PHYTYPE_G) && (phy->rev > 0))) + return 0; + + b43_phy_lock(dev, phylock_flags); + b43_radio_lock(dev); + b43_phy_write(dev, 0x0802, b43_phy_read(dev, 0x0802) & 0xFFFC); + b43_phy_write(dev, B43_PHY_G_CRS, + b43_phy_read(dev, B43_PHY_G_CRS) & 0x7FFF); + b43_set_all_gains(dev, 3, 8, 1); + + start = (channel - 5 > 0) ? channel - 5 : 1; + end = (channel + 5 < 14) ? channel + 5 : 13; + + for (i = start; i <= end; i++) { + if (abs(channel - i) > 2) + ret[i - 1] = b43_radio_aci_detect(dev, i); + } + b43_radio_selectchannel(dev, channel, 0); + b43_phy_write(dev, 0x0802, + (b43_phy_read(dev, 0x0802) & 0xFFFC) | 0x0003); + b43_phy_write(dev, 0x0403, b43_phy_read(dev, 0x0403) & 0xFFF8); + b43_phy_write(dev, B43_PHY_G_CRS, + b43_phy_read(dev, B43_PHY_G_CRS) | 0x8000); + b43_set_original_gains(dev); + for (i = 0; i < 13; i++) { + if (!ret[i]) + continue; + end = (i + 5 < 13) ? 
i + 5 : 13; + for (j = i; j < end; j++) + ret[j] = 1; + } + b43_radio_unlock(dev); + b43_phy_unlock(dev, phylock_flags); + + return ret[channel - 1]; +} + +/* http://bcm-specs.sipsolutions.net/NRSSILookupTable */ +void b43_nrssi_hw_write(struct b43_wldev *dev, u16 offset, s16 val) +{ + b43_phy_write(dev, B43_PHY_NRSSILT_CTRL, offset); + mmiowb(); + b43_phy_write(dev, B43_PHY_NRSSILT_DATA, (u16) val); +} + +/* http://bcm-specs.sipsolutions.net/NRSSILookupTable */ +s16 b43_nrssi_hw_read(struct b43_wldev *dev, u16 offset) +{ + u16 val; + + b43_phy_write(dev, B43_PHY_NRSSILT_CTRL, offset); + val = b43_phy_read(dev, B43_PHY_NRSSILT_DATA); + + return (s16) val; +} + +/* http://bcm-specs.sipsolutions.net/NRSSILookupTable */ +void b43_nrssi_hw_update(struct b43_wldev *dev, u16 val) +{ + u16 i; + s16 tmp; + + for (i = 0; i < 64; i++) { + tmp = b43_nrssi_hw_read(dev, i); + tmp -= val; + tmp = limit_value(tmp, -32, 31); + b43_nrssi_hw_write(dev, i, tmp); + } +} + +/* http://bcm-specs.sipsolutions.net/NRSSILookupTable */ +void b43_nrssi_mem_update(struct b43_wldev *dev) +{ + struct b43_phy *phy = &dev->phy; + s16 i, delta; + s32 tmp; + + delta = 0x1F - phy->nrssi[0]; + for (i = 0; i < 64; i++) { + tmp = (i - delta) * phy->nrssislope; + tmp /= 0x10000; + tmp += 0x3A; + tmp = limit_value(tmp, 0, 0x3F); + phy->nrssi_lt[i] = tmp; + } +} + +static void b43_calc_nrssi_offset(struct b43_wldev *dev) +{ + struct b43_phy *phy = &dev->phy; + u16 backup[20] = { 0 }; + s16 v47F; + u16 i; + u16 saved = 0xFFFF; + + backup[0] = b43_phy_read(dev, 0x0001); + backup[1] = b43_phy_read(dev, 0x0811); + backup[2] = b43_phy_read(dev, 0x0812); + if (phy->rev != 1) { /* Not in specs, but needed to prevent PPC machine check */ + backup[3] = b43_phy_read(dev, 0x0814); + backup[4] = b43_phy_read(dev, 0x0815); + } + backup[5] = b43_phy_read(dev, 0x005A); + backup[6] = b43_phy_read(dev, 0x0059); + backup[7] = b43_phy_read(dev, 0x0058); + backup[8] = b43_phy_read(dev, 0x000A); + backup[9] = b43_phy_read(dev, 0x0003); + backup[10] = b43_radio_read16(dev, 0x007A); + backup[11] = b43_radio_read16(dev, 0x0043); + + b43_phy_write(dev, 0x0429, b43_phy_read(dev, 0x0429) & 0x7FFF); + b43_phy_write(dev, 0x0001, + (b43_phy_read(dev, 0x0001) & 0x3FFF) | 0x4000); + b43_phy_write(dev, 0x0811, b43_phy_read(dev, 0x0811) | 0x000C); + b43_phy_write(dev, 0x0812, + (b43_phy_read(dev, 0x0812) & 0xFFF3) | 0x0004); + b43_phy_write(dev, 0x0802, b43_phy_read(dev, 0x0802) & ~(0x1 | 0x2)); + if (phy->rev >= 6) { + backup[12] = b43_phy_read(dev, 0x002E); + backup[13] = b43_phy_read(dev, 0x002F); + backup[14] = b43_phy_read(dev, 0x080F); + backup[15] = b43_phy_read(dev, 0x0810); + backup[16] = b43_phy_read(dev, 0x0801); + backup[17] = b43_phy_read(dev, 0x0060); + backup[18] = b43_phy_read(dev, 0x0014); + backup[19] = b43_phy_read(dev, 0x0478); + + b43_phy_write(dev, 0x002E, 0); + b43_phy_write(dev, 0x002F, 0); + b43_phy_write(dev, 0x080F, 0); + b43_phy_write(dev, 0x0810, 0); + b43_phy_write(dev, 0x0478, b43_phy_read(dev, 0x0478) | 0x0100); + b43_phy_write(dev, 0x0801, b43_phy_read(dev, 0x0801) | 0x0040); + b43_phy_write(dev, 0x0060, b43_phy_read(dev, 0x0060) | 0x0040); + b43_phy_write(dev, 0x0014, b43_phy_read(dev, 0x0014) | 0x0200); + } + b43_radio_write16(dev, 0x007A, b43_radio_read16(dev, 0x007A) | 0x0070); + b43_radio_write16(dev, 0x007A, b43_radio_read16(dev, 0x007A) | 0x0080); + udelay(30); + + v47F = (s16) ((b43_phy_read(dev, 0x047F) >> 8) & 0x003F); + if (v47F >= 0x20) + v47F -= 0x40; + if (v47F == 31) { + for (i = 7; i >= 4; i--) { + 
b43_radio_write16(dev, 0x007B, i); + udelay(20); + v47F = + (s16) ((b43_phy_read(dev, 0x047F) >> 8) & 0x003F); + if (v47F >= 0x20) + v47F -= 0x40; + if (v47F < 31 && saved == 0xFFFF) + saved = i; + } + if (saved == 0xFFFF) + saved = 4; + } else { + b43_radio_write16(dev, 0x007A, + b43_radio_read16(dev, 0x007A) & 0x007F); + if (phy->rev != 1) { /* Not in specs, but needed to prevent PPC machine check */ + b43_phy_write(dev, 0x0814, + b43_phy_read(dev, 0x0814) | 0x0001); + b43_phy_write(dev, 0x0815, + b43_phy_read(dev, 0x0815) & 0xFFFE); + } + b43_phy_write(dev, 0x0811, b43_phy_read(dev, 0x0811) | 0x000C); + b43_phy_write(dev, 0x0812, b43_phy_read(dev, 0x0812) | 0x000C); + b43_phy_write(dev, 0x0811, b43_phy_read(dev, 0x0811) | 0x0030); + b43_phy_write(dev, 0x0812, b43_phy_read(dev, 0x0812) | 0x0030); + b43_phy_write(dev, 0x005A, 0x0480); + b43_phy_write(dev, 0x0059, 0x0810); + b43_phy_write(dev, 0x0058, 0x000D); + if (phy->rev == 0) { + b43_phy_write(dev, 0x0003, 0x0122); + } else { + b43_phy_write(dev, 0x000A, b43_phy_read(dev, 0x000A) + | 0x2000); + } + if (phy->rev != 1) { /* Not in specs, but needed to prevent PPC machine check */ + b43_phy_write(dev, 0x0814, + b43_phy_read(dev, 0x0814) | 0x0004); + b43_phy_write(dev, 0x0815, + b43_phy_read(dev, 0x0815) & 0xFFFB); + } + b43_phy_write(dev, 0x0003, (b43_phy_read(dev, 0x0003) & 0xFF9F) + | 0x0040); + b43_radio_write16(dev, 0x007A, + b43_radio_read16(dev, 0x007A) | 0x000F); + b43_set_all_gains(dev, 3, 0, 1); + b43_radio_write16(dev, 0x0043, (b43_radio_read16(dev, 0x0043) + & 0x00F0) | 0x000F); + udelay(30); + v47F = (s16) ((b43_phy_read(dev, 0x047F) >> 8) & 0x003F); + if (v47F >= 0x20) + v47F -= 0x40; + if (v47F == -32) { + for (i = 0; i < 4; i++) { + b43_radio_write16(dev, 0x007B, i); + udelay(20); + v47F = + (s16) ((b43_phy_read(dev, 0x047F) >> 8) & + 0x003F); + if (v47F >= 0x20) + v47F -= 0x40; + if (v47F > -31 && saved == 0xFFFF) + saved = i; + } + if (saved == 0xFFFF) + saved = 3; + } else + saved = 0; + } + b43_radio_write16(dev, 0x007B, saved); + + if (phy->rev >= 6) { + b43_phy_write(dev, 0x002E, backup[12]); + b43_phy_write(dev, 0x002F, backup[13]); + b43_phy_write(dev, 0x080F, backup[14]); + b43_phy_write(dev, 0x0810, backup[15]); + } + if (phy->rev != 1) { /* Not in specs, but needed to prevent PPC machine check */ + b43_phy_write(dev, 0x0814, backup[3]); + b43_phy_write(dev, 0x0815, backup[4]); + } + b43_phy_write(dev, 0x005A, backup[5]); + b43_phy_write(dev, 0x0059, backup[6]); + b43_phy_write(dev, 0x0058, backup[7]); + b43_phy_write(dev, 0x000A, backup[8]); + b43_phy_write(dev, 0x0003, backup[9]); + b43_radio_write16(dev, 0x0043, backup[11]); + b43_radio_write16(dev, 0x007A, backup[10]); + b43_phy_write(dev, 0x0802, b43_phy_read(dev, 0x0802) | 0x1 | 0x2); + b43_phy_write(dev, 0x0429, b43_phy_read(dev, 0x0429) | 0x8000); + b43_set_original_gains(dev); + if (phy->rev >= 6) { + b43_phy_write(dev, 0x0801, backup[16]); + b43_phy_write(dev, 0x0060, backup[17]); + b43_phy_write(dev, 0x0014, backup[18]); + b43_phy_write(dev, 0x0478, backup[19]); + } + b43_phy_write(dev, 0x0001, backup[0]); + b43_phy_write(dev, 0x0812, backup[2]); + b43_phy_write(dev, 0x0811, backup[1]); +} + +void b43_calc_nrssi_slope(struct b43_wldev *dev) +{ + struct b43_phy *phy = &dev->phy; + u16 backup[18] = { 0 }; + u16 tmp; + s16 nrssi0, nrssi1; + + switch (phy->type) { + case B43_PHYTYPE_B: + backup[0] = b43_radio_read16(dev, 0x007A); + backup[1] = b43_radio_read16(dev, 0x0052); + backup[2] = b43_radio_read16(dev, 0x0043); + backup[3] = b43_phy_read(dev, 
0x0030); + backup[4] = b43_phy_read(dev, 0x0026); + backup[5] = b43_phy_read(dev, 0x0015); + backup[6] = b43_phy_read(dev, 0x002A); + backup[7] = b43_phy_read(dev, 0x0020); + backup[8] = b43_phy_read(dev, 0x005A); + backup[9] = b43_phy_read(dev, 0x0059); + backup[10] = b43_phy_read(dev, 0x0058); + backup[11] = b43_read16(dev, 0x03E2); + backup[12] = b43_read16(dev, 0x03E6); + backup[13] = b43_read16(dev, B43_MMIO_CHANNEL_EXT); + + tmp = b43_radio_read16(dev, 0x007A); + tmp &= (phy->rev >= 5) ? 0x007F : 0x000F; + b43_radio_write16(dev, 0x007A, tmp); + b43_phy_write(dev, 0x0030, 0x00FF); + b43_write16(dev, 0x03EC, 0x7F7F); + b43_phy_write(dev, 0x0026, 0x0000); + b43_phy_write(dev, 0x0015, b43_phy_read(dev, 0x0015) | 0x0020); + b43_phy_write(dev, 0x002A, 0x08A3); + b43_radio_write16(dev, 0x007A, + b43_radio_read16(dev, 0x007A) | 0x0080); + + nrssi0 = (s16) b43_phy_read(dev, 0x0027); + b43_radio_write16(dev, 0x007A, + b43_radio_read16(dev, 0x007A) & 0x007F); + if (phy->rev >= 2) { + b43_write16(dev, 0x03E6, 0x0040); + } else if (phy->rev == 0) { + b43_write16(dev, 0x03E6, 0x0122); + } else { + b43_write16(dev, B43_MMIO_CHANNEL_EXT, + b43_read16(dev, + B43_MMIO_CHANNEL_EXT) & 0x2000); + } + b43_phy_write(dev, 0x0020, 0x3F3F); + b43_phy_write(dev, 0x0015, 0xF330); + b43_radio_write16(dev, 0x005A, 0x0060); + b43_radio_write16(dev, 0x0043, + b43_radio_read16(dev, 0x0043) & 0x00F0); + b43_phy_write(dev, 0x005A, 0x0480); + b43_phy_write(dev, 0x0059, 0x0810); + b43_phy_write(dev, 0x0058, 0x000D); + udelay(20); + + nrssi1 = (s16) b43_phy_read(dev, 0x0027); + b43_phy_write(dev, 0x0030, backup[3]); + b43_radio_write16(dev, 0x007A, backup[0]); + b43_write16(dev, 0x03E2, backup[11]); + b43_phy_write(dev, 0x0026, backup[4]); + b43_phy_write(dev, 0x0015, backup[5]); + b43_phy_write(dev, 0x002A, backup[6]); + b43_synth_pu_workaround(dev, phy->channel); + if (phy->rev != 0) + b43_write16(dev, 0x03F4, backup[13]); + + b43_phy_write(dev, 0x0020, backup[7]); + b43_phy_write(dev, 0x005A, backup[8]); + b43_phy_write(dev, 0x0059, backup[9]); + b43_phy_write(dev, 0x0058, backup[10]); + b43_radio_write16(dev, 0x0052, backup[1]); + b43_radio_write16(dev, 0x0043, backup[2]); + + if (nrssi0 == nrssi1) + phy->nrssislope = 0x00010000; + else + phy->nrssislope = 0x00400000 / (nrssi0 - nrssi1); + + if (nrssi0 <= -4) { + phy->nrssi[0] = nrssi0; + phy->nrssi[1] = nrssi1; + } + break; + case B43_PHYTYPE_G: + if (phy->radio_rev >= 9) + return; + if (phy->radio_rev == 8) + b43_calc_nrssi_offset(dev); + + b43_phy_write(dev, B43_PHY_G_CRS, + b43_phy_read(dev, B43_PHY_G_CRS) & 0x7FFF); + b43_phy_write(dev, 0x0802, b43_phy_read(dev, 0x0802) & 0xFFFC); + backup[7] = b43_read16(dev, 0x03E2); + b43_write16(dev, 0x03E2, b43_read16(dev, 0x03E2) | 0x8000); + backup[0] = b43_radio_read16(dev, 0x007A); + backup[1] = b43_radio_read16(dev, 0x0052); + backup[2] = b43_radio_read16(dev, 0x0043); + backup[3] = b43_phy_read(dev, 0x0015); + backup[4] = b43_phy_read(dev, 0x005A); + backup[5] = b43_phy_read(dev, 0x0059); + backup[6] = b43_phy_read(dev, 0x0058); + backup[8] = b43_read16(dev, 0x03E6); + backup[9] = b43_read16(dev, B43_MMIO_CHANNEL_EXT); + if (phy->rev >= 3) { + backup[10] = b43_phy_read(dev, 0x002E); + backup[11] = b43_phy_read(dev, 0x002F); + backup[12] = b43_phy_read(dev, 0x080F); + backup[13] = b43_phy_read(dev, B43_PHY_G_LO_CONTROL); + backup[14] = b43_phy_read(dev, 0x0801); + backup[15] = b43_phy_read(dev, 0x0060); + backup[16] = b43_phy_read(dev, 0x0014); + backup[17] = b43_phy_read(dev, 0x0478); + b43_phy_write(dev, 0x002E, 0); 
+ b43_phy_write(dev, B43_PHY_G_LO_CONTROL, 0); + switch (phy->rev) { + case 4: + case 6: + case 7: + b43_phy_write(dev, 0x0478, + b43_phy_read(dev, 0x0478) + | 0x0100); + b43_phy_write(dev, 0x0801, + b43_phy_read(dev, 0x0801) + | 0x0040); + break; + case 3: + case 5: + b43_phy_write(dev, 0x0801, + b43_phy_read(dev, 0x0801) + & 0xFFBF); + break; + } + b43_phy_write(dev, 0x0060, b43_phy_read(dev, 0x0060) + | 0x0040); + b43_phy_write(dev, 0x0014, b43_phy_read(dev, 0x0014) + | 0x0200); + } + b43_radio_write16(dev, 0x007A, + b43_radio_read16(dev, 0x007A) | 0x0070); + b43_set_all_gains(dev, 0, 8, 0); + b43_radio_write16(dev, 0x007A, + b43_radio_read16(dev, 0x007A) & 0x00F7); + if (phy->rev >= 2) { + b43_phy_write(dev, 0x0811, + (b43_phy_read(dev, 0x0811) & 0xFFCF) | + 0x0030); + b43_phy_write(dev, 0x0812, + (b43_phy_read(dev, 0x0812) & 0xFFCF) | + 0x0010); + } + b43_radio_write16(dev, 0x007A, + b43_radio_read16(dev, 0x007A) | 0x0080); + udelay(20); + + nrssi0 = (s16) ((b43_phy_read(dev, 0x047F) >> 8) & 0x003F); + if (nrssi0 >= 0x0020) + nrssi0 -= 0x0040; + + b43_radio_write16(dev, 0x007A, + b43_radio_read16(dev, 0x007A) & 0x007F); + if (phy->rev >= 2) { + b43_phy_write(dev, 0x0003, (b43_phy_read(dev, 0x0003) + & 0xFF9F) | 0x0040); + } + + b43_write16(dev, B43_MMIO_CHANNEL_EXT, + b43_read16(dev, B43_MMIO_CHANNEL_EXT) + | 0x2000); + b43_radio_write16(dev, 0x007A, + b43_radio_read16(dev, 0x007A) | 0x000F); + b43_phy_write(dev, 0x0015, 0xF330); + if (phy->rev >= 2) { + b43_phy_write(dev, 0x0812, + (b43_phy_read(dev, 0x0812) & 0xFFCF) | + 0x0020); + b43_phy_write(dev, 0x0811, + (b43_phy_read(dev, 0x0811) & 0xFFCF) | + 0x0020); + } + + b43_set_all_gains(dev, 3, 0, 1); + if (phy->radio_rev == 8) { + b43_radio_write16(dev, 0x0043, 0x001F); + } else { + tmp = b43_radio_read16(dev, 0x0052) & 0xFF0F; + b43_radio_write16(dev, 0x0052, tmp | 0x0060); + tmp = b43_radio_read16(dev, 0x0043) & 0xFFF0; + b43_radio_write16(dev, 0x0043, tmp | 0x0009); + } + b43_phy_write(dev, 0x005A, 0x0480); + b43_phy_write(dev, 0x0059, 0x0810); + b43_phy_write(dev, 0x0058, 0x000D); + udelay(20); + nrssi1 = (s16) ((b43_phy_read(dev, 0x047F) >> 8) & 0x003F); + if (nrssi1 >= 0x0020) + nrssi1 -= 0x0040; + if (nrssi0 == nrssi1) + phy->nrssislope = 0x00010000; + else + phy->nrssislope = 0x00400000 / (nrssi0 - nrssi1); + if (nrssi0 >= -4) { + phy->nrssi[0] = nrssi1; + phy->nrssi[1] = nrssi0; + } + if (phy->rev >= 3) { + b43_phy_write(dev, 0x002E, backup[10]); + b43_phy_write(dev, 0x002F, backup[11]); + b43_phy_write(dev, 0x080F, backup[12]); + b43_phy_write(dev, B43_PHY_G_LO_CONTROL, backup[13]); + } + if (phy->rev >= 2) { + b43_phy_write(dev, 0x0812, + b43_phy_read(dev, 0x0812) & 0xFFCF); + b43_phy_write(dev, 0x0811, + b43_phy_read(dev, 0x0811) & 0xFFCF); + } + + b43_radio_write16(dev, 0x007A, backup[0]); + b43_radio_write16(dev, 0x0052, backup[1]); + b43_radio_write16(dev, 0x0043, backup[2]); + b43_write16(dev, 0x03E2, backup[7]); + b43_write16(dev, 0x03E6, backup[8]); + b43_write16(dev, B43_MMIO_CHANNEL_EXT, backup[9]); + b43_phy_write(dev, 0x0015, backup[3]); + b43_phy_write(dev, 0x005A, backup[4]); + b43_phy_write(dev, 0x0059, backup[5]); + b43_phy_write(dev, 0x0058, backup[6]); + b43_synth_pu_workaround(dev, phy->channel); + b43_phy_write(dev, 0x0802, + b43_phy_read(dev, 0x0802) | (0x0001 | 0x0002)); + b43_set_original_gains(dev); + b43_phy_write(dev, B43_PHY_G_CRS, + b43_phy_read(dev, B43_PHY_G_CRS) | 0x8000); + if (phy->rev >= 3) { + b43_phy_write(dev, 0x0801, backup[14]); + b43_phy_write(dev, 0x0060, backup[15]); + 
b43_phy_write(dev, 0x0014, backup[16]); + b43_phy_write(dev, 0x0478, backup[17]); + } + b43_nrssi_mem_update(dev); + b43_calc_nrssi_threshold(dev); + break; + default: + B43_WARN_ON(1); + } +} + +void b43_calc_nrssi_threshold(struct b43_wldev *dev) +{ + struct b43_phy *phy = &dev->phy; + s32 threshold; + s32 a, b; + s16 tmp16; + u16 tmp_u16; + + switch (phy->type) { + case B43_PHYTYPE_B:{ + if (phy->radio_ver != 0x2050) + return; + if (! + (dev->dev->bus->sprom.r1. + boardflags_lo & B43_BFL_RSSI)) + return; + + if (phy->radio_rev >= 6) { + threshold = + (phy->nrssi[1] - phy->nrssi[0]) * 32; + threshold += 20 * (phy->nrssi[0] + 1); + threshold /= 40; + } else + threshold = phy->nrssi[1] - 5; + + threshold = limit_value(threshold, 0, 0x3E); + b43_phy_read(dev, 0x0020); /* dummy read */ + b43_phy_write(dev, 0x0020, + (((u16) threshold) << 8) | 0x001C); + + if (phy->radio_rev >= 6) { + b43_phy_write(dev, 0x0087, 0x0E0D); + b43_phy_write(dev, 0x0086, 0x0C0B); + b43_phy_write(dev, 0x0085, 0x0A09); + b43_phy_write(dev, 0x0084, 0x0808); + b43_phy_write(dev, 0x0083, 0x0808); + b43_phy_write(dev, 0x0082, 0x0604); + b43_phy_write(dev, 0x0081, 0x0302); + b43_phy_write(dev, 0x0080, 0x0100); + } + break; + } + case B43_PHYTYPE_G: + if (!phy->gmode || + !(dev->dev->bus->sprom.r1.boardflags_lo & B43_BFL_RSSI)) { + tmp16 = b43_nrssi_hw_read(dev, 0x20); + if (tmp16 >= 0x20) + tmp16 -= 0x40; + if (tmp16 < 3) { + b43_phy_write(dev, 0x048A, + (b43_phy_read(dev, 0x048A) + & 0xF000) | 0x09EB); + } else { + b43_phy_write(dev, 0x048A, + (b43_phy_read(dev, 0x048A) + & 0xF000) | 0x0AED); + } + } else { + if (phy->interfmode == B43_INTERFMODE_NONWLAN) { + a = 0xE; + b = 0xA; + } else if (!phy->aci_wlan_automatic && phy->aci_enable) { + a = 0x13; + b = 0x12; + } else { + a = 0xE; + b = 0x11; + } + + a = a * (phy->nrssi[1] - phy->nrssi[0]); + a += (phy->nrssi[0] << 6); + if (a < 32) + a += 31; + else + a += 32; + a = a >> 6; + a = limit_value(a, -31, 31); + + b = b * (phy->nrssi[1] - phy->nrssi[0]); + b += (phy->nrssi[0] << 6); + if (b < 32) + b += 31; + else + b += 32; + b = b >> 6; + b = limit_value(b, -31, 31); + + tmp_u16 = b43_phy_read(dev, 0x048A) & 0xF000; + tmp_u16 |= ((u32) b & 0x0000003F); + tmp_u16 |= (((u32) a & 0x0000003F) << 6); + b43_phy_write(dev, 0x048A, tmp_u16); + } + break; + default: + B43_WARN_ON(1); + } +} + +/* Stack implementation to save/restore values from the + * interference mitigation code. + * It is safe to restore values in random order.
+ */ +static void _stack_save(u32 * _stackptr, size_t * stackidx, + u8 id, u16 offset, u16 value) +{ + u32 *stackptr = &(_stackptr[*stackidx]); + + B43_WARN_ON(offset & 0xF000); + B43_WARN_ON(id & 0xF0); + *stackptr = offset; + *stackptr |= ((u32) id) << 12; + *stackptr |= ((u32) value) << 16; + (*stackidx)++; + B43_WARN_ON(*stackidx >= B43_INTERFSTACK_SIZE); +} + +static u16 _stack_restore(u32 * stackptr, u8 id, u16 offset) +{ + size_t i; + + B43_WARN_ON(offset & 0xF000); + B43_WARN_ON(id & 0xF0); + for (i = 0; i < B43_INTERFSTACK_SIZE; i++, stackptr++) { + if ((*stackptr & 0x00000FFF) != offset) + continue; + if (((*stackptr & 0x0000F000) >> 12) != id) + continue; + return ((*stackptr & 0xFFFF0000) >> 16); + } + B43_WARN_ON(1); + + return 0; +} + +#define phy_stacksave(offset) \ + do { \ + _stack_save(stack, &stackidx, 0x1, (offset), \ + b43_phy_read(dev, (offset))); \ + } while (0) +#define phy_stackrestore(offset) \ + do { \ + b43_phy_write(dev, (offset), \ + _stack_restore(stack, 0x1, \ + (offset))); \ + } while (0) +#define radio_stacksave(offset) \ + do { \ + _stack_save(stack, &stackidx, 0x2, (offset), \ + b43_radio_read16(dev, (offset))); \ + } while (0) +#define radio_stackrestore(offset) \ + do { \ + b43_radio_write16(dev, (offset), \ + _stack_restore(stack, 0x2, \ + (offset))); \ + } while (0) +#define ofdmtab_stacksave(table, offset) \ + do { \ + _stack_save(stack, &stackidx, 0x3, (offset)|(table), \ + b43_ofdmtab_read16(dev, (table), (offset))); \ + } while (0) +#define ofdmtab_stackrestore(table, offset) \ + do { \ + b43_ofdmtab_write16(dev, (table), (offset), \ + _stack_restore(stack, 0x3, \ + (offset)|(table))); \ + } while (0) + +static void +b43_radio_interference_mitigation_enable(struct b43_wldev *dev, int mode) +{ + struct b43_phy *phy = &dev->phy; + u16 tmp, flipped; + size_t stackidx = 0; + u32 *stack = phy->interfstack; + + switch (mode) { + case B43_INTERFMODE_NONWLAN: + if (phy->rev != 1) { + b43_phy_write(dev, 0x042B, + b43_phy_read(dev, 0x042B) | 0x0800); + b43_phy_write(dev, B43_PHY_G_CRS, + b43_phy_read(dev, + B43_PHY_G_CRS) & ~0x4000); + break; + } + radio_stacksave(0x0078); + tmp = (b43_radio_read16(dev, 0x0078) & 0x001E); + flipped = flip_4bit(tmp); + if (flipped < 10 && flipped >= 8) + flipped = 7; + else if (flipped >= 10) + flipped -= 3; + flipped = flip_4bit(flipped); + flipped = (flipped << 1) | 0x0020; + b43_radio_write16(dev, 0x0078, flipped); + + b43_calc_nrssi_threshold(dev); + + phy_stacksave(0x0406); + b43_phy_write(dev, 0x0406, 0x7E28); + + b43_phy_write(dev, 0x042B, b43_phy_read(dev, 0x042B) | 0x0800); + b43_phy_write(dev, B43_PHY_RADIO_BITFIELD, + b43_phy_read(dev, + B43_PHY_RADIO_BITFIELD) | 0x1000); + + phy_stacksave(0x04A0); + b43_phy_write(dev, 0x04A0, + (b43_phy_read(dev, 0x04A0) & 0xC0C0) | 0x0008); + phy_stacksave(0x04A1); + b43_phy_write(dev, 0x04A1, + (b43_phy_read(dev, 0x04A1) & 0xC0C0) | 0x0605); + phy_stacksave(0x04A2); + b43_phy_write(dev, 0x04A2, + (b43_phy_read(dev, 0x04A2) & 0xC0C0) | 0x0204); + phy_stacksave(0x04A8); + b43_phy_write(dev, 0x04A8, + (b43_phy_read(dev, 0x04A8) & 0xC0C0) | 0x0803); + phy_stacksave(0x04AB); + b43_phy_write(dev, 0x04AB, + (b43_phy_read(dev, 0x04AB) & 0xC0C0) | 0x0605); + + phy_stacksave(0x04A7); + b43_phy_write(dev, 0x04A7, 0x0002); + phy_stacksave(0x04A3); + b43_phy_write(dev, 0x04A3, 0x287A); + phy_stacksave(0x04A9); + b43_phy_write(dev, 0x04A9, 0x2027); + phy_stacksave(0x0493); + b43_phy_write(dev, 0x0493, 0x32F5); + phy_stacksave(0x04AA); + b43_phy_write(dev, 0x04AA, 0x2027); + 
phy_stacksave(0x04AC); + b43_phy_write(dev, 0x04AC, 0x32F5); + break; + case B43_INTERFMODE_MANUALWLAN: + if (b43_phy_read(dev, 0x0033) & 0x0800) + break; + + phy->aci_enable = 1; + + phy_stacksave(B43_PHY_RADIO_BITFIELD); + phy_stacksave(B43_PHY_G_CRS); + if (phy->rev < 2) { + phy_stacksave(0x0406); + } else { + phy_stacksave(0x04C0); + phy_stacksave(0x04C1); + } + phy_stacksave(0x0033); + phy_stacksave(0x04A7); + phy_stacksave(0x04A3); + phy_stacksave(0x04A9); + phy_stacksave(0x04AA); + phy_stacksave(0x04AC); + phy_stacksave(0x0493); + phy_stacksave(0x04A1); + phy_stacksave(0x04A0); + phy_stacksave(0x04A2); + phy_stacksave(0x048A); + phy_stacksave(0x04A8); + phy_stacksave(0x04AB); + if (phy->rev == 2) { + phy_stacksave(0x04AD); + phy_stacksave(0x04AE); + } else if (phy->rev >= 3) { + phy_stacksave(0x04AD); + phy_stacksave(0x0415); + phy_stacksave(0x0416); + phy_stacksave(0x0417); + ofdmtab_stacksave(0x1A00, 0x2); + ofdmtab_stacksave(0x1A00, 0x3); + } + phy_stacksave(0x042B); + phy_stacksave(0x048C); + + b43_phy_write(dev, B43_PHY_RADIO_BITFIELD, + b43_phy_read(dev, B43_PHY_RADIO_BITFIELD) + & ~0x1000); + b43_phy_write(dev, B43_PHY_G_CRS, + (b43_phy_read(dev, B43_PHY_G_CRS) + & 0xFFFC) | 0x0002); + + b43_phy_write(dev, 0x0033, 0x0800); + b43_phy_write(dev, 0x04A3, 0x2027); + b43_phy_write(dev, 0x04A9, 0x1CA8); + b43_phy_write(dev, 0x0493, 0x287A); + b43_phy_write(dev, 0x04AA, 0x1CA8); + b43_phy_write(dev, 0x04AC, 0x287A); + + b43_phy_write(dev, 0x04A0, (b43_phy_read(dev, 0x04A0) + & 0xFFC0) | 0x001A); + b43_phy_write(dev, 0x04A7, 0x000D); + + if (phy->rev < 2) { + b43_phy_write(dev, 0x0406, 0xFF0D); + } else if (phy->rev == 2) { + b43_phy_write(dev, 0x04C0, 0xFFFF); + b43_phy_write(dev, 0x04C1, 0x00A9); + } else { + b43_phy_write(dev, 0x04C0, 0x00C1); + b43_phy_write(dev, 0x04C1, 0x0059); + } + + b43_phy_write(dev, 0x04A1, (b43_phy_read(dev, 0x04A1) + & 0xC0FF) | 0x1800); + b43_phy_write(dev, 0x04A1, (b43_phy_read(dev, 0x04A1) + & 0xFFC0) | 0x0015); + b43_phy_write(dev, 0x04A8, (b43_phy_read(dev, 0x04A8) + & 0xCFFF) | 0x1000); + b43_phy_write(dev, 0x04A8, (b43_phy_read(dev, 0x04A8) + & 0xF0FF) | 0x0A00); + b43_phy_write(dev, 0x04AB, (b43_phy_read(dev, 0x04AB) + & 0xCFFF) | 0x1000); + b43_phy_write(dev, 0x04AB, (b43_phy_read(dev, 0x04AB) + & 0xF0FF) | 0x0800); + b43_phy_write(dev, 0x04AB, (b43_phy_read(dev, 0x04AB) + & 0xFFCF) | 0x0010); + b43_phy_write(dev, 0x04AB, (b43_phy_read(dev, 0x04AB) + & 0xFFF0) | 0x0005); + b43_phy_write(dev, 0x04A8, (b43_phy_read(dev, 0x04A8) + & 0xFFCF) | 0x0010); + b43_phy_write(dev, 0x04A8, (b43_phy_read(dev, 0x04A8) + & 0xFFF0) | 0x0006); + b43_phy_write(dev, 0x04A2, (b43_phy_read(dev, 0x04A2) + & 0xF0FF) | 0x0800); + b43_phy_write(dev, 0x04A0, (b43_phy_read(dev, 0x04A0) + & 0xF0FF) | 0x0500); + b43_phy_write(dev, 0x04A2, (b43_phy_read(dev, 0x04A2) + & 0xFFF0) | 0x000B); + + if (phy->rev >= 3) { + b43_phy_write(dev, 0x048A, b43_phy_read(dev, 0x048A) + & ~0x8000); + b43_phy_write(dev, 0x0415, (b43_phy_read(dev, 0x0415) + & 0x8000) | 0x36D8); + b43_phy_write(dev, 0x0416, (b43_phy_read(dev, 0x0416) + & 0x8000) | 0x36D8); + b43_phy_write(dev, 0x0417, (b43_phy_read(dev, 0x0417) + & 0xFE00) | 0x016D); + } else { + b43_phy_write(dev, 0x048A, b43_phy_read(dev, 0x048A) + | 0x1000); + b43_phy_write(dev, 0x048A, (b43_phy_read(dev, 0x048A) + & 0x9FFF) | 0x2000); + b43_hf_write(dev, b43_hf_read(dev) | B43_HF_ACIW); + } + if (phy->rev >= 2) { + b43_phy_write(dev, 0x042B, b43_phy_read(dev, 0x042B) + | 0x0800); + } + b43_phy_write(dev, 0x048C, (b43_phy_read(dev, 0x048C) + & 
0xF0FF) | 0x0200); + if (phy->rev == 2) { + b43_phy_write(dev, 0x04AE, (b43_phy_read(dev, 0x04AE) + & 0xFF00) | 0x007F); + b43_phy_write(dev, 0x04AD, (b43_phy_read(dev, 0x04AD) + & 0x00FF) | 0x1300); + } else if (phy->rev >= 6) { + b43_ofdmtab_write16(dev, 0x1A00, 0x3, 0x007F); + b43_ofdmtab_write16(dev, 0x1A00, 0x2, 0x007F); + b43_phy_write(dev, 0x04AD, b43_phy_read(dev, 0x04AD) + & 0x00FF); + } + b43_calc_nrssi_slope(dev); + break; + default: + B43_WARN_ON(1); + } +} + +static void +b43_radio_interference_mitigation_disable(struct b43_wldev *dev, int mode) +{ + struct b43_phy *phy = &dev->phy; + u32 *stack = phy->interfstack; + + switch (mode) { + case B43_INTERFMODE_NONWLAN: + if (phy->rev != 1) { + b43_phy_write(dev, 0x042B, + b43_phy_read(dev, 0x042B) & ~0x0800); + b43_phy_write(dev, B43_PHY_G_CRS, + b43_phy_read(dev, + B43_PHY_G_CRS) | 0x4000); + break; + } + radio_stackrestore(0x0078); + b43_calc_nrssi_threshold(dev); + phy_stackrestore(0x0406); + b43_phy_write(dev, 0x042B, b43_phy_read(dev, 0x042B) & ~0x0800); + if (!dev->bad_frames_preempt) { + b43_phy_write(dev, B43_PHY_RADIO_BITFIELD, + b43_phy_read(dev, B43_PHY_RADIO_BITFIELD) + & ~(1 << 11)); + } + b43_phy_write(dev, B43_PHY_G_CRS, + b43_phy_read(dev, B43_PHY_G_CRS) | 0x4000); + phy_stackrestore(0x04A0); + phy_stackrestore(0x04A1); + phy_stackrestore(0x04A2); + phy_stackrestore(0x04A8); + phy_stackrestore(0x04AB); + phy_stackrestore(0x04A7); + phy_stackrestore(0x04A3); + phy_stackrestore(0x04A9); + phy_stackrestore(0x0493); + phy_stackrestore(0x04AA); + phy_stackrestore(0x04AC); + break; + case B43_INTERFMODE_MANUALWLAN: + if (!(b43_phy_read(dev, 0x0033) & 0x0800)) + break; + + phy->aci_enable = 0; + + phy_stackrestore(B43_PHY_RADIO_BITFIELD); + phy_stackrestore(B43_PHY_G_CRS); + phy_stackrestore(0x0033); + phy_stackrestore(0x04A3); + phy_stackrestore(0x04A9); + phy_stackrestore(0x0493); + phy_stackrestore(0x04AA); + phy_stackrestore(0x04AC); + phy_stackrestore(0x04A0); + phy_stackrestore(0x04A7); + if (phy->rev >= 2) { + phy_stackrestore(0x04C0); + phy_stackrestore(0x04C1); + } else + phy_stackrestore(0x0406); + phy_stackrestore(0x04A1); + phy_stackrestore(0x04AB); + phy_stackrestore(0x04A8); + if (phy->rev == 2) { + phy_stackrestore(0x04AD); + phy_stackrestore(0x04AE); + } else if (phy->rev >= 3) { + phy_stackrestore(0x04AD); + phy_stackrestore(0x0415); + phy_stackrestore(0x0416); + phy_stackrestore(0x0417); + ofdmtab_stackrestore(0x1A00, 0x2); + ofdmtab_stackrestore(0x1A00, 0x3); + } + phy_stackrestore(0x04A2); + phy_stackrestore(0x048A); + phy_stackrestore(0x042B); + phy_stackrestore(0x048C); + b43_hf_write(dev, b43_hf_read(dev) & ~B43_HF_ACIW); + b43_calc_nrssi_slope(dev); + break; + default: + B43_WARN_ON(1); + } +} + +#undef phy_stacksave +#undef phy_stackrestore +#undef radio_stacksave +#undef radio_stackrestore +#undef ofdmtab_stacksave +#undef ofdmtab_stackrestore + +int b43_radio_set_interference_mitigation(struct b43_wldev *dev, int mode) +{ + struct b43_phy *phy = &dev->phy; + int currentmode; + + if ((phy->type != B43_PHYTYPE_G) || (phy->rev == 0) || (!phy->gmode)) + return -ENODEV; + + phy->aci_wlan_automatic = 0; + switch (mode) { + case B43_INTERFMODE_AUTOWLAN: + phy->aci_wlan_automatic = 1; + if (phy->aci_enable) + mode = B43_INTERFMODE_MANUALWLAN; + else + mode = B43_INTERFMODE_NONE; + break; + case B43_INTERFMODE_NONE: + case B43_INTERFMODE_NONWLAN: + case B43_INTERFMODE_MANUALWLAN: + break; + default: + return -EINVAL; + } + + currentmode = phy->interfmode; + if (currentmode == mode) + return 0; + if 
(currentmode != B43_INTERFMODE_NONE) + b43_radio_interference_mitigation_disable(dev, currentmode); + + if (mode == B43_INTERFMODE_NONE) { + phy->aci_enable = 0; + phy->aci_hw_rssi = 0; + } else + b43_radio_interference_mitigation_enable(dev, mode); + phy->interfmode = mode; + + return 0; +} + +static u16 b43_radio_core_calibration_value(struct b43_wldev *dev) +{ + u16 reg, index, ret; + + static const u8 rcc_table[] = { + 0x02, 0x03, 0x01, 0x0F, + 0x06, 0x07, 0x05, 0x0F, + 0x0A, 0x0B, 0x09, 0x0F, + 0x0E, 0x0F, 0x0D, 0x0F, + }; + + reg = b43_radio_read16(dev, 0x60); + index = (reg & 0x001E) >> 1; + ret = rcc_table[index] << 1; + ret |= (reg & 0x0001); + ret |= 0x0020; + + return ret; +} + +#define LPD(L, P, D) (((L) << 2) | ((P) << 1) | ((D) << 0)) +static u16 radio2050_rfover_val(struct b43_wldev *dev, + u16 phy_register, unsigned int lpd) +{ + struct b43_phy *phy = &dev->phy; + struct ssb_sprom *sprom = &(dev->dev->bus->sprom); + + if (!phy->gmode) + return 0; + + if (has_loopback_gain(phy)) { + int max_lb_gain = phy->max_lb_gain; + u16 extlna; + u16 i; + + if (phy->radio_rev == 8) + max_lb_gain += 0x3E; + else + max_lb_gain += 0x26; + if (max_lb_gain >= 0x46) { + extlna = 0x3000; + max_lb_gain -= 0x46; + } else if (max_lb_gain >= 0x3A) { + extlna = 0x1000; + max_lb_gain -= 0x3A; + } else if (max_lb_gain >= 0x2E) { + extlna = 0x2000; + max_lb_gain -= 0x2E; + } else { + extlna = 0; + max_lb_gain -= 0x10; + } + + for (i = 0; i < 16; i++) { + max_lb_gain -= (i * 6); + if (max_lb_gain < 6) + break; + } + + if ((phy->rev < 7) || + !(sprom->r1.boardflags_lo & B43_BFL_EXTLNA)) { + if (phy_register == B43_PHY_RFOVER) { + return 0x1B3; + } else if (phy_register == B43_PHY_RFOVERVAL) { + extlna |= (i << 8); + switch (lpd) { + case LPD(0, 1, 1): + return 0x0F92; + case LPD(0, 0, 1): + case LPD(1, 0, 1): + return (0x0092 | extlna); + case LPD(1, 0, 0): + return (0x0093 | extlna); + } + B43_WARN_ON(1); + } + B43_WARN_ON(1); + } else { + if (phy_register == B43_PHY_RFOVER) { + return 0x9B3; + } else if (phy_register == B43_PHY_RFOVERVAL) { + if (extlna) + extlna |= 0x8000; + extlna |= (i << 8); + switch (lpd) { + case LPD(0, 1, 1): + return 0x8F92; + case LPD(0, 0, 1): + return (0x8092 | extlna); + case LPD(1, 0, 1): + return (0x2092 | extlna); + case LPD(1, 0, 0): + return (0x2093 | extlna); + } + B43_WARN_ON(1); + } + B43_WARN_ON(1); + } + } else { + if ((phy->rev < 7) || + !(sprom->r1.boardflags_lo & B43_BFL_EXTLNA)) { + if (phy_register == B43_PHY_RFOVER) { + return 0x1B3; + } else if (phy_register == B43_PHY_RFOVERVAL) { + switch (lpd) { + case LPD(0, 1, 1): + return 0x0FB2; + case LPD(0, 0, 1): + return 0x00B2; + case LPD(1, 0, 1): + return 0x30B2; + case LPD(1, 0, 0): + return 0x30B3; + } + B43_WARN_ON(1); + } + B43_WARN_ON(1); + } else { + if (phy_register == B43_PHY_RFOVER) { + return 0x9B3; + } else if (phy_register == B43_PHY_RFOVERVAL) { + switch (lpd) { + case LPD(0, 1, 1): + return 0x8FB2; + case LPD(0, 0, 1): + return 0x80B2; + case LPD(1, 0, 1): + return 0x20B2; + case LPD(1, 0, 0): + return 0x20B3; + } + B43_WARN_ON(1); + } + B43_WARN_ON(1); + } + } + return 0; +} + +struct init2050_saved_values { + /* Core registers */ + u16 reg_3EC; + u16 reg_3E6; + u16 reg_3F4; + /* Radio registers */ + u16 radio_43; + u16 radio_51; + u16 radio_52; + /* PHY registers */ + u16 phy_pgactl; + u16 phy_base_5A; + u16 phy_base_59; + u16 phy_base_58; + u16 phy_base_30; + u16 phy_rfover; + u16 phy_rfoverval; + u16 phy_analogover; + u16 phy_analogoverval; + u16 phy_crs0; + u16 phy_classctl; + u16 
phy_lo_mask; + u16 phy_lo_ctl; + u16 phy_syncctl; +}; + +u16 b43_radio_init2050(struct b43_wldev *dev) +{ + struct b43_phy *phy = &dev->phy; + struct init2050_saved_values sav; + u16 rcc; + u16 radio78; + u16 ret; + u16 i, j; + u32 tmp1 = 0, tmp2 = 0; + + memset(&sav, 0, sizeof(sav)); /* get rid of "may be used uninitialized..." */ + + sav.radio_43 = b43_radio_read16(dev, 0x43); + sav.radio_51 = b43_radio_read16(dev, 0x51); + sav.radio_52 = b43_radio_read16(dev, 0x52); + sav.phy_pgactl = b43_phy_read(dev, B43_PHY_PGACTL); + sav.phy_base_5A = b43_phy_read(dev, B43_PHY_BASE(0x5A)); + sav.phy_base_59 = b43_phy_read(dev, B43_PHY_BASE(0x59)); + sav.phy_base_58 = b43_phy_read(dev, B43_PHY_BASE(0x58)); + + if (phy->type == B43_PHYTYPE_B) { + sav.phy_base_30 = b43_phy_read(dev, B43_PHY_BASE(0x30)); + sav.reg_3EC = b43_read16(dev, 0x3EC); + + b43_phy_write(dev, B43_PHY_BASE(0x30), 0xFF); + b43_write16(dev, 0x3EC, 0x3F3F); + } else if (phy->gmode || phy->rev >= 2) { + sav.phy_rfover = b43_phy_read(dev, B43_PHY_RFOVER); + sav.phy_rfoverval = b43_phy_read(dev, B43_PHY_RFOVERVAL); + sav.phy_analogover = b43_phy_read(dev, B43_PHY_ANALOGOVER); + sav.phy_analogoverval = + b43_phy_read(dev, B43_PHY_ANALOGOVERVAL); + sav.phy_crs0 = b43_phy_read(dev, B43_PHY_CRS0); + sav.phy_classctl = b43_phy_read(dev, B43_PHY_CLASSCTL); + + b43_phy_write(dev, B43_PHY_ANALOGOVER, + b43_phy_read(dev, B43_PHY_ANALOGOVER) + | 0x0003); + b43_phy_write(dev, B43_PHY_ANALOGOVERVAL, + b43_phy_read(dev, B43_PHY_ANALOGOVERVAL) + & 0xFFFC); + b43_phy_write(dev, B43_PHY_CRS0, b43_phy_read(dev, B43_PHY_CRS0) + & 0x7FFF); + b43_phy_write(dev, B43_PHY_CLASSCTL, + b43_phy_read(dev, B43_PHY_CLASSCTL) + & 0xFFFC); + if (has_loopback_gain(phy)) { + sav.phy_lo_mask = b43_phy_read(dev, B43_PHY_LO_MASK); + sav.phy_lo_ctl = b43_phy_read(dev, B43_PHY_LO_CTL); + + if (phy->rev >= 3) + b43_phy_write(dev, B43_PHY_LO_MASK, 0xC020); + else + b43_phy_write(dev, B43_PHY_LO_MASK, 0x8020); + b43_phy_write(dev, B43_PHY_LO_CTL, 0); + } + + b43_phy_write(dev, B43_PHY_RFOVERVAL, + radio2050_rfover_val(dev, B43_PHY_RFOVERVAL, + LPD(0, 1, 1))); + b43_phy_write(dev, B43_PHY_RFOVER, + radio2050_rfover_val(dev, B43_PHY_RFOVER, 0)); + } + b43_write16(dev, 0x3E2, b43_read16(dev, 0x3E2) | 0x8000); + + sav.phy_syncctl = b43_phy_read(dev, B43_PHY_SYNCCTL); + b43_phy_write(dev, B43_PHY_SYNCCTL, b43_phy_read(dev, B43_PHY_SYNCCTL) + & 0xFF7F); + sav.reg_3E6 = b43_read16(dev, 0x3E6); + sav.reg_3F4 = b43_read16(dev, 0x3F4); + + if (phy->analog == 0) { + b43_write16(dev, 0x03E6, 0x0122); + } else { + if (phy->analog >= 2) { + b43_phy_write(dev, B43_PHY_BASE(0x03), + (b43_phy_read(dev, B43_PHY_BASE(0x03)) + & 0xFFBF) | 0x40); + } + b43_write16(dev, B43_MMIO_CHANNEL_EXT, + (b43_read16(dev, B43_MMIO_CHANNEL_EXT) | 0x2000)); + } + + rcc = b43_radio_core_calibration_value(dev); + + if (phy->type == B43_PHYTYPE_B) + b43_radio_write16(dev, 0x78, 0x26); + if (phy->gmode || phy->rev >= 2) { + b43_phy_write(dev, B43_PHY_RFOVERVAL, + radio2050_rfover_val(dev, B43_PHY_RFOVERVAL, + LPD(0, 1, 1))); + } + b43_phy_write(dev, B43_PHY_PGACTL, 0xBFAF); + b43_phy_write(dev, B43_PHY_BASE(0x2B), 0x1403); + if (phy->gmode || phy->rev >= 2) { + b43_phy_write(dev, B43_PHY_RFOVERVAL, + radio2050_rfover_val(dev, B43_PHY_RFOVERVAL, + LPD(0, 0, 1))); + } + b43_phy_write(dev, B43_PHY_PGACTL, 0xBFA0); + b43_radio_write16(dev, 0x51, b43_radio_read16(dev, 0x51) + | 0x0004); + if (phy->radio_rev == 8) { + b43_radio_write16(dev, 0x43, 0x1F); + } else { + b43_radio_write16(dev, 0x52, 0); + 
b43_radio_write16(dev, 0x43, (b43_radio_read16(dev, 0x43) + & 0xFFF0) | 0x0009); + } + b43_phy_write(dev, B43_PHY_BASE(0x58), 0); + + for (i = 0; i < 16; i++) { + b43_phy_write(dev, B43_PHY_BASE(0x5A), 0x0480); + b43_phy_write(dev, B43_PHY_BASE(0x59), 0xC810); + b43_phy_write(dev, B43_PHY_BASE(0x58), 0x000D); + if (phy->gmode || phy->rev >= 2) { + b43_phy_write(dev, B43_PHY_RFOVERVAL, + radio2050_rfover_val(dev, + B43_PHY_RFOVERVAL, + LPD(1, 0, 1))); + } + b43_phy_write(dev, B43_PHY_PGACTL, 0xAFB0); + udelay(10); + if (phy->gmode || phy->rev >= 2) { + b43_phy_write(dev, B43_PHY_RFOVERVAL, + radio2050_rfover_val(dev, + B43_PHY_RFOVERVAL, + LPD(1, 0, 1))); + } + b43_phy_write(dev, B43_PHY_PGACTL, 0xEFB0); + udelay(10); + if (phy->gmode || phy->rev >= 2) { + b43_phy_write(dev, B43_PHY_RFOVERVAL, + radio2050_rfover_val(dev, + B43_PHY_RFOVERVAL, + LPD(1, 0, 0))); + } + b43_phy_write(dev, B43_PHY_PGACTL, 0xFFF0); + udelay(20); + tmp1 += b43_phy_read(dev, B43_PHY_LO_LEAKAGE); + b43_phy_write(dev, B43_PHY_BASE(0x58), 0); + if (phy->gmode || phy->rev >= 2) { + b43_phy_write(dev, B43_PHY_RFOVERVAL, + radio2050_rfover_val(dev, + B43_PHY_RFOVERVAL, + LPD(1, 0, 1))); + } + b43_phy_write(dev, B43_PHY_PGACTL, 0xAFB0); + } + udelay(10); + + b43_phy_write(dev, B43_PHY_BASE(0x58), 0); + tmp1++; + tmp1 >>= 9; + + for (i = 0; i < 16; i++) { + radio78 = ((flip_4bit(i) << 1) | 0x20); + b43_radio_write16(dev, 0x78, radio78); + udelay(10); + for (j = 0; j < 16; j++) { + b43_phy_write(dev, B43_PHY_BASE(0x5A), 0x0D80); + b43_phy_write(dev, B43_PHY_BASE(0x59), 0xC810); + b43_phy_write(dev, B43_PHY_BASE(0x58), 0x000D); + if (phy->gmode || phy->rev >= 2) { + b43_phy_write(dev, B43_PHY_RFOVERVAL, + radio2050_rfover_val(dev, + B43_PHY_RFOVERVAL, + LPD(1, 0, + 1))); + } + b43_phy_write(dev, B43_PHY_PGACTL, 0xAFB0); + udelay(10); + if (phy->gmode || phy->rev >= 2) { + b43_phy_write(dev, B43_PHY_RFOVERVAL, + radio2050_rfover_val(dev, + B43_PHY_RFOVERVAL, + LPD(1, 0, + 1))); + } + b43_phy_write(dev, B43_PHY_PGACTL, 0xEFB0); + udelay(10); + if (phy->gmode || phy->rev >= 2) { + b43_phy_write(dev, B43_PHY_RFOVERVAL, + radio2050_rfover_val(dev, + B43_PHY_RFOVERVAL, + LPD(1, 0, + 0))); + } + b43_phy_write(dev, B43_PHY_PGACTL, 0xFFF0); + udelay(10); + tmp2 += b43_phy_read(dev, B43_PHY_LO_LEAKAGE); + b43_phy_write(dev, B43_PHY_BASE(0x58), 0); + if (phy->gmode || phy->rev >= 2) { + b43_phy_write(dev, B43_PHY_RFOVERVAL, + radio2050_rfover_val(dev, + B43_PHY_RFOVERVAL, + LPD(1, 0, + 1))); + } + b43_phy_write(dev, B43_PHY_PGACTL, 0xAFB0); + } + tmp2++; + tmp2 >>= 8; + if (tmp1 < tmp2) + break; + } + + /* Restore the registers */ + b43_phy_write(dev, B43_PHY_PGACTL, sav.phy_pgactl); + b43_radio_write16(dev, 0x51, sav.radio_51); + b43_radio_write16(dev, 0x52, sav.radio_52); + b43_radio_write16(dev, 0x43, sav.radio_43); + b43_phy_write(dev, B43_PHY_BASE(0x5A), sav.phy_base_5A); + b43_phy_write(dev, B43_PHY_BASE(0x59), sav.phy_base_59); + b43_phy_write(dev, B43_PHY_BASE(0x58), sav.phy_base_58); + b43_write16(dev, 0x3E6, sav.reg_3E6); + if (phy->analog != 0) + b43_write16(dev, 0x3F4, sav.reg_3F4); + b43_phy_write(dev, B43_PHY_SYNCCTL, sav.phy_syncctl); + b43_synth_pu_workaround(dev, phy->channel); + if (phy->type == B43_PHYTYPE_B) { + b43_phy_write(dev, B43_PHY_BASE(0x30), sav.phy_base_30); + b43_write16(dev, 0x3EC, sav.reg_3EC); + } else if (phy->gmode) { + b43_write16(dev, B43_MMIO_PHY_RADIO, + b43_read16(dev, B43_MMIO_PHY_RADIO) + & 0x7FFF); + b43_phy_write(dev, B43_PHY_RFOVER, sav.phy_rfover); + b43_phy_write(dev, B43_PHY_RFOVERVAL, 
sav.phy_rfoverval); + b43_phy_write(dev, B43_PHY_ANALOGOVER, sav.phy_analogover); + b43_phy_write(dev, B43_PHY_ANALOGOVERVAL, + sav.phy_analogoverval); + b43_phy_write(dev, B43_PHY_CRS0, sav.phy_crs0); + b43_phy_write(dev, B43_PHY_CLASSCTL, sav.phy_classctl); + if (has_loopback_gain(phy)) { + b43_phy_write(dev, B43_PHY_LO_MASK, sav.phy_lo_mask); + b43_phy_write(dev, B43_PHY_LO_CTL, sav.phy_lo_ctl); + } + } + if (i > 15) + ret = radio78; + else + ret = rcc; + + return ret; +} + +void b43_radio_init2060(struct b43_wldev *dev) +{ + int err; + + b43_radio_write16(dev, 0x0004, 0x00C0); + b43_radio_write16(dev, 0x0005, 0x0008); + b43_radio_write16(dev, 0x0009, 0x0040); + b43_radio_write16(dev, 0x0005, 0x00AA); + b43_radio_write16(dev, 0x0032, 0x008F); + b43_radio_write16(dev, 0x0006, 0x008F); + b43_radio_write16(dev, 0x0034, 0x008F); + b43_radio_write16(dev, 0x002C, 0x0007); + b43_radio_write16(dev, 0x0082, 0x0080); + b43_radio_write16(dev, 0x0080, 0x0000); + b43_radio_write16(dev, 0x003F, 0x00DA); + b43_radio_write16(dev, 0x0005, b43_radio_read16(dev, 0x0005) & ~0x0008); + b43_radio_write16(dev, 0x0081, b43_radio_read16(dev, 0x0081) & ~0x0010); + b43_radio_write16(dev, 0x0081, b43_radio_read16(dev, 0x0081) & ~0x0020); + b43_radio_write16(dev, 0x0081, b43_radio_read16(dev, 0x0081) & ~0x0020); + msleep(1); /* delay 400usec */ + + b43_radio_write16(dev, 0x0081, + (b43_radio_read16(dev, 0x0081) & ~0x0020) | 0x0010); + msleep(1); /* delay 400usec */ + + b43_radio_write16(dev, 0x0005, + (b43_radio_read16(dev, 0x0005) & ~0x0008) | 0x0008); + b43_radio_write16(dev, 0x0085, b43_radio_read16(dev, 0x0085) & ~0x0010); + b43_radio_write16(dev, 0x0005, b43_radio_read16(dev, 0x0005) & ~0x0008); + b43_radio_write16(dev, 0x0081, b43_radio_read16(dev, 0x0081) & ~0x0040); + b43_radio_write16(dev, 0x0081, + (b43_radio_read16(dev, 0x0081) & ~0x0040) | 0x0040); + b43_radio_write16(dev, 0x0005, + (b43_radio_read16(dev, 0x0081) & ~0x0008) | 0x0008); + b43_phy_write(dev, 0x0063, 0xDDC6); + b43_phy_write(dev, 0x0069, 0x07BE); + b43_phy_write(dev, 0x006A, 0x0000); + + err = b43_radio_selectchannel(dev, B43_DEFAULT_CHANNEL_A, 0); + B43_WARN_ON(err); + + msleep(1); +} + +static inline u16 freq_r3A_value(u16 frequency) +{ + u16 value; + + if (frequency < 5091) + value = 0x0040; + else if (frequency < 5321) + value = 0x0000; + else if (frequency < 5806) + value = 0x0080; + else + value = 0x0040; + + return value; +} + +void b43_radio_set_tx_iq(struct b43_wldev *dev) +{ + static const u8 data_high[5] = { 0x00, 0x40, 0x80, 0x90, 0xD0 }; + static const u8 data_low[5] = { 0x00, 0x01, 0x05, 0x06, 0x0A }; + u16 tmp = b43_radio_read16(dev, 0x001E); + int i, j; + + for (i = 0; i < 5; i++) { + for (j = 0; j < 5; j++) { + if (tmp == (data_high[i] << 4 | data_low[j])) { + b43_phy_write(dev, 0x0069, + (i - j) << 8 | 0x00C0); + return; + } + } + } +} + +int b43_radio_selectchannel(struct b43_wldev *dev, + u8 channel, int synthetic_pu_workaround) +{ + struct b43_phy *phy = &dev->phy; + u16 r8, tmp; + u16 freq; + u16 channelcookie; + + if (channel == 0xFF) { + switch (phy->type) { + case B43_PHYTYPE_A: + channel = B43_DEFAULT_CHANNEL_A; + break; + case B43_PHYTYPE_B: + case B43_PHYTYPE_G: + channel = B43_DEFAULT_CHANNEL_BG; + break; + default: + B43_WARN_ON(1); + } + } + + /* First we set the channel radio code to prevent the + * firmware from sending ghost packets. 
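+	 * The value is written to shared memory (B43_SHM_SH_CHAN) before the radio is tuned to the new channel.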
+ */ + channelcookie = channel; + if (phy->type == B43_PHYTYPE_A) + channelcookie |= 0x100; + b43_shm_write16(dev, B43_SHM_SHARED, B43_SHM_SH_CHAN, channelcookie); + + if (phy->type == B43_PHYTYPE_A) { + if (channel > 200) + return -EINVAL; + freq = channel2freq_a(channel); + + r8 = b43_radio_read16(dev, 0x0008); + b43_write16(dev, 0x03F0, freq); + b43_radio_write16(dev, 0x0008, r8); + + //TODO: write max channel TX power? to Radio 0x2D + tmp = b43_radio_read16(dev, 0x002E); + tmp &= 0x0080; + //TODO: OR tmp with the Power out estimation for this channel? + b43_radio_write16(dev, 0x002E, tmp); + + if (freq >= 4920 && freq <= 5500) { + /* + * r8 = (((freq * 15 * 0xE1FC780F) >> 32) / 29) & 0x0F; + * = (freq * 0.025862069 + */ + r8 = 3 * freq / 116; /* is equal to r8 = freq * 0.025862 */ + } + b43_radio_write16(dev, 0x0007, (r8 << 4) | r8); + b43_radio_write16(dev, 0x0020, (r8 << 4) | r8); + b43_radio_write16(dev, 0x0021, (r8 << 4) | r8); + b43_radio_write16(dev, 0x0022, (b43_radio_read16(dev, 0x0022) + & 0x000F) | (r8 << 4)); + b43_radio_write16(dev, 0x002A, (r8 << 4)); + b43_radio_write16(dev, 0x002B, (r8 << 4)); + b43_radio_write16(dev, 0x0008, (b43_radio_read16(dev, 0x0008) + & 0x00F0) | (r8 << 4)); + b43_radio_write16(dev, 0x0029, (b43_radio_read16(dev, 0x0029) + & 0xFF0F) | 0x00B0); + b43_radio_write16(dev, 0x0035, 0x00AA); + b43_radio_write16(dev, 0x0036, 0x0085); + b43_radio_write16(dev, 0x003A, (b43_radio_read16(dev, 0x003A) + & 0xFF20) | + freq_r3A_value(freq)); + b43_radio_write16(dev, 0x003D, + b43_radio_read16(dev, 0x003D) & 0x00FF); + b43_radio_write16(dev, 0x0081, (b43_radio_read16(dev, 0x0081) + & 0xFF7F) | 0x0080); + b43_radio_write16(dev, 0x0035, + b43_radio_read16(dev, 0x0035) & 0xFFEF); + b43_radio_write16(dev, 0x0035, (b43_radio_read16(dev, 0x0035) + & 0xFFEF) | 0x0010); + b43_radio_set_tx_iq(dev); + //TODO: TSSI2dbm workaround + b43_phy_xmitpower(dev); //FIXME correct? + } else { + if ((channel < 1) || (channel > 14)) + return -EINVAL; + + if (synthetic_pu_workaround) + b43_synth_pu_workaround(dev, channel); + + b43_write16(dev, B43_MMIO_CHANNEL, channel2freq_bg(channel)); + + if (channel == 14) { + if (dev->dev->bus->sprom.r1.country_code == + SSB_SPROM1CCODE_JAPAN) + b43_hf_write(dev, + b43_hf_read(dev) & ~B43_HF_ACPR); + else + b43_hf_write(dev, + b43_hf_read(dev) | B43_HF_ACPR); + b43_write16(dev, B43_MMIO_CHANNEL_EXT, + b43_read16(dev, B43_MMIO_CHANNEL_EXT) + | (1 << 11)); + } else { + b43_write16(dev, B43_MMIO_CHANNEL_EXT, + b43_read16(dev, B43_MMIO_CHANNEL_EXT) + & 0xF7BF); + } + } + + phy->channel = channel; + /* Wait for the radio to tune to the channel and stabilize. 
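+	 * An 8 msec sleep below serves as the settle time.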
*/ + msleep(8); + + return 0; +} + +/* http://bcm-specs.sipsolutions.net/TX_Gain_Base_Band */ +static u16 b43_get_txgain_base_band(u16 txpower) +{ + u16 ret; + + B43_WARN_ON(txpower > 63); + + if (txpower >= 54) + ret = 2; + else if (txpower >= 49) + ret = 4; + else if (txpower >= 44) + ret = 5; + else + ret = 6; + + return ret; +} + +/* http://bcm-specs.sipsolutions.net/TX_Gain_Radio_Frequency_Power_Amplifier */ +static u16 b43_get_txgain_freq_power_amp(u16 txpower) +{ + u16 ret; + + B43_WARN_ON(txpower > 63); + + if (txpower >= 32) + ret = 0; + else if (txpower >= 25) + ret = 1; + else if (txpower >= 20) + ret = 2; + else if (txpower >= 12) + ret = 3; + else + ret = 4; + + return ret; +} + +/* http://bcm-specs.sipsolutions.net/TX_Gain_Digital_Analog_Converter */ +static u16 b43_get_txgain_dac(u16 txpower) +{ + u16 ret; + + B43_WARN_ON(txpower > 63); + + if (txpower >= 54) + ret = txpower - 53; + else if (txpower >= 49) + ret = txpower - 42; + else if (txpower >= 44) + ret = txpower - 37; + else if (txpower >= 32) + ret = txpower - 32; + else if (txpower >= 25) + ret = txpower - 20; + else if (txpower >= 20) + ret = txpower - 13; + else if (txpower >= 12) + ret = txpower - 8; + else + ret = txpower; + + return ret; +} + +static void b43_radio_set_txpower_a(struct b43_wldev *dev, u16 txpower) +{ + struct b43_phy *phy = &dev->phy; + u16 pamp, base, dac, t; + + txpower = limit_value(txpower, 0, 63); + + pamp = b43_get_txgain_freq_power_amp(txpower); + pamp <<= 5; + pamp &= 0x00E0; + b43_phy_write(dev, 0x0019, pamp); + + base = b43_get_txgain_base_band(txpower); + base &= 0x000F; + b43_phy_write(dev, 0x0017, base | 0x0020); + + t = b43_ofdmtab_read16(dev, 0x3000, 1); + t &= 0x0007; + + dac = b43_get_txgain_dac(txpower); + dac <<= 3; + dac |= t; + + b43_ofdmtab_write16(dev, 0x3000, 1, dac); + + phy->txpwr_offset = txpower; + + //TODO: FuncPlaceholder (Adjust BB loft cancel) +} + +void b43_radio_turn_on(struct b43_wldev *dev) +{ + struct b43_phy *phy = &dev->phy; + int err; + u8 channel; + + might_sleep(); + + if (phy->radio_on) + return; + + switch (phy->type) { + case B43_PHYTYPE_A: + b43_radio_write16(dev, 0x0004, 0x00C0); + b43_radio_write16(dev, 0x0005, 0x0008); + b43_phy_write(dev, 0x0010, b43_phy_read(dev, 0x0010) & 0xFFF7); + b43_phy_write(dev, 0x0011, b43_phy_read(dev, 0x0011) & 0xFFF7); + b43_radio_init2060(dev); + break; + case B43_PHYTYPE_B: + case B43_PHYTYPE_G: + b43_phy_write(dev, 0x0015, 0x8000); + b43_phy_write(dev, 0x0015, 0xCC00); + b43_phy_write(dev, 0x0015, (phy->gmode ? 0x00C0 : 0x0000)); + if (phy->radio_off_context.valid) { + /* Restore the RFover values. 
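+			 * They were saved by b43_radio_turn_off() before the radio was switched off.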
*/ + b43_phy_write(dev, B43_PHY_RFOVER, + phy->radio_off_context.rfover); + b43_phy_write(dev, B43_PHY_RFOVERVAL, + phy->radio_off_context.rfoverval); + phy->radio_off_context.valid = 0; + } + channel = phy->channel; + err = b43_radio_selectchannel(dev, B43_DEFAULT_CHANNEL_BG, 1); + err |= b43_radio_selectchannel(dev, channel, 0); + B43_WARN_ON(err); + break; + default: + B43_WARN_ON(1); + } + phy->radio_on = 1; +} + +void b43_radio_turn_off(struct b43_wldev *dev, bool force) +{ + struct b43_phy *phy = &dev->phy; + + if (!phy->radio_on && !force) + return; + + if (phy->type == B43_PHYTYPE_A) { + b43_radio_write16(dev, 0x0004, 0x00FF); + b43_radio_write16(dev, 0x0005, 0x00FB); + b43_phy_write(dev, 0x0010, b43_phy_read(dev, 0x0010) | 0x0008); + b43_phy_write(dev, 0x0011, b43_phy_read(dev, 0x0011) | 0x0008); + } + if (phy->type == B43_PHYTYPE_G && dev->dev->id.revision >= 5) { + u16 rfover, rfoverval; + + rfover = b43_phy_read(dev, B43_PHY_RFOVER); + rfoverval = b43_phy_read(dev, B43_PHY_RFOVERVAL); + if (!force) { + phy->radio_off_context.rfover = rfover; + phy->radio_off_context.rfoverval = rfoverval; + phy->radio_off_context.valid = 1; + } + b43_phy_write(dev, B43_PHY_RFOVER, rfover | 0x008C); + b43_phy_write(dev, B43_PHY_RFOVERVAL, rfoverval & 0xFF73); + } else + b43_phy_write(dev, 0x0015, 0xAA00); + phy->radio_on = 0; +} diff --git a/drivers/net/wireless/b43/phy.h b/drivers/net/wireless/b43/phy.h new file mode 100644 index 0000000000000000000000000000000000000000..c64d74504fc0ad29021bd415471f03f0b4c2b7e0 --- /dev/null +++ b/drivers/net/wireless/b43/phy.h @@ -0,0 +1,297 @@ +#ifndef B43_PHY_H_ +#define B43_PHY_H_ + +#include + +struct b43_wldev; +struct b43_phy; + +/*** PHY Registers ***/ + +/* Routing */ +#define B43_PHYROUTE_OFDM_GPHY 0x400 +#define B43_PHYROUTE_EXT_GPHY 0x800 + +/* Base registers. 
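+ * (CCK/B-PHY register space; the register number is passed through unchanged.)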
*/ +#define B43_PHY_BASE(reg) (reg) +/* OFDM (A) registers of a G-PHY */ +#define B43_PHY_OFDM(reg) ((reg) | B43_PHYROUTE_OFDM_GPHY) +/* Extended G-PHY registers */ +#define B43_PHY_EXTG(reg) ((reg) | B43_PHYROUTE_EXT_GPHY) + +/* OFDM (A) PHY Registers */ +#define B43_PHY_VERSION_OFDM B43_PHY_OFDM(0x00) /* Versioning register for A-PHY */ +#define B43_PHY_BBANDCFG B43_PHY_OFDM(0x01) /* Baseband config */ +#define B43_PHY_BBANDCFG_RXANT 0x180 /* RX Antenna selection */ +#define B43_PHY_BBANDCFG_RXANT_SHIFT 7 +#define B43_PHY_PWRDOWN B43_PHY_OFDM(0x03) /* Powerdown */ +#define B43_PHY_CRSTHRES1 B43_PHY_OFDM(0x06) /* CRS Threshold 1 */ +#define B43_PHY_LNAHPFCTL B43_PHY_OFDM(0x1C) /* LNA/HPF control */ +#define B43_PHY_ADIVRELATED B43_PHY_OFDM(0x27) /* FIXME rename */ +#define B43_PHY_CRS0 B43_PHY_OFDM(0x29) +#define B43_PHY_ANTDWELL B43_PHY_OFDM(0x2B) /* Antenna dwell */ +#define B43_PHY_ANTDWELL_AUTODIV1 0x0100 /* Automatic RX diversity start antenna */ +#define B43_PHY_ENCORE B43_PHY_OFDM(0x49) /* "Encore" (RangeMax / BroadRange) */ +#define B43_PHY_ENCORE_EN 0x0200 /* Encore enable */ +#define B43_PHY_LMS B43_PHY_OFDM(0x55) +#define B43_PHY_OFDM61 B43_PHY_OFDM(0x61) /* FIXME rename */ +#define B43_PHY_OFDM61_10 0x0010 /* FIXME rename */ +#define B43_PHY_IQBAL B43_PHY_OFDM(0x69) /* I/Q balance */ +#define B43_PHY_OTABLECTL B43_PHY_OFDM(0x72) /* OFDM table control (see below) */ +#define B43_PHY_OTABLEOFF 0x03FF /* OFDM table offset (see below) */ +#define B43_PHY_OTABLENR 0xFC00 /* OFDM table number (see below) */ +#define B43_PHY_OTABLENR_SHIFT 10 +#define B43_PHY_OTABLEI B43_PHY_OFDM(0x73) /* OFDM table data I */ +#define B43_PHY_OTABLEQ B43_PHY_OFDM(0x74) /* OFDM table data Q */ +#define B43_PHY_HPWR_TSSICTL B43_PHY_OFDM(0x78) /* Hardware power TSSI control */ +#define B43_PHY_NRSSITHRES B43_PHY_OFDM(0x8A) /* NRSSI threshold */ +#define B43_PHY_ANTWRSETT B43_PHY_OFDM(0x8C) /* Antenna WR settle */ +#define B43_PHY_ANTWRSETT_ARXDIV 0x2000 /* Automatic RX diversity enabled */ +#define B43_PHY_CLIPPWRDOWNT B43_PHY_OFDM(0x93) /* Clip powerdown threshold */ +#define B43_PHY_OFDM9B B43_PHY_OFDM(0x9B) /* FIXME rename */ +#define B43_PHY_N1P1GAIN B43_PHY_OFDM(0xA0) +#define B43_PHY_P1P2GAIN B43_PHY_OFDM(0xA1) +#define B43_PHY_N1N2GAIN B43_PHY_OFDM(0xA2) +#define B43_PHY_CLIPTHRES B43_PHY_OFDM(0xA3) +#define B43_PHY_CLIPN1P2THRES B43_PHY_OFDM(0xA4) +#define B43_PHY_DIVSRCHIDX B43_PHY_OFDM(0xA8) /* Divider search gain/index */ +#define B43_PHY_CLIPP2THRES B43_PHY_OFDM(0xA9) +#define B43_PHY_CLIPP3THRES B43_PHY_OFDM(0xAA) +#define B43_PHY_DIVP1P2GAIN B43_PHY_OFDM(0xAB) +#define B43_PHY_DIVSRCHGAINBACK B43_PHY_OFDM(0xAD) /* Divider search gain back */ +#define B43_PHY_DIVSRCHGAINCHNG B43_PHY_OFDM(0xAE) /* Divider search gain change */ +#define B43_PHY_CRSTHRES1_R1 B43_PHY_OFDM(0xC0) /* CRS Threshold 1 (rev 1 only) */ +#define B43_PHY_CRSTHRES2_R1 B43_PHY_OFDM(0xC1) /* CRS Threshold 2 (rev 1 only) */ +#define B43_PHY_TSSIP_LTBASE B43_PHY_OFDM(0x380) /* TSSI power lookup table base */ +#define B43_PHY_DC_LTBASE B43_PHY_OFDM(0x3A0) /* DC lookup table base */ +#define B43_PHY_GAIN_LTBASE B43_PHY_OFDM(0x3C0) /* Gain lookup table base */ + +/* CCK (B) PHY Registers */ +#define B43_PHY_VERSION_CCK B43_PHY_BASE(0x00) /* Versioning register for B-PHY */ +#define B43_PHY_CCKBBANDCFG B43_PHY_BASE(0x01) /* Contains antenna 0/1 control bit */ +#define B43_PHY_PGACTL B43_PHY_BASE(0x15) /* PGA control */ +#define B43_PHY_PGACTL_LPF 0x1000 /* Low pass filter (?) 
*/ +#define B43_PHY_PGACTL_LOWBANDW 0x0040 /* Low bandwidth flag */ +#define B43_PHY_PGACTL_UNKNOWN 0xEFA0 +#define B43_PHY_FBCTL1 B43_PHY_BASE(0x18) /* Frequency bandwidth control 1 */ +#define B43_PHY_ITSSI B43_PHY_BASE(0x29) /* Idle TSSI */ +#define B43_PHY_LO_LEAKAGE B43_PHY_BASE(0x2D) /* Measured LO leakage */ +#define B43_PHY_ENERGY B43_PHY_BASE(0x33) /* Energy */ +#define B43_PHY_SYNCCTL B43_PHY_BASE(0x35) +#define B43_PHY_FBCTL2 B43_PHY_BASE(0x38) /* Frequency bandwidth control 2 */ +#define B43_PHY_DACCTL B43_PHY_BASE(0x60) /* DAC control */ +#define B43_PHY_RCCALOVER B43_PHY_BASE(0x78) /* RC calibration override */ + +/* Extended G-PHY Registers */ +#define B43_PHY_CLASSCTL B43_PHY_EXTG(0x02) /* Classify control */ +#define B43_PHY_GTABCTL B43_PHY_EXTG(0x03) /* G-PHY table control (see below) */ +#define B43_PHY_GTABOFF 0x03FF /* G-PHY table offset (see below) */ +#define B43_PHY_GTABNR 0xFC00 /* G-PHY table number (see below) */ +#define B43_PHY_GTABNR_SHIFT 10 +#define B43_PHY_GTABDATA B43_PHY_EXTG(0x04) /* G-PHY table data */ +#define B43_PHY_LO_MASK B43_PHY_EXTG(0x0F) /* Local Oscillator control mask */ +#define B43_PHY_LO_CTL B43_PHY_EXTG(0x10) /* Local Oscillator control */ +#define B43_PHY_RFOVER B43_PHY_EXTG(0x11) /* RF override */ +#define B43_PHY_RFOVERVAL B43_PHY_EXTG(0x12) /* RF override value */ +#define B43_PHY_RFOVERVAL_EXTLNA 0x8000 +#define B43_PHY_RFOVERVAL_LNA 0x7000 +#define B43_PHY_RFOVERVAL_LNA_SHIFT 12 +#define B43_PHY_RFOVERVAL_PGA 0x0F00 +#define B43_PHY_RFOVERVAL_PGA_SHIFT 8 +#define B43_PHY_RFOVERVAL_UNK 0x0010 /* Unknown, always set. */ +#define B43_PHY_RFOVERVAL_TRSWRX 0x00E0 +#define B43_PHY_RFOVERVAL_BW 0x0003 /* Bandwidth flags */ +#define B43_PHY_RFOVERVAL_BW_LPF 0x0001 /* Low Pass Filter */ +#define B43_PHY_RFOVERVAL_BW_LBW 0x0002 /* Low Bandwidth (when set), high when unset */ +#define B43_PHY_ANALOGOVER B43_PHY_EXTG(0x14) /* Analog override */ +#define B43_PHY_ANALOGOVERVAL B43_PHY_EXTG(0x15) /* Analog override value */ + +/*** OFDM table numbers ***/ +#define B43_OFDMTAB(number, offset) (((number) << B43_PHY_OTABLENR_SHIFT) | (offset)) +#define B43_OFDMTAB_AGC1 B43_OFDMTAB(0x00, 0) +#define B43_OFDMTAB_GAIN0 B43_OFDMTAB(0x00, 0) +#define B43_OFDMTAB_GAINX B43_OFDMTAB(0x01, 0) //TODO rename +#define B43_OFDMTAB_GAIN1 B43_OFDMTAB(0x01, 4) +#define B43_OFDMTAB_AGC3 B43_OFDMTAB(0x02, 0) +#define B43_OFDMTAB_GAIN2 B43_OFDMTAB(0x02, 3) +#define B43_OFDMTAB_LNAHPFGAIN1 B43_OFDMTAB(0x03, 0) +#define B43_OFDMTAB_WRSSI B43_OFDMTAB(0x04, 0) +#define B43_OFDMTAB_LNAHPFGAIN2 B43_OFDMTAB(0x04, 0) +#define B43_OFDMTAB_NOISESCALE B43_OFDMTAB(0x05, 0) +#define B43_OFDMTAB_AGC2 B43_OFDMTAB(0x06, 0) +#define B43_OFDMTAB_ROTOR B43_OFDMTAB(0x08, 0) +#define B43_OFDMTAB_ADVRETARD B43_OFDMTAB(0x09, 0) +#define B43_OFDMTAB_DAC B43_OFDMTAB(0x0C, 0) +#define B43_OFDMTAB_DC B43_OFDMTAB(0x0E, 7) +#define B43_OFDMTAB_PWRDYN2 B43_OFDMTAB(0x0E, 12) +#define B43_OFDMTAB_LNAGAIN B43_OFDMTAB(0x0E, 13) +//TODO +#define B43_OFDMTAB_LPFGAIN B43_OFDMTAB(0x0F, 12) +#define B43_OFDMTAB_RSSI B43_OFDMTAB(0x10, 0) +//TODO +#define B43_OFDMTAB_AGC1_R1 B43_OFDMTAB(0x13, 0) +#define B43_OFDMTAB_GAINX_R1 B43_OFDMTAB(0x14, 0) //TODO rename +#define B43_OFDMTAB_MINSIGSQ B43_OFDMTAB(0x14, 1) +#define B43_OFDMTAB_AGC3_R1 B43_OFDMTAB(0x15, 0) +#define B43_OFDMTAB_WRSSI_R1 B43_OFDMTAB(0x15, 4) +#define B43_OFDMTAB_TSSI B43_OFDMTAB(0x15, 0) +#define B43_OFDMTAB_DACRFPABB B43_OFDMTAB(0x16, 0) +#define B43_OFDMTAB_DACOFF B43_OFDMTAB(0x17, 0) +#define B43_OFDMTAB_DCBIAS B43_OFDMTAB(0x18, 0) + +u16 
b43_ofdmtab_read16(struct b43_wldev *dev, u16 table, u16 offset); +void b43_ofdmtab_write16(struct b43_wldev *dev, u16 table, + u16 offset, u16 value); +u32 b43_ofdmtab_read32(struct b43_wldev *dev, u16 table, u16 offset); +void b43_ofdmtab_write32(struct b43_wldev *dev, u16 table, + u16 offset, u32 value); + +/*** G-PHY table numbers */ +#define B43_GTAB(number, offset) (((number) << B43_PHY_GTABNR_SHIFT) | (offset)) +#define B43_GTAB_NRSSI B43_GTAB(0x00, 0) +#define B43_GTAB_TRFEMW B43_GTAB(0x0C, 0x120) +#define B43_GTAB_ORIGTR B43_GTAB(0x2E, 0x298) + +u16 b43_gtab_read(struct b43_wldev *dev, u16 table, u16 offset); //TODO implement +void b43_gtab_write(struct b43_wldev *dev, u16 table, u16 offset, u16 value); //TODO implement + +#define B43_DEFAULT_CHANNEL_A 36 +#define B43_DEFAULT_CHANNEL_BG 6 + +enum { + B43_ANTENNA0, /* Antenna 0 */ + B43_ANTENNA1, /* Antenna 0 */ + B43_ANTENNA_AUTO1, /* Automatic, starting with antenna 1 */ + B43_ANTENNA_AUTO0, /* Automatic, starting with antenna 0 */ + + B43_ANTENNA_AUTO = B43_ANTENNA_AUTO0, + B43_ANTENNA_DEFAULT = B43_ANTENNA_AUTO, +}; + +enum { + B43_INTERFMODE_NONE, + B43_INTERFMODE_NONWLAN, + B43_INTERFMODE_MANUALWLAN, + B43_INTERFMODE_AUTOWLAN, +}; + +/* Masks for the different PHY versioning registers. */ +#define B43_PHYVER_ANALOG 0xF000 +#define B43_PHYVER_ANALOG_SHIFT 12 +#define B43_PHYVER_TYPE 0x0F00 +#define B43_PHYVER_TYPE_SHIFT 8 +#define B43_PHYVER_VERSION 0x00FF + +void b43_raw_phy_lock(struct b43_wldev *dev); +#define b43_phy_lock(dev, flags) \ + do { \ + local_irq_save(flags); \ + b43_raw_phy_lock(dev); \ + } while (0) +void b43_raw_phy_unlock(struct b43_wldev *dev); +#define b43_phy_unlock(dev, flags) \ + do { \ + b43_raw_phy_unlock(dev); \ + local_irq_restore(flags); \ + } while (0) + +u16 b43_phy_read(struct b43_wldev *dev, u16 offset); +void b43_phy_write(struct b43_wldev *dev, u16 offset, u16 val); + +int b43_phy_init_tssi2dbm_table(struct b43_wldev *dev); + +void b43_phy_early_init(struct b43_wldev *dev); +int b43_phy_init(struct b43_wldev *dev); + +void b43_set_rx_antenna(struct b43_wldev *dev, int antenna); + +void b43_phy_xmitpower(struct b43_wldev *dev); +void b43_gphy_dc_lt_init(struct b43_wldev *dev); + +/* Returns the boolean whether the board has HardwarePowerControl */ +bool b43_has_hardware_pctl(struct b43_phy *phy); +/* Returns the boolean whether "TX Magnification" is enabled. */ +#define has_tx_magnification(phy) \ + (((phy)->rev >= 2) && \ + ((phy)->radio_ver == 0x2050) && \ + ((phy)->radio_rev == 8)) +/* Card uses the loopback gain stuff */ +#define has_loopback_gain(phy) \ + (((phy)->rev > 1) || ((phy)->gmode)) + +/* Radio Attenuation (RF Attenuation) */ +struct b43_rfatt { + u8 att; /* Attenuation value */ + bool with_padmix; /* Flag, PAD Mixer enabled. */ +}; +struct b43_rfatt_list { + /* Attenuation values list */ + const struct b43_rfatt *list; + u8 len; + /* Minimum/Maximum attenuation values */ + u8 min_val; + u8 max_val; +}; + +/* Baseband Attenuation */ +struct b43_bbatt { + u8 att; /* Attenuation value */ +}; +struct b43_bbatt_list { + /* Attenuation values list */ + const struct b43_bbatt *list; + u8 len; + /* Minimum/Maximum attenuation values */ + u8 min_val; + u8 max_val; +}; + +/* tx_control bits. */ +#define B43_TXCTL_PA3DB 0x40 /* PA Gain 3dB */ +#define B43_TXCTL_PA2DB 0x20 /* PA Gain 2dB */ +#define B43_TXCTL_TXMIX 0x10 /* TX Mixer Gain */ + +/* Write BasebandAttenuation value to the device. 
*/ +void b43_phy_set_baseband_attenuation(struct b43_wldev *dev, + u16 baseband_attenuation); + +extern const u8 b43_radio_channel_codes_bg[]; + +void b43_radio_lock(struct b43_wldev *dev); +void b43_radio_unlock(struct b43_wldev *dev); + +u16 b43_radio_read16(struct b43_wldev *dev, u16 offset); +void b43_radio_write16(struct b43_wldev *dev, u16 offset, u16 val); + +u16 b43_radio_init2050(struct b43_wldev *dev); +void b43_radio_init2060(struct b43_wldev *dev); + +void b43_radio_turn_on(struct b43_wldev *dev); +void b43_radio_turn_off(struct b43_wldev *dev, bool force); + +int b43_radio_selectchannel(struct b43_wldev *dev, u8 channel, + int synthetic_pu_workaround); + +u8 b43_radio_aci_detect(struct b43_wldev *dev, u8 channel); +u8 b43_radio_aci_scan(struct b43_wldev *dev); + +int b43_radio_set_interference_mitigation(struct b43_wldev *dev, int mode); + +void b43_calc_nrssi_slope(struct b43_wldev *dev); +void b43_calc_nrssi_threshold(struct b43_wldev *dev); +s16 b43_nrssi_hw_read(struct b43_wldev *dev, u16 offset); +void b43_nrssi_hw_write(struct b43_wldev *dev, u16 offset, s16 val); +void b43_nrssi_hw_update(struct b43_wldev *dev, u16 val); +void b43_nrssi_mem_update(struct b43_wldev *dev); + +void b43_radio_set_tx_iq(struct b43_wldev *dev); +u16 b43_radio_calibrationvalue(struct b43_wldev *dev); + +void b43_put_attenuation_into_ranges(struct b43_wldev *dev, + int *_bbatt, int *_rfatt); + +void b43_set_txpower_g(struct b43_wldev *dev, + const struct b43_bbatt *bbatt, + const struct b43_rfatt *rfatt, u8 tx_control); + +#endif /* B43_PHY_H_ */ diff --git a/drivers/net/wireless/b43/pio.c b/drivers/net/wireless/b43/pio.c new file mode 100644 index 0000000000000000000000000000000000000000..67752a28eb9cc114468f3031b1ccb456d686dd3e --- /dev/null +++ b/drivers/net/wireless/b43/pio.c @@ -0,0 +1,652 @@ +/* + + Broadcom B43 wireless driver + + PIO Transmission + + Copyright (c) 2005 Michael Buesch + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; see the file COPYING. If not, write to + the Free Software Foundation, Inc., 51 Franklin Steet, Fifth Floor, + Boston, MA 02110-1301, USA. 
+ +*/ + +#include "b43.h" +#include "pio.h" +#include "main.h" +#include "xmit.h" + +#include + +static void tx_start(struct b43_pioqueue *queue) +{ + b43_pio_write(queue, B43_PIO_TXCTL, B43_PIO_TXCTL_INIT); +} + +static void tx_octet(struct b43_pioqueue *queue, u8 octet) +{ + if (queue->need_workarounds) { + b43_pio_write(queue, B43_PIO_TXDATA, octet); + b43_pio_write(queue, B43_PIO_TXCTL, B43_PIO_TXCTL_WRITELO); + } else { + b43_pio_write(queue, B43_PIO_TXCTL, B43_PIO_TXCTL_WRITELO); + b43_pio_write(queue, B43_PIO_TXDATA, octet); + } +} + +static u16 tx_get_next_word(const u8 * txhdr, + const u8 * packet, + size_t txhdr_size, unsigned int *pos) +{ + const u8 *source; + unsigned int i = *pos; + u16 ret; + + if (i < txhdr_size) { + source = txhdr; + } else { + source = packet; + i -= txhdr_size; + } + ret = le16_to_cpu(*((__le16 *)(source + i))); + *pos += 2; + + return ret; +} + +static void tx_data(struct b43_pioqueue *queue, + u8 * txhdr, const u8 * packet, unsigned int octets) +{ + u16 data; + unsigned int i = 0; + + if (queue->need_workarounds) { + data = tx_get_next_word(txhdr, packet, + sizeof(struct b43_txhdr_fw4), &i); + b43_pio_write(queue, B43_PIO_TXDATA, data); + } + b43_pio_write(queue, B43_PIO_TXCTL, + B43_PIO_TXCTL_WRITELO | B43_PIO_TXCTL_WRITEHI); + while (i < octets - 1) { + data = tx_get_next_word(txhdr, packet, + sizeof(struct b43_txhdr_fw4), &i); + b43_pio_write(queue, B43_PIO_TXDATA, data); + } + if (octets % 2) + tx_octet(queue, + packet[octets - sizeof(struct b43_txhdr_fw4) - 1]); +} + +static void tx_complete(struct b43_pioqueue *queue, struct sk_buff *skb) +{ + if (queue->need_workarounds) { + b43_pio_write(queue, B43_PIO_TXDATA, skb->data[skb->len - 1]); + b43_pio_write(queue, B43_PIO_TXCTL, + B43_PIO_TXCTL_WRITELO | B43_PIO_TXCTL_COMPLETE); + } else { + b43_pio_write(queue, B43_PIO_TXCTL, B43_PIO_TXCTL_COMPLETE); + } +} + +static u16 generate_cookie(struct b43_pioqueue *queue, + struct b43_pio_txpacket *packet) +{ + u16 cookie = 0x0000; + u16 packetindex; + + /* We use the upper 4 bits for the PIO + * controller ID and the lower 12 bits + * for the packet index (in the cache). 
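+	 * Example: cookie 0x2005 addresses packet slot 5 of the PIO3 queue.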
+ */ + switch (queue->mmio_base) { + case B43_MMIO_PIO1_BASE: + break; + case B43_MMIO_PIO2_BASE: + cookie = 0x1000; + break; + case B43_MMIO_PIO3_BASE: + cookie = 0x2000; + break; + case B43_MMIO_PIO4_BASE: + cookie = 0x3000; + break; + default: + B43_WARN_ON(1); + } + packetindex = packet->index; + B43_WARN_ON(packetindex & ~0x0FFF); + cookie |= (u16) packetindex; + + return cookie; +} + +static +struct b43_pioqueue *parse_cookie(struct b43_wldev *dev, + u16 cookie, struct b43_pio_txpacket **packet) +{ + struct b43_pio *pio = &dev->pio; + struct b43_pioqueue *queue = NULL; + int packetindex; + + switch (cookie & 0xF000) { + case 0x0000: + queue = pio->queue0; + break; + case 0x1000: + queue = pio->queue1; + break; + case 0x2000: + queue = pio->queue2; + break; + case 0x3000: + queue = pio->queue3; + break; + default: + B43_WARN_ON(1); + } + packetindex = (cookie & 0x0FFF); + B43_WARN_ON(!(packetindex >= 0 && packetindex < B43_PIO_MAXTXPACKETS)); + *packet = &(queue->tx_packets_cache[packetindex]); + + return queue; +} + +union txhdr_union { + struct b43_txhdr_fw4 txhdr_fw4; +}; + +static void pio_tx_write_fragment(struct b43_pioqueue *queue, + struct sk_buff *skb, + struct b43_pio_txpacket *packet, + size_t txhdr_size) +{ + union txhdr_union txhdr_data; + u8 *txhdr = NULL; + unsigned int octets; + + txhdr = (u8 *) (&txhdr_data.txhdr_fw4); + + B43_WARN_ON(skb_shinfo(skb)->nr_frags); + b43_generate_txhdr(queue->dev, + txhdr, skb->data, skb->len, + &packet->txstat.control, + generate_cookie(queue, packet)); + + tx_start(queue); + octets = skb->len + txhdr_size; + if (queue->need_workarounds) + octets--; + tx_data(queue, txhdr, (u8 *) skb->data, octets); + tx_complete(queue, skb); +} + +static void free_txpacket(struct b43_pio_txpacket *packet) +{ + struct b43_pioqueue *queue = packet->queue; + + if (packet->skb) + dev_kfree_skb_any(packet->skb); + list_move(&packet->list, &queue->txfree); + queue->nr_txfree++; +} + +static int pio_tx_packet(struct b43_pio_txpacket *packet) +{ + struct b43_pioqueue *queue = packet->queue; + struct sk_buff *skb = packet->skb; + u16 octets; + + octets = (u16) skb->len + sizeof(struct b43_txhdr_fw4); + if (queue->tx_devq_size < octets) { + b43warn(queue->dev->wl, "PIO queue too small. " + "Dropping packet.\n"); + /* Drop it silently (return success) */ + free_txpacket(packet); + return 0; + } + B43_WARN_ON(queue->tx_devq_packets > B43_PIO_MAXTXDEVQPACKETS); + B43_WARN_ON(queue->tx_devq_used > queue->tx_devq_size); + /* Check if there is sufficient free space on the device + * TX queue. If not, return and let the TX tasklet + * retry later. + */ + if (queue->tx_devq_packets == B43_PIO_MAXTXDEVQPACKETS) + return -EBUSY; + if (queue->tx_devq_used + octets > queue->tx_devq_size) + return -EBUSY; + /* Now poke the device. */ + pio_tx_write_fragment(queue, skb, packet, sizeof(struct b43_txhdr_fw4)); + + /* Account for the packet size. + * (We must not overflow the device TX queue) + */ + queue->tx_devq_packets++; + queue->tx_devq_used += octets; + + /* Transmission started, everything ok, move the + * packet to the txrunning list. 
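+	 * It is taken off that list again once the TX status for its cookie is reported.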
+ */ + list_move_tail(&packet->list, &queue->txrunning); + + return 0; +} + +static void tx_tasklet(unsigned long d) +{ + struct b43_pioqueue *queue = (struct b43_pioqueue *)d; + struct b43_wldev *dev = queue->dev; + unsigned long flags; + struct b43_pio_txpacket *packet, *tmp_packet; + int err; + u16 txctl; + + spin_lock_irqsave(&dev->wl->irq_lock, flags); + if (queue->tx_frozen) + goto out_unlock; + txctl = b43_pio_read(queue, B43_PIO_TXCTL); + if (txctl & B43_PIO_TXCTL_SUSPEND) + goto out_unlock; + + list_for_each_entry_safe(packet, tmp_packet, &queue->txqueue, list) { + /* Try to transmit the packet. This can fail, if + * the device queue is full. In case of failure, the + * packet is left in the txqueue. + * If transmission succeed, the packet is moved to txrunning. + * If it is impossible to transmit the packet, it + * is dropped. + */ + err = pio_tx_packet(packet); + if (err) + break; + } + out_unlock: + spin_unlock_irqrestore(&dev->wl->irq_lock, flags); +} + +static void setup_txqueues(struct b43_pioqueue *queue) +{ + struct b43_pio_txpacket *packet; + int i; + + queue->nr_txfree = B43_PIO_MAXTXPACKETS; + for (i = 0; i < B43_PIO_MAXTXPACKETS; i++) { + packet = &(queue->tx_packets_cache[i]); + + packet->queue = queue; + INIT_LIST_HEAD(&packet->list); + packet->index = i; + + list_add(&packet->list, &queue->txfree); + } +} + +static +struct b43_pioqueue *b43_setup_pioqueue(struct b43_wldev *dev, + u16 pio_mmio_base) +{ + struct b43_pioqueue *queue; + u16 qsize; + + queue = kzalloc(sizeof(*queue), GFP_KERNEL); + if (!queue) + goto out; + + queue->dev = dev; + queue->mmio_base = pio_mmio_base; + queue->need_workarounds = (dev->dev->id.revision < 3); + + INIT_LIST_HEAD(&queue->txfree); + INIT_LIST_HEAD(&queue->txqueue); + INIT_LIST_HEAD(&queue->txrunning); + tasklet_init(&queue->txtask, tx_tasklet, (unsigned long)queue); + + b43_write32(dev, B43_MMIO_MACCTL, b43_read32(dev, B43_MMIO_MACCTL) + & ~B43_MACCTL_BE); + + qsize = b43_read16(dev, queue->mmio_base + B43_PIO_TXQBUFSIZE); + if (qsize == 0) { + b43err(dev->wl, "This card does not support PIO " + "operation mode. 
Please use DMA mode " + "(module parameter pio=0).\n"); + goto err_freequeue; + } + if (qsize <= B43_PIO_TXQADJUST) { + b43err(dev->wl, "PIO tx device-queue too small (%u)\n", qsize); + goto err_freequeue; + } + qsize -= B43_PIO_TXQADJUST; + queue->tx_devq_size = qsize; + + setup_txqueues(queue); + + out: + return queue; + + err_freequeue: + kfree(queue); + queue = NULL; + goto out; +} + +static void cancel_transfers(struct b43_pioqueue *queue) +{ + struct b43_pio_txpacket *packet, *tmp_packet; + + tasklet_disable(&queue->txtask); + + list_for_each_entry_safe(packet, tmp_packet, &queue->txrunning, list) + free_txpacket(packet); + list_for_each_entry_safe(packet, tmp_packet, &queue->txqueue, list) + free_txpacket(packet); +} + +static void b43_destroy_pioqueue(struct b43_pioqueue *queue) +{ + if (!queue) + return; + + cancel_transfers(queue); + kfree(queue); +} + +void b43_pio_free(struct b43_wldev *dev) +{ + struct b43_pio *pio; + + if (!b43_using_pio(dev)) + return; + pio = &dev->pio; + + b43_destroy_pioqueue(pio->queue3); + pio->queue3 = NULL; + b43_destroy_pioqueue(pio->queue2); + pio->queue2 = NULL; + b43_destroy_pioqueue(pio->queue1); + pio->queue1 = NULL; + b43_destroy_pioqueue(pio->queue0); + pio->queue0 = NULL; +} + +int b43_pio_init(struct b43_wldev *dev) +{ + struct b43_pio *pio = &dev->pio; + struct b43_pioqueue *queue; + int err = -ENOMEM; + + queue = b43_setup_pioqueue(dev, B43_MMIO_PIO1_BASE); + if (!queue) + goto out; + pio->queue0 = queue; + + queue = b43_setup_pioqueue(dev, B43_MMIO_PIO2_BASE); + if (!queue) + goto err_destroy0; + pio->queue1 = queue; + + queue = b43_setup_pioqueue(dev, B43_MMIO_PIO3_BASE); + if (!queue) + goto err_destroy1; + pio->queue2 = queue; + + queue = b43_setup_pioqueue(dev, B43_MMIO_PIO4_BASE); + if (!queue) + goto err_destroy2; + pio->queue3 = queue; + + if (dev->dev->id.revision < 3) + dev->irq_savedstate |= B43_IRQ_PIO_WORKAROUND; + + b43dbg(dev->wl, "PIO initialized\n"); + err = 0; + out: + return err; + + err_destroy2: + b43_destroy_pioqueue(pio->queue2); + pio->queue2 = NULL; + err_destroy1: + b43_destroy_pioqueue(pio->queue1); + pio->queue1 = NULL; + err_destroy0: + b43_destroy_pioqueue(pio->queue0); + pio->queue0 = NULL; + goto out; +} + +int b43_pio_tx(struct b43_wldev *dev, + struct sk_buff *skb, struct ieee80211_tx_control *ctl) +{ + struct b43_pioqueue *queue = dev->pio.queue1; + struct b43_pio_txpacket *packet; + + B43_WARN_ON(queue->tx_suspended); + B43_WARN_ON(list_empty(&queue->txfree)); + + packet = list_entry(queue->txfree.next, struct b43_pio_txpacket, list); + packet->skb = skb; + + memset(&packet->txstat, 0, sizeof(packet->txstat)); + memcpy(&packet->txstat.control, ctl, sizeof(*ctl)); + + list_move_tail(&packet->list, &queue->txqueue); + queue->nr_txfree--; + queue->nr_tx_packets++; + B43_WARN_ON(queue->nr_txfree >= B43_PIO_MAXTXPACKETS); + + tasklet_schedule(&queue->txtask); + + return 0; +} + +void b43_pio_handle_txstatus(struct b43_wldev *dev, + const struct b43_txstatus *status) +{ + struct b43_pioqueue *queue; + struct b43_pio_txpacket *packet; + + queue = parse_cookie(dev, status->cookie, &packet); + if (B43_WARN_ON(!queue)) + return; + + queue->tx_devq_packets--; + queue->tx_devq_used -= + (packet->skb->len + sizeof(struct b43_txhdr_fw4)); + + if (status->acked) { + packet->txstat.flags |= IEEE80211_TX_STATUS_ACK; + } else { + if (!(packet->txstat.control.flags & IEEE80211_TXCTL_NO_ACK)) + packet->txstat.excessive_retries = 1; + } + if (status->frame_count == 0) { + /* The frame was not transmitted at all. 
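+		 * Report a retry count of zero; otherwise it is frame_count - 1.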
*/ + packet->txstat.retry_count = 0; + } else + packet->txstat.retry_count = status->frame_count - 1; + ieee80211_tx_status_irqsafe(dev->wl->hw, packet->skb, + &(packet->txstat)); + packet->skb = NULL; + + free_txpacket(packet); + /* If there are packets on the txqueue, poke the tasklet + * to transmit them. + */ + if (!list_empty(&queue->txqueue)) + tasklet_schedule(&queue->txtask); +} + +void b43_pio_get_tx_stats(struct b43_wldev *dev, + struct ieee80211_tx_queue_stats *stats) +{ + struct b43_pio *pio = &dev->pio; + struct b43_pioqueue *queue; + struct ieee80211_tx_queue_stats_data *data; + + queue = pio->queue1; + data = &(stats->data[0]); + data->len = B43_PIO_MAXTXPACKETS - queue->nr_txfree; + data->limit = B43_PIO_MAXTXPACKETS; + data->count = queue->nr_tx_packets; +} + +static void pio_rx_error(struct b43_pioqueue *queue, + int clear_buffers, const char *error) +{ + int i; + + b43err(queue->dev->wl, "PIO RX error: %s\n", error); + b43_pio_write(queue, B43_PIO_RXCTL, B43_PIO_RXCTL_READY); + if (clear_buffers) { + B43_WARN_ON(queue->mmio_base != B43_MMIO_PIO1_BASE); + for (i = 0; i < 15; i++) { + /* Dummy read. */ + b43_pio_read(queue, B43_PIO_RXDATA); + } + } +} + +void b43_pio_rx(struct b43_pioqueue *queue) +{ + __le16 preamble[21] = { 0 }; + struct b43_rxhdr_fw4 *rxhdr; + u16 tmp, len; + u32 macstat; + int i, preamble_readwords; + struct sk_buff *skb; + + tmp = b43_pio_read(queue, B43_PIO_RXCTL); + if (!(tmp & B43_PIO_RXCTL_DATAAVAILABLE)) + return; + b43_pio_write(queue, B43_PIO_RXCTL, B43_PIO_RXCTL_DATAAVAILABLE); + + for (i = 0; i < 10; i++) { + tmp = b43_pio_read(queue, B43_PIO_RXCTL); + if (tmp & B43_PIO_RXCTL_READY) + goto data_ready; + udelay(10); + } + b43dbg(queue->dev->wl, "PIO RX timed out\n"); + return; +data_ready: + + len = b43_pio_read(queue, B43_PIO_RXDATA); + if (unlikely(len > 0x700)) { + pio_rx_error(queue, 0, "len > 0x700"); + return; + } + if (unlikely(len == 0 && queue->mmio_base != B43_MMIO_PIO4_BASE)) { + pio_rx_error(queue, 0, "len == 0"); + return; + } + preamble[0] = cpu_to_le16(len); + if (queue->mmio_base == B43_MMIO_PIO4_BASE) + preamble_readwords = 14 / sizeof(u16); + else + preamble_readwords = 18 / sizeof(u16); + for (i = 0; i < preamble_readwords; i++) { + tmp = b43_pio_read(queue, B43_PIO_RXDATA); + preamble[i + 1] = cpu_to_le16(tmp); + } + rxhdr = (struct b43_rxhdr_fw4 *)preamble; + macstat = le32_to_cpu(rxhdr->mac_status); + if (macstat & B43_RX_MAC_FCSERR) { + pio_rx_error(queue, + (queue->mmio_base == B43_MMIO_PIO1_BASE), + "Frame FCS error"); + return; + } + if (queue->mmio_base == B43_MMIO_PIO4_BASE) { + /* We received an xmit status. */ + struct b43_hwtxstatus *hw; + + hw = (struct b43_hwtxstatus *)(preamble + 1); + b43_handle_hwtxstatus(queue->dev, hw); + + return; + } + + skb = dev_alloc_skb(len); + if (unlikely(!skb)) { + pio_rx_error(queue, 1, "OOM"); + return; + } + skb_put(skb, len); + for (i = 0; i < len - 1; i += 2) { + tmp = b43_pio_read(queue, B43_PIO_RXDATA); + *((__le16 *)(skb->data + i)) = cpu_to_le16(tmp); + } + if (len % 2) { + tmp = b43_pio_read(queue, B43_PIO_RXDATA); + skb->data[len - 1] = (tmp & 0x00FF); +/* The specs say the following is required, but + * it is wrong and corrupts the PLCP. If we don't do + * this, the PLCP seems to be correct. So ifdef it out for now. 
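+ * (The disabled code would copy the high byte of the last word into the start of the buffer.)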
+ */ +#if 0 + if (rxflags2 & B43_RXHDR_FLAGS2_TYPE2FRAME) + skb->data[2] = (tmp & 0xFF00) >> 8; + else + skb->data[0] = (tmp & 0xFF00) >> 8; +#endif + } + b43_rx(queue->dev, skb, rxhdr); +} + +void b43_pio_tx_suspend(struct b43_pioqueue *queue) +{ + b43_power_saving_ctl_bits(queue->dev, B43_PS_AWAKE); + b43_pio_write(queue, B43_PIO_TXCTL, b43_pio_read(queue, B43_PIO_TXCTL) + | B43_PIO_TXCTL_SUSPEND); +} + +void b43_pio_tx_resume(struct b43_pioqueue *queue) +{ + b43_pio_write(queue, B43_PIO_TXCTL, b43_pio_read(queue, B43_PIO_TXCTL) + & ~B43_PIO_TXCTL_SUSPEND); + b43_power_saving_ctl_bits(queue->dev, 0); + tasklet_schedule(&queue->txtask); +} + +void b43_pio_freeze_txqueues(struct b43_wldev *dev) +{ + struct b43_pio *pio; + + B43_WARN_ON(!b43_using_pio(dev)); + pio = &dev->pio; + pio->queue0->tx_frozen = 1; + pio->queue1->tx_frozen = 1; + pio->queue2->tx_frozen = 1; + pio->queue3->tx_frozen = 1; +} + +void b43_pio_thaw_txqueues(struct b43_wldev *dev) +{ + struct b43_pio *pio; + + B43_WARN_ON(!b43_using_pio(dev)); + pio = &dev->pio; + pio->queue0->tx_frozen = 0; + pio->queue1->tx_frozen = 0; + pio->queue2->tx_frozen = 0; + pio->queue3->tx_frozen = 0; + if (!list_empty(&pio->queue0->txqueue)) + tasklet_schedule(&pio->queue0->txtask); + if (!list_empty(&pio->queue1->txqueue)) + tasklet_schedule(&pio->queue1->txtask); + if (!list_empty(&pio->queue2->txqueue)) + tasklet_schedule(&pio->queue2->txtask); + if (!list_empty(&pio->queue3->txqueue)) + tasklet_schedule(&pio->queue3->txtask); +} diff --git a/drivers/net/wireless/b43/pio.h b/drivers/net/wireless/b43/pio.h new file mode 100644 index 0000000000000000000000000000000000000000..34a44c1b6314126f1929bb2feaf6acf912428de4 --- /dev/null +++ b/drivers/net/wireless/b43/pio.h @@ -0,0 +1,152 @@ +#ifndef B43_PIO_H_ +#define B43_PIO_H_ + +#include "b43.h" + +#include +#include +#include + +#define B43_PIO_TXCTL 0x00 +#define B43_PIO_TXDATA 0x02 +#define B43_PIO_TXQBUFSIZE 0x04 +#define B43_PIO_RXCTL 0x08 +#define B43_PIO_RXDATA 0x0A + +#define B43_PIO_TXCTL_WRITELO (1 << 0) +#define B43_PIO_TXCTL_WRITEHI (1 << 1) +#define B43_PIO_TXCTL_COMPLETE (1 << 2) +#define B43_PIO_TXCTL_INIT (1 << 3) +#define B43_PIO_TXCTL_SUSPEND (1 << 7) + +#define B43_PIO_RXCTL_DATAAVAILABLE (1 << 0) +#define B43_PIO_RXCTL_READY (1 << 1) + +/* PIO constants */ +#define B43_PIO_MAXTXDEVQPACKETS 31 +#define B43_PIO_TXQADJUST 80 + +/* PIO tuning knobs */ +#define B43_PIO_MAXTXPACKETS 256 + +#ifdef CONFIG_B43_PIO + +struct b43_pioqueue; +struct b43_xmitstatus; + +struct b43_pio_txpacket { + struct b43_pioqueue *queue; + struct sk_buff *skb; + struct ieee80211_tx_status txstat; + struct list_head list; + u16 index; /* Index in the tx_packets_cache */ +}; + +struct b43_pioqueue { + struct b43_wldev *dev; + u16 mmio_base; + + bool tx_suspended; + bool tx_frozen; + bool need_workarounds; /* Workarounds needed for core.rev < 3 */ + + /* Adjusted size of the device internal TX buffer. */ + u16 tx_devq_size; + /* Used octets of the device internal TX buffer. */ + u16 tx_devq_used; + /* Used packet slots in the device internal TX buffer. */ + u8 tx_devq_packets; + /* Packets from the txfree list can + * be taken on incoming TX requests. + */ + struct list_head txfree; + unsigned int nr_txfree; + /* Packets on the txqueue are queued, + * but not completely written to the chip, yet. + */ + struct list_head txqueue; + /* Packets on the txrunning queue are completely + * posted to the device. We are waiting for the txstatus. + */ + struct list_head txrunning; + /* Total number or packets sent. 
+ * (This counter can obviously wrap). + */ + unsigned int nr_tx_packets; + struct tasklet_struct txtask; + struct b43_pio_txpacket tx_packets_cache[B43_PIO_MAXTXPACKETS]; +}; + +static inline u16 b43_pio_read(struct b43_pioqueue *queue, u16 offset) +{ + return b43_read16(queue->dev, queue->mmio_base + offset); +} + +static inline + void b43_pio_write(struct b43_pioqueue *queue, u16 offset, u16 value) +{ + b43_write16(queue->dev, queue->mmio_base + offset, value); + mmiowb(); +} + +int b43_pio_init(struct b43_wldev *dev); +void b43_pio_free(struct b43_wldev *dev); + +int b43_pio_tx(struct b43_wldev *dev, + struct sk_buff *skb, struct ieee80211_tx_control *ctl); +void b43_pio_handle_txstatus(struct b43_wldev *dev, + const struct b43_txstatus *status); +void b43_pio_get_tx_stats(struct b43_wldev *dev, + struct ieee80211_tx_queue_stats *stats); +void b43_pio_rx(struct b43_pioqueue *queue); + +/* Suspend TX queue in hardware. */ +void b43_pio_tx_suspend(struct b43_pioqueue *queue); +void b43_pio_tx_resume(struct b43_pioqueue *queue); +/* Suspend (freeze) the TX tasklet (software level). */ +void b43_pio_freeze_txqueues(struct b43_wldev *dev); +void b43_pio_thaw_txqueues(struct b43_wldev *dev); + +#else /* CONFIG_B43_PIO */ + +static inline int b43_pio_init(struct b43_wldev *dev) +{ + return 0; +} +static inline void b43_pio_free(struct b43_wldev *dev) +{ +} +static inline + int b43_pio_tx(struct b43_wldev *dev, + struct sk_buff *skb, struct ieee80211_tx_control *ctl) +{ + return 0; +} +static inline + void b43_pio_handle_txstatus(struct b43_wldev *dev, + const struct b43_txstatus *status) +{ +} +static inline + void b43_pio_get_tx_stats(struct b43_wldev *dev, + struct ieee80211_tx_queue_stats *stats) +{ +} +static inline void b43_pio_rx(struct b43_pioqueue *queue) +{ +} +static inline void b43_pio_tx_suspend(struct b43_pioqueue *queue) +{ +} +static inline void b43_pio_tx_resume(struct b43_pioqueue *queue) +{ +} +static inline void b43_pio_freeze_txqueues(struct b43_wldev *dev) +{ +} +static inline void b43_pio_thaw_txqueues(struct b43_wldev *dev) +{ +} + +#endif /* CONFIG_B43_PIO */ +#endif /* B43_PIO_H_ */ diff --git a/drivers/net/wireless/b43/rfkill.c b/drivers/net/wireless/b43/rfkill.c new file mode 100644 index 0000000000000000000000000000000000000000..800e0a61a7f589f1f649b7f6417cf6488267f4d5 --- /dev/null +++ b/drivers/net/wireless/b43/rfkill.c @@ -0,0 +1,184 @@ +/* + + Broadcom B43 wireless driver + RFKILL support + + Copyright (c) 2007 Michael Buesch + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; see the file COPYING. If not, write to + the Free Software Foundation, Inc., 51 Franklin Steet, Fifth Floor, + Boston, MA 02110-1301, USA. + +*/ + +#include "rfkill.h" +#include "b43.h" + + +/* Returns TRUE, if the radio is enabled in hardware. 
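+ * PHY rev >= 3 checks the active-low RADIO_HWENABLED_HI register; older PHYs use the active-high RADIO_HWENABLED_LO register.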
*/ +static bool b43_is_hw_radio_enabled(struct b43_wldev *dev) +{ + if (dev->phy.rev >= 3) { + if (!(b43_read32(dev, B43_MMIO_RADIO_HWENABLED_HI) + & B43_MMIO_RADIO_HWENABLED_HI_MASK)) + return 1; + } else { + if (b43_read16(dev, B43_MMIO_RADIO_HWENABLED_LO) + & B43_MMIO_RADIO_HWENABLED_LO_MASK) + return 1; + } + return 0; +} + +/* The poll callback for the hardware button. */ +static void b43_rfkill_poll(struct input_polled_dev *poll_dev) +{ + struct b43_wldev *dev = poll_dev->private; + struct b43_wl *wl = dev->wl; + bool enabled; + + mutex_lock(&wl->mutex); + B43_WARN_ON(b43_status(dev) < B43_STAT_INITIALIZED); + enabled = b43_is_hw_radio_enabled(dev); + if (unlikely(enabled != dev->radio_hw_enable)) { + dev->radio_hw_enable = enabled; + b43info(wl, "Radio hardware status changed to %s\n", + enabled ? "ENABLED" : "DISABLED"); + mutex_unlock(&wl->mutex); + input_report_key(poll_dev->input, KEY_WLAN, enabled); + } else + mutex_unlock(&wl->mutex); +} + +/* Called when the RFKILL toggled in software. + * This is called without locking. */ +static int b43_rfkill_soft_toggle(void *data, enum rfkill_state state) +{ + struct b43_wldev *dev = data; + struct b43_wl *wl = dev->wl; + int err = 0; + + mutex_lock(&wl->mutex); + if (b43_status(dev) < B43_STAT_INITIALIZED) + goto out_unlock; + + switch (state) { + case RFKILL_STATE_ON: + if (!dev->radio_hw_enable) { + /* No luck. We can't toggle the hardware RF-kill + * button from software. */ + err = -EBUSY; + goto out_unlock; + } + if (!dev->phy.radio_on) + b43_radio_turn_on(dev); + break; + case RFKILL_STATE_OFF: + if (dev->phy.radio_on) + b43_radio_turn_off(dev, 0); + break; + } + +out_unlock: + mutex_unlock(&wl->mutex); + + return err; +} + +char * b43_rfkill_led_name(struct b43_wldev *dev) +{ + struct b43_wl *wl = dev->wl; + + if (!wl->rfkill.rfkill) + return NULL; + return rfkill_get_led_name(wl->rfkill.rfkill); +} + +void b43_rfkill_init(struct b43_wldev *dev) +{ + struct b43_wl *wl = dev->wl; + struct b43_rfkill *rfk = &(wl->rfkill); + int err; + + if (rfk->rfkill) { + err = rfkill_register(rfk->rfkill); + if (err) { + b43warn(wl, "Failed to register RF-kill button\n"); + goto err_free_rfk; + } + } + if (rfk->poll_dev) { + err = input_register_polled_device(rfk->poll_dev); + if (err) { + b43warn(wl, "Failed to register RF-kill polldev\n"); + goto err_free_polldev; + } + } + + return; +err_free_rfk: + rfkill_free(rfk->rfkill); + rfk->rfkill = NULL; +err_free_polldev: + input_free_polled_device(rfk->poll_dev); + rfk->poll_dev = NULL; +} + +void b43_rfkill_exit(struct b43_wldev *dev) +{ + struct b43_rfkill *rfk = &(dev->wl->rfkill); + + if (rfk->poll_dev) + input_unregister_polled_device(rfk->poll_dev); + if (rfk->rfkill) + rfkill_unregister(rfk->rfkill); +} + +void b43_rfkill_alloc(struct b43_wldev *dev) +{ + struct b43_wl *wl = dev->wl; + struct b43_rfkill *rfk = &(wl->rfkill); + + snprintf(rfk->name, sizeof(rfk->name), + "b43-%s", wiphy_name(wl->hw->wiphy)); + + rfk->rfkill = rfkill_allocate(dev->dev->dev, RFKILL_TYPE_WLAN); + if (!rfk->rfkill) { + b43warn(wl, "Failed to allocate RF-kill button\n"); + return; + } + rfk->rfkill->name = rfk->name; + rfk->rfkill->state = RFKILL_STATE_ON; + rfk->rfkill->data = dev; + rfk->rfkill->toggle_radio = b43_rfkill_soft_toggle; + rfk->rfkill->user_claim_unsupported = 1; + + rfk->poll_dev = input_allocate_polled_device(); + if (rfk->poll_dev) { + rfk->poll_dev->private = dev; + rfk->poll_dev->poll = b43_rfkill_poll; + rfk->poll_dev->poll_interval = 1000; /* msecs */ + } else + b43warn(wl, "Failed to 
allocate RF-kill polldev\n"); +} + +void b43_rfkill_free(struct b43_wldev *dev) +{ + struct b43_rfkill *rfk = &(dev->wl->rfkill); + + input_free_polled_device(rfk->poll_dev); + rfk->poll_dev = NULL; + rfkill_free(rfk->rfkill); + rfk->rfkill = NULL; +} diff --git a/drivers/net/wireless/b43/rfkill.h b/drivers/net/wireless/b43/rfkill.h new file mode 100644 index 0000000000000000000000000000000000000000..29544e8c9e5f41e45878fda6d80941aaa264f0dd --- /dev/null +++ b/drivers/net/wireless/b43/rfkill.h @@ -0,0 +1,58 @@ +#ifndef B43_RFKILL_H_ +#define B43_RFKILL_H_ + +struct b43_wldev; + + +#ifdef CONFIG_B43_RFKILL + +#include +#include + + +struct b43_rfkill { + /* The RFKILL subsystem data structure */ + struct rfkill *rfkill; + /* The poll device for the RFKILL input button */ + struct input_polled_dev *poll_dev; + /* The unique name of this rfkill switch */ + char name[32]; +}; + +/* All the init functions return void, because we are not interested + * in failing the b43 init process when rfkill init failed. */ +void b43_rfkill_alloc(struct b43_wldev *dev); +void b43_rfkill_free(struct b43_wldev *dev); +void b43_rfkill_init(struct b43_wldev *dev); +void b43_rfkill_exit(struct b43_wldev *dev); + +char * b43_rfkill_led_name(struct b43_wldev *dev); + + +#else /* CONFIG_B43_RFKILL */ +/* No RFKILL support. */ + +struct b43_rfkill { + /* empty */ +}; + +static inline void b43_rfkill_alloc(struct b43_wldev *dev) +{ +} +static inline void b43_rfkill_free(struct b43_wldev *dev) +{ +} +static inline void b43_rfkill_init(struct b43_wldev *dev) +{ +} +static inline void b43_rfkill_exit(struct b43_wldev *dev) +{ +} +static inline char * b43_rfkill_led_name(struct b43_wldev *dev) +{ + return NULL; +} + +#endif /* CONFIG_B43_RFKILL */ + +#endif /* B43_RFKILL_H_ */ diff --git a/drivers/net/wireless/b43/sysfs.c b/drivers/net/wireless/b43/sysfs.c new file mode 100644 index 0000000000000000000000000000000000000000..fcb777383e70e663245f7d84b46f0f32bfd02a89 --- /dev/null +++ b/drivers/net/wireless/b43/sysfs.c @@ -0,0 +1,235 @@ +/* + + Broadcom B43 wireless driver + + SYSFS support routines + + Copyright (c) 2006 Michael Buesch + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; see the file COPYING. If not, write to + the Free Software Foundation, Inc., 51 Franklin Steet, Fifth Floor, + Boston, MA 02110-1301, USA. 
+ +*/ + +#include "b43.h" +#include "sysfs.h" +#include "main.h" +#include "phy.h" + +#include + +#define GENERIC_FILESIZE 64 + +static int get_integer(const char *buf, size_t count) +{ + char tmp[10 + 1] = { 0 }; + int ret = -EINVAL; + + if (count == 0) + goto out; + count = min(count, (size_t) 10); + memcpy(tmp, buf, count); + ret = simple_strtol(tmp, NULL, 10); + out: + return ret; +} + +static int get_boolean(const char *buf, size_t count) +{ + if (count != 0) { + if (buf[0] == '1') + return 1; + if (buf[0] == '0') + return 0; + if (count >= 4 && memcmp(buf, "true", 4) == 0) + return 1; + if (count >= 5 && memcmp(buf, "false", 5) == 0) + return 0; + if (count >= 3 && memcmp(buf, "yes", 3) == 0) + return 1; + if (count >= 2 && memcmp(buf, "no", 2) == 0) + return 0; + if (count >= 2 && memcmp(buf, "on", 2) == 0) + return 1; + if (count >= 3 && memcmp(buf, "off", 3) == 0) + return 0; + } + return -EINVAL; +} + +static ssize_t b43_attr_interfmode_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct b43_wldev *wldev = dev_to_b43_wldev(dev); + ssize_t count = 0; + + if (!capable(CAP_NET_ADMIN)) + return -EPERM; + + mutex_lock(&wldev->wl->mutex); + + switch (wldev->phy.interfmode) { + case B43_INTERFMODE_NONE: + count = + snprintf(buf, PAGE_SIZE, + "0 (No Interference Mitigation)\n"); + break; + case B43_INTERFMODE_NONWLAN: + count = + snprintf(buf, PAGE_SIZE, + "1 (Non-WLAN Interference Mitigation)\n"); + break; + case B43_INTERFMODE_MANUALWLAN: + count = + snprintf(buf, PAGE_SIZE, + "2 (WLAN Interference Mitigation)\n"); + break; + default: + B43_WARN_ON(1); + } + + mutex_unlock(&wldev->wl->mutex); + + return count; +} + +static ssize_t b43_attr_interfmode_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct b43_wldev *wldev = dev_to_b43_wldev(dev); + unsigned long flags; + int err; + int mode; + + if (!capable(CAP_NET_ADMIN)) + return -EPERM; + + mode = get_integer(buf, count); + switch (mode) { + case 0: + mode = B43_INTERFMODE_NONE; + break; + case 1: + mode = B43_INTERFMODE_NONWLAN; + break; + case 2: + mode = B43_INTERFMODE_MANUALWLAN; + break; + case 3: + mode = B43_INTERFMODE_AUTOWLAN; + break; + default: + return -EINVAL; + } + + mutex_lock(&wldev->wl->mutex); + spin_lock_irqsave(&wldev->wl->irq_lock, flags); + + err = b43_radio_set_interference_mitigation(wldev, mode); + if (err) { + b43err(wldev->wl, "Interference Mitigation not " + "supported by device\n"); + } + mmiowb(); + spin_unlock_irqrestore(&wldev->wl->irq_lock, flags); + mutex_unlock(&wldev->wl->mutex); + + return err ? 
err : count; +} + +static DEVICE_ATTR(interference, 0644, + b43_attr_interfmode_show, b43_attr_interfmode_store); + +static ssize_t b43_attr_preamble_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct b43_wldev *wldev = dev_to_b43_wldev(dev); + ssize_t count; + + if (!capable(CAP_NET_ADMIN)) + return -EPERM; + + mutex_lock(&wldev->wl->mutex); + + if (wldev->short_preamble) + count = + snprintf(buf, PAGE_SIZE, "1 (Short Preamble enabled)\n"); + else + count = + snprintf(buf, PAGE_SIZE, "0 (Short Preamble disabled)\n"); + + mutex_unlock(&wldev->wl->mutex); + + return count; +} + +static ssize_t b43_attr_preamble_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct b43_wldev *wldev = dev_to_b43_wldev(dev); + unsigned long flags; + int value; + + if (!capable(CAP_NET_ADMIN)) + return -EPERM; + + value = get_boolean(buf, count); + if (value < 0) + return value; + mutex_lock(&wldev->wl->mutex); + spin_lock_irqsave(&wldev->wl->irq_lock, flags); + + wldev->short_preamble = !!value; + + spin_unlock_irqrestore(&wldev->wl->irq_lock, flags); + mutex_unlock(&wldev->wl->mutex); + + return count; +} + +static DEVICE_ATTR(shortpreamble, 0644, + b43_attr_preamble_show, b43_attr_preamble_store); + +int b43_sysfs_register(struct b43_wldev *wldev) +{ + struct device *dev = wldev->dev->dev; + int err; + + B43_WARN_ON(b43_status(wldev) != B43_STAT_INITIALIZED); + + err = device_create_file(dev, &dev_attr_interference); + if (err) + goto out; + err = device_create_file(dev, &dev_attr_shortpreamble); + if (err) + goto err_remove_interfmode; + + out: + return err; + err_remove_interfmode: + device_remove_file(dev, &dev_attr_interference); + goto out; +} + +void b43_sysfs_unregister(struct b43_wldev *wldev) +{ + struct device *dev = wldev->dev->dev; + + device_remove_file(dev, &dev_attr_shortpreamble); + device_remove_file(dev, &dev_attr_interference); +} diff --git a/drivers/net/wireless/b43/sysfs.h b/drivers/net/wireless/b43/sysfs.h new file mode 100644 index 0000000000000000000000000000000000000000..12bda9ef1a85cad8bcafb6c2e1318eccbb91daac --- /dev/null +++ b/drivers/net/wireless/b43/sysfs.h @@ -0,0 +1,9 @@ +#ifndef B43_SYSFS_H_ +#define B43_SYSFS_H_ + +struct b43_wldev; + +int b43_sysfs_register(struct b43_wldev *dev); +void b43_sysfs_unregister(struct b43_wldev *dev); + +#endif /* B43_SYSFS_H_ */ diff --git a/drivers/net/wireless/b43/tables.c b/drivers/net/wireless/b43/tables.c new file mode 100644 index 0000000000000000000000000000000000000000..15a87183a572612b68b39169b3ac85278d35426b --- /dev/null +++ b/drivers/net/wireless/b43/tables.c @@ -0,0 +1,375 @@ +/* + + Broadcom B43 wireless driver + + Copyright (c) 2005 Martin Langer , + Copyright (c) 2005 Stefano Brivio + Copyright (c) 2006, 2006 Michael Buesch + Copyright (c) 2005 Danny van Dyk + Copyright (c) 2005 Andreas Jaggi + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; see the file COPYING. 
If not, write to + the Free Software Foundation, Inc., 51 Franklin Steet, Fifth Floor, + Boston, MA 02110-1301, USA. + +*/ + +#include "b43.h" +#include "tables.h" +#include "phy.h" + +const u32 b43_tab_rotor[] = { + 0xFEB93FFD, 0xFEC63FFD, /* 0 */ + 0xFED23FFD, 0xFEDF3FFD, + 0xFEEC3FFE, 0xFEF83FFE, + 0xFF053FFE, 0xFF113FFE, + 0xFF1E3FFE, 0xFF2A3FFF, /* 8 */ + 0xFF373FFF, 0xFF443FFF, + 0xFF503FFF, 0xFF5D3FFF, + 0xFF693FFF, 0xFF763FFF, + 0xFF824000, 0xFF8F4000, /* 16 */ + 0xFF9B4000, 0xFFA84000, + 0xFFB54000, 0xFFC14000, + 0xFFCE4000, 0xFFDA4000, + 0xFFE74000, 0xFFF34000, /* 24 */ + 0x00004000, 0x000D4000, + 0x00194000, 0x00264000, + 0x00324000, 0x003F4000, + 0x004B4000, 0x00584000, /* 32 */ + 0x00654000, 0x00714000, + 0x007E4000, 0x008A3FFF, + 0x00973FFF, 0x00A33FFF, + 0x00B03FFF, 0x00BC3FFF, /* 40 */ + 0x00C93FFF, 0x00D63FFF, + 0x00E23FFE, 0x00EF3FFE, + 0x00FB3FFE, 0x01083FFE, + 0x01143FFE, 0x01213FFD, /* 48 */ + 0x012E3FFD, 0x013A3FFD, + 0x01473FFD, +}; + +const u32 b43_tab_retard[] = { + 0xDB93CB87, 0xD666CF64, /* 0 */ + 0xD1FDD358, 0xCDA6D826, + 0xCA38DD9F, 0xC729E2B4, + 0xC469E88E, 0xC26AEE2B, + 0xC0DEF46C, 0xC073FA62, /* 8 */ + 0xC01D00D5, 0xC0760743, + 0xC1560D1E, 0xC2E51369, + 0xC4ED18FF, 0xC7AC1ED7, + 0xCB2823B2, 0xCEFA28D9, /* 16 */ + 0xD2F62D3F, 0xD7BB3197, + 0xDCE53568, 0xE1FE3875, + 0xE7D13B35, 0xED663D35, + 0xF39B3EC4, 0xF98E3FA7, /* 24 */ + 0x00004000, 0x06723FA7, + 0x0C653EC4, 0x129A3D35, + 0x182F3B35, 0x1E023875, + 0x231B3568, 0x28453197, /* 32 */ + 0x2D0A2D3F, 0x310628D9, + 0x34D823B2, 0x38541ED7, + 0x3B1318FF, 0x3D1B1369, + 0x3EAA0D1E, 0x3F8A0743, /* 40 */ + 0x3FE300D5, 0x3F8DFA62, + 0x3F22F46C, 0x3D96EE2B, + 0x3B97E88E, 0x38D7E2B4, + 0x35C8DD9F, 0x325AD826, /* 48 */ + 0x2E03D358, 0x299ACF64, + 0x246DCB87, +}; + +const u16 b43_tab_finefreqa[] = { + 0x0082, 0x0082, 0x0102, 0x0182, /* 0 */ + 0x0202, 0x0282, 0x0302, 0x0382, + 0x0402, 0x0482, 0x0502, 0x0582, + 0x05E2, 0x0662, 0x06E2, 0x0762, + 0x07E2, 0x0842, 0x08C2, 0x0942, /* 16 */ + 0x09C2, 0x0A22, 0x0AA2, 0x0B02, + 0x0B82, 0x0BE2, 0x0C62, 0x0CC2, + 0x0D42, 0x0DA2, 0x0E02, 0x0E62, + 0x0EE2, 0x0F42, 0x0FA2, 0x1002, /* 32 */ + 0x1062, 0x10C2, 0x1122, 0x1182, + 0x11E2, 0x1242, 0x12A2, 0x12E2, + 0x1342, 0x13A2, 0x1402, 0x1442, + 0x14A2, 0x14E2, 0x1542, 0x1582, /* 48 */ + 0x15E2, 0x1622, 0x1662, 0x16C1, + 0x1701, 0x1741, 0x1781, 0x17E1, + 0x1821, 0x1861, 0x18A1, 0x18E1, + 0x1921, 0x1961, 0x19A1, 0x19E1, /* 64 */ + 0x1A21, 0x1A61, 0x1AA1, 0x1AC1, + 0x1B01, 0x1B41, 0x1B81, 0x1BA1, + 0x1BE1, 0x1C21, 0x1C41, 0x1C81, + 0x1CA1, 0x1CE1, 0x1D01, 0x1D41, /* 80 */ + 0x1D61, 0x1DA1, 0x1DC1, 0x1E01, + 0x1E21, 0x1E61, 0x1E81, 0x1EA1, + 0x1EE1, 0x1F01, 0x1F21, 0x1F41, + 0x1F81, 0x1FA1, 0x1FC1, 0x1FE1, /* 96 */ + 0x2001, 0x2041, 0x2061, 0x2081, + 0x20A1, 0x20C1, 0x20E1, 0x2101, + 0x2121, 0x2141, 0x2161, 0x2181, + 0x21A1, 0x21C1, 0x21E1, 0x2201, /* 112 */ + 0x2221, 0x2241, 0x2261, 0x2281, + 0x22A1, 0x22C1, 0x22C1, 0x22E1, + 0x2301, 0x2321, 0x2341, 0x2361, + 0x2361, 0x2381, 0x23A1, 0x23C1, /* 128 */ + 0x23E1, 0x23E1, 0x2401, 0x2421, + 0x2441, 0x2441, 0x2461, 0x2481, + 0x2481, 0x24A1, 0x24C1, 0x24C1, + 0x24E1, 0x2501, 0x2501, 0x2521, /* 144 */ + 0x2541, 0x2541, 0x2561, 0x2561, + 0x2581, 0x25A1, 0x25A1, 0x25C1, + 0x25C1, 0x25E1, 0x2601, 0x2601, + 0x2621, 0x2621, 0x2641, 0x2641, /* 160 */ + 0x2661, 0x2661, 0x2681, 0x2681, + 0x26A1, 0x26A1, 0x26C1, 0x26C1, + 0x26E1, 0x26E1, 0x2701, 0x2701, + 0x2721, 0x2721, 0x2740, 0x2740, /* 176 */ + 0x2760, 0x2760, 0x2780, 0x2780, + 0x2780, 0x27A0, 0x27A0, 0x27C0, + 0x27C0, 0x27E0, 0x27E0, 0x27E0, + 0x2800, 
0x2800, 0x2820, 0x2820, /* 192 */ + 0x2820, 0x2840, 0x2840, 0x2840, + 0x2860, 0x2860, 0x2880, 0x2880, + 0x2880, 0x28A0, 0x28A0, 0x28A0, + 0x28C0, 0x28C0, 0x28C0, 0x28E0, /* 208 */ + 0x28E0, 0x28E0, 0x2900, 0x2900, + 0x2900, 0x2920, 0x2920, 0x2920, + 0x2940, 0x2940, 0x2940, 0x2960, + 0x2960, 0x2960, 0x2960, 0x2980, /* 224 */ + 0x2980, 0x2980, 0x29A0, 0x29A0, + 0x29A0, 0x29A0, 0x29C0, 0x29C0, + 0x29C0, 0x29E0, 0x29E0, 0x29E0, + 0x29E0, 0x2A00, 0x2A00, 0x2A00, /* 240 */ + 0x2A00, 0x2A20, 0x2A20, 0x2A20, + 0x2A20, 0x2A40, 0x2A40, 0x2A40, + 0x2A40, 0x2A60, 0x2A60, 0x2A60, +}; + +const u16 b43_tab_finefreqg[] = { + 0x0089, 0x02E9, 0x0409, 0x04E9, /* 0 */ + 0x05A9, 0x0669, 0x0709, 0x0789, + 0x0829, 0x08A9, 0x0929, 0x0989, + 0x0A09, 0x0A69, 0x0AC9, 0x0B29, + 0x0BA9, 0x0BE9, 0x0C49, 0x0CA9, /* 16 */ + 0x0D09, 0x0D69, 0x0DA9, 0x0E09, + 0x0E69, 0x0EA9, 0x0F09, 0x0F49, + 0x0FA9, 0x0FE9, 0x1029, 0x1089, + 0x10C9, 0x1109, 0x1169, 0x11A9, /* 32 */ + 0x11E9, 0x1229, 0x1289, 0x12C9, + 0x1309, 0x1349, 0x1389, 0x13C9, + 0x1409, 0x1449, 0x14A9, 0x14E9, + 0x1529, 0x1569, 0x15A9, 0x15E9, /* 48 */ + 0x1629, 0x1669, 0x16A9, 0x16E8, + 0x1728, 0x1768, 0x17A8, 0x17E8, + 0x1828, 0x1868, 0x18A8, 0x18E8, + 0x1928, 0x1968, 0x19A8, 0x19E8, /* 64 */ + 0x1A28, 0x1A68, 0x1AA8, 0x1AE8, + 0x1B28, 0x1B68, 0x1BA8, 0x1BE8, + 0x1C28, 0x1C68, 0x1CA8, 0x1CE8, + 0x1D28, 0x1D68, 0x1DC8, 0x1E08, /* 80 */ + 0x1E48, 0x1E88, 0x1EC8, 0x1F08, + 0x1F48, 0x1F88, 0x1FE8, 0x2028, + 0x2068, 0x20A8, 0x2108, 0x2148, + 0x2188, 0x21C8, 0x2228, 0x2268, /* 96 */ + 0x22C8, 0x2308, 0x2348, 0x23A8, + 0x23E8, 0x2448, 0x24A8, 0x24E8, + 0x2548, 0x25A8, 0x2608, 0x2668, + 0x26C8, 0x2728, 0x2787, 0x27E7, /* 112 */ + 0x2847, 0x28C7, 0x2947, 0x29A7, + 0x2A27, 0x2AC7, 0x2B47, 0x2BE7, + 0x2CA7, 0x2D67, 0x2E47, 0x2F67, + 0x3247, 0x3526, 0x3646, 0x3726, /* 128 */ + 0x3806, 0x38A6, 0x3946, 0x39E6, + 0x3A66, 0x3AE6, 0x3B66, 0x3BC6, + 0x3C45, 0x3CA5, 0x3D05, 0x3D85, + 0x3DE5, 0x3E45, 0x3EA5, 0x3EE5, /* 144 */ + 0x3F45, 0x3FA5, 0x4005, 0x4045, + 0x40A5, 0x40E5, 0x4145, 0x4185, + 0x41E5, 0x4225, 0x4265, 0x42C5, + 0x4305, 0x4345, 0x43A5, 0x43E5, /* 160 */ + 0x4424, 0x4464, 0x44C4, 0x4504, + 0x4544, 0x4584, 0x45C4, 0x4604, + 0x4644, 0x46A4, 0x46E4, 0x4724, + 0x4764, 0x47A4, 0x47E4, 0x4824, /* 176 */ + 0x4864, 0x48A4, 0x48E4, 0x4924, + 0x4964, 0x49A4, 0x49E4, 0x4A24, + 0x4A64, 0x4AA4, 0x4AE4, 0x4B23, + 0x4B63, 0x4BA3, 0x4BE3, 0x4C23, /* 192 */ + 0x4C63, 0x4CA3, 0x4CE3, 0x4D23, + 0x4D63, 0x4DA3, 0x4DE3, 0x4E23, + 0x4E63, 0x4EA3, 0x4EE3, 0x4F23, + 0x4F63, 0x4FC3, 0x5003, 0x5043, /* 208 */ + 0x5083, 0x50C3, 0x5103, 0x5143, + 0x5183, 0x51E2, 0x5222, 0x5262, + 0x52A2, 0x52E2, 0x5342, 0x5382, + 0x53C2, 0x5402, 0x5462, 0x54A2, /* 224 */ + 0x5502, 0x5542, 0x55A2, 0x55E2, + 0x5642, 0x5682, 0x56E2, 0x5722, + 0x5782, 0x57E1, 0x5841, 0x58A1, + 0x5901, 0x5961, 0x59C1, 0x5A21, /* 240 */ + 0x5AA1, 0x5B01, 0x5B81, 0x5BE1, + 0x5C61, 0x5D01, 0x5D80, 0x5E20, + 0x5EE0, 0x5FA0, 0x6080, 0x61C0, +}; + +const u16 b43_tab_noisea2[] = { + 0x0001, 0x0001, 0x0001, 0xFFFE, + 0xFFFE, 0x3FFF, 0x1000, 0x0393, +}; + +const u16 b43_tab_noisea3[] = { + 0x4C4C, 0x4C4C, 0x4C4C, 0x2D36, + 0x4C4C, 0x4C4C, 0x4C4C, 0x2D36, +}; + +const u16 b43_tab_noiseg1[] = { + 0x013C, 0x01F5, 0x031A, 0x0631, + 0x0001, 0x0001, 0x0001, 0x0001, +}; + +const u16 b43_tab_noiseg2[] = { + 0x5484, 0x3C40, 0x0000, 0x0000, + 0x0000, 0x0000, 0x0000, 0x0000, +}; + +const u16 b43_tab_noisescaleg1[] = { + 0x6C77, 0x5162, 0x3B40, 0x3335, /* 0 */ + 0x2F2D, 0x2A2A, 0x2527, 0x1F21, + 0x1A1D, 0x1719, 0x1616, 0x1414, + 0x1414, 0x1400, 0x1414, 
0x1614, + 0x1716, 0x1A19, 0x1F1D, 0x2521, /* 16 */ + 0x2A27, 0x2F2A, 0x332D, 0x3B35, + 0x5140, 0x6C62, 0x0077, +}; + +const u16 b43_tab_noisescaleg2[] = { + 0xD8DD, 0xCBD4, 0xBCC0, 0XB6B7, /* 0 */ + 0xB2B0, 0xADAD, 0xA7A9, 0x9FA1, + 0x969B, 0x9195, 0x8F8F, 0x8A8A, + 0x8A8A, 0x8A00, 0x8A8A, 0x8F8A, + 0x918F, 0x9695, 0x9F9B, 0xA7A1, /* 16 */ + 0xADA9, 0xB2AD, 0xB6B0, 0xBCB7, + 0xCBC0, 0xD8D4, 0x00DD, +}; + +const u16 b43_tab_noisescaleg3[] = { + 0xA4A4, 0xA4A4, 0xA4A4, 0xA4A4, /* 0 */ + 0xA4A4, 0xA4A4, 0xA4A4, 0xA4A4, + 0xA4A4, 0xA4A4, 0xA4A4, 0xA4A4, + 0xA4A4, 0xA400, 0xA4A4, 0xA4A4, + 0xA4A4, 0xA4A4, 0xA4A4, 0xA4A4, /* 16 */ + 0xA4A4, 0xA4A4, 0xA4A4, 0xA4A4, + 0xA4A4, 0xA4A4, 0x00A4, +}; + +const u16 b43_tab_sigmasqr1[] = { + 0x007A, 0x0075, 0x0071, 0x006C, /* 0 */ + 0x0067, 0x0063, 0x005E, 0x0059, + 0x0054, 0x0050, 0x004B, 0x0046, + 0x0042, 0x003D, 0x003D, 0x003D, + 0x003D, 0x003D, 0x003D, 0x003D, /* 16 */ + 0x003D, 0x003D, 0x003D, 0x003D, + 0x003D, 0x003D, 0x0000, 0x003D, + 0x003D, 0x003D, 0x003D, 0x003D, + 0x003D, 0x003D, 0x003D, 0x003D, /* 32 */ + 0x003D, 0x003D, 0x003D, 0x003D, + 0x0042, 0x0046, 0x004B, 0x0050, + 0x0054, 0x0059, 0x005E, 0x0063, + 0x0067, 0x006C, 0x0071, 0x0075, /* 48 */ + 0x007A, +}; + +const u16 b43_tab_sigmasqr2[] = { + 0x00DE, 0x00DC, 0x00DA, 0x00D8, /* 0 */ + 0x00D6, 0x00D4, 0x00D2, 0x00CF, + 0x00CD, 0x00CA, 0x00C7, 0x00C4, + 0x00C1, 0x00BE, 0x00BE, 0x00BE, + 0x00BE, 0x00BE, 0x00BE, 0x00BE, /* 16 */ + 0x00BE, 0x00BE, 0x00BE, 0x00BE, + 0x00BE, 0x00BE, 0x0000, 0x00BE, + 0x00BE, 0x00BE, 0x00BE, 0x00BE, + 0x00BE, 0x00BE, 0x00BE, 0x00BE, /* 32 */ + 0x00BE, 0x00BE, 0x00BE, 0x00BE, + 0x00C1, 0x00C4, 0x00C7, 0x00CA, + 0x00CD, 0x00CF, 0x00D2, 0x00D4, + 0x00D6, 0x00D8, 0x00DA, 0x00DC, /* 48 */ + 0x00DE, +}; + +static inline void assert_sizes(void) +{ + BUILD_BUG_ON(B43_TAB_ROTOR_SIZE != ARRAY_SIZE(b43_tab_rotor)); + BUILD_BUG_ON(B43_TAB_RETARD_SIZE != ARRAY_SIZE(b43_tab_retard)); + BUILD_BUG_ON(B43_TAB_FINEFREQA_SIZE != ARRAY_SIZE(b43_tab_finefreqa)); + BUILD_BUG_ON(B43_TAB_FINEFREQG_SIZE != ARRAY_SIZE(b43_tab_finefreqg)); + BUILD_BUG_ON(B43_TAB_NOISEA2_SIZE != ARRAY_SIZE(b43_tab_noisea2)); + BUILD_BUG_ON(B43_TAB_NOISEA3_SIZE != ARRAY_SIZE(b43_tab_noisea3)); + BUILD_BUG_ON(B43_TAB_NOISEG1_SIZE != ARRAY_SIZE(b43_tab_noiseg1)); + BUILD_BUG_ON(B43_TAB_NOISEG2_SIZE != ARRAY_SIZE(b43_tab_noiseg2)); + BUILD_BUG_ON(B43_TAB_NOISESCALEG_SIZE != + ARRAY_SIZE(b43_tab_noisescaleg1)); + BUILD_BUG_ON(B43_TAB_NOISESCALEG_SIZE != + ARRAY_SIZE(b43_tab_noisescaleg2)); + BUILD_BUG_ON(B43_TAB_NOISESCALEG_SIZE != + ARRAY_SIZE(b43_tab_noisescaleg3)); + BUILD_BUG_ON(B43_TAB_SIGMASQR_SIZE != ARRAY_SIZE(b43_tab_sigmasqr1)); + BUILD_BUG_ON(B43_TAB_SIGMASQR_SIZE != ARRAY_SIZE(b43_tab_sigmasqr2)); +} + +u16 b43_ofdmtab_read16(struct b43_wldev *dev, u16 table, u16 offset) +{ + assert_sizes(); + + b43_phy_write(dev, B43_PHY_OTABLECTL, table + offset); + return b43_phy_read(dev, B43_PHY_OTABLEI); +} + +void b43_ofdmtab_write16(struct b43_wldev *dev, u16 table, + u16 offset, u16 value) +{ + b43_phy_write(dev, B43_PHY_OTABLECTL, table + offset); + b43_phy_write(dev, B43_PHY_OTABLEI, value); +} + +u32 b43_ofdmtab_read32(struct b43_wldev *dev, u16 table, u16 offset) +{ + u32 ret; + + b43_phy_write(dev, B43_PHY_OTABLECTL, table + offset); + ret = b43_phy_read(dev, B43_PHY_OTABLEQ); + ret <<= 16; + ret |= b43_phy_read(dev, B43_PHY_OTABLEI); + + return ret; +} + +void b43_ofdmtab_write32(struct b43_wldev *dev, u16 table, + u16 offset, u32 value) +{ + b43_phy_write(dev, B43_PHY_OTABLECTL, table + offset); + 
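+ /* The low 16 bits go to the I data register, the high 16 bits to Q, mirroring b43_ofdmtab_read32() above. */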
b43_phy_write(dev, B43_PHY_OTABLEI, value); + b43_phy_write(dev, B43_PHY_OTABLEQ, (value >> 16)); +} + +u16 b43_gtab_read(struct b43_wldev *dev, u16 table, u16 offset) +{ + b43_phy_write(dev, B43_PHY_GTABCTL, table + offset); + return b43_phy_read(dev, B43_PHY_GTABDATA); +} + +void b43_gtab_write(struct b43_wldev *dev, u16 table, u16 offset, u16 value) +{ + b43_phy_write(dev, B43_PHY_GTABCTL, table + offset); + b43_phy_write(dev, B43_PHY_GTABDATA, value); +} diff --git a/drivers/net/wireless/b43/tables.h b/drivers/net/wireless/b43/tables.h new file mode 100644 index 0000000000000000000000000000000000000000..64635d7b518c3e9500d08193fc2190ecaa44a407 --- /dev/null +++ b/drivers/net/wireless/b43/tables.h @@ -0,0 +1,28 @@ +#ifndef B43_TABLES_H_ +#define B43_TABLES_H_ + +#define B43_TAB_ROTOR_SIZE 53 +extern const u32 b43_tab_rotor[]; +#define B43_TAB_RETARD_SIZE 53 +extern const u32 b43_tab_retard[]; +#define B43_TAB_FINEFREQA_SIZE 256 +extern const u16 b43_tab_finefreqa[]; +#define B43_TAB_FINEFREQG_SIZE 256 +extern const u16 b43_tab_finefreqg[]; +#define B43_TAB_NOISEA2_SIZE 8 +extern const u16 b43_tab_noisea2[]; +#define B43_TAB_NOISEA3_SIZE 8 +extern const u16 b43_tab_noisea3[]; +#define B43_TAB_NOISEG1_SIZE 8 +extern const u16 b43_tab_noiseg1[]; +#define B43_TAB_NOISEG2_SIZE 8 +extern const u16 b43_tab_noiseg2[]; +#define B43_TAB_NOISESCALEG_SIZE 27 +extern const u16 b43_tab_noisescaleg1[]; +extern const u16 b43_tab_noisescaleg2[]; +extern const u16 b43_tab_noisescaleg3[]; +#define B43_TAB_SIGMASQR_SIZE 53 +extern const u16 b43_tab_sigmasqr1[]; +extern const u16 b43_tab_sigmasqr2[]; + +#endif /* B43_TABLES_H_ */ diff --git a/drivers/net/wireless/b43/xmit.c b/drivers/net/wireless/b43/xmit.c new file mode 100644 index 0000000000000000000000000000000000000000..0bd6f8a348a8a195821ff8ec5721c167b0443bca --- /dev/null +++ b/drivers/net/wireless/b43/xmit.c @@ -0,0 +1,650 @@ +/* + + Broadcom B43 wireless driver + + Transmission (TX/RX) related functions. + + Copyright (C) 2005 Martin Langer + Copyright (C) 2005 Stefano Brivio + Copyright (C) 2005, 2006 Michael Buesch + Copyright (C) 2005 Danny van Dyk + Copyright (C) 2005 Andreas Jaggi + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; see the file COPYING. If not, write to + the Free Software Foundation, Inc., 51 Franklin Steet, Fifth Floor, + Boston, MA 02110-1301, USA. + +*/ + +#include "xmit.h" +#include "phy.h" +#include "dma.h" +#include "pio.h" + +/* Extract the bitrate out of a CCK PLCP header. */ +static u8 b43_plcp_get_bitrate_cck(struct b43_plcp_hdr6 *plcp) +{ + switch (plcp->raw[0]) { + case 0x0A: + return B43_CCK_RATE_1MB; + case 0x14: + return B43_CCK_RATE_2MB; + case 0x37: + return B43_CCK_RATE_5MB; + case 0x6E: + return B43_CCK_RATE_11MB; + } + B43_WARN_ON(1); + return 0; +} + +/* Extract the bitrate out of an OFDM PLCP header. 
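+ * The rate code is the low nibble of the first PLCP byte.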
*/ +static u8 b43_plcp_get_bitrate_ofdm(struct b43_plcp_hdr6 *plcp) +{ + switch (plcp->raw[0] & 0xF) { + case 0xB: + return B43_OFDM_RATE_6MB; + case 0xF: + return B43_OFDM_RATE_9MB; + case 0xA: + return B43_OFDM_RATE_12MB; + case 0xE: + return B43_OFDM_RATE_18MB; + case 0x9: + return B43_OFDM_RATE_24MB; + case 0xD: + return B43_OFDM_RATE_36MB; + case 0x8: + return B43_OFDM_RATE_48MB; + case 0xC: + return B43_OFDM_RATE_54MB; + } + B43_WARN_ON(1); + return 0; +} + +u8 b43_plcp_get_ratecode_cck(const u8 bitrate) +{ + switch (bitrate) { + case B43_CCK_RATE_1MB: + return 0x0A; + case B43_CCK_RATE_2MB: + return 0x14; + case B43_CCK_RATE_5MB: + return 0x37; + case B43_CCK_RATE_11MB: + return 0x6E; + } + B43_WARN_ON(1); + return 0; +} + +u8 b43_plcp_get_ratecode_ofdm(const u8 bitrate) +{ + switch (bitrate) { + case B43_OFDM_RATE_6MB: + return 0xB; + case B43_OFDM_RATE_9MB: + return 0xF; + case B43_OFDM_RATE_12MB: + return 0xA; + case B43_OFDM_RATE_18MB: + return 0xE; + case B43_OFDM_RATE_24MB: + return 0x9; + case B43_OFDM_RATE_36MB: + return 0xD; + case B43_OFDM_RATE_48MB: + return 0x8; + case B43_OFDM_RATE_54MB: + return 0xC; + } + B43_WARN_ON(1); + return 0; +} + +void b43_generate_plcp_hdr(struct b43_plcp_hdr4 *plcp, + const u16 octets, const u8 bitrate) +{ + __le32 *data = &(plcp->data); + __u8 *raw = plcp->raw; + + if (b43_is_ofdm_rate(bitrate)) { + u32 d; + + d = b43_plcp_get_ratecode_ofdm(bitrate); + B43_WARN_ON(octets & 0xF000); + d |= (octets << 5); + *data = cpu_to_le32(d); + } else { + u32 plen; + + plen = octets * 16 / bitrate; + if ((octets * 16 % bitrate) > 0) { + plen++; + if ((bitrate == B43_CCK_RATE_11MB) + && ((octets * 8 % 11) < 4)) { + raw[1] = 0x84; + } else + raw[1] = 0x04; + } else + raw[1] = 0x04; + *data |= cpu_to_le32(plen << 16); + raw[0] = b43_plcp_get_ratecode_cck(bitrate); + } +} + +static u8 b43_calc_fallback_rate(u8 bitrate) +{ + switch (bitrate) { + case B43_CCK_RATE_1MB: + return B43_CCK_RATE_1MB; + case B43_CCK_RATE_2MB: + return B43_CCK_RATE_1MB; + case B43_CCK_RATE_5MB: + return B43_CCK_RATE_2MB; + case B43_CCK_RATE_11MB: + return B43_CCK_RATE_5MB; + case B43_OFDM_RATE_6MB: + return B43_CCK_RATE_5MB; + case B43_OFDM_RATE_9MB: + return B43_OFDM_RATE_6MB; + case B43_OFDM_RATE_12MB: + return B43_OFDM_RATE_9MB; + case B43_OFDM_RATE_18MB: + return B43_OFDM_RATE_12MB; + case B43_OFDM_RATE_24MB: + return B43_OFDM_RATE_18MB; + case B43_OFDM_RATE_36MB: + return B43_OFDM_RATE_24MB; + case B43_OFDM_RATE_48MB: + return B43_OFDM_RATE_36MB; + case B43_OFDM_RATE_54MB: + return B43_OFDM_RATE_48MB; + } + B43_WARN_ON(1); + return 0; +} + +static void generate_txhdr_fw4(struct b43_wldev *dev, + struct b43_txhdr_fw4 *txhdr, + const unsigned char *fragment_data, + unsigned int fragment_len, + const struct ieee80211_tx_control *txctl, + u16 cookie) +{ + const struct b43_phy *phy = &dev->phy; + const struct ieee80211_hdr *wlhdr = + (const struct ieee80211_hdr *)fragment_data; + int use_encryption = (!(txctl->flags & IEEE80211_TXCTL_DO_NOT_ENCRYPT)); + u16 fctl = le16_to_cpu(wlhdr->frame_control); + u8 rate, rate_fb; + int rate_ofdm, rate_fb_ofdm; + unsigned int plcp_fragment_len; + u32 mac_ctl = 0; + u16 phy_ctl = 0; + u8 extra_ft = 0; + + memset(txhdr, 0, sizeof(*txhdr)); + + rate = txctl->tx_rate; + rate_ofdm = b43_is_ofdm_rate(rate); + rate_fb = (txctl->alt_retry_rate == -1) ? 
rate : txctl->alt_retry_rate; + rate_fb_ofdm = b43_is_ofdm_rate(rate_fb); + + if (rate_ofdm) + txhdr->phy_rate = b43_plcp_get_ratecode_ofdm(rate); + else + txhdr->phy_rate = b43_plcp_get_ratecode_cck(rate); + txhdr->mac_frame_ctl = wlhdr->frame_control; + memcpy(txhdr->tx_receiver, wlhdr->addr1, 6); + + /* Calculate duration for fallback rate */ + if ((rate_fb == rate) || + (wlhdr->duration_id & cpu_to_le16(0x8000)) || + (wlhdr->duration_id == cpu_to_le16(0))) { + /* If the fallback rate equals the normal rate or the + * dur_id field contains an AID, CFP magic or 0, + * use the original dur_id field. */ + txhdr->dur_fb = wlhdr->duration_id; + } else { + int fbrate_base100kbps = B43_RATE_TO_BASE100KBPS(rate_fb); + txhdr->dur_fb = ieee80211_generic_frame_duration(dev->wl->hw, + dev->wl->if_id, + fragment_len, + fbrate_base100kbps); + } + + plcp_fragment_len = fragment_len + FCS_LEN; + if (use_encryption) { + u8 key_idx = (u16) (txctl->key_idx); + struct b43_key *key; + int wlhdr_len; + size_t iv_len; + + B43_WARN_ON(key_idx >= dev->max_nr_keys); + key = &(dev->key[key_idx]); + B43_WARN_ON(!key->keyconf); + + /* Hardware appends ICV. */ + plcp_fragment_len += txctl->icv_len; + + key_idx = b43_kidx_to_fw(dev, key_idx); + mac_ctl |= (key_idx << B43_TX4_MAC_KEYIDX_SHIFT) & + B43_TX4_MAC_KEYIDX; + mac_ctl |= (key->algorithm << B43_TX4_MAC_KEYALG_SHIFT) & + B43_TX4_MAC_KEYALG; + wlhdr_len = ieee80211_get_hdrlen(fctl); + iv_len = min((size_t) txctl->iv_len, + ARRAY_SIZE(txhdr->iv)); + memcpy(txhdr->iv, ((u8 *) wlhdr) + wlhdr_len, iv_len); + } + b43_generate_plcp_hdr((struct b43_plcp_hdr4 *)(&txhdr->plcp), + plcp_fragment_len, rate); + b43_generate_plcp_hdr((struct b43_plcp_hdr4 *)(&txhdr->plcp_fb), + plcp_fragment_len, rate_fb); + + /* Extra Frame Types */ + if (rate_fb_ofdm) + extra_ft |= B43_TX4_EFT_FBOFDM; + + /* Set channel radio code. Note that the micrcode ORs 0x100 to + * this value before comparing it to the value in SHM, if this + * is a 5Ghz packet. 
+ */ + txhdr->chan_radio_code = phy->channel; + + /* PHY TX Control word */ + if (rate_ofdm) + phy_ctl |= B43_TX4_PHY_OFDM; + if (dev->short_preamble) + phy_ctl |= B43_TX4_PHY_SHORTPRMBL; + switch (txctl->antenna_sel_tx) { + case 0: + phy_ctl |= B43_TX4_PHY_ANTLAST; + break; + case 1: + phy_ctl |= B43_TX4_PHY_ANT0; + break; + case 2: + phy_ctl |= B43_TX4_PHY_ANT1; + break; + default: + B43_WARN_ON(1); + } + + /* MAC control */ + if (!(txctl->flags & IEEE80211_TXCTL_NO_ACK)) + mac_ctl |= B43_TX4_MAC_ACK; + if (!(((fctl & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_CTL) && + ((fctl & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_PSPOLL))) + mac_ctl |= B43_TX4_MAC_HWSEQ; + if (txctl->flags & IEEE80211_TXCTL_FIRST_FRAGMENT) + mac_ctl |= B43_TX4_MAC_STMSDU; + if (phy->type == B43_PHYTYPE_A) + mac_ctl |= B43_TX4_MAC_5GHZ; + + /* Generate the RTS or CTS-to-self frame */ + if ((txctl->flags & IEEE80211_TXCTL_USE_RTS_CTS) || + (txctl->flags & IEEE80211_TXCTL_USE_CTS_PROTECT)) { + unsigned int len; + struct ieee80211_hdr *hdr; + int rts_rate, rts_rate_fb; + int rts_rate_ofdm, rts_rate_fb_ofdm; + + rts_rate = txctl->rts_cts_rate; + rts_rate_ofdm = b43_is_ofdm_rate(rts_rate); + rts_rate_fb = b43_calc_fallback_rate(rts_rate); + rts_rate_fb_ofdm = b43_is_ofdm_rate(rts_rate_fb); + + if (txctl->flags & IEEE80211_TXCTL_USE_CTS_PROTECT) { + ieee80211_ctstoself_get(dev->wl->hw, dev->wl->if_id, + fragment_data, fragment_len, + txctl, + (struct ieee80211_cts *)(txhdr-> + rts_frame)); + mac_ctl |= B43_TX4_MAC_SENDCTS; + len = sizeof(struct ieee80211_cts); + } else { + ieee80211_rts_get(dev->wl->hw, dev->wl->if_id, + fragment_data, fragment_len, txctl, + (struct ieee80211_rts *)(txhdr-> + rts_frame)); + mac_ctl |= B43_TX4_MAC_SENDRTS; + len = sizeof(struct ieee80211_rts); + } + len += FCS_LEN; + b43_generate_plcp_hdr((struct b43_plcp_hdr4 *)(&txhdr-> + rts_plcp), len, + rts_rate); + b43_generate_plcp_hdr((struct b43_plcp_hdr4 *)(&txhdr-> + rts_plcp_fb), + len, rts_rate_fb); + hdr = (struct ieee80211_hdr *)(&txhdr->rts_frame); + txhdr->rts_dur_fb = hdr->duration_id; + if (rts_rate_ofdm) { + extra_ft |= B43_TX4_EFT_RTSOFDM; + txhdr->phy_rate_rts = + b43_plcp_get_ratecode_ofdm(rts_rate); + } else + txhdr->phy_rate_rts = + b43_plcp_get_ratecode_cck(rts_rate); + if (rts_rate_fb_ofdm) + extra_ft |= B43_TX4_EFT_RTSFBOFDM; + mac_ctl |= B43_TX4_MAC_LONGFRAME; + } + + /* Magic cookie */ + txhdr->cookie = cpu_to_le16(cookie); + + /* Apply the bitfields */ + txhdr->mac_ctl = cpu_to_le32(mac_ctl); + txhdr->phy_ctl = cpu_to_le16(phy_ctl); + txhdr->extra_ft = extra_ft; +} + +void b43_generate_txhdr(struct b43_wldev *dev, + u8 * txhdr, + const unsigned char *fragment_data, + unsigned int fragment_len, + const struct ieee80211_tx_control *txctl, u16 cookie) +{ + generate_txhdr_fw4(dev, (struct b43_txhdr_fw4 *)txhdr, + fragment_data, fragment_len, txctl, cookie); +} + +static s8 b43_rssi_postprocess(struct b43_wldev *dev, + u8 in_rssi, int ofdm, + int adjust_2053, int adjust_2050) +{ + struct b43_phy *phy = &dev->phy; + s32 tmp; + + switch (phy->radio_ver) { + case 0x2050: + if (ofdm) { + tmp = in_rssi; + if (tmp > 127) + tmp -= 256; + tmp *= 73; + tmp /= 64; + if (adjust_2050) + tmp += 25; + else + tmp -= 3; + } else { + if (dev->dev->bus->sprom.r1. 
+ boardflags_lo & B43_BFL_RSSI) { + if (in_rssi > 63) + in_rssi = 63; + tmp = phy->nrssi_lt[in_rssi]; + tmp = 31 - tmp; + tmp *= -131; + tmp /= 128; + tmp -= 57; + } else { + tmp = in_rssi; + tmp = 31 - tmp; + tmp *= -149; + tmp /= 128; + tmp -= 68; + } + if (phy->type == B43_PHYTYPE_G && adjust_2050) + tmp += 25; + } + break; + case 0x2060: + if (in_rssi > 127) + tmp = in_rssi - 256; + else + tmp = in_rssi; + break; + default: + tmp = in_rssi; + tmp -= 11; + tmp *= 103; + tmp /= 64; + if (adjust_2053) + tmp -= 109; + else + tmp -= 83; + } + + return (s8) tmp; +} + +//TODO +#if 0 +static s8 b43_rssinoise_postprocess(struct b43_wldev *dev, u8 in_rssi) +{ + struct b43_phy *phy = &dev->phy; + s8 ret; + + if (phy->type == B43_PHYTYPE_A) { + //TODO: Incomplete specs. + ret = 0; + } else + ret = b43_rssi_postprocess(dev, in_rssi, 0, 1, 1); + + return ret; +} +#endif + +void b43_rx(struct b43_wldev *dev, struct sk_buff *skb, const void *_rxhdr) +{ + struct ieee80211_rx_status status; + struct b43_plcp_hdr6 *plcp; + struct ieee80211_hdr *wlhdr; + const struct b43_rxhdr_fw4 *rxhdr = _rxhdr; + u16 fctl; + u16 phystat0, phystat3, chanstat, mactime; + u32 macstat; + u16 chanid; + u8 jssi; + int padding; + + memset(&status, 0, sizeof(status)); + + /* Get metadata about the frame from the header. */ + phystat0 = le16_to_cpu(rxhdr->phy_status0); + phystat3 = le16_to_cpu(rxhdr->phy_status3); + jssi = rxhdr->jssi; + macstat = le32_to_cpu(rxhdr->mac_status); + mactime = le16_to_cpu(rxhdr->mac_time); + chanstat = le16_to_cpu(rxhdr->channel); + + if (macstat & B43_RX_MAC_FCSERR) + dev->wl->ieee_stats.dot11FCSErrorCount++; + if (macstat & B43_RX_MAC_DECERR) { + /* Decryption with the given key failed. + * Drop the packet. We also won't be able to decrypt it with + * the key in software. */ + goto drop; + } + + /* Skip PLCP and padding */ + padding = (macstat & B43_RX_MAC_PADDING) ? 2 : 0; + if (unlikely(skb->len < (sizeof(struct b43_plcp_hdr6) + padding))) { + b43dbg(dev->wl, "RX: Packet size underrun (1)\n"); + goto drop; + } + plcp = (struct b43_plcp_hdr6 *)(skb->data + padding); + skb_pull(skb, sizeof(struct b43_plcp_hdr6) + padding); + /* The skb contains the Wireless Header + payload data now */ + if (unlikely(skb->len < (2 + 2 + 6 /*minimum hdr */ + FCS_LEN))) { + b43dbg(dev->wl, "RX: Packet size underrun (2)\n"); + goto drop; + } + wlhdr = (struct ieee80211_hdr *)(skb->data); + fctl = le16_to_cpu(wlhdr->frame_control); + skb_trim(skb, skb->len - FCS_LEN); + + if (macstat & B43_RX_MAC_DEC) { + unsigned int keyidx; + int wlhdr_len; + + keyidx = ((macstat & B43_RX_MAC_KEYIDX) + >> B43_RX_MAC_KEYIDX_SHIFT); + /* We must adjust the key index here. We want the "physical" + * key index, but the ucode passed it slightly different. 
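+ * b43_kidx_to_raw() (xmit.h) is an identity mapping with the new firmware key-index API and adds an offset of 4 otherwise.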
+ */ + keyidx = b43_kidx_to_raw(dev, keyidx); + B43_WARN_ON(keyidx >= dev->max_nr_keys); + + if (dev->key[keyidx].algorithm != B43_SEC_ALGO_NONE) { + wlhdr_len = ieee80211_get_hdrlen(fctl); + if (unlikely(skb->len < (wlhdr_len + 3))) { + b43dbg(dev->wl, + "RX: Packet size underrun (3)\n"); + goto drop; + } + status.flag |= RX_FLAG_DECRYPTED; + } + } + + status.ssi = b43_rssi_postprocess(dev, jssi, + (phystat0 & B43_RX_PHYST0_OFDM), + (phystat0 & B43_RX_PHYST0_GAINCTL), + (phystat3 & B43_RX_PHYST3_TRSTATE)); + status.noise = dev->stats.link_noise; + /* the next line looks wrong, but is what mac80211 wants */ + status.signal = (jssi * 100) / B43_RX_MAX_SSI; + if (phystat0 & B43_RX_PHYST0_OFDM) + status.rate = b43_plcp_get_bitrate_ofdm(plcp); + else + status.rate = b43_plcp_get_bitrate_cck(plcp); + status.antenna = !!(phystat0 & B43_RX_PHYST0_ANT); + status.mactime = mactime; + + chanid = (chanstat & B43_RX_CHAN_ID) >> B43_RX_CHAN_ID_SHIFT; + switch (chanstat & B43_RX_CHAN_PHYTYPE) { + case B43_PHYTYPE_A: + status.phymode = MODE_IEEE80211A; + status.freq = chanid; + status.channel = b43_freq_to_channel_a(chanid); + break; + case B43_PHYTYPE_B: + status.phymode = MODE_IEEE80211B; + status.freq = chanid + 2400; + status.channel = b43_freq_to_channel_bg(chanid + 2400); + break; + case B43_PHYTYPE_G: + status.phymode = MODE_IEEE80211G; + status.freq = chanid + 2400; + status.channel = b43_freq_to_channel_bg(chanid + 2400); + break; + default: + B43_WARN_ON(1); + } + + dev->stats.last_rx = jiffies; + ieee80211_rx_irqsafe(dev->wl->hw, skb, &status); + + return; +drop: + b43dbg(dev->wl, "RX: Packet dropped\n"); + dev_kfree_skb_any(skb); +} + +void b43_handle_txstatus(struct b43_wldev *dev, + const struct b43_txstatus *status) +{ + b43_debugfs_log_txstat(dev, status); + + if (status->intermediate) + return; + if (status->for_ampdu) + return; + if (!status->acked) + dev->wl->ieee_stats.dot11ACKFailureCount++; + if (status->rts_count) { + if (status->rts_count == 0xF) //FIXME + dev->wl->ieee_stats.dot11RTSFailureCount++; + else + dev->wl->ieee_stats.dot11RTSSuccessCount++; + } + + if (b43_using_pio(dev)) + b43_pio_handle_txstatus(dev, status); + else + b43_dma_handle_txstatus(dev, status); +} + +/* Handle TX status report as received through DMA/PIO queues */ +void b43_handle_hwtxstatus(struct b43_wldev *dev, + const struct b43_hwtxstatus *hw) +{ + struct b43_txstatus status; + u8 tmp; + + status.cookie = le16_to_cpu(hw->cookie); + status.seq = le16_to_cpu(hw->seq); + status.phy_stat = hw->phy_stat; + tmp = hw->count; + status.frame_count = (tmp >> 4); + status.rts_count = (tmp & 0x0F); + tmp = hw->flags; + status.supp_reason = ((tmp & 0x1C) >> 2); + status.pm_indicated = !!(tmp & 0x80); + status.intermediate = !!(tmp & 0x40); + status.for_ampdu = !!(tmp & 0x20); + status.acked = !!(tmp & 0x02); + + b43_handle_txstatus(dev, &status); +} + +/* Stop any TX operation on the device (suspend the hardware queues) */ +void b43_tx_suspend(struct b43_wldev *dev) +{ + if (b43_using_pio(dev)) + b43_pio_freeze_txqueues(dev); + else + b43_dma_tx_suspend(dev); +} + +/* Resume any TX operation on the device (resume the hardware queues) */ +void b43_tx_resume(struct b43_wldev *dev) +{ + if (b43_using_pio(dev)) + b43_pio_thaw_txqueues(dev); + else + b43_dma_tx_resume(dev); +} + +#if 0 +static void upload_qos_parms(struct b43_wldev *dev, + const u16 * parms, u16 offset) +{ + int i; + + for (i = 0; i < B43_NR_QOSPARMS; i++) { + b43_shm_write16(dev, B43_SHM_SHARED, + offset + (i * 2), parms[i]); + } +} +#endif + +/* 
Initialize the QoS parameters */ +void b43_qos_init(struct b43_wldev *dev) +{ + /* FIXME: This function must probably be called from the mac80211 + * config callback. */ + return; + + b43_hf_write(dev, b43_hf_read(dev) | B43_HF_EDCF); + //FIXME kill magic + b43_write16(dev, 0x688, b43_read16(dev, 0x688) | 0x4); + + /*TODO: We might need some stack support here to get the values. */ +} diff --git a/drivers/net/wireless/b43/xmit.h b/drivers/net/wireless/b43/xmit.h new file mode 100644 index 0000000000000000000000000000000000000000..03bddd251618472b9f102c0973bd7dc8531741d3 --- /dev/null +++ b/drivers/net/wireless/b43/xmit.h @@ -0,0 +1,250 @@ +#ifndef B43_XMIT_H_ +#define B43_XMIT_H_ + +#include "main.h" + +#define _b43_declare_plcp_hdr(size) \ + struct b43_plcp_hdr##size { \ + union { \ + __le32 data; \ + __u8 raw[size]; \ + } __attribute__((__packed__)); \ + } __attribute__((__packed__)) + +/* struct b43_plcp_hdr4 */ +_b43_declare_plcp_hdr(4); +/* struct b43_plcp_hdr6 */ +_b43_declare_plcp_hdr(6); + +#undef _b43_declare_plcp_hdr + +/* TX header for v4 firmware */ +struct b43_txhdr_fw4 { + __le32 mac_ctl; /* MAC TX control */ + __le16 mac_frame_ctl; /* Copy of the FrameControl field */ + __le16 tx_fes_time_norm; /* TX FES Time Normal */ + __le16 phy_ctl; /* PHY TX control */ + __le16 phy_ctl_0; /* Unused */ + __le16 phy_ctl_1; /* Unused */ + __le16 phy_ctl_rts_0; /* Unused */ + __le16 phy_ctl_rts_1; /* Unused */ + __u8 phy_rate; /* PHY rate */ + __u8 phy_rate_rts; /* PHY rate for RTS/CTS */ + __u8 extra_ft; /* Extra Frame Types */ + __u8 chan_radio_code; /* Channel Radio Code */ + __u8 iv[16]; /* Encryption IV */ + __u8 tx_receiver[6]; /* TX Frame Receiver address */ + __le16 tx_fes_time_fb; /* TX FES Time Fallback */ + struct b43_plcp_hdr6 rts_plcp_fb; /* RTS fallback PLCP */ + __le16 rts_dur_fb; /* RTS fallback duration */ + struct b43_plcp_hdr6 plcp_fb; /* Fallback PLCP */ + __le16 dur_fb; /* Fallback duration */ + __le16 mm_dur_time; /* Unused */ + __le16 mm_dur_time_fb; /* Unused */ + __le32 time_stamp; /* Timestamp */ + PAD_BYTES(2); + __le16 cookie; /* TX frame cookie */ + __le16 tx_status; /* TX status */ + struct b43_plcp_hdr6 rts_plcp; /* RTS PLCP */ + __u8 rts_frame[16]; /* The RTS frame (if used) */ + PAD_BYTES(2); + struct b43_plcp_hdr6 plcp; /* Main PLCP */ +} __attribute__ ((__packed__)); + +/* MAC TX control */ +#define B43_TX4_MAC_KEYIDX 0x0FF00000 /* Security key index */ +#define B43_TX4_MAC_KEYIDX_SHIFT 20 +#define B43_TX4_MAC_KEYALG 0x00070000 /* Security key algorithm */ +#define B43_TX4_MAC_KEYALG_SHIFT 16 +#define B43_TX4_MAC_LIFETIME 0x00001000 +#define B43_TX4_MAC_FRAMEBURST 0x00000800 +#define B43_TX4_MAC_SENDCTS 0x00000400 +#define B43_TX4_MAC_AMPDU 0x00000300 +#define B43_TX4_MAC_AMPDU_SHIFT 8 +#define B43_TX4_MAC_5GHZ 0x00000080 +#define B43_TX4_MAC_IGNPMQ 0x00000020 +#define B43_TX4_MAC_HWSEQ 0x00000010 /* Use Hardware Sequence Number */ +#define B43_TX4_MAC_STMSDU 0x00000008 /* Start MSDU */ +#define B43_TX4_MAC_SENDRTS 0x00000004 +#define B43_TX4_MAC_LONGFRAME 0x00000002 +#define B43_TX4_MAC_ACK 0x00000001 + +/* Extra Frame Types */ +#define B43_TX4_EFT_FBOFDM 0x0001 /* Data frame fallback rate type */ +#define B43_TX4_EFT_RTSOFDM 0x0004 /* RTS/CTS rate type */ +#define B43_TX4_EFT_RTSFBOFDM 0x0010 /* RTS/CTS fallback rate type */ + +/* PHY TX control word */ +#define B43_TX4_PHY_OFDM 0x0001 /* Data frame rate type */ +#define B43_TX4_PHY_SHORTPRMBL 0x0010 /* Use short preamble */ +#define B43_TX4_PHY_ANT 0x03C0 /* Antenna selection */ +#define 
B43_TX4_PHY_ANT0 0x0000 /* Use antenna 0 */ +#define B43_TX4_PHY_ANT1 0x0100 /* Use antenna 1 */ +#define B43_TX4_PHY_ANTLAST 0x0300 /* Use last used antenna */ + +void b43_generate_txhdr(struct b43_wldev *dev, + u8 * txhdr, + const unsigned char *fragment_data, + unsigned int fragment_len, + const struct ieee80211_tx_control *txctl, u16 cookie); + +/* Transmit Status */ +struct b43_txstatus { + u16 cookie; /* The cookie from the txhdr */ + u16 seq; /* Sequence number */ + u8 phy_stat; /* PHY TX status */ + u8 frame_count; /* Frame transmit count */ + u8 rts_count; /* RTS transmit count */ + u8 supp_reason; /* Suppression reason */ + /* flags */ + u8 pm_indicated; /* PM mode indicated to AP */ + u8 intermediate; /* Intermediate status notification (not final) */ + u8 for_ampdu; /* Status is for an AMPDU (afterburner) */ + u8 acked; /* Wireless ACK received */ +}; + +/* txstatus supp_reason values */ +enum { + B43_TXST_SUPP_NONE, /* Not suppressed */ + B43_TXST_SUPP_PMQ, /* Suppressed due to PMQ entry */ + B43_TXST_SUPP_FLUSH, /* Suppressed due to flush request */ + B43_TXST_SUPP_PREV, /* Previous fragment failed */ + B43_TXST_SUPP_CHAN, /* Channel mismatch */ + B43_TXST_SUPP_LIFE, /* Lifetime expired */ + B43_TXST_SUPP_UNDER, /* Buffer underflow */ + B43_TXST_SUPP_ABNACK, /* Afterburner NACK */ +}; + +/* Transmit Status as received through DMA/PIO on old chips */ +struct b43_hwtxstatus { + PAD_BYTES(4); + __le16 cookie; + u8 flags; + u8 count; + PAD_BYTES(2); + __le16 seq; + u8 phy_stat; + PAD_BYTES(1); +} __attribute__ ((__packed__)); + +/* Receive header for v4 firmware. */ +struct b43_rxhdr_fw4 { + __le16 frame_len; /* Frame length */ + PAD_BYTES(2); + __le16 phy_status0; /* PHY RX Status 0 */ + __u8 jssi; /* PHY RX Status 1: JSSI */ + __u8 sig_qual; /* PHY RX Status 1: Signal Quality */ + __le16 phy_status2; /* PHY RX Status 2 */ + __le16 phy_status3; /* PHY RX Status 3 */ + __le32 mac_status; /* MAC RX status */ + __le16 mac_time; + __le16 channel; +} __attribute__ ((__packed__)); + +/* PHY RX Status 0 */ +#define B43_RX_PHYST0_GAINCTL 0x4000 /* Gain Control */ +#define B43_RX_PHYST0_PLCPHCF 0x0200 +#define B43_RX_PHYST0_PLCPFV 0x0100 +#define B43_RX_PHYST0_SHORTPRMBL 0x0080 /* Received with Short Preamble */ +#define B43_RX_PHYST0_LCRS 0x0040 +#define B43_RX_PHYST0_ANT 0x0020 /* Antenna */ +#define B43_RX_PHYST0_UNSRATE 0x0010 +#define B43_RX_PHYST0_CLIP 0x000C +#define B43_RX_PHYST0_CLIP_SHIFT 2 +#define B43_RX_PHYST0_FTYPE 0x0003 /* Frame type */ +#define B43_RX_PHYST0_CCK 0x0000 /* Frame type: CCK */ +#define B43_RX_PHYST0_OFDM 0x0001 /* Frame type: OFDM */ +#define B43_RX_PHYST0_PRE_N 0x0002 /* Pre-standard N-PHY frame */ +#define B43_RX_PHYST0_STD_N 0x0003 /* Standard N-PHY frame */ + +/* PHY RX Status 2 */ +#define B43_RX_PHYST2_LNAG 0xC000 /* LNA Gain */ +#define B43_RX_PHYST2_LNAG_SHIFT 14 +#define B43_RX_PHYST2_PNAG 0x3C00 /* PNA Gain */ +#define B43_RX_PHYST2_PNAG_SHIFT 10 +#define B43_RX_PHYST2_FOFF 0x03FF /* F offset */ + +/* PHY RX Status 3 */ +#define B43_RX_PHYST3_DIGG 0x1800 /* DIG Gain */ +#define B43_RX_PHYST3_DIGG_SHIFT 11 +#define B43_RX_PHYST3_TRSTATE 0x0400 /* TR state */ + +/* MAC RX Status */ +#define B43_RX_MAC_BEACONSENT 0x00008000 /* Beacon send flag */ +#define B43_RX_MAC_KEYIDX 0x000007E0 /* Key index */ +#define B43_RX_MAC_KEYIDX_SHIFT 5 +#define B43_RX_MAC_DECERR 0x00000010 /* Decrypt error */ +#define B43_RX_MAC_DEC 0x00000008 /* Decryption attempted */ +#define B43_RX_MAC_PADDING 0x00000004 /* Pad bytes present */ +#define B43_RX_MAC_RESP 0x00000002 
/* Response frame transmitted */ +#define B43_RX_MAC_FCSERR 0x00000001 /* FCS error */ + +/* RX channel */ +#define B43_RX_CHAN_GAIN 0xFC00 /* Gain */ +#define B43_RX_CHAN_GAIN_SHIFT 10 +#define B43_RX_CHAN_ID 0x03FC /* Channel ID */ +#define B43_RX_CHAN_ID_SHIFT 2 +#define B43_RX_CHAN_PHYTYPE 0x0003 /* PHY type */ + +u8 b43_plcp_get_ratecode_cck(const u8 bitrate); +u8 b43_plcp_get_ratecode_ofdm(const u8 bitrate); + +void b43_generate_plcp_hdr(struct b43_plcp_hdr4 *plcp, + const u16 octets, const u8 bitrate); + +void b43_rx(struct b43_wldev *dev, struct sk_buff *skb, const void *_rxhdr); + +void b43_handle_txstatus(struct b43_wldev *dev, + const struct b43_txstatus *status); + +void b43_handle_hwtxstatus(struct b43_wldev *dev, + const struct b43_hwtxstatus *hw); + +void b43_tx_suspend(struct b43_wldev *dev); +void b43_tx_resume(struct b43_wldev *dev); + +#define B43_NR_QOSPARMS 22 +enum { + B43_QOSPARM_TXOP = 0, + B43_QOSPARM_CWMIN, + B43_QOSPARM_CWMAX, + B43_QOSPARM_CWCUR, + B43_QOSPARM_AIFS, + B43_QOSPARM_BSLOTS, + B43_QOSPARM_REGGAP, + B43_QOSPARM_STATUS, +}; +void b43_qos_init(struct b43_wldev *dev); + +/* Helper functions for converting the key-table index from "firmware-format" + * to "raw-format" and back. The firmware API changed for this at some revision. + * We need to account for that here. */ +static inline int b43_new_kidx_api(struct b43_wldev *dev) +{ + /* FIXME: Not sure the change was at rev 351 */ + return (dev->fw.rev >= 351); +} +static inline u8 b43_kidx_to_fw(struct b43_wldev *dev, u8 raw_kidx) +{ + u8 firmware_kidx; + if (b43_new_kidx_api(dev)) { + firmware_kidx = raw_kidx; + } else { + if (raw_kidx >= 4) /* Is per STA key? */ + firmware_kidx = raw_kidx - 4; + else + firmware_kidx = raw_kidx; /* TX default key */ + } + return firmware_kidx; +} +static inline u8 b43_kidx_to_raw(struct b43_wldev *dev, u8 firmware_kidx) +{ + u8 raw_kidx; + if (b43_new_kidx_api(dev)) + raw_kidx = firmware_kidx; + else + raw_kidx = firmware_kidx + 4; /* RX default keys or per STA keys */ + return raw_kidx; +} + +#endif /* B43_XMIT_H_ */ diff --git a/drivers/net/wireless/b43legacy/Kconfig b/drivers/net/wireless/b43legacy/Kconfig new file mode 100644 index 0000000000000000000000000000000000000000..7e23ec23fc98b9b26d2a02d6b1f4f53c3bed5d05 --- /dev/null +++ b/drivers/net/wireless/b43legacy/Kconfig @@ -0,0 +1,89 @@ +config B43LEGACY + tristate "Broadcom 43xx-legacy wireless support (mac80211 stack)" + depends on SSB_POSSIBLE && MAC80211 && WLAN_80211 + select SSB + select FW_LOADER + select HW_RANDOM + ---help--- + b43legacy is a driver for 802.11b devices from Broadcom (BCM4301 and + BCM4303) and early model 802.11g chips (BCM4306 Ver. 2) used in the + Linksys WPC54G V1 PCMCIA devices. + + Newer 802.11g and 802.11a devices need b43. + + It is safe to include both b43 and b43legacy as the underlying glue + layer will automatically load the correct version for your device. + + This driver uses V3 firmware, which must be installed separately using + b43-fwcutter. + + This driver can be built as a module (recommended) that will be + called "b43legacy". If unsure, say M. 
+ +# Auto-select SSB PCI-HOST support, if possible +config B43LEGACY_PCI_AUTOSELECT + bool + depends on B43LEGACY && SSB_PCIHOST_POSSIBLE + select SSB_PCIHOST + default y + +# Auto-select SSB PCICORE driver, if possible +config B43LEGACY_PCICORE_AUTOSELECT + bool + depends on B43LEGACY && SSB_DRIVER_PCICORE_POSSIBLE + select SSB_DRIVER_PCICORE + default y + +config B43LEGACY_DEBUG + bool "Broadcom 43xx-legacy debugging" + depends on B43LEGACY + default y + ---help--- + Say Y, because this information will help you get the driver running. + This option generates a minimum of log output. + +config B43LEGACY_DMA + bool + depends on B43LEGACY + +config B43LEGACY_PIO + bool + depends on B43LEGACY + +choice + prompt "Broadcom 43xx-legacy data transfer mode" + depends on B43LEGACY + default B43LEGACY_DMA_AND_PIO_MODE + +config B43LEGACY_DMA_AND_PIO_MODE + bool "DMA + PIO" + select B43LEGACY_DMA + select B43LEGACY_PIO + ---help--- + Include both, Direct Memory Access (DMA) and Programmed I/O (PIO) + data transfer modes. The mode actually used is selectable through + the module parameter "pio". With pio=0 as a module parameter, the + default DMA is used, otherwise PIO is used. + + If unsure, choose this option. + +config B43LEGACY_DMA_MODE + bool "DMA (Direct Memory Access) only" + select B43LEGACY_DMA + ---help--- + Only include Direct Memory Access (DMA). + This reduces the size of the driver module, by omitting the PIO code. + +config B43LEGACY_PIO_MODE + bool "PIO (Programmed I/O) only" + select B43LEGACY_PIO + ---help--- + Only include Programmed I/O (PIO). + This reduces the size of the driver module, by omitting the DMA code. + Please note that PIO transfers are slow (compared to DMA). + + Also note that not all devices of the b43legacy series support PIO. + + You should use PIO only if DMA does not work for you. 
+ +endchoice diff --git a/drivers/net/wireless/b43legacy/Makefile b/drivers/net/wireless/b43legacy/Makefile new file mode 100644 index 0000000000000000000000000000000000000000..ec3a2482bbad26d922ba7f360401001548fe4cef --- /dev/null +++ b/drivers/net/wireless/b43legacy/Makefile @@ -0,0 +1,14 @@ +obj-$(CONFIG_B43LEGACY) += b43legacy.o +b43legacy-obj-$(CONFIG_B43LEGACY_DEBUG) += debugfs.o + +b43legacy-obj-$(CONFIG_B43LEGACY_DMA) += dma.o +b43legacy-obj-$(CONFIG_B43LEGACY_PIO) += pio.o + +b43legacy-objs := main.o \ + ilt.o \ + leds.o \ + phy.o \ + radio.o \ + sysfs.o \ + xmit.o \ + $(b43legacy-obj-y) diff --git a/drivers/net/wireless/b43legacy/b43legacy.h b/drivers/net/wireless/b43legacy/b43legacy.h new file mode 100644 index 0000000000000000000000000000000000000000..afe145cec06757c70844e2c967ae4c946acc21b7 --- /dev/null +++ b/drivers/net/wireless/b43legacy/b43legacy.h @@ -0,0 +1,832 @@ +#ifndef B43legacy_H_ +#define B43legacy_H_ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include +#include + +#include "debugfs.h" +#include "leds.h" +#include "phy.h" + + +#define B43legacy_IRQWAIT_MAX_RETRIES 100 + +#define B43legacy_RX_MAX_SSI 60 /* best guess at max ssi */ + +/* MMIO offsets */ +#define B43legacy_MMIO_DMA0_REASON 0x20 +#define B43legacy_MMIO_DMA0_IRQ_MASK 0x24 +#define B43legacy_MMIO_DMA1_REASON 0x28 +#define B43legacy_MMIO_DMA1_IRQ_MASK 0x2C +#define B43legacy_MMIO_DMA2_REASON 0x30 +#define B43legacy_MMIO_DMA2_IRQ_MASK 0x34 +#define B43legacy_MMIO_DMA3_REASON 0x38 +#define B43legacy_MMIO_DMA3_IRQ_MASK 0x3C +#define B43legacy_MMIO_DMA4_REASON 0x40 +#define B43legacy_MMIO_DMA4_IRQ_MASK 0x44 +#define B43legacy_MMIO_DMA5_REASON 0x48 +#define B43legacy_MMIO_DMA5_IRQ_MASK 0x4C +#define B43legacy_MMIO_MACCTL 0x120 +#define B43legacy_MMIO_STATUS_BITFIELD 0x120 +#define B43legacy_MMIO_STATUS2_BITFIELD 0x124 +#define B43legacy_MMIO_GEN_IRQ_REASON 0x128 +#define B43legacy_MMIO_GEN_IRQ_MASK 0x12C +#define B43legacy_MMIO_RAM_CONTROL 0x130 +#define B43legacy_MMIO_RAM_DATA 0x134 +#define B43legacy_MMIO_PS_STATUS 0x140 +#define B43legacy_MMIO_RADIO_HWENABLED_HI 0x158 +#define B43legacy_MMIO_SHM_CONTROL 0x160 +#define B43legacy_MMIO_SHM_DATA 0x164 +#define B43legacy_MMIO_SHM_DATA_UNALIGNED 0x166 +#define B43legacy_MMIO_XMITSTAT_0 0x170 +#define B43legacy_MMIO_XMITSTAT_1 0x174 +#define B43legacy_MMIO_REV3PLUS_TSF_LOW 0x180 /* core rev >= 3 only */ +#define B43legacy_MMIO_REV3PLUS_TSF_HIGH 0x184 /* core rev >= 3 only */ + +/* 32-bit DMA */ +#define B43legacy_MMIO_DMA32_BASE0 0x200 +#define B43legacy_MMIO_DMA32_BASE1 0x220 +#define B43legacy_MMIO_DMA32_BASE2 0x240 +#define B43legacy_MMIO_DMA32_BASE3 0x260 +#define B43legacy_MMIO_DMA32_BASE4 0x280 +#define B43legacy_MMIO_DMA32_BASE5 0x2A0 +/* 64-bit DMA */ +#define B43legacy_MMIO_DMA64_BASE0 0x200 +#define B43legacy_MMIO_DMA64_BASE1 0x240 +#define B43legacy_MMIO_DMA64_BASE2 0x280 +#define B43legacy_MMIO_DMA64_BASE3 0x2C0 +#define B43legacy_MMIO_DMA64_BASE4 0x300 +#define B43legacy_MMIO_DMA64_BASE5 0x340 +/* PIO */ +#define B43legacy_MMIO_PIO1_BASE 0x300 +#define B43legacy_MMIO_PIO2_BASE 0x310 +#define B43legacy_MMIO_PIO3_BASE 0x320 +#define B43legacy_MMIO_PIO4_BASE 0x330 + +#define B43legacy_MMIO_PHY_VER 0x3E0 +#define B43legacy_MMIO_PHY_RADIO 0x3E2 +#define B43legacy_MMIO_PHY0 0x3E6 +#define B43legacy_MMIO_ANTENNA 0x3E8 +#define B43legacy_MMIO_CHANNEL 0x3F0 +#define B43legacy_MMIO_CHANNEL_EXT 0x3F4 +#define B43legacy_MMIO_RADIO_CONTROL 0x3F6 +#define B43legacy_MMIO_RADIO_DATA_HIGH 0x3F8 
+#define B43legacy_MMIO_RADIO_DATA_LOW 0x3FA +#define B43legacy_MMIO_PHY_CONTROL 0x3FC +#define B43legacy_MMIO_PHY_DATA 0x3FE +#define B43legacy_MMIO_MACFILTER_CONTROL 0x420 +#define B43legacy_MMIO_MACFILTER_DATA 0x422 +#define B43legacy_MMIO_RCMTA_COUNT 0x43C /* Receive Match Transmitter Addr */ +#define B43legacy_MMIO_RADIO_HWENABLED_LO 0x49A +#define B43legacy_MMIO_GPIO_CONTROL 0x49C +#define B43legacy_MMIO_GPIO_MASK 0x49E +#define B43legacy_MMIO_TSF_0 0x632 /* core rev < 3 only */ +#define B43legacy_MMIO_TSF_1 0x634 /* core rev < 3 only */ +#define B43legacy_MMIO_TSF_2 0x636 /* core rev < 3 only */ +#define B43legacy_MMIO_TSF_3 0x638 /* core rev < 3 only */ +#define B43legacy_MMIO_RNG 0x65A +#define B43legacy_MMIO_POWERUP_DELAY 0x6A8 + +/* SPROM boardflags_lo values */ +#define B43legacy_BFL_PACTRL 0x0002 +#define B43legacy_BFL_RSSI 0x0008 +#define B43legacy_BFL_EXTLNA 0x1000 + +/* GPIO register offset, in both ChipCommon and PCI core. */ +#define B43legacy_GPIO_CONTROL 0x6c + +/* SHM Routing */ +#define B43legacy_SHM_SHARED 0x0001 +#define B43legacy_SHM_WIRELESS 0x0002 +#define B43legacy_SHM_HW 0x0004 +#define B43legacy_SHM_UCODE 0x0300 + +/* SHM Routing modifiers */ +#define B43legacy_SHM_AUTOINC_R 0x0200 /* Read Auto-increment */ +#define B43legacy_SHM_AUTOINC_W 0x0100 /* Write Auto-increment */ +#define B43legacy_SHM_AUTOINC_RW (B43legacy_SHM_AUTOINC_R | \ + B43legacy_SHM_AUTOINC_W) + +/* Misc SHM_SHARED offsets */ +#define B43legacy_SHM_SH_WLCOREREV 0x0016 /* 802.11 core revision */ +#define B43legacy_SHM_SH_HOSTFLO 0x005E /* Hostflags ucode opts (low) */ +#define B43legacy_SHM_SH_HOSTFHI 0x0060 /* Hostflags ucode opts (high) */ +/* SHM_SHARED crypto engine */ +#define B43legacy_SHM_SH_KEYIDXBLOCK 0x05D4 /* Key index/algorithm block */ +/* SHM_SHARED beacon variables */ +#define B43legacy_SHM_SH_BEACPHYCTL 0x0054 /* Beacon PHY TX control word */ +/* SHM_SHARED ACK/CTS control */ +#define B43legacy_SHM_SH_ACKCTSPHYCTL 0x0022 /* ACK/CTS PHY control word */ +/* SHM_SHARED probe response variables */ +#define B43legacy_SHM_SH_PRPHYCTL 0x0188 /* Probe Resp PHY TX control */ +#define B43legacy_SHM_SH_PRMAXTIME 0x0074 /* Probe Response max time */ +/* SHM_SHARED rate tables */ +/* SHM_SHARED microcode soft registers */ +#define B43legacy_SHM_SH_UCODEREV 0x0000 /* Microcode revision */ +#define B43legacy_SHM_SH_UCODEPATCH 0x0002 /* Microcode patchlevel */ +#define B43legacy_SHM_SH_UCODEDATE 0x0004 /* Microcode date */ +#define B43legacy_SHM_SH_UCODETIME 0x0006 /* Microcode time */ + +#define B43legacy_UCODEFLAGS_OFFSET 0x005E + +/* Hardware Radio Enable masks */ +#define B43legacy_MMIO_RADIO_HWENABLED_HI_MASK (1 << 16) +#define B43legacy_MMIO_RADIO_HWENABLED_LO_MASK (1 << 4) + +/* HostFlags. See b43legacy_hf_read/write() */ +#define B43legacy_HF_SYMW 0x00000002 /* G-PHY SYM workaround */ +#define B43legacy_HF_GDCW 0x00000020 /* G-PHY DV cancel filter */ +#define B43legacy_HF_OFDMPABOOST 0x00000040 /* Enable PA boost OFDM */ +#define B43legacy_HF_EDCF 0x00000100 /* on if WME/MAC suspended */ + +/* MacFilter offsets. 
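+ * Used together with the B43legacy_MMIO_MACFILTER_CONTROL/_DATA registers above.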
*/ +#define B43legacy_MACFILTER_SELF 0x0000 +#define B43legacy_MACFILTER_BSSID 0x0003 +#define B43legacy_MACFILTER_MAC 0x0010 + +/* PHYVersioning */ +#define B43legacy_PHYTYPE_B 0x01 +#define B43legacy_PHYTYPE_G 0x02 + +/* PHYRegisters */ +#define B43legacy_PHY_G_LO_CONTROL 0x0810 +#define B43legacy_PHY_ILT_G_CTRL 0x0472 +#define B43legacy_PHY_ILT_G_DATA1 0x0473 +#define B43legacy_PHY_ILT_G_DATA2 0x0474 +#define B43legacy_PHY_G_PCTL 0x0029 +#define B43legacy_PHY_RADIO_BITFIELD 0x0401 +#define B43legacy_PHY_G_CRS 0x0429 +#define B43legacy_PHY_NRSSILT_CTRL 0x0803 +#define B43legacy_PHY_NRSSILT_DATA 0x0804 + +/* RadioRegisters */ +#define B43legacy_RADIOCTL_ID 0x01 + +/* MAC Control bitfield */ +#define B43legacy_MACCTL_IHR_ENABLED 0x00000400 /* IHR Region Enabled */ +#define B43legacy_MACCTL_INFRA 0x00020000 /* Infrastructure mode */ +#define B43legacy_MACCTL_AP 0x00040000 /* AccessPoint mode */ +#define B43legacy_MACCTL_BEACPROMISC 0x00100000 /* Beacon Promiscuous */ +#define B43legacy_MACCTL_KEEP_BADPLCP 0x00200000 /* Keep bad PLCP frames */ +#define B43legacy_MACCTL_KEEP_CTL 0x00400000 /* Keep control frames */ +#define B43legacy_MACCTL_KEEP_BAD 0x00800000 /* Keep bad frames (FCS) */ +#define B43legacy_MACCTL_PROMISC 0x01000000 /* Promiscuous mode */ +#define B43legacy_MACCTL_GMODE 0x80000000 /* G Mode */ + +/* StatusBitField */ +#define B43legacy_SBF_MAC_ENABLED 0x00000001 +#define B43legacy_SBF_CORE_READY 0x00000004 +#define B43legacy_SBF_400 0x00000400 /*FIXME: fix name*/ +#define B43legacy_SBF_XFER_REG_BYTESWAP 0x00010000 +#define B43legacy_SBF_MODE_NOTADHOC 0x00020000 +#define B43legacy_SBF_MODE_AP 0x00040000 +#define B43legacy_SBF_RADIOREG_LOCK 0x00080000 +#define B43legacy_SBF_MODE_MONITOR 0x00400000 +#define B43legacy_SBF_MODE_PROMISC 0x01000000 +#define B43legacy_SBF_PS1 0x02000000 +#define B43legacy_SBF_PS2 0x04000000 +#define B43legacy_SBF_NO_SSID_BCAST 0x08000000 +#define B43legacy_SBF_TIME_UPDATE 0x10000000 + +/* 802.11 core specific TM State Low flags */ +#define B43legacy_TMSLOW_GMODE 0x20000000 /* G Mode Enable */ +#define B43legacy_TMSLOW_PLLREFSEL 0x00200000 /* PLL Freq Ref Select */ +#define B43legacy_TMSLOW_MACPHYCLKEN 0x00100000 /* MAC PHY Clock Ctrl Enbl */ +#define B43legacy_TMSLOW_PHYRESET 0x00080000 /* PHY Reset */ +#define B43legacy_TMSLOW_PHYCLKEN 0x00040000 /* PHY Clock Enable */ + +/* 802.11 core specific TM State High flags */ +#define B43legacy_TMSHIGH_FCLOCK 0x00040000 /* Fast Clock Available */ +#define B43legacy_TMSHIGH_GPHY 0x00010000 /* G-PHY avail (rev >= 5) */ + +#define B43legacy_UCODEFLAG_AUTODIV 0x0001 + +/* Generic-Interrupt reasons. 
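+ * Bit values for the B43legacy_MMIO_GEN_IRQ_REASON and _MASK registers.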
*/ +#define B43legacy_IRQ_MAC_SUSPENDED 0x00000001 +#define B43legacy_IRQ_BEACON 0x00000002 +#define B43legacy_IRQ_TBTT_INDI 0x00000004 /* Target Beacon Transmit Time */ +#define B43legacy_IRQ_BEACON_TX_OK 0x00000008 +#define B43legacy_IRQ_BEACON_CANCEL 0x00000010 +#define B43legacy_IRQ_ATIM_END 0x00000020 +#define B43legacy_IRQ_PMQ 0x00000040 +#define B43legacy_IRQ_PIO_WORKAROUND 0x00000100 +#define B43legacy_IRQ_MAC_TXERR 0x00000200 +#define B43legacy_IRQ_PHY_TXERR 0x00000800 +#define B43legacy_IRQ_PMEVENT 0x00001000 +#define B43legacy_IRQ_TIMER0 0x00002000 +#define B43legacy_IRQ_TIMER1 0x00004000 +#define B43legacy_IRQ_DMA 0x00008000 +#define B43legacy_IRQ_TXFIFO_FLUSH_OK 0x00010000 +#define B43legacy_IRQ_CCA_MEASURE_OK 0x00020000 +#define B43legacy_IRQ_NOISESAMPLE_OK 0x00040000 +#define B43legacy_IRQ_UCODE_DEBUG 0x08000000 +#define B43legacy_IRQ_RFKILL 0x10000000 +#define B43legacy_IRQ_TX_OK 0x20000000 +#define B43legacy_IRQ_PHY_G_CHANGED 0x40000000 +#define B43legacy_IRQ_TIMEOUT 0x80000000 + +#define B43legacy_IRQ_ALL 0xFFFFFFFF +#define B43legacy_IRQ_MASKTEMPLATE (B43legacy_IRQ_MAC_SUSPENDED | \ + B43legacy_IRQ_BEACON | \ + B43legacy_IRQ_TBTT_INDI | \ + B43legacy_IRQ_ATIM_END | \ + B43legacy_IRQ_PMQ | \ + B43legacy_IRQ_MAC_TXERR | \ + B43legacy_IRQ_PHY_TXERR | \ + B43legacy_IRQ_DMA | \ + B43legacy_IRQ_TXFIFO_FLUSH_OK | \ + B43legacy_IRQ_NOISESAMPLE_OK | \ + B43legacy_IRQ_UCODE_DEBUG | \ + B43legacy_IRQ_RFKILL | \ + B43legacy_IRQ_TX_OK) + +/* Device specific rate values. + * The actual values defined here are (rate_in_mbps * 2). + * Some code depends on this. Don't change it. */ +#define B43legacy_CCK_RATE_1MB 2 +#define B43legacy_CCK_RATE_2MB 4 +#define B43legacy_CCK_RATE_5MB 11 +#define B43legacy_CCK_RATE_11MB 22 +#define B43legacy_OFDM_RATE_6MB 12 +#define B43legacy_OFDM_RATE_9MB 18 +#define B43legacy_OFDM_RATE_12MB 24 +#define B43legacy_OFDM_RATE_18MB 36 +#define B43legacy_OFDM_RATE_24MB 48 +#define B43legacy_OFDM_RATE_36MB 72 +#define B43legacy_OFDM_RATE_48MB 96 +#define B43legacy_OFDM_RATE_54MB 108 +/* Convert a b43legacy rate value to a rate in 100kbps */ +#define B43legacy_RATE_TO_100KBPS(rate) (((rate) * 10) / 2) + + +#define B43legacy_DEFAULT_SHORT_RETRY_LIMIT 7 +#define B43legacy_DEFAULT_LONG_RETRY_LIMIT 4 + +/* Max size of a security key */ +#define B43legacy_SEC_KEYSIZE 16 +/* Security algorithms. */ +enum { + B43legacy_SEC_ALGO_NONE = 0, /* unencrypted, as of TX header. 
*/ + B43legacy_SEC_ALGO_WEP40, + B43legacy_SEC_ALGO_TKIP, + B43legacy_SEC_ALGO_AES, + B43legacy_SEC_ALGO_WEP104, + B43legacy_SEC_ALGO_AES_LEGACY, +}; + +/* Core Information Registers */ +#define B43legacy_CIR_BASE 0xf00 +#define B43legacy_CIR_SBTPSFLAG (B43legacy_CIR_BASE + 0x18) +#define B43legacy_CIR_SBIMSTATE (B43legacy_CIR_BASE + 0x90) +#define B43legacy_CIR_SBINTVEC (B43legacy_CIR_BASE + 0x94) +#define B43legacy_CIR_SBTMSTATELOW (B43legacy_CIR_BASE + 0x98) +#define B43legacy_CIR_SBTMSTATEHIGH (B43legacy_CIR_BASE + 0x9c) +#define B43legacy_CIR_SBIMCONFIGLOW (B43legacy_CIR_BASE + 0xa8) +#define B43legacy_CIR_SB_ID_HI (B43legacy_CIR_BASE + 0xfc) + +/* sbtmstatehigh state flags */ +#define B43legacy_SBTMSTATEHIGH_SERROR 0x00000001 +#define B43legacy_SBTMSTATEHIGH_BUSY 0x00000004 +#define B43legacy_SBTMSTATEHIGH_TIMEOUT 0x00000020 +#define B43legacy_SBTMSTATEHIGH_G_PHY_AVAIL 0x00010000 +#define B43legacy_SBTMSTATEHIGH_COREFLAGS 0x1FFF0000 +#define B43legacy_SBTMSTATEHIGH_DMA64BIT 0x10000000 +#define B43legacy_SBTMSTATEHIGH_GATEDCLK 0x20000000 +#define B43legacy_SBTMSTATEHIGH_BISTFAILED 0x40000000 +#define B43legacy_SBTMSTATEHIGH_BISTCOMPLETE 0x80000000 + +/* sbimstate flags */ +#define B43legacy_SBIMSTATE_IB_ERROR 0x20000 +#define B43legacy_SBIMSTATE_TIMEOUT 0x40000 + +#define PFX KBUILD_MODNAME ": " +#ifdef assert +# undef assert +#endif +#ifdef CONFIG_B43LEGACY_DEBUG +# define B43legacy_WARN_ON(expr) \ + do { \ + if (unlikely((expr))) { \ + printk(KERN_INFO PFX "Test (%s) failed at:" \ + " %s:%d:%s()\n", \ + #expr, __FILE__, \ + __LINE__, __FUNCTION__); \ + } \ + } while (0) +# define B43legacy_BUG_ON(expr) \ + do { \ + if (unlikely((expr))) { \ + printk(KERN_INFO PFX "Test (%s) failed\n", \ + #expr); \ + BUG_ON(expr); \ + } \ + } while (0) +# define B43legacy_DEBUG 1 +#else +# define B43legacy_WARN_ON(x) do { /* nothing */ } while (0) +# define B43legacy_BUG_ON(x) do { /* nothing */ } while (0) +# define B43legacy_DEBUG 0 +#endif + + +struct net_device; +struct pci_dev; +struct b43legacy_dmaring; +struct b43legacy_pioqueue; + +/* The firmware file header */ +#define B43legacy_FW_TYPE_UCODE 'u' +#define B43legacy_FW_TYPE_PCM 'p' +#define B43legacy_FW_TYPE_IV 'i' +struct b43legacy_fw_header { + /* File type */ + u8 type; + /* File format version */ + u8 ver; + u8 __padding[2]; + /* Size of the data. For ucode and PCM this is in bytes. + * For IV this is number-of-ivs. */ + __be32 size; +} __attribute__((__packed__)); + +/* Initial Value file format */ +#define B43legacy_IV_OFFSET_MASK 0x7FFF +#define B43legacy_IV_32BIT 0x8000 +struct b43legacy_iv { + __be16 offset_size; + union { + __be16 d16; + __be32 d32; + } data __attribute__((__packed__)); +} __attribute__((__packed__)); + +#define B43legacy_PHYMODE(phytype) (1 << (phytype)) +#define B43legacy_PHYMODE_B B43legacy_PHYMODE \ + ((B43legacy_PHYTYPE_B)) +#define B43legacy_PHYMODE_G B43legacy_PHYMODE \ + ((B43legacy_PHYTYPE_G)) + +/* Value pair to measure the LocalOscillator. */ +struct b43legacy_lopair { + s8 low; + s8 high; + u8 used:1; +}; +#define B43legacy_LO_COUNT (14*4) + +struct b43legacy_phy { + /* Possible PHYMODEs on this PHY */ + u8 possible_phymodes; + /* GMODE bit enabled in MACCTL? */ + bool gmode; + /* Possible ieee80211 subsystem hwmodes for this PHY. + * Which mode is selected, depends on thr GMODE enabled bit */ +#define B43legacy_MAX_PHYHWMODES 2 + struct ieee80211_hw_mode hwmodes[B43legacy_MAX_PHYHWMODES]; + + /* Analog Type */ + u8 analog; + /* B43legacy_PHYTYPE_ */ + u8 type; + /* PHY revision number. 
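Each initial-value record packs an offset and a width flag into the big-endian offset_size field: bit 15 (B43legacy_IV_32BIT) selects a 32-bit write and the low 15 bits give the offset. A minimal host-side decoding sketch; get_be16() and the sample record are illustrative, not taken from the firmware:

#include <stdint.h>
#include <stdio.h>

#define B43legacy_IV_OFFSET_MASK 0x7FFF
#define B43legacy_IV_32BIT       0x8000

/* Decode a big-endian 16-bit field from a byte stream. */
static uint16_t get_be16(const uint8_t *p)
{
	return (uint16_t)((p[0] << 8) | p[1]);
}

int main(void)
{
	/* One made-up IV record: offset_size = 0x8123, i.e. a 32-bit
	 * write to offset 0x0123, followed by the big-endian value. */
	const uint8_t record[] = { 0x81, 0x23, 0xCA, 0xFE, 0xBA, 0xBE };

	uint16_t offset_size = get_be16(record);
	uint16_t offset = offset_size & B43legacy_IV_OFFSET_MASK;
	int is32bit = !!(offset_size & B43legacy_IV_32BIT);

	printf("offset=0x%04X, %d-bit value follows\n",
	       offset, is32bit ? 32 : 16);
	return 0;
}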
*/ + u8 rev; + + u16 antenna_diversity; + u16 savedpctlreg; + /* Radio versioning */ + u16 radio_manuf; /* Radio manufacturer */ + u16 radio_ver; /* Radio version */ + u8 calibrated:1; + u8 radio_rev; /* Radio revision */ + + bool locked; /* Only used in b43legacy_phy_{un}lock() */ + bool dyn_tssi_tbl; /* tssi2dbm is kmalloc()ed. */ + + /* ACI (adjacent channel interference) flags. */ + bool aci_enable; + bool aci_wlan_automatic; + bool aci_hw_rssi; + + /* Radio switched on/off */ + bool radio_on; + struct { + /* Values saved when turning the radio off. + * They are needed when turning it on again. */ + bool valid; + u16 rfover; + u16 rfoverval; + } radio_off_context; + + u16 minlowsig[2]; + u16 minlowsigpos[2]; + + /* LO Measurement Data. + * Use b43legacy_get_lopair() to get a value. + */ + struct b43legacy_lopair *_lo_pairs; + /* TSSI to dBm table in use */ + const s8 *tssi2dbm; + /* idle TSSI value */ + s8 idle_tssi; + /* Target idle TSSI */ + int tgt_idle_tssi; + /* Current idle TSSI */ + int cur_idle_tssi; + + /* LocalOscillator control values. */ + struct b43legacy_txpower_lo_control *lo_control; + /* Values from b43legacy_calc_loopback_gain() */ + s16 max_lb_gain; /* Maximum Loopback gain in hdB */ + s16 trsw_rx_gain; /* TRSW RX gain in hdB */ + s16 lna_lod_gain; /* LNA lod */ + s16 lna_gain; /* LNA */ + s16 pga_gain; /* PGA */ + + /* PHY lock for core.rev < 3 + * This lock is only used by b43legacy_phy_{un}lock() + */ + spinlock_t lock; + + /* Desired TX power level (in dBm). This is set by the user and + * adjusted in b43legacy_phy_xmitpower(). */ + u8 power_level; + + /* Values from b43legacy_calc_loopback_gain() */ + u16 loopback_gain[2]; + + /* TX Power control values. */ + /* B/G PHY */ + struct { + /* Current Radio Attenuation for TXpower recalculation. */ + u16 rfatt; + /* Current Baseband Attenuation for TXpower recalculation. */ + u16 bbatt; + /* Current TXpower control value for TXpower recalculation. */ + u16 txctl1; + u16 txctl2; + }; + /* A PHY */ + struct { + u16 txpwr_offset; + }; + +#ifdef CONFIG_B43LEGACY_DEBUG + bool manual_txpower_control; /* Manual TX-power control enabled? */ +#endif + /* Current Interference Mitigation mode */ + int interfmode; + /* Stack of saved values from the Interference Mitigation code. + * Each value in the stack is layed out as follows: + * bit 0-11: offset + * bit 12-15: register ID + * bit 16-32: value + * register ID is: 0x1 PHY, 0x2 Radio, 0x3 ILT + */ +#define B43legacy_INTERFSTACK_SIZE 26 + u32 interfstack[B43legacy_INTERFSTACK_SIZE]; + + /* Saved values from the NRSSI Slope calculation */ + s16 nrssi[2]; + s32 nrssislope; + /* In memory nrssi lookup table. */ + s8 nrssi_lt[64]; + + /* current channel */ + u8 channel; + + u16 lofcal; + + u16 initval; +}; + +/* Data structures for DMA transmission, per 80211 core. */ +struct b43legacy_dma { + struct b43legacy_dmaring *tx_ring0; + struct b43legacy_dmaring *tx_ring1; + struct b43legacy_dmaring *tx_ring2; + struct b43legacy_dmaring *tx_ring3; + struct b43legacy_dmaring *tx_ring4; + struct b43legacy_dmaring *tx_ring5; + + struct b43legacy_dmaring *rx_ring0; + struct b43legacy_dmaring *rx_ring3; /* only on core.rev < 5 */ +}; + +/* Data structures for PIO transmission, per 80211 core. */ +struct b43legacy_pio { + struct b43legacy_pioqueue *queue0; + struct b43legacy_pioqueue *queue1; + struct b43legacy_pioqueue *queue2; + struct b43legacy_pioqueue *queue3; +}; + +/* Context information for a noise calculation (Link Quality). 
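The interference-mitigation save stack uses the bit layout documented in the comment above: offset in bits 0-11, register ID in bits 12-15, saved value in the upper 16 bits. A standalone pack/unpack sketch of that layout (stack_entry() is a made-up helper name):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Entry layout from the interfstack comment:
 *  bits  0-11: register offset
 *  bits 12-15: register ID (0x1 PHY, 0x2 Radio, 0x3 ILT)
 *  upper bits: saved 16-bit value
 */
static uint32_t stack_entry(uint8_t id, uint16_t offset, uint16_t value)
{
	return (offset & 0x0FFFu) | ((uint32_t)(id & 0xF) << 12) |
	       ((uint32_t)value << 16);
}

int main(void)
{
	uint32_t e = stack_entry(0x2 /* Radio */, 0x052, 0xBEEF);

	assert((e & 0x0FFF) == 0x052);		/* offset */
	assert(((e >> 12) & 0xF) == 0x2);	/* register ID */
	assert((e >> 16) == 0xBEEF);		/* value */
	printf("entry = 0x%08X\n", e);
	return 0;
}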
*/ +struct b43legacy_noise_calculation { + u8 channel_at_start; + bool calculation_running; + u8 nr_samples; + s8 samples[8][4]; +}; + +struct b43legacy_stats { + u8 link_noise; + /* Store the last TX/RX times here for updating the leds. */ + unsigned long last_tx; + unsigned long last_rx; +}; + +struct b43legacy_key { + void *keyconf; + bool enabled; + u8 algorithm; +}; + +struct b43legacy_wldev; + +/* Data structure for the WLAN parts (802.11 cores) of the b43legacy chip. */ +struct b43legacy_wl { + /* Pointer to the active wireless device on this chip */ + struct b43legacy_wldev *current_dev; + /* Pointer to the ieee80211 hardware data structure */ + struct ieee80211_hw *hw; + + spinlock_t irq_lock; /* locks IRQ */ + struct mutex mutex; /* locks wireless core state */ + spinlock_t leds_lock; /* lock for leds */ + + /* We can only have one operating interface (802.11 core) + * at a time. General information about this interface follows. + */ + + /* Opaque ID of the operating interface from the ieee80211 + * subsystem. Do not modify. + */ + int if_id; + /* MAC address (can be NULL). */ + u8 mac_addr[ETH_ALEN]; + /* Current BSSID (can be NULL). */ + u8 bssid[ETH_ALEN]; + /* Interface type. (IEEE80211_IF_TYPE_XXX) */ + int if_type; + /* Is the card operating in AP, STA or IBSS mode? */ + bool operating; + /* filter flags */ + unsigned int filter_flags; + /* Stats about the wireless interface */ + struct ieee80211_low_level_stats ieee_stats; + + struct hwrng rng; + u8 rng_initialized; + char rng_name[30 + 1]; + + /* List of all wireless devices on this chip */ + struct list_head devlist; + u8 nr_devs; +}; + +/* Pointers to the firmware data and meta information about it. */ +struct b43legacy_firmware { + /* Microcode */ + const struct firmware *ucode; + /* PCM code */ + const struct firmware *pcm; + /* Initial MMIO values for the firmware */ + const struct firmware *initvals; + /* Initial MMIO values for the firmware, band-specific */ + const struct firmware *initvals_band; + /* Firmware revision */ + u16 rev; + /* Firmware patchlevel */ + u16 patch; +}; + +/* Device (802.11 core) initialization status. */ +enum { + B43legacy_STAT_UNINIT = 0, /* Uninitialized. */ + B43legacy_STAT_INITIALIZED = 1, /* Initialized, not yet started. */ + B43legacy_STAT_STARTED = 2, /* Up and running. */ +}; +#define b43legacy_status(wldev) atomic_read(&(wldev)->__init_status) +#define b43legacy_set_status(wldev, stat) do { \ + atomic_set(&(wldev)->__init_status, (stat)); \ + smp_wmb(); \ + } while (0) + +/* *** --- HOW LOCKING WORKS IN B43legacy --- *** + * + * You should always acquire both, wl->mutex and wl->irq_lock unless: + * - You don't need to acquire wl->irq_lock, if the interface is stopped. + * - You don't need to acquire wl->mutex in the IRQ handler, IRQ tasklet + * and packet TX path (and _ONLY_ there.) + */ + +/* Data structure for one wireless device (802.11 core) */ +struct b43legacy_wldev { + struct ssb_device *dev; + struct b43legacy_wl *wl; + + /* The device initialization status. + * Use b43legacy_status() to query. */ + atomic_t __init_status; + /* Saved init status for handling suspend. */ + int suspend_init_status; + + bool __using_pio; /* Using pio rather than dma. */ + bool bad_frames_preempt;/* Use "Bad Frames Preemption". */ + bool reg124_set_0x4; /* Variable to keep track of IRQ. */ + bool short_preamble; /* TRUE if using short preamble. */ + bool short_slot; /* TRUE if using short slot timing. */ + bool radio_hw_enable; /* State of radio hardware enable bit. 
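The three initialization states are deliberately ordered values, which lets callers gate on "at least initialized" with a single comparison instead of enumerating states. A tiny standalone illustration of that idea (at_least_initialized() is hypothetical):

#include <stdio.h>

enum {
	STAT_UNINIT      = 0,	/* mirrors B43legacy_STAT_UNINIT */
	STAT_INITIALIZED = 1,
	STAT_STARTED     = 2,
};

/* "At least initialized" is a single ordered comparison. */
static int at_least_initialized(int status)
{
	return status >= STAT_INITIALIZED;
}

int main(void)
{
	int s;

	for (s = STAT_UNINIT; s <= STAT_STARTED; s++)
		printf("status %d: %s\n", s,
		       at_least_initialized(s) ? "usable" : "not ready");
	return 0;
}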
*/ + + /* PHY/Radio device. */ + struct b43legacy_phy phy; + union { + /* DMA engines. */ + struct b43legacy_dma dma; + /* PIO engines. */ + struct b43legacy_pio pio; + }; + + /* Various statistics about the physical device. */ + struct b43legacy_stats stats; + +#define B43legacy_NR_LEDS 4 + struct b43legacy_led leds[B43legacy_NR_LEDS]; + + /* Reason code of the last interrupt. */ + u32 irq_reason; + u32 dma_reason[6]; + /* saved irq enable/disable state bitfield. */ + u32 irq_savedstate; + /* Link Quality calculation context. */ + struct b43legacy_noise_calculation noisecalc; + /* if > 0 MAC is suspended. if == 0 MAC is enabled. */ + int mac_suspended; + + /* Interrupt Service Routine tasklet (bottom-half) */ + struct tasklet_struct isr_tasklet; + + /* Periodic tasks */ + struct delayed_work periodic_work; + unsigned int periodic_state; + + struct work_struct restart_work; + + /* encryption/decryption */ + u16 ktp; /* Key table pointer */ + u8 max_nr_keys; + struct b43legacy_key key[58]; + + /* Cached beacon template while uploading the template. */ + struct sk_buff *cached_beacon; + + /* Firmware data */ + struct b43legacy_firmware fw; + + /* Devicelist in struct b43legacy_wl (all 802.11 cores) */ + struct list_head list; + + /* Debugging stuff follows. */ +#ifdef CONFIG_B43LEGACY_DEBUG + struct b43legacy_dfsentry *dfsentry; +#endif +}; + + +static inline +struct b43legacy_wl *hw_to_b43legacy_wl(struct ieee80211_hw *hw) +{ + return hw->priv; +} + +/* Helper function, which returns a boolean. + * TRUE, if PIO is used; FALSE, if DMA is used. + */ +#if defined(CONFIG_B43LEGACY_DMA) && defined(CONFIG_B43LEGACY_PIO) +static inline +int b43legacy_using_pio(struct b43legacy_wldev *dev) +{ + return dev->__using_pio; +} +#elif defined(CONFIG_B43LEGACY_DMA) +static inline +int b43legacy_using_pio(struct b43legacy_wldev *dev) +{ + return 0; +} +#elif defined(CONFIG_B43LEGACY_PIO) +static inline +int b43legacy_using_pio(struct b43legacy_wldev *dev) +{ + return 1; +} +#else +# error "Using neither DMA nor PIO? Confused..." +#endif + + +static inline +struct b43legacy_wldev *dev_to_b43legacy_wldev(struct device *dev) +{ + struct ssb_device *ssb_dev = dev_to_ssb_dev(dev); + return ssb_get_drvdata(ssb_dev); +} + +/* Is the device operating in a specified mode (IEEE80211_IF_TYPE_XXX). */ +static inline +int b43legacy_is_mode(struct b43legacy_wl *wl, int type) +{ + return (wl->operating && + wl->if_type == type); +} + +static inline +bool is_bcm_board_vendor(struct b43legacy_wldev *dev) +{ + return (dev->dev->bus->boardinfo.vendor == PCI_VENDOR_ID_BROADCOM); +} + +static inline +u16 b43legacy_read16(struct b43legacy_wldev *dev, u16 offset) +{ + return ssb_read16(dev->dev, offset); +} + +static inline +void b43legacy_write16(struct b43legacy_wldev *dev, u16 offset, u16 value) +{ + ssb_write16(dev->dev, offset, value); +} + +static inline +u32 b43legacy_read32(struct b43legacy_wldev *dev, u16 offset) +{ + return ssb_read32(dev->dev, offset); +} + +static inline +void b43legacy_write32(struct b43legacy_wldev *dev, u16 offset, u32 value) +{ + ssb_write32(dev->dev, offset, value); +} + +static inline +struct b43legacy_lopair *b43legacy_get_lopair(struct b43legacy_phy *phy, + u16 radio_attenuation, + u16 baseband_attenuation) +{ + return phy->_lo_pairs + (radio_attenuation + + 14 * (baseband_attenuation / 2)); +} + + + +/* Message printing */ +void b43legacyinfo(struct b43legacy_wl *wl, const char *fmt, ...) 
+ __attribute__((format(printf, 2, 3))); +void b43legacyerr(struct b43legacy_wl *wl, const char *fmt, ...) + __attribute__((format(printf, 2, 3))); +void b43legacywarn(struct b43legacy_wl *wl, const char *fmt, ...) + __attribute__((format(printf, 2, 3))); +#if B43legacy_DEBUG +void b43legacydbg(struct b43legacy_wl *wl, const char *fmt, ...) + __attribute__((format(printf, 2, 3))); +#else /* DEBUG */ +# define b43legacydbg(wl, fmt...) do { /* nothing */ } while (0) +#endif /* DEBUG */ + + +/** Limit a value between two limits */ +#ifdef limit_value +# undef limit_value +#endif +#define limit_value(value, min, max) \ + ({ \ + typeof(value) __value = (value); \ + typeof(value) __min = (min); \ + typeof(value) __max = (max); \ + if (__value < __min) \ + __value = __min; \ + else if (__value > __max) \ + __value = __max; \ + __value; \ + }) + +/* Macros for printing a value in Q5.2 format */ +#define Q52_FMT "%u.%u" +#define Q52_ARG(q52) ((q52) / 4), (((q52) & 3) * 100 / 4) + +#endif /* B43legacy_H_ */ diff --git a/drivers/net/wireless/b43legacy/debugfs.c b/drivers/net/wireless/b43legacy/debugfs.c new file mode 100644 index 0000000000000000000000000000000000000000..eefa6fb79685d3b5f2c20eb0d312cc05220f66fe --- /dev/null +++ b/drivers/net/wireless/b43legacy/debugfs.c @@ -0,0 +1,505 @@ +/* + + Broadcom B43legacy wireless driver + + debugfs driver debugging code + + Copyright (c) 2005-2007 Michael Buesch + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; see the file COPYING. If not, write to + the Free Software Foundation, Inc., 51 Franklin Steet, Fifth Floor, + Boston, MA 02110-1301, USA. + +*/ + +#include +#include +#include +#include +#include +#include + +#include "b43legacy.h" +#include "main.h" +#include "debugfs.h" +#include "dma.h" +#include "pio.h" +#include "xmit.h" + + +/* The root directory. */ +static struct dentry *rootdir; + +struct b43legacy_debugfs_fops { + ssize_t (*read)(struct b43legacy_wldev *dev, char *buf, size_t bufsize); + int (*write)(struct b43legacy_wldev *dev, const char *buf, size_t count); + struct file_operations fops; + /* Offset of struct b43legacy_dfs_file in struct b43legacy_dfsentry */ + size_t file_struct_offset; + /* Take wl->irq_lock before calling read/write? */ + bool take_irqlock; +}; + +static inline +struct b43legacy_dfs_file * fops_to_dfs_file(struct b43legacy_wldev *dev, + const struct b43legacy_debugfs_fops *dfops) +{ + void *p; + + p = dev->dfsentry; + p += dfops->file_struct_offset; + + return p; +} + + +#define fappend(fmt, x...) 
\ + do { \ + if (bufsize - count) \ + count += snprintf(buf + count, \ + bufsize - count, \ + fmt , ##x); \ + else \ + printk(KERN_ERR "b43legacy: fappend overflow\n"); \ + } while (0) + + +/* wl->irq_lock is locked */ +static ssize_t tsf_read_file(struct b43legacy_wldev *dev, char *buf, size_t bufsize) +{ + ssize_t count = 0; + u64 tsf; + + b43legacy_tsf_read(dev, &tsf); + fappend("0x%08x%08x\n", + (unsigned int)((tsf & 0xFFFFFFFF00000000ULL) >> 32), + (unsigned int)(tsf & 0xFFFFFFFFULL)); + + return count; +} + +/* wl->irq_lock is locked */ +static int tsf_write_file(struct b43legacy_wldev *dev, const char *buf, size_t count) +{ + u64 tsf; + + if (sscanf(buf, "%llu", (unsigned long long *)(&tsf)) != 1) + return -EINVAL; + b43legacy_tsf_write(dev, tsf); + + return 0; +} + +/* wl->irq_lock is locked */ +static ssize_t ucode_regs_read_file(struct b43legacy_wldev *dev, char *buf, size_t bufsize) +{ + ssize_t count = 0; + int i; + + for (i = 0; i < 64; i++) { + fappend("r%d = 0x%04x\n", i, + b43legacy_shm_read16(dev, B43legacy_SHM_WIRELESS, i)); + } + + return count; +} + +/* wl->irq_lock is locked */ +static ssize_t shm_read_file(struct b43legacy_wldev *dev, char *buf, size_t bufsize) +{ + ssize_t count = 0; + int i; + u16 tmp; + __le16 *le16buf = (__le16 *)buf; + + for (i = 0; i < 0x1000; i++) { + if (bufsize <= 0) + break; + tmp = b43legacy_shm_read16(dev, B43legacy_SHM_SHARED, 2 * i); + le16buf[i] = cpu_to_le16(tmp); + count += sizeof(tmp); + bufsize -= sizeof(tmp); + } + + return count; +} + +static ssize_t txstat_read_file(struct b43legacy_wldev *dev, char *buf, size_t bufsize) +{ + struct b43legacy_txstatus_log *log = &dev->dfsentry->txstatlog; + ssize_t count = 0; + unsigned long flags; + int i, idx; + struct b43legacy_txstatus *stat; + + spin_lock_irqsave(&log->lock, flags); + if (log->end < 0) { + fappend("Nothing transmitted, yet\n"); + goto out_unlock; + } + fappend("b43legacy TX status reports:\n\n" + "index | cookie | seq | phy_stat | frame_count | " + "rts_count | supp_reason | pm_indicated | " + "intermediate | for_ampdu | acked\n" "---\n"); + i = log->end + 1; + idx = 0; + while (1) { + if (i == B43legacy_NR_LOGGED_TXSTATUS) + i = 0; + stat = &(log->log[i]); + if (stat->cookie) { + fappend("%03d | " + "0x%04X | 0x%04X | 0x%02X | " + "0x%X | 0x%X | " + "%u | %u | " + "%u | %u | %u\n", + idx, + stat->cookie, stat->seq, stat->phy_stat, + stat->frame_count, stat->rts_count, + stat->supp_reason, stat->pm_indicated, + stat->intermediate, stat->for_ampdu, + stat->acked); + idx++; + } + if (i == log->end) + break; + i++; + } +out_unlock: + spin_unlock_irqrestore(&log->lock, flags); + + return count; +} + +/* wl->irq_lock is locked */ +static int restart_write_file(struct b43legacy_wldev *dev, const char *buf, size_t count) +{ + int err = 0; + + if (count > 0 && buf[0] == '1') { + b43legacy_controller_restart(dev, "manually restarted"); + } else + err = -EINVAL; + + return err; +} + +#undef fappend + +static int b43legacy_debugfs_open(struct inode *inode, struct file *file) +{ + file->private_data = inode->i_private; + return 0; +} + +static ssize_t b43legacy_debugfs_read(struct file *file, char __user *userbuf, + size_t count, loff_t *ppos) +{ + struct b43legacy_wldev *dev; + struct b43legacy_debugfs_fops *dfops; + struct b43legacy_dfs_file *dfile; + ssize_t ret = 0; + char *buf; + const size_t bufsize = 1024 * 128; + const size_t buforder = get_order(bufsize); + int err = 0; + + if (!count) + return 0; + dev = file->private_data; + if (!dev) + return -ENODEV; + + 
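fappend() above appends formatted text to the dump buffer while handing snprintf() only the space that is still left. A user-space sketch of the same bounded-append idea (append() is a hypothetical stand-in for the macro):

#include <stdio.h>

/* Bounded append: snprintf() only ever sees the remaining space. */
static size_t append(char *buf, size_t bufsize, size_t count,
		     const char *fmt, unsigned int val)
{
	if (count < bufsize)
		count += snprintf(buf + count, bufsize - count, fmt, val);
	return count;
}

int main(void)
{
	char buf[64];
	size_t count = 0;

	count = append(buf, sizeof(buf), count, "r%u = ", 5);
	count = append(buf, sizeof(buf), count, "0x%04x\n", 0xBEEFu);
	fputs(buf, stdout);
	printf("%zu bytes used\n", count);
	return 0;
}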
mutex_lock(&dev->wl->mutex); + if (b43legacy_status(dev) < B43legacy_STAT_INITIALIZED) { + err = -ENODEV; + goto out_unlock; + } + + dfops = container_of(file->f_op, struct b43legacy_debugfs_fops, fops); + if (!dfops->read) { + err = -ENOSYS; + goto out_unlock; + } + dfile = fops_to_dfs_file(dev, dfops); + + if (!dfile->buffer) { + buf = (char *)__get_free_pages(GFP_KERNEL, buforder); + if (!buf) { + err = -ENOMEM; + goto out_unlock; + } + memset(buf, 0, bufsize); + if (dfops->take_irqlock) { + spin_lock_irq(&dev->wl->irq_lock); + ret = dfops->read(dev, buf, bufsize); + spin_unlock_irq(&dev->wl->irq_lock); + } else + ret = dfops->read(dev, buf, bufsize); + if (ret <= 0) { + free_pages((unsigned long)buf, buforder); + err = ret; + goto out_unlock; + } + dfile->data_len = ret; + dfile->buffer = buf; + } + + ret = simple_read_from_buffer(userbuf, count, ppos, + dfile->buffer, + dfile->data_len); + if (*ppos >= dfile->data_len) { + free_pages((unsigned long)dfile->buffer, buforder); + dfile->buffer = NULL; + dfile->data_len = 0; + } +out_unlock: + mutex_unlock(&dev->wl->mutex); + + return err ? err : ret; +} + +static ssize_t b43legacy_debugfs_write(struct file *file, + const char __user *userbuf, + size_t count, loff_t *ppos) +{ + struct b43legacy_wldev *dev; + struct b43legacy_debugfs_fops *dfops; + char *buf; + int err = 0; + + if (!count) + return 0; + if (count > PAGE_SIZE) + return -E2BIG; + dev = file->private_data; + if (!dev) + return -ENODEV; + + mutex_lock(&dev->wl->mutex); + if (b43legacy_status(dev) < B43legacy_STAT_INITIALIZED) { + err = -ENODEV; + goto out_unlock; + } + + dfops = container_of(file->f_op, struct b43legacy_debugfs_fops, fops); + if (!dfops->write) { + err = -ENOSYS; + goto out_unlock; + } + + buf = (char *)get_zeroed_page(GFP_KERNEL); + if (!buf) { + err = -ENOMEM; + goto out_unlock; + } + if (copy_from_user(buf, userbuf, count)) { + err = -EFAULT; + goto out_freepage; + } + if (dfops->take_irqlock) { + spin_lock_irq(&dev->wl->irq_lock); + err = dfops->write(dev, buf, count); + spin_unlock_irq(&dev->wl->irq_lock); + } else + err = dfops->write(dev, buf, count); + if (err) + goto out_freepage; + +out_freepage: + free_page((unsigned long)buf); +out_unlock: + mutex_unlock(&dev->wl->mutex); + + return err ? 
err : count; +} + + +#define B43legacy_DEBUGFS_FOPS(name, _read, _write, _take_irqlock) \ + static struct b43legacy_debugfs_fops fops_##name = { \ + .read = _read, \ + .write = _write, \ + .fops = { \ + .open = b43legacy_debugfs_open, \ + .read = b43legacy_debugfs_read, \ + .write = b43legacy_debugfs_write, \ + }, \ + .file_struct_offset = offsetof(struct b43legacy_dfsentry, \ + file_##name), \ + .take_irqlock = _take_irqlock, \ + } + +B43legacy_DEBUGFS_FOPS(tsf, tsf_read_file, tsf_write_file, 1); +B43legacy_DEBUGFS_FOPS(ucode_regs, ucode_regs_read_file, NULL, 1); +B43legacy_DEBUGFS_FOPS(shm, shm_read_file, NULL, 1); +B43legacy_DEBUGFS_FOPS(txstat, txstat_read_file, NULL, 0); +B43legacy_DEBUGFS_FOPS(restart, NULL, restart_write_file, 1); + + +int b43legacy_debug(struct b43legacy_wldev *dev, enum b43legacy_dyndbg feature) +{ + return !!(dev->dfsentry && dev->dfsentry->dyn_debug[feature]); +} + +static void b43legacy_remove_dynamic_debug(struct b43legacy_wldev *dev) +{ + struct b43legacy_dfsentry *e = dev->dfsentry; + int i; + + for (i = 0; i < __B43legacy_NR_DYNDBG; i++) + debugfs_remove(e->dyn_debug_dentries[i]); +} + +static void b43legacy_add_dynamic_debug(struct b43legacy_wldev *dev) +{ + struct b43legacy_dfsentry *e = dev->dfsentry; + struct dentry *d; + +#define add_dyn_dbg(name, id, initstate) do { \ + e->dyn_debug[id] = (initstate); \ + d = debugfs_create_bool(name, 0600, e->subdir, \ + &(e->dyn_debug[id])); \ + if (!IS_ERR(d)) \ + e->dyn_debug_dentries[id] = d; \ + } while (0) + + add_dyn_dbg("debug_xmitpower", B43legacy_DBG_XMITPOWER, 0); + add_dyn_dbg("debug_dmaoverflow", B43legacy_DBG_DMAOVERFLOW, 0); + add_dyn_dbg("debug_dmaverbose", B43legacy_DBG_DMAVERBOSE, 0); + add_dyn_dbg("debug_pwork_fast", B43legacy_DBG_PWORK_FAST, 0); + add_dyn_dbg("debug_pwork_stop", B43legacy_DBG_PWORK_STOP, 0); + +#undef add_dyn_dbg +} + +void b43legacy_debugfs_add_device(struct b43legacy_wldev *dev) +{ + struct b43legacy_dfsentry *e; + struct b43legacy_txstatus_log *log; + char devdir[16]; + + B43legacy_WARN_ON(!dev); + e = kzalloc(sizeof(*e), GFP_KERNEL); + if (!e) { + b43legacyerr(dev->wl, "debugfs: add device OOM\n"); + return; + } + e->dev = dev; + log = &e->txstatlog; + log->log = kcalloc(B43legacy_NR_LOGGED_TXSTATUS, + sizeof(struct b43legacy_txstatus), GFP_KERNEL); + if (!log->log) { + b43legacyerr(dev->wl, "debugfs: add device txstatus OOM\n"); + kfree(e); + return; + } + log->end = -1; + spin_lock_init(&log->lock); + + dev->dfsentry = e; + + snprintf(devdir, sizeof(devdir), "%s", wiphy_name(dev->wl->hw->wiphy)); + e->subdir = debugfs_create_dir(devdir, rootdir); + if (!e->subdir || IS_ERR(e->subdir)) { + if (e->subdir == ERR_PTR(-ENODEV)) { + b43legacydbg(dev->wl, "DebugFS (CONFIG_DEBUG_FS) not " + "enabled in kernel config\n"); + } else { + b43legacyerr(dev->wl, "debugfs: cannot create %s directory\n", + devdir); + } + dev->dfsentry = NULL; + kfree(log->log); + kfree(e); + return; + } + +#define ADD_FILE(name, mode) \ + do { \ + struct dentry *d; \ + d = debugfs_create_file(__stringify(name), \ + mode, e->subdir, dev, \ + &fops_##name.fops); \ + e->file_##name.dentry = NULL; \ + if (!IS_ERR(d)) \ + e->file_##name.dentry = d; \ + } while (0) + + + ADD_FILE(tsf, 0600); + ADD_FILE(ucode_regs, 0400); + ADD_FILE(shm, 0400); + ADD_FILE(txstat, 0400); + ADD_FILE(restart, 0200); + +#undef ADD_FILE + + b43legacy_add_dynamic_debug(dev); +} + +void b43legacy_debugfs_remove_device(struct b43legacy_wldev *dev) +{ + struct b43legacy_dfsentry *e; + + if (!dev) + return; + e = dev->dfsentry; + if (!e) + 
return; + b43legacy_remove_dynamic_debug(dev); + + debugfs_remove(e->file_tsf.dentry); + debugfs_remove(e->file_ucode_regs.dentry); + debugfs_remove(e->file_shm.dentry); + debugfs_remove(e->file_txstat.dentry); + debugfs_remove(e->file_restart.dentry); + + debugfs_remove(e->subdir); + kfree(e->txstatlog.log); + kfree(e); +} + +void b43legacy_debugfs_log_txstat(struct b43legacy_wldev *dev, + const struct b43legacy_txstatus *status) +{ + struct b43legacy_dfsentry *e = dev->dfsentry; + struct b43legacy_txstatus_log *log; + struct b43legacy_txstatus *cur; + int i; + + if (!e) + return; + log = &e->txstatlog; + B43legacy_WARN_ON(!irqs_disabled()); + spin_lock(&log->lock); + i = log->end + 1; + if (i == B43legacy_NR_LOGGED_TXSTATUS) + i = 0; + log->end = i; + cur = &(log->log[i]); + memcpy(cur, status, sizeof(*cur)); + spin_unlock(&log->lock); +} + +void b43legacy_debugfs_init(void) +{ + rootdir = debugfs_create_dir(KBUILD_MODNAME, NULL); + if (IS_ERR(rootdir)) + rootdir = NULL; +} + +void b43legacy_debugfs_exit(void) +{ + debugfs_remove(rootdir); +} diff --git a/drivers/net/wireless/b43legacy/debugfs.h b/drivers/net/wireless/b43legacy/debugfs.h new file mode 100644 index 0000000000000000000000000000000000000000..ae3b0d0fa84904b146f9c9129fd70dd1eedc5168 --- /dev/null +++ b/drivers/net/wireless/b43legacy/debugfs.h @@ -0,0 +1,89 @@ +#ifndef B43legacy_DEBUGFS_H_ +#define B43legacy_DEBUGFS_H_ + +struct b43legacy_wldev; +struct b43legacy_txstatus; + +enum b43legacy_dyndbg { /* Dynamic debugging features */ + B43legacy_DBG_XMITPOWER, + B43legacy_DBG_DMAOVERFLOW, + B43legacy_DBG_DMAVERBOSE, + B43legacy_DBG_PWORK_FAST, + B43legacy_DBG_PWORK_STOP, + __B43legacy_NR_DYNDBG, +}; + + +#ifdef CONFIG_B43LEGACY_DEBUG + +struct dentry; + +#define B43legacy_NR_LOGGED_TXSTATUS 100 + +struct b43legacy_txstatus_log { + struct b43legacy_txstatus *log; + int end; + spinlock_t lock; /* lock for debugging */ +}; + +struct b43legacy_dfs_file { + struct dentry *dentry; + char *buffer; + size_t data_len; +}; + +struct b43legacy_dfsentry { + struct b43legacy_wldev *dev; + struct dentry *subdir; + + struct b43legacy_dfs_file file_tsf; + struct b43legacy_dfs_file file_ucode_regs; + struct b43legacy_dfs_file file_shm; + struct b43legacy_dfs_file file_txstat; + struct b43legacy_dfs_file file_txpower_g; + struct b43legacy_dfs_file file_restart; + struct b43legacy_dfs_file file_loctls; + + struct b43legacy_txstatus_log txstatlog; + + /* Enabled/Disabled list for the dynamic debugging features. */ + u32 dyn_debug[__B43legacy_NR_DYNDBG]; + /* Dentries for the dynamic debugging entries. 
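b43legacy_debugfs_log_txstat() treats the log as a ring: end starts at -1, each report advances it with wrap-around, and txstat_read_file() walks the entries oldest-to-newest starting one slot past end. A standalone sketch of that indexing with a shrunken ring (NR_LOGGED and the int payload are placeholders for the real structures):

#include <stdio.h>

#define NR_LOGGED 5	/* stands in for B43legacy_NR_LOGGED_TXSTATUS */

static int log_end = -1;		/* -1: nothing logged yet */
static int log_entries[NR_LOGGED];

/* Append like b43legacy_debugfs_log_txstat(): advance end with wrap. */
static void log_append(int value)
{
	int i = log_end + 1;

	if (i == NR_LOGGED)
		i = 0;
	log_end = i;
	log_entries[i] = value;
}

int main(void)
{
	int i, idx;

	for (i = 0; i < 7; i++)		/* overfill on purpose */
		log_append(i);

	/* Walk oldest-to-newest the way txstat_read_file() does:
	 * start one past end, stop once end itself has been printed. */
	i = log_end + 1;
	if (i == NR_LOGGED)
		i = 0;
	for (idx = 0; ; idx++) {
		printf("%d: %d\n", idx, log_entries[i]);
		if (i == log_end)
			break;
		if (++i == NR_LOGGED)
			i = 0;
	}
	return 0;
}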
*/ + struct dentry *dyn_debug_dentries[__B43legacy_NR_DYNDBG]; +}; + +int b43legacy_debug(struct b43legacy_wldev *dev, + enum b43legacy_dyndbg feature); + +void b43legacy_debugfs_init(void); +void b43legacy_debugfs_exit(void); +void b43legacy_debugfs_add_device(struct b43legacy_wldev *dev); +void b43legacy_debugfs_remove_device(struct b43legacy_wldev *dev); +void b43legacy_debugfs_log_txstat(struct b43legacy_wldev *dev, + const struct b43legacy_txstatus *status); + +#else /* CONFIG_B43LEGACY_DEBUG*/ + +static inline +int b43legacy_debug(struct b43legacy_wldev *dev, + enum b43legacy_dyndbg feature) +{ + return 0; +} + +static inline +void b43legacy_debugfs_init(void) { } +static inline +void b43legacy_debugfs_exit(void) { } +static inline +void b43legacy_debugfs_add_device(struct b43legacy_wldev *dev) { } +static inline +void b43legacy_debugfs_remove_device(struct b43legacy_wldev *dev) { } +static inline +void b43legacy_debugfs_log_txstat(struct b43legacy_wldev *dev, + const struct b43legacy_txstatus *status) + { } + +#endif /* CONFIG_B43LEGACY_DEBUG*/ + +#endif /* B43legacy_DEBUGFS_H_ */ diff --git a/drivers/net/wireless/b43legacy/dma.c b/drivers/net/wireless/b43legacy/dma.c new file mode 100644 index 0000000000000000000000000000000000000000..8cb3dc4c474540d5a201a26e83993729bebd703b --- /dev/null +++ b/drivers/net/wireless/b43legacy/dma.c @@ -0,0 +1,1565 @@ +/* + + Broadcom B43legacy wireless driver + + DMA ringbuffer and descriptor allocation/management + + Copyright (c) 2005, 2006 Michael Buesch + + Some code in this file is derived from the b44.c driver + Copyright (C) 2002 David S. Miller + Copyright (C) Pekka Pietikainen + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; see the file COPYING. If not, write to + the Free Software Foundation, Inc., 51 Franklin Steet, Fifth Floor, + Boston, MA 02110-1301, USA. + +*/ + +#include "b43legacy.h" +#include "dma.h" +#include "main.h" +#include "debugfs.h" +#include "xmit.h" + +#include +#include +#include +#include +#include + +/* 32bit DMA ops. 
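When CONFIG_B43LEGACY_DEBUG is disabled, the header above swaps every debugfs entry point for an empty static inline, so callers never need #ifdefs of their own. A tiny standalone example of the same pattern (FEATURE_DEBUG and debug_log() are invented for the illustration):

#include <stdio.h>

/* Flip to 1 to get the real implementation. */
#define FEATURE_DEBUG 0

#if FEATURE_DEBUG
static void debug_log(const char *msg)
{
	fprintf(stderr, "debug: %s\n", msg);
}
#else
/* No-op stub: the call compiles away, callers stay #ifdef-free. */
static inline void debug_log(const char *msg) { (void)msg; }
#endif

int main(void)
{
	debug_log("hello");	/* does nothing unless FEATURE_DEBUG is 1 */
	printf("done\n");
	return 0;
}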
*/ +static +struct b43legacy_dmadesc_generic *op32_idx2desc( + struct b43legacy_dmaring *ring, + int slot, + struct b43legacy_dmadesc_meta **meta) +{ + struct b43legacy_dmadesc32 *desc; + + *meta = &(ring->meta[slot]); + desc = ring->descbase; + desc = &(desc[slot]); + + return (struct b43legacy_dmadesc_generic *)desc; +} + +static void op32_fill_descriptor(struct b43legacy_dmaring *ring, + struct b43legacy_dmadesc_generic *desc, + dma_addr_t dmaaddr, u16 bufsize, + int start, int end, int irq) +{ + struct b43legacy_dmadesc32 *descbase = ring->descbase; + int slot; + u32 ctl; + u32 addr; + u32 addrext; + + slot = (int)(&(desc->dma32) - descbase); + B43legacy_WARN_ON(!(slot >= 0 && slot < ring->nr_slots)); + + addr = (u32)(dmaaddr & ~SSB_DMA_TRANSLATION_MASK); + addrext = (u32)(dmaaddr & SSB_DMA_TRANSLATION_MASK) + >> SSB_DMA_TRANSLATION_SHIFT; + addr |= ssb_dma_translation(ring->dev->dev); + ctl = (bufsize - ring->frameoffset) + & B43legacy_DMA32_DCTL_BYTECNT; + if (slot == ring->nr_slots - 1) + ctl |= B43legacy_DMA32_DCTL_DTABLEEND; + if (start) + ctl |= B43legacy_DMA32_DCTL_FRAMESTART; + if (end) + ctl |= B43legacy_DMA32_DCTL_FRAMEEND; + if (irq) + ctl |= B43legacy_DMA32_DCTL_IRQ; + ctl |= (addrext << B43legacy_DMA32_DCTL_ADDREXT_SHIFT) + & B43legacy_DMA32_DCTL_ADDREXT_MASK; + + desc->dma32.control = cpu_to_le32(ctl); + desc->dma32.address = cpu_to_le32(addr); +} + +static void op32_poke_tx(struct b43legacy_dmaring *ring, int slot) +{ + b43legacy_dma_write(ring, B43legacy_DMA32_TXINDEX, + (u32)(slot * sizeof(struct b43legacy_dmadesc32))); +} + +static void op32_tx_suspend(struct b43legacy_dmaring *ring) +{ + b43legacy_dma_write(ring, B43legacy_DMA32_TXCTL, + b43legacy_dma_read(ring, B43legacy_DMA32_TXCTL) + | B43legacy_DMA32_TXSUSPEND); +} + +static void op32_tx_resume(struct b43legacy_dmaring *ring) +{ + b43legacy_dma_write(ring, B43legacy_DMA32_TXCTL, + b43legacy_dma_read(ring, B43legacy_DMA32_TXCTL) + & ~B43legacy_DMA32_TXSUSPEND); +} + +static int op32_get_current_rxslot(struct b43legacy_dmaring *ring) +{ + u32 val; + + val = b43legacy_dma_read(ring, B43legacy_DMA32_RXSTATUS); + val &= B43legacy_DMA32_RXDPTR; + + return (val / sizeof(struct b43legacy_dmadesc32)); +} + +static void op32_set_current_rxslot(struct b43legacy_dmaring *ring, + int slot) +{ + b43legacy_dma_write(ring, B43legacy_DMA32_RXINDEX, + (u32)(slot * sizeof(struct b43legacy_dmadesc32))); +} + +static const struct b43legacy_dma_ops dma32_ops = { + .idx2desc = op32_idx2desc, + .fill_descriptor = op32_fill_descriptor, + .poke_tx = op32_poke_tx, + .tx_suspend = op32_tx_suspend, + .tx_resume = op32_tx_resume, + .get_current_rxslot = op32_get_current_rxslot, + .set_current_rxslot = op32_set_current_rxslot, +}; + +/* 64bit DMA ops. 
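op32_fill_descriptor() splits each DMA address into the part stored directly in the descriptor and an "address extension" taken from the SSB translation bits. A self-contained sketch of that split; the XLAT_* values below are invented stand-ins for SSB_DMA_TRANSLATION_MASK/SHIFT, which this patch does not define:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative values only. */
#define XLAT_MASK  0xC0000000u
#define XLAT_SHIFT 30

int main(void)
{
	uint32_t dmaaddr = 0x8012ABCDu;	/* example bus address */
	uint32_t trans = 0x40000000u;	/* example translation base */

	uint32_t addr = (dmaaddr & ~XLAT_MASK) | trans;
	uint32_t addrext = (dmaaddr & XLAT_MASK) >> XLAT_SHIFT;

	/* The low bits plus the extension still describe the original address. */
	assert(((addr & ~XLAT_MASK) |
		((uint32_t)addrext << XLAT_SHIFT)) == dmaaddr);
	printf("addr=0x%08X addrext=%u\n", addr, addrext);
	return 0;
}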
*/ +static +struct b43legacy_dmadesc_generic *op64_idx2desc( + struct b43legacy_dmaring *ring, + int slot, + struct b43legacy_dmadesc_meta + **meta) +{ + struct b43legacy_dmadesc64 *desc; + + *meta = &(ring->meta[slot]); + desc = ring->descbase; + desc = &(desc[slot]); + + return (struct b43legacy_dmadesc_generic *)desc; +} + +static void op64_fill_descriptor(struct b43legacy_dmaring *ring, + struct b43legacy_dmadesc_generic *desc, + dma_addr_t dmaaddr, u16 bufsize, + int start, int end, int irq) +{ + struct b43legacy_dmadesc64 *descbase = ring->descbase; + int slot; + u32 ctl0 = 0; + u32 ctl1 = 0; + u32 addrlo; + u32 addrhi; + u32 addrext; + + slot = (int)(&(desc->dma64) - descbase); + B43legacy_WARN_ON(!(slot >= 0 && slot < ring->nr_slots)); + + addrlo = (u32)(dmaaddr & 0xFFFFFFFF); + addrhi = (((u64)dmaaddr >> 32) & ~SSB_DMA_TRANSLATION_MASK); + addrext = (((u64)dmaaddr >> 32) & SSB_DMA_TRANSLATION_MASK) + >> SSB_DMA_TRANSLATION_SHIFT; + addrhi |= ssb_dma_translation(ring->dev->dev); + if (slot == ring->nr_slots - 1) + ctl0 |= B43legacy_DMA64_DCTL0_DTABLEEND; + if (start) + ctl0 |= B43legacy_DMA64_DCTL0_FRAMESTART; + if (end) + ctl0 |= B43legacy_DMA64_DCTL0_FRAMEEND; + if (irq) + ctl0 |= B43legacy_DMA64_DCTL0_IRQ; + ctl1 |= (bufsize - ring->frameoffset) + & B43legacy_DMA64_DCTL1_BYTECNT; + ctl1 |= (addrext << B43legacy_DMA64_DCTL1_ADDREXT_SHIFT) + & B43legacy_DMA64_DCTL1_ADDREXT_MASK; + + desc->dma64.control0 = cpu_to_le32(ctl0); + desc->dma64.control1 = cpu_to_le32(ctl1); + desc->dma64.address_low = cpu_to_le32(addrlo); + desc->dma64.address_high = cpu_to_le32(addrhi); +} + +static void op64_poke_tx(struct b43legacy_dmaring *ring, int slot) +{ + b43legacy_dma_write(ring, B43legacy_DMA64_TXINDEX, + (u32)(slot * sizeof(struct b43legacy_dmadesc64))); +} + +static void op64_tx_suspend(struct b43legacy_dmaring *ring) +{ + b43legacy_dma_write(ring, B43legacy_DMA64_TXCTL, + b43legacy_dma_read(ring, B43legacy_DMA64_TXCTL) + | B43legacy_DMA64_TXSUSPEND); +} + +static void op64_tx_resume(struct b43legacy_dmaring *ring) +{ + b43legacy_dma_write(ring, B43legacy_DMA64_TXCTL, + b43legacy_dma_read(ring, B43legacy_DMA64_TXCTL) + & ~B43legacy_DMA64_TXSUSPEND); +} + +static int op64_get_current_rxslot(struct b43legacy_dmaring *ring) +{ + u32 val; + + val = b43legacy_dma_read(ring, B43legacy_DMA64_RXSTATUS); + val &= B43legacy_DMA64_RXSTATDPTR; + + return (val / sizeof(struct b43legacy_dmadesc64)); +} + +static void op64_set_current_rxslot(struct b43legacy_dmaring *ring, + int slot) +{ + b43legacy_dma_write(ring, B43legacy_DMA64_RXINDEX, + (u32)(slot * sizeof(struct b43legacy_dmadesc64))); +} + +static const struct b43legacy_dma_ops dma64_ops = { + .idx2desc = op64_idx2desc, + .fill_descriptor = op64_fill_descriptor, + .poke_tx = op64_poke_tx, + .tx_suspend = op64_tx_suspend, + .tx_resume = op64_tx_resume, + .get_current_rxslot = op64_get_current_rxslot, + .set_current_rxslot = op64_set_current_rxslot, +}; + + +static inline int free_slots(struct b43legacy_dmaring *ring) +{ + return (ring->nr_slots - ring->used_slots); +} + +static inline int next_slot(struct b43legacy_dmaring *ring, int slot) +{ + B43legacy_WARN_ON(!(slot >= -1 && slot <= ring->nr_slots - 1)); + if (slot == ring->nr_slots - 1) + return 0; + return slot + 1; +} + +static inline int prev_slot(struct b43legacy_dmaring *ring, int slot) +{ + B43legacy_WARN_ON(!(slot >= 0 && slot <= ring->nr_slots - 1)); + if (slot == 0) + return ring->nr_slots - 1; + return slot - 1; +} + +#ifdef CONFIG_B43LEGACY_DEBUG +static void 
update_max_used_slots(struct b43legacy_dmaring *ring, + int current_used_slots) +{ + if (current_used_slots <= ring->max_used_slots) + return; + ring->max_used_slots = current_used_slots; + if (b43legacy_debug(ring->dev, B43legacy_DBG_DMAVERBOSE)) + b43legacydbg(ring->dev->wl, + "max_used_slots increased to %d on %s ring %d\n", + ring->max_used_slots, + ring->tx ? "TX" : "RX", + ring->index); +} +#else +static inline +void update_max_used_slots(struct b43legacy_dmaring *ring, + int current_used_slots) +{ } +#endif /* DEBUG */ + +/* Request a slot for usage. */ +static inline +int request_slot(struct b43legacy_dmaring *ring) +{ + int slot; + + B43legacy_WARN_ON(!ring->tx); + B43legacy_WARN_ON(ring->stopped); + B43legacy_WARN_ON(free_slots(ring) == 0); + + slot = next_slot(ring, ring->current_slot); + ring->current_slot = slot; + ring->used_slots++; + + update_max_used_slots(ring, ring->used_slots); + + return slot; +} + +/* Mac80211-queue to b43legacy-ring mapping */ +static struct b43legacy_dmaring *priority_to_txring( + struct b43legacy_wldev *dev, + int queue_priority) +{ + struct b43legacy_dmaring *ring; + +/*FIXME: For now we always run on TX-ring-1 */ +return dev->dma.tx_ring1; + + /* 0 = highest priority */ + switch (queue_priority) { + default: + B43legacy_WARN_ON(1); + /* fallthrough */ + case 0: + ring = dev->dma.tx_ring3; + break; + case 1: + ring = dev->dma.tx_ring2; + break; + case 2: + ring = dev->dma.tx_ring1; + break; + case 3: + ring = dev->dma.tx_ring0; + break; + case 4: + ring = dev->dma.tx_ring4; + break; + case 5: + ring = dev->dma.tx_ring5; + break; + } + + return ring; +} + +/* Bcm4301-ring to mac80211-queue mapping */ +static inline int txring_to_priority(struct b43legacy_dmaring *ring) +{ + static const u8 idx_to_prio[] = + { 3, 2, 1, 0, 4, 5, }; + +/*FIXME: have only one queue, for now */ +return 0; + + return idx_to_prio[ring->index]; +} + + +u16 b43legacy_dmacontroller_base(int dma64bit, int controller_idx) +{ + static const u16 map64[] = { + B43legacy_MMIO_DMA64_BASE0, + B43legacy_MMIO_DMA64_BASE1, + B43legacy_MMIO_DMA64_BASE2, + B43legacy_MMIO_DMA64_BASE3, + B43legacy_MMIO_DMA64_BASE4, + B43legacy_MMIO_DMA64_BASE5, + }; + static const u16 map32[] = { + B43legacy_MMIO_DMA32_BASE0, + B43legacy_MMIO_DMA32_BASE1, + B43legacy_MMIO_DMA32_BASE2, + B43legacy_MMIO_DMA32_BASE3, + B43legacy_MMIO_DMA32_BASE4, + B43legacy_MMIO_DMA32_BASE5, + }; + + if (dma64bit) { + B43legacy_WARN_ON(!(controller_idx >= 0 && + controller_idx < ARRAY_SIZE(map64))); + return map64[controller_idx]; + } + B43legacy_WARN_ON(!(controller_idx >= 0 && + controller_idx < ARRAY_SIZE(map32))); + return map32[controller_idx]; +} + +static inline +dma_addr_t map_descbuffer(struct b43legacy_dmaring *ring, + unsigned char *buf, + size_t len, + int tx) +{ + dma_addr_t dmaaddr; + + if (tx) + dmaaddr = dma_map_single(ring->dev->dev->dev, + buf, len, + DMA_TO_DEVICE); + else + dmaaddr = dma_map_single(ring->dev->dev->dev, + buf, len, + DMA_FROM_DEVICE); + + return dmaaddr; +} + +static inline +void unmap_descbuffer(struct b43legacy_dmaring *ring, + dma_addr_t addr, + size_t len, + int tx) +{ + if (tx) + dma_unmap_single(ring->dev->dev->dev, + addr, len, + DMA_TO_DEVICE); + else + dma_unmap_single(ring->dev->dev->dev, + addr, len, + DMA_FROM_DEVICE); +} + +static inline +void sync_descbuffer_for_cpu(struct b43legacy_dmaring *ring, + dma_addr_t addr, + size_t len) +{ + B43legacy_WARN_ON(ring->tx); + + dma_sync_single_for_cpu(ring->dev->dev->dev, + addr, len, DMA_FROM_DEVICE); +} + +static inline +void 
sync_descbuffer_for_device(struct b43legacy_dmaring *ring, + dma_addr_t addr, + size_t len) +{ + B43legacy_WARN_ON(ring->tx); + + dma_sync_single_for_device(ring->dev->dev->dev, + addr, len, DMA_FROM_DEVICE); +} + +static inline +void free_descriptor_buffer(struct b43legacy_dmaring *ring, + struct b43legacy_dmadesc_meta *meta, + int irq_context) +{ + if (meta->skb) { + if (irq_context) + dev_kfree_skb_irq(meta->skb); + else + dev_kfree_skb(meta->skb); + meta->skb = NULL; + } +} + +static int alloc_ringmemory(struct b43legacy_dmaring *ring) +{ + struct device *dev = ring->dev->dev->dev; + + ring->descbase = dma_alloc_coherent(dev, B43legacy_DMA_RINGMEMSIZE, + &(ring->dmabase), GFP_KERNEL); + if (!ring->descbase) { + b43legacyerr(ring->dev->wl, "DMA ringmemory allocation" + " failed\n"); + return -ENOMEM; + } + memset(ring->descbase, 0, B43legacy_DMA_RINGMEMSIZE); + + return 0; +} + +static void free_ringmemory(struct b43legacy_dmaring *ring) +{ + struct device *dev = ring->dev->dev->dev; + + dma_free_coherent(dev, B43legacy_DMA_RINGMEMSIZE, + ring->descbase, ring->dmabase); +} + +/* Reset the RX DMA channel */ +int b43legacy_dmacontroller_rx_reset(struct b43legacy_wldev *dev, + u16 mmio_base, int dma64) +{ + int i; + u32 value; + u16 offset; + + might_sleep(); + + offset = dma64 ? B43legacy_DMA64_RXCTL : B43legacy_DMA32_RXCTL; + b43legacy_write32(dev, mmio_base + offset, 0); + for (i = 0; i < 10; i++) { + offset = dma64 ? B43legacy_DMA64_RXSTATUS : + B43legacy_DMA32_RXSTATUS; + value = b43legacy_read32(dev, mmio_base + offset); + if (dma64) { + value &= B43legacy_DMA64_RXSTAT; + if (value == B43legacy_DMA64_RXSTAT_DISABLED) { + i = -1; + break; + } + } else { + value &= B43legacy_DMA32_RXSTATE; + if (value == B43legacy_DMA32_RXSTAT_DISABLED) { + i = -1; + break; + } + } + msleep(1); + } + if (i != -1) { + b43legacyerr(dev->wl, "DMA RX reset timed out\n"); + return -ENODEV; + } + + return 0; +} + +/* Reset the RX DMA channel */ +int b43legacy_dmacontroller_tx_reset(struct b43legacy_wldev *dev, + u16 mmio_base, int dma64) +{ + int i; + u32 value; + u16 offset; + + might_sleep(); + + for (i = 0; i < 10; i++) { + offset = dma64 ? B43legacy_DMA64_TXSTATUS : + B43legacy_DMA32_TXSTATUS; + value = b43legacy_read32(dev, mmio_base + offset); + if (dma64) { + value &= B43legacy_DMA64_TXSTAT; + if (value == B43legacy_DMA64_TXSTAT_DISABLED || + value == B43legacy_DMA64_TXSTAT_IDLEWAIT || + value == B43legacy_DMA64_TXSTAT_STOPPED) + break; + } else { + value &= B43legacy_DMA32_TXSTATE; + if (value == B43legacy_DMA32_TXSTAT_DISABLED || + value == B43legacy_DMA32_TXSTAT_IDLEWAIT || + value == B43legacy_DMA32_TXSTAT_STOPPED) + break; + } + msleep(1); + } + offset = dma64 ? B43legacy_DMA64_TXCTL : B43legacy_DMA32_TXCTL; + b43legacy_write32(dev, mmio_base + offset, 0); + for (i = 0; i < 10; i++) { + offset = dma64 ? B43legacy_DMA64_TXSTATUS : + B43legacy_DMA32_TXSTATUS; + value = b43legacy_read32(dev, mmio_base + offset); + if (dma64) { + value &= B43legacy_DMA64_TXSTAT; + if (value == B43legacy_DMA64_TXSTAT_DISABLED) { + i = -1; + break; + } + } else { + value &= B43legacy_DMA32_TXSTATE; + if (value == B43legacy_DMA32_TXSTAT_DISABLED) { + i = -1; + break; + } + } + msleep(1); + } + if (i != -1) { + b43legacyerr(dev->wl, "DMA TX reset timed out\n"); + return -ENODEV; + } + /* ensure the reset is completed. 
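Both reset helpers poll the DMA status register up to ten times with a short sleep between reads, and reuse the loop counter as a sentinel: i is forced to -1 on success, so leaving the loop normally means timeout. A standalone sketch of that idiom (read_status() fakes the hardware):

#include <stdio.h>

#define STAT_DISABLED 0

/* Pretend status register: reports "disabled" from the third poll on. */
static int read_status(int poll)
{
	return (poll >= 2) ? STAT_DISABLED : 1;
}

int main(void)
{
	int i;

	for (i = 0; i < 10; i++) {
		if (read_status(i) == STAT_DISABLED) {
			i = -1;		/* sentinel: condition met */
			break;
		}
		/* the driver would msleep(1) here */
	}
	if (i != -1) {
		fprintf(stderr, "reset timed out\n");
		return 1;
	}
	printf("reset completed\n");
	return 0;
}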
*/ + msleep(1); + + return 0; +} + +static int setup_rx_descbuffer(struct b43legacy_dmaring *ring, + struct b43legacy_dmadesc_generic *desc, + struct b43legacy_dmadesc_meta *meta, + gfp_t gfp_flags) +{ + struct b43legacy_rxhdr_fw3 *rxhdr; + struct b43legacy_hwtxstatus *txstat; + dma_addr_t dmaaddr; + struct sk_buff *skb; + + B43legacy_WARN_ON(ring->tx); + + skb = __dev_alloc_skb(ring->rx_buffersize, gfp_flags); + if (unlikely(!skb)) + return -ENOMEM; + dmaaddr = map_descbuffer(ring, skb->data, + ring->rx_buffersize, 0); + if (dma_mapping_error(dmaaddr)) { + /* ugh. try to realloc in zone_dma */ + gfp_flags |= GFP_DMA; + + dev_kfree_skb_any(skb); + + skb = __dev_alloc_skb(ring->rx_buffersize, gfp_flags); + if (unlikely(!skb)) + return -ENOMEM; + dmaaddr = map_descbuffer(ring, skb->data, + ring->rx_buffersize, 0); + } + + if (dma_mapping_error(dmaaddr)) { + dev_kfree_skb_any(skb); + return -EIO; + } + + meta->skb = skb; + meta->dmaaddr = dmaaddr; + ring->ops->fill_descriptor(ring, desc, dmaaddr, + ring->rx_buffersize, 0, 0, 0); + + rxhdr = (struct b43legacy_rxhdr_fw3 *)(skb->data); + rxhdr->frame_len = 0; + txstat = (struct b43legacy_hwtxstatus *)(skb->data); + txstat->cookie = 0; + + return 0; +} + +/* Allocate the initial descbuffers. + * This is used for an RX ring only. + */ +static int alloc_initial_descbuffers(struct b43legacy_dmaring *ring) +{ + int i; + int err = -ENOMEM; + struct b43legacy_dmadesc_generic *desc; + struct b43legacy_dmadesc_meta *meta; + + for (i = 0; i < ring->nr_slots; i++) { + desc = ring->ops->idx2desc(ring, i, &meta); + + err = setup_rx_descbuffer(ring, desc, meta, GFP_KERNEL); + if (err) { + b43legacyerr(ring->dev->wl, + "Failed to allocate initial descbuffers\n"); + goto err_unwind; + } + } + mb(); /* all descbuffer setup before next line */ + ring->used_slots = ring->nr_slots; + err = 0; +out: + return err; + +err_unwind: + for (i--; i >= 0; i--) { + desc = ring->ops->idx2desc(ring, i, &meta); + + unmap_descbuffer(ring, meta->dmaaddr, ring->rx_buffersize, 0); + dev_kfree_skb(meta->skb); + } + goto out; +} + +/* Do initial setup of the DMA controller. + * Reset the controller, write the ring busaddress + * and switch the "enable" bit on. 
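When a slot fails during initial buffer allocation, alloc_initial_descbuffers() unwinds only the slots it already set up, walking backwards from i-1. A minimal standalone version of that unwind pattern, with malloc() standing in for the skb and DMA-mapping setup:

#include <stdio.h>
#include <stdlib.h>

#define NR_SLOTS 8

int main(void)
{
	void *buf[NR_SLOTS] = { NULL };
	int i;

	for (i = 0; i < NR_SLOTS; i++) {
		/* Simulate a failure at slot 5. */
		buf[i] = (i == 5) ? NULL : malloc(64);
		if (!buf[i])
			goto err_unwind;
	}
	puts("all slots allocated");
	return 0;

err_unwind:
	/* Free everything that was successfully set up, newest first. */
	for (i--; i >= 0; i--)
		free(buf[i]);
	fprintf(stderr, "allocation failed; partial setup unwound\n");
	return 1;
}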
+ */ +static int dmacontroller_setup(struct b43legacy_dmaring *ring) +{ + int err = 0; + u32 value; + u32 addrext; + u32 trans = ssb_dma_translation(ring->dev->dev); + + if (ring->tx) { + if (ring->dma64) { + u64 ringbase = (u64)(ring->dmabase); + + addrext = ((ringbase >> 32) & SSB_DMA_TRANSLATION_MASK) + >> SSB_DMA_TRANSLATION_SHIFT; + value = B43legacy_DMA64_TXENABLE; + value |= (addrext << B43legacy_DMA64_TXADDREXT_SHIFT) + & B43legacy_DMA64_TXADDREXT_MASK; + b43legacy_dma_write(ring, B43legacy_DMA64_TXCTL, + value); + b43legacy_dma_write(ring, B43legacy_DMA64_TXRINGLO, + (ringbase & 0xFFFFFFFF)); + b43legacy_dma_write(ring, B43legacy_DMA64_TXRINGHI, + ((ringbase >> 32) + & ~SSB_DMA_TRANSLATION_MASK) + | trans); + } else { + u32 ringbase = (u32)(ring->dmabase); + + addrext = (ringbase & SSB_DMA_TRANSLATION_MASK) + >> SSB_DMA_TRANSLATION_SHIFT; + value = B43legacy_DMA32_TXENABLE; + value |= (addrext << B43legacy_DMA32_TXADDREXT_SHIFT) + & B43legacy_DMA32_TXADDREXT_MASK; + b43legacy_dma_write(ring, B43legacy_DMA32_TXCTL, + value); + b43legacy_dma_write(ring, B43legacy_DMA32_TXRING, + (ringbase & + ~SSB_DMA_TRANSLATION_MASK) + | trans); + } + } else { + err = alloc_initial_descbuffers(ring); + if (err) + goto out; + if (ring->dma64) { + u64 ringbase = (u64)(ring->dmabase); + + addrext = ((ringbase >> 32) & SSB_DMA_TRANSLATION_MASK) + >> SSB_DMA_TRANSLATION_SHIFT; + value = (ring->frameoffset << + B43legacy_DMA64_RXFROFF_SHIFT); + value |= B43legacy_DMA64_RXENABLE; + value |= (addrext << B43legacy_DMA64_RXADDREXT_SHIFT) + & B43legacy_DMA64_RXADDREXT_MASK; + b43legacy_dma_write(ring, B43legacy_DMA64_RXCTL, + value); + b43legacy_dma_write(ring, B43legacy_DMA64_RXRINGLO, + (ringbase & 0xFFFFFFFF)); + b43legacy_dma_write(ring, B43legacy_DMA64_RXRINGHI, + ((ringbase >> 32) & + ~SSB_DMA_TRANSLATION_MASK) | + trans); + b43legacy_dma_write(ring, B43legacy_DMA64_RXINDEX, + 200); + } else { + u32 ringbase = (u32)(ring->dmabase); + + addrext = (ringbase & SSB_DMA_TRANSLATION_MASK) + >> SSB_DMA_TRANSLATION_SHIFT; + value = (ring->frameoffset << + B43legacy_DMA32_RXFROFF_SHIFT); + value |= B43legacy_DMA32_RXENABLE; + value |= (addrext << + B43legacy_DMA32_RXADDREXT_SHIFT) + & B43legacy_DMA32_RXADDREXT_MASK; + b43legacy_dma_write(ring, B43legacy_DMA32_RXCTL, + value); + b43legacy_dma_write(ring, B43legacy_DMA32_RXRING, + (ringbase & + ~SSB_DMA_TRANSLATION_MASK) + | trans); + b43legacy_dma_write(ring, B43legacy_DMA32_RXINDEX, + 200); + } + } + +out: + return err; +} + +/* Shutdown the DMA controller. 
*/ +static void dmacontroller_cleanup(struct b43legacy_dmaring *ring) +{ + if (ring->tx) { + b43legacy_dmacontroller_tx_reset(ring->dev, ring->mmio_base, + ring->dma64); + if (ring->dma64) { + b43legacy_dma_write(ring, B43legacy_DMA64_TXRINGLO, 0); + b43legacy_dma_write(ring, B43legacy_DMA64_TXRINGHI, 0); + } else + b43legacy_dma_write(ring, B43legacy_DMA32_TXRING, 0); + } else { + b43legacy_dmacontroller_rx_reset(ring->dev, ring->mmio_base, + ring->dma64); + if (ring->dma64) { + b43legacy_dma_write(ring, B43legacy_DMA64_RXRINGLO, 0); + b43legacy_dma_write(ring, B43legacy_DMA64_RXRINGHI, 0); + } else + b43legacy_dma_write(ring, B43legacy_DMA32_RXRING, 0); + } +} + +static void free_all_descbuffers(struct b43legacy_dmaring *ring) +{ + struct b43legacy_dmadesc_generic *desc; + struct b43legacy_dmadesc_meta *meta; + int i; + + if (!ring->used_slots) + return; + for (i = 0; i < ring->nr_slots; i++) { + desc = ring->ops->idx2desc(ring, i, &meta); + + if (!meta->skb) { + B43legacy_WARN_ON(!ring->tx); + continue; + } + if (ring->tx) + unmap_descbuffer(ring, meta->dmaaddr, + meta->skb->len, 1); + else + unmap_descbuffer(ring, meta->dmaaddr, + ring->rx_buffersize, 0); + free_descriptor_buffer(ring, meta, 0); + } +} + +static u64 supported_dma_mask(struct b43legacy_wldev *dev) +{ + u32 tmp; + u16 mmio_base; + + tmp = b43legacy_read32(dev, SSB_TMSHIGH); + if (tmp & SSB_TMSHIGH_DMA64) + return DMA_64BIT_MASK; + mmio_base = b43legacy_dmacontroller_base(0, 0); + b43legacy_write32(dev, + mmio_base + B43legacy_DMA32_TXCTL, + B43legacy_DMA32_TXADDREXT_MASK); + tmp = b43legacy_read32(dev, mmio_base + + B43legacy_DMA32_TXCTL); + if (tmp & B43legacy_DMA32_TXADDREXT_MASK) + return DMA_32BIT_MASK; + + return DMA_30BIT_MASK; +} + +/* Main initialization function. */ +static +struct b43legacy_dmaring *b43legacy_setup_dmaring( + struct b43legacy_wldev *dev, + int controller_index, + int for_tx, + int dma64) +{ + struct b43legacy_dmaring *ring; + int err; + int nr_slots; + dma_addr_t dma_test; + + ring = kzalloc(sizeof(*ring), GFP_KERNEL); + if (!ring) + goto out; + + nr_slots = B43legacy_RXRING_SLOTS; + if (for_tx) + nr_slots = B43legacy_TXRING_SLOTS; + + ring->meta = kcalloc(nr_slots, sizeof(struct b43legacy_dmadesc_meta), + GFP_KERNEL); + if (!ring->meta) + goto err_kfree_ring; + if (for_tx) { + ring->txhdr_cache = kcalloc(nr_slots, + sizeof(struct b43legacy_txhdr_fw3), + GFP_KERNEL); + if (!ring->txhdr_cache) + goto err_kfree_meta; + + /* test for ability to dma to txhdr_cache */ + dma_test = dma_map_single(dev->dev->dev, + ring->txhdr_cache, + sizeof(struct b43legacy_txhdr_fw3), + DMA_TO_DEVICE); + + if (dma_mapping_error(dma_test)) { + /* ugh realloc */ + kfree(ring->txhdr_cache); + ring->txhdr_cache = kcalloc(nr_slots, + sizeof(struct b43legacy_txhdr_fw3), + GFP_KERNEL | GFP_DMA); + if (!ring->txhdr_cache) + goto err_kfree_meta; + + dma_test = dma_map_single(dev->dev->dev, + ring->txhdr_cache, + sizeof(struct b43legacy_txhdr_fw3), + DMA_TO_DEVICE); + + if (dma_mapping_error(dma_test)) + goto err_kfree_txhdr_cache; + } + + dma_unmap_single(dev->dev->dev, + dma_test, sizeof(struct b43legacy_txhdr_fw3), + DMA_TO_DEVICE); + } + + ring->dev = dev; + ring->nr_slots = nr_slots; + ring->mmio_base = b43legacy_dmacontroller_base(dma64, + controller_index); + ring->index = controller_index; + ring->dma64 = !!dma64; + if (dma64) + ring->ops = &dma64_ops; + else + ring->ops = &dma32_ops; + if (for_tx) { + ring->tx = 1; + ring->current_slot = -1; + } else { + if (ring->index == 0) { + ring->rx_buffersize = 
B43legacy_DMA0_RX_BUFFERSIZE; + ring->frameoffset = B43legacy_DMA0_RX_FRAMEOFFSET; + } else if (ring->index == 3) { + ring->rx_buffersize = B43legacy_DMA3_RX_BUFFERSIZE; + ring->frameoffset = B43legacy_DMA3_RX_FRAMEOFFSET; + } else + B43legacy_WARN_ON(1); + } + spin_lock_init(&ring->lock); +#ifdef CONFIG_B43LEGACY_DEBUG + ring->last_injected_overflow = jiffies; +#endif + + err = alloc_ringmemory(ring); + if (err) + goto err_kfree_txhdr_cache; + err = dmacontroller_setup(ring); + if (err) + goto err_free_ringmemory; + +out: + return ring; + +err_free_ringmemory: + free_ringmemory(ring); +err_kfree_txhdr_cache: + kfree(ring->txhdr_cache); +err_kfree_meta: + kfree(ring->meta); +err_kfree_ring: + kfree(ring); + ring = NULL; + goto out; +} + +/* Main cleanup function. */ +static void b43legacy_destroy_dmaring(struct b43legacy_dmaring *ring) +{ + if (!ring) + return; + + b43legacydbg(ring->dev->wl, "DMA-%s 0x%04X (%s) max used slots:" + " %d/%d\n", (ring->dma64) ? "64" : "32", ring->mmio_base, + (ring->tx) ? "TX" : "RX", + ring->max_used_slots, ring->nr_slots); + /* Device IRQs are disabled prior entering this function, + * so no need to take care of concurrency with rx handler stuff. + */ + dmacontroller_cleanup(ring); + free_all_descbuffers(ring); + free_ringmemory(ring); + + kfree(ring->txhdr_cache); + kfree(ring->meta); + kfree(ring); +} + +void b43legacy_dma_free(struct b43legacy_wldev *dev) +{ + struct b43legacy_dma *dma; + + if (b43legacy_using_pio(dev)) + return; + dma = &dev->dma; + + b43legacy_destroy_dmaring(dma->rx_ring3); + dma->rx_ring3 = NULL; + b43legacy_destroy_dmaring(dma->rx_ring0); + dma->rx_ring0 = NULL; + + b43legacy_destroy_dmaring(dma->tx_ring5); + dma->tx_ring5 = NULL; + b43legacy_destroy_dmaring(dma->tx_ring4); + dma->tx_ring4 = NULL; + b43legacy_destroy_dmaring(dma->tx_ring3); + dma->tx_ring3 = NULL; + b43legacy_destroy_dmaring(dma->tx_ring2); + dma->tx_ring2 = NULL; + b43legacy_destroy_dmaring(dma->tx_ring1); + dma->tx_ring1 = NULL; + b43legacy_destroy_dmaring(dma->tx_ring0); + dma->tx_ring0 = NULL; +} + +int b43legacy_dma_init(struct b43legacy_wldev *dev) +{ + struct b43legacy_dma *dma = &dev->dma; + struct b43legacy_dmaring *ring; + int err; + u64 dmamask; + int dma64 = 0; + + dmamask = supported_dma_mask(dev); + if (dmamask == DMA_64BIT_MASK) + dma64 = 1; + + err = ssb_dma_set_mask(dev->dev, dmamask); + if (err) { +#ifdef BCM43XX_PIO + b43legacywarn(dev->wl, "DMA for this device not supported. " + "Falling back to PIO\n"); + dev->__using_pio = 1; + return -EAGAIN; +#else + b43legacyerr(dev->wl, "DMA for this device not supported and " + "no PIO support compiled in\n"); + return -EOPNOTSUPP; +#endif + } + + err = -ENOMEM; + /* setup TX DMA channels. */ + ring = b43legacy_setup_dmaring(dev, 0, 1, dma64); + if (!ring) + goto out; + dma->tx_ring0 = ring; + + ring = b43legacy_setup_dmaring(dev, 1, 1, dma64); + if (!ring) + goto err_destroy_tx0; + dma->tx_ring1 = ring; + + ring = b43legacy_setup_dmaring(dev, 2, 1, dma64); + if (!ring) + goto err_destroy_tx1; + dma->tx_ring2 = ring; + + ring = b43legacy_setup_dmaring(dev, 3, 1, dma64); + if (!ring) + goto err_destroy_tx2; + dma->tx_ring3 = ring; + + ring = b43legacy_setup_dmaring(dev, 4, 1, dma64); + if (!ring) + goto err_destroy_tx3; + dma->tx_ring4 = ring; + + ring = b43legacy_setup_dmaring(dev, 5, 1, dma64); + if (!ring) + goto err_destroy_tx4; + dma->tx_ring5 = ring; + + /* setup RX DMA channels. 
*/ + ring = b43legacy_setup_dmaring(dev, 0, 0, dma64); + if (!ring) + goto err_destroy_tx5; + dma->rx_ring0 = ring; + + if (dev->dev->id.revision < 5) { + ring = b43legacy_setup_dmaring(dev, 3, 0, dma64); + if (!ring) + goto err_destroy_rx0; + dma->rx_ring3 = ring; + } + + b43legacydbg(dev->wl, "%d-bit DMA initialized\n", + (dmamask == DMA_64BIT_MASK) ? 64 : + (dmamask == DMA_32BIT_MASK) ? 32 : 30); + err = 0; +out: + return err; + +err_destroy_rx0: + b43legacy_destroy_dmaring(dma->rx_ring0); + dma->rx_ring0 = NULL; +err_destroy_tx5: + b43legacy_destroy_dmaring(dma->tx_ring5); + dma->tx_ring5 = NULL; +err_destroy_tx4: + b43legacy_destroy_dmaring(dma->tx_ring4); + dma->tx_ring4 = NULL; +err_destroy_tx3: + b43legacy_destroy_dmaring(dma->tx_ring3); + dma->tx_ring3 = NULL; +err_destroy_tx2: + b43legacy_destroy_dmaring(dma->tx_ring2); + dma->tx_ring2 = NULL; +err_destroy_tx1: + b43legacy_destroy_dmaring(dma->tx_ring1); + dma->tx_ring1 = NULL; +err_destroy_tx0: + b43legacy_destroy_dmaring(dma->tx_ring0); + dma->tx_ring0 = NULL; + goto out; +} + +/* Generate a cookie for the TX header. */ +static u16 generate_cookie(struct b43legacy_dmaring *ring, + int slot) +{ + u16 cookie = 0x1000; + + /* Use the upper 4 bits of the cookie as + * DMA controller ID and store the slot number + * in the lower 12 bits. + * Note that the cookie must never be 0, as this + * is a special value used in RX path. + */ + switch (ring->index) { + case 0: + cookie = 0xA000; + break; + case 1: + cookie = 0xB000; + break; + case 2: + cookie = 0xC000; + break; + case 3: + cookie = 0xD000; + break; + case 4: + cookie = 0xE000; + break; + case 5: + cookie = 0xF000; + break; + } + B43legacy_WARN_ON(!(((u16)slot & 0xF000) == 0x0000)); + cookie |= (u16)slot; + + return cookie; +} + +/* Inspect a cookie and find out to which controller/slot it belongs. */ +static +struct b43legacy_dmaring *parse_cookie(struct b43legacy_wldev *dev, + u16 cookie, int *slot) +{ + struct b43legacy_dma *dma = &dev->dma; + struct b43legacy_dmaring *ring = NULL; + + switch (cookie & 0xF000) { + case 0xA000: + ring = dma->tx_ring0; + break; + case 0xB000: + ring = dma->tx_ring1; + break; + case 0xC000: + ring = dma->tx_ring2; + break; + case 0xD000: + ring = dma->tx_ring3; + break; + case 0xE000: + ring = dma->tx_ring4; + break; + case 0xF000: + ring = dma->tx_ring5; + break; + default: + B43legacy_WARN_ON(1); + } + *slot = (cookie & 0x0FFF); + B43legacy_WARN_ON(!(ring && *slot >= 0 && *slot < ring->nr_slots)); + + return ring; +} + +static int dma_tx_fragment(struct b43legacy_dmaring *ring, + struct sk_buff *skb, + struct ieee80211_tx_control *ctl) +{ + const struct b43legacy_dma_ops *ops = ring->ops; + u8 *header; + int slot; + int err; + struct b43legacy_dmadesc_generic *desc; + struct b43legacy_dmadesc_meta *meta; + struct b43legacy_dmadesc_meta *meta_hdr; + struct sk_buff *bounce_skb; + +#define SLOTS_PER_PACKET 2 + B43legacy_WARN_ON(skb_shinfo(skb)->nr_frags != 0); + + /* Get a slot for the header. 
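generate_cookie() stores the TX ring in the top four bits (0xA000 through 0xF000) and the slot in the low twelve, and parse_cookie() inverts that when a TX status report arrives. A standalone round-trip check of the encoding, flattened to index arithmetic instead of the driver's switch over ring pointers:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

static uint16_t generate_cookie(int ring_index, int slot)
{
	/* Rings 0..5 map to 0xA000..0xF000; slot lives in the low 12 bits. */
	uint16_t cookie = (uint16_t)(0xA000 + ring_index * 0x1000);

	assert((slot & 0xF000) == 0);
	return cookie | (uint16_t)slot;
}

static void parse_cookie(uint16_t cookie, int *ring_index, int *slot)
{
	*ring_index = ((cookie & 0xF000) >> 12) - 0xA;
	*slot = cookie & 0x0FFF;
}

int main(void)
{
	int ring, slot, r2, s2;

	for (ring = 0; ring < 6; ring++) {
		for (slot = 0; slot < 0x1000; slot += 0x3FF) {
			parse_cookie(generate_cookie(ring, slot), &r2, &s2);
			assert(r2 == ring && s2 == slot);
		}
	}
	printf("cookie round-trip OK\n");
	return 0;
}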
*/ + slot = request_slot(ring); + desc = ops->idx2desc(ring, slot, &meta_hdr); + memset(meta_hdr, 0, sizeof(*meta_hdr)); + + header = &(ring->txhdr_cache[slot * sizeof( + struct b43legacy_txhdr_fw3)]); + b43legacy_generate_txhdr(ring->dev, header, + skb->data, skb->len, ctl, + generate_cookie(ring, slot)); + + meta_hdr->dmaaddr = map_descbuffer(ring, (unsigned char *)header, + sizeof(struct b43legacy_txhdr_fw3), 1); + if (dma_mapping_error(meta_hdr->dmaaddr)) + return -EIO; + ops->fill_descriptor(ring, desc, meta_hdr->dmaaddr, + sizeof(struct b43legacy_txhdr_fw3), 1, 0, 0); + + /* Get a slot for the payload. */ + slot = request_slot(ring); + desc = ops->idx2desc(ring, slot, &meta); + memset(meta, 0, sizeof(*meta)); + + memcpy(&meta->txstat.control, ctl, sizeof(*ctl)); + meta->skb = skb; + meta->is_last_fragment = 1; + + meta->dmaaddr = map_descbuffer(ring, skb->data, skb->len, 1); + /* create a bounce buffer in zone_dma on mapping failure. */ + if (dma_mapping_error(meta->dmaaddr)) { + bounce_skb = __dev_alloc_skb(skb->len, GFP_ATOMIC | GFP_DMA); + if (!bounce_skb) { + err = -ENOMEM; + goto out_unmap_hdr; + } + + memcpy(skb_put(bounce_skb, skb->len), skb->data, skb->len); + dev_kfree_skb_any(skb); + skb = bounce_skb; + meta->skb = skb; + meta->dmaaddr = map_descbuffer(ring, skb->data, skb->len, 1); + if (dma_mapping_error(meta->dmaaddr)) { + err = -EIO; + goto out_free_bounce; + } + } + + ops->fill_descriptor(ring, desc, meta->dmaaddr, + skb->len, 0, 1, 1); + + wmb(); /* previous stuff MUST be done */ + /* Now transfer the whole frame. */ + ops->poke_tx(ring, next_slot(ring, slot)); + return 0; + +out_free_bounce: + dev_kfree_skb_any(skb); +out_unmap_hdr: + unmap_descbuffer(ring, meta_hdr->dmaaddr, + sizeof(struct b43legacy_txhdr_fw3), 1); + return err; +} + +static inline +int should_inject_overflow(struct b43legacy_dmaring *ring) +{ +#ifdef CONFIG_B43LEGACY_DEBUG + if (unlikely(b43legacy_debug(ring->dev, + B43legacy_DBG_DMAOVERFLOW))) { + /* Check if we should inject another ringbuffer overflow + * to test handling of this situation in the stack. */ + unsigned long next_overflow; + + next_overflow = ring->last_injected_overflow + HZ; + if (time_after(jiffies, next_overflow)) { + ring->last_injected_overflow = jiffies; + b43legacydbg(ring->dev->wl, + "Injecting TX ring overflow on " + "DMA controller %d\n", ring->index); + return 1; + } + } +#endif /* CONFIG_B43LEGACY_DEBUG */ + return 0; +} + +int b43legacy_dma_tx(struct b43legacy_wldev *dev, + struct sk_buff *skb, + struct ieee80211_tx_control *ctl) +{ + struct b43legacy_dmaring *ring; + int err = 0; + unsigned long flags; + + ring = priority_to_txring(dev, ctl->queue); + spin_lock_irqsave(&ring->lock, flags); + B43legacy_WARN_ON(!ring->tx); + if (unlikely(free_slots(ring) < SLOTS_PER_PACKET)) { + b43legacywarn(dev->wl, "DMA queue overflow\n"); + err = -ENOSPC; + goto out_unlock; + } + /* Check if the queue was stopped in mac80211, + * but we got called nevertheless. + * That would be a mac80211 bug. */ + B43legacy_BUG_ON(ring->stopped); + + err = dma_tx_fragment(ring, skb, ctl); + if (unlikely(err)) { + b43legacyerr(dev->wl, "DMA tx mapping failure\n"); + goto out_unlock; + } + ring->nr_tx_packets++; + if ((free_slots(ring) < SLOTS_PER_PACKET) || + should_inject_overflow(ring)) { + /* This TX ring is full. 
*/ + ieee80211_stop_queue(dev->wl->hw, txring_to_priority(ring)); + ring->stopped = 1; + if (b43legacy_debug(dev, B43legacy_DBG_DMAVERBOSE)) + b43legacydbg(dev->wl, "Stopped TX ring %d\n", + ring->index); + } +out_unlock: + spin_unlock_irqrestore(&ring->lock, flags); + + return err; +} + +void b43legacy_dma_handle_txstatus(struct b43legacy_wldev *dev, + const struct b43legacy_txstatus *status) +{ + const struct b43legacy_dma_ops *ops; + struct b43legacy_dmaring *ring; + struct b43legacy_dmadesc_generic *desc; + struct b43legacy_dmadesc_meta *meta; + int slot; + + ring = parse_cookie(dev, status->cookie, &slot); + if (unlikely(!ring)) + return; + B43legacy_WARN_ON(!irqs_disabled()); + spin_lock(&ring->lock); + + B43legacy_WARN_ON(!ring->tx); + ops = ring->ops; + while (1) { + B43legacy_WARN_ON(!(slot >= 0 && slot < ring->nr_slots)); + desc = ops->idx2desc(ring, slot, &meta); + + if (meta->skb) + unmap_descbuffer(ring, meta->dmaaddr, + meta->skb->len, 1); + else + unmap_descbuffer(ring, meta->dmaaddr, + sizeof(struct b43legacy_txhdr_fw3), + 1); + + if (meta->is_last_fragment) { + B43legacy_WARN_ON(!meta->skb); + /* Call back to inform the ieee80211 subsystem about the + * status of the transmission. + * Some fields of txstat are already filled in dma_tx(). + */ + if (status->acked) { + meta->txstat.flags |= IEEE80211_TX_STATUS_ACK; + } else { + if (!(meta->txstat.control.flags + & IEEE80211_TXCTL_NO_ACK)) + meta->txstat.excessive_retries = 1; + } + if (status->frame_count == 0) { + /* The frame was not transmitted at all. */ + meta->txstat.retry_count = 0; + } else + meta->txstat.retry_count = status->frame_count + - 1; + ieee80211_tx_status_irqsafe(dev->wl->hw, meta->skb, + &(meta->txstat)); + /* skb is freed by ieee80211_tx_status_irqsafe() */ + meta->skb = NULL; + } else { + /* No need to call free_descriptor_buffer here, as + * this is only the txhdr, which is not allocated. + */ + B43legacy_WARN_ON(meta->skb != NULL); + } + + /* Everything unmapped and free'd. So it's not used anymore. 
*/ + ring->used_slots--; + + if (meta->is_last_fragment) + break; + slot = next_slot(ring, slot); + } + dev->stats.last_tx = jiffies; + if (ring->stopped) { + B43legacy_WARN_ON(free_slots(ring) < SLOTS_PER_PACKET); + ieee80211_wake_queue(dev->wl->hw, txring_to_priority(ring)); + ring->stopped = 0; + if (b43legacy_debug(dev, B43legacy_DBG_DMAVERBOSE)) + b43legacydbg(dev->wl, "Woke up TX ring %d\n", + ring->index); + } + + spin_unlock(&ring->lock); +} + +void b43legacy_dma_get_tx_stats(struct b43legacy_wldev *dev, + struct ieee80211_tx_queue_stats *stats) +{ + const int nr_queues = dev->wl->hw->queues; + struct b43legacy_dmaring *ring; + struct ieee80211_tx_queue_stats_data *data; + unsigned long flags; + int i; + + for (i = 0; i < nr_queues; i++) { + data = &(stats->data[i]); + ring = priority_to_txring(dev, i); + + spin_lock_irqsave(&ring->lock, flags); + data->len = ring->used_slots / SLOTS_PER_PACKET; + data->limit = ring->nr_slots / SLOTS_PER_PACKET; + data->count = ring->nr_tx_packets; + spin_unlock_irqrestore(&ring->lock, flags); + } +} + +static void dma_rx(struct b43legacy_dmaring *ring, + int *slot) +{ + const struct b43legacy_dma_ops *ops = ring->ops; + struct b43legacy_dmadesc_generic *desc; + struct b43legacy_dmadesc_meta *meta; + struct b43legacy_rxhdr_fw3 *rxhdr; + struct sk_buff *skb; + u16 len; + int err; + dma_addr_t dmaaddr; + + desc = ops->idx2desc(ring, *slot, &meta); + + sync_descbuffer_for_cpu(ring, meta->dmaaddr, ring->rx_buffersize); + skb = meta->skb; + + if (ring->index == 3) { + /* We received an xmit status. */ + struct b43legacy_hwtxstatus *hw = + (struct b43legacy_hwtxstatus *)skb->data; + int i = 0; + + while (hw->cookie == 0) { + if (i > 100) + break; + i++; + udelay(2); + barrier(); + } + b43legacy_handle_hwtxstatus(ring->dev, hw); + /* recycle the descriptor buffer. */ + sync_descbuffer_for_device(ring, meta->dmaaddr, + ring->rx_buffersize); + + return; + } + rxhdr = (struct b43legacy_rxhdr_fw3 *)skb->data; + len = le16_to_cpu(rxhdr->frame_len); + if (len == 0) { + int i = 0; + + do { + udelay(2); + barrier(); + len = le16_to_cpu(rxhdr->frame_len); + } while (len == 0 && i++ < 5); + if (unlikely(len == 0)) { + /* recycle the descriptor buffer. */ + sync_descbuffer_for_device(ring, meta->dmaaddr, + ring->rx_buffersize); + goto drop; + } + } + if (unlikely(len > ring->rx_buffersize)) { + /* The data did not fit into one descriptor buffer + * and is split over multiple buffers. + * This should never happen, as we try to allocate buffers + * big enough. So simply ignore this packet. + */ + int cnt = 0; + s32 tmp = len; + + while (1) { + desc = ops->idx2desc(ring, *slot, &meta); + /* recycle the descriptor buffer. 
*/ + sync_descbuffer_for_device(ring, meta->dmaaddr, + ring->rx_buffersize); + *slot = next_slot(ring, *slot); + cnt++; + tmp -= ring->rx_buffersize; + if (tmp <= 0) + break; + } + b43legacyerr(ring->dev->wl, "DMA RX buffer too small " + "(len: %u, buffer: %u, nr-dropped: %d)\n", + len, ring->rx_buffersize, cnt); + goto drop; + } + + dmaaddr = meta->dmaaddr; + err = setup_rx_descbuffer(ring, desc, meta, GFP_ATOMIC); + if (unlikely(err)) { + b43legacydbg(ring->dev->wl, "DMA RX: setup_rx_descbuffer()" + " failed\n"); + sync_descbuffer_for_device(ring, dmaaddr, + ring->rx_buffersize); + goto drop; + } + + unmap_descbuffer(ring, dmaaddr, ring->rx_buffersize, 0); + skb_put(skb, len + ring->frameoffset); + skb_pull(skb, ring->frameoffset); + + b43legacy_rx(ring->dev, skb, rxhdr); +drop: + return; +} + +void b43legacy_dma_rx(struct b43legacy_dmaring *ring) +{ + const struct b43legacy_dma_ops *ops = ring->ops; + int slot; + int current_slot; + int used_slots = 0; + + B43legacy_WARN_ON(ring->tx); + current_slot = ops->get_current_rxslot(ring); + B43legacy_WARN_ON(!(current_slot >= 0 && current_slot < + ring->nr_slots)); + + slot = ring->current_slot; + for (; slot != current_slot; slot = next_slot(ring, slot)) { + dma_rx(ring, &slot); + update_max_used_slots(ring, ++used_slots); + } + ops->set_current_rxslot(ring, slot); + ring->current_slot = slot; +} + +static void b43legacy_dma_tx_suspend_ring(struct b43legacy_dmaring *ring) +{ + unsigned long flags; + + spin_lock_irqsave(&ring->lock, flags); + B43legacy_WARN_ON(!ring->tx); + ring->ops->tx_suspend(ring); + spin_unlock_irqrestore(&ring->lock, flags); +} + +static void b43legacy_dma_tx_resume_ring(struct b43legacy_dmaring *ring) +{ + unsigned long flags; + + spin_lock_irqsave(&ring->lock, flags); + B43legacy_WARN_ON(!ring->tx); + ring->ops->tx_resume(ring); + spin_unlock_irqrestore(&ring->lock, flags); +} + +void b43legacy_dma_tx_suspend(struct b43legacy_wldev *dev) +{ + b43legacy_power_saving_ctl_bits(dev, -1, 1); + b43legacy_dma_tx_suspend_ring(dev->dma.tx_ring0); + b43legacy_dma_tx_suspend_ring(dev->dma.tx_ring1); + b43legacy_dma_tx_suspend_ring(dev->dma.tx_ring2); + b43legacy_dma_tx_suspend_ring(dev->dma.tx_ring3); + b43legacy_dma_tx_suspend_ring(dev->dma.tx_ring4); + b43legacy_dma_tx_suspend_ring(dev->dma.tx_ring5); +} + +void b43legacy_dma_tx_resume(struct b43legacy_wldev *dev) +{ + b43legacy_dma_tx_resume_ring(dev->dma.tx_ring5); + b43legacy_dma_tx_resume_ring(dev->dma.tx_ring4); + b43legacy_dma_tx_resume_ring(dev->dma.tx_ring3); + b43legacy_dma_tx_resume_ring(dev->dma.tx_ring2); + b43legacy_dma_tx_resume_ring(dev->dma.tx_ring1); + b43legacy_dma_tx_resume_ring(dev->dma.tx_ring0); + b43legacy_power_saving_ctl_bits(dev, -1, -1); +} diff --git a/drivers/net/wireless/b43legacy/dma.h b/drivers/net/wireless/b43legacy/dma.h new file mode 100644 index 0000000000000000000000000000000000000000..26f6ab08de75eb6bf760dd9c541c03e5b0b2aef9 --- /dev/null +++ b/drivers/net/wireless/b43legacy/dma.h @@ -0,0 +1,367 @@ +#ifndef B43legacy_DMA_H_ +#define B43legacy_DMA_H_ + +#include +#include +#include +#include +#include + +#include "b43legacy.h" + + +/* DMA-Interrupt reasons. */ +#define B43legacy_DMAIRQ_FATALMASK ((1 << 10) | (1 << 11) | (1 << 12) \ + | (1 << 14) | (1 << 15)) +#define B43legacy_DMAIRQ_NONFATALMASK (1 << 13) +#define B43legacy_DMAIRQ_RX_DONE (1 << 16) + + +/*** 32-bit DMA Engine. ***/ + +/* 32-bit DMA controller registers. 
*/ +#define B43legacy_DMA32_TXCTL 0x00 +#define B43legacy_DMA32_TXENABLE 0x00000001 +#define B43legacy_DMA32_TXSUSPEND 0x00000002 +#define B43legacy_DMA32_TXLOOPBACK 0x00000004 +#define B43legacy_DMA32_TXFLUSH 0x00000010 +#define B43legacy_DMA32_TXADDREXT_MASK 0x00030000 +#define B43legacy_DMA32_TXADDREXT_SHIFT 16 +#define B43legacy_DMA32_TXRING 0x04 +#define B43legacy_DMA32_TXINDEX 0x08 +#define B43legacy_DMA32_TXSTATUS 0x0C +#define B43legacy_DMA32_TXDPTR 0x00000FFF +#define B43legacy_DMA32_TXSTATE 0x0000F000 +#define B43legacy_DMA32_TXSTAT_DISABLED 0x00000000 +#define B43legacy_DMA32_TXSTAT_ACTIVE 0x00001000 +#define B43legacy_DMA32_TXSTAT_IDLEWAIT 0x00002000 +#define B43legacy_DMA32_TXSTAT_STOPPED 0x00003000 +#define B43legacy_DMA32_TXSTAT_SUSP 0x00004000 +#define B43legacy_DMA32_TXERROR 0x000F0000 +#define B43legacy_DMA32_TXERR_NOERR 0x00000000 +#define B43legacy_DMA32_TXERR_PROT 0x00010000 +#define B43legacy_DMA32_TXERR_UNDERRUN 0x00020000 +#define B43legacy_DMA32_TXERR_BUFREAD 0x00030000 +#define B43legacy_DMA32_TXERR_DESCREAD 0x00040000 +#define B43legacy_DMA32_TXACTIVE 0xFFF00000 +#define B43legacy_DMA32_RXCTL 0x10 +#define B43legacy_DMA32_RXENABLE 0x00000001 +#define B43legacy_DMA32_RXFROFF_MASK 0x000000FE +#define B43legacy_DMA32_RXFROFF_SHIFT 1 +#define B43legacy_DMA32_RXDIRECTFIFO 0x00000100 +#define B43legacy_DMA32_RXADDREXT_MASK 0x00030000 +#define B43legacy_DMA32_RXADDREXT_SHIFT 16 +#define B43legacy_DMA32_RXRING 0x14 +#define B43legacy_DMA32_RXINDEX 0x18 +#define B43legacy_DMA32_RXSTATUS 0x1C +#define B43legacy_DMA32_RXDPTR 0x00000FFF +#define B43legacy_DMA32_RXSTATE 0x0000F000 +#define B43legacy_DMA32_RXSTAT_DISABLED 0x00000000 +#define B43legacy_DMA32_RXSTAT_ACTIVE 0x00001000 +#define B43legacy_DMA32_RXSTAT_IDLEWAIT 0x00002000 +#define B43legacy_DMA32_RXSTAT_STOPPED 0x00003000 +#define B43legacy_DMA32_RXERROR 0x000F0000 +#define B43legacy_DMA32_RXERR_NOERR 0x00000000 +#define B43legacy_DMA32_RXERR_PROT 0x00010000 +#define B43legacy_DMA32_RXERR_OVERFLOW 0x00020000 +#define B43legacy_DMA32_RXERR_BUFWRITE 0x00030000 +#define B43legacy_DMA32_RXERR_DESCREAD 0x00040000 +#define B43legacy_DMA32_RXACTIVE 0xFFF00000 + +/* 32-bit DMA descriptor. */ +struct b43legacy_dmadesc32 { + __le32 control; + __le32 address; +} __attribute__((__packed__)); +#define B43legacy_DMA32_DCTL_BYTECNT 0x00001FFF +#define B43legacy_DMA32_DCTL_ADDREXT_MASK 0x00030000 +#define B43legacy_DMA32_DCTL_ADDREXT_SHIFT 16 +#define B43legacy_DMA32_DCTL_DTABLEEND 0x10000000 +#define B43legacy_DMA32_DCTL_IRQ 0x20000000 +#define B43legacy_DMA32_DCTL_FRAMEEND 0x40000000 +#define B43legacy_DMA32_DCTL_FRAMESTART 0x80000000 + + + +/*** 64-bit DMA Engine. ***/ + +/* 64-bit DMA controller registers. 
*/ +#define B43legacy_DMA64_TXCTL 0x00 +#define B43legacy_DMA64_TXENABLE 0x00000001 +#define B43legacy_DMA64_TXSUSPEND 0x00000002 +#define B43legacy_DMA64_TXLOOPBACK 0x00000004 +#define B43legacy_DMA64_TXFLUSH 0x00000010 +#define B43legacy_DMA64_TXADDREXT_MASK 0x00030000 +#define B43legacy_DMA64_TXADDREXT_SHIFT 16 +#define B43legacy_DMA64_TXINDEX 0x04 +#define B43legacy_DMA64_TXRINGLO 0x08 +#define B43legacy_DMA64_TXRINGHI 0x0C +#define B43legacy_DMA64_TXSTATUS 0x10 +#define B43legacy_DMA64_TXSTATDPTR 0x00001FFF +#define B43legacy_DMA64_TXSTAT 0xF0000000 +#define B43legacy_DMA64_TXSTAT_DISABLED 0x00000000 +#define B43legacy_DMA64_TXSTAT_ACTIVE 0x10000000 +#define B43legacy_DMA64_TXSTAT_IDLEWAIT 0x20000000 +#define B43legacy_DMA64_TXSTAT_STOPPED 0x30000000 +#define B43legacy_DMA64_TXSTAT_SUSP 0x40000000 +#define B43legacy_DMA64_TXERROR 0x14 +#define B43legacy_DMA64_TXERRDPTR 0x0001FFFF +#define B43legacy_DMA64_TXERR 0xF0000000 +#define B43legacy_DMA64_TXERR_NOERR 0x00000000 +#define B43legacy_DMA64_TXERR_PROT 0x10000000 +#define B43legacy_DMA64_TXERR_UNDERRUN 0x20000000 +#define B43legacy_DMA64_TXERR_TRANSFER 0x30000000 +#define B43legacy_DMA64_TXERR_DESCREAD 0x40000000 +#define B43legacy_DMA64_TXERR_CORE 0x50000000 +#define B43legacy_DMA64_RXCTL 0x20 +#define B43legacy_DMA64_RXENABLE 0x00000001 +#define B43legacy_DMA64_RXFROFF_MASK 0x000000FE +#define B43legacy_DMA64_RXFROFF_SHIFT 1 +#define B43legacy_DMA64_RXDIRECTFIFO 0x00000100 +#define B43legacy_DMA64_RXADDREXT_MASK 0x00030000 +#define B43legacy_DMA64_RXADDREXT_SHIFT 16 +#define B43legacy_DMA64_RXINDEX 0x24 +#define B43legacy_DMA64_RXRINGLO 0x28 +#define B43legacy_DMA64_RXRINGHI 0x2C +#define B43legacy_DMA64_RXSTATUS 0x30 +#define B43legacy_DMA64_RXSTATDPTR 0x00001FFF +#define B43legacy_DMA64_RXSTAT 0xF0000000 +#define B43legacy_DMA64_RXSTAT_DISABLED 0x00000000 +#define B43legacy_DMA64_RXSTAT_ACTIVE 0x10000000 +#define B43legacy_DMA64_RXSTAT_IDLEWAIT 0x20000000 +#define B43legacy_DMA64_RXSTAT_STOPPED 0x30000000 +#define B43legacy_DMA64_RXSTAT_SUSP 0x40000000 +#define B43legacy_DMA64_RXERROR 0x34 +#define B43legacy_DMA64_RXERRDPTR 0x0001FFFF +#define B43legacy_DMA64_RXERR 0xF0000000 +#define B43legacy_DMA64_RXERR_NOERR 0x00000000 +#define B43legacy_DMA64_RXERR_PROT 0x10000000 +#define B43legacy_DMA64_RXERR_UNDERRUN 0x20000000 +#define B43legacy_DMA64_RXERR_TRANSFER 0x30000000 +#define B43legacy_DMA64_RXERR_DESCREAD 0x40000000 +#define B43legacy_DMA64_RXERR_CORE 0x50000000 + +/* 64-bit DMA descriptor. 
*/ +struct b43legacy_dmadesc64 { + __le32 control0; + __le32 control1; + __le32 address_low; + __le32 address_high; +} __attribute__((__packed__)); +#define B43legacy_DMA64_DCTL0_DTABLEEND 0x10000000 +#define B43legacy_DMA64_DCTL0_IRQ 0x20000000 +#define B43legacy_DMA64_DCTL0_FRAMEEND 0x40000000 +#define B43legacy_DMA64_DCTL0_FRAMESTART 0x80000000 +#define B43legacy_DMA64_DCTL1_BYTECNT 0x00001FFF +#define B43legacy_DMA64_DCTL1_ADDREXT_MASK 0x00030000 +#define B43legacy_DMA64_DCTL1_ADDREXT_SHIFT 16 + + + +struct b43legacy_dmadesc_generic { + union { + struct b43legacy_dmadesc32 dma32; + struct b43legacy_dmadesc64 dma64; + } __attribute__((__packed__)); +} __attribute__((__packed__)); + + +/* Misc DMA constants */ +#define B43legacy_DMA_RINGMEMSIZE PAGE_SIZE +#define B43legacy_DMA0_RX_FRAMEOFFSET 30 +#define B43legacy_DMA3_RX_FRAMEOFFSET 0 + + +/* DMA engine tuning knobs */ +#define B43legacy_TXRING_SLOTS 128 +#define B43legacy_RXRING_SLOTS 64 +#define B43legacy_DMA0_RX_BUFFERSIZE (2304 + 100) +#define B43legacy_DMA3_RX_BUFFERSIZE 16 + + + +#ifdef CONFIG_B43LEGACY_DMA + + +struct sk_buff; +struct b43legacy_private; +struct b43legacy_txstatus; + + +struct b43legacy_dmadesc_meta { + /* The kernel DMA-able buffer. */ + struct sk_buff *skb; + /* DMA base bus-address of the descriptor buffer. */ + dma_addr_t dmaaddr; + /* ieee80211 TX status. Only used once per 802.11 frag. */ + bool is_last_fragment; + struct ieee80211_tx_status txstat; +}; + +struct b43legacy_dmaring; + +/* Lowlevel DMA operations that differ between 32bit and 64bit DMA. */ +struct b43legacy_dma_ops { + struct b43legacy_dmadesc_generic * (*idx2desc) + (struct b43legacy_dmaring *ring, + int slot, + struct b43legacy_dmadesc_meta + **meta); + void (*fill_descriptor)(struct b43legacy_dmaring *ring, + struct b43legacy_dmadesc_generic *desc, + dma_addr_t dmaaddr, u16 bufsize, + int start, int end, int irq); + void (*poke_tx)(struct b43legacy_dmaring *ring, int slot); + void (*tx_suspend)(struct b43legacy_dmaring *ring); + void (*tx_resume)(struct b43legacy_dmaring *ring); + int (*get_current_rxslot)(struct b43legacy_dmaring *ring); + void (*set_current_rxslot)(struct b43legacy_dmaring *ring, int slot); +}; + +struct b43legacy_dmaring { + /* Lowlevel DMA ops. */ + const struct b43legacy_dma_ops *ops; + /* Kernel virtual base address of the ring memory. */ + void *descbase; + /* Meta data about all descriptors. */ + struct b43legacy_dmadesc_meta *meta; + /* Cache of TX headers for each slot. + * This is to avoid an allocation on each TX. + * This is NULL for an RX ring. + */ + u8 *txhdr_cache; + /* (Unadjusted) DMA base bus-address of the ring memory. */ + dma_addr_t dmabase; + /* Number of descriptor slots in the ring. */ + int nr_slots; + /* Number of used descriptor slots. */ + int used_slots; + /* Currently used slot in the ring. */ + int current_slot; + /* Total number of packets sent. Statistics only. */ + unsigned int nr_tx_packets; + /* Frameoffset in octets. */ + u32 frameoffset; + /* Descriptor buffer size. */ + u16 rx_buffersize; + /* The MMIO base register of the DMA controller. */ + u16 mmio_base; + /* DMA controller index number (0-5). */ + int index; + /* Boolean. Is this a TX ring? */ + bool tx; + /* Boolean. 64bit DMA if true, 32bit DMA otherwise. */ + bool dma64; + /* Boolean. Is this ring stopped at ieee80211 level? */ + bool stopped; + /* Lock, only used for TX. */ + spinlock_t lock; + struct b43legacy_wldev *dev; +#ifdef CONFIG_B43LEGACY_DEBUG + /* Maximum number of used slots. 
*/ + int max_used_slots; + /* Last time we injected a ring overflow. */ + unsigned long last_injected_overflow; +#endif /* CONFIG_B43LEGACY_DEBUG*/ +}; + + +static inline +u32 b43legacy_dma_read(struct b43legacy_dmaring *ring, + u16 offset) +{ + return b43legacy_read32(ring->dev, ring->mmio_base + offset); +} + +static inline +void b43legacy_dma_write(struct b43legacy_dmaring *ring, + u16 offset, u32 value) +{ + b43legacy_write32(ring->dev, ring->mmio_base + offset, value); +} + + +int b43legacy_dma_init(struct b43legacy_wldev *dev); +void b43legacy_dma_free(struct b43legacy_wldev *dev); + +int b43legacy_dmacontroller_rx_reset(struct b43legacy_wldev *dev, + u16 dmacontroller_mmio_base, + int dma64); +int b43legacy_dmacontroller_tx_reset(struct b43legacy_wldev *dev, + u16 dmacontroller_mmio_base, + int dma64); + +u16 b43legacy_dmacontroller_base(int dma64bit, int dmacontroller_idx); + +void b43legacy_dma_tx_suspend(struct b43legacy_wldev *dev); +void b43legacy_dma_tx_resume(struct b43legacy_wldev *dev); + +void b43legacy_dma_get_tx_stats(struct b43legacy_wldev *dev, + struct ieee80211_tx_queue_stats *stats); + +int b43legacy_dma_tx(struct b43legacy_wldev *dev, + struct sk_buff *skb, + struct ieee80211_tx_control *ctl); +void b43legacy_dma_handle_txstatus(struct b43legacy_wldev *dev, + const struct b43legacy_txstatus *status); + +void b43legacy_dma_rx(struct b43legacy_dmaring *ring); + +#else /* CONFIG_B43LEGACY_DMA */ + + +static inline +int b43legacy_dma_init(struct b43legacy_wldev *dev) +{ + return 0; +} +static inline +void b43legacy_dma_free(struct b43legacy_wldev *dev) +{ +} +static inline +int b43legacy_dmacontroller_rx_reset(struct b43legacy_wldev *dev, + u16 dmacontroller_mmio_base, + int dma64) +{ + return 0; +} +static inline +int b43legacy_dmacontroller_tx_reset(struct b43legacy_wldev *dev, + u16 dmacontroller_mmio_base, + int dma64) +{ + return 0; +} +static inline +void b43legacy_dma_get_tx_stats(struct b43legacy_wldev *dev, + struct ieee80211_tx_queue_stats *stats) +{ +} +static inline +int b43legacy_dma_tx(struct b43legacy_wldev *dev, + struct sk_buff *skb, + struct ieee80211_tx_control *ctl) +{ + return 0; +} +static inline +void b43legacy_dma_handle_txstatus(struct b43legacy_wldev *dev, + const struct b43legacy_txstatus *status) +{ +} +static inline +void b43legacy_dma_rx(struct b43legacy_dmaring *ring) +{ +} +static inline +void b43legacy_dma_tx_suspend(struct b43legacy_wldev *dev) +{ +} +static inline +void b43legacy_dma_tx_resume(struct b43legacy_wldev *dev) +{ +} + +#endif /* CONFIG_B43LEGACY_DMA */ +#endif /* B43legacy_DMA_H_ */ diff --git a/drivers/net/wireless/b43legacy/ilt.c b/drivers/net/wireless/b43legacy/ilt.c new file mode 100644 index 0000000000000000000000000000000000000000..247fc780ffdbda62d934c39d45b329b3dca45432 --- /dev/null +++ b/drivers/net/wireless/b43legacy/ilt.c @@ -0,0 +1,336 @@ +/* + + Broadcom B43legacy wireless driver + + Copyright (c) 2005 Martin Langer , + Stefano Brivio + Michael Buesch + Danny van Dyk + Andreas Jaggi + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. 
+ + You should have received a copy of the GNU General Public License + along with this program; see the file COPYING. If not, write to + the Free Software Foundation, Inc., 51 Franklin Steet, Fifth Floor, + Boston, MA 02110-1301, USA. + +*/ + +#include "b43legacy.h" +#include "ilt.h" +#include "phy.h" + + +/**** Initial Internal Lookup Tables ****/ + +const u32 b43legacy_ilt_rotor[B43legacy_ILT_ROTOR_SIZE] = { + 0xFEB93FFD, 0xFEC63FFD, /* 0 */ + 0xFED23FFD, 0xFEDF3FFD, + 0xFEEC3FFE, 0xFEF83FFE, + 0xFF053FFE, 0xFF113FFE, + 0xFF1E3FFE, 0xFF2A3FFF, /* 8 */ + 0xFF373FFF, 0xFF443FFF, + 0xFF503FFF, 0xFF5D3FFF, + 0xFF693FFF, 0xFF763FFF, + 0xFF824000, 0xFF8F4000, /* 16 */ + 0xFF9B4000, 0xFFA84000, + 0xFFB54000, 0xFFC14000, + 0xFFCE4000, 0xFFDA4000, + 0xFFE74000, 0xFFF34000, /* 24 */ + 0x00004000, 0x000D4000, + 0x00194000, 0x00264000, + 0x00324000, 0x003F4000, + 0x004B4000, 0x00584000, /* 32 */ + 0x00654000, 0x00714000, + 0x007E4000, 0x008A3FFF, + 0x00973FFF, 0x00A33FFF, + 0x00B03FFF, 0x00BC3FFF, /* 40 */ + 0x00C93FFF, 0x00D63FFF, + 0x00E23FFE, 0x00EF3FFE, + 0x00FB3FFE, 0x01083FFE, + 0x01143FFE, 0x01213FFD, /* 48 */ + 0x012E3FFD, 0x013A3FFD, + 0x01473FFD, +}; + +const u32 b43legacy_ilt_retard[B43legacy_ILT_RETARD_SIZE] = { + 0xDB93CB87, 0xD666CF64, /* 0 */ + 0xD1FDD358, 0xCDA6D826, + 0xCA38DD9F, 0xC729E2B4, + 0xC469E88E, 0xC26AEE2B, + 0xC0DEF46C, 0xC073FA62, /* 8 */ + 0xC01D00D5, 0xC0760743, + 0xC1560D1E, 0xC2E51369, + 0xC4ED18FF, 0xC7AC1ED7, + 0xCB2823B2, 0xCEFA28D9, /* 16 */ + 0xD2F62D3F, 0xD7BB3197, + 0xDCE53568, 0xE1FE3875, + 0xE7D13B35, 0xED663D35, + 0xF39B3EC4, 0xF98E3FA7, /* 24 */ + 0x00004000, 0x06723FA7, + 0x0C653EC4, 0x129A3D35, + 0x182F3B35, 0x1E023875, + 0x231B3568, 0x28453197, /* 32 */ + 0x2D0A2D3F, 0x310628D9, + 0x34D823B2, 0x38541ED7, + 0x3B1318FF, 0x3D1B1369, + 0x3EAA0D1E, 0x3F8A0743, /* 40 */ + 0x3FE300D5, 0x3F8DFA62, + 0x3F22F46C, 0x3D96EE2B, + 0x3B97E88E, 0x38D7E2B4, + 0x35C8DD9F, 0x325AD826, /* 48 */ + 0x2E03D358, 0x299ACF64, + 0x246DCB87, +}; + +const u16 b43legacy_ilt_finefreqa[B43legacy_ILT_FINEFREQA_SIZE] = { + 0x0082, 0x0082, 0x0102, 0x0182, /* 0 */ + 0x0202, 0x0282, 0x0302, 0x0382, + 0x0402, 0x0482, 0x0502, 0x0582, + 0x05E2, 0x0662, 0x06E2, 0x0762, + 0x07E2, 0x0842, 0x08C2, 0x0942, /* 16 */ + 0x09C2, 0x0A22, 0x0AA2, 0x0B02, + 0x0B82, 0x0BE2, 0x0C62, 0x0CC2, + 0x0D42, 0x0DA2, 0x0E02, 0x0E62, + 0x0EE2, 0x0F42, 0x0FA2, 0x1002, /* 32 */ + 0x1062, 0x10C2, 0x1122, 0x1182, + 0x11E2, 0x1242, 0x12A2, 0x12E2, + 0x1342, 0x13A2, 0x1402, 0x1442, + 0x14A2, 0x14E2, 0x1542, 0x1582, /* 48 */ + 0x15E2, 0x1622, 0x1662, 0x16C1, + 0x1701, 0x1741, 0x1781, 0x17E1, + 0x1821, 0x1861, 0x18A1, 0x18E1, + 0x1921, 0x1961, 0x19A1, 0x19E1, /* 64 */ + 0x1A21, 0x1A61, 0x1AA1, 0x1AC1, + 0x1B01, 0x1B41, 0x1B81, 0x1BA1, + 0x1BE1, 0x1C21, 0x1C41, 0x1C81, + 0x1CA1, 0x1CE1, 0x1D01, 0x1D41, /* 80 */ + 0x1D61, 0x1DA1, 0x1DC1, 0x1E01, + 0x1E21, 0x1E61, 0x1E81, 0x1EA1, + 0x1EE1, 0x1F01, 0x1F21, 0x1F41, + 0x1F81, 0x1FA1, 0x1FC1, 0x1FE1, /* 96 */ + 0x2001, 0x2041, 0x2061, 0x2081, + 0x20A1, 0x20C1, 0x20E1, 0x2101, + 0x2121, 0x2141, 0x2161, 0x2181, + 0x21A1, 0x21C1, 0x21E1, 0x2201, /* 112 */ + 0x2221, 0x2241, 0x2261, 0x2281, + 0x22A1, 0x22C1, 0x22C1, 0x22E1, + 0x2301, 0x2321, 0x2341, 0x2361, + 0x2361, 0x2381, 0x23A1, 0x23C1, /* 128 */ + 0x23E1, 0x23E1, 0x2401, 0x2421, + 0x2441, 0x2441, 0x2461, 0x2481, + 0x2481, 0x24A1, 0x24C1, 0x24C1, + 0x24E1, 0x2501, 0x2501, 0x2521, /* 144 */ + 0x2541, 0x2541, 0x2561, 0x2561, + 0x2581, 0x25A1, 0x25A1, 0x25C1, + 0x25C1, 0x25E1, 0x2601, 0x2601, + 0x2621, 0x2621, 0x2641, 0x2641, /* 160 
*/ + 0x2661, 0x2661, 0x2681, 0x2681, + 0x26A1, 0x26A1, 0x26C1, 0x26C1, + 0x26E1, 0x26E1, 0x2701, 0x2701, + 0x2721, 0x2721, 0x2740, 0x2740, /* 176 */ + 0x2760, 0x2760, 0x2780, 0x2780, + 0x2780, 0x27A0, 0x27A0, 0x27C0, + 0x27C0, 0x27E0, 0x27E0, 0x27E0, + 0x2800, 0x2800, 0x2820, 0x2820, /* 192 */ + 0x2820, 0x2840, 0x2840, 0x2840, + 0x2860, 0x2860, 0x2880, 0x2880, + 0x2880, 0x28A0, 0x28A0, 0x28A0, + 0x28C0, 0x28C0, 0x28C0, 0x28E0, /* 208 */ + 0x28E0, 0x28E0, 0x2900, 0x2900, + 0x2900, 0x2920, 0x2920, 0x2920, + 0x2940, 0x2940, 0x2940, 0x2960, + 0x2960, 0x2960, 0x2960, 0x2980, /* 224 */ + 0x2980, 0x2980, 0x29A0, 0x29A0, + 0x29A0, 0x29A0, 0x29C0, 0x29C0, + 0x29C0, 0x29E0, 0x29E0, 0x29E0, + 0x29E0, 0x2A00, 0x2A00, 0x2A00, /* 240 */ + 0x2A00, 0x2A20, 0x2A20, 0x2A20, + 0x2A20, 0x2A40, 0x2A40, 0x2A40, + 0x2A40, 0x2A60, 0x2A60, 0x2A60, +}; + +const u16 b43legacy_ilt_finefreqg[B43legacy_ILT_FINEFREQG_SIZE] = { + 0x0089, 0x02E9, 0x0409, 0x04E9, /* 0 */ + 0x05A9, 0x0669, 0x0709, 0x0789, + 0x0829, 0x08A9, 0x0929, 0x0989, + 0x0A09, 0x0A69, 0x0AC9, 0x0B29, + 0x0BA9, 0x0BE9, 0x0C49, 0x0CA9, /* 16 */ + 0x0D09, 0x0D69, 0x0DA9, 0x0E09, + 0x0E69, 0x0EA9, 0x0F09, 0x0F49, + 0x0FA9, 0x0FE9, 0x1029, 0x1089, + 0x10C9, 0x1109, 0x1169, 0x11A9, /* 32 */ + 0x11E9, 0x1229, 0x1289, 0x12C9, + 0x1309, 0x1349, 0x1389, 0x13C9, + 0x1409, 0x1449, 0x14A9, 0x14E9, + 0x1529, 0x1569, 0x15A9, 0x15E9, /* 48 */ + 0x1629, 0x1669, 0x16A9, 0x16E8, + 0x1728, 0x1768, 0x17A8, 0x17E8, + 0x1828, 0x1868, 0x18A8, 0x18E8, + 0x1928, 0x1968, 0x19A8, 0x19E8, /* 64 */ + 0x1A28, 0x1A68, 0x1AA8, 0x1AE8, + 0x1B28, 0x1B68, 0x1BA8, 0x1BE8, + 0x1C28, 0x1C68, 0x1CA8, 0x1CE8, + 0x1D28, 0x1D68, 0x1DC8, 0x1E08, /* 80 */ + 0x1E48, 0x1E88, 0x1EC8, 0x1F08, + 0x1F48, 0x1F88, 0x1FE8, 0x2028, + 0x2068, 0x20A8, 0x2108, 0x2148, + 0x2188, 0x21C8, 0x2228, 0x2268, /* 96 */ + 0x22C8, 0x2308, 0x2348, 0x23A8, + 0x23E8, 0x2448, 0x24A8, 0x24E8, + 0x2548, 0x25A8, 0x2608, 0x2668, + 0x26C8, 0x2728, 0x2787, 0x27E7, /* 112 */ + 0x2847, 0x28C7, 0x2947, 0x29A7, + 0x2A27, 0x2AC7, 0x2B47, 0x2BE7, + 0x2CA7, 0x2D67, 0x2E47, 0x2F67, + 0x3247, 0x3526, 0x3646, 0x3726, /* 128 */ + 0x3806, 0x38A6, 0x3946, 0x39E6, + 0x3A66, 0x3AE6, 0x3B66, 0x3BC6, + 0x3C45, 0x3CA5, 0x3D05, 0x3D85, + 0x3DE5, 0x3E45, 0x3EA5, 0x3EE5, /* 144 */ + 0x3F45, 0x3FA5, 0x4005, 0x4045, + 0x40A5, 0x40E5, 0x4145, 0x4185, + 0x41E5, 0x4225, 0x4265, 0x42C5, + 0x4305, 0x4345, 0x43A5, 0x43E5, /* 160 */ + 0x4424, 0x4464, 0x44C4, 0x4504, + 0x4544, 0x4584, 0x45C4, 0x4604, + 0x4644, 0x46A4, 0x46E4, 0x4724, + 0x4764, 0x47A4, 0x47E4, 0x4824, /* 176 */ + 0x4864, 0x48A4, 0x48E4, 0x4924, + 0x4964, 0x49A4, 0x49E4, 0x4A24, + 0x4A64, 0x4AA4, 0x4AE4, 0x4B23, + 0x4B63, 0x4BA3, 0x4BE3, 0x4C23, /* 192 */ + 0x4C63, 0x4CA3, 0x4CE3, 0x4D23, + 0x4D63, 0x4DA3, 0x4DE3, 0x4E23, + 0x4E63, 0x4EA3, 0x4EE3, 0x4F23, + 0x4F63, 0x4FC3, 0x5003, 0x5043, /* 208 */ + 0x5083, 0x50C3, 0x5103, 0x5143, + 0x5183, 0x51E2, 0x5222, 0x5262, + 0x52A2, 0x52E2, 0x5342, 0x5382, + 0x53C2, 0x5402, 0x5462, 0x54A2, /* 224 */ + 0x5502, 0x5542, 0x55A2, 0x55E2, + 0x5642, 0x5682, 0x56E2, 0x5722, + 0x5782, 0x57E1, 0x5841, 0x58A1, + 0x5901, 0x5961, 0x59C1, 0x5A21, /* 240 */ + 0x5AA1, 0x5B01, 0x5B81, 0x5BE1, + 0x5C61, 0x5D01, 0x5D80, 0x5E20, + 0x5EE0, 0x5FA0, 0x6080, 0x61C0, +}; + +const u16 b43legacy_ilt_noisea2[B43legacy_ILT_NOISEA2_SIZE] = { + 0x0001, 0x0001, 0x0001, 0xFFFE, + 0xFFFE, 0x3FFF, 0x1000, 0x0393, +}; + +const u16 b43legacy_ilt_noisea3[B43legacy_ILT_NOISEA3_SIZE] = { + 0x4C4C, 0x4C4C, 0x4C4C, 0x2D36, + 0x4C4C, 0x4C4C, 0x4C4C, 0x2D36, +}; + +const u16 
b43legacy_ilt_noiseg1[B43legacy_ILT_NOISEG1_SIZE] = { + 0x013C, 0x01F5, 0x031A, 0x0631, + 0x0001, 0x0001, 0x0001, 0x0001, +}; + +const u16 b43legacy_ilt_noiseg2[B43legacy_ILT_NOISEG2_SIZE] = { + 0x5484, 0x3C40, 0x0000, 0x0000, + 0x0000, 0x0000, 0x0000, 0x0000, +}; + +const u16 b43legacy_ilt_noisescaleg1[B43legacy_ILT_NOISESCALEG_SIZE] = { + 0x6C77, 0x5162, 0x3B40, 0x3335, /* 0 */ + 0x2F2D, 0x2A2A, 0x2527, 0x1F21, + 0x1A1D, 0x1719, 0x1616, 0x1414, + 0x1414, 0x1400, 0x1414, 0x1614, + 0x1716, 0x1A19, 0x1F1D, 0x2521, /* 16 */ + 0x2A27, 0x2F2A, 0x332D, 0x3B35, + 0x5140, 0x6C62, 0x0077, +}; + +const u16 b43legacy_ilt_noisescaleg2[B43legacy_ILT_NOISESCALEG_SIZE] = { + 0xD8DD, 0xCBD4, 0xBCC0, 0XB6B7, /* 0 */ + 0xB2B0, 0xADAD, 0xA7A9, 0x9FA1, + 0x969B, 0x9195, 0x8F8F, 0x8A8A, + 0x8A8A, 0x8A00, 0x8A8A, 0x8F8A, + 0x918F, 0x9695, 0x9F9B, 0xA7A1, /* 16 */ + 0xADA9, 0xB2AD, 0xB6B0, 0xBCB7, + 0xCBC0, 0xD8D4, 0x00DD, +}; + +const u16 b43legacy_ilt_noisescaleg3[B43legacy_ILT_NOISESCALEG_SIZE] = { + 0xA4A4, 0xA4A4, 0xA4A4, 0xA4A4, /* 0 */ + 0xA4A4, 0xA4A4, 0xA4A4, 0xA4A4, + 0xA4A4, 0xA4A4, 0xA4A4, 0xA4A4, + 0xA4A4, 0xA400, 0xA4A4, 0xA4A4, + 0xA4A4, 0xA4A4, 0xA4A4, 0xA4A4, /* 16 */ + 0xA4A4, 0xA4A4, 0xA4A4, 0xA4A4, + 0xA4A4, 0xA4A4, 0x00A4, +}; + +const u16 b43legacy_ilt_sigmasqr1[B43legacy_ILT_SIGMASQR_SIZE] = { + 0x007A, 0x0075, 0x0071, 0x006C, /* 0 */ + 0x0067, 0x0063, 0x005E, 0x0059, + 0x0054, 0x0050, 0x004B, 0x0046, + 0x0042, 0x003D, 0x003D, 0x003D, + 0x003D, 0x003D, 0x003D, 0x003D, /* 16 */ + 0x003D, 0x003D, 0x003D, 0x003D, + 0x003D, 0x003D, 0x0000, 0x003D, + 0x003D, 0x003D, 0x003D, 0x003D, + 0x003D, 0x003D, 0x003D, 0x003D, /* 32 */ + 0x003D, 0x003D, 0x003D, 0x003D, + 0x0042, 0x0046, 0x004B, 0x0050, + 0x0054, 0x0059, 0x005E, 0x0063, + 0x0067, 0x006C, 0x0071, 0x0075, /* 48 */ + 0x007A, +}; + +const u16 b43legacy_ilt_sigmasqr2[B43legacy_ILT_SIGMASQR_SIZE] = { + 0x00DE, 0x00DC, 0x00DA, 0x00D8, /* 0 */ + 0x00D6, 0x00D4, 0x00D2, 0x00CF, + 0x00CD, 0x00CA, 0x00C7, 0x00C4, + 0x00C1, 0x00BE, 0x00BE, 0x00BE, + 0x00BE, 0x00BE, 0x00BE, 0x00BE, /* 16 */ + 0x00BE, 0x00BE, 0x00BE, 0x00BE, + 0x00BE, 0x00BE, 0x0000, 0x00BE, + 0x00BE, 0x00BE, 0x00BE, 0x00BE, + 0x00BE, 0x00BE, 0x00BE, 0x00BE, /* 32 */ + 0x00BE, 0x00BE, 0x00BE, 0x00BE, + 0x00C1, 0x00C4, 0x00C7, 0x00CA, + 0x00CD, 0x00CF, 0x00D2, 0x00D4, + 0x00D6, 0x00D8, 0x00DA, 0x00DC, /* 48 */ + 0x00DE, +}; + +/**** Helper functions to access the device Internal Lookup Tables ****/ + +void b43legacy_ilt_write(struct b43legacy_wldev *dev, u16 offset, u16 val) +{ + b43legacy_phy_write(dev, B43legacy_PHY_ILT_G_CTRL, offset); + mmiowb(); + b43legacy_phy_write(dev, B43legacy_PHY_ILT_G_DATA1, val); +} + +void b43legacy_ilt_write32(struct b43legacy_wldev *dev, u16 offset, u32 val) +{ + b43legacy_phy_write(dev, B43legacy_PHY_ILT_G_CTRL, offset); + mmiowb(); + b43legacy_phy_write(dev, B43legacy_PHY_ILT_G_DATA2, + (val & 0xFFFF0000) >> 16); + b43legacy_phy_write(dev, B43legacy_PHY_ILT_G_DATA1, + val & 0x0000FFFF); +} + +u16 b43legacy_ilt_read(struct b43legacy_wldev *dev, u16 offset) +{ + b43legacy_phy_write(dev, B43legacy_PHY_ILT_G_CTRL, offset); + return b43legacy_phy_read(dev, B43legacy_PHY_ILT_G_DATA1); +} diff --git a/drivers/net/wireless/b43legacy/ilt.h b/drivers/net/wireless/b43legacy/ilt.h new file mode 100644 index 0000000000000000000000000000000000000000..48bcf37eccb8080a66e619a085757874a7163627 --- /dev/null +++ b/drivers/net/wireless/b43legacy/ilt.h @@ -0,0 +1,34 @@ +#ifndef B43legacy_ILT_H_ +#define B43legacy_ILT_H_ + +#define B43legacy_ILT_ROTOR_SIZE 53 +extern const 
u32 b43legacy_ilt_rotor[B43legacy_ILT_ROTOR_SIZE]; +#define B43legacy_ILT_RETARD_SIZE 53 +extern const u32 b43legacy_ilt_retard[B43legacy_ILT_RETARD_SIZE]; +#define B43legacy_ILT_FINEFREQA_SIZE 256 +extern const u16 b43legacy_ilt_finefreqa[B43legacy_ILT_FINEFREQA_SIZE]; +#define B43legacy_ILT_FINEFREQG_SIZE 256 +extern const u16 b43legacy_ilt_finefreqg[B43legacy_ILT_FINEFREQG_SIZE]; +#define B43legacy_ILT_NOISEA2_SIZE 8 +extern const u16 b43legacy_ilt_noisea2[B43legacy_ILT_NOISEA2_SIZE]; +#define B43legacy_ILT_NOISEA3_SIZE 8 +extern const u16 b43legacy_ilt_noisea3[B43legacy_ILT_NOISEA3_SIZE]; +#define B43legacy_ILT_NOISEG1_SIZE 8 +extern const u16 b43legacy_ilt_noiseg1[B43legacy_ILT_NOISEG1_SIZE]; +#define B43legacy_ILT_NOISEG2_SIZE 8 +extern const u16 b43legacy_ilt_noiseg2[B43legacy_ILT_NOISEG2_SIZE]; +#define B43legacy_ILT_NOISESCALEG_SIZE 27 +extern const u16 b43legacy_ilt_noisescaleg1[B43legacy_ILT_NOISESCALEG_SIZE]; +extern const u16 b43legacy_ilt_noisescaleg2[B43legacy_ILT_NOISESCALEG_SIZE]; +extern const u16 b43legacy_ilt_noisescaleg3[B43legacy_ILT_NOISESCALEG_SIZE]; +#define B43legacy_ILT_SIGMASQR_SIZE 53 +extern const u16 b43legacy_ilt_sigmasqr1[B43legacy_ILT_SIGMASQR_SIZE]; +extern const u16 b43legacy_ilt_sigmasqr2[B43legacy_ILT_SIGMASQR_SIZE]; + + +void b43legacy_ilt_write(struct b43legacy_wldev *dev, u16 offset, u16 val); +void b43legacy_ilt_write32(struct b43legacy_wldev *dev, u16 offset, + u32 val); +u16 b43legacy_ilt_read(struct b43legacy_wldev *dev, u16 offset); + +#endif /* B43legacy_ILT_H_ */ diff --git a/drivers/net/wireless/b43legacy/leds.c b/drivers/net/wireless/b43legacy/leds.c new file mode 100644 index 0000000000000000000000000000000000000000..a584ea810502f8289fd961798e849e484581a26d --- /dev/null +++ b/drivers/net/wireless/b43legacy/leds.c @@ -0,0 +1,298 @@ +/* + + Broadcom B43legacy wireless driver + + Copyright (c) 2005 Martin Langer , + Stefano Brivio + Michael Buesch + Danny van Dyk + Andreas Jaggi + Copyright (c) 2007 Larry Finger + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; see the file COPYING. If not, write to + the Free Software Foundation, Inc., 51 Franklin Steet, Fifth Floor, + Boston, MA 02110-1301, USA. 
+
+*/
+
+#include "leds.h"
+#include "b43legacy.h"
+#include "main.h"
+
+static void b43legacy_led_changestate(struct b43legacy_led *led)
+{
+	struct b43legacy_wldev *dev = led->dev;
+	const int index = led->index;
+	u16 ledctl;
+
+	B43legacy_WARN_ON(!(index >= 0 && index < B43legacy_NR_LEDS));
+	B43legacy_WARN_ON(!led->blink_interval);
+	ledctl = b43legacy_read16(dev, B43legacy_MMIO_GPIO_CONTROL);
+	ledctl ^= (1 << index);
+	b43legacy_write16(dev, B43legacy_MMIO_GPIO_CONTROL, ledctl);
+}
+
+static void b43legacy_led_blink(unsigned long d)
+{
+	struct b43legacy_led *led = (struct b43legacy_led *)d;
+	struct b43legacy_wldev *dev = led->dev;
+	unsigned long flags;
+
+	spin_lock_irqsave(&dev->wl->leds_lock, flags);
+	if (led->blink_interval) {
+		b43legacy_led_changestate(led);
+		mod_timer(&led->blink_timer, jiffies + led->blink_interval);
+	}
+	spin_unlock_irqrestore(&dev->wl->leds_lock, flags);
+}
+
+static void b43legacy_led_blink_start(struct b43legacy_led *led,
+				      unsigned long interval)
+{
+	if (led->blink_interval)
+		return;
+	led->blink_interval = interval;
+	b43legacy_led_changestate(led);
+	led->blink_timer.expires = jiffies + interval;
+	add_timer(&led->blink_timer);
+}
+
+static void b43legacy_led_blink_stop(struct b43legacy_led *led, int sync)
+{
+	struct b43legacy_wldev *dev = led->dev;
+	const int index = led->index;
+	u16 ledctl;
+
+	if (!led->blink_interval)
+		return;
+	if (unlikely(sync))
+		del_timer_sync(&led->blink_timer);
+	else
+		del_timer(&led->blink_timer);
+	led->blink_interval = 0;
+
+	/* Make sure the LED is turned off. */
+	B43legacy_WARN_ON(!(index >= 0 && index < B43legacy_NR_LEDS));
+	ledctl = b43legacy_read16(dev, B43legacy_MMIO_GPIO_CONTROL);
+	if (led->activelow)
+		ledctl |= (1 << index);
+	else
+		ledctl &= ~(1 << index);
+	b43legacy_write16(dev, B43legacy_MMIO_GPIO_CONTROL, ledctl);
+}
+
+static void b43legacy_led_init_hardcoded(struct b43legacy_wldev *dev,
+					 struct b43legacy_led *led,
+					 int led_index)
+{
+	struct ssb_bus *bus = dev->dev->bus;
+
+	/* This function is called if the behaviour (and activelow)
+	 * information for a LED is missing in the SPROM.
+	 * We hardcode the behaviour values for various devices here.
+	 * Note that the B43legacy_LED_TEST_XXX behaviour values can
+	 * be used to figure out which LED is mapped to which index.
+ */ + + switch (led_index) { + case 0: + led->behaviour = B43legacy_LED_ACTIVITY; + led->activelow = 1; + if (bus->boardinfo.vendor == PCI_VENDOR_ID_COMPAQ) + led->behaviour = B43legacy_LED_RADIO_ALL; + break; + case 1: + led->behaviour = B43legacy_LED_RADIO_B; + if (bus->boardinfo.vendor == PCI_VENDOR_ID_ASUSTEK) + led->behaviour = B43legacy_LED_ASSOC; + break; + case 2: + led->behaviour = B43legacy_LED_RADIO_A; + break; + case 3: + led->behaviour = B43legacy_LED_OFF; + break; + default: + B43legacy_BUG_ON(1); + } +} + +int b43legacy_leds_init(struct b43legacy_wldev *dev) +{ + struct b43legacy_led *led; + u8 sprom[4]; + int i; + + sprom[0] = dev->dev->bus->sprom.r1.gpio0; + sprom[1] = dev->dev->bus->sprom.r1.gpio1; + sprom[2] = dev->dev->bus->sprom.r1.gpio2; + sprom[3] = dev->dev->bus->sprom.r1.gpio3; + + for (i = 0; i < B43legacy_NR_LEDS; i++) { + led = &(dev->leds[i]); + led->index = i; + led->dev = dev; + setup_timer(&led->blink_timer, + b43legacy_led_blink, + (unsigned long)led); + + if (sprom[i] == 0xFF) + b43legacy_led_init_hardcoded(dev, led, i); + else { + led->behaviour = sprom[i] & B43legacy_LED_BEHAVIOUR; + led->activelow = !!(sprom[i] & + B43legacy_LED_ACTIVELOW); + } + } + + return 0; +} + +void b43legacy_leds_exit(struct b43legacy_wldev *dev) +{ + struct b43legacy_led *led; + int i; + + for (i = 0; i < B43legacy_NR_LEDS; i++) { + led = &(dev->leds[i]); + b43legacy_led_blink_stop(led, 1); + } + b43legacy_leds_switch_all(dev, 0); +} + +void b43legacy_leds_update(struct b43legacy_wldev *dev, int activity) +{ + struct b43legacy_led *led; + struct b43legacy_phy *phy = &dev->phy; + const int transferring = (jiffies - dev->stats.last_tx) + < B43legacy_LED_XFER_THRES; + int i; + int turn_on; + unsigned long interval = 0; + u16 ledctl; + unsigned long flags; + bool radio_enabled = (phy->radio_on && dev->radio_hw_enable); + + spin_lock_irqsave(&dev->wl->leds_lock, flags); + ledctl = b43legacy_read16(dev, B43legacy_MMIO_GPIO_CONTROL); + for (i = 0; i < B43legacy_NR_LEDS; i++) { + led = &(dev->leds[i]); + + turn_on = 0; + switch (led->behaviour) { + case B43legacy_LED_INACTIVE: + continue; + case B43legacy_LED_OFF: + break; + case B43legacy_LED_ON: + turn_on = 1; + break; + case B43legacy_LED_ACTIVITY: + turn_on = activity; + break; + case B43legacy_LED_RADIO_ALL: + turn_on = radio_enabled; + break; + case B43legacy_LED_RADIO_A: + break; + case B43legacy_LED_RADIO_B: + turn_on = radio_enabled; + break; + case B43legacy_LED_MODE_BG: + if (phy->type == B43legacy_PHYTYPE_G && radio_enabled) + turn_on = 1; + break; + case B43legacy_LED_TRANSFER: + if (transferring) + b43legacy_led_blink_start(led, + B43legacy_LEDBLINK_MEDIUM); + else + b43legacy_led_blink_stop(led, 0); + continue; + case B43legacy_LED_APTRANSFER: + if (b43legacy_is_mode(dev->wl, + IEEE80211_IF_TYPE_AP)) { + if (transferring) { + interval = B43legacy_LEDBLINK_FAST; + turn_on = 1; + } + } else { + turn_on = 1; + if (transferring) + interval = B43legacy_LEDBLINK_FAST; + else + turn_on = 0; + } + if (turn_on) + b43legacy_led_blink_start(led, interval); + else + b43legacy_led_blink_stop(led, 0); + continue; + case B43legacy_LED_WEIRD: + break; + case B43legacy_LED_ASSOC: + turn_on = 1; +#ifdef CONFIG_B43LEGACY_DEBUG + case B43legacy_LED_TEST_BLINKSLOW: + b43legacy_led_blink_start(led, B43legacy_LEDBLINK_SLOW); + continue; + case B43legacy_LED_TEST_BLINKMEDIUM: + b43legacy_led_blink_start(led, + B43legacy_LEDBLINK_MEDIUM); + continue; + case B43legacy_LED_TEST_BLINKFAST: + b43legacy_led_blink_start(led, B43legacy_LEDBLINK_FAST); 
+ continue; +#endif /* CONFIG_B43LEGACY_DEBUG */ + default: + B43legacy_BUG_ON(1); + }; + + if (led->activelow) + turn_on = !turn_on; + if (turn_on) + ledctl |= (1 << i); + else + ledctl &= ~(1 << i); + } + b43legacy_write16(dev, B43legacy_MMIO_GPIO_CONTROL, ledctl); + spin_unlock_irqrestore(&dev->wl->leds_lock, flags); +} + +void b43legacy_leds_switch_all(struct b43legacy_wldev *dev, int on) +{ + struct b43legacy_led *led; + u16 ledctl; + int i; + int bit_on; + unsigned long flags; + + spin_lock_irqsave(&dev->wl->leds_lock, flags); + ledctl = b43legacy_read16(dev, B43legacy_MMIO_GPIO_CONTROL); + for (i = 0; i < B43legacy_NR_LEDS; i++) { + led = &(dev->leds[i]); + if (led->behaviour == B43legacy_LED_INACTIVE) + continue; + if (on) + bit_on = led->activelow ? 0 : 1; + else + bit_on = led->activelow ? 1 : 0; + if (bit_on) + ledctl |= (1 << i); + else + ledctl &= ~(1 << i); + } + b43legacy_write16(dev, B43legacy_MMIO_GPIO_CONTROL, ledctl); + spin_unlock_irqrestore(&dev->wl->leds_lock, flags); +} diff --git a/drivers/net/wireless/b43legacy/leds.h b/drivers/net/wireless/b43legacy/leds.h new file mode 100644 index 0000000000000000000000000000000000000000..b989f503e68421d2b2d5e9a8fd9789e60bcb8ef9 --- /dev/null +++ b/drivers/net/wireless/b43legacy/leds.h @@ -0,0 +1,56 @@ +#ifndef B43legacy_LEDS_H_ +#define B43legacy_LEDS_H_ + +#include +#include + + +struct b43legacy_led { + u8 behaviour; + bool activelow; + /* Index in the "leds" array in b43legacy_wldev */ + u8 index; + struct b43legacy_wldev *dev; + struct timer_list blink_timer; + unsigned long blink_interval; +}; + +/* Delay between state changes when blinking in jiffies */ +#define B43legacy_LEDBLINK_SLOW (HZ / 1) +#define B43legacy_LEDBLINK_MEDIUM (HZ / 4) +#define B43legacy_LEDBLINK_FAST (HZ / 8) + +#define B43legacy_LED_XFER_THRES (HZ / 100) + +#define B43legacy_LED_BEHAVIOUR 0x7F +#define B43legacy_LED_ACTIVELOW 0x80 +enum { /* LED behaviour values */ + B43legacy_LED_OFF, + B43legacy_LED_ON, + B43legacy_LED_ACTIVITY, + B43legacy_LED_RADIO_ALL, + B43legacy_LED_RADIO_A, + B43legacy_LED_RADIO_B, + B43legacy_LED_MODE_BG, + B43legacy_LED_TRANSFER, + B43legacy_LED_APTRANSFER, + B43legacy_LED_WEIRD, + B43legacy_LED_ASSOC, + B43legacy_LED_INACTIVE, + + /* Behaviour values for testing. + * With these values it is easier to figure out + * the real behaviour of leds, in case the SPROM + * is missing information. + */ + B43legacy_LED_TEST_BLINKSLOW, + B43legacy_LED_TEST_BLINKMEDIUM, + B43legacy_LED_TEST_BLINKFAST, +}; + +int b43legacy_leds_init(struct b43legacy_wldev *dev); +void b43legacy_leds_exit(struct b43legacy_wldev *dev); +void b43legacy_leds_update(struct b43legacy_wldev *dev, int activity); +void b43legacy_leds_switch_all(struct b43legacy_wldev *dev, int on); + +#endif /* B43legacy_LEDS_H_ */ diff --git a/drivers/net/wireless/b43legacy/main.c b/drivers/net/wireless/b43legacy/main.c new file mode 100644 index 0000000000000000000000000000000000000000..f0749510bcd7a193088194ce6b6e935bcd47a105 --- /dev/null +++ b/drivers/net/wireless/b43legacy/main.c @@ -0,0 +1,3856 @@ +/* + * + * Broadcom B43legacy wireless driver + * + * Copyright (c) 2005 Martin Langer + * Copyright (c) 2005 Stefano Brivio + * Copyright (c) 2005, 2006 Michael Buesch + * Copyright (c) 2005 Danny van Dyk + * Copyright (c) 2005 Andreas Jaggi + * Copyright (c) 2007 Larry Finger + * + * Some parts of the code in this file are derived from the ipw2200 + * driver Copyright(c) 2003 - 2004 Intel Corporation. 
+ + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; see the file COPYING. If not, write to + * the Free Software Foundation, Inc., 51 Franklin Steet, Fifth Floor, + * Boston, MA 02110-1301, USA. + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "b43legacy.h" +#include "main.h" +#include "debugfs.h" +#include "phy.h" +#include "dma.h" +#include "pio.h" +#include "sysfs.h" +#include "xmit.h" +#include "radio.h" + + +MODULE_DESCRIPTION("Broadcom B43legacy wireless driver"); +MODULE_AUTHOR("Martin Langer"); +MODULE_AUTHOR("Stefano Brivio"); +MODULE_AUTHOR("Michael Buesch"); +MODULE_LICENSE("GPL"); + +#if defined(CONFIG_B43LEGACY_DMA) && defined(CONFIG_B43LEGACY_PIO) +static int modparam_pio; +module_param_named(pio, modparam_pio, int, 0444); +MODULE_PARM_DESC(pio, "enable(1) / disable(0) PIO mode"); +#elif defined(CONFIG_B43LEGACY_DMA) +# define modparam_pio 0 +#elif defined(CONFIG_B43LEGACY_PIO) +# define modparam_pio 1 +#endif + +static int modparam_bad_frames_preempt; +module_param_named(bad_frames_preempt, modparam_bad_frames_preempt, int, 0444); +MODULE_PARM_DESC(bad_frames_preempt, "enable(1) / disable(0) Bad Frames" + " Preemption"); + +static int modparam_short_retry = B43legacy_DEFAULT_SHORT_RETRY_LIMIT; +module_param_named(short_retry, modparam_short_retry, int, 0444); +MODULE_PARM_DESC(short_retry, "Short-Retry-Limit (0 - 15)"); + +static int modparam_long_retry = B43legacy_DEFAULT_LONG_RETRY_LIMIT; +module_param_named(long_retry, modparam_long_retry, int, 0444); +MODULE_PARM_DESC(long_retry, "Long-Retry-Limit (0 - 15)"); + +static int modparam_noleds; +module_param_named(noleds, modparam_noleds, int, 0444); +MODULE_PARM_DESC(noleds, "Turn off all LED activity"); + +static char modparam_fwpostfix[16]; +module_param_string(fwpostfix, modparam_fwpostfix, 16, 0444); +MODULE_PARM_DESC(fwpostfix, "Postfix for the firmware files to load."); + +/* The following table supports BCM4301, BCM4303 and BCM4306/2 devices. */ +static const struct ssb_device_id b43legacy_ssb_tbl[] = { + SSB_DEVICE(SSB_VENDOR_BROADCOM, SSB_DEV_80211, 2), + SSB_DEVICE(SSB_VENDOR_BROADCOM, SSB_DEV_80211, 4), + SSB_DEVTABLE_END +}; +MODULE_DEVICE_TABLE(ssb, b43legacy_ssb_tbl); + + +/* Channel and ratetables are shared for all devices. + * They can't be const, because ieee80211 puts some precalculated + * data in there. 
This data is the same for all devices, so we don't + * get concurrency issues */ +#define RATETAB_ENT(_rateid, _flags) \ + { \ + .rate = B43legacy_RATE_TO_100KBPS(_rateid), \ + .val = (_rateid), \ + .val2 = (_rateid), \ + .flags = (_flags), \ + } +static struct ieee80211_rate __b43legacy_ratetable[] = { + RATETAB_ENT(B43legacy_CCK_RATE_1MB, IEEE80211_RATE_CCK), + RATETAB_ENT(B43legacy_CCK_RATE_2MB, IEEE80211_RATE_CCK_2), + RATETAB_ENT(B43legacy_CCK_RATE_5MB, IEEE80211_RATE_CCK_2), + RATETAB_ENT(B43legacy_CCK_RATE_11MB, IEEE80211_RATE_CCK_2), + RATETAB_ENT(B43legacy_OFDM_RATE_6MB, IEEE80211_RATE_OFDM), + RATETAB_ENT(B43legacy_OFDM_RATE_9MB, IEEE80211_RATE_OFDM), + RATETAB_ENT(B43legacy_OFDM_RATE_12MB, IEEE80211_RATE_OFDM), + RATETAB_ENT(B43legacy_OFDM_RATE_18MB, IEEE80211_RATE_OFDM), + RATETAB_ENT(B43legacy_OFDM_RATE_24MB, IEEE80211_RATE_OFDM), + RATETAB_ENT(B43legacy_OFDM_RATE_36MB, IEEE80211_RATE_OFDM), + RATETAB_ENT(B43legacy_OFDM_RATE_48MB, IEEE80211_RATE_OFDM), + RATETAB_ENT(B43legacy_OFDM_RATE_54MB, IEEE80211_RATE_OFDM), +}; +#define b43legacy_a_ratetable (__b43legacy_ratetable + 4) +#define b43legacy_a_ratetable_size 8 +#define b43legacy_b_ratetable (__b43legacy_ratetable + 0) +#define b43legacy_b_ratetable_size 4 +#define b43legacy_g_ratetable (__b43legacy_ratetable + 0) +#define b43legacy_g_ratetable_size 12 + +#define CHANTAB_ENT(_chanid, _freq) \ + { \ + .chan = (_chanid), \ + .freq = (_freq), \ + .val = (_chanid), \ + .flag = IEEE80211_CHAN_W_SCAN | \ + IEEE80211_CHAN_W_ACTIVE_SCAN | \ + IEEE80211_CHAN_W_IBSS, \ + .power_level = 0x0A, \ + .antenna_max = 0xFF, \ + } +static struct ieee80211_channel b43legacy_bg_chantable[] = { + CHANTAB_ENT(1, 2412), + CHANTAB_ENT(2, 2417), + CHANTAB_ENT(3, 2422), + CHANTAB_ENT(4, 2427), + CHANTAB_ENT(5, 2432), + CHANTAB_ENT(6, 2437), + CHANTAB_ENT(7, 2442), + CHANTAB_ENT(8, 2447), + CHANTAB_ENT(9, 2452), + CHANTAB_ENT(10, 2457), + CHANTAB_ENT(11, 2462), + CHANTAB_ENT(12, 2467), + CHANTAB_ENT(13, 2472), + CHANTAB_ENT(14, 2484), +}; +#define b43legacy_bg_chantable_size ARRAY_SIZE(b43legacy_bg_chantable) + +static void b43legacy_wireless_core_exit(struct b43legacy_wldev *dev); +static int b43legacy_wireless_core_init(struct b43legacy_wldev *dev); +static void b43legacy_wireless_core_stop(struct b43legacy_wldev *dev); +static int b43legacy_wireless_core_start(struct b43legacy_wldev *dev); + + +static int b43legacy_ratelimit(struct b43legacy_wl *wl) +{ + if (!wl || !wl->current_dev) + return 1; + if (b43legacy_status(wl->current_dev) < B43legacy_STAT_STARTED) + return 1; + /* We are up and running. + * Ratelimit the messages to avoid DoS over the net. */ + return net_ratelimit(); +} + +void b43legacyinfo(struct b43legacy_wl *wl, const char *fmt, ...) +{ + va_list args; + + if (!b43legacy_ratelimit(wl)) + return; + va_start(args, fmt); + printk(KERN_INFO "b43legacy-%s: ", + (wl && wl->hw) ? wiphy_name(wl->hw->wiphy) : "wlan"); + vprintk(fmt, args); + va_end(args); +} + +void b43legacyerr(struct b43legacy_wl *wl, const char *fmt, ...) +{ + va_list args; + + if (!b43legacy_ratelimit(wl)) + return; + va_start(args, fmt); + printk(KERN_ERR "b43legacy-%s ERROR: ", + (wl && wl->hw) ? wiphy_name(wl->hw->wiphy) : "wlan"); + vprintk(fmt, args); + va_end(args); +} + +void b43legacywarn(struct b43legacy_wl *wl, const char *fmt, ...) +{ + va_list args; + + if (!b43legacy_ratelimit(wl)) + return; + va_start(args, fmt); + printk(KERN_WARNING "b43legacy-%s warning: ", + (wl && wl->hw) ? 
wiphy_name(wl->hw->wiphy) : "wlan"); + vprintk(fmt, args); + va_end(args); +} + +#if B43legacy_DEBUG +void b43legacydbg(struct b43legacy_wl *wl, const char *fmt, ...) +{ + va_list args; + + va_start(args, fmt); + printk(KERN_DEBUG "b43legacy-%s debug: ", + (wl && wl->hw) ? wiphy_name(wl->hw->wiphy) : "wlan"); + vprintk(fmt, args); + va_end(args); +} +#endif /* DEBUG */ + +static void b43legacy_ram_write(struct b43legacy_wldev *dev, u16 offset, + u32 val) +{ + u32 status; + + B43legacy_WARN_ON(offset % 4 != 0); + + status = b43legacy_read32(dev, B43legacy_MMIO_STATUS_BITFIELD); + if (status & B43legacy_SBF_XFER_REG_BYTESWAP) + val = swab32(val); + + b43legacy_write32(dev, B43legacy_MMIO_RAM_CONTROL, offset); + mmiowb(); + b43legacy_write32(dev, B43legacy_MMIO_RAM_DATA, val); +} + +static inline +void b43legacy_shm_control_word(struct b43legacy_wldev *dev, + u16 routing, u16 offset) +{ + u32 control; + + /* "offset" is the WORD offset. */ + + control = routing; + control <<= 16; + control |= offset; + b43legacy_write32(dev, B43legacy_MMIO_SHM_CONTROL, control); +} + +u32 b43legacy_shm_read32(struct b43legacy_wldev *dev, + u16 routing, u16 offset) +{ + u32 ret; + + if (routing == B43legacy_SHM_SHARED) { + B43legacy_WARN_ON((offset & 0x0001) != 0); + if (offset & 0x0003) { + /* Unaligned access */ + b43legacy_shm_control_word(dev, routing, offset >> 2); + ret = b43legacy_read16(dev, + B43legacy_MMIO_SHM_DATA_UNALIGNED); + ret <<= 16; + b43legacy_shm_control_word(dev, routing, + (offset >> 2) + 1); + ret |= b43legacy_read16(dev, B43legacy_MMIO_SHM_DATA); + + return ret; + } + offset >>= 2; + } + b43legacy_shm_control_word(dev, routing, offset); + ret = b43legacy_read32(dev, B43legacy_MMIO_SHM_DATA); + + return ret; +} + +u16 b43legacy_shm_read16(struct b43legacy_wldev *dev, + u16 routing, u16 offset) +{ + u16 ret; + + if (routing == B43legacy_SHM_SHARED) { + B43legacy_WARN_ON((offset & 0x0001) != 0); + if (offset & 0x0003) { + /* Unaligned access */ + b43legacy_shm_control_word(dev, routing, offset >> 2); + ret = b43legacy_read16(dev, + B43legacy_MMIO_SHM_DATA_UNALIGNED); + + return ret; + } + offset >>= 2; + } + b43legacy_shm_control_word(dev, routing, offset); + ret = b43legacy_read16(dev, B43legacy_MMIO_SHM_DATA); + + return ret; +} + +void b43legacy_shm_write32(struct b43legacy_wldev *dev, + u16 routing, u16 offset, + u32 value) +{ + if (routing == B43legacy_SHM_SHARED) { + B43legacy_WARN_ON((offset & 0x0001) != 0); + if (offset & 0x0003) { + /* Unaligned access */ + b43legacy_shm_control_word(dev, routing, offset >> 2); + mmiowb(); + b43legacy_write16(dev, + B43legacy_MMIO_SHM_DATA_UNALIGNED, + (value >> 16) & 0xffff); + mmiowb(); + b43legacy_shm_control_word(dev, routing, + (offset >> 2) + 1); + mmiowb(); + b43legacy_write16(dev, B43legacy_MMIO_SHM_DATA, + value & 0xffff); + return; + } + offset >>= 2; + } + b43legacy_shm_control_word(dev, routing, offset); + mmiowb(); + b43legacy_write32(dev, B43legacy_MMIO_SHM_DATA, value); +} + +void b43legacy_shm_write16(struct b43legacy_wldev *dev, u16 routing, u16 offset, + u16 value) +{ + if (routing == B43legacy_SHM_SHARED) { + B43legacy_WARN_ON((offset & 0x0001) != 0); + if (offset & 0x0003) { + /* Unaligned access */ + b43legacy_shm_control_word(dev, routing, offset >> 2); + mmiowb(); + b43legacy_write16(dev, + B43legacy_MMIO_SHM_DATA_UNALIGNED, + value); + return; + } + offset >>= 2; + } + b43legacy_shm_control_word(dev, routing, offset); + mmiowb(); + b43legacy_write16(dev, B43legacy_MMIO_SHM_DATA, value); +} + +/* Read HostFlags */ +u32 
b43legacy_hf_read(struct b43legacy_wldev *dev)
+{
+	u32 ret;
+
+	ret = b43legacy_shm_read16(dev, B43legacy_SHM_SHARED,
+				   B43legacy_SHM_SH_HOSTFHI);
+	ret <<= 16;
+	ret |= b43legacy_shm_read16(dev, B43legacy_SHM_SHARED,
+				    B43legacy_SHM_SH_HOSTFLO);
+
+	return ret;
+}
+
+/* Write HostFlags */
+void b43legacy_hf_write(struct b43legacy_wldev *dev, u32 value)
+{
+	b43legacy_shm_write16(dev, B43legacy_SHM_SHARED,
+			      B43legacy_SHM_SH_HOSTFLO,
+			      (value & 0x0000FFFF));
+	b43legacy_shm_write16(dev, B43legacy_SHM_SHARED,
+			      B43legacy_SHM_SH_HOSTFHI,
+			      ((value & 0xFFFF0000) >> 16));
+}
+
+void b43legacy_tsf_read(struct b43legacy_wldev *dev, u64 *tsf)
+{
+	/* We need to be careful. As we read the TSF from multiple
+	 * registers, we should take care of register overflows.
+	 * In theory, the whole tsf read process should be atomic.
+	 * We try to be atomic here, by restarting the read process,
+	 * if any of the high registers changed (overflowed).
+	 */
+	if (dev->dev->id.revision >= 3) {
+		u32 low;
+		u32 high;
+		u32 high2;
+
+		do {
+			high = b43legacy_read32(dev,
+					B43legacy_MMIO_REV3PLUS_TSF_HIGH);
+			low = b43legacy_read32(dev,
+					B43legacy_MMIO_REV3PLUS_TSF_LOW);
+			high2 = b43legacy_read32(dev,
+					B43legacy_MMIO_REV3PLUS_TSF_HIGH);
+		} while (unlikely(high != high2));
+
+		*tsf = high;
+		*tsf <<= 32;
+		*tsf |= low;
+	} else {
+		u64 tmp;
+		u16 v0;
+		u16 v1;
+		u16 v2;
+		u16 v3;
+		u16 test1;
+		u16 test2;
+		u16 test3;
+
+		do {
+			v3 = b43legacy_read16(dev, B43legacy_MMIO_TSF_3);
+			v2 = b43legacy_read16(dev, B43legacy_MMIO_TSF_2);
+			v1 = b43legacy_read16(dev, B43legacy_MMIO_TSF_1);
+			v0 = b43legacy_read16(dev, B43legacy_MMIO_TSF_0);
+
+			test3 = b43legacy_read16(dev, B43legacy_MMIO_TSF_3);
+			test2 = b43legacy_read16(dev, B43legacy_MMIO_TSF_2);
+			test1 = b43legacy_read16(dev, B43legacy_MMIO_TSF_1);
+		} while (v3 != test3 || v2 != test2 || v1 != test1);
+
+		*tsf = v3;
+		*tsf <<= 48;
+		tmp = v2;
+		tmp <<= 32;
+		*tsf |= tmp;
+		tmp = v1;
+		tmp <<= 16;
+		*tsf |= tmp;
+		*tsf |= v0;
+	}
+}
+
+static void b43legacy_time_lock(struct b43legacy_wldev *dev)
+{
+	u32 status;
+
+	status = b43legacy_read32(dev, B43legacy_MMIO_STATUS_BITFIELD);
+	status |= B43legacy_SBF_TIME_UPDATE;
+	b43legacy_write32(dev, B43legacy_MMIO_STATUS_BITFIELD, status);
+	mmiowb();
+}
+
+static void b43legacy_time_unlock(struct b43legacy_wldev *dev)
+{
+	u32 status;
+
+	status = b43legacy_read32(dev, B43legacy_MMIO_STATUS_BITFIELD);
+	status &= ~B43legacy_SBF_TIME_UPDATE;
+	b43legacy_write32(dev, B43legacy_MMIO_STATUS_BITFIELD, status);
+}
+
+static void b43legacy_tsf_write_locked(struct b43legacy_wldev *dev, u64 tsf)
+{
+	/* Be careful with the in-progress timer.
+	 * First zero out the low register, so we have a full
+	 * register-overflow duration to complete the operation.
+ */ + if (dev->dev->id.revision >= 3) { + u32 lo = (tsf & 0x00000000FFFFFFFFULL); + u32 hi = (tsf & 0xFFFFFFFF00000000ULL) >> 32; + + b43legacy_write32(dev, B43legacy_MMIO_REV3PLUS_TSF_LOW, 0); + mmiowb(); + b43legacy_write32(dev, B43legacy_MMIO_REV3PLUS_TSF_HIGH, + hi); + mmiowb(); + b43legacy_write32(dev, B43legacy_MMIO_REV3PLUS_TSF_LOW, + lo); + } else { + u16 v0 = (tsf & 0x000000000000FFFFULL); + u16 v1 = (tsf & 0x00000000FFFF0000ULL) >> 16; + u16 v2 = (tsf & 0x0000FFFF00000000ULL) >> 32; + u16 v3 = (tsf & 0xFFFF000000000000ULL) >> 48; + + b43legacy_write16(dev, B43legacy_MMIO_TSF_0, 0); + mmiowb(); + b43legacy_write16(dev, B43legacy_MMIO_TSF_3, v3); + mmiowb(); + b43legacy_write16(dev, B43legacy_MMIO_TSF_2, v2); + mmiowb(); + b43legacy_write16(dev, B43legacy_MMIO_TSF_1, v1); + mmiowb(); + b43legacy_write16(dev, B43legacy_MMIO_TSF_0, v0); + } +} + +void b43legacy_tsf_write(struct b43legacy_wldev *dev, u64 tsf) +{ + b43legacy_time_lock(dev); + b43legacy_tsf_write_locked(dev, tsf); + b43legacy_time_unlock(dev); +} + +static +void b43legacy_macfilter_set(struct b43legacy_wldev *dev, + u16 offset, const u8 *mac) +{ + static const u8 zero_addr[ETH_ALEN] = { 0 }; + u16 data; + + if (!mac) + mac = zero_addr; + + offset |= 0x0020; + b43legacy_write16(dev, B43legacy_MMIO_MACFILTER_CONTROL, offset); + + data = mac[0]; + data |= mac[1] << 8; + b43legacy_write16(dev, B43legacy_MMIO_MACFILTER_DATA, data); + data = mac[2]; + data |= mac[3] << 8; + b43legacy_write16(dev, B43legacy_MMIO_MACFILTER_DATA, data); + data = mac[4]; + data |= mac[5] << 8; + b43legacy_write16(dev, B43legacy_MMIO_MACFILTER_DATA, data); +} + +static void b43legacy_write_mac_bssid_templates(struct b43legacy_wldev *dev) +{ + static const u8 zero_addr[ETH_ALEN] = { 0 }; + const u8 *mac = dev->wl->mac_addr; + const u8 *bssid = dev->wl->bssid; + u8 mac_bssid[ETH_ALEN * 2]; + int i; + u32 tmp; + + if (!bssid) + bssid = zero_addr; + if (!mac) + mac = zero_addr; + + b43legacy_macfilter_set(dev, B43legacy_MACFILTER_BSSID, bssid); + + memcpy(mac_bssid, mac, ETH_ALEN); + memcpy(mac_bssid + ETH_ALEN, bssid, ETH_ALEN); + + /* Write our MAC address and BSSID to template ram */ + for (i = 0; i < ARRAY_SIZE(mac_bssid); i += sizeof(u32)) { + tmp = (u32)(mac_bssid[i + 0]); + tmp |= (u32)(mac_bssid[i + 1]) << 8; + tmp |= (u32)(mac_bssid[i + 2]) << 16; + tmp |= (u32)(mac_bssid[i + 3]) << 24; + b43legacy_ram_write(dev, 0x20 + i, tmp); + b43legacy_ram_write(dev, 0x78 + i, tmp); + b43legacy_ram_write(dev, 0x478 + i, tmp); + } +} + +static void b43legacy_upload_card_macaddress(struct b43legacy_wldev *dev) +{ + b43legacy_write_mac_bssid_templates(dev); + b43legacy_macfilter_set(dev, B43legacy_MACFILTER_SELF, + dev->wl->mac_addr); +} + +static void b43legacy_set_slot_time(struct b43legacy_wldev *dev, + u16 slot_time) +{ + /* slot_time is in usec. */ + if (dev->phy.type != B43legacy_PHYTYPE_G) + return; + b43legacy_write16(dev, 0x684, 510 + slot_time); + b43legacy_shm_write16(dev, B43legacy_SHM_SHARED, 0x0010, + slot_time); +} + +static void b43legacy_short_slot_timing_enable(struct b43legacy_wldev *dev) +{ + b43legacy_set_slot_time(dev, 9); + dev->short_slot = 1; +} + +static void b43legacy_short_slot_timing_disable(struct b43legacy_wldev *dev) +{ + b43legacy_set_slot_time(dev, 20); + dev->short_slot = 0; +} + +/* Enable a Generic IRQ. "mask" is the mask of which IRQs to enable. + * Returns the _previously_ enabled IRQ mask. 
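+ * Both this helper and b43legacy_interrupt_disable() below return the
+ * old mask, so callers can stash the current IRQ state and restore it
+ * later (see dev->irq_savedstate in the interrupt handling code).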
+ */ +static inline u32 b43legacy_interrupt_enable(struct b43legacy_wldev *dev, + u32 mask) +{ + u32 old_mask; + + old_mask = b43legacy_read32(dev, B43legacy_MMIO_GEN_IRQ_MASK); + b43legacy_write32(dev, B43legacy_MMIO_GEN_IRQ_MASK, old_mask | + mask); + + return old_mask; +} + +/* Disable a Generic IRQ. "mask" is the mask of which IRQs to disable. + * Returns the _previously_ enabled IRQ mask. + */ +static inline u32 b43legacy_interrupt_disable(struct b43legacy_wldev *dev, + u32 mask) +{ + u32 old_mask; + + old_mask = b43legacy_read32(dev, B43legacy_MMIO_GEN_IRQ_MASK); + b43legacy_write32(dev, B43legacy_MMIO_GEN_IRQ_MASK, old_mask & ~mask); + + return old_mask; +} + +/* Synchronize IRQ top- and bottom-half. + * IRQs must be masked before calling this. + * This must not be called with the irq_lock held. + */ +static void b43legacy_synchronize_irq(struct b43legacy_wldev *dev) +{ + synchronize_irq(dev->dev->irq); + tasklet_kill(&dev->isr_tasklet); +} + +/* DummyTransmission function, as documented on + * http://bcm-specs.sipsolutions.net/DummyTransmission + */ +void b43legacy_dummy_transmission(struct b43legacy_wldev *dev) +{ + struct b43legacy_phy *phy = &dev->phy; + unsigned int i; + unsigned int max_loop; + u16 value; + u32 buffer[5] = { + 0x00000000, + 0x00D40000, + 0x00000000, + 0x01000000, + 0x00000000, + }; + + switch (phy->type) { + case B43legacy_PHYTYPE_B: + case B43legacy_PHYTYPE_G: + max_loop = 0xFA; + buffer[0] = 0x000B846E; + break; + default: + B43legacy_BUG_ON(1); + return; + } + + for (i = 0; i < 5; i++) + b43legacy_ram_write(dev, i * 4, buffer[i]); + + /* dummy read follows */ + b43legacy_read32(dev, B43legacy_MMIO_STATUS_BITFIELD); + + b43legacy_write16(dev, 0x0568, 0x0000); + b43legacy_write16(dev, 0x07C0, 0x0000); + b43legacy_write16(dev, 0x050C, 0x0000); + b43legacy_write16(dev, 0x0508, 0x0000); + b43legacy_write16(dev, 0x050A, 0x0000); + b43legacy_write16(dev, 0x054C, 0x0000); + b43legacy_write16(dev, 0x056A, 0x0014); + b43legacy_write16(dev, 0x0568, 0x0826); + b43legacy_write16(dev, 0x0500, 0x0000); + b43legacy_write16(dev, 0x0502, 0x0030); + + if (phy->radio_ver == 0x2050 && phy->radio_rev <= 0x5) + b43legacy_radio_write16(dev, 0x0051, 0x0017); + for (i = 0x00; i < max_loop; i++) { + value = b43legacy_read16(dev, 0x050E); + if (value & 0x0080) + break; + udelay(10); + } + for (i = 0x00; i < 0x0A; i++) { + value = b43legacy_read16(dev, 0x050E); + if (value & 0x0400) + break; + udelay(10); + } + for (i = 0x00; i < 0x0A; i++) { + value = b43legacy_read16(dev, 0x0690); + if (!(value & 0x0100)) + break; + udelay(10); + } + if (phy->radio_ver == 0x2050 && phy->radio_rev <= 0x5) + b43legacy_radio_write16(dev, 0x0051, 0x0037); +} + +/* Turn the Analog ON/OFF */ +static void b43legacy_switch_analog(struct b43legacy_wldev *dev, int on) +{ + b43legacy_write16(dev, B43legacy_MMIO_PHY0, on ? 0 : 0xF4); +} + +void b43legacy_wireless_core_reset(struct b43legacy_wldev *dev, u32 flags) +{ + u32 tmslow; + u32 macctl; + + flags |= B43legacy_TMSLOW_PHYCLKEN; + flags |= B43legacy_TMSLOW_PHYRESET; + ssb_device_enable(dev->dev, flags); + msleep(2); /* Wait for the PLL to turn on. 
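+		    * The sequence below then asserts SSB_TMSLOW_FGC, clears
+		    * the PHY reset bit and performs flushing reads with short
+		    * delays before dropping FGC again.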
*/ + + /* Now take the PHY out of Reset again */ + tmslow = ssb_read32(dev->dev, SSB_TMSLOW); + tmslow |= SSB_TMSLOW_FGC; + tmslow &= ~B43legacy_TMSLOW_PHYRESET; + ssb_write32(dev->dev, SSB_TMSLOW, tmslow); + ssb_read32(dev->dev, SSB_TMSLOW); /* flush */ + msleep(1); + tmslow &= ~SSB_TMSLOW_FGC; + ssb_write32(dev->dev, SSB_TMSLOW, tmslow); + ssb_read32(dev->dev, SSB_TMSLOW); /* flush */ + msleep(1); + + /* Turn Analog ON */ + b43legacy_switch_analog(dev, 1); + + macctl = b43legacy_read32(dev, B43legacy_MMIO_MACCTL); + macctl &= ~B43legacy_MACCTL_GMODE; + if (flags & B43legacy_TMSLOW_GMODE) { + macctl |= B43legacy_MACCTL_GMODE; + dev->phy.gmode = 1; + } else + dev->phy.gmode = 0; + macctl |= B43legacy_MACCTL_IHR_ENABLED; + b43legacy_write32(dev, B43legacy_MMIO_MACCTL, macctl); +} + +static void handle_irq_transmit_status(struct b43legacy_wldev *dev) +{ + u32 v0; + u32 v1; + u16 tmp; + struct b43legacy_txstatus stat; + + while (1) { + v0 = b43legacy_read32(dev, B43legacy_MMIO_XMITSTAT_0); + if (!(v0 & 0x00000001)) + break; + v1 = b43legacy_read32(dev, B43legacy_MMIO_XMITSTAT_1); + + stat.cookie = (v0 >> 16); + stat.seq = (v1 & 0x0000FFFF); + stat.phy_stat = ((v1 & 0x00FF0000) >> 16); + tmp = (v0 & 0x0000FFFF); + stat.frame_count = ((tmp & 0xF000) >> 12); + stat.rts_count = ((tmp & 0x0F00) >> 8); + stat.supp_reason = ((tmp & 0x001C) >> 2); + stat.pm_indicated = !!(tmp & 0x0080); + stat.intermediate = !!(tmp & 0x0040); + stat.for_ampdu = !!(tmp & 0x0020); + stat.acked = !!(tmp & 0x0002); + + b43legacy_handle_txstatus(dev, &stat); + } +} + +static void drain_txstatus_queue(struct b43legacy_wldev *dev) +{ + u32 dummy; + + if (dev->dev->id.revision < 5) + return; + /* Read all entries from the microcode TXstatus FIFO + * and throw them away. + */ + while (1) { + dummy = b43legacy_read32(dev, B43legacy_MMIO_XMITSTAT_0); + if (!(dummy & 0x00000001)) + break; + dummy = b43legacy_read32(dev, B43legacy_MMIO_XMITSTAT_1); + } +} + +static u32 b43legacy_jssi_read(struct b43legacy_wldev *dev) +{ + u32 val = 0; + + val = b43legacy_shm_read16(dev, B43legacy_SHM_SHARED, 0x40A); + val <<= 16; + val |= b43legacy_shm_read16(dev, B43legacy_SHM_SHARED, 0x408); + + return val; +} + +static void b43legacy_jssi_write(struct b43legacy_wldev *dev, u32 jssi) +{ + b43legacy_shm_write16(dev, B43legacy_SHM_SHARED, 0x408, + (jssi & 0x0000FFFF)); + b43legacy_shm_write16(dev, B43legacy_SHM_SHARED, 0x40A, + (jssi & 0xFFFF0000) >> 16); +} + +static void b43legacy_generate_noise_sample(struct b43legacy_wldev *dev) +{ + b43legacy_jssi_write(dev, 0x7F7F7F7F); + b43legacy_write32(dev, B43legacy_MMIO_STATUS2_BITFIELD, + b43legacy_read32(dev, + B43legacy_MMIO_STATUS2_BITFIELD) + | (1 << 4)); + B43legacy_WARN_ON(dev->noisecalc.channel_at_start != + dev->phy.channel); +} + +static void b43legacy_calculate_link_quality(struct b43legacy_wldev *dev) +{ + /* Top half of Link Quality calculation. */ + + if (dev->noisecalc.calculation_running) + return; + dev->noisecalc.channel_at_start = dev->phy.channel; + dev->noisecalc.calculation_running = 1; + dev->noisecalc.nr_samples = 0; + + b43legacy_generate_noise_sample(dev); +} + +static void handle_irq_noise(struct b43legacy_wldev *dev) +{ + struct b43legacy_phy *phy = &dev->phy; + u16 tmp; + u8 noise[4]; + u8 i; + u8 j; + s32 average; + + /* Bottom half of Link Quality calculation. 
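+	 * Each noise sample delivers four JSSI bytes. They are mapped
+	 * through the NRSSI lookup table and collected until 8 samples are
+	 * available; the 32 values are then averaged, scaled and adjusted
+	 * before being stored in dev->stats.link_noise.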
*/ + + B43legacy_WARN_ON(!dev->noisecalc.calculation_running); + if (dev->noisecalc.channel_at_start != phy->channel) + goto drop_calculation; + *((__le32 *)noise) = cpu_to_le32(b43legacy_jssi_read(dev)); + if (noise[0] == 0x7F || noise[1] == 0x7F || + noise[2] == 0x7F || noise[3] == 0x7F) + goto generate_new; + + /* Get the noise samples. */ + B43legacy_WARN_ON(dev->noisecalc.nr_samples >= 8); + i = dev->noisecalc.nr_samples; + noise[0] = limit_value(noise[0], 0, ARRAY_SIZE(phy->nrssi_lt) - 1); + noise[1] = limit_value(noise[1], 0, ARRAY_SIZE(phy->nrssi_lt) - 1); + noise[2] = limit_value(noise[2], 0, ARRAY_SIZE(phy->nrssi_lt) - 1); + noise[3] = limit_value(noise[3], 0, ARRAY_SIZE(phy->nrssi_lt) - 1); + dev->noisecalc.samples[i][0] = phy->nrssi_lt[noise[0]]; + dev->noisecalc.samples[i][1] = phy->nrssi_lt[noise[1]]; + dev->noisecalc.samples[i][2] = phy->nrssi_lt[noise[2]]; + dev->noisecalc.samples[i][3] = phy->nrssi_lt[noise[3]]; + dev->noisecalc.nr_samples++; + if (dev->noisecalc.nr_samples == 8) { + /* Calculate the Link Quality by the noise samples. */ + average = 0; + for (i = 0; i < 8; i++) { + for (j = 0; j < 4; j++) + average += dev->noisecalc.samples[i][j]; + } + average /= (8 * 4); + average *= 125; + average += 64; + average /= 128; + tmp = b43legacy_shm_read16(dev, B43legacy_SHM_SHARED, + 0x40C); + tmp = (tmp / 128) & 0x1F; + if (tmp >= 8) + average += 2; + else + average -= 25; + if (tmp == 8) + average -= 72; + else + average -= 48; + + dev->stats.link_noise = average; +drop_calculation: + dev->noisecalc.calculation_running = 0; + return; + } +generate_new: + b43legacy_generate_noise_sample(dev); +} + +static void handle_irq_tbtt_indication(struct b43legacy_wldev *dev) +{ + if (b43legacy_is_mode(dev->wl, IEEE80211_IF_TYPE_AP)) { + /* TODO: PS TBTT */ + } else { + if (1/*FIXME: the last PSpoll frame was sent successfully */) + b43legacy_power_saving_ctl_bits(dev, -1, -1); + } + dev->reg124_set_0x4 = 0; + if (b43legacy_is_mode(dev->wl, IEEE80211_IF_TYPE_IBSS)) + dev->reg124_set_0x4 = 1; +} + +static void handle_irq_atim_end(struct b43legacy_wldev *dev) +{ + if (!dev->reg124_set_0x4) /*FIXME rename this variable*/ + return; + b43legacy_write32(dev, B43legacy_MMIO_STATUS2_BITFIELD, + b43legacy_read32(dev, B43legacy_MMIO_STATUS2_BITFIELD) + | 0x4); +} + +static void handle_irq_pmq(struct b43legacy_wldev *dev) +{ + u32 tmp; + + /* TODO: AP mode. */ + + while (1) { + tmp = b43legacy_read32(dev, B43legacy_MMIO_PS_STATUS); + if (!(tmp & 0x00000008)) + break; + } + /* 16bit write is odd, but correct. */ + b43legacy_write16(dev, B43legacy_MMIO_PS_STATUS, 0x0002); +} + +static void b43legacy_write_template_common(struct b43legacy_wldev *dev, + const u8 *data, u16 size, + u16 ram_offset, + u16 shm_size_offset, u8 rate) +{ + u32 i; + u32 tmp; + struct b43legacy_plcp_hdr4 plcp; + + plcp.data = 0; + b43legacy_generate_plcp_hdr(&plcp, size + FCS_LEN, rate); + b43legacy_ram_write(dev, ram_offset, le32_to_cpu(plcp.data)); + ram_offset += sizeof(u32); + /* The PLCP is 6 bytes long, but we only wrote 4 bytes, yet. + * So leave the first two bytes of the next write blank. 
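+	 * data[0] and data[1] therefore land in the upper two bytes of the
+	 * next 32-bit word, and the copy loop below starts at i == 2.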
+ */ + tmp = (u32)(data[0]) << 16; + tmp |= (u32)(data[1]) << 24; + b43legacy_ram_write(dev, ram_offset, tmp); + ram_offset += sizeof(u32); + for (i = 2; i < size; i += sizeof(u32)) { + tmp = (u32)(data[i + 0]); + if (i + 1 < size) + tmp |= (u32)(data[i + 1]) << 8; + if (i + 2 < size) + tmp |= (u32)(data[i + 2]) << 16; + if (i + 3 < size) + tmp |= (u32)(data[i + 3]) << 24; + b43legacy_ram_write(dev, ram_offset + i - 2, tmp); + } + b43legacy_shm_write16(dev, B43legacy_SHM_SHARED, shm_size_offset, + size + sizeof(struct b43legacy_plcp_hdr6)); +} + +static void b43legacy_write_beacon_template(struct b43legacy_wldev *dev, + u16 ram_offset, + u16 shm_size_offset, u8 rate) +{ + int len; + const u8 *data; + + B43legacy_WARN_ON(!dev->cached_beacon); + len = min((size_t)dev->cached_beacon->len, + 0x200 - sizeof(struct b43legacy_plcp_hdr6)); + data = (const u8 *)(dev->cached_beacon->data); + b43legacy_write_template_common(dev, data, + len, ram_offset, + shm_size_offset, rate); +} + +static void b43legacy_write_probe_resp_plcp(struct b43legacy_wldev *dev, + u16 shm_offset, u16 size, + u8 rate) +{ + struct b43legacy_plcp_hdr4 plcp; + u32 tmp; + __le16 dur; + + plcp.data = 0; + b43legacy_generate_plcp_hdr(&plcp, size + FCS_LEN, rate); + dur = ieee80211_generic_frame_duration(dev->wl->hw, + dev->wl->if_id, + size, + B43legacy_RATE_TO_100KBPS(rate)); + /* Write PLCP in two parts and timing for packet transfer */ + tmp = le32_to_cpu(plcp.data); + b43legacy_shm_write16(dev, B43legacy_SHM_SHARED, shm_offset, + tmp & 0xFFFF); + b43legacy_shm_write16(dev, B43legacy_SHM_SHARED, shm_offset + 2, + tmp >> 16); + b43legacy_shm_write16(dev, B43legacy_SHM_SHARED, shm_offset + 6, + le16_to_cpu(dur)); +} + +/* Instead of using custom probe response template, this function + * just patches custom beacon template by: + * 1) Changing packet type + * 2) Patching duration field + * 3) Stripping TIM + */ +static u8 *b43legacy_generate_probe_resp(struct b43legacy_wldev *dev, + u16 *dest_size, u8 rate) +{ + const u8 *src_data; + u8 *dest_data; + u16 src_size; + u16 elem_size; + u16 src_pos; + u16 dest_pos; + __le16 dur; + struct ieee80211_hdr *hdr; + + B43legacy_WARN_ON(!dev->cached_beacon); + src_size = dev->cached_beacon->len; + src_data = (const u8 *)dev->cached_beacon->data; + + if (unlikely(src_size < 0x24)) { + b43legacydbg(dev->wl, "b43legacy_generate_probe_resp: " + "invalid beacon\n"); + return NULL; + } + + dest_data = kmalloc(src_size, GFP_ATOMIC); + if (unlikely(!dest_data)) + return NULL; + + /* 0x24 is offset of first variable-len Information-Element + * in beacon frame. + */ + memcpy(dest_data, src_data, 0x24); + src_pos = 0x24; + dest_pos = 0x24; + for (; src_pos < src_size - 2; src_pos += elem_size) { + elem_size = src_data[src_pos + 1] + 2; + if (src_data[src_pos] != 0x05) { /* TIM */ + memcpy(dest_data + dest_pos, src_data + src_pos, + elem_size); + dest_pos += elem_size; + } + } + *dest_size = dest_pos; + hdr = (struct ieee80211_hdr *)dest_data; + + /* Set the frame control. 
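+	 * Re-tag the copied beacon as a Probe Response and recompute the
+	 * duration field for the stripped frame at the given rate.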
*/ + hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT | + IEEE80211_STYPE_PROBE_RESP); + dur = ieee80211_generic_frame_duration(dev->wl->hw, + dev->wl->if_id, + *dest_size, + B43legacy_RATE_TO_100KBPS(rate)); + hdr->duration_id = dur; + + return dest_data; +} + +static void b43legacy_write_probe_resp_template(struct b43legacy_wldev *dev, + u16 ram_offset, + u16 shm_size_offset, u8 rate) +{ + u8 *probe_resp_data; + u16 size; + + B43legacy_WARN_ON(!dev->cached_beacon); + size = dev->cached_beacon->len; + probe_resp_data = b43legacy_generate_probe_resp(dev, &size, rate); + if (unlikely(!probe_resp_data)) + return; + + /* Looks like PLCP headers plus packet timings are stored for + * all possible basic rates + */ + b43legacy_write_probe_resp_plcp(dev, 0x31A, size, + B43legacy_CCK_RATE_1MB); + b43legacy_write_probe_resp_plcp(dev, 0x32C, size, + B43legacy_CCK_RATE_2MB); + b43legacy_write_probe_resp_plcp(dev, 0x33E, size, + B43legacy_CCK_RATE_5MB); + b43legacy_write_probe_resp_plcp(dev, 0x350, size, + B43legacy_CCK_RATE_11MB); + + size = min((size_t)size, + 0x200 - sizeof(struct b43legacy_plcp_hdr6)); + b43legacy_write_template_common(dev, probe_resp_data, + size, ram_offset, + shm_size_offset, rate); + kfree(probe_resp_data); +} + +static int b43legacy_refresh_cached_beacon(struct b43legacy_wldev *dev, + struct sk_buff *beacon) +{ + if (dev->cached_beacon) + kfree_skb(dev->cached_beacon); + dev->cached_beacon = beacon; + + return 0; +} + +static void b43legacy_update_templates(struct b43legacy_wldev *dev) +{ + u32 status; + + B43legacy_WARN_ON(!dev->cached_beacon); + + b43legacy_write_beacon_template(dev, 0x68, 0x18, + B43legacy_CCK_RATE_1MB); + b43legacy_write_beacon_template(dev, 0x468, 0x1A, + B43legacy_CCK_RATE_1MB); + b43legacy_write_probe_resp_template(dev, 0x268, 0x4A, + B43legacy_CCK_RATE_11MB); + + status = b43legacy_read32(dev, B43legacy_MMIO_STATUS2_BITFIELD); + status |= 0x03; + b43legacy_write32(dev, B43legacy_MMIO_STATUS2_BITFIELD, status); +} + +static void b43legacy_refresh_templates(struct b43legacy_wldev *dev, + struct sk_buff *beacon) +{ + int err; + + err = b43legacy_refresh_cached_beacon(dev, beacon); + if (unlikely(err)) + return; + b43legacy_update_templates(dev); +} + +static void b43legacy_set_ssid(struct b43legacy_wldev *dev, + const u8 *ssid, u8 ssid_len) +{ + u32 tmp; + u16 i; + u16 len; + + len = min((u16)ssid_len, (u16)0x100); + for (i = 0; i < len; i += sizeof(u32)) { + tmp = (u32)(ssid[i + 0]); + if (i + 1 < len) + tmp |= (u32)(ssid[i + 1]) << 8; + if (i + 2 < len) + tmp |= (u32)(ssid[i + 2]) << 16; + if (i + 3 < len) + tmp |= (u32)(ssid[i + 3]) << 24; + b43legacy_shm_write32(dev, B43legacy_SHM_SHARED, + 0x380 + i, tmp); + } + b43legacy_shm_write16(dev, B43legacy_SHM_SHARED, + 0x48, len); +} + +static void b43legacy_set_beacon_int(struct b43legacy_wldev *dev, + u16 beacon_int) +{ + b43legacy_time_lock(dev); + if (dev->dev->id.revision >= 3) + b43legacy_write32(dev, 0x188, (beacon_int << 16)); + else { + b43legacy_write16(dev, 0x606, (beacon_int >> 6)); + b43legacy_write16(dev, 0x610, beacon_int); + } + b43legacy_time_unlock(dev); +} + +static void handle_irq_beacon(struct b43legacy_wldev *dev) +{ + u32 status; + + if (!b43legacy_is_mode(dev->wl, IEEE80211_IF_TYPE_AP)) + return; + + dev->irq_savedstate &= ~B43legacy_IRQ_BEACON; + status = b43legacy_read32(dev, B43legacy_MMIO_STATUS2_BITFIELD); + + if (!dev->cached_beacon || ((status & 0x1) && (status & 0x2))) { + /* ACK beacon IRQ. 
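+		 * Either no beacon is cached or both template status bits are
+		 * already set, so just acknowledge the IRQ and drop the
+		 * cached beacon.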
*/ + b43legacy_write32(dev, B43legacy_MMIO_GEN_IRQ_REASON, + B43legacy_IRQ_BEACON); + dev->irq_savedstate |= B43legacy_IRQ_BEACON; + if (dev->cached_beacon) + kfree_skb(dev->cached_beacon); + dev->cached_beacon = NULL; + return; + } + if (!(status & 0x1)) { + b43legacy_write_beacon_template(dev, 0x68, 0x18, + B43legacy_CCK_RATE_1MB); + status |= 0x1; + b43legacy_write32(dev, B43legacy_MMIO_STATUS2_BITFIELD, + status); + } + if (!(status & 0x2)) { + b43legacy_write_beacon_template(dev, 0x468, 0x1A, + B43legacy_CCK_RATE_1MB); + status |= 0x2; + b43legacy_write32(dev, B43legacy_MMIO_STATUS2_BITFIELD, + status); + } +} + +static void handle_irq_ucode_debug(struct b43legacy_wldev *dev) +{ +} + +/* Interrupt handler bottom-half */ +static void b43legacy_interrupt_tasklet(struct b43legacy_wldev *dev) +{ + u32 reason; + u32 dma_reason[ARRAY_SIZE(dev->dma_reason)]; + u32 merged_dma_reason = 0; + int i; + int activity = 0; + unsigned long flags; + + spin_lock_irqsave(&dev->wl->irq_lock, flags); + + B43legacy_WARN_ON(b43legacy_status(dev) < + B43legacy_STAT_INITIALIZED); + + reason = dev->irq_reason; + for (i = 0; i < ARRAY_SIZE(dma_reason); i++) { + dma_reason[i] = dev->dma_reason[i]; + merged_dma_reason |= dma_reason[i]; + } + + if (unlikely(reason & B43legacy_IRQ_MAC_TXERR)) + b43legacyerr(dev->wl, "MAC transmission error\n"); + + if (unlikely(reason & B43legacy_IRQ_PHY_TXERR)) + b43legacyerr(dev->wl, "PHY transmission error\n"); + + if (unlikely(merged_dma_reason & (B43legacy_DMAIRQ_FATALMASK | + B43legacy_DMAIRQ_NONFATALMASK))) { + if (merged_dma_reason & B43legacy_DMAIRQ_FATALMASK) { + b43legacyerr(dev->wl, "Fatal DMA error: " + "0x%08X, 0x%08X, 0x%08X, " + "0x%08X, 0x%08X, 0x%08X\n", + dma_reason[0], dma_reason[1], + dma_reason[2], dma_reason[3], + dma_reason[4], dma_reason[5]); + b43legacy_controller_restart(dev, "DMA error"); + mmiowb(); + spin_unlock_irqrestore(&dev->wl->irq_lock, flags); + return; + } + if (merged_dma_reason & B43legacy_DMAIRQ_NONFATALMASK) + b43legacyerr(dev->wl, "DMA error: " + "0x%08X, 0x%08X, 0x%08X, " + "0x%08X, 0x%08X, 0x%08X\n", + dma_reason[0], dma_reason[1], + dma_reason[2], dma_reason[3], + dma_reason[4], dma_reason[5]); + } + + if (unlikely(reason & B43legacy_IRQ_UCODE_DEBUG)) + handle_irq_ucode_debug(dev); + if (reason & B43legacy_IRQ_TBTT_INDI) + handle_irq_tbtt_indication(dev); + if (reason & B43legacy_IRQ_ATIM_END) + handle_irq_atim_end(dev); + if (reason & B43legacy_IRQ_BEACON) + handle_irq_beacon(dev); + if (reason & B43legacy_IRQ_PMQ) + handle_irq_pmq(dev); + if (reason & B43legacy_IRQ_TXFIFO_FLUSH_OK) + ;/*TODO*/ + if (reason & B43legacy_IRQ_NOISESAMPLE_OK) + handle_irq_noise(dev); + + /* Check the DMA reason registers for received data. */ + if (dma_reason[0] & B43legacy_DMAIRQ_RX_DONE) { + if (b43legacy_using_pio(dev)) + b43legacy_pio_rx(dev->pio.queue0); + else + b43legacy_dma_rx(dev->dma.rx_ring0); + /* We intentionally don't set "activity" to 1, here. */ + } + B43legacy_WARN_ON(dma_reason[1] & B43legacy_DMAIRQ_RX_DONE); + B43legacy_WARN_ON(dma_reason[2] & B43legacy_DMAIRQ_RX_DONE); + if (dma_reason[3] & B43legacy_DMAIRQ_RX_DONE) { + if (b43legacy_using_pio(dev)) + b43legacy_pio_rx(dev->pio.queue3); + else + b43legacy_dma_rx(dev->dma.rx_ring3); + activity = 1; + } + B43legacy_WARN_ON(dma_reason[4] & B43legacy_DMAIRQ_RX_DONE); + B43legacy_WARN_ON(dma_reason[5] & B43legacy_DMAIRQ_RX_DONE); + + if (reason & B43legacy_IRQ_TX_OK) { + handle_irq_transmit_status(dev); + activity = 1; + /* TODO: In AP mode, this also causes sending of powersave + responses. 
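+		 * handle_irq_transmit_status() above feeds every completed TX
+		 * status report to b43legacy_handle_txstatus().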
*/ + } + + if (!modparam_noleds) + b43legacy_leds_update(dev, activity); + b43legacy_interrupt_enable(dev, dev->irq_savedstate); + mmiowb(); + spin_unlock_irqrestore(&dev->wl->irq_lock, flags); +} + +static void pio_irq_workaround(struct b43legacy_wldev *dev, + u16 base, int queueidx) +{ + u16 rxctl; + + rxctl = b43legacy_read16(dev, base + B43legacy_PIO_RXCTL); + if (rxctl & B43legacy_PIO_RXCTL_DATAAVAILABLE) + dev->dma_reason[queueidx] |= B43legacy_DMAIRQ_RX_DONE; + else + dev->dma_reason[queueidx] &= ~B43legacy_DMAIRQ_RX_DONE; +} + +static void b43legacy_interrupt_ack(struct b43legacy_wldev *dev, u32 reason) +{ + if (b43legacy_using_pio(dev) && + (dev->dev->id.revision < 3) && + (!(reason & B43legacy_IRQ_PIO_WORKAROUND))) { + /* Apply a PIO specific workaround to the dma_reasons */ + pio_irq_workaround(dev, B43legacy_MMIO_PIO1_BASE, 0); + pio_irq_workaround(dev, B43legacy_MMIO_PIO2_BASE, 1); + pio_irq_workaround(dev, B43legacy_MMIO_PIO3_BASE, 2); + pio_irq_workaround(dev, B43legacy_MMIO_PIO4_BASE, 3); + } + + b43legacy_write32(dev, B43legacy_MMIO_GEN_IRQ_REASON, reason); + + b43legacy_write32(dev, B43legacy_MMIO_DMA0_REASON, + dev->dma_reason[0]); + b43legacy_write32(dev, B43legacy_MMIO_DMA1_REASON, + dev->dma_reason[1]); + b43legacy_write32(dev, B43legacy_MMIO_DMA2_REASON, + dev->dma_reason[2]); + b43legacy_write32(dev, B43legacy_MMIO_DMA3_REASON, + dev->dma_reason[3]); + b43legacy_write32(dev, B43legacy_MMIO_DMA4_REASON, + dev->dma_reason[4]); + b43legacy_write32(dev, B43legacy_MMIO_DMA5_REASON, + dev->dma_reason[5]); +} + +/* Interrupt handler top-half */ +static irqreturn_t b43legacy_interrupt_handler(int irq, void *dev_id) +{ + irqreturn_t ret = IRQ_NONE; + struct b43legacy_wldev *dev = dev_id; + u32 reason; + + if (!dev) + return IRQ_NONE; + + spin_lock(&dev->wl->irq_lock); + + if (b43legacy_status(dev) < B43legacy_STAT_STARTED) + goto out; + reason = b43legacy_read32(dev, B43legacy_MMIO_GEN_IRQ_REASON); + if (reason == 0xffffffff) /* shared IRQ */ + goto out; + ret = IRQ_HANDLED; + reason &= b43legacy_read32(dev, B43legacy_MMIO_GEN_IRQ_MASK); + if (!reason) + goto out; + + dev->dma_reason[0] = b43legacy_read32(dev, + B43legacy_MMIO_DMA0_REASON) + & 0x0001DC00; + dev->dma_reason[1] = b43legacy_read32(dev, + B43legacy_MMIO_DMA1_REASON) + & 0x0000DC00; + dev->dma_reason[2] = b43legacy_read32(dev, + B43legacy_MMIO_DMA2_REASON) + & 0x0000DC00; + dev->dma_reason[3] = b43legacy_read32(dev, + B43legacy_MMIO_DMA3_REASON) + & 0x0001DC00; + dev->dma_reason[4] = b43legacy_read32(dev, + B43legacy_MMIO_DMA4_REASON) + & 0x0000DC00; + dev->dma_reason[5] = b43legacy_read32(dev, + B43legacy_MMIO_DMA5_REASON) + & 0x0000DC00; + + b43legacy_interrupt_ack(dev, reason); + /* disable all IRQs. They are enabled again in the bottom half. */ + dev->irq_savedstate = b43legacy_interrupt_disable(dev, + B43legacy_IRQ_ALL); + /* save the reason code and call our bottom half. 
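+	 * The tasklet picks up dev->irq_reason and dev->dma_reason[] under
+	 * wl->irq_lock and re-enables the saved IRQ mask once it is done.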
*/ + dev->irq_reason = reason; + tasklet_schedule(&dev->isr_tasklet); +out: + mmiowb(); + spin_unlock(&dev->wl->irq_lock); + + return ret; +} + +static void b43legacy_release_firmware(struct b43legacy_wldev *dev) +{ + release_firmware(dev->fw.ucode); + dev->fw.ucode = NULL; + release_firmware(dev->fw.pcm); + dev->fw.pcm = NULL; + release_firmware(dev->fw.initvals); + dev->fw.initvals = NULL; + release_firmware(dev->fw.initvals_band); + dev->fw.initvals_band = NULL; +} + +static void b43legacy_print_fw_helptext(struct b43legacy_wl *wl) +{ + b43legacyerr(wl, "You must go to http://linuxwireless.org/en/users/" + "Drivers/bcm43xx#devicefirmware " + "and download the correct firmware (version 3).\n"); +} + +static int do_request_fw(struct b43legacy_wldev *dev, + const char *name, + const struct firmware **fw) +{ + char path[sizeof(modparam_fwpostfix) + 32]; + struct b43legacy_fw_header *hdr; + u32 size; + int err; + + if (!name) + return 0; + + snprintf(path, ARRAY_SIZE(path), + "b43legacy%s/%s.fw", + modparam_fwpostfix, name); + err = request_firmware(fw, path, dev->dev->dev); + if (err) { + b43legacyerr(dev->wl, "Firmware file \"%s\" not found " + "or load failed.\n", path); + return err; + } + if ((*fw)->size < sizeof(struct b43legacy_fw_header)) + goto err_format; + hdr = (struct b43legacy_fw_header *)((*fw)->data); + switch (hdr->type) { + case B43legacy_FW_TYPE_UCODE: + case B43legacy_FW_TYPE_PCM: + size = be32_to_cpu(hdr->size); + if (size != (*fw)->size - sizeof(struct b43legacy_fw_header)) + goto err_format; + /* fallthrough */ + case B43legacy_FW_TYPE_IV: + if (hdr->ver != 1) + goto err_format; + break; + default: + goto err_format; + } + + return err; + +err_format: + b43legacyerr(dev->wl, "Firmware file \"%s\" format error.\n", path); + return -EPROTO; +} + +static int b43legacy_request_firmware(struct b43legacy_wldev *dev) +{ + struct b43legacy_firmware *fw = &dev->fw; + const u8 rev = dev->dev->id.revision; + const char *filename; + u32 tmshigh; + int err; + + tmshigh = ssb_read32(dev->dev, SSB_TMSHIGH); + if (!fw->ucode) { + if (rev == 2) + filename = "ucode2"; + else if (rev == 4) + filename = "ucode4"; + else + filename = "ucode5"; + err = do_request_fw(dev, filename, &fw->ucode); + if (err) + goto err_load; + } + if (!fw->pcm) { + if (rev < 5) + filename = "pcm4"; + else + filename = "pcm5"; + err = do_request_fw(dev, filename, &fw->pcm); + if (err) + goto err_load; + } + if (!fw->initvals) { + switch (dev->phy.type) { + case B43legacy_PHYTYPE_G: + if ((rev >= 5) && (rev <= 10)) + filename = "b0g0initvals5"; + else if (rev == 2 || rev == 4) + filename = "b0g0initvals2"; + else + goto err_no_initvals; + break; + default: + goto err_no_initvals; + } + err = do_request_fw(dev, filename, &fw->initvals); + if (err) + goto err_load; + } + if (!fw->initvals_band) { + switch (dev->phy.type) { + case B43legacy_PHYTYPE_G: + if ((rev >= 5) && (rev <= 10)) + filename = "b0g0bsinitvals5"; + else if (rev >= 11) + filename = NULL; + else if (rev == 2 || rev == 4) + filename = NULL; + else + goto err_no_initvals; + break; + default: + goto err_no_initvals; + } + err = do_request_fw(dev, filename, &fw->initvals_band); + if (err) + goto err_load; + } + + return 0; + +err_load: + b43legacy_print_fw_helptext(dev->wl); + goto error; + +err_no_initvals: + err = -ENODEV; + b43legacyerr(dev->wl, "No Initial Values firmware file for PHY %u, " + "core rev %u\n", dev->phy.type, rev); + goto error; + +error: + b43legacy_release_firmware(dev); + return err; +} + +static int 
b43legacy_upload_microcode(struct b43legacy_wldev *dev) +{ + const size_t hdr_len = sizeof(struct b43legacy_fw_header); + const __be32 *data; + unsigned int i; + unsigned int len; + u16 fwrev; + u16 fwpatch; + u16 fwdate; + u16 fwtime; + u32 tmp; + int err = 0; + + /* Upload Microcode. */ + data = (__be32 *) (dev->fw.ucode->data + hdr_len); + len = (dev->fw.ucode->size - hdr_len) / sizeof(__be32); + b43legacy_shm_control_word(dev, + B43legacy_SHM_UCODE | + B43legacy_SHM_AUTOINC_W, + 0x0000); + for (i = 0; i < len; i++) { + b43legacy_write32(dev, B43legacy_MMIO_SHM_DATA, + be32_to_cpu(data[i])); + udelay(10); + } + + if (dev->fw.pcm) { + /* Upload PCM data. */ + data = (__be32 *) (dev->fw.pcm->data + hdr_len); + len = (dev->fw.pcm->size - hdr_len) / sizeof(__be32); + b43legacy_shm_control_word(dev, B43legacy_SHM_HW, 0x01EA); + b43legacy_write32(dev, B43legacy_MMIO_SHM_DATA, 0x00004000); + /* No need for autoinc bit in SHM_HW */ + b43legacy_shm_control_word(dev, B43legacy_SHM_HW, 0x01EB); + for (i = 0; i < len; i++) { + b43legacy_write32(dev, B43legacy_MMIO_SHM_DATA, + be32_to_cpu(data[i])); + udelay(10); + } + } + + b43legacy_write32(dev, B43legacy_MMIO_GEN_IRQ_REASON, + B43legacy_IRQ_ALL); + b43legacy_write32(dev, B43legacy_MMIO_STATUS_BITFIELD, 0x00020402); + + /* Wait for the microcode to load and respond */ + i = 0; + while (1) { + tmp = b43legacy_read32(dev, B43legacy_MMIO_GEN_IRQ_REASON); + if (tmp == B43legacy_IRQ_MAC_SUSPENDED) + break; + i++; + if (i >= B43legacy_IRQWAIT_MAX_RETRIES) { + b43legacyerr(dev->wl, "Microcode not responding\n"); + b43legacy_print_fw_helptext(dev->wl); + err = -ENODEV; + goto out; + } + udelay(10); + } + /* dummy read follows */ + b43legacy_read32(dev, B43legacy_MMIO_GEN_IRQ_REASON); + + /* Get and check the revisions. */ + fwrev = b43legacy_shm_read16(dev, B43legacy_SHM_SHARED, + B43legacy_SHM_SH_UCODEREV); + fwpatch = b43legacy_shm_read16(dev, B43legacy_SHM_SHARED, + B43legacy_SHM_SH_UCODEPATCH); + fwdate = b43legacy_shm_read16(dev, B43legacy_SHM_SHARED, + B43legacy_SHM_SH_UCODEDATE); + fwtime = b43legacy_shm_read16(dev, B43legacy_SHM_SHARED, + B43legacy_SHM_SH_UCODETIME); + + if (fwrev > 0x128) { + b43legacyerr(dev->wl, "YOU ARE TRYING TO LOAD V4 FIRMWARE." + " Only firmware from binary drivers version 3.x" + " is supported. 
You must change your firmware" + " files.\n"); + b43legacy_print_fw_helptext(dev->wl); + b43legacy_write32(dev, B43legacy_MMIO_STATUS_BITFIELD, 0); + err = -EOPNOTSUPP; + goto out; + } + b43legacydbg(dev->wl, "Loading firmware version 0x%X, patch level %u " + "(20%.2i-%.2i-%.2i %.2i:%.2i:%.2i)\n", fwrev, fwpatch, + (fwdate >> 12) & 0xF, (fwdate >> 8) & 0xF, fwdate & 0xFF, + (fwtime >> 11) & 0x1F, (fwtime >> 5) & 0x3F, fwtime & 0x1F); + + dev->fw.rev = fwrev; + dev->fw.patch = fwpatch; + +out: + return err; +} + +static int b43legacy_write_initvals(struct b43legacy_wldev *dev, + const struct b43legacy_iv *ivals, + size_t count, + size_t array_size) +{ + const struct b43legacy_iv *iv; + u16 offset; + size_t i; + bool bit32; + + BUILD_BUG_ON(sizeof(struct b43legacy_iv) != 6); + iv = ivals; + for (i = 0; i < count; i++) { + if (array_size < sizeof(iv->offset_size)) + goto err_format; + array_size -= sizeof(iv->offset_size); + offset = be16_to_cpu(iv->offset_size); + bit32 = !!(offset & B43legacy_IV_32BIT); + offset &= B43legacy_IV_OFFSET_MASK; + if (offset >= 0x1000) + goto err_format; + if (bit32) { + u32 value; + + if (array_size < sizeof(iv->data.d32)) + goto err_format; + array_size -= sizeof(iv->data.d32); + + value = be32_to_cpu(get_unaligned(&iv->data.d32)); + b43legacy_write32(dev, offset, value); + + iv = (const struct b43legacy_iv *)((const uint8_t *)iv + + sizeof(__be16) + + sizeof(__be32)); + } else { + u16 value; + + if (array_size < sizeof(iv->data.d16)) + goto err_format; + array_size -= sizeof(iv->data.d16); + + value = be16_to_cpu(iv->data.d16); + b43legacy_write16(dev, offset, value); + + iv = (const struct b43legacy_iv *)((const uint8_t *)iv + + sizeof(__be16) + + sizeof(__be16)); + } + } + if (array_size) + goto err_format; + + return 0; + +err_format: + b43legacyerr(dev->wl, "Initial Values Firmware file-format error.\n"); + b43legacy_print_fw_helptext(dev->wl); + + return -EPROTO; +} + +static int b43legacy_upload_initvals(struct b43legacy_wldev *dev) +{ + const size_t hdr_len = sizeof(struct b43legacy_fw_header); + const struct b43legacy_fw_header *hdr; + struct b43legacy_firmware *fw = &dev->fw; + const struct b43legacy_iv *ivals; + size_t count; + int err; + + hdr = (const struct b43legacy_fw_header *)(fw->initvals->data); + ivals = (const struct b43legacy_iv *)(fw->initvals->data + hdr_len); + count = be32_to_cpu(hdr->size); + err = b43legacy_write_initvals(dev, ivals, count, + fw->initvals->size - hdr_len); + if (err) + goto out; + if (fw->initvals_band) { + hdr = (const struct b43legacy_fw_header *) + (fw->initvals_band->data); + ivals = (const struct b43legacy_iv *)(fw->initvals_band->data + + hdr_len); + count = be32_to_cpu(hdr->size); + err = b43legacy_write_initvals(dev, ivals, count, + fw->initvals_band->size - hdr_len); + if (err) + goto out; + } +out: + + return err; +} + +/* Initialize the GPIOs + * http://bcm-specs.sipsolutions.net/GPIO + */ +static int b43legacy_gpio_init(struct b43legacy_wldev *dev) +{ + struct ssb_bus *bus = dev->dev->bus; + struct ssb_device *gpiodev, *pcidev = NULL; + u32 mask; + u32 set; + + b43legacy_write32(dev, B43legacy_MMIO_STATUS_BITFIELD, + b43legacy_read32(dev, + B43legacy_MMIO_STATUS_BITFIELD) + & 0xFFFF3FFF); + + b43legacy_leds_switch_all(dev, 0); + b43legacy_write16(dev, B43legacy_MMIO_GPIO_MASK, + b43legacy_read16(dev, + B43legacy_MMIO_GPIO_MASK) + | 0x000F); + + mask = 0x0000001F; + set = 0x0000000F; + if (dev->dev->bus->chip_id == 0x4301) { + mask |= 0x0060; + set |= 0x0060; + } + if 
(dev->dev->bus->sprom.r1.boardflags_lo & B43legacy_BFL_PACTRL) { + b43legacy_write16(dev, B43legacy_MMIO_GPIO_MASK, + b43legacy_read16(dev, + B43legacy_MMIO_GPIO_MASK) + | 0x0200); + mask |= 0x0200; + set |= 0x0200; + } + if (dev->dev->id.revision >= 2) + mask |= 0x0010; /* FIXME: This is redundant. */ + +#ifdef CONFIG_SSB_DRIVER_PCICORE + pcidev = bus->pcicore.dev; +#endif + gpiodev = bus->chipco.dev ? : pcidev; + if (!gpiodev) + return 0; + ssb_write32(gpiodev, B43legacy_GPIO_CONTROL, + (ssb_read32(gpiodev, B43legacy_GPIO_CONTROL) + & mask) | set); + + return 0; +} + +/* Turn off all GPIO stuff. Call this on module unload, for example. */ +static void b43legacy_gpio_cleanup(struct b43legacy_wldev *dev) +{ + struct ssb_bus *bus = dev->dev->bus; + struct ssb_device *gpiodev, *pcidev = NULL; + +#ifdef CONFIG_SSB_DRIVER_PCICORE + pcidev = bus->pcicore.dev; +#endif + gpiodev = bus->chipco.dev ? : pcidev; + if (!gpiodev) + return; + ssb_write32(gpiodev, B43legacy_GPIO_CONTROL, 0); +} + +/* http://bcm-specs.sipsolutions.net/EnableMac */ +void b43legacy_mac_enable(struct b43legacy_wldev *dev) +{ + dev->mac_suspended--; + B43legacy_WARN_ON(dev->mac_suspended < 0); + if (dev->mac_suspended == 0) { + b43legacy_write32(dev, B43legacy_MMIO_STATUS_BITFIELD, + b43legacy_read32(dev, + B43legacy_MMIO_STATUS_BITFIELD) + | B43legacy_SBF_MAC_ENABLED); + b43legacy_write32(dev, B43legacy_MMIO_GEN_IRQ_REASON, + B43legacy_IRQ_MAC_SUSPENDED); + /* the next two are dummy reads */ + b43legacy_read32(dev, B43legacy_MMIO_STATUS_BITFIELD); + b43legacy_read32(dev, B43legacy_MMIO_GEN_IRQ_REASON); + b43legacy_power_saving_ctl_bits(dev, -1, -1); + } +} + +/* http://bcm-specs.sipsolutions.net/SuspendMAC */ +void b43legacy_mac_suspend(struct b43legacy_wldev *dev) +{ + int i; + u32 tmp; + + B43legacy_WARN_ON(dev->mac_suspended < 0); + if (dev->mac_suspended == 0) { + b43legacy_power_saving_ctl_bits(dev, -1, 1); + b43legacy_write32(dev, B43legacy_MMIO_STATUS_BITFIELD, + b43legacy_read32(dev, + B43legacy_MMIO_STATUS_BITFIELD) + & ~B43legacy_SBF_MAC_ENABLED); + b43legacy_read32(dev, B43legacy_MMIO_GEN_IRQ_REASON); + for (i = 10000; i; i--) { + tmp = b43legacy_read32(dev, + B43legacy_MMIO_GEN_IRQ_REASON); + if (tmp & B43legacy_IRQ_MAC_SUSPENDED) + goto out; + udelay(1); + } + b43legacyerr(dev->wl, "MAC suspend failed\n"); + } +out: + dev->mac_suspended++; +} + +static void b43legacy_adjust_opmode(struct b43legacy_wldev *dev) +{ + struct b43legacy_wl *wl = dev->wl; + u32 ctl; + u16 cfp_pretbtt; + + ctl = b43legacy_read32(dev, B43legacy_MMIO_MACCTL); + /* Reset status to STA infrastructure mode. 
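+	 * Clear the AP, promiscuous and keep-frame bits and set INFRA
+	 * first; the interface type and filter flags below switch the
+	 * relevant bits back on.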
*/ + ctl &= ~B43legacy_MACCTL_AP; + ctl &= ~B43legacy_MACCTL_KEEP_CTL; + ctl &= ~B43legacy_MACCTL_KEEP_BADPLCP; + ctl &= ~B43legacy_MACCTL_KEEP_BAD; + ctl &= ~B43legacy_MACCTL_PROMISC; + ctl &= ~B43legacy_MACCTL_BEACPROMISC; + ctl |= B43legacy_MACCTL_INFRA; + + if (b43legacy_is_mode(wl, IEEE80211_IF_TYPE_AP)) + ctl |= B43legacy_MACCTL_AP; + else if (b43legacy_is_mode(wl, IEEE80211_IF_TYPE_IBSS)) + ctl &= ~B43legacy_MACCTL_INFRA; + + if (wl->filter_flags & FIF_CONTROL) + ctl |= B43legacy_MACCTL_KEEP_CTL; + if (wl->filter_flags & FIF_FCSFAIL) + ctl |= B43legacy_MACCTL_KEEP_BAD; + if (wl->filter_flags & FIF_PLCPFAIL) + ctl |= B43legacy_MACCTL_KEEP_BADPLCP; + if (wl->filter_flags & FIF_PROMISC_IN_BSS) + ctl |= B43legacy_MACCTL_PROMISC; + if (wl->filter_flags & FIF_BCN_PRBRESP_PROMISC) + ctl |= B43legacy_MACCTL_BEACPROMISC; + + /* Workaround: On old hardware the HW-MAC-address-filter + * doesn't work properly, so always run promisc in filter + * it in software. */ + if (dev->dev->id.revision <= 4) + ctl |= B43legacy_MACCTL_PROMISC; + + b43legacy_write32(dev, B43legacy_MMIO_MACCTL, ctl); + + cfp_pretbtt = 2; + if ((ctl & B43legacy_MACCTL_INFRA) && + !(ctl & B43legacy_MACCTL_AP)) { + if (dev->dev->bus->chip_id == 0x4306 && + dev->dev->bus->chip_rev == 3) + cfp_pretbtt = 100; + else + cfp_pretbtt = 50; + } + b43legacy_write16(dev, 0x612, cfp_pretbtt); +} + +static void b43legacy_rate_memory_write(struct b43legacy_wldev *dev, + u16 rate, + int is_ofdm) +{ + u16 offset; + + if (is_ofdm) { + offset = 0x480; + offset += (b43legacy_plcp_get_ratecode_ofdm(rate) & 0x000F) * 2; + } else { + offset = 0x4C0; + offset += (b43legacy_plcp_get_ratecode_cck(rate) & 0x000F) * 2; + } + b43legacy_shm_write16(dev, B43legacy_SHM_SHARED, offset + 0x20, + b43legacy_shm_read16(dev, + B43legacy_SHM_SHARED, offset)); +} + +static void b43legacy_rate_memory_init(struct b43legacy_wldev *dev) +{ + switch (dev->phy.type) { + case B43legacy_PHYTYPE_G: + b43legacy_rate_memory_write(dev, B43legacy_OFDM_RATE_6MB, 1); + b43legacy_rate_memory_write(dev, B43legacy_OFDM_RATE_12MB, 1); + b43legacy_rate_memory_write(dev, B43legacy_OFDM_RATE_18MB, 1); + b43legacy_rate_memory_write(dev, B43legacy_OFDM_RATE_24MB, 1); + b43legacy_rate_memory_write(dev, B43legacy_OFDM_RATE_36MB, 1); + b43legacy_rate_memory_write(dev, B43legacy_OFDM_RATE_48MB, 1); + b43legacy_rate_memory_write(dev, B43legacy_OFDM_RATE_54MB, 1); + /* fallthrough */ + case B43legacy_PHYTYPE_B: + b43legacy_rate_memory_write(dev, B43legacy_CCK_RATE_1MB, 0); + b43legacy_rate_memory_write(dev, B43legacy_CCK_RATE_2MB, 0); + b43legacy_rate_memory_write(dev, B43legacy_CCK_RATE_5MB, 0); + b43legacy_rate_memory_write(dev, B43legacy_CCK_RATE_11MB, 0); + break; + default: + B43legacy_BUG_ON(1); + } +} + +/* Set the TX-Antenna for management frames sent by firmware. */ +static void b43legacy_mgmtframe_txantenna(struct b43legacy_wldev *dev, + int antenna) +{ + u16 ant = 0; + u16 tmp; + + switch (antenna) { + case B43legacy_ANTENNA0: + ant |= B43legacy_TX4_PHY_ANT0; + break; + case B43legacy_ANTENNA1: + ant |= B43legacy_TX4_PHY_ANT1; + break; + case B43legacy_ANTENNA_AUTO: + ant |= B43legacy_TX4_PHY_ANTLAST; + break; + default: + B43legacy_BUG_ON(1); + } + + /* FIXME We also need to set the other flags of the PHY control + * field somewhere. 
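+	 * For now only the antenna bits are updated, in the SHM PHY-control
+	 * words for beacons, ACK/CTS and probe responses.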
*/ + + /* For Beacons */ + tmp = b43legacy_shm_read16(dev, B43legacy_SHM_SHARED, + B43legacy_SHM_SH_BEACPHYCTL); + tmp = (tmp & ~B43legacy_TX4_PHY_ANT) | ant; + b43legacy_shm_write16(dev, B43legacy_SHM_SHARED, + B43legacy_SHM_SH_BEACPHYCTL, tmp); + /* For ACK/CTS */ + tmp = b43legacy_shm_read16(dev, B43legacy_SHM_SHARED, + B43legacy_SHM_SH_ACKCTSPHYCTL); + tmp = (tmp & ~B43legacy_TX4_PHY_ANT) | ant; + b43legacy_shm_write16(dev, B43legacy_SHM_SHARED, + B43legacy_SHM_SH_ACKCTSPHYCTL, tmp); + /* For Probe Resposes */ + tmp = b43legacy_shm_read16(dev, B43legacy_SHM_SHARED, + B43legacy_SHM_SH_PRPHYCTL); + tmp = (tmp & ~B43legacy_TX4_PHY_ANT) | ant; + b43legacy_shm_write16(dev, B43legacy_SHM_SHARED, + B43legacy_SHM_SH_PRPHYCTL, tmp); +} + +/* Returns TRUE, if the radio is enabled in hardware. */ +static bool b43legacy_is_hw_radio_enabled(struct b43legacy_wldev *dev) +{ + if (dev->phy.rev >= 3) { + if (!(b43legacy_read32(dev, B43legacy_MMIO_RADIO_HWENABLED_HI) + & B43legacy_MMIO_RADIO_HWENABLED_HI_MASK)) + return 1; + } else { + if (b43legacy_read16(dev, B43legacy_MMIO_RADIO_HWENABLED_LO) + & B43legacy_MMIO_RADIO_HWENABLED_LO_MASK) + return 1; + } + return 0; +} + +/* This is the opposite of b43legacy_chip_init() */ +static void b43legacy_chip_exit(struct b43legacy_wldev *dev) +{ + b43legacy_radio_turn_off(dev); + if (!modparam_noleds) + b43legacy_leds_exit(dev); + b43legacy_gpio_cleanup(dev); + /* firmware is released later */ +} + +/* Initialize the chip + * http://bcm-specs.sipsolutions.net/ChipInit + */ +static int b43legacy_chip_init(struct b43legacy_wldev *dev) +{ + struct b43legacy_phy *phy = &dev->phy; + int err; + int tmp; + u32 value32; + u16 value16; + + b43legacy_write32(dev, B43legacy_MMIO_STATUS_BITFIELD, + B43legacy_SBF_CORE_READY + | B43legacy_SBF_400); + + err = b43legacy_request_firmware(dev); + if (err) + goto out; + err = b43legacy_upload_microcode(dev); + if (err) + goto out; /* firmware is released later */ + + err = b43legacy_gpio_init(dev); + if (err) + goto out; /* firmware is released later */ + err = b43legacy_upload_initvals(dev); + if (err) + goto err_gpio_cleanup; + b43legacy_radio_turn_on(dev); + + b43legacy_write16(dev, 0x03E6, 0x0000); + err = b43legacy_phy_init(dev); + if (err) + goto err_radio_off; + + /* Select initial Interference Mitigation. 
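+	 * phy->interfmode is temporarily reset to NONE before the saved
+	 * mode is re-applied through
+	 * b43legacy_radio_set_interference_mitigation().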
*/ + tmp = phy->interfmode; + phy->interfmode = B43legacy_INTERFMODE_NONE; + b43legacy_radio_set_interference_mitigation(dev, tmp); + + b43legacy_phy_set_antenna_diversity(dev); + b43legacy_mgmtframe_txantenna(dev, B43legacy_ANTENNA_DEFAULT); + + if (phy->type == B43legacy_PHYTYPE_B) { + value16 = b43legacy_read16(dev, 0x005E); + value16 |= 0x0004; + b43legacy_write16(dev, 0x005E, value16); + } + b43legacy_write32(dev, 0x0100, 0x01000000); + if (dev->dev->id.revision < 5) + b43legacy_write32(dev, 0x010C, 0x01000000); + + value32 = b43legacy_read32(dev, B43legacy_MMIO_STATUS_BITFIELD); + value32 &= ~B43legacy_SBF_MODE_NOTADHOC; + b43legacy_write32(dev, B43legacy_MMIO_STATUS_BITFIELD, value32); + value32 = b43legacy_read32(dev, B43legacy_MMIO_STATUS_BITFIELD); + value32 |= B43legacy_SBF_MODE_NOTADHOC; + b43legacy_write32(dev, B43legacy_MMIO_STATUS_BITFIELD, value32); + + if (b43legacy_using_pio(dev)) { + b43legacy_write32(dev, 0x0210, 0x00000100); + b43legacy_write32(dev, 0x0230, 0x00000100); + b43legacy_write32(dev, 0x0250, 0x00000100); + b43legacy_write32(dev, 0x0270, 0x00000100); + b43legacy_shm_write16(dev, B43legacy_SHM_SHARED, 0x0034, + 0x0000); + } + + /* Probe Response Timeout value */ + /* FIXME: Default to 0, has to be set by ioctl probably... :-/ */ + b43legacy_shm_write16(dev, B43legacy_SHM_SHARED, 0x0074, 0x0000); + + /* Initially set the wireless operation mode. */ + b43legacy_adjust_opmode(dev); + + if (dev->dev->id.revision < 3) { + b43legacy_write16(dev, 0x060E, 0x0000); + b43legacy_write16(dev, 0x0610, 0x8000); + b43legacy_write16(dev, 0x0604, 0x0000); + b43legacy_write16(dev, 0x0606, 0x0200); + } else { + b43legacy_write32(dev, 0x0188, 0x80000000); + b43legacy_write32(dev, 0x018C, 0x02000000); + } + b43legacy_write32(dev, B43legacy_MMIO_GEN_IRQ_REASON, 0x00004000); + b43legacy_write32(dev, B43legacy_MMIO_DMA0_IRQ_MASK, 0x0001DC00); + b43legacy_write32(dev, B43legacy_MMIO_DMA1_IRQ_MASK, 0x0000DC00); + b43legacy_write32(dev, B43legacy_MMIO_DMA2_IRQ_MASK, 0x0000DC00); + b43legacy_write32(dev, B43legacy_MMIO_DMA3_IRQ_MASK, 0x0001DC00); + b43legacy_write32(dev, B43legacy_MMIO_DMA4_IRQ_MASK, 0x0000DC00); + b43legacy_write32(dev, B43legacy_MMIO_DMA5_IRQ_MASK, 0x0000DC00); + + value32 = ssb_read32(dev->dev, SSB_TMSLOW); + value32 |= 0x00100000; + ssb_write32(dev->dev, SSB_TMSLOW, value32); + + b43legacy_write16(dev, B43legacy_MMIO_POWERUP_DELAY, + dev->dev->bus->chipco.fast_pwrup_delay); + + B43legacy_WARN_ON(err != 0); + b43legacydbg(dev->wl, "Chip initialized\n"); +out: + return err; + +err_radio_off: + b43legacy_radio_turn_off(dev); +err_gpio_cleanup: + b43legacy_gpio_cleanup(dev); + goto out; +} + +static void b43legacy_periodic_every120sec(struct b43legacy_wldev *dev) +{ + struct b43legacy_phy *phy = &dev->phy; + + if (phy->type != B43legacy_PHYTYPE_G || phy->rev < 2) + return; + + b43legacy_mac_suspend(dev); + b43legacy_phy_lo_g_measure(dev); + b43legacy_mac_enable(dev); +} + +static void b43legacy_periodic_every60sec(struct b43legacy_wldev *dev) +{ + b43legacy_phy_lo_mark_all_unused(dev); + if (dev->dev->bus->sprom.r1.boardflags_lo & B43legacy_BFL_RSSI) { + b43legacy_mac_suspend(dev); + b43legacy_calc_nrssi_slope(dev); + b43legacy_mac_enable(dev); + } +} + +static void b43legacy_periodic_every30sec(struct b43legacy_wldev *dev) +{ + /* Update device statistics. */ + b43legacy_calculate_link_quality(dev); +} + +static void b43legacy_periodic_every15sec(struct b43legacy_wldev *dev) +{ + b43legacy_phy_xmitpower(dev); /* FIXME: unless scanning? 
*/ +} + +static void b43legacy_periodic_every1sec(struct b43legacy_wldev *dev) +{ + bool radio_hw_enable; + + /* check if radio hardware enabled status changed */ + radio_hw_enable = b43legacy_is_hw_radio_enabled(dev); + if (unlikely(dev->radio_hw_enable != radio_hw_enable)) { + dev->radio_hw_enable = radio_hw_enable; + b43legacyinfo(dev->wl, "Radio hardware status changed to %s\n", + (radio_hw_enable) ? "enabled" : "disabled"); + b43legacy_leds_update(dev, 0); + } +} + +static void do_periodic_work(struct b43legacy_wldev *dev) +{ + unsigned int state; + + state = dev->periodic_state; + if (state % 120 == 0) + b43legacy_periodic_every120sec(dev); + if (state % 60 == 0) + b43legacy_periodic_every60sec(dev); + if (state % 30 == 0) + b43legacy_periodic_every30sec(dev); + if (state % 15 == 0) + b43legacy_periodic_every15sec(dev); + b43legacy_periodic_every1sec(dev); +} + +/* Estimate a "Badness" value based on the periodic work + * state-machine state. "Badness" is worse (bigger), if the + * periodic work will take longer. + */ +static int estimate_periodic_work_badness(unsigned int state) +{ + int badness = 0; + + if (state % 120 == 0) /* every 120 sec */ + badness += 10; + if (state % 60 == 0) /* every 60 sec */ + badness += 5; + if (state % 30 == 0) /* every 30 sec */ + badness += 1; + if (state % 15 == 0) /* every 15 sec */ + badness += 1; + +#define BADNESS_LIMIT 4 + return badness; +} + +static void b43legacy_periodic_work_handler(struct work_struct *work) +{ + struct b43legacy_wldev *dev = + container_of(work, struct b43legacy_wldev, + periodic_work.work); + unsigned long flags; + unsigned long delay; + u32 savedirqs = 0; + int badness; + + mutex_lock(&dev->wl->mutex); + + if (unlikely(b43legacy_status(dev) != B43legacy_STAT_STARTED)) + goto out; + if (b43legacy_debug(dev, B43legacy_DBG_PWORK_STOP)) + goto out_requeue; + + badness = estimate_periodic_work_badness(dev->periodic_state); + if (badness > BADNESS_LIMIT) { + spin_lock_irqsave(&dev->wl->irq_lock, flags); + /* Suspend TX as we don't want to transmit packets while + * we recalibrate the hardware. */ + b43legacy_tx_suspend(dev); + savedirqs = b43legacy_interrupt_disable(dev, + B43legacy_IRQ_ALL); + /* Periodic work will take a long time, so we want it to + * be preemtible and release the spinlock. */ + spin_unlock_irqrestore(&dev->wl->irq_lock, flags); + b43legacy_synchronize_irq(dev); + + do_periodic_work(dev); + + spin_lock_irqsave(&dev->wl->irq_lock, flags); + b43legacy_interrupt_enable(dev, savedirqs); + b43legacy_tx_resume(dev); + mmiowb(); + spin_unlock_irqrestore(&dev->wl->irq_lock, flags); + } else { + /* Take the global driver lock. This will lock any operation. 
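+		 * Cheap periodic work (badness within the limit) simply runs
+		 * with IRQs disabled under wl->irq_lock instead of the
+		 * TX-suspend and IRQ-masking path above.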
*/ + spin_lock_irqsave(&dev->wl->irq_lock, flags); + + do_periodic_work(dev); + + mmiowb(); + spin_unlock_irqrestore(&dev->wl->irq_lock, flags); + } + dev->periodic_state++; +out_requeue: + if (b43legacy_debug(dev, B43legacy_DBG_PWORK_FAST)) + delay = msecs_to_jiffies(50); + else + delay = round_jiffies(HZ); + queue_delayed_work(dev->wl->hw->workqueue, + &dev->periodic_work, delay); +out: + mutex_unlock(&dev->wl->mutex); +} + +static void b43legacy_periodic_tasks_setup(struct b43legacy_wldev *dev) +{ + struct delayed_work *work = &dev->periodic_work; + + dev->periodic_state = 0; + INIT_DELAYED_WORK(work, b43legacy_periodic_work_handler); + queue_delayed_work(dev->wl->hw->workqueue, work, 0); +} + +/* Validate access to the chip (SHM) */ +static int b43legacy_validate_chipaccess(struct b43legacy_wldev *dev) +{ + u32 value; + u32 shm_backup; + + shm_backup = b43legacy_shm_read32(dev, B43legacy_SHM_SHARED, 0); + b43legacy_shm_write32(dev, B43legacy_SHM_SHARED, 0, 0xAA5555AA); + if (b43legacy_shm_read32(dev, B43legacy_SHM_SHARED, 0) != + 0xAA5555AA) + goto error; + b43legacy_shm_write32(dev, B43legacy_SHM_SHARED, 0, 0x55AAAA55); + if (b43legacy_shm_read32(dev, B43legacy_SHM_SHARED, 0) != + 0x55AAAA55) + goto error; + b43legacy_shm_write32(dev, B43legacy_SHM_SHARED, 0, shm_backup); + + value = b43legacy_read32(dev, B43legacy_MMIO_MACCTL); + if ((value | B43legacy_MACCTL_GMODE) != + (B43legacy_MACCTL_GMODE | B43legacy_MACCTL_IHR_ENABLED)) + goto error; + + value = b43legacy_read32(dev, B43legacy_MMIO_GEN_IRQ_REASON); + if (value) + goto error; + + return 0; +error: + b43legacyerr(dev->wl, "Failed to validate the chipaccess\n"); + return -ENODEV; +} + +static void b43legacy_security_init(struct b43legacy_wldev *dev) +{ + dev->max_nr_keys = (dev->dev->id.revision >= 5) ? 58 : 20; + B43legacy_WARN_ON(dev->max_nr_keys > ARRAY_SIZE(dev->key)); + dev->ktp = b43legacy_shm_read16(dev, B43legacy_SHM_SHARED, + 0x0056); + /* KTP is a word address, but we address SHM bytewise. + * So multiply by two. + */ + dev->ktp *= 2; + if (dev->dev->id.revision >= 5) + /* Number of RCMTA address slots */ + b43legacy_write16(dev, B43legacy_MMIO_RCMTA_COUNT, + dev->max_nr_keys - 8); +} + +static int b43legacy_rng_read(struct hwrng *rng, u32 *data) +{ + struct b43legacy_wl *wl = (struct b43legacy_wl *)rng->priv; + unsigned long flags; + + /* Don't take wl->mutex here, as it could deadlock with + * hwrng internal locking. It's not needed to take + * wl->mutex here, anyway. 
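+	 * A single locked 16-bit read of the RNG MMIO register is all that
+	 * is needed here.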
*/ + + spin_lock_irqsave(&wl->irq_lock, flags); + *data = b43legacy_read16(wl->current_dev, B43legacy_MMIO_RNG); + spin_unlock_irqrestore(&wl->irq_lock, flags); + + return (sizeof(u16)); +} + +static void b43legacy_rng_exit(struct b43legacy_wl *wl) +{ + if (wl->rng_initialized) + hwrng_unregister(&wl->rng); +} + +static int b43legacy_rng_init(struct b43legacy_wl *wl) +{ + int err; + + snprintf(wl->rng_name, ARRAY_SIZE(wl->rng_name), + "%s_%s", KBUILD_MODNAME, wiphy_name(wl->hw->wiphy)); + wl->rng.name = wl->rng_name; + wl->rng.data_read = b43legacy_rng_read; + wl->rng.priv = (unsigned long)wl; + wl->rng_initialized = 1; + err = hwrng_register(&wl->rng); + if (err) { + wl->rng_initialized = 0; + b43legacyerr(wl, "Failed to register the random " + "number generator (%d)\n", err); + } + + return err; +} + +static int b43legacy_tx(struct ieee80211_hw *hw, + struct sk_buff *skb, + struct ieee80211_tx_control *ctl) +{ + struct b43legacy_wl *wl = hw_to_b43legacy_wl(hw); + struct b43legacy_wldev *dev = wl->current_dev; + int err = -ENODEV; + unsigned long flags; + + if (unlikely(!dev)) + goto out; + if (unlikely(b43legacy_status(dev) < B43legacy_STAT_STARTED)) + goto out; + /* DMA-TX is done without a global lock. */ + if (b43legacy_using_pio(dev)) { + spin_lock_irqsave(&wl->irq_lock, flags); + err = b43legacy_pio_tx(dev, skb, ctl); + spin_unlock_irqrestore(&wl->irq_lock, flags); + } else + err = b43legacy_dma_tx(dev, skb, ctl); +out: + if (unlikely(err)) + return NETDEV_TX_BUSY; + return NETDEV_TX_OK; +} + +static int b43legacy_conf_tx(struct ieee80211_hw *hw, + int queue, + const struct ieee80211_tx_queue_params *params) +{ + return 0; +} + +static int b43legacy_get_tx_stats(struct ieee80211_hw *hw, + struct ieee80211_tx_queue_stats *stats) +{ + struct b43legacy_wl *wl = hw_to_b43legacy_wl(hw); + struct b43legacy_wldev *dev = wl->current_dev; + unsigned long flags; + int err = -ENODEV; + + if (!dev) + goto out; + spin_lock_irqsave(&wl->irq_lock, flags); + if (likely(b43legacy_status(dev) >= B43legacy_STAT_STARTED)) { + if (b43legacy_using_pio(dev)) + b43legacy_pio_get_tx_stats(dev, stats); + else + b43legacy_dma_get_tx_stats(dev, stats); + err = 0; + } + spin_unlock_irqrestore(&wl->irq_lock, flags); +out: + return err; +} + +static int b43legacy_get_stats(struct ieee80211_hw *hw, + struct ieee80211_low_level_stats *stats) +{ + struct b43legacy_wl *wl = hw_to_b43legacy_wl(hw); + unsigned long flags; + + spin_lock_irqsave(&wl->irq_lock, flags); + memcpy(stats, &wl->ieee_stats, sizeof(*stats)); + spin_unlock_irqrestore(&wl->irq_lock, flags); + + return 0; +} + +static const char *phymode_to_string(unsigned int phymode) +{ + switch (phymode) { + case B43legacy_PHYMODE_B: + return "B"; + case B43legacy_PHYMODE_G: + return "G"; + default: + B43legacy_BUG_ON(1); + } + return ""; +} + +static int find_wldev_for_phymode(struct b43legacy_wl *wl, + unsigned int phymode, + struct b43legacy_wldev **dev, + bool *gmode) +{ + struct b43legacy_wldev *d; + + list_for_each_entry(d, &wl->devlist, list) { + if (d->phy.possible_phymodes & phymode) { + /* Ok, this device supports the PHY-mode. + * Set the gmode bit. 
*/ + *gmode = 1; + *dev = d; + + return 0; + } + } + + return -ESRCH; +} + +static void b43legacy_put_phy_into_reset(struct b43legacy_wldev *dev) +{ + struct ssb_device *sdev = dev->dev; + u32 tmslow; + + tmslow = ssb_read32(sdev, SSB_TMSLOW); + tmslow &= ~B43legacy_TMSLOW_GMODE; + tmslow |= B43legacy_TMSLOW_PHYRESET; + tmslow |= SSB_TMSLOW_FGC; + ssb_write32(sdev, SSB_TMSLOW, tmslow); + msleep(1); + + tmslow = ssb_read32(sdev, SSB_TMSLOW); + tmslow &= ~SSB_TMSLOW_FGC; + tmslow |= B43legacy_TMSLOW_PHYRESET; + ssb_write32(sdev, SSB_TMSLOW, tmslow); + msleep(1); +} + +/* Expects wl->mutex locked */ +static int b43legacy_switch_phymode(struct b43legacy_wl *wl, + unsigned int new_mode) +{ + struct b43legacy_wldev *up_dev; + struct b43legacy_wldev *down_dev; + int err; + bool gmode = 0; + int prev_status; + + err = find_wldev_for_phymode(wl, new_mode, &up_dev, &gmode); + if (err) { + b43legacyerr(wl, "Could not find a device for %s-PHY mode\n", + phymode_to_string(new_mode)); + return err; + } + if ((up_dev == wl->current_dev) && + (!!wl->current_dev->phy.gmode == !!gmode)) + /* This device is already running. */ + return 0; + b43legacydbg(wl, "Reconfiguring PHYmode to %s-PHY\n", + phymode_to_string(new_mode)); + down_dev = wl->current_dev; + + prev_status = b43legacy_status(down_dev); + /* Shutdown the currently running core. */ + if (prev_status >= B43legacy_STAT_STARTED) + b43legacy_wireless_core_stop(down_dev); + if (prev_status >= B43legacy_STAT_INITIALIZED) + b43legacy_wireless_core_exit(down_dev); + + if (down_dev != up_dev) + /* We switch to a different core, so we put PHY into + * RESET on the old core. */ + b43legacy_put_phy_into_reset(down_dev); + + /* Now start the new core. */ + up_dev->phy.gmode = gmode; + if (prev_status >= B43legacy_STAT_INITIALIZED) { + err = b43legacy_wireless_core_init(up_dev); + if (err) { + b43legacyerr(wl, "Fatal: Could not initialize device" + " for newly selected %s-PHY mode\n", + phymode_to_string(new_mode)); + goto init_failure; + } + } + if (prev_status >= B43legacy_STAT_STARTED) { + err = b43legacy_wireless_core_start(up_dev); + if (err) { + b43legacyerr(wl, "Fatal: Coult not start device for " + "newly selected %s-PHY mode\n", + phymode_to_string(new_mode)); + b43legacy_wireless_core_exit(up_dev); + goto init_failure; + } + } + B43legacy_WARN_ON(b43legacy_status(up_dev) != prev_status); + + b43legacy_shm_write32(up_dev, B43legacy_SHM_SHARED, 0x003E, 0); + + wl->current_dev = up_dev; + + return 0; +init_failure: + /* Whoops, failed to init the new core. No core is operating now. */ + wl->current_dev = NULL; + return err; +} + +static int b43legacy_antenna_from_ieee80211(u8 antenna) +{ + switch (antenna) { + case 0: /* default/diversity */ + return B43legacy_ANTENNA_DEFAULT; + case 1: /* Antenna 0 */ + return B43legacy_ANTENNA0; + case 2: /* Antenna 1 */ + return B43legacy_ANTENNA1; + default: + return B43legacy_ANTENNA_DEFAULT; + } +} + +static int b43legacy_dev_config(struct ieee80211_hw *hw, + struct ieee80211_conf *conf) +{ + struct b43legacy_wl *wl = hw_to_b43legacy_wl(hw); + struct b43legacy_wldev *dev; + struct b43legacy_phy *phy; + unsigned long flags; + unsigned int new_phymode = 0xFFFF; + int antenna_tx; + int antenna_rx; + int err = 0; + u32 savedirqs; + + antenna_tx = b43legacy_antenna_from_ieee80211(conf->antenna_sel_tx); + antenna_rx = b43legacy_antenna_from_ieee80211(conf->antenna_sel_rx); + + mutex_lock(&wl->mutex); + + /* Switch the PHY mode (if necessary). 
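+	 * conf->phymode is mapped to B43legacy_PHYMODE_B/G and
+	 * b43legacy_switch_phymode() brings up a core that supports it.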
*/ + switch (conf->phymode) { + case MODE_IEEE80211B: + new_phymode = B43legacy_PHYMODE_B; + break; + case MODE_IEEE80211G: + new_phymode = B43legacy_PHYMODE_G; + break; + default: + B43legacy_WARN_ON(1); + } + err = b43legacy_switch_phymode(wl, new_phymode); + if (err) + goto out_unlock_mutex; + dev = wl->current_dev; + phy = &dev->phy; + + /* Disable IRQs while reconfiguring the device. + * This makes it possible to drop the spinlock throughout + * the reconfiguration process. */ + spin_lock_irqsave(&wl->irq_lock, flags); + if (b43legacy_status(dev) < B43legacy_STAT_STARTED) { + spin_unlock_irqrestore(&wl->irq_lock, flags); + goto out_unlock_mutex; + } + savedirqs = b43legacy_interrupt_disable(dev, B43legacy_IRQ_ALL); + spin_unlock_irqrestore(&wl->irq_lock, flags); + b43legacy_synchronize_irq(dev); + + /* Switch to the requested channel. + * The firmware takes care of races with the TX handler. */ + if (conf->channel_val != phy->channel) + b43legacy_radio_selectchannel(dev, conf->channel_val, 0); + + /* Enable/Disable ShortSlot timing. */ + if ((!!(conf->flags & IEEE80211_CONF_SHORT_SLOT_TIME)) + != dev->short_slot) { + B43legacy_WARN_ON(phy->type != B43legacy_PHYTYPE_G); + if (conf->flags & IEEE80211_CONF_SHORT_SLOT_TIME) + b43legacy_short_slot_timing_enable(dev); + else + b43legacy_short_slot_timing_disable(dev); + } + + /* Adjust the desired TX power level. */ + if (conf->power_level != 0) { + if (conf->power_level != phy->power_level) { + phy->power_level = conf->power_level; + b43legacy_phy_xmitpower(dev); + } + } + + /* Antennas for RX and management frame TX. */ + b43legacy_mgmtframe_txantenna(dev, antenna_tx); + + /* Update templates for AP mode. */ + if (b43legacy_is_mode(wl, IEEE80211_IF_TYPE_AP)) + b43legacy_set_beacon_int(dev, conf->beacon_int); + + + if (!!conf->radio_enabled != phy->radio_on) { + if (conf->radio_enabled) { + b43legacy_radio_turn_on(dev); + b43legacyinfo(dev->wl, "Radio turned on by software\n"); + if (!dev->radio_hw_enable) + b43legacyinfo(dev->wl, "The hardware RF-kill" + " button still turns the radio" + " physically off. 
Press the" + " button to turn it on.\n"); + } else { + b43legacy_radio_turn_off(dev); + b43legacyinfo(dev->wl, "Radio turned off by" + " software\n"); + } + } + + spin_lock_irqsave(&wl->irq_lock, flags); + b43legacy_interrupt_enable(dev, savedirqs); + mmiowb(); + spin_unlock_irqrestore(&wl->irq_lock, flags); +out_unlock_mutex: + mutex_unlock(&wl->mutex); + + return err; +} + +static int b43legacy_dev_set_key(struct ieee80211_hw *hw, + enum set_key_cmd cmd, + const u8 *local_addr, const u8 *addr, + struct ieee80211_key_conf *key) +{ + struct b43legacy_wl *wl = hw_to_b43legacy_wl(hw); + struct b43legacy_wldev *dev = wl->current_dev; + unsigned long flags; + int err = -EOPNOTSUPP; + DECLARE_MAC_BUF(mac); + + if (!dev) + return -ENODEV; + mutex_lock(&wl->mutex); + spin_lock_irqsave(&wl->irq_lock, flags); + + if (b43legacy_status(dev) < B43legacy_STAT_INITIALIZED) { + err = -ENODEV; + } + spin_unlock_irqrestore(&wl->irq_lock, flags); + mutex_unlock(&wl->mutex); + b43legacydbg(wl, "Using software based encryption for " + "mac: %s\n", print_mac(mac, addr)); + return err; +} + +static void b43legacy_configure_filter(struct ieee80211_hw *hw, + unsigned int changed, + unsigned int *fflags, + int mc_count, + struct dev_addr_list *mc_list) +{ + struct b43legacy_wl *wl = hw_to_b43legacy_wl(hw); + struct b43legacy_wldev *dev = wl->current_dev; + unsigned long flags; + + if (!dev) { + *fflags = 0; + return; + } + + spin_lock_irqsave(&wl->irq_lock, flags); + *fflags &= FIF_PROMISC_IN_BSS | + FIF_ALLMULTI | + FIF_FCSFAIL | + FIF_PLCPFAIL | + FIF_CONTROL | + FIF_OTHER_BSS | + FIF_BCN_PRBRESP_PROMISC; + + changed &= FIF_PROMISC_IN_BSS | + FIF_ALLMULTI | + FIF_FCSFAIL | + FIF_PLCPFAIL | + FIF_CONTROL | + FIF_OTHER_BSS | + FIF_BCN_PRBRESP_PROMISC; + + wl->filter_flags = *fflags; + + if (changed && b43legacy_status(dev) >= B43legacy_STAT_INITIALIZED) + b43legacy_adjust_opmode(dev); + spin_unlock_irqrestore(&wl->irq_lock, flags); +} + +static int b43legacy_config_interface(struct ieee80211_hw *hw, + int if_id, + struct ieee80211_if_conf *conf) +{ + struct b43legacy_wl *wl = hw_to_b43legacy_wl(hw); + struct b43legacy_wldev *dev = wl->current_dev; + unsigned long flags; + + if (!dev) + return -ENODEV; + mutex_lock(&wl->mutex); + spin_lock_irqsave(&wl->irq_lock, flags); + B43legacy_WARN_ON(wl->if_id != if_id); + if (conf->bssid) + memcpy(wl->bssid, conf->bssid, ETH_ALEN); + else + memset(wl->bssid, 0, ETH_ALEN); + if (b43legacy_status(dev) >= B43legacy_STAT_INITIALIZED) { + if (b43legacy_is_mode(wl, IEEE80211_IF_TYPE_AP)) { + B43legacy_WARN_ON(conf->type != IEEE80211_IF_TYPE_AP); + b43legacy_set_ssid(dev, conf->ssid, conf->ssid_len); + if (conf->beacon) + b43legacy_refresh_templates(dev, conf->beacon); + } + b43legacy_write_mac_bssid_templates(dev); + } + spin_unlock_irqrestore(&wl->irq_lock, flags); + mutex_unlock(&wl->mutex); + + return 0; +} + +/* Locking: wl->mutex */ +static void b43legacy_wireless_core_stop(struct b43legacy_wldev *dev) +{ + struct b43legacy_wl *wl = dev->wl; + unsigned long flags; + + if (b43legacy_status(dev) < B43legacy_STAT_STARTED) + return; + b43legacy_set_status(dev, B43legacy_STAT_INITIALIZED); + + mutex_unlock(&wl->mutex); + /* Must unlock as it would otherwise deadlock. No races here. + * Cancel the possibly running self-rearming periodic work. */ + cancel_delayed_work_sync(&dev->periodic_work); + mutex_lock(&wl->mutex); + + ieee80211_stop_queues(wl->hw); /* FIXME this could cause a deadlock */ + + /* Disable and sync interrupts. 
*/ + spin_lock_irqsave(&wl->irq_lock, flags); + dev->irq_savedstate = b43legacy_interrupt_disable(dev, + B43legacy_IRQ_ALL); + b43legacy_read32(dev, B43legacy_MMIO_GEN_IRQ_MASK); /* flush */ + spin_unlock_irqrestore(&wl->irq_lock, flags); + b43legacy_synchronize_irq(dev); + + b43legacy_mac_suspend(dev); + free_irq(dev->dev->irq, dev); + b43legacydbg(wl, "Wireless interface stopped\n"); +} + +/* Locking: wl->mutex */ +static int b43legacy_wireless_core_start(struct b43legacy_wldev *dev) +{ + int err; + + B43legacy_WARN_ON(b43legacy_status(dev) != B43legacy_STAT_INITIALIZED); + + drain_txstatus_queue(dev); + err = request_irq(dev->dev->irq, b43legacy_interrupt_handler, + IRQF_SHARED, KBUILD_MODNAME, dev); + if (err) { + b43legacyerr(dev->wl, "Cannot request IRQ-%d\n", + dev->dev->irq); + goto out; + } + /* We are ready to run. */ + b43legacy_set_status(dev, B43legacy_STAT_STARTED); + + /* Start data flow (TX/RX) */ + b43legacy_mac_enable(dev); + b43legacy_interrupt_enable(dev, dev->irq_savedstate); + ieee80211_start_queues(dev->wl->hw); + + /* Start maintenance work */ + b43legacy_periodic_tasks_setup(dev); + + b43legacydbg(dev->wl, "Wireless interface started\n"); +out: + return err; +} + +/* Get PHY and RADIO versioning numbers */ +static int b43legacy_phy_versioning(struct b43legacy_wldev *dev) +{ + struct b43legacy_phy *phy = &dev->phy; + u32 tmp; + u8 analog_type; + u8 phy_type; + u8 phy_rev; + u16 radio_manuf; + u16 radio_ver; + u16 radio_rev; + int unsupported = 0; + + /* Get PHY versioning */ + tmp = b43legacy_read16(dev, B43legacy_MMIO_PHY_VER); + analog_type = (tmp & B43legacy_PHYVER_ANALOG) + >> B43legacy_PHYVER_ANALOG_SHIFT; + phy_type = (tmp & B43legacy_PHYVER_TYPE) >> B43legacy_PHYVER_TYPE_SHIFT; + phy_rev = (tmp & B43legacy_PHYVER_VERSION); + switch (phy_type) { + case B43legacy_PHYTYPE_B: + if (phy_rev != 2 && phy_rev != 4 + && phy_rev != 6 && phy_rev != 7) + unsupported = 1; + break; + case B43legacy_PHYTYPE_G: + if (phy_rev > 8) + unsupported = 1; + break; + default: + unsupported = 1; + }; + if (unsupported) { + b43legacyerr(dev->wl, "FOUND UNSUPPORTED PHY " + "(Analog %u, Type %u, Revision %u)\n", + analog_type, phy_type, phy_rev); + return -EOPNOTSUPP; + } + b43legacydbg(dev->wl, "Found PHY: Analog %u, Type %u, Revision %u\n", + analog_type, phy_type, phy_rev); + + + /* Get RADIO versioning */ + if (dev->dev->bus->chip_id == 0x4317) { + if (dev->dev->bus->chip_rev == 0) + tmp = 0x3205017F; + else if (dev->dev->bus->chip_rev == 1) + tmp = 0x4205017F; + else + tmp = 0x5205017F; + } else { + b43legacy_write16(dev, B43legacy_MMIO_RADIO_CONTROL, + B43legacy_RADIOCTL_ID); + tmp = b43legacy_read16(dev, B43legacy_MMIO_RADIO_DATA_HIGH); + tmp <<= 16; + b43legacy_write16(dev, B43legacy_MMIO_RADIO_CONTROL, + B43legacy_RADIOCTL_ID); + tmp |= b43legacy_read16(dev, B43legacy_MMIO_RADIO_DATA_LOW); + } + radio_manuf = (tmp & 0x00000FFF); + radio_ver = (tmp & 0x0FFFF000) >> 12; + radio_rev = (tmp & 0xF0000000) >> 28; + switch (phy_type) { + case B43legacy_PHYTYPE_B: + if ((radio_ver & 0xFFF0) != 0x2050) + unsupported = 1; + break; + case B43legacy_PHYTYPE_G: + if (radio_ver != 0x2050) + unsupported = 1; + break; + default: + B43legacy_BUG_ON(1); + } + if (unsupported) { + b43legacyerr(dev->wl, "FOUND UNSUPPORTED RADIO " + "(Manuf 0x%X, Version 0x%X, Revision %u)\n", + radio_manuf, radio_ver, radio_rev); + return -EOPNOTSUPP; + } + b43legacydbg(dev->wl, "Found Radio: Manuf 0x%X, Version 0x%X," + " Revision %u\n", radio_manuf, radio_ver, radio_rev); + + + phy->radio_manuf = radio_manuf; + 
phy->radio_ver = radio_ver; + phy->radio_rev = radio_rev; + + phy->analog = analog_type; + phy->type = phy_type; + phy->rev = phy_rev; + + return 0; +} + +static void setup_struct_phy_for_init(struct b43legacy_wldev *dev, + struct b43legacy_phy *phy) +{ + struct b43legacy_lopair *lo; + int i; + + memset(phy->minlowsig, 0xFF, sizeof(phy->minlowsig)); + memset(phy->minlowsigpos, 0, sizeof(phy->minlowsigpos)); + + /* Flags */ + phy->locked = 0; + /* Assume the radio is enabled. If it's not enabled, the state will + * immediately get fixed on the first periodic work run. */ + dev->radio_hw_enable = 1; + + phy->savedpctlreg = 0xFFFF; + phy->aci_enable = 0; + phy->aci_wlan_automatic = 0; + phy->aci_hw_rssi = 0; + + lo = phy->_lo_pairs; + if (lo) + memset(lo, 0, sizeof(struct b43legacy_lopair) * + B43legacy_LO_COUNT); + phy->max_lb_gain = 0; + phy->trsw_rx_gain = 0; + + /* Set default attenuation values. */ + phy->bbatt = b43legacy_default_baseband_attenuation(dev); + phy->rfatt = b43legacy_default_radio_attenuation(dev); + phy->txctl1 = b43legacy_default_txctl1(dev); + phy->txpwr_offset = 0; + + /* NRSSI */ + phy->nrssislope = 0; + for (i = 0; i < ARRAY_SIZE(phy->nrssi); i++) + phy->nrssi[i] = -1000; + for (i = 0; i < ARRAY_SIZE(phy->nrssi_lt); i++) + phy->nrssi_lt[i] = i; + + phy->lofcal = 0xFFFF; + phy->initval = 0xFFFF; + + spin_lock_init(&phy->lock); + phy->interfmode = B43legacy_INTERFMODE_NONE; + phy->channel = 0xFF; +} + +static void setup_struct_wldev_for_init(struct b43legacy_wldev *dev) +{ + /* Flags */ + dev->reg124_set_0x4 = 0; + + /* Stats */ + memset(&dev->stats, 0, sizeof(dev->stats)); + + setup_struct_phy_for_init(dev, &dev->phy); + + /* IRQ related flags */ + dev->irq_reason = 0; + memset(dev->dma_reason, 0, sizeof(dev->dma_reason)); + dev->irq_savedstate = B43legacy_IRQ_MASKTEMPLATE; + + dev->mac_suspended = 1; + + /* Noise calculation context */ + memset(&dev->noisecalc, 0, sizeof(dev->noisecalc)); +} + +static void b43legacy_imcfglo_timeouts_workaround(struct b43legacy_wldev *dev) +{ +#ifdef CONFIG_SSB_DRIVER_PCICORE + struct ssb_bus *bus = dev->dev->bus; + u32 tmp; + + if (bus->pcicore.dev && + bus->pcicore.dev->id.coreid == SSB_DEV_PCI && + bus->pcicore.dev->id.revision <= 5) { + /* IMCFGLO timeouts workaround. */ + tmp = ssb_read32(dev->dev, SSB_IMCFGLO); + tmp &= ~SSB_IMCFGLO_REQTO; + tmp &= ~SSB_IMCFGLO_SERTO; + switch (bus->bustype) { + case SSB_BUSTYPE_PCI: + case SSB_BUSTYPE_PCMCIA: + tmp |= 0x32; + break; + case SSB_BUSTYPE_SSB: + tmp |= 0x53; + break; + } + ssb_write32(dev->dev, SSB_IMCFGLO, tmp); + } +#endif /* CONFIG_SSB_DRIVER_PCICORE */ +} + +/* Shutdown a wireless core */ +/* Locking: wl->mutex */ +static void b43legacy_wireless_core_exit(struct b43legacy_wldev *dev) +{ + struct b43legacy_wl *wl = dev->wl; + struct b43legacy_phy *phy = &dev->phy; + + B43legacy_WARN_ON(b43legacy_status(dev) > B43legacy_STAT_INITIALIZED); + if (b43legacy_status(dev) != B43legacy_STAT_INITIALIZED) + return; + b43legacy_set_status(dev, B43legacy_STAT_UNINIT); + + mutex_unlock(&wl->mutex); + /* Must unlock as it would otherwise deadlock. No races here. + * Cancel possibly pending workqueues. 
*/ + cancel_work_sync(&dev->restart_work); + mutex_lock(&wl->mutex); + + b43legacy_rng_exit(dev->wl); + b43legacy_pio_free(dev); + b43legacy_dma_free(dev); + b43legacy_chip_exit(dev); + b43legacy_radio_turn_off(dev); + b43legacy_switch_analog(dev, 0); + if (phy->dyn_tssi_tbl) + kfree(phy->tssi2dbm); + kfree(phy->lo_control); + phy->lo_control = NULL; + ssb_device_disable(dev->dev, 0); + ssb_bus_may_powerdown(dev->dev->bus); +} + +static void prepare_phy_data_for_init(struct b43legacy_wldev *dev) +{ + struct b43legacy_phy *phy = &dev->phy; + int i; + + /* Set default attenuation values. */ + phy->bbatt = b43legacy_default_baseband_attenuation(dev); + phy->rfatt = b43legacy_default_radio_attenuation(dev); + phy->txctl1 = b43legacy_default_txctl1(dev); + phy->txctl2 = 0xFFFF; + phy->txpwr_offset = 0; + + /* NRSSI */ + phy->nrssislope = 0; + for (i = 0; i < ARRAY_SIZE(phy->nrssi); i++) + phy->nrssi[i] = -1000; + for (i = 0; i < ARRAY_SIZE(phy->nrssi_lt); i++) + phy->nrssi_lt[i] = i; + + phy->lofcal = 0xFFFF; + phy->initval = 0xFFFF; + + phy->aci_enable = 0; + phy->aci_wlan_automatic = 0; + phy->aci_hw_rssi = 0; + + phy->antenna_diversity = 0xFFFF; + memset(phy->minlowsig, 0xFF, sizeof(phy->minlowsig)); + memset(phy->minlowsigpos, 0, sizeof(phy->minlowsigpos)); + + /* Flags */ + phy->calibrated = 0; + phy->locked = 0; + + if (phy->_lo_pairs) + memset(phy->_lo_pairs, 0, + sizeof(struct b43legacy_lopair) * B43legacy_LO_COUNT); + memset(phy->loopback_gain, 0, sizeof(phy->loopback_gain)); +} + +/* Initialize a wireless core */ +static int b43legacy_wireless_core_init(struct b43legacy_wldev *dev) +{ + struct b43legacy_wl *wl = dev->wl; + struct ssb_bus *bus = dev->dev->bus; + struct b43legacy_phy *phy = &dev->phy; + struct ssb_sprom *sprom = &dev->dev->bus->sprom; + int err; + u32 hf; + u32 tmp; + + B43legacy_WARN_ON(b43legacy_status(dev) != B43legacy_STAT_UNINIT); + + err = ssb_bus_powerup(bus, 0); + if (err) + goto out; + if (!ssb_device_is_enabled(dev->dev)) { + tmp = phy->gmode ? B43legacy_TMSLOW_GMODE : 0; + b43legacy_wireless_core_reset(dev, tmp); + } + + if ((phy->type == B43legacy_PHYTYPE_B) || + (phy->type == B43legacy_PHYTYPE_G)) { + phy->_lo_pairs = kzalloc(sizeof(struct b43legacy_lopair) + * B43legacy_LO_COUNT, + GFP_KERNEL); + if (!phy->_lo_pairs) + return -ENOMEM; + } + setup_struct_wldev_for_init(dev); + + err = b43legacy_phy_init_tssi2dbm_table(dev); + if (err) + goto err_kfree_lo_control; + + /* Enable IRQ routing to this device. */ + ssb_pcicore_dev_irqvecs_enable(&bus->pcicore, dev->dev); + + b43legacy_imcfglo_timeouts_workaround(dev); + prepare_phy_data_for_init(dev); + b43legacy_phy_calibrate(dev); + err = b43legacy_chip_init(dev); + if (err) + goto err_kfree_tssitbl; + b43legacy_shm_write16(dev, B43legacy_SHM_SHARED, + B43legacy_SHM_SH_WLCOREREV, + dev->dev->id.revision); + hf = b43legacy_hf_read(dev); + if (phy->type == B43legacy_PHYTYPE_G) { + hf |= B43legacy_HF_SYMW; + if (phy->rev == 1) + hf |= B43legacy_HF_GDCW; + if (sprom->r1.boardflags_lo & B43legacy_BFL_PACTRL) + hf |= B43legacy_HF_OFDMPABOOST; + } else if (phy->type == B43legacy_PHYTYPE_B) { + hf |= B43legacy_HF_SYMW; + if (phy->rev >= 2 && phy->radio_ver == 0x2050) + hf &= ~B43legacy_HF_GDCW; + } + b43legacy_hf_write(dev, hf); + + /* Short/Long Retry Limit. + * The retry-limit is a 4-bit counter. Enforce this to avoid overflowing + * the chip-internal counter. 
+ */ + tmp = limit_value(modparam_short_retry, 0, 0xF); + b43legacy_shm_write16(dev, B43legacy_SHM_WIRELESS, + 0x0006, tmp); + tmp = limit_value(modparam_long_retry, 0, 0xF); + b43legacy_shm_write16(dev, B43legacy_SHM_WIRELESS, + 0x0007, tmp); + + b43legacy_shm_write16(dev, B43legacy_SHM_SHARED, + 0x0044, 3); + b43legacy_shm_write16(dev, B43legacy_SHM_SHARED, + 0x0046, 2); + + /* Disable sending probe responses from firmware. + * Setting the MaxTime to one usec will always trigger + * a timeout, so we never send any probe resp. + * A timeout of zero is infinite. */ + b43legacy_shm_write16(dev, B43legacy_SHM_SHARED, + B43legacy_SHM_SH_PRMAXTIME, 1); + + b43legacy_rate_memory_init(dev); + + /* Minimum Contention Window */ + if (phy->type == B43legacy_PHYTYPE_B) + b43legacy_shm_write16(dev, B43legacy_SHM_WIRELESS, + 0x0003, 31); + else + b43legacy_shm_write16(dev, B43legacy_SHM_WIRELESS, + 0x0003, 15); + /* Maximum Contention Window */ + b43legacy_shm_write16(dev, B43legacy_SHM_WIRELESS, + 0x0004, 1023); + + do { + if (b43legacy_using_pio(dev)) + err = b43legacy_pio_init(dev); + else { + err = b43legacy_dma_init(dev); + if (!err) + b43legacy_qos_init(dev); + } + } while (err == -EAGAIN); + if (err) + goto err_chip_exit; + + b43legacy_write16(dev, 0x0612, 0x0050); + b43legacy_shm_write16(dev, B43legacy_SHM_SHARED, 0x0416, 0x0050); + b43legacy_shm_write16(dev, B43legacy_SHM_SHARED, 0x0414, 0x01F4); + + ssb_bus_powerup(bus, 1); /* Enable dynamic PCTL */ + memset(wl->bssid, 0, ETH_ALEN); + memset(wl->mac_addr, 0, ETH_ALEN); + b43legacy_upload_card_macaddress(dev); + b43legacy_security_init(dev); + b43legacy_rng_init(wl); + + b43legacy_set_status(dev, B43legacy_STAT_INITIALIZED); + +out: + return err; + +err_chip_exit: + b43legacy_chip_exit(dev); +err_kfree_tssitbl: + if (phy->dyn_tssi_tbl) + kfree(phy->tssi2dbm); +err_kfree_lo_control: + kfree(phy->lo_control); + phy->lo_control = NULL; + ssb_bus_may_powerdown(bus); + B43legacy_WARN_ON(b43legacy_status(dev) != B43legacy_STAT_UNINIT); + return err; +} + +static int b43legacy_add_interface(struct ieee80211_hw *hw, + struct ieee80211_if_init_conf *conf) +{ + struct b43legacy_wl *wl = hw_to_b43legacy_wl(hw); + struct b43legacy_wldev *dev; + unsigned long flags; + int err = -EOPNOTSUPP; + + /* TODO: allow WDS/AP devices to coexist */ + + if (conf->type != IEEE80211_IF_TYPE_AP && + conf->type != IEEE80211_IF_TYPE_STA && + conf->type != IEEE80211_IF_TYPE_WDS && + conf->type != IEEE80211_IF_TYPE_IBSS) + return -EOPNOTSUPP; + + mutex_lock(&wl->mutex); + if (wl->operating) + goto out_mutex_unlock; + + b43legacydbg(wl, "Adding Interface type %d\n", conf->type); + + dev = wl->current_dev; + wl->operating = 1; + wl->if_id = conf->if_id; + wl->if_type = conf->type; + memcpy(wl->mac_addr, conf->mac_addr, ETH_ALEN); + + spin_lock_irqsave(&wl->irq_lock, flags); + b43legacy_adjust_opmode(dev); + b43legacy_upload_card_macaddress(dev); + spin_unlock_irqrestore(&wl->irq_lock, flags); + + err = 0; + out_mutex_unlock: + mutex_unlock(&wl->mutex); + + return err; +} + +static void b43legacy_remove_interface(struct ieee80211_hw *hw, + struct ieee80211_if_init_conf *conf) +{ + struct b43legacy_wl *wl = hw_to_b43legacy_wl(hw); + struct b43legacy_wldev *dev = wl->current_dev; + unsigned long flags; + + b43legacydbg(wl, "Removing Interface type %d\n", conf->type); + + mutex_lock(&wl->mutex); + + B43legacy_WARN_ON(!wl->operating); + B43legacy_WARN_ON(wl->if_id != conf->if_id); + + wl->operating = 0; + + spin_lock_irqsave(&wl->irq_lock, flags); + b43legacy_adjust_opmode(dev); + 
memset(wl->mac_addr, 0, ETH_ALEN); + b43legacy_upload_card_macaddress(dev); + spin_unlock_irqrestore(&wl->irq_lock, flags); + + mutex_unlock(&wl->mutex); +} + +static int b43legacy_start(struct ieee80211_hw *hw) +{ + struct b43legacy_wl *wl = hw_to_b43legacy_wl(hw); + struct b43legacy_wldev *dev = wl->current_dev; + int did_init = 0; + int err; + + mutex_lock(&wl->mutex); + + if (b43legacy_status(dev) < B43legacy_STAT_INITIALIZED) { + err = b43legacy_wireless_core_init(dev); + if (err) + goto out_mutex_unlock; + did_init = 1; + } + + if (b43legacy_status(dev) < B43legacy_STAT_STARTED) { + err = b43legacy_wireless_core_start(dev); + if (err) { + if (did_init) + b43legacy_wireless_core_exit(dev); + goto out_mutex_unlock; + } + } + +out_mutex_unlock: + mutex_unlock(&wl->mutex); + + return err; +} + +void b43legacy_stop(struct ieee80211_hw *hw) +{ + struct b43legacy_wl *wl = hw_to_b43legacy_wl(hw); + struct b43legacy_wldev *dev = wl->current_dev; + + mutex_lock(&wl->mutex); + if (b43legacy_status(dev) >= B43legacy_STAT_STARTED) + b43legacy_wireless_core_stop(dev); + b43legacy_wireless_core_exit(dev); + mutex_unlock(&wl->mutex); +} + + +static const struct ieee80211_ops b43legacy_hw_ops = { + .tx = b43legacy_tx, + .conf_tx = b43legacy_conf_tx, + .add_interface = b43legacy_add_interface, + .remove_interface = b43legacy_remove_interface, + .config = b43legacy_dev_config, + .config_interface = b43legacy_config_interface, + .set_key = b43legacy_dev_set_key, + .configure_filter = b43legacy_configure_filter, + .get_stats = b43legacy_get_stats, + .get_tx_stats = b43legacy_get_tx_stats, + .start = b43legacy_start, + .stop = b43legacy_stop, +}; + +/* Hard-reset the chip. Do not call this directly. + * Use b43legacy_controller_restart() + */ +static void b43legacy_chip_reset(struct work_struct *work) +{ + struct b43legacy_wldev *dev = + container_of(work, struct b43legacy_wldev, restart_work); + struct b43legacy_wl *wl = dev->wl; + int err = 0; + int prev_status; + + mutex_lock(&wl->mutex); + + prev_status = b43legacy_status(dev); + /* Bring the device down... */ + if (prev_status >= B43legacy_STAT_STARTED) + b43legacy_wireless_core_stop(dev); + if (prev_status >= B43legacy_STAT_INITIALIZED) + b43legacy_wireless_core_exit(dev); + + /* ...and up again. 
*/ + if (prev_status >= B43legacy_STAT_INITIALIZED) { + err = b43legacy_wireless_core_init(dev); + if (err) + goto out; + } + if (prev_status >= B43legacy_STAT_STARTED) { + err = b43legacy_wireless_core_start(dev); + if (err) { + b43legacy_wireless_core_exit(dev); + goto out; + } + } +out: + mutex_unlock(&wl->mutex); + if (err) + b43legacyerr(wl, "Controller restart FAILED\n"); + else + b43legacyinfo(wl, "Controller restarted\n"); +} + +static int b43legacy_setup_modes(struct b43legacy_wldev *dev, + int have_bphy, + int have_gphy) +{ + struct ieee80211_hw *hw = dev->wl->hw; + struct ieee80211_hw_mode *mode; + struct b43legacy_phy *phy = &dev->phy; + int cnt = 0; + int err; + + phy->possible_phymodes = 0; + for (; 1; cnt++) { + if (have_bphy) { + B43legacy_WARN_ON(cnt >= B43legacy_MAX_PHYHWMODES); + mode = &phy->hwmodes[cnt]; + + mode->mode = MODE_IEEE80211B; + mode->num_channels = b43legacy_bg_chantable_size; + mode->channels = b43legacy_bg_chantable; + mode->num_rates = b43legacy_b_ratetable_size; + mode->rates = b43legacy_b_ratetable; + err = ieee80211_register_hwmode(hw, mode); + if (err) + return err; + + phy->possible_phymodes |= B43legacy_PHYMODE_B; + have_bphy = 0; + continue; + } + if (have_gphy) { + B43legacy_WARN_ON(cnt >= B43legacy_MAX_PHYHWMODES); + mode = &phy->hwmodes[cnt]; + + mode->mode = MODE_IEEE80211G; + mode->num_channels = b43legacy_bg_chantable_size; + mode->channels = b43legacy_bg_chantable; + mode->num_rates = b43legacy_g_ratetable_size; + mode->rates = b43legacy_g_ratetable; + err = ieee80211_register_hwmode(hw, mode); + if (err) + return err; + + phy->possible_phymodes |= B43legacy_PHYMODE_G; + have_gphy = 0; + continue; + } + break; + } + + return 0; +} + +static void b43legacy_wireless_core_detach(struct b43legacy_wldev *dev) +{ + /* We release firmware that late to not be required to re-request + * it all the time when we reinit the core. */ + b43legacy_release_firmware(dev); +} + +static int b43legacy_wireless_core_attach(struct b43legacy_wldev *dev) +{ + struct b43legacy_wl *wl = dev->wl; + struct ssb_bus *bus = dev->dev->bus; + struct pci_dev *pdev = bus->host_pci; + int err; + int have_bphy = 0; + int have_gphy = 0; + u32 tmp; + + /* Do NOT do any device initialization here. + * Do it in wireless_core_init() instead. + * This function is for gathering basic information about the HW, only. + * Also some structs may be set up here. But most likely you want to + * have that in core_init(), too. + */ + + err = ssb_bus_powerup(bus, 0); + if (err) { + b43legacyerr(wl, "Bus powerup failed\n"); + goto out; + } + /* Get the PHY type. */ + if (dev->dev->id.revision >= 5) { + u32 tmshigh; + + tmshigh = ssb_read32(dev->dev, SSB_TMSHIGH); + have_gphy = !!(tmshigh & B43legacy_TMSHIGH_GPHY); + if (!have_gphy) + have_bphy = 1; + } else if (dev->dev->id.revision == 4) + have_gphy = 1; + else + have_bphy = 1; + + /* Initialize LEDs structs. */ + err = b43legacy_leds_init(dev); + if (err) + goto err_powerdown; + + dev->phy.gmode = (have_gphy || have_bphy); + tmp = dev->phy.gmode ? B43legacy_TMSLOW_GMODE : 0; + b43legacy_wireless_core_reset(dev, tmp); + + err = b43legacy_phy_versioning(dev); + if (err) + goto err_leds_exit; + /* Check if this device supports multiband. */ + if (!pdev || + (pdev->device != 0x4312 && + pdev->device != 0x4319 && + pdev->device != 0x4324)) { + /* No multiband support. 
*/ + have_bphy = 0; + have_gphy = 0; + switch (dev->phy.type) { + case B43legacy_PHYTYPE_B: + have_bphy = 1; + break; + case B43legacy_PHYTYPE_G: + have_gphy = 1; + break; + default: + B43legacy_BUG_ON(1); + } + } + dev->phy.gmode = (have_gphy || have_bphy); + tmp = dev->phy.gmode ? B43legacy_TMSLOW_GMODE : 0; + b43legacy_wireless_core_reset(dev, tmp); + + err = b43legacy_validate_chipaccess(dev); + if (err) + goto err_leds_exit; + err = b43legacy_setup_modes(dev, have_bphy, have_gphy); + if (err) + goto err_leds_exit; + + /* Now set some default "current_dev" */ + if (!wl->current_dev) + wl->current_dev = dev; + INIT_WORK(&dev->restart_work, b43legacy_chip_reset); + + b43legacy_radio_turn_off(dev); + b43legacy_switch_analog(dev, 0); + ssb_device_disable(dev->dev, 0); + ssb_bus_may_powerdown(bus); + +out: + return err; + +err_leds_exit: + b43legacy_leds_exit(dev); +err_powerdown: + ssb_bus_may_powerdown(bus); + return err; +} + +static void b43legacy_one_core_detach(struct ssb_device *dev) +{ + struct b43legacy_wldev *wldev; + struct b43legacy_wl *wl; + + wldev = ssb_get_drvdata(dev); + wl = wldev->wl; + cancel_work_sync(&wldev->restart_work); + b43legacy_debugfs_remove_device(wldev); + b43legacy_wireless_core_detach(wldev); + list_del(&wldev->list); + wl->nr_devs--; + ssb_set_drvdata(dev, NULL); + kfree(wldev); +} + +static int b43legacy_one_core_attach(struct ssb_device *dev, + struct b43legacy_wl *wl) +{ + struct b43legacy_wldev *wldev; + struct pci_dev *pdev; + int err = -ENOMEM; + + if (!list_empty(&wl->devlist)) { + /* We are not the first core on this chip. */ + pdev = dev->bus->host_pci; + /* Only special chips support more than one wireless + * core, although some of the other chips have more than + * one wireless core as well. Check for this and + * bail out early. 
+ */ + if (!pdev || + ((pdev->device != 0x4321) && + (pdev->device != 0x4313) && + (pdev->device != 0x431A))) { + b43legacydbg(wl, "Ignoring unconnected 802.11 core\n"); + return -ENODEV; + } + } + + wldev = kzalloc(sizeof(*wldev), GFP_KERNEL); + if (!wldev) + goto out; + + wldev->dev = dev; + wldev->wl = wl; + b43legacy_set_status(wldev, B43legacy_STAT_UNINIT); + wldev->bad_frames_preempt = modparam_bad_frames_preempt; + tasklet_init(&wldev->isr_tasklet, + (void (*)(unsigned long))b43legacy_interrupt_tasklet, + (unsigned long)wldev); + if (modparam_pio) + wldev->__using_pio = 1; + INIT_LIST_HEAD(&wldev->list); + + err = b43legacy_wireless_core_attach(wldev); + if (err) + goto err_kfree_wldev; + + list_add(&wldev->list, &wl->devlist); + wl->nr_devs++; + ssb_set_drvdata(dev, wldev); + b43legacy_debugfs_add_device(wldev); +out: + return err; + +err_kfree_wldev: + kfree(wldev); + return err; +} + +static void b43legacy_sprom_fixup(struct ssb_bus *bus) +{ + /* boardflags workarounds */ + if (bus->boardinfo.vendor == PCI_VENDOR_ID_APPLE && + bus->boardinfo.type == 0x4E && + bus->boardinfo.rev > 0x40) + bus->sprom.r1.boardflags_lo |= B43legacy_BFL_PACTRL; + + /* Convert Antennagain values to Q5.2 */ + if (bus->sprom.r1.antenna_gain_bg == 0xFF) + bus->sprom.r1.antenna_gain_bg = 2; /* if unset, use 2 dBm */ + bus->sprom.r1.antenna_gain_bg <<= 2; +} + +static void b43legacy_wireless_exit(struct ssb_device *dev, + struct b43legacy_wl *wl) +{ + struct ieee80211_hw *hw = wl->hw; + + ssb_set_devtypedata(dev, NULL); + ieee80211_free_hw(hw); +} + +static int b43legacy_wireless_init(struct ssb_device *dev) +{ + struct ssb_sprom *sprom = &dev->bus->sprom; + struct ieee80211_hw *hw; + struct b43legacy_wl *wl; + int err = -ENOMEM; + + b43legacy_sprom_fixup(dev->bus); + + hw = ieee80211_alloc_hw(sizeof(*wl), &b43legacy_hw_ops); + if (!hw) { + b43legacyerr(NULL, "Could not allocate ieee80211 device\n"); + goto out; + } + + /* fill hw info */ + hw->flags = IEEE80211_HW_HOST_GEN_BEACON_TEMPLATE | + IEEE80211_HW_RX_INCLUDES_FCS; + hw->max_signal = 100; + hw->max_rssi = -110; + hw->max_noise = -110; + hw->queues = 1; /* FIXME: hardware has more queues */ + SET_IEEE80211_DEV(hw, dev->dev); + if (is_valid_ether_addr(sprom->r1.et1mac)) + SET_IEEE80211_PERM_ADDR(hw, sprom->r1.et1mac); + else + SET_IEEE80211_PERM_ADDR(hw, sprom->r1.il0mac); + + /* Get and initialize struct b43legacy_wl */ + wl = hw_to_b43legacy_wl(hw); + memset(wl, 0, sizeof(*wl)); + wl->hw = hw; + spin_lock_init(&wl->irq_lock); + spin_lock_init(&wl->leds_lock); + mutex_init(&wl->mutex); + INIT_LIST_HEAD(&wl->devlist); + + ssb_set_devtypedata(dev, wl); + b43legacyinfo(wl, "Broadcom %04X WLAN found\n", dev->bus->chip_id); + err = 0; +out: + return err; +} + +static int b43legacy_probe(struct ssb_device *dev, + const struct ssb_device_id *id) +{ + struct b43legacy_wl *wl; + int err; + int first = 0; + + wl = ssb_get_devtypedata(dev); + if (!wl) { + /* Probing the first core - setup common struct b43legacy_wl */ + first = 1; + err = b43legacy_wireless_init(dev); + if (err) + goto out; + wl = ssb_get_devtypedata(dev); + B43legacy_WARN_ON(!wl); + } + err = b43legacy_one_core_attach(dev, wl); + if (err) + goto err_wireless_exit; + + if (first) { + err = ieee80211_register_hw(wl->hw); + if (err) + goto err_one_core_detach; + } + +out: + return err; + +err_one_core_detach: + b43legacy_one_core_detach(dev); +err_wireless_exit: + if (first) + b43legacy_wireless_exit(dev, wl); + return err; +} + +static void b43legacy_remove(struct ssb_device *dev) +{ + struct 
b43legacy_wl *wl = ssb_get_devtypedata(dev); + struct b43legacy_wldev *wldev = ssb_get_drvdata(dev); + + B43legacy_WARN_ON(!wl); + if (wl->current_dev == wldev) + ieee80211_unregister_hw(wl->hw); + + b43legacy_one_core_detach(dev); + + if (list_empty(&wl->devlist)) + /* Last core on the chip unregistered. + * We can destroy common struct b43legacy_wl. + */ + b43legacy_wireless_exit(dev, wl); +} + +/* Perform a hardware reset. This can be called from any context. */ +void b43legacy_controller_restart(struct b43legacy_wldev *dev, + const char *reason) +{ + /* Must avoid requeueing, if we are in shutdown. */ + if (b43legacy_status(dev) < B43legacy_STAT_INITIALIZED) + return; + b43legacyinfo(dev->wl, "Controller RESET (%s) ...\n", reason); + queue_work(dev->wl->hw->workqueue, &dev->restart_work); +} + +#ifdef CONFIG_PM + +static int b43legacy_suspend(struct ssb_device *dev, pm_message_t state) +{ + struct b43legacy_wldev *wldev = ssb_get_drvdata(dev); + struct b43legacy_wl *wl = wldev->wl; + + b43legacydbg(wl, "Suspending...\n"); + + mutex_lock(&wl->mutex); + wldev->suspend_init_status = b43legacy_status(wldev); + if (wldev->suspend_init_status >= B43legacy_STAT_STARTED) + b43legacy_wireless_core_stop(wldev); + if (wldev->suspend_init_status >= B43legacy_STAT_INITIALIZED) + b43legacy_wireless_core_exit(wldev); + mutex_unlock(&wl->mutex); + + b43legacydbg(wl, "Device suspended.\n"); + + return 0; +} + +static int b43legacy_resume(struct ssb_device *dev) +{ + struct b43legacy_wldev *wldev = ssb_get_drvdata(dev); + struct b43legacy_wl *wl = wldev->wl; + int err = 0; + + b43legacydbg(wl, "Resuming...\n"); + + mutex_lock(&wl->mutex); + if (wldev->suspend_init_status >= B43legacy_STAT_INITIALIZED) { + err = b43legacy_wireless_core_init(wldev); + if (err) { + b43legacyerr(wl, "Resume failed at core init\n"); + goto out; + } + } + if (wldev->suspend_init_status >= B43legacy_STAT_STARTED) { + err = b43legacy_wireless_core_start(wldev); + if (err) { + b43legacy_wireless_core_exit(wldev); + b43legacyerr(wl, "Resume failed at core start\n"); + goto out; + } + } + mutex_unlock(&wl->mutex); + + b43legacydbg(wl, "Device resumed.\n"); +out: + return err; +} + +#else /* CONFIG_PM */ +# define b43legacy_suspend NULL +# define b43legacy_resume NULL +#endif /* CONFIG_PM */ + +static struct ssb_driver b43legacy_ssb_driver = { + .name = KBUILD_MODNAME, + .id_table = b43legacy_ssb_tbl, + .probe = b43legacy_probe, + .remove = b43legacy_remove, + .suspend = b43legacy_suspend, + .resume = b43legacy_resume, +}; + +static int __init b43legacy_init(void) +{ + int err; + + b43legacy_debugfs_init(); + + err = ssb_driver_register(&b43legacy_ssb_driver); + if (err) + goto err_dfs_exit; + + return err; + +err_dfs_exit: + b43legacy_debugfs_exit(); + return err; +} + +static void __exit b43legacy_exit(void) +{ + ssb_driver_unregister(&b43legacy_ssb_driver); + b43legacy_debugfs_exit(); +} + +module_init(b43legacy_init) +module_exit(b43legacy_exit) diff --git a/drivers/net/wireless/b43legacy/main.h b/drivers/net/wireless/b43legacy/main.h new file mode 100644 index 0000000000000000000000000000000000000000..68435c50d8e07fd05f1e3993501f19520ee6fc67 --- /dev/null +++ b/drivers/net/wireless/b43legacy/main.h @@ -0,0 +1,127 @@ +/* + + Broadcom B43legacy wireless driver + + Copyright (c) 2005 Martin Langer , + Copyright (c) 2005 Stefano Brivio + Copyright (c) 2005, 2006 Michael Buesch + Copyright (c) 2005 Danny van Dyk + Copyright (c) 2005 Andreas Jaggi + Copyright (c) 2007 Larry Finger + + Some parts of the code in this file are derived 
from the ipw2200 + driver Copyright(c) 2003 - 2004 Intel Corporation. + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; see the file COPYING. If not, write to + the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, + Boston, MA 02110-1301, USA. + +*/ + +#ifndef B43legacy_MAIN_H_ +#define B43legacy_MAIN_H_ + +#include "b43legacy.h" + + +#define P4D_BYT3S(magic, nr_bytes) u8 __p4dding##magic[nr_bytes] +#define P4D_BYTES(line, nr_bytes) P4D_BYT3S(line, nr_bytes) +/* Magic helper macro to pad structures. Ignore those above. It's magic. */ +#define PAD_BYTES(nr_bytes) P4D_BYTES(__LINE__ , (nr_bytes)) + + +/* Lightweight function to convert a frequency (in MHz) to a channel number. */ +static inline +u8 b43legacy_freq_to_channel_bg(int freq) +{ + u8 channel; + + if (freq == 2484) + channel = 14; + else + channel = (freq - 2407) / 5; + + return channel; +} +static inline +u8 b43legacy_freq_to_channel(struct b43legacy_wldev *dev, + int freq) +{ + return b43legacy_freq_to_channel_bg(freq); +} + +/* Lightweight function to convert a channel number to a frequency (in MHz). */ +static inline +int b43legacy_channel_to_freq_bg(u8 channel) +{ + int freq; + + if (channel == 14) + freq = 2484; + else + freq = 2407 + (5 * channel); + + return freq; +} + +static inline +int b43legacy_channel_to_freq(struct b43legacy_wldev *dev, + u8 channel) +{ + return b43legacy_channel_to_freq_bg(channel); +} + +static inline +int b43legacy_is_cck_rate(int rate) +{ + return (rate == B43legacy_CCK_RATE_1MB || + rate == B43legacy_CCK_RATE_2MB || + rate == B43legacy_CCK_RATE_5MB || + rate == B43legacy_CCK_RATE_11MB); +} + +static inline +int b43legacy_is_ofdm_rate(int rate) +{ + return !b43legacy_is_cck_rate(rate); +} + +void b43legacy_tsf_read(struct b43legacy_wldev *dev, u64 *tsf); +void b43legacy_tsf_write(struct b43legacy_wldev *dev, u64 tsf); + +u32 b43legacy_shm_read32(struct b43legacy_wldev *dev, + u16 routing, u16 offset); +u16 b43legacy_shm_read16(struct b43legacy_wldev *dev, + u16 routing, u16 offset); +void b43legacy_shm_write32(struct b43legacy_wldev *dev, + u16 routing, u16 offset, + u32 value); +void b43legacy_shm_write16(struct b43legacy_wldev *dev, + u16 routing, u16 offset, + u16 value); + +u32 b43legacy_hf_read(struct b43legacy_wldev *dev); +void b43legacy_hf_write(struct b43legacy_wldev *dev, u32 value); + +void b43legacy_dummy_transmission(struct b43legacy_wldev *dev); + +void b43legacy_wireless_core_reset(struct b43legacy_wldev *dev, u32 flags); + +void b43legacy_mac_suspend(struct b43legacy_wldev *dev); +void b43legacy_mac_enable(struct b43legacy_wldev *dev); + +void b43legacy_controller_restart(struct b43legacy_wldev *dev, + const char *reason); + +#endif /* B43legacy_MAIN_H_ */ diff --git a/drivers/net/wireless/b43legacy/phy.c b/drivers/net/wireless/b43legacy/phy.c new file mode 100644 index 0000000000000000000000000000000000000000..22a4b3d0186d5da770c79a34ddf4310c8bebe403 --- /dev/null +++ b/drivers/net/wireless/b43legacy/phy.c @@ -0,0 +1,2255 @@ +/* + + 
Broadcom B43legacy wireless driver + + Copyright (c) 2005 Martin Langer , + Stefano Brivio + Michael Buesch + Danny van Dyk + Andreas Jaggi + Copyright (c) 2007 Larry Finger + + Some parts of the code in this file are derived from the ipw2200 + driver Copyright(c) 2003 - 2004 Intel Corporation. + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; see the file COPYING. If not, write to + the Free Software Foundation, Inc., 51 Franklin Steet, Fifth Floor, + Boston, MA 02110-1301, USA. + +*/ + +#include +#include +#include + +#include "b43legacy.h" +#include "phy.h" +#include "main.h" +#include "radio.h" +#include "ilt.h" + + +static const s8 b43legacy_tssi2dbm_b_table[] = { + 0x4D, 0x4C, 0x4B, 0x4A, + 0x4A, 0x49, 0x48, 0x47, + 0x47, 0x46, 0x45, 0x45, + 0x44, 0x43, 0x42, 0x42, + 0x41, 0x40, 0x3F, 0x3E, + 0x3D, 0x3C, 0x3B, 0x3A, + 0x39, 0x38, 0x37, 0x36, + 0x35, 0x34, 0x32, 0x31, + 0x30, 0x2F, 0x2D, 0x2C, + 0x2B, 0x29, 0x28, 0x26, + 0x25, 0x23, 0x21, 0x1F, + 0x1D, 0x1A, 0x17, 0x14, + 0x10, 0x0C, 0x06, 0x00, + -7, -7, -7, -7, + -7, -7, -7, -7, + -7, -7, -7, -7, +}; + +static const s8 b43legacy_tssi2dbm_g_table[] = { + 77, 77, 77, 76, + 76, 76, 75, 75, + 74, 74, 73, 73, + 73, 72, 72, 71, + 71, 70, 70, 69, + 68, 68, 67, 67, + 66, 65, 65, 64, + 63, 63, 62, 61, + 60, 59, 58, 57, + 56, 55, 54, 53, + 52, 50, 49, 47, + 45, 43, 40, 37, + 33, 28, 22, 14, + 5, -7, -20, -20, + -20, -20, -20, -20, + -20, -20, -20, -20, +}; + +static void b43legacy_phy_initg(struct b43legacy_wldev *dev); + + +static inline +void b43legacy_voluntary_preempt(void) +{ + B43legacy_BUG_ON(!(!in_atomic() && !in_irq() && + !in_interrupt() && !irqs_disabled())); +#ifndef CONFIG_PREEMPT + cond_resched(); +#endif /* CONFIG_PREEMPT */ +} + +void b43legacy_raw_phy_lock(struct b43legacy_wldev *dev) +{ + struct b43legacy_phy *phy = &dev->phy; + + B43legacy_WARN_ON(!irqs_disabled()); + if (b43legacy_read32(dev, B43legacy_MMIO_STATUS_BITFIELD) == 0) { + phy->locked = 0; + return; + } + if (dev->dev->id.revision < 3) { + b43legacy_mac_suspend(dev); + spin_lock(&phy->lock); + } else { + if (!b43legacy_is_mode(dev->wl, IEEE80211_IF_TYPE_AP)) + b43legacy_power_saving_ctl_bits(dev, -1, 1); + } + phy->locked = 1; +} + +void b43legacy_raw_phy_unlock(struct b43legacy_wldev *dev) +{ + struct b43legacy_phy *phy = &dev->phy; + + B43legacy_WARN_ON(!irqs_disabled()); + if (dev->dev->id.revision < 3) { + if (phy->locked) { + spin_unlock(&phy->lock); + b43legacy_mac_enable(dev); + } + } else { + if (!b43legacy_is_mode(dev->wl, IEEE80211_IF_TYPE_AP)) + b43legacy_power_saving_ctl_bits(dev, -1, -1); + } + phy->locked = 0; +} + +u16 b43legacy_phy_read(struct b43legacy_wldev *dev, u16 offset) +{ + b43legacy_write16(dev, B43legacy_MMIO_PHY_CONTROL, offset); + return b43legacy_read16(dev, B43legacy_MMIO_PHY_DATA); +} + +void b43legacy_phy_write(struct b43legacy_wldev *dev, u16 offset, u16 val) +{ + b43legacy_write16(dev, B43legacy_MMIO_PHY_CONTROL, offset); + mmiowb(); + b43legacy_write16(dev, B43legacy_MMIO_PHY_DATA, val); +} + +void 
b43legacy_phy_calibrate(struct b43legacy_wldev *dev) +{ + struct b43legacy_phy *phy = &dev->phy; + + b43legacy_read32(dev, B43legacy_MMIO_STATUS_BITFIELD); /* Dummy read. */ + if (phy->calibrated) + return; + if (phy->type == B43legacy_PHYTYPE_G && phy->rev == 1) { + b43legacy_wireless_core_reset(dev, 0); + b43legacy_phy_initg(dev); + b43legacy_wireless_core_reset(dev, B43legacy_TMSLOW_GMODE); + } + phy->calibrated = 1; +} + +/* intialize B PHY power control + * as described in http://bcm-specs.sipsolutions.net/InitPowerControl + */ +static void b43legacy_phy_init_pctl(struct b43legacy_wldev *dev) +{ + struct b43legacy_phy *phy = &dev->phy; + u16 saved_batt = 0; + u16 saved_ratt = 0; + u16 saved_txctl1 = 0; + int must_reset_txpower = 0; + + B43legacy_BUG_ON(!(phy->type == B43legacy_PHYTYPE_B || + phy->type == B43legacy_PHYTYPE_G)); + if (is_bcm_board_vendor(dev) && + (dev->dev->bus->boardinfo.type == 0x0416)) + return; + + b43legacy_phy_write(dev, 0x0028, 0x8018); + b43legacy_write16(dev, 0x03E6, b43legacy_read16(dev, 0x03E6) & 0xFFDF); + + if (phy->type == B43legacy_PHYTYPE_G) { + if (!phy->gmode) + return; + b43legacy_phy_write(dev, 0x047A, 0xC111); + } + if (phy->savedpctlreg != 0xFFFF) + return; +#ifdef CONFIG_B43LEGACY_DEBUG + if (phy->manual_txpower_control) + return; +#endif + + if (phy->type == B43legacy_PHYTYPE_B && + phy->rev >= 2 && + phy->radio_ver == 0x2050) + b43legacy_radio_write16(dev, 0x0076, + b43legacy_radio_read16(dev, 0x0076) + | 0x0084); + else { + saved_batt = phy->bbatt; + saved_ratt = phy->rfatt; + saved_txctl1 = phy->txctl1; + if ((phy->radio_rev >= 6) && (phy->radio_rev <= 8) + && /*FIXME: incomplete specs for 5 < revision < 9 */ 0) + b43legacy_radio_set_txpower_bg(dev, 0xB, 0x1F, 0); + else + b43legacy_radio_set_txpower_bg(dev, 0xB, 9, 0); + must_reset_txpower = 1; + } + b43legacy_dummy_transmission(dev); + + phy->savedpctlreg = b43legacy_phy_read(dev, B43legacy_PHY_G_PCTL); + + if (must_reset_txpower) + b43legacy_radio_set_txpower_bg(dev, saved_batt, saved_ratt, + saved_txctl1); + else + b43legacy_radio_write16(dev, 0x0076, b43legacy_radio_read16(dev, + 0x0076) & 0xFF7B); + b43legacy_radio_clear_tssi(dev); +} + +static void b43legacy_phy_agcsetup(struct b43legacy_wldev *dev) +{ + struct b43legacy_phy *phy = &dev->phy; + u16 offset = 0x0000; + + if (phy->rev == 1) + offset = 0x4C00; + + b43legacy_ilt_write(dev, offset, 0x00FE); + b43legacy_ilt_write(dev, offset + 1, 0x000D); + b43legacy_ilt_write(dev, offset + 2, 0x0013); + b43legacy_ilt_write(dev, offset + 3, 0x0019); + + if (phy->rev == 1) { + b43legacy_ilt_write(dev, 0x1800, 0x2710); + b43legacy_ilt_write(dev, 0x1801, 0x9B83); + b43legacy_ilt_write(dev, 0x1802, 0x9B83); + b43legacy_ilt_write(dev, 0x1803, 0x0F8D); + b43legacy_phy_write(dev, 0x0455, 0x0004); + } + + b43legacy_phy_write(dev, 0x04A5, (b43legacy_phy_read(dev, 0x04A5) + & 0x00FF) | 0x5700); + b43legacy_phy_write(dev, 0x041A, (b43legacy_phy_read(dev, 0x041A) + & 0xFF80) | 0x000F); + b43legacy_phy_write(dev, 0x041A, (b43legacy_phy_read(dev, 0x041A) + & 0xC07F) | 0x2B80); + b43legacy_phy_write(dev, 0x048C, (b43legacy_phy_read(dev, 0x048C) + & 0xF0FF) | 0x0300); + + b43legacy_radio_write16(dev, 0x007A, + b43legacy_radio_read16(dev, 0x007A) + | 0x0008); + + b43legacy_phy_write(dev, 0x04A0, (b43legacy_phy_read(dev, 0x04A0) + & 0xFFF0) | 0x0008); + b43legacy_phy_write(dev, 0x04A1, (b43legacy_phy_read(dev, 0x04A1) + & 0xF0FF) | 0x0600); + b43legacy_phy_write(dev, 0x04A2, (b43legacy_phy_read(dev, 0x04A2) + & 0xF0FF) | 0x0700); + b43legacy_phy_write(dev, 
0x04A0, (b43legacy_phy_read(dev, 0x04A0) + & 0xF0FF) | 0x0100); + + if (phy->rev == 1) + b43legacy_phy_write(dev, 0x04A2, + (b43legacy_phy_read(dev, 0x04A2) + & 0xFFF0) | 0x0007); + + b43legacy_phy_write(dev, 0x0488, (b43legacy_phy_read(dev, 0x0488) + & 0xFF00) | 0x001C); + b43legacy_phy_write(dev, 0x0488, (b43legacy_phy_read(dev, 0x0488) + & 0xC0FF) | 0x0200); + b43legacy_phy_write(dev, 0x0496, (b43legacy_phy_read(dev, 0x0496) + & 0xFF00) | 0x001C); + b43legacy_phy_write(dev, 0x0489, (b43legacy_phy_read(dev, 0x0489) + & 0xFF00) | 0x0020); + b43legacy_phy_write(dev, 0x0489, (b43legacy_phy_read(dev, 0x0489) + & 0xC0FF) | 0x0200); + b43legacy_phy_write(dev, 0x0482, (b43legacy_phy_read(dev, 0x0482) + & 0xFF00) | 0x002E); + b43legacy_phy_write(dev, 0x0496, (b43legacy_phy_read(dev, 0x0496) + & 0x00FF) | 0x1A00); + b43legacy_phy_write(dev, 0x0481, (b43legacy_phy_read(dev, 0x0481) + & 0xFF00) | 0x0028); + b43legacy_phy_write(dev, 0x0481, (b43legacy_phy_read(dev, 0x0481) + & 0x00FF) | 0x2C00); + + if (phy->rev == 1) { + b43legacy_phy_write(dev, 0x0430, 0x092B); + b43legacy_phy_write(dev, 0x041B, + (b43legacy_phy_read(dev, 0x041B) + & 0xFFE1) | 0x0002); + } else { + b43legacy_phy_write(dev, 0x041B, + b43legacy_phy_read(dev, 0x041B) & 0xFFE1); + b43legacy_phy_write(dev, 0x041F, 0x287A); + b43legacy_phy_write(dev, 0x0420, + (b43legacy_phy_read(dev, 0x0420) + & 0xFFF0) | 0x0004); + } + + if (phy->rev > 2) { + b43legacy_phy_write(dev, 0x0422, 0x287A); + b43legacy_phy_write(dev, 0x0420, + (b43legacy_phy_read(dev, 0x0420) + & 0x0FFF) | 0x3000); + } + + b43legacy_phy_write(dev, 0x04A8, (b43legacy_phy_read(dev, 0x04A8) + & 0x8080) | 0x7874); + b43legacy_phy_write(dev, 0x048E, 0x1C00); + + if (phy->rev == 1) { + b43legacy_phy_write(dev, 0x04AB, + (b43legacy_phy_read(dev, 0x04AB) + & 0xF0FF) | 0x0600); + b43legacy_phy_write(dev, 0x048B, 0x005E); + b43legacy_phy_write(dev, 0x048C, + (b43legacy_phy_read(dev, 0x048C) & 0xFF00) + | 0x001E); + b43legacy_phy_write(dev, 0x048D, 0x0002); + } + + b43legacy_ilt_write(dev, offset + 0x0800, 0); + b43legacy_ilt_write(dev, offset + 0x0801, 7); + b43legacy_ilt_write(dev, offset + 0x0802, 16); + b43legacy_ilt_write(dev, offset + 0x0803, 28); + + if (phy->rev >= 6) { + b43legacy_phy_write(dev, 0x0426, + (b43legacy_phy_read(dev, 0x0426) & 0xFFFC)); + b43legacy_phy_write(dev, 0x0426, + (b43legacy_phy_read(dev, 0x0426) & 0xEFFF)); + } +} + +static void b43legacy_phy_setupg(struct b43legacy_wldev *dev) +{ + struct b43legacy_phy *phy = &dev->phy; + u16 i; + + B43legacy_BUG_ON(phy->type != B43legacy_PHYTYPE_G); + if (phy->rev == 1) { + b43legacy_phy_write(dev, 0x0406, 0x4F19); + b43legacy_phy_write(dev, B43legacy_PHY_G_CRS, + (b43legacy_phy_read(dev, + B43legacy_PHY_G_CRS) & 0xFC3F) | 0x0340); + b43legacy_phy_write(dev, 0x042C, 0x005A); + b43legacy_phy_write(dev, 0x0427, 0x001A); + + for (i = 0; i < B43legacy_ILT_FINEFREQG_SIZE; i++) + b43legacy_ilt_write(dev, 0x5800 + i, + b43legacy_ilt_finefreqg[i]); + for (i = 0; i < B43legacy_ILT_NOISEG1_SIZE; i++) + b43legacy_ilt_write(dev, 0x1800 + i, + b43legacy_ilt_noiseg1[i]); + for (i = 0; i < B43legacy_ILT_ROTOR_SIZE; i++) + b43legacy_ilt_write32(dev, 0x2000 + i, + b43legacy_ilt_rotor[i]); + } else { + /* nrssi values are signed 6-bit values. Why 0x7654 here? 
*/ + b43legacy_nrssi_hw_write(dev, 0xBA98, (s16)0x7654); + + if (phy->rev == 2) { + b43legacy_phy_write(dev, 0x04C0, 0x1861); + b43legacy_phy_write(dev, 0x04C1, 0x0271); + } else if (phy->rev > 2) { + b43legacy_phy_write(dev, 0x04C0, 0x0098); + b43legacy_phy_write(dev, 0x04C1, 0x0070); + b43legacy_phy_write(dev, 0x04C9, 0x0080); + } + b43legacy_phy_write(dev, 0x042B, b43legacy_phy_read(dev, + 0x042B) | 0x800); + + for (i = 0; i < 64; i++) + b43legacy_ilt_write(dev, 0x4000 + i, i); + for (i = 0; i < B43legacy_ILT_NOISEG2_SIZE; i++) + b43legacy_ilt_write(dev, 0x1800 + i, + b43legacy_ilt_noiseg2[i]); + } + + if (phy->rev <= 2) + for (i = 0; i < B43legacy_ILT_NOISESCALEG_SIZE; i++) + b43legacy_ilt_write(dev, 0x1400 + i, + b43legacy_ilt_noisescaleg1[i]); + else if ((phy->rev >= 7) && (b43legacy_phy_read(dev, 0x0449) & 0x0200)) + for (i = 0; i < B43legacy_ILT_NOISESCALEG_SIZE; i++) + b43legacy_ilt_write(dev, 0x1400 + i, + b43legacy_ilt_noisescaleg3[i]); + else + for (i = 0; i < B43legacy_ILT_NOISESCALEG_SIZE; i++) + b43legacy_ilt_write(dev, 0x1400 + i, + b43legacy_ilt_noisescaleg2[i]); + + if (phy->rev == 2) + for (i = 0; i < B43legacy_ILT_SIGMASQR_SIZE; i++) + b43legacy_ilt_write(dev, 0x5000 + i, + b43legacy_ilt_sigmasqr1[i]); + else if ((phy->rev > 2) && (phy->rev <= 8)) + for (i = 0; i < B43legacy_ILT_SIGMASQR_SIZE; i++) + b43legacy_ilt_write(dev, 0x5000 + i, + b43legacy_ilt_sigmasqr2[i]); + + if (phy->rev == 1) { + for (i = 0; i < B43legacy_ILT_RETARD_SIZE; i++) + b43legacy_ilt_write32(dev, 0x2400 + i, + b43legacy_ilt_retard[i]); + for (i = 4; i < 20; i++) + b43legacy_ilt_write(dev, 0x5400 + i, 0x0020); + b43legacy_phy_agcsetup(dev); + + if (is_bcm_board_vendor(dev) && + (dev->dev->bus->boardinfo.type == 0x0416) && + (dev->dev->bus->boardinfo.rev == 0x0017)) + return; + + b43legacy_ilt_write(dev, 0x5001, 0x0002); + b43legacy_ilt_write(dev, 0x5002, 0x0001); + } else { + for (i = 0; i <= 0x20; i++) + b43legacy_ilt_write(dev, 0x1000 + i, 0x0820); + b43legacy_phy_agcsetup(dev); + b43legacy_phy_read(dev, 0x0400); /* dummy read */ + b43legacy_phy_write(dev, 0x0403, 0x1000); + b43legacy_ilt_write(dev, 0x3C02, 0x000F); + b43legacy_ilt_write(dev, 0x3C03, 0x0014); + + if (is_bcm_board_vendor(dev) && + (dev->dev->bus->boardinfo.type == 0x0416) && + (dev->dev->bus->boardinfo.rev == 0x0017)) + return; + + b43legacy_ilt_write(dev, 0x0401, 0x0002); + b43legacy_ilt_write(dev, 0x0402, 0x0001); + } +} + +/* Initialize the APHY portion of a GPHY. 
*/ +static void b43legacy_phy_inita(struct b43legacy_wldev *dev) +{ + + might_sleep(); + + b43legacy_phy_setupg(dev); + if (dev->dev->bus->sprom.r1.boardflags_lo & B43legacy_BFL_PACTRL) + b43legacy_phy_write(dev, 0x046E, 0x03CF); +} + +static void b43legacy_phy_initb2(struct b43legacy_wldev *dev) +{ + struct b43legacy_phy *phy = &dev->phy; + u16 offset; + int val; + + b43legacy_write16(dev, 0x03EC, 0x3F22); + b43legacy_phy_write(dev, 0x0020, 0x301C); + b43legacy_phy_write(dev, 0x0026, 0x0000); + b43legacy_phy_write(dev, 0x0030, 0x00C6); + b43legacy_phy_write(dev, 0x0088, 0x3E00); + val = 0x3C3D; + for (offset = 0x0089; offset < 0x00A7; offset++) { + b43legacy_phy_write(dev, offset, val); + val -= 0x0202; + } + b43legacy_phy_write(dev, 0x03E4, 0x3000); + b43legacy_radio_selectchannel(dev, phy->channel, 0); + if (phy->radio_ver != 0x2050) { + b43legacy_radio_write16(dev, 0x0075, 0x0080); + b43legacy_radio_write16(dev, 0x0079, 0x0081); + } + b43legacy_radio_write16(dev, 0x0050, 0x0020); + b43legacy_radio_write16(dev, 0x0050, 0x0023); + if (phy->radio_ver == 0x2050) { + b43legacy_radio_write16(dev, 0x0050, 0x0020); + b43legacy_radio_write16(dev, 0x005A, 0x0070); + b43legacy_radio_write16(dev, 0x005B, 0x007B); + b43legacy_radio_write16(dev, 0x005C, 0x00B0); + b43legacy_radio_write16(dev, 0x007A, 0x000F); + b43legacy_phy_write(dev, 0x0038, 0x0677); + b43legacy_radio_init2050(dev); + } + b43legacy_phy_write(dev, 0x0014, 0x0080); + b43legacy_phy_write(dev, 0x0032, 0x00CA); + b43legacy_phy_write(dev, 0x0032, 0x00CC); + b43legacy_phy_write(dev, 0x0035, 0x07C2); + b43legacy_phy_lo_b_measure(dev); + b43legacy_phy_write(dev, 0x0026, 0xCC00); + if (phy->radio_ver != 0x2050) + b43legacy_phy_write(dev, 0x0026, 0xCE00); + b43legacy_write16(dev, B43legacy_MMIO_CHANNEL_EXT, 0x1000); + b43legacy_phy_write(dev, 0x002A, 0x88A3); + if (phy->radio_ver != 0x2050) + b43legacy_phy_write(dev, 0x002A, 0x88C2); + b43legacy_radio_set_txpower_bg(dev, 0xFFFF, 0xFFFF, 0xFFFF); + b43legacy_phy_init_pctl(dev); +} + +static void b43legacy_phy_initb4(struct b43legacy_wldev *dev) +{ + struct b43legacy_phy *phy = &dev->phy; + u16 offset; + u16 val; + + b43legacy_write16(dev, 0x03EC, 0x3F22); + b43legacy_phy_write(dev, 0x0020, 0x301C); + b43legacy_phy_write(dev, 0x0026, 0x0000); + b43legacy_phy_write(dev, 0x0030, 0x00C6); + b43legacy_phy_write(dev, 0x0088, 0x3E00); + val = 0x3C3D; + for (offset = 0x0089; offset < 0x00A7; offset++) { + b43legacy_phy_write(dev, offset, val); + val -= 0x0202; + } + b43legacy_phy_write(dev, 0x03E4, 0x3000); + b43legacy_radio_selectchannel(dev, phy->channel, 0); + if (phy->radio_ver != 0x2050) { + b43legacy_radio_write16(dev, 0x0075, 0x0080); + b43legacy_radio_write16(dev, 0x0079, 0x0081); + } + b43legacy_radio_write16(dev, 0x0050, 0x0020); + b43legacy_radio_write16(dev, 0x0050, 0x0023); + if (phy->radio_ver == 0x2050) { + b43legacy_radio_write16(dev, 0x0050, 0x0020); + b43legacy_radio_write16(dev, 0x005A, 0x0070); + b43legacy_radio_write16(dev, 0x005B, 0x007B); + b43legacy_radio_write16(dev, 0x005C, 0x00B0); + b43legacy_radio_write16(dev, 0x007A, 0x000F); + b43legacy_phy_write(dev, 0x0038, 0x0677); + b43legacy_radio_init2050(dev); + } + b43legacy_phy_write(dev, 0x0014, 0x0080); + b43legacy_phy_write(dev, 0x0032, 0x00CA); + if (phy->radio_ver == 0x2050) + b43legacy_phy_write(dev, 0x0032, 0x00E0); + b43legacy_phy_write(dev, 0x0035, 0x07C2); + + b43legacy_phy_lo_b_measure(dev); + + b43legacy_phy_write(dev, 0x0026, 0xCC00); + if (phy->radio_ver == 0x2050) + b43legacy_phy_write(dev, 0x0026, 0xCE00); + 
b43legacy_write16(dev, B43legacy_MMIO_CHANNEL_EXT, 0x1100); + b43legacy_phy_write(dev, 0x002A, 0x88A3); + if (phy->radio_ver == 0x2050) + b43legacy_phy_write(dev, 0x002A, 0x88C2); + b43legacy_radio_set_txpower_bg(dev, 0xFFFF, 0xFFFF, 0xFFFF); + if (dev->dev->bus->sprom.r1.boardflags_lo & B43legacy_BFL_RSSI) { + b43legacy_calc_nrssi_slope(dev); + b43legacy_calc_nrssi_threshold(dev); + } + b43legacy_phy_init_pctl(dev); +} + +static void b43legacy_phy_initb5(struct b43legacy_wldev *dev) +{ + struct b43legacy_phy *phy = &dev->phy; + u16 offset; + u16 value; + u8 old_channel; + + if (phy->analog == 1) + b43legacy_radio_write16(dev, 0x007A, + b43legacy_radio_read16(dev, 0x007A) + | 0x0050); + if (!is_bcm_board_vendor(dev) && + (dev->dev->bus->boardinfo.type != 0x0416)) { + value = 0x2120; + for (offset = 0x00A8 ; offset < 0x00C7; offset++) { + b43legacy_phy_write(dev, offset, value); + value += 0x0202; + } + } + b43legacy_phy_write(dev, 0x0035, + (b43legacy_phy_read(dev, 0x0035) & 0xF0FF) + | 0x0700); + if (phy->radio_ver == 0x2050) + b43legacy_phy_write(dev, 0x0038, 0x0667); + + if (phy->gmode) { + if (phy->radio_ver == 0x2050) { + b43legacy_radio_write16(dev, 0x007A, + b43legacy_radio_read16(dev, 0x007A) + | 0x0020); + b43legacy_radio_write16(dev, 0x0051, + b43legacy_radio_read16(dev, 0x0051) + | 0x0004); + } + b43legacy_write16(dev, B43legacy_MMIO_PHY_RADIO, 0x0000); + + b43legacy_phy_write(dev, 0x0802, b43legacy_phy_read(dev, 0x0802) + | 0x0100); + b43legacy_phy_write(dev, 0x042B, b43legacy_phy_read(dev, 0x042B) + | 0x2000); + + b43legacy_phy_write(dev, 0x001C, 0x186A); + + b43legacy_phy_write(dev, 0x0013, (b43legacy_phy_read(dev, + 0x0013) & 0x00FF) | 0x1900); + b43legacy_phy_write(dev, 0x0035, (b43legacy_phy_read(dev, + 0x0035) & 0xFFC0) | 0x0064); + b43legacy_phy_write(dev, 0x005D, (b43legacy_phy_read(dev, + 0x005D) & 0xFF80) | 0x000A); + } + + if (dev->bad_frames_preempt) + b43legacy_phy_write(dev, B43legacy_PHY_RADIO_BITFIELD, + b43legacy_phy_read(dev, + B43legacy_PHY_RADIO_BITFIELD) | (1 << 11)); + + if (phy->analog == 1) { + b43legacy_phy_write(dev, 0x0026, 0xCE00); + b43legacy_phy_write(dev, 0x0021, 0x3763); + b43legacy_phy_write(dev, 0x0022, 0x1BC3); + b43legacy_phy_write(dev, 0x0023, 0x06F9); + b43legacy_phy_write(dev, 0x0024, 0x037E); + } else + b43legacy_phy_write(dev, 0x0026, 0xCC00); + b43legacy_phy_write(dev, 0x0030, 0x00C6); + b43legacy_write16(dev, 0x03EC, 0x3F22); + + if (phy->analog == 1) + b43legacy_phy_write(dev, 0x0020, 0x3E1C); + else + b43legacy_phy_write(dev, 0x0020, 0x301C); + + if (phy->analog == 0) + b43legacy_write16(dev, 0x03E4, 0x3000); + + old_channel = (phy->channel == 0xFF) ? 1 : phy->channel; + /* Force to channel 7, even if not supported. 
*/ + b43legacy_radio_selectchannel(dev, 7, 0); + + if (phy->radio_ver != 0x2050) { + b43legacy_radio_write16(dev, 0x0075, 0x0080); + b43legacy_radio_write16(dev, 0x0079, 0x0081); + } + + b43legacy_radio_write16(dev, 0x0050, 0x0020); + b43legacy_radio_write16(dev, 0x0050, 0x0023); + + if (phy->radio_ver == 0x2050) { + b43legacy_radio_write16(dev, 0x0050, 0x0020); + b43legacy_radio_write16(dev, 0x005A, 0x0070); + } + + b43legacy_radio_write16(dev, 0x005B, 0x007B); + b43legacy_radio_write16(dev, 0x005C, 0x00B0); + + b43legacy_radio_write16(dev, 0x007A, b43legacy_radio_read16(dev, + 0x007A) | 0x0007); + + b43legacy_radio_selectchannel(dev, old_channel, 0); + + b43legacy_phy_write(dev, 0x0014, 0x0080); + b43legacy_phy_write(dev, 0x0032, 0x00CA); + b43legacy_phy_write(dev, 0x002A, 0x88A3); + + b43legacy_radio_set_txpower_bg(dev, 0xFFFF, 0xFFFF, 0xFFFF); + + if (phy->radio_ver == 0x2050) + b43legacy_radio_write16(dev, 0x005D, 0x000D); + + b43legacy_write16(dev, 0x03E4, (b43legacy_read16(dev, 0x03E4) & + 0xFFC0) | 0x0004); +} + +static void b43legacy_phy_initb6(struct b43legacy_wldev *dev) +{ + struct b43legacy_phy *phy = &dev->phy; + u16 offset; + u16 val; + u8 old_channel; + + b43legacy_phy_write(dev, 0x003E, 0x817A); + b43legacy_radio_write16(dev, 0x007A, + (b43legacy_radio_read16(dev, 0x007A) | 0x0058)); + if (phy->radio_rev == 4 || + phy->radio_rev == 5) { + b43legacy_radio_write16(dev, 0x0051, 0x0037); + b43legacy_radio_write16(dev, 0x0052, 0x0070); + b43legacy_radio_write16(dev, 0x0053, 0x00B3); + b43legacy_radio_write16(dev, 0x0054, 0x009B); + b43legacy_radio_write16(dev, 0x005A, 0x0088); + b43legacy_radio_write16(dev, 0x005B, 0x0088); + b43legacy_radio_write16(dev, 0x005D, 0x0088); + b43legacy_radio_write16(dev, 0x005E, 0x0088); + b43legacy_radio_write16(dev, 0x007D, 0x0088); + b43legacy_shm_write32(dev, B43legacy_SHM_SHARED, + B43legacy_UCODEFLAGS_OFFSET, + (b43legacy_shm_read32(dev, + B43legacy_SHM_SHARED, + B43legacy_UCODEFLAGS_OFFSET) + | 0x00000200)); + } + if (phy->radio_rev == 8) { + b43legacy_radio_write16(dev, 0x0051, 0x0000); + b43legacy_radio_write16(dev, 0x0052, 0x0040); + b43legacy_radio_write16(dev, 0x0053, 0x00B7); + b43legacy_radio_write16(dev, 0x0054, 0x0098); + b43legacy_radio_write16(dev, 0x005A, 0x0088); + b43legacy_radio_write16(dev, 0x005B, 0x006B); + b43legacy_radio_write16(dev, 0x005C, 0x000F); + if (dev->dev->bus->sprom.r1.boardflags_lo & 0x8000) { + b43legacy_radio_write16(dev, 0x005D, 0x00FA); + b43legacy_radio_write16(dev, 0x005E, 0x00D8); + } else { + b43legacy_radio_write16(dev, 0x005D, 0x00F5); + b43legacy_radio_write16(dev, 0x005E, 0x00B8); + } + b43legacy_radio_write16(dev, 0x0073, 0x0003); + b43legacy_radio_write16(dev, 0x007D, 0x00A8); + b43legacy_radio_write16(dev, 0x007C, 0x0001); + b43legacy_radio_write16(dev, 0x007E, 0x0008); + } + val = 0x1E1F; + for (offset = 0x0088; offset < 0x0098; offset++) { + b43legacy_phy_write(dev, offset, val); + val -= 0x0202; + } + val = 0x3E3F; + for (offset = 0x0098; offset < 0x00A8; offset++) { + b43legacy_phy_write(dev, offset, val); + val -= 0x0202; + } + val = 0x2120; + for (offset = 0x00A8; offset < 0x00C8; offset++) { + b43legacy_phy_write(dev, offset, (val & 0x3F3F)); + val += 0x0202; + } + if (phy->type == B43legacy_PHYTYPE_G) { + b43legacy_radio_write16(dev, 0x007A, + b43legacy_radio_read16(dev, 0x007A) | + 0x0020); + b43legacy_radio_write16(dev, 0x0051, + b43legacy_radio_read16(dev, 0x0051) | + 0x0004); + b43legacy_phy_write(dev, 0x0802, + b43legacy_phy_read(dev, 0x0802) | 0x0100); + b43legacy_phy_write(dev, 
0x042B, + b43legacy_phy_read(dev, 0x042B) | 0x2000); + b43legacy_phy_write(dev, 0x5B, 0x0000); + b43legacy_phy_write(dev, 0x5C, 0x0000); + } + + old_channel = phy->channel; + if (old_channel >= 8) + b43legacy_radio_selectchannel(dev, 1, 0); + else + b43legacy_radio_selectchannel(dev, 13, 0); + + b43legacy_radio_write16(dev, 0x0050, 0x0020); + b43legacy_radio_write16(dev, 0x0050, 0x0023); + udelay(40); + if (phy->radio_rev < 6 || phy->radio_rev == 8) { + b43legacy_radio_write16(dev, 0x007C, + (b43legacy_radio_read16(dev, 0x007C) + | 0x0002)); + b43legacy_radio_write16(dev, 0x0050, 0x0020); + } + if (phy->radio_rev <= 2) { + b43legacy_radio_write16(dev, 0x007C, 0x0020); + b43legacy_radio_write16(dev, 0x005A, 0x0070); + b43legacy_radio_write16(dev, 0x005B, 0x007B); + b43legacy_radio_write16(dev, 0x005C, 0x00B0); + } + b43legacy_radio_write16(dev, 0x007A, + (b43legacy_radio_read16(dev, + 0x007A) & 0x00F8) | 0x0007); + + b43legacy_radio_selectchannel(dev, old_channel, 0); + + b43legacy_phy_write(dev, 0x0014, 0x0200); + if (phy->radio_rev >= 6) + b43legacy_phy_write(dev, 0x002A, 0x88C2); + else + b43legacy_phy_write(dev, 0x002A, 0x8AC0); + b43legacy_phy_write(dev, 0x0038, 0x0668); + b43legacy_radio_set_txpower_bg(dev, 0xFFFF, 0xFFFF, 0xFFFF); + if (phy->radio_rev <= 5) + b43legacy_phy_write(dev, 0x005D, (b43legacy_phy_read(dev, + 0x005D) & 0xFF80) | 0x0003); + if (phy->radio_rev <= 2) + b43legacy_radio_write16(dev, 0x005D, 0x000D); + + if (phy->analog == 4) { + b43legacy_write16(dev, 0x03E4, 0x0009); + b43legacy_phy_write(dev, 0x61, b43legacy_phy_read(dev, 0x61) + & 0xFFF); + } else + b43legacy_phy_write(dev, 0x0002, (b43legacy_phy_read(dev, + 0x0002) & 0xFFC0) | 0x0004); + if (phy->type == B43legacy_PHYTYPE_G) + b43legacy_write16(dev, 0x03E6, 0x0); + if (phy->type == B43legacy_PHYTYPE_B) { + b43legacy_write16(dev, 0x03E6, 0x8140); + b43legacy_phy_write(dev, 0x0016, 0x0410); + b43legacy_phy_write(dev, 0x0017, 0x0820); + b43legacy_phy_write(dev, 0x0062, 0x0007); + b43legacy_radio_init2050(dev); + b43legacy_phy_lo_g_measure(dev); + if (dev->dev->bus->sprom.r1.boardflags_lo & + B43legacy_BFL_RSSI) { + b43legacy_calc_nrssi_slope(dev); + b43legacy_calc_nrssi_threshold(dev); + } + b43legacy_phy_init_pctl(dev); + } +} + +static void b43legacy_calc_loopback_gain(struct b43legacy_wldev *dev) +{ + struct b43legacy_phy *phy = &dev->phy; + u16 backup_phy[15] = {0}; + u16 backup_radio[3]; + u16 backup_bband; + u16 i; + u16 loop1_cnt; + u16 loop1_done; + u16 loop1_omitted; + u16 loop2_done; + + backup_phy[0] = b43legacy_phy_read(dev, 0x0429); + backup_phy[1] = b43legacy_phy_read(dev, 0x0001); + backup_phy[2] = b43legacy_phy_read(dev, 0x0811); + backup_phy[3] = b43legacy_phy_read(dev, 0x0812); + if (phy->rev != 1) { + backup_phy[4] = b43legacy_phy_read(dev, 0x0814); + backup_phy[5] = b43legacy_phy_read(dev, 0x0815); + } + backup_phy[6] = b43legacy_phy_read(dev, 0x005A); + backup_phy[7] = b43legacy_phy_read(dev, 0x0059); + backup_phy[8] = b43legacy_phy_read(dev, 0x0058); + backup_phy[9] = b43legacy_phy_read(dev, 0x000A); + backup_phy[10] = b43legacy_phy_read(dev, 0x0003); + backup_phy[11] = b43legacy_phy_read(dev, 0x080F); + backup_phy[12] = b43legacy_phy_read(dev, 0x0810); + backup_phy[13] = b43legacy_phy_read(dev, 0x002B); + backup_phy[14] = b43legacy_phy_read(dev, 0x0015); + b43legacy_phy_read(dev, 0x002D); /* dummy read */ + backup_bband = phy->bbatt; + backup_radio[0] = b43legacy_radio_read16(dev, 0x0052); + backup_radio[1] = b43legacy_radio_read16(dev, 0x0043); + backup_radio[2] = 
b43legacy_radio_read16(dev, 0x007A); + + b43legacy_phy_write(dev, 0x0429, + b43legacy_phy_read(dev, 0x0429) & 0x3FFF); + b43legacy_phy_write(dev, 0x0001, + b43legacy_phy_read(dev, 0x0001) & 0x8000); + b43legacy_phy_write(dev, 0x0811, + b43legacy_phy_read(dev, 0x0811) | 0x0002); + b43legacy_phy_write(dev, 0x0812, + b43legacy_phy_read(dev, 0x0812) & 0xFFFD); + b43legacy_phy_write(dev, 0x0811, + b43legacy_phy_read(dev, 0x0811) | 0x0001); + b43legacy_phy_write(dev, 0x0812, + b43legacy_phy_read(dev, 0x0812) & 0xFFFE); + if (phy->rev != 1) { + b43legacy_phy_write(dev, 0x0814, + b43legacy_phy_read(dev, 0x0814) | 0x0001); + b43legacy_phy_write(dev, 0x0815, + b43legacy_phy_read(dev, 0x0815) & 0xFFFE); + b43legacy_phy_write(dev, 0x0814, + b43legacy_phy_read(dev, 0x0814) | 0x0002); + b43legacy_phy_write(dev, 0x0815, + b43legacy_phy_read(dev, 0x0815) & 0xFFFD); + } + b43legacy_phy_write(dev, 0x0811, b43legacy_phy_read(dev, 0x0811) | + 0x000C); + b43legacy_phy_write(dev, 0x0812, b43legacy_phy_read(dev, 0x0812) | + 0x000C); + + b43legacy_phy_write(dev, 0x0811, (b43legacy_phy_read(dev, 0x0811) + & 0xFFCF) | 0x0030); + b43legacy_phy_write(dev, 0x0812, (b43legacy_phy_read(dev, 0x0812) + & 0xFFCF) | 0x0010); + + b43legacy_phy_write(dev, 0x005A, 0x0780); + b43legacy_phy_write(dev, 0x0059, 0xC810); + b43legacy_phy_write(dev, 0x0058, 0x000D); + if (phy->analog == 0) + b43legacy_phy_write(dev, 0x0003, 0x0122); + else + b43legacy_phy_write(dev, 0x000A, + b43legacy_phy_read(dev, 0x000A) + | 0x2000); + if (phy->rev != 1) { + b43legacy_phy_write(dev, 0x0814, + b43legacy_phy_read(dev, 0x0814) | 0x0004); + b43legacy_phy_write(dev, 0x0815, + b43legacy_phy_read(dev, 0x0815) & 0xFFFB); + } + b43legacy_phy_write(dev, 0x0003, + (b43legacy_phy_read(dev, 0x0003) + & 0xFF9F) | 0x0040); + if (phy->radio_ver == 0x2050 && phy->radio_rev == 2) { + b43legacy_radio_write16(dev, 0x0052, 0x0000); + b43legacy_radio_write16(dev, 0x0043, + (b43legacy_radio_read16(dev, 0x0043) + & 0xFFF0) | 0x0009); + loop1_cnt = 9; + } else if (phy->radio_rev == 8) { + b43legacy_radio_write16(dev, 0x0043, 0x000F); + loop1_cnt = 15; + } else + loop1_cnt = 0; + + b43legacy_phy_set_baseband_attenuation(dev, 11); + + if (phy->rev >= 3) + b43legacy_phy_write(dev, 0x080F, 0xC020); + else + b43legacy_phy_write(dev, 0x080F, 0x8020); + b43legacy_phy_write(dev, 0x0810, 0x0000); + + b43legacy_phy_write(dev, 0x002B, + (b43legacy_phy_read(dev, 0x002B) + & 0xFFC0) | 0x0001); + b43legacy_phy_write(dev, 0x002B, + (b43legacy_phy_read(dev, 0x002B) + & 0xC0FF) | 0x0800); + b43legacy_phy_write(dev, 0x0811, + b43legacy_phy_read(dev, 0x0811) | 0x0100); + b43legacy_phy_write(dev, 0x0812, + b43legacy_phy_read(dev, 0x0812) & 0xCFFF); + if (dev->dev->bus->sprom.r1.boardflags_lo & B43legacy_BFL_EXTLNA) { + if (phy->rev >= 7) { + b43legacy_phy_write(dev, 0x0811, + b43legacy_phy_read(dev, 0x0811) + | 0x0800); + b43legacy_phy_write(dev, 0x0812, + b43legacy_phy_read(dev, 0x0812) + | 0x8000); + } + } + b43legacy_radio_write16(dev, 0x007A, + b43legacy_radio_read16(dev, 0x007A) + & 0x00F7); + + for (i = 0; i < loop1_cnt; i++) { + b43legacy_radio_write16(dev, 0x0043, loop1_cnt); + b43legacy_phy_write(dev, 0x0812, + (b43legacy_phy_read(dev, 0x0812) + & 0xF0FF) | (i << 8)); + b43legacy_phy_write(dev, 0x0015, + (b43legacy_phy_read(dev, 0x0015) + & 0x0FFF) | 0xA000); + b43legacy_phy_write(dev, 0x0015, + (b43legacy_phy_read(dev, 0x0015) + & 0x0FFF) | 0xF000); + udelay(20); + if (b43legacy_phy_read(dev, 0x002D) >= 0x0DFC) + break; + } + loop1_done = i; + loop1_omitted = loop1_cnt - 
loop1_done; + + loop2_done = 0; + if (loop1_done >= 8) { + b43legacy_phy_write(dev, 0x0812, + b43legacy_phy_read(dev, 0x0812) + | 0x0030); + for (i = loop1_done - 8; i < 16; i++) { + b43legacy_phy_write(dev, 0x0812, + (b43legacy_phy_read(dev, 0x0812) + & 0xF0FF) | (i << 8)); + b43legacy_phy_write(dev, 0x0015, + (b43legacy_phy_read(dev, 0x0015) + & 0x0FFF) | 0xA000); + b43legacy_phy_write(dev, 0x0015, + (b43legacy_phy_read(dev, 0x0015) + & 0x0FFF) | 0xF000); + udelay(20); + if (b43legacy_phy_read(dev, 0x002D) >= 0x0DFC) + break; + } + } + + if (phy->rev != 1) { + b43legacy_phy_write(dev, 0x0814, backup_phy[4]); + b43legacy_phy_write(dev, 0x0815, backup_phy[5]); + } + b43legacy_phy_write(dev, 0x005A, backup_phy[6]); + b43legacy_phy_write(dev, 0x0059, backup_phy[7]); + b43legacy_phy_write(dev, 0x0058, backup_phy[8]); + b43legacy_phy_write(dev, 0x000A, backup_phy[9]); + b43legacy_phy_write(dev, 0x0003, backup_phy[10]); + b43legacy_phy_write(dev, 0x080F, backup_phy[11]); + b43legacy_phy_write(dev, 0x0810, backup_phy[12]); + b43legacy_phy_write(dev, 0x002B, backup_phy[13]); + b43legacy_phy_write(dev, 0x0015, backup_phy[14]); + + b43legacy_phy_set_baseband_attenuation(dev, backup_bband); + + b43legacy_radio_write16(dev, 0x0052, backup_radio[0]); + b43legacy_radio_write16(dev, 0x0043, backup_radio[1]); + b43legacy_radio_write16(dev, 0x007A, backup_radio[2]); + + b43legacy_phy_write(dev, 0x0811, backup_phy[2] | 0x0003); + udelay(10); + b43legacy_phy_write(dev, 0x0811, backup_phy[2]); + b43legacy_phy_write(dev, 0x0812, backup_phy[3]); + b43legacy_phy_write(dev, 0x0429, backup_phy[0]); + b43legacy_phy_write(dev, 0x0001, backup_phy[1]); + + phy->loopback_gain[0] = ((loop1_done * 6) - (loop1_omitted * 4)) - 11; + phy->loopback_gain[1] = (24 - (3 * loop2_done)) * 2; +} + +static void b43legacy_phy_initg(struct b43legacy_wldev *dev) +{ + struct b43legacy_phy *phy = &dev->phy; + u16 tmp; + + if (phy->rev == 1) + b43legacy_phy_initb5(dev); + else + b43legacy_phy_initb6(dev); + if (phy->rev >= 2 || phy->gmode) + b43legacy_phy_inita(dev); + + if (phy->rev >= 2) { + b43legacy_phy_write(dev, 0x0814, 0x0000); + b43legacy_phy_write(dev, 0x0815, 0x0000); + } + if (phy->rev == 2) { + b43legacy_phy_write(dev, 0x0811, 0x0000); + b43legacy_phy_write(dev, 0x0015, 0x00C0); + } + if (phy->rev > 5) { + b43legacy_phy_write(dev, 0x0811, 0x0400); + b43legacy_phy_write(dev, 0x0015, 0x00C0); + } + if (phy->rev >= 2 || phy->gmode) { + tmp = b43legacy_phy_read(dev, 0x0400) & 0xFF; + if (tmp == 3 || tmp == 5) { + b43legacy_phy_write(dev, 0x04C2, 0x1816); + b43legacy_phy_write(dev, 0x04C3, 0x8006); + if (tmp == 5) + b43legacy_phy_write(dev, 0x04CC, + (b43legacy_phy_read(dev, + 0x04CC) & 0x00FF) | + 0x1F00); + } + b43legacy_phy_write(dev, 0x047E, 0x0078); + } + if (phy->radio_rev == 8) { + b43legacy_phy_write(dev, 0x0801, b43legacy_phy_read(dev, 0x0801) + | 0x0080); + b43legacy_phy_write(dev, 0x043E, b43legacy_phy_read(dev, 0x043E) + | 0x0004); + } + if (phy->rev >= 2 && phy->gmode) + b43legacy_calc_loopback_gain(dev); + if (phy->radio_rev != 8) { + if (phy->initval == 0xFFFF) + phy->initval = b43legacy_radio_init2050(dev); + else + b43legacy_radio_write16(dev, 0x0078, phy->initval); + } + if (phy->txctl2 == 0xFFFF) + b43legacy_phy_lo_g_measure(dev); + else { + if (phy->radio_ver == 0x2050 && phy->radio_rev == 8) + b43legacy_radio_write16(dev, 0x0052, + (phy->txctl1 << 4) | + phy->txctl2); + else + b43legacy_radio_write16(dev, 0x0052, + (b43legacy_radio_read16(dev, + 0x0052) & 0xFFF0) | + phy->txctl1); + if (phy->rev >= 6) + 
b43legacy_phy_write(dev, 0x0036, + (b43legacy_phy_read(dev, 0x0036) + & 0x0FFF) | (phy->txctl2 << 12)); + if (dev->dev->bus->sprom.r1.boardflags_lo & + B43legacy_BFL_PACTRL) + b43legacy_phy_write(dev, 0x002E, 0x8075); + else + b43legacy_phy_write(dev, 0x002E, 0x807F); + if (phy->rev < 2) + b43legacy_phy_write(dev, 0x002F, 0x0101); + else + b43legacy_phy_write(dev, 0x002F, 0x0202); + } + if (phy->gmode || phy->rev >= 2) { + b43legacy_phy_lo_adjust(dev, 0); + b43legacy_phy_write(dev, 0x080F, 0x8078); + } + + if (!(dev->dev->bus->sprom.r1.boardflags_lo & B43legacy_BFL_RSSI)) { + /* The specs state to update the NRSSI LT with + * the value 0x7FFFFFFF here. I think that is some weird + * compiler optimization in the original driver. + * Essentially, what we do here is resetting all NRSSI LT + * entries to -32 (see the limit_value() in nrssi_hw_update()) + */ + b43legacy_nrssi_hw_update(dev, 0xFFFF); + b43legacy_calc_nrssi_threshold(dev); + } else if (phy->gmode || phy->rev >= 2) { + if (phy->nrssi[0] == -1000) { + B43legacy_WARN_ON(phy->nrssi[1] != -1000); + b43legacy_calc_nrssi_slope(dev); + } else { + B43legacy_WARN_ON(phy->nrssi[1] == -1000); + b43legacy_calc_nrssi_threshold(dev); + } + } + if (phy->radio_rev == 8) + b43legacy_phy_write(dev, 0x0805, 0x3230); + b43legacy_phy_init_pctl(dev); + if (dev->dev->bus->chip_id == 0x4306 + && dev->dev->bus->chip_package == 2) { + b43legacy_phy_write(dev, 0x0429, + b43legacy_phy_read(dev, 0x0429) & 0xBFFF); + b43legacy_phy_write(dev, 0x04C3, + b43legacy_phy_read(dev, 0x04C3) & 0x7FFF); + } +} + +static u16 b43legacy_phy_lo_b_r15_loop(struct b43legacy_wldev *dev) +{ + int i; + u16 ret = 0; + unsigned long flags; + + local_irq_save(flags); + for (i = 0; i < 10; i++) { + b43legacy_phy_write(dev, 0x0015, 0xAFA0); + udelay(1); + b43legacy_phy_write(dev, 0x0015, 0xEFA0); + udelay(10); + b43legacy_phy_write(dev, 0x0015, 0xFFA0); + udelay(40); + ret += b43legacy_phy_read(dev, 0x002C); + } + local_irq_restore(flags); + b43legacy_voluntary_preempt(); + + return ret; +} + +void b43legacy_phy_lo_b_measure(struct b43legacy_wldev *dev) +{ + struct b43legacy_phy *phy = &dev->phy; + u16 regstack[12] = { 0 }; + u16 mls; + u16 fval; + int i; + int j; + + regstack[0] = b43legacy_phy_read(dev, 0x0015); + regstack[1] = b43legacy_radio_read16(dev, 0x0052) & 0xFFF0; + + if (phy->radio_ver == 0x2053) { + regstack[2] = b43legacy_phy_read(dev, 0x000A); + regstack[3] = b43legacy_phy_read(dev, 0x002A); + regstack[4] = b43legacy_phy_read(dev, 0x0035); + regstack[5] = b43legacy_phy_read(dev, 0x0003); + regstack[6] = b43legacy_phy_read(dev, 0x0001); + regstack[7] = b43legacy_phy_read(dev, 0x0030); + + regstack[8] = b43legacy_radio_read16(dev, 0x0043); + regstack[9] = b43legacy_radio_read16(dev, 0x007A); + regstack[10] = b43legacy_read16(dev, 0x03EC); + regstack[11] = b43legacy_radio_read16(dev, 0x0052) & 0x00F0; + + b43legacy_phy_write(dev, 0x0030, 0x00FF); + b43legacy_write16(dev, 0x03EC, 0x3F3F); + b43legacy_phy_write(dev, 0x0035, regstack[4] & 0xFF7F); + b43legacy_radio_write16(dev, 0x007A, regstack[9] & 0xFFF0); + } + b43legacy_phy_write(dev, 0x0015, 0xB000); + b43legacy_phy_write(dev, 0x002B, 0x0004); + + if (phy->radio_ver == 0x2053) { + b43legacy_phy_write(dev, 0x002B, 0x0203); + b43legacy_phy_write(dev, 0x002A, 0x08A3); + } + + phy->minlowsig[0] = 0xFFFF; + + for (i = 0; i < 4; i++) { + b43legacy_radio_write16(dev, 0x0052, regstack[1] | i); + b43legacy_phy_lo_b_r15_loop(dev); + } + for (i = 0; i < 10; i++) { + b43legacy_radio_write16(dev, 0x0052, regstack[1] | i); + mls = 
b43legacy_phy_lo_b_r15_loop(dev) / 10; + if (mls < phy->minlowsig[0]) { + phy->minlowsig[0] = mls; + phy->minlowsigpos[0] = i; + } + } + b43legacy_radio_write16(dev, 0x0052, regstack[1] + | phy->minlowsigpos[0]); + + phy->minlowsig[1] = 0xFFFF; + + for (i = -4; i < 5; i += 2) { + for (j = -4; j < 5; j += 2) { + if (j < 0) + fval = (0x0100 * i) + j + 0x0100; + else + fval = (0x0100 * i) + j; + b43legacy_phy_write(dev, 0x002F, fval); + mls = b43legacy_phy_lo_b_r15_loop(dev) / 10; + if (mls < phy->minlowsig[1]) { + phy->minlowsig[1] = mls; + phy->minlowsigpos[1] = fval; + } + } + } + phy->minlowsigpos[1] += 0x0101; + + b43legacy_phy_write(dev, 0x002F, phy->minlowsigpos[1]); + if (phy->radio_ver == 0x2053) { + b43legacy_phy_write(dev, 0x000A, regstack[2]); + b43legacy_phy_write(dev, 0x002A, regstack[3]); + b43legacy_phy_write(dev, 0x0035, regstack[4]); + b43legacy_phy_write(dev, 0x0003, regstack[5]); + b43legacy_phy_write(dev, 0x0001, regstack[6]); + b43legacy_phy_write(dev, 0x0030, regstack[7]); + + b43legacy_radio_write16(dev, 0x0043, regstack[8]); + b43legacy_radio_write16(dev, 0x007A, regstack[9]); + + b43legacy_radio_write16(dev, 0x0052, + (b43legacy_radio_read16(dev, 0x0052) + & 0x000F) | regstack[11]); + + b43legacy_write16(dev, 0x03EC, regstack[10]); + } + b43legacy_phy_write(dev, 0x0015, regstack[0]); +} + +static inline +u16 b43legacy_phy_lo_g_deviation_subval(struct b43legacy_wldev *dev, + u16 control) +{ + struct b43legacy_phy *phy = &dev->phy; + u16 ret; + unsigned long flags; + + local_irq_save(flags); + if (phy->gmode) { + b43legacy_phy_write(dev, 0x15, 0xE300); + control <<= 8; + b43legacy_phy_write(dev, 0x0812, control | 0x00B0); + udelay(5); + b43legacy_phy_write(dev, 0x0812, control | 0x00B2); + udelay(2); + b43legacy_phy_write(dev, 0x0812, control | 0x00B3); + udelay(4); + b43legacy_phy_write(dev, 0x0015, 0xF300); + udelay(8); + } else { + b43legacy_phy_write(dev, 0x0015, control | 0xEFA0); + udelay(2); + b43legacy_phy_write(dev, 0x0015, control | 0xEFE0); + udelay(4); + b43legacy_phy_write(dev, 0x0015, control | 0xFFE0); + udelay(8); + } + ret = b43legacy_phy_read(dev, 0x002D); + local_irq_restore(flags); + b43legacy_voluntary_preempt(); + + return ret; +} + +static u32 b43legacy_phy_lo_g_singledeviation(struct b43legacy_wldev *dev, + u16 control) +{ + int i; + u32 ret = 0; + + for (i = 0; i < 8; i++) + ret += b43legacy_phy_lo_g_deviation_subval(dev, control); + + return ret; +} + +/* Write the LocalOscillator CONTROL */ +static inline +void b43legacy_lo_write(struct b43legacy_wldev *dev, + struct b43legacy_lopair *pair) +{ + u16 value; + + value = (u8)(pair->low); + value |= ((u8)(pair->high)) << 8; + +#ifdef CONFIG_B43LEGACY_DEBUG + /* Sanity check. 
*/ + if (pair->low < -8 || pair->low > 8 || + pair->high < -8 || pair->high > 8) { + struct b43legacy_phy *phy = &dev->phy; + b43legacydbg(dev->wl, + "WARNING: Writing invalid LOpair " + "(low: %d, high: %d, index: %lu)\n", + pair->low, pair->high, + (unsigned long)(pair - phy->_lo_pairs)); + dump_stack(); + } +#endif + + b43legacy_phy_write(dev, B43legacy_PHY_G_LO_CONTROL, value); +} + +static inline +struct b43legacy_lopair *b43legacy_find_lopair(struct b43legacy_wldev *dev, + u16 bbatt, + u16 rfatt, + u16 tx) +{ + static const u8 dict[10] = { 11, 10, 11, 12, 13, 12, 13, 12, 13, 12 }; + struct b43legacy_phy *phy = &dev->phy; + + if (bbatt > 6) + bbatt = 6; + B43legacy_WARN_ON(rfatt >= 10); + + if (tx == 3) + return b43legacy_get_lopair(phy, rfatt, bbatt); + return b43legacy_get_lopair(phy, dict[rfatt], bbatt); +} + +static inline +struct b43legacy_lopair *b43legacy_current_lopair(struct b43legacy_wldev *dev) +{ + struct b43legacy_phy *phy = &dev->phy; + + return b43legacy_find_lopair(dev, phy->bbatt, + phy->rfatt, phy->txctl1); +} + +/* Adjust B/G LO */ +void b43legacy_phy_lo_adjust(struct b43legacy_wldev *dev, int fixed) +{ + struct b43legacy_lopair *pair; + + if (fixed) { + /* Use fixed values. Only for initialization. */ + pair = b43legacy_find_lopair(dev, 2, 3, 0); + } else + pair = b43legacy_current_lopair(dev); + b43legacy_lo_write(dev, pair); +} + +static void b43legacy_phy_lo_g_measure_txctl2(struct b43legacy_wldev *dev) +{ + struct b43legacy_phy *phy = &dev->phy; + u16 txctl2 = 0; + u16 i; + u32 smallest; + u32 tmp; + + b43legacy_radio_write16(dev, 0x0052, 0x0000); + udelay(10); + smallest = b43legacy_phy_lo_g_singledeviation(dev, 0); + for (i = 0; i < 16; i++) { + b43legacy_radio_write16(dev, 0x0052, i); + udelay(10); + tmp = b43legacy_phy_lo_g_singledeviation(dev, 0); + if (tmp < smallest) { + smallest = tmp; + txctl2 = i; + } + } + phy->txctl2 = txctl2; +} + +static +void b43legacy_phy_lo_g_state(struct b43legacy_wldev *dev, + const struct b43legacy_lopair *in_pair, + struct b43legacy_lopair *out_pair, + u16 r27) +{ + static const struct b43legacy_lopair transitions[8] = { + { .high = 1, .low = 1, }, + { .high = 1, .low = 0, }, + { .high = 1, .low = -1, }, + { .high = 0, .low = -1, }, + { .high = -1, .low = -1, }, + { .high = -1, .low = 0, }, + { .high = -1, .low = 1, }, + { .high = 0, .low = 1, }, + }; + struct b43legacy_lopair lowest_transition = { + .high = in_pair->high, + .low = in_pair->low, + }; + struct b43legacy_lopair tmp_pair; + struct b43legacy_lopair transition; + int i = 12; + int state = 0; + int found_lower; + int j; + int begin; + int end; + u32 lowest_deviation; + u32 tmp; + + /* Note that in_pair and out_pair can point to the same pair. + * Be careful. 
*/ + + b43legacy_lo_write(dev, &lowest_transition); + lowest_deviation = b43legacy_phy_lo_g_singledeviation(dev, r27); + do { + found_lower = 0; + B43legacy_WARN_ON(!(state >= 0 && state <= 8)); + if (state == 0) { + begin = 1; + end = 8; + } else if (state % 2 == 0) { + begin = state - 1; + end = state + 1; + } else { + begin = state - 2; + end = state + 2; + } + if (begin < 1) + begin += 8; + if (end > 8) + end -= 8; + + j = begin; + tmp_pair.high = lowest_transition.high; + tmp_pair.low = lowest_transition.low; + while (1) { + B43legacy_WARN_ON(!(j >= 1 && j <= 8)); + transition.high = tmp_pair.high + + transitions[j - 1].high; + transition.low = tmp_pair.low + transitions[j - 1].low; + if ((abs(transition.low) < 9) + && (abs(transition.high) < 9)) { + b43legacy_lo_write(dev, &transition); + tmp = b43legacy_phy_lo_g_singledeviation(dev, + r27); + if (tmp < lowest_deviation) { + lowest_deviation = tmp; + state = j; + found_lower = 1; + + lowest_transition.high = + transition.high; + lowest_transition.low = transition.low; + } + } + if (j == end) + break; + if (j == 8) + j = 1; + else + j++; + } + } while (i-- && found_lower); + + out_pair->high = lowest_transition.high; + out_pair->low = lowest_transition.low; +} + +/* Set the baseband attenuation value on chip. */ +void b43legacy_phy_set_baseband_attenuation(struct b43legacy_wldev *dev, + u16 bbatt) +{ + struct b43legacy_phy *phy = &dev->phy; + u16 value; + + if (phy->analog == 0) { + value = (b43legacy_read16(dev, 0x03E6) & 0xFFF0); + value |= (bbatt & 0x000F); + b43legacy_write16(dev, 0x03E6, value); + return; + } + + if (phy->analog > 1) { + value = b43legacy_phy_read(dev, 0x0060) & 0xFFC3; + value |= (bbatt << 2) & 0x003C; + } else { + value = b43legacy_phy_read(dev, 0x0060) & 0xFF87; + value |= (bbatt << 3) & 0x0078; + } + b43legacy_phy_write(dev, 0x0060, value); +} + +/* http://bcm-specs.sipsolutions.net/LocalOscillator/Measure */ +void b43legacy_phy_lo_g_measure(struct b43legacy_wldev *dev) +{ + static const u8 pairorder[10] = { 3, 1, 5, 7, 9, 2, 0, 4, 6, 8 }; + const int is_initializing = (b43legacy_status(dev) + < B43legacy_STAT_STARTED); + struct b43legacy_phy *phy = &dev->phy; + u16 h; + u16 i; + u16 oldi = 0; + u16 j; + struct b43legacy_lopair control; + struct b43legacy_lopair *tmp_control; + u16 tmp; + u16 regstack[16] = { 0 }; + u8 oldchannel; + + /* XXX: What are these? 
*/ + u8 r27 = 0; + u16 r31; + + oldchannel = phy->channel; + /* Setup */ + if (phy->gmode) { + regstack[0] = b43legacy_phy_read(dev, B43legacy_PHY_G_CRS); + regstack[1] = b43legacy_phy_read(dev, 0x0802); + b43legacy_phy_write(dev, B43legacy_PHY_G_CRS, regstack[0] + & 0x7FFF); + b43legacy_phy_write(dev, 0x0802, regstack[1] & 0xFFFC); + } + regstack[3] = b43legacy_read16(dev, 0x03E2); + b43legacy_write16(dev, 0x03E2, regstack[3] | 0x8000); + regstack[4] = b43legacy_read16(dev, B43legacy_MMIO_CHANNEL_EXT); + regstack[5] = b43legacy_phy_read(dev, 0x15); + regstack[6] = b43legacy_phy_read(dev, 0x2A); + regstack[7] = b43legacy_phy_read(dev, 0x35); + regstack[8] = b43legacy_phy_read(dev, 0x60); + regstack[9] = b43legacy_radio_read16(dev, 0x43); + regstack[10] = b43legacy_radio_read16(dev, 0x7A); + regstack[11] = b43legacy_radio_read16(dev, 0x52); + if (phy->gmode) { + regstack[12] = b43legacy_phy_read(dev, 0x0811); + regstack[13] = b43legacy_phy_read(dev, 0x0812); + regstack[14] = b43legacy_phy_read(dev, 0x0814); + regstack[15] = b43legacy_phy_read(dev, 0x0815); + } + b43legacy_radio_selectchannel(dev, 6, 0); + if (phy->gmode) { + b43legacy_phy_write(dev, B43legacy_PHY_G_CRS, regstack[0] + & 0x7FFF); + b43legacy_phy_write(dev, 0x0802, regstack[1] & 0xFFFC); + b43legacy_dummy_transmission(dev); + } + b43legacy_radio_write16(dev, 0x0043, 0x0006); + + b43legacy_phy_set_baseband_attenuation(dev, 2); + + b43legacy_write16(dev, B43legacy_MMIO_CHANNEL_EXT, 0x0000); + b43legacy_phy_write(dev, 0x002E, 0x007F); + b43legacy_phy_write(dev, 0x080F, 0x0078); + b43legacy_phy_write(dev, 0x0035, regstack[7] & ~(1 << 7)); + b43legacy_radio_write16(dev, 0x007A, regstack[10] & 0xFFF0); + b43legacy_phy_write(dev, 0x002B, 0x0203); + b43legacy_phy_write(dev, 0x002A, 0x08A3); + if (phy->gmode) { + b43legacy_phy_write(dev, 0x0814, regstack[14] | 0x0003); + b43legacy_phy_write(dev, 0x0815, regstack[15] & 0xFFFC); + b43legacy_phy_write(dev, 0x0811, 0x01B3); + b43legacy_phy_write(dev, 0x0812, 0x00B2); + } + if (is_initializing) + b43legacy_phy_lo_g_measure_txctl2(dev); + b43legacy_phy_write(dev, 0x080F, 0x8078); + + /* Measure */ + control.low = 0; + control.high = 0; + for (h = 0; h < 10; h++) { + /* Loop over each possible RadioAttenuation (0-9) */ + i = pairorder[h]; + if (is_initializing) { + if (i == 3) { + control.low = 0; + control.high = 0; + } else if (((i % 2 == 1) && (oldi % 2 == 1)) || + ((i % 2 == 0) && (oldi % 2 == 0))) { + tmp_control = b43legacy_get_lopair(phy, oldi, + 0); + memcpy(&control, tmp_control, sizeof(control)); + } else { + tmp_control = b43legacy_get_lopair(phy, 3, 0); + memcpy(&control, tmp_control, sizeof(control)); + } + } + /* Loop over each possible BasebandAttenuation/2 */ + for (j = 0; j < 4; j++) { + if (is_initializing) { + tmp = i * 2 + j; + r27 = 0; + r31 = 0; + if (tmp > 14) { + r31 = 1; + if (tmp > 17) + r27 = 1; + if (tmp > 19) + r27 = 2; + } + } else { + tmp_control = b43legacy_get_lopair(phy, i, + j * 2); + if (!tmp_control->used) + continue; + memcpy(&control, tmp_control, sizeof(control)); + r27 = 3; + r31 = 0; + } + b43legacy_radio_write16(dev, 0x43, i); + b43legacy_radio_write16(dev, 0x52, phy->txctl2); + udelay(10); + b43legacy_voluntary_preempt(); + + b43legacy_phy_set_baseband_attenuation(dev, j * 2); + + tmp = (regstack[10] & 0xFFF0); + if (r31) + tmp |= 0x0008; + b43legacy_radio_write16(dev, 0x007A, tmp); + + tmp_control = b43legacy_get_lopair(phy, i, j * 2); + b43legacy_phy_lo_g_state(dev, &control, tmp_control, + r27); + } + oldi = i; + } + /* Loop over each possible 
RadioAttenuation (10-13) */ + for (i = 10; i < 14; i++) { + /* Loop over each possible BasebandAttenuation/2 */ + for (j = 0; j < 4; j++) { + if (is_initializing) { + tmp_control = b43legacy_get_lopair(phy, i - 9, + j * 2); + memcpy(&control, tmp_control, sizeof(control)); + /* FIXME: The next line is wrong, as the + * following if statement can never trigger. */ + tmp = (i - 9) * 2 + j - 5; + r27 = 0; + r31 = 0; + if (tmp > 14) { + r31 = 1; + if (tmp > 17) + r27 = 1; + if (tmp > 19) + r27 = 2; + } + } else { + tmp_control = b43legacy_get_lopair(phy, i - 9, + j * 2); + if (!tmp_control->used) + continue; + memcpy(&control, tmp_control, sizeof(control)); + r27 = 3; + r31 = 0; + } + b43legacy_radio_write16(dev, 0x43, i - 9); + /* FIXME: shouldn't txctl1 be zero in the next line + * and 3 in the loop above? */ + b43legacy_radio_write16(dev, 0x52, + phy->txctl2 + | (3/*txctl1*/ << 4)); + udelay(10); + b43legacy_voluntary_preempt(); + + b43legacy_phy_set_baseband_attenuation(dev, j * 2); + + tmp = (regstack[10] & 0xFFF0); + if (r31) + tmp |= 0x0008; + b43legacy_radio_write16(dev, 0x7A, tmp); + + tmp_control = b43legacy_get_lopair(phy, i, j * 2); + b43legacy_phy_lo_g_state(dev, &control, tmp_control, + r27); + } + } + + /* Restoration */ + if (phy->gmode) { + b43legacy_phy_write(dev, 0x0015, 0xE300); + b43legacy_phy_write(dev, 0x0812, (r27 << 8) | 0xA0); + udelay(5); + b43legacy_phy_write(dev, 0x0812, (r27 << 8) | 0xA2); + udelay(2); + b43legacy_phy_write(dev, 0x0812, (r27 << 8) | 0xA3); + b43legacy_voluntary_preempt(); + } else + b43legacy_phy_write(dev, 0x0015, r27 | 0xEFA0); + b43legacy_phy_lo_adjust(dev, is_initializing); + b43legacy_phy_write(dev, 0x002E, 0x807F); + if (phy->gmode) + b43legacy_phy_write(dev, 0x002F, 0x0202); + else + b43legacy_phy_write(dev, 0x002F, 0x0101); + b43legacy_write16(dev, B43legacy_MMIO_CHANNEL_EXT, regstack[4]); + b43legacy_phy_write(dev, 0x0015, regstack[5]); + b43legacy_phy_write(dev, 0x002A, regstack[6]); + b43legacy_phy_write(dev, 0x0035, regstack[7]); + b43legacy_phy_write(dev, 0x0060, regstack[8]); + b43legacy_radio_write16(dev, 0x0043, regstack[9]); + b43legacy_radio_write16(dev, 0x007A, regstack[10]); + regstack[11] &= 0x00F0; + regstack[11] |= (b43legacy_radio_read16(dev, 0x52) & 0x000F); + b43legacy_radio_write16(dev, 0x52, regstack[11]); + b43legacy_write16(dev, 0x03E2, regstack[3]); + if (phy->gmode) { + b43legacy_phy_write(dev, 0x0811, regstack[12]); + b43legacy_phy_write(dev, 0x0812, regstack[13]); + b43legacy_phy_write(dev, 0x0814, regstack[14]); + b43legacy_phy_write(dev, 0x0815, regstack[15]); + b43legacy_phy_write(dev, B43legacy_PHY_G_CRS, regstack[0]); + b43legacy_phy_write(dev, 0x0802, regstack[1]); + } + b43legacy_radio_selectchannel(dev, oldchannel, 1); + +#ifdef CONFIG_B43LEGACY_DEBUG + { + /* Sanity check for all lopairs. 
*/ + for (i = 0; i < B43legacy_LO_COUNT; i++) { + tmp_control = phy->_lo_pairs + i; + if (tmp_control->low < -8 || tmp_control->low > 8 || + tmp_control->high < -8 || tmp_control->high > 8) + b43legacywarn(dev->wl, + "WARNING: Invalid LOpair (low: %d, high:" + " %d, index: %d)\n", + tmp_control->low, tmp_control->high, i); + } + } +#endif /* CONFIG_B43LEGACY_DEBUG */ +} + +static +void b43legacy_phy_lo_mark_current_used(struct b43legacy_wldev *dev) +{ + struct b43legacy_lopair *pair; + + pair = b43legacy_current_lopair(dev); + pair->used = 1; +} + +void b43legacy_phy_lo_mark_all_unused(struct b43legacy_wldev *dev) +{ + struct b43legacy_phy *phy = &dev->phy; + struct b43legacy_lopair *pair; + int i; + + for (i = 0; i < B43legacy_LO_COUNT; i++) { + pair = phy->_lo_pairs + i; + pair->used = 0; + } +} + +/* http://bcm-specs.sipsolutions.net/EstimatePowerOut + * This function converts a TSSI value to dBm in Q5.2 + */ +static s8 b43legacy_phy_estimate_power_out(struct b43legacy_wldev *dev, s8 tssi) +{ + struct b43legacy_phy *phy = &dev->phy; + s8 dbm = 0; + s32 tmp; + + tmp = phy->idle_tssi; + tmp += tssi; + tmp -= phy->savedpctlreg; + + switch (phy->type) { + case B43legacy_PHYTYPE_B: + case B43legacy_PHYTYPE_G: + tmp = limit_value(tmp, 0x00, 0x3F); + dbm = phy->tssi2dbm[tmp]; + break; + default: + B43legacy_BUG_ON(1); + } + + return dbm; +} + +/* http://bcm-specs.sipsolutions.net/RecalculateTransmissionPower */ +void b43legacy_phy_xmitpower(struct b43legacy_wldev *dev) +{ + struct b43legacy_phy *phy = &dev->phy; + u16 tmp; + u16 txpower; + s8 v0; + s8 v1; + s8 v2; + s8 v3; + s8 average; + int max_pwr; + s16 desired_pwr; + s16 estimated_pwr; + s16 pwr_adjust; + s16 radio_att_delta; + s16 baseband_att_delta; + s16 radio_attenuation; + s16 baseband_attenuation; + unsigned long phylock_flags; + + if (phy->savedpctlreg == 0xFFFF) + return; + if ((dev->dev->bus->boardinfo.type == 0x0416) && + is_bcm_board_vendor(dev)) + return; +#ifdef CONFIG_B43LEGACY_DEBUG + if (phy->manual_txpower_control) + return; +#endif + + B43legacy_BUG_ON(!(phy->type == B43legacy_PHYTYPE_B || + phy->type == B43legacy_PHYTYPE_G)); + tmp = b43legacy_shm_read16(dev, B43legacy_SHM_SHARED, 0x0058); + v0 = (s8)(tmp & 0x00FF); + v1 = (s8)((tmp & 0xFF00) >> 8); + tmp = b43legacy_shm_read16(dev, B43legacy_SHM_SHARED, 0x005A); + v2 = (s8)(tmp & 0x00FF); + v3 = (s8)((tmp & 0xFF00) >> 8); + tmp = 0; + + if (v0 == 0x7F || v1 == 0x7F || v2 == 0x7F || v3 == 0x7F) { + tmp = b43legacy_shm_read16(dev, B43legacy_SHM_SHARED, + 0x0070); + v0 = (s8)(tmp & 0x00FF); + v1 = (s8)((tmp & 0xFF00) >> 8); + tmp = b43legacy_shm_read16(dev, B43legacy_SHM_SHARED, + 0x0072); + v2 = (s8)(tmp & 0x00FF); + v3 = (s8)((tmp & 0xFF00) >> 8); + if (v0 == 0x7F || v1 == 0x7F || v2 == 0x7F || v3 == 0x7F) + return; + v0 = (v0 + 0x20) & 0x3F; + v1 = (v1 + 0x20) & 0x3F; + v2 = (v2 + 0x20) & 0x3F; + v3 = (v3 + 0x20) & 0x3F; + tmp = 1; + } + b43legacy_radio_clear_tssi(dev); + + average = (v0 + v1 + v2 + v3 + 2) / 4; + + if (tmp && (b43legacy_shm_read16(dev, B43legacy_SHM_SHARED, 0x005E) + & 0x8)) + average -= 13; + + estimated_pwr = b43legacy_phy_estimate_power_out(dev, average); + + max_pwr = dev->dev->bus->sprom.r1.maxpwr_bg; + + if ((dev->dev->bus->sprom.r1.boardflags_lo + & B43legacy_BFL_PACTRL) && + (phy->type == B43legacy_PHYTYPE_G)) + max_pwr -= 0x3; + if (unlikely(max_pwr <= 0)) { + b43legacywarn(dev->wl, "Invalid max-TX-power value in SPROM." 
+ "\n"); + max_pwr = 74; /* fake it */ + dev->dev->bus->sprom.r1.maxpwr_bg = max_pwr; + } + + /* Use regulatory information to get the maximum power. + * In the absence of such data from mac80211, we will use 20 dBm, which + * is the value for the EU, US, Canada, and most of the world. + * The regulatory maximum is reduced by the antenna gain (from sprom) + * and 1.5 dBm (a safety factor??). The result is in Q5.2 format + * which accounts for the factor of 4 */ +#define REG_MAX_PWR 20 + max_pwr = min(REG_MAX_PWR * 4 - dev->dev->bus->sprom.r1.antenna_gain_bg + - 0x6, max_pwr); + + /* find the desired power in Q5.2 - power_level is in dBm + * and limit it - max_pwr is already in Q5.2 */ + desired_pwr = limit_value(phy->power_level << 2, 0, max_pwr); + if (b43legacy_debug(dev, B43legacy_DBG_XMITPOWER)) + b43legacydbg(dev->wl, "Current TX power output: " Q52_FMT + " dBm, Desired TX power output: " Q52_FMT + " dBm\n", Q52_ARG(estimated_pwr), + Q52_ARG(desired_pwr)); + /* Check if we need to adjust the current power. The factor of 2 is + * for damping */ + pwr_adjust = (desired_pwr - estimated_pwr) / 2; + /* RF attenuation delta + * The minus sign is because lower attenuation => more power */ + radio_att_delta = -(pwr_adjust + 7) >> 3; + /* Baseband attenuation delta */ + baseband_att_delta = -(pwr_adjust >> 1) - (4 * radio_att_delta); + /* Do we need to adjust anything? */ + if ((radio_att_delta == 0) && (baseband_att_delta == 0)) { + b43legacy_phy_lo_mark_current_used(dev); + return; + } + + /* Calculate the new attenuation values. */ + baseband_attenuation = phy->bbatt; + baseband_attenuation += baseband_att_delta; + radio_attenuation = phy->rfatt; + radio_attenuation += radio_att_delta; + + /* Get baseband and radio attenuation values into permitted ranges. + * baseband 0-11, radio 0-9. + * Radio attenuation affects power level 4 times as much as baseband. 
+ */ + if (radio_attenuation < 0) { + baseband_attenuation -= (4 * -radio_attenuation); + radio_attenuation = 0; + } else if (radio_attenuation > 9) { + baseband_attenuation += (4 * (radio_attenuation - 9)); + radio_attenuation = 9; + } else { + while (baseband_attenuation < 0 && radio_attenuation > 0) { + baseband_attenuation += 4; + radio_attenuation--; + } + while (baseband_attenuation > 11 && radio_attenuation < 9) { + baseband_attenuation -= 4; + radio_attenuation++; + } + } + baseband_attenuation = limit_value(baseband_attenuation, 0, 11); + + txpower = phy->txctl1; + if ((phy->radio_ver == 0x2050) && (phy->radio_rev == 2)) { + if (radio_attenuation <= 1) { + if (txpower == 0) { + txpower = 3; + radio_attenuation += 2; + baseband_attenuation += 2; + } else if (dev->dev->bus->sprom.r1.boardflags_lo + & B43legacy_BFL_PACTRL) { + baseband_attenuation += 4 * + (radio_attenuation - 2); + radio_attenuation = 2; + } + } else if (radio_attenuation > 4 && txpower != 0) { + txpower = 0; + if (baseband_attenuation < 3) { + radio_attenuation -= 3; + baseband_attenuation += 2; + } else { + radio_attenuation -= 2; + baseband_attenuation -= 2; + } + } + } + /* Save the control values */ + phy->txctl1 = txpower; + baseband_attenuation = limit_value(baseband_attenuation, 0, 11); + radio_attenuation = limit_value(radio_attenuation, 0, 9); + phy->rfatt = radio_attenuation; + phy->bbatt = baseband_attenuation; + + /* Adjust the hardware */ + b43legacy_phy_lock(dev, phylock_flags); + b43legacy_radio_lock(dev); + b43legacy_radio_set_txpower_bg(dev, baseband_attenuation, + radio_attenuation, txpower); + b43legacy_phy_lo_mark_current_used(dev); + b43legacy_radio_unlock(dev); + b43legacy_phy_unlock(dev, phylock_flags); +} + +static inline +s32 b43legacy_tssi2dbm_ad(s32 num, s32 den) +{ + if (num < 0) + return num/den; + else + return (num+den/2)/den; +} + +static inline +s8 b43legacy_tssi2dbm_entry(s8 entry [], u8 index, s16 pab0, s16 pab1, s16 pab2) +{ + s32 m1; + s32 m2; + s32 f = 256; + s32 q; + s32 delta; + s8 i = 0; + + m1 = b43legacy_tssi2dbm_ad(16 * pab0 + index * pab1, 32); + m2 = max(b43legacy_tssi2dbm_ad(32768 + index * pab2, 256), 1); + do { + if (i > 15) + return -EINVAL; + q = b43legacy_tssi2dbm_ad(f * 4096 - + b43legacy_tssi2dbm_ad(m2 * f, 16) * + f, 2048); + delta = abs(q - f); + f = q; + i++; + } while (delta >= 2); + entry[index] = limit_value(b43legacy_tssi2dbm_ad(m1 * f, 8192), + -127, 128); + return 0; +} + +/* http://bcm-specs.sipsolutions.net/TSSI_to_DBM_Table */ +int b43legacy_phy_init_tssi2dbm_table(struct b43legacy_wldev *dev) +{ + struct b43legacy_phy *phy = &dev->phy; + s16 pab0; + s16 pab1; + s16 pab2; + u8 idx; + s8 *dyn_tssi2dbm; + + B43legacy_WARN_ON(!(phy->type == B43legacy_PHYTYPE_B || + phy->type == B43legacy_PHYTYPE_G)); + pab0 = (s16)(dev->dev->bus->sprom.r1.pa0b0); + pab1 = (s16)(dev->dev->bus->sprom.r1.pa0b1); + pab2 = (s16)(dev->dev->bus->sprom.r1.pa0b2); + + if ((dev->dev->bus->chip_id == 0x4301) && (phy->radio_ver != 0x2050)) { + phy->idle_tssi = 0x34; + phy->tssi2dbm = b43legacy_tssi2dbm_b_table; + return 0; + } + + if (pab0 != 0 && pab1 != 0 && pab2 != 0 && + pab0 != -1 && pab1 != -1 && pab2 != -1) { + /* The pabX values are set in SPROM. Use them. 
*/ + if ((s8)dev->dev->bus->sprom.r1.itssi_bg != 0 && + (s8)dev->dev->bus->sprom.r1.itssi_bg != -1) + phy->idle_tssi = (s8)(dev->dev->bus->sprom.r1.itssi_bg); + else + phy->idle_tssi = 62; + dyn_tssi2dbm = kmalloc(64, GFP_KERNEL); + if (dyn_tssi2dbm == NULL) { + b43legacyerr(dev->wl, "Could not allocate memory " + "for tssi2dbm table\n"); + return -ENOMEM; + } + for (idx = 0; idx < 64; idx++) + if (b43legacy_tssi2dbm_entry(dyn_tssi2dbm, idx, pab0, + pab1, pab2)) { + phy->tssi2dbm = NULL; + b43legacyerr(dev->wl, "Could not generate " + "tssi2dBm table\n"); + kfree(dyn_tssi2dbm); + return -ENODEV; + } + phy->tssi2dbm = dyn_tssi2dbm; + phy->dyn_tssi_tbl = 1; + } else { + /* pabX values not set in SPROM. */ + switch (phy->type) { + case B43legacy_PHYTYPE_B: + phy->idle_tssi = 0x34; + phy->tssi2dbm = b43legacy_tssi2dbm_b_table; + break; + case B43legacy_PHYTYPE_G: + phy->idle_tssi = 0x34; + phy->tssi2dbm = b43legacy_tssi2dbm_g_table; + break; + } + } + + return 0; +} + +int b43legacy_phy_init(struct b43legacy_wldev *dev) +{ + struct b43legacy_phy *phy = &dev->phy; + int err = -ENODEV; + + switch (phy->type) { + case B43legacy_PHYTYPE_B: + switch (phy->rev) { + case 2: + b43legacy_phy_initb2(dev); + err = 0; + break; + case 4: + b43legacy_phy_initb4(dev); + err = 0; + break; + case 5: + b43legacy_phy_initb5(dev); + err = 0; + break; + case 6: + b43legacy_phy_initb6(dev); + err = 0; + break; + } + break; + case B43legacy_PHYTYPE_G: + b43legacy_phy_initg(dev); + err = 0; + break; + } + if (err) + b43legacyerr(dev->wl, "Unknown PHYTYPE found\n"); + + return err; +} + +void b43legacy_phy_set_antenna_diversity(struct b43legacy_wldev *dev) +{ + struct b43legacy_phy *phy = &dev->phy; + u16 antennadiv; + u16 offset; + u16 value; + u32 ucodeflags; + + antennadiv = phy->antenna_diversity; + + if (antennadiv == 0xFFFF) + antennadiv = 3; + B43legacy_WARN_ON(antennadiv > 3); + + ucodeflags = b43legacy_shm_read32(dev, B43legacy_SHM_SHARED, + B43legacy_UCODEFLAGS_OFFSET); + b43legacy_shm_write32(dev, B43legacy_SHM_SHARED, + B43legacy_UCODEFLAGS_OFFSET, + ucodeflags & ~B43legacy_UCODEFLAG_AUTODIV); + + switch (phy->type) { + case B43legacy_PHYTYPE_G: + offset = 0x0400; + + if (antennadiv == 2) + value = (3/*automatic*/ << 7); + else + value = (antennadiv << 7); + b43legacy_phy_write(dev, offset + 1, + (b43legacy_phy_read(dev, offset + 1) + & 0x7E7F) | value); + + if (antennadiv >= 2) { + if (antennadiv == 2) + value = (antennadiv << 7); + else + value = (0/*force0*/ << 7); + b43legacy_phy_write(dev, offset + 0x2B, + (b43legacy_phy_read(dev, + offset + 0x2B) + & 0xFEFF) | value); + } + + if (phy->type == B43legacy_PHYTYPE_G) { + if (antennadiv >= 2) + b43legacy_phy_write(dev, 0x048C, + b43legacy_phy_read(dev, + 0x048C) | 0x2000); + else + b43legacy_phy_write(dev, 0x048C, + b43legacy_phy_read(dev, + 0x048C) & ~0x2000); + if (phy->rev >= 2) { + b43legacy_phy_write(dev, 0x0461, + b43legacy_phy_read(dev, + 0x0461) | 0x0010); + b43legacy_phy_write(dev, 0x04AD, + (b43legacy_phy_read(dev, + 0x04AD) + & 0x00FF) | 0x0015); + if (phy->rev == 2) + b43legacy_phy_write(dev, 0x0427, + 0x0008); + else + b43legacy_phy_write(dev, 0x0427, + (b43legacy_phy_read(dev, 0x0427) + & 0x00FF) | 0x0008); + } else if (phy->rev >= 6) + b43legacy_phy_write(dev, 0x049B, 0x00DC); + } else { + if (phy->rev < 3) + b43legacy_phy_write(dev, 0x002B, + (b43legacy_phy_read(dev, + 0x002B) & 0x00FF) + | 0x0024); + else { + b43legacy_phy_write(dev, 0x0061, + b43legacy_phy_read(dev, + 0x0061) | 0x0010); + if (phy->rev == 3) { + b43legacy_phy_write(dev, 
0x0093, + 0x001D); + b43legacy_phy_write(dev, 0x0027, + 0x0008); + } else { + b43legacy_phy_write(dev, 0x0093, + 0x003A); + b43legacy_phy_write(dev, 0x0027, + (b43legacy_phy_read(dev, 0x0027) + & 0x00FF) | 0x0008); + } + } + } + break; + case B43legacy_PHYTYPE_B: + if (dev->dev->id.revision == 2) + value = (3/*automatic*/ << 7); + else + value = (antennadiv << 7); + b43legacy_phy_write(dev, 0x03E2, + (b43legacy_phy_read(dev, 0x03E2) + & 0xFE7F) | value); + break; + default: + B43legacy_WARN_ON(1); + } + + if (antennadiv >= 2) { + ucodeflags = b43legacy_shm_read32(dev, B43legacy_SHM_SHARED, + B43legacy_UCODEFLAGS_OFFSET); + b43legacy_shm_write32(dev, B43legacy_SHM_SHARED, + B43legacy_UCODEFLAGS_OFFSET, + ucodeflags | B43legacy_UCODEFLAG_AUTODIV); + } + + phy->antenna_diversity = antennadiv; +} + +/* Set the PowerSavingControlBits. + * Bitvalues: + * 0 => unset the bit + * 1 => set the bit + * -1 => calculate the bit + */ +void b43legacy_power_saving_ctl_bits(struct b43legacy_wldev *dev, + int bit25, int bit26) +{ + int i; + u32 status; + +/* FIXME: Force 25 to off and 26 to on for now: */ +bit25 = 0; +bit26 = 1; + + if (bit25 == -1) { + /* TODO: If powersave is not off and FIXME is not set and we + * are not in adhoc and thus is not an AP and we are + * associated, set bit 25 */ + } + if (bit26 == -1) { + /* TODO: If the device is awake or this is an AP, or we are + * scanning, or FIXME, or we are associated, or FIXME, + * or the latest PS-Poll packet sent was successful, + * set bit26 */ + } + status = b43legacy_read32(dev, B43legacy_MMIO_STATUS_BITFIELD); + if (bit25) + status |= B43legacy_SBF_PS1; + else + status &= ~B43legacy_SBF_PS1; + if (bit26) + status |= B43legacy_SBF_PS2; + else + status &= ~B43legacy_SBF_PS2; + b43legacy_write32(dev, B43legacy_MMIO_STATUS_BITFIELD, status); + if (bit26 && dev->dev->id.revision >= 5) { + for (i = 0; i < 100; i++) { + if (b43legacy_shm_read32(dev, B43legacy_SHM_SHARED, + 0x0040) != 4) + break; + udelay(10); + } + } +} diff --git a/drivers/net/wireless/b43legacy/phy.h b/drivers/net/wireless/b43legacy/phy.h new file mode 100644 index 0000000000000000000000000000000000000000..f11b4271714c1ad2e52b71bae36ba7b5a4bfc55c --- /dev/null +++ b/drivers/net/wireless/b43legacy/phy.h @@ -0,0 +1,219 @@ +/* + + Broadcom B43legacy wireless driver + + Copyright (c) 2005 Martin Langer , + Stefano Brivio + Michael Buesch + Danny van Dyk + Andreas Jaggi + Copyright (c) 2007 Larry Finger + + Some parts of the code in this file are derived from the ipw2200 + driver Copyright(c) 2003 - 2004 Intel Corporation. + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; see the file COPYING. If not, write to + the Free Software Foundation, Inc., 51 Franklin Steet, Fifth Floor, + Boston, MA 02110-1301, USA. 
+ +*/ + +#ifndef B43legacy_PHY_H_ +#define B43legacy_PHY_H_ + +#include + +enum { + B43legacy_ANTENNA0, /* Antenna 0 */ + B43legacy_ANTENNA1, /* Antenna 1 */ + B43legacy_ANTENNA_AUTO1, /* Automatic, starting with antenna 1 */ + B43legacy_ANTENNA_AUTO0, /* Automatic, starting with antenna 0 */ + + B43legacy_ANTENNA_AUTO = B43legacy_ANTENNA_AUTO0, + B43legacy_ANTENNA_DEFAULT = B43legacy_ANTENNA_AUTO, +}; + +enum { + B43legacy_INTERFMODE_NONE, + B43legacy_INTERFMODE_NONWLAN, + B43legacy_INTERFMODE_MANUALWLAN, + B43legacy_INTERFMODE_AUTOWLAN, +}; + +/*** PHY Registers ***/ + +/* Routing */ +#define B43legacy_PHYROUTE_OFDM_GPHY 0x400 +#define B43legacy_PHYROUTE_EXT_GPHY 0x800 + +/* Base registers. */ +#define B43legacy_PHY_BASE(reg) (reg) +/* OFDM (A) registers of a G-PHY */ +#define B43legacy_PHY_OFDM(reg) ((reg) | B43legacy_PHYROUTE_OFDM_GPHY) +/* Extended G-PHY registers */ +#define B43legacy_PHY_EXTG(reg) ((reg) | B43legacy_PHYROUTE_EXT_GPHY) + + +/* Extended G-PHY Registers */ +#define B43legacy_PHY_CLASSCTL B43legacy_PHY_EXTG(0x02) /* Classify control */ +#define B43legacy_PHY_GTABCTL B43legacy_PHY_EXTG(0x03) /* G-PHY table control (see below) */ +#define B43legacy_PHY_GTABOFF 0x03FF /* G-PHY table offset (see below) */ +#define B43legacy_PHY_GTABNR 0xFC00 /* G-PHY table number (see below) */ +#define B43legacy_PHY_GTABNR_SHIFT 10 +#define B43legacy_PHY_GTABDATA B43legacy_PHY_EXTG(0x04) /* G-PHY table data */ +#define B43legacy_PHY_LO_MASK B43legacy_PHY_EXTG(0x0F) /* Local Oscillator control mask */ +#define B43legacy_PHY_LO_CTL B43legacy_PHY_EXTG(0x10) /* Local Oscillator control */ +#define B43legacy_PHY_RFOVER B43legacy_PHY_EXTG(0x11) /* RF override */ +#define B43legacy_PHY_RFOVERVAL B43legacy_PHY_EXTG(0x12) /* RF override value */ +/*** OFDM table numbers ***/ +#define B43legacy_OFDMTAB(number, offset) \ + (((number) << B43legacy_PHY_OTABLENR_SHIFT) \ + | (offset)) +#define B43legacy_OFDMTAB_AGC1 B43legacy_OFDMTAB(0x00, 0) +#define B43legacy_OFDMTAB_GAIN0 B43legacy_OFDMTAB(0x00, 0) +#define B43legacy_OFDMTAB_GAINX B43legacy_OFDMTAB(0x01, 0) +#define B43legacy_OFDMTAB_GAIN1 B43legacy_OFDMTAB(0x01, 4) +#define B43legacy_OFDMTAB_AGC3 B43legacy_OFDMTAB(0x02, 0) +#define B43legacy_OFDMTAB_GAIN2 B43legacy_OFDMTAB(0x02, 3) +#define B43legacy_OFDMTAB_LNAHPFGAIN1 B43legacy_OFDMTAB(0x03, 0) +#define B43legacy_OFDMTAB_WRSSI B43legacy_OFDMTAB(0x04, 0) +#define B43legacy_OFDMTAB_LNAHPFGAIN2 B43legacy_OFDMTAB(0x04, 0) +#define B43legacy_OFDMTAB_NOISESCALE B43legacy_OFDMTAB(0x05, 0) +#define B43legacy_OFDMTAB_AGC2 B43legacy_OFDMTAB(0x06, 0) +#define B43legacy_OFDMTAB_ROTOR B43legacy_OFDMTAB(0x08, 0) +#define B43legacy_OFDMTAB_ADVRETARD B43legacy_OFDMTAB(0x09, 0) +#define B43legacy_OFDMTAB_DAC B43legacy_OFDMTAB(0x0C, 0) +#define B43legacy_OFDMTAB_DC B43legacy_OFDMTAB(0x0E, 7) +#define B43legacy_OFDMTAB_PWRDYN2 B43legacy_OFDMTAB(0x0E, 12) +#define B43legacy_OFDMTAB_LNAGAIN B43legacy_OFDMTAB(0x0E, 13) + +#define B43legacy_OFDMTAB_LPFGAIN B43legacy_OFDMTAB(0x0F, 12) +#define B43legacy_OFDMTAB_RSSI B43legacy_OFDMTAB(0x10, 0) + +#define B43legacy_OFDMTAB_AGC1_R1 B43legacy_OFDMTAB(0x13, 0) +#define B43legacy_OFDMTAB_GAINX_R1 B43legacy_OFDMTAB(0x14, 0) +#define B43legacy_OFDMTAB_MINSIGSQ B43legacy_OFDMTAB(0x14, 1) +#define B43legacy_OFDMTAB_AGC3_R1 B43legacy_OFDMTAB(0x15, 0) +#define B43legacy_OFDMTAB_WRSSI_R1 B43legacy_OFDMTAB(0x15, 4) +#define B43legacy_OFDMTAB_TSSI B43legacy_OFDMTAB(0x15, 0) +#define B43legacy_OFDMTAB_DACRFPABB B43legacy_OFDMTAB(0x16, 0) +#define B43legacy_OFDMTAB_DACOFF 
B43legacy_OFDMTAB(0x17, 0) +#define B43legacy_OFDMTAB_DCBIAS B43legacy_OFDMTAB(0x18, 0) + +void b43legacy_put_attenuation_into_ranges(int *_bbatt, int *_rfatt); + +/* OFDM (A) PHY Registers */ +#define B43legacy_PHY_VERSION_OFDM B43legacy_PHY_OFDM(0x00) /* Versioning register for A-PHY */ +#define B43legacy_PHY_BBANDCFG B43legacy_PHY_OFDM(0x01) /* Baseband config */ +#define B43legacy_PHY_BBANDCFG_RXANT 0x180 /* RX Antenna selection */ +#define B43legacy_PHY_BBANDCFG_RXANT_SHIFT 7 +#define B43legacy_PHY_PWRDOWN B43legacy_PHY_OFDM(0x03) /* Powerdown */ +#define B43legacy_PHY_CRSTHRES1 B43legacy_PHY_OFDM(0x06) /* CRS Threshold 1 */ +#define B43legacy_PHY_LNAHPFCTL B43legacy_PHY_OFDM(0x1C) /* LNA/HPF control */ +#define B43legacy_PHY_ADIVRELATED B43legacy_PHY_OFDM(0x27) /* FIXME rename */ +#define B43legacy_PHY_CRS0 B43legacy_PHY_OFDM(0x29) +#define B43legacy_PHY_ANTDWELL B43legacy_PHY_OFDM(0x2B) /* Antenna dwell */ +#define B43legacy_PHY_ANTDWELL_AUTODIV1 0x0100 /* Automatic RX diversity start antenna */ +#define B43legacy_PHY_ENCORE B43legacy_PHY_OFDM(0x49) /* "Encore" (RangeMax / BroadRange) */ +#define B43legacy_PHY_ENCORE_EN 0x0200 /* Encore enable */ +#define B43legacy_PHY_LMS B43legacy_PHY_OFDM(0x55) +#define B43legacy_PHY_OFDM61 B43legacy_PHY_OFDM(0x61) /* FIXME rename */ +#define B43legacy_PHY_OFDM61_10 0x0010 /* FIXME rename */ +#define B43legacy_PHY_IQBAL B43legacy_PHY_OFDM(0x69) /* I/Q balance */ +#define B43legacy_PHY_OTABLECTL B43legacy_PHY_OFDM(0x72) /* OFDM table control (see below) */ +#define B43legacy_PHY_OTABLEOFF 0x03FF /* OFDM table offset (see below) */ +#define B43legacy_PHY_OTABLENR 0xFC00 /* OFDM table number (see below) */ +#define B43legacy_PHY_OTABLENR_SHIFT 10 +#define B43legacy_PHY_OTABLEI B43legacy_PHY_OFDM(0x73) /* OFDM table data I */ +#define B43legacy_PHY_OTABLEQ B43legacy_PHY_OFDM(0x74) /* OFDM table data Q */ +#define B43legacy_PHY_HPWR_TSSICTL B43legacy_PHY_OFDM(0x78) /* Hardware power TSSI control */ +#define B43legacy_PHY_NRSSITHRES B43legacy_PHY_OFDM(0x8A) /* NRSSI threshold */ +#define B43legacy_PHY_ANTWRSETT B43legacy_PHY_OFDM(0x8C) /* Antenna WR settle */ +#define B43legacy_PHY_ANTWRSETT_ARXDIV 0x2000 /* Automatic RX diversity enabled */ +#define B43legacy_PHY_CLIPPWRDOWNT B43legacy_PHY_OFDM(0x93) /* Clip powerdown threshold */ +#define B43legacy_PHY_OFDM9B B43legacy_PHY_OFDM(0x9B) /* FIXME rename */ +#define B43legacy_PHY_N1P1GAIN B43legacy_PHY_OFDM(0xA0) +#define B43legacy_PHY_P1P2GAIN B43legacy_PHY_OFDM(0xA1) +#define B43legacy_PHY_N1N2GAIN B43legacy_PHY_OFDM(0xA2) +#define B43legacy_PHY_CLIPTHRES B43legacy_PHY_OFDM(0xA3) +#define B43legacy_PHY_CLIPN1P2THRES B43legacy_PHY_OFDM(0xA4) +#define B43legacy_PHY_DIVSRCHIDX B43legacy_PHY_OFDM(0xA8) /* Divider search gain/index */ +#define B43legacy_PHY_CLIPP2THRES B43legacy_PHY_OFDM(0xA9) +#define B43legacy_PHY_CLIPP3THRES B43legacy_PHY_OFDM(0xAA) +#define B43legacy_PHY_DIVP1P2GAIN B43legacy_PHY_OFDM(0xAB) +#define B43legacy_PHY_DIVSRCHGAINBACK B43legacy_PHY_OFDM(0xAD) /* Divider search gain back */ +#define B43legacy_PHY_DIVSRCHGAINCHNG B43legacy_PHY_OFDM(0xAE) /* Divider search gain change */ +#define B43legacy_PHY_CRSTHRES1_R1 B43legacy_PHY_OFDM(0xC0) /* CRS Threshold 1 (rev 1 only) */ +#define B43legacy_PHY_CRSTHRES2_R1 B43legacy_PHY_OFDM(0xC1) /* CRS Threshold 2 (rev 1 only) */ +#define B43legacy_PHY_TSSIP_LTBASE B43legacy_PHY_OFDM(0x380) /* TSSI power lookup table base */ +#define B43legacy_PHY_DC_LTBASE B43legacy_PHY_OFDM(0x3A0) /* DC lookup table base */ +#define B43legacy_PHY_GAIN_LTBASE 
B43legacy_PHY_OFDM(0x3C0) /* Gain lookup table base */ + +void b43legacy_put_attenuation_into_ranges(int *_bbatt, int *_rfatt); + +/* Masks for the different PHY versioning registers. */ +#define B43legacy_PHYVER_ANALOG 0xF000 +#define B43legacy_PHYVER_ANALOG_SHIFT 12 +#define B43legacy_PHYVER_TYPE 0x0F00 +#define B43legacy_PHYVER_TYPE_SHIFT 8 +#define B43legacy_PHYVER_VERSION 0x00FF + +struct b43legacy_wldev; + +void b43legacy_raw_phy_lock(struct b43legacy_wldev *dev); +#define b43legacy_phy_lock(bcm, flags) \ + do { \ + local_irq_save(flags); \ + b43legacy_raw_phy_lock(bcm); \ + } while (0) +void b43legacy_raw_phy_unlock(struct b43legacy_wldev *dev); +#define b43legacy_phy_unlock(bcm, flags) \ + do { \ + b43legacy_raw_phy_unlock(bcm); \ + local_irq_restore(flags); \ + } while (0) + +/* Card uses the loopback gain stuff */ +#define has_loopback_gain(phy) \ + (((phy)->rev > 1) || ((phy)->gmode)) + +u16 b43legacy_phy_read(struct b43legacy_wldev *dev, u16 offset); +void b43legacy_phy_write(struct b43legacy_wldev *dev, u16 offset, u16 val); + +int b43legacy_phy_init_tssi2dbm_table(struct b43legacy_wldev *dev); +int b43legacy_phy_init(struct b43legacy_wldev *dev); + +void b43legacy_set_rx_antenna(struct b43legacy_wldev *dev, int antenna); + +void b43legacy_phy_set_antenna_diversity(struct b43legacy_wldev *dev); +void b43legacy_phy_calibrate(struct b43legacy_wldev *dev); +int b43legacy_phy_connect(struct b43legacy_wldev *dev, int connect); + +void b43legacy_phy_lo_b_measure(struct b43legacy_wldev *dev); +void b43legacy_phy_lo_g_measure(struct b43legacy_wldev *dev); +void b43legacy_phy_xmitpower(struct b43legacy_wldev *dev); + +/* Adjust the LocalOscillator to the saved values. + * "fixed" is only set to 1 once in initialization. Set to 0 otherwise. + */ +void b43legacy_phy_lo_adjust(struct b43legacy_wldev *dev, int fixed); +void b43legacy_phy_lo_mark_all_unused(struct b43legacy_wldev *dev); + +void b43legacy_phy_set_baseband_attenuation(struct b43legacy_wldev *dev, + u16 baseband_attenuation); + +void b43legacy_power_saving_ctl_bits(struct b43legacy_wldev *dev, + int bit25, int bit26); + +#endif /* B43legacy_PHY_H_ */ diff --git a/drivers/net/wireless/b43legacy/pio.c b/drivers/net/wireless/b43legacy/pio.c new file mode 100644 index 0000000000000000000000000000000000000000..de843ac147ae4016625bd12401d0bd02243d017f --- /dev/null +++ b/drivers/net/wireless/b43legacy/pio.c @@ -0,0 +1,668 @@ +/* + + Broadcom B43legacy wireless driver + + PIO Transmission + + Copyright (c) 2005 Michael Buesch + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; see the file COPYING. If not, write to + the Free Software Foundation, Inc., 51 Franklin Steet, Fifth Floor, + Boston, MA 02110-1301, USA. 
+ +*/ + +#include "b43legacy.h" +#include "pio.h" +#include "main.h" +#include "xmit.h" + +#include + + +static void tx_start(struct b43legacy_pioqueue *queue) +{ + b43legacy_pio_write(queue, B43legacy_PIO_TXCTL, + B43legacy_PIO_TXCTL_INIT); +} + +static void tx_octet(struct b43legacy_pioqueue *queue, + u8 octet) +{ + if (queue->need_workarounds) { + b43legacy_pio_write(queue, B43legacy_PIO_TXDATA, octet); + b43legacy_pio_write(queue, B43legacy_PIO_TXCTL, + B43legacy_PIO_TXCTL_WRITELO); + } else { + b43legacy_pio_write(queue, B43legacy_PIO_TXCTL, + B43legacy_PIO_TXCTL_WRITELO); + b43legacy_pio_write(queue, B43legacy_PIO_TXDATA, octet); + } +} + +static u16 tx_get_next_word(const u8 *txhdr, + const u8 *packet, + size_t txhdr_size, + unsigned int *pos) +{ + const u8 *source; + unsigned int i = *pos; + u16 ret; + + if (i < txhdr_size) + source = txhdr; + else { + source = packet; + i -= txhdr_size; + } + ret = le16_to_cpu(*((__le16 *)(source + i))); + *pos += 2; + + return ret; +} + +static void tx_data(struct b43legacy_pioqueue *queue, + u8 *txhdr, + const u8 *packet, + unsigned int octets) +{ + u16 data; + unsigned int i = 0; + + if (queue->need_workarounds) { + data = tx_get_next_word(txhdr, packet, + sizeof(struct b43legacy_txhdr_fw3), &i); + b43legacy_pio_write(queue, B43legacy_PIO_TXDATA, data); + } + b43legacy_pio_write(queue, B43legacy_PIO_TXCTL, + B43legacy_PIO_TXCTL_WRITELO | + B43legacy_PIO_TXCTL_WRITEHI); + while (i < octets - 1) { + data = tx_get_next_word(txhdr, packet, + sizeof(struct b43legacy_txhdr_fw3), &i); + b43legacy_pio_write(queue, B43legacy_PIO_TXDATA, data); + } + if (octets % 2) + tx_octet(queue, packet[octets - + sizeof(struct b43legacy_txhdr_fw3) - 1]); +} + +static void tx_complete(struct b43legacy_pioqueue *queue, + struct sk_buff *skb) +{ + if (queue->need_workarounds) { + b43legacy_pio_write(queue, B43legacy_PIO_TXDATA, + skb->data[skb->len - 1]); + b43legacy_pio_write(queue, B43legacy_PIO_TXCTL, + B43legacy_PIO_TXCTL_WRITELO | + B43legacy_PIO_TXCTL_COMPLETE); + } else + b43legacy_pio_write(queue, B43legacy_PIO_TXCTL, + B43legacy_PIO_TXCTL_COMPLETE); +} + +static u16 generate_cookie(struct b43legacy_pioqueue *queue, + struct b43legacy_pio_txpacket *packet) +{ + u16 cookie = 0x0000; + int packetindex; + + /* We use the upper 4 bits for the PIO + * controller ID and the lower 12 bits + * for the packet index (in the cache). 
+ */ + switch (queue->mmio_base) { + case B43legacy_MMIO_PIO1_BASE: + break; + case B43legacy_MMIO_PIO2_BASE: + cookie = 0x1000; + break; + case B43legacy_MMIO_PIO3_BASE: + cookie = 0x2000; + break; + case B43legacy_MMIO_PIO4_BASE: + cookie = 0x3000; + break; + default: + B43legacy_WARN_ON(1); + } + packetindex = pio_txpacket_getindex(packet); + B43legacy_WARN_ON(!(((u16)packetindex & 0xF000) == 0x0000)); + cookie |= (u16)packetindex; + + return cookie; +} + +static +struct b43legacy_pioqueue *parse_cookie(struct b43legacy_wldev *dev, + u16 cookie, + struct b43legacy_pio_txpacket **packet) +{ + struct b43legacy_pio *pio = &dev->pio; + struct b43legacy_pioqueue *queue = NULL; + int packetindex; + + switch (cookie & 0xF000) { + case 0x0000: + queue = pio->queue0; + break; + case 0x1000: + queue = pio->queue1; + break; + case 0x2000: + queue = pio->queue2; + break; + case 0x3000: + queue = pio->queue3; + break; + default: + B43legacy_WARN_ON(1); + } + packetindex = (cookie & 0x0FFF); + B43legacy_WARN_ON(!(packetindex >= 0 && packetindex + < B43legacy_PIO_MAXTXPACKETS)); + *packet = &(queue->tx_packets_cache[packetindex]); + + return queue; +} + +union txhdr_union { + struct b43legacy_txhdr_fw3 txhdr_fw3; +}; + +static void pio_tx_write_fragment(struct b43legacy_pioqueue *queue, + struct sk_buff *skb, + struct b43legacy_pio_txpacket *packet, + size_t txhdr_size) +{ + union txhdr_union txhdr_data; + u8 *txhdr = NULL; + unsigned int octets; + + txhdr = (u8 *)(&txhdr_data.txhdr_fw3); + + B43legacy_WARN_ON(skb_shinfo(skb)->nr_frags != 0); + b43legacy_generate_txhdr(queue->dev, + txhdr, skb->data, skb->len, + &packet->txstat.control, + generate_cookie(queue, packet)); + + tx_start(queue); + octets = skb->len + txhdr_size; + if (queue->need_workarounds) + octets--; + tx_data(queue, txhdr, (u8 *)skb->data, octets); + tx_complete(queue, skb); +} + +static void free_txpacket(struct b43legacy_pio_txpacket *packet, + int irq_context) +{ + struct b43legacy_pioqueue *queue = packet->queue; + + if (packet->skb) { + if (irq_context) + dev_kfree_skb_irq(packet->skb); + else + dev_kfree_skb(packet->skb); + } + list_move(&packet->list, &queue->txfree); + queue->nr_txfree++; +} + +static int pio_tx_packet(struct b43legacy_pio_txpacket *packet) +{ + struct b43legacy_pioqueue *queue = packet->queue; + struct sk_buff *skb = packet->skb; + u16 octets; + + octets = (u16)skb->len + sizeof(struct b43legacy_txhdr_fw3); + if (queue->tx_devq_size < octets) { + b43legacywarn(queue->dev->wl, "PIO queue too small. " + "Dropping packet.\n"); + /* Drop it silently (return success) */ + free_txpacket(packet, 1); + return 0; + } + B43legacy_WARN_ON(queue->tx_devq_packets > + B43legacy_PIO_MAXTXDEVQPACKETS); + B43legacy_WARN_ON(queue->tx_devq_used > queue->tx_devq_size); + /* Check if there is sufficient free space on the device + * TX queue. If not, return and let the TX tasklet + * retry later. + */ + if (queue->tx_devq_packets == B43legacy_PIO_MAXTXDEVQPACKETS) + return -EBUSY; + if (queue->tx_devq_used + octets > queue->tx_devq_size) + return -EBUSY; + /* Now poke the device. */ + pio_tx_write_fragment(queue, skb, packet, + sizeof(struct b43legacy_txhdr_fw3)); + + /* Account for the packet size. + * (We must not overflow the device TX queue) + */ + queue->tx_devq_packets++; + queue->tx_devq_used += octets; + + /* Transmission started, everything ok, move the + * packet to the txrunning list. 
+ */ + list_move_tail(&packet->list, &queue->txrunning); + + return 0; +} + +static void tx_tasklet(unsigned long d) +{ + struct b43legacy_pioqueue *queue = (struct b43legacy_pioqueue *)d; + struct b43legacy_wldev *dev = queue->dev; + unsigned long flags; + struct b43legacy_pio_txpacket *packet, *tmp_packet; + int err; + u16 txctl; + + spin_lock_irqsave(&dev->wl->irq_lock, flags); + if (queue->tx_frozen) + goto out_unlock; + txctl = b43legacy_pio_read(queue, B43legacy_PIO_TXCTL); + if (txctl & B43legacy_PIO_TXCTL_SUSPEND) + goto out_unlock; + + list_for_each_entry_safe(packet, tmp_packet, &queue->txqueue, list) { + /* Try to transmit the packet. This can fail, if + * the device queue is full. In case of failure, the + * packet is left in the txqueue. + * If transmission succeed, the packet is moved to txrunning. + * If it is impossible to transmit the packet, it + * is dropped. + */ + err = pio_tx_packet(packet); + if (err) + break; + } +out_unlock: + spin_unlock_irqrestore(&dev->wl->irq_lock, flags); +} + +static void setup_txqueues(struct b43legacy_pioqueue *queue) +{ + struct b43legacy_pio_txpacket *packet; + int i; + + queue->nr_txfree = B43legacy_PIO_MAXTXPACKETS; + for (i = 0; i < B43legacy_PIO_MAXTXPACKETS; i++) { + packet = &(queue->tx_packets_cache[i]); + + packet->queue = queue; + INIT_LIST_HEAD(&packet->list); + + list_add(&packet->list, &queue->txfree); + } +} + +static +struct b43legacy_pioqueue *b43legacy_setup_pioqueue(struct b43legacy_wldev *dev, + u16 pio_mmio_base) +{ + struct b43legacy_pioqueue *queue; + u32 value; + u16 qsize; + + queue = kzalloc(sizeof(*queue), GFP_KERNEL); + if (!queue) + goto out; + + queue->dev = dev; + queue->mmio_base = pio_mmio_base; + queue->need_workarounds = (dev->dev->id.revision < 3); + + INIT_LIST_HEAD(&queue->txfree); + INIT_LIST_HEAD(&queue->txqueue); + INIT_LIST_HEAD(&queue->txrunning); + tasklet_init(&queue->txtask, tx_tasklet, + (unsigned long)queue); + + value = b43legacy_read32(dev, B43legacy_MMIO_STATUS_BITFIELD); + value &= ~B43legacy_SBF_XFER_REG_BYTESWAP; + b43legacy_write32(dev, B43legacy_MMIO_STATUS_BITFIELD, value); + + qsize = b43legacy_read16(dev, queue->mmio_base + + B43legacy_PIO_TXQBUFSIZE); + if (qsize == 0) { + b43legacyerr(dev->wl, "This card does not support PIO " + "operation mode. 
Please use DMA mode " + "(module parameter pio=0).\n"); + goto err_freequeue; + } + if (qsize <= B43legacy_PIO_TXQADJUST) { + b43legacyerr(dev->wl, "PIO tx device-queue too small (%u)\n", + qsize); + goto err_freequeue; + } + qsize -= B43legacy_PIO_TXQADJUST; + queue->tx_devq_size = qsize; + + setup_txqueues(queue); + +out: + return queue; + +err_freequeue: + kfree(queue); + queue = NULL; + goto out; +} + +static void cancel_transfers(struct b43legacy_pioqueue *queue) +{ + struct b43legacy_pio_txpacket *packet, *tmp_packet; + + tasklet_disable(&queue->txtask); + + list_for_each_entry_safe(packet, tmp_packet, &queue->txrunning, list) + free_txpacket(packet, 0); + list_for_each_entry_safe(packet, tmp_packet, &queue->txqueue, list) + free_txpacket(packet, 0); +} + +static void b43legacy_destroy_pioqueue(struct b43legacy_pioqueue *queue) +{ + if (!queue) + return; + + cancel_transfers(queue); + kfree(queue); +} + +void b43legacy_pio_free(struct b43legacy_wldev *dev) +{ + struct b43legacy_pio *pio; + + if (!b43legacy_using_pio(dev)) + return; + pio = &dev->pio; + + b43legacy_destroy_pioqueue(pio->queue3); + pio->queue3 = NULL; + b43legacy_destroy_pioqueue(pio->queue2); + pio->queue2 = NULL; + b43legacy_destroy_pioqueue(pio->queue1); + pio->queue1 = NULL; + b43legacy_destroy_pioqueue(pio->queue0); + pio->queue0 = NULL; +} + +int b43legacy_pio_init(struct b43legacy_wldev *dev) +{ + struct b43legacy_pio *pio = &dev->pio; + struct b43legacy_pioqueue *queue; + int err = -ENOMEM; + + queue = b43legacy_setup_pioqueue(dev, B43legacy_MMIO_PIO1_BASE); + if (!queue) + goto out; + pio->queue0 = queue; + + queue = b43legacy_setup_pioqueue(dev, B43legacy_MMIO_PIO2_BASE); + if (!queue) + goto err_destroy0; + pio->queue1 = queue; + + queue = b43legacy_setup_pioqueue(dev, B43legacy_MMIO_PIO3_BASE); + if (!queue) + goto err_destroy1; + pio->queue2 = queue; + + queue = b43legacy_setup_pioqueue(dev, B43legacy_MMIO_PIO4_BASE); + if (!queue) + goto err_destroy2; + pio->queue3 = queue; + + if (dev->dev->id.revision < 3) + dev->irq_savedstate |= B43legacy_IRQ_PIO_WORKAROUND; + + b43legacydbg(dev->wl, "PIO initialized\n"); + err = 0; +out: + return err; + +err_destroy2: + b43legacy_destroy_pioqueue(pio->queue2); + pio->queue2 = NULL; +err_destroy1: + b43legacy_destroy_pioqueue(pio->queue1); + pio->queue1 = NULL; +err_destroy0: + b43legacy_destroy_pioqueue(pio->queue0); + pio->queue0 = NULL; + goto out; +} + +int b43legacy_pio_tx(struct b43legacy_wldev *dev, + struct sk_buff *skb, + struct ieee80211_tx_control *ctl) +{ + struct b43legacy_pioqueue *queue = dev->pio.queue1; + struct b43legacy_pio_txpacket *packet; + + B43legacy_WARN_ON(queue->tx_suspended); + B43legacy_WARN_ON(list_empty(&queue->txfree)); + + packet = list_entry(queue->txfree.next, struct b43legacy_pio_txpacket, + list); + packet->skb = skb; + + memset(&packet->txstat, 0, sizeof(packet->txstat)); + memcpy(&packet->txstat.control, ctl, sizeof(*ctl)); + + list_move_tail(&packet->list, &queue->txqueue); + queue->nr_txfree--; + queue->nr_tx_packets++; + B43legacy_WARN_ON(queue->nr_txfree >= B43legacy_PIO_MAXTXPACKETS); + + tasklet_schedule(&queue->txtask); + + return 0; +} + +void b43legacy_pio_handle_txstatus(struct b43legacy_wldev *dev, + const struct b43legacy_txstatus *status) +{ + struct b43legacy_pioqueue *queue; + struct b43legacy_pio_txpacket *packet; + + queue = parse_cookie(dev, status->cookie, &packet); + B43legacy_WARN_ON(!queue); + + queue->tx_devq_packets--; + queue->tx_devq_used -= (packet->skb->len + + sizeof(struct b43legacy_txhdr_fw3)); + + 
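+	/* Report the transmit status to mac80211 and recycle the packet slot. */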
if (status->acked) + packet->txstat.flags |= IEEE80211_TX_STATUS_ACK; + packet->txstat.retry_count = status->frame_count - 1; + ieee80211_tx_status_irqsafe(dev->wl->hw, packet->skb, + &(packet->txstat)); + packet->skb = NULL; + + free_txpacket(packet, 1); + /* If there are packets on the txqueue, poke the tasklet + * to transmit them. + */ + if (!list_empty(&queue->txqueue)) + tasklet_schedule(&queue->txtask); +} + +void b43legacy_pio_get_tx_stats(struct b43legacy_wldev *dev, + struct ieee80211_tx_queue_stats *stats) +{ + struct b43legacy_pio *pio = &dev->pio; + struct b43legacy_pioqueue *queue; + struct ieee80211_tx_queue_stats_data *data; + + queue = pio->queue1; + data = &(stats->data[0]); + data->len = B43legacy_PIO_MAXTXPACKETS - queue->nr_txfree; + data->limit = B43legacy_PIO_MAXTXPACKETS; + data->count = queue->nr_tx_packets; +} + +static void pio_rx_error(struct b43legacy_pioqueue *queue, + int clear_buffers, + const char *error) +{ + int i; + + b43legacyerr(queue->dev->wl, "PIO RX error: %s\n", error); + b43legacy_pio_write(queue, B43legacy_PIO_RXCTL, + B43legacy_PIO_RXCTL_READY); + if (clear_buffers) { + B43legacy_WARN_ON(queue->mmio_base != B43legacy_MMIO_PIO1_BASE); + for (i = 0; i < 15; i++) { + /* Dummy read. */ + b43legacy_pio_read(queue, B43legacy_PIO_RXDATA); + } + } +} + +void b43legacy_pio_rx(struct b43legacy_pioqueue *queue) +{ + __le16 preamble[21] = { 0 }; + struct b43legacy_rxhdr_fw3 *rxhdr; + u16 tmp; + u16 len; + u16 macstat; + int i; + int preamble_readwords; + struct sk_buff *skb; + + tmp = b43legacy_pio_read(queue, B43legacy_PIO_RXCTL); + if (!(tmp & B43legacy_PIO_RXCTL_DATAAVAILABLE)) + return; + b43legacy_pio_write(queue, B43legacy_PIO_RXCTL, + B43legacy_PIO_RXCTL_DATAAVAILABLE); + + for (i = 0; i < 10; i++) { + tmp = b43legacy_pio_read(queue, B43legacy_PIO_RXCTL); + if (tmp & B43legacy_PIO_RXCTL_READY) + goto data_ready; + udelay(10); + } + b43legacydbg(queue->dev->wl, "PIO RX timed out\n"); + return; +data_ready: + + len = b43legacy_pio_read(queue, B43legacy_PIO_RXDATA); + if (unlikely(len > 0x700)) { + pio_rx_error(queue, 0, "len > 0x700"); + return; + } + if (unlikely(len == 0 && queue->mmio_base != + B43legacy_MMIO_PIO4_BASE)) { + pio_rx_error(queue, 0, "len == 0"); + return; + } + preamble[0] = cpu_to_le16(len); + if (queue->mmio_base == B43legacy_MMIO_PIO4_BASE) + preamble_readwords = 14 / sizeof(u16); + else + preamble_readwords = 18 / sizeof(u16); + for (i = 0; i < preamble_readwords; i++) { + tmp = b43legacy_pio_read(queue, B43legacy_PIO_RXDATA); + preamble[i + 1] = cpu_to_le16(tmp); + } + rxhdr = (struct b43legacy_rxhdr_fw3 *)preamble; + macstat = le16_to_cpu(rxhdr->mac_status); + if (macstat & B43legacy_RX_MAC_FCSERR) { + pio_rx_error(queue, + (queue->mmio_base == B43legacy_MMIO_PIO1_BASE), + "Frame FCS error"); + return; + } + if (queue->mmio_base == B43legacy_MMIO_PIO4_BASE) { + /* We received an xmit status. 
*/ + struct b43legacy_hwtxstatus *hw; + + hw = (struct b43legacy_hwtxstatus *)(preamble + 1); + b43legacy_handle_hwtxstatus(queue->dev, hw); + + return; + } + + skb = dev_alloc_skb(len); + if (unlikely(!skb)) { + pio_rx_error(queue, 1, "OOM"); + return; + } + skb_put(skb, len); + for (i = 0; i < len - 1; i += 2) { + tmp = b43legacy_pio_read(queue, B43legacy_PIO_RXDATA); + *((__le16 *)(skb->data + i)) = cpu_to_le16(tmp); + } + if (len % 2) { + tmp = b43legacy_pio_read(queue, B43legacy_PIO_RXDATA); + skb->data[len - 1] = (tmp & 0x00FF); + } + b43legacy_rx(queue->dev, skb, rxhdr); +} + +void b43legacy_pio_tx_suspend(struct b43legacy_pioqueue *queue) +{ + b43legacy_power_saving_ctl_bits(queue->dev, -1, 1); + b43legacy_pio_write(queue, B43legacy_PIO_TXCTL, + b43legacy_pio_read(queue, B43legacy_PIO_TXCTL) + | B43legacy_PIO_TXCTL_SUSPEND); +} + +void b43legacy_pio_tx_resume(struct b43legacy_pioqueue *queue) +{ + b43legacy_pio_write(queue, B43legacy_PIO_TXCTL, + b43legacy_pio_read(queue, B43legacy_PIO_TXCTL) + & ~B43legacy_PIO_TXCTL_SUSPEND); + b43legacy_power_saving_ctl_bits(queue->dev, -1, -1); + tasklet_schedule(&queue->txtask); +} + +void b43legacy_pio_freeze_txqueues(struct b43legacy_wldev *dev) +{ + struct b43legacy_pio *pio; + + B43legacy_WARN_ON(!b43legacy_using_pio(dev)); + pio = &dev->pio; + pio->queue0->tx_frozen = 1; + pio->queue1->tx_frozen = 1; + pio->queue2->tx_frozen = 1; + pio->queue3->tx_frozen = 1; +} + +void b43legacy_pio_thaw_txqueues(struct b43legacy_wldev *dev) +{ + struct b43legacy_pio *pio; + + B43legacy_WARN_ON(!b43legacy_using_pio(dev)); + pio = &dev->pio; + pio->queue0->tx_frozen = 0; + pio->queue1->tx_frozen = 0; + pio->queue2->tx_frozen = 0; + pio->queue3->tx_frozen = 0; + if (!list_empty(&pio->queue0->txqueue)) + tasklet_schedule(&pio->queue0->txtask); + if (!list_empty(&pio->queue1->txqueue)) + tasklet_schedule(&pio->queue1->txtask); + if (!list_empty(&pio->queue2->txqueue)) + tasklet_schedule(&pio->queue2->txtask); + if (!list_empty(&pio->queue3->txqueue)) + tasklet_schedule(&pio->queue3->txtask); +} diff --git a/drivers/net/wireless/b43legacy/pio.h b/drivers/net/wireless/b43legacy/pio.h new file mode 100644 index 0000000000000000000000000000000000000000..5bfed0c400307b777cb178926cc2e25cfef88139 --- /dev/null +++ b/drivers/net/wireless/b43legacy/pio.h @@ -0,0 +1,172 @@ +#ifndef B43legacy_PIO_H_ +#define B43legacy_PIO_H_ + +#include "b43legacy.h" + +#include +#include +#include + + +#define B43legacy_PIO_TXCTL 0x00 +#define B43legacy_PIO_TXDATA 0x02 +#define B43legacy_PIO_TXQBUFSIZE 0x04 +#define B43legacy_PIO_RXCTL 0x08 +#define B43legacy_PIO_RXDATA 0x0A + +#define B43legacy_PIO_TXCTL_WRITELO (1 << 0) +#define B43legacy_PIO_TXCTL_WRITEHI (1 << 1) +#define B43legacy_PIO_TXCTL_COMPLETE (1 << 2) +#define B43legacy_PIO_TXCTL_INIT (1 << 3) +#define B43legacy_PIO_TXCTL_SUSPEND (1 << 7) + +#define B43legacy_PIO_RXCTL_DATAAVAILABLE (1 << 0) +#define B43legacy_PIO_RXCTL_READY (1 << 1) + +/* PIO constants */ +#define B43legacy_PIO_MAXTXDEVQPACKETS 31 +#define B43legacy_PIO_TXQADJUST 80 + +/* PIO tuning knobs */ +#define B43legacy_PIO_MAXTXPACKETS 256 + + + +#ifdef CONFIG_B43LEGACY_PIO + + +struct b43legacy_pioqueue; +struct b43legacy_xmitstatus; + +struct b43legacy_pio_txpacket { + struct b43legacy_pioqueue *queue; + struct sk_buff *skb; + struct ieee80211_tx_status txstat; + struct list_head list; +}; + +#define pio_txpacket_getindex(packet) ((int)((packet) - \ + (packet)->queue->tx_packets_cache)) + +struct b43legacy_pioqueue { + struct b43legacy_wldev *dev; + u16 
mmio_base; + + bool tx_suspended; + bool tx_frozen; + bool need_workarounds; /* Workarounds needed for core.rev < 3 */ + + /* Adjusted size of the device internal TX buffer. */ + u16 tx_devq_size; + /* Used octets of the device internal TX buffer. */ + u16 tx_devq_used; + /* Used packet slots in the device internal TX buffer. */ + u8 tx_devq_packets; + /* Packets from the txfree list can + * be taken on incoming TX requests. + */ + struct list_head txfree; + unsigned int nr_txfree; + /* Packets on the txqueue are queued, + * but not completely written to the chip, yet. + */ + struct list_head txqueue; + /* Packets on the txrunning queue are completely + * posted to the device. We are waiting for the txstatus. + */ + struct list_head txrunning; + /* Total number or packets sent. + * (This counter can obviously wrap). + */ + unsigned int nr_tx_packets; + struct tasklet_struct txtask; + struct b43legacy_pio_txpacket + tx_packets_cache[B43legacy_PIO_MAXTXPACKETS]; +}; + +static inline +u16 b43legacy_pio_read(struct b43legacy_pioqueue *queue, + u16 offset) +{ + return b43legacy_read16(queue->dev, queue->mmio_base + offset); +} + +static inline +void b43legacy_pio_write(struct b43legacy_pioqueue *queue, + u16 offset, u16 value) +{ + b43legacy_write16(queue->dev, queue->mmio_base + offset, value); + mmiowb(); +} + + +int b43legacy_pio_init(struct b43legacy_wldev *dev); +void b43legacy_pio_free(struct b43legacy_wldev *dev); + +int b43legacy_pio_tx(struct b43legacy_wldev *dev, + struct sk_buff *skb, + struct ieee80211_tx_control *ctl); +void b43legacy_pio_handle_txstatus(struct b43legacy_wldev *dev, + const struct b43legacy_txstatus *status); +void b43legacy_pio_get_tx_stats(struct b43legacy_wldev *dev, + struct ieee80211_tx_queue_stats *stats); +void b43legacy_pio_rx(struct b43legacy_pioqueue *queue); + +/* Suspend TX queue in hardware. */ +void b43legacy_pio_tx_suspend(struct b43legacy_pioqueue *queue); +void b43legacy_pio_tx_resume(struct b43legacy_pioqueue *queue); +/* Suspend (freeze) the TX tasklet (software level). 
*/ +void b43legacy_pio_freeze_txqueues(struct b43legacy_wldev *dev); +void b43legacy_pio_thaw_txqueues(struct b43legacy_wldev *dev); + +#else /* CONFIG_B43LEGACY_PIO */ + +static inline +int b43legacy_pio_init(struct b43legacy_wldev *dev) +{ + return 0; +} +static inline +void b43legacy_pio_free(struct b43legacy_wldev *dev) +{ +} +static inline +int b43legacy_pio_tx(struct b43legacy_wldev *dev, + struct sk_buff *skb, + struct ieee80211_tx_control *ctl) +{ + return 0; +} +static inline +void b43legacy_pio_handle_txstatus(struct b43legacy_wldev *dev, + const struct b43legacy_txstatus *status) +{ +} +static inline +void b43legacy_pio_get_tx_stats(struct b43legacy_wldev *dev, + struct ieee80211_tx_queue_stats *stats) +{ +} +static inline +void b43legacy_pio_rx(struct b43legacy_pioqueue *queue) +{ +} +static inline +void b43legacy_pio_tx_suspend(struct b43legacy_pioqueue *queue) +{ +} +static inline +void b43legacy_pio_tx_resume(struct b43legacy_pioqueue *queue) +{ +} +static inline +void b43legacy_pio_freeze_txqueues(struct b43legacy_wldev *dev) +{ +} +static inline +void b43legacy_pio_thaw_txqueues(struct b43legacy_wldev *dev) +{ +} + +#endif /* CONFIG_B43LEGACY_PIO */ +#endif /* B43legacy_PIO_H_ */ diff --git a/drivers/net/wireless/b43legacy/radio.c b/drivers/net/wireless/b43legacy/radio.c new file mode 100644 index 0000000000000000000000000000000000000000..a361dee664af59ebe7922955f23c6c0b379da3f4 --- /dev/null +++ b/drivers/net/wireless/b43legacy/radio.c @@ -0,0 +1,2158 @@ +/* + + Broadcom B43legacy wireless driver + + Copyright (c) 2005 Martin Langer , + Stefano Brivio + Michael Buesch + Danny van Dyk + Andreas Jaggi + Copyright (c) 2007 Larry Finger + + Some parts of the code in this file are derived from the ipw2200 + driver Copyright(c) 2003 - 2004 Intel Corporation. + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; see the file COPYING. If not, write to + the Free Software Foundation, Inc., 51 Franklin Steet, Fifth Floor, + Boston, MA 02110-1301, USA. + +*/ + +#include + +#include "b43legacy.h" +#include "main.h" +#include "phy.h" +#include "radio.h" +#include "ilt.h" + + +/* Table for b43legacy_radio_calibrationvalue() */ +static const u16 rcc_table[16] = { + 0x0002, 0x0003, 0x0001, 0x000F, + 0x0006, 0x0007, 0x0005, 0x000F, + 0x000A, 0x000B, 0x0009, 0x000F, + 0x000E, 0x000F, 0x000D, 0x000F, +}; + +/* Reverse the bits of a 4bit value. + * Example: 1101 is flipped 1011 + */ +static u16 flip_4bit(u16 value) +{ + u16 flipped = 0x0000; + + B43legacy_BUG_ON(!((value & ~0x000F) == 0x0000)); + + flipped |= (value & 0x0001) << 3; + flipped |= (value & 0x0002) << 1; + flipped |= (value & 0x0004) >> 1; + flipped |= (value & 0x0008) >> 3; + + return flipped; +} + +/* Get the freq, as it has to be written to the device. 
*/ +static inline +u16 channel2freq_bg(u8 channel) +{ + /* Frequencies are given as frequencies_bg[index] + 2.4GHz + * Starting with channel 1 + */ + static const u16 frequencies_bg[14] = { + 12, 17, 22, 27, + 32, 37, 42, 47, + 52, 57, 62, 67, + 72, 84, + }; + + if (unlikely(channel < 1 || channel > 14)) { + printk(KERN_INFO "b43legacy: Channel %d is out of range\n", + channel); + dump_stack(); + return 2412; + } + + return frequencies_bg[channel - 1]; +} + +void b43legacy_radio_lock(struct b43legacy_wldev *dev) +{ + u32 status; + + status = b43legacy_read32(dev, B43legacy_MMIO_STATUS_BITFIELD); + status |= B43legacy_SBF_RADIOREG_LOCK; + b43legacy_write32(dev, B43legacy_MMIO_STATUS_BITFIELD, status); + mmiowb(); + udelay(10); +} + +void b43legacy_radio_unlock(struct b43legacy_wldev *dev) +{ + u32 status; + + b43legacy_read16(dev, B43legacy_MMIO_PHY_VER); /* dummy read */ + status = b43legacy_read32(dev, B43legacy_MMIO_STATUS_BITFIELD); + status &= ~B43legacy_SBF_RADIOREG_LOCK; + b43legacy_write32(dev, B43legacy_MMIO_STATUS_BITFIELD, status); + mmiowb(); +} + +u16 b43legacy_radio_read16(struct b43legacy_wldev *dev, u16 offset) +{ + struct b43legacy_phy *phy = &dev->phy; + + switch (phy->type) { + case B43legacy_PHYTYPE_B: + if (phy->radio_ver == 0x2053) { + if (offset < 0x70) + offset += 0x80; + else if (offset < 0x80) + offset += 0x70; + } else if (phy->radio_ver == 0x2050) + offset |= 0x80; + else + B43legacy_WARN_ON(1); + break; + case B43legacy_PHYTYPE_G: + offset |= 0x80; + break; + default: + B43legacy_BUG_ON(1); + } + + b43legacy_write16(dev, B43legacy_MMIO_RADIO_CONTROL, offset); + return b43legacy_read16(dev, B43legacy_MMIO_RADIO_DATA_LOW); +} + +void b43legacy_radio_write16(struct b43legacy_wldev *dev, u16 offset, u16 val) +{ + b43legacy_write16(dev, B43legacy_MMIO_RADIO_CONTROL, offset); + mmiowb(); + b43legacy_write16(dev, B43legacy_MMIO_RADIO_DATA_LOW, val); +} + +static void b43legacy_set_all_gains(struct b43legacy_wldev *dev, + s16 first, s16 second, s16 third) +{ + struct b43legacy_phy *phy = &dev->phy; + u16 i; + u16 start = 0x08; + u16 end = 0x18; + u16 offset = 0x0400; + u16 tmp; + + if (phy->rev <= 1) { + offset = 0x5000; + start = 0x10; + end = 0x20; + } + + for (i = 0; i < 4; i++) + b43legacy_ilt_write(dev, offset + i, first); + + for (i = start; i < end; i++) + b43legacy_ilt_write(dev, offset + i, second); + + if (third != -1) { + tmp = ((u16)third << 14) | ((u16)third << 6); + b43legacy_phy_write(dev, 0x04A0, + (b43legacy_phy_read(dev, 0x04A0) & 0xBFBF) + | tmp); + b43legacy_phy_write(dev, 0x04A1, + (b43legacy_phy_read(dev, 0x04A1) & 0xBFBF) + | tmp); + b43legacy_phy_write(dev, 0x04A2, + (b43legacy_phy_read(dev, 0x04A2) & 0xBFBF) + | tmp); + } + b43legacy_dummy_transmission(dev); +} + +static void b43legacy_set_original_gains(struct b43legacy_wldev *dev) +{ + struct b43legacy_phy *phy = &dev->phy; + u16 i; + u16 tmp; + u16 offset = 0x0400; + u16 start = 0x0008; + u16 end = 0x0018; + + if (phy->rev <= 1) { + offset = 0x5000; + start = 0x0010; + end = 0x0020; + } + + for (i = 0; i < 4; i++) { + tmp = (i & 0xFFFC); + tmp |= (i & 0x0001) << 1; + tmp |= (i & 0x0002) >> 1; + + b43legacy_ilt_write(dev, offset + i, tmp); + } + + for (i = start; i < end; i++) + b43legacy_ilt_write(dev, offset + i, i - start); + + b43legacy_phy_write(dev, 0x04A0, + (b43legacy_phy_read(dev, 0x04A0) & 0xBFBF) + | 0x4040); + b43legacy_phy_write(dev, 0x04A1, + (b43legacy_phy_read(dev, 0x04A1) & 0xBFBF) + | 0x4040); + b43legacy_phy_write(dev, 0x04A2, + (b43legacy_phy_read(dev, 0x04A2) & 0xBFBF) + | 
0x4000); + b43legacy_dummy_transmission(dev); +} + +/* Synthetic PU workaround */ +static void b43legacy_synth_pu_workaround(struct b43legacy_wldev *dev, + u8 channel) +{ + struct b43legacy_phy *phy = &dev->phy; + + might_sleep(); + + if (phy->radio_ver != 0x2050 || phy->radio_rev >= 6) + /* We do not need the workaround. */ + return; + + if (channel <= 10) + b43legacy_write16(dev, B43legacy_MMIO_CHANNEL, + channel2freq_bg(channel + 4)); + else + b43legacy_write16(dev, B43legacy_MMIO_CHANNEL, + channel2freq_bg(channel)); + msleep(1); + b43legacy_write16(dev, B43legacy_MMIO_CHANNEL, + channel2freq_bg(channel)); +} + +u8 b43legacy_radio_aci_detect(struct b43legacy_wldev *dev, u8 channel) +{ + struct b43legacy_phy *phy = &dev->phy; + u8 ret = 0; + u16 saved; + u16 rssi; + u16 temp; + int i; + int j = 0; + + saved = b43legacy_phy_read(dev, 0x0403); + b43legacy_radio_selectchannel(dev, channel, 0); + b43legacy_phy_write(dev, 0x0403, (saved & 0xFFF8) | 5); + if (phy->aci_hw_rssi) + rssi = b43legacy_phy_read(dev, 0x048A) & 0x3F; + else + rssi = saved & 0x3F; + /* clamp temp to signed 5bit */ + if (rssi > 32) + rssi -= 64; + for (i = 0; i < 100; i++) { + temp = (b43legacy_phy_read(dev, 0x047F) >> 8) & 0x3F; + if (temp > 32) + temp -= 64; + if (temp < rssi) + j++; + if (j >= 20) + ret = 1; + } + b43legacy_phy_write(dev, 0x0403, saved); + + return ret; +} + +u8 b43legacy_radio_aci_scan(struct b43legacy_wldev *dev) +{ + struct b43legacy_phy *phy = &dev->phy; + u8 ret[13]; + unsigned int channel = phy->channel; + unsigned int i; + unsigned int j; + unsigned int start; + unsigned int end; + unsigned long phylock_flags; + + if (!((phy->type == B43legacy_PHYTYPE_G) && (phy->rev > 0))) + return 0; + + b43legacy_phy_lock(dev, phylock_flags); + b43legacy_radio_lock(dev); + b43legacy_phy_write(dev, 0x0802, + b43legacy_phy_read(dev, 0x0802) & 0xFFFC); + b43legacy_phy_write(dev, B43legacy_PHY_G_CRS, + b43legacy_phy_read(dev, B43legacy_PHY_G_CRS) + & 0x7FFF); + b43legacy_set_all_gains(dev, 3, 8, 1); + + start = (channel - 5 > 0) ? channel - 5 : 1; + end = (channel + 5 < 14) ? channel + 5 : 13; + + for (i = start; i <= end; i++) { + if (abs(channel - i) > 2) + ret[i-1] = b43legacy_radio_aci_detect(dev, i); + } + b43legacy_radio_selectchannel(dev, channel, 0); + b43legacy_phy_write(dev, 0x0802, + (b43legacy_phy_read(dev, 0x0802) & 0xFFFC) + | 0x0003); + b43legacy_phy_write(dev, 0x0403, + b43legacy_phy_read(dev, 0x0403) & 0xFFF8); + b43legacy_phy_write(dev, B43legacy_PHY_G_CRS, + b43legacy_phy_read(dev, B43legacy_PHY_G_CRS) + | 0x8000); + b43legacy_set_original_gains(dev); + for (i = 0; i < 13; i++) { + if (!ret[i]) + continue; + end = (i + 5 < 13) ? 
i + 5 : 13; + for (j = i; j < end; j++) + ret[j] = 1; + } + b43legacy_radio_unlock(dev); + b43legacy_phy_unlock(dev, phylock_flags); + + return ret[channel - 1]; +} + +/* http://bcm-specs.sipsolutions.net/NRSSILookupTable */ +void b43legacy_nrssi_hw_write(struct b43legacy_wldev *dev, u16 offset, s16 val) +{ + b43legacy_phy_write(dev, B43legacy_PHY_NRSSILT_CTRL, offset); + mmiowb(); + b43legacy_phy_write(dev, B43legacy_PHY_NRSSILT_DATA, (u16)val); +} + +/* http://bcm-specs.sipsolutions.net/NRSSILookupTable */ +s16 b43legacy_nrssi_hw_read(struct b43legacy_wldev *dev, u16 offset) +{ + u16 val; + + b43legacy_phy_write(dev, B43legacy_PHY_NRSSILT_CTRL, offset); + val = b43legacy_phy_read(dev, B43legacy_PHY_NRSSILT_DATA); + + return (s16)val; +} + +/* http://bcm-specs.sipsolutions.net/NRSSILookupTable */ +void b43legacy_nrssi_hw_update(struct b43legacy_wldev *dev, u16 val) +{ + u16 i; + s16 tmp; + + for (i = 0; i < 64; i++) { + tmp = b43legacy_nrssi_hw_read(dev, i); + tmp -= val; + tmp = limit_value(tmp, -32, 31); + b43legacy_nrssi_hw_write(dev, i, tmp); + } +} + +/* http://bcm-specs.sipsolutions.net/NRSSILookupTable */ +void b43legacy_nrssi_mem_update(struct b43legacy_wldev *dev) +{ + struct b43legacy_phy *phy = &dev->phy; + s16 i; + s16 delta; + s32 tmp; + + delta = 0x1F - phy->nrssi[0]; + for (i = 0; i < 64; i++) { + tmp = (i - delta) * phy->nrssislope; + tmp /= 0x10000; + tmp += 0x3A; + tmp = limit_value(tmp, 0, 0x3F); + phy->nrssi_lt[i] = tmp; + } +} + +static void b43legacy_calc_nrssi_offset(struct b43legacy_wldev *dev) +{ + struct b43legacy_phy *phy = &dev->phy; + u16 backup[20] = { 0 }; + s16 v47F; + u16 i; + u16 saved = 0xFFFF; + + backup[0] = b43legacy_phy_read(dev, 0x0001); + backup[1] = b43legacy_phy_read(dev, 0x0811); + backup[2] = b43legacy_phy_read(dev, 0x0812); + backup[3] = b43legacy_phy_read(dev, 0x0814); + backup[4] = b43legacy_phy_read(dev, 0x0815); + backup[5] = b43legacy_phy_read(dev, 0x005A); + backup[6] = b43legacy_phy_read(dev, 0x0059); + backup[7] = b43legacy_phy_read(dev, 0x0058); + backup[8] = b43legacy_phy_read(dev, 0x000A); + backup[9] = b43legacy_phy_read(dev, 0x0003); + backup[10] = b43legacy_radio_read16(dev, 0x007A); + backup[11] = b43legacy_radio_read16(dev, 0x0043); + + b43legacy_phy_write(dev, 0x0429, + b43legacy_phy_read(dev, 0x0429) & 0x7FFF); + b43legacy_phy_write(dev, 0x0001, + (b43legacy_phy_read(dev, 0x0001) & 0x3FFF) + | 0x4000); + b43legacy_phy_write(dev, 0x0811, + b43legacy_phy_read(dev, 0x0811) | 0x000C); + b43legacy_phy_write(dev, 0x0812, + (b43legacy_phy_read(dev, 0x0812) & 0xFFF3) + | 0x0004); + b43legacy_phy_write(dev, 0x0802, + b43legacy_phy_read(dev, 0x0802) & ~(0x1 | 0x2)); + if (phy->rev >= 6) { + backup[12] = b43legacy_phy_read(dev, 0x002E); + backup[13] = b43legacy_phy_read(dev, 0x002F); + backup[14] = b43legacy_phy_read(dev, 0x080F); + backup[15] = b43legacy_phy_read(dev, 0x0810); + backup[16] = b43legacy_phy_read(dev, 0x0801); + backup[17] = b43legacy_phy_read(dev, 0x0060); + backup[18] = b43legacy_phy_read(dev, 0x0014); + backup[19] = b43legacy_phy_read(dev, 0x0478); + + b43legacy_phy_write(dev, 0x002E, 0); + b43legacy_phy_write(dev, 0x002F, 0); + b43legacy_phy_write(dev, 0x080F, 0); + b43legacy_phy_write(dev, 0x0810, 0); + b43legacy_phy_write(dev, 0x0478, + b43legacy_phy_read(dev, 0x0478) | 0x0100); + b43legacy_phy_write(dev, 0x0801, + b43legacy_phy_read(dev, 0x0801) | 0x0040); + b43legacy_phy_write(dev, 0x0060, + b43legacy_phy_read(dev, 0x0060) | 0x0040); + b43legacy_phy_write(dev, 0x0014, + b43legacy_phy_read(dev, 0x0014) | 0x0200); + 
} + b43legacy_radio_write16(dev, 0x007A, + b43legacy_radio_read16(dev, 0x007A) | 0x0070); + b43legacy_radio_write16(dev, 0x007A, + b43legacy_radio_read16(dev, 0x007A) | 0x0080); + udelay(30); + + v47F = (s16)((b43legacy_phy_read(dev, 0x047F) >> 8) & 0x003F); + if (v47F >= 0x20) + v47F -= 0x40; + if (v47F == 31) { + for (i = 7; i >= 4; i--) { + b43legacy_radio_write16(dev, 0x007B, i); + udelay(20); + v47F = (s16)((b43legacy_phy_read(dev, 0x047F) >> 8) + & 0x003F); + if (v47F >= 0x20) + v47F -= 0x40; + if (v47F < 31 && saved == 0xFFFF) + saved = i; + } + if (saved == 0xFFFF) + saved = 4; + } else { + b43legacy_radio_write16(dev, 0x007A, + b43legacy_radio_read16(dev, 0x007A) + & 0x007F); + b43legacy_phy_write(dev, 0x0814, + b43legacy_phy_read(dev, 0x0814) | 0x0001); + b43legacy_phy_write(dev, 0x0815, + b43legacy_phy_read(dev, 0x0815) & 0xFFFE); + b43legacy_phy_write(dev, 0x0811, + b43legacy_phy_read(dev, 0x0811) | 0x000C); + b43legacy_phy_write(dev, 0x0812, + b43legacy_phy_read(dev, 0x0812) | 0x000C); + b43legacy_phy_write(dev, 0x0811, + b43legacy_phy_read(dev, 0x0811) | 0x0030); + b43legacy_phy_write(dev, 0x0812, + b43legacy_phy_read(dev, 0x0812) | 0x0030); + b43legacy_phy_write(dev, 0x005A, 0x0480); + b43legacy_phy_write(dev, 0x0059, 0x0810); + b43legacy_phy_write(dev, 0x0058, 0x000D); + if (phy->analog == 0) + b43legacy_phy_write(dev, 0x0003, 0x0122); + else + b43legacy_phy_write(dev, 0x000A, + b43legacy_phy_read(dev, 0x000A) + | 0x2000); + b43legacy_phy_write(dev, 0x0814, + b43legacy_phy_read(dev, 0x0814) | 0x0004); + b43legacy_phy_write(dev, 0x0815, + b43legacy_phy_read(dev, 0x0815) & 0xFFFB); + b43legacy_phy_write(dev, 0x0003, + (b43legacy_phy_read(dev, 0x0003) & 0xFF9F) + | 0x0040); + b43legacy_radio_write16(dev, 0x007A, + b43legacy_radio_read16(dev, 0x007A) + | 0x000F); + b43legacy_set_all_gains(dev, 3, 0, 1); + b43legacy_radio_write16(dev, 0x0043, + (b43legacy_radio_read16(dev, 0x0043) + & 0x00F0) | 0x000F); + udelay(30); + v47F = (s16)((b43legacy_phy_read(dev, 0x047F) >> 8) & 0x003F); + if (v47F >= 0x20) + v47F -= 0x40; + if (v47F == -32) { + for (i = 0; i < 4; i++) { + b43legacy_radio_write16(dev, 0x007B, i); + udelay(20); + v47F = (s16)((b43legacy_phy_read(dev, 0x047F) >> + 8) & 0x003F); + if (v47F >= 0x20) + v47F -= 0x40; + if (v47F > -31 && saved == 0xFFFF) + saved = i; + } + if (saved == 0xFFFF) + saved = 3; + } else + saved = 0; + } + b43legacy_radio_write16(dev, 0x007B, saved); + + if (phy->rev >= 6) { + b43legacy_phy_write(dev, 0x002E, backup[12]); + b43legacy_phy_write(dev, 0x002F, backup[13]); + b43legacy_phy_write(dev, 0x080F, backup[14]); + b43legacy_phy_write(dev, 0x0810, backup[15]); + } + b43legacy_phy_write(dev, 0x0814, backup[3]); + b43legacy_phy_write(dev, 0x0815, backup[4]); + b43legacy_phy_write(dev, 0x005A, backup[5]); + b43legacy_phy_write(dev, 0x0059, backup[6]); + b43legacy_phy_write(dev, 0x0058, backup[7]); + b43legacy_phy_write(dev, 0x000A, backup[8]); + b43legacy_phy_write(dev, 0x0003, backup[9]); + b43legacy_radio_write16(dev, 0x0043, backup[11]); + b43legacy_radio_write16(dev, 0x007A, backup[10]); + b43legacy_phy_write(dev, 0x0802, + b43legacy_phy_read(dev, 0x0802) | 0x1 | 0x2); + b43legacy_phy_write(dev, 0x0429, + b43legacy_phy_read(dev, 0x0429) | 0x8000); + b43legacy_set_original_gains(dev); + if (phy->rev >= 6) { + b43legacy_phy_write(dev, 0x0801, backup[16]); + b43legacy_phy_write(dev, 0x0060, backup[17]); + b43legacy_phy_write(dev, 0x0014, backup[18]); + b43legacy_phy_write(dev, 0x0478, backup[19]); + } + b43legacy_phy_write(dev, 0x0001, backup[0]); 
+ b43legacy_phy_write(dev, 0x0812, backup[2]); + b43legacy_phy_write(dev, 0x0811, backup[1]); +} + +void b43legacy_calc_nrssi_slope(struct b43legacy_wldev *dev) +{ + struct b43legacy_phy *phy = &dev->phy; + u16 backup[18] = { 0 }; + u16 tmp; + s16 nrssi0; + s16 nrssi1; + + switch (phy->type) { + case B43legacy_PHYTYPE_B: + backup[0] = b43legacy_radio_read16(dev, 0x007A); + backup[1] = b43legacy_radio_read16(dev, 0x0052); + backup[2] = b43legacy_radio_read16(dev, 0x0043); + backup[3] = b43legacy_phy_read(dev, 0x0030); + backup[4] = b43legacy_phy_read(dev, 0x0026); + backup[5] = b43legacy_phy_read(dev, 0x0015); + backup[6] = b43legacy_phy_read(dev, 0x002A); + backup[7] = b43legacy_phy_read(dev, 0x0020); + backup[8] = b43legacy_phy_read(dev, 0x005A); + backup[9] = b43legacy_phy_read(dev, 0x0059); + backup[10] = b43legacy_phy_read(dev, 0x0058); + backup[11] = b43legacy_read16(dev, 0x03E2); + backup[12] = b43legacy_read16(dev, 0x03E6); + backup[13] = b43legacy_read16(dev, B43legacy_MMIO_CHANNEL_EXT); + + tmp = b43legacy_radio_read16(dev, 0x007A); + tmp &= (phy->rev >= 5) ? 0x007F : 0x000F; + b43legacy_radio_write16(dev, 0x007A, tmp); + b43legacy_phy_write(dev, 0x0030, 0x00FF); + b43legacy_write16(dev, 0x03EC, 0x7F7F); + b43legacy_phy_write(dev, 0x0026, 0x0000); + b43legacy_phy_write(dev, 0x0015, + b43legacy_phy_read(dev, 0x0015) | 0x0020); + b43legacy_phy_write(dev, 0x002A, 0x08A3); + b43legacy_radio_write16(dev, 0x007A, + b43legacy_radio_read16(dev, 0x007A) + | 0x0080); + + nrssi0 = (s16)b43legacy_phy_read(dev, 0x0027); + b43legacy_radio_write16(dev, 0x007A, + b43legacy_radio_read16(dev, 0x007A) + & 0x007F); + if (phy->analog >= 2) + b43legacy_write16(dev, 0x03E6, 0x0040); + else if (phy->analog == 0) + b43legacy_write16(dev, 0x03E6, 0x0122); + else + b43legacy_write16(dev, B43legacy_MMIO_CHANNEL_EXT, + b43legacy_read16(dev, + B43legacy_MMIO_CHANNEL_EXT) & 0x2000); + b43legacy_phy_write(dev, 0x0020, 0x3F3F); + b43legacy_phy_write(dev, 0x0015, 0xF330); + b43legacy_radio_write16(dev, 0x005A, 0x0060); + b43legacy_radio_write16(dev, 0x0043, + b43legacy_radio_read16(dev, 0x0043) + & 0x00F0); + b43legacy_phy_write(dev, 0x005A, 0x0480); + b43legacy_phy_write(dev, 0x0059, 0x0810); + b43legacy_phy_write(dev, 0x0058, 0x000D); + udelay(20); + + nrssi1 = (s16)b43legacy_phy_read(dev, 0x0027); + b43legacy_phy_write(dev, 0x0030, backup[3]); + b43legacy_radio_write16(dev, 0x007A, backup[0]); + b43legacy_write16(dev, 0x03E2, backup[11]); + b43legacy_phy_write(dev, 0x0026, backup[4]); + b43legacy_phy_write(dev, 0x0015, backup[5]); + b43legacy_phy_write(dev, 0x002A, backup[6]); + b43legacy_synth_pu_workaround(dev, phy->channel); + if (phy->analog != 0) + b43legacy_write16(dev, 0x03F4, backup[13]); + + b43legacy_phy_write(dev, 0x0020, backup[7]); + b43legacy_phy_write(dev, 0x005A, backup[8]); + b43legacy_phy_write(dev, 0x0059, backup[9]); + b43legacy_phy_write(dev, 0x0058, backup[10]); + b43legacy_radio_write16(dev, 0x0052, backup[1]); + b43legacy_radio_write16(dev, 0x0043, backup[2]); + + if (nrssi0 == nrssi1) + phy->nrssislope = 0x00010000; + else + phy->nrssislope = 0x00400000 / (nrssi0 - nrssi1); + + if (nrssi0 <= -4) { + phy->nrssi[0] = nrssi0; + phy->nrssi[1] = nrssi1; + } + break; + case B43legacy_PHYTYPE_G: + if (phy->radio_rev >= 9) + return; + if (phy->radio_rev == 8) + b43legacy_calc_nrssi_offset(dev); + + b43legacy_phy_write(dev, B43legacy_PHY_G_CRS, + b43legacy_phy_read(dev, B43legacy_PHY_G_CRS) + & 0x7FFF); + b43legacy_phy_write(dev, 0x0802, + b43legacy_phy_read(dev, 0x0802) & 0xFFFC); + backup[7] = 
b43legacy_read16(dev, 0x03E2); + b43legacy_write16(dev, 0x03E2, + b43legacy_read16(dev, 0x03E2) | 0x8000); + backup[0] = b43legacy_radio_read16(dev, 0x007A); + backup[1] = b43legacy_radio_read16(dev, 0x0052); + backup[2] = b43legacy_radio_read16(dev, 0x0043); + backup[3] = b43legacy_phy_read(dev, 0x0015); + backup[4] = b43legacy_phy_read(dev, 0x005A); + backup[5] = b43legacy_phy_read(dev, 0x0059); + backup[6] = b43legacy_phy_read(dev, 0x0058); + backup[8] = b43legacy_read16(dev, 0x03E6); + backup[9] = b43legacy_read16(dev, B43legacy_MMIO_CHANNEL_EXT); + if (phy->rev >= 3) { + backup[10] = b43legacy_phy_read(dev, 0x002E); + backup[11] = b43legacy_phy_read(dev, 0x002F); + backup[12] = b43legacy_phy_read(dev, 0x080F); + backup[13] = b43legacy_phy_read(dev, + B43legacy_PHY_G_LO_CONTROL); + backup[14] = b43legacy_phy_read(dev, 0x0801); + backup[15] = b43legacy_phy_read(dev, 0x0060); + backup[16] = b43legacy_phy_read(dev, 0x0014); + backup[17] = b43legacy_phy_read(dev, 0x0478); + b43legacy_phy_write(dev, 0x002E, 0); + b43legacy_phy_write(dev, B43legacy_PHY_G_LO_CONTROL, 0); + switch (phy->rev) { + case 4: case 6: case 7: + b43legacy_phy_write(dev, 0x0478, + b43legacy_phy_read(dev, + 0x0478) | 0x0100); + b43legacy_phy_write(dev, 0x0801, + b43legacy_phy_read(dev, + 0x0801) | 0x0040); + break; + case 3: case 5: + b43legacy_phy_write(dev, 0x0801, + b43legacy_phy_read(dev, + 0x0801) & 0xFFBF); + break; + } + b43legacy_phy_write(dev, 0x0060, + b43legacy_phy_read(dev, 0x0060) + | 0x0040); + b43legacy_phy_write(dev, 0x0014, + b43legacy_phy_read(dev, 0x0014) + | 0x0200); + } + b43legacy_radio_write16(dev, 0x007A, + b43legacy_radio_read16(dev, 0x007A) + | 0x0070); + b43legacy_set_all_gains(dev, 0, 8, 0); + b43legacy_radio_write16(dev, 0x007A, + b43legacy_radio_read16(dev, 0x007A) + & 0x00F7); + if (phy->rev >= 2) { + b43legacy_phy_write(dev, 0x0811, + (b43legacy_phy_read(dev, 0x0811) + & 0xFFCF) | 0x0030); + b43legacy_phy_write(dev, 0x0812, + (b43legacy_phy_read(dev, 0x0812) + & 0xFFCF) | 0x0010); + } + b43legacy_radio_write16(dev, 0x007A, + b43legacy_radio_read16(dev, 0x007A) + | 0x0080); + udelay(20); + + nrssi0 = (s16)((b43legacy_phy_read(dev, 0x047F) >> 8) & 0x003F); + if (nrssi0 >= 0x0020) + nrssi0 -= 0x0040; + + b43legacy_radio_write16(dev, 0x007A, + b43legacy_radio_read16(dev, 0x007A) + & 0x007F); + if (phy->analog >= 2) + b43legacy_phy_write(dev, 0x0003, + (b43legacy_phy_read(dev, 0x0003) + & 0xFF9F) | 0x0040); + + b43legacy_write16(dev, B43legacy_MMIO_CHANNEL_EXT, + b43legacy_read16(dev, + B43legacy_MMIO_CHANNEL_EXT) | 0x2000); + b43legacy_radio_write16(dev, 0x007A, + b43legacy_radio_read16(dev, 0x007A) + | 0x000F); + b43legacy_phy_write(dev, 0x0015, 0xF330); + if (phy->rev >= 2) { + b43legacy_phy_write(dev, 0x0812, + (b43legacy_phy_read(dev, 0x0812) + & 0xFFCF) | 0x0020); + b43legacy_phy_write(dev, 0x0811, + (b43legacy_phy_read(dev, 0x0811) + & 0xFFCF) | 0x0020); + } + + b43legacy_set_all_gains(dev, 3, 0, 1); + if (phy->radio_rev == 8) + b43legacy_radio_write16(dev, 0x0043, 0x001F); + else { + tmp = b43legacy_radio_read16(dev, 0x0052) & 0xFF0F; + b43legacy_radio_write16(dev, 0x0052, tmp | 0x0060); + tmp = b43legacy_radio_read16(dev, 0x0043) & 0xFFF0; + b43legacy_radio_write16(dev, 0x0043, tmp | 0x0009); + } + b43legacy_phy_write(dev, 0x005A, 0x0480); + b43legacy_phy_write(dev, 0x0059, 0x0810); + b43legacy_phy_write(dev, 0x0058, 0x000D); + udelay(20); + nrssi1 = (s16)((b43legacy_phy_read(dev, 0x047F) >> 8) & 0x003F); + if (nrssi1 >= 0x0020) + nrssi1 -= 0x0040; + if (nrssi0 == nrssi1) + 
phy->nrssislope = 0x00010000; + else + phy->nrssislope = 0x00400000 / (nrssi0 - nrssi1); + if (nrssi0 >= -4) { + phy->nrssi[0] = nrssi1; + phy->nrssi[1] = nrssi0; + } + if (phy->rev >= 3) { + b43legacy_phy_write(dev, 0x002E, backup[10]); + b43legacy_phy_write(dev, 0x002F, backup[11]); + b43legacy_phy_write(dev, 0x080F, backup[12]); + b43legacy_phy_write(dev, B43legacy_PHY_G_LO_CONTROL, + backup[13]); + } + if (phy->rev >= 2) { + b43legacy_phy_write(dev, 0x0812, + b43legacy_phy_read(dev, 0x0812) + & 0xFFCF); + b43legacy_phy_write(dev, 0x0811, + b43legacy_phy_read(dev, 0x0811) + & 0xFFCF); + } + + b43legacy_radio_write16(dev, 0x007A, backup[0]); + b43legacy_radio_write16(dev, 0x0052, backup[1]); + b43legacy_radio_write16(dev, 0x0043, backup[2]); + b43legacy_write16(dev, 0x03E2, backup[7]); + b43legacy_write16(dev, 0x03E6, backup[8]); + b43legacy_write16(dev, B43legacy_MMIO_CHANNEL_EXT, backup[9]); + b43legacy_phy_write(dev, 0x0015, backup[3]); + b43legacy_phy_write(dev, 0x005A, backup[4]); + b43legacy_phy_write(dev, 0x0059, backup[5]); + b43legacy_phy_write(dev, 0x0058, backup[6]); + b43legacy_synth_pu_workaround(dev, phy->channel); + b43legacy_phy_write(dev, 0x0802, + b43legacy_phy_read(dev, 0x0802) | 0x0003); + b43legacy_set_original_gains(dev); + b43legacy_phy_write(dev, B43legacy_PHY_G_CRS, + b43legacy_phy_read(dev, B43legacy_PHY_G_CRS) + | 0x8000); + if (phy->rev >= 3) { + b43legacy_phy_write(dev, 0x0801, backup[14]); + b43legacy_phy_write(dev, 0x0060, backup[15]); + b43legacy_phy_write(dev, 0x0014, backup[16]); + b43legacy_phy_write(dev, 0x0478, backup[17]); + } + b43legacy_nrssi_mem_update(dev); + b43legacy_calc_nrssi_threshold(dev); + break; + default: + B43legacy_BUG_ON(1); + } +} + +void b43legacy_calc_nrssi_threshold(struct b43legacy_wldev *dev) +{ + struct b43legacy_phy *phy = &dev->phy; + s32 threshold; + s32 a; + s32 b; + s16 tmp16; + u16 tmp_u16; + + switch (phy->type) { + case B43legacy_PHYTYPE_B: { + if (phy->radio_ver != 0x2050) + return; + if (!(dev->dev->bus->sprom.r1.boardflags_lo & + B43legacy_BFL_RSSI)) + return; + + if (phy->radio_rev >= 6) { + threshold = (phy->nrssi[1] - phy->nrssi[0]) * 32; + threshold += 20 * (phy->nrssi[0] + 1); + threshold /= 40; + } else + threshold = phy->nrssi[1] - 5; + + threshold = limit_value(threshold, 0, 0x3E); + b43legacy_phy_read(dev, 0x0020); /* dummy read */ + b43legacy_phy_write(dev, 0x0020, (((u16)threshold) << 8) + | 0x001C); + + if (phy->radio_rev >= 6) { + b43legacy_phy_write(dev, 0x0087, 0x0E0D); + b43legacy_phy_write(dev, 0x0086, 0x0C0B); + b43legacy_phy_write(dev, 0x0085, 0x0A09); + b43legacy_phy_write(dev, 0x0084, 0x0808); + b43legacy_phy_write(dev, 0x0083, 0x0808); + b43legacy_phy_write(dev, 0x0082, 0x0604); + b43legacy_phy_write(dev, 0x0081, 0x0302); + b43legacy_phy_write(dev, 0x0080, 0x0100); + } + break; + } + case B43legacy_PHYTYPE_G: + if (!phy->gmode || + !(dev->dev->bus->sprom.r1.boardflags_lo & + B43legacy_BFL_RSSI)) { + tmp16 = b43legacy_nrssi_hw_read(dev, 0x20); + if (tmp16 >= 0x20) + tmp16 -= 0x40; + if (tmp16 < 3) + b43legacy_phy_write(dev, 0x048A, + (b43legacy_phy_read(dev, + 0x048A) & 0xF000) | 0x09EB); + else + b43legacy_phy_write(dev, 0x048A, + (b43legacy_phy_read(dev, + 0x048A) & 0xF000) | 0x0AED); + } else { + if (phy->interfmode == + B43legacy_RADIO_INTERFMODE_NONWLAN) { + a = 0xE; + b = 0xA; + } else if (!phy->aci_wlan_automatic && + phy->aci_enable) { + a = 0x13; + b = 0x12; + } else { + a = 0xE; + b = 0x11; + } + + a = a * (phy->nrssi[1] - phy->nrssi[0]); + a += (phy->nrssi[0] << 6); + if (a < 32) + a 
+= 31; + else + a += 32; + a = a >> 6; + a = limit_value(a, -31, 31); + + b = b * (phy->nrssi[1] - phy->nrssi[0]); + b += (phy->nrssi[0] << 6); + if (b < 32) + b += 31; + else + b += 32; + b = b >> 6; + b = limit_value(b, -31, 31); + + tmp_u16 = b43legacy_phy_read(dev, 0x048A) & 0xF000; + tmp_u16 |= ((u32)b & 0x0000003F); + tmp_u16 |= (((u32)a & 0x0000003F) << 6); + b43legacy_phy_write(dev, 0x048A, tmp_u16); + } + break; + default: + B43legacy_BUG_ON(1); + } +} + +/* Stack implementation to save/restore values from the + * interference mitigation code. + * It is save to restore values in random order. + */ +static void _stack_save(u32 *_stackptr, size_t *stackidx, + u8 id, u16 offset, u16 value) +{ + u32 *stackptr = &(_stackptr[*stackidx]); + + B43legacy_WARN_ON(!((offset & 0xE000) == 0x0000)); + B43legacy_WARN_ON(!((id & 0xF8) == 0x00)); + *stackptr = offset; + *stackptr |= ((u32)id) << 13; + *stackptr |= ((u32)value) << 16; + (*stackidx)++; + B43legacy_WARN_ON(!(*stackidx < B43legacy_INTERFSTACK_SIZE)); +} + +static u16 _stack_restore(u32 *stackptr, + u8 id, u16 offset) +{ + size_t i; + + B43legacy_WARN_ON(!((offset & 0xE000) == 0x0000)); + B43legacy_WARN_ON(!((id & 0xF8) == 0x00)); + for (i = 0; i < B43legacy_INTERFSTACK_SIZE; i++, stackptr++) { + if ((*stackptr & 0x00001FFF) != offset) + continue; + if (((*stackptr & 0x00007000) >> 13) != id) + continue; + return ((*stackptr & 0xFFFF0000) >> 16); + } + B43legacy_BUG_ON(1); + + return 0; +} + +#define phy_stacksave(offset) \ + do { \ + _stack_save(stack, &stackidx, 0x1, (offset), \ + b43legacy_phy_read(dev, (offset))); \ + } while (0) +#define phy_stackrestore(offset) \ + do { \ + b43legacy_phy_write(dev, (offset), \ + _stack_restore(stack, 0x1, \ + (offset))); \ + } while (0) +#define radio_stacksave(offset) \ + do { \ + _stack_save(stack, &stackidx, 0x2, (offset), \ + b43legacy_radio_read16(dev, (offset))); \ + } while (0) +#define radio_stackrestore(offset) \ + do { \ + b43legacy_radio_write16(dev, (offset), \ + _stack_restore(stack, 0x2, \ + (offset))); \ + } while (0) +#define ilt_stacksave(offset) \ + do { \ + _stack_save(stack, &stackidx, 0x3, (offset), \ + b43legacy_ilt_read(dev, (offset))); \ + } while (0) +#define ilt_stackrestore(offset) \ + do { \ + b43legacy_ilt_write(dev, (offset), \ + _stack_restore(stack, 0x3, \ + (offset))); \ + } while (0) + +static void +b43legacy_radio_interference_mitigation_enable(struct b43legacy_wldev *dev, + int mode) +{ + struct b43legacy_phy *phy = &dev->phy; + u16 tmp; + u16 flipped; + u32 tmp32; + size_t stackidx = 0; + u32 *stack = phy->interfstack; + + switch (mode) { + case B43legacy_RADIO_INTERFMODE_NONWLAN: + if (phy->rev != 1) { + b43legacy_phy_write(dev, 0x042B, + b43legacy_phy_read(dev, 0x042B) + | 0x0800); + b43legacy_phy_write(dev, B43legacy_PHY_G_CRS, + b43legacy_phy_read(dev, + B43legacy_PHY_G_CRS) & ~0x4000); + break; + } + radio_stacksave(0x0078); + tmp = (b43legacy_radio_read16(dev, 0x0078) & 0x001E); + flipped = flip_4bit(tmp); + if (flipped < 10 && flipped >= 8) + flipped = 7; + else if (flipped >= 10) + flipped -= 3; + flipped = flip_4bit(flipped); + flipped = (flipped << 1) | 0x0020; + b43legacy_radio_write16(dev, 0x0078, flipped); + + b43legacy_calc_nrssi_threshold(dev); + + phy_stacksave(0x0406); + b43legacy_phy_write(dev, 0x0406, 0x7E28); + + b43legacy_phy_write(dev, 0x042B, + b43legacy_phy_read(dev, 0x042B) | 0x0800); + b43legacy_phy_write(dev, B43legacy_PHY_RADIO_BITFIELD, + b43legacy_phy_read(dev, + B43legacy_PHY_RADIO_BITFIELD) | 0x1000); + + phy_stacksave(0x04A0); 
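+		/* Each register is saved with phy_stacksave() before it is
+		 * overridden, so _stack_restore() can put the original value
+		 * back when interference mitigation is disabled again. */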
+ b43legacy_phy_write(dev, 0x04A0, + (b43legacy_phy_read(dev, 0x04A0) & 0xC0C0) + | 0x0008); + phy_stacksave(0x04A1); + b43legacy_phy_write(dev, 0x04A1, + (b43legacy_phy_read(dev, 0x04A1) & 0xC0C0) + | 0x0605); + phy_stacksave(0x04A2); + b43legacy_phy_write(dev, 0x04A2, + (b43legacy_phy_read(dev, 0x04A2) & 0xC0C0) + | 0x0204); + phy_stacksave(0x04A8); + b43legacy_phy_write(dev, 0x04A8, + (b43legacy_phy_read(dev, 0x04A8) & 0xC0C0) + | 0x0803); + phy_stacksave(0x04AB); + b43legacy_phy_write(dev, 0x04AB, + (b43legacy_phy_read(dev, 0x04AB) & 0xC0C0) + | 0x0605); + + phy_stacksave(0x04A7); + b43legacy_phy_write(dev, 0x04A7, 0x0002); + phy_stacksave(0x04A3); + b43legacy_phy_write(dev, 0x04A3, 0x287A); + phy_stacksave(0x04A9); + b43legacy_phy_write(dev, 0x04A9, 0x2027); + phy_stacksave(0x0493); + b43legacy_phy_write(dev, 0x0493, 0x32F5); + phy_stacksave(0x04AA); + b43legacy_phy_write(dev, 0x04AA, 0x2027); + phy_stacksave(0x04AC); + b43legacy_phy_write(dev, 0x04AC, 0x32F5); + break; + case B43legacy_RADIO_INTERFMODE_MANUALWLAN: + if (b43legacy_phy_read(dev, 0x0033) & 0x0800) + break; + + phy->aci_enable = 1; + + phy_stacksave(B43legacy_PHY_RADIO_BITFIELD); + phy_stacksave(B43legacy_PHY_G_CRS); + if (phy->rev < 2) + phy_stacksave(0x0406); + else { + phy_stacksave(0x04C0); + phy_stacksave(0x04C1); + } + phy_stacksave(0x0033); + phy_stacksave(0x04A7); + phy_stacksave(0x04A3); + phy_stacksave(0x04A9); + phy_stacksave(0x04AA); + phy_stacksave(0x04AC); + phy_stacksave(0x0493); + phy_stacksave(0x04A1); + phy_stacksave(0x04A0); + phy_stacksave(0x04A2); + phy_stacksave(0x048A); + phy_stacksave(0x04A8); + phy_stacksave(0x04AB); + if (phy->rev == 2) { + phy_stacksave(0x04AD); + phy_stacksave(0x04AE); + } else if (phy->rev >= 3) { + phy_stacksave(0x04AD); + phy_stacksave(0x0415); + phy_stacksave(0x0416); + phy_stacksave(0x0417); + ilt_stacksave(0x1A00 + 0x2); + ilt_stacksave(0x1A00 + 0x3); + } + phy_stacksave(0x042B); + phy_stacksave(0x048C); + + b43legacy_phy_write(dev, B43legacy_PHY_RADIO_BITFIELD, + b43legacy_phy_read(dev, + B43legacy_PHY_RADIO_BITFIELD) & ~0x1000); + b43legacy_phy_write(dev, B43legacy_PHY_G_CRS, + (b43legacy_phy_read(dev, + B43legacy_PHY_G_CRS) + & 0xFFFC) | 0x0002); + + b43legacy_phy_write(dev, 0x0033, 0x0800); + b43legacy_phy_write(dev, 0x04A3, 0x2027); + b43legacy_phy_write(dev, 0x04A9, 0x1CA8); + b43legacy_phy_write(dev, 0x0493, 0x287A); + b43legacy_phy_write(dev, 0x04AA, 0x1CA8); + b43legacy_phy_write(dev, 0x04AC, 0x287A); + + b43legacy_phy_write(dev, 0x04A0, + (b43legacy_phy_read(dev, 0x04A0) + & 0xFFC0) | 0x001A); + b43legacy_phy_write(dev, 0x04A7, 0x000D); + + if (phy->rev < 2) + b43legacy_phy_write(dev, 0x0406, 0xFF0D); + else if (phy->rev == 2) { + b43legacy_phy_write(dev, 0x04C0, 0xFFFF); + b43legacy_phy_write(dev, 0x04C1, 0x00A9); + } else { + b43legacy_phy_write(dev, 0x04C0, 0x00C1); + b43legacy_phy_write(dev, 0x04C1, 0x0059); + } + + b43legacy_phy_write(dev, 0x04A1, + (b43legacy_phy_read(dev, 0x04A1) + & 0xC0FF) | 0x1800); + b43legacy_phy_write(dev, 0x04A1, + (b43legacy_phy_read(dev, 0x04A1) + & 0xFFC0) | 0x0015); + b43legacy_phy_write(dev, 0x04A8, + (b43legacy_phy_read(dev, 0x04A8) + & 0xCFFF) | 0x1000); + b43legacy_phy_write(dev, 0x04A8, + (b43legacy_phy_read(dev, 0x04A8) + & 0xF0FF) | 0x0A00); + b43legacy_phy_write(dev, 0x04AB, + (b43legacy_phy_read(dev, 0x04AB) + & 0xCFFF) | 0x1000); + b43legacy_phy_write(dev, 0x04AB, + (b43legacy_phy_read(dev, 0x04AB) + & 0xF0FF) | 0x0800); + b43legacy_phy_write(dev, 0x04AB, + (b43legacy_phy_read(dev, 0x04AB) + & 0xFFCF) | 0x0010); + 
b43legacy_phy_write(dev, 0x04AB, + (b43legacy_phy_read(dev, 0x04AB) + & 0xFFF0) | 0x0005); + b43legacy_phy_write(dev, 0x04A8, + (b43legacy_phy_read(dev, 0x04A8) + & 0xFFCF) | 0x0010); + b43legacy_phy_write(dev, 0x04A8, + (b43legacy_phy_read(dev, 0x04A8) + & 0xFFF0) | 0x0006); + b43legacy_phy_write(dev, 0x04A2, + (b43legacy_phy_read(dev, 0x04A2) + & 0xF0FF) | 0x0800); + b43legacy_phy_write(dev, 0x04A0, + (b43legacy_phy_read(dev, 0x04A0) + & 0xF0FF) | 0x0500); + b43legacy_phy_write(dev, 0x04A2, + (b43legacy_phy_read(dev, 0x04A2) + & 0xFFF0) | 0x000B); + + if (phy->rev >= 3) { + b43legacy_phy_write(dev, 0x048A, + b43legacy_phy_read(dev, 0x048A) + & ~0x8000); + b43legacy_phy_write(dev, 0x0415, + (b43legacy_phy_read(dev, 0x0415) + & 0x8000) | 0x36D8); + b43legacy_phy_write(dev, 0x0416, + (b43legacy_phy_read(dev, 0x0416) + & 0x8000) | 0x36D8); + b43legacy_phy_write(dev, 0x0417, + (b43legacy_phy_read(dev, 0x0417) + & 0xFE00) | 0x016D); + } else { + b43legacy_phy_write(dev, 0x048A, + b43legacy_phy_read(dev, 0x048A) + | 0x1000); + b43legacy_phy_write(dev, 0x048A, + (b43legacy_phy_read(dev, 0x048A) + & 0x9FFF) | 0x2000); + tmp32 = b43legacy_shm_read32(dev, B43legacy_SHM_SHARED, + B43legacy_UCODEFLAGS_OFFSET); + if (!(tmp32 & 0x800)) { + tmp32 |= 0x800; + b43legacy_shm_write32(dev, B43legacy_SHM_SHARED, + B43legacy_UCODEFLAGS_OFFSET, + tmp32); + } + } + if (phy->rev >= 2) + b43legacy_phy_write(dev, 0x042B, + b43legacy_phy_read(dev, 0x042B) + | 0x0800); + b43legacy_phy_write(dev, 0x048C, + (b43legacy_phy_read(dev, 0x048C) + & 0xF0FF) | 0x0200); + if (phy->rev == 2) { + b43legacy_phy_write(dev, 0x04AE, + (b43legacy_phy_read(dev, 0x04AE) + & 0xFF00) | 0x007F); + b43legacy_phy_write(dev, 0x04AD, + (b43legacy_phy_read(dev, 0x04AD) + & 0x00FF) | 0x1300); + } else if (phy->rev >= 6) { + b43legacy_ilt_write(dev, 0x1A00 + 0x3, 0x007F); + b43legacy_ilt_write(dev, 0x1A00 + 0x2, 0x007F); + b43legacy_phy_write(dev, 0x04AD, + b43legacy_phy_read(dev, 0x04AD) + & 0x00FF); + } + b43legacy_calc_nrssi_slope(dev); + break; + default: + B43legacy_BUG_ON(1); + } +} + +static void +b43legacy_radio_interference_mitigation_disable(struct b43legacy_wldev *dev, + int mode) +{ + struct b43legacy_phy *phy = &dev->phy; + u32 tmp32; + u32 *stack = phy->interfstack; + + switch (mode) { + case B43legacy_RADIO_INTERFMODE_NONWLAN: + if (phy->rev != 1) { + b43legacy_phy_write(dev, 0x042B, + b43legacy_phy_read(dev, 0x042B) + & ~0x0800); + b43legacy_phy_write(dev, B43legacy_PHY_G_CRS, + b43legacy_phy_read(dev, + B43legacy_PHY_G_CRS) | 0x4000); + break; + } + phy_stackrestore(0x0078); + b43legacy_calc_nrssi_threshold(dev); + phy_stackrestore(0x0406); + b43legacy_phy_write(dev, 0x042B, + b43legacy_phy_read(dev, 0x042B) & ~0x0800); + if (!dev->bad_frames_preempt) + b43legacy_phy_write(dev, B43legacy_PHY_RADIO_BITFIELD, + b43legacy_phy_read(dev, + B43legacy_PHY_RADIO_BITFIELD) + & ~(1 << 11)); + b43legacy_phy_write(dev, B43legacy_PHY_G_CRS, + b43legacy_phy_read(dev, B43legacy_PHY_G_CRS) + | 0x4000); + phy_stackrestore(0x04A0); + phy_stackrestore(0x04A1); + phy_stackrestore(0x04A2); + phy_stackrestore(0x04A8); + phy_stackrestore(0x04AB); + phy_stackrestore(0x04A7); + phy_stackrestore(0x04A3); + phy_stackrestore(0x04A9); + phy_stackrestore(0x0493); + phy_stackrestore(0x04AA); + phy_stackrestore(0x04AC); + break; + case B43legacy_RADIO_INTERFMODE_MANUALWLAN: + if (!(b43legacy_phy_read(dev, 0x0033) & 0x0800)) + break; + + phy->aci_enable = 0; + + phy_stackrestore(B43legacy_PHY_RADIO_BITFIELD); + phy_stackrestore(B43legacy_PHY_G_CRS); + 
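+		/* Restore order does not matter here; see the _stack_save()
+		 * and _stack_restore() helpers above. */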
phy_stackrestore(0x0033); + phy_stackrestore(0x04A3); + phy_stackrestore(0x04A9); + phy_stackrestore(0x0493); + phy_stackrestore(0x04AA); + phy_stackrestore(0x04AC); + phy_stackrestore(0x04A0); + phy_stackrestore(0x04A7); + if (phy->rev >= 2) { + phy_stackrestore(0x04C0); + phy_stackrestore(0x04C1); + } else + phy_stackrestore(0x0406); + phy_stackrestore(0x04A1); + phy_stackrestore(0x04AB); + phy_stackrestore(0x04A8); + if (phy->rev == 2) { + phy_stackrestore(0x04AD); + phy_stackrestore(0x04AE); + } else if (phy->rev >= 3) { + phy_stackrestore(0x04AD); + phy_stackrestore(0x0415); + phy_stackrestore(0x0416); + phy_stackrestore(0x0417); + ilt_stackrestore(0x1A00 + 0x2); + ilt_stackrestore(0x1A00 + 0x3); + } + phy_stackrestore(0x04A2); + phy_stackrestore(0x04A8); + phy_stackrestore(0x042B); + phy_stackrestore(0x048C); + tmp32 = b43legacy_shm_read32(dev, B43legacy_SHM_SHARED, + B43legacy_UCODEFLAGS_OFFSET); + if (tmp32 & 0x800) { + tmp32 &= ~0x800; + b43legacy_shm_write32(dev, B43legacy_SHM_SHARED, + B43legacy_UCODEFLAGS_OFFSET, + tmp32); + } + b43legacy_calc_nrssi_slope(dev); + break; + default: + B43legacy_BUG_ON(1); + } +} + +#undef phy_stacksave +#undef phy_stackrestore +#undef radio_stacksave +#undef radio_stackrestore +#undef ilt_stacksave +#undef ilt_stackrestore + +int b43legacy_radio_set_interference_mitigation(struct b43legacy_wldev *dev, + int mode) +{ + struct b43legacy_phy *phy = &dev->phy; + int currentmode; + + if ((phy->type != B43legacy_PHYTYPE_G) || + (phy->rev == 0) || (!phy->gmode)) + return -ENODEV; + + phy->aci_wlan_automatic = 0; + switch (mode) { + case B43legacy_RADIO_INTERFMODE_AUTOWLAN: + phy->aci_wlan_automatic = 1; + if (phy->aci_enable) + mode = B43legacy_RADIO_INTERFMODE_MANUALWLAN; + else + mode = B43legacy_RADIO_INTERFMODE_NONE; + break; + case B43legacy_RADIO_INTERFMODE_NONE: + case B43legacy_RADIO_INTERFMODE_NONWLAN: + case B43legacy_RADIO_INTERFMODE_MANUALWLAN: + break; + default: + return -EINVAL; + } + + currentmode = phy->interfmode; + if (currentmode == mode) + return 0; + if (currentmode != B43legacy_RADIO_INTERFMODE_NONE) + b43legacy_radio_interference_mitigation_disable(dev, + currentmode); + + if (mode == B43legacy_RADIO_INTERFMODE_NONE) { + phy->aci_enable = 0; + phy->aci_hw_rssi = 0; + } else + b43legacy_radio_interference_mitigation_enable(dev, mode); + phy->interfmode = mode; + + return 0; +} + +u16 b43legacy_radio_calibrationvalue(struct b43legacy_wldev *dev) +{ + u16 reg; + u16 index; + u16 ret; + + reg = b43legacy_radio_read16(dev, 0x0060); + index = (reg & 0x001E) >> 1; + ret = rcc_table[index] << 1; + ret |= (reg & 0x0001); + ret |= 0x0020; + + return ret; +} + +#define LPD(L, P, D) (((L) << 2) | ((P) << 1) | ((D) << 0)) +static u16 b43legacy_get_812_value(struct b43legacy_wldev *dev, u8 lpd) +{ + struct b43legacy_phy *phy = &dev->phy; + u16 loop_or = 0; + u16 adj_loopback_gain = phy->loopback_gain[0]; + u8 loop; + u16 extern_lna_control; + + if (!phy->gmode) + return 0; + if (!has_loopback_gain(phy)) { + if (phy->rev < 7 || !(dev->dev->bus->sprom.r1.boardflags_lo + & B43legacy_BFL_EXTLNA)) { + switch (lpd) { + case LPD(0, 1, 1): + return 0x0FB2; + case LPD(0, 0, 1): + return 0x00B2; + case LPD(1, 0, 1): + return 0x30B2; + case LPD(1, 0, 0): + return 0x30B3; + default: + B43legacy_BUG_ON(1); + } + } else { + switch (lpd) { + case LPD(0, 1, 1): + return 0x8FB2; + case LPD(0, 0, 1): + return 0x80B2; + case LPD(1, 0, 1): + return 0x20B2; + case LPD(1, 0, 0): + return 0x20B3; + default: + B43legacy_BUG_ON(1); + } + } + } else { + if 
(phy->radio_rev == 8) + adj_loopback_gain += 0x003E; + else + adj_loopback_gain += 0x0026; + if (adj_loopback_gain >= 0x46) { + adj_loopback_gain -= 0x46; + extern_lna_control = 0x3000; + } else if (adj_loopback_gain >= 0x3A) { + adj_loopback_gain -= 0x3A; + extern_lna_control = 0x2000; + } else if (adj_loopback_gain >= 0x2E) { + adj_loopback_gain -= 0x2E; + extern_lna_control = 0x1000; + } else { + adj_loopback_gain -= 0x10; + extern_lna_control = 0x0000; + } + for (loop = 0; loop < 16; loop++) { + u16 tmp = adj_loopback_gain - 6 * loop; + if (tmp < 6) + break; + } + + loop_or = (loop << 8) | extern_lna_control; + if (phy->rev >= 7 && dev->dev->bus->sprom.r1.boardflags_lo + & B43legacy_BFL_EXTLNA) { + if (extern_lna_control) + loop_or |= 0x8000; + switch (lpd) { + case LPD(0, 1, 1): + return 0x8F92; + case LPD(0, 0, 1): + return (0x8092 | loop_or); + case LPD(1, 0, 1): + return (0x2092 | loop_or); + case LPD(1, 0, 0): + return (0x2093 | loop_or); + default: + B43legacy_BUG_ON(1); + } + } else { + switch (lpd) { + case LPD(0, 1, 1): + return 0x0F92; + case LPD(0, 0, 1): + case LPD(1, 0, 1): + return (0x0092 | loop_or); + case LPD(1, 0, 0): + return (0x0093 | loop_or); + default: + B43legacy_BUG_ON(1); + } + } + } + return 0; +} + +u16 b43legacy_radio_init2050(struct b43legacy_wldev *dev) +{ + struct b43legacy_phy *phy = &dev->phy; + u16 backup[21] = { 0 }; + u16 ret; + u16 i; + u16 j; + u32 tmp1 = 0; + u32 tmp2 = 0; + + backup[0] = b43legacy_radio_read16(dev, 0x0043); + backup[14] = b43legacy_radio_read16(dev, 0x0051); + backup[15] = b43legacy_radio_read16(dev, 0x0052); + backup[1] = b43legacy_phy_read(dev, 0x0015); + backup[16] = b43legacy_phy_read(dev, 0x005A); + backup[17] = b43legacy_phy_read(dev, 0x0059); + backup[18] = b43legacy_phy_read(dev, 0x0058); + if (phy->type == B43legacy_PHYTYPE_B) { + backup[2] = b43legacy_phy_read(dev, 0x0030); + backup[3] = b43legacy_read16(dev, 0x03EC); + b43legacy_phy_write(dev, 0x0030, 0x00FF); + b43legacy_write16(dev, 0x03EC, 0x3F3F); + } else { + if (phy->gmode) { + backup[4] = b43legacy_phy_read(dev, 0x0811); + backup[5] = b43legacy_phy_read(dev, 0x0812); + backup[6] = b43legacy_phy_read(dev, 0x0814); + backup[7] = b43legacy_phy_read(dev, 0x0815); + backup[8] = b43legacy_phy_read(dev, + B43legacy_PHY_G_CRS); + backup[9] = b43legacy_phy_read(dev, 0x0802); + b43legacy_phy_write(dev, 0x0814, + (b43legacy_phy_read(dev, 0x0814) + | 0x0003)); + b43legacy_phy_write(dev, 0x0815, + (b43legacy_phy_read(dev, 0x0815) + & 0xFFFC)); + b43legacy_phy_write(dev, B43legacy_PHY_G_CRS, + (b43legacy_phy_read(dev, + B43legacy_PHY_G_CRS) & 0x7FFF)); + b43legacy_phy_write(dev, 0x0802, + (b43legacy_phy_read(dev, 0x0802) + & 0xFFFC)); + if (phy->rev > 1) { /* loopback gain enabled */ + backup[19] = b43legacy_phy_read(dev, 0x080F); + backup[20] = b43legacy_phy_read(dev, 0x0810); + if (phy->rev >= 3) + b43legacy_phy_write(dev, 0x080F, + 0xC020); + else + b43legacy_phy_write(dev, 0x080F, + 0x8020); + b43legacy_phy_write(dev, 0x0810, 0x0000); + } + b43legacy_phy_write(dev, 0x0812, + b43legacy_get_812_value(dev, + LPD(0, 1, 1))); + if (phy->rev < 7 || + !(dev->dev->bus->sprom.r1.boardflags_lo + & B43legacy_BFL_EXTLNA)) + b43legacy_phy_write(dev, 0x0811, 0x01B3); + else + b43legacy_phy_write(dev, 0x0811, 0x09B3); + } + } + b43legacy_write16(dev, B43legacy_MMIO_PHY_RADIO, + (b43legacy_read16(dev, B43legacy_MMIO_PHY_RADIO) + | 0x8000)); + backup[10] = b43legacy_phy_read(dev, 0x0035); + b43legacy_phy_write(dev, 0x0035, + (b43legacy_phy_read(dev, 0x0035) & 0xFF7F)); + backup[11] = 
b43legacy_read16(dev, 0x03E6); + backup[12] = b43legacy_read16(dev, B43legacy_MMIO_CHANNEL_EXT); + + /* Initialization */ + if (phy->analog == 0) + b43legacy_write16(dev, 0x03E6, 0x0122); + else { + if (phy->analog >= 2) + b43legacy_phy_write(dev, 0x0003, + (b43legacy_phy_read(dev, 0x0003) + & 0xFFBF) | 0x0040); + b43legacy_write16(dev, B43legacy_MMIO_CHANNEL_EXT, + (b43legacy_read16(dev, + B43legacy_MMIO_CHANNEL_EXT) | 0x2000)); + } + + ret = b43legacy_radio_calibrationvalue(dev); + + if (phy->type == B43legacy_PHYTYPE_B) + b43legacy_radio_write16(dev, 0x0078, 0x0026); + + if (phy->gmode) + b43legacy_phy_write(dev, 0x0812, + b43legacy_get_812_value(dev, + LPD(0, 1, 1))); + b43legacy_phy_write(dev, 0x0015, 0xBFAF); + b43legacy_phy_write(dev, 0x002B, 0x1403); + if (phy->gmode) + b43legacy_phy_write(dev, 0x0812, + b43legacy_get_812_value(dev, + LPD(0, 0, 1))); + b43legacy_phy_write(dev, 0x0015, 0xBFA0); + b43legacy_radio_write16(dev, 0x0051, + (b43legacy_radio_read16(dev, 0x0051) + | 0x0004)); + if (phy->radio_rev == 8) + b43legacy_radio_write16(dev, 0x0043, 0x001F); + else { + b43legacy_radio_write16(dev, 0x0052, 0x0000); + b43legacy_radio_write16(dev, 0x0043, + (b43legacy_radio_read16(dev, 0x0043) + & 0xFFF0) | 0x0009); + } + b43legacy_phy_write(dev, 0x0058, 0x0000); + + for (i = 0; i < 16; i++) { + b43legacy_phy_write(dev, 0x005A, 0x0480); + b43legacy_phy_write(dev, 0x0059, 0xC810); + b43legacy_phy_write(dev, 0x0058, 0x000D); + if (phy->gmode) + b43legacy_phy_write(dev, 0x0812, + b43legacy_get_812_value(dev, + LPD(1, 0, 1))); + b43legacy_phy_write(dev, 0x0015, 0xAFB0); + udelay(10); + if (phy->gmode) + b43legacy_phy_write(dev, 0x0812, + b43legacy_get_812_value(dev, + LPD(1, 0, 1))); + b43legacy_phy_write(dev, 0x0015, 0xEFB0); + udelay(10); + if (phy->gmode) + b43legacy_phy_write(dev, 0x0812, + b43legacy_get_812_value(dev, + LPD(1, 0, 0))); + b43legacy_phy_write(dev, 0x0015, 0xFFF0); + udelay(20); + tmp1 += b43legacy_phy_read(dev, 0x002D); + b43legacy_phy_write(dev, 0x0058, 0x0000); + if (phy->gmode) + b43legacy_phy_write(dev, 0x0812, + b43legacy_get_812_value(dev, + LPD(1, 0, 1))); + b43legacy_phy_write(dev, 0x0015, 0xAFB0); + } + + tmp1++; + tmp1 >>= 9; + udelay(10); + b43legacy_phy_write(dev, 0x0058, 0x0000); + + for (i = 0; i < 16; i++) { + b43legacy_radio_write16(dev, 0x0078, (flip_4bit(i) << 1) + | 0x0020); + backup[13] = b43legacy_radio_read16(dev, 0x0078); + udelay(10); + for (j = 0; j < 16; j++) { + b43legacy_phy_write(dev, 0x005A, 0x0D80); + b43legacy_phy_write(dev, 0x0059, 0xC810); + b43legacy_phy_write(dev, 0x0058, 0x000D); + if (phy->gmode) + b43legacy_phy_write(dev, 0x0812, + b43legacy_get_812_value(dev, + LPD(1, 0, 1))); + b43legacy_phy_write(dev, 0x0015, 0xAFB0); + udelay(10); + if (phy->gmode) + b43legacy_phy_write(dev, 0x0812, + b43legacy_get_812_value(dev, + LPD(1, 0, 1))); + b43legacy_phy_write(dev, 0x0015, 0xEFB0); + udelay(10); + if (phy->gmode) + b43legacy_phy_write(dev, 0x0812, + b43legacy_get_812_value(dev, + LPD(1, 0, 0))); + b43legacy_phy_write(dev, 0x0015, 0xFFF0); + udelay(10); + tmp2 += b43legacy_phy_read(dev, 0x002D); + b43legacy_phy_write(dev, 0x0058, 0x0000); + if (phy->gmode) + b43legacy_phy_write(dev, 0x0812, + b43legacy_get_812_value(dev, + LPD(1, 0, 1))); + b43legacy_phy_write(dev, 0x0015, 0xAFB0); + } + tmp2++; + tmp2 >>= 8; + if (tmp1 < tmp2) + break; + } + + /* Restore the registers */ + b43legacy_phy_write(dev, 0x0015, backup[1]); + b43legacy_radio_write16(dev, 0x0051, backup[14]); + b43legacy_radio_write16(dev, 0x0052, backup[15]); + 
b43legacy_radio_write16(dev, 0x0043, backup[0]); + b43legacy_phy_write(dev, 0x005A, backup[16]); + b43legacy_phy_write(dev, 0x0059, backup[17]); + b43legacy_phy_write(dev, 0x0058, backup[18]); + b43legacy_write16(dev, 0x03E6, backup[11]); + if (phy->analog != 0) + b43legacy_write16(dev, B43legacy_MMIO_CHANNEL_EXT, backup[12]); + b43legacy_phy_write(dev, 0x0035, backup[10]); + b43legacy_radio_selectchannel(dev, phy->channel, 1); + if (phy->type == B43legacy_PHYTYPE_B) { + b43legacy_phy_write(dev, 0x0030, backup[2]); + b43legacy_write16(dev, 0x03EC, backup[3]); + } else { + if (phy->gmode) { + b43legacy_write16(dev, B43legacy_MMIO_PHY_RADIO, + (b43legacy_read16(dev, + B43legacy_MMIO_PHY_RADIO) & 0x7FFF)); + b43legacy_phy_write(dev, 0x0811, backup[4]); + b43legacy_phy_write(dev, 0x0812, backup[5]); + b43legacy_phy_write(dev, 0x0814, backup[6]); + b43legacy_phy_write(dev, 0x0815, backup[7]); + b43legacy_phy_write(dev, B43legacy_PHY_G_CRS, + backup[8]); + b43legacy_phy_write(dev, 0x0802, backup[9]); + if (phy->rev > 1) { + b43legacy_phy_write(dev, 0x080F, backup[19]); + b43legacy_phy_write(dev, 0x0810, backup[20]); + } + } + } + if (i >= 15) + ret = backup[13]; + + return ret; +} + +static inline +u16 freq_r3A_value(u16 frequency) +{ + u16 value; + + if (frequency < 5091) + value = 0x0040; + else if (frequency < 5321) + value = 0x0000; + else if (frequency < 5806) + value = 0x0080; + else + value = 0x0040; + + return value; +} + +void b43legacy_radio_set_tx_iq(struct b43legacy_wldev *dev) +{ + static const u8 data_high[5] = { 0x00, 0x40, 0x80, 0x90, 0xD0 }; + static const u8 data_low[5] = { 0x00, 0x01, 0x05, 0x06, 0x0A }; + u16 tmp = b43legacy_radio_read16(dev, 0x001E); + int i; + int j; + + for (i = 0; i < 5; i++) { + for (j = 0; j < 5; j++) { + if (tmp == (data_high[i] | data_low[j])) { + b43legacy_phy_write(dev, 0x0069, (i - j) << 8 | + 0x00C0); + return; + } + } + } +} + +int b43legacy_radio_selectchannel(struct b43legacy_wldev *dev, + u8 channel, + int synthetic_pu_workaround) +{ + struct b43legacy_phy *phy = &dev->phy; + + if (channel == 0xFF) { + switch (phy->type) { + case B43legacy_PHYTYPE_B: + case B43legacy_PHYTYPE_G: + channel = B43legacy_RADIO_DEFAULT_CHANNEL_BG; + break; + default: + B43legacy_WARN_ON(1); + } + } + +/* TODO: Check if channel is valid - return -EINVAL if not */ + if (synthetic_pu_workaround) + b43legacy_synth_pu_workaround(dev, channel); + + b43legacy_write16(dev, B43legacy_MMIO_CHANNEL, + channel2freq_bg(channel)); + + if (channel == 14) { + if (dev->dev->bus->sprom.r1.country_code == 5) /* JAPAN */ + b43legacy_shm_write32(dev, B43legacy_SHM_SHARED, + B43legacy_UCODEFLAGS_OFFSET, + b43legacy_shm_read32(dev, + B43legacy_SHM_SHARED, + B43legacy_UCODEFLAGS_OFFSET) + & ~(1 << 7)); + else + b43legacy_shm_write32(dev, B43legacy_SHM_SHARED, + B43legacy_UCODEFLAGS_OFFSET, + b43legacy_shm_read32(dev, + B43legacy_SHM_SHARED, + B43legacy_UCODEFLAGS_OFFSET) + | (1 << 7)); + b43legacy_write16(dev, B43legacy_MMIO_CHANNEL_EXT, + b43legacy_read16(dev, + B43legacy_MMIO_CHANNEL_EXT) | (1 << 11)); + } else + b43legacy_write16(dev, B43legacy_MMIO_CHANNEL_EXT, + b43legacy_read16(dev, + B43legacy_MMIO_CHANNEL_EXT) & 0xF7BF); + + phy->channel = channel; + /*XXX: Using the longer of 2 timeouts (8000 vs 2000 usecs). Specs state + * that 2000 usecs might suffice. 
*/ + msleep(8); + + return 0; +} + +void b43legacy_radio_set_txantenna(struct b43legacy_wldev *dev, u32 val) +{ + u16 tmp; + + val <<= 8; + tmp = b43legacy_shm_read16(dev, B43legacy_SHM_SHARED, 0x0022) & 0xFCFF; + b43legacy_shm_write16(dev, B43legacy_SHM_SHARED, 0x0022, tmp | val); + tmp = b43legacy_shm_read16(dev, B43legacy_SHM_SHARED, 0x03A8) & 0xFCFF; + b43legacy_shm_write16(dev, B43legacy_SHM_SHARED, 0x03A8, tmp | val); + tmp = b43legacy_shm_read16(dev, B43legacy_SHM_SHARED, 0x0054) & 0xFCFF; + b43legacy_shm_write16(dev, B43legacy_SHM_SHARED, 0x0054, tmp | val); +} + +/* http://bcm-specs.sipsolutions.net/TX_Gain_Base_Band */ +static u16 b43legacy_get_txgain_base_band(u16 txpower) +{ + u16 ret; + + B43legacy_WARN_ON(txpower > 63); + + if (txpower >= 54) + ret = 2; + else if (txpower >= 49) + ret = 4; + else if (txpower >= 44) + ret = 5; + else + ret = 6; + + return ret; +} + +/* http://bcm-specs.sipsolutions.net/TX_Gain_Radio_Frequency_Power_Amplifier */ +static u16 b43legacy_get_txgain_freq_power_amp(u16 txpower) +{ + u16 ret; + + B43legacy_WARN_ON(txpower > 63); + + if (txpower >= 32) + ret = 0; + else if (txpower >= 25) + ret = 1; + else if (txpower >= 20) + ret = 2; + else if (txpower >= 12) + ret = 3; + else + ret = 4; + + return ret; +} + +/* http://bcm-specs.sipsolutions.net/TX_Gain_Digital_Analog_Converter */ +static u16 b43legacy_get_txgain_dac(u16 txpower) +{ + u16 ret; + + B43legacy_WARN_ON(txpower > 63); + + if (txpower >= 54) + ret = txpower - 53; + else if (txpower >= 49) + ret = txpower - 42; + else if (txpower >= 44) + ret = txpower - 37; + else if (txpower >= 32) + ret = txpower - 32; + else if (txpower >= 25) + ret = txpower - 20; + else if (txpower >= 20) + ret = txpower - 13; + else if (txpower >= 12) + ret = txpower - 8; + else + ret = txpower; + + return ret; +} + +void b43legacy_radio_set_txpower_a(struct b43legacy_wldev *dev, u16 txpower) +{ + struct b43legacy_phy *phy = &dev->phy; + u16 pamp; + u16 base; + u16 dac; + u16 ilt; + + txpower = limit_value(txpower, 0, 63); + + pamp = b43legacy_get_txgain_freq_power_amp(txpower); + pamp <<= 5; + pamp &= 0x00E0; + b43legacy_phy_write(dev, 0x0019, pamp); + + base = b43legacy_get_txgain_base_band(txpower); + base &= 0x000F; + b43legacy_phy_write(dev, 0x0017, base | 0x0020); + + ilt = b43legacy_ilt_read(dev, 0x3001); + ilt &= 0x0007; + + dac = b43legacy_get_txgain_dac(txpower); + dac <<= 3; + dac |= ilt; + + b43legacy_ilt_write(dev, 0x3001, dac); + + phy->txpwr_offset = txpower; + + /* TODO: FuncPlaceholder (Adjust BB loft cancel) */ +} + +void b43legacy_radio_set_txpower_bg(struct b43legacy_wldev *dev, + u16 baseband_attenuation, + u16 radio_attenuation, + u16 txpower) +{ + struct b43legacy_phy *phy = &dev->phy; + + if (baseband_attenuation == 0xFFFF) + baseband_attenuation = phy->bbatt; + if (radio_attenuation == 0xFFFF) + radio_attenuation = phy->rfatt; + if (txpower == 0xFFFF) + txpower = phy->txctl1; + phy->bbatt = baseband_attenuation; + phy->rfatt = radio_attenuation; + phy->txctl1 = txpower; + + B43legacy_WARN_ON(baseband_attenuation > 11); + if (phy->radio_rev < 6) + B43legacy_WARN_ON(radio_attenuation > 9); + else + B43legacy_WARN_ON(radio_attenuation > 31); + B43legacy_WARN_ON(txpower > 7); + + b43legacy_phy_set_baseband_attenuation(dev, baseband_attenuation); + b43legacy_radio_write16(dev, 0x0043, radio_attenuation); + b43legacy_shm_write16(dev, B43legacy_SHM_SHARED, 0x0064, + radio_attenuation); + if (phy->radio_ver == 0x2050) + b43legacy_radio_write16(dev, 0x0052, + (b43legacy_radio_read16(dev, 0x0052) + & 
~0x0070) | ((txpower << 4) & 0x0070)); + /* FIXME: The spec is very weird and unclear here. */ + if (phy->type == B43legacy_PHYTYPE_G) + b43legacy_phy_lo_adjust(dev, 0); +} + +u16 b43legacy_default_baseband_attenuation(struct b43legacy_wldev *dev) +{ + struct b43legacy_phy *phy = &dev->phy; + + if (phy->radio_ver == 0x2050 && phy->radio_rev < 6) + return 0; + return 2; +} + +u16 b43legacy_default_radio_attenuation(struct b43legacy_wldev *dev) +{ + struct b43legacy_phy *phy = &dev->phy; + u16 att = 0xFFFF; + + switch (phy->radio_ver) { + case 0x2053: + switch (phy->radio_rev) { + case 1: + att = 6; + break; + } + break; + case 0x2050: + switch (phy->radio_rev) { + case 0: + att = 5; + break; + case 1: + if (phy->type == B43legacy_PHYTYPE_G) { + if (is_bcm_board_vendor(dev) && + dev->dev->bus->boardinfo.type == 0x421 && + dev->dev->bus->boardinfo.rev >= 30) + att = 3; + else if (is_bcm_board_vendor(dev) && + dev->dev->bus->boardinfo.type == 0x416) + att = 3; + else + att = 1; + } else { + if (is_bcm_board_vendor(dev) && + dev->dev->bus->boardinfo.type == 0x421 && + dev->dev->bus->boardinfo.rev >= 30) + att = 7; + else + att = 6; + } + break; + case 2: + if (phy->type == B43legacy_PHYTYPE_G) { + if (is_bcm_board_vendor(dev) && + dev->dev->bus->boardinfo.type == 0x421 && + dev->dev->bus->boardinfo.rev >= 30) + att = 3; + else if (is_bcm_board_vendor(dev) && + dev->dev->bus->boardinfo.type == + 0x416) + att = 5; + else if (dev->dev->bus->chip_id == 0x4320) + att = 4; + else + att = 3; + } else + att = 6; + break; + case 3: + att = 5; + break; + case 4: + case 5: + att = 1; + break; + case 6: + case 7: + att = 5; + break; + case 8: + att = 0x1A; + break; + case 9: + default: + att = 5; + } + } + if (is_bcm_board_vendor(dev) && + dev->dev->bus->boardinfo.type == 0x421) { + if (dev->dev->bus->boardinfo.rev < 0x43) + att = 2; + else if (dev->dev->bus->boardinfo.rev < 0x51) + att = 3; + } + if (att == 0xFFFF) + att = 5; + + return att; +} + +u16 b43legacy_default_txctl1(struct b43legacy_wldev *dev) +{ + struct b43legacy_phy *phy = &dev->phy; + + if (phy->radio_ver != 0x2050) + return 0; + if (phy->radio_rev == 1) + return 3; + if (phy->radio_rev < 6) + return 2; + if (phy->radio_rev == 8) + return 1; + return 0; +} + +void b43legacy_radio_turn_on(struct b43legacy_wldev *dev) +{ + struct b43legacy_phy *phy = &dev->phy; + int err; + u8 channel; + + might_sleep(); + + if (phy->radio_on) + return; + + switch (phy->type) { + case B43legacy_PHYTYPE_B: + case B43legacy_PHYTYPE_G: + b43legacy_phy_write(dev, 0x0015, 0x8000); + b43legacy_phy_write(dev, 0x0015, 0xCC00); + b43legacy_phy_write(dev, 0x0015, + (phy->gmode ? 0x00C0 : 0x0000)); + if (phy->radio_off_context.valid) { + /* Restore the RFover values. 
*/ + b43legacy_phy_write(dev, B43legacy_PHY_RFOVER, + phy->radio_off_context.rfover); + b43legacy_phy_write(dev, B43legacy_PHY_RFOVERVAL, + phy->radio_off_context.rfoverval); + phy->radio_off_context.valid = 0; + } + channel = phy->channel; + err = b43legacy_radio_selectchannel(dev, + B43legacy_RADIO_DEFAULT_CHANNEL_BG, 1); + err |= b43legacy_radio_selectchannel(dev, channel, 0); + B43legacy_WARN_ON(err); + break; + default: + B43legacy_BUG_ON(1); + } + phy->radio_on = 1; + b43legacy_leds_update(dev, 0); +} + +void b43legacy_radio_turn_off(struct b43legacy_wldev *dev) +{ + struct b43legacy_phy *phy = &dev->phy; + + if (phy->type == B43legacy_PHYTYPE_G && dev->dev->id.revision >= 5) { + u16 rfover, rfoverval; + + rfover = b43legacy_phy_read(dev, B43legacy_PHY_RFOVER); + rfoverval = b43legacy_phy_read(dev, B43legacy_PHY_RFOVERVAL); + phy->radio_off_context.rfover = rfover; + phy->radio_off_context.rfoverval = rfoverval; + phy->radio_off_context.valid = 1; + b43legacy_phy_write(dev, B43legacy_PHY_RFOVER, rfover | 0x008C); + b43legacy_phy_write(dev, B43legacy_PHY_RFOVERVAL, + rfoverval & 0xFF73); + } else + b43legacy_phy_write(dev, 0x0015, 0xAA00); + phy->radio_on = 0; + b43legacydbg(dev->wl, "Radio initialized\n"); + b43legacy_leds_update(dev, 0); +} + +void b43legacy_radio_clear_tssi(struct b43legacy_wldev *dev) +{ + struct b43legacy_phy *phy = &dev->phy; + + switch (phy->type) { + case B43legacy_PHYTYPE_B: + case B43legacy_PHYTYPE_G: + b43legacy_shm_write16(dev, B43legacy_SHM_SHARED, 0x0058, + 0x7F7F); + b43legacy_shm_write16(dev, B43legacy_SHM_SHARED, 0x005a, + 0x7F7F); + b43legacy_shm_write16(dev, B43legacy_SHM_SHARED, 0x0070, + 0x7F7F); + b43legacy_shm_write16(dev, B43legacy_SHM_SHARED, 0x0072, + 0x7F7F); + break; + } +} diff --git a/drivers/net/wireless/b43legacy/radio.h b/drivers/net/wireless/b43legacy/radio.h new file mode 100644 index 0000000000000000000000000000000000000000..6c6a203439e196663ed6f65bdde60c80d50ab066 --- /dev/null +++ b/drivers/net/wireless/b43legacy/radio.h @@ -0,0 +1,98 @@ +/* + + Broadcom B43legacy wireless driver + + Copyright (c) 2005 Martin Langer , + Stefano Brivio + Michael Buesch + Danny van Dyk + Andreas Jaggi + + Some parts of the code in this file are derived from the ipw2200 + driver Copyright(c) 2003 - 2004 Intel Corporation. + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; see the file COPYING. If not, write to + the Free Software Foundation, Inc., 51 Franklin Steet, Fifth Floor, + Boston, MA 02110-1301, USA. + +*/ + +#ifndef B43legacy_RADIO_H_ +#define B43legacy_RADIO_H_ + +#include "b43legacy.h" + + +#define B43legacy_RADIO_DEFAULT_CHANNEL_BG 6 + +/* Force antenna 0. */ +#define B43legacy_RADIO_TXANTENNA_0 0 +/* Force antenna 1. */ +#define B43legacy_RADIO_TXANTENNA_1 1 +/* Use the RX antenna, that was selected for the most recently + * received good PLCP header. 
+ */ +#define B43legacy_RADIO_TXANTENNA_LASTPLCP 3 +#define B43legacy_RADIO_TXANTENNA_DEFAULT B43legacy_RADIO_TXANTENNA_LASTPLCP + +#define B43legacy_RADIO_INTERFMODE_NONE 0 +#define B43legacy_RADIO_INTERFMODE_NONWLAN 1 +#define B43legacy_RADIO_INTERFMODE_MANUALWLAN 2 +#define B43legacy_RADIO_INTERFMODE_AUTOWLAN 3 + + +void b43legacy_radio_lock(struct b43legacy_wldev *dev); +void b43legacy_radio_unlock(struct b43legacy_wldev *dev); + +u16 b43legacy_radio_read16(struct b43legacy_wldev *dev, u16 offset); +void b43legacy_radio_write16(struct b43legacy_wldev *dev, u16 offset, u16 val); + +u16 b43legacy_radio_init2050(struct b43legacy_wldev *dev); + +void b43legacy_radio_turn_on(struct b43legacy_wldev *dev); +void b43legacy_radio_turn_off(struct b43legacy_wldev *dev); + +int b43legacy_radio_selectchannel(struct b43legacy_wldev *dev, u8 channel, + int synthetic_pu_workaround); + +void b43legacy_radio_set_txpower_a(struct b43legacy_wldev *dev, u16 txpower); +void b43legacy_radio_set_txpower_bg(struct b43legacy_wldev *dev, + u16 baseband_attenuation, u16 attenuation, + u16 txpower); + +u16 b43legacy_default_baseband_attenuation(struct b43legacy_wldev *dev); +u16 b43legacy_default_radio_attenuation(struct b43legacy_wldev *dev); +u16 b43legacy_default_txctl1(struct b43legacy_wldev *dev); + +void b43legacy_radio_set_txantenna(struct b43legacy_wldev *dev, u32 val); + +void b43legacy_radio_clear_tssi(struct b43legacy_wldev *dev); + +u8 b43legacy_radio_aci_detect(struct b43legacy_wldev *dev, u8 channel); +u8 b43legacy_radio_aci_scan(struct b43legacy_wldev *dev); + +int b43legacy_radio_set_interference_mitigation(struct b43legacy_wldev *dev, + int mode); + +void b43legacy_calc_nrssi_slope(struct b43legacy_wldev *dev); +void b43legacy_calc_nrssi_threshold(struct b43legacy_wldev *dev); +s16 b43legacy_nrssi_hw_read(struct b43legacy_wldev *dev, u16 offset); +void b43legacy_nrssi_hw_write(struct b43legacy_wldev *dev, u16 offset, s16 val); +void b43legacy_nrssi_hw_update(struct b43legacy_wldev *dev, u16 val); +void b43legacy_nrssi_mem_update(struct b43legacy_wldev *dev); + +void b43legacy_radio_set_tx_iq(struct b43legacy_wldev *dev); +u16 b43legacy_radio_calibrationvalue(struct b43legacy_wldev *dev); + +#endif /* B43legacy_RADIO_H_ */ diff --git a/drivers/net/wireless/b43legacy/sysfs.c b/drivers/net/wireless/b43legacy/sysfs.c new file mode 100644 index 0000000000000000000000000000000000000000..56c384fa9b1feea926bdc194741344fc8ce29fb5 --- /dev/null +++ b/drivers/net/wireless/b43legacy/sysfs.c @@ -0,0 +1,238 @@ +/* + + Broadcom B43legacy wireless driver + + SYSFS support routines + + Copyright (c) 2006 Michael Buesch + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; see the file COPYING. If not, write to + the Free Software Foundation, Inc., 51 Franklin Steet, Fifth Floor, + Boston, MA 02110-1301, USA. 
+ +*/ + +#include "sysfs.h" +#include "b43legacy.h" +#include "main.h" +#include "phy.h" +#include "radio.h" + +#include + + +#define GENERIC_FILESIZE 64 + + +static int get_integer(const char *buf, size_t count) +{ + char tmp[10 + 1] = { 0 }; + int ret = -EINVAL; + + if (count == 0) + goto out; + count = min(count, (size_t)10); + memcpy(tmp, buf, count); + ret = simple_strtol(tmp, NULL, 10); +out: + return ret; +} + +static int get_boolean(const char *buf, size_t count) +{ + if (count != 0) { + if (buf[0] == '1') + return 1; + if (buf[0] == '0') + return 0; + if (count >= 4 && memcmp(buf, "true", 4) == 0) + return 1; + if (count >= 5 && memcmp(buf, "false", 5) == 0) + return 0; + if (count >= 3 && memcmp(buf, "yes", 3) == 0) + return 1; + if (count >= 2 && memcmp(buf, "no", 2) == 0) + return 0; + if (count >= 2 && memcmp(buf, "on", 2) == 0) + return 1; + if (count >= 3 && memcmp(buf, "off", 3) == 0) + return 0; + } + return -EINVAL; +} + +static ssize_t b43legacy_attr_interfmode_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct b43legacy_wldev *wldev = dev_to_b43legacy_wldev(dev); + ssize_t count = 0; + + if (!capable(CAP_NET_ADMIN)) + return -EPERM; + + mutex_lock(&wldev->wl->mutex); + + switch (wldev->phy.interfmode) { + case B43legacy_INTERFMODE_NONE: + count = snprintf(buf, PAGE_SIZE, "0 (No Interference" + " Mitigation)\n"); + break; + case B43legacy_INTERFMODE_NONWLAN: + count = snprintf(buf, PAGE_SIZE, "1 (Non-WLAN Interference" + " Mitigation)\n"); + break; + case B43legacy_INTERFMODE_MANUALWLAN: + count = snprintf(buf, PAGE_SIZE, "2 (WLAN Interference" + " Mitigation)\n"); + break; + default: + B43legacy_WARN_ON(1); + } + + mutex_unlock(&wldev->wl->mutex); + + return count; +} + +static ssize_t b43legacy_attr_interfmode_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct b43legacy_wldev *wldev = dev_to_b43legacy_wldev(dev); + unsigned long flags; + int err; + int mode; + + if (!capable(CAP_NET_ADMIN)) + return -EPERM; + + mode = get_integer(buf, count); + switch (mode) { + case 0: + mode = B43legacy_INTERFMODE_NONE; + break; + case 1: + mode = B43legacy_INTERFMODE_NONWLAN; + break; + case 2: + mode = B43legacy_INTERFMODE_MANUALWLAN; + break; + case 3: + mode = B43legacy_INTERFMODE_AUTOWLAN; + break; + default: + return -EINVAL; + } + + mutex_lock(&wldev->wl->mutex); + spin_lock_irqsave(&wldev->wl->irq_lock, flags); + + err = b43legacy_radio_set_interference_mitigation(wldev, mode); + if (err) + b43legacyerr(wldev->wl, "Interference Mitigation not " + "supported by device\n"); + mmiowb(); + spin_unlock_irqrestore(&wldev->wl->irq_lock, flags); + mutex_unlock(&wldev->wl->mutex); + + return err ? 
err : count; +} + +static DEVICE_ATTR(interference, 0644, + b43legacy_attr_interfmode_show, + b43legacy_attr_interfmode_store); + +static ssize_t b43legacy_attr_preamble_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct b43legacy_wldev *wldev = dev_to_b43legacy_wldev(dev); + ssize_t count; + + if (!capable(CAP_NET_ADMIN)) + return -EPERM; + + mutex_lock(&wldev->wl->mutex); + + if (wldev->short_preamble) + count = snprintf(buf, PAGE_SIZE, "1 (Short Preamble" + " enabled)\n"); + else + count = snprintf(buf, PAGE_SIZE, "0 (Short Preamble" + " disabled)\n"); + + mutex_unlock(&wldev->wl->mutex); + + return count; +} + +static ssize_t b43legacy_attr_preamble_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct b43legacy_wldev *wldev = dev_to_b43legacy_wldev(dev); + unsigned long flags; + int value; + + if (!capable(CAP_NET_ADMIN)) + return -EPERM; + + value = get_boolean(buf, count); + if (value < 0) + return value; + mutex_lock(&wldev->wl->mutex); + spin_lock_irqsave(&wldev->wl->irq_lock, flags); + + wldev->short_preamble = !!value; + + spin_unlock_irqrestore(&wldev->wl->irq_lock, flags); + mutex_unlock(&wldev->wl->mutex); + + return count; +} + +static DEVICE_ATTR(shortpreamble, 0644, + b43legacy_attr_preamble_show, + b43legacy_attr_preamble_store); + +int b43legacy_sysfs_register(struct b43legacy_wldev *wldev) +{ + struct device *dev = wldev->dev->dev; + int err; + + B43legacy_WARN_ON(b43legacy_status(wldev) != + B43legacy_STAT_INITIALIZED); + + err = device_create_file(dev, &dev_attr_interference); + if (err) + goto out; + err = device_create_file(dev, &dev_attr_shortpreamble); + if (err) + goto err_remove_interfmode; + +out: + return err; +err_remove_interfmode: + device_remove_file(dev, &dev_attr_interference); + goto out; +} + +void b43legacy_sysfs_unregister(struct b43legacy_wldev *wldev) +{ + struct device *dev = wldev->dev->dev; + + device_remove_file(dev, &dev_attr_shortpreamble); + device_remove_file(dev, &dev_attr_interference); +} diff --git a/drivers/net/wireless/b43legacy/sysfs.h b/drivers/net/wireless/b43legacy/sysfs.h new file mode 100644 index 0000000000000000000000000000000000000000..417d509803c708623fe91db1f54d6953daccd914 --- /dev/null +++ b/drivers/net/wireless/b43legacy/sysfs.h @@ -0,0 +1,9 @@ +#ifndef B43legacy_SYSFS_H_ +#define B43legacy_SYSFS_H_ + +struct b43legacy_wldev; + +int b43legacy_sysfs_register(struct b43legacy_wldev *dev); +void b43legacy_sysfs_unregister(struct b43legacy_wldev *dev); + +#endif /* B43legacy_SYSFS_H_ */ diff --git a/drivers/net/wireless/b43legacy/xmit.c b/drivers/net/wireless/b43legacy/xmit.c new file mode 100644 index 0000000000000000000000000000000000000000..fa1e65687a63c65f541452dcff9a848e12ba89b1 --- /dev/null +++ b/drivers/net/wireless/b43legacy/xmit.c @@ -0,0 +1,642 @@ +/* + + Broadcom B43legacy wireless driver + + Transmission (TX/RX) related functions. + + Copyright (C) 2005 Martin Langer + Copyright (C) 2005 Stefano Brivio + Copyright (C) 2005, 2006 Michael Buesch + Copyright (C) 2005 Danny van Dyk + Copyright (C) 2005 Andreas Jaggi + Copyright (C) 2007 Larry Finger + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. 
+ + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; see the file COPYING. If not, write to + the Free Software Foundation, Inc., 51 Franklin Steet, Fifth Floor, + Boston, MA 02110-1301, USA. + +*/ + +#include + +#include "xmit.h" +#include "phy.h" +#include "dma.h" +#include "pio.h" + + +/* Extract the bitrate out of a CCK PLCP header. */ +static u8 b43legacy_plcp_get_bitrate_cck(struct b43legacy_plcp_hdr6 *plcp) +{ + switch (plcp->raw[0]) { + case 0x0A: + return B43legacy_CCK_RATE_1MB; + case 0x14: + return B43legacy_CCK_RATE_2MB; + case 0x37: + return B43legacy_CCK_RATE_5MB; + case 0x6E: + return B43legacy_CCK_RATE_11MB; + } + B43legacy_BUG_ON(1); + return 0; +} + +/* Extract the bitrate out of an OFDM PLCP header. */ +static u8 b43legacy_plcp_get_bitrate_ofdm(struct b43legacy_plcp_hdr6 *plcp) +{ + switch (plcp->raw[0] & 0xF) { + case 0xB: + return B43legacy_OFDM_RATE_6MB; + case 0xF: + return B43legacy_OFDM_RATE_9MB; + case 0xA: + return B43legacy_OFDM_RATE_12MB; + case 0xE: + return B43legacy_OFDM_RATE_18MB; + case 0x9: + return B43legacy_OFDM_RATE_24MB; + case 0xD: + return B43legacy_OFDM_RATE_36MB; + case 0x8: + return B43legacy_OFDM_RATE_48MB; + case 0xC: + return B43legacy_OFDM_RATE_54MB; + } + B43legacy_BUG_ON(1); + return 0; +} + +u8 b43legacy_plcp_get_ratecode_cck(const u8 bitrate) +{ + switch (bitrate) { + case B43legacy_CCK_RATE_1MB: + return 0x0A; + case B43legacy_CCK_RATE_2MB: + return 0x14; + case B43legacy_CCK_RATE_5MB: + return 0x37; + case B43legacy_CCK_RATE_11MB: + return 0x6E; + } + B43legacy_BUG_ON(1); + return 0; +} + +u8 b43legacy_plcp_get_ratecode_ofdm(const u8 bitrate) +{ + switch (bitrate) { + case B43legacy_OFDM_RATE_6MB: + return 0xB; + case B43legacy_OFDM_RATE_9MB: + return 0xF; + case B43legacy_OFDM_RATE_12MB: + return 0xA; + case B43legacy_OFDM_RATE_18MB: + return 0xE; + case B43legacy_OFDM_RATE_24MB: + return 0x9; + case B43legacy_OFDM_RATE_36MB: + return 0xD; + case B43legacy_OFDM_RATE_48MB: + return 0x8; + case B43legacy_OFDM_RATE_54MB: + return 0xC; + } + B43legacy_BUG_ON(1); + return 0; +} + +void b43legacy_generate_plcp_hdr(struct b43legacy_plcp_hdr4 *plcp, + const u16 octets, const u8 bitrate) +{ + __le32 *data = &(plcp->data); + __u8 *raw = plcp->raw; + + if (b43legacy_is_ofdm_rate(bitrate)) { + u16 d; + + d = b43legacy_plcp_get_ratecode_ofdm(bitrate); + B43legacy_WARN_ON(octets & 0xF000); + d |= (octets << 5); + *data = cpu_to_le32(d); + } else { + u32 plen; + + plen = octets * 16 / bitrate; + if ((octets * 16 % bitrate) > 0) { + plen++; + if ((bitrate == B43legacy_CCK_RATE_11MB) + && ((octets * 8 % 11) < 4)) + raw[1] = 0x84; + else + raw[1] = 0x04; + } else + raw[1] = 0x04; + *data |= cpu_to_le32(plen << 16); + raw[0] = b43legacy_plcp_get_ratecode_cck(bitrate); + } +} + +static u8 b43legacy_calc_fallback_rate(u8 bitrate) +{ + switch (bitrate) { + case B43legacy_CCK_RATE_1MB: + return B43legacy_CCK_RATE_1MB; + case B43legacy_CCK_RATE_2MB: + return B43legacy_CCK_RATE_1MB; + case B43legacy_CCK_RATE_5MB: + return B43legacy_CCK_RATE_2MB; + case B43legacy_CCK_RATE_11MB: + return B43legacy_CCK_RATE_5MB; + case B43legacy_OFDM_RATE_6MB: + return B43legacy_CCK_RATE_5MB; + case B43legacy_OFDM_RATE_9MB: + return B43legacy_OFDM_RATE_6MB; + case B43legacy_OFDM_RATE_12MB: + return 
B43legacy_OFDM_RATE_9MB; + case B43legacy_OFDM_RATE_18MB: + return B43legacy_OFDM_RATE_12MB; + case B43legacy_OFDM_RATE_24MB: + return B43legacy_OFDM_RATE_18MB; + case B43legacy_OFDM_RATE_36MB: + return B43legacy_OFDM_RATE_24MB; + case B43legacy_OFDM_RATE_48MB: + return B43legacy_OFDM_RATE_36MB; + case B43legacy_OFDM_RATE_54MB: + return B43legacy_OFDM_RATE_48MB; + } + B43legacy_BUG_ON(1); + return 0; +} + +static void generate_txhdr_fw3(struct b43legacy_wldev *dev, + struct b43legacy_txhdr_fw3 *txhdr, + const unsigned char *fragment_data, + unsigned int fragment_len, + const struct ieee80211_tx_control *txctl, + u16 cookie) +{ + const struct ieee80211_hdr *wlhdr; + int use_encryption = (!(txctl->flags & IEEE80211_TXCTL_DO_NOT_ENCRYPT)); + u16 fctl; + u8 rate; + u8 rate_fb; + int rate_ofdm; + int rate_fb_ofdm; + unsigned int plcp_fragment_len; + u32 mac_ctl = 0; + u16 phy_ctl = 0; + + wlhdr = (const struct ieee80211_hdr *)fragment_data; + fctl = le16_to_cpu(wlhdr->frame_control); + + memset(txhdr, 0, sizeof(*txhdr)); + + rate = txctl->tx_rate; + rate_ofdm = b43legacy_is_ofdm_rate(rate); + rate_fb = (txctl->alt_retry_rate == -1) ? rate : txctl->alt_retry_rate; + rate_fb_ofdm = b43legacy_is_ofdm_rate(rate_fb); + + txhdr->mac_frame_ctl = wlhdr->frame_control; + memcpy(txhdr->tx_receiver, wlhdr->addr1, 6); + + /* Calculate duration for fallback rate */ + if ((rate_fb == rate) || + (wlhdr->duration_id & cpu_to_le16(0x8000)) || + (wlhdr->duration_id == cpu_to_le16(0))) { + /* If the fallback rate equals the normal rate or the + * dur_id field contains an AID, CFP magic or 0, + * use the original dur_id field. */ + txhdr->dur_fb = wlhdr->duration_id; + } else { + int fbrate_base100kbps = B43legacy_RATE_TO_100KBPS(rate_fb); + txhdr->dur_fb = ieee80211_generic_frame_duration(dev->wl->hw, + dev->wl->if_id, + fragment_len, + fbrate_base100kbps); + } + + plcp_fragment_len = fragment_len + FCS_LEN; + if (use_encryption) { + u8 key_idx = (u16)(txctl->key_idx); + struct b43legacy_key *key; + int wlhdr_len; + size_t iv_len; + + B43legacy_WARN_ON(key_idx >= dev->max_nr_keys); + key = &(dev->key[key_idx]); + + if (key->enabled) { + /* Hardware appends ICV. 
*/ + plcp_fragment_len += txctl->icv_len; + + key_idx = b43legacy_kidx_to_fw(dev, key_idx); + mac_ctl |= (key_idx << B43legacy_TX4_MAC_KEYIDX_SHIFT) & + B43legacy_TX4_MAC_KEYIDX; + mac_ctl |= (key->algorithm << + B43legacy_TX4_MAC_KEYALG_SHIFT) & + B43legacy_TX4_MAC_KEYALG; + wlhdr_len = ieee80211_get_hdrlen(fctl); + iv_len = min((size_t)txctl->iv_len, + ARRAY_SIZE(txhdr->iv)); + memcpy(txhdr->iv, ((u8 *)wlhdr) + wlhdr_len, iv_len); + } + } + b43legacy_generate_plcp_hdr((struct b43legacy_plcp_hdr4 *) + (&txhdr->plcp), plcp_fragment_len, + rate); + b43legacy_generate_plcp_hdr((struct b43legacy_plcp_hdr4 *) + (&txhdr->plcp_fb), plcp_fragment_len, + rate_fb); + + /* PHY TX Control word */ + if (rate_ofdm) + phy_ctl |= B43legacy_TX4_PHY_OFDM; + if (dev->short_preamble) + phy_ctl |= B43legacy_TX4_PHY_SHORTPRMBL; + switch (txctl->antenna_sel_tx) { + case 0: + phy_ctl |= B43legacy_TX4_PHY_ANTLAST; + break; + case 1: + phy_ctl |= B43legacy_TX4_PHY_ANT0; + break; + case 2: + phy_ctl |= B43legacy_TX4_PHY_ANT1; + break; + default: + B43legacy_BUG_ON(1); + } + + /* MAC control */ + if (!(txctl->flags & IEEE80211_TXCTL_NO_ACK)) + mac_ctl |= B43legacy_TX4_MAC_ACK; + if (!(((fctl & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_CTL) && + ((fctl & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_PSPOLL))) + mac_ctl |= B43legacy_TX4_MAC_HWSEQ; + if (txctl->flags & IEEE80211_TXCTL_FIRST_FRAGMENT) + mac_ctl |= B43legacy_TX4_MAC_STMSDU; + if (rate_fb_ofdm) + mac_ctl |= B43legacy_TX4_MAC_FALLBACKOFDM; + + /* Generate the RTS or CTS-to-self frame */ + if ((txctl->flags & IEEE80211_TXCTL_USE_RTS_CTS) || + (txctl->flags & IEEE80211_TXCTL_USE_CTS_PROTECT)) { + unsigned int len; + struct ieee80211_hdr *hdr; + int rts_rate; + int rts_rate_fb; + int rts_rate_ofdm; + int rts_rate_fb_ofdm; + + rts_rate = txctl->rts_cts_rate; + rts_rate_ofdm = b43legacy_is_ofdm_rate(rts_rate); + rts_rate_fb = b43legacy_calc_fallback_rate(rts_rate); + rts_rate_fb_ofdm = b43legacy_is_ofdm_rate(rts_rate_fb); + if (rts_rate_fb_ofdm) + mac_ctl |= B43legacy_TX4_MAC_CTSFALLBACKOFDM; + + if (txctl->flags & IEEE80211_TXCTL_USE_CTS_PROTECT) { + ieee80211_ctstoself_get(dev->wl->hw, + dev->wl->if_id, + fragment_data, + fragment_len, txctl, + (struct ieee80211_cts *) + (txhdr->rts_frame)); + mac_ctl |= B43legacy_TX4_MAC_SENDCTS; + len = sizeof(struct ieee80211_cts); + } else { + ieee80211_rts_get(dev->wl->hw, + dev->wl->if_id, + fragment_data, fragment_len, txctl, + (struct ieee80211_rts *) + (txhdr->rts_frame)); + mac_ctl |= B43legacy_TX4_MAC_SENDRTS; + len = sizeof(struct ieee80211_rts); + } + len += FCS_LEN; + b43legacy_generate_plcp_hdr((struct b43legacy_plcp_hdr4 *) + (&txhdr->rts_plcp), + len, rts_rate); + b43legacy_generate_plcp_hdr((struct b43legacy_plcp_hdr4 *) + (&txhdr->rts_plcp_fb), + len, rts_rate_fb); + hdr = (struct ieee80211_hdr *)(&txhdr->rts_frame); + txhdr->rts_dur_fb = hdr->duration_id; + mac_ctl |= B43legacy_TX4_MAC_LONGFRAME; + } + + /* Magic cookie */ + txhdr->cookie = cpu_to_le16(cookie); + + /* Apply the bitfields */ + txhdr->mac_ctl = cpu_to_le32(mac_ctl); + txhdr->phy_ctl = cpu_to_le16(phy_ctl); +} + +void b43legacy_generate_txhdr(struct b43legacy_wldev *dev, + u8 *txhdr, + const unsigned char *fragment_data, + unsigned int fragment_len, + const struct ieee80211_tx_control *txctl, + u16 cookie) +{ + generate_txhdr_fw3(dev, (struct b43legacy_txhdr_fw3 *)txhdr, + fragment_data, fragment_len, + txctl, cookie); +} + +static s8 b43legacy_rssi_postprocess(struct b43legacy_wldev *dev, + u8 in_rssi, int ofdm, + int adjust_2053, int adjust_2050) 
+{ + struct b43legacy_phy *phy = &dev->phy; + s32 tmp; + + switch (phy->radio_ver) { + case 0x2050: + if (ofdm) { + tmp = in_rssi; + if (tmp > 127) + tmp -= 256; + tmp *= 73; + tmp /= 64; + if (adjust_2050) + tmp += 25; + else + tmp -= 3; + } else { + if (dev->dev->bus->sprom.r1.boardflags_lo + & B43legacy_BFL_RSSI) { + if (in_rssi > 63) + in_rssi = 63; + tmp = phy->nrssi_lt[in_rssi]; + tmp = 31 - tmp; + tmp *= -131; + tmp /= 128; + tmp -= 57; + } else { + tmp = in_rssi; + tmp = 31 - tmp; + tmp *= -149; + tmp /= 128; + tmp -= 68; + } + if (phy->type == B43legacy_PHYTYPE_G && + adjust_2050) + tmp += 25; + } + break; + case 0x2060: + if (in_rssi > 127) + tmp = in_rssi - 256; + else + tmp = in_rssi; + break; + default: + tmp = in_rssi; + tmp -= 11; + tmp *= 103; + tmp /= 64; + if (adjust_2053) + tmp -= 109; + else + tmp -= 83; + } + + return (s8)tmp; +} + +void b43legacy_rx(struct b43legacy_wldev *dev, + struct sk_buff *skb, + const void *_rxhdr) +{ + struct ieee80211_rx_status status; + struct b43legacy_plcp_hdr6 *plcp; + struct ieee80211_hdr *wlhdr; + const struct b43legacy_rxhdr_fw3 *rxhdr = _rxhdr; + u16 fctl; + u16 phystat0; + u16 phystat3; + u16 chanstat; + u16 mactime; + u32 macstat; + u16 chanid; + u8 jssi; + int padding; + + memset(&status, 0, sizeof(status)); + + /* Get metadata about the frame from the header. */ + phystat0 = le16_to_cpu(rxhdr->phy_status0); + phystat3 = le16_to_cpu(rxhdr->phy_status3); + jssi = rxhdr->jssi; + macstat = le16_to_cpu(rxhdr->mac_status); + mactime = le16_to_cpu(rxhdr->mac_time); + chanstat = le16_to_cpu(rxhdr->channel); + + if (macstat & B43legacy_RX_MAC_FCSERR) + dev->wl->ieee_stats.dot11FCSErrorCount++; + + /* Skip PLCP and padding */ + padding = (macstat & B43legacy_RX_MAC_PADDING) ? 2 : 0; + if (unlikely(skb->len < (sizeof(struct b43legacy_plcp_hdr6) + + padding))) { + b43legacydbg(dev->wl, "RX: Packet size underrun (1)\n"); + goto drop; + } + plcp = (struct b43legacy_plcp_hdr6 *)(skb->data + padding); + skb_pull(skb, sizeof(struct b43legacy_plcp_hdr6) + padding); + /* The skb contains the Wireless Header + payload data now */ + if (unlikely(skb->len < (2+2+6/*minimum hdr*/ + FCS_LEN))) { + b43legacydbg(dev->wl, "RX: Packet size underrun (2)\n"); + goto drop; + } + wlhdr = (struct ieee80211_hdr *)(skb->data); + fctl = le16_to_cpu(wlhdr->frame_control); + + if ((macstat & B43legacy_RX_MAC_DEC) && + !(macstat & B43legacy_RX_MAC_DECERR)) { + unsigned int keyidx; + int wlhdr_len; + int iv_len; + int icv_len; + + keyidx = ((macstat & B43legacy_RX_MAC_KEYIDX) + >> B43legacy_RX_MAC_KEYIDX_SHIFT); + /* We must adjust the key index here. We want the "physical" + * key index, but the ucode passed it slightly different. + */ + keyidx = b43legacy_kidx_to_raw(dev, keyidx); + B43legacy_WARN_ON(keyidx >= dev->max_nr_keys); + + if (dev->key[keyidx].algorithm != B43legacy_SEC_ALGO_NONE) { + /* Remove PROTECTED flag to mark it as decrypted. */ + B43legacy_WARN_ON(!(fctl & IEEE80211_FCTL_PROTECTED)); + fctl &= ~IEEE80211_FCTL_PROTECTED; + wlhdr->frame_control = cpu_to_le16(fctl); + + wlhdr_len = ieee80211_get_hdrlen(fctl); + if (unlikely(skb->len < (wlhdr_len + 3))) { + b43legacydbg(dev->wl, "RX: Packet size" + " underrun3\n"); + goto drop; + } + if (skb->data[wlhdr_len + 3] & (1 << 5)) { + /* The Ext-IV Bit is set in the "KeyID" + * octet of the IV. 
+ */ + iv_len = 8; + icv_len = 8; + } else { + iv_len = 4; + icv_len = 4; + } + if (unlikely(skb->len < (wlhdr_len + iv_len + + icv_len))) { + b43legacydbg(dev->wl, "RX: Packet size" + " underrun4\n"); + goto drop; + } + /* Remove the IV */ + memmove(skb->data + iv_len, skb->data, wlhdr_len); + skb_pull(skb, iv_len); + /* Remove the ICV */ + skb_trim(skb, skb->len - icv_len); + + status.flag |= RX_FLAG_DECRYPTED; + } + } + + status.ssi = b43legacy_rssi_postprocess(dev, jssi, + (phystat0 & B43legacy_RX_PHYST0_OFDM), + (phystat0 & B43legacy_RX_PHYST0_GAINCTL), + (phystat3 & B43legacy_RX_PHYST3_TRSTATE)); + status.noise = dev->stats.link_noise; + status.signal = (jssi * 100) / B43legacy_RX_MAX_SSI; + if (phystat0 & B43legacy_RX_PHYST0_OFDM) + status.rate = b43legacy_plcp_get_bitrate_ofdm(plcp); + else + status.rate = b43legacy_plcp_get_bitrate_cck(plcp); + status.antenna = !!(phystat0 & B43legacy_RX_PHYST0_ANT); + status.mactime = mactime; + + chanid = (chanstat & B43legacy_RX_CHAN_ID) >> + B43legacy_RX_CHAN_ID_SHIFT; + switch (chanstat & B43legacy_RX_CHAN_PHYTYPE) { + case B43legacy_PHYTYPE_B: + status.phymode = MODE_IEEE80211B; + status.freq = chanid + 2400; + status.channel = b43legacy_freq_to_channel_bg(chanid + 2400); + break; + case B43legacy_PHYTYPE_G: + status.phymode = MODE_IEEE80211G; + status.freq = chanid + 2400; + status.channel = b43legacy_freq_to_channel_bg(chanid + 2400); + break; + default: + b43legacywarn(dev->wl, "Unexpected value for chanstat (0x%X)\n", + chanstat); + } + + dev->stats.last_rx = jiffies; + ieee80211_rx_irqsafe(dev->wl->hw, skb, &status); + + return; +drop: + b43legacydbg(dev->wl, "RX: Packet dropped\n"); + dev_kfree_skb_any(skb); +} + +void b43legacy_handle_txstatus(struct b43legacy_wldev *dev, + const struct b43legacy_txstatus *status) +{ + b43legacy_debugfs_log_txstat(dev, status); + + if (status->intermediate) + return; + if (status->for_ampdu) + return; + if (!status->acked) + dev->wl->ieee_stats.dot11ACKFailureCount++; + if (status->rts_count) { + if (status->rts_count == 0xF) /* FIXME */ + dev->wl->ieee_stats.dot11RTSFailureCount++; + else + dev->wl->ieee_stats.dot11RTSSuccessCount++; + } + + if (b43legacy_using_pio(dev)) + b43legacy_pio_handle_txstatus(dev, status); + else + b43legacy_dma_handle_txstatus(dev, status); +} + +/* Handle TX status report as received through DMA/PIO queues */ +void b43legacy_handle_hwtxstatus(struct b43legacy_wldev *dev, + const struct b43legacy_hwtxstatus *hw) +{ + struct b43legacy_txstatus status; + u8 tmp; + + status.cookie = le16_to_cpu(hw->cookie); + status.seq = le16_to_cpu(hw->seq); + status.phy_stat = hw->phy_stat; + tmp = hw->count; + status.frame_count = (tmp >> 4); + status.rts_count = (tmp & 0x0F); + tmp = hw->flags; + status.supp_reason = ((tmp & 0x1C) >> 2); + status.pm_indicated = !!(tmp & 0x80); + status.intermediate = !!(tmp & 0x40); + status.for_ampdu = !!(tmp & 0x20); + status.acked = !!(tmp & 0x02); + + b43legacy_handle_txstatus(dev, &status); +} + +/* Stop any TX operation on the device (suspend the hardware queues) */ +void b43legacy_tx_suspend(struct b43legacy_wldev *dev) +{ + if (b43legacy_using_pio(dev)) + b43legacy_pio_freeze_txqueues(dev); + else + b43legacy_dma_tx_suspend(dev); +} + +/* Resume any TX operation on the device (resume the hardware queues) */ +void b43legacy_tx_resume(struct b43legacy_wldev *dev) +{ + if (b43legacy_using_pio(dev)) + b43legacy_pio_thaw_txqueues(dev); + else + b43legacy_dma_tx_resume(dev); +} + +/* Initialize the QoS parameters */ +void b43legacy_qos_init(struct 
b43legacy_wldev *dev) +{ + /* FIXME: This function must probably be called from the mac80211 + * config callback. */ +return; + + b43legacy_hf_write(dev, b43legacy_hf_read(dev) | B43legacy_HF_EDCF); + /* FIXME kill magic */ + b43legacy_write16(dev, 0x688, + b43legacy_read16(dev, 0x688) | 0x4); + + + /*TODO: We might need some stack support here to get the values. */ +} diff --git a/drivers/net/wireless/b43legacy/xmit.h b/drivers/net/wireless/b43legacy/xmit.h new file mode 100644 index 0000000000000000000000000000000000000000..8a155d0a5d1fbb2c4b7d4528ce7ae900d8a261be --- /dev/null +++ b/drivers/net/wireless/b43legacy/xmit.h @@ -0,0 +1,259 @@ +#ifndef B43legacy_XMIT_H_ +#define B43legacy_XMIT_H_ + +#include "main.h" + + +#define _b43legacy_declare_plcp_hdr(size) \ + struct b43legacy_plcp_hdr##size { \ + union { \ + __le32 data; \ + __u8 raw[size]; \ + } __attribute__((__packed__)); \ + } __attribute__((__packed__)) + +/* struct b43legacy_plcp_hdr4 */ +_b43legacy_declare_plcp_hdr(4); +/* struct b43legacy_plcp_hdr6 */ +_b43legacy_declare_plcp_hdr(6); + +#undef _b43legacy_declare_plcp_hdr + + +/* TX header for v3 firmware */ +struct b43legacy_txhdr_fw3 { + __le32 mac_ctl; /* MAC TX control */ + __le16 mac_frame_ctl; /* Copy of the FrameControl */ + __le16 tx_fes_time_norm; /* TX FES Time Normal */ + __le16 phy_ctl; /* PHY TX control */ + __u8 iv[16]; /* Encryption IV */ + __u8 tx_receiver[6]; /* TX Frame Receiver address */ + __le16 tx_fes_time_fb; /* TX FES Time Fallback */ + struct b43legacy_plcp_hdr4 rts_plcp_fb; /* RTS fallback PLCP */ + __le16 rts_dur_fb; /* RTS fallback duration */ + struct b43legacy_plcp_hdr4 plcp_fb; /* Fallback PLCP */ + __le16 dur_fb; /* Fallback duration */ + PAD_BYTES(2); + __le16 cookie; + __le16 unknown_scb_stuff; + struct b43legacy_plcp_hdr6 rts_plcp; /* RTS PLCP */ + __u8 rts_frame[18]; /* The RTS frame (if used) */ + struct b43legacy_plcp_hdr6 plcp; +} __attribute__((__packed__)); + +/* MAC TX control */ +#define B43legacy_TX4_MAC_KEYIDX 0x0FF00000 /* Security key index */ +#define B43legacy_TX4_MAC_KEYIDX_SHIFT 20 +#define B43legacy_TX4_MAC_KEYALG 0x00070000 /* Security key algorithm */ +#define B43legacy_TX4_MAC_KEYALG_SHIFT 16 +#define B43legacy_TX4_MAC_LIFETIME 0x00001000 +#define B43legacy_TX4_MAC_FRAMEBURST 0x00000800 +#define B43legacy_TX4_MAC_SENDCTS 0x00000400 +#define B43legacy_TX4_MAC_AMPDU 0x00000300 +#define B43legacy_TX4_MAC_AMPDU_SHIFT 8 +#define B43legacy_TX4_MAC_CTSFALLBACKOFDM 0x00000200 +#define B43legacy_TX4_MAC_FALLBACKOFDM 0x00000100 +#define B43legacy_TX4_MAC_5GHZ 0x00000080 +#define B43legacy_TX4_MAC_IGNPMQ 0x00000020 +#define B43legacy_TX4_MAC_HWSEQ 0x00000010 /* Use Hardware Seq No */ +#define B43legacy_TX4_MAC_STMSDU 0x00000008 /* Start MSDU */ +#define B43legacy_TX4_MAC_SENDRTS 0x00000004 +#define B43legacy_TX4_MAC_LONGFRAME 0x00000002 +#define B43legacy_TX4_MAC_ACK 0x00000001 + +/* Extra Frame Types */ +#define B43legacy_TX4_EFT_FBOFDM 0x0001 /* Data frame fb rate type */ +#define B43legacy_TX4_EFT_RTSOFDM 0x0004 /* RTS/CTS rate type */ +#define B43legacy_TX4_EFT_RTSFBOFDM 0x0010 /* RTS/CTS fallback rate type */ + +/* PHY TX control word */ +#define B43legacy_TX4_PHY_OFDM 0x0001 /* Data frame rate type */ +#define B43legacy_TX4_PHY_SHORTPRMBL 0x0010 /* Use short preamble */ +#define B43legacy_TX4_PHY_ANT 0x03C0 /* Antenna selection */ +#define B43legacy_TX4_PHY_ANT0 0x0000 /* Use antenna 0 */ +#define B43legacy_TX4_PHY_ANT1 0x0100 /* Use antenna 1 */ +#define B43legacy_TX4_PHY_ANTLAST 0x0300 /* Use last used antenna */ + + + +void 
b43legacy_generate_txhdr(struct b43legacy_wldev *dev, + u8 *txhdr, + const unsigned char *fragment_data, + unsigned int fragment_len, + const struct ieee80211_tx_control *txctl, + u16 cookie); + + +/* Transmit Status */ +struct b43legacy_txstatus { + u16 cookie; /* The cookie from the txhdr */ + u16 seq; /* Sequence number */ + u8 phy_stat; /* PHY TX status */ + u8 frame_count; /* Frame transmit count */ + u8 rts_count; /* RTS transmit count */ + u8 supp_reason; /* Suppression reason */ + /* flags */ + u8 pm_indicated;/* PM mode indicated to AP */ + u8 intermediate;/* Intermediate status notification */ + u8 for_ampdu; /* Status is for an AMPDU (afterburner) */ + u8 acked; /* Wireless ACK received */ +}; + +/* txstatus supp_reason values */ +enum { + B43legacy_TXST_SUPP_NONE, /* Not suppressed */ + B43legacy_TXST_SUPP_PMQ, /* Suppressed due to PMQ entry */ + B43legacy_TXST_SUPP_FLUSH, /* Suppressed due to flush request */ + B43legacy_TXST_SUPP_PREV, /* Previous fragment failed */ + B43legacy_TXST_SUPP_CHAN, /* Channel mismatch */ + B43legacy_TXST_SUPP_LIFE, /* Lifetime expired */ + B43legacy_TXST_SUPP_UNDER, /* Buffer underflow */ + B43legacy_TXST_SUPP_ABNACK, /* Afterburner NACK */ +}; + +/* Transmit Status as received through DMA/PIO on old chips */ +struct b43legacy_hwtxstatus { + PAD_BYTES(4); + __le16 cookie; + u8 flags; + u8 count; + PAD_BYTES(2); + __le16 seq; + u8 phy_stat; + PAD_BYTES(1); +} __attribute__((__packed__)); + + +/* Receive header for v3 firmware. */ +struct b43legacy_rxhdr_fw3 { + __le16 frame_len; /* Frame length */ + PAD_BYTES(2); + __le16 phy_status0; /* PHY RX Status 0 */ + __u8 jssi; /* PHY RX Status 1: JSSI */ + __u8 sig_qual; /* PHY RX Status 1: Signal Quality */ + PAD_BYTES(2); /* PHY RX Status 2 */ + __le16 phy_status3; /* PHY RX Status 3 */ + __le16 mac_status; /* MAC RX status */ + __le16 mac_time; + __le16 channel; +} __attribute__((__packed__)); + + +/* PHY RX Status 0 */ +#define B43legacy_RX_PHYST0_GAINCTL 0x4000 /* Gain Control */ +#define B43legacy_RX_PHYST0_PLCPHCF 0x0200 +#define B43legacy_RX_PHYST0_PLCPFV 0x0100 +#define B43legacy_RX_PHYST0_SHORTPRMBL 0x0080 /* Recvd with Short Preamble */ +#define B43legacy_RX_PHYST0_LCRS 0x0040 +#define B43legacy_RX_PHYST0_ANT 0x0020 /* Antenna */ +#define B43legacy_RX_PHYST0_UNSRATE 0x0010 +#define B43legacy_RX_PHYST0_CLIP 0x000C +#define B43legacy_RX_PHYST0_CLIP_SHIFT 2 +#define B43legacy_RX_PHYST0_FTYPE 0x0003 /* Frame type */ +#define B43legacy_RX_PHYST0_CCK 0x0000 /* Frame type: CCK */ +#define B43legacy_RX_PHYST0_OFDM 0x0001 /* Frame type: OFDM */ +#define B43legacy_RX_PHYST0_PRE_N 0x0002 /* Pre-standard N-PHY frame */ +#define B43legacy_RX_PHYST0_STD_N 0x0003 /* Standard N-PHY frame */ + +/* PHY RX Status 2 */ +#define B43legacy_RX_PHYST2_LNAG 0xC000 /* LNA Gain */ +#define B43legacy_RX_PHYST2_LNAG_SHIFT 14 +#define B43legacy_RX_PHYST2_PNAG 0x3C00 /* PNA Gain */ +#define B43legacy_RX_PHYST2_PNAG_SHIFT 10 +#define B43legacy_RX_PHYST2_FOFF 0x03FF /* F offset */ + +/* PHY RX Status 3 */ +#define B43legacy_RX_PHYST3_DIGG 0x1800 /* DIG Gain */ +#define B43legacy_RX_PHYST3_DIGG_SHIFT 11 +#define B43legacy_RX_PHYST3_TRSTATE 0x0400 /* TR state */ + +/* MAC RX Status */ +#define B43legacy_RX_MAC_BEACONSENT 0x00008000 /* Beacon send flag */ +#define B43legacy_RX_MAC_KEYIDX 0x000007E0 /* Key index */ +#define B43legacy_RX_MAC_KEYIDX_SHIFT 5 +#define B43legacy_RX_MAC_DECERR 0x00000010 /* Decrypt error */ +#define B43legacy_RX_MAC_DEC 0x00000008 /* Decryption attempted */ +#define B43legacy_RX_MAC_PADDING 0x00000004 /* 
Pad bytes present */ +#define B43legacy_RX_MAC_RESP 0x00000002 /* Response frame xmitted */ +#define B43legacy_RX_MAC_FCSERR 0x00000001 /* FCS error */ + +/* RX channel */ +#define B43legacy_RX_CHAN_GAIN 0xFC00 /* Gain */ +#define B43legacy_RX_CHAN_GAIN_SHIFT 10 +#define B43legacy_RX_CHAN_ID 0x03FC /* Channel ID */ +#define B43legacy_RX_CHAN_ID_SHIFT 2 +#define B43legacy_RX_CHAN_PHYTYPE 0x0003 /* PHY type */ + + + +u8 b43legacy_plcp_get_ratecode_cck(const u8 bitrate); +u8 b43legacy_plcp_get_ratecode_ofdm(const u8 bitrate); + +void b43legacy_generate_plcp_hdr(struct b43legacy_plcp_hdr4 *plcp, + const u16 octets, const u8 bitrate); + +void b43legacy_rx(struct b43legacy_wldev *dev, + struct sk_buff *skb, + const void *_rxhdr); + +void b43legacy_handle_txstatus(struct b43legacy_wldev *dev, + const struct b43legacy_txstatus *status); + +void b43legacy_handle_hwtxstatus(struct b43legacy_wldev *dev, + const struct b43legacy_hwtxstatus *hw); + +void b43legacy_tx_suspend(struct b43legacy_wldev *dev); +void b43legacy_tx_resume(struct b43legacy_wldev *dev); + + +#define B43legacy_NR_QOSPARMS 22 +enum { + B43legacy_QOSPARM_TXOP = 0, + B43legacy_QOSPARM_CWMIN, + B43legacy_QOSPARM_CWMAX, + B43legacy_QOSPARM_CWCUR, + B43legacy_QOSPARM_AIFS, + B43legacy_QOSPARM_BSLOTS, + B43legacy_QOSPARM_REGGAP, + B43legacy_QOSPARM_STATUS, +}; + +void b43legacy_qos_init(struct b43legacy_wldev *dev); + + +/* Helper functions for converting the key-table index from "firmware-format" + * to "raw-format" and back. The firmware API changed for this at some revision. + * We need to account for that here. */ +static inline +int b43legacy_new_kidx_api(struct b43legacy_wldev *dev) +{ + /* FIXME: Not sure the change was at rev 351 */ + return (dev->fw.rev >= 351); +} +static inline +u8 b43legacy_kidx_to_fw(struct b43legacy_wldev *dev, u8 raw_kidx) +{ + u8 firmware_kidx; + if (b43legacy_new_kidx_api(dev)) + firmware_kidx = raw_kidx; + else { + if (raw_kidx >= 4) /* Is per STA key? */ + firmware_kidx = raw_kidx - 4; + else + firmware_kidx = raw_kidx; /* TX default key */ + } + return firmware_kidx; +} +static inline +u8 b43legacy_kidx_to_raw(struct b43legacy_wldev *dev, u8 firmware_kidx) +{ + u8 raw_kidx; + if (b43legacy_new_kidx_api(dev)) + raw_kidx = firmware_kidx; + else + /* RX default keys or per STA keys */ + raw_kidx = firmware_kidx + 4; + return raw_kidx; +} + +#endif /* B43legacy_XMIT_H_ */ diff --git a/drivers/net/wireless/bcm43xx/bcm43xx.h b/drivers/net/wireless/bcm43xx/bcm43xx.h index 10e07e8654260fa8bf4bf61436e51d8d47b391c3..5fdbf24d2443118880586f9c30cc2ab9c46ec741 100644 --- a/drivers/net/wireless/bcm43xx/bcm43xx.h +++ b/drivers/net/wireless/bcm43xx/bcm43xx.h @@ -994,10 +994,4 @@ int bcm43xx_pci_write_config32(struct bcm43xx_private *bcm, int offset, u32 valu __value; \ }) -/** Helpers to print MAC addresses. 
*/ -#define BCM43xx_MACFMT "%02x:%02x:%02x:%02x:%02x:%02x" -#define BCM43xx_MACARG(x) ((u8*)(x))[0], ((u8*)(x))[1], \ - ((u8*)(x))[2], ((u8*)(x))[3], \ - ((u8*)(x))[4], ((u8*)(x))[5] - #endif /* BCM43xx_H_ */ diff --git a/drivers/net/wireless/bcm43xx/bcm43xx_main.c b/drivers/net/wireless/bcm43xx/bcm43xx_main.c index dfbd01eaaf34a5f5a088b15678d849b35de27316..45683437a98d9d7de55ff22a9942a5be5cf08c76 100644 --- a/drivers/net/wireless/bcm43xx/bcm43xx_main.c +++ b/drivers/net/wireless/bcm43xx/bcm43xx_main.c @@ -2380,7 +2380,7 @@ static int bcm43xx_chip_init(struct bcm43xx_private *bcm) goto err_gpio_cleanup; bcm43xx_radio_turn_on(bcm); bcm->radio_hw_enable = bcm43xx_is_hw_radio_enabled(bcm); - dprintk(KERN_INFO PFX "Radio %s by hardware\n", + printk(KERN_INFO PFX "Radio %s by hardware\n", (bcm->radio_hw_enable == 0) ? "disabled" : "enabled"); bcm43xx_write16(bcm, 0x03E6, 0x0000); @@ -3129,7 +3129,7 @@ static void bcm43xx_periodic_every1sec(struct bcm43xx_private *bcm) radio_hw_enable = bcm43xx_is_hw_radio_enabled(bcm); if (unlikely(bcm->radio_hw_enable != radio_hw_enable)) { bcm->radio_hw_enable = radio_hw_enable; - dprintk(KERN_INFO PFX "Radio hardware status changed to %s\n", + printk(KERN_INFO PFX "Radio hardware status changed to %s\n", (radio_hw_enable == 0) ? "disabled" : "enabled"); bcm43xx_leds_update(bcm, 0); } @@ -4092,7 +4092,6 @@ static int __devinit bcm43xx_init_one(struct pci_dev *pdev, goto out; } /* initialize the net_device struct */ - SET_MODULE_OWNER(net_dev); SET_NETDEV_DEV(net_dev, &pdev->dev); net_dev->open = bcm43xx_net_open; diff --git a/drivers/net/wireless/bcm43xx/bcm43xx_radio.c b/drivers/net/wireless/bcm43xx/bcm43xx_radio.c index 6a109f4a1b719ca4aaab645f127f2f91b5eb5e25..c605099c9baf9037eb0bc7de3212a1ec14a2f7ec 100644 --- a/drivers/net/wireless/bcm43xx/bcm43xx_radio.c +++ b/drivers/net/wireless/bcm43xx/bcm43xx_radio.c @@ -2146,7 +2146,7 @@ void bcm43xx_radio_turn_off(struct bcm43xx_private *bcm) } else bcm43xx_phy_write(bcm, 0x0015, 0xAA00); radio->enabled = 0; - dprintk(KERN_INFO PFX "Radio turned off\n"); + dprintk(KERN_INFO PFX "Radio initialized\n"); bcm43xx_leds_update(bcm, 0); } diff --git a/drivers/net/wireless/hostap/hostap.h b/drivers/net/wireless/hostap/hostap.h index ef37a75d550b863cf4180fb4b466b00f4233b79f..547ba84dc797a3155138b0a6e853957ff7d0509c 100644 --- a/drivers/net/wireless/hostap/hostap.h +++ b/drivers/net/wireless/hostap/hostap.h @@ -30,12 +30,11 @@ void hostap_dump_rx_header(const char *name, const struct hfa384x_rx_frame *rx); void hostap_dump_tx_header(const char *name, const struct hfa384x_tx_frame *tx); -int hostap_80211_header_parse(struct sk_buff *skb, unsigned char *haddr); -int hostap_80211_prism_header_parse(struct sk_buff *skb, unsigned char *haddr); +extern const struct header_ops hostap_80211_ops; int hostap_80211_get_hdrlen(u16 fc); struct net_device_stats *hostap_get_stats(struct net_device *dev); void hostap_setup_dev(struct net_device *dev, local_info_t *local, - int main_dev); + int type); void hostap_set_multicast_list_queue(struct work_struct *work); int hostap_set_hostapd(local_info_t *local, int val, int rtnl_locked); int hostap_set_hostapd_sta(local_info_t *local, int val, int rtnl_locked); diff --git a/drivers/net/wireless/hostap/hostap_80211_rx.c b/drivers/net/wireless/hostap/hostap_80211_rx.c index cbedc9ee740aacdef462a791d277ca582ff921a4..ef084df3d48eb87d78204c014795b75a7970467e 100644 --- a/drivers/net/wireless/hostap/hostap_80211_rx.c +++ b/drivers/net/wireless/hostap/hostap_80211_rx.c @@ -19,6 +19,7 @@ void 
hostap_dump_rx_80211(const char *name, struct sk_buff *skb, { struct ieee80211_hdr_4addr *hdr; u16 fc; + DECLARE_MAC_BUF(mac); hdr = (struct ieee80211_hdr_4addr *) skb->data; @@ -44,10 +45,11 @@ void hostap_dump_rx_80211(const char *name, struct sk_buff *skb, printk(" dur=0x%04x seq=0x%04x\n", le16_to_cpu(hdr->duration_id), le16_to_cpu(hdr->seq_ctl)); - printk(KERN_DEBUG " A1=" MACSTR " A2=" MACSTR " A3=" MACSTR, - MAC2STR(hdr->addr1), MAC2STR(hdr->addr2), MAC2STR(hdr->addr3)); + printk(KERN_DEBUG " A1=%s", print_mac(mac, hdr->addr1)); + printk(" A2=%s", print_mac(mac, hdr->addr2)); + printk(" A3=%s", print_mac(mac, hdr->addr3)); if (skb->len >= 30) - printk(" A4=" MACSTR, MAC2STR(hdr->addr4)); + printk(" A4=%s", print_mac(mac, hdr->addr4)); printk("\n"); } @@ -534,6 +536,7 @@ static int hostap_rx_frame_wds(local_info_t *local, struct ieee80211_hdr_4addr *hdr, u16 fc, struct net_device **wds) { + DECLARE_MAC_BUF(mac); /* FIX: is this really supposed to accept WDS frames only in Master * mode? What about Repeater or Managed with WDS frames? */ if ((fc & (IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS)) != @@ -549,10 +552,10 @@ hostap_rx_frame_wds(local_info_t *local, struct ieee80211_hdr_4addr *hdr, hdr->addr1[4] != 0xff || hdr->addr1[5] != 0xff)) { /* RA (or BSSID) is not ours - drop */ PDEBUG(DEBUG_EXTRA, "%s: received WDS frame with " - "not own or broadcast %s=" MACSTR "\n", + "not own or broadcast %s=%s\n", local->dev->name, fc & IEEE80211_FCTL_FROMDS ? "RA" : "BSSID", - MAC2STR(hdr->addr1)); + print_mac(mac, hdr->addr1)); return -1; } @@ -565,8 +568,8 @@ hostap_rx_frame_wds(local_info_t *local, struct ieee80211_hdr_4addr *hdr, /* require that WDS link has been registered with TA or the * frame is from current AP when using 'AP client mode' */ PDEBUG(DEBUG_EXTRA, "%s: received WDS[4 addr] frame " - "from unknown TA=" MACSTR "\n", - local->dev->name, MAC2STR(hdr->addr2)); + "from unknown TA=%s\n", + local->dev->name, print_mac(mac, hdr->addr2)); if (local->ap && local->ap->autom_ap_wds) hostap_wds_link_oper(local, hdr->addr2, WDS_ADD); return -1; @@ -632,6 +635,7 @@ hostap_rx_frame_decrypt(local_info_t *local, struct sk_buff *skb, { struct ieee80211_hdr_4addr *hdr; int res, hdrlen; + DECLARE_MAC_BUF(mac); if (crypt == NULL || crypt->ops->decrypt_mpdu == NULL) return 0; @@ -643,8 +647,8 @@ hostap_rx_frame_decrypt(local_info_t *local, struct sk_buff *skb, strcmp(crypt->ops->name, "TKIP") == 0) { if (net_ratelimit()) { printk(KERN_DEBUG "%s: TKIP countermeasures: dropped " - "received packet from " MACSTR "\n", - local->dev->name, MAC2STR(hdr->addr2)); + "received packet from %s\n", + local->dev->name, print_mac(mac, hdr->addr2)); } return -1; } @@ -653,9 +657,9 @@ hostap_rx_frame_decrypt(local_info_t *local, struct sk_buff *skb, res = crypt->ops->decrypt_mpdu(skb, hdrlen, crypt->priv); atomic_dec(&crypt->refcnt); if (res < 0) { - printk(KERN_DEBUG "%s: decryption failed (SA=" MACSTR + printk(KERN_DEBUG "%s: decryption failed (SA=%s" ") res=%d\n", - local->dev->name, MAC2STR(hdr->addr2), res); + local->dev->name, print_mac(mac, hdr->addr2), res); local->comm_tallies.rx_discards_wep_undecryptable++; return -1; } @@ -671,6 +675,7 @@ hostap_rx_frame_decrypt_msdu(local_info_t *local, struct sk_buff *skb, { struct ieee80211_hdr_4addr *hdr; int res, hdrlen; + DECLARE_MAC_BUF(mac); if (crypt == NULL || crypt->ops->decrypt_msdu == NULL) return 0; @@ -683,8 +688,8 @@ hostap_rx_frame_decrypt_msdu(local_info_t *local, struct sk_buff *skb, atomic_dec(&crypt->refcnt); if (res < 0) { printk(KERN_DEBUG "%s: 
MSDU decryption/MIC verification failed" - " (SA=" MACSTR " keyidx=%d)\n", - local->dev->name, MAC2STR(hdr->addr2), keyidx); + " (SA=%s keyidx=%d)\n", + local->dev->name, print_mac(mac, hdr->addr2), keyidx); return -1; } @@ -716,6 +721,7 @@ void hostap_80211_rx(struct net_device *dev, struct sk_buff *skb, struct ieee80211_crypt_data *crypt = NULL; void *sta = NULL; int keyidx = 0; + DECLARE_MAC_BUF(mac); iface = netdev_priv(dev); local = iface->local; @@ -792,8 +798,8 @@ void hostap_80211_rx(struct net_device *dev, struct sk_buff *skb, * frames silently instead of filling system log with * these reports. */ printk(KERN_DEBUG "%s: WEP decryption failed (not set)" - " (SA=" MACSTR ")\n", - local->dev->name, MAC2STR(hdr->addr2)); + " (SA=%s)\n", + local->dev->name, print_mac(mac, hdr->addr2)); #endif local->comm_tallies.rx_discards_wep_undecryptable++; goto rx_dropped; @@ -807,8 +813,8 @@ void hostap_80211_rx(struct net_device *dev, struct sk_buff *skb, (keyidx = hostap_rx_frame_decrypt(local, skb, crypt)) < 0) { printk(KERN_DEBUG "%s: failed to decrypt mgmt::auth " - "from " MACSTR "\n", dev->name, - MAC2STR(hdr->addr2)); + "from %s\n", dev->name, + print_mac(mac, hdr->addr2)); /* TODO: could inform hostapd about this so that it * could send auth failure report */ goto rx_dropped; @@ -976,8 +982,8 @@ void hostap_80211_rx(struct net_device *dev, struct sk_buff *skb, "unencrypted EAPOL frame\n", local->dev->name); } else { printk(KERN_DEBUG "%s: encryption configured, but RX " - "frame not encrypted (SA=" MACSTR ")\n", - local->dev->name, MAC2STR(hdr->addr2)); + "frame not encrypted (SA=%s)\n", + local->dev->name, print_mac(mac, hdr->addr2)); goto rx_dropped; } } @@ -986,8 +992,9 @@ void hostap_80211_rx(struct net_device *dev, struct sk_buff *skb, !hostap_is_eapol_frame(local, skb)) { if (net_ratelimit()) { printk(KERN_DEBUG "%s: dropped unencrypted RX data " - "frame from " MACSTR " (drop_unencrypted=1)\n", - dev->name, MAC2STR(hdr->addr2)); + "frame from %s" + " (drop_unencrypted=1)\n", + dev->name, print_mac(mac, hdr->addr2)); } goto rx_dropped; } diff --git a/drivers/net/wireless/hostap/hostap_80211_tx.c b/drivers/net/wireless/hostap/hostap_80211_tx.c index 3df3c60263d4ac07e50e4aa4029a0f03affdc101..e7afc3ec3e6deffa6e672a8867b0611029c92029 100644 --- a/drivers/net/wireless/hostap/hostap_80211_tx.c +++ b/drivers/net/wireless/hostap/hostap_80211_tx.c @@ -17,6 +17,7 @@ void hostap_dump_tx_80211(const char *name, struct sk_buff *skb) { struct ieee80211_hdr_4addr *hdr; u16 fc; + DECLARE_MAC_BUF(mac); hdr = (struct ieee80211_hdr_4addr *) skb->data; @@ -40,10 +41,11 @@ void hostap_dump_tx_80211(const char *name, struct sk_buff *skb) printk(" dur=0x%04x seq=0x%04x\n", le16_to_cpu(hdr->duration_id), le16_to_cpu(hdr->seq_ctl)); - printk(KERN_DEBUG " A1=" MACSTR " A2=" MACSTR " A3=" MACSTR, - MAC2STR(hdr->addr1), MAC2STR(hdr->addr2), MAC2STR(hdr->addr3)); + printk(KERN_DEBUG " A1=%s", print_mac(mac, hdr->addr1)); + printk(" A2=%s", print_mac(mac, hdr->addr2)); + printk(" A3=%s", print_mac(mac, hdr->addr3)); if (skb->len >= 30) - printk(" A4=" MACSTR, MAC2STR(hdr->addr4)); + printk(" A4=%s", print_mac(mac, hdr->addr4)); printk("\n"); } @@ -312,6 +314,7 @@ static struct sk_buff * hostap_tx_encrypt(struct sk_buff *skb, struct ieee80211_hdr_4addr *hdr; u16 fc; int prefix_len, postfix_len, hdr_len, res; + DECLARE_MAC_BUF(mac); iface = netdev_priv(skb->dev); local = iface->local; @@ -326,8 +329,8 @@ static struct sk_buff * hostap_tx_encrypt(struct sk_buff *skb, hdr = (struct ieee80211_hdr_4addr *) 
skb->data; if (net_ratelimit()) { printk(KERN_DEBUG "%s: TKIP countermeasures: dropped " - "TX packet to " MACSTR "\n", - local->dev->name, MAC2STR(hdr->addr1)); + "TX packet to %s\n", + local->dev->name, print_mac(mac, hdr->addr1)); } kfree_skb(skb); return NULL; diff --git a/drivers/net/wireless/hostap/hostap_ap.c b/drivers/net/wireless/hostap/hostap_ap.c index 90900525379c045b89a6be172634694929d2b5e4..6bbdb76b32df5420f687107af1e42560664a609b 100644 --- a/drivers/net/wireless/hostap/hostap_ap.c +++ b/drivers/net/wireless/hostap/hostap_ap.c @@ -94,6 +94,7 @@ static void ap_sta_hash_add(struct ap_data *ap, struct sta_info *sta) static void ap_sta_hash_del(struct ap_data *ap, struct sta_info *sta) { struct sta_info *s; + DECLARE_MAC_BUF(mac); s = ap->sta_hash[STA_HASH(sta->addr)]; if (s == NULL) return; @@ -108,18 +109,20 @@ static void ap_sta_hash_del(struct ap_data *ap, struct sta_info *sta) if (s->hnext != NULL) s->hnext = s->hnext->hnext; else - printk("AP: could not remove STA " MACSTR " from hash table\n", - MAC2STR(sta->addr)); + printk("AP: could not remove STA %s" + " from hash table\n", + print_mac(mac, sta->addr)); } static void ap_free_sta(struct ap_data *ap, struct sta_info *sta) { + DECLARE_MAC_BUF(mac); if (sta->ap && sta->local) hostap_event_expired_sta(sta->local->dev, sta); if (ap->proc != NULL) { char name[20]; - sprintf(name, MACSTR, MAC2STR(sta->addr)); + sprintf(name, "%s", print_mac(mac, sta->addr)); remove_proc_entry(name, ap->proc); } @@ -182,6 +185,7 @@ static void ap_handle_timer(unsigned long data) struct ap_data *ap; unsigned long next_time = 0; int was_assoc; + DECLARE_MAC_BUF(mac); if (sta == NULL || sta->local == NULL || sta->local->ap == NULL) { PDEBUG(DEBUG_AP, "ap_handle_timer() called with NULL data\n"); @@ -238,8 +242,8 @@ static void ap_handle_timer(unsigned long data) if (sta->ap) { if (ap->autom_ap_wds) { PDEBUG(DEBUG_AP, "%s: removing automatic WDS " - "connection to AP " MACSTR "\n", - local->dev->name, MAC2STR(sta->addr)); + "connection to AP %s\n", + local->dev->name, print_mac(mac, sta->addr)); hostap_wds_link_oper(local, sta->addr, WDS_DEL); } } else if (sta->timeout_next == STA_NULLFUNC) { @@ -255,11 +259,11 @@ static void ap_handle_timer(unsigned long data) } else { int deauth = sta->timeout_next == STA_DEAUTH; u16 resp; - PDEBUG(DEBUG_AP, "%s: sending %s info to STA " MACSTR + PDEBUG(DEBUG_AP, "%s: sending %s info to STA %s" "(last=%lu, jiffies=%lu)\n", local->dev->name, deauth ? "deauthentication" : "disassociation", - MAC2STR(sta->addr), sta->last_rx, jiffies); + print_mac(mac, sta->addr), sta->last_rx, jiffies); resp = cpu_to_le16(deauth ? 
WLAN_REASON_PREV_AUTH_NOT_VALID : WLAN_REASON_DISASSOC_DUE_TO_INACTIVITY); @@ -271,9 +275,10 @@ static void ap_handle_timer(unsigned long data) if (sta->timeout_next == STA_DEAUTH) { if (sta->flags & WLAN_STA_PERM) { - PDEBUG(DEBUG_AP, "%s: STA " MACSTR " would have been " - "removed, but it has 'perm' flag\n", - local->dev->name, MAC2STR(sta->addr)); + PDEBUG(DEBUG_AP, "%s: STA %s" + " would have been removed, " + "but it has 'perm' flag\n", + local->dev->name, print_mac(mac, sta->addr)); } else ap_free_sta(ap, sta); return; @@ -327,6 +332,7 @@ static int ap_control_proc_read(char *page, char **start, off_t off, struct ap_data *ap = (struct ap_data *) data; char *policy_txt; struct mac_entry *entry; + DECLARE_MAC_BUF(mac); if (off != 0) { *eof = 1; @@ -357,7 +363,7 @@ static int ap_control_proc_read(char *page, char **start, off_t off, break; } - p += sprintf(p, MACSTR "\n", MAC2STR(entry->addr)); + p += sprintf(p, "%s\n", print_mac(mac, entry->addr)); } spin_unlock_bh(&ap->mac_restrictions.lock); @@ -514,6 +520,7 @@ static int prism2_ap_proc_read(char *page, char **start, off_t off, struct ap_data *ap = (struct ap_data *) data; struct sta_info *sta; int i; + DECLARE_MAC_BUF(mac); if (off > PROC_LIMIT) { *eof = 1; @@ -526,7 +533,8 @@ static int prism2_ap_proc_read(char *page, char **start, off_t off, if (!sta->ap) continue; - p += sprintf(p, MACSTR " %d %d %d %d '", MAC2STR(sta->addr), + p += sprintf(p, "%s %d %d %d %d '", + print_mac(mac, sta->addr), sta->u.ap.channel, sta->last_rx_signal, sta->last_rx_silence, sta->last_rx_rate); for (i = 0; i < sta->u.ap.ssid_len; i++) @@ -623,6 +631,7 @@ static void hostap_ap_tx_cb_auth(struct sk_buff *skb, int ok, void *data) u16 fc, *pos, auth_alg, auth_transaction, status; struct sta_info *sta = NULL; char *txt = NULL; + DECLARE_MAC_BUF(mac); if (ap->local->hostapd) { dev_kfree_skb(skb); @@ -674,9 +683,9 @@ static void hostap_ap_tx_cb_auth(struct sk_buff *skb, int ok, void *data) if (sta) atomic_dec(&sta->users); if (txt) { - PDEBUG(DEBUG_AP, "%s: " MACSTR " auth_cb - alg=%d trans#=%d " - "status=%d - %s\n", - dev->name, MAC2STR(hdr->addr1), auth_alg, + PDEBUG(DEBUG_AP, "%s: %s auth_cb - alg=%d " + "trans#=%d status=%d - %s\n", + dev->name, print_mac(mac, hdr->addr1), auth_alg, auth_transaction, status, txt); } dev_kfree_skb(skb); @@ -692,6 +701,7 @@ static void hostap_ap_tx_cb_assoc(struct sk_buff *skb, int ok, void *data) u16 fc, *pos, status; struct sta_info *sta = NULL; char *txt = NULL; + DECLARE_MAC_BUF(mac); if (ap->local->hostapd) { dev_kfree_skb(skb); @@ -742,8 +752,8 @@ static void hostap_ap_tx_cb_assoc(struct sk_buff *skb, int ok, void *data) if (sta) atomic_dec(&sta->users); if (txt) { - PDEBUG(DEBUG_AP, "%s: " MACSTR " assoc_cb - %s\n", - dev->name, MAC2STR(hdr->addr1), txt); + PDEBUG(DEBUG_AP, "%s: %s assoc_cb - %s\n", + dev->name, print_mac(mac, hdr->addr1), txt); } dev_kfree_skb(skb); } @@ -755,6 +765,7 @@ static void hostap_ap_tx_cb_poll(struct sk_buff *skb, int ok, void *data) struct ap_data *ap = data; struct ieee80211_hdr_4addr *hdr; struct sta_info *sta; + DECLARE_MAC_BUF(mac); if (skb->len < 24) goto fail; @@ -766,9 +777,9 @@ static void hostap_ap_tx_cb_poll(struct sk_buff *skb, int ok, void *data) sta->flags &= ~WLAN_STA_PENDING_POLL; spin_unlock(&ap->sta_table_lock); } else { - PDEBUG(DEBUG_AP, "%s: STA " MACSTR " did not ACK activity " - "poll frame\n", ap->local->dev->name, - MAC2STR(hdr->addr1)); + PDEBUG(DEBUG_AP, "%s: STA %s" + " did not ACK activity poll frame\n", + ap->local->dev->name, print_mac(mac, hdr->addr1)); } 
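The hunks above all apply the same conversion: instead of expanding an address into six printf arguments with MAC2STR()/MACSTR, the new code declares a small stack buffer with DECLARE_MAC_BUF() and formats it through print_mac(). In isolation the idiom looks roughly like this; dump_sta_addr() is a made-up example name, and print_mac()/DECLARE_MAC_BUF() are the helpers this patch already relies on (declared in <linux/if_ether.h> in kernels of this vintage):

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/if_ether.h>

/* Example only: print one station address the way the converted hostap
 * code does.  DECLARE_MAC_BUF() reserves an 18-byte buffer on the stack
 * and print_mac() fills it with the xx:xx:xx:xx:xx:xx form. */
static void dump_sta_addr(const char *dev_name, const u8 *addr)
{
	DECLARE_MAC_BUF(mac);

	printk(KERN_DEBUG "%s: STA %s\n", dev_name, print_mac(mac, addr));
}

When several addresses appear in one printk(), one buffer per address is needed, which is why prism2_txexc() and the rx/tx dump helpers below declare mac, mac2, mac3 and mac4.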
fail: @@ -985,6 +996,7 @@ static int prism2_sta_proc_read(char *page, char **start, off_t off, char *p = page; struct sta_info *sta = (struct sta_info *) data; int i; + DECLARE_MAC_BUF(mac); /* FIX: possible race condition.. the STA data could have just expired, * but proc entry was still here so that the read could have started; @@ -995,11 +1007,11 @@ static int prism2_sta_proc_read(char *page, char **start, off_t off, return 0; } - p += sprintf(p, "%s=" MACSTR "\nusers=%d\naid=%d\n" + p += sprintf(p, "%s=%s\nusers=%d\naid=%d\n" "flags=0x%04x%s%s%s%s%s%s%s\n" "capability=0x%02x\nlisten_interval=%d\nsupported_rates=", sta->ap ? "AP" : "STA", - MAC2STR(sta->addr), atomic_read(&sta->users), sta->aid, + print_mac(mac, sta->addr), atomic_read(&sta->users), sta->aid, sta->flags, sta->flags & WLAN_STA_AUTH ? " AUTH" : "", sta->flags & WLAN_STA_ASSOC ? " ASSOC" : "", @@ -1060,6 +1072,7 @@ static void handle_add_proc_queue(struct work_struct *work) struct sta_info *sta; char name[20]; struct add_sta_proc_data *entry, *prev; + DECLARE_MAC_BUF(mac); entry = ap->add_sta_proc_entries; ap->add_sta_proc_entries = NULL; @@ -1072,7 +1085,7 @@ static void handle_add_proc_queue(struct work_struct *work) spin_unlock_bh(&ap->sta_table_lock); if (sta) { - sprintf(name, MACSTR, MAC2STR(sta->addr)); + sprintf(name, "%s", print_mac(mac, sta->addr)); sta->proc = create_proc_read_entry( name, 0, ap->proc, prism2_sta_proc_read, sta); @@ -1290,6 +1303,7 @@ static void handle_authen(local_info_t *local, struct sk_buff *skb, struct sta_info *sta = NULL; struct ieee80211_crypt_data *crypt; char *txt = ""; + DECLARE_MAC_BUF(mac); len = skb->len - IEEE80211_MGMT_HDR_LEN; @@ -1298,8 +1312,8 @@ static void handle_authen(local_info_t *local, struct sk_buff *skb, if (len < 6) { PDEBUG(DEBUG_AP, "%s: handle_authen - too short payload " - "(len=%d) from " MACSTR "\n", dev->name, len, - MAC2STR(hdr->addr2)); + "(len=%d) from %s\n", dev->name, len, + print_mac(mac, hdr->addr2)); return; } @@ -1364,8 +1378,8 @@ static void handle_authen(local_info_t *local, struct sk_buff *skb, if (time_after(jiffies, sta->u.ap.last_beacon + (10 * sta->listen_interval * HZ) / 1024)) { PDEBUG(DEBUG_AP, "%s: no beacons received for a while," - " assuming AP " MACSTR " is now STA\n", - dev->name, MAC2STR(sta->addr)); + " assuming AP %s is now STA\n", + dev->name, print_mac(mac, sta->addr)); sta->ap = 0; sta->flags = 0; sta->u.sta.challenge = NULL; @@ -1480,9 +1494,9 @@ static void handle_authen(local_info_t *local, struct sk_buff *skb, } if (resp) { - PDEBUG(DEBUG_AP, "%s: " MACSTR " auth (alg=%d trans#=%d " - "stat=%d len=%d fc=%04x) ==> %d (%s)\n", - dev->name, MAC2STR(hdr->addr2), auth_alg, + PDEBUG(DEBUG_AP, "%s: %s auth (alg=%d " + "trans#=%d stat=%d len=%d fc=%04x) ==> %d (%s)\n", + dev->name, print_mac(mac, hdr->addr2), auth_alg, auth_transaction, status_code, len, fc, resp, txt); } } @@ -1502,13 +1516,14 @@ static void handle_assoc(local_info_t *local, struct sk_buff *skb, int send_deauth = 0; char *txt = ""; u8 prev_ap[ETH_ALEN]; + DECLARE_MAC_BUF(mac); left = len = skb->len - IEEE80211_MGMT_HDR_LEN; if (len < (reassoc ? 
10 : 4)) { PDEBUG(DEBUG_AP, "%s: handle_assoc - too short payload " - "(len=%d, reassoc=%d) from " MACSTR "\n", - dev->name, len, reassoc, MAC2STR(hdr->addr2)); + "(len=%d, reassoc=%d) from %s\n", + dev->name, len, reassoc, print_mac(mac, hdr->addr2)); return; } @@ -1585,9 +1600,9 @@ static void handle_assoc(local_info_t *local, struct sk_buff *skb, } if (left > 0) { - PDEBUG(DEBUG_AP, "%s: assoc from " MACSTR " with extra" - " data (%d bytes) [", - dev->name, MAC2STR(hdr->addr2), left); + PDEBUG(DEBUG_AP, "%s: assoc from %s" + " with extra data (%d bytes) [", + dev->name, print_mac(mac, hdr->addr2), left); while (left > 0) { PDEBUG2(DEBUG_AP, "<%02x>", *u); u++; left--; @@ -1687,10 +1702,10 @@ static void handle_assoc(local_info_t *local, struct sk_buff *skb, } #if 0 - PDEBUG(DEBUG_AP, "%s: " MACSTR " %sassoc (len=%d prev_ap=" MACSTR - ") => %d(%d) (%s)\n", - dev->name, MAC2STR(hdr->addr2), reassoc ? "re" : "", len, - MAC2STR(prev_ap), resp, send_deauth, txt); + PDEBUG(DEBUG_AP, "%s: %s %sassoc (len=%d " + "prev_ap=%s) => %d(%d) (%s)\n", + dev->name, print_mac(mac, hdr->addr2), reassoc ? "re" : "", len, + print_mac(mac, prev_ap), resp, send_deauth, txt); #endif } @@ -1705,6 +1720,7 @@ static void handle_deauth(local_info_t *local, struct sk_buff *skb, int len; u16 reason_code, *pos; struct sta_info *sta = NULL; + DECLARE_MAC_BUF(mac); len = skb->len - IEEE80211_MGMT_HDR_LEN; @@ -1716,8 +1732,8 @@ static void handle_deauth(local_info_t *local, struct sk_buff *skb, pos = (u16 *) body; reason_code = __le16_to_cpu(*pos); - PDEBUG(DEBUG_AP, "%s: deauthentication: " MACSTR " len=%d, " - "reason_code=%d\n", dev->name, MAC2STR(hdr->addr2), len, + PDEBUG(DEBUG_AP, "%s: deauthentication: %s len=%d, " + "reason_code=%d\n", dev->name, print_mac(mac, hdr->addr2), len, reason_code); spin_lock_bh(&local->ap->sta_table_lock); @@ -1729,9 +1745,9 @@ static void handle_deauth(local_info_t *local, struct sk_buff *skb, } spin_unlock_bh(&local->ap->sta_table_lock); if (sta == NULL) { - printk("%s: deauthentication from " MACSTR ", " + printk("%s: deauthentication from %s, " "reason_code=%d, but STA not authenticated\n", dev->name, - MAC2STR(hdr->addr2), reason_code); + print_mac(mac, hdr->addr2), reason_code); } } @@ -1746,6 +1762,7 @@ static void handle_disassoc(local_info_t *local, struct sk_buff *skb, int len; u16 reason_code, *pos; struct sta_info *sta = NULL; + DECLARE_MAC_BUF(mac); len = skb->len - IEEE80211_MGMT_HDR_LEN; @@ -1757,8 +1774,8 @@ static void handle_disassoc(local_info_t *local, struct sk_buff *skb, pos = (u16 *) body; reason_code = __le16_to_cpu(*pos); - PDEBUG(DEBUG_AP, "%s: disassociation: " MACSTR " len=%d, " - "reason_code=%d\n", dev->name, MAC2STR(hdr->addr2), len, + PDEBUG(DEBUG_AP, "%s: disassociation: %s len=%d, " + "reason_code=%d\n", dev->name, print_mac(mac, hdr->addr2), len, reason_code); spin_lock_bh(&local->ap->sta_table_lock); @@ -1770,9 +1787,9 @@ static void handle_disassoc(local_info_t *local, struct sk_buff *skb, } spin_unlock_bh(&local->ap->sta_table_lock); if (sta == NULL) { - printk("%s: disassociation from " MACSTR ", " + printk("%s: disassociation from %s, " "reason_code=%d, but STA not authenticated\n", - dev->name, MAC2STR(hdr->addr2), reason_code); + dev->name, print_mac(mac, hdr->addr2), reason_code); } } @@ -1862,15 +1879,16 @@ static void handle_pspoll(local_info_t *local, struct sta_info *sta; u16 aid; struct sk_buff *skb; + DECLARE_MAC_BUF(mac); - PDEBUG(DEBUG_PS2, "handle_pspoll: BSSID=" MACSTR ", TA=" MACSTR - " PWRMGT=%d\n", - MAC2STR(hdr->addr1), 
MAC2STR(hdr->addr2), + PDEBUG(DEBUG_PS2, "handle_pspoll: BSSID=%s" + ", TA=%s PWRMGT=%d\n", + print_mac(mac, hdr->addr1), print_mac(mac, hdr->addr2), !!(le16_to_cpu(hdr->frame_ctl) & IEEE80211_FCTL_PM)); if (memcmp(hdr->addr1, dev->dev_addr, ETH_ALEN)) { - PDEBUG(DEBUG_AP, "handle_pspoll - addr1(BSSID)=" MACSTR - " not own MAC\n", MAC2STR(hdr->addr1)); + PDEBUG(DEBUG_AP, "handle_pspoll - addr1(BSSID)=%s" + " not own MAC\n", print_mac(mac, hdr->addr1)); return; } @@ -1948,6 +1966,7 @@ static void handle_wds_oper_queue(struct work_struct *work) wds_oper_queue); local_info_t *local = ap->local; struct wds_oper_data *entry, *prev; + DECLARE_MAC_BUF(mac); spin_lock_bh(&local->lock); entry = local->ap->wds_oper_entries; @@ -1956,10 +1975,10 @@ static void handle_wds_oper_queue(struct work_struct *work) while (entry) { PDEBUG(DEBUG_AP, "%s: %s automatic WDS connection " - "to AP " MACSTR "\n", + "to AP %s\n", local->dev->name, entry->type == WDS_ADD ? "adding" : "removing", - MAC2STR(entry->addr)); + print_mac(mac, entry->addr)); if (entry->type == WDS_ADD) prism2_wds_add(local, entry->addr, 0); else if (entry->type == WDS_DEL) @@ -2135,6 +2154,7 @@ static void handle_ap_item(local_info_t *local, struct sk_buff *skb, #endif /* PRISM2_NO_KERNEL_IEEE80211_MGMT */ u16 fc, type, stype; struct ieee80211_hdr_4addr *hdr; + DECLARE_MAC_BUF(mac); /* FIX: should give skb->len to handler functions and check that the * buffer is long enough */ @@ -2163,8 +2183,8 @@ static void handle_ap_item(local_info_t *local, struct sk_buff *skb, if (memcmp(hdr->addr1, dev->dev_addr, ETH_ALEN)) { PDEBUG(DEBUG_AP, "handle_ap_item - addr1(BSSID)=" - MACSTR " not own MAC\n", - MAC2STR(hdr->addr1)); + "%s not own MAC\n", + print_mac(mac, hdr->addr1)); goto done; } @@ -2200,14 +2220,14 @@ static void handle_ap_item(local_info_t *local, struct sk_buff *skb, } if (memcmp(hdr->addr1, dev->dev_addr, ETH_ALEN)) { - PDEBUG(DEBUG_AP, "handle_ap_item - addr1(DA)=" MACSTR - " not own MAC\n", MAC2STR(hdr->addr1)); + PDEBUG(DEBUG_AP, "handle_ap_item - addr1(DA)=%s" + " not own MAC\n", print_mac(mac, hdr->addr1)); goto done; } if (memcmp(hdr->addr3, dev->dev_addr, ETH_ALEN)) { - PDEBUG(DEBUG_AP, "handle_ap_item - addr3(BSSID)=" MACSTR - " not own MAC\n", MAC2STR(hdr->addr3)); + PDEBUG(DEBUG_AP, "handle_ap_item - addr3(BSSID)=%s" + " not own MAC\n", print_mac(mac, hdr->addr3)); goto done; } @@ -2288,6 +2308,7 @@ static void schedule_packet_send(local_info_t *local, struct sta_info *sta) struct sk_buff *skb; struct ieee80211_hdr_4addr *hdr; struct hostap_80211_rx_status rx_stats; + DECLARE_MAC_BUF(mac); if (skb_queue_empty(&sta->tx_buf)) return; @@ -2308,8 +2329,8 @@ static void schedule_packet_send(local_info_t *local, struct sta_info *sta) memcpy(hdr->addr2, sta->addr, ETH_ALEN); hdr->duration_id = cpu_to_le16(sta->aid | BIT(15) | BIT(14)); - PDEBUG(DEBUG_PS2, "%s: Scheduling buffered packet delivery for " - "STA " MACSTR "\n", local->dev->name, MAC2STR(sta->addr)); + PDEBUG(DEBUG_PS2, "%s: Scheduling buffered packet delivery for STA " + "%s\n", local->dev->name, print_mac(mac, sta->addr)); skb->dev = local->dev; @@ -2636,6 +2657,7 @@ static int ap_update_sta_tx_rate(struct sta_info *sta, struct net_device *dev) int ret = sta->tx_rate; struct hostap_interface *iface; local_info_t *local; + DECLARE_MAC_BUF(mac); iface = netdev_priv(dev); local = iface->local; @@ -2663,9 +2685,9 @@ static int ap_update_sta_tx_rate(struct sta_info *sta, struct net_device *dev) case 3: sta->tx_rate = 110; break; default: sta->tx_rate = 0; break; } - 
PDEBUG(DEBUG_AP, "%s: STA " MACSTR " TX rate raised to" - " %d\n", dev->name, MAC2STR(sta->addr), - sta->tx_rate); + PDEBUG(DEBUG_AP, "%s: STA %s" + " TX rate raised to %d\n", + dev->name, print_mac(mac, sta->addr), sta->tx_rate); } sta->tx_since_last_failure = 0; } @@ -2683,6 +2705,7 @@ ap_tx_ret hostap_handle_sta_tx(local_info_t *local, struct hostap_tx_data *tx) int set_tim, ret; struct ieee80211_hdr_4addr *hdr; struct hostap_skb_tx_data *meta; + DECLARE_MAC_BUF(mac); meta = (struct hostap_skb_tx_data *) skb->cb; ret = AP_TX_CONTINUE; @@ -2718,7 +2741,8 @@ ap_tx_ret hostap_handle_sta_tx(local_info_t *local, struct hostap_tx_data *tx) * print out any errors here. */ if (net_ratelimit()) { printk(KERN_DEBUG "AP: drop packet to non-associated " - "STA " MACSTR "\n", MAC2STR(hdr->addr1)); + "STA %s\n", + print_mac(mac, hdr->addr1)); } #endif local->ap->tx_drop_nonassoc++; @@ -2756,8 +2780,9 @@ ap_tx_ret hostap_handle_sta_tx(local_info_t *local, struct hostap_tx_data *tx) } if (skb_queue_len(&sta->tx_buf) >= STA_MAX_TX_BUFFER) { - PDEBUG(DEBUG_PS, "%s: No more space in STA (" MACSTR ")'s PS " - "mode buffer\n", local->dev->name, MAC2STR(sta->addr)); + PDEBUG(DEBUG_PS, "%s: No more space in STA (%s" + ")'s PS mode buffer\n", + local->dev->name, print_mac(mac, sta->addr)); /* Make sure that TIM is set for the station (it might not be * after AP wlan hw reset). */ /* FIX: should fix hw reset to restore bits based on STA @@ -2821,6 +2846,7 @@ void hostap_handle_sta_tx_exc(local_info_t *local, struct sk_buff *skb) struct sta_info *sta; struct ieee80211_hdr_4addr *hdr; struct hostap_skb_tx_data *meta; + DECLARE_MAC_BUF(mac); hdr = (struct ieee80211_hdr_4addr *) skb->data; meta = (struct hostap_skb_tx_data *) skb->cb; @@ -2829,9 +2855,9 @@ void hostap_handle_sta_tx_exc(local_info_t *local, struct sk_buff *skb) sta = ap_get_sta(local->ap, hdr->addr1); if (!sta) { spin_unlock(&local->ap->sta_table_lock); - PDEBUG(DEBUG_AP, "%s: Could not find STA " MACSTR " for this " - "TX error (@%lu)\n", - local->dev->name, MAC2STR(hdr->addr1), jiffies); + PDEBUG(DEBUG_AP, "%s: Could not find STA %s" + " for this TX error (@%lu)\n", + local->dev->name, print_mac(mac, hdr->addr1), jiffies); return; } @@ -2858,8 +2884,9 @@ void hostap_handle_sta_tx_exc(local_info_t *local, struct sk_buff *skb) case 3: sta->tx_rate = 110; break; default: sta->tx_rate = 0; break; } - PDEBUG(DEBUG_AP, "%s: STA " MACSTR " TX rate lowered " - "to %d\n", local->dev->name, MAC2STR(sta->addr), + PDEBUG(DEBUG_AP, "%s: STA %s" + " TX rate lowered to %d\n", + local->dev->name, print_mac(mac, sta->addr), sta->tx_rate); } sta->tx_consecutive_exc = 0; @@ -2871,16 +2898,17 @@ void hostap_handle_sta_tx_exc(local_info_t *local, struct sk_buff *skb) static void hostap_update_sta_ps2(local_info_t *local, struct sta_info *sta, int pwrmgt, int type, int stype) { + DECLARE_MAC_BUF(mac); if (pwrmgt && !(sta->flags & WLAN_STA_PS)) { sta->flags |= WLAN_STA_PS; - PDEBUG(DEBUG_PS2, "STA " MACSTR " changed to use PS " + PDEBUG(DEBUG_PS2, "STA %s changed to use PS " "mode (type=0x%02X, stype=0x%02X)\n", - MAC2STR(sta->addr), type >> 2, stype >> 4); + print_mac(mac, sta->addr), type >> 2, stype >> 4); } else if (!pwrmgt && (sta->flags & WLAN_STA_PS)) { sta->flags &= ~WLAN_STA_PS; - PDEBUG(DEBUG_PS2, "STA " MACSTR " changed to not use " + PDEBUG(DEBUG_PS2, "STA %s changed to not use " "PS mode (type=0x%02X, stype=0x%02X)\n", - MAC2STR(sta->addr), type >> 2, stype >> 4); + print_mac(mac, sta->addr), type >> 2, stype >> 4); if (type != IEEE80211_FTYPE_CTL || stype 
!= IEEE80211_STYPE_PSPOLL) schedule_packet_send(local, sta); @@ -2924,6 +2952,7 @@ ap_rx_ret hostap_handle_sta_rx(local_info_t *local, struct net_device *dev, struct sta_info *sta; u16 fc, type, stype; struct ieee80211_hdr_4addr *hdr; + DECLARE_MAC_BUF(mac); if (local->ap == NULL) return AP_RX_CONTINUE; @@ -2954,9 +2983,10 @@ ap_rx_ret hostap_handle_sta_rx(local_info_t *local, struct net_device *dev, #ifndef PRISM2_NO_KERNEL_IEEE80211_MGMT } else { printk(KERN_DEBUG "%s: dropped received packet" - " from non-associated STA " MACSTR + " from non-associated STA " + "%s" " (type=0x%02x, subtype=0x%02x)\n", - dev->name, MAC2STR(hdr->addr2), + dev->name, print_mac(mac, hdr->addr2), type >> 2, stype >> 4); hostap_rx(dev, skb, rx_stats); #endif /* PRISM2_NO_KERNEL_IEEE80211_MGMT */ @@ -2991,8 +3021,8 @@ ap_rx_ret hostap_handle_sta_rx(local_info_t *local, struct net_device *dev, * being associated. */ printk(KERN_DEBUG "%s: rejected received nullfunc " "frame without ToDS from not associated STA " - MACSTR "\n", - dev->name, MAC2STR(hdr->addr2)); + "%s\n", + dev->name, print_mac(mac, hdr->addr2)); hostap_rx(dev, skb, rx_stats); #endif /* PRISM2_NO_KERNEL_IEEE80211_MGMT */ } @@ -3009,9 +3039,9 @@ ap_rx_ret hostap_handle_sta_rx(local_info_t *local, struct net_device *dev, * If BSSID is own, report the dropping of this frame. */ if (memcmp(hdr->addr3, dev->dev_addr, ETH_ALEN) == 0) { printk(KERN_DEBUG "%s: dropped received packet from " - MACSTR " with no ToDS flag (type=0x%02x, " - "subtype=0x%02x)\n", dev->name, - MAC2STR(hdr->addr2), type >> 2, stype >> 4); + "%s with no ToDS flag " + "(type=0x%02x, subtype=0x%02x)\n", dev->name, + print_mac(mac, hdr->addr2), type >> 2, stype >> 4); hostap_dump_rx_80211(dev->name, skb, rx_stats); } ret = AP_RX_DROP; diff --git a/drivers/net/wireless/hostap/hostap_common.h b/drivers/net/wireless/hostap/hostap_common.h index b31e6a05f23cc5a97a5b35fa448a12896eed1ed9..ceb7f1e5e9e096d50012a3999e21f1014ee2e48d 100644 --- a/drivers/net/wireless/hostap/hostap_common.h +++ b/drivers/net/wireless/hostap/hostap_common.h @@ -6,9 +6,6 @@ #define BIT(x) (1 << (x)) -#define MAC2STR(a) (a)[0], (a)[1], (a)[2], (a)[3], (a)[4], (a)[5] -#define MACSTR "%02x:%02x:%02x:%02x:%02x:%02x" - /* IEEE 802.11 defines */ diff --git a/drivers/net/wireless/hostap/hostap_cs.c b/drivers/net/wireless/hostap/hostap_cs.c index 30e723f65979fafa484c4102a8e61bd19445e617..877d3bdd37a077239045fd522e8fc2af0c8c645e 100644 --- a/drivers/net/wireless/hostap/hostap_cs.c +++ b/drivers/net/wireless/hostap/hostap_cs.c @@ -272,7 +272,7 @@ static int sandisk_enable_wireless(struct net_device *dev) { int res, ret = 0; conf_reg_t reg; - struct hostap_interface *iface = dev->priv; + struct hostap_interface *iface = netdev_priv(dev); local_info_t *local = iface->local; tuple_t tuple; cisparse_t *parse = NULL; @@ -822,6 +822,7 @@ static struct pcmcia_device_id hostap_cs_ids[] = { PCMCIA_DEVICE_MANF_CARD(0x0101, 0x0777), PCMCIA_DEVICE_MANF_CARD(0x0126, 0x8000), PCMCIA_DEVICE_MANF_CARD(0x0138, 0x0002), + PCMCIA_DEVICE_MANF_CARD(0x01bf, 0x3301), PCMCIA_DEVICE_MANF_CARD(0x0250, 0x0002), PCMCIA_DEVICE_MANF_CARD(0x026f, 0x030b), PCMCIA_DEVICE_MANF_CARD(0x0274, 0x1612), @@ -889,6 +890,10 @@ static struct pcmcia_device_id hostap_cs_ids[] = { PCMCIA_DEVICE_PROD_ID123( "corega", "WL PCCL-11", "ISL37300P", 0xa21501a, 0x59868926, 0xc9049a39), + PCMCIA_DEVICE_PROD_ID1234( + "The Linksys Group, Inc.", "Wireless Network CF Card", "ISL37300P", + "RevA", + 0xa5f472c2, 0x9c05598d, 0xc9049a39, 0x57a66194), PCMCIA_DEVICE_NULL }; 
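Alongside the address formatting, hostap_cs.c and hostap_ioctl.c stop reaching into dev->priv and use netdev_priv() instead, which returns the private area allocated together with the net_device. A minimal sketch of that accessor, assuming the driver's own hostap_interface/local_info_t types from hostap_wlan.h (the helper name hostap_get_local() is invented for the example):

#include <linux/netdevice.h>
#include "hostap_wlan.h"

/* Example only: fetch the per-interface private data via netdev_priv()
 * rather than dereferencing the raw dev->priv pointer. */
static local_info_t *hostap_get_local(struct net_device *dev)
{
	struct hostap_interface *iface = netdev_priv(dev);

	return iface->local;
}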
MODULE_DEVICE_TABLE(pcmcia, hostap_cs_ids); diff --git a/drivers/net/wireless/hostap/hostap_hw.c b/drivers/net/wireless/hostap/hostap_hw.c index 959887b70ca7bf1d7fc8b7cc8295054a52410db3..c592641e914e695d99aa1e408acae2501ca03496 100644 --- a/drivers/net/wireless/hostap/hostap_hw.c +++ b/drivers/net/wireless/hostap/hostap_hw.c @@ -825,7 +825,7 @@ static int hfa384x_get_rid(struct net_device *dev, u16 rid, void *buf, int len, local->hw_downloading) return -ENODEV; - res = down_interruptible(&local->rid_bap_sem); + res = mutex_lock_interruptible(&local->rid_bap_mtx); if (res) return res; @@ -834,7 +834,7 @@ static int hfa384x_get_rid(struct net_device *dev, u16 rid, void *buf, int len, printk(KERN_DEBUG "%s: hfa384x_get_rid: CMDCODE_ACCESS failed " "(res=%d, rid=%04x, len=%d)\n", dev->name, res, rid, len); - up(&local->rid_bap_sem); + mutex_unlock(&local->rid_bap_mtx); return res; } @@ -861,7 +861,7 @@ static int hfa384x_get_rid(struct net_device *dev, u16 rid, void *buf, int len, res = hfa384x_from_bap(dev, BAP0, buf, len); spin_unlock_bh(&local->baplock); - up(&local->rid_bap_sem); + mutex_unlock(&local->rid_bap_mtx); if (res) { if (res != -ENODATA) @@ -902,7 +902,7 @@ static int hfa384x_set_rid(struct net_device *dev, u16 rid, void *buf, int len) /* RID len in words and +1 for rec.rid */ rec.len = cpu_to_le16(len / 2 + len % 2 + 1); - res = down_interruptible(&local->rid_bap_sem); + res = mutex_lock_interruptible(&local->rid_bap_mtx); if (res) return res; @@ -917,12 +917,12 @@ static int hfa384x_set_rid(struct net_device *dev, u16 rid, void *buf, int len) if (res) { printk(KERN_DEBUG "%s: hfa384x_set_rid (rid=%04x, len=%d) - " "failed - res=%d\n", dev->name, rid, len, res); - up(&local->rid_bap_sem); + mutex_unlock(&local->rid_bap_mtx); return res; } res = hfa384x_cmd(dev, HFA384X_CMDCODE_ACCESS_WRITE, rid, NULL, NULL); - up(&local->rid_bap_sem); + mutex_unlock(&local->rid_bap_mtx); if (res) { printk(KERN_DEBUG "%s: hfa384x_set_rid: CMDCODE_ACCESS_WRITE " @@ -2335,6 +2335,10 @@ static void prism2_txexc(local_info_t *local) int show_dump, res; char *payload = NULL; struct hfa384x_tx_frame txdesc; + DECLARE_MAC_BUF(mac); + DECLARE_MAC_BUF(mac2); + DECLARE_MAC_BUF(mac3); + DECLARE_MAC_BUF(mac4); show_dump = local->frame_dump & PRISM2_DUMP_TXEXC_HDR; local->stats.tx_errors++; @@ -2400,10 +2404,9 @@ static void prism2_txexc(local_info_t *local) WLAN_FC_GET_STYPE(fc) >> 4, fc & IEEE80211_FCTL_TODS ? " ToDS" : "", fc & IEEE80211_FCTL_FROMDS ? 
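The hfa384x_get_rid()/hfa384x_set_rid() hunks above replace the rid_bap semaphore with a struct mutex, trading down_interruptible()/up() for mutex_lock_interruptible()/mutex_unlock(). Reduced to a sketch, the locking shape is as follows; the real driver keeps the mutex inside local_info_t and initialises it with mutex_init(), while a file-scope DEFINE_MUTEX() is used here only to keep the example self-contained:

#include <linux/mutex.h>

static DEFINE_MUTEX(rid_bap_mtx);

static int rid_access_example(void)
{
	int res;

	res = mutex_lock_interruptible(&rid_bap_mtx);
	if (res)
		return res;	/* interrupted by a signal */

	/* ... RID/BAP setup and transfer, as in hfa384x_get_rid() ... */

	mutex_unlock(&rid_bap_mtx);
	return 0;
}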
" FromDS" : ""); - PDEBUG(DEBUG_EXTRA, " A1=" MACSTR " A2=" MACSTR " A3=" - MACSTR " A4=" MACSTR "\n", - MAC2STR(txdesc.addr1), MAC2STR(txdesc.addr2), - MAC2STR(txdesc.addr3), MAC2STR(txdesc.addr4)); + PDEBUG(DEBUG_EXTRA, " A1=%s A2=%s A3=%s A4=%s\n", + print_mac(mac, txdesc.addr1), print_mac(mac2, txdesc.addr2), + print_mac(mac3, txdesc.addr3), print_mac(mac4, txdesc.addr4)); } @@ -3171,7 +3174,7 @@ prism2_init_local_data(struct prism2_helper_functions *funcs, int card_idx, spin_lock_init(&local->cmdlock); spin_lock_init(&local->baplock); spin_lock_init(&local->lock); - init_MUTEX(&local->rid_bap_sem); + mutex_init(&local->rid_bap_mtx); if (card_idx < 0 || card_idx >= MAX_PARM_DEVICES) card_idx = 0; @@ -3254,12 +3257,11 @@ while (0) INIT_LIST_HEAD(&local->bss_list); - hostap_setup_dev(dev, local, 1); - local->saved_eth_header_parse = dev->hard_header_parse; + hostap_setup_dev(dev, local, HOSTAP_INTERFACE_MASTER); dev->hard_start_xmit = hostap_master_start_xmit; dev->type = ARPHRD_IEEE80211; - dev->hard_header_parse = hostap_80211_header_parse; + dev->header_ops = &hostap_80211_ops; rtnl_lock(); ret = dev_alloc_name(dev, "wifi%d"); @@ -3424,7 +3426,7 @@ static void prism2_suspend(struct net_device *dev) struct local_info *local; union iwreq_data wrqu; - iface = dev->priv; + iface = netdev_priv(dev); local = iface->local; /* Send disconnect event, e.g., to trigger reassociation after resume diff --git a/drivers/net/wireless/hostap/hostap_info.c b/drivers/net/wireless/hostap/hostap_info.c index b6a02a02da74e645287aa943ea3cfc57d21b53a9..636f4b2382ea0c978d62b9d79e1d8cf2c24d5794 100644 --- a/drivers/net/wireless/hostap/hostap_info.c +++ b/drivers/net/wireless/hostap/hostap_info.c @@ -166,6 +166,7 @@ static void prism2_host_roaming(local_info_t *local) struct hfa384x_hostscan_result *selected, *entry; int i; unsigned long flags; + DECLARE_MAC_BUF(mac); if (local->last_join_time && time_before(jiffies, local->last_join_time + 10 * HZ)) { @@ -198,8 +199,9 @@ static void prism2_host_roaming(local_info_t *local) local->preferred_ap[2] || local->preferred_ap[3] || local->preferred_ap[4] || local->preferred_ap[5]) { /* Try to find preferred AP */ - PDEBUG(DEBUG_EXTRA, "%s: Preferred AP BSSID " MACSTR "\n", - dev->name, MAC2STR(local->preferred_ap)); + PDEBUG(DEBUG_EXTRA, "%s: Preferred AP BSSID " + "%s\n", + dev->name, print_mac(mac, local->preferred_ap)); for (i = 0; i < local->last_scan_results_count; i++) { entry = &local->last_scan_results[i]; if (memcmp(local->preferred_ap, entry->bssid, 6) == 0) @@ -216,8 +218,9 @@ static void prism2_host_roaming(local_info_t *local) req.channel = selected->chid; spin_unlock_irqrestore(&local->lock, flags); - PDEBUG(DEBUG_EXTRA, "%s: JoinRequest: BSSID=" MACSTR " channel=%d\n", - dev->name, MAC2STR(req.bssid), le16_to_cpu(req.channel)); + PDEBUG(DEBUG_EXTRA, "%s: JoinRequest: BSSID=%s" + " channel=%d\n", + dev->name, print_mac(mac, req.bssid), le16_to_cpu(req.channel)); if (local->func->set_rid(dev, HFA384X_RID_JOINREQUEST, &req, sizeof(req))) { printk(KERN_DEBUG "%s: JoinRequest failed\n", dev->name); @@ -409,6 +412,7 @@ static void handle_info_queue_linkstatus(local_info_t *local) int val = local->prev_link_status; int connected; union iwreq_data wrqu; + DECLARE_MAC_BUF(mac); connected = val == HFA384X_LINKSTATUS_CONNECTED || @@ -420,9 +424,10 @@ static void handle_info_queue_linkstatus(local_info_t *local) printk(KERN_DEBUG "%s: could not read CURRENTBSSID after " "LinkStatus event\n", local->dev->name); } else { - PDEBUG(DEBUG_EXTRA, "%s: LinkStatus: BSSID=" 
MACSTR "\n", + PDEBUG(DEBUG_EXTRA, "%s: LinkStatus: BSSID=" + "%s\n", local->dev->name, - MAC2STR((unsigned char *) local->bssid)); + print_mac(mac, (unsigned char *) local->bssid)); if (local->wds_type & HOSTAP_WDS_AP_CLIENT) hostap_add_sta(local->ap, local->bssid); } diff --git a/drivers/net/wireless/hostap/hostap_ioctl.c b/drivers/net/wireless/hostap/hostap_ioctl.c index 8c71077d653cf10a076f1c2004c93eff5ec663aa..40f516d42c5e39894f4c14f45582852ce116fb7b 100644 --- a/drivers/net/wireless/hostap/hostap_ioctl.c +++ b/drivers/net/wireless/hostap/hostap_ioctl.c @@ -664,6 +664,7 @@ static int hostap_join_ap(struct net_device *dev) unsigned long flags; int i; struct hfa384x_hostscan_result *entry; + DECLARE_MAC_BUF(mac); iface = netdev_priv(dev); local = iface->local; @@ -685,14 +686,14 @@ static int hostap_join_ap(struct net_device *dev) if (local->func->set_rid(dev, HFA384X_RID_JOINREQUEST, &req, sizeof(req))) { - printk(KERN_DEBUG "%s: JoinRequest " MACSTR + printk(KERN_DEBUG "%s: JoinRequest %s" " failed\n", - dev->name, MAC2STR(local->preferred_ap)); + dev->name, print_mac(mac, local->preferred_ap)); return -1; } - printk(KERN_DEBUG "%s: Trying to join BSSID " MACSTR "\n", - dev->name, MAC2STR(local->preferred_ap)); + printk(KERN_DEBUG "%s: Trying to join BSSID %s\n", + dev->name, print_mac(mac, local->preferred_ap)); return 0; } @@ -896,11 +897,8 @@ static void hostap_monitor_set_type(local_info_t *local) if (local->monitor_type == PRISM2_MONITOR_PRISM || local->monitor_type == PRISM2_MONITOR_CAPHDR) { dev->type = ARPHRD_IEEE80211_PRISM; - dev->hard_header_parse = - hostap_80211_prism_header_parse; } else { dev->type = ARPHRD_IEEE80211; - dev->hard_header_parse = hostap_80211_header_parse; } } @@ -1140,7 +1138,7 @@ static int hostap_monitor_mode_disable(local_info_t *local) printk(KERN_DEBUG "%s: Disabling monitor mode\n", dev->name); dev->type = ARPHRD_ETHER; - dev->hard_header_parse = local->saved_eth_header_parse; + if (local->func->cmd(dev, HFA384X_CMDCODE_TEST | (HFA384X_TEST_STOP << 8), 0, NULL, NULL)) @@ -3088,7 +3086,7 @@ static int prism2_ioctl_priv_download(local_info_t *local, struct iw_point *p) static int prism2_set_genericelement(struct net_device *dev, u8 *elem, size_t len) { - struct hostap_interface *iface = dev->priv; + struct hostap_interface *iface = netdev_priv(dev); local_info_t *local = iface->local; u8 *buf; @@ -3116,7 +3114,7 @@ static int prism2_ioctl_siwauth(struct net_device *dev, struct iw_request_info *info, struct iw_param *data, char *extra) { - struct hostap_interface *iface = dev->priv; + struct hostap_interface *iface = netdev_priv(dev); local_info_t *local = iface->local; switch (data->flags & IW_AUTH_INDEX) { @@ -3182,7 +3180,7 @@ static int prism2_ioctl_giwauth(struct net_device *dev, struct iw_request_info *info, struct iw_param *data, char *extra) { - struct hostap_interface *iface = dev->priv; + struct hostap_interface *iface = netdev_priv(dev); local_info_t *local = iface->local; switch (data->flags & IW_AUTH_INDEX) { @@ -3221,7 +3219,7 @@ static int prism2_ioctl_siwencodeext(struct net_device *dev, struct iw_request_info *info, struct iw_point *erq, char *extra) { - struct hostap_interface *iface = dev->priv; + struct hostap_interface *iface = netdev_priv(dev); local_info_t *local = iface->local; struct iw_encode_ext *ext = (struct iw_encode_ext *) extra; int i, ret = 0; @@ -3395,7 +3393,7 @@ static int prism2_ioctl_giwencodeext(struct net_device *dev, struct iw_request_info *info, struct iw_point *erq, char *extra) { - struct hostap_interface 
*iface = dev->priv; + struct hostap_interface *iface = netdev_priv(dev); local_info_t *local = iface->local; struct ieee80211_crypt_data **crypt; void *sta_ptr; @@ -3697,8 +3695,10 @@ static int prism2_ioctl_set_assoc_ap_addr(local_info_t *local, struct prism2_hostapd_param *param, int param_len) { - printk(KERN_DEBUG "%ssta: associated as client with AP " MACSTR "\n", - local->dev->name, MAC2STR(param->sta_addr)); + DECLARE_MAC_BUF(mac); + printk(KERN_DEBUG "%ssta: associated as client with AP " + "%s\n", + local->dev->name, print_mac(mac, param->sta_addr)); memcpy(local->assoc_ap_addr, param->sta_addr, ETH_ALEN); return 0; } @@ -3716,7 +3716,7 @@ static int prism2_ioctl_giwgenie(struct net_device *dev, struct iw_request_info *info, struct iw_point *data, char *extra) { - struct hostap_interface *iface = dev->priv; + struct hostap_interface *iface = netdev_priv(dev); local_info_t *local = iface->local; int len = local->generic_elem_len - 2; @@ -3755,7 +3755,7 @@ static int prism2_ioctl_siwmlme(struct net_device *dev, struct iw_request_info *info, struct iw_point *data, char *extra) { - struct hostap_interface *iface = dev->priv; + struct hostap_interface *iface = netdev_priv(dev); local_info_t *local = iface->local; struct iw_mlme *mlme = (struct iw_mlme *) extra; u16 reason; @@ -3976,9 +3976,9 @@ static const iw_handler prism2_private_handler[] = const struct iw_handler_def hostap_iw_handler_def = { - .num_standard = sizeof(prism2_handler) / sizeof(iw_handler), - .num_private = sizeof(prism2_private_handler) / sizeof(iw_handler), - .num_private_args = sizeof(prism2_priv) / sizeof(struct iw_priv_args), + .num_standard = ARRAY_SIZE(prism2_handler), + .num_private = ARRAY_SIZE(prism2_private_handler), + .num_private_args = ARRAY_SIZE(prism2_priv), .standard = (iw_handler *) prism2_handler, .private = (iw_handler *) prism2_private_handler, .private_args = (struct iw_priv_args *) prism2_priv, diff --git a/drivers/net/wireless/hostap/hostap_main.c b/drivers/net/wireless/hostap/hostap_main.c index 446de51bab74399d8b18db7cd07e085701327693..17c58e9bdad51178583f5d5844805e52f463415a 100644 --- a/drivers/net/wireless/hostap/hostap_main.c +++ b/drivers/net/wireless/hostap/hostap_main.c @@ -24,6 +24,7 @@ #include #include #include +#include #include #include #include @@ -72,7 +73,7 @@ struct net_device * hostap_add_interface(struct local_info *local, dev->mem_start = mdev->mem_start; dev->mem_end = mdev->mem_end; - hostap_setup_dev(dev, local, 0); + hostap_setup_dev(dev, local, type); dev->destructor = free_netdev; sprintf(dev->name, "%s%s", prefix, name); @@ -529,6 +530,10 @@ int hostap_set_auth_algs(local_info_t *local) void hostap_dump_rx_header(const char *name, const struct hfa384x_rx_frame *rx) { u16 status, fc; + DECLARE_MAC_BUF(mac); + DECLARE_MAC_BUF(mac2); + DECLARE_MAC_BUF(mac3); + DECLARE_MAC_BUF(mac4); status = __le16_to_cpu(rx->status); @@ -547,13 +552,12 @@ void hostap_dump_rx_header(const char *name, const struct hfa384x_rx_frame *rx) fc & IEEE80211_FCTL_TODS ? " [ToDS]" : "", fc & IEEE80211_FCTL_FROMDS ? 
" [FromDS]" : ""); - printk(KERN_DEBUG " A1=" MACSTR " A2=" MACSTR " A3=" MACSTR " A4=" - MACSTR "\n", - MAC2STR(rx->addr1), MAC2STR(rx->addr2), MAC2STR(rx->addr3), - MAC2STR(rx->addr4)); + printk(KERN_DEBUG " A1=%s A2=%s A3=%s A4=%s\n", + print_mac(mac, rx->addr1), print_mac(mac2, rx->addr2), + print_mac(mac3, rx->addr3), print_mac(mac4, rx->addr4)); - printk(KERN_DEBUG " dst=" MACSTR " src=" MACSTR " len=%d\n", - MAC2STR(rx->dst_addr), MAC2STR(rx->src_addr), + printk(KERN_DEBUG " dst=%s src=%s len=%d\n", + print_mac(mac, rx->dst_addr), print_mac(mac2, rx->src_addr), __be16_to_cpu(rx->len)); } @@ -561,6 +565,10 @@ void hostap_dump_rx_header(const char *name, const struct hfa384x_rx_frame *rx) void hostap_dump_tx_header(const char *name, const struct hfa384x_tx_frame *tx) { u16 fc; + DECLARE_MAC_BUF(mac); + DECLARE_MAC_BUF(mac2); + DECLARE_MAC_BUF(mac3); + DECLARE_MAC_BUF(mac4); printk(KERN_DEBUG "%s: TX status=0x%04x retry_count=%d tx_rate=%d " "tx_control=0x%04x; jiffies=%ld\n", @@ -576,35 +584,37 @@ void hostap_dump_tx_header(const char *name, const struct hfa384x_tx_frame *tx) fc & IEEE80211_FCTL_TODS ? " [ToDS]" : "", fc & IEEE80211_FCTL_FROMDS ? " [FromDS]" : ""); - printk(KERN_DEBUG " A1=" MACSTR " A2=" MACSTR " A3=" MACSTR " A4=" - MACSTR "\n", - MAC2STR(tx->addr1), MAC2STR(tx->addr2), MAC2STR(tx->addr3), - MAC2STR(tx->addr4)); + printk(KERN_DEBUG " A1=%s A2=%s A3=%s A4=%s\n", + print_mac(mac, tx->addr1), print_mac(mac2, tx->addr2), + print_mac(mac3, tx->addr3), print_mac(mac4, tx->addr4)); - printk(KERN_DEBUG " dst=" MACSTR " src=" MACSTR " len=%d\n", - MAC2STR(tx->dst_addr), MAC2STR(tx->src_addr), + printk(KERN_DEBUG " dst=%s src=%s len=%d\n", + print_mac(mac, tx->dst_addr), print_mac(mac2, tx->src_addr), __be16_to_cpu(tx->len)); } -int hostap_80211_header_parse(struct sk_buff *skb, unsigned char *haddr) +int hostap_80211_header_parse(const struct sk_buff *skb, unsigned char *haddr) { - memcpy(haddr, skb_mac_header(skb) + 10, ETH_ALEN); /* addr2 */ - return ETH_ALEN; -} - + struct hostap_interface *iface = netdev_priv(skb->dev); + local_info_t *local = iface->local; + + if (local->monitor_type == PRISM2_MONITOR_PRISM || + local->monitor_type == PRISM2_MONITOR_CAPHDR) { + const unsigned char *mac = skb_mac_header(skb); + + if (*(u32 *)mac == LWNG_CAP_DID_BASE) { + memcpy(haddr, + mac + sizeof(struct linux_wlan_ng_prism_hdr) + 10, + ETH_ALEN); /* addr2 */ + } else { /* (*(u32 *)mac == htonl(LWNG_CAPHDR_VERSION)) */ + memcpy(haddr, + mac + sizeof(struct linux_wlan_ng_cap_hdr) + 10, + ETH_ALEN); /* addr2 */ + } + } else + memcpy(haddr, skb_mac_header(skb) + 10, ETH_ALEN); /* addr2 */ -int hostap_80211_prism_header_parse(struct sk_buff *skb, unsigned char *haddr) -{ - const unsigned char *mac = skb_mac_header(skb); - - if (*(u32 *)mac == LWNG_CAP_DID_BASE) { - memcpy(haddr, mac + sizeof(struct linux_wlan_ng_prism_hdr) + 10, - ETH_ALEN); /* addr2 */ - } else { /* (*(u32 *)mac == htonl(LWNG_CAPHDR_VERSION)) */ - memcpy(haddr, mac + sizeof(struct linux_wlan_ng_cap_hdr) + 10, - ETH_ALEN); /* addr2 */ - } return ETH_ALEN; } @@ -836,9 +846,18 @@ static void prism2_tx_timeout(struct net_device *dev) local->func->schedule_reset(local); } +const struct header_ops hostap_80211_ops = { + .create = eth_header, + .rebuild = eth_rebuild_header, + .cache = eth_header_cache, + .cache_update = eth_header_cache_update, + + .parse = hostap_80211_header_parse, +}; +EXPORT_SYMBOL(hostap_80211_ops); void hostap_setup_dev(struct net_device *dev, local_info_t *local, - int main_dev) + int type) { struct 
hostap_interface *iface; @@ -858,15 +877,22 @@ void hostap_setup_dev(struct net_device *dev, local_info_t *local, dev->do_ioctl = hostap_ioctl; dev->open = prism2_open; dev->stop = prism2_close; - dev->hard_start_xmit = hostap_data_start_xmit; dev->set_mac_address = prism2_set_mac_address; dev->set_multicast_list = hostap_set_multicast_list; dev->change_mtu = prism2_change_mtu; dev->tx_timeout = prism2_tx_timeout; dev->watchdog_timeo = TX_TIMEOUT; + if (type == HOSTAP_INTERFACE_AP) { + dev->hard_start_xmit = hostap_mgmt_start_xmit; + dev->type = ARPHRD_IEEE80211; + dev->header_ops = &hostap_80211_ops; + } else { + dev->hard_start_xmit = hostap_data_start_xmit; + } + dev->mtu = local->mtu; - if (!main_dev) { + if (type != HOSTAP_INTERFACE_MASTER) { /* use main radio device queue */ dev->tx_queue_len = 0; } @@ -876,7 +902,6 @@ void hostap_setup_dev(struct net_device *dev, local_info_t *local, netif_stop_queue(dev); } - static int hostap_enable_hostapd(local_info_t *local, int rtnl_locked) { struct net_device *dev = local->dev; @@ -892,10 +917,6 @@ static int hostap_enable_hostapd(local_info_t *local, int rtnl_locked) if (local->apdev == NULL) return -ENOMEM; - local->apdev->hard_start_xmit = hostap_mgmt_start_xmit; - local->apdev->type = ARPHRD_IEEE80211; - local->apdev->hard_header_parse = hostap_80211_header_parse; - return 0; } @@ -1093,8 +1114,8 @@ struct proc_dir_entry *hostap_proc; static int __init hostap_init(void) { - if (proc_net != NULL) { - hostap_proc = proc_mkdir("hostap", proc_net); + if (init_net.proc_net != NULL) { + hostap_proc = proc_mkdir("hostap", init_net.proc_net); if (!hostap_proc) printk(KERN_WARNING "Failed to mkdir " "/proc/net/hostap\n"); @@ -1109,7 +1130,7 @@ static void __exit hostap_exit(void) { if (hostap_proc != NULL) { hostap_proc = NULL; - remove_proc_entry("hostap", proc_net); + remove_proc_entry("hostap", init_net.proc_net); } } diff --git a/drivers/net/wireless/hostap/hostap_proc.c b/drivers/net/wireless/hostap/hostap_proc.c index d1d8ce022e63af6d3060589e504adc3765df085d..b03536008ad98df43218fe8a9aef9e7b05fb0fe2 100644 --- a/drivers/net/wireless/hostap/hostap_proc.c +++ b/drivers/net/wireless/hostap/hostap_proc.c @@ -106,6 +106,7 @@ static int prism2_wds_proc_read(char *page, char **start, off_t off, local_info_t *local = (local_info_t *) data; struct list_head *ptr; struct hostap_interface *iface; + DECLARE_MAC_BUF(mac); if (off > PROC_LIMIT) { *eof = 1; @@ -117,9 +118,9 @@ static int prism2_wds_proc_read(char *page, char **start, off_t off, iface = list_entry(ptr, struct hostap_interface, list); if (iface->type != HOSTAP_INTERFACE_WDS) continue; - p += sprintf(p, "%s\t" MACSTR "\n", + p += sprintf(p, "%s\t%s\n", iface->dev->name, - MAC2STR(iface->u.wds.remote_addr)); + print_mac(mac, iface->u.wds.remote_addr)); if ((p - page) > PROC_LIMIT) { printk(KERN_DEBUG "%s: wds proc did not fit\n", local->dev->name); @@ -147,6 +148,7 @@ static int prism2_bss_list_proc_read(char *page, char **start, off_t off, struct list_head *ptr; struct hostap_bss_info *bss; int i; + DECLARE_MAC_BUF(mac); if (off > PROC_LIMIT) { *eof = 1; @@ -158,8 +160,8 @@ static int prism2_bss_list_proc_read(char *page, char **start, off_t off, spin_lock_bh(&local->lock); list_for_each(ptr, &local->bss_list) { bss = list_entry(ptr, struct hostap_bss_info, list); - p += sprintf(p, MACSTR "\t%lu\t%u\t0x%x\t", - MAC2STR(bss->bssid), bss->last_update, + p += sprintf(p, "%s\t%lu\t%u\t0x%x\t", + print_mac(mac, bss->bssid), bss->last_update, bss->count, bss->capab_info); for (i = 0; i < 
bss->ssid_len; i++) { p += sprintf(p, "%c", @@ -312,6 +314,7 @@ static int prism2_scan_results_proc_read(char *page, char **start, off_t off, int entry, i, len, total = 0; struct hfa384x_hostscan_result *scanres; u8 *pos; + DECLARE_MAC_BUF(mac); p += sprintf(p, "CHID ANL SL BcnInt Capab Rate BSSID ATIM SupRates " "SSID\n"); @@ -329,14 +332,14 @@ static int prism2_scan_results_proc_read(char *page, char **start, off_t off, if ((p - page) > (PAGE_SIZE - 200)) break; - p += sprintf(p, "%d %d %d %d 0x%02x %d " MACSTR " %d ", + p += sprintf(p, "%d %d %d %d 0x%02x %d %s %d ", le16_to_cpu(scanres->chid), (s16) le16_to_cpu(scanres->anl), (s16) le16_to_cpu(scanres->sl), le16_to_cpu(scanres->beacon_interval), le16_to_cpu(scanres->capability), le16_to_cpu(scanres->rate), - MAC2STR(scanres->bssid), + print_mac(mac, scanres->bssid), le16_to_cpu(scanres->atim)); pos = scanres->sup_rates; diff --git a/drivers/net/wireless/hostap/hostap_wlan.h b/drivers/net/wireless/hostap/hostap_wlan.h index 87a54aa6f4dd2a1e2c1bd6cdbf393cb9f7aa4411..c27b2c1c06af5de29aedbd996e32dc2787423866 100644 --- a/drivers/net/wireless/hostap/hostap_wlan.h +++ b/drivers/net/wireless/hostap/hostap_wlan.h @@ -3,6 +3,7 @@ #include #include +#include #include #include "hostap_config.h" @@ -641,7 +642,7 @@ struct local_info { * when removing entries from the list. * TX and RX paths can use read lock. */ spinlock_t cmdlock, baplock, lock; - struct semaphore rid_bap_sem; + struct mutex rid_bap_mtx; u16 infofid; /* MAC buffer id for info frame */ /* txfid, intransmitfid, next_txtid, and next_alloc are protected by * txfidlock */ @@ -735,8 +736,6 @@ struct local_info { PRISM2_MONITOR_80211 = 0, PRISM2_MONITOR_PRISM = 1, PRISM2_MONITOR_CAPHDR = 2 } monitor_type; - int (*saved_eth_header_parse)(struct sk_buff *skb, - unsigned char *haddr); int monitor_allow_fcserr; int hostapd; /* whether user space daemon, hostapd, is used for AP diff --git a/drivers/net/wireless/ipw2100.c b/drivers/net/wireless/ipw2100.c index 8990585bd2285503faa941fccf5b5e408a36488b..2d46a16c09450c73d3ea5121af6f17391eca2fee 100644 --- a/drivers/net/wireless/ipw2100.c +++ b/drivers/net/wireless/ipw2100.c @@ -1922,6 +1922,7 @@ static void isr_indicate_associated(struct ipw2100_priv *priv, u32 status) u32 chan; char *txratename; u8 bssid[ETH_ALEN]; + DECLARE_MAC_BUF(mac); /* * TBD: BSSID is usually 00:00:00:00:00:00 here and not @@ -1983,9 +1984,9 @@ static void isr_indicate_associated(struct ipw2100_priv *priv, u32 status) } IPW_DEBUG_INFO("%s: Associated with '%s' at %s, channel %d (BSSID=" - MAC_FMT ")\n", + "%s)\n", priv->net_dev->name, escape_essid(essid, essid_len), - txratename, chan, MAC_ARG(bssid)); + txratename, chan, print_mac(mac, bssid)); /* now we copy read ssid into dev */ if (!(priv->config & CFG_STATIC_ESSID)) { @@ -2053,10 +2054,12 @@ static int ipw2100_set_essid(struct ipw2100_priv *priv, char *essid, static void isr_indicate_association_lost(struct ipw2100_priv *priv, u32 status) { + DECLARE_MAC_BUF(mac); + IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE | IPW_DL_ASSOC, - "disassociated: '%s' " MAC_FMT " \n", + "disassociated: '%s' %s \n", escape_essid(priv->essid, priv->essid_len), - MAC_ARG(priv->bssid)); + print_mac(mac, priv->bssid)); priv->status &= ~(STATUS_ASSOCIATED | STATUS_ASSOCIATING); @@ -4049,6 +4052,7 @@ static ssize_t show_bssinfo(struct device *d, struct device_attribute *attr, char *out = buf; int length; int ret; + DECLARE_MAC_BUF(mac); if (priv->status & STATUS_RF_KILL_MASK) return 0; @@ -4076,9 +4080,7 @@ static ssize_t show_bssinfo(struct device 
*d, struct device_attribute *attr, __LINE__); out += sprintf(out, "ESSID: %s\n", essid); - out += sprintf(out, "BSSID: %02x:%02x:%02x:%02x:%02x:%02x\n", - bssid[0], bssid[1], bssid[2], - bssid[3], bssid[4], bssid[5]); + out += sprintf(out, "BSSID: %s\n", print_mac(mac, bssid)); out += sprintf(out, "Channel: %d\n", chan); return out - buf; @@ -4652,19 +4654,20 @@ static void ipw2100_rx_free(struct ipw2100_priv *priv) static int ipw2100_read_mac_address(struct ipw2100_priv *priv) { u32 length = ETH_ALEN; - u8 mac[ETH_ALEN]; + u8 addr[ETH_ALEN]; + DECLARE_MAC_BUF(mac); int err; - err = ipw2100_get_ordinal(priv, IPW_ORD_STAT_ADAPTER_MAC, mac, &length); + err = ipw2100_get_ordinal(priv, IPW_ORD_STAT_ADAPTER_MAC, addr, &length); if (err) { IPW_DEBUG_INFO("MAC address read failed\n"); return -EIO; } - IPW_DEBUG_INFO("card MAC is %02X:%02X:%02X:%02X:%02X:%02X\n", - mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]); - memcpy(priv->net_dev->dev_addr, mac, ETH_ALEN); + memcpy(priv->net_dev->dev_addr, addr, ETH_ALEN); + IPW_DEBUG_INFO("card MAC is %s\n", + print_mac(mac, priv->net_dev->dev_addr)); return 0; } @@ -5043,10 +5046,10 @@ static int ipw2100_set_mandatory_bssid(struct ipw2100_priv *priv, u8 * bssid, int err; #ifdef CONFIG_IPW2100_DEBUG + DECLARE_MAC_BUF(mac); if (bssid != NULL) - IPW_DEBUG_HC("MANDATORY_BSSID: %02X:%02X:%02X:%02X:%02X:%02X\n", - bssid[0], bssid[1], bssid[2], bssid[3], bssid[4], - bssid[5]); + IPW_DEBUG_HC("MANDATORY_BSSID: %s\n", + print_mac(mac, bssid)); else IPW_DEBUG_HC("MANDATORY_BSSID: \n"); #endif @@ -6239,8 +6242,6 @@ static int ipw2100_pci_init_one(struct pci_dev *pci_dev, IPW_DEBUG_INFO("Attempting to register device...\n"); - SET_MODULE_OWNER(dev); - printk(KERN_INFO DRV_NAME ": Detected Intel PRO/Wireless 2100 Network Connection\n"); @@ -6894,6 +6895,7 @@ static int ipw2100_wx_set_wap(struct net_device *dev, static const unsigned char off[] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }; + DECLARE_MAC_BUF(mac); // sanity checks if (wrqu->ap_addr.sa_family != ARPHRD_ETHER) @@ -6919,13 +6921,8 @@ static int ipw2100_wx_set_wap(struct net_device *dev, err = ipw2100_set_mandatory_bssid(priv, wrqu->ap_addr.sa_data, 0); - IPW_DEBUG_WX("SET BSSID -> %02X:%02X:%02X:%02X:%02X:%02X\n", - wrqu->ap_addr.sa_data[0] & 0xff, - wrqu->ap_addr.sa_data[1] & 0xff, - wrqu->ap_addr.sa_data[2] & 0xff, - wrqu->ap_addr.sa_data[3] & 0xff, - wrqu->ap_addr.sa_data[4] & 0xff, - wrqu->ap_addr.sa_data[5] & 0xff); + IPW_DEBUG_WX("SET BSSID -> %s\n", + print_mac(mac, wrqu->ap_addr.sa_data)); done: mutex_unlock(&priv->action_mutex); @@ -6941,6 +6938,7 @@ static int ipw2100_wx_get_wap(struct net_device *dev, */ struct ipw2100_priv *priv = ieee80211_priv(dev); + DECLARE_MAC_BUF(mac); /* If we are associated, trying to associate, or have a statically * configured BSSID then return that; otherwise return ANY */ @@ -6950,8 +6948,8 @@ static int ipw2100_wx_get_wap(struct net_device *dev, } else memset(wrqu->ap_addr.sa_data, 0, ETH_ALEN); - IPW_DEBUG_WX("Getting WAP BSSID: " MAC_FMT "\n", - MAC_ARG(wrqu->ap_addr.sa_data)); + IPW_DEBUG_WX("Getting WAP BSSID: %s\n", + print_mac(mac, wrqu->ap_addr.sa_data)); return 0; } @@ -8279,10 +8277,9 @@ static struct iw_statistics *ipw2100_wx_wireless_stats(struct net_device *dev) static struct iw_handler_def ipw2100_wx_handler_def = { .standard = ipw2100_wx_handlers, - .num_standard = sizeof(ipw2100_wx_handlers) / sizeof(iw_handler), - .num_private = sizeof(ipw2100_private_handler) / sizeof(iw_handler), - .num_private_args = sizeof(ipw2100_private_args) / - sizeof(struct 
iw_priv_args), + .num_standard = ARRAY_SIZE(ipw2100_wx_handlers), + .num_private = ARRAY_SIZE(ipw2100_private_handler), + .num_private_args = ARRAY_SIZE(ipw2100_private_args), .private = (iw_handler *) ipw2100_private_handler, .private_args = (struct iw_priv_args *)ipw2100_private_args, .get_wireless_stats = ipw2100_wx_wireless_stats, diff --git a/drivers/net/wireless/ipw2200.c b/drivers/net/wireless/ipw2200.c index 61497c4674671698f932d3228b5abf18deceb887..feb8fcbab2d5a35b4c81dbd449c64b5a0dd5a174 100644 --- a/drivers/net/wireless/ipw2200.c +++ b/drivers/net/wireless/ipw2200.c @@ -1740,8 +1740,10 @@ static int ipw_radio_kill_sw(struct ipw_priv *priv, int disable_radio) if (disable_radio) { priv->status |= STATUS_RF_KILL_SW; - if (priv->workqueue) + if (priv->workqueue) { cancel_delayed_work(&priv->request_scan); + cancel_delayed_work(&priv->scan_event); + } queue_work(priv->workqueue, &priv->down); } else { priv->status &= ~STATUS_RF_KILL_SW; @@ -1992,6 +1994,7 @@ static void ipw_irq_tasklet(struct ipw_priv *priv) wake_up_interruptible(&priv->wait_command_queue); priv->status &= ~(STATUS_ASSOCIATED | STATUS_ASSOCIATING); cancel_delayed_work(&priv->request_scan); + cancel_delayed_work(&priv->scan_event); schedule_work(&priv->link_down); queue_delayed_work(priv->workqueue, &priv->rf_kill, 2 * HZ); handled |= IPW_INTA_BIT_RF_KILL_DONE; @@ -2247,8 +2250,8 @@ static int ipw_send_adapter_address(struct ipw_priv *priv, u8 * mac) return -1; } - IPW_DEBUG_INFO("%s: Setting MAC to " MAC_FMT "\n", - priv->net_dev->name, MAC_ARG(mac)); + IPW_DEBUG_INFO("%s: Setting MAC to %s\n", + priv->net_dev->name, print_mac(mac, mac)); return ipw_send_cmd_pdu(priv, IPW_CMD_ADAPTER_ADDRESS, ETH_ALEN, mac); } @@ -3796,6 +3799,7 @@ static u8 ipw_add_station(struct ipw_priv *priv, u8 * bssid) { struct ipw_station_entry entry; int i; + DECLARE_MAC_BUF(mac); for (i = 0; i < priv->num_stations; i++) { if (!memcmp(priv->stations[i], bssid, ETH_ALEN)) { @@ -3812,7 +3816,7 @@ static u8 ipw_add_station(struct ipw_priv *priv, u8 * bssid) if (i == MAX_STATIONS) return IPW_INVALID_STATION; - IPW_DEBUG_SCAN("Adding AdHoc station: " MAC_FMT "\n", MAC_ARG(bssid)); + IPW_DEBUG_SCAN("Adding AdHoc station: %s\n", print_mac(mac, bssid)); entry.reserved = 0; entry.support_mode = 0; @@ -3839,6 +3843,7 @@ static u8 ipw_find_station(struct ipw_priv *priv, u8 * bssid) static void ipw_send_disassociate(struct ipw_priv *priv, int quiet) { int err; + DECLARE_MAC_BUF(mac); if (priv->status & STATUS_ASSOCIATING) { IPW_DEBUG_ASSOC("Disassociating while associating.\n"); @@ -3851,9 +3856,9 @@ static void ipw_send_disassociate(struct ipw_priv *priv, int quiet) return; } - IPW_DEBUG_ASSOC("Disassocation attempt from " MAC_FMT " " + IPW_DEBUG_ASSOC("Disassocation attempt from %s " "on channel %d.\n", - MAC_ARG(priv->assoc_request.bssid), + print_mac(mac, priv->assoc_request.bssid), priv->assoc_request.channel); priv->status &= ~(STATUS_ASSOCIATING | STATUS_ASSOCIATED); @@ -4341,6 +4346,37 @@ static void ipw_handle_missed_beacon(struct ipw_priv *priv, IPW_DEBUG_NOTIF("Missed beacon: %d\n", missed_count); } +static void ipw_scan_event(struct work_struct *work) +{ + union iwreq_data wrqu; + + struct ipw_priv *priv = + container_of(work, struct ipw_priv, scan_event.work); + + wrqu.data.length = 0; + wrqu.data.flags = 0; + wireless_send_event(priv->net_dev, SIOCGIWSCAN, &wrqu, NULL); +} + +static void handle_scan_event(struct ipw_priv *priv) +{ + /* Only userspace-requested scan completion events go out immediately */ + if (!priv->user_requested_scan) { 
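/*
 * Example only, not part of the patch: the iw_handler_def hunks above
 * (hostap_iw_handler_def and ipw2100_wx_handler_def) replace open-coded
 * sizeof(array)/sizeof(element) divisions with ARRAY_SIZE() from
 * <linux/kernel.h>, which is shorter and, in kernels of this era, also
 * rejects a plain pointer at build time.  The table below is invented.
 */
#include <linux/kernel.h>

static const int demo_rates[] = { 10, 20, 55, 110 };
static const unsigned int demo_nrates = ARRAY_SIZE(demo_rates);	/* 4 */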
+ if (!delayed_work_pending(&priv->scan_event)) + queue_delayed_work(priv->workqueue, &priv->scan_event, + round_jiffies(msecs_to_jiffies(4000))); + } else { + union iwreq_data wrqu; + + priv->user_requested_scan = 0; + cancel_delayed_work(&priv->scan_event); + + wrqu.data.length = 0; + wrqu.data.flags = 0; + wireless_send_event(priv->net_dev, SIOCGIWSCAN, &wrqu, NULL); + } +} + /** * Handle host notification packet. * Called from interrupt routine @@ -4348,6 +4384,7 @@ static void ipw_handle_missed_beacon(struct ipw_priv *priv, static void ipw_rx_notification(struct ipw_priv *priv, struct ipw_rx_notification *notif) { + DECLARE_MAC_BUF(mac); notif->size = le16_to_cpu(notif->size); IPW_DEBUG_NOTIF("type = %i (%d bytes)\n", notif->subtype, notif->size); @@ -4360,11 +4397,11 @@ static void ipw_rx_notification(struct ipw_priv *priv, case CMAS_ASSOCIATED:{ IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE | IPW_DL_ASSOC, - "associated: '%s' " MAC_FMT + "associated: '%s' %s" " \n", escape_essid(priv->essid, priv->essid_len), - MAC_ARG(priv->bssid)); + print_mac(mac, priv->bssid)); switch (priv->ieee->iw_mode) { case IW_MODE_INFRA: @@ -4444,13 +4481,13 @@ static void ipw_rx_notification(struct ipw_priv *priv, IPW_DL_STATE | IPW_DL_ASSOC, "deauthenticated: '%s' " - MAC_FMT + "%s" ": (0x%04X) - %s \n", escape_essid(priv-> essid, priv-> essid_len), - MAC_ARG(priv->bssid), + print_mac(mac, priv->bssid), ntohs(auth->status), ipw_get_status_code (ntohs @@ -4467,11 +4504,11 @@ static void ipw_rx_notification(struct ipw_priv *priv, IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE | IPW_DL_ASSOC, - "authenticated: '%s' " MAC_FMT + "authenticated: '%s' %s" "\n", escape_essid(priv->essid, priv->essid_len), - MAC_ARG(priv->bssid)); + print_mac(mac, priv->bssid)); break; } @@ -4496,11 +4533,11 @@ static void ipw_rx_notification(struct ipw_priv *priv, IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE | IPW_DL_ASSOC, - "disassociated: '%s' " MAC_FMT + "disassociated: '%s' %s" " \n", escape_essid(priv->essid, priv->essid_len), - MAC_ARG(priv->bssid)); + print_mac(mac, priv->bssid)); priv->status &= ~(STATUS_DISASSOCIATING | @@ -4535,10 +4572,10 @@ static void ipw_rx_notification(struct ipw_priv *priv, switch (auth->state) { case CMAS_AUTHENTICATED: IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE, - "authenticated: '%s' " MAC_FMT " \n", + "authenticated: '%s' %s \n", escape_essid(priv->essid, priv->essid_len), - MAC_ARG(priv->bssid)); + print_mac(mac, priv->bssid)); priv->status |= STATUS_AUTH; break; @@ -4554,10 +4591,10 @@ static void ipw_rx_notification(struct ipw_priv *priv, } IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE | IPW_DL_ASSOC, - "deauthenticated: '%s' " MAC_FMT "\n", + "deauthenticated: '%s' %s\n", escape_essid(priv->essid, priv->essid_len), - MAC_ARG(priv->bssid)); + print_mac(mac, priv->bssid)); priv->status &= ~(STATUS_ASSOCIATING | STATUS_AUTH | @@ -4702,14 +4739,8 @@ static void ipw_rx_notification(struct ipw_priv *priv, * on how the scan was initiated. User space can just * sync on periodic scan to get fresh data... 
* Jean II */ - if (x->status == SCAN_COMPLETED_STATUS_COMPLETE) { - union iwreq_data wrqu; - - wrqu.data.length = 0; - wrqu.data.flags = 0; - wireless_send_event(priv->net_dev, SIOCGIWSCAN, - &wrqu, NULL); - } + if (x->status == SCAN_COMPLETED_STATUS_COMPLETE) + handle_scan_event(priv); break; } @@ -5383,25 +5414,27 @@ static int ipw_find_adhoc_network(struct ipw_priv *priv, int roaming) { struct ipw_supported_rates rates; + DECLARE_MAC_BUF(mac); + DECLARE_MAC_BUF(mac2); /* Verify that this network's capability is compatible with the * current mode (AdHoc or Infrastructure) */ if ((priv->ieee->iw_mode == IW_MODE_ADHOC && !(network->capability & WLAN_CAPABILITY_IBSS))) { - IPW_DEBUG_MERGE("Network '%s (" MAC_FMT ")' excluded due to " + IPW_DEBUG_MERGE("Network '%s (%s)' excluded due to " "capability mismatch.\n", escape_essid(network->ssid, network->ssid_len), - MAC_ARG(network->bssid)); + print_mac(mac, network->bssid)); return 0; } /* If we do not have an ESSID for this AP, we can not associate with * it */ if (network->flags & NETWORK_EMPTY_ESSID) { - IPW_DEBUG_MERGE("Network '%s (" MAC_FMT ")' excluded " + IPW_DEBUG_MERGE("Network '%s (%s)' excluded " "because of hidden ESSID.\n", escape_essid(network->ssid, network->ssid_len), - MAC_ARG(network->bssid)); + print_mac(mac, network->bssid)); return 0; } @@ -5411,11 +5444,11 @@ static int ipw_find_adhoc_network(struct ipw_priv *priv, if ((network->ssid_len != match->network->ssid_len) || memcmp(network->ssid, match->network->ssid, network->ssid_len)) { - IPW_DEBUG_MERGE("Netowrk '%s (" MAC_FMT ")' excluded " + IPW_DEBUG_MERGE("Network '%s (%s)' excluded " "because of non-network ESSID.\n", escape_essid(network->ssid, network->ssid_len), - MAC_ARG(network->bssid)); + print_mac(mac, network->bssid)); return 0; } } else { @@ -5430,9 +5463,9 @@ static int ipw_find_adhoc_network(struct ipw_priv *priv, strncpy(escaped, escape_essid(network->ssid, network->ssid_len), sizeof(escaped)); - IPW_DEBUG_MERGE("Network '%s (" MAC_FMT ")' excluded " + IPW_DEBUG_MERGE("Network '%s (%s)' excluded " "because of ESSID mismatch: '%s'.\n", - escaped, MAC_ARG(network->bssid), + escaped, print_mac(mac, network->bssid), escape_essid(priv->essid, priv->essid_len)); return 0; @@ -5459,10 +5492,10 @@ static int ipw_find_adhoc_network(struct ipw_priv *priv, /* Now go through and see if the requested network is valid... */ if (priv->ieee->scan_age != 0 && time_after(jiffies, network->last_scanned + priv->ieee->scan_age)) { - IPW_DEBUG_MERGE("Network '%s (" MAC_FMT ")' excluded " + IPW_DEBUG_MERGE("Network '%s (%s)' excluded " "because of age: %ums.\n", escape_essid(network->ssid, network->ssid_len), - MAC_ARG(network->bssid), + print_mac(mac, network->bssid), jiffies_to_msecs(jiffies - network->last_scanned)); return 0; @@ -5470,10 +5503,10 @@ static int ipw_find_adhoc_network(struct ipw_priv *priv, if ((priv->config & CFG_STATIC_CHANNEL) && (network->channel != priv->channel)) { - IPW_DEBUG_MERGE("Network '%s (" MAC_FMT ")' excluded " + IPW_DEBUG_MERGE("Network '%s (%s)' excluded " "because of channel mismatch: %d != %d.\n", escape_essid(network->ssid, network->ssid_len), - MAC_ARG(network->bssid), + print_mac(mac, network->bssid), network->channel, priv->channel); return 0; } @@ -5481,10 +5514,10 @@ static int ipw_find_adhoc_network(struct ipw_priv *priv, /* Verify privacy compatability */ if (((priv->capability & CAP_PRIVACY_ON) ? 1 : 0) != ((network->capability & WLAN_CAPABILITY_PRIVACY) ? 
1 : 0)) { - IPW_DEBUG_MERGE("Network '%s (" MAC_FMT ")' excluded " + IPW_DEBUG_MERGE("Network '%s (%s)' excluded " "because of privacy mismatch: %s != %s.\n", escape_essid(network->ssid, network->ssid_len), - MAC_ARG(network->bssid), + print_mac(mac, network->bssid), priv-> capability & CAP_PRIVACY_ON ? "on" : "off", network-> @@ -5494,40 +5527,41 @@ static int ipw_find_adhoc_network(struct ipw_priv *priv, } if (!memcmp(network->bssid, priv->bssid, ETH_ALEN)) { - IPW_DEBUG_MERGE("Network '%s (" MAC_FMT ")' excluded " - "because of the same BSSID match: " MAC_FMT + IPW_DEBUG_MERGE("Network '%s (%s)' excluded " + "because of the same BSSID match: %s" ".\n", escape_essid(network->ssid, network->ssid_len), - MAC_ARG(network->bssid), MAC_ARG(priv->bssid)); + print_mac(mac, network->bssid), + print_mac(mac2, priv->bssid)); return 0; } /* Filter out any incompatible freq / mode combinations */ if (!ieee80211_is_valid_mode(priv->ieee, network->mode)) { - IPW_DEBUG_MERGE("Network '%s (" MAC_FMT ")' excluded " + IPW_DEBUG_MERGE("Network '%s (%s)' excluded " "because of invalid frequency/mode " "combination.\n", escape_essid(network->ssid, network->ssid_len), - MAC_ARG(network->bssid)); + print_mac(mac, network->bssid)); return 0; } /* Ensure that the rates supported by the driver are compatible with * this AP, including verification of basic rates (mandatory) */ if (!ipw_compatible_rates(priv, network, &rates)) { - IPW_DEBUG_MERGE("Network '%s (" MAC_FMT ")' excluded " + IPW_DEBUG_MERGE("Network '%s (%s)' excluded " "because configured rate mask excludes " "AP mandatory rate.\n", escape_essid(network->ssid, network->ssid_len), - MAC_ARG(network->bssid)); + print_mac(mac, network->bssid)); return 0; } if (rates.num_rates == 0) { - IPW_DEBUG_MERGE("Network '%s (" MAC_FMT ")' excluded " + IPW_DEBUG_MERGE("Network '%s (%s)' excluded " "because of no compatible rates.\n", escape_essid(network->ssid, network->ssid_len), - MAC_ARG(network->bssid)); + print_mac(mac, network->bssid)); return 0; } @@ -5538,9 +5572,9 @@ static int ipw_find_adhoc_network(struct ipw_priv *priv, /* Set up 'new' AP to this network */ ipw_copy_rates(&match->rates, &rates); match->network = network; - IPW_DEBUG_MERGE("Network '%s (" MAC_FMT ")' is a viable match.\n", + IPW_DEBUG_MERGE("Network '%s (%s)' is a viable match.\n", escape_essid(network->ssid, network->ssid_len), - MAC_ARG(network->bssid)); + print_mac(mac, network->bssid)); return 1; } @@ -5594,6 +5628,7 @@ static int ipw_best_network(struct ipw_priv *priv, struct ieee80211_network *network, int roaming) { struct ipw_supported_rates rates; + DECLARE_MAC_BUF(mac); /* Verify that this network's capability is compatible with the * current mode (AdHoc or Infrastructure) */ @@ -5601,20 +5636,20 @@ static int ipw_best_network(struct ipw_priv *priv, !(network->capability & WLAN_CAPABILITY_ESS)) || (priv->ieee->iw_mode == IW_MODE_ADHOC && !(network->capability & WLAN_CAPABILITY_IBSS))) { - IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded due to " + IPW_DEBUG_ASSOC("Network '%s (%s)' excluded due to " "capability mismatch.\n", escape_essid(network->ssid, network->ssid_len), - MAC_ARG(network->bssid)); + print_mac(mac, network->bssid)); return 0; } /* If we do not have an ESSID for this AP, we can not associate with * it */ if (network->flags & NETWORK_EMPTY_ESSID) { - IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded " + IPW_DEBUG_ASSOC("Network '%s (%s)' excluded " "because of hidden ESSID.\n", escape_essid(network->ssid, network->ssid_len), - MAC_ARG(network->bssid)); + 
print_mac(mac, network->bssid)); return 0; } @@ -5624,11 +5659,11 @@ static int ipw_best_network(struct ipw_priv *priv, if ((network->ssid_len != match->network->ssid_len) || memcmp(network->ssid, match->network->ssid, network->ssid_len)) { - IPW_DEBUG_ASSOC("Netowrk '%s (" MAC_FMT ")' excluded " + IPW_DEBUG_ASSOC("Network '%s (%s)' excluded " "because of non-network ESSID.\n", escape_essid(network->ssid, network->ssid_len), - MAC_ARG(network->bssid)); + print_mac(mac, network->bssid)); return 0; } } else { @@ -5642,9 +5677,9 @@ static int ipw_best_network(struct ipw_priv *priv, strncpy(escaped, escape_essid(network->ssid, network->ssid_len), sizeof(escaped)); - IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded " + IPW_DEBUG_ASSOC("Network '%s (%s)' excluded " "because of ESSID mismatch: '%s'.\n", - escaped, MAC_ARG(network->bssid), + escaped, print_mac(mac, network->bssid), escape_essid(priv->essid, priv->essid_len)); return 0; @@ -5658,12 +5693,12 @@ static int ipw_best_network(struct ipw_priv *priv, strncpy(escaped, escape_essid(network->ssid, network->ssid_len), sizeof(escaped)); - IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded because " - "'%s (" MAC_FMT ")' has a stronger signal.\n", - escaped, MAC_ARG(network->bssid), + IPW_DEBUG_ASSOC("Network '%s (%s)' excluded because " + "'%s (%s)' has a stronger signal.\n", + escaped, print_mac(mac, network->bssid), escape_essid(match->network->ssid, match->network->ssid_len), - MAC_ARG(match->network->bssid)); + print_mac(mac, match->network->bssid)); return 0; } @@ -5671,11 +5706,11 @@ static int ipw_best_network(struct ipw_priv *priv, * last 3 seconds, do not try and associate again... */ if (network->last_associate && time_after(network->last_associate + (HZ * 3UL), jiffies)) { - IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded " + IPW_DEBUG_ASSOC("Network '%s (%s)' excluded " "because of storming (%ums since last " "assoc attempt).\n", escape_essid(network->ssid, network->ssid_len), - MAC_ARG(network->bssid), + print_mac(mac, network->bssid), jiffies_to_msecs(jiffies - network->last_associate)); return 0; @@ -5684,10 +5719,10 @@ static int ipw_best_network(struct ipw_priv *priv, /* Now go through and see if the requested network is valid... */ if (priv->ieee->scan_age != 0 && time_after(jiffies, network->last_scanned + priv->ieee->scan_age)) { - IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded " + IPW_DEBUG_ASSOC("Network '%s (%s)' excluded " "because of age: %ums.\n", escape_essid(network->ssid, network->ssid_len), - MAC_ARG(network->bssid), + print_mac(mac, network->bssid), jiffies_to_msecs(jiffies - network->last_scanned)); return 0; @@ -5695,10 +5730,10 @@ static int ipw_best_network(struct ipw_priv *priv, if ((priv->config & CFG_STATIC_CHANNEL) && (network->channel != priv->channel)) { - IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded " + IPW_DEBUG_ASSOC("Network '%s (%s)' excluded " "because of channel mismatch: %d != %d.\n", escape_essid(network->ssid, network->ssid_len), - MAC_ARG(network->bssid), + print_mac(mac, network->bssid), network->channel, priv->channel); return 0; } @@ -5706,10 +5741,10 @@ static int ipw_best_network(struct ipw_priv *priv, /* Verify privacy compatability */ if (((priv->capability & CAP_PRIVACY_ON) ? 1 : 0) != ((network->capability & WLAN_CAPABILITY_PRIVACY) ? 
1 : 0)) { - IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded " + IPW_DEBUG_ASSOC("Network '%s (%s)' excluded " "because of privacy mismatch: %s != %s.\n", escape_essid(network->ssid, network->ssid_len), - MAC_ARG(network->bssid), + print_mac(mac, network->bssid), priv->capability & CAP_PRIVACY_ON ? "on" : "off", network->capability & @@ -5719,48 +5754,48 @@ static int ipw_best_network(struct ipw_priv *priv, if ((priv->config & CFG_STATIC_BSSID) && memcmp(network->bssid, priv->bssid, ETH_ALEN)) { - IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded " - "because of BSSID mismatch: " MAC_FMT ".\n", + IPW_DEBUG_ASSOC("Network '%s (%s)' excluded " + "because of BSSID mismatch: %s.\n", escape_essid(network->ssid, network->ssid_len), - MAC_ARG(network->bssid), MAC_ARG(priv->bssid)); + print_mac(mac, network->bssid), print_mac(mac, priv->bssid)); return 0; } /* Filter out any incompatible freq / mode combinations */ if (!ieee80211_is_valid_mode(priv->ieee, network->mode)) { - IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded " + IPW_DEBUG_ASSOC("Network '%s (%s)' excluded " "because of invalid frequency/mode " "combination.\n", escape_essid(network->ssid, network->ssid_len), - MAC_ARG(network->bssid)); + print_mac(mac, network->bssid)); return 0; } /* Filter out invalid channel in current GEO */ if (!ieee80211_is_valid_channel(priv->ieee, network->channel)) { - IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded " + IPW_DEBUG_ASSOC("Network '%s (%s)' excluded " "because of invalid channel in current GEO\n", escape_essid(network->ssid, network->ssid_len), - MAC_ARG(network->bssid)); + print_mac(mac, network->bssid)); return 0; } /* Ensure that the rates supported by the driver are compatible with * this AP, including verification of basic rates (mandatory) */ if (!ipw_compatible_rates(priv, network, &rates)) { - IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded " + IPW_DEBUG_ASSOC("Network '%s (%s)' excluded " "because configured rate mask excludes " "AP mandatory rate.\n", escape_essid(network->ssid, network->ssid_len), - MAC_ARG(network->bssid)); + print_mac(mac, network->bssid)); return 0; } if (rates.num_rates == 0) { - IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded " + IPW_DEBUG_ASSOC("Network '%s (%s)' excluded " "because of no compatible rates.\n", escape_essid(network->ssid, network->ssid_len), - MAC_ARG(network->bssid)); + print_mac(mac, network->bssid)); return 0; } @@ -5772,9 +5807,9 @@ static int ipw_best_network(struct ipw_priv *priv, ipw_copy_rates(&match->rates, &rates); match->network = network; - IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' is a viable match.\n", + IPW_DEBUG_ASSOC("Network '%s (%s)' is a viable match.\n", escape_essid(network->ssid, network->ssid_len), - MAC_ARG(network->bssid)); + print_mac(mac, network->bssid)); return 1; } @@ -6016,6 +6051,7 @@ static void ipw_bg_adhoc_check(struct work_struct *work) static void ipw_debug_config(struct ipw_priv *priv) { + DECLARE_MAC_BUF(mac); IPW_DEBUG_INFO("Scan completed, no valid APs matched " "[CFG 0x%08X]\n", priv->config); if (priv->config & CFG_STATIC_CHANNEL) @@ -6028,8 +6064,8 @@ static void ipw_debug_config(struct ipw_priv *priv) else IPW_DEBUG_INFO("ESSID unlocked.\n"); if (priv->config & CFG_STATIC_BSSID) - IPW_DEBUG_INFO("BSSID locked to " MAC_FMT "\n", - MAC_ARG(priv->bssid)); + IPW_DEBUG_INFO("BSSID locked to %s\n", + print_mac(mac, priv->bssid)); else IPW_DEBUG_INFO("BSSID unlocked.\n"); if (priv->capability & CAP_PRIVACY_ON) @@ -7221,6 +7257,7 @@ static int ipw_associate_network(struct 
ipw_priv *priv, struct ipw_supported_rates *rates, int roaming) { int err; + DECLARE_MAC_BUF(mac); if (priv->config & CFG_FIXED_RATE) ipw_set_fixed_rate(priv, network->mode); @@ -7388,9 +7425,9 @@ static int ipw_associate_network(struct ipw_priv *priv, return err; } - IPW_DEBUG(IPW_DL_STATE, "associating: '%s' " MAC_FMT " \n", + IPW_DEBUG(IPW_DL_STATE, "associating: '%s' %s \n", escape_essid(priv->essid, priv->essid_len), - MAC_ARG(priv->bssid)); + print_mac(mac, priv->bssid)); return 0; } @@ -8202,6 +8239,9 @@ static void ipw_rx(struct ipw_priv *priv) struct ieee80211_hdr_4addr *header; u32 r, w, i; u8 network_packet; + DECLARE_MAC_BUF(mac); + DECLARE_MAC_BUF(mac2); + DECLARE_MAC_BUF(mac3); r = ipw_read32(priv, IPW_RX_READ_INDEX); w = ipw_read32(priv, IPW_RX_WRITE_INDEX); @@ -8328,14 +8368,17 @@ static void ipw_rx(struct ipw_priv *priv) header))) { IPW_DEBUG_DROP("Dropping: " - MAC_FMT ", " - MAC_FMT ", " - MAC_FMT "\n", - MAC_ARG(header-> + "%s, " + "%s, " + "%s\n", + print_mac(mac, + header-> addr1), - MAC_ARG(header-> + print_mac(mac2, + header-> addr2), - MAC_ARG(header-> + print_mac(mac3, + header-> addr3)); break; } @@ -8867,6 +8910,7 @@ static int ipw_wx_set_wap(struct net_device *dev, union iwreq_data *wrqu, char *extra) { struct ipw_priv *priv = ieee80211_priv(dev); + DECLARE_MAC_BUF(mac); static const unsigned char any[] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff @@ -8897,8 +8941,8 @@ static int ipw_wx_set_wap(struct net_device *dev, return 0; } - IPW_DEBUG_WX("Setting mandatory BSSID to " MAC_FMT "\n", - MAC_ARG(wrqu->ap_addr.sa_data)); + IPW_DEBUG_WX("Setting mandatory BSSID to %s\n", + print_mac(mac, wrqu->ap_addr.sa_data)); memcpy(priv->bssid, wrqu->ap_addr.sa_data, ETH_ALEN); @@ -8916,6 +8960,8 @@ static int ipw_wx_get_wap(struct net_device *dev, union iwreq_data *wrqu, char *extra) { struct ipw_priv *priv = ieee80211_priv(dev); + DECLARE_MAC_BUF(mac); + /* If we are associated, trying to associate, or have a statically * configured BSSID then return that; otherwise return ANY */ mutex_lock(&priv->mutex); @@ -8926,8 +8972,8 @@ static int ipw_wx_get_wap(struct net_device *dev, } else memset(wrqu->ap_addr.sa_data, 0, ETH_ALEN); - IPW_DEBUG_WX("Getting WAP BSSID: " MAC_FMT "\n", - MAC_ARG(wrqu->ap_addr.sa_data)); + IPW_DEBUG_WX("Getting WAP BSSID: %s\n", + print_mac(mac, wrqu->ap_addr.sa_data)); mutex_unlock(&priv->mutex); return 0; } @@ -9472,6 +9518,10 @@ static int ipw_wx_set_scan(struct net_device *dev, struct ipw_priv *priv = ieee80211_priv(dev); struct iw_scan_req *req = (struct iw_scan_req *)extra; + mutex_lock(&priv->mutex); + priv->user_requested_scan = 1; + mutex_unlock(&priv->mutex); + if (wrqu->data.length == sizeof(struct iw_scan_req)) { if (wrqu->data.flags & IW_SCAN_THIS_ESSID) { ipw_request_direct_scan(priv, req->essid, @@ -10133,6 +10183,7 @@ static int ipw_tx_skb(struct ipw_priv *priv, struct ieee80211_txb *txb, u8 id, hdr_len, unicast; u16 remaining_bytes; int fc; + DECLARE_MAC_BUF(mac); hdr_len = ieee80211_get_hdrlen(le16_to_cpu(hdr->frame_ctl)); switch (priv->ieee->iw_mode) { @@ -10143,8 +10194,8 @@ static int ipw_tx_skb(struct ipw_priv *priv, struct ieee80211_txb *txb, id = ipw_add_station(priv, hdr->addr1); if (id == IPW_INVALID_STATION) { IPW_WARNING("Attempt to send data to " - "invalid cell: " MAC_FMT "\n", - MAC_ARG(hdr->addr1)); + "invalid cell: %s\n", + print_mac(mac, hdr->addr1)); goto drop; } } @@ -10460,13 +10511,15 @@ static int ipw_net_set_mac_address(struct net_device *dev, void *p) { struct ipw_priv *priv = ieee80211_priv(dev); struct sockaddr 
*addr = p; + DECLARE_MAC_BUF(mac); + if (!is_valid_ether_addr(addr->sa_data)) return -EADDRNOTAVAIL; mutex_lock(&priv->mutex); priv->config |= CFG_CUSTOM_MAC; memcpy(priv->mac_addr, addr->sa_data, ETH_ALEN); - printk(KERN_INFO "%s: Setting MAC to " MAC_FMT "\n", - priv->net_dev->name, MAC_ARG(priv->mac_addr)); + printk(KERN_INFO "%s: Setting MAC to %s\n", + priv->net_dev->name, print_mac(mac, priv->mac_addr)); queue_work(priv->workqueue, &priv->adapter_restart); mutex_unlock(&priv->mutex); return 0; @@ -10647,6 +10700,7 @@ static void ipw_link_up(struct ipw_priv *priv) } cancel_delayed_work(&priv->request_scan); + cancel_delayed_work(&priv->scan_event); ipw_reset_stats(priv); /* Ensure the rate is updated immediately */ priv->last_rate = ipw_get_current_rate(priv); @@ -10684,7 +10738,8 @@ static void ipw_link_down(struct ipw_priv *priv) if (!(priv->status & STATUS_EXIT_PENDING)) { /* Queue up another scan... */ queue_delayed_work(priv->workqueue, &priv->request_scan, 0); - } + } else + cancel_delayed_work(&priv->scan_event); } static void ipw_bg_link_down(struct work_struct *work) @@ -10714,6 +10769,7 @@ static int ipw_setup_deferred_work(struct ipw_priv *priv) INIT_WORK(&priv->up, ipw_bg_up); INIT_WORK(&priv->down, ipw_bg_down); INIT_DELAYED_WORK(&priv->request_scan, ipw_request_scan); + INIT_DELAYED_WORK(&priv->scan_event, ipw_scan_event); INIT_WORK(&priv->request_passive_scan, ipw_request_passive_scan); INIT_DELAYED_WORK(&priv->gather_stats, ipw_bg_gather_stats); INIT_WORK(&priv->abort_scan, ipw_bg_abort_scan); @@ -11625,7 +11681,6 @@ static int ipw_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) goto out_destroy_workqueue; } - SET_MODULE_OWNER(net_dev); SET_NETDEV_DEV(net_dev, &pdev->dev); mutex_lock(&priv->mutex); @@ -11746,6 +11801,7 @@ static void ipw_pci_remove(struct pci_dev *pdev) cancel_delayed_work(&priv->adhoc_check); cancel_delayed_work(&priv->gather_stats); cancel_delayed_work(&priv->request_scan); + cancel_delayed_work(&priv->scan_event); cancel_delayed_work(&priv->rf_kill); cancel_delayed_work(&priv->scan_check); destroy_workqueue(priv->workqueue); diff --git a/drivers/net/wireless/ipw2200.h b/drivers/net/wireless/ipw2200.h index 626a240a87d869f111e73d9dc43300ac3276b689..bec8e37a17380fa31da5aae7fad50b271cde31ae 100644 --- a/drivers/net/wireless/ipw2200.h +++ b/drivers/net/wireless/ipw2200.h @@ -45,7 +45,6 @@ #include #include -#include #include #include @@ -1288,6 +1287,8 @@ struct ipw_priv { struct iw_public_data wireless_data; + int user_requested_scan; + struct workqueue_struct *workqueue; struct delayed_work adhoc_check; @@ -1296,6 +1297,7 @@ struct ipw_priv { struct work_struct system_config; struct work_struct rx_replenish; struct delayed_work request_scan; + struct delayed_work scan_event; struct work_struct request_passive_scan; struct work_struct adapter_restart; struct delayed_work rf_kill; diff --git a/drivers/net/wireless/iwlwifi/Kconfig b/drivers/net/wireless/iwlwifi/Kconfig new file mode 100644 index 0000000000000000000000000000000000000000..25cfc6c32509f8e71544516c00d715938b3efbd2 --- /dev/null +++ b/drivers/net/wireless/iwlwifi/Kconfig @@ -0,0 +1,128 @@ +config IWLWIFI + bool "Intel Wireless WiFi Link Drivers" + depends on PCI && MAC80211 && WLAN_80211 && EXPERIMENTAL + select FW_LOADER + default n + ---help--- + Select to enable drivers based on the iwlwifi project. This + project provides a common foundation for Intel's wireless + drivers designed to use the mac80211 subsystem. 
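The ipw2200 hunks above add a scan_event delayed work: a completion of a scan that userspace asked for (ipw_wx_set_scan sets user_requested_scan under the mutex) is reported to wireless extensions immediately, while background scan completions are coalesced into a single SIOCGIWSCAN event that fires roughly four seconds later. A minimal sketch of that pattern, with hypothetical foo_* names standing in for the driver's own structures, might look like this:

#include <linux/workqueue.h>
#include <linux/jiffies.h>
#include <linux/timer.h>
#include <linux/netdevice.h>
#include <net/iw_handler.h>

struct foo_priv {
	struct net_device *net_dev;
	struct workqueue_struct *workqueue;
	struct delayed_work scan_event;
	int user_requested_scan;
};

static void foo_send_scan_event(struct foo_priv *priv)
{
	union iwreq_data wrqu;

	wrqu.data.length = 0;
	wrqu.data.flags = 0;
	wireless_send_event(priv->net_dev, SIOCGIWSCAN, &wrqu, NULL);
}

/* delayed-work handler: reports one coalesced event for background scans */
static void foo_scan_event(struct work_struct *work)
{
	struct foo_priv *priv =
		container_of(work, struct foo_priv, scan_event.work);

	foo_send_scan_event(priv);
}

static void foo_handle_scan_complete(struct foo_priv *priv)
{
	if (priv->user_requested_scan) {
		/* userspace asked for this scan: report it right away */
		priv->user_requested_scan = 0;
		cancel_delayed_work(&priv->scan_event);
		foo_send_scan_event(priv);
	} else if (!delayed_work_pending(&priv->scan_event)) {
		/* background scan: batch events, at most one per ~4 s */
		queue_delayed_work(priv->workqueue, &priv->scan_event,
				   round_jiffies(msecs_to_jiffies(4000)));
	}
}

As in the patch, the delayed work also has to be initialized with INIT_DELAYED_WORK() at setup time and cancelled on rf-kill, link-up and device removal so no stale event fires after teardown.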
+ + See for + information on the capabilities currently enabled in this + driver and for tips for debugging issues and problems. + +config IWLWIFI_DEBUG + bool "Enable full debugging output in iwlwifi drivers" + depends on IWLWIFI + default y + ---help--- + This option will enable debug tracing output for the iwlwifi + drivers. + + This will result in the kernel module being ~100k larger. You can + control which debug output is sent to the kernel log by setting the + value in + + /sys/bus/pci/drivers/${DRIVER}/debug_level + + This entry will only exist if this option is enabled. + + To set a value, simply echo an 8-byte hex value to the same file: + + % echo 0x43fff > /sys/bus/pci/drivers/${DRIVER}/debug_level + + You can find the list of debug mask values in: + drivers/net/wireless/mac80211/iwlwifi/iwl-debug.h + + If this is your first time using this driver, you should say Y here + as the debug information can assist others in helping you resolve + any problems you may encounter. + +config IWLWIFI_SENSITIVITY + bool "Enable Sensitivity Calibration in iwlwifi drivers" + depends on IWLWIFI + default y + ---help--- + This option will enable sensitivity calibration for the iwlwifi + drivers. + +config IWLWIFI_SPECTRUM_MEASUREMENT + bool "Enable Spectrum Measurement in iwlwifi drivers" + depends on IWLWIFI + default y + ---help--- + This option will enable spectrum measurement for the iwlwifi drivers. + +config IWLWIFI_QOS + bool "Enable Wireless QoS in iwlwifi drivers" + depends on IWLWIFI + default y + ---help--- + This option will enable wireless quality of service (QoS) for the + iwlwifi drivers. + +config IWLWIFI_HT + bool "Enable 802.11n HT features in iwlwifi drivers" + depends on EXPERIMENTAL + depends on IWLWIFI && MAC80211_HT + default n + ---help--- + This option enables IEEE 802.11n High Throughput features + for the iwlwifi drivers. + +config IWL4965 + tristate "Intel Wireless WiFi 4965AGN" + depends on m && IWLWIFI && EXPERIMENTAL + default m + ---help--- + Select to build the driver supporting the: + + Intel Wireless WiFi Link 4965AGN + + This driver uses the kernel's mac80211 subsystem. + + See for + information on the capabilities currently enabled in this + driver and for tips for debugging any issues or problems. + + In order to use this driver, you will need a microcode (uCode) + image for it. You can obtain the microcode from: + + . + + See the above referenced README.iwlwifi for information on where + to install the microcode images. + + If you want to compile the driver as a module ( = code which can be + inserted in and removed from the running kernel whenever you want), + say M here and read . The module + will be called iwl4965.ko. + +config IWL3945 + tristate "Intel PRO/Wireless 3945ABG/BG Network Connection" + depends on m && IWLWIFI && EXPERIMENTAL + default m + ---help--- + Select to build the driver supporting the: + + Intel PRO/Wireless 3945ABG/BG Network Connection + + This driver uses the kernel's mac80211 subsystem. + + See for + information on the capabilities currently enabled in this + driver and for tips for debugging any issues or problems. + + In order to use this driver, you will need a microcode (uCode) + image for it. You can obtain the microcode from: + + . + + See the above referenced README.iwlwifi for information on where + to install the microcode images. + + If you want to compile the driver as a module ( = code which can be + inserted in and removed from the running kernel whenever you want), + say M here and read .
The module + will be called iwl3945.ko. diff --git a/drivers/net/wireless/iwlwifi/Makefile b/drivers/net/wireless/iwlwifi/Makefile new file mode 100644 index 0000000000000000000000000000000000000000..3bbd38358d5341f0c009c2605f851474fcf18db7 --- /dev/null +++ b/drivers/net/wireless/iwlwifi/Makefile @@ -0,0 +1,5 @@ +obj-$(CONFIG_IWL3945) += iwl3945.o +iwl3945-objs = iwl3945-base.o iwl-3945.o iwl-3945-rs.o + +obj-$(CONFIG_IWL4965) += iwl4965.o +iwl4965-objs = iwl4965-base.o iwl-4965.o iwl-4965-rs.o diff --git a/drivers/net/wireless/iwlwifi/iwl-3945-hw.h b/drivers/net/wireless/iwlwifi/iwl-3945-hw.h new file mode 100644 index 0000000000000000000000000000000000000000..fb5f0649f4f6498cc26668a4278522cb8412831a --- /dev/null +++ b/drivers/net/wireless/iwlwifi/iwl-3945-hw.h @@ -0,0 +1,118 @@ +/****************************************************************************** + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2005 - 2007 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU Geeral Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, + * USA + * + * The full GNU General Public License is included in this distribution + * in the file called LICENSE.GPL. + * + * Contact Information: + * James P. Ketrenos + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + * BSD LICENSE + * + * Copyright(c) 2005 - 2007 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + *****************************************************************************/ + +#ifndef __iwl_3945_hw__ +#define __iwl_3945_hw__ + +#define IWL_RX_BUF_SIZE 3000 +/* card static random access memory (SRAM) for processor data and instructs */ +#define ALM_RTC_INST_UPPER_BOUND (0x014000) +#define ALM_RTC_DATA_UPPER_BOUND (0x808000) + +#define ALM_RTC_INST_SIZE (ALM_RTC_INST_UPPER_BOUND - RTC_INST_LOWER_BOUND) +#define ALM_RTC_DATA_SIZE (ALM_RTC_DATA_UPPER_BOUND - RTC_DATA_LOWER_BOUND) + +#define IWL_MAX_BSM_SIZE ALM_RTC_INST_SIZE +#define IWL_MAX_INST_SIZE ALM_RTC_INST_SIZE +#define IWL_MAX_DATA_SIZE ALM_RTC_DATA_SIZE +#define IWL_MAX_NUM_QUEUES 8 + +static inline int iwl_hw_valid_rtc_data_addr(u32 addr) +{ + return (addr >= RTC_DATA_LOWER_BOUND) && + (addr < ALM_RTC_DATA_UPPER_BOUND); +} + +/* Base physical address of iwl_shared is provided to FH_TSSR_CBB_BASE + * and &iwl_shared.rx_read_ptr[0] is provided to FH_RCSR_RPTR_ADDR(0) */ +struct iwl_shared { + __le32 tx_base_ptr[8]; + __le32 rx_read_ptr[3]; +} __attribute__ ((packed)); + +struct iwl_tfd_frame_data { + __le32 addr; + __le32 len; +} __attribute__ ((packed)); + +struct iwl_tfd_frame { + __le32 control_flags; + struct iwl_tfd_frame_data pa[4]; + u8 reserved[28]; +} __attribute__ ((packed)); + +static inline u8 iwl_hw_get_rate(__le16 rate_n_flags) +{ + return le16_to_cpu(rate_n_flags) & 0xFF; +} + +static inline u16 iwl_hw_get_rate_n_flags(__le16 rate_n_flags) +{ + return le16_to_cpu(rate_n_flags); +} + +static inline __le16 iwl_hw_set_rate_n_flags(u8 rate, u16 flags) +{ + return cpu_to_le16((u16)rate|flags); +} +#endif diff --git a/drivers/net/wireless/iwlwifi/iwl-3945-rs.c b/drivers/net/wireless/iwlwifi/iwl-3945-rs.c new file mode 100644 index 0000000000000000000000000000000000000000..f4aabcf480e46636479ebf00fc4f9d70b95f5ebf --- /dev/null +++ b/drivers/net/wireless/iwlwifi/iwl-3945-rs.c @@ -0,0 +1,982 @@ +/****************************************************************************** + * + * Copyright(c) 2005 - 2007 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA + * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. + * + * Contact Information: + * James P. Ketrenos + * Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + * + *****************************************************************************/ + +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include + +#include +#include + +#define IWL 3945 + +#include "../net/mac80211/ieee80211_rate.h" + +#include "iwlwifi.h" + +#define RS_NAME "iwl-3945-rs" + +struct iwl_rate_scale_data { + u64 data; + s32 success_counter; + s32 success_ratio; + s32 counter; + s32 average_tpt; + unsigned long stamp; +}; + +struct iwl_rate_scale_priv { + spinlock_t lock; + s32 *expected_tpt; + unsigned long last_partial_flush; + unsigned long last_flush; + u32 flush_time; + u32 last_tx_packets; + u32 tx_packets; + u8 tgg; + u8 flush_pending; + u8 start_rate; + u8 ibss_sta_added; + struct timer_list rate_scale_flush; + struct iwl_rate_scale_data win[IWL_RATE_COUNT]; +}; + +static s32 iwl_expected_tpt_g[IWL_RATE_COUNT] = { + 0, 0, 76, 104, 130, 168, 191, 202, 7, 13, 35, 58 +}; + +static s32 iwl_expected_tpt_g_prot[IWL_RATE_COUNT] = { + 0, 0, 0, 80, 93, 113, 123, 125, 7, 13, 35, 58 +}; + +static s32 iwl_expected_tpt_a[IWL_RATE_COUNT] = { + 40, 57, 72, 98, 121, 154, 177, 186, 0, 0, 0, 0 +}; + +static s32 iwl_expected_tpt_b[IWL_RATE_COUNT] = { + 0, 0, 0, 0, 0, 0, 0, 0, 7, 13, 35, 58 +}; + +struct iwl_tpt_entry { + s8 min_rssi; + u8 index; +}; + +static struct iwl_tpt_entry iwl_tpt_table_a[] = { + {-60, IWL_RATE_54M_INDEX}, + {-64, IWL_RATE_48M_INDEX}, + {-72, IWL_RATE_36M_INDEX}, + {-80, IWL_RATE_24M_INDEX}, + {-84, IWL_RATE_18M_INDEX}, + {-85, IWL_RATE_12M_INDEX}, + {-87, IWL_RATE_9M_INDEX}, + {-89, IWL_RATE_6M_INDEX} +}; + +static struct iwl_tpt_entry iwl_tpt_table_b[] = { + {-86, IWL_RATE_11M_INDEX}, + {-88, IWL_RATE_5M_INDEX}, + {-90, IWL_RATE_2M_INDEX}, + {-92, IWL_RATE_1M_INDEX} + +}; + +static struct iwl_tpt_entry iwl_tpt_table_g[] = { + {-60, IWL_RATE_54M_INDEX}, + {-64, IWL_RATE_48M_INDEX}, + {-68, IWL_RATE_36M_INDEX}, + {-80, IWL_RATE_24M_INDEX}, + {-84, IWL_RATE_18M_INDEX}, + {-85, IWL_RATE_12M_INDEX}, + {-86, IWL_RATE_11M_INDEX}, + {-88, IWL_RATE_5M_INDEX}, + {-90, IWL_RATE_2M_INDEX}, + {-92, IWL_RATE_1M_INDEX} +}; + +#define IWL_RATE_MAX_WINDOW 62 +#define IWL_RATE_FLUSH (3*HZ/10) +#define IWL_RATE_WIN_FLUSH (HZ/2) +#define IWL_RATE_HIGH_TH 11520 +#define IWL_RATE_MIN_FAILURE_TH 8 +#define IWL_RATE_MIN_SUCCESS_TH 8 +#define IWL_RATE_DECREASE_TH 1920 + +static u8 iwl_get_rate_index_by_rssi(s32 rssi, u8 mode) +{ + u32 index = 0; + u32 table_size = 0; + struct iwl_tpt_entry *tpt_table = NULL; + + if ((rssi < IWL_MIN_RSSI_VAL) || (rssi > IWL_MAX_RSSI_VAL)) + rssi = IWL_MIN_RSSI_VAL; + + switch (mode) { + case MODE_IEEE80211G: + tpt_table = iwl_tpt_table_g; + table_size = ARRAY_SIZE(iwl_tpt_table_g); + break; + + case MODE_IEEE80211A: + tpt_table = iwl_tpt_table_a; + table_size = ARRAY_SIZE(iwl_tpt_table_a); + break; + + default: + case MODE_IEEE80211B: + tpt_table = iwl_tpt_table_b; + table_size = ARRAY_SIZE(iwl_tpt_table_b); + break; + } + + while ((index < table_size) && (rssi < tpt_table[index].min_rssi)) + index++; + + index = min(index, (table_size - 1)); + + return tpt_table[index].index; +} + +static void iwl_clear_window(struct iwl_rate_scale_data *window) +{ + window->data = 0; + window->success_counter = 0; + window->success_ratio = IWL_INVALID_VALUE; + window->counter = 0; + window->average_tpt = IWL_INVALID_VALUE; + window->stamp = 0; +} + +/** + * iwl_rate_scale_flush_windows - flush out the rate scale windows + * + * Returns the number of windows that have gathered 
data but were + * not flushed. If there were any that were not flushed, then + * reschedule the rate flushing routine. + */ +static int iwl_rate_scale_flush_windows(struct iwl_rate_scale_priv *rs_priv) +{ + int unflushed = 0; + int i; + unsigned long flags; + + /* + * For each rate, if we have collected data on that rate + * and it has been more than IWL_RATE_WIN_FLUSH + * since we flushed, clear out the gathered statistics + */ + for (i = 0; i < IWL_RATE_COUNT; i++) { + if (!rs_priv->win[i].counter) + continue; + + spin_lock_irqsave(&rs_priv->lock, flags); + if (time_after(jiffies, rs_priv->win[i].stamp + + IWL_RATE_WIN_FLUSH)) { + IWL_DEBUG_RATE("flushing %d samples of rate " + "index %d\n", + rs_priv->win[i].counter, i); + iwl_clear_window(&rs_priv->win[i]); + } else + unflushed++; + spin_unlock_irqrestore(&rs_priv->lock, flags); + } + + return unflushed; +} + +#define IWL_RATE_FLUSH_MAX 5000 /* msec */ +#define IWL_RATE_FLUSH_MIN 50 /* msec */ + +static void iwl_bg_rate_scale_flush(unsigned long data) +{ + struct iwl_rate_scale_priv *rs_priv = (void *)data; + int unflushed = 0; + unsigned long flags; + u32 packet_count, duration, pps; + + IWL_DEBUG_RATE("enter\n"); + + unflushed = iwl_rate_scale_flush_windows(rs_priv); + + spin_lock_irqsave(&rs_priv->lock, flags); + + rs_priv->flush_pending = 0; + + /* Number of packets Rx'd since last time this timer ran */ + packet_count = (rs_priv->tx_packets - rs_priv->last_tx_packets) + 1; + + rs_priv->last_tx_packets = rs_priv->tx_packets + 1; + + if (unflushed) { + duration = + jiffies_to_msecs(jiffies - rs_priv->last_partial_flush); +/* duration = jiffies_to_msecs(rs_priv->flush_time); */ + + IWL_DEBUG_RATE("Tx'd %d packets in %dms\n", + packet_count, duration); + + /* Determine packets per second */ + if (duration) + pps = (packet_count * 1000) / duration; + else + pps = 0; + + if (pps) { + duration = IWL_RATE_FLUSH_MAX / pps; + if (duration < IWL_RATE_FLUSH_MIN) + duration = IWL_RATE_FLUSH_MIN; + } else + duration = IWL_RATE_FLUSH_MAX; + + rs_priv->flush_time = msecs_to_jiffies(duration); + + IWL_DEBUG_RATE("new flush period: %d msec ave %d\n", + duration, packet_count); + + mod_timer(&rs_priv->rate_scale_flush, jiffies + + rs_priv->flush_time); + + rs_priv->last_partial_flush = jiffies; + } + + /* If there weren't any unflushed entries, we don't schedule the timer + * to run again */ + + rs_priv->last_flush = jiffies; + + spin_unlock_irqrestore(&rs_priv->lock, flags); + + IWL_DEBUG_RATE("leave\n"); +} + +/** + * iwl_collect_tx_data - Update the success/failure sliding window + * + * We keep a sliding window of the last 64 packets transmitted + * at this rate. window->data contains the bitmask of successful + * packets. 
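The flush timer above adapts its own period to the observed traffic: the next flush is scheduled IWL_RATE_FLUSH_MAX / pps milliseconds out, clamped to no less than IWL_RATE_FLUSH_MIN, and falls back to the 5-second maximum on an idle link. A small stand-alone check of that period calculation (plain C, not driver code; the names mirror the constants above):

#include <stdio.h>

#define RATE_FLUSH_MAX 5000	/* msec, mirrors IWL_RATE_FLUSH_MAX */
#define RATE_FLUSH_MIN 50	/* msec, mirrors IWL_RATE_FLUSH_MIN */

/* Same arithmetic as iwl_bg_rate_scale_flush() uses for its next period. */
static unsigned int next_flush_msec(unsigned int packets, unsigned int elapsed_msec)
{
	unsigned int pps = elapsed_msec ? (packets * 1000) / elapsed_msec : 0;
	unsigned int duration;

	if (pps) {
		duration = RATE_FLUSH_MAX / pps;
		if (duration < RATE_FLUSH_MIN)
			duration = RATE_FLUSH_MIN;
	} else
		duration = RATE_FLUSH_MAX;

	return duration;
}

int main(void)
{
	/* busy link: 300 packets in 1 s -> 16 ms, clamped to 50 ms */
	printf("%u\n", next_flush_msec(300, 1000));
	/* quiet link: 2 packets in 1 s -> 2500 ms */
	printf("%u\n", next_flush_msec(2, 1000));
	/* idle link: no packets -> 5000 ms maximum */
	printf("%u\n", next_flush_msec(0, 1000));
	return 0;
}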
+ */ +static void iwl_collect_tx_data(struct iwl_rate_scale_priv *rs_priv, + struct iwl_rate_scale_data *window, + int success, int retries) +{ + unsigned long flags; + + if (!retries) { + IWL_DEBUG_RATE("leave: retries == 0 -- should be at least 1\n"); + return; + } + + while (retries--) { + spin_lock_irqsave(&rs_priv->lock, flags); + + /* If we have filled up the window then subtract one from the + * success counter if the high-bit is counting toward + * success */ + if (window->counter == IWL_RATE_MAX_WINDOW) { + if (window->data & (1ULL << (IWL_RATE_MAX_WINDOW - 1))) + window->success_counter--; + } else + window->counter++; + + /* Slide the window to the left one bit */ + window->data = (window->data << 1); + + /* If this packet was a success then set the low bit high */ + if (success) { + window->success_counter++; + window->data |= 1; + } + + /* window->counter can't be 0 -- it is either >0 or + * IWL_RATE_MAX_WINDOW */ + window->success_ratio = 12800 * window->success_counter / + window->counter; + + /* Tag this window as having been updated */ + window->stamp = jiffies; + + spin_unlock_irqrestore(&rs_priv->lock, flags); + } +} + +static void rs_rate_init(void *priv_rate, void *priv_sta, + struct ieee80211_local *local, struct sta_info *sta) +{ + int i; + + IWL_DEBUG_RATE("enter\n"); + + /* TODO: what is a good starting rate for STA? About middle? Maybe not + * the lowest or the highest rate.. Could consider using RSSI from + * previous packets? Need to have IEEE 802.1X auth succeed immediately + * after assoc.. */ + + for (i = IWL_RATE_COUNT - 1; i >= 0; i--) { + if (sta->supp_rates & (1 << i)) { + sta->txrate = i; + break; + } + } + + sta->last_txrate = sta->txrate; + + IWL_DEBUG_RATE("leave\n"); +} + +static void *rs_alloc(struct ieee80211_local *local) +{ + return local->hw.priv; +} + +/* rate scale requires free function to be implmented */ +static void rs_free(void *priv) +{ + return; +} +static void rs_clear(void *priv) +{ + return; +} + + +static void *rs_alloc_sta(void *priv, gfp_t gfp) +{ + struct iwl_rate_scale_priv *rs_priv; + int i; + + IWL_DEBUG_RATE("enter\n"); + + rs_priv = kzalloc(sizeof(struct iwl_rate_scale_priv), gfp); + if (!rs_priv) { + IWL_DEBUG_RATE("leave: ENOMEM\n"); + return NULL; + } + + spin_lock_init(&rs_priv->lock); + + rs_priv->start_rate = IWL_RATE_INVALID; + + /* default to just 802.11b */ + rs_priv->expected_tpt = iwl_expected_tpt_b; + + rs_priv->last_partial_flush = jiffies; + rs_priv->last_flush = jiffies; + rs_priv->flush_time = IWL_RATE_FLUSH; + rs_priv->last_tx_packets = 0; + rs_priv->ibss_sta_added = 0; + + init_timer(&rs_priv->rate_scale_flush); + rs_priv->rate_scale_flush.data = (unsigned long)rs_priv; + rs_priv->rate_scale_flush.function = &iwl_bg_rate_scale_flush; + + for (i = 0; i < IWL_RATE_COUNT; i++) + iwl_clear_window(&rs_priv->win[i]); + + IWL_DEBUG_RATE("leave\n"); + + return rs_priv; +} + +static void rs_free_sta(void *priv, void *priv_sta) +{ + struct iwl_rate_scale_priv *rs_priv = priv_sta; + + IWL_DEBUG_RATE("enter\n"); + del_timer_sync(&rs_priv->rate_scale_flush); + kfree(rs_priv); + IWL_DEBUG_RATE("leave\n"); +} + +/** + * rs_tx_status - Update rate control values based on Tx results + * + * NOTE: Uses iwl_priv->retry_rate for the # of retries attempted by + * the hardware for each rate. 
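iwl_collect_tx_data() above keeps, per rate, a bitmask of the last IWL_RATE_MAX_WINDOW (62) transmit results and derives success_ratio as 12800 * success_counter / counter, i.e. a percentage scaled by 128, so the IWL_RATE_DECREASE_TH and IWL_RATE_HIGH_TH thresholds of 1920 and 11520 correspond to 15 % and 90 % success. A stand-alone version of the same bookkeeping, without the driver's locking (illustrative names, plain C):

#include <stdint.h>
#include <stdio.h>

#define RATE_MAX_WINDOW 62

struct win {
	uint64_t data;		/* bitmask of the last RATE_MAX_WINDOW frames */
	int success_counter;	/* set bits currently inside the window */
	int counter;		/* valid bits, up to RATE_MAX_WINDOW */
	int success_ratio;	/* percentage scaled by 128 */
};

/* Same window update as iwl_collect_tx_data(), one frame at a time. */
static void collect_tx(struct win *w, int success)
{
	if (w->counter == RATE_MAX_WINDOW) {
		/* the oldest result is about to fall out of the window */
		if (w->data & (1ULL << (RATE_MAX_WINDOW - 1)))
			w->success_counter--;
	} else
		w->counter++;

	w->data <<= 1;			/* slide the window left one bit */
	if (success) {
		w->success_counter++;
		w->data |= 1;		/* record this frame as a success */
	}

	w->success_ratio = 12800 * w->success_counter / w->counter;
}

int main(void)
{
	struct win w = { 0 };
	int i;

	for (i = 0; i < 10; i++)
		collect_tx(&w, i % 2);	/* alternate success/failure */

	/* prints "ratio = 6400 (50 %)" */
	printf("ratio = %d (%d %%)\n", w.success_ratio, w.success_ratio / 128);
	return 0;
}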
+ */ +static void rs_tx_status(void *priv_rate, + struct net_device *dev, + struct sk_buff *skb, + struct ieee80211_tx_status *tx_resp) +{ + u8 retries, current_count; + int scale_rate_index, first_index, last_index; + unsigned long flags; + struct sta_info *sta; + struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; + struct iwl_priv *priv = (struct iwl_priv *)priv_rate; + struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); + struct iwl_rate_scale_priv *rs_priv; + + IWL_DEBUG_RATE("enter\n"); + + retries = tx_resp->retry_count; + + first_index = tx_resp->control.tx_rate; + if ((first_index < 0) || (first_index >= IWL_RATE_COUNT)) { + IWL_DEBUG_RATE("leave: Rate out of bounds: %0x for %d\n", + tx_resp->control.tx_rate, first_index); + return; + } + + sta = sta_info_get(local, hdr->addr1); + if (!sta || !sta->rate_ctrl_priv) { + if (sta) + sta_info_put(sta); + IWL_DEBUG_RATE("leave: No STA priv data to update!\n"); + return; + } + + rs_priv = (void *)sta->rate_ctrl_priv; + + rs_priv->tx_packets++; + + scale_rate_index = first_index; + last_index = first_index; + + /* + * Update the window for each rate. We determine which rates + * were Tx'd based on the total number of retries vs. the number + * of retries configured for each rate -- currently set to the + * priv value 'retry_rate' vs. rate specific + * + * On exit from this while loop last_index indicates the rate + * at which the frame was finally transmitted (or failed if no + * ACK) + */ + while (retries > 0) { + if (retries < priv->retry_rate) { + current_count = retries; + last_index = scale_rate_index; + } else { + current_count = priv->retry_rate; + last_index = iwl_get_prev_ieee_rate(scale_rate_index); + } + + /* Update this rate accounting for as many retries + * as was used for it (per current_count) */ + iwl_collect_tx_data(rs_priv, + &rs_priv->win[scale_rate_index], + 0, current_count); + IWL_DEBUG_RATE("Update rate %d for %d retries.\n", + scale_rate_index, current_count); + + retries -= current_count; + + if (retries) + scale_rate_index = + iwl_get_prev_ieee_rate(scale_rate_index); + } + + /* Update the last index window with success/failure based on ACK */ + IWL_DEBUG_RATE("Update rate %d with %s.\n", + last_index, + (tx_resp->flags & IEEE80211_TX_STATUS_ACK) ? 
+ "success" : "failure"); + iwl_collect_tx_data(rs_priv, + &rs_priv->win[last_index], + tx_resp->flags & IEEE80211_TX_STATUS_ACK, 1); + + /* We updated the rate scale window -- if its been more than + * flush_time since the last run, schedule the flush + * again */ + spin_lock_irqsave(&rs_priv->lock, flags); + + if (!rs_priv->flush_pending && + time_after(jiffies, rs_priv->last_partial_flush + + rs_priv->flush_time)) { + + rs_priv->flush_pending = 1; + mod_timer(&rs_priv->rate_scale_flush, + jiffies + rs_priv->flush_time); + } + + spin_unlock_irqrestore(&rs_priv->lock, flags); + + sta_info_put(sta); + + IWL_DEBUG_RATE("leave\n"); + + return; +} + +static struct ieee80211_rate *iwl_get_lowest_rate(struct ieee80211_local + *local) +{ + struct ieee80211_hw_mode *mode = local->oper_hw_mode; + int i; + + for (i = 0; i < mode->num_rates; i++) { + struct ieee80211_rate *rate = &mode->rates[i]; + + if (rate->flags & IEEE80211_RATE_SUPPORTED) + return rate; + } + + return &mode->rates[0]; +} + +static u16 iwl_get_adjacent_rate(struct iwl_rate_scale_priv *rs_priv, + u8 index, u16 rate_mask, int phymode) +{ + u8 high = IWL_RATE_INVALID; + u8 low = IWL_RATE_INVALID; + + /* 802.11A walks to the next literal adjascent rate in + * the rate table */ + if (unlikely(phymode == MODE_IEEE80211A)) { + int i; + u32 mask; + + /* Find the previous rate that is in the rate mask */ + i = index - 1; + for (mask = (1 << i); i >= 0; i--, mask >>= 1) { + if (rate_mask & mask) { + low = i; + break; + } + } + + /* Find the next rate that is in the rate mask */ + i = index + 1; + for (mask = (1 << i); i < IWL_RATE_COUNT; i++, mask <<= 1) { + if (rate_mask & mask) { + high = i; + break; + } + } + + return (high << 8) | low; + } + + low = index; + while (low != IWL_RATE_INVALID) { + if (rs_priv->tgg) + low = iwl_rates[low].prev_rs_tgg; + else + low = iwl_rates[low].prev_rs; + if (low == IWL_RATE_INVALID) + break; + if (rate_mask & (1 << low)) + break; + IWL_DEBUG_RATE("Skipping masked lower rate: %d\n", low); + } + + high = index; + while (high != IWL_RATE_INVALID) { + if (rs_priv->tgg) + high = iwl_rates[high].next_rs_tgg; + else + high = iwl_rates[high].next_rs; + if (high == IWL_RATE_INVALID) + break; + if (rate_mask & (1 << high)) + break; + IWL_DEBUG_RATE("Skipping masked higher rate: %d\n", high); + } + + return (high << 8) | low; +} + +/** + * rs_get_rate - find the rate for the requested packet + * + * Returns the ieee80211_rate structure allocated by the driver. + * + * The rate control algorithm has no internal mapping between hw_mode's + * rate ordering and the rate ordering used by the rate control algorithm. + * + * The rate control algorithm uses a single table of rates that goes across + * the entire A/B/G spectrum vs. being limited to just one particular + * hw_mode. 
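iwl_get_adjacent_rate() above returns the nearest usable lower and higher rate indices packed into a single u16 as (high << 8) | low, with IWL_RATE_INVALID marking a missing neighbour; rs_get_rate() later unpacks the pair with & 0xff and >> 8. A minimal stand-alone sketch of the 802.11a branch, which walks the supported-rate bitmask directly (illustrative names, plain C):

#include <stdint.h>
#include <stdio.h>

#define RATE_COUNT	12
#define RATE_INVALID	0xff

/* Find the nearest enabled rate below and above 'index' in rate_mask
 * and pack both answers as (high << 8) | low. */
static uint16_t adjacent_rates(int index, uint16_t rate_mask)
{
	uint8_t low = RATE_INVALID, high = RATE_INVALID;
	int i;

	for (i = index - 1; i >= 0; i--)
		if (rate_mask & (1 << i)) {
			low = i;
			break;
		}

	for (i = index + 1; i < RATE_COUNT; i++)
		if (rate_mask & (1 << i)) {
			high = i;
			break;
		}

	return (high << 8) | low;
}

int main(void)
{
	/* rates 0, 2, 3 and 7 enabled; ask for the neighbours of index 3 */
	uint16_t hl = adjacent_rates(3, 0x008d);

	/* prints "low 2 high 7" */
	printf("low %u high %u\n", hl & 0xff, (hl >> 8) & 0xff);
	return 0;
}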
+ * + * As such, we can't convert the index obtained below into the hw_mode's + * rate table and must reference the driver allocated rate table + * + */ +static struct ieee80211_rate *rs_get_rate(void *priv_rate, + struct net_device *dev, + struct sk_buff *skb, + struct rate_control_extra *extra) +{ + u8 low = IWL_RATE_INVALID; + u8 high = IWL_RATE_INVALID; + u16 high_low; + int index; + struct iwl_rate_scale_priv *rs_priv; + struct iwl_rate_scale_data *window = NULL; + int current_tpt = IWL_INVALID_VALUE; + int low_tpt = IWL_INVALID_VALUE; + int high_tpt = IWL_INVALID_VALUE; + u32 fail_count; + s8 scale_action = 0; + unsigned long flags; + struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); + struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; + struct sta_info *sta; + u16 fc, rate_mask; + struct iwl_priv *priv = (struct iwl_priv *)priv_rate; + DECLARE_MAC_BUF(mac); + + IWL_DEBUG_RATE("enter\n"); + + memset(extra, 0, sizeof(*extra)); + + fc = le16_to_cpu(hdr->frame_control); + if (((fc & IEEE80211_FCTL_FTYPE) != IEEE80211_FTYPE_DATA) || + (is_multicast_ether_addr(hdr->addr1))) { + /* Send management frames and broadcast/multicast data using + * lowest rate. */ + /* TODO: this could probably be improved.. */ + IWL_DEBUG_RATE("leave: lowest rate (not data or is " + "multicast)\n"); + + return iwl_get_lowest_rate(local); + } + + sta = sta_info_get(local, hdr->addr1); + if (!sta || !sta->rate_ctrl_priv) { + IWL_DEBUG_RATE("leave: No STA priv data to update!\n"); + if (sta) + sta_info_put(sta); + return NULL; + } + + rate_mask = sta->supp_rates; + index = min(sta->txrate & 0xffff, IWL_RATE_COUNT - 1); + + rs_priv = (void *)sta->rate_ctrl_priv; + + if ((priv->iw_mode == IEEE80211_IF_TYPE_IBSS) && + !rs_priv->ibss_sta_added) { + u8 sta_id = iwl_hw_find_station(priv, hdr->addr1); + + if (sta_id == IWL_INVALID_STATION) { + IWL_DEBUG_RATE("LQ: ADD station %s\n", + print_mac(mac, hdr->addr1)); + sta_id = iwl_add_station(priv, + hdr->addr1, 0, CMD_ASYNC); + } + if (sta_id != IWL_INVALID_STATION) + rs_priv->ibss_sta_added = 1; + } + + spin_lock_irqsave(&rs_priv->lock, flags); + + if (rs_priv->start_rate != IWL_RATE_INVALID) { + index = rs_priv->start_rate; + rs_priv->start_rate = IWL_RATE_INVALID; + } + + window = &(rs_priv->win[index]); + + fail_count = window->counter - window->success_counter; + + if (((fail_count <= IWL_RATE_MIN_FAILURE_TH) && + (window->success_counter < IWL_RATE_MIN_SUCCESS_TH))) { + window->average_tpt = IWL_INVALID_VALUE; + spin_unlock_irqrestore(&rs_priv->lock, flags); + + IWL_DEBUG_RATE("Invalid average_tpt on rate %d: " + "counter: %d, success_counter: %d, " + "expected_tpt is %sNULL\n", + index, + window->counter, + window->success_counter, + rs_priv->expected_tpt ? 
"not " : ""); + goto out; + + } + + window->average_tpt = ((window->success_ratio * + rs_priv->expected_tpt[index] + 64) / 128); + current_tpt = window->average_tpt; + + high_low = iwl_get_adjacent_rate(rs_priv, index, rate_mask, + local->hw.conf.phymode); + low = high_low & 0xff; + high = (high_low >> 8) & 0xff; + + if (low != IWL_RATE_INVALID) + low_tpt = rs_priv->win[low].average_tpt; + + if (high != IWL_RATE_INVALID) + high_tpt = rs_priv->win[high].average_tpt; + + spin_unlock_irqrestore(&rs_priv->lock, flags); + + scale_action = 1; + + if ((window->success_ratio < IWL_RATE_DECREASE_TH) || !current_tpt) { + IWL_DEBUG_RATE("decrease rate because of low success_ratio\n"); + scale_action = -1; + } else if ((low_tpt == IWL_INVALID_VALUE) && + (high_tpt == IWL_INVALID_VALUE)) + scale_action = 1; + else if ((low_tpt != IWL_INVALID_VALUE) && + (high_tpt != IWL_INVALID_VALUE) + && (low_tpt < current_tpt) + && (high_tpt < current_tpt)) { + IWL_DEBUG_RATE("No action -- low [%d] & high [%d] < " + "current_tpt [%d]\n", + low_tpt, high_tpt, current_tpt); + scale_action = 0; + } else { + if (high_tpt != IWL_INVALID_VALUE) { + if (high_tpt > current_tpt) + scale_action = 1; + else { + IWL_DEBUG_RATE + ("decrease rate because of high tpt\n"); + scale_action = -1; + } + } else if (low_tpt != IWL_INVALID_VALUE) { + if (low_tpt > current_tpt) { + IWL_DEBUG_RATE + ("decrease rate because of low tpt\n"); + scale_action = -1; + } else + scale_action = 1; + } + } + + if ((window->success_ratio > IWL_RATE_HIGH_TH) || + (current_tpt > window->average_tpt)) { + IWL_DEBUG_RATE("No action -- success_ratio [%d] > HIGH_TH or " + "current_tpt [%d] > average_tpt [%d]\n", + window->success_ratio, + current_tpt, window->average_tpt); + scale_action = 0; + } + + switch (scale_action) { + case -1: + if (low != IWL_RATE_INVALID) + index = low; + break; + + case 1: + if (high != IWL_RATE_INVALID) + index = high; + + break; + + case 0: + default: + break; + } + + IWL_DEBUG_RATE("Selected %d (action %d) - low %d high %d\n", + index, scale_action, low, high); + + out: + + sta->last_txrate = index; + sta->txrate = sta->last_txrate; + sta_info_put(sta); + + IWL_DEBUG_RATE("leave: %d\n", index); + + return &priv->ieee_rates[index]; +} + +static struct rate_control_ops rs_ops = { + .module = NULL, + .name = RS_NAME, + .tx_status = rs_tx_status, + .get_rate = rs_get_rate, + .rate_init = rs_rate_init, + .clear = rs_clear, + .alloc = rs_alloc, + .free = rs_free, + .alloc_sta = rs_alloc_sta, + .free_sta = rs_free_sta, +}; + +int iwl_fill_rs_info(struct ieee80211_hw *hw, char *buf, u8 sta_id) +{ + struct ieee80211_local *local = hw_to_local(hw); + struct iwl_priv *priv = hw->priv; + struct iwl_rate_scale_priv *rs_priv; + struct sta_info *sta; + unsigned long flags; + int count = 0, i; + u32 samples = 0, success = 0, good = 0; + unsigned long now = jiffies; + u32 max_time = 0; + + sta = sta_info_get(local, priv->stations[sta_id].sta.sta.addr); + if (!sta || !sta->rate_ctrl_priv) { + if (sta) { + sta_info_put(sta); + IWL_DEBUG_RATE("leave - no private rate data!\n"); + } else + IWL_DEBUG_RATE("leave - no station!\n"); + return sprintf(buf, "station %d not found\n", sta_id); + } + + rs_priv = (void *)sta->rate_ctrl_priv; + spin_lock_irqsave(&rs_priv->lock, flags); + i = IWL_RATE_54M_INDEX; + while (1) { + u64 mask; + int j; + + count += + sprintf(&buf[count], " %2dMbs: ", iwl_rates[i].ieee / 2); + + mask = (1ULL << (IWL_RATE_MAX_WINDOW - 1)); + for (j = 0; j < IWL_RATE_MAX_WINDOW; j++, mask >>= 1) + buf[count++] = + (rs_priv->win[i].data 
& mask) ? '1' : '0'; + + samples += rs_priv->win[i].counter; + good += rs_priv->win[i].success_counter; + success += rs_priv->win[i].success_counter * iwl_rates[i].ieee; + + if (rs_priv->win[i].stamp) { + int delta = + jiffies_to_msecs(now - rs_priv->win[i].stamp); + + if (delta > max_time) + max_time = delta; + + count += sprintf(&buf[count], "%5dms\n", delta); + } else + buf[count++] = '\n'; + + j = iwl_get_prev_ieee_rate(i); + if (j == i) + break; + i = j; + } + spin_unlock_irqrestore(&rs_priv->lock, flags); + sta_info_put(sta); + + /* Display the average rate of all samples taken. + * + * NOTE: We multiple # of samples by 2 since the IEEE measurement + * added from iwl_rates is actually 2X the rate */ + if (samples) + count += sprintf( + &buf[count], + "\nAverage rate is %3d.%02dMbs over last %4dms\n" + "%3d%% success (%d good packets over %d tries)\n", + success / (2 * samples), (success * 5 / samples) % 10, + max_time, good * 100 / samples, good, samples); + else + count += sprintf(&buf[count], "\nAverage rate: 0Mbs\n"); + + return count; +} + +void iwl_rate_scale_init(struct ieee80211_hw *hw, s32 sta_id) +{ + struct iwl_priv *priv = hw->priv; + s32 rssi = 0; + unsigned long flags; + struct ieee80211_local *local = hw_to_local(hw); + struct iwl_rate_scale_priv *rs_priv; + struct sta_info *sta; + + IWL_DEBUG_RATE("enter\n"); + + if (!local->rate_ctrl->ops->name || + strcmp(local->rate_ctrl->ops->name, RS_NAME)) { + IWL_WARNING("iwl-3945-rs not selected as rate control algo!\n"); + IWL_DEBUG_RATE("leave - mac80211 picked the wrong RC algo.\n"); + return; + } + + sta = sta_info_get(local, priv->stations[sta_id].sta.sta.addr); + if (!sta || !sta->rate_ctrl_priv) { + if (sta) + sta_info_put(sta); + IWL_DEBUG_RATE("leave - no private rate data!\n"); + return; + } + + rs_priv = (void *)sta->rate_ctrl_priv; + + spin_lock_irqsave(&rs_priv->lock, flags); + + rs_priv->tgg = 0; + switch (priv->phymode) { + case MODE_IEEE80211G: + if (priv->active_rxon.flags & RXON_FLG_TGG_PROTECT_MSK) { + rs_priv->tgg = 1; + rs_priv->expected_tpt = iwl_expected_tpt_g_prot; + } else + rs_priv->expected_tpt = iwl_expected_tpt_g; + break; + + case MODE_IEEE80211A: + rs_priv->expected_tpt = iwl_expected_tpt_a; + break; + + default: + IWL_WARNING("Invalid phymode. Defaulting to 802.11b\n"); + case MODE_IEEE80211B: + rs_priv->expected_tpt = iwl_expected_tpt_b; + break; + } + + sta_info_put(sta); + spin_unlock_irqrestore(&rs_priv->lock, flags); + + rssi = priv->last_rx_rssi; + if (rssi == 0) + rssi = IWL_MIN_RSSI_VAL; + + IWL_DEBUG(IWL_DL_INFO | IWL_DL_RATE, "Network RSSI: %d\n", rssi); + + rs_priv->start_rate = iwl_get_rate_index_by_rssi(rssi, priv->phymode); + + IWL_DEBUG_RATE("leave: rssi %d assign rate index: " + "%d (plcp 0x%x)\n", rssi, rs_priv->start_rate, + iwl_rates[rs_priv->start_rate].plcp); +} + +void iwl_rate_control_register(struct ieee80211_hw *hw) +{ + ieee80211_rate_control_register(&rs_ops); +} + +void iwl_rate_control_unregister(struct ieee80211_hw *hw) +{ + ieee80211_rate_control_unregister(&rs_ops); +} + + diff --git a/drivers/net/wireless/iwlwifi/iwl-3945-rs.h b/drivers/net/wireless/iwlwifi/iwl-3945-rs.h new file mode 100644 index 0000000000000000000000000000000000000000..b926738e0ea1b8725356e875d4a90f4c4972d44a --- /dev/null +++ b/drivers/net/wireless/iwlwifi/iwl-3945-rs.h @@ -0,0 +1,191 @@ +/****************************************************************************** + * + * Copyright(c) 2005 - 2007 Intel Corporation. All rights reserved. 
+ * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA + * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. + * + * Contact Information: + * James P. Ketrenos + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + *****************************************************************************/ + +#ifndef __iwl_3945_rs_h__ +#define __iwl_3945_rs_h__ + +struct iwl_rate_info { + u8 plcp; + u8 ieee; + u8 prev_ieee; /* previous rate in IEEE speeds */ + u8 next_ieee; /* next rate in IEEE speeds */ + u8 prev_rs; /* previous rate used in rs algo */ + u8 next_rs; /* next rate used in rs algo */ + u8 prev_rs_tgg; /* previous rate used in TGG rs algo */ + u8 next_rs_tgg; /* next rate used in TGG rs algo */ +}; + +enum { + IWL_RATE_6M_INDEX = 0, + IWL_RATE_9M_INDEX, + IWL_RATE_12M_INDEX, + IWL_RATE_18M_INDEX, + IWL_RATE_24M_INDEX, + IWL_RATE_36M_INDEX, + IWL_RATE_48M_INDEX, + IWL_RATE_54M_INDEX, + IWL_RATE_1M_INDEX, + IWL_RATE_2M_INDEX, + IWL_RATE_5M_INDEX, + IWL_RATE_11M_INDEX, + IWL_RATE_COUNT, + IWL_RATE_INVM_INDEX, + IWL_RATE_INVALID = IWL_RATE_INVM_INDEX +}; + +enum { + IWL_FIRST_OFDM_RATE = IWL_RATE_6M_INDEX, + IWL_LAST_OFDM_RATE = IWL_RATE_54M_INDEX, + IWL_FIRST_CCK_RATE = IWL_RATE_1M_INDEX, + IWL_LAST_CCK_RATE = IWL_RATE_11M_INDEX, +}; + +/* #define vs. enum to keep from defaulting to 'large integer' */ +#define IWL_RATE_6M_MASK (1< + * Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + * + *****************************************************************************/ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#define IWL 3945 + +#include "iwlwifi.h" +#include "iwl-helpers.h" +#include "iwl-3945.h" +#include "iwl-3945-rs.h" + +#define IWL_DECLARE_RATE_INFO(r, ip, in, rp, rn, pp, np) \ + [IWL_RATE_##r##M_INDEX] = { IWL_RATE_##r##M_PLCP, \ + IWL_RATE_##r##M_IEEE, \ + IWL_RATE_##ip##M_INDEX, \ + IWL_RATE_##in##M_INDEX, \ + IWL_RATE_##rp##M_INDEX, \ + IWL_RATE_##rn##M_INDEX, \ + IWL_RATE_##pp##M_INDEX, \ + IWL_RATE_##np##M_INDEX } + +/* + * Parameter order: + * rate, prev rate, next rate, prev tgg rate, next tgg rate + * + * If there isn't a valid next or previous rate then INV is used which + * maps to IWL_RATE_INVALID + * + */ +const struct iwl_rate_info iwl_rates[IWL_RATE_COUNT] = { + IWL_DECLARE_RATE_INFO(6, 5, 9, 5, 11, 5, 11), /* 6mbps */ + IWL_DECLARE_RATE_INFO(9, 6, 11, 5, 11, 5, 11), /* 9mbps */ + IWL_DECLARE_RATE_INFO(12, 11, 18, 11, 18, 11, 18), /* 12mbps */ + IWL_DECLARE_RATE_INFO(18, 12, 24, 12, 24, 11, 24), /* 18mbps */ + IWL_DECLARE_RATE_INFO(24, 18, 36, 18, 36, 18, 36), /* 24mbps */ + IWL_DECLARE_RATE_INFO(36, 24, 48, 24, 48, 24, 48), /* 36mbps */ + IWL_DECLARE_RATE_INFO(48, 36, 54, 36, 54, 36, 54), /* 48mbps */ + IWL_DECLARE_RATE_INFO(54, 48, INV, 48, INV, 48, INV),/* 54mbps */ + IWL_DECLARE_RATE_INFO(1, INV, 2, INV, 2, INV, 2), /* 1mbps */ + IWL_DECLARE_RATE_INFO(2, 1, 5, 1, 5, 1, 5), /* 2mbps */ + IWL_DECLARE_RATE_INFO(5, 2, 6, 2, 11, 2, 11), /*5.5mbps */ + IWL_DECLARE_RATE_INFO(11, 9, 12, 5, 12, 5, 18), /* 11mbps */ +}; + +/* 1 = enable the iwl_disable_events() function */ +#define IWL_EVT_DISABLE (0) +#define IWL_EVT_DISABLE_SIZE (1532/32) + +/** + * iwl_disable_events - Disable selected events in uCode event log + * + * Disable an event by writing "1"s into "disable" + * bitmap in SRAM. Bit position corresponds to Event # (id/type). + * Default values of 0 enable uCode events to be logged. + * Use for only special debugging. This function is just a placeholder as-is, + * you'll need to provide the special bits! ... + * ... and set IWL_EVT_DISABLE to 1. 
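+ *
+ * As an illustrative sketch only (event id 37 below is an arbitrary,
+ * made-up example): since bit position corresponds to the event id,
+ * suppressing event 37 means setting bit (37 % 32) in word (37 / 32)
+ * of the bitmap before it is written to SRAM, e.g.
+ *
+ *	evt_disable[37 / 32] |= 1U << (37 % 32);
+ *
+ * and building with IWL_EVT_DISABLE set to 1 so the write is performed.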
*/ +void iwl_disable_events(struct iwl_priv *priv) +{ + int rc; + int i; + u32 base; /* SRAM address of event log header */ + u32 disable_ptr; /* SRAM address of event-disable bitmap array */ + u32 array_size; /* # of u32 entries in array */ + u32 evt_disable[IWL_EVT_DISABLE_SIZE] = { + 0x00000000, /* 31 - 0 Event id numbers */ + 0x00000000, /* 63 - 32 */ + 0x00000000, /* 95 - 64 */ + 0x00000000, /* 127 - 96 */ + 0x00000000, /* 159 - 128 */ + 0x00000000, /* 191 - 160 */ + 0x00000000, /* 223 - 192 */ + 0x00000000, /* 255 - 224 */ + 0x00000000, /* 287 - 256 */ + 0x00000000, /* 319 - 288 */ + 0x00000000, /* 351 - 320 */ + 0x00000000, /* 383 - 352 */ + 0x00000000, /* 415 - 384 */ + 0x00000000, /* 447 - 416 */ + 0x00000000, /* 479 - 448 */ + 0x00000000, /* 511 - 480 */ + 0x00000000, /* 543 - 512 */ + 0x00000000, /* 575 - 544 */ + 0x00000000, /* 607 - 576 */ + 0x00000000, /* 639 - 608 */ + 0x00000000, /* 671 - 640 */ + 0x00000000, /* 703 - 672 */ + 0x00000000, /* 735 - 704 */ + 0x00000000, /* 767 - 736 */ + 0x00000000, /* 799 - 768 */ + 0x00000000, /* 831 - 800 */ + 0x00000000, /* 863 - 832 */ + 0x00000000, /* 895 - 864 */ + 0x00000000, /* 927 - 896 */ + 0x00000000, /* 959 - 928 */ + 0x00000000, /* 991 - 960 */ + 0x00000000, /* 1023 - 992 */ + 0x00000000, /* 1055 - 1024 */ + 0x00000000, /* 1087 - 1056 */ + 0x00000000, /* 1119 - 1088 */ + 0x00000000, /* 1151 - 1120 */ + 0x00000000, /* 1183 - 1152 */ + 0x00000000, /* 1215 - 1184 */ + 0x00000000, /* 1247 - 1216 */ + 0x00000000, /* 1279 - 1248 */ + 0x00000000, /* 1311 - 1280 */ + 0x00000000, /* 1343 - 1312 */ + 0x00000000, /* 1375 - 1344 */ + 0x00000000, /* 1407 - 1376 */ + 0x00000000, /* 1439 - 1408 */ + 0x00000000, /* 1471 - 1440 */ + 0x00000000, /* 1503 - 1472 */ + }; + + base = le32_to_cpu(priv->card_alive.log_event_table_ptr); + if (!iwl_hw_valid_rtc_data_addr(base)) { + IWL_ERROR("Invalid event log pointer 0x%08X\n", base); + return; + } + + rc = iwl_grab_restricted_access(priv); + if (rc) { + IWL_WARNING("Can not read from adapter at this time.\n"); + return; + } + + disable_ptr = iwl_read_restricted_mem(priv, base + (4 * sizeof(u32))); + array_size = iwl_read_restricted_mem(priv, base + (5 * sizeof(u32))); + iwl_release_restricted_access(priv); + + if (IWL_EVT_DISABLE && (array_size == IWL_EVT_DISABLE_SIZE)) { + IWL_DEBUG_INFO("Disabling selected uCode log events at 0x%x\n", + disable_ptr); + rc = iwl_grab_restricted_access(priv); + for (i = 0; i < IWL_EVT_DISABLE_SIZE; i++) + iwl_write_restricted_mem(priv, + disable_ptr + + (i * sizeof(u32)), + evt_disable[i]); + + iwl_release_restricted_access(priv); + } else { + IWL_DEBUG_INFO("Selected uCode log events may be disabled\n"); + IWL_DEBUG_INFO(" by writing \"1\"s into disable bitmap\n"); + IWL_DEBUG_INFO(" in SRAM at 0x%x, size %d u32s\n", + disable_ptr, array_size); + } + +} + +/** + * iwl3945_get_antenna_flags - Get antenna flags for RXON command + * @priv: eeprom and antenna fields are used to determine antenna flags + * + * priv->eeprom is used to determine if antenna AUX/MAIN are reversed + * priv->antenna specifies the antenna diversity mode: + * + * IWL_ANTENNA_DIVERISTY - NIC selects best antenna by itself + * IWL_ANTENNA_MAIN - Force MAIN antenna + * IWL_ANTENNA_AUX - Force AUX antenna + */ +__le32 iwl3945_get_antenna_flags(const struct iwl_priv *priv) +{ + switch (priv->antenna) { + case IWL_ANTENNA_DIVERSITY: + return 0; + + case IWL_ANTENNA_MAIN: + if (priv->eeprom.antenna_switch_type) + return RXON_FLG_DIS_DIV_MSK | RXON_FLG_ANT_B_MSK; + return RXON_FLG_DIS_DIV_MSK | 
RXON_FLG_ANT_A_MSK; + + case IWL_ANTENNA_AUX: + if (priv->eeprom.antenna_switch_type) + return RXON_FLG_DIS_DIV_MSK | RXON_FLG_ANT_A_MSK; + return RXON_FLG_DIS_DIV_MSK | RXON_FLG_ANT_B_MSK; + } + + /* bad antenna selector value */ + IWL_ERROR("Bad antenna selector value (0x%x)\n", priv->antenna); + return 0; /* "diversity" is default if error */ +} + +/***************************************************************************** + * + * Intel PRO/Wireless 3945ABG/BG Network Connection + * + * RX handler implementations + * + * Used by iwl-base.c + * + *****************************************************************************/ + +void iwl_hw_rx_statistics(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb) +{ + struct iwl_rx_packet *pkt = (void *)rxb->skb->data; + IWL_DEBUG_RX("Statistics notification received (%d vs %d).\n", + (int)sizeof(struct iwl_notif_statistics), + le32_to_cpu(pkt->len)); + + memcpy(&priv->statistics, pkt->u.raw, sizeof(priv->statistics)); + + priv->last_statistics_time = jiffies; +} + +static void iwl3945_handle_data_packet(struct iwl_priv *priv, int is_data, + struct iwl_rx_mem_buffer *rxb, + struct ieee80211_rx_status *stats, + u16 phy_flags) +{ + struct ieee80211_hdr *hdr; + struct iwl_rx_packet *pkt = (struct iwl_rx_packet *)rxb->skb->data; + struct iwl_rx_frame_hdr *rx_hdr = IWL_RX_HDR(pkt); + struct iwl_rx_frame_end *rx_end = IWL_RX_END(pkt); + short len = le16_to_cpu(rx_hdr->len); + + /* We received data from the HW, so stop the watchdog */ + if (unlikely((len + IWL_RX_FRAME_SIZE) > skb_tailroom(rxb->skb))) { + IWL_DEBUG_DROP("Corruption detected!\n"); + return; + } + + /* We only process data packets if the interface is open */ + if (unlikely(!priv->is_open)) { + IWL_DEBUG_DROP_LIMIT + ("Dropping packet while interface is not open.\n"); + return; + } + if (priv->iw_mode == IEEE80211_IF_TYPE_MNTR) { + if (iwl_param_hwcrypto) + iwl_set_decrypted_flag(priv, rxb->skb, + le32_to_cpu(rx_end->status), + stats); + iwl_handle_data_packet_monitor(priv, rxb, IWL_RX_DATA(pkt), + len, stats, phy_flags); + return; + } + + skb_reserve(rxb->skb, (void *)rx_hdr->payload - (void *)pkt); + /* Set the size of the skb to the size of the frame */ + skb_put(rxb->skb, le16_to_cpu(rx_hdr->len)); + + hdr = (void *)rxb->skb->data; + + if (iwl_param_hwcrypto) + iwl_set_decrypted_flag(priv, rxb->skb, + le32_to_cpu(rx_end->status), stats); + + ieee80211_rx_irqsafe(priv->hw, rxb->skb, stats); + rxb->skb = NULL; +} + +static void iwl3945_rx_reply_rx(struct iwl_priv *priv, + struct iwl_rx_mem_buffer *rxb) +{ + struct iwl_rx_packet *pkt = (void *)rxb->skb->data; + struct iwl_rx_frame_stats *rx_stats = IWL_RX_STATS(pkt); + struct iwl_rx_frame_hdr *rx_hdr = IWL_RX_HDR(pkt); + struct iwl_rx_frame_end *rx_end = IWL_RX_END(pkt); + struct ieee80211_hdr *header; + u16 phy_flags = le16_to_cpu(rx_hdr->phy_flags); + u16 rx_stats_sig_avg = le16_to_cpu(rx_stats->sig_avg); + u16 rx_stats_noise_diff = le16_to_cpu(rx_stats->noise_diff); + struct ieee80211_rx_status stats = { + .mactime = le64_to_cpu(rx_end->timestamp), + .freq = ieee80211chan2mhz(le16_to_cpu(rx_hdr->channel)), + .channel = le16_to_cpu(rx_hdr->channel), + .phymode = (rx_hdr->phy_flags & RX_RES_PHY_FLAGS_BAND_24_MSK) ? 
+ MODE_IEEE80211G : MODE_IEEE80211A, + .antenna = 0, + .rate = rx_hdr->rate, + .flag = 0, + }; + u8 network_packet; + int snr; + + if ((unlikely(rx_stats->phy_count > 20))) { + IWL_DEBUG_DROP + ("dsp size out of range [0,20]: " + "%d/n", rx_stats->phy_count); + return; + } + + if (!(rx_end->status & RX_RES_STATUS_NO_CRC32_ERROR) + || !(rx_end->status & RX_RES_STATUS_NO_RXE_OVERFLOW)) { + IWL_DEBUG_RX("Bad CRC or FIFO: 0x%08X.\n", rx_end->status); + return; + } + + if (priv->iw_mode == IEEE80211_IF_TYPE_MNTR) { + iwl3945_handle_data_packet(priv, 1, rxb, &stats, phy_flags); + return; + } + + /* Convert 3945's rssi indicator to dBm */ + stats.ssi = rx_stats->rssi - IWL_RSSI_OFFSET; + + /* Set default noise value to -127 */ + if (priv->last_rx_noise == 0) + priv->last_rx_noise = IWL_NOISE_MEAS_NOT_AVAILABLE; + + /* 3945 provides noise info for OFDM frames only. + * sig_avg and noise_diff are measured by the 3945's digital signal + * processor (DSP), and indicate linear levels of signal level and + * distortion/noise within the packet preamble after + * automatic gain control (AGC). sig_avg should stay fairly + * constant if the radio's AGC is working well. + * Since these values are linear (not dB or dBm), linear + * signal-to-noise ratio (SNR) is (sig_avg / noise_diff). + * Convert linear SNR to dB SNR, then subtract that from rssi dBm + * to obtain noise level in dBm. + * Calculate stats.signal (quality indicator in %) based on SNR. */ + if (rx_stats_noise_diff) { + snr = rx_stats_sig_avg / rx_stats_noise_diff; + stats.noise = stats.ssi - iwl_calc_db_from_ratio(snr); + stats.signal = iwl_calc_sig_qual(stats.ssi, stats.noise); + + /* If noise info not available, calculate signal quality indicator (%) + * using just the dBm signal level. */ + } else { + stats.noise = priv->last_rx_noise; + stats.signal = iwl_calc_sig_qual(stats.ssi, 0); + } + + + IWL_DEBUG_STATS("Rssi %d noise %d qual %d sig_avg %d noise_diff %d\n", + stats.ssi, stats.noise, stats.signal, + rx_stats_sig_avg, rx_stats_noise_diff); + + stats.freq = ieee80211chan2mhz(stats.channel); + + /* can be covered by iwl_report_frame() in most cases */ +/* IWL_DEBUG_RX("RX status: 0x%08X\n", rx_end->status); */ + + header = (struct ieee80211_hdr *)IWL_RX_DATA(pkt); + + network_packet = iwl_is_network_packet(priv, header); + +#ifdef CONFIG_IWLWIFI_DEBUG + if (iwl_debug_level & IWL_DL_STATS && net_ratelimit()) + IWL_DEBUG_STATS + ("[%c] %d RSSI: %d Signal: %u, Noise: %u, Rate: %u\n", + network_packet ? 
'*' : ' ', + stats.channel, stats.ssi, stats.ssi, + stats.ssi, stats.rate); + + if (iwl_debug_level & (IWL_DL_RX)) + /* Set "1" to report good data frames in groups of 100 */ + iwl_report_frame(priv, pkt, header, 1); +#endif + + if (network_packet) { + priv->last_beacon_time = le32_to_cpu(rx_end->beacon_timestamp); + priv->last_tsf = le64_to_cpu(rx_end->timestamp); + priv->last_rx_rssi = stats.ssi; + priv->last_rx_noise = stats.noise; + } + + switch (le16_to_cpu(header->frame_control) & IEEE80211_FCTL_FTYPE) { + case IEEE80211_FTYPE_MGMT: + switch (le16_to_cpu(header->frame_control) & + IEEE80211_FCTL_STYPE) { + case IEEE80211_STYPE_PROBE_RESP: + case IEEE80211_STYPE_BEACON:{ + /* If this is a beacon or probe response for + * our network then cache the beacon + * timestamp */ + if ((((priv->iw_mode == IEEE80211_IF_TYPE_STA) + && !compare_ether_addr(header->addr2, + priv->bssid)) || + ((priv->iw_mode == IEEE80211_IF_TYPE_IBSS) + && !compare_ether_addr(header->addr3, + priv->bssid)))) { + struct ieee80211_mgmt *mgmt = + (struct ieee80211_mgmt *)header; + __le32 *pos; + pos = + (__le32 *) & mgmt->u.beacon. + timestamp; + priv->timestamp0 = le32_to_cpu(pos[0]); + priv->timestamp1 = le32_to_cpu(pos[1]); + priv->beacon_int = le16_to_cpu( + mgmt->u.beacon.beacon_int); + if (priv->call_post_assoc_from_beacon && + (priv->iw_mode == + IEEE80211_IF_TYPE_STA)) + queue_work(priv->workqueue, + &priv->post_associate.work); + + priv->call_post_assoc_from_beacon = 0; + } + + break; + } + + case IEEE80211_STYPE_ACTION: + /* TODO: Parse 802.11h frames for CSA... */ + break; + + /* + * TODO: There is no callback function from upper + * stack to inform us when associated status. this + * work around to sniff assoc_resp management frame + * and finish the association process. + */ + case IEEE80211_STYPE_ASSOC_RESP: + case IEEE80211_STYPE_REASSOC_RESP:{ + struct ieee80211_mgmt *mgnt = + (struct ieee80211_mgmt *)header; + priv->assoc_id = (~((1 << 15) | (1 << 14)) & + le16_to_cpu(mgnt->u. 
+ assoc_resp.aid)); + priv->assoc_capability = + le16_to_cpu(mgnt->u.assoc_resp.capab_info); + if (priv->beacon_int) + queue_work(priv->workqueue, + &priv->post_associate.work); + else + priv->call_post_assoc_from_beacon = 1; + break; + } + + case IEEE80211_STYPE_PROBE_REQ:{ + DECLARE_MAC_BUF(mac1); + DECLARE_MAC_BUF(mac2); + DECLARE_MAC_BUF(mac3); + if (priv->iw_mode == IEEE80211_IF_TYPE_IBSS) + IWL_DEBUG_DROP + ("Dropping (non network): %s" + ", %s, %s\n", + print_mac(mac1, header->addr1), + print_mac(mac2, header->addr2), + print_mac(mac3, header->addr3)); + return; + } + } + + iwl3945_handle_data_packet(priv, 0, rxb, &stats, phy_flags); + break; + + case IEEE80211_FTYPE_CTL: + break; + + case IEEE80211_FTYPE_DATA: { + DECLARE_MAC_BUF(mac1); + DECLARE_MAC_BUF(mac2); + DECLARE_MAC_BUF(mac3); + + if (unlikely(is_duplicate_packet(priv, header))) + IWL_DEBUG_DROP("Dropping (dup): %s, %s, %s\n", + print_mac(mac1, header->addr1), + print_mac(mac2, header->addr2), + print_mac(mac3, header->addr3)); + else + iwl3945_handle_data_packet(priv, 1, rxb, &stats, + phy_flags); + break; + } + } +} + +int iwl_hw_txq_attach_buf_to_tfd(struct iwl_priv *priv, void *ptr, + dma_addr_t addr, u16 len) +{ + int count; + u32 pad; + struct iwl_tfd_frame *tfd = (struct iwl_tfd_frame *)ptr; + + count = TFD_CTL_COUNT_GET(le32_to_cpu(tfd->control_flags)); + pad = TFD_CTL_PAD_GET(le32_to_cpu(tfd->control_flags)); + + if ((count >= NUM_TFD_CHUNKS) || (count < 0)) { + IWL_ERROR("Error can not send more than %d chunks\n", + NUM_TFD_CHUNKS); + return -EINVAL; + } + + tfd->pa[count].addr = cpu_to_le32(addr); + tfd->pa[count].len = cpu_to_le32(len); + + count++; + + tfd->control_flags = cpu_to_le32(TFD_CTL_COUNT_SET(count) | + TFD_CTL_PAD_SET(pad)); + + return 0; +} + +/** + * iwl_hw_txq_free_tfd - Free one TFD, those at index [txq->q.last_used] + * + * Does NOT advance any indexes + */ +int iwl_hw_txq_free_tfd(struct iwl_priv *priv, struct iwl_tx_queue *txq) +{ + struct iwl_tfd_frame *bd_tmp = (struct iwl_tfd_frame *)&txq->bd[0]; + struct iwl_tfd_frame *bd = &bd_tmp[txq->q.last_used]; + struct pci_dev *dev = priv->pci_dev; + int i; + int counter; + + /* classify bd */ + if (txq->q.id == IWL_CMD_QUEUE_NUM) + /* nothing to cleanup after for host commands */ + return 0; + + /* sanity check */ + counter = TFD_CTL_COUNT_GET(le32_to_cpu(bd->control_flags)); + if (counter > NUM_TFD_CHUNKS) { + IWL_ERROR("Too many chunks: %i\n", counter); + /* @todo issue fatal error, it is quite serious situation */ + return 0; + } + + /* unmap chunks if any */ + + for (i = 1; i < counter; i++) { + pci_unmap_single(dev, le32_to_cpu(bd->pa[i].addr), + le32_to_cpu(bd->pa[i].len), PCI_DMA_TODEVICE); + if (txq->txb[txq->q.last_used].skb[0]) { + struct sk_buff *skb = txq->txb[txq->q.last_used].skb[0]; + if (txq->txb[txq->q.last_used].skb[0]) { + /* Can be called from interrupt context */ + dev_kfree_skb_any(skb); + txq->txb[txq->q.last_used].skb[0] = NULL; + } + } + } + return 0; +} + +u8 iwl_hw_find_station(struct iwl_priv *priv, const u8 *addr) +{ + int i; + int ret = IWL_INVALID_STATION; + unsigned long flags; + DECLARE_MAC_BUF(mac); + + spin_lock_irqsave(&priv->sta_lock, flags); + for (i = IWL_STA_ID; i < priv->hw_setting.max_stations; i++) + if ((priv->stations[i].used) && + (!compare_ether_addr + (priv->stations[i].sta.sta.addr, addr))) { + ret = i; + goto out; + } + + IWL_DEBUG_INFO("can not find STA %s (total %d)\n", + print_mac(mac, addr), priv->num_stations); + out: + spin_unlock_irqrestore(&priv->sta_lock, flags); + return ret; +} + +/** + 
* iwl_hw_build_tx_cmd_rate - Add rate portion to TX_CMD: + * +*/ +void iwl_hw_build_tx_cmd_rate(struct iwl_priv *priv, + struct iwl_cmd *cmd, + struct ieee80211_tx_control *ctrl, + struct ieee80211_hdr *hdr, int sta_id, int tx_id) +{ + unsigned long flags; + u16 rate_index = min(ctrl->tx_rate & 0xffff, IWL_RATE_COUNT - 1); + u16 rate_mask; + int rate; + u8 rts_retry_limit; + u8 data_retry_limit; + __le32 tx_flags; + u16 fc = le16_to_cpu(hdr->frame_control); + + rate = iwl_rates[rate_index].plcp; + tx_flags = cmd->cmd.tx.tx_flags; + + /* We need to figure out how to get the sta->supp_rates while + * in this running context; perhaps encoding into ctrl->tx_rate? */ + rate_mask = IWL_RATES_MASK; + + spin_lock_irqsave(&priv->sta_lock, flags); + + priv->stations[sta_id].current_rate.rate_n_flags = rate; + + if ((priv->iw_mode == IEEE80211_IF_TYPE_IBSS) && + (sta_id != IWL3945_BROADCAST_ID) && + (sta_id != IWL_MULTICAST_ID)) + priv->stations[IWL_STA_ID].current_rate.rate_n_flags = rate; + + spin_unlock_irqrestore(&priv->sta_lock, flags); + + if (tx_id >= IWL_CMD_QUEUE_NUM) + rts_retry_limit = 3; + else + rts_retry_limit = 7; + + if (ieee80211_is_probe_response(fc)) { + data_retry_limit = 3; + if (data_retry_limit < rts_retry_limit) + rts_retry_limit = data_retry_limit; + } else + data_retry_limit = IWL_DEFAULT_TX_RETRY; + + if (priv->data_retry_limit != -1) + data_retry_limit = priv->data_retry_limit; + + if ((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_MGMT) { + switch (fc & IEEE80211_FCTL_STYPE) { + case IEEE80211_STYPE_AUTH: + case IEEE80211_STYPE_DEAUTH: + case IEEE80211_STYPE_ASSOC_REQ: + case IEEE80211_STYPE_REASSOC_REQ: + if (tx_flags & TX_CMD_FLG_RTS_MSK) { + tx_flags &= ~TX_CMD_FLG_RTS_MSK; + tx_flags |= TX_CMD_FLG_CTS_MSK; + } + break; + default: + break; + } + } + + cmd->cmd.tx.rts_retry_limit = rts_retry_limit; + cmd->cmd.tx.data_retry_limit = data_retry_limit; + cmd->cmd.tx.rate = rate; + cmd->cmd.tx.tx_flags = tx_flags; + + /* OFDM */ + cmd->cmd.tx.supp_rates[0] = rate_mask & IWL_OFDM_RATES_MASK; + + /* CCK */ + cmd->cmd.tx.supp_rates[1] = (rate_mask >> 8) & 0xF; + + IWL_DEBUG_RATE("Tx sta id: %d, rate: %d (plcp), flags: 0x%4X " + "cck/ofdm mask: 0x%x/0x%x\n", sta_id, + cmd->cmd.tx.rate, le32_to_cpu(cmd->cmd.tx.tx_flags), + cmd->cmd.tx.supp_rates[1], cmd->cmd.tx.supp_rates[0]); +} + +u8 iwl3945_sync_sta(struct iwl_priv *priv, int sta_id, u16 tx_rate, u8 flags) +{ + unsigned long flags_spin; + struct iwl_station_entry *station; + + if (sta_id == IWL_INVALID_STATION) + return IWL_INVALID_STATION; + + spin_lock_irqsave(&priv->sta_lock, flags_spin); + station = &priv->stations[sta_id]; + + station->sta.sta.modify_mask = STA_MODIFY_TX_RATE_MSK; + station->sta.rate_n_flags = cpu_to_le16(tx_rate); + station->current_rate.rate_n_flags = tx_rate; + station->sta.mode = STA_CONTROL_MODIFY_MSK; + + spin_unlock_irqrestore(&priv->sta_lock, flags_spin); + + iwl_send_add_station(priv, &station->sta, flags); + IWL_DEBUG_RATE("SCALE sync station %d to rate %d\n", + sta_id, tx_rate); + return sta_id; +} + +void iwl_hw_card_show_info(struct iwl_priv *priv) +{ + IWL_DEBUG_INFO("3945ABG HW Version %u.%u.%u\n", + ((priv->eeprom.board_revision >> 8) & 0x0F), + ((priv->eeprom.board_revision >> 8) >> 4), + (priv->eeprom.board_revision & 0x00FF)); + + IWL_DEBUG_INFO("3945ABG PBA Number %.*s\n", + (int)sizeof(priv->eeprom.board_pba_number), + priv->eeprom.board_pba_number); + + IWL_DEBUG_INFO("EEPROM_ANTENNA_SWITCH_TYPE is 0x%02X\n", + priv->eeprom.antenna_switch_type); +} + +static int 
iwl3945_nic_set_pwr_src(struct iwl_priv *priv, int pwr_max) +{ + int rc; + unsigned long flags; + + spin_lock_irqsave(&priv->lock, flags); + rc = iwl_grab_restricted_access(priv); + if (rc) { + spin_unlock_irqrestore(&priv->lock, flags); + return rc; + } + + if (!pwr_max) { + u32 val; + + rc = pci_read_config_dword(priv->pci_dev, + PCI_POWER_SOURCE, &val); + if (val & PCI_CFG_PMC_PME_FROM_D3COLD_SUPPORT) { + iwl_set_bits_mask_restricted_reg(priv, APMG_PS_CTRL_REG, + APMG_PS_CTRL_VAL_PWR_SRC_VAUX, + ~APMG_PS_CTRL_MSK_PWR_SRC); + iwl_release_restricted_access(priv); + + iwl_poll_bit(priv, CSR_GPIO_IN, + CSR_GPIO_IN_VAL_VAUX_PWR_SRC, + CSR_GPIO_IN_BIT_AUX_POWER, 5000); + } else + iwl_release_restricted_access(priv); + } else { + iwl_set_bits_mask_restricted_reg(priv, APMG_PS_CTRL_REG, + APMG_PS_CTRL_VAL_PWR_SRC_VMAIN, + ~APMG_PS_CTRL_MSK_PWR_SRC); + + iwl_release_restricted_access(priv); + iwl_poll_bit(priv, CSR_GPIO_IN, CSR_GPIO_IN_VAL_VMAIN_PWR_SRC, + CSR_GPIO_IN_BIT_AUX_POWER, 5000); /* uS */ + } + spin_unlock_irqrestore(&priv->lock, flags); + + return rc; +} + +static int iwl3945_rx_init(struct iwl_priv *priv, struct iwl_rx_queue *rxq) +{ + int rc; + unsigned long flags; + + spin_lock_irqsave(&priv->lock, flags); + rc = iwl_grab_restricted_access(priv); + if (rc) { + spin_unlock_irqrestore(&priv->lock, flags); + return rc; + } + + iwl_write_restricted(priv, FH_RCSR_RBD_BASE(0), rxq->dma_addr); + iwl_write_restricted(priv, FH_RCSR_RPTR_ADDR(0), + priv->hw_setting.shared_phys + + offsetof(struct iwl_shared, rx_read_ptr[0])); + iwl_write_restricted(priv, FH_RCSR_WPTR(0), 0); + iwl_write_restricted(priv, FH_RCSR_CONFIG(0), + ALM_FH_RCSR_RX_CONFIG_REG_VAL_DMA_CHNL_EN_ENABLE | + ALM_FH_RCSR_RX_CONFIG_REG_VAL_RDRBD_EN_ENABLE | + ALM_FH_RCSR_RX_CONFIG_REG_BIT_WR_STTS_EN | + ALM_FH_RCSR_RX_CONFIG_REG_VAL_MAX_FRAG_SIZE_128 | + (RX_QUEUE_SIZE_LOG << ALM_FH_RCSR_RX_CONFIG_REG_POS_RBDC_SIZE) | + ALM_FH_RCSR_RX_CONFIG_REG_VAL_IRQ_DEST_INT_HOST | + (1 << ALM_FH_RCSR_RX_CONFIG_REG_POS_IRQ_RBTH) | + ALM_FH_RCSR_RX_CONFIG_REG_VAL_MSG_MODE_FH); + + /* fake read to flush all prev I/O */ + iwl_read_restricted(priv, FH_RSSR_CTRL); + + iwl_release_restricted_access(priv); + spin_unlock_irqrestore(&priv->lock, flags); + + return 0; +} + +static int iwl3945_tx_reset(struct iwl_priv *priv) +{ + int rc; + unsigned long flags; + + spin_lock_irqsave(&priv->lock, flags); + rc = iwl_grab_restricted_access(priv); + if (rc) { + spin_unlock_irqrestore(&priv->lock, flags); + return rc; + } + + /* bypass mode */ + iwl_write_restricted_reg(priv, SCD_MODE_REG, 0x2); + + /* RA 0 is active */ + iwl_write_restricted_reg(priv, SCD_ARASTAT_REG, 0x01); + + /* all 6 fifo are active */ + iwl_write_restricted_reg(priv, SCD_TXFACT_REG, 0x3f); + + iwl_write_restricted_reg(priv, SCD_SBYP_MODE_1_REG, 0x010000); + iwl_write_restricted_reg(priv, SCD_SBYP_MODE_2_REG, 0x030002); + iwl_write_restricted_reg(priv, SCD_TXF4MF_REG, 0x000004); + iwl_write_restricted_reg(priv, SCD_TXF5MF_REG, 0x000005); + + iwl_write_restricted(priv, FH_TSSR_CBB_BASE, + priv->hw_setting.shared_phys); + + iwl_write_restricted(priv, FH_TSSR_MSG_CONFIG, + ALM_FH_TSSR_TX_MSG_CONFIG_REG_VAL_SNOOP_RD_TXPD_ON | + ALM_FH_TSSR_TX_MSG_CONFIG_REG_VAL_ORDER_RD_TXPD_ON | + ALM_FH_TSSR_TX_MSG_CONFIG_REG_VAL_MAX_FRAG_SIZE_128B | + ALM_FH_TSSR_TX_MSG_CONFIG_REG_VAL_SNOOP_RD_TFD_ON | + ALM_FH_TSSR_TX_MSG_CONFIG_REG_VAL_ORDER_RD_CBB_ON | + ALM_FH_TSSR_TX_MSG_CONFIG_REG_VAL_ORDER_RSP_WAIT_TH | + ALM_FH_TSSR_TX_MSG_CONFIG_REG_VAL_RSP_WAIT_TH); + + iwl_release_restricted_access(priv); + 
spin_unlock_irqrestore(&priv->lock, flags); + + return 0; +} + +/** + * iwl3945_txq_ctx_reset - Reset TX queue context + * + * Destroys all DMA structures and initialize them again + */ +static int iwl3945_txq_ctx_reset(struct iwl_priv *priv) +{ + int rc; + int txq_id, slots_num; + + iwl_hw_txq_ctx_free(priv); + + /* Tx CMD queue */ + rc = iwl3945_tx_reset(priv); + if (rc) + goto error; + + /* Tx queue(s) */ + for (txq_id = 0; txq_id < TFD_QUEUE_MAX; txq_id++) { + slots_num = (txq_id == IWL_CMD_QUEUE_NUM) ? + TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS; + rc = iwl_tx_queue_init(priv, &priv->txq[txq_id], slots_num, + txq_id); + if (rc) { + IWL_ERROR("Tx %d queue init failed\n", txq_id); + goto error; + } + } + + return rc; + + error: + iwl_hw_txq_ctx_free(priv); + return rc; +} + +int iwl_hw_nic_init(struct iwl_priv *priv) +{ + u8 rev_id; + int rc; + unsigned long flags; + struct iwl_rx_queue *rxq = &priv->rxq; + + iwl_power_init_handle(priv); + + spin_lock_irqsave(&priv->lock, flags); + iwl_set_bit(priv, CSR_ANA_PLL_CFG, (1 << 24)); + iwl_set_bit(priv, CSR_GIO_CHICKEN_BITS, + CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX); + + iwl_set_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE); + rc = iwl_poll_bit(priv, CSR_GP_CNTRL, + CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, + CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000); + if (rc < 0) { + spin_unlock_irqrestore(&priv->lock, flags); + IWL_DEBUG_INFO("Failed to init the card\n"); + return rc; + } + + rc = iwl_grab_restricted_access(priv); + if (rc) { + spin_unlock_irqrestore(&priv->lock, flags); + return rc; + } + iwl_write_restricted_reg(priv, APMG_CLK_EN_REG, + APMG_CLK_VAL_DMA_CLK_RQT | + APMG_CLK_VAL_BSM_CLK_RQT); + udelay(20); + iwl_set_bits_restricted_reg(priv, APMG_PCIDEV_STT_REG, + APMG_PCIDEV_STT_VAL_L1_ACT_DIS); + iwl_release_restricted_access(priv); + spin_unlock_irqrestore(&priv->lock, flags); + + /* Determine HW type */ + rc = pci_read_config_byte(priv->pci_dev, PCI_REVISION_ID, &rev_id); + if (rc) + return rc; + IWL_DEBUG_INFO("HW Revision ID = 0x%X\n", rev_id); + + iwl3945_nic_set_pwr_src(priv, 1); + spin_lock_irqsave(&priv->lock, flags); + + if (rev_id & PCI_CFG_REV_ID_BIT_RTP) + IWL_DEBUG_INFO("RTP type \n"); + else if (rev_id & PCI_CFG_REV_ID_BIT_BASIC_SKU) { + IWL_DEBUG_INFO("ALM-MB type\n"); + iwl_set_bit(priv, CSR_HW_IF_CONFIG_REG, + CSR_HW_IF_CONFIG_REG_BIT_ALMAGOR_MB); + } else { + IWL_DEBUG_INFO("ALM-MM type\n"); + iwl_set_bit(priv, CSR_HW_IF_CONFIG_REG, + CSR_HW_IF_CONFIG_REG_BIT_ALMAGOR_MM); + } + + spin_unlock_irqrestore(&priv->lock, flags); + + /* Initialize the EEPROM */ + rc = iwl_eeprom_init(priv); + if (rc) + return rc; + + spin_lock_irqsave(&priv->lock, flags); + if (EEPROM_SKU_CAP_OP_MODE_MRC == priv->eeprom.sku_cap) { + IWL_DEBUG_INFO("SKU OP mode is mrc\n"); + iwl_set_bit(priv, CSR_HW_IF_CONFIG_REG, + CSR_HW_IF_CONFIG_REG_BIT_SKU_MRC); + } else + IWL_DEBUG_INFO("SKU OP mode is basic\n"); + + if ((priv->eeprom.board_revision & 0xF0) == 0xD0) { + IWL_DEBUG_INFO("3945ABG revision is 0x%X\n", + priv->eeprom.board_revision); + iwl_set_bit(priv, CSR_HW_IF_CONFIG_REG, + CSR_HW_IF_CONFIG_REG_BIT_BOARD_TYPE); + } else { + IWL_DEBUG_INFO("3945ABG revision is 0x%X\n", + priv->eeprom.board_revision); + iwl_clear_bit(priv, CSR_HW_IF_CONFIG_REG, + CSR_HW_IF_CONFIG_REG_BIT_BOARD_TYPE); + } + + if (priv->eeprom.almgor_m_version <= 1) { + iwl_set_bit(priv, CSR_HW_IF_CONFIG_REG, + CSR_HW_IF_CONFIG_REG_BITS_SILICON_TYPE_A); + IWL_DEBUG_INFO("Card M type A version is 0x%X\n", + priv->eeprom.almgor_m_version); + } else { + 
IWL_DEBUG_INFO("Card M type B version is 0x%X\n", + priv->eeprom.almgor_m_version); + iwl_set_bit(priv, CSR_HW_IF_CONFIG_REG, + CSR_HW_IF_CONFIG_REG_BITS_SILICON_TYPE_B); + } + spin_unlock_irqrestore(&priv->lock, flags); + + if (priv->eeprom.sku_cap & EEPROM_SKU_CAP_SW_RF_KILL_ENABLE) + IWL_DEBUG_RF_KILL("SW RF KILL supported in EEPROM.\n"); + + if (priv->eeprom.sku_cap & EEPROM_SKU_CAP_HW_RF_KILL_ENABLE) + IWL_DEBUG_RF_KILL("HW RF KILL supported in EEPROM.\n"); + + /* Allocate the RX queue, or reset if it is already allocated */ + if (!rxq->bd) { + rc = iwl_rx_queue_alloc(priv); + if (rc) { + IWL_ERROR("Unable to initialize Rx queue\n"); + return -ENOMEM; + } + } else + iwl_rx_queue_reset(priv, rxq); + + iwl_rx_replenish(priv); + + iwl3945_rx_init(priv, rxq); + + spin_lock_irqsave(&priv->lock, flags); + + /* Look at using this instead: + rxq->need_update = 1; + iwl_rx_queue_update_write_ptr(priv, rxq); + */ + + rc = iwl_grab_restricted_access(priv); + if (rc) { + spin_unlock_irqrestore(&priv->lock, flags); + return rc; + } + iwl_write_restricted(priv, FH_RCSR_WPTR(0), rxq->write & ~7); + iwl_release_restricted_access(priv); + + spin_unlock_irqrestore(&priv->lock, flags); + + rc = iwl3945_txq_ctx_reset(priv); + if (rc) + return rc; + + set_bit(STATUS_INIT, &priv->status); + + return 0; +} + +/** + * iwl_hw_txq_ctx_free - Free TXQ Context + * + * Destroy all TX DMA queues and structures + */ +void iwl_hw_txq_ctx_free(struct iwl_priv *priv) +{ + int txq_id; + + /* Tx queues */ + for (txq_id = 0; txq_id < TFD_QUEUE_MAX; txq_id++) + iwl_tx_queue_free(priv, &priv->txq[txq_id]); +} + +void iwl_hw_txq_ctx_stop(struct iwl_priv *priv) +{ + int queue; + unsigned long flags; + + spin_lock_irqsave(&priv->lock, flags); + if (iwl_grab_restricted_access(priv)) { + spin_unlock_irqrestore(&priv->lock, flags); + iwl_hw_txq_ctx_free(priv); + return; + } + + /* stop SCD */ + iwl_write_restricted_reg(priv, SCD_MODE_REG, 0); + + /* reset TFD queues */ + for (queue = TFD_QUEUE_MIN; queue < TFD_QUEUE_MAX; queue++) { + iwl_write_restricted(priv, FH_TCSR_CONFIG(queue), 0x0); + iwl_poll_restricted_bit(priv, FH_TSSR_TX_STATUS, + ALM_FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(queue), + 1000); + } + + iwl_release_restricted_access(priv); + spin_unlock_irqrestore(&priv->lock, flags); + + iwl_hw_txq_ctx_free(priv); +} + +int iwl_hw_nic_stop_master(struct iwl_priv *priv) +{ + int rc = 0; + u32 reg_val; + unsigned long flags; + + spin_lock_irqsave(&priv->lock, flags); + + /* set stop master bit */ + iwl_set_bit(priv, CSR_RESET, CSR_RESET_REG_FLAG_STOP_MASTER); + + reg_val = iwl_read32(priv, CSR_GP_CNTRL); + + if (CSR_GP_CNTRL_REG_FLAG_MAC_POWER_SAVE == + (reg_val & CSR_GP_CNTRL_REG_MSK_POWER_SAVE_TYPE)) + IWL_DEBUG_INFO("Card in power save, master is already " + "stopped\n"); + else { + rc = iwl_poll_bit(priv, CSR_RESET, + CSR_RESET_REG_FLAG_MASTER_DISABLED, + CSR_RESET_REG_FLAG_MASTER_DISABLED, 100); + if (rc < 0) { + spin_unlock_irqrestore(&priv->lock, flags); + return rc; + } + } + + spin_unlock_irqrestore(&priv->lock, flags); + IWL_DEBUG_INFO("stop master\n"); + + return rc; +} + +int iwl_hw_nic_reset(struct iwl_priv *priv) +{ + int rc; + unsigned long flags; + + iwl_hw_nic_stop_master(priv); + + spin_lock_irqsave(&priv->lock, flags); + + iwl_set_bit(priv, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET); + + rc = iwl_poll_bit(priv, CSR_GP_CNTRL, + CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, + CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000); + + rc = iwl_grab_restricted_access(priv); + if (!rc) { + iwl_write_restricted_reg(priv, 
APMG_CLK_CTRL_REG, + APMG_CLK_VAL_BSM_CLK_RQT); + + udelay(10); + + iwl_set_bit(priv, CSR_GP_CNTRL, + CSR_GP_CNTRL_REG_FLAG_INIT_DONE); + + iwl_write_restricted_reg(priv, APMG_RTC_INT_MSK_REG, 0x0); + iwl_write_restricted_reg(priv, APMG_RTC_INT_STT_REG, + 0xFFFFFFFF); + + /* enable DMA */ + iwl_write_restricted_reg(priv, APMG_CLK_EN_REG, + APMG_CLK_VAL_DMA_CLK_RQT | + APMG_CLK_VAL_BSM_CLK_RQT); + udelay(10); + + iwl_set_bits_restricted_reg(priv, APMG_PS_CTRL_REG, + APMG_PS_CTRL_VAL_RESET_REQ); + udelay(5); + iwl_clear_bits_restricted_reg(priv, APMG_PS_CTRL_REG, + APMG_PS_CTRL_VAL_RESET_REQ); + iwl_release_restricted_access(priv); + } + + /* Clear the 'host command active' bit... */ + clear_bit(STATUS_HCMD_ACTIVE, &priv->status); + + wake_up_interruptible(&priv->wait_command_queue); + spin_unlock_irqrestore(&priv->lock, flags); + + return rc; +} + +/** + * iwl_hw_reg_adjust_power_by_temp - return index delta into power gain settings table + */ +static int iwl_hw_reg_adjust_power_by_temp(int new_reading, int old_reading) +{ + return (new_reading - old_reading) * (-11) / 100; +} + +/** + * iwl_hw_reg_temp_out_of_range - Keep temperature in sane range + */ +static inline int iwl_hw_reg_temp_out_of_range(int temperature) +{ + return (((temperature < -260) || (temperature > 25)) ? 1 : 0); +} + +int iwl_hw_get_temperature(struct iwl_priv *priv) +{ + return iwl_read32(priv, CSR_UCODE_DRV_GP2); +} + +/** + * iwl_hw_reg_txpower_get_temperature - get current temperature by reading from NIC + */ +static int iwl_hw_reg_txpower_get_temperature(struct iwl_priv *priv) +{ + int temperature; + + temperature = iwl_hw_get_temperature(priv); + + /* driver's okay range is -260 to +25. + * human readable okay range is 0 to +285 */ + IWL_DEBUG_INFO("Temperature: %d\n", temperature + IWL_TEMP_CONVERT); + + /* handle insane temp reading */ + if (iwl_hw_reg_temp_out_of_range(temperature)) { + IWL_ERROR("Error bad temperature value %d\n", temperature); + + /* if really really hot(?), + * substitute the 3rd band/group's temp measured at factory */ + if (priv->last_temperature > 100) + temperature = priv->eeprom.groups[2].temperature; + else /* else use most recent "sane" value from driver */ + temperature = priv->last_temperature; + } + + return temperature; /* raw, not "human readable" */ +} + +/* Adjust Txpower only if temperature variance is greater than threshold. + * + * Both are lower than older versions' 9 degrees */ +#define IWL_TEMPERATURE_LIMIT_TIMER 6 + +/** + * is_temp_calib_needed - determines if new calibration is needed + * + * records new temperature in tx_mgr->temperature. + * replaces tx_mgr->last_temperature *only* if calib needed + * (assumes caller will actually do the calibration!). */ +static int is_temp_calib_needed(struct iwl_priv *priv) +{ + int temp_diff; + + priv->temperature = iwl_hw_reg_txpower_get_temperature(priv); + temp_diff = priv->temperature - priv->last_temperature; + + /* get absolute value */ + if (temp_diff < 0) { + IWL_DEBUG_POWER("Getting cooler, delta %d,\n", temp_diff); + temp_diff = -temp_diff; + } else if (temp_diff == 0) + IWL_DEBUG_POWER("Same temp,\n"); + else + IWL_DEBUG_POWER("Getting warmer, delta %d,\n", temp_diff); + + /* if we don't need calibration, *don't* update last_temperature */ + if (temp_diff < IWL_TEMPERATURE_LIMIT_TIMER) { + IWL_DEBUG_POWER("Timed thermal calib not needed\n"); + return 0; + } + + IWL_DEBUG_POWER("Timed thermal calib needed\n"); + + /* assume that caller will actually do calib ... 
+ * update the "last temperature" value */ + priv->last_temperature = priv->temperature; + return 1; +} + +#define IWL_MAX_GAIN_ENTRIES 78 +#define IWL_CCK_FROM_OFDM_POWER_DIFF -5 +#define IWL_CCK_FROM_OFDM_INDEX_DIFF (10) + +/* radio and DSP power table, each step is 1/2 dB. + * 1st number is for RF analog gain, 2nd number is for DSP pre-DAC gain. */ +static struct iwl_tx_power power_gain_table[2][IWL_MAX_GAIN_ENTRIES] = { + { + {251, 127}, /* 2.4 GHz, highest power */ + {251, 127}, + {251, 127}, + {251, 127}, + {251, 125}, + {251, 110}, + {251, 105}, + {251, 98}, + {187, 125}, + {187, 115}, + {187, 108}, + {187, 99}, + {243, 119}, + {243, 111}, + {243, 105}, + {243, 97}, + {243, 92}, + {211, 106}, + {211, 100}, + {179, 120}, + {179, 113}, + {179, 107}, + {147, 125}, + {147, 119}, + {147, 112}, + {147, 106}, + {147, 101}, + {147, 97}, + {147, 91}, + {115, 107}, + {235, 121}, + {235, 115}, + {235, 109}, + {203, 127}, + {203, 121}, + {203, 115}, + {203, 108}, + {203, 102}, + {203, 96}, + {203, 92}, + {171, 110}, + {171, 104}, + {171, 98}, + {139, 116}, + {227, 125}, + {227, 119}, + {227, 113}, + {227, 107}, + {227, 101}, + {227, 96}, + {195, 113}, + {195, 106}, + {195, 102}, + {195, 95}, + {163, 113}, + {163, 106}, + {163, 102}, + {163, 95}, + {131, 113}, + {131, 106}, + {131, 102}, + {131, 95}, + {99, 113}, + {99, 106}, + {99, 102}, + {99, 95}, + {67, 113}, + {67, 106}, + {67, 102}, + {67, 95}, + {35, 113}, + {35, 106}, + {35, 102}, + {35, 95}, + {3, 113}, + {3, 106}, + {3, 102}, + {3, 95} }, /* 2.4 GHz, lowest power */ + { + {251, 127}, /* 5.x GHz, highest power */ + {251, 120}, + {251, 114}, + {219, 119}, + {219, 101}, + {187, 113}, + {187, 102}, + {155, 114}, + {155, 103}, + {123, 117}, + {123, 107}, + {123, 99}, + {123, 92}, + {91, 108}, + {59, 125}, + {59, 118}, + {59, 109}, + {59, 102}, + {59, 96}, + {59, 90}, + {27, 104}, + {27, 98}, + {27, 92}, + {115, 118}, + {115, 111}, + {115, 104}, + {83, 126}, + {83, 121}, + {83, 113}, + {83, 105}, + {83, 99}, + {51, 118}, + {51, 111}, + {51, 104}, + {51, 98}, + {19, 116}, + {19, 109}, + {19, 102}, + {19, 98}, + {19, 93}, + {171, 113}, + {171, 107}, + {171, 99}, + {139, 120}, + {139, 113}, + {139, 107}, + {139, 99}, + {107, 120}, + {107, 113}, + {107, 107}, + {107, 99}, + {75, 120}, + {75, 113}, + {75, 107}, + {75, 99}, + {43, 120}, + {43, 113}, + {43, 107}, + {43, 99}, + {11, 120}, + {11, 113}, + {11, 107}, + {11, 99}, + {131, 107}, + {131, 99}, + {99, 120}, + {99, 113}, + {99, 107}, + {99, 99}, + {67, 120}, + {67, 113}, + {67, 107}, + {67, 99}, + {35, 120}, + {35, 113}, + {35, 107}, + {35, 99}, + {3, 120} } /* 5.x GHz, lowest power */ +}; + +static inline u8 iwl_hw_reg_fix_power_index(int index) +{ + if (index < 0) + return 0; + if (index >= IWL_MAX_GAIN_ENTRIES) + return IWL_MAX_GAIN_ENTRIES - 1; + return (u8) index; +} + +/* Kick off thermal recalibration check every 60 seconds */ +#define REG_RECALIB_PERIOD (60) + +/** + * iwl_hw_reg_set_scan_power - Set Tx power for scan probe requests + * + * Set (in our channel info database) the direct scan Tx power for 1 Mbit (CCK) + * or 6 Mbit (OFDM) rates. 
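+ *
+ * The gain table is in 1/2 dB steps, so the scan power index moves two
+ * entries per dB of difference from the normal 6 Mbit power.  Worked
+ * example with made-up numbers: if the 6 Mbit requested power is 14 dBm
+ * at table index 20 and the capped scan power comes out at 11 dBm, the
+ * scan index becomes
+ *
+ *	20 - (11 - 14) * 2 = 26
+ *
+ * i.e. six half-dB steps toward the lower-power end of the gain table.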
+ */ +static void iwl_hw_reg_set_scan_power(struct iwl_priv *priv, u32 scan_tbl_index, + s32 rate_index, const s8 *clip_pwrs, + struct iwl_channel_info *ch_info, + int band_index) +{ + struct iwl_scan_power_info *scan_power_info; + s8 power; + u8 power_index; + + scan_power_info = &ch_info->scan_pwr_info[scan_tbl_index]; + + /* use this channel group's 6Mbit clipping/saturation pwr, + * but cap at regulatory scan power restriction (set during init + * based on eeprom channel data) for this channel. */ + power = min(ch_info->scan_power, clip_pwrs[IWL_RATE_6M_INDEX]); + + /* further limit to user's max power preference. + * FIXME: Other spectrum management power limitations do not + * seem to apply?? */ + power = min(power, priv->user_txpower_limit); + scan_power_info->requested_power = power; + + /* find difference between new scan *power* and current "normal" + * Tx *power* for 6Mb. Use this difference (x2) to adjust the + * current "normal" temperature-compensated Tx power *index* for + * this rate (1Mb or 6Mb) to yield new temp-compensated scan power + * *index*. */ + power_index = ch_info->power_info[rate_index].power_table_index + - (power - ch_info->power_info + [IWL_RATE_6M_INDEX].requested_power) * 2; + + /* store reference index that we use when adjusting *all* scan + * powers. So we can accommodate user (all channel) or spectrum + * management (single channel) power changes "between" temperature + * feedback compensation procedures. + * don't force fit this reference index into gain table; it may be a + * negative number. This will help avoid errors when we're at + * the lower bounds (highest gains, for warmest temperatures) + * of the table. */ + + /* don't exceed table bounds for "real" setting */ + power_index = iwl_hw_reg_fix_power_index(power_index); + + scan_power_info->power_table_index = power_index; + scan_power_info->tpc.tx_gain = + power_gain_table[band_index][power_index].tx_gain; + scan_power_info->tpc.dsp_atten = + power_gain_table[band_index][power_index].dsp_atten; +} + +/** + * iwl_hw_reg_send_txpower - fill in Tx Power command with gain settings + * + * Configures power settings for all rates for the current channel, + * using values from channel info struct, and send to NIC + */ +int iwl_hw_reg_send_txpower(struct iwl_priv *priv) +{ + int rate_idx; + const struct iwl_channel_info *ch_info = NULL; + struct iwl_txpowertable_cmd txpower = { + .channel = priv->active_rxon.channel, + }; + + txpower.band = (priv->phymode == MODE_IEEE80211A) ? 
0 : 1; + ch_info = iwl_get_channel_info(priv, + priv->phymode, + le16_to_cpu(priv->active_rxon.channel)); + if (!ch_info) { + IWL_ERROR + ("Failed to get channel info for channel %d [%d]\n", + le16_to_cpu(priv->active_rxon.channel), priv->phymode); + return -EINVAL; + } + + if (!is_channel_valid(ch_info)) { + IWL_DEBUG_POWER("Not calling TX_PWR_TABLE_CMD on " + "non-Tx channel.\n"); + return 0; + } + + /* fill cmd with power settings for all rates for current channel */ + for (rate_idx = 0; rate_idx < IWL_RATE_COUNT; rate_idx++) { + txpower.power[rate_idx].tpc = ch_info->power_info[rate_idx].tpc; + txpower.power[rate_idx].rate = iwl_rates[rate_idx].plcp; + + IWL_DEBUG_POWER("ch %d:%d rf %d dsp %3d rate code 0x%02x\n", + le16_to_cpu(txpower.channel), + txpower.band, + txpower.power[rate_idx].tpc.tx_gain, + txpower.power[rate_idx].tpc.dsp_atten, + txpower.power[rate_idx].rate); + } + + return iwl_send_cmd_pdu(priv, REPLY_TX_PWR_TABLE_CMD, + sizeof(struct iwl_txpowertable_cmd), &txpower); + +} + +/** + * iwl_hw_reg_set_new_power - Configures power tables at new levels + * @ch_info: Channel to update. Uses power_info.requested_power. + * + * Replace requested_power and base_power_index ch_info fields for + * one channel. + * + * Called if user or spectrum management changes power preferences. + * Takes into account h/w and modulation limitations (clip power). + * + * This does *not* send anything to NIC, just sets up ch_info for one channel. + * + * NOTE: reg_compensate_for_temperature_dif() *must* be run after this to + * properly fill out the scan powers, and actual h/w gain settings, + * and send changes to NIC + */ +static int iwl_hw_reg_set_new_power(struct iwl_priv *priv, + struct iwl_channel_info *ch_info) +{ + struct iwl_channel_power_info *power_info; + int power_changed = 0; + int i; + const s8 *clip_pwrs; + int power; + + /* Get this chnlgrp's rate-to-max/clip-powers table */ + clip_pwrs = priv->clip_groups[ch_info->group_index].clip_powers; + + /* Get this channel's rate-to-current-power settings table */ + power_info = ch_info->power_info; + + /* update OFDM Txpower settings */ + for (i = IWL_FIRST_OFDM_RATE; i <= IWL_LAST_OFDM_RATE; + i++, ++power_info) { + int delta_idx; + + /* limit new power to be no more than h/w capability */ + power = min(ch_info->curr_txpow, clip_pwrs[i]); + if (power == power_info->requested_power) + continue; + + /* find difference between old and new requested powers, + * update base (non-temp-compensated) power index */ + delta_idx = (power - power_info->requested_power) * 2; + power_info->base_power_index -= delta_idx; + + /* save new requested power value */ + power_info->requested_power = power; + + power_changed = 1; + } + + /* update CCK Txpower settings, based on OFDM 12M setting ... + * ... all CCK power settings for a given channel are the *same*. */ + if (power_changed) { + power = + ch_info->power_info[IWL_RATE_12M_INDEX]. + requested_power + IWL_CCK_FROM_OFDM_POWER_DIFF; + + /* do all CCK rates' iwl_channel_power_info structures */ + for (i = IWL_FIRST_CCK_RATE; i <= IWL_LAST_CCK_RATE; i++) { + power_info->requested_power = power; + power_info->base_power_index = + ch_info->power_info[IWL_RATE_12M_INDEX]. 
+ base_power_index + IWL_CCK_FROM_OFDM_INDEX_DIFF; + ++power_info; + } + } + + return 0; +} + +/** + * iwl_hw_reg_get_ch_txpower_limit - returns new power limit for channel + * + * NOTE: Returned power limit may be less (but not more) than requested, + * based strictly on regulatory (eeprom and spectrum mgt) limitations + * (no consideration for h/w clipping limitations). + */ +static int iwl_hw_reg_get_ch_txpower_limit(struct iwl_channel_info *ch_info) +{ + s8 max_power; + +#if 0 + /* if we're using TGd limits, use lower of TGd or EEPROM */ + if (ch_info->tgd_data.max_power != 0) + max_power = min(ch_info->tgd_data.max_power, + ch_info->eeprom.max_power_avg); + + /* else just use EEPROM limits */ + else +#endif + max_power = ch_info->eeprom.max_power_avg; + + return min(max_power, ch_info->max_power_avg); +} + +/** + * iwl_hw_reg_comp_txpower_temp - Compensate for temperature + * + * Compensate txpower settings of *all* channels for temperature. + * This only accounts for the difference between current temperature + * and the factory calibration temperatures, and bases the new settings + * on the channel's base_power_index. + * + * If RxOn is "associated", this sends the new Txpower to NIC! + */ +static int iwl_hw_reg_comp_txpower_temp(struct iwl_priv *priv) +{ + struct iwl_channel_info *ch_info = NULL; + int delta_index; + const s8 *clip_pwrs; /* array of h/w max power levels for each rate */ + u8 a_band; + u8 rate_index; + u8 scan_tbl_index; + u8 i; + int ref_temp; + int temperature = priv->temperature; + + /* set up new Tx power info for each and every channel, 2.4 and 5.x */ + for (i = 0; i < priv->channel_count; i++) { + ch_info = &priv->channel_info[i]; + a_band = is_channel_a_band(ch_info); + + /* Get this chnlgrp's factory calibration temperature */ + ref_temp = (s16)priv->eeprom.groups[ch_info->group_index]. + temperature; + + /* get power index adjustment based on curr and factory + * temps */ + delta_index = iwl_hw_reg_adjust_power_by_temp(temperature, + ref_temp); + + /* set tx power value for all rates, OFDM and CCK */ + for (rate_index = 0; rate_index < IWL_RATE_COUNT; + rate_index++) { + int power_idx = + ch_info->power_info[rate_index].base_power_index; + + /* temperature compensate */ + power_idx += delta_index; + + /* stay within table range */ + power_idx = iwl_hw_reg_fix_power_index(power_idx); + ch_info->power_info[rate_index]. + power_table_index = (u8) power_idx; + ch_info->power_info[rate_index].tpc = + power_gain_table[a_band][power_idx]; + } + + /* Get this chnlgrp's rate-to-max/clip-powers table */ + clip_pwrs = priv->clip_groups[ch_info->group_index].clip_powers; + + /* set scan tx power, 1Mbit for CCK, 6Mbit for OFDM */ + for (scan_tbl_index = 0; + scan_tbl_index < IWL_NUM_SCAN_RATES; scan_tbl_index++) { + s32 actual_index = (scan_tbl_index == 0) ? 
+ IWL_RATE_1M_INDEX : IWL_RATE_6M_INDEX; + iwl_hw_reg_set_scan_power(priv, scan_tbl_index, + actual_index, clip_pwrs, + ch_info, a_band); + } + } + + /* send Txpower command for current channel to ucode */ + return iwl_hw_reg_send_txpower(priv); +} + +int iwl_hw_reg_set_txpower(struct iwl_priv *priv, s8 power) +{ + struct iwl_channel_info *ch_info; + s8 max_power; + u8 a_band; + u8 i; + + if (priv->user_txpower_limit == power) { + IWL_DEBUG_POWER("Requested Tx power same as current " + "limit: %ddBm.\n", power); + return 0; + } + + IWL_DEBUG_POWER("Setting upper limit clamp to %ddBm.\n", power); + priv->user_txpower_limit = power; + + /* set up new Tx powers for each and every channel, 2.4 and 5.x */ + + for (i = 0; i < priv->channel_count; i++) { + ch_info = &priv->channel_info[i]; + a_band = is_channel_a_band(ch_info); + + /* find minimum power of all user and regulatory constraints + * (does not consider h/w clipping limitations) */ + max_power = iwl_hw_reg_get_ch_txpower_limit(ch_info); + max_power = min(power, max_power); + if (max_power != ch_info->curr_txpow) { + ch_info->curr_txpow = max_power; + + /* this considers the h/w clipping limitations */ + iwl_hw_reg_set_new_power(priv, ch_info); + } + } + + /* update txpower settings for all channels, + * send to NIC if associated. */ + is_temp_calib_needed(priv); + iwl_hw_reg_comp_txpower_temp(priv); + + return 0; +} + +/* will add 3945 channel switch cmd handling later */ +int iwl_hw_channel_switch(struct iwl_priv *priv, u16 channel) +{ + return 0; +} + +/** + * iwl3945_reg_txpower_periodic - called when time to check our temperature. + * + * -- reset periodic timer + * -- see if temp has changed enough to warrant re-calibration ... if so: + * -- correct coeffs for temp (can reset temp timer) + * -- save this temp as "last", + * -- send new set of gain settings to NIC + * NOTE: This should continue working, even when we're not associated, + * so we can keep our internal table of scan powers current. */ +void iwl3945_reg_txpower_periodic(struct iwl_priv *priv) +{ + /* This will kick in the "brute force" + * iwl_hw_reg_comp_txpower_temp() below */ + if (!is_temp_calib_needed(priv)) + goto reschedule; + + /* Set up a new set of temp-adjusted TxPowers, send to NIC. + * This is based *only* on current temperature, + * ignoring any previous power measurements */ + iwl_hw_reg_comp_txpower_temp(priv); + + reschedule: + queue_delayed_work(priv->workqueue, + &priv->thermal_periodic, REG_RECALIB_PERIOD * HZ); +} + +void iwl3945_bg_reg_txpower_periodic(struct work_struct *work) +{ + struct iwl_priv *priv = container_of(work, struct iwl_priv, + thermal_periodic.work); + + if (test_bit(STATUS_EXIT_PENDING, &priv->status)) + return; + + mutex_lock(&priv->mutex); + iwl3945_reg_txpower_periodic(priv); + mutex_unlock(&priv->mutex); +} + +/** + * iwl_hw_reg_get_ch_grp_index - find the channel-group index (0-4) + * for the channel. + * + * This function is used when initializing channel-info structs. + * + * NOTE: These channel groups do *NOT* match the bands above! + * These channel groups are based on factory-tested channels; + * on A-band, EEPROM's "group frequency" entries represent the top + * channel in each group 1-4. Group 5 All B/G channels are in group 0. 
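+ *
+ * Illustrative example (the group boundaries below are made up; the real
+ * ones come from EEPROM): with group_channel entries 48, 64, 112 and 165
+ * for groups 1-4, A-band channel 44 maps to group 1 (44 <= 48), channel
+ * 100 maps to group 3 (100 <= 112), and any channel above 165 falls
+ * through the loop and is forced into group 4.  All 2.4 GHz channels
+ * map to group 0.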
+ */ +static u16 iwl_hw_reg_get_ch_grp_index(struct iwl_priv *priv, + const struct iwl_channel_info *ch_info) +{ + struct iwl_eeprom_txpower_group *ch_grp = &priv->eeprom.groups[0]; + u8 group; + u16 group_index = 0; /* based on factory calib frequencies */ + u8 grp_channel; + + /* Find the group index for the channel ... don't use index 1(?) */ + if (is_channel_a_band(ch_info)) { + for (group = 1; group < 5; group++) { + grp_channel = ch_grp[group].group_channel; + if (ch_info->channel <= grp_channel) { + group_index = group; + break; + } + } + /* group 4 has a few channels *above* its factory cal freq */ + if (group == 5) + group_index = 4; + } else + group_index = 0; /* 2.4 GHz, group 0 */ + + IWL_DEBUG_POWER("Chnl %d mapped to grp %d\n", ch_info->channel, + group_index); + return group_index; +} + +/** + * iwl_hw_reg_get_matched_power_index - Interpolate to get nominal index + * + * Interpolate to get nominal (i.e. at factory calibration temperature) index + * into radio/DSP gain settings table for requested power. + */ +static int iwl_hw_reg_get_matched_power_index(struct iwl_priv *priv, + s8 requested_power, + s32 setting_index, s32 *new_index) +{ + const struct iwl_eeprom_txpower_group *chnl_grp = NULL; + s32 index0, index1; + s32 power = 2 * requested_power; + s32 i; + const struct iwl_eeprom_txpower_sample *samples; + s32 gains0, gains1; + s32 res; + s32 denominator; + + chnl_grp = &priv->eeprom.groups[setting_index]; + samples = chnl_grp->samples; + for (i = 0; i < 5; i++) { + if (power == samples[i].power) { + *new_index = samples[i].gain_index; + return 0; + } + } + + if (power > samples[1].power) { + index0 = 0; + index1 = 1; + } else if (power > samples[2].power) { + index0 = 1; + index1 = 2; + } else if (power > samples[3].power) { + index0 = 2; + index1 = 3; + } else { + index0 = 3; + index1 = 4; + } + + denominator = (s32) samples[index1].power - (s32) samples[index0].power; + if (denominator == 0) + return -EINVAL; + gains0 = (s32) samples[index0].gain_index * (1 << 19); + gains1 = (s32) samples[index1].gain_index * (1 << 19); + res = gains0 + (gains1 - gains0) * + ((s32) power - (s32) samples[index0].power) / denominator + + (1 << 18); + *new_index = res >> 19; + return 0; +} + +static void iwl_hw_reg_init_channel_groups(struct iwl_priv *priv) +{ + u32 i; + s32 rate_index; + const struct iwl_eeprom_txpower_group *group; + + IWL_DEBUG_POWER("Initializing factory calib info from EEPROM\n"); + + for (i = 0; i < IWL_NUM_TX_CALIB_GROUPS; i++) { + s8 *clip_pwrs; /* table of power levels for each rate */ + s8 satur_pwr; /* saturation power for each chnl group */ + group = &priv->eeprom.groups[i]; + + /* sanity check on factory saturation power value */ + if (group->saturation_power < 40) { + IWL_WARNING("Error: saturation power is %d, " + "less than minimum expected 40\n", + group->saturation_power); + return; + } + + /* + * Derive requested power levels for each rate, based on + * hardware capabilities (saturation power for band). + * Basic value is 3dB down from saturation, with further + * power reductions for highest 3 data rates. These + * backoffs provide headroom for high rate modulation + * power peaks, without too much distortion (clipping). 
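+ *
+ * Worked example with a made-up factory value: saturation_power = 76
+ * gives satur_pwr = 38 after the divide-by-two below; for an A-band
+ * group the resulting clip powers are then 38 - 5 = 33 for 36 Mbit,
+ * 38 - 10 = 28 for 48 Mbit, 38 - 12 = 26 for 54 Mbit, and 38 for all
+ * lower rates.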
+ */ + /* we'll fill in this array with h/w max power levels */ + clip_pwrs = (s8 *) priv->clip_groups[i].clip_powers; + + /* divide factory saturation power by 2 to find -3dB level */ + satur_pwr = (s8) (group->saturation_power >> 1); + + /* fill in channel group's nominal powers for each rate */ + for (rate_index = 0; + rate_index < IWL_RATE_COUNT; rate_index++, clip_pwrs++) { + switch (rate_index) { + case IWL_RATE_36M_INDEX: + if (i == 0) /* B/G */ + *clip_pwrs = satur_pwr; + else /* A */ + *clip_pwrs = satur_pwr - 5; + break; + case IWL_RATE_48M_INDEX: + if (i == 0) + *clip_pwrs = satur_pwr - 7; + else + *clip_pwrs = satur_pwr - 10; + break; + case IWL_RATE_54M_INDEX: + if (i == 0) + *clip_pwrs = satur_pwr - 9; + else + *clip_pwrs = satur_pwr - 12; + break; + default: + *clip_pwrs = satur_pwr; + break; + } + } + } +} + +/** + * iwl3945_txpower_set_from_eeprom - Set channel power info based on EEPROM + * + * Second pass (during init) to set up priv->channel_info + * + * Set up Tx-power settings in our channel info database for each VALID + * (for this geo/SKU) channel, at all Tx data rates, based on eeprom values + * and current temperature. + * + * Since this is based on current temperature (at init time), these values may + * not be valid for very long, but it gives us a starting/default point, + * and allows us to active (i.e. using Tx) scan. + * + * This does *not* write values to NIC, just sets up our internal table. + */ +int iwl3945_txpower_set_from_eeprom(struct iwl_priv *priv) +{ + struct iwl_channel_info *ch_info = NULL; + struct iwl_channel_power_info *pwr_info; + int delta_index; + u8 rate_index; + u8 scan_tbl_index; + const s8 *clip_pwrs; /* array of power levels for each rate */ + u8 gain, dsp_atten; + s8 power; + u8 pwr_index, base_pwr_index, a_band; + u8 i; + int temperature; + + /* save temperature reference, + * so we can determine next time to calibrate */ + temperature = iwl_hw_reg_txpower_get_temperature(priv); + priv->last_temperature = temperature; + + iwl_hw_reg_init_channel_groups(priv); + + /* initialize Tx power info for each and every channel, 2.4 and 5.x */ + for (i = 0, ch_info = priv->channel_info; i < priv->channel_count; + i++, ch_info++) { + a_band = is_channel_a_band(ch_info); + if (!is_channel_valid(ch_info)) + continue; + + /* find this channel's channel group (*not* "band") index */ + ch_info->group_index = + iwl_hw_reg_get_ch_grp_index(priv, ch_info); + + /* Get this chnlgrp's rate->max/clip-powers table */ + clip_pwrs = priv->clip_groups[ch_info->group_index].clip_powers; + + /* calculate power index *adjustment* value according to + * diff between current temperature and factory temperature */ + delta_index = iwl_hw_reg_adjust_power_by_temp(temperature, + priv->eeprom.groups[ch_info->group_index]. + temperature); + + IWL_DEBUG_POWER("Delta index for channel %d: %d [%d]\n", + ch_info->channel, delta_index, temperature + + IWL_TEMP_CONVERT); + + /* set tx power value for all OFDM rates */ + for (rate_index = 0; rate_index < IWL_OFDM_RATES; + rate_index++) { + s32 power_idx; + int rc; + + /* use channel group's clip-power table, + * but don't exceed channel's max power */ + s8 pwr = min(ch_info->max_power_avg, + clip_pwrs[rate_index]); + + pwr_info = &ch_info->power_info[rate_index]; + + /* get base (i.e. 
at factory-measured temperature) + * power table index for this rate's power */ + rc = iwl_hw_reg_get_matched_power_index(priv, pwr, + ch_info->group_index, + &power_idx); + if (rc) { + IWL_ERROR("Invalid power index\n"); + return rc; + } + pwr_info->base_power_index = (u8) power_idx; + + /* temperature compensate */ + power_idx += delta_index; + + /* stay within range of gain table */ + power_idx = iwl_hw_reg_fix_power_index(power_idx); + + /* fill 1 OFDM rate's iwl_channel_power_info struct */ + pwr_info->requested_power = pwr; + pwr_info->power_table_index = (u8) power_idx; + pwr_info->tpc.tx_gain = + power_gain_table[a_band][power_idx].tx_gain; + pwr_info->tpc.dsp_atten = + power_gain_table[a_band][power_idx].dsp_atten; + } + + /* set tx power for CCK rates, based on OFDM 12 Mbit settings*/ + pwr_info = &ch_info->power_info[IWL_RATE_12M_INDEX]; + power = pwr_info->requested_power + + IWL_CCK_FROM_OFDM_POWER_DIFF; + pwr_index = pwr_info->power_table_index + + IWL_CCK_FROM_OFDM_INDEX_DIFF; + base_pwr_index = pwr_info->base_power_index + + IWL_CCK_FROM_OFDM_INDEX_DIFF; + + /* stay within table range */ + pwr_index = iwl_hw_reg_fix_power_index(pwr_index); + gain = power_gain_table[a_band][pwr_index].tx_gain; + dsp_atten = power_gain_table[a_band][pwr_index].dsp_atten; + + /* fill each CCK rate's iwl_channel_power_info structure + * NOTE: All CCK-rate Txpwrs are the same for a given chnl! + * NOTE: CCK rates start at end of OFDM rates! */ + for (rate_index = IWL_OFDM_RATES; + rate_index < IWL_RATE_COUNT; rate_index++) { + pwr_info = &ch_info->power_info[rate_index]; + pwr_info->requested_power = power; + pwr_info->power_table_index = pwr_index; + pwr_info->base_power_index = base_pwr_index; + pwr_info->tpc.tx_gain = gain; + pwr_info->tpc.dsp_atten = dsp_atten; + } + + /* set scan tx power, 1Mbit for CCK, 6Mbit for OFDM */ + for (scan_tbl_index = 0; + scan_tbl_index < IWL_NUM_SCAN_RATES; scan_tbl_index++) { + s32 actual_index = (scan_tbl_index == 0) ? 
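/*
 * Sketch (not driver code) of the per-channel adjustment done in the loop
 * above: the factory base index is shifted by the temperature delta and
 * clamped to the gain table, and the CCK entries are then derived from the
 * OFDM 12 Mbps entry with fixed power/index offsets.  The table size and
 * offset values here are assumptions for illustration.
 */
#include <stdio.h>

#define GAIN_TABLE_LAST 218     /* hypothetical last valid gain index */
#define CCK_POWER_DIFF  (-5)    /* dB relative to OFDM 12M (assumed) */
#define CCK_INDEX_DIFF  9       /* gain-table steps (assumed) */

static int fix_power_index(int idx)
{
        if (idx < 0)
                return 0;
        if (idx > GAIN_TABLE_LAST)
                return GAIN_TABLE_LAST;
        return idx;
}

int main(void)
{
        int base_index = 120;   /* from factory calibration */
        int delta_index = 14;   /* running hotter than at calibration time */
        int ofdm_index = fix_power_index(base_index + delta_index);
        int cck_index = fix_power_index(ofdm_index + CCK_INDEX_DIFF);

        printf("OFDM 12M index %d, CCK index %d (CCK power %+d dB vs OFDM)\n",
               ofdm_index, cck_index, CCK_POWER_DIFF);
        return 0;
}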
+ IWL_RATE_1M_INDEX : IWL_RATE_6M_INDEX; + iwl_hw_reg_set_scan_power(priv, scan_tbl_index, + actual_index, clip_pwrs, ch_info, a_band); + } + } + + return 0; +} + +int iwl_hw_rxq_stop(struct iwl_priv *priv) +{ + int rc; + unsigned long flags; + + spin_lock_irqsave(&priv->lock, flags); + rc = iwl_grab_restricted_access(priv); + if (rc) { + spin_unlock_irqrestore(&priv->lock, flags); + return rc; + } + + iwl_write_restricted(priv, FH_RCSR_CONFIG(0), 0); + rc = iwl_poll_restricted_bit(priv, FH_RSSR_STATUS, (1 << 24), 1000); + if (rc < 0) + IWL_ERROR("Can't stop Rx DMA.\n"); + + iwl_release_restricted_access(priv); + spin_unlock_irqrestore(&priv->lock, flags); + + return 0; +} + +int iwl_hw_tx_queue_init(struct iwl_priv *priv, struct iwl_tx_queue *txq) +{ + int rc; + unsigned long flags; + int txq_id = txq->q.id; + + struct iwl_shared *shared_data = priv->hw_setting.shared_virt; + + shared_data->tx_base_ptr[txq_id] = cpu_to_le32((u32)txq->q.dma_addr); + + spin_lock_irqsave(&priv->lock, flags); + rc = iwl_grab_restricted_access(priv); + if (rc) { + spin_unlock_irqrestore(&priv->lock, flags); + return rc; + } + iwl_write_restricted(priv, FH_CBCC_CTRL(txq_id), 0); + iwl_write_restricted(priv, FH_CBCC_BASE(txq_id), 0); + + iwl_write_restricted(priv, FH_TCSR_CONFIG(txq_id), + ALM_FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_RTC_NOINT | + ALM_FH_TCSR_TX_CONFIG_REG_VAL_MSG_MODE_TXF | + ALM_FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_IFTFD | + ALM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE_VAL | + ALM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE); + iwl_release_restricted_access(priv); + + /* fake read to flush all prev. writes */ + iwl_read32(priv, FH_TSSR_CBB_BASE); + spin_unlock_irqrestore(&priv->lock, flags); + + return 0; +} + +int iwl_hw_get_rx_read(struct iwl_priv *priv) +{ + struct iwl_shared *shared_data = priv->hw_setting.shared_virt; + + return le32_to_cpu(shared_data->rx_read_ptr[0]); +} + +/** + * iwl3945_init_hw_rate_table - Initialize the hardware rate fallback table + */ +int iwl3945_init_hw_rate_table(struct iwl_priv *priv) +{ + int rc, i; + struct iwl_rate_scaling_cmd rate_cmd = { + .reserved = {0, 0, 0}, + }; + struct iwl_rate_scaling_info *table = rate_cmd.table; + + for (i = 0; i < ARRAY_SIZE(iwl_rates); i++) { + table[i].rate_n_flags = + iwl_hw_set_rate_n_flags(iwl_rates[i].plcp, 0); + table[i].try_cnt = priv->retry_rate; + table[i].next_rate_index = iwl_get_prev_ieee_rate(i); + } + + switch (priv->phymode) { + case MODE_IEEE80211A: + IWL_DEBUG_RATE("Select A mode rate scale\n"); + /* If one of the following CCK rates is used, + * have it fall back to the 6M OFDM rate */ + for (i = IWL_FIRST_CCK_RATE; i <= IWL_LAST_CCK_RATE; i++) + table[i].next_rate_index = IWL_FIRST_OFDM_RATE; + + /* Don't fall back to CCK rates */ + table[IWL_RATE_12M_INDEX].next_rate_index = IWL_RATE_9M_INDEX; + + /* Don't drop out of OFDM rates */ + table[IWL_FIRST_OFDM_RATE].next_rate_index = + IWL_FIRST_OFDM_RATE; + break; + + case MODE_IEEE80211B: + IWL_DEBUG_RATE("Select B mode rate scale\n"); + /* If an OFDM rate is used, have it fall back to the + * 1M CCK rates */ + for (i = IWL_FIRST_OFDM_RATE; i <= IWL_LAST_OFDM_RATE; i++) + table[i].next_rate_index = IWL_FIRST_CCK_RATE; + + /* CCK shouldn't fall back to OFDM... 
*/ + table[IWL_RATE_11M_INDEX].next_rate_index = IWL_RATE_5M_INDEX; + break; + + default: + IWL_DEBUG_RATE("Select G mode rate scale\n"); + break; + } + + /* Update the rate scaling for control frame Tx */ + rate_cmd.table_id = 0; + rc = iwl_send_cmd_pdu(priv, REPLY_RATE_SCALE, sizeof(rate_cmd), + &rate_cmd); + if (rc) + return rc; + + /* Update the rate scaling for data frame Tx */ + rate_cmd.table_id = 1; + return iwl_send_cmd_pdu(priv, REPLY_RATE_SCALE, sizeof(rate_cmd), + &rate_cmd); +} + +int iwl_hw_set_hw_setting(struct iwl_priv *priv) +{ + memset((void *)&priv->hw_setting, 0, + sizeof(struct iwl_driver_hw_info)); + + priv->hw_setting.shared_virt = + pci_alloc_consistent(priv->pci_dev, + sizeof(struct iwl_shared), + &priv->hw_setting.shared_phys); + + if (!priv->hw_setting.shared_virt) { + IWL_ERROR("failed to allocate pci memory\n"); + mutex_unlock(&priv->mutex); + return -ENOMEM; + } + + priv->hw_setting.ac_queue_count = AC_NUM; + priv->hw_setting.rx_buffer_size = IWL_RX_BUF_SIZE; + priv->hw_setting.tx_cmd_len = sizeof(struct iwl_tx_cmd); + priv->hw_setting.max_rxq_size = RX_QUEUE_SIZE; + priv->hw_setting.max_rxq_log = RX_QUEUE_SIZE_LOG; + priv->hw_setting.cck_flag = 0; + priv->hw_setting.max_stations = IWL3945_STATION_COUNT; + priv->hw_setting.bcast_sta_id = IWL3945_BROADCAST_ID; + return 0; +} + +unsigned int iwl_hw_get_beacon_cmd(struct iwl_priv *priv, + struct iwl_frame *frame, u8 rate) +{ + struct iwl_tx_beacon_cmd *tx_beacon_cmd; + unsigned int frame_size; + + tx_beacon_cmd = (struct iwl_tx_beacon_cmd *)&frame->u; + memset(tx_beacon_cmd, 0, sizeof(*tx_beacon_cmd)); + + tx_beacon_cmd->tx.sta_id = IWL3945_BROADCAST_ID; + tx_beacon_cmd->tx.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE; + + frame_size = iwl_fill_beacon_frame(priv, + tx_beacon_cmd->frame, + BROADCAST_ADDR, + sizeof(frame->u) - sizeof(*tx_beacon_cmd)); + + BUG_ON(frame_size > MAX_MPDU_SIZE); + tx_beacon_cmd->tx.len = cpu_to_le16((u16)frame_size); + + tx_beacon_cmd->tx.rate = rate; + tx_beacon_cmd->tx.tx_flags = (TX_CMD_FLG_SEQ_CTL_MSK | + TX_CMD_FLG_TSF_MSK); + + /* supp_rates[0] == OFDM */ + tx_beacon_cmd->tx.supp_rates[0] = IWL_OFDM_BASIC_RATES_MASK; + + /* supp_rates[1] == CCK + * + * NOTE: IWL_*_RATES_MASK are not in the order that supp_rates + * expects so we have to shift them around. 
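/*
 * Sketch (not driver code) of how the fallback table built by
 * iwl3945_init_hw_rate_table() above is meant to be walked: each entry
 * names the next lower rate to retry with, and a chain ends when an entry
 * points at itself, as the first OFDM rate does in the A-mode setup.
 * The four-entry table here is a made-up example.
 */
#include <stdio.h>

struct fallback {
        int next_rate_index;
};

static void dump_chain(const struct fallback *tbl, int start)
{
        int i = start;

        for (;;) {
                printf("rate %d", i);
                if (tbl[i].next_rate_index == i)
                        break;                  /* self-reference ends chain */
                printf(" -> ");
                i = tbl[i].next_rate_index;
        }
        printf("\n");
}

int main(void)
{
        /* indices 0..3 standing in for 6/9/12/18 Mbps */
        struct fallback tbl[4] = { {0}, {0}, {1}, {2} };

        dump_chain(tbl, 3);     /* rate 3 -> rate 2 -> rate 1 -> rate 0 */
        return 0;
}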
+ * + * supp_rates expects: + * CCK rates are bit0..3 + * + * However IWL_*_RATES_MASK has: + * CCK rates are bit8..11 + */ + tx_beacon_cmd->tx.supp_rates[1] = + (IWL_CCK_BASIC_RATES_MASK >> 8) & 0xF; + + return (sizeof(struct iwl_tx_beacon_cmd) + frame_size); +} + +void iwl_hw_rx_handler_setup(struct iwl_priv *priv) +{ + priv->rx_handlers[REPLY_3945_RX] = iwl3945_rx_reply_rx; +} + +void iwl_hw_setup_deferred_work(struct iwl_priv *priv) +{ + INIT_DELAYED_WORK(&priv->thermal_periodic, + iwl3945_bg_reg_txpower_periodic); +} + +void iwl_hw_cancel_deferred_work(struct iwl_priv *priv) +{ + cancel_delayed_work(&priv->thermal_periodic); +} + +struct pci_device_id iwl_hw_card_ids[] = { + {0x8086, 0x4222, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, + {0x8086, 0x4227, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, + {0} +}; + +inline int iwl_eeprom_aqcuire_semaphore(struct iwl_priv *priv) +{ + _iwl_clear_bit(priv, CSR_EEPROM_GP, CSR_EEPROM_GP_IF_OWNER_MSK); + return 0; +} + +MODULE_DEVICE_TABLE(pci, iwl_hw_card_ids); diff --git a/drivers/net/wireless/iwlwifi/iwl-3945.h b/drivers/net/wireless/iwlwifi/iwl-3945.h new file mode 100644 index 0000000000000000000000000000000000000000..813902e9f8c2fb096804c39befaeefae2dc03146 --- /dev/null +++ b/drivers/net/wireless/iwlwifi/iwl-3945.h @@ -0,0 +1,41 @@ +/****************************************************************************** + * + * Copyright(c) 2003 - 2007 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA + * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. + * + * Contact Information: + * James P. Ketrenos + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + *****************************************************************************/ + +#ifndef __iwl_3945_h__ +#define __iwl_3945_h__ + +/* + * Forward declare iwl-3945.c functions for iwl-base.c + */ +extern int iwl_eeprom_aqcuire_semaphore(struct iwl_priv *priv); +extern __le32 iwl3945_get_antenna_flags(const struct iwl_priv *priv); +extern int iwl3945_init_hw_rate_table(struct iwl_priv *priv); +extern void iwl3945_reg_txpower_periodic(struct iwl_priv *priv); +extern void iwl3945_bg_reg_txpower_periodic(struct work_struct *work); +extern int iwl3945_txpower_set_from_eeprom(struct iwl_priv *priv); +extern u8 iwl3945_sync_sta(struct iwl_priv *priv, int sta_id, + u16 tx_rate, u8 flags); +#endif diff --git a/drivers/net/wireless/iwlwifi/iwl-4965-hw.h b/drivers/net/wireless/iwlwifi/iwl-4965-hw.h new file mode 100644 index 0000000000000000000000000000000000000000..99a19ef4c7433a9a8339cd352a37d7d7ec57abb1 --- /dev/null +++ b/drivers/net/wireless/iwlwifi/iwl-4965-hw.h @@ -0,0 +1,581 @@ +/****************************************************************************** + * + * This file is provided under a dual BSD/GPLv2 license. 
When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2005 - 2007 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * Contact Information:
+ * James P. Ketrenos
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2005 - 2007 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *  * Neither the name Intel Corporation nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * + *****************************************************************************/ + +#ifndef __iwl_4965_hw_h__ +#define __iwl_4965_hw_h__ + +#define IWL_RX_BUF_SIZE (4 * 1024) +#define IWL_MAX_BSM_SIZE BSM_SRAM_SIZE +#define KDR_RTC_INST_UPPER_BOUND (0x018000) +#define KDR_RTC_DATA_UPPER_BOUND (0x80A000) +#define KDR_RTC_INST_SIZE (KDR_RTC_INST_UPPER_BOUND - RTC_INST_LOWER_BOUND) +#define KDR_RTC_DATA_SIZE (KDR_RTC_DATA_UPPER_BOUND - RTC_DATA_LOWER_BOUND) + +#define IWL_MAX_INST_SIZE KDR_RTC_INST_SIZE +#define IWL_MAX_DATA_SIZE KDR_RTC_DATA_SIZE + +static inline int iwl_hw_valid_rtc_data_addr(u32 addr) +{ + return (addr >= RTC_DATA_LOWER_BOUND) && + (addr < KDR_RTC_DATA_UPPER_BOUND); +} + +/********************* START TXPOWER *****************************************/ +enum { + HT_IE_EXT_CHANNEL_NONE = 0, + HT_IE_EXT_CHANNEL_ABOVE, + HT_IE_EXT_CHANNEL_INVALID, + HT_IE_EXT_CHANNEL_BELOW, + HT_IE_EXT_CHANNEL_MAX +}; + +enum { + CALIB_CH_GROUP_1 = 0, + CALIB_CH_GROUP_2 = 1, + CALIB_CH_GROUP_3 = 2, + CALIB_CH_GROUP_4 = 3, + CALIB_CH_GROUP_5 = 4, + CALIB_CH_GROUP_MAX +}; + +/* Temperature calibration offset is 3% 0C in Kelvin */ +#define TEMPERATURE_CALIB_KELVIN_OFFSET 8 +#define TEMPERATURE_CALIB_A_VAL 259 + +#define IWL_TX_POWER_TEMPERATURE_MIN (263) +#define IWL_TX_POWER_TEMPERATURE_MAX (410) + +#define IWL_TX_POWER_TEMPERATURE_OUT_OF_RANGE(t) \ + (((t) < IWL_TX_POWER_TEMPERATURE_MIN) || \ + ((t) > IWL_TX_POWER_TEMPERATURE_MAX)) + +#define IWL_TX_POWER_ILLEGAL_TEMPERATURE (300) + +#define IWL_TX_POWER_TEMPERATURE_DIFFERENCE (2) + +#define IWL_TX_POWER_MIMO_REGULATORY_COMPENSATION (6) + +#define IWL_TX_POWER_TARGET_POWER_MIN (0) /* 0 dBm = 1 milliwatt */ +#define IWL_TX_POWER_TARGET_POWER_MAX (16) /* 16 dBm */ + +/* timeout equivalent to 3 minutes */ +#define IWL_TX_POWER_TIMELIMIT_NOCALIB 1800000000 + +#define IWL_TX_POWER_CCK_COMPENSATION (9) + +#define MIN_TX_GAIN_INDEX (0) +#define MIN_TX_GAIN_INDEX_52GHZ_EXT (-9) +#define MAX_TX_GAIN_INDEX_52GHZ (98) +#define MIN_TX_GAIN_52GHZ (98) +#define MAX_TX_GAIN_INDEX_24GHZ (98) +#define MIN_TX_GAIN_24GHZ (98) +#define MAX_TX_GAIN (0) +#define MAX_TX_GAIN_52GHZ_EXT (-9) + +#define IWL_TX_POWER_DEFAULT_REGULATORY_24 (34) +#define IWL_TX_POWER_DEFAULT_REGULATORY_52 (34) +#define IWL_TX_POWER_REGULATORY_MIN (0) +#define IWL_TX_POWER_REGULATORY_MAX (34) +#define IWL_TX_POWER_DEFAULT_SATURATION_24 (38) +#define IWL_TX_POWER_DEFAULT_SATURATION_52 (38) +#define IWL_TX_POWER_SATURATION_MIN (20) +#define IWL_TX_POWER_SATURATION_MAX (50) + +/* dv *0.4 = dt; so that 5 degrees temperature diff equals + * 12.5 in voltage diff */ +#define IWL_TX_TEMPERATURE_UPDATE_LIMIT 9 + +#define IWL_INVALID_CHANNEL (0xffffffff) +#define IWL_TX_POWER_REGITRY_BIT (2) + +#define MIN_IWL_TX_POWER_CALIB_DUR (100) +#define IWL_CCK_FROM_OFDM_POWER_DIFF (-5) +#define IWL_CCK_FROM_OFDM_INDEX_DIFF (9) + +/* Number of entries in the gain table */ +#define POWER_GAIN_NUM_ENTRIES 78 +#define TX_POW_MAX_SESSION_NUM 5 +/* timeout equivalent to 3 minutes */ +#define TX_IWL_TIMELIMIT_NOCALIB 1800000000 + +/* Kedron TX_CALIB_STATES */ +#define IWL_TX_CALIB_STATE_SEND_TX 0x00000001 +#define IWL_TX_CALIB_WAIT_TX_RESPONSE 0x00000002 +#define IWL_TX_CALIB_ENABLED 0x00000004 +#define IWL_TX_CALIB_XVT_ON 0x00000008 +#define IWL_TX_CALIB_TEMPERATURE_CORRECT 0x00000010 +#define IWL_TX_CALIB_WORKING_WITH_XVT 0x00000020 +#define IWL_TX_CALIB_XVT_PERIODICAL 0x00000040 + +#define NUM_IWL_TX_CALIB_SETTINS 5 /* Number of tx correction groups */ + +#define IWL_MIN_POWER_IN_VP_TABLE 1 /* 0.5dBm 
multiplied by 2 */ +#define IWL_MAX_POWER_IN_VP_TABLE 40 /* 20dBm - multiplied by 2 (because + * entries are for each 0.5dBm) */ +#define IWL_STEP_IN_VP_TABLE 1 /* 0.5dB - multiplied by 2 */ +#define IWL_NUM_POINTS_IN_VPTABLE \ + (1 + IWL_MAX_POWER_IN_VP_TABLE - IWL_MIN_POWER_IN_VP_TABLE) + +#define MIN_TX_GAIN_INDEX (0) +#define MAX_TX_GAIN_INDEX_52GHZ (98) +#define MIN_TX_GAIN_52GHZ (98) +#define MAX_TX_GAIN_INDEX_24GHZ (98) +#define MIN_TX_GAIN_24GHZ (98) +#define MAX_TX_GAIN (0) + +/* First and last channels of all groups */ +#define CALIB_IWL_TX_ATTEN_GR1_FCH 34 +#define CALIB_IWL_TX_ATTEN_GR1_LCH 43 +#define CALIB_IWL_TX_ATTEN_GR2_FCH 44 +#define CALIB_IWL_TX_ATTEN_GR2_LCH 70 +#define CALIB_IWL_TX_ATTEN_GR3_FCH 71 +#define CALIB_IWL_TX_ATTEN_GR3_LCH 124 +#define CALIB_IWL_TX_ATTEN_GR4_FCH 125 +#define CALIB_IWL_TX_ATTEN_GR4_LCH 200 +#define CALIB_IWL_TX_ATTEN_GR5_FCH 1 +#define CALIB_IWL_TX_ATTEN_GR5_LCH 20 + + +union iwl_tx_power_dual_stream { + struct { + u8 radio_tx_gain[2]; + u8 dsp_predis_atten[2]; + } s; + u32 dw; +}; + +/********************* END TXPOWER *****************************************/ + +/* HT flags */ +#define RXON_FLG_CTRL_CHANNEL_LOC_POS (22) +#define RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK __constant_cpu_to_le32(0x1<<22) + +#define RXON_FLG_HT_OPERATING_MODE_POS (23) + +#define RXON_FLG_HT_PROT_MSK __constant_cpu_to_le32(0x1<<23) +#define RXON_FLG_FAT_PROT_MSK __constant_cpu_to_le32(0x2<<23) + +#define RXON_FLG_CHANNEL_MODE_POS (25) +#define RXON_FLG_CHANNEL_MODE_MSK __constant_cpu_to_le32(0x3<<25) +#define RXON_FLG_CHANNEL_MODE_PURE_40_MSK __constant_cpu_to_le32(0x1<<25) +#define RXON_FLG_CHANNEL_MODE_MIXED_MSK __constant_cpu_to_le32(0x2<<25) + +#define RXON_RX_CHAIN_DRIVER_FORCE_MSK __constant_cpu_to_le16(0x1<<0) +#define RXON_RX_CHAIN_VALID_MSK __constant_cpu_to_le16(0x7<<1) +#define RXON_RX_CHAIN_VALID_POS (1) +#define RXON_RX_CHAIN_FORCE_SEL_MSK __constant_cpu_to_le16(0x7<<4) +#define RXON_RX_CHAIN_FORCE_SEL_POS (4) +#define RXON_RX_CHAIN_FORCE_MIMO_SEL_MSK __constant_cpu_to_le16(0x7<<7) +#define RXON_RX_CHAIN_FORCE_MIMO_SEL_POS (7) +#define RXON_RX_CHAIN_CNT_MSK __constant_cpu_to_le16(0x3<<10) +#define RXON_RX_CHAIN_CNT_POS (10) +#define RXON_RX_CHAIN_MIMO_CNT_MSK __constant_cpu_to_le16(0x3<<12) +#define RXON_RX_CHAIN_MIMO_CNT_POS (12) +#define RXON_RX_CHAIN_MIMO_FORCE_MSK __constant_cpu_to_le16(0x1<<14) +#define RXON_RX_CHAIN_MIMO_FORCE_POS (14) + + +#define MCS_DUP_6M_PLCP 0x20 + +/* OFDM HT rate masks */ +/* ***************************************** */ +#define R_MCS_6M_MSK 0x1 +#define R_MCS_12M_MSK 0x2 +#define R_MCS_18M_MSK 0x4 +#define R_MCS_24M_MSK 0x8 +#define R_MCS_36M_MSK 0x10 +#define R_MCS_48M_MSK 0x20 +#define R_MCS_54M_MSK 0x40 +#define R_MCS_60M_MSK 0x80 +#define R_MCS_12M_DUAL_MSK 0x100 +#define R_MCS_24M_DUAL_MSK 0x200 +#define R_MCS_36M_DUAL_MSK 0x400 +#define R_MCS_48M_DUAL_MSK 0x800 + +#define is_legacy(tbl) (((tbl) == LQ_G) || ((tbl) == LQ_A)) +#define is_siso(tbl) (((tbl) == LQ_SISO)) +#define is_mimo(tbl) (((tbl) == LQ_MIMO)) +#define is_Ht(tbl) (is_siso(tbl) || is_mimo(tbl)) +#define is_a_band(tbl) (((tbl) == LQ_A)) +#define is_g_and(tbl) (((tbl) == LQ_G)) + +/* Flow Handler Definitions */ + +/**********************/ +/* Addresses */ +/**********************/ + +#define FH_MEM_LOWER_BOUND (0x1000) +#define FH_MEM_UPPER_BOUND (0x1EF0) + +#define IWL_FH_REGS_LOWER_BOUND (0x1000) +#define IWL_FH_REGS_UPPER_BOUND (0x2000) + +#define IWL_FH_KW_MEM_ADDR_REG (FH_MEM_LOWER_BOUND + 0x97C) + +/* CBBC Area - Circular buffers base address cache 
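/*
 * The iwl_tx_power_dual_stream union above lets the two radio gains and
 * two DSP pre-distortion attenuation values be filled per byte and then
 * handed around as one 32-bit word.  Host-side illustration (gain values
 * are made up; the printed word depends on host byte order, whereas the
 * real command is little-endian on the wire):
 */
#include <stdio.h>
#include <stdint.h>

union tx_power_dual_stream {
        struct {
                uint8_t radio_tx_gain[2];
                uint8_t dsp_predis_atten[2];
        } s;
        uint32_t dw;
};

int main(void)
{
        union tx_power_dual_stream tp = {
                .s = {
                        .radio_tx_gain    = { 0x3f, 0x3f },
                        .dsp_predis_atten = { 0x68, 0x68 },
                },
        };

        printf("dw = 0x%08x\n", tp.dw);
        return 0;
}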
pointers table */ +#define FH_MEM_CBBC_LOWER_BOUND (FH_MEM_LOWER_BOUND + 0x9D0) +#define FH_MEM_CBBC_UPPER_BOUND (FH_MEM_LOWER_BOUND + 0xA10) +/* queues 0 - 15 */ +#define FH_MEM_CBBC_QUEUE(x) (FH_MEM_CBBC_LOWER_BOUND + (x) * 0x4) + +/* RSCSR Area */ +#define FH_MEM_RSCSR_LOWER_BOUND (FH_MEM_LOWER_BOUND + 0xBC0) +#define FH_MEM_RSCSR_UPPER_BOUND (FH_MEM_LOWER_BOUND + 0xC00) +#define FH_MEM_RSCSR_CHNL0 (FH_MEM_RSCSR_LOWER_BOUND) + +#define FH_RSCSR_CHNL0_STTS_WPTR_REG (FH_MEM_RSCSR_CHNL0) +#define FH_RSCSR_CHNL0_RBDCB_BASE_REG (FH_MEM_RSCSR_CHNL0 + 0x004) +#define FH_RSCSR_CHNL0_RBDCB_WPTR_REG (FH_MEM_RSCSR_CHNL0 + 0x008) + +/* RCSR Area - Registers address map */ +#define FH_MEM_RCSR_LOWER_BOUND (FH_MEM_LOWER_BOUND + 0xC00) +#define FH_MEM_RCSR_UPPER_BOUND (FH_MEM_LOWER_BOUND + 0xCC0) +#define FH_MEM_RCSR_CHNL0 (FH_MEM_RCSR_LOWER_BOUND) + +#define FH_MEM_RCSR_CHNL0_CONFIG_REG (FH_MEM_RCSR_CHNL0) + +/* RSSR Area - Rx shared ctrl & status registers */ +#define FH_MEM_RSSR_LOWER_BOUND (FH_MEM_LOWER_BOUND + 0xC40) +#define FH_MEM_RSSR_UPPER_BOUND (FH_MEM_LOWER_BOUND + 0xD00) +#define FH_MEM_RSSR_SHARED_CTRL_REG (FH_MEM_RSSR_LOWER_BOUND) +#define FH_MEM_RSSR_RX_STATUS_REG (FH_MEM_RSSR_LOWER_BOUND + 0x004) +#define FH_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV (FH_MEM_RSSR_LOWER_BOUND + 0x008) + +/* TCSR */ +#define IWL_FH_TCSR_LOWER_BOUND (IWL_FH_REGS_LOWER_BOUND + 0xD00) +#define IWL_FH_TCSR_UPPER_BOUND (IWL_FH_REGS_LOWER_BOUND + 0xE60) + +#define IWL_FH_TCSR_CHNL_NUM (7) +#define IWL_FH_TCSR_CHNL_TX_CONFIG_REG(_chnl) \ + (IWL_FH_TCSR_LOWER_BOUND + 0x20 * _chnl) + +/* TSSR Area - Tx shared status registers */ +/* TSSR */ +#define IWL_FH_TSSR_LOWER_BOUND (IWL_FH_REGS_LOWER_BOUND + 0xEA0) +#define IWL_FH_TSSR_UPPER_BOUND (IWL_FH_REGS_LOWER_BOUND + 0xEC0) + +#define IWL_FH_TSSR_TX_MSG_CONFIG_REG (IWL_FH_TSSR_LOWER_BOUND + 0x008) +#define IWL_FH_TSSR_TX_STATUS_REG (IWL_FH_TSSR_LOWER_BOUND + 0x010) + +#define IWL_FH_TSSR_TX_MSG_CONFIG_REG_VAL_SNOOP_RD_TXPD_ON (0xFF000000) +#define IWL_FH_TSSR_TX_MSG_CONFIG_REG_VAL_ORDER_RD_TXPD_ON (0x00FF0000) + +#define IWL_FH_TSSR_TX_MSG_CONFIG_REG_VAL_MAX_FRAG_SIZE_64B (0x00000000) +#define IWL_FH_TSSR_TX_MSG_CONFIG_REG_VAL_MAX_FRAG_SIZE_128B (0x00000400) +#define IWL_FH_TSSR_TX_MSG_CONFIG_REG_VAL_MAX_FRAG_SIZE_256B (0x00000800) +#define IWL_FH_TSSR_TX_MSG_CONFIG_REG_VAL_MAX_FRAG_SIZE_512B (0x00000C00) + +#define IWL_FH_TSSR_TX_MSG_CONFIG_REG_VAL_SNOOP_RD_TFD_ON (0x00000100) +#define IWL_FH_TSSR_TX_MSG_CONFIG_REG_VAL_ORDER_RD_CBB_ON (0x00000080) + +#define IWL_FH_TSSR_TX_MSG_CONFIG_REG_VAL_ORDER_RSP_WAIT_TH (0x00000020) +#define IWL_FH_TSSR_TX_MSG_CONFIG_REG_VAL_RSP_WAIT_TH (0x00000005) + +#define IWL_FH_TSSR_TX_STATUS_REG_BIT_BUFS_EMPTY(_chnl) \ + ((1 << (_chnl)) << 24) +#define IWL_FH_TSSR_TX_STATUS_REG_BIT_NO_PEND_REQ(_chnl) \ + ((1 << (_chnl)) << 16) + +#define IWL_FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(_chnl) \ + (IWL_FH_TSSR_TX_STATUS_REG_BIT_BUFS_EMPTY(_chnl) | \ + IWL_FH_TSSR_TX_STATUS_REG_BIT_NO_PEND_REQ(_chnl)) + +/* TCSR: tx_config register values */ +#define IWL_FH_TCSR_TX_CONFIG_REG_VAL_MSG_MODE_TXF (0x00000000) +#define IWL_FH_TCSR_TX_CONFIG_REG_VAL_MSG_MODE_DRIVER (0x00000001) +#define IWL_FH_TCSR_TX_CONFIG_REG_VAL_MSG_MODE_ARC (0x00000002) + +#define IWL_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE_VAL (0x00000000) +#define IWL_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE_VAL (0x00000008) + +#define IWL_FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_NOINT (0x00000000) +#define IWL_FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD (0x00100000) +#define 
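/*
 * The flow-handler map above addresses per-queue/per-channel registers
 * with simple stride macros.  A quick standalone check of the arithmetic,
 * with the constants copied from the defines above (macro argument
 * parenthesized here):
 */
#include <stdio.h>

#define FH_MEM_LOWER_BOUND      0x1000
#define FH_MEM_CBBC_LOWER_BOUND (FH_MEM_LOWER_BOUND + 0x9D0)
#define FH_MEM_CBBC_QUEUE(x)    (FH_MEM_CBBC_LOWER_BOUND + (x) * 0x4)

#define IWL_FH_REGS_LOWER_BOUND 0x1000
#define IWL_FH_TCSR_LOWER_BOUND (IWL_FH_REGS_LOWER_BOUND + 0xD00)
#define IWL_FH_TCSR_CHNL_TX_CONFIG_REG(_chnl) \
        (IWL_FH_TCSR_LOWER_BOUND + 0x20 * (_chnl))

int main(void)
{
        printf("CBBC base for queue 5:     0x%04x\n", FH_MEM_CBBC_QUEUE(5));
        printf("TCSR tx_config, channel 3: 0x%04x\n",
               IWL_FH_TCSR_CHNL_TX_CONFIG_REG(3));
        return 0;
}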
IWL_FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_IFTFD (0x00200000) + +#define IWL_FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_RTC_NOINT (0x00000000) +#define IWL_FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_RTC_ENDTFD (0x00400000) +#define IWL_FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_RTC_IFTFD (0x00800000) + +#define IWL_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE (0x00000000) +#define IWL_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE_EOF (0x40000000) +#define IWL_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE (0x80000000) + +#define IWL_FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_EMPTY (0x00000000) +#define IWL_FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_WAIT (0x00002000) +#define IWL_FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID (0x00000003) + +#define IWL_FH_TCSR_CHNL_TX_BUF_STS_REG_BIT_TFDB_WPTR (0x00000001) + +#define IWL_FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM (20) +#define IWL_FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX (12) + +/* RCSR: channel 0 rx_config register defines */ +#define FH_RCSR_CHNL0_RX_CONFIG_DMA_CHNL_EN_MASK (0xC0000000) /* bits 30-31 */ +#define FH_RCSR_CHNL0_RX_CONFIG_RBDBC_SIZE_MASK (0x00F00000) /* bits 20-23 */ +#define FH_RCSR_CHNL0_RX_CONFIG_RB_SIZE_MASK (0x00030000) /* bits 16-17 */ +#define FH_RCSR_CHNL0_RX_CONFIG_SINGLE_FRAME_MASK (0x00008000) /* bit 15 */ +#define FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_MASK (0x00001000) /* bit 12 */ +#define FH_RCSR_CHNL0_RX_CONFIG_RB_TIMEOUT_MASK (0x00000FF0) /* bit 4-11 */ + +#define FH_RCSR_RX_CONFIG_RBDCB_SIZE_BITSHIFT (20) +#define FH_RCSR_RX_CONFIG_RB_SIZE_BITSHIFT (16) + +/* RCSR: rx_config register values */ +#define FH_RCSR_RX_CONFIG_CHNL_EN_PAUSE_VAL (0x00000000) +#define FH_RCSR_RX_CONFIG_CHNL_EN_PAUSE_EOF_VAL (0x40000000) +#define FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL (0x80000000) + +#define IWL_FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K (0x00000000) + +/* RCSR channel 0 config register values */ +#define FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_NO_INT_VAL (0x00000000) +#define FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL (0x00001000) + +/* RSCSR: defs used in normal mode */ +#define FH_RSCSR_CHNL0_RBDCB_WPTR_MASK (0x00000FFF) /* bits 0-11 */ + +#define SCD_WIN_SIZE 64 +#define SCD_FRAME_LIMIT 64 + +/* memory mapped registers */ +#define SCD_START_OFFSET 0xa02c00 + +#define SCD_SRAM_BASE_ADDR (SCD_START_OFFSET + 0x0) +#define SCD_EMPTY_BITS (SCD_START_OFFSET + 0x4) +#define SCD_DRAM_BASE_ADDR (SCD_START_OFFSET + 0x10) +#define SCD_AIT (SCD_START_OFFSET + 0x18) +#define SCD_TXFACT (SCD_START_OFFSET + 0x1c) +#define SCD_QUEUE_WRPTR(x) (SCD_START_OFFSET + 0x24 + (x) * 4) +#define SCD_QUEUE_RDPTR(x) (SCD_START_OFFSET + 0x64 + (x) * 4) +#define SCD_SETQUEUENUM (SCD_START_OFFSET + 0xa4) +#define SCD_SET_TXSTAT_TXED (SCD_START_OFFSET + 0xa8) +#define SCD_SET_TXSTAT_DONE (SCD_START_OFFSET + 0xac) +#define SCD_SET_TXSTAT_NOT_SCHD (SCD_START_OFFSET + 0xb0) +#define SCD_DECREASE_CREDIT (SCD_START_OFFSET + 0xb4) +#define SCD_DECREASE_SCREDIT (SCD_START_OFFSET + 0xb8) +#define SCD_LOAD_CREDIT (SCD_START_OFFSET + 0xbc) +#define SCD_LOAD_SCREDIT (SCD_START_OFFSET + 0xc0) +#define SCD_BAR (SCD_START_OFFSET + 0xc4) +#define SCD_BAR_DW0 (SCD_START_OFFSET + 0xc8) +#define SCD_BAR_DW1 (SCD_START_OFFSET + 0xcc) +#define SCD_QUEUECHAIN_SEL (SCD_START_OFFSET + 0xd0) +#define SCD_QUERY_REQ (SCD_START_OFFSET + 0xd8) +#define SCD_QUERY_RES (SCD_START_OFFSET + 0xdc) +#define SCD_PENDING_FRAMES (SCD_START_OFFSET + 0xe0) +#define SCD_INTERRUPT_MASK (SCD_START_OFFSET + 0xe4) +#define SCD_INTERRUPT_THRESHOLD (SCD_START_OFFSET + 0xe8) +#define SCD_QUERY_MIN_FRAME_SIZE (SCD_START_OFFSET + 0x100) +#define SCD_QUEUE_STATUS_BITS(x) 
(SCD_START_OFFSET + 0x104 + (x) * 4) + +/* SRAM structures */ +#define SCD_CONTEXT_DATA_OFFSET 0x380 +#define SCD_TX_STTS_BITMAP_OFFSET 0x400 +#define SCD_TRANSLATE_TBL_OFFSET 0x500 +#define SCD_CONTEXT_QUEUE_OFFSET(x) (SCD_CONTEXT_DATA_OFFSET + ((x) * 8)) +#define SCD_TRANSLATE_TBL_OFFSET_QUEUE(x) \ + ((SCD_TRANSLATE_TBL_OFFSET + ((x) * 2)) & 0xfffffffc) + +#define SCD_TXFACT_REG_TXFIFO_MASK(lo, hi) \ + ((1<<(hi))|((1<<(hi))-(1<<(lo)))) + + +#define SCD_MODE_REG_BIT_SEARCH_MODE (1<<0) +#define SCD_MODE_REG_BIT_SBYP_MODE (1<<1) + +#define SCD_TXFIFO_POS_TID (0) +#define SCD_TXFIFO_POS_RA (4) +#define SCD_QUEUE_STTS_REG_POS_ACTIVE (0) +#define SCD_QUEUE_STTS_REG_POS_TXF (1) +#define SCD_QUEUE_STTS_REG_POS_WSL (5) +#define SCD_QUEUE_STTS_REG_POS_SCD_ACK (8) +#define SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN (10) +#define SCD_QUEUE_STTS_REG_MSK (0x0007FC00) + +#define SCD_QUEUE_RA_TID_MAP_RATID_MSK (0x01FF) + +#define SCD_QUEUE_CTX_REG1_WIN_SIZE_POS (0) +#define SCD_QUEUE_CTX_REG1_WIN_SIZE_MSK (0x0000007F) +#define SCD_QUEUE_CTX_REG1_CREDIT_POS (8) +#define SCD_QUEUE_CTX_REG1_CREDIT_MSK (0x00FFFF00) +#define SCD_QUEUE_CTX_REG1_SUPER_CREDIT_POS (24) +#define SCD_QUEUE_CTX_REG1_SUPER_CREDIT_MSK (0xFF000000) +#define SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS (16) +#define SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK (0x007F0000) + +#define CSR_HW_IF_CONFIG_REG_BIT_KEDRON_R (0x00000010) +#define CSR_HW_IF_CONFIG_REG_MSK_BOARD_VER (0x00000C00) +#define CSR_HW_IF_CONFIG_REG_BIT_MAC_SI (0x00000100) +#define CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI (0x00000200) + +static inline u8 iwl_hw_get_rate(__le32 rate_n_flags) +{ + return le32_to_cpu(rate_n_flags) & 0xFF; +} +static inline u16 iwl_hw_get_rate_n_flags(__le32 rate_n_flags) +{ + return le32_to_cpu(rate_n_flags) & 0xFFFF; +} +static inline __le32 iwl_hw_set_rate_n_flags(u8 rate, u16 flags) +{ + return cpu_to_le32(flags|(u16)rate); +} + +struct iwl_tfd_frame_data { + __le32 tb1_addr; + + __le32 val1; + /* __le32 ptb1_32_35:4; */ +#define IWL_tb1_addr_hi_POS 0 +#define IWL_tb1_addr_hi_LEN 4 +#define IWL_tb1_addr_hi_SYM val1 + /* __le32 tb_len1:12; */ +#define IWL_tb1_len_POS 4 +#define IWL_tb1_len_LEN 12 +#define IWL_tb1_len_SYM val1 + /* __le32 ptb2_0_15:16; */ +#define IWL_tb2_addr_lo16_POS 16 +#define IWL_tb2_addr_lo16_LEN 16 +#define IWL_tb2_addr_lo16_SYM val1 + + __le32 val2; + /* __le32 ptb2_16_35:20; */ +#define IWL_tb2_addr_hi20_POS 0 +#define IWL_tb2_addr_hi20_LEN 20 +#define IWL_tb2_addr_hi20_SYM val2 + /* __le32 tb_len2:12; */ +#define IWL_tb2_len_POS 20 +#define IWL_tb2_len_LEN 12 +#define IWL_tb2_len_SYM val2 +} __attribute__ ((packed)); + +struct iwl_tfd_frame { + __le32 val0; + /* __le32 rsvd1:24; */ + /* __le32 num_tbs:5; */ +#define IWL_num_tbs_POS 24 +#define IWL_num_tbs_LEN 5 +#define IWL_num_tbs_SYM val0 + /* __le32 rsvd2:1; */ + /* __le32 padding:2; */ + struct iwl_tfd_frame_data pa[10]; + __le32 reserved; +} __attribute__ ((packed)); + +#define IWL4965_MAX_WIN_SIZE 64 +#define IWL4965_QUEUE_SIZE 256 +#define IWL4965_NUM_FIFOS 7 +#define IWL_MAX_NUM_QUEUES 16 + +struct iwl4965_queue_byte_cnt_entry { + __le16 val; + /* __le16 byte_cnt:12; */ +#define IWL_byte_cnt_POS 0 +#define IWL_byte_cnt_LEN 12 +#define IWL_byte_cnt_SYM val + /* __le16 rsvd:4; */ +} __attribute__ ((packed)); + +struct iwl4965_sched_queue_byte_cnt_tbl { + struct iwl4965_queue_byte_cnt_entry tfd_offset[IWL4965_QUEUE_SIZE + + IWL4965_MAX_WIN_SIZE]; + u8 dont_care[1024 - + (IWL4965_QUEUE_SIZE + IWL4965_MAX_WIN_SIZE) * + sizeof(__le16)]; +} __attribute__ ((packed)); + +/* Base physical address of 
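/*
 * SCD_TXFACT_REG_TXFIFO_MASK(lo, hi) above builds a contiguous mask
 * covering TX FIFOs lo..hi inclusive.  Standalone check of the expression:
 */
#include <stdio.h>

#define TXFIFO_MASK(lo, hi) ((1 << (hi)) | ((1 << (hi)) - (1 << (lo))))

int main(void)
{
        printf("mask(0, 6) = 0x%02x\n", TXFIFO_MASK(0, 6)); /* 0x7f: FIFOs 0..6 */
        printf("mask(2, 5) = 0x%02x\n", TXFIFO_MASK(2, 5)); /* 0x3c: FIFOs 2..5 */
        return 0;
}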
iwl_shared is provided to SCD_DRAM_BASE_ADDR + * and &iwl_shared.val0 is provided to FH_RSCSR_CHNL0_STTS_WPTR_REG */ +struct iwl_shared { + struct iwl4965_sched_queue_byte_cnt_tbl + queues_byte_cnt_tbls[IWL_MAX_NUM_QUEUES]; + __le32 val0; + + /* __le32 rb_closed_stts_rb_num:12; */ +#define IWL_rb_closed_stts_rb_num_POS 0 +#define IWL_rb_closed_stts_rb_num_LEN 12 +#define IWL_rb_closed_stts_rb_num_SYM val0 + /* __le32 rsrv1:4; */ + /* __le32 rb_closed_stts_rx_frame_num:12; */ +#define IWL_rb_closed_stts_rx_frame_num_POS 16 +#define IWL_rb_closed_stts_rx_frame_num_LEN 12 +#define IWL_rb_closed_stts_rx_frame_num_SYM val0 + /* __le32 rsrv2:4; */ + + __le32 val1; + /* __le32 frame_finished_stts_rb_num:12; */ +#define IWL_frame_finished_stts_rb_num_POS 0 +#define IWL_frame_finished_stts_rb_num_LEN 12 +#define IWL_frame_finished_stts_rb_num_SYM val1 + /* __le32 rsrv3:4; */ + /* __le32 frame_finished_stts_rx_frame_num:12; */ +#define IWL_frame_finished_stts_rx_frame_num_POS 16 +#define IWL_frame_finished_stts_rx_frame_num_LEN 12 +#define IWL_frame_finished_stts_rx_frame_num_SYM val1 + /* __le32 rsrv4:4; */ + + __le32 padding1; /* so that allocation will be aligned to 16B */ + __le32 padding2; +} __attribute__ ((packed)); + +#endif /* __iwl_4965_hw_h__ */ diff --git a/drivers/net/wireless/iwlwifi/iwl-4965-rs.c b/drivers/net/wireless/iwlwifi/iwl-4965-rs.c new file mode 100644 index 0000000000000000000000000000000000000000..287c75705c44db0a72729dad2f0833294e5ac1ff --- /dev/null +++ b/drivers/net/wireless/iwlwifi/iwl-4965-rs.c @@ -0,0 +1,2295 @@ +/****************************************************************************** + * + * Copyright(c) 2005 - 2007 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA + * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. + * + * Contact Information: + * James P. Ketrenos + * Intel Corporation, 5200 N.E. 
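/*
 * The val0/val1 words above use the driver's POS/LEN/SYM convention for
 * packing several sub-fields into one little-endian word.  The generic
 * accessor macros are not part of this hunk, so this is only a host-side
 * sketch of how such a field can be extracted by position and length:
 */
#include <stdio.h>
#include <stdint.h>

#define FIELD_GET(val, pos, len) \
        (((val) >> (pos)) & ((1U << (len)) - 1))

int main(void)
{
        uint32_t val0 = 0x0123A045;     /* made-up register snapshot */

        /* rb_closed_stts_rx_frame_num: POS 16, LEN 12 inside val0 */
        printf("rx_frame_num = %u\n", FIELD_GET(val0, 16, 12));
        /* rb_closed_stts_rb_num: POS 0, LEN 12 inside val0 */
        printf("rb_num       = %u\n", FIELD_GET(val0, 0, 12));
        return 0;
}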
Elam Young Parkway, Hillsboro, OR 97124-6497 + * + *****************************************************************************/ +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include + +#include +#include + +#define IWL 4965 + +#include "../net/mac80211/ieee80211_rate.h" + +#include "iwlwifi.h" +#include "iwl-helpers.h" + +#define RS_NAME "iwl-4965-rs" + +#define NUM_TRY_BEFORE_ANTENNA_TOGGLE 1 +#define IWL_NUMBER_TRY 1 +#define IWL_HT_NUMBER_TRY 3 + +#define IWL_RATE_MAX_WINDOW 62 +#define IWL_RATE_HIGH_TH 10880 +#define IWL_RATE_MIN_FAILURE_TH 6 +#define IWL_RATE_MIN_SUCCESS_TH 8 +#define IWL_RATE_DECREASE_TH 1920 +#define IWL_RATE_INCREASE_TH 8960 +#define IWL_RATE_SCALE_FLUSH_INTVL (2*HZ) /*2 seconds */ + +static u8 rs_ht_to_legacy[] = { + IWL_RATE_6M_INDEX, IWL_RATE_6M_INDEX, + IWL_RATE_6M_INDEX, IWL_RATE_6M_INDEX, + IWL_RATE_6M_INDEX, + IWL_RATE_6M_INDEX, IWL_RATE_9M_INDEX, + IWL_RATE_12M_INDEX, IWL_RATE_18M_INDEX, + IWL_RATE_24M_INDEX, IWL_RATE_36M_INDEX, + IWL_RATE_48M_INDEX, IWL_RATE_54M_INDEX +}; + +struct iwl_rate { + u32 rate_n_flags; +} __attribute__ ((packed)); + +struct iwl_rate_scale_data { + u64 data; + s32 success_counter; + s32 success_ratio; + s32 counter; + s32 average_tpt; + unsigned long stamp; +}; + +struct iwl_scale_tbl_info { + enum iwl_table_type lq_type; + enum iwl_antenna_type antenna_type; + u8 is_SGI; + u8 is_fat; + u8 is_dup; + u8 action; + s32 *expected_tpt; + struct iwl_rate current_rate; + struct iwl_rate_scale_data win[IWL_RATE_COUNT]; +}; + +struct iwl_rate_scale_priv { + u8 active_tbl; + u8 enable_counter; + u8 stay_in_tbl; + u8 search_better_tbl; + s32 last_tpt; + u32 table_count_limit; + u32 max_failure_limit; + u32 max_success_limit; + u32 table_count; + u32 total_failed; + u32 total_success; + u32 flush_timer; + u8 action_counter; + u8 antenna; + u8 valid_antenna; + u8 is_green; + u8 is_dup; + u8 phymode; + u8 ibss_sta_added; + u32 supp_rates; + u16 active_rate; + u16 active_siso_rate; + u16 active_mimo_rate; + u16 active_rate_basic; + struct iwl_link_quality_cmd lq; + struct iwl_scale_tbl_info lq_info[LQ_SIZE]; +#ifdef CONFIG_MAC80211_DEBUGFS + struct dentry *rs_sta_dbgfs_scale_table_file; + struct dentry *rs_sta_dbgfs_stats_table_file; + struct iwl_rate dbg_fixed; + struct iwl_priv *drv; +#endif +}; + +static void rs_rate_scale_perform(struct iwl_priv *priv, + struct net_device *dev, + struct ieee80211_hdr *hdr, + struct sta_info *sta); +static void rs_fill_link_cmd(struct iwl_rate_scale_priv *lq_data, + struct iwl_rate *tx_mcs, + struct iwl_link_quality_cmd *tbl); + + +#ifdef CONFIG_MAC80211_DEBUGFS +static void rs_dbgfs_set_mcs(struct iwl_rate_scale_priv *rs_priv, + struct iwl_rate *mcs, int index); +#else +static void rs_dbgfs_set_mcs(struct iwl_rate_scale_priv *rs_priv, + struct iwl_rate *mcs, int index) +{} +#endif +static s32 expected_tpt_A[IWL_RATE_COUNT] = { + 0, 0, 0, 0, 40, 57, 72, 98, 121, 154, 177, 186, 186 +}; + +static s32 expected_tpt_G[IWL_RATE_COUNT] = { + 7, 13, 35, 58, 40, 57, 72, 98, 121, 154, 177, 186, 186 +}; + +static s32 expected_tpt_siso20MHz[IWL_RATE_COUNT] = { + 0, 0, 0, 0, 42, 42, 76, 102, 124, 159, 183, 193, 202 +}; + +static s32 expected_tpt_siso20MHzSGI[IWL_RATE_COUNT] = { + 0, 0, 0, 0, 46, 46, 82, 110, 132, 168, 192, 202, 211 +}; + +static s32 expected_tpt_mimo20MHz[IWL_RATE_COUNT] = { + 0, 0, 0, 0, 74, 74, 123, 155, 179, 214, 236, 244, 251 +}; + +static s32 expected_tpt_mimo20MHzSGI[IWL_RATE_COUNT] = { + 0, 0, 0, 0, 81, 81, 131, 164, 188, 222, 243, 251, 257 +}; + 
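/*
 * The expected-throughput tables above are indexed by the same rate index
 * used throughout the rate scaler; slots a mode cannot use (the CCK
 * positions in the HT tables) are simply zero, and the values only matter
 * relative to each other.  Small lookup illustration (index meaning
 * assumed), using two of the tables verbatim:
 */
#include <stdio.h>

#define RATE_COUNT 13

static const int tpt_G[RATE_COUNT] = {
        7, 13, 35, 58, 40, 57, 72, 98, 121, 154, 177, 186, 186
};
static const int tpt_mimo20MHz[RATE_COUNT] = {
        0, 0, 0, 0, 74, 74, 123, 155, 179, 214, 236, 244, 251
};

int main(void)
{
        int idx = 9;    /* assumed to correspond to a higher OFDM rate */

        printf("expected tpt at index %d: legacy G %d, MIMO 20 MHz %d\n",
               idx, tpt_G[idx], tpt_mimo20MHz[idx]);
        return 0;
}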
+static s32 expected_tpt_siso40MHz[IWL_RATE_COUNT] = { + 0, 0, 0, 0, 77, 77, 127, 160, 184, 220, 242, 250, 257 +}; + +static s32 expected_tpt_siso40MHzSGI[IWL_RATE_COUNT] = { + 0, 0, 0, 0, 83, 83, 135, 169, 193, 229, 250, 257, 264 +}; + +static s32 expected_tpt_mimo40MHz[IWL_RATE_COUNT] = { + 0, 0, 0, 0, 123, 123, 182, 214, 235, 264, 279, 285, 289 +}; + +static s32 expected_tpt_mimo40MHzSGI[IWL_RATE_COUNT] = { + 0, 0, 0, 0, 131, 131, 191, 222, 242, 270, 284, 289, 293 +}; + +static int iwl_lq_sync_callback(struct iwl_priv *priv, + struct iwl_cmd *cmd, struct sk_buff *skb) +{ + /*We didn't cache the SKB; let the caller free it */ + return 1; +} + +static inline u8 iwl_rate_get_rate(u32 rate_n_flags) +{ + return (u8)(rate_n_flags & 0xFF); +} + +static int rs_send_lq_cmd(struct iwl_priv *priv, + struct iwl_link_quality_cmd *lq, u8 flags) +{ +#ifdef CONFIG_IWLWIFI_DEBUG + int i; +#endif + int rc = -1; + + struct iwl_host_cmd cmd = { + .id = REPLY_TX_LINK_QUALITY_CMD, + .len = sizeof(struct iwl_link_quality_cmd), + .meta.flags = flags, + .data = lq, + }; + + if ((lq->sta_id == 0xFF) && + (priv->iw_mode == IEEE80211_IF_TYPE_IBSS)) + return rc; + + if (lq->sta_id == 0xFF) + lq->sta_id = IWL_AP_ID; + + IWL_DEBUG_RATE("lq station id 0x%x\n", lq->sta_id); + IWL_DEBUG_RATE("lq dta 0x%X 0x%X\n", + lq->general_params.single_stream_ant_msk, + lq->general_params.dual_stream_ant_msk); +#ifdef CONFIG_IWLWIFI_DEBUG + for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++) + IWL_DEBUG_RATE("lq index %d 0x%X\n", + i, lq->rs_table[i].rate_n_flags); +#endif + + if (flags & CMD_ASYNC) + cmd.meta.u.callback = iwl_lq_sync_callback; + + if (iwl_is_associated(priv) && priv->assoc_station_added && + priv->lq_mngr.lq_ready) + rc = iwl_send_cmd(priv, &cmd); + + return rc; +} + +static int rs_rate_scale_clear_window(struct iwl_rate_scale_data *window) +{ + window->data = 0; + window->success_counter = 0; + window->success_ratio = IWL_INVALID_VALUE; + window->counter = 0; + window->average_tpt = IWL_INVALID_VALUE; + window->stamp = 0; + + return 0; +} + +static int rs_collect_tx_data(struct iwl_rate_scale_data *windows, + int scale_index, s32 tpt, u32 status) +{ + int rc = 0; + struct iwl_rate_scale_data *window = NULL; + u64 mask; + u8 win_size = IWL_RATE_MAX_WINDOW; + s32 fail_count; + + if (scale_index < 0) + return -1; + + if (scale_index >= IWL_RATE_COUNT) + return -1; + + window = &(windows[scale_index]); + + if (window->counter >= win_size) { + + window->counter = win_size - 1; + mask = 1; + mask = (mask << (win_size - 1)); + if ((window->data & mask)) { + window->data &= ~mask; + window->success_counter = window->success_counter - 1; + } + } + + window->counter = window->counter + 1; + mask = window->data; + window->data = (mask << 1); + if (status != 0) { + window->success_counter = window->success_counter + 1; + window->data |= 0x1; + } + + if (window->counter > 0) + window->success_ratio = 128 * (100 * window->success_counter) + / window->counter; + else + window->success_ratio = IWL_INVALID_VALUE; + + fail_count = window->counter - window->success_counter; + + if ((fail_count >= IWL_RATE_MIN_FAILURE_TH) || + (window->success_counter >= IWL_RATE_MIN_SUCCESS_TH)) + window->average_tpt = (window->success_ratio * tpt + 64) / 128; + else + window->average_tpt = IWL_INVALID_VALUE; + + window->stamp = jiffies; + + return rc; +} + +int static rs_mcs_from_tbl(struct iwl_rate *mcs_rate, + struct iwl_scale_tbl_info *tbl, + int index, u8 use_green) +{ + int rc = 0; + + if (is_legacy(tbl->lq_type)) { + mcs_rate->rate_n_flags = 
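/*
 * Compact user-space model (not driver code) of the per-rate history kept
 * by rs_collect_tx_data() above: a 62-frame bit mask is shifted left for
 * every transmitted frame, the outgoing bit (if set) decrements the
 * success count, and success_ratio is a percentage scaled by 128 so later
 * comparisons stay in integer math.
 */
#include <stdio.h>
#include <stdint.h>

#define WIN_SIZE 62

struct win {
        uint64_t data;
        int counter;
        int success_counter;
        int success_ratio;      /* percent * 128 */
};

static void collect(struct win *w, int success)
{
        uint64_t oldest = 1ULL << (WIN_SIZE - 1);

        if (w->counter >= WIN_SIZE) {
                w->counter = WIN_SIZE - 1;
                if (w->data & oldest) {
                        w->data &= ~oldest;
                        w->success_counter--;
                }
        }
        w->counter++;
        w->data <<= 1;
        if (success) {
                w->data |= 1;
                w->success_counter++;
        }
        w->success_ratio = 128 * (100 * w->success_counter) / w->counter;
}

int main(void)
{
        struct win w = { 0 };
        int i;

        for (i = 0; i < 100; i++)
                collect(&w, i % 4 != 0);        /* 3 of 4 frames succeed */

        printf("success_ratio = %d (about %d%%)\n",
               w.success_ratio, w.success_ratio / 128);
        return 0;
}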
iwl_rates[index].plcp; + if (index >= IWL_FIRST_CCK_RATE && index <= IWL_LAST_CCK_RATE) + mcs_rate->rate_n_flags |= RATE_MCS_CCK_MSK; + + } else if (is_siso(tbl->lq_type)) { + if (index > IWL_LAST_OFDM_RATE) + index = IWL_LAST_OFDM_RATE; + mcs_rate->rate_n_flags = iwl_rates[index].plcp_siso | + RATE_MCS_HT_MSK; + } else { + if (index > IWL_LAST_OFDM_RATE) + index = IWL_LAST_OFDM_RATE; + mcs_rate->rate_n_flags = iwl_rates[index].plcp_mimo | + RATE_MCS_HT_MSK; + } + + switch (tbl->antenna_type) { + case ANT_BOTH: + mcs_rate->rate_n_flags |= RATE_MCS_ANT_AB_MSK; + break; + case ANT_MAIN: + mcs_rate->rate_n_flags |= RATE_MCS_ANT_A_MSK; + break; + case ANT_AUX: + mcs_rate->rate_n_flags |= RATE_MCS_ANT_B_MSK; + break; + case ANT_NONE: + break; + } + + if (is_legacy(tbl->lq_type)) + return rc; + + if (tbl->is_fat) { + if (tbl->is_dup) + mcs_rate->rate_n_flags |= RATE_MCS_DUP_MSK; + else + mcs_rate->rate_n_flags |= RATE_MCS_FAT_MSK; + } + if (tbl->is_SGI) + mcs_rate->rate_n_flags |= RATE_MCS_SGI_MSK; + + if (use_green) { + mcs_rate->rate_n_flags |= RATE_MCS_GF_MSK; + if (is_siso(tbl->lq_type)) + mcs_rate->rate_n_flags &= ~RATE_MCS_SGI_MSK; + } + return rc; +} + +static int rs_get_tbl_info_from_mcs(const struct iwl_rate *mcs_rate, + int phymode, struct iwl_scale_tbl_info *tbl, + int *rate_idx) +{ + int index; + u32 ant_msk; + + index = iwl_rate_index_from_plcp(mcs_rate->rate_n_flags); + + if (index == IWL_RATE_INVALID) { + *rate_idx = -1; + return -1; + } + tbl->is_SGI = 0; + tbl->is_fat = 0; + tbl->is_dup = 0; + tbl->antenna_type = ANT_BOTH; + + if (!(mcs_rate->rate_n_flags & RATE_MCS_HT_MSK)) { + ant_msk = (mcs_rate->rate_n_flags & RATE_MCS_ANT_AB_MSK); + + if (ant_msk == RATE_MCS_ANT_AB_MSK) + tbl->lq_type = LQ_NONE; + else { + + if (phymode == MODE_IEEE80211A) + tbl->lq_type = LQ_A; + else + tbl->lq_type = LQ_G; + + if (mcs_rate->rate_n_flags & RATE_MCS_ANT_A_MSK) + tbl->antenna_type = ANT_MAIN; + else + tbl->antenna_type = ANT_AUX; + } + *rate_idx = index; + + } else if (iwl_rate_get_rate(mcs_rate->rate_n_flags) + <= IWL_RATE_SISO_60M_PLCP) { + tbl->lq_type = LQ_SISO; + + ant_msk = (mcs_rate->rate_n_flags & RATE_MCS_ANT_AB_MSK); + if (ant_msk == RATE_MCS_ANT_AB_MSK) + tbl->lq_type = LQ_NONE; + else { + if (mcs_rate->rate_n_flags & RATE_MCS_ANT_A_MSK) + tbl->antenna_type = ANT_MAIN; + else + tbl->antenna_type = ANT_AUX; + } + if (mcs_rate->rate_n_flags & RATE_MCS_SGI_MSK) + tbl->is_SGI = 1; + + if ((mcs_rate->rate_n_flags & RATE_MCS_FAT_MSK) || + (mcs_rate->rate_n_flags & RATE_MCS_DUP_MSK)) + tbl->is_fat = 1; + + if (mcs_rate->rate_n_flags & RATE_MCS_DUP_MSK) + tbl->is_dup = 1; + + *rate_idx = index; + } else { + tbl->lq_type = LQ_MIMO; + if (mcs_rate->rate_n_flags & RATE_MCS_SGI_MSK) + tbl->is_SGI = 1; + + if ((mcs_rate->rate_n_flags & RATE_MCS_FAT_MSK) || + (mcs_rate->rate_n_flags & RATE_MCS_DUP_MSK)) + tbl->is_fat = 1; + + if (mcs_rate->rate_n_flags & RATE_MCS_DUP_MSK) + tbl->is_dup = 1; + *rate_idx = index; + } + return 0; +} + +static inline void rs_toggle_antenna(struct iwl_rate *new_rate, + struct iwl_scale_tbl_info *tbl) +{ + if (tbl->antenna_type == ANT_AUX) { + tbl->antenna_type = ANT_MAIN; + new_rate->rate_n_flags &= ~RATE_MCS_ANT_B_MSK; + new_rate->rate_n_flags |= RATE_MCS_ANT_A_MSK; + } else { + tbl->antenna_type = ANT_AUX; + new_rate->rate_n_flags &= ~RATE_MCS_ANT_A_MSK; + new_rate->rate_n_flags |= RATE_MCS_ANT_B_MSK; + } +} + +static inline s8 rs_use_green(struct iwl_priv *priv) +{ + s8 rc = 0; +#ifdef CONFIG_IWLWIFI_HT + if (!priv->is_ht_enabled || !priv->current_assoc_ht.is_ht) 
+ return 0; + + if ((priv->current_assoc_ht.is_green_field) && + !(priv->current_assoc_ht.operating_mode & 0x4)) + rc = 1; +#endif /*CONFIG_IWLWIFI_HT */ + return rc; +} + +/** + * rs_get_supported_rates - get the available rates + * + * if management frame or broadcast frame only return + * basic available rates. + * + */ +static void rs_get_supported_rates(struct iwl_rate_scale_priv *lq_data, + struct ieee80211_hdr *hdr, + enum iwl_table_type rate_type, + u16 *data_rate) +{ + if (is_legacy(rate_type)) + *data_rate = lq_data->active_rate; + else { + if (is_siso(rate_type)) + *data_rate = lq_data->active_siso_rate; + else + *data_rate = lq_data->active_mimo_rate; + } + + if (hdr && is_multicast_ether_addr(hdr->addr1) && + lq_data->active_rate_basic) + *data_rate = lq_data->active_rate_basic; +} + +static u16 rs_get_adjacent_rate(u8 index, u16 rate_mask, int rate_type) +{ + u8 high = IWL_RATE_INVALID; + u8 low = IWL_RATE_INVALID; + + /* 802.11A or ht walks to the next literal adjascent rate in + * the rate table */ + if (is_a_band(rate_type) || !is_legacy(rate_type)) { + int i; + u32 mask; + + /* Find the previous rate that is in the rate mask */ + i = index - 1; + for (mask = (1 << i); i >= 0; i--, mask >>= 1) { + if (rate_mask & mask) { + low = i; + break; + } + } + + /* Find the next rate that is in the rate mask */ + i = index + 1; + for (mask = (1 << i); i < IWL_RATE_COUNT; i++, mask <<= 1) { + if (rate_mask & mask) { + high = i; + break; + } + } + + return (high << 8) | low; + } + + low = index; + while (low != IWL_RATE_INVALID) { + low = iwl_rates[low].prev_rs; + if (low == IWL_RATE_INVALID) + break; + if (rate_mask & (1 << low)) + break; + IWL_DEBUG_RATE("Skipping masked lower rate: %d\n", low); + } + + high = index; + while (high != IWL_RATE_INVALID) { + high = iwl_rates[high].next_rs; + if (high == IWL_RATE_INVALID) + break; + if (rate_mask & (1 << high)) + break; + IWL_DEBUG_RATE("Skipping masked higher rate: %d\n", high); + } + + return (high << 8) | low; +} + +static int rs_get_lower_rate(struct iwl_rate_scale_priv *lq_data, + struct iwl_scale_tbl_info *tbl, u8 scale_index, + u8 ht_possible, struct iwl_rate *mcs_rate) +{ + s32 low; + u16 rate_mask; + u16 high_low; + u8 switch_to_legacy = 0; + u8 is_green = lq_data->is_green; + + /* check if we need to switch from HT to legacy rates. 
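/*
 * rs_get_adjacent_rate() above returns both neighbours packed into one
 * 16-bit value: the next lower usable rate in the low byte, the next
 * higher one in the high byte.  Standalone illustration of the packing
 * and unpacking (0xFF is used as the "no such rate" sentinel for this
 * sketch only):
 */
#include <stdio.h>
#include <stdint.h>

#define RATE_INVALID 0xFF

static uint16_t pack_adjacent(uint8_t low, uint8_t high)
{
        return (uint16_t)((high << 8) | low);
}

int main(void)
{
        uint16_t high_low = pack_adjacent(3, RATE_INVALID);
        uint8_t low = high_low & 0xff;
        uint8_t high = (high_low >> 8) & 0xff;

        printf("lower rate %u, higher rate %s\n", low,
               high == RATE_INVALID ? "none" : "available");
        return 0;
}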
+ * assumption is that mandatory rates (1Mbps or 6Mbps) + * are always supported (spec demand) */ + if (!is_legacy(tbl->lq_type) && (!ht_possible || !scale_index)) { + switch_to_legacy = 1; + scale_index = rs_ht_to_legacy[scale_index]; + if (lq_data->phymode == MODE_IEEE80211A) + tbl->lq_type = LQ_A; + else + tbl->lq_type = LQ_G; + + if ((tbl->antenna_type == ANT_BOTH) || + (tbl->antenna_type == ANT_NONE)) + tbl->antenna_type = ANT_MAIN; + + tbl->is_fat = 0; + tbl->is_SGI = 0; + } + + rs_get_supported_rates(lq_data, NULL, tbl->lq_type, &rate_mask); + + /* mask with station rate restriction */ + if (is_legacy(tbl->lq_type)) { + if (lq_data->phymode == (u8) MODE_IEEE80211A) + rate_mask = (u16)(rate_mask & + (lq_data->supp_rates << IWL_FIRST_OFDM_RATE)); + else + rate_mask = (u16)(rate_mask & lq_data->supp_rates); + } + + /* if we did switched from HT to legacy check current rate */ + if ((switch_to_legacy) && + (rate_mask & (1 << scale_index))) { + rs_mcs_from_tbl(mcs_rate, tbl, scale_index, is_green); + return 0; + } + + high_low = rs_get_adjacent_rate(scale_index, rate_mask, tbl->lq_type); + low = high_low & 0xff; + + if (low != IWL_RATE_INVALID) + rs_mcs_from_tbl(mcs_rate, tbl, low, is_green); + else + rs_mcs_from_tbl(mcs_rate, tbl, scale_index, is_green); + + return 0; +} + +static void rs_tx_status(void *priv_rate, + struct net_device *dev, + struct sk_buff *skb, + struct ieee80211_tx_status *tx_resp) +{ + int status; + u8 retries; + int rs_index, index = 0; + struct iwl_rate_scale_priv *lq; + struct iwl_link_quality_cmd *table; + struct sta_info *sta; + struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; + struct iwl_priv *priv = (struct iwl_priv *)priv_rate; + struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); + struct iwl_rate_scale_data *window = NULL; + struct iwl_rate_scale_data *search_win = NULL; + struct iwl_rate tx_mcs; + struct iwl_scale_tbl_info tbl_type; + struct iwl_scale_tbl_info *curr_tbl, *search_tbl; + u8 active_index = 0; + u16 fc = le16_to_cpu(hdr->frame_control); + s32 tpt = 0; + + IWL_DEBUG_RATE_LIMIT("get frame ack response, update rate scale window\n"); + + if (!ieee80211_is_data(fc) || is_multicast_ether_addr(hdr->addr1)) + return; + + retries = tx_resp->retry_count; + + if (retries > 15) + retries = 15; + + + sta = sta_info_get(local, hdr->addr1); + + if (!sta || !sta->rate_ctrl_priv) { + if (sta) + sta_info_put(sta); + return; + } + + lq = (struct iwl_rate_scale_priv *)sta->rate_ctrl_priv; + + if (!priv->lq_mngr.lq_ready) + return; + + if ((priv->iw_mode == IEEE80211_IF_TYPE_IBSS) && !lq->ibss_sta_added) + return; + + table = &lq->lq; + active_index = lq->active_tbl; + + lq->antenna = (lq->valid_antenna & local->hw.conf.antenna_sel_tx); + if (!lq->antenna) + lq->antenna = lq->valid_antenna; + + lq->antenna = lq->valid_antenna; + curr_tbl = &(lq->lq_info[active_index]); + search_tbl = &(lq->lq_info[(1 - active_index)]); + window = (struct iwl_rate_scale_data *) + &(curr_tbl->win[0]); + search_win = (struct iwl_rate_scale_data *) + &(search_tbl->win[0]); + + tx_mcs.rate_n_flags = tx_resp->control.tx_rate; + + rs_get_tbl_info_from_mcs(&tx_mcs, priv->phymode, + &tbl_type, &rs_index); + if ((rs_index < 0) || (rs_index >= IWL_RATE_COUNT)) { + IWL_DEBUG_RATE("bad rate index at: %d rate 0x%X\n", + rs_index, tx_mcs.rate_n_flags); + sta_info_put(sta); + return; + } + + if (retries && + (tx_mcs.rate_n_flags != + le32_to_cpu(table->rs_table[0].rate_n_flags))) { + IWL_DEBUG_RATE("initial rate does not match 0x%x 0x%x\n", + tx_mcs.rate_n_flags, + 
le32_to_cpu(table->rs_table[0].rate_n_flags)); + sta_info_put(sta); + return; + } + + while (retries) { + tx_mcs.rate_n_flags = + le32_to_cpu(table->rs_table[index].rate_n_flags); + rs_get_tbl_info_from_mcs(&tx_mcs, priv->phymode, + &tbl_type, &rs_index); + + if ((tbl_type.lq_type == search_tbl->lq_type) && + (tbl_type.antenna_type == search_tbl->antenna_type) && + (tbl_type.is_SGI == search_tbl->is_SGI)) { + if (search_tbl->expected_tpt) + tpt = search_tbl->expected_tpt[rs_index]; + else + tpt = 0; + rs_collect_tx_data(search_win, + rs_index, tpt, 0); + } else if ((tbl_type.lq_type == curr_tbl->lq_type) && + (tbl_type.antenna_type == curr_tbl->antenna_type) && + (tbl_type.is_SGI == curr_tbl->is_SGI)) { + if (curr_tbl->expected_tpt) + tpt = curr_tbl->expected_tpt[rs_index]; + else + tpt = 0; + rs_collect_tx_data(window, rs_index, tpt, 0); + } + if (lq->stay_in_tbl) + lq->total_failed++; + --retries; + index++; + + } + + if (!tx_resp->retry_count) + tx_mcs.rate_n_flags = tx_resp->control.tx_rate; + else + tx_mcs.rate_n_flags = + le32_to_cpu(table->rs_table[index].rate_n_flags); + + rs_get_tbl_info_from_mcs(&tx_mcs, priv->phymode, + &tbl_type, &rs_index); + + if (tx_resp->flags & IEEE80211_TX_STATUS_ACK) + status = 1; + else + status = 0; + + if ((tbl_type.lq_type == search_tbl->lq_type) && + (tbl_type.antenna_type == search_tbl->antenna_type) && + (tbl_type.is_SGI == search_tbl->is_SGI)) { + if (search_tbl->expected_tpt) + tpt = search_tbl->expected_tpt[rs_index]; + else + tpt = 0; + rs_collect_tx_data(search_win, + rs_index, tpt, status); + } else if ((tbl_type.lq_type == curr_tbl->lq_type) && + (tbl_type.antenna_type == curr_tbl->antenna_type) && + (tbl_type.is_SGI == curr_tbl->is_SGI)) { + if (curr_tbl->expected_tpt) + tpt = curr_tbl->expected_tpt[rs_index]; + else + tpt = 0; + rs_collect_tx_data(window, rs_index, tpt, status); + } + + if (lq->stay_in_tbl) { + if (status) + lq->total_success++; + else + lq->total_failed++; + } + + rs_rate_scale_perform(priv, dev, hdr, sta); + sta_info_put(sta); + return; +} + +static u8 rs_is_ant_connected(u8 valid_antenna, + enum iwl_antenna_type antenna_type) +{ + if (antenna_type == ANT_AUX) + return ((valid_antenna & 0x2) ? 1:0); + else if (antenna_type == ANT_MAIN) + return ((valid_antenna & 0x1) ? 
1:0); + else if (antenna_type == ANT_BOTH) { + if ((valid_antenna & 0x3) == 0x3) + return 1; + else + return 0; + } + + return 1; +} + +static u8 rs_is_other_ant_connected(u8 valid_antenna, + enum iwl_antenna_type antenna_type) +{ + if (antenna_type == ANT_AUX) + return (rs_is_ant_connected(valid_antenna, ANT_MAIN)); + else + return (rs_is_ant_connected(valid_antenna, ANT_AUX)); + + return 0; +} + +static void rs_set_stay_in_table(u8 is_legacy, + struct iwl_rate_scale_priv *lq_data) +{ + IWL_DEBUG_HT("we are staying in the same table\n"); + lq_data->stay_in_tbl = 1; + if (is_legacy) { + lq_data->table_count_limit = IWL_LEGACY_TABLE_COUNT; + lq_data->max_failure_limit = IWL_LEGACY_FAILURE_LIMIT; + lq_data->max_success_limit = IWL_LEGACY_TABLE_COUNT; + } else { + lq_data->table_count_limit = IWL_NONE_LEGACY_TABLE_COUNT; + lq_data->max_failure_limit = IWL_NONE_LEGACY_FAILURE_LIMIT; + lq_data->max_success_limit = IWL_NONE_LEGACY_SUCCESS_LIMIT; + } + lq_data->table_count = 0; + lq_data->total_failed = 0; + lq_data->total_success = 0; +} + +static void rs_get_expected_tpt_table(struct iwl_rate_scale_priv *lq_data, + struct iwl_scale_tbl_info *tbl) +{ + if (is_legacy(tbl->lq_type)) { + if (!is_a_band(tbl->lq_type)) + tbl->expected_tpt = expected_tpt_G; + else + tbl->expected_tpt = expected_tpt_A; + } else if (is_siso(tbl->lq_type)) { + if (tbl->is_fat && !lq_data->is_dup) + if (tbl->is_SGI) + tbl->expected_tpt = expected_tpt_siso40MHzSGI; + else + tbl->expected_tpt = expected_tpt_siso40MHz; + else if (tbl->is_SGI) + tbl->expected_tpt = expected_tpt_siso20MHzSGI; + else + tbl->expected_tpt = expected_tpt_siso20MHz; + + } else if (is_mimo(tbl->lq_type)) { + if (tbl->is_fat && !lq_data->is_dup) + if (tbl->is_SGI) + tbl->expected_tpt = expected_tpt_mimo40MHzSGI; + else + tbl->expected_tpt = expected_tpt_mimo40MHz; + else if (tbl->is_SGI) + tbl->expected_tpt = expected_tpt_mimo20MHzSGI; + else + tbl->expected_tpt = expected_tpt_mimo20MHz; + } else + tbl->expected_tpt = expected_tpt_G; +} + +#ifdef CONFIG_IWLWIFI_HT +static s32 rs_get_best_rate(struct iwl_priv *priv, + struct iwl_rate_scale_priv *lq_data, + struct iwl_scale_tbl_info *tbl, + u16 rate_mask, s8 index, s8 rate) +{ + struct iwl_scale_tbl_info *active_tbl = + &(lq_data->lq_info[lq_data->active_tbl]); + s32 new_rate, high, low, start_hi; + s32 active_sr = active_tbl->win[index].success_ratio; + s32 *tpt_tbl = tbl->expected_tpt; + s32 active_tpt = active_tbl->expected_tpt[index]; + u16 high_low; + + new_rate = high = low = start_hi = IWL_RATE_INVALID; + + for (; ;) { + high_low = rs_get_adjacent_rate(rate, rate_mask, tbl->lq_type); + + low = high_low & 0xff; + high = (high_low >> 8) & 0xff; + + if ((((100 * tpt_tbl[rate]) > lq_data->last_tpt) && + ((active_sr > IWL_RATE_DECREASE_TH) && + (active_sr <= IWL_RATE_HIGH_TH) && + (tpt_tbl[rate] <= active_tpt))) || + ((active_sr >= IWL_RATE_SCALE_SWITCH) && + (tpt_tbl[rate] > active_tpt))) { + + if (start_hi != IWL_RATE_INVALID) { + new_rate = start_hi; + break; + } + new_rate = rate; + if (low != IWL_RATE_INVALID) + rate = low; + else + break; + } else { + if (new_rate != IWL_RATE_INVALID) + break; + else if (high != IWL_RATE_INVALID) { + start_hi = high; + rate = high; + } else { + new_rate = rate; + break; + } + } + } + + return new_rate; +} +#endif /* CONFIG_IWLWIFI_HT */ + +static inline u8 rs_is_both_ant_supp(u8 valid_antenna) +{ + return (rs_is_ant_connected(valid_antenna, ANT_BOTH)); +} + +static int rs_switch_to_mimo(struct iwl_priv *priv, + struct iwl_rate_scale_priv *lq_data, + struct 
iwl_scale_tbl_info *tbl, int index) +{ + int rc = -1; +#ifdef CONFIG_IWLWIFI_HT + u16 rate_mask; + s32 rate; + s8 is_green = lq_data->is_green; + + if (!priv->is_ht_enabled || !priv->current_assoc_ht.is_ht) + return -1; + + IWL_DEBUG_HT("LQ: try to switch to MIMO\n"); + tbl->lq_type = LQ_MIMO; + rs_get_supported_rates(lq_data, NULL, tbl->lq_type, + &rate_mask); + + if (priv->current_assoc_ht.tx_mimo_ps_mode == IWL_MIMO_PS_STATIC) + return -1; + + if (!rs_is_both_ant_supp(lq_data->antenna)) + return -1; + + rc = 0; + tbl->is_dup = lq_data->is_dup; + tbl->action = 0; + if (priv->current_channel_width == IWL_CHANNEL_WIDTH_40MHZ) + tbl->is_fat = 1; + else + tbl->is_fat = 0; + + if (tbl->is_fat) { + if (priv->current_assoc_ht.sgf & HT_SHORT_GI_40MHZ_ONLY) + tbl->is_SGI = 1; + else + tbl->is_SGI = 0; + } else if (priv->current_assoc_ht.sgf & HT_SHORT_GI_20MHZ_ONLY) + tbl->is_SGI = 1; + else + tbl->is_SGI = 0; + + rs_get_expected_tpt_table(lq_data, tbl); + + rate = rs_get_best_rate(priv, lq_data, tbl, rate_mask, index, index); + + IWL_DEBUG_HT("LQ: MIMO best rate %d mask %X\n", rate, rate_mask); + if ((rate == IWL_RATE_INVALID) || !((1 << rate) & rate_mask)) + return -1; + rs_mcs_from_tbl(&tbl->current_rate, tbl, rate, is_green); + + IWL_DEBUG_HT("LQ: Switch to new mcs %X index is green %X\n", + tbl->current_rate.rate_n_flags, is_green); + +#endif /*CONFIG_IWLWIFI_HT */ + return rc; +} + +static int rs_switch_to_siso(struct iwl_priv *priv, + struct iwl_rate_scale_priv *lq_data, + struct iwl_scale_tbl_info *tbl, int index) +{ + int rc = -1; +#ifdef CONFIG_IWLWIFI_HT + u16 rate_mask; + u8 is_green = lq_data->is_green; + s32 rate; + + IWL_DEBUG_HT("LQ: try to switch to SISO\n"); + if (!priv->is_ht_enabled || !priv->current_assoc_ht.is_ht) + return -1; + + rc = 0; + tbl->is_dup = lq_data->is_dup; + tbl->lq_type = LQ_SISO; + tbl->action = 0; + rs_get_supported_rates(lq_data, NULL, tbl->lq_type, + &rate_mask); + + if (priv->current_channel_width == IWL_CHANNEL_WIDTH_40MHZ) + tbl->is_fat = 1; + else + tbl->is_fat = 0; + + if (tbl->is_fat) { + if (priv->current_assoc_ht.sgf & HT_SHORT_GI_40MHZ_ONLY) + tbl->is_SGI = 1; + else + tbl->is_SGI = 0; + } else if (priv->current_assoc_ht.sgf & HT_SHORT_GI_20MHZ_ONLY) + tbl->is_SGI = 1; + else + tbl->is_SGI = 0; + + if (is_green) + tbl->is_SGI = 0; + + rs_get_expected_tpt_table(lq_data, tbl); + rate = rs_get_best_rate(priv, lq_data, tbl, rate_mask, index, index); + + IWL_DEBUG_HT("LQ: get best rate %d mask %X\n", rate, rate_mask); + if ((rate == IWL_RATE_INVALID) || !((1 << rate) & rate_mask)) { + IWL_DEBUG_HT("can not switch with index %d rate mask %x\n", + rate, rate_mask); + return -1; + } + rs_mcs_from_tbl(&tbl->current_rate, tbl, rate, is_green); + IWL_DEBUG_HT("LQ: Switch to new mcs %X index is green %X\n", + tbl->current_rate.rate_n_flags, is_green); + +#endif /*CONFIG_IWLWIFI_HT */ + return rc; +} + +static int rs_move_legacy_other(struct iwl_priv *priv, + struct iwl_rate_scale_priv *lq_data, + int index) +{ + int rc = 0; + struct iwl_scale_tbl_info *tbl = + &(lq_data->lq_info[lq_data->active_tbl]); + struct iwl_scale_tbl_info *search_tbl = + &(lq_data->lq_info[(1 - lq_data->active_tbl)]); + struct iwl_rate_scale_data *window = &(tbl->win[index]); + u32 sz = (sizeof(struct iwl_scale_tbl_info) - + (sizeof(struct iwl_rate_scale_data) * IWL_RATE_COUNT)); + u8 start_action = tbl->action; + + for (; ;) { + switch (tbl->action) { + case IWL_LEGACY_SWITCH_ANTENNA: + IWL_DEBUG_HT("LQ Legacy switch Antenna\n"); + + search_tbl->lq_type = LQ_NONE; + 
lq_data->action_counter++; + if (window->success_ratio >= IWL_RS_GOOD_RATIO) + break; + if (!rs_is_other_ant_connected(lq_data->antenna, + tbl->antenna_type)) + break; + + memcpy(search_tbl, tbl, sz); + + rs_toggle_antenna(&(search_tbl->current_rate), + search_tbl); + rs_get_expected_tpt_table(lq_data, search_tbl); + lq_data->search_better_tbl = 1; + goto out; + + case IWL_LEGACY_SWITCH_SISO: + IWL_DEBUG_HT("LQ: Legacy switch to SISO\n"); + memcpy(search_tbl, tbl, sz); + search_tbl->lq_type = LQ_SISO; + search_tbl->is_SGI = 0; + search_tbl->is_fat = 0; + rc = rs_switch_to_siso(priv, lq_data, search_tbl, + index); + if (!rc) { + lq_data->search_better_tbl = 1; + lq_data->action_counter = 0; + } + if (!rc) + goto out; + + break; + case IWL_LEGACY_SWITCH_MIMO: + IWL_DEBUG_HT("LQ: Legacy switch MIMO\n"); + memcpy(search_tbl, tbl, sz); + search_tbl->lq_type = LQ_MIMO; + search_tbl->is_SGI = 0; + search_tbl->is_fat = 0; + search_tbl->antenna_type = ANT_BOTH; + rc = rs_switch_to_mimo(priv, lq_data, search_tbl, + index); + if (!rc) { + lq_data->search_better_tbl = 1; + lq_data->action_counter = 0; + } + if (!rc) + goto out; + break; + } + tbl->action++; + if (tbl->action > IWL_LEGACY_SWITCH_MIMO) + tbl->action = IWL_LEGACY_SWITCH_ANTENNA; + + if (tbl->action == start_action) + break; + + } + return 0; + + out: + tbl->action++; + if (tbl->action > IWL_LEGACY_SWITCH_MIMO) + tbl->action = IWL_LEGACY_SWITCH_ANTENNA; + return 0; + +} + +static int rs_move_siso_to_other(struct iwl_priv *priv, + struct iwl_rate_scale_priv *lq_data, + int index) +{ + int rc = -1; + u8 is_green = lq_data->is_green; + struct iwl_scale_tbl_info *tbl = + &(lq_data->lq_info[lq_data->active_tbl]); + struct iwl_scale_tbl_info *search_tbl = + &(lq_data->lq_info[(1 - lq_data->active_tbl)]); + struct iwl_rate_scale_data *window = &(tbl->win[index]); + u32 sz = (sizeof(struct iwl_scale_tbl_info) - + (sizeof(struct iwl_rate_scale_data) * IWL_RATE_COUNT)); + u8 start_action = tbl->action; + + for (;;) { + lq_data->action_counter++; + switch (tbl->action) { + case IWL_SISO_SWITCH_ANTENNA: + IWL_DEBUG_HT("LQ: SISO SWITCH ANTENNA SISO\n"); + search_tbl->lq_type = LQ_NONE; + if (window->success_ratio >= IWL_RS_GOOD_RATIO) + break; + if (!rs_is_other_ant_connected(lq_data->antenna, + tbl->antenna_type)) + break; + + memcpy(search_tbl, tbl, sz); + search_tbl->action = IWL_SISO_SWITCH_MIMO; + rs_toggle_antenna(&(search_tbl->current_rate), + search_tbl); + lq_data->search_better_tbl = 1; + + goto out; + + case IWL_SISO_SWITCH_MIMO: + IWL_DEBUG_HT("LQ: SISO SWITCH TO MIMO FROM SISO\n"); + memcpy(search_tbl, tbl, sz); + search_tbl->lq_type = LQ_MIMO; + search_tbl->is_SGI = 0; + search_tbl->is_fat = 0; + search_tbl->antenna_type = ANT_BOTH; + rc = rs_switch_to_mimo(priv, lq_data, search_tbl, + index); + if (!rc) + lq_data->search_better_tbl = 1; + + if (!rc) + goto out; + break; + case IWL_SISO_SWITCH_GI: + IWL_DEBUG_HT("LQ: SISO SWITCH TO GI\n"); + memcpy(search_tbl, tbl, sz); + search_tbl->action = 0; + if (search_tbl->is_SGI) + search_tbl->is_SGI = 0; + else if (!is_green) + search_tbl->is_SGI = 1; + else + break; + lq_data->search_better_tbl = 1; + if ((tbl->lq_type == LQ_SISO) && + (tbl->is_SGI)) { + s32 tpt = lq_data->last_tpt / 100; + if (((!tbl->is_fat) && + (tpt >= expected_tpt_siso20MHz[index])) || + ((tbl->is_fat) && + (tpt >= expected_tpt_siso40MHz[index]))) + lq_data->search_better_tbl = 0; + } + rs_get_expected_tpt_table(lq_data, search_tbl); + rs_mcs_from_tbl(&search_tbl->current_rate, + search_tbl, index, is_green); + goto out; + 
} + tbl->action++; + if (tbl->action > IWL_SISO_SWITCH_GI) + tbl->action = IWL_SISO_SWITCH_ANTENNA; + + if (tbl->action == start_action) + break; + } + return 0; + + out: + tbl->action++; + if (tbl->action > IWL_SISO_SWITCH_GI) + tbl->action = IWL_SISO_SWITCH_ANTENNA; + return 0; +} + +static int rs_move_mimo_to_other(struct iwl_priv *priv, + struct iwl_rate_scale_priv *lq_data, + int index) +{ + int rc = -1; + s8 is_green = lq_data->is_green; + struct iwl_scale_tbl_info *tbl = + &(lq_data->lq_info[lq_data->active_tbl]); + struct iwl_scale_tbl_info *search_tbl = + &(lq_data->lq_info[(1 - lq_data->active_tbl)]); + u32 sz = (sizeof(struct iwl_scale_tbl_info) - + (sizeof(struct iwl_rate_scale_data) * IWL_RATE_COUNT)); + u8 start_action = tbl->action; + + for (;;) { + lq_data->action_counter++; + switch (tbl->action) { + case IWL_MIMO_SWITCH_ANTENNA_A: + case IWL_MIMO_SWITCH_ANTENNA_B: + IWL_DEBUG_HT("LQ: MIMO SWITCH TO SISO\n"); + memcpy(search_tbl, tbl, sz); + search_tbl->lq_type = LQ_SISO; + search_tbl->is_SGI = 0; + search_tbl->is_fat = 0; + if (tbl->action == IWL_MIMO_SWITCH_ANTENNA_A) + search_tbl->antenna_type = ANT_MAIN; + else + search_tbl->antenna_type = ANT_AUX; + + rc = rs_switch_to_siso(priv, lq_data, search_tbl, + index); + if (!rc) { + lq_data->search_better_tbl = 1; + goto out; + } + break; + + case IWL_MIMO_SWITCH_GI: + IWL_DEBUG_HT("LQ: MIMO SWITCH TO GI\n"); + memcpy(search_tbl, tbl, sz); + search_tbl->lq_type = LQ_MIMO; + search_tbl->antenna_type = ANT_BOTH; + search_tbl->action = 0; + if (search_tbl->is_SGI) + search_tbl->is_SGI = 0; + else + search_tbl->is_SGI = 1; + lq_data->search_better_tbl = 1; + if ((tbl->lq_type == LQ_MIMO) && + (tbl->is_SGI)) { + s32 tpt = lq_data->last_tpt / 100; + if (((!tbl->is_fat) && + (tpt >= expected_tpt_mimo20MHz[index])) || + ((tbl->is_fat) && + (tpt >= expected_tpt_mimo40MHz[index]))) + lq_data->search_better_tbl = 0; + } + rs_get_expected_tpt_table(lq_data, search_tbl); + rs_mcs_from_tbl(&search_tbl->current_rate, + search_tbl, index, is_green); + goto out; + + } + tbl->action++; + if (tbl->action > IWL_MIMO_SWITCH_GI) + tbl->action = IWL_MIMO_SWITCH_ANTENNA_A; + + if (tbl->action == start_action) + break; + } + + return 0; + out: + tbl->action++; + if (tbl->action > IWL_MIMO_SWITCH_GI) + tbl->action = IWL_MIMO_SWITCH_ANTENNA_A; + return 0; + +} + +static void rs_stay_in_table(struct iwl_rate_scale_priv *lq_data) +{ + struct iwl_scale_tbl_info *tbl; + int i; + int active_tbl; + int flush_interval_passed = 0; + + active_tbl = lq_data->active_tbl; + + tbl = &(lq_data->lq_info[active_tbl]); + + if (lq_data->stay_in_tbl) { + + if (lq_data->flush_timer) + flush_interval_passed = + time_after(jiffies, + (unsigned long)(lq_data->flush_timer + + IWL_RATE_SCALE_FLUSH_INTVL)); + + flush_interval_passed = 0; + if ((lq_data->total_failed > lq_data->max_failure_limit) || + (lq_data->total_success > lq_data->max_success_limit) || + ((!lq_data->search_better_tbl) && (lq_data->flush_timer) + && (flush_interval_passed))) { + IWL_DEBUG_HT("LQ: stay is expired %d %d %d\n:", + lq_data->total_failed, + lq_data->total_success, + flush_interval_passed); + lq_data->stay_in_tbl = 0; + lq_data->total_failed = 0; + lq_data->total_success = 0; + lq_data->flush_timer = 0; + } else if (lq_data->table_count > 0) { + lq_data->table_count++; + if (lq_data->table_count >= + lq_data->table_count_limit) { + lq_data->table_count = 0; + + IWL_DEBUG_HT("LQ: stay in table clear win\n"); + for (i = 0; i < IWL_RATE_COUNT; i++) + rs_rate_scale_clear_window( + &(tbl->win[i])); + } 
+ } + + if (!lq_data->stay_in_tbl) { + for (i = 0; i < IWL_RATE_COUNT; i++) + rs_rate_scale_clear_window(&(tbl->win[i])); + } + } +} + +static void rs_rate_scale_perform(struct iwl_priv *priv, + struct net_device *dev, + struct ieee80211_hdr *hdr, + struct sta_info *sta) +{ + int low = IWL_RATE_INVALID; + int high = IWL_RATE_INVALID; + int index; + int i; + struct iwl_rate_scale_data *window = NULL; + int current_tpt = IWL_INVALID_VALUE; + int low_tpt = IWL_INVALID_VALUE; + int high_tpt = IWL_INVALID_VALUE; + u32 fail_count; + s8 scale_action = 0; + u16 fc, rate_mask; + u8 update_lq = 0; + struct iwl_rate_scale_priv *lq_data; + struct iwl_scale_tbl_info *tbl, *tbl1; + u16 rate_scale_index_msk = 0; + struct iwl_rate mcs_rate; + u8 is_green = 0; + u8 active_tbl = 0; + u8 done_search = 0; + u16 high_low; + + IWL_DEBUG_RATE("rate scale calculate new rate for skb\n"); + + fc = le16_to_cpu(hdr->frame_control); + if (!ieee80211_is_data(fc) || is_multicast_ether_addr(hdr->addr1)) { + /* Send management frames and broadcast/multicast data using + * lowest rate. */ + /* TODO: this could probably be improved.. */ + return; + } + + if (!sta || !sta->rate_ctrl_priv) + return; + + if (!priv->lq_mngr.lq_ready) { + IWL_DEBUG_RATE("still rate scaling not ready\n"); + return; + } + lq_data = (struct iwl_rate_scale_priv *)sta->rate_ctrl_priv; + + if (!lq_data->search_better_tbl) + active_tbl = lq_data->active_tbl; + else + active_tbl = 1 - lq_data->active_tbl; + + tbl = &(lq_data->lq_info[active_tbl]); + is_green = lq_data->is_green; + + index = sta->last_txrate; + + IWL_DEBUG_RATE("Rate scale index %d for type %d\n", index, + tbl->lq_type); + + rs_get_supported_rates(lq_data, hdr, tbl->lq_type, + &rate_mask); + + IWL_DEBUG_RATE("mask 0x%04X \n", rate_mask); + + /* mask with station rate restriction */ + if (is_legacy(tbl->lq_type)) { + if (lq_data->phymode == (u8) MODE_IEEE80211A) + rate_scale_index_msk = (u16) (rate_mask & + (lq_data->supp_rates << IWL_FIRST_OFDM_RATE)); + else + rate_scale_index_msk = (u16) (rate_mask & + lq_data->supp_rates); + + } else + rate_scale_index_msk = rate_mask; + + if (!rate_scale_index_msk) + rate_scale_index_msk = rate_mask; + + if (index < 0 || !((1 << index) & rate_scale_index_msk)) { + index = IWL_INVALID_VALUE; + update_lq = 1; + + /* get the lowest availabe rate */ + for (i = 0; i <= IWL_RATE_COUNT; i++) { + if ((1 << i) & rate_scale_index_msk) + index = i; + } + + if (index == IWL_INVALID_VALUE) { + IWL_WARNING("Can not find a suitable rate\n"); + return; + } + } + + if (!tbl->expected_tpt) + rs_get_expected_tpt_table(lq_data, tbl); + + window = &(tbl->win[index]); + + fail_count = window->counter - window->success_counter; + if (((fail_count < IWL_RATE_MIN_FAILURE_TH) && + (window->success_counter < IWL_RATE_MIN_SUCCESS_TH)) + || (tbl->expected_tpt == NULL)) { + IWL_DEBUG_RATE("LQ: still below TH succ %d total %d " + "for index %d\n", + window->success_counter, window->counter, index); + window->average_tpt = IWL_INVALID_VALUE; + rs_stay_in_table(lq_data); + if (update_lq) { + rs_mcs_from_tbl(&mcs_rate, tbl, index, is_green); + rs_fill_link_cmd(lq_data, &mcs_rate, &lq_data->lq); + rs_send_lq_cmd(priv, &lq_data->lq, CMD_ASYNC); + } + goto out; + + } else + window->average_tpt = ((window->success_ratio * + tbl->expected_tpt[index] + 64) / 128); + + if (lq_data->search_better_tbl) { + int success_limit = IWL_RATE_SCALE_SWITCH; + + if ((window->success_ratio > success_limit) || + (window->average_tpt > lq_data->last_tpt)) { + if (!is_legacy(tbl->lq_type)) { + 
IWL_DEBUG_HT("LQ: we are switching to HT" + " rate suc %d current tpt %d" + " old tpt %d\n", + window->success_ratio, + window->average_tpt, + lq_data->last_tpt); + lq_data->enable_counter = 1; + } + lq_data->active_tbl = active_tbl; + current_tpt = window->average_tpt; + } else { + tbl->lq_type = LQ_NONE; + active_tbl = lq_data->active_tbl; + tbl = &(lq_data->lq_info[active_tbl]); + + index = iwl_rate_index_from_plcp( + tbl->current_rate.rate_n_flags); + + update_lq = 1; + current_tpt = lq_data->last_tpt; + IWL_DEBUG_HT("XXY GO BACK TO OLD TABLE\n"); + } + lq_data->search_better_tbl = 0; + done_search = 1; + goto lq_update; + } + + high_low = rs_get_adjacent_rate(index, rate_scale_index_msk, + tbl->lq_type); + low = high_low & 0xff; + high = (high_low >> 8) & 0xff; + + current_tpt = window->average_tpt; + + if (low != IWL_RATE_INVALID) + low_tpt = tbl->win[low].average_tpt; + + if (high != IWL_RATE_INVALID) + high_tpt = tbl->win[high].average_tpt; + + + scale_action = 1; + + if ((window->success_ratio <= IWL_RATE_DECREASE_TH) || + (current_tpt == 0)) { + IWL_DEBUG_RATE("decrease rate because of low success_ratio\n"); + scale_action = -1; + } else if ((low_tpt == IWL_INVALID_VALUE) && + (high_tpt == IWL_INVALID_VALUE)) + scale_action = 1; + else if ((low_tpt != IWL_INVALID_VALUE) && + (high_tpt != IWL_INVALID_VALUE) && + (low_tpt < current_tpt) && + (high_tpt < current_tpt)) + scale_action = 0; + else { + if (high_tpt != IWL_INVALID_VALUE) { + if (high_tpt > current_tpt) + scale_action = 1; + else { + IWL_DEBUG_RATE + ("decrease rate because of high tpt\n"); + scale_action = -1; + } + } else if (low_tpt != IWL_INVALID_VALUE) { + if (low_tpt > current_tpt) { + IWL_DEBUG_RATE + ("decrease rate because of low tpt\n"); + scale_action = -1; + } else + scale_action = 1; + } + } + + if (scale_action == -1) { + if ((low != IWL_RATE_INVALID) && + ((window->success_ratio > IWL_RATE_HIGH_TH) || + (current_tpt > (100 * tbl->expected_tpt[low])))) + scale_action = 0; + } else if ((scale_action == 1) && + (window->success_ratio < IWL_RATE_INCREASE_TH)) + scale_action = 0; + + switch (scale_action) { + case -1: + if (low != IWL_RATE_INVALID) { + update_lq = 1; + index = low; + } + break; + case 1: + if (high != IWL_RATE_INVALID) { + update_lq = 1; + index = high; + } + + break; + case 0: + default: + break; + } + + IWL_DEBUG_HT("choose rate scale index %d action %d low %d " + "high %d type %d\n", + index, scale_action, low, high, tbl->lq_type); + + lq_update: + if (update_lq) { + rs_mcs_from_tbl(&mcs_rate, tbl, index, is_green); + rs_fill_link_cmd(lq_data, &mcs_rate, &lq_data->lq); + rs_send_lq_cmd(priv, &lq_data->lq, CMD_ASYNC); + } + rs_stay_in_table(lq_data); + + if (!update_lq && !done_search && !lq_data->stay_in_tbl) { + lq_data->last_tpt = current_tpt; + + if (is_legacy(tbl->lq_type)) + rs_move_legacy_other(priv, lq_data, index); + else if (is_siso(tbl->lq_type)) + rs_move_siso_to_other(priv, lq_data, index); + else + rs_move_mimo_to_other(priv, lq_data, index); + + if (lq_data->search_better_tbl) { + tbl = &(lq_data->lq_info[(1 - lq_data->active_tbl)]); + for (i = 0; i < IWL_RATE_COUNT; i++) + rs_rate_scale_clear_window(&(tbl->win[i])); + + index = iwl_rate_index_from_plcp( + tbl->current_rate.rate_n_flags); + + IWL_DEBUG_HT("Switch current mcs: %X index: %d\n", + tbl->current_rate.rate_n_flags, index); + rs_fill_link_cmd(lq_data, &tbl->current_rate, + &lq_data->lq); + rs_send_lq_cmd(priv, &lq_data->lq, CMD_ASYNC); + } + tbl1 = &(lq_data->lq_info[lq_data->active_tbl]); + + if 
(is_legacy(tbl1->lq_type) && +#ifdef CONFIG_IWLWIFI_HT + !priv->current_assoc_ht.is_ht && +#endif + (lq_data->action_counter >= 1)) { + lq_data->action_counter = 0; + IWL_DEBUG_HT("LQ: STAY in legacy table\n"); + rs_set_stay_in_table(1, lq_data); + } + + if (lq_data->enable_counter && + (lq_data->action_counter >= IWL_ACTION_LIMIT)) { +#ifdef CONFIG_IWLWIFI_HT_AGG + if ((lq_data->last_tpt > TID_AGG_TPT_THREHOLD) && + (priv->lq_mngr.agg_ctrl.auto_agg)) { + priv->lq_mngr.agg_ctrl.tid_retry = + TID_ALL_SPECIFIED; + schedule_work(&priv->agg_work); + } +#endif /*CONFIG_IWLWIFI_HT_AGG */ + lq_data->action_counter = 0; + rs_set_stay_in_table(0, lq_data); + } + } else { + if ((!update_lq) && (!done_search) && (!lq_data->flush_timer)) + lq_data->flush_timer = jiffies; + } + +out: + rs_mcs_from_tbl(&tbl->current_rate, tbl, index, is_green); + i = index; + sta->last_txrate = i; + + /* sta->txrate is an index to A mode rates which start + * at IWL_FIRST_OFDM_RATE + */ + if (lq_data->phymode == (u8) MODE_IEEE80211A) + sta->txrate = i - IWL_FIRST_OFDM_RATE; + else + sta->txrate = i; + + return; +} + + +static void rs_initialize_lq(struct iwl_priv *priv, + struct sta_info *sta) +{ + int i; + struct iwl_rate_scale_priv *lq; + struct iwl_scale_tbl_info *tbl; + u8 active_tbl = 0; + int rate_idx; + u8 use_green = rs_use_green(priv); + struct iwl_rate mcs_rate; + + if (!sta || !sta->rate_ctrl_priv) + goto out; + + lq = (struct iwl_rate_scale_priv *)sta->rate_ctrl_priv; + i = sta->last_txrate; + + if ((lq->lq.sta_id == 0xff) && + (priv->iw_mode == IEEE80211_IF_TYPE_IBSS)) + goto out; + + if (!lq->search_better_tbl) + active_tbl = lq->active_tbl; + else + active_tbl = 1 - lq->active_tbl; + + tbl = &(lq->lq_info[active_tbl]); + + if ((i < 0) || (i >= IWL_RATE_COUNT)) + i = 0; + + mcs_rate.rate_n_flags = iwl_rates[i].plcp ; + mcs_rate.rate_n_flags |= RATE_MCS_ANT_B_MSK; + mcs_rate.rate_n_flags &= ~RATE_MCS_ANT_A_MSK; + + if (i >= IWL_FIRST_CCK_RATE && i <= IWL_LAST_CCK_RATE) + mcs_rate.rate_n_flags |= RATE_MCS_CCK_MSK; + + tbl->antenna_type = ANT_AUX; + rs_get_tbl_info_from_mcs(&mcs_rate, priv->phymode, tbl, &rate_idx); + if (!rs_is_ant_connected(priv->valid_antenna, tbl->antenna_type)) + rs_toggle_antenna(&mcs_rate, tbl); + + rs_mcs_from_tbl(&mcs_rate, tbl, rate_idx, use_green); + tbl->current_rate.rate_n_flags = mcs_rate.rate_n_flags; + rs_get_expected_tpt_table(lq, tbl); + rs_fill_link_cmd(lq, &mcs_rate, &lq->lq); + rs_send_lq_cmd(priv, &lq->lq, CMD_ASYNC); + out: + return; +} + +static struct ieee80211_rate *rs_get_lowest_rate(struct ieee80211_local + *local) +{ + struct ieee80211_hw_mode *mode = local->oper_hw_mode; + int i; + + for (i = 0; i < mode->num_rates; i++) { + struct ieee80211_rate *rate = &mode->rates[i]; + + if (rate->flags & IEEE80211_RATE_SUPPORTED) + return rate; + } + + return &mode->rates[0]; +} + +static struct ieee80211_rate *rs_get_rate(void *priv_rate, + struct net_device *dev, + struct sk_buff *skb, + struct rate_control_extra + *extra) +{ + + int i; + struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); + struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; + struct sta_info *sta; + u16 fc; + struct iwl_priv *priv = (struct iwl_priv *)priv_rate; + struct iwl_rate_scale_priv *lq; + + IWL_DEBUG_RATE_LIMIT("rate scale calculate new rate for skb\n"); + + memset(extra, 0, sizeof(*extra)); + + fc = le16_to_cpu(hdr->frame_control); + if (!ieee80211_is_data(fc) || is_multicast_ether_addr(hdr->addr1)) { + /* Send management frames and broadcast/multicast data using + * 
lowest rate. */ + /* TODO: this could probably be improved.. */ + return rs_get_lowest_rate(local); + } + + sta = sta_info_get(local, hdr->addr1); + + if (!sta || !sta->rate_ctrl_priv) { + if (sta) + sta_info_put(sta); + return rs_get_lowest_rate(local); + } + + lq = (struct iwl_rate_scale_priv *)sta->rate_ctrl_priv; + i = sta->last_txrate; + + if ((priv->iw_mode == IEEE80211_IF_TYPE_IBSS) && !lq->ibss_sta_added) { + u8 sta_id = iwl_hw_find_station(priv, hdr->addr1); + DECLARE_MAC_BUF(mac); + + if (sta_id == IWL_INVALID_STATION) { + IWL_DEBUG_RATE("LQ: ADD station %s\n", + print_mac(mac, hdr->addr1)); + sta_id = iwl_add_station(priv, + hdr->addr1, 0, CMD_ASYNC); + } + if ((sta_id != IWL_INVALID_STATION)) { + lq->lq.sta_id = sta_id; + lq->lq.rs_table[0].rate_n_flags = 0; + lq->ibss_sta_added = 1; + rs_initialize_lq(priv, sta); + } + if (!lq->ibss_sta_added) + goto done; + } + + done: + sta_info_put(sta); + if ((i < 0) || (i > IWL_RATE_COUNT)) + return rs_get_lowest_rate(local); + + return &priv->ieee_rates[i]; +} + +static void *rs_alloc_sta(void *priv, gfp_t gfp) +{ + struct iwl_rate_scale_priv *crl; + int i, j; + + IWL_DEBUG_RATE("create station rate scale window\n"); + + crl = kzalloc(sizeof(struct iwl_rate_scale_priv), gfp); + + if (crl == NULL) + return NULL; + crl->lq.sta_id = 0xff; + + + for (j = 0; j < LQ_SIZE; j++) + for (i = 0; i < IWL_RATE_COUNT; i++) + rs_rate_scale_clear_window(&(crl->lq_info[j].win[i])); + + return crl; +} + +static void rs_rate_init(void *priv_rate, void *priv_sta, + struct ieee80211_local *local, + struct sta_info *sta) +{ + int i, j; + struct ieee80211_hw_mode *mode = local->oper_hw_mode; + struct iwl_priv *priv = (struct iwl_priv *)priv_rate; + struct iwl_rate_scale_priv *crl = priv_sta; + + crl->flush_timer = 0; + crl->supp_rates = sta->supp_rates; + sta->txrate = 3; + for (j = 0; j < LQ_SIZE; j++) + for (i = 0; i < IWL_RATE_COUNT; i++) + rs_rate_scale_clear_window(&(crl->lq_info[j].win[i])); + + IWL_DEBUG_RATE("rate scale global init\n"); + /* TODO: what is a good starting rate for STA? About middle? Maybe not + * the lowest or the highest rate.. Could consider using RSSI from + * previous packets? Need to have IEEE 802.1X auth succeed immediately + * after assoc.. 
*/ + + crl->ibss_sta_added = 0; + if (priv->iw_mode == IEEE80211_IF_TYPE_AP) { + u8 sta_id = iwl_hw_find_station(priv, sta->addr); + DECLARE_MAC_BUF(mac); + + /* for IBSS the calls are from the tasklet */ + IWL_DEBUG_HT("LQ: ADD station %s\n", + print_mac(mac, sta->addr)); + + if (sta_id == IWL_INVALID_STATION) { + IWL_DEBUG_RATE("LQ: ADD station %s\n", + print_mac(mac, sta->addr)); + sta_id = iwl_add_station(priv, + sta->addr, 0, CMD_ASYNC); + } + if ((sta_id != IWL_INVALID_STATION)) { + crl->lq.sta_id = sta_id; + crl->lq.rs_table[0].rate_n_flags = 0; + } + /* FIXME: this is w/a remove it later */ + priv->assoc_station_added = 1; + } + + for (i = 0; i < mode->num_rates; i++) { + if ((sta->supp_rates & BIT(i)) && + (mode->rates[i].flags & IEEE80211_RATE_SUPPORTED)) + sta->txrate = i; + } + sta->last_txrate = sta->txrate; + /* For MODE_IEEE80211A mode the cck rates are at the end + * of the rate table + */ + if (local->hw.conf.phymode == MODE_IEEE80211A) + sta->last_txrate += IWL_FIRST_OFDM_RATE; + + crl->is_dup = priv->is_dup; + crl->valid_antenna = priv->valid_antenna; + crl->antenna = priv->antenna; + crl->is_green = rs_use_green(priv); + crl->active_rate = priv->active_rate; + crl->active_rate &= ~(0x1000); + crl->active_rate_basic = priv->active_rate_basic; + crl->phymode = priv->phymode; +#ifdef CONFIG_IWLWIFI_HT + crl->active_siso_rate = (priv->current_assoc_ht.supp_rates[0] << 1); + crl->active_siso_rate |= (priv->current_assoc_ht.supp_rates[0] & 0x1); + crl->active_siso_rate &= ~((u16)0x2); + crl->active_siso_rate = crl->active_siso_rate << IWL_FIRST_OFDM_RATE; + + crl->active_mimo_rate = (priv->current_assoc_ht.supp_rates[1] << 1); + crl->active_mimo_rate |= (priv->current_assoc_ht.supp_rates[1] & 0x1); + crl->active_mimo_rate &= ~((u16)0x2); + crl->active_mimo_rate = crl->active_mimo_rate << IWL_FIRST_OFDM_RATE; + IWL_DEBUG_HT("SISO RATE 0x%X MIMO RATE 0x%X\n", crl->active_siso_rate, + crl->active_mimo_rate); +#endif /*CONFIG_IWLWIFI_HT*/ +#ifdef CONFIG_MAC80211_DEBUGFS + crl->drv = priv; +#endif + + if (priv->assoc_station_added) + priv->lq_mngr.lq_ready = 1; + + rs_initialize_lq(priv, sta); +} + +static void rs_fill_link_cmd(struct iwl_rate_scale_priv *lq_data, + struct iwl_rate *tx_mcs, + struct iwl_link_quality_cmd *lq_cmd) +{ + int index = 0; + int rate_idx; + int repeat_rate = 0; + u8 ant_toggle_count = 0; + u8 use_ht_possible = 1; + struct iwl_rate new_rate; + struct iwl_scale_tbl_info tbl_type = { 0 }; + + rs_dbgfs_set_mcs(lq_data, tx_mcs, index); + + rs_get_tbl_info_from_mcs(tx_mcs, lq_data->phymode, + &tbl_type, &rate_idx); + + if (is_legacy(tbl_type.lq_type)) { + ant_toggle_count = 1; + repeat_rate = IWL_NUMBER_TRY; + } else + repeat_rate = IWL_HT_NUMBER_TRY; + + lq_cmd->general_params.mimo_delimiter = + is_mimo(tbl_type.lq_type) ? 
1 : 0; + lq_cmd->rs_table[index].rate_n_flags = + cpu_to_le32(tx_mcs->rate_n_flags); + new_rate.rate_n_flags = tx_mcs->rate_n_flags; + + if (is_mimo(tbl_type.lq_type) || (tbl_type.antenna_type == ANT_MAIN)) + lq_cmd->general_params.single_stream_ant_msk = 1; + else + lq_cmd->general_params.single_stream_ant_msk = 2; + + index++; + repeat_rate--; + + while (index < LINK_QUAL_MAX_RETRY_NUM) { + while (repeat_rate > 0 && (index < LINK_QUAL_MAX_RETRY_NUM)) { + if (is_legacy(tbl_type.lq_type)) { + if (ant_toggle_count < + NUM_TRY_BEFORE_ANTENNA_TOGGLE) + ant_toggle_count++; + else { + rs_toggle_antenna(&new_rate, &tbl_type); + ant_toggle_count = 1; + } + } + + rs_dbgfs_set_mcs(lq_data, &new_rate, index); + lq_cmd->rs_table[index].rate_n_flags = + cpu_to_le32(new_rate.rate_n_flags); + repeat_rate--; + index++; + } + + rs_get_tbl_info_from_mcs(&new_rate, lq_data->phymode, &tbl_type, + &rate_idx); + + if (is_mimo(tbl_type.lq_type)) + lq_cmd->general_params.mimo_delimiter = index; + + rs_get_lower_rate(lq_data, &tbl_type, rate_idx, + use_ht_possible, &new_rate); + + if (is_legacy(tbl_type.lq_type)) { + if (ant_toggle_count < NUM_TRY_BEFORE_ANTENNA_TOGGLE) + ant_toggle_count++; + else { + rs_toggle_antenna(&new_rate, &tbl_type); + ant_toggle_count = 1; + } + repeat_rate = IWL_NUMBER_TRY; + } else + repeat_rate = IWL_HT_NUMBER_TRY; + + use_ht_possible = 0; + + rs_dbgfs_set_mcs(lq_data, &new_rate, index); + lq_cmd->rs_table[index].rate_n_flags = + cpu_to_le32(new_rate.rate_n_flags); + + index++; + repeat_rate--; + } + + lq_cmd->general_params.dual_stream_ant_msk = 3; + lq_cmd->agg_params.agg_dis_start_th = 3; + lq_cmd->agg_params.agg_time_limit = cpu_to_le16(4000); +} + +static void *rs_alloc(struct ieee80211_local *local) +{ + return local->hw.priv; +} +/* rate scale requires free function to be implemented */ +static void rs_free(void *priv_rate) +{ + return; +} + +static void rs_clear(void *priv_rate) +{ + struct iwl_priv *priv = (struct iwl_priv *) priv_rate; + + IWL_DEBUG_RATE("enter\n"); + + priv->lq_mngr.lq_ready = 0; +#ifdef CONFIG_IWLWIFI_HT +#ifdef CONFIG_IWLWIFI_HT_AGG + if (priv->lq_mngr.agg_ctrl.granted_ba) + iwl4965_turn_off_agg(priv, TID_ALL_SPECIFIED); +#endif /*CONFIG_IWLWIFI_HT_AGG */ +#endif /* CONFIG_IWLWIFI_HT */ + + IWL_DEBUG_RATE("leave\n"); +} + +static void rs_free_sta(void *priv, void *priv_sta) +{ + struct iwl_rate_scale_priv *rs_priv = priv_sta; + + IWL_DEBUG_RATE("enter\n"); + kfree(rs_priv); + IWL_DEBUG_RATE("leave\n"); +} + + +#ifdef CONFIG_MAC80211_DEBUGFS +static int open_file_generic(struct inode *inode, struct file *file) +{ + file->private_data = inode->i_private; + return 0; +} +static void rs_dbgfs_set_mcs(struct iwl_rate_scale_priv *rs_priv, + struct iwl_rate *mcs, int index) +{ + const u32 cck_rate = 0x820A; + if (rs_priv->dbg_fixed.rate_n_flags) { + if (index < 12) + mcs->rate_n_flags = rs_priv->dbg_fixed.rate_n_flags; + else + mcs->rate_n_flags = cck_rate; + IWL_DEBUG_RATE("Fixed rate ON\n"); + return; + } + + IWL_DEBUG_RATE("Fixed rate OFF\n"); +} + +static ssize_t rs_sta_dbgfs_scale_table_write(struct file *file, + const char __user *user_buf, size_t count, loff_t *ppos) +{ + struct iwl_rate_scale_priv *rs_priv = file->private_data; + char buf[64]; + int buf_size; + u32 parsed_rate; + + memset(buf, 0, sizeof(buf)); + buf_size = min(count, sizeof(buf) - 1); + if (copy_from_user(buf, user_buf, buf_size)) + return -EFAULT; + + if (sscanf(buf, "%x", &parsed_rate) == 1) + rs_priv->dbg_fixed.rate_n_flags = parsed_rate; + else + rs_priv->dbg_fixed.rate_n_flags = 0; 
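+ /* Re-open the full legacy (0x0FFF) and HT SISO/MIMO (0x1FD0) rate + * masks whenever the debugfs fixed rate is updated. */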
+ + rs_priv->active_rate = 0x0FFF; + rs_priv->active_siso_rate = 0x1FD0; + rs_priv->active_mimo_rate = 0x1FD0; + + IWL_DEBUG_RATE("sta_id %d rate 0x%X\n", + rs_priv->lq.sta_id, rs_priv->dbg_fixed.rate_n_flags); + + if (rs_priv->dbg_fixed.rate_n_flags) { + rs_fill_link_cmd(rs_priv, &rs_priv->dbg_fixed, &rs_priv->lq); + rs_send_lq_cmd(rs_priv->drv, &rs_priv->lq, CMD_ASYNC); + } + + return count; +} + +static ssize_t rs_sta_dbgfs_scale_table_read(struct file *file, + char __user *user_buf, size_t count, loff_t *ppos) +{ + char buff[1024]; + int desc = 0; + int i = 0; + + struct iwl_rate_scale_priv *rs_priv = file->private_data; + + desc += sprintf(buff+desc, "sta_id %d\n", rs_priv->lq.sta_id); + desc += sprintf(buff+desc, "failed=%d success=%d rate=0%X\n", + rs_priv->total_failed, rs_priv->total_success, + rs_priv->active_rate); + desc += sprintf(buff+desc, "fixed rate 0x%X\n", + rs_priv->dbg_fixed.rate_n_flags); + desc += sprintf(buff+desc, "general:" + "flags=0x%X mimo-d=%d s-ant0x%x d-ant=0x%x\n", + rs_priv->lq.general_params.flags, + rs_priv->lq.general_params.mimo_delimiter, + rs_priv->lq.general_params.single_stream_ant_msk, + rs_priv->lq.general_params.dual_stream_ant_msk); + + desc += sprintf(buff+desc, "agg:" + "time_limit=%d dist_start_th=%d frame_cnt_limit=%d\n", + le16_to_cpu(rs_priv->lq.agg_params.agg_time_limit), + rs_priv->lq.agg_params.agg_dis_start_th, + rs_priv->lq.agg_params.agg_frame_cnt_limit); + + desc += sprintf(buff+desc, + "Start idx [0]=0x%x [1]=0x%x [2]=0x%x [3]=0x%x\n", + rs_priv->lq.general_params.start_rate_index[0], + rs_priv->lq.general_params.start_rate_index[1], + rs_priv->lq.general_params.start_rate_index[2], + rs_priv->lq.general_params.start_rate_index[3]); + + + for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++) + desc += sprintf(buff+desc, " rate[%d] 0x%X\n", + i, le32_to_cpu(rs_priv->lq.rs_table[i].rate_n_flags)); + + return simple_read_from_buffer(user_buf, count, ppos, buff, desc); +} + +static const struct file_operations rs_sta_dbgfs_scale_table_ops = { + .write = rs_sta_dbgfs_scale_table_write, + .read = rs_sta_dbgfs_scale_table_read, + .open = open_file_generic, +}; +static ssize_t rs_sta_dbgfs_stats_table_read(struct file *file, + char __user *user_buf, size_t count, loff_t *ppos) +{ + char buff[1024]; + int desc = 0; + int i, j; + + struct iwl_rate_scale_priv *rs_priv = file->private_data; + for (i = 0; i < LQ_SIZE; i++) { + desc += sprintf(buff+desc, "%s type=%d SGI=%d FAT=%d DUP=%d\n" + "rate=0x%X\n", + rs_priv->active_tbl == i?"*":"x", + rs_priv->lq_info[i].lq_type, + rs_priv->lq_info[i].is_SGI, + rs_priv->lq_info[i].is_fat, + rs_priv->lq_info[i].is_dup, + rs_priv->lq_info[i].current_rate.rate_n_flags); + for (j = 0; j < IWL_RATE_COUNT; j++) { + desc += sprintf(buff+desc, + "counter=%d success=%d %%=%d\n", + rs_priv->lq_info[i].win[j].counter, + rs_priv->lq_info[i].win[j].success_counter, + rs_priv->lq_info[i].win[j].success_ratio); + } + } + return simple_read_from_buffer(user_buf, count, ppos, buff, desc); +} + +static const struct file_operations rs_sta_dbgfs_stats_table_ops = { + .read = rs_sta_dbgfs_stats_table_read, + .open = open_file_generic, +}; + +static void rs_add_debugfs(void *priv, void *priv_sta, + struct dentry *dir) +{ + struct iwl_rate_scale_priv *rs_priv = priv_sta; + rs_priv->rs_sta_dbgfs_scale_table_file = + debugfs_create_file("rate_scale_table", 0600, dir, + rs_priv, &rs_sta_dbgfs_scale_table_ops); + rs_priv->rs_sta_dbgfs_stats_table_file = + debugfs_create_file("rate_stats_table", 0600, dir, + rs_priv, 
&rs_sta_dbgfs_stats_table_ops); +} + +static void rs_remove_debugfs(void *priv, void *priv_sta) +{ + struct iwl_rate_scale_priv *rs_priv = priv_sta; + debugfs_remove(rs_priv->rs_sta_dbgfs_scale_table_file); + debugfs_remove(rs_priv->rs_sta_dbgfs_stats_table_file); +} +#endif + +static struct rate_control_ops rs_ops = { + .module = NULL, + .name = RS_NAME, + .tx_status = rs_tx_status, + .get_rate = rs_get_rate, + .rate_init = rs_rate_init, + .clear = rs_clear, + .alloc = rs_alloc, + .free = rs_free, + .alloc_sta = rs_alloc_sta, + .free_sta = rs_free_sta, +#ifdef CONFIG_MAC80211_DEBUGFS + .add_sta_debugfs = rs_add_debugfs, + .remove_sta_debugfs = rs_remove_debugfs, +#endif +}; + +int iwl_fill_rs_info(struct ieee80211_hw *hw, char *buf, u8 sta_id) +{ + struct ieee80211_local *local = hw_to_local(hw); + struct iwl_priv *priv = hw->priv; + struct iwl_rate_scale_priv *rs_priv; + struct sta_info *sta; + int count = 0, i; + u32 samples = 0, success = 0, good = 0; + unsigned long now = jiffies; + u32 max_time = 0; + u8 lq_type, antenna; + + sta = sta_info_get(local, priv->stations[sta_id].sta.sta.addr); + if (!sta || !sta->rate_ctrl_priv) { + if (sta) { + sta_info_put(sta); + IWL_DEBUG_RATE("leave - no private rate data!\n"); + } else + IWL_DEBUG_RATE("leave - no station!\n"); + return sprintf(buf, "station %d not found\n", sta_id); + } + + rs_priv = (void *)sta->rate_ctrl_priv; + + lq_type = rs_priv->lq_info[rs_priv->active_tbl].lq_type; + antenna = rs_priv->lq_info[rs_priv->active_tbl].antenna_type; + + if (is_legacy(lq_type)) + i = IWL_RATE_54M_INDEX; + else + i = IWL_RATE_60M_INDEX; + while (1) { + u64 mask; + int j; + int active = rs_priv->active_tbl; + + count += + sprintf(&buf[count], " %2dMbs: ", iwl_rates[i].ieee / 2); + + mask = (1ULL << (IWL_RATE_MAX_WINDOW - 1)); + for (j = 0; j < IWL_RATE_MAX_WINDOW; j++, mask >>= 1) + buf[count++] = + (rs_priv->lq_info[active].win[i].data & mask) + ? '1' : '0'; + + samples += rs_priv->lq_info[active].win[i].counter; + good += rs_priv->lq_info[active].win[i].success_counter; + success += rs_priv->lq_info[active].win[i].success_counter * + iwl_rates[i].ieee; + + if (rs_priv->lq_info[active].win[i].stamp) { + int delta = + jiffies_to_msecs(now - + rs_priv->lq_info[active].win[i].stamp); + + if (delta > max_time) + max_time = delta; + + count += sprintf(&buf[count], "%5dms\n", delta); + } else + buf[count++] = '\n'; + + j = iwl_get_prev_ieee_rate(i); + if (j == i) + break; + i = j; + } + + /* Display the average rate of all samples taken. 
+ * + * NOTE: We multiply # of samples by 2 since the IEEE measurement + * added from iwl_rates is actually 2X the rate */ + if (samples) + count += sprintf(&buf[count], + "\nAverage rate is %3d.%02dMbs over last %4dms\n" + "%3d%% success (%d good packets over %d tries)\n", + success / (2 * samples), (success * 5 / samples) % 10, + max_time, good * 100 / samples, good, samples); + else + count += sprintf(&buf[count], "\nAverage rate: 0Mbs\n"); + count += sprintf(&buf[count], "\nrate scale type %d antenna %d " + "active_search %d rate index %d\n", lq_type, antenna, + rs_priv->search_better_tbl, sta->last_txrate); + + sta_info_put(sta); + return count; +} + +void iwl_rate_scale_init(struct ieee80211_hw *hw, s32 sta_id) +{ + struct iwl_priv *priv = hw->priv; + + priv->lq_mngr.lq_ready = 1; +} + +void iwl_rate_control_register(struct ieee80211_hw *hw) +{ + ieee80211_rate_control_register(&rs_ops); +} + +void iwl_rate_control_unregister(struct ieee80211_hw *hw) +{ + ieee80211_rate_control_unregister(&rs_ops); +} + diff --git a/drivers/net/wireless/iwlwifi/iwl-4965-rs.h b/drivers/net/wireless/iwlwifi/iwl-4965-rs.h new file mode 100644 index 0000000000000000000000000000000000000000..c6325f72df68fc8545a52f3bcdf20171876c2741 --- /dev/null +++ b/drivers/net/wireless/iwlwifi/iwl-4965-rs.h @@ -0,0 +1,266 @@ +/****************************************************************************** + * + * Copyright(c) 2003 - 2007 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA + * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. + * + * Contact Information: + * James P. Ketrenos + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + *****************************************************************************/ + +#ifndef __iwl_4965_rs_h__ +#define __iwl_4965_rs_h__ + +#include "iwl-4965.h" + +struct iwl_rate_info { + u8 plcp; + u8 plcp_siso; + u8 plcp_mimo; + u8 ieee; + u8 prev_ieee; /* previous rate in IEEE speeds */ + u8 next_ieee; /* next rate in IEEE speeds */ + u8 prev_rs; /* previous rate used in rs algo */ + u8 next_rs; /* next rate used in rs algo */ + u8 prev_rs_tgg; /* previous rate used in TGG rs algo */ + u8 next_rs_tgg; /* next rate used in TGG rs algo */ +}; + +enum { + IWL_RATE_1M_INDEX = 0, + IWL_RATE_2M_INDEX, + IWL_RATE_5M_INDEX, + IWL_RATE_11M_INDEX, + IWL_RATE_6M_INDEX, + IWL_RATE_9M_INDEX, + IWL_RATE_12M_INDEX, + IWL_RATE_18M_INDEX, + IWL_RATE_24M_INDEX, + IWL_RATE_36M_INDEX, + IWL_RATE_48M_INDEX, + IWL_RATE_54M_INDEX, + IWL_RATE_60M_INDEX, + IWL_RATE_COUNT, + IWL_RATE_INVM_INDEX = IWL_RATE_COUNT, + IWL_RATE_INVALID = IWL_RATE_INVM_INDEX +}; + +enum { + IWL_FIRST_OFDM_RATE = IWL_RATE_6M_INDEX, + IWL_LAST_OFDM_RATE = IWL_RATE_60M_INDEX, + IWL_FIRST_CCK_RATE = IWL_RATE_1M_INDEX, + IWL_LAST_CCK_RATE = IWL_RATE_11M_INDEX, +}; + +/* #define vs. 
enum to keep from defaulting to 'large integer' */ +#define IWL_RATE_6M_MASK (1< + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + *****************************************************************************/ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define IWL 4965 + +#include "iwlwifi.h" +#include "iwl-4965.h" +#include "iwl-helpers.h" + +#define IWL_DECLARE_RATE_INFO(r, s, ip, in, rp, rn, pp, np) \ + [IWL_RATE_##r##M_INDEX] = { IWL_RATE_##r##M_PLCP, \ + IWL_RATE_SISO_##s##M_PLCP, \ + IWL_RATE_MIMO_##s##M_PLCP, \ + IWL_RATE_##r##M_IEEE, \ + IWL_RATE_##ip##M_INDEX, \ + IWL_RATE_##in##M_INDEX, \ + IWL_RATE_##rp##M_INDEX, \ + IWL_RATE_##rn##M_INDEX, \ + IWL_RATE_##pp##M_INDEX, \ + IWL_RATE_##np##M_INDEX } + +/* + * Parameter order: + * rate, ht rate, prev rate, next rate, prev tgg rate, next tgg rate + * + * If there isn't a valid next or previous rate then INV is used which + * maps to IWL_RATE_INVALID + * + */ +const struct iwl_rate_info iwl_rates[IWL_RATE_COUNT] = { + IWL_DECLARE_RATE_INFO(1, INV, INV, 2, INV, 2, INV, 2), /* 1mbps */ + IWL_DECLARE_RATE_INFO(2, INV, 1, 5, 1, 5, 1, 5), /* 2mbps */ + IWL_DECLARE_RATE_INFO(5, INV, 2, 6, 2, 11, 2, 11), /*5.5mbps */ + IWL_DECLARE_RATE_INFO(11, INV, 9, 12, 9, 12, 5, 18), /* 11mbps */ + IWL_DECLARE_RATE_INFO(6, 6, 5, 9, 5, 11, 5, 11), /* 6mbps */ + IWL_DECLARE_RATE_INFO(9, 6, 6, 11, 6, 11, 5, 11), /* 9mbps */ + IWL_DECLARE_RATE_INFO(12, 12, 11, 18, 11, 18, 11, 18), /* 12mbps */ + IWL_DECLARE_RATE_INFO(18, 18, 12, 24, 12, 24, 11, 24), /* 18mbps */ + IWL_DECLARE_RATE_INFO(24, 24, 18, 36, 18, 36, 18, 36), /* 24mbps */ + IWL_DECLARE_RATE_INFO(36, 36, 24, 48, 24, 48, 24, 48), /* 36mbps */ + IWL_DECLARE_RATE_INFO(48, 48, 36, 54, 36, 54, 36, 54), /* 48mbps */ + IWL_DECLARE_RATE_INFO(54, 54, 48, INV, 48, INV, 48, INV),/* 54mbps */ + IWL_DECLARE_RATE_INFO(60, 60, 48, INV, 48, INV, 48, INV),/* 60mbps */ +}; + +static int is_fat_channel(__le32 rxon_flags) +{ + return (rxon_flags & RXON_FLG_CHANNEL_MODE_PURE_40_MSK) || + (rxon_flags & RXON_FLG_CHANNEL_MODE_MIXED_MSK); +} + +static u8 is_single_stream(struct iwl_priv *priv) +{ +#ifdef CONFIG_IWLWIFI_HT + if (!priv->is_ht_enabled || !priv->current_assoc_ht.is_ht || + (priv->active_rate_ht[1] == 0) || + (priv->ps_mode == IWL_MIMO_PS_STATIC)) + return 1; +#else + return 1; +#endif /*CONFIG_IWLWIFI_HT */ + return 0; +} + +/* + * Determine how many receiver/antenna chains to use. + * More provides better reception via diversity. Fewer saves power. + * MIMO (dual stream) requires at least 2, but works better with 3. + * This does not determine *which* chains to use, just how many. + */ +static int iwl4965_get_rx_chain_counter(struct iwl_priv *priv, + u8 *idle_state, u8 *rx_state) +{ + u8 is_single = is_single_stream(priv); + u8 is_cam = test_bit(STATUS_POWER_PMI, &priv->status) ? 0 : 1; + + /* # of Rx chains to use when expecting MIMO. */ + if (is_single || (!is_cam && (priv->ps_mode == IWL_MIMO_PS_STATIC))) + *rx_state = 2; + else + *rx_state = 3; + + /* # Rx chains when idling and maybe trying to save power */ + switch (priv->ps_mode) { + case IWL_MIMO_PS_STATIC: + case IWL_MIMO_PS_DYNAMIC: + *idle_state = (is_cam) ? 2 : 1; + break; + case IWL_MIMO_PS_NONE: + *idle_state = (is_cam) ? 
*rx_state : 1; + break; + default: + *idle_state = 1; + break; + } + + return 0; +} + +int iwl_hw_rxq_stop(struct iwl_priv *priv) +{ + int rc; + unsigned long flags; + + spin_lock_irqsave(&priv->lock, flags); + rc = iwl_grab_restricted_access(priv); + if (rc) { + spin_unlock_irqrestore(&priv->lock, flags); + return rc; + } + + /* stop HW */ + iwl_write_restricted(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0); + rc = iwl_poll_restricted_bit(priv, FH_MEM_RSSR_RX_STATUS_REG, + (1 << 24), 1000); + if (rc < 0) + IWL_ERROR("Can't stop Rx DMA.\n"); + + iwl_release_restricted_access(priv); + spin_unlock_irqrestore(&priv->lock, flags); + + return 0; +} + +u8 iwl_hw_find_station(struct iwl_priv *priv, const u8 *addr) +{ + int i; + int start = 0; + int ret = IWL_INVALID_STATION; + unsigned long flags; + DECLARE_MAC_BUF(mac); + + if ((priv->iw_mode == IEEE80211_IF_TYPE_IBSS) || + (priv->iw_mode == IEEE80211_IF_TYPE_AP)) + start = IWL_STA_ID; + + if (is_broadcast_ether_addr(addr)) + return IWL4965_BROADCAST_ID; + + spin_lock_irqsave(&priv->sta_lock, flags); + for (i = start; i < priv->hw_setting.max_stations; i++) + if ((priv->stations[i].used) && + (!compare_ether_addr + (priv->stations[i].sta.sta.addr, addr))) { + ret = i; + goto out; + } + + IWL_DEBUG_ASSOC_LIMIT("can not find STA %s total %d\n", + print_mac(mac, addr), priv->num_stations); + + out: + spin_unlock_irqrestore(&priv->sta_lock, flags); + return ret; +} + +static int iwl4965_nic_set_pwr_src(struct iwl_priv *priv, int pwr_max) +{ + int rc = 0; + unsigned long flags; + + spin_lock_irqsave(&priv->lock, flags); + rc = iwl_grab_restricted_access(priv); + if (rc) { + spin_unlock_irqrestore(&priv->lock, flags); + return rc; + } + + if (!pwr_max) { + u32 val; + + rc = pci_read_config_dword(priv->pci_dev, PCI_POWER_SOURCE, + &val); + + if (val & PCI_CFG_PMC_PME_FROM_D3COLD_SUPPORT) + iwl_set_bits_mask_restricted_reg( + priv, APMG_PS_CTRL_REG, + APMG_PS_CTRL_VAL_PWR_SRC_VAUX, + ~APMG_PS_CTRL_MSK_PWR_SRC); + } else + iwl_set_bits_mask_restricted_reg( + priv, APMG_PS_CTRL_REG, + APMG_PS_CTRL_VAL_PWR_SRC_VMAIN, + ~APMG_PS_CTRL_MSK_PWR_SRC); + + iwl_release_restricted_access(priv); + spin_unlock_irqrestore(&priv->lock, flags); + + return rc; +} + +static int iwl4965_rx_init(struct iwl_priv *priv, struct iwl_rx_queue *rxq) +{ + int rc; + unsigned long flags; + + spin_lock_irqsave(&priv->lock, flags); + rc = iwl_grab_restricted_access(priv); + if (rc) { + spin_unlock_irqrestore(&priv->lock, flags); + return rc; + } + + /* stop HW */ + iwl_write_restricted(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0); + + iwl_write_restricted(priv, FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0); + iwl_write_restricted(priv, FH_RSCSR_CHNL0_RBDCB_BASE_REG, + rxq->dma_addr >> 8); + + iwl_write_restricted(priv, FH_RSCSR_CHNL0_STTS_WPTR_REG, + (priv->hw_setting.shared_phys + + offsetof(struct iwl_shared, val0)) >> 4); + + iwl_write_restricted(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG, + FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL | + FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL | + IWL_FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K | + /*0x10 << 4 | */ + (RX_QUEUE_SIZE_LOG << + FH_RCSR_RX_CONFIG_RBDCB_SIZE_BITSHIFT)); + + /* + * iwl_write32(priv,CSR_INT_COAL_REG,0); + */ + + iwl_release_restricted_access(priv); + spin_unlock_irqrestore(&priv->lock, flags); + + return 0; +} + +static int iwl4965_kw_init(struct iwl_priv *priv) +{ + unsigned long flags; + int rc; + + spin_lock_irqsave(&priv->lock, flags); + rc = iwl_grab_restricted_access(priv); + if (rc) + goto out; + + iwl_write_restricted(priv, IWL_FH_KW_MEM_ADDR_REG, + 
priv->kw.dma_addr >> 4); + iwl_release_restricted_access(priv); +out: + spin_unlock_irqrestore(&priv->lock, flags); + return rc; +} + +static int iwl4965_kw_alloc(struct iwl_priv *priv) +{ + struct pci_dev *dev = priv->pci_dev; + struct iwl_kw *kw = &priv->kw; + + kw->size = IWL4965_KW_SIZE; /* TBW need set somewhere else */ + kw->v_addr = pci_alloc_consistent(dev, kw->size, &kw->dma_addr); + if (!kw->v_addr) + return -ENOMEM; + + return 0; +} + +#define CHECK_AND_PRINT(x) ((eeprom_ch->flags & EEPROM_CHANNEL_##x) \ + ? # x " " : "") + +int iwl4965_set_fat_chan_info(struct iwl_priv *priv, int phymode, u16 channel, + const struct iwl_eeprom_channel *eeprom_ch, + u8 fat_extension_channel) +{ + struct iwl_channel_info *ch_info; + + ch_info = (struct iwl_channel_info *) + iwl_get_channel_info(priv, phymode, channel); + + if (!is_channel_valid(ch_info)) + return -1; + + IWL_DEBUG_INFO("FAT Ch. %d [%sGHz] %s%s%s%s%s%s(0x%02x" + " %ddBm): Ad-Hoc %ssupported\n", + ch_info->channel, + is_channel_a_band(ch_info) ? + "5.2" : "2.4", + CHECK_AND_PRINT(IBSS), + CHECK_AND_PRINT(ACTIVE), + CHECK_AND_PRINT(RADAR), + CHECK_AND_PRINT(WIDE), + CHECK_AND_PRINT(NARROW), + CHECK_AND_PRINT(DFS), + eeprom_ch->flags, + eeprom_ch->max_power_avg, + ((eeprom_ch->flags & EEPROM_CHANNEL_IBSS) + && !(eeprom_ch->flags & EEPROM_CHANNEL_RADAR)) ? + "" : "not "); + + ch_info->fat_eeprom = *eeprom_ch; + ch_info->fat_max_power_avg = eeprom_ch->max_power_avg; + ch_info->fat_curr_txpow = eeprom_ch->max_power_avg; + ch_info->fat_min_power = 0; + ch_info->fat_scan_power = eeprom_ch->max_power_avg; + ch_info->fat_flags = eeprom_ch->flags; + ch_info->fat_extension_channel = fat_extension_channel; + + return 0; +} + +static void iwl4965_kw_free(struct iwl_priv *priv) +{ + struct pci_dev *dev = priv->pci_dev; + struct iwl_kw *kw = &priv->kw; + + if (kw->v_addr) { + pci_free_consistent(dev, kw->size, kw->v_addr, kw->dma_addr); + memset(kw, 0, sizeof(*kw)); + } +} + +/** + * iwl4965_txq_ctx_reset - Reset TX queue context + * Destroys all DMA structures and initialise them again + * + * @param priv + * @return error code + */ +static int iwl4965_txq_ctx_reset(struct iwl_priv *priv) +{ + int rc = 0; + int txq_id, slots_num; + unsigned long flags; + + iwl4965_kw_free(priv); + + iwl_hw_txq_ctx_free(priv); + + /* Tx CMD queue */ + rc = iwl4965_kw_alloc(priv); + if (rc) { + IWL_ERROR("Keep Warm allocation failed"); + goto error_kw; + } + + spin_lock_irqsave(&priv->lock, flags); + + rc = iwl_grab_restricted_access(priv); + if (unlikely(rc)) { + IWL_ERROR("TX reset failed"); + spin_unlock_irqrestore(&priv->lock, flags); + goto error_reset; + } + + iwl_write_restricted_reg(priv, SCD_TXFACT, 0); + iwl_release_restricted_access(priv); + spin_unlock_irqrestore(&priv->lock, flags); + + rc = iwl4965_kw_init(priv); + if (rc) { + IWL_ERROR("kw_init failed\n"); + goto error_reset; + } + + /* Tx queue(s) */ + for (txq_id = 0; txq_id < priv->hw_setting.max_txq_num; txq_id++) { + slots_num = (txq_id == IWL_CMD_QUEUE_NUM) ? 
+ TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS; + rc = iwl_tx_queue_init(priv, &priv->txq[txq_id], slots_num, + txq_id); + if (rc) { + IWL_ERROR("Tx %d queue init failed\n", txq_id); + goto error; + } + } + + return rc; + + error: + iwl_hw_txq_ctx_free(priv); + error_reset: + iwl4965_kw_free(priv); + error_kw: + return rc; +} + +int iwl_hw_nic_init(struct iwl_priv *priv) +{ + int rc; + unsigned long flags; + struct iwl_rx_queue *rxq = &priv->rxq; + u8 rev_id; + u32 val; + u8 val_link; + + iwl_power_init_handle(priv); + + /* nic_init */ + spin_lock_irqsave(&priv->lock, flags); + + iwl_set_bit(priv, CSR_GIO_CHICKEN_BITS, + CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER); + + iwl_set_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE); + rc = iwl_poll_bit(priv, CSR_GP_CNTRL, + CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, + CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000); + if (rc < 0) { + spin_unlock_irqrestore(&priv->lock, flags); + IWL_DEBUG_INFO("Failed to init the card\n"); + return rc; + } + + rc = iwl_grab_restricted_access(priv); + if (rc) { + spin_unlock_irqrestore(&priv->lock, flags); + return rc; + } + + iwl_read_restricted_reg(priv, APMG_CLK_CTRL_REG); + + iwl_write_restricted_reg(priv, APMG_CLK_CTRL_REG, + APMG_CLK_VAL_DMA_CLK_RQT | + APMG_CLK_VAL_BSM_CLK_RQT); + iwl_read_restricted_reg(priv, APMG_CLK_CTRL_REG); + + udelay(20); + + iwl_set_bits_restricted_reg(priv, APMG_PCIDEV_STT_REG, + APMG_PCIDEV_STT_VAL_L1_ACT_DIS); + + iwl_release_restricted_access(priv); + iwl_write32(priv, CSR_INT_COALESCING, 512 / 32); + spin_unlock_irqrestore(&priv->lock, flags); + + /* Determine HW type */ + rc = pci_read_config_byte(priv->pci_dev, PCI_REVISION_ID, &rev_id); + if (rc) + return rc; + + IWL_DEBUG_INFO("HW Revision ID = 0x%X\n", rev_id); + + iwl4965_nic_set_pwr_src(priv, 1); + spin_lock_irqsave(&priv->lock, flags); + + if ((rev_id & 0x80) == 0x80 && (rev_id & 0x7f) < 8) { + pci_read_config_dword(priv->pci_dev, PCI_REG_WUM8, &val); + /* Enable No Snoop field */ + pci_write_config_dword(priv->pci_dev, PCI_REG_WUM8, + val & ~(1 << 11)); + } + + spin_unlock_irqrestore(&priv->lock, flags); + + /* Read the EEPROM */ + rc = iwl_eeprom_init(priv); + if (rc) + return rc; + + if (priv->eeprom.calib_version < EEPROM_TX_POWER_VERSION_NEW) { + IWL_ERROR("Older EEPROM detected! 
Aborting.\n"); + return -EINVAL; + } + + pci_read_config_byte(priv->pci_dev, PCI_LINK_CTRL, &val_link); + + /* disable L1 entry -- workaround for pre-B1 */ + pci_write_config_byte(priv->pci_dev, PCI_LINK_CTRL, val_link & ~0x02); + + spin_lock_irqsave(&priv->lock, flags); + + /* set CSR_HW_CONFIG_REG for uCode use */ + + iwl_set_bit(priv, CSR_SW_VER, CSR_HW_IF_CONFIG_REG_BIT_KEDRON_R | + CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI | + CSR_HW_IF_CONFIG_REG_BIT_MAC_SI); + + rc = iwl_grab_restricted_access(priv); + if (rc < 0) { + spin_unlock_irqrestore(&priv->lock, flags); + IWL_DEBUG_INFO("Failed to init the card\n"); + return rc; + } + + iwl_read_restricted_reg(priv, APMG_PS_CTRL_REG); + iwl_set_bits_restricted_reg(priv, APMG_PS_CTRL_REG, + APMG_PS_CTRL_VAL_RESET_REQ); + udelay(5); + iwl_clear_bits_restricted_reg(priv, APMG_PS_CTRL_REG, + APMG_PS_CTRL_VAL_RESET_REQ); + + iwl_release_restricted_access(priv); + spin_unlock_irqrestore(&priv->lock, flags); + + iwl_hw_card_show_info(priv); + + /* end nic_init */ + + /* Allocate the RX queue, or reset if it is already allocated */ + if (!rxq->bd) { + rc = iwl_rx_queue_alloc(priv); + if (rc) { + IWL_ERROR("Unable to initialize Rx queue\n"); + return -ENOMEM; + } + } else + iwl_rx_queue_reset(priv, rxq); + + iwl_rx_replenish(priv); + + iwl4965_rx_init(priv, rxq); + + spin_lock_irqsave(&priv->lock, flags); + + rxq->need_update = 1; + iwl_rx_queue_update_write_ptr(priv, rxq); + + spin_unlock_irqrestore(&priv->lock, flags); + rc = iwl4965_txq_ctx_reset(priv); + if (rc) + return rc; + + if (priv->eeprom.sku_cap & EEPROM_SKU_CAP_SW_RF_KILL_ENABLE) + IWL_DEBUG_RF_KILL("SW RF KILL supported in EEPROM.\n"); + + if (priv->eeprom.sku_cap & EEPROM_SKU_CAP_HW_RF_KILL_ENABLE) + IWL_DEBUG_RF_KILL("HW RF KILL supported in EEPROM.\n"); + + set_bit(STATUS_INIT, &priv->status); + + return 0; +} + +int iwl_hw_nic_stop_master(struct iwl_priv *priv) +{ + int rc = 0; + u32 reg_val; + unsigned long flags; + + spin_lock_irqsave(&priv->lock, flags); + + /* set stop master bit */ + iwl_set_bit(priv, CSR_RESET, CSR_RESET_REG_FLAG_STOP_MASTER); + + reg_val = iwl_read32(priv, CSR_GP_CNTRL); + + if (CSR_GP_CNTRL_REG_FLAG_MAC_POWER_SAVE == + (reg_val & CSR_GP_CNTRL_REG_MSK_POWER_SAVE_TYPE)) + IWL_DEBUG_INFO("Card in power save, master is already " + "stopped\n"); + else { + rc = iwl_poll_bit(priv, CSR_RESET, + CSR_RESET_REG_FLAG_MASTER_DISABLED, + CSR_RESET_REG_FLAG_MASTER_DISABLED, 100); + if (rc < 0) { + spin_unlock_irqrestore(&priv->lock, flags); + return rc; + } + } + + spin_unlock_irqrestore(&priv->lock, flags); + IWL_DEBUG_INFO("stop master\n"); + + return rc; +} + +void iwl_hw_txq_ctx_stop(struct iwl_priv *priv) +{ + + int txq_id; + unsigned long flags; + + /* reset TFD queues */ + for (txq_id = 0; txq_id < priv->hw_setting.max_txq_num; txq_id++) { + spin_lock_irqsave(&priv->lock, flags); + if (iwl_grab_restricted_access(priv)) { + spin_unlock_irqrestore(&priv->lock, flags); + continue; + } + + iwl_write_restricted(priv, + IWL_FH_TCSR_CHNL_TX_CONFIG_REG(txq_id), + 0x0); + iwl_poll_restricted_bit(priv, IWL_FH_TSSR_TX_STATUS_REG, + IWL_FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE + (txq_id), 200); + iwl_release_restricted_access(priv); + spin_unlock_irqrestore(&priv->lock, flags); + } + + iwl_hw_txq_ctx_free(priv); +} + +int iwl_hw_nic_reset(struct iwl_priv *priv) +{ + int rc = 0; + unsigned long flags; + + iwl_hw_nic_stop_master(priv); + + spin_lock_irqsave(&priv->lock, flags); + + iwl_set_bit(priv, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET); + + udelay(10); + + iwl_set_bit(priv, CSR_GP_CNTRL, 
CSR_GP_CNTRL_REG_FLAG_INIT_DONE); + rc = iwl_poll_bit(priv, CSR_RESET, + CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, + CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25); + + udelay(10); + + rc = iwl_grab_restricted_access(priv); + if (!rc) { + iwl_write_restricted_reg(priv, APMG_CLK_EN_REG, + APMG_CLK_VAL_DMA_CLK_RQT | + APMG_CLK_VAL_BSM_CLK_RQT); + + udelay(10); + + iwl_set_bits_restricted_reg(priv, APMG_PCIDEV_STT_REG, + APMG_PCIDEV_STT_VAL_L1_ACT_DIS); + + iwl_release_restricted_access(priv); + } + + clear_bit(STATUS_HCMD_ACTIVE, &priv->status); + wake_up_interruptible(&priv->wait_command_queue); + + spin_unlock_irqrestore(&priv->lock, flags); + + return rc; + +} + +#define REG_RECALIB_PERIOD (60) + +/** + * iwl4965_bg_statistics_periodic - Timer callback to queue statistics + * + * This callback is provided in order to queue the statistics_work + * in work_queue context (v. softirq) + * + * This timer function is continually reset to execute within + * REG_RECALIB_PERIOD seconds since the last STATISTICS_NOTIFICATION + * was received. We need to ensure we receive the statistics in order + * to update the temperature used for calibrating the TXPOWER. However, + * we can't send the statistics command from softirq context (which + * is the context which timers run at) so we have to queue off the + * statistics_work to actually send the command to the hardware. + */ +static void iwl4965_bg_statistics_periodic(unsigned long data) +{ + struct iwl_priv *priv = (struct iwl_priv *)data; + + queue_work(priv->workqueue, &priv->statistics_work); +} + +/** + * iwl4965_bg_statistics_work - Send the statistics request to the hardware. + * + * This is queued by iwl_bg_statistics_periodic. + */ +static void iwl4965_bg_statistics_work(struct work_struct *work) +{ + struct iwl_priv *priv = container_of(work, struct iwl_priv, + statistics_work); + + if (test_bit(STATUS_EXIT_PENDING, &priv->status)) + return; + + mutex_lock(&priv->mutex); + iwl_send_statistics_request(priv); + mutex_unlock(&priv->mutex); +} + +#define CT_LIMIT_CONST 259 +#define TM_CT_KILL_THRESHOLD 110 + +void iwl4965_rf_kill_ct_config(struct iwl_priv *priv) +{ + struct iwl_ct_kill_config cmd; + u32 R1, R2, R3; + u32 temp_th; + u32 crit_temperature; + unsigned long flags; + int rc = 0; + + spin_lock_irqsave(&priv->lock, flags); + iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, + CSR_UCODE_DRV_GP1_REG_BIT_CT_KILL_EXIT); + spin_unlock_irqrestore(&priv->lock, flags); + + if (priv->statistics.flag & STATISTICS_REPLY_FLG_FAT_MODE_MSK) { + R1 = (s32)le32_to_cpu(priv->card_alive_init.therm_r1[1]); + R2 = (s32)le32_to_cpu(priv->card_alive_init.therm_r2[1]); + R3 = (s32)le32_to_cpu(priv->card_alive_init.therm_r3[1]); + } else { + R1 = (s32)le32_to_cpu(priv->card_alive_init.therm_r1[0]); + R2 = (s32)le32_to_cpu(priv->card_alive_init.therm_r2[0]); + R3 = (s32)le32_to_cpu(priv->card_alive_init.therm_r3[0]); + } + + temp_th = CELSIUS_TO_KELVIN(TM_CT_KILL_THRESHOLD); + + crit_temperature = ((temp_th * (R3-R1))/CT_LIMIT_CONST) + R2; + cmd.critical_temperature_R = cpu_to_le32(crit_temperature); + rc = iwl_send_cmd_pdu(priv, + REPLY_CT_KILL_CONFIG_CMD, sizeof(cmd), &cmd); + if (rc) + IWL_ERROR("REPLY_CT_KILL_CONFIG_CMD failed\n"); + else + IWL_DEBUG_INFO("REPLY_CT_KILL_CONFIG_CMD succeeded\n"); +} + +#ifdef CONFIG_IWLWIFI_SENSITIVITY + +/* "false alarms" are signals that our DSP tries to lock onto, + * but then determines that they are either noise, or transmissions + * from a distant wireless network (also "noise", really) that get + * "stepped on" by stronger 
transmissions within our own network. + * This algorithm attempts to set a sensitivity level that is high + * enough to receive all of our own network traffic, but not so + * high that our DSP gets too busy trying to lock onto non-network + * activity/noise. */ +static int iwl4965_sens_energy_cck(struct iwl_priv *priv, + u32 norm_fa, + u32 rx_enable_time, + struct statistics_general_data *rx_info) +{ + u32 max_nrg_cck = 0; + int i = 0; + u8 max_silence_rssi = 0; + u32 silence_ref = 0; + u8 silence_rssi_a = 0; + u8 silence_rssi_b = 0; + u8 silence_rssi_c = 0; + u32 val; + + /* "false_alarms" values below are cross-multiplications to assess the + * numbers of false alarms within the measured period of actual Rx + * (Rx is off when we're txing), vs the min/max expected false alarms + * (some should be expected if rx is sensitive enough) in a + * hypothetical listening period of 200 time units (TU), 204.8 msec: + * + * MIN_FA/fixed-time < false_alarms/actual-rx-time < MAX_FA/beacon-time + * + * */ + u32 false_alarms = norm_fa * 200 * 1024; + u32 max_false_alarms = MAX_FA_CCK * rx_enable_time; + u32 min_false_alarms = MIN_FA_CCK * rx_enable_time; + struct iwl_sensitivity_data *data = NULL; + + data = &(priv->sensitivity_data); + + data->nrg_auto_corr_silence_diff = 0; + + /* Find max silence rssi among all 3 receivers. + * This is background noise, which may include transmissions from other + * networks, measured during silence before our network's beacon */ + silence_rssi_a = (u8)((rx_info->beacon_silence_rssi_a & + ALL_BAND_FILTER)>>8); + silence_rssi_b = (u8)((rx_info->beacon_silence_rssi_b & + ALL_BAND_FILTER)>>8); + silence_rssi_c = (u8)((rx_info->beacon_silence_rssi_c & + ALL_BAND_FILTER)>>8); + + val = max(silence_rssi_b, silence_rssi_c); + max_silence_rssi = max(silence_rssi_a, (u8) val); + + /* Store silence rssi in 20-beacon history table */ + data->nrg_silence_rssi[data->nrg_silence_idx] = max_silence_rssi; + data->nrg_silence_idx++; + if (data->nrg_silence_idx >= NRG_NUM_PREV_STAT_L) + data->nrg_silence_idx = 0; + + /* Find max silence rssi across 20 beacon history */ + for (i = 0; i < NRG_NUM_PREV_STAT_L; i++) { + val = data->nrg_silence_rssi[i]; + silence_ref = max(silence_ref, val); + } + IWL_DEBUG_CALIB("silence a %u, b %u, c %u, 20-bcn max %u\n", + silence_rssi_a, silence_rssi_b, silence_rssi_c, + silence_ref); + + /* Find max rx energy (min value!) among all 3 receivers, + * measured during beacon frame. + * Save it in 10-beacon history table. */ + i = data->nrg_energy_idx; + val = min(rx_info->beacon_energy_b, rx_info->beacon_energy_c); + data->nrg_value[i] = min(rx_info->beacon_energy_a, val); + + data->nrg_energy_idx++; + if (data->nrg_energy_idx >= 10) + data->nrg_energy_idx = 0; + + /* Find min rx energy (max value) across 10 beacon history. + * This is the minimum signal level that we want to receive well. + * Add backoff (margin so we don't miss slightly lower energy frames). + * This establishes an upper bound (min value) for energy threshold. */ + max_nrg_cck = data->nrg_value[0]; + for (i = 1; i < 10; i++) + max_nrg_cck = (u32) max(max_nrg_cck, (data->nrg_value[i])); + max_nrg_cck += 6; + + IWL_DEBUG_CALIB("rx energy a %u, b %u, c %u, 10-bcn max/min %u\n", + rx_info->beacon_energy_a, rx_info->beacon_energy_b, + rx_info->beacon_energy_c, max_nrg_cck - 6); + + /* Count number of consecutive beacons with fewer-than-desired + * false alarms. 
*/ + if (false_alarms < min_false_alarms) + data->num_in_cck_no_fa++; + else + data->num_in_cck_no_fa = 0; + IWL_DEBUG_CALIB("consecutive bcns with few false alarms = %u\n", + data->num_in_cck_no_fa); + + /* If we got too many false alarms this time, reduce sensitivity */ + if (false_alarms > max_false_alarms) { + IWL_DEBUG_CALIB("norm FA %u > max FA %u\n", + false_alarms, max_false_alarms); + IWL_DEBUG_CALIB("... reducing sensitivity\n"); + data->nrg_curr_state = IWL_FA_TOO_MANY; + + if (data->auto_corr_cck > AUTO_CORR_MAX_TH_CCK) { + /* Store for "fewer than desired" on later beacon */ + data->nrg_silence_ref = silence_ref; + + /* increase energy threshold (reduce nrg value) + * to decrease sensitivity */ + if (data->nrg_th_cck > (NRG_MAX_CCK + NRG_STEP_CCK)) + data->nrg_th_cck = data->nrg_th_cck + - NRG_STEP_CCK; + } + + /* increase auto_corr values to decrease sensitivity */ + if (data->auto_corr_cck < AUTO_CORR_MAX_TH_CCK) + data->auto_corr_cck = AUTO_CORR_MAX_TH_CCK + 1; + else { + val = data->auto_corr_cck + AUTO_CORR_STEP_CCK; + data->auto_corr_cck = min((u32)AUTO_CORR_MAX_CCK, val); + } + val = data->auto_corr_cck_mrc + AUTO_CORR_STEP_CCK; + data->auto_corr_cck_mrc = min((u32)AUTO_CORR_MAX_CCK_MRC, val); + + /* Else if we got fewer than desired, increase sensitivity */ + } else if (false_alarms < min_false_alarms) { + data->nrg_curr_state = IWL_FA_TOO_FEW; + + /* Compare silence level with silence level for most recent + * healthy number or too many false alarms */ + data->nrg_auto_corr_silence_diff = (s32)data->nrg_silence_ref - + (s32)silence_ref; + + IWL_DEBUG_CALIB("norm FA %u < min FA %u, silence diff %d\n", + false_alarms, min_false_alarms, + data->nrg_auto_corr_silence_diff); + + /* Increase value to increase sensitivity, but only if: + * 1a) previous beacon did *not* have *too many* false alarms + * 1b) AND there's a significant difference in Rx levels + * from a previous beacon with too many, or healthy # FAs + * OR 2) We've seen a lot of beacons (100) with too few + * false alarms */ + if ((data->nrg_prev_state != IWL_FA_TOO_MANY) && + ((data->nrg_auto_corr_silence_diff > NRG_DIFF) || + (data->num_in_cck_no_fa > MAX_NUMBER_CCK_NO_FA))) { + + IWL_DEBUG_CALIB("... increasing sensitivity\n"); + /* Increase nrg value to increase sensitivity */ + val = data->nrg_th_cck + NRG_STEP_CCK; + data->nrg_th_cck = min((u32)NRG_MIN_CCK, val); + + /* Decrease auto_corr values to increase sensitivity */ + val = data->auto_corr_cck - AUTO_CORR_STEP_CCK; + data->auto_corr_cck = max((u32)AUTO_CORR_MIN_CCK, val); + + val = data->auto_corr_cck_mrc - AUTO_CORR_STEP_CCK; + data->auto_corr_cck_mrc = + max((u32)AUTO_CORR_MIN_CCK_MRC, val); + + } else + IWL_DEBUG_CALIB("... but not changing sensitivity\n"); + + /* Else we got a healthy number of false alarms, keep status quo */ + } else { + IWL_DEBUG_CALIB(" FA in safe zone\n"); + data->nrg_curr_state = IWL_FA_GOOD_RANGE; + + /* Store for use in "fewer than desired" with later beacon */ + data->nrg_silence_ref = silence_ref; + + /* If previous beacon had too many false alarms, + * give it some extra margin by reducing sensitivity again + * (but don't go below measured energy of desired Rx) */ + if (IWL_FA_TOO_MANY == data->nrg_prev_state) { + IWL_DEBUG_CALIB("... increasing margin\n"); + data->nrg_th_cck -= NRG_MARGIN; + } + } + + /* Make sure the energy threshold does not go above the measured + * energy of the desired Rx signals (reduced by backoff margin), + * or else we might start missing Rx frames. 
+ * Lower value is higher energy, so we use max()! + */ + data->nrg_th_cck = max(max_nrg_cck, data->nrg_th_cck); + IWL_DEBUG_CALIB("new nrg_th_cck %u\n", data->nrg_th_cck); + + data->nrg_prev_state = data->nrg_curr_state; + + return 0; +} + + +static int iwl4965_sens_auto_corr_ofdm(struct iwl_priv *priv, + u32 norm_fa, + u32 rx_enable_time) +{ + u32 val; + u32 false_alarms = norm_fa * 200 * 1024; + u32 max_false_alarms = MAX_FA_OFDM * rx_enable_time; + u32 min_false_alarms = MIN_FA_OFDM * rx_enable_time; + struct iwl_sensitivity_data *data = NULL; + + data = &(priv->sensitivity_data); + + /* If we got too many false alarms this time, reduce sensitivity */ + if (false_alarms > max_false_alarms) { + + IWL_DEBUG_CALIB("norm FA %u > max FA %u)\n", + false_alarms, max_false_alarms); + + val = data->auto_corr_ofdm + AUTO_CORR_STEP_OFDM; + data->auto_corr_ofdm = + min((u32)AUTO_CORR_MAX_OFDM, val); + + val = data->auto_corr_ofdm_mrc + AUTO_CORR_STEP_OFDM; + data->auto_corr_ofdm_mrc = + min((u32)AUTO_CORR_MAX_OFDM_MRC, val); + + val = data->auto_corr_ofdm_x1 + AUTO_CORR_STEP_OFDM; + data->auto_corr_ofdm_x1 = + min((u32)AUTO_CORR_MAX_OFDM_X1, val); + + val = data->auto_corr_ofdm_mrc_x1 + AUTO_CORR_STEP_OFDM; + data->auto_corr_ofdm_mrc_x1 = + min((u32)AUTO_CORR_MAX_OFDM_MRC_X1, val); + } + + /* Else if we got fewer than desired, increase sensitivity */ + else if (false_alarms < min_false_alarms) { + + IWL_DEBUG_CALIB("norm FA %u < min FA %u\n", + false_alarms, min_false_alarms); + + val = data->auto_corr_ofdm - AUTO_CORR_STEP_OFDM; + data->auto_corr_ofdm = + max((u32)AUTO_CORR_MIN_OFDM, val); + + val = data->auto_corr_ofdm_mrc - AUTO_CORR_STEP_OFDM; + data->auto_corr_ofdm_mrc = + max((u32)AUTO_CORR_MIN_OFDM_MRC, val); + + val = data->auto_corr_ofdm_x1 - AUTO_CORR_STEP_OFDM; + data->auto_corr_ofdm_x1 = + max((u32)AUTO_CORR_MIN_OFDM_X1, val); + + val = data->auto_corr_ofdm_mrc_x1 - AUTO_CORR_STEP_OFDM; + data->auto_corr_ofdm_mrc_x1 = + max((u32)AUTO_CORR_MIN_OFDM_MRC_X1, val); + } + + else + IWL_DEBUG_CALIB("min FA %u < norm FA %u < max FA %u OK\n", + min_false_alarms, false_alarms, max_false_alarms); + + return 0; +} + +static int iwl_sensitivity_callback(struct iwl_priv *priv, + struct iwl_cmd *cmd, struct sk_buff *skb) +{ + /* We didn't cache the SKB; let the caller free it */ + return 1; +} + +/* Prepare a SENSITIVITY_CMD, send to uCode if values have changed */ +static int iwl4965_sensitivity_write(struct iwl_priv *priv, u8 flags) +{ + int rc = 0; + struct iwl_sensitivity_cmd cmd ; + struct iwl_sensitivity_data *data = NULL; + struct iwl_host_cmd cmd_out = { + .id = SENSITIVITY_CMD, + .len = sizeof(struct iwl_sensitivity_cmd), + .meta.flags = flags, + .data = &cmd, + }; + + data = &(priv->sensitivity_data); + + memset(&cmd, 0, sizeof(cmd)); + + cmd.table[HD_AUTO_CORR32_X4_TH_ADD_MIN_INDEX] = + cpu_to_le16((u16)data->auto_corr_ofdm); + cmd.table[HD_AUTO_CORR32_X4_TH_ADD_MIN_MRC_INDEX] = + cpu_to_le16((u16)data->auto_corr_ofdm_mrc); + cmd.table[HD_AUTO_CORR32_X1_TH_ADD_MIN_INDEX] = + cpu_to_le16((u16)data->auto_corr_ofdm_x1); + cmd.table[HD_AUTO_CORR32_X1_TH_ADD_MIN_MRC_INDEX] = + cpu_to_le16((u16)data->auto_corr_ofdm_mrc_x1); + + cmd.table[HD_AUTO_CORR40_X4_TH_ADD_MIN_INDEX] = + cpu_to_le16((u16)data->auto_corr_cck); + cmd.table[HD_AUTO_CORR40_X4_TH_ADD_MIN_MRC_INDEX] = + cpu_to_le16((u16)data->auto_corr_cck_mrc); + + cmd.table[HD_MIN_ENERGY_CCK_DET_INDEX] = + cpu_to_le16((u16)data->nrg_th_cck); + cmd.table[HD_MIN_ENERGY_OFDM_DET_INDEX] = + cpu_to_le16((u16)data->nrg_th_ofdm); + + 
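+ /* The remaining table entries below are fixed thresholds (Barker correlator and OFDM energy detection); the adaptive algorithm above never modifies them. */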
cmd.table[HD_BARKER_CORR_TH_ADD_MIN_INDEX] = + __constant_cpu_to_le16(190); + cmd.table[HD_BARKER_CORR_TH_ADD_MIN_MRC_INDEX] = + __constant_cpu_to_le16(390); + cmd.table[HD_OFDM_ENERGY_TH_IN_INDEX] = + __constant_cpu_to_le16(62); + + IWL_DEBUG_CALIB("ofdm: ac %u mrc %u x1 %u mrc_x1 %u thresh %u\n", + data->auto_corr_ofdm, data->auto_corr_ofdm_mrc, + data->auto_corr_ofdm_x1, data->auto_corr_ofdm_mrc_x1, + data->nrg_th_ofdm); + + IWL_DEBUG_CALIB("cck: ac %u mrc %u thresh %u\n", + data->auto_corr_cck, data->auto_corr_cck_mrc, + data->nrg_th_cck); + + cmd.control = SENSITIVITY_CMD_CONTROL_WORK_TABLE; + + if (flags & CMD_ASYNC) + cmd_out.meta.u.callback = iwl_sensitivity_callback; + + /* Don't send command to uCode if nothing has changed */ + if (!memcmp(&cmd.table[0], &(priv->sensitivity_tbl[0]), + sizeof(u16)*HD_TABLE_SIZE)) { + IWL_DEBUG_CALIB("No change in SENSITIVITY_CMD\n"); + return 0; + } + + /* Copy table for comparison next time */ + memcpy(&(priv->sensitivity_tbl[0]), &(cmd.table[0]), + sizeof(u16)*HD_TABLE_SIZE); + + rc = iwl_send_cmd(priv, &cmd_out); + if (!rc) { + IWL_DEBUG_CALIB("SENSITIVITY_CMD succeeded\n"); + return rc; + } + + return 0; +} + +void iwl4965_init_sensitivity(struct iwl_priv *priv, u8 flags, u8 force) +{ + int rc = 0; + int i; + struct iwl_sensitivity_data *data = NULL; + + IWL_DEBUG_CALIB("Start iwl4965_init_sensitivity\n"); + + if (force) + memset(&(priv->sensitivity_tbl[0]), 0, + sizeof(u16)*HD_TABLE_SIZE); + + /* Clear driver's sensitivity algo data */ + data = &(priv->sensitivity_data); + memset(data, 0, sizeof(struct iwl_sensitivity_data)); + + data->num_in_cck_no_fa = 0; + data->nrg_curr_state = IWL_FA_TOO_MANY; + data->nrg_prev_state = IWL_FA_TOO_MANY; + data->nrg_silence_ref = 0; + data->nrg_silence_idx = 0; + data->nrg_energy_idx = 0; + + for (i = 0; i < 10; i++) + data->nrg_value[i] = 0; + + for (i = 0; i < NRG_NUM_PREV_STAT_L; i++) + data->nrg_silence_rssi[i] = 0; + + data->auto_corr_ofdm = 90; + data->auto_corr_ofdm_mrc = 170; + data->auto_corr_ofdm_x1 = 105; + data->auto_corr_ofdm_mrc_x1 = 220; + data->auto_corr_cck = AUTO_CORR_CCK_MIN_VAL_DEF; + data->auto_corr_cck_mrc = 200; + data->nrg_th_cck = 100; + data->nrg_th_ofdm = 100; + + data->last_bad_plcp_cnt_ofdm = 0; + data->last_fa_cnt_ofdm = 0; + data->last_bad_plcp_cnt_cck = 0; + data->last_fa_cnt_cck = 0; + + /* Clear prior Sensitivity command data to force send to uCode */ + if (force) + memset(&(priv->sensitivity_tbl[0]), 0, + sizeof(u16)*HD_TABLE_SIZE); + + rc |= iwl4965_sensitivity_write(priv, flags); + IWL_DEBUG_CALIB("<<return 0x%X\n", rc); + + return; +} + +/* Reset differential Rx gains in NIC to prepare for chain noise calibration. + * Called after every association, but this runs only once! + * Once chain noise is calibrated the first time, it's good forever. */ +void iwl4965_chain_noise_reset(struct iwl_priv *priv) +{ + struct iwl_chain_noise_data *data = NULL; + int rc = 0; + + data = &(priv->chain_noise_data); + if ((data->state == IWL_CHAIN_NOISE_ALIVE) && iwl_is_associated(priv)) { + struct iwl_calibration_cmd cmd; + + memset(&cmd, 0, sizeof(cmd)); + cmd.opCode = PHY_CALIBRATE_DIFF_GAIN_CMD; + cmd.diff_gain_a = 0; + cmd.diff_gain_b = 0; + cmd.diff_gain_c = 0; + rc = iwl_send_cmd_pdu(priv, REPLY_PHY_CALIBRATION_CMD, + sizeof(cmd), &cmd); + msleep(4); + data->state = IWL_CHAIN_NOISE_ACCUMULATE; + IWL_DEBUG_CALIB("Run chain_noise_calibrate\n"); + } + return; +} + +/* + * Accumulate 20 beacons of signal and noise statistics for each of + * 3 receivers/antennas/rx-chains, then figure out: + * 1) Which antennas are connected. + * 2) Differential rx gain settings to balance the 3 receivers. 
+ */ +static void iwl4965_noise_calibration(struct iwl_priv *priv, + struct iwl_notif_statistics *stat_resp) +{ + struct iwl_chain_noise_data *data = NULL; + int rc = 0; + + u32 chain_noise_a; + u32 chain_noise_b; + u32 chain_noise_c; + u32 chain_sig_a; + u32 chain_sig_b; + u32 chain_sig_c; + u32 average_sig[NUM_RX_CHAINS] = {INITIALIZATION_VALUE}; + u32 average_noise[NUM_RX_CHAINS] = {INITIALIZATION_VALUE}; + u32 max_average_sig; + u16 max_average_sig_antenna_i; + u32 min_average_noise = MIN_AVERAGE_NOISE_MAX_VALUE; + u16 min_average_noise_antenna_i = INITIALIZATION_VALUE; + u16 i = 0; + u16 chan_num = INITIALIZATION_VALUE; + u32 band = INITIALIZATION_VALUE; + u32 active_chains = 0; + unsigned long flags; + struct statistics_rx_non_phy *rx_info = &(stat_resp->rx.general); + + data = &(priv->chain_noise_data); + + /* Accumulate just the first 20 beacons after the first association, + * then we're done forever. */ + if (data->state != IWL_CHAIN_NOISE_ACCUMULATE) { + if (data->state == IWL_CHAIN_NOISE_ALIVE) + IWL_DEBUG_CALIB("Wait for noise calib reset\n"); + return; + } + + spin_lock_irqsave(&priv->lock, flags); + if (rx_info->interference_data_flag != INTERFERENCE_DATA_AVAILABLE) { + IWL_DEBUG_CALIB(" << Interference data unavailable\n"); + spin_unlock_irqrestore(&priv->lock, flags); + return; + } + + band = (priv->staging_rxon.flags & RXON_FLG_BAND_24G_MSK) ? 0 : 1; + chan_num = le16_to_cpu(priv->staging_rxon.channel); + + /* Make sure we accumulate data for just the associated channel + * (even if scanning). */ + if ((chan_num != (le32_to_cpu(stat_resp->flag) >> 16)) || + ((STATISTICS_REPLY_FLG_BAND_24G_MSK == + (stat_resp->flag & STATISTICS_REPLY_FLG_BAND_24G_MSK)) && band)) { + IWL_DEBUG_CALIB("Stats not from chan=%d, band=%d\n", + chan_num, band); + spin_unlock_irqrestore(&priv->lock, flags); + return; + } + + /* Accumulate beacon statistics values across 20 beacons */ + chain_noise_a = le32_to_cpu(rx_info->beacon_silence_rssi_a) & + IN_BAND_FILTER; + chain_noise_b = le32_to_cpu(rx_info->beacon_silence_rssi_b) & + IN_BAND_FILTER; + chain_noise_c = le32_to_cpu(rx_info->beacon_silence_rssi_c) & + IN_BAND_FILTER; + + chain_sig_a = le32_to_cpu(rx_info->beacon_rssi_a) & IN_BAND_FILTER; + chain_sig_b = le32_to_cpu(rx_info->beacon_rssi_b) & IN_BAND_FILTER; + chain_sig_c = le32_to_cpu(rx_info->beacon_rssi_c) & IN_BAND_FILTER; + + spin_unlock_irqrestore(&priv->lock, flags); + + data->beacon_count++; + + data->chain_noise_a = (chain_noise_a + data->chain_noise_a); + data->chain_noise_b = (chain_noise_b + data->chain_noise_b); + data->chain_noise_c = (chain_noise_c + data->chain_noise_c); + + data->chain_signal_a = (chain_sig_a + data->chain_signal_a); + data->chain_signal_b = (chain_sig_b + data->chain_signal_b); + data->chain_signal_c = (chain_sig_c + data->chain_signal_c); + + IWL_DEBUG_CALIB("chan=%d, band=%d, beacon=%d\n", chan_num, band, + data->beacon_count); + IWL_DEBUG_CALIB("chain_sig: a %d b %d c %d\n", + chain_sig_a, chain_sig_b, chain_sig_c); + IWL_DEBUG_CALIB("chain_noise: a %d b %d c %d\n", + chain_noise_a, chain_noise_b, chain_noise_c); + + /* If this is the 20th beacon, determine: + * 1) Disconnected antennas (using signal strengths) + * 2) Differential gain (using silence noise) to balance receivers */ + if (data->beacon_count == CAL_NUM_OF_BEACONS) { + + /* Analyze signal for disconnected antenna */ + average_sig[0] = (data->chain_signal_a) / CAL_NUM_OF_BEACONS; + average_sig[1] = (data->chain_signal_b) / CAL_NUM_OF_BEACONS; + average_sig[2] = (data->chain_signal_c) / 
CAL_NUM_OF_BEACONS; + + if (average_sig[0] >= average_sig[1]) { + max_average_sig = average_sig[0]; + max_average_sig_antenna_i = 0; + active_chains = (1 << max_average_sig_antenna_i); + } else { + max_average_sig = average_sig[1]; + max_average_sig_antenna_i = 1; + active_chains = (1 << max_average_sig_antenna_i); + } + + if (average_sig[2] >= max_average_sig) { + max_average_sig = average_sig[2]; + max_average_sig_antenna_i = 2; + active_chains = (1 << max_average_sig_antenna_i); + } + + IWL_DEBUG_CALIB("average_sig: a %d b %d c %d\n", + average_sig[0], average_sig[1], average_sig[2]); + IWL_DEBUG_CALIB("max_average_sig = %d, antenna %d\n", + max_average_sig, max_average_sig_antenna_i); + + /* Compare signal strengths for all 3 receivers. */ + for (i = 0; i < NUM_RX_CHAINS; i++) { + if (i != max_average_sig_antenna_i) { + s32 rssi_delta = (max_average_sig - + average_sig[i]); + + /* If signal is very weak, compared with + * strongest, mark it as disconnected. */ + if (rssi_delta > MAXIMUM_ALLOWED_PATHLOSS) + data->disconn_array[i] = 1; + else + active_chains |= (1 << i); + IWL_DEBUG_CALIB("i = %d rssiDelta = %d " + "disconn_array[i] = %d\n", + i, rssi_delta, data->disconn_array[i]); + } + } + + /*If both chains A & B are disconnected - + * connect B and leave A as is */ + if (data->disconn_array[CHAIN_A] && + data->disconn_array[CHAIN_B]) { + data->disconn_array[CHAIN_B] = 0; + active_chains |= (1 << CHAIN_B); + IWL_DEBUG_CALIB("both A & B chains are disconnected! " + "W/A - declare B as connected\n"); + } + + IWL_DEBUG_CALIB("active_chains (bitwise) = 0x%x\n", + active_chains); + + /* Save for use within RXON, TX, SCAN commands, etc. */ + priv->valid_antenna = active_chains; + + /* Analyze noise for rx balance */ + average_noise[0] = ((data->chain_noise_a)/CAL_NUM_OF_BEACONS); + average_noise[1] = ((data->chain_noise_b)/CAL_NUM_OF_BEACONS); + average_noise[2] = ((data->chain_noise_c)/CAL_NUM_OF_BEACONS); + + for (i = 0; i < NUM_RX_CHAINS; i++) { + if (!(data->disconn_array[i]) && + (average_noise[i] <= min_average_noise)) { + /* This means that chain i is active and has + * lower noise values so far: */ + min_average_noise = average_noise[i]; + min_average_noise_antenna_i = i; + } + } + + data->delta_gain_code[min_average_noise_antenna_i] = 0; + + IWL_DEBUG_CALIB("average_noise: a %d b %d c %d\n", + average_noise[0], average_noise[1], + average_noise[2]); + + IWL_DEBUG_CALIB("min_average_noise = %d, antenna %d\n", + min_average_noise, min_average_noise_antenna_i); + + for (i = 0; i < NUM_RX_CHAINS; i++) { + s32 delta_g = 0; + + if (!(data->disconn_array[i]) && + (data->delta_gain_code[i] == + CHAIN_NOISE_DELTA_GAIN_INIT_VAL)) { + delta_g = average_noise[i] - min_average_noise; + data->delta_gain_code[i] = (u8)((delta_g * + 10) / 15); + if (CHAIN_NOISE_MAX_DELTA_GAIN_CODE < + data->delta_gain_code[i]) + data->delta_gain_code[i] = + CHAIN_NOISE_MAX_DELTA_GAIN_CODE; + + data->delta_gain_code[i] = + (data->delta_gain_code[i] | (1 << 2)); + } else + data->delta_gain_code[i] = 0; + } + IWL_DEBUG_CALIB("delta_gain_codes: a %d b %d c %d\n", + data->delta_gain_code[0], + data->delta_gain_code[1], + data->delta_gain_code[2]); + + /* Differential gain gets sent to uCode only once */ + if (!data->radio_write) { + struct iwl_calibration_cmd cmd; + data->radio_write = 1; + + memset(&cmd, 0, sizeof(cmd)); + cmd.opCode = PHY_CALIBRATE_DIFF_GAIN_CMD; + cmd.diff_gain_a = data->delta_gain_code[0]; + cmd.diff_gain_b = data->delta_gain_code[1]; + cmd.diff_gain_c = data->delta_gain_code[2]; + rc = 
iwl_send_cmd_pdu(priv, REPLY_PHY_CALIBRATION_CMD, + sizeof(cmd), &cmd); + if (rc) + IWL_DEBUG_CALIB("fail sending cmd " + "REPLY_PHY_CALIBRATION_CMD \n"); + + /* TODO we might want recalculate + * rx_chain in rxon cmd */ + + /* Mark so we run this algo only once! */ + data->state = IWL_CHAIN_NOISE_CALIBRATED; + } + data->chain_noise_a = 0; + data->chain_noise_b = 0; + data->chain_noise_c = 0; + data->chain_signal_a = 0; + data->chain_signal_b = 0; + data->chain_signal_c = 0; + data->beacon_count = 0; + } + return; +} + +static void iwl4965_sensitivity_calibration(struct iwl_priv *priv, + struct iwl_notif_statistics *resp) +{ + int rc = 0; + u32 rx_enable_time; + u32 fa_cck; + u32 fa_ofdm; + u32 bad_plcp_cck; + u32 bad_plcp_ofdm; + u32 norm_fa_ofdm; + u32 norm_fa_cck; + struct iwl_sensitivity_data *data = NULL; + struct statistics_rx_non_phy *rx_info = &(resp->rx.general); + struct statistics_rx *statistics = &(resp->rx); + unsigned long flags; + struct statistics_general_data statis; + + data = &(priv->sensitivity_data); + + if (!iwl_is_associated(priv)) { + IWL_DEBUG_CALIB("<< - not associated\n"); + return; + } + + spin_lock_irqsave(&priv->lock, flags); + if (rx_info->interference_data_flag != INTERFERENCE_DATA_AVAILABLE) { + IWL_DEBUG_CALIB("<< invalid data.\n"); + spin_unlock_irqrestore(&priv->lock, flags); + return; + } + + /* Extract Statistics: */ + rx_enable_time = le32_to_cpu(rx_info->channel_load); + fa_cck = le32_to_cpu(statistics->cck.false_alarm_cnt); + fa_ofdm = le32_to_cpu(statistics->ofdm.false_alarm_cnt); + bad_plcp_cck = le32_to_cpu(statistics->cck.plcp_err); + bad_plcp_ofdm = le32_to_cpu(statistics->ofdm.plcp_err); + + statis.beacon_silence_rssi_a = + le32_to_cpu(statistics->general.beacon_silence_rssi_a); + statis.beacon_silence_rssi_b = + le32_to_cpu(statistics->general.beacon_silence_rssi_b); + statis.beacon_silence_rssi_c = + le32_to_cpu(statistics->general.beacon_silence_rssi_c); + statis.beacon_energy_a = + le32_to_cpu(statistics->general.beacon_energy_a); + statis.beacon_energy_b = + le32_to_cpu(statistics->general.beacon_energy_b); + statis.beacon_energy_c = + le32_to_cpu(statistics->general.beacon_energy_c); + + spin_unlock_irqrestore(&priv->lock, flags); + + IWL_DEBUG_CALIB("rx_enable_time = %u usecs\n", rx_enable_time); + + if (!rx_enable_time) { + IWL_DEBUG_CALIB("<< RX Enable Time == 0! \n"); + return; + } + + /* These statistics increase monotonically, and do not reset + * at each beacon. Calculate difference from last value, or just + * use the new statistics value if it has reset or wrapped around. 
*/ + if (data->last_bad_plcp_cnt_cck > bad_plcp_cck) + data->last_bad_plcp_cnt_cck = bad_plcp_cck; + else { + bad_plcp_cck -= data->last_bad_plcp_cnt_cck; + data->last_bad_plcp_cnt_cck += bad_plcp_cck; + } + + if (data->last_bad_plcp_cnt_ofdm > bad_plcp_ofdm) + data->last_bad_plcp_cnt_ofdm = bad_plcp_ofdm; + else { + bad_plcp_ofdm -= data->last_bad_plcp_cnt_ofdm; + data->last_bad_plcp_cnt_ofdm += bad_plcp_ofdm; + } + + if (data->last_fa_cnt_ofdm > fa_ofdm) + data->last_fa_cnt_ofdm = fa_ofdm; + else { + fa_ofdm -= data->last_fa_cnt_ofdm; + data->last_fa_cnt_ofdm += fa_ofdm; + } + + if (data->last_fa_cnt_cck > fa_cck) + data->last_fa_cnt_cck = fa_cck; + else { + fa_cck -= data->last_fa_cnt_cck; + data->last_fa_cnt_cck += fa_cck; + } + + /* Total aborted signal locks */ + norm_fa_ofdm = fa_ofdm + bad_plcp_ofdm; + norm_fa_cck = fa_cck + bad_plcp_cck; + + IWL_DEBUG_CALIB("cck: fa %u badp %u ofdm: fa %u badp %u\n", fa_cck, + bad_plcp_cck, fa_ofdm, bad_plcp_ofdm); + + iwl4965_sens_auto_corr_ofdm(priv, norm_fa_ofdm, rx_enable_time); + iwl4965_sens_energy_cck(priv, norm_fa_cck, rx_enable_time, &statis); + rc |= iwl4965_sensitivity_write(priv, CMD_ASYNC); + + return; +} + +static void iwl4965_bg_sensitivity_work(struct work_struct *work) +{ + struct iwl_priv *priv = container_of(work, struct iwl_priv, + sensitivity_work); + + mutex_lock(&priv->mutex); + + if (test_bit(STATUS_EXIT_PENDING, &priv->status) || + test_bit(STATUS_SCANNING, &priv->status)) { + mutex_unlock(&priv->mutex); + return; + } + + if (priv->start_calib) { + iwl4965_noise_calibration(priv, &priv->statistics); + + if (priv->sensitivity_data.state == + IWL_SENS_CALIB_NEED_REINIT) { + iwl4965_init_sensitivity(priv, CMD_ASYNC, 0); + priv->sensitivity_data.state = IWL_SENS_CALIB_ALLOWED; + } else + iwl4965_sensitivity_calibration(priv, + &priv->statistics); + } + + mutex_unlock(&priv->mutex); + return; +} +#endif /*CONFIG_IWLWIFI_SENSITIVITY*/ + +static void iwl4965_bg_txpower_work(struct work_struct *work) +{ + struct iwl_priv *priv = container_of(work, struct iwl_priv, + txpower_work); + + /* If a scan happened to start before we got here + * then just return; the statistics notification will + * kick off another scheduled work to compensate for + * any temperature delta we missed here. */ + if (test_bit(STATUS_EXIT_PENDING, &priv->status) || + test_bit(STATUS_SCANNING, &priv->status)) + return; + + mutex_lock(&priv->mutex); + + /* Regardless of whether we are associated, we must reconfigure the + * TX power since frames can be sent on non-radar channels while + * not associated */ + iwl_hw_reg_send_txpower(priv); + + /* Update last_temperature to keep is_calib_needed from running + * when it isn't needed... */ + priv->last_temperature = priv->temperature; + + mutex_unlock(&priv->mutex); +} + +/* + * Acquire priv->lock before calling this function ! + */ +static void iwl4965_set_wr_ptrs(struct iwl_priv *priv, int txq_id, u32 index) +{ + iwl_write_restricted(priv, HBUS_TARG_WRPTR, + (index & 0xff) | (txq_id << 8)); + iwl_write_restricted_reg(priv, SCD_QUEUE_RDPTR(txq_id), index); +} + +/* + * Acquire priv->lock before calling this function ! 
+ */ +static void iwl4965_tx_queue_set_status(struct iwl_priv *priv, + struct iwl_tx_queue *txq, + int tx_fifo_id, int scd_retry) +{ + int txq_id = txq->q.id; + int active = test_bit(txq_id, &priv->txq_ctx_active_msk)?1:0; + + iwl_write_restricted_reg(priv, SCD_QUEUE_STATUS_BITS(txq_id), + (active << SCD_QUEUE_STTS_REG_POS_ACTIVE) | + (tx_fifo_id << SCD_QUEUE_STTS_REG_POS_TXF) | + (scd_retry << SCD_QUEUE_STTS_REG_POS_WSL) | + (scd_retry << SCD_QUEUE_STTS_REG_POS_SCD_ACK) | + SCD_QUEUE_STTS_REG_MSK); + + txq->sched_retry = scd_retry; + + IWL_DEBUG_INFO("%s %s Queue %d on AC %d\n", + active ? "Activate" : "Deactivate", + scd_retry ? "BA" : "AC", txq_id, tx_fifo_id); +} + +static const u16 default_queue_to_tx_fifo[] = { + IWL_TX_FIFO_AC3, + IWL_TX_FIFO_AC2, + IWL_TX_FIFO_AC1, + IWL_TX_FIFO_AC0, + IWL_CMD_FIFO_NUM, + IWL_TX_FIFO_HCCA_1, + IWL_TX_FIFO_HCCA_2 +}; + +static inline void iwl4965_txq_ctx_activate(struct iwl_priv *priv, int txq_id) +{ + set_bit(txq_id, &priv->txq_ctx_active_msk); +} + +static inline void iwl4965_txq_ctx_deactivate(struct iwl_priv *priv, int txq_id) +{ + clear_bit(txq_id, &priv->txq_ctx_active_msk); +} + +int iwl4965_alive_notify(struct iwl_priv *priv) +{ + u32 a; + int i = 0; + unsigned long flags; + int rc; + + spin_lock_irqsave(&priv->lock, flags); + +#ifdef CONFIG_IWLWIFI_SENSITIVITY + memset(&(priv->sensitivity_data), 0, + sizeof(struct iwl_sensitivity_data)); + memset(&(priv->chain_noise_data), 0, + sizeof(struct iwl_chain_noise_data)); + for (i = 0; i < NUM_RX_CHAINS; i++) + priv->chain_noise_data.delta_gain_code[i] = + CHAIN_NOISE_DELTA_GAIN_INIT_VAL; +#endif /* CONFIG_IWLWIFI_SENSITIVITY*/ + rc = iwl_grab_restricted_access(priv); + if (rc) { + spin_unlock_irqrestore(&priv->lock, flags); + return rc; + } + + priv->scd_base_addr = iwl_read_restricted_reg(priv, SCD_SRAM_BASE_ADDR); + a = priv->scd_base_addr + SCD_CONTEXT_DATA_OFFSET; + for (; a < priv->scd_base_addr + SCD_TX_STTS_BITMAP_OFFSET; a += 4) + iwl_write_restricted_mem(priv, a, 0); + for (; a < priv->scd_base_addr + SCD_TRANSLATE_TBL_OFFSET; a += 4) + iwl_write_restricted_mem(priv, a, 0); + for (; a < priv->scd_base_addr + SCD_TRANSLATE_TBL_OFFSET + sizeof(u16) * priv->hw_setting.max_txq_num; a += 4) + iwl_write_restricted_mem(priv, a, 0); + + iwl_write_restricted_reg(priv, SCD_DRAM_BASE_ADDR, + (priv->hw_setting.shared_phys + + offsetof(struct iwl_shared, queues_byte_cnt_tbls)) >> 10); + iwl_write_restricted_reg(priv, SCD_QUEUECHAIN_SEL, 0); + + /* initiate the queues */ + for (i = 0; i < priv->hw_setting.max_txq_num; i++) { + iwl_write_restricted_reg(priv, SCD_QUEUE_RDPTR(i), 0); + iwl_write_restricted(priv, HBUS_TARG_WRPTR, 0 | (i << 8)); + iwl_write_restricted_mem(priv, priv->scd_base_addr + + SCD_CONTEXT_QUEUE_OFFSET(i), + (SCD_WIN_SIZE << + SCD_QUEUE_CTX_REG1_WIN_SIZE_POS) & + SCD_QUEUE_CTX_REG1_WIN_SIZE_MSK); + iwl_write_restricted_mem(priv, priv->scd_base_addr + + SCD_CONTEXT_QUEUE_OFFSET(i) + + sizeof(u32), + (SCD_FRAME_LIMIT << + SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) & + SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK); + + } + iwl_write_restricted_reg(priv, SCD_INTERRUPT_MASK, + (1 << priv->hw_setting.max_txq_num) - 1); + + iwl_write_restricted_reg(priv, SCD_TXFACT, + SCD_TXFACT_REG_TXFIFO_MASK(0, 7)); + + iwl4965_set_wr_ptrs(priv, IWL_CMD_QUEUE_NUM, 0); + /* map qos queues to fifos one-to-one */ + for (i = 0; i < ARRAY_SIZE(default_queue_to_tx_fifo); i++) { + int ac = default_queue_to_tx_fifo[i]; + iwl4965_txq_ctx_activate(priv, i); + iwl4965_tx_queue_set_status(priv, &priv->txq[i], ac, 0); + } + + iwl_release_restricted_access(priv); + 
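+ /* Scheduler and Tx queue setup is complete and NIC access has been released; drop the lock and report success. */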
spin_unlock_irqrestore(&priv->lock, flags); + + return 0; +} + +int iwl_hw_set_hw_setting(struct iwl_priv *priv) +{ + priv->hw_setting.shared_virt = + pci_alloc_consistent(priv->pci_dev, + sizeof(struct iwl_shared), + &priv->hw_setting.shared_phys); + + if (!priv->hw_setting.shared_virt) + return -1; + + memset(priv->hw_setting.shared_virt, 0, sizeof(struct iwl_shared)); + + priv->hw_setting.max_txq_num = iwl_param_queues_num; + priv->hw_setting.ac_queue_count = AC_NUM; + + priv->hw_setting.cck_flag = RATE_MCS_CCK_MSK; + priv->hw_setting.tx_cmd_len = sizeof(struct iwl_tx_cmd); + priv->hw_setting.max_rxq_size = RX_QUEUE_SIZE; + priv->hw_setting.max_rxq_log = RX_QUEUE_SIZE_LOG; + + priv->hw_setting.max_stations = IWL4965_STATION_COUNT; + priv->hw_setting.bcast_sta_id = IWL4965_BROADCAST_ID; + return 0; +} + +/** + * iwl_hw_txq_ctx_free - Free TXQ Context + * + * Destroy all TX DMA queues and structures + */ +void iwl_hw_txq_ctx_free(struct iwl_priv *priv) +{ + int txq_id; + + /* Tx queues */ + for (txq_id = 0; txq_id < priv->hw_setting.max_txq_num; txq_id++) + iwl_tx_queue_free(priv, &priv->txq[txq_id]); + + iwl4965_kw_free(priv); +} + +/** + * iwl_hw_txq_free_tfd - Free one TFD, those at index [txq->q.last_used] + * + * Does NOT advance any indexes + */ +int iwl_hw_txq_free_tfd(struct iwl_priv *priv, struct iwl_tx_queue *txq) +{ + struct iwl_tfd_frame *bd_tmp = (struct iwl_tfd_frame *)&txq->bd[0]; + struct iwl_tfd_frame *bd = &bd_tmp[txq->q.last_used]; + struct pci_dev *dev = priv->pci_dev; + int i; + int counter = 0; + int index, is_odd; + + /* classify bd */ + if (txq->q.id == IWL_CMD_QUEUE_NUM) + /* nothing to cleanup after for host commands */ + return 0; + + /* sanity check */ + counter = IWL_GET_BITS(*bd, num_tbs); + if (counter > MAX_NUM_OF_TBS) { + IWL_ERROR("Too many chunks: %i\n", counter); + /* @todo issue fatal error, it is quite serious situation */ + return 0; + } + + /* unmap chunks if any */ + + for (i = 0; i < counter; i++) { + index = i / 2; + is_odd = i & 0x1; + + if (is_odd) + pci_unmap_single( + dev, + IWL_GET_BITS(bd->pa[index], tb2_addr_lo16) | + (IWL_GET_BITS(bd->pa[index], + tb2_addr_hi20) << 16), + IWL_GET_BITS(bd->pa[index], tb2_len), + PCI_DMA_TODEVICE); + + else if (i > 0) + pci_unmap_single(dev, + le32_to_cpu(bd->pa[index].tb1_addr), + IWL_GET_BITS(bd->pa[index], tb1_len), + PCI_DMA_TODEVICE); + + if (txq->txb[txq->q.last_used].skb[i]) { + struct sk_buff *skb = txq->txb[txq->q.last_used].skb[i]; + + dev_kfree_skb(skb); + txq->txb[txq->q.last_used].skb[i] = NULL; + } + } + return 0; +} + +int iwl_hw_reg_set_txpower(struct iwl_priv *priv, s8 power) +{ + IWL_ERROR("TODO: Implement iwl_hw_reg_set_txpower!\n"); + return -EINVAL; +} + +static s32 iwl4965_math_div_round(s32 num, s32 denom, s32 *res) +{ + s32 sign = 1; + + if (num < 0) { + sign = -sign; + num = -num; + } + if (denom < 0) { + sign = -sign; + denom = -denom; + } + *res = 1; + *res = ((num * 2 + denom) / (denom * 2)) * sign; + + return 1; +} + +static s32 iwl4965_get_voltage_compensation(s32 eeprom_voltage, + s32 current_voltage) +{ + s32 comp = 0; + + if ((TX_POWER_IWL_ILLEGAL_VOLTAGE == eeprom_voltage) || + (TX_POWER_IWL_ILLEGAL_VOLTAGE == current_voltage)) + return 0; + + iwl4965_math_div_round(current_voltage - eeprom_voltage, + TX_POWER_IWL_VOLTAGE_CODES_PER_03V, &comp); + + if (current_voltage > eeprom_voltage) + comp *= 2; + if ((comp < -2) || (comp > 2)) + comp = 0; + + return comp; +} + +static const struct iwl_channel_info * +iwl4965_get_channel_txpower_info(struct iwl_priv *priv, u8 phymode, 
u16 channel) +{ + const struct iwl_channel_info *ch_info; + + ch_info = iwl_get_channel_info(priv, phymode, channel); + + if (!is_channel_valid(ch_info)) + return NULL; + + return ch_info; +} + +static s32 iwl4965_get_tx_atten_grp(u16 channel) +{ + if (channel >= CALIB_IWL_TX_ATTEN_GR5_FCH && + channel <= CALIB_IWL_TX_ATTEN_GR5_LCH) + return CALIB_CH_GROUP_5; + + if (channel >= CALIB_IWL_TX_ATTEN_GR1_FCH && + channel <= CALIB_IWL_TX_ATTEN_GR1_LCH) + return CALIB_CH_GROUP_1; + + if (channel >= CALIB_IWL_TX_ATTEN_GR2_FCH && + channel <= CALIB_IWL_TX_ATTEN_GR2_LCH) + return CALIB_CH_GROUP_2; + + if (channel >= CALIB_IWL_TX_ATTEN_GR3_FCH && + channel <= CALIB_IWL_TX_ATTEN_GR3_LCH) + return CALIB_CH_GROUP_3; + + if (channel >= CALIB_IWL_TX_ATTEN_GR4_FCH && + channel <= CALIB_IWL_TX_ATTEN_GR4_LCH) + return CALIB_CH_GROUP_4; + + IWL_ERROR("Can't find txatten group for channel %d.\n", channel); + return -1; +} + +static u32 iwl4965_get_sub_band(const struct iwl_priv *priv, u32 channel) +{ + s32 b = -1; + + for (b = 0; b < EEPROM_TX_POWER_BANDS; b++) { + if (priv->eeprom.calib_info.band_info[b].ch_from == 0) + continue; + + if ((channel >= priv->eeprom.calib_info.band_info[b].ch_from) + && (channel <= priv->eeprom.calib_info.band_info[b].ch_to)) + break; + } + + return b; +} + +static s32 iwl4965_interpolate_value(s32 x, s32 x1, s32 y1, s32 x2, s32 y2) +{ + s32 val; + + if (x2 == x1) + return y1; + else { + iwl4965_math_div_round((x2 - x) * (y1 - y2), (x2 - x1), &val); + return val + y2; + } +} + +static int iwl4965_interpolate_chan(struct iwl_priv *priv, u32 channel, + struct iwl_eeprom_calib_ch_info *chan_info) +{ + s32 s = -1; + u32 c; + u32 m; + const struct iwl_eeprom_calib_measure *m1; + const struct iwl_eeprom_calib_measure *m2; + struct iwl_eeprom_calib_measure *omeas; + u32 ch_i1; + u32 ch_i2; + + s = iwl4965_get_sub_band(priv, channel); + if (s >= EEPROM_TX_POWER_BANDS) { + IWL_ERROR("Tx Power can not find channel %d ", channel); + return -1; + } + + ch_i1 = priv->eeprom.calib_info.band_info[s].ch1.ch_num; + ch_i2 = priv->eeprom.calib_info.band_info[s].ch2.ch_num; + chan_info->ch_num = (u8) channel; + + IWL_DEBUG_TXPOWER("channel %d subband %d factory cal ch %d & %d\n", + channel, s, ch_i1, ch_i2); + + for (c = 0; c < EEPROM_TX_POWER_TX_CHAINS; c++) { + for (m = 0; m < EEPROM_TX_POWER_MEASUREMENTS; m++) { + m1 = &(priv->eeprom.calib_info.band_info[s].ch1. + measurements[c][m]); + m2 = &(priv->eeprom.calib_info.band_info[s].ch2. 
+ measurements[c][m]); + omeas = &(chan_info->measurements[c][m]); + + omeas->actual_pow = + (u8) iwl4965_interpolate_value(channel, ch_i1, + m1->actual_pow, + ch_i2, + m2->actual_pow); + omeas->gain_idx = + (u8) iwl4965_interpolate_value(channel, ch_i1, + m1->gain_idx, ch_i2, + m2->gain_idx); + omeas->temperature = + (u8) iwl4965_interpolate_value(channel, ch_i1, + m1->temperature, + ch_i2, + m2->temperature); + omeas->pa_det = + (s8) iwl4965_interpolate_value(channel, ch_i1, + m1->pa_det, ch_i2, + m2->pa_det); + + IWL_DEBUG_TXPOWER + ("chain %d meas %d AP1=%d AP2=%d AP=%d\n", c, m, + m1->actual_pow, m2->actual_pow, omeas->actual_pow); + IWL_DEBUG_TXPOWER + ("chain %d meas %d NI1=%d NI2=%d NI=%d\n", c, m, + m1->gain_idx, m2->gain_idx, omeas->gain_idx); + IWL_DEBUG_TXPOWER + ("chain %d meas %d PA1=%d PA2=%d PA=%d\n", c, m, + m1->pa_det, m2->pa_det, omeas->pa_det); + IWL_DEBUG_TXPOWER + ("chain %d meas %d T1=%d T2=%d T=%d\n", c, m, + m1->temperature, m2->temperature, + omeas->temperature); + } + } + + return 0; +} + +/* bit-rate-dependent table to prevent Tx distortion, in half-dB units, + * for OFDM 6, 12, 18, 24, 36, 48, 54, 60 MBit, and CCK all rates. */ +static s32 back_off_table[] = { + 10, 10, 10, 10, 10, 15, 17, 20, /* OFDM SISO 20 MHz */ + 10, 10, 10, 10, 10, 15, 17, 20, /* OFDM MIMO 20 MHz */ + 10, 10, 10, 10, 10, 15, 17, 20, /* OFDM SISO 40 MHz */ + 10, 10, 10, 10, 10, 15, 17, 20, /* OFDM MIMO 40 MHz */ + 10 /* CCK */ +}; + +/* Thermal compensation values for txpower for various frequency ranges ... + * ratios from 3:1 to 4.5:1 of degrees (Celsius) per half-dB gain adjust */ +static struct iwl_txpower_comp_entry { + s32 degrees_per_05db_a; + s32 degrees_per_05db_a_denom; +} tx_power_cmp_tble[CALIB_CH_GROUP_MAX] = { + {9, 2}, /* group 0 5.2, ch 34-43 */ + {4, 1}, /* group 1 5.2, ch 44-70 */ + {4, 1}, /* group 2 5.2, ch 71-124 */ + {4, 1}, /* group 3 5.2, ch 125-200 */ + {3, 1} /* group 4 2.4, ch all */ +}; + +static s32 get_min_power_index(s32 rate_power_index, u32 band) +{ + if (!band) { + if ((rate_power_index & 7) <= 4) + return MIN_TX_GAIN_INDEX_52GHZ_EXT; + } + return MIN_TX_GAIN_INDEX; +} + +struct gain_entry { + u8 dsp; + u8 radio; +}; + +static const struct gain_entry gain_table[2][108] = { + /* 5.2GHz power gain index table */ + { + {123, 0x3F}, /* highest txpower */ + {117, 0x3F}, + {110, 0x3F}, + {104, 0x3F}, + {98, 0x3F}, + {110, 0x3E}, + {104, 0x3E}, + {98, 0x3E}, + {110, 0x3D}, + {104, 0x3D}, + {98, 0x3D}, + {110, 0x3C}, + {104, 0x3C}, + {98, 0x3C}, + {110, 0x3B}, + {104, 0x3B}, + {98, 0x3B}, + {110, 0x3A}, + {104, 0x3A}, + {98, 0x3A}, + {110, 0x39}, + {104, 0x39}, + {98, 0x39}, + {110, 0x38}, + {104, 0x38}, + {98, 0x38}, + {110, 0x37}, + {104, 0x37}, + {98, 0x37}, + {110, 0x36}, + {104, 0x36}, + {98, 0x36}, + {110, 0x35}, + {104, 0x35}, + {98, 0x35}, + {110, 0x34}, + {104, 0x34}, + {98, 0x34}, + {110, 0x33}, + {104, 0x33}, + {98, 0x33}, + {110, 0x32}, + {104, 0x32}, + {98, 0x32}, + {110, 0x31}, + {104, 0x31}, + {98, 0x31}, + {110, 0x30}, + {104, 0x30}, + {98, 0x30}, + {110, 0x25}, + {104, 0x25}, + {98, 0x25}, + {110, 0x24}, + {104, 0x24}, + {98, 0x24}, + {110, 0x23}, + {104, 0x23}, + {98, 0x23}, + {110, 0x22}, + {104, 0x18}, + {98, 0x18}, + {110, 0x17}, + {104, 0x17}, + {98, 0x17}, + {110, 0x16}, + {104, 0x16}, + {98, 0x16}, + {110, 0x15}, + {104, 0x15}, + {98, 0x15}, + {110, 0x14}, + {104, 0x14}, + {98, 0x14}, + {110, 0x13}, + {104, 0x13}, + {98, 0x13}, + {110, 0x12}, + {104, 0x08}, + {98, 0x08}, + {110, 0x07}, + {104, 0x07}, + {98, 0x07}, + {110, 0x06}, + {104, 
0x06}, + {98, 0x06}, + {110, 0x05}, + {104, 0x05}, + {98, 0x05}, + {110, 0x04}, + {104, 0x04}, + {98, 0x04}, + {110, 0x03}, + {104, 0x03}, + {98, 0x03}, + {110, 0x02}, + {104, 0x02}, + {98, 0x02}, + {110, 0x01}, + {104, 0x01}, + {98, 0x01}, + {110, 0x00}, + {104, 0x00}, + {98, 0x00}, + {93, 0x00}, + {88, 0x00}, + {83, 0x00}, + {78, 0x00}, + }, + /* 2.4GHz power gain index table */ + { + {110, 0x3f}, /* highest txpower */ + {104, 0x3f}, + {98, 0x3f}, + {110, 0x3e}, + {104, 0x3e}, + {98, 0x3e}, + {110, 0x3d}, + {104, 0x3d}, + {98, 0x3d}, + {110, 0x3c}, + {104, 0x3c}, + {98, 0x3c}, + {110, 0x3b}, + {104, 0x3b}, + {98, 0x3b}, + {110, 0x3a}, + {104, 0x3a}, + {98, 0x3a}, + {110, 0x39}, + {104, 0x39}, + {98, 0x39}, + {110, 0x38}, + {104, 0x38}, + {98, 0x38}, + {110, 0x37}, + {104, 0x37}, + {98, 0x37}, + {110, 0x36}, + {104, 0x36}, + {98, 0x36}, + {110, 0x35}, + {104, 0x35}, + {98, 0x35}, + {110, 0x34}, + {104, 0x34}, + {98, 0x34}, + {110, 0x33}, + {104, 0x33}, + {98, 0x33}, + {110, 0x32}, + {104, 0x32}, + {98, 0x32}, + {110, 0x31}, + {104, 0x31}, + {98, 0x31}, + {110, 0x30}, + {104, 0x30}, + {98, 0x30}, + {110, 0x6}, + {104, 0x6}, + {98, 0x6}, + {110, 0x5}, + {104, 0x5}, + {98, 0x5}, + {110, 0x4}, + {104, 0x4}, + {98, 0x4}, + {110, 0x3}, + {104, 0x3}, + {98, 0x3}, + {110, 0x2}, + {104, 0x2}, + {98, 0x2}, + {110, 0x1}, + {104, 0x1}, + {98, 0x1}, + {110, 0x0}, + {104, 0x0}, + {98, 0x0}, + {97, 0}, + {96, 0}, + {95, 0}, + {94, 0}, + {93, 0}, + {92, 0}, + {91, 0}, + {90, 0}, + {89, 0}, + {88, 0}, + {87, 0}, + {86, 0}, + {85, 0}, + {84, 0}, + {83, 0}, + {82, 0}, + {81, 0}, + {80, 0}, + {79, 0}, + {78, 0}, + {77, 0}, + {76, 0}, + {75, 0}, + {74, 0}, + {73, 0}, + {72, 0}, + {71, 0}, + {70, 0}, + {69, 0}, + {68, 0}, + {67, 0}, + {66, 0}, + {65, 0}, + {64, 0}, + {63, 0}, + {62, 0}, + {61, 0}, + {60, 0}, + {59, 0}, + } +}; + +static int iwl4965_fill_txpower_tbl(struct iwl_priv *priv, u8 band, u16 channel, + u8 is_fat, u8 ctrl_chan_high, + struct iwl_tx_power_db *tx_power_tbl) +{ + u8 saturation_power; + s32 target_power; + s32 user_target_power; + s32 power_limit; + s32 current_temp; + s32 reg_limit; + s32 current_regulatory; + s32 txatten_grp = CALIB_CH_GROUP_MAX; + int i; + int c; + const struct iwl_channel_info *ch_info = NULL; + struct iwl_eeprom_calib_ch_info ch_eeprom_info; + const struct iwl_eeprom_calib_measure *measurement; + s16 voltage; + s32 init_voltage; + s32 voltage_compensation; + s32 degrees_per_05db_num; + s32 degrees_per_05db_denom; + s32 factory_temp; + s32 temperature_comp[2]; + s32 factory_gain_index[2]; + s32 factory_actual_pwr[2]; + s32 power_index; + + /* Sanity check requested level (dBm) */ + if (priv->user_txpower_limit < IWL_TX_POWER_TARGET_POWER_MIN) { + IWL_WARNING("Requested user TXPOWER %d below limit.\n", + priv->user_txpower_limit); + return -EINVAL; + } + if (priv->user_txpower_limit > IWL_TX_POWER_TARGET_POWER_MAX) { + IWL_WARNING("Requested user TXPOWER %d above limit.\n", + priv->user_txpower_limit); + return -EINVAL; + } + + /* user_txpower_limit is in dBm, convert to half-dBm (half-dB units + * are used for indexing into txpower table) */ + user_target_power = 2 * priv->user_txpower_limit; + + /* Get current (RXON) channel, band, width */ + ch_info = + iwl4965_get_channel_txpower_info(priv, priv->phymode, channel); + + IWL_DEBUG_TXPOWER("chan %d band %d is_fat %d\n", channel, band, + is_fat); + + if (!ch_info) + return -EINVAL; + + /* get txatten group, used to select 1) thermal txpower adjustment + * and 2) mimo txpower balance between Tx chains. 
*/ + txatten_grp = iwl4965_get_tx_atten_grp(channel); + if (txatten_grp < 0) + return -EINVAL; + + IWL_DEBUG_TXPOWER("channel %d belongs to txatten group %d\n", + channel, txatten_grp); + + if (is_fat) { + if (ctrl_chan_high) + channel -= 2; + else + channel += 2; + } + + /* hardware txpower limits ... + * saturation (clipping distortion) txpowers are in half-dBm */ + if (band) + saturation_power = priv->eeprom.calib_info.saturation_power24; + else + saturation_power = priv->eeprom.calib_info.saturation_power52; + + if (saturation_power < IWL_TX_POWER_SATURATION_MIN || + saturation_power > IWL_TX_POWER_SATURATION_MAX) { + if (band) + saturation_power = IWL_TX_POWER_DEFAULT_SATURATION_24; + else + saturation_power = IWL_TX_POWER_DEFAULT_SATURATION_52; + } + + /* regulatory txpower limits ... reg_limit values are in half-dBm, + * max_power_avg values are in dBm, convert * 2 */ + if (is_fat) + reg_limit = ch_info->fat_max_power_avg * 2; + else + reg_limit = ch_info->max_power_avg * 2; + + if ((reg_limit < IWL_TX_POWER_REGULATORY_MIN) || + (reg_limit > IWL_TX_POWER_REGULATORY_MAX)) { + if (band) + reg_limit = IWL_TX_POWER_DEFAULT_REGULATORY_24; + else + reg_limit = IWL_TX_POWER_DEFAULT_REGULATORY_52; + } + + /* Interpolate txpower calibration values for this channel, + * based on factory calibration tests on spaced channels. */ + iwl4965_interpolate_chan(priv, channel, &ch_eeprom_info); + + /* calculate tx gain adjustment based on power supply voltage */ + voltage = priv->eeprom.calib_info.voltage; + init_voltage = (s32)le32_to_cpu(priv->card_alive_init.voltage); + voltage_compensation = + iwl4965_get_voltage_compensation(voltage, init_voltage); + + IWL_DEBUG_TXPOWER("curr volt %d eeprom volt %d volt comp %d\n", + init_voltage, + voltage, voltage_compensation); + + /* get current temperature (Celsius) */ + current_temp = max(priv->temperature, IWL_TX_POWER_TEMPERATURE_MIN); + current_temp = min(priv->temperature, IWL_TX_POWER_TEMPERATURE_MAX); + current_temp = KELVIN_TO_CELSIUS(current_temp); + + /* select thermal txpower adjustment params, based on channel group + * (same frequency group used for mimo txatten adjustment) */ + degrees_per_05db_num = + tx_power_cmp_tble[txatten_grp].degrees_per_05db_a; + degrees_per_05db_denom = + tx_power_cmp_tble[txatten_grp].degrees_per_05db_a_denom; + + /* get per-chain txpower values from factory measurements */ + for (c = 0; c < 2; c++) { + measurement = &ch_eeprom_info.measurements[c][1]; + + /* txgain adjustment (in half-dB steps) based on difference + * between factory and current temperature */ + factory_temp = measurement->temperature; + iwl4965_math_div_round((current_temp - factory_temp) * + degrees_per_05db_denom, + degrees_per_05db_num, + &temperature_comp[c]); + + factory_gain_index[c] = measurement->gain_idx; + factory_actual_pwr[c] = measurement->actual_pow; + + IWL_DEBUG_TXPOWER("chain = %d\n", c); + IWL_DEBUG_TXPOWER("fctry tmp %d, " + "curr tmp %d, comp %d steps\n", + factory_temp, current_temp, + temperature_comp[c]); + + IWL_DEBUG_TXPOWER("fctry idx %d, fctry pwr %d\n", + factory_gain_index[c], + factory_actual_pwr[c]); + } + + /* for each of 33 bit-rates (including 1 for CCK) */ + for (i = 0; i < POWER_TABLE_NUM_ENTRIES; i++) { + u8 is_mimo_rate; + union iwl_tx_power_dual_stream tx_power; + + /* for mimo, reduce each chain's txpower by half + * (3dB, 6 steps), so total output power is regulatory + * compliant. 
*/ + if (i & 0x8) { + current_regulatory = reg_limit - + IWL_TX_POWER_MIMO_REGULATORY_COMPENSATION; + is_mimo_rate = 1; + } else { + current_regulatory = reg_limit; + is_mimo_rate = 0; + } + + /* find txpower limit, either hardware or regulatory */ + power_limit = saturation_power - back_off_table[i]; + if (power_limit > current_regulatory) + power_limit = current_regulatory; + + /* reduce user's txpower request if necessary + * for this rate on this channel */ + target_power = user_target_power; + if (target_power > power_limit) + target_power = power_limit; + + IWL_DEBUG_TXPOWER("rate %d sat %d reg %d usr %d tgt %d\n", + i, saturation_power - back_off_table[i], + current_regulatory, user_target_power, + target_power); + + /* for each of 2 Tx chains (radio transmitters) */ + for (c = 0; c < 2; c++) { + s32 atten_value; + + if (is_mimo_rate) + atten_value = + (s32)le32_to_cpu(priv->card_alive_init. + tx_atten[txatten_grp][c]); + else + atten_value = 0; + + /* calculate index; higher index means lower txpower */ + power_index = (u8) (factory_gain_index[c] - + (target_power - + factory_actual_pwr[c]) - + temperature_comp[c] - + voltage_compensation + + atten_value); + +/* IWL_DEBUG_TXPOWER("calculated txpower index %d\n", + power_index); */ + + if (power_index < get_min_power_index(i, band)) + power_index = get_min_power_index(i, band); + + /* adjust 5 GHz index to support negative indexes */ + if (!band) + power_index += 9; + + /* CCK, rate 32, reduce txpower for CCK */ + if (i == POWER_TABLE_CCK_ENTRY) + power_index += + IWL_TX_POWER_CCK_COMPENSATION_C_STEP; + + /* stay within the table! */ + if (power_index > 107) { + IWL_WARNING("txpower index %d > 107\n", + power_index); + power_index = 107; + } + if (power_index < 0) { + IWL_WARNING("txpower index %d < 0\n", + power_index); + power_index = 0; + } + + /* fill txpower command for this rate/chain */ + tx_power.s.radio_tx_gain[c] = + gain_table[band][power_index].radio; + tx_power.s.dsp_predis_atten[c] = + gain_table[band][power_index].dsp; + + IWL_DEBUG_TXPOWER("chain %d mimo %d index %d " + "gain 0x%02x dsp %d\n", + c, atten_value, power_index, + tx_power.s.radio_tx_gain[c], + tx_power.s.dsp_predis_atten[c]); + }/* for each chain */ + + tx_power_tbl->power_tbl[i].dw = cpu_to_le32(tx_power.dw); + + }/* for each rate */ + + return 0; +} + +/** + * iwl_hw_reg_send_txpower - Configure the TXPOWER level user limit + * + * Uses the active RXON for channel, band, and characteristics (fat, high) + * The power limit is taken from priv->user_txpower_limit. + */ +int iwl_hw_reg_send_txpower(struct iwl_priv *priv) +{ + struct iwl_txpowertable_cmd cmd = { 0 }; + int rc = 0; + u8 band = 0; + u8 is_fat = 0; + u8 ctrl_chan_high = 0; + + if (test_bit(STATUS_SCANNING, &priv->status)) { + /* If this gets hit a lot, switch it to a BUG() and catch + * the stack trace to find out who is calling this during + * a scan. 
*/ + IWL_WARNING("TX Power requested while scanning!\n"); + return -EAGAIN; + } + + band = ((priv->phymode == MODE_IEEE80211B) || + (priv->phymode == MODE_IEEE80211G)); + + is_fat = is_fat_channel(priv->active_rxon.flags); + + if (is_fat && + (priv->active_rxon.flags & RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK)) + ctrl_chan_high = 1; + + cmd.band = band; + cmd.channel = priv->active_rxon.channel; + + rc = iwl4965_fill_txpower_tbl(priv, band, + le16_to_cpu(priv->active_rxon.channel), + is_fat, ctrl_chan_high, &cmd.tx_power); + if (rc) + return rc; + + rc = iwl_send_cmd_pdu(priv, REPLY_TX_PWR_TABLE_CMD, sizeof(cmd), &cmd); + return rc; +} + +int iwl_hw_channel_switch(struct iwl_priv *priv, u16 channel) +{ + int rc; + u8 band = 0; + u8 is_fat = 0; + u8 ctrl_chan_high = 0; + struct iwl_channel_switch_cmd cmd = { 0 }; + const struct iwl_channel_info *ch_info; + + band = ((priv->phymode == MODE_IEEE80211B) || + (priv->phymode == MODE_IEEE80211G)); + + ch_info = iwl_get_channel_info(priv, priv->phymode, channel); + + is_fat = is_fat_channel(priv->staging_rxon.flags); + + if (is_fat && + (priv->active_rxon.flags & RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK)) + ctrl_chan_high = 1; + + cmd.band = band; + cmd.expect_beacon = 0; + cmd.channel = cpu_to_le16(channel); + cmd.rxon_flags = priv->active_rxon.flags; + cmd.rxon_filter_flags = priv->active_rxon.filter_flags; + cmd.switch_time = cpu_to_le32(priv->ucode_beacon_time); + if (ch_info) + cmd.expect_beacon = is_channel_radar(ch_info); + else + cmd.expect_beacon = 1; + + rc = iwl4965_fill_txpower_tbl(priv, band, channel, is_fat, + ctrl_chan_high, &cmd.tx_power); + if (rc) { + IWL_DEBUG_11H("error:%d fill txpower_tbl\n", rc); + return rc; + } + + rc = iwl_send_cmd_pdu(priv, REPLY_CHANNEL_SWITCH, sizeof(cmd), &cmd); + return rc; +} + +#define RTS_HCCA_RETRY_LIMIT 3 +#define RTS_DFAULT_RETRY_LIMIT 60 + +void iwl_hw_build_tx_cmd_rate(struct iwl_priv *priv, + struct iwl_cmd *cmd, + struct ieee80211_tx_control *ctrl, + struct ieee80211_hdr *hdr, int sta_id, + int is_hcca) +{ + u8 rate; + u8 rts_retry_limit = 0; + u8 data_retry_limit = 0; + __le32 tx_flags; + u16 fc = le16_to_cpu(hdr->frame_control); + + tx_flags = cmd->cmd.tx.tx_flags; + + rate = iwl_rates[ctrl->tx_rate].plcp; + + rts_retry_limit = (is_hcca) ? 
+ RTS_HCCA_RETRY_LIMIT : RTS_DFAULT_RETRY_LIMIT; + + if (ieee80211_is_probe_response(fc)) { + data_retry_limit = 3; + if (data_retry_limit < rts_retry_limit) + rts_retry_limit = data_retry_limit; + } else + data_retry_limit = IWL_DEFAULT_TX_RETRY; + + if (priv->data_retry_limit != -1) + data_retry_limit = priv->data_retry_limit; + + if ((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_MGMT) { + switch (fc & IEEE80211_FCTL_STYPE) { + case IEEE80211_STYPE_AUTH: + case IEEE80211_STYPE_DEAUTH: + case IEEE80211_STYPE_ASSOC_REQ: + case IEEE80211_STYPE_REASSOC_REQ: + if (tx_flags & TX_CMD_FLG_RTS_MSK) { + tx_flags &= ~TX_CMD_FLG_RTS_MSK; + tx_flags |= TX_CMD_FLG_CTS_MSK; + } + break; + default: + break; + } + } + + cmd->cmd.tx.rts_retry_limit = rts_retry_limit; + cmd->cmd.tx.data_retry_limit = data_retry_limit; + cmd->cmd.tx.rate_n_flags = iwl_hw_set_rate_n_flags(rate, 0); + cmd->cmd.tx.tx_flags = tx_flags; +} + +int iwl_hw_get_rx_read(struct iwl_priv *priv) +{ + struct iwl_shared *shared_data = priv->hw_setting.shared_virt; + + return IWL_GET_BITS(*shared_data, rb_closed_stts_rb_num); +} + +int iwl_hw_get_temperature(struct iwl_priv *priv) +{ + return priv->temperature; +} + +unsigned int iwl_hw_get_beacon_cmd(struct iwl_priv *priv, + struct iwl_frame *frame, u8 rate) +{ + struct iwl_tx_beacon_cmd *tx_beacon_cmd; + unsigned int frame_size; + + tx_beacon_cmd = &frame->u.beacon; + memset(tx_beacon_cmd, 0, sizeof(*tx_beacon_cmd)); + + tx_beacon_cmd->tx.sta_id = IWL4965_BROADCAST_ID; + tx_beacon_cmd->tx.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE; + + frame_size = iwl_fill_beacon_frame(priv, + tx_beacon_cmd->frame, + BROADCAST_ADDR, + sizeof(frame->u) - sizeof(*tx_beacon_cmd)); + + BUG_ON(frame_size > MAX_MPDU_SIZE); + tx_beacon_cmd->tx.len = cpu_to_le16((u16)frame_size); + + if ((rate == IWL_RATE_1M_PLCP) || (rate >= IWL_RATE_2M_PLCP)) + tx_beacon_cmd->tx.rate_n_flags = + iwl_hw_set_rate_n_flags(rate, RATE_MCS_CCK_MSK); + else + tx_beacon_cmd->tx.rate_n_flags = + iwl_hw_set_rate_n_flags(rate, 0); + + tx_beacon_cmd->tx.tx_flags = (TX_CMD_FLG_SEQ_CTL_MSK | + TX_CMD_FLG_TSF_MSK | TX_CMD_FLG_STA_RATE_MSK); + return (sizeof(*tx_beacon_cmd) + frame_size); +} + +int iwl_hw_tx_queue_init(struct iwl_priv *priv, struct iwl_tx_queue *txq) +{ + int rc; + unsigned long flags; + int txq_id = txq->q.id; + + spin_lock_irqsave(&priv->lock, flags); + rc = iwl_grab_restricted_access(priv); + if (rc) { + spin_unlock_irqrestore(&priv->lock, flags); + return rc; + } + + iwl_write_restricted(priv, FH_MEM_CBBC_QUEUE(txq_id), + txq->q.dma_addr >> 8); + iwl_write_restricted( + priv, IWL_FH_TCSR_CHNL_TX_CONFIG_REG(txq_id), + IWL_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE | + IWL_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE_VAL); + iwl_release_restricted_access(priv); + spin_unlock_irqrestore(&priv->lock, flags); + + return 0; +} + +static inline u8 iwl4965_get_dma_hi_address(dma_addr_t addr) +{ + return sizeof(addr) > sizeof(u32) ? 
(addr >> 16) >> 16 : 0; +} + +int iwl_hw_txq_attach_buf_to_tfd(struct iwl_priv *priv, void *ptr, + dma_addr_t addr, u16 len) +{ + int index, is_odd; + struct iwl_tfd_frame *tfd = ptr; + u32 num_tbs = IWL_GET_BITS(*tfd, num_tbs); + + if ((num_tbs >= MAX_NUM_OF_TBS) || (num_tbs < 0)) { + IWL_ERROR("Error can not send more than %d chunks\n", + MAX_NUM_OF_TBS); + return -EINVAL; + } + + index = num_tbs / 2; + is_odd = num_tbs & 0x1; + + if (!is_odd) { + tfd->pa[index].tb1_addr = cpu_to_le32(addr); + IWL_SET_BITS(tfd->pa[index], tb1_addr_hi, + iwl4965_get_dma_hi_address(addr)); + IWL_SET_BITS(tfd->pa[index], tb1_len, len); + } else { + IWL_SET_BITS(tfd->pa[index], tb2_addr_lo16, + (u32) (addr & 0xffff)); + IWL_SET_BITS(tfd->pa[index], tb2_addr_hi20, addr >> 16); + IWL_SET_BITS(tfd->pa[index], tb2_len, len); + } + + IWL_SET_BITS(*tfd, num_tbs, num_tbs + 1); + + return 0; +} + +void iwl_hw_card_show_info(struct iwl_priv *priv) +{ + u16 hw_version = priv->eeprom.board_revision_4965; + + IWL_DEBUG_INFO("4965ABGN HW Version %u.%u.%u\n", + ((hw_version >> 8) & 0x0F), + ((hw_version >> 8) >> 4), (hw_version & 0x00FF)); + + IWL_DEBUG_INFO("4965ABGN PBA Number %.16s\n", + priv->eeprom.board_pba_number_4965); +} + +#define IWL_TX_CRC_SIZE 4 +#define IWL_TX_DELIMITER_SIZE 4 + +int iwl4965_tx_queue_update_wr_ptr(struct iwl_priv *priv, + struct iwl_tx_queue *txq, u16 byte_cnt) +{ + int len; + int txq_id = txq->q.id; + struct iwl_shared *shared_data = priv->hw_setting.shared_virt; + + if (txq->need_update == 0) + return 0; + + len = byte_cnt + IWL_TX_CRC_SIZE + IWL_TX_DELIMITER_SIZE; + + IWL_SET_BITS16(shared_data->queues_byte_cnt_tbls[txq_id]. + tfd_offset[txq->q.first_empty], byte_cnt, len); + + if (txq->q.first_empty < IWL4965_MAX_WIN_SIZE) + IWL_SET_BITS16(shared_data->queues_byte_cnt_tbls[txq_id]. + tfd_offset[IWL4965_QUEUE_SIZE + txq->q.first_empty], + byte_cnt, len); + + return 0; +} + +/* Set up Rx receiver/antenna/chain usage in "staging" RXON image. + * This should not be used for scan command ... it puts data in wrong place. */ +void iwl4965_set_rxon_chain(struct iwl_priv *priv) +{ + u8 is_single = is_single_stream(priv); + u8 idle_state, rx_state; + + priv->staging_rxon.rx_chain = 0; + rx_state = idle_state = 3; + + /* Tell uCode which antennas are actually connected. + * Before first association, we assume all antennas are connected. + * Just after first association, iwl4965_noise_calibration() + * checks which antennas actually *are* connected. */ + priv->staging_rxon.rx_chain |= + cpu_to_le16(priv->valid_antenna << RXON_RX_CHAIN_VALID_POS); + + /* How many receivers should we use? 
*/ + iwl4965_get_rx_chain_counter(priv, &idle_state, &rx_state); + priv->staging_rxon.rx_chain |= + cpu_to_le16(rx_state << RXON_RX_CHAIN_MIMO_CNT_POS); + priv->staging_rxon.rx_chain |= + cpu_to_le16(idle_state << RXON_RX_CHAIN_CNT_POS); + + if (!is_single && (rx_state >= 2) && + !test_bit(STATUS_POWER_PMI, &priv->status)) + priv->staging_rxon.rx_chain |= RXON_RX_CHAIN_MIMO_FORCE_MSK; + else + priv->staging_rxon.rx_chain &= ~RXON_RX_CHAIN_MIMO_FORCE_MSK; + + IWL_DEBUG_ASSOC("rx chain %X\n", priv->staging_rxon.rx_chain); +} + +#ifdef CONFIG_IWLWIFI_HT +#ifdef CONFIG_IWLWIFI_HT_AGG +/* + get the traffic load value for tid +*/ +static u32 iwl4965_tl_get_load(struct iwl_priv *priv, u8 tid) +{ + u32 load = 0; + u32 current_time = jiffies_to_msecs(jiffies); + u32 time_diff; + s32 index; + unsigned long flags; + struct iwl_traffic_load *tid_ptr = NULL; + + if (tid >= TID_MAX_LOAD_COUNT) + return 0; + + tid_ptr = &(priv->lq_mngr.agg_ctrl.traffic_load[tid]); + + current_time -= current_time % TID_ROUND_VALUE; + + spin_lock_irqsave(&priv->lq_mngr.lock, flags); + if (!(tid_ptr->queue_count)) + goto out; + + time_diff = TIME_WRAP_AROUND(tid_ptr->time_stamp, current_time); + index = time_diff / TID_QUEUE_CELL_SPACING; + + if (index >= TID_QUEUE_MAX_SIZE) { + u32 oldest_time = current_time - TID_MAX_TIME_DIFF; + + while (tid_ptr->queue_count && + (tid_ptr->time_stamp < oldest_time)) { + tid_ptr->total -= tid_ptr->packet_count[tid_ptr->head]; + tid_ptr->packet_count[tid_ptr->head] = 0; + tid_ptr->time_stamp += TID_QUEUE_CELL_SPACING; + tid_ptr->queue_count--; + tid_ptr->head++; + if (tid_ptr->head >= TID_QUEUE_MAX_SIZE) + tid_ptr->head = 0; + } + } + load = tid_ptr->total; + + out: + spin_unlock_irqrestore(&priv->lq_mngr.lock, flags); + return load; +} + +/* + increment traffic load value for tid and also remove + any old values if past the certain time period +*/ +static void iwl4965_tl_add_packet(struct iwl_priv *priv, u8 tid) +{ + u32 current_time = jiffies_to_msecs(jiffies); + u32 time_diff; + s32 index; + unsigned long flags; + struct iwl_traffic_load *tid_ptr = NULL; + + if (tid >= TID_MAX_LOAD_COUNT) + return; + + tid_ptr = &(priv->lq_mngr.agg_ctrl.traffic_load[tid]); + + current_time -= current_time % TID_ROUND_VALUE; + + spin_lock_irqsave(&priv->lq_mngr.lock, flags); + if (!(tid_ptr->queue_count)) { + tid_ptr->total = 1; + tid_ptr->time_stamp = current_time; + tid_ptr->queue_count = 1; + tid_ptr->head = 0; + tid_ptr->packet_count[0] = 1; + goto out; + } + + time_diff = TIME_WRAP_AROUND(tid_ptr->time_stamp, current_time); + index = time_diff / TID_QUEUE_CELL_SPACING; + + if (index >= TID_QUEUE_MAX_SIZE) { + u32 oldest_time = current_time - TID_MAX_TIME_DIFF; + + while (tid_ptr->queue_count && + (tid_ptr->time_stamp < oldest_time)) { + tid_ptr->total -= tid_ptr->packet_count[tid_ptr->head]; + tid_ptr->packet_count[tid_ptr->head] = 0; + tid_ptr->time_stamp += TID_QUEUE_CELL_SPACING; + tid_ptr->queue_count--; + tid_ptr->head++; + if (tid_ptr->head >= TID_QUEUE_MAX_SIZE) + tid_ptr->head = 0; + } + } + + index = (tid_ptr->head + index) % TID_QUEUE_MAX_SIZE; + tid_ptr->packet_count[index] = tid_ptr->packet_count[index] + 1; + tid_ptr->total = tid_ptr->total + 1; + + if ((index + 1) > tid_ptr->queue_count) + tid_ptr->queue_count = index + 1; + out: + spin_unlock_irqrestore(&priv->lq_mngr.lock, flags); + +} + +#define MMAC_SCHED_MAX_NUMBER_OF_HT_BACK_FLOWS 7 +enum HT_STATUS { + BA_STATUS_FAILURE = 0, + BA_STATUS_INITIATOR_DELBA, + BA_STATUS_RECIPIENT_DELBA, + BA_STATUS_RENEW_ADDBA_REQUEST, + 
BA_STATUS_ACTIVE, +}; + +static u8 iwl4964_tl_ba_avail(struct iwl_priv *priv) +{ + int i; + struct iwl_lq_mngr *lq; + u8 count = 0; + u16 msk; + + lq = (struct iwl_lq_mngr *)&(priv->lq_mngr); + for (i = 0; i < TID_MAX_LOAD_COUNT ; i++) { + msk = 1 << i; + if ((lq->agg_ctrl.granted_ba & msk) || + (lq->agg_ctrl.wait_for_agg_status & msk)) + count++; + } + + if (count < MMAC_SCHED_MAX_NUMBER_OF_HT_BACK_FLOWS) + return 1; + + return 0; +} + +static void iwl4965_ba_status(struct iwl_priv *priv, + u8 tid, enum HT_STATUS status); + +static int iwl4965_perform_addba(struct iwl_priv *priv, u8 tid, u32 length, + u32 ba_timeout) +{ + int rc; + + rc = ieee80211_start_BA_session(priv->hw, priv->bssid, tid); + if (rc) + iwl4965_ba_status(priv, tid, BA_STATUS_FAILURE); + + return rc; +} + +static int iwl4965_perform_delba(struct iwl_priv *priv, u8 tid) +{ + int rc; + + rc = ieee80211_stop_BA_session(priv->hw, priv->bssid, tid); + if (rc) + iwl4965_ba_status(priv, tid, BA_STATUS_FAILURE); + + return rc; +} + +static void iwl4965_turn_on_agg_for_tid(struct iwl_priv *priv, + struct iwl_lq_mngr *lq, + u8 auto_agg, u8 tid) +{ + u32 tid_msk = (1 << tid); + unsigned long flags; + + spin_lock_irqsave(&priv->lq_mngr.lock, flags); +/* + if ((auto_agg) && (!lq->enable_counter)){ + lq->agg_ctrl.next_retry = 0; + lq->agg_ctrl.tid_retry = 0; + spin_unlock_irqrestore(&priv->lq_mngr.lock, flags); + return; + } +*/ + if (!(lq->agg_ctrl.granted_ba & tid_msk) && + (lq->agg_ctrl.requested_ba & tid_msk)) { + u8 available_queues; + u32 load; + + spin_unlock_irqrestore(&priv->lq_mngr.lock, flags); + available_queues = iwl4964_tl_ba_avail(priv); + load = iwl4965_tl_get_load(priv, tid); + + spin_lock_irqsave(&priv->lq_mngr.lock, flags); + if (!available_queues) { + if (auto_agg) + lq->agg_ctrl.tid_retry |= tid_msk; + else { + lq->agg_ctrl.requested_ba &= ~tid_msk; + lq->agg_ctrl.wait_for_agg_status &= ~tid_msk; + } + } else if ((auto_agg) && + ((load <= lq->agg_ctrl.tid_traffic_load_threshold) || + ((lq->agg_ctrl.wait_for_agg_status & tid_msk)))) + lq->agg_ctrl.tid_retry |= tid_msk; + else { + lq->agg_ctrl.wait_for_agg_status |= tid_msk; + spin_unlock_irqrestore(&priv->lq_mngr.lock, flags); + iwl4965_perform_addba(priv, tid, 0x40, + lq->agg_ctrl.ba_timeout); + spin_lock_irqsave(&priv->lq_mngr.lock, flags); + } + } + spin_unlock_irqrestore(&priv->lq_mngr.lock, flags); +} + +static void iwl4965_turn_on_agg(struct iwl_priv *priv, u8 tid) +{ + struct iwl_lq_mngr *lq; + unsigned long flags; + + lq = (struct iwl_lq_mngr *)&(priv->lq_mngr); + + if ((tid < TID_MAX_LOAD_COUNT)) + iwl4965_turn_on_agg_for_tid(priv, lq, lq->agg_ctrl.auto_agg, + tid); + else if (tid == TID_ALL_SPECIFIED) { + if (lq->agg_ctrl.requested_ba) { + for (tid = 0; tid < TID_MAX_LOAD_COUNT; tid++) + iwl4965_turn_on_agg_for_tid(priv, lq, + lq->agg_ctrl.auto_agg, tid); + } else { + spin_lock_irqsave(&priv->lq_mngr.lock, flags); + lq->agg_ctrl.tid_retry = 0; + lq->agg_ctrl.next_retry = 0; + spin_unlock_irqrestore(&priv->lq_mngr.lock, flags); + } + } + +} + +void iwl4965_turn_off_agg(struct iwl_priv *priv, u8 tid) +{ + u32 tid_msk; + struct iwl_lq_mngr *lq; + unsigned long flags; + + lq = (struct iwl_lq_mngr *)&(priv->lq_mngr); + + if ((tid < TID_MAX_LOAD_COUNT)) { + tid_msk = 1 << tid; + spin_lock_irqsave(&priv->lq_mngr.lock, flags); + lq->agg_ctrl.wait_for_agg_status |= tid_msk; + lq->agg_ctrl.requested_ba &= ~tid_msk; + spin_unlock_irqrestore(&priv->lq_mngr.lock, flags); + iwl4965_perform_delba(priv, tid); + } else if (tid == TID_ALL_SPECIFIED) { + 
spin_lock_irqsave(&priv->lq_mngr.lock, flags); + for (tid = 0; tid < TID_MAX_LOAD_COUNT; tid++) { + tid_msk = 1 << tid; + lq->agg_ctrl.wait_for_agg_status |= tid_msk; + spin_unlock_irqrestore(&priv->lq_mngr.lock, flags); + iwl4965_perform_delba(priv, tid); + spin_lock_irqsave(&priv->lq_mngr.lock, flags); + } + lq->agg_ctrl.requested_ba = 0; + spin_unlock_irqrestore(&priv->lq_mngr.lock, flags); + } +} + +static void iwl4965_ba_status(struct iwl_priv *priv, + u8 tid, enum HT_STATUS status) +{ + struct iwl_lq_mngr *lq; + u32 tid_msk = (1 << tid); + unsigned long flags; + + lq = (struct iwl_lq_mngr *)&(priv->lq_mngr); + + if ((tid >= TID_MAX_LOAD_COUNT)) + goto out; + + spin_lock_irqsave(&priv->lq_mngr.lock, flags); + switch (status) { + case BA_STATUS_ACTIVE: + if (!(lq->agg_ctrl.granted_ba & tid_msk)) + lq->agg_ctrl.granted_ba |= tid_msk; + break; + default: + if ((lq->agg_ctrl.granted_ba & tid_msk)) + lq->agg_ctrl.granted_ba &= ~tid_msk; + break; + } + + lq->agg_ctrl.wait_for_agg_status &= ~tid_msk; + if (status != BA_STATUS_ACTIVE) { + if (lq->agg_ctrl.auto_agg) { + lq->agg_ctrl.tid_retry |= tid_msk; + lq->agg_ctrl.next_retry = + jiffies + msecs_to_jiffies(500); + } else + lq->agg_ctrl.requested_ba &= ~tid_msk; + } + spin_unlock_irqrestore(&priv->lq_mngr.lock, flags); + out: + return; +} + +static void iwl4965_bg_agg_work(struct work_struct *work) +{ + struct iwl_priv *priv = container_of(work, struct iwl_priv, + agg_work); + + u32 tid; + u32 retry_tid; + u32 tid_msk; + unsigned long flags; + struct iwl_lq_mngr *lq = (struct iwl_lq_mngr *)&(priv->lq_mngr); + + spin_lock_irqsave(&priv->lq_mngr.lock, flags); + retry_tid = lq->agg_ctrl.tid_retry; + lq->agg_ctrl.tid_retry = 0; + spin_unlock_irqrestore(&priv->lq_mngr.lock, flags); + + if (retry_tid == TID_ALL_SPECIFIED) + iwl4965_turn_on_agg(priv, TID_ALL_SPECIFIED); + else { + for (tid = 0; tid < TID_MAX_LOAD_COUNT; tid++) { + tid_msk = (1 << tid); + if (retry_tid & tid_msk) + iwl4965_turn_on_agg(priv, tid); + } + } + + spin_lock_irqsave(&priv->lq_mngr.lock, flags); + if (lq->agg_ctrl.tid_retry) + lq->agg_ctrl.next_retry = jiffies + msecs_to_jiffies(500); + spin_unlock_irqrestore(&priv->lq_mngr.lock, flags); + return; +} +#endif /*CONFIG_IWLWIFI_HT_AGG */ +#endif /* CONFIG_IWLWIFI_HT */ + +int iwl4965_tx_cmd(struct iwl_priv *priv, struct iwl_cmd *out_cmd, + u8 sta_id, dma_addr_t txcmd_phys, + struct ieee80211_hdr *hdr, u8 hdr_len, + struct ieee80211_tx_control *ctrl, void *sta_in) +{ + struct iwl_tx_cmd cmd; + struct iwl_tx_cmd *tx = (struct iwl_tx_cmd *)&out_cmd->cmd.payload[0]; + dma_addr_t scratch_phys; + u8 unicast = 0; + u8 is_data = 1; + u16 fc; + u16 rate_flags; + int rate_index = min(ctrl->tx_rate & 0xffff, IWL_RATE_COUNT - 1); +#ifdef CONFIG_IWLWIFI_HT +#ifdef CONFIG_IWLWIFI_HT_AGG + __le16 *qc; +#endif /*CONFIG_IWLWIFI_HT_AGG */ +#endif /* CONFIG_IWLWIFI_HT */ + + unicast = !is_multicast_ether_addr(hdr->addr1); + + fc = le16_to_cpu(hdr->frame_control); + if ((fc & IEEE80211_FCTL_FTYPE) != IEEE80211_FTYPE_DATA) + is_data = 0; + + memcpy(&cmd, &(out_cmd->cmd.tx), sizeof(struct iwl_tx_cmd)); + memset(tx, 0, sizeof(struct iwl_tx_cmd)); + memcpy(tx->hdr, hdr, hdr_len); + + tx->len = cmd.len; + tx->driver_txop = cmd.driver_txop; + tx->stop_time.life_time = cmd.stop_time.life_time; + tx->tx_flags = cmd.tx_flags; + tx->sta_id = cmd.sta_id; + tx->tid_tspec = cmd.tid_tspec; + tx->timeout.pm_frame_timeout = cmd.timeout.pm_frame_timeout; + tx->next_frame_len = cmd.next_frame_len; + + tx->sec_ctl = cmd.sec_ctl; + memcpy(&(tx->key[0]), 
&(cmd.key[0]), 16); + tx->tx_flags = cmd.tx_flags; + + tx->rts_retry_limit = cmd.rts_retry_limit; + tx->data_retry_limit = cmd.data_retry_limit; + + scratch_phys = txcmd_phys + sizeof(struct iwl_cmd_header) + + offsetof(struct iwl_tx_cmd, scratch); + tx->dram_lsb_ptr = cpu_to_le32(scratch_phys); + tx->dram_msb_ptr = iwl4965_get_dma_hi_address(scratch_phys); + + /* Hard coded to start at the highest retry fallback position + * until the 4965 specific rate control algorithm is tied in */ + tx->initial_rate_index = LINK_QUAL_MAX_RETRY_NUM - 1; + + /* Alternate between antenna A and B for successive frames */ + if (priv->use_ant_b_for_management_frame) { + priv->use_ant_b_for_management_frame = 0; + rate_flags = RATE_MCS_ANT_B_MSK; + } else { + priv->use_ant_b_for_management_frame = 1; + rate_flags = RATE_MCS_ANT_A_MSK; + } + + if (!unicast || !is_data) { + if ((rate_index >= IWL_FIRST_CCK_RATE) && + (rate_index <= IWL_LAST_CCK_RATE)) + rate_flags |= RATE_MCS_CCK_MSK; + } else { + tx->initial_rate_index = 0; + tx->tx_flags |= TX_CMD_FLG_STA_RATE_MSK; + } + + tx->rate_n_flags = iwl_hw_set_rate_n_flags(iwl_rates[rate_index].plcp, + rate_flags); + + if (ieee80211_is_probe_request(fc)) + tx->tx_flags |= TX_CMD_FLG_TSF_MSK; + else if (ieee80211_is_back_request(fc)) + tx->tx_flags |= TX_CMD_FLG_ACK_MSK | + TX_CMD_FLG_IMM_BA_RSP_MASK; +#ifdef CONFIG_IWLWIFI_HT +#ifdef CONFIG_IWLWIFI_HT_AGG + qc = ieee80211_get_qos_ctrl(hdr); + if (qc && + (priv->iw_mode != IEEE80211_IF_TYPE_IBSS)) { + u8 tid = 0; + tid = (u8) (le16_to_cpu(*qc) & 0xF); + if (tid < TID_MAX_LOAD_COUNT) + iwl4965_tl_add_packet(priv, tid); + } + + if (priv->lq_mngr.agg_ctrl.next_retry && + (time_after(priv->lq_mngr.agg_ctrl.next_retry, jiffies))) { + unsigned long flags; + + spin_lock_irqsave(&priv->lq_mngr.lock, flags); + priv->lq_mngr.agg_ctrl.next_retry = 0; + spin_unlock_irqrestore(&priv->lq_mngr.lock, flags); + schedule_work(&priv->agg_work); + } +#endif +#endif + return 0; +} + +/** + * sign_extend - Sign extend a value using specified bit as sign-bit + * + * Example: sign_extend(9, 3) would return -7 as bit3 of 1001b is 1 + * and bit0..2 is 001b which when sign extended to 1111111111111001b is -7. 
+ * + * @param oper value to sign extend + * @param index 0 based bit index (0<=index<32) to sign bit + */ +static s32 sign_extend(u32 oper, int index) +{ + u8 shift = 31 - index; + + return (s32)(oper << shift) >> shift; +} + +/** + * iwl4965_get_temperature - return the calibrated temperature (in Kelvin) + * @statistics: Provides the temperature reading from the uCode + * + * A return of <0 indicates bogus data in the statistics + */ +int iwl4965_get_temperature(const struct iwl_priv *priv) +{ + s32 temperature; + s32 vt; + s32 R1, R2, R3; + u32 R4; + + if (test_bit(STATUS_TEMPERATURE, &priv->status) && + (priv->statistics.flag & STATISTICS_REPLY_FLG_FAT_MODE_MSK)) { + IWL_DEBUG_TEMP("Running FAT temperature calibration\n"); + R1 = (s32)le32_to_cpu(priv->card_alive_init.therm_r1[1]); + R2 = (s32)le32_to_cpu(priv->card_alive_init.therm_r2[1]); + R3 = (s32)le32_to_cpu(priv->card_alive_init.therm_r3[1]); + R4 = le32_to_cpu(priv->card_alive_init.therm_r4[1]); + } else { + IWL_DEBUG_TEMP("Running temperature calibration\n"); + R1 = (s32)le32_to_cpu(priv->card_alive_init.therm_r1[0]); + R2 = (s32)le32_to_cpu(priv->card_alive_init.therm_r2[0]); + R3 = (s32)le32_to_cpu(priv->card_alive_init.therm_r3[0]); + R4 = le32_to_cpu(priv->card_alive_init.therm_r4[0]); + } + + /* + * Temperature is only 23 bits so sign extend out to 32 + * + * NOTE If we haven't received a statistics notification yet + * with an updated temperature, use R4 provided to us in the + * ALIVE response. */ + if (!test_bit(STATUS_TEMPERATURE, &priv->status)) + vt = sign_extend(R4, 23); + else + vt = sign_extend( + le32_to_cpu(priv->statistics.general.temperature), 23); + + IWL_DEBUG_TEMP("Calib values R[1-3]: %d %d %d R4: %d\n", + R1, R2, R3, vt); + + if (R3 == R1) { + IWL_ERROR("Calibration conflict R1 == R3\n"); + return -1; + } + + /* Calculate temperature in degrees Kelvin, adjust by 97%. + * Add offset to center the adjustment around 0 degrees Centigrade. */ + temperature = TEMPERATURE_CALIB_A_VAL * (vt - R2); + temperature /= (R3 - R1); + temperature = (temperature * 97) / 100 + + TEMPERATURE_CALIB_KELVIN_OFFSET; + + IWL_DEBUG_TEMP("Calibrated temperature: %dK, %dC\n", temperature, + KELVIN_TO_CELSIUS(temperature)); + + return temperature; +} + +/* Adjust Txpower only if temperature variance is greater than threshold. */ +#define IWL_TEMPERATURE_THRESHOLD 3 + +/** + * iwl4965_is_temp_calib_needed - determines if new calibration is needed + * + * If the temperature changed has changed sufficiently, then a recalibration + * is needed. + * + * Assumes caller will replace priv->last_temperature once calibration + * executed. + */ +static int iwl4965_is_temp_calib_needed(struct iwl_priv *priv) +{ + int temp_diff; + + if (!test_bit(STATUS_STATISTICS, &priv->status)) { + IWL_DEBUG_TEMP("Temperature not updated -- no statistics.\n"); + return 0; + } + + temp_diff = priv->temperature - priv->last_temperature; + + /* get absolute value */ + if (temp_diff < 0) { + IWL_DEBUG_POWER("Getting cooler, delta %d, \n", temp_diff); + temp_diff = -temp_diff; + } else if (temp_diff == 0) + IWL_DEBUG_POWER("Same temp, \n"); + else + IWL_DEBUG_POWER("Getting warmer, delta %d, \n", temp_diff); + + if (temp_diff < IWL_TEMPERATURE_THRESHOLD) { + IWL_DEBUG_POWER("Thermal txpower calib not needed\n"); + return 0; + } + + IWL_DEBUG_POWER("Thermal txpower calib needed\n"); + + return 1; +} + +/* Calculate noise level, based on measurements during network silence just + * before arriving beacon. 
This measurement can be done only if we know + * exactly when to expect beacons, therefore only when we're associated. */ +static void iwl4965_rx_calc_noise(struct iwl_priv *priv) +{ + struct statistics_rx_non_phy *rx_info + = &(priv->statistics.rx.general); + int num_active_rx = 0; + int total_silence = 0; + int bcn_silence_a = + le32_to_cpu(rx_info->beacon_silence_rssi_a) & IN_BAND_FILTER; + int bcn_silence_b = + le32_to_cpu(rx_info->beacon_silence_rssi_b) & IN_BAND_FILTER; + int bcn_silence_c = + le32_to_cpu(rx_info->beacon_silence_rssi_c) & IN_BAND_FILTER; + + if (bcn_silence_a) { + total_silence += bcn_silence_a; + num_active_rx++; + } + if (bcn_silence_b) { + total_silence += bcn_silence_b; + num_active_rx++; + } + if (bcn_silence_c) { + total_silence += bcn_silence_c; + num_active_rx++; + } + + /* Average among active antennas */ + if (num_active_rx) + priv->last_rx_noise = (total_silence / num_active_rx) - 107; + else + priv->last_rx_noise = IWL_NOISE_MEAS_NOT_AVAILABLE; + + IWL_DEBUG_CALIB("inband silence a %u, b %u, c %u, dBm %d\n", + bcn_silence_a, bcn_silence_b, bcn_silence_c, + priv->last_rx_noise); +} + +void iwl_hw_rx_statistics(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb) +{ + struct iwl_rx_packet *pkt = (void *)rxb->skb->data; + int change; + s32 temp; + + IWL_DEBUG_RX("Statistics notification received (%d vs %d).\n", + (int)sizeof(priv->statistics), pkt->len); + + change = ((priv->statistics.general.temperature != + pkt->u.stats.general.temperature) || + ((priv->statistics.flag & + STATISTICS_REPLY_FLG_FAT_MODE_MSK) != + (pkt->u.stats.flag & STATISTICS_REPLY_FLG_FAT_MODE_MSK))); + + memcpy(&priv->statistics, &pkt->u.stats, sizeof(priv->statistics)); + + set_bit(STATUS_STATISTICS, &priv->status); + + /* Reschedule the statistics timer to occur in + * REG_RECALIB_PERIOD seconds to ensure we get a + * thermal update even if the uCode doesn't give + * us one */ + mod_timer(&priv->statistics_periodic, jiffies + + msecs_to_jiffies(REG_RECALIB_PERIOD * 1000)); + + if (unlikely(!test_bit(STATUS_SCANNING, &priv->status)) && + (pkt->hdr.cmd == STATISTICS_NOTIFICATION)) { + iwl4965_rx_calc_noise(priv); +#ifdef CONFIG_IWLWIFI_SENSITIVITY + queue_work(priv->workqueue, &priv->sensitivity_work); +#endif + } + + /* If the hardware hasn't reported a change in + * temperature then don't bother computing a + * calibrated temperature value */ + if (!change) + return; + + temp = iwl4965_get_temperature(priv); + if (temp < 0) + return; + + if (priv->temperature != temp) { + if (priv->temperature) + IWL_DEBUG_TEMP("Temperature changed " + "from %dC to %dC\n", + KELVIN_TO_CELSIUS(priv->temperature), + KELVIN_TO_CELSIUS(temp)); + else + IWL_DEBUG_TEMP("Temperature " + "initialized to %dC\n", + KELVIN_TO_CELSIUS(temp)); + } + + priv->temperature = temp; + set_bit(STATUS_TEMPERATURE, &priv->status); + + if (unlikely(!test_bit(STATUS_SCANNING, &priv->status)) && + iwl4965_is_temp_calib_needed(priv)) + queue_work(priv->workqueue, &priv->txpower_work); +} + +static void iwl4965_handle_data_packet(struct iwl_priv *priv, int is_data, + int include_phy, + struct iwl_rx_mem_buffer *rxb, + struct ieee80211_rx_status *stats) +{ + struct iwl_rx_packet *pkt = (struct iwl_rx_packet *)rxb->skb->data; + struct iwl4965_rx_phy_res *rx_start = (include_phy) ? 
+ (struct iwl4965_rx_phy_res *)&(pkt->u.raw[0]) : NULL; + struct ieee80211_hdr *hdr; + u16 len; + __le32 *rx_end; + unsigned int skblen; + u32 ampdu_status; + + if (!include_phy && priv->last_phy_res[0]) + rx_start = (struct iwl4965_rx_phy_res *)&priv->last_phy_res[1]; + + if (!rx_start) { + IWL_ERROR("MPDU frame without a PHY data\n"); + return; + } + if (include_phy) { + hdr = (struct ieee80211_hdr *)((u8 *) & rx_start[1] + + rx_start->cfg_phy_cnt); + + len = le16_to_cpu(rx_start->byte_count); + + rx_end = (__le32 *) ((u8 *) & pkt->u.raw[0] + + sizeof(struct iwl4965_rx_phy_res) + + rx_start->cfg_phy_cnt + len); + + } else { + struct iwl4965_rx_mpdu_res_start *amsdu = + (struct iwl4965_rx_mpdu_res_start *)pkt->u.raw; + + hdr = (struct ieee80211_hdr *)(pkt->u.raw + + sizeof(struct iwl4965_rx_mpdu_res_start)); + len = le16_to_cpu(amsdu->byte_count); + rx_start->byte_count = amsdu->byte_count; + rx_end = (__le32 *) (((u8 *) hdr) + len); + } + if (len > 2342 || len < 16) { + IWL_DEBUG_DROP("byte count out of range [16,2342]" + " : %d\n", len); + return; + } + + ampdu_status = le32_to_cpu(*rx_end); + skblen = ((u8 *) rx_end - (u8 *) & pkt->u.raw[0]) + sizeof(u32); + + /* start from MAC */ + skb_reserve(rxb->skb, (void *)hdr - (void *)pkt); + skb_put(rxb->skb, len); /* end where data ends */ + + /* We only process data packets if the interface is open */ + if (unlikely(!priv->is_open)) { + IWL_DEBUG_DROP_LIMIT + ("Dropping packet while interface is not open.\n"); + return; + } + + if (priv->iw_mode == IEEE80211_IF_TYPE_MNTR) { + if (iwl_param_hwcrypto) + iwl_set_decrypted_flag(priv, rxb->skb, + ampdu_status, stats); + iwl_handle_data_packet_monitor(priv, rxb, hdr, len, stats, 0); + return; + } + + stats->flag = 0; + hdr = (struct ieee80211_hdr *)rxb->skb->data; + + if (iwl_param_hwcrypto) + iwl_set_decrypted_flag(priv, rxb->skb, ampdu_status, stats); + + ieee80211_rx_irqsafe(priv->hw, rxb->skb, stats); + priv->alloc_rxb_skb--; + rxb->skb = NULL; +#ifdef LED + priv->led_packets += len; + iwl_setup_activity_timer(priv); +#endif +} + +/* Calc max signal level (dBm) among 3 possible receivers */ +static int iwl4965_calc_rssi(struct iwl4965_rx_phy_res *rx_resp) +{ + /* data from PHY/DSP regarding signal strength, etc., + * contents are always there, not configurable by host. */ + struct iwl4965_rx_non_cfg_phy *ncphy = + (struct iwl4965_rx_non_cfg_phy *)rx_resp->non_cfg_phy; + u32 agc = (le16_to_cpu(ncphy->agc_info) & IWL_AGC_DB_MASK) + >> IWL_AGC_DB_POS; + + u32 valid_antennae = + (le16_to_cpu(rx_resp->phy_flags) & RX_PHY_FLAGS_ANTENNAE_MASK) + >> RX_PHY_FLAGS_ANTENNAE_OFFSET; + u8 max_rssi = 0; + u32 i; + + /* Find max rssi among 3 possible receivers. + * These values are measured by the digital signal processor (DSP). + * They should stay fairly constant even as the signal strength varies, + * if the radio's automatic gain control (AGC) is working right. + * AGC value (see below) will provide the "interesting" info. */ + for (i = 0; i < 3; i++) + if (valid_antennae & (1 << i)) + max_rssi = max(ncphy->rssi_info[i << 1], max_rssi); + + IWL_DEBUG_STATS("Rssi In A %d B %d C %d Max %d AGC dB %d\n", + ncphy->rssi_info[0], ncphy->rssi_info[2], ncphy->rssi_info[4], + max_rssi, agc); + + /* dBm = max_rssi dB - agc dB - constant. + * Higher AGC (higher radio gain) means lower signal. 
*/ + return (max_rssi - agc - IWL_RSSI_OFFSET); +} + +#ifdef CONFIG_IWLWIFI_HT + +/* Parsed Information Elements */ +struct ieee802_11_elems { + u8 *ds_params; + u8 ds_params_len; + u8 *tim; + u8 tim_len; + u8 *ibss_params; + u8 ibss_params_len; + u8 *erp_info; + u8 erp_info_len; + u8 *ht_cap_param; + u8 ht_cap_param_len; + u8 *ht_extra_param; + u8 ht_extra_param_len; +}; + +static int parse_elems(u8 *start, size_t len, struct ieee802_11_elems *elems) +{ + size_t left = len; + u8 *pos = start; + int unknown = 0; + + memset(elems, 0, sizeof(*elems)); + + while (left >= 2) { + u8 id, elen; + + id = *pos++; + elen = *pos++; + left -= 2; + + if (elen > left) + return -1; + + switch (id) { + case WLAN_EID_DS_PARAMS: + elems->ds_params = pos; + elems->ds_params_len = elen; + break; + case WLAN_EID_TIM: + elems->tim = pos; + elems->tim_len = elen; + break; + case WLAN_EID_IBSS_PARAMS: + elems->ibss_params = pos; + elems->ibss_params_len = elen; + break; + case WLAN_EID_ERP_INFO: + elems->erp_info = pos; + elems->erp_info_len = elen; + break; + case WLAN_EID_HT_CAPABILITY: + elems->ht_cap_param = pos; + elems->ht_cap_param_len = elen; + break; + case WLAN_EID_HT_EXTRA_INFO: + elems->ht_extra_param = pos; + elems->ht_extra_param_len = elen; + break; + default: + unknown++; + break; + } + + left -= elen; + pos += elen; + } + + return 0; +} +#endif /* CONFIG_IWLWIFI_HT */ + +static void iwl4965_sta_modify_ps_wake(struct iwl_priv *priv, int sta_id) +{ + unsigned long flags; + + spin_lock_irqsave(&priv->sta_lock, flags); + priv->stations[sta_id].sta.station_flags &= ~STA_FLG_PWR_SAVE_MSK; + priv->stations[sta_id].sta.station_flags_msk = STA_FLG_PWR_SAVE_MSK; + priv->stations[sta_id].sta.sta.modify_mask = 0; + priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK; + spin_unlock_irqrestore(&priv->sta_lock, flags); + + iwl_send_add_station(priv, &priv->stations[sta_id].sta, CMD_ASYNC); +} + +static void iwl4965_update_ps_mode(struct iwl_priv *priv, u16 ps_bit, u8 *addr) +{ + /* FIXME: need locking over ps_status ??? */ + u8 sta_id = iwl_hw_find_station(priv, addr); + + if (sta_id != IWL_INVALID_STATION) { + u8 sta_awake = priv->stations[sta_id]. + ps_status == STA_PS_STATUS_WAKE; + + if (sta_awake && ps_bit) + priv->stations[sta_id].ps_status = STA_PS_STATUS_SLEEP; + else if (!sta_awake && !ps_bit) { + iwl4965_sta_modify_ps_wake(priv, sta_id); + priv->stations[sta_id].ps_status = STA_PS_STATUS_WAKE; + } + } +} + +/* Called for REPLY_4965_RX (legacy ABG frames), or + * REPLY_RX_MPDU_CMD (HT high-throughput N frames). */ +static void iwl4965_rx_reply_rx(struct iwl_priv *priv, + struct iwl_rx_mem_buffer *rxb) +{ + struct iwl_rx_packet *pkt = (void *)rxb->skb->data; + /* Use phy data (Rx signal strength, etc.) contained within + * this rx packet for legacy frames, + * or phy data cached from REPLY_RX_PHY_CMD for HT frames. */ + int include_phy = (pkt->hdr.cmd == REPLY_4965_RX); + struct iwl4965_rx_phy_res *rx_start = (include_phy) ? + (struct iwl4965_rx_phy_res *)&(pkt->u.raw[0]) : + (struct iwl4965_rx_phy_res *)&priv->last_phy_res[1]; + __le32 *rx_end; + unsigned int len = 0; + struct ieee80211_hdr *header; + u16 fc; + struct ieee80211_rx_status stats = { + .mactime = le64_to_cpu(rx_start->timestamp), + .channel = le16_to_cpu(rx_start->channel), + .phymode = + (rx_start->phy_flags & RX_RES_PHY_FLAGS_BAND_24_MSK) ? 
+ MODE_IEEE80211G : MODE_IEEE80211A, + .antenna = 0, + .rate = iwl_hw_get_rate(rx_start->rate_n_flags), + .flag = 0, +#ifdef CONFIG_IWLWIFI_HT_AGG + .ordered = 0 +#endif /* CONFIG_IWLWIFI_HT_AGG */ + }; + u8 network_packet; + + if ((unlikely(rx_start->cfg_phy_cnt > 20))) { + IWL_DEBUG_DROP + ("dsp size out of range [0,20]: " + "%d/n", rx_start->cfg_phy_cnt); + return; + } + if (!include_phy) { + if (priv->last_phy_res[0]) + rx_start = (struct iwl4965_rx_phy_res *) + &priv->last_phy_res[1]; + else + rx_start = NULL; + } + + if (!rx_start) { + IWL_ERROR("MPDU frame without a PHY data\n"); + return; + } + + if (include_phy) { + header = (struct ieee80211_hdr *)((u8 *) & rx_start[1] + + rx_start->cfg_phy_cnt); + + len = le16_to_cpu(rx_start->byte_count); + rx_end = (__le32 *) (pkt->u.raw + rx_start->cfg_phy_cnt + + sizeof(struct iwl4965_rx_phy_res) + len); + } else { + struct iwl4965_rx_mpdu_res_start *amsdu = + (struct iwl4965_rx_mpdu_res_start *)pkt->u.raw; + + header = (void *)(pkt->u.raw + + sizeof(struct iwl4965_rx_mpdu_res_start)); + len = le16_to_cpu(amsdu->byte_count); + rx_end = (__le32 *) (pkt->u.raw + + sizeof(struct iwl4965_rx_mpdu_res_start) + len); + } + + if (!(*rx_end & RX_RES_STATUS_NO_CRC32_ERROR) || + !(*rx_end & RX_RES_STATUS_NO_RXE_OVERFLOW)) { + IWL_DEBUG_RX("Bad CRC or FIFO: 0x%08X.\n", + le32_to_cpu(*rx_end)); + return; + } + + priv->ucode_beacon_time = le32_to_cpu(rx_start->beacon_time_stamp); + + stats.freq = ieee80211chan2mhz(stats.channel); + + /* Find max signal strength (dBm) among 3 antenna/receiver chains */ + stats.ssi = iwl4965_calc_rssi(rx_start); + + /* Meaningful noise values are available only from beacon statistics, + * which are gathered only when associated, and indicate noise + * only for the associated network channel ... + * Ignore these noise values while scanning (other channels) */ + if (iwl_is_associated(priv) && + !test_bit(STATUS_SCANNING, &priv->status)) { + stats.noise = priv->last_rx_noise; + stats.signal = iwl_calc_sig_qual(stats.ssi, stats.noise); + } else { + stats.noise = IWL_NOISE_MEAS_NOT_AVAILABLE; + stats.signal = iwl_calc_sig_qual(stats.ssi, 0); + } + + /* Reset beacon noise level if not associated. 
*/ + if (!iwl_is_associated(priv)) + priv->last_rx_noise = IWL_NOISE_MEAS_NOT_AVAILABLE; + +#ifdef CONFIG_IWLWIFI_DEBUG + /* TODO: Parts of iwl_report_frame are broken for 4965 */ + if (iwl_debug_level & (IWL_DL_RX)) + /* Set "1" to report good data frames in groups of 100 */ + iwl_report_frame(priv, pkt, header, 1); + + if (iwl_debug_level & (IWL_DL_RX | IWL_DL_STATS)) + IWL_DEBUG_RX("Rssi %d, noise %d, qual %d, TSF %lu\n", + stats.ssi, stats.noise, stats.signal, + (long unsigned int)le64_to_cpu(rx_start->timestamp)); +#endif + + network_packet = iwl_is_network_packet(priv, header); + if (network_packet) { + priv->last_rx_rssi = stats.ssi; + priv->last_beacon_time = priv->ucode_beacon_time; + priv->last_tsf = le64_to_cpu(rx_start->timestamp); + } + + fc = le16_to_cpu(header->frame_control); + switch (fc & IEEE80211_FCTL_FTYPE) { + case IEEE80211_FTYPE_MGMT: + + if (priv->iw_mode == IEEE80211_IF_TYPE_AP) + iwl4965_update_ps_mode(priv, fc & IEEE80211_FCTL_PM, + header->addr2); + switch (fc & IEEE80211_FCTL_STYPE) { + case IEEE80211_STYPE_PROBE_RESP: + case IEEE80211_STYPE_BEACON: + if ((priv->iw_mode == IEEE80211_IF_TYPE_STA && + !compare_ether_addr(header->addr2, priv->bssid)) || + (priv->iw_mode == IEEE80211_IF_TYPE_IBSS && + !compare_ether_addr(header->addr3, priv->bssid))) { + struct ieee80211_mgmt *mgmt = + (struct ieee80211_mgmt *)header; + u64 timestamp = + le64_to_cpu(mgmt->u.beacon.timestamp); + + priv->timestamp0 = timestamp & 0xFFFFFFFF; + priv->timestamp1 = + (timestamp >> 32) & 0xFFFFFFFF; + priv->beacon_int = le16_to_cpu( + mgmt->u.beacon.beacon_int); + if (priv->call_post_assoc_from_beacon && + (priv->iw_mode == IEEE80211_IF_TYPE_STA)) { + priv->call_post_assoc_from_beacon = 0; + queue_work(priv->workqueue, + &priv->post_associate.work); + } + } + break; + + case IEEE80211_STYPE_ACTION: + break; + + /* + * TODO: There is no callback function from upper + * stack to inform us when associated status. this + * work around to sniff assoc_resp management frame + * and finish the association process. 
+ */ + case IEEE80211_STYPE_ASSOC_RESP: + case IEEE80211_STYPE_REASSOC_RESP: + if (network_packet && iwl_is_associated(priv)) { +#ifdef CONFIG_IWLWIFI_HT + u8 *pos = NULL; + struct ieee802_11_elems elems; +#endif /*CONFIG_IWLWIFI_HT */ + struct ieee80211_mgmt *mgnt = + (struct ieee80211_mgmt *)header; + + priv->assoc_id = (~((1 << 15) | (1 << 14)) + & le16_to_cpu(mgnt->u.assoc_resp.aid)); + priv->assoc_capability = + le16_to_cpu( + mgnt->u.assoc_resp.capab_info); +#ifdef CONFIG_IWLWIFI_HT + pos = mgnt->u.assoc_resp.variable; + if (!parse_elems(pos, + len - (pos - (u8 *) mgnt), + &elems)) { + if (elems.ht_extra_param && + elems.ht_cap_param) + break; + } +#endif /*CONFIG_IWLWIFI_HT */ + /* assoc_id is 0 no association */ + if (!priv->assoc_id) + break; + if (priv->beacon_int) + queue_work(priv->workqueue, + &priv->post_associate.work); + else + priv->call_post_assoc_from_beacon = 1; + } + + break; + + case IEEE80211_STYPE_PROBE_REQ: + if ((priv->iw_mode == IEEE80211_IF_TYPE_IBSS) && + !iwl_is_associated(priv)) { + DECLARE_MAC_BUF(mac1); + DECLARE_MAC_BUF(mac2); + DECLARE_MAC_BUF(mac3); + + IWL_DEBUG_DROP("Dropping (non network): " + "%s, %s, %s\n", + print_mac(mac1, header->addr1), + print_mac(mac2, header->addr2), + print_mac(mac3, header->addr3)); + return; + } + } + iwl4965_handle_data_packet(priv, 0, include_phy, rxb, &stats); + break; + + case IEEE80211_FTYPE_CTL: +#ifdef CONFIG_IWLWIFI_HT_AGG + switch (fc & IEEE80211_FCTL_STYPE) { + case IEEE80211_STYPE_BACK_REQ: + IWL_DEBUG_HT("IEEE80211_STYPE_BACK_REQ arrived\n"); + iwl4965_handle_data_packet(priv, 0, include_phy, + rxb, &stats); + break; + default: + break; + } +#endif + + break; + + case IEEE80211_FTYPE_DATA: { + DECLARE_MAC_BUF(mac1); + DECLARE_MAC_BUF(mac2); + DECLARE_MAC_BUF(mac3); + + if (priv->iw_mode == IEEE80211_IF_TYPE_AP) + iwl4965_update_ps_mode(priv, fc & IEEE80211_FCTL_PM, + header->addr2); + + if (unlikely(!network_packet)) + IWL_DEBUG_DROP("Dropping (non network): " + "%s, %s, %s\n", + print_mac(mac1, header->addr1), + print_mac(mac2, header->addr2), + print_mac(mac3, header->addr3)); + else if (unlikely(is_duplicate_packet(priv, header))) + IWL_DEBUG_DROP("Dropping (dup): %s, %s, %s\n", + print_mac(mac1, header->addr1), + print_mac(mac2, header->addr2), + print_mac(mac3, header->addr3)); + else + iwl4965_handle_data_packet(priv, 1, include_phy, rxb, + &stats); + break; + } + default: + break; + + } +} + +/* Cache phy data (Rx signal strength, etc) for HT frame (REPLY_RX_PHY_CMD). + * This will be used later in iwl4965_rx_reply_rx() for REPLY_RX_MPDU_CMD. 
*/ +static void iwl4965_rx_reply_rx_phy(struct iwl_priv *priv, + struct iwl_rx_mem_buffer *rxb) +{ + struct iwl_rx_packet *pkt = (void *)rxb->skb->data; + priv->last_phy_res[0] = 1; + memcpy(&priv->last_phy_res[1], &(pkt->u.raw[0]), + sizeof(struct iwl4965_rx_phy_res)); +} + +static void iwl4965_rx_missed_beacon_notif(struct iwl_priv *priv, + struct iwl_rx_mem_buffer *rxb) + +{ +#ifdef CONFIG_IWLWIFI_SENSITIVITY + struct iwl_rx_packet *pkt = (void *)rxb->skb->data; + struct iwl_missed_beacon_notif *missed_beacon; + + missed_beacon = &pkt->u.missed_beacon; + if (le32_to_cpu(missed_beacon->consequtive_missed_beacons) > 5) { + IWL_DEBUG_CALIB("missed bcn cnsq %d totl %d rcd %d expctd %d\n", + le32_to_cpu(missed_beacon->consequtive_missed_beacons), + le32_to_cpu(missed_beacon->total_missed_becons), + le32_to_cpu(missed_beacon->num_recvd_beacons), + le32_to_cpu(missed_beacon->num_expected_beacons)); + priv->sensitivity_data.state = IWL_SENS_CALIB_NEED_REINIT; + if (unlikely(!test_bit(STATUS_SCANNING, &priv->status))) + queue_work(priv->workqueue, &priv->sensitivity_work); + } +#endif /*CONFIG_IWLWIFI_SENSITIVITY*/ +} + +#ifdef CONFIG_IWLWIFI_HT +#ifdef CONFIG_IWLWIFI_HT_AGG + +static void iwl4965_set_tx_status(struct iwl_priv *priv, int txq_id, int idx, + u32 status, u32 retry_count, u32 rate) +{ + struct ieee80211_tx_status *tx_status = + &(priv->txq[txq_id].txb[idx].status); + + tx_status->flags = status ? IEEE80211_TX_STATUS_ACK : 0; + tx_status->retry_count += retry_count; + tx_status->control.tx_rate = rate; +} + + +static void iwl_sta_modify_enable_tid_tx(struct iwl_priv *priv, + int sta_id, int tid) +{ + unsigned long flags; + + spin_lock_irqsave(&priv->sta_lock, flags); + priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_TID_DISABLE_TX; + priv->stations[sta_id].sta.tid_disable_tx &= cpu_to_le16(~(1 << tid)); + priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK; + spin_unlock_irqrestore(&priv->sta_lock, flags); + + iwl_send_add_station(priv, &priv->stations[sta_id].sta, CMD_ASYNC); +} + + +static int iwl4965_tx_status_reply_compressed_ba(struct iwl_priv *priv, + struct iwl_ht_agg *agg, + struct iwl_compressed_ba_resp* + ba_resp) + +{ + int i, sh, ack; + u16 ba_seq_ctl = le16_to_cpu(ba_resp->ba_seq_ctl); + u32 bitmap0, bitmap1; + u32 resp_bitmap0 = le32_to_cpu(ba_resp->ba_bitmap0); + u32 resp_bitmap1 = le32_to_cpu(ba_resp->ba_bitmap1); + + if (unlikely(!agg->wait_for_ba)) { + IWL_ERROR("Received BA when not expected\n"); + return -EINVAL; + } + agg->wait_for_ba = 0; + IWL_DEBUG_TX_REPLY("BA %d %d\n", agg->start_idx, ba_resp->ba_seq_ctl); + sh = agg->start_idx - SEQ_TO_INDEX(ba_seq_ctl>>4); + if (sh < 0) /* something is wrong with indices */ + sh += 0x100; + + /* don't use 64 bits for now */ + bitmap0 = resp_bitmap0 >> sh; + bitmap1 = resp_bitmap1 >> sh; + bitmap0 |= (resp_bitmap1 & ((1 << sh) - 1)) << (32 - sh); + + if (agg->frame_count > (64 - sh)) { + IWL_DEBUG_TX_REPLY("more frames than bitmap size"); + return -1; + } + + /* check for success or failure according to the + * transmitted bitmap and back bitmap */ + bitmap0 &= agg->bitmap0; + bitmap1 &= agg->bitmap1; + + for (i = 0; i < agg->frame_count ; i++) { + int idx = (agg->start_idx + i) & 0xff; + ack = bitmap0 & (1 << i); + IWL_DEBUG_TX_REPLY("%s ON i=%d idx=%d raw=%d\n", + ack?
"ACK":"NACK", i, idx, agg->start_idx + i); + iwl4965_set_tx_status(priv, agg->txq_id, idx, ack, 0, + agg->rate_n_flags); + + } + + IWL_DEBUG_TX_REPLY("Bitmap %x%x\n", bitmap0, bitmap1); + + return 0; +} + +static inline int iwl_queue_dec_wrap(int index, int n_bd) +{ + return (index == 0) ? n_bd - 1 : index - 1; +} + +static void iwl4965_rx_reply_compressed_ba(struct iwl_priv *priv, + struct iwl_rx_mem_buffer *rxb) +{ + struct iwl_rx_packet *pkt = (void *)rxb->skb->data; + struct iwl_compressed_ba_resp *ba_resp = &pkt->u.compressed_ba; + int index; + struct iwl_tx_queue *txq = NULL; + struct iwl_ht_agg *agg; + u16 ba_resp_scd_flow = le16_to_cpu(ba_resp->scd_flow); + u16 ba_resp_scd_ssn = le16_to_cpu(ba_resp->scd_ssn); + + if (ba_resp_scd_flow >= ARRAY_SIZE(priv->txq)) { + IWL_ERROR("BUG_ON scd_flow is bigger than number of queues"); + return; + } + + txq = &priv->txq[ba_resp_scd_flow]; + agg = &priv->stations[ba_resp->sta_id].tid[ba_resp->tid].agg; + index = iwl_queue_dec_wrap(ba_resp_scd_ssn & 0xff, txq->q.n_bd); + + /* TODO: Need to get this copy more sefely - now good for debug */ +/* + { + DECLARE_MAC_BUF(mac); + IWL_DEBUG_TX_REPLY("REPLY_COMPRESSED_BA [%d]Received from %s, " + "sta_id = %d\n", + agg->wait_for_ba, + print_mac(mac, (u8*) &ba_resp->sta_addr_lo32), + ba_resp->sta_id); + IWL_DEBUG_TX_REPLY("TID = %d, SeqCtl = %d, bitmap = 0x%X%X, scd_flow = " + "%d, scd_ssn = %d\n", + ba_resp->tid, + ba_resp->ba_seq_ctl, + ba_resp->ba_bitmap1, + ba_resp->ba_bitmap0, + ba_resp->scd_flow, + ba_resp->scd_ssn); + IWL_DEBUG_TX_REPLY("DAT start_idx = %d, bitmap = 0x%X%X \n", + agg->start_idx, + agg->bitmap1, + agg->bitmap0); + } +*/ + iwl4965_tx_status_reply_compressed_ba(priv, agg, ba_resp); + /* releases all the TFDs until the SSN */ + if (txq->q.last_used != (ba_resp_scd_ssn & 0xff)) + iwl_tx_queue_reclaim(priv, ba_resp_scd_flow, index); + +} + + +static void iwl4965_tx_queue_stop_scheduler(struct iwl_priv *priv, u16 txq_id) +{ + iwl_write_restricted_reg(priv, + SCD_QUEUE_STATUS_BITS(txq_id), + (0 << SCD_QUEUE_STTS_REG_POS_ACTIVE)| + (1 << SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN)); +} + +static int iwl4965_tx_queue_set_q2ratid(struct iwl_priv *priv, u16 ra_tid, + u16 txq_id) +{ + u32 tbl_dw_addr; + u32 tbl_dw; + u16 scd_q2ratid; + + scd_q2ratid = ra_tid & SCD_QUEUE_RA_TID_MAP_RATID_MSK; + + tbl_dw_addr = priv->scd_base_addr + + SCD_TRANSLATE_TBL_OFFSET_QUEUE(txq_id); + + tbl_dw = iwl_read_restricted_mem(priv, tbl_dw_addr); + + if (txq_id & 0x1) + tbl_dw = (scd_q2ratid << 16) | (tbl_dw & 0x0000FFFF); + else + tbl_dw = scd_q2ratid | (tbl_dw & 0xFFFF0000); + + iwl_write_restricted_mem(priv, tbl_dw_addr, tbl_dw); + + return 0; +} + +/** + * txq_id must be greater than IWL_BACK_QUEUE_FIRST_ID + */ +static int iwl4965_tx_queue_agg_enable(struct iwl_priv *priv, int txq_id, + int tx_fifo, int sta_id, int tid, + u16 ssn_idx) +{ + unsigned long flags; + int rc; + u16 ra_tid; + + if (IWL_BACK_QUEUE_FIRST_ID > txq_id) + IWL_WARNING("queue number too small: %d, must be > %d\n", + txq_id, IWL_BACK_QUEUE_FIRST_ID); + + ra_tid = BUILD_RAxTID(sta_id, tid); + + iwl_sta_modify_enable_tid_tx(priv, sta_id, tid); + + spin_lock_irqsave(&priv->lock, flags); + rc = iwl_grab_restricted_access(priv); + if (rc) { + spin_unlock_irqrestore(&priv->lock, flags); + return rc; + } + + iwl4965_tx_queue_stop_scheduler(priv, txq_id); + + iwl4965_tx_queue_set_q2ratid(priv, ra_tid, txq_id); + + + iwl_set_bits_restricted_reg(priv, SCD_QUEUECHAIN_SEL, (1<txq[txq_id].q.last_used = (ssn_idx & 0xff); + priv->txq[txq_id].q.first_empty = 
(ssn_idx & 0xff); + + /* supposes that ssn_idx is valid (!= 0xFFF) */ + iwl4965_set_wr_ptrs(priv, txq_id, ssn_idx); + + iwl_write_restricted_mem(priv, + priv->scd_base_addr + SCD_CONTEXT_QUEUE_OFFSET(txq_id), + (SCD_WIN_SIZE << SCD_QUEUE_CTX_REG1_WIN_SIZE_POS) & + SCD_QUEUE_CTX_REG1_WIN_SIZE_MSK); + + iwl_write_restricted_mem(priv, priv->scd_base_addr + + SCD_CONTEXT_QUEUE_OFFSET(txq_id) + sizeof(u32), + (SCD_FRAME_LIMIT << SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) + & SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK); + + iwl_set_bits_restricted_reg(priv, SCD_INTERRUPT_MASK, (1 << txq_id)); + + iwl4965_tx_queue_set_status(priv, &priv->txq[txq_id], tx_fifo, 1); + + iwl_release_restricted_access(priv); + spin_unlock_irqrestore(&priv->lock, flags); + + return 0; +} + +/** + * txq_id must be greater than IWL_BACK_QUEUE_FIRST_ID + */ +static int iwl4965_tx_queue_agg_disable(struct iwl_priv *priv, u16 txq_id, + u16 ssn_idx, u8 tx_fifo) +{ + unsigned long flags; + int rc; + + if (IWL_BACK_QUEUE_FIRST_ID > txq_id) { + IWL_WARNING("queue number too small: %d, must be > %d\n", + txq_id, IWL_BACK_QUEUE_FIRST_ID); + return -EINVAL; + } + + spin_lock_irqsave(&priv->lock, flags); + rc = iwl_grab_restricted_access(priv); + if (rc) { + spin_unlock_irqrestore(&priv->lock, flags); + return rc; + } + + iwl4965_tx_queue_stop_scheduler(priv, txq_id); + + iwl_clear_bits_restricted_reg(priv, SCD_QUEUECHAIN_SEL, (1 << txq_id)); + + priv->txq[txq_id].q.last_used = (ssn_idx & 0xff); + priv->txq[txq_id].q.first_empty = (ssn_idx & 0xff); + /* supposes that ssn_idx is valid (!= 0xFFF) */ + iwl4965_set_wr_ptrs(priv, txq_id, ssn_idx); + + iwl_clear_bits_restricted_reg(priv, SCD_INTERRUPT_MASK, (1 << txq_id)); + iwl4965_txq_ctx_deactivate(priv, txq_id); + iwl4965_tx_queue_set_status(priv, &priv->txq[txq_id], tx_fifo, 0); + + iwl_release_restricted_access(priv); + spin_unlock_irqrestore(&priv->lock, flags); + + return 0; +} + +#endif/* CONFIG_IWLWIFI_HT_AGG */ +#endif /* CONFIG_IWLWIFI_HT */ +/* + * RATE SCALE CODE + */ +int iwl4965_init_hw_rates(struct iwl_priv *priv, struct ieee80211_rate *rates) +{ + return 0; +} + + +/** + * iwl4965_add_station - Initialize a station's hardware rate table + * + * The uCode contains a table of fallback rates and retries per rate + * for automatic fallback during transmission. + * + * NOTE: This initializes the table for a single retry per data rate + * which is not optimal. Setting up an intelligent retry per rate + * requires feedback from transmission, which isn't exposed through + * rc80211_simple which is what this driver is currently using. 
+ * + */ +void iwl4965_add_station(struct iwl_priv *priv, const u8 *addr, int is_ap) +{ + int i, r; + struct iwl_link_quality_cmd link_cmd = { + .reserved1 = 0, + }; + u16 rate_flags; + + /* Set up the rate scaling to start at 54M and fallback + * all the way to 1M in IEEE order and then spin on IEEE */ + if (is_ap) + r = IWL_RATE_54M_INDEX; + else if (priv->phymode == MODE_IEEE80211A) + r = IWL_RATE_6M_INDEX; + else + r = IWL_RATE_1M_INDEX; + + for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++) { + rate_flags = 0; + if (r >= IWL_FIRST_CCK_RATE && r <= IWL_LAST_CCK_RATE) + rate_flags |= RATE_MCS_CCK_MSK; + + rate_flags |= RATE_MCS_ANT_B_MSK; + rate_flags &= ~RATE_MCS_ANT_A_MSK; + link_cmd.rs_table[i].rate_n_flags = + iwl_hw_set_rate_n_flags(iwl_rates[r].plcp, rate_flags); + r = iwl_get_prev_ieee_rate(r); + } + + link_cmd.general_params.single_stream_ant_msk = 2; + link_cmd.general_params.dual_stream_ant_msk = 3; + link_cmd.agg_params.agg_dis_start_th = 3; + link_cmd.agg_params.agg_time_limit = cpu_to_le16(4000); + + /* Update the rate scaling for control frame Tx to AP */ + link_cmd.sta_id = is_ap ? IWL_AP_ID : IWL4965_BROADCAST_ID; + + iwl_send_cmd_pdu(priv, REPLY_TX_LINK_QUALITY_CMD, sizeof(link_cmd), + &link_cmd); +} + +#ifdef CONFIG_IWLWIFI_HT + +static u8 iwl_is_channel_extension(struct iwl_priv *priv, int phymode, + u16 channel, u8 extension_chan_offset) +{ + const struct iwl_channel_info *ch_info; + + ch_info = iwl_get_channel_info(priv, phymode, channel); + if (!is_channel_valid(ch_info)) + return 0; + + if (extension_chan_offset == IWL_EXT_CHANNEL_OFFSET_AUTO) + return 0; + + if ((ch_info->fat_extension_channel == extension_chan_offset) || + (ch_info->fat_extension_channel == HT_IE_EXT_CHANNEL_MAX)) + return 1; + + return 0; +} + +static u8 iwl_is_fat_tx_allowed(struct iwl_priv *priv, + const struct sta_ht_info *ht_info) +{ + + if (priv->channel_width != IWL_CHANNEL_WIDTH_40MHZ) + return 0; + + if (ht_info->supported_chan_width != IWL_CHANNEL_WIDTH_40MHZ) + return 0; + + if (ht_info->extension_chan_offset == IWL_EXT_CHANNEL_OFFSET_AUTO) + return 0; + + /* no fat tx allowed on 2.4GHZ */ + if (priv->phymode != MODE_IEEE80211A) + return 0; + return (iwl_is_channel_extension(priv, priv->phymode, + ht_info->control_channel, + ht_info->extension_chan_offset)); +} + +void iwl4965_set_rxon_ht(struct iwl_priv *priv, struct sta_ht_info *ht_info) +{ + struct iwl_rxon_cmd *rxon = &priv->staging_rxon; + u32 val; + + if (!ht_info->is_ht) + return; + + if (iwl_is_fat_tx_allowed(priv, ht_info)) + rxon->flags |= RXON_FLG_CHANNEL_MODE_MIXED_MSK; + else + rxon->flags &= ~(RXON_FLG_CHANNEL_MODE_MIXED_MSK | + RXON_FLG_CHANNEL_MODE_PURE_40_MSK); + + if (le16_to_cpu(rxon->channel) != ht_info->control_channel) { + IWL_DEBUG_ASSOC("control diff than current %d %d\n", + le16_to_cpu(rxon->channel), + ht_info->control_channel); + rxon->channel = cpu_to_le16(ht_info->control_channel); + return; + } + + /* Note: control channel is oposit to extension channel */ + switch (ht_info->extension_chan_offset) { + case IWL_EXT_CHANNEL_OFFSET_ABOVE: + rxon->flags &= ~(RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK); + break; + case IWL_EXT_CHANNEL_OFFSET_BELOW: + rxon->flags |= RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK; + break; + case IWL_EXT_CHANNEL_OFFSET_AUTO: + rxon->flags &= ~RXON_FLG_CHANNEL_MODE_MIXED_MSK; + break; + default: + rxon->flags &= ~RXON_FLG_CHANNEL_MODE_MIXED_MSK; + break; + } + + val = ht_info->operating_mode; + + rxon->flags |= cpu_to_le32(val << RXON_FLG_HT_OPERATING_MODE_POS); + + priv->active_rate_ht[0] = 
ht_info->supp_rates[0]; + priv->active_rate_ht[1] = ht_info->supp_rates[1]; + iwl4965_set_rxon_chain(priv); + + IWL_DEBUG_ASSOC("supported HT rate 0x%X %X " + "rxon flags 0x%X operation mode :0x%X " + "extension channel offset 0x%x " + "control chan %d\n", + priv->active_rate_ht[0], priv->active_rate_ht[1], + le32_to_cpu(rxon->flags), ht_info->operating_mode, + ht_info->extension_chan_offset, + ht_info->control_channel); + return; +} + +void iwl4965_set_ht_add_station(struct iwl_priv *priv, u8 index) +{ + __le32 sta_flags; + struct sta_ht_info *ht_info = &priv->current_assoc_ht; + + priv->current_channel_width = IWL_CHANNEL_WIDTH_20MHZ; + if (!ht_info->is_ht) + goto done; + + sta_flags = priv->stations[index].sta.station_flags; + + if (ht_info->tx_mimo_ps_mode == IWL_MIMO_PS_DYNAMIC) + sta_flags |= STA_FLG_RTS_MIMO_PROT_MSK; + else + sta_flags &= ~STA_FLG_RTS_MIMO_PROT_MSK; + + sta_flags |= cpu_to_le32( + (u32)ht_info->ampdu_factor << STA_FLG_MAX_AGG_SIZE_POS); + + sta_flags |= cpu_to_le32( + (u32)ht_info->mpdu_density << STA_FLG_AGG_MPDU_DENSITY_POS); + + sta_flags &= (~STA_FLG_FAT_EN_MSK); + ht_info->tx_chan_width = IWL_CHANNEL_WIDTH_20MHZ; + ht_info->chan_width_cap = IWL_CHANNEL_WIDTH_20MHZ; + + if (iwl_is_fat_tx_allowed(priv, ht_info)) { + sta_flags |= STA_FLG_FAT_EN_MSK; + ht_info->chan_width_cap = IWL_CHANNEL_WIDTH_40MHZ; + if (ht_info->supported_chan_width == IWL_CHANNEL_WIDTH_40MHZ) + ht_info->tx_chan_width = IWL_CHANNEL_WIDTH_40MHZ; + } + priv->current_channel_width = ht_info->tx_chan_width; + priv->stations[index].sta.station_flags = sta_flags; + done: + return; +} + +#ifdef CONFIG_IWLWIFI_HT_AGG + +static void iwl4965_sta_modify_add_ba_tid(struct iwl_priv *priv, + int sta_id, int tid, u16 ssn) +{ + unsigned long flags; + + spin_lock_irqsave(&priv->sta_lock, flags); + priv->stations[sta_id].sta.station_flags_msk = 0; + priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_ADDBA_TID_MSK; + priv->stations[sta_id].sta.add_immediate_ba_tid = (u8)tid; + priv->stations[sta_id].sta.add_immediate_ba_ssn = cpu_to_le16(ssn); + priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK; + spin_unlock_irqrestore(&priv->sta_lock, flags); + + iwl_send_add_station(priv, &priv->stations[sta_id].sta, CMD_ASYNC); +} + +static void iwl4965_sta_modify_del_ba_tid(struct iwl_priv *priv, + int sta_id, int tid) +{ + unsigned long flags; + + spin_lock_irqsave(&priv->sta_lock, flags); + priv->stations[sta_id].sta.station_flags_msk = 0; + priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_DELBA_TID_MSK; + priv->stations[sta_id].sta.remove_immediate_ba_tid = (u8)tid; + priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK; + spin_unlock_irqrestore(&priv->sta_lock, flags); + + iwl_send_add_station(priv, &priv->stations[sta_id].sta, CMD_ASYNC); +} + +static const u16 default_tid_to_tx_fifo[] = { + IWL_TX_FIFO_AC1, + IWL_TX_FIFO_AC0, + IWL_TX_FIFO_AC0, + IWL_TX_FIFO_AC1, + IWL_TX_FIFO_AC2, + IWL_TX_FIFO_AC2, + IWL_TX_FIFO_AC3, + IWL_TX_FIFO_AC3, + IWL_TX_FIFO_NONE, + IWL_TX_FIFO_NONE, + IWL_TX_FIFO_NONE, + IWL_TX_FIFO_NONE, + IWL_TX_FIFO_NONE, + IWL_TX_FIFO_NONE, + IWL_TX_FIFO_NONE, + IWL_TX_FIFO_NONE, + IWL_TX_FIFO_AC3 +}; + +static int iwl_txq_ctx_activate_free(struct iwl_priv *priv) +{ + int txq_id; + + for (txq_id = 0; txq_id < priv->hw_setting.max_txq_num; txq_id++) + if (!test_and_set_bit(txq_id, &priv->txq_ctx_active_msk)) + return txq_id; + return -1; +} + +int iwl_mac_ht_tx_agg_start(struct ieee80211_hw *hw, u8 *da, u16 tid, + u16 *start_seq_num) +{ + + struct iwl_priv *priv = hw->priv; + int 
sta_id; + int tx_fifo; + int txq_id; + int ssn = -1; + unsigned long flags; + struct iwl_tid_data *tid_data; + DECLARE_MAC_BUF(mac); + + if (likely(tid < ARRAY_SIZE(default_tid_to_tx_fifo))) + tx_fifo = default_tid_to_tx_fifo[tid]; + else + return -EINVAL; + + IWL_WARNING("iwl-AGG iwl_mac_ht_tx_agg_start on da=%s" + " tid=%d\n", print_mac(mac, da), tid); + + sta_id = iwl_hw_find_station(priv, da); + if (sta_id == IWL_INVALID_STATION) + return -ENXIO; + + txq_id = iwl_txq_ctx_activate_free(priv); + if (txq_id == -1) + return -ENXIO; + + spin_lock_irqsave(&priv->sta_lock, flags); + tid_data = &priv->stations[sta_id].tid[tid]; + ssn = SEQ_TO_SN(tid_data->seq_number); + tid_data->agg.txq_id = txq_id; + spin_unlock_irqrestore(&priv->sta_lock, flags); + + *start_seq_num = ssn; + iwl4965_ba_status(priv, tid, BA_STATUS_ACTIVE); + return iwl4965_tx_queue_agg_enable(priv, txq_id, tx_fifo, + sta_id, tid, ssn); +} + + +int iwl_mac_ht_tx_agg_stop(struct ieee80211_hw *hw, u8 *da, u16 tid, + int generator) +{ + + struct iwl_priv *priv = hw->priv; + int tx_fifo_id, txq_id, sta_id, ssn = -1; + struct iwl_tid_data *tid_data; + int rc; + DECLARE_MAC_BUF(mac); + + if (!da) { + IWL_ERROR("%s: da = NULL\n", __func__); + return -EINVAL; + } + + if (likely(tid < ARRAY_SIZE(default_tid_to_tx_fifo))) + tx_fifo_id = default_tid_to_tx_fifo[tid]; + else + return -EINVAL; + + sta_id = iwl_hw_find_station(priv, da); + + if (sta_id == IWL_INVALID_STATION) + return -ENXIO; + + tid_data = &priv->stations[sta_id].tid[tid]; + ssn = (tid_data->seq_number & IEEE80211_SCTL_SEQ) >> 4; + txq_id = tid_data->agg.txq_id; + + rc = iwl4965_tx_queue_agg_disable(priv, txq_id, ssn, tx_fifo_id); + /* FIXME: need more safe way to handle error condition */ + if (rc) + return rc; + + iwl4965_ba_status(priv, tid, BA_STATUS_INITIATOR_DELBA); + IWL_DEBUG_INFO("iwl_mac_ht_tx_agg_stop on da=%s tid=%d\n", + print_mac(mac, da), tid); + + return 0; +} + +int iwl_mac_ht_rx_agg_start(struct ieee80211_hw *hw, u8 *da, + u16 tid, u16 start_seq_num) +{ + struct iwl_priv *priv = hw->priv; + int sta_id; + DECLARE_MAC_BUF(mac); + + IWL_WARNING("iwl-AGG iwl_mac_ht_rx_agg_start on da=%s" + " tid=%d\n", print_mac(mac, da), tid); + sta_id = iwl_hw_find_station(priv, da); + iwl4965_sta_modify_add_ba_tid(priv, sta_id, tid, start_seq_num); + return 0; +} + +int iwl_mac_ht_rx_agg_stop(struct ieee80211_hw *hw, u8 *da, + u16 tid, int generator) +{ + struct iwl_priv *priv = hw->priv; + int sta_id; + DECLARE_MAC_BUF(mac); + + IWL_WARNING("iwl-AGG iwl_mac_ht_rx_agg_stop on da=%s tid=%d\n", + print_mac(mac, da), tid); + sta_id = iwl_hw_find_station(priv, da); + iwl4965_sta_modify_del_ba_tid(priv, sta_id, tid); + return 0; +} + +#endif /* CONFIG_IWLWIFI_HT_AGG */ +#endif /* CONFIG_IWLWIFI_HT */ + +/* Set up 4965-specific Rx frame reply handlers */ +void iwl_hw_rx_handler_setup(struct iwl_priv *priv) +{ + /* Legacy Rx frames */ + priv->rx_handlers[REPLY_4965_RX] = iwl4965_rx_reply_rx; + + /* High-throughput (HT) Rx frames */ + priv->rx_handlers[REPLY_RX_PHY_CMD] = iwl4965_rx_reply_rx_phy; + priv->rx_handlers[REPLY_RX_MPDU_CMD] = iwl4965_rx_reply_rx; + + priv->rx_handlers[MISSED_BEACONS_NOTIFICATION] = + iwl4965_rx_missed_beacon_notif; + +#ifdef CONFIG_IWLWIFI_HT +#ifdef CONFIG_IWLWIFI_HT_AGG + priv->rx_handlers[REPLY_COMPRESSED_BA] = iwl4965_rx_reply_compressed_ba; +#endif /* CONFIG_IWLWIFI_AGG */ +#endif /* CONFIG_IWLWIFI */ +} + +void iwl_hw_setup_deferred_work(struct iwl_priv *priv) +{ + INIT_WORK(&priv->txpower_work, iwl4965_bg_txpower_work); + 
INIT_WORK(&priv->statistics_work, iwl4965_bg_statistics_work); +#ifdef CONFIG_IWLWIFI_SENSITIVITY + INIT_WORK(&priv->sensitivity_work, iwl4965_bg_sensitivity_work); +#endif +#ifdef CONFIG_IWLWIFI_HT +#ifdef CONFIG_IWLWIFI_HT_AGG + INIT_WORK(&priv->agg_work, iwl4965_bg_agg_work); +#endif /* CONFIG_IWLWIFI_AGG */ +#endif /* CONFIG_IWLWIFI_HT */ + init_timer(&priv->statistics_periodic); + priv->statistics_periodic.data = (unsigned long)priv; + priv->statistics_periodic.function = iwl4965_bg_statistics_periodic; +} + +void iwl_hw_cancel_deferred_work(struct iwl_priv *priv) +{ + del_timer_sync(&priv->statistics_periodic); + + cancel_delayed_work(&priv->init_alive_start); +} + +struct pci_device_id iwl_hw_card_ids[] = { + {0x8086, 0x4229, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, + {0x8086, 0x4230, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, + {0} +}; + +int iwl_eeprom_aqcuire_semaphore(struct iwl_priv *priv) +{ + u16 count; + int rc; + + for (count = 0; count < EEPROM_SEM_RETRY_LIMIT; count++) { + iwl_set_bit(priv, CSR_HW_IF_CONFIG_REG, + CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM); + rc = iwl_poll_bit(priv, CSR_HW_IF_CONFIG_REG, + CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM, + CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM, + EEPROM_SEM_TIMEOUT); + if (rc >= 0) { + IWL_DEBUG_IO("Aqcuired semaphore after %d tries.\n", + count+1); + return rc; + } + } + + return rc; +} + +inline void iwl_eeprom_release_semaphore(struct iwl_priv *priv) +{ + iwl_clear_bit(priv, CSR_HW_IF_CONFIG_REG, + CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM); +} + + +MODULE_DEVICE_TABLE(pci, iwl_hw_card_ids); diff --git a/drivers/net/wireless/iwlwifi/iwl-4965.h b/drivers/net/wireless/iwlwifi/iwl-4965.h new file mode 100644 index 0000000000000000000000000000000000000000..4c700812b45beae031e4f317e46668a56eb4c550 --- /dev/null +++ b/drivers/net/wireless/iwlwifi/iwl-4965.h @@ -0,0 +1,341 @@ +/****************************************************************************** + * + * Copyright(c) 2003 - 2007 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA + * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. + * + * Contact Information: + * James P. Ketrenos + * Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + * + *****************************************************************************/ +#ifndef __iwl_4965_h__ +#define __iwl_4965_h__ + +struct iwl_priv; +struct sta_ht_info; + +/* + * Forward declare iwl-4965.c functions for iwl-base.c + */ +extern int iwl_eeprom_aqcuire_semaphore(struct iwl_priv *priv); +extern void iwl_eeprom_release_semaphore(struct iwl_priv *priv); + +extern int iwl4965_tx_queue_update_wr_ptr(struct iwl_priv *priv, + struct iwl_tx_queue *txq, + u16 byte_cnt); +extern void iwl4965_add_station(struct iwl_priv *priv, const u8 *addr, + int is_ap); +extern void iwl4965_set_rxon_ht(struct iwl_priv *priv, + struct sta_ht_info *ht_info); + +extern void iwl4965_set_rxon_chain(struct iwl_priv *priv); +extern int iwl4965_tx_cmd(struct iwl_priv *priv, struct iwl_cmd *out_cmd, + u8 sta_id, dma_addr_t txcmd_phys, + struct ieee80211_hdr *hdr, u8 hdr_len, + struct ieee80211_tx_control *ctrl, void *sta_in); +extern int iwl4965_init_hw_rates(struct iwl_priv *priv, + struct ieee80211_rate *rates); +extern int iwl4965_alive_notify(struct iwl_priv *priv); +extern void iwl4965_update_rate_scaling(struct iwl_priv *priv, u8 mode); +extern void iwl4965_set_ht_add_station(struct iwl_priv *priv, u8 index); + +extern void iwl4965_chain_noise_reset(struct iwl_priv *priv); +extern void iwl4965_init_sensitivity(struct iwl_priv *priv, u8 flags, + u8 force); +extern int iwl4965_set_fat_chan_info(struct iwl_priv *priv, int phymode, + u16 channel, + const struct iwl_eeprom_channel *eeprom_ch, + u8 fat_extension_channel); +extern void iwl4965_rf_kill_ct_config(struct iwl_priv *priv); + +#ifdef CONFIG_IWLWIFI_HT +#ifdef CONFIG_IWLWIFI_HT_AGG +extern int iwl_mac_ht_tx_agg_start(struct ieee80211_hw *hw, u8 *da, + u16 tid, u16 *start_seq_num); +extern int iwl_mac_ht_rx_agg_start(struct ieee80211_hw *hw, u8 *da, + u16 tid, u16 start_seq_num); +extern int iwl_mac_ht_rx_agg_stop(struct ieee80211_hw *hw, u8 *da, + u16 tid, int generator); +extern int iwl_mac_ht_tx_agg_stop(struct ieee80211_hw *hw, u8 *da, + u16 tid, int generator); +extern void iwl4965_turn_off_agg(struct iwl_priv *priv, u8 tid); +#endif /* CONFIG_IWLWIFI_HT_AGG */ +#endif /*CONFIG_IWLWIFI_HT */ +/* Structures, enum, and defines specific to the 4965 */ + +#define IWL4965_KW_SIZE 0x1000 /*4k */ + +struct iwl_kw { + dma_addr_t dma_addr; + void *v_addr; + size_t size; +}; + +#define TID_QUEUE_CELL_SPACING 50 /*mS */ +#define TID_QUEUE_MAX_SIZE 20 +#define TID_ROUND_VALUE 5 /* mS */ +#define TID_MAX_LOAD_COUNT 8 + +#define TID_MAX_TIME_DIFF ((TID_QUEUE_MAX_SIZE - 1) * TID_QUEUE_CELL_SPACING) +#define TIME_WRAP_AROUND(x, y) (((y) > (x)) ? 
(y) - (x) : (0-(x)) + (y)) + +#define TID_ALL_ENABLED 0x7f +#define TID_ALL_SPECIFIED 0xff +#define TID_AGG_TPT_THREHOLD 0x0 + +#define IWL_CHANNEL_WIDTH_20MHZ 0 +#define IWL_CHANNEL_WIDTH_40MHZ 1 + +#define IWL_MIMO_PS_STATIC 0 +#define IWL_MIMO_PS_NONE 3 +#define IWL_MIMO_PS_DYNAMIC 1 +#define IWL_MIMO_PS_INVALID 2 + +#define IWL_OPERATION_MODE_AUTO 0 +#define IWL_OPERATION_MODE_HT_ONLY 1 +#define IWL_OPERATION_MODE_MIXED 2 +#define IWL_OPERATION_MODE_20MHZ 3 + +#define IWL_EXT_CHANNEL_OFFSET_AUTO 0 +#define IWL_EXT_CHANNEL_OFFSET_ABOVE 1 +#define IWL_EXT_CHANNEL_OFFSET_ 2 +#define IWL_EXT_CHANNEL_OFFSET_BELOW 3 +#define IWL_EXT_CHANNEL_OFFSET_MAX 4 + +#define NRG_NUM_PREV_STAT_L 20 +#define NUM_RX_CHAINS (3) + +#define TX_POWER_IWL_ILLEGAL_VDET -100000 +#define TX_POWER_IWL_ILLEGAL_VOLTAGE -10000 +#define TX_POWER_IWL_CLOSED_LOOP_MIN_POWER 18 +#define TX_POWER_IWL_CLOSED_LOOP_MAX_POWER 34 +#define TX_POWER_IWL_VDET_SLOPE_BELOW_NOMINAL 17 +#define TX_POWER_IWL_VDET_SLOPE_ABOVE_NOMINAL 20 +#define TX_POWER_IWL_NOMINAL_POWER 26 +#define TX_POWER_IWL_CLOSED_LOOP_ITERATION_LIMIT 1 +#define TX_POWER_IWL_VOLTAGE_CODES_PER_03V 7 +#define TX_POWER_IWL_DEGREES_PER_VDET_CODE 11 +#define IWL_TX_POWER_MAX_NUM_PA_MEASUREMENTS 1 +#define IWL_TX_POWER_CCK_COMPENSATION_B_STEP (9) +#define IWL_TX_POWER_CCK_COMPENSATION_C_STEP (5) + +struct iwl_traffic_load { + unsigned long time_stamp; + u32 packet_count[TID_QUEUE_MAX_SIZE]; + u8 queue_count; + u8 head; + u32 total; +}; + +#ifdef CONFIG_IWLWIFI_HT_AGG +struct iwl_agg_control { + unsigned long next_retry; + u32 wait_for_agg_status; + u32 tid_retry; + u32 requested_ba; + u32 granted_ba; + u8 auto_agg; + u32 tid_traffic_load_threshold; + u32 ba_timeout; + struct iwl_traffic_load traffic_load[TID_MAX_LOAD_COUNT]; +}; +#endif /*CONFIG_IWLWIFI_HT_AGG */ + +struct iwl_lq_mngr { +#ifdef CONFIG_IWLWIFI_HT_AGG + struct iwl_agg_control agg_ctrl; +#endif + spinlock_t lock; + s32 max_window_size; + s32 *expected_tpt; + u8 *next_higher_rate; + u8 *next_lower_rate; + unsigned long stamp; + unsigned long stamp_last; + u32 flush_time; + u32 tx_packets; + u8 lq_ready; +}; + + +/* Sensitivity and chain noise calibration */ +#define INTERFERENCE_DATA_AVAILABLE __constant_cpu_to_le32(1) +#define INITIALIZATION_VALUE 0xFFFF +#define CAL_NUM_OF_BEACONS 20 +#define MAXIMUM_ALLOWED_PATHLOSS 15 + +/* Param table within SENSITIVITY_CMD */ +#define HD_MIN_ENERGY_CCK_DET_INDEX (0) +#define HD_MIN_ENERGY_OFDM_DET_INDEX (1) +#define HD_AUTO_CORR32_X1_TH_ADD_MIN_INDEX (2) +#define HD_AUTO_CORR32_X1_TH_ADD_MIN_MRC_INDEX (3) +#define HD_AUTO_CORR40_X4_TH_ADD_MIN_MRC_INDEX (4) +#define HD_AUTO_CORR32_X4_TH_ADD_MIN_INDEX (5) +#define HD_AUTO_CORR32_X4_TH_ADD_MIN_MRC_INDEX (6) +#define HD_BARKER_CORR_TH_ADD_MIN_INDEX (7) +#define HD_BARKER_CORR_TH_ADD_MIN_MRC_INDEX (8) +#define HD_AUTO_CORR40_X4_TH_ADD_MIN_INDEX (9) +#define HD_OFDM_ENERGY_TH_IN_INDEX (10) + +#define SENSITIVITY_CMD_CONTROL_DEFAULT_TABLE __constant_cpu_to_le16(0) +#define SENSITIVITY_CMD_CONTROL_WORK_TABLE __constant_cpu_to_le16(1) + +#define CHAIN_NOISE_MAX_DELTA_GAIN_CODE 3 + +#define MAX_FA_OFDM 50 +#define MIN_FA_OFDM 5 +#define MAX_FA_CCK 50 +#define MIN_FA_CCK 5 + +#define NRG_MIN_CCK 97 +#define NRG_MAX_CCK 0 + +#define AUTO_CORR_MIN_OFDM 85 +#define AUTO_CORR_MIN_OFDM_MRC 170 +#define AUTO_CORR_MIN_OFDM_X1 105 +#define AUTO_CORR_MIN_OFDM_MRC_X1 220 +#define AUTO_CORR_MAX_OFDM 120 +#define AUTO_CORR_MAX_OFDM_MRC 210 +#define AUTO_CORR_MAX_OFDM_X1 140 +#define AUTO_CORR_MAX_OFDM_MRC_X1 270 +#define AUTO_CORR_STEP_OFDM 
1 + +#define AUTO_CORR_MIN_CCK (125) +#define AUTO_CORR_MAX_CCK (200) +#define AUTO_CORR_MIN_CCK_MRC 200 +#define AUTO_CORR_MAX_CCK_MRC 400 +#define AUTO_CORR_STEP_CCK 3 +#define AUTO_CORR_MAX_TH_CCK 160 + +#define NRG_ALG 0 +#define AUTO_CORR_ALG 1 +#define NRG_DIFF 2 +#define NRG_STEP_CCK 2 +#define NRG_MARGIN 8 +#define MAX_NUMBER_CCK_NO_FA 100 + +#define AUTO_CORR_CCK_MIN_VAL_DEF (125) + +#define CHAIN_A 0 +#define CHAIN_B 1 +#define CHAIN_C 2 +#define CHAIN_NOISE_DELTA_GAIN_INIT_VAL 4 +#define ALL_BAND_FILTER 0xFF00 +#define IN_BAND_FILTER 0xFF +#define MIN_AVERAGE_NOISE_MAX_VALUE 0xFFFFFFFF + +enum iwl_false_alarm_state { + IWL_FA_TOO_MANY = 0, + IWL_FA_TOO_FEW = 1, + IWL_FA_GOOD_RANGE = 2, +}; + +enum iwl_chain_noise_state { + IWL_CHAIN_NOISE_ALIVE = 0, /* must be 0 */ + IWL_CHAIN_NOISE_ACCUMULATE = 1, + IWL_CHAIN_NOISE_CALIBRATED = 2, +}; + +enum iwl_sensitivity_state { + IWL_SENS_CALIB_ALLOWED = 0, + IWL_SENS_CALIB_NEED_REINIT = 1, +}; + +enum iwl_calib_enabled_state { + IWL_CALIB_DISABLED = 0, /* must be 0 */ + IWL_CALIB_ENABLED = 1, +}; + +struct statistics_general_data { + u32 beacon_silence_rssi_a; + u32 beacon_silence_rssi_b; + u32 beacon_silence_rssi_c; + u32 beacon_energy_a; + u32 beacon_energy_b; + u32 beacon_energy_c; +}; + +/* Sensitivity calib data */ +struct iwl_sensitivity_data { + u32 auto_corr_ofdm; + u32 auto_corr_ofdm_mrc; + u32 auto_corr_ofdm_x1; + u32 auto_corr_ofdm_mrc_x1; + u32 auto_corr_cck; + u32 auto_corr_cck_mrc; + + u32 last_bad_plcp_cnt_ofdm; + u32 last_fa_cnt_ofdm; + u32 last_bad_plcp_cnt_cck; + u32 last_fa_cnt_cck; + + u32 nrg_curr_state; + u32 nrg_prev_state; + u32 nrg_value[10]; + u8 nrg_silence_rssi[NRG_NUM_PREV_STAT_L]; + u32 nrg_silence_ref; + u32 nrg_energy_idx; + u32 nrg_silence_idx; + u32 nrg_th_cck; + s32 nrg_auto_corr_silence_diff; + u32 num_in_cck_no_fa; + u32 nrg_th_ofdm; + + u8 state; +}; + +/* Chain noise (differential Rx gain) calib data */ +struct iwl_chain_noise_data { + u8 state; + u16 beacon_count; + u32 chain_noise_a; + u32 chain_noise_b; + u32 chain_noise_c; + u32 chain_signal_a; + u32 chain_signal_b; + u32 chain_signal_c; + u8 disconn_array[NUM_RX_CHAINS]; + u8 delta_gain_code[NUM_RX_CHAINS]; + u8 radio_write; +}; + +/* IWL4965 */ +#define RATE_MCS_CODE_MSK 0x7 +#define RATE_MCS_MIMO_POS 3 +#define RATE_MCS_MIMO_MSK 0x8 +#define RATE_MCS_HT_DUP_POS 5 +#define RATE_MCS_HT_DUP_MSK 0x20 +#define RATE_MCS_FLAGS_POS 8 +#define RATE_MCS_HT_POS 8 +#define RATE_MCS_HT_MSK 0x100 +#define RATE_MCS_CCK_POS 9 +#define RATE_MCS_CCK_MSK 0x200 +#define RATE_MCS_GF_POS 10 +#define RATE_MCS_GF_MSK 0x400 + +#define RATE_MCS_FAT_POS 11 +#define RATE_MCS_FAT_MSK 0x800 +#define RATE_MCS_DUP_POS 12 +#define RATE_MCS_DUP_MSK 0x1000 +#define RATE_MCS_SGI_POS 13 +#define RATE_MCS_SGI_MSK 0x2000 + +#define EEPROM_SEM_TIMEOUT 10 +#define EEPROM_SEM_RETRY_LIMIT 1000 + +#endif /* __iwl_4965_h__ */ diff --git a/drivers/net/wireless/iwlwifi/iwl-channel.h b/drivers/net/wireless/iwlwifi/iwl-channel.h new file mode 100644 index 0000000000000000000000000000000000000000..023c3f240cea6f5212e886607ed7e400a4b81ac7 --- /dev/null +++ b/drivers/net/wireless/iwlwifi/iwl-channel.h @@ -0,0 +1,161 @@ +/****************************************************************************** + * + * Copyright(c) 2005 - 2007 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. 
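The RATE_MCS_* masks above describe how the 4965 packs modulation details (MCS code, HT, greenfield, FAT channel, short GI) into a rate_n_flags word. A minimal decode sketch, assuming a host-order u32 value and using hypothetical helper names:

static inline int iwl4965_rate_is_ht(u32 rate_n_flags)
{
        /* bit 8 set means the rate is an HT MCS rather than a legacy rate */
        return !!(rate_n_flags & RATE_MCS_HT_MSK);
}

static inline int iwl4965_rate_is_sgi(u32 rate_n_flags)
{
        /* bit 13 indicates short guard interval */
        return !!(rate_n_flags & RATE_MCS_SGI_MSK);
}

static inline u8 iwl4965_rate_mcs_code(u32 rate_n_flags)
{
        /* the low three bits carry the rate/MCS code itself */
        return rate_n_flags & RATE_MCS_CODE_MSK;
}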
+ * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA + * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. + * + * Contact Information: + * James P. Ketrenos + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + *****************************************************************************/ +#ifndef __iwl_channel_h__ +#define __iwl_channel_h__ + +#define IWL_NUM_SCAN_RATES (2) + +struct iwl_channel_tgd_info { + u8 type; + s8 max_power; +}; + +struct iwl_channel_tgh_info { + s64 last_radar_time; +}; + +/* current Tx power values to use, one for each rate for each channel. + * requested power is limited by: + * -- regulatory EEPROM limits for this channel + * -- hardware capabilities (clip-powers) + * -- spectrum management + * -- user preference (e.g. iwconfig) + * when requested power is set, base power index must also be set. */ +struct iwl_channel_power_info { + struct iwl_tx_power tpc; /* actual radio and DSP gain settings */ + s8 power_table_index; /* actual (compenst'd) index into gain table */ + s8 base_power_index; /* gain index for power at factory temp. */ + s8 requested_power; /* power (dBm) requested for this chnl/rate */ +}; + +/* current scan Tx power values to use, one for each scan rate for each + * channel. */ +struct iwl_scan_power_info { + struct iwl_tx_power tpc; /* actual radio and DSP gain settings */ + s8 power_table_index; /* actual (compenst'd) index into gain table */ + s8 requested_power; /* scan pwr (dBm) requested for chnl/rate */ +}; + +/* Channel unlock period is 15 seconds. If no beacon or probe response + * has been received within 15 seconds on a locked channel then the channel + * remains locked. */ +#define TX_UNLOCK_PERIOD 15 + +/* CSA lock period is 15 seconds. If a CSA has been received on a channel in + * the last 15 seconds, the channel is locked */ +#define CSA_LOCK_PERIOD 15 +/* + * One for each channel, holds all channel setup data + * Some of the fields (e.g. eeprom and flags/max_power_avg) are redundant + * with one another! + */ +#define IWL4965_MAX_RATE (33) + +struct iwl_channel_info { + struct iwl_channel_tgd_info tgd; + struct iwl_channel_tgh_info tgh; + struct iwl_eeprom_channel eeprom; /* EEPROM regulatory limit */ + struct iwl_eeprom_channel fat_eeprom; /* EEPROM regulatory limit for + * FAT channel */ + + u8 channel; /* channel number */ + u8 flags; /* flags copied from EEPROM */ + s8 max_power_avg; /* (dBm) regul. eeprom, normal Tx, any rate */ + s8 curr_txpow; /* (dBm) regulatory/spectrum/user (not h/w) */ + s8 min_power; /* always 0 */ + s8 scan_power; /* (dBm) regul. eeprom, direct scans, any rate */ + + u8 group_index; /* 0-4, maps channel to group1/2/3/4/5 */ + u8 band_index; /* 0-4, maps channel to band1/2/3/4/5 */ + u8 phymode; /* MODE_IEEE80211{A,B,G} */ + + /* Radio/DSP gain settings for each "normal" data Tx rate. + * These include, in addition to RF and DSP gain, a few fields for + * remembering/modifying gain settings (indexes). 
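As the comment above says, the power actually requested for a channel/rate is the most restrictive of several limits. A minimal sketch of that clamping with a hypothetical helper (the real driver also folds in spectrum-management constraints and the per-band clip powers of iwl_clip_group below):

static s8 iwl_clamp_requested_power_example(s8 user_dbm, s8 regulatory_dbm,
                                            s8 clip_dbm)
{
        s8 power = user_dbm;

        if (power > regulatory_dbm)
                power = regulatory_dbm;         /* EEPROM regulatory limit */
        if (power > clip_dbm)
                power = clip_dbm;               /* hardware clip power */
        return power;
}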
*/ + struct iwl_channel_power_info power_info[IWL4965_MAX_RATE]; + +#if IWL == 4965 + /* FAT channel info */ + s8 fat_max_power_avg; /* (dBm) regul. eeprom, normal Tx, any rate */ + s8 fat_curr_txpow; /* (dBm) regulatory/spectrum/user (not h/w) */ + s8 fat_min_power; /* always 0 */ + s8 fat_scan_power; /* (dBm) eeprom, direct scans, any rate */ + u8 fat_flags; /* flags copied from EEPROM */ + u8 fat_extension_channel; +#endif + + /* Radio/DSP gain settings for each scan rate, for directed scans. */ + struct iwl_scan_power_info scan_pwr_info[IWL_NUM_SCAN_RATES]; +}; + +struct iwl_clip_group { + /* maximum power level to prevent clipping for each rate, derived by + * us from this band's saturation power in EEPROM */ + const s8 clip_powers[IWL_MAX_RATES]; +}; + +static inline int is_channel_valid(const struct iwl_channel_info *ch_info) +{ + if (ch_info == NULL) + return 0; + return (ch_info->flags & EEPROM_CHANNEL_VALID) ? 1 : 0; +} + +static inline int is_channel_narrow(const struct iwl_channel_info *ch_info) +{ + return (ch_info->flags & EEPROM_CHANNEL_NARROW) ? 1 : 0; +} + +static inline int is_channel_radar(const struct iwl_channel_info *ch_info) +{ + return (ch_info->flags & EEPROM_CHANNEL_RADAR) ? 1 : 0; +} + +static inline u8 is_channel_a_band(const struct iwl_channel_info *ch_info) +{ + return ch_info->phymode == MODE_IEEE80211A; +} + +static inline u8 is_channel_bg_band(const struct iwl_channel_info *ch_info) +{ + return ((ch_info->phymode == MODE_IEEE80211B) || + (ch_info->phymode == MODE_IEEE80211G)); +} + +static inline int is_channel_passive(const struct iwl_channel_info *ch) +{ + return (!(ch->flags & EEPROM_CHANNEL_ACTIVE)) ? 1 : 0; +} + +static inline int is_channel_ibss(const struct iwl_channel_info *ch) +{ + return ((ch->flags & EEPROM_CHANNEL_IBSS)) ? 1 : 0; +} + +extern const struct iwl_channel_info *iwl_get_channel_info( + const struct iwl_priv *priv, int phymode, u16 channel); + +#endif diff --git a/drivers/net/wireless/iwlwifi/iwl-commands.h b/drivers/net/wireless/iwlwifi/iwl-commands.h new file mode 100644 index 0000000000000000000000000000000000000000..9de8d7f6efa3a2a6af910b44853c4a753f9b3a61 --- /dev/null +++ b/drivers/net/wireless/iwlwifi/iwl-commands.h @@ -0,0 +1,1734 @@ +/****************************************************************************** + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2005 - 2007 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU Geeral Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, + * USA + * + * The full GNU General Public License is included in this distribution + * in the file called LICENSE.GPL. + * + * Contact Information: + * James P. Ketrenos + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + * BSD LICENSE + * + * Copyright(c) 2005 - 2007 Intel Corporation. 
All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + *****************************************************************************/ + +#ifndef __iwl_commands_h__ +#define __iwl_commands_h__ + +enum { + REPLY_ALIVE = 0x1, + REPLY_ERROR = 0x2, + + /* RXON and QOS commands */ + REPLY_RXON = 0x10, + REPLY_RXON_ASSOC = 0x11, + REPLY_QOS_PARAM = 0x13, + REPLY_RXON_TIMING = 0x14, + + /* Multi-Station support */ + REPLY_ADD_STA = 0x18, + REPLY_REMOVE_STA = 0x19, /* not used */ + REPLY_REMOVE_ALL_STA = 0x1a, /* not used */ + + /* RX, TX, LEDs */ +#if IWL == 3945 + REPLY_3945_RX = 0x1b, /* 3945 only */ +#endif + REPLY_TX = 0x1c, + REPLY_RATE_SCALE = 0x47, /* 3945 only */ + REPLY_LEDS_CMD = 0x48, + REPLY_TX_LINK_QUALITY_CMD = 0x4e, /* 4965 only */ + + /* 802.11h related */ + RADAR_NOTIFICATION = 0x70, /* not used */ + REPLY_QUIET_CMD = 0x71, /* not used */ + REPLY_CHANNEL_SWITCH = 0x72, + CHANNEL_SWITCH_NOTIFICATION = 0x73, + REPLY_SPECTRUM_MEASUREMENT_CMD = 0x74, + SPECTRUM_MEASURE_NOTIFICATION = 0x75, + + /* Power Management */ + POWER_TABLE_CMD = 0x77, + PM_SLEEP_NOTIFICATION = 0x7A, + PM_DEBUG_STATISTIC_NOTIFIC = 0x7B, + + /* Scan commands and notifications */ + REPLY_SCAN_CMD = 0x80, + REPLY_SCAN_ABORT_CMD = 0x81, + SCAN_START_NOTIFICATION = 0x82, + SCAN_RESULTS_NOTIFICATION = 0x83, + SCAN_COMPLETE_NOTIFICATION = 0x84, + + /* IBSS/AP commands */ + BEACON_NOTIFICATION = 0x90, + REPLY_TX_BEACON = 0x91, + WHO_IS_AWAKE_NOTIFICATION = 0x94, /* not used */ + + /* Miscellaneous commands */ + QUIET_NOTIFICATION = 0x96, /* not used */ + REPLY_TX_PWR_TABLE_CMD = 0x97, + MEASURE_ABORT_NOTIFICATION = 0x99, /* not used */ + + /* BT config command */ + REPLY_BT_CONFIG = 0x9b, + + /* 4965 Statistics */ + REPLY_STATISTICS_CMD = 0x9c, + STATISTICS_NOTIFICATION = 0x9d, + + /* RF-KILL commands and notifications */ + REPLY_CARD_STATE_CMD = 0xa0, + CARD_STATE_NOTIFICATION = 0xa1, + + /* Missed beacons notification */ + MISSED_BEACONS_NOTIFICATION = 0xa2, + +#if IWL == 4965 + REPLY_CT_KILL_CONFIG_CMD = 0xa4, + SENSITIVITY_CMD = 
0xa8, + REPLY_PHY_CALIBRATION_CMD = 0xb0, + REPLY_RX_PHY_CMD = 0xc0, + REPLY_RX_MPDU_CMD = 0xc1, + REPLY_4965_RX = 0xc3, + REPLY_COMPRESSED_BA = 0xc5, +#endif + REPLY_MAX = 0xff +}; + +/****************************************************************************** + * (0) + * Header + * + *****************************************************************************/ + +#define IWL_CMD_FAILED_MSK 0x40 + +struct iwl_cmd_header { + u8 cmd; + u8 flags; + /* We have 15 LSB to use as we please (MSB indicates + * a frame Rx'd from the HW). We encode the following + * information into the sequence field: + * + * 0:7 index in fifo + * 8:13 fifo selection + * 14:14 bit indicating if this packet references the 'extra' + * storage at the end of the memory queue + * 15:15 (Rx indication) + * + */ + __le16 sequence; + + /* command data follows immediately */ + u8 data[0]; +} __attribute__ ((packed)); + +/****************************************************************************** + * (0a) + * Alive and Error Commands & Responses: + * + *****************************************************************************/ + +#define UCODE_VALID_OK __constant_cpu_to_le32(0x1) +#define INITIALIZE_SUBTYPE (9) + +/* + * REPLY_ALIVE = 0x1 (response only, not a command) + */ +struct iwl_alive_resp { + u8 ucode_minor; + u8 ucode_major; + __le16 reserved1; + u8 sw_rev[8]; + u8 ver_type; + u8 ver_subtype; + __le16 reserved2; + __le32 log_event_table_ptr; + __le32 error_event_table_ptr; + __le32 timestamp; + __le32 is_valid; +} __attribute__ ((packed)); + +struct iwl_init_alive_resp { + u8 ucode_minor; + u8 ucode_major; + __le16 reserved1; + u8 sw_rev[8]; + u8 ver_type; + u8 ver_subtype; + __le16 reserved2; + __le32 log_event_table_ptr; + __le32 error_event_table_ptr; + __le32 timestamp; + __le32 is_valid; + +#if IWL == 4965 + /* calibration values from "initialize" uCode */ + __le32 voltage; /* signed */ + __le32 therm_r1[2]; /* signed 1st for normal, 2nd for FAT channel */ + __le32 therm_r2[2]; /* signed */ + __le32 therm_r3[2]; /* signed */ + __le32 therm_r4[2]; /* signed */ + __le32 tx_atten[5][2]; /* signed MIMO gain comp, 5 freq groups, + * 2 Tx chains */ +#endif +} __attribute__ ((packed)); + +union tsf { + u8 byte[8]; + __le16 word[4]; + __le32 dw[2]; +}; + +/* + * REPLY_ERROR = 0x2 (response only, not a command) + */ +struct iwl_error_resp { + __le32 error_type; + u8 cmd_id; + u8 reserved1; + __le16 bad_cmd_seq_num; +#if IWL == 3945 + __le16 reserved2; +#endif + __le32 error_info; + union tsf timestamp; +} __attribute__ ((packed)); + +/****************************************************************************** + * (1) + * RXON Commands & Responses: + * + *****************************************************************************/ + +/* + * Rx config defines & structure + */ +/* rx_config device types */ +enum { + RXON_DEV_TYPE_AP = 1, + RXON_DEV_TYPE_ESS = 3, + RXON_DEV_TYPE_IBSS = 4, + RXON_DEV_TYPE_SNIFFER = 6, +}; + +/* rx_config flags */ +/* band & modulation selection */ +#define RXON_FLG_BAND_24G_MSK __constant_cpu_to_le32(1 << 0) +#define RXON_FLG_CCK_MSK __constant_cpu_to_le32(1 << 1) +/* auto detection enable */ +#define RXON_FLG_AUTO_DETECT_MSK __constant_cpu_to_le32(1 << 2) +/* TGg protection when tx */ +#define RXON_FLG_TGG_PROTECT_MSK __constant_cpu_to_le32(1 << 3) +/* cck short slot & preamble */ +#define RXON_FLG_SHORT_SLOT_MSK __constant_cpu_to_le32(1 << 4) +#define RXON_FLG_SHORT_PREAMBLE_MSK __constant_cpu_to_le32(1 << 5) +/* antenna selection */ +#define RXON_FLG_DIS_DIV_MSK 
__constant_cpu_to_le32(1 << 7) +#define RXON_FLG_ANT_SEL_MSK __constant_cpu_to_le32(0x0f00) +#define RXON_FLG_ANT_A_MSK __constant_cpu_to_le32(1 << 8) +#define RXON_FLG_ANT_B_MSK __constant_cpu_to_le32(1 << 9) +/* radar detection enable */ +#define RXON_FLG_RADAR_DETECT_MSK __constant_cpu_to_le32(1 << 12) +#define RXON_FLG_TGJ_NARROW_BAND_MSK __constant_cpu_to_le32(1 << 13) +/* rx response to host with 8-byte TSF +* (according to ON_AIR deassertion) */ +#define RXON_FLG_TSF2HOST_MSK __constant_cpu_to_le32(1 << 15) + +/* rx_config filter flags */ +/* accept all data frames */ +#define RXON_FILTER_PROMISC_MSK __constant_cpu_to_le32(1 << 0) +/* pass control & management to host */ +#define RXON_FILTER_CTL2HOST_MSK __constant_cpu_to_le32(1 << 1) +/* accept multi-cast */ +#define RXON_FILTER_ACCEPT_GRP_MSK __constant_cpu_to_le32(1 << 2) +/* don't decrypt uni-cast frames */ +#define RXON_FILTER_DIS_DECRYPT_MSK __constant_cpu_to_le32(1 << 3) +/* don't decrypt multi-cast frames */ +#define RXON_FILTER_DIS_GRP_DECRYPT_MSK __constant_cpu_to_le32(1 << 4) +/* STA is associated */ +#define RXON_FILTER_ASSOC_MSK __constant_cpu_to_le32(1 << 5) +/* transfer to host non bssid beacons in associated state */ +#define RXON_FILTER_BCON_AWARE_MSK __constant_cpu_to_le32(1 << 6) + +/* + * REPLY_RXON = 0x10 (command, has simple generic response) + */ +struct iwl_rxon_cmd { + u8 node_addr[6]; + __le16 reserved1; + u8 bssid_addr[6]; + __le16 reserved2; + u8 wlap_bssid_addr[6]; + __le16 reserved3; + u8 dev_type; + u8 air_propagation; +#if IWL == 3945 + __le16 reserved4; +#elif IWL == 4965 + __le16 rx_chain; +#endif + u8 ofdm_basic_rates; + u8 cck_basic_rates; + __le16 assoc_id; + __le32 flags; + __le32 filter_flags; + __le16 channel; +#if IWL == 3945 + __le16 reserved5; +#elif IWL == 4965 + u8 ofdm_ht_single_stream_basic_rates; + u8 ofdm_ht_dual_stream_basic_rates; +#endif +} __attribute__ ((packed)); + +/* + * REPLY_RXON_ASSOC = 0x11 (command, has simple generic response) + */ +struct iwl_rxon_assoc_cmd { + __le32 flags; + __le32 filter_flags; + u8 ofdm_basic_rates; + u8 cck_basic_rates; +#if IWL == 4965 + u8 ofdm_ht_single_stream_basic_rates; + u8 ofdm_ht_dual_stream_basic_rates; + __le16 rx_chain_select_flags; +#endif + __le16 reserved; +} __attribute__ ((packed)); + +/* + * REPLY_RXON_TIMING = 0x14 (command, has simple generic response) + */ +struct iwl_rxon_time_cmd { + union tsf timestamp; + __le16 beacon_interval; + __le16 atim_window; + __le32 beacon_init_val; + __le16 listen_interval; + __le16 reserved; +} __attribute__ ((packed)); + +struct iwl_tx_power { + u8 tx_gain; /* gain for analog radio */ + u8 dsp_atten; /* gain for DSP */ +} __attribute__ ((packed)); + +#if IWL == 3945 +struct iwl_power_per_rate { + u8 rate; /* plcp */ + struct iwl_tx_power tpc; + u8 reserved; +} __attribute__ ((packed)); + +#elif IWL == 4965 +#define POWER_TABLE_NUM_ENTRIES 33 +#define POWER_TABLE_NUM_HT_OFDM_ENTRIES 32 +#define POWER_TABLE_CCK_ENTRY 32 +struct tx_power_dual_stream { + __le32 dw; +} __attribute__ ((packed)); + +struct iwl_tx_power_db { + struct tx_power_dual_stream power_tbl[POWER_TABLE_NUM_ENTRIES]; +} __attribute__ ((packed)); +#endif + +/* + * REPLY_CHANNEL_SWITCH = 0x72 (command, has simple generic response) + */ +struct iwl_channel_switch_cmd { + u8 band; + u8 expect_beacon; + __le16 channel; + __le32 rxon_flags; + __le32 rxon_filter_flags; + __le32 switch_time; +#if IWL == 3945 + struct iwl_power_per_rate power[IWL_MAX_RATES]; +#elif IWL == 4965 + struct iwl_tx_power_db tx_power; +#endif +} __attribute__ 
((packed)); + +/* + * CHANNEL_SWITCH_NOTIFICATION = 0x73 (notification only, not a command) + */ +struct iwl_csa_notification { + __le16 band; + __le16 channel; + __le32 status; /* 0 - OK, 1 - fail */ +} __attribute__ ((packed)); + +/****************************************************************************** + * (2) + * Quality-of-Service (QOS) Commands & Responses: + * + *****************************************************************************/ +struct iwl_ac_qos { + __le16 cw_min; + __le16 cw_max; + u8 aifsn; + u8 reserved1; + __le16 edca_txop; +} __attribute__ ((packed)); + +/* QoS flags defines */ +#define QOS_PARAM_FLG_UPDATE_EDCA_MSK __constant_cpu_to_le32(0x01) +#define QOS_PARAM_FLG_TGN_MSK __constant_cpu_to_le32(0x02) +#define QOS_PARAM_FLG_TXOP_TYPE_MSK __constant_cpu_to_le32(0x10) + +/* + * TXFIFO Queue number defines + */ +/* number of Access categories (AC) (EDCA), queues 0..3 */ +#define AC_NUM 4 + +/* + * REPLY_QOS_PARAM = 0x13 (command, has simple generic response) + */ +struct iwl_qosparam_cmd { + __le32 qos_flags; + struct iwl_ac_qos ac[AC_NUM]; +} __attribute__ ((packed)); + +/****************************************************************************** + * (3) + * Add/Modify Stations Commands & Responses: + * + *****************************************************************************/ +/* + * Multi station support + */ +#define IWL_AP_ID 0 +#define IWL_MULTICAST_ID 1 +#define IWL_STA_ID 2 + +#define IWL3945_BROADCAST_ID 24 +#define IWL3945_STATION_COUNT 25 + +#define IWL4965_BROADCAST_ID 31 +#define IWL4965_STATION_COUNT 32 + +#define IWL_STATION_COUNT 32 /* MAX(3945,4965)*/ +#define IWL_INVALID_STATION 255 + +#if IWL == 3945 +#define STA_FLG_TX_RATE_MSK __constant_cpu_to_le32(1<<2); +#endif +#define STA_FLG_PWR_SAVE_MSK __constant_cpu_to_le32(1<<8); + +#define STA_CONTROL_MODIFY_MSK 0x01 + +/* key flags __le16*/ +#define STA_KEY_FLG_ENCRYPT_MSK __constant_cpu_to_le16(0x7) +#define STA_KEY_FLG_NO_ENC __constant_cpu_to_le16(0x0) +#define STA_KEY_FLG_WEP __constant_cpu_to_le16(0x1) +#define STA_KEY_FLG_CCMP __constant_cpu_to_le16(0x2) +#define STA_KEY_FLG_TKIP __constant_cpu_to_le16(0x3) + +#define STA_KEY_FLG_KEYID_POS 8 +#define STA_KEY_FLG_INVALID __constant_cpu_to_le16(0x0800) + +/* modify flags */ +#define STA_MODIFY_KEY_MASK 0x01 +#define STA_MODIFY_TID_DISABLE_TX 0x02 +#define STA_MODIFY_TX_RATE_MSK 0x04 +#define STA_MODIFY_ADDBA_TID_MSK 0x08 +#define STA_MODIFY_DELBA_TID_MSK 0x10 +#define BUILD_RAxTID(sta_id, tid) (((sta_id) << 4) + (tid)) + +/* + * Antenna masks: + * bit14:15 01 B inactive, A active + * 10 B active, A inactive + * 11 Both active + */ +#define RATE_MCS_ANT_A_POS 14 +#define RATE_MCS_ANT_B_POS 15 +#define RATE_MCS_ANT_A_MSK 0x4000 +#define RATE_MCS_ANT_B_MSK 0x8000 +#define RATE_MCS_ANT_AB_MSK 0xc000 + +struct iwl_keyinfo { + __le16 key_flags; + u8 tkip_rx_tsc_byte2; /* TSC[2] for key mix ph1 detection */ + u8 reserved1; + __le16 tkip_rx_ttak[5]; /* 10-byte unicast TKIP TTAK */ + __le16 reserved2; + u8 key[16]; /* 16-byte unicast decryption key */ +} __attribute__ ((packed)); + +struct sta_id_modify { + u8 addr[ETH_ALEN]; + __le16 reserved1; + u8 sta_id; + u8 modify_mask; + __le16 reserved2; +} __attribute__ ((packed)); + +/* + * REPLY_ADD_STA = 0x18 (command) + */ +struct iwl_addsta_cmd { + u8 mode; + u8 reserved[3]; + struct sta_id_modify sta; + struct iwl_keyinfo key; + __le32 station_flags; + __le32 station_flags_msk; + __le16 tid_disable_tx; +#if IWL == 3945 + __le16 rate_n_flags; +#else + __le16 reserved1; +#endif + u8 
add_immediate_ba_tid; + u8 remove_immediate_ba_tid; + __le16 add_immediate_ba_ssn; +#if IWL == 4965 + __le32 reserved2; +#endif +} __attribute__ ((packed)); + +/* + * REPLY_ADD_STA = 0x18 (response) + */ +struct iwl_add_sta_resp { + u8 status; +} __attribute__ ((packed)); + +#define ADD_STA_SUCCESS_MSK 0x1 + +/****************************************************************************** + * (4) + * Rx Responses: + * + *****************************************************************************/ + +struct iwl_rx_frame_stats { + u8 phy_count; + u8 id; + u8 rssi; + u8 agc; + __le16 sig_avg; + __le16 noise_diff; + u8 payload[0]; +} __attribute__ ((packed)); + +struct iwl_rx_frame_hdr { + __le16 channel; + __le16 phy_flags; + u8 reserved1; + u8 rate; + __le16 len; + u8 payload[0]; +} __attribute__ ((packed)); + +#define RX_RES_STATUS_NO_CRC32_ERROR __constant_cpu_to_le32(1 << 0) +#define RX_RES_STATUS_NO_RXE_OVERFLOW __constant_cpu_to_le32(1 << 1) + +#define RX_RES_PHY_FLAGS_BAND_24_MSK __constant_cpu_to_le16(1 << 0) +#define RX_RES_PHY_FLAGS_MOD_CCK_MSK __constant_cpu_to_le16(1 << 1) +#define RX_RES_PHY_FLAGS_SHORT_PREAMBLE_MSK __constant_cpu_to_le16(1 << 2) +#define RX_RES_PHY_FLAGS_NARROW_BAND_MSK __constant_cpu_to_le16(1 << 3) +#define RX_RES_PHY_FLAGS_ANTENNA_MSK __constant_cpu_to_le16(0xf0) + +#define RX_RES_STATUS_SEC_TYPE_MSK (0x7 << 8) +#define RX_RES_STATUS_SEC_TYPE_NONE (0x0 << 8) +#define RX_RES_STATUS_SEC_TYPE_WEP (0x1 << 8) +#define RX_RES_STATUS_SEC_TYPE_CCMP (0x2 << 8) +#define RX_RES_STATUS_SEC_TYPE_TKIP (0x3 << 8) + +#define RX_RES_STATUS_DECRYPT_TYPE_MSK (0x3 << 11) +#define RX_RES_STATUS_NOT_DECRYPT (0x0 << 11) +#define RX_RES_STATUS_DECRYPT_OK (0x3 << 11) +#define RX_RES_STATUS_BAD_ICV_MIC (0x1 << 11) +#define RX_RES_STATUS_BAD_KEY_TTAK (0x2 << 11) + +struct iwl_rx_frame_end { + __le32 status; + __le64 timestamp; + __le32 beacon_timestamp; +} __attribute__ ((packed)); + +/* + * REPLY_3945_RX = 0x1b (response only, not a command) + * + * NOTE: DO NOT dereference from casts to this structure + * It is provided only for calculating minimum data set size. + * The actual offsets of the hdr and end are dynamic based on + * stats.phy_count + */ +struct iwl_rx_frame { + struct iwl_rx_frame_stats stats; + struct iwl_rx_frame_hdr hdr; + struct iwl_rx_frame_end end; +} __attribute__ ((packed)); + +/* Fixed (non-configurable) rx data from phy */ +#define RX_PHY_FLAGS_ANTENNAE_OFFSET (4) +#define RX_PHY_FLAGS_ANTENNAE_MASK (0x70) +#define IWL_AGC_DB_MASK (0x3f80) /* MASK(7,13) */ +#define IWL_AGC_DB_POS (7) +struct iwl4965_rx_non_cfg_phy { + __le16 ant_selection; /* ant A bit 4, ant B bit 5, ant C bit 6 */ + __le16 agc_info; /* agc code 0:6, agc dB 7:13, reserved 14:15 */ + u8 rssi_info[6]; /* we use even entries, 0/2/4 for A/B/C rssi */ + u8 pad[0]; +} __attribute__ ((packed)); + +/* + * REPLY_4965_RX = 0xc3 (response only, not a command) + * Used only for legacy (non 11n) frames. + */ +#define RX_RES_PHY_CNT 14 +struct iwl4965_rx_phy_res { + u8 non_cfg_phy_cnt; /* non configurable DSP phy data byte count */ + u8 cfg_phy_cnt; /* configurable DSP phy data byte count */ + u8 stat_id; /* configurable DSP phy data set ID */ + u8 reserved1; + __le64 timestamp; /* TSF at on air rise */ + __le32 beacon_time_stamp; /* beacon at on-air rise */ + __le16 phy_flags; /* general phy flags: band, modulation, ... 
*/ + __le16 channel; /* channel number */ + __le16 non_cfg_phy[RX_RES_PHY_CNT]; /* upto 14 phy entries */ + __le32 reserved2; + __le32 rate_n_flags; + __le16 byte_count; /* frame's byte-count */ + __le16 reserved3; +} __attribute__ ((packed)); + +struct iwl4965_rx_mpdu_res_start { + __le16 byte_count; + __le16 reserved; +} __attribute__ ((packed)); + + +/****************************************************************************** + * (5) + * Tx Commands & Responses: + * + *****************************************************************************/ + +/* Tx flags */ +#define TX_CMD_FLG_RTS_MSK __constant_cpu_to_le32(1 << 1) +#define TX_CMD_FLG_CTS_MSK __constant_cpu_to_le32(1 << 2) +#define TX_CMD_FLG_ACK_MSK __constant_cpu_to_le32(1 << 3) +#define TX_CMD_FLG_STA_RATE_MSK __constant_cpu_to_le32(1 << 4) +#define TX_CMD_FLG_IMM_BA_RSP_MASK __constant_cpu_to_le32(1 << 6) +#define TX_CMD_FLG_FULL_TXOP_PROT_MSK __constant_cpu_to_le32(1 << 7) +#define TX_CMD_FLG_ANT_SEL_MSK __constant_cpu_to_le32(0xf00) +#define TX_CMD_FLG_ANT_A_MSK __constant_cpu_to_le32(1 << 8) +#define TX_CMD_FLG_ANT_B_MSK __constant_cpu_to_le32(1 << 9) + +/* ucode ignores BT priority for this frame */ +#define TX_CMD_FLG_BT_DIS_MSK __constant_cpu_to_le32(1 << 12) + +/* ucode overrides sequence control */ +#define TX_CMD_FLG_SEQ_CTL_MSK __constant_cpu_to_le32(1 << 13) + +/* signal that this frame is non-last MPDU */ +#define TX_CMD_FLG_MORE_FRAG_MSK __constant_cpu_to_le32(1 << 14) + +/* calculate TSF in outgoing frame */ +#define TX_CMD_FLG_TSF_MSK __constant_cpu_to_le32(1 << 16) + +/* activate TX calibration. */ +#define TX_CMD_FLG_CALIB_MSK __constant_cpu_to_le32(1 << 17) + +/* signals that 2 bytes pad was inserted + after the MAC header */ +#define TX_CMD_FLG_MH_PAD_MSK __constant_cpu_to_le32(1 << 20) + +/* HCCA-AP - disable duration overwriting. */ +#define TX_CMD_FLG_DUR_MSK __constant_cpu_to_le32(1 << 25) + +/* + * TX command security control + */ +#define TX_CMD_SEC_WEP 0x01 +#define TX_CMD_SEC_CCM 0x02 +#define TX_CMD_SEC_TKIP 0x03 +#define TX_CMD_SEC_MSK 0x03 +#define TX_CMD_SEC_SHIFT 6 +#define TX_CMD_SEC_KEY128 0x08 + +/* + * TX command Frame life time + */ + +struct iwl_dram_scratch { + u8 try_cnt; + u8 bt_kill_cnt; + __le16 reserved; +} __attribute__ ((packed)); + +/* + * REPLY_TX = 0x1c (command) + */ +struct iwl_tx_cmd { + __le16 len; + __le16 next_frame_len; + __le32 tx_flags; +#if IWL == 3945 + u8 rate; + u8 sta_id; + u8 tid_tspec; +#elif IWL == 4965 + struct iwl_dram_scratch scratch; + __le32 rate_n_flags; + u8 sta_id; +#endif + u8 sec_ctl; +#if IWL == 4965 + u8 initial_rate_index; + u8 reserved; +#endif + u8 key[16]; +#if IWL == 3945 + union { + u8 byte[8]; + __le16 word[4]; + __le32 dw[2]; + } tkip_mic; + __le32 next_frame_info; +#elif IWL == 4965 + __le16 next_frame_flags; + __le16 reserved2; +#endif + union { + __le32 life_time; + __le32 attempt; + } stop_time; +#if IWL == 3945 + u8 supp_rates[2]; +#elif IWL == 4965 + __le32 dram_lsb_ptr; + u8 dram_msb_ptr; +#endif + u8 rts_retry_limit; /*byte 50 */ + u8 data_retry_limit; /*byte 51 */ +#if IWL == 4965 + u8 tid_tspec; +#endif + union { + __le16 pm_frame_timeout; + __le16 attempt_duration; + } timeout; + __le16 driver_txop; + u8 payload[0]; + struct ieee80211_hdr hdr[0]; +} __attribute__ ((packed)); + +/* TX command response is sent after *all* transmission attempts. 
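To make the flag and security fields above concrete, here is a minimal sketch that fills the 4965 variant of iwl_tx_cmd for an ACKed, CCMP-protected frame (hypothetical helper name and retry values; assumes the kernel's memcpy):

static void iwl_fill_tx_cmd_example(struct iwl_tx_cmd *cmd, const u8 *ccmp_key)
{
        /* request an ACK and let uCode manage the sequence control */
        cmd->tx_flags |= TX_CMD_FLG_ACK_MSK | TX_CMD_FLG_SEQ_CTL_MSK;

        /* CCMP encryption with the 16-byte key carried in the command */
        cmd->sec_ctl = TX_CMD_SEC_CCM;
        memcpy(cmd->key, ccmp_key, 16);

        /* illustrative retry limits */
        cmd->rts_retry_limit = 60;
        cmd->data_retry_limit = 15;
}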
+ * + * NOTES: + * + * TX_STATUS_FAIL_NEXT_FRAG + * + * If the fragment flag in the MAC header for the frame being transmitted + * is set and there is insufficient time to transmit the next frame, the + * TX status will be returned with 'TX_STATUS_FAIL_NEXT_FRAG'. + * + * TX_STATUS_FIFO_UNDERRUN + * + * Indicates the host did not provide bytes to the FIFO fast enough while + * a TX was in progress. + * + * TX_STATUS_FAIL_MGMNT_ABORT + * + * This status is only possible if the ABORT ON MGMT RX parameter was + * set to true with the TX command. + * + * If the MSB of the status parameter is set then an abort sequence is + * required. This sequence consists of the host activating the TX Abort + * control line, and then waiting for the TX Abort command response. This + * indicates that a the device is no longer in a transmit state, and that the + * command FIFO has been cleared. The host must then deactivate the TX Abort + * control line. Receiving is still allowed in this case. + */ +enum { + TX_STATUS_SUCCESS = 0x01, + TX_STATUS_DIRECT_DONE = 0x02, + TX_STATUS_FAIL_SHORT_LIMIT = 0x82, + TX_STATUS_FAIL_LONG_LIMIT = 0x83, + TX_STATUS_FAIL_FIFO_UNDERRUN = 0x84, + TX_STATUS_FAIL_MGMNT_ABORT = 0x85, + TX_STATUS_FAIL_NEXT_FRAG = 0x86, + TX_STATUS_FAIL_LIFE_EXPIRE = 0x87, + TX_STATUS_FAIL_DEST_PS = 0x88, + TX_STATUS_FAIL_ABORTED = 0x89, + TX_STATUS_FAIL_BT_RETRY = 0x8a, + TX_STATUS_FAIL_STA_INVALID = 0x8b, + TX_STATUS_FAIL_FRAG_DROPPED = 0x8c, + TX_STATUS_FAIL_TID_DISABLE = 0x8d, + TX_STATUS_FAIL_FRAME_FLUSHED = 0x8e, + TX_STATUS_FAIL_INSUFFICIENT_CF_POLL = 0x8f, + TX_STATUS_FAIL_TX_LOCKED = 0x90, + TX_STATUS_FAIL_NO_BEACON_ON_RADAR = 0x91, +}; + +#define TX_PACKET_MODE_REGULAR 0x0000 +#define TX_PACKET_MODE_BURST_SEQ 0x0100 +#define TX_PACKET_MODE_BURST_FIRST 0x0200 + +enum { + TX_POWER_PA_NOT_ACTIVE = 0x0, +}; + +enum { + TX_STATUS_MSK = 0x000000ff, /* bits 0:7 */ + TX_STATUS_DELAY_MSK = 0x00000040, + TX_STATUS_ABORT_MSK = 0x00000080, + TX_PACKET_MODE_MSK = 0x0000ff00, /* bits 8:15 */ + TX_FIFO_NUMBER_MSK = 0x00070000, /* bits 16:18 */ + TX_RESERVED = 0x00780000, /* bits 19:22 */ + TX_POWER_PA_DETECT_MSK = 0x7f800000, /* bits 23:30 */ + TX_ABORT_REQUIRED_MSK = 0x80000000, /* bits 31:31 */ +}; + +/* ******************************* + * TX aggregation state + ******************************* */ + +enum { + AGG_TX_STATE_TRANSMITTED = 0x00, + AGG_TX_STATE_UNDERRUN_MSK = 0x01, + AGG_TX_STATE_BT_PRIO_MSK = 0x02, + AGG_TX_STATE_FEW_BYTES_MSK = 0x04, + AGG_TX_STATE_ABORT_MSK = 0x08, + AGG_TX_STATE_LAST_SENT_TTL_MSK = 0x10, + AGG_TX_STATE_LAST_SENT_TRY_CNT_MSK = 0x20, + AGG_TX_STATE_LAST_SENT_BT_KILL_MSK = 0x40, + AGG_TX_STATE_SCD_QUERY_MSK = 0x80, + AGG_TX_STATE_TEST_BAD_CRC32_MSK = 0x100, + AGG_TX_STATE_RESPONSE_MSK = 0x1ff, + AGG_TX_STATE_DUMP_TX_MSK = 0x200, + AGG_TX_STATE_DELAY_TX_MSK = 0x400 +}; + +#define AGG_TX_STATE_LAST_SENT_MSK \ +(AGG_TX_STATE_LAST_SENT_TTL_MSK | \ + AGG_TX_STATE_LAST_SENT_TRY_CNT_MSK | \ + AGG_TX_STATE_LAST_SENT_BT_KILL_MSK) + +#define AGG_TX_STATE_TRY_CNT_POS 12 +#define AGG_TX_STATE_TRY_CNT_MSK 0xf000 + +#define AGG_TX_STATE_SEQ_NUM_POS 16 +#define AGG_TX_STATE_SEQ_NUM_MSK 0xffff0000 + +/* + * REPLY_TX = 0x1c (response) + */ +#if IWL == 4965 +struct iwl_tx_resp { + u8 frame_count; /* 1 no aggregation, >1 aggregation */ + u8 bt_kill_count; + u8 failure_rts; + u8 failure_frame; + __le32 rate_n_flags; + __le16 wireless_media_time; + __le16 reserved; + __le32 pa_power1; + __le32 pa_power2; + __le32 status; /* TX status (for aggregation status of 1st frame) */ +} __attribute__ 
((packed)); + +#elif IWL == 3945 +struct iwl_tx_resp { + u8 failure_rts; + u8 failure_frame; + u8 bt_kill_count; + u8 rate; + __le32 wireless_media_time; + __le32 status; /* TX status (for aggregation status of 1st frame) */ +} __attribute__ ((packed)); +#endif + +/* + * REPLY_COMPRESSED_BA = 0xc5 (response only, not a command) + */ +struct iwl_compressed_ba_resp { + __le32 sta_addr_lo32; + __le16 sta_addr_hi16; + __le16 reserved; + u8 sta_id; + u8 tid; + __le16 ba_seq_ctl; + __le32 ba_bitmap0; + __le32 ba_bitmap1; + __le16 scd_flow; + __le16 scd_ssn; +} __attribute__ ((packed)); + +/* + * REPLY_TX_PWR_TABLE_CMD = 0x97 (command, has simple generic response) + */ +struct iwl_txpowertable_cmd { + u8 band; /* 0: 5 GHz, 1: 2.4 GHz */ + u8 reserved; + __le16 channel; +#if IWL == 3945 + struct iwl_power_per_rate power[IWL_MAX_RATES]; +#elif IWL == 4965 + struct iwl_tx_power_db tx_power; +#endif +} __attribute__ ((packed)); + +#if IWL == 3945 +struct iwl_rate_scaling_info { + __le16 rate_n_flags; + u8 try_cnt; + u8 next_rate_index; +} __attribute__ ((packed)); + +/** + * struct iwl_rate_scaling_cmd - Rate Scaling Command & Response + * + * REPLY_RATE_SCALE = 0x47 (command, has simple generic response) + * + * NOTE: The table of rates passed to the uCode via the + * RATE_SCALE command sets up the corresponding order of + * rates used for all related commands, including rate + * masks, etc. + * + * For example, if you set 9MB (PLCP 0x0f) as the first + * rate in the rate table, the bit mask for that rate + * when passed through ofdm_basic_rates on the REPLY_RXON + * command would be bit 0 (1<<0) + */ +struct iwl_rate_scaling_cmd { + u8 table_id; + u8 reserved[3]; + struct iwl_rate_scaling_info table[IWL_MAX_RATES]; +} __attribute__ ((packed)); + +#elif IWL == 4965 + +/*RS_NEW_API: only TLC_RTS remains and moved to bit 0 */ +#define LINK_QUAL_FLAGS_SET_STA_TLC_RTS_MSK (1<<0) + +#define LINK_QUAL_AC_NUM AC_NUM +#define LINK_QUAL_MAX_RETRY_NUM 16 + +#define LINK_QUAL_ANT_A_MSK (1<<0) +#define LINK_QUAL_ANT_B_MSK (1<<1) +#define LINK_QUAL_ANT_MSK (LINK_QUAL_ANT_A_MSK|LINK_QUAL_ANT_B_MSK) + +struct iwl_link_qual_general_params { + u8 flags; + u8 mimo_delimiter; + u8 single_stream_ant_msk; + u8 dual_stream_ant_msk; + u8 start_rate_index[LINK_QUAL_AC_NUM]; +} __attribute__ ((packed)); + +struct iwl_link_qual_agg_params { + __le16 agg_time_limit; + u8 agg_dis_start_th; + u8 agg_frame_cnt_limit; + __le32 reserved; +} __attribute__ ((packed)); + +/* + * REPLY_TX_LINK_QUALITY_CMD = 0x4e (command, has simple generic response) + */ +struct iwl_link_quality_cmd { + u8 sta_id; + u8 reserved1; + __le16 control; + struct iwl_link_qual_general_params general_params; + struct iwl_link_qual_agg_params agg_params; + struct { + __le32 rate_n_flags; + } rs_table[LINK_QUAL_MAX_RETRY_NUM]; + __le32 reserved2; +} __attribute__ ((packed)); +#endif + +/* + * REPLY_BT_CONFIG = 0x9b (command, has simple generic response) + */ +struct iwl_bt_cmd { + u8 flags; + u8 lead_time; + u8 max_kill; + u8 reserved; + __le32 kill_ack_mask; + __le32 kill_cts_mask; +} __attribute__ ((packed)); + +/****************************************************************************** + * (6) + * Spectrum Management (802.11h) Commands, Responses, Notifications: + * + *****************************************************************************/ + +/* + * Spectrum Management + */ +#define MEASUREMENT_FILTER_FLAG (RXON_FILTER_PROMISC_MSK | \ + RXON_FILTER_CTL2HOST_MSK | \ + RXON_FILTER_ACCEPT_GRP_MSK | \ + RXON_FILTER_DIS_DECRYPT_MSK | \ + 
RXON_FILTER_DIS_GRP_DECRYPT_MSK | \ + RXON_FILTER_ASSOC_MSK | \ + RXON_FILTER_BCON_AWARE_MSK) + +struct iwl_measure_channel { + __le32 duration; /* measurement duration in extended beacon + * format */ + u8 channel; /* channel to measure */ + u8 type; /* see enum iwl_measure_type */ + __le16 reserved; +} __attribute__ ((packed)); + +/* + * REPLY_SPECTRUM_MEASUREMENT_CMD = 0x74 (command) + */ +struct iwl_spectrum_cmd { + __le16 len; /* number of bytes starting from token */ + u8 token; /* token id */ + u8 id; /* measurement id -- 0 or 1 */ + u8 origin; /* 0 = TGh, 1 = other, 2 = TGk */ + u8 periodic; /* 1 = periodic */ + __le16 path_loss_timeout; + __le32 start_time; /* start time in extended beacon format */ + __le32 reserved2; + __le32 flags; /* rxon flags */ + __le32 filter_flags; /* rxon filter flags */ + __le16 channel_count; /* minimum 1, maximum 10 */ + __le16 reserved3; + struct iwl_measure_channel channels[10]; +} __attribute__ ((packed)); + +/* + * REPLY_SPECTRUM_MEASUREMENT_CMD = 0x74 (response) + */ +struct iwl_spectrum_resp { + u8 token; + u8 id; /* id of the prior command replaced, or 0xff */ + __le16 status; /* 0 - command will be handled + * 1 - cannot handle (conflicts with another + * measurement) */ +} __attribute__ ((packed)); + +enum iwl_measurement_state { + IWL_MEASUREMENT_START = 0, + IWL_MEASUREMENT_STOP = 1, +}; + +enum iwl_measurement_status { + IWL_MEASUREMENT_OK = 0, + IWL_MEASUREMENT_CONCURRENT = 1, + IWL_MEASUREMENT_CSA_CONFLICT = 2, + IWL_MEASUREMENT_TGH_CONFLICT = 3, + /* 4-5 reserved */ + IWL_MEASUREMENT_STOPPED = 6, + IWL_MEASUREMENT_TIMEOUT = 7, + IWL_MEASUREMENT_PERIODIC_FAILED = 8, +}; + +#define NUM_ELEMENTS_IN_HISTOGRAM 8 + +struct iwl_measurement_histogram { + __le32 ofdm[NUM_ELEMENTS_IN_HISTOGRAM]; /* in 0.8usec counts */ + __le32 cck[NUM_ELEMENTS_IN_HISTOGRAM]; /* in 1usec counts */ +} __attribute__ ((packed)); + +/* clear channel availability counters */ +struct iwl_measurement_cca_counters { + __le32 ofdm; + __le32 cck; +} __attribute__ ((packed)); + +enum iwl_measure_type { + IWL_MEASURE_BASIC = (1 << 0), + IWL_MEASURE_CHANNEL_LOAD = (1 << 1), + IWL_MEASURE_HISTOGRAM_RPI = (1 << 2), + IWL_MEASURE_HISTOGRAM_NOISE = (1 << 3), + IWL_MEASURE_FRAME = (1 << 4), + /* bits 5:6 are reserved */ + IWL_MEASURE_IDLE = (1 << 7), +}; + +/* + * SPECTRUM_MEASURE_NOTIFICATION = 0x75 (notification only, not a command) + */ +struct iwl_spectrum_notification { + u8 id; /* measurement id -- 0 or 1 */ + u8 token; + u8 channel_index; /* index in measurement channel list */ + u8 state; /* 0 - start, 1 - stop */ + __le32 start_time; /* lower 32-bits of TSF */ + u8 band; /* 0 - 5.2GHz, 1 - 2.4GHz */ + u8 channel; + u8 type; /* see enum iwl_measurement_type */ + u8 reserved1; + /* NOTE: cca_ofdm, cca_cck, basic_type, and histogram are only only + * valid if applicable for measurement type requested. 
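As a concrete illustration of the 802.11h structures above, a minimal sketch that requests a basic measurement on a single channel (hypothetical helper and values; flags/filter_flags would normally mirror the active RXON together with MEASUREMENT_FILTER_FLAG; assumes the kernel's memset and byte-order helpers):

static void iwl_build_basic_measure_example(struct iwl_spectrum_cmd *cmd,
                                            u8 channel)
{
        memset(cmd, 0, sizeof(*cmd));
        cmd->token = 1;
        cmd->id = 0;                            /* measurement id, 0 or 1 */
        cmd->origin = 2;                        /* 2 = TGk, per the field comment */
        cmd->channel_count = cpu_to_le16(1);
        cmd->channels[0].channel = channel;
        cmd->channels[0].type = IWL_MEASURE_BASIC;
        cmd->channels[0].duration = cpu_to_le32(100);   /* illustrative */
        /* len counts the bytes starting from the token field */
        cmd->len = cpu_to_le16(sizeof(*cmd) - sizeof(cmd->len));
}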
*/ + __le32 cca_ofdm; /* cca fraction time in 40Mhz clock periods */ + __le32 cca_cck; /* cca fraction time in 44Mhz clock periods */ + __le32 cca_time; /* channel load time in usecs */ + u8 basic_type; /* 0 - bss, 1 - ofdm preamble, 2 - + * unidentified */ + u8 reserved2[3]; + struct iwl_measurement_histogram histogram; + __le32 stop_time; /* lower 32-bits of TSF */ + __le32 status; /* see iwl_measurement_status */ +} __attribute__ ((packed)); + +/****************************************************************************** + * (7) + * Power Management Commands, Responses, Notifications: + * + *****************************************************************************/ + +/** + * struct iwl_powertable_cmd - Power Table Command + * @flags: See below: + * + * POWER_TABLE_CMD = 0x77 (command, has simple generic response) + * + * PM allow: + * bit 0 - '0' Driver not allow power management + * '1' Driver allow PM (use rest of parameters) + * uCode send sleep notifications: + * bit 1 - '0' Don't send sleep notification + * '1' send sleep notification (SEND_PM_NOTIFICATION) + * Sleep over DTIM + * bit 2 - '0' PM have to walk up every DTIM + * '1' PM could sleep over DTIM till listen Interval. + * PCI power managed + * bit 3 - '0' (PCI_LINK_CTRL & 0x1) + * '1' !(PCI_LINK_CTRL & 0x1) + * Force sleep Modes + * bit 31/30- '00' use both mac/xtal sleeps + * '01' force Mac sleep + * '10' force xtal sleep + * '11' Illegal set + * + * NOTE: if sleep_interval[SLEEP_INTRVL_TABLE_SIZE-1] > DTIM period then + * ucode assume sleep over DTIM is allowed and we don't need to wakeup + * for every DTIM. + */ +#define IWL_POWER_VEC_SIZE 5 + + +#if IWL == 3945 + +#define IWL_POWER_DRIVER_ALLOW_SLEEP_MSK __constant_cpu_to_le32(1<<0) +#define IWL_POWER_SLEEP_OVER_DTIM_MSK __constant_cpu_to_le32(1<<2) +#define IWL_POWER_PCI_PM_MSK __constant_cpu_to_le32(1<<3) +struct iwl_powertable_cmd { + __le32 flags; + __le32 rx_data_timeout; + __le32 tx_data_timeout; + __le32 sleep_interval[IWL_POWER_VEC_SIZE]; +} __attribute__((packed)); + +#elif IWL == 4965 + +#define IWL_POWER_DRIVER_ALLOW_SLEEP_MSK __constant_cpu_to_le16(1<<0) +#define IWL_POWER_SLEEP_OVER_DTIM_MSK __constant_cpu_to_le16(1<<2) +#define IWL_POWER_PCI_PM_MSK __constant_cpu_to_le16(1<<3) + +struct iwl_powertable_cmd { + __le16 flags; + u8 keep_alive_seconds; + u8 debug_flags; + __le32 rx_data_timeout; + __le32 tx_data_timeout; + __le32 sleep_interval[IWL_POWER_VEC_SIZE]; + __le32 keep_alive_beacons; +} __attribute__ ((packed)); +#endif + +/* + * PM_SLEEP_NOTIFICATION = 0x7A (notification only, not a command) + * 3945 and 4965 identical. + */ +struct iwl_sleep_notification { + u8 pm_sleep_mode; + u8 pm_wakeup_src; + __le16 reserved; + __le32 sleep_time; + __le32 tsf_low; + __le32 bcon_timer; +} __attribute__ ((packed)); + +/* Sleep states. 3945 and 4965 identical. 
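The flag bits documented above combine as in this minimal sketch of the 4965 iwl_powertable_cmd (hypothetical helper; the timeout and sleep-interval values are purely illustrative; assumes memset and cpu_to_le32):

static void iwl_build_powertable_example(struct iwl_powertable_cmd *cmd)
{
        int i;

        memset(cmd, 0, sizeof(*cmd));
        /* allow PM, permit sleeping across DTIM, enable PCI PM */
        cmd->flags = IWL_POWER_DRIVER_ALLOW_SLEEP_MSK |
                     IWL_POWER_SLEEP_OVER_DTIM_MSK |
                     IWL_POWER_PCI_PM_MSK;
        cmd->keep_alive_seconds = 3;
        cmd->rx_data_timeout = cpu_to_le32(100 * 1024);
        cmd->tx_data_timeout = cpu_to_le32(100 * 1024);
        for (i = 0; i < IWL_POWER_VEC_SIZE; i++)
                cmd->sleep_interval[i] = cpu_to_le32(i + 1);
}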
*/ +enum { + IWL_PM_NO_SLEEP = 0, + IWL_PM_SLP_MAC = 1, + IWL_PM_SLP_FULL_MAC_UNASSOCIATE = 2, + IWL_PM_SLP_FULL_MAC_CARD_STATE = 3, + IWL_PM_SLP_PHY = 4, + IWL_PM_SLP_REPENT = 5, + IWL_PM_WAKEUP_BY_TIMER = 6, + IWL_PM_WAKEUP_BY_DRIVER = 7, + IWL_PM_WAKEUP_BY_RFKILL = 8, + /* 3 reserved */ + IWL_PM_NUM_OF_MODES = 12, +}; + +/* + * REPLY_CARD_STATE_CMD = 0xa0 (command, has simple generic response) + */ +#define CARD_STATE_CMD_DISABLE 0x00 /* Put card to sleep */ +#define CARD_STATE_CMD_ENABLE 0x01 /* Wake up card */ +#define CARD_STATE_CMD_HALT 0x02 /* Power down permanently */ +struct iwl_card_state_cmd { + __le32 status; /* CARD_STATE_CMD_* request new power state */ +} __attribute__ ((packed)); + +/* + * CARD_STATE_NOTIFICATION = 0xa1 (notification only, not a command) + */ +struct iwl_card_state_notif { + __le32 flags; +} __attribute__ ((packed)); + +#define HW_CARD_DISABLED 0x01 +#define SW_CARD_DISABLED 0x02 +#define RF_CARD_DISABLED 0x04 +#define RXON_CARD_DISABLED 0x10 + +struct iwl_ct_kill_config { + __le32 reserved; + __le32 critical_temperature_M; + __le32 critical_temperature_R; +} __attribute__ ((packed)); + +/****************************************************************************** + * (8) + * Scan Commands, Responses, Notifications: + * + *****************************************************************************/ + +struct iwl_scan_channel { + /* type is defined as: + * 0:0 active (0 - passive) + * 1:4 SSID direct + * If 1 is set then corresponding SSID IE is transmitted in probe + * 5:7 reserved + */ + u8 type; + u8 channel; + struct iwl_tx_power tpc; + __le16 active_dwell; + __le16 passive_dwell; +} __attribute__ ((packed)); + +struct iwl_ssid_ie { + u8 id; + u8 len; + u8 ssid[32]; +} __attribute__ ((packed)); + +#define PROBE_OPTION_MAX 0x4 +#define TX_CMD_LIFE_TIME_INFINITE __constant_cpu_to_le32(0xFFFFFFFF) +#define IWL_GOOD_CRC_TH __constant_cpu_to_le16(1) +#define IWL_MAX_SCAN_SIZE 1024 + +/* + * REPLY_SCAN_CMD = 0x80 (command) + */ +struct iwl_scan_cmd { + __le16 len; + u8 reserved0; + u8 channel_count; + __le16 quiet_time; /* dwell only this long on quiet chnl + * (active scan) */ + __le16 quiet_plcp_th; /* quiet chnl is < this # pkts (typ. 1) */ + __le16 good_CRC_th; /* passive -> active promotion threshold */ +#if IWL == 3945 + __le16 reserved1; +#elif IWL == 4965 + __le16 rx_chain; +#endif + __le32 max_out_time; /* max usec to be out of associated (service) + * chnl */ + __le32 suspend_time; /* pause scan this long when returning to svc + * chnl. + * 3945 -- 31:24 # beacons, 19:0 additional usec, + * 4965 -- 31:22 # beacons, 21:0 additional usec. + */ + __le32 flags; + __le32 filter_flags; + + struct iwl_tx_cmd tx_cmd; + struct iwl_ssid_ie direct_scan[PROBE_OPTION_MAX]; + + u8 data[0]; + /* + * The channels start after the probe request payload and are of type: + * + * struct iwl_scan_channel channels[0]; + * + * NOTE: Only one band of channels can be scanned per pass. You + * can not mix 2.4GHz channels and 5.2GHz channels and must + * request a scan multiple times (not concurrently) + * + */ +} __attribute__ ((packed)); + +/* Can abort will notify by complete notification with abort status. 
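The comment above explains that the channel descriptors trail the probe-request payload placed in data[]. A minimal sketch of locating that list and sizing the full command (hypothetical helpers; probe_len is the length of the probe request written into data[]):

static struct iwl_scan_channel *
iwl_scan_channel_list_example(struct iwl_scan_cmd *scan, u16 probe_len)
{
        /* channel entries start immediately after the probe request */
        return (struct iwl_scan_channel *)&scan->data[probe_len];
}

static u16 iwl_scan_cmd_size_example(u16 probe_len, u8 channel_count)
{
        /* total bytes handed to uCode; must stay below IWL_MAX_SCAN_SIZE */
        return sizeof(struct iwl_scan_cmd) + probe_len +
               channel_count * sizeof(struct iwl_scan_channel);
}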
*/ +#define CAN_ABORT_STATUS __constant_cpu_to_le32(0x1) +/* complete notification statuses */ +#define ABORT_STATUS 0x2 + +/* + * REPLY_SCAN_CMD = 0x80 (response) + */ +struct iwl_scanreq_notification { + __le32 status; /* 1: okay, 2: cannot fulfill request */ +} __attribute__ ((packed)); + +/* + * SCAN_START_NOTIFICATION = 0x82 (notification only, not a command) + */ +struct iwl_scanstart_notification { + __le32 tsf_low; + __le32 tsf_high; + __le32 beacon_timer; + u8 channel; + u8 band; + u8 reserved[2]; + __le32 status; +} __attribute__ ((packed)); + +#define SCAN_OWNER_STATUS 0x1; +#define MEASURE_OWNER_STATUS 0x2; + +#define NUMBER_OF_STATISTICS 1 /* first __le32 is good CRC */ +/* + * SCAN_RESULTS_NOTIFICATION = 0x83 (notification only, not a command) + */ +struct iwl_scanresults_notification { + u8 channel; + u8 band; + u8 reserved[2]; + __le32 tsf_low; + __le32 tsf_high; + __le32 statistics[NUMBER_OF_STATISTICS]; +} __attribute__ ((packed)); + +/* + * SCAN_COMPLETE_NOTIFICATION = 0x84 (notification only, not a command) + */ +struct iwl_scancomplete_notification { + u8 scanned_channels; + u8 status; + u8 reserved; + u8 last_channel; + __le32 tsf_low; + __le32 tsf_high; +} __attribute__ ((packed)); + + +/****************************************************************************** + * (9) + * IBSS/AP Commands and Notifications: + * + *****************************************************************************/ + +/* + * BEACON_NOTIFICATION = 0x90 (notification only, not a command) + */ +struct iwl_beacon_notif { + struct iwl_tx_resp beacon_notify_hdr; + __le32 low_tsf; + __le32 high_tsf; + __le32 ibss_mgr_status; +} __attribute__ ((packed)); + +/* + * REPLY_TX_BEACON = 0x91 (command, has simple generic response) + */ +struct iwl_tx_beacon_cmd { + struct iwl_tx_cmd tx; + __le16 tim_idx; + u8 tim_size; + u8 reserved1; + struct ieee80211_hdr frame[0]; /* beacon frame */ +} __attribute__ ((packed)); + +/****************************************************************************** + * (10) + * Statistics Commands and Notifications: + * + *****************************************************************************/ + +#define IWL_TEMP_CONVERT 260 + +#define SUP_RATE_11A_MAX_NUM_CHANNELS 8 +#define SUP_RATE_11B_MAX_NUM_CHANNELS 4 +#define SUP_RATE_11G_MAX_NUM_CHANNELS 12 + +/* Used for passing to driver number of successes and failures per rate */ +struct rate_histogram { + union { + __le32 a[SUP_RATE_11A_MAX_NUM_CHANNELS]; + __le32 b[SUP_RATE_11B_MAX_NUM_CHANNELS]; + __le32 g[SUP_RATE_11G_MAX_NUM_CHANNELS]; + } success; + union { + __le32 a[SUP_RATE_11A_MAX_NUM_CHANNELS]; + __le32 b[SUP_RATE_11B_MAX_NUM_CHANNELS]; + __le32 g[SUP_RATE_11G_MAX_NUM_CHANNELS]; + } failed; +} __attribute__ ((packed)); + +/* statistics command response */ + +struct statistics_rx_phy { + __le32 ina_cnt; + __le32 fina_cnt; + __le32 plcp_err; + __le32 crc32_err; + __le32 overrun_err; + __le32 early_overrun_err; + __le32 crc32_good; + __le32 false_alarm_cnt; + __le32 fina_sync_err_cnt; + __le32 sfd_timeout; + __le32 fina_timeout; + __le32 unresponded_rts; + __le32 rxe_frame_limit_overrun; + __le32 sent_ack_cnt; + __le32 sent_cts_cnt; +#if IWL == 4965 + __le32 sent_ba_rsp_cnt; + __le32 dsp_self_kill; + __le32 mh_format_err; + __le32 re_acq_main_rssi_sum; + __le32 reserved3; +#endif +} __attribute__ ((packed)); + +#if IWL == 4965 +struct statistics_rx_ht_phy { + __le32 plcp_err; + __le32 overrun_err; + __le32 early_overrun_err; + __le32 crc32_good; + __le32 crc32_err; + __le32 mh_format_err; + __le32 
agg_crc32_good; + __le32 agg_mpdu_cnt; + __le32 agg_cnt; + __le32 reserved2; +} __attribute__ ((packed)); +#endif + +struct statistics_rx_non_phy { + __le32 bogus_cts; /* CTS received when not expecting CTS */ + __le32 bogus_ack; /* ACK received when not expecting ACK */ + __le32 non_bssid_frames; /* number of frames with BSSID that + * doesn't belong to the STA BSSID */ + __le32 filtered_frames; /* count frames that were dumped in the + * filtering process */ + __le32 non_channel_beacons; /* beacons with our bss id but not on + * our serving channel */ +#if IWL == 4965 + __le32 channel_beacons; /* beacons with our bss id and in our + * serving channel */ + __le32 num_missed_bcon; /* number of missed beacons */ + __le32 adc_rx_saturation_time; /* count in 0.8us units the time the + * ADC was in saturation */ + __le32 ina_detection_search_time;/* total time (in 0.8us) searched + * for INA */ + __le32 beacon_silence_rssi_a; /* RSSI silence after beacon frame */ + __le32 beacon_silence_rssi_b; /* RSSI silence after beacon frame */ + __le32 beacon_silence_rssi_c; /* RSSI silence after beacon frame */ + __le32 interference_data_flag; /* flag for interference data + * availability. 1 when data is + * available. */ + __le32 channel_load; /* counts RX Enable time */ + __le32 dsp_false_alarms; /* DSP false alarm (both OFDM + * and CCK) counter */ + __le32 beacon_rssi_a; + __le32 beacon_rssi_b; + __le32 beacon_rssi_c; + __le32 beacon_energy_a; + __le32 beacon_energy_b; + __le32 beacon_energy_c; +#endif +} __attribute__ ((packed)); + +struct statistics_rx { + struct statistics_rx_phy ofdm; + struct statistics_rx_phy cck; + struct statistics_rx_non_phy general; +#if IWL == 4965 + struct statistics_rx_ht_phy ofdm_ht; +#endif +} __attribute__ ((packed)); + +#if IWL == 4965 +struct statistics_tx_non_phy_agg { + __le32 ba_timeout; + __le32 ba_reschedule_frames; + __le32 scd_query_agg_frame_cnt; + __le32 scd_query_no_agg; + __le32 scd_query_agg; + __le32 scd_query_mismatch; + __le32 frame_not_ready; + __le32 underrun; + __le32 bt_prio_kill; + __le32 rx_ba_rsp_cnt; + __le32 reserved2; + __le32 reserved3; +} __attribute__ ((packed)); +#endif + +struct statistics_tx { + __le32 preamble_cnt; + __le32 rx_detected_cnt; + __le32 bt_prio_defer_cnt; + __le32 bt_prio_kill_cnt; + __le32 few_bytes_cnt; + __le32 cts_timeout; + __le32 ack_timeout; + __le32 expected_ack_cnt; + __le32 actual_ack_cnt; +#if IWL == 4965 + __le32 dump_msdu_cnt; + __le32 burst_abort_next_frame_mismatch_cnt; + __le32 burst_abort_missing_next_frame_cnt; + __le32 cts_timeout_collision; + __le32 ack_or_ba_timeout_collision; + struct statistics_tx_non_phy_agg agg; +#endif +} __attribute__ ((packed)); + +struct statistics_dbg { + __le32 burst_check; + __le32 burst_count; + __le32 reserved[4]; +} __attribute__ ((packed)); + +struct statistics_div { + __le32 tx_on_a; + __le32 tx_on_b; + __le32 exec_time; + __le32 probe_time; +#if IWL == 4965 + __le32 reserved1; + __le32 reserved2; +#endif +} __attribute__ ((packed)); + +struct statistics_general { + __le32 temperature; +#if IWL == 4965 + __le32 temperature_m; +#endif + struct statistics_dbg dbg; + __le32 sleep_time; + __le32 slots_out; + __le32 slots_idle; + __le32 ttl_timestamp; + struct statistics_div div; +#if IWL == 4965 + __le32 rx_enable_counter; + __le32 reserved1; + __le32 reserved2; + __le32 reserved3; +#endif +} __attribute__ ((packed)); + +/* + * REPLY_STATISTICS_CMD = 0x9c, + * 3945 and 4965 identical. + * + * This command triggers an immediate response containing uCode statistics. 
+ * The response is in the same format as STATISTICS_NOTIFICATION 0x9d, below. + * + * If the CLEAR_STATS configuration flag is set, uCode will clear its + * internal copy of the statistics (counters) after issuing the response. + * This flag does not affect STATISTICS_NOTIFICATIONs after beacons (see below). + * + * If the DISABLE_NOTIF configuration flag is set, uCode will not issue + * STATISTICS_NOTIFICATIONs after received beacons (see below). This flag + * does not affect the response to the REPLY_STATISTICS_CMD 0x9c itself. + */ +#define IWL_STATS_CONF_CLEAR_STATS __constant_cpu_to_le32(0x1) /* see above */ +#define IWL_STATS_CONF_DISABLE_NOTIF __constant_cpu_to_le32(0x2)/* see above */ +struct iwl_statistics_cmd { + __le32 configuration_flags; /* IWL_STATS_CONF_* */ +} __attribute__ ((packed)); + +/* + * STATISTICS_NOTIFICATION = 0x9d (notification only, not a command) + * + * By default, uCode issues this notification after receiving a beacon + * while associated. To disable this behavior, set DISABLE_NOTIF flag in the + * REPLY_STATISTICS_CMD 0x9c, above. + * + * Statistics counters continue to increment beacon after beacon, but are + * cleared when changing channels or when driver issues REPLY_STATISTICS_CMD + * 0x9c with CLEAR_STATS bit set (see above). + * + * uCode also issues this notification during scans. uCode clears statistics + * appropriately so that each notification contains statistics for only the + * one channel that has just been scanned. + */ +#define STATISTICS_REPLY_FLG_BAND_24G_MSK __constant_cpu_to_le32(0x2) +#define STATISTICS_REPLY_FLG_FAT_MODE_MSK __constant_cpu_to_le32(0x8) +struct iwl_notif_statistics { + __le32 flag; + struct statistics_rx rx; + struct statistics_tx tx; + struct statistics_general general; +} __attribute__ ((packed)); + + +/* + * MISSED_BEACONS_NOTIFICATION = 0xa2 (notification only, not a command) + */ +/* if ucode missed CONSECUTIVE_MISSED_BCONS_TH beacons in a row, + * then this notification will be sent. */ +#define CONSECUTIVE_MISSED_BCONS_TH 20 + +struct iwl_missed_beacon_notif { + __le32 consequtive_missed_beacons; + __le32 total_missed_becons; + __le32 num_expected_beacons; + __le32 num_recvd_beacons; +} __attribute__ ((packed)); + +/****************************************************************************** + * (11) + * Rx Calibration Commands: + * + *****************************************************************************/ + +#define PHY_CALIBRATE_DIFF_GAIN_CMD (7) +#define HD_TABLE_SIZE (11) + +struct iwl_sensitivity_cmd { + __le16 control; + __le16 table[HD_TABLE_SIZE]; +} __attribute__ ((packed)); + +struct iwl_calibration_cmd { + u8 opCode; + u8 flags; + __le16 reserved; + s8 diff_gain_a; + s8 diff_gain_b; + s8 diff_gain_c; + u8 reserved1; +} __attribute__ ((packed)); + +/****************************************************************************** + * (12) + * Miscellaneous Commands: + * + *****************************************************************************/ + +/* + * LEDs Command & Response + * REPLY_LEDS_CMD = 0x48 (command, has simple generic response) + * + * For each of 3 possible LEDs (Activity/Link/Tech, selected by "id" field), + * this command turns it on or off, or sets up a periodic blinking cycle. 
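A minimal sketch of the blinking setup described above, using the iwl_led_cmd fields defined just below (hypothetical helper; the 100 ms interval and 50% duty cycle are illustrative):

static void iwl_blink_activity_led_example(struct iwl_led_cmd *led)
{
        led->interval = cpu_to_le32(100 * 1000);        /* 100 ms in usec */
        led->id = 1;                                    /* 1 = Activity LED */
        led->on = 5;                                    /* 5 intervals on ... */
        led->off = 5;                                   /* ... then 5 intervals off */
        led->reserved = 0;
}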
+ */ +struct iwl_led_cmd { + __le32 interval; /* "interval" in uSec */ + u8 id; /* 1: Activity, 2: Link, 3: Tech */ + u8 off; /* # intervals off while blinking; + * "0", with >0 "on" value, turns LED on */ + u8 on; /* # intervals on while blinking; + * "0", regardless of "off", turns LED off */ + u8 reserved; +} __attribute__ ((packed)); + +/****************************************************************************** + * (13) + * Union of all expected notifications/responses: + * + *****************************************************************************/ + +struct iwl_rx_packet { + __le32 len; + struct iwl_cmd_header hdr; + union { + struct iwl_alive_resp alive_frame; + struct iwl_rx_frame rx_frame; + struct iwl_tx_resp tx_resp; + struct iwl_spectrum_notification spectrum_notif; + struct iwl_csa_notification csa_notif; + struct iwl_error_resp err_resp; + struct iwl_card_state_notif card_state_notif; + struct iwl_beacon_notif beacon_status; + struct iwl_add_sta_resp add_sta; + struct iwl_sleep_notification sleep_notif; + struct iwl_spectrum_resp spectrum; + struct iwl_notif_statistics stats; +#if IWL == 4965 + struct iwl_compressed_ba_resp compressed_ba; + struct iwl_missed_beacon_notif missed_beacon; +#endif + __le32 status; + u8 raw[0]; + } u; +} __attribute__ ((packed)); + +#define IWL_RX_FRAME_SIZE (4 + sizeof(struct iwl_rx_frame)) + +#endif /* __iwl_commands_h__ */ diff --git a/drivers/net/wireless/iwlwifi/iwl-debug.h b/drivers/net/wireless/iwlwifi/iwl-debug.h new file mode 100644 index 0000000000000000000000000000000000000000..72318d78957e3c0c6355fafb27272258152839e4 --- /dev/null +++ b/drivers/net/wireless/iwlwifi/iwl-debug.h @@ -0,0 +1,152 @@ +/****************************************************************************** + * + * Copyright(c) 2003 - 2007 Intel Corporation. All rights reserved. + * + * Portions of this file are derived from the ipw3945 project. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA + * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. + * + * Contact Information: + * James P. Ketrenos + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + *****************************************************************************/ + +#ifndef __iwl_debug_h__ +#define __iwl_debug_h__ + +#ifdef CONFIG_IWLWIFI_DEBUG +extern u32 iwl_debug_level; +#define IWL_DEBUG(level, fmt, args...) \ +do { if (iwl_debug_level & (level)) \ + printk(KERN_ERR DRV_NAME": %c %s " fmt, \ + in_interrupt() ? 'I' : 'U', __FUNCTION__ , ## args); } while (0) + +#define IWL_DEBUG_LIMIT(level, fmt, args...) \ +do { if ((iwl_debug_level & (level)) && net_ratelimit()) \ + printk(KERN_ERR DRV_NAME": %c %s " fmt, \ + in_interrupt() ? 'I' : 'U', __FUNCTION__ , ## args); } while (0) +#else +static inline void IWL_DEBUG(int level, const char *fmt, ...) 
+{ +} +static inline void IWL_DEBUG_LIMIT(int level, const char *fmt, ...) +{ +} +#endif /* CONFIG_IWLWIFI_DEBUG */ + +/* + * To use the debug system; + * + * If you are defining a new debug classification, simply add it to the #define + * list here in the form of: + * + * #define IWL_DL_xxxx VALUE + * + * shifting value to the left one bit from the previous entry. xxxx should be + * the name of the classification (for example, WEP) + * + * You then need to either add a IWL_xxxx_DEBUG() macro definition for your + * classification, or use IWL_DEBUG(IWL_DL_xxxx, ...) whenever you want + * to send output to that classification. + * + * To add your debug level to the list of levels seen when you perform + * + * % cat /proc/net/iwl/debug_level + * + * you simply need to add your entry to the iwl_debug_levels array. + * + * If you do not see debug_level in /proc/net/iwl then you do not have + * CONFIG_IWLWIFI_DEBUG defined in your kernel configuration + * + */ + +#define IWL_DL_INFO (1<<0) +#define IWL_DL_MAC80211 (1<<1) +#define IWL_DL_HOST_COMMAND (1<<2) +#define IWL_DL_STATE (1<<3) + +#define IWL_DL_RADIO (1<<7) +#define IWL_DL_POWER (1<<8) +#define IWL_DL_TEMP (1<<9) + +#define IWL_DL_NOTIF (1<<10) +#define IWL_DL_SCAN (1<<11) +#define IWL_DL_ASSOC (1<<12) +#define IWL_DL_DROP (1<<13) + +#define IWL_DL_TXPOWER (1<<14) + +#define IWL_DL_AP (1<<15) + +#define IWL_DL_FW (1<<16) +#define IWL_DL_RF_KILL (1<<17) +#define IWL_DL_FW_ERRORS (1<<18) + +#define IWL_DL_LED (1<<19) + +#define IWL_DL_RATE (1<<20) + +#define IWL_DL_CALIB (1<<21) +#define IWL_DL_WEP (1<<22) +#define IWL_DL_TX (1<<23) +#define IWL_DL_RX (1<<24) +#define IWL_DL_ISR (1<<25) +#define IWL_DL_HT (1<<26) +#define IWL_DL_IO (1<<27) +#define IWL_DL_11H (1<<28) + +#define IWL_DL_STATS (1<<29) +#define IWL_DL_TX_REPLY (1<<30) +#define IWL_DL_QOS (1<<31) + +#define IWL_ERROR(f, a...) printk(KERN_ERR DRV_NAME ": " f, ## a) +#define IWL_WARNING(f, a...) printk(KERN_WARNING DRV_NAME ": " f, ## a) +#define IWL_DEBUG_INFO(f, a...) IWL_DEBUG(IWL_DL_INFO, f, ## a) + +#define IWL_DEBUG_MAC80211(f, a...) IWL_DEBUG(IWL_DL_MAC80211, f, ## a) +#define IWL_DEBUG_TEMP(f, a...) IWL_DEBUG(IWL_DL_TEMP, f, ## a) +#define IWL_DEBUG_SCAN(f, a...) IWL_DEBUG(IWL_DL_SCAN, f, ## a) +#define IWL_DEBUG_RX(f, a...) IWL_DEBUG(IWL_DL_RX, f, ## a) +#define IWL_DEBUG_TX(f, a...) IWL_DEBUG(IWL_DL_TX, f, ## a) +#define IWL_DEBUG_ISR(f, a...) IWL_DEBUG(IWL_DL_ISR, f, ## a) +#define IWL_DEBUG_LED(f, a...) IWL_DEBUG(IWL_DL_LED, f, ## a) +#define IWL_DEBUG_WEP(f, a...) IWL_DEBUG(IWL_DL_WEP, f, ## a) +#define IWL_DEBUG_HC(f, a...) IWL_DEBUG(IWL_DL_HOST_COMMAND, f, ## a) +#define IWL_DEBUG_CALIB(f, a...) IWL_DEBUG(IWL_DL_CALIB, f, ## a) +#define IWL_DEBUG_FW(f, a...) IWL_DEBUG(IWL_DL_FW, f, ## a) +#define IWL_DEBUG_RF_KILL(f, a...) IWL_DEBUG(IWL_DL_RF_KILL, f, ## a) +#define IWL_DEBUG_DROP(f, a...) IWL_DEBUG(IWL_DL_DROP, f, ## a) +#define IWL_DEBUG_DROP_LIMIT(f, a...) IWL_DEBUG_LIMIT(IWL_DL_DROP, f, ## a) +#define IWL_DEBUG_AP(f, a...) IWL_DEBUG(IWL_DL_AP, f, ## a) +#define IWL_DEBUG_TXPOWER(f, a...) IWL_DEBUG(IWL_DL_TXPOWER, f, ## a) +#define IWL_DEBUG_IO(f, a...) IWL_DEBUG(IWL_DL_IO, f, ## a) +#define IWL_DEBUG_RATE(f, a...) IWL_DEBUG(IWL_DL_RATE, f, ## a) +#define IWL_DEBUG_RATE_LIMIT(f, a...) IWL_DEBUG_LIMIT(IWL_DL_RATE, f, ## a) +#define IWL_DEBUG_NOTIF(f, a...) IWL_DEBUG(IWL_DL_NOTIF, f, ## a) +#define IWL_DEBUG_ASSOC(f, a...) IWL_DEBUG(IWL_DL_ASSOC | IWL_DL_INFO, f, ## a) +#define IWL_DEBUG_ASSOC_LIMIT(f, a...) 
\ + IWL_DEBUG_LIMIT(IWL_DL_ASSOC | IWL_DL_INFO, f, ## a) +#define IWL_DEBUG_HT(f, a...) IWL_DEBUG(IWL_DL_HT, f, ## a) +#define IWL_DEBUG_STATS(f, a...) IWL_DEBUG(IWL_DL_STATS, f, ## a) +#define IWL_DEBUG_TX_REPLY(f, a...) IWL_DEBUG(IWL_DL_TX_REPLY, f, ## a) +#define IWL_DEBUG_QOS(f, a...) IWL_DEBUG(IWL_DL_QOS, f, ## a) +#define IWL_DEBUG_RADIO(f, a...) IWL_DEBUG(IWL_DL_RADIO, f, ## a) +#define IWL_DEBUG_POWER(f, a...) IWL_DEBUG(IWL_DL_POWER, f, ## a) +#define IWL_DEBUG_11H(f, a...) IWL_DEBUG(IWL_DL_11H, f, ## a) + +#endif diff --git a/drivers/net/wireless/iwlwifi/iwl-eeprom.h b/drivers/net/wireless/iwlwifi/iwl-eeprom.h new file mode 100644 index 0000000000000000000000000000000000000000..e473c97e3f4fd191d51cf4c63e4dc83ca4687624 --- /dev/null +++ b/drivers/net/wireless/iwlwifi/iwl-eeprom.h @@ -0,0 +1,336 @@ +/****************************************************************************** + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2005 - 2007 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU Geeral Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, + * USA + * + * The full GNU General Public License is included in this distribution + * in the file called LICENSE.GPL. + * + * Contact Information: + * James P. Ketrenos + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + * BSD LICENSE + * + * Copyright(c) 2005 - 2007 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + *****************************************************************************/ + +#ifndef __iwl_eeprom_h__ +#define __iwl_eeprom_h__ + +/* + * This file defines EEPROM related constants, enums, and inline functions. + * + */ + +#define IWL_EEPROM_ACCESS_TIMEOUT 5000 /* uSec */ +#define IWL_EEPROM_ACCESS_DELAY 10 /* uSec */ +/* EEPROM field values */ +#define ANTENNA_SWITCH_NORMAL 0 +#define ANTENNA_SWITCH_INVERSE 1 + +enum { + EEPROM_CHANNEL_VALID = (1 << 0), /* usable for this SKU/geo */ + EEPROM_CHANNEL_IBSS = (1 << 1), /* usable as an IBSS channel */ + /* Bit 2 Reserved */ + EEPROM_CHANNEL_ACTIVE = (1 << 3), /* active scanning allowed */ + EEPROM_CHANNEL_RADAR = (1 << 4), /* radar detection required */ + EEPROM_CHANNEL_WIDE = (1 << 5), + EEPROM_CHANNEL_NARROW = (1 << 6), + EEPROM_CHANNEL_DFS = (1 << 7), /* dynamic freq selection candidate */ +}; + +/* EEPROM field lengths */ +#define EEPROM_BOARD_PBA_NUMBER_LENGTH 11 + +/* EEPROM field lengths */ +#define EEPROM_BOARD_PBA_NUMBER_LENGTH 11 +#define EEPROM_REGULATORY_SKU_ID_LENGTH 4 +#define EEPROM_REGULATORY_BAND1_CHANNELS_LENGTH 14 +#define EEPROM_REGULATORY_BAND2_CHANNELS_LENGTH 13 +#define EEPROM_REGULATORY_BAND3_CHANNELS_LENGTH 12 +#define EEPROM_REGULATORY_BAND4_CHANNELS_LENGTH 11 +#define EEPROM_REGULATORY_BAND5_CHANNELS_LENGTH 6 + +#if IWL == 3945 +#define EEPROM_REGULATORY_CHANNELS_LENGTH ( \ + EEPROM_REGULATORY_BAND1_CHANNELS_LENGTH + \ + EEPROM_REGULATORY_BAND2_CHANNELS_LENGTH + \ + EEPROM_REGULATORY_BAND3_CHANNELS_LENGTH + \ + EEPROM_REGULATORY_BAND4_CHANNELS_LENGTH + \ + EEPROM_REGULATORY_BAND5_CHANNELS_LENGTH) +#elif IWL == 4965 +#define EEPROM_REGULATORY_BAND_24_FAT_CHANNELS_LENGTH 7 +#define EEPROM_REGULATORY_BAND_52_FAT_CHANNELS_LENGTH 11 +#define EEPROM_REGULATORY_CHANNELS_LENGTH ( \ + EEPROM_REGULATORY_BAND1_CHANNELS_LENGTH + \ + EEPROM_REGULATORY_BAND2_CHANNELS_LENGTH + \ + EEPROM_REGULATORY_BAND3_CHANNELS_LENGTH + \ + EEPROM_REGULATORY_BAND4_CHANNELS_LENGTH + \ + EEPROM_REGULATORY_BAND5_CHANNELS_LENGTH + \ + EEPROM_REGULATORY_BAND_24_FAT_CHANNELS_LENGTH + \ + EEPROM_REGULATORY_BAND_52_FAT_CHANNELS_LENGTH) +#endif + +#define EEPROM_REGULATORY_NUMBER_OF_BANDS 5 + +/* SKU Capabilities */ +#define EEPROM_SKU_CAP_SW_RF_KILL_ENABLE (1 << 0) +#define EEPROM_SKU_CAP_HW_RF_KILL_ENABLE (1 << 1) +#define EEPROM_SKU_CAP_OP_MODE_MRC (1 << 7) + +/* *regulatory* channel data from eeprom, one for each channel */ +struct iwl_eeprom_channel { + u8 flags; /* flags copied from EEPROM */ + s8 max_power_avg; /* max power (dBm) on this chnl, limit 31 */ +} __attribute__ ((packed)); + +/* + * Mapping of a Tx power level, at factory calibration temperature, + * to a radio/DSP gain table index. + * One for each of 5 "sample" power levels in each band. + * v_det is measured at the factory, using the 3945's built-in power amplifier + * (PA) output voltage detector. This same detector is used during Tx of + * long packets in normal operation to provide feedback as to proper output + * level. 
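 *
 * Purely illustrative (not necessarily the driver's exact algorithm):
 * given two samples s0 and s1 whose .power values bracket a requested
 * power "pwr", a linear interpolation of the radio gain index would be
 *
 *	idx = s0.gain_index + (pwr - s0.power) *
 *	      (s1.gain_index - s0.gain_index) / (s1.power - s0.power);
 *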
+ * Data copied from EEPROM. + */ +struct iwl_eeprom_txpower_sample { + u8 gain_index; /* index into power (gain) setup table ... */ + s8 power; /* ... for this pwr level for this chnl group */ + u16 v_det; /* PA output voltage */ +} __attribute__ ((packed)); + +/* + * Mappings of Tx power levels -> nominal radio/DSP gain table indexes. + * One for each channel group (a.k.a. "band") (1 for BG, 4 for A). + * Tx power setup code interpolates between the 5 "sample" power levels + * to determine the nominal setup for a requested power level. + * Data copied from EEPROM. + * DO NOT ALTER THIS STRUCTURE!!! + */ +struct iwl_eeprom_txpower_group { + struct iwl_eeprom_txpower_sample samples[5]; /* 5 power levels */ + s32 a, b, c, d, e; /* coefficients for voltage->power + * formula (signed) */ + s32 Fa, Fb, Fc, Fd, Fe; /* these modify coeffs based on + * frequency (signed) */ + s8 saturation_power; /* highest power possible by h/w in this + * band */ + u8 group_channel; /* "representative" channel # in this band */ + s16 temperature; /* h/w temperature at factory calib this band + * (signed) */ +} __attribute__ ((packed)); + +/* + * Temperature-based Tx-power compensation data, not band-specific. + * These coefficients are use to modify a/b/c/d/e coeffs based on + * difference between current temperature and factory calib temperature. + * Data copied from EEPROM. + */ +struct iwl_eeprom_temperature_corr { + u32 Ta; + u32 Tb; + u32 Tc; + u32 Td; + u32 Te; +} __attribute__ ((packed)); + +#if IWL == 4965 +#define EEPROM_TX_POWER_TX_CHAINS (2) +#define EEPROM_TX_POWER_BANDS (8) +#define EEPROM_TX_POWER_MEASUREMENTS (3) +#define EEPROM_TX_POWER_VERSION (2) +#define EEPROM_TX_POWER_VERSION_NEW (5) + +struct iwl_eeprom_calib_measure { + u8 temperature; + u8 gain_idx; + u8 actual_pow; + s8 pa_det; +} __attribute__ ((packed)); + +struct iwl_eeprom_calib_ch_info { + u8 ch_num; + struct iwl_eeprom_calib_measure measurements[EEPROM_TX_POWER_TX_CHAINS] + [EEPROM_TX_POWER_MEASUREMENTS]; +} __attribute__ ((packed)); + +struct iwl_eeprom_calib_subband_info { + u8 ch_from; + u8 ch_to; + struct iwl_eeprom_calib_ch_info ch1; + struct iwl_eeprom_calib_ch_info ch2; +} __attribute__ ((packed)); + +struct iwl_eeprom_calib_info { + u8 saturation_power24; + u8 saturation_power52; + s16 voltage; /* signed */ + struct iwl_eeprom_calib_subband_info band_info[EEPROM_TX_POWER_BANDS]; +} __attribute__ ((packed)); + +#endif + +struct iwl_eeprom { + u8 reserved0[16]; +#define EEPROM_DEVICE_ID (2*0x08) /* 2 bytes */ + u16 device_id; /* abs.ofs: 16 */ + u8 reserved1[2]; +#define EEPROM_PMC (2*0x0A) /* 2 bytes */ + u16 pmc; /* abs.ofs: 20 */ + u8 reserved2[20]; +#define EEPROM_MAC_ADDRESS (2*0x15) /* 6 bytes */ + u8 mac_address[6]; /* abs.ofs: 42 */ + u8 reserved3[58]; +#define EEPROM_BOARD_REVISION (2*0x35) /* 2 bytes */ + u16 board_revision; /* abs.ofs: 106 */ + u8 reserved4[11]; +#define EEPROM_BOARD_PBA_NUMBER (2*0x3B+1) /* 9 bytes */ + u8 board_pba_number[9]; /* abs.ofs: 119 */ + u8 reserved5[8]; +#define EEPROM_VERSION (2*0x44) /* 2 bytes */ + u16 version; /* abs.ofs: 136 */ +#define EEPROM_SKU_CAP (2*0x45) /* 1 bytes */ + u8 sku_cap; /* abs.ofs: 138 */ +#define EEPROM_LEDS_MODE (2*0x45+1) /* 1 bytes */ + u8 leds_mode; /* abs.ofs: 139 */ +#define EEPROM_OEM_MODE (2*0x46) /* 2 bytes */ + u16 oem_mode; +#define EEPROM_WOWLAN_MODE (2*0x47) /* 2 bytes */ + u16 wowlan_mode; /* abs.ofs: 142 */ +#define EEPROM_LEDS_TIME_INTERVAL (2*0x48) /* 2 bytes */ + u16 leds_time_interval; /* abs.ofs: 144 */ +#define EEPROM_LEDS_OFF_TIME (2*0x49) 
/* 1 bytes */ + u8 leds_off_time; /* abs.ofs: 146 */ +#define EEPROM_LEDS_ON_TIME (2*0x49+1) /* 1 bytes */ + u8 leds_on_time; /* abs.ofs: 147 */ +#define EEPROM_ALMGOR_M_VERSION (2*0x4A) /* 1 bytes */ + u8 almgor_m_version; /* abs.ofs: 148 */ +#define EEPROM_ANTENNA_SWITCH_TYPE (2*0x4A+1) /* 1 bytes */ + u8 antenna_switch_type; /* abs.ofs: 149 */ +#if IWL == 3945 + u8 reserved6[42]; +#else + u8 reserved6[8]; +#define EEPROM_4965_BOARD_REVISION (2*0x4F) /* 2 bytes */ + u16 board_revision_4965; /* abs.ofs: 158 */ + u8 reserved7[13]; +#define EEPROM_4965_BOARD_PBA (2*0x56+1) /* 9 bytes */ + u8 board_pba_number_4965[9]; /* abs.ofs: 173 */ + u8 reserved8[10]; +#endif +#define EEPROM_REGULATORY_SKU_ID (2*0x60) /* 4 bytes */ + u8 sku_id[4]; /* abs.ofs: 192 */ +#define EEPROM_REGULATORY_BAND_1 (2*0x62) /* 2 bytes */ + u16 band_1_count; /* abs.ofs: 196 */ +#define EEPROM_REGULATORY_BAND_1_CHANNELS (2*0x63) /* 28 bytes */ + struct iwl_eeprom_channel band_1_channels[14]; /* abs.ofs: 196 */ +#define EEPROM_REGULATORY_BAND_2 (2*0x71) /* 2 bytes */ + u16 band_2_count; /* abs.ofs: 226 */ +#define EEPROM_REGULATORY_BAND_2_CHANNELS (2*0x72) /* 26 bytes */ + struct iwl_eeprom_channel band_2_channels[13]; /* abs.ofs: 228 */ +#define EEPROM_REGULATORY_BAND_3 (2*0x7F) /* 2 bytes */ + u16 band_3_count; /* abs.ofs: 254 */ +#define EEPROM_REGULATORY_BAND_3_CHANNELS (2*0x80) /* 24 bytes */ + struct iwl_eeprom_channel band_3_channels[12]; /* abs.ofs: 256 */ +#define EEPROM_REGULATORY_BAND_4 (2*0x8C) /* 2 bytes */ + u16 band_4_count; /* abs.ofs: 280 */ +#define EEPROM_REGULATORY_BAND_4_CHANNELS (2*0x8D) /* 22 bytes */ + struct iwl_eeprom_channel band_4_channels[11]; /* abs.ofs: 282 */ +#define EEPROM_REGULATORY_BAND_5 (2*0x98) /* 2 bytes */ + u16 band_5_count; /* abs.ofs: 304 */ +#define EEPROM_REGULATORY_BAND_5_CHANNELS (2*0x99) /* 12 bytes */ + struct iwl_eeprom_channel band_5_channels[6]; /* abs.ofs: 306 */ + +/* From here on out the EEPROM diverges between the 4965 and the 3945 */ +#if IWL == 3945 + + u8 reserved9[194]; + +#define EEPROM_TXPOWER_CALIB_GROUP0 0x200 +#define EEPROM_TXPOWER_CALIB_GROUP1 0x240 +#define EEPROM_TXPOWER_CALIB_GROUP2 0x280 +#define EEPROM_TXPOWER_CALIB_GROUP3 0x2c0 +#define EEPROM_TXPOWER_CALIB_GROUP4 0x300 +#define IWL_NUM_TX_CALIB_GROUPS 5 + struct iwl_eeprom_txpower_group groups[IWL_NUM_TX_CALIB_GROUPS]; +/* abs.ofs: 512 */ +#define EEPROM_CALIB_TEMPERATURE_CORRECT 0x340 + struct iwl_eeprom_temperature_corr corrections; /* abs.ofs: 832 */ + u8 reserved16[172]; /* fill out to full 1024 byte block */ + +/* 4965AGN adds fat channel support */ +#elif IWL == 4965 + + u8 reserved10[2]; +#define EEPROM_REGULATORY_BAND_24_FAT_CHANNELS (2*0xA0) /* 14 bytes */ + struct iwl_eeprom_channel band_24_channels[7]; /* abs.ofs: 320 */ + u8 reserved11[2]; +#define EEPROM_REGULATORY_BAND_52_FAT_CHANNELS (2*0xA8) /* 22 bytes */ + struct iwl_eeprom_channel band_52_channels[11]; /* abs.ofs: 336 */ + u8 reserved12[6]; +#define EEPROM_CALIB_VERSION_OFFSET (2*0xB6) /* 2 bytes */ + u16 calib_version; /* abs.ofs: 364 */ + u8 reserved13[2]; +#define EEPROM_SATURATION_POWER_OFFSET (2*0xB8) /* 2 bytes */ + u16 satruation_power; /* abs.ofs: 368 */ + u8 reserved14[94]; +#define EEPROM_IWL_CALIB_TXPOWER_OFFSET (2*0xE8) /* 48 bytes */ + struct iwl_eeprom_calib_info calib_info; /* abs.ofs: 464 */ + + u8 reserved16[140]; /* fill out to full 1024 byte block */ + +#endif + +} __attribute__ ((packed)); + +#define IWL_EEPROM_IMAGE_SIZE 1024 + +#endif diff --git a/drivers/net/wireless/iwlwifi/iwl-helpers.h 
b/drivers/net/wireless/iwlwifi/iwl-helpers.h new file mode 100644 index 0000000000000000000000000000000000000000..e2a8d95ad9cd8a5eaa8e9beb13b807594d29a17d --- /dev/null +++ b/drivers/net/wireless/iwlwifi/iwl-helpers.h @@ -0,0 +1,255 @@ +/****************************************************************************** + * + * Copyright(c) 2003 - 2007 Intel Corporation. All rights reserved. + * + * Portions of this file are derived from the ipw3945 project, as well + * as portions of the ieee80211 subsystem header files. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA + * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. + * + * Contact Information: + * James P. Ketrenos + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + *****************************************************************************/ + +#ifndef __iwl_helpers_h__ +#define __iwl_helpers_h__ + +#include + +/* + * The structures defined by the hardware/uCode interface + * have bit-wise operations. For each bit-field there is + * a data symbol in the structure, the start bit position + * and the length of the bit-field. + * + * iwl_get_bits and iwl_set_bits will return or set the + * appropriate bits on a 32-bit value. + * + * IWL_GET_BITS and IWL_SET_BITS use symbol expansion to + * expand out to the appropriate call to iwl_get_bits + * and iwl_set_bits without having to reference all of the + * numerical constants and defines provided in the hardware + * definition + */ + +/** + * iwl_get_bits - Extract a hardware bit-field value + * @src: source hardware value (__le32) + * @pos: bit-position (0-based) of first bit of value + * @len: length of bit-field + * + * iwl_get_bits will return the bit-field in cpu endian ordering. + * + * NOTE: If used from IWL_GET_BITS then pos and len are compile-constants and + * will collapse to minimal code by the compiler. + */ +static inline u32 iwl_get_bits(__le32 src, u8 pos, u8 len) +{ + u32 tmp = le32_to_cpu(src); + + tmp >>= pos; + tmp &= (1UL << len) - 1; + return tmp; +} + +/** + * iwl_set_bits - Set a hardware bit-field value + * @dst: Address of __le32 hardware value + * @pos: bit-position (0-based) of first bit of value + * @len: length of bit-field + * @val: cpu endian value to encode into the bit-field + * + * iwl_set_bits will encode val into dst, masked to be len bits long at bit + * position pos. + * + * NOTE: If used IWL_SET_BITS pos and len will be compile-constants and + * will collapse to minimal code by the compiler. 
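 *
 * A small worked example (illustrative only): with pos = 8 and len = 4,
 *
 *	__le32 reg = cpu_to_le32(0);
 *	iwl_set_bits(&reg, 8, 4, 0xA);
 *
 * stores 0xA into bits 11:8 of reg, and a subsequent
 * iwl_get_bits(reg, 8, 4) returns 0xA regardless of host byte order.
 *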
+ */ +static inline void iwl_set_bits(__le32 *dst, u8 pos, u8 len, int val) +{ + u32 tmp = le32_to_cpu(*dst); + + tmp &= ~(((1UL << len) - 1) << pos); + tmp |= (val & ((1UL << len) - 1)) << pos; + *dst = cpu_to_le32(tmp); +} + +static inline void iwl_set_bits16(__le16 *dst, u8 pos, u8 len, int val) +{ + u16 tmp = le16_to_cpu(*dst); + + tmp &= ~((1UL << (pos + len)) - (1UL << pos)); + tmp |= (val & ((1UL << len) - 1)) << pos; + *dst = cpu_to_le16(tmp); +} + +/* + * The bit-field definitions in iwl-xxxx-hw.h are in the form of: + * + * struct example { + * __le32 val1; + * #define IWL_name_POS 8 + * #define IWL_name_LEN 4 + * #define IWL_name_SYM val1 + * }; + * + * The IWL_SET_BITS and IWL_GET_BITS macros are provided to allow the driver + * to call: + * + * struct example bar; + * u32 val = IWL_GET_BITS(bar, name); + * val = val * 2; + * IWL_SET_BITS(bar, name, val); + * + * All cpu / host ordering, masking, and shifts are performed by the macros + * and iwl_{get,set}_bits. + * + */ +#define IWL_SET_BITS(s, sym, v) \ + iwl_set_bits(&(s).IWL_ ## sym ## _SYM, IWL_ ## sym ## _POS, \ + IWL_ ## sym ## _LEN, (v)) + +#define IWL_SET_BITS16(s, sym, v) \ + iwl_set_bits16(&(s).IWL_ ## sym ## _SYM, IWL_ ## sym ## _POS, \ + IWL_ ## sym ## _LEN, (v)) + +#define IWL_GET_BITS(s, sym) \ + iwl_get_bits((s).IWL_ ## sym ## _SYM, IWL_ ## sym ## _POS, \ + IWL_ ## sym ## _LEN) + + +#define KELVIN_TO_CELSIUS(x) ((x)-273) +#define CELSIUS_TO_KELVIN(x) ((x)+273) + +#define IEEE80211_CHAN_W_RADAR_DETECT 0x00000010 + +static inline struct ieee80211_conf *ieee80211_get_hw_conf( + struct ieee80211_hw *hw) +{ + return &hw->conf; +} + +#define QOS_CONTROL_LEN 2 + +#define IEEE80211_STYPE_BACK_REQ 0x0080 +#define IEEE80211_STYPE_BACK 0x0090 + + +static inline int ieee80211_is_management(u16 fc) +{ + return (fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_MGMT; +} + +static inline int ieee80211_is_control(u16 fc) +{ + return (fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_CTL; +} + +static inline int ieee80211_is_data(u16 fc) +{ + return (fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_DATA; +} + +static inline int ieee80211_is_back_request(u16 fc) +{ + return ((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_CTL) && + ((fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_BACK_REQ); +} + +static inline int ieee80211_is_probe_response(u16 fc) +{ + return ((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_MGMT) && + ((fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_PROBE_RESP); +} + +static inline int ieee80211_is_probe_request(u16 fc) +{ + return ((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_MGMT) && + ((fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_PROBE_REQ); +} + +static inline int ieee80211_is_beacon(u16 fc) +{ + return ((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_MGMT) && + ((fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_BEACON); +} + +static inline int ieee80211_is_atim(u16 fc) +{ + return ((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_MGMT) && + ((fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_ATIM); +} + +static inline int ieee80211_is_assoc_request(u16 fc) +{ + return ((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_MGMT) && + ((fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_ASSOC_REQ); +} + +static inline int ieee80211_is_assoc_response(u16 fc) +{ + return ((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_MGMT) && + ((fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_ASSOC_RESP); +} + +static inline int ieee80211_is_auth(u16 fc) +{ + return ((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_MGMT) && + ((fc & IEEE80211_FCTL_STYPE) == 
IEEE80211_STYPE_ASSOC_REQ); +} + +static inline int ieee80211_is_deauth(u16 fc) +{ + return ((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_MGMT) && + ((fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_ASSOC_REQ); +} + +static inline int ieee80211_is_disassoc(u16 fc) +{ + return ((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_MGMT) && + ((fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_ASSOC_REQ); +} + +static inline int ieee80211_is_reassoc_request(u16 fc) +{ + return ((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_MGMT) && + ((fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_REASSOC_REQ); +} + +static inline int ieee80211_is_reassoc_response(u16 fc) +{ + return ((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_MGMT) && + ((fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_REASSOC_RESP); +} + +static inline int iwl_check_bits(unsigned long field, unsigned long mask) +{ + return ((field & mask) == mask) ? 1 : 0; +} + +static inline unsigned long elapsed_jiffies(unsigned long start, + unsigned long end) +{ + if (end > start) + return end - start; + + return end + (MAX_JIFFY_OFFSET - start); +} + +#endif /* __iwl_helpers_h__ */ diff --git a/drivers/net/wireless/iwlwifi/iwl-hw.h b/drivers/net/wireless/iwlwifi/iwl-hw.h new file mode 100644 index 0000000000000000000000000000000000000000..1aa6fcd39a5e40ad1610ed25a1847e5cc937da6a --- /dev/null +++ b/drivers/net/wireless/iwlwifi/iwl-hw.h @@ -0,0 +1,537 @@ +/****************************************************************************** + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2005 - 2007 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU Geeral Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, + * USA + * + * The full GNU General Public License is included in this distribution + * in the file called LICENSE.GPL. + * + * Contact Information: + * James P. Ketrenos + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + * BSD LICENSE + * + * Copyright(c) 2005 - 2007 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + *****************************************************************************/ + +#ifndef __iwlwifi_hw_h__ +#define __iwlwifi_hw_h__ + +/* + * This file defines hardware constants common to 3945 and 4965. + * + * Device-specific constants are defined in iwl-3945-hw.h and iwl-4965-hw.h, + * although this file contains a few definitions for which the .c + * implementation is the same for 3945 and 4965, except for the value of + * a constant. + * + * uCode API constants are defined in iwl-commands.h. + * + * NOTE: DO NOT PUT OS IMPLEMENTATION-SPECIFIC DECLARATIONS HERE + * + * The iwl-*hw.h (and files they include) files should remain OS/driver + * implementation independent, declaring only the hardware interface. + */ + +/* uCode queue management definitions */ +#define IWL_CMD_QUEUE_NUM 4 +#define IWL_CMD_FIFO_NUM 4 +#define IWL_BACK_QUEUE_FIRST_ID 7 + +/* Tx rates */ +#define IWL_CCK_RATES 4 +#define IWL_OFDM_RATES 8 + +#if IWL == 3945 +#define IWL_HT_RATES 0 +#elif IWL == 4965 +#define IWL_HT_RATES 16 +#endif + +#define IWL_MAX_RATES (IWL_CCK_RATES+IWL_OFDM_RATES+IWL_HT_RATES) + +/* Time constants */ +#define SHORT_SLOT_TIME 9 +#define LONG_SLOT_TIME 20 + +/* RSSI to dBm */ +#if IWL == 3945 +#define IWL_RSSI_OFFSET 95 +#elif IWL == 4965 +#define IWL_RSSI_OFFSET 44 +#endif + +#include "iwl-eeprom.h" +#include "iwl-commands.h" + +#define PCI_LINK_CTRL 0x0F0 +#define PCI_POWER_SOURCE 0x0C8 +#define PCI_REG_WUM8 0x0E8 +#define PCI_CFG_PMC_PME_FROM_D3COLD_SUPPORT (0x80000000) + +/*=== CSR (control and status registers) ===*/ +#define CSR_BASE (0x000) + +#define CSR_SW_VER (CSR_BASE+0x000) +#define CSR_HW_IF_CONFIG_REG (CSR_BASE+0x000) /* hardware interface config */ +#define CSR_INT_COALESCING (CSR_BASE+0x004) /* accum ints, 32-usec units */ +#define CSR_INT (CSR_BASE+0x008) /* host interrupt status/ack */ +#define CSR_INT_MASK (CSR_BASE+0x00c) /* host interrupt enable */ +#define CSR_FH_INT_STATUS (CSR_BASE+0x010) /* busmaster int status/ack*/ +#define CSR_GPIO_IN (CSR_BASE+0x018) /* read external chip pins */ +#define CSR_RESET (CSR_BASE+0x020) /* busmaster enable, NMI, etc*/ +#define CSR_GP_CNTRL (CSR_BASE+0x024) +#define CSR_HW_REV (CSR_BASE+0x028) +#define CSR_EEPROM_REG (CSR_BASE+0x02c) +#define CSR_EEPROM_GP (CSR_BASE+0x030) +#define CSR_GP_UCODE (CSR_BASE+0x044) +#define CSR_UCODE_DRV_GP1 (CSR_BASE+0x054) +#define CSR_UCODE_DRV_GP1_SET (CSR_BASE+0x058) +#define CSR_UCODE_DRV_GP1_CLR (CSR_BASE+0x05c) +#define CSR_UCODE_DRV_GP2 (CSR_BASE+0x060) +#define CSR_LED_REG (CSR_BASE+0x094) +#define CSR_DRAM_INT_TBL_CTL (CSR_BASE+0x0A0) +#define CSR_GIO_CHICKEN_BITS (CSR_BASE+0x100) +#define CSR_ANA_PLL_CFG (CSR_BASE+0x20c) +#define CSR_HW_REV_WA_REG (CSR_BASE+0x22C) + +/* HW I/F configuration */ +#define 
CSR_HW_IF_CONFIG_REG_BIT_ALMAGOR_MB (0x00000100) +#define CSR_HW_IF_CONFIG_REG_BIT_ALMAGOR_MM (0x00000200) +#define CSR_HW_IF_CONFIG_REG_BIT_SKU_MRC (0x00000400) +#define CSR_HW_IF_CONFIG_REG_BIT_BOARD_TYPE (0x00000800) +#define CSR_HW_IF_CONFIG_REG_BITS_SILICON_TYPE_A (0x00000000) +#define CSR_HW_IF_CONFIG_REG_BITS_SILICON_TYPE_B (0x00001000) +#define CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM (0x00200000) + +/* interrupt flags in INTA, set by uCode or hardware (e.g. dma), + * acknowledged (reset) by host writing "1" to flagged bits. */ +#define CSR_INT_BIT_FH_RX (1<<31) /* Rx DMA, cmd responses, FH_INT[17:16] */ +#define CSR_INT_BIT_HW_ERR (1<<29) /* DMA hardware error FH_INT[31] */ +#define CSR_INT_BIT_DNLD (1<<28) /* uCode Download */ +#define CSR_INT_BIT_FH_TX (1<<27) /* Tx DMA FH_INT[1:0] */ +#define CSR_INT_BIT_MAC_CLK_ACTV (1<<26) /* NIC controller's clock toggled on/off */ +#define CSR_INT_BIT_SW_ERR (1<<25) /* uCode error */ +#define CSR_INT_BIT_RF_KILL (1<<7) /* HW RFKILL switch GP_CNTRL[27] toggled */ +#define CSR_INT_BIT_CT_KILL (1<<6) /* Critical temp (chip too hot) rfkill */ +#define CSR_INT_BIT_SW_RX (1<<3) /* Rx, command responses, 3945 */ +#define CSR_INT_BIT_WAKEUP (1<<1) /* NIC controller waking up (pwr mgmt) */ +#define CSR_INT_BIT_ALIVE (1<<0) /* uCode interrupts once it initializes */ + +#define CSR_INI_SET_MASK (CSR_INT_BIT_FH_RX | \ + CSR_INT_BIT_HW_ERR | \ + CSR_INT_BIT_FH_TX | \ + CSR_INT_BIT_SW_ERR | \ + CSR_INT_BIT_RF_KILL | \ + CSR_INT_BIT_SW_RX | \ + CSR_INT_BIT_WAKEUP | \ + CSR_INT_BIT_ALIVE) + +/* interrupt flags in FH (flow handler) (PCI busmaster DMA) */ +#define CSR_FH_INT_BIT_ERR (1<<31) /* Error */ +#define CSR_FH_INT_BIT_HI_PRIOR (1<<30) /* High priority Rx, bypass coalescing */ +#define CSR_FH_INT_BIT_RX_CHNL2 (1<<18) /* Rx channel 2 (3945 only) */ +#define CSR_FH_INT_BIT_RX_CHNL1 (1<<17) /* Rx channel 1 */ +#define CSR_FH_INT_BIT_RX_CHNL0 (1<<16) /* Rx channel 0 */ +#define CSR_FH_INT_BIT_TX_CHNL6 (1<<6) /* Tx channel 6 (3945 only) */ +#define CSR_FH_INT_BIT_TX_CHNL1 (1<<1) /* Tx channel 1 */ +#define CSR_FH_INT_BIT_TX_CHNL0 (1<<0) /* Tx channel 0 */ + +#define CSR_FH_INT_RX_MASK (CSR_FH_INT_BIT_HI_PRIOR | \ + CSR_FH_INT_BIT_RX_CHNL2 | \ + CSR_FH_INT_BIT_RX_CHNL1 | \ + CSR_FH_INT_BIT_RX_CHNL0) + +#define CSR_FH_INT_TX_MASK (CSR_FH_INT_BIT_TX_CHNL6 | \ + CSR_FH_INT_BIT_TX_CHNL1 | \ + CSR_FH_INT_BIT_TX_CHNL0 ) + + +/* RESET */ +#define CSR_RESET_REG_FLAG_NEVO_RESET (0x00000001) +#define CSR_RESET_REG_FLAG_FORCE_NMI (0x00000002) +#define CSR_RESET_REG_FLAG_SW_RESET (0x00000080) +#define CSR_RESET_REG_FLAG_MASTER_DISABLED (0x00000100) +#define CSR_RESET_REG_FLAG_STOP_MASTER (0x00000200) + +/* GP (general purpose) CONTROL */ +#define CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY (0x00000001) +#define CSR_GP_CNTRL_REG_FLAG_INIT_DONE (0x00000004) +#define CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ (0x00000008) +#define CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP (0x00000010) + +#define CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN (0x00000001) + +#define CSR_GP_CNTRL_REG_MSK_POWER_SAVE_TYPE (0x07000000) +#define CSR_GP_CNTRL_REG_FLAG_MAC_POWER_SAVE (0x04000000) +#define CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW (0x08000000) + + +/* EEPROM REG */ +#define CSR_EEPROM_REG_READ_VALID_MSK (0x00000001) +#define CSR_EEPROM_REG_BIT_CMD (0x00000002) + +/* EEPROM GP */ +#define CSR_EEPROM_GP_VALID_MSK (0x00000006) +#define CSR_EEPROM_GP_BAD_SIGNATURE (0x00000000) +#define CSR_EEPROM_GP_IF_OWNER_MSK (0x00000180) + +/* UCODE DRV GP */ +#define CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP (0x00000001) +#define 
CSR_UCODE_SW_BIT_RFKILL (0x00000002) +#define CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED (0x00000004) +#define CSR_UCODE_DRV_GP1_REG_BIT_CT_KILL_EXIT (0x00000008) + +/* GPIO */ +#define CSR_GPIO_IN_BIT_AUX_POWER (0x00000200) +#define CSR_GPIO_IN_VAL_VAUX_PWR_SRC (0x00000000) +#define CSR_GPIO_IN_VAL_VMAIN_PWR_SRC CSR_GPIO_IN_BIT_AUX_POWER + +/* GI Chicken Bits */ +#define CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX (0x00800000) +#define CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER (0x20000000) + +/* CSR_ANA_PLL_CFG */ +#define CSR_ANA_PLL_CFG_SH (0x00880300) + +#define CSR_LED_REG_TRUN_ON (0x00000078) +#define CSR_LED_REG_TRUN_OFF (0x00000038) +#define CSR_LED_BSM_CTRL_MSK (0xFFFFFFDF) + +/* DRAM_INT_TBL_CTRL */ +#define CSR_DRAM_INT_TBL_CTRL_EN (1<<31) +#define CSR_DRAM_INT_TBL_CTRL_WRAP_CHK (1<<27) + +/*=== HBUS (Host-side Bus) ===*/ +#define HBUS_BASE (0x400) + +#define HBUS_TARG_MEM_RADDR (HBUS_BASE+0x00c) +#define HBUS_TARG_MEM_WADDR (HBUS_BASE+0x010) +#define HBUS_TARG_MEM_WDAT (HBUS_BASE+0x018) +#define HBUS_TARG_MEM_RDAT (HBUS_BASE+0x01c) +#define HBUS_TARG_PRPH_WADDR (HBUS_BASE+0x044) +#define HBUS_TARG_PRPH_RADDR (HBUS_BASE+0x048) +#define HBUS_TARG_PRPH_WDAT (HBUS_BASE+0x04c) +#define HBUS_TARG_PRPH_RDAT (HBUS_BASE+0x050) +#define HBUS_TARG_WRPTR (HBUS_BASE+0x060) + +#define HBUS_TARG_MBX_C (HBUS_BASE+0x030) + + +/* SCD (Scheduler) */ +#define SCD_BASE (CSR_BASE + 0x2E00) + +#define SCD_MODE_REG (SCD_BASE + 0x000) +#define SCD_ARASTAT_REG (SCD_BASE + 0x004) +#define SCD_TXFACT_REG (SCD_BASE + 0x010) +#define SCD_TXF4MF_REG (SCD_BASE + 0x014) +#define SCD_TXF5MF_REG (SCD_BASE + 0x020) +#define SCD_SBYP_MODE_1_REG (SCD_BASE + 0x02C) +#define SCD_SBYP_MODE_2_REG (SCD_BASE + 0x030) + +/*=== FH (data Flow Handler) ===*/ +#define FH_BASE (0x800) + +#define FH_CBCC_TABLE (FH_BASE+0x140) +#define FH_TFDB_TABLE (FH_BASE+0x180) +#define FH_RCSR_TABLE (FH_BASE+0x400) +#define FH_RSSR_TABLE (FH_BASE+0x4c0) +#define FH_TCSR_TABLE (FH_BASE+0x500) +#define FH_TSSR_TABLE (FH_BASE+0x680) + +/* TFDB (Transmit Frame Buffer Descriptor) */ +#define FH_TFDB(_channel, buf) \ + (FH_TFDB_TABLE+((_channel)*2+(buf))*0x28) +#define ALM_FH_TFDB_CHNL_BUF_CTRL_REG(_channel) \ + (FH_TFDB_TABLE + 0x50 * _channel) +/* CBCC _channel is [0,2] */ +#define FH_CBCC(_channel) (FH_CBCC_TABLE+(_channel)*0x8) +#define FH_CBCC_CTRL(_channel) (FH_CBCC(_channel)+0x00) +#define FH_CBCC_BASE(_channel) (FH_CBCC(_channel)+0x04) + +/* RCSR _channel is [0,2] */ +#define FH_RCSR(_channel) (FH_RCSR_TABLE+(_channel)*0x40) +#define FH_RCSR_CONFIG(_channel) (FH_RCSR(_channel)+0x00) +#define FH_RCSR_RBD_BASE(_channel) (FH_RCSR(_channel)+0x04) +#define FH_RCSR_WPTR(_channel) (FH_RCSR(_channel)+0x20) +#define FH_RCSR_RPTR_ADDR(_channel) (FH_RCSR(_channel)+0x24) + +#if IWL == 3945 +#define FH_RSCSR_CHNL0_WPTR (FH_RCSR_WPTR(0)) +#elif IWL == 4965 +#define FH_RSCSR_CHNL0_WPTR (FH_RSCSR_CHNL0_RBDCB_WPTR_REG) +#endif + +/* RSSR */ +#define FH_RSSR_CTRL (FH_RSSR_TABLE+0x000) +#define FH_RSSR_STATUS (FH_RSSR_TABLE+0x004) +/* TCSR */ +#define FH_TCSR(_channel) (FH_TCSR_TABLE+(_channel)*0x20) +#define FH_TCSR_CONFIG(_channel) (FH_TCSR(_channel)+0x00) +#define FH_TCSR_CREDIT(_channel) (FH_TCSR(_channel)+0x04) +#define FH_TCSR_BUFF_STTS(_channel) (FH_TCSR(_channel)+0x08) +/* TSSR */ +#define FH_TSSR_CBB_BASE (FH_TSSR_TABLE+0x000) +#define FH_TSSR_MSG_CONFIG (FH_TSSR_TABLE+0x008) +#define FH_TSSR_TX_STATUS (FH_TSSR_TABLE+0x010) +/* 18 - reserved */ + +/* card static random access memory (SRAM) for processor data and instructs */ +#define 
RTC_INST_LOWER_BOUND (0x000000) +#define RTC_DATA_LOWER_BOUND (0x800000) + + +/* DBM */ + +#define ALM_FH_SRVC_CHNL (6) + +#define ALM_FH_RCSR_RX_CONFIG_REG_POS_RBDC_SIZE (20) +#define ALM_FH_RCSR_RX_CONFIG_REG_POS_IRQ_RBTH (4) + +#define ALM_FH_RCSR_RX_CONFIG_REG_BIT_WR_STTS_EN (0x08000000) + +#define ALM_FH_RCSR_RX_CONFIG_REG_VAL_DMA_CHNL_EN_ENABLE (0x80000000) + +#define ALM_FH_RCSR_RX_CONFIG_REG_VAL_RDRBD_EN_ENABLE (0x20000000) + +#define ALM_FH_RCSR_RX_CONFIG_REG_VAL_MAX_FRAG_SIZE_128 (0x01000000) + +#define ALM_FH_RCSR_RX_CONFIG_REG_VAL_IRQ_DEST_INT_HOST (0x00001000) + +#define ALM_FH_RCSR_RX_CONFIG_REG_VAL_MSG_MODE_FH (0x00000000) + +#define ALM_FH_TCSR_TX_CONFIG_REG_VAL_MSG_MODE_TXF (0x00000000) +#define ALM_FH_TCSR_TX_CONFIG_REG_VAL_MSG_MODE_DRIVER (0x00000001) + +#define ALM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE_VAL (0x00000000) +#define ALM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE_VAL (0x00000008) + +#define ALM_FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_IFTFD (0x00200000) + +#define ALM_FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_RTC_NOINT (0x00000000) + +#define ALM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE (0x00000000) +#define ALM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE (0x80000000) + +#define ALM_FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID (0x00004000) + +#define ALM_FH_TCSR_CHNL_TX_BUF_STS_REG_BIT_TFDB_WPTR (0x00000001) + +#define ALM_FH_TSSR_TX_MSG_CONFIG_REG_VAL_SNOOP_RD_TXPD_ON (0xFF000000) +#define ALM_FH_TSSR_TX_MSG_CONFIG_REG_VAL_ORDER_RD_TXPD_ON (0x00FF0000) + +#define ALM_FH_TSSR_TX_MSG_CONFIG_REG_VAL_MAX_FRAG_SIZE_128B (0x00000400) + +#define ALM_FH_TSSR_TX_MSG_CONFIG_REG_VAL_SNOOP_RD_TFD_ON (0x00000100) +#define ALM_FH_TSSR_TX_MSG_CONFIG_REG_VAL_ORDER_RD_CBB_ON (0x00000080) + +#define ALM_FH_TSSR_TX_MSG_CONFIG_REG_VAL_ORDER_RSP_WAIT_TH (0x00000020) +#define ALM_FH_TSSR_TX_MSG_CONFIG_REG_VAL_RSP_WAIT_TH (0x00000005) + +#define ALM_TB_MAX_BYTES_COUNT (0xFFF0) + +#define ALM_FH_TSSR_TX_STATUS_REG_BIT_BUFS_EMPTY(_channel) \ + ((1LU << _channel) << 24) +#define ALM_FH_TSSR_TX_STATUS_REG_BIT_NO_PEND_REQ(_channel) \ + ((1LU << _channel) << 16) + +#define ALM_FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(_channel) \ + (ALM_FH_TSSR_TX_STATUS_REG_BIT_BUFS_EMPTY(_channel) | \ + ALM_FH_TSSR_TX_STATUS_REG_BIT_NO_PEND_REQ(_channel)) +#define PCI_CFG_REV_ID_BIT_BASIC_SKU (0x40) /* bit 6 */ +#define PCI_CFG_REV_ID_BIT_RTP (0x80) /* bit 7 */ + +#define HBUS_TARG_MBX_C_REG_BIT_CMD_BLOCKED (0x00000004) + +#define TFD_QUEUE_MIN 0 +#define TFD_QUEUE_MAX 6 +#define TFD_QUEUE_SIZE_MAX (256) + +/* spectrum and channel data structures */ +#define IWL_NUM_SCAN_RATES (2) + +#define IWL_SCAN_FLAG_24GHZ (1<<0) +#define IWL_SCAN_FLAG_52GHZ (1<<1) +#define IWL_SCAN_FLAG_ACTIVE (1<<2) +#define IWL_SCAN_FLAG_DIRECT (1<<3) + +#define IWL_MAX_CMD_SIZE 1024 + +#define IWL_DEFAULT_TX_RETRY 15 +#define IWL_MAX_TX_RETRY 16 + +/*********************************************/ + +#define RFD_SIZE 4 +#define NUM_TFD_CHUNKS 4 + +#define RX_QUEUE_SIZE 256 +#define RX_QUEUE_MASK 255 +#define RX_QUEUE_SIZE_LOG 8 + +/* QoS definitions */ + +#define CW_MIN_OFDM 15 +#define CW_MAX_OFDM 1023 +#define CW_MIN_CCK 31 +#define CW_MAX_CCK 1023 + +#define QOS_TX0_CW_MIN_OFDM CW_MIN_OFDM +#define QOS_TX1_CW_MIN_OFDM CW_MIN_OFDM +#define QOS_TX2_CW_MIN_OFDM ((CW_MIN_OFDM + 1) / 2 - 1) +#define QOS_TX3_CW_MIN_OFDM ((CW_MIN_OFDM + 1) / 4 - 1) + +#define QOS_TX0_CW_MIN_CCK CW_MIN_CCK +#define QOS_TX1_CW_MIN_CCK CW_MIN_CCK +#define QOS_TX2_CW_MIN_CCK ((CW_MIN_CCK + 1) / 2 - 1) +#define QOS_TX3_CW_MIN_CCK ((CW_MIN_CCK + 1) / 4 - 1) + +#define 
QOS_TX0_CW_MAX_OFDM CW_MAX_OFDM +#define QOS_TX1_CW_MAX_OFDM CW_MAX_OFDM +#define QOS_TX2_CW_MAX_OFDM CW_MIN_OFDM +#define QOS_TX3_CW_MAX_OFDM ((CW_MIN_OFDM + 1) / 2 - 1) + +#define QOS_TX0_CW_MAX_CCK CW_MAX_CCK +#define QOS_TX1_CW_MAX_CCK CW_MAX_CCK +#define QOS_TX2_CW_MAX_CCK CW_MIN_CCK +#define QOS_TX3_CW_MAX_CCK ((CW_MIN_CCK + 1) / 2 - 1) + +#define QOS_TX0_AIFS 3 +#define QOS_TX1_AIFS 7 +#define QOS_TX2_AIFS 2 +#define QOS_TX3_AIFS 2 + +#define QOS_TX0_ACM 0 +#define QOS_TX1_ACM 0 +#define QOS_TX2_ACM 0 +#define QOS_TX3_ACM 0 + +#define QOS_TX0_TXOP_LIMIT_CCK 0 +#define QOS_TX1_TXOP_LIMIT_CCK 0 +#define QOS_TX2_TXOP_LIMIT_CCK 6016 +#define QOS_TX3_TXOP_LIMIT_CCK 3264 + +#define QOS_TX0_TXOP_LIMIT_OFDM 0 +#define QOS_TX1_TXOP_LIMIT_OFDM 0 +#define QOS_TX2_TXOP_LIMIT_OFDM 3008 +#define QOS_TX3_TXOP_LIMIT_OFDM 1504 + +#define DEF_TX0_CW_MIN_OFDM CW_MIN_OFDM +#define DEF_TX1_CW_MIN_OFDM CW_MIN_OFDM +#define DEF_TX2_CW_MIN_OFDM CW_MIN_OFDM +#define DEF_TX3_CW_MIN_OFDM CW_MIN_OFDM + +#define DEF_TX0_CW_MIN_CCK CW_MIN_CCK +#define DEF_TX1_CW_MIN_CCK CW_MIN_CCK +#define DEF_TX2_CW_MIN_CCK CW_MIN_CCK +#define DEF_TX3_CW_MIN_CCK CW_MIN_CCK + +#define DEF_TX0_CW_MAX_OFDM CW_MAX_OFDM +#define DEF_TX1_CW_MAX_OFDM CW_MAX_OFDM +#define DEF_TX2_CW_MAX_OFDM CW_MAX_OFDM +#define DEF_TX3_CW_MAX_OFDM CW_MAX_OFDM + +#define DEF_TX0_CW_MAX_CCK CW_MAX_CCK +#define DEF_TX1_CW_MAX_CCK CW_MAX_CCK +#define DEF_TX2_CW_MAX_CCK CW_MAX_CCK +#define DEF_TX3_CW_MAX_CCK CW_MAX_CCK + +#define DEF_TX0_AIFS (2) +#define DEF_TX1_AIFS (2) +#define DEF_TX2_AIFS (2) +#define DEF_TX3_AIFS (2) + +#define DEF_TX0_ACM 0 +#define DEF_TX1_ACM 0 +#define DEF_TX2_ACM 0 +#define DEF_TX3_ACM 0 + +#define DEF_TX0_TXOP_LIMIT_CCK 0 +#define DEF_TX1_TXOP_LIMIT_CCK 0 +#define DEF_TX2_TXOP_LIMIT_CCK 0 +#define DEF_TX3_TXOP_LIMIT_CCK 0 + +#define DEF_TX0_TXOP_LIMIT_OFDM 0 +#define DEF_TX1_TXOP_LIMIT_OFDM 0 +#define DEF_TX2_TXOP_LIMIT_OFDM 0 +#define DEF_TX3_TXOP_LIMIT_OFDM 0 + +#define QOS_QOS_SETS 3 +#define QOS_PARAM_SET_ACTIVE 0 +#define QOS_PARAM_SET_DEF_CCK 1 +#define QOS_PARAM_SET_DEF_OFDM 2 + +#define CTRL_QOS_NO_ACK (0x0020) +#define DCT_FLAG_EXT_QOS_ENABLED (0x10) + +#define U32_PAD(n) ((4-(n))&0x3) + +/* + * Generic queue structure + * + * Contains common data for Rx and Tx queues + */ +#define TFD_CTL_COUNT_SET(n) (n<<24) +#define TFD_CTL_COUNT_GET(ctl) ((ctl>>24) & 7) +#define TFD_CTL_PAD_SET(n) (n<<28) +#define TFD_CTL_PAD_GET(ctl) (ctl>>28) + +#define TFD_TX_CMD_SLOTS 256 +#define TFD_CMD_SLOTS 32 + +#define TFD_MAX_PAYLOAD_SIZE (sizeof(struct iwl_cmd) - \ + sizeof(struct iwl_cmd_meta)) + +/* + * RX related structures and functions + */ +#define RX_FREE_BUFFERS 64 +#define RX_LOW_WATERMARK 8 + +#endif /* __iwlwifi_hw_h__ */ diff --git a/drivers/net/wireless/iwlwifi/iwl-io.h b/drivers/net/wireless/iwlwifi/iwl-io.h new file mode 100644 index 0000000000000000000000000000000000000000..8a8b96fcf48dd83db7a095a8bdd66848def3af73 --- /dev/null +++ b/drivers/net/wireless/iwlwifi/iwl-io.h @@ -0,0 +1,470 @@ +/****************************************************************************** + * + * Copyright(c) 2003 - 2007 Intel Corporation. All rights reserved. + * + * Portions of this file are derived from the ipw3945 project. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. 
+ * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA + * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. + * + * Contact Information: + * James P. Ketrenos + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + *****************************************************************************/ + +#ifndef __iwl_io_h__ +#define __iwl_io_h__ + +#include + +#include "iwl-debug.h" + +/* + * IO, register, and NIC memory access functions + * + * NOTE on naming convention and macro usage for these + * + * A single _ prefix before a an access function means that no state + * check or debug information is printed when that function is called. + * + * A double __ prefix before an access function means that state is checked + * (in the case of *restricted calls) and the current line number is printed + * in addition to any other debug output. + * + * The non-prefixed name is the #define that maps the caller into a + * #define that provides the caller's __LINE__ to the double prefix version. + * + * If you wish to call the function without any debug or state checking, + * you should use the single _ prefix version (as is used by dependent IO + * routines, for example _iwl_read_restricted calls the non-check version of + * _iwl_read32.) + * + * These declarations are *extremely* useful in quickly isolating code deltas + * which result in misconfiguring of the hardware I/O. In combination with + * git-bisect and the IO debug level you can quickly determine the specific + * commit which breaks the IO sequence to the hardware. 
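 *
 * As a concrete illustration (using iwl_write32() defined below and the
 * CSR definitions from iwl-hw.h), a call such as
 *
 *	iwl_write32(priv, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);
 *
 * compiles straight to _iwl_write32() in normal builds, while with
 * CONFIG_IWLWIFI_DEBUG it expands to __iwl_write32(__FILE__, __LINE__, ...)
 * so the IO debug trace records which file and line touched the register.
 *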
+ * + */ + +#define _iwl_write32(iwl, ofs, val) writel((val), (iwl)->hw_base + (ofs)) +#ifdef CONFIG_IWLWIFI_DEBUG +static inline void __iwl_write32(const char *f, u32 l, struct iwl_priv *iwl, + u32 ofs, u32 val) +{ + IWL_DEBUG_IO("write_direct32(0x%08X, 0x%08X) - %s %d\n", + (u32) (ofs), (u32) (val), f, l); + _iwl_write32(iwl, ofs, val); +} +#define iwl_write32(iwl, ofs, val) \ + __iwl_write32(__FILE__, __LINE__, iwl, ofs, val) +#else +#define iwl_write32(iwl, ofs, val) _iwl_write32(iwl, ofs, val) +#endif + +#define _iwl_read32(iwl, ofs) readl((iwl)->hw_base + (ofs)) +#ifdef CONFIG_IWLWIFI_DEBUG +static inline u32 __iwl_read32(char *f, u32 l, struct iwl_priv *iwl, u32 ofs) +{ + IWL_DEBUG_IO("read_direct32(0x%08X) - %s %d\n", ofs, f, l); + return _iwl_read32(iwl, ofs); +} +#define iwl_read32(iwl, ofs) __iwl_read32(__FILE__, __LINE__, iwl, ofs) +#else +#define iwl_read32(p, o) _iwl_read32(p, o) +#endif + +static inline int _iwl_poll_bit(struct iwl_priv *priv, u32 addr, + u32 bits, u32 mask, int timeout) +{ + int i = 0; + + do { + if ((_iwl_read32(priv, addr) & mask) == (bits & mask)) + return i; + mdelay(10); + i += 10; + } while (i < timeout); + + return -ETIMEDOUT; +} +#ifdef CONFIG_IWLWIFI_DEBUG +static inline int __iwl_poll_bit(const char *f, u32 l, + struct iwl_priv *priv, u32 addr, + u32 bits, u32 mask, int timeout) +{ + int rc = _iwl_poll_bit(priv, addr, bits, mask, timeout); + if (unlikely(rc == -ETIMEDOUT)) + IWL_DEBUG_IO + ("poll_bit(0x%08X, 0x%08X, 0x%08X) - timedout - %s %d\n", + addr, bits, mask, f, l); + else + IWL_DEBUG_IO + ("poll_bit(0x%08X, 0x%08X, 0x%08X) = 0x%08X - %s %d\n", + addr, bits, mask, rc, f, l); + return rc; +} +#define iwl_poll_bit(iwl, addr, bits, mask, timeout) \ + __iwl_poll_bit(__FILE__, __LINE__, iwl, addr, bits, mask, timeout) +#else +#define iwl_poll_bit(p, a, b, m, t) _iwl_poll_bit(p, a, b, m, t) +#endif + +static inline void _iwl_set_bit(struct iwl_priv *priv, u32 reg, u32 mask) +{ + _iwl_write32(priv, reg, _iwl_read32(priv, reg) | mask); +} +#ifdef CONFIG_IWLWIFI_DEBUG +static inline void __iwl_set_bit(const char *f, u32 l, + struct iwl_priv *priv, u32 reg, u32 mask) +{ + u32 val = _iwl_read32(priv, reg) | mask; + IWL_DEBUG_IO("set_bit(0x%08X, 0x%08X) = 0x%08X\n", reg, mask, val); + _iwl_write32(priv, reg, val); +} +#define iwl_set_bit(p, r, m) __iwl_set_bit(__FILE__, __LINE__, p, r, m) +#else +#define iwl_set_bit(p, r, m) _iwl_set_bit(p, r, m) +#endif + +static inline void _iwl_clear_bit(struct iwl_priv *priv, u32 reg, u32 mask) +{ + _iwl_write32(priv, reg, _iwl_read32(priv, reg) & ~mask); +} +#ifdef CONFIG_IWLWIFI_DEBUG +static inline void __iwl_clear_bit(const char *f, u32 l, + struct iwl_priv *priv, u32 reg, u32 mask) +{ + u32 val = _iwl_read32(priv, reg) & ~mask; + IWL_DEBUG_IO("clear_bit(0x%08X, 0x%08X) = 0x%08X\n", reg, mask, val); + _iwl_write32(priv, reg, val); +} +#define iwl_clear_bit(p, r, m) __iwl_clear_bit(__FILE__, __LINE__, p, r, m) +#else +#define iwl_clear_bit(p, r, m) _iwl_clear_bit(p, r, m) +#endif + +static inline int _iwl_grab_restricted_access(struct iwl_priv *priv) +{ + int rc; + u32 gp_ctl; + +#ifdef CONFIG_IWLWIFI_DEBUG + if (atomic_read(&priv->restrict_refcnt)) + return 0; +#endif + if (test_bit(STATUS_RF_KILL_HW, &priv->status) || + test_bit(STATUS_RF_KILL_SW, &priv->status)) { + IWL_WARNING("WARNING: Requesting MAC access during RFKILL " + "wakes up NIC\n"); + + /* 10 msec allows time for NIC to complete its data save */ + gp_ctl = _iwl_read32(priv, CSR_GP_CNTRL); + if (gp_ctl & CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY) { + 
IWL_DEBUG_RF_KILL("Wait for complete power-down, " + "gpctl = 0x%08x\n", gp_ctl); + mdelay(10); + } else + IWL_DEBUG_RF_KILL("power-down complete, " + "gpctl = 0x%08x\n", gp_ctl); + } + + /* this bit wakes up the NIC */ + _iwl_set_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); + rc = _iwl_poll_bit(priv, CSR_GP_CNTRL, + CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN, + (CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY | + CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP), 50); + if (rc < 0) { + IWL_ERROR("MAC is in deep sleep!\n"); + return -EIO; + } + +#ifdef CONFIG_IWLWIFI_DEBUG + atomic_inc(&priv->restrict_refcnt); +#endif + return 0; +} + +#ifdef CONFIG_IWLWIFI_DEBUG +static inline int __iwl_grab_restricted_access(const char *f, u32 l, + struct iwl_priv *priv) +{ + if (atomic_read(&priv->restrict_refcnt)) + IWL_DEBUG_INFO("Grabbing access while already held at " + "line %d.\n", l); + + IWL_DEBUG_IO("grabbing restricted access - %s %d\n", f, l); + + return _iwl_grab_restricted_access(priv); +} +#define iwl_grab_restricted_access(priv) \ + __iwl_grab_restricted_access(__FILE__, __LINE__, priv) +#else +#define iwl_grab_restricted_access(priv) \ + _iwl_grab_restricted_access(priv) +#endif + +static inline void _iwl_release_restricted_access(struct iwl_priv *priv) +{ +#ifdef CONFIG_IWLWIFI_DEBUG + if (atomic_dec_and_test(&priv->restrict_refcnt)) +#endif + _iwl_clear_bit(priv, CSR_GP_CNTRL, + CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); +} +#ifdef CONFIG_IWLWIFI_DEBUG +static inline void __iwl_release_restricted_access(const char *f, u32 l, + struct iwl_priv *priv) +{ + if (atomic_read(&priv->restrict_refcnt) <= 0) + IWL_ERROR("Release unheld restricted access at line %d.\n", l); + + IWL_DEBUG_IO("releasing restricted access - %s %d\n", f, l); + _iwl_release_restricted_access(priv); +} +#define iwl_release_restricted_access(priv) \ + __iwl_release_restricted_access(__FILE__, __LINE__, priv) +#else +#define iwl_release_restricted_access(priv) \ + _iwl_release_restricted_access(priv) +#endif + +static inline u32 _iwl_read_restricted(struct iwl_priv *priv, u32 reg) +{ + return _iwl_read32(priv, reg); +} +#ifdef CONFIG_IWLWIFI_DEBUG +static inline u32 __iwl_read_restricted(const char *f, u32 l, + struct iwl_priv *priv, u32 reg) +{ + u32 value = _iwl_read_restricted(priv, reg); + if (!atomic_read(&priv->restrict_refcnt)) + IWL_ERROR("Unrestricted access from %s %d\n", f, l); + IWL_DEBUG_IO("read_restricted(0x%4X) = 0x%08x - %s %d \n", reg, value, + f, l); + return value; +} +#define iwl_read_restricted(priv, reg) \ + __iwl_read_restricted(__FILE__, __LINE__, priv, reg) +#else +#define iwl_read_restricted _iwl_read_restricted +#endif + +static inline void _iwl_write_restricted(struct iwl_priv *priv, + u32 reg, u32 value) +{ + _iwl_write32(priv, reg, value); +} +#ifdef CONFIG_IWLWIFI_DEBUG +static void __iwl_write_restricted(u32 line, + struct iwl_priv *priv, u32 reg, u32 value) +{ + if (!atomic_read(&priv->restrict_refcnt)) + IWL_ERROR("Unrestricted access from line %d\n", line); + _iwl_write_restricted(priv, reg, value); +} +#define iwl_write_restricted(priv, reg, value) \ + __iwl_write_restricted(__LINE__, priv, reg, value) +#else +#define iwl_write_restricted _iwl_write_restricted +#endif + +static inline void iwl_write_buffer_restricted(struct iwl_priv *priv, + u32 reg, u32 len, u32 *values) +{ + u32 count = sizeof(u32); + + if ((priv != NULL) && (values != NULL)) { + for (; 0 < len; len -= count, reg += count, values++) + _iwl_write_restricted(priv, reg, *values); + } +} + +static inline int 
_iwl_poll_restricted_bit(struct iwl_priv *priv, + u32 addr, u32 mask, int timeout) +{ + int i = 0; + + do { + if ((_iwl_read_restricted(priv, addr) & mask) == mask) + return i; + mdelay(10); + i += 10; + } while (i < timeout); + + return -ETIMEDOUT; +} + +#ifdef CONFIG_IWLWIFI_DEBUG +static inline int __iwl_poll_restricted_bit(const char *f, u32 l, + struct iwl_priv *priv, + u32 addr, u32 mask, int timeout) +{ + int rc = _iwl_poll_restricted_bit(priv, addr, mask, timeout); + + if (unlikely(rc == -ETIMEDOUT)) + IWL_DEBUG_IO("poll_restricted_bit(0x%08X, 0x%08X) - " + "timedout - %s %d\n", addr, mask, f, l); + else + IWL_DEBUG_IO("poll_restricted_bit(0x%08X, 0x%08X) = 0x%08X " + "- %s %d\n", addr, mask, rc, f, l); + return rc; +} +#define iwl_poll_restricted_bit(iwl, addr, mask, timeout) \ + __iwl_poll_restricted_bit(__FILE__, __LINE__, iwl, addr, mask, timeout) +#else +#define iwl_poll_restricted_bit _iwl_poll_restricted_bit +#endif + +static inline u32 _iwl_read_restricted_reg(struct iwl_priv *priv, u32 reg) +{ + _iwl_write_restricted(priv, HBUS_TARG_PRPH_RADDR, reg | (3 << 24)); + return _iwl_read_restricted(priv, HBUS_TARG_PRPH_RDAT); +} +#ifdef CONFIG_IWLWIFI_DEBUG +static inline u32 __iwl_read_restricted_reg(u32 line, + struct iwl_priv *priv, u32 reg) +{ + if (!atomic_read(&priv->restrict_refcnt)) + IWL_ERROR("Unrestricted access from line %d\n", line); + return _iwl_read_restricted_reg(priv, reg); +} + +#define iwl_read_restricted_reg(priv, reg) \ + __iwl_read_restricted_reg(__LINE__, priv, reg) +#else +#define iwl_read_restricted_reg _iwl_read_restricted_reg +#endif + +static inline void _iwl_write_restricted_reg(struct iwl_priv *priv, + u32 addr, u32 val) +{ + _iwl_write_restricted(priv, HBUS_TARG_PRPH_WADDR, + ((addr & 0x0000FFFF) | (3 << 24))); + _iwl_write_restricted(priv, HBUS_TARG_PRPH_WDAT, val); +} +#ifdef CONFIG_IWLWIFI_DEBUG +static inline void __iwl_write_restricted_reg(u32 line, + struct iwl_priv *priv, + u32 addr, u32 val) +{ + if (!atomic_read(&priv->restrict_refcnt)) + IWL_ERROR("Unrestricted access from line %d\n", line); + _iwl_write_restricted_reg(priv, addr, val); +} + +#define iwl_write_restricted_reg(priv, addr, val) \ + __iwl_write_restricted_reg(__LINE__, priv, addr, val); +#else +#define iwl_write_restricted_reg _iwl_write_restricted_reg +#endif + +#define _iwl_set_bits_restricted_reg(priv, reg, mask) \ + _iwl_write_restricted_reg(priv, reg, \ + (_iwl_read_restricted_reg(priv, reg) | mask)) +#ifdef CONFIG_IWLWIFI_DEBUG +static inline void __iwl_set_bits_restricted_reg(u32 line, struct iwl_priv + *priv, u32 reg, u32 mask) +{ + if (!atomic_read(&priv->restrict_refcnt)) + IWL_ERROR("Unrestricted access from line %d\n", line); + _iwl_set_bits_restricted_reg(priv, reg, mask); +} +#define iwl_set_bits_restricted_reg(priv, reg, mask) \ + __iwl_set_bits_restricted_reg(__LINE__, priv, reg, mask) +#else +#define iwl_set_bits_restricted_reg _iwl_set_bits_restricted_reg +#endif + +#define _iwl_set_bits_mask_restricted_reg(priv, reg, bits, mask) \ + _iwl_write_restricted_reg( \ + priv, reg, ((_iwl_read_restricted_reg(priv, reg) & mask) | bits)) +#ifdef CONFIG_IWLWIFI_DEBUG +static inline void __iwl_set_bits_mask_restricted_reg(u32 line, + struct iwl_priv *priv, u32 reg, u32 bits, u32 mask) +{ + if (!atomic_read(&priv->restrict_refcnt)) + IWL_ERROR("Unrestricted access from line %d\n", line); + _iwl_set_bits_mask_restricted_reg(priv, reg, bits, mask); +} + +#define iwl_set_bits_mask_restricted_reg(priv, reg, bits, mask) \ + __iwl_set_bits_mask_restricted_reg(__LINE__, priv, 
reg, bits, mask) +#else +#define iwl_set_bits_mask_restricted_reg _iwl_set_bits_mask_restricted_reg +#endif + +static inline void iwl_clear_bits_restricted_reg(struct iwl_priv + *priv, u32 reg, u32 mask) +{ + u32 val = _iwl_read_restricted_reg(priv, reg); + _iwl_write_restricted_reg(priv, reg, (val & ~mask)); +} + +static inline u32 iwl_read_restricted_mem(struct iwl_priv *priv, u32 addr) +{ + iwl_write_restricted(priv, HBUS_TARG_MEM_RADDR, addr); + return iwl_read_restricted(priv, HBUS_TARG_MEM_RDAT); +} + +static inline void iwl_write_restricted_mem(struct iwl_priv *priv, u32 addr, + u32 val) +{ + iwl_write_restricted(priv, HBUS_TARG_MEM_WADDR, addr); + iwl_write_restricted(priv, HBUS_TARG_MEM_WDAT, val); +} + +static inline void iwl_write_restricted_mems(struct iwl_priv *priv, u32 addr, + u32 len, u32 *values) +{ + iwl_write_restricted(priv, HBUS_TARG_MEM_WADDR, addr); + for (; 0 < len; len -= sizeof(u32), values++) + iwl_write_restricted(priv, HBUS_TARG_MEM_WDAT, *values); +} + +static inline void iwl_write_restricted_regs(struct iwl_priv *priv, u32 reg, + u32 len, u8 *values) +{ + u32 reg_offset = reg; + u32 aligment = reg & 0x3; + + /* write any non-dword-aligned stuff at the beginning */ + if (len < sizeof(u32)) { + if ((aligment + len) <= sizeof(u32)) { + u8 size; + u32 value = 0; + size = len - 1; + memcpy(&value, values, len); + reg_offset = (reg_offset & 0x0000FFFF); + + _iwl_write_restricted(priv, + HBUS_TARG_PRPH_WADDR, + (reg_offset | (size << 24))); + _iwl_write_restricted(priv, HBUS_TARG_PRPH_WDAT, + value); + } + + return; + } + + /* now write all the dword-aligned stuff */ + for (; reg_offset < (reg + len); + reg_offset += sizeof(u32), values += sizeof(u32)) + _iwl_write_restricted_reg(priv, reg_offset, *((u32 *) values)); +} + +#endif diff --git a/drivers/net/wireless/iwlwifi/iwl-priv.h b/drivers/net/wireless/iwlwifi/iwl-priv.h new file mode 100644 index 0000000000000000000000000000000000000000..6b490d08fea93aefc36446ed8cdd759b5b9abaa7 --- /dev/null +++ b/drivers/net/wireless/iwlwifi/iwl-priv.h @@ -0,0 +1,308 @@ +/****************************************************************************** + * + * Copyright(c) 2003 - 2007 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA + * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. + * + * Contact Information: + * James P. Ketrenos + * Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + * + *****************************************************************************/ + +#ifndef __iwl_priv_h__ +#define __iwl_priv_h__ + +#include + +#ifdef CONFIG_IWLWIFI_SPECTRUM_MEASUREMENT + +enum { + MEASUREMENT_READY = (1 << 0), + MEASUREMENT_ACTIVE = (1 << 1), +}; + +#endif + +struct iwl_priv { + + /* ieee device used by generic ieee processing code */ + struct ieee80211_hw *hw; + struct ieee80211_channel *ieee_channels; + struct ieee80211_rate *ieee_rates; + + /* temporary frame storage list */ + struct list_head free_frames; + int frames_count; + + u8 phymode; + int alloc_rxb_skb; + + void (*rx_handlers[REPLY_MAX])(struct iwl_priv *priv, + struct iwl_rx_mem_buffer *rxb); + + const struct ieee80211_hw_mode *modes; + +#ifdef CONFIG_IWLWIFI_SPECTRUM_MEASUREMENT + /* spectrum measurement report caching */ + struct iwl_spectrum_notification measure_report; + u8 measurement_status; +#endif + /* ucode beacon time */ + u32 ucode_beacon_time; + + /* we allocate array of iwl_channel_info for NIC's valid channels. + * Access via channel # using indirect index array */ + struct iwl_channel_info *channel_info; /* channel info array */ + u8 channel_count; /* # of channels */ + + /* each calibration channel group in the EEPROM has a derived + * clip setting for each rate. */ + const struct iwl_clip_group clip_groups[5]; + + /* thermal calibration */ + s32 temperature; /* degrees Kelvin */ + s32 last_temperature; + + /* Scan related variables */ + unsigned long last_scan_jiffies; + unsigned long scan_start; + unsigned long scan_pass_start; + unsigned long scan_start_tsf; + int scan_bands; + int one_direct_scan; + u8 direct_ssid_len; + u8 direct_ssid[IW_ESSID_MAX_SIZE]; + struct iwl_scan_cmd *scan; + u8 only_active_channel; + + /* spinlock */ + spinlock_t lock; /* protect general shared data */ + spinlock_t hcmd_lock; /* protect hcmd */ + struct mutex mutex; + + /* basic pci-network driver stuff */ + struct pci_dev *pci_dev; + + /* pci hardware address support */ + void __iomem *hw_base; + + /* uCode images, save to reload in case of failure */ + struct fw_image_desc ucode_code; /* runtime inst */ + struct fw_image_desc ucode_data; /* runtime data original */ + struct fw_image_desc ucode_data_backup; /* runtime data save/restore */ + struct fw_image_desc ucode_init; /* initialization inst */ + struct fw_image_desc ucode_init_data; /* initialization data */ + struct fw_image_desc ucode_boot; /* bootstrap inst */ + + + struct iwl_rxon_time_cmd rxon_timing; + + /* We declare this const so it can only be + * changed via explicit cast within the + * routines that actually update the physical + * hardware */ + const struct iwl_rxon_cmd active_rxon; + struct iwl_rxon_cmd staging_rxon; + + int error_recovering; + struct iwl_rxon_cmd recovery_rxon; + + /* 1st responses from initialize and runtime uCode images. + * 4965's initialize alive response contains some calibration data. 
*/ + struct iwl_init_alive_resp card_alive_init; + struct iwl_alive_resp card_alive; + +#ifdef LED + /* LED related variables */ + struct iwl_activity_blink activity; + unsigned long led_packets; + int led_state; +#endif + + u16 active_rate; + u16 active_rate_basic; + + u8 call_post_assoc_from_beacon; + u8 assoc_station_added; +#if IWL == 4965 + u8 use_ant_b_for_management_frame; /* Tx antenna selection */ + /* HT variables */ + u8 is_dup; + u8 is_ht_enabled; + u8 channel_width; /* 0=20MHZ, 1=40MHZ */ + u8 current_channel_width; + u8 valid_antenna; /* Bit mask of antennas actually connected */ +#ifdef CONFIG_IWLWIFI_SENSITIVITY + struct iwl_sensitivity_data sensitivity_data; + struct iwl_chain_noise_data chain_noise_data; + u8 start_calib; + __le16 sensitivity_tbl[HD_TABLE_SIZE]; +#endif /*CONFIG_IWLWIFI_SENSITIVITY*/ + +#ifdef CONFIG_IWLWIFI_HT + struct sta_ht_info current_assoc_ht; +#endif + u8 active_rate_ht[2]; + u8 last_phy_res[100]; + + /* Rate scaling data */ + struct iwl_lq_mngr lq_mngr; +#endif + + /* Rate scaling data */ + s8 data_retry_limit; + u8 retry_rate; + + wait_queue_head_t wait_command_queue; + + int activity_timer_active; + + /* Rx and Tx DMA processing queues */ + struct iwl_rx_queue rxq; + struct iwl_tx_queue txq[IWL_MAX_NUM_QUEUES]; +#if IWL == 4965 + unsigned long txq_ctx_active_msk; + struct iwl_kw kw; /* keep warm address */ + u32 scd_base_addr; /* scheduler sram base address */ +#endif + + unsigned long status; + u32 config; + + int last_rx_rssi; /* From Rx packet statisitics */ + int last_rx_noise; /* From beacon statistics */ + + struct iwl_power_mgr power_data; + + struct iwl_notif_statistics statistics; + unsigned long last_statistics_time; + + /* context information */ + u8 essid[IW_ESSID_MAX_SIZE]; + u8 essid_len; + u16 rates_mask; + + u32 power_mode; + u32 antenna; + u8 bssid[ETH_ALEN]; + u16 rts_threshold; + u8 mac_addr[ETH_ALEN]; + + /*station table variables */ + spinlock_t sta_lock; + int num_stations; + struct iwl_station_entry stations[IWL_STATION_COUNT]; + + /* Indication if ieee80211_ops->open has been called */ + int is_open; + + u8 mac80211_registered; + int is_abg; + + u32 notif_missed_beacons; + + /* Rx'd packet timing information */ + u32 last_beacon_time; + u64 last_tsf; + + /* Duplicate packet detection */ + u16 last_seq_num; + u16 last_frag_num; + unsigned long last_packet_time; + struct list_head ibss_mac_hash[IWL_IBSS_MAC_HASH_SIZE]; + + /* eeprom */ + struct iwl_eeprom eeprom; + + int iw_mode; + + struct sk_buff *ibss_beacon; + + /* Last Rx'd beacon timestamp */ + u32 timestamp0; + u32 timestamp1; + u16 beacon_int; + struct iwl_driver_hw_info hw_setting; + int interface_id; + + /* Current association information needed to configure the + * hardware */ + u16 assoc_id; + u16 assoc_capability; + u8 ps_mode; + +#ifdef CONFIG_IWLWIFI_QOS + struct iwl_qos_info qos_data; +#endif /*CONFIG_IWLWIFI_QOS */ + + struct workqueue_struct *workqueue; + + struct work_struct up; + struct work_struct restart; + struct work_struct calibrated_work; + struct work_struct scan_completed; + struct work_struct rx_replenish; + struct work_struct rf_kill; + struct work_struct abort_scan; + struct work_struct update_link_led; + struct work_struct auth_work; + struct work_struct report_work; + struct work_struct request_scan; + struct work_struct beacon_update; + + struct tasklet_struct irq_tasklet; + + struct delayed_work init_alive_start; + struct delayed_work alive_start; + struct delayed_work activity_timer; + struct delayed_work thermal_periodic; + struct 
delayed_work gather_stats; + struct delayed_work scan_check; + struct delayed_work post_associate; + +#define IWL_DEFAULT_TX_POWER 0x0F + s8 user_txpower_limit; + s8 max_channel_txpower_limit; + u32 cck_power_index_compensation; + +#ifdef CONFIG_PM + u32 pm_state[16]; +#endif + +#ifdef CONFIG_IWLWIFI_DEBUG + /* debugging info */ + u32 framecnt_to_us; + atomic_t restrict_refcnt; +#endif + +#if IWL == 4965 + struct work_struct txpower_work; +#ifdef CONFIG_IWLWIFI_SENSITIVITY + struct work_struct sensitivity_work; +#endif + struct work_struct statistics_work; + struct timer_list statistics_periodic; + +#ifdef CONFIG_IWLWIFI_HT_AGG + struct work_struct agg_work; +#endif + +#endif /* 4965 */ +}; /*iwl_priv */ + +#endif /* __iwl_priv_h__ */ diff --git a/drivers/net/wireless/iwlwifi/iwl-prph.h b/drivers/net/wireless/iwlwifi/iwl-prph.h new file mode 100644 index 0000000000000000000000000000000000000000..0df41148eadc0b7ec3b983ae01f675efbabfe8da --- /dev/null +++ b/drivers/net/wireless/iwlwifi/iwl-prph.h @@ -0,0 +1,229 @@ +/****************************************************************************** + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2005 - 2007 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU Geeral Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, + * USA + * + * The full GNU General Public License is included in this distribution + * in the file called LICENSE.GPL. + * + * Contact Information: + * James P. Ketrenos + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + * BSD LICENSE + * + * Copyright(c) 2005 - 2007 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + *****************************************************************************/ + +#ifndef __iwl_prph_h__ +#define __iwl_prph_h__ + + +#define PRPH_BASE (0x00000) +#define PRPH_END (0xFFFFF) + +/* APMG (power management) constants */ +#define APMG_BASE (PRPH_BASE + 0x3000) +#define APMG_CLK_CTRL_REG (APMG_BASE + 0x0000) +#define APMG_CLK_EN_REG (APMG_BASE + 0x0004) +#define APMG_CLK_DIS_REG (APMG_BASE + 0x0008) +#define APMG_PS_CTRL_REG (APMG_BASE + 0x000c) +#define APMG_PCIDEV_STT_REG (APMG_BASE + 0x0010) +#define APMG_RFKILL_REG (APMG_BASE + 0x0014) +#define APMG_RTC_INT_STT_REG (APMG_BASE + 0x001c) +#define APMG_RTC_INT_MSK_REG (APMG_BASE + 0x0020) + +#define APMG_CLK_VAL_DMA_CLK_RQT (0x00000200) +#define APMG_CLK_VAL_BSM_CLK_RQT (0x00000800) + +#define APMG_PS_CTRL_VAL_RESET_REQ (0x04000000) + +#define APMG_PCIDEV_STT_VAL_L1_ACT_DIS (0x00000800) + +#define APMG_PS_CTRL_MSK_PWR_SRC (0x03000000) +#define APMG_PS_CTRL_VAL_PWR_SRC_VMAIN (0x00000000) +#define APMG_PS_CTRL_VAL_PWR_SRC_VAUX (0x01000000) + + +/** + * BSM (Bootstrap State Machine) + * + * The Bootstrap State Machine (BSM) stores a short bootstrap uCode program + * in special SRAM that does not power down when the embedded control + * processor is sleeping (e.g. for periodic power-saving shutdowns of radio). + * + * When powering back up after sleeps (or during initial uCode load), the BSM + * internally loads the short bootstrap program from the special SRAM into the + * embedded processor's instruction SRAM, and starts the processor so it runs + * the bootstrap program. + * + * This bootstrap program loads (via PCI busmaster DMA) instructions and data + * images for a uCode program from host DRAM locations. The host driver + * indicates DRAM locations and sizes for instruction and data images via the + * four BSM_DRAM_* registers. Once the bootstrap program loads the new program, + * the new program starts automatically. + * + * The uCode used for open-source drivers includes two programs: + * + * 1) Initialization -- performs hardware calibration and sets up some + * internal data, then notifies host via "initialize alive" notification + * (struct iwl_init_alive_resp) that it has completed all of its work. + * After signal from host, it then loads and starts the runtime program. + * The initialization program must be used when initially setting up the + * NIC after loading the driver. + * + * 2) Runtime/Protocol -- performs all normal runtime operations. This + * notifies host via "alive" notification (struct iwl_alive_resp) that it + * is ready to be used. + * + * When initializing the NIC, the host driver does the following procedure: + * + * 1) Load bootstrap program (instructions only, no data image for bootstrap) + * into bootstrap memory. Use dword writes starting at BSM_SRAM_LOWER_BOUND + * + * 2) Point (via BSM_DRAM_*) to the "initialize" uCode data and instruction + * images in host DRAM. 
+ * + * 3) Set up BSM to copy from BSM SRAM into uCode instruction SRAM when asked: + * BSM_WR_MEM_SRC_REG = 0 + * BSM_WR_MEM_DST_REG = RTC_INST_LOWER_BOUND + * BSM_WR_MEM_DWCOUNT_REG = # dwords in bootstrap instruction image + * + * 4) Load bootstrap into instruction SRAM: + * BSM_WR_CTRL_REG = BSM_WR_CTRL_REG_BIT_START + * + * 5) Wait for load completion: + * Poll BSM_WR_CTRL_REG for BSM_WR_CTRL_REG_BIT_START = 0 + * + * 6) Enable future boot loads whenever NIC's power management triggers it: + * BSM_WR_CTRL_REG = BSM_WR_CTRL_REG_BIT_START_EN + * + * 7) Start the NIC by removing all reset bits: + * CSR_RESET = 0 + * + * The bootstrap uCode (already in instruction SRAM) loads initialization + * uCode. Initialization uCode performs data initialization, sends + * "initialize alive" notification to host, and waits for a signal from + * host to load runtime code. + * + * 4) Point (via BSM_DRAM_*) to the "runtime" uCode data and instruction + * images in host DRAM. The last register loaded must be the instruction + * bytecount register ("1" in MSbit tells initialization uCode to load + * the runtime uCode): + * BSM_DRAM_INST_BYTECOUNT_REG = bytecount | BSM_DRAM_INST_LOAD + * + * 5) Wait for "alive" notification, then issue normal runtime commands. + * + * Data caching during power-downs: + * + * Just before the embedded controller powers down (e.g for automatic + * power-saving modes, or for RFKILL), uCode stores (via PCI busmaster DMA) + * a current snapshot of the embedded processor's data SRAM into host DRAM. + * This caches the data while the embedded processor's memory is powered down. + * Location and size are controlled by BSM_DRAM_DATA_* registers. + * + * NOTE: Instruction SRAM does not need to be saved, since that doesn't + * change during operation; the original image (from uCode distribution + * file) can be used for reload. + * + * When powering back up, the BSM loads the bootstrap program. Bootstrap looks + * at the BSM_DRAM_* registers, which now point to the runtime instruction + * image and the cached (modified) runtime data (*not* the initialization + * uCode). Bootstrap reloads these runtime images into SRAM, and restarts the + * uCode from where it left off before the power-down. + * + * NOTE: Initialization uCode does *not* run as part of the save/restore + * procedure. + * + * This save/restore method is mostly for autonomous power management during + * normal operation (result of POWER_TABLE_CMD). Platform suspend/resume and + * RFKILL should use complete restarts (with total re-initialization) of uCode, + * allowing total shutdown (including BSM memory). + * + * Note that, during normal operation, the host DRAM that held the initial + * startup data for the runtime code is now being used as a backup data cache + * for modified data! If you need to completely re-initialize the NIC, make + * sure that you use the runtime data image from the uCode distribution file, + * not the modified/saved runtime data. You may want to store a separate + * "clean" runtime data image in DRAM to avoid disk reads of distribution file. 
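+ *
+ * As a minimal illustrative sketch (not part of this patch), the initial
+ * bootstrap copy in steps 3) through 5) maps onto the restricted-register
+ * helpers from iwl-io.h and the BSM_* registers defined below roughly as
+ * follows; the function name and the inst_len_dwords parameter are
+ * hypothetical:
+ *
+ *	static int bsm_start_bootstrap(struct iwl_priv *priv,
+ *				       u32 inst_len_dwords)
+ *	{
+ *		int t;
+ *
+ *		iwl_write_restricted_reg(priv, BSM_WR_MEM_SRC_REG, 0);
+ *		iwl_write_restricted_reg(priv, BSM_WR_MEM_DST_REG,
+ *					 RTC_INST_LOWER_BOUND);
+ *		iwl_write_restricted_reg(priv, BSM_WR_DWCOUNT_REG,
+ *					 inst_len_dwords);
+ *		iwl_write_restricted_reg(priv, BSM_WR_CTRL_REG,
+ *					 BSM_WR_CTRL_REG_BIT_START);
+ *
+ *		for (t = 0; t < 100; t++) {
+ *			if (!(iwl_read_restricted_reg(priv, BSM_WR_CTRL_REG) &
+ *			      BSM_WR_CTRL_REG_BIT_START))
+ *				return 0;
+ *			udelay(10);
+ *		}
+ *		return -ETIMEDOUT;
+ *	}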
+ */ + +/* BSM bit fields */ +#define BSM_WR_CTRL_REG_BIT_START (0x80000000) /* start boot load now */ +#define BSM_WR_CTRL_REG_BIT_START_EN (0x40000000) /* enable boot after pwrup*/ +#define BSM_DRAM_INST_LOAD (0x80000000) /* start program load now */ + +/* BSM addresses */ +#define BSM_BASE (PRPH_BASE + 0x3400) +#define BSM_END (PRPH_BASE + 0x3800) + +#define BSM_WR_CTRL_REG (BSM_BASE + 0x000) /* ctl and status */ +#define BSM_WR_MEM_SRC_REG (BSM_BASE + 0x004) /* source in BSM mem */ +#define BSM_WR_MEM_DST_REG (BSM_BASE + 0x008) /* dest in SRAM mem */ +#define BSM_WR_DWCOUNT_REG (BSM_BASE + 0x00C) /* bytes */ +#define BSM_WR_STATUS_REG (BSM_BASE + 0x010) /* bit 0: 1 == done */ + +/* + * Pointers and size regs for bootstrap load and data SRAM save/restore. + * NOTE: 3945 pointers use bits 31:0 of DRAM address. + * 4965 pointers use bits 35:4 of DRAM address. + */ +#define BSM_DRAM_INST_PTR_REG (BSM_BASE + 0x090) +#define BSM_DRAM_INST_BYTECOUNT_REG (BSM_BASE + 0x094) +#define BSM_DRAM_DATA_PTR_REG (BSM_BASE + 0x098) +#define BSM_DRAM_DATA_BYTECOUNT_REG (BSM_BASE + 0x09C) + +/* + * BSM special memory, stays powered on during power-save sleeps. + * Read/write, address range from LOWER_BOUND to (LOWER_BOUND + SIZE -1) + */ +#define BSM_SRAM_LOWER_BOUND (PRPH_BASE + 0x3800) +#define BSM_SRAM_SIZE (1024) /* bytes */ + + +#endif /* __iwl_prph_h__ */ diff --git a/drivers/net/wireless/iwlwifi/iwl-spectrum.h b/drivers/net/wireless/iwlwifi/iwl-spectrum.h new file mode 100644 index 0000000000000000000000000000000000000000..b576ff24eb4fd19ec24ffba1d06e02b2cfe49d12 --- /dev/null +++ b/drivers/net/wireless/iwlwifi/iwl-spectrum.h @@ -0,0 +1,91 @@ +/****************************************************************************** + * + * Copyright(c) 2003 - 2007 Intel Corporation. All rights reserved. + * + * Portions of this file are derived from the ieee80211 subsystem header files. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA + * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. + * + * Contact Information: + * James P. Ketrenos + * Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + * + *****************************************************************************/ + +#ifndef __iwl_spectrum_h__ +#define __iwl_spectrum_h__ +enum { /* ieee80211_basic_report.map */ + IEEE80211_BASIC_MAP_BSS = (1 << 0), + IEEE80211_BASIC_MAP_OFDM = (1 << 1), + IEEE80211_BASIC_MAP_UNIDENTIFIED = (1 << 2), + IEEE80211_BASIC_MAP_RADAR = (1 << 3), + IEEE80211_BASIC_MAP_UNMEASURED = (1 << 4), + /* Bits 5-7 are reserved */ + +}; +struct ieee80211_basic_report { + u8 channel; + __le64 start_time; + __le16 duration; + u8 map; +} __attribute__ ((packed)); + +enum { /* ieee80211_measurement_request.mode */ + /* Bit 0 is reserved */ + IEEE80211_MEASUREMENT_ENABLE = (1 << 1), + IEEE80211_MEASUREMENT_REQUEST = (1 << 2), + IEEE80211_MEASUREMENT_REPORT = (1 << 3), + /* Bits 4-7 are reserved */ +}; + +enum { + IEEE80211_REPORT_BASIC = 0, /* required */ + IEEE80211_REPORT_CCA = 1, /* optional */ + IEEE80211_REPORT_RPI = 2, /* optional */ + /* 3-255 reserved */ +}; + +struct ieee80211_measurement_params { + u8 channel; + __le64 start_time; + __le16 duration; +} __attribute__ ((packed)); + +struct ieee80211_info_element { + u8 id; + u8 len; + u8 data[0]; +} __attribute__ ((packed)); + +struct ieee80211_measurement_request { + struct ieee80211_info_element ie; + u8 token; + u8 mode; + u8 type; + struct ieee80211_measurement_params params[0]; +} __attribute__ ((packed)); + +struct ieee80211_measurement_report { + struct ieee80211_info_element ie; + u8 token; + u8 mode; + u8 type; + union { + struct ieee80211_basic_report basic[0]; + } u; +} __attribute__ ((packed)); +#endif diff --git a/drivers/net/wireless/iwlwifi/iwl3945-base.c b/drivers/net/wireless/iwlwifi/iwl3945-base.c new file mode 100644 index 0000000000000000000000000000000000000000..75e3b5c3f1558bce3ffc32cc99e172f6d2fa7bc4 --- /dev/null +++ b/drivers/net/wireless/iwlwifi/iwl3945-base.c @@ -0,0 +1,8746 @@ +/****************************************************************************** + * + * Copyright(c) 2003 - 2007 Intel Corporation. All rights reserved. + * + * Portions of this file are derived from the ipw3945 project, as well + * as portions of the ieee80211 subsystem header files. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA + * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. + * + * Contact Information: + * James P. Ketrenos + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + *****************************************************************************/ + +/* + * NOTE: This file (iwl-base.c) is used to build to multiple hardware targets + * by defining IWL to either 3945 or 4965. 
The Makefile used when building + * the base targets will create base-3945.o and base-4965.o + * + * The eventual goal is to move as many of the #if IWL / #endif blocks out of + * this file and into the hardware specific implementation files (iwl-XXXX.c) + * and leave only the common (non #ifdef sprinkled) code in this file + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include + +#define IWL 3945 + +#include "iwlwifi.h" +#include "iwl-3945.h" +#include "iwl-helpers.h" + +#ifdef CONFIG_IWLWIFI_DEBUG +u32 iwl_debug_level; +#endif + +/****************************************************************************** + * + * module boiler plate + * + ******************************************************************************/ + +/* module parameters */ +int iwl_param_disable_hw_scan; +int iwl_param_debug; +int iwl_param_disable; /* def: enable radio */ +int iwl_param_antenna; /* def: 0 = both antennas (use diversity) */ +int iwl_param_hwcrypto; /* def: using software encryption */ +int iwl_param_qos_enable = 1; +int iwl_param_queues_num = IWL_MAX_NUM_QUEUES; + +/* + * module name, copyright, version, etc. + * NOTE: DRV_NAME is defined in iwlwifi.h for use by iwl-debug.h and printk + */ + +#define DRV_DESCRIPTION \ +"Intel(R) PRO/Wireless 3945ABG/BG Network Connection driver for Linux" + +#ifdef CONFIG_IWLWIFI_DEBUG +#define VD "d" +#else +#define VD +#endif + +#ifdef CONFIG_IWLWIFI_SPECTRUM_MEASUREMENT +#define VS "s" +#else +#define VS +#endif + +#define IWLWIFI_VERSION "1.1.17k" VD VS +#define DRV_COPYRIGHT "Copyright(c) 2003-2007 Intel Corporation" +#define DRV_VERSION IWLWIFI_VERSION + +/* Change firmware file name, using "-" and incrementing number, + * *only* when uCode interface or architecture changes so that it + * is not compatible with earlier drivers. 
+ * This number will also appear in << 8 position of 1st dword of uCode file */ +#define IWL3945_UCODE_API "-1" + +MODULE_DESCRIPTION(DRV_DESCRIPTION); +MODULE_VERSION(DRV_VERSION); +MODULE_AUTHOR(DRV_COPYRIGHT); +MODULE_LICENSE("GPL"); + +__le16 *ieee80211_get_qos_ctrl(struct ieee80211_hdr *hdr) +{ + u16 fc = le16_to_cpu(hdr->frame_control); + int hdr_len = ieee80211_get_hdrlen(fc); + + if ((fc & 0x00cc) == (IEEE80211_STYPE_QOS_DATA | IEEE80211_FTYPE_DATA)) + return (__le16 *) ((u8 *) hdr + hdr_len - QOS_CONTROL_LEN); + return NULL; +} + +static const struct ieee80211_hw_mode *iwl_get_hw_mode( + struct iwl_priv *priv, int mode) +{ + int i; + + for (i = 0; i < 3; i++) + if (priv->modes[i].mode == mode) + return &priv->modes[i]; + + return NULL; +} + +static int iwl_is_empty_essid(const char *essid, int essid_len) +{ + /* Single white space is for Linksys APs */ + if (essid_len == 1 && essid[0] == ' ') + return 1; + + /* Otherwise, if the entire essid is 0, we assume it is hidden */ + while (essid_len) { + essid_len--; + if (essid[essid_len] != '\0') + return 0; + } + + return 1; +} + +static const char *iwl_escape_essid(const char *essid, u8 essid_len) +{ + static char escaped[IW_ESSID_MAX_SIZE * 2 + 1]; + const char *s = essid; + char *d = escaped; + + if (iwl_is_empty_essid(essid, essid_len)) { + memcpy(escaped, "", sizeof("")); + return escaped; + } + + essid_len = min(essid_len, (u8) IW_ESSID_MAX_SIZE); + while (essid_len--) { + if (*s == '\0') { + *d++ = '\\'; + *d++ = '0'; + s++; + } else + *d++ = *s++; + } + *d = '\0'; + return escaped; +} + +static void iwl_print_hex_dump(int level, void *p, u32 len) +{ +#ifdef CONFIG_IWLWIFI_DEBUG + if (!(iwl_debug_level & level)) + return; + + print_hex_dump(KERN_DEBUG, "iwl data: ", DUMP_PREFIX_OFFSET, 16, 1, + p, len, 1); +#endif +} + +/*************** DMA-QUEUE-GENERAL-FUNCTIONS ***** + * DMA services + * + * Theory of operation + * + * A queue is a circular buffers with 'Read' and 'Write' pointers. + * 2 empty entries always kept in the buffer to protect from overflow. + * + * For Tx queue, there are low mark and high mark limits. If, after queuing + * the packet for Tx, free space become < low mark, Tx queue stopped. When + * reclaiming packets (on 'tx done IRQ), if free space become > high mark, + * Tx queue resumed. + * + * The IWL operates with six queues, one receive queue in the device's + * sram, one transmit queue for sending commands to the device firmware, + * and four transmit queues for data. + ***************************************************/ + +static int iwl_queue_space(const struct iwl_queue *q) +{ + int s = q->last_used - q->first_empty; + + if (q->last_used > q->first_empty) + s -= q->n_bd; + + if (s <= 0) + s += q->n_window; + /* keep some reserve to not confuse empty and full situations */ + s -= 2; + if (s < 0) + s = 0; + return s; +} + +/* XXX: n_bd must be power-of-two size */ +static inline int iwl_queue_inc_wrap(int index, int n_bd) +{ + return ++index & (n_bd - 1); +} + +/* XXX: n_bd must be power-of-two size */ +static inline int iwl_queue_dec_wrap(int index, int n_bd) +{ + return --index & (n_bd - 1); +} + +static inline int x2_queue_used(const struct iwl_queue *q, int i) +{ + return q->first_empty > q->last_used ? 
+ (i >= q->last_used && i < q->first_empty) : + !(i < q->last_used && i >= q->first_empty); +} + +static inline u8 get_cmd_index(struct iwl_queue *q, u32 index, int is_huge) +{ + if (is_huge) + return q->n_window; + + return index & (q->n_window - 1); +} + +static int iwl_queue_init(struct iwl_priv *priv, struct iwl_queue *q, + int count, int slots_num, u32 id) +{ + q->n_bd = count; + q->n_window = slots_num; + q->id = id; + + /* count must be power-of-two size, otherwise iwl_queue_inc_wrap + * and iwl_queue_dec_wrap are broken. */ + BUG_ON(!is_power_of_2(count)); + + /* slots_num must be power-of-two size, otherwise + * get_cmd_index is broken. */ + BUG_ON(!is_power_of_2(slots_num)); + + q->low_mark = q->n_window / 4; + if (q->low_mark < 4) + q->low_mark = 4; + + q->high_mark = q->n_window / 8; + if (q->high_mark < 2) + q->high_mark = 2; + + q->first_empty = q->last_used = 0; + + return 0; +} + +static int iwl_tx_queue_alloc(struct iwl_priv *priv, + struct iwl_tx_queue *txq, u32 id) +{ + struct pci_dev *dev = priv->pci_dev; + + if (id != IWL_CMD_QUEUE_NUM) { + txq->txb = kmalloc(sizeof(txq->txb[0]) * + TFD_QUEUE_SIZE_MAX, GFP_KERNEL); + if (!txq->txb) { + IWL_ERROR("kmalloc for auxilary BD " + "structures failed\n"); + goto error; + } + } else + txq->txb = NULL; + + txq->bd = pci_alloc_consistent(dev, + sizeof(txq->bd[0]) * TFD_QUEUE_SIZE_MAX, + &txq->q.dma_addr); + + if (!txq->bd) { + IWL_ERROR("pci_alloc_consistent(%zd) failed\n", + sizeof(txq->bd[0]) * TFD_QUEUE_SIZE_MAX); + goto error; + } + txq->q.id = id; + + return 0; + + error: + if (txq->txb) { + kfree(txq->txb); + txq->txb = NULL; + } + + return -ENOMEM; +} + +int iwl_tx_queue_init(struct iwl_priv *priv, + struct iwl_tx_queue *txq, int slots_num, u32 txq_id) +{ + struct pci_dev *dev = priv->pci_dev; + int len; + int rc = 0; + + /* alocate command space + one big command for scan since scan + * command is very huge the system will not have two scan at the + * same time */ + len = sizeof(struct iwl_cmd) * slots_num; + if (txq_id == IWL_CMD_QUEUE_NUM) + len += IWL_MAX_SCAN_SIZE; + txq->cmd = pci_alloc_consistent(dev, len, &txq->dma_addr_cmd); + if (!txq->cmd) + return -ENOMEM; + + rc = iwl_tx_queue_alloc(priv, txq, txq_id); + if (rc) { + pci_free_consistent(dev, len, txq->cmd, txq->dma_addr_cmd); + + return -ENOMEM; + } + txq->need_update = 0; + + /* TFD_QUEUE_SIZE_MAX must be power-of-two size, otherwise + * iwl_queue_inc_wrap and iwl_queue_dec_wrap are broken. */ + BUILD_BUG_ON(TFD_QUEUE_SIZE_MAX & (TFD_QUEUE_SIZE_MAX - 1)); + iwl_queue_init(priv, &txq->q, TFD_QUEUE_SIZE_MAX, slots_num, txq_id); + + iwl_hw_tx_queue_init(priv, txq); + + return 0; +} + +/** + * iwl_tx_queue_free - Deallocate DMA queue. + * @txq: Transmit queue to deallocate. + * + * Empty queue by removing and destroying all BD's. + * Free all buffers. txq itself is not freed. 
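+ *
+ * The reclaim loop below advances q->last_used toward q->first_empty with
+ * iwl_queue_inc_wrap(); because n_bd is a power of two, the index wraps
+ * with a simple mask, e.g. for n_bd == 256, iwl_queue_inc_wrap(255, 256)
+ * yields 0.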
+ * + */ +void iwl_tx_queue_free(struct iwl_priv *priv, struct iwl_tx_queue *txq) +{ + struct iwl_queue *q = &txq->q; + struct pci_dev *dev = priv->pci_dev; + int len; + + if (q->n_bd == 0) + return; + + /* first, empty all BD's */ + for (; q->first_empty != q->last_used; + q->last_used = iwl_queue_inc_wrap(q->last_used, q->n_bd)) + iwl_hw_txq_free_tfd(priv, txq); + + len = sizeof(struct iwl_cmd) * q->n_window; + if (q->id == IWL_CMD_QUEUE_NUM) + len += IWL_MAX_SCAN_SIZE; + + pci_free_consistent(dev, len, txq->cmd, txq->dma_addr_cmd); + + /* free buffers belonging to queue itself */ + if (txq->q.n_bd) + pci_free_consistent(dev, sizeof(struct iwl_tfd_frame) * + txq->q.n_bd, txq->bd, txq->q.dma_addr); + + if (txq->txb) { + kfree(txq->txb); + txq->txb = NULL; + } + + /* 0 fill whole structure */ + memset(txq, 0, sizeof(*txq)); +} + +const u8 BROADCAST_ADDR[ETH_ALEN] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF }; + +/*************** STATION TABLE MANAGEMENT **** + * + * NOTE: This needs to be overhauled to better synchronize between + * how the iwl-4965.c is using iwl_hw_find_station vs. iwl-3945.c + * + * mac80211 should also be examined to determine if sta_info is duplicating + * the functionality provided here + */ + +/**************************************************************/ +#if 0 /* temparary disable till we add real remove station */ +static u8 iwl_remove_station(struct iwl_priv *priv, const u8 *addr, int is_ap) +{ + int index = IWL_INVALID_STATION; + int i; + unsigned long flags; + + spin_lock_irqsave(&priv->sta_lock, flags); + + if (is_ap) + index = IWL_AP_ID; + else if (is_broadcast_ether_addr(addr)) + index = priv->hw_setting.bcast_sta_id; + else + for (i = IWL_STA_ID; i < priv->hw_setting.max_stations; i++) + if (priv->stations[i].used && + !compare_ether_addr(priv->stations[i].sta.sta.addr, + addr)) { + index = i; + break; + } + + if (unlikely(index == IWL_INVALID_STATION)) + goto out; + + if (priv->stations[index].used) { + priv->stations[index].used = 0; + priv->num_stations--; + } + + BUG_ON(priv->num_stations < 0); + +out: + spin_unlock_irqrestore(&priv->sta_lock, flags); + return 0; +} +#endif +static void iwl_clear_stations_table(struct iwl_priv *priv) +{ + unsigned long flags; + + spin_lock_irqsave(&priv->sta_lock, flags); + + priv->num_stations = 0; + memset(priv->stations, 0, sizeof(priv->stations)); + + spin_unlock_irqrestore(&priv->sta_lock, flags); +} + + +u8 iwl_add_station(struct iwl_priv *priv, const u8 *addr, int is_ap, u8 flags) +{ + int i; + int index = IWL_INVALID_STATION; + struct iwl_station_entry *station; + unsigned long flags_spin; + DECLARE_MAC_BUF(mac); + u8 rate; + + spin_lock_irqsave(&priv->sta_lock, flags_spin); + if (is_ap) + index = IWL_AP_ID; + else if (is_broadcast_ether_addr(addr)) + index = priv->hw_setting.bcast_sta_id; + else + for (i = IWL_STA_ID; i < priv->hw_setting.max_stations; i++) { + if (!compare_ether_addr(priv->stations[i].sta.sta.addr, + addr)) { + index = i; + break; + } + + if (!priv->stations[i].used && + index == IWL_INVALID_STATION) + index = i; + } + + /* These twh conditions has the same outcome but keep them separate + since they have different meaning */ + if (unlikely(index == IWL_INVALID_STATION)) { + spin_unlock_irqrestore(&priv->sta_lock, flags_spin); + return index; + } + + if (priv->stations[index].used && + !compare_ether_addr(priv->stations[index].sta.sta.addr, addr)) { + spin_unlock_irqrestore(&priv->sta_lock, flags_spin); + return index; + } + + IWL_DEBUG_ASSOC("Add STA ID %d: %s\n", index, print_mac(mac, addr)); 
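+	/* Initialize the new entry: clear the ADD_STA command, record the
+	 * address, and start at the band's lowest rate on both antennas. */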
+ station = &priv->stations[index]; + station->used = 1; + priv->num_stations++; + + memset(&station->sta, 0, sizeof(struct iwl_addsta_cmd)); + memcpy(station->sta.sta.addr, addr, ETH_ALEN); + station->sta.mode = 0; + station->sta.sta.sta_id = index; + station->sta.station_flags = 0; + + rate = (priv->phymode == MODE_IEEE80211A) ? IWL_RATE_6M_PLCP : + IWL_RATE_1M_PLCP | priv->hw_setting.cck_flag; + + /* Turn on both antennas for the station... */ + station->sta.rate_n_flags = + iwl_hw_set_rate_n_flags(rate, RATE_MCS_ANT_AB_MSK); + station->current_rate.rate_n_flags = + le16_to_cpu(station->sta.rate_n_flags); + + spin_unlock_irqrestore(&priv->sta_lock, flags_spin); + iwl_send_add_station(priv, &station->sta, flags); + return index; + +} + +/*************** DRIVER STATUS FUNCTIONS *****/ + +static inline int iwl_is_ready(struct iwl_priv *priv) +{ + /* The adapter is 'ready' if READY and GEO_CONFIGURED bits are + * set but EXIT_PENDING is not */ + return test_bit(STATUS_READY, &priv->status) && + test_bit(STATUS_GEO_CONFIGURED, &priv->status) && + !test_bit(STATUS_EXIT_PENDING, &priv->status); +} + +static inline int iwl_is_alive(struct iwl_priv *priv) +{ + return test_bit(STATUS_ALIVE, &priv->status); +} + +static inline int iwl_is_init(struct iwl_priv *priv) +{ + return test_bit(STATUS_INIT, &priv->status); +} + +static inline int iwl_is_rfkill(struct iwl_priv *priv) +{ + return test_bit(STATUS_RF_KILL_HW, &priv->status) || + test_bit(STATUS_RF_KILL_SW, &priv->status); +} + +static inline int iwl_is_ready_rf(struct iwl_priv *priv) +{ + + if (iwl_is_rfkill(priv)) + return 0; + + return iwl_is_ready(priv); +} + +/*************** HOST COMMAND QUEUE FUNCTIONS *****/ + +#define IWL_CMD(x) case x : return #x + +static const char *get_cmd_string(u8 cmd) +{ + switch (cmd) { + IWL_CMD(REPLY_ALIVE); + IWL_CMD(REPLY_ERROR); + IWL_CMD(REPLY_RXON); + IWL_CMD(REPLY_RXON_ASSOC); + IWL_CMD(REPLY_QOS_PARAM); + IWL_CMD(REPLY_RXON_TIMING); + IWL_CMD(REPLY_ADD_STA); + IWL_CMD(REPLY_REMOVE_STA); + IWL_CMD(REPLY_REMOVE_ALL_STA); + IWL_CMD(REPLY_3945_RX); + IWL_CMD(REPLY_TX); + IWL_CMD(REPLY_RATE_SCALE); + IWL_CMD(REPLY_LEDS_CMD); + IWL_CMD(REPLY_TX_LINK_QUALITY_CMD); + IWL_CMD(RADAR_NOTIFICATION); + IWL_CMD(REPLY_QUIET_CMD); + IWL_CMD(REPLY_CHANNEL_SWITCH); + IWL_CMD(CHANNEL_SWITCH_NOTIFICATION); + IWL_CMD(REPLY_SPECTRUM_MEASUREMENT_CMD); + IWL_CMD(SPECTRUM_MEASURE_NOTIFICATION); + IWL_CMD(POWER_TABLE_CMD); + IWL_CMD(PM_SLEEP_NOTIFICATION); + IWL_CMD(PM_DEBUG_STATISTIC_NOTIFIC); + IWL_CMD(REPLY_SCAN_CMD); + IWL_CMD(REPLY_SCAN_ABORT_CMD); + IWL_CMD(SCAN_START_NOTIFICATION); + IWL_CMD(SCAN_RESULTS_NOTIFICATION); + IWL_CMD(SCAN_COMPLETE_NOTIFICATION); + IWL_CMD(BEACON_NOTIFICATION); + IWL_CMD(REPLY_TX_BEACON); + IWL_CMD(WHO_IS_AWAKE_NOTIFICATION); + IWL_CMD(QUIET_NOTIFICATION); + IWL_CMD(REPLY_TX_PWR_TABLE_CMD); + IWL_CMD(MEASURE_ABORT_NOTIFICATION); + IWL_CMD(REPLY_BT_CONFIG); + IWL_CMD(REPLY_STATISTICS_CMD); + IWL_CMD(STATISTICS_NOTIFICATION); + IWL_CMD(REPLY_CARD_STATE_CMD); + IWL_CMD(CARD_STATE_NOTIFICATION); + IWL_CMD(MISSED_BEACONS_NOTIFICATION); + default: + return "UNKNOWN"; + + } +} + +#define HOST_COMPLETE_TIMEOUT (HZ / 2) + +/** + * iwl_enqueue_hcmd - enqueue a uCode command + * @priv: device private data point + * @cmd: a point to the ucode command structure + * + * The function returns < 0 values to indicate the operation is + * failed. On success, it turns the index (> 0) of command in the + * command queue. 
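+ *
+ * Callers normally reach this through iwl_send_cmd_sync()/_async(); as an
+ * illustrative usage sketch only, a caller that needs the response SKB
+ * follows the pattern used by iwl_send_rxon_assoc() below:
+ *
+ *	struct iwl_host_cmd cmd = {
+ *		.id = REPLY_RXON_ASSOC,
+ *		.len = sizeof(rxon_assoc),
+ *		.meta.flags = CMD_WANT_SKB,
+ *		.data = &rxon_assoc,
+ *	};
+ *	struct iwl_rx_packet *res;
+ *
+ *	rc = iwl_send_cmd_sync(priv, &cmd);
+ *	if (rc)
+ *		return rc;
+ *	res = (struct iwl_rx_packet *)cmd.meta.u.skb->data;
+ *	if (res->hdr.flags & IWL_CMD_FAILED_MSK)
+ *		rc = -EIO;
+ *	priv->alloc_rxb_skb--;
+ *	dev_kfree_skb_any(cmd.meta.u.skb);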
+ */ +static int iwl_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd) +{ + struct iwl_tx_queue *txq = &priv->txq[IWL_CMD_QUEUE_NUM]; + struct iwl_queue *q = &txq->q; + struct iwl_tfd_frame *tfd; + u32 *control_flags; + struct iwl_cmd *out_cmd; + u32 idx; + u16 fix_size = (u16)(cmd->len + sizeof(out_cmd->hdr)); + dma_addr_t phys_addr; + int pad; + u16 count; + int ret; + unsigned long flags; + + /* If any of the command structures end up being larger than + * the TFD_MAX_PAYLOAD_SIZE, and it sent as a 'small' command then + * we will need to increase the size of the TFD entries */ + BUG_ON((fix_size > TFD_MAX_PAYLOAD_SIZE) && + !(cmd->meta.flags & CMD_SIZE_HUGE)); + + if (iwl_queue_space(q) < ((cmd->meta.flags & CMD_ASYNC) ? 2 : 1)) { + IWL_ERROR("No space for Tx\n"); + return -ENOSPC; + } + + spin_lock_irqsave(&priv->hcmd_lock, flags); + + tfd = &txq->bd[q->first_empty]; + memset(tfd, 0, sizeof(*tfd)); + + control_flags = (u32 *) tfd; + + idx = get_cmd_index(q, q->first_empty, cmd->meta.flags & CMD_SIZE_HUGE); + out_cmd = &txq->cmd[idx]; + + out_cmd->hdr.cmd = cmd->id; + memcpy(&out_cmd->meta, &cmd->meta, sizeof(cmd->meta)); + memcpy(&out_cmd->cmd.payload, cmd->data, cmd->len); + + /* At this point, the out_cmd now has all of the incoming cmd + * information */ + + out_cmd->hdr.flags = 0; + out_cmd->hdr.sequence = cpu_to_le16(QUEUE_TO_SEQ(IWL_CMD_QUEUE_NUM) | + INDEX_TO_SEQ(q->first_empty)); + if (out_cmd->meta.flags & CMD_SIZE_HUGE) + out_cmd->hdr.sequence |= cpu_to_le16(SEQ_HUGE_FRAME); + + phys_addr = txq->dma_addr_cmd + sizeof(txq->cmd[0]) * idx + + offsetof(struct iwl_cmd, hdr); + iwl_hw_txq_attach_buf_to_tfd(priv, tfd, phys_addr, fix_size); + + pad = U32_PAD(cmd->len); + count = TFD_CTL_COUNT_GET(*control_flags); + *control_flags = TFD_CTL_COUNT_SET(count) | TFD_CTL_PAD_SET(pad); + + IWL_DEBUG_HC("Sending command %s (#%x), seq: 0x%04X, " + "%d bytes at %d[%d]:%d\n", + get_cmd_string(out_cmd->hdr.cmd), + out_cmd->hdr.cmd, le16_to_cpu(out_cmd->hdr.sequence), + fix_size, q->first_empty, idx, IWL_CMD_QUEUE_NUM); + + txq->need_update = 1; + q->first_empty = iwl_queue_inc_wrap(q->first_empty, q->n_bd); + ret = iwl_tx_queue_update_write_ptr(priv, txq); + + spin_unlock_irqrestore(&priv->hcmd_lock, flags); + return ret ? ret : idx; +} + +int iwl_send_cmd_async(struct iwl_priv *priv, struct iwl_host_cmd *cmd) +{ + int ret; + + BUG_ON(!(cmd->meta.flags & CMD_ASYNC)); + + /* An asynchronous command can not expect an SKB to be set. */ + BUG_ON(cmd->meta.flags & CMD_WANT_SKB); + + /* An asynchronous command MUST have a callback. */ + BUG_ON(!cmd->meta.u.callback); + + if (test_bit(STATUS_EXIT_PENDING, &priv->status)) + return -EBUSY; + + ret = iwl_enqueue_hcmd(priv, cmd); + if (ret < 0) { + IWL_ERROR("Error sending %s: iwl_enqueue_hcmd failed: %d\n", + get_cmd_string(cmd->id), ret); + return ret; + } + return 0; +} + +int iwl_send_cmd_sync(struct iwl_priv *priv, struct iwl_host_cmd *cmd) +{ + int cmd_idx; + int ret; + static atomic_t entry = ATOMIC_INIT(0); /* reentrance protection */ + + BUG_ON(cmd->meta.flags & CMD_ASYNC); + + /* A synchronous command can not have a callback set. 
*/ + BUG_ON(cmd->meta.u.callback != NULL); + + if (atomic_xchg(&entry, 1)) { + IWL_ERROR("Error sending %s: Already sending a host command\n", + get_cmd_string(cmd->id)); + return -EBUSY; + } + + set_bit(STATUS_HCMD_ACTIVE, &priv->status); + + if (cmd->meta.flags & CMD_WANT_SKB) + cmd->meta.source = &cmd->meta; + + cmd_idx = iwl_enqueue_hcmd(priv, cmd); + if (cmd_idx < 0) { + ret = cmd_idx; + IWL_ERROR("Error sending %s: iwl_enqueue_hcmd failed: %d\n", + get_cmd_string(cmd->id), ret); + goto out; + } + + ret = wait_event_interruptible_timeout(priv->wait_command_queue, + !test_bit(STATUS_HCMD_ACTIVE, &priv->status), + HOST_COMPLETE_TIMEOUT); + if (!ret) { + if (test_bit(STATUS_HCMD_ACTIVE, &priv->status)) { + IWL_ERROR("Error sending %s: time out after %dms.\n", + get_cmd_string(cmd->id), + jiffies_to_msecs(HOST_COMPLETE_TIMEOUT)); + + clear_bit(STATUS_HCMD_ACTIVE, &priv->status); + ret = -ETIMEDOUT; + goto cancel; + } + } + + if (test_bit(STATUS_RF_KILL_HW, &priv->status)) { + IWL_DEBUG_INFO("Command %s aborted: RF KILL Switch\n", + get_cmd_string(cmd->id)); + ret = -ECANCELED; + goto fail; + } + if (test_bit(STATUS_FW_ERROR, &priv->status)) { + IWL_DEBUG_INFO("Command %s failed: FW Error\n", + get_cmd_string(cmd->id)); + ret = -EIO; + goto fail; + } + if ((cmd->meta.flags & CMD_WANT_SKB) && !cmd->meta.u.skb) { + IWL_ERROR("Error: Response NULL in '%s'\n", + get_cmd_string(cmd->id)); + ret = -EIO; + goto out; + } + + ret = 0; + goto out; + +cancel: + if (cmd->meta.flags & CMD_WANT_SKB) { + struct iwl_cmd *qcmd; + + /* Cancel the CMD_WANT_SKB flag for the cmd in the + * TX cmd queue. Otherwise in case the cmd comes + * in later, it will possibly set an invalid + * address (cmd->meta.source). */ + qcmd = &priv->txq[IWL_CMD_QUEUE_NUM].cmd[cmd_idx]; + qcmd->meta.flags &= ~CMD_WANT_SKB; + } +fail: + if (cmd->meta.u.skb) { + dev_kfree_skb_any(cmd->meta.u.skb); + cmd->meta.u.skb = NULL; + } +out: + atomic_set(&entry, 0); + return ret; +} + +int iwl_send_cmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd) +{ + /* A command can not be asynchronous AND expect an SKB to be set. */ + BUG_ON((cmd->meta.flags & CMD_ASYNC) && + (cmd->meta.flags & CMD_WANT_SKB)); + + if (cmd->meta.flags & CMD_ASYNC) + return iwl_send_cmd_async(priv, cmd); + + return iwl_send_cmd_sync(priv, cmd); +} + +int iwl_send_cmd_pdu(struct iwl_priv *priv, u8 id, u16 len, const void *data) +{ + struct iwl_host_cmd cmd = { + .id = id, + .len = len, + .data = data, + }; + + return iwl_send_cmd_sync(priv, &cmd); +} + +static int __must_check iwl_send_cmd_u32(struct iwl_priv *priv, u8 id, u32 val) +{ + struct iwl_host_cmd cmd = { + .id = id, + .len = sizeof(val), + .data = &val, + }; + + return iwl_send_cmd_sync(priv, &cmd); +} + +int iwl_send_statistics_request(struct iwl_priv *priv) +{ + return iwl_send_cmd_u32(priv, REPLY_STATISTICS_CMD, 0); +} + +/** + * iwl_set_rxon_channel - Set the phymode and channel values in staging RXON + * @phymode: MODE_IEEE80211A sets to 5.2GHz; all else set to 2.4GHz + * @channel: Any channel valid for the requested phymode + + * In addition to setting the staging RXON, priv->phymode is also set. 
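+ *
+ * Illustrative call (the channel number is an example only):
+ *
+ *	iwl_set_rxon_channel(priv, MODE_IEEE80211A, 36);
+ *
+ * clears RXON_FLG_BAND_24G_MSK in staging_rxon.flags and sets
+ * priv->phymode to MODE_IEEE80211A, provided channel 36 is valid for
+ * that phymode per iwl_get_channel_info().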
+ * + * NOTE: Does not commit to the hardware; it sets appropriate bit fields + * in the staging RXON flag structure based on the phymode + */ +static int iwl_set_rxon_channel(struct iwl_priv *priv, u8 phymode, u16 channel) +{ + if (!iwl_get_channel_info(priv, phymode, channel)) { + IWL_DEBUG_INFO("Could not set channel to %d [%d]\n", + channel, phymode); + return -EINVAL; + } + + if ((le16_to_cpu(priv->staging_rxon.channel) == channel) && + (priv->phymode == phymode)) + return 0; + + priv->staging_rxon.channel = cpu_to_le16(channel); + if (phymode == MODE_IEEE80211A) + priv->staging_rxon.flags &= ~RXON_FLG_BAND_24G_MSK; + else + priv->staging_rxon.flags |= RXON_FLG_BAND_24G_MSK; + + priv->phymode = phymode; + + IWL_DEBUG_INFO("Staging channel set to %d [%d]\n", channel, phymode); + + return 0; +} + +/** + * iwl_check_rxon_cmd - validate RXON structure is valid + * + * NOTE: This is really only useful during development and can eventually + * be #ifdef'd out once the driver is stable and folks aren't actively + * making changes + */ +static int iwl_check_rxon_cmd(struct iwl_rxon_cmd *rxon) +{ + int error = 0; + int counter = 1; + + if (rxon->flags & RXON_FLG_BAND_24G_MSK) { + error |= le32_to_cpu(rxon->flags & + (RXON_FLG_TGJ_NARROW_BAND_MSK | + RXON_FLG_RADAR_DETECT_MSK)); + if (error) + IWL_WARNING("check 24G fields %d | %d\n", + counter++, error); + } else { + error |= (rxon->flags & RXON_FLG_SHORT_SLOT_MSK) ? + 0 : le32_to_cpu(RXON_FLG_SHORT_SLOT_MSK); + if (error) + IWL_WARNING("check 52 fields %d | %d\n", + counter++, error); + error |= le32_to_cpu(rxon->flags & RXON_FLG_CCK_MSK); + if (error) + IWL_WARNING("check 52 CCK %d | %d\n", + counter++, error); + } + error |= (rxon->node_addr[0] | rxon->bssid_addr[0]) & 0x1; + if (error) + IWL_WARNING("check mac addr %d | %d\n", counter++, error); + + /* make sure basic rates 6Mbps and 1Mbps are supported */ + error |= (((rxon->ofdm_basic_rates & IWL_RATE_6M_MASK) == 0) && + ((rxon->cck_basic_rates & IWL_RATE_1M_MASK) == 0)); + if (error) + IWL_WARNING("check basic rate %d | %d\n", counter++, error); + + error |= (le16_to_cpu(rxon->assoc_id) > 2007); + if (error) + IWL_WARNING("check assoc id %d | %d\n", counter++, error); + + error |= ((rxon->flags & (RXON_FLG_CCK_MSK | RXON_FLG_SHORT_SLOT_MSK)) + == (RXON_FLG_CCK_MSK | RXON_FLG_SHORT_SLOT_MSK)); + if (error) + IWL_WARNING("check CCK and short slot %d | %d\n", + counter++, error); + + error |= ((rxon->flags & (RXON_FLG_CCK_MSK | RXON_FLG_AUTO_DETECT_MSK)) + == (RXON_FLG_CCK_MSK | RXON_FLG_AUTO_DETECT_MSK)); + if (error) + IWL_WARNING("check CCK & auto detect %d | %d\n", + counter++, error); + + error |= ((rxon->flags & (RXON_FLG_AUTO_DETECT_MSK | + RXON_FLG_TGG_PROTECT_MSK)) == RXON_FLG_TGG_PROTECT_MSK); + if (error) + IWL_WARNING("check TGG and auto detect %d | %d\n", + counter++, error); + + if ((rxon->flags & RXON_FLG_DIS_DIV_MSK)) + error |= ((rxon->flags & (RXON_FLG_ANT_B_MSK | + RXON_FLG_ANT_A_MSK)) == 0); + if (error) + IWL_WARNING("check antenna %d %d\n", counter++, error); + + if (error) + IWL_WARNING("Tuning to channel %d\n", + le16_to_cpu(rxon->channel)); + + if (error) { + IWL_ERROR("Not a valid iwl_rxon_assoc_cmd field values\n"); + return -1; + } + return 0; +} + +/** + * iwl_full_rxon_required - determine if RXON_ASSOC can be used in RXON commit + * @priv: staging_rxon is comapred to active_rxon + * + * If the RXON structure is changing sufficient to require a new + * tune or to clear and reset the RXON_FILTER_ASSOC_MSK then return 1 + * to indicate a new tune is required. 
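+ *
+ * A caller typically uses the result as a guard (sketch only; this is the
+ * shape of the check in iwl_commit_rxon() below):
+ *
+ *	if (!iwl_full_rxon_required(priv))
+ *		return iwl_send_rxon_assoc(priv);
+ *	return iwl_send_cmd_pdu(priv, REPLY_RXON,
+ *				sizeof(struct iwl_rxon_cmd),
+ *				&priv->staging_rxon);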
+ */ +static int iwl_full_rxon_required(struct iwl_priv *priv) +{ + + /* These items are only settable from the full RXON command */ + if (!(priv->active_rxon.filter_flags & RXON_FILTER_ASSOC_MSK) || + compare_ether_addr(priv->staging_rxon.bssid_addr, + priv->active_rxon.bssid_addr) || + compare_ether_addr(priv->staging_rxon.node_addr, + priv->active_rxon.node_addr) || + compare_ether_addr(priv->staging_rxon.wlap_bssid_addr, + priv->active_rxon.wlap_bssid_addr) || + (priv->staging_rxon.dev_type != priv->active_rxon.dev_type) || + (priv->staging_rxon.channel != priv->active_rxon.channel) || + (priv->staging_rxon.air_propagation != + priv->active_rxon.air_propagation) || + (priv->staging_rxon.assoc_id != priv->active_rxon.assoc_id)) + return 1; + + /* flags, filter_flags, ofdm_basic_rates, and cck_basic_rates can + * be updated with the RXON_ASSOC command -- however only some + * flag transitions are allowed using RXON_ASSOC */ + + /* Check if we are not switching bands */ + if ((priv->staging_rxon.flags & RXON_FLG_BAND_24G_MSK) != + (priv->active_rxon.flags & RXON_FLG_BAND_24G_MSK)) + return 1; + + /* Check if we are switching association toggle */ + if ((priv->staging_rxon.filter_flags & RXON_FILTER_ASSOC_MSK) != + (priv->active_rxon.filter_flags & RXON_FILTER_ASSOC_MSK)) + return 1; + + return 0; +} + +static int iwl_send_rxon_assoc(struct iwl_priv *priv) +{ + int rc = 0; + struct iwl_rx_packet *res = NULL; + struct iwl_rxon_assoc_cmd rxon_assoc; + struct iwl_host_cmd cmd = { + .id = REPLY_RXON_ASSOC, + .len = sizeof(rxon_assoc), + .meta.flags = CMD_WANT_SKB, + .data = &rxon_assoc, + }; + const struct iwl_rxon_cmd *rxon1 = &priv->staging_rxon; + const struct iwl_rxon_cmd *rxon2 = &priv->active_rxon; + + if ((rxon1->flags == rxon2->flags) && + (rxon1->filter_flags == rxon2->filter_flags) && + (rxon1->cck_basic_rates == rxon2->cck_basic_rates) && + (rxon1->ofdm_basic_rates == rxon2->ofdm_basic_rates)) { + IWL_DEBUG_INFO("Using current RXON_ASSOC. Not resending.\n"); + return 0; + } + + rxon_assoc.flags = priv->staging_rxon.flags; + rxon_assoc.filter_flags = priv->staging_rxon.filter_flags; + rxon_assoc.ofdm_basic_rates = priv->staging_rxon.ofdm_basic_rates; + rxon_assoc.cck_basic_rates = priv->staging_rxon.cck_basic_rates; + rxon_assoc.reserved = 0; + + rc = iwl_send_cmd_sync(priv, &cmd); + if (rc) + return rc; + + res = (struct iwl_rx_packet *)cmd.meta.u.skb->data; + if (res->hdr.flags & IWL_CMD_FAILED_MSK) { + IWL_ERROR("Bad return from REPLY_RXON_ASSOC command\n"); + rc = -EIO; + } + + priv->alloc_rxb_skb--; + dev_kfree_skb_any(cmd.meta.u.skb); + + return rc; +} + +/** + * iwl_commit_rxon - commit staging_rxon to hardware + * + * The RXON command in staging_rxon is commited to the hardware and + * the active_rxon structure is updated with the new data. This + * function correctly transitions out of the RXON_ASSOC_MSK state if + * a HW tune is required based on the RXON structure changes. 
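+ *
+ * Summary of the sequence implemented below:
+ *  1. Validate staging_rxon with iwl_check_rxon_cmd().
+ *  2. If no full tune is needed, send only REPLY_RXON_ASSOC via
+ *     iwl_send_rxon_assoc().
+ *  3. Otherwise, if currently associated, first clear
+ *     RXON_FILTER_ASSOC_MSK on the active configuration, then send the
+ *     full REPLY_RXON, re-send tx power, re-add the broadcast (and, in
+ *     BSS mode, the AP) station entries, and re-initialize the hardware
+ *     rate table.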
+ */ +static int iwl_commit_rxon(struct iwl_priv *priv) +{ + /* cast away the const for active_rxon in this function */ + struct iwl_rxon_cmd *active_rxon = (void *)&priv->active_rxon; + int rc = 0; + DECLARE_MAC_BUF(mac); + + if (!iwl_is_alive(priv)) + return -1; + + /* always get timestamp with Rx frame */ + priv->staging_rxon.flags |= RXON_FLG_TSF2HOST_MSK; + + /* select antenna */ + priv->staging_rxon.flags &= + ~(RXON_FLG_DIS_DIV_MSK | RXON_FLG_ANT_SEL_MSK); + priv->staging_rxon.flags |= iwl3945_get_antenna_flags(priv); + + rc = iwl_check_rxon_cmd(&priv->staging_rxon); + if (rc) { + IWL_ERROR("Invalid RXON configuration. Not committing.\n"); + return -EINVAL; + } + + /* If we don't need to send a full RXON, we can use + * iwl_rxon_assoc_cmd which is used to reconfigure filter + * and other flags for the current radio configuration. */ + if (!iwl_full_rxon_required(priv)) { + rc = iwl_send_rxon_assoc(priv); + if (rc) { + IWL_ERROR("Error setting RXON_ASSOC " + "configuration (%d).\n", rc); + return rc; + } + + memcpy(active_rxon, &priv->staging_rxon, sizeof(*active_rxon)); + + return 0; + } + + /* If we are currently associated and the new config requires + * an RXON_ASSOC and the new config wants the associated mask enabled, + * we must clear the associated from the active configuration + * before we apply the new config */ + if (iwl_is_associated(priv) && + (priv->staging_rxon.filter_flags & RXON_FILTER_ASSOC_MSK)) { + IWL_DEBUG_INFO("Toggling associated bit on current RXON\n"); + active_rxon->filter_flags &= ~RXON_FILTER_ASSOC_MSK; + + rc = iwl_send_cmd_pdu(priv, REPLY_RXON, + sizeof(struct iwl_rxon_cmd), + &priv->active_rxon); + + /* If the mask clearing failed then we set + * active_rxon back to what it was previously */ + if (rc) { + active_rxon->filter_flags |= RXON_FILTER_ASSOC_MSK; + IWL_ERROR("Error clearing ASSOC_MSK on current " + "configuration (%d).\n", rc); + return rc; + } + } + + IWL_DEBUG_INFO("Sending RXON\n" + "* with%s RXON_FILTER_ASSOC_MSK\n" + "* channel = %d\n" + "* bssid = %s\n", + ((priv->staging_rxon.filter_flags & + RXON_FILTER_ASSOC_MSK) ? 
"" : "out"), + le16_to_cpu(priv->staging_rxon.channel), + print_mac(mac, priv->staging_rxon.bssid_addr)); + + /* Apply the new configuration */ + rc = iwl_send_cmd_pdu(priv, REPLY_RXON, + sizeof(struct iwl_rxon_cmd), &priv->staging_rxon); + if (rc) { + IWL_ERROR("Error setting new configuration (%d).\n", rc); + return rc; + } + + memcpy(active_rxon, &priv->staging_rxon, sizeof(*active_rxon)); + + iwl_clear_stations_table(priv); + + /* If we issue a new RXON command which required a tune then we must + * send a new TXPOWER command or we won't be able to Tx any frames */ + rc = iwl_hw_reg_send_txpower(priv); + if (rc) { + IWL_ERROR("Error setting Tx power (%d).\n", rc); + return rc; + } + + /* Add the broadcast address so we can send broadcast frames */ + if (iwl_add_station(priv, BROADCAST_ADDR, 0, 0) == + IWL_INVALID_STATION) { + IWL_ERROR("Error adding BROADCAST address for transmit.\n"); + return -EIO; + } + + /* If we have set the ASSOC_MSK and we are in BSS mode then + * add the IWL_AP_ID to the station rate table */ + if (iwl_is_associated(priv) && + (priv->iw_mode == IEEE80211_IF_TYPE_STA)) + if (iwl_add_station(priv, priv->active_rxon.bssid_addr, 1, 0) + == IWL_INVALID_STATION) { + IWL_ERROR("Error adding AP address for transmit.\n"); + return -EIO; + } + + /* Init the hardware's rate fallback order based on the + * phymode */ + rc = iwl3945_init_hw_rate_table(priv); + if (rc) { + IWL_ERROR("Error setting HW rate table: %02X\n", rc); + return -EIO; + } + + return 0; +} + +static int iwl_send_bt_config(struct iwl_priv *priv) +{ + struct iwl_bt_cmd bt_cmd = { + .flags = 3, + .lead_time = 0xAA, + .max_kill = 1, + .kill_ack_mask = 0, + .kill_cts_mask = 0, + }; + + return iwl_send_cmd_pdu(priv, REPLY_BT_CONFIG, + sizeof(struct iwl_bt_cmd), &bt_cmd); +} + +static int iwl_send_scan_abort(struct iwl_priv *priv) +{ + int rc = 0; + struct iwl_rx_packet *res; + struct iwl_host_cmd cmd = { + .id = REPLY_SCAN_ABORT_CMD, + .meta.flags = CMD_WANT_SKB, + }; + + /* If there isn't a scan actively going on in the hardware + * then we are in between scan bands and not actually + * actively scanning, so don't send the abort command */ + if (!test_bit(STATUS_SCAN_HW, &priv->status)) { + clear_bit(STATUS_SCAN_ABORTING, &priv->status); + return 0; + } + + rc = iwl_send_cmd_sync(priv, &cmd); + if (rc) { + clear_bit(STATUS_SCAN_ABORTING, &priv->status); + return rc; + } + + res = (struct iwl_rx_packet *)cmd.meta.u.skb->data; + if (res->u.status != CAN_ABORT_STATUS) { + /* The scan abort will return 1 for success or + * 2 for "failure". A failure condition can be + * due to simply not being in an active scan which + * can occur if we send the scan abort before we + * the microcode has notified us that a scan is + * completed. */ + IWL_DEBUG_INFO("SCAN_ABORT returned %d.\n", res->u.status); + clear_bit(STATUS_SCAN_ABORTING, &priv->status); + clear_bit(STATUS_SCAN_HW, &priv->status); + } + + dev_kfree_skb_any(cmd.meta.u.skb); + + return rc; +} + +static int iwl_card_state_sync_callback(struct iwl_priv *priv, + struct iwl_cmd *cmd, + struct sk_buff *skb) +{ + return 1; +} + +/* + * CARD_STATE_CMD + * + * Use: Sets the internal card state to enable, disable, or halt + * + * When in the 'enable' state the card operates as normal. + * When in the 'disable' state, the card enters into a low power mode. + * When in the 'halt' state, the card is shut down and must be fully + * restarted to come back on. 
+ */ +static int iwl_send_card_state(struct iwl_priv *priv, u32 flags, u8 meta_flag) +{ + struct iwl_host_cmd cmd = { + .id = REPLY_CARD_STATE_CMD, + .len = sizeof(u32), + .data = &flags, + .meta.flags = meta_flag, + }; + + if (meta_flag & CMD_ASYNC) + cmd.meta.u.callback = iwl_card_state_sync_callback; + + return iwl_send_cmd(priv, &cmd); +} + +static int iwl_add_sta_sync_callback(struct iwl_priv *priv, + struct iwl_cmd *cmd, struct sk_buff *skb) +{ + struct iwl_rx_packet *res = NULL; + + if (!skb) { + IWL_ERROR("Error: Response NULL in REPLY_ADD_STA.\n"); + return 1; + } + + res = (struct iwl_rx_packet *)skb->data; + if (res->hdr.flags & IWL_CMD_FAILED_MSK) { + IWL_ERROR("Bad return from REPLY_ADD_STA (0x%08X)\n", + res->hdr.flags); + return 1; + } + + switch (res->u.add_sta.status) { + case ADD_STA_SUCCESS_MSK: + break; + default: + break; + } + + /* We didn't cache the SKB; let the caller free it */ + return 1; +} + +int iwl_send_add_station(struct iwl_priv *priv, + struct iwl_addsta_cmd *sta, u8 flags) +{ + struct iwl_rx_packet *res = NULL; + int rc = 0; + struct iwl_host_cmd cmd = { + .id = REPLY_ADD_STA, + .len = sizeof(struct iwl_addsta_cmd), + .meta.flags = flags, + .data = sta, + }; + + if (flags & CMD_ASYNC) + cmd.meta.u.callback = iwl_add_sta_sync_callback; + else + cmd.meta.flags |= CMD_WANT_SKB; + + rc = iwl_send_cmd(priv, &cmd); + + if (rc || (flags & CMD_ASYNC)) + return rc; + + res = (struct iwl_rx_packet *)cmd.meta.u.skb->data; + if (res->hdr.flags & IWL_CMD_FAILED_MSK) { + IWL_ERROR("Bad return from REPLY_ADD_STA (0x%08X)\n", + res->hdr.flags); + rc = -EIO; + } + + if (rc == 0) { + switch (res->u.add_sta.status) { + case ADD_STA_SUCCESS_MSK: + IWL_DEBUG_INFO("REPLY_ADD_STA PASSED\n"); + break; + default: + rc = -EIO; + IWL_WARNING("REPLY_ADD_STA failed\n"); + break; + } + } + + priv->alloc_rxb_skb--; + dev_kfree_skb_any(cmd.meta.u.skb); + + return rc; +} + +static int iwl_update_sta_key_info(struct iwl_priv *priv, + struct ieee80211_key_conf *keyconf, + u8 sta_id) +{ + unsigned long flags; + __le16 key_flags = 0; + + switch (keyconf->alg) { + case ALG_CCMP: + key_flags |= STA_KEY_FLG_CCMP; + key_flags |= cpu_to_le16( + keyconf->keyidx << STA_KEY_FLG_KEYID_POS); + key_flags &= ~STA_KEY_FLG_INVALID; + break; + case ALG_TKIP: + case ALG_WEP: + return -EINVAL; + default: + return -EINVAL; + } + spin_lock_irqsave(&priv->sta_lock, flags); + priv->stations[sta_id].keyinfo.alg = keyconf->alg; + priv->stations[sta_id].keyinfo.keylen = keyconf->keylen; + memcpy(priv->stations[sta_id].keyinfo.key, keyconf->key, + keyconf->keylen); + + memcpy(priv->stations[sta_id].sta.key.key, keyconf->key, + keyconf->keylen); + priv->stations[sta_id].sta.key.key_flags = key_flags; + priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK; + priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK; + + spin_unlock_irqrestore(&priv->sta_lock, flags); + + IWL_DEBUG_INFO("hwcrypto: modify ucode station key info\n"); + iwl_send_add_station(priv, &priv->stations[sta_id].sta, 0); + return 0; +} + +static int iwl_clear_sta_key_info(struct iwl_priv *priv, u8 sta_id) +{ + unsigned long flags; + + spin_lock_irqsave(&priv->sta_lock, flags); + memset(&priv->stations[sta_id].keyinfo, 0, sizeof(struct iwl_hw_key)); + memset(&priv->stations[sta_id].sta.key, 0, sizeof(struct iwl_keyinfo)); + priv->stations[sta_id].sta.key.key_flags = STA_KEY_FLG_NO_ENC; + priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK; + priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK; + 
spin_unlock_irqrestore(&priv->sta_lock, flags); + + IWL_DEBUG_INFO("hwcrypto: clear ucode station key info\n"); + iwl_send_add_station(priv, &priv->stations[sta_id].sta, 0); + return 0; +} + +static void iwl_clear_free_frames(struct iwl_priv *priv) +{ + struct list_head *element; + + IWL_DEBUG_INFO("%d frames on pre-allocated heap on clear.\n", + priv->frames_count); + + while (!list_empty(&priv->free_frames)) { + element = priv->free_frames.next; + list_del(element); + kfree(list_entry(element, struct iwl_frame, list)); + priv->frames_count--; + } + + if (priv->frames_count) { + IWL_WARNING("%d frames still in use. Did we lose one?\n", + priv->frames_count); + priv->frames_count = 0; + } +} + +static struct iwl_frame *iwl_get_free_frame(struct iwl_priv *priv) +{ + struct iwl_frame *frame; + struct list_head *element; + if (list_empty(&priv->free_frames)) { + frame = kzalloc(sizeof(*frame), GFP_KERNEL); + if (!frame) { + IWL_ERROR("Could not allocate frame!\n"); + return NULL; + } + + priv->frames_count++; + return frame; + } + + element = priv->free_frames.next; + list_del(element); + return list_entry(element, struct iwl_frame, list); +} + +static void iwl_free_frame(struct iwl_priv *priv, struct iwl_frame *frame) +{ + memset(frame, 0, sizeof(*frame)); + list_add(&frame->list, &priv->free_frames); +} + +unsigned int iwl_fill_beacon_frame(struct iwl_priv *priv, + struct ieee80211_hdr *hdr, + const u8 *dest, int left) +{ + + if (!iwl_is_associated(priv) || !priv->ibss_beacon || + ((priv->iw_mode != IEEE80211_IF_TYPE_IBSS) && + (priv->iw_mode != IEEE80211_IF_TYPE_AP))) + return 0; + + if (priv->ibss_beacon->len > left) + return 0; + + memcpy(hdr, priv->ibss_beacon->data, priv->ibss_beacon->len); + + return priv->ibss_beacon->len; +} + +static int iwl_rate_index_from_plcp(int plcp) +{ + int i = 0; + + for (i = 0; i < IWL_RATE_COUNT; i++) + if (iwl_rates[i].plcp == plcp) + return i; + return -1; +} + +static u8 iwl_rate_get_lowest_plcp(int rate_mask) +{ + u8 i; + + for (i = IWL_RATE_1M_INDEX; i != IWL_RATE_INVALID; + i = iwl_rates[i].next_ieee) { + if (rate_mask & (1 << i)) + return iwl_rates[i].plcp; + } + + return IWL_RATE_INVALID; +} + +static int iwl_send_beacon_cmd(struct iwl_priv *priv) +{ + struct iwl_frame *frame; + unsigned int frame_size; + int rc; + u8 rate; + + frame = iwl_get_free_frame(priv); + + if (!frame) { + IWL_ERROR("Could not obtain free frame buffer for beacon " + "command.\n"); + return -ENOMEM; + } + + if (!(priv->staging_rxon.flags & RXON_FLG_BAND_24G_MSK)) { + rate = iwl_rate_get_lowest_plcp(priv->active_rate_basic & + 0xFF0); + if (rate == IWL_INVALID_RATE) + rate = IWL_RATE_6M_PLCP; + } else { + rate = iwl_rate_get_lowest_plcp(priv->active_rate_basic & 0xF); + if (rate == IWL_INVALID_RATE) + rate = IWL_RATE_1M_PLCP; + } + + frame_size = iwl_hw_get_beacon_cmd(priv, frame, rate); + + rc = iwl_send_cmd_pdu(priv, REPLY_TX_BEACON, frame_size, + &frame->u.cmd[0]); + + iwl_free_frame(priv, frame); + + return rc; +} + +/****************************************************************************** + * + * EEPROM related functions + * + ******************************************************************************/ + +static void get_eeprom_mac(struct iwl_priv *priv, u8 *mac) +{ + memcpy(mac, priv->eeprom.mac_address, 6); +} + +/** + * iwl_eeprom_init - read EEPROM contents + * + * Load the EEPROM from adapter into priv->eeprom + * + * NOTE: This routine uses the non-debug IO access functions. 
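+ *
+ * The image is read as an array of little-endian 16-bit words while holding
+ * the EEPROM semaphore: each read is started through CSR_EEPROM_REG, then
+ * CSR_EEPROM_REG_READ_VALID_MSK is polled (up to IWL_EEPROM_ACCESS_TIMEOUT,
+ * in IWL_EEPROM_ACCESS_DELAY steps) before the data is taken from the upper
+ * 16 bits of the register.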
+ */ +int iwl_eeprom_init(struct iwl_priv *priv) +{ + u16 *e = (u16 *)&priv->eeprom; + u32 gp = iwl_read32(priv, CSR_EEPROM_GP); + u32 r; + int sz = sizeof(priv->eeprom); + int rc; + int i; + u16 addr; + + /* The EEPROM structure has several padding buffers within it + * and when adding new EEPROM maps is subject to programmer errors + * which may be very difficult to identify without explicitly + * checking the resulting size of the eeprom map. */ + BUILD_BUG_ON(sizeof(priv->eeprom) != IWL_EEPROM_IMAGE_SIZE); + + if ((gp & CSR_EEPROM_GP_VALID_MSK) == CSR_EEPROM_GP_BAD_SIGNATURE) { + IWL_ERROR("EEPROM not found, EEPROM_GP=0x%08x", gp); + return -ENOENT; + } + + rc = iwl_eeprom_aqcuire_semaphore(priv); + if (rc < 0) { + IWL_ERROR("Failed to aqcuire EEPROM semaphore.\n"); + return -ENOENT; + } + + /* eeprom is an array of 16bit values */ + for (addr = 0; addr < sz; addr += sizeof(u16)) { + _iwl_write32(priv, CSR_EEPROM_REG, addr << 1); + _iwl_clear_bit(priv, CSR_EEPROM_REG, CSR_EEPROM_REG_BIT_CMD); + + for (i = 0; i < IWL_EEPROM_ACCESS_TIMEOUT; + i += IWL_EEPROM_ACCESS_DELAY) { + r = _iwl_read_restricted(priv, CSR_EEPROM_REG); + if (r & CSR_EEPROM_REG_READ_VALID_MSK) + break; + udelay(IWL_EEPROM_ACCESS_DELAY); + } + + if (!(r & CSR_EEPROM_REG_READ_VALID_MSK)) { + IWL_ERROR("Time out reading EEPROM[%d]", addr); + return -ETIMEDOUT; + } + e[addr / 2] = le16_to_cpu(r >> 16); + } + + return 0; +} + +/****************************************************************************** + * + * Misc. internal state and helper functions + * + ******************************************************************************/ +#ifdef CONFIG_IWLWIFI_DEBUG + +/** + * iwl_report_frame - dump frame to syslog during debug sessions + * + * hack this function to show different aspects of received frames, + * including selective frame dumps. + * group100 parameter selects whether to show 1 out of 100 good frames. + * + * TODO: ieee80211_hdr stuff is common to 3945 and 4965, so frame type + * info output is okay, but some of this stuff (e.g. iwl_rx_frame_stats) + * is 3945-specific and gives bad output for 4965. Need to split the + * functionality, keep common stuff here. 
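+ *
+ * With group100 set, only 1 in 100 good data frames addressed to us gets a
+ * summary line (tagged "100Frames"); management and other frames are always
+ * summarized, and beacons and probe responses additionally get a full hex
+ * dump of the frame contents.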
+ */ +void iwl_report_frame(struct iwl_priv *priv, + struct iwl_rx_packet *pkt, + struct ieee80211_hdr *header, int group100) +{ + u32 to_us; + u32 print_summary = 0; + u32 print_dump = 0; /* set to 1 to dump all frames' contents */ + u32 hundred = 0; + u32 dataframe = 0; + u16 fc; + u16 seq_ctl; + u16 channel; + u16 phy_flags; + int rate_sym; + u16 length; + u16 status; + u16 bcn_tmr; + u32 tsf_low; + u64 tsf; + u8 rssi; + u8 agc; + u16 sig_avg; + u16 noise_diff; + struct iwl_rx_frame_stats *rx_stats = IWL_RX_STATS(pkt); + struct iwl_rx_frame_hdr *rx_hdr = IWL_RX_HDR(pkt); + struct iwl_rx_frame_end *rx_end = IWL_RX_END(pkt); + u8 *data = IWL_RX_DATA(pkt); + + /* MAC header */ + fc = le16_to_cpu(header->frame_control); + seq_ctl = le16_to_cpu(header->seq_ctrl); + + /* metadata */ + channel = le16_to_cpu(rx_hdr->channel); + phy_flags = le16_to_cpu(rx_hdr->phy_flags); + rate_sym = rx_hdr->rate; + length = le16_to_cpu(rx_hdr->len); + + /* end-of-frame status and timestamp */ + status = le32_to_cpu(rx_end->status); + bcn_tmr = le32_to_cpu(rx_end->beacon_timestamp); + tsf_low = le64_to_cpu(rx_end->timestamp) & 0x0ffffffff; + tsf = le64_to_cpu(rx_end->timestamp); + + /* signal statistics */ + rssi = rx_stats->rssi; + agc = rx_stats->agc; + sig_avg = le16_to_cpu(rx_stats->sig_avg); + noise_diff = le16_to_cpu(rx_stats->noise_diff); + + to_us = !compare_ether_addr(header->addr1, priv->mac_addr); + + /* if data frame is to us and all is good, + * (optionally) print summary for only 1 out of every 100 */ + if (to_us && (fc & ~IEEE80211_FCTL_PROTECTED) == + (IEEE80211_FCTL_FROMDS | IEEE80211_FTYPE_DATA)) { + dataframe = 1; + if (!group100) + print_summary = 1; /* print each frame */ + else if (priv->framecnt_to_us < 100) { + priv->framecnt_to_us++; + print_summary = 0; + } else { + priv->framecnt_to_us = 0; + print_summary = 1; + hundred = 1; + } + } else { + /* print summary for all other frames */ + print_summary = 1; + } + + if (print_summary) { + char *title; + u32 rate; + + if (hundred) + title = "100Frames"; + else if (fc & IEEE80211_FCTL_RETRY) + title = "Retry"; + else if (ieee80211_is_assoc_response(fc)) + title = "AscRsp"; + else if (ieee80211_is_reassoc_response(fc)) + title = "RasRsp"; + else if (ieee80211_is_probe_response(fc)) { + title = "PrbRsp"; + print_dump = 1; /* dump frame contents */ + } else if (ieee80211_is_beacon(fc)) { + title = "Beacon"; + print_dump = 1; /* dump frame contents */ + } else if (ieee80211_is_atim(fc)) + title = "ATIM"; + else if (ieee80211_is_auth(fc)) + title = "Auth"; + else if (ieee80211_is_deauth(fc)) + title = "DeAuth"; + else if (ieee80211_is_disassoc(fc)) + title = "DisAssoc"; + else + title = "Frame"; + + rate = iwl_rate_index_from_plcp(rate_sym); + if (rate == -1) + rate = 0; + else + rate = iwl_rates[rate].ieee / 2; + + /* print frame summary. + * MAC addresses show just the last byte (for brevity), + * but you can hack it to show more, if you'd like to. 
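+ * Data frame summaries show frame control, the last byte of the destination
+ * address, length, rssi, channel and rate; other frames also include the
+ * source address byte, the time since scan start (usec) and the PHY flags.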
*/ + if (dataframe) + IWL_DEBUG_RX("%s: mhd=0x%04x, dst=0x%02x, " + "len=%u, rssi=%d, chnl=%d, rate=%u, \n", + title, fc, header->addr1[5], + length, rssi, channel, rate); + else { + /* src/dst addresses assume managed mode */ + IWL_DEBUG_RX("%s: 0x%04x, dst=0x%02x, " + "src=0x%02x, rssi=%u, tim=%lu usec, " + "phy=0x%02x, chnl=%d\n", + title, fc, header->addr1[5], + header->addr3[5], rssi, + tsf_low - priv->scan_start_tsf, + phy_flags, channel); + } + } + if (print_dump) + iwl_print_hex_dump(IWL_DL_RX, data, length); +} +#endif + +static void iwl_unset_hw_setting(struct iwl_priv *priv) +{ + if (priv->hw_setting.shared_virt) + pci_free_consistent(priv->pci_dev, + sizeof(struct iwl_shared), + priv->hw_setting.shared_virt, + priv->hw_setting.shared_phys); +} + +/** + * iwl_supported_rate_to_ie - fill in the supported rate in IE field + * + * return : set the bit for each supported rate insert in ie + */ +static u16 iwl_supported_rate_to_ie(u8 *ie, u16 supported_rate, + u16 basic_rate, int max_count) +{ + u16 ret_rates = 0, bit; + int i; + u8 *rates; + + rates = &(ie[1]); + + for (bit = 1, i = 0; i < IWL_RATE_COUNT; i++, bit <<= 1) { + if (bit & supported_rate) { + ret_rates |= bit; + rates[*ie] = iwl_rates[i].ieee | + ((bit & basic_rate) ? 0x80 : 0x00); + *ie = *ie + 1; + if (*ie >= max_count) + break; + } + } + + return ret_rates; +} + +/** + * iwl_fill_probe_req - fill in all required fields and IE for probe request + */ +static u16 iwl_fill_probe_req(struct iwl_priv *priv, + struct ieee80211_mgmt *frame, + int left, int is_direct) +{ + int len = 0; + u8 *pos = NULL; + u16 ret_rates; + + /* Make sure there is enough space for the probe request, + * two mandatory IEs and the data */ + left -= 24; + if (left < 0) + return 0; + len += 24; + + frame->frame_control = cpu_to_le16(IEEE80211_STYPE_PROBE_REQ); + memcpy(frame->da, BROADCAST_ADDR, ETH_ALEN); + memcpy(frame->sa, priv->mac_addr, ETH_ALEN); + memcpy(frame->bssid, BROADCAST_ADDR, ETH_ALEN); + frame->seq_ctrl = 0; + + /* fill in our indirect SSID IE */ + /* ...next IE... */ + + left -= 2; + if (left < 0) + return 0; + len += 2; + pos = &(frame->u.probe_req.variable[0]); + *pos++ = WLAN_EID_SSID; + *pos++ = 0; + + /* fill in our direct SSID IE... */ + if (is_direct) { + /* ...next IE... */ + left -= 2 + priv->essid_len; + if (left < 0) + return 0; + /* ... fill it in... */ + *pos++ = WLAN_EID_SSID; + *pos++ = priv->essid_len; + memcpy(pos, priv->essid, priv->essid_len); + pos += priv->essid_len; + len += 2 + priv->essid_len; + } + + /* fill in supported rate */ + /* ...next IE... */ + left -= 2; + if (left < 0) + return 0; + /* ... fill it in... */ + *pos++ = WLAN_EID_SUPP_RATES; + *pos = 0; + ret_rates = priv->active_rate = priv->rates_mask; + priv->active_rate_basic = priv->rates_mask & IWL_BASIC_RATES_MASK; + + iwl_supported_rate_to_ie(pos, priv->active_rate, + priv->active_rate_basic, left); + len += 2 + *pos; + pos += (*pos) + 1; + ret_rates = ~ret_rates & priv->active_rate; + + if (ret_rates == 0) + goto fill_end; + + /* fill in supported extended rate */ + /* ...next IE... */ + left -= 2; + if (left < 0) + return 0; + /* ... fill it in... 
*/ + *pos++ = WLAN_EID_EXT_SUPP_RATES; + *pos = 0; + iwl_supported_rate_to_ie(pos, ret_rates, priv->active_rate_basic, left); + if (*pos > 0) + len += 2 + *pos; + + fill_end: + return (u16)len; +} + +/* + * QoS support +*/ +#ifdef CONFIG_IWLWIFI_QOS +static int iwl_send_qos_params_command(struct iwl_priv *priv, + struct iwl_qosparam_cmd *qos) +{ + + return iwl_send_cmd_pdu(priv, REPLY_QOS_PARAM, + sizeof(struct iwl_qosparam_cmd), qos); +} + +static void iwl_reset_qos(struct iwl_priv *priv) +{ + u16 cw_min = 15; + u16 cw_max = 1023; + u8 aifs = 2; + u8 is_legacy = 0; + unsigned long flags; + int i; + + spin_lock_irqsave(&priv->lock, flags); + priv->qos_data.qos_active = 0; + + if (priv->iw_mode == IEEE80211_IF_TYPE_IBSS) { + if (priv->qos_data.qos_enable) + priv->qos_data.qos_active = 1; + if (!(priv->active_rate & 0xfff0)) { + cw_min = 31; + is_legacy = 1; + } + } else if (priv->iw_mode == IEEE80211_IF_TYPE_AP) { + if (priv->qos_data.qos_enable) + priv->qos_data.qos_active = 1; + } else if (!(priv->staging_rxon.flags & RXON_FLG_SHORT_SLOT_MSK)) { + cw_min = 31; + is_legacy = 1; + } + + if (priv->qos_data.qos_active) + aifs = 3; + + priv->qos_data.def_qos_parm.ac[0].cw_min = cpu_to_le16(cw_min); + priv->qos_data.def_qos_parm.ac[0].cw_max = cpu_to_le16(cw_max); + priv->qos_data.def_qos_parm.ac[0].aifsn = aifs; + priv->qos_data.def_qos_parm.ac[0].edca_txop = 0; + priv->qos_data.def_qos_parm.ac[0].reserved1 = 0; + + if (priv->qos_data.qos_active) { + i = 1; + priv->qos_data.def_qos_parm.ac[i].cw_min = cpu_to_le16(cw_min); + priv->qos_data.def_qos_parm.ac[i].cw_max = cpu_to_le16(cw_max); + priv->qos_data.def_qos_parm.ac[i].aifsn = 7; + priv->qos_data.def_qos_parm.ac[i].edca_txop = 0; + priv->qos_data.def_qos_parm.ac[i].reserved1 = 0; + + i = 2; + priv->qos_data.def_qos_parm.ac[i].cw_min = + cpu_to_le16((cw_min + 1) / 2 - 1); + priv->qos_data.def_qos_parm.ac[i].cw_max = + cpu_to_le16(cw_max); + priv->qos_data.def_qos_parm.ac[i].aifsn = 2; + if (is_legacy) + priv->qos_data.def_qos_parm.ac[i].edca_txop = + cpu_to_le16(6016); + else + priv->qos_data.def_qos_parm.ac[i].edca_txop = + cpu_to_le16(3008); + priv->qos_data.def_qos_parm.ac[i].reserved1 = 0; + + i = 3; + priv->qos_data.def_qos_parm.ac[i].cw_min = + cpu_to_le16((cw_min + 1) / 4 - 1); + priv->qos_data.def_qos_parm.ac[i].cw_max = + cpu_to_le16((cw_max + 1) / 2 - 1); + priv->qos_data.def_qos_parm.ac[i].aifsn = 2; + priv->qos_data.def_qos_parm.ac[i].reserved1 = 0; + if (is_legacy) + priv->qos_data.def_qos_parm.ac[i].edca_txop = + cpu_to_le16(3264); + else + priv->qos_data.def_qos_parm.ac[i].edca_txop = + cpu_to_le16(1504); + } else { + for (i = 1; i < 4; i++) { + priv->qos_data.def_qos_parm.ac[i].cw_min = + cpu_to_le16(cw_min); + priv->qos_data.def_qos_parm.ac[i].cw_max = + cpu_to_le16(cw_max); + priv->qos_data.def_qos_parm.ac[i].aifsn = aifs; + priv->qos_data.def_qos_parm.ac[i].edca_txop = 0; + priv->qos_data.def_qos_parm.ac[i].reserved1 = 0; + } + } + IWL_DEBUG_QOS("set QoS to default \n"); + + spin_unlock_irqrestore(&priv->lock, flags); +} + +static void iwl_activate_qos(struct iwl_priv *priv, u8 force) +{ + unsigned long flags; + + if (priv == NULL) + return; + + if (test_bit(STATUS_EXIT_PENDING, &priv->status)) + return; + + if (!priv->qos_data.qos_enable) + return; + + spin_lock_irqsave(&priv->lock, flags); + priv->qos_data.def_qos_parm.qos_flags = 0; + + if (priv->qos_data.qos_cap.q_AP.queue_request && + !priv->qos_data.qos_cap.q_AP.txop_request) + priv->qos_data.def_qos_parm.qos_flags |= + QOS_PARAM_FLG_TXOP_TYPE_MSK; + + if 
(priv->qos_data.qos_active) + priv->qos_data.def_qos_parm.qos_flags |= + QOS_PARAM_FLG_UPDATE_EDCA_MSK; + + spin_unlock_irqrestore(&priv->lock, flags); + + if (force || iwl_is_associated(priv)) { + IWL_DEBUG_QOS("send QoS cmd with Qos active %d \n", + priv->qos_data.qos_active); + + iwl_send_qos_params_command(priv, + &(priv->qos_data.def_qos_parm)); + } +} + +#endif /* CONFIG_IWLWIFI_QOS */ +/* + * Power management (not Tx power!) functions + */ +#define MSEC_TO_USEC 1024 + +#define NOSLP __constant_cpu_to_le32(0) +#define SLP IWL_POWER_DRIVER_ALLOW_SLEEP_MSK +#define SLP_TIMEOUT(T) __constant_cpu_to_le32((T) * MSEC_TO_USEC) +#define SLP_VEC(X0, X1, X2, X3, X4) {__constant_cpu_to_le32(X0), \ + __constant_cpu_to_le32(X1), \ + __constant_cpu_to_le32(X2), \ + __constant_cpu_to_le32(X3), \ + __constant_cpu_to_le32(X4)} + + +/* default power management (not Tx power) table values */ +/* for tim 0-10 */ +static struct iwl_power_vec_entry range_0[IWL_POWER_AC] = { + {{NOSLP, SLP_TIMEOUT(0), SLP_TIMEOUT(0), SLP_VEC(0, 0, 0, 0, 0)}, 0}, + {{SLP, SLP_TIMEOUT(200), SLP_TIMEOUT(500), SLP_VEC(1, 2, 3, 4, 4)}, 0}, + {{SLP, SLP_TIMEOUT(200), SLP_TIMEOUT(300), SLP_VEC(2, 4, 6, 7, 7)}, 0}, + {{SLP, SLP_TIMEOUT(50), SLP_TIMEOUT(100), SLP_VEC(2, 6, 9, 9, 10)}, 0}, + {{SLP, SLP_TIMEOUT(50), SLP_TIMEOUT(25), SLP_VEC(2, 7, 9, 9, 10)}, 1}, + {{SLP, SLP_TIMEOUT(25), SLP_TIMEOUT(25), SLP_VEC(4, 7, 10, 10, 10)}, 1} +}; + +/* for tim > 10 */ +static struct iwl_power_vec_entry range_1[IWL_POWER_AC] = { + {{NOSLP, SLP_TIMEOUT(0), SLP_TIMEOUT(0), SLP_VEC(0, 0, 0, 0, 0)}, 0}, + {{SLP, SLP_TIMEOUT(200), SLP_TIMEOUT(500), + SLP_VEC(1, 2, 3, 4, 0xFF)}, 0}, + {{SLP, SLP_TIMEOUT(200), SLP_TIMEOUT(300), + SLP_VEC(2, 4, 6, 7, 0xFF)}, 0}, + {{SLP, SLP_TIMEOUT(50), SLP_TIMEOUT(100), + SLP_VEC(2, 6, 9, 9, 0xFF)}, 0}, + {{SLP, SLP_TIMEOUT(50), SLP_TIMEOUT(25), SLP_VEC(2, 7, 9, 9, 0xFF)}, 0}, + {{SLP, SLP_TIMEOUT(25), SLP_TIMEOUT(25), + SLP_VEC(4, 7, 10, 10, 0xFF)}, 0} +}; + +int iwl_power_init_handle(struct iwl_priv *priv) +{ + int rc = 0, i; + struct iwl_power_mgr *pow_data; + int size = sizeof(struct iwl_power_vec_entry) * IWL_POWER_AC; + u16 pci_pm; + + IWL_DEBUG_POWER("Initialize power \n"); + + pow_data = &(priv->power_data); + + memset(pow_data, 0, sizeof(*pow_data)); + + pow_data->active_index = IWL_POWER_RANGE_0; + pow_data->dtim_val = 0xffff; + + memcpy(&pow_data->pwr_range_0[0], &range_0[0], size); + memcpy(&pow_data->pwr_range_1[0], &range_1[0], size); + + rc = pci_read_config_word(priv->pci_dev, PCI_LINK_CTRL, &pci_pm); + if (rc != 0) + return 0; + else { + struct iwl_powertable_cmd *cmd; + + IWL_DEBUG_POWER("adjust power command flags\n"); + + for (i = 0; i < IWL_POWER_AC; i++) { + cmd = &pow_data->pwr_range_0[i].cmd; + + if (pci_pm & 0x1) + cmd->flags &= ~IWL_POWER_PCI_PM_MSK; + else + cmd->flags |= IWL_POWER_PCI_PM_MSK; + } + } + return rc; +} + +static int iwl_update_power_cmd(struct iwl_priv *priv, + struct iwl_powertable_cmd *cmd, u32 mode) +{ + int rc = 0, i; + u8 skip; + u32 max_sleep = 0; + struct iwl_power_vec_entry *range; + u8 period = 0; + struct iwl_power_mgr *pow_data; + + if (mode > IWL_POWER_INDEX_5) { + IWL_DEBUG_POWER("Error invalid power mode \n"); + return -1; + } + pow_data = &(priv->power_data); + + if (pow_data->active_index == IWL_POWER_RANGE_0) + range = &pow_data->pwr_range_0[0]; + else + range = &pow_data->pwr_range_1[1]; + + memcpy(cmd, &range[mode].cmd, sizeof(struct iwl_powertable_cmd)); + +#ifdef IWL_MAC80211_DISABLE + if (priv->assoc_network != NULL) { + unsigned long flags; + + period 
= priv->assoc_network->tim.tim_period; + } +#endif /*IWL_MAC80211_DISABLE */ + skip = range[mode].no_dtim; + + if (period == 0) { + period = 1; + skip = 0; + } + + if (skip == 0) { + max_sleep = period; + cmd->flags &= ~IWL_POWER_SLEEP_OVER_DTIM_MSK; + } else { + __le32 slp_itrvl = cmd->sleep_interval[IWL_POWER_VEC_SIZE - 1]; + max_sleep = (le32_to_cpu(slp_itrvl) / period) * period; + cmd->flags |= IWL_POWER_SLEEP_OVER_DTIM_MSK; + } + + for (i = 0; i < IWL_POWER_VEC_SIZE; i++) { + if (le32_to_cpu(cmd->sleep_interval[i]) > max_sleep) + cmd->sleep_interval[i] = cpu_to_le32(max_sleep); + } + + IWL_DEBUG_POWER("Flags value = 0x%08X\n", cmd->flags); + IWL_DEBUG_POWER("Tx timeout = %u\n", le32_to_cpu(cmd->tx_data_timeout)); + IWL_DEBUG_POWER("Rx timeout = %u\n", le32_to_cpu(cmd->rx_data_timeout)); + IWL_DEBUG_POWER("Sleep interval vector = { %d , %d , %d , %d , %d }\n", + le32_to_cpu(cmd->sleep_interval[0]), + le32_to_cpu(cmd->sleep_interval[1]), + le32_to_cpu(cmd->sleep_interval[2]), + le32_to_cpu(cmd->sleep_interval[3]), + le32_to_cpu(cmd->sleep_interval[4])); + + return rc; +} + +static int iwl_send_power_mode(struct iwl_priv *priv, u32 mode) +{ + u32 final_mode = mode; + int rc; + struct iwl_powertable_cmd cmd; + + /* If on battery, set to 3, + * if plugged into AC power, set to CAM ("continuosly aware mode"), + * else user level */ + switch (mode) { + case IWL_POWER_BATTERY: + final_mode = IWL_POWER_INDEX_3; + break; + case IWL_POWER_AC: + final_mode = IWL_POWER_MODE_CAM; + break; + default: + final_mode = mode; + break; + } + + iwl_update_power_cmd(priv, &cmd, final_mode); + + rc = iwl_send_cmd_pdu(priv, POWER_TABLE_CMD, sizeof(cmd), &cmd); + + if (final_mode == IWL_POWER_MODE_CAM) + clear_bit(STATUS_POWER_PMI, &priv->status); + else + set_bit(STATUS_POWER_PMI, &priv->status); + + return rc; +} + +int iwl_is_network_packet(struct iwl_priv *priv, struct ieee80211_hdr *header) +{ + /* Filter incoming packets to determine if they are targeted toward + * this network, discarding packets coming from ourselves */ + switch (priv->iw_mode) { + case IEEE80211_IF_TYPE_IBSS: /* Header: Dest. | Source | BSSID */ + /* packets from our adapter are dropped (echo) */ + if (!compare_ether_addr(header->addr2, priv->mac_addr)) + return 0; + /* {broad,multi}cast packets to our IBSS go through */ + if (is_multicast_ether_addr(header->addr1)) + return !compare_ether_addr(header->addr3, priv->bssid); + /* packets to our adapter go through */ + return !compare_ether_addr(header->addr1, priv->mac_addr); + case IEEE80211_IF_TYPE_STA: /* Header: Dest. 
| AP{BSSID} | Source */ + /* packets from our adapter are dropped (echo) */ + if (!compare_ether_addr(header->addr3, priv->mac_addr)) + return 0; + /* {broad,multi}cast packets to our BSS go through */ + if (is_multicast_ether_addr(header->addr1)) + return !compare_ether_addr(header->addr2, priv->bssid); + /* packets to our adapter go through */ + return !compare_ether_addr(header->addr1, priv->mac_addr); + } + + return 1; +} + +#define TX_STATUS_ENTRY(x) case TX_STATUS_FAIL_ ## x: return #x + +const char *iwl_get_tx_fail_reason(u32 status) +{ + switch (status & TX_STATUS_MSK) { + case TX_STATUS_SUCCESS: + return "SUCCESS"; + TX_STATUS_ENTRY(SHORT_LIMIT); + TX_STATUS_ENTRY(LONG_LIMIT); + TX_STATUS_ENTRY(FIFO_UNDERRUN); + TX_STATUS_ENTRY(MGMNT_ABORT); + TX_STATUS_ENTRY(NEXT_FRAG); + TX_STATUS_ENTRY(LIFE_EXPIRE); + TX_STATUS_ENTRY(DEST_PS); + TX_STATUS_ENTRY(ABORTED); + TX_STATUS_ENTRY(BT_RETRY); + TX_STATUS_ENTRY(STA_INVALID); + TX_STATUS_ENTRY(FRAG_DROPPED); + TX_STATUS_ENTRY(TID_DISABLE); + TX_STATUS_ENTRY(FRAME_FLUSHED); + TX_STATUS_ENTRY(INSUFFICIENT_CF_POLL); + TX_STATUS_ENTRY(TX_LOCKED); + TX_STATUS_ENTRY(NO_BEACON_ON_RADAR); + } + + return "UNKNOWN"; +} + +/** + * iwl_scan_cancel - Cancel any currently executing HW scan + * + * NOTE: priv->mutex is not required before calling this function + */ +static int iwl_scan_cancel(struct iwl_priv *priv) +{ + if (!test_bit(STATUS_SCAN_HW, &priv->status)) { + clear_bit(STATUS_SCANNING, &priv->status); + return 0; + } + + if (test_bit(STATUS_SCANNING, &priv->status)) { + if (!test_bit(STATUS_SCAN_ABORTING, &priv->status)) { + IWL_DEBUG_SCAN("Queuing scan abort.\n"); + set_bit(STATUS_SCAN_ABORTING, &priv->status); + queue_work(priv->workqueue, &priv->abort_scan); + + } else + IWL_DEBUG_SCAN("Scan abort already in progress.\n"); + + return test_bit(STATUS_SCANNING, &priv->status); + } + + return 0; +} + +/** + * iwl_scan_cancel_timeout - Cancel any currently executing HW scan + * @ms: amount of time to wait (in milliseconds) for scan to abort + * + * NOTE: priv->mutex must be held before calling this function + */ +static int iwl_scan_cancel_timeout(struct iwl_priv *priv, unsigned long ms) +{ + unsigned long now = jiffies; + int ret; + + ret = iwl_scan_cancel(priv); + if (ret && ms) { + mutex_unlock(&priv->mutex); + while (!time_after(jiffies, now + msecs_to_jiffies(ms)) && + test_bit(STATUS_SCANNING, &priv->status)) + msleep(1); + mutex_lock(&priv->mutex); + + return test_bit(STATUS_SCANNING, &priv->status); + } + + return ret; +} + +static void iwl_sequence_reset(struct iwl_priv *priv) +{ + /* Reset ieee stats */ + + /* We don't reset the net_device_stats (ieee->stats) on + * re-association */ + + priv->last_seq_num = -1; + priv->last_frag_num = -1; + priv->last_packet_time = 0; + + iwl_scan_cancel(priv); +} + +#define MAX_UCODE_BEACON_INTERVAL 1024 +#define INTEL_CONN_LISTEN_INTERVAL __constant_cpu_to_le16(0xA) + +static __le16 iwl_adjust_beacon_interval(u16 beacon_val) +{ + u16 new_val = 0; + u16 beacon_factor = 0; + + beacon_factor = + (beacon_val + MAX_UCODE_BEACON_INTERVAL) + / MAX_UCODE_BEACON_INTERVAL; + new_val = beacon_val / beacon_factor; + + return cpu_to_le16(new_val); +} + +static void iwl_setup_rxon_timing(struct iwl_priv *priv) +{ + u64 interval_tm_unit; + u64 tsf, result; + unsigned long flags; + struct ieee80211_conf *conf = NULL; + u16 beacon_int = 0; + + conf = ieee80211_get_hw_conf(priv->hw); + + spin_lock_irqsave(&priv->lock, flags); + priv->rxon_timing.timestamp.dw[1] = cpu_to_le32(priv->timestamp1); + 
priv->rxon_timing.timestamp.dw[0] = cpu_to_le32(priv->timestamp0); + + priv->rxon_timing.listen_interval = INTEL_CONN_LISTEN_INTERVAL; + + tsf = priv->timestamp1; + tsf = ((tsf << 32) | priv->timestamp0); + + beacon_int = priv->beacon_int; + spin_unlock_irqrestore(&priv->lock, flags); + + if (priv->iw_mode == IEEE80211_IF_TYPE_STA) { + if (beacon_int == 0) { + priv->rxon_timing.beacon_interval = cpu_to_le16(100); + priv->rxon_timing.beacon_init_val = cpu_to_le32(102400); + } else { + priv->rxon_timing.beacon_interval = + cpu_to_le16(beacon_int); + priv->rxon_timing.beacon_interval = + iwl_adjust_beacon_interval( + le16_to_cpu(priv->rxon_timing.beacon_interval)); + } + + priv->rxon_timing.atim_window = 0; + } else { + priv->rxon_timing.beacon_interval = + iwl_adjust_beacon_interval(conf->beacon_int); + /* TODO: we need to get atim_window from upper stack + * for now we set to 0 */ + priv->rxon_timing.atim_window = 0; + } + + interval_tm_unit = + (le16_to_cpu(priv->rxon_timing.beacon_interval) * 1024); + result = do_div(tsf, interval_tm_unit); + priv->rxon_timing.beacon_init_val = + cpu_to_le32((u32) ((u64) interval_tm_unit - result)); + + IWL_DEBUG_ASSOC + ("beacon interval %d beacon timer %d beacon tim %d\n", + le16_to_cpu(priv->rxon_timing.beacon_interval), + le32_to_cpu(priv->rxon_timing.beacon_init_val), + le16_to_cpu(priv->rxon_timing.atim_window)); +} + +static int iwl_scan_initiate(struct iwl_priv *priv) +{ + if (priv->iw_mode == IEEE80211_IF_TYPE_AP) { + IWL_ERROR("APs don't scan.\n"); + return 0; + } + + if (!iwl_is_ready_rf(priv)) { + IWL_DEBUG_SCAN("Aborting scan due to not ready.\n"); + return -EIO; + } + + if (test_bit(STATUS_SCANNING, &priv->status)) { + IWL_DEBUG_SCAN("Scan already in progress.\n"); + return -EAGAIN; + } + + if (test_bit(STATUS_SCAN_ABORTING, &priv->status)) { + IWL_DEBUG_SCAN("Scan request while abort pending. 
" + "Queuing.\n"); + return -EAGAIN; + } + + IWL_DEBUG_INFO("Starting scan...\n"); + priv->scan_bands = 2; + set_bit(STATUS_SCANNING, &priv->status); + priv->scan_start = jiffies; + priv->scan_pass_start = priv->scan_start; + + queue_work(priv->workqueue, &priv->request_scan); + + return 0; +} + +static int iwl_set_rxon_hwcrypto(struct iwl_priv *priv, int hw_decrypt) +{ + struct iwl_rxon_cmd *rxon = &priv->staging_rxon; + + if (hw_decrypt) + rxon->filter_flags &= ~RXON_FILTER_DIS_DECRYPT_MSK; + else + rxon->filter_flags |= RXON_FILTER_DIS_DECRYPT_MSK; + + return 0; +} + +static void iwl_set_flags_for_phymode(struct iwl_priv *priv, u8 phymode) +{ + if (phymode == MODE_IEEE80211A) { + priv->staging_rxon.flags &= + ~(RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK + | RXON_FLG_CCK_MSK); + priv->staging_rxon.flags |= RXON_FLG_SHORT_SLOT_MSK; + } else { + /* Copied from iwl_bg_post_associate() */ + if (priv->assoc_capability & WLAN_CAPABILITY_SHORT_SLOT_TIME) + priv->staging_rxon.flags |= RXON_FLG_SHORT_SLOT_MSK; + else + priv->staging_rxon.flags &= ~RXON_FLG_SHORT_SLOT_MSK; + + if (priv->iw_mode == IEEE80211_IF_TYPE_IBSS) + priv->staging_rxon.flags &= ~RXON_FLG_SHORT_SLOT_MSK; + + priv->staging_rxon.flags |= RXON_FLG_BAND_24G_MSK; + priv->staging_rxon.flags |= RXON_FLG_AUTO_DETECT_MSK; + priv->staging_rxon.flags &= ~RXON_FLG_CCK_MSK; + } +} + +/* + * initilize rxon structure with default values fromm eeprom + */ +static void iwl_connection_init_rx_config(struct iwl_priv *priv) +{ + const struct iwl_channel_info *ch_info; + + memset(&priv->staging_rxon, 0, sizeof(priv->staging_rxon)); + + switch (priv->iw_mode) { + case IEEE80211_IF_TYPE_AP: + priv->staging_rxon.dev_type = RXON_DEV_TYPE_AP; + break; + + case IEEE80211_IF_TYPE_STA: + priv->staging_rxon.dev_type = RXON_DEV_TYPE_ESS; + priv->staging_rxon.filter_flags = RXON_FILTER_ACCEPT_GRP_MSK; + break; + + case IEEE80211_IF_TYPE_IBSS: + priv->staging_rxon.dev_type = RXON_DEV_TYPE_IBSS; + priv->staging_rxon.flags = RXON_FLG_SHORT_PREAMBLE_MSK; + priv->staging_rxon.filter_flags = RXON_FILTER_BCON_AWARE_MSK | + RXON_FILTER_ACCEPT_GRP_MSK; + break; + + case IEEE80211_IF_TYPE_MNTR: + priv->staging_rxon.dev_type = RXON_DEV_TYPE_SNIFFER; + priv->staging_rxon.filter_flags = RXON_FILTER_PROMISC_MSK | + RXON_FILTER_CTL2HOST_MSK | RXON_FILTER_ACCEPT_GRP_MSK; + break; + } + +#if 0 + /* TODO: Figure out when short_preamble would be set and cache from + * that */ + if (!hw_to_local(priv->hw)->short_preamble) + priv->staging_rxon.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK; + else + priv->staging_rxon.flags |= RXON_FLG_SHORT_PREAMBLE_MSK; +#endif + + ch_info = iwl_get_channel_info(priv, priv->phymode, + le16_to_cpu(priv->staging_rxon.channel)); + + if (!ch_info) + ch_info = &priv->channel_info[0]; + + /* + * in some case A channels are all non IBSS + * in this case force B/G channel + */ + if ((priv->iw_mode == IEEE80211_IF_TYPE_IBSS) && + !(is_channel_ibss(ch_info))) + ch_info = &priv->channel_info[0]; + + priv->staging_rxon.channel = cpu_to_le16(ch_info->channel); + if (is_channel_a_band(ch_info)) + priv->phymode = MODE_IEEE80211A; + else + priv->phymode = MODE_IEEE80211G; + + iwl_set_flags_for_phymode(priv, priv->phymode); + + priv->staging_rxon.ofdm_basic_rates = + (IWL_OFDM_RATES_MASK >> IWL_FIRST_OFDM_RATE) & 0xFF; + priv->staging_rxon.cck_basic_rates = + (IWL_CCK_RATES_MASK >> IWL_FIRST_CCK_RATE) & 0xF; +} + +static int iwl_set_mode(struct iwl_priv *priv, int mode) +{ + if (!iwl_is_ready_rf(priv)) + return -EAGAIN; + + if (mode == IEEE80211_IF_TYPE_IBSS) { 
+ const struct iwl_channel_info *ch_info; + + ch_info = iwl_get_channel_info(priv, + priv->phymode, + le16_to_cpu(priv->staging_rxon.channel)); + + if (!ch_info || !is_channel_ibss(ch_info)) { + IWL_ERROR("channel %d not IBSS channel\n", + le16_to_cpu(priv->staging_rxon.channel)); + return -EINVAL; + } + } + + cancel_delayed_work(&priv->scan_check); + if (iwl_scan_cancel_timeout(priv, 100)) { + IWL_WARNING("Aborted scan still in progress after 100ms\n"); + IWL_DEBUG_MAC80211("leaving - scan abort failed.\n"); + return -EAGAIN; + } + + priv->iw_mode = mode; + + iwl_connection_init_rx_config(priv); + memcpy(priv->staging_rxon.node_addr, priv->mac_addr, ETH_ALEN); + + iwl_clear_stations_table(priv); + + iwl_commit_rxon(priv); + + return 0; +} + +static void iwl_build_tx_cmd_hwcrypto(struct iwl_priv *priv, + struct ieee80211_tx_control *ctl, + struct iwl_cmd *cmd, + struct sk_buff *skb_frag, + int last_frag) +{ + struct iwl_hw_key *keyinfo = &priv->stations[ctl->key_idx].keyinfo; + + switch (keyinfo->alg) { + case ALG_CCMP: + cmd->cmd.tx.sec_ctl = TX_CMD_SEC_CCM; + memcpy(cmd->cmd.tx.key, keyinfo->key, keyinfo->keylen); + IWL_DEBUG_TX("tx_cmd with aes hwcrypto\n"); + break; + + case ALG_TKIP: +#if 0 + cmd->cmd.tx.sec_ctl = TX_CMD_SEC_TKIP; + + if (last_frag) + memcpy(cmd->cmd.tx.tkip_mic.byte, skb_frag->tail - 8, + 8); + else + memset(cmd->cmd.tx.tkip_mic.byte, 0, 8); +#endif + break; + + case ALG_WEP: + cmd->cmd.tx.sec_ctl = TX_CMD_SEC_WEP | + (ctl->key_idx & TX_CMD_SEC_MSK) << TX_CMD_SEC_SHIFT; + + if (keyinfo->keylen == 13) + cmd->cmd.tx.sec_ctl |= TX_CMD_SEC_KEY128; + + memcpy(&cmd->cmd.tx.key[3], keyinfo->key, keyinfo->keylen); + + IWL_DEBUG_TX("Configuring packet for WEP encryption " + "with key %d\n", ctl->key_idx); + break; + + default: + printk(KERN_ERR "Unknown encode alg %d\n", keyinfo->alg); + break; + } +} + +/* + * handle build REPLY_TX command notification. 
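+ *
+ * Fills in the fixed parts of the TX command for one frame: ACK and
+ * sequence-control flags, the TSF flag for probe responses, more-fragments
+ * and QoS TID handling, RTS/CTS protection (plus full-TXOP protection when
+ * either is requested) and the power-management frame timeout used for
+ * (re)association requests.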
+ */ +static void iwl_build_tx_cmd_basic(struct iwl_priv *priv, + struct iwl_cmd *cmd, + struct ieee80211_tx_control *ctrl, + struct ieee80211_hdr *hdr, + int is_unicast, u8 std_id) +{ + __le16 *qc; + u16 fc = le16_to_cpu(hdr->frame_control); + __le32 tx_flags = cmd->cmd.tx.tx_flags; + + cmd->cmd.tx.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE; + if (!(ctrl->flags & IEEE80211_TXCTL_NO_ACK)) { + tx_flags |= TX_CMD_FLG_ACK_MSK; + if ((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_MGMT) + tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK; + if (ieee80211_is_probe_response(fc) && + !(le16_to_cpu(hdr->seq_ctrl) & 0xf)) + tx_flags |= TX_CMD_FLG_TSF_MSK; + } else { + tx_flags &= (~TX_CMD_FLG_ACK_MSK); + tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK; + } + + cmd->cmd.tx.sta_id = std_id; + if (ieee80211_get_morefrag(hdr)) + tx_flags |= TX_CMD_FLG_MORE_FRAG_MSK; + + qc = ieee80211_get_qos_ctrl(hdr); + if (qc) { + cmd->cmd.tx.tid_tspec = (u8) (le16_to_cpu(*qc) & 0xf); + tx_flags &= ~TX_CMD_FLG_SEQ_CTL_MSK; + } else + tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK; + + if (ctrl->flags & IEEE80211_TXCTL_USE_RTS_CTS) { + tx_flags |= TX_CMD_FLG_RTS_MSK; + tx_flags &= ~TX_CMD_FLG_CTS_MSK; + } else if (ctrl->flags & IEEE80211_TXCTL_USE_CTS_PROTECT) { + tx_flags &= ~TX_CMD_FLG_RTS_MSK; + tx_flags |= TX_CMD_FLG_CTS_MSK; + } + + if ((tx_flags & TX_CMD_FLG_RTS_MSK) || (tx_flags & TX_CMD_FLG_CTS_MSK)) + tx_flags |= TX_CMD_FLG_FULL_TXOP_PROT_MSK; + + tx_flags &= ~(TX_CMD_FLG_ANT_SEL_MSK); + if ((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_MGMT) { + if ((fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_ASSOC_REQ || + (fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_REASSOC_REQ) + cmd->cmd.tx.timeout.pm_frame_timeout = + cpu_to_le16(3); + else + cmd->cmd.tx.timeout.pm_frame_timeout = + cpu_to_le16(2); + } else + cmd->cmd.tx.timeout.pm_frame_timeout = 0; + + cmd->cmd.tx.driver_txop = 0; + cmd->cmd.tx.tx_flags = tx_flags; + cmd->cmd.tx.next_frame_len = 0; +} + +static int iwl_get_sta_id(struct iwl_priv *priv, struct ieee80211_hdr *hdr) +{ + int sta_id; + u16 fc = le16_to_cpu(hdr->frame_control); + + /* If this frame is broadcast or not data then use the broadcast + * station id */ + if (((fc & IEEE80211_FCTL_FTYPE) != IEEE80211_FTYPE_DATA) || + is_multicast_ether_addr(hdr->addr1)) + return priv->hw_setting.bcast_sta_id; + + switch (priv->iw_mode) { + + /* If this frame is part of a BSS network (we're a station), then + * we use the AP's station id */ + case IEEE80211_IF_TYPE_STA: + return IWL_AP_ID; + + /* If we are an AP, then find the station, or use BCAST */ + case IEEE80211_IF_TYPE_AP: + sta_id = iwl_hw_find_station(priv, hdr->addr1); + if (sta_id != IWL_INVALID_STATION) + return sta_id; + return priv->hw_setting.bcast_sta_id; + + /* If this frame is part of a IBSS network, then we use the + * target specific station id */ + case IEEE80211_IF_TYPE_IBSS: { + DECLARE_MAC_BUF(mac); + + sta_id = iwl_hw_find_station(priv, hdr->addr1); + if (sta_id != IWL_INVALID_STATION) + return sta_id; + + sta_id = iwl_add_station(priv, hdr->addr1, 0, CMD_ASYNC); + + if (sta_id != IWL_INVALID_STATION) + return sta_id; + + IWL_DEBUG_DROP("Station %s not in station map. 
" + "Defaulting to broadcast...\n", + print_mac(mac, hdr->addr1)); + iwl_print_hex_dump(IWL_DL_DROP, (u8 *) hdr, sizeof(*hdr)); + return priv->hw_setting.bcast_sta_id; + } + default: + IWL_WARNING("Unkown mode of operation: %d", priv->iw_mode); + return priv->hw_setting.bcast_sta_id; + } +} + +/* + * start REPLY_TX command process + */ +static int iwl_tx_skb(struct iwl_priv *priv, + struct sk_buff *skb, struct ieee80211_tx_control *ctl) +{ + struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; + struct iwl_tfd_frame *tfd; + u32 *control_flags; + int txq_id = ctl->queue; + struct iwl_tx_queue *txq = NULL; + struct iwl_queue *q = NULL; + dma_addr_t phys_addr; + dma_addr_t txcmd_phys; + struct iwl_cmd *out_cmd = NULL; + u16 len, idx, len_org; + u8 id, hdr_len, unicast; + u8 sta_id; + u16 seq_number = 0; + u16 fc; + __le16 *qc; + u8 wait_write_ptr = 0; + unsigned long flags; + int rc; + + spin_lock_irqsave(&priv->lock, flags); + if (iwl_is_rfkill(priv)) { + IWL_DEBUG_DROP("Dropping - RF KILL\n"); + goto drop_unlock; + } + + if (!priv->interface_id) { + IWL_DEBUG_DROP("Dropping - !priv->interface_id\n"); + goto drop_unlock; + } + + if ((ctl->tx_rate & 0xFF) == IWL_INVALID_RATE) { + IWL_ERROR("ERROR: No TX rate available.\n"); + goto drop_unlock; + } + + unicast = !is_multicast_ether_addr(hdr->addr1); + id = 0; + + fc = le16_to_cpu(hdr->frame_control); + +#ifdef CONFIG_IWLWIFI_DEBUG + if (ieee80211_is_auth(fc)) + IWL_DEBUG_TX("Sending AUTH frame\n"); + else if (ieee80211_is_assoc_request(fc)) + IWL_DEBUG_TX("Sending ASSOC frame\n"); + else if (ieee80211_is_reassoc_request(fc)) + IWL_DEBUG_TX("Sending REASSOC frame\n"); +#endif + + if (!iwl_is_associated(priv) && + ((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_DATA)) { + IWL_DEBUG_DROP("Dropping - !iwl_is_associated\n"); + goto drop_unlock; + } + + spin_unlock_irqrestore(&priv->lock, flags); + + hdr_len = ieee80211_get_hdrlen(fc); + sta_id = iwl_get_sta_id(priv, hdr); + if (sta_id == IWL_INVALID_STATION) { + DECLARE_MAC_BUF(mac); + + IWL_DEBUG_DROP("Dropping - INVALID STATION: %s\n", + print_mac(mac, hdr->addr1)); + goto drop; + } + + IWL_DEBUG_RATE("station Id %d\n", sta_id); + + qc = ieee80211_get_qos_ctrl(hdr); + if (qc) { + u8 tid = (u8)(le16_to_cpu(*qc) & 0xf); + seq_number = priv->stations[sta_id].tid[tid].seq_number & + IEEE80211_SCTL_SEQ; + hdr->seq_ctrl = cpu_to_le16(seq_number) | + (hdr->seq_ctrl & + __constant_cpu_to_le16(IEEE80211_SCTL_FRAG)); + seq_number += 0x10; + } + txq = &priv->txq[txq_id]; + q = &txq->q; + + spin_lock_irqsave(&priv->lock, flags); + + tfd = &txq->bd[q->first_empty]; + memset(tfd, 0, sizeof(*tfd)); + control_flags = (u32 *) tfd; + idx = get_cmd_index(q, q->first_empty, 0); + + memset(&(txq->txb[q->first_empty]), 0, sizeof(struct iwl_tx_info)); + txq->txb[q->first_empty].skb[0] = skb; + memcpy(&(txq->txb[q->first_empty].status.control), + ctl, sizeof(struct ieee80211_tx_control)); + out_cmd = &txq->cmd[idx]; + memset(&out_cmd->hdr, 0, sizeof(out_cmd->hdr)); + memset(&out_cmd->cmd.tx, 0, sizeof(out_cmd->cmd.tx)); + out_cmd->hdr.cmd = REPLY_TX; + out_cmd->hdr.sequence = cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) | + INDEX_TO_SEQ(q->first_empty))); + /* copy frags header */ + memcpy(out_cmd->cmd.tx.hdr, hdr, hdr_len); + + /* hdr = (struct ieee80211_hdr *)out_cmd->cmd.tx.hdr; */ + len = priv->hw_setting.tx_cmd_len + + sizeof(struct iwl_cmd_header) + hdr_len; + + len_org = len; + len = (len + 3) & ~3; + + if (len_org != len) + len_org = 1; + else + len_org = 0; + + txcmd_phys = txq->dma_addr_cmd + sizeof(struct 
iwl_cmd) * idx + + offsetof(struct iwl_cmd, hdr); + + iwl_hw_txq_attach_buf_to_tfd(priv, tfd, txcmd_phys, len); + + if (!(ctl->flags & IEEE80211_TXCTL_DO_NOT_ENCRYPT)) + iwl_build_tx_cmd_hwcrypto(priv, ctl, out_cmd, skb, 0); + + /* 802.11 null functions have no payload... */ + len = skb->len - hdr_len; + if (len) { + phys_addr = pci_map_single(priv->pci_dev, skb->data + hdr_len, + len, PCI_DMA_TODEVICE); + iwl_hw_txq_attach_buf_to_tfd(priv, tfd, phys_addr, len); + } + + /* If there is no payload, then only one TFD is used */ + if (!len) + *control_flags = TFD_CTL_COUNT_SET(1); + else + *control_flags = TFD_CTL_COUNT_SET(2) | + TFD_CTL_PAD_SET(U32_PAD(len)); + + len = (u16)skb->len; + out_cmd->cmd.tx.len = cpu_to_le16(len); + + /* TODO need this for burst mode later on */ + iwl_build_tx_cmd_basic(priv, out_cmd, ctl, hdr, unicast, sta_id); + + /* set is_hcca to 0; it probably will never be implemented */ + iwl_hw_build_tx_cmd_rate(priv, out_cmd, ctl, hdr, sta_id, 0); + + out_cmd->cmd.tx.tx_flags &= ~TX_CMD_FLG_ANT_A_MSK; + out_cmd->cmd.tx.tx_flags &= ~TX_CMD_FLG_ANT_B_MSK; + + if (!ieee80211_get_morefrag(hdr)) { + txq->need_update = 1; + if (qc) { + u8 tid = (u8)(le16_to_cpu(*qc) & 0xf); + priv->stations[sta_id].tid[tid].seq_number = seq_number; + } + } else { + wait_write_ptr = 1; + txq->need_update = 0; + } + + iwl_print_hex_dump(IWL_DL_TX, out_cmd->cmd.payload, + sizeof(out_cmd->cmd.tx)); + + iwl_print_hex_dump(IWL_DL_TX, (u8 *)out_cmd->cmd.tx.hdr, + ieee80211_get_hdrlen(fc)); + + q->first_empty = iwl_queue_inc_wrap(q->first_empty, q->n_bd); + rc = iwl_tx_queue_update_write_ptr(priv, txq); + spin_unlock_irqrestore(&priv->lock, flags); + + if (rc) + return rc; + + if ((iwl_queue_space(q) < q->high_mark) + && priv->mac80211_registered) { + if (wait_write_ptr) { + spin_lock_irqsave(&priv->lock, flags); + txq->need_update = 1; + iwl_tx_queue_update_write_ptr(priv, txq); + spin_unlock_irqrestore(&priv->lock, flags); + } + + ieee80211_stop_queue(priv->hw, ctl->queue); + } + + return 0; + +drop_unlock: + spin_unlock_irqrestore(&priv->lock, flags); +drop: + return -1; +} + +static void iwl_set_rate(struct iwl_priv *priv) +{ + const struct ieee80211_hw_mode *hw = NULL; + struct ieee80211_rate *rate; + int i; + + hw = iwl_get_hw_mode(priv, priv->phymode); + + priv->active_rate = 0; + priv->active_rate_basic = 0; + + IWL_DEBUG_RATE("Setting rates for 802.11%c\n", + hw->mode == MODE_IEEE80211A ? + 'a' : ((hw->mode == MODE_IEEE80211B) ? 'b' : 'g')); + + for (i = 0; i < hw->num_rates; i++) { + rate = &(hw->rates[i]); + if ((rate->val < IWL_RATE_COUNT) && + (rate->flags & IEEE80211_RATE_SUPPORTED)) { + IWL_DEBUG_RATE("Adding rate index %d (plcp %d)%s\n", + rate->val, iwl_rates[rate->val].plcp, + (rate->flags & IEEE80211_RATE_BASIC) ? 
+ "*" : ""); + priv->active_rate |= (1 << rate->val); + if (rate->flags & IEEE80211_RATE_BASIC) + priv->active_rate_basic |= (1 << rate->val); + } else + IWL_DEBUG_RATE("Not adding rate %d (plcp %d)\n", + rate->val, iwl_rates[rate->val].plcp); + } + + IWL_DEBUG_RATE("Set active_rate = %0x, active_rate_basic = %0x\n", + priv->active_rate, priv->active_rate_basic); + + /* + * If a basic rate is configured, then use it (adding IWL_RATE_1M_MASK) + * otherwise set it to the default of all CCK rates and 6, 12, 24 for + * OFDM + */ + if (priv->active_rate_basic & IWL_CCK_BASIC_RATES_MASK) + priv->staging_rxon.cck_basic_rates = + ((priv->active_rate_basic & + IWL_CCK_RATES_MASK) >> IWL_FIRST_CCK_RATE) & 0xF; + else + priv->staging_rxon.cck_basic_rates = + (IWL_CCK_BASIC_RATES_MASK >> IWL_FIRST_CCK_RATE) & 0xF; + + if (priv->active_rate_basic & IWL_OFDM_BASIC_RATES_MASK) + priv->staging_rxon.ofdm_basic_rates = + ((priv->active_rate_basic & + (IWL_OFDM_BASIC_RATES_MASK | IWL_RATE_6M_MASK)) >> + IWL_FIRST_OFDM_RATE) & 0xFF; + else + priv->staging_rxon.ofdm_basic_rates = + (IWL_OFDM_BASIC_RATES_MASK >> IWL_FIRST_OFDM_RATE) & 0xFF; +} + +static void iwl_radio_kill_sw(struct iwl_priv *priv, int disable_radio) +{ + unsigned long flags; + + if (!!disable_radio == test_bit(STATUS_RF_KILL_SW, &priv->status)) + return; + + IWL_DEBUG_RF_KILL("Manual SW RF KILL set to: RADIO %s\n", + disable_radio ? "OFF" : "ON"); + + if (disable_radio) { + iwl_scan_cancel(priv); + /* FIXME: This is a workaround for AP */ + if (priv->iw_mode != IEEE80211_IF_TYPE_AP) { + spin_lock_irqsave(&priv->lock, flags); + iwl_write32(priv, CSR_UCODE_DRV_GP1_SET, + CSR_UCODE_SW_BIT_RFKILL); + spin_unlock_irqrestore(&priv->lock, flags); + iwl_send_card_state(priv, CARD_STATE_CMD_DISABLE, 0); + set_bit(STATUS_RF_KILL_SW, &priv->status); + } + return; + } + + spin_lock_irqsave(&priv->lock, flags); + iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL); + + clear_bit(STATUS_RF_KILL_SW, &priv->status); + spin_unlock_irqrestore(&priv->lock, flags); + + /* wake up ucode */ + msleep(10); + + spin_lock_irqsave(&priv->lock, flags); + iwl_read32(priv, CSR_UCODE_DRV_GP1); + if (!iwl_grab_restricted_access(priv)) + iwl_release_restricted_access(priv); + spin_unlock_irqrestore(&priv->lock, flags); + + if (test_bit(STATUS_RF_KILL_HW, &priv->status)) { + IWL_DEBUG_RF_KILL("Can not turn radio back on - " + "disabled by HW switch\n"); + return; + } + + queue_work(priv->workqueue, &priv->restart); + return; +} + +void iwl_set_decrypted_flag(struct iwl_priv *priv, struct sk_buff *skb, + u32 decrypt_res, struct ieee80211_rx_status *stats) +{ + u16 fc = + le16_to_cpu(((struct ieee80211_hdr *)skb->data)->frame_control); + + if (priv->active_rxon.filter_flags & RXON_FILTER_DIS_DECRYPT_MSK) + return; + + if (!(fc & IEEE80211_FCTL_PROTECTED)) + return; + + IWL_DEBUG_RX("decrypt_res:0x%x\n", decrypt_res); + switch (decrypt_res & RX_RES_STATUS_SEC_TYPE_MSK) { + case RX_RES_STATUS_SEC_TYPE_TKIP: + if ((decrypt_res & RX_RES_STATUS_DECRYPT_TYPE_MSK) == + RX_RES_STATUS_BAD_ICV_MIC) + stats->flag |= RX_FLAG_MMIC_ERROR; + case RX_RES_STATUS_SEC_TYPE_WEP: + case RX_RES_STATUS_SEC_TYPE_CCMP: + if ((decrypt_res & RX_RES_STATUS_DECRYPT_TYPE_MSK) == + RX_RES_STATUS_DECRYPT_OK) { + IWL_DEBUG_RX("hw decrypt successfully!!!\n"); + stats->flag |= RX_FLAG_DECRYPTED; + } + break; + + default: + break; + } +} + +void iwl_handle_data_packet_monitor(struct iwl_priv *priv, + struct iwl_rx_mem_buffer *rxb, + void *data, short len, + struct ieee80211_rx_status *stats, + u16 
phy_flags) +{ + struct iwl_rt_rx_hdr *iwl_rt; + + /* First cache any information we need before we overwrite + * the information provided in the skb from the hardware */ + s8 signal = stats->ssi; + s8 noise = 0; + int rate = stats->rate; + u64 tsf = stats->mactime; + __le16 phy_flags_hw = cpu_to_le16(phy_flags); + + /* We received data from the HW, so stop the watchdog */ + if (len > IWL_RX_BUF_SIZE - sizeof(*iwl_rt)) { + IWL_DEBUG_DROP("Dropping too large packet in monitor\n"); + return; + } + + /* copy the frame data to write after where the radiotap header goes */ + iwl_rt = (void *)rxb->skb->data; + memmove(iwl_rt->payload, data, len); + + iwl_rt->rt_hdr.it_version = PKTHDR_RADIOTAP_VERSION; + iwl_rt->rt_hdr.it_pad = 0; /* always good to zero */ + + /* total header + data */ + iwl_rt->rt_hdr.it_len = cpu_to_le16(sizeof(*iwl_rt)); + + /* Set the size of the skb to the size of the frame */ + skb_put(rxb->skb, sizeof(*iwl_rt) + len); + + /* Big bitfield of all the fields we provide in radiotap */ + iwl_rt->rt_hdr.it_present = + cpu_to_le32((1 << IEEE80211_RADIOTAP_TSFT) | + (1 << IEEE80211_RADIOTAP_FLAGS) | + (1 << IEEE80211_RADIOTAP_RATE) | + (1 << IEEE80211_RADIOTAP_CHANNEL) | + (1 << IEEE80211_RADIOTAP_DBM_ANTSIGNAL) | + (1 << IEEE80211_RADIOTAP_DBM_ANTNOISE) | + (1 << IEEE80211_RADIOTAP_ANTENNA)); + + /* Zero the flags, we'll add to them as we go */ + iwl_rt->rt_flags = 0; + + iwl_rt->rt_tsf = cpu_to_le64(tsf); + + /* Convert to dBm */ + iwl_rt->rt_dbmsignal = signal; + iwl_rt->rt_dbmnoise = noise; + + /* Convert the channel frequency and set the flags */ + iwl_rt->rt_channelMHz = cpu_to_le16(stats->freq); + if (!(phy_flags_hw & RX_RES_PHY_FLAGS_BAND_24_MSK)) + iwl_rt->rt_chbitmask = + cpu_to_le16((IEEE80211_CHAN_OFDM | IEEE80211_CHAN_5GHZ)); + else if (phy_flags_hw & RX_RES_PHY_FLAGS_MOD_CCK_MSK) + iwl_rt->rt_chbitmask = + cpu_to_le16((IEEE80211_CHAN_CCK | IEEE80211_CHAN_2GHZ)); + else /* 802.11g */ + iwl_rt->rt_chbitmask = + cpu_to_le16((IEEE80211_CHAN_OFDM | IEEE80211_CHAN_2GHZ)); + + rate = iwl_rate_index_from_plcp(rate); + if (rate == -1) + iwl_rt->rt_rate = 0; + else + iwl_rt->rt_rate = iwl_rates[rate].ieee; + + /* antenna number */ + iwl_rt->rt_antenna = + le16_to_cpu(phy_flags_hw & RX_RES_PHY_FLAGS_ANTENNA_MSK) >> 4; + + /* set the preamble flag if we have it */ + if (phy_flags_hw & RX_RES_PHY_FLAGS_SHORT_PREAMBLE_MSK) + iwl_rt->rt_flags |= IEEE80211_RADIOTAP_F_SHORTPRE; + + IWL_DEBUG_RX("Rx packet of %d bytes.\n", rxb->skb->len); + + stats->flag |= RX_FLAG_RADIOTAP; + ieee80211_rx_irqsafe(priv->hw, rxb->skb, stats); + rxb->skb = NULL; +} + + +#define IWL_PACKET_RETRY_TIME HZ + +int is_duplicate_packet(struct iwl_priv *priv, struct ieee80211_hdr *header) +{ + u16 sc = le16_to_cpu(header->seq_ctrl); + u16 seq = (sc & IEEE80211_SCTL_SEQ) >> 4; + u16 frag = sc & IEEE80211_SCTL_FRAG; + u16 *last_seq, *last_frag; + unsigned long *last_time; + + switch (priv->iw_mode) { + case IEEE80211_IF_TYPE_IBSS:{ + struct list_head *p; + struct iwl_ibss_seq *entry = NULL; + u8 *mac = header->addr2; + int index = mac[5] & (IWL_IBSS_MAC_HASH_SIZE - 1); + + __list_for_each(p, &priv->ibss_mac_hash[index]) { + entry = + list_entry(p, struct iwl_ibss_seq, list); + if (!compare_ether_addr(entry->mac, mac)) + break; + } + if (p == &priv->ibss_mac_hash[index]) { + entry = kzalloc(sizeof(*entry), GFP_ATOMIC); + if (!entry) { + IWL_ERROR + ("Cannot malloc new mac entry\n"); + return 0; + } + memcpy(entry->mac, mac, ETH_ALEN); + entry->seq_num = seq; + entry->frag_num = frag; + entry->packet_time = 
jiffies; + list_add(&entry->list, + &priv->ibss_mac_hash[index]); + return 0; + } + last_seq = &entry->seq_num; + last_frag = &entry->frag_num; + last_time = &entry->packet_time; + break; + } + case IEEE80211_IF_TYPE_STA: + last_seq = &priv->last_seq_num; + last_frag = &priv->last_frag_num; + last_time = &priv->last_packet_time; + break; + default: + return 0; + } + if ((*last_seq == seq) && + time_after(*last_time + IWL_PACKET_RETRY_TIME, jiffies)) { + if (*last_frag == frag) + goto drop; + if (*last_frag + 1 != frag) + /* out-of-order fragment */ + goto drop; + } else + *last_seq = seq; + + *last_frag = frag; + *last_time = jiffies; + return 0; + + drop: + return 1; +} + +#ifdef CONFIG_IWLWIFI_SPECTRUM_MEASUREMENT + +#include "iwl-spectrum.h" + +#define BEACON_TIME_MASK_LOW 0x00FFFFFF +#define BEACON_TIME_MASK_HIGH 0xFF000000 +#define TIME_UNIT 1024 + +/* + * extended beacon time format + * time in usec will be changed into a 32-bit value in 8:24 format + * the high 1 byte is the beacon counts + * the lower 3 bytes is the time in usec within one beacon interval + */ + +static u32 iwl_usecs_to_beacons(u32 usec, u32 beacon_interval) +{ + u32 quot; + u32 rem; + u32 interval = beacon_interval * 1024; + + if (!interval || !usec) + return 0; + + quot = (usec / interval) & (BEACON_TIME_MASK_HIGH >> 24); + rem = (usec % interval) & BEACON_TIME_MASK_LOW; + + return (quot << 24) + rem; +} + +/* base is usually what we get from ucode with each received frame, + * the same as HW timer counter counting down + */ + +static __le32 iwl_add_beacon_time(u32 base, u32 addon, u32 beacon_interval) +{ + u32 base_low = base & BEACON_TIME_MASK_LOW; + u32 addon_low = addon & BEACON_TIME_MASK_LOW; + u32 interval = beacon_interval * TIME_UNIT; + u32 res = (base & BEACON_TIME_MASK_HIGH) + + (addon & BEACON_TIME_MASK_HIGH); + + if (base_low > addon_low) + res += base_low - addon_low; + else if (base_low < addon_low) { + res += interval + base_low - addon_low; + res += (1 << 24); + } else + res += (1 << 24); + + return cpu_to_le32(res); +} + +static int iwl_get_measurement(struct iwl_priv *priv, + struct ieee80211_measurement_params *params, + u8 type) +{ + struct iwl_spectrum_cmd spectrum; + struct iwl_rx_packet *res; + struct iwl_host_cmd cmd = { + .id = REPLY_SPECTRUM_MEASUREMENT_CMD, + .data = (void *)&spectrum, + .meta.flags = CMD_WANT_SKB, + }; + u32 add_time = le64_to_cpu(params->start_time); + int rc; + int spectrum_resp_status; + int duration = le16_to_cpu(params->duration); + + if (iwl_is_associated(priv)) + add_time = + iwl_usecs_to_beacons( + le64_to_cpu(params->start_time) - priv->last_tsf, + le16_to_cpu(priv->rxon_timing.beacon_interval)); + + memset(&spectrum, 0, sizeof(spectrum)); + + spectrum.channel_count = cpu_to_le16(1); + spectrum.flags = + RXON_FLG_TSF2HOST_MSK | RXON_FLG_ANT_A_MSK | RXON_FLG_DIS_DIV_MSK; + spectrum.filter_flags = MEASUREMENT_FILTER_FLAG; + cmd.len = sizeof(spectrum); + spectrum.len = cpu_to_le16(cmd.len - sizeof(spectrum.len)); + + if (iwl_is_associated(priv)) + spectrum.start_time = + iwl_add_beacon_time(priv->last_beacon_time, + add_time, + le16_to_cpu(priv->rxon_timing.beacon_interval)); + else + spectrum.start_time = 0; + + spectrum.channels[0].duration = cpu_to_le32(duration * TIME_UNIT); + spectrum.channels[0].channel = params->channel; + spectrum.channels[0].type = type; + if (priv->active_rxon.flags & RXON_FLG_BAND_24G_MSK) + spectrum.flags |= RXON_FLG_BAND_24G_MSK | + RXON_FLG_AUTO_DETECT_MSK | RXON_FLG_TGG_PROTECT_MSK; + + rc = iwl_send_cmd_sync(priv, &cmd); + if (rc) 
+ return rc; + + res = (struct iwl_rx_packet *)cmd.meta.u.skb->data; + if (res->hdr.flags & IWL_CMD_FAILED_MSK) { + IWL_ERROR("Bad return from REPLY_RX_ON_ASSOC command\n"); + rc = -EIO; + } + + spectrum_resp_status = le16_to_cpu(res->u.spectrum.status); + switch (spectrum_resp_status) { + case 0: /* Command will be handled */ + if (res->u.spectrum.id != 0xff) { + IWL_DEBUG_INFO + ("Replaced existing measurement: %d\n", + res->u.spectrum.id); + priv->measurement_status &= ~MEASUREMENT_READY; + } + priv->measurement_status |= MEASUREMENT_ACTIVE; + rc = 0; + break; + + case 1: /* Command will not be handled */ + rc = -EAGAIN; + break; + } + + dev_kfree_skb_any(cmd.meta.u.skb); + + return rc; +} +#endif + +static void iwl_txstatus_to_ieee(struct iwl_priv *priv, + struct iwl_tx_info *tx_sta) +{ + + tx_sta->status.ack_signal = 0; + tx_sta->status.excessive_retries = 0; + tx_sta->status.queue_length = 0; + tx_sta->status.queue_number = 0; + + if (in_interrupt()) + ieee80211_tx_status_irqsafe(priv->hw, + tx_sta->skb[0], &(tx_sta->status)); + else + ieee80211_tx_status(priv->hw, + tx_sta->skb[0], &(tx_sta->status)); + + tx_sta->skb[0] = NULL; +} + +/** + * iwl_tx_queue_reclaim - Reclaim Tx queue entries no more used by NIC. + * + * When FW advances 'R' index, all entries between old and + * new 'R' index need to be reclaimed. As result, some free space + * forms. If there is enough free space (> low mark), wake Tx queue. + */ +int iwl_tx_queue_reclaim(struct iwl_priv *priv, int txq_id, int index) +{ + struct iwl_tx_queue *txq = &priv->txq[txq_id]; + struct iwl_queue *q = &txq->q; + int nfreed = 0; + + if ((index >= q->n_bd) || (x2_queue_used(q, index) == 0)) { + IWL_ERROR("Read index for DMA queue txq id (%d), index %d, " + "is out of range [0-%d] %d %d.\n", txq_id, + index, q->n_bd, q->first_empty, q->last_used); + return 0; + } + + for (index = iwl_queue_inc_wrap(index, q->n_bd); + q->last_used != index; + q->last_used = iwl_queue_inc_wrap(q->last_used, q->n_bd)) { + if (txq_id != IWL_CMD_QUEUE_NUM) { + iwl_txstatus_to_ieee(priv, + &(txq->txb[txq->q.last_used])); + iwl_hw_txq_free_tfd(priv, txq); + } else if (nfreed > 1) { + IWL_ERROR("HCMD skipped: index (%d) %d %d\n", index, + q->first_empty, q->last_used); + queue_work(priv->workqueue, &priv->restart); + } + nfreed++; + } + + if (iwl_queue_space(q) > q->low_mark && (txq_id >= 0) && + (txq_id != IWL_CMD_QUEUE_NUM) && + priv->mac80211_registered) + ieee80211_wake_queue(priv->hw, txq_id); + + + return nfreed; +} + +static int iwl_is_tx_success(u32 status) +{ + return (status & 0xFF) == 0x1; +} + +/****************************************************************************** + * + * Generic RX handler implementations + * + ******************************************************************************/ +static void iwl_rx_reply_tx(struct iwl_priv *priv, + struct iwl_rx_mem_buffer *rxb) +{ + struct iwl_rx_packet *pkt = (void *)rxb->skb->data; + u16 sequence = le16_to_cpu(pkt->hdr.sequence); + int txq_id = SEQ_TO_QUEUE(sequence); + int index = SEQ_TO_INDEX(sequence); + struct iwl_tx_queue *txq = &priv->txq[txq_id]; + struct ieee80211_tx_status *tx_status; + struct iwl_tx_resp *tx_resp = (void *)&pkt->u.raw[0]; + u32 status = le32_to_cpu(tx_resp->status); + + if ((index >= txq->q.n_bd) || (x2_queue_used(&txq->q, index) == 0)) { + IWL_ERROR("Read index for DMA queue txq_id (%d) index %d " + "is out of range [0-%d] %d %d\n", txq_id, + index, txq->q.n_bd, txq->q.first_empty, + txq->q.last_used); + return; + } + + tx_status = 
&(txq->txb[txq->q.last_used].status); + + tx_status->retry_count = tx_resp->failure_frame; + tx_status->queue_number = status; + tx_status->queue_length = tx_resp->bt_kill_count; + tx_status->queue_length |= tx_resp->failure_rts; + + tx_status->flags = + iwl_is_tx_success(status) ? IEEE80211_TX_STATUS_ACK : 0; + + tx_status->control.tx_rate = iwl_rate_index_from_plcp(tx_resp->rate); + + IWL_DEBUG_TX("Tx queue %d Status %s (0x%08x) plcp rate %d retries %d\n", + txq_id, iwl_get_tx_fail_reason(status), status, + tx_resp->rate, tx_resp->failure_frame); + + IWL_DEBUG_TX_REPLY("Tx queue reclaim %d\n", index); + if (index != -1) + iwl_tx_queue_reclaim(priv, txq_id, index); + + if (iwl_check_bits(status, TX_ABORT_REQUIRED_MSK)) + IWL_ERROR("TODO: Implement Tx ABORT REQUIRED!!!\n"); +} + + +static void iwl_rx_reply_alive(struct iwl_priv *priv, + struct iwl_rx_mem_buffer *rxb) +{ + struct iwl_rx_packet *pkt = (void *)rxb->skb->data; + struct iwl_alive_resp *palive; + struct delayed_work *pwork; + + palive = &pkt->u.alive_frame; + + IWL_DEBUG_INFO("Alive ucode status 0x%08X revision " + "0x%01X 0x%01X\n", + palive->is_valid, palive->ver_type, + palive->ver_subtype); + + if (palive->ver_subtype == INITIALIZE_SUBTYPE) { + IWL_DEBUG_INFO("Initialization Alive received.\n"); + memcpy(&priv->card_alive_init, + &pkt->u.alive_frame, + sizeof(struct iwl_init_alive_resp)); + pwork = &priv->init_alive_start; + } else { + IWL_DEBUG_INFO("Runtime Alive received.\n"); + memcpy(&priv->card_alive, &pkt->u.alive_frame, + sizeof(struct iwl_alive_resp)); + pwork = &priv->alive_start; + iwl_disable_events(priv); + } + + /* We delay the ALIVE response by 5ms to + * give the HW RF Kill time to activate... */ + if (palive->is_valid == UCODE_VALID_OK) + queue_delayed_work(priv->workqueue, pwork, + msecs_to_jiffies(5)); + else + IWL_WARNING("uCode did not respond OK.\n"); +} + +static void iwl_rx_reply_add_sta(struct iwl_priv *priv, + struct iwl_rx_mem_buffer *rxb) +{ + struct iwl_rx_packet *pkt = (void *)rxb->skb->data; + + IWL_DEBUG_RX("Received REPLY_ADD_STA: 0x%02X\n", pkt->u.status); + return; +} + +static void iwl_rx_reply_error(struct iwl_priv *priv, + struct iwl_rx_mem_buffer *rxb) +{ + struct iwl_rx_packet *pkt = (void *)rxb->skb->data; + + IWL_ERROR("Error Reply type 0x%08X cmd %s (0x%02X) " + "seq 0x%04X ser 0x%08X\n", + le32_to_cpu(pkt->u.err_resp.error_type), + get_cmd_string(pkt->u.err_resp.cmd_id), + pkt->u.err_resp.cmd_id, + le16_to_cpu(pkt->u.err_resp.bad_cmd_seq_num), + le32_to_cpu(pkt->u.err_resp.error_info)); +} + +#define TX_STATUS_ENTRY(x) case TX_STATUS_FAIL_ ## x: return #x + +static void iwl_rx_csa(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb) +{ + struct iwl_rx_packet *pkt = (void *)rxb->skb->data; + struct iwl_rxon_cmd *rxon = (void *)&priv->active_rxon; + struct iwl_csa_notification *csa = &(pkt->u.csa_notif); + IWL_DEBUG_11H("CSA notif: channel %d, status %d\n", + le16_to_cpu(csa->channel), le32_to_cpu(csa->status)); + rxon->channel = csa->channel; + priv->staging_rxon.channel = csa->channel; +} + +static void iwl_rx_spectrum_measure_notif(struct iwl_priv *priv, + struct iwl_rx_mem_buffer *rxb) +{ +#ifdef CONFIG_IWLWIFI_SPECTRUM_MEASUREMENT + struct iwl_rx_packet *pkt = (void *)rxb->skb->data; + struct iwl_spectrum_notification *report = &(pkt->u.spectrum_notif); + + if (!report->state) { + IWL_DEBUG(IWL_DL_11H | IWL_DL_INFO, + "Spectrum Measure Notification: Start\n"); + return; + } + + memcpy(&priv->measure_report, report, sizeof(*report)); + priv->measurement_status |= 
MEASUREMENT_READY; +#endif +} + +static void iwl_rx_pm_sleep_notif(struct iwl_priv *priv, + struct iwl_rx_mem_buffer *rxb) +{ +#ifdef CONFIG_IWLWIFI_DEBUG + struct iwl_rx_packet *pkt = (void *)rxb->skb->data; + struct iwl_sleep_notification *sleep = &(pkt->u.sleep_notif); + IWL_DEBUG_RX("sleep mode: %d, src: %d\n", + sleep->pm_sleep_mode, sleep->pm_wakeup_src); +#endif +} + +static void iwl_rx_pm_debug_statistics_notif(struct iwl_priv *priv, + struct iwl_rx_mem_buffer *rxb) +{ + struct iwl_rx_packet *pkt = (void *)rxb->skb->data; + IWL_DEBUG_RADIO("Dumping %d bytes of unhandled " + "notification for %s:\n", + le32_to_cpu(pkt->len), get_cmd_string(pkt->hdr.cmd)); + iwl_print_hex_dump(IWL_DL_RADIO, pkt->u.raw, le32_to_cpu(pkt->len)); +} + +static void iwl_bg_beacon_update(struct work_struct *work) +{ + struct iwl_priv *priv = + container_of(work, struct iwl_priv, beacon_update); + struct sk_buff *beacon; + + /* Pull updated AP beacon from mac80211. will fail if not in AP mode */ + beacon = ieee80211_beacon_get(priv->hw, priv->interface_id, NULL); + + if (!beacon) { + IWL_ERROR("update beacon failed\n"); + return; + } + + mutex_lock(&priv->mutex); + /* new beacon skb is allocated every time; dispose previous.*/ + if (priv->ibss_beacon) + dev_kfree_skb(priv->ibss_beacon); + + priv->ibss_beacon = beacon; + mutex_unlock(&priv->mutex); + + iwl_send_beacon_cmd(priv); +} + +static void iwl_rx_beacon_notif(struct iwl_priv *priv, + struct iwl_rx_mem_buffer *rxb) +{ +#ifdef CONFIG_IWLWIFI_DEBUG + struct iwl_rx_packet *pkt = (void *)rxb->skb->data; + struct iwl_beacon_notif *beacon = &(pkt->u.beacon_status); + u8 rate = beacon->beacon_notify_hdr.rate; + + IWL_DEBUG_RX("beacon status %x retries %d iss %d " + "tsf %d %d rate %d\n", + le32_to_cpu(beacon->beacon_notify_hdr.status) & TX_STATUS_MSK, + beacon->beacon_notify_hdr.failure_frame, + le32_to_cpu(beacon->ibss_mgr_status), + le32_to_cpu(beacon->high_tsf), + le32_to_cpu(beacon->low_tsf), rate); +#endif + + if ((priv->iw_mode == IEEE80211_IF_TYPE_AP) && + (!test_bit(STATUS_EXIT_PENDING, &priv->status))) + queue_work(priv->workqueue, &priv->beacon_update); +} + +/* Service response to REPLY_SCAN_CMD (0x80) */ +static void iwl_rx_reply_scan(struct iwl_priv *priv, + struct iwl_rx_mem_buffer *rxb) +{ +#ifdef CONFIG_IWLWIFI_DEBUG + struct iwl_rx_packet *pkt = (void *)rxb->skb->data; + struct iwl_scanreq_notification *notif = + (struct iwl_scanreq_notification *)pkt->u.raw; + + IWL_DEBUG_RX("Scan request status = 0x%x\n", notif->status); +#endif +} + +/* Service SCAN_START_NOTIFICATION (0x82) */ +static void iwl_rx_scan_start_notif(struct iwl_priv *priv, + struct iwl_rx_mem_buffer *rxb) +{ + struct iwl_rx_packet *pkt = (void *)rxb->skb->data; + struct iwl_scanstart_notification *notif = + (struct iwl_scanstart_notification *)pkt->u.raw; + priv->scan_start_tsf = le32_to_cpu(notif->tsf_low); + IWL_DEBUG_SCAN("Scan start: " + "%d [802.11%s] " + "(TSF: 0x%08X:%08X) - %d (beacon timer %u)\n", + notif->channel, + notif->band ? 
"bg" : "a", + notif->tsf_high, + notif->tsf_low, notif->status, notif->beacon_timer); +} + +/* Service SCAN_RESULTS_NOTIFICATION (0x83) */ +static void iwl_rx_scan_results_notif(struct iwl_priv *priv, + struct iwl_rx_mem_buffer *rxb) +{ + struct iwl_rx_packet *pkt = (void *)rxb->skb->data; + struct iwl_scanresults_notification *notif = + (struct iwl_scanresults_notification *)pkt->u.raw; + + IWL_DEBUG_SCAN("Scan ch.res: " + "%d [802.11%s] " + "(TSF: 0x%08X:%08X) - %d " + "elapsed=%lu usec (%dms since last)\n", + notif->channel, + notif->band ? "bg" : "a", + le32_to_cpu(notif->tsf_high), + le32_to_cpu(notif->tsf_low), + le32_to_cpu(notif->statistics[0]), + le32_to_cpu(notif->tsf_low) - priv->scan_start_tsf, + jiffies_to_msecs(elapsed_jiffies + (priv->last_scan_jiffies, jiffies))); + + priv->last_scan_jiffies = jiffies; +} + +/* Service SCAN_COMPLETE_NOTIFICATION (0x84) */ +static void iwl_rx_scan_complete_notif(struct iwl_priv *priv, + struct iwl_rx_mem_buffer *rxb) +{ + struct iwl_rx_packet *pkt = (void *)rxb->skb->data; + struct iwl_scancomplete_notification *scan_notif = (void *)pkt->u.raw; + + IWL_DEBUG_SCAN("Scan complete: %d channels (TSF 0x%08X:%08X) - %d\n", + scan_notif->scanned_channels, + scan_notif->tsf_low, + scan_notif->tsf_high, scan_notif->status); + + /* The HW is no longer scanning */ + clear_bit(STATUS_SCAN_HW, &priv->status); + + /* The scan completion notification came in, so kill that timer... */ + cancel_delayed_work(&priv->scan_check); + + IWL_DEBUG_INFO("Scan pass on %sGHz took %dms\n", + (priv->scan_bands == 2) ? "2.4" : "5.2", + jiffies_to_msecs(elapsed_jiffies + (priv->scan_pass_start, jiffies))); + + /* Remove this scanned band from the list + * of pending bands to scan */ + priv->scan_bands--; + + /* If a request to abort was given, or the scan did not succeed + * then we reset the scan state machine and terminate, + * re-queuing another scan if one has been requested */ + if (test_bit(STATUS_SCAN_ABORTING, &priv->status)) { + IWL_DEBUG_INFO("Aborted scan completed.\n"); + clear_bit(STATUS_SCAN_ABORTING, &priv->status); + } else { + /* If there are more bands on this scan pass reschedule */ + if (priv->scan_bands > 0) + goto reschedule; + } + + priv->last_scan_jiffies = jiffies; + IWL_DEBUG_INFO("Setting scan to off\n"); + + clear_bit(STATUS_SCANNING, &priv->status); + + IWL_DEBUG_INFO("Scan took %dms\n", + jiffies_to_msecs(elapsed_jiffies(priv->scan_start, jiffies))); + + queue_work(priv->workqueue, &priv->scan_completed); + + return; + +reschedule: + priv->scan_pass_start = jiffies; + queue_work(priv->workqueue, &priv->request_scan); +} + +/* Handle notification from uCode that card's power state is changing + * due to software, hardware, or critical temperature RFKILL */ +static void iwl_rx_card_state_notif(struct iwl_priv *priv, + struct iwl_rx_mem_buffer *rxb) +{ + struct iwl_rx_packet *pkt = (void *)rxb->skb->data; + u32 flags = le32_to_cpu(pkt->u.card_state_notif.flags); + unsigned long status = priv->status; + + IWL_DEBUG_RF_KILL("Card state received: HW:%s SW:%s\n", + (flags & HW_CARD_DISABLED) ? "Kill" : "On", + (flags & SW_CARD_DISABLED) ? 
"Kill" : "On"); + + iwl_write32(priv, CSR_UCODE_DRV_GP1_SET, + CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED); + + if (flags & HW_CARD_DISABLED) + set_bit(STATUS_RF_KILL_HW, &priv->status); + else + clear_bit(STATUS_RF_KILL_HW, &priv->status); + + + if (flags & SW_CARD_DISABLED) + set_bit(STATUS_RF_KILL_SW, &priv->status); + else + clear_bit(STATUS_RF_KILL_SW, &priv->status); + + iwl_scan_cancel(priv); + + if ((test_bit(STATUS_RF_KILL_HW, &status) != + test_bit(STATUS_RF_KILL_HW, &priv->status)) || + (test_bit(STATUS_RF_KILL_SW, &status) != + test_bit(STATUS_RF_KILL_SW, &priv->status))) + queue_work(priv->workqueue, &priv->rf_kill); + else + wake_up_interruptible(&priv->wait_command_queue); +} + +/** + * iwl_setup_rx_handlers - Initialize Rx handler callbacks + * + * Setup the RX handlers for each of the reply types sent from the uCode + * to the host. + * + * This function chains into the hardware specific files for them to setup + * any hardware specific handlers as well. + */ +static void iwl_setup_rx_handlers(struct iwl_priv *priv) +{ + priv->rx_handlers[REPLY_ALIVE] = iwl_rx_reply_alive; + priv->rx_handlers[REPLY_ADD_STA] = iwl_rx_reply_add_sta; + priv->rx_handlers[REPLY_ERROR] = iwl_rx_reply_error; + priv->rx_handlers[CHANNEL_SWITCH_NOTIFICATION] = iwl_rx_csa; + priv->rx_handlers[SPECTRUM_MEASURE_NOTIFICATION] = + iwl_rx_spectrum_measure_notif; + priv->rx_handlers[PM_SLEEP_NOTIFICATION] = iwl_rx_pm_sleep_notif; + priv->rx_handlers[PM_DEBUG_STATISTIC_NOTIFIC] = + iwl_rx_pm_debug_statistics_notif; + priv->rx_handlers[BEACON_NOTIFICATION] = iwl_rx_beacon_notif; + + /* NOTE: iwl_rx_statistics is different based on whether + * the build is for the 3945 or the 4965. See the + * corresponding implementation in iwl-XXXX.c + * + * The same handler is used for both the REPLY to a + * discrete statistics request from the host as well as + * for the periodic statistics notification from the uCode + */ + priv->rx_handlers[REPLY_STATISTICS_CMD] = iwl_hw_rx_statistics; + priv->rx_handlers[STATISTICS_NOTIFICATION] = iwl_hw_rx_statistics; + + priv->rx_handlers[REPLY_SCAN_CMD] = iwl_rx_reply_scan; + priv->rx_handlers[SCAN_START_NOTIFICATION] = iwl_rx_scan_start_notif; + priv->rx_handlers[SCAN_RESULTS_NOTIFICATION] = + iwl_rx_scan_results_notif; + priv->rx_handlers[SCAN_COMPLETE_NOTIFICATION] = + iwl_rx_scan_complete_notif; + priv->rx_handlers[CARD_STATE_NOTIFICATION] = iwl_rx_card_state_notif; + priv->rx_handlers[REPLY_TX] = iwl_rx_reply_tx; + + /* Setup hardware specific Rx handlers */ + iwl_hw_rx_handler_setup(priv); +} + +/** + * iwl_tx_cmd_complete - Pull unused buffers off the queue and reclaim them + * @rxb: Rx buffer to reclaim + * + * If an Rx buffer has an async callback associated with it the callback + * will be executed. The attached skb (if present) will only be freed + * if the callback returns 1 + */ +static void iwl_tx_cmd_complete(struct iwl_priv *priv, + struct iwl_rx_mem_buffer *rxb) +{ + struct iwl_rx_packet *pkt = (struct iwl_rx_packet *)rxb->skb->data; + u16 sequence = le16_to_cpu(pkt->hdr.sequence); + int txq_id = SEQ_TO_QUEUE(sequence); + int index = SEQ_TO_INDEX(sequence); + int huge = sequence & SEQ_HUGE_FRAME; + int cmd_index; + struct iwl_cmd *cmd; + + /* If a Tx command is being handled and it isn't in the actual + * command queue then there a command routing bug has been introduced + * in the queue management code. 
*/ + if (txq_id != IWL_CMD_QUEUE_NUM) + IWL_ERROR("Error wrong command queue %d command id 0x%X\n", + txq_id, pkt->hdr.cmd); + BUG_ON(txq_id != IWL_CMD_QUEUE_NUM); + + cmd_index = get_cmd_index(&priv->txq[IWL_CMD_QUEUE_NUM].q, index, huge); + cmd = &priv->txq[IWL_CMD_QUEUE_NUM].cmd[cmd_index]; + + /* Input error checking is done when commands are added to queue. */ + if (cmd->meta.flags & CMD_WANT_SKB) { + cmd->meta.source->u.skb = rxb->skb; + rxb->skb = NULL; + } else if (cmd->meta.u.callback && + !cmd->meta.u.callback(priv, cmd, rxb->skb)) + rxb->skb = NULL; + + iwl_tx_queue_reclaim(priv, txq_id, index); + + if (!(cmd->meta.flags & CMD_ASYNC)) { + clear_bit(STATUS_HCMD_ACTIVE, &priv->status); + wake_up_interruptible(&priv->wait_command_queue); + } +} + +/************************** RX-FUNCTIONS ****************************/ +/* + * Rx theory of operation + * + * The host allocates 32 DMA target addresses and passes the host address + * to the firmware at register IWL_RFDS_TABLE_LOWER + N * RFD_SIZE where N is + * 0 to 31 + * + * Rx Queue Indexes + * The host/firmware share two index registers for managing the Rx buffers. + * + * The READ index maps to the first position that the firmware may be writing + * to -- the driver can read up to (but not including) this position and get + * good data. + * The READ index is managed by the firmware once the card is enabled. + * + * The WRITE index maps to the last position the driver has read from -- the + * position preceding WRITE is the last slot the firmware can place a packet. + * + * The queue is empty (no good data) if WRITE = READ - 1, and is full if + * WRITE = READ. + * + * During initialization the host sets up the READ queue position to the first + * INDEX position, and WRITE to the last (READ - 1 wrapped) + * + * When the firmware places a packet in a buffer it will advance the READ index + * and fire the RX interrupt. The driver can then query the READ index and + * process as many packets as possible, moving the WRITE index forward as it + * resets the Rx queue buffers with new memory. + * + * The management in the driver is as follows: + * + A list of pre-allocated SKBs is stored in iwl->rxq->rx_free. When + * iwl->rxq->free_count drops to or below RX_LOW_WATERMARK, work is scheduled + * to replensish the iwl->rxq->rx_free. + * + In iwl_rx_replenish (scheduled) if 'processed' != 'read' then the + * iwl->rxq is replenished and the READ INDEX is updated (updating the + * 'processed' and 'read' driver indexes as well) + * + A received packet is processed and handed to the kernel network stack, + * detached from the iwl->rxq. The driver 'processed' index is updated. + * + The Host/Firmware iwl->rxq is replenished at tasklet time from the rx_free + * list. If there are no allocated buffers in iwl->rxq->rx_free, the READ + * INDEX is not incremented and iwl->status(RX_STALLED) is set. If there + * were enough free buffers and RX_STALLED is set it is cleared. + * + * + * Driver sequence: + * + * iwl_rx_queue_alloc() Allocates rx_free + * iwl_rx_replenish() Replenishes rx_free list from rx_used, and calls + * iwl_rx_queue_restock + * iwl_rx_queue_restock() Moves available buffers from rx_free into Rx + * queue, updates firmware pointers, and updates + * the WRITE index. If insufficient rx_free buffers + * are available, schedules iwl_rx_replenish + * + * -- enable interrupts -- + * ISR - iwl_rx() Detach iwl_rx_mem_buffers from pool up to the + * READ INDEX, detaching the SKB from the pool. + * Moves the packet buffer from queue to rx_used. 
+ * Calls iwl_rx_queue_restock to refill any empty + * slots. + * ... + * + */ + +/** + * iwl_rx_queue_space - Return number of free slots available in queue. + */ +static int iwl_rx_queue_space(const struct iwl_rx_queue *q) +{ + int s = q->read - q->write; + if (s <= 0) + s += RX_QUEUE_SIZE; + /* keep some buffer to not confuse full and empty queue */ + s -= 2; + if (s < 0) + s = 0; + return s; +} + +/** + * iwl_rx_queue_update_write_ptr - Update the write pointer for the RX queue + * + * NOTE: This function has 3945 and 4965 specific code sections + * but is declared in base due to the majority of the + * implementation being the same (only a numeric constant is + * different) + * + */ +int iwl_rx_queue_update_write_ptr(struct iwl_priv *priv, struct iwl_rx_queue *q) +{ + u32 reg = 0; + int rc = 0; + unsigned long flags; + + spin_lock_irqsave(&q->lock, flags); + + if (q->need_update == 0) + goto exit_unlock; + + if (test_bit(STATUS_POWER_PMI, &priv->status)) { + reg = iwl_read32(priv, CSR_UCODE_DRV_GP1); + + if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) { + iwl_set_bit(priv, CSR_GP_CNTRL, + CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); + goto exit_unlock; + } + + rc = iwl_grab_restricted_access(priv); + if (rc) + goto exit_unlock; + + iwl_write_restricted(priv, FH_RSCSR_CHNL0_WPTR, + q->write & ~0x7); + iwl_release_restricted_access(priv); + } else + iwl_write32(priv, FH_RSCSR_CHNL0_WPTR, q->write & ~0x7); + + + q->need_update = 0; + + exit_unlock: + spin_unlock_irqrestore(&q->lock, flags); + return rc; +} + +/** + * iwl_dma_addr2rbd_ptr - convert a DMA address to a uCode read buffer pointer. + * + * NOTE: This function has 3945 and 4965 specific code paths in it. + */ +static inline __le32 iwl_dma_addr2rbd_ptr(struct iwl_priv *priv, + dma_addr_t dma_addr) +{ + return cpu_to_le32((u32)dma_addr); +} + +/** + * iwl_rx_queue_restock - refill RX queue from pre-allocated pool + * + * If there are slots in the RX queue that need to be restocked, + * and we have free pre-allocated buffers, fill the ranks as much + * as we can pulling from rx_free. + * + * This moves the 'write' index forward to catch up with 'processed', and + * also updates the memory address in the firmware to reference the new + * target buffer. 
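+ * Note that the write index passed to the device is aligned down to a
+ * multiple of 8 (the '& ~0x7' below), so new buffers become visible to
+ * the firmware in batches of eight.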
+ */
+int iwl_rx_queue_restock(struct iwl_priv *priv)
+{
+        struct iwl_rx_queue *rxq = &priv->rxq;
+        struct list_head *element;
+        struct iwl_rx_mem_buffer *rxb;
+        unsigned long flags;
+        int write, rc;
+
+        spin_lock_irqsave(&rxq->lock, flags);
+        write = rxq->write & ~0x7;
+        while ((iwl_rx_queue_space(rxq) > 0) && (rxq->free_count)) {
+                element = rxq->rx_free.next;
+                rxb = list_entry(element, struct iwl_rx_mem_buffer, list);
+                list_del(element);
+                rxq->bd[rxq->write] = iwl_dma_addr2rbd_ptr(priv, rxb->dma_addr);
+                rxq->queue[rxq->write] = rxb;
+                rxq->write = (rxq->write + 1) & RX_QUEUE_MASK;
+                rxq->free_count--;
+        }
+        spin_unlock_irqrestore(&rxq->lock, flags);
+        /* If the pre-allocated buffer pool is dropping low, schedule to
+         * refill it */
+        if (rxq->free_count <= RX_LOW_WATERMARK)
+                queue_work(priv->workqueue, &priv->rx_replenish);
+
+        /* If we've added more space for the firmware to place data, tell it */
+        if ((write != (rxq->write & ~0x7))
+            || (abs(rxq->write - rxq->read) > 7)) {
+                spin_lock_irqsave(&rxq->lock, flags);
+                rxq->need_update = 1;
+                spin_unlock_irqrestore(&rxq->lock, flags);
+                rc = iwl_rx_queue_update_write_ptr(priv, rxq);
+                if (rc)
+                        return rc;
+        }
+
+        return 0;
+}
+
+/**
+ * iwl_rx_replenish - Move all used packets from rx_used to rx_free
+ *
+ * When moving to rx_free an SKB is allocated for the slot.
+ *
+ * Also restock the Rx queue via iwl_rx_queue_restock.
+ * This is called as a scheduled work item (except during initialization)
+ */
+void iwl_rx_replenish(void *data)
+{
+        struct iwl_priv *priv = data;
+        struct iwl_rx_queue *rxq = &priv->rxq;
+        struct list_head *element;
+        struct iwl_rx_mem_buffer *rxb;
+        unsigned long flags;
+        spin_lock_irqsave(&rxq->lock, flags);
+        while (!list_empty(&rxq->rx_used)) {
+                element = rxq->rx_used.next;
+                rxb = list_entry(element, struct iwl_rx_mem_buffer, list);
+                rxb->skb =
+                    alloc_skb(IWL_RX_BUF_SIZE, __GFP_NOWARN | GFP_ATOMIC);
+                if (!rxb->skb) {
+                        if (net_ratelimit())
+                                printk(KERN_CRIT DRV_NAME
+                                       ": Can not allocate SKB buffers\n");
+                        /* We don't reschedule replenish work here -- we will
+                         * call the restock method and if it still needs
+                         * more buffers it will schedule replenish */
+                        break;
+                }
+                priv->alloc_rxb_skb++;
+                list_del(element);
+                rxb->dma_addr =
+                    pci_map_single(priv->pci_dev, rxb->skb->data,
+                                   IWL_RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
+                list_add_tail(&rxb->list, &rxq->rx_free);
+                rxq->free_count++;
+        }
+        spin_unlock_irqrestore(&rxq->lock, flags);
+
+        spin_lock_irqsave(&priv->lock, flags);
+        iwl_rx_queue_restock(priv);
+        spin_unlock_irqrestore(&priv->lock, flags);
+}
+
+/* Assumes that the skb field of the buffers in 'pool' is kept accurate.
+ * If an SKB has been detached, the POOL needs to have it's SKB set to NULL + * This free routine walks the list of POOL entries and if SKB is set to + * non NULL it is unmapped and freed + */ +void iwl_rx_queue_free(struct iwl_priv *priv, struct iwl_rx_queue *rxq) +{ + int i; + for (i = 0; i < RX_QUEUE_SIZE + RX_FREE_BUFFERS; i++) { + if (rxq->pool[i].skb != NULL) { + pci_unmap_single(priv->pci_dev, + rxq->pool[i].dma_addr, + IWL_RX_BUF_SIZE, PCI_DMA_FROMDEVICE); + dev_kfree_skb(rxq->pool[i].skb); + } + } + + pci_free_consistent(priv->pci_dev, 4 * RX_QUEUE_SIZE, rxq->bd, + rxq->dma_addr); + rxq->bd = NULL; +} + +int iwl_rx_queue_alloc(struct iwl_priv *priv) +{ + struct iwl_rx_queue *rxq = &priv->rxq; + struct pci_dev *dev = priv->pci_dev; + int i; + + spin_lock_init(&rxq->lock); + INIT_LIST_HEAD(&rxq->rx_free); + INIT_LIST_HEAD(&rxq->rx_used); + rxq->bd = pci_alloc_consistent(dev, 4 * RX_QUEUE_SIZE, &rxq->dma_addr); + if (!rxq->bd) + return -ENOMEM; + /* Fill the rx_used queue with _all_ of the Rx buffers */ + for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) + list_add_tail(&rxq->pool[i].list, &rxq->rx_used); + /* Set us so that we have processed and used all buffers, but have + * not restocked the Rx queue with fresh buffers */ + rxq->read = rxq->write = 0; + rxq->free_count = 0; + rxq->need_update = 0; + return 0; +} + +void iwl_rx_queue_reset(struct iwl_priv *priv, struct iwl_rx_queue *rxq) +{ + unsigned long flags; + int i; + spin_lock_irqsave(&rxq->lock, flags); + INIT_LIST_HEAD(&rxq->rx_free); + INIT_LIST_HEAD(&rxq->rx_used); + /* Fill the rx_used queue with _all_ of the Rx buffers */ + for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) { + /* In the reset function, these buffers may have been allocated + * to an SKB, so we need to unmap and free potential storage */ + if (rxq->pool[i].skb != NULL) { + pci_unmap_single(priv->pci_dev, + rxq->pool[i].dma_addr, + IWL_RX_BUF_SIZE, PCI_DMA_FROMDEVICE); + priv->alloc_rxb_skb--; + dev_kfree_skb(rxq->pool[i].skb); + rxq->pool[i].skb = NULL; + } + list_add_tail(&rxq->pool[i].list, &rxq->rx_used); + } + + /* Set us so that we have processed and used all buffers, but have + * not restocked the Rx queue with fresh buffers */ + rxq->read = rxq->write = 0; + rxq->free_count = 0; + spin_unlock_irqrestore(&rxq->lock, flags); +} + +/* Convert linear signal-to-noise ratio into dB */ +static u8 ratio2dB[100] = { +/* 0 1 2 3 4 5 6 7 8 9 */ + 0, 0, 6, 10, 12, 14, 16, 17, 18, 19, /* 00 - 09 */ + 20, 21, 22, 22, 23, 23, 24, 25, 26, 26, /* 10 - 19 */ + 26, 26, 26, 27, 27, 28, 28, 28, 29, 29, /* 20 - 29 */ + 29, 30, 30, 30, 31, 31, 31, 31, 32, 32, /* 30 - 39 */ + 32, 32, 32, 33, 33, 33, 33, 33, 34, 34, /* 40 - 49 */ + 34, 34, 34, 34, 35, 35, 35, 35, 35, 35, /* 50 - 59 */ + 36, 36, 36, 36, 36, 36, 36, 37, 37, 37, /* 60 - 69 */ + 37, 37, 37, 37, 37, 38, 38, 38, 38, 38, /* 70 - 79 */ + 38, 38, 38, 38, 38, 39, 39, 39, 39, 39, /* 80 - 89 */ + 39, 39, 39, 39, 39, 40, 40, 40, 40, 40 /* 90 - 99 */ +}; + +/* Calculates a relative dB value from a ratio of linear + * (i.e. not dB) signal levels. + * Conversion assumes that levels are voltages (20*log), not powers (10*log). 
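+ * For example, a 10:1 voltage ratio is 20 dB (20*log10(10)), which matches
+ * ratio2dB[10] above, and anything above 1000:1 is clamped to 60 dB by the
+ * function below.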
*/ +int iwl_calc_db_from_ratio(int sig_ratio) +{ + /* Anything above 1000:1 just report as 60 dB */ + if (sig_ratio > 1000) + return 60; + + /* Above 100:1, divide by 10 and use table, + * add 20 dB to make up for divide by 10 */ + if (sig_ratio > 100) + return (20 + (int)ratio2dB[sig_ratio/10]); + + /* We shouldn't see this */ + if (sig_ratio < 1) + return 0; + + /* Use table for ratios 1:1 - 99:1 */ + return (int)ratio2dB[sig_ratio]; +} + +#define PERFECT_RSSI (-20) /* dBm */ +#define WORST_RSSI (-95) /* dBm */ +#define RSSI_RANGE (PERFECT_RSSI - WORST_RSSI) + +/* Calculate an indication of rx signal quality (a percentage, not dBm!). + * See http://www.ces.clemson.edu/linux/signal_quality.shtml for info + * about formulas used below. */ +int iwl_calc_sig_qual(int rssi_dbm, int noise_dbm) +{ + int sig_qual; + int degradation = PERFECT_RSSI - rssi_dbm; + + /* If we get a noise measurement, use signal-to-noise ratio (SNR) + * as indicator; formula is (signal dbm - noise dbm). + * SNR at or above 40 is a great signal (100%). + * Below that, scale to fit SNR of 0 - 40 dB within 0 - 100% indicator. + * Weakest usable signal is usually 10 - 15 dB SNR. */ + if (noise_dbm) { + if (rssi_dbm - noise_dbm >= 40) + return 100; + else if (rssi_dbm < noise_dbm) + return 0; + sig_qual = ((rssi_dbm - noise_dbm) * 5) / 2; + + /* Else use just the signal level. + * This formula is a least squares fit of data points collected and + * compared with a reference system that had a percentage (%) display + * for signal quality. */ + } else + sig_qual = (100 * (RSSI_RANGE * RSSI_RANGE) - degradation * + (15 * RSSI_RANGE + 62 * degradation)) / + (RSSI_RANGE * RSSI_RANGE); + + if (sig_qual > 100) + sig_qual = 100; + else if (sig_qual < 1) + sig_qual = 0; + + return sig_qual; +} + +/** + * iwl_rx_handle - Main entry function for receiving responses from the uCode + * + * Uses the priv->rx_handlers callback function array to invoke + * the appropriate handlers, including command responses, + * frame-received notifications, and other notifications. + */ +static void iwl_rx_handle(struct iwl_priv *priv) +{ + struct iwl_rx_mem_buffer *rxb; + struct iwl_rx_packet *pkt; + struct iwl_rx_queue *rxq = &priv->rxq; + u32 r, i; + int reclaim; + unsigned long flags; + + r = iwl_hw_get_rx_read(priv); + i = rxq->read; + + /* Rx interrupt, but nothing sent from uCode */ + if (i == r) + IWL_DEBUG(IWL_DL_RX | IWL_DL_ISR, "r = %d, i = %d\n", r, i); + + while (i != r) { + rxb = rxq->queue[i]; + + /* If an RXB doesn't have a queue slot associated with it + * then a bug has been introduced in the queue refilling + * routines -- catch it here */ + BUG_ON(rxb == NULL); + + rxq->queue[i] = NULL; + + pci_dma_sync_single_for_cpu(priv->pci_dev, rxb->dma_addr, + IWL_RX_BUF_SIZE, + PCI_DMA_FROMDEVICE); + pkt = (struct iwl_rx_packet *)rxb->skb->data; + + /* Reclaim a command buffer only if this packet is a response + * to a (driver-originated) command. + * If the packet (e.g. Rx frame) originated from uCode, + * there is no command buffer to reclaim. + * Ucode should set SEQ_RX_FRAME bit if ucode-originated, + * but apparently a few don't get set; catch them here. */ + reclaim = !(pkt->hdr.sequence & SEQ_RX_FRAME) && + (pkt->hdr.cmd != STATISTICS_NOTIFICATION) && + (pkt->hdr.cmd != REPLY_TX); + + /* Based on type of command response or notification, + * handle those that need handling via function in + * rx_handlers table. 
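+ * Notifications without a handler are only logged; their buffers are
+ * still recycled below.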
See iwl_setup_rx_handlers() */ + if (priv->rx_handlers[pkt->hdr.cmd]) { + IWL_DEBUG(IWL_DL_HOST_COMMAND | IWL_DL_RX | IWL_DL_ISR, + "r = %d, i = %d, %s, 0x%02x\n", r, i, + get_cmd_string(pkt->hdr.cmd), pkt->hdr.cmd); + priv->rx_handlers[pkt->hdr.cmd] (priv, rxb); + } else { + /* No handling needed */ + IWL_DEBUG(IWL_DL_HOST_COMMAND | IWL_DL_RX | IWL_DL_ISR, + "r %d i %d No handler needed for %s, 0x%02x\n", + r, i, get_cmd_string(pkt->hdr.cmd), + pkt->hdr.cmd); + } + + if (reclaim) { + /* Invoke any callbacks, transfer the skb to caller, + * and fire off the (possibly) blocking iwl_send_cmd() + * as we reclaim the driver command queue */ + if (rxb && rxb->skb) + iwl_tx_cmd_complete(priv, rxb); + else + IWL_WARNING("Claim null rxb?\n"); + } + + /* For now we just don't re-use anything. We can tweak this + * later to try and re-use notification packets and SKBs that + * fail to Rx correctly */ + if (rxb->skb != NULL) { + priv->alloc_rxb_skb--; + dev_kfree_skb_any(rxb->skb); + rxb->skb = NULL; + } + + pci_unmap_single(priv->pci_dev, rxb->dma_addr, + IWL_RX_BUF_SIZE, PCI_DMA_FROMDEVICE); + spin_lock_irqsave(&rxq->lock, flags); + list_add_tail(&rxb->list, &priv->rxq.rx_used); + spin_unlock_irqrestore(&rxq->lock, flags); + i = (i + 1) & RX_QUEUE_MASK; + } + + /* Backtrack one entry */ + priv->rxq.read = i; + iwl_rx_queue_restock(priv); +} + +int iwl_tx_queue_update_write_ptr(struct iwl_priv *priv, + struct iwl_tx_queue *txq) +{ + u32 reg = 0; + int rc = 0; + int txq_id = txq->q.id; + + if (txq->need_update == 0) + return rc; + + /* if we're trying to save power */ + if (test_bit(STATUS_POWER_PMI, &priv->status)) { + /* wake up nic if it's powered down ... + * uCode will wake up, and interrupt us again, so next + * time we'll skip this part. */ + reg = iwl_read32(priv, CSR_UCODE_DRV_GP1); + + if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) { + IWL_DEBUG_INFO("Requesting wakeup, GP1 = 0x%x\n", reg); + iwl_set_bit(priv, CSR_GP_CNTRL, + CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); + return rc; + } + + /* restore this queue's parameters in nic hardware. */ + rc = iwl_grab_restricted_access(priv); + if (rc) + return rc; + iwl_write_restricted(priv, HBUS_TARG_WRPTR, + txq->q.first_empty | (txq_id << 8)); + iwl_release_restricted_access(priv); + + /* else not in power-save mode, uCode will never sleep when we're + * trying to tx (during RFKILL, we're not trying to tx). 
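+ * In that case it is safe to write HBUS_TARG_WRPTR directly, as the else
+ * branch below does.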
*/ + } else + iwl_write32(priv, HBUS_TARG_WRPTR, + txq->q.first_empty | (txq_id << 8)); + + txq->need_update = 0; + + return rc; +} + +#ifdef CONFIG_IWLWIFI_DEBUG +static void iwl_print_rx_config_cmd(struct iwl_rxon_cmd *rxon) +{ + DECLARE_MAC_BUF(mac); + + IWL_DEBUG_RADIO("RX CONFIG:\n"); + iwl_print_hex_dump(IWL_DL_RADIO, (u8 *) rxon, sizeof(*rxon)); + IWL_DEBUG_RADIO("u16 channel: 0x%x\n", le16_to_cpu(rxon->channel)); + IWL_DEBUG_RADIO("u32 flags: 0x%08X\n", le32_to_cpu(rxon->flags)); + IWL_DEBUG_RADIO("u32 filter_flags: 0x%08x\n", + le32_to_cpu(rxon->filter_flags)); + IWL_DEBUG_RADIO("u8 dev_type: 0x%x\n", rxon->dev_type); + IWL_DEBUG_RADIO("u8 ofdm_basic_rates: 0x%02x\n", + rxon->ofdm_basic_rates); + IWL_DEBUG_RADIO("u8 cck_basic_rates: 0x%02x\n", rxon->cck_basic_rates); + IWL_DEBUG_RADIO("u8[6] node_addr: %s\n", + print_mac(mac, rxon->node_addr)); + IWL_DEBUG_RADIO("u8[6] bssid_addr: %s\n", + print_mac(mac, rxon->bssid_addr)); + IWL_DEBUG_RADIO("u16 assoc_id: 0x%x\n", le16_to_cpu(rxon->assoc_id)); +} +#endif + +static void iwl_enable_interrupts(struct iwl_priv *priv) +{ + IWL_DEBUG_ISR("Enabling interrupts\n"); + set_bit(STATUS_INT_ENABLED, &priv->status); + iwl_write32(priv, CSR_INT_MASK, CSR_INI_SET_MASK); +} + +static inline void iwl_disable_interrupts(struct iwl_priv *priv) +{ + clear_bit(STATUS_INT_ENABLED, &priv->status); + + /* disable interrupts from uCode/NIC to host */ + iwl_write32(priv, CSR_INT_MASK, 0x00000000); + + /* acknowledge/clear/reset any interrupts still pending + * from uCode or flow handler (Rx/Tx DMA) */ + iwl_write32(priv, CSR_INT, 0xffffffff); + iwl_write32(priv, CSR_FH_INT_STATUS, 0xffffffff); + IWL_DEBUG_ISR("Disabled interrupts\n"); +} + +static const char *desc_lookup(int i) +{ + switch (i) { + case 1: + return "FAIL"; + case 2: + return "BAD_PARAM"; + case 3: + return "BAD_CHECKSUM"; + case 4: + return "NMI_INTERRUPT"; + case 5: + return "SYSASSERT"; + case 6: + return "FATAL_ERROR"; + } + + return "UNKNOWN"; +} + +#define ERROR_START_OFFSET (1 * sizeof(u32)) +#define ERROR_ELEM_SIZE (7 * sizeof(u32)) + +static void iwl_dump_nic_error_log(struct iwl_priv *priv) +{ + u32 i; + u32 desc, time, count, base, data1; + u32 blink1, blink2, ilink1, ilink2; + int rc; + + base = le32_to_cpu(priv->card_alive.error_event_table_ptr); + + if (!iwl_hw_valid_rtc_data_addr(base)) { + IWL_ERROR("Not valid error log pointer 0x%08X\n", base); + return; + } + + rc = iwl_grab_restricted_access(priv); + if (rc) { + IWL_WARNING("Can not read from adapter at this time.\n"); + return; + } + + count = iwl_read_restricted_mem(priv, base); + + if (ERROR_START_OFFSET <= count * ERROR_ELEM_SIZE) { + IWL_ERROR("Start IWL Error Log Dump:\n"); + IWL_ERROR("Status: 0x%08lX, Config: %08X count: %d\n", + priv->status, priv->config, count); + } + + IWL_ERROR("Desc Time asrtPC blink2 " + "ilink1 nmiPC Line\n"); + for (i = ERROR_START_OFFSET; + i < (count * ERROR_ELEM_SIZE) + ERROR_START_OFFSET; + i += ERROR_ELEM_SIZE) { + desc = iwl_read_restricted_mem(priv, base + i); + time = + iwl_read_restricted_mem(priv, base + i + 1 * sizeof(u32)); + blink1 = + iwl_read_restricted_mem(priv, base + i + 2 * sizeof(u32)); + blink2 = + iwl_read_restricted_mem(priv, base + i + 3 * sizeof(u32)); + ilink1 = + iwl_read_restricted_mem(priv, base + i + 4 * sizeof(u32)); + ilink2 = + iwl_read_restricted_mem(priv, base + i + 5 * sizeof(u32)); + data1 = + iwl_read_restricted_mem(priv, base + i + 6 * sizeof(u32)); + + IWL_ERROR + ("%-13s (#%d) %010u 0x%05X 0x%05X 0x%05X 0x%05X %u\n\n", + desc_lookup(desc), desc, 
time, blink1, blink2, + ilink1, ilink2, data1); + } + + iwl_release_restricted_access(priv); + +} + +#define EVENT_START_OFFSET (4 * sizeof(u32)) + +/** + * iwl_print_event_log - Dump error event log to syslog + * + * NOTE: Must be called with iwl_grab_restricted_access() already obtained! + */ +static void iwl_print_event_log(struct iwl_priv *priv, u32 start_idx, + u32 num_events, u32 mode) +{ + u32 i; + u32 base; /* SRAM byte address of event log header */ + u32 event_size; /* 2 u32s, or 3 u32s if timestamp recorded */ + u32 ptr; /* SRAM byte address of log data */ + u32 ev, time, data; /* event log data */ + + if (num_events == 0) + return; + + base = le32_to_cpu(priv->card_alive.log_event_table_ptr); + + if (mode == 0) + event_size = 2 * sizeof(u32); + else + event_size = 3 * sizeof(u32); + + ptr = base + EVENT_START_OFFSET + (start_idx * event_size); + + /* "time" is actually "data" for mode 0 (no timestamp). + * place event id # at far right for easier visual parsing. */ + for (i = 0; i < num_events; i++) { + ev = iwl_read_restricted_mem(priv, ptr); + ptr += sizeof(u32); + time = iwl_read_restricted_mem(priv, ptr); + ptr += sizeof(u32); + if (mode == 0) + IWL_ERROR("0x%08x\t%04u\n", time, ev); /* data, ev */ + else { + data = iwl_read_restricted_mem(priv, ptr); + ptr += sizeof(u32); + IWL_ERROR("%010u\t0x%08x\t%04u\n", time, data, ev); + } + } +} + +static void iwl_dump_nic_event_log(struct iwl_priv *priv) +{ + int rc; + u32 base; /* SRAM byte address of event log header */ + u32 capacity; /* event log capacity in # entries */ + u32 mode; /* 0 - no timestamp, 1 - timestamp recorded */ + u32 num_wraps; /* # times uCode wrapped to top of log */ + u32 next_entry; /* index of next entry to be written by uCode */ + u32 size; /* # entries that we'll print */ + + base = le32_to_cpu(priv->card_alive.log_event_table_ptr); + if (!iwl_hw_valid_rtc_data_addr(base)) { + IWL_ERROR("Invalid event log pointer 0x%08X\n", base); + return; + } + + rc = iwl_grab_restricted_access(priv); + if (rc) { + IWL_WARNING("Can not read from adapter at this time.\n"); + return; + } + + /* event log header */ + capacity = iwl_read_restricted_mem(priv, base); + mode = iwl_read_restricted_mem(priv, base + (1 * sizeof(u32))); + num_wraps = iwl_read_restricted_mem(priv, base + (2 * sizeof(u32))); + next_entry = iwl_read_restricted_mem(priv, base + (3 * sizeof(u32))); + + size = num_wraps ? capacity : next_entry; + + /* bail out if nothing in log */ + if (size == 0) { + IWL_ERROR("Start IWL Event Log Dump: nothing in log\n"); + iwl_release_restricted_access(priv); + return; + } + + IWL_ERROR("Start IWL Event Log Dump: display count %d, wraps %d\n", + size, num_wraps); + + /* if uCode has wrapped back to top of log, start at the oldest entry, + * i.e the next one that uCode would fill. */ + if (num_wraps) + iwl_print_event_log(priv, next_entry, + capacity - next_entry, mode); + + /* (then/else) start at top of log */ + iwl_print_event_log(priv, 0, next_entry, mode); + + iwl_release_restricted_access(priv); +} + +/** + * iwl_irq_handle_error - called for HW or SW error interrupt from card + */ +static void iwl_irq_handle_error(struct iwl_priv *priv) +{ + /* Set the FW error flag -- cleared on iwl_down */ + set_bit(STATUS_FW_ERROR, &priv->status); + + /* Cancel currently queued command. 
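+ * Clearing STATUS_HCMD_ACTIVE, together with the wake_up below, lets a
+ * caller blocked on a synchronous host command stop waiting for a
+ * response that will never arrive.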
*/ + clear_bit(STATUS_HCMD_ACTIVE, &priv->status); + +#ifdef CONFIG_IWLWIFI_DEBUG + if (iwl_debug_level & IWL_DL_FW_ERRORS) { + iwl_dump_nic_error_log(priv); + iwl_dump_nic_event_log(priv); + iwl_print_rx_config_cmd(&priv->staging_rxon); + } +#endif + + wake_up_interruptible(&priv->wait_command_queue); + + /* Keep the restart process from trying to send host + * commands by clearing the INIT status bit */ + clear_bit(STATUS_READY, &priv->status); + + if (!test_bit(STATUS_EXIT_PENDING, &priv->status)) { + IWL_DEBUG(IWL_DL_INFO | IWL_DL_FW_ERRORS, + "Restarting adapter due to uCode error.\n"); + + if (iwl_is_associated(priv)) { + memcpy(&priv->recovery_rxon, &priv->active_rxon, + sizeof(priv->recovery_rxon)); + priv->error_recovering = 1; + } + queue_work(priv->workqueue, &priv->restart); + } +} + +static void iwl_error_recovery(struct iwl_priv *priv) +{ + unsigned long flags; + + memcpy(&priv->staging_rxon, &priv->recovery_rxon, + sizeof(priv->staging_rxon)); + priv->staging_rxon.filter_flags &= ~RXON_FILTER_ASSOC_MSK; + iwl_commit_rxon(priv); + + iwl_add_station(priv, priv->bssid, 1, 0); + + spin_lock_irqsave(&priv->lock, flags); + priv->assoc_id = le16_to_cpu(priv->staging_rxon.assoc_id); + priv->error_recovering = 0; + spin_unlock_irqrestore(&priv->lock, flags); +} + +static void iwl_irq_tasklet(struct iwl_priv *priv) +{ + u32 inta, handled = 0; + u32 inta_fh; + unsigned long flags; +#ifdef CONFIG_IWLWIFI_DEBUG + u32 inta_mask; +#endif + + spin_lock_irqsave(&priv->lock, flags); + + /* Ack/clear/reset pending uCode interrupts. + * Note: Some bits in CSR_INT are "OR" of bits in CSR_FH_INT_STATUS, + * and will clear only when CSR_FH_INT_STATUS gets cleared. */ + inta = iwl_read32(priv, CSR_INT); + iwl_write32(priv, CSR_INT, inta); + + /* Ack/clear/reset pending flow-handler (DMA) interrupts. + * Any new interrupts that happen after this, either while we're + * in this tasklet, or later, will show up in next ISR/tasklet. */ + inta_fh = iwl_read32(priv, CSR_FH_INT_STATUS); + iwl_write32(priv, CSR_FH_INT_STATUS, inta_fh); + +#ifdef CONFIG_IWLWIFI_DEBUG + if (iwl_debug_level & IWL_DL_ISR) { + inta_mask = iwl_read32(priv, CSR_INT_MASK); /* just for debug */ + IWL_DEBUG_ISR("inta 0x%08x, enabled 0x%08x, fh 0x%08x\n", + inta, inta_mask, inta_fh); + } +#endif + + /* Since CSR_INT and CSR_FH_INT_STATUS reads and clears are not + * atomic, make sure that inta covers all the interrupts that + * we've discovered, even if FH interrupt came in just after + * reading CSR_INT. */ + if (inta_fh & CSR_FH_INT_RX_MASK) + inta |= CSR_INT_BIT_FH_RX; + if (inta_fh & CSR_FH_INT_TX_MASK) + inta |= CSR_INT_BIT_FH_TX; + + /* Now service all interrupt bits discovered above. */ + if (inta & CSR_INT_BIT_HW_ERR) { + IWL_ERROR("Microcode HW error detected. 
Restarting.\n"); + + /* Tell the device to stop sending interrupts */ + iwl_disable_interrupts(priv); + + iwl_irq_handle_error(priv); + + handled |= CSR_INT_BIT_HW_ERR; + + spin_unlock_irqrestore(&priv->lock, flags); + + return; + } + +#ifdef CONFIG_IWLWIFI_DEBUG + if (iwl_debug_level & (IWL_DL_ISR)) { + /* NIC fires this, but we don't use it, redundant with WAKEUP */ + if (inta & CSR_INT_BIT_MAC_CLK_ACTV) + IWL_DEBUG_ISR("Microcode started or stopped.\n"); + + /* Alive notification via Rx interrupt will do the real work */ + if (inta & CSR_INT_BIT_ALIVE) + IWL_DEBUG_ISR("Alive interrupt\n"); + } +#endif + /* Safely ignore these bits for debug checks below */ + inta &= ~(CSR_INT_BIT_MAC_CLK_ACTV | CSR_INT_BIT_ALIVE); + + /* HW RF KILL switch toggled (4965 only) */ + if (inta & CSR_INT_BIT_RF_KILL) { + int hw_rf_kill = 0; + if (!(iwl_read32(priv, CSR_GP_CNTRL) & + CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW)) + hw_rf_kill = 1; + + IWL_DEBUG(IWL_DL_INFO | IWL_DL_RF_KILL | IWL_DL_ISR, + "RF_KILL bit toggled to %s.\n", + hw_rf_kill ? "disable radio":"enable radio"); + + /* Queue restart only if RF_KILL switch was set to "kill" + * when we loaded driver, and is now set to "enable". + * After we're Alive, RF_KILL gets handled by + * iwl_rx_card_state_notif() */ + if (!hw_rf_kill && !test_bit(STATUS_ALIVE, &priv->status)) + queue_work(priv->workqueue, &priv->restart); + + handled |= CSR_INT_BIT_RF_KILL; + } + + /* Chip got too hot and stopped itself (4965 only) */ + if (inta & CSR_INT_BIT_CT_KILL) { + IWL_ERROR("Microcode CT kill error detected.\n"); + handled |= CSR_INT_BIT_CT_KILL; + } + + /* Error detected by uCode */ + if (inta & CSR_INT_BIT_SW_ERR) { + IWL_ERROR("Microcode SW error detected. Restarting 0x%X.\n", + inta); + iwl_irq_handle_error(priv); + handled |= CSR_INT_BIT_SW_ERR; + } + + /* uCode wakes up after power-down sleep */ + if (inta & CSR_INT_BIT_WAKEUP) { + IWL_DEBUG_ISR("Wakeup interrupt\n"); + iwl_rx_queue_update_write_ptr(priv, &priv->rxq); + iwl_tx_queue_update_write_ptr(priv, &priv->txq[0]); + iwl_tx_queue_update_write_ptr(priv, &priv->txq[1]); + iwl_tx_queue_update_write_ptr(priv, &priv->txq[2]); + iwl_tx_queue_update_write_ptr(priv, &priv->txq[3]); + iwl_tx_queue_update_write_ptr(priv, &priv->txq[4]); + iwl_tx_queue_update_write_ptr(priv, &priv->txq[5]); + + handled |= CSR_INT_BIT_WAKEUP; + } + + /* All uCode command responses, including Tx command responses, + * Rx "responses" (frame-received notification), and other + * notifications from uCode come through here*/ + if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX)) { + iwl_rx_handle(priv); + handled |= (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX); + } + + if (inta & CSR_INT_BIT_FH_TX) { + IWL_DEBUG_ISR("Tx interrupt\n"); + + iwl_write32(priv, CSR_FH_INT_STATUS, (1 << 6)); + if (!iwl_grab_restricted_access(priv)) { + iwl_write_restricted(priv, + FH_TCSR_CREDIT + (ALM_FH_SRVC_CHNL), 0x0); + iwl_release_restricted_access(priv); + } + handled |= CSR_INT_BIT_FH_TX; + } + + if (inta & ~handled) + IWL_ERROR("Unhandled INTA bits 0x%08x\n", inta & ~handled); + + if (inta & ~CSR_INI_SET_MASK) { + IWL_WARNING("Disabled INTA bits 0x%08x were pending\n", + inta & ~CSR_INI_SET_MASK); + IWL_WARNING(" with FH_INT = 0x%08x\n", inta_fh); + } + + /* Re-enable all interrupts */ + iwl_enable_interrupts(priv); + +#ifdef CONFIG_IWLWIFI_DEBUG + if (iwl_debug_level & (IWL_DL_ISR)) { + inta = iwl_read32(priv, CSR_INT); + inta_mask = iwl_read32(priv, CSR_INT_MASK); + inta_fh = iwl_read32(priv, CSR_FH_INT_STATUS); + IWL_DEBUG_ISR("End inta 0x%08x, enabled 
0x%08x, fh 0x%08x, " + "flags 0x%08lx\n", inta, inta_mask, inta_fh, flags); + } +#endif + spin_unlock_irqrestore(&priv->lock, flags); +} + +static irqreturn_t iwl_isr(int irq, void *data) +{ + struct iwl_priv *priv = data; + u32 inta, inta_mask; + u32 inta_fh; + if (!priv) + return IRQ_NONE; + + spin_lock(&priv->lock); + + /* Disable (but don't clear!) interrupts here to avoid + * back-to-back ISRs and sporadic interrupts from our NIC. + * If we have something to service, the tasklet will re-enable ints. + * If we *don't* have something, we'll re-enable before leaving here. */ + inta_mask = iwl_read32(priv, CSR_INT_MASK); /* just for debug */ + iwl_write32(priv, CSR_INT_MASK, 0x00000000); + + /* Discover which interrupts are active/pending */ + inta = iwl_read32(priv, CSR_INT); + inta_fh = iwl_read32(priv, CSR_FH_INT_STATUS); + + /* Ignore interrupt if there's nothing in NIC to service. + * This may be due to IRQ shared with another device, + * or due to sporadic interrupts thrown from our NIC. */ + if (!inta && !inta_fh) { + IWL_DEBUG_ISR("Ignore interrupt, inta == 0, inta_fh == 0\n"); + goto none; + } + + if ((inta == 0xFFFFFFFF) || ((inta & 0xFFFFFFF0) == 0xa5a5a5a0)) { + /* Hardware disappeared */ + IWL_WARNING("HARDWARE GONE?? INTA == 0x%080x\n", inta); + goto none; + } + + IWL_DEBUG_ISR("ISR inta 0x%08x, enabled 0x%08x, fh 0x%08x\n", + inta, inta_mask, inta_fh); + + /* iwl_irq_tasklet() will service interrupts and re-enable them */ + tasklet_schedule(&priv->irq_tasklet); + spin_unlock(&priv->lock); + + return IRQ_HANDLED; + + none: + /* re-enable interrupts here since we don't have anything to service. */ + iwl_enable_interrupts(priv); + spin_unlock(&priv->lock); + return IRQ_NONE; +} + +/************************** EEPROM BANDS **************************** + * + * The iwl_eeprom_band definitions below provide the mapping from the + * EEPROM contents to the specific channel number supported for each + * band. + * + * For example, iwl_priv->eeprom.band_3_channels[4] from the band_3 + * definition below maps to physical channel 42 in the 5.2GHz spectrum. + * The specific geography and calibration information for that channel + * is contained in the eeprom map itself. + * + * During init, we copy the eeprom information and channel map + * information into priv->channel_info_24/52 and priv->channel_map_24/52 + * + * channel_map_24/52 provides the index in the channel_info array for a + * given channel. We have to have two separate maps as there is channel + * overlap with the 2.4GHz and 5.2GHz spectrum as seen in band_1 and + * band_2 + * + * A value of 0xff stored in the channel_map indicates that the channel + * is not supported by the hardware at all. + * + * A value of 0xfe in the channel_map indicates that the channel is not + * valid for Tx with the current hardware. This means that + * while the system can tune and receive on a given channel, it may not + * be able to associate or transmit any frames on that + * channel. There is no corresponding channel information for that + * entry. 
+ * + *********************************************************************/ + +/* 2.4 GHz */ +static const u8 iwl_eeprom_band_1[14] = { + 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14 +}; + +/* 5.2 GHz bands */ +static const u8 iwl_eeprom_band_2[] = { + 183, 184, 185, 187, 188, 189, 192, 196, 7, 8, 11, 12, 16 +}; + +static const u8 iwl_eeprom_band_3[] = { /* 5205-5320MHz */ + 34, 36, 38, 40, 42, 44, 46, 48, 52, 56, 60, 64 +}; + +static const u8 iwl_eeprom_band_4[] = { /* 5500-5700MHz */ + 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140 +}; + +static const u8 iwl_eeprom_band_5[] = { /* 5725-5825MHz */ + 145, 149, 153, 157, 161, 165 +}; + +static void iwl_init_band_reference(const struct iwl_priv *priv, int band, + int *eeprom_ch_count, + const struct iwl_eeprom_channel + **eeprom_ch_info, + const u8 **eeprom_ch_index) +{ + switch (band) { + case 1: /* 2.4GHz band */ + *eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_1); + *eeprom_ch_info = priv->eeprom.band_1_channels; + *eeprom_ch_index = iwl_eeprom_band_1; + break; + case 2: /* 5.2GHz band */ + *eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_2); + *eeprom_ch_info = priv->eeprom.band_2_channels; + *eeprom_ch_index = iwl_eeprom_band_2; + break; + case 3: /* 5.2GHz band */ + *eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_3); + *eeprom_ch_info = priv->eeprom.band_3_channels; + *eeprom_ch_index = iwl_eeprom_band_3; + break; + case 4: /* 5.2GHz band */ + *eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_4); + *eeprom_ch_info = priv->eeprom.band_4_channels; + *eeprom_ch_index = iwl_eeprom_band_4; + break; + case 5: /* 5.2GHz band */ + *eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_5); + *eeprom_ch_info = priv->eeprom.band_5_channels; + *eeprom_ch_index = iwl_eeprom_band_5; + break; + default: + BUG(); + return; + } +} + +const struct iwl_channel_info *iwl_get_channel_info(const struct iwl_priv *priv, + int phymode, u16 channel) +{ + int i; + + switch (phymode) { + case MODE_IEEE80211A: + for (i = 14; i < priv->channel_count; i++) { + if (priv->channel_info[i].channel == channel) + return &priv->channel_info[i]; + } + break; + + case MODE_IEEE80211B: + case MODE_IEEE80211G: + if (channel >= 1 && channel <= 14) + return &priv->channel_info[channel - 1]; + break; + + } + + return NULL; +} + +#define CHECK_AND_PRINT(x) ((eeprom_ch_info[ch].flags & EEPROM_CHANNEL_##x) \ + ? 
# x " " : "") + +static int iwl_init_channel_map(struct iwl_priv *priv) +{ + int eeprom_ch_count = 0; + const u8 *eeprom_ch_index = NULL; + const struct iwl_eeprom_channel *eeprom_ch_info = NULL; + int band, ch; + struct iwl_channel_info *ch_info; + + if (priv->channel_count) { + IWL_DEBUG_INFO("Channel map already initialized.\n"); + return 0; + } + + if (priv->eeprom.version < 0x2f) { + IWL_WARNING("Unsupported EEPROM version: 0x%04X\n", + priv->eeprom.version); + return -EINVAL; + } + + IWL_DEBUG_INFO("Initializing regulatory info from EEPROM\n"); + + priv->channel_count = + ARRAY_SIZE(iwl_eeprom_band_1) + + ARRAY_SIZE(iwl_eeprom_band_2) + + ARRAY_SIZE(iwl_eeprom_band_3) + + ARRAY_SIZE(iwl_eeprom_band_4) + + ARRAY_SIZE(iwl_eeprom_band_5); + + IWL_DEBUG_INFO("Parsing data for %d channels.\n", priv->channel_count); + + priv->channel_info = kzalloc(sizeof(struct iwl_channel_info) * + priv->channel_count, GFP_KERNEL); + if (!priv->channel_info) { + IWL_ERROR("Could not allocate channel_info\n"); + priv->channel_count = 0; + return -ENOMEM; + } + + ch_info = priv->channel_info; + + /* Loop through the 5 EEPROM bands adding them in order to the + * channel map we maintain (that contains additional information than + * what just in the EEPROM) */ + for (band = 1; band <= 5; band++) { + + iwl_init_band_reference(priv, band, &eeprom_ch_count, + &eeprom_ch_info, &eeprom_ch_index); + + /* Loop through each band adding each of the channels */ + for (ch = 0; ch < eeprom_ch_count; ch++) { + ch_info->channel = eeprom_ch_index[ch]; + ch_info->phymode = (band == 1) ? MODE_IEEE80211B : + MODE_IEEE80211A; + + /* permanently store EEPROM's channel regulatory flags + * and max power in channel info database. */ + ch_info->eeprom = eeprom_ch_info[ch]; + + /* Copy the run-time flags so they are there even on + * invalid channels */ + ch_info->flags = eeprom_ch_info[ch].flags; + + if (!(is_channel_valid(ch_info))) { + IWL_DEBUG_INFO("Ch. %d Flags %x [%sGHz] - " + "No traffic\n", + ch_info->channel, + ch_info->flags, + is_channel_a_band(ch_info) ? + "5.2" : "2.4"); + ch_info++; + continue; + } + + /* Initialize regulatory-based run-time data */ + ch_info->max_power_avg = ch_info->curr_txpow = + eeprom_ch_info[ch].max_power_avg; + ch_info->scan_power = eeprom_ch_info[ch].max_power_avg; + ch_info->min_power = 0; + + IWL_DEBUG_INFO("Ch. %d [%sGHz] %s%s%s%s%s%s(0x%02x" + " %ddBm): Ad-Hoc %ssupported\n", + ch_info->channel, + is_channel_a_band(ch_info) ? + "5.2" : "2.4", + CHECK_AND_PRINT(IBSS), + CHECK_AND_PRINT(ACTIVE), + CHECK_AND_PRINT(RADAR), + CHECK_AND_PRINT(WIDE), + CHECK_AND_PRINT(NARROW), + CHECK_AND_PRINT(DFS), + eeprom_ch_info[ch].flags, + eeprom_ch_info[ch].max_power_avg, + ((eeprom_ch_info[ch]. + flags & EEPROM_CHANNEL_IBSS) + && !(eeprom_ch_info[ch]. + flags & EEPROM_CHANNEL_RADAR)) + ? "" : "not "); + + /* Set the user_txpower_limit to the highest power + * supported by any channel */ + if (eeprom_ch_info[ch].max_power_avg > + priv->user_txpower_limit) + priv->user_txpower_limit = + eeprom_ch_info[ch].max_power_avg; + + ch_info++; + } + } + + if (iwl3945_txpower_set_from_eeprom(priv)) + return -EIO; + + return 0; +} + +/* For active scan, listen ACTIVE_DWELL_TIME (msec) on each channel after + * sending probe req. This should be set long enough to hear probe responses + * from more than one AP. 
*/ +#define IWL_ACTIVE_DWELL_TIME_24 (20) /* all times in msec */ +#define IWL_ACTIVE_DWELL_TIME_52 (10) + +/* For faster active scanning, scan will move to the next channel if fewer than + * PLCP_QUIET_THRESH packets are heard on this channel within + * ACTIVE_QUIET_TIME after sending probe request. This shortens the dwell + * time if it's a quiet channel (nothing responded to our probe, and there's + * no other traffic). + * Disable "quiet" feature by setting PLCP_QUIET_THRESH to 0. */ +#define IWL_PLCP_QUIET_THRESH __constant_cpu_to_le16(1) /* packets */ +#define IWL_ACTIVE_QUIET_TIME __constant_cpu_to_le16(5) /* msec */ + +/* For passive scan, listen PASSIVE_DWELL_TIME (msec) on each channel. + * Must be set longer than active dwell time. + * For the most reliable scan, set > AP beacon interval (typically 100msec). */ +#define IWL_PASSIVE_DWELL_TIME_24 (20) /* all times in msec */ +#define IWL_PASSIVE_DWELL_TIME_52 (10) +#define IWL_PASSIVE_DWELL_BASE (100) +#define IWL_CHANNEL_TUNE_TIME 5 + +static inline u16 iwl_get_active_dwell_time(struct iwl_priv *priv, int phymode) +{ + if (phymode == MODE_IEEE80211A) + return IWL_ACTIVE_DWELL_TIME_52; + else + return IWL_ACTIVE_DWELL_TIME_24; +} + +static u16 iwl_get_passive_dwell_time(struct iwl_priv *priv, int phymode) +{ + u16 active = iwl_get_active_dwell_time(priv, phymode); + u16 passive = (phymode != MODE_IEEE80211A) ? + IWL_PASSIVE_DWELL_BASE + IWL_PASSIVE_DWELL_TIME_24 : + IWL_PASSIVE_DWELL_BASE + IWL_PASSIVE_DWELL_TIME_52; + + if (iwl_is_associated(priv)) { + /* If we're associated, we clamp the maximum passive + * dwell time to be 98% of the beacon interval (minus + * 2 * channel tune time) */ + passive = priv->beacon_int; + if ((passive > IWL_PASSIVE_DWELL_BASE) || !passive) + passive = IWL_PASSIVE_DWELL_BASE; + passive = (passive * 98) / 100 - IWL_CHANNEL_TUNE_TIME * 2; + } + + if (passive <= active) + passive = active + 1; + + return passive; +} + +static int iwl_get_channels_for_scan(struct iwl_priv *priv, int phymode, + u8 is_active, u8 direct_mask, + struct iwl_scan_channel *scan_ch) +{ + const struct ieee80211_channel *channels = NULL; + const struct ieee80211_hw_mode *hw_mode; + const struct iwl_channel_info *ch_info; + u16 passive_dwell = 0; + u16 active_dwell = 0; + int added, i; + + hw_mode = iwl_get_hw_mode(priv, phymode); + if (!hw_mode) + return 0; + + channels = hw_mode->channels; + + active_dwell = iwl_get_active_dwell_time(priv, phymode); + passive_dwell = iwl_get_passive_dwell_time(priv, phymode); + + for (i = 0, added = 0; i < hw_mode->num_channels; i++) { + if (channels[i].chan == + le16_to_cpu(priv->active_rxon.channel)) { + if (iwl_is_associated(priv)) { + IWL_DEBUG_SCAN + ("Skipping current channel %d\n", + le16_to_cpu(priv->active_rxon.channel)); + continue; + } + } else if (priv->only_active_channel) + continue; + + scan_ch->channel = channels[i].chan; + + ch_info = iwl_get_channel_info(priv, phymode, scan_ch->channel); + if (!is_channel_valid(ch_info)) { + IWL_DEBUG_SCAN("Channel %d is INVALID for this SKU.\n", + scan_ch->channel); + continue; + } + + if (!is_active || is_channel_passive(ch_info) || + !(channels[i].flag & IEEE80211_CHAN_W_ACTIVE_SCAN)) + scan_ch->type = 0; /* passive */ + else + scan_ch->type = 1; /* active */ + + if (scan_ch->type & 1) + scan_ch->type |= (direct_mask << 1); + + if (is_channel_narrow(ch_info)) + scan_ch->type |= (1 << 7); + + scan_ch->active_dwell = cpu_to_le16(active_dwell); + scan_ch->passive_dwell = cpu_to_le16(passive_dwell); + + /* Set power levels to defaults */ + 
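
To make the passive-dwell clamp in iwl_get_passive_dwell_time() concrete: with a typical 100 ms beacon interval on 2.4 GHz the result is 100 * 98 / 100 - 2 * IWL_CHANNEL_TUNE_TIME = 88 ms, while an unassociated radio simply uses IWL_PASSIVE_DWELL_BASE plus the per-band constant (120 ms on 2.4 GHz). A small sketch of the same arithmetic, with the beacon interval assumed rather than read from hardware:

/* Worked example of the passive-dwell clamp above, assuming a 100 ms
 * beacon interval while associated on 2.4 GHz. */
static u16 iwl_example_passive_dwell(void)
{
	u16 active = IWL_ACTIVE_DWELL_TIME_24;	/* 20 ms */
	u16 passive = 100;			/* assumed beacon interval */

	if ((passive > IWL_PASSIVE_DWELL_BASE) || !passive)
		passive = IWL_PASSIVE_DWELL_BASE;
	/* 98% of the beacon interval minus two channel-tune times = 88 ms */
	passive = (passive * 98) / 100 - IWL_CHANNEL_TUNE_TIME * 2;

	if (passive <= active)
		passive = active + 1;

	return passive;
}
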
scan_ch->tpc.dsp_atten = 110; + /* scan_pwr_info->tpc.dsp_atten; */ + + /*scan_pwr_info->tpc.tx_gain; */ + if (phymode == MODE_IEEE80211A) + scan_ch->tpc.tx_gain = ((1 << 5) | (3 << 3)) | 3; + else { + scan_ch->tpc.tx_gain = ((1 << 5) | (5 << 3)); + /* NOTE: if we were doing 6Mb OFDM for scans we'd use + * power level + scan_ch->tpc.tx_gain = ((1<<5) | (2 << 3)) | 3; + */ + } + + IWL_DEBUG_SCAN("Scanning %d [%s %d]\n", + scan_ch->channel, + (scan_ch->type & 1) ? "ACTIVE" : "PASSIVE", + (scan_ch->type & 1) ? + active_dwell : passive_dwell); + + scan_ch++; + added++; + } + + IWL_DEBUG_SCAN("total channels to scan %d \n", added); + return added; +} + +static void iwl_reset_channel_flag(struct iwl_priv *priv) +{ + int i, j; + for (i = 0; i < 3; i++) { + struct ieee80211_hw_mode *hw_mode = (void *)&priv->modes[i]; + for (j = 0; j < hw_mode->num_channels; j++) + hw_mode->channels[j].flag = hw_mode->channels[j].val; + } +} + +static void iwl_init_hw_rates(struct iwl_priv *priv, + struct ieee80211_rate *rates) +{ + int i; + + for (i = 0; i < IWL_RATE_COUNT; i++) { + rates[i].rate = iwl_rates[i].ieee * 5; + rates[i].val = i; /* Rate scaling will work on indexes */ + rates[i].val2 = i; + rates[i].flags = IEEE80211_RATE_SUPPORTED; + /* Only OFDM have the bits-per-symbol set */ + if ((i <= IWL_LAST_OFDM_RATE) && (i >= IWL_FIRST_OFDM_RATE)) + rates[i].flags |= IEEE80211_RATE_OFDM; + else { + /* + * If CCK 1M then set rate flag to CCK else CCK_2 + * which is CCK | PREAMBLE2 + */ + rates[i].flags |= (iwl_rates[i].plcp == 10) ? + IEEE80211_RATE_CCK : IEEE80211_RATE_CCK_2; + } + + /* Set up which ones are basic rates... */ + if (IWL_BASIC_RATES_MASK & (1 << i)) + rates[i].flags |= IEEE80211_RATE_BASIC; + } +} + +/** + * iwl_init_geos - Initialize mac80211's geo/channel info based from eeprom + */ +static int iwl_init_geos(struct iwl_priv *priv) +{ + struct iwl_channel_info *ch; + struct ieee80211_hw_mode *modes; + struct ieee80211_channel *channels; + struct ieee80211_channel *geo_ch; + struct ieee80211_rate *rates; + int i = 0; + enum { + A = 0, + B = 1, + G = 2, + }; + int mode_count = 3; + + if (priv->modes) { + IWL_DEBUG_INFO("Geography modes already initialized.\n"); + set_bit(STATUS_GEO_CONFIGURED, &priv->status); + return 0; + } + + modes = kzalloc(sizeof(struct ieee80211_hw_mode) * mode_count, + GFP_KERNEL); + if (!modes) + return -ENOMEM; + + channels = kzalloc(sizeof(struct ieee80211_channel) * + priv->channel_count, GFP_KERNEL); + if (!channels) { + kfree(modes); + return -ENOMEM; + } + + rates = kzalloc((sizeof(struct ieee80211_rate) * (IWL_MAX_RATES + 1)), + GFP_KERNEL); + if (!rates) { + kfree(modes); + kfree(channels); + return -ENOMEM; + } + + /* 0 = 802.11a + * 1 = 802.11b + * 2 = 802.11g + */ + + /* 5.2GHz channels start after the 2.4GHz channels */ + modes[A].mode = MODE_IEEE80211A; + modes[A].channels = &channels[ARRAY_SIZE(iwl_eeprom_band_1)]; + modes[A].rates = rates; + modes[A].num_rates = 8; /* just OFDM */ + modes[A].num_channels = 0; + + modes[B].mode = MODE_IEEE80211B; + modes[B].channels = channels; + modes[B].rates = &rates[8]; + modes[B].num_rates = 4; /* just CCK */ + modes[B].num_channels = 0; + + modes[G].mode = MODE_IEEE80211G; + modes[G].channels = channels; + modes[G].rates = rates; + modes[G].num_rates = 12; /* OFDM & CCK */ + modes[G].num_channels = 0; + + priv->ieee_channels = channels; + priv->ieee_rates = rates; + + iwl_init_hw_rates(priv, rates); + + for (i = 0, geo_ch = channels; i < priv->channel_count; i++) { + ch = &priv->channel_info[i]; + + if 
(!is_channel_valid(ch)) { + IWL_DEBUG_INFO("Channel %d [%sGHz] is restricted -- " + "skipping.\n", + ch->channel, is_channel_a_band(ch) ? + "5.2" : "2.4"); + continue; + } + + if (is_channel_a_band(ch)) + geo_ch = &modes[A].channels[modes[A].num_channels++]; + else { + geo_ch = &modes[B].channels[modes[B].num_channels++]; + modes[G].num_channels++; + } + + geo_ch->freq = ieee80211chan2mhz(ch->channel); + geo_ch->chan = ch->channel; + geo_ch->power_level = ch->max_power_avg; + geo_ch->antenna_max = 0xff; + + if (is_channel_valid(ch)) { + geo_ch->flag = IEEE80211_CHAN_W_SCAN; + if (ch->flags & EEPROM_CHANNEL_IBSS) + geo_ch->flag |= IEEE80211_CHAN_W_IBSS; + + if (ch->flags & EEPROM_CHANNEL_ACTIVE) + geo_ch->flag |= IEEE80211_CHAN_W_ACTIVE_SCAN; + + if (ch->flags & EEPROM_CHANNEL_RADAR) + geo_ch->flag |= IEEE80211_CHAN_W_RADAR_DETECT; + + if (ch->max_power_avg > priv->max_channel_txpower_limit) + priv->max_channel_txpower_limit = + ch->max_power_avg; + } + + geo_ch->val = geo_ch->flag; + } + + if ((modes[A].num_channels == 0) && priv->is_abg) { + printk(KERN_INFO DRV_NAME + ": Incorrectly detected BG card as ABG. Please send " + "your PCI ID 0x%04X:0x%04X to maintainer.\n", + priv->pci_dev->device, priv->pci_dev->subsystem_device); + priv->is_abg = 0; + } + + printk(KERN_INFO DRV_NAME + ": Tunable channels: %d 802.11bg, %d 802.11a channels\n", + modes[G].num_channels, modes[A].num_channels); + + /* + * NOTE: We register these in preference of order -- the + * stack doesn't currently (as of 7.0.6 / Apr 24 '07) pick + * a phymode based on rates or AP capabilities but seems to + * configure it purely on if the channel being configured + * is supported by a mode -- and the first match is taken + */ + + if (modes[G].num_channels) + ieee80211_register_hwmode(priv->hw, &modes[G]); + if (modes[B].num_channels) + ieee80211_register_hwmode(priv->hw, &modes[B]); + if (modes[A].num_channels) + ieee80211_register_hwmode(priv->hw, &modes[A]); + + priv->modes = modes; + set_bit(STATUS_GEO_CONFIGURED, &priv->status); + + return 0; +} + +/****************************************************************************** + * + * uCode download functions + * + ******************************************************************************/ + +static void iwl_dealloc_ucode_pci(struct iwl_priv *priv) +{ + if (priv->ucode_code.v_addr != NULL) { + pci_free_consistent(priv->pci_dev, + priv->ucode_code.len, + priv->ucode_code.v_addr, + priv->ucode_code.p_addr); + priv->ucode_code.v_addr = NULL; + } + if (priv->ucode_data.v_addr != NULL) { + pci_free_consistent(priv->pci_dev, + priv->ucode_data.len, + priv->ucode_data.v_addr, + priv->ucode_data.p_addr); + priv->ucode_data.v_addr = NULL; + } + if (priv->ucode_data_backup.v_addr != NULL) { + pci_free_consistent(priv->pci_dev, + priv->ucode_data_backup.len, + priv->ucode_data_backup.v_addr, + priv->ucode_data_backup.p_addr); + priv->ucode_data_backup.v_addr = NULL; + } + if (priv->ucode_init.v_addr != NULL) { + pci_free_consistent(priv->pci_dev, + priv->ucode_init.len, + priv->ucode_init.v_addr, + priv->ucode_init.p_addr); + priv->ucode_init.v_addr = NULL; + } + if (priv->ucode_init_data.v_addr != NULL) { + pci_free_consistent(priv->pci_dev, + priv->ucode_init_data.len, + priv->ucode_init_data.v_addr, + priv->ucode_init_data.p_addr); + priv->ucode_init_data.v_addr = NULL; + } + if (priv->ucode_boot.v_addr != NULL) { + pci_free_consistent(priv->pci_dev, + priv->ucode_boot.len, + priv->ucode_boot.v_addr, + priv->ucode_boot.p_addr); + priv->ucode_boot.v_addr = NULL; + } +} + +/** 
+ * iwl_verify_inst_full - verify runtime uCode image in card vs. host, + * looking at all data. + */ +static int iwl_verify_inst_full(struct iwl_priv *priv, __le32 * image, u32 len) +{ + u32 val; + u32 save_len = len; + int rc = 0; + u32 errcnt; + + IWL_DEBUG_INFO("ucode inst image size is %u\n", len); + + rc = iwl_grab_restricted_access(priv); + if (rc) + return rc; + + iwl_write_restricted(priv, HBUS_TARG_MEM_RADDR, RTC_INST_LOWER_BOUND); + + errcnt = 0; + for (; len > 0; len -= sizeof(u32), image++) { + /* read data comes through single port, auto-incr addr */ + /* NOTE: Use the debugless read so we don't flood kernel log + * if IWL_DL_IO is set */ + val = _iwl_read_restricted(priv, HBUS_TARG_MEM_RDAT); + if (val != le32_to_cpu(*image)) { + IWL_ERROR("uCode INST section is invalid at " + "offset 0x%x, is 0x%x, s/b 0x%x\n", + save_len - len, val, le32_to_cpu(*image)); + rc = -EIO; + errcnt++; + if (errcnt >= 20) + break; + } + } + + iwl_release_restricted_access(priv); + + if (!errcnt) + IWL_DEBUG_INFO + ("ucode image in INSTRUCTION memory is good\n"); + + return rc; +} + + +/** + * iwl_verify_inst_sparse - verify runtime uCode image in card vs. host, + * using sample data 100 bytes apart. If these sample points are good, + * it's a pretty good bet that everything between them is good, too. + */ +static int iwl_verify_inst_sparse(struct iwl_priv *priv, __le32 *image, u32 len) +{ + u32 val; + int rc = 0; + u32 errcnt = 0; + u32 i; + + IWL_DEBUG_INFO("ucode inst image size is %u\n", len); + + rc = iwl_grab_restricted_access(priv); + if (rc) + return rc; + + for (i = 0; i < len; i += 100, image += 100/sizeof(u32)) { + /* read data comes through single port, auto-incr addr */ + /* NOTE: Use the debugless read so we don't flood kernel log + * if IWL_DL_IO is set */ + iwl_write_restricted(priv, HBUS_TARG_MEM_RADDR, + i + RTC_INST_LOWER_BOUND); + val = _iwl_read_restricted(priv, HBUS_TARG_MEM_RDAT); + if (val != le32_to_cpu(*image)) { +#if 0 /* Enable this if you want to see details */ + IWL_ERROR("uCode INST section is invalid at " + "offset 0x%x, is 0x%x, s/b 0x%x\n", + i, val, *image); +#endif + rc = -EIO; + errcnt++; + if (errcnt >= 3) + break; + } + } + + iwl_release_restricted_access(priv); + + return rc; +} + + +/** + * iwl_verify_ucode - determine which instruction image is in SRAM, + * and verify its contents + */ +static int iwl_verify_ucode(struct iwl_priv *priv) +{ + __le32 *image; + u32 len; + int rc = 0; + + /* Try bootstrap */ + image = (__le32 *)priv->ucode_boot.v_addr; + len = priv->ucode_boot.len; + rc = iwl_verify_inst_sparse(priv, image, len); + if (rc == 0) { + IWL_DEBUG_INFO("Bootstrap uCode is good in inst SRAM\n"); + return 0; + } + + /* Try initialize */ + image = (__le32 *)priv->ucode_init.v_addr; + len = priv->ucode_init.len; + rc = iwl_verify_inst_sparse(priv, image, len); + if (rc == 0) { + IWL_DEBUG_INFO("Initialize uCode is good in inst SRAM\n"); + return 0; + } + + /* Try runtime/protocol */ + image = (__le32 *)priv->ucode_code.v_addr; + len = priv->ucode_code.len; + rc = iwl_verify_inst_sparse(priv, image, len); + if (rc == 0) { + IWL_DEBUG_INFO("Runtime uCode is good in inst SRAM\n"); + return 0; + } + + IWL_ERROR("NO VALID UCODE IMAGE IN INSTRUCTION SRAM!!\n"); + + /* Show first several data entries in instruction SRAM. + * Selection of bootstrap image is arbitrary. 
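
iwl_verify_ucode() tries the bootstrap, initialize and runtime images in turn with the cheap sparse check, only falling back to the full comparison for diagnostics. The same fallback could be written as a loop over an image table; the sketch below is an alternative structuring, not how the patch spells it out.

/* Table-driven form of the three-image sparse check in iwl_verify_ucode().
 * Purely illustrative; the driver writes the three attempts out inline. */
static int iwl_verify_ucode_tabular(struct iwl_priv *priv)
{
	struct {
		const char *name;
		__le32 *image;
		u32 len;
	} images[] = {
		{ "Bootstrap",  (__le32 *)priv->ucode_boot.v_addr,
				priv->ucode_boot.len },
		{ "Initialize", (__le32 *)priv->ucode_init.v_addr,
				priv->ucode_init.len },
		{ "Runtime",    (__le32 *)priv->ucode_code.v_addr,
				priv->ucode_code.len },
	};
	int i;

	for (i = 0; i < ARRAY_SIZE(images); i++) {
		if (!iwl_verify_inst_sparse(priv, images[i].image,
					    images[i].len)) {
			IWL_DEBUG_INFO("%s uCode is good in inst SRAM\n",
				       images[i].name);
			return 0;
		}
	}

	IWL_ERROR("NO VALID UCODE IMAGE IN INSTRUCTION SRAM!!\n");
	return -EIO;
}
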
*/ + image = (__le32 *)priv->ucode_boot.v_addr; + len = priv->ucode_boot.len; + rc = iwl_verify_inst_full(priv, image, len); + + return rc; +} + + +/* check contents of special bootstrap uCode SRAM */ +static int iwl_verify_bsm(struct iwl_priv *priv) +{ + __le32 *image = priv->ucode_boot.v_addr; + u32 len = priv->ucode_boot.len; + u32 reg; + u32 val; + + IWL_DEBUG_INFO("Begin verify bsm\n"); + + /* verify BSM SRAM contents */ + val = iwl_read_restricted_reg(priv, BSM_WR_DWCOUNT_REG); + for (reg = BSM_SRAM_LOWER_BOUND; + reg < BSM_SRAM_LOWER_BOUND + len; + reg += sizeof(u32), image ++) { + val = iwl_read_restricted_reg(priv, reg); + if (val != le32_to_cpu(*image)) { + IWL_ERROR("BSM uCode verification failed at " + "addr 0x%08X+%u (of %u), is 0x%x, s/b 0x%x\n", + BSM_SRAM_LOWER_BOUND, + reg - BSM_SRAM_LOWER_BOUND, len, + val, le32_to_cpu(*image)); + return -EIO; + } + } + + IWL_DEBUG_INFO("BSM bootstrap uCode image OK\n"); + + return 0; +} + +/** + * iwl_load_bsm - Load bootstrap instructions + * + * BSM operation: + * + * The Bootstrap State Machine (BSM) stores a short bootstrap uCode program + * in special SRAM that does not power down during RFKILL. When powering back + * up after power-saving sleeps (or during initial uCode load), the BSM loads + * the bootstrap program into the on-board processor, and starts it. + * + * The bootstrap program loads (via DMA) instructions and data for a new + * program from host DRAM locations indicated by the host driver in the + * BSM_DRAM_* registers. Once the new program is loaded, it starts + * automatically. + * + * When initializing the NIC, the host driver points the BSM to the + * "initialize" uCode image. This uCode sets up some internal data, then + * notifies host via "initialize alive" that it is complete. + * + * The host then replaces the BSM_DRAM_* pointer values to point to the + * normal runtime uCode instructions and a backup uCode data cache buffer + * (filled initially with starting data values for the on-board processor), + * then triggers the "initialize" uCode to load and launch the runtime uCode, + * which begins normal operation. + * + * When doing a power-save shutdown, runtime uCode saves data SRAM into + * the backup data cache in DRAM before SRAM is powered down. + * + * When powering back up, the BSM loads the bootstrap program. This reloads + * the runtime uCode instructions and the backup data cache into SRAM, + * and re-launches the runtime uCode from where it left off. + */ +static int iwl_load_bsm(struct iwl_priv *priv) +{ + __le32 *image = priv->ucode_boot.v_addr; + u32 len = priv->ucode_boot.len; + dma_addr_t pinst; + dma_addr_t pdata; + u32 inst_len; + u32 data_len; + int rc; + int i; + u32 done; + u32 reg_offset; + + IWL_DEBUG_INFO("Begin load bsm\n"); + + /* make sure bootstrap program is no larger than BSM's SRAM size */ + if (len > IWL_MAX_BSM_SIZE) + return -EINVAL; + + /* Tell bootstrap uCode where to find the "Initialize" uCode + * in host DRAM ... bits 31:0 for 3945, bits 35:4 for 4965. + * NOTE: iwl_initialize_alive_start() will replace these values, + * after the "initialize" uCode has run, to point to + * runtime/protocol instructions and backup data cache. 
*/ + pinst = priv->ucode_init.p_addr; + pdata = priv->ucode_init_data.p_addr; + inst_len = priv->ucode_init.len; + data_len = priv->ucode_init_data.len; + + rc = iwl_grab_restricted_access(priv); + if (rc) + return rc; + + iwl_write_restricted_reg(priv, BSM_DRAM_INST_PTR_REG, pinst); + iwl_write_restricted_reg(priv, BSM_DRAM_DATA_PTR_REG, pdata); + iwl_write_restricted_reg(priv, BSM_DRAM_INST_BYTECOUNT_REG, inst_len); + iwl_write_restricted_reg(priv, BSM_DRAM_DATA_BYTECOUNT_REG, data_len); + + /* Fill BSM memory with bootstrap instructions */ + for (reg_offset = BSM_SRAM_LOWER_BOUND; + reg_offset < BSM_SRAM_LOWER_BOUND + len; + reg_offset += sizeof(u32), image++) + _iwl_write_restricted_reg(priv, reg_offset, + le32_to_cpu(*image)); + + rc = iwl_verify_bsm(priv); + if (rc) { + iwl_release_restricted_access(priv); + return rc; + } + + /* Tell BSM to copy from BSM SRAM into instruction SRAM, when asked */ + iwl_write_restricted_reg(priv, BSM_WR_MEM_SRC_REG, 0x0); + iwl_write_restricted_reg(priv, BSM_WR_MEM_DST_REG, + RTC_INST_LOWER_BOUND); + iwl_write_restricted_reg(priv, BSM_WR_DWCOUNT_REG, len / sizeof(u32)); + + /* Load bootstrap code into instruction SRAM now, + * to prepare to load "initialize" uCode */ + iwl_write_restricted_reg(priv, BSM_WR_CTRL_REG, + BSM_WR_CTRL_REG_BIT_START); + + /* Wait for load of bootstrap uCode to finish */ + for (i = 0; i < 100; i++) { + done = iwl_read_restricted_reg(priv, BSM_WR_CTRL_REG); + if (!(done & BSM_WR_CTRL_REG_BIT_START)) + break; + udelay(10); + } + if (i < 100) + IWL_DEBUG_INFO("BSM write complete, poll %d iterations\n", i); + else { + IWL_ERROR("BSM write did not complete!\n"); + return -EIO; + } + + /* Enable future boot loads whenever power management unit triggers it + * (e.g. when powering back up after power-save shutdown) */ + iwl_write_restricted_reg(priv, BSM_WR_CTRL_REG, + BSM_WR_CTRL_REG_BIT_START_EN); + + iwl_release_restricted_access(priv); + + return 0; +} + +static void iwl_nic_start(struct iwl_priv *priv) +{ + /* Remove all resets to allow NIC to operate */ + iwl_write32(priv, CSR_RESET, 0); +} + +/** + * iwl_read_ucode - Read uCode images from disk file. + * + * Copy into buffers for card to fetch via bus-mastering + */ +static int iwl_read_ucode(struct iwl_priv *priv) +{ + struct iwl_ucode *ucode; + int rc = 0; + const struct firmware *ucode_raw; + /* firmware file name contains uCode/driver compatibility version */ + const char *name = "iwlwifi-3945" IWL3945_UCODE_API ".ucode"; + u8 *src; + size_t len; + u32 ver, inst_size, data_size, init_size, init_data_size, boot_size; + + /* Ask kernel firmware_class module to get the boot firmware off disk. + * request_firmware() is synchronous, file is in memory on return. */ + rc = request_firmware(&ucode_raw, name, &priv->pci_dev->dev); + if (rc < 0) { + IWL_ERROR("%s firmware file req failed: Reason %d\n", name, rc); + goto error; + } + + IWL_DEBUG_INFO("Got firmware '%s' file (%zd bytes) from disk\n", + name, ucode_raw->size); + + /* Make sure that we got at least our header! 
*/ + if (ucode_raw->size < sizeof(*ucode)) { + IWL_ERROR("File size way too small!\n"); + rc = -EINVAL; + goto err_release; + } + + /* Data from ucode file: header followed by uCode images */ + ucode = (void *)ucode_raw->data; + + ver = le32_to_cpu(ucode->ver); + inst_size = le32_to_cpu(ucode->inst_size); + data_size = le32_to_cpu(ucode->data_size); + init_size = le32_to_cpu(ucode->init_size); + init_data_size = le32_to_cpu(ucode->init_data_size); + boot_size = le32_to_cpu(ucode->boot_size); + + IWL_DEBUG_INFO("f/w package hdr ucode version = 0x%x\n", ver); + IWL_DEBUG_INFO("f/w package hdr runtime inst size = %u\n", + inst_size); + IWL_DEBUG_INFO("f/w package hdr runtime data size = %u\n", + data_size); + IWL_DEBUG_INFO("f/w package hdr init inst size = %u\n", + init_size); + IWL_DEBUG_INFO("f/w package hdr init data size = %u\n", + init_data_size); + IWL_DEBUG_INFO("f/w package hdr boot inst size = %u\n", + boot_size); + + /* Verify size of file vs. image size info in file's header */ + if (ucode_raw->size < sizeof(*ucode) + + inst_size + data_size + init_size + + init_data_size + boot_size) { + + IWL_DEBUG_INFO("uCode file size %d too small\n", + (int)ucode_raw->size); + rc = -EINVAL; + goto err_release; + } + + /* Verify that uCode images will fit in card's SRAM */ + if (inst_size > IWL_MAX_INST_SIZE) { + IWL_DEBUG_INFO("uCode instr len %d too large to fit in card\n", + (int)inst_size); + rc = -EINVAL; + goto err_release; + } + + if (data_size > IWL_MAX_DATA_SIZE) { + IWL_DEBUG_INFO("uCode data len %d too large to fit in card\n", + (int)data_size); + rc = -EINVAL; + goto err_release; + } + if (init_size > IWL_MAX_INST_SIZE) { + IWL_DEBUG_INFO + ("uCode init instr len %d too large to fit in card\n", + (int)init_size); + rc = -EINVAL; + goto err_release; + } + if (init_data_size > IWL_MAX_DATA_SIZE) { + IWL_DEBUG_INFO + ("uCode init data len %d too large to fit in card\n", + (int)init_data_size); + rc = -EINVAL; + goto err_release; + } + if (boot_size > IWL_MAX_BSM_SIZE) { + IWL_DEBUG_INFO + ("uCode boot instr len %d too large to fit in bsm\n", + (int)boot_size); + rc = -EINVAL; + goto err_release; + } + + /* Allocate ucode buffers for card's bus-master loading ... 
*/ + + /* Runtime instructions and 2 copies of data: + * 1) unmodified from disk + * 2) backup cache for save/restore during power-downs */ + priv->ucode_code.len = inst_size; + priv->ucode_code.v_addr = + pci_alloc_consistent(priv->pci_dev, + priv->ucode_code.len, + &(priv->ucode_code.p_addr)); + + priv->ucode_data.len = data_size; + priv->ucode_data.v_addr = + pci_alloc_consistent(priv->pci_dev, + priv->ucode_data.len, + &(priv->ucode_data.p_addr)); + + priv->ucode_data_backup.len = data_size; + priv->ucode_data_backup.v_addr = + pci_alloc_consistent(priv->pci_dev, + priv->ucode_data_backup.len, + &(priv->ucode_data_backup.p_addr)); + + + /* Initialization instructions and data */ + priv->ucode_init.len = init_size; + priv->ucode_init.v_addr = + pci_alloc_consistent(priv->pci_dev, + priv->ucode_init.len, + &(priv->ucode_init.p_addr)); + + priv->ucode_init_data.len = init_data_size; + priv->ucode_init_data.v_addr = + pci_alloc_consistent(priv->pci_dev, + priv->ucode_init_data.len, + &(priv->ucode_init_data.p_addr)); + + /* Bootstrap (instructions only, no data) */ + priv->ucode_boot.len = boot_size; + priv->ucode_boot.v_addr = + pci_alloc_consistent(priv->pci_dev, + priv->ucode_boot.len, + &(priv->ucode_boot.p_addr)); + + if (!priv->ucode_code.v_addr || !priv->ucode_data.v_addr || + !priv->ucode_init.v_addr || !priv->ucode_init_data.v_addr || + !priv->ucode_boot.v_addr || !priv->ucode_data_backup.v_addr) + goto err_pci_alloc; + + /* Copy images into buffers for card's bus-master reads ... */ + + /* Runtime instructions (first block of data in file) */ + src = &ucode->data[0]; + len = priv->ucode_code.len; + IWL_DEBUG_INFO("Copying (but not loading) uCode instr len %d\n", + (int)len); + memcpy(priv->ucode_code.v_addr, src, len); + IWL_DEBUG_INFO("uCode instr buf vaddr = 0x%p, paddr = 0x%08x\n", + priv->ucode_code.v_addr, (u32)priv->ucode_code.p_addr); + + /* Runtime data (2nd block) + * NOTE: Copy into backup buffer will be done in iwl_up() */ + src = &ucode->data[inst_size]; + len = priv->ucode_data.len; + IWL_DEBUG_INFO("Copying (but not loading) uCode data len %d\n", + (int)len); + memcpy(priv->ucode_data.v_addr, src, len); + memcpy(priv->ucode_data_backup.v_addr, src, len); + + /* Initialization instructions (3rd block) */ + if (init_size) { + src = &ucode->data[inst_size + data_size]; + len = priv->ucode_init.len; + IWL_DEBUG_INFO("Copying (but not loading) init instr len %d\n", + (int)len); + memcpy(priv->ucode_init.v_addr, src, len); + } + + /* Initialization data (4th block) */ + if (init_data_size) { + src = &ucode->data[inst_size + data_size + init_size]; + len = priv->ucode_init_data.len; + IWL_DEBUG_INFO("Copying (but not loading) init data len %d\n", + (int)len); + memcpy(priv->ucode_init_data.v_addr, src, len); + } + + /* Bootstrap instructions (5th block) */ + src = &ucode->data[inst_size + data_size + init_size + init_data_size]; + len = priv->ucode_boot.len; + IWL_DEBUG_INFO("Copying (but not loading) boot instr len %d\n", + (int)len); + memcpy(priv->ucode_boot.v_addr, src, len); + + /* We have our copies now, allow OS release its copies */ + release_firmware(ucode_raw); + return 0; + + err_pci_alloc: + IWL_ERROR("failed to allocate pci memory\n"); + rc = -ENOMEM; + iwl_dealloc_ucode_pci(priv); + + err_release: + release_firmware(ucode_raw); + + error: + return rc; +} + + +/** + * iwl_set_ucode_ptrs - Set uCode address location + * + * Tell initialization uCode where to find runtime uCode. + * + * BSM registers initially contain pointers to initialization uCode. 
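
The copy loop above encodes the on-disk layout of the .ucode file: a header holding the version and the five block sizes, followed back to back by runtime instructions, runtime data, init instructions, init data and the bootstrap image. The hypothetical helper below (not in the patch) restates how the cumulative offsets into ucode->data[] are derived from those header fields.

/* Illustrative helper: offset of block N inside ucode->data[], given the
 * block sizes read from the file header. Blocks are stored consecutively,
 * so each offset is the sum of all earlier block sizes. */
static size_t iwl_ucode_block_offset(u32 inst_size, u32 data_size,
				     u32 init_size, u32 init_data_size,
				     int block)
{
	size_t offset = 0;

	if (block > 0)
		offset += inst_size;		/* past runtime instructions */
	if (block > 1)
		offset += data_size;		/* past runtime data */
	if (block > 2)
		offset += init_size;		/* past init instructions */
	if (block > 3)
		offset += init_data_size;	/* past init data */

	return offset;				/* block 4 is the bootstrap */
}
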
+ * We need to replace them to load runtime uCode inst and data, + * and to save runtime data when powering down. + */ +static int iwl_set_ucode_ptrs(struct iwl_priv *priv) +{ + dma_addr_t pinst; + dma_addr_t pdata; + int rc = 0; + unsigned long flags; + + /* bits 31:0 for 3945 */ + pinst = priv->ucode_code.p_addr; + pdata = priv->ucode_data_backup.p_addr; + + spin_lock_irqsave(&priv->lock, flags); + rc = iwl_grab_restricted_access(priv); + if (rc) { + spin_unlock_irqrestore(&priv->lock, flags); + return rc; + } + + /* Tell bootstrap uCode where to find image to load */ + iwl_write_restricted_reg(priv, BSM_DRAM_INST_PTR_REG, pinst); + iwl_write_restricted_reg(priv, BSM_DRAM_DATA_PTR_REG, pdata); + iwl_write_restricted_reg(priv, BSM_DRAM_DATA_BYTECOUNT_REG, + priv->ucode_data.len); + + /* Inst bytecount must be last to set up, bit 31 signals uCode + * that all new ptr/size info is in place */ + iwl_write_restricted_reg(priv, BSM_DRAM_INST_BYTECOUNT_REG, + priv->ucode_code.len | BSM_DRAM_INST_LOAD); + + iwl_release_restricted_access(priv); + + spin_unlock_irqrestore(&priv->lock, flags); + + IWL_DEBUG_INFO("Runtime uCode pointers are set.\n"); + + return rc; +} + +/** + * iwl_init_alive_start - Called after REPLY_ALIVE notification receieved + * + * Called after REPLY_ALIVE notification received from "initialize" uCode. + * + * The 4965 "initialize" ALIVE reply contains calibration data for: + * Voltage, temperature, and MIMO tx gain correction, now stored in priv + * (3945 does not contain this data). + * + * Tell "initialize" uCode to go ahead and load the runtime uCode. +*/ +static void iwl_init_alive_start(struct iwl_priv *priv) +{ + /* Check alive response for "valid" sign from uCode */ + if (priv->card_alive_init.is_valid != UCODE_VALID_OK) { + /* We had an error bringing up the hardware, so take it + * all the way back down so we can try again */ + IWL_DEBUG_INFO("Initialize Alive failed.\n"); + goto restart; + } + + /* Bootstrap uCode has loaded initialize uCode ... verify inst image. + * This is a paranoid check, because we would not have gotten the + * "initialize" alive if code weren't properly loaded. */ + if (iwl_verify_ucode(priv)) { + /* Runtime instruction load was bad; + * take it all the way back down so we can try again */ + IWL_DEBUG_INFO("Bad \"initialize\" uCode load.\n"); + goto restart; + } + + /* Send pointers to protocol/runtime uCode image ... init code will + * load and launch runtime uCode, which will send us another "Alive" + * notification. */ + IWL_DEBUG_INFO("Initialization Alive received.\n"); + if (iwl_set_ucode_ptrs(priv)) { + /* Runtime instruction load won't happen; + * take it all the way back down so we can try again */ + IWL_DEBUG_INFO("Couldn't set up uCode pointers.\n"); + goto restart; + } + return; + + restart: + queue_work(priv->workqueue, &priv->restart); +} + + +/** + * iwl_alive_start - called after REPLY_ALIVE notification received + * from protocol/runtime uCode (initialization uCode's + * Alive gets handled by iwl_init_alive_start()). + */ +static void iwl_alive_start(struct iwl_priv *priv) +{ + int rc = 0; + int thermal_spin = 0; + u32 rfkill; + + IWL_DEBUG_INFO("Runtime Alive received.\n"); + + if (priv->card_alive.is_valid != UCODE_VALID_OK) { + /* We had an error bringing up the hardware, so take it + * all the way back down so we can try again */ + IWL_DEBUG_INFO("Alive failed.\n"); + goto restart; + } + + /* Initialize uCode has loaded Runtime uCode ... verify inst image. 
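
iwl_set_ucode_ptrs() programs the same four BSM_DRAM_* registers that iwl_load_bsm() set up earlier, only now pointing at the runtime image and the backup data cache instead of the "initialize" image. A hypothetical shared helper (not in the patch) makes that symmetry visible; the caller would pass inst_len with BSM_DRAM_INST_LOAD ORed in when it wants the uCode to act on the new pointers immediately, and must already hold restricted access.

/* Hypothetical common helper for the two BSM_DRAM_* programming sites
 * (iwl_load_bsm() and iwl_set_ucode_ptrs()). Caller holds restricted
 * access; inst_len may carry BSM_DRAM_INST_LOAD in bit 31. */
static void iwl_bsm_set_dram_image(struct iwl_priv *priv,
				   dma_addr_t pinst, u32 inst_len,
				   dma_addr_t pdata, u32 data_len)
{
	iwl_write_restricted_reg(priv, BSM_DRAM_INST_PTR_REG, pinst);
	iwl_write_restricted_reg(priv, BSM_DRAM_DATA_PTR_REG, pdata);
	iwl_write_restricted_reg(priv, BSM_DRAM_DATA_BYTECOUNT_REG, data_len);
	/* Instruction byte count written last: bit 31 signals the uCode
	 * that the complete pointer/size set is in place. */
	iwl_write_restricted_reg(priv, BSM_DRAM_INST_BYTECOUNT_REG, inst_len);
}
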
+ * This is a paranoid check, because we would not have gotten the + * "runtime" alive if code weren't properly loaded. */ + if (iwl_verify_ucode(priv)) { + /* Runtime instruction load was bad; + * take it all the way back down so we can try again */ + IWL_DEBUG_INFO("Bad runtime uCode load.\n"); + goto restart; + } + + iwl_clear_stations_table(priv); + + rc = iwl_grab_restricted_access(priv); + if (rc) { + IWL_WARNING("Can not read rfkill status from adapter\n"); + return; + } + + rfkill = iwl_read_restricted_reg(priv, APMG_RFKILL_REG); + IWL_DEBUG_INFO("RFKILL status: 0x%x\n", rfkill); + iwl_release_restricted_access(priv); + + if (rfkill & 0x1) { + clear_bit(STATUS_RF_KILL_HW, &priv->status); + /* if rfkill is not on, then wait for thermal + * sensor in adapter to kick in */ + while (iwl_hw_get_temperature(priv) == 0) { + thermal_spin++; + udelay(10); + } + + if (thermal_spin) + IWL_DEBUG_INFO("Thermal calibration took %dus\n", + thermal_spin * 10); + } else + set_bit(STATUS_RF_KILL_HW, &priv->status); + + /* After the ALIVE response, we can process host commands */ + set_bit(STATUS_ALIVE, &priv->status); + + /* Clear out the uCode error bit if it is set */ + clear_bit(STATUS_FW_ERROR, &priv->status); + + rc = iwl_init_channel_map(priv); + if (rc) { + IWL_ERROR("initializing regulatory failed: %d\n", rc); + return; + } + + iwl_init_geos(priv); + + if (iwl_is_rfkill(priv)) + return; + + if (!priv->mac80211_registered) { + /* Unlock so any user space entry points can call back into + * the driver without a deadlock... */ + mutex_unlock(&priv->mutex); + iwl_rate_control_register(priv->hw); + rc = ieee80211_register_hw(priv->hw); + priv->hw->conf.beacon_int = 100; + mutex_lock(&priv->mutex); + + if (rc) { + IWL_ERROR("Failed to register network " + "device (error %d)\n", rc); + return; + } + + priv->mac80211_registered = 1; + + iwl_reset_channel_flag(priv); + } else + ieee80211_start_queues(priv->hw); + + priv->active_rate = priv->rates_mask; + priv->active_rate_basic = priv->rates_mask & IWL_BASIC_RATES_MASK; + + iwl_send_power_mode(priv, IWL_POWER_LEVEL(priv->power_mode)); + + if (iwl_is_associated(priv)) { + struct iwl_rxon_cmd *active_rxon = + (struct iwl_rxon_cmd *)(&priv->active_rxon); + + memcpy(&priv->staging_rxon, &priv->active_rxon, + sizeof(priv->staging_rxon)); + active_rxon->filter_flags &= ~RXON_FILTER_ASSOC_MSK; + } else { + /* Initialize our rx_config data */ + iwl_connection_init_rx_config(priv); + memcpy(priv->staging_rxon.node_addr, priv->mac_addr, ETH_ALEN); + } + + /* Configure BT coexistence */ + iwl_send_bt_config(priv); + + /* Configure the adapter for unassociated operation */ + iwl_commit_rxon(priv); + + /* At this point, the NIC is initialized and operational */ + priv->notif_missed_beacons = 0; + set_bit(STATUS_READY, &priv->status); + + iwl3945_reg_txpower_periodic(priv); + + IWL_DEBUG_INFO("ALIVE processing complete.\n"); + + if (priv->error_recovering) + iwl_error_recovery(priv); + + return; + + restart: + queue_work(priv->workqueue, &priv->restart); +} + +static void iwl_cancel_deferred_work(struct iwl_priv *priv); + +static void __iwl_down(struct iwl_priv *priv) +{ + unsigned long flags; + int exit_pending = test_bit(STATUS_EXIT_PENDING, &priv->status); + struct ieee80211_conf *conf = NULL; + + IWL_DEBUG_INFO(DRV_NAME " is going down\n"); + + conf = ieee80211_get_hw_conf(priv->hw); + + if (!exit_pending) + set_bit(STATUS_EXIT_PENDING, &priv->status); + + iwl_clear_stations_table(priv); + + /* Unblock any waiting calls */ + 
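
One detail worth noting in iwl_alive_start() above: when the HW RF kill line is clear, the wait for the thermal sensor spins with no upper bound, so a sensor that never reports a temperature would stall the work item indefinitely. The sketch below shows one possible bounded form of that poll; the one-second cap is an arbitrary illustrative choice, not something the patch implements.

/* Possible bounded variant of the thermal-sensor wait in iwl_alive_start().
 * The 100000 * 10us (1 second) cap is illustrative only. */
static void iwl_wait_for_thermal_sensor(struct iwl_priv *priv)
{
	int thermal_spin = 0;

	while (iwl_hw_get_temperature(priv) == 0 && thermal_spin < 100000) {
		thermal_spin++;
		udelay(10);
	}

	if (thermal_spin)
		IWL_DEBUG_INFO("Thermal calibration took %dus\n",
			       thermal_spin * 10);
}
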
wake_up_interruptible_all(&priv->wait_command_queue); + + iwl_cancel_deferred_work(priv); + + /* Wipe out the EXIT_PENDING status bit if we are not actually + * exiting the module */ + if (!exit_pending) + clear_bit(STATUS_EXIT_PENDING, &priv->status); + + /* stop and reset the on-board processor */ + iwl_write32(priv, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET); + + /* tell the device to stop sending interrupts */ + iwl_disable_interrupts(priv); + + if (priv->mac80211_registered) + ieee80211_stop_queues(priv->hw); + + /* If we have not previously called iwl_init() then + * clear all bits but the RF Kill and SUSPEND bits and return */ + if (!iwl_is_init(priv)) { + priv->status = test_bit(STATUS_RF_KILL_HW, &priv->status) << + STATUS_RF_KILL_HW | + test_bit(STATUS_RF_KILL_SW, &priv->status) << + STATUS_RF_KILL_SW | + test_bit(STATUS_IN_SUSPEND, &priv->status) << + STATUS_IN_SUSPEND; + goto exit; + } + + /* ...otherwise clear out all the status bits but the RF Kill and + * SUSPEND bits and continue taking the NIC down. */ + priv->status &= test_bit(STATUS_RF_KILL_HW, &priv->status) << + STATUS_RF_KILL_HW | + test_bit(STATUS_RF_KILL_SW, &priv->status) << + STATUS_RF_KILL_SW | + test_bit(STATUS_IN_SUSPEND, &priv->status) << + STATUS_IN_SUSPEND | + test_bit(STATUS_FW_ERROR, &priv->status) << + STATUS_FW_ERROR; + + spin_lock_irqsave(&priv->lock, flags); + iwl_clear_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); + spin_unlock_irqrestore(&priv->lock, flags); + + iwl_hw_txq_ctx_stop(priv); + iwl_hw_rxq_stop(priv); + + spin_lock_irqsave(&priv->lock, flags); + if (!iwl_grab_restricted_access(priv)) { + iwl_write_restricted_reg(priv, APMG_CLK_DIS_REG, + APMG_CLK_VAL_DMA_CLK_RQT); + iwl_release_restricted_access(priv); + } + spin_unlock_irqrestore(&priv->lock, flags); + + udelay(5); + + iwl_hw_nic_stop_master(priv); + iwl_set_bit(priv, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET); + iwl_hw_nic_reset(priv); + + exit: + memset(&priv->card_alive, 0, sizeof(struct iwl_alive_resp)); + + if (priv->ibss_beacon) + dev_kfree_skb(priv->ibss_beacon); + priv->ibss_beacon = NULL; + + /* clear out any free frames */ + iwl_clear_free_frames(priv); +} + +static void iwl_down(struct iwl_priv *priv) +{ + mutex_lock(&priv->mutex); + __iwl_down(priv); + mutex_unlock(&priv->mutex); +} + +#define MAX_HW_RESTARTS 5 + +static int __iwl_up(struct iwl_priv *priv) +{ + DECLARE_MAC_BUF(mac); + int rc, i; + + if (test_bit(STATUS_EXIT_PENDING, &priv->status)) { + IWL_WARNING("Exit pending; will not bring the NIC up\n"); + return -EIO; + } + + if (test_bit(STATUS_RF_KILL_SW, &priv->status)) { + IWL_WARNING("Radio disabled by SW RF kill (module " + "parameter)\n"); + return 0; + } + + iwl_write32(priv, CSR_INT, 0xFFFFFFFF); + + rc = iwl_hw_nic_init(priv); + if (rc) { + IWL_ERROR("Unable to int nic\n"); + return rc; + } + + /* make sure rfkill handshake bits are cleared */ + iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL); + iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, + CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED); + + /* clear (again), then enable host interrupts */ + iwl_write32(priv, CSR_INT, 0xFFFFFFFF); + iwl_enable_interrupts(priv); + + /* really make sure rfkill handshake bits are cleared */ + iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL); + iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL); + + /* Copy original ucode data image from disk into backup cache. 
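
The status handling in __iwl_down() above rebuilds priv->status from scratch: each test_bit() result (0 or 1) is shifted back to its own bit position, so only the RF kill, suspend and, in the second form, firmware-error bits survive the teardown while everything else (ALIVE, READY, scan state, ...) is dropped. A small worked illustration of that idiom on a local value:

/* Worked illustration of the bit-preserving idiom in __iwl_down(). If only
 * STATUS_RF_KILL_SW and STATUS_ALIVE were set, the rebuilt value keeps
 * RF_KILL_SW and drops ALIVE. */
static unsigned long iwl_example_preserve_status(unsigned long status)
{
	return (test_bit(STATUS_RF_KILL_HW, &status) << STATUS_RF_KILL_HW) |
	       (test_bit(STATUS_RF_KILL_SW, &status) << STATUS_RF_KILL_SW) |
	       (test_bit(STATUS_IN_SUSPEND, &status) << STATUS_IN_SUSPEND);
}
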
+ * This will be used to initialize the on-board processor's + * data SRAM for a clean start when the runtime program first loads. */ + memcpy(priv->ucode_data_backup.v_addr, priv->ucode_data.v_addr, + priv->ucode_data.len); + + for (i = 0; i < MAX_HW_RESTARTS; i++) { + + iwl_clear_stations_table(priv); + + /* load bootstrap state machine, + * load bootstrap program into processor's memory, + * prepare to load the "initialize" uCode */ + rc = iwl_load_bsm(priv); + + if (rc) { + IWL_ERROR("Unable to set up bootstrap uCode: %d\n", rc); + continue; + } + + /* start card; "initialize" will load runtime ucode */ + iwl_nic_start(priv); + + /* MAC Address location in EEPROM same for 3945/4965 */ + get_eeprom_mac(priv, priv->mac_addr); + IWL_DEBUG_INFO("MAC address: %s\n", + print_mac(mac, priv->mac_addr)); + + SET_IEEE80211_PERM_ADDR(priv->hw, priv->mac_addr); + + IWL_DEBUG_INFO(DRV_NAME " is coming up\n"); + + return 0; + } + + set_bit(STATUS_EXIT_PENDING, &priv->status); + __iwl_down(priv); + + /* tried to restart and config the device for as long as our + * patience could withstand */ + IWL_ERROR("Unable to initialize device after %d attempts.\n", i); + return -EIO; +} + + +/***************************************************************************** + * + * Workqueue callbacks + * + *****************************************************************************/ + +static void iwl_bg_init_alive_start(struct work_struct *data) +{ + struct iwl_priv *priv = + container_of(data, struct iwl_priv, init_alive_start.work); + + if (test_bit(STATUS_EXIT_PENDING, &priv->status)) + return; + + mutex_lock(&priv->mutex); + iwl_init_alive_start(priv); + mutex_unlock(&priv->mutex); +} + +static void iwl_bg_alive_start(struct work_struct *data) +{ + struct iwl_priv *priv = + container_of(data, struct iwl_priv, alive_start.work); + + if (test_bit(STATUS_EXIT_PENDING, &priv->status)) + return; + + mutex_lock(&priv->mutex); + iwl_alive_start(priv); + mutex_unlock(&priv->mutex); +} + +static void iwl_bg_rf_kill(struct work_struct *work) +{ + struct iwl_priv *priv = container_of(work, struct iwl_priv, rf_kill); + + wake_up_interruptible(&priv->wait_command_queue); + + if (test_bit(STATUS_EXIT_PENDING, &priv->status)) + return; + + mutex_lock(&priv->mutex); + + if (!iwl_is_rfkill(priv)) { + IWL_DEBUG(IWL_DL_INFO | IWL_DL_RF_KILL, + "HW and/or SW RF Kill no longer active, restarting " + "device\n"); + if (!test_bit(STATUS_EXIT_PENDING, &priv->status)) + queue_work(priv->workqueue, &priv->restart); + } else { + + if (!test_bit(STATUS_RF_KILL_HW, &priv->status)) + IWL_DEBUG_RF_KILL("Can not turn radio back on - " + "disabled by SW switch\n"); + else + IWL_WARNING("Radio Frequency Kill Switch is On:\n" + "Kill switch must be turned off for " + "wireless networking to work.\n"); + } + mutex_unlock(&priv->mutex); +} + +#define IWL_SCAN_CHECK_WATCHDOG (7 * HZ) + +static void iwl_bg_scan_check(struct work_struct *data) +{ + struct iwl_priv *priv = + container_of(data, struct iwl_priv, scan_check.work); + + if (test_bit(STATUS_EXIT_PENDING, &priv->status)) + return; + + mutex_lock(&priv->mutex); + if (test_bit(STATUS_SCANNING, &priv->status) || + test_bit(STATUS_SCAN_ABORTING, &priv->status)) { + IWL_DEBUG(IWL_DL_INFO | IWL_DL_SCAN, + "Scan completion watchdog resetting adapter (%dms)\n", + jiffies_to_msecs(IWL_SCAN_CHECK_WATCHDOG)); + if (!test_bit(STATUS_EXIT_PENDING, &priv->status)) + queue_work(priv->workqueue, &priv->restart); + } + mutex_unlock(&priv->mutex); +} + +static void iwl_bg_request_scan(struct work_struct 
*data) +{ + struct iwl_priv *priv = + container_of(data, struct iwl_priv, request_scan); + struct iwl_host_cmd cmd = { + .id = REPLY_SCAN_CMD, + .len = sizeof(struct iwl_scan_cmd), + .meta.flags = CMD_SIZE_HUGE, + }; + int rc = 0; + struct iwl_scan_cmd *scan; + struct ieee80211_conf *conf = NULL; + u8 direct_mask; + int phymode; + + conf = ieee80211_get_hw_conf(priv->hw); + + mutex_lock(&priv->mutex); + + if (!iwl_is_ready(priv)) { + IWL_WARNING("request scan called when driver not ready.\n"); + goto done; + } + + /* Make sure the scan wasn't cancelled before this queued work + * was given the chance to run... */ + if (!test_bit(STATUS_SCANNING, &priv->status)) + goto done; + + /* This should never be called or scheduled if there is currently + * a scan active in the hardware. */ + if (test_bit(STATUS_SCAN_HW, &priv->status)) { + IWL_DEBUG_INFO("Multiple concurrent scan requests in parallel. " + "Ignoring second request.\n"); + rc = -EIO; + goto done; + } + + if (test_bit(STATUS_EXIT_PENDING, &priv->status)) { + IWL_DEBUG_SCAN("Aborting scan due to device shutdown\n"); + goto done; + } + + if (test_bit(STATUS_SCAN_ABORTING, &priv->status)) { + IWL_DEBUG_HC("Scan request while abort pending. Queuing.\n"); + goto done; + } + + if (iwl_is_rfkill(priv)) { + IWL_DEBUG_HC("Aborting scan due to RF Kill activation\n"); + goto done; + } + + if (!test_bit(STATUS_READY, &priv->status)) { + IWL_DEBUG_HC("Scan request while uninitialized. Queuing.\n"); + goto done; + } + + if (!priv->scan_bands) { + IWL_DEBUG_HC("Aborting scan due to no requested bands\n"); + goto done; + } + + if (!priv->scan) { + priv->scan = kmalloc(sizeof(struct iwl_scan_cmd) + + IWL_MAX_SCAN_SIZE, GFP_KERNEL); + if (!priv->scan) { + rc = -ENOMEM; + goto done; + } + } + scan = priv->scan; + memset(scan, 0, sizeof(struct iwl_scan_cmd) + IWL_MAX_SCAN_SIZE); + + scan->quiet_plcp_th = IWL_PLCP_QUIET_THRESH; + scan->quiet_time = IWL_ACTIVE_QUIET_TIME; + + if (iwl_is_associated(priv)) { + u16 interval = 0; + u32 extra; + u32 suspend_time = 100; + u32 scan_suspend_time = 100; + unsigned long flags; + + IWL_DEBUG_INFO("Scanning while associated...\n"); + + spin_lock_irqsave(&priv->lock, flags); + interval = priv->beacon_int; + spin_unlock_irqrestore(&priv->lock, flags); + + scan->suspend_time = 0; + scan->max_out_time = cpu_to_le32(600 * 1024); + if (!interval) + interval = suspend_time; + /* + * suspend time format: + * 0-19: beacon interval in usec (time before exec.) 
+ * 20-23: 0 + * 24-31: number of beacons (suspend between channels) + */ + + extra = (suspend_time / interval) << 24; + scan_suspend_time = 0xFF0FFFFF & + (extra | ((suspend_time % interval) * 1024)); + + scan->suspend_time = cpu_to_le32(scan_suspend_time); + IWL_DEBUG_SCAN("suspend_time 0x%X beacon interval %d\n", + scan_suspend_time, interval); + } + + /* We should add the ability for user to lock to PASSIVE ONLY */ + if (priv->one_direct_scan) { + IWL_DEBUG_SCAN + ("Kicking off one direct scan for '%s'\n", + iwl_escape_essid(priv->direct_ssid, + priv->direct_ssid_len)); + scan->direct_scan[0].id = WLAN_EID_SSID; + scan->direct_scan[0].len = priv->direct_ssid_len; + memcpy(scan->direct_scan[0].ssid, + priv->direct_ssid, priv->direct_ssid_len); + direct_mask = 1; + } else if (!iwl_is_associated(priv)) { + scan->direct_scan[0].id = WLAN_EID_SSID; + scan->direct_scan[0].len = priv->essid_len; + memcpy(scan->direct_scan[0].ssid, priv->essid, priv->essid_len); + direct_mask = 1; + } else + direct_mask = 0; + + /* We don't build a direct scan probe request; the uCode will do + * that based on the direct_mask added to each channel entry */ + scan->tx_cmd.len = cpu_to_le16( + iwl_fill_probe_req(priv, (struct ieee80211_mgmt *)scan->data, + IWL_MAX_SCAN_SIZE - sizeof(scan), 0)); + scan->tx_cmd.tx_flags = TX_CMD_FLG_SEQ_CTL_MSK; + scan->tx_cmd.sta_id = priv->hw_setting.bcast_sta_id; + scan->tx_cmd.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE; + + /* flags + rate selection */ + + switch (priv->scan_bands) { + case 2: + scan->flags = RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK; + scan->tx_cmd.rate = IWL_RATE_1M_PLCP; + scan->good_CRC_th = 0; + phymode = MODE_IEEE80211G; + break; + + case 1: + scan->tx_cmd.rate = IWL_RATE_6M_PLCP; + scan->good_CRC_th = IWL_GOOD_CRC_TH; + phymode = MODE_IEEE80211A; + break; + + default: + IWL_WARNING("Invalid scan band count\n"); + goto done; + } + + /* select Rx antennas */ + scan->flags |= iwl3945_get_antenna_flags(priv); + + if (priv->iw_mode == IEEE80211_IF_TYPE_MNTR) + scan->filter_flags = RXON_FILTER_PROMISC_MSK; + + if (direct_mask) + IWL_DEBUG_SCAN + ("Initiating direct scan for %s.\n", + iwl_escape_essid(priv->essid, priv->essid_len)); + else + IWL_DEBUG_SCAN("Initiating indirect scan.\n"); + + scan->channel_count = + iwl_get_channels_for_scan( + priv, phymode, 1, /* active */ + direct_mask, + (void *)&scan->data[le16_to_cpu(scan->tx_cmd.len)]); + + cmd.len += le16_to_cpu(scan->tx_cmd.len) + + scan->channel_count * sizeof(struct iwl_scan_channel); + cmd.data = scan; + scan->len = cpu_to_le16(cmd.len); + + set_bit(STATUS_SCAN_HW, &priv->status); + rc = iwl_send_cmd_sync(priv, &cmd); + if (rc) + goto done; + + queue_delayed_work(priv->workqueue, &priv->scan_check, + IWL_SCAN_CHECK_WATCHDOG); + + mutex_unlock(&priv->mutex); + return; + + done: + /* inform mac80211 sacn aborted */ + queue_work(priv->workqueue, &priv->scan_completed); + mutex_unlock(&priv->mutex); +} + +static void iwl_bg_up(struct work_struct *data) +{ + struct iwl_priv *priv = container_of(data, struct iwl_priv, up); + + if (test_bit(STATUS_EXIT_PENDING, &priv->status)) + return; + + mutex_lock(&priv->mutex); + __iwl_up(priv); + mutex_unlock(&priv->mutex); +} + +static void iwl_bg_restart(struct work_struct *data) +{ + struct iwl_priv *priv = container_of(data, struct iwl_priv, restart); + + if (test_bit(STATUS_EXIT_PENDING, &priv->status)) + return; + + iwl_down(priv); + queue_work(priv->workqueue, &priv->up); +} + +static void iwl_bg_rx_replenish(struct work_struct *data) +{ + struct 
iwl_priv *priv = + container_of(data, struct iwl_priv, rx_replenish); + + if (test_bit(STATUS_EXIT_PENDING, &priv->status)) + return; + + mutex_lock(&priv->mutex); + iwl_rx_replenish(priv); + mutex_unlock(&priv->mutex); +} + +static void iwl_bg_post_associate(struct work_struct *data) +{ + struct iwl_priv *priv = container_of(data, struct iwl_priv, + post_associate.work); + + int rc = 0; + struct ieee80211_conf *conf = NULL; + DECLARE_MAC_BUF(mac); + + if (priv->iw_mode == IEEE80211_IF_TYPE_AP) { + IWL_ERROR("%s Should not be called in AP mode\n", __FUNCTION__); + return; + } + + + IWL_DEBUG_ASSOC("Associated as %d to: %s\n", + priv->assoc_id, + print_mac(mac, priv->active_rxon.bssid_addr)); + + if (test_bit(STATUS_EXIT_PENDING, &priv->status)) + return; + + mutex_lock(&priv->mutex); + + conf = ieee80211_get_hw_conf(priv->hw); + + priv->staging_rxon.filter_flags &= ~RXON_FILTER_ASSOC_MSK; + iwl_commit_rxon(priv); + + memset(&priv->rxon_timing, 0, sizeof(struct iwl_rxon_time_cmd)); + iwl_setup_rxon_timing(priv); + rc = iwl_send_cmd_pdu(priv, REPLY_RXON_TIMING, + sizeof(priv->rxon_timing), &priv->rxon_timing); + if (rc) + IWL_WARNING("REPLY_RXON_TIMING failed - " + "Attempting to continue.\n"); + + priv->staging_rxon.filter_flags |= RXON_FILTER_ASSOC_MSK; + + priv->staging_rxon.assoc_id = cpu_to_le16(priv->assoc_id); + + IWL_DEBUG_ASSOC("assoc id %d beacon interval %d\n", + priv->assoc_id, priv->beacon_int); + + if (priv->assoc_capability & WLAN_CAPABILITY_SHORT_PREAMBLE) + priv->staging_rxon.flags |= RXON_FLG_SHORT_PREAMBLE_MSK; + else + priv->staging_rxon.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK; + + if (priv->staging_rxon.flags & RXON_FLG_BAND_24G_MSK) { + if (priv->assoc_capability & WLAN_CAPABILITY_SHORT_SLOT_TIME) + priv->staging_rxon.flags |= RXON_FLG_SHORT_SLOT_MSK; + else + priv->staging_rxon.flags &= ~RXON_FLG_SHORT_SLOT_MSK; + + if (priv->iw_mode == IEEE80211_IF_TYPE_IBSS) + priv->staging_rxon.flags &= ~RXON_FLG_SHORT_SLOT_MSK; + + } + + iwl_commit_rxon(priv); + + switch (priv->iw_mode) { + case IEEE80211_IF_TYPE_STA: + iwl_rate_scale_init(priv->hw, IWL_AP_ID); + break; + + case IEEE80211_IF_TYPE_IBSS: + + /* clear out the station table */ + iwl_clear_stations_table(priv); + + iwl_add_station(priv, BROADCAST_ADDR, 0, 0); + iwl_add_station(priv, priv->bssid, 0, 0); + iwl3945_sync_sta(priv, IWL_STA_ID, + (priv->phymode == MODE_IEEE80211A)? 
+ IWL_RATE_6M_PLCP : IWL_RATE_1M_PLCP, + CMD_ASYNC); + iwl_rate_scale_init(priv->hw, IWL_STA_ID); + iwl_send_beacon_cmd(priv); + + break; + + default: + IWL_ERROR("%s Should not be called in %d mode\n", + __FUNCTION__, priv->iw_mode); + break; + } + + iwl_sequence_reset(priv); + +#ifdef CONFIG_IWLWIFI_QOS + iwl_activate_qos(priv, 0); +#endif /* CONFIG_IWLWIFI_QOS */ + mutex_unlock(&priv->mutex); +} + +static void iwl_bg_abort_scan(struct work_struct *work) +{ + struct iwl_priv *priv = container_of(work, struct iwl_priv, + abort_scan); + + if (!iwl_is_ready(priv)) + return; + + mutex_lock(&priv->mutex); + + set_bit(STATUS_SCAN_ABORTING, &priv->status); + iwl_send_scan_abort(priv); + + mutex_unlock(&priv->mutex); +} + +static void iwl_bg_scan_completed(struct work_struct *work) +{ + struct iwl_priv *priv = + container_of(work, struct iwl_priv, scan_completed); + + IWL_DEBUG(IWL_DL_INFO | IWL_DL_SCAN, "SCAN complete scan\n"); + + if (test_bit(STATUS_EXIT_PENDING, &priv->status)) + return; + + ieee80211_scan_completed(priv->hw); + + /* Since setting the TXPOWER may have been deferred while + * performing the scan, fire one off */ + mutex_lock(&priv->mutex); + iwl_hw_reg_send_txpower(priv); + mutex_unlock(&priv->mutex); +} + +/***************************************************************************** + * + * mac80211 entry point functions + * + *****************************************************************************/ + +static int iwl_mac_start(struct ieee80211_hw *hw) +{ + struct iwl_priv *priv = hw->priv; + + IWL_DEBUG_MAC80211("enter\n"); + + /* we should be verifying the device is ready to be opened */ + mutex_lock(&priv->mutex); + + priv->is_open = 1; + + if (!iwl_is_rfkill(priv)) + ieee80211_start_queues(priv->hw); + + mutex_unlock(&priv->mutex); + IWL_DEBUG_MAC80211("leave\n"); + return 0; +} + +static void iwl_mac_stop(struct ieee80211_hw *hw) +{ + struct iwl_priv *priv = hw->priv; + + IWL_DEBUG_MAC80211("enter\n"); + priv->is_open = 0; + /*netif_stop_queue(dev); */ + flush_workqueue(priv->workqueue); + IWL_DEBUG_MAC80211("leave\n"); +} + +static int iwl_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb, + struct ieee80211_tx_control *ctl) +{ + struct iwl_priv *priv = hw->priv; + + IWL_DEBUG_MAC80211("enter\n"); + + if (priv->iw_mode == IEEE80211_IF_TYPE_MNTR) { + IWL_DEBUG_MAC80211("leave - monitor\n"); + return -1; + } + + IWL_DEBUG_TX("dev->xmit(%d bytes) at rate 0x%02x\n", skb->len, + ctl->tx_rate); + + if (iwl_tx_skb(priv, skb, ctl)) + dev_kfree_skb_any(skb); + + IWL_DEBUG_MAC80211("leave\n"); + return 0; +} + +static int iwl_mac_add_interface(struct ieee80211_hw *hw, + struct ieee80211_if_init_conf *conf) +{ + struct iwl_priv *priv = hw->priv; + unsigned long flags; + DECLARE_MAC_BUF(mac); + + IWL_DEBUG_MAC80211("enter: id %d, type %d\n", conf->if_id, conf->type); + if (conf->mac_addr) + IWL_DEBUG_MAC80211("enter: MAC %s\n", + print_mac(mac, conf->mac_addr)); + + if (priv->interface_id) { + IWL_DEBUG_MAC80211("leave - interface_id != 0\n"); + return 0; + } + + spin_lock_irqsave(&priv->lock, flags); + priv->interface_id = conf->if_id; + + spin_unlock_irqrestore(&priv->lock, flags); + + mutex_lock(&priv->mutex); + iwl_set_mode(priv, conf->type); + + IWL_DEBUG_MAC80211("leave\n"); + mutex_unlock(&priv->mutex); + + return 0; +} + +/** + * iwl_mac_config - mac80211 config callback + * + * We ignore conf->flags & IEEE80211_CONF_SHORT_SLOT_TIME since it seems to + * be set inappropriately and the driver currently sets the hardware up to + * use it whenever needed. 
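
The iwl_mac_* functions in this stretch of the file are the driver's mac80211 entry points. The patch presumably collects them into an ieee80211_ops table elsewhere in the file, after all of the handlers are defined, which is why no such table appears here; the sketch below shows what that wiring would look like, with field names based on the mac80211 API of this kernel generation rather than copied from the patch itself.

/* Sketch only: how the iwl_mac_* handlers plug into mac80211. The real
 * table must follow the handler definitions and may list additional
 * callbacks. */
static struct ieee80211_ops iwl_hw_ops_sketch = {
	.tx = iwl_mac_tx,
	.start = iwl_mac_start,
	.stop = iwl_mac_stop,
	.add_interface = iwl_mac_add_interface,
	.remove_interface = iwl_mac_remove_interface,
	.config = iwl_mac_config,
	.config_interface = iwl_mac_config_interface,
	.configure_filter = iwl_configure_filter,
	.set_key = iwl_mac_set_key,
	.hw_scan = iwl_mac_hw_scan,
	.conf_tx = iwl_mac_conf_tx,
	.get_tx_stats = iwl_mac_get_tx_stats,
	.get_stats = iwl_mac_get_stats,
	.get_tsf = iwl_mac_get_tsf,
	.reset_tsf = iwl_mac_reset_tsf,
};
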
+ */ +static int iwl_mac_config(struct ieee80211_hw *hw, struct ieee80211_conf *conf) +{ + struct iwl_priv *priv = hw->priv; + const struct iwl_channel_info *ch_info; + unsigned long flags; + + mutex_lock(&priv->mutex); + IWL_DEBUG_MAC80211("enter to channel %d\n", conf->channel); + + if (!iwl_is_ready(priv)) { + IWL_DEBUG_MAC80211("leave - not ready\n"); + mutex_unlock(&priv->mutex); + return -EIO; + } + + /* TODO: Figure out how to get ieee80211_local->sta_scanning w/ only + * what is exposed through include/ declrations */ + if (unlikely(!iwl_param_disable_hw_scan && + test_bit(STATUS_SCANNING, &priv->status))) { + IWL_DEBUG_MAC80211("leave - scanning\n"); + mutex_unlock(&priv->mutex); + return 0; + } + + spin_lock_irqsave(&priv->lock, flags); + + ch_info = iwl_get_channel_info(priv, conf->phymode, conf->channel); + if (!is_channel_valid(ch_info)) { + IWL_DEBUG_SCAN("Channel %d [%d] is INVALID for this SKU.\n", + conf->channel, conf->phymode); + IWL_DEBUG_MAC80211("leave - invalid channel\n"); + spin_unlock_irqrestore(&priv->lock, flags); + mutex_unlock(&priv->mutex); + return -EINVAL; + } + + iwl_set_rxon_channel(priv, conf->phymode, conf->channel); + + iwl_set_flags_for_phymode(priv, conf->phymode); + + /* The list of supported rates and rate mask can be different + * for each phymode; since the phymode may have changed, reset + * the rate mask to what mac80211 lists */ + iwl_set_rate(priv); + + spin_unlock_irqrestore(&priv->lock, flags); + +#ifdef IEEE80211_CONF_CHANNEL_SWITCH + if (conf->flags & IEEE80211_CONF_CHANNEL_SWITCH) { + iwl_hw_channel_switch(priv, conf->channel); + mutex_unlock(&priv->mutex); + return 0; + } +#endif + + iwl_radio_kill_sw(priv, !conf->radio_enabled); + + if (!conf->radio_enabled) { + IWL_DEBUG_MAC80211("leave - radio disabled\n"); + mutex_unlock(&priv->mutex); + return 0; + } + + if (iwl_is_rfkill(priv)) { + IWL_DEBUG_MAC80211("leave - RF kill\n"); + mutex_unlock(&priv->mutex); + return -EIO; + } + + iwl_set_rate(priv); + + if (memcmp(&priv->active_rxon, + &priv->staging_rxon, sizeof(priv->staging_rxon))) + iwl_commit_rxon(priv); + else + IWL_DEBUG_INFO("No re-sending same RXON configuration.\n"); + + IWL_DEBUG_MAC80211("leave\n"); + + mutex_unlock(&priv->mutex); + + return 0; +} + +static void iwl_config_ap(struct iwl_priv *priv) +{ + int rc = 0; + + if (priv->status & STATUS_EXIT_PENDING) + return; + + /* The following should be done only at AP bring up */ + if ((priv->active_rxon.filter_flags & RXON_FILTER_ASSOC_MSK) == 0) { + + /* RXON - unassoc (to set timing command) */ + priv->staging_rxon.filter_flags &= ~RXON_FILTER_ASSOC_MSK; + iwl_commit_rxon(priv); + + /* RXON Timing */ + memset(&priv->rxon_timing, 0, sizeof(struct iwl_rxon_time_cmd)); + iwl_setup_rxon_timing(priv); + rc = iwl_send_cmd_pdu(priv, REPLY_RXON_TIMING, + sizeof(priv->rxon_timing), &priv->rxon_timing); + if (rc) + IWL_WARNING("REPLY_RXON_TIMING failed - " + "Attempting to continue.\n"); + + /* FIXME: what should be the assoc_id for AP? 
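
One inconsistency worth flagging in iwl_config_ap() above: every other exit-pending check in this file uses test_bit(STATUS_EXIT_PENDING, &priv->status), but here the bit number is ANDed directly with priv->status, which tests the wrong bits unless STATUS_EXIT_PENDING happens to be a power-of-two mask. The form consistent with the rest of the driver would be:

	/* STATUS_EXIT_PENDING is a bit index, not a mask */
	if (test_bit(STATUS_EXIT_PENDING, &priv->status))
		return;
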
*/ + priv->staging_rxon.assoc_id = cpu_to_le16(priv->assoc_id); + if (priv->assoc_capability & WLAN_CAPABILITY_SHORT_PREAMBLE) + priv->staging_rxon.flags |= + RXON_FLG_SHORT_PREAMBLE_MSK; + else + priv->staging_rxon.flags &= + ~RXON_FLG_SHORT_PREAMBLE_MSK; + + if (priv->staging_rxon.flags & RXON_FLG_BAND_24G_MSK) { + if (priv->assoc_capability & + WLAN_CAPABILITY_SHORT_SLOT_TIME) + priv->staging_rxon.flags |= + RXON_FLG_SHORT_SLOT_MSK; + else + priv->staging_rxon.flags &= + ~RXON_FLG_SHORT_SLOT_MSK; + + if (priv->iw_mode == IEEE80211_IF_TYPE_IBSS) + priv->staging_rxon.flags &= + ~RXON_FLG_SHORT_SLOT_MSK; + } + /* restore RXON assoc */ + priv->staging_rxon.filter_flags |= RXON_FILTER_ASSOC_MSK; + iwl_commit_rxon(priv); + iwl_add_station(priv, BROADCAST_ADDR, 0, 0); + } + iwl_send_beacon_cmd(priv); + + /* FIXME - we need to add code here to detect a totally new + * configuration, reset the AP, unassoc, rxon timing, assoc, + * clear sta table, add BCAST sta... */ +} + +static int iwl_mac_config_interface(struct ieee80211_hw *hw, int if_id, + struct ieee80211_if_conf *conf) +{ + struct iwl_priv *priv = hw->priv; + DECLARE_MAC_BUF(mac); + unsigned long flags; + int rc; + + if (conf == NULL) + return -EIO; + + /* XXX: this MUST use conf->mac_addr */ + + if ((priv->iw_mode == IEEE80211_IF_TYPE_AP) && + (!conf->beacon || !conf->ssid_len)) { + IWL_DEBUG_MAC80211 + ("Leaving in AP mode because HostAPD is not ready.\n"); + return 0; + } + + mutex_lock(&priv->mutex); + + IWL_DEBUG_MAC80211("enter: interface id %d\n", if_id); + if (conf->bssid) + IWL_DEBUG_MAC80211("bssid: %s\n", + print_mac(mac, conf->bssid)); + +/* + * very dubious code was here; the probe filtering flag is never set: + * + if (unlikely(test_bit(STATUS_SCANNING, &priv->status)) && + !(priv->hw->flags & IEEE80211_HW_NO_PROBE_FILTERING)) { + */ + if (unlikely(test_bit(STATUS_SCANNING, &priv->status))) { + IWL_DEBUG_MAC80211("leave - scanning\n"); + mutex_unlock(&priv->mutex); + return 0; + } + + if (priv->interface_id != if_id) { + IWL_DEBUG_MAC80211("leave - interface_id != if_id\n"); + mutex_unlock(&priv->mutex); + return 0; + } + + if (priv->iw_mode == IEEE80211_IF_TYPE_AP) { + if (!conf->bssid) { + conf->bssid = priv->mac_addr; + memcpy(priv->bssid, priv->mac_addr, ETH_ALEN); + IWL_DEBUG_MAC80211("bssid was set to: %s\n", + print_mac(mac, conf->bssid)); + } + if (priv->ibss_beacon) + dev_kfree_skb(priv->ibss_beacon); + + priv->ibss_beacon = conf->beacon; + } + + if (conf->bssid && !is_zero_ether_addr(conf->bssid) && + !is_multicast_ether_addr(conf->bssid)) { + /* If there is currently a HW scan going on in the background + * then we need to cancel it else the RXON below will fail. 
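
The short-preamble / short-slot handling in iwl_config_ap() above mirrors the block in iwl_bg_post_associate() almost line for line: copy the association capability bits into the staging RXON flags, and never allow short slot for IBSS on 2.4 GHz. A hypothetical helper that both call sites could share (not part of the patch) reads:

/* Hypothetical helper factoring the duplicated preamble/slot-time logic
 * from iwl_bg_post_associate() and iwl_config_ap(). */
static void iwl_set_short_preamble_slot(struct iwl_priv *priv)
{
	if (priv->assoc_capability & WLAN_CAPABILITY_SHORT_PREAMBLE)
		priv->staging_rxon.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
	else
		priv->staging_rxon.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK;

	if (priv->staging_rxon.flags & RXON_FLG_BAND_24G_MSK) {
		if (priv->assoc_capability & WLAN_CAPABILITY_SHORT_SLOT_TIME)
			priv->staging_rxon.flags |= RXON_FLG_SHORT_SLOT_MSK;
		else
			priv->staging_rxon.flags &= ~RXON_FLG_SHORT_SLOT_MSK;

		/* Short slot time is never used in IBSS on 2.4 GHz */
		if (priv->iw_mode == IEEE80211_IF_TYPE_IBSS)
			priv->staging_rxon.flags &= ~RXON_FLG_SHORT_SLOT_MSK;
	}
}
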
*/ + if (iwl_scan_cancel_timeout(priv, 100)) { + IWL_WARNING("Aborted scan still in progress " + "after 100ms\n"); + IWL_DEBUG_MAC80211("leaving - scan abort failed.\n"); + mutex_unlock(&priv->mutex); + return -EAGAIN; + } + memcpy(priv->staging_rxon.bssid_addr, conf->bssid, ETH_ALEN); + + /* TODO: Audit driver for usage of these members and see + * if mac80211 deprecates them (priv->bssid looks like it + * shouldn't be there, but I haven't scanned the IBSS code + * to verify) - jpk */ + memcpy(priv->bssid, conf->bssid, ETH_ALEN); + + if (priv->iw_mode == IEEE80211_IF_TYPE_AP) + iwl_config_ap(priv); + else { + priv->staging_rxon.filter_flags |= + RXON_FILTER_ASSOC_MSK; + rc = iwl_commit_rxon(priv); + if ((priv->iw_mode == IEEE80211_IF_TYPE_STA) && rc) + iwl_add_station(priv, + priv->active_rxon.bssid_addr, 1, 0); + } + + } else { + priv->staging_rxon.filter_flags &= ~RXON_FILTER_ASSOC_MSK; + iwl_commit_rxon(priv); + } + + spin_lock_irqsave(&priv->lock, flags); + if (!conf->ssid_len) + memset(priv->essid, 0, IW_ESSID_MAX_SIZE); + else + memcpy(priv->essid, conf->ssid, conf->ssid_len); + + priv->essid_len = conf->ssid_len; + spin_unlock_irqrestore(&priv->lock, flags); + + IWL_DEBUG_MAC80211("leave\n"); + mutex_unlock(&priv->mutex); + + return 0; +} + +static void iwl_configure_filter(struct ieee80211_hw *hw, + unsigned int changed_flags, + unsigned int *total_flags, + int mc_count, struct dev_addr_list *mc_list) +{ + /* + * XXX: dummy + * see also iwl_connection_init_rx_config + */ + *total_flags = 0; +} + +static void iwl_mac_remove_interface(struct ieee80211_hw *hw, + struct ieee80211_if_init_conf *conf) +{ + struct iwl_priv *priv = hw->priv; + + IWL_DEBUG_MAC80211("enter\n"); + + mutex_lock(&priv->mutex); + if (priv->interface_id == conf->if_id) { + priv->interface_id = 0; + memset(priv->bssid, 0, ETH_ALEN); + memset(priv->essid, 0, IW_ESSID_MAX_SIZE); + priv->essid_len = 0; + } + mutex_unlock(&priv->mutex); + + IWL_DEBUG_MAC80211("leave\n"); + +} + +#define IWL_DELAY_NEXT_SCAN (HZ*2) +static int iwl_mac_hw_scan(struct ieee80211_hw *hw, u8 *ssid, size_t len) +{ + int rc = 0; + unsigned long flags; + struct iwl_priv *priv = hw->priv; + + IWL_DEBUG_MAC80211("enter\n"); + + spin_lock_irqsave(&priv->lock, flags); + + if (!iwl_is_ready_rf(priv)) { + rc = -EIO; + IWL_DEBUG_MAC80211("leave - not ready or exit pending\n"); + goto out_unlock; + } + + if (priv->iw_mode == IEEE80211_IF_TYPE_AP) { /* APs don't scan */ + rc = -EIO; + IWL_ERROR("ERROR: APs don't scan\n"); + goto out_unlock; + } + + /* if we just finished scan ask for delay */ + if (priv->last_scan_jiffies && + time_after(priv->last_scan_jiffies + IWL_DELAY_NEXT_SCAN, + jiffies)) { + rc = -EAGAIN; + goto out_unlock; + } + if (len) { + IWL_DEBUG_SCAN("direct scan for " + "%s [%d]\n ", + iwl_escape_essid(ssid, len), (int)len); + + priv->one_direct_scan = 1; + priv->direct_ssid_len = (u8) + min((u8) len, (u8) IW_ESSID_MAX_SIZE); + memcpy(priv->direct_ssid, ssid, priv->direct_ssid_len); + } + + rc = iwl_scan_initiate(priv); + + IWL_DEBUG_MAC80211("leave\n"); + +out_unlock: + spin_unlock_irqrestore(&priv->lock, flags); + + return rc; +} + +static int iwl_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, + const u8 *local_addr, const u8 *addr, + struct ieee80211_key_conf *key) +{ + struct iwl_priv *priv = hw->priv; + int rc = 0; + u8 sta_id; + + IWL_DEBUG_MAC80211("enter\n"); + + if (!iwl_param_hwcrypto) { + IWL_DEBUG_MAC80211("leave - hwcrypto disabled\n"); + return -EOPNOTSUPP; + } + + if (is_zero_ether_addr(addr)) + /* only 
support pairwise keys */ + return -EOPNOTSUPP; + + sta_id = iwl_hw_find_station(priv, addr); + if (sta_id == IWL_INVALID_STATION) { + DECLARE_MAC_BUF(mac); + + IWL_DEBUG_MAC80211("leave - %s not in station map.\n", + print_mac(mac, addr)); + return -EINVAL; + } + + mutex_lock(&priv->mutex); + + switch (cmd) { + case SET_KEY: + rc = iwl_update_sta_key_info(priv, key, sta_id); + if (!rc) { + iwl_set_rxon_hwcrypto(priv, 1); + iwl_commit_rxon(priv); + key->hw_key_idx = sta_id; + IWL_DEBUG_MAC80211("set_key success, using hwcrypto\n"); + key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV; + } + break; + case DISABLE_KEY: + rc = iwl_clear_sta_key_info(priv, sta_id); + if (!rc) { + iwl_set_rxon_hwcrypto(priv, 0); + iwl_commit_rxon(priv); + IWL_DEBUG_MAC80211("disable hwcrypto key\n"); + } + break; + default: + rc = -EINVAL; + } + + IWL_DEBUG_MAC80211("leave\n"); + mutex_unlock(&priv->mutex); + + return rc; +} + +static int iwl_mac_conf_tx(struct ieee80211_hw *hw, int queue, + const struct ieee80211_tx_queue_params *params) +{ + struct iwl_priv *priv = hw->priv; +#ifdef CONFIG_IWLWIFI_QOS + unsigned long flags; + int q; +#endif /* CONFIG_IWL_QOS */ + + IWL_DEBUG_MAC80211("enter\n"); + + if (!iwl_is_ready_rf(priv)) { + IWL_DEBUG_MAC80211("leave - RF not ready\n"); + return -EIO; + } + + if (queue >= AC_NUM) { + IWL_DEBUG_MAC80211("leave - queue >= AC_NUM %d\n", queue); + return 0; + } + +#ifdef CONFIG_IWLWIFI_QOS + if (!priv->qos_data.qos_enable) { + priv->qos_data.qos_active = 0; + IWL_DEBUG_MAC80211("leave - qos not enabled\n"); + return 0; + } + q = AC_NUM - 1 - queue; + + spin_lock_irqsave(&priv->lock, flags); + + priv->qos_data.def_qos_parm.ac[q].cw_min = cpu_to_le16(params->cw_min); + priv->qos_data.def_qos_parm.ac[q].cw_max = cpu_to_le16(params->cw_max); + priv->qos_data.def_qos_parm.ac[q].aifsn = params->aifs; + priv->qos_data.def_qos_parm.ac[q].edca_txop = + cpu_to_le16((params->burst_time * 100)); + + priv->qos_data.def_qos_parm.ac[q].reserved1 = 0; + priv->qos_data.qos_active = 1; + + spin_unlock_irqrestore(&priv->lock, flags); + + mutex_lock(&priv->mutex); + if (priv->iw_mode == IEEE80211_IF_TYPE_AP) + iwl_activate_qos(priv, 1); + else if (priv->assoc_id && iwl_is_associated(priv)) + iwl_activate_qos(priv, 0); + + mutex_unlock(&priv->mutex); + +#endif /*CONFIG_IWLWIFI_QOS */ + + IWL_DEBUG_MAC80211("leave\n"); + return 0; +} + +static int iwl_mac_get_tx_stats(struct ieee80211_hw *hw, + struct ieee80211_tx_queue_stats *stats) +{ + struct iwl_priv *priv = hw->priv; + int i, avail; + struct iwl_tx_queue *txq; + struct iwl_queue *q; + unsigned long flags; + + IWL_DEBUG_MAC80211("enter\n"); + + if (!iwl_is_ready_rf(priv)) { + IWL_DEBUG_MAC80211("leave - RF not ready\n"); + return -EIO; + } + + spin_lock_irqsave(&priv->lock, flags); + + for (i = 0; i < AC_NUM; i++) { + txq = &priv->txq[i]; + q = &txq->q; + avail = iwl_queue_space(q); + + stats->data[i].len = q->n_window - avail; + stats->data[i].limit = q->n_window - q->high_mark; + stats->data[i].count = q->n_window; + + } + spin_unlock_irqrestore(&priv->lock, flags); + + IWL_DEBUG_MAC80211("leave\n"); + + return 0; +} + +static int iwl_mac_get_stats(struct ieee80211_hw *hw, + struct ieee80211_low_level_stats *stats) +{ + IWL_DEBUG_MAC80211("enter\n"); + IWL_DEBUG_MAC80211("leave\n"); + + return 0; +} + +static u64 iwl_mac_get_tsf(struct ieee80211_hw *hw) +{ + IWL_DEBUG_MAC80211("enter\n"); + IWL_DEBUG_MAC80211("leave\n"); + + return 0; +} + +static void iwl_mac_reset_tsf(struct ieee80211_hw *hw) +{ + struct iwl_priv *priv = hw->priv; + unsigned 
long flags; + + mutex_lock(&priv->mutex); + IWL_DEBUG_MAC80211("enter\n"); + +#ifdef CONFIG_IWLWIFI_QOS + iwl_reset_qos(priv); +#endif + cancel_delayed_work(&priv->post_associate); + + spin_lock_irqsave(&priv->lock, flags); + priv->assoc_id = 0; + priv->assoc_capability = 0; + priv->call_post_assoc_from_beacon = 0; + + /* new association get rid of ibss beacon skb */ + if (priv->ibss_beacon) + dev_kfree_skb(priv->ibss_beacon); + + priv->ibss_beacon = NULL; + + priv->beacon_int = priv->hw->conf.beacon_int; + priv->timestamp1 = 0; + priv->timestamp0 = 0; + if ((priv->iw_mode == IEEE80211_IF_TYPE_STA)) + priv->beacon_int = 0; + + spin_unlock_irqrestore(&priv->lock, flags); + + /* Per mac80211.h: This is only used in IBSS mode... */ + if (priv->iw_mode != IEEE80211_IF_TYPE_IBSS) { + IWL_DEBUG_MAC80211("leave - not in IBSS\n"); + mutex_unlock(&priv->mutex); + return; + } + + if (!iwl_is_ready_rf(priv)) { + IWL_DEBUG_MAC80211("leave - not ready\n"); + mutex_unlock(&priv->mutex); + return; + } + + priv->only_active_channel = 0; + + iwl_set_rate(priv); + + mutex_unlock(&priv->mutex); + + IWL_DEBUG_MAC80211("leave\n"); + +} + +static int iwl_mac_beacon_update(struct ieee80211_hw *hw, struct sk_buff *skb, + struct ieee80211_tx_control *control) +{ + struct iwl_priv *priv = hw->priv; + unsigned long flags; + + mutex_lock(&priv->mutex); + IWL_DEBUG_MAC80211("enter\n"); + + if (!iwl_is_ready_rf(priv)) { + IWL_DEBUG_MAC80211("leave - RF not ready\n"); + mutex_unlock(&priv->mutex); + return -EIO; + } + + if (priv->iw_mode != IEEE80211_IF_TYPE_IBSS) { + IWL_DEBUG_MAC80211("leave - not IBSS\n"); + mutex_unlock(&priv->mutex); + return -EIO; + } + + spin_lock_irqsave(&priv->lock, flags); + + if (priv->ibss_beacon) + dev_kfree_skb(priv->ibss_beacon); + + priv->ibss_beacon = skb; + + priv->assoc_id = 0; + + IWL_DEBUG_MAC80211("leave\n"); + spin_unlock_irqrestore(&priv->lock, flags); + +#ifdef CONFIG_IWLWIFI_QOS + iwl_reset_qos(priv); +#endif + + queue_work(priv->workqueue, &priv->post_associate.work); + + mutex_unlock(&priv->mutex); + + return 0; +} + +/***************************************************************************** + * + * sysfs attributes + * + *****************************************************************************/ + +#ifdef CONFIG_IWLWIFI_DEBUG + +/* + * The following adds a new attribute to the sysfs representation + * of this device driver (i.e. a new file in /sys/bus/pci/drivers/iwl/) + * used for controlling the debug level. + * + * See the level definitions in iwl for details. + */ + +static ssize_t show_debug_level(struct device_driver *d, char *buf) +{ + return sprintf(buf, "0x%08X\n", iwl_debug_level); +} +static ssize_t store_debug_level(struct device_driver *d, + const char *buf, size_t count) +{ + char *p = (char *)buf; + u32 val; + + val = simple_strtoul(p, &p, 0); + if (p == buf) + printk(KERN_INFO DRV_NAME + ": %s is not in hex or decimal form.\n", buf); + else + iwl_debug_level = val; + + return strnlen(buf, count); +} + +static DRIVER_ATTR(debug_level, S_IWUSR | S_IRUGO, + show_debug_level, store_debug_level); + +#endif /* CONFIG_IWLWIFI_DEBUG */ + +static ssize_t show_rf_kill(struct device *d, + struct device_attribute *attr, char *buf) +{ + /* + * 0 - RF kill not enabled + * 1 - SW based RF kill active (sysfs) + * 2 - HW based RF kill active + * 3 - Both HW and SW based RF kill active + */ + struct iwl_priv *priv = (struct iwl_priv *)d->driver_data; + int val = (test_bit(STATUS_RF_KILL_SW, &priv->status) ? 0x1 : 0x0) | + (test_bit(STATUS_RF_KILL_HW, &priv->status) ? 
0x2 : 0x0); + + return sprintf(buf, "%i\n", val); +} + +static ssize_t store_rf_kill(struct device *d, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct iwl_priv *priv = (struct iwl_priv *)d->driver_data; + + mutex_lock(&priv->mutex); + iwl_radio_kill_sw(priv, buf[0] == '1'); + mutex_unlock(&priv->mutex); + + return count; +} + +static DEVICE_ATTR(rf_kill, S_IWUSR | S_IRUGO, show_rf_kill, store_rf_kill); + +static ssize_t show_temperature(struct device *d, + struct device_attribute *attr, char *buf) +{ + struct iwl_priv *priv = (struct iwl_priv *)d->driver_data; + + if (!iwl_is_alive(priv)) + return -EAGAIN; + + return sprintf(buf, "%d\n", iwl_hw_get_temperature(priv)); +} + +static DEVICE_ATTR(temperature, S_IRUGO, show_temperature, NULL); + +static ssize_t show_rs_window(struct device *d, + struct device_attribute *attr, + char *buf) +{ + struct iwl_priv *priv = d->driver_data; + return iwl_fill_rs_info(priv->hw, buf, IWL_AP_ID); +} +static DEVICE_ATTR(rs_window, S_IRUGO, show_rs_window, NULL); + +static ssize_t show_tx_power(struct device *d, + struct device_attribute *attr, char *buf) +{ + struct iwl_priv *priv = (struct iwl_priv *)d->driver_data; + return sprintf(buf, "%d\n", priv->user_txpower_limit); +} + +static ssize_t store_tx_power(struct device *d, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct iwl_priv *priv = (struct iwl_priv *)d->driver_data; + char *p = (char *)buf; + u32 val; + + val = simple_strtoul(p, &p, 10); + if (p == buf) + printk(KERN_INFO DRV_NAME + ": %s is not in decimal form.\n", buf); + else + iwl_hw_reg_set_txpower(priv, val); + + return count; +} + +static DEVICE_ATTR(tx_power, S_IWUSR | S_IRUGO, show_tx_power, store_tx_power); + +static ssize_t show_flags(struct device *d, + struct device_attribute *attr, char *buf) +{ + struct iwl_priv *priv = (struct iwl_priv *)d->driver_data; + + return sprintf(buf, "0x%04X\n", priv->active_rxon.flags); +} + +static ssize_t store_flags(struct device *d, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct iwl_priv *priv = (struct iwl_priv *)d->driver_data; + u32 flags = simple_strtoul(buf, NULL, 0); + + mutex_lock(&priv->mutex); + if (le32_to_cpu(priv->staging_rxon.flags) != flags) { + /* Cancel any currently running scans... */ + if (iwl_scan_cancel_timeout(priv, 100)) + IWL_WARNING("Could not cancel scan.\n"); + else { + IWL_DEBUG_INFO("Committing rxon.flags = 0x%04X\n", + flags); + priv->staging_rxon.flags = cpu_to_le32(flags); + iwl_commit_rxon(priv); + } + } + mutex_unlock(&priv->mutex); + + return count; +} + +static DEVICE_ATTR(flags, S_IWUSR | S_IRUGO, show_flags, store_flags); + +static ssize_t show_filter_flags(struct device *d, + struct device_attribute *attr, char *buf) +{ + struct iwl_priv *priv = (struct iwl_priv *)d->driver_data; + + return sprintf(buf, "0x%04X\n", + le32_to_cpu(priv->active_rxon.filter_flags)); +} + +static ssize_t store_filter_flags(struct device *d, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct iwl_priv *priv = (struct iwl_priv *)d->driver_data; + u32 filter_flags = simple_strtoul(buf, NULL, 0); + + mutex_lock(&priv->mutex); + if (le32_to_cpu(priv->staging_rxon.filter_flags) != filter_flags) { + /* Cancel any currently running scans... 
*/ + if (iwl_scan_cancel_timeout(priv, 100)) + IWL_WARNING("Could not cancel scan.\n"); + else { + IWL_DEBUG_INFO("Committing rxon.filter_flags = " + "0x%04X\n", filter_flags); + priv->staging_rxon.filter_flags = + cpu_to_le32(filter_flags); + iwl_commit_rxon(priv); + } + } + mutex_unlock(&priv->mutex); + + return count; +} + +static DEVICE_ATTR(filter_flags, S_IWUSR | S_IRUGO, show_filter_flags, + store_filter_flags); + +static ssize_t show_tune(struct device *d, + struct device_attribute *attr, char *buf) +{ + struct iwl_priv *priv = (struct iwl_priv *)d->driver_data; + + return sprintf(buf, "0x%04X\n", + (priv->phymode << 8) | + le16_to_cpu(priv->active_rxon.channel)); +} + +static void iwl_set_flags_for_phymode(struct iwl_priv *priv, u8 phymode); + +static ssize_t store_tune(struct device *d, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct iwl_priv *priv = (struct iwl_priv *)d->driver_data; + char *p = (char *)buf; + u16 tune = simple_strtoul(p, &p, 0); + u8 phymode = (tune >> 8) & 0xff; + u16 channel = tune & 0xff; + + IWL_DEBUG_INFO("Tune request to:%d channel:%d\n", phymode, channel); + + mutex_lock(&priv->mutex); + if ((le16_to_cpu(priv->staging_rxon.channel) != channel) || + (priv->phymode != phymode)) { + const struct iwl_channel_info *ch_info; + + ch_info = iwl_get_channel_info(priv, phymode, channel); + if (!ch_info) { + IWL_WARNING("Requested invalid phymode/channel " + "combination: %d %d\n", phymode, channel); + mutex_unlock(&priv->mutex); + return -EINVAL; + } + + /* Cancel any currently running scans... */ + if (iwl_scan_cancel_timeout(priv, 100)) + IWL_WARNING("Could not cancel scan.\n"); + else { + IWL_DEBUG_INFO("Committing phymode and " + "rxon.channel = %d %d\n", + phymode, channel); + + iwl_set_rxon_channel(priv, phymode, channel); + iwl_set_flags_for_phymode(priv, phymode); + + iwl_set_rate(priv); + iwl_commit_rxon(priv); + } + } + mutex_unlock(&priv->mutex); + + return count; +} + +static DEVICE_ATTR(tune, S_IWUSR | S_IRUGO, show_tune, store_tune); + +#ifdef CONFIG_IWLWIFI_SPECTRUM_MEASUREMENT + +static ssize_t show_measurement(struct device *d, + struct device_attribute *attr, char *buf) +{ + struct iwl_priv *priv = dev_get_drvdata(d); + struct iwl_spectrum_notification measure_report; + u32 size = sizeof(measure_report), len = 0, ofs = 0; + u8 *data = (u8 *) & measure_report; + unsigned long flags; + + spin_lock_irqsave(&priv->lock, flags); + if (!(priv->measurement_status & MEASUREMENT_READY)) { + spin_unlock_irqrestore(&priv->lock, flags); + return 0; + } + memcpy(&measure_report, &priv->measure_report, size); + priv->measurement_status = 0; + spin_unlock_irqrestore(&priv->lock, flags); + + while (size && (PAGE_SIZE - len)) { + hex_dump_to_buffer(data + ofs, size, 16, 1, buf + len, + PAGE_SIZE - len, 1); + len = strlen(buf); + if (PAGE_SIZE - len) + buf[len++] = '\n'; + + ofs += 16; + size -= min(size, 16U); + } + + return len; +} + +static ssize_t store_measurement(struct device *d, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct iwl_priv *priv = dev_get_drvdata(d); + struct ieee80211_measurement_params params = { + .channel = le16_to_cpu(priv->active_rxon.channel), + .start_time = cpu_to_le64(priv->last_tsf), + .duration = cpu_to_le16(1), + }; + u8 type = IWL_MEASURE_BASIC; + u8 buffer[32]; + u8 channel; + + if (count) { + char *p = buffer; + strncpy(buffer, buf, min(sizeof(buffer), count)); + channel = simple_strtoul(p, NULL, 0); + if (channel) + params.channel = channel; + + p = buffer; + while (*p 
&& *p != ' ') + p++; + if (*p) + type = simple_strtoul(p + 1, NULL, 0); + } + + IWL_DEBUG_INFO("Invoking measurement of type %d on " + "channel %d (for '%s')\n", type, params.channel, buf); + iwl_get_measurement(priv, ¶ms, type); + + return count; +} + +static DEVICE_ATTR(measurement, S_IRUSR | S_IWUSR, + show_measurement, store_measurement); +#endif /* CONFIG_IWLWIFI_SPECTRUM_MEASUREMENT */ + +static ssize_t show_rate(struct device *d, + struct device_attribute *attr, char *buf) +{ + struct iwl_priv *priv = dev_get_drvdata(d); + unsigned long flags; + int i; + + spin_lock_irqsave(&priv->sta_lock, flags); + if (priv->iw_mode == IEEE80211_IF_TYPE_STA) + i = priv->stations[IWL_AP_ID].current_rate.s.rate; + else + i = priv->stations[IWL_STA_ID].current_rate.s.rate; + spin_unlock_irqrestore(&priv->sta_lock, flags); + + i = iwl_rate_index_from_plcp(i); + if (i == -1) + return sprintf(buf, "0\n"); + + return sprintf(buf, "%d%s\n", + (iwl_rates[i].ieee >> 1), + (iwl_rates[i].ieee & 0x1) ? ".5" : ""); +} + +static DEVICE_ATTR(rate, S_IRUSR, show_rate, NULL); + +static ssize_t store_retry_rate(struct device *d, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct iwl_priv *priv = dev_get_drvdata(d); + + priv->retry_rate = simple_strtoul(buf, NULL, 0); + if (priv->retry_rate <= 0) + priv->retry_rate = 1; + + return count; +} + +static ssize_t show_retry_rate(struct device *d, + struct device_attribute *attr, char *buf) +{ + struct iwl_priv *priv = dev_get_drvdata(d); + return sprintf(buf, "%d", priv->retry_rate); +} + +static DEVICE_ATTR(retry_rate, S_IWUSR | S_IRUSR, show_retry_rate, + store_retry_rate); + +static ssize_t store_power_level(struct device *d, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct iwl_priv *priv = dev_get_drvdata(d); + int rc; + int mode; + + mode = simple_strtoul(buf, NULL, 0); + mutex_lock(&priv->mutex); + + if (!iwl_is_ready(priv)) { + rc = -EAGAIN; + goto out; + } + + if ((mode < 1) || (mode > IWL_POWER_LIMIT) || (mode == IWL_POWER_AC)) + mode = IWL_POWER_AC; + else + mode |= IWL_POWER_ENABLED; + + if (mode != priv->power_mode) { + rc = iwl_send_power_mode(priv, IWL_POWER_LEVEL(mode)); + if (rc) { + IWL_DEBUG_MAC80211("failed setting power mode.\n"); + goto out; + } + priv->power_mode = mode; + } + + rc = count; + + out: + mutex_unlock(&priv->mutex); + return rc; +} + +#define MAX_WX_STRING 80 + +/* Values are in microsecond */ +static const s32 timeout_duration[] = { + 350000, + 250000, + 75000, + 37000, + 25000, +}; +static const s32 period_duration[] = { + 400000, + 700000, + 1000000, + 1000000, + 1000000 +}; + +static ssize_t show_power_level(struct device *d, + struct device_attribute *attr, char *buf) +{ + struct iwl_priv *priv = dev_get_drvdata(d); + int level = IWL_POWER_LEVEL(priv->power_mode); + char *p = buf; + + p += sprintf(p, "%d ", level); + switch (level) { + case IWL_POWER_MODE_CAM: + case IWL_POWER_AC: + p += sprintf(p, "(AC)"); + break; + case IWL_POWER_BATTERY: + p += sprintf(p, "(BATTERY)"); + break; + default: + p += sprintf(p, + "(Timeout %dms, Period %dms)", + timeout_duration[level - 1] / 1000, + period_duration[level - 1] / 1000); + } + + if (!(priv->power_mode & IWL_POWER_ENABLED)) + p += sprintf(p, " OFF\n"); + else + p += sprintf(p, " \n"); + + return (p - buf + 1); + +} + +static DEVICE_ATTR(power_level, S_IWUSR | S_IRUSR, show_power_level, + store_power_level); + +static ssize_t show_channels(struct device *d, + struct device_attribute *attr, char *buf) +{ + struct iwl_priv *priv = 
dev_get_drvdata(d); + int len = 0, i; + struct ieee80211_channel *channels = NULL; + const struct ieee80211_hw_mode *hw_mode = NULL; + int count = 0; + + if (!iwl_is_ready(priv)) + return -EAGAIN; + + hw_mode = iwl_get_hw_mode(priv, MODE_IEEE80211G); + if (!hw_mode) + hw_mode = iwl_get_hw_mode(priv, MODE_IEEE80211B); + if (hw_mode) { + channels = hw_mode->channels; + count = hw_mode->num_channels; + } + + len += + sprintf(&buf[len], + "Displaying %d channels in 2.4GHz band " + "(802.11bg):\n", count); + + for (i = 0; i < count; i++) + len += sprintf(&buf[len], "%d: %ddBm: BSS%s%s, %s.\n", + channels[i].chan, + channels[i].power_level, + channels[i]. + flag & IEEE80211_CHAN_W_RADAR_DETECT ? + " (IEEE 802.11h required)" : "", + (!(channels[i].flag & IEEE80211_CHAN_W_IBSS) + || (channels[i]. + flag & + IEEE80211_CHAN_W_RADAR_DETECT)) ? "" : + ", IBSS", + channels[i]. + flag & IEEE80211_CHAN_W_ACTIVE_SCAN ? + "active/passive" : "passive only"); + + hw_mode = iwl_get_hw_mode(priv, MODE_IEEE80211A); + if (hw_mode) { + channels = hw_mode->channels; + count = hw_mode->num_channels; + } else { + channels = NULL; + count = 0; + } + + len += sprintf(&buf[len], "Displaying %d channels in 5.2GHz band " + "(802.11a):\n", count); + + for (i = 0; i < count; i++) + len += sprintf(&buf[len], "%d: %ddBm: BSS%s%s, %s.\n", + channels[i].chan, + channels[i].power_level, + channels[i]. + flag & IEEE80211_CHAN_W_RADAR_DETECT ? + " (IEEE 802.11h required)" : "", + (!(channels[i].flag & IEEE80211_CHAN_W_IBSS) + || (channels[i]. + flag & + IEEE80211_CHAN_W_RADAR_DETECT)) ? "" : + ", IBSS", + channels[i]. + flag & IEEE80211_CHAN_W_ACTIVE_SCAN ? + "active/passive" : "passive only"); + + return len; +} + +static DEVICE_ATTR(channels, S_IRUSR, show_channels, NULL); + +static ssize_t show_statistics(struct device *d, + struct device_attribute *attr, char *buf) +{ + struct iwl_priv *priv = dev_get_drvdata(d); + u32 size = sizeof(struct iwl_notif_statistics); + u32 len = 0, ofs = 0; + u8 *data = (u8 *) & priv->statistics; + int rc = 0; + + if (!iwl_is_alive(priv)) + return -EAGAIN; + + mutex_lock(&priv->mutex); + rc = iwl_send_statistics_request(priv); + mutex_unlock(&priv->mutex); + + if (rc) { + len = sprintf(buf, + "Error sending statistics request: 0x%08X\n", rc); + return len; + } + + while (size && (PAGE_SIZE - len)) { + hex_dump_to_buffer(data + ofs, size, 16, 1, buf + len, + PAGE_SIZE - len, 1); + len = strlen(buf); + if (PAGE_SIZE - len) + buf[len++] = '\n'; + + ofs += 16; + size -= min(size, 16U); + } + + return len; +} + +static DEVICE_ATTR(statistics, S_IRUGO, show_statistics, NULL); + +static ssize_t show_antenna(struct device *d, + struct device_attribute *attr, char *buf) +{ + struct iwl_priv *priv = dev_get_drvdata(d); + + if (!iwl_is_alive(priv)) + return -EAGAIN; + + return sprintf(buf, "%d\n", priv->antenna); +} + +static ssize_t store_antenna(struct device *d, + struct device_attribute *attr, + const char *buf, size_t count) +{ + int ant; + struct iwl_priv *priv = dev_get_drvdata(d); + + if (count == 0) + return 0; + + if (sscanf(buf, "%1i", &ant) != 1) { + IWL_DEBUG_INFO("not in hex or decimal form.\n"); + return count; + } + + if ((ant >= 0) && (ant <= 2)) { + IWL_DEBUG_INFO("Setting antenna select to %d.\n", ant); + priv->antenna = (enum iwl_antenna)ant; + } else + IWL_DEBUG_INFO("Bad antenna select value %d.\n", ant); + + + return count; +} + +static DEVICE_ATTR(antenna, S_IWUSR | S_IRUGO, show_antenna, store_antenna); + +static ssize_t show_status(struct device *d, + struct device_attribute *attr, 
char *buf) +{ + struct iwl_priv *priv = (struct iwl_priv *)d->driver_data; + if (!iwl_is_alive(priv)) + return -EAGAIN; + return sprintf(buf, "0x%08x\n", (int)priv->status); +} + +static DEVICE_ATTR(status, S_IRUGO, show_status, NULL); + +static ssize_t dump_error_log(struct device *d, + struct device_attribute *attr, + const char *buf, size_t count) +{ + char *p = (char *)buf; + + if (p[0] == '1') + iwl_dump_nic_error_log((struct iwl_priv *)d->driver_data); + + return strnlen(buf, count); +} + +static DEVICE_ATTR(dump_errors, S_IWUSR, NULL, dump_error_log); + +static ssize_t dump_event_log(struct device *d, + struct device_attribute *attr, + const char *buf, size_t count) +{ + char *p = (char *)buf; + + if (p[0] == '1') + iwl_dump_nic_event_log((struct iwl_priv *)d->driver_data); + + return strnlen(buf, count); +} + +static DEVICE_ATTR(dump_events, S_IWUSR, NULL, dump_event_log); + +/***************************************************************************** + * + * driver setup and teardown + * + *****************************************************************************/ + +static void iwl_setup_deferred_work(struct iwl_priv *priv) +{ + priv->workqueue = create_workqueue(DRV_NAME); + + init_waitqueue_head(&priv->wait_command_queue); + + INIT_WORK(&priv->up, iwl_bg_up); + INIT_WORK(&priv->restart, iwl_bg_restart); + INIT_WORK(&priv->rx_replenish, iwl_bg_rx_replenish); + INIT_WORK(&priv->scan_completed, iwl_bg_scan_completed); + INIT_WORK(&priv->request_scan, iwl_bg_request_scan); + INIT_WORK(&priv->abort_scan, iwl_bg_abort_scan); + INIT_WORK(&priv->rf_kill, iwl_bg_rf_kill); + INIT_WORK(&priv->beacon_update, iwl_bg_beacon_update); + INIT_DELAYED_WORK(&priv->post_associate, iwl_bg_post_associate); + INIT_DELAYED_WORK(&priv->init_alive_start, iwl_bg_init_alive_start); + INIT_DELAYED_WORK(&priv->alive_start, iwl_bg_alive_start); + INIT_DELAYED_WORK(&priv->scan_check, iwl_bg_scan_check); + + iwl_hw_setup_deferred_work(priv); + + tasklet_init(&priv->irq_tasklet, (void (*)(unsigned long)) + iwl_irq_tasklet, (unsigned long)priv); +} + +static void iwl_cancel_deferred_work(struct iwl_priv *priv) +{ + iwl_hw_cancel_deferred_work(priv); + + cancel_delayed_work(&priv->scan_check); + cancel_delayed_work(&priv->alive_start); + cancel_delayed_work(&priv->post_associate); + cancel_work_sync(&priv->beacon_update); +} + +static struct attribute *iwl_sysfs_entries[] = { + &dev_attr_antenna.attr, + &dev_attr_channels.attr, + &dev_attr_dump_errors.attr, + &dev_attr_dump_events.attr, + &dev_attr_flags.attr, + &dev_attr_filter_flags.attr, +#ifdef CONFIG_IWLWIFI_SPECTRUM_MEASUREMENT + &dev_attr_measurement.attr, +#endif + &dev_attr_power_level.attr, + &dev_attr_rate.attr, + &dev_attr_retry_rate.attr, + &dev_attr_rf_kill.attr, + &dev_attr_rs_window.attr, + &dev_attr_statistics.attr, + &dev_attr_status.attr, + &dev_attr_temperature.attr, + &dev_attr_tune.attr, + &dev_attr_tx_power.attr, + + NULL +}; + +static struct attribute_group iwl_attribute_group = { + .name = NULL, /* put in device directory */ + .attrs = iwl_sysfs_entries, +}; + +static struct ieee80211_ops iwl_hw_ops = { + .tx = iwl_mac_tx, + .start = iwl_mac_start, + .stop = iwl_mac_stop, + .add_interface = iwl_mac_add_interface, + .remove_interface = iwl_mac_remove_interface, + .config = iwl_mac_config, + .config_interface = iwl_mac_config_interface, + .configure_filter = iwl_configure_filter, + .set_key = iwl_mac_set_key, + .get_stats = iwl_mac_get_stats, + .get_tx_stats = iwl_mac_get_tx_stats, + .conf_tx = iwl_mac_conf_tx, + .get_tsf = 
iwl_mac_get_tsf, + .reset_tsf = iwl_mac_reset_tsf, + .beacon_update = iwl_mac_beacon_update, + .hw_scan = iwl_mac_hw_scan +}; + +static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) +{ + int err = 0; + u32 pci_id; + struct iwl_priv *priv; + struct ieee80211_hw *hw; + int i; + + if (iwl_param_disable_hw_scan) { + IWL_DEBUG_INFO("Disabling hw_scan\n"); + iwl_hw_ops.hw_scan = NULL; + } + + if ((iwl_param_queues_num > IWL_MAX_NUM_QUEUES) || + (iwl_param_queues_num < IWL_MIN_NUM_QUEUES)) { + IWL_ERROR("invalid queues_num, should be between %d and %d\n", + IWL_MIN_NUM_QUEUES, IWL_MAX_NUM_QUEUES); + err = -EINVAL; + goto out; + } + + /* mac80211 allocates memory for this device instance, including + * space for this driver's private structure */ + hw = ieee80211_alloc_hw(sizeof(struct iwl_priv), &iwl_hw_ops); + if (hw == NULL) { + IWL_ERROR("Can not allocate network device\n"); + err = -ENOMEM; + goto out; + } + SET_IEEE80211_DEV(hw, &pdev->dev); + + IWL_DEBUG_INFO("*** LOAD DRIVER ***\n"); + priv = hw->priv; + priv->hw = hw; + + priv->pci_dev = pdev; + priv->antenna = (enum iwl_antenna)iwl_param_antenna; +#ifdef CONFIG_IWLWIFI_DEBUG + iwl_debug_level = iwl_param_debug; + atomic_set(&priv->restrict_refcnt, 0); +#endif + priv->retry_rate = 1; + + priv->ibss_beacon = NULL; + + /* Tell mac80211 and its clients (e.g. Wireless Extensions) + * the range of signal quality values that we'll provide. + * Negative values for level/noise indicate that we'll provide dBm. + * For WE, at least, non-0 values here *enable* display of values + * in app (iwconfig). */ + hw->max_rssi = -20; /* signal level, negative indicates dBm */ + hw->max_noise = -20; /* noise level, negative indicates dBm */ + hw->max_signal = 100; /* link quality indication (%) */ + + /* Tell mac80211 our Tx characteristics */ + hw->flags = IEEE80211_HW_HOST_GEN_BEACON_TEMPLATE; + + hw->queues = 4; + + spin_lock_init(&priv->lock); + spin_lock_init(&priv->power_data.lock); + spin_lock_init(&priv->sta_lock); + spin_lock_init(&priv->hcmd_lock); + + for (i = 0; i < IWL_IBSS_MAC_HASH_SIZE; i++) + INIT_LIST_HEAD(&priv->ibss_mac_hash[i]); + + INIT_LIST_HEAD(&priv->free_frames); + + mutex_init(&priv->mutex); + if (pci_enable_device(pdev)) { + err = -ENODEV; + goto out_ieee80211_free_hw; + } + + pci_set_master(pdev); + + iwl_clear_stations_table(priv); + + priv->data_retry_limit = -1; + priv->ieee_channels = NULL; + priv->ieee_rates = NULL; + priv->phymode = -1; + + err = pci_set_dma_mask(pdev, DMA_32BIT_MASK); + if (!err) + err = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK); + if (err) { + printk(KERN_WARNING DRV_NAME ": No suitable DMA available.\n"); + goto out_pci_disable_device; + } + + pci_set_drvdata(pdev, priv); + err = pci_request_regions(pdev, DRV_NAME); + if (err) + goto out_pci_disable_device; + /* We disable the RETRY_TIMEOUT register (0x41) to keep + * PCI Tx retries from interfering with C3 CPU state */ + pci_write_config_byte(pdev, 0x41, 0x00); + priv->hw_base = pci_iomap(pdev, 0, 0); + if (!priv->hw_base) { + err = -ENODEV; + goto out_pci_release_regions; + } + + IWL_DEBUG_INFO("pci_resource_len = 0x%08llx\n", + (unsigned long long) pci_resource_len(pdev, 0)); + IWL_DEBUG_INFO("pci_resource_base = %p\n", priv->hw_base); + + /* Initialize module parameter values here */ + + if (iwl_param_disable) { + set_bit(STATUS_RF_KILL_SW, &priv->status); + IWL_DEBUG_INFO("Radio disabled.\n"); + } + + priv->iw_mode = IEEE80211_IF_TYPE_STA; + + pci_id = + (priv->pci_dev->device << 16) | 
priv->pci_dev->subsystem_device; + + switch (pci_id) { + case 0x42221005: /* 0x4222 0x8086 0x1005 is BG SKU */ + case 0x42221034: /* 0x4222 0x8086 0x1034 is BG SKU */ + case 0x42271014: /* 0x4227 0x8086 0x1014 is BG SKU */ + case 0x42221044: /* 0x4222 0x8086 0x1044 is BG SKU */ + priv->is_abg = 0; + break; + + /* + * Rest are assumed ABG SKU -- if this is not the + * case then the card will get the wrong 'Detected' + * line in the kernel log however the code that + * initializes the GEO table will detect no A-band + * channels and remove the is_abg mask. + */ + default: + priv->is_abg = 1; + break; + } + + printk(KERN_INFO DRV_NAME + ": Detected Intel PRO/Wireless 3945%sBG Network Connection\n", + priv->is_abg ? "A" : ""); + + /* Device-specific setup */ + if (iwl_hw_set_hw_setting(priv)) { + IWL_ERROR("failed to set hw settings\n"); + mutex_unlock(&priv->mutex); + goto out_iounmap; + } + +#ifdef CONFIG_IWLWIFI_QOS + if (iwl_param_qos_enable) + priv->qos_data.qos_enable = 1; + + iwl_reset_qos(priv); + + priv->qos_data.qos_active = 0; + priv->qos_data.qos_cap.val = 0; +#endif /* CONFIG_IWLWIFI_QOS */ + + iwl_set_rxon_channel(priv, MODE_IEEE80211G, 6); + iwl_setup_deferred_work(priv); + iwl_setup_rx_handlers(priv); + + priv->rates_mask = IWL_RATES_MASK; + /* If power management is turned on, default to AC mode */ + priv->power_mode = IWL_POWER_AC; + priv->user_txpower_limit = IWL_DEFAULT_TX_POWER; + + pci_enable_msi(pdev); + + err = request_irq(pdev->irq, iwl_isr, IRQF_SHARED, DRV_NAME, priv); + if (err) { + IWL_ERROR("Error allocating IRQ %d\n", pdev->irq); + goto out_disable_msi; + } + + mutex_lock(&priv->mutex); + + err = sysfs_create_group(&pdev->dev.kobj, &iwl_attribute_group); + if (err) { + IWL_ERROR("failed to create sysfs device attributes\n"); + mutex_unlock(&priv->mutex); + goto out_release_irq; + } + + /* fetch ucode file from disk, alloc and copy to bus-master buffers ... + * ucode filename and max sizes are card-specific. 
*/ + err = iwl_read_ucode(priv); + if (err) { + IWL_ERROR("Could not read microcode: %d\n", err); + mutex_unlock(&priv->mutex); + goto out_pci_alloc; + } + + mutex_unlock(&priv->mutex); + + IWL_DEBUG_INFO("Queing UP work.\n"); + + queue_work(priv->workqueue, &priv->up); + + return 0; + + out_pci_alloc: + iwl_dealloc_ucode_pci(priv); + + sysfs_remove_group(&pdev->dev.kobj, &iwl_attribute_group); + + out_release_irq: + free_irq(pdev->irq, priv); + + out_disable_msi: + pci_disable_msi(pdev); + destroy_workqueue(priv->workqueue); + priv->workqueue = NULL; + iwl_unset_hw_setting(priv); + + out_iounmap: + pci_iounmap(pdev, priv->hw_base); + out_pci_release_regions: + pci_release_regions(pdev); + out_pci_disable_device: + pci_disable_device(pdev); + pci_set_drvdata(pdev, NULL); + out_ieee80211_free_hw: + ieee80211_free_hw(priv->hw); + out: + return err; +} + +static void iwl_pci_remove(struct pci_dev *pdev) +{ + struct iwl_priv *priv = pci_get_drvdata(pdev); + struct list_head *p, *q; + int i; + + if (!priv) + return; + + IWL_DEBUG_INFO("*** UNLOAD DRIVER ***\n"); + + mutex_lock(&priv->mutex); + set_bit(STATUS_EXIT_PENDING, &priv->status); + __iwl_down(priv); + mutex_unlock(&priv->mutex); + + /* Free MAC hash list for ADHOC */ + for (i = 0; i < IWL_IBSS_MAC_HASH_SIZE; i++) { + list_for_each_safe(p, q, &priv->ibss_mac_hash[i]) { + list_del(p); + kfree(list_entry(p, struct iwl_ibss_seq, list)); + } + } + + sysfs_remove_group(&pdev->dev.kobj, &iwl_attribute_group); + + iwl_dealloc_ucode_pci(priv); + + if (priv->rxq.bd) + iwl_rx_queue_free(priv, &priv->rxq); + iwl_hw_txq_ctx_free(priv); + + iwl_unset_hw_setting(priv); + iwl_clear_stations_table(priv); + + if (priv->mac80211_registered) { + ieee80211_unregister_hw(priv->hw); + iwl_rate_control_unregister(priv->hw); + } + + /* ieee80211_unregister_hw calls iwl_mac_stop, which flushes + * priv->workqueue... so we can't take down the workqueue + * until now... */ + destroy_workqueue(priv->workqueue); + priv->workqueue = NULL; + + free_irq(pdev->irq, priv); + pci_disable_msi(pdev); + pci_iounmap(pdev, priv->hw_base); + pci_release_regions(pdev); + pci_disable_device(pdev); + pci_set_drvdata(pdev, NULL); + + kfree(priv->channel_info); + + kfree(priv->ieee_channels); + kfree(priv->ieee_rates); + + if (priv->ibss_beacon) + dev_kfree_skb(priv->ibss_beacon); + + ieee80211_free_hw(priv->hw); +} + +#ifdef CONFIG_PM + +static int iwl_pci_suspend(struct pci_dev *pdev, pm_message_t state) +{ + struct iwl_priv *priv = pci_get_drvdata(pdev); + + mutex_lock(&priv->mutex); + + set_bit(STATUS_IN_SUSPEND, &priv->status); + + /* Take down the device; powers it off, etc. */ + __iwl_down(priv); + + if (priv->mac80211_registered) + ieee80211_stop_queues(priv->hw); + + pci_save_state(pdev); + pci_disable_device(pdev); + pci_set_power_state(pdev, PCI_D3hot); + + mutex_unlock(&priv->mutex); + + return 0; +} + +static void iwl_resume(struct iwl_priv *priv) +{ + unsigned long flags; + + /* The following it a temporary work around due to the + * suspend / resume not fully initializing the NIC correctly. + * Without all of the following, resume will not attempt to take + * down the NIC (it shouldn't really need to) and will just try + * and bring the NIC back up. However that fails during the + * ucode verification process. This then causes iwl_down to be + * called *after* iwl_hw_nic_init() has succeeded -- which + * then lets the next init sequence succeed. So, we've + * replicated all of that NIC init code here... 
*/ + + iwl_write32(priv, CSR_INT, 0xFFFFFFFF); + + iwl_hw_nic_init(priv); + + iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL); + iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, + CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED); + iwl_write32(priv, CSR_INT, 0xFFFFFFFF); + iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL); + iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL); + + /* tell the device to stop sending interrupts */ + iwl_disable_interrupts(priv); + + spin_lock_irqsave(&priv->lock, flags); + iwl_clear_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); + + if (!iwl_grab_restricted_access(priv)) { + iwl_write_restricted_reg(priv, APMG_CLK_DIS_REG, + APMG_CLK_VAL_DMA_CLK_RQT); + iwl_release_restricted_access(priv); + } + spin_unlock_irqrestore(&priv->lock, flags); + + udelay(5); + + iwl_hw_nic_reset(priv); + + /* Bring the device back up */ + clear_bit(STATUS_IN_SUSPEND, &priv->status); + queue_work(priv->workqueue, &priv->up); +} + +static int iwl_pci_resume(struct pci_dev *pdev) +{ + struct iwl_priv *priv = pci_get_drvdata(pdev); + int err; + + printk(KERN_INFO "Coming out of suspend...\n"); + + mutex_lock(&priv->mutex); + + pci_set_power_state(pdev, PCI_D0); + err = pci_enable_device(pdev); + pci_restore_state(pdev); + + /* + * Suspend/Resume resets the PCI configuration space, so we have to + * re-disable the RETRY_TIMEOUT register (0x41) to keep PCI Tx retries + * from interfering with C3 CPU state. pci_restore_state won't help + * here since it only restores the first 64 bytes pci config header. + */ + pci_write_config_byte(pdev, 0x41, 0x00); + + iwl_resume(priv); + mutex_unlock(&priv->mutex); + + return 0; +} + +#endif /* CONFIG_PM */ + +/***************************************************************************** + * + * driver and module entry point + * + *****************************************************************************/ + +static struct pci_driver iwl_driver = { + .name = DRV_NAME, + .id_table = iwl_hw_card_ids, + .probe = iwl_pci_probe, + .remove = __devexit_p(iwl_pci_remove), +#ifdef CONFIG_PM + .suspend = iwl_pci_suspend, + .resume = iwl_pci_resume, +#endif +}; + +static int __init iwl_init(void) +{ + + int ret; + printk(KERN_INFO DRV_NAME ": " DRV_DESCRIPTION ", " DRV_VERSION "\n"); + printk(KERN_INFO DRV_NAME ": " DRV_COPYRIGHT "\n"); + ret = pci_register_driver(&iwl_driver); + if (ret) { + IWL_ERROR("Unable to initialize PCI module\n"); + return ret; + } +#ifdef CONFIG_IWLWIFI_DEBUG + ret = driver_create_file(&iwl_driver.driver, &driver_attr_debug_level); + if (ret) { + IWL_ERROR("Unable to create driver sysfs file\n"); + pci_unregister_driver(&iwl_driver); + return ret; + } +#endif + + return ret; +} + +static void __exit iwl_exit(void) +{ +#ifdef CONFIG_IWLWIFI_DEBUG + driver_remove_file(&iwl_driver.driver, &driver_attr_debug_level); +#endif + pci_unregister_driver(&iwl_driver); +} + +module_param_named(antenna, iwl_param_antenna, int, 0444); +MODULE_PARM_DESC(antenna, "select antenna (1=Main, 2=Aux, default 0 [both])"); +module_param_named(disable, iwl_param_disable, int, 0444); +MODULE_PARM_DESC(disable, "manually disable the radio (default 0 [radio on])"); +module_param_named(hwcrypto, iwl_param_hwcrypto, int, 0444); +MODULE_PARM_DESC(hwcrypto, + "using hardware crypto engine (default 0 [software])\n"); +module_param_named(debug, iwl_param_debug, int, 0444); +MODULE_PARM_DESC(debug, "debug output mask"); +module_param_named(disable_hw_scan, iwl_param_disable_hw_scan, int, 0444); 
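
Illustrative sketch (not part of the patch): both the probe and resume paths above rewrite PCI config offset 0x41, because, as the in-code comment notes, pci_restore_state() only restores the standard 64-byte config header and so cannot reapply this quirk after suspend. A minimal helper factoring out that write could look like the following; the helper name is hypothetical.

#include <linux/pci.h>

/*
 * Hypothetical helper: disable the RETRY_TIMEOUT register (offset 0x41)
 * so PCI Tx retries do not interfere with the C3 CPU state.  Offset 0x41
 * lies outside the standard 64-byte config header, which is why the
 * driver must repeat this write in the resume path as well as in probe.
 */
static void iwl_quirk_disable_retry_timeout(struct pci_dev *pdev)
{
	pci_write_config_byte(pdev, 0x41, 0x00);
}
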
+MODULE_PARM_DESC(disable_hw_scan, "disable hardware scanning (default 0)"); + +module_param_named(queues_num, iwl_param_queues_num, int, 0444); +MODULE_PARM_DESC(queues_num, "number of hw queues."); + +/* QoS */ +module_param_named(qos_enable, iwl_param_qos_enable, int, 0444); +MODULE_PARM_DESC(qos_enable, "enable all QoS functionality"); + +module_exit(iwl_exit); +module_init(iwl_init); diff --git a/drivers/net/wireless/iwlwifi/iwl4965-base.c b/drivers/net/wireless/iwlwifi/iwl4965-base.c new file mode 100644 index 0000000000000000000000000000000000000000..b1a6e39f7821d9cd5888d905737d168856ada18a --- /dev/null +++ b/drivers/net/wireless/iwlwifi/iwl4965-base.c @@ -0,0 +1,9340 @@ +/****************************************************************************** + * + * Copyright(c) 2003 - 2007 Intel Corporation. All rights reserved. + * + * Portions of this file are derived from the ipw3945 project, as well + * as portions of the ieee80211 subsystem header files. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA + * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. + * + * Contact Information: + * James P. Ketrenos + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + *****************************************************************************/ + +/* + * NOTE: This file (iwl-base.c) is used to build to multiple hardware targets + * by defining IWL to either 3945 or 4965. The Makefile used when building + * the base targets will create base-3945.o and base-4965.o + * + * The eventual goal is to move as many of the #if IWL / #endif blocks out of + * this file and into the hardware specific implementation files (iwl-XXXX.c) + * and leave only the common (non #ifdef sprinkled) code in this file + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include + +#define IWL 4965 + +#include "iwlwifi.h" +#include "iwl-4965.h" +#include "iwl-helpers.h" + +#ifdef CONFIG_IWLWIFI_DEBUG +u32 iwl_debug_level; +#endif + +/****************************************************************************** + * + * module boiler plate + * + ******************************************************************************/ + +/* module parameters */ +int iwl_param_disable_hw_scan; +int iwl_param_debug; +int iwl_param_disable; /* def: enable radio */ +int iwl_param_antenna; /* def: 0 = both antennas (use diversity) */ +int iwl_param_hwcrypto; /* def: using software encryption */ +int iwl_param_qos_enable = 1; +int iwl_param_queues_num = IWL_MAX_NUM_QUEUES; + +/* + * module name, copyright, version, etc. 
+ * NOTE: DRV_NAME is defined in iwlwifi.h for use by iwl-debug.h and printk + */ + +#define DRV_DESCRIPTION "Intel(R) Wireless WiFi Link 4965AGN driver for Linux" + +#ifdef CONFIG_IWLWIFI_DEBUG +#define VD "d" +#else +#define VD +#endif + +#ifdef CONFIG_IWLWIFI_SPECTRUM_MEASUREMENT +#define VS "s" +#else +#define VS +#endif + +#define IWLWIFI_VERSION "1.1.17k" VD VS +#define DRV_COPYRIGHT "Copyright(c) 2003-2007 Intel Corporation" +#define DRV_VERSION IWLWIFI_VERSION + +/* Change firmware file name, using "-" and incrementing number, + * *only* when uCode interface or architecture changes so that it + * is not compatible with earlier drivers. + * This number will also appear in << 8 position of 1st dword of uCode file */ +#define IWL4965_UCODE_API "-1" + +MODULE_DESCRIPTION(DRV_DESCRIPTION); +MODULE_VERSION(DRV_VERSION); +MODULE_AUTHOR(DRV_COPYRIGHT); +MODULE_LICENSE("GPL"); + +__le16 *ieee80211_get_qos_ctrl(struct ieee80211_hdr *hdr) +{ + u16 fc = le16_to_cpu(hdr->frame_control); + int hdr_len = ieee80211_get_hdrlen(fc); + + if ((fc & 0x00cc) == (IEEE80211_STYPE_QOS_DATA | IEEE80211_FTYPE_DATA)) + return (__le16 *) ((u8 *) hdr + hdr_len - QOS_CONTROL_LEN); + return NULL; +} + +static const struct ieee80211_hw_mode *iwl_get_hw_mode( + struct iwl_priv *priv, int mode) +{ + int i; + + for (i = 0; i < 3; i++) + if (priv->modes[i].mode == mode) + return &priv->modes[i]; + + return NULL; +} + +static int iwl_is_empty_essid(const char *essid, int essid_len) +{ + /* Single white space is for Linksys APs */ + if (essid_len == 1 && essid[0] == ' ') + return 1; + + /* Otherwise, if the entire essid is 0, we assume it is hidden */ + while (essid_len) { + essid_len--; + if (essid[essid_len] != '\0') + return 0; + } + + return 1; +} + +static const char *iwl_escape_essid(const char *essid, u8 essid_len) +{ + static char escaped[IW_ESSID_MAX_SIZE * 2 + 1]; + const char *s = essid; + char *d = escaped; + + if (iwl_is_empty_essid(essid, essid_len)) { + memcpy(escaped, "", sizeof("")); + return escaped; + } + + essid_len = min(essid_len, (u8) IW_ESSID_MAX_SIZE); + while (essid_len--) { + if (*s == '\0') { + *d++ = '\\'; + *d++ = '0'; + s++; + } else + *d++ = *s++; + } + *d = '\0'; + return escaped; +} + +static void iwl_print_hex_dump(int level, void *p, u32 len) +{ +#ifdef CONFIG_IWLWIFI_DEBUG + if (!(iwl_debug_level & level)) + return; + + print_hex_dump(KERN_DEBUG, "iwl data: ", DUMP_PREFIX_OFFSET, 16, 1, + p, len, 1); +#endif +} + +/*************** DMA-QUEUE-GENERAL-FUNCTIONS ***** + * DMA services + * + * Theory of operation + * + * A queue is a circular buffers with 'Read' and 'Write' pointers. + * 2 empty entries always kept in the buffer to protect from overflow. + * + * For Tx queue, there are low mark and high mark limits. If, after queuing + * the packet for Tx, free space become < low mark, Tx queue stopped. When + * reclaiming packets (on 'tx done IRQ), if free space become > high mark, + * Tx queue resumed. + * + * The IWL operates with six queues, one receive queue in the device's + * sram, one transmit queue for sending commands to the device firmware, + * and four transmit queues for data. 
+ ***************************************************/ + +static int iwl_queue_space(const struct iwl_queue *q) +{ + int s = q->last_used - q->first_empty; + + if (q->last_used > q->first_empty) + s -= q->n_bd; + + if (s <= 0) + s += q->n_window; + /* keep some reserve to not confuse empty and full situations */ + s -= 2; + if (s < 0) + s = 0; + return s; +} + +/* XXX: n_bd must be power-of-two size */ +static inline int iwl_queue_inc_wrap(int index, int n_bd) +{ + return ++index & (n_bd - 1); +} + +/* XXX: n_bd must be power-of-two size */ +static inline int iwl_queue_dec_wrap(int index, int n_bd) +{ + return --index & (n_bd - 1); +} + +static inline int x2_queue_used(const struct iwl_queue *q, int i) +{ + return q->first_empty > q->last_used ? + (i >= q->last_used && i < q->first_empty) : + !(i < q->last_used && i >= q->first_empty); +} + +static inline u8 get_cmd_index(struct iwl_queue *q, u32 index, int is_huge) +{ + if (is_huge) + return q->n_window; + + return index & (q->n_window - 1); +} + +static int iwl_queue_init(struct iwl_priv *priv, struct iwl_queue *q, + int count, int slots_num, u32 id) +{ + q->n_bd = count; + q->n_window = slots_num; + q->id = id; + + /* count must be power-of-two size, otherwise iwl_queue_inc_wrap + * and iwl_queue_dec_wrap are broken. */ + BUG_ON(!is_power_of_2(count)); + + /* slots_num must be power-of-two size, otherwise + * get_cmd_index is broken. */ + BUG_ON(!is_power_of_2(slots_num)); + + q->low_mark = q->n_window / 4; + if (q->low_mark < 4) + q->low_mark = 4; + + q->high_mark = q->n_window / 8; + if (q->high_mark < 2) + q->high_mark = 2; + + q->first_empty = q->last_used = 0; + + return 0; +} + +static int iwl_tx_queue_alloc(struct iwl_priv *priv, + struct iwl_tx_queue *txq, u32 id) +{ + struct pci_dev *dev = priv->pci_dev; + + if (id != IWL_CMD_QUEUE_NUM) { + txq->txb = kmalloc(sizeof(txq->txb[0]) * + TFD_QUEUE_SIZE_MAX, GFP_KERNEL); + if (!txq->txb) { + IWL_ERROR("kmalloc for auxilary BD " + "structures failed\n"); + goto error; + } + } else + txq->txb = NULL; + + txq->bd = pci_alloc_consistent(dev, + sizeof(txq->bd[0]) * TFD_QUEUE_SIZE_MAX, + &txq->q.dma_addr); + + if (!txq->bd) { + IWL_ERROR("pci_alloc_consistent(%zd) failed\n", + sizeof(txq->bd[0]) * TFD_QUEUE_SIZE_MAX); + goto error; + } + txq->q.id = id; + + return 0; + + error: + if (txq->txb) { + kfree(txq->txb); + txq->txb = NULL; + } + + return -ENOMEM; +} + +int iwl_tx_queue_init(struct iwl_priv *priv, + struct iwl_tx_queue *txq, int slots_num, u32 txq_id) +{ + struct pci_dev *dev = priv->pci_dev; + int len; + int rc = 0; + + /* alocate command space + one big command for scan since scan + * command is very huge the system will not have two scan at the + * same time */ + len = sizeof(struct iwl_cmd) * slots_num; + if (txq_id == IWL_CMD_QUEUE_NUM) + len += IWL_MAX_SCAN_SIZE; + txq->cmd = pci_alloc_consistent(dev, len, &txq->dma_addr_cmd); + if (!txq->cmd) + return -ENOMEM; + + rc = iwl_tx_queue_alloc(priv, txq, txq_id); + if (rc) { + pci_free_consistent(dev, len, txq->cmd, txq->dma_addr_cmd); + + return -ENOMEM; + } + txq->need_update = 0; + + /* TFD_QUEUE_SIZE_MAX must be power-of-two size, otherwise + * iwl_queue_inc_wrap and iwl_queue_dec_wrap are broken. */ + BUILD_BUG_ON(TFD_QUEUE_SIZE_MAX & (TFD_QUEUE_SIZE_MAX - 1)); + iwl_queue_init(priv, &txq->q, TFD_QUEUE_SIZE_MAX, slots_num, txq_id); + + iwl_hw_tx_queue_init(priv, txq); + + return 0; +} + +/** + * iwl_tx_queue_free - Deallocate DMA queue. + * @txq: Transmit queue to deallocate. 
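
Illustrative sketch (not part of the patch): the wrap helpers above depend on n_bd being a power of two, so masking with (n_bd - 1) behaves like modulo arithmetic in both directions, including the two's-complement wrap from index 0 back to n_bd - 1. A standalone check of that property, with n_bd = 256 chosen arbitrarily:

#include <assert.h>

/* Same expressions as iwl_queue_inc_wrap() / iwl_queue_dec_wrap() above */
static int inc_wrap(int index, int n_bd)
{
	return ++index & (n_bd - 1);
}

static int dec_wrap(int index, int n_bd)
{
	return --index & (n_bd - 1);
}

int main(void)
{
	assert(inc_wrap(255, 256) == 0);	/* forward wrap to the start */
	assert(dec_wrap(0, 256) == 255);	/* backward wrap via two's complement */
	assert(inc_wrap(7, 256) == 8);		/* ordinary increment */
	assert(dec_wrap(8, 256) == 7);		/* ordinary decrement */
	return 0;
}
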
+ * + * Empty queue by removing and destroying all BD's. + * Free all buffers. txq itself is not freed. + * + */ +void iwl_tx_queue_free(struct iwl_priv *priv, struct iwl_tx_queue *txq) +{ + struct iwl_queue *q = &txq->q; + struct pci_dev *dev = priv->pci_dev; + int len; + + if (q->n_bd == 0) + return; + + /* first, empty all BD's */ + for (; q->first_empty != q->last_used; + q->last_used = iwl_queue_inc_wrap(q->last_used, q->n_bd)) + iwl_hw_txq_free_tfd(priv, txq); + + len = sizeof(struct iwl_cmd) * q->n_window; + if (q->id == IWL_CMD_QUEUE_NUM) + len += IWL_MAX_SCAN_SIZE; + + pci_free_consistent(dev, len, txq->cmd, txq->dma_addr_cmd); + + /* free buffers belonging to queue itself */ + if (txq->q.n_bd) + pci_free_consistent(dev, sizeof(struct iwl_tfd_frame) * + txq->q.n_bd, txq->bd, txq->q.dma_addr); + + if (txq->txb) { + kfree(txq->txb); + txq->txb = NULL; + } + + /* 0 fill whole structure */ + memset(txq, 0, sizeof(*txq)); +} + +const u8 BROADCAST_ADDR[ETH_ALEN] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF }; + +/*************** STATION TABLE MANAGEMENT **** + * + * NOTE: This needs to be overhauled to better synchronize between + * how the iwl-4965.c is using iwl_hw_find_station vs. iwl-3945.c + * + * mac80211 should also be examined to determine if sta_info is duplicating + * the functionality provided here + */ + +/**************************************************************/ + +#if 0 /* temparary disable till we add real remove station */ +static u8 iwl_remove_station(struct iwl_priv *priv, const u8 *addr, int is_ap) +{ + int index = IWL_INVALID_STATION; + int i; + unsigned long flags; + + spin_lock_irqsave(&priv->sta_lock, flags); + + if (is_ap) + index = IWL_AP_ID; + else if (is_broadcast_ether_addr(addr)) + index = priv->hw_setting.bcast_sta_id; + else + for (i = IWL_STA_ID; i < priv->hw_setting.max_stations; i++) + if (priv->stations[i].used && + !compare_ether_addr(priv->stations[i].sta.sta.addr, + addr)) { + index = i; + break; + } + + if (unlikely(index == IWL_INVALID_STATION)) + goto out; + + if (priv->stations[index].used) { + priv->stations[index].used = 0; + priv->num_stations--; + } + + BUG_ON(priv->num_stations < 0); + +out: + spin_unlock_irqrestore(&priv->sta_lock, flags); + return 0; +} +#endif + +static void iwl_clear_stations_table(struct iwl_priv *priv) +{ + unsigned long flags; + + spin_lock_irqsave(&priv->sta_lock, flags); + + priv->num_stations = 0; + memset(priv->stations, 0, sizeof(priv->stations)); + + spin_unlock_irqrestore(&priv->sta_lock, flags); +} + +u8 iwl_add_station(struct iwl_priv *priv, const u8 *addr, int is_ap, u8 flags) +{ + int i; + int index = IWL_INVALID_STATION; + struct iwl_station_entry *station; + unsigned long flags_spin; + DECLARE_MAC_BUF(mac); + + spin_lock_irqsave(&priv->sta_lock, flags_spin); + if (is_ap) + index = IWL_AP_ID; + else if (is_broadcast_ether_addr(addr)) + index = priv->hw_setting.bcast_sta_id; + else + for (i = IWL_STA_ID; i < priv->hw_setting.max_stations; i++) { + if (!compare_ether_addr(priv->stations[i].sta.sta.addr, + addr)) { + index = i; + break; + } + + if (!priv->stations[i].used && + index == IWL_INVALID_STATION) + index = i; + } + + + /* These twh conditions has the same outcome but keep them separate + since they have different meaning */ + if (unlikely(index == IWL_INVALID_STATION)) { + spin_unlock_irqrestore(&priv->sta_lock, flags_spin); + return index; + } + + if (priv->stations[index].used && + !compare_ether_addr(priv->stations[index].sta.sta.addr, addr)) { + spin_unlock_irqrestore(&priv->sta_lock, 
flags_spin); + return index; + } + + + IWL_DEBUG_ASSOC("Add STA ID %d: %s\n", index, print_mac(mac, addr)); + station = &priv->stations[index]; + station->used = 1; + priv->num_stations++; + + memset(&station->sta, 0, sizeof(struct iwl_addsta_cmd)); + memcpy(station->sta.sta.addr, addr, ETH_ALEN); + station->sta.mode = 0; + station->sta.sta.sta_id = index; + station->sta.station_flags = 0; + +#ifdef CONFIG_IWLWIFI_HT + /* BCAST station and IBSS stations do not work in HT mode */ + if (index != priv->hw_setting.bcast_sta_id && + priv->iw_mode != IEEE80211_IF_TYPE_IBSS) + iwl4965_set_ht_add_station(priv, index); +#endif /*CONFIG_IWLWIFI_HT*/ + + spin_unlock_irqrestore(&priv->sta_lock, flags_spin); + iwl_send_add_station(priv, &station->sta, flags); + return index; + +} + +/*************** DRIVER STATUS FUNCTIONS *****/ + +static inline int iwl_is_ready(struct iwl_priv *priv) +{ + /* The adapter is 'ready' if READY and GEO_CONFIGURED bits are + * set but EXIT_PENDING is not */ + return test_bit(STATUS_READY, &priv->status) && + test_bit(STATUS_GEO_CONFIGURED, &priv->status) && + !test_bit(STATUS_EXIT_PENDING, &priv->status); +} + +static inline int iwl_is_alive(struct iwl_priv *priv) +{ + return test_bit(STATUS_ALIVE, &priv->status); +} + +static inline int iwl_is_init(struct iwl_priv *priv) +{ + return test_bit(STATUS_INIT, &priv->status); +} + +static inline int iwl_is_rfkill(struct iwl_priv *priv) +{ + return test_bit(STATUS_RF_KILL_HW, &priv->status) || + test_bit(STATUS_RF_KILL_SW, &priv->status); +} + +static inline int iwl_is_ready_rf(struct iwl_priv *priv) +{ + + if (iwl_is_rfkill(priv)) + return 0; + + return iwl_is_ready(priv); +} + +/*************** HOST COMMAND QUEUE FUNCTIONS *****/ + +#define IWL_CMD(x) case x : return #x + +static const char *get_cmd_string(u8 cmd) +{ + switch (cmd) { + IWL_CMD(REPLY_ALIVE); + IWL_CMD(REPLY_ERROR); + IWL_CMD(REPLY_RXON); + IWL_CMD(REPLY_RXON_ASSOC); + IWL_CMD(REPLY_QOS_PARAM); + IWL_CMD(REPLY_RXON_TIMING); + IWL_CMD(REPLY_ADD_STA); + IWL_CMD(REPLY_REMOVE_STA); + IWL_CMD(REPLY_REMOVE_ALL_STA); + IWL_CMD(REPLY_TX); + IWL_CMD(REPLY_RATE_SCALE); + IWL_CMD(REPLY_LEDS_CMD); + IWL_CMD(REPLY_TX_LINK_QUALITY_CMD); + IWL_CMD(RADAR_NOTIFICATION); + IWL_CMD(REPLY_QUIET_CMD); + IWL_CMD(REPLY_CHANNEL_SWITCH); + IWL_CMD(CHANNEL_SWITCH_NOTIFICATION); + IWL_CMD(REPLY_SPECTRUM_MEASUREMENT_CMD); + IWL_CMD(SPECTRUM_MEASURE_NOTIFICATION); + IWL_CMD(POWER_TABLE_CMD); + IWL_CMD(PM_SLEEP_NOTIFICATION); + IWL_CMD(PM_DEBUG_STATISTIC_NOTIFIC); + IWL_CMD(REPLY_SCAN_CMD); + IWL_CMD(REPLY_SCAN_ABORT_CMD); + IWL_CMD(SCAN_START_NOTIFICATION); + IWL_CMD(SCAN_RESULTS_NOTIFICATION); + IWL_CMD(SCAN_COMPLETE_NOTIFICATION); + IWL_CMD(BEACON_NOTIFICATION); + IWL_CMD(REPLY_TX_BEACON); + IWL_CMD(WHO_IS_AWAKE_NOTIFICATION); + IWL_CMD(QUIET_NOTIFICATION); + IWL_CMD(REPLY_TX_PWR_TABLE_CMD); + IWL_CMD(MEASURE_ABORT_NOTIFICATION); + IWL_CMD(REPLY_BT_CONFIG); + IWL_CMD(REPLY_STATISTICS_CMD); + IWL_CMD(STATISTICS_NOTIFICATION); + IWL_CMD(REPLY_CARD_STATE_CMD); + IWL_CMD(CARD_STATE_NOTIFICATION); + IWL_CMD(MISSED_BEACONS_NOTIFICATION); + IWL_CMD(REPLY_CT_KILL_CONFIG_CMD); + IWL_CMD(SENSITIVITY_CMD); + IWL_CMD(REPLY_PHY_CALIBRATION_CMD); + IWL_CMD(REPLY_RX_PHY_CMD); + IWL_CMD(REPLY_RX_MPDU_CMD); + IWL_CMD(REPLY_4965_RX); + IWL_CMD(REPLY_COMPRESSED_BA); + default: + return "UNKNOWN"; + + } +} + +#define HOST_COMPLETE_TIMEOUT (HZ / 2) + +/** + * iwl_enqueue_hcmd - enqueue a uCode command + * @priv: device private data point + * @cmd: a point to the ucode command structure + * + * The function 
returns < 0 values to indicate the operation is + * failed. On success, it turns the index (> 0) of command in the + * command queue. + */ +static int iwl_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd) +{ + struct iwl_tx_queue *txq = &priv->txq[IWL_CMD_QUEUE_NUM]; + struct iwl_queue *q = &txq->q; + struct iwl_tfd_frame *tfd; + u32 *control_flags; + struct iwl_cmd *out_cmd; + u32 idx; + u16 fix_size = (u16)(cmd->len + sizeof(out_cmd->hdr)); + dma_addr_t phys_addr; + int ret; + unsigned long flags; + + /* If any of the command structures end up being larger than + * the TFD_MAX_PAYLOAD_SIZE, and it sent as a 'small' command then + * we will need to increase the size of the TFD entries */ + BUG_ON((fix_size > TFD_MAX_PAYLOAD_SIZE) && + !(cmd->meta.flags & CMD_SIZE_HUGE)); + + if (iwl_queue_space(q) < ((cmd->meta.flags & CMD_ASYNC) ? 2 : 1)) { + IWL_ERROR("No space for Tx\n"); + return -ENOSPC; + } + + spin_lock_irqsave(&priv->hcmd_lock, flags); + + tfd = &txq->bd[q->first_empty]; + memset(tfd, 0, sizeof(*tfd)); + + control_flags = (u32 *) tfd; + + idx = get_cmd_index(q, q->first_empty, cmd->meta.flags & CMD_SIZE_HUGE); + out_cmd = &txq->cmd[idx]; + + out_cmd->hdr.cmd = cmd->id; + memcpy(&out_cmd->meta, &cmd->meta, sizeof(cmd->meta)); + memcpy(&out_cmd->cmd.payload, cmd->data, cmd->len); + + /* At this point, the out_cmd now has all of the incoming cmd + * information */ + + out_cmd->hdr.flags = 0; + out_cmd->hdr.sequence = cpu_to_le16(QUEUE_TO_SEQ(IWL_CMD_QUEUE_NUM) | + INDEX_TO_SEQ(q->first_empty)); + if (out_cmd->meta.flags & CMD_SIZE_HUGE) + out_cmd->hdr.sequence |= cpu_to_le16(SEQ_HUGE_FRAME); + + phys_addr = txq->dma_addr_cmd + sizeof(txq->cmd[0]) * idx + + offsetof(struct iwl_cmd, hdr); + iwl_hw_txq_attach_buf_to_tfd(priv, tfd, phys_addr, fix_size); + + IWL_DEBUG_HC("Sending command %s (#%x), seq: 0x%04X, " + "%d bytes at %d[%d]:%d\n", + get_cmd_string(out_cmd->hdr.cmd), + out_cmd->hdr.cmd, le16_to_cpu(out_cmd->hdr.sequence), + fix_size, q->first_empty, idx, IWL_CMD_QUEUE_NUM); + + txq->need_update = 1; + ret = iwl4965_tx_queue_update_wr_ptr(priv, txq, 0); + q->first_empty = iwl_queue_inc_wrap(q->first_empty, q->n_bd); + iwl_tx_queue_update_write_ptr(priv, txq); + + spin_unlock_irqrestore(&priv->hcmd_lock, flags); + return ret ? ret : idx; +} + +int iwl_send_cmd_async(struct iwl_priv *priv, struct iwl_host_cmd *cmd) +{ + int ret; + + BUG_ON(!(cmd->meta.flags & CMD_ASYNC)); + + /* An asynchronous command can not expect an SKB to be set. */ + BUG_ON(cmd->meta.flags & CMD_WANT_SKB); + + /* An asynchronous command MUST have a callback. */ + BUG_ON(!cmd->meta.u.callback); + + if (test_bit(STATUS_EXIT_PENDING, &priv->status)) + return -EBUSY; + + ret = iwl_enqueue_hcmd(priv, cmd); + if (ret < 0) { + IWL_ERROR("Error sending %s: iwl_enqueue_hcmd failed: %d\n", + get_cmd_string(cmd->id), ret); + return ret; + } + return 0; +} + +int iwl_send_cmd_sync(struct iwl_priv *priv, struct iwl_host_cmd *cmd) +{ + int cmd_idx; + int ret; + static atomic_t entry = ATOMIC_INIT(0); /* reentrance protection */ + + BUG_ON(cmd->meta.flags & CMD_ASYNC); + + /* A synchronous command can not have a callback set. 
*/ + BUG_ON(cmd->meta.u.callback != NULL); + + if (atomic_xchg(&entry, 1)) { + IWL_ERROR("Error sending %s: Already sending a host command\n", + get_cmd_string(cmd->id)); + return -EBUSY; + } + + set_bit(STATUS_HCMD_ACTIVE, &priv->status); + + if (cmd->meta.flags & CMD_WANT_SKB) + cmd->meta.source = &cmd->meta; + + cmd_idx = iwl_enqueue_hcmd(priv, cmd); + if (cmd_idx < 0) { + ret = cmd_idx; + IWL_ERROR("Error sending %s: iwl_enqueue_hcmd failed: %d\n", + get_cmd_string(cmd->id), ret); + goto out; + } + + ret = wait_event_interruptible_timeout(priv->wait_command_queue, + !test_bit(STATUS_HCMD_ACTIVE, &priv->status), + HOST_COMPLETE_TIMEOUT); + if (!ret) { + if (test_bit(STATUS_HCMD_ACTIVE, &priv->status)) { + IWL_ERROR("Error sending %s: time out after %dms.\n", + get_cmd_string(cmd->id), + jiffies_to_msecs(HOST_COMPLETE_TIMEOUT)); + + clear_bit(STATUS_HCMD_ACTIVE, &priv->status); + ret = -ETIMEDOUT; + goto cancel; + } + } + + if (test_bit(STATUS_RF_KILL_HW, &priv->status)) { + IWL_DEBUG_INFO("Command %s aborted: RF KILL Switch\n", + get_cmd_string(cmd->id)); + ret = -ECANCELED; + goto fail; + } + if (test_bit(STATUS_FW_ERROR, &priv->status)) { + IWL_DEBUG_INFO("Command %s failed: FW Error\n", + get_cmd_string(cmd->id)); + ret = -EIO; + goto fail; + } + if ((cmd->meta.flags & CMD_WANT_SKB) && !cmd->meta.u.skb) { + IWL_ERROR("Error: Response NULL in '%s'\n", + get_cmd_string(cmd->id)); + ret = -EIO; + goto out; + } + + ret = 0; + goto out; + +cancel: + if (cmd->meta.flags & CMD_WANT_SKB) { + struct iwl_cmd *qcmd; + + /* Cancel the CMD_WANT_SKB flag for the cmd in the + * TX cmd queue. Otherwise in case the cmd comes + * in later, it will possibly set an invalid + * address (cmd->meta.source). */ + qcmd = &priv->txq[IWL_CMD_QUEUE_NUM].cmd[cmd_idx]; + qcmd->meta.flags &= ~CMD_WANT_SKB; + } +fail: + if (cmd->meta.u.skb) { + dev_kfree_skb_any(cmd->meta.u.skb); + cmd->meta.u.skb = NULL; + } +out: + atomic_set(&entry, 0); + return ret; +} + +int iwl_send_cmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd) +{ + /* A command can not be asynchronous AND expect an SKB to be set. */ + BUG_ON((cmd->meta.flags & CMD_ASYNC) && + (cmd->meta.flags & CMD_WANT_SKB)); + + if (cmd->meta.flags & CMD_ASYNC) + return iwl_send_cmd_async(priv, cmd); + + return iwl_send_cmd_sync(priv, cmd); +} + +int iwl_send_cmd_pdu(struct iwl_priv *priv, u8 id, u16 len, const void *data) +{ + struct iwl_host_cmd cmd = { + .id = id, + .len = len, + .data = data, + }; + + return iwl_send_cmd_sync(priv, &cmd); +} + +static int __must_check iwl_send_cmd_u32(struct iwl_priv *priv, u8 id, u32 val) +{ + struct iwl_host_cmd cmd = { + .id = id, + .len = sizeof(val), + .data = &val, + }; + + return iwl_send_cmd_sync(priv, &cmd); +} + +int iwl_send_statistics_request(struct iwl_priv *priv) +{ + return iwl_send_cmd_u32(priv, REPLY_STATISTICS_CMD, 0); +} + +/** + * iwl_rxon_add_station - add station into station table. + * + * there is only one AP station with id= IWL_AP_ID + * NOTE: mutex must be held before calling the this fnction +*/ +static int iwl_rxon_add_station(struct iwl_priv *priv, + const u8 *addr, int is_ap) +{ + u8 sta_id; + + sta_id = iwl_add_station(priv, addr, is_ap, 0); + iwl4965_add_station(priv, addr, is_ap); + + return sta_id; +} + +/** + * iwl_set_rxon_channel - Set the phymode and channel values in staging RXON + * @phymode: MODE_IEEE80211A sets to 5.2GHz; all else set to 2.4GHz + * @channel: Any channel valid for the requested phymode + + * In addition to setting the staging RXON, priv->phymode is also set. 
+ * + * NOTE: Does not commit to the hardware; it sets appropriate bit fields + * in the staging RXON flag structure based on the phymode + */ +static int iwl_set_rxon_channel(struct iwl_priv *priv, u8 phymode, u16 channel) +{ + if (!iwl_get_channel_info(priv, phymode, channel)) { + IWL_DEBUG_INFO("Could not set channel to %d [%d]\n", + channel, phymode); + return -EINVAL; + } + + if ((le16_to_cpu(priv->staging_rxon.channel) == channel) && + (priv->phymode == phymode)) + return 0; + + priv->staging_rxon.channel = cpu_to_le16(channel); + if (phymode == MODE_IEEE80211A) + priv->staging_rxon.flags &= ~RXON_FLG_BAND_24G_MSK; + else + priv->staging_rxon.flags |= RXON_FLG_BAND_24G_MSK; + + priv->phymode = phymode; + + IWL_DEBUG_INFO("Staging channel set to %d [%d]\n", channel, phymode); + + return 0; +} + +/** + * iwl_check_rxon_cmd - validate RXON structure is valid + * + * NOTE: This is really only useful during development and can eventually + * be #ifdef'd out once the driver is stable and folks aren't actively + * making changes + */ +static int iwl_check_rxon_cmd(struct iwl_rxon_cmd *rxon) +{ + int error = 0; + int counter = 1; + + if (rxon->flags & RXON_FLG_BAND_24G_MSK) { + error |= le32_to_cpu(rxon->flags & + (RXON_FLG_TGJ_NARROW_BAND_MSK | + RXON_FLG_RADAR_DETECT_MSK)); + if (error) + IWL_WARNING("check 24G fields %d | %d\n", + counter++, error); + } else { + error |= (rxon->flags & RXON_FLG_SHORT_SLOT_MSK) ? + 0 : le32_to_cpu(RXON_FLG_SHORT_SLOT_MSK); + if (error) + IWL_WARNING("check 52 fields %d | %d\n", + counter++, error); + error |= le32_to_cpu(rxon->flags & RXON_FLG_CCK_MSK); + if (error) + IWL_WARNING("check 52 CCK %d | %d\n", + counter++, error); + } + error |= (rxon->node_addr[0] | rxon->bssid_addr[0]) & 0x1; + if (error) + IWL_WARNING("check mac addr %d | %d\n", counter++, error); + + /* make sure basic rates 6Mbps and 1Mbps are supported */ + error |= (((rxon->ofdm_basic_rates & IWL_RATE_6M_MASK) == 0) && + ((rxon->cck_basic_rates & IWL_RATE_1M_MASK) == 0)); + if (error) + IWL_WARNING("check basic rate %d | %d\n", counter++, error); + + error |= (le16_to_cpu(rxon->assoc_id) > 2007); + if (error) + IWL_WARNING("check assoc id %d | %d\n", counter++, error); + + error |= ((rxon->flags & (RXON_FLG_CCK_MSK | RXON_FLG_SHORT_SLOT_MSK)) + == (RXON_FLG_CCK_MSK | RXON_FLG_SHORT_SLOT_MSK)); + if (error) + IWL_WARNING("check CCK and short slot %d | %d\n", + counter++, error); + + error |= ((rxon->flags & (RXON_FLG_CCK_MSK | RXON_FLG_AUTO_DETECT_MSK)) + == (RXON_FLG_CCK_MSK | RXON_FLG_AUTO_DETECT_MSK)); + if (error) + IWL_WARNING("check CCK & auto detect %d | %d\n", + counter++, error); + + error |= ((rxon->flags & (RXON_FLG_AUTO_DETECT_MSK | + RXON_FLG_TGG_PROTECT_MSK)) == RXON_FLG_TGG_PROTECT_MSK); + if (error) + IWL_WARNING("check TGG and auto detect %d | %d\n", + counter++, error); + + if (error) + IWL_WARNING("Tuning to channel %d\n", + le16_to_cpu(rxon->channel)); + + if (error) { + IWL_ERROR("Not a valid iwl_rxon_assoc_cmd field values\n"); + return -1; + } + return 0; +} + +/** + * iwl_full_rxon_required - determine if RXON_ASSOC can be used in RXON commit + * @priv: staging_rxon is comapred to active_rxon + * + * If the RXON structure is changing sufficient to require a new + * tune or to clear and reset the RXON_FILTER_ASSOC_MSK then return 1 + * to indicate a new tune is required. 
+ */ +static int iwl_full_rxon_required(struct iwl_priv *priv) +{ + + /* These items are only settable from the full RXON command */ + if (!(priv->active_rxon.filter_flags & RXON_FILTER_ASSOC_MSK) || + compare_ether_addr(priv->staging_rxon.bssid_addr, + priv->active_rxon.bssid_addr) || + compare_ether_addr(priv->staging_rxon.node_addr, + priv->active_rxon.node_addr) || + compare_ether_addr(priv->staging_rxon.wlap_bssid_addr, + priv->active_rxon.wlap_bssid_addr) || + (priv->staging_rxon.dev_type != priv->active_rxon.dev_type) || + (priv->staging_rxon.channel != priv->active_rxon.channel) || + (priv->staging_rxon.air_propagation != + priv->active_rxon.air_propagation) || + (priv->staging_rxon.ofdm_ht_single_stream_basic_rates != + priv->active_rxon.ofdm_ht_single_stream_basic_rates) || + (priv->staging_rxon.ofdm_ht_dual_stream_basic_rates != + priv->active_rxon.ofdm_ht_dual_stream_basic_rates) || + (priv->staging_rxon.rx_chain != priv->active_rxon.rx_chain) || + (priv->staging_rxon.assoc_id != priv->active_rxon.assoc_id)) + return 1; + + /* flags, filter_flags, ofdm_basic_rates, and cck_basic_rates can + * be updated with the RXON_ASSOC command -- however only some + * flag transitions are allowed using RXON_ASSOC */ + + /* Check if we are not switching bands */ + if ((priv->staging_rxon.flags & RXON_FLG_BAND_24G_MSK) != + (priv->active_rxon.flags & RXON_FLG_BAND_24G_MSK)) + return 1; + + /* Check if we are switching association toggle */ + if ((priv->staging_rxon.filter_flags & RXON_FILTER_ASSOC_MSK) != + (priv->active_rxon.filter_flags & RXON_FILTER_ASSOC_MSK)) + return 1; + + return 0; +} + +static int iwl_send_rxon_assoc(struct iwl_priv *priv) +{ + int rc = 0; + struct iwl_rx_packet *res = NULL; + struct iwl_rxon_assoc_cmd rxon_assoc; + struct iwl_host_cmd cmd = { + .id = REPLY_RXON_ASSOC, + .len = sizeof(rxon_assoc), + .meta.flags = CMD_WANT_SKB, + .data = &rxon_assoc, + }; + const struct iwl_rxon_cmd *rxon1 = &priv->staging_rxon; + const struct iwl_rxon_cmd *rxon2 = &priv->active_rxon; + + if ((rxon1->flags == rxon2->flags) && + (rxon1->filter_flags == rxon2->filter_flags) && + (rxon1->cck_basic_rates == rxon2->cck_basic_rates) && + (rxon1->ofdm_ht_single_stream_basic_rates == + rxon2->ofdm_ht_single_stream_basic_rates) && + (rxon1->ofdm_ht_dual_stream_basic_rates == + rxon2->ofdm_ht_dual_stream_basic_rates) && + (rxon1->rx_chain == rxon2->rx_chain) && + (rxon1->ofdm_basic_rates == rxon2->ofdm_basic_rates)) { + IWL_DEBUG_INFO("Using current RXON_ASSOC. 
Not resending.\n"); + return 0; + } + + rxon_assoc.flags = priv->staging_rxon.flags; + rxon_assoc.filter_flags = priv->staging_rxon.filter_flags; + rxon_assoc.ofdm_basic_rates = priv->staging_rxon.ofdm_basic_rates; + rxon_assoc.cck_basic_rates = priv->staging_rxon.cck_basic_rates; + rxon_assoc.reserved = 0; + rxon_assoc.ofdm_ht_single_stream_basic_rates = + priv->staging_rxon.ofdm_ht_single_stream_basic_rates; + rxon_assoc.ofdm_ht_dual_stream_basic_rates = + priv->staging_rxon.ofdm_ht_dual_stream_basic_rates; + rxon_assoc.rx_chain_select_flags = priv->staging_rxon.rx_chain; + + rc = iwl_send_cmd_sync(priv, &cmd); + if (rc) + return rc; + + res = (struct iwl_rx_packet *)cmd.meta.u.skb->data; + if (res->hdr.flags & IWL_CMD_FAILED_MSK) { + IWL_ERROR("Bad return from REPLY_RXON_ASSOC command\n"); + rc = -EIO; + } + + priv->alloc_rxb_skb--; + dev_kfree_skb_any(cmd.meta.u.skb); + + return rc; +} + +/** + * iwl_commit_rxon - commit staging_rxon to hardware + * + * The RXON command in staging_rxon is commited to the hardware and + * the active_rxon structure is updated with the new data. This + * function correctly transitions out of the RXON_ASSOC_MSK state if + * a HW tune is required based on the RXON structure changes. + */ +static int iwl_commit_rxon(struct iwl_priv *priv) +{ + /* cast away the const for active_rxon in this function */ + struct iwl_rxon_cmd *active_rxon = (void *)&priv->active_rxon; + DECLARE_MAC_BUF(mac); + int rc = 0; + + if (!iwl_is_alive(priv)) + return -1; + + /* always get timestamp with Rx frame */ + priv->staging_rxon.flags |= RXON_FLG_TSF2HOST_MSK; + + rc = iwl_check_rxon_cmd(&priv->staging_rxon); + if (rc) { + IWL_ERROR("Invalid RXON configuration. Not committing.\n"); + return -EINVAL; + } + + /* If we don't need to send a full RXON, we can use + * iwl_rxon_assoc_cmd which is used to reconfigure filter + * and other flags for the current radio configuration. */ + if (!iwl_full_rxon_required(priv)) { + rc = iwl_send_rxon_assoc(priv); + if (rc) { + IWL_ERROR("Error setting RXON_ASSOC " + "configuration (%d).\n", rc); + return rc; + } + + memcpy(active_rxon, &priv->staging_rxon, sizeof(*active_rxon)); + + return 0; + } + + /* station table will be cleared */ + priv->assoc_station_added = 0; + +#ifdef CONFIG_IWLWIFI_SENSITIVITY + priv->sensitivity_data.state = IWL_SENS_CALIB_NEED_REINIT; + if (!priv->error_recovering) + priv->start_calib = 0; + + iwl4965_init_sensitivity(priv, CMD_ASYNC, 1); +#endif /* CONFIG_IWLWIFI_SENSITIVITY */ + + /* If we are currently associated and the new config requires + * an RXON_ASSOC and the new config wants the associated mask enabled, + * we must clear the associated from the active configuration + * before we apply the new config */ + if (iwl_is_associated(priv) && + (priv->staging_rxon.filter_flags & RXON_FILTER_ASSOC_MSK)) { + IWL_DEBUG_INFO("Toggling associated bit on current RXON\n"); + active_rxon->filter_flags &= ~RXON_FILTER_ASSOC_MSK; + + rc = iwl_send_cmd_pdu(priv, REPLY_RXON, + sizeof(struct iwl_rxon_cmd), + &priv->active_rxon); + + /* If the mask clearing failed then we set + * active_rxon back to what it was previously */ + if (rc) { + active_rxon->filter_flags |= RXON_FILTER_ASSOC_MSK; + IWL_ERROR("Error clearing ASSOC_MSK on current " + "configuration (%d).\n", rc); + return rc; + } + } + + IWL_DEBUG_INFO("Sending RXON\n" + "* with%s RXON_FILTER_ASSOC_MSK\n" + "* channel = %d\n" + "* bssid = %s\n", + ((priv->staging_rxon.filter_flags & + RXON_FILTER_ASSOC_MSK) ? 
"" : "out"), + le16_to_cpu(priv->staging_rxon.channel), + print_mac(mac, priv->staging_rxon.bssid_addr)); + + /* Apply the new configuration */ + rc = iwl_send_cmd_pdu(priv, REPLY_RXON, + sizeof(struct iwl_rxon_cmd), &priv->staging_rxon); + if (rc) { + IWL_ERROR("Error setting new configuration (%d).\n", rc); + return rc; + } + + iwl_clear_stations_table(priv); + +#ifdef CONFIG_IWLWIFI_SENSITIVITY + if (!priv->error_recovering) + priv->start_calib = 0; + + priv->sensitivity_data.state = IWL_SENS_CALIB_NEED_REINIT; + iwl4965_init_sensitivity(priv, CMD_ASYNC, 1); +#endif /* CONFIG_IWLWIFI_SENSITIVITY */ + + memcpy(active_rxon, &priv->staging_rxon, sizeof(*active_rxon)); + + /* If we issue a new RXON command which required a tune then we must + * send a new TXPOWER command or we won't be able to Tx any frames */ + rc = iwl_hw_reg_send_txpower(priv); + if (rc) { + IWL_ERROR("Error setting Tx power (%d).\n", rc); + return rc; + } + + /* Add the broadcast address so we can send broadcast frames */ + if (iwl_rxon_add_station(priv, BROADCAST_ADDR, 0) == + IWL_INVALID_STATION) { + IWL_ERROR("Error adding BROADCAST address for transmit.\n"); + return -EIO; + } + + /* If we have set the ASSOC_MSK and we are in BSS mode then + * add the IWL_AP_ID to the station rate table */ + if (iwl_is_associated(priv) && + (priv->iw_mode == IEEE80211_IF_TYPE_STA)) { + if (iwl_rxon_add_station(priv, priv->active_rxon.bssid_addr, 1) + == IWL_INVALID_STATION) { + IWL_ERROR("Error adding AP address for transmit.\n"); + return -EIO; + } + priv->assoc_station_added = 1; + } + + return 0; +} + +static int iwl_send_bt_config(struct iwl_priv *priv) +{ + struct iwl_bt_cmd bt_cmd = { + .flags = 3, + .lead_time = 0xAA, + .max_kill = 1, + .kill_ack_mask = 0, + .kill_cts_mask = 0, + }; + + return iwl_send_cmd_pdu(priv, REPLY_BT_CONFIG, + sizeof(struct iwl_bt_cmd), &bt_cmd); +} + +static int iwl_send_scan_abort(struct iwl_priv *priv) +{ + int rc = 0; + struct iwl_rx_packet *res; + struct iwl_host_cmd cmd = { + .id = REPLY_SCAN_ABORT_CMD, + .meta.flags = CMD_WANT_SKB, + }; + + /* If there isn't a scan actively going on in the hardware + * then we are in between scan bands and not actually + * actively scanning, so don't send the abort command */ + if (!test_bit(STATUS_SCAN_HW, &priv->status)) { + clear_bit(STATUS_SCAN_ABORTING, &priv->status); + return 0; + } + + rc = iwl_send_cmd_sync(priv, &cmd); + if (rc) { + clear_bit(STATUS_SCAN_ABORTING, &priv->status); + return rc; + } + + res = (struct iwl_rx_packet *)cmd.meta.u.skb->data; + if (res->u.status != CAN_ABORT_STATUS) { + /* The scan abort will return 1 for success or + * 2 for "failure". A failure condition can be + * due to simply not being in an active scan which + * can occur if we send the scan abort before we + * the microcode has notified us that a scan is + * completed. */ + IWL_DEBUG_INFO("SCAN_ABORT returned %d.\n", res->u.status); + clear_bit(STATUS_SCAN_ABORTING, &priv->status); + clear_bit(STATUS_SCAN_HW, &priv->status); + } + + dev_kfree_skb_any(cmd.meta.u.skb); + + return rc; +} + +static int iwl_card_state_sync_callback(struct iwl_priv *priv, + struct iwl_cmd *cmd, + struct sk_buff *skb) +{ + return 1; +} + +/* + * CARD_STATE_CMD + * + * Use: Sets the internal card state to enable, disable, or halt + * + * When in the 'enable' state the card operates as normal. + * When in the 'disable' state, the card enters into a low power mode. + * When in the 'halt' state, the card is shut down and must be fully + * restarted to come back on. 
+ */ +static int iwl_send_card_state(struct iwl_priv *priv, u32 flags, u8 meta_flag) +{ + struct iwl_host_cmd cmd = { + .id = REPLY_CARD_STATE_CMD, + .len = sizeof(u32), + .data = &flags, + .meta.flags = meta_flag, + }; + + if (meta_flag & CMD_ASYNC) + cmd.meta.u.callback = iwl_card_state_sync_callback; + + return iwl_send_cmd(priv, &cmd); +} + +static int iwl_add_sta_sync_callback(struct iwl_priv *priv, + struct iwl_cmd *cmd, struct sk_buff *skb) +{ + struct iwl_rx_packet *res = NULL; + + if (!skb) { + IWL_ERROR("Error: Response NULL in REPLY_ADD_STA.\n"); + return 1; + } + + res = (struct iwl_rx_packet *)skb->data; + if (res->hdr.flags & IWL_CMD_FAILED_MSK) { + IWL_ERROR("Bad return from REPLY_ADD_STA (0x%08X)\n", + res->hdr.flags); + return 1; + } + + switch (res->u.add_sta.status) { + case ADD_STA_SUCCESS_MSK: + break; + default: + break; + } + + /* We didn't cache the SKB; let the caller free it */ + return 1; +} + +int iwl_send_add_station(struct iwl_priv *priv, + struct iwl_addsta_cmd *sta, u8 flags) +{ + struct iwl_rx_packet *res = NULL; + int rc = 0; + struct iwl_host_cmd cmd = { + .id = REPLY_ADD_STA, + .len = sizeof(struct iwl_addsta_cmd), + .meta.flags = flags, + .data = sta, + }; + + if (flags & CMD_ASYNC) + cmd.meta.u.callback = iwl_add_sta_sync_callback; + else + cmd.meta.flags |= CMD_WANT_SKB; + + rc = iwl_send_cmd(priv, &cmd); + + if (rc || (flags & CMD_ASYNC)) + return rc; + + res = (struct iwl_rx_packet *)cmd.meta.u.skb->data; + if (res->hdr.flags & IWL_CMD_FAILED_MSK) { + IWL_ERROR("Bad return from REPLY_ADD_STA (0x%08X)\n", + res->hdr.flags); + rc = -EIO; + } + + if (rc == 0) { + switch (res->u.add_sta.status) { + case ADD_STA_SUCCESS_MSK: + IWL_DEBUG_INFO("REPLY_ADD_STA PASSED\n"); + break; + default: + rc = -EIO; + IWL_WARNING("REPLY_ADD_STA failed\n"); + break; + } + } + + priv->alloc_rxb_skb--; + dev_kfree_skb_any(cmd.meta.u.skb); + + return rc; +} + +static int iwl_update_sta_key_info(struct iwl_priv *priv, + struct ieee80211_key_conf *keyconf, + u8 sta_id) +{ + unsigned long flags; + __le16 key_flags = 0; + + switch (keyconf->alg) { + case ALG_CCMP: + key_flags |= STA_KEY_FLG_CCMP; + key_flags |= cpu_to_le16( + keyconf->keyidx << STA_KEY_FLG_KEYID_POS); + key_flags &= ~STA_KEY_FLG_INVALID; + break; + case ALG_TKIP: + case ALG_WEP: + return -EINVAL; + default: + return -EINVAL; + } + spin_lock_irqsave(&priv->sta_lock, flags); + priv->stations[sta_id].keyinfo.alg = keyconf->alg; + priv->stations[sta_id].keyinfo.keylen = keyconf->keylen; + memcpy(priv->stations[sta_id].keyinfo.key, keyconf->key, + keyconf->keylen); + + memcpy(priv->stations[sta_id].sta.key.key, keyconf->key, + keyconf->keylen); + priv->stations[sta_id].sta.key.key_flags = key_flags; + priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK; + priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK; + + spin_unlock_irqrestore(&priv->sta_lock, flags); + + IWL_DEBUG_INFO("hwcrypto: modify ucode station key info\n"); + iwl_send_add_station(priv, &priv->stations[sta_id].sta, 0); + return 0; +} + +static int iwl_clear_sta_key_info(struct iwl_priv *priv, u8 sta_id) +{ + unsigned long flags; + + spin_lock_irqsave(&priv->sta_lock, flags); + memset(&priv->stations[sta_id].keyinfo, 0, sizeof(struct iwl_hw_key)); + memset(&priv->stations[sta_id].sta.key, 0, sizeof(struct iwl_keyinfo)); + priv->stations[sta_id].sta.key.key_flags = STA_KEY_FLG_NO_ENC; + priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK; + priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK; + 
spin_unlock_irqrestore(&priv->sta_lock, flags); + + IWL_DEBUG_INFO("hwcrypto: clear ucode station key info\n"); + iwl_send_add_station(priv, &priv->stations[sta_id].sta, 0); + return 0; +} + +static void iwl_clear_free_frames(struct iwl_priv *priv) +{ + struct list_head *element; + + IWL_DEBUG_INFO("%d frames on pre-allocated heap on clear.\n", + priv->frames_count); + + while (!list_empty(&priv->free_frames)) { + element = priv->free_frames.next; + list_del(element); + kfree(list_entry(element, struct iwl_frame, list)); + priv->frames_count--; + } + + if (priv->frames_count) { + IWL_WARNING("%d frames still in use. Did we lose one?\n", + priv->frames_count); + priv->frames_count = 0; + } +} + +static struct iwl_frame *iwl_get_free_frame(struct iwl_priv *priv) +{ + struct iwl_frame *frame; + struct list_head *element; + if (list_empty(&priv->free_frames)) { + frame = kzalloc(sizeof(*frame), GFP_KERNEL); + if (!frame) { + IWL_ERROR("Could not allocate frame!\n"); + return NULL; + } + + priv->frames_count++; + return frame; + } + + element = priv->free_frames.next; + list_del(element); + return list_entry(element, struct iwl_frame, list); +} + +static void iwl_free_frame(struct iwl_priv *priv, struct iwl_frame *frame) +{ + memset(frame, 0, sizeof(*frame)); + list_add(&frame->list, &priv->free_frames); +} + +unsigned int iwl_fill_beacon_frame(struct iwl_priv *priv, + struct ieee80211_hdr *hdr, + const u8 *dest, int left) +{ + + if (!iwl_is_associated(priv) || !priv->ibss_beacon || + ((priv->iw_mode != IEEE80211_IF_TYPE_IBSS) && + (priv->iw_mode != IEEE80211_IF_TYPE_AP))) + return 0; + + if (priv->ibss_beacon->len > left) + return 0; + + memcpy(hdr, priv->ibss_beacon->data, priv->ibss_beacon->len); + + return priv->ibss_beacon->len; +} + +int iwl_rate_index_from_plcp(int plcp) +{ + int i = 0; + + if (plcp & RATE_MCS_HT_MSK) { + i = (plcp & 0xff); + + if (i >= IWL_RATE_MIMO_6M_PLCP) + i = i - IWL_RATE_MIMO_6M_PLCP; + + i += IWL_FIRST_OFDM_RATE; + /* skip 9M not supported in ht*/ + if (i >= IWL_RATE_9M_INDEX) + i += 1; + if ((i >= IWL_FIRST_OFDM_RATE) && + (i <= IWL_LAST_OFDM_RATE)) + return i; + } else { + for (i = 0; i < ARRAY_SIZE(iwl_rates); i++) + if (iwl_rates[i].plcp == (plcp &0xFF)) + return i; + } + return -1; +} + +static u8 iwl_rate_get_lowest_plcp(int rate_mask) +{ + u8 i; + + for (i = IWL_RATE_1M_INDEX; i != IWL_RATE_INVALID; + i = iwl_rates[i].next_ieee) { + if (rate_mask & (1 << i)) + return iwl_rates[i].plcp; + } + + return IWL_RATE_INVALID; +} + +static int iwl_send_beacon_cmd(struct iwl_priv *priv) +{ + struct iwl_frame *frame; + unsigned int frame_size; + int rc; + u8 rate; + + frame = iwl_get_free_frame(priv); + + if (!frame) { + IWL_ERROR("Could not obtain free frame buffer for beacon " + "command.\n"); + return -ENOMEM; + } + + if (!(priv->staging_rxon.flags & RXON_FLG_BAND_24G_MSK)) { + rate = iwl_rate_get_lowest_plcp(priv->active_rate_basic & + 0xFF0); + if (rate == IWL_INVALID_RATE) + rate = IWL_RATE_6M_PLCP; + } else { + rate = iwl_rate_get_lowest_plcp(priv->active_rate_basic & 0xF); + if (rate == IWL_INVALID_RATE) + rate = IWL_RATE_1M_PLCP; + } + + frame_size = iwl_hw_get_beacon_cmd(priv, frame, rate); + + rc = iwl_send_cmd_pdu(priv, REPLY_TX_BEACON, frame_size, + &frame->u.cmd[0]); + + iwl_free_frame(priv, frame); + + return rc; +} + +/****************************************************************************** + * + * EEPROM related functions + * + ******************************************************************************/ + +static void get_eeprom_mac(struct 
iwl_priv *priv, u8 *mac) +{ + memcpy(mac, priv->eeprom.mac_address, 6); +} + +/** + * iwl_eeprom_init - read EEPROM contents + * + * Load the EEPROM from adapter into priv->eeprom + * + * NOTE: This routine uses the non-debug IO access functions. + */ +int iwl_eeprom_init(struct iwl_priv *priv) +{ + u16 *e = (u16 *)&priv->eeprom; + u32 gp = iwl_read32(priv, CSR_EEPROM_GP); + u32 r; + int sz = sizeof(priv->eeprom); + int rc; + int i; + u16 addr; + + /* The EEPROM structure has several padding buffers within it + * and when adding new EEPROM maps is subject to programmer errors + * which may be very difficult to identify without explicitly + * checking the resulting size of the eeprom map. */ + BUILD_BUG_ON(sizeof(priv->eeprom) != IWL_EEPROM_IMAGE_SIZE); + + if ((gp & CSR_EEPROM_GP_VALID_MSK) == CSR_EEPROM_GP_BAD_SIGNATURE) { + IWL_ERROR("EEPROM not found, EEPROM_GP=0x%08x", gp); + return -ENOENT; + } + + rc = iwl_eeprom_aqcuire_semaphore(priv); + if (rc < 0) { + IWL_ERROR("Failed to aqcuire EEPROM semaphore.\n"); + return -ENOENT; + } + + /* eeprom is an array of 16bit values */ + for (addr = 0; addr < sz; addr += sizeof(u16)) { + _iwl_write32(priv, CSR_EEPROM_REG, addr << 1); + _iwl_clear_bit(priv, CSR_EEPROM_REG, CSR_EEPROM_REG_BIT_CMD); + + for (i = 0; i < IWL_EEPROM_ACCESS_TIMEOUT; + i += IWL_EEPROM_ACCESS_DELAY) { + r = _iwl_read_restricted(priv, CSR_EEPROM_REG); + if (r & CSR_EEPROM_REG_READ_VALID_MSK) + break; + udelay(IWL_EEPROM_ACCESS_DELAY); + } + + if (!(r & CSR_EEPROM_REG_READ_VALID_MSK)) { + IWL_ERROR("Time out reading EEPROM[%d]", addr); + rc = -ETIMEDOUT; + goto done; + } + e[addr / 2] = le16_to_cpu(r >> 16); + } + rc = 0; + +done: + iwl_eeprom_release_semaphore(priv); + return rc; +} + +/****************************************************************************** + * + * Misc. internal state and helper functions + * + ******************************************************************************/ +#ifdef CONFIG_IWLWIFI_DEBUG + +/** + * iwl_report_frame - dump frame to syslog during debug sessions + * + * hack this function to show different aspects of received frames, + * including selective frame dumps. + * group100 parameter selects whether to show 1 out of 100 good frames. + * + * TODO: ieee80211_hdr stuff is common to 3945 and 4965, so frame type + * info output is okay, but some of this stuff (e.g. iwl_rx_frame_stats) + * is 3945-specific and gives bad output for 4965. Need to split the + * functionality, keep common stuff here. 
+ */ +void iwl_report_frame(struct iwl_priv *priv, + struct iwl_rx_packet *pkt, + struct ieee80211_hdr *header, int group100) +{ + u32 to_us; + u32 print_summary = 0; + u32 print_dump = 0; /* set to 1 to dump all frames' contents */ + u32 hundred = 0; + u32 dataframe = 0; + u16 fc; + u16 seq_ctl; + u16 channel; + u16 phy_flags; + int rate_sym; + u16 length; + u16 status; + u16 bcn_tmr; + u32 tsf_low; + u64 tsf; + u8 rssi; + u8 agc; + u16 sig_avg; + u16 noise_diff; + struct iwl_rx_frame_stats *rx_stats = IWL_RX_STATS(pkt); + struct iwl_rx_frame_hdr *rx_hdr = IWL_RX_HDR(pkt); + struct iwl_rx_frame_end *rx_end = IWL_RX_END(pkt); + u8 *data = IWL_RX_DATA(pkt); + + /* MAC header */ + fc = le16_to_cpu(header->frame_control); + seq_ctl = le16_to_cpu(header->seq_ctrl); + + /* metadata */ + channel = le16_to_cpu(rx_hdr->channel); + phy_flags = le16_to_cpu(rx_hdr->phy_flags); + rate_sym = rx_hdr->rate; + length = le16_to_cpu(rx_hdr->len); + + /* end-of-frame status and timestamp */ + status = le32_to_cpu(rx_end->status); + bcn_tmr = le32_to_cpu(rx_end->beacon_timestamp); + tsf_low = le64_to_cpu(rx_end->timestamp) & 0x0ffffffff; + tsf = le64_to_cpu(rx_end->timestamp); + + /* signal statistics */ + rssi = rx_stats->rssi; + agc = rx_stats->agc; + sig_avg = le16_to_cpu(rx_stats->sig_avg); + noise_diff = le16_to_cpu(rx_stats->noise_diff); + + to_us = !compare_ether_addr(header->addr1, priv->mac_addr); + + /* if data frame is to us and all is good, + * (optionally) print summary for only 1 out of every 100 */ + if (to_us && (fc & ~IEEE80211_FCTL_PROTECTED) == + (IEEE80211_FCTL_FROMDS | IEEE80211_FTYPE_DATA)) { + dataframe = 1; + if (!group100) + print_summary = 1; /* print each frame */ + else if (priv->framecnt_to_us < 100) { + priv->framecnt_to_us++; + print_summary = 0; + } else { + priv->framecnt_to_us = 0; + print_summary = 1; + hundred = 1; + } + } else { + /* print summary for all other frames */ + print_summary = 1; + } + + if (print_summary) { + char *title; + u32 rate; + + if (hundred) + title = "100Frames"; + else if (fc & IEEE80211_FCTL_RETRY) + title = "Retry"; + else if (ieee80211_is_assoc_response(fc)) + title = "AscRsp"; + else if (ieee80211_is_reassoc_response(fc)) + title = "RasRsp"; + else if (ieee80211_is_probe_response(fc)) { + title = "PrbRsp"; + print_dump = 1; /* dump frame contents */ + } else if (ieee80211_is_beacon(fc)) { + title = "Beacon"; + print_dump = 1; /* dump frame contents */ + } else if (ieee80211_is_atim(fc)) + title = "ATIM"; + else if (ieee80211_is_auth(fc)) + title = "Auth"; + else if (ieee80211_is_deauth(fc)) + title = "DeAuth"; + else if (ieee80211_is_disassoc(fc)) + title = "DisAssoc"; + else + title = "Frame"; + + rate = iwl_rate_index_from_plcp(rate_sym); + if (rate == -1) + rate = 0; + else + rate = iwl_rates[rate].ieee / 2; + + /* print frame summary. + * MAC addresses show just the last byte (for brevity), + * but you can hack it to show more, if you'd like to. 
*/ + if (dataframe) + IWL_DEBUG_RX("%s: mhd=0x%04x, dst=0x%02x, " + "len=%u, rssi=%d, chnl=%d, rate=%u, \n", + title, fc, header->addr1[5], + length, rssi, channel, rate); + else { + /* src/dst addresses assume managed mode */ + IWL_DEBUG_RX("%s: 0x%04x, dst=0x%02x, " + "src=0x%02x, rssi=%u, tim=%lu usec, " + "phy=0x%02x, chnl=%d\n", + title, fc, header->addr1[5], + header->addr3[5], rssi, + tsf_low - priv->scan_start_tsf, + phy_flags, channel); + } + } + if (print_dump) + iwl_print_hex_dump(IWL_DL_RX, data, length); +} +#endif + +static void iwl_unset_hw_setting(struct iwl_priv *priv) +{ + if (priv->hw_setting.shared_virt) + pci_free_consistent(priv->pci_dev, + sizeof(struct iwl_shared), + priv->hw_setting.shared_virt, + priv->hw_setting.shared_phys); +} + +/** + * iwl_supported_rate_to_ie - fill in the supported rate in IE field + * + * return : set the bit for each supported rate insert in ie + */ +static u16 iwl_supported_rate_to_ie(u8 *ie, u16 supported_rate, + u16 basic_rate, int max_count) +{ + u16 ret_rates = 0, bit; + int i; + u8 *rates; + + rates = &(ie[1]); + + for (bit = 1, i = 0; i < IWL_RATE_COUNT; i++, bit <<= 1) { + if (bit & supported_rate) { + ret_rates |= bit; + rates[*ie] = iwl_rates[i].ieee | + ((bit & basic_rate) ? 0x80 : 0x00); + *ie = *ie + 1; + if (*ie >= max_count) + break; + } + } + + return ret_rates; +} + +#ifdef CONFIG_IWLWIFI_HT +void static iwl_set_ht_capab(struct ieee80211_hw *hw, + struct ieee80211_ht_capability *ht_cap, + u8 use_wide_chan); +#endif + +/** + * iwl_fill_probe_req - fill in all required fields and IE for probe request + */ +static u16 iwl_fill_probe_req(struct iwl_priv *priv, + struct ieee80211_mgmt *frame, + int left, int is_direct) +{ + int len = 0; + u8 *pos = NULL; + u16 ret_rates; + + /* Make sure there is enough space for the probe request, + * two mandatory IEs and the data */ + left -= 24; + if (left < 0) + return 0; + len += 24; + + frame->frame_control = cpu_to_le16(IEEE80211_STYPE_PROBE_REQ); + memcpy(frame->da, BROADCAST_ADDR, ETH_ALEN); + memcpy(frame->sa, priv->mac_addr, ETH_ALEN); + memcpy(frame->bssid, BROADCAST_ADDR, ETH_ALEN); + frame->seq_ctrl = 0; + + /* fill in our indirect SSID IE */ + /* ...next IE... */ + + left -= 2; + if (left < 0) + return 0; + len += 2; + pos = &(frame->u.probe_req.variable[0]); + *pos++ = WLAN_EID_SSID; + *pos++ = 0; + + /* fill in our direct SSID IE... */ + if (is_direct) { + /* ...next IE... */ + left -= 2 + priv->essid_len; + if (left < 0) + return 0; + /* ... fill it in... */ + *pos++ = WLAN_EID_SSID; + *pos++ = priv->essid_len; + memcpy(pos, priv->essid, priv->essid_len); + pos += priv->essid_len; + len += 2 + priv->essid_len; + } + + /* fill in supported rate */ + /* ...next IE... */ + left -= 2; + if (left < 0) + return 0; + /* ... fill it in... */ + *pos++ = WLAN_EID_SUPP_RATES; + *pos = 0; + ret_rates = priv->active_rate = priv->rates_mask; + priv->active_rate_basic = priv->rates_mask & IWL_BASIC_RATES_MASK; + + iwl_supported_rate_to_ie(pos, priv->active_rate, + priv->active_rate_basic, left); + len += 2 + *pos; + pos += (*pos) + 1; + ret_rates = ~ret_rates & priv->active_rate; + + if (ret_rates == 0) + goto fill_end; + + /* fill in supported extended rate */ + /* ...next IE... */ + left -= 2; + if (left < 0) + return 0; + /* ... fill it in... 
*/ + *pos++ = WLAN_EID_EXT_SUPP_RATES; + *pos = 0; + iwl_supported_rate_to_ie(pos, ret_rates, priv->active_rate_basic, left); + if (*pos > 0) + len += 2 + *pos; + +#ifdef CONFIG_IWLWIFI_HT + if (is_direct && priv->is_ht_enabled) { + u8 use_wide_chan = 1; + + if (priv->channel_width != IWL_CHANNEL_WIDTH_40MHZ) + use_wide_chan = 0; + pos += (*pos) + 1; + *pos++ = WLAN_EID_HT_CAPABILITY; + *pos++ = sizeof(struct ieee80211_ht_capability); + iwl_set_ht_capab(NULL, (struct ieee80211_ht_capability *)pos, + use_wide_chan); + len += 2 + sizeof(struct ieee80211_ht_capability); + } +#endif /*CONFIG_IWLWIFI_HT */ + + fill_end: + return (u16)len; +} + +/* + * QoS support +*/ +#ifdef CONFIG_IWLWIFI_QOS +static int iwl_send_qos_params_command(struct iwl_priv *priv, + struct iwl_qosparam_cmd *qos) +{ + + return iwl_send_cmd_pdu(priv, REPLY_QOS_PARAM, + sizeof(struct iwl_qosparam_cmd), qos); +} + +static void iwl_reset_qos(struct iwl_priv *priv) +{ + u16 cw_min = 15; + u16 cw_max = 1023; + u8 aifs = 2; + u8 is_legacy = 0; + unsigned long flags; + int i; + + spin_lock_irqsave(&priv->lock, flags); + priv->qos_data.qos_active = 0; + + if (priv->iw_mode == IEEE80211_IF_TYPE_IBSS) { + if (priv->qos_data.qos_enable) + priv->qos_data.qos_active = 1; + if (!(priv->active_rate & 0xfff0)) { + cw_min = 31; + is_legacy = 1; + } + } else if (priv->iw_mode == IEEE80211_IF_TYPE_AP) { + if (priv->qos_data.qos_enable) + priv->qos_data.qos_active = 1; + } else if (!(priv->staging_rxon.flags & RXON_FLG_SHORT_SLOT_MSK)) { + cw_min = 31; + is_legacy = 1; + } + + if (priv->qos_data.qos_active) + aifs = 3; + + priv->qos_data.def_qos_parm.ac[0].cw_min = cpu_to_le16(cw_min); + priv->qos_data.def_qos_parm.ac[0].cw_max = cpu_to_le16(cw_max); + priv->qos_data.def_qos_parm.ac[0].aifsn = aifs; + priv->qos_data.def_qos_parm.ac[0].edca_txop = 0; + priv->qos_data.def_qos_parm.ac[0].reserved1 = 0; + + if (priv->qos_data.qos_active) { + i = 1; + priv->qos_data.def_qos_parm.ac[i].cw_min = cpu_to_le16(cw_min); + priv->qos_data.def_qos_parm.ac[i].cw_max = cpu_to_le16(cw_max); + priv->qos_data.def_qos_parm.ac[i].aifsn = 7; + priv->qos_data.def_qos_parm.ac[i].edca_txop = 0; + priv->qos_data.def_qos_parm.ac[i].reserved1 = 0; + + i = 2; + priv->qos_data.def_qos_parm.ac[i].cw_min = + cpu_to_le16((cw_min + 1) / 2 - 1); + priv->qos_data.def_qos_parm.ac[i].cw_max = + cpu_to_le16(cw_max); + priv->qos_data.def_qos_parm.ac[i].aifsn = 2; + if (is_legacy) + priv->qos_data.def_qos_parm.ac[i].edca_txop = + cpu_to_le16(6016); + else + priv->qos_data.def_qos_parm.ac[i].edca_txop = + cpu_to_le16(3008); + priv->qos_data.def_qos_parm.ac[i].reserved1 = 0; + + i = 3; + priv->qos_data.def_qos_parm.ac[i].cw_min = + cpu_to_le16((cw_min + 1) / 4 - 1); + priv->qos_data.def_qos_parm.ac[i].cw_max = + cpu_to_le16((cw_max + 1) / 2 - 1); + priv->qos_data.def_qos_parm.ac[i].aifsn = 2; + priv->qos_data.def_qos_parm.ac[i].reserved1 = 0; + if (is_legacy) + priv->qos_data.def_qos_parm.ac[i].edca_txop = + cpu_to_le16(3264); + else + priv->qos_data.def_qos_parm.ac[i].edca_txop = + cpu_to_le16(1504); + } else { + for (i = 1; i < 4; i++) { + priv->qos_data.def_qos_parm.ac[i].cw_min = + cpu_to_le16(cw_min); + priv->qos_data.def_qos_parm.ac[i].cw_max = + cpu_to_le16(cw_max); + priv->qos_data.def_qos_parm.ac[i].aifsn = aifs; + priv->qos_data.def_qos_parm.ac[i].edca_txop = 0; + priv->qos_data.def_qos_parm.ac[i].reserved1 = 0; + } + } + IWL_DEBUG_QOS("set QoS to default \n"); + + spin_unlock_irqrestore(&priv->lock, flags); +} + +static void iwl_activate_qos(struct iwl_priv *priv, u8 
force) +{ + unsigned long flags; + + if (priv == NULL) + return; + + if (test_bit(STATUS_EXIT_PENDING, &priv->status)) + return; + + if (!priv->qos_data.qos_enable) + return; + + spin_lock_irqsave(&priv->lock, flags); + priv->qos_data.def_qos_parm.qos_flags = 0; + + if (priv->qos_data.qos_cap.q_AP.queue_request && + !priv->qos_data.qos_cap.q_AP.txop_request) + priv->qos_data.def_qos_parm.qos_flags |= + QOS_PARAM_FLG_TXOP_TYPE_MSK; + + if (priv->qos_data.qos_active) + priv->qos_data.def_qos_parm.qos_flags |= + QOS_PARAM_FLG_UPDATE_EDCA_MSK; + + spin_unlock_irqrestore(&priv->lock, flags); + + if (force || iwl_is_associated(priv)) { + IWL_DEBUG_QOS("send QoS cmd with Qos active %d \n", + priv->qos_data.qos_active); + + iwl_send_qos_params_command(priv, + &(priv->qos_data.def_qos_parm)); + } +} + +#endif /* CONFIG_IWLWIFI_QOS */ +/* + * Power management (not Tx power!) functions + */ +#define MSEC_TO_USEC 1024 + +#define NOSLP __constant_cpu_to_le16(0), 0, 0 +#define SLP IWL_POWER_DRIVER_ALLOW_SLEEP_MSK, 0, 0 +#define SLP_TIMEOUT(T) __constant_cpu_to_le32((T) * MSEC_TO_USEC) +#define SLP_VEC(X0, X1, X2, X3, X4) {__constant_cpu_to_le32(X0), \ + __constant_cpu_to_le32(X1), \ + __constant_cpu_to_le32(X2), \ + __constant_cpu_to_le32(X3), \ + __constant_cpu_to_le32(X4)} + + +/* default power management (not Tx power) table values */ +/* for tim 0-10 */ +static struct iwl_power_vec_entry range_0[IWL_POWER_AC] = { + {{NOSLP, SLP_TIMEOUT(0), SLP_TIMEOUT(0), SLP_VEC(0, 0, 0, 0, 0)}, 0}, + {{SLP, SLP_TIMEOUT(200), SLP_TIMEOUT(500), SLP_VEC(1, 2, 3, 4, 4)}, 0}, + {{SLP, SLP_TIMEOUT(200), SLP_TIMEOUT(300), SLP_VEC(2, 4, 6, 7, 7)}, 0}, + {{SLP, SLP_TIMEOUT(50), SLP_TIMEOUT(100), SLP_VEC(2, 6, 9, 9, 10)}, 0}, + {{SLP, SLP_TIMEOUT(50), SLP_TIMEOUT(25), SLP_VEC(2, 7, 9, 9, 10)}, 1}, + {{SLP, SLP_TIMEOUT(25), SLP_TIMEOUT(25), SLP_VEC(4, 7, 10, 10, 10)}, 1} +}; + +/* for tim > 10 */ +static struct iwl_power_vec_entry range_1[IWL_POWER_AC] = { + {{NOSLP, SLP_TIMEOUT(0), SLP_TIMEOUT(0), SLP_VEC(0, 0, 0, 0, 0)}, 0}, + {{SLP, SLP_TIMEOUT(200), SLP_TIMEOUT(500), + SLP_VEC(1, 2, 3, 4, 0xFF)}, 0}, + {{SLP, SLP_TIMEOUT(200), SLP_TIMEOUT(300), + SLP_VEC(2, 4, 6, 7, 0xFF)}, 0}, + {{SLP, SLP_TIMEOUT(50), SLP_TIMEOUT(100), + SLP_VEC(2, 6, 9, 9, 0xFF)}, 0}, + {{SLP, SLP_TIMEOUT(50), SLP_TIMEOUT(25), SLP_VEC(2, 7, 9, 9, 0xFF)}, 0}, + {{SLP, SLP_TIMEOUT(25), SLP_TIMEOUT(25), + SLP_VEC(4, 7, 10, 10, 0xFF)}, 0} +}; + +int iwl_power_init_handle(struct iwl_priv *priv) +{ + int rc = 0, i; + struct iwl_power_mgr *pow_data; + int size = sizeof(struct iwl_power_vec_entry) * IWL_POWER_AC; + u16 pci_pm; + + IWL_DEBUG_POWER("Initialize power \n"); + + pow_data = &(priv->power_data); + + memset(pow_data, 0, sizeof(*pow_data)); + + pow_data->active_index = IWL_POWER_RANGE_0; + pow_data->dtim_val = 0xffff; + + memcpy(&pow_data->pwr_range_0[0], &range_0[0], size); + memcpy(&pow_data->pwr_range_1[0], &range_1[0], size); + + rc = pci_read_config_word(priv->pci_dev, PCI_LINK_CTRL, &pci_pm); + if (rc != 0) + return 0; + else { + struct iwl_powertable_cmd *cmd; + + IWL_DEBUG_POWER("adjust power command flags\n"); + + for (i = 0; i < IWL_POWER_AC; i++) { + cmd = &pow_data->pwr_range_0[i].cmd; + + if (pci_pm & 0x1) + cmd->flags &= ~IWL_POWER_PCI_PM_MSK; + else + cmd->flags |= IWL_POWER_PCI_PM_MSK; + } + } + return rc; +} + +static int iwl_update_power_cmd(struct iwl_priv *priv, + struct iwl_powertable_cmd *cmd, u32 mode) +{ + int rc = 0, i; + u8 skip; + u32 max_sleep = 0; + struct iwl_power_vec_entry *range; + u8 period = 0; + struct iwl_power_mgr 
*pow_data; + + if (mode > IWL_POWER_INDEX_5) { + IWL_DEBUG_POWER("Error invalid power mode \n"); + return -1; + } + pow_data = &(priv->power_data); + + if (pow_data->active_index == IWL_POWER_RANGE_0) + range = &pow_data->pwr_range_0[0]; + else + range = &pow_data->pwr_range_1[1]; + + memcpy(cmd, &range[mode].cmd, sizeof(struct iwl_powertable_cmd)); + +#ifdef IWL_MAC80211_DISABLE + if (priv->assoc_network != NULL) { + unsigned long flags; + + period = priv->assoc_network->tim.tim_period; + } +#endif /*IWL_MAC80211_DISABLE */ + skip = range[mode].no_dtim; + + if (period == 0) { + period = 1; + skip = 0; + } + + if (skip == 0) { + max_sleep = period; + cmd->flags &= ~IWL_POWER_SLEEP_OVER_DTIM_MSK; + } else { + __le32 slp_itrvl = cmd->sleep_interval[IWL_POWER_VEC_SIZE - 1]; + max_sleep = (le32_to_cpu(slp_itrvl) / period) * period; + cmd->flags |= IWL_POWER_SLEEP_OVER_DTIM_MSK; + } + + for (i = 0; i < IWL_POWER_VEC_SIZE; i++) { + if (le32_to_cpu(cmd->sleep_interval[i]) > max_sleep) + cmd->sleep_interval[i] = cpu_to_le32(max_sleep); + } + + IWL_DEBUG_POWER("Flags value = 0x%08X\n", cmd->flags); + IWL_DEBUG_POWER("Tx timeout = %u\n", le32_to_cpu(cmd->tx_data_timeout)); + IWL_DEBUG_POWER("Rx timeout = %u\n", le32_to_cpu(cmd->rx_data_timeout)); + IWL_DEBUG_POWER("Sleep interval vector = { %d , %d , %d , %d , %d }\n", + le32_to_cpu(cmd->sleep_interval[0]), + le32_to_cpu(cmd->sleep_interval[1]), + le32_to_cpu(cmd->sleep_interval[2]), + le32_to_cpu(cmd->sleep_interval[3]), + le32_to_cpu(cmd->sleep_interval[4])); + + return rc; +} + +static int iwl_send_power_mode(struct iwl_priv *priv, u32 mode) +{ + u32 final_mode = mode; + int rc; + struct iwl_powertable_cmd cmd; + + /* If on battery, set to 3, + * if plugged into AC power, set to CAM ("continuosly aware mode"), + * else user level */ + switch (mode) { + case IWL_POWER_BATTERY: + final_mode = IWL_POWER_INDEX_3; + break; + case IWL_POWER_AC: + final_mode = IWL_POWER_MODE_CAM; + break; + default: + final_mode = mode; + break; + } + + cmd.keep_alive_beacons = 0; + + iwl_update_power_cmd(priv, &cmd, final_mode); + + rc = iwl_send_cmd_pdu(priv, POWER_TABLE_CMD, sizeof(cmd), &cmd); + + if (final_mode == IWL_POWER_MODE_CAM) + clear_bit(STATUS_POWER_PMI, &priv->status); + else + set_bit(STATUS_POWER_PMI, &priv->status); + + return rc; +} + +int iwl_is_network_packet(struct iwl_priv *priv, struct ieee80211_hdr *header) +{ + /* Filter incoming packets to determine if they are targeted toward + * this network, discarding packets coming from ourselves */ + switch (priv->iw_mode) { + case IEEE80211_IF_TYPE_IBSS: /* Header: Dest. | Source | BSSID */ + /* packets from our adapter are dropped (echo) */ + if (!compare_ether_addr(header->addr2, priv->mac_addr)) + return 0; + /* {broad,multi}cast packets to our IBSS go through */ + if (is_multicast_ether_addr(header->addr1)) + return !compare_ether_addr(header->addr3, priv->bssid); + /* packets to our adapter go through */ + return !compare_ether_addr(header->addr1, priv->mac_addr); + case IEEE80211_IF_TYPE_STA: /* Header: Dest. 
| AP{BSSID} | Source */ + /* packets from our adapter are dropped (echo) */ + if (!compare_ether_addr(header->addr3, priv->mac_addr)) + return 0; + /* {broad,multi}cast packets to our BSS go through */ + if (is_multicast_ether_addr(header->addr1)) + return !compare_ether_addr(header->addr2, priv->bssid); + /* packets to our adapter go through */ + return !compare_ether_addr(header->addr1, priv->mac_addr); + } + + return 1; +} + +#define TX_STATUS_ENTRY(x) case TX_STATUS_FAIL_ ## x: return #x + +const char *iwl_get_tx_fail_reason(u32 status) +{ + switch (status & TX_STATUS_MSK) { + case TX_STATUS_SUCCESS: + return "SUCCESS"; + TX_STATUS_ENTRY(SHORT_LIMIT); + TX_STATUS_ENTRY(LONG_LIMIT); + TX_STATUS_ENTRY(FIFO_UNDERRUN); + TX_STATUS_ENTRY(MGMNT_ABORT); + TX_STATUS_ENTRY(NEXT_FRAG); + TX_STATUS_ENTRY(LIFE_EXPIRE); + TX_STATUS_ENTRY(DEST_PS); + TX_STATUS_ENTRY(ABORTED); + TX_STATUS_ENTRY(BT_RETRY); + TX_STATUS_ENTRY(STA_INVALID); + TX_STATUS_ENTRY(FRAG_DROPPED); + TX_STATUS_ENTRY(TID_DISABLE); + TX_STATUS_ENTRY(FRAME_FLUSHED); + TX_STATUS_ENTRY(INSUFFICIENT_CF_POLL); + TX_STATUS_ENTRY(TX_LOCKED); + TX_STATUS_ENTRY(NO_BEACON_ON_RADAR); + } + + return "UNKNOWN"; +} + +/** + * iwl_scan_cancel - Cancel any currently executing HW scan + * + * NOTE: priv->mutex is not required before calling this function + */ +static int iwl_scan_cancel(struct iwl_priv *priv) +{ + if (!test_bit(STATUS_SCAN_HW, &priv->status)) { + clear_bit(STATUS_SCANNING, &priv->status); + return 0; + } + + if (test_bit(STATUS_SCANNING, &priv->status)) { + if (!test_bit(STATUS_SCAN_ABORTING, &priv->status)) { + IWL_DEBUG_SCAN("Queuing scan abort.\n"); + set_bit(STATUS_SCAN_ABORTING, &priv->status); + queue_work(priv->workqueue, &priv->abort_scan); + + } else + IWL_DEBUG_SCAN("Scan abort already in progress.\n"); + + return test_bit(STATUS_SCANNING, &priv->status); + } + + return 0; +} + +/** + * iwl_scan_cancel_timeout - Cancel any currently executing HW scan + * @ms: amount of time to wait (in milliseconds) for scan to abort + * + * NOTE: priv->mutex must be held before calling this function + */ +static int iwl_scan_cancel_timeout(struct iwl_priv *priv, unsigned long ms) +{ + unsigned long now = jiffies; + int ret; + + ret = iwl_scan_cancel(priv); + if (ret && ms) { + mutex_unlock(&priv->mutex); + while (!time_after(jiffies, now + msecs_to_jiffies(ms)) && + test_bit(STATUS_SCANNING, &priv->status)) + msleep(1); + mutex_lock(&priv->mutex); + + return test_bit(STATUS_SCANNING, &priv->status); + } + + return ret; +} + +static void iwl_sequence_reset(struct iwl_priv *priv) +{ + /* Reset ieee stats */ + + /* We don't reset the net_device_stats (ieee->stats) on + * re-association */ + + priv->last_seq_num = -1; + priv->last_frag_num = -1; + priv->last_packet_time = 0; + + iwl_scan_cancel(priv); +} + +#define MAX_UCODE_BEACON_INTERVAL 4096 +#define INTEL_CONN_LISTEN_INTERVAL __constant_cpu_to_le16(0xA) + +static __le16 iwl_adjust_beacon_interval(u16 beacon_val) +{ + u16 new_val = 0; + u16 beacon_factor = 0; + + beacon_factor = + (beacon_val + MAX_UCODE_BEACON_INTERVAL) + / MAX_UCODE_BEACON_INTERVAL; + new_val = beacon_val / beacon_factor; + + return cpu_to_le16(new_val); +} + +static void iwl_setup_rxon_timing(struct iwl_priv *priv) +{ + u64 interval_tm_unit; + u64 tsf, result; + unsigned long flags; + struct ieee80211_conf *conf = NULL; + u16 beacon_int = 0; + + conf = ieee80211_get_hw_conf(priv->hw); + + spin_lock_irqsave(&priv->lock, flags); + priv->rxon_timing.timestamp.dw[1] = cpu_to_le32(priv->timestamp1); + 
priv->rxon_timing.timestamp.dw[0] = cpu_to_le32(priv->timestamp0); + + priv->rxon_timing.listen_interval = INTEL_CONN_LISTEN_INTERVAL; + + tsf = priv->timestamp1; + tsf = ((tsf << 32) | priv->timestamp0); + + beacon_int = priv->beacon_int; + spin_unlock_irqrestore(&priv->lock, flags); + + if (priv->iw_mode == IEEE80211_IF_TYPE_STA) { + if (beacon_int == 0) { + priv->rxon_timing.beacon_interval = cpu_to_le16(100); + priv->rxon_timing.beacon_init_val = cpu_to_le32(102400); + } else { + priv->rxon_timing.beacon_interval = + cpu_to_le16(beacon_int); + priv->rxon_timing.beacon_interval = + iwl_adjust_beacon_interval( + le16_to_cpu(priv->rxon_timing.beacon_interval)); + } + + priv->rxon_timing.atim_window = 0; + } else { + priv->rxon_timing.beacon_interval = + iwl_adjust_beacon_interval(conf->beacon_int); + /* TODO: we need to get atim_window from upper stack + * for now we set to 0 */ + priv->rxon_timing.atim_window = 0; + } + + interval_tm_unit = + (le16_to_cpu(priv->rxon_timing.beacon_interval) * 1024); + result = do_div(tsf, interval_tm_unit); + priv->rxon_timing.beacon_init_val = + cpu_to_le32((u32) ((u64) interval_tm_unit - result)); + + IWL_DEBUG_ASSOC + ("beacon interval %d beacon timer %d beacon tim %d\n", + le16_to_cpu(priv->rxon_timing.beacon_interval), + le32_to_cpu(priv->rxon_timing.beacon_init_val), + le16_to_cpu(priv->rxon_timing.atim_window)); +} + +static int iwl_scan_initiate(struct iwl_priv *priv) +{ + if (priv->iw_mode == IEEE80211_IF_TYPE_AP) { + IWL_ERROR("APs don't scan.\n"); + return 0; + } + + if (!iwl_is_ready_rf(priv)) { + IWL_DEBUG_SCAN("Aborting scan due to not ready.\n"); + return -EIO; + } + + if (test_bit(STATUS_SCANNING, &priv->status)) { + IWL_DEBUG_SCAN("Scan already in progress.\n"); + return -EAGAIN; + } + + if (test_bit(STATUS_SCAN_ABORTING, &priv->status)) { + IWL_DEBUG_SCAN("Scan request while abort pending. 
" + "Queuing.\n"); + return -EAGAIN; + } + + IWL_DEBUG_INFO("Starting scan...\n"); + priv->scan_bands = 2; + set_bit(STATUS_SCANNING, &priv->status); + priv->scan_start = jiffies; + priv->scan_pass_start = priv->scan_start; + + queue_work(priv->workqueue, &priv->request_scan); + + return 0; +} + +static int iwl_set_rxon_hwcrypto(struct iwl_priv *priv, int hw_decrypt) +{ + struct iwl_rxon_cmd *rxon = &priv->staging_rxon; + + if (hw_decrypt) + rxon->filter_flags &= ~RXON_FILTER_DIS_DECRYPT_MSK; + else + rxon->filter_flags |= RXON_FILTER_DIS_DECRYPT_MSK; + + return 0; +} + +static void iwl_set_flags_for_phymode(struct iwl_priv *priv, u8 phymode) +{ + if (phymode == MODE_IEEE80211A) { + priv->staging_rxon.flags &= + ~(RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK + | RXON_FLG_CCK_MSK); + priv->staging_rxon.flags |= RXON_FLG_SHORT_SLOT_MSK; + } else { + /* Copied from iwl_bg_post_associate() */ + if (priv->assoc_capability & WLAN_CAPABILITY_SHORT_SLOT_TIME) + priv->staging_rxon.flags |= RXON_FLG_SHORT_SLOT_MSK; + else + priv->staging_rxon.flags &= ~RXON_FLG_SHORT_SLOT_MSK; + + if (priv->iw_mode == IEEE80211_IF_TYPE_IBSS) + priv->staging_rxon.flags &= ~RXON_FLG_SHORT_SLOT_MSK; + + priv->staging_rxon.flags |= RXON_FLG_BAND_24G_MSK; + priv->staging_rxon.flags |= RXON_FLG_AUTO_DETECT_MSK; + priv->staging_rxon.flags &= ~RXON_FLG_CCK_MSK; + } +} + +/* + * initilize rxon structure with default values fromm eeprom + */ +static void iwl_connection_init_rx_config(struct iwl_priv *priv) +{ + const struct iwl_channel_info *ch_info; + + memset(&priv->staging_rxon, 0, sizeof(priv->staging_rxon)); + + switch (priv->iw_mode) { + case IEEE80211_IF_TYPE_AP: + priv->staging_rxon.dev_type = RXON_DEV_TYPE_AP; + break; + + case IEEE80211_IF_TYPE_STA: + priv->staging_rxon.dev_type = RXON_DEV_TYPE_ESS; + priv->staging_rxon.filter_flags = RXON_FILTER_ACCEPT_GRP_MSK; + break; + + case IEEE80211_IF_TYPE_IBSS: + priv->staging_rxon.dev_type = RXON_DEV_TYPE_IBSS; + priv->staging_rxon.flags = RXON_FLG_SHORT_PREAMBLE_MSK; + priv->staging_rxon.filter_flags = RXON_FILTER_BCON_AWARE_MSK | + RXON_FILTER_ACCEPT_GRP_MSK; + break; + + case IEEE80211_IF_TYPE_MNTR: + priv->staging_rxon.dev_type = RXON_DEV_TYPE_SNIFFER; + priv->staging_rxon.filter_flags = RXON_FILTER_PROMISC_MSK | + RXON_FILTER_CTL2HOST_MSK | RXON_FILTER_ACCEPT_GRP_MSK; + break; + } + +#if 0 + /* TODO: Figure out when short_preamble would be set and cache from + * that */ + if (!hw_to_local(priv->hw)->short_preamble) + priv->staging_rxon.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK; + else + priv->staging_rxon.flags |= RXON_FLG_SHORT_PREAMBLE_MSK; +#endif + + ch_info = iwl_get_channel_info(priv, priv->phymode, + le16_to_cpu(priv->staging_rxon.channel)); + + if (!ch_info) + ch_info = &priv->channel_info[0]; + + /* + * in some case A channels are all non IBSS + * in this case force B/G channel + */ + if ((priv->iw_mode == IEEE80211_IF_TYPE_IBSS) && + !(is_channel_ibss(ch_info))) + ch_info = &priv->channel_info[0]; + + priv->staging_rxon.channel = cpu_to_le16(ch_info->channel); + if (is_channel_a_band(ch_info)) + priv->phymode = MODE_IEEE80211A; + else + priv->phymode = MODE_IEEE80211G; + + iwl_set_flags_for_phymode(priv, priv->phymode); + + priv->staging_rxon.ofdm_basic_rates = + (IWL_OFDM_RATES_MASK >> IWL_FIRST_OFDM_RATE) & 0xFF; + priv->staging_rxon.cck_basic_rates = + (IWL_CCK_RATES_MASK >> IWL_FIRST_CCK_RATE) & 0xF; + + priv->staging_rxon.flags &= ~(RXON_FLG_CHANNEL_MODE_MIXED_MSK | + RXON_FLG_CHANNEL_MODE_PURE_40_MSK); + memcpy(priv->staging_rxon.node_addr, 
priv->mac_addr, ETH_ALEN); + memcpy(priv->staging_rxon.wlap_bssid_addr, priv->mac_addr, ETH_ALEN); + priv->staging_rxon.ofdm_ht_single_stream_basic_rates = 0xff; + priv->staging_rxon.ofdm_ht_dual_stream_basic_rates = 0xff; + iwl4965_set_rxon_chain(priv); +} + +static int iwl_set_mode(struct iwl_priv *priv, int mode) +{ + if (!iwl_is_ready_rf(priv)) + return -EAGAIN; + + if (mode == IEEE80211_IF_TYPE_IBSS) { + const struct iwl_channel_info *ch_info; + + ch_info = iwl_get_channel_info(priv, + priv->phymode, + le16_to_cpu(priv->staging_rxon.channel)); + + if (!ch_info || !is_channel_ibss(ch_info)) { + IWL_ERROR("channel %d not IBSS channel\n", + le16_to_cpu(priv->staging_rxon.channel)); + return -EINVAL; + } + } + + cancel_delayed_work(&priv->scan_check); + if (iwl_scan_cancel_timeout(priv, 100)) { + IWL_WARNING("Aborted scan still in progress after 100ms\n"); + IWL_DEBUG_MAC80211("leaving - scan abort failed.\n"); + return -EAGAIN; + } + + priv->iw_mode = mode; + + iwl_connection_init_rx_config(priv); + memcpy(priv->staging_rxon.node_addr, priv->mac_addr, ETH_ALEN); + + iwl_clear_stations_table(priv); + + iwl_commit_rxon(priv); + + return 0; +} + +static void iwl_build_tx_cmd_hwcrypto(struct iwl_priv *priv, + struct ieee80211_tx_control *ctl, + struct iwl_cmd *cmd, + struct sk_buff *skb_frag, + int last_frag) +{ + struct iwl_hw_key *keyinfo = &priv->stations[ctl->key_idx].keyinfo; + + switch (keyinfo->alg) { + case ALG_CCMP: + cmd->cmd.tx.sec_ctl = TX_CMD_SEC_CCM; + memcpy(cmd->cmd.tx.key, keyinfo->key, keyinfo->keylen); + IWL_DEBUG_TX("tx_cmd with aes hwcrypto\n"); + break; + + case ALG_TKIP: +#if 0 + cmd->cmd.tx.sec_ctl = TX_CMD_SEC_TKIP; + + if (last_frag) + memcpy(cmd->cmd.tx.tkip_mic.byte, skb_frag->tail - 8, + 8); + else + memset(cmd->cmd.tx.tkip_mic.byte, 0, 8); +#endif + break; + + case ALG_WEP: + cmd->cmd.tx.sec_ctl = TX_CMD_SEC_WEP | + (ctl->key_idx & TX_CMD_SEC_MSK) << TX_CMD_SEC_SHIFT; + + if (keyinfo->keylen == 13) + cmd->cmd.tx.sec_ctl |= TX_CMD_SEC_KEY128; + + memcpy(&cmd->cmd.tx.key[3], keyinfo->key, keyinfo->keylen); + + IWL_DEBUG_TX("Configuring packet for WEP encryption " + "with key %d\n", ctl->key_idx); + break; + + default: + printk(KERN_ERR "Unknown encode alg %d\n", keyinfo->alg); + break; + } +} + +/* + * handle build REPLY_TX command notification. 
+ */ +static void iwl_build_tx_cmd_basic(struct iwl_priv *priv, + struct iwl_cmd *cmd, + struct ieee80211_tx_control *ctrl, + struct ieee80211_hdr *hdr, + int is_unicast, u8 std_id) +{ + __le16 *qc; + u16 fc = le16_to_cpu(hdr->frame_control); + __le32 tx_flags = cmd->cmd.tx.tx_flags; + + cmd->cmd.tx.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE; + if (!(ctrl->flags & IEEE80211_TXCTL_NO_ACK)) { + tx_flags |= TX_CMD_FLG_ACK_MSK; + if ((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_MGMT) + tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK; + if (ieee80211_is_probe_response(fc) && + !(le16_to_cpu(hdr->seq_ctrl) & 0xf)) + tx_flags |= TX_CMD_FLG_TSF_MSK; + } else { + tx_flags &= (~TX_CMD_FLG_ACK_MSK); + tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK; + } + + cmd->cmd.tx.sta_id = std_id; + if (ieee80211_get_morefrag(hdr)) + tx_flags |= TX_CMD_FLG_MORE_FRAG_MSK; + + qc = ieee80211_get_qos_ctrl(hdr); + if (qc) { + cmd->cmd.tx.tid_tspec = (u8) (le16_to_cpu(*qc) & 0xf); + tx_flags &= ~TX_CMD_FLG_SEQ_CTL_MSK; + } else + tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK; + + if (ctrl->flags & IEEE80211_TXCTL_USE_RTS_CTS) { + tx_flags |= TX_CMD_FLG_RTS_MSK; + tx_flags &= ~TX_CMD_FLG_CTS_MSK; + } else if (ctrl->flags & IEEE80211_TXCTL_USE_CTS_PROTECT) { + tx_flags &= ~TX_CMD_FLG_RTS_MSK; + tx_flags |= TX_CMD_FLG_CTS_MSK; + } + + if ((tx_flags & TX_CMD_FLG_RTS_MSK) || (tx_flags & TX_CMD_FLG_CTS_MSK)) + tx_flags |= TX_CMD_FLG_FULL_TXOP_PROT_MSK; + + tx_flags &= ~(TX_CMD_FLG_ANT_SEL_MSK); + if ((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_MGMT) { + if ((fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_ASSOC_REQ || + (fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_REASSOC_REQ) + cmd->cmd.tx.timeout.pm_frame_timeout = + cpu_to_le16(3); + else + cmd->cmd.tx.timeout.pm_frame_timeout = + cpu_to_le16(2); + } else + cmd->cmd.tx.timeout.pm_frame_timeout = 0; + + cmd->cmd.tx.driver_txop = 0; + cmd->cmd.tx.tx_flags = tx_flags; + cmd->cmd.tx.next_frame_len = 0; +} + +static int iwl_get_sta_id(struct iwl_priv *priv, struct ieee80211_hdr *hdr) +{ + int sta_id; + u16 fc = le16_to_cpu(hdr->frame_control); + DECLARE_MAC_BUF(mac); + + /* If this frame is broadcast or not data then use the broadcast + * station id */ + if (((fc & IEEE80211_FCTL_FTYPE) != IEEE80211_FTYPE_DATA) || + is_multicast_ether_addr(hdr->addr1)) + return priv->hw_setting.bcast_sta_id; + + switch (priv->iw_mode) { + + /* If this frame is part of a BSS network (we're a station), then + * we use the AP's station id */ + case IEEE80211_IF_TYPE_STA: + return IWL_AP_ID; + + /* If we are an AP, then find the station, or use BCAST */ + case IEEE80211_IF_TYPE_AP: + sta_id = iwl_hw_find_station(priv, hdr->addr1); + if (sta_id != IWL_INVALID_STATION) + return sta_id; + return priv->hw_setting.bcast_sta_id; + + /* If this frame is part of a IBSS network, then we use the + * target specific station id */ + case IEEE80211_IF_TYPE_IBSS: + sta_id = iwl_hw_find_station(priv, hdr->addr1); + if (sta_id != IWL_INVALID_STATION) + return sta_id; + + sta_id = iwl_add_station(priv, hdr->addr1, 0, CMD_ASYNC); + + if (sta_id != IWL_INVALID_STATION) + return sta_id; + + IWL_DEBUG_DROP("Station %s not in station map. 
" + "Defaulting to broadcast...\n", + print_mac(mac, hdr->addr1)); + iwl_print_hex_dump(IWL_DL_DROP, (u8 *) hdr, sizeof(*hdr)); + return priv->hw_setting.bcast_sta_id; + + default: + IWL_WARNING("Unkown mode of operation: %d", priv->iw_mode); + return priv->hw_setting.bcast_sta_id; + } +} + +/* + * start REPLY_TX command process + */ +static int iwl_tx_skb(struct iwl_priv *priv, + struct sk_buff *skb, struct ieee80211_tx_control *ctl) +{ + struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; + struct iwl_tfd_frame *tfd; + u32 *control_flags; + int txq_id = ctl->queue; + struct iwl_tx_queue *txq = NULL; + struct iwl_queue *q = NULL; + dma_addr_t phys_addr; + dma_addr_t txcmd_phys; + struct iwl_cmd *out_cmd = NULL; + u16 len, idx, len_org; + u8 id, hdr_len, unicast; + u8 sta_id; + u16 seq_number = 0; + u16 fc; + __le16 *qc; + u8 wait_write_ptr = 0; + unsigned long flags; + int rc; + + spin_lock_irqsave(&priv->lock, flags); + if (iwl_is_rfkill(priv)) { + IWL_DEBUG_DROP("Dropping - RF KILL\n"); + goto drop_unlock; + } + + if (!priv->interface_id) { + IWL_DEBUG_DROP("Dropping - !priv->interface_id\n"); + goto drop_unlock; + } + + if ((ctl->tx_rate & 0xFF) == IWL_INVALID_RATE) { + IWL_ERROR("ERROR: No TX rate available.\n"); + goto drop_unlock; + } + + unicast = !is_multicast_ether_addr(hdr->addr1); + id = 0; + + fc = le16_to_cpu(hdr->frame_control); + +#ifdef CONFIG_IWLWIFI_DEBUG + if (ieee80211_is_auth(fc)) + IWL_DEBUG_TX("Sending AUTH frame\n"); + else if (ieee80211_is_assoc_request(fc)) + IWL_DEBUG_TX("Sending ASSOC frame\n"); + else if (ieee80211_is_reassoc_request(fc)) + IWL_DEBUG_TX("Sending REASSOC frame\n"); +#endif + + if (!iwl_is_associated(priv) && + ((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_DATA)) { + IWL_DEBUG_DROP("Dropping - !iwl_is_associated\n"); + goto drop_unlock; + } + + spin_unlock_irqrestore(&priv->lock, flags); + + hdr_len = ieee80211_get_hdrlen(fc); + sta_id = iwl_get_sta_id(priv, hdr); + if (sta_id == IWL_INVALID_STATION) { + DECLARE_MAC_BUF(mac); + + IWL_DEBUG_DROP("Dropping - INVALID STATION: %s\n", + print_mac(mac, hdr->addr1)); + goto drop; + } + + IWL_DEBUG_RATE("station Id %d\n", sta_id); + + qc = ieee80211_get_qos_ctrl(hdr); + if (qc) { + u8 tid = (u8)(le16_to_cpu(*qc) & 0xf); + seq_number = priv->stations[sta_id].tid[tid].seq_number & + IEEE80211_SCTL_SEQ; + hdr->seq_ctrl = cpu_to_le16(seq_number) | + (hdr->seq_ctrl & + __constant_cpu_to_le16(IEEE80211_SCTL_FRAG)); + seq_number += 0x10; +#ifdef CONFIG_IWLWIFI_HT +#ifdef CONFIG_IWLWIFI_HT_AGG + /* aggregation is on for this */ + if (ctl->flags & IEEE80211_TXCTL_HT_MPDU_AGG) + txq_id = priv->stations[sta_id].tid[tid].agg.txq_id; +#endif /* CONFIG_IWLWIFI_HT_AGG */ +#endif /* CONFIG_IWLWIFI_HT */ + } + txq = &priv->txq[txq_id]; + q = &txq->q; + + spin_lock_irqsave(&priv->lock, flags); + + tfd = &txq->bd[q->first_empty]; + memset(tfd, 0, sizeof(*tfd)); + control_flags = (u32 *) tfd; + idx = get_cmd_index(q, q->first_empty, 0); + + memset(&(txq->txb[q->first_empty]), 0, sizeof(struct iwl_tx_info)); + txq->txb[q->first_empty].skb[0] = skb; + memcpy(&(txq->txb[q->first_empty].status.control), + ctl, sizeof(struct ieee80211_tx_control)); + out_cmd = &txq->cmd[idx]; + memset(&out_cmd->hdr, 0, sizeof(out_cmd->hdr)); + memset(&out_cmd->cmd.tx, 0, sizeof(out_cmd->cmd.tx)); + out_cmd->hdr.cmd = REPLY_TX; + out_cmd->hdr.sequence = cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) | + INDEX_TO_SEQ(q->first_empty))); + /* copy frags header */ + memcpy(out_cmd->cmd.tx.hdr, hdr, hdr_len); + + /* hdr = (struct ieee80211_hdr 
*)out_cmd->cmd.tx.hdr; */ + len = priv->hw_setting.tx_cmd_len + + sizeof(struct iwl_cmd_header) + hdr_len; + + len_org = len; + len = (len + 3) & ~3; + + if (len_org != len) + len_org = 1; + else + len_org = 0; + + txcmd_phys = txq->dma_addr_cmd + sizeof(struct iwl_cmd) * idx + + offsetof(struct iwl_cmd, hdr); + + iwl_hw_txq_attach_buf_to_tfd(priv, tfd, txcmd_phys, len); + + if (!(ctl->flags & IEEE80211_TXCTL_DO_NOT_ENCRYPT)) + iwl_build_tx_cmd_hwcrypto(priv, ctl, out_cmd, skb, 0); + + /* 802.11 null functions have no payload... */ + len = skb->len - hdr_len; + if (len) { + phys_addr = pci_map_single(priv->pci_dev, skb->data + hdr_len, + len, PCI_DMA_TODEVICE); + iwl_hw_txq_attach_buf_to_tfd(priv, tfd, phys_addr, len); + } + + if (len_org) + out_cmd->cmd.tx.tx_flags |= TX_CMD_FLG_MH_PAD_MSK; + + len = (u16)skb->len; + out_cmd->cmd.tx.len = cpu_to_le16(len); + + /* TODO need this for burst mode later on */ + iwl_build_tx_cmd_basic(priv, out_cmd, ctl, hdr, unicast, sta_id); + + /* set is_hcca to 0; it probably will never be implemented */ + iwl_hw_build_tx_cmd_rate(priv, out_cmd, ctl, hdr, sta_id, 0); + + iwl4965_tx_cmd(priv, out_cmd, sta_id, txcmd_phys, + hdr, hdr_len, ctl, NULL); + + if (!ieee80211_get_morefrag(hdr)) { + txq->need_update = 1; + if (qc) { + u8 tid = (u8)(le16_to_cpu(*qc) & 0xf); + priv->stations[sta_id].tid[tid].seq_number = seq_number; + } + } else { + wait_write_ptr = 1; + txq->need_update = 0; + } + + iwl_print_hex_dump(IWL_DL_TX, out_cmd->cmd.payload, + sizeof(out_cmd->cmd.tx)); + + iwl_print_hex_dump(IWL_DL_TX, (u8 *)out_cmd->cmd.tx.hdr, + ieee80211_get_hdrlen(fc)); + + iwl4965_tx_queue_update_wr_ptr(priv, txq, len); + + q->first_empty = iwl_queue_inc_wrap(q->first_empty, q->n_bd); + rc = iwl_tx_queue_update_write_ptr(priv, txq); + spin_unlock_irqrestore(&priv->lock, flags); + + if (rc) + return rc; + + if ((iwl_queue_space(q) < q->high_mark) + && priv->mac80211_registered) { + if (wait_write_ptr) { + spin_lock_irqsave(&priv->lock, flags); + txq->need_update = 1; + iwl_tx_queue_update_write_ptr(priv, txq); + spin_unlock_irqrestore(&priv->lock, flags); + } + + ieee80211_stop_queue(priv->hw, ctl->queue); + } + + return 0; + +drop_unlock: + spin_unlock_irqrestore(&priv->lock, flags); +drop: + return -1; +} + +static void iwl_set_rate(struct iwl_priv *priv) +{ + const struct ieee80211_hw_mode *hw = NULL; + struct ieee80211_rate *rate; + int i; + + hw = iwl_get_hw_mode(priv, priv->phymode); + + priv->active_rate = 0; + priv->active_rate_basic = 0; + + IWL_DEBUG_RATE("Setting rates for 802.11%c\n", + hw->mode == MODE_IEEE80211A ? + 'a' : ((hw->mode == MODE_IEEE80211B) ? 'b' : 'g')); + + for (i = 0; i < hw->num_rates; i++) { + rate = &(hw->rates[i]); + if ((rate->val < IWL_RATE_COUNT) && + (rate->flags & IEEE80211_RATE_SUPPORTED)) { + IWL_DEBUG_RATE("Adding rate index %d (plcp %d)%s\n", + rate->val, iwl_rates[rate->val].plcp, + (rate->flags & IEEE80211_RATE_BASIC) ? 
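The length arithmetic above rounds the Tx command (hardware command header plus the copied 802.11 header) up to a 4-byte boundary and, when padding was inserted, sets TX_CMD_FLG_MH_PAD_MSK so the uCode drops the pad bytes before the payload. A minimal standalone sketch of that rounding, using a hypothetical command length:

    /* Sketch of the dword-alignment step in iwl_tx_skb: round the command
     * length up to a 4-byte boundary and remember whether padding was added. */
    #include <stdio.h>

    int main(void)
    {
        unsigned int hdr_len = 26;                     /* e.g. a QoS data header */
        unsigned int cmd_len = 64 + hdr_len;           /* 64 = hypothetical tx_cmd_len + cmd header */

        unsigned int aligned = (cmd_len + 3) & ~3u;    /* round up to a multiple of 4 */
        int mh_padded = (aligned != cmd_len);          /* pad sits between header and payload */

        printf("cmd_len=%u aligned=%u pad_flag=%d\n", cmd_len, aligned, mh_padded);
        /* In the driver, pad_flag selects TX_CMD_FLG_MH_PAD_MSK so the uCode
         * knows to skip the pad bytes before the 802.11 payload. */
        return 0;
    }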
+ "*" : ""); + priv->active_rate |= (1 << rate->val); + if (rate->flags & IEEE80211_RATE_BASIC) + priv->active_rate_basic |= (1 << rate->val); + } else + IWL_DEBUG_RATE("Not adding rate %d (plcp %d)\n", + rate->val, iwl_rates[rate->val].plcp); + } + + IWL_DEBUG_RATE("Set active_rate = %0x, active_rate_basic = %0x\n", + priv->active_rate, priv->active_rate_basic); + + /* + * If a basic rate is configured, then use it (adding IWL_RATE_1M_MASK) + * otherwise set it to the default of all CCK rates and 6, 12, 24 for + * OFDM + */ + if (priv->active_rate_basic & IWL_CCK_BASIC_RATES_MASK) + priv->staging_rxon.cck_basic_rates = + ((priv->active_rate_basic & + IWL_CCK_RATES_MASK) >> IWL_FIRST_CCK_RATE) & 0xF; + else + priv->staging_rxon.cck_basic_rates = + (IWL_CCK_BASIC_RATES_MASK >> IWL_FIRST_CCK_RATE) & 0xF; + + if (priv->active_rate_basic & IWL_OFDM_BASIC_RATES_MASK) + priv->staging_rxon.ofdm_basic_rates = + ((priv->active_rate_basic & + (IWL_OFDM_BASIC_RATES_MASK | IWL_RATE_6M_MASK)) >> + IWL_FIRST_OFDM_RATE) & 0xFF; + else + priv->staging_rxon.ofdm_basic_rates = + (IWL_OFDM_BASIC_RATES_MASK >> IWL_FIRST_OFDM_RATE) & 0xFF; +} + +static void iwl_radio_kill_sw(struct iwl_priv *priv, int disable_radio) +{ + unsigned long flags; + + if (!!disable_radio == test_bit(STATUS_RF_KILL_SW, &priv->status)) + return; + + IWL_DEBUG_RF_KILL("Manual SW RF KILL set to: RADIO %s\n", + disable_radio ? "OFF" : "ON"); + + if (disable_radio) { + iwl_scan_cancel(priv); + /* FIXME: This is a workaround for AP */ + if (priv->iw_mode != IEEE80211_IF_TYPE_AP) { + spin_lock_irqsave(&priv->lock, flags); + iwl_write32(priv, CSR_UCODE_DRV_GP1_SET, + CSR_UCODE_SW_BIT_RFKILL); + spin_unlock_irqrestore(&priv->lock, flags); + iwl_send_card_state(priv, CARD_STATE_CMD_DISABLE, 0); + set_bit(STATUS_RF_KILL_SW, &priv->status); + } + return; + } + + spin_lock_irqsave(&priv->lock, flags); + iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL); + + clear_bit(STATUS_RF_KILL_SW, &priv->status); + spin_unlock_irqrestore(&priv->lock, flags); + + /* wake up ucode */ + msleep(10); + + spin_lock_irqsave(&priv->lock, flags); + iwl_read32(priv, CSR_UCODE_DRV_GP1); + if (!iwl_grab_restricted_access(priv)) + iwl_release_restricted_access(priv); + spin_unlock_irqrestore(&priv->lock, flags); + + if (test_bit(STATUS_RF_KILL_HW, &priv->status)) { + IWL_DEBUG_RF_KILL("Can not turn radio back on - " + "disabled by HW switch\n"); + return; + } + + queue_work(priv->workqueue, &priv->restart); + return; +} + +void iwl_set_decrypted_flag(struct iwl_priv *priv, struct sk_buff *skb, + u32 decrypt_res, struct ieee80211_rx_status *stats) +{ + u16 fc = + le16_to_cpu(((struct ieee80211_hdr *)skb->data)->frame_control); + + if (priv->active_rxon.filter_flags & RXON_FILTER_DIS_DECRYPT_MSK) + return; + + if (!(fc & IEEE80211_FCTL_PROTECTED)) + return; + + IWL_DEBUG_RX("decrypt_res:0x%x\n", decrypt_res); + switch (decrypt_res & RX_RES_STATUS_SEC_TYPE_MSK) { + case RX_RES_STATUS_SEC_TYPE_TKIP: + if ((decrypt_res & RX_RES_STATUS_DECRYPT_TYPE_MSK) == + RX_RES_STATUS_BAD_ICV_MIC) + stats->flag |= RX_FLAG_MMIC_ERROR; + case RX_RES_STATUS_SEC_TYPE_WEP: + case RX_RES_STATUS_SEC_TYPE_CCMP: + if ((decrypt_res & RX_RES_STATUS_DECRYPT_TYPE_MSK) == + RX_RES_STATUS_DECRYPT_OK) { + IWL_DEBUG_RX("hw decrypt successfully!!!\n"); + stats->flag |= RX_FLAG_DECRYPTED; + } + break; + + default: + break; + } +} + +void iwl_handle_data_packet_monitor(struct iwl_priv *priv, + struct iwl_rx_mem_buffer *rxb, + void *data, short len, + struct ieee80211_rx_status *stats, + u16 
phy_flags) +{ + struct iwl_rt_rx_hdr *iwl_rt; + + /* First cache any information we need before we overwrite + * the information provided in the skb from the hardware */ + s8 signal = stats->ssi; + s8 noise = 0; + int rate = stats->rate; + u64 tsf = stats->mactime; + __le16 phy_flags_hw = cpu_to_le16(phy_flags); + + /* We received data from the HW, so stop the watchdog */ + if (len > IWL_RX_BUF_SIZE - sizeof(*iwl_rt)) { + IWL_DEBUG_DROP("Dropping too large packet in monitor\n"); + return; + } + + /* copy the frame data to write after where the radiotap header goes */ + iwl_rt = (void *)rxb->skb->data; + memmove(iwl_rt->payload, data, len); + + iwl_rt->rt_hdr.it_version = PKTHDR_RADIOTAP_VERSION; + iwl_rt->rt_hdr.it_pad = 0; /* always good to zero */ + + /* total header + data */ + iwl_rt->rt_hdr.it_len = cpu_to_le16(sizeof(*iwl_rt)); + + /* Set the size of the skb to the size of the frame */ + skb_put(rxb->skb, sizeof(*iwl_rt) + len); + + /* Big bitfield of all the fields we provide in radiotap */ + iwl_rt->rt_hdr.it_present = + cpu_to_le32((1 << IEEE80211_RADIOTAP_TSFT) | + (1 << IEEE80211_RADIOTAP_FLAGS) | + (1 << IEEE80211_RADIOTAP_RATE) | + (1 << IEEE80211_RADIOTAP_CHANNEL) | + (1 << IEEE80211_RADIOTAP_DBM_ANTSIGNAL) | + (1 << IEEE80211_RADIOTAP_DBM_ANTNOISE) | + (1 << IEEE80211_RADIOTAP_ANTENNA)); + + /* Zero the flags, we'll add to them as we go */ + iwl_rt->rt_flags = 0; + + iwl_rt->rt_tsf = cpu_to_le64(tsf); + + /* Convert to dBm */ + iwl_rt->rt_dbmsignal = signal; + iwl_rt->rt_dbmnoise = noise; + + /* Convert the channel frequency and set the flags */ + iwl_rt->rt_channelMHz = cpu_to_le16(stats->freq); + if (!(phy_flags_hw & RX_RES_PHY_FLAGS_BAND_24_MSK)) + iwl_rt->rt_chbitmask = + cpu_to_le16((IEEE80211_CHAN_OFDM | IEEE80211_CHAN_5GHZ)); + else if (phy_flags_hw & RX_RES_PHY_FLAGS_MOD_CCK_MSK) + iwl_rt->rt_chbitmask = + cpu_to_le16((IEEE80211_CHAN_CCK | IEEE80211_CHAN_2GHZ)); + else /* 802.11g */ + iwl_rt->rt_chbitmask = + cpu_to_le16((IEEE80211_CHAN_OFDM | IEEE80211_CHAN_2GHZ)); + + rate = iwl_rate_index_from_plcp(rate); + if (rate == -1) + iwl_rt->rt_rate = 0; + else + iwl_rt->rt_rate = iwl_rates[rate].ieee; + + /* antenna number */ + iwl_rt->rt_antenna = + le16_to_cpu(phy_flags_hw & RX_RES_PHY_FLAGS_ANTENNA_MSK) >> 4; + + /* set the preamble flag if we have it */ + if (phy_flags_hw & RX_RES_PHY_FLAGS_SHORT_PREAMBLE_MSK) + iwl_rt->rt_flags |= IEEE80211_RADIOTAP_F_SHORTPRE; + + IWL_DEBUG_RX("Rx packet of %d bytes.\n", rxb->skb->len); + + stats->flag |= RX_FLAG_RADIOTAP; + ieee80211_rx_irqsafe(priv->hw, rxb->skb, stats); + rxb->skb = NULL; +} + + +#define IWL_PACKET_RETRY_TIME HZ + +int is_duplicate_packet(struct iwl_priv *priv, struct ieee80211_hdr *header) +{ + u16 sc = le16_to_cpu(header->seq_ctrl); + u16 seq = (sc & IEEE80211_SCTL_SEQ) >> 4; + u16 frag = sc & IEEE80211_SCTL_FRAG; + u16 *last_seq, *last_frag; + unsigned long *last_time; + + switch (priv->iw_mode) { + case IEEE80211_IF_TYPE_IBSS:{ + struct list_head *p; + struct iwl_ibss_seq *entry = NULL; + u8 *mac = header->addr2; + int index = mac[5] & (IWL_IBSS_MAC_HASH_SIZE - 1); + + __list_for_each(p, &priv->ibss_mac_hash[index]) { + entry = + list_entry(p, struct iwl_ibss_seq, list); + if (!compare_ether_addr(entry->mac, mac)) + break; + } + if (p == &priv->ibss_mac_hash[index]) { + entry = kzalloc(sizeof(*entry), GFP_ATOMIC); + if (!entry) { + IWL_ERROR + ("Cannot malloc new mac entry\n"); + return 0; + } + memcpy(entry->mac, mac, ETH_ALEN); + entry->seq_num = seq; + entry->frag_num = frag; + entry->packet_time = 
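The monitor-mode path above prepends a radiotap header whose it_present word is a bitmap of the optional fields that follow, in ascending field-number order. A small standalone sketch building such a bitmap, using the field numbers from the radiotap specification:

    /* Sketch of building a radiotap it_present bitmap like the monitor-mode
     * path does.  Field numbers follow the radiotap specification. */
    #include <stdint.h>
    #include <stdio.h>

    enum {  /* radiotap field numbers (see radiotap.org) */
        RT_TSFT = 0, RT_FLAGS = 1, RT_RATE = 2, RT_CHANNEL = 3,
        RT_DBM_ANTSIGNAL = 5, RT_DBM_ANTNOISE = 6, RT_ANTENNA = 11,
    };

    int main(void)
    {
        uint32_t it_present =
            (1u << RT_TSFT)          |
            (1u << RT_FLAGS)         |
            (1u << RT_RATE)          |
            (1u << RT_CHANNEL)       |
            (1u << RT_DBM_ANTSIGNAL) |
            (1u << RT_DBM_ANTNOISE)  |
            (1u << RT_ANTENNA);

        printf("it_present = 0x%08x\n", (unsigned)it_present);
        /* Each set bit tells a radiotap parser which optional field is
         * present, packed after the header in ascending field order. */
        return 0;
    }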
jiffies; + list_add(&entry->list, + &priv->ibss_mac_hash[index]); + return 0; + } + last_seq = &entry->seq_num; + last_frag = &entry->frag_num; + last_time = &entry->packet_time; + break; + } + case IEEE80211_IF_TYPE_STA: + last_seq = &priv->last_seq_num; + last_frag = &priv->last_frag_num; + last_time = &priv->last_packet_time; + break; + default: + return 0; + } + if ((*last_seq == seq) && + time_after(*last_time + IWL_PACKET_RETRY_TIME, jiffies)) { + if (*last_frag == frag) + goto drop; + if (*last_frag + 1 != frag) + /* out-of-order fragment */ + goto drop; + } else + *last_seq = seq; + + *last_frag = frag; + *last_time = jiffies; + return 0; + + drop: + return 1; +} + +#ifdef CONFIG_IWLWIFI_SPECTRUM_MEASUREMENT + +#include "iwl-spectrum.h" + +#define BEACON_TIME_MASK_LOW 0x00FFFFFF +#define BEACON_TIME_MASK_HIGH 0xFF000000 +#define TIME_UNIT 1024 + +/* + * extended beacon time format + * time in usec will be changed into a 32-bit value in 8:24 format + * the high 1 byte is the beacon counts + * the lower 3 bytes is the time in usec within one beacon interval + */ + +static u32 iwl_usecs_to_beacons(u32 usec, u32 beacon_interval) +{ + u32 quot; + u32 rem; + u32 interval = beacon_interval * 1024; + + if (!interval || !usec) + return 0; + + quot = (usec / interval) & (BEACON_TIME_MASK_HIGH >> 24); + rem = (usec % interval) & BEACON_TIME_MASK_LOW; + + return (quot << 24) + rem; +} + +/* base is usually what we get from ucode with each received frame, + * the same as HW timer counter counting down + */ + +static __le32 iwl_add_beacon_time(u32 base, u32 addon, u32 beacon_interval) +{ + u32 base_low = base & BEACON_TIME_MASK_LOW; + u32 addon_low = addon & BEACON_TIME_MASK_LOW; + u32 interval = beacon_interval * TIME_UNIT; + u32 res = (base & BEACON_TIME_MASK_HIGH) + + (addon & BEACON_TIME_MASK_HIGH); + + if (base_low > addon_low) + res += base_low - addon_low; + else if (base_low < addon_low) { + res += interval + base_low - addon_low; + res += (1 << 24); + } else + res += (1 << 24); + + return cpu_to_le32(res); +} + +static int iwl_get_measurement(struct iwl_priv *priv, + struct ieee80211_measurement_params *params, + u8 type) +{ + struct iwl_spectrum_cmd spectrum; + struct iwl_rx_packet *res; + struct iwl_host_cmd cmd = { + .id = REPLY_SPECTRUM_MEASUREMENT_CMD, + .data = (void *)&spectrum, + .meta.flags = CMD_WANT_SKB, + }; + u32 add_time = le64_to_cpu(params->start_time); + int rc; + int spectrum_resp_status; + int duration = le16_to_cpu(params->duration); + + if (iwl_is_associated(priv)) + add_time = + iwl_usecs_to_beacons( + le64_to_cpu(params->start_time) - priv->last_tsf, + le16_to_cpu(priv->rxon_timing.beacon_interval)); + + memset(&spectrum, 0, sizeof(spectrum)); + + spectrum.channel_count = cpu_to_le16(1); + spectrum.flags = + RXON_FLG_TSF2HOST_MSK | RXON_FLG_ANT_A_MSK | RXON_FLG_DIS_DIV_MSK; + spectrum.filter_flags = MEASUREMENT_FILTER_FLAG; + cmd.len = sizeof(spectrum); + spectrum.len = cpu_to_le16(cmd.len - sizeof(spectrum.len)); + + if (iwl_is_associated(priv)) + spectrum.start_time = + iwl_add_beacon_time(priv->last_beacon_time, + add_time, + le16_to_cpu(priv->rxon_timing.beacon_interval)); + else + spectrum.start_time = 0; + + spectrum.channels[0].duration = cpu_to_le32(duration * TIME_UNIT); + spectrum.channels[0].channel = params->channel; + spectrum.channels[0].type = type; + if (priv->active_rxon.flags & RXON_FLG_BAND_24G_MSK) + spectrum.flags |= RXON_FLG_BAND_24G_MSK | + RXON_FLG_AUTO_DETECT_MSK | RXON_FLG_TGG_PROTECT_MSK; + + rc = iwl_send_cmd_sync(priv, &cmd); + if (rc) 
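The 8:24 "extended beacon time" format described above keeps the number of whole beacon intervals in the top byte and the leftover microseconds within the current interval in the low 24 bits. A standalone sketch of the conversion, mirroring iwl_usecs_to_beacons:

    /* Sketch of the 8:24 extended beacon time format used by the spectrum
     * measurement command: high 8 bits = beacon count, low 24 bits = usec
     * remainder inside the current beacon interval. */
    #include <stdint.h>
    #include <stdio.h>

    #define BEACON_TIME_MASK_LOW   0x00FFFFFFu
    #define BEACON_TIME_MASK_HIGH  0xFF000000u
    #define TIME_UNIT              1024u   /* one TU in usec */

    static uint32_t usecs_to_beacons(uint32_t usec, uint32_t beacon_interval_tu)
    {
        uint32_t interval = beacon_interval_tu * TIME_UNIT;
        uint32_t quot, rem;

        if (!interval || !usec)
            return 0;

        quot = (usec / interval) & (BEACON_TIME_MASK_HIGH >> 24);
        rem  = (usec % interval) & BEACON_TIME_MASK_LOW;

        return (quot << 24) | rem;
    }

    int main(void)
    {
        /* 250000 usec against a 100 TU (102400 usec) beacon interval:
         * 2 full intervals plus 45200 usec left over. */
        uint32_t v = usecs_to_beacons(250000, 100);
        printf("0x%08x -> %u beacons + %u usec\n",
               (unsigned)v, (unsigned)(v >> 24), (unsigned)(v & BEACON_TIME_MASK_LOW));
        return 0;
    }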
+ return rc; + + res = (struct iwl_rx_packet *)cmd.meta.u.skb->data; + if (res->hdr.flags & IWL_CMD_FAILED_MSK) { + IWL_ERROR("Bad return from REPLY_RX_ON_ASSOC command\n"); + rc = -EIO; + } + + spectrum_resp_status = le16_to_cpu(res->u.spectrum.status); + switch (spectrum_resp_status) { + case 0: /* Command will be handled */ + if (res->u.spectrum.id != 0xff) { + IWL_DEBUG_INFO + ("Replaced existing measurement: %d\n", + res->u.spectrum.id); + priv->measurement_status &= ~MEASUREMENT_READY; + } + priv->measurement_status |= MEASUREMENT_ACTIVE; + rc = 0; + break; + + case 1: /* Command will not be handled */ + rc = -EAGAIN; + break; + } + + dev_kfree_skb_any(cmd.meta.u.skb); + + return rc; +} +#endif + +static void iwl_txstatus_to_ieee(struct iwl_priv *priv, + struct iwl_tx_info *tx_sta) +{ + + tx_sta->status.ack_signal = 0; + tx_sta->status.excessive_retries = 0; + tx_sta->status.queue_length = 0; + tx_sta->status.queue_number = 0; + + if (in_interrupt()) + ieee80211_tx_status_irqsafe(priv->hw, + tx_sta->skb[0], &(tx_sta->status)); + else + ieee80211_tx_status(priv->hw, + tx_sta->skb[0], &(tx_sta->status)); + + tx_sta->skb[0] = NULL; +} + +/** + * iwl_tx_queue_reclaim - Reclaim Tx queue entries no more used by NIC. + * + * When FW advances 'R' index, all entries between old and + * new 'R' index need to be reclaimed. As result, some free space + * forms. If there is enough free space (> low mark), wake Tx queue. + */ +int iwl_tx_queue_reclaim(struct iwl_priv *priv, int txq_id, int index) +{ + struct iwl_tx_queue *txq = &priv->txq[txq_id]; + struct iwl_queue *q = &txq->q; + int nfreed = 0; + + if ((index >= q->n_bd) || (x2_queue_used(q, index) == 0)) { + IWL_ERROR("Read index for DMA queue txq id (%d), index %d, " + "is out of range [0-%d] %d %d.\n", txq_id, + index, q->n_bd, q->first_empty, q->last_used); + return 0; + } + + for (index = iwl_queue_inc_wrap(index, q->n_bd); + q->last_used != index; + q->last_used = iwl_queue_inc_wrap(q->last_used, q->n_bd)) { + if (txq_id != IWL_CMD_QUEUE_NUM) { + iwl_txstatus_to_ieee(priv, + &(txq->txb[txq->q.last_used])); + iwl_hw_txq_free_tfd(priv, txq); + } else if (nfreed > 1) { + IWL_ERROR("HCMD skipped: index (%d) %d %d\n", index, + q->first_empty, q->last_used); + queue_work(priv->workqueue, &priv->restart); + } + nfreed++; + } + + if (iwl_queue_space(q) > q->low_mark && (txq_id >= 0) && + (txq_id != IWL_CMD_QUEUE_NUM) && + priv->mac80211_registered) + ieee80211_wake_queue(priv->hw, txq_id); + + + return nfreed; +} + +static int iwl_is_tx_success(u32 status) +{ + status &= TX_STATUS_MSK; + return (status == TX_STATUS_SUCCESS) + || (status == TX_STATUS_DIRECT_DONE); +} + +/****************************************************************************** + * + * Generic RX handler implementations + * + ******************************************************************************/ +#ifdef CONFIG_IWLWIFI_HT +#ifdef CONFIG_IWLWIFI_HT_AGG + +static inline int iwl_get_ra_sta_id(struct iwl_priv *priv, + struct ieee80211_hdr *hdr) +{ + if (priv->iw_mode == IEEE80211_IF_TYPE_STA) + return IWL_AP_ID; + else { + u8 *da = ieee80211_get_DA(hdr); + return iwl_hw_find_station(priv, da); + } +} + +static struct ieee80211_hdr *iwl_tx_queue_get_hdr( + struct iwl_priv *priv, int txq_id, int idx) +{ + if (priv->txq[txq_id].txb[idx].skb[0]) + return (struct ieee80211_hdr *)priv->txq[txq_id]. 
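iwl_tx_queue_reclaim walks the ring from the driver's last_used index up to and including the index the firmware reports as completed, freeing each entry and wrapping at the end of the ring. The sketch below models that walk; the wrap helper is assumed to behave like iwl_queue_inc_wrap, i.e. increment modulo the ring size.

    /* Sketch of the reclaim walk in iwl_tx_queue_reclaim: free every ring
     * entry between the driver's last_used index and the index the firmware
     * says it has finished with, wrapping at the end of the ring. */
    #include <stdio.h>

    #define N_BD 16                         /* ring size for the example */

    static int inc_wrap(int i, int n)       /* assumed behaviour of iwl_queue_inc_wrap */
    {
        return (i + 1) % n;
    }

    int main(void)
    {
        int last_used  = 13;   /* oldest entry still owned by the driver */
        int done_index = 2;    /* firmware has completed up to (and incl.) this one */
        int nfreed = 0;
        int i;

        for (i = inc_wrap(done_index, N_BD);
             last_used != i;
             last_used = inc_wrap(last_used, N_BD)) {
            printf("reclaim entry %d\n", last_used);  /* free TFD / report Tx status */
            nfreed++;
        }

        printf("freed %d entries, last_used now %d\n", nfreed, last_used);
        return 0;
    }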
+ txb[idx].skb[0]->data; + return NULL; +} + +static inline u32 iwl_get_scd_ssn(struct iwl_tx_resp *tx_resp) +{ + __le32 *scd_ssn = (__le32 *)((u32 *)&tx_resp->status + + tx_resp->frame_count); + return le32_to_cpu(*scd_ssn) & MAX_SN; + +} +static int iwl4965_tx_status_reply_tx(struct iwl_priv *priv, + struct iwl_ht_agg *agg, + struct iwl_tx_resp *tx_resp, + u16 start_idx) +{ + u32 status; + __le32 *frame_status = &tx_resp->status; + struct ieee80211_tx_status *tx_status = NULL; + struct ieee80211_hdr *hdr = NULL; + int i, sh; + int txq_id, idx; + u16 seq; + + if (agg->wait_for_ba) + IWL_DEBUG_TX_REPLY("got tx repsons w/o back\n"); + + agg->frame_count = tx_resp->frame_count; + agg->start_idx = start_idx; + agg->rate_n_flags = le32_to_cpu(tx_resp->rate_n_flags); + agg->bitmap0 = agg->bitmap1 = 0; + + if (agg->frame_count == 1) { + struct iwl_tx_queue *txq ; + status = le32_to_cpu(frame_status[0]); + + txq_id = agg->txq_id; + txq = &priv->txq[txq_id]; + /* FIXME: code repetition */ + IWL_DEBUG_TX_REPLY("FrameCnt = %d, StartIdx=%d \n", + agg->frame_count, agg->start_idx); + + tx_status = &(priv->txq[txq_id].txb[txq->q.last_used].status); + tx_status->retry_count = tx_resp->failure_frame; + tx_status->queue_number = status & 0xff; + tx_status->queue_length = tx_resp->bt_kill_count; + tx_status->queue_length |= tx_resp->failure_rts; + + tx_status->flags = iwl_is_tx_success(status)? + IEEE80211_TX_STATUS_ACK : 0; + tx_status->control.tx_rate = + iwl_hw_get_rate_n_flags(tx_resp->rate_n_flags); + /* FIXME: code repetition end */ + + IWL_DEBUG_TX_REPLY("1 Frame 0x%x failure :%d\n", + status & 0xff, tx_resp->failure_frame); + IWL_DEBUG_TX_REPLY("Rate Info rate_n_flags=%x\n", + iwl_hw_get_rate_n_flags(tx_resp->rate_n_flags)); + + agg->wait_for_ba = 0; + } else { + u64 bitmap = 0; + int start = agg->start_idx; + + for (i = 0; i < agg->frame_count; i++) { + u16 sc; + status = le32_to_cpu(frame_status[i]); + seq = status >> 16; + idx = SEQ_TO_INDEX(seq); + txq_id = SEQ_TO_QUEUE(seq); + + if (status & (AGG_TX_STATE_FEW_BYTES_MSK | + AGG_TX_STATE_ABORT_MSK)) + continue; + + IWL_DEBUG_TX_REPLY("FrameCnt = %d, txq_id=%d idx=%d\n", + agg->frame_count, txq_id, idx); + + hdr = iwl_tx_queue_get_hdr(priv, txq_id, idx); + + sc = le16_to_cpu(hdr->seq_ctrl); + if (idx != (SEQ_TO_SN(sc) & 0xff)) { + IWL_ERROR("BUG_ON idx doesn't match seq control" + " idx=%d, seq_idx=%d, seq=%d\n", + idx, SEQ_TO_SN(sc), + hdr->seq_ctrl); + return -1; + } + + IWL_DEBUG_TX_REPLY("AGG Frame i=%d idx %d seq=%d\n", + i, idx, SEQ_TO_SN(sc)); + + sh = idx - start; + if (sh > 64) { + sh = (start - idx) + 0xff; + bitmap = bitmap << sh; + sh = 0; + start = idx; + } else if (sh < -64) + sh = 0xff - (start - idx); + else if (sh < 0) { + sh = start - idx; + start = idx; + bitmap = bitmap << sh; + sh = 0; + } + bitmap |= (1 << sh); + IWL_DEBUG_TX_REPLY("start=%d bitmap=0x%x\n", + start, (u32)(bitmap & 0xFFFFFFFF)); + } + + agg->bitmap0 = bitmap & 0xFFFFFFFF; + agg->bitmap1 = bitmap >> 32; + agg->start_idx = start; + agg->rate_n_flags = le32_to_cpu(tx_resp->rate_n_flags); + IWL_DEBUG_TX_REPLY("Frames %d start_idx=%d bitmap=0x%x\n", + agg->frame_count, agg->start_idx, + agg->bitmap0); + + if (bitmap) + agg->wait_for_ba = 1; + } + return 0; +} +#endif +#endif + +static void iwl_rx_reply_tx(struct iwl_priv *priv, + struct iwl_rx_mem_buffer *rxb) +{ + struct iwl_rx_packet *pkt = (void *)rxb->skb->data; + u16 sequence = le16_to_cpu(pkt->hdr.sequence); + int txq_id = SEQ_TO_QUEUE(sequence); + int index = SEQ_TO_INDEX(sequence); + struct iwl_tx_queue 
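For aggregated bursts, iwl4965_tx_status_reply_tx condenses the per-frame statuses into a 64-bit bitmap keyed by each frame's ring index relative to the first frame of the burst, so it can later be matched against the block-ack bitmap. The sketch below is deliberately simplified: it only folds the offset into the 0-255 sequence space and skips the re-basing of 'start' that the real code performs when offsets run past 64 bits.

    /* Simplified sketch of building the aggregation bitmap: one bit per
     * frame, positioned by its ring index relative to the first frame of
     * the burst.  The real code also re-bases 'start' when offsets exceed
     * the 64-bit window; here we only wrap into the 0..255 space. */
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        int start = 250;                      /* index of the first frame in the burst */
        int idx[] = { 250, 251, 253, 1 };     /* indices reported by the firmware */
        uint64_t bitmap = 0;
        unsigned int i;

        for (i = 0; i < sizeof(idx) / sizeof(idx[0]); i++) {
            int sh = (idx[i] - start) & 0xff; /* offset within the burst, wrapped */
            if (sh < 64)                      /* larger offsets need the re-basing step */
                bitmap |= 1ULL << sh;
        }

        printf("agg bitmap = 0x%016llx\n", (unsigned long long)bitmap);
        /* bits 0, 1, 3, 7 set: frames 250, 251, 253 and 1 (wrapped past 255) */
        return 0;
    }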
*txq = &priv->txq[txq_id]; + struct ieee80211_tx_status *tx_status; + struct iwl_tx_resp *tx_resp = (void *)&pkt->u.raw[0]; + u32 status = le32_to_cpu(tx_resp->status); +#ifdef CONFIG_IWLWIFI_HT +#ifdef CONFIG_IWLWIFI_HT_AGG + int tid, sta_id; +#endif +#endif + + if ((index >= txq->q.n_bd) || (x2_queue_used(&txq->q, index) == 0)) { + IWL_ERROR("Read index for DMA queue txq_id (%d) index %d " + "is out of range [0-%d] %d %d\n", txq_id, + index, txq->q.n_bd, txq->q.first_empty, + txq->q.last_used); + return; + } + +#ifdef CONFIG_IWLWIFI_HT +#ifdef CONFIG_IWLWIFI_HT_AGG + if (txq->sched_retry) { + const u32 scd_ssn = iwl_get_scd_ssn(tx_resp); + struct ieee80211_hdr *hdr = + iwl_tx_queue_get_hdr(priv, txq_id, index); + struct iwl_ht_agg *agg = NULL; + __le16 *qc = ieee80211_get_qos_ctrl(hdr); + + if (qc == NULL) { + IWL_ERROR("BUG_ON qc is null!!!!\n"); + return; + } + + tid = le16_to_cpu(*qc) & 0xf; + + sta_id = iwl_get_ra_sta_id(priv, hdr); + if (unlikely(sta_id == IWL_INVALID_STATION)) { + IWL_ERROR("Station not known for\n"); + return; + } + + agg = &priv->stations[sta_id].tid[tid].agg; + + iwl4965_tx_status_reply_tx(priv, agg, tx_resp, index); + + if ((tx_resp->frame_count == 1) && + !iwl_is_tx_success(status)) { + /* TODO: send BAR */ + } + + if ((txq->q.last_used != (scd_ssn & 0xff))) { + index = iwl_queue_dec_wrap(scd_ssn & 0xff, txq->q.n_bd); + IWL_DEBUG_TX_REPLY("Retry scheduler reclaim scd_ssn " + "%d index %d\n", scd_ssn , index); + iwl_tx_queue_reclaim(priv, txq_id, index); + } + } else { +#endif /* CONFIG_IWLWIFI_HT_AGG */ +#endif /* CONFIG_IWLWIFI_HT */ + tx_status = &(txq->txb[txq->q.last_used].status); + + tx_status->retry_count = tx_resp->failure_frame; + tx_status->queue_number = status; + tx_status->queue_length = tx_resp->bt_kill_count; + tx_status->queue_length |= tx_resp->failure_rts; + + tx_status->flags = + iwl_is_tx_success(status) ? IEEE80211_TX_STATUS_ACK : 0; + + tx_status->control.tx_rate = + iwl_hw_get_rate_n_flags(tx_resp->rate_n_flags); + + IWL_DEBUG_TX("Tx queue %d Status %s (0x%08x) rate_n_flags 0x%x " + "retries %d\n", txq_id, iwl_get_tx_fail_reason(status), + status, le32_to_cpu(tx_resp->rate_n_flags), + tx_resp->failure_frame); + + IWL_DEBUG_TX_REPLY("Tx queue reclaim %d\n", index); + if (index != -1) + iwl_tx_queue_reclaim(priv, txq_id, index); +#ifdef CONFIG_IWLWIFI_HT +#ifdef CONFIG_IWLWIFI_HT_AGG + } +#endif /* CONFIG_IWLWIFI_HT_AGG */ +#endif /* CONFIG_IWLWIFI_HT */ + + if (iwl_check_bits(status, TX_ABORT_REQUIRED_MSK)) + IWL_ERROR("TODO: Implement Tx ABORT REQUIRED!!!\n"); +} + + +static void iwl_rx_reply_alive(struct iwl_priv *priv, + struct iwl_rx_mem_buffer *rxb) +{ + struct iwl_rx_packet *pkt = (void *)rxb->skb->data; + struct iwl_alive_resp *palive; + struct delayed_work *pwork; + + palive = &pkt->u.alive_frame; + + IWL_DEBUG_INFO("Alive ucode status 0x%08X revision " + "0x%01X 0x%01X\n", + palive->is_valid, palive->ver_type, + palive->ver_subtype); + + if (palive->ver_subtype == INITIALIZE_SUBTYPE) { + IWL_DEBUG_INFO("Initialization Alive received.\n"); + memcpy(&priv->card_alive_init, + &pkt->u.alive_frame, + sizeof(struct iwl_init_alive_resp)); + pwork = &priv->init_alive_start; + } else { + IWL_DEBUG_INFO("Runtime Alive received.\n"); + memcpy(&priv->card_alive, &pkt->u.alive_frame, + sizeof(struct iwl_alive_resp)); + pwork = &priv->alive_start; + } + + /* We delay the ALIVE response by 5ms to + * give the HW RF Kill time to activate... 
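iwl_rx_reply_tx recovers the originating Tx queue and ring index from the command header's sequence field, the same field iwl_tx_skb packed with QUEUE_TO_SEQ and INDEX_TO_SEQ. The sketch below assumes, purely for illustration, a layout with the ring index in bits 0-7 and the queue id in bits 8-12; the authoritative layout is whatever the driver's SEQ_TO_* macros define.

    /* Sketch of packing a (txq_id, ring index) pair into a 16-bit sequence
     * value and recovering it from the reply.  The field layout is an
     * assumption for illustration only. */
    #include <stdint.h>
    #include <stdio.h>

    #define INDEX_TO_SEQ(i)  ((uint16_t)((i) & 0xff))
    #define QUEUE_TO_SEQ(q)  ((uint16_t)(((q) & 0x1f) << 8))
    #define SEQ_TO_INDEX(s)  ((int)((s) & 0xff))
    #define SEQ_TO_QUEUE(s)  ((int)(((s) >> 8) & 0x1f))

    int main(void)
    {
        int txq_id = 4, index = 42;

        uint16_t seq = QUEUE_TO_SEQ(txq_id) | INDEX_TO_SEQ(index);
        printf("sequence=0x%04x -> queue=%d index=%d\n",
               (unsigned)seq, SEQ_TO_QUEUE(seq), SEQ_TO_INDEX(seq));
        return 0;
    }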
*/ + if (palive->is_valid == UCODE_VALID_OK) + queue_delayed_work(priv->workqueue, pwork, + msecs_to_jiffies(5)); + else + IWL_WARNING("uCode did not respond OK.\n"); +} + +static void iwl_rx_reply_add_sta(struct iwl_priv *priv, + struct iwl_rx_mem_buffer *rxb) +{ + struct iwl_rx_packet *pkt = (void *)rxb->skb->data; + + IWL_DEBUG_RX("Received REPLY_ADD_STA: 0x%02X\n", pkt->u.status); + return; +} + +static void iwl_rx_reply_error(struct iwl_priv *priv, + struct iwl_rx_mem_buffer *rxb) +{ + struct iwl_rx_packet *pkt = (void *)rxb->skb->data; + + IWL_ERROR("Error Reply type 0x%08X cmd %s (0x%02X) " + "seq 0x%04X ser 0x%08X\n", + le32_to_cpu(pkt->u.err_resp.error_type), + get_cmd_string(pkt->u.err_resp.cmd_id), + pkt->u.err_resp.cmd_id, + le16_to_cpu(pkt->u.err_resp.bad_cmd_seq_num), + le32_to_cpu(pkt->u.err_resp.error_info)); +} + +#define TX_STATUS_ENTRY(x) case TX_STATUS_FAIL_ ## x: return #x + +static void iwl_rx_csa(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb) +{ + struct iwl_rx_packet *pkt = (void *)rxb->skb->data; + struct iwl_rxon_cmd *rxon = (void *)&priv->active_rxon; + struct iwl_csa_notification *csa = &(pkt->u.csa_notif); + IWL_DEBUG_11H("CSA notif: channel %d, status %d\n", + le16_to_cpu(csa->channel), le32_to_cpu(csa->status)); + rxon->channel = csa->channel; + priv->staging_rxon.channel = csa->channel; +} + +static void iwl_rx_spectrum_measure_notif(struct iwl_priv *priv, + struct iwl_rx_mem_buffer *rxb) +{ +#ifdef CONFIG_IWLWIFI_SPECTRUM_MEASUREMENT + struct iwl_rx_packet *pkt = (void *)rxb->skb->data; + struct iwl_spectrum_notification *report = &(pkt->u.spectrum_notif); + + if (!report->state) { + IWL_DEBUG(IWL_DL_11H | IWL_DL_INFO, + "Spectrum Measure Notification: Start\n"); + return; + } + + memcpy(&priv->measure_report, report, sizeof(*report)); + priv->measurement_status |= MEASUREMENT_READY; +#endif +} + +static void iwl_rx_pm_sleep_notif(struct iwl_priv *priv, + struct iwl_rx_mem_buffer *rxb) +{ +#ifdef CONFIG_IWLWIFI_DEBUG + struct iwl_rx_packet *pkt = (void *)rxb->skb->data; + struct iwl_sleep_notification *sleep = &(pkt->u.sleep_notif); + IWL_DEBUG_RX("sleep mode: %d, src: %d\n", + sleep->pm_sleep_mode, sleep->pm_wakeup_src); +#endif +} + +static void iwl_rx_pm_debug_statistics_notif(struct iwl_priv *priv, + struct iwl_rx_mem_buffer *rxb) +{ + struct iwl_rx_packet *pkt = (void *)rxb->skb->data; + IWL_DEBUG_RADIO("Dumping %d bytes of unhandled " + "notification for %s:\n", + le32_to_cpu(pkt->len), get_cmd_string(pkt->hdr.cmd)); + iwl_print_hex_dump(IWL_DL_RADIO, pkt->u.raw, le32_to_cpu(pkt->len)); +} + +static void iwl_bg_beacon_update(struct work_struct *work) +{ + struct iwl_priv *priv = + container_of(work, struct iwl_priv, beacon_update); + struct sk_buff *beacon; + + /* Pull updated AP beacon from mac80211. 
will fail if not in AP mode */ + beacon = ieee80211_beacon_get(priv->hw, priv->interface_id, NULL); + + if (!beacon) { + IWL_ERROR("update beacon failed\n"); + return; + } + + mutex_lock(&priv->mutex); + /* new beacon skb is allocated every time; dispose previous.*/ + if (priv->ibss_beacon) + dev_kfree_skb(priv->ibss_beacon); + + priv->ibss_beacon = beacon; + mutex_unlock(&priv->mutex); + + iwl_send_beacon_cmd(priv); +} + +static void iwl_rx_beacon_notif(struct iwl_priv *priv, + struct iwl_rx_mem_buffer *rxb) +{ +#ifdef CONFIG_IWLWIFI_DEBUG + struct iwl_rx_packet *pkt = (void *)rxb->skb->data; + struct iwl_beacon_notif *beacon = &(pkt->u.beacon_status); + u8 rate = iwl_hw_get_rate(beacon->beacon_notify_hdr.rate_n_flags); + + IWL_DEBUG_RX("beacon status %x retries %d iss %d " + "tsf %d %d rate %d\n", + le32_to_cpu(beacon->beacon_notify_hdr.status) & TX_STATUS_MSK, + beacon->beacon_notify_hdr.failure_frame, + le32_to_cpu(beacon->ibss_mgr_status), + le32_to_cpu(beacon->high_tsf), + le32_to_cpu(beacon->low_tsf), rate); +#endif + + if ((priv->iw_mode == IEEE80211_IF_TYPE_AP) && + (!test_bit(STATUS_EXIT_PENDING, &priv->status))) + queue_work(priv->workqueue, &priv->beacon_update); +} + +/* Service response to REPLY_SCAN_CMD (0x80) */ +static void iwl_rx_reply_scan(struct iwl_priv *priv, + struct iwl_rx_mem_buffer *rxb) +{ +#ifdef CONFIG_IWLWIFI_DEBUG + struct iwl_rx_packet *pkt = (void *)rxb->skb->data; + struct iwl_scanreq_notification *notif = + (struct iwl_scanreq_notification *)pkt->u.raw; + + IWL_DEBUG_RX("Scan request status = 0x%x\n", notif->status); +#endif +} + +/* Service SCAN_START_NOTIFICATION (0x82) */ +static void iwl_rx_scan_start_notif(struct iwl_priv *priv, + struct iwl_rx_mem_buffer *rxb) +{ + struct iwl_rx_packet *pkt = (void *)rxb->skb->data; + struct iwl_scanstart_notification *notif = + (struct iwl_scanstart_notification *)pkt->u.raw; + priv->scan_start_tsf = le32_to_cpu(notif->tsf_low); + IWL_DEBUG_SCAN("Scan start: " + "%d [802.11%s] " + "(TSF: 0x%08X:%08X) - %d (beacon timer %u)\n", + notif->channel, + notif->band ? "bg" : "a", + notif->tsf_high, + notif->tsf_low, notif->status, notif->beacon_timer); +} + +/* Service SCAN_RESULTS_NOTIFICATION (0x83) */ +static void iwl_rx_scan_results_notif(struct iwl_priv *priv, + struct iwl_rx_mem_buffer *rxb) +{ + struct iwl_rx_packet *pkt = (void *)rxb->skb->data; + struct iwl_scanresults_notification *notif = + (struct iwl_scanresults_notification *)pkt->u.raw; + + IWL_DEBUG_SCAN("Scan ch.res: " + "%d [802.11%s] " + "(TSF: 0x%08X:%08X) - %d " + "elapsed=%lu usec (%dms since last)\n", + notif->channel, + notif->band ? "bg" : "a", + le32_to_cpu(notif->tsf_high), + le32_to_cpu(notif->tsf_low), + le32_to_cpu(notif->statistics[0]), + le32_to_cpu(notif->tsf_low) - priv->scan_start_tsf, + jiffies_to_msecs(elapsed_jiffies + (priv->last_scan_jiffies, jiffies))); + + priv->last_scan_jiffies = jiffies; +} + +/* Service SCAN_COMPLETE_NOTIFICATION (0x84) */ +static void iwl_rx_scan_complete_notif(struct iwl_priv *priv, + struct iwl_rx_mem_buffer *rxb) +{ + struct iwl_rx_packet *pkt = (void *)rxb->skb->data; + struct iwl_scancomplete_notification *scan_notif = (void *)pkt->u.raw; + + IWL_DEBUG_SCAN("Scan complete: %d channels (TSF 0x%08X:%08X) - %d\n", + scan_notif->scanned_channels, + scan_notif->tsf_low, + scan_notif->tsf_high, scan_notif->status); + + /* The HW is no longer scanning */ + clear_bit(STATUS_SCAN_HW, &priv->status); + + /* The scan completion notification came in, so kill that timer... 
*/ + cancel_delayed_work(&priv->scan_check); + + IWL_DEBUG_INFO("Scan pass on %sGHz took %dms\n", + (priv->scan_bands == 2) ? "2.4" : "5.2", + jiffies_to_msecs(elapsed_jiffies + (priv->scan_pass_start, jiffies))); + + /* Remove this scanned band from the list + * of pending bands to scan */ + priv->scan_bands--; + + /* If a request to abort was given, or the scan did not succeed + * then we reset the scan state machine and terminate, + * re-queuing another scan if one has been requested */ + if (test_bit(STATUS_SCAN_ABORTING, &priv->status)) { + IWL_DEBUG_INFO("Aborted scan completed.\n"); + clear_bit(STATUS_SCAN_ABORTING, &priv->status); + } else { + /* If there are more bands on this scan pass reschedule */ + if (priv->scan_bands > 0) + goto reschedule; + } + + priv->last_scan_jiffies = jiffies; + IWL_DEBUG_INFO("Setting scan to off\n"); + + clear_bit(STATUS_SCANNING, &priv->status); + + IWL_DEBUG_INFO("Scan took %dms\n", + jiffies_to_msecs(elapsed_jiffies(priv->scan_start, jiffies))); + + queue_work(priv->workqueue, &priv->scan_completed); + + return; + +reschedule: + priv->scan_pass_start = jiffies; + queue_work(priv->workqueue, &priv->request_scan); +} + +/* Handle notification from uCode that card's power state is changing + * due to software, hardware, or critical temperature RFKILL */ +static void iwl_rx_card_state_notif(struct iwl_priv *priv, + struct iwl_rx_mem_buffer *rxb) +{ + struct iwl_rx_packet *pkt = (void *)rxb->skb->data; + u32 flags = le32_to_cpu(pkt->u.card_state_notif.flags); + unsigned long status = priv->status; + + IWL_DEBUG_RF_KILL("Card state received: HW:%s SW:%s\n", + (flags & HW_CARD_DISABLED) ? "Kill" : "On", + (flags & SW_CARD_DISABLED) ? "Kill" : "On"); + + if (flags & (SW_CARD_DISABLED | HW_CARD_DISABLED | + RF_CARD_DISABLED)) { + + iwl_write32(priv, CSR_UCODE_DRV_GP1_SET, + CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED); + + if (!iwl_grab_restricted_access(priv)) { + iwl_write_restricted( + priv, HBUS_TARG_MBX_C, + HBUS_TARG_MBX_C_REG_BIT_CMD_BLOCKED); + + iwl_release_restricted_access(priv); + } + + if (!(flags & RXON_CARD_DISABLED)) { + iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, + CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED); + if (!iwl_grab_restricted_access(priv)) { + iwl_write_restricted( + priv, HBUS_TARG_MBX_C, + HBUS_TARG_MBX_C_REG_BIT_CMD_BLOCKED); + + iwl_release_restricted_access(priv); + } + } + + if (flags & RF_CARD_DISABLED) { + iwl_write32(priv, CSR_UCODE_DRV_GP1_SET, + CSR_UCODE_DRV_GP1_REG_BIT_CT_KILL_EXIT); + iwl_read32(priv, CSR_UCODE_DRV_GP1); + if (!iwl_grab_restricted_access(priv)) + iwl_release_restricted_access(priv); + } + } + + if (flags & HW_CARD_DISABLED) + set_bit(STATUS_RF_KILL_HW, &priv->status); + else + clear_bit(STATUS_RF_KILL_HW, &priv->status); + + + if (flags & SW_CARD_DISABLED) + set_bit(STATUS_RF_KILL_SW, &priv->status); + else + clear_bit(STATUS_RF_KILL_SW, &priv->status); + + if (!(flags & RXON_CARD_DISABLED)) + iwl_scan_cancel(priv); + + if ((test_bit(STATUS_RF_KILL_HW, &status) != + test_bit(STATUS_RF_KILL_HW, &priv->status)) || + (test_bit(STATUS_RF_KILL_SW, &status) != + test_bit(STATUS_RF_KILL_SW, &priv->status))) + queue_work(priv->workqueue, &priv->rf_kill); + else + wake_up_interruptible(&priv->wait_command_queue); +} + +/** + * iwl_setup_rx_handlers - Initialize Rx handler callbacks + * + * Setup the RX handlers for each of the reply types sent from the uCode + * to the host. + * + * This function chains into the hardware specific files for them to setup + * any hardware specific handlers as well. 
+ */ +static void iwl_setup_rx_handlers(struct iwl_priv *priv) +{ + priv->rx_handlers[REPLY_ALIVE] = iwl_rx_reply_alive; + priv->rx_handlers[REPLY_ADD_STA] = iwl_rx_reply_add_sta; + priv->rx_handlers[REPLY_ERROR] = iwl_rx_reply_error; + priv->rx_handlers[CHANNEL_SWITCH_NOTIFICATION] = iwl_rx_csa; + priv->rx_handlers[SPECTRUM_MEASURE_NOTIFICATION] = + iwl_rx_spectrum_measure_notif; + priv->rx_handlers[PM_SLEEP_NOTIFICATION] = iwl_rx_pm_sleep_notif; + priv->rx_handlers[PM_DEBUG_STATISTIC_NOTIFIC] = + iwl_rx_pm_debug_statistics_notif; + priv->rx_handlers[BEACON_NOTIFICATION] = iwl_rx_beacon_notif; + + /* NOTE: iwl_rx_statistics is different based on whether + * the build is for the 3945 or the 4965. See the + * corresponding implementation in iwl-XXXX.c + * + * The same handler is used for both the REPLY to a + * discrete statistics request from the host as well as + * for the periodic statistics notification from the uCode + */ + priv->rx_handlers[REPLY_STATISTICS_CMD] = iwl_hw_rx_statistics; + priv->rx_handlers[STATISTICS_NOTIFICATION] = iwl_hw_rx_statistics; + + priv->rx_handlers[REPLY_SCAN_CMD] = iwl_rx_reply_scan; + priv->rx_handlers[SCAN_START_NOTIFICATION] = iwl_rx_scan_start_notif; + priv->rx_handlers[SCAN_RESULTS_NOTIFICATION] = + iwl_rx_scan_results_notif; + priv->rx_handlers[SCAN_COMPLETE_NOTIFICATION] = + iwl_rx_scan_complete_notif; + priv->rx_handlers[CARD_STATE_NOTIFICATION] = iwl_rx_card_state_notif; + priv->rx_handlers[REPLY_TX] = iwl_rx_reply_tx; + + /* Setup hardware specific Rx handlers */ + iwl_hw_rx_handler_setup(priv); +} + +/** + * iwl_tx_cmd_complete - Pull unused buffers off the queue and reclaim them + * @rxb: Rx buffer to reclaim + * + * If an Rx buffer has an async callback associated with it the callback + * will be executed. The attached skb (if present) will only be freed + * if the callback returns 1 + */ +static void iwl_tx_cmd_complete(struct iwl_priv *priv, + struct iwl_rx_mem_buffer *rxb) +{ + struct iwl_rx_packet *pkt = (struct iwl_rx_packet *)rxb->skb->data; + u16 sequence = le16_to_cpu(pkt->hdr.sequence); + int txq_id = SEQ_TO_QUEUE(sequence); + int index = SEQ_TO_INDEX(sequence); + int huge = sequence & SEQ_HUGE_FRAME; + int cmd_index; + struct iwl_cmd *cmd; + + /* If a Tx command is being handled and it isn't in the actual + * command queue then there a command routing bug has been introduced + * in the queue management code. */ + if (txq_id != IWL_CMD_QUEUE_NUM) + IWL_ERROR("Error wrong command queue %d command id 0x%X\n", + txq_id, pkt->hdr.cmd); + BUG_ON(txq_id != IWL_CMD_QUEUE_NUM); + + cmd_index = get_cmd_index(&priv->txq[IWL_CMD_QUEUE_NUM].q, index, huge); + cmd = &priv->txq[IWL_CMD_QUEUE_NUM].cmd[cmd_index]; + + /* Input error checking is done when commands are added to queue. */ + if (cmd->meta.flags & CMD_WANT_SKB) { + cmd->meta.source->u.skb = rxb->skb; + rxb->skb = NULL; + } else if (cmd->meta.u.callback && + !cmd->meta.u.callback(priv, cmd, rxb->skb)) + rxb->skb = NULL; + + iwl_tx_queue_reclaim(priv, txq_id, index); + + if (!(cmd->meta.flags & CMD_ASYNC)) { + clear_bit(STATUS_HCMD_ACTIVE, &priv->status); + wake_up_interruptible(&priv->wait_command_queue); + } +} + +/************************** RX-FUNCTIONS ****************************/ +/* + * Rx theory of operation + * + * The host allocates 32 DMA target addresses and passes the host address + * to the firmware at register IWL_RFDS_TABLE_LOWER + N * RFD_SIZE where N is + * 0 to 31 + * + * Rx Queue Indexes + * The host/firmware share two index registers for managing the Rx buffers. 
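iwl_setup_rx_handlers populates a table of callbacks indexed by the uCode command/notification id, and the Rx path later dispatches every received packet through that table, falling through quietly when no handler is registered. A minimal standalone model of the pattern; the command ids and payloads below are invented for illustration.

    /* Minimal model of the rx_handlers dispatch: a table of callbacks
     * indexed by command id, consulted for every packet pulled off the
     * Rx queue.  Ids and payloads are invented. */
    #include <stdio.h>

    #define MAX_CMD 256

    struct pkt { unsigned char cmd; const char *payload; };

    typedef void (*rx_handler_t)(const struct pkt *p);

    static rx_handler_t rx_handlers[MAX_CMD];

    static void handle_alive(const struct pkt *p)  { printf("ALIVE: %s\n", p->payload); }
    static void handle_beacon(const struct pkt *p) { printf("BEACON: %s\n", p->payload); }

    int main(void)
    {
        struct pkt pkts[] = { {0x01, "uCode up"}, {0x90, "tsf ..."}, {0x7f, "?"} };
        unsigned int i;

        rx_handlers[0x01] = handle_alive;   /* hypothetical id for an ALIVE reply */
        rx_handlers[0x90] = handle_beacon;  /* hypothetical id for a beacon notification */

        for (i = 0; i < sizeof(pkts) / sizeof(pkts[0]); i++) {
            if (rx_handlers[pkts[i].cmd])
                rx_handlers[pkts[i].cmd](&pkts[i]);
            else
                printf("no handler needed for cmd 0x%02x\n", pkts[i].cmd);
        }
        return 0;
    }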
+ * + * The READ index maps to the first position that the firmware may be writing + * to -- the driver can read up to (but not including) this position and get + * good data. + * The READ index is managed by the firmware once the card is enabled. + * + * The WRITE index maps to the last position the driver has read from -- the + * position preceding WRITE is the last slot the firmware can place a packet. + * + * The queue is empty (no good data) if WRITE = READ - 1, and is full if + * WRITE = READ. + * + * During initialization the host sets up the READ queue position to the first + * INDEX position, and WRITE to the last (READ - 1 wrapped) + * + * When the firmware places a packet in a buffer it will advance the READ index + * and fire the RX interrupt. The driver can then query the READ index and + * process as many packets as possible, moving the WRITE index forward as it + * resets the Rx queue buffers with new memory. + * + * The management in the driver is as follows: + * + A list of pre-allocated SKBs is stored in iwl->rxq->rx_free. When + * iwl->rxq->free_count drops to or below RX_LOW_WATERMARK, work is scheduled + * to replensish the iwl->rxq->rx_free. + * + In iwl_rx_replenish (scheduled) if 'processed' != 'read' then the + * iwl->rxq is replenished and the READ INDEX is updated (updating the + * 'processed' and 'read' driver indexes as well) + * + A received packet is processed and handed to the kernel network stack, + * detached from the iwl->rxq. The driver 'processed' index is updated. + * + The Host/Firmware iwl->rxq is replenished at tasklet time from the rx_free + * list. If there are no allocated buffers in iwl->rxq->rx_free, the READ + * INDEX is not incremented and iwl->status(RX_STALLED) is set. If there + * were enough free buffers and RX_STALLED is set it is cleared. + * + * + * Driver sequence: + * + * iwl_rx_queue_alloc() Allocates rx_free + * iwl_rx_replenish() Replenishes rx_free list from rx_used, and calls + * iwl_rx_queue_restock + * iwl_rx_queue_restock() Moves available buffers from rx_free into Rx + * queue, updates firmware pointers, and updates + * the WRITE index. If insufficient rx_free buffers + * are available, schedules iwl_rx_replenish + * + * -- enable interrupts -- + * ISR - iwl_rx() Detach iwl_rx_mem_buffers from pool up to the + * READ INDEX, detaching the SKB from the pool. + * Moves the packet buffer from queue to rx_used. + * Calls iwl_rx_queue_restock to refill any empty + * slots. + * ... + * + */ + +/** + * iwl_rx_queue_space - Return number of free slots available in queue. 
+ */ +static int iwl_rx_queue_space(const struct iwl_rx_queue *q) +{ + int s = q->read - q->write; + if (s <= 0) + s += RX_QUEUE_SIZE; + /* keep some buffer to not confuse full and empty queue */ + s -= 2; + if (s < 0) + s = 0; + return s; +} + +/** + * iwl_rx_queue_update_write_ptr - Update the write pointer for the RX queue + * + * NOTE: This function has 3945 and 4965 specific code sections + * but is declared in base due to the majority of the + * implementation being the same (only a numeric constant is + * different) + * + */ +int iwl_rx_queue_update_write_ptr(struct iwl_priv *priv, struct iwl_rx_queue *q) +{ + u32 reg = 0; + int rc = 0; + unsigned long flags; + + spin_lock_irqsave(&q->lock, flags); + + if (q->need_update == 0) + goto exit_unlock; + + if (test_bit(STATUS_POWER_PMI, &priv->status)) { + reg = iwl_read32(priv, CSR_UCODE_DRV_GP1); + + if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) { + iwl_set_bit(priv, CSR_GP_CNTRL, + CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); + goto exit_unlock; + } + + rc = iwl_grab_restricted_access(priv); + if (rc) + goto exit_unlock; + + iwl_write_restricted(priv, FH_RSCSR_CHNL0_WPTR, + q->write & ~0x7); + iwl_release_restricted_access(priv); + } else + iwl_write32(priv, FH_RSCSR_CHNL0_WPTR, q->write & ~0x7); + + + q->need_update = 0; + + exit_unlock: + spin_unlock_irqrestore(&q->lock, flags); + return rc; +} + +/** + * iwl_dma_addr2rbd_ptr - convert a DMA address to a uCode read buffer pointer. + * + * NOTE: This function has 3945 and 4965 specific code paths in it. + */ +static inline __le32 iwl_dma_addr2rbd_ptr(struct iwl_priv *priv, + dma_addr_t dma_addr) +{ + return cpu_to_le32((u32)(dma_addr >> 8)); +} + + +/** + * iwl_rx_queue_restock - refill RX queue from pre-allocated pool + * + * If there are slots in the RX queue that need to be restocked, + * and we have free pre-allocated buffers, fill the ranks as much + * as we can pulling from rx_free. + * + * This moves the 'write' index forward to catch up with 'processed', and + * also updates the memory address in the firmware to reference the new + * target buffer. + */ +int iwl_rx_queue_restock(struct iwl_priv *priv) +{ + struct iwl_rx_queue *rxq = &priv->rxq; + struct list_head *element; + struct iwl_rx_mem_buffer *rxb; + unsigned long flags; + int write, rc; + + spin_lock_irqsave(&rxq->lock, flags); + write = rxq->write & ~0x7; + while ((iwl_rx_queue_space(rxq) > 0) && (rxq->free_count)) { + element = rxq->rx_free.next; + rxb = list_entry(element, struct iwl_rx_mem_buffer, list); + list_del(element); + rxq->bd[rxq->write] = iwl_dma_addr2rbd_ptr(priv, rxb->dma_addr); + rxq->queue[rxq->write] = rxb; + rxq->write = (rxq->write + 1) & RX_QUEUE_MASK; + rxq->free_count--; + } + spin_unlock_irqrestore(&rxq->lock, flags); + /* If the pre-allocated buffer pool is dropping low, schedule to + * refill it */ + if (rxq->free_count <= RX_LOW_WATERMARK) + queue_work(priv->workqueue, &priv->rx_replenish); + + + /* If we've added more space for the firmware to place data, tell it */ + if ((write != (rxq->write & ~0x7)) + || (abs(rxq->write - rxq->read) > 7)) { + spin_lock_irqsave(&rxq->lock, flags); + rxq->need_update = 1; + spin_unlock_irqrestore(&rxq->lock, flags); + rc = iwl_rx_queue_update_write_ptr(priv, rxq); + if (rc) + return rc; + } + + return 0; +} + +/** + * iwl_rx_replensih - Move all used packet from rx_used to rx_free + * + * When moving to rx_free an SKB is allocated for the slot. + * + * Also restock the Rx queue via iwl_rx_queue_restock. 
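A worked example of the read/write index arithmetic described in the Rx theory-of-operation comment: free space is the distance from 'write' back around to 'read', minus two slots held in reserve so a completely full ring is never mistaken for an empty one. A 256-entry ring is assumed here for illustration.

    /* Worked example of iwl_rx_queue_space: distance from 'write' back to
     * 'read' around the ring, keeping two slots in reserve so full and
     * empty states stay distinguishable. */
    #include <stdio.h>

    #define RX_QUEUE_SIZE 256   /* assumed ring size for the example */

    static int rx_queue_space(int read, int write)
    {
        int s = read - write;
        if (s <= 0)
            s += RX_QUEUE_SIZE;
        s -= 2;                 /* reserve so full != empty */
        if (s < 0)
            s = 0;
        return s;
    }

    int main(void)
    {
        printf("read=10 write=10  -> %d free\n", rx_queue_space(10, 10));   /* 254 */
        printf("read=10 write=200 -> %d free\n", rx_queue_space(10, 200));  /* 64  */
        printf("read=10 write=9   -> %d free\n", rx_queue_space(10, 9));    /* 0 (full) */
        return 0;
    }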
+ * This is called as a scheduled work item (except for during intialization) + */ +void iwl_rx_replenish(void *data) +{ + struct iwl_priv *priv = data; + struct iwl_rx_queue *rxq = &priv->rxq; + struct list_head *element; + struct iwl_rx_mem_buffer *rxb; + unsigned long flags; + spin_lock_irqsave(&rxq->lock, flags); + while (!list_empty(&rxq->rx_used)) { + element = rxq->rx_used.next; + rxb = list_entry(element, struct iwl_rx_mem_buffer, list); + rxb->skb = + alloc_skb(IWL_RX_BUF_SIZE, __GFP_NOWARN | GFP_ATOMIC); + if (!rxb->skb) { + if (net_ratelimit()) + printk(KERN_CRIT DRV_NAME + ": Can not allocate SKB buffers\n"); + /* We don't reschedule replenish work here -- we will + * call the restock method and if it still needs + * more buffers it will schedule replenish */ + break; + } + priv->alloc_rxb_skb++; + list_del(element); + rxb->dma_addr = + pci_map_single(priv->pci_dev, rxb->skb->data, + IWL_RX_BUF_SIZE, PCI_DMA_FROMDEVICE); + list_add_tail(&rxb->list, &rxq->rx_free); + rxq->free_count++; + } + spin_unlock_irqrestore(&rxq->lock, flags); + + spin_lock_irqsave(&priv->lock, flags); + iwl_rx_queue_restock(priv); + spin_unlock_irqrestore(&priv->lock, flags); +} + +/* Assumes that the skb field of the buffers in 'pool' is kept accurate. + * If an SKB has been detached, the POOL needs to have it's SKB set to NULL + * This free routine walks the list of POOL entries and if SKB is set to + * non NULL it is unmapped and freed + */ +void iwl_rx_queue_free(struct iwl_priv *priv, struct iwl_rx_queue *rxq) +{ + int i; + for (i = 0; i < RX_QUEUE_SIZE + RX_FREE_BUFFERS; i++) { + if (rxq->pool[i].skb != NULL) { + pci_unmap_single(priv->pci_dev, + rxq->pool[i].dma_addr, + IWL_RX_BUF_SIZE, PCI_DMA_FROMDEVICE); + dev_kfree_skb(rxq->pool[i].skb); + } + } + + pci_free_consistent(priv->pci_dev, 4 * RX_QUEUE_SIZE, rxq->bd, + rxq->dma_addr); + rxq->bd = NULL; +} + +int iwl_rx_queue_alloc(struct iwl_priv *priv) +{ + struct iwl_rx_queue *rxq = &priv->rxq; + struct pci_dev *dev = priv->pci_dev; + int i; + + spin_lock_init(&rxq->lock); + INIT_LIST_HEAD(&rxq->rx_free); + INIT_LIST_HEAD(&rxq->rx_used); + rxq->bd = pci_alloc_consistent(dev, 4 * RX_QUEUE_SIZE, &rxq->dma_addr); + if (!rxq->bd) + return -ENOMEM; + /* Fill the rx_used queue with _all_ of the Rx buffers */ + for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) + list_add_tail(&rxq->pool[i].list, &rxq->rx_used); + /* Set us so that we have processed and used all buffers, but have + * not restocked the Rx queue with fresh buffers */ + rxq->read = rxq->write = 0; + rxq->free_count = 0; + rxq->need_update = 0; + return 0; +} + +void iwl_rx_queue_reset(struct iwl_priv *priv, struct iwl_rx_queue *rxq) +{ + unsigned long flags; + int i; + spin_lock_irqsave(&rxq->lock, flags); + INIT_LIST_HEAD(&rxq->rx_free); + INIT_LIST_HEAD(&rxq->rx_used); + /* Fill the rx_used queue with _all_ of the Rx buffers */ + for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) { + /* In the reset function, these buffers may have been allocated + * to an SKB, so we need to unmap and free potential storage */ + if (rxq->pool[i].skb != NULL) { + pci_unmap_single(priv->pci_dev, + rxq->pool[i].dma_addr, + IWL_RX_BUF_SIZE, PCI_DMA_FROMDEVICE); + priv->alloc_rxb_skb--; + dev_kfree_skb(rxq->pool[i].skb); + rxq->pool[i].skb = NULL; + } + list_add_tail(&rxq->pool[i].list, &rxq->rx_used); + } + + /* Set us so that we have processed and used all buffers, but have + * not restocked the Rx queue with fresh buffers */ + rxq->read = rxq->write = 0; + rxq->free_count = 0; + 
spin_unlock_irqrestore(&rxq->lock, flags); +} + +/* Convert linear signal-to-noise ratio into dB */ +static u8 ratio2dB[100] = { +/* 0 1 2 3 4 5 6 7 8 9 */ + 0, 0, 6, 10, 12, 14, 16, 17, 18, 19, /* 00 - 09 */ + 20, 21, 22, 22, 23, 23, 24, 25, 26, 26, /* 10 - 19 */ + 26, 26, 26, 27, 27, 28, 28, 28, 29, 29, /* 20 - 29 */ + 29, 30, 30, 30, 31, 31, 31, 31, 32, 32, /* 30 - 39 */ + 32, 32, 32, 33, 33, 33, 33, 33, 34, 34, /* 40 - 49 */ + 34, 34, 34, 34, 35, 35, 35, 35, 35, 35, /* 50 - 59 */ + 36, 36, 36, 36, 36, 36, 36, 37, 37, 37, /* 60 - 69 */ + 37, 37, 37, 37, 37, 38, 38, 38, 38, 38, /* 70 - 79 */ + 38, 38, 38, 38, 38, 39, 39, 39, 39, 39, /* 80 - 89 */ + 39, 39, 39, 39, 39, 40, 40, 40, 40, 40 /* 90 - 99 */ +}; + +/* Calculates a relative dB value from a ratio of linear + * (i.e. not dB) signal levels. + * Conversion assumes that levels are voltages (20*log), not powers (10*log). */ +int iwl_calc_db_from_ratio(int sig_ratio) +{ + /* Anything above 1000:1 just report as 60 dB */ + if (sig_ratio > 1000) + return 60; + + /* Above 100:1, divide by 10 and use table, + * add 20 dB to make up for divide by 10 */ + if (sig_ratio > 100) + return (20 + (int)ratio2dB[sig_ratio/10]); + + /* We shouldn't see this */ + if (sig_ratio < 1) + return 0; + + /* Use table for ratios 1:1 - 99:1 */ + return (int)ratio2dB[sig_ratio]; +} + +#define PERFECT_RSSI (-20) /* dBm */ +#define WORST_RSSI (-95) /* dBm */ +#define RSSI_RANGE (PERFECT_RSSI - WORST_RSSI) + +/* Calculate an indication of rx signal quality (a percentage, not dBm!). + * See http://www.ces.clemson.edu/linux/signal_quality.shtml for info + * about formulas used below. */ +int iwl_calc_sig_qual(int rssi_dbm, int noise_dbm) +{ + int sig_qual; + int degradation = PERFECT_RSSI - rssi_dbm; + + /* If we get a noise measurement, use signal-to-noise ratio (SNR) + * as indicator; formula is (signal dbm - noise dbm). + * SNR at or above 40 is a great signal (100%). + * Below that, scale to fit SNR of 0 - 40 dB within 0 - 100% indicator. + * Weakest usable signal is usually 10 - 15 dB SNR. */ + if (noise_dbm) { + if (rssi_dbm - noise_dbm >= 40) + return 100; + else if (rssi_dbm < noise_dbm) + return 0; + sig_qual = ((rssi_dbm - noise_dbm) * 5) / 2; + + /* Else use just the signal level. + * This formula is a least squares fit of data points collected and + * compared with a reference system that had a percentage (%) display + * for signal quality. */ + } else + sig_qual = (100 * (RSSI_RANGE * RSSI_RANGE) - degradation * + (15 * RSSI_RANGE + 62 * degradation)) / + (RSSI_RANGE * RSSI_RANGE); + + if (sig_qual > 100) + sig_qual = 100; + else if (sig_qual < 1) + sig_qual = 0; + + return sig_qual; +} + +/** + * iwl_rx_handle - Main entry function for receiving responses from the uCode + * + * Uses the priv->rx_handlers callback function array to invoke + * the appropriate handlers, including command responses, + * frame-received notifications, and other notifications. 
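A quick worked example of the signal-quality formulas above: when a noise figure is available the quality is simply the SNR scaled onto 0-100, otherwise the least-squares fit over the -95..-20 dBm RSSI range is used. The helper below reproduces the arithmetic of iwl_calc_sig_qual so the two branches can be compared on sample inputs.

    /* Worked example of the signal-quality calculation: scaled SNR when a
     * noise figure is available, otherwise the least-squares fit over the
     * -95..-20 dBm RSSI range. */
    #include <stdio.h>

    #define PERFECT_RSSI (-20)
    #define WORST_RSSI   (-95)
    #define RSSI_RANGE   (PERFECT_RSSI - WORST_RSSI)

    static int calc_sig_qual(int rssi_dbm, int noise_dbm)
    {
        int sig_qual;
        int degradation = PERFECT_RSSI - rssi_dbm;

        if (noise_dbm) {
            if (rssi_dbm - noise_dbm >= 40)
                return 100;               /* SNR of 40 dB or better: perfect */
            else if (rssi_dbm < noise_dbm)
                return 0;
            sig_qual = ((rssi_dbm - noise_dbm) * 5) / 2;
        } else {
            sig_qual = (100 * (RSSI_RANGE * RSSI_RANGE) -
                        degradation * (15 * RSSI_RANGE + 62 * degradation)) /
                       (RSSI_RANGE * RSSI_RANGE);
        }

        if (sig_qual > 100)
            sig_qual = 100;
        else if (sig_qual < 1)
            sig_qual = 0;
        return sig_qual;
    }

    int main(void)
    {
        printf("rssi -60, noise -90 -> %d%%\n", calc_sig_qual(-60, -90)); /* SNR 30 -> 75 */
        printf("rssi -60, no noise  -> %d%%\n", calc_sig_qual(-60, 0));   /* RSSI fit -> 74 */
        return 0;
    }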
+ */ +static void iwl_rx_handle(struct iwl_priv *priv) +{ + struct iwl_rx_mem_buffer *rxb; + struct iwl_rx_packet *pkt; + struct iwl_rx_queue *rxq = &priv->rxq; + u32 r, i; + int reclaim; + unsigned long flags; + + r = iwl_hw_get_rx_read(priv); + i = rxq->read; + + /* Rx interrupt, but nothing sent from uCode */ + if (i == r) + IWL_DEBUG(IWL_DL_RX | IWL_DL_ISR, "r = %d, i = %d\n", r, i); + + while (i != r) { + rxb = rxq->queue[i]; + + /* If an RXB doesn't have a queue slot associated with it + * then a bug has been introduced in the queue refilling + * routines -- catch it here */ + BUG_ON(rxb == NULL); + + rxq->queue[i] = NULL; + + pci_dma_sync_single_for_cpu(priv->pci_dev, rxb->dma_addr, + IWL_RX_BUF_SIZE, + PCI_DMA_FROMDEVICE); + pkt = (struct iwl_rx_packet *)rxb->skb->data; + + /* Reclaim a command buffer only if this packet is a response + * to a (driver-originated) command. + * If the packet (e.g. Rx frame) originated from uCode, + * there is no command buffer to reclaim. + * Ucode should set SEQ_RX_FRAME bit if ucode-originated, + * but apparently a few don't get set; catch them here. */ + reclaim = !(pkt->hdr.sequence & SEQ_RX_FRAME) && + (pkt->hdr.cmd != REPLY_RX_PHY_CMD) && + (pkt->hdr.cmd != REPLY_4965_RX) && + (pkt->hdr.cmd != REPLY_COMPRESSED_BA) && + (pkt->hdr.cmd != STATISTICS_NOTIFICATION) && + (pkt->hdr.cmd != REPLY_TX); + + /* Based on type of command response or notification, + * handle those that need handling via function in + * rx_handlers table. See iwl_setup_rx_handlers() */ + if (priv->rx_handlers[pkt->hdr.cmd]) { + IWL_DEBUG(IWL_DL_HOST_COMMAND | IWL_DL_RX | IWL_DL_ISR, + "r = %d, i = %d, %s, 0x%02x\n", r, i, + get_cmd_string(pkt->hdr.cmd), pkt->hdr.cmd); + priv->rx_handlers[pkt->hdr.cmd] (priv, rxb); + } else { + /* No handling needed */ + IWL_DEBUG(IWL_DL_HOST_COMMAND | IWL_DL_RX | IWL_DL_ISR, + "r %d i %d No handler needed for %s, 0x%02x\n", + r, i, get_cmd_string(pkt->hdr.cmd), + pkt->hdr.cmd); + } + + if (reclaim) { + /* Invoke any callbacks, transfer the skb to caller, + * and fire off the (possibly) blocking iwl_send_cmd() + * as we reclaim the driver command queue */ + if (rxb && rxb->skb) + iwl_tx_cmd_complete(priv, rxb); + else + IWL_WARNING("Claim null rxb?\n"); + } + + /* For now we just don't re-use anything. We can tweak this + * later to try and re-use notification packets and SKBs that + * fail to Rx correctly */ + if (rxb->skb != NULL) { + priv->alloc_rxb_skb--; + dev_kfree_skb_any(rxb->skb); + rxb->skb = NULL; + } + + pci_unmap_single(priv->pci_dev, rxb->dma_addr, + IWL_RX_BUF_SIZE, PCI_DMA_FROMDEVICE); + spin_lock_irqsave(&rxq->lock, flags); + list_add_tail(&rxb->list, &priv->rxq.rx_used); + spin_unlock_irqrestore(&rxq->lock, flags); + i = (i + 1) & RX_QUEUE_MASK; + } + + /* Backtrack one entry */ + priv->rxq.read = i; + iwl_rx_queue_restock(priv); +} + +int iwl_tx_queue_update_write_ptr(struct iwl_priv *priv, + struct iwl_tx_queue *txq) +{ + u32 reg = 0; + int rc = 0; + int txq_id = txq->q.id; + + if (txq->need_update == 0) + return rc; + + /* if we're trying to save power */ + if (test_bit(STATUS_POWER_PMI, &priv->status)) { + /* wake up nic if it's powered down ... + * uCode will wake up, and interrupt us again, so next + * time we'll skip this part. 
*/ + reg = iwl_read32(priv, CSR_UCODE_DRV_GP1); + + if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) { + IWL_DEBUG_INFO("Requesting wakeup, GP1 = 0x%x\n", reg); + iwl_set_bit(priv, CSR_GP_CNTRL, + CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); + return rc; + } + + /* restore this queue's parameters in nic hardware. */ + rc = iwl_grab_restricted_access(priv); + if (rc) + return rc; + iwl_write_restricted(priv, HBUS_TARG_WRPTR, + txq->q.first_empty | (txq_id << 8)); + iwl_release_restricted_access(priv); + + /* else not in power-save mode, uCode will never sleep when we're + * trying to tx (during RFKILL, we're not trying to tx). */ + } else + iwl_write32(priv, HBUS_TARG_WRPTR, + txq->q.first_empty | (txq_id << 8)); + + txq->need_update = 0; + + return rc; +} + +#ifdef CONFIG_IWLWIFI_DEBUG +static void iwl_print_rx_config_cmd(struct iwl_rxon_cmd *rxon) +{ + DECLARE_MAC_BUF(mac); + + IWL_DEBUG_RADIO("RX CONFIG:\n"); + iwl_print_hex_dump(IWL_DL_RADIO, (u8 *) rxon, sizeof(*rxon)); + IWL_DEBUG_RADIO("u16 channel: 0x%x\n", le16_to_cpu(rxon->channel)); + IWL_DEBUG_RADIO("u32 flags: 0x%08X\n", le32_to_cpu(rxon->flags)); + IWL_DEBUG_RADIO("u32 filter_flags: 0x%08x\n", + le32_to_cpu(rxon->filter_flags)); + IWL_DEBUG_RADIO("u8 dev_type: 0x%x\n", rxon->dev_type); + IWL_DEBUG_RADIO("u8 ofdm_basic_rates: 0x%02x\n", + rxon->ofdm_basic_rates); + IWL_DEBUG_RADIO("u8 cck_basic_rates: 0x%02x\n", rxon->cck_basic_rates); + IWL_DEBUG_RADIO("u8[6] node_addr: %s\n", + print_mac(mac, rxon->node_addr)); + IWL_DEBUG_RADIO("u8[6] bssid_addr: %s\n", + print_mac(mac, rxon->bssid_addr)); + IWL_DEBUG_RADIO("u16 assoc_id: 0x%x\n", le16_to_cpu(rxon->assoc_id)); +} +#endif + +static void iwl_enable_interrupts(struct iwl_priv *priv) +{ + IWL_DEBUG_ISR("Enabling interrupts\n"); + set_bit(STATUS_INT_ENABLED, &priv->status); + iwl_write32(priv, CSR_INT_MASK, CSR_INI_SET_MASK); +} + +static inline void iwl_disable_interrupts(struct iwl_priv *priv) +{ + clear_bit(STATUS_INT_ENABLED, &priv->status); + + /* disable interrupts from uCode/NIC to host */ + iwl_write32(priv, CSR_INT_MASK, 0x00000000); + + /* acknowledge/clear/reset any interrupts still pending + * from uCode or flow handler (Rx/Tx DMA) */ + iwl_write32(priv, CSR_INT, 0xffffffff); + iwl_write32(priv, CSR_FH_INT_STATUS, 0xffffffff); + IWL_DEBUG_ISR("Disabled interrupts\n"); +} + +static const char *desc_lookup(int i) +{ + switch (i) { + case 1: + return "FAIL"; + case 2: + return "BAD_PARAM"; + case 3: + return "BAD_CHECKSUM"; + case 4: + return "NMI_INTERRUPT"; + case 5: + return "SYSASSERT"; + case 6: + return "FATAL_ERROR"; + } + + return "UNKNOWN"; +} + +#define ERROR_START_OFFSET (1 * sizeof(u32)) +#define ERROR_ELEM_SIZE (7 * sizeof(u32)) + +static void iwl_dump_nic_error_log(struct iwl_priv *priv) +{ + u32 data2, line; + u32 desc, time, count, base, data1; + u32 blink1, blink2, ilink1, ilink2; + int rc; + + base = le32_to_cpu(priv->card_alive.error_event_table_ptr); + + if (!iwl_hw_valid_rtc_data_addr(base)) { + IWL_ERROR("Not valid error log pointer 0x%08X\n", base); + return; + } + + rc = iwl_grab_restricted_access(priv); + if (rc) { + IWL_WARNING("Can not read from adapter at this time.\n"); + return; + } + + count = iwl_read_restricted_mem(priv, base); + + if (ERROR_START_OFFSET <= count * ERROR_ELEM_SIZE) { + IWL_ERROR("Start IWL Error Log Dump:\n"); + IWL_ERROR("Status: 0x%08lX, Config: %08X count: %d\n", + priv->status, priv->config, count); + } + + desc = iwl_read_restricted_mem(priv, base + 1 * sizeof(u32)); + blink1 = iwl_read_restricted_mem(priv, base + 3 * 
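iwl_dump_nic_error_log reads the error record as u32s at fixed word offsets from the table base: word 0 is the entry count, word 1 the failure description, words 3-6 the blink/ilink program counters, words 7-8 the data words, word 9 the source line and word 11 the timestamp. The sketch below walks the same layout over a local array standing in for the SRAM reads; the sample values are invented.

    /* Sketch of the fixed-offset error log layout: each field is a u32 at a
     * known word offset from the table base.  A local array stands in for
     * the restricted SRAM reads. */
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        /* word offsets: 0=count 1=desc 3=blink1 4=blink2 5=ilink1 6=ilink2
         *               7=data1 8=data2 9=line 11=time */
        uint32_t sram[12] = {
            1, 5, 0, 0x004d2, 0x004e0, 0x00310, 0x00320,
            0x12345678, 0x9abcdef0, 1234, 0, 987654321
        };

        printf("count=%u desc=%u line=%u time=%u\n",
               (unsigned)sram[0], (unsigned)sram[1],
               (unsigned)sram[9], (unsigned)sram[11]);
        printf("blink1=0x%05x blink2=0x%05x ilink1=0x%05x ilink2=0x%05x\n",
               (unsigned)sram[3], (unsigned)sram[4],
               (unsigned)sram[5], (unsigned)sram[6]);
        printf("data1=0x%08x data2=0x%08x\n",
               (unsigned)sram[7], (unsigned)sram[8]);
        return 0;
    }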
sizeof(u32)); + blink2 = iwl_read_restricted_mem(priv, base + 4 * sizeof(u32)); + ilink1 = iwl_read_restricted_mem(priv, base + 5 * sizeof(u32)); + ilink2 = iwl_read_restricted_mem(priv, base + 6 * sizeof(u32)); + data1 = iwl_read_restricted_mem(priv, base + 7 * sizeof(u32)); + data2 = iwl_read_restricted_mem(priv, base + 8 * sizeof(u32)); + line = iwl_read_restricted_mem(priv, base + 9 * sizeof(u32)); + time = iwl_read_restricted_mem(priv, base + 11 * sizeof(u32)); + + IWL_ERROR("Desc Time " + "data1 data2 line\n"); + IWL_ERROR("%-13s (#%d) %010u 0x%08X 0x%08X %u\n", + desc_lookup(desc), desc, time, data1, data2, line); + IWL_ERROR("blink1 blink2 ilink1 ilink2\n"); + IWL_ERROR("0x%05X 0x%05X 0x%05X 0x%05X\n", blink1, blink2, + ilink1, ilink2); + + iwl_release_restricted_access(priv); +} + +#define EVENT_START_OFFSET (4 * sizeof(u32)) + +/** + * iwl_print_event_log - Dump error event log to syslog + * + * NOTE: Must be called with iwl_grab_restricted_access() already obtained! + */ +static void iwl_print_event_log(struct iwl_priv *priv, u32 start_idx, + u32 num_events, u32 mode) +{ + u32 i; + u32 base; /* SRAM byte address of event log header */ + u32 event_size; /* 2 u32s, or 3 u32s if timestamp recorded */ + u32 ptr; /* SRAM byte address of log data */ + u32 ev, time, data; /* event log data */ + + if (num_events == 0) + return; + + base = le32_to_cpu(priv->card_alive.log_event_table_ptr); + + if (mode == 0) + event_size = 2 * sizeof(u32); + else + event_size = 3 * sizeof(u32); + + ptr = base + EVENT_START_OFFSET + (start_idx * event_size); + + /* "time" is actually "data" for mode 0 (no timestamp). + * place event id # at far right for easier visual parsing. */ + for (i = 0; i < num_events; i++) { + ev = iwl_read_restricted_mem(priv, ptr); + ptr += sizeof(u32); + time = iwl_read_restricted_mem(priv, ptr); + ptr += sizeof(u32); + if (mode == 0) + IWL_ERROR("0x%08x\t%04u\n", time, ev); /* data, ev */ + else { + data = iwl_read_restricted_mem(priv, ptr); + ptr += sizeof(u32); + IWL_ERROR("%010u\t0x%08x\t%04u\n", time, data, ev); + } + } +} + +static void iwl_dump_nic_event_log(struct iwl_priv *priv) +{ + int rc; + u32 base; /* SRAM byte address of event log header */ + u32 capacity; /* event log capacity in # entries */ + u32 mode; /* 0 - no timestamp, 1 - timestamp recorded */ + u32 num_wraps; /* # times uCode wrapped to top of log */ + u32 next_entry; /* index of next entry to be written by uCode */ + u32 size; /* # entries that we'll print */ + + base = le32_to_cpu(priv->card_alive.log_event_table_ptr); + if (!iwl_hw_valid_rtc_data_addr(base)) { + IWL_ERROR("Invalid event log pointer 0x%08X\n", base); + return; + } + + rc = iwl_grab_restricted_access(priv); + if (rc) { + IWL_WARNING("Can not read from adapter at this time.\n"); + return; + } + + /* event log header */ + capacity = iwl_read_restricted_mem(priv, base); + mode = iwl_read_restricted_mem(priv, base + (1 * sizeof(u32))); + num_wraps = iwl_read_restricted_mem(priv, base + (2 * sizeof(u32))); + next_entry = iwl_read_restricted_mem(priv, base + (3 * sizeof(u32))); + + size = num_wraps ? capacity : next_entry; + + /* bail out if nothing in log */ + if (size == 0) { + IWL_ERROR("Start IWL Event Log Dump: nothing in log\n"); + iwl_release_restricted_access(priv); + return; + } + + IWL_ERROR("Start IWL Event Log Dump: display count %d, wraps %d\n", + size, num_wraps); + + /* if uCode has wrapped back to top of log, start at the oldest entry, + * i.e the next one that uCode would fill. 
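The wrap handling that the comment above describes (and that iwl_dump_nic_event_log() carries out just below) amounts to dumping a circular buffer oldest-entry-first. A small self-contained sketch, with invented log contents:

#include <stdint.h>
#include <stdio.h>

static void print_range(const uint32_t *log, uint32_t start, uint32_t count)
{
	for (uint32_t i = 0; i < count; i++)
		printf("entry %u: 0x%08x\n", start + i, log[start + i]);
}

/* Dump a circular log oldest-first.  Once the writer has wrapped, the
 * oldest surviving entry is the slot it would fill next (next_entry);
 * otherwise the log simply runs from slot 0 up to next_entry. */
static void dump_circular(const uint32_t *log, uint32_t capacity,
			  uint32_t next_entry, int wrapped)
{
	if (wrapped)
		print_range(log, next_entry, capacity - next_entry);
	print_range(log, 0, next_entry);
}

int main(void)
{
	/* six writes into four slots: slots 0 and 1 now hold the newest data */
	uint32_t log[4] = { 0x5, 0x6, 0x3, 0x4 };

	dump_circular(log, 4, 2, 1);   /* prints 0x3, 0x4, 0x5, 0x6 */
	return 0;
}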
*/ + if (num_wraps) + iwl_print_event_log(priv, next_entry, + capacity - next_entry, mode); + + /* (then/else) start at top of log */ + iwl_print_event_log(priv, 0, next_entry, mode); + + iwl_release_restricted_access(priv); +} + +/** + * iwl_irq_handle_error - called for HW or SW error interrupt from card + */ +static void iwl_irq_handle_error(struct iwl_priv *priv) +{ + /* Set the FW error flag -- cleared on iwl_down */ + set_bit(STATUS_FW_ERROR, &priv->status); + + /* Cancel currently queued command. */ + clear_bit(STATUS_HCMD_ACTIVE, &priv->status); + +#ifdef CONFIG_IWLWIFI_DEBUG + if (iwl_debug_level & IWL_DL_FW_ERRORS) { + iwl_dump_nic_error_log(priv); + iwl_dump_nic_event_log(priv); + iwl_print_rx_config_cmd(&priv->staging_rxon); + } +#endif + + wake_up_interruptible(&priv->wait_command_queue); + + /* Keep the restart process from trying to send host + * commands by clearing the INIT status bit */ + clear_bit(STATUS_READY, &priv->status); + + if (!test_bit(STATUS_EXIT_PENDING, &priv->status)) { + IWL_DEBUG(IWL_DL_INFO | IWL_DL_FW_ERRORS, + "Restarting adapter due to uCode error.\n"); + + if (iwl_is_associated(priv)) { + memcpy(&priv->recovery_rxon, &priv->active_rxon, + sizeof(priv->recovery_rxon)); + priv->error_recovering = 1; + } + queue_work(priv->workqueue, &priv->restart); + } +} + +static void iwl_error_recovery(struct iwl_priv *priv) +{ + unsigned long flags; + + memcpy(&priv->staging_rxon, &priv->recovery_rxon, + sizeof(priv->staging_rxon)); + priv->staging_rxon.filter_flags &= ~RXON_FILTER_ASSOC_MSK; + iwl_commit_rxon(priv); + + iwl_rxon_add_station(priv, priv->bssid, 1); + + spin_lock_irqsave(&priv->lock, flags); + priv->assoc_id = le16_to_cpu(priv->staging_rxon.assoc_id); + priv->error_recovering = 0; + spin_unlock_irqrestore(&priv->lock, flags); +} + +static void iwl_irq_tasklet(struct iwl_priv *priv) +{ + u32 inta, handled = 0; + u32 inta_fh; + unsigned long flags; +#ifdef CONFIG_IWLWIFI_DEBUG + u32 inta_mask; +#endif + + spin_lock_irqsave(&priv->lock, flags); + + /* Ack/clear/reset pending uCode interrupts. + * Note: Some bits in CSR_INT are "OR" of bits in CSR_FH_INT_STATUS, + * and will clear only when CSR_FH_INT_STATUS gets cleared. */ + inta = iwl_read32(priv, CSR_INT); + iwl_write32(priv, CSR_INT, inta); + + /* Ack/clear/reset pending flow-handler (DMA) interrupts. + * Any new interrupts that happen after this, either while we're + * in this tasklet, or later, will show up in next ISR/tasklet. */ + inta_fh = iwl_read32(priv, CSR_FH_INT_STATUS); + iwl_write32(priv, CSR_FH_INT_STATUS, inta_fh); + +#ifdef CONFIG_IWLWIFI_DEBUG + if (iwl_debug_level & IWL_DL_ISR) { + inta_mask = iwl_read32(priv, CSR_INT_MASK); /* just for debug */ + IWL_DEBUG_ISR("inta 0x%08x, enabled 0x%08x, fh 0x%08x\n", + inta, inta_mask, inta_fh); + } +#endif + + /* Since CSR_INT and CSR_FH_INT_STATUS reads and clears are not + * atomic, make sure that inta covers all the interrupts that + * we've discovered, even if FH interrupt came in just after + * reading CSR_INT. */ + if (inta_fh & CSR_FH_INT_RX_MASK) + inta |= CSR_INT_BIT_FH_RX; + if (inta_fh & CSR_FH_INT_TX_MASK) + inta |= CSR_INT_BIT_FH_TX; + + /* Now service all interrupt bits discovered above. */ + if (inta & CSR_INT_BIT_HW_ERR) { + IWL_ERROR("Microcode HW error detected. 
Restarting.\n"); + + /* Tell the device to stop sending interrupts */ + iwl_disable_interrupts(priv); + + iwl_irq_handle_error(priv); + + handled |= CSR_INT_BIT_HW_ERR; + + spin_unlock_irqrestore(&priv->lock, flags); + + return; + } + +#ifdef CONFIG_IWLWIFI_DEBUG + if (iwl_debug_level & (IWL_DL_ISR)) { + /* NIC fires this, but we don't use it, redundant with WAKEUP */ + if (inta & CSR_INT_BIT_MAC_CLK_ACTV) + IWL_DEBUG_ISR("Microcode started or stopped.\n"); + + /* Alive notification via Rx interrupt will do the real work */ + if (inta & CSR_INT_BIT_ALIVE) + IWL_DEBUG_ISR("Alive interrupt\n"); + } +#endif + /* Safely ignore these bits for debug checks below */ + inta &= ~(CSR_INT_BIT_MAC_CLK_ACTV | CSR_INT_BIT_ALIVE); + + /* HW RF KILL switch toggled (4965 only) */ + if (inta & CSR_INT_BIT_RF_KILL) { + int hw_rf_kill = 0; + if (!(iwl_read32(priv, CSR_GP_CNTRL) & + CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW)) + hw_rf_kill = 1; + + IWL_DEBUG(IWL_DL_INFO | IWL_DL_RF_KILL | IWL_DL_ISR, + "RF_KILL bit toggled to %s.\n", + hw_rf_kill ? "disable radio":"enable radio"); + + /* Queue restart only if RF_KILL switch was set to "kill" + * when we loaded driver, and is now set to "enable". + * After we're Alive, RF_KILL gets handled by + * iwl_rx_card_state_notif() */ + if (!hw_rf_kill && !test_bit(STATUS_ALIVE, &priv->status)) + queue_work(priv->workqueue, &priv->restart); + + handled |= CSR_INT_BIT_RF_KILL; + } + + /* Chip got too hot and stopped itself (4965 only) */ + if (inta & CSR_INT_BIT_CT_KILL) { + IWL_ERROR("Microcode CT kill error detected.\n"); + handled |= CSR_INT_BIT_CT_KILL; + } + + /* Error detected by uCode */ + if (inta & CSR_INT_BIT_SW_ERR) { + IWL_ERROR("Microcode SW error detected. Restarting 0x%X.\n", + inta); + iwl_irq_handle_error(priv); + handled |= CSR_INT_BIT_SW_ERR; + } + + /* uCode wakes up after power-down sleep */ + if (inta & CSR_INT_BIT_WAKEUP) { + IWL_DEBUG_ISR("Wakeup interrupt\n"); + iwl_rx_queue_update_write_ptr(priv, &priv->rxq); + iwl_tx_queue_update_write_ptr(priv, &priv->txq[0]); + iwl_tx_queue_update_write_ptr(priv, &priv->txq[1]); + iwl_tx_queue_update_write_ptr(priv, &priv->txq[2]); + iwl_tx_queue_update_write_ptr(priv, &priv->txq[3]); + iwl_tx_queue_update_write_ptr(priv, &priv->txq[4]); + iwl_tx_queue_update_write_ptr(priv, &priv->txq[5]); + + handled |= CSR_INT_BIT_WAKEUP; + } + + /* All uCode command responses, including Tx command responses, + * Rx "responses" (frame-received notification), and other + * notifications from uCode come through here*/ + if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX)) { + iwl_rx_handle(priv); + handled |= (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX); + } + + if (inta & CSR_INT_BIT_FH_TX) { + IWL_DEBUG_ISR("Tx interrupt\n"); + handled |= CSR_INT_BIT_FH_TX; + } + + if (inta & ~handled) + IWL_ERROR("Unhandled INTA bits 0x%08x\n", inta & ~handled); + + if (inta & ~CSR_INI_SET_MASK) { + IWL_WARNING("Disabled INTA bits 0x%08x were pending\n", + inta & ~CSR_INI_SET_MASK); + IWL_WARNING(" with FH_INT = 0x%08x\n", inta_fh); + } + + /* Re-enable all interrupts */ + iwl_enable_interrupts(priv); + +#ifdef CONFIG_IWLWIFI_DEBUG + if (iwl_debug_level & (IWL_DL_ISR)) { + inta = iwl_read32(priv, CSR_INT); + inta_mask = iwl_read32(priv, CSR_INT_MASK); + inta_fh = iwl_read32(priv, CSR_FH_INT_STATUS); + IWL_DEBUG_ISR("End inta 0x%08x, enabled 0x%08x, fh 0x%08x, " + "flags 0x%08lx\n", inta, inta_mask, inta_fh, flags); + } +#endif + spin_unlock_irqrestore(&priv->lock, flags); +} + +static irqreturn_t iwl_isr(int irq, void *data) +{ + struct iwl_priv *priv 
= data; + u32 inta, inta_mask; + u32 inta_fh; + if (!priv) + return IRQ_NONE; + + spin_lock(&priv->lock); + + /* Disable (but don't clear!) interrupts here to avoid + * back-to-back ISRs and sporadic interrupts from our NIC. + * If we have something to service, the tasklet will re-enable ints. + * If we *don't* have something, we'll re-enable before leaving here. */ + inta_mask = iwl_read32(priv, CSR_INT_MASK); /* just for debug */ + iwl_write32(priv, CSR_INT_MASK, 0x00000000); + + /* Discover which interrupts are active/pending */ + inta = iwl_read32(priv, CSR_INT); + inta_fh = iwl_read32(priv, CSR_FH_INT_STATUS); + + /* Ignore interrupt if there's nothing in NIC to service. + * This may be due to IRQ shared with another device, + * or due to sporadic interrupts thrown from our NIC. */ + if (!inta && !inta_fh) { + IWL_DEBUG_ISR("Ignore interrupt, inta == 0, inta_fh == 0\n"); + goto none; + } + + if ((inta == 0xFFFFFFFF) || ((inta & 0xFFFFFFF0) == 0xa5a5a5a0)) { + /* Hardware disappeared */ + IWL_WARNING("HARDWARE GONE?? INTA == 0x%080x\n", inta); + goto none; + } + + IWL_DEBUG_ISR("ISR inta 0x%08x, enabled 0x%08x, fh 0x%08x\n", + inta, inta_mask, inta_fh); + + /* iwl_irq_tasklet() will service interrupts and re-enable them */ + tasklet_schedule(&priv->irq_tasklet); + spin_unlock(&priv->lock); + + return IRQ_HANDLED; + + none: + /* re-enable interrupts here since we don't have anything to service. */ + iwl_enable_interrupts(priv); + spin_unlock(&priv->lock); + return IRQ_NONE; +} + +/************************** EEPROM BANDS **************************** + * + * The iwl_eeprom_band definitions below provide the mapping from the + * EEPROM contents to the specific channel number supported for each + * band. + * + * For example, iwl_priv->eeprom.band_3_channels[4] from the band_3 + * definition below maps to physical channel 42 in the 5.2GHz spectrum. + * The specific geography and calibration information for that channel + * is contained in the eeprom map itself. + * + * During init, we copy the eeprom information and channel map + * information into priv->channel_info_24/52 and priv->channel_map_24/52 + * + * channel_map_24/52 provides the index in the channel_info array for a + * given channel. We have to have two separate maps as there is channel + * overlap with the 2.4GHz and 5.2GHz spectrum as seen in band_1 and + * band_2 + * + * A value of 0xff stored in the channel_map indicates that the channel + * is not supported by the hardware at all. + * + * A value of 0xfe in the channel_map indicates that the channel is not + * valid for Tx with the current hardware. This means that + * while the system can tune and receive on a given channel, it may not + * be able to associate or transmit any frames on that + * channel. There is no corresponding channel information for that + * entry. 
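The channel_map convention described above (0xff for channels the hardware does not support at all, 0xfe for channels that can only be tuned or received on) can be illustrated with a tiny lookup table. The table size and which channels get which marker are invented for the example; only the two sentinel values come from the comment above.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define CH_UNSUPPORTED 0xff    /* hardware cannot use this channel at all */
#define CH_RX_ONLY     0xfe    /* can tune/receive, but no tx or association */

static uint8_t channel_map_24[15];    /* indexed by 2.4 GHz channel number 1..14 */

static void build_example_map(void)
{
	memset(channel_map_24, CH_UNSUPPORTED, sizeof(channel_map_24));
	for (int ch = 1; ch <= 11; ch++)          /* pretend 1..11 are fully usable */
		channel_map_24[ch] = (uint8_t)(ch - 1);   /* index into channel info */
	channel_map_24[12] = CH_RX_ONLY;
	channel_map_24[13] = CH_RX_ONLY;
}

int main(void)
{
	build_example_map();
	for (int ch = 1; ch <= 14; ch++)
		printf("ch %2d -> %s\n", ch,
		       channel_map_24[ch] == CH_UNSUPPORTED ? "unsupported" :
		       channel_map_24[ch] == CH_RX_ONLY ? "rx-only" : "mapped");
	return 0;
}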
+ * + *********************************************************************/ + +/* 2.4 GHz */ +static const u8 iwl_eeprom_band_1[14] = { + 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14 +}; + +/* 5.2 GHz bands */ +static const u8 iwl_eeprom_band_2[] = { + 183, 184, 185, 187, 188, 189, 192, 196, 7, 8, 11, 12, 16 +}; + +static const u8 iwl_eeprom_band_3[] = { /* 5205-5320MHz */ + 34, 36, 38, 40, 42, 44, 46, 48, 52, 56, 60, 64 +}; + +static const u8 iwl_eeprom_band_4[] = { /* 5500-5700MHz */ + 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140 +}; + +static const u8 iwl_eeprom_band_5[] = { /* 5725-5825MHz */ + 145, 149, 153, 157, 161, 165 +}; + +static u8 iwl_eeprom_band_6[] = { /* 2.4 FAT channel */ + 1, 2, 3, 4, 5, 6, 7 +}; + +static u8 iwl_eeprom_band_7[] = { /* 5.2 FAT channel */ + 36, 44, 52, 60, 100, 108, 116, 124, 132, 149, 157 +}; + +static void iwl_init_band_reference(const struct iwl_priv *priv, int band, + int *eeprom_ch_count, + const struct iwl_eeprom_channel + **eeprom_ch_info, + const u8 **eeprom_ch_index) +{ + switch (band) { + case 1: /* 2.4GHz band */ + *eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_1); + *eeprom_ch_info = priv->eeprom.band_1_channels; + *eeprom_ch_index = iwl_eeprom_band_1; + break; + case 2: /* 5.2GHz band */ + *eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_2); + *eeprom_ch_info = priv->eeprom.band_2_channels; + *eeprom_ch_index = iwl_eeprom_band_2; + break; + case 3: /* 5.2GHz band */ + *eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_3); + *eeprom_ch_info = priv->eeprom.band_3_channels; + *eeprom_ch_index = iwl_eeprom_band_3; + break; + case 4: /* 5.2GHz band */ + *eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_4); + *eeprom_ch_info = priv->eeprom.band_4_channels; + *eeprom_ch_index = iwl_eeprom_band_4; + break; + case 5: /* 5.2GHz band */ + *eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_5); + *eeprom_ch_info = priv->eeprom.band_5_channels; + *eeprom_ch_index = iwl_eeprom_band_5; + break; + case 6: + *eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_6); + *eeprom_ch_info = priv->eeprom.band_24_channels; + *eeprom_ch_index = iwl_eeprom_band_6; + break; + case 7: + *eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_7); + *eeprom_ch_info = priv->eeprom.band_52_channels; + *eeprom_ch_index = iwl_eeprom_band_7; + break; + default: + BUG(); + return; + } +} + +const struct iwl_channel_info *iwl_get_channel_info(const struct iwl_priv *priv, + int phymode, u16 channel) +{ + int i; + + switch (phymode) { + case MODE_IEEE80211A: + for (i = 14; i < priv->channel_count; i++) { + if (priv->channel_info[i].channel == channel) + return &priv->channel_info[i]; + } + break; + + case MODE_IEEE80211B: + case MODE_IEEE80211G: + if (channel >= 1 && channel <= 14) + return &priv->channel_info[channel - 1]; + break; + + } + + return NULL; +} + +#define CHECK_AND_PRINT(x) ((eeprom_ch_info[ch].flags & EEPROM_CHANNEL_##x) \ + ? 
# x " " : "") + +static int iwl_init_channel_map(struct iwl_priv *priv) +{ + int eeprom_ch_count = 0; + const u8 *eeprom_ch_index = NULL; + const struct iwl_eeprom_channel *eeprom_ch_info = NULL; + int band, ch; + struct iwl_channel_info *ch_info; + + if (priv->channel_count) { + IWL_DEBUG_INFO("Channel map already initialized.\n"); + return 0; + } + + if (priv->eeprom.version < 0x2f) { + IWL_WARNING("Unsupported EEPROM version: 0x%04X\n", + priv->eeprom.version); + return -EINVAL; + } + + IWL_DEBUG_INFO("Initializing regulatory info from EEPROM\n"); + + priv->channel_count = + ARRAY_SIZE(iwl_eeprom_band_1) + + ARRAY_SIZE(iwl_eeprom_band_2) + + ARRAY_SIZE(iwl_eeprom_band_3) + + ARRAY_SIZE(iwl_eeprom_band_4) + + ARRAY_SIZE(iwl_eeprom_band_5); + + IWL_DEBUG_INFO("Parsing data for %d channels.\n", priv->channel_count); + + priv->channel_info = kzalloc(sizeof(struct iwl_channel_info) * + priv->channel_count, GFP_KERNEL); + if (!priv->channel_info) { + IWL_ERROR("Could not allocate channel_info\n"); + priv->channel_count = 0; + return -ENOMEM; + } + + ch_info = priv->channel_info; + + /* Loop through the 5 EEPROM bands adding them in order to the + * channel map we maintain (that contains additional information than + * what just in the EEPROM) */ + for (band = 1; band <= 5; band++) { + + iwl_init_band_reference(priv, band, &eeprom_ch_count, + &eeprom_ch_info, &eeprom_ch_index); + + /* Loop through each band adding each of the channels */ + for (ch = 0; ch < eeprom_ch_count; ch++) { + ch_info->channel = eeprom_ch_index[ch]; + ch_info->phymode = (band == 1) ? MODE_IEEE80211B : + MODE_IEEE80211A; + + /* permanently store EEPROM's channel regulatory flags + * and max power in channel info database. */ + ch_info->eeprom = eeprom_ch_info[ch]; + + /* Copy the run-time flags so they are there even on + * invalid channels */ + ch_info->flags = eeprom_ch_info[ch].flags; + + if (!(is_channel_valid(ch_info))) { + IWL_DEBUG_INFO("Ch. %d Flags %x [%sGHz] - " + "No traffic\n", + ch_info->channel, + ch_info->flags, + is_channel_a_band(ch_info) ? + "5.2" : "2.4"); + ch_info++; + continue; + } + + /* Initialize regulatory-based run-time data */ + ch_info->max_power_avg = ch_info->curr_txpow = + eeprom_ch_info[ch].max_power_avg; + ch_info->scan_power = eeprom_ch_info[ch].max_power_avg; + ch_info->min_power = 0; + + IWL_DEBUG_INFO("Ch. %d [%sGHz] %s%s%s%s%s%s(0x%02x" + " %ddBm): Ad-Hoc %ssupported\n", + ch_info->channel, + is_channel_a_band(ch_info) ? + "5.2" : "2.4", + CHECK_AND_PRINT(IBSS), + CHECK_AND_PRINT(ACTIVE), + CHECK_AND_PRINT(RADAR), + CHECK_AND_PRINT(WIDE), + CHECK_AND_PRINT(NARROW), + CHECK_AND_PRINT(DFS), + eeprom_ch_info[ch].flags, + eeprom_ch_info[ch].max_power_avg, + ((eeprom_ch_info[ch]. + flags & EEPROM_CHANNEL_IBSS) + && !(eeprom_ch_info[ch]. + flags & EEPROM_CHANNEL_RADAR)) + ? "" : "not "); + + /* Set the user_txpower_limit to the highest power + * supported by any channel */ + if (eeprom_ch_info[ch].max_power_avg > + priv->user_txpower_limit) + priv->user_txpower_limit = + eeprom_ch_info[ch].max_power_avg; + + ch_info++; + } + } + + for (band = 6; band <= 7; band++) { + int phymode; + u8 fat_extension_chan; + + iwl_init_band_reference(priv, band, &eeprom_ch_count, + &eeprom_ch_info, &eeprom_ch_index); + + phymode = (band == 6) ? 
MODE_IEEE80211B : MODE_IEEE80211A; + /* Loop through each band adding each of the channels */ + for (ch = 0; ch < eeprom_ch_count; ch++) { + + if ((band == 6) && + ((eeprom_ch_index[ch] == 5) || + (eeprom_ch_index[ch] == 6) || + (eeprom_ch_index[ch] == 7))) + fat_extension_chan = HT_IE_EXT_CHANNEL_MAX; + else + fat_extension_chan = HT_IE_EXT_CHANNEL_ABOVE; + + iwl4965_set_fat_chan_info(priv, phymode, + eeprom_ch_index[ch], + &(eeprom_ch_info[ch]), + fat_extension_chan); + + iwl4965_set_fat_chan_info(priv, phymode, + (eeprom_ch_index[ch] + 4), + &(eeprom_ch_info[ch]), + HT_IE_EXT_CHANNEL_BELOW); + } + } + + return 0; +} + +/* For active scan, listen ACTIVE_DWELL_TIME (msec) on each channel after + * sending probe req. This should be set long enough to hear probe responses + * from more than one AP. */ +#define IWL_ACTIVE_DWELL_TIME_24 (20) /* all times in msec */ +#define IWL_ACTIVE_DWELL_TIME_52 (10) + +/* For faster active scanning, scan will move to the next channel if fewer than + * PLCP_QUIET_THRESH packets are heard on this channel within + * ACTIVE_QUIET_TIME after sending probe request. This shortens the dwell + * time if it's a quiet channel (nothing responded to our probe, and there's + * no other traffic). + * Disable "quiet" feature by setting PLCP_QUIET_THRESH to 0. */ +#define IWL_PLCP_QUIET_THRESH __constant_cpu_to_le16(1) /* packets */ +#define IWL_ACTIVE_QUIET_TIME __constant_cpu_to_le16(5) /* msec */ + +/* For passive scan, listen PASSIVE_DWELL_TIME (msec) on each channel. + * Must be set longer than active dwell time. + * For the most reliable scan, set > AP beacon interval (typically 100msec). */ +#define IWL_PASSIVE_DWELL_TIME_24 (20) /* all times in msec */ +#define IWL_PASSIVE_DWELL_TIME_52 (10) +#define IWL_PASSIVE_DWELL_BASE (100) +#define IWL_CHANNEL_TUNE_TIME 5 + +static inline u16 iwl_get_active_dwell_time(struct iwl_priv *priv, int phymode) +{ + if (phymode == MODE_IEEE80211A) + return IWL_ACTIVE_DWELL_TIME_52; + else + return IWL_ACTIVE_DWELL_TIME_24; +} + +static u16 iwl_get_passive_dwell_time(struct iwl_priv *priv, int phymode) +{ + u16 active = iwl_get_active_dwell_time(priv, phymode); + u16 passive = (phymode != MODE_IEEE80211A) ? 
+ IWL_PASSIVE_DWELL_BASE + IWL_PASSIVE_DWELL_TIME_24 : + IWL_PASSIVE_DWELL_BASE + IWL_PASSIVE_DWELL_TIME_52; + + if (iwl_is_associated(priv)) { + /* If we're associated, we clamp the maximum passive + * dwell time to be 98% of the beacon interval (minus + * 2 * channel tune time) */ + passive = priv->beacon_int; + if ((passive > IWL_PASSIVE_DWELL_BASE) || !passive) + passive = IWL_PASSIVE_DWELL_BASE; + passive = (passive * 98) / 100 - IWL_CHANNEL_TUNE_TIME * 2; + } + + if (passive <= active) + passive = active + 1; + + return passive; +} + +static int iwl_get_channels_for_scan(struct iwl_priv *priv, int phymode, + u8 is_active, u8 direct_mask, + struct iwl_scan_channel *scan_ch) +{ + const struct ieee80211_channel *channels = NULL; + const struct ieee80211_hw_mode *hw_mode; + const struct iwl_channel_info *ch_info; + u16 passive_dwell = 0; + u16 active_dwell = 0; + int added, i; + + hw_mode = iwl_get_hw_mode(priv, phymode); + if (!hw_mode) + return 0; + + channels = hw_mode->channels; + + active_dwell = iwl_get_active_dwell_time(priv, phymode); + passive_dwell = iwl_get_passive_dwell_time(priv, phymode); + + for (i = 0, added = 0; i < hw_mode->num_channels; i++) { + if (channels[i].chan == + le16_to_cpu(priv->active_rxon.channel)) { + if (iwl_is_associated(priv)) { + IWL_DEBUG_SCAN + ("Skipping current channel %d\n", + le16_to_cpu(priv->active_rxon.channel)); + continue; + } + } else if (priv->only_active_channel) + continue; + + scan_ch->channel = channels[i].chan; + + ch_info = iwl_get_channel_info(priv, phymode, scan_ch->channel); + if (!is_channel_valid(ch_info)) { + IWL_DEBUG_SCAN("Channel %d is INVALID for this SKU.\n", + scan_ch->channel); + continue; + } + + if (!is_active || is_channel_passive(ch_info) || + !(channels[i].flag & IEEE80211_CHAN_W_ACTIVE_SCAN)) + scan_ch->type = 0; /* passive */ + else + scan_ch->type = 1; /* active */ + + if (scan_ch->type & 1) + scan_ch->type |= (direct_mask << 1); + + if (is_channel_narrow(ch_info)) + scan_ch->type |= (1 << 7); + + scan_ch->active_dwell = cpu_to_le16(active_dwell); + scan_ch->passive_dwell = cpu_to_le16(passive_dwell); + + /* Set power levels to defaults */ + scan_ch->tpc.dsp_atten = 110; + /* scan_pwr_info->tpc.dsp_atten; */ + + /*scan_pwr_info->tpc.tx_gain; */ + if (phymode == MODE_IEEE80211A) + scan_ch->tpc.tx_gain = ((1 << 5) | (3 << 3)) | 3; + else { + scan_ch->tpc.tx_gain = ((1 << 5) | (5 << 3)); + /* NOTE: if we were doing 6Mb OFDM for scans we'd use + * power level + scan_ch->tpc.tx_gain = ((1<<5) | (2 << 3)) | 3; + */ + } + + IWL_DEBUG_SCAN("Scanning %d [%s %d]\n", + scan_ch->channel, + (scan_ch->type & 1) ? "ACTIVE" : "PASSIVE", + (scan_ch->type & 1) ? 
+ active_dwell : passive_dwell); + + scan_ch++; + added++; + } + + IWL_DEBUG_SCAN("total channels to scan %d \n", added); + return added; +} + +static void iwl_reset_channel_flag(struct iwl_priv *priv) +{ + int i, j; + for (i = 0; i < 3; i++) { + struct ieee80211_hw_mode *hw_mode = (void *)&priv->modes[i]; + for (j = 0; j < hw_mode->num_channels; j++) + hw_mode->channels[j].flag = hw_mode->channels[j].val; + } +} + +static void iwl_init_hw_rates(struct iwl_priv *priv, + struct ieee80211_rate *rates) +{ + int i; + + for (i = 0; i < IWL_RATE_COUNT; i++) { + rates[i].rate = iwl_rates[i].ieee * 5; + rates[i].val = i; /* Rate scaling will work on indexes */ + rates[i].val2 = i; + rates[i].flags = IEEE80211_RATE_SUPPORTED; + /* Only OFDM have the bits-per-symbol set */ + if ((i <= IWL_LAST_OFDM_RATE) && (i >= IWL_FIRST_OFDM_RATE)) + rates[i].flags |= IEEE80211_RATE_OFDM; + else { + /* + * If CCK 1M then set rate flag to CCK else CCK_2 + * which is CCK | PREAMBLE2 + */ + rates[i].flags |= (iwl_rates[i].plcp == 10) ? + IEEE80211_RATE_CCK : IEEE80211_RATE_CCK_2; + } + + /* Set up which ones are basic rates... */ + if (IWL_BASIC_RATES_MASK & (1 << i)) + rates[i].flags |= IEEE80211_RATE_BASIC; + } + + iwl4965_init_hw_rates(priv, rates); +} + +/** + * iwl_init_geos - Initialize mac80211's geo/channel info based from eeprom + */ +static int iwl_init_geos(struct iwl_priv *priv) +{ + struct iwl_channel_info *ch; + struct ieee80211_hw_mode *modes; + struct ieee80211_channel *channels; + struct ieee80211_channel *geo_ch; + struct ieee80211_rate *rates; + int i = 0; + enum { + A = 0, + B = 1, + G = 2, + A_11N = 3, + G_11N = 4, + }; + int mode_count = 5; + + if (priv->modes) { + IWL_DEBUG_INFO("Geography modes already initialized.\n"); + set_bit(STATUS_GEO_CONFIGURED, &priv->status); + return 0; + } + + modes = kzalloc(sizeof(struct ieee80211_hw_mode) * mode_count, + GFP_KERNEL); + if (!modes) + return -ENOMEM; + + channels = kzalloc(sizeof(struct ieee80211_channel) * + priv->channel_count, GFP_KERNEL); + if (!channels) { + kfree(modes); + return -ENOMEM; + } + + rates = kzalloc((sizeof(struct ieee80211_rate) * (IWL_MAX_RATES + 1)), + GFP_KERNEL); + if (!rates) { + kfree(modes); + kfree(channels); + return -ENOMEM; + } + + /* 0 = 802.11a + * 1 = 802.11b + * 2 = 802.11g + */ + + /* 5.2GHz channels start after the 2.4GHz channels */ + modes[A].mode = MODE_IEEE80211A; + modes[A].channels = &channels[ARRAY_SIZE(iwl_eeprom_band_1)]; + modes[A].rates = rates; + modes[A].num_rates = 8; /* just OFDM */ + modes[A].rates = &rates[4]; + modes[A].num_channels = 0; + + modes[B].mode = MODE_IEEE80211B; + modes[B].channels = channels; + modes[B].rates = rates; + modes[B].num_rates = 4; /* just CCK */ + modes[B].num_channels = 0; + + modes[G].mode = MODE_IEEE80211G; + modes[G].channels = channels; + modes[G].rates = rates; + modes[G].num_rates = 12; /* OFDM & CCK */ + modes[G].num_channels = 0; + + modes[G_11N].mode = MODE_IEEE80211G; + modes[G_11N].channels = channels; + modes[G_11N].num_rates = 13; /* OFDM & CCK */ + modes[G_11N].rates = rates; + modes[G_11N].num_channels = 0; + + modes[A_11N].mode = MODE_IEEE80211A; + modes[A_11N].channels = &channels[ARRAY_SIZE(iwl_eeprom_band_1)]; + modes[A_11N].rates = &rates[4]; + modes[A_11N].num_rates = 9; /* just OFDM */ + modes[A_11N].num_channels = 0; + + priv->ieee_channels = channels; + priv->ieee_rates = rates; + + iwl_init_hw_rates(priv, rates); + + for (i = 0, geo_ch = channels; i < priv->channel_count; i++) { + ch = &priv->channel_info[i]; + + if (!is_channel_valid(ch)) { 
+ IWL_DEBUG_INFO("Channel %d [%sGHz] is restricted -- " + "skipping.\n", + ch->channel, is_channel_a_band(ch) ? + "5.2" : "2.4"); + continue; + } + + if (is_channel_a_band(ch)) { + geo_ch = &modes[A].channels[modes[A].num_channels++]; + modes[A_11N].num_channels++; + } else { + geo_ch = &modes[B].channels[modes[B].num_channels++]; + modes[G].num_channels++; + modes[G_11N].num_channels++; + } + + geo_ch->freq = ieee80211chan2mhz(ch->channel); + geo_ch->chan = ch->channel; + geo_ch->power_level = ch->max_power_avg; + geo_ch->antenna_max = 0xff; + + if (is_channel_valid(ch)) { + geo_ch->flag = IEEE80211_CHAN_W_SCAN; + if (ch->flags & EEPROM_CHANNEL_IBSS) + geo_ch->flag |= IEEE80211_CHAN_W_IBSS; + + if (ch->flags & EEPROM_CHANNEL_ACTIVE) + geo_ch->flag |= IEEE80211_CHAN_W_ACTIVE_SCAN; + + if (ch->flags & EEPROM_CHANNEL_RADAR) + geo_ch->flag |= IEEE80211_CHAN_W_RADAR_DETECT; + + if (ch->max_power_avg > priv->max_channel_txpower_limit) + priv->max_channel_txpower_limit = + ch->max_power_avg; + } + + geo_ch->val = geo_ch->flag; + } + + if ((modes[A].num_channels == 0) && priv->is_abg) { + printk(KERN_INFO DRV_NAME + ": Incorrectly detected BG card as ABG. Please send " + "your PCI ID 0x%04X:0x%04X to maintainer.\n", + priv->pci_dev->device, priv->pci_dev->subsystem_device); + priv->is_abg = 0; + } + + printk(KERN_INFO DRV_NAME + ": Tunable channels: %d 802.11bg, %d 802.11a channels\n", + modes[G].num_channels, modes[A].num_channels); + + /* + * NOTE: We register these in preference of order -- the + * stack doesn't currently (as of 7.0.6 / Apr 24 '07) pick + * a phymode based on rates or AP capabilities but seems to + * configure it purely on if the channel being configured + * is supported by a mode -- and the first match is taken + */ + + if (modes[G].num_channels) + ieee80211_register_hwmode(priv->hw, &modes[G]); + if (modes[B].num_channels) + ieee80211_register_hwmode(priv->hw, &modes[B]); + if (modes[A].num_channels) + ieee80211_register_hwmode(priv->hw, &modes[A]); + + priv->modes = modes; + set_bit(STATUS_GEO_CONFIGURED, &priv->status); + + return 0; +} + +/****************************************************************************** + * + * uCode download functions + * + ******************************************************************************/ + +static void iwl_dealloc_ucode_pci(struct iwl_priv *priv) +{ + if (priv->ucode_code.v_addr != NULL) { + pci_free_consistent(priv->pci_dev, + priv->ucode_code.len, + priv->ucode_code.v_addr, + priv->ucode_code.p_addr); + priv->ucode_code.v_addr = NULL; + } + if (priv->ucode_data.v_addr != NULL) { + pci_free_consistent(priv->pci_dev, + priv->ucode_data.len, + priv->ucode_data.v_addr, + priv->ucode_data.p_addr); + priv->ucode_data.v_addr = NULL; + } + if (priv->ucode_data_backup.v_addr != NULL) { + pci_free_consistent(priv->pci_dev, + priv->ucode_data_backup.len, + priv->ucode_data_backup.v_addr, + priv->ucode_data_backup.p_addr); + priv->ucode_data_backup.v_addr = NULL; + } + if (priv->ucode_init.v_addr != NULL) { + pci_free_consistent(priv->pci_dev, + priv->ucode_init.len, + priv->ucode_init.v_addr, + priv->ucode_init.p_addr); + priv->ucode_init.v_addr = NULL; + } + if (priv->ucode_init_data.v_addr != NULL) { + pci_free_consistent(priv->pci_dev, + priv->ucode_init_data.len, + priv->ucode_init_data.v_addr, + priv->ucode_init_data.p_addr); + priv->ucode_init_data.v_addr = NULL; + } + if (priv->ucode_boot.v_addr != NULL) { + pci_free_consistent(priv->pci_dev, + priv->ucode_boot.len, + priv->ucode_boot.v_addr, + priv->ucode_boot.p_addr); + 
priv->ucode_boot.v_addr = NULL; + } +} + +/** + * iwl_verify_inst_full - verify runtime uCode image in card vs. host, + * looking at all data. + */ +static int iwl_verify_inst_full(struct iwl_priv *priv, __le32 * image, u32 len) +{ + u32 val; + u32 save_len = len; + int rc = 0; + u32 errcnt; + + IWL_DEBUG_INFO("ucode inst image size is %u\n", len); + + rc = iwl_grab_restricted_access(priv); + if (rc) + return rc; + + iwl_write_restricted(priv, HBUS_TARG_MEM_RADDR, RTC_INST_LOWER_BOUND); + + errcnt = 0; + for (; len > 0; len -= sizeof(u32), image++) { + /* read data comes through single port, auto-incr addr */ + /* NOTE: Use the debugless read so we don't flood kernel log + * if IWL_DL_IO is set */ + val = _iwl_read_restricted(priv, HBUS_TARG_MEM_RDAT); + if (val != le32_to_cpu(*image)) { + IWL_ERROR("uCode INST section is invalid at " + "offset 0x%x, is 0x%x, s/b 0x%x\n", + save_len - len, val, le32_to_cpu(*image)); + rc = -EIO; + errcnt++; + if (errcnt >= 20) + break; + } + } + + iwl_release_restricted_access(priv); + + if (!errcnt) + IWL_DEBUG_INFO + ("ucode image in INSTRUCTION memory is good\n"); + + return rc; +} + + +/** + * iwl_verify_inst_sparse - verify runtime uCode image in card vs. host, + * using sample data 100 bytes apart. If these sample points are good, + * it's a pretty good bet that everything between them is good, too. + */ +static int iwl_verify_inst_sparse(struct iwl_priv *priv, __le32 *image, u32 len) +{ + u32 val; + int rc = 0; + u32 errcnt = 0; + u32 i; + + IWL_DEBUG_INFO("ucode inst image size is %u\n", len); + + rc = iwl_grab_restricted_access(priv); + if (rc) + return rc; + + for (i = 0; i < len; i += 100, image += 100/sizeof(u32)) { + /* read data comes through single port, auto-incr addr */ + /* NOTE: Use the debugless read so we don't flood kernel log + * if IWL_DL_IO is set */ + iwl_write_restricted(priv, HBUS_TARG_MEM_RADDR, + i + RTC_INST_LOWER_BOUND); + val = _iwl_read_restricted(priv, HBUS_TARG_MEM_RDAT); + if (val != le32_to_cpu(*image)) { +#if 0 /* Enable this if you want to see details */ + IWL_ERROR("uCode INST section is invalid at " + "offset 0x%x, is 0x%x, s/b 0x%x\n", + i, val, *image); +#endif + rc = -EIO; + errcnt++; + if (errcnt >= 3) + break; + } + } + + iwl_release_restricted_access(priv); + + return rc; +} + + +/** + * iwl_verify_ucode - determine which instruction image is in SRAM, + * and verify its contents + */ +static int iwl_verify_ucode(struct iwl_priv *priv) +{ + __le32 *image; + u32 len; + int rc = 0; + + /* Try bootstrap */ + image = (__le32 *)priv->ucode_boot.v_addr; + len = priv->ucode_boot.len; + rc = iwl_verify_inst_sparse(priv, image, len); + if (rc == 0) { + IWL_DEBUG_INFO("Bootstrap uCode is good in inst SRAM\n"); + return 0; + } + + /* Try initialize */ + image = (__le32 *)priv->ucode_init.v_addr; + len = priv->ucode_init.len; + rc = iwl_verify_inst_sparse(priv, image, len); + if (rc == 0) { + IWL_DEBUG_INFO("Initialize uCode is good in inst SRAM\n"); + return 0; + } + + /* Try runtime/protocol */ + image = (__le32 *)priv->ucode_code.v_addr; + len = priv->ucode_code.len; + rc = iwl_verify_inst_sparse(priv, image, len); + if (rc == 0) { + IWL_DEBUG_INFO("Runtime uCode is good in inst SRAM\n"); + return 0; + } + + IWL_ERROR("NO VALID UCODE IMAGE IN INSTRUCTION SRAM!!\n"); + + /* Show first several data entries in instruction SRAM. + * Selection of bootstrap image is arbitrary. 
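The two verification routines above follow a common pattern: a cheap sparse sample to decide which image sits in SRAM, and a full walk only once something already looks wrong. A standalone sketch of that strategy over two plain buffers; lengths are assumed to be a whole number of 32-bit words, and the one bad word in main() is injected just for the demonstration.

#include <stdint.h>
#include <stddef.h>

/* Sample one 32-bit word every 100 bytes; returns 0 if all samples match. */
static int verify_sparse(const uint32_t *ref, const uint32_t *img, size_t len)
{
	for (size_t off = 0; off < len; off += 100)
		if (ref[off / sizeof(uint32_t)] != img[off / sizeof(uint32_t)])
			return -1;
	return 0;
}

/* Full walk: count mismatching words, give up after max_errors of them. */
static int verify_full(const uint32_t *ref, const uint32_t *img, size_t len,
		       unsigned int max_errors)
{
	unsigned int errcnt = 0;

	for (size_t idx = 0; idx < len / sizeof(uint32_t); idx++)
		if (ref[idx] != img[idx] && ++errcnt >= max_errors)
			break;
	return errcnt ? -1 : 0;
}

int main(void)
{
	uint32_t ref[64] = { 0 }, img[64] = { 0 };

	img[25] = 0xdeadbeef;          /* byte offset 100: a sparse sample point */
	if (verify_sparse(ref, img, sizeof(img)) == 0)
		return 0;              /* looked fine at a glance */
	return verify_full(ref, img, sizeof(img), 20) ? 1 : 0;
}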
*/ + image = (__le32 *)priv->ucode_boot.v_addr; + len = priv->ucode_boot.len; + rc = iwl_verify_inst_full(priv, image, len); + + return rc; +} + + +/* check contents of special bootstrap uCode SRAM */ +static int iwl_verify_bsm(struct iwl_priv *priv) +{ + __le32 *image = priv->ucode_boot.v_addr; + u32 len = priv->ucode_boot.len; + u32 reg; + u32 val; + + IWL_DEBUG_INFO("Begin verify bsm\n"); + + /* verify BSM SRAM contents */ + val = iwl_read_restricted_reg(priv, BSM_WR_DWCOUNT_REG); + for (reg = BSM_SRAM_LOWER_BOUND; + reg < BSM_SRAM_LOWER_BOUND + len; + reg += sizeof(u32), image ++) { + val = iwl_read_restricted_reg(priv, reg); + if (val != le32_to_cpu(*image)) { + IWL_ERROR("BSM uCode verification failed at " + "addr 0x%08X+%u (of %u), is 0x%x, s/b 0x%x\n", + BSM_SRAM_LOWER_BOUND, + reg - BSM_SRAM_LOWER_BOUND, len, + val, le32_to_cpu(*image)); + return -EIO; + } + } + + IWL_DEBUG_INFO("BSM bootstrap uCode image OK\n"); + + return 0; +} + +/** + * iwl_load_bsm - Load bootstrap instructions + * + * BSM operation: + * + * The Bootstrap State Machine (BSM) stores a short bootstrap uCode program + * in special SRAM that does not power down during RFKILL. When powering back + * up after power-saving sleeps (or during initial uCode load), the BSM loads + * the bootstrap program into the on-board processor, and starts it. + * + * The bootstrap program loads (via DMA) instructions and data for a new + * program from host DRAM locations indicated by the host driver in the + * BSM_DRAM_* registers. Once the new program is loaded, it starts + * automatically. + * + * When initializing the NIC, the host driver points the BSM to the + * "initialize" uCode image. This uCode sets up some internal data, then + * notifies host via "initialize alive" that it is complete. + * + * The host then replaces the BSM_DRAM_* pointer values to point to the + * normal runtime uCode instructions and a backup uCode data cache buffer + * (filled initially with starting data values for the on-board processor), + * then triggers the "initialize" uCode to load and launch the runtime uCode, + * which begins normal operation. + * + * When doing a power-save shutdown, runtime uCode saves data SRAM into + * the backup data cache in DRAM before SRAM is powered down. + * + * When powering back up, the BSM loads the bootstrap program. This reloads + * the runtime uCode instructions and the backup data cache into SRAM, + * and re-launches the runtime uCode from where it left off. + */ +static int iwl_load_bsm(struct iwl_priv *priv) +{ + __le32 *image = priv->ucode_boot.v_addr; + u32 len = priv->ucode_boot.len; + dma_addr_t pinst; + dma_addr_t pdata; + u32 inst_len; + u32 data_len; + int rc; + int i; + u32 done; + u32 reg_offset; + + IWL_DEBUG_INFO("Begin load bsm\n"); + + /* make sure bootstrap program is no larger than BSM's SRAM size */ + if (len > IWL_MAX_BSM_SIZE) + return -EINVAL; + + /* Tell bootstrap uCode where to find the "Initialize" uCode + * in host DRAM ... bits 31:0 for 3945, bits 35:4 for 4965. + * NOTE: iwl_initialize_alive_start() will replace these values, + * after the "initialize" uCode has run, to point to + * runtime/protocol instructions and backup data cache. 
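To make the BSM hand-off described above concrete, here is an editor's sketch (not driver code) of the two ways the four DRAM descriptor values get filled in: first aimed at the "initialize" image, later rewritten for the runtime image with the load flag set last. The 1<<31 load flag and the >>4 address shift mirror the comments in iwl_load_bsm() and iwl_set_ucode_ptrs() below; the struct and function names are stand-ins.

#include <stdint.h>

#define DRAM_INST_LOAD (1u << 31)    /* "new pointers are valid, go load them" */

struct bsm_dram_ptrs {
	uint32_t inst_ptr;           /* physical address of instructions, >> 4 */
	uint32_t data_ptr;           /* physical address of data image, >> 4 */
	uint32_t inst_bytecount;
	uint32_t data_bytecount;
};

void point_at_init_image(struct bsm_dram_ptrs *p,
			 uint64_t init_inst_pa, uint32_t init_inst_len,
			 uint64_t init_data_pa, uint32_t init_data_len)
{
	p->inst_ptr = (uint32_t)(init_inst_pa >> 4);
	p->data_ptr = (uint32_t)(init_data_pa >> 4);
	p->inst_bytecount = init_inst_len;
	p->data_bytecount = init_data_len;
}

void point_at_runtime_image(struct bsm_dram_ptrs *p,
			    uint64_t rt_inst_pa, uint32_t rt_inst_len,
			    uint64_t data_backup_pa, uint32_t data_len)
{
	p->inst_ptr = (uint32_t)(rt_inst_pa >> 4);
	p->data_ptr = (uint32_t)(data_backup_pa >> 4);
	p->data_bytecount = data_len;
	/* instruction byte count written last, with the load flag set, to
	 * signal that the whole pointer/size block is now consistent */
	p->inst_bytecount = rt_inst_len | DRAM_INST_LOAD;
}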
*/ + pinst = priv->ucode_init.p_addr >> 4; + pdata = priv->ucode_init_data.p_addr >> 4; + inst_len = priv->ucode_init.len; + data_len = priv->ucode_init_data.len; + + rc = iwl_grab_restricted_access(priv); + if (rc) + return rc; + + iwl_write_restricted_reg(priv, BSM_DRAM_INST_PTR_REG, pinst); + iwl_write_restricted_reg(priv, BSM_DRAM_DATA_PTR_REG, pdata); + iwl_write_restricted_reg(priv, BSM_DRAM_INST_BYTECOUNT_REG, inst_len); + iwl_write_restricted_reg(priv, BSM_DRAM_DATA_BYTECOUNT_REG, data_len); + + /* Fill BSM memory with bootstrap instructions */ + for (reg_offset = BSM_SRAM_LOWER_BOUND; + reg_offset < BSM_SRAM_LOWER_BOUND + len; + reg_offset += sizeof(u32), image++) + _iwl_write_restricted_reg(priv, reg_offset, + le32_to_cpu(*image)); + + rc = iwl_verify_bsm(priv); + if (rc) { + iwl_release_restricted_access(priv); + return rc; + } + + /* Tell BSM to copy from BSM SRAM into instruction SRAM, when asked */ + iwl_write_restricted_reg(priv, BSM_WR_MEM_SRC_REG, 0x0); + iwl_write_restricted_reg(priv, BSM_WR_MEM_DST_REG, + RTC_INST_LOWER_BOUND); + iwl_write_restricted_reg(priv, BSM_WR_DWCOUNT_REG, len / sizeof(u32)); + + /* Load bootstrap code into instruction SRAM now, + * to prepare to load "initialize" uCode */ + iwl_write_restricted_reg(priv, BSM_WR_CTRL_REG, + BSM_WR_CTRL_REG_BIT_START); + + /* Wait for load of bootstrap uCode to finish */ + for (i = 0; i < 100; i++) { + done = iwl_read_restricted_reg(priv, BSM_WR_CTRL_REG); + if (!(done & BSM_WR_CTRL_REG_BIT_START)) + break; + udelay(10); + } + if (i < 100) + IWL_DEBUG_INFO("BSM write complete, poll %d iterations\n", i); + else { + IWL_ERROR("BSM write did not complete!\n"); + return -EIO; + } + + /* Enable future boot loads whenever power management unit triggers it + * (e.g. when powering back up after power-save shutdown) */ + iwl_write_restricted_reg(priv, BSM_WR_CTRL_REG, + BSM_WR_CTRL_REG_BIT_START_EN); + + iwl_release_restricted_access(priv); + + return 0; +} + +static void iwl_nic_start(struct iwl_priv *priv) +{ + /* Remove all resets to allow NIC to operate */ + iwl_write32(priv, CSR_RESET, 0); +} + +/** + * iwl_read_ucode - Read uCode images from disk file. + * + * Copy into buffers for card to fetch via bus-mastering + */ +static int iwl_read_ucode(struct iwl_priv *priv) +{ + struct iwl_ucode *ucode; + int rc = 0; + const struct firmware *ucode_raw; + const char *name = "iwlwifi-4965" IWL4965_UCODE_API ".ucode"; + u8 *src; + size_t len; + u32 ver, inst_size, data_size, init_size, init_data_size, boot_size; + + /* Ask kernel firmware_class module to get the boot firmware off disk. + * request_firmware() is synchronous, file is in memory on return. */ + rc = request_firmware(&ucode_raw, name, &priv->pci_dev->dev); + if (rc < 0) { + IWL_ERROR("%s firmware file req failed: Reason %d\n", name, rc); + goto error; + } + + IWL_DEBUG_INFO("Got firmware '%s' file (%zd bytes) from disk\n", + name, ucode_raw->size); + + /* Make sure that we got at least our header! 
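The bounded busy-wait in iwl_load_bsm() above (up to 100 polls of 10 us each, waiting for the BSM start bit to clear) is a reusable shape. A sketch with stand-in callbacks for the register read and the delay; a call such as poll_bit_clear(read_bsm_wr_ctrl, start_bit, 100, udelay_stub, 10) would mimic the loop above, with all of those names invented for the example.

#include <stdbool.h>
#include <stdint.h>

/* Poll until 'bit' reads back as clear, giving up after max_polls
 * iterations of 'us' microseconds each.  Returns true if the bit
 * cleared in time, false on timeout. */
bool poll_bit_clear(uint32_t (*read_reg)(void), uint32_t bit,
		    int max_polls, void (*delay_us)(unsigned int us),
		    unsigned int us)
{
	for (int i = 0; i < max_polls; i++) {
		if (!(read_reg() & bit))
			return true;
		delay_us(us);
	}
	return false;
}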
*/ + if (ucode_raw->size < sizeof(*ucode)) { + IWL_ERROR("File size way too small!\n"); + rc = -EINVAL; + goto err_release; + } + + /* Data from ucode file: header followed by uCode images */ + ucode = (void *)ucode_raw->data; + + ver = le32_to_cpu(ucode->ver); + inst_size = le32_to_cpu(ucode->inst_size); + data_size = le32_to_cpu(ucode->data_size); + init_size = le32_to_cpu(ucode->init_size); + init_data_size = le32_to_cpu(ucode->init_data_size); + boot_size = le32_to_cpu(ucode->boot_size); + + IWL_DEBUG_INFO("f/w package hdr ucode version = 0x%x\n", ver); + IWL_DEBUG_INFO("f/w package hdr runtime inst size = %u\n", + inst_size); + IWL_DEBUG_INFO("f/w package hdr runtime data size = %u\n", + data_size); + IWL_DEBUG_INFO("f/w package hdr init inst size = %u\n", + init_size); + IWL_DEBUG_INFO("f/w package hdr init data size = %u\n", + init_data_size); + IWL_DEBUG_INFO("f/w package hdr boot inst size = %u\n", + boot_size); + + /* Verify size of file vs. image size info in file's header */ + if (ucode_raw->size < sizeof(*ucode) + + inst_size + data_size + init_size + + init_data_size + boot_size) { + + IWL_DEBUG_INFO("uCode file size %d too small\n", + (int)ucode_raw->size); + rc = -EINVAL; + goto err_release; + } + + /* Verify that uCode images will fit in card's SRAM */ + if (inst_size > IWL_MAX_INST_SIZE) { + IWL_DEBUG_INFO("uCode instr len %d too large to fit in card\n", + (int)inst_size); + rc = -EINVAL; + goto err_release; + } + + if (data_size > IWL_MAX_DATA_SIZE) { + IWL_DEBUG_INFO("uCode data len %d too large to fit in card\n", + (int)data_size); + rc = -EINVAL; + goto err_release; + } + if (init_size > IWL_MAX_INST_SIZE) { + IWL_DEBUG_INFO + ("uCode init instr len %d too large to fit in card\n", + (int)init_size); + rc = -EINVAL; + goto err_release; + } + if (init_data_size > IWL_MAX_DATA_SIZE) { + IWL_DEBUG_INFO + ("uCode init data len %d too large to fit in card\n", + (int)init_data_size); + rc = -EINVAL; + goto err_release; + } + if (boot_size > IWL_MAX_BSM_SIZE) { + IWL_DEBUG_INFO + ("uCode boot instr len %d too large to fit in bsm\n", + (int)boot_size); + rc = -EINVAL; + goto err_release; + } + + /* Allocate ucode buffers for card's bus-master loading ... 
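The parsing above implies a simple on-disk layout: a header of little-endian 32-bit sizes followed by five images packed back to back. A sketch of that layout and the "does the file hold what the header claims" check, with host byte order assumed for brevity; a real loader would also cap each size individually, as the code above does against the IWL_MAX_* limits, which additionally guards the summation here against overflow.

#include <stdint.h>
#include <stddef.h>

struct fw_header {                 /* sketch of the header fields read above */
	uint32_t ver;
	uint32_t inst_size;        /* runtime instructions */
	uint32_t data_size;        /* runtime data */
	uint32_t init_size;        /* "initialize" instructions */
	uint32_t init_data_size;   /* "initialize" data */
	uint32_t boot_size;        /* bootstrap instructions */
	/* the five images follow, concatenated in this order */
};

/* Returns 0 when the file is big enough for the header plus every image
 * the header advertises, -1 otherwise. */
int fw_sizes_ok(const struct fw_header *h, size_t file_size)
{
	size_t need = sizeof(*h);

	need += h->inst_size + h->data_size;
	need += h->init_size + h->init_data_size;
	need += h->boot_size;

	return file_size >= need ? 0 : -1;
}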
*/ + + /* Runtime instructions and 2 copies of data: + * 1) unmodified from disk + * 2) backup cache for save/restore during power-downs */ + priv->ucode_code.len = inst_size; + priv->ucode_code.v_addr = + pci_alloc_consistent(priv->pci_dev, + priv->ucode_code.len, + &(priv->ucode_code.p_addr)); + + priv->ucode_data.len = data_size; + priv->ucode_data.v_addr = + pci_alloc_consistent(priv->pci_dev, + priv->ucode_data.len, + &(priv->ucode_data.p_addr)); + + priv->ucode_data_backup.len = data_size; + priv->ucode_data_backup.v_addr = + pci_alloc_consistent(priv->pci_dev, + priv->ucode_data_backup.len, + &(priv->ucode_data_backup.p_addr)); + + + /* Initialization instructions and data */ + priv->ucode_init.len = init_size; + priv->ucode_init.v_addr = + pci_alloc_consistent(priv->pci_dev, + priv->ucode_init.len, + &(priv->ucode_init.p_addr)); + + priv->ucode_init_data.len = init_data_size; + priv->ucode_init_data.v_addr = + pci_alloc_consistent(priv->pci_dev, + priv->ucode_init_data.len, + &(priv->ucode_init_data.p_addr)); + + /* Bootstrap (instructions only, no data) */ + priv->ucode_boot.len = boot_size; + priv->ucode_boot.v_addr = + pci_alloc_consistent(priv->pci_dev, + priv->ucode_boot.len, + &(priv->ucode_boot.p_addr)); + + if (!priv->ucode_code.v_addr || !priv->ucode_data.v_addr || + !priv->ucode_init.v_addr || !priv->ucode_init_data.v_addr || + !priv->ucode_boot.v_addr || !priv->ucode_data_backup.v_addr) + goto err_pci_alloc; + + /* Copy images into buffers for card's bus-master reads ... */ + + /* Runtime instructions (first block of data in file) */ + src = &ucode->data[0]; + len = priv->ucode_code.len; + IWL_DEBUG_INFO("Copying (but not loading) uCode instr len %d\n", + (int)len); + memcpy(priv->ucode_code.v_addr, src, len); + IWL_DEBUG_INFO("uCode instr buf vaddr = 0x%p, paddr = 0x%08x\n", + priv->ucode_code.v_addr, (u32)priv->ucode_code.p_addr); + + /* Runtime data (2nd block) + * NOTE: Copy into backup buffer will be done in iwl_up() */ + src = &ucode->data[inst_size]; + len = priv->ucode_data.len; + IWL_DEBUG_INFO("Copying (but not loading) uCode data len %d\n", + (int)len); + memcpy(priv->ucode_data.v_addr, src, len); + memcpy(priv->ucode_data_backup.v_addr, src, len); + + /* Initialization instructions (3rd block) */ + if (init_size) { + src = &ucode->data[inst_size + data_size]; + len = priv->ucode_init.len; + IWL_DEBUG_INFO("Copying (but not loading) init instr len %d\n", + (int)len); + memcpy(priv->ucode_init.v_addr, src, len); + } + + /* Initialization data (4th block) */ + if (init_data_size) { + src = &ucode->data[inst_size + data_size + init_size]; + len = priv->ucode_init_data.len; + IWL_DEBUG_INFO("Copying (but not loading) init data len %d\n", + (int)len); + memcpy(priv->ucode_init_data.v_addr, src, len); + } + + /* Bootstrap instructions (5th block) */ + src = &ucode->data[inst_size + data_size + init_size + init_data_size]; + len = priv->ucode_boot.len; + IWL_DEBUG_INFO("Copying (but not loading) boot instr len %d\n", + (int)len); + memcpy(priv->ucode_boot.v_addr, src, len); + + /* We have our copies now, allow OS release its copies */ + release_firmware(ucode_raw); + return 0; + + err_pci_alloc: + IWL_ERROR("failed to allocate pci memory\n"); + rc = -ENOMEM; + iwl_dealloc_ucode_pci(priv); + + err_release: + release_firmware(ucode_raw); + + error: + return rc; +} + + +/** + * iwl_set_ucode_ptrs - Set uCode address location + * + * Tell initialization uCode where to find runtime uCode. + * + * BSM registers initially contain pointers to initialization uCode. 
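The allocation block above follows an "allocate everything, test once, free whatever succeeded" shape: every buffer is attempted, one check catches any failure, and the cleanup helper skips buffers that were never allocated. A userspace sketch of the same shape (sizes assumed non-zero), with malloc/free standing in for pci_alloc_consistent/pci_free_consistent:

#include <stdlib.h>

struct images {
	void *code, *data, *data_backup, *init, *init_data, *boot;
};

void free_images(struct images *b)
{
	/* free(NULL) is a no-op, so partial failures need no special casing */
	free(b->code);        b->code = NULL;
	free(b->data);        b->data = NULL;
	free(b->data_backup); b->data_backup = NULL;
	free(b->init);        b->init = NULL;
	free(b->init_data);   b->init_data = NULL;
	free(b->boot);        b->boot = NULL;
}

int alloc_images(struct images *b, size_t inst, size_t data,
		 size_t init, size_t init_data, size_t boot)
{
	b->code        = malloc(inst);
	b->data        = malloc(data);
	b->data_backup = malloc(data);       /* second copy for save/restore */
	b->init        = malloc(init);
	b->init_data   = malloc(init_data);
	b->boot        = malloc(boot);

	if (!b->code || !b->data || !b->data_backup ||
	    !b->init || !b->init_data || !b->boot) {
		free_images(b);
		return -1;
	}
	return 0;
}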
+ * We need to replace them to load runtime uCode inst and data, + * and to save runtime data when powering down. + */ +static int iwl_set_ucode_ptrs(struct iwl_priv *priv) +{ + dma_addr_t pinst; + dma_addr_t pdata; + int rc = 0; + unsigned long flags; + + /* bits 35:4 for 4965 */ + pinst = priv->ucode_code.p_addr >> 4; + pdata = priv->ucode_data_backup.p_addr >> 4; + + spin_lock_irqsave(&priv->lock, flags); + rc = iwl_grab_restricted_access(priv); + if (rc) { + spin_unlock_irqrestore(&priv->lock, flags); + return rc; + } + + /* Tell bootstrap uCode where to find image to load */ + iwl_write_restricted_reg(priv, BSM_DRAM_INST_PTR_REG, pinst); + iwl_write_restricted_reg(priv, BSM_DRAM_DATA_PTR_REG, pdata); + iwl_write_restricted_reg(priv, BSM_DRAM_DATA_BYTECOUNT_REG, + priv->ucode_data.len); + + /* Inst bytecount must be last to set up, bit 31 signals uCode + * that all new ptr/size info is in place */ + iwl_write_restricted_reg(priv, BSM_DRAM_INST_BYTECOUNT_REG, + priv->ucode_code.len | BSM_DRAM_INST_LOAD); + + iwl_release_restricted_access(priv); + + spin_unlock_irqrestore(&priv->lock, flags); + + IWL_DEBUG_INFO("Runtime uCode pointers are set.\n"); + + return rc; +} + +/** + * iwl_init_alive_start - Called after REPLY_ALIVE notification receieved + * + * Called after REPLY_ALIVE notification received from "initialize" uCode. + * + * The 4965 "initialize" ALIVE reply contains calibration data for: + * Voltage, temperature, and MIMO tx gain correction, now stored in priv + * (3945 does not contain this data). + * + * Tell "initialize" uCode to go ahead and load the runtime uCode. +*/ +static void iwl_init_alive_start(struct iwl_priv *priv) +{ + /* Check alive response for "valid" sign from uCode */ + if (priv->card_alive_init.is_valid != UCODE_VALID_OK) { + /* We had an error bringing up the hardware, so take it + * all the way back down so we can try again */ + IWL_DEBUG_INFO("Initialize Alive failed.\n"); + goto restart; + } + + /* Bootstrap uCode has loaded initialize uCode ... verify inst image. + * This is a paranoid check, because we would not have gotten the + * "initialize" alive if code weren't properly loaded. */ + if (iwl_verify_ucode(priv)) { + /* Runtime instruction load was bad; + * take it all the way back down so we can try again */ + IWL_DEBUG_INFO("Bad \"initialize\" uCode load.\n"); + goto restart; + } + + /* Calculate temperature */ + priv->temperature = iwl4965_get_temperature(priv); + + /* Send pointers to protocol/runtime uCode image ... init code will + * load and launch runtime uCode, which will send us another "Alive" + * notification. */ + IWL_DEBUG_INFO("Initialization Alive received.\n"); + if (iwl_set_ucode_ptrs(priv)) { + /* Runtime instruction load won't happen; + * take it all the way back down so we can try again */ + IWL_DEBUG_INFO("Couldn't set up uCode pointers.\n"); + goto restart; + } + return; + + restart: + queue_work(priv->workqueue, &priv->restart); +} + + +/** + * iwl_alive_start - called after REPLY_ALIVE notification received + * from protocol/runtime uCode (initialization uCode's + * Alive gets handled by iwl_init_alive_start()). + */ +static void iwl_alive_start(struct iwl_priv *priv) +{ + int rc = 0; + + IWL_DEBUG_INFO("Runtime Alive received.\n"); + + if (priv->card_alive.is_valid != UCODE_VALID_OK) { + /* We had an error bringing up the hardware, so take it + * all the way back down so we can try again */ + IWL_DEBUG_INFO("Alive failed.\n"); + goto restart; + } + + /* Initialize uCode has loaded Runtime uCode ... verify inst image. 
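iwl_init_alive_start() above and iwl_alive_start() just below share one control-flow shape: every check that fails funnels into the same restart path. A compact sketch of that shape with stand-in callbacks; none of these names exist in the driver.

#include <stdbool.h>

struct alive_steps {                /* stand-in callbacks, names invented */
	bool (*response_valid)(void);
	bool (*image_verified)(void);
	bool (*pointers_set)(void);
	void (*schedule_restart)(void);
};

void handle_init_alive(const struct alive_steps *s)
{
	if (!s->response_valid())   /* uCode did not report a valid alive */
		goto restart;
	if (!s->image_verified())   /* paranoia: instruction SRAM mismatch */
		goto restart;
	if (!s->pointers_set())     /* hand the BSM the runtime image */
		goto restart;
	return;                     /* now wait for the runtime "alive" */

restart:
	s->schedule_restart();
}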
+ * This is a paranoid check, because we would not have gotten the + * "runtime" alive if code weren't properly loaded. */ + if (iwl_verify_ucode(priv)) { + /* Runtime instruction load was bad; + * take it all the way back down so we can try again */ + IWL_DEBUG_INFO("Bad runtime uCode load.\n"); + goto restart; + } + + iwl_clear_stations_table(priv); + + rc = iwl4965_alive_notify(priv); + if (rc) { + IWL_WARNING("Could not complete ALIVE transition [ntf]: %d\n", + rc); + goto restart; + } + + /* After the ALIVE response, we can process host commands */ + set_bit(STATUS_ALIVE, &priv->status); + + /* Clear out the uCode error bit if it is set */ + clear_bit(STATUS_FW_ERROR, &priv->status); + + rc = iwl_init_channel_map(priv); + if (rc) { + IWL_ERROR("initializing regulatory failed: %d\n", rc); + return; + } + + iwl_init_geos(priv); + + if (iwl_is_rfkill(priv)) + return; + + if (!priv->mac80211_registered) { + /* Unlock so any user space entry points can call back into + * the driver without a deadlock... */ + mutex_unlock(&priv->mutex); + iwl_rate_control_register(priv->hw); + rc = ieee80211_register_hw(priv->hw); + priv->hw->conf.beacon_int = 100; + mutex_lock(&priv->mutex); + + if (rc) { + IWL_ERROR("Failed to register network " + "device (error %d)\n", rc); + return; + } + + priv->mac80211_registered = 1; + + iwl_reset_channel_flag(priv); + } else + ieee80211_start_queues(priv->hw); + + priv->active_rate = priv->rates_mask; + priv->active_rate_basic = priv->rates_mask & IWL_BASIC_RATES_MASK; + + iwl_send_power_mode(priv, IWL_POWER_LEVEL(priv->power_mode)); + + if (iwl_is_associated(priv)) { + struct iwl_rxon_cmd *active_rxon = + (struct iwl_rxon_cmd *)(&priv->active_rxon); + + memcpy(&priv->staging_rxon, &priv->active_rxon, + sizeof(priv->staging_rxon)); + active_rxon->filter_flags &= ~RXON_FILTER_ASSOC_MSK; + } else { + /* Initialize our rx_config data */ + iwl_connection_init_rx_config(priv); + memcpy(priv->staging_rxon.node_addr, priv->mac_addr, ETH_ALEN); + } + + /* Configure BT coexistence */ + iwl_send_bt_config(priv); + + /* Configure the adapter for unassociated operation */ + iwl_commit_rxon(priv); + + /* At this point, the NIC is initialized and operational */ + priv->notif_missed_beacons = 0; + set_bit(STATUS_READY, &priv->status); + + iwl4965_rf_kill_ct_config(priv); + IWL_DEBUG_INFO("ALIVE processing complete.\n"); + + if (priv->error_recovering) + iwl_error_recovery(priv); + + return; + + restart: + queue_work(priv->workqueue, &priv->restart); +} + +static void iwl_cancel_deferred_work(struct iwl_priv *priv); + +static void __iwl_down(struct iwl_priv *priv) +{ + unsigned long flags; + int exit_pending = test_bit(STATUS_EXIT_PENDING, &priv->status); + struct ieee80211_conf *conf = NULL; + + IWL_DEBUG_INFO(DRV_NAME " is going down\n"); + + conf = ieee80211_get_hw_conf(priv->hw); + + if (!exit_pending) + set_bit(STATUS_EXIT_PENDING, &priv->status); + + iwl_clear_stations_table(priv); + + /* Unblock any waiting calls */ + wake_up_interruptible_all(&priv->wait_command_queue); + + iwl_cancel_deferred_work(priv); + + /* Wipe out the EXIT_PENDING status bit if we are not actually + * exiting the module */ + if (!exit_pending) + clear_bit(STATUS_EXIT_PENDING, &priv->status); + + /* stop and reset the on-board processor */ + iwl_write32(priv, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET); + + /* tell the device to stop sending interrupts */ + iwl_disable_interrupts(priv); + + if (priv->mac80211_registered) + ieee80211_stop_queues(priv->hw); + + /* If we have not previously called 
iwl_init() then + * clear all bits but the RF Kill and SUSPEND bits and return */ + if (!iwl_is_init(priv)) { + priv->status = test_bit(STATUS_RF_KILL_HW, &priv->status) << + STATUS_RF_KILL_HW | + test_bit(STATUS_RF_KILL_SW, &priv->status) << + STATUS_RF_KILL_SW | + test_bit(STATUS_IN_SUSPEND, &priv->status) << + STATUS_IN_SUSPEND; + goto exit; + } + + /* ...otherwise clear out all the status bits but the RF Kill and + * SUSPEND bits and continue taking the NIC down. */ + priv->status &= test_bit(STATUS_RF_KILL_HW, &priv->status) << + STATUS_RF_KILL_HW | + test_bit(STATUS_RF_KILL_SW, &priv->status) << + STATUS_RF_KILL_SW | + test_bit(STATUS_IN_SUSPEND, &priv->status) << + STATUS_IN_SUSPEND | + test_bit(STATUS_FW_ERROR, &priv->status) << + STATUS_FW_ERROR; + + spin_lock_irqsave(&priv->lock, flags); + iwl_clear_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); + spin_unlock_irqrestore(&priv->lock, flags); + + iwl_hw_txq_ctx_stop(priv); + iwl_hw_rxq_stop(priv); + + spin_lock_irqsave(&priv->lock, flags); + if (!iwl_grab_restricted_access(priv)) { + iwl_write_restricted_reg(priv, APMG_CLK_DIS_REG, + APMG_CLK_VAL_DMA_CLK_RQT); + iwl_release_restricted_access(priv); + } + spin_unlock_irqrestore(&priv->lock, flags); + + udelay(5); + + iwl_hw_nic_stop_master(priv); + iwl_set_bit(priv, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET); + iwl_hw_nic_reset(priv); + + exit: + memset(&priv->card_alive, 0, sizeof(struct iwl_alive_resp)); + + if (priv->ibss_beacon) + dev_kfree_skb(priv->ibss_beacon); + priv->ibss_beacon = NULL; + + /* clear out any free frames */ + iwl_clear_free_frames(priv); +} + +static void iwl_down(struct iwl_priv *priv) +{ + mutex_lock(&priv->mutex); + __iwl_down(priv); + mutex_unlock(&priv->mutex); +} + +#define MAX_HW_RESTARTS 5 + +static int __iwl_up(struct iwl_priv *priv) +{ + DECLARE_MAC_BUF(mac); + int rc, i; + u32 hw_rf_kill = 0; + + if (test_bit(STATUS_EXIT_PENDING, &priv->status)) { + IWL_WARNING("Exit pending; will not bring the NIC up\n"); + return -EIO; + } + + if (test_bit(STATUS_RF_KILL_SW, &priv->status)) { + IWL_WARNING("Radio disabled by SW RF kill (module " + "parameter)\n"); + return 0; + } + + iwl_write32(priv, CSR_INT, 0xFFFFFFFF); + + rc = iwl_hw_nic_init(priv); + if (rc) { + IWL_ERROR("Unable to int nic\n"); + return rc; + } + + /* make sure rfkill handshake bits are cleared */ + iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL); + iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, + CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED); + + /* clear (again), then enable host interrupts */ + iwl_write32(priv, CSR_INT, 0xFFFFFFFF); + iwl_enable_interrupts(priv); + + /* really make sure rfkill handshake bits are cleared */ + iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL); + iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL); + + /* Copy original ucode data image from disk into backup cache. + * This will be used to initialize the on-board processor's + * data SRAM for a clean start when the runtime program first loads. 
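__iwl_down() above preserves only a short whitelist of status bits (RF kill and suspend, plus the firmware-error bit on the fully-initialized path) while clearing everything else. The driver rebuilds the word bit by bit with test_bit() shifts; masking against a whitelist, as in this small sketch with invented bit numbers, has the same effect.

#include <stdio.h>

enum {                              /* stand-in bit numbers for the example */
	ST_RF_KILL_HW = 0,
	ST_RF_KILL_SW = 1,
	ST_IN_SUSPEND = 2,
	ST_FW_ERROR   = 3,
	ST_READY      = 4,
	ST_ALIVE      = 5,
};

#define BIT_UL(n) (1UL << (n))

int main(void)
{
	unsigned long status = BIT_UL(ST_READY) | BIT_UL(ST_ALIVE) |
			       BIT_UL(ST_RF_KILL_HW) | BIT_UL(ST_IN_SUSPEND);
	unsigned long keep = BIT_UL(ST_RF_KILL_HW) | BIT_UL(ST_RF_KILL_SW) |
			     BIT_UL(ST_IN_SUSPEND) | BIT_UL(ST_FW_ERROR);

	status &= keep;             /* READY and ALIVE are dropped */
	printf("status after down: 0x%lx\n", status);
	return 0;
}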
*/ + memcpy(priv->ucode_data_backup.v_addr, priv->ucode_data.v_addr, + priv->ucode_data.len); + + /* If platform's RF_KILL switch is set to KILL, + * wait for BIT_INT_RF_KILL interrupt before loading uCode + * and getting things started */ + if (!(iwl_read32(priv, CSR_GP_CNTRL) & + CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW)) + hw_rf_kill = 1; + + if (test_bit(STATUS_RF_KILL_HW, &priv->status) || hw_rf_kill) { + IWL_WARNING("Radio disabled by HW RF Kill switch\n"); + return 0; + } + + for (i = 0; i < MAX_HW_RESTARTS; i++) { + + iwl_clear_stations_table(priv); + + /* load bootstrap state machine, + * load bootstrap program into processor's memory, + * prepare to load the "initialize" uCode */ + rc = iwl_load_bsm(priv); + + if (rc) { + IWL_ERROR("Unable to set up bootstrap uCode: %d\n", rc); + continue; + } + + /* start card; "initialize" will load runtime ucode */ + iwl_nic_start(priv); + + /* MAC Address location in EEPROM same for 3945/4965 */ + get_eeprom_mac(priv, priv->mac_addr); + IWL_DEBUG_INFO("MAC address: %s\n", + print_mac(mac, priv->mac_addr)); + + SET_IEEE80211_PERM_ADDR(priv->hw, priv->mac_addr); + + IWL_DEBUG_INFO(DRV_NAME " is coming up\n"); + + return 0; + } + + set_bit(STATUS_EXIT_PENDING, &priv->status); + __iwl_down(priv); + + /* tried to restart and config the device for as long as our + * patience could withstand */ + IWL_ERROR("Unable to initialize device after %d attempts.\n", i); + return -EIO; +} + + +/***************************************************************************** + * + * Workqueue callbacks + * + *****************************************************************************/ + +static void iwl_bg_init_alive_start(struct work_struct *data) +{ + struct iwl_priv *priv = + container_of(data, struct iwl_priv, init_alive_start.work); + + if (test_bit(STATUS_EXIT_PENDING, &priv->status)) + return; + + mutex_lock(&priv->mutex); + iwl_init_alive_start(priv); + mutex_unlock(&priv->mutex); +} + +static void iwl_bg_alive_start(struct work_struct *data) +{ + struct iwl_priv *priv = + container_of(data, struct iwl_priv, alive_start.work); + + if (test_bit(STATUS_EXIT_PENDING, &priv->status)) + return; + + mutex_lock(&priv->mutex); + iwl_alive_start(priv); + mutex_unlock(&priv->mutex); +} + +static void iwl_bg_rf_kill(struct work_struct *work) +{ + struct iwl_priv *priv = container_of(work, struct iwl_priv, rf_kill); + + wake_up_interruptible(&priv->wait_command_queue); + + if (test_bit(STATUS_EXIT_PENDING, &priv->status)) + return; + + mutex_lock(&priv->mutex); + + if (!iwl_is_rfkill(priv)) { + IWL_DEBUG(IWL_DL_INFO | IWL_DL_RF_KILL, + "HW and/or SW RF Kill no longer active, restarting " + "device\n"); + if (!test_bit(STATUS_EXIT_PENDING, &priv->status)) + queue_work(priv->workqueue, &priv->restart); + } else { + + if (!test_bit(STATUS_RF_KILL_HW, &priv->status)) + IWL_DEBUG_RF_KILL("Can not turn radio back on - " + "disabled by SW switch\n"); + else + IWL_WARNING("Radio Frequency Kill Switch is On:\n" + "Kill switch must be turned off for " + "wireless networking to work.\n"); + } + mutex_unlock(&priv->mutex); +} + +#define IWL_SCAN_CHECK_WATCHDOG (7 * HZ) + +static void iwl_bg_scan_check(struct work_struct *data) +{ + struct iwl_priv *priv = + container_of(data, struct iwl_priv, scan_check.work); + + if (test_bit(STATUS_EXIT_PENDING, &priv->status)) + return; + + mutex_lock(&priv->mutex); + if (test_bit(STATUS_SCANNING, &priv->status) || + test_bit(STATUS_SCAN_ABORTING, &priv->status)) { + IWL_DEBUG(IWL_DL_INFO | IWL_DL_SCAN, + "Scan completion watchdog resetting 
adapter (%dms)\n", + jiffies_to_msecs(IWL_SCAN_CHECK_WATCHDOG)); + if (!test_bit(STATUS_EXIT_PENDING, &priv->status)) + queue_work(priv->workqueue, &priv->restart); + } + mutex_unlock(&priv->mutex); +} + +static void iwl_bg_request_scan(struct work_struct *data) +{ + struct iwl_priv *priv = + container_of(data, struct iwl_priv, request_scan); + struct iwl_host_cmd cmd = { + .id = REPLY_SCAN_CMD, + .len = sizeof(struct iwl_scan_cmd), + .meta.flags = CMD_SIZE_HUGE, + }; + int rc = 0; + struct iwl_scan_cmd *scan; + struct ieee80211_conf *conf = NULL; + u8 direct_mask; + int phymode; + + conf = ieee80211_get_hw_conf(priv->hw); + + mutex_lock(&priv->mutex); + + if (!iwl_is_ready(priv)) { + IWL_WARNING("request scan called when driver not ready.\n"); + goto done; + } + + /* Make sure the scan wasn't cancelled before this queued work + * was given the chance to run... */ + if (!test_bit(STATUS_SCANNING, &priv->status)) + goto done; + + /* This should never be called or scheduled if there is currently + * a scan active in the hardware. */ + if (test_bit(STATUS_SCAN_HW, &priv->status)) { + IWL_DEBUG_INFO("Multiple concurrent scan requests in parallel. " + "Ignoring second request.\n"); + rc = -EIO; + goto done; + } + + if (test_bit(STATUS_EXIT_PENDING, &priv->status)) { + IWL_DEBUG_SCAN("Aborting scan due to device shutdown\n"); + goto done; + } + + if (test_bit(STATUS_SCAN_ABORTING, &priv->status)) { + IWL_DEBUG_HC("Scan request while abort pending. Queuing.\n"); + goto done; + } + + if (iwl_is_rfkill(priv)) { + IWL_DEBUG_HC("Aborting scan due to RF Kill activation\n"); + goto done; + } + + if (!test_bit(STATUS_READY, &priv->status)) { + IWL_DEBUG_HC("Scan request while uninitialized. Queuing.\n"); + goto done; + } + + if (!priv->scan_bands) { + IWL_DEBUG_HC("Aborting scan due to no requested bands\n"); + goto done; + } + + if (!priv->scan) { + priv->scan = kmalloc(sizeof(struct iwl_scan_cmd) + + IWL_MAX_SCAN_SIZE, GFP_KERNEL); + if (!priv->scan) { + rc = -ENOMEM; + goto done; + } + } + scan = priv->scan; + memset(scan, 0, sizeof(struct iwl_scan_cmd) + IWL_MAX_SCAN_SIZE); + + scan->quiet_plcp_th = IWL_PLCP_QUIET_THRESH; + scan->quiet_time = IWL_ACTIVE_QUIET_TIME; + + if (iwl_is_associated(priv)) { + u16 interval = 0; + u32 extra; + u32 suspend_time = 100; + u32 scan_suspend_time = 100; + unsigned long flags; + + IWL_DEBUG_INFO("Scanning while associated...\n"); + + spin_lock_irqsave(&priv->lock, flags); + interval = priv->beacon_int; + spin_unlock_irqrestore(&priv->lock, flags); + + scan->suspend_time = 0; + scan->max_out_time = cpu_to_le32(600 * 1024); + if (!interval) + interval = suspend_time; + + extra = (suspend_time / interval) << 22; + scan_suspend_time = (extra | + ((suspend_time % interval) * 1024)); + scan->suspend_time = cpu_to_le32(scan_suspend_time); + IWL_DEBUG_SCAN("suspend_time 0x%X beacon interval %d\n", + scan_suspend_time, interval); + } + + /* We should add the ability for user to lock to PASSIVE ONLY */ + if (priv->one_direct_scan) { + IWL_DEBUG_SCAN + ("Kicking off one direct scan for '%s'\n", + iwl_escape_essid(priv->direct_ssid, + priv->direct_ssid_len)); + scan->direct_scan[0].id = WLAN_EID_SSID; + scan->direct_scan[0].len = priv->direct_ssid_len; + memcpy(scan->direct_scan[0].ssid, + priv->direct_ssid, priv->direct_ssid_len); + direct_mask = 1; + } else if (!iwl_is_associated(priv)) { + scan->direct_scan[0].id = WLAN_EID_SSID; + scan->direct_scan[0].len = priv->essid_len; + memcpy(scan->direct_scan[0].ssid, priv->essid, priv->essid_len); + direct_mask = 1; + } else + 
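+		/* Already associated and no user-requested direct SSID:
+		 * leave direct_mask clear so the scan stays undirected. */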
direct_mask = 0; + + /* We don't build a direct scan probe request; the uCode will do + * that based on the direct_mask added to each channel entry */ + scan->tx_cmd.len = cpu_to_le16( + iwl_fill_probe_req(priv, (struct ieee80211_mgmt *)scan->data, + IWL_MAX_SCAN_SIZE - sizeof(scan), 0)); + scan->tx_cmd.tx_flags = TX_CMD_FLG_SEQ_CTL_MSK; + scan->tx_cmd.sta_id = priv->hw_setting.bcast_sta_id; + scan->tx_cmd.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE; + + /* flags + rate selection */ + + scan->tx_cmd.tx_flags |= cpu_to_le32(0x200); + + switch (priv->scan_bands) { + case 2: + scan->flags = RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK; + scan->tx_cmd.rate_n_flags = + iwl_hw_set_rate_n_flags(IWL_RATE_1M_PLCP, + RATE_MCS_ANT_B_MSK|RATE_MCS_CCK_MSK); + + scan->good_CRC_th = 0; + phymode = MODE_IEEE80211G; + break; + + case 1: + scan->tx_cmd.rate_n_flags = + iwl_hw_set_rate_n_flags(IWL_RATE_6M_PLCP, + RATE_MCS_ANT_B_MSK); + scan->good_CRC_th = IWL_GOOD_CRC_TH; + phymode = MODE_IEEE80211A; + break; + + default: + IWL_WARNING("Invalid scan band count\n"); + goto done; + } + + /* select Rx chains */ + + /* Force use of chains B and C (0x6) for scan Rx. + * Avoid A (0x1) because of its off-channel reception on A-band. + * MIMO is not used here, but value is required to make uCode happy. */ + scan->rx_chain = RXON_RX_CHAIN_DRIVER_FORCE_MSK | + cpu_to_le16((0x7 << RXON_RX_CHAIN_VALID_POS) | + (0x6 << RXON_RX_CHAIN_FORCE_SEL_POS) | + (0x7 << RXON_RX_CHAIN_FORCE_MIMO_SEL_POS)); + + if (priv->iw_mode == IEEE80211_IF_TYPE_MNTR) + scan->filter_flags = RXON_FILTER_PROMISC_MSK; + + if (direct_mask) + IWL_DEBUG_SCAN + ("Initiating direct scan for %s.\n", + iwl_escape_essid(priv->essid, priv->essid_len)); + else + IWL_DEBUG_SCAN("Initiating indirect scan.\n"); + + scan->channel_count = + iwl_get_channels_for_scan( + priv, phymode, 1, /* active */ + direct_mask, + (void *)&scan->data[le16_to_cpu(scan->tx_cmd.len)]); + + cmd.len += le16_to_cpu(scan->tx_cmd.len) + + scan->channel_count * sizeof(struct iwl_scan_channel); + cmd.data = scan; + scan->len = cpu_to_le16(cmd.len); + + set_bit(STATUS_SCAN_HW, &priv->status); + rc = iwl_send_cmd_sync(priv, &cmd); + if (rc) + goto done; + + queue_delayed_work(priv->workqueue, &priv->scan_check, + IWL_SCAN_CHECK_WATCHDOG); + + mutex_unlock(&priv->mutex); + return; + + done: + /* inform mac80211 sacn aborted */ + queue_work(priv->workqueue, &priv->scan_completed); + mutex_unlock(&priv->mutex); +} + +static void iwl_bg_up(struct work_struct *data) +{ + struct iwl_priv *priv = container_of(data, struct iwl_priv, up); + + if (test_bit(STATUS_EXIT_PENDING, &priv->status)) + return; + + mutex_lock(&priv->mutex); + __iwl_up(priv); + mutex_unlock(&priv->mutex); +} + +static void iwl_bg_restart(struct work_struct *data) +{ + struct iwl_priv *priv = container_of(data, struct iwl_priv, restart); + + if (test_bit(STATUS_EXIT_PENDING, &priv->status)) + return; + + iwl_down(priv); + queue_work(priv->workqueue, &priv->up); +} + +static void iwl_bg_rx_replenish(struct work_struct *data) +{ + struct iwl_priv *priv = + container_of(data, struct iwl_priv, rx_replenish); + + if (test_bit(STATUS_EXIT_PENDING, &priv->status)) + return; + + mutex_lock(&priv->mutex); + iwl_rx_replenish(priv); + mutex_unlock(&priv->mutex); +} + +static void iwl_bg_post_associate(struct work_struct *data) +{ + struct iwl_priv *priv = container_of(data, struct iwl_priv, + post_associate.work); + + int rc = 0; + struct ieee80211_conf *conf = NULL; + DECLARE_MAC_BUF(mac); + + if (priv->iw_mode == 
IEEE80211_IF_TYPE_AP) { + IWL_ERROR("%s Should not be called in AP mode\n", __FUNCTION__); + return; + } + + IWL_DEBUG_ASSOC("Associated as %d to: %s\n", + priv->assoc_id, + print_mac(mac, priv->active_rxon.bssid_addr)); + + + if (test_bit(STATUS_EXIT_PENDING, &priv->status)) + return; + + mutex_lock(&priv->mutex); + + conf = ieee80211_get_hw_conf(priv->hw); + + priv->staging_rxon.filter_flags &= ~RXON_FILTER_ASSOC_MSK; + iwl_commit_rxon(priv); + + memset(&priv->rxon_timing, 0, sizeof(struct iwl_rxon_time_cmd)); + iwl_setup_rxon_timing(priv); + rc = iwl_send_cmd_pdu(priv, REPLY_RXON_TIMING, + sizeof(priv->rxon_timing), &priv->rxon_timing); + if (rc) + IWL_WARNING("REPLY_RXON_TIMING failed - " + "Attempting to continue.\n"); + + priv->staging_rxon.filter_flags |= RXON_FILTER_ASSOC_MSK; + +#ifdef CONFIG_IWLWIFI_HT + if (priv->is_ht_enabled && priv->current_assoc_ht.is_ht) + iwl4965_set_rxon_ht(priv, &priv->current_assoc_ht); + else { + priv->active_rate_ht[0] = 0; + priv->active_rate_ht[1] = 0; + priv->current_channel_width = IWL_CHANNEL_WIDTH_20MHZ; + } +#endif /* CONFIG_IWLWIFI_HT*/ + iwl4965_set_rxon_chain(priv); + priv->staging_rxon.assoc_id = cpu_to_le16(priv->assoc_id); + + IWL_DEBUG_ASSOC("assoc id %d beacon interval %d\n", + priv->assoc_id, priv->beacon_int); + + if (priv->assoc_capability & WLAN_CAPABILITY_SHORT_PREAMBLE) + priv->staging_rxon.flags |= RXON_FLG_SHORT_PREAMBLE_MSK; + else + priv->staging_rxon.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK; + + if (priv->staging_rxon.flags & RXON_FLG_BAND_24G_MSK) { + if (priv->assoc_capability & WLAN_CAPABILITY_SHORT_SLOT_TIME) + priv->staging_rxon.flags |= RXON_FLG_SHORT_SLOT_MSK; + else + priv->staging_rxon.flags &= ~RXON_FLG_SHORT_SLOT_MSK; + + if (priv->iw_mode == IEEE80211_IF_TYPE_IBSS) + priv->staging_rxon.flags &= ~RXON_FLG_SHORT_SLOT_MSK; + + } + + iwl_commit_rxon(priv); + + switch (priv->iw_mode) { + case IEEE80211_IF_TYPE_STA: + iwl_rate_scale_init(priv->hw, IWL_AP_ID); + break; + + case IEEE80211_IF_TYPE_IBSS: + + /* clear out the station table */ + iwl_clear_stations_table(priv); + + iwl_rxon_add_station(priv, BROADCAST_ADDR, 0); + iwl_rxon_add_station(priv, priv->bssid, 0); + iwl_rate_scale_init(priv->hw, IWL_STA_ID); + iwl_send_beacon_cmd(priv); + + break; + + default: + IWL_ERROR("%s Should not be called in %d mode\n", + __FUNCTION__, priv->iw_mode); + break; + } + + iwl_sequence_reset(priv); + +#ifdef CONFIG_IWLWIFI_SENSITIVITY + /* Enable Rx differential gain and sensitivity calibrations */ + iwl4965_chain_noise_reset(priv); + priv->start_calib = 1; +#endif /* CONFIG_IWLWIFI_SENSITIVITY */ + + if (priv->iw_mode == IEEE80211_IF_TYPE_IBSS) + priv->assoc_station_added = 1; + +#ifdef CONFIG_IWLWIFI_QOS + iwl_activate_qos(priv, 0); +#endif /* CONFIG_IWLWIFI_QOS */ + mutex_unlock(&priv->mutex); +} + +static void iwl_bg_abort_scan(struct work_struct *work) +{ + struct iwl_priv *priv = container_of(work, struct iwl_priv, + abort_scan); + + if (!iwl_is_ready(priv)) + return; + + mutex_lock(&priv->mutex); + + set_bit(STATUS_SCAN_ABORTING, &priv->status); + iwl_send_scan_abort(priv); + + mutex_unlock(&priv->mutex); +} + +static void iwl_bg_scan_completed(struct work_struct *work) +{ + struct iwl_priv *priv = + container_of(work, struct iwl_priv, scan_completed); + + IWL_DEBUG(IWL_DL_INFO | IWL_DL_SCAN, "SCAN complete scan\n"); + + if (test_bit(STATUS_EXIT_PENDING, &priv->status)) + return; + + ieee80211_scan_completed(priv->hw); + + /* Since setting the TXPOWER may have been deferred while + * performing the scan, fire one off */ + 
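+	/* Take priv->mutex so the deferred TX power update cannot race
+	 * with the up/down and configuration paths. */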
mutex_lock(&priv->mutex); + iwl_hw_reg_send_txpower(priv); + mutex_unlock(&priv->mutex); +} + +/***************************************************************************** + * + * mac80211 entry point functions + * + *****************************************************************************/ + +static int iwl_mac_start(struct ieee80211_hw *hw) +{ + struct iwl_priv *priv = hw->priv; + + IWL_DEBUG_MAC80211("enter\n"); + + /* we should be verifying the device is ready to be opened */ + mutex_lock(&priv->mutex); + + priv->is_open = 1; + + if (!iwl_is_rfkill(priv)) + ieee80211_start_queues(priv->hw); + + mutex_unlock(&priv->mutex); + IWL_DEBUG_MAC80211("leave\n"); + return 0; +} + +static void iwl_mac_stop(struct ieee80211_hw *hw) +{ + struct iwl_priv *priv = hw->priv; + + IWL_DEBUG_MAC80211("enter\n"); + priv->is_open = 0; + /*netif_stop_queue(dev); */ + flush_workqueue(priv->workqueue); + IWL_DEBUG_MAC80211("leave\n"); +} + +static int iwl_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb, + struct ieee80211_tx_control *ctl) +{ + struct iwl_priv *priv = hw->priv; + + IWL_DEBUG_MAC80211("enter\n"); + + if (priv->iw_mode == IEEE80211_IF_TYPE_MNTR) { + IWL_DEBUG_MAC80211("leave - monitor\n"); + return -1; + } + + IWL_DEBUG_TX("dev->xmit(%d bytes) at rate 0x%02x\n", skb->len, + ctl->tx_rate); + + if (iwl_tx_skb(priv, skb, ctl)) + dev_kfree_skb_any(skb); + + IWL_DEBUG_MAC80211("leave\n"); + return 0; +} + +static int iwl_mac_add_interface(struct ieee80211_hw *hw, + struct ieee80211_if_init_conf *conf) +{ + struct iwl_priv *priv = hw->priv; + unsigned long flags; + DECLARE_MAC_BUF(mac); + + IWL_DEBUG_MAC80211("enter: id %d, type %d\n", conf->if_id, conf->type); + if (conf->mac_addr) + IWL_DEBUG_MAC80211("enter: MAC %s\n", + print_mac(mac, conf->mac_addr)); + + if (priv->interface_id) { + IWL_DEBUG_MAC80211("leave - interface_id != 0\n"); + return 0; + } + + spin_lock_irqsave(&priv->lock, flags); + priv->interface_id = conf->if_id; + + spin_unlock_irqrestore(&priv->lock, flags); + + mutex_lock(&priv->mutex); + iwl_set_mode(priv, conf->type); + + IWL_DEBUG_MAC80211("leave\n"); + mutex_unlock(&priv->mutex); + + return 0; +} + +/** + * iwl_mac_config - mac80211 config callback + * + * We ignore conf->flags & IEEE80211_CONF_SHORT_SLOT_TIME since it seems to + * be set inappropriately and the driver currently sets the hardware up to + * use it whenever needed. 
+ */ +static int iwl_mac_config(struct ieee80211_hw *hw, struct ieee80211_conf *conf) +{ + struct iwl_priv *priv = hw->priv; + const struct iwl_channel_info *ch_info; + unsigned long flags; + + mutex_lock(&priv->mutex); + IWL_DEBUG_MAC80211("enter to channel %d\n", conf->channel); + + if (!iwl_is_ready(priv)) { + IWL_DEBUG_MAC80211("leave - not ready\n"); + mutex_unlock(&priv->mutex); + return -EIO; + } + + /* TODO: Figure out how to get ieee80211_local->sta_scanning w/ only + * what is exposed through include/ declrations */ + if (unlikely(!iwl_param_disable_hw_scan && + test_bit(STATUS_SCANNING, &priv->status))) { + IWL_DEBUG_MAC80211("leave - scanning\n"); + mutex_unlock(&priv->mutex); + return 0; + } + + spin_lock_irqsave(&priv->lock, flags); + + ch_info = iwl_get_channel_info(priv, conf->phymode, conf->channel); + if (!is_channel_valid(ch_info)) { + IWL_DEBUG_SCAN("Channel %d [%d] is INVALID for this SKU.\n", + conf->channel, conf->phymode); + IWL_DEBUG_MAC80211("leave - invalid channel\n"); + spin_unlock_irqrestore(&priv->lock, flags); + mutex_unlock(&priv->mutex); + return -EINVAL; + } + +#ifdef CONFIG_IWLWIFI_HT + /* if we are switching fron ht to 2.4 clear flags + * from any ht related info since 2.4 does not + * support ht */ + if ((le16_to_cpu(priv->staging_rxon.channel) != conf->channel) +#ifdef IEEE80211_CONF_CHANNEL_SWITCH + && !(conf->flags & IEEE80211_CONF_CHANNEL_SWITCH) +#endif + ) + priv->staging_rxon.flags = 0; +#endif /* CONFIG_IWLWIFI_HT */ + + iwl_set_rxon_channel(priv, conf->phymode, conf->channel); + + iwl_set_flags_for_phymode(priv, conf->phymode); + + /* The list of supported rates and rate mask can be different + * for each phymode; since the phymode may have changed, reset + * the rate mask to what mac80211 lists */ + iwl_set_rate(priv); + + spin_unlock_irqrestore(&priv->lock, flags); + +#ifdef IEEE80211_CONF_CHANNEL_SWITCH + if (conf->flags & IEEE80211_CONF_CHANNEL_SWITCH) { + iwl_hw_channel_switch(priv, conf->channel); + mutex_unlock(&priv->mutex); + return 0; + } +#endif + + iwl_radio_kill_sw(priv, !conf->radio_enabled); + + if (!conf->radio_enabled) { + IWL_DEBUG_MAC80211("leave - radio disabled\n"); + mutex_unlock(&priv->mutex); + return 0; + } + + if (iwl_is_rfkill(priv)) { + IWL_DEBUG_MAC80211("leave - RF kill\n"); + mutex_unlock(&priv->mutex); + return -EIO; + } + + iwl_set_rate(priv); + + if (memcmp(&priv->active_rxon, + &priv->staging_rxon, sizeof(priv->staging_rxon))) + iwl_commit_rxon(priv); + else + IWL_DEBUG_INFO("No re-sending same RXON configuration.\n"); + + IWL_DEBUG_MAC80211("leave\n"); + + mutex_unlock(&priv->mutex); + + return 0; +} + +static void iwl_config_ap(struct iwl_priv *priv) +{ + int rc = 0; + + if (priv->status & STATUS_EXIT_PENDING) + return; + + /* The following should be done only at AP bring up */ + if ((priv->active_rxon.filter_flags & RXON_FILTER_ASSOC_MSK) == 0) { + + /* RXON - unassoc (to set timing command) */ + priv->staging_rxon.filter_flags &= ~RXON_FILTER_ASSOC_MSK; + iwl_commit_rxon(priv); + + /* RXON Timing */ + memset(&priv->rxon_timing, 0, sizeof(struct iwl_rxon_time_cmd)); + iwl_setup_rxon_timing(priv); + rc = iwl_send_cmd_pdu(priv, REPLY_RXON_TIMING, + sizeof(priv->rxon_timing), &priv->rxon_timing); + if (rc) + IWL_WARNING("REPLY_RXON_TIMING failed - " + "Attempting to continue.\n"); + + iwl4965_set_rxon_chain(priv); + + /* FIXME: what should be the assoc_id for AP? 
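+	 * For now it is simply copied from priv->assoc_id below.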
*/ + priv->staging_rxon.assoc_id = cpu_to_le16(priv->assoc_id); + if (priv->assoc_capability & WLAN_CAPABILITY_SHORT_PREAMBLE) + priv->staging_rxon.flags |= + RXON_FLG_SHORT_PREAMBLE_MSK; + else + priv->staging_rxon.flags &= + ~RXON_FLG_SHORT_PREAMBLE_MSK; + + if (priv->staging_rxon.flags & RXON_FLG_BAND_24G_MSK) { + if (priv->assoc_capability & + WLAN_CAPABILITY_SHORT_SLOT_TIME) + priv->staging_rxon.flags |= + RXON_FLG_SHORT_SLOT_MSK; + else + priv->staging_rxon.flags &= + ~RXON_FLG_SHORT_SLOT_MSK; + + if (priv->iw_mode == IEEE80211_IF_TYPE_IBSS) + priv->staging_rxon.flags &= + ~RXON_FLG_SHORT_SLOT_MSK; + } + /* restore RXON assoc */ + priv->staging_rxon.filter_flags |= RXON_FILTER_ASSOC_MSK; + iwl_commit_rxon(priv); +#ifdef CONFIG_IWLWIFI_QOS + iwl_activate_qos(priv, 1); +#endif + iwl_rxon_add_station(priv, BROADCAST_ADDR, 0); + } + iwl_send_beacon_cmd(priv); + + /* FIXME - we need to add code here to detect a totally new + * configuration, reset the AP, unassoc, rxon timing, assoc, + * clear sta table, add BCAST sta... */ +} + +static int iwl_mac_config_interface(struct ieee80211_hw *hw, int if_id, + struct ieee80211_if_conf *conf) +{ + struct iwl_priv *priv = hw->priv; + DECLARE_MAC_BUF(mac); + unsigned long flags; + int rc; + + if (conf == NULL) + return -EIO; + + if ((priv->iw_mode == IEEE80211_IF_TYPE_AP) && + (!conf->beacon || !conf->ssid_len)) { + IWL_DEBUG_MAC80211 + ("Leaving in AP mode because HostAPD is not ready.\n"); + return 0; + } + + mutex_lock(&priv->mutex); + + IWL_DEBUG_MAC80211("enter: interface id %d\n", if_id); + if (conf->bssid) + IWL_DEBUG_MAC80211("bssid: %s\n", + print_mac(mac, conf->bssid)); + +/* + * very dubious code was here; the probe filtering flag is never set: + * + if (unlikely(test_bit(STATUS_SCANNING, &priv->status)) && + !(priv->hw->flags & IEEE80211_HW_NO_PROBE_FILTERING)) { + */ + if (unlikely(test_bit(STATUS_SCANNING, &priv->status))) { + IWL_DEBUG_MAC80211("leave - scanning\n"); + mutex_unlock(&priv->mutex); + return 0; + } + + if (priv->interface_id != if_id) { + IWL_DEBUG_MAC80211("leave - interface_id != if_id\n"); + mutex_unlock(&priv->mutex); + return 0; + } + + if (priv->iw_mode == IEEE80211_IF_TYPE_AP) { + if (!conf->bssid) { + conf->bssid = priv->mac_addr; + memcpy(priv->bssid, priv->mac_addr, ETH_ALEN); + IWL_DEBUG_MAC80211("bssid was set to: %s\n", + print_mac(mac, conf->bssid)); + } + if (priv->ibss_beacon) + dev_kfree_skb(priv->ibss_beacon); + + priv->ibss_beacon = conf->beacon; + } + + if (conf->bssid && !is_zero_ether_addr(conf->bssid) && + !is_multicast_ether_addr(conf->bssid)) { + /* If there is currently a HW scan going on in the background + * then we need to cancel it else the RXON below will fail. 
*/ + if (iwl_scan_cancel_timeout(priv, 100)) { + IWL_WARNING("Aborted scan still in progress " + "after 100ms\n"); + IWL_DEBUG_MAC80211("leaving - scan abort failed.\n"); + mutex_unlock(&priv->mutex); + return -EAGAIN; + } + memcpy(priv->staging_rxon.bssid_addr, conf->bssid, ETH_ALEN); + + /* TODO: Audit driver for usage of these members and see + * if mac80211 deprecates them (priv->bssid looks like it + * shouldn't be there, but I haven't scanned the IBSS code + * to verify) - jpk */ + memcpy(priv->bssid, conf->bssid, ETH_ALEN); + + if (priv->iw_mode == IEEE80211_IF_TYPE_AP) + iwl_config_ap(priv); + else { + priv->staging_rxon.filter_flags |= + RXON_FILTER_ASSOC_MSK; + rc = iwl_commit_rxon(priv); + if ((priv->iw_mode == IEEE80211_IF_TYPE_STA) && rc) + iwl_rxon_add_station( + priv, priv->active_rxon.bssid_addr, 1); + } + + } else { + priv->staging_rxon.filter_flags &= ~RXON_FILTER_ASSOC_MSK; + iwl_commit_rxon(priv); + } + + spin_lock_irqsave(&priv->lock, flags); + if (!conf->ssid_len) + memset(priv->essid, 0, IW_ESSID_MAX_SIZE); + else + memcpy(priv->essid, conf->ssid, conf->ssid_len); + + priv->essid_len = conf->ssid_len; + spin_unlock_irqrestore(&priv->lock, flags); + + IWL_DEBUG_MAC80211("leave\n"); + mutex_unlock(&priv->mutex); + + return 0; +} + +static void iwl_configure_filter(struct ieee80211_hw *hw, + unsigned int changed_flags, + unsigned int *total_flags, + int mc_count, struct dev_addr_list *mc_list) +{ + /* + * XXX: dummy + * see also iwl_connection_init_rx_config + */ + *total_flags = 0; +} + +static void iwl_mac_remove_interface(struct ieee80211_hw *hw, + struct ieee80211_if_init_conf *conf) +{ + struct iwl_priv *priv = hw->priv; + + IWL_DEBUG_MAC80211("enter\n"); + + mutex_lock(&priv->mutex); + if (priv->interface_id == conf->if_id) { + priv->interface_id = 0; + memset(priv->bssid, 0, ETH_ALEN); + memset(priv->essid, 0, IW_ESSID_MAX_SIZE); + priv->essid_len = 0; + } + mutex_unlock(&priv->mutex); + + IWL_DEBUG_MAC80211("leave\n"); + +} + +#define IWL_DELAY_NEXT_SCAN (HZ*2) +static int iwl_mac_hw_scan(struct ieee80211_hw *hw, u8 *ssid, size_t len) +{ + int rc = 0; + unsigned long flags; + struct iwl_priv *priv = hw->priv; + + IWL_DEBUG_MAC80211("enter\n"); + + spin_lock_irqsave(&priv->lock, flags); + + if (!iwl_is_ready_rf(priv)) { + rc = -EIO; + IWL_DEBUG_MAC80211("leave - not ready or exit pending\n"); + goto out_unlock; + } + + if (priv->iw_mode == IEEE80211_IF_TYPE_AP) { /* APs don't scan */ + rc = -EIO; + IWL_ERROR("ERROR: APs don't scan\n"); + goto out_unlock; + } + + /* if we just finished scan ask for delay */ + if (priv->last_scan_jiffies && + time_after(priv->last_scan_jiffies + IWL_DELAY_NEXT_SCAN, + jiffies)) { + rc = -EAGAIN; + goto out_unlock; + } + if (len) { + IWL_DEBUG_SCAN("direct scan for " + "%s [%d]\n ", + iwl_escape_essid(ssid, len), (int)len); + + priv->one_direct_scan = 1; + priv->direct_ssid_len = (u8) + min((u8) len, (u8) IW_ESSID_MAX_SIZE); + memcpy(priv->direct_ssid, ssid, priv->direct_ssid_len); + } + + rc = iwl_scan_initiate(priv); + + IWL_DEBUG_MAC80211("leave\n"); + +out_unlock: + spin_unlock_irqrestore(&priv->lock, flags); + + return rc; +} + +static int iwl_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, + const u8 *local_addr, const u8 *addr, + struct ieee80211_key_conf *key) +{ + struct iwl_priv *priv = hw->priv; + DECLARE_MAC_BUF(mac); + int rc = 0; + u8 sta_id; + + IWL_DEBUG_MAC80211("enter\n"); + + if (!iwl_param_hwcrypto) { + IWL_DEBUG_MAC80211("leave - hwcrypto disabled\n"); + return -EOPNOTSUPP; + } + + if 
(is_zero_ether_addr(addr)) + /* only support pairwise keys */ + return -EOPNOTSUPP; + + sta_id = iwl_hw_find_station(priv, addr); + if (sta_id == IWL_INVALID_STATION) { + IWL_DEBUG_MAC80211("leave - %s not in station map.\n", + print_mac(mac, addr)); + return -EINVAL; + } + + mutex_lock(&priv->mutex); + + switch (cmd) { + case SET_KEY: + rc = iwl_update_sta_key_info(priv, key, sta_id); + if (!rc) { + iwl_set_rxon_hwcrypto(priv, 1); + iwl_commit_rxon(priv); + key->hw_key_idx = sta_id; + IWL_DEBUG_MAC80211("set_key success, using hwcrypto\n"); + key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV; + } + break; + case DISABLE_KEY: + rc = iwl_clear_sta_key_info(priv, sta_id); + if (!rc) { + iwl_set_rxon_hwcrypto(priv, 0); + iwl_commit_rxon(priv); + IWL_DEBUG_MAC80211("disable hwcrypto key\n"); + } + break; + default: + rc = -EINVAL; + } + + IWL_DEBUG_MAC80211("leave\n"); + mutex_unlock(&priv->mutex); + + return rc; +} + +static int iwl_mac_conf_tx(struct ieee80211_hw *hw, int queue, + const struct ieee80211_tx_queue_params *params) +{ + struct iwl_priv *priv = hw->priv; +#ifdef CONFIG_IWLWIFI_QOS + unsigned long flags; + int q; +#endif /* CONFIG_IWL_QOS */ + + IWL_DEBUG_MAC80211("enter\n"); + + if (!iwl_is_ready_rf(priv)) { + IWL_DEBUG_MAC80211("leave - RF not ready\n"); + return -EIO; + } + + if (queue >= AC_NUM) { + IWL_DEBUG_MAC80211("leave - queue >= AC_NUM %d\n", queue); + return 0; + } + +#ifdef CONFIG_IWLWIFI_QOS + if (!priv->qos_data.qos_enable) { + priv->qos_data.qos_active = 0; + IWL_DEBUG_MAC80211("leave - qos not enabled\n"); + return 0; + } + q = AC_NUM - 1 - queue; + + spin_lock_irqsave(&priv->lock, flags); + + priv->qos_data.def_qos_parm.ac[q].cw_min = cpu_to_le16(params->cw_min); + priv->qos_data.def_qos_parm.ac[q].cw_max = cpu_to_le16(params->cw_max); + priv->qos_data.def_qos_parm.ac[q].aifsn = params->aifs; + priv->qos_data.def_qos_parm.ac[q].edca_txop = + cpu_to_le16((params->burst_time * 100)); + + priv->qos_data.def_qos_parm.ac[q].reserved1 = 0; + priv->qos_data.qos_active = 1; + + spin_unlock_irqrestore(&priv->lock, flags); + + mutex_lock(&priv->mutex); + if (priv->iw_mode == IEEE80211_IF_TYPE_AP) + iwl_activate_qos(priv, 1); + else if (priv->assoc_id && iwl_is_associated(priv)) + iwl_activate_qos(priv, 0); + + mutex_unlock(&priv->mutex); + +#endif /*CONFIG_IWLWIFI_QOS */ + + IWL_DEBUG_MAC80211("leave\n"); + return 0; +} + +static int iwl_mac_get_tx_stats(struct ieee80211_hw *hw, + struct ieee80211_tx_queue_stats *stats) +{ + struct iwl_priv *priv = hw->priv; + int i, avail; + struct iwl_tx_queue *txq; + struct iwl_queue *q; + unsigned long flags; + + IWL_DEBUG_MAC80211("enter\n"); + + if (!iwl_is_ready_rf(priv)) { + IWL_DEBUG_MAC80211("leave - RF not ready\n"); + return -EIO; + } + + spin_lock_irqsave(&priv->lock, flags); + + for (i = 0; i < AC_NUM; i++) { + txq = &priv->txq[i]; + q = &txq->q; + avail = iwl_queue_space(q); + + stats->data[i].len = q->n_window - avail; + stats->data[i].limit = q->n_window - q->high_mark; + stats->data[i].count = q->n_window; + + } + spin_unlock_irqrestore(&priv->lock, flags); + + IWL_DEBUG_MAC80211("leave\n"); + + return 0; +} + +static int iwl_mac_get_stats(struct ieee80211_hw *hw, + struct ieee80211_low_level_stats *stats) +{ + IWL_DEBUG_MAC80211("enter\n"); + IWL_DEBUG_MAC80211("leave\n"); + + return 0; +} + +static u64 iwl_mac_get_tsf(struct ieee80211_hw *hw) +{ + IWL_DEBUG_MAC80211("enter\n"); + IWL_DEBUG_MAC80211("leave\n"); + + return 0; +} + +static void iwl_mac_reset_tsf(struct ieee80211_hw *hw) +{ + struct iwl_priv *priv = hw->priv; + 
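+	/* Called on a new (re)association: clear association and HT state
+	 * and release any pending IBSS beacon below. */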
unsigned long flags; + + mutex_lock(&priv->mutex); + IWL_DEBUG_MAC80211("enter\n"); + + priv->lq_mngr.lq_ready = 0; +#ifdef CONFIG_IWLWIFI_HT + spin_lock_irqsave(&priv->lock, flags); + memset(&priv->current_assoc_ht, 0, sizeof(struct sta_ht_info)); + spin_unlock_irqrestore(&priv->lock, flags); +#ifdef CONFIG_IWLWIFI_HT_AGG +/* if (priv->lq_mngr.agg_ctrl.granted_ba) + iwl4965_turn_off_agg(priv, TID_ALL_SPECIFIED);*/ + + memset(&(priv->lq_mngr.agg_ctrl), 0, sizeof(struct iwl_agg_control)); + priv->lq_mngr.agg_ctrl.tid_traffic_load_threshold = 10; + priv->lq_mngr.agg_ctrl.ba_timeout = 5000; + priv->lq_mngr.agg_ctrl.auto_agg = 1; + + if (priv->lq_mngr.agg_ctrl.auto_agg) + priv->lq_mngr.agg_ctrl.requested_ba = TID_ALL_ENABLED; +#endif /*CONFIG_IWLWIFI_HT_AGG */ +#endif /* CONFIG_IWLWIFI_HT */ + +#ifdef CONFIG_IWLWIFI_QOS + iwl_reset_qos(priv); +#endif + + cancel_delayed_work(&priv->post_associate); + + spin_lock_irqsave(&priv->lock, flags); + priv->assoc_id = 0; + priv->assoc_capability = 0; + priv->call_post_assoc_from_beacon = 0; + priv->assoc_station_added = 0; + + /* new association get rid of ibss beacon skb */ + if (priv->ibss_beacon) + dev_kfree_skb(priv->ibss_beacon); + + priv->ibss_beacon = NULL; + + priv->beacon_int = priv->hw->conf.beacon_int; + priv->timestamp1 = 0; + priv->timestamp0 = 0; + if ((priv->iw_mode == IEEE80211_IF_TYPE_STA)) + priv->beacon_int = 0; + + spin_unlock_irqrestore(&priv->lock, flags); + + /* Per mac80211.h: This is only used in IBSS mode... */ + if (priv->iw_mode != IEEE80211_IF_TYPE_IBSS) { + IWL_DEBUG_MAC80211("leave - not in IBSS\n"); + mutex_unlock(&priv->mutex); + return; + } + + if (!iwl_is_ready_rf(priv)) { + IWL_DEBUG_MAC80211("leave - not ready\n"); + mutex_unlock(&priv->mutex); + return; + } + + priv->only_active_channel = 0; + + iwl_set_rate(priv); + + mutex_unlock(&priv->mutex); + + IWL_DEBUG_MAC80211("leave\n"); + +} + +static int iwl_mac_beacon_update(struct ieee80211_hw *hw, struct sk_buff *skb, + struct ieee80211_tx_control *control) +{ + struct iwl_priv *priv = hw->priv; + unsigned long flags; + + mutex_lock(&priv->mutex); + IWL_DEBUG_MAC80211("enter\n"); + + if (!iwl_is_ready_rf(priv)) { + IWL_DEBUG_MAC80211("leave - RF not ready\n"); + mutex_unlock(&priv->mutex); + return -EIO; + } + + if (priv->iw_mode != IEEE80211_IF_TYPE_IBSS) { + IWL_DEBUG_MAC80211("leave - not IBSS\n"); + mutex_unlock(&priv->mutex); + return -EIO; + } + + spin_lock_irqsave(&priv->lock, flags); + + if (priv->ibss_beacon) + dev_kfree_skb(priv->ibss_beacon); + + priv->ibss_beacon = skb; + + priv->assoc_id = 0; + + IWL_DEBUG_MAC80211("leave\n"); + spin_unlock_irqrestore(&priv->lock, flags); + +#ifdef CONFIG_IWLWIFI_QOS + iwl_reset_qos(priv); +#endif + + queue_work(priv->workqueue, &priv->post_associate.work); + + mutex_unlock(&priv->mutex); + + return 0; +} + +#ifdef CONFIG_IWLWIFI_HT +union ht_cap_info { + struct { + u16 advanced_coding_cap :1; + u16 supported_chan_width_set :1; + u16 mimo_power_save_mode :2; + u16 green_field :1; + u16 short_GI20 :1; + u16 short_GI40 :1; + u16 tx_stbc :1; + u16 rx_stbc :1; + u16 beam_forming :1; + u16 delayed_ba :1; + u16 maximal_amsdu_size :1; + u16 cck_mode_at_40MHz :1; + u16 psmp_support :1; + u16 stbc_ctrl_frame_support :1; + u16 sig_txop_protection_support :1; + }; + u16 val; +} __attribute__ ((packed)); + +union ht_param_info{ + struct { + u8 max_rx_ampdu_factor :2; + u8 mpdu_density :3; + u8 reserved :3; + }; + u8 val; +} __attribute__ ((packed)); + +union ht_exra_param_info { + struct { + u8 ext_chan_offset :2; + u8 tx_chan_width 
:1; + u8 rifs_mode :1; + u8 controlled_access_only :1; + u8 service_interval_granularity :3; + }; + u8 val; +} __attribute__ ((packed)); + +union ht_operation_mode{ + struct { + u16 op_mode :2; + u16 non_GF :1; + u16 reserved :13; + }; + u16 val; +} __attribute__ ((packed)); + + +static int sta_ht_info_init(struct ieee80211_ht_capability *ht_cap, + struct ieee80211_ht_additional_info *ht_extra, + struct sta_ht_info *ht_info_ap, + struct sta_ht_info *ht_info) +{ + union ht_cap_info cap; + union ht_operation_mode op_mode; + union ht_param_info param_info; + union ht_exra_param_info extra_param_info; + + IWL_DEBUG_MAC80211("enter: \n"); + + if (!ht_info) { + IWL_DEBUG_MAC80211("leave: ht_info is NULL\n"); + return -1; + } + + if (ht_cap) { + cap.val = (u16) le16_to_cpu(ht_cap->capabilities_info); + param_info.val = ht_cap->mac_ht_params_info; + ht_info->is_ht = 1; + if (cap.short_GI20) + ht_info->sgf |= 0x1; + if (cap.short_GI40) + ht_info->sgf |= 0x2; + ht_info->is_green_field = cap.green_field; + ht_info->max_amsdu_size = cap.maximal_amsdu_size; + ht_info->supported_chan_width = cap.supported_chan_width_set; + ht_info->tx_mimo_ps_mode = cap.mimo_power_save_mode; + memcpy(ht_info->supp_rates, ht_cap->supported_mcs_set, 16); + + ht_info->ampdu_factor = param_info.max_rx_ampdu_factor; + ht_info->mpdu_density = param_info.mpdu_density; + + IWL_DEBUG_MAC80211("SISO mask 0x%X MIMO mask 0x%X \n", + ht_cap->supported_mcs_set[0], + ht_cap->supported_mcs_set[1]); + + if (ht_info_ap) { + ht_info->control_channel = ht_info_ap->control_channel; + ht_info->extension_chan_offset = + ht_info_ap->extension_chan_offset; + ht_info->tx_chan_width = ht_info_ap->tx_chan_width; + ht_info->operating_mode = ht_info_ap->operating_mode; + } + + if (ht_extra) { + extra_param_info.val = ht_extra->ht_param; + ht_info->control_channel = ht_extra->control_chan; + ht_info->extension_chan_offset = + extra_param_info.ext_chan_offset; + ht_info->tx_chan_width = extra_param_info.tx_chan_width; + op_mode.val = (u16) + le16_to_cpu(ht_extra->operation_mode); + ht_info->operating_mode = op_mode.op_mode; + IWL_DEBUG_MAC80211("control channel %d\n", + ht_extra->control_chan); + } + } else + ht_info->is_ht = 0; + + IWL_DEBUG_MAC80211("leave\n"); + return 0; +} + +static int iwl_mac_conf_ht(struct ieee80211_hw *hw, + struct ieee80211_ht_capability *ht_cap, + struct ieee80211_ht_additional_info *ht_extra) +{ + struct iwl_priv *priv = hw->priv; + int rs; + + IWL_DEBUG_MAC80211("enter: \n"); + + rs = sta_ht_info_init(ht_cap, ht_extra, NULL, &priv->current_assoc_ht); + iwl4965_set_rxon_chain(priv); + + if (priv && priv->assoc_id && + (priv->iw_mode == IEEE80211_IF_TYPE_STA)) { + unsigned long flags; + + spin_lock_irqsave(&priv->lock, flags); + if (priv->beacon_int) + queue_work(priv->workqueue, &priv->post_associate.work); + else + priv->call_post_assoc_from_beacon = 1; + spin_unlock_irqrestore(&priv->lock, flags); + } + + IWL_DEBUG_MAC80211("leave: control channel %d\n", + ht_extra->control_chan); + return rs; + +} + +static void iwl_set_ht_capab(struct ieee80211_hw *hw, + struct ieee80211_ht_capability *ht_cap, + u8 use_wide_chan) +{ + union ht_cap_info cap; + union ht_param_info param_info; + + memset(&cap, 0, sizeof(union ht_cap_info)); + memset(¶m_info, 0, sizeof(union ht_param_info)); + + cap.maximal_amsdu_size = HT_IE_MAX_AMSDU_SIZE_4K; + cap.green_field = 1; + cap.short_GI20 = 1; + cap.short_GI40 = 1; + cap.supported_chan_width_set = use_wide_chan; + cap.mimo_power_save_mode = 0x3; + + param_info.max_rx_ampdu_factor = 
CFG_HT_RX_AMPDU_FACTOR_DEF; + param_info.mpdu_density = CFG_HT_MPDU_DENSITY_DEF; + ht_cap->capabilities_info = (__le16) cpu_to_le16(cap.val); + ht_cap->mac_ht_params_info = (u8) param_info.val; + + ht_cap->supported_mcs_set[0] = 0xff; + ht_cap->supported_mcs_set[1] = 0xff; + ht_cap->supported_mcs_set[4] = + (cap.supported_chan_width_set) ? 0x1: 0x0; +} + +static void iwl_mac_get_ht_capab(struct ieee80211_hw *hw, + struct ieee80211_ht_capability *ht_cap) +{ + u8 use_wide_channel = 1; + struct iwl_priv *priv = hw->priv; + + IWL_DEBUG_MAC80211("enter: \n"); + if (priv->channel_width != IWL_CHANNEL_WIDTH_40MHZ) + use_wide_channel = 0; + + /* no fat tx allowed on 2.4GHZ */ + if (priv->phymode != MODE_IEEE80211A) + use_wide_channel = 0; + + iwl_set_ht_capab(hw, ht_cap, use_wide_channel); + IWL_DEBUG_MAC80211("leave: \n"); +} +#endif /*CONFIG_IWLWIFI_HT*/ + +/***************************************************************************** + * + * sysfs attributes + * + *****************************************************************************/ + +#ifdef CONFIG_IWLWIFI_DEBUG + +/* + * The following adds a new attribute to the sysfs representation + * of this device driver (i.e. a new file in /sys/bus/pci/drivers/iwl/) + * used for controlling the debug level. + * + * See the level definitions in iwl for details. + */ + +static ssize_t show_debug_level(struct device_driver *d, char *buf) +{ + return sprintf(buf, "0x%08X\n", iwl_debug_level); +} +static ssize_t store_debug_level(struct device_driver *d, + const char *buf, size_t count) +{ + char *p = (char *)buf; + u32 val; + + val = simple_strtoul(p, &p, 0); + if (p == buf) + printk(KERN_INFO DRV_NAME + ": %s is not in hex or decimal form.\n", buf); + else + iwl_debug_level = val; + + return strnlen(buf, count); +} + +static DRIVER_ATTR(debug_level, S_IWUSR | S_IRUGO, + show_debug_level, store_debug_level); + +#endif /* CONFIG_IWLWIFI_DEBUG */ + +static ssize_t show_rf_kill(struct device *d, + struct device_attribute *attr, char *buf) +{ + /* + * 0 - RF kill not enabled + * 1 - SW based RF kill active (sysfs) + * 2 - HW based RF kill active + * 3 - Both HW and SW based RF kill active + */ + struct iwl_priv *priv = (struct iwl_priv *)d->driver_data; + int val = (test_bit(STATUS_RF_KILL_SW, &priv->status) ? 0x1 : 0x0) | + (test_bit(STATUS_RF_KILL_HW, &priv->status) ? 
0x2 : 0x0); + + return sprintf(buf, "%i\n", val); +} + +static ssize_t store_rf_kill(struct device *d, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct iwl_priv *priv = (struct iwl_priv *)d->driver_data; + + mutex_lock(&priv->mutex); + iwl_radio_kill_sw(priv, buf[0] == '1'); + mutex_unlock(&priv->mutex); + + return count; +} + +static DEVICE_ATTR(rf_kill, S_IWUSR | S_IRUGO, show_rf_kill, store_rf_kill); + +static ssize_t show_temperature(struct device *d, + struct device_attribute *attr, char *buf) +{ + struct iwl_priv *priv = (struct iwl_priv *)d->driver_data; + + if (!iwl_is_alive(priv)) + return -EAGAIN; + + return sprintf(buf, "%d\n", iwl_hw_get_temperature(priv)); +} + +static DEVICE_ATTR(temperature, S_IRUGO, show_temperature, NULL); + +static ssize_t show_rs_window(struct device *d, + struct device_attribute *attr, + char *buf) +{ + struct iwl_priv *priv = d->driver_data; + return iwl_fill_rs_info(priv->hw, buf, IWL_AP_ID); +} +static DEVICE_ATTR(rs_window, S_IRUGO, show_rs_window, NULL); + +static ssize_t show_tx_power(struct device *d, + struct device_attribute *attr, char *buf) +{ + struct iwl_priv *priv = (struct iwl_priv *)d->driver_data; + return sprintf(buf, "%d\n", priv->user_txpower_limit); +} + +static ssize_t store_tx_power(struct device *d, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct iwl_priv *priv = (struct iwl_priv *)d->driver_data; + char *p = (char *)buf; + u32 val; + + val = simple_strtoul(p, &p, 10); + if (p == buf) + printk(KERN_INFO DRV_NAME + ": %s is not in decimal form.\n", buf); + else + iwl_hw_reg_set_txpower(priv, val); + + return count; +} + +static DEVICE_ATTR(tx_power, S_IWUSR | S_IRUGO, show_tx_power, store_tx_power); + +static ssize_t show_flags(struct device *d, + struct device_attribute *attr, char *buf) +{ + struct iwl_priv *priv = (struct iwl_priv *)d->driver_data; + + return sprintf(buf, "0x%04X\n", priv->active_rxon.flags); +} + +static ssize_t store_flags(struct device *d, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct iwl_priv *priv = (struct iwl_priv *)d->driver_data; + u32 flags = simple_strtoul(buf, NULL, 0); + + mutex_lock(&priv->mutex); + if (le32_to_cpu(priv->staging_rxon.flags) != flags) { + /* Cancel any currently running scans... */ + if (iwl_scan_cancel_timeout(priv, 100)) + IWL_WARNING("Could not cancel scan.\n"); + else { + IWL_DEBUG_INFO("Committing rxon.flags = 0x%04X\n", + flags); + priv->staging_rxon.flags = cpu_to_le32(flags); + iwl_commit_rxon(priv); + } + } + mutex_unlock(&priv->mutex); + + return count; +} + +static DEVICE_ATTR(flags, S_IWUSR | S_IRUGO, show_flags, store_flags); + +static ssize_t show_filter_flags(struct device *d, + struct device_attribute *attr, char *buf) +{ + struct iwl_priv *priv = (struct iwl_priv *)d->driver_data; + + return sprintf(buf, "0x%04X\n", + le32_to_cpu(priv->active_rxon.filter_flags)); +} + +static ssize_t store_filter_flags(struct device *d, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct iwl_priv *priv = (struct iwl_priv *)d->driver_data; + u32 filter_flags = simple_strtoul(buf, NULL, 0); + + mutex_lock(&priv->mutex); + if (le32_to_cpu(priv->staging_rxon.filter_flags) != filter_flags) { + /* Cancel any currently running scans... 
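+	 * (an active scan would make the RXON commit below fail)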
*/ + if (iwl_scan_cancel_timeout(priv, 100)) + IWL_WARNING("Could not cancel scan.\n"); + else { + IWL_DEBUG_INFO("Committing rxon.filter_flags = " + "0x%04X\n", filter_flags); + priv->staging_rxon.filter_flags = + cpu_to_le32(filter_flags); + iwl_commit_rxon(priv); + } + } + mutex_unlock(&priv->mutex); + + return count; +} + +static DEVICE_ATTR(filter_flags, S_IWUSR | S_IRUGO, show_filter_flags, + store_filter_flags); + +static ssize_t show_tune(struct device *d, + struct device_attribute *attr, char *buf) +{ + struct iwl_priv *priv = (struct iwl_priv *)d->driver_data; + + return sprintf(buf, "0x%04X\n", + (priv->phymode << 8) | + le16_to_cpu(priv->active_rxon.channel)); +} + +static void iwl_set_flags_for_phymode(struct iwl_priv *priv, u8 phymode); + +static ssize_t store_tune(struct device *d, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct iwl_priv *priv = (struct iwl_priv *)d->driver_data; + char *p = (char *)buf; + u16 tune = simple_strtoul(p, &p, 0); + u8 phymode = (tune >> 8) & 0xff; + u16 channel = tune & 0xff; + + IWL_DEBUG_INFO("Tune request to:%d channel:%d\n", phymode, channel); + + mutex_lock(&priv->mutex); + if ((le16_to_cpu(priv->staging_rxon.channel) != channel) || + (priv->phymode != phymode)) { + const struct iwl_channel_info *ch_info; + + ch_info = iwl_get_channel_info(priv, phymode, channel); + if (!ch_info) { + IWL_WARNING("Requested invalid phymode/channel " + "combination: %d %d\n", phymode, channel); + mutex_unlock(&priv->mutex); + return -EINVAL; + } + + /* Cancel any currently running scans... */ + if (iwl_scan_cancel_timeout(priv, 100)) + IWL_WARNING("Could not cancel scan.\n"); + else { + IWL_DEBUG_INFO("Committing phymode and " + "rxon.channel = %d %d\n", + phymode, channel); + + iwl_set_rxon_channel(priv, phymode, channel); + iwl_set_flags_for_phymode(priv, phymode); + + iwl_set_rate(priv); + iwl_commit_rxon(priv); + } + } + mutex_unlock(&priv->mutex); + + return count; +} + +static DEVICE_ATTR(tune, S_IWUSR | S_IRUGO, show_tune, store_tune); + +#ifdef CONFIG_IWLWIFI_SPECTRUM_MEASUREMENT + +static ssize_t show_measurement(struct device *d, + struct device_attribute *attr, char *buf) +{ + struct iwl_priv *priv = dev_get_drvdata(d); + struct iwl_spectrum_notification measure_report; + u32 size = sizeof(measure_report), len = 0, ofs = 0; + u8 *data = (u8 *) & measure_report; + unsigned long flags; + + spin_lock_irqsave(&priv->lock, flags); + if (!(priv->measurement_status & MEASUREMENT_READY)) { + spin_unlock_irqrestore(&priv->lock, flags); + return 0; + } + memcpy(&measure_report, &priv->measure_report, size); + priv->measurement_status = 0; + spin_unlock_irqrestore(&priv->lock, flags); + + while (size && (PAGE_SIZE - len)) { + hex_dump_to_buffer(data + ofs, size, 16, 1, buf + len, + PAGE_SIZE - len, 1); + len = strlen(buf); + if (PAGE_SIZE - len) + buf[len++] = '\n'; + + ofs += 16; + size -= min(size, 16U); + } + + return len; +} + +static ssize_t store_measurement(struct device *d, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct iwl_priv *priv = dev_get_drvdata(d); + struct ieee80211_measurement_params params = { + .channel = le16_to_cpu(priv->active_rxon.channel), + .start_time = cpu_to_le64(priv->last_tsf), + .duration = cpu_to_le16(1), + }; + u8 type = IWL_MEASURE_BASIC; + u8 buffer[32]; + u8 channel; + + if (count) { + char *p = buffer; + strncpy(buffer, buf, min(sizeof(buffer), count)); + channel = simple_strtoul(p, NULL, 0); + if (channel) + params.channel = channel; + + p = buffer; + while (*p 
&& *p != ' ') + p++; + if (*p) + type = simple_strtoul(p + 1, NULL, 0); + } + + IWL_DEBUG_INFO("Invoking measurement of type %d on " + "channel %d (for '%s')\n", type, params.channel, buf); + iwl_get_measurement(priv, ¶ms, type); + + return count; +} + +static DEVICE_ATTR(measurement, S_IRUSR | S_IWUSR, + show_measurement, store_measurement); +#endif /* CONFIG_IWLWIFI_SPECTRUM_MEASUREMENT */ + +static ssize_t store_retry_rate(struct device *d, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct iwl_priv *priv = dev_get_drvdata(d); + + priv->retry_rate = simple_strtoul(buf, NULL, 0); + if (priv->retry_rate <= 0) + priv->retry_rate = 1; + + return count; +} + +static ssize_t show_retry_rate(struct device *d, + struct device_attribute *attr, char *buf) +{ + struct iwl_priv *priv = dev_get_drvdata(d); + return sprintf(buf, "%d", priv->retry_rate); +} + +static DEVICE_ATTR(retry_rate, S_IWUSR | S_IRUSR, show_retry_rate, + store_retry_rate); + +static ssize_t store_power_level(struct device *d, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct iwl_priv *priv = dev_get_drvdata(d); + int rc; + int mode; + + mode = simple_strtoul(buf, NULL, 0); + mutex_lock(&priv->mutex); + + if (!iwl_is_ready(priv)) { + rc = -EAGAIN; + goto out; + } + + if ((mode < 1) || (mode > IWL_POWER_LIMIT) || (mode == IWL_POWER_AC)) + mode = IWL_POWER_AC; + else + mode |= IWL_POWER_ENABLED; + + if (mode != priv->power_mode) { + rc = iwl_send_power_mode(priv, IWL_POWER_LEVEL(mode)); + if (rc) { + IWL_DEBUG_MAC80211("failed setting power mode.\n"); + goto out; + } + priv->power_mode = mode; + } + + rc = count; + + out: + mutex_unlock(&priv->mutex); + return rc; +} + +#define MAX_WX_STRING 80 + +/* Values are in microsecond */ +static const s32 timeout_duration[] = { + 350000, + 250000, + 75000, + 37000, + 25000, +}; +static const s32 period_duration[] = { + 400000, + 700000, + 1000000, + 1000000, + 1000000 +}; + +static ssize_t show_power_level(struct device *d, + struct device_attribute *attr, char *buf) +{ + struct iwl_priv *priv = dev_get_drvdata(d); + int level = IWL_POWER_LEVEL(priv->power_mode); + char *p = buf; + + p += sprintf(p, "%d ", level); + switch (level) { + case IWL_POWER_MODE_CAM: + case IWL_POWER_AC: + p += sprintf(p, "(AC)"); + break; + case IWL_POWER_BATTERY: + p += sprintf(p, "(BATTERY)"); + break; + default: + p += sprintf(p, + "(Timeout %dms, Period %dms)", + timeout_duration[level - 1] / 1000, + period_duration[level - 1] / 1000); + } + + if (!(priv->power_mode & IWL_POWER_ENABLED)) + p += sprintf(p, " OFF\n"); + else + p += sprintf(p, " \n"); + + return (p - buf + 1); + +} + +static DEVICE_ATTR(power_level, S_IWUSR | S_IRUSR, show_power_level, + store_power_level); + +static ssize_t show_channels(struct device *d, + struct device_attribute *attr, char *buf) +{ + struct iwl_priv *priv = dev_get_drvdata(d); + int len = 0, i; + struct ieee80211_channel *channels = NULL; + const struct ieee80211_hw_mode *hw_mode = NULL; + int count = 0; + + if (!iwl_is_ready(priv)) + return -EAGAIN; + + hw_mode = iwl_get_hw_mode(priv, MODE_IEEE80211G); + if (!hw_mode) + hw_mode = iwl_get_hw_mode(priv, MODE_IEEE80211B); + if (hw_mode) { + channels = hw_mode->channels; + count = hw_mode->num_channels; + } + + len += + sprintf(&buf[len], + "Displaying %d channels in 2.4GHz band " + "(802.11bg):\n", count); + + for (i = 0; i < count; i++) + len += sprintf(&buf[len], "%d: %ddBm: BSS%s%s, %s.\n", + channels[i].chan, + channels[i].power_level, + channels[i]. 
+ flag & IEEE80211_CHAN_W_RADAR_DETECT ? + " (IEEE 802.11h required)" : "", + (!(channels[i].flag & IEEE80211_CHAN_W_IBSS) + || (channels[i]. + flag & + IEEE80211_CHAN_W_RADAR_DETECT)) ? "" : + ", IBSS", + channels[i]. + flag & IEEE80211_CHAN_W_ACTIVE_SCAN ? + "active/passive" : "passive only"); + + hw_mode = iwl_get_hw_mode(priv, MODE_IEEE80211A); + if (hw_mode) { + channels = hw_mode->channels; + count = hw_mode->num_channels; + } else { + channels = NULL; + count = 0; + } + + len += sprintf(&buf[len], "Displaying %d channels in 5.2GHz band " + "(802.11a):\n", count); + + for (i = 0; i < count; i++) + len += sprintf(&buf[len], "%d: %ddBm: BSS%s%s, %s.\n", + channels[i].chan, + channels[i].power_level, + channels[i]. + flag & IEEE80211_CHAN_W_RADAR_DETECT ? + " (IEEE 802.11h required)" : "", + (!(channels[i].flag & IEEE80211_CHAN_W_IBSS) + || (channels[i]. + flag & + IEEE80211_CHAN_W_RADAR_DETECT)) ? "" : + ", IBSS", + channels[i]. + flag & IEEE80211_CHAN_W_ACTIVE_SCAN ? + "active/passive" : "passive only"); + + return len; +} + +static DEVICE_ATTR(channels, S_IRUSR, show_channels, NULL); + +static ssize_t show_statistics(struct device *d, + struct device_attribute *attr, char *buf) +{ + struct iwl_priv *priv = dev_get_drvdata(d); + u32 size = sizeof(struct iwl_notif_statistics); + u32 len = 0, ofs = 0; + u8 *data = (u8 *) & priv->statistics; + int rc = 0; + + if (!iwl_is_alive(priv)) + return -EAGAIN; + + mutex_lock(&priv->mutex); + rc = iwl_send_statistics_request(priv); + mutex_unlock(&priv->mutex); + + if (rc) { + len = sprintf(buf, + "Error sending statistics request: 0x%08X\n", rc); + return len; + } + + while (size && (PAGE_SIZE - len)) { + hex_dump_to_buffer(data + ofs, size, 16, 1, buf + len, + PAGE_SIZE - len, 1); + len = strlen(buf); + if (PAGE_SIZE - len) + buf[len++] = '\n'; + + ofs += 16; + size -= min(size, 16U); + } + + return len; +} + +static DEVICE_ATTR(statistics, S_IRUGO, show_statistics, NULL); + +static ssize_t show_antenna(struct device *d, + struct device_attribute *attr, char *buf) +{ + struct iwl_priv *priv = dev_get_drvdata(d); + + if (!iwl_is_alive(priv)) + return -EAGAIN; + + return sprintf(buf, "%d\n", priv->antenna); +} + +static ssize_t store_antenna(struct device *d, + struct device_attribute *attr, + const char *buf, size_t count) +{ + int ant; + struct iwl_priv *priv = dev_get_drvdata(d); + + if (count == 0) + return 0; + + if (sscanf(buf, "%1i", &ant) != 1) { + IWL_DEBUG_INFO("not in hex or decimal form.\n"); + return count; + } + + if ((ant >= 0) && (ant <= 2)) { + IWL_DEBUG_INFO("Setting antenna select to %d.\n", ant); + priv->antenna = (enum iwl_antenna)ant; + } else + IWL_DEBUG_INFO("Bad antenna select value %d.\n", ant); + + + return count; +} + +static DEVICE_ATTR(antenna, S_IWUSR | S_IRUGO, show_antenna, store_antenna); + +static ssize_t show_status(struct device *d, + struct device_attribute *attr, char *buf) +{ + struct iwl_priv *priv = (struct iwl_priv *)d->driver_data; + if (!iwl_is_alive(priv)) + return -EAGAIN; + return sprintf(buf, "0x%08x\n", (int)priv->status); +} + +static DEVICE_ATTR(status, S_IRUGO, show_status, NULL); + +static ssize_t dump_error_log(struct device *d, + struct device_attribute *attr, + const char *buf, size_t count) +{ + char *p = (char *)buf; + + if (p[0] == '1') + iwl_dump_nic_error_log((struct iwl_priv *)d->driver_data); + + return strnlen(buf, count); +} + +static DEVICE_ATTR(dump_errors, S_IWUSR, NULL, dump_error_log); + +static ssize_t dump_event_log(struct device *d, + struct device_attribute *attr, + const 
char *buf, size_t count) +{ + char *p = (char *)buf; + + if (p[0] == '1') + iwl_dump_nic_event_log((struct iwl_priv *)d->driver_data); + + return strnlen(buf, count); +} + +static DEVICE_ATTR(dump_events, S_IWUSR, NULL, dump_event_log); + +/***************************************************************************** + * + * driver setup and teardown + * + *****************************************************************************/ + +static void iwl_setup_deferred_work(struct iwl_priv *priv) +{ + priv->workqueue = create_workqueue(DRV_NAME); + + init_waitqueue_head(&priv->wait_command_queue); + + INIT_WORK(&priv->up, iwl_bg_up); + INIT_WORK(&priv->restart, iwl_bg_restart); + INIT_WORK(&priv->rx_replenish, iwl_bg_rx_replenish); + INIT_WORK(&priv->scan_completed, iwl_bg_scan_completed); + INIT_WORK(&priv->request_scan, iwl_bg_request_scan); + INIT_WORK(&priv->abort_scan, iwl_bg_abort_scan); + INIT_WORK(&priv->rf_kill, iwl_bg_rf_kill); + INIT_WORK(&priv->beacon_update, iwl_bg_beacon_update); + INIT_DELAYED_WORK(&priv->post_associate, iwl_bg_post_associate); + INIT_DELAYED_WORK(&priv->init_alive_start, iwl_bg_init_alive_start); + INIT_DELAYED_WORK(&priv->alive_start, iwl_bg_alive_start); + INIT_DELAYED_WORK(&priv->scan_check, iwl_bg_scan_check); + + iwl_hw_setup_deferred_work(priv); + + tasklet_init(&priv->irq_tasklet, (void (*)(unsigned long)) + iwl_irq_tasklet, (unsigned long)priv); +} + +static void iwl_cancel_deferred_work(struct iwl_priv *priv) +{ + iwl_hw_cancel_deferred_work(priv); + + cancel_delayed_work(&priv->scan_check); + cancel_delayed_work(&priv->alive_start); + cancel_delayed_work(&priv->post_associate); + cancel_work_sync(&priv->beacon_update); +} + +static struct attribute *iwl_sysfs_entries[] = { + &dev_attr_antenna.attr, + &dev_attr_channels.attr, + &dev_attr_dump_errors.attr, + &dev_attr_dump_events.attr, + &dev_attr_flags.attr, + &dev_attr_filter_flags.attr, +#ifdef CONFIG_IWLWIFI_SPECTRUM_MEASUREMENT + &dev_attr_measurement.attr, +#endif + &dev_attr_power_level.attr, + &dev_attr_retry_rate.attr, + &dev_attr_rf_kill.attr, + &dev_attr_rs_window.attr, + &dev_attr_statistics.attr, + &dev_attr_status.attr, + &dev_attr_temperature.attr, + &dev_attr_tune.attr, + &dev_attr_tx_power.attr, + + NULL +}; + +static struct attribute_group iwl_attribute_group = { + .name = NULL, /* put in device directory */ + .attrs = iwl_sysfs_entries, +}; + +static struct ieee80211_ops iwl_hw_ops = { + .tx = iwl_mac_tx, + .start = iwl_mac_start, + .stop = iwl_mac_stop, + .add_interface = iwl_mac_add_interface, + .remove_interface = iwl_mac_remove_interface, + .config = iwl_mac_config, + .config_interface = iwl_mac_config_interface, + .configure_filter = iwl_configure_filter, + .set_key = iwl_mac_set_key, + .get_stats = iwl_mac_get_stats, + .get_tx_stats = iwl_mac_get_tx_stats, + .conf_tx = iwl_mac_conf_tx, + .get_tsf = iwl_mac_get_tsf, + .reset_tsf = iwl_mac_reset_tsf, + .beacon_update = iwl_mac_beacon_update, +#ifdef CONFIG_IWLWIFI_HT + .conf_ht = iwl_mac_conf_ht, + .get_ht_capab = iwl_mac_get_ht_capab, +#ifdef CONFIG_IWLWIFI_HT_AGG + .ht_tx_agg_start = iwl_mac_ht_tx_agg_start, + .ht_tx_agg_stop = iwl_mac_ht_tx_agg_stop, + .ht_rx_agg_start = iwl_mac_ht_rx_agg_start, + .ht_rx_agg_stop = iwl_mac_ht_rx_agg_stop, +#endif /* CONFIG_IWLWIFI_HT_AGG */ +#endif /* CONFIG_IWLWIFI_HT */ + .hw_scan = iwl_mac_hw_scan +}; + +static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) +{ + int err = 0; + struct iwl_priv *priv; + struct ieee80211_hw *hw; + int i; + + if 
(iwl_param_disable_hw_scan) { + IWL_DEBUG_INFO("Disabling hw_scan\n"); + iwl_hw_ops.hw_scan = NULL; + } + + if ((iwl_param_queues_num > IWL_MAX_NUM_QUEUES) || + (iwl_param_queues_num < IWL_MIN_NUM_QUEUES)) { + IWL_ERROR("invalid queues_num, should be between %d and %d\n", + IWL_MIN_NUM_QUEUES, IWL_MAX_NUM_QUEUES); + err = -EINVAL; + goto out; + } + + /* mac80211 allocates memory for this device instance, including + * space for this driver's private structure */ + hw = ieee80211_alloc_hw(sizeof(struct iwl_priv), &iwl_hw_ops); + if (hw == NULL) { + IWL_ERROR("Can not allocate network device\n"); + err = -ENOMEM; + goto out; + } + SET_IEEE80211_DEV(hw, &pdev->dev); + + IWL_DEBUG_INFO("*** LOAD DRIVER ***\n"); + priv = hw->priv; + priv->hw = hw; + + priv->pci_dev = pdev; + priv->antenna = (enum iwl_antenna)iwl_param_antenna; +#ifdef CONFIG_IWLWIFI_DEBUG + iwl_debug_level = iwl_param_debug; + atomic_set(&priv->restrict_refcnt, 0); +#endif + priv->retry_rate = 1; + + priv->ibss_beacon = NULL; + + /* Tell mac80211 and its clients (e.g. Wireless Extensions) + * the range of signal quality values that we'll provide. + * Negative values for level/noise indicate that we'll provide dBm. + * For WE, at least, non-0 values here *enable* display of values + * in app (iwconfig). */ + hw->max_rssi = -20; /* signal level, negative indicates dBm */ + hw->max_noise = -20; /* noise level, negative indicates dBm */ + hw->max_signal = 100; /* link quality indication (%) */ + + /* Tell mac80211 our Tx characteristics */ + hw->flags = IEEE80211_HW_HOST_GEN_BEACON_TEMPLATE; + + hw->queues = 4; +#ifdef CONFIG_IWLWIFI_HT +#ifdef CONFIG_IWLWIFI_HT_AGG + hw->queues = 16; +#endif /* CONFIG_IWLWIFI_HT_AGG */ +#endif /* CONFIG_IWLWIFI_HT */ + + spin_lock_init(&priv->lock); + spin_lock_init(&priv->power_data.lock); + spin_lock_init(&priv->sta_lock); + spin_lock_init(&priv->hcmd_lock); + spin_lock_init(&priv->lq_mngr.lock); + + for (i = 0; i < IWL_IBSS_MAC_HASH_SIZE; i++) + INIT_LIST_HEAD(&priv->ibss_mac_hash[i]); + + INIT_LIST_HEAD(&priv->free_frames); + + mutex_init(&priv->mutex); + if (pci_enable_device(pdev)) { + err = -ENODEV; + goto out_ieee80211_free_hw; + } + + pci_set_master(pdev); + + iwl_clear_stations_table(priv); + + priv->data_retry_limit = -1; + priv->ieee_channels = NULL; + priv->ieee_rates = NULL; + priv->phymode = -1; + + err = pci_set_dma_mask(pdev, DMA_32BIT_MASK); + if (!err) + err = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK); + if (err) { + printk(KERN_WARNING DRV_NAME ": No suitable DMA available.\n"); + goto out_pci_disable_device; + } + + pci_set_drvdata(pdev, priv); + err = pci_request_regions(pdev, DRV_NAME); + if (err) + goto out_pci_disable_device; + /* We disable the RETRY_TIMEOUT register (0x41) to keep + * PCI Tx retries from interfering with C3 CPU state */ + pci_write_config_byte(pdev, 0x41, 0x00); + priv->hw_base = pci_iomap(pdev, 0, 0); + if (!priv->hw_base) { + err = -ENODEV; + goto out_pci_release_regions; + } + + IWL_DEBUG_INFO("pci_resource_len = 0x%08llx\n", + (unsigned long long) pci_resource_len(pdev, 0)); + IWL_DEBUG_INFO("pci_resource_base = %p\n", priv->hw_base); + + /* Initialize module parameter values here */ + + if (iwl_param_disable) { + set_bit(STATUS_RF_KILL_SW, &priv->status); + IWL_DEBUG_INFO("Radio disabled.\n"); + } + + priv->iw_mode = IEEE80211_IF_TYPE_STA; + + priv->ps_mode = 0; + priv->use_ant_b_for_management_frame = 1; /* start with ant B */ + priv->is_ht_enabled = 1; + priv->channel_width = IWL_CHANNEL_WIDTH_40MHZ; + priv->valid_antenna = 0x7; /* assume 
all 3 connected */ + priv->ps_mode = IWL_MIMO_PS_NONE; + priv->cck_power_index_compensation = iwl_read32( + priv, CSR_HW_REV_WA_REG); + + iwl4965_set_rxon_chain(priv); + + printk(KERN_INFO DRV_NAME + ": Detected Intel Wireless WiFi Link 4965AGN\n"); + + /* Device-specific setup */ + if (iwl_hw_set_hw_setting(priv)) { + IWL_ERROR("failed to set hw settings\n"); + mutex_unlock(&priv->mutex); + goto out_iounmap; + } + +#ifdef CONFIG_IWLWIFI_QOS + if (iwl_param_qos_enable) + priv->qos_data.qos_enable = 1; + + iwl_reset_qos(priv); + + priv->qos_data.qos_active = 0; + priv->qos_data.qos_cap.val = 0; +#endif /* CONFIG_IWLWIFI_QOS */ + + iwl_set_rxon_channel(priv, MODE_IEEE80211G, 6); + iwl_setup_deferred_work(priv); + iwl_setup_rx_handlers(priv); + + priv->rates_mask = IWL_RATES_MASK; + /* If power management is turned on, default to AC mode */ + priv->power_mode = IWL_POWER_AC; + priv->user_txpower_limit = IWL_DEFAULT_TX_POWER; + + pci_enable_msi(pdev); + + err = request_irq(pdev->irq, iwl_isr, IRQF_SHARED, DRV_NAME, priv); + if (err) { + IWL_ERROR("Error allocating IRQ %d\n", pdev->irq); + goto out_disable_msi; + } + + mutex_lock(&priv->mutex); + + err = sysfs_create_group(&pdev->dev.kobj, &iwl_attribute_group); + if (err) { + IWL_ERROR("failed to create sysfs device attributes\n"); + mutex_unlock(&priv->mutex); + goto out_release_irq; + } + + /* fetch ucode file from disk, alloc and copy to bus-master buffers ... + * ucode filename and max sizes are card-specific. */ + err = iwl_read_ucode(priv); + if (err) { + IWL_ERROR("Could not read microcode: %d\n", err); + mutex_unlock(&priv->mutex); + goto out_pci_alloc; + } + + mutex_unlock(&priv->mutex); + + IWL_DEBUG_INFO("Queing UP work.\n"); + + queue_work(priv->workqueue, &priv->up); + + return 0; + + out_pci_alloc: + iwl_dealloc_ucode_pci(priv); + + sysfs_remove_group(&pdev->dev.kobj, &iwl_attribute_group); + + out_release_irq: + free_irq(pdev->irq, priv); + + out_disable_msi: + pci_disable_msi(pdev); + destroy_workqueue(priv->workqueue); + priv->workqueue = NULL; + iwl_unset_hw_setting(priv); + + out_iounmap: + pci_iounmap(pdev, priv->hw_base); + out_pci_release_regions: + pci_release_regions(pdev); + out_pci_disable_device: + pci_disable_device(pdev); + pci_set_drvdata(pdev, NULL); + out_ieee80211_free_hw: + ieee80211_free_hw(priv->hw); + out: + return err; +} + +static void iwl_pci_remove(struct pci_dev *pdev) +{ + struct iwl_priv *priv = pci_get_drvdata(pdev); + struct list_head *p, *q; + int i; + + if (!priv) + return; + + IWL_DEBUG_INFO("*** UNLOAD DRIVER ***\n"); + + mutex_lock(&priv->mutex); + set_bit(STATUS_EXIT_PENDING, &priv->status); + __iwl_down(priv); + mutex_unlock(&priv->mutex); + + /* Free MAC hash list for ADHOC */ + for (i = 0; i < IWL_IBSS_MAC_HASH_SIZE; i++) { + list_for_each_safe(p, q, &priv->ibss_mac_hash[i]) { + list_del(p); + kfree(list_entry(p, struct iwl_ibss_seq, list)); + } + } + + sysfs_remove_group(&pdev->dev.kobj, &iwl_attribute_group); + + iwl_dealloc_ucode_pci(priv); + + if (priv->rxq.bd) + iwl_rx_queue_free(priv, &priv->rxq); + iwl_hw_txq_ctx_free(priv); + + iwl_unset_hw_setting(priv); + iwl_clear_stations_table(priv); + + if (priv->mac80211_registered) { + ieee80211_unregister_hw(priv->hw); + iwl_rate_control_unregister(priv->hw); + } + + /* ieee80211_unregister_hw calls iwl_mac_stop, which flushes + * priv->workqueue... so we can't take down the workqueue + * until now... 
*/ + destroy_workqueue(priv->workqueue); + priv->workqueue = NULL; + + free_irq(pdev->irq, priv); + pci_disable_msi(pdev); + pci_iounmap(pdev, priv->hw_base); + pci_release_regions(pdev); + pci_disable_device(pdev); + pci_set_drvdata(pdev, NULL); + + kfree(priv->channel_info); + + kfree(priv->ieee_channels); + kfree(priv->ieee_rates); + + if (priv->ibss_beacon) + dev_kfree_skb(priv->ibss_beacon); + + ieee80211_free_hw(priv->hw); +} + +#ifdef CONFIG_PM + +static int iwl_pci_suspend(struct pci_dev *pdev, pm_message_t state) +{ + struct iwl_priv *priv = pci_get_drvdata(pdev); + + mutex_lock(&priv->mutex); + + set_bit(STATUS_IN_SUSPEND, &priv->status); + + /* Take down the device; powers it off, etc. */ + __iwl_down(priv); + + if (priv->mac80211_registered) + ieee80211_stop_queues(priv->hw); + + pci_save_state(pdev); + pci_disable_device(pdev); + pci_set_power_state(pdev, PCI_D3hot); + + mutex_unlock(&priv->mutex); + + return 0; +} + +static void iwl_resume(struct iwl_priv *priv) +{ + unsigned long flags; + + /* The following it a temporary work around due to the + * suspend / resume not fully initializing the NIC correctly. + * Without all of the following, resume will not attempt to take + * down the NIC (it shouldn't really need to) and will just try + * and bring the NIC back up. However that fails during the + * ucode verification process. This then causes iwl_down to be + * called *after* iwl_hw_nic_init() has succeeded -- which + * then lets the next init sequence succeed. So, we've + * replicated all of that NIC init code here... */ + + iwl_write32(priv, CSR_INT, 0xFFFFFFFF); + + iwl_hw_nic_init(priv); + + iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL); + iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, + CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED); + iwl_write32(priv, CSR_INT, 0xFFFFFFFF); + iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL); + iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL); + + /* tell the device to stop sending interrupts */ + iwl_disable_interrupts(priv); + + spin_lock_irqsave(&priv->lock, flags); + iwl_clear_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); + + if (!iwl_grab_restricted_access(priv)) { + iwl_write_restricted_reg(priv, APMG_CLK_DIS_REG, + APMG_CLK_VAL_DMA_CLK_RQT); + iwl_release_restricted_access(priv); + } + spin_unlock_irqrestore(&priv->lock, flags); + + udelay(5); + + iwl_hw_nic_reset(priv); + + /* Bring the device back up */ + clear_bit(STATUS_IN_SUSPEND, &priv->status); + queue_work(priv->workqueue, &priv->up); +} + +static int iwl_pci_resume(struct pci_dev *pdev) +{ + struct iwl_priv *priv = pci_get_drvdata(pdev); + int err; + + printk(KERN_INFO "Coming out of suspend...\n"); + + mutex_lock(&priv->mutex); + + pci_set_power_state(pdev, PCI_D0); + err = pci_enable_device(pdev); + pci_restore_state(pdev); + + /* + * Suspend/Resume resets the PCI configuration space, so we have to + * re-disable the RETRY_TIMEOUT register (0x41) to keep PCI Tx retries + * from interfering with C3 CPU state. pci_restore_state won't help + * here since it only restores the first 64 bytes pci config header. 
+ */ + pci_write_config_byte(pdev, 0x41, 0x00); + + iwl_resume(priv); + mutex_unlock(&priv->mutex); + + return 0; +} + +#endif /* CONFIG_PM */ + +/***************************************************************************** + * + * driver and module entry point + * + *****************************************************************************/ + +static struct pci_driver iwl_driver = { + .name = DRV_NAME, + .id_table = iwl_hw_card_ids, + .probe = iwl_pci_probe, + .remove = __devexit_p(iwl_pci_remove), +#ifdef CONFIG_PM + .suspend = iwl_pci_suspend, + .resume = iwl_pci_resume, +#endif +}; + +static int __init iwl_init(void) +{ + + int ret; + printk(KERN_INFO DRV_NAME ": " DRV_DESCRIPTION ", " DRV_VERSION "\n"); + printk(KERN_INFO DRV_NAME ": " DRV_COPYRIGHT "\n"); + ret = pci_register_driver(&iwl_driver); + if (ret) { + IWL_ERROR("Unable to initialize PCI module\n"); + return ret; + } +#ifdef CONFIG_IWLWIFI_DEBUG + ret = driver_create_file(&iwl_driver.driver, &driver_attr_debug_level); + if (ret) { + IWL_ERROR("Unable to create driver sysfs file\n"); + pci_unregister_driver(&iwl_driver); + return ret; + } +#endif + + return ret; +} + +static void __exit iwl_exit(void) +{ +#ifdef CONFIG_IWLWIFI_DEBUG + driver_remove_file(&iwl_driver.driver, &driver_attr_debug_level); +#endif + pci_unregister_driver(&iwl_driver); +} + +module_param_named(antenna, iwl_param_antenna, int, 0444); +MODULE_PARM_DESC(antenna, "select antenna (1=Main, 2=Aux, default 0 [both])"); +module_param_named(disable, iwl_param_disable, int, 0444); +MODULE_PARM_DESC(disable, "manually disable the radio (default 0 [radio on])"); +module_param_named(hwcrypto, iwl_param_hwcrypto, int, 0444); +MODULE_PARM_DESC(hwcrypto, + "using hardware crypto engine (default 0 [software])\n"); +module_param_named(debug, iwl_param_debug, int, 0444); +MODULE_PARM_DESC(debug, "debug output mask"); +module_param_named(disable_hw_scan, iwl_param_disable_hw_scan, int, 0444); +MODULE_PARM_DESC(disable_hw_scan, "disable hardware scanning (default 0)"); + +module_param_named(queues_num, iwl_param_queues_num, int, 0444); +MODULE_PARM_DESC(queues_num, "number of hw queues."); + +/* QoS */ +module_param_named(qos_enable, iwl_param_qos_enable, int, 0444); +MODULE_PARM_DESC(qos_enable, "enable all QoS functionality"); + +module_exit(iwl_exit); +module_init(iwl_init); diff --git a/drivers/net/wireless/iwlwifi/iwlwifi.h b/drivers/net/wireless/iwlwifi/iwlwifi.h new file mode 100644 index 0000000000000000000000000000000000000000..e0b97c3412153fee5f6b729d506d5b9f77b8da08 --- /dev/null +++ b/drivers/net/wireless/iwlwifi/iwlwifi.h @@ -0,0 +1,713 @@ +/****************************************************************************** + * + * Copyright(c) 2003 - 2007 Intel Corporation. All rights reserved. + * + * Portions of this file are derived from the ipw3945 project, as well + * as portions of the ieee80211 subsystem header files. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+ * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA + * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. + * + * Contact Information: + * James P. Ketrenos + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + *****************************************************************************/ + +#ifndef __iwlwifi_h__ +#define __iwlwifi_h__ + +#include <linux/pci.h> /* for struct pci_device_id */ +#include <linux/kernel.h> +#include <net/ieee80211_radiotap.h> + +struct iwl_priv; + +/* Hardware specific file defines the PCI IDs table for that hardware module */ +extern struct pci_device_id iwl_hw_card_ids[]; + +#if IWL == 3945 + +#define DRV_NAME "iwl3945" +#include "iwl-hw.h" +#include "iwl-3945-hw.h" + +#elif IWL == 4965 + +#define DRV_NAME "iwl4965" +#include "iwl-hw.h" +#include "iwl-4965-hw.h" + +#endif + +#include "iwl-prph.h" + +/* + * Driver implementation data structures, constants, inline + * functions + * + * NOTE: DO NOT PUT HARDWARE/UCODE SPECIFIC DECLARATIONS HERE + * + * Hardware specific declarations go into iwl-*hw.h + * + */ + +#include "iwl-debug.h" + +/* Default noise level to report when noise measurement is not available. + * This may be because we're: + * 1) Not associated (4965, no beacon statistics being sent to driver) + * 2) Scanning (noise measurement does not apply to associated channel) + * 3) Receiving CCK (3945 delivers noise info only for OFDM frames) + * Use default noise value of -127 ... this is below the range of measurable + * Rx dBm for either 3945 or 4965, so it can indicate "unmeasurable" to user. + * Also, -127 works better than 0 when averaging frames with/without + * noise info (e.g. averaging might be done in app); measured dBm values are + * always negative ... using a negative value as the default keeps all + * averages within an s8's (used in some apps) range of negative values. */ +#define IWL_NOISE_MEAS_NOT_AVAILABLE (-127) + +/* Module parameters accessible from iwl-*.c */ +extern int iwl_param_disable_hw_scan; +extern int iwl_param_debug; +extern int iwl_param_mode; +extern int iwl_param_disable; +extern int iwl_param_antenna; +extern int iwl_param_hwcrypto; +extern int iwl_param_qos_enable; +extern int iwl_param_queues_num; + +enum iwl_antenna { + IWL_ANTENNA_DIVERSITY, + IWL_ANTENNA_MAIN, + IWL_ANTENNA_AUX +}; + +/* + * RTS threshold here is total size [2347] minus 4 FCS bytes + * Per spec: + * a value of 0 means RTS on all data/management packets + * a value > max MSDU size means no RTS + * else RTS for data/management frames where MPDU is larger + * than RTS value. + */ +#define DEFAULT_RTS_THRESHOLD 2347U +#define MIN_RTS_THRESHOLD 0U +#define MAX_RTS_THRESHOLD 2347U +#define MAX_MSDU_SIZE 2304U +#define MAX_MPDU_SIZE 2346U +#define DEFAULT_BEACON_INTERVAL 100U +#define DEFAULT_SHORT_RETRY_LIMIT 7U +#define DEFAULT_LONG_RETRY_LIMIT 4U + +struct iwl_rx_mem_buffer { + dma_addr_t dma_addr; + struct sk_buff *skb; + struct list_head list; +}; + +struct iwl_rt_rx_hdr { + struct ieee80211_radiotap_header rt_hdr; + __le64 rt_tsf; /* TSF */ + u8 rt_flags; /* radiotap packet flags */ + u8 rt_rate; /* rate in 500kb/s */ + __le16 rt_channelMHz; /* channel in MHz */ + __le16 rt_chbitmask; /* channel bitfield */ + s8 rt_dbmsignal; /* signal in dBm, kluged to signed */ + s8 rt_dbmnoise; + u8 rt_antenna; /* antenna number */ + u8 payload[0]; /* payload...
*/ +} __attribute__ ((packed)); + +struct iwl_rt_tx_hdr { + struct ieee80211_radiotap_header rt_hdr; + u8 rt_rate; /* rate in 500kb/s */ + __le16 rt_channel; /* channel in mHz */ + __le16 rt_chbitmask; /* channel bitfield */ + s8 rt_dbmsignal; /* signal in dBm, kluged to signed */ + u8 rt_antenna; /* antenna number */ + u8 payload[0]; /* payload... */ +} __attribute__ ((packed)); + +/* + * Generic queue structure + * + * Contains common data for Rx and Tx queues + */ +struct iwl_queue { + int n_bd; /* number of BDs in this queue */ + int first_empty; /* 1-st empty entry (index) host_w*/ + int last_used; /* last used entry (index) host_r*/ + dma_addr_t dma_addr; /* physical addr for BD's */ + int n_window; /* safe queue window */ + u32 id; + int low_mark; /* low watermark, resume queue if free + * space more than this */ + int high_mark; /* high watermark, stop queue if free + * space less than this */ +} __attribute__ ((packed)); + +#define MAX_NUM_OF_TBS (20) + +struct iwl_tx_info { + struct ieee80211_tx_status status; + struct sk_buff *skb[MAX_NUM_OF_TBS]; +}; + +/** + * struct iwl_tx_queue - Tx Queue for DMA + * @need_update: need to update read/write index + * @shed_retry: queue is HT AGG enabled + * + * Queue consists of circular buffer of BD's and required locking structures. + */ +struct iwl_tx_queue { + struct iwl_queue q; + struct iwl_tfd_frame *bd; + struct iwl_cmd *cmd; + dma_addr_t dma_addr_cmd; + struct iwl_tx_info *txb; + int need_update; + int sched_retry; + int active; +}; + +#include "iwl-channel.h" + +#if IWL == 3945 +#include "iwl-3945-rs.h" +#else +#include "iwl-4965-rs.h" +#endif + +#define IWL_TX_FIFO_AC0 0 +#define IWL_TX_FIFO_AC1 1 +#define IWL_TX_FIFO_AC2 2 +#define IWL_TX_FIFO_AC3 3 +#define IWL_TX_FIFO_HCCA_1 5 +#define IWL_TX_FIFO_HCCA_2 6 +#define IWL_TX_FIFO_NONE 7 + +/* Minimum number of queues. 
MAX_NUM is defined in hw specific files */ +#define IWL_MIN_NUM_QUEUES 4 + +/* Power management (not Tx power) structures */ + +struct iwl_power_vec_entry { + struct iwl_powertable_cmd cmd; + u8 no_dtim; +}; +#define IWL_POWER_RANGE_0 (0) +#define IWL_POWER_RANGE_1 (1) + +#define IWL_POWER_MODE_CAM 0x00 /* Continuously Aware Mode, always on */ +#define IWL_POWER_INDEX_3 0x03 +#define IWL_POWER_INDEX_5 0x05 +#define IWL_POWER_AC 0x06 +#define IWL_POWER_BATTERY 0x07 +#define IWL_POWER_LIMIT 0x07 +#define IWL_POWER_MASK 0x0F +#define IWL_POWER_ENABLED 0x10 +#define IWL_POWER_LEVEL(x) ((x) & IWL_POWER_MASK) + +struct iwl_power_mgr { + spinlock_t lock; + struct iwl_power_vec_entry pwr_range_0[IWL_POWER_AC]; + struct iwl_power_vec_entry pwr_range_1[IWL_POWER_AC]; + u8 active_index; + u32 dtim_val; +}; + +#define IEEE80211_DATA_LEN 2304 +#define IEEE80211_4ADDR_LEN 30 +#define IEEE80211_HLEN (IEEE80211_4ADDR_LEN) +#define IEEE80211_FRAME_LEN (IEEE80211_DATA_LEN + IEEE80211_HLEN) + +struct iwl_frame { + union { + struct ieee80211_hdr frame; + struct iwl_tx_beacon_cmd beacon; + u8 raw[IEEE80211_FRAME_LEN]; + u8 cmd[360]; + } u; + struct list_head list; +}; + +#define SEQ_TO_QUEUE(x) ((x >> 8) & 0xbf) +#define QUEUE_TO_SEQ(x) ((x & 0xbf) << 8) +#define SEQ_TO_INDEX(x) (x & 0xff) +#define INDEX_TO_SEQ(x) (x & 0xff) +#define SEQ_HUGE_FRAME (0x4000) +#define SEQ_RX_FRAME __constant_cpu_to_le16(0x8000) +#define SEQ_TO_SN(seq) (((seq) & IEEE80211_SCTL_SEQ) >> 4) +#define SN_TO_SEQ(ssn) (((ssn) << 4) & IEEE80211_SCTL_SEQ) +#define MAX_SN ((IEEE80211_SCTL_SEQ) >> 4) + +enum { + /* CMD_SIZE_NORMAL = 0, */ + CMD_SIZE_HUGE = (1 << 0), + /* CMD_SYNC = 0, */ + CMD_ASYNC = (1 << 1), + /* CMD_NO_SKB = 0, */ + CMD_WANT_SKB = (1 << 2), +}; + +struct iwl_cmd; +struct iwl_priv; + +struct iwl_cmd_meta { + struct iwl_cmd_meta *source; + union { + struct sk_buff *skb; + int (*callback)(struct iwl_priv *priv, + struct iwl_cmd *cmd, struct sk_buff *skb); + } __attribute__ ((packed)) u; + + /* The CMD_SIZE_HUGE flag bit indicates that the command + * structure is stored at the end of the shared queue memory. 
*/ + u32 flags; + +} __attribute__ ((packed)); + +struct iwl_cmd { + struct iwl_cmd_meta meta; + struct iwl_cmd_header hdr; + union { + struct iwl_addsta_cmd addsta; + struct iwl_led_cmd led; + u32 flags; + u8 val8; + u16 val16; + u32 val32; + struct iwl_bt_cmd bt; + struct iwl_rxon_time_cmd rxon_time; + struct iwl_powertable_cmd powertable; + struct iwl_qosparam_cmd qosparam; + struct iwl_tx_cmd tx; + struct iwl_tx_beacon_cmd tx_beacon; + struct iwl_rxon_assoc_cmd rxon_assoc; + u8 *indirect; + u8 payload[360]; + } __attribute__ ((packed)) cmd; +} __attribute__ ((packed)); + +struct iwl_host_cmd { + u8 id; + u16 len; + struct iwl_cmd_meta meta; + const void *data; +}; + +#define TFD_MAX_PAYLOAD_SIZE (sizeof(struct iwl_cmd) - \ + sizeof(struct iwl_cmd_meta)) + +/* + * RX related structures and functions + */ +#define RX_FREE_BUFFERS 64 +#define RX_LOW_WATERMARK 8 + +#define SUP_RATE_11A_MAX_NUM_CHANNELS 8 +#define SUP_RATE_11B_MAX_NUM_CHANNELS 4 +#define SUP_RATE_11G_MAX_NUM_CHANNELS 12 + +/** + * struct iwl_rx_queue - Rx queue + * @processed: Internal index to last handled Rx packet + * @read: Shared index to newest available Rx buffer + * @write: Shared index to oldest written Rx packet + * @free_count: Number of pre-allocated buffers in rx_free + * @rx_free: list of free SKBs for use + * @rx_used: List of Rx buffers with no SKB + * @need_update: flag to indicate we need to update read/write index + * + * NOTE: rx_free and rx_used are used as a FIFO for iwl_rx_mem_buffers + */ +struct iwl_rx_queue { + __le32 *bd; + dma_addr_t dma_addr; + struct iwl_rx_mem_buffer pool[RX_QUEUE_SIZE + RX_FREE_BUFFERS]; + struct iwl_rx_mem_buffer *queue[RX_QUEUE_SIZE]; + u32 processed; + u32 read; + u32 write; + u32 free_count; + struct list_head rx_free; + struct list_head rx_used; + int need_update; + spinlock_t lock; +}; + +#define IWL_SUPPORTED_RATES_IE_LEN 8 + +#define SCAN_INTERVAL 100 + +#define MAX_A_CHANNELS 252 +#define MIN_A_CHANNELS 7 + +#define MAX_B_CHANNELS 14 +#define MIN_B_CHANNELS 1 + +#define STATUS_HCMD_ACTIVE 0 /* host command in progress */ +#define STATUS_INT_ENABLED 1 +#define STATUS_RF_KILL_HW 2 +#define STATUS_RF_KILL_SW 3 +#define STATUS_INIT 4 +#define STATUS_ALIVE 5 +#define STATUS_READY 6 +#define STATUS_TEMPERATURE 7 +#define STATUS_GEO_CONFIGURED 8 +#define STATUS_EXIT_PENDING 9 +#define STATUS_IN_SUSPEND 10 +#define STATUS_STATISTICS 11 +#define STATUS_SCANNING 12 +#define STATUS_SCAN_ABORTING 13 +#define STATUS_SCAN_HW 14 +#define STATUS_POWER_PMI 15 +#define STATUS_FW_ERROR 16 + +#define MAX_TID_COUNT 9 + +#define IWL_INVALID_RATE 0xFF +#define IWL_INVALID_VALUE -1 + +#if IWL == 4965 +#ifdef CONFIG_IWLWIFI_HT +#ifdef CONFIG_IWLWIFI_HT_AGG +struct iwl_ht_agg { + u16 txq_id; + u16 frame_count; + u16 wait_for_ba; + u16 start_idx; + u32 bitmap0; + u32 bitmap1; + u32 rate_n_flags; +}; +#endif /* CONFIG_IWLWIFI_HT_AGG */ +#endif /* CONFIG_IWLWIFI_HT */ +#endif + +struct iwl_tid_data { + u16 seq_number; +#if IWL == 4965 +#ifdef CONFIG_IWLWIFI_HT +#ifdef CONFIG_IWLWIFI_HT_AGG + struct iwl_ht_agg agg; +#endif /* CONFIG_IWLWIFI_HT_AGG */ +#endif /* CONFIG_IWLWIFI_HT */ +#endif +}; + +struct iwl_hw_key { + enum ieee80211_key_alg alg; + int keylen; + u8 key[32]; +}; + +union iwl_ht_rate_supp { + u16 rates; + struct { + u8 siso_rate; + u8 mimo_rate; + }; +}; + +#ifdef CONFIG_IWLWIFI_HT +#define CFG_HT_RX_AMPDU_FACTOR_DEF (0x3) +#define HT_IE_MAX_AMSDU_SIZE_4K (0) +#define CFG_HT_MPDU_DENSITY_2USEC (0x5) +#define CFG_HT_MPDU_DENSITY_DEF CFG_HT_MPDU_DENSITY_2USEC + +struct sta_ht_info { + 
u8 is_ht; + u16 rx_mimo_ps_mode; + u16 tx_mimo_ps_mode; + u16 control_channel; + u8 max_amsdu_size; + u8 ampdu_factor; + u8 mpdu_density; + u8 operating_mode; + u8 supported_chan_width; + u8 extension_chan_offset; + u8 is_green_field; + u8 sgf; + u8 supp_rates[16]; + u8 tx_chan_width; + u8 chan_width_cap; +}; +#endif /*CONFIG_IWLWIFI_HT */ + +#ifdef CONFIG_IWLWIFI_QOS + +union iwl_qos_capabity { + struct { + u8 edca_count:4; /* bit 0-3 */ + u8 q_ack:1; /* bit 4 */ + u8 queue_request:1; /* bit 5 */ + u8 txop_request:1; /* bit 6 */ + u8 reserved:1; /* bit 7 */ + } q_AP; + struct { + u8 acvo_APSD:1; /* bit 0 */ + u8 acvi_APSD:1; /* bit 1 */ + u8 ac_bk_APSD:1; /* bit 2 */ + u8 ac_be_APSD:1; /* bit 3 */ + u8 q_ack:1; /* bit 4 */ + u8 max_len:2; /* bit 5-6 */ + u8 more_data_ack:1; /* bit 7 */ + } q_STA; + u8 val; +}; + +/* QoS sturctures */ +struct iwl_qos_info { + int qos_enable; + int qos_active; + union iwl_qos_capabity qos_cap; + struct iwl_qosparam_cmd def_qos_parm; +}; +#endif /*CONFIG_IWLWIFI_QOS */ + +#define STA_PS_STATUS_WAKE 0 +#define STA_PS_STATUS_SLEEP 1 + +struct iwl_station_entry { + struct iwl_addsta_cmd sta; + struct iwl_tid_data tid[MAX_TID_COUNT]; +#if IWL == 3945 + union { + struct { + u8 rate; + u8 flags; + } s; + u16 rate_n_flags; + } current_rate; +#endif + u8 used; + u8 ps_status; + struct iwl_hw_key keyinfo; +}; + +/* one for each uCode image (inst/data, boot/init/runtime) */ +struct fw_image_desc { + void *v_addr; /* access by driver */ + dma_addr_t p_addr; /* access by card's busmaster DMA */ + u32 len; /* bytes */ +}; + +/* uCode file layout */ +struct iwl_ucode { + __le32 ver; /* major/minor/subminor */ + __le32 inst_size; /* bytes of runtime instructions */ + __le32 data_size; /* bytes of runtime data */ + __le32 init_size; /* bytes of initialization instructions */ + __le32 init_data_size; /* bytes of initialization data */ + __le32 boot_size; /* bytes of bootstrap instructions */ + u8 data[0]; /* data in same order as "size" elements */ +}; + +#define IWL_IBSS_MAC_HASH_SIZE 32 + +struct iwl_ibss_seq { + u8 mac[ETH_ALEN]; + u16 seq_num; + u16 frag_num; + unsigned long packet_time; + struct list_head list; +}; + +struct iwl_driver_hw_info { + u16 max_txq_num; + u16 ac_queue_count; + u32 rx_buffer_size; + u16 tx_cmd_len; + u16 max_rxq_size; + u16 max_rxq_log; + u32 cck_flag; + u8 max_stations; + u8 bcast_sta_id; + void *shared_virt; + dma_addr_t shared_phys; +}; + + +#define STA_FLG_RTS_MIMO_PROT_MSK __constant_cpu_to_le32(1 << 17) +#define STA_FLG_AGG_MPDU_8US_MSK __constant_cpu_to_le32(1 << 18) +#define STA_FLG_MAX_AGG_SIZE_POS (19) +#define STA_FLG_MAX_AGG_SIZE_MSK __constant_cpu_to_le32(3 << 19) +#define STA_FLG_FAT_EN_MSK __constant_cpu_to_le32(1 << 21) +#define STA_FLG_MIMO_DIS_MSK __constant_cpu_to_le32(1 << 22) +#define STA_FLG_AGG_MPDU_DENSITY_POS (23) +#define STA_FLG_AGG_MPDU_DENSITY_MSK __constant_cpu_to_le32(7 << 23) +#define HT_SHORT_GI_20MHZ_ONLY (1 << 0) +#define HT_SHORT_GI_40MHZ_ONLY (1 << 1) + + +#include "iwl-priv.h" + +/* Requires full declaration of iwl_priv before including */ +#include "iwl-io.h" + +#define IWL_RX_HDR(x) ((struct iwl_rx_frame_hdr *)(\ + x->u.rx_frame.stats.payload + \ + x->u.rx_frame.stats.phy_count)) +#define IWL_RX_END(x) ((struct iwl_rx_frame_end *)(\ + IWL_RX_HDR(x)->payload + \ + le16_to_cpu(IWL_RX_HDR(x)->len))) +#define IWL_RX_STATS(x) (&x->u.rx_frame.stats) +#define IWL_RX_DATA(x) (IWL_RX_HDR(x)->payload) + + +/****************************************************************************** + * + * Functions implemented 
in iwl-base.c which are forward declared here + * for use by iwl-*.c + * + *****************************************************************************/ +struct iwl_addsta_cmd; +extern int iwl_send_add_station(struct iwl_priv *priv, + struct iwl_addsta_cmd *sta, u8 flags); +extern const char *iwl_get_tx_fail_reason(u32 status); +extern u8 iwl_add_station(struct iwl_priv *priv, const u8 *bssid, + int is_ap, u8 flags); +extern int iwl_is_network_packet(struct iwl_priv *priv, + struct ieee80211_hdr *header); +extern int iwl_power_init_handle(struct iwl_priv *priv); +extern int iwl_eeprom_init(struct iwl_priv *priv); +#ifdef CONFIG_IWLWIFI_DEBUG +extern void iwl_report_frame(struct iwl_priv *priv, + struct iwl_rx_packet *pkt, + struct ieee80211_hdr *header, int group100); +#else +static inline void iwl_report_frame(struct iwl_priv *priv, + struct iwl_rx_packet *pkt, + struct ieee80211_hdr *header, + int group100) {} +#endif +extern int iwl_tx_queue_update_write_ptr(struct iwl_priv *priv, + struct iwl_tx_queue *txq); +extern void iwl_handle_data_packet_monitor(struct iwl_priv *priv, + struct iwl_rx_mem_buffer *rxb, + void *data, short len, + struct ieee80211_rx_status *stats, + u16 phy_flags); +extern int is_duplicate_packet(struct iwl_priv *priv, struct ieee80211_hdr + *header); +extern void iwl_rx_queue_free(struct iwl_priv *priv, struct iwl_rx_queue *rxq); +extern int iwl_rx_queue_alloc(struct iwl_priv *priv); +extern void iwl_rx_queue_reset(struct iwl_priv *priv, + struct iwl_rx_queue *rxq); +extern int iwl_calc_db_from_ratio(int sig_ratio); +extern int iwl_calc_sig_qual(int rssi_dbm, int noise_dbm); +extern int iwl_tx_queue_init(struct iwl_priv *priv, + struct iwl_tx_queue *txq, int count, u32 id); +extern int iwl_rx_queue_restock(struct iwl_priv *priv); +extern void iwl_rx_replenish(void *data); +extern void iwl_tx_queue_free(struct iwl_priv *priv, struct iwl_tx_queue *txq); +extern int iwl_send_cmd_pdu(struct iwl_priv *priv, u8 id, u16 len, + const void *data); +extern int __must_check iwl_send_cmd_async(struct iwl_priv *priv, + struct iwl_host_cmd *cmd); +extern int __must_check iwl_send_cmd_sync(struct iwl_priv *priv, + struct iwl_host_cmd *cmd); +extern int __must_check iwl_send_cmd(struct iwl_priv *priv, + struct iwl_host_cmd *cmd); +extern unsigned int iwl_fill_beacon_frame(struct iwl_priv *priv, + struct ieee80211_hdr *hdr, + const u8 *dest, int left); +extern int iwl_rx_queue_update_write_ptr(struct iwl_priv *priv, + struct iwl_rx_queue *q); +extern int iwl_send_statistics_request(struct iwl_priv *priv); +extern void iwl_set_decrypted_flag(struct iwl_priv *priv, struct sk_buff *skb, + u32 decrypt_res, + struct ieee80211_rx_status *stats); +extern __le16 *ieee80211_get_qos_ctrl(struct ieee80211_hdr *hdr); + +extern const u8 BROADCAST_ADDR[ETH_ALEN]; + +/* + * Currently used by iwl-3945-rs... look at restructuring so that it doesn't + * call this... todo... fix that. +*/ +extern u8 iwl_sync_station(struct iwl_priv *priv, int sta_id, + u16 tx_rate, u8 flags); + +static inline int iwl_is_associated(struct iwl_priv *priv) +{ + return (priv->active_rxon.filter_flags & RXON_FILTER_ASSOC_MSK) ? 1 : 0; +} + +/****************************************************************************** + * + * Functions implemented in iwl-[34]*.c which are forward declared here + * for use by iwl-base.c + * + * NOTE: The implementation of these functions are hardware specific + * which is why they are in the hardware specific files (vs. 
iwl-base.c) + * + * Naming convention -- + * iwl_ <-- Its part of iwlwifi (should be changed to iwl_) + * iwl_hw_ <-- Hardware specific (implemented in iwl-XXXX.c by all HW) + * iwlXXXX_ <-- Hardware specific (implemented in iwl-XXXX.c for XXXX) + * iwl_bg_ <-- Called from work queue context + * iwl_mac_ <-- mac80211 callback + * + ****************************************************************************/ +extern void iwl_hw_rx_handler_setup(struct iwl_priv *priv); +extern void iwl_hw_setup_deferred_work(struct iwl_priv *priv); +extern void iwl_hw_cancel_deferred_work(struct iwl_priv *priv); +extern int iwl_hw_rxq_stop(struct iwl_priv *priv); +extern int iwl_hw_set_hw_setting(struct iwl_priv *priv); +extern int iwl_hw_nic_init(struct iwl_priv *priv); +extern void iwl_hw_card_show_info(struct iwl_priv *priv); +extern int iwl_hw_nic_stop_master(struct iwl_priv *priv); +extern void iwl_hw_txq_ctx_free(struct iwl_priv *priv); +extern void iwl_hw_txq_ctx_stop(struct iwl_priv *priv); +extern int iwl_hw_nic_reset(struct iwl_priv *priv); +extern int iwl_hw_txq_attach_buf_to_tfd(struct iwl_priv *priv, void *tfd, + dma_addr_t addr, u16 len); +extern int iwl_hw_txq_free_tfd(struct iwl_priv *priv, struct iwl_tx_queue *txq); +extern int iwl_hw_get_temperature(struct iwl_priv *priv); +extern int iwl_hw_tx_queue_init(struct iwl_priv *priv, + struct iwl_tx_queue *txq); +extern unsigned int iwl_hw_get_beacon_cmd(struct iwl_priv *priv, + struct iwl_frame *frame, u8 rate); +extern int iwl_hw_get_rx_read(struct iwl_priv *priv); +extern void iwl_hw_build_tx_cmd_rate(struct iwl_priv *priv, + struct iwl_cmd *cmd, + struct ieee80211_tx_control *ctrl, + struct ieee80211_hdr *hdr, + int sta_id, int tx_id); +extern int iwl_hw_reg_send_txpower(struct iwl_priv *priv); +extern int iwl_hw_reg_set_txpower(struct iwl_priv *priv, s8 power); +extern void iwl_hw_rx_statistics(struct iwl_priv *priv, + struct iwl_rx_mem_buffer *rxb); +extern void iwl_disable_events(struct iwl_priv *priv); +extern int iwl4965_get_temperature(const struct iwl_priv *priv); + +/** + * iwl_hw_find_station - Find station id for a given BSSID + * @bssid: MAC address of station ID to find + * + * NOTE: This should not be hardware specific but the code has + * not yet been merged into a single common layer for managing the + * station tables. 
+ */ +extern u8 iwl_hw_find_station(struct iwl_priv *priv, const u8 *bssid); + +extern int iwl_hw_channel_switch(struct iwl_priv *priv, u16 channel); +extern int iwl_tx_queue_reclaim(struct iwl_priv *priv, int txq_id, int index); +#endif diff --git a/drivers/net/wireless/libertas/11d.c b/drivers/net/wireless/libertas/11d.c index 4cf0ff7b833d6e77d6c99e5ba36c066f1b61b037..9cf0211de67f497a8da8c0543f5c5e1cdd3e056c 100644 --- a/drivers/net/wireless/libertas/11d.c +++ b/drivers/net/wireless/libertas/11d.c @@ -124,17 +124,17 @@ static u8 wlan_channel_known_11d(u8 chan, u8 nr_chan = parsed_region_chan->nr_chan; u8 i = 0; - lbs_dbg_hex("11D:parsed_region_chan:", (char *)chanpwr, + lbs_deb_hex(LBS_DEB_11D, "parsed_region_chan", (char *)chanpwr, sizeof(struct chan_power_11d) * nr_chan); for (i = 0; i < nr_chan; i++) { if (chan == chanpwr[i].chan) { - lbs_deb_11d("11D: Found Chan:%d\n", chan); + lbs_deb_11d("found chan %d\n", chan); return 1; } } - lbs_deb_11d("11D: Not Find Chan:%d\n", chan); + lbs_deb_11d("chan %d not found\n", chan); return 0; } @@ -174,8 +174,8 @@ static int generate_domain_info_11d(struct parsed_region_chan_11d memcpy(domaininfo->countrycode, parsed_region_chan->countrycode, COUNTRY_CODE_LEN); - lbs_deb_11d("11D:nrchan=%d\n", nr_chan); - lbs_dbg_hex("11D:parsed_region_chan:", (char *)parsed_region_chan, + lbs_deb_11d("nrchan %d\n", nr_chan); + lbs_deb_hex(LBS_DEB_11D, "parsed_region_chan", (char *)parsed_region_chan, sizeof(struct parsed_region_chan_11d)); for (i = 0; i < nr_chan; i++) { @@ -213,7 +213,7 @@ static int generate_domain_info_11d(struct parsed_region_chan_11d domaininfo->nr_subband = nr_subband; lbs_deb_11d("nr_subband=%x\n", domaininfo->nr_subband); - lbs_dbg_hex("11D:domaininfo:", (char *)domaininfo, + lbs_deb_hex(LBS_DEB_11D, "domaininfo", (char *)domaininfo, COUNTRY_CODE_LEN + 1 + sizeof(struct ieeetypes_subbandset) * nr_subband); return 0; @@ -233,13 +233,13 @@ static void wlan_generate_parsed_region_chan_11d(struct region_channel * region_ struct chan_freq_power *cfp; if (region_chan == NULL) { - lbs_deb_11d("11D: region_chan is NULL\n"); + lbs_deb_11d("region_chan is NULL\n"); return; } cfp = region_chan->CFP; if (cfp == NULL) { - lbs_deb_11d("11D: cfp equal NULL \n"); + lbs_deb_11d("cfp is NULL \n"); return; } @@ -248,19 +248,19 @@ static void wlan_generate_parsed_region_chan_11d(struct region_channel * region_ memcpy(parsed_region_chan->countrycode, wlan_code_2_region(region_chan->region), COUNTRY_CODE_LEN); - lbs_deb_11d("11D: region[0x%x] band[%d]\n", parsed_region_chan->region, + lbs_deb_11d("region 0x%x, band %d\n", parsed_region_chan->region, parsed_region_chan->band); for (i = 0; i < region_chan->nrcfp; i++, cfp++) { parsed_region_chan->chanpwr[i].chan = cfp->channel; parsed_region_chan->chanpwr[i].pwr = cfp->maxtxpower; - lbs_deb_11d("11D: Chan[%d] Pwr[%d]\n", + lbs_deb_11d("chan %d, pwr %d\n", parsed_region_chan->chanpwr[i].chan, parsed_region_chan->chanpwr[i].pwr); } parsed_region_chan->nr_chan = region_chan->nrcfp; - lbs_deb_11d("11D: nrchan[%d]\n", parsed_region_chan->nr_chan); + lbs_deb_11d("nrchan %d\n", parsed_region_chan->nr_chan); return; } @@ -336,7 +336,7 @@ static int parse_domain_info_11d(struct ieeetypes_countryinfofullset* 6. 
Others */ - lbs_dbg_hex("CountryInfo:", (u8 *) countryinfo, 30); + lbs_deb_hex(LBS_DEB_11D, "countryinfo", (u8 *) countryinfo, 30); if ((*(countryinfo->countrycode)) == 0 || (countryinfo->len <= COUNTRY_CODE_LEN)) { @@ -349,7 +349,7 @@ static int parse_domain_info_11d(struct ieeetypes_countryinfofullset* wlan_region_2_code(countryinfo->countrycode); lbs_deb_11d("regioncode=%x\n", (u8) parsed_region_chan->region); - lbs_dbg_hex("CountryCode:", (char *)countryinfo->countrycode, + lbs_deb_hex(LBS_DEB_11D, "countrycode", (char *)countryinfo->countrycode, COUNTRY_CODE_LEN); parsed_region_chan->band = band; @@ -364,7 +364,7 @@ static int parse_domain_info_11d(struct ieeetypes_countryinfofullset* if (countryinfo->subband[j].firstchan <= lastchan) { /*Step2&3. Check First Chan Num increment and no overlap */ - lbs_deb_11d("11D: Chan[%d>%d] Overlap\n", + lbs_deb_11d("chan %d>%d, overlap\n", countryinfo->subband[j].firstchan, lastchan); continue; } @@ -393,7 +393,7 @@ static int parse_domain_info_11d(struct ieeetypes_countryinfofullset* } else { /*not supported and ignore the chan */ lbs_deb_11d( - "11D:i[%d] chan[%d] unsupported in region[%x] band[%d]\n", + "i %d, chan %d unsupported in region %x, band %d\n", i, curchan, region, band); } } @@ -405,7 +405,7 @@ static int parse_domain_info_11d(struct ieeetypes_countryinfofullset* parsed_region_chan->nr_chan = idx; lbs_deb_11d("nrchan=%x\n", parsed_region_chan->nr_chan); - lbs_dbg_hex("11D:parsed_region_chan:", (u8 *) parsed_region_chan, + lbs_deb_hex(LBS_DEB_11D, "parsed_region_chan", (u8 *) parsed_region_chan, 2 + COUNTRY_CODE_LEN + sizeof(struct parsed_region_chan_11d) * idx); done: @@ -422,15 +422,15 @@ static int parse_domain_info_11d(struct ieeetypes_countryinfofullset* u8 libertas_get_scan_type_11d(u8 chan, struct parsed_region_chan_11d * parsed_region_chan) { - u8 scan_type = cmd_scan_type_passive; + u8 scan_type = CMD_SCAN_TYPE_PASSIVE; lbs_deb_enter(LBS_DEB_11D); if (wlan_channel_known_11d(chan, parsed_region_chan)) { - lbs_deb_11d("11D: Found and do Active Scan\n"); - scan_type = cmd_scan_type_active; + lbs_deb_11d("found, do active scan\n"); + scan_type = CMD_SCAN_TYPE_ACTIVE; } else { - lbs_deb_11d("11D: Not Find and do Passive Scan\n"); + lbs_deb_11d("not found, do passive scan\n"); } lbs_deb_leave_args(LBS_DEB_11D, "ret scan_type %d", scan_type); @@ -446,25 +446,6 @@ void libertas_init_11d(wlan_private * priv) return; } -static int wlan_enable_11d(wlan_private * priv, u8 flag) -{ - int ret; - - priv->adapter->enable11d = flag; - - /* send cmd to FW to enable/disable 11D function in FW */ - ret = libertas_prepare_and_send_command(priv, - cmd_802_11_snmp_mib, - cmd_act_set, - cmd_option_waitforrsp, - OID_802_11D_ENABLE, - &priv->adapter->enable11d); - if (ret) - lbs_deb_11d("11D: Fail to enable 11D \n"); - - return 0; -} - /** * @brief This function sets DOMAIN INFO to FW * @param priv pointer to wlan_private @@ -475,15 +456,15 @@ static int set_domain_info_11d(wlan_private * priv) int ret; if (!priv->adapter->enable11d) { - lbs_deb_11d("11D: dnld domain Info with 11d disabled\n"); + lbs_deb_11d("dnld domain Info with 11d disabled\n"); return 0; } - ret = libertas_prepare_and_send_command(priv, cmd_802_11d_domain_info, - cmd_act_set, - cmd_option_waitforrsp, 0, NULL); + ret = libertas_prepare_and_send_command(priv, CMD_802_11D_DOMAIN_INFO, + CMD_ACT_SET, + CMD_OPTION_WAITFORRSP, 0, NULL); if (ret) - lbs_deb_11d("11D: Fail to dnld domain Info\n"); + lbs_deb_11d("fail to dnld domain info\n"); return ret; } @@ -505,7 +486,7 @@ int 
libertas_set_universaltable(wlan_private * priv, u8 band) adapter->universal_channel[i].nrcfp = sizeof(channel_freq_power_UN_BG) / size; - lbs_deb_11d("11D: BG-band nrcfp=%d\n", + lbs_deb_11d("BG-band nrcfp %d\n", adapter->universal_channel[i].nrcfp); adapter->universal_channel[i].CFP = channel_freq_power_UN_BG; @@ -541,10 +522,10 @@ int libertas_cmd_802_11d_domain_info(wlan_private * priv, cmd->command = cpu_to_le16(cmdno); pdomaininfo->action = cpu_to_le16(cmdoption); - if (cmdoption == cmd_act_get) { + if (cmdoption == CMD_ACT_GET) { cmd->size = cpu_to_le16(sizeof(pdomaininfo->action) + S_DS_GEN); - lbs_dbg_hex("11D: 802_11D_DOMAIN_INFO:", (u8 *) cmd, + lbs_deb_hex(LBS_DEB_11D, "802_11D_DOMAIN_INFO", (u8 *) cmd, (int)(cmd->size)); goto done; } @@ -562,7 +543,7 @@ int libertas_cmd_802_11d_domain_info(wlan_private * priv, nr_subband * sizeof(struct ieeetypes_subbandset)); cmd->size = cpu_to_le16(sizeof(pdomaininfo->action) + - domain->header.len + + le16_to_cpu(domain->header.len) + sizeof(struct mrvlietypesheader) + S_DS_GEN); } else { @@ -570,38 +551,13 @@ int libertas_cmd_802_11d_domain_info(wlan_private * priv, cpu_to_le16(sizeof(pdomaininfo->action) + S_DS_GEN); } - lbs_dbg_hex("11D:802_11D_DOMAIN_INFO:", (u8 *) cmd, le16_to_cpu(cmd->size)); + lbs_deb_hex(LBS_DEB_11D, "802_11D_DOMAIN_INFO", (u8 *) cmd, le16_to_cpu(cmd->size)); done: lbs_deb_enter(LBS_DEB_11D); return 0; } -/** - * @brief This function implements private cmd: enable/disable 11D - * @param priv pointer to wlan_private - * @param wrq pointer to user data - * @return 0 or -1 - */ -int libertas_cmd_enable_11d(wlan_private * priv, struct iwreq *wrq) -{ - int data = 0; - int *val; - - lbs_deb_enter(LBS_DEB_11D); - data = SUBCMD_DATA(wrq); - - lbs_deb_11d("enable 11D: %s\n", - (data == 1) ? 
"enable" : "Disable"); - - wlan_enable_11d(priv, data); - val = (int *)wrq->u.name; - *val = priv->adapter->enable11d; - - lbs_deb_enter(LBS_DEB_11D); - return 0; -} - /** * @brief This function parses countryinfo from AP and download country info to FW * @param priv pointer to wlan_private @@ -619,13 +575,13 @@ int libertas_ret_802_11d_domain_info(wlan_private * priv, lbs_deb_enter(LBS_DEB_11D); - lbs_dbg_hex("11D DOMAIN Info Rsp Data:", (u8 *) resp, + lbs_deb_hex(LBS_DEB_11D, "domain info resp", (u8 *) resp, (int)le16_to_cpu(resp->size)); nr_subband = (le16_to_cpu(domain->header.len) - COUNTRY_CODE_LEN) / sizeof(struct ieeetypes_subbandset); - lbs_deb_11d("11D Domain Info Resp: nr_subband=%d\n", nr_subband); + lbs_deb_11d("domain info resp: nr_subband %d\n", nr_subband); if (nr_subband > MRVDRV_MAX_SUBBAND_802_11D) { lbs_deb_11d("Invalid Numrer of Subband returned!!\n"); @@ -633,10 +589,10 @@ int libertas_ret_802_11d_domain_info(wlan_private * priv, } switch (action) { - case cmd_act_set: /*Proc Set action */ + case CMD_ACT_SET: /*Proc Set action */ break; - case cmd_act_get: + case CMD_ACT_GET: break; default: lbs_deb_11d("Invalid action:%d\n", domaininfo->action); @@ -667,7 +623,7 @@ int libertas_parse_dnld_countryinfo_11d(wlan_private * priv, &adapter->parsed_region_chan); if (ret == -1) { - lbs_deb_11d("11D: Err Parse domain_info from AP..\n"); + lbs_deb_11d("error parsing domain_info from AP\n"); goto done; } @@ -679,7 +635,7 @@ int libertas_parse_dnld_countryinfo_11d(wlan_private * priv, ret = set_domain_info_11d(priv); if (ret) { - lbs_deb_11d("11D: Err set domainInfo to FW\n"); + lbs_deb_11d("error setting domain info\n"); goto done; } } @@ -703,7 +659,7 @@ int libertas_create_dnld_countryinfo_11d(wlan_private * priv) u8 j; lbs_deb_enter(LBS_DEB_11D); - lbs_deb_11d("11D:curbssparams.band[%d]\n", adapter->curbssparams.band); + lbs_deb_11d("curbssparams.band %d\n", adapter->curbssparams.band); if (priv->adapter->enable11d) { /* update parsed_region_chan_11; dnld domaininf to FW */ @@ -712,7 +668,7 @@ int libertas_create_dnld_countryinfo_11d(wlan_private * priv) sizeof(adapter->region_channel[0]); j++) { region_chan = &adapter->region_channel[j]; - lbs_deb_11d("11D:[%d] region_chan->band[%d]\n", j, + lbs_deb_11d("%d region_chan->band %d\n", j, region_chan->band); if (!region_chan || !region_chan->valid @@ -725,7 +681,7 @@ int libertas_create_dnld_countryinfo_11d(wlan_private * priv) if (j >= sizeof(adapter->region_channel) / sizeof(adapter->region_channel[0])) { - lbs_deb_11d("11D:region_chan not found. 
band[%d]\n", + lbs_deb_11d("region_chan not found, band %d\n", adapter->curbssparams.band); ret = -1; goto done; @@ -745,7 +701,7 @@ int libertas_create_dnld_countryinfo_11d(wlan_private * priv) ret = set_domain_info_11d(priv); if (ret) { - lbs_deb_11d("11D: Err set domainInfo to FW\n"); + lbs_deb_11d("error setting domain info\n"); goto done; } diff --git a/drivers/net/wireless/libertas/11d.h b/drivers/net/wireless/libertas/11d.h index 73e42e7129110a3da2fefc377fbdd8a10676ef0a..3a6d1f8db78fcaee88e6bf81bf7e7049d811ffad 100644 --- a/drivers/net/wireless/libertas/11d.h +++ b/drivers/net/wireless/libertas/11d.h @@ -83,8 +83,6 @@ u8 libertas_get_scan_type_11d(u8 chan, u32 libertas_chan_2_freq(u8 chan, u8 band); -enum state_11d libertas_get_state_11d(wlan_private * priv); - void libertas_init_11d(wlan_private * priv); int libertas_set_universaltable(wlan_private * priv, u8 band); @@ -93,8 +91,6 @@ int libertas_cmd_802_11d_domain_info(wlan_private * priv, struct cmd_ds_command *cmd, u16 cmdno, u16 cmdOption); -int libertas_cmd_enable_11d(wlan_private * priv, struct iwreq *wrq); - int libertas_ret_802_11d_domain_info(wlan_private * priv, struct cmd_ds_command *resp); diff --git a/drivers/net/wireless/libertas/Makefile b/drivers/net/wireless/libertas/Makefile index 32ed4136b0d482256ae855cb005f6da28cbb1f49..c469d569f0905269e6662806e5edce0663cc66e3 100644 --- a/drivers/net/wireless/libertas/Makefile +++ b/drivers/net/wireless/libertas/Makefile @@ -1,12 +1,13 @@ -libertas-objs := main.o fw.o wext.o \ +libertas-objs := main.o wext.o \ rx.o tx.o cmd.o \ cmdresp.o scan.o \ join.o 11d.o \ debugfs.o \ ethtool.o assoc.o -usb8xxx-objs += if_bootcmd.o usb8xxx-objs += if_usb.o +libertas_cs-objs += if_cs.o obj-$(CONFIG_LIBERTAS) += libertas.o obj-$(CONFIG_LIBERTAS_USB) += usb8xxx.o +obj-$(CONFIG_LIBERTAS_CS) += libertas_cs.o diff --git a/drivers/net/wireless/libertas/assoc.c b/drivers/net/wireless/libertas/assoc.c index afd5617dd92b9e1cce0bf51612ac4a5843344333..b61b176e9d07a9e234f41444db7c618030772bb4 100644 --- a/drivers/net/wireless/libertas/assoc.c +++ b/drivers/net/wireless/libertas/assoc.c @@ -16,6 +16,7 @@ static const u8 bssid_off[ETH_ALEN] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }; static void print_assoc_req(const char * extra, struct assoc_request * assoc_req) { + DECLARE_MAC_BUF(mac); lbs_deb_assoc( "#### Association Request: %s\n" " flags: 0x%08lX\n" @@ -23,13 +24,13 @@ static void print_assoc_req(const char * extra, struct assoc_request * assoc_req " channel: %d\n" " band: %d\n" " mode: %d\n" - " BSSID: " MAC_FMT "\n" + " BSSID: %s\n" " Encryption:%s%s%s\n" " auth: %d\n", extra, assoc_req->flags, escape_essid(assoc_req->ssid, assoc_req->ssid_len), assoc_req->channel, assoc_req->band, assoc_req->mode, - MAC_ARG(assoc_req->bssid), + print_mac(mac, assoc_req->bssid), assoc_req->secinfo.WPAenabled ? " WPA" : "", assoc_req->secinfo.WPA2enabled ? " WPA2" : "", assoc_req->secinfo.wep_enabled ? 
" WEP" : "", @@ -57,10 +58,8 @@ static int assoc_helper_essid(wlan_private *priv, lbs_deb_assoc("New SSID requested: '%s'\n", escape_essid(assoc_req->ssid, assoc_req->ssid_len)); if (assoc_req->mode == IW_MODE_INFRA) { - if (adapter->prescan) { - libertas_send_specific_ssid_scan(priv, assoc_req->ssid, - assoc_req->ssid_len, 0); - } + libertas_send_specific_ssid_scan(priv, assoc_req->ssid, + assoc_req->ssid_len, 0); bss = libertas_find_ssid_in_list(adapter, assoc_req->ssid, assoc_req->ssid_len, NULL, IW_MODE_INFRA, channel); @@ -106,16 +105,17 @@ static int assoc_helper_bssid(wlan_private *priv, wlan_adapter *adapter = priv->adapter; int ret = 0; struct bss_descriptor * bss; + DECLARE_MAC_BUF(mac); - lbs_deb_enter_args(LBS_DEB_ASSOC, "BSSID " MAC_FMT, - MAC_ARG(assoc_req->bssid)); + lbs_deb_enter_args(LBS_DEB_ASSOC, "BSSID %s", + print_mac(mac, assoc_req->bssid)); /* Search for index position in list for requested MAC */ bss = libertas_find_bssid_in_list(adapter, assoc_req->bssid, assoc_req->mode); if (bss == NULL) { - lbs_deb_assoc("ASSOC: WAP: BSSID " MAC_FMT " not found, " - "cannot associate.\n", MAC_ARG(assoc_req->bssid)); + lbs_deb_assoc("ASSOC: WAP: BSSID %s not found, " + "cannot associate.\n", print_mac(mac, assoc_req->bssid)); goto out; } @@ -175,14 +175,14 @@ static int assoc_helper_mode(wlan_private *priv, if (assoc_req->mode == IW_MODE_INFRA) { if (adapter->psstate != PS_STATE_FULL_POWER) - libertas_ps_wakeup(priv, cmd_option_waitforrsp); - adapter->psmode = wlan802_11powermodecam; + libertas_ps_wakeup(priv, CMD_OPTION_WAITFORRSP); + adapter->psmode = WLAN802_11POWERMODECAM; } adapter->mode = assoc_req->mode; ret = libertas_prepare_and_send_command(priv, - cmd_802_11_snmp_mib, - 0, cmd_option_waitforrsp, + CMD_802_11_SNMP_MIB, + 0, CMD_OPTION_WAITFORRSP, OID_802_11_INFRASTRUCTURE_MODE, /* Shoot me now */ (void *) (size_t) assoc_req->mode); @@ -195,9 +195,9 @@ static int assoc_helper_mode(wlan_private *priv, static int update_channel(wlan_private * priv) { /* the channel in f/w could be out of sync, get the current channel */ - return libertas_prepare_and_send_command(priv, cmd_802_11_rf_channel, - cmd_opt_802_11_rf_channel_get, - cmd_option_waitforrsp, 0, NULL); + return libertas_prepare_and_send_command(priv, CMD_802_11_RF_CHANNEL, + CMD_OPT_802_11_RF_CHANNEL_GET, + CMD_OPTION_WAITFORRSP, 0, NULL); } void libertas_sync_channel(struct work_struct *work) @@ -227,9 +227,9 @@ static int assoc_helper_channel(wlan_private *priv, lbs_deb_assoc("ASSOC: channel: %d -> %d\n", adapter->curbssparams.channel, assoc_req->channel); - ret = libertas_prepare_and_send_command(priv, cmd_802_11_rf_channel, - cmd_opt_802_11_rf_channel_set, - cmd_option_waitforrsp, 0, &assoc_req->channel); + ret = libertas_prepare_and_send_command(priv, CMD_802_11_RF_CHANNEL, + CMD_OPT_802_11_RF_CHANNEL_SET, + CMD_OPTION_WAITFORRSP, 0, &assoc_req->channel); if (ret < 0) { lbs_deb_assoc("ASSOC: channel: error setting channel."); } @@ -278,15 +278,15 @@ static int assoc_helper_wep_keys(wlan_private *priv, || assoc_req->wep_keys[2].len || assoc_req->wep_keys[3].len) { ret = libertas_prepare_and_send_command(priv, - cmd_802_11_set_wep, - cmd_act_add, - cmd_option_waitforrsp, + CMD_802_11_SET_WEP, + CMD_ACT_ADD, + CMD_OPTION_WAITFORRSP, 0, assoc_req); } else { ret = libertas_prepare_and_send_command(priv, - cmd_802_11_set_wep, - cmd_act_remove, - cmd_option_waitforrsp, + CMD_802_11_SET_WEP, + CMD_ACT_REMOVE, + CMD_OPTION_WAITFORRSP, 0, NULL); } @@ -295,9 +295,9 @@ static int assoc_helper_wep_keys(wlan_private *priv, /* 
enable/disable the MAC's WEP packet filter */ if (assoc_req->secinfo.wep_enabled) - adapter->currentpacketfilter |= cmd_act_mac_wep_enable; + adapter->currentpacketfilter |= CMD_ACT_MAC_WEP_ENABLE; else - adapter->currentpacketfilter &= ~cmd_act_mac_wep_enable; + adapter->currentpacketfilter &= ~CMD_ACT_MAC_WEP_ENABLE; ret = libertas_set_mac_packet_filter(priv); if (ret) goto out; @@ -307,7 +307,7 @@ static int assoc_helper_wep_keys(wlan_private *priv, /* Copy WEP keys into adapter wep key fields */ for (i = 0; i < 4; i++) { memcpy(&adapter->wep_keys[i], &assoc_req->wep_keys[i], - sizeof(struct WLAN_802_11_KEY)); + sizeof(struct enc_key)); } adapter->wep_tx_keyidx = assoc_req->wep_tx_keyidx; @@ -342,9 +342,9 @@ static int assoc_helper_secinfo(wlan_private *priv, /* Get RSN enabled/disabled */ ret = libertas_prepare_and_send_command(priv, - cmd_802_11_enable_rsn, - cmd_act_set, - cmd_option_waitforrsp, + CMD_802_11_ENABLE_RSN, + CMD_ACT_GET, + CMD_OPTION_WAITFORRSP, 0, &rsn); if (ret) { lbs_deb_assoc("Failed to get RSN status: %d", ret); @@ -359,9 +359,9 @@ static int assoc_helper_secinfo(wlan_private *priv, /* Set RSN enabled/disabled */ rsn = do_wpa; ret = libertas_prepare_and_send_command(priv, - cmd_802_11_enable_rsn, - cmd_act_set, - cmd_option_waitforrsp, + CMD_802_11_ENABLE_RSN, + CMD_ACT_SET, + CMD_OPTION_WAITFORRSP, 0, &rsn); out: @@ -374,15 +374,40 @@ static int assoc_helper_wpa_keys(wlan_private *priv, struct assoc_request * assoc_req) { int ret = 0; + unsigned int flags = assoc_req->flags; lbs_deb_enter(LBS_DEB_ASSOC); - ret = libertas_prepare_and_send_command(priv, - cmd_802_11_key_material, - cmd_act_set, - cmd_option_waitforrsp, - 0, assoc_req); + /* Work around older firmware bug where WPA unicast and multicast + * keys must be set independently. Seen in SDIO parts with firmware + * version 5.0.11p0. 
+ */ + + if (test_bit(ASSOC_FLAG_WPA_UCAST_KEY, &assoc_req->flags)) { + clear_bit(ASSOC_FLAG_WPA_MCAST_KEY, &assoc_req->flags); + ret = libertas_prepare_and_send_command(priv, + CMD_802_11_KEY_MATERIAL, + CMD_ACT_SET, + CMD_OPTION_WAITFORRSP, + 0, assoc_req); + assoc_req->flags = flags; + } + + if (ret) + goto out; + if (test_bit(ASSOC_FLAG_WPA_MCAST_KEY, &assoc_req->flags)) { + clear_bit(ASSOC_FLAG_WPA_UCAST_KEY, &assoc_req->flags); + + ret = libertas_prepare_and_send_command(priv, + CMD_802_11_KEY_MATERIAL, + CMD_ACT_SET, + CMD_OPTION_WAITFORRSP, + 0, assoc_req); + assoc_req->flags = flags; + } + +out: lbs_deb_leave_args(LBS_DEB_ASSOC, "ret %d", ret); return ret; } @@ -412,7 +437,7 @@ static int assoc_helper_wpa_ie(wlan_private *priv, static int should_deauth_infrastructure(wlan_adapter *adapter, struct assoc_request * assoc_req) { - if (adapter->connect_status != libertas_connected) + if (adapter->connect_status != LIBERTAS_CONNECTED) return 0; if (test_bit(ASSOC_FLAG_SSID, &assoc_req->flags)) { @@ -453,7 +478,7 @@ static int should_deauth_infrastructure(wlan_adapter *adapter, static int should_stop_adhoc(wlan_adapter *adapter, struct assoc_request * assoc_req) { - if (adapter->connect_status != libertas_connected) + if (adapter->connect_status != LIBERTAS_CONNECTED) return 0; if (libertas_ssid_cmp(adapter->curbssparams.ssid, @@ -483,6 +508,7 @@ void libertas_association_worker(struct work_struct *work) struct assoc_request * assoc_req = NULL; int ret = 0; int find_any_ssid = 0; + DECLARE_MAC_BUF(mac); lbs_deb_enter(LBS_DEB_ASSOC); @@ -556,7 +582,8 @@ void libertas_association_worker(struct work_struct *work) if (test_bit(ASSOC_FLAG_MODE, &assoc_req->flags)) { ret = assoc_helper_mode(priv, assoc_req); if (ret) { -lbs_deb_assoc("ASSOC(:%d) mode: ret = %d\n", __LINE__, ret); + lbs_deb_assoc("ASSOC(:%d) mode: ret = %d\n", + __LINE__, ret); goto out; } } @@ -574,7 +601,8 @@ lbs_deb_assoc("ASSOC(:%d) mode: ret = %d\n", __LINE__, ret); || test_bit(ASSOC_FLAG_WEP_TX_KEYIDX, &assoc_req->flags)) { ret = assoc_helper_wep_keys(priv, assoc_req); if (ret) { -lbs_deb_assoc("ASSOC(:%d) wep_keys: ret = %d\n", __LINE__, ret); + lbs_deb_assoc("ASSOC(:%d) wep_keys: ret = %d\n", + __LINE__, ret); goto out; } } @@ -582,7 +610,8 @@ lbs_deb_assoc("ASSOC(:%d) wep_keys: ret = %d\n", __LINE__, ret); if (test_bit(ASSOC_FLAG_SECINFO, &assoc_req->flags)) { ret = assoc_helper_secinfo(priv, assoc_req); if (ret) { -lbs_deb_assoc("ASSOC(:%d) secinfo: ret = %d\n", __LINE__, ret); + lbs_deb_assoc("ASSOC(:%d) secinfo: ret = %d\n", + __LINE__, ret); goto out; } } @@ -590,7 +619,8 @@ lbs_deb_assoc("ASSOC(:%d) secinfo: ret = %d\n", __LINE__, ret); if (test_bit(ASSOC_FLAG_WPA_IE, &assoc_req->flags)) { ret = assoc_helper_wpa_ie(priv, assoc_req); if (ret) { -lbs_deb_assoc("ASSOC(:%d) wpa_ie: ret = %d\n", __LINE__, ret); + lbs_deb_assoc("ASSOC(:%d) wpa_ie: ret = %d\n", + __LINE__, ret); goto out; } } @@ -599,7 +629,8 @@ lbs_deb_assoc("ASSOC(:%d) wpa_ie: ret = %d\n", __LINE__, ret); || test_bit(ASSOC_FLAG_WPA_UCAST_KEY, &assoc_req->flags)) { ret = assoc_helper_wpa_keys(priv, assoc_req); if (ret) { -lbs_deb_assoc("ASSOC(:%d) wpa_keys: ret = %d\n", __LINE__, ret); + lbs_deb_assoc("ASSOC(:%d) wpa_keys: ret = %d\n", + __LINE__, ret); goto out; } } @@ -618,25 +649,25 @@ lbs_deb_assoc("ASSOC(:%d) wpa_keys: ret = %d\n", __LINE__, ret); success = 0; } - if (adapter->connect_status != libertas_connected) { - lbs_deb_assoc("ASSOC: assoication attempt unsuccessful, " + if (adapter->connect_status != LIBERTAS_CONNECTED) { + 
lbs_deb_assoc("ASSOC: association attempt unsuccessful, " "not connected.\n"); success = 0; } if (success) { lbs_deb_assoc("ASSOC: association attempt successful. " - "Associated to '%s' (" MAC_FMT ")\n", + "Associated to '%s' (%s)\n", escape_essid(adapter->curbssparams.ssid, adapter->curbssparams.ssid_len), - MAC_ARG(adapter->curbssparams.bssid)); + print_mac(mac, adapter->curbssparams.bssid)); libertas_prepare_and_send_command(priv, - cmd_802_11_rssi, - 0, cmd_option_waitforrsp, 0, NULL); + CMD_802_11_RSSI, + 0, CMD_OPTION_WAITFORRSP, 0, NULL); libertas_prepare_and_send_command(priv, - cmd_802_11_get_log, - 0, cmd_option_waitforrsp, 0, NULL); + CMD_802_11_GET_LOG, + 0, CMD_OPTION_WAITFORRSP, 0, NULL); } else { ret = -1; } @@ -703,7 +734,7 @@ struct assoc_request * wlan_get_association_request(wlan_adapter *adapter) int i; for (i = 0; i < 4; i++) { memcpy(&assoc_req->wep_keys[i], &adapter->wep_keys[i], - sizeof(struct WLAN_802_11_KEY)); + sizeof(struct enc_key)); } } @@ -712,12 +743,12 @@ struct assoc_request * wlan_get_association_request(wlan_adapter *adapter) if (!test_bit(ASSOC_FLAG_WPA_MCAST_KEY, &assoc_req->flags)) { memcpy(&assoc_req->wpa_mcast_key, &adapter->wpa_mcast_key, - sizeof(struct WLAN_802_11_KEY)); + sizeof(struct enc_key)); } if (!test_bit(ASSOC_FLAG_WPA_UCAST_KEY, &assoc_req->flags)) { memcpy(&assoc_req->wpa_unicast_key, &adapter->wpa_unicast_key, - sizeof(struct WLAN_802_11_KEY)); + sizeof(struct enc_key)); } if (!test_bit(ASSOC_FLAG_SECINFO, &assoc_req->flags)) { diff --git a/drivers/net/wireless/libertas/assoc.h b/drivers/net/wireless/libertas/assoc.h index 5e9c31f0932b1d64377184c5fa038c16dbbdd19f..e09b7490abbdf18710cb1b112dab29d912efe8b9 100644 --- a/drivers/net/wireless/libertas/assoc.h +++ b/drivers/net/wireless/libertas/assoc.h @@ -17,7 +17,7 @@ static inline void wlan_postpone_association_work(wlan_private *priv) if (priv->adapter->surpriseremoved) return; cancel_delayed_work(&priv->assoc_work); - queue_delayed_work(priv->assoc_thread, &priv->assoc_work, ASSOC_DELAY); + queue_delayed_work(priv->work_thread, &priv->assoc_work, ASSOC_DELAY); } static inline void wlan_cancel_association_work(wlan_private *priv) diff --git a/drivers/net/wireless/libertas/cmd.c b/drivers/net/wireless/libertas/cmd.c index 4a8f5dc70239c48a0aebda7e23591b9a5d4bd578..1cbbd96fdbde913200fac048a1b2c62fef54fb36 100644 --- a/drivers/net/wireless/libertas/cmd.c +++ b/drivers/net/wireless/libertas/cmd.c @@ -15,7 +15,7 @@ static void cleanup_cmdnode(struct cmd_ctrl_node *ptempnode); static u16 commands_allowed_in_ps[] = { - cmd_802_11_rssi, + CMD_802_11_RSSI, }; /** @@ -43,7 +43,7 @@ static int wlan_cmd_hw_spec(wlan_private * priv, struct cmd_ds_command *cmd) lbs_deb_enter(LBS_DEB_CMD); - cmd->command = cpu_to_le16(cmd_get_hw_spec); + cmd->command = cpu_to_le16(CMD_GET_HW_SPEC); cmd->size = cpu_to_le16(sizeof(struct cmd_ds_get_hw_spec) + S_DS_GEN); memcpy(hwspec->permanentaddr, priv->adapter->current_addr, ETH_ALEN); @@ -56,34 +56,29 @@ static int wlan_cmd_802_11_ps_mode(wlan_private * priv, u16 cmd_action) { struct cmd_ds_802_11_ps_mode *psm = &cmd->params.psmode; - wlan_adapter *adapter = priv->adapter; lbs_deb_enter(LBS_DEB_CMD); - cmd->command = cpu_to_le16(cmd_802_11_ps_mode); + cmd->command = cpu_to_le16(CMD_802_11_PS_MODE); cmd->size = cpu_to_le16(sizeof(struct cmd_ds_802_11_ps_mode) + S_DS_GEN); psm->action = cpu_to_le16(cmd_action); psm->multipledtim = 0; switch (cmd_action) { - case cmd_subcmd_enter_ps: + case CMD_SUBCMD_ENTER_PS: lbs_deb_cmd("PS command:" "SubCode- Enter PS\n"); - 
lbs_deb_cmd("locallisteninterval = %d\n", - adapter->locallisteninterval); - psm->locallisteninterval = - cpu_to_le16(adapter->locallisteninterval); - psm->nullpktinterval = - cpu_to_le16(adapter->nullpktinterval); + psm->locallisteninterval = 0; + psm->nullpktinterval = 0; psm->multipledtim = - cpu_to_le16(priv->adapter->multipledtim); + cpu_to_le16(MRVDRV_DEFAULT_MULTIPLE_DTIM); break; - case cmd_subcmd_exit_ps: + case CMD_SUBCMD_EXIT_PS: lbs_deb_cmd("PS command:" "SubCode- Exit PS\n"); break; - case cmd_subcmd_sleep_confirmed: + case CMD_SUBCMD_SLEEP_CONFIRMED: lbs_deb_cmd("PS command: SubCode- sleep confirm\n"); break; @@ -101,7 +96,9 @@ static int wlan_cmd_802_11_inactivity_timeout(wlan_private * priv, { u16 *timeout = pdata_buf; - cmd->command = cpu_to_le16(cmd_802_11_inactivity_timeout); + lbs_deb_enter(LBS_DEB_CMD); + + cmd->command = cpu_to_le16(CMD_802_11_INACTIVITY_TIMEOUT); cmd->size = cpu_to_le16(sizeof(struct cmd_ds_802_11_inactivity_timeout) + S_DS_GEN); @@ -113,6 +110,7 @@ static int wlan_cmd_802_11_inactivity_timeout(wlan_private * priv, else cmd->params.inactivity_timeout.timeout = 0; + lbs_deb_leave(LBS_DEB_CMD); return 0; } @@ -127,13 +125,13 @@ static int wlan_cmd_802_11_sleep_params(wlan_private * priv, cmd->size = cpu_to_le16((sizeof(struct cmd_ds_802_11_sleep_params)) + S_DS_GEN); - cmd->command = cpu_to_le16(cmd_802_11_sleep_params); + cmd->command = cpu_to_le16(CMD_802_11_SLEEP_PARAMS); - if (cmd_action == cmd_act_get) { + if (cmd_action == CMD_ACT_GET) { memset(&adapter->sp, 0, sizeof(struct sleep_params)); memset(sp, 0, sizeof(struct cmd_ds_802_11_sleep_params)); sp->action = cpu_to_le16(cmd_action); - } else if (cmd_action == cmd_act_set) { + } else if (cmd_action == CMD_ACT_SET) { sp->action = cpu_to_le16(cmd_action); sp->error = cpu_to_le16(adapter->sp.sp_error); sp->offset = cpu_to_le16(adapter->sp.sp_offset); @@ -159,10 +157,10 @@ static int wlan_cmd_802_11_set_wep(wlan_private * priv, lbs_deb_enter(LBS_DEB_CMD); - cmd->command = cpu_to_le16(cmd_802_11_set_wep); + cmd->command = cpu_to_le16(CMD_802_11_SET_WEP); cmd->size = cpu_to_le16(sizeof(*wep) + S_DS_GEN); - if (cmd_act == cmd_act_add) { + if (cmd_act == CMD_ACT_ADD) { int i; if (!assoc_req) { @@ -171,48 +169,47 @@ static int wlan_cmd_802_11_set_wep(wlan_private * priv, goto done; } - wep->action = cpu_to_le16(cmd_act_add); + wep->action = cpu_to_le16(CMD_ACT_ADD); /* default tx key index */ wep->keyindex = cpu_to_le16((u16)(assoc_req->wep_tx_keyidx & - (u32)cmd_WEP_KEY_INDEX_MASK)); - - lbs_deb_cmd("Tx key Index: %u\n", le16_to_cpu(wep->keyindex)); + (u32)CMD_WEP_KEY_INDEX_MASK)); /* Copy key types and material to host command structure */ for (i = 0; i < 4; i++) { - struct WLAN_802_11_KEY * pkey = &assoc_req->wep_keys[i]; + struct enc_key * pkey = &assoc_req->wep_keys[i]; switch (pkey->len) { case KEY_LEN_WEP_40: - wep->keytype[i] = - cpu_to_le16(cmd_type_wep_40_bit); + wep->keytype[i] = CMD_TYPE_WEP_40_BIT; memmove(&wep->keymaterial[i], pkey->key, pkey->len); + lbs_deb_cmd("SET_WEP: add key %d (40 bit)\n", i); break; case KEY_LEN_WEP_104: - wep->keytype[i] = - cpu_to_le16(cmd_type_wep_104_bit); + wep->keytype[i] = CMD_TYPE_WEP_104_BIT; memmove(&wep->keymaterial[i], pkey->key, pkey->len); + lbs_deb_cmd("SET_WEP: add key %d (104 bit)\n", i); break; case 0: break; default: - lbs_deb_cmd("Invalid WEP key %d length of %d\n", + lbs_deb_cmd("SET_WEP: invalid key %d, length %d\n", i, pkey->len); ret = -1; goto done; break; } } - } else if (cmd_act == cmd_act_remove) { + } else if (cmd_act == CMD_ACT_REMOVE) 
{ /* ACT_REMOVE clears _all_ WEP keys */ - wep->action = cpu_to_le16(cmd_act_remove); + wep->action = cpu_to_le16(CMD_ACT_REMOVE); /* default tx key index */ wep->keyindex = cpu_to_le16((u16)(adapter->wep_tx_keyidx & - (u32)cmd_WEP_KEY_INDEX_MASK)); + (u32)CMD_WEP_KEY_INDEX_MASK)); + lbs_deb_cmd("SET_WEP: remove key %d\n", adapter->wep_tx_keyidx); } ret = 0; @@ -232,15 +229,16 @@ static int wlan_cmd_802_11_enable_rsn(wlan_private * priv, lbs_deb_enter(LBS_DEB_CMD); - cmd->command = cpu_to_le16(cmd_802_11_enable_rsn); + cmd->command = cpu_to_le16(CMD_802_11_ENABLE_RSN); cmd->size = cpu_to_le16(sizeof(*penableRSN) + S_DS_GEN); penableRSN->action = cpu_to_le16(cmd_action); - if (cmd_action == cmd_act_set) { + if (cmd_action == CMD_ACT_SET) { if (*enable) - penableRSN->enable = cpu_to_le16(cmd_enable_rsn); + penableRSN->enable = cpu_to_le16(CMD_ENABLE_RSN); else - penableRSN->enable = cpu_to_le16(cmd_disable_rsn); + penableRSN->enable = cpu_to_le16(CMD_DISABLE_RSN); + lbs_deb_cmd("ENABLE_RSN: %d\n", *enable); } lbs_deb_leave(LBS_DEB_CMD); @@ -249,9 +247,9 @@ static int wlan_cmd_802_11_enable_rsn(wlan_private * priv, static void set_one_wpa_key(struct MrvlIEtype_keyParamSet * pkeyparamset, - struct WLAN_802_11_KEY * pkey) + struct enc_key * pkey) { - pkeyparamset->keytypeid = cpu_to_le16(pkey->type); + lbs_deb_enter(LBS_DEB_CMD); if (pkey->flags & KEY_INFO_WPA_ENABLED) { pkeyparamset->keyinfo |= cpu_to_le16(KEY_INFO_WPA_ENABLED); @@ -264,12 +262,14 @@ static void set_one_wpa_key(struct MrvlIEtype_keyParamSet * pkeyparamset, } pkeyparamset->type = cpu_to_le16(TLV_TYPE_KEY_MATERIAL); + pkeyparamset->keytypeid = cpu_to_le16(pkey->type); pkeyparamset->keylen = cpu_to_le16(pkey->len); memcpy(pkeyparamset->key, pkey->key, pkey->len); pkeyparamset->length = cpu_to_le16( sizeof(pkeyparamset->keytypeid) + sizeof(pkeyparamset->keyinfo) + sizeof(pkeyparamset->keylen) + sizeof(pkeyparamset->key)); + lbs_deb_leave(LBS_DEB_CMD); } static int wlan_cmd_802_11_key_material(wlan_private * priv, @@ -285,10 +285,10 @@ static int wlan_cmd_802_11_key_material(wlan_private * priv, lbs_deb_enter(LBS_DEB_CMD); - cmd->command = cpu_to_le16(cmd_802_11_key_material); + cmd->command = cpu_to_le16(CMD_802_11_KEY_MATERIAL); pkeymaterial->action = cpu_to_le16(cmd_action); - if (cmd_action == cmd_act_get) { + if (cmd_action == CMD_ACT_GET) { cmd->size = cpu_to_le16(S_DS_GEN + sizeof (pkeymaterial->action)); ret = 0; goto done; @@ -324,30 +324,37 @@ static int wlan_cmd_802_11_reset(wlan_private * priv, { struct cmd_ds_802_11_reset *reset = &cmd->params.reset; - cmd->command = cpu_to_le16(cmd_802_11_reset); + lbs_deb_enter(LBS_DEB_CMD); + + cmd->command = cpu_to_le16(CMD_802_11_RESET); cmd->size = cpu_to_le16(sizeof(struct cmd_ds_802_11_reset) + S_DS_GEN); reset->action = cpu_to_le16(cmd_action); + lbs_deb_leave(LBS_DEB_CMD); return 0; } static int wlan_cmd_802_11_get_log(wlan_private * priv, struct cmd_ds_command *cmd) { - cmd->command = cpu_to_le16(cmd_802_11_get_log); + lbs_deb_enter(LBS_DEB_CMD); + cmd->command = cpu_to_le16(CMD_802_11_GET_LOG); cmd->size = cpu_to_le16(sizeof(struct cmd_ds_802_11_get_log) + S_DS_GEN); + lbs_deb_leave(LBS_DEB_CMD); return 0; } static int wlan_cmd_802_11_get_stat(wlan_private * priv, struct cmd_ds_command *cmd) { - cmd->command = cpu_to_le16(cmd_802_11_get_stat); + lbs_deb_enter(LBS_DEB_CMD); + cmd->command = cpu_to_le16(CMD_802_11_GET_STAT); cmd->size = cpu_to_le16(sizeof(struct cmd_ds_802_11_get_stat) + S_DS_GEN); + lbs_deb_leave(LBS_DEB_CMD); return 0; } @@ -364,15 +371,15 @@ static int 
wlan_cmd_802_11_snmp_mib(wlan_private * priv, lbs_deb_cmd("SNMP_CMD: cmd_oid = 0x%x\n", cmd_oid); - cmd->command = cpu_to_le16(cmd_802_11_snmp_mib); + cmd->command = cpu_to_le16(CMD_802_11_SNMP_MIB); cmd->size = cpu_to_le16(sizeof(*pSNMPMIB) + S_DS_GEN); switch (cmd_oid) { case OID_802_11_INFRASTRUCTURE_MODE: { u8 mode = (u8) (size_t) pdata_buf; - pSNMPMIB->querytype = cpu_to_le16(cmd_act_set); - pSNMPMIB->oid = cpu_to_le16((u16) desired_bsstype_i); + pSNMPMIB->querytype = cpu_to_le16(CMD_ACT_SET); + pSNMPMIB->oid = cpu_to_le16((u16) DESIRED_BSSTYPE_I); pSNMPMIB->bufsize = sizeof(u8); if (mode == IW_MODE_ADHOC) { ucTemp = SNMP_MIB_VALUE_ADHOC; @@ -390,10 +397,10 @@ static int wlan_cmd_802_11_snmp_mib(wlan_private * priv, { u32 ulTemp; - pSNMPMIB->oid = cpu_to_le16((u16) dot11d_i); + pSNMPMIB->oid = cpu_to_le16((u16) DOT11D_I); - if (cmd_action == cmd_act_set) { - pSNMPMIB->querytype = cmd_act_set; + if (cmd_action == CMD_ACT_SET) { + pSNMPMIB->querytype = CMD_ACT_SET; pSNMPMIB->bufsize = sizeof(u16); ulTemp = *(u32 *)pdata_buf; *((__le16 *)(pSNMPMIB->value)) = @@ -406,12 +413,12 @@ static int wlan_cmd_802_11_snmp_mib(wlan_private * priv, { u32 ulTemp; - pSNMPMIB->oid = cpu_to_le16((u16) fragthresh_i); + pSNMPMIB->oid = cpu_to_le16((u16) FRAGTHRESH_I); - if (cmd_action == cmd_act_get) { - pSNMPMIB->querytype = cpu_to_le16(cmd_act_get); - } else if (cmd_action == cmd_act_set) { - pSNMPMIB->querytype = cpu_to_le16(cmd_act_set); + if (cmd_action == CMD_ACT_GET) { + pSNMPMIB->querytype = cpu_to_le16(CMD_ACT_GET); + } else if (cmd_action == CMD_ACT_SET) { + pSNMPMIB->querytype = cpu_to_le16(CMD_ACT_SET); pSNMPMIB->bufsize = cpu_to_le16(sizeof(u16)); ulTemp = *((u32 *) pdata_buf); *((__le16 *)(pSNMPMIB->value)) = @@ -426,12 +433,12 @@ static int wlan_cmd_802_11_snmp_mib(wlan_private * priv, { u32 ulTemp; - pSNMPMIB->oid = le16_to_cpu((u16) rtsthresh_i); + pSNMPMIB->oid = le16_to_cpu((u16) RTSTHRESH_I); - if (cmd_action == cmd_act_get) { - pSNMPMIB->querytype = cpu_to_le16(cmd_act_get); - } else if (cmd_action == cmd_act_set) { - pSNMPMIB->querytype = cpu_to_le16(cmd_act_set); + if (cmd_action == CMD_ACT_GET) { + pSNMPMIB->querytype = cpu_to_le16(CMD_ACT_GET); + } else if (cmd_action == CMD_ACT_SET) { + pSNMPMIB->querytype = cpu_to_le16(CMD_ACT_SET); pSNMPMIB->bufsize = cpu_to_le16(sizeof(u16)); ulTemp = *((u32 *)pdata_buf); *(__le16 *)(pSNMPMIB->value) = @@ -441,12 +448,12 @@ static int wlan_cmd_802_11_snmp_mib(wlan_private * priv, break; } case OID_802_11_TX_RETRYCOUNT: - pSNMPMIB->oid = cpu_to_le16((u16) short_retrylim_i); + pSNMPMIB->oid = cpu_to_le16((u16) SHORT_RETRYLIM_I); - if (cmd_action == cmd_act_get) { - pSNMPMIB->querytype = cpu_to_le16(cmd_act_get); - } else if (cmd_action == cmd_act_set) { - pSNMPMIB->querytype = cpu_to_le16(cmd_act_set); + if (cmd_action == CMD_ACT_GET) { + pSNMPMIB->querytype = cpu_to_le16(CMD_ACT_GET); + } else if (cmd_action == CMD_ACT_SET) { + pSNMPMIB->querytype = cpu_to_le16(CMD_ACT_SET); pSNMPMIB->bufsize = cpu_to_le16(sizeof(u16)); *((__le16 *)(pSNMPMIB->value)) = cpu_to_le16((u16) adapter->txretrycount); @@ -463,7 +470,7 @@ static int wlan_cmd_802_11_snmp_mib(wlan_private * priv, le16_to_cpu(cmd->seqnum), le16_to_cpu(cmd->result)); lbs_deb_cmd( - "SNMP_CMD: action=0x%x, oid=0x%x, oidsize=0x%x, value=0x%x\n", + "SNMP_CMD: action 0x%x, oid 0x%x, oidsize 0x%x, value 0x%x\n", le16_to_cpu(pSNMPMIB->querytype), le16_to_cpu(pSNMPMIB->oid), le16_to_cpu(pSNMPMIB->bufsize), le16_to_cpu(*(__le16 *) pSNMPMIB->value)); @@ -484,20 +491,20 @@ static int 
wlan_cmd_802_11_radio_control(wlan_private * priv, cmd->size = cpu_to_le16((sizeof(struct cmd_ds_802_11_radio_control)) + S_DS_GEN); - cmd->command = cpu_to_le16(cmd_802_11_radio_control); + cmd->command = cpu_to_le16(CMD_802_11_RADIO_CONTROL); pradiocontrol->action = cpu_to_le16(cmd_action); switch (adapter->preamble) { - case cmd_type_short_preamble: + case CMD_TYPE_SHORT_PREAMBLE: pradiocontrol->control = cpu_to_le16(SET_SHORT_PREAMBLE); break; - case cmd_type_long_preamble: + case CMD_TYPE_LONG_PREAMBLE: pradiocontrol->control = cpu_to_le16(SET_LONG_PREAMBLE); break; - case cmd_type_auto_preamble: + case CMD_TYPE_AUTO_PREAMBLE: default: pradiocontrol->control = cpu_to_le16(SET_AUTO_PREAMBLE); break; @@ -523,7 +530,7 @@ static int wlan_cmd_802_11_rf_tx_power(wlan_private * priv, cmd->size = cpu_to_le16((sizeof(struct cmd_ds_802_11_rf_tx_power)) + S_DS_GEN); - cmd->command = cpu_to_le16(cmd_802_11_rf_tx_power); + cmd->command = cpu_to_le16(CMD_802_11_RF_TX_POWER); prtp->action = cpu_to_le16(cmd_action); lbs_deb_cmd("RF_TX_POWER_CMD: size:%d cmd:0x%x Act:%d\n", @@ -531,23 +538,23 @@ static int wlan_cmd_802_11_rf_tx_power(wlan_private * priv, le16_to_cpu(prtp->action)); switch (cmd_action) { - case cmd_act_tx_power_opt_get: - prtp->action = cpu_to_le16(cmd_act_get); + case CMD_ACT_TX_POWER_OPT_GET: + prtp->action = cpu_to_le16(CMD_ACT_GET); prtp->currentlevel = 0; break; - case cmd_act_tx_power_opt_set_high: - prtp->action = cpu_to_le16(cmd_act_set); - prtp->currentlevel = cpu_to_le16(cmd_act_tx_power_index_high); + case CMD_ACT_TX_POWER_OPT_SET_HIGH: + prtp->action = cpu_to_le16(CMD_ACT_SET); + prtp->currentlevel = cpu_to_le16(CMD_ACT_TX_POWER_INDEX_HIGH); break; - case cmd_act_tx_power_opt_set_mid: - prtp->action = cpu_to_le16(cmd_act_set); - prtp->currentlevel = cpu_to_le16(cmd_act_tx_power_index_mid); + case CMD_ACT_TX_POWER_OPT_SET_MID: + prtp->action = cpu_to_le16(CMD_ACT_SET); + prtp->currentlevel = cpu_to_le16(CMD_ACT_TX_POWER_INDEX_MID); break; - case cmd_act_tx_power_opt_set_low: - prtp->action = cpu_to_le16(cmd_act_set); + case CMD_ACT_TX_POWER_OPT_SET_LOW: + prtp->action = cpu_to_le16(CMD_ACT_SET); prtp->currentlevel = cpu_to_le16(*((u16 *) pdata_buf)); break; } @@ -556,19 +563,21 @@ static int wlan_cmd_802_11_rf_tx_power(wlan_private * priv, return 0; } -static int wlan_cmd_802_11_rf_antenna(wlan_private * priv, +static int wlan_cmd_802_11_monitor_mode(wlan_private * priv, struct cmd_ds_command *cmd, u16 cmd_action, void *pdata_buf) { - struct cmd_ds_802_11_rf_antenna *rant = &cmd->params.rant; + struct cmd_ds_802_11_monitor_mode *monitor = &cmd->params.monitor; - cmd->command = cpu_to_le16(cmd_802_11_rf_antenna); - cmd->size = cpu_to_le16(sizeof(struct cmd_ds_802_11_rf_antenna) + - S_DS_GEN); + cmd->command = cpu_to_le16(CMD_802_11_MONITOR_MODE); + cmd->size = + cpu_to_le16(sizeof(struct cmd_ds_802_11_monitor_mode) + + S_DS_GEN); - rant->action = cpu_to_le16(cmd_action); - if ((cmd_action == cmd_act_set_rx) || (cmd_action == cmd_act_set_tx)) { - rant->antennamode = cpu_to_le16((u16) (*(u32 *) pdata_buf)); + monitor->action = cpu_to_le16(cmd_action); + if (cmd_action == CMD_ACT_SET) { + monitor->mode = + cpu_to_le16((u16) (*(u32 *) pdata_buf)); } return 0; @@ -582,12 +591,11 @@ static int wlan_cmd_802_11_rate_adapt_rateset(wlan_private * priv, *rateadapt = &cmd->params.rateset; wlan_adapter *adapter = priv->adapter; + lbs_deb_enter(LBS_DEB_CMD); cmd->size = cpu_to_le16(sizeof(struct cmd_ds_802_11_rate_adapt_rateset) + S_DS_GEN); - cmd->command = 
cpu_to_le16(cmd_802_11_rate_adapt_rateset); - - lbs_deb_enter(LBS_DEB_CMD); + cmd->command = cpu_to_le16(CMD_802_11_RATE_ADAPT_RATESET); rateadapt->action = cpu_to_le16(cmd_action); rateadapt->enablehwauto = cpu_to_le16(adapter->enablehwauto); @@ -608,19 +616,16 @@ static int wlan_cmd_802_11_data_rate(wlan_private * priv, cmd->size = cpu_to_le16(sizeof(struct cmd_ds_802_11_data_rate) + S_DS_GEN); - - cmd->command = cpu_to_le16(cmd_802_11_data_rate); - + cmd->command = cpu_to_le16(CMD_802_11_DATA_RATE); memset(pdatarate, 0, sizeof(struct cmd_ds_802_11_data_rate)); - pdatarate->action = cpu_to_le16(cmd_action); - if (cmd_action == cmd_act_set_tx_fix_rate) { - pdatarate->datarate[0] = libertas_data_rate_to_index(adapter->datarate); - lbs_deb_cmd("Setting FW for fixed rate 0x%02X\n", - adapter->datarate); - } else if (cmd_action == cmd_act_set_tx_auto) { - lbs_deb_cmd("Setting FW for AUTO rate\n"); + if (cmd_action == CMD_ACT_SET_TX_FIX_RATE) { + pdatarate->rates[0] = libertas_data_rate_to_fw_index(adapter->cur_rate); + lbs_deb_cmd("DATA_RATE: set fixed 0x%02X\n", + adapter->cur_rate); + } else if (cmd_action == CMD_ACT_SET_TX_AUTO) { + lbs_deb_cmd("DATA_RATE: setting auto\n"); } lbs_deb_leave(LBS_DEB_CMD); @@ -634,16 +639,19 @@ static int wlan_cmd_mac_multicast_adr(wlan_private * priv, struct cmd_ds_mac_multicast_adr *pMCastAdr = &cmd->params.madr; wlan_adapter *adapter = priv->adapter; + lbs_deb_enter(LBS_DEB_CMD); cmd->size = cpu_to_le16(sizeof(struct cmd_ds_mac_multicast_adr) + S_DS_GEN); - cmd->command = cpu_to_le16(cmd_mac_multicast_adr); + cmd->command = cpu_to_le16(CMD_MAC_MULTICAST_ADR); + lbs_deb_cmd("MULTICAST_ADR: setting %d addresses\n", pMCastAdr->nr_of_adrs); pMCastAdr->action = cpu_to_le16(cmd_action); pMCastAdr->nr_of_adrs = cpu_to_le16((u16) adapter->nr_of_multicastmacaddr); memcpy(pMCastAdr->maclist, adapter->multicastlist, adapter->nr_of_multicastmacaddr * ETH_ALEN); + lbs_deb_leave(LBS_DEB_CMD); return 0; } @@ -653,16 +661,18 @@ static int wlan_cmd_802_11_rf_channel(wlan_private * priv, { struct cmd_ds_802_11_rf_channel *rfchan = &cmd->params.rfchannel; - cmd->command = cpu_to_le16(cmd_802_11_rf_channel); + lbs_deb_enter(LBS_DEB_CMD); + cmd->command = cpu_to_le16(CMD_802_11_RF_CHANNEL); cmd->size = cpu_to_le16(sizeof(struct cmd_ds_802_11_rf_channel) + S_DS_GEN); - if (option == cmd_opt_802_11_rf_channel_set) { + if (option == CMD_OPT_802_11_RF_CHANNEL_SET) { rfchan->currentchannel = cpu_to_le16(*((u16 *) pdata_buf)); } rfchan->action = cpu_to_le16(option); + lbs_deb_leave(LBS_DEB_CMD); return 0; } @@ -671,9 +681,10 @@ static int wlan_cmd_802_11_rssi(wlan_private * priv, { wlan_adapter *adapter = priv->adapter; - cmd->command = cpu_to_le16(cmd_802_11_rssi); + lbs_deb_enter(LBS_DEB_CMD); + cmd->command = cpu_to_le16(CMD_802_11_RSSI); cmd->size = cpu_to_le16(sizeof(struct cmd_ds_802_11_rssi) + S_DS_GEN); - cmd->params.rssi.N = cpu_to_le16(priv->adapter->bcn_avg_factor); + cmd->params.rssi.N = cpu_to_le16(DEFAULT_BCN_AVG_FACTOR); /* reset Beacon SNR/NF/RSSI values */ adapter->SNR[TYPE_BEACON][TYPE_NOAVG] = 0; @@ -683,6 +694,7 @@ static int wlan_cmd_802_11_rssi(wlan_private * priv, adapter->RSSI[TYPE_BEACON][TYPE_NOAVG] = 0; adapter->RSSI[TYPE_BEACON][TYPE_AVG] = 0; + lbs_deb_leave(LBS_DEB_CMD); return 0; } @@ -697,7 +709,7 @@ static int wlan_cmd_reg_access(wlan_private * priv, offval = (struct wlan_offset_value *)pdata_buf; switch (cmdptr->command) { - case cmd_mac_reg_access: + case CMD_MAC_REG_ACCESS: { struct cmd_ds_mac_reg_access *macreg; @@ -715,7 +727,7 @@ static int 
wlan_cmd_reg_access(wlan_private * priv, break; } - case cmd_bbp_reg_access: + case CMD_BBP_REG_ACCESS: { struct cmd_ds_bbp_reg_access *bbpreg; @@ -734,7 +746,7 @@ static int wlan_cmd_reg_access(wlan_private * priv, break; } - case cmd_rf_reg_access: + case CMD_RF_REG_ACCESS: { struct cmd_ds_rf_reg_access *rfreg; @@ -767,19 +779,21 @@ static int wlan_cmd_802_11_mac_address(wlan_private * priv, { wlan_adapter *adapter = priv->adapter; - cmd->command = cpu_to_le16(cmd_802_11_mac_address); + lbs_deb_enter(LBS_DEB_CMD); + cmd->command = cpu_to_le16(CMD_802_11_MAC_ADDRESS); cmd->size = cpu_to_le16(sizeof(struct cmd_ds_802_11_mac_address) + S_DS_GEN); cmd->result = 0; cmd->params.macadd.action = cpu_to_le16(cmd_action); - if (cmd_action == cmd_act_set) { + if (cmd_action == CMD_ACT_SET) { memcpy(cmd->params.macadd.macadd, adapter->current_addr, ETH_ALEN); - lbs_dbg_hex("SET_CMD: MAC ADDRESS-", adapter->current_addr, 6); + lbs_deb_hex(LBS_DEB_CMD, "SET_CMD: MAC addr", adapter->current_addr, 6); } + lbs_deb_leave(LBS_DEB_CMD); return 0; } @@ -791,7 +805,7 @@ static int wlan_cmd_802_11_eeprom_access(wlan_private * priv, lbs_deb_enter(LBS_DEB_CMD); - cmd->command = cpu_to_le16(cmd_802_11_eeprom_access); + cmd->command = cpu_to_le16(CMD_802_11_EEPROM_ACCESS); cmd->size = cpu_to_le16(sizeof(struct cmd_ds_802_11_eeprom_access) + S_DS_GEN); cmd->result = 0; @@ -801,6 +815,7 @@ static int wlan_cmd_802_11_eeprom_access(wlan_private * priv, cmd->params.rdeeprom.bytecount = cpu_to_le16(ea->NOB); cmd->params.rdeeprom.value = 0; + lbs_deb_leave(LBS_DEB_CMD); return 0; } @@ -809,35 +824,36 @@ static int wlan_cmd_bt_access(wlan_private * priv, u16 cmd_action, void *pdata_buf) { struct cmd_ds_bt_access *bt_access = &cmd->params.bt; - lbs_deb_cmd("BT CMD(%d)\n", cmd_action); + lbs_deb_enter_args(LBS_DEB_CMD, "action %d", cmd_action); - cmd->command = cpu_to_le16(cmd_bt_access); + cmd->command = cpu_to_le16(CMD_BT_ACCESS); cmd->size = cpu_to_le16(sizeof(struct cmd_ds_bt_access) + S_DS_GEN); cmd->result = 0; bt_access->action = cpu_to_le16(cmd_action); switch (cmd_action) { - case cmd_act_bt_access_add: + case CMD_ACT_BT_ACCESS_ADD: memcpy(bt_access->addr1, pdata_buf, 2 * ETH_ALEN); - lbs_dbg_hex("BT_ADD: blinded mac address-", bt_access->addr1, 6); + lbs_deb_hex(LBS_DEB_MESH, "BT_ADD: blinded MAC addr", bt_access->addr1, 6); break; - case cmd_act_bt_access_del: + case CMD_ACT_BT_ACCESS_DEL: memcpy(bt_access->addr1, pdata_buf, 1 * ETH_ALEN); - lbs_dbg_hex("BT_DEL: blinded mac address-", bt_access->addr1, 6); + lbs_deb_hex(LBS_DEB_MESH, "BT_DEL: blinded MAC addr", bt_access->addr1, 6); break; - case cmd_act_bt_access_list: + case CMD_ACT_BT_ACCESS_LIST: bt_access->id = cpu_to_le32(*(u32 *) pdata_buf); break; - case cmd_act_bt_access_reset: + case CMD_ACT_BT_ACCESS_RESET: break; - case cmd_act_bt_access_set_invert: + case CMD_ACT_BT_ACCESS_SET_INVERT: bt_access->id = cpu_to_le32(*(u32 *) pdata_buf); break; - case cmd_act_bt_access_get_invert: + case CMD_ACT_BT_ACCESS_GET_INVERT: break; default: break; } + lbs_deb_leave(LBS_DEB_CMD); return 0; } @@ -846,9 +862,9 @@ static int wlan_cmd_fwt_access(wlan_private * priv, u16 cmd_action, void *pdata_buf) { struct cmd_ds_fwt_access *fwt_access = &cmd->params.fwt; - lbs_deb_cmd("FWT CMD(%d)\n", cmd_action); + lbs_deb_enter_args(LBS_DEB_CMD, "action %d", cmd_action); - cmd->command = cpu_to_le16(cmd_fwt_access); + cmd->command = cpu_to_le16(CMD_FWT_ACCESS); cmd->size = cpu_to_le16(sizeof(struct cmd_ds_fwt_access) + S_DS_GEN); cmd->result = 0; @@ -859,6 +875,7 @@ static int 
wlan_cmd_fwt_access(wlan_private * priv, fwt_access->action = cpu_to_le16(cmd_action); + lbs_deb_leave(LBS_DEB_CMD); return 0; } @@ -867,9 +884,9 @@ static int wlan_cmd_mesh_access(wlan_private * priv, u16 cmd_action, void *pdata_buf) { struct cmd_ds_mesh_access *mesh_access = &cmd->params.mesh; - lbs_deb_cmd("FWT CMD(%d)\n", cmd_action); + lbs_deb_enter_args(LBS_DEB_CMD, "action %d", cmd_action); - cmd->command = cpu_to_le16(cmd_mesh_access); + cmd->command = cpu_to_le16(CMD_MESH_ACCESS); cmd->size = cpu_to_le16(sizeof(struct cmd_ds_mesh_access) + S_DS_GEN); cmd->result = 0; @@ -880,6 +897,18 @@ static int wlan_cmd_mesh_access(wlan_private * priv, mesh_access->action = cpu_to_le16(cmd_action); + lbs_deb_leave(LBS_DEB_CMD); + return 0; +} + +static int wlan_cmd_set_boot2_ver(wlan_private * priv, + struct cmd_ds_command *cmd, + u16 cmd_action, void *pdata_buf) +{ + struct cmd_ds_set_boot2_ver *boot2_ver = &cmd->params.boot2_ver; + cmd->command = cpu_to_le16(CMD_SET_BOOT2_VER); + cmd->size = cpu_to_le16(sizeof(struct cmd_ds_set_boot2_ver) + S_DS_GEN); + boot2_ver->version = priv->boot2_version; return 0; } @@ -888,23 +917,23 @@ void libertas_queue_cmd(wlan_adapter * adapter, struct cmd_ctrl_node *cmdnode, u unsigned long flags; struct cmd_ds_command *cmdptr; - lbs_deb_enter(LBS_DEB_CMD); + lbs_deb_enter(LBS_DEB_HOST); if (!cmdnode) { - lbs_deb_cmd("QUEUE_CMD: cmdnode is NULL\n"); + lbs_deb_host("QUEUE_CMD: cmdnode is NULL\n"); goto done; } cmdptr = (struct cmd_ds_command *)cmdnode->bufvirtualaddr; if (!cmdptr) { - lbs_deb_cmd("QUEUE_CMD: cmdptr is NULL\n"); + lbs_deb_host("QUEUE_CMD: cmdptr is NULL\n"); goto done; } /* Exit_PS command needs to be queued in the header always. */ - if (cmdptr->command == cmd_802_11_ps_mode) { + if (cmdptr->command == CMD_802_11_PS_MODE) { struct cmd_ds_802_11_ps_mode *psm = &cmdptr->params.psmode; - if (psm->action == cpu_to_le16(cmd_subcmd_exit_ps)) { + if (psm->action == cpu_to_le16(CMD_SUBCMD_EXIT_PS)) { if (adapter->psstate != PS_STATE_FULL_POWER) addtail = 0; } @@ -920,17 +949,16 @@ void libertas_queue_cmd(wlan_adapter * adapter, struct cmd_ctrl_node *cmdnode, u spin_unlock_irqrestore(&adapter->driver_lock, flags); - lbs_deb_cmd("QUEUE_CMD: Inserted node=%p, cmd=0x%x in cmdpendingq\n", - cmdnode, + lbs_deb_host("QUEUE_CMD: inserted command 0x%04x into cmdpendingq\n", le16_to_cpu(((struct cmd_ds_gen*)cmdnode->bufvirtualaddr)->command)); done: - lbs_deb_leave(LBS_DEB_CMD); + lbs_deb_leave(LBS_DEB_HOST); } /* * TODO: Fix the issue when DownloadcommandToStation is being called the - * second time when the command timesout. All the cmdptr->xxx are in little + * second time when the command times out. All the cmdptr->xxx are in little * endian and therefore all the comparissions will fail. 
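The TODO above turns on byte order: the command header fields stay little-endian in the driver's buffer, so a host-order comparison on the resend path goes wrong on big-endian machines. A stand-alone sketch of the comparison pattern the driver uses elsewhere, assuming only the kernel byte-order helpers; struct cmd_header_sketch and is_ps_mode_cmd() are hypothetical names for illustration, not driver API:

#include <linux/types.h>
#include <asm/byteorder.h>

/* Firmware command header kept in wire (little-endian) byte order,
 * mirroring how the driver stores its command and size fields. */
struct cmd_header_sketch {
	__le16 command;
	__le16 size;
};

static bool is_ps_mode_cmd(const struct cmd_header_sketch *hdr, u16 opcode)
{
	/* Swap the host-order constant once instead of converting the
	 * stored field on every test; comparing two __le16 values is
	 * also sparse-clean. */
	return hdr->command == cpu_to_le16(opcode);
}

On little-endian hosts both sides compile to the same constant, so the pattern only changes behaviour for big-endian targets and for sparse's __le16 checking.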
* For now - we are not performing the endian conversion the second time - but * for PS and DEEP_SLEEP we need to worry @@ -941,68 +969,59 @@ static int DownloadcommandToStation(wlan_private * priv, unsigned long flags; struct cmd_ds_command *cmdptr; wlan_adapter *adapter = priv->adapter; - int ret = 0; + int ret = -1; u16 cmdsize; u16 command; - lbs_deb_enter(LBS_DEB_CMD); + lbs_deb_enter(LBS_DEB_HOST); if (!adapter || !cmdnode) { - lbs_deb_cmd("DNLD_CMD: adapter = %p, cmdnode = %p\n", - adapter, cmdnode); - if (cmdnode) { - spin_lock_irqsave(&adapter->driver_lock, flags); - __libertas_cleanup_and_insert_cmd(priv, cmdnode); - spin_unlock_irqrestore(&adapter->driver_lock, flags); - } - ret = -1; + lbs_deb_host("DNLD_CMD: adapter or cmdmode is NULL\n"); goto done; } cmdptr = (struct cmd_ds_command *)cmdnode->bufvirtualaddr; - spin_lock_irqsave(&adapter->driver_lock, flags); if (!cmdptr || !cmdptr->size) { - lbs_deb_cmd("DNLD_CMD: cmdptr is Null or cmd size is Zero, " - "Not sending\n"); + lbs_deb_host("DNLD_CMD: cmdptr is NULL or zero\n"); __libertas_cleanup_and_insert_cmd(priv, cmdnode); spin_unlock_irqrestore(&adapter->driver_lock, flags); - ret = -1; goto done; } adapter->cur_cmd = cmdnode; adapter->cur_cmd_retcode = 0; spin_unlock_irqrestore(&adapter->driver_lock, flags); - lbs_deb_cmd("DNLD_CMD:: Before download, size of cmd = %d\n", - le16_to_cpu(cmdptr->size)); cmdsize = cmdptr->size; - command = cpu_to_le16(cmdptr->command); + lbs_deb_host("DNLD_CMD: command 0x%04x, size %d, jiffies %lu\n", + command, le16_to_cpu(cmdptr->size), jiffies); + lbs_deb_hex(LBS_DEB_HOST, "DNLD_CMD", cmdnode->bufvirtualaddr, cmdsize); + cmdnode->cmdwaitqwoken = 0; cmdsize = cpu_to_le16(cmdsize); ret = priv->hw_host_to_card(priv, MVMS_CMD, (u8 *) cmdptr, cmdsize); if (ret != 0) { - lbs_deb_cmd("DNLD_CMD: Host to Card failed\n"); + lbs_deb_host("DNLD_CMD: hw_host_to_card failed\n"); spin_lock_irqsave(&adapter->driver_lock, flags); + adapter->cur_cmd_retcode = ret; __libertas_cleanup_and_insert_cmd(priv, adapter->cur_cmd); + adapter->nr_cmd_pending--; adapter->cur_cmd = NULL; spin_unlock_irqrestore(&adapter->driver_lock, flags); - ret = -1; goto done; } - lbs_deb_cmd("DNLD_CMD: Sent command 0x%x @ %lu\n", command, jiffies); - lbs_dbg_hex("DNLD_CMD: command", cmdnode->bufvirtualaddr, cmdsize); + lbs_deb_cmd("DNLD_CMD: sent command 0x%04x, jiffies %lu\n", command, jiffies); /* Setup the timer after transmit command */ - if (command == cmd_802_11_scan || command == cmd_802_11_authenticate - || command == cmd_802_11_associate) + if (command == CMD_802_11_SCAN || command == CMD_802_11_AUTHENTICATE + || command == CMD_802_11_ASSOCIATE) mod_timer(&adapter->command_timer, jiffies + (10*HZ)); else mod_timer(&adapter->command_timer, jiffies + (5*HZ)); @@ -1010,7 +1029,7 @@ static int DownloadcommandToStation(wlan_private * priv, ret = 0; done: - lbs_deb_leave_args(LBS_DEB_CMD, "ret %d", ret); + lbs_deb_leave_args(LBS_DEB_HOST, "ret %d", ret); return ret; } @@ -1021,11 +1040,11 @@ static int wlan_cmd_mac_control(wlan_private * priv, lbs_deb_enter(LBS_DEB_CMD); - cmd->command = cpu_to_le16(cmd_mac_control); + cmd->command = cpu_to_le16(CMD_MAC_CONTROL); cmd->size = cpu_to_le16(sizeof(struct cmd_ds_mac_control) + S_DS_GEN); mac->action = cpu_to_le16(priv->adapter->currentpacketfilter); - lbs_deb_cmd("wlan_cmd_mac_control(): action=0x%X size=%d\n", + lbs_deb_cmd("MAC_CONTROL: action 0x%x, size %d\n", le16_to_cpu(mac->action), le16_to_cpu(cmd->size)); lbs_deb_leave(LBS_DEB_CMD); @@ -1041,15 +1060,13 @@ void 
__libertas_cleanup_and_insert_cmd(wlan_private * priv, struct cmd_ctrl_node wlan_adapter *adapter = priv->adapter; if (!ptempcmd) - goto done; + return; cleanup_cmdnode(ptempcmd); list_add_tail((struct list_head *)ptempcmd, &adapter->cmdfreeq); -done: - return; } -void libertas_cleanup_and_insert_cmd(wlan_private * priv, struct cmd_ctrl_node *ptempcmd) +static void libertas_cleanup_and_insert_cmd(wlan_private * priv, struct cmd_ctrl_node *ptempcmd) { unsigned long flags; @@ -1065,11 +1082,11 @@ int libertas_set_radio_control(wlan_private * priv) lbs_deb_enter(LBS_DEB_CMD); ret = libertas_prepare_and_send_command(priv, - cmd_802_11_radio_control, - cmd_act_set, - cmd_option_waitforrsp, 0, NULL); + CMD_802_11_RADIO_CONTROL, + CMD_ACT_SET, + CMD_OPTION_WAITFORRSP, 0, NULL); - lbs_deb_cmd("RADIO_SET: on or off: 0x%X, preamble = 0x%X\n", + lbs_deb_cmd("RADIO_SET: radio %d, preamble %d\n", priv->adapter->radioon, priv->adapter->preamble); lbs_deb_leave_args(LBS_DEB_CMD, "ret %d", ret); @@ -1082,12 +1099,9 @@ int libertas_set_mac_packet_filter(wlan_private * priv) lbs_deb_enter(LBS_DEB_CMD); - lbs_deb_cmd("libertas_set_mac_packet_filter value = %x\n", - priv->adapter->currentpacketfilter); - /* Send MAC control command to station */ ret = libertas_prepare_and_send_command(priv, - cmd_mac_control, 0, 0, 0, NULL); + CMD_MAC_CONTROL, 0, 0, 0, NULL); lbs_deb_leave_args(LBS_DEB_CMD, "ret %d", ret); return ret; @@ -1115,16 +1129,16 @@ int libertas_prepare_and_send_command(wlan_private * priv, struct cmd_ds_command *cmdptr; unsigned long flags; - lbs_deb_enter(LBS_DEB_CMD); + lbs_deb_enter(LBS_DEB_HOST); if (!adapter) { - lbs_deb_cmd("PREP_CMD: adapter is Null\n"); + lbs_deb_host("PREP_CMD: adapter is NULL\n"); ret = -1; goto done; } if (adapter->surpriseremoved) { - lbs_deb_cmd("PREP_CMD: Card is Removed\n"); + lbs_deb_host("PREP_CMD: card removed\n"); ret = -1; goto done; } @@ -1132,10 +1146,10 @@ int libertas_prepare_and_send_command(wlan_private * priv, cmdnode = libertas_get_free_cmd_ctrl_node(priv); if (cmdnode == NULL) { - lbs_deb_cmd("PREP_CMD: No free cmdnode\n"); + lbs_deb_host("PREP_CMD: cmdnode is NULL\n"); /* Wake up main thread to execute next command */ - wake_up_interruptible(&priv->mainthread.waitq); + wake_up_interruptible(&priv->waitq); ret = -1; goto done; } @@ -1144,11 +1158,10 @@ int libertas_prepare_and_send_command(wlan_private * priv, cmdptr = (struct cmd_ds_command *)cmdnode->bufvirtualaddr; - lbs_deb_cmd("PREP_CMD: Val of cmd ptr=%p, command=0x%X\n", - cmdptr, cmd_no); + lbs_deb_host("PREP_CMD: command 0x%04x\n", cmd_no); if (!cmdptr) { - lbs_deb_cmd("PREP_CMD: bufvirtualaddr of cmdnode is NULL\n"); + lbs_deb_host("PREP_CMD: cmdptr is NULL\n"); libertas_cleanup_and_insert_cmd(priv, cmdnode); ret = -1; goto done; @@ -1162,136 +1175,136 @@ int libertas_prepare_and_send_command(wlan_private * priv, cmdptr->result = 0; switch (cmd_no) { - case cmd_get_hw_spec: + case CMD_GET_HW_SPEC: ret = wlan_cmd_hw_spec(priv, cmdptr); break; - case cmd_802_11_ps_mode: + case CMD_802_11_PS_MODE: ret = wlan_cmd_802_11_ps_mode(priv, cmdptr, cmd_action); break; - case cmd_802_11_scan: + case CMD_802_11_SCAN: ret = libertas_cmd_80211_scan(priv, cmdptr, pdata_buf); break; - case cmd_mac_control: + case CMD_MAC_CONTROL: ret = wlan_cmd_mac_control(priv, cmdptr); break; - case cmd_802_11_associate: - case cmd_802_11_reassociate: + case CMD_802_11_ASSOCIATE: + case CMD_802_11_REASSOCIATE: ret = libertas_cmd_80211_associate(priv, cmdptr, pdata_buf); break; - case cmd_802_11_deauthenticate: + case 
CMD_802_11_DEAUTHENTICATE: ret = libertas_cmd_80211_deauthenticate(priv, cmdptr); break; - case cmd_802_11_set_wep: + case CMD_802_11_SET_WEP: ret = wlan_cmd_802_11_set_wep(priv, cmdptr, cmd_action, pdata_buf); break; - case cmd_802_11_ad_hoc_start: + case CMD_802_11_AD_HOC_START: ret = libertas_cmd_80211_ad_hoc_start(priv, cmdptr, pdata_buf); break; - case cmd_code_dnld: + case CMD_CODE_DNLD: break; - case cmd_802_11_reset: + case CMD_802_11_RESET: ret = wlan_cmd_802_11_reset(priv, cmdptr, cmd_action); break; - case cmd_802_11_get_log: + case CMD_802_11_GET_LOG: ret = wlan_cmd_802_11_get_log(priv, cmdptr); break; - case cmd_802_11_authenticate: + case CMD_802_11_AUTHENTICATE: ret = libertas_cmd_80211_authenticate(priv, cmdptr, pdata_buf); break; - case cmd_802_11_get_stat: + case CMD_802_11_GET_STAT: ret = wlan_cmd_802_11_get_stat(priv, cmdptr); break; - case cmd_802_11_snmp_mib: + case CMD_802_11_SNMP_MIB: ret = wlan_cmd_802_11_snmp_mib(priv, cmdptr, cmd_action, cmd_oid, pdata_buf); break; - case cmd_mac_reg_access: - case cmd_bbp_reg_access: - case cmd_rf_reg_access: + case CMD_MAC_REG_ACCESS: + case CMD_BBP_REG_ACCESS: + case CMD_RF_REG_ACCESS: ret = wlan_cmd_reg_access(priv, cmdptr, cmd_action, pdata_buf); break; - case cmd_802_11_rf_channel: + case CMD_802_11_RF_CHANNEL: ret = wlan_cmd_802_11_rf_channel(priv, cmdptr, cmd_action, pdata_buf); break; - case cmd_802_11_rf_tx_power: + case CMD_802_11_RF_TX_POWER: ret = wlan_cmd_802_11_rf_tx_power(priv, cmdptr, cmd_action, pdata_buf); break; - case cmd_802_11_radio_control: + case CMD_802_11_RADIO_CONTROL: ret = wlan_cmd_802_11_radio_control(priv, cmdptr, cmd_action); break; - case cmd_802_11_rf_antenna: - ret = wlan_cmd_802_11_rf_antenna(priv, cmdptr, - cmd_action, pdata_buf); - break; - - case cmd_802_11_data_rate: + case CMD_802_11_DATA_RATE: ret = wlan_cmd_802_11_data_rate(priv, cmdptr, cmd_action); break; - case cmd_802_11_rate_adapt_rateset: + case CMD_802_11_RATE_ADAPT_RATESET: ret = wlan_cmd_802_11_rate_adapt_rateset(priv, cmdptr, cmd_action); break; - case cmd_mac_multicast_adr: + case CMD_MAC_MULTICAST_ADR: ret = wlan_cmd_mac_multicast_adr(priv, cmdptr, cmd_action); break; - case cmd_802_11_ad_hoc_join: + case CMD_802_11_MONITOR_MODE: + ret = wlan_cmd_802_11_monitor_mode(priv, cmdptr, + cmd_action, pdata_buf); + break; + + case CMD_802_11_AD_HOC_JOIN: ret = libertas_cmd_80211_ad_hoc_join(priv, cmdptr, pdata_buf); break; - case cmd_802_11_rssi: + case CMD_802_11_RSSI: ret = wlan_cmd_802_11_rssi(priv, cmdptr); break; - case cmd_802_11_ad_hoc_stop: + case CMD_802_11_AD_HOC_STOP: ret = libertas_cmd_80211_ad_hoc_stop(priv, cmdptr); break; - case cmd_802_11_enable_rsn: + case CMD_802_11_ENABLE_RSN: ret = wlan_cmd_802_11_enable_rsn(priv, cmdptr, cmd_action, pdata_buf); break; - case cmd_802_11_key_material: + case CMD_802_11_KEY_MATERIAL: ret = wlan_cmd_802_11_key_material(priv, cmdptr, cmd_action, cmd_oid, pdata_buf); break; - case cmd_802_11_pairwise_tsc: + case CMD_802_11_PAIRWISE_TSC: break; - case cmd_802_11_group_tsc: + case CMD_802_11_GROUP_TSC: break; - case cmd_802_11_mac_address: + case CMD_802_11_MAC_ADDRESS: ret = wlan_cmd_802_11_mac_address(priv, cmdptr, cmd_action); break; - case cmd_802_11_eeprom_access: + case CMD_802_11_EEPROM_ACCESS: ret = wlan_cmd_802_11_eeprom_access(priv, cmdptr, cmd_action, pdata_buf); break; - case cmd_802_11_set_afc: - case cmd_802_11_get_afc: + case CMD_802_11_SET_AFC: + case CMD_802_11_GET_AFC: cmdptr->command = cpu_to_le16(cmd_no); cmdptr->size = cpu_to_le16(sizeof(struct cmd_ds_802_11_afc) + 
@@ -1303,22 +1316,22 @@ int libertas_prepare_and_send_command(wlan_private * priv, ret = 0; goto done; - case cmd_802_11d_domain_info: + case CMD_802_11D_DOMAIN_INFO: ret = libertas_cmd_802_11d_domain_info(priv, cmdptr, cmd_no, cmd_action); break; - case cmd_802_11_sleep_params: + case CMD_802_11_SLEEP_PARAMS: ret = wlan_cmd_802_11_sleep_params(priv, cmdptr, cmd_action); break; - case cmd_802_11_inactivity_timeout: + case CMD_802_11_INACTIVITY_TIMEOUT: ret = wlan_cmd_802_11_inactivity_timeout(priv, cmdptr, cmd_action, pdata_buf); libertas_set_cmd_ctrl_node(priv, cmdnode, 0, 0, pdata_buf); break; - case cmd_802_11_tpc_cfg: - cmdptr->command = cpu_to_le16(cmd_802_11_tpc_cfg); + case CMD_802_11_TPC_CFG: + cmdptr->command = cpu_to_le16(CMD_802_11_TPC_CFG); cmdptr->size = cpu_to_le16(sizeof(struct cmd_ds_802_11_tpc_cfg) + S_DS_GEN); @@ -1328,7 +1341,7 @@ int libertas_prepare_and_send_command(wlan_private * priv, ret = 0; break; - case cmd_802_11_led_gpio_ctrl: + case CMD_802_11_LED_GPIO_CTRL: { struct mrvlietypes_ledgpio *gpio = (struct mrvlietypes_ledgpio*) @@ -1339,7 +1352,7 @@ int libertas_prepare_and_send_command(wlan_private * priv, sizeof(struct cmd_ds_802_11_led_ctrl)); cmdptr->command = - cpu_to_le16(cmd_802_11_led_gpio_ctrl); + cpu_to_le16(CMD_802_11_LED_GPIO_CTRL); #define ACTION_NUMLED_TLVTYPE_LEN_FIELDS_LEN 8 cmdptr->size = @@ -1350,8 +1363,8 @@ int libertas_prepare_and_send_command(wlan_private * priv, ret = 0; break; } - case cmd_802_11_pwr_cfg: - cmdptr->command = cpu_to_le16(cmd_802_11_pwr_cfg); + case CMD_802_11_PWR_CFG: + cmdptr->command = cpu_to_le16(CMD_802_11_PWR_CFG); cmdptr->size = cpu_to_le16(sizeof(struct cmd_ds_802_11_pwr_cfg) + S_DS_GEN); @@ -1360,40 +1373,37 @@ int libertas_prepare_and_send_command(wlan_private * priv, ret = 0; break; - case cmd_bt_access: + case CMD_BT_ACCESS: ret = wlan_cmd_bt_access(priv, cmdptr, cmd_action, pdata_buf); break; - case cmd_fwt_access: + case CMD_FWT_ACCESS: ret = wlan_cmd_fwt_access(priv, cmdptr, cmd_action, pdata_buf); break; - case cmd_mesh_access: + case CMD_MESH_ACCESS: ret = wlan_cmd_mesh_access(priv, cmdptr, cmd_action, pdata_buf); break; - case cmd_get_tsf: - cmdptr->command = cpu_to_le16(cmd_get_tsf); - cmdptr->size = cpu_to_le16(sizeof(struct cmd_ds_get_tsf) + - S_DS_GEN); - ret = 0; + case CMD_SET_BOOT2_VER: + ret = wlan_cmd_set_boot2_ver(priv, cmdptr, cmd_action, pdata_buf); break; - case cmd_802_11_tx_rate_query: - cmdptr->command = cpu_to_le16(cmd_802_11_tx_rate_query); - cmdptr->size = cpu_to_le16(sizeof(struct cmd_tx_rate_query) + + + case CMD_GET_TSF: + cmdptr->command = cpu_to_le16(CMD_GET_TSF); + cmdptr->size = cpu_to_le16(sizeof(struct cmd_ds_get_tsf) + S_DS_GEN); - adapter->txrate = 0; ret = 0; break; default: - lbs_deb_cmd("PREP_CMD: unknown command- %#x\n", cmd_no); + lbs_deb_host("PREP_CMD: unknown command 0x%04x\n", cmd_no); ret = -1; break; } /* return error, since the command preparation failed */ if (ret != 0) { - lbs_deb_cmd("PREP_CMD: command preparation failed\n"); + lbs_deb_host("PREP_CMD: command preparation failed\n"); libertas_cleanup_and_insert_cmd(priv, cmdnode); ret = -1; goto done; @@ -1403,10 +1413,10 @@ int libertas_prepare_and_send_command(wlan_private * priv, libertas_queue_cmd(adapter, cmdnode, 1); adapter->nr_cmd_pending++; - wake_up_interruptible(&priv->mainthread.waitq); + wake_up_interruptible(&priv->waitq); - if (wait_option & cmd_option_waitforrsp) { - lbs_deb_cmd("PREP_CMD: Wait for CMD response\n"); + if (wait_option & CMD_OPTION_WAITFORRSP) { + lbs_deb_host("PREP_CMD: wait for 
response\n"); might_sleep(); wait_event_interruptible(cmdnode->cmdwait_q, cmdnode->cmdwaitqwoken); @@ -1414,7 +1424,7 @@ int libertas_prepare_and_send_command(wlan_private * priv, spin_lock_irqsave(&adapter->driver_lock, flags); if (adapter->cur_cmd_retcode) { - lbs_deb_cmd("PREP_CMD: command failed with return code=%d\n", + lbs_deb_host("PREP_CMD: command failed with return code %d\n", adapter->cur_cmd_retcode); adapter->cur_cmd_retcode = 0; ret = -1; @@ -1422,7 +1432,7 @@ int libertas_prepare_and_send_command(wlan_private * priv, spin_unlock_irqrestore(&adapter->driver_lock, flags); done: - lbs_deb_leave_args(LBS_DEB_CMD, "ret %d", ret); + lbs_deb_leave_args(LBS_DEB_HOST, "ret %d", ret); return ret; } EXPORT_SYMBOL_GPL(libertas_prepare_and_send_command); @@ -1443,14 +1453,13 @@ int libertas_allocate_cmd_buffer(wlan_private * priv) u8 *ptempvirtualaddr; wlan_adapter *adapter = priv->adapter; - lbs_deb_enter(LBS_DEB_CMD); + lbs_deb_enter(LBS_DEB_HOST); /* Allocate and initialize cmdCtrlNode */ ulbufsize = sizeof(struct cmd_ctrl_node) * MRVDRV_NUM_OF_CMD_BUFFER; if (!(tempcmd_array = kzalloc(ulbufsize, GFP_KERNEL))) { - lbs_deb_cmd( - "ALLOC_CMD_BUF: failed to allocate tempcmd_array\n"); + lbs_deb_host("ALLOC_CMD_BUF: tempcmd_array is NULL\n"); ret = -1; goto done; } @@ -1460,8 +1469,7 @@ int libertas_allocate_cmd_buffer(wlan_private * priv) ulbufsize = MRVDRV_SIZE_OF_CMD_BUFFER; for (i = 0; i < MRVDRV_NUM_OF_CMD_BUFFER; i++) { if (!(ptempvirtualaddr = kzalloc(ulbufsize, GFP_KERNEL))) { - lbs_deb_cmd( - "ALLOC_CMD_BUF: ptempvirtualaddr: out of memory\n"); + lbs_deb_host("ALLOC_CMD_BUF: ptempvirtualaddr is NULL\n"); ret = -1; goto done; } @@ -1478,7 +1486,7 @@ int libertas_allocate_cmd_buffer(wlan_private * priv) ret = 0; done: - lbs_deb_leave_args(LBS_DEB_CMD, "ret %d", ret); + lbs_deb_leave_args(LBS_DEB_HOST, "ret %d", ret); return ret; } @@ -1495,11 +1503,11 @@ int libertas_free_cmd_buffer(wlan_private * priv) struct cmd_ctrl_node *tempcmd_array; wlan_adapter *adapter = priv->adapter; - lbs_deb_enter(LBS_DEB_CMD); + lbs_deb_enter(LBS_DEB_HOST); /* need to check if cmd array is allocated or not */ if (adapter->cmd_array == NULL) { - lbs_deb_cmd("FREE_CMD_BUF: cmd_array is Null\n"); + lbs_deb_host("FREE_CMD_BUF: cmd_array is NULL\n"); goto done; } @@ -1509,7 +1517,6 @@ int libertas_free_cmd_buffer(wlan_private * priv) ulbufsize = MRVDRV_SIZE_OF_CMD_BUFFER; for (i = 0; i < MRVDRV_NUM_OF_CMD_BUFFER; i++) { if (tempcmd_array[i].bufvirtualaddr) { - lbs_deb_cmd("Free all the array\n"); kfree(tempcmd_array[i].bufvirtualaddr); tempcmd_array[i].bufvirtualaddr = NULL; } @@ -1517,13 +1524,12 @@ int libertas_free_cmd_buffer(wlan_private * priv) /* Release cmd_ctrl_node */ if (adapter->cmd_array) { - lbs_deb_cmd("Free cmd_array\n"); kfree(adapter->cmd_array); adapter->cmd_array = NULL; } done: - lbs_deb_leave(LBS_DEB_CMD); + lbs_deb_leave(LBS_DEB_HOST); return 0; } @@ -1540,6 +1546,8 @@ struct cmd_ctrl_node *libertas_get_free_cmd_ctrl_node(wlan_private * priv) wlan_adapter *adapter = priv->adapter; unsigned long flags; + lbs_deb_enter(LBS_DEB_HOST); + if (!adapter) return NULL; @@ -1549,21 +1557,16 @@ struct cmd_ctrl_node *libertas_get_free_cmd_ctrl_node(wlan_private * priv) tempnode = (struct cmd_ctrl_node *)adapter->cmdfreeq.next; list_del((struct list_head *)tempnode); } else { - lbs_deb_cmd("GET_CMD_NODE: cmd_ctrl_node is not available\n"); + lbs_deb_host("GET_CMD_NODE: cmd_ctrl_node is not available\n"); tempnode = NULL; } spin_unlock_irqrestore(&adapter->driver_lock, flags); - if (tempnode) { - 
/* - lbs_pr_debug(3, "GET_CMD_NODE: cmdCtrlNode available\n"); - lbs_pr_debug(3, "GET_CMD_NODE: cmdCtrlNode Address = %p\n", - tempnode); - */ + if (tempnode) cleanup_cmdnode(tempnode); - } + lbs_deb_leave(LBS_DEB_HOST); return tempnode; } @@ -1575,6 +1578,8 @@ struct cmd_ctrl_node *libertas_get_free_cmd_ctrl_node(wlan_private * priv) */ static void cleanup_cmdnode(struct cmd_ctrl_node *ptempnode) { + lbs_deb_enter(LBS_DEB_HOST); + if (!ptempnode) return; ptempnode->cmdwaitqwoken = 1; @@ -1586,7 +1591,8 @@ static void cleanup_cmdnode(struct cmd_ctrl_node *ptempnode) if (ptempnode->bufvirtualaddr != NULL) memset(ptempnode->bufvirtualaddr, 0, MRVDRV_SIZE_OF_CMD_BUFFER); - return; + + lbs_deb_leave(LBS_DEB_HOST); } /** @@ -1603,7 +1609,7 @@ void libertas_set_cmd_ctrl_node(wlan_private * priv, struct cmd_ctrl_node *ptempnode, u32 cmd_oid, u16 wait_option, void *pdata_buf) { - lbs_deb_enter(LBS_DEB_CMD); + lbs_deb_enter(LBS_DEB_HOST); if (!ptempnode) return; @@ -1612,7 +1618,7 @@ void libertas_set_cmd_ctrl_node(wlan_private * priv, ptempnode->wait_option = wait_option; ptempnode->pdata_buf = pdata_buf; - lbs_deb_leave(LBS_DEB_CMD); + lbs_deb_leave(LBS_DEB_HOST); } /** @@ -1631,12 +1637,15 @@ int libertas_execute_next_command(wlan_private * priv) unsigned long flags; int ret = 0; - lbs_deb_enter(LBS_DEB_CMD); + // Debug group is LBS_DEB_THREAD and not LBS_DEB_HOST, because the + // only caller to us is libertas_thread() and we get even when a + // data packet is received + lbs_deb_enter(LBS_DEB_THREAD); spin_lock_irqsave(&adapter->driver_lock, flags); if (adapter->cur_cmd) { - lbs_pr_alert( "EXEC_NEXT_CMD: there is command in processing!\n"); + lbs_pr_alert( "EXEC_NEXT_CMD: already processing command!\n"); spin_unlock_irqrestore(&adapter->driver_lock, flags); ret = -1; goto done; @@ -1650,22 +1659,20 @@ int libertas_execute_next_command(wlan_private * priv) spin_unlock_irqrestore(&adapter->driver_lock, flags); if (cmdnode) { - lbs_deb_cmd( - "EXEC_NEXT_CMD: Got next command from cmdpendingq\n"); cmdptr = (struct cmd_ds_command *)cmdnode->bufvirtualaddr; if (is_command_allowed_in_ps(cmdptr->command)) { if ((adapter->psstate == PS_STATE_SLEEP) || (adapter->psstate == PS_STATE_PRE_SLEEP)) { - lbs_deb_cmd( - "EXEC_NEXT_CMD: Cannot send cmd 0x%x in psstate %d\n", + lbs_deb_host( + "EXEC_NEXT_CMD: cannot send cmd 0x%04x in psstate %d\n", le16_to_cpu(cmdptr->command), adapter->psstate); ret = -1; goto done; } - lbs_deb_cmd("EXEC_NEXT_CMD: OK to send command " - "0x%x in psstate %d\n", + lbs_deb_host("EXEC_NEXT_CMD: OK to send command " + "0x%04x in psstate %d\n", le16_to_cpu(cmdptr->command), adapter->psstate); } else if (adapter->psstate != PS_STATE_FULL_POWER) { @@ -1681,7 +1688,7 @@ int libertas_execute_next_command(wlan_private * priv) * immediately. 
*/ if (cmdptr->command != - cpu_to_le16(cmd_802_11_ps_mode)) { + cpu_to_le16(CMD_802_11_PS_MODE)) { /* Prepare to send Exit PS, * this non PS command will be sent later */ if ((adapter->psstate == PS_STATE_SLEEP) @@ -1703,13 +1710,13 @@ int libertas_execute_next_command(wlan_private * priv) struct cmd_ds_802_11_ps_mode *psm = &cmdptr->params.psmode; - lbs_deb_cmd( - "EXEC_NEXT_CMD: PS cmd- action=0x%x\n", + lbs_deb_host( + "EXEC_NEXT_CMD: PS cmd, action 0x%02x\n", psm->action); if (psm->action != - cpu_to_le16(cmd_subcmd_exit_ps)) { - lbs_deb_cmd( - "EXEC_NEXT_CMD: Ignore Enter PS cmd\n"); + cpu_to_le16(CMD_SUBCMD_EXIT_PS)) { + lbs_deb_host( + "EXEC_NEXT_CMD: ignore ENTER_PS cmd\n"); list_del((struct list_head *)cmdnode); libertas_cleanup_and_insert_cmd(priv, cmdnode); @@ -1719,8 +1726,8 @@ int libertas_execute_next_command(wlan_private * priv) if ((adapter->psstate == PS_STATE_SLEEP) || (adapter->psstate == PS_STATE_PRE_SLEEP)) { - lbs_deb_cmd( - "EXEC_NEXT_CMD: Ignore ExitPS cmd in sleep\n"); + lbs_deb_host( + "EXEC_NEXT_CMD: ignore EXIT_PS cmd in sleep\n"); list_del((struct list_head *)cmdnode); libertas_cleanup_and_insert_cmd(priv, cmdnode); adapter->needtowakeup = 1; @@ -1729,12 +1736,12 @@ int libertas_execute_next_command(wlan_private * priv) goto done; } - lbs_deb_cmd( - "EXEC_NEXT_CMD: Sending Exit_PS down...\n"); + lbs_deb_host( + "EXEC_NEXT_CMD: sending EXIT_PS\n"); } } list_del((struct list_head *)cmdnode); - lbs_deb_cmd("EXEC_NEXT_CMD: Sending 0x%04X command\n", + lbs_deb_host("EXEC_NEXT_CMD: sending command 0x%04x\n", le16_to_cpu(cmdptr->command)); DownloadcommandToStation(priv, cmdnode); } else { @@ -1742,23 +1749,23 @@ int libertas_execute_next_command(wlan_private * priv) * check if in power save mode, if yes, put the device back * to PS mode */ - if ((adapter->psmode != wlan802_11powermodecam) && + if ((adapter->psmode != WLAN802_11POWERMODECAM) && (adapter->psstate == PS_STATE_FULL_POWER) && - (adapter->connect_status == libertas_connected)) { + (adapter->connect_status == LIBERTAS_CONNECTED)) { if (adapter->secinfo.WPAenabled || adapter->secinfo.WPA2enabled) { /* check for valid WPA group keys */ if (adapter->wpa_mcast_key.len || adapter->wpa_unicast_key.len) { - lbs_deb_cmd( + lbs_deb_host( "EXEC_NEXT_CMD: WPA enabled and GTK_SET" " go back to PS_SLEEP"); libertas_ps_sleep(priv, 0); } } else { - lbs_deb_cmd( - "EXEC_NEXT_CMD: command PendQ is empty," - " go back to PS_SLEEP"); + lbs_deb_host( + "EXEC_NEXT_CMD: cmdpendingq empty, " + "go back to PS_SLEEP"); libertas_ps_sleep(priv, 0); } } @@ -1766,7 +1773,7 @@ int libertas_execute_next_command(wlan_private * priv) ret = 0; done: - lbs_deb_leave(LBS_DEB_CMD); + lbs_deb_leave(LBS_DEB_THREAD); return ret; } @@ -1775,7 +1782,7 @@ void libertas_send_iwevcustom_event(wlan_private * priv, s8 * str) union iwreq_data iwrq; u8 buf[50]; - lbs_deb_enter(LBS_DEB_CMD); + lbs_deb_enter(LBS_DEB_WEXT); memset(&iwrq, 0, sizeof(union iwreq_data)); memset(buf, 0, sizeof(buf)); @@ -1785,13 +1792,13 @@ void libertas_send_iwevcustom_event(wlan_private * priv, s8 * str) iwrq.data.length = strlen(buf) + 1 + IW_EV_LCP_LEN; /* Send Event to upper layer */ - lbs_deb_cmd("Event Indication string = %s\n", (char *)buf); - lbs_deb_cmd("Event Indication String length = %d\n", iwrq.data.length); + lbs_deb_wext("event indication string %s\n", (char *)buf); + lbs_deb_wext("event indication length %d\n", iwrq.data.length); + lbs_deb_wext("sending wireless event IWEVCUSTOM for %s\n", str); - lbs_deb_cmd("Sending wireless event IWEVCUSTOM for %s\n", str); 
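Most hunks in this file re-home messages from lbs_deb_cmd() to area-specific macros (lbs_deb_host, lbs_deb_wext, lbs_deb_thread) and bracket functions with lbs_deb_enter()/lbs_deb_leave(). The real definitions live elsewhere in the driver (defs.h) and are not shown here; the sketch below is only a plausible reconstruction of such category-masked debug macros, and the bit values and message format are assumptions:

#include <linux/kernel.h>

extern unsigned int libertas_debug;	/* runtime bitmask of enabled debug areas */

#define LBS_DEB_HOST	0x00000008	/* placeholder bit values */
#define LBS_DEB_WEXT	0x00000010

#define LBS_DEB_LL(grp, fmt, ...)					\
	do {								\
		if (libertas_debug & (grp))				\
			printk(KERN_DEBUG "libertas: " fmt, ##__VA_ARGS__); \
	} while (0)

#define lbs_deb_host(fmt, ...)	LBS_DEB_LL(LBS_DEB_HOST, fmt, ##__VA_ARGS__)
#define lbs_deb_wext(fmt, ...)	LBS_DEB_LL(LBS_DEB_WEXT, fmt, ##__VA_ARGS__)
#define lbs_deb_enter(grp)	LBS_DEB_LL(grp, "%s: enter\n", __func__)
#define lbs_deb_leave(grp)	LBS_DEB_LL(grp, "%s: leave\n", __func__)

Splitting the categories this way lets the command-preparation path (LBS_DEB_HOST) be silenced independently of command-contents tracing (LBS_DEB_CMD), which is the point of the renames in these hunks.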
wireless_send_event(priv->dev, IWEVCUSTOM, &iwrq, buf); - lbs_deb_leave(LBS_DEB_CMD); + lbs_deb_leave(LBS_DEB_WEXT); } static int sendconfirmsleep(wlan_private * priv, u8 * cmdptr, u16 size) @@ -1800,19 +1807,19 @@ static int sendconfirmsleep(wlan_private * priv, u8 * cmdptr, u16 size) wlan_adapter *adapter = priv->adapter; int ret = 0; - lbs_deb_enter(LBS_DEB_CMD); + lbs_deb_enter(LBS_DEB_HOST); - lbs_deb_cmd("SEND_SLEEPC_CMD: Before download, size of cmd = %d\n", + lbs_deb_host("SEND_SLEEPC_CMD: before download, cmd size %d\n", size); - lbs_dbg_hex("SEND_SLEEPC_CMD: Sleep confirm command", cmdptr, size); + lbs_deb_hex(LBS_DEB_HOST, "sleep confirm command", cmdptr, size); ret = priv->hw_host_to_card(priv, MVMS_CMD, cmdptr, size); priv->dnld_sent = DNLD_RES_RECEIVED; spin_lock_irqsave(&adapter->driver_lock, flags); if (adapter->intcounter || adapter->currenttxskb) - lbs_deb_cmd("SEND_SLEEPC_CMD: intcounter=%d currenttxskb=%p\n", + lbs_deb_host("SEND_SLEEPC_CMD: intcounter %d, currenttxskb %p\n", adapter->intcounter, adapter->currenttxskb); spin_unlock_irqrestore(&adapter->driver_lock, flags); @@ -1824,36 +1831,35 @@ static int sendconfirmsleep(wlan_private * priv, u8 * cmdptr, u16 size) if (!adapter->intcounter) { adapter->psstate = PS_STATE_SLEEP; } else { - lbs_deb_cmd("SEND_SLEEPC_CMD: After sent,IntC=%d\n", + lbs_deb_host("SEND_SLEEPC_CMD: after sent, intcounter %d\n", adapter->intcounter); } spin_unlock_irqrestore(&adapter->driver_lock, flags); - lbs_deb_cmd("SEND_SLEEPC_CMD: Sent Confirm Sleep command\n"); - lbs_deb_cmd("+"); + lbs_deb_host("SEND_SLEEPC_CMD: sent confirm sleep\n"); } - lbs_deb_leave_args(LBS_DEB_CMD, "ret %d", ret); + lbs_deb_leave_args(LBS_DEB_HOST, "ret %d", ret); return ret; } void libertas_ps_sleep(wlan_private * priv, int wait_option) { - lbs_deb_enter(LBS_DEB_CMD); + lbs_deb_enter(LBS_DEB_HOST); /* * PS is currently supported only in Infrastructure mode * Remove this check if it is to be supported in IBSS mode also */ - libertas_prepare_and_send_command(priv, cmd_802_11_ps_mode, - cmd_subcmd_enter_ps, wait_option, 0, NULL); + libertas_prepare_and_send_command(priv, CMD_802_11_PS_MODE, + CMD_SUBCMD_ENTER_PS, wait_option, 0, NULL); - lbs_deb_leave(LBS_DEB_CMD); + lbs_deb_leave(LBS_DEB_HOST); } /** - * @brief This function sends Eixt_PS command to firmware. + * @brief This function sends Exit_PS command to firmware. 
* * @param priv A pointer to wlan_private structure * @param wait_option wait response or not @@ -1863,17 +1869,15 @@ void libertas_ps_wakeup(wlan_private * priv, int wait_option) { __le32 Localpsmode; - lbs_deb_enter(LBS_DEB_CMD); - - Localpsmode = cpu_to_le32(wlan802_11powermodecam); + lbs_deb_enter(LBS_DEB_HOST); - lbs_deb_cmd("Exit_PS: Localpsmode = %d\n", wlan802_11powermodecam); + Localpsmode = cpu_to_le32(WLAN802_11POWERMODECAM); - libertas_prepare_and_send_command(priv, cmd_802_11_ps_mode, - cmd_subcmd_exit_ps, + libertas_prepare_and_send_command(priv, CMD_802_11_PS_MODE, + CMD_SUBCMD_EXIT_PS, wait_option, 0, &Localpsmode); - lbs_deb_leave(LBS_DEB_CMD); + lbs_deb_leave(LBS_DEB_HOST); } /** @@ -1890,31 +1894,31 @@ void libertas_ps_confirm_sleep(wlan_private * priv, u16 psmode) wlan_adapter *adapter = priv->adapter; u8 allowed = 1; - lbs_deb_enter(LBS_DEB_CMD); + lbs_deb_enter(LBS_DEB_HOST); if (priv->dnld_sent) { allowed = 0; - lbs_deb_cmd("D"); + lbs_deb_host("dnld_sent was set"); } spin_lock_irqsave(&adapter->driver_lock, flags); if (adapter->cur_cmd) { allowed = 0; - lbs_deb_cmd("C"); + lbs_deb_host("cur_cmd was set"); } if (adapter->intcounter > 0) { allowed = 0; - lbs_deb_cmd("I%d", adapter->intcounter); + lbs_deb_host("intcounter %d", adapter->intcounter); } spin_unlock_irqrestore(&adapter->driver_lock, flags); if (allowed) { - lbs_deb_cmd("Sending libertas_ps_confirm_sleep\n"); + lbs_deb_host("sending libertas_ps_confirm_sleep\n"); sendconfirmsleep(priv, (u8 *) & adapter->libertas_ps_confirm_sleep, sizeof(struct PS_CMD_ConfirmSleep)); } else { - lbs_deb_cmd("Sleep Confirm has been delayed\n"); + lbs_deb_host("sleep confirm has been delayed\n"); } - lbs_deb_leave(LBS_DEB_CMD); + lbs_deb_leave(LBS_DEB_HOST); } diff --git a/drivers/net/wireless/libertas/cmdresp.c b/drivers/net/wireless/libertas/cmdresp.c index 6ac0d4752fa493a293f992308a301e478a08c88e..8f90892ea22ee7b39cb0a17620b6800e4512704a 100644 --- a/drivers/net/wireless/libertas/cmdresp.c +++ b/drivers/net/wireless/libertas/cmdresp.c @@ -28,10 +28,10 @@ void libertas_mac_event_disconnected(wlan_private * priv) wlan_adapter *adapter = priv->adapter; union iwreq_data wrqu; - if (adapter->connect_status != libertas_connected) + if (adapter->connect_status != LIBERTAS_CONNECTED) return; - lbs_deb_cmd("Handles disconnect event.\n"); + lbs_deb_enter(LBS_DEB_CMD); memset(wrqu.ap_addr.sa_data, 0x00, ETH_ALEN); wrqu.ap_addr.sa_family = ARPHRD_ETHER; @@ -60,22 +60,12 @@ void libertas_mac_event_disconnected(wlan_private * priv) memset(adapter->rawNF, 0x00, sizeof(adapter->rawNF)); adapter->nextSNRNF = 0; adapter->numSNRNF = 0; - adapter->rxpd_rate = 0; - lbs_deb_cmd("Current SSID='%s', ssid length=%u\n", + lbs_deb_cmd("current SSID '%s', length %u\n", escape_essid(adapter->curbssparams.ssid, adapter->curbssparams.ssid_len), adapter->curbssparams.ssid_len); - lbs_deb_cmd("Previous SSID='%s', ssid length=%u\n", - escape_essid(adapter->prev_ssid, adapter->prev_ssid_len), - adapter->prev_ssid_len); - - adapter->connect_status = libertas_disconnected; - /* Save previous SSID and BSSID for possible reassociation */ - memcpy(&adapter->prev_ssid, &adapter->curbssparams.ssid, - IW_ESSID_MAX_SIZE); - adapter->prev_ssid_len = adapter->curbssparams.ssid_len; - memcpy(adapter->prev_bssid, adapter->curbssparams.bssid, ETH_ALEN); + adapter->connect_status = LIBERTAS_DISCONNECTED; /* Clear out associated SSID and BSSID since connection is * no longer valid. 
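In libertas_ps_confirm_sleep() at the end of the cmd.c hunk above, the confirmation is only sent when nothing else is in flight: no pending download (dnld_sent), no command awaiting its response (cur_cmd), and no queued interrupts (intcounter). The same gate restated as a single stand-alone predicate; can_confirm_sleep() is a hypothetical helper for illustration, not driver API:

#include <linux/types.h>

/* True when it is safe to acknowledge the firmware's sleep request:
 * no command/data download is pending, no command response is
 * outstanding, and the interrupt counter has drained to zero. */
static bool can_confirm_sleep(int dnld_sent, const void *cur_cmd,
			      int intcounter)
{
	return !dnld_sent && !cur_cmd && intcounter == 0;
}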
@@ -86,9 +76,10 @@ void libertas_mac_event_disconnected(wlan_private * priv) if (adapter->psstate != PS_STATE_FULL_POWER) { /* make firmware to exit PS mode */ - lbs_deb_cmd("Disconnected, so exit PS mode.\n"); + lbs_deb_cmd("disconnected, so exit PS mode\n"); libertas_ps_wakeup(priv, 0); } + lbs_deb_leave(LBS_DEB_CMD); } /** @@ -102,6 +93,7 @@ static void handle_mic_failureevent(wlan_private * priv, u32 event) { char buf[50]; + lbs_deb_enter(LBS_DEB_CMD); memset(buf, 0, sizeof(buf)); sprintf(buf, "%s", "MLME-MICHAELMICFAILURE.indication "); @@ -113,6 +105,7 @@ static void handle_mic_failureevent(wlan_private * priv, u32 event) } libertas_send_iwevcustom_event(priv, buf); + lbs_deb_leave(LBS_DEB_CMD); } static int wlan_ret_reg_access(wlan_private * priv, @@ -124,7 +117,7 @@ static int wlan_ret_reg_access(wlan_private * priv, lbs_deb_enter(LBS_DEB_CMD); switch (type) { - case cmd_ret_mac_reg_access: + case CMD_RET(CMD_MAC_REG_ACCESS): { struct cmd_ds_mac_reg_access *reg = &resp->params.macreg; @@ -133,7 +126,7 @@ static int wlan_ret_reg_access(wlan_private * priv, break; } - case cmd_ret_bbp_reg_access: + case CMD_RET(CMD_BBP_REG_ACCESS): { struct cmd_ds_bbp_reg_access *reg = &resp->params.bbpreg; @@ -142,7 +135,7 @@ static int wlan_ret_reg_access(wlan_private * priv, break; } - case cmd_ret_rf_reg_access: + case CMD_RET(CMD_RF_REG_ACCESS): { struct cmd_ds_rf_reg_access *reg = &resp->params.rfreg; @@ -155,7 +148,7 @@ static int wlan_ret_reg_access(wlan_private * priv, ret = -1; } - lbs_deb_enter_args(LBS_DEB_CMD, "ret %d", ret); + lbs_deb_leave_args(LBS_DEB_CMD, "ret %d", ret); return ret; } @@ -166,6 +159,7 @@ static int wlan_ret_get_hw_spec(wlan_private * priv, struct cmd_ds_get_hw_spec *hwspec = &resp->params.hwspec; wlan_adapter *adapter = priv->adapter; int ret = 0; + DECLARE_MAC_BUF(mac); lbs_deb_enter(LBS_DEB_CMD); @@ -173,22 +167,23 @@ static int wlan_ret_get_hw_spec(wlan_private * priv, memcpy(adapter->fwreleasenumber, hwspec->fwreleasenumber, 4); - lbs_deb_cmd("GET_HW_SPEC: FWReleaseVersion- %u.%u.%u.p%u\n", + lbs_deb_cmd("GET_HW_SPEC: firmware release %u.%u.%up%u\n", adapter->fwreleasenumber[2], adapter->fwreleasenumber[1], adapter->fwreleasenumber[0], adapter->fwreleasenumber[3]); - lbs_deb_cmd("GET_HW_SPEC: Permanent addr- %2x:%2x:%2x:%2x:%2x:%2x\n", - hwspec->permanentaddr[0], hwspec->permanentaddr[1], - hwspec->permanentaddr[2], hwspec->permanentaddr[3], - hwspec->permanentaddr[4], hwspec->permanentaddr[5]); - lbs_deb_cmd("GET_HW_SPEC: hwifversion=0x%X version=0x%X\n", + lbs_deb_cmd("GET_HW_SPEC: MAC addr %s\n", + print_mac(mac, hwspec->permanentaddr)); + lbs_deb_cmd("GET_HW_SPEC: hardware interface 0x%x, hardware spec 0x%04x\n", hwspec->hwifversion, hwspec->version); - adapter->regioncode = le16_to_cpu(hwspec->regioncode); + /* Clamp region code to 8-bit since FW spec indicates that it should + * only ever be 8-bit, even though the field size is 16-bit. Some firmware + * returns non-zero high 8 bits here. 
+ */ + adapter->regioncode = le16_to_cpu(hwspec->regioncode) & 0xFF; for (i = 0; i < MRVDRV_MAX_REGION_CODE; i++) { /* use the region code to search for the index */ if (adapter->regioncode == libertas_region_code_to_index[i]) { - adapter->regiontableindex = (u16) i; break; } } @@ -196,7 +191,6 @@ static int wlan_ret_get_hw_spec(wlan_private * priv, /* if it's unidentified region code, use the default (USA) */ if (i >= MRVDRV_MAX_REGION_CODE) { adapter->regioncode = 0x10; - adapter->regiontableindex = 0; lbs_pr_info("unidentified region code; using the default (USA)\n"); } @@ -230,8 +224,8 @@ static int wlan_ret_802_11_sleep_params(wlan_private * priv, lbs_deb_enter(LBS_DEB_CMD); - lbs_deb_cmd("error=%x offset=%x stabletime=%x calcontrol=%x\n" - " extsleepclk=%x\n", le16_to_cpu(sp->error), + lbs_deb_cmd("error 0x%x, offset 0x%x, stabletime 0x%x, calcontrol 0x%x " + "extsleepclk 0x%x\n", le16_to_cpu(sp->error), le16_to_cpu(sp->offset), le16_to_cpu(sp->stabletime), sp->calcontrol, sp->externalsleepclk); @@ -249,6 +243,7 @@ static int wlan_ret_802_11_sleep_params(wlan_private * priv, static int wlan_ret_802_11_stat(wlan_private * priv, struct cmd_ds_command *resp) { + lbs_deb_enter(LBS_DEB_CMD); /* currently adapter->wlan802_11Stat is unused struct cmd_ds_802_11_get_stat *p11Stat = &resp->params.gstat; @@ -258,6 +253,7 @@ static int wlan_ret_802_11_stat(wlan_private * priv, memcpy(&adapter->wlan802_11Stat, p11Stat, sizeof(struct cmd_ds_802_11_get_stat)); */ + lbs_deb_leave(LBS_DEB_CMD); return 0; } @@ -270,28 +266,28 @@ static int wlan_ret_802_11_snmp_mib(wlan_private * priv, lbs_deb_enter(LBS_DEB_CMD); - lbs_deb_cmd("SNMP_RESP: value of the oid = %x, querytype=%x\n", oid, + lbs_deb_cmd("SNMP_RESP: oid 0x%x, querytype 0x%x\n", oid, querytype); - lbs_deb_cmd("SNMP_RESP: Buf size = %x\n", le16_to_cpu(smib->bufsize)); + lbs_deb_cmd("SNMP_RESP: Buf size %d\n", le16_to_cpu(smib->bufsize)); - if (querytype == cmd_act_get) { + if (querytype == CMD_ACT_GET) { switch (oid) { - case fragthresh_i: + case FRAGTHRESH_I: priv->adapter->fragthsd = le16_to_cpu(*((__le16 *)(smib->value))); - lbs_deb_cmd("SNMP_RESP: fragthsd =%u\n", + lbs_deb_cmd("SNMP_RESP: frag threshold %u\n", priv->adapter->fragthsd); break; - case rtsthresh_i: + case RTSTHRESH_I: priv->adapter->rtsthsd = le16_to_cpu(*((__le16 *)(smib->value))); - lbs_deb_cmd("SNMP_RESP: rtsthsd =%u\n", + lbs_deb_cmd("SNMP_RESP: rts threshold %u\n", priv->adapter->rtsthsd); break; - case short_retrylim_i: + case SHORT_RETRYLIM_I: priv->adapter->txretrycount = le16_to_cpu(*((__le16 *)(smib->value))); - lbs_deb_cmd("SNMP_RESP: txretrycount =%u\n", + lbs_deb_cmd("SNMP_RESP: tx retry count %u\n", priv->adapter->rtsthsd); break; default: @@ -314,18 +310,19 @@ static int wlan_ret_802_11_key_material(wlan_private * priv, lbs_deb_enter(LBS_DEB_CMD); /* Copy the returned key to driver private data */ - if (action == cmd_act_get) { + if (action == CMD_ACT_GET) { u8 * buf_ptr = (u8 *) &pkeymaterial->keyParamSet; u8 * resp_end = (u8 *) (resp + le16_to_cpu(resp->size)); while (buf_ptr < resp_end) { struct MrvlIEtype_keyParamSet * pkeyparamset = (struct MrvlIEtype_keyParamSet *) buf_ptr; - struct WLAN_802_11_KEY * pkey; - u16 key_info = le16_to_cpu(pkeyparamset->keyinfo); + struct enc_key * pkey; u16 param_set_len = le16_to_cpu(pkeyparamset->length); - u8 * end; u16 key_len = le16_to_cpu(pkeyparamset->keylen); + u16 key_flags = le16_to_cpu(pkeyparamset->keyinfo); + u16 key_type = le16_to_cpu(pkeyparamset->keytypeid); + u8 * end; end = (u8 *) pkeyparamset + sizeof 
(pkeyparamset->type) + sizeof (pkeyparamset->length) @@ -334,20 +331,20 @@ static int wlan_ret_802_11_key_material(wlan_private * priv, if (end > resp_end) break; - if (key_info & KEY_INFO_WPA_UNICAST) + if (key_flags & KEY_INFO_WPA_UNICAST) pkey = &adapter->wpa_unicast_key; - else if (key_info & KEY_INFO_WPA_MCAST) + else if (key_flags & KEY_INFO_WPA_MCAST) pkey = &adapter->wpa_mcast_key; else break; /* Copy returned key into driver */ - memset(pkey, 0, sizeof(struct WLAN_802_11_KEY)); + memset(pkey, 0, sizeof(struct enc_key)); if (key_len > sizeof(pkey->key)) break; - pkey->type = le16_to_cpu(pkeyparamset->keytypeid); - pkey->flags = le16_to_cpu(pkeyparamset->keyinfo); - pkey->len = le16_to_cpu(pkeyparamset->keylen); + pkey->type = key_type; + pkey->flags = key_flags; + pkey->len = key_len; memcpy(pkey->key, pkeyparamset->key, pkey->len); buf_ptr = end + 1; @@ -382,28 +379,9 @@ static int wlan_ret_802_11_rf_tx_power(wlan_private * priv, adapter->txpowerlevel = le16_to_cpu(rtp->currentlevel); - lbs_deb_cmd("Current TxPower Level = %d\n", adapter->txpowerlevel); - - lbs_deb_enter(LBS_DEB_CMD); - return 0; -} - -static int wlan_ret_802_11_rf_antenna(wlan_private * priv, - struct cmd_ds_command *resp) -{ - struct cmd_ds_802_11_rf_antenna *pAntenna = &resp->params.rant; - wlan_adapter *adapter = priv->adapter; - u16 action = le16_to_cpu(pAntenna->action); - - if (action == cmd_act_get_rx) - adapter->rxantennamode = le16_to_cpu(pAntenna->antennamode); - - if (action == cmd_act_get_tx) - adapter->txantennamode = le16_to_cpu(pAntenna->antennamode); - - lbs_deb_cmd("RF_ANT_RESP: action = 0x%x, mode = 0x%04x\n", - action, le16_to_cpu(pAntenna->antennamode)); + lbs_deb_cmd("TX power currently %d\n", adapter->txpowerlevel); + lbs_deb_leave(LBS_DEB_CMD); return 0; } @@ -415,12 +393,12 @@ static int wlan_ret_802_11_rate_adapt_rateset(wlan_private * priv, lbs_deb_enter(LBS_DEB_CMD); - if (rates->action == cmd_act_get) { + if (rates->action == CMD_ACT_GET) { adapter->enablehwauto = le16_to_cpu(rates->enablehwauto); adapter->ratebitmap = le16_to_cpu(rates->bitmap); } - lbs_deb_enter(LBS_DEB_CMD); + lbs_deb_leave(LBS_DEB_CMD); return 0; } @@ -429,21 +407,19 @@ static int wlan_ret_802_11_data_rate(wlan_private * priv, { struct cmd_ds_802_11_data_rate *pdatarate = &resp->params.drate; wlan_adapter *adapter = priv->adapter; - u8 dot11datarate; lbs_deb_enter(LBS_DEB_CMD); - lbs_dbg_hex("DATA_RATE_RESP: data_rate- ", - (u8 *) pdatarate, sizeof(struct cmd_ds_802_11_data_rate)); + lbs_deb_hex(LBS_DEB_CMD, "DATA_RATE_RESP", (u8 *) pdatarate, + sizeof(struct cmd_ds_802_11_data_rate)); - dot11datarate = pdatarate->datarate[0]; - if (pdatarate->action == cpu_to_le16(cmd_act_get_tx_rate)) { - memcpy(adapter->libertas_supported_rates, pdatarate->datarate, - sizeof(adapter->libertas_supported_rates)); - } - adapter->datarate = libertas_index_to_data_rate(dot11datarate); + /* FIXME: get actual rates FW can do if this command actually returns + * all data rates supported. 
+ */ + adapter->cur_rate = libertas_fw_index_to_data_rate(pdatarate->rates[0]); + lbs_deb_cmd("DATA_RATE: current rate 0x%02x\n", adapter->cur_rate); - lbs_deb_enter(LBS_DEB_CMD); + lbs_deb_leave(LBS_DEB_CMD); return 0; } @@ -457,9 +433,9 @@ static int wlan_ret_802_11_rf_channel(wlan_private * priv, lbs_deb_enter(LBS_DEB_CMD); - if (action == cmd_opt_802_11_rf_channel_get + if (action == CMD_OPT_802_11_RF_CHANNEL_GET && adapter->curbssparams.channel != newchannel) { - lbs_deb_cmd("channel Switch: %d to %d\n", + lbs_deb_cmd("channel switch from %d to %d\n", adapter->curbssparams.channel, newchannel); /* Update the channel again */ @@ -476,6 +452,8 @@ static int wlan_ret_802_11_rssi(wlan_private * priv, struct cmd_ds_802_11_rssi_rsp *rssirsp = &resp->params.rssirsp; wlan_adapter *adapter = priv->adapter; + lbs_deb_enter(LBS_DEB_CMD); + /* store the non average value */ adapter->SNR[TYPE_BEACON][TYPE_NOAVG] = le16_to_cpu(rssirsp->SNR); adapter->NF[TYPE_BEACON][TYPE_NOAVG] = le16_to_cpu(rssirsp->noisefloor); @@ -491,9 +469,11 @@ static int wlan_ret_802_11_rssi(wlan_private * priv, CAL_RSSI(adapter->SNR[TYPE_BEACON][TYPE_AVG] / AVG_SCALE, adapter->NF[TYPE_BEACON][TYPE_AVG] / AVG_SCALE); - lbs_deb_cmd("Beacon RSSI value = 0x%x\n", + lbs_deb_cmd("RSSI: beacon %d, avg %d\n", + adapter->RSSI[TYPE_BEACON][TYPE_NOAVG], adapter->RSSI[TYPE_BEACON][TYPE_AVG]); + lbs_deb_leave(LBS_DEB_CMD); return 0; } @@ -504,11 +484,11 @@ static int wlan_ret_802_11_eeprom_access(wlan_private * priv, struct wlan_ioctl_regrdwr *pbuf; pbuf = (struct wlan_ioctl_regrdwr *) adapter->prdeeprom; - lbs_deb_cmd("eeprom read len=%x\n", + lbs_deb_enter_args(LBS_DEB_CMD, "len %d", le16_to_cpu(resp->params.rdeeprom.bytecount)); if (pbuf->NOB < le16_to_cpu(resp->params.rdeeprom.bytecount)) { pbuf->NOB = 0; - lbs_deb_cmd("eeprom read return length is too big\n"); + lbs_deb_cmd("EEPROM read length too big\n"); return -1; } pbuf->NOB = le16_to_cpu(resp->params.rdeeprom.bytecount); @@ -516,9 +496,10 @@ static int wlan_ret_802_11_eeprom_access(wlan_private * priv, memcpy(&pbuf->value, (u8 *) & resp->params.rdeeprom.value, le16_to_cpu(resp->params.rdeeprom.bytecount)); - lbs_dbg_hex("adapter", (char *)&pbuf->value, + lbs_deb_hex(LBS_DEB_CMD, "EEPROM", (char *)&pbuf->value, le16_to_cpu(resp->params.rdeeprom.bytecount)); } + lbs_deb_leave(LBS_DEB_CMD); return 0; } @@ -533,7 +514,7 @@ static int wlan_ret_get_log(wlan_private * priv, /* Stored little-endian */ memcpy(&adapter->logmsg, logmessage, sizeof(struct cmd_ds_802_11_get_log)); - lbs_deb_enter(LBS_DEB_CMD); + lbs_deb_leave(LBS_DEB_CMD); return 0; } @@ -546,12 +527,12 @@ static int libertas_ret_802_11_enable_rsn(wlan_private * priv, lbs_deb_enter(LBS_DEB_CMD); - if (enable_rsn->action == cpu_to_le16(cmd_act_get)) { + if (enable_rsn->action == cpu_to_le16(CMD_ACT_GET)) { if (pdata_buf) *pdata_buf = (u32) le16_to_cpu(enable_rsn->enable); } - lbs_deb_enter(LBS_DEB_CMD); + lbs_deb_leave(LBS_DEB_CMD); return 0; } @@ -563,135 +544,134 @@ static inline int handle_cmd_response(u16 respcmd, unsigned long flags; wlan_adapter *adapter = priv->adapter; + lbs_deb_enter(LBS_DEB_HOST); + switch (respcmd) { - case cmd_ret_mac_reg_access: - case cmd_ret_bbp_reg_access: - case cmd_ret_rf_reg_access: + case CMD_RET(CMD_MAC_REG_ACCESS): + case CMD_RET(CMD_BBP_REG_ACCESS): + case CMD_RET(CMD_RF_REG_ACCESS): ret = wlan_ret_reg_access(priv, respcmd, resp); break; - case cmd_ret_hw_spec_info: + case CMD_RET(CMD_GET_HW_SPEC): ret = wlan_ret_get_hw_spec(priv, resp); break; - case cmd_ret_802_11_scan: + case 
CMD_RET(CMD_802_11_SCAN): ret = libertas_ret_80211_scan(priv, resp); break; - case cmd_ret_802_11_get_log: + case CMD_RET(CMD_802_11_GET_LOG): ret = wlan_ret_get_log(priv, resp); break; - case cmd_ret_802_11_associate: - case cmd_ret_802_11_reassociate: + case CMD_RET_802_11_ASSOCIATE: + case CMD_RET(CMD_802_11_ASSOCIATE): + case CMD_RET(CMD_802_11_REASSOCIATE): ret = libertas_ret_80211_associate(priv, resp); break; - case cmd_ret_802_11_disassociate: - case cmd_ret_802_11_deauthenticate: + case CMD_RET(CMD_802_11_DISASSOCIATE): + case CMD_RET(CMD_802_11_DEAUTHENTICATE): ret = libertas_ret_80211_disassociate(priv, resp); break; - case cmd_ret_802_11_ad_hoc_start: - case cmd_ret_802_11_ad_hoc_join: + case CMD_RET(CMD_802_11_AD_HOC_START): + case CMD_RET(CMD_802_11_AD_HOC_JOIN): ret = libertas_ret_80211_ad_hoc_start(priv, resp); break; - case cmd_ret_802_11_stat: + case CMD_RET(CMD_802_11_GET_STAT): ret = wlan_ret_802_11_stat(priv, resp); break; - case cmd_ret_802_11_snmp_mib: + case CMD_RET(CMD_802_11_SNMP_MIB): ret = wlan_ret_802_11_snmp_mib(priv, resp); break; - case cmd_ret_802_11_rf_tx_power: + case CMD_RET(CMD_802_11_RF_TX_POWER): ret = wlan_ret_802_11_rf_tx_power(priv, resp); break; - case cmd_ret_802_11_set_afc: - case cmd_ret_802_11_get_afc: + case CMD_RET(CMD_802_11_SET_AFC): + case CMD_RET(CMD_802_11_GET_AFC): spin_lock_irqsave(&adapter->driver_lock, flags); memmove(adapter->cur_cmd->pdata_buf, &resp->params.afc, sizeof(struct cmd_ds_802_11_afc)); spin_unlock_irqrestore(&adapter->driver_lock, flags); break; - case cmd_ret_802_11_rf_antenna: - ret = wlan_ret_802_11_rf_antenna(priv, resp); - break; - case cmd_ret_mac_multicast_adr: - case cmd_ret_mac_control: - case cmd_ret_802_11_set_wep: - case cmd_ret_802_11_reset: - case cmd_ret_802_11_authenticate: - case cmd_ret_802_11_radio_control: - case cmd_ret_802_11_beacon_stop: + case CMD_RET(CMD_MAC_MULTICAST_ADR): + case CMD_RET(CMD_MAC_CONTROL): + case CMD_RET(CMD_802_11_SET_WEP): + case CMD_RET(CMD_802_11_RESET): + case CMD_RET(CMD_802_11_AUTHENTICATE): + case CMD_RET(CMD_802_11_RADIO_CONTROL): + case CMD_RET(CMD_802_11_BEACON_STOP): break; - case cmd_ret_802_11_enable_rsn: + case CMD_RET(CMD_802_11_ENABLE_RSN): ret = libertas_ret_802_11_enable_rsn(priv, resp); break; - case cmd_ret_802_11_data_rate: + case CMD_RET(CMD_802_11_DATA_RATE): ret = wlan_ret_802_11_data_rate(priv, resp); break; - case cmd_ret_802_11_rate_adapt_rateset: + case CMD_RET(CMD_802_11_RATE_ADAPT_RATESET): ret = wlan_ret_802_11_rate_adapt_rateset(priv, resp); break; - case cmd_ret_802_11_rf_channel: + case CMD_RET(CMD_802_11_RF_CHANNEL): ret = wlan_ret_802_11_rf_channel(priv, resp); break; - case cmd_ret_802_11_rssi: + case CMD_RET(CMD_802_11_RSSI): ret = wlan_ret_802_11_rssi(priv, resp); break; - case cmd_ret_802_11_mac_address: + case CMD_RET(CMD_802_11_MAC_ADDRESS): ret = wlan_ret_802_11_mac_address(priv, resp); break; - case cmd_ret_802_11_ad_hoc_stop: + case CMD_RET(CMD_802_11_AD_HOC_STOP): ret = libertas_ret_80211_ad_hoc_stop(priv, resp); break; - case cmd_ret_802_11_key_material: - lbs_deb_cmd("CMD_RESP: KEY_MATERIAL command response\n"); + case CMD_RET(CMD_802_11_KEY_MATERIAL): ret = wlan_ret_802_11_key_material(priv, resp); break; - case cmd_ret_802_11_eeprom_access: + case CMD_RET(CMD_802_11_EEPROM_ACCESS): ret = wlan_ret_802_11_eeprom_access(priv, resp); break; - case cmd_ret_802_11d_domain_info: + case CMD_RET(CMD_802_11D_DOMAIN_INFO): ret = libertas_ret_802_11d_domain_info(priv, resp); break; - case cmd_ret_802_11_sleep_params: + case 
CMD_RET(CMD_802_11_SLEEP_PARAMS): ret = wlan_ret_802_11_sleep_params(priv, resp); break; - case cmd_ret_802_11_inactivity_timeout: + case CMD_RET(CMD_802_11_INACTIVITY_TIMEOUT): spin_lock_irqsave(&adapter->driver_lock, flags); *((u16 *) adapter->cur_cmd->pdata_buf) = le16_to_cpu(resp->params.inactivity_timeout.timeout); spin_unlock_irqrestore(&adapter->driver_lock, flags); break; - case cmd_ret_802_11_tpc_cfg: + case CMD_RET(CMD_802_11_TPC_CFG): spin_lock_irqsave(&adapter->driver_lock, flags); memmove(adapter->cur_cmd->pdata_buf, &resp->params.tpccfg, sizeof(struct cmd_ds_802_11_tpc_cfg)); spin_unlock_irqrestore(&adapter->driver_lock, flags); break; - case cmd_ret_802_11_led_gpio_ctrl: + case CMD_RET(CMD_802_11_LED_GPIO_CTRL): spin_lock_irqsave(&adapter->driver_lock, flags); memmove(adapter->cur_cmd->pdata_buf, &resp->params.ledgpio, sizeof(struct cmd_ds_802_11_led_ctrl)); spin_unlock_irqrestore(&adapter->driver_lock, flags); break; - case cmd_ret_802_11_pwr_cfg: + case CMD_RET(CMD_802_11_PWR_CFG): spin_lock_irqsave(&adapter->driver_lock, flags); memmove(adapter->cur_cmd->pdata_buf, &resp->params.pwrcfg, sizeof(struct cmd_ds_802_11_pwr_cfg)); @@ -699,39 +679,37 @@ static inline int handle_cmd_response(u16 respcmd, break; - case cmd_ret_get_tsf: + case CMD_RET(CMD_GET_TSF): spin_lock_irqsave(&adapter->driver_lock, flags); memcpy(priv->adapter->cur_cmd->pdata_buf, &resp->params.gettsf.tsfvalue, sizeof(u64)); spin_unlock_irqrestore(&adapter->driver_lock, flags); break; - case cmd_ret_bt_access: + case CMD_RET(CMD_BT_ACCESS): spin_lock_irqsave(&adapter->driver_lock, flags); if (adapter->cur_cmd->pdata_buf) memcpy(adapter->cur_cmd->pdata_buf, &resp->params.bt.addr1, 2 * ETH_ALEN); spin_unlock_irqrestore(&adapter->driver_lock, flags); break; - case cmd_ret_fwt_access: + case CMD_RET(CMD_FWT_ACCESS): spin_lock_irqsave(&adapter->driver_lock, flags); if (adapter->cur_cmd->pdata_buf) memcpy(adapter->cur_cmd->pdata_buf, &resp->params.fwt, sizeof(resp->params.fwt)); spin_unlock_irqrestore(&adapter->driver_lock, flags); break; - case cmd_ret_mesh_access: + case CMD_RET(CMD_MESH_ACCESS): if (adapter->cur_cmd->pdata_buf) memcpy(adapter->cur_cmd->pdata_buf, &resp->params.mesh, sizeof(resp->params.mesh)); break; - case cmd_rte_802_11_tx_rate_query: - priv->adapter->txrate = resp->params.txrate.txrate; - break; default: - lbs_deb_cmd("CMD_RESP: Unknown command response %#x\n", + lbs_deb_host("CMD_RESP: unknown cmd response 0x%04x\n", resp->command); break; } + lbs_deb_leave(LBS_DEB_HOST); return ret; } @@ -744,9 +722,7 @@ int libertas_process_rx_command(wlan_private * priv) ulong flags; u16 result; - lbs_deb_enter(LBS_DEB_CMD); - - lbs_deb_cmd("CMD_RESP: @ %lu\n", jiffies); + lbs_deb_enter(LBS_DEB_HOST); /* Now we got response from FW, cancel the command timer */ del_timer(&adapter->command_timer); @@ -755,25 +731,23 @@ int libertas_process_rx_command(wlan_private * priv) spin_lock_irqsave(&adapter->driver_lock, flags); if (!adapter->cur_cmd) { - lbs_deb_cmd("CMD_RESP: NULL cur_cmd=%p\n", adapter->cur_cmd); + lbs_deb_host("CMD_RESP: cur_cmd is NULL\n"); ret = -1; spin_unlock_irqrestore(&adapter->driver_lock, flags); goto done; } resp = (struct cmd_ds_command *)(adapter->cur_cmd->bufvirtualaddr); - lbs_dbg_hex("CMD_RESP:", adapter->cur_cmd->bufvirtualaddr, - priv->upld_len); - respcmd = le16_to_cpu(resp->command); - result = le16_to_cpu(resp->result); - lbs_deb_cmd("CMD_RESP: %x result: %d length: %d\n", respcmd, - result, priv->upld_len); + lbs_deb_host("CMD_RESP: response 0x%04x, size %d, jiffies %lu\n", + 
respcmd, priv->upld_len, jiffies); + lbs_deb_hex(LBS_DEB_HOST, "CMD_RESP", adapter->cur_cmd->bufvirtualaddr, + priv->upld_len); if (!(respcmd & 0x8000)) { - lbs_deb_cmd("Invalid response to command!"); + lbs_deb_host("invalid response!\n"); adapter->cur_cmd_retcode = -1; __libertas_cleanup_and_insert_cmd(priv, adapter->cur_cmd); adapter->nr_cmd_pending--; @@ -786,16 +760,16 @@ int libertas_process_rx_command(wlan_private * priv) /* Store the response code to cur_cmd_retcode. */ adapter->cur_cmd_retcode = result;; - if (respcmd == cmd_ret_802_11_ps_mode) { + if (respcmd == CMD_RET(CMD_802_11_PS_MODE)) { struct cmd_ds_802_11_ps_mode *psmode = &resp->params.psmode; u16 action = le16_to_cpu(psmode->action); - lbs_deb_cmd( - "CMD_RESP: PS_MODE cmd reply result=%#x action=0x%X\n", + lbs_deb_host( + "CMD_RESP: PS_MODE cmd reply result 0x%x, action 0x%x\n", result, action); if (result) { - lbs_deb_cmd("CMD_RESP: PS command failed- %#x \n", + lbs_deb_host("CMD_RESP: PS command failed with 0x%x\n", result); /* * We should not re-try enter-ps command in @@ -803,20 +777,20 @@ int libertas_process_rx_command(wlan_private * priv) * libertas_execute_next_command(). */ if (adapter->mode == IW_MODE_ADHOC && - action == cmd_subcmd_enter_ps) - adapter->psmode = wlan802_11powermodecam; - } else if (action == cmd_subcmd_enter_ps) { + action == CMD_SUBCMD_ENTER_PS) + adapter->psmode = WLAN802_11POWERMODECAM; + } else if (action == CMD_SUBCMD_ENTER_PS) { adapter->needtowakeup = 0; adapter->psstate = PS_STATE_AWAKE; - lbs_deb_cmd("CMD_RESP: Enter_PS command response\n"); - if (adapter->connect_status != libertas_connected) { + lbs_deb_host("CMD_RESP: ENTER_PS command response\n"); + if (adapter->connect_status != LIBERTAS_CONNECTED) { /* * When Deauth Event received before Enter_PS command * response, We need to wake up the firmware. 
*/ - lbs_deb_cmd( - "Disconnected, Going to invoke libertas_ps_wakeup\n"); + lbs_deb_host( + "disconnected, invoking libertas_ps_wakeup\n"); spin_unlock_irqrestore(&adapter->driver_lock, flags); mutex_unlock(&adapter->lock); @@ -824,12 +798,12 @@ int libertas_process_rx_command(wlan_private * priv) mutex_lock(&adapter->lock); spin_lock_irqsave(&adapter->driver_lock, flags); } - } else if (action == cmd_subcmd_exit_ps) { + } else if (action == CMD_SUBCMD_EXIT_PS) { adapter->needtowakeup = 0; adapter->psstate = PS_STATE_FULL_POWER; - lbs_deb_cmd("CMD_RESP: Exit_PS command response\n"); + lbs_deb_host("CMD_RESP: EXIT_PS command response\n"); } else { - lbs_deb_cmd("CMD_RESP: PS- action=0x%X\n", action); + lbs_deb_host("CMD_RESP: PS action 0x%X\n", action); } __libertas_cleanup_and_insert_cmd(priv, adapter->cur_cmd); @@ -843,22 +817,22 @@ int libertas_process_rx_command(wlan_private * priv) if (adapter->cur_cmd->cmdflags & CMD_F_HOSTCMD) { /* Copy the response back to response buffer */ - memcpy(adapter->cur_cmd->pdata_buf, resp, resp->size); - + memcpy(adapter->cur_cmd->pdata_buf, resp, + le16_to_cpu(resp->size)); adapter->cur_cmd->cmdflags &= ~CMD_F_HOSTCMD; } /* If the command is not successful, cleanup and return failure */ if ((result != 0 || !(respcmd & 0x8000))) { - lbs_deb_cmd("CMD_RESP: command reply %#x result=%#x\n", - respcmd, result); + lbs_deb_host("CMD_RESP: error 0x%04x in command reply 0x%04x\n", + result, respcmd); /* * Handling errors here */ switch (respcmd) { - case cmd_ret_hw_spec_info: - case cmd_ret_802_11_reset: - lbs_deb_cmd("CMD_RESP: Reset command failed\n"); + case CMD_RET(CMD_GET_HW_SPEC): + case CMD_RET(CMD_802_11_RESET): + lbs_deb_host("CMD_RESP: reset failed\n"); break; } @@ -888,7 +862,7 @@ int libertas_process_rx_command(wlan_private * priv) done: mutex_unlock(&adapter->lock); - lbs_deb_enter_args(LBS_DEB_CMD, "ret %d", ret); + lbs_deb_leave_args(LBS_DEB_HOST, "ret %d", ret); return ret; } @@ -898,13 +872,13 @@ int libertas_process_event(wlan_private * priv) wlan_adapter *adapter = priv->adapter; u32 eventcause; + lbs_deb_enter(LBS_DEB_CMD); + spin_lock_irq(&adapter->driver_lock); eventcause = adapter->eventcause; spin_unlock_irq(&adapter->driver_lock); - lbs_deb_enter(LBS_DEB_CMD); - - lbs_deb_cmd("EVENT Cause %x\n", eventcause); + lbs_deb_cmd("event cause 0x%x\n", eventcause); switch (eventcause >> SBI_EVENT_CAUSE_SHIFT) { case MACREG_INT_CODE_LINK_SENSED: @@ -912,28 +886,27 @@ int libertas_process_event(wlan_private * priv) break; case MACREG_INT_CODE_DEAUTHENTICATED: - lbs_deb_cmd("EVENT: Deauthenticated\n"); + lbs_deb_cmd("EVENT: deauthenticated\n"); libertas_mac_event_disconnected(priv); break; case MACREG_INT_CODE_DISASSOCIATED: - lbs_deb_cmd("EVENT: Disassociated\n"); + lbs_deb_cmd("EVENT: disassociated\n"); libertas_mac_event_disconnected(priv); break; case MACREG_INT_CODE_LINK_LOSE_NO_SCAN: - lbs_deb_cmd("EVENT: Link lost\n"); + lbs_deb_cmd("EVENT: link lost\n"); libertas_mac_event_disconnected(priv); break; case MACREG_INT_CODE_PS_SLEEP: - lbs_deb_cmd("EVENT: SLEEP\n"); - lbs_deb_cmd("_"); + lbs_deb_cmd("EVENT: sleep\n"); /* handle unexpected PS SLEEP event */ if (adapter->psstate == PS_STATE_FULL_POWER) { lbs_deb_cmd( - "EVENT: In FULL POWER mode - ignore PS SLEEP\n"); + "EVENT: in FULL POWER mode, ignoring PS_SLEEP\n"); break; } adapter->psstate = PS_STATE_PRE_SLEEP; @@ -943,8 +916,7 @@ int libertas_process_event(wlan_private * priv) break; case MACREG_INT_CODE_PS_AWAKE: - lbs_deb_cmd("EVENT: AWAKE \n"); - lbs_deb_cmd("|"); + lbs_deb_cmd("EVENT:
awake\n"); /* handle unexpected PS AWAKE event */ if (adapter->psstate == PS_STATE_FULL_POWER) { @@ -962,7 +934,7 @@ int libertas_process_event(wlan_private * priv) * adapter->needtowakeup will be set to FALSE * in libertas_ps_wakeup() */ - lbs_deb_cmd("Waking up...\n"); + lbs_deb_cmd("waking up ...\n"); libertas_ps_wakeup(priv, 0); } break; @@ -981,38 +953,43 @@ int libertas_process_event(wlan_private * priv) break; case MACREG_INT_CODE_ADHOC_BCN_LOST: - lbs_deb_cmd("EVENT: HWAC - ADHOC BCN LOST\n"); + lbs_deb_cmd("EVENT: ADHOC beacon lost\n"); break; case MACREG_INT_CODE_RSSI_LOW: - lbs_pr_alert( "EVENT: RSSI_LOW\n"); + lbs_pr_alert("EVENT: rssi low\n"); break; case MACREG_INT_CODE_SNR_LOW: - lbs_pr_alert( "EVENT: SNR_LOW\n"); + lbs_pr_alert("EVENT: snr low\n"); break; case MACREG_INT_CODE_MAX_FAIL: - lbs_pr_alert( "EVENT: MAX_FAIL\n"); + lbs_pr_alert("EVENT: max fail\n"); break; case MACREG_INT_CODE_RSSI_HIGH: - lbs_pr_alert( "EVENT: RSSI_HIGH\n"); + lbs_pr_alert("EVENT: rssi high\n"); break; case MACREG_INT_CODE_SNR_HIGH: - lbs_pr_alert( "EVENT: SNR_HIGH\n"); + lbs_pr_alert("EVENT: snr high\n"); break; case MACREG_INT_CODE_MESH_AUTO_STARTED: - lbs_pr_alert( "EVENT: MESH_AUTO_STARTED\n"); - adapter->connect_status = libertas_connected ; + /* Ignore spurious autostart events if autostart is disabled */ + if (!priv->mesh_autostart_enabled) { + lbs_pr_info("EVENT: MESH_AUTO_STARTED (ignoring)\n"); + break; + } + lbs_pr_info("EVENT: MESH_AUTO_STARTED\n"); + adapter->connect_status = LIBERTAS_CONNECTED; if (priv->mesh_open == 1) { - netif_wake_queue(priv->mesh_dev) ; - netif_carrier_on(priv->mesh_dev) ; + netif_wake_queue(priv->mesh_dev); + netif_carrier_on(priv->mesh_dev); } - adapter->mode = IW_MODE_ADHOC ; + adapter->mode = IW_MODE_ADHOC; schedule_work(&priv->sync_channel); break; default: - lbs_pr_alert( "EVENT: unknown event id: %#x\n", + lbs_pr_alert("EVENT: unknown event id 0x%04x\n", eventcause >> SBI_EVENT_CAUSE_SHIFT); break; } @@ -1021,6 +998,6 @@ int libertas_process_event(wlan_private * priv) adapter->eventcause = 0; spin_unlock_irq(&adapter->driver_lock); - lbs_deb_enter_args(LBS_DEB_CMD, "ret %d", ret); + lbs_deb_leave_args(LBS_DEB_CMD, "ret %d", ret); return ret; } diff --git a/drivers/net/wireless/libertas/debugfs.c b/drivers/net/wireless/libertas/debugfs.c index 715cbdaa1d4bcf17170717b6148d409e4907ef8d..0bda0b5119109d1f23f13ab78dd31de2dff1ce25 100644 --- a/drivers/net/wireless/libertas/debugfs.c +++ b/drivers/net/wireless/libertas/debugfs.c @@ -3,6 +3,7 @@ #include #include #include +#include #include #include "dev.h" @@ -63,27 +64,27 @@ static ssize_t libertas_getscantable(struct file *file, char __user *userbuf, int numscansdone = 0, res; unsigned long addr = get_zeroed_page(GFP_KERNEL); char *buf = (char *)addr; + DECLARE_MAC_BUF(mac); struct bss_descriptor * iter_bss; pos += snprintf(buf+pos, len-pos, - "# | ch | ss | bssid | cap | TSF | Qual | SSID \n"); + "# | ch | rssi | bssid | cap | Qual | SSID \n"); mutex_lock(&priv->adapter->lock); list_for_each_entry (iter_bss, &priv->adapter->network_list, list) { - u16 cap; + u16 ibss = (iter_bss->capability & WLAN_CAPABILITY_IBSS); + u16 privacy = (iter_bss->capability & WLAN_CAPABILITY_PRIVACY); + u16 spectrum_mgmt = (iter_bss->capability & WLAN_CAPABILITY_SPECTRUM_MGMT); - memcpy(&cap, &iter_bss->cap, sizeof(cap)); pos += snprintf(buf+pos, len-pos, - "%02u| %03d | %03ld | " MAC_FMT " |", + "%02u| %03d | %04ld | %s |", numscansdone, iter_bss->channel, iter_bss->rssi, - MAC_ARG(iter_bss->bssid)); - pos += snprintf(buf+pos, 
len-pos, " %04x-", cap); + print_mac(mac, iter_bss->bssid)); + pos += snprintf(buf+pos, len-pos, " %04x-", iter_bss->capability); pos += snprintf(buf+pos, len-pos, "%c%c%c |", - iter_bss->cap.ibss ? 'A' : 'I', - iter_bss->cap.privacy ? 'P' : ' ', - iter_bss->cap.spectrummgmt ? 'S' : ' '); - pos += snprintf(buf+pos, len-pos, " %08llx |", iter_bss->networktsf); - pos += snprintf(buf+pos, len-pos, " %d |", SCAN_RSSI(iter_bss->rssi)); + ibss ? 'A' : 'I', privacy ? 'P' : ' ', + spectrum_mgmt ? 'S' : ' '); + pos += snprintf(buf+pos, len-pos, " %04d |", SCAN_RSSI(iter_bss->rssi)); pos += snprintf(buf+pos, len-pos, " %s\n", escape_essid(iter_bss->ssid, iter_bss->ssid_len)); @@ -125,9 +126,9 @@ static ssize_t libertas_sleepparams_write(struct file *file, priv->adapter->sp.sp_reserved = p6; res = libertas_prepare_and_send_command(priv, - cmd_802_11_sleep_params, - cmd_act_set, - cmd_option_waitforrsp, 0, NULL); + CMD_802_11_SLEEP_PARAMS, + CMD_ACT_SET, + CMD_OPTION_WAITFORRSP, 0, NULL); if (!res) res = count; @@ -150,9 +151,9 @@ static ssize_t libertas_sleepparams_read(struct file *file, char __user *userbuf char *buf = (char *)addr; res = libertas_prepare_and_send_command(priv, - cmd_802_11_sleep_params, - cmd_act_get, - cmd_option_waitforrsp, 0, NULL); + CMD_802_11_SLEEP_PARAMS, + CMD_ACT_GET, + CMD_OPTION_WAITFORRSP, 0, NULL); if (res) { res = -EFAULT; goto out_unlock; @@ -205,7 +206,7 @@ static int libertas_parse_chan(char *buf, size_t count, if (!start) return -EINVAL; start += 5; - end = strstr(start, " "); + end = strchr(start, ' '); if (!end) end = buf + count; hold = kzalloc((end - start)+1, GFP_KERNEL); @@ -256,7 +257,7 @@ static void libertas_parse_ssid(char *buf, size_t count, if (!hold) return; hold += 5; - end = strstr(hold, " "); + end = strchr(hold, ' '); if (!end) end = buf + count - 1; @@ -386,7 +387,7 @@ static int libertas_event_initcmd(wlan_private *priv, void **response_buf, struct cmd_ctrl_node **cmdnode, struct cmd_ds_command **cmd) { - u16 wait_option = cmd_option_waitforrsp; + u16 wait_option = CMD_OPTION_WAITFORRSP; if (!(*cmdnode = libertas_get_free_cmd_ctrl_node(priv))) { lbs_deb_debugfs("failed libertas_get_free_cmd_ctrl_node\n"); @@ -402,7 +403,7 @@ static int libertas_event_initcmd(wlan_private *priv, void **response_buf, (*cmdnode)->cmdflags |= CMD_F_HOSTCMD; (*cmdnode)->cmdwaitqwoken = 0; *cmd = (struct cmd_ds_command *)(*cmdnode)->bufvirtualaddr; - (*cmd)->command = cpu_to_le16(cmd_802_11_subscribe_event); + (*cmd)->command = cpu_to_le16(CMD_802_11_SUBSCRIBE_EVENT); (*cmd)->seqnum = cpu_to_le16(++priv->adapter->seqnum); (*cmd)->result = 0; return 0; @@ -429,10 +430,10 @@ static ssize_t libertas_lowrssi_read(struct file *file, char __user *userbuf, } event = &pcmdptr->params.subscribe_event; - event->action = cpu_to_le16(cmd_act_get); + event->action = cpu_to_le16(CMD_ACT_GET); pcmdptr->size = cpu_to_le16(sizeof(*event) + S_DS_GEN); libertas_queue_cmd(adapter, pcmdnode, 1); - wake_up_interruptible(&priv->mainthread.waitq); + wake_up_interruptible(&priv->waitq); /* Sleep until response is generated by FW */ wait_event_interruptible(pcmdnode->cmdwait_q, @@ -447,7 +448,7 @@ static ssize_t libertas_lowrssi_read(struct file *file, char __user *userbuf, return 0; } - if (pcmdptr->command != cpu_to_le16(cmd_ret_802_11_subscribe_event)) { + if (pcmdptr->command != cpu_to_le16(CMD_RET(CMD_802_11_SUBSCRIBE_EVENT))) { lbs_pr_err("command response incorrect!\n"); kfree(response_buf); free_page(addr); @@ -493,10 +494,10 @@ static u16 libertas_get_events_bitmap(wlan_private 
*priv) return res; event = &pcmdptr->params.subscribe_event; - event->action = cpu_to_le16(cmd_act_get); + event->action = cpu_to_le16(CMD_ACT_GET); pcmdptr->size = cpu_to_le16(sizeof(*event) + S_DS_GEN); libertas_queue_cmd(adapter, pcmdnode, 1); - wake_up_interruptible(&priv->mainthread.waitq); + wake_up_interruptible(&priv->waitq); /* Sleep until response is generated by FW */ wait_event_interruptible(pcmdnode->cmdwait_q, @@ -511,7 +512,7 @@ static u16 libertas_get_events_bitmap(wlan_private *priv) return 0; } - if (pcmdptr->command != cmd_ret_802_11_subscribe_event) { + if (le16_to_cpu(pcmdptr->command) != CMD_RET(CMD_802_11_SUBSCRIBE_EVENT)) { lbs_pr_err("command response incorrect!\n"); kfree(response_buf); return 0; @@ -559,7 +560,7 @@ static ssize_t libertas_lowrssi_write(struct file *file, goto out_unlock; event = &pcmdptr->params.subscribe_event; - event->action = cpu_to_le16(cmd_act_set); + event->action = cpu_to_le16(CMD_ACT_SET); pcmdptr->size = cpu_to_le16(S_DS_GEN + sizeof(struct cmd_ds_802_11_subscribe_event) + sizeof(struct mrvlietypes_rssithreshold)); @@ -575,7 +576,7 @@ static ssize_t libertas_lowrssi_write(struct file *file, event->events = cpu_to_le16(event_bitmap); libertas_queue_cmd(adapter, pcmdnode, 1); - wake_up_interruptible(&priv->mainthread.waitq); + wake_up_interruptible(&priv->waitq); /* Sleep until response is generated by FW */ wait_event_interruptible(pcmdnode->cmdwait_q, @@ -591,7 +592,7 @@ static ssize_t libertas_lowrssi_write(struct file *file, return 0; } - if (pcmdptr->command != cpu_to_le16(cmd_ret_802_11_subscribe_event)) { + if (pcmdptr->command != cpu_to_le16(CMD_RET(CMD_802_11_SUBSCRIBE_EVENT))) { lbs_pr_err("command response incorrect!\n"); kfree(response_buf); free_page(addr); @@ -625,10 +626,10 @@ static ssize_t libertas_lowsnr_read(struct file *file, char __user *userbuf, } event = &pcmdptr->params.subscribe_event; - event->action = cpu_to_le16(cmd_act_get); + event->action = cpu_to_le16(CMD_ACT_GET); pcmdptr->size = cpu_to_le16(sizeof(*event) + S_DS_GEN); libertas_queue_cmd(adapter, pcmdnode, 1); - wake_up_interruptible(&priv->mainthread.waitq); + wake_up_interruptible(&priv->waitq); /* Sleep until response is generated by FW */ wait_event_interruptible(pcmdnode->cmdwait_q, @@ -644,7 +645,7 @@ static ssize_t libertas_lowsnr_read(struct file *file, char __user *userbuf, return 0; } - if (pcmdptr->command != cpu_to_le16(cmd_ret_802_11_subscribe_event)) { + if (pcmdptr->command != cpu_to_le16(CMD_RET(CMD_802_11_SUBSCRIBE_EVENT))) { lbs_pr_err("command response incorrect!\n"); kfree(response_buf); free_page(addr); @@ -712,7 +713,7 @@ static ssize_t libertas_lowsnr_write(struct file *file, goto out_unlock; event = &pcmdptr->params.subscribe_event; - event->action = cpu_to_le16(cmd_act_set); + event->action = cpu_to_le16(CMD_ACT_SET); pcmdptr->size = cpu_to_le16(S_DS_GEN + sizeof(struct cmd_ds_802_11_subscribe_event) + sizeof(struct mrvlietypes_snrthreshold)); @@ -727,7 +728,7 @@ static ssize_t libertas_lowsnr_write(struct file *file, event->events = cpu_to_le16(event_bitmap); libertas_queue_cmd(adapter, pcmdnode, 1); - wake_up_interruptible(&priv->mainthread.waitq); + wake_up_interruptible(&priv->waitq); /* Sleep until response is generated by FW */ wait_event_interruptible(pcmdnode->cmdwait_q, @@ -743,7 +744,7 @@ static ssize_t libertas_lowsnr_write(struct file *file, return 0; } - if (pcmdptr->command != cpu_to_le16(cmd_ret_802_11_subscribe_event)) { + if (pcmdptr->command != cpu_to_le16(CMD_RET(CMD_802_11_SUBSCRIBE_EVENT))) { 
lbs_pr_err("command response incorrect!\n"); kfree(response_buf); free_page(addr); @@ -778,10 +779,10 @@ static ssize_t libertas_failcount_read(struct file *file, char __user *userbuf, } event = &pcmdptr->params.subscribe_event; - event->action = cpu_to_le16(cmd_act_get); + event->action = cpu_to_le16(CMD_ACT_GET); pcmdptr->size = cpu_to_le16(sizeof(*event) + S_DS_GEN); libertas_queue_cmd(adapter, pcmdnode, 1); - wake_up_interruptible(&priv->mainthread.waitq); + wake_up_interruptible(&priv->waitq); /* Sleep until response is generated by FW */ wait_event_interruptible(pcmdnode->cmdwait_q, @@ -797,7 +798,7 @@ static ssize_t libertas_failcount_read(struct file *file, char __user *userbuf, return 0; } - if (pcmdptr->command != cpu_to_le16(cmd_ret_802_11_subscribe_event)) { + if (pcmdptr->command != cpu_to_le16(CMD_RET(CMD_802_11_SUBSCRIBE_EVENT))) { lbs_pr_err("command response incorrect!\n"); kfree(response_buf); free_page(addr); @@ -864,7 +865,7 @@ static ssize_t libertas_failcount_write(struct file *file, goto out_unlock; event = &pcmdptr->params.subscribe_event; - event->action = cpu_to_le16(cmd_act_set); + event->action = cpu_to_le16(CMD_ACT_SET); pcmdptr->size = cpu_to_le16(S_DS_GEN + sizeof(struct cmd_ds_802_11_subscribe_event) + sizeof(struct mrvlietypes_failurecount)); @@ -879,7 +880,7 @@ static ssize_t libertas_failcount_write(struct file *file, event->events = cpu_to_le16(event_bitmap); libertas_queue_cmd(adapter, pcmdnode, 1); - wake_up_interruptible(&priv->mainthread.waitq); + wake_up_interruptible(&priv->waitq); /* Sleep until response is generated by FW */ wait_event_interruptible(pcmdnode->cmdwait_q, @@ -895,7 +896,7 @@ static ssize_t libertas_failcount_write(struct file *file, return 0; } - if (pcmdptr->command != cpu_to_le16(cmd_ret_802_11_subscribe_event)) { + if (pcmdptr->command != cpu_to_le16(CMD_RET(CMD_802_11_SUBSCRIBE_EVENT))) { lbs_pr_err("command response incorrect!\n"); kfree(response_buf); free_page(addr); @@ -929,10 +930,10 @@ static ssize_t libertas_bcnmiss_read(struct file *file, char __user *userbuf, } event = &pcmdptr->params.subscribe_event; - event->action = cpu_to_le16(cmd_act_get); + event->action = cpu_to_le16(CMD_ACT_GET); pcmdptr->size = cpu_to_le16(sizeof(*event) + S_DS_GEN); libertas_queue_cmd(adapter, pcmdnode, 1); - wake_up_interruptible(&priv->mainthread.waitq); + wake_up_interruptible(&priv->waitq); /* Sleep until response is generated by FW */ wait_event_interruptible(pcmdnode->cmdwait_q, @@ -948,7 +949,7 @@ static ssize_t libertas_bcnmiss_read(struct file *file, char __user *userbuf, return 0; } - if (pcmdptr->command != cpu_to_le16(cmd_ret_802_11_subscribe_event)) { + if (pcmdptr->command != cpu_to_le16(CMD_RET(CMD_802_11_SUBSCRIBE_EVENT))) { lbs_pr_err("command response incorrect!\n"); free_page(addr); kfree(response_buf); @@ -1015,7 +1016,7 @@ static ssize_t libertas_bcnmiss_write(struct file *file, goto out_unlock; event = &pcmdptr->params.subscribe_event; - event->action = cpu_to_le16(cmd_act_set); + event->action = cpu_to_le16(CMD_ACT_SET); pcmdptr->size = cpu_to_le16(S_DS_GEN + sizeof(struct cmd_ds_802_11_subscribe_event) + sizeof(struct mrvlietypes_beaconsmissed)); @@ -1029,7 +1030,7 @@ static ssize_t libertas_bcnmiss_write(struct file *file, event->events = cpu_to_le16(event_bitmap); libertas_queue_cmd(adapter, pcmdnode, 1); - wake_up_interruptible(&priv->mainthread.waitq); + wake_up_interruptible(&priv->waitq); /* Sleep until response is generated by FW */ wait_event_interruptible(pcmdnode->cmdwait_q, @@ -1045,7 +1046,7 @@ static 
ssize_t libertas_bcnmiss_write(struct file *file, return 0; } - if (pcmdptr->command != cpu_to_le16(cmd_ret_802_11_subscribe_event)) { + if (pcmdptr->command != cpu_to_le16(CMD_RET(CMD_802_11_SUBSCRIBE_EVENT))) { lbs_pr_err("command response incorrect!\n"); free_page(addr); kfree(response_buf); @@ -1079,10 +1080,10 @@ static ssize_t libertas_highrssi_read(struct file *file, char __user *userbuf, } event = &pcmdptr->params.subscribe_event; - event->action = cpu_to_le16(cmd_act_get); + event->action = cpu_to_le16(CMD_ACT_GET); pcmdptr->size = cpu_to_le16(sizeof(*event) + S_DS_GEN); libertas_queue_cmd(adapter, pcmdnode, 1); - wake_up_interruptible(&priv->mainthread.waitq); + wake_up_interruptible(&priv->waitq); /* Sleep until response is generated by FW */ wait_event_interruptible(pcmdnode->cmdwait_q, @@ -1098,7 +1099,7 @@ static ssize_t libertas_highrssi_read(struct file *file, char __user *userbuf, return 0; } - if (pcmdptr->command != cpu_to_le16(cmd_ret_802_11_subscribe_event)) { + if (pcmdptr->command != cpu_to_le16(CMD_RET(CMD_802_11_SUBSCRIBE_EVENT))) { lbs_pr_err("command response incorrect!\n"); kfree(response_buf); free_page(addr); @@ -1166,7 +1167,7 @@ static ssize_t libertas_highrssi_write(struct file *file, goto out_unlock; event = &pcmdptr->params.subscribe_event; - event->action = cpu_to_le16(cmd_act_set); + event->action = cpu_to_le16(CMD_ACT_SET); pcmdptr->size = cpu_to_le16(S_DS_GEN + sizeof(struct cmd_ds_802_11_subscribe_event) + sizeof(struct mrvlietypes_rssithreshold)); @@ -1181,7 +1182,7 @@ static ssize_t libertas_highrssi_write(struct file *file, event->events = cpu_to_le16(event_bitmap); libertas_queue_cmd(adapter, pcmdnode, 1); - wake_up_interruptible(&priv->mainthread.waitq); + wake_up_interruptible(&priv->waitq); /* Sleep until response is generated by FW */ wait_event_interruptible(pcmdnode->cmdwait_q, @@ -1196,7 +1197,7 @@ static ssize_t libertas_highrssi_write(struct file *file, return 0; } - if (pcmdptr->command != cpu_to_le16(cmd_ret_802_11_subscribe_event)) { + if (pcmdptr->command != cpu_to_le16(CMD_RET(CMD_802_11_SUBSCRIBE_EVENT))) { lbs_pr_err("command response incorrect!\n"); kfree(response_buf); return 0; @@ -1229,10 +1230,10 @@ static ssize_t libertas_highsnr_read(struct file *file, char __user *userbuf, } event = &pcmdptr->params.subscribe_event; - event->action = cpu_to_le16(cmd_act_get); + event->action = cpu_to_le16(CMD_ACT_GET); pcmdptr->size = cpu_to_le16(sizeof(*event) + S_DS_GEN); libertas_queue_cmd(adapter, pcmdnode, 1); - wake_up_interruptible(&priv->mainthread.waitq); + wake_up_interruptible(&priv->waitq); /* Sleep until response is generated by FW */ wait_event_interruptible(pcmdnode->cmdwait_q, @@ -1248,7 +1249,7 @@ static ssize_t libertas_highsnr_read(struct file *file, char __user *userbuf, return 0; } - if (pcmdptr->command != cpu_to_le16(cmd_ret_802_11_subscribe_event)) { + if (pcmdptr->command != cpu_to_le16(CMD_RET(CMD_802_11_SUBSCRIBE_EVENT))) { lbs_pr_err("command response incorrect!\n"); kfree(response_buf); free_page(addr); @@ -1316,7 +1317,7 @@ static ssize_t libertas_highsnr_write(struct file *file, goto out_unlock; event = &pcmdptr->params.subscribe_event; - event->action = cpu_to_le16(cmd_act_set); + event->action = cpu_to_le16(CMD_ACT_SET); pcmdptr->size = cpu_to_le16(S_DS_GEN + sizeof(struct cmd_ds_802_11_subscribe_event) + sizeof(struct mrvlietypes_snrthreshold)); @@ -1331,7 +1332,7 @@ static ssize_t libertas_highsnr_write(struct file *file, event->events = cpu_to_le16(event_bitmap); libertas_queue_cmd(adapter, pcmdnode, 1); 
- wake_up_interruptible(&priv->mainthread.waitq); + wake_up_interruptible(&priv->waitq); /* Sleep until response is generated by FW */ wait_event_interruptible(pcmdnode->cmdwait_q, @@ -1347,7 +1348,7 @@ static ssize_t libertas_highsnr_write(struct file *file, return 0; } - if (pcmdptr->command != cpu_to_le16(cmd_ret_802_11_subscribe_event)) { + if (pcmdptr->command != cpu_to_le16(CMD_RET(CMD_802_11_SUBSCRIBE_EVENT))) { lbs_pr_err("command response incorrect!\n"); kfree(response_buf); free_page(addr); @@ -1375,8 +1376,8 @@ static ssize_t libertas_rdmac_read(struct file *file, char __user *userbuf, offval.value = 0; ret = libertas_prepare_and_send_command(priv, - cmd_mac_reg_access, 0, - cmd_option_waitforrsp, 0, &offval); + CMD_MAC_REG_ACCESS, 0, + CMD_OPTION_WAITFORRSP, 0, &offval); mdelay(10); pos += snprintf(buf+pos, len-pos, "MAC[0x%x] = 0x%08x\n", priv->mac_offset, adapter->offsetvalue.value); @@ -1433,8 +1434,8 @@ static ssize_t libertas_wrmac_write(struct file *file, offval.offset = offset; offval.value = value; res = libertas_prepare_and_send_command(priv, - cmd_mac_reg_access, 1, - cmd_option_waitforrsp, 0, &offval); + CMD_MAC_REG_ACCESS, 1, + CMD_OPTION_WAITFORRSP, 0, &offval); mdelay(10); res = count; @@ -1458,8 +1459,8 @@ static ssize_t libertas_rdbbp_read(struct file *file, char __user *userbuf, offval.value = 0; ret = libertas_prepare_and_send_command(priv, - cmd_bbp_reg_access, 0, - cmd_option_waitforrsp, 0, &offval); + CMD_BBP_REG_ACCESS, 0, + CMD_OPTION_WAITFORRSP, 0, &offval); mdelay(10); pos += snprintf(buf+pos, len-pos, "BBP[0x%x] = 0x%08x\n", priv->bbp_offset, adapter->offsetvalue.value); @@ -1517,8 +1518,8 @@ static ssize_t libertas_wrbbp_write(struct file *file, offval.offset = offset; offval.value = value; res = libertas_prepare_and_send_command(priv, - cmd_bbp_reg_access, 1, - cmd_option_waitforrsp, 0, &offval); + CMD_BBP_REG_ACCESS, 1, + CMD_OPTION_WAITFORRSP, 0, &offval); mdelay(10); res = count; @@ -1542,8 +1543,8 @@ static ssize_t libertas_rdrf_read(struct file *file, char __user *userbuf, offval.value = 0; ret = libertas_prepare_and_send_command(priv, - cmd_rf_reg_access, 0, - cmd_option_waitforrsp, 0, &offval); + CMD_RF_REG_ACCESS, 0, + CMD_OPTION_WAITFORRSP, 0, &offval); mdelay(10); pos += snprintf(buf+pos, len-pos, "RF[0x%x] = 0x%08x\n", priv->rf_offset, adapter->offsetvalue.value); @@ -1601,8 +1602,8 @@ static ssize_t libertas_wrrf_write(struct file *file, offval.offset = offset; offval.value = value; res = libertas_prepare_and_send_command(priv, - cmd_rf_reg_access, 1, - cmd_option_waitforrsp, 0, &offval); + CMD_RF_REG_ACCESS, 1, + CMD_OPTION_WAITFORRSP, 0, &offval); mdelay(10); res = count; @@ -1839,7 +1840,7 @@ static ssize_t wlan_debugfs_write(struct file *f, const char __user *buf, char *p2; struct debug_data *d = (struct debug_data *)f->private_data; - pdata = (char *)kmalloc(cnt, GFP_KERNEL); + pdata = kmalloc(cnt, GFP_KERNEL); if (pdata == NULL) return 0; diff --git a/drivers/net/wireless/libertas/decl.h b/drivers/net/wireless/libertas/decl.h index 40f56bb1eac8332aa9b172df9778d8778d2116e4..87fea9d5b90a7394e4aba2c7e01347e64adb6238 100644 --- a/drivers/net/wireless/libertas/decl.h +++ b/drivers/net/wireless/libertas/decl.h @@ -15,14 +15,9 @@ struct wlan_private; struct sk_buff; struct net_device; -extern char *libertas_fw_name; - -void libertas_free_adapter(wlan_private * priv); int libertas_set_mac_packet_filter(wlan_private * priv); -int libertas_send_null_packet(wlan_private * priv, u8 pwr_mgmt); void libertas_send_tx_feedback(wlan_private * priv); 
-u8 libertas_check_last_packet_indication(wlan_private * priv); int libertas_free_cmd_buffer(wlan_private * priv); struct cmd_ctrl_node; @@ -44,8 +39,8 @@ int libertas_execute_next_command(wlan_private * priv); int libertas_process_event(wlan_private * priv); void libertas_interrupt(struct net_device *); int libertas_set_radio_control(wlan_private * priv); -u32 libertas_index_to_data_rate(u8 index); -u8 libertas_data_rate_to_index(u32 rate); +u32 libertas_fw_index_to_data_rate(u8 index); +u8 libertas_data_rate_to_fw_index(u32 rate); void libertas_get_fwversion(wlan_adapter * adapter, char *fwversion, int maxlen); void libertas_upload_rx_packet(wlan_private * priv, struct sk_buff *skb); @@ -53,8 +48,6 @@ void libertas_upload_rx_packet(wlan_private * priv, struct sk_buff *skb); /** The proc fs interface */ int libertas_process_rx_command(wlan_private * priv); int libertas_process_tx(wlan_private * priv, struct sk_buff *skb); -void libertas_cleanup_and_insert_cmd(wlan_private * priv, - struct cmd_ctrl_node *ptempcmd); void __libertas_cleanup_and_insert_cmd(wlan_private * priv, struct cmd_ctrl_node *ptempcmd); @@ -75,17 +68,15 @@ void libertas_mac_event_disconnected(wlan_private * priv); void libertas_send_iwevcustom_event(wlan_private * priv, s8 * str); -/* fw.c */ -int libertas_init_fw(wlan_private * priv, char *fw_name); - /* main.c */ struct chan_freq_power *libertas_get_region_cfp_table(u8 region, u8 band, int *cfp_no); wlan_private *libertas_add_card(void *card, struct device *dmdev); -int libertas_activate_card(wlan_private *priv, char *fw_name); int libertas_remove_card(wlan_private *priv); +int libertas_start_card(wlan_private *priv); +int libertas_stop_card(wlan_private *priv); int libertas_add_mesh(wlan_private *priv, struct device *dev); void libertas_remove_mesh(wlan_private *priv); - +int libertas_reset_device(wlan_private *priv); #endif /* _WLAN_DECL_H_ */ diff --git a/drivers/net/wireless/libertas/defs.h b/drivers/net/wireless/libertas/defs.h index 4dd43e59bda004422ddfd6ca5b8540edb54cc702..7c5b7f7b45db3231f37f8ae563904f436cc274ac 100644 --- a/drivers/net/wireless/libertas/defs.h +++ b/drivers/net/wireless/libertas/defs.h @@ -43,43 +43,43 @@ extern unsigned int libertas_debug; #ifdef DEBUG -#define LBS_DEB_LL(grp, fmt, args...) \ +#define LBS_DEB_LL(grp, grpnam, fmt, args...) \ do { if ((libertas_debug & (grp)) == (grp)) \ - printk(KERN_DEBUG DRV_NAME "%s: " fmt, \ + printk(KERN_DEBUG DRV_NAME grpnam "%s: " fmt, \ in_interrupt() ? " (INT)" : "", ## args); } while (0) #else -#define LBS_DEB_LL(grp, fmt, args...) do {} while (0) +#define LBS_DEB_LL(grp, grpnam, fmt, args...) do {} while (0) #endif #define lbs_deb_enter(grp) \ - LBS_DEB_LL(grp | LBS_DEB_ENTER, "%s():%d enter\n", __FUNCTION__, __LINE__); + LBS_DEB_LL(grp | LBS_DEB_ENTER, " enter", "%s():%d\n", __FUNCTION__, __LINE__); #define lbs_deb_enter_args(grp, fmt, args...) \ - LBS_DEB_LL(grp | LBS_DEB_ENTER, "%s(" fmt "):%d\n", __FUNCTION__, ## args, __LINE__); + LBS_DEB_LL(grp | LBS_DEB_ENTER, " enter", "%s(" fmt "):%d\n", __FUNCTION__, ## args, __LINE__); #define lbs_deb_leave(grp) \ - LBS_DEB_LL(grp | LBS_DEB_LEAVE, "%s():%d leave\n", __FUNCTION__, __LINE__); + LBS_DEB_LL(grp | LBS_DEB_LEAVE, " leave", "%s():%d\n", __FUNCTION__, __LINE__); #define lbs_deb_leave_args(grp, fmt, args...) \ - LBS_DEB_LL(grp | LBS_DEB_LEAVE, "%s():%d leave, " fmt "\n", \ + LBS_DEB_LL(grp | LBS_DEB_LEAVE, " leave", "%s():%d, " fmt "\n", \ __FUNCTION__, __LINE__, ##args); -#define lbs_deb_main(fmt, args...) 
LBS_DEB_LL(LBS_DEB_MAIN, fmt, ##args) -#define lbs_deb_net(fmt, args...) LBS_DEB_LL(LBS_DEB_NET, fmt, ##args) -#define lbs_deb_mesh(fmt, args...) LBS_DEB_LL(LBS_DEB_MESH, fmt, ##args) -#define lbs_deb_wext(fmt, args...) LBS_DEB_LL(LBS_DEB_WEXT, fmt, ##args) -#define lbs_deb_ioctl(fmt, args...) LBS_DEB_LL(LBS_DEB_IOCTL, fmt, ##args) -#define lbs_deb_scan(fmt, args...) LBS_DEB_LL(LBS_DEB_SCAN, fmt, ##args) -#define lbs_deb_assoc(fmt, args...) LBS_DEB_LL(LBS_DEB_ASSOC, fmt, ##args) -#define lbs_deb_join(fmt, args...) LBS_DEB_LL(LBS_DEB_JOIN, fmt, ##args) -#define lbs_deb_11d(fmt, args...) LBS_DEB_LL(LBS_DEB_11D, fmt, ##args) -#define lbs_deb_debugfs(fmt, args...) LBS_DEB_LL(LBS_DEB_DEBUGFS, fmt, ##args) -#define lbs_deb_ethtool(fmt, args...) LBS_DEB_LL(LBS_DEB_ETHTOOL, fmt, ##args) -#define lbs_deb_host(fmt, args...) LBS_DEB_LL(LBS_DEB_HOST, fmt, ##args) -#define lbs_deb_cmd(fmt, args...) LBS_DEB_LL(LBS_DEB_CMD, fmt, ##args) -#define lbs_deb_rx(fmt, args...) LBS_DEB_LL(LBS_DEB_RX, fmt, ##args) -#define lbs_deb_tx(fmt, args...) LBS_DEB_LL(LBS_DEB_TX, fmt, ##args) -#define lbs_deb_fw(fmt, args...) LBS_DEB_LL(LBS_DEB_FW, fmt, ##args) -#define lbs_deb_usb(fmt, args...) LBS_DEB_LL(LBS_DEB_USB, fmt, ##args) -#define lbs_deb_usbd(dev, fmt, args...) LBS_DEB_LL(LBS_DEB_USB, "%s:" fmt, (dev)->bus_id, ##args) -#define lbs_deb_cs(fmt, args...) LBS_DEB_LL(LBS_DEB_CS, fmt, ##args) -#define lbs_deb_thread(fmt, args...) LBS_DEB_LL(LBS_DEB_THREAD, fmt, ##args) +#define lbs_deb_main(fmt, args...) LBS_DEB_LL(LBS_DEB_MAIN, " main", fmt, ##args) +#define lbs_deb_net(fmt, args...) LBS_DEB_LL(LBS_DEB_NET, " net", fmt, ##args) +#define lbs_deb_mesh(fmt, args...) LBS_DEB_LL(LBS_DEB_MESH, " mesh", fmt, ##args) +#define lbs_deb_wext(fmt, args...) LBS_DEB_LL(LBS_DEB_WEXT, " wext", fmt, ##args) +#define lbs_deb_ioctl(fmt, args...) LBS_DEB_LL(LBS_DEB_IOCTL, " ioctl", fmt, ##args) +#define lbs_deb_scan(fmt, args...) LBS_DEB_LL(LBS_DEB_SCAN, " scan", fmt, ##args) +#define lbs_deb_assoc(fmt, args...) LBS_DEB_LL(LBS_DEB_ASSOC, " assoc", fmt, ##args) +#define lbs_deb_join(fmt, args...) LBS_DEB_LL(LBS_DEB_JOIN, " join", fmt, ##args) +#define lbs_deb_11d(fmt, args...) LBS_DEB_LL(LBS_DEB_11D, " 11d", fmt, ##args) +#define lbs_deb_debugfs(fmt, args...) LBS_DEB_LL(LBS_DEB_DEBUGFS, " debugfs", fmt, ##args) +#define lbs_deb_ethtool(fmt, args...) LBS_DEB_LL(LBS_DEB_ETHTOOL, " ethtool", fmt, ##args) +#define lbs_deb_host(fmt, args...) LBS_DEB_LL(LBS_DEB_HOST, " host", fmt, ##args) +#define lbs_deb_cmd(fmt, args...) LBS_DEB_LL(LBS_DEB_CMD, " cmd", fmt, ##args) +#define lbs_deb_rx(fmt, args...) LBS_DEB_LL(LBS_DEB_RX, " rx", fmt, ##args) +#define lbs_deb_tx(fmt, args...) LBS_DEB_LL(LBS_DEB_TX, " tx", fmt, ##args) +#define lbs_deb_fw(fmt, args...) LBS_DEB_LL(LBS_DEB_FW, " fw", fmt, ##args) +#define lbs_deb_usb(fmt, args...) LBS_DEB_LL(LBS_DEB_USB, " usb", fmt, ##args) +#define lbs_deb_usbd(dev, fmt, args...) LBS_DEB_LL(LBS_DEB_USB, " usbd", "%s:" fmt, (dev)->bus_id, ##args) +#define lbs_deb_cs(fmt, args...) LBS_DEB_LL(LBS_DEB_CS, " cs", fmt, ##args) +#define lbs_deb_thread(fmt, args...) LBS_DEB_LL(LBS_DEB_THREAD, " thread", fmt, ##args) #define lbs_pr_info(format, args...) 
\ printk(KERN_INFO DRV_NAME": " format, ## args) @@ -89,22 +89,28 @@ do { if ((libertas_debug & (grp)) == (grp)) \ printk(KERN_ALERT DRV_NAME": " format, ## args) #ifdef DEBUG -static inline void lbs_dbg_hex(char *prompt, u8 * buf, int len) +static inline void lbs_deb_hex(unsigned int grp, const char *prompt, u8 *buf, int len) { int i = 0; - if (!(libertas_debug & LBS_DEB_HEX)) - return; - - printk(KERN_DEBUG "%s: ", prompt); - for (i = 1; i <= len; i++) { - printk("%02x ", (u8) * buf); - buf++; + if (len && + (libertas_debug & LBS_DEB_HEX) && + (libertas_debug & grp)) + { + for (i = 1; i <= len; i++) { + if ((i & 0xf) == 1) { + if (i != 1) + printk("\n"); + printk(DRV_NAME " %s: ", prompt); + } + printk("%02x ", (u8) * buf); + buf++; + } + printk("\n"); } - printk("\n"); } #else -#define lbs_dbg_hex(x,y,z) do {} while (0) +#define lbs_deb_hex(grp,prompt,buf,len) do {} while (0) #endif @@ -149,17 +155,18 @@ static inline void lbs_dbg_hex(char *prompt, u8 * buf, int len) #define MRVDRV_CHANNELS_PER_SCAN 4 #define MRVDRV_MAX_CHANNELS_PER_SCAN 14 -#define MRVDRV_DEBUG_RX_PATH 0x00000001 -#define MRVDRV_DEBUG_TX_PATH 0x00000002 - #define MRVDRV_MIN_BEACON_INTERVAL 20 #define MRVDRV_MAX_BEACON_INTERVAL 1000 #define MRVDRV_BEACON_INTERVAL 100 +#define MARVELL_MESH_IE_LENGTH 9 + /** INT status Bit Definition*/ -#define his_cmddnldrdy 0x01 -#define his_cardevent 0x02 -#define his_cmdupldrdy 0x04 +#define MRVDRV_TX_DNLD_RDY 0x0001 +#define MRVDRV_RX_UPLD_RDY 0x0002 +#define MRVDRV_CMD_DNLD_RDY 0x0004 +#define MRVDRV_CMD_UPLD_RDY 0x0008 +#define MRVDRV_CARDEVENT 0x0010 #define SBI_EVENT_CAUSE_SHIFT 3 @@ -218,9 +225,6 @@ static inline void lbs_dbg_hex(char *prompt, u8 * buf, int len) #define CMD_F_HOSTCMD (1 << 0) #define FW_CAPINFO_WPA (1 << 0) -/** WPA key LENGTH*/ -#define MRVL_MAX_KEY_WPA_KEY_LENGTH 32 - #define KEY_LEN_WPA_AES 16 #define KEY_LEN_WPA_TKIP 32 #define KEY_LEN_WEP_104 13 @@ -247,10 +251,7 @@ static inline void lbs_dbg_hex(char *prompt, u8 * buf, int len) ((((int)(AVG) * (N -1)) + ((u16)(SNRNF) * \ AVG_SCALE)) / N)) -#define B_SUPPORTED_RATES 8 -#define G_SUPPORTED_RATES 14 - -#define WLAN_SUPPORTED_RATES 14 +#define MAX_RATES 14 #define MAX_LEDS 8 @@ -264,11 +265,7 @@ typedef struct _wlan_adapter wlan_adapter; extern const char libertas_driver_version[]; extern u16 libertas_region_code_to_index[MRVDRV_MAX_REGION_CODE]; -extern u8 libertas_supported_rates[G_SUPPORTED_RATES]; - -extern u8 libertas_adhoc_rates_g[G_SUPPORTED_RATES]; - -extern u8 libertas_adhoc_rates_b[4]; +extern u8 libertas_bg_rates[MAX_RATES]; /** ENUM definition*/ /** SNRNF_TYPE */ @@ -287,11 +284,11 @@ enum SNRNF_DATA { /** WLAN_802_11_POWER_MODE */ enum WLAN_802_11_POWER_MODE { - wlan802_11powermodecam, - wlan802_11powermodemax_psp, - wlan802_11Powermodefast_psp, + WLAN802_11POWERMODECAM, + WLAN802_11POWERMODEMAX_PSP, + WLAN802_11POWERMODEFAST_PSP, /*not a real mode, defined as an upper bound */ - wlan802_11powemodemax + WLAN802_11POWEMODEMAX }; /** PS_STATE */ @@ -311,14 +308,14 @@ enum DNLD_STATE { /** WLAN_MEDIA_STATE */ enum WLAN_MEDIA_STATE { - libertas_connected, - libertas_disconnected + LIBERTAS_CONNECTED, + LIBERTAS_DISCONNECTED }; /** WLAN_802_11_PRIVACY_FILTER */ enum WLAN_802_11_PRIVACY_FILTER { - wlan802_11privfilteracceptall, - wlan802_11privfilter8021xWEP + WLAN802_11PRIVFILTERACCEPTALL, + WLAN802_11PRIVFILTER8021XWEP }; /** mv_ms_type */ @@ -331,23 +328,23 @@ enum mv_ms_type { /** SNMP_MIB_INDEX_e */ enum SNMP_MIB_INDEX_e { - desired_bsstype_i = 0, - op_rateset_i, - bcnperiod_i, - dtimperiod_i, - 
assocrsp_timeout_i, - rtsthresh_i, - short_retrylim_i, - long_retrylim_i, - fragthresh_i, - dot11d_i, - dot11h_i, - manufid_i, - prodID_i, - manuf_oui_i, - manuf_name_i, - manuf_prodname_i, - manuf_prodver_i, + DESIRED_BSSTYPE_I = 0, + OP_RATESET_I, + BCNPERIOD_I, + DTIMPERIOD_I, + ASSOCRSP_TIMEOUT_I, + RTSTHRESH_I, + SHORT_RETRYLIM_I, + LONG_RETRYLIM_I, + FRAGTHRESH_I, + DOT11D_I, + DOT11H_I, + MANUFID_I, + PRODID_I, + MANUF_OUI_I, + MANUF_NAME_I, + MANUF_PRODNAME_I, + MANUF_PRODVER_I, }; /** KEY_TYPE_ID */ diff --git a/drivers/net/wireless/libertas/dev.h b/drivers/net/wireless/libertas/dev.h index 785192b884bc27c1419d7aff773c8d477b1fb21a..1fb807aa91b96ec84e9742777360bb5681776991 100644 --- a/drivers/net/wireless/libertas/dev.h +++ b/drivers/net/wireless/libertas/dev.h @@ -14,7 +14,6 @@ #include "defs.h" #include "scan.h" -#include "thread.h" extern struct ethtool_ops libertas_ethtool_ops; @@ -73,10 +72,8 @@ struct current_bss_params { u8 band; /** channel */ u8 channel; - /** number of rates supported */ - int numofrates; - /** supported rates*/ - u8 datarates[WLAN_SUPPORTED_RATES]; + /** zero-terminated array of supported data rates */ + u8 rates[MAX_RATES + 1]; }; /** sleep_params */ @@ -106,6 +103,8 @@ struct _wlan_private { int open; int mesh_open; int infra_open; + int mesh_autostart_enabled; + __le16 boot2_version; char name[DEV_NAME_LEN]; @@ -114,7 +113,9 @@ struct _wlan_private { struct net_device *dev; struct net_device_stats stats; - struct net_device *mesh_dev ; /* Virtual device */ + struct net_device *mesh_dev; /* Virtual device */ + struct net_device *rtap_net_dev; + struct ieee80211_device *ieee; struct iw_statistics wstats; struct wlan_mesh_stats mstats; @@ -142,20 +143,18 @@ struct _wlan_private { all other bits reserved 0 */ u8 dnld_sent; - const struct firmware *firmware; struct device *hotplug_device; /** thread to service interrupts */ - struct wlan_thread mainthread; + struct task_struct *main_thread; + wait_queue_head_t waitq; + struct workqueue_struct *work_thread; + struct delayed_work scan_work; struct delayed_work assoc_work; - struct workqueue_struct *assoc_thread; struct work_struct sync_channel; /** Hardware access */ - int (*hw_register_dev) (wlan_private * priv); - int (*hw_unregister_dev) (wlan_private *); - int (*hw_prog_firmware) (wlan_private *); int (*hw_host_to_card) (wlan_private * priv, u8 type, u8 * payload, u16 nb); int (*hw_get_int_status) (wlan_private * priv, u8 *); int (*hw_read_event_cause) (wlan_private *); @@ -188,12 +187,12 @@ struct assoc_request { u8 bssid[ETH_ALEN]; /** WEP keys */ - struct WLAN_802_11_KEY wep_keys[4]; + struct enc_key wep_keys[4]; u16 wep_tx_keyidx; /** WPA keys */ - struct WLAN_802_11_KEY wpa_mcast_key; - struct WLAN_802_11_KEY wpa_unicast_key; + struct enc_key wpa_mcast_key; + struct enc_key wpa_unicast_key; struct wlan_802_11_security secinfo; @@ -259,23 +258,15 @@ struct _wlan_adapter { /* IW_MODE_* */ u8 mode; - u8 prev_ssid[IW_ESSID_MAX_SIZE + 1]; - u8 prev_ssid_len; - u8 prev_bssid[ETH_ALEN]; - /* Scan results list */ struct list_head network_list; struct list_head network_free_list; struct bss_descriptor *networks; - u8 scantype; - u32 scanmode; - - u16 beaconperiod; u8 adhoccreate; /** capability Info used in Association, start, join */ - struct ieeetypes_capinfo capinfo; + u16 capability; /** MAC address information */ u8 current_addr[ETH_ALEN]; @@ -287,20 +278,10 @@ struct _wlan_adapter { u16 enablehwauto; u16 ratebitmap; - /** control G rates */ - u8 adhoc_grate_enabled; - - u32 txantenna; - u32 rxantenna; 
u32 fragthsd; u32 rtsthsd; - u32 datarate; - u8 is_datarate_auto; - - u16 listeninterval; - u16 prescan; u8 txretrycount; /** Tx-related variables (for single packet tx) */ @@ -311,22 +292,17 @@ struct _wlan_adapter { u16 currentpacketfilter; u32 connect_status; u16 regioncode; - u16 regiontableindex; u16 txpowerlevel; /** POWER MANAGEMENT AND PnP SUPPORT */ u8 surpriseremoved; - u16 atimwindow; u16 psmode; /* Wlan802_11PowermodeCAM=disable Wlan802_11PowermodeMAX_PSP=enable */ - u16 multipledtim; u32 psstate; u8 needtowakeup; struct PS_CMD_ConfirmSleep libertas_ps_confirm_sleep; - u16 locallisteninterval; - u16 nullpktinterval; struct assoc_request * pending_assoc_req; struct assoc_request * in_progress_assoc_req; @@ -335,23 +311,18 @@ struct _wlan_adapter { struct wlan_802_11_security secinfo; /** WEP keys */ - struct WLAN_802_11_KEY wep_keys[4]; + struct enc_key wep_keys[4]; u16 wep_tx_keyidx; /** WPA keys */ - struct WLAN_802_11_KEY wpa_mcast_key; - struct WLAN_802_11_KEY wpa_unicast_key; + struct enc_key wpa_mcast_key; + struct enc_key wpa_unicast_key; /** WPA Information Elements*/ u8 wpa_ie[MAX_WPA_IE_LEN]; u8 wpa_ie_len; - u16 rxantennamode; - u16 txantennamode; - /** Requested Signal Strength*/ - u16 bcn_avg_factor; - u16 data_avg_factor; u16 SNR[MAX_TYPE_B][MAX_TYPE_AVG]; u16 NF[MAX_TYPE_B][MAX_TYPE_AVG]; u8 RSSI[MAX_TYPE_B][MAX_TYPE_AVG]; @@ -359,15 +330,13 @@ struct _wlan_adapter { u8 rawNF[DEFAULT_DATA_AVG_FACTOR]; u16 nextSNRNF; u16 numSNRNF; - u16 rxpd_rate; u8 radioon; u32 preamble; - /** Multi bands Parameter*/ - u8 libertas_supported_rates[G_SUPPORTED_RATES]; - - /** Blue Tooth Co-existence Arbitration */ + /** data rate stuff */ + u8 cur_rate; + u8 auto_rate; /** sleep_params */ struct sleep_params sp; @@ -392,14 +361,8 @@ struct _wlan_adapter { struct wlan_offset_value offsetvalue; struct cmd_ds_802_11_get_log logmsg; - u16 scanprobes; - - u32 pkttxctrl; - u16 txrate; - u32 linkmode; - u32 radiomode; - u32 debugmode; + u32 monitormode; u8 fw_ready; u8 last_scanned_channel; diff --git a/drivers/net/wireless/libertas/ethtool.c b/drivers/net/wireless/libertas/ethtool.c index 96f1974685d4f4662fd88bbd185edf359daaf626..3dae15211b6ad3124c28324eefb9e0f04a4933c8 100644 --- a/drivers/net/wireless/libertas/ethtool.c +++ b/drivers/net/wireless/libertas/ethtool.c @@ -60,8 +60,7 @@ static int libertas_ethtool_get_eeprom(struct net_device *dev, // mutex_lock(&priv->mutex); - adapter->prdeeprom = - (char *)kmalloc(eeprom->len+sizeof(regctrl), GFP_KERNEL); + adapter->prdeeprom = kmalloc(eeprom->len+sizeof(regctrl), GFP_KERNEL); if (!adapter->prdeeprom) return -ENOMEM; memcpy(adapter->prdeeprom, ®ctrl, sizeof(regctrl)); @@ -72,9 +71,9 @@ static int libertas_ethtool_get_eeprom(struct net_device *dev, regctrl.action, regctrl.offset, regctrl.NOB); ret = libertas_prepare_and_send_command(priv, - cmd_802_11_eeprom_access, + CMD_802_11_EEPROM_ACCESS, regctrl.action, - cmd_option_waitforrsp, 0, + CMD_OPTION_WAITFORRSP, 0, ®ctrl); if (ret) { @@ -110,56 +109,48 @@ static void libertas_ethtool_get_stats(struct net_device * dev, struct ethtool_stats * stats, u64 * data) { wlan_private *priv = dev->priv; + struct cmd_ds_mesh_access mesh_access; + int ret; lbs_deb_enter(LBS_DEB_ETHTOOL); - stats->cmd = ETHTOOL_GSTATS; - BUG_ON(stats->n_stats != MESH_STATS_NUM); - - data[0] = priv->mstats.fwd_drop_rbt; - data[1] = priv->mstats.fwd_drop_ttl; - data[2] = priv->mstats.fwd_drop_noroute; - data[3] = priv->mstats.fwd_drop_nobuf; - data[4] = priv->mstats.fwd_unicast_cnt; - data[5] = 
priv->mstats.fwd_bcast_cnt; - data[6] = priv->mstats.drop_blind; - data[7] = priv->mstats.tx_failed_cnt; + /* Get Mesh Statistics */ + ret = libertas_prepare_and_send_command(priv, + CMD_MESH_ACCESS, CMD_ACT_MESH_GET_STATS, + CMD_OPTION_WAITFORRSP, 0, &mesh_access); + + if (ret) + return; + + priv->mstats.fwd_drop_rbt = le32_to_cpu(mesh_access.data[0]); + priv->mstats.fwd_drop_ttl = le32_to_cpu(mesh_access.data[1]); + priv->mstats.fwd_drop_noroute = le32_to_cpu(mesh_access.data[2]); + priv->mstats.fwd_drop_nobuf = le32_to_cpu(mesh_access.data[3]); + priv->mstats.fwd_unicast_cnt = le32_to_cpu(mesh_access.data[4]); + priv->mstats.fwd_bcast_cnt = le32_to_cpu(mesh_access.data[5]); + priv->mstats.drop_blind = le32_to_cpu(mesh_access.data[6]); + priv->mstats.tx_failed_cnt = le32_to_cpu(mesh_access.data[7]); + + data[0] = priv->mstats.fwd_drop_rbt; + data[1] = priv->mstats.fwd_drop_ttl; + data[2] = priv->mstats.fwd_drop_noroute; + data[3] = priv->mstats.fwd_drop_nobuf; + data[4] = priv->mstats.fwd_unicast_cnt; + data[5] = priv->mstats.fwd_bcast_cnt; + data[6] = priv->mstats.drop_blind; + data[7] = priv->mstats.tx_failed_cnt; lbs_deb_enter(LBS_DEB_ETHTOOL); } -static int libertas_ethtool_get_stats_count(struct net_device * dev) +static int libertas_ethtool_get_sset_count(struct net_device * dev, int sset) { - int ret; - wlan_private *priv = dev->priv; - struct cmd_ds_mesh_access mesh_access; - - lbs_deb_enter(LBS_DEB_ETHTOOL); - - /* Get Mesh Statistics */ - ret = libertas_prepare_and_send_command(priv, - cmd_mesh_access, cmd_act_mesh_get_stats, - cmd_option_waitforrsp, 0, &mesh_access); - - if (ret) { - ret = 0; - goto done; + switch (sset) { + case ETH_SS_STATS: + return MESH_STATS_NUM; + default: + return -EOPNOTSUPP; } - - priv->mstats.fwd_drop_rbt = le32_to_cpu(mesh_access.data[0]); - priv->mstats.fwd_drop_ttl = le32_to_cpu(mesh_access.data[1]); - priv->mstats.fwd_drop_noroute = le32_to_cpu(mesh_access.data[2]); - priv->mstats.fwd_drop_nobuf = le32_to_cpu(mesh_access.data[3]); - priv->mstats.fwd_unicast_cnt = le32_to_cpu(mesh_access.data[4]); - priv->mstats.fwd_bcast_cnt = le32_to_cpu(mesh_access.data[5]); - priv->mstats.drop_blind = le32_to_cpu(mesh_access.data[6]); - priv->mstats.tx_failed_cnt = le32_to_cpu(mesh_access.data[7]); - - ret = MESH_STATS_NUM; - -done: - lbs_deb_enter_args(LBS_DEB_ETHTOOL, "ret %d", ret); - return ret; } static void libertas_ethtool_get_strings (struct net_device * dev, @@ -186,7 +177,7 @@ struct ethtool_ops libertas_ethtool_ops = { .get_drvinfo = libertas_ethtool_get_drvinfo, .get_eeprom = libertas_ethtool_get_eeprom, .get_eeprom_len = libertas_ethtool_get_eeprom_len, - .get_stats_count = libertas_ethtool_get_stats_count, + .get_sset_count = libertas_ethtool_get_sset_count, .get_ethtool_stats = libertas_ethtool_get_stats, .get_strings = libertas_ethtool_get_strings, }; diff --git a/drivers/net/wireless/libertas/fw.c b/drivers/net/wireless/libertas/fw.c deleted file mode 100644 index 2dc84ff7a54ac4b574f6c82915d187abaf37deaf..0000000000000000000000000000000000000000 --- a/drivers/net/wireless/libertas/fw.c +++ /dev/null @@ -1,349 +0,0 @@ -/** - * This file contains the initialization for FW and HW - */ -#include - -#include "host.h" -#include "defs.h" -#include "decl.h" -#include "dev.h" -#include "wext.h" -#include "if_usb.h" - -/** - * @brief This function checks the validity of Boot2/FW image. 
- * - * @param data pointer to image - * len image length - * @return 0 or -1 - */ -static int check_fwfile_format(u8 *data, u32 totlen) -{ - u32 bincmd, exit; - u32 blksize, offset, len; - int ret; - - ret = 1; - exit = len = 0; - - do { - struct fwheader *fwh = (void *)data; - - bincmd = le32_to_cpu(fwh->dnldcmd); - blksize = le32_to_cpu(fwh->datalength); - switch (bincmd) { - case FW_HAS_DATA_TO_RECV: - offset = sizeof(struct fwheader) + blksize; - data += offset; - len += offset; - if (len >= totlen) - exit = 1; - break; - case FW_HAS_LAST_BLOCK: - exit = 1; - ret = 0; - break; - default: - exit = 1; - break; - } - } while (!exit); - - if (ret) - lbs_pr_err("firmware file format check FAIL\n"); - else - lbs_deb_fw("firmware file format check PASS\n"); - - return ret; -} - -/** - * @brief This function downloads firmware image, gets - * HW spec from firmware and set basic parameters to - * firmware. - * - * @param priv A pointer to wlan_private structure - * @return 0 or -1 - */ -static int wlan_setup_station_hw(wlan_private * priv, char *fw_name) -{ - int ret = -1; - wlan_adapter *adapter = priv->adapter; - - lbs_deb_enter(LBS_DEB_FW); - - if ((ret = request_firmware(&priv->firmware, fw_name, - priv->hotplug_device)) < 0) { - lbs_pr_err("request_firmware() failed with %#x\n", ret); - lbs_pr_err("firmware %s not found\n", fw_name); - goto done; - } - - if (check_fwfile_format(priv->firmware->data, priv->firmware->size)) { - release_firmware(priv->firmware); - goto done; - } - - ret = priv->hw_prog_firmware(priv); - - release_firmware(priv->firmware); - - if (ret) { - lbs_deb_fw("bootloader in invalid state\n"); - ret = -1; - goto done; - } - - /* - * Read MAC address from HW - */ - memset(adapter->current_addr, 0xff, ETH_ALEN); - - ret = libertas_prepare_and_send_command(priv, cmd_get_hw_spec, - 0, cmd_option_waitforrsp, 0, NULL); - - if (ret) { - ret = -1; - goto done; - } - - libertas_set_mac_packet_filter(priv); - - /* Get the supported Data rates */ - ret = libertas_prepare_and_send_command(priv, cmd_802_11_data_rate, - cmd_act_get_tx_rate, - cmd_option_waitforrsp, 0, NULL); - - if (ret) { - ret = -1; - goto done; - } - - ret = 0; -done: - lbs_deb_leave_args(LBS_DEB_FW, "ret %d", ret); - return ret; -} - -static int wlan_allocate_adapter(wlan_private * priv) -{ - size_t bufsize; - wlan_adapter *adapter = priv->adapter; - - /* Allocate buffer to store the BSSID list */ - bufsize = MAX_NETWORK_COUNT * sizeof(struct bss_descriptor); - adapter->networks = kzalloc(bufsize, GFP_KERNEL); - if (!adapter->networks) { - lbs_pr_err("Out of memory allocating beacons\n"); - libertas_free_adapter(priv); - return -ENOMEM; - } - - /* Allocate the command buffers */ - libertas_allocate_cmd_buffer(priv); - - memset(&adapter->libertas_ps_confirm_sleep, 0, sizeof(struct PS_CMD_ConfirmSleep)); - adapter->libertas_ps_confirm_sleep.seqnum = cpu_to_le16(++adapter->seqnum); - adapter->libertas_ps_confirm_sleep.command = - cpu_to_le16(cmd_802_11_ps_mode); - adapter->libertas_ps_confirm_sleep.size = - cpu_to_le16(sizeof(struct PS_CMD_ConfirmSleep)); - adapter->libertas_ps_confirm_sleep.result = 0; - adapter->libertas_ps_confirm_sleep.action = - cpu_to_le16(cmd_subcmd_sleep_confirmed); - - return 0; -} - -static void wlan_init_adapter(wlan_private * priv) -{ - wlan_adapter *adapter = priv->adapter; - int i; - - adapter->scanprobes = 0; - - adapter->bcn_avg_factor = DEFAULT_BCN_AVG_FACTOR; - adapter->data_avg_factor = DEFAULT_DATA_AVG_FACTOR; - - /* ATIM params */ - adapter->atimwindow = 0; - - 
adapter->connect_status = libertas_disconnected; - memset(adapter->current_addr, 0xff, ETH_ALEN); - - /* scan type */ - adapter->scantype = cmd_scan_type_active; - - /* scan mode */ - adapter->scanmode = cmd_bss_type_any; - - /* 802.11 specific */ - adapter->secinfo.wep_enabled = 0; - for (i = 0; i < sizeof(adapter->wep_keys) / sizeof(adapter->wep_keys[0]); - i++) - memset(&adapter->wep_keys[i], 0, sizeof(struct WLAN_802_11_KEY)); - adapter->wep_tx_keyidx = 0; - adapter->secinfo.auth_mode = IW_AUTH_ALG_OPEN_SYSTEM; - adapter->mode = IW_MODE_INFRA; - - adapter->pending_assoc_req = NULL; - adapter->in_progress_assoc_req = NULL; - - /* Initialize scan result lists */ - INIT_LIST_HEAD(&adapter->network_free_list); - INIT_LIST_HEAD(&adapter->network_list); - for (i = 0; i < MAX_NETWORK_COUNT; i++) { - list_add_tail(&adapter->networks[i].list, - &adapter->network_free_list); - } - - mutex_init(&adapter->lock); - - adapter->prescan = 1; - - memset(&adapter->curbssparams, 0, sizeof(adapter->curbssparams)); - adapter->curbssparams.channel = DEFAULT_AD_HOC_CHANNEL; - - /* PnP and power profile */ - adapter->surpriseremoved = 0; - - adapter->currentpacketfilter = - cmd_act_mac_rx_on | cmd_act_mac_tx_on; - - adapter->radioon = RADIO_ON; - adapter->txantenna = RF_ANTENNA_2; - adapter->rxantenna = RF_ANTENNA_AUTO; - - adapter->is_datarate_auto = 1; - adapter->beaconperiod = MRVDRV_BEACON_INTERVAL; - - // set default value of capinfo. -#define SHORT_PREAMBLE_ALLOWED 1 - memset(&adapter->capinfo, 0, sizeof(adapter->capinfo)); - adapter->capinfo.shortpreamble = SHORT_PREAMBLE_ALLOWED; - - adapter->psmode = wlan802_11powermodecam; - adapter->multipledtim = MRVDRV_DEFAULT_MULTIPLE_DTIM; - - adapter->listeninterval = MRVDRV_DEFAULT_LISTEN_INTERVAL; - - adapter->psstate = PS_STATE_FULL_POWER; - adapter->needtowakeup = 0; - adapter->locallisteninterval = 0; /* default value in firmware will be used */ - - adapter->datarate = 0; // Initially indicate the rate as auto - - adapter->adhoc_grate_enabled = 0; - - adapter->intcounter = 0; - - adapter->currenttxskb = NULL; - adapter->pkttxctrl = 0; - - memset(&adapter->tx_queue_ps, 0, NR_TX_QUEUE*sizeof(struct sk_buff*)); - adapter->tx_queue_idx = 0; - spin_lock_init(&adapter->txqueue_lock); - - return; -} - -static void command_timer_fn(unsigned long data); - -int libertas_init_fw(wlan_private * priv, char *fw_name) -{ - int ret = -1; - wlan_adapter *adapter = priv->adapter; - - lbs_deb_enter(LBS_DEB_FW); - - /* Allocate adapter structure */ - if ((ret = wlan_allocate_adapter(priv)) != 0) - goto done; - - /* init adapter structure */ - wlan_init_adapter(priv); - - /* init timer etc. */ - setup_timer(&adapter->command_timer, command_timer_fn, - (unsigned long)priv); - - /* download fimrware etc. 
*/ - if ((ret = wlan_setup_station_hw(priv, fw_name)) != 0) { - del_timer_sync(&adapter->command_timer); - goto done; - } - - /* init 802.11d */ - libertas_init_11d(priv); - - ret = 0; -done: - lbs_deb_leave_args(LBS_DEB_FW, "ret %d", ret); - return ret; -} - -void libertas_free_adapter(wlan_private * priv) -{ - wlan_adapter *adapter = priv->adapter; - - if (!adapter) { - lbs_deb_fw("why double free adapter?\n"); - return; - } - - lbs_deb_fw("free command buffer\n"); - libertas_free_cmd_buffer(priv); - - lbs_deb_fw("free command_timer\n"); - del_timer(&adapter->command_timer); - - lbs_deb_fw("free scan results table\n"); - kfree(adapter->networks); - adapter->networks = NULL; - - /* Free the adapter object itself */ - lbs_deb_fw("free adapter\n"); - kfree(adapter); - priv->adapter = NULL; -} - -/** - * This function handles the timeout of command sending. - * It will re-send the same command again. - */ -static void command_timer_fn(unsigned long data) -{ - wlan_private *priv = (wlan_private *)data; - wlan_adapter *adapter = priv->adapter; - struct cmd_ctrl_node *ptempnode; - struct cmd_ds_command *cmd; - unsigned long flags; - - ptempnode = adapter->cur_cmd; - if (ptempnode == NULL) { - lbs_deb_fw("ptempnode empty\n"); - return; - } - - cmd = (struct cmd_ds_command *)ptempnode->bufvirtualaddr; - if (!cmd) { - lbs_deb_fw("cmd is NULL\n"); - return; - } - - lbs_deb_fw("command_timer_fn fired, cmd %x\n", cmd->command); - - if (!adapter->fw_ready) - return; - - spin_lock_irqsave(&adapter->driver_lock, flags); - adapter->cur_cmd = NULL; - spin_unlock_irqrestore(&adapter->driver_lock, flags); - - lbs_deb_fw("re-sending same command because of timeout\n"); - libertas_queue_cmd(adapter, ptempnode, 0); - - wake_up_interruptible(&priv->mainthread.waitq); - - return; -} diff --git a/drivers/net/wireless/libertas/host.h b/drivers/net/wireless/libertas/host.h index 7509cc10af3c376a2f3bdf94f942b9bd7d59b920..b37ddbca969f74dd1e056b358f6f2fdebbca0ef4 100644 --- a/drivers/net/wireless/libertas/host.h +++ b/drivers/net/wireless/libertas/host.h @@ -20,224 +20,163 @@ #define OID_802_11_TX_RETRYCOUNT 0x0000801D #define OID_802_11D_ENABLE 0x00008020 -#define cmd_option_waitforrsp 0x0002 +#define CMD_OPTION_WAITFORRSP 0x0002 -/** Host command ID */ -#define cmd_code_dnld 0x0002 -#define cmd_get_hw_spec 0x0003 -#define cmd_eeprom_update 0x0004 -#define cmd_802_11_reset 0x0005 -#define cmd_802_11_scan 0x0006 -#define cmd_802_11_get_log 0x000b -#define cmd_mac_multicast_adr 0x0010 -#define cmd_802_11_authenticate 0x0011 -#define cmd_802_11_eeprom_access 0x0059 -#define cmd_802_11_associate 0x0050 -#define cmd_802_11_set_wep 0x0013 -#define cmd_802_11_get_stat 0x0014 -#define cmd_802_3_get_stat 0x0015 -#define cmd_802_11_snmp_mib 0x0016 -#define cmd_mac_reg_map 0x0017 -#define cmd_bbp_reg_map 0x0018 -#define cmd_mac_reg_access 0x0019 -#define cmd_bbp_reg_access 0x001a -#define cmd_rf_reg_access 0x001b -#define cmd_802_11_radio_control 0x001c -#define cmd_802_11_rf_channel 0x001d -#define cmd_802_11_rf_tx_power 0x001e -#define cmd_802_11_rssi 0x001f -#define cmd_802_11_rf_antenna 0x0020 +/** Host command IDs */ -#define cmd_802_11_ps_mode 0x0021 +/* Return command are almost always the same as the host command, but with + * bit 15 set high. There are a few exceptions, though... 
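+ *
+ * For example, CMD_RET(CMD_802_11_SCAN) gives 0x8006 and
+ * CMD_RET(CMD_GET_HW_SPEC) gives 0x8003, matching the old hard-coded
+ * response codes. The association response is one of the exceptions:
+ * the firmware answers CMD_802_11_ASSOCIATE (0x0050) with 0x8012, which
+ * is why CMD_RET_802_11_ASSOCIATE is spelled out explicitly below.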
+ */ +#define CMD_RET(cmd) (0x8000 | cmd) -#define cmd_802_11_data_rate 0x0022 -#define cmd_rf_reg_map 0x0023 -#define cmd_802_11_deauthenticate 0x0024 -#define cmd_802_11_reassociate 0x0025 -#define cmd_802_11_disassociate 0x0026 -#define cmd_mac_control 0x0028 -#define cmd_802_11_ad_hoc_start 0x002b -#define cmd_802_11_ad_hoc_join 0x002c +/* Return command convention exceptions: */ +#define CMD_RET_802_11_ASSOCIATE 0x8012 -#define cmd_802_11_query_tkip_reply_cntrs 0x002e -#define cmd_802_11_enable_rsn 0x002f -#define cmd_802_11_pairwise_tsc 0x0036 -#define cmd_802_11_group_tsc 0x0037 -#define cmd_802_11_key_material 0x005e +/* Command codes */ +#define CMD_CODE_DNLD 0x0002 +#define CMD_GET_HW_SPEC 0x0003 +#define CMD_EEPROM_UPDATE 0x0004 +#define CMD_802_11_RESET 0x0005 +#define CMD_802_11_SCAN 0x0006 +#define CMD_802_11_GET_LOG 0x000b +#define CMD_MAC_MULTICAST_ADR 0x0010 +#define CMD_802_11_AUTHENTICATE 0x0011 +#define CMD_802_11_EEPROM_ACCESS 0x0059 +#define CMD_802_11_ASSOCIATE 0x0050 +#define CMD_802_11_SET_WEP 0x0013 +#define CMD_802_11_GET_STAT 0x0014 +#define CMD_802_3_GET_STAT 0x0015 +#define CMD_802_11_SNMP_MIB 0x0016 +#define CMD_MAC_REG_MAP 0x0017 +#define CMD_BBP_REG_MAP 0x0018 +#define CMD_MAC_REG_ACCESS 0x0019 +#define CMD_BBP_REG_ACCESS 0x001a +#define CMD_RF_REG_ACCESS 0x001b +#define CMD_802_11_RADIO_CONTROL 0x001c +#define CMD_802_11_RF_CHANNEL 0x001d +#define CMD_802_11_RF_TX_POWER 0x001e +#define CMD_802_11_RSSI 0x001f +#define CMD_802_11_RF_ANTENNA 0x0020 -#define cmd_802_11_set_afc 0x003c -#define cmd_802_11_get_afc 0x003d +#define CMD_802_11_PS_MODE 0x0021 -#define cmd_802_11_ad_hoc_stop 0x0040 +#define CMD_802_11_DATA_RATE 0x0022 +#define CMD_RF_REG_MAP 0x0023 +#define CMD_802_11_DEAUTHENTICATE 0x0024 +#define CMD_802_11_REASSOCIATE 0x0025 +#define CMD_802_11_DISASSOCIATE 0x0026 +#define CMD_MAC_CONTROL 0x0028 +#define CMD_802_11_AD_HOC_START 0x002b +#define CMD_802_11_AD_HOC_JOIN 0x002c -#define cmd_802_11_beacon_stop 0x0049 +#define CMD_802_11_QUERY_TKIP_REPLY_CNTRS 0x002e +#define CMD_802_11_ENABLE_RSN 0x002f +#define CMD_802_11_PAIRWISE_TSC 0x0036 +#define CMD_802_11_GROUP_TSC 0x0037 +#define CMD_802_11_KEY_MATERIAL 0x005e -#define cmd_802_11_mac_address 0x004D -#define cmd_802_11_eeprom_access 0x0059 +#define CMD_802_11_SET_AFC 0x003c +#define CMD_802_11_GET_AFC 0x003d -#define cmd_802_11_band_config 0x0058 +#define CMD_802_11_AD_HOC_STOP 0x0040 -#define cmd_802_11d_domain_info 0x005b +#define CMD_802_11_BEACON_STOP 0x0049 -#define cmd_802_11_sleep_params 0x0066 +#define CMD_802_11_MAC_ADDRESS 0x004D +#define CMD_802_11_EEPROM_ACCESS 0x0059 -#define cmd_802_11_inactivity_timeout 0x0067 +#define CMD_802_11_BAND_CONFIG 0x0058 -#define cmd_802_11_tpc_cfg 0x0072 -#define cmd_802_11_pwr_cfg 0x0073 +#define CMD_802_11D_DOMAIN_INFO 0x005b -#define cmd_802_11_led_gpio_ctrl 0x004e +#define CMD_802_11_SLEEP_PARAMS 0x0066 -#define cmd_802_11_subscribe_event 0x0075 +#define CMD_802_11_INACTIVITY_TIMEOUT 0x0067 -#define cmd_802_11_rate_adapt_rateset 0x0076 +#define CMD_802_11_TPC_CFG 0x0072 +#define CMD_802_11_PWR_CFG 0x0073 -#define cmd_802_11_tx_rate_query 0x007f +#define CMD_802_11_LED_GPIO_CTRL 0x004e -#define cmd_get_tsf 0x0080 +#define CMD_802_11_SUBSCRIBE_EVENT 0x0075 -#define cmd_bt_access 0x0087 -#define cmd_ret_bt_access 0x8087 +#define CMD_802_11_RATE_ADAPT_RATESET 0x0076 -#define cmd_fwt_access 0x0095 -#define cmd_ret_fwt_access 0x8095 +#define CMD_802_11_TX_RATE_QUERY 0x007f -#define cmd_mesh_access 0x009b -#define cmd_ret_mesh_access 0x809b +#define 
CMD_GET_TSF 0x0080 + +#define CMD_BT_ACCESS 0x0087 + +#define CMD_FWT_ACCESS 0x0095 + +#define CMD_802_11_MONITOR_MODE 0x0098 + +#define CMD_MESH_ACCESS 0x009b + +#define CMD_SET_BOOT2_VER 0x00a5 /* For the IEEE Power Save */ -#define cmd_subcmd_enter_ps 0x0030 -#define cmd_subcmd_exit_ps 0x0031 -#define cmd_subcmd_sleep_confirmed 0x0034 -#define cmd_subcmd_full_powerdown 0x0035 -#define cmd_subcmd_full_powerup 0x0036 +#define CMD_SUBCMD_ENTER_PS 0x0030 +#define CMD_SUBCMD_EXIT_PS 0x0031 +#define CMD_SUBCMD_SLEEP_CONFIRMED 0x0034 +#define CMD_SUBCMD_FULL_POWERDOWN 0x0035 +#define CMD_SUBCMD_FULL_POWERUP 0x0036 + +#define CMD_ENABLE_RSN 0x0001 +#define CMD_DISABLE_RSN 0x0000 + +#define CMD_ACT_SET 0x0001 +#define CMD_ACT_GET 0x0000 + +#define CMD_ACT_GET_AES (CMD_ACT_GET + 2) +#define CMD_ACT_SET_AES (CMD_ACT_SET + 2) +#define CMD_ACT_REMOVE_AES (CMD_ACT_SET + 3) + +/* Define action or option for CMD_802_11_SET_WEP */ +#define CMD_ACT_ADD 0x0002 +#define CMD_ACT_REMOVE 0x0004 +#define CMD_ACT_USE_DEFAULT 0x0008 + +#define CMD_TYPE_WEP_40_BIT 0x01 +#define CMD_TYPE_WEP_104_BIT 0x02 + +#define CMD_NUM_OF_WEP_KEYS 4 + +#define CMD_WEP_KEY_INDEX_MASK 0x3fff -/* command RET code, MSB is set to 1 */ -#define cmd_ret_hw_spec_info 0x8003 -#define cmd_ret_eeprom_update 0x8004 -#define cmd_ret_802_11_reset 0x8005 -#define cmd_ret_802_11_scan 0x8006 -#define cmd_ret_802_11_get_log 0x800b -#define cmd_ret_mac_control 0x8028 -#define cmd_ret_mac_multicast_adr 0x8010 -#define cmd_ret_802_11_authenticate 0x8011 -#define cmd_ret_802_11_deauthenticate 0x8024 -#define cmd_ret_802_11_associate 0x8012 -#define cmd_ret_802_11_reassociate 0x8025 -#define cmd_ret_802_11_disassociate 0x8026 -#define cmd_ret_802_11_set_wep 0x8013 -#define cmd_ret_802_11_stat 0x8014 -#define cmd_ret_802_3_stat 0x8015 -#define cmd_ret_802_11_snmp_mib 0x8016 -#define cmd_ret_mac_reg_map 0x8017 -#define cmd_ret_bbp_reg_map 0x8018 -#define cmd_ret_rf_reg_map 0x8023 -#define cmd_ret_mac_reg_access 0x8019 -#define cmd_ret_bbp_reg_access 0x801a -#define cmd_ret_rf_reg_access 0x801b -#define cmd_ret_802_11_radio_control 0x801c -#define cmd_ret_802_11_rf_channel 0x801d -#define cmd_ret_802_11_rssi 0x801f -#define cmd_ret_802_11_rf_tx_power 0x801e -#define cmd_ret_802_11_rf_antenna 0x8020 -#define cmd_ret_802_11_ps_mode 0x8021 -#define cmd_ret_802_11_data_rate 0x8022 - -#define cmd_ret_802_11_ad_hoc_start 0x802B -#define cmd_ret_802_11_ad_hoc_join 0x802C - -#define cmd_ret_802_11_query_tkip_reply_cntrs 0x802e -#define cmd_ret_802_11_enable_rsn 0x802f -#define cmd_ret_802_11_pairwise_tsc 0x8036 -#define cmd_ret_802_11_group_tsc 0x8037 -#define cmd_ret_802_11_key_material 0x805e - -#define cmd_enable_rsn 0x0001 -#define cmd_disable_rsn 0x0000 - -#define cmd_act_set 0x0001 -#define cmd_act_get 0x0000 - -#define cmd_act_get_AES (cmd_act_get + 2) -#define cmd_act_set_AES (cmd_act_set + 2) -#define cmd_act_remove_aes (cmd_act_set + 3) - -#define cmd_ret_802_11_set_afc 0x803c -#define cmd_ret_802_11_get_afc 0x803d - -#define cmd_ret_802_11_ad_hoc_stop 0x8040 - -#define cmd_ret_802_11_beacon_stop 0x8049 - -#define cmd_ret_802_11_mac_address 0x804D -#define cmd_ret_802_11_eeprom_access 0x8059 - -#define cmd_ret_802_11_band_config 0x8058 - -#define cmd_ret_802_11_sleep_params 0x8066 - -#define cmd_ret_802_11_inactivity_timeout 0x8067 - -#define cmd_ret_802_11d_domain_info (0x8000 | \ - cmd_802_11d_domain_info) - -#define cmd_ret_802_11_tpc_cfg (cmd_802_11_tpc_cfg | 0x8000) -#define cmd_ret_802_11_pwr_cfg (cmd_802_11_pwr_cfg | 0x8000) - -#define 
cmd_ret_802_11_led_gpio_ctrl 0x804e - -#define cmd_ret_802_11_subscribe_event (cmd_802_11_subscribe_event | 0x8000) - -#define cmd_ret_802_11_rate_adapt_rateset (cmd_802_11_rate_adapt_rateset | 0x8000) - -#define cmd_rte_802_11_tx_rate_query (cmd_802_11_tx_rate_query | 0x8000) - -#define cmd_ret_get_tsf 0x8080 - -/* Define action or option for cmd_802_11_set_wep */ -#define cmd_act_add 0x0002 -#define cmd_act_remove 0x0004 -#define cmd_act_use_default 0x0008 - -#define cmd_type_wep_40_bit 0x0001 -#define cmd_type_wep_104_bit 0x0002 - -#define cmd_NUM_OF_WEP_KEYS 4 - -#define cmd_WEP_KEY_INDEX_MASK 0x3fff - -/* Define action or option for cmd_802_11_reset */ -#define cmd_act_halt 0x0003 +/* Define action or option for CMD_802_11_RESET */ +#define CMD_ACT_HALT 0x0003 -/* Define action or option for cmd_802_11_scan */ -#define cmd_bss_type_bss 0x0001 -#define cmd_bss_type_ibss 0x0002 -#define cmd_bss_type_any 0x0003 +/* Define action or option for CMD_802_11_SCAN */ +#define CMD_BSS_TYPE_BSS 0x0001 +#define CMD_BSS_TYPE_IBSS 0x0002 +#define CMD_BSS_TYPE_ANY 0x0003 -/* Define action or option for cmd_802_11_scan */ -#define cmd_scan_type_active 0x0000 -#define cmd_scan_type_passive 0x0001 +/* Define action or option for CMD_802_11_SCAN */ +#define CMD_SCAN_TYPE_ACTIVE 0x0000 +#define CMD_SCAN_TYPE_PASSIVE 0x0001 -#define cmd_scan_radio_type_bg 0 +#define CMD_SCAN_RADIO_TYPE_BG 0 -#define cmd_scan_probe_delay_time 0 +#define CMD_SCAN_PROBE_DELAY_TIME 0 -/* Define action or option for cmd_mac_control */ -#define cmd_act_mac_rx_on 0x0001 -#define cmd_act_mac_tx_on 0x0002 -#define cmd_act_mac_loopback_on 0x0004 -#define cmd_act_mac_wep_enable 0x0008 -#define cmd_act_mac_int_enable 0x0010 -#define cmd_act_mac_multicast_enable 0x0020 -#define cmd_act_mac_broadcast_enable 0x0040 -#define cmd_act_mac_promiscuous_enable 0x0080 -#define cmd_act_mac_all_multicast_enable 0x0100 -#define cmd_act_mac_strict_protection_enable 0x0400 +/* Define action or option for CMD_MAC_CONTROL */ +#define CMD_ACT_MAC_RX_ON 0x0001 +#define CMD_ACT_MAC_TX_ON 0x0002 +#define CMD_ACT_MAC_LOOPBACK_ON 0x0004 +#define CMD_ACT_MAC_WEP_ENABLE 0x0008 +#define CMD_ACT_MAC_INT_ENABLE 0x0010 +#define CMD_ACT_MAC_MULTICAST_ENABLE 0x0020 +#define CMD_ACT_MAC_BROADCAST_ENABLE 0x0040 +#define CMD_ACT_MAC_PROMISCUOUS_ENABLE 0x0080 +#define CMD_ACT_MAC_ALL_MULTICAST_ENABLE 0x0100 +#define CMD_ACT_MAC_STRICT_PROTECTION_ENABLE 0x0400 -/* Define action or option for cmd_802_11_radio_control */ -#define cmd_type_auto_preamble 0x0001 -#define cmd_type_short_preamble 0x0002 -#define cmd_type_long_preamble 0x0003 +/* Define action or option for CMD_802_11_RADIO_CONTROL */ +#define CMD_TYPE_AUTO_PREAMBLE 0x0001 +#define CMD_TYPE_SHORT_PREAMBLE 0x0002 +#define CMD_TYPE_LONG_PREAMBLE 0x0003 #define TURN_ON_RF 0x01 #define RADIO_ON 0x01 @@ -248,70 +187,80 @@ #define SET_LONG_PREAMBLE 0x01 /* Define action or option for CMD_802_11_RF_CHANNEL */ -#define cmd_opt_802_11_rf_channel_get 0x00 -#define cmd_opt_802_11_rf_channel_set 0x01 - -/* Define action or option for cmd_802_11_rf_tx_power */ -#define cmd_act_tx_power_opt_get 0x0000 -#define cmd_act_tx_power_opt_set_high 0x8007 -#define cmd_act_tx_power_opt_set_mid 0x8004 -#define cmd_act_tx_power_opt_set_low 0x8000 - -#define cmd_act_tx_power_index_high 0x0007 -#define cmd_act_tx_power_index_mid 0x0004 -#define cmd_act_tx_power_index_low 0x0000 - -/* Define action or option for cmd_802_11_data_rate */ -#define cmd_act_set_tx_auto 0x0000 -#define cmd_act_set_tx_fix_rate 0x0001 -#define cmd_act_get_tx_rate 
0x0002 - -#define cmd_act_set_rx 0x0001 -#define cmd_act_set_tx 0x0002 -#define cmd_act_set_both 0x0003 -#define cmd_act_get_rx 0x0004 -#define cmd_act_get_tx 0x0008 -#define cmd_act_get_both 0x000c - -/* Define action or option for cmd_802_11_ps_mode */ -#define cmd_type_cam 0x0000 -#define cmd_type_max_psp 0x0001 -#define cmd_type_fast_psp 0x0002 - -/* Define action or option for cmd_bt_access */ +#define CMD_OPT_802_11_RF_CHANNEL_GET 0x00 +#define CMD_OPT_802_11_RF_CHANNEL_SET 0x01 + +/* Define action or option for CMD_802_11_RF_TX_POWER */ +#define CMD_ACT_TX_POWER_OPT_GET 0x0000 +#define CMD_ACT_TX_POWER_OPT_SET_HIGH 0x8007 +#define CMD_ACT_TX_POWER_OPT_SET_MID 0x8004 +#define CMD_ACT_TX_POWER_OPT_SET_LOW 0x8000 + +#define CMD_ACT_TX_POWER_INDEX_HIGH 0x0007 +#define CMD_ACT_TX_POWER_INDEX_MID 0x0004 +#define CMD_ACT_TX_POWER_INDEX_LOW 0x0000 + +/* Define action or option for CMD_802_11_DATA_RATE */ +#define CMD_ACT_SET_TX_AUTO 0x0000 +#define CMD_ACT_SET_TX_FIX_RATE 0x0001 +#define CMD_ACT_GET_TX_RATE 0x0002 + +#define CMD_ACT_SET_RX 0x0001 +#define CMD_ACT_SET_TX 0x0002 +#define CMD_ACT_SET_BOTH 0x0003 +#define CMD_ACT_GET_RX 0x0004 +#define CMD_ACT_GET_TX 0x0008 +#define CMD_ACT_GET_BOTH 0x000c + +/* Define action or option for CMD_802_11_PS_MODE */ +#define CMD_TYPE_CAM 0x0000 +#define CMD_TYPE_MAX_PSP 0x0001 +#define CMD_TYPE_FAST_PSP 0x0002 + +/* Define action or option for CMD_BT_ACCESS */ enum cmd_bt_access_opts { /* The bt commands start at 5 instead of 1 because the old dft commands * are mapped to 1-4. These old commands are no longer maintained and * should not be called. */ - cmd_act_bt_access_add = 5, - cmd_act_bt_access_del, - cmd_act_bt_access_list, - cmd_act_bt_access_reset, - cmd_act_bt_access_set_invert, - cmd_act_bt_access_get_invert + CMD_ACT_BT_ACCESS_ADD = 5, + CMD_ACT_BT_ACCESS_DEL, + CMD_ACT_BT_ACCESS_LIST, + CMD_ACT_BT_ACCESS_RESET, + CMD_ACT_BT_ACCESS_SET_INVERT, + CMD_ACT_BT_ACCESS_GET_INVERT }; -/* Define action or option for cmd_fwt_access */ +/* Define action or option for CMD_FWT_ACCESS */ enum cmd_fwt_access_opts { - cmd_act_fwt_access_add = 1, - cmd_act_fwt_access_del, - cmd_act_fwt_access_lookup, - cmd_act_fwt_access_list, - cmd_act_fwt_access_list_route, - cmd_act_fwt_access_list_neighbor, - cmd_act_fwt_access_reset, - cmd_act_fwt_access_cleanup, - cmd_act_fwt_access_time, + CMD_ACT_FWT_ACCESS_ADD = 1, + CMD_ACT_FWT_ACCESS_DEL, + CMD_ACT_FWT_ACCESS_LOOKUP, + CMD_ACT_FWT_ACCESS_LIST, + CMD_ACT_FWT_ACCESS_LIST_route, + CMD_ACT_FWT_ACCESS_LIST_neighbor, + CMD_ACT_FWT_ACCESS_RESET, + CMD_ACT_FWT_ACCESS_CLEANUP, + CMD_ACT_FWT_ACCESS_TIME, }; -/* Define action or option for cmd_mesh_access */ +/* Define action or option for CMD_MESH_ACCESS */ enum cmd_mesh_access_opts { - cmd_act_mesh_get_ttl = 1, - cmd_act_mesh_set_ttl, - cmd_act_mesh_get_stats, - cmd_act_mesh_get_anycast, - cmd_act_mesh_set_anycast, + CMD_ACT_MESH_GET_TTL = 1, + CMD_ACT_MESH_SET_TTL, + CMD_ACT_MESH_GET_STATS, + CMD_ACT_MESH_GET_ANYCAST, + CMD_ACT_MESH_SET_ANYCAST, + CMD_ACT_MESH_SET_LINK_COSTS, + CMD_ACT_MESH_GET_LINK_COSTS, + CMD_ACT_MESH_SET_BCAST_RATE, + CMD_ACT_MESH_GET_BCAST_RATE, + CMD_ACT_MESH_SET_RREQ_DELAY, + CMD_ACT_MESH_GET_RREQ_DELAY, + CMD_ACT_MESH_SET_ROUTE_EXP, + CMD_ACT_MESH_GET_ROUTE_EXP, + CMD_ACT_MESH_SET_AUTOSTART_ENABLED, + CMD_ACT_MESH_GET_AUTOSTART_ENABLED, }; /** Card Event definition */ diff --git a/drivers/net/wireless/libertas/hostcmd.h b/drivers/net/wireless/libertas/hostcmd.h index 
09b898f6719c2193d8d9dfe14d9b124b94154911..e1045dc02cce26ca18736bfba0568d591d4e17b9 100644 --- a/drivers/net/wireless/libertas/hostcmd.h +++ b/drivers/net/wireless/libertas/hostcmd.h @@ -83,23 +83,12 @@ struct cmd_ctrl_node { wait_queue_head_t cmdwait_q; }; -/* WLAN_802_11_KEY - * - * Generic structure to hold all key types. key type (WEP40, WEP104, TKIP, AES) - * is determined from the keylength field. - */ -struct WLAN_802_11_KEY { - __le32 len; - __le32 flags; /* KEY_INFO_* from wlan_defs.h */ - u8 key[MRVL_MAX_KEY_WPA_KEY_LENGTH]; - __le16 type; /* KEY_TYPE_* from wlan_defs.h */ -}; - -struct IE_WPA { - u8 elementid; - u8 len; - u8 oui[4]; - __le16 version; +/* Generic structure to hold all key types. */ +struct enc_key { + u16 len; + u16 flags; /* KEY_INFO_* from wlan_defs.h */ + u16 type; /* KEY_TYPE_* from wlan_defs.h */ + u8 key[32]; }; /* wlan_offset_value */ @@ -108,18 +97,6 @@ struct wlan_offset_value { u32 value; }; -struct WLAN_802_11_FIXED_IEs { - __le64 timestamp; - __le16 beaconinterval; - u16 capabilities; /* Actually struct ieeetypes_capinfo */ -}; - -struct WLAN_802_11_VARIABLE_IEs { - u8 elementid; - u8 length; - u8 data[1]; -}; - /* Define general data structure */ /* cmd_DS_GEN */ struct cmd_ds_gen { @@ -131,7 +108,7 @@ struct cmd_ds_gen { #define S_DS_GEN sizeof(struct cmd_ds_gen) /* - * Define data structure for cmd_get_hw_spec + * Define data structure for CMD_GET_HW_SPEC * This structure defines the response for the GET_HW_SPEC command */ struct cmd_ds_get_hw_spec { @@ -178,11 +155,11 @@ struct cmd_ds_802_11_subscribe_event { /* * This scan handle Country Information IE(802.11d compliant) - * Define data structure for cmd_802_11_scan + * Define data structure for CMD_802_11_SCAN */ struct cmd_ds_802_11_scan { u8 bsstype; - u8 BSSID[ETH_ALEN]; + u8 bssid[ETH_ALEN]; u8 tlvbuffer[1]; #if 0 mrvlietypes_ssidparamset_t ssidParamSet; @@ -237,7 +214,7 @@ struct cmd_ds_802_11_deauthenticate { struct cmd_ds_802_11_associate { u8 peerstaaddr[6]; - struct ieeetypes_capinfo capinfo; + __le16 capability; __le16 listeninterval; __le16 bcnperiod; u8 dtimperiod; @@ -260,8 +237,8 @@ struct cmd_ds_802_11_associate_rsp { }; struct cmd_ds_802_11_ad_hoc_result { - u8 PAD[3]; - u8 BSSID[ETH_ALEN]; + u8 pad[3]; + u8 bssid[ETH_ALEN]; }; struct cmd_ds_802_11_set_wep { @@ -428,6 +405,16 @@ struct cmd_ds_802_11_rf_antenna { }; +struct cmd_ds_802_11_monitor_mode { + u16 action; + u16 mode; +}; + +struct cmd_ds_set_boot2_ver { + u16 action; + u16 version; +}; + struct cmd_ds_802_11_ps_mode { __le16 action; __le16 nullpktinterval; @@ -451,8 +438,8 @@ struct PS_CMD_ConfirmSleep { struct cmd_ds_802_11_data_rate { __le16 action; - __le16 reserverd; - u8 datarate[G_SUPPORTED_RATES]; + __le16 reserved; + u8 rates[MAX_RATES]; }; struct cmd_ds_802_11_rate_adapt_rateset { @@ -462,30 +449,30 @@ struct cmd_ds_802_11_rate_adapt_rateset { }; struct cmd_ds_802_11_ad_hoc_start { - u8 SSID[IW_ESSID_MAX_SIZE]; + u8 ssid[IW_ESSID_MAX_SIZE]; u8 bsstype; __le16 beaconperiod; u8 dtimperiod; union IEEEtypes_ssparamset ssparamset; union ieeetypes_phyparamset phyparamset; __le16 probedelay; - struct ieeetypes_capinfo cap; - u8 datarate[G_SUPPORTED_RATES]; + __le16 capability; + u8 rates[MAX_RATES]; u8 tlv_memory_size_pad[100]; } __attribute__ ((packed)); struct adhoc_bssdesc { - u8 BSSID[6]; - u8 SSID[32]; - u8 bsstype; + u8 bssid[6]; + u8 ssid[32]; + u8 type; __le16 beaconperiod; u8 dtimperiod; __le64 timestamp; __le64 localtime; union ieeetypes_phyparamset phyparamset; union IEEEtypes_ssparamset ssparamset; - struct 
ieeetypes_capinfo cap; - u8 datarates[G_SUPPORTED_RATES]; + __le16 capability; + u8 rates[MAX_RATES]; /* DO NOT ADD ANY FIELDS TO THIS STRUCTURE. It is used below in the * Adhoc join command and will cause a binary layout mismatch with @@ -494,7 +481,7 @@ struct adhoc_bssdesc { } __attribute__ ((packed)); struct cmd_ds_802_11_ad_hoc_join { - struct adhoc_bssdesc bssdescriptor; + struct adhoc_bssdesc bss; __le16 failtimeout; __le16 probedelay; @@ -646,6 +633,7 @@ struct cmd_ds_command { struct cmd_ds_802_11_snmp_mib smib; struct cmd_ds_802_11_rf_tx_power txp; struct cmd_ds_802_11_rf_antenna rant; + struct cmd_ds_802_11_monitor_mode monitor; struct cmd_ds_802_11_data_rate drate; struct cmd_ds_802_11_rate_adapt_rateset rateset; struct cmd_ds_mac_multicast_adr madr; @@ -677,6 +665,7 @@ struct cmd_ds_command { struct cmd_ds_bt_access bt; struct cmd_ds_fwt_access fwt; struct cmd_ds_mesh_access mesh; + struct cmd_ds_set_boot2_ver boot2_ver; struct cmd_ds_get_tsf gettsf; struct cmd_ds_802_11_subscribe_event subscribe_event; } params; diff --git a/drivers/net/wireless/libertas/if_bootcmd.c b/drivers/net/wireless/libertas/if_bootcmd.c deleted file mode 100644 index 8bca306ffad9e7e3b0916db45cbddbf4283a7851..0000000000000000000000000000000000000000 --- a/drivers/net/wireless/libertas/if_bootcmd.c +++ /dev/null @@ -1,40 +0,0 @@ -/** - * This file contains functions used in USB Boot command - * and Boot2/FW update - */ - -#include -#include -#include -#include - -#define DRV_NAME "usb8xxx" - -#include "defs.h" -#include "dev.h" -#include "if_usb.h" - -/** - * @brief This function issues Boot command to the Boot2 code - * @param ivalue 1:Boot from FW by USB-Download - * 2:Boot from FW in EEPROM - * @return 0 - */ -int if_usb_issue_boot_command(wlan_private *priv, int ivalue) -{ - struct usb_card_rec *cardp = priv->card; - struct bootcmdstr sbootcmd; - int i; - - /* Prepare command */ - sbootcmd.u32magicnumber = cpu_to_le32(BOOT_CMD_MAGIC_NUMBER); - sbootcmd.u8cmd_tag = ivalue; - for (i=0; i<11; i++) - sbootcmd.au8dumy[i]=0x00; - memcpy(cardp->bulk_out_buffer, &sbootcmd, sizeof(struct bootcmdstr)); - - /* Issue command */ - usb_tx_block(priv, cardp->bulk_out_buffer, sizeof(struct bootcmdstr)); - - return 0; -} diff --git a/drivers/net/wireless/libertas/if_cs.c b/drivers/net/wireless/libertas/if_cs.c new file mode 100644 index 0000000000000000000000000000000000000000..0360cad363a8dacd314be2fb57272daed142c1af --- /dev/null +++ b/drivers/net/wireless/libertas/if_cs.c @@ -0,0 +1,969 @@ +/* + + Driver for the Marvell 8385 based compact flash WLAN cards. + + (C) 2007 by Holger Schurig + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; see the file COPYING. If not, write to + the Free Software Foundation, Inc., 51 Franklin Steet, Fifth Floor, + Boston, MA 02110-1301, USA. 
+ +*/ + +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#include + +#define DRV_NAME "libertas_cs" + +#include "decl.h" +#include "defs.h" +#include "dev.h" + + +/********************************************************************/ +/* Module stuff */ +/********************************************************************/ + +MODULE_AUTHOR("Holger Schurig "); +MODULE_DESCRIPTION("Driver for Marvell 83xx compact flash WLAN cards"); +MODULE_LICENSE("GPL"); + + + +/********************************************************************/ +/* Data structures */ +/********************************************************************/ + +struct if_cs_card { + struct pcmcia_device *p_dev; + wlan_private *priv; + void __iomem *iobase; +}; + + + +/********************************************************************/ +/* Hardware access */ +/********************************************************************/ + +/* This define enables wrapper functions which allow you + to dump all register accesses. You normally won't this, + except for development */ +/* #define DEBUG_IO */ + +#ifdef DEBUG_IO +static int debug_output = 0; +#else +/* This way the compiler optimizes the printk's away */ +#define debug_output 0 +#endif + +static inline unsigned int if_cs_read8(struct if_cs_card *card, uint reg) +{ + unsigned int val = ioread8(card->iobase + reg); + if (debug_output) + printk(KERN_INFO "##inb %08x<%02x\n", reg, val); + return val; +} +static inline unsigned int if_cs_read16(struct if_cs_card *card, uint reg) +{ + unsigned int val = ioread16(card->iobase + reg); + if (debug_output) + printk(KERN_INFO "##inw %08x<%04x\n", reg, val); + return val; +} +static inline void if_cs_read16_rep( + struct if_cs_card *card, + uint reg, + void *buf, + unsigned long count) +{ + if (debug_output) + printk(KERN_INFO "##insw %08x<(0x%lx words)\n", + reg, count); + ioread16_rep(card->iobase + reg, buf, count); +} + +static inline void if_cs_write8(struct if_cs_card *card, uint reg, u8 val) +{ + if (debug_output) + printk(KERN_INFO "##outb %08x>%02x\n", reg, val); + iowrite8(val, card->iobase + reg); +} + +static inline void if_cs_write16(struct if_cs_card *card, uint reg, u16 val) +{ + if (debug_output) + printk(KERN_INFO "##outw %08x>%04x\n", reg, val); + iowrite16(val, card->iobase + reg); +} + +static inline void if_cs_write16_rep( + struct if_cs_card *card, + uint reg, + void *buf, + unsigned long count) +{ + if (debug_output) + printk(KERN_INFO "##outsw %08x>(0x%lx words)\n", + reg, count); + iowrite16_rep(card->iobase + reg, buf, count); +} + + +/* + * I know that polling/delaying is frowned upon. However, this procedure + * with polling is needed while downloading the firmware. At this stage, + * the hardware does unfortunately not create any interrupts. + * + * Fortunately, this function is never used once the firmware is in + * the card. :-) + * + * As a reference, see the "Firmware Specification v5.1", page 18 + * and 19. I did not follow their suggested timing to the word, + * but this works nice & fast anyway. 
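+ *
+ * (The loop below retries up to 500 times with udelay(100), i.e. it
+ * gives up after roughly 50 ms.)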
+ */ +static int if_cs_poll_while_fw_download(struct if_cs_card *card, uint addr, u8 reg) +{ + int i; + + for (i = 0; i < 500; i++) { + u8 val = if_cs_read8(card, addr); + if (val == reg) + return i; + udelay(100); + } + return -ETIME; +} + + + +/* Host control registers and their bit definitions */ + +#define IF_CS_H_STATUS 0x00000000 +#define IF_CS_H_STATUS_TX_OVER 0x0001 +#define IF_CS_H_STATUS_RX_OVER 0x0002 +#define IF_CS_H_STATUS_DNLD_OVER 0x0004 + +#define IF_CS_H_INT_CAUSE 0x00000002 +#define IF_CS_H_IC_TX_OVER 0x0001 +#define IF_CS_H_IC_RX_OVER 0x0002 +#define IF_CS_H_IC_DNLD_OVER 0x0004 +#define IF_CS_H_IC_HOST_EVENT 0x0008 +#define IF_CS_H_IC_MASK 0x001f + +#define IF_CS_H_INT_MASK 0x00000004 +#define IF_CS_H_IM_MASK 0x001f + +#define IF_CS_H_WRITE_LEN 0x00000014 + +#define IF_CS_H_WRITE 0x00000016 + +#define IF_CS_H_CMD_LEN 0x00000018 + +#define IF_CS_H_CMD 0x0000001A + +#define IF_CS_C_READ_LEN 0x00000024 + +#define IF_CS_H_READ 0x00000010 + +/* Card control registers and their bit definitions */ + +#define IF_CS_C_STATUS 0x00000020 +#define IF_CS_C_S_TX_DNLD_RDY 0x0001 +#define IF_CS_C_S_RX_UPLD_RDY 0x0002 +#define IF_CS_C_S_CMD_DNLD_RDY 0x0004 +#define IF_CS_C_S_CMD_UPLD_RDY 0x0008 +#define IF_CS_C_S_CARDEVENT 0x0010 +#define IF_CS_C_S_MASK 0x001f +#define IF_CS_C_S_STATUS_MASK 0x7f00 +/* The following definitions should be the same as the MRVDRV_ ones */ + +#if MRVDRV_CMD_DNLD_RDY != IF_CS_C_S_CMD_DNLD_RDY +#error MRVDRV_CMD_DNLD_RDY and IF_CS_C_S_CMD_DNLD_RDY not in sync +#endif +#if MRVDRV_CMD_UPLD_RDY != IF_CS_C_S_CMD_UPLD_RDY +#error MRVDRV_CMD_UPLD_RDY and IF_CS_C_S_CMD_UPLD_RDY not in sync +#endif +#if MRVDRV_CARDEVENT != IF_CS_C_S_CARDEVENT +#error MRVDRV_CARDEVENT and IF_CS_C_S_CARDEVENT not in sync +#endif + +#define IF_CS_C_INT_CAUSE 0x00000022 +#define IF_CS_C_IC_MASK 0x001f + +#define IF_CS_C_SQ_READ_LOW 0x00000028 +#define IF_CS_C_SQ_HELPER_OK 0x10 + +#define IF_CS_C_CMD_LEN 0x00000030 + +#define IF_CS_C_CMD 0x00000012 + +#define IF_CS_SCRATCH 0x0000003F + + + +/********************************************************************/ +/* Interrupts */ +/********************************************************************/ + +static inline void if_cs_enable_ints(struct if_cs_card *card) +{ + lbs_deb_enter(LBS_DEB_CS); + if_cs_write16(card, IF_CS_H_INT_MASK, 0); +} + +static inline void if_cs_disable_ints(struct if_cs_card *card) +{ + lbs_deb_enter(LBS_DEB_CS); + if_cs_write16(card, IF_CS_H_INT_MASK, IF_CS_H_IM_MASK); +} + +static irqreturn_t if_cs_interrupt(int irq, void *data) +{ + struct if_cs_card *card = (struct if_cs_card *)data; + u16 int_cause; + + lbs_deb_enter(LBS_DEB_CS); + + int_cause = if_cs_read16(card, IF_CS_C_INT_CAUSE); + if(int_cause == 0x0) { + /* Not for us */ + return IRQ_NONE; + + } else if(int_cause == 0xffff) { + /* Read in junk, the card has probably been removed */ + card->priv->adapter->surpriseremoved = 1; + + } else { + if(int_cause & IF_CS_H_IC_TX_OVER) { + card->priv->dnld_sent = DNLD_RES_RECEIVED; + if (!card->priv->adapter->cur_cmd) + wake_up_interruptible(&card->priv->waitq); + + if (card->priv->adapter->connect_status == LIBERTAS_CONNECTED) + netif_wake_queue(card->priv->dev); + } + + /* clear interrupt */ + if_cs_write16(card, IF_CS_C_INT_CAUSE, int_cause & IF_CS_C_IC_MASK); + } + + libertas_interrupt(card->priv->dev); + + return IRQ_HANDLED; +} + + + + +/********************************************************************/ +/* I/O */ +/********************************************************************/ + +/* + * Called from 
if_cs_host_to_card to send a command to the hardware + */ +static int if_cs_send_cmd(wlan_private *priv, u8 *buf, u16 nb) +{ + struct if_cs_card *card = (struct if_cs_card *)priv->card; + int ret = -1; + int loops = 0; + + lbs_deb_enter(LBS_DEB_CS); + + /* Is hardware ready? */ + while (1) { + u16 val = if_cs_read16(card, IF_CS_C_STATUS); + if (val & IF_CS_C_S_CMD_DNLD_RDY) + break; + if (++loops > 100) { + lbs_pr_err("card not ready for commands\n"); + goto done; + } + mdelay(1); + } + + if_cs_write16(card, IF_CS_H_CMD_LEN, nb); + + if_cs_write16_rep(card, IF_CS_H_CMD, buf, nb / 2); + /* Are we supposed to transfer an odd amount of bytes? */ + if (nb & 1) + if_cs_write8(card, IF_CS_H_CMD, buf[nb-1]); + + /* "Assert the download over interrupt command in the Host + * status register" */ + if_cs_write16(card, IF_CS_H_STATUS, IF_CS_H_STATUS_DNLD_OVER); + + /* "Assert the download over interrupt command in the Card + * interrupt case register" */ + if_cs_write16(card, IF_CS_H_INT_CAUSE, IF_CS_H_IC_DNLD_OVER); + ret = 0; + +done: + lbs_deb_leave_args(LBS_DEB_CS, "ret %d", ret); + return ret; +} + + +/* + * Called from if_cs_host_to_card to send a data to the hardware + */ +static void if_cs_send_data(wlan_private *priv, u8 *buf, u16 nb) +{ + struct if_cs_card *card = (struct if_cs_card *)priv->card; + + lbs_deb_enter(LBS_DEB_CS); + + if_cs_write16(card, IF_CS_H_WRITE_LEN, nb); + + /* write even number of bytes, then odd byte if necessary */ + if_cs_write16_rep(card, IF_CS_H_WRITE, buf, nb / 2); + if (nb & 1) + if_cs_write8(card, IF_CS_H_WRITE, buf[nb-1]); + + if_cs_write16(card, IF_CS_H_STATUS, IF_CS_H_STATUS_TX_OVER); + if_cs_write16(card, IF_CS_H_INT_CAUSE, IF_CS_H_STATUS_TX_OVER); + + lbs_deb_leave(LBS_DEB_CS); +} + + +/* + * Get the command result out of the card. + */ +static int if_cs_receive_cmdres(wlan_private *priv, u8* data, u32 *len) +{ + int ret = -1; + u16 val; + + lbs_deb_enter(LBS_DEB_CS); + + /* is hardware ready? 
*/ + val = if_cs_read16(priv->card, IF_CS_C_STATUS); + if ((val & IF_CS_C_S_CMD_UPLD_RDY) == 0) { + lbs_pr_err("card not ready for CMD\n"); + goto out; + } + + *len = if_cs_read16(priv->card, IF_CS_C_CMD_LEN); + if ((*len == 0) || (*len > MRVDRV_SIZE_OF_CMD_BUFFER)) { + lbs_pr_err("card cmd buffer has invalid # of bytes (%d)\n", *len); + goto out; + } + + /* read even number of bytes, then odd byte if necessary */ + if_cs_read16_rep(priv->card, IF_CS_C_CMD, data, *len/sizeof(u16)); + if (*len & 1) + data[*len-1] = if_cs_read8(priv->card, IF_CS_C_CMD); + + ret = 0; +out: + lbs_deb_leave_args(LBS_DEB_CS, "ret %d, len %d", ret, *len); + return ret; +} + + +static struct sk_buff *if_cs_receive_data(wlan_private *priv) +{ + struct sk_buff *skb = NULL; + u16 len; + u8 *data; + + lbs_deb_enter(LBS_DEB_CS); + + len = if_cs_read16(priv->card, IF_CS_C_READ_LEN); + if (len == 0 || len > MRVDRV_ETH_RX_PACKET_BUFFER_SIZE) { + lbs_pr_err("card data buffer has invalid # of bytes (%d)\n", len); + priv->stats.rx_dropped++; + printk(KERN_INFO "##HS %s:%d TODO\n", __FUNCTION__, __LINE__); + goto dat_err; + } + + //TODO: skb = dev_alloc_skb(len+ETH_FRAME_LEN+MRVDRV_SNAP_HEADER_LEN+EXTRA_LEN); + skb = dev_alloc_skb(MRVDRV_ETH_RX_PACKET_BUFFER_SIZE + 2); + if (!skb) + goto out; + skb_put(skb, len); + skb_reserve(skb, 2);/* 16 byte align */ + data = skb->data; + + /* read even number of bytes, then odd byte if necessary */ + if_cs_read16_rep(priv->card, IF_CS_H_READ, data, len/sizeof(u16)); + if (len & 1) + data[len-1] = if_cs_read8(priv->card, IF_CS_H_READ); + +dat_err: + if_cs_write16(priv->card, IF_CS_H_STATUS, IF_CS_H_STATUS_RX_OVER); + if_cs_write16(priv->card, IF_CS_H_INT_CAUSE, IF_CS_H_IC_RX_OVER); + +out: + lbs_deb_leave_args(LBS_DEB_CS, "ret %p", skb); + return skb; +} + + + +/********************************************************************/ +/* Firmware */ +/********************************************************************/ + +/* + * Tries to program the helper firmware. 
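+ * The helper image ("libertas_cs_helper.fw") is downloaded first; the
+ * main firmware is sent afterwards by if_cs_prog_real(), see
+ * if_cs_probe().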
+ * + * Return 0 on success + */ +static int if_cs_prog_helper(struct if_cs_card *card) +{ + int ret = 0; + int sent = 0; + u8 scratch; + const struct firmware *fw; + + lbs_deb_enter(LBS_DEB_CS); + + scratch = if_cs_read8(card, IF_CS_SCRATCH); + + /* "If the value is 0x5a, the firmware is already + * downloaded successfully" + */ + if (scratch == 0x5a) + goto done; + + /* "If the value is != 00, it is invalid value of register */ + if (scratch != 0x00) { + ret = -ENODEV; + goto done; + } + + /* TODO: make firmware file configurable */ + ret = request_firmware(&fw, "libertas_cs_helper.fw", + &handle_to_dev(card->p_dev)); + if (ret) { + lbs_pr_err("can't load helper firmware\n"); + ret = -ENODEV; + goto done; + } + lbs_deb_cs("helper size %td\n", fw->size); + + /* "Set the 5 bytes of the helper image to 0" */ + /* Not needed, this contains an ARM branch instruction */ + + for (;;) { + /* "the number of bytes to send is 256" */ + int count = 256; + int remain = fw->size - sent; + + if (remain < count) + count = remain; + /* printk(KERN_INFO "//HS %d loading %d of %d bytes\n", + __LINE__, sent, fw->size); */ + + /* "write the number of bytes to be sent to the I/O Command + * write length register" */ + if_cs_write16(card, IF_CS_H_CMD_LEN, count); + + /* "write this to I/O Command port register as 16 bit writes */ + if (count) + if_cs_write16_rep(card, IF_CS_H_CMD, + &fw->data[sent], + count >> 1); + + /* "Assert the download over interrupt command in the Host + * status register" */ + if_cs_write8(card, IF_CS_H_STATUS, IF_CS_H_STATUS_DNLD_OVER); + + /* "Assert the download over interrupt command in the Card + * interrupt case register" */ + if_cs_write16(card, IF_CS_H_INT_CAUSE, IF_CS_H_IC_DNLD_OVER); + + /* "The host polls the Card Status register ... 
for 50 ms before + declaring a failure */ + ret = if_cs_poll_while_fw_download(card, IF_CS_C_STATUS, + IF_CS_C_S_CMD_DNLD_RDY); + if (ret < 0) { + lbs_pr_err("can't download helper at 0x%x, ret %d\n", + sent, ret); + goto done; + } + + if (count == 0) + break; + + sent += count; + } + + release_firmware(fw); + ret = 0; + +done: + lbs_deb_leave_args(LBS_DEB_CS, "ret %d", ret); + return ret; +} + + +static int if_cs_prog_real(struct if_cs_card *card) +{ + const struct firmware *fw; + int ret = 0; + int retry = 0; + int len = 0; + int sent; + + lbs_deb_enter(LBS_DEB_CS); + + /* TODO: make firmware file configurable */ + ret = request_firmware(&fw, "libertas_cs.fw", + &handle_to_dev(card->p_dev)); + if (ret) { + lbs_pr_err("can't load firmware\n"); + ret = -ENODEV; + goto done; + } + lbs_deb_cs("fw size %td\n", fw->size); + + ret = if_cs_poll_while_fw_download(card, IF_CS_C_SQ_READ_LOW, IF_CS_C_SQ_HELPER_OK); + if (ret < 0) { + int i; + lbs_pr_err("helper firmware doesn't answer\n"); + for (i = 0; i < 0x50; i += 2) + printk(KERN_INFO "## HS %02x: %04x\n", + i, if_cs_read16(card, i)); + goto err_release; + } + + for (sent = 0; sent < fw->size; sent += len) { + len = if_cs_read16(card, IF_CS_C_SQ_READ_LOW); + /* printk(KERN_INFO "//HS %d loading %d of %d bytes\n", + __LINE__, sent, fw->size); */ + if (len & 1) { + retry++; + lbs_pr_info("odd, need to retry this firmware block\n"); + } else { + retry = 0; + } + + if (retry > 20) { + lbs_pr_err("could not download firmware\n"); + ret = -ENODEV; + goto err_release; + } + if (retry) { + sent -= len; + } + + + if_cs_write16(card, IF_CS_H_CMD_LEN, len); + + if_cs_write16_rep(card, IF_CS_H_CMD, + &fw->data[sent], + (len+1) >> 1); + if_cs_write8(card, IF_CS_H_STATUS, IF_CS_H_STATUS_DNLD_OVER); + if_cs_write16(card, IF_CS_H_INT_CAUSE, IF_CS_H_IC_DNLD_OVER); + + ret = if_cs_poll_while_fw_download(card, IF_CS_C_STATUS, + IF_CS_C_S_CMD_DNLD_RDY); + if (ret < 0) { + lbs_pr_err("can't download firmware at 0x%x\n", sent); + goto err_release; + } + } + + ret = if_cs_poll_while_fw_download(card, IF_CS_SCRATCH, 0x5a); + if (ret < 0) { + lbs_pr_err("firmware download failed\n"); + goto err_release; + } + + ret = 0; + goto done; + + +err_release: + release_firmware(fw); + +done: + lbs_deb_leave_args(LBS_DEB_CS, "ret %d", ret); + return ret; +} + + + +/********************************************************************/ +/* Callback functions for libertas.ko */ +/********************************************************************/ + +/* Send commands or data packets to the card */ +static int if_cs_host_to_card(wlan_private *priv, u8 type, u8 *buf, u16 nb) +{ + int ret = -1; + + lbs_deb_enter_args(LBS_DEB_CS, "type %d, bytes %d", type, nb); + + switch (type) { + case MVMS_DAT: + priv->dnld_sent = DNLD_DATA_SENT; + if_cs_send_data(priv, buf, nb); + ret = 0; + break; + case MVMS_CMD: + priv->dnld_sent = DNLD_CMD_SENT; + ret = if_cs_send_cmd(priv, buf, nb); + break; + default: + lbs_pr_err("%s: unsupported type %d\n", __FUNCTION__, type); + } + + lbs_deb_leave_args(LBS_DEB_CS, "ret %d", ret); + return ret; +} + + +static int if_cs_get_int_status(wlan_private *priv, u8 *ireg) +{ + struct if_cs_card *card = (struct if_cs_card *)priv->card; + //wlan_adapter *adapter = priv->adapter; + int ret = 0; + u16 int_cause; + u8 *cmdbuf; + *ireg = 0; + + lbs_deb_enter(LBS_DEB_CS); + + if (priv->adapter->surpriseremoved) + goto out; + + int_cause = if_cs_read16(card, IF_CS_C_INT_CAUSE) & IF_CS_C_IC_MASK; + if_cs_write16(card, IF_CS_C_INT_CAUSE, int_cause); + + *ireg = 
if_cs_read16(card, IF_CS_C_STATUS) & IF_CS_C_S_MASK; + + if (!*ireg) + goto sbi_get_int_status_exit; + +sbi_get_int_status_exit: + + /* is there a data packet for us? */ + if (*ireg & IF_CS_C_S_RX_UPLD_RDY) { + struct sk_buff *skb = if_cs_receive_data(priv); + libertas_process_rxed_packet(priv, skb); + *ireg &= ~IF_CS_C_S_RX_UPLD_RDY; + } + + if (*ireg & IF_CS_C_S_TX_DNLD_RDY) { + priv->dnld_sent = DNLD_RES_RECEIVED; + } + + /* Card has a command result for us */ + if (*ireg & IF_CS_C_S_CMD_UPLD_RDY) { + spin_lock(&priv->adapter->driver_lock); + if (!priv->adapter->cur_cmd) { + cmdbuf = priv->upld_buf; + priv->adapter->hisregcpy &= ~IF_CS_C_S_RX_UPLD_RDY; + } else { + cmdbuf = priv->adapter->cur_cmd->bufvirtualaddr; + } + + ret = if_cs_receive_cmdres(priv, cmdbuf, &priv->upld_len); + spin_unlock(&priv->adapter->driver_lock); + if (ret < 0) + lbs_pr_err("could not receive cmd from card\n"); + } + +out: + lbs_deb_leave_args(LBS_DEB_CS, "ret %d, ireg 0x%x, hisregcpy 0x%x", ret, *ireg, priv->adapter->hisregcpy); + return ret; +} + + +static int if_cs_read_event_cause(wlan_private *priv) +{ + lbs_deb_enter(LBS_DEB_CS); + + priv->adapter->eventcause = (if_cs_read16(priv->card, IF_CS_C_STATUS) & IF_CS_C_S_STATUS_MASK) >> 5; + if_cs_write16(priv->card, IF_CS_H_INT_CAUSE, IF_CS_H_IC_HOST_EVENT); + + return 0; +} + + + +/********************************************************************/ +/* Card Services */ +/********************************************************************/ + +/* + * After a card is removed, if_cs_release() will unregister the + * device, and release the PCMCIA configuration. If the device is + * still open, this will be postponed until it is closed. + */ +static void if_cs_release(struct pcmcia_device *p_dev) +{ + struct if_cs_card *card = p_dev->priv; + + lbs_deb_enter(LBS_DEB_CS); + + pcmcia_disable_device(p_dev); + free_irq(p_dev->irq.AssignedIRQ, card); + if (card->iobase) + ioport_unmap(card->iobase); + + lbs_deb_leave(LBS_DEB_CS); +} + + +/* + * This creates an "instance" of the driver, allocating local data + * structures for one device. The device is registered with Card + * Services. + * + * The dev_link structure is initialized, but we don't actually + * configure the card at this point -- we wait until we receive a card + * insertion event. + */ +static int if_cs_probe(struct pcmcia_device *p_dev) +{ + int ret = -ENOMEM; + wlan_private *priv; + struct if_cs_card *card; + /* CIS parsing */ + tuple_t tuple; + cisparse_t parse; + cistpl_cftable_entry_t *cfg = &parse.cftable_entry; + cistpl_io_t *io = &cfg->io; + u_char buf[64]; + + lbs_deb_enter(LBS_DEB_CS); + + card = kzalloc(sizeof(struct if_cs_card), GFP_KERNEL); + if (!card) { + lbs_pr_err("error in kzalloc\n"); + goto out; + } + card->p_dev = p_dev; + p_dev->priv = card; + + p_dev->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING; + p_dev->irq.Handler = NULL; + p_dev->irq.IRQInfo1 = IRQ_INFO2_VALID | IRQ_LEVEL_ID; + + p_dev->conf.Attributes = 0; + p_dev->conf.IntType = INT_MEMORY_AND_IO; + + tuple.Attributes = 0; + tuple.TupleData = buf; + tuple.TupleDataMax = sizeof(buf); + tuple.TupleOffset = 0; + + tuple.DesiredTuple = CISTPL_CFTABLE_ENTRY; + if ((ret = pcmcia_get_first_tuple(p_dev, &tuple)) != 0 || + (ret = pcmcia_get_tuple_data(p_dev, &tuple)) != 0 || + (ret = pcmcia_parse_tuple(p_dev, &tuple, &parse)) != 0) + { + lbs_pr_err("error in pcmcia_get_first_tuple etc\n"); + goto out1; + } + + p_dev->conf.ConfigIndex = cfg->index; + + /* Do we need to allocate an interrupt? 
*/ + if (cfg->irq.IRQInfo1) { + p_dev->conf.Attributes |= CONF_ENABLE_IRQ; + } + + /* IO window settings */ + if (cfg->io.nwin != 1) { + lbs_pr_err("wrong CIS (check number of IO windows)\n"); + ret = -ENODEV; + goto out1; + } + p_dev->io.Attributes1 = IO_DATA_PATH_WIDTH_AUTO; + p_dev->io.BasePort1 = io->win[0].base; + p_dev->io.NumPorts1 = io->win[0].len; + + /* This reserves IO space but doesn't actually enable it */ + ret = pcmcia_request_io(p_dev, &p_dev->io); + if (ret) { + lbs_pr_err("error in pcmcia_request_io\n"); + goto out1; + } + + /* + * Allocate an interrupt line. Note that this does not assign + * a handler to the interrupt, unless the 'Handler' member of + * the irq structure is initialized. + */ + if (p_dev->conf.Attributes & CONF_ENABLE_IRQ) { + ret = pcmcia_request_irq(p_dev, &p_dev->irq); + if (ret) { + lbs_pr_err("error in pcmcia_request_irq\n"); + goto out1; + } + } + + /* Initialize io access */ + card->iobase = ioport_map(p_dev->io.BasePort1, p_dev->io.NumPorts1); + if (!card->iobase) { + lbs_pr_err("error in ioport_map\n"); + ret = -EIO; + goto out1; + } + + /* + * This actually configures the PCMCIA socket -- setting up + * the I/O windows and the interrupt mapping, and putting the + * card and host interface into "Memory and IO" mode. + */ + ret = pcmcia_request_configuration(p_dev, &p_dev->conf); + if (ret) { + lbs_pr_err("error in pcmcia_request_configuration\n"); + goto out2; + } + + /* Finally, report what we've done */ + lbs_deb_cs("irq %d, io 0x%04x-0x%04x\n", + p_dev->irq.AssignedIRQ, p_dev->io.BasePort1, + p_dev->io.BasePort1 + p_dev->io.NumPorts1 - 1); + + + /* Load the firmware early, before calling into libertas.ko */ + ret = if_cs_prog_helper(card); + if (ret == 0) + ret = if_cs_prog_real(card); + if (ret) + goto out2; + + /* Make this card known to the libertas driver */ + priv = libertas_add_card(card, &p_dev->dev); + if (!priv) { + ret = -ENOMEM; + goto out2; + } + + /* Store pointers to our call-back functions */ + card->priv = priv; + priv->card = card; + priv->hw_host_to_card = if_cs_host_to_card; + priv->hw_get_int_status = if_cs_get_int_status; + priv->hw_read_event_cause = if_cs_read_event_cause; + + priv->adapter->fw_ready = 1; + + /* Now actually get the IRQ */ + ret = request_irq(p_dev->irq.AssignedIRQ, if_cs_interrupt, + IRQF_SHARED, DRV_NAME, card); + if (ret) { + lbs_pr_err("error in request_irq\n"); + goto out3; + } + + if_cs_enable_ints(card); + + /* And finally bring the card up */ + if (libertas_start_card(priv) != 0) { + lbs_pr_err("could not activate card\n"); + goto out3; + } + + ret = 0; + goto out; + +out3: + libertas_remove_card(priv); +out2: + ioport_unmap(card->iobase); +out1: + pcmcia_disable_device(p_dev); +out: + lbs_deb_leave_args(LBS_DEB_CS, "ret %d", ret); + return ret; +} + + +/* + * This deletes a driver "instance". The device is de-registered with + * Card Services. If it has been released, all local data structures + * are freed. Otherwise, the structures will be freed when the device + * is released. 
+ */ +static void if_cs_detach(struct pcmcia_device *p_dev) +{ + struct if_cs_card *card = p_dev->priv; + + lbs_deb_enter(LBS_DEB_CS); + + libertas_stop_card(card->priv); + libertas_remove_card(card->priv); + if_cs_disable_ints(card); + if_cs_release(p_dev); + kfree(card); + + lbs_deb_leave(LBS_DEB_CS); +} + + + +/********************************************************************/ +/* Module initialization */ +/********************************************************************/ + +static struct pcmcia_device_id if_cs_ids[] = { + PCMCIA_DEVICE_MANF_CARD(0x02df, 0x8103), + PCMCIA_DEVICE_NULL, +}; +MODULE_DEVICE_TABLE(pcmcia, if_cs_ids); + + +static struct pcmcia_driver libertas_driver = { + .owner = THIS_MODULE, + .drv = { + .name = DRV_NAME, + }, + .probe = if_cs_probe, + .remove = if_cs_detach, + .id_table = if_cs_ids, +}; + + +static int __init if_cs_init(void) +{ + int ret; + + lbs_deb_enter(LBS_DEB_CS); + ret = pcmcia_register_driver(&libertas_driver); + lbs_deb_leave(LBS_DEB_CS); + return ret; +} + + +static void __exit if_cs_exit(void) +{ + lbs_deb_enter(LBS_DEB_CS); + pcmcia_unregister_driver(&libertas_driver); + lbs_deb_leave(LBS_DEB_CS); +} + + +module_init(if_cs_init); +module_exit(if_cs_exit); diff --git a/drivers/net/wireless/libertas/if_usb.c b/drivers/net/wireless/libertas/if_usb.c index 998317571ec26cdc3b97342b2da376a0d2f2561f..cb59f46ed126471816cee87712f89ee32d2cd4ef 100644 --- a/drivers/net/wireless/libertas/if_usb.c +++ b/drivers/net/wireless/libertas/if_usb.c @@ -21,7 +21,7 @@ static const char usbdriver_name[] = "usb8xxx"; static u8 *default_fw_name = "usb8388.bin"; -char *libertas_fw_name = NULL; +static char *libertas_fw_name = NULL; module_param_named(fw_name, libertas_fw_name, charp, 0644); /* @@ -44,13 +44,14 @@ MODULE_DEVICE_TABLE(usb, if_usb_table); static void if_usb_receive(struct urb *urb); static void if_usb_receive_fwload(struct urb *urb); -static int if_usb_reset_device(wlan_private *priv); -static int if_usb_register_dev(wlan_private * priv); -static int if_usb_unregister_dev(wlan_private *); -static int if_usb_prog_firmware(wlan_private *); +static int if_usb_prog_firmware(struct usb_card_rec *cardp); static int if_usb_host_to_card(wlan_private * priv, u8 type, u8 * payload, u16 nb); static int if_usb_get_int_status(wlan_private * priv, u8 *); static int if_usb_read_event_cause(wlan_private *); +static int usb_tx_block(struct usb_card_rec *cardp, u8 *payload, u16 nb); +static void if_usb_free(struct usb_card_rec *cardp); +static int if_usb_submit_rx_urb(struct usb_card_rec *cardp); +static int if_usb_reset_device(struct usb_card_rec *cardp); /** * @brief call back function to handle the status of the URB @@ -59,29 +60,40 @@ static int if_usb_read_event_cause(wlan_private *); */ static void if_usb_write_bulk_callback(struct urb *urb) { - wlan_private *priv = (wlan_private *) (urb->context); - wlan_adapter *adapter = priv->adapter; - struct net_device *dev = priv->dev; + struct usb_card_rec *cardp = (struct usb_card_rec *) urb->context; /* handle the transmission complete validations */ - if (urb->status != 0) { - /* print the failure status number for debug */ - lbs_pr_info("URB in failure status: %d\n", urb->status); - } else { + if (urb->status == 0) { + wlan_private *priv = cardp->priv; + /* lbs_deb_usbd(&urb->dev->dev, "URB status is successfull\n"); lbs_deb_usbd(&urb->dev->dev, "Actual length transmitted %d\n", urb->actual_length); */ - priv->dnld_sent = DNLD_RES_RECEIVED; - /* Wake main thread if commands are pending */ - if (!adapter->cur_cmd) - 
wake_up_interruptible(&priv->mainthread.waitq); - if ((adapter->connect_status == libertas_connected)) { - netif_wake_queue(dev); - netif_wake_queue(priv->mesh_dev); + + /* Used for both firmware TX and regular TX. priv isn't + * valid at firmware load time. + */ + if (priv) { + wlan_adapter *adapter = priv->adapter; + struct net_device *dev = priv->dev; + + priv->dnld_sent = DNLD_RES_RECEIVED; + + /* Wake main thread if commands are pending */ + if (!adapter->cur_cmd) + wake_up_interruptible(&priv->waitq); + + if ((adapter->connect_status == LIBERTAS_CONNECTED)) { + netif_wake_queue(dev); + netif_wake_queue(priv->mesh_dev); + } } + } else { + /* print the failure status number for debug */ + lbs_pr_info("URB in failure status: %d\n", urb->status); } return; @@ -92,7 +104,7 @@ static void if_usb_write_bulk_callback(struct urb *urb) * @param cardp pointer usb_card_rec * @return N/A */ -void if_usb_free(struct usb_card_rec *cardp) +static void if_usb_free(struct usb_card_rec *cardp) { lbs_deb_enter(LBS_DEB_USB); @@ -203,21 +215,35 @@ static int if_usb_probe(struct usb_interface *intf, } } + /* Upload firmware */ + cardp->rinfo.cardp = cardp; + if (if_usb_prog_firmware(cardp)) { + lbs_deb_usbd(&udev->dev, "FW upload failed"); + goto err_prog_firmware; + } + if (!(priv = libertas_add_card(cardp, &udev->dev))) - goto dealloc; + goto err_prog_firmware; + + cardp->priv = priv; if (libertas_add_mesh(priv, &udev->dev)) goto err_add_mesh; - priv->hw_register_dev = if_usb_register_dev; - priv->hw_unregister_dev = if_usb_unregister_dev; - priv->hw_prog_firmware = if_usb_prog_firmware; + cardp->eth_dev = priv->dev; + priv->hw_host_to_card = if_usb_host_to_card; priv->hw_get_int_status = if_usb_get_int_status; priv->hw_read_event_cause = if_usb_read_event_cause; + priv->boot2_version = udev->descriptor.bcdDevice; - if (libertas_activate_card(priv, libertas_fw_name)) - goto err_activate_card; + /* Delay 200 ms to waiting for the FW ready */ + if_usb_submit_rx_urb(cardp); + msleep_interruptible(200); + priv->adapter->fw_ready = 1; + + if (libertas_start_card(priv)) + goto err_start_card; list_add_tail(&cardp->list, &usb_devices); @@ -226,11 +252,12 @@ static int if_usb_probe(struct usb_interface *intf, return 0; -err_activate_card: +err_start_card: libertas_remove_mesh(priv); err_add_mesh: - free_netdev(priv->dev); - kfree(priv->adapter); + libertas_remove_card(priv); +err_prog_firmware: + if_usb_reset_device(cardp); dealloc: if_usb_free(cardp); @@ -247,21 +274,22 @@ static void if_usb_disconnect(struct usb_interface *intf) { struct usb_card_rec *cardp = usb_get_intfdata(intf); wlan_private *priv = (wlan_private *) cardp->priv; - wlan_adapter *adapter = NULL; - adapter = priv->adapter; + lbs_deb_enter(LBS_DEB_MAIN); - /* - * Update Surprise removed to TRUE - */ - adapter->surpriseremoved = 1; + /* Update Surprise removed to TRUE */ + cardp->surprise_removed = 1; list_del(&cardp->list); - /* card is removed and we can call wlan_remove_card */ - lbs_deb_usbd(&cardp->udev->dev, "call remove card\n"); - libertas_remove_mesh(priv); - libertas_remove_card(priv); + if (priv) { + wlan_adapter *adapter = priv->adapter; + + adapter->surpriseremoved = 1; + libertas_stop_card(priv); + libertas_remove_mesh(priv); + libertas_remove_card(priv); + } /* Unlink and free urb */ if_usb_free(cardp); @@ -269,7 +297,7 @@ static void if_usb_disconnect(struct usb_interface *intf) usb_set_intfdata(intf, NULL); usb_put_dev(interface_to_usbdev(intf)); - return; + lbs_deb_leave(LBS_DEB_MAIN); } /** @@ -277,12 +305,11 @@ static void 
if_usb_disconnect(struct usb_interface *intf) * @param priv pointer to wlan_private * @return 0 */ -static int if_prog_firmware(wlan_private * priv) +static int if_prog_firmware(struct usb_card_rec *cardp) { - struct usb_card_rec *cardp = priv->card; struct FWData *fwdata; struct fwheader *fwheader; - u8 *firmware = priv->firmware->data; + u8 *firmware = cardp->fw->data; fwdata = kmalloc(sizeof(struct FWData), GFP_ATOMIC); @@ -330,7 +357,7 @@ static int if_prog_firmware(wlan_private * priv) cardp->totalbytes); */ memcpy(cardp->bulk_out_buffer, fwheader, FW_DATA_XMIT_SIZE); - usb_tx_block(priv, cardp->bulk_out_buffer, FW_DATA_XMIT_SIZE); + usb_tx_block(cardp, cardp->bulk_out_buffer, FW_DATA_XMIT_SIZE); } else if (fwdata->fwheader.dnldcmd == cpu_to_le32(FW_HAS_LAST_BLOCK)) { /* @@ -340,7 +367,7 @@ static int if_prog_firmware(wlan_private * priv) "Donwloading FW JUMP BLOCK\n"); */ memcpy(cardp->bulk_out_buffer, fwheader, FW_DATA_XMIT_SIZE); - usb_tx_block(priv, cardp->bulk_out_buffer, FW_DATA_XMIT_SIZE); + usb_tx_block(cardp, cardp->bulk_out_buffer, FW_DATA_XMIT_SIZE); cardp->fwfinalblk = 1; } @@ -355,17 +382,20 @@ static int if_prog_firmware(wlan_private * priv) return 0; } -static int libertas_do_reset(wlan_private *priv) +static int if_usb_reset_device(struct usb_card_rec *cardp) { int ret; - struct usb_card_rec *cardp = priv->card; + wlan_private * priv = cardp->priv; lbs_deb_enter(LBS_DEB_USB); + /* Try a USB port reset first, if that fails send the reset + * command to the firmware. + */ ret = usb_reset_device(cardp->udev); - if (!ret) { + if (!ret && priv) { msleep(10); - if_usb_reset_device(priv); + ret = libertas_reset_device(priv); msleep(10); } @@ -381,14 +411,12 @@ static int libertas_do_reset(wlan_private *priv) * @param nb data length * @return 0 or -1 */ -int usb_tx_block(wlan_private * priv, u8 * payload, u16 nb) +static int usb_tx_block(struct usb_card_rec *cardp, u8 * payload, u16 nb) { - /* pointer to card structure */ - struct usb_card_rec *cardp = priv->card; int ret = -1; /* check if device is removed */ - if (priv->adapter->surpriseremoved) { + if (cardp->surprise_removed) { lbs_deb_usbd(&cardp->udev->dev, "Device removed\n"); goto tx_ret; } @@ -396,7 +424,7 @@ int usb_tx_block(wlan_private * priv, u8 * payload, u16 nb) usb_fill_bulk_urb(cardp->tx_urb, cardp->udev, usb_sndbulkpipe(cardp->udev, cardp->bulk_out_endpointAddr), - payload, nb, if_usb_write_bulk_callback, priv); + payload, nb, if_usb_write_bulk_callback, cardp); cardp->tx_urb->transfer_flags |= URB_ZERO_PACKET; @@ -413,11 +441,9 @@ int usb_tx_block(wlan_private * priv, u8 * payload, u16 nb) return ret; } -static int __if_usb_submit_rx_urb(wlan_private * priv, - void (*callbackfn) - (struct urb *urb)) +static int __if_usb_submit_rx_urb(struct usb_card_rec *cardp, + void (*callbackfn)(struct urb *urb)) { - struct usb_card_rec *cardp = priv->card; struct sk_buff *skb; struct read_cb_info *rinfo = &cardp->rinfo; int ret = -1; @@ -453,22 +479,21 @@ static int __if_usb_submit_rx_urb(wlan_private * priv, return ret; } -static inline int if_usb_submit_rx_urb_fwload(wlan_private * priv) +static int if_usb_submit_rx_urb_fwload(struct usb_card_rec *cardp) { - return __if_usb_submit_rx_urb(priv, &if_usb_receive_fwload); + return __if_usb_submit_rx_urb(cardp, &if_usb_receive_fwload); } -static inline int if_usb_submit_rx_urb(wlan_private * priv) +static int if_usb_submit_rx_urb(struct usb_card_rec *cardp) { - return __if_usb_submit_rx_urb(priv, &if_usb_receive); + return __if_usb_submit_rx_urb(cardp, &if_usb_receive); } 
static void if_usb_receive_fwload(struct urb *urb) { struct read_cb_info *rinfo = (struct read_cb_info *)urb->context; - wlan_private *priv = rinfo->priv; struct sk_buff *skb = rinfo->skb; - struct usb_card_rec *cardp = (struct usb_card_rec *)priv->card; + struct usb_card_rec *cardp = (struct usb_card_rec *)rinfo->cardp; struct fwsyncheader *syncfwheader; struct bootcmdrespStr bootcmdresp; @@ -484,7 +509,7 @@ static void if_usb_receive_fwload(struct urb *urb) sizeof(bootcmdresp)); if (le16_to_cpu(cardp->udev->descriptor.bcdDevice) < 0x3106) { kfree_skb(skb); - if_usb_submit_rx_urb_fwload(priv); + if_usb_submit_rx_urb_fwload(cardp); cardp->bootcmdresp = 1; lbs_deb_usbd(&cardp->udev->dev, "Received valid boot command response\n"); @@ -508,7 +533,7 @@ static void if_usb_receive_fwload(struct urb *urb) "Received valid boot command response\n"); } kfree_skb(skb); - if_usb_submit_rx_urb_fwload(priv); + if_usb_submit_rx_urb_fwload(cardp); return; } @@ -544,9 +569,9 @@ static void if_usb_receive_fwload(struct urb *urb) goto exit; } - if_prog_firmware(priv); + if_prog_firmware(cardp); - if_usb_submit_rx_urb_fwload(priv); + if_usb_submit_rx_urb_fwload(cardp); exit: kfree(syncfwheader); @@ -596,11 +621,11 @@ static inline void process_cmdrequest(int recvlength, u8 *recvbuff, * data to clear the interrupt */ if (!priv->adapter->cur_cmd) { cmdbuf = priv->upld_buf; - priv->adapter->hisregcpy &= ~his_cmdupldrdy; + priv->adapter->hisregcpy &= ~MRVDRV_CMD_UPLD_RDY; } else cmdbuf = priv->adapter->cur_cmd->bufvirtualaddr; - cardp->usb_int_cause |= his_cmdupldrdy; + cardp->usb_int_cause |= MRVDRV_CMD_UPLD_RDY; priv->upld_len = (recvlength - MESSAGE_HEADER_LEN); memcpy(cmdbuf, recvbuff + MESSAGE_HEADER_LEN, priv->upld_len); @@ -625,17 +650,19 @@ static inline void process_cmdrequest(int recvlength, u8 *recvbuff, static void if_usb_receive(struct urb *urb) { struct read_cb_info *rinfo = (struct read_cb_info *)urb->context; - wlan_private *priv = rinfo->priv; struct sk_buff *skb = rinfo->skb; - struct usb_card_rec *cardp = (struct usb_card_rec *)priv->card; + struct usb_card_rec *cardp = (struct usb_card_rec *) rinfo->cardp; + wlan_private * priv = cardp->priv; int recvlength = urb->actual_length; u8 *recvbuff = NULL; - u32 recvtype; + u32 recvtype = 0; lbs_deb_enter(LBS_DEB_USB); if (recvlength) { + __le32 tmp; + if (urb->status) { lbs_deb_usbd(&cardp->udev->dev, "URB status is failed\n"); @@ -644,18 +671,14 @@ static void if_usb_receive(struct urb *urb) } recvbuff = skb->data + IPFIELD_ALIGN_OFFSET; - memcpy(&recvtype, recvbuff, sizeof(u32)); - lbs_deb_usbd(&cardp->udev->dev, - "Recv length = 0x%x\n", recvlength); - lbs_deb_usbd(&cardp->udev->dev, - "Receive type = 0x%X\n", recvtype); - recvtype = le32_to_cpu(recvtype); + memcpy(&tmp, recvbuff, sizeof(u32)); + recvtype = le32_to_cpu(tmp); lbs_deb_usbd(&cardp->udev->dev, - "Receive type after = 0x%X\n", recvtype); + "Recv length = 0x%x, Recv type = 0x%X\n", + recvlength, recvtype); } else if (urb->status) goto rx_exit; - switch (recvtype) { case CMD_TYPE_DATA: process_cmdtypedata(recvlength, skb, cardp, priv); @@ -677,18 +700,20 @@ static void if_usb_receive(struct urb *urb) break; } cardp->usb_event_cause <<= 3; - cardp->usb_int_cause |= his_cardevent; + cardp->usb_int_cause |= MRVDRV_CARDEVENT; kfree_skb(skb); libertas_interrupt(priv->dev); spin_unlock(&priv->adapter->driver_lock); goto rx_exit; default: + lbs_deb_usbd(&cardp->udev->dev, "Unknown command type 0x%X\n", + recvtype); kfree_skb(skb); break; } setup_for_next: - if_usb_submit_rx_urb(priv); + 
if_usb_submit_rx_urb(cardp); rx_exit: lbs_deb_leave(LBS_DEB_USB); } @@ -703,21 +728,19 @@ static void if_usb_receive(struct urb *urb) */ static int if_usb_host_to_card(wlan_private * priv, u8 type, u8 * payload, u16 nb) { - int ret = -1; - u32 tmp; struct usb_card_rec *cardp = (struct usb_card_rec *)priv->card; lbs_deb_usbd(&cardp->udev->dev,"*** type = %u\n", type); lbs_deb_usbd(&cardp->udev->dev,"size after = %d\n", nb); if (type == MVMS_CMD) { - tmp = cpu_to_le32(CMD_TYPE_REQUEST); + __le32 tmp = cpu_to_le32(CMD_TYPE_REQUEST); priv->dnld_sent = DNLD_CMD_SENT; memcpy(cardp->bulk_out_buffer, (u8 *) & tmp, MESSAGE_HEADER_LEN); } else { - tmp = cpu_to_le32(CMD_TYPE_DATA); + __le32 tmp = cpu_to_le32(CMD_TYPE_DATA); priv->dnld_sent = DNLD_DATA_SENT; memcpy(cardp->bulk_out_buffer, (u8 *) & tmp, MESSAGE_HEADER_LEN); @@ -725,10 +748,8 @@ static int if_usb_host_to_card(wlan_private * priv, u8 type, u8 * payload, u16 n memcpy((cardp->bulk_out_buffer + MESSAGE_HEADER_LEN), payload, nb); - ret = - usb_tx_block(priv, cardp->bulk_out_buffer, nb + MESSAGE_HEADER_LEN); - - return ret; + return usb_tx_block(cardp, cardp->bulk_out_buffer, + nb + MESSAGE_HEADER_LEN); } /* called with adapter->driver_lock held */ @@ -747,80 +768,109 @@ static int if_usb_get_int_status(wlan_private * priv, u8 * ireg) static int if_usb_read_event_cause(wlan_private * priv) { struct usb_card_rec *cardp = priv->card; + priv->adapter->eventcause = cardp->usb_event_cause; /* Re-submit rx urb here to avoid event lost issue */ - if_usb_submit_rx_urb(priv); + if_usb_submit_rx_urb(cardp); return 0; } -static int if_usb_reset_device(wlan_private *priv) +/** + * @brief This function issues Boot command to the Boot2 code + * @param ivalue 1:Boot from FW by USB-Download + * 2:Boot from FW in EEPROM + * @return 0 + */ +static int if_usb_issue_boot_command(struct usb_card_rec *cardp, int ivalue) { - int ret; - - lbs_deb_enter(LBS_DEB_USB); - ret = libertas_prepare_and_send_command(priv, cmd_802_11_reset, - cmd_act_halt, 0, 0, NULL); - msleep_interruptible(10); - - lbs_deb_leave_args(LBS_DEB_USB, "ret %d", ret); - return ret; -} + struct bootcmdstr sbootcmd; + int i; -static int if_usb_unregister_dev(wlan_private * priv) -{ - int ret = 0; + /* Prepare command */ + sbootcmd.u32magicnumber = cpu_to_le32(BOOT_CMD_MAGIC_NUMBER); + sbootcmd.u8cmd_tag = ivalue; + for (i=0; i<11; i++) + sbootcmd.au8dumy[i]=0x00; + memcpy(cardp->bulk_out_buffer, &sbootcmd, sizeof(struct bootcmdstr)); - /* Need to send a Reset command to device before USB resources freed - * and wlan_remove_card() called, then device can handle FW download - * again. - */ - if (priv) - if_usb_reset_device(priv); + /* Issue command */ + usb_tx_block(cardp, cardp->bulk_out_buffer, sizeof(struct bootcmdstr)); - return ret; + return 0; } /** - * @brief This function register usb device and initialize parameter - * @param priv pointer to wlan_private - * @return 0 or -1 + * @brief This function checks the validity of Boot2/FW image. 
+ * + * @param data pointer to image + * len image length + * @return 0 or -1 */ -static int if_usb_register_dev(wlan_private * priv) +static int check_fwfile_format(u8 *data, u32 totlen) { - struct usb_card_rec *cardp = (struct usb_card_rec *)priv->card; + u32 bincmd, exit; + u32 blksize, offset, len; + int ret; - lbs_deb_enter(LBS_DEB_USB); + ret = 1; + exit = len = 0; - cardp->priv = priv; - cardp->eth_dev = priv->dev; - priv->hotplug_device = &(cardp->udev->dev); + do { + struct fwheader *fwh = (void *)data; + + bincmd = le32_to_cpu(fwh->dnldcmd); + blksize = le32_to_cpu(fwh->datalength); + switch (bincmd) { + case FW_HAS_DATA_TO_RECV: + offset = sizeof(struct fwheader) + blksize; + data += offset; + len += offset; + if (len >= totlen) + exit = 1; + break; + case FW_HAS_LAST_BLOCK: + exit = 1; + ret = 0; + break; + default: + exit = 1; + break; + } + } while (!exit); - lbs_deb_usbd(&cardp->udev->dev, "udev pointer is at %p\n", - cardp->udev); + if (ret) + lbs_pr_err("firmware file format check FAIL\n"); + else + lbs_deb_fw("firmware file format check PASS\n"); - lbs_deb_leave(LBS_DEB_USB); - return 0; + return ret; } - -static int if_usb_prog_firmware(wlan_private * priv) +static int if_usb_prog_firmware(struct usb_card_rec *cardp) { - struct usb_card_rec *cardp = priv->card; int i = 0; static int reset_count = 10; int ret = 0; lbs_deb_enter(LBS_DEB_USB); - cardp->rinfo.priv = priv; + if ((ret = request_firmware(&cardp->fw, libertas_fw_name, + &cardp->udev->dev)) < 0) { + lbs_pr_err("request_firmware() failed with %#x\n", ret); + lbs_pr_err("firmware %s not found\n", libertas_fw_name); + goto done; + } + + if (check_fwfile_format(cardp->fw->data, cardp->fw->size)) + goto release_fw; restart: - if (if_usb_submit_rx_urb_fwload(priv) < 0) { + if (if_usb_submit_rx_urb_fwload(cardp) < 0) { lbs_deb_usbd(&cardp->udev->dev, "URB submission is failed\n"); ret = -1; - goto done; + goto release_fw; } cardp->bootcmdresp = 0; @@ -828,7 +878,7 @@ static int if_usb_prog_firmware(wlan_private * priv) int j = 0; i++; /* Issue Boot command = 1, Boot from Download-FW */ - if_usb_issue_boot_command(priv, BOOT_CMD_FW_BY_USB); + if_usb_issue_boot_command(cardp, BOOT_CMD_FW_BY_USB); /* wait for command response */ do { j++; @@ -838,14 +888,13 @@ static int if_usb_prog_firmware(wlan_private * priv) if (cardp->bootcmdresp == 0) { if (--reset_count >= 0) { - libertas_do_reset(priv); + if_usb_reset_device(cardp); goto restart; } return -1; } i = 0; - priv->adapter->fw_ready = 0; cardp->totalbytes = 0; cardp->fwlastblksent = 0; @@ -855,40 +904,38 @@ static int if_usb_prog_firmware(wlan_private * priv) cardp->totalbytes = 0; cardp->fwfinalblk = 0; - if_prog_firmware(priv); + if_prog_firmware(cardp); do { lbs_deb_usbd(&cardp->udev->dev,"Wlan sched timeout\n"); i++; msleep_interruptible(100); - if (priv->adapter->surpriseremoved || i >= 20) + if (cardp->surprise_removed || i >= 20) break; } while (!cardp->fwdnldover); if (!cardp->fwdnldover) { lbs_pr_info("failed to load fw, resetting device!\n"); if (--reset_count >= 0) { - libertas_do_reset(priv); + if_usb_reset_device(cardp); goto restart; } lbs_pr_info("FW download failure, time = %d ms\n", i * 100); ret = -1; - goto done; + goto release_fw; } - if_usb_submit_rx_urb(priv); - - /* Delay 200 ms to waiting for the FW ready */ - msleep_interruptible(200); - - priv->adapter->fw_ready = 1; +release_fw: + release_firmware(cardp->fw); + cardp->fw = NULL; done: lbs_deb_leave_args(LBS_DEB_USB, "ret %d", ret); return ret; } + #ifdef CONFIG_PM static int 
if_usb_suspend(struct usb_interface *intf, pm_message_t message) { @@ -900,6 +947,19 @@ static int if_usb_suspend(struct usb_interface *intf, pm_message_t message) if (priv->adapter->psstate != PS_STATE_FULL_POWER) return -1; + if (priv->mesh_dev && !priv->mesh_autostart_enabled) { + /* Mesh autostart must be activated while sleeping + * On resume it will go back to the current state + */ + struct cmd_ds_mesh_access mesh_access; + memset(&mesh_access, 0, sizeof(mesh_access)); + mesh_access.data[0] = cpu_to_le32(1); + libertas_prepare_and_send_command(priv, + CMD_MESH_ACCESS, + CMD_ACT_MESH_SET_AUTOSTART_ENABLED, + CMD_OPTION_WAITFORRSP, 0, (void *)&mesh_access); + } + netif_device_detach(cardp->eth_dev); netif_device_detach(priv->mesh_dev); @@ -927,6 +987,19 @@ static int if_usb_resume(struct usb_interface *intf) netif_device_attach(cardp->eth_dev); netif_device_attach(priv->mesh_dev); + if (priv->mesh_dev && !priv->mesh_autostart_enabled) { + /* Mesh autostart was activated while sleeping + * Disable it if appropriate + */ + struct cmd_ds_mesh_access mesh_access; + memset(&mesh_access, 0, sizeof(mesh_access)); + mesh_access.data[0] = cpu_to_le32(0); + libertas_prepare_and_send_command(priv, + CMD_MESH_ACCESS, + CMD_ACT_MESH_SET_AUTOSTART_ENABLED, + CMD_OPTION_WAITFORRSP, 0, (void *)&mesh_access); + } + lbs_deb_leave(LBS_DEB_USB); return 0; } @@ -970,8 +1043,10 @@ static void if_usb_exit_module(void) lbs_deb_enter(LBS_DEB_MAIN); - list_for_each_entry_safe(cardp, cardp_temp, &usb_devices, list) - if_usb_reset_device((wlan_private *) cardp->priv); + list_for_each_entry_safe(cardp, cardp_temp, &usb_devices, list) { + libertas_prepare_and_send_command(cardp->priv, CMD_802_11_RESET, + CMD_ACT_HALT, 0, 0, NULL); + } /* API unregisters the driver from USB subsystem */ usb_deregister(&if_usb_driver); diff --git a/drivers/net/wireless/libertas/if_usb.h b/drivers/net/wireless/libertas/if_usb.h index 156bb485e1a6f9929ec0b668815bc54c0974471a..e07a10ed28b5d34447ab28cd97d9be71faea8f73 100644 --- a/drivers/net/wireless/libertas/if_usb.h +++ b/drivers/net/wireless/libertas/if_usb.h @@ -38,7 +38,7 @@ struct bootcmdrespStr /* read callback private data */ struct read_cb_info { - wlan_private *priv; + struct usb_card_rec *cardp; struct sk_buff *skb; }; @@ -58,6 +58,7 @@ struct usb_card_rec { int bulk_out_size; u8 bulk_out_endpointAddr; + const struct firmware *fw; u8 CRC_OK; u32 fwseqnum; u32 lastseqnum; @@ -65,6 +66,7 @@ struct usb_card_rec { u32 fwlastblksent; u8 fwdnldover; u8 fwfinalblk; + u8 surprise_removed; u32 usb_event_cause; u8 usb_int_cause; @@ -102,8 +104,4 @@ struct fwsyncheader { #define FW_DATA_XMIT_SIZE \ sizeof(struct fwheader) + le32_to_cpu(fwdata->fwheader.datalength) + sizeof(u32) -int usb_tx_block(wlan_private *priv, u8 *payload, u16 nb); -void if_usb_free(struct usb_card_rec *cardp); -int if_usb_issue_boot_command(wlan_private *priv, int ivalue); - #endif diff --git a/drivers/net/wireless/libertas/join.c b/drivers/net/wireless/libertas/join.c index 78ac3064a0bd20d3852c1d1e7b530573df26aa9a..dc24a05c9447557d72f3443020e8e00ef0cbc701 100644 --- a/drivers/net/wireless/libertas/join.c +++ b/drivers/net/wireless/libertas/join.c @@ -17,10 +17,13 @@ #include "dev.h" #include "assoc.h" -#define AD_HOC_CAP_PRIVACY_ON 1 +/* The firmware needs certain bits masked out of the beacon-derviced capability + * field when associating/joining to BSSs. + */ +#define CAPINFO_MASK (~(0xda00)) /** - * @brief This function finds out the common rates between rate1 and rate2. 
+ * @brief This function finds common rates between rate1 and card rates. * * It will fill common rates in rate1 as output if found. * @@ -29,75 +32,87 @@ * * @param adapter A pointer to wlan_adapter structure * @param rate1 the buffer which keeps input and output - * @param rate1_size the size of rate1 buffer - * @param rate2 the buffer which keeps rate2 - * @param rate2_size the size of rate2 buffer. + * @param rate1_size the size of rate1 buffer; new size of buffer on return * * @return 0 or -1 */ -static int get_common_rates(wlan_adapter * adapter, u8 * rate1, - int rate1_size, u8 * rate2, int rate2_size) +static int get_common_rates(wlan_adapter * adapter, u8 * rates, u16 *rates_size) { - u8 *ptr = rate1; - int ret = 0; + u8 *card_rates = libertas_bg_rates; + size_t num_card_rates = sizeof(libertas_bg_rates); + int ret = 0, i, j; u8 tmp[30]; - int i; + size_t tmp_size = 0; - memset(&tmp, 0, sizeof(tmp)); - memcpy(&tmp, rate1, min_t(size_t, rate1_size, sizeof(tmp))); - memset(rate1, 0, rate1_size); - - /* Mask the top bit of the original values */ - for (i = 0; tmp[i] && i < sizeof(tmp); i++) - tmp[i] &= 0x7F; - - for (i = 0; rate2[i] && i < rate2_size; i++) { - /* Check for Card Rate in tmp, excluding the top bit */ - if (strchr(tmp, rate2[i] & 0x7F)) { - /* values match, so copy the Card Rate to rate1 */ - *rate1++ = rate2[i]; + /* For each rate in card_rates that exists in rate1, copy to tmp */ + for (i = 0; card_rates[i] && (i < num_card_rates); i++) { + for (j = 0; rates[j] && (j < *rates_size); j++) { + if (rates[j] == card_rates[i]) + tmp[tmp_size++] = card_rates[i]; } } - lbs_dbg_hex("rate1 (AP) rates:", tmp, sizeof(tmp)); - lbs_dbg_hex("rate2 (Card) rates:", rate2, rate2_size); - lbs_dbg_hex("Common rates:", ptr, rate1_size); - lbs_deb_join("Tx datarate is set to 0x%X\n", adapter->datarate); + lbs_deb_hex(LBS_DEB_JOIN, "AP rates ", rates, *rates_size); + lbs_deb_hex(LBS_DEB_JOIN, "card rates ", card_rates, num_card_rates); + lbs_deb_hex(LBS_DEB_JOIN, "common rates", tmp, tmp_size); + lbs_deb_join("Tx datarate is currently 0x%X\n", adapter->cur_rate); - if (!adapter->is_datarate_auto) { - while (*ptr) { - if ((*ptr & 0x7f) == adapter->datarate) { - ret = 0; + if (!adapter->auto_rate) { + for (i = 0; i < tmp_size; i++) { + if (tmp[i] == adapter->cur_rate) goto done; - } - ptr++; } - lbs_pr_alert( "Previously set fixed data rate %#x isn't " - "compatible with the network.\n", adapter->datarate); - + lbs_pr_alert("Previously set fixed data rate %#x isn't " + "compatible with the network.\n", adapter->cur_rate); ret = -1; goto done; } - ret = 0; + done: + memset(rates, 0, *rates_size); + *rates_size = min_t(int, tmp_size, *rates_size); + memcpy(rates, tmp, *rates_size); return ret; } -int libertas_send_deauth(wlan_private * priv) + +/** + * @brief Sets the MSB on basic rates as the firmware requires + * + * Scan through an array and set the MSB for basic data rates. 
+ * + * @param rates buffer of data rates + * @param len size of buffer + */ +static void libertas_set_basic_rate_flags(u8 * rates, size_t len) { - wlan_adapter *adapter = priv->adapter; - int ret = 0; + int i; - if (adapter->mode == IW_MODE_INFRA && - adapter->connect_status == libertas_connected) - ret = libertas_send_deauthentication(priv); - else - ret = -ENOTSUPP; + for (i = 0; i < len; i++) { + if (rates[i] == 0x02 || rates[i] == 0x04 || + rates[i] == 0x0b || rates[i] == 0x16) + rates[i] |= 0x80; + } +} - return ret; +/** + * @brief Unsets the MSB on basic rates + * + * Scan through an array and unset the MSB for basic data rates. + * + * @param rates buffer of data rates + * @param len size of buffer + */ +void libertas_unset_basic_rate_flags(u8 * rates, size_t len) +{ + int i; + + for (i = 0; i < len; i++) + rates[i] &= 0x7f; } + /** * @brief Associate to a specific BSS discovered in a scan * @@ -113,23 +128,24 @@ int wlan_associate(wlan_private * priv, struct assoc_request * assoc_req) lbs_deb_enter(LBS_DEB_JOIN); - ret = libertas_prepare_and_send_command(priv, cmd_802_11_authenticate, - 0, cmd_option_waitforrsp, + ret = libertas_prepare_and_send_command(priv, CMD_802_11_AUTHENTICATE, + 0, CMD_OPTION_WAITFORRSP, 0, assoc_req->bss.bssid); if (ret) goto done; /* set preamble to firmware */ - if (adapter->capinfo.shortpreamble && assoc_req->bss.cap.shortpreamble) - adapter->preamble = cmd_type_short_preamble; + if ( (adapter->capability & WLAN_CAPABILITY_SHORT_PREAMBLE) + && (assoc_req->bss.capability & WLAN_CAPABILITY_SHORT_PREAMBLE)) + adapter->preamble = CMD_TYPE_SHORT_PREAMBLE; else - adapter->preamble = cmd_type_long_preamble; + adapter->preamble = CMD_TYPE_LONG_PREAMBLE; libertas_set_radio_control(priv); - ret = libertas_prepare_and_send_command(priv, cmd_802_11_associate, - 0, cmd_option_waitforrsp, 0, assoc_req); + ret = libertas_prepare_and_send_command(priv, CMD_802_11_ASSOCIATE, + 0, CMD_OPTION_WAITFORRSP, 0, assoc_req); done: lbs_deb_leave_args(LBS_DEB_JOIN, "ret %d", ret); @@ -150,12 +166,12 @@ int libertas_start_adhoc_network(wlan_private * priv, struct assoc_request * ass adapter->adhoccreate = 1; - if (!adapter->capinfo.shortpreamble) { - lbs_deb_join("AdhocStart: Long preamble\n"); - adapter->preamble = cmd_type_long_preamble; - } else { + if (adapter->capability & WLAN_CAPABILITY_SHORT_PREAMBLE) { lbs_deb_join("AdhocStart: Short preamble\n"); - adapter->preamble = cmd_type_short_preamble; + adapter->preamble = CMD_TYPE_SHORT_PREAMBLE; + } else { + lbs_deb_join("AdhocStart: Long preamble\n"); + adapter->preamble = CMD_TYPE_LONG_PREAMBLE; } libertas_set_radio_control(priv); @@ -163,8 +179,8 @@ int libertas_start_adhoc_network(wlan_private * priv, struct assoc_request * ass lbs_deb_join("AdhocStart: channel = %d\n", assoc_req->channel); lbs_deb_join("AdhocStart: band = %d\n", assoc_req->band); - ret = libertas_prepare_and_send_command(priv, cmd_802_11_ad_hoc_start, - 0, cmd_option_waitforrsp, 0, assoc_req); + ret = libertas_prepare_and_send_command(priv, CMD_802_11_AD_HOC_START, + 0, CMD_OPTION_WAITFORRSP, 0, assoc_req); return ret; } @@ -194,25 +210,37 @@ int libertas_join_adhoc_network(wlan_private * priv, struct assoc_request * asso bss->ssid_len); /* check if the requested SSID is already joined */ - if (adapter->curbssparams.ssid_len + if ( adapter->curbssparams.ssid_len && !libertas_ssid_cmp(adapter->curbssparams.ssid, adapter->curbssparams.ssid_len, bss->ssid, bss->ssid_len) - && (adapter->mode == IW_MODE_ADHOC)) { - lbs_deb_join( - "ADHOC_J_CMD: New ad-hoc SSID 
is the same as current, " - "not attempting to re-join"); - return -1; + && (adapter->mode == IW_MODE_ADHOC) + && (adapter->connect_status == LIBERTAS_CONNECTED)) { + union iwreq_data wrqu; + + lbs_deb_join("ADHOC_J_CMD: New ad-hoc SSID is the same as " + "current, not attempting to re-join"); + + /* Send the re-association event though, because the association + * request really was successful, even if just a null-op. + */ + memset(&wrqu, 0, sizeof(wrqu)); + memcpy(wrqu.ap_addr.sa_data, adapter->curbssparams.bssid, + ETH_ALEN); + wrqu.ap_addr.sa_family = ARPHRD_ETHER; + wireless_send_event(priv->dev, SIOCGIWAP, &wrqu, NULL); + goto out; } - /*Use shortpreamble only when both creator and card supports + /* Use shortpreamble only when both creator and card supports short preamble */ - if (!bss->cap.shortpreamble || !adapter->capinfo.shortpreamble) { + if ( !(bss->capability & WLAN_CAPABILITY_SHORT_PREAMBLE) + || !(adapter->capability & WLAN_CAPABILITY_SHORT_PREAMBLE)) { lbs_deb_join("AdhocJoin: Long preamble\n"); - adapter->preamble = cmd_type_long_preamble; + adapter->preamble = CMD_TYPE_LONG_PREAMBLE; } else { lbs_deb_join("AdhocJoin: Short preamble\n"); - adapter->preamble = cmd_type_short_preamble; + adapter->preamble = CMD_TYPE_SHORT_PREAMBLE; } libertas_set_radio_control(priv); @@ -222,17 +250,18 @@ int libertas_join_adhoc_network(wlan_private * priv, struct assoc_request * asso adapter->adhoccreate = 0; - ret = libertas_prepare_and_send_command(priv, cmd_802_11_ad_hoc_join, - 0, cmd_option_waitforrsp, + ret = libertas_prepare_and_send_command(priv, CMD_802_11_AD_HOC_JOIN, + 0, CMD_OPTION_WAITFORRSP, OID_802_11_SSID, assoc_req); +out: return ret; } int libertas_stop_adhoc_network(wlan_private * priv) { - return libertas_prepare_and_send_command(priv, cmd_802_11_ad_hoc_stop, - 0, cmd_option_waitforrsp, 0, NULL); + return libertas_prepare_and_send_command(priv, CMD_802_11_AD_HOC_STOP, + 0, CMD_OPTION_WAITFORRSP, 0, NULL); } /** @@ -243,8 +272,8 @@ int libertas_stop_adhoc_network(wlan_private * priv) */ int libertas_send_deauthentication(wlan_private * priv) { - return libertas_prepare_and_send_command(priv, cmd_802_11_deauthenticate, - 0, cmd_option_waitforrsp, 0, NULL); + return libertas_prepare_and_send_command(priv, CMD_802_11_DEAUTHENTICATE, + 0, CMD_OPTION_WAITFORRSP, 0, NULL); } /** @@ -264,10 +293,11 @@ int libertas_cmd_80211_authenticate(wlan_private * priv, struct cmd_ds_802_11_authenticate *pauthenticate = &cmd->params.auth; int ret = -1; u8 *bssid = pdata_buf; + DECLARE_MAC_BUF(mac); lbs_deb_enter(LBS_DEB_JOIN); - cmd->command = cpu_to_le16(cmd_802_11_authenticate); + cmd->command = cpu_to_le16(CMD_802_11_AUTHENTICATE); cmd->size = cpu_to_le16(sizeof(struct cmd_ds_802_11_authenticate) + S_DS_GEN); @@ -290,8 +320,8 @@ int libertas_cmd_80211_authenticate(wlan_private * priv, memcpy(pauthenticate->macaddr, bssid, ETH_ALEN); - lbs_deb_join("AUTH_CMD: BSSID is : " MAC_FMT " auth=0x%X\n", - MAC_ARG(bssid), pauthenticate->authtype); + lbs_deb_join("AUTH_CMD: BSSID is : %s auth=0x%X\n", + print_mac(mac, bssid), pauthenticate->authtype); ret = 0; out: @@ -307,7 +337,7 @@ int libertas_cmd_80211_deauthenticate(wlan_private * priv, lbs_deb_enter(LBS_DEB_JOIN); - cmd->command = cpu_to_le16(cmd_802_11_deauthenticate); + cmd->command = cpu_to_le16(CMD_802_11_DEAUTHENTICATE); cmd->size = cpu_to_le16(sizeof(struct cmd_ds_802_11_deauthenticate) + S_DS_GEN); @@ -330,9 +360,7 @@ int libertas_cmd_80211_associate(wlan_private * priv, int ret = 0; struct assoc_request * assoc_req = pdata_buf; 
struct bss_descriptor * bss = &assoc_req->bss; - u8 *card_rates; u8 *pos; - int card_rates_size; u16 tmpcap, tmplen; struct mrvlietypes_ssidparamset *ssid; struct mrvlietypes_phyparamset *phy; @@ -349,15 +377,15 @@ int libertas_cmd_80211_associate(wlan_private * priv, goto done; } - cmd->command = cpu_to_le16(cmd_802_11_associate); + cmd->command = cpu_to_le16(CMD_802_11_ASSOCIATE); memcpy(passo->peerstaaddr, bss->bssid, sizeof(passo->peerstaaddr)); pos += sizeof(passo->peerstaaddr); /* set the listen interval */ - passo->listeninterval = cpu_to_le16(adapter->listeninterval); + passo->listeninterval = cpu_to_le16(MRVDRV_DEFAULT_LISTEN_INTERVAL); - pos += sizeof(passo->capinfo); + pos += sizeof(passo->capability); pos += sizeof(passo->listeninterval); pos += sizeof(passo->bcnperiod); pos += sizeof(passo->dtimperiod); @@ -386,23 +414,24 @@ int libertas_cmd_80211_associate(wlan_private * priv, rates = (struct mrvlietypes_ratesparamset *) pos; rates->header.type = cpu_to_le16(TLV_TYPE_RATES); - - memcpy(&rates->rates, &bss->libertas_supported_rates, WLAN_SUPPORTED_RATES); - - card_rates = libertas_supported_rates; - card_rates_size = sizeof(libertas_supported_rates); - - if (get_common_rates(adapter, rates->rates, WLAN_SUPPORTED_RATES, - card_rates, card_rates_size)) { + memcpy(&rates->rates, &bss->rates, MAX_RATES); + tmplen = MAX_RATES; + if (get_common_rates(adapter, rates->rates, &tmplen)) { ret = -1; goto done; } - - tmplen = min_t(size_t, strlen(rates->rates), WLAN_SUPPORTED_RATES); - adapter->curbssparams.numofrates = tmplen; - pos += sizeof(rates->header) + tmplen; rates->header.len = cpu_to_le16(tmplen); + lbs_deb_join("ASSOC_CMD: num rates = %u\n", tmplen); + + /* Copy the infra. association rates into Current BSS state structure */ + memset(&adapter->curbssparams.rates, 0, sizeof(adapter->curbssparams.rates)); + memcpy(&adapter->curbssparams.rates, &rates->rates, tmplen); + + /* Set MSB on basic rates as the firmware requires, but _after_ + * copying to current bss rates. + */ + libertas_set_basic_rate_flags(rates->rates, tmplen); if (assoc_req->secinfo.WPAenabled || assoc_req->secinfo.WPA2enabled) { rsn = (struct mrvlietypes_rsnparamset *) pos; @@ -411,7 +440,7 @@ int libertas_cmd_80211_associate(wlan_private * priv, tmplen = (u16) assoc_req->wpa_ie[1]; rsn->header.len = cpu_to_le16(tmplen); memcpy(rsn->rsnie, &assoc_req->wpa_ie[2], tmplen); - lbs_dbg_hex("ASSOC_CMD: RSN IE", (u8 *) rsn, + lbs_deb_hex(LBS_DEB_JOIN, "ASSOC_CMD: RSN IE", (u8 *) rsn, sizeof(rsn->header) + tmplen); pos += sizeof(rsn->header) + tmplen; } @@ -419,20 +448,6 @@ int libertas_cmd_80211_associate(wlan_private * priv, /* update curbssparams */ adapter->curbssparams.channel = bss->phyparamset.dsparamset.currentchan; - /* Copy the infra. 
association rates into Current BSS state structure */ - memcpy(&adapter->curbssparams.datarates, &rates->rates, - min_t(size_t, sizeof(adapter->curbssparams.datarates), - cpu_to_le16(rates->header.len))); - - lbs_deb_join("ASSOC_CMD: rates->header.len = %d\n", - cpu_to_le16(rates->header.len)); - - /* set IBSS field */ - if (bss->mode == IW_MODE_INFRA) { -#define CAPINFO_ESS_MODE 1 - passo->capinfo.ess = CAPINFO_ESS_MODE; - } - if (libertas_parse_dnld_countryinfo_11d(priv, bss)) { ret = -1; goto done; @@ -440,12 +455,13 @@ int libertas_cmd_80211_associate(wlan_private * priv, cmd->size = cpu_to_le16((u16) (pos - (u8 *) passo) + S_DS_GEN); - /* set the capability info at last */ - memcpy(&tmpcap, &bss->cap, sizeof(passo->capinfo)); - tmpcap &= CAPINFO_MASK; - lbs_deb_join("ASSOC_CMD: tmpcap=%4X CAPINFO_MASK=%4X\n", + /* set the capability info */ + tmpcap = (bss->capability & CAPINFO_MASK); + if (bss->mode == IW_MODE_INFRA) + tmpcap |= WLAN_CAPABILITY_ESS; + passo->capability = cpu_to_le16(tmpcap); + lbs_deb_join("ASSOC_CMD: capability=%4X CAPINFO_MASK=%4X\n", tmpcap, CAPINFO_MASK); - memcpy(&passo->capinfo, &tmpcap, sizeof(passo->capinfo)); done: lbs_deb_leave_args(LBS_DEB_JOIN, "ret %d", ret); @@ -459,8 +475,9 @@ int libertas_cmd_80211_ad_hoc_start(wlan_private * priv, struct cmd_ds_802_11_ad_hoc_start *adhs = &cmd->params.ads; int ret = 0; int cmdappendsize = 0; - int i; struct assoc_request * assoc_req = pdata_buf; + u16 tmpcap = 0; + size_t ratesize = 0; lbs_deb_enter(LBS_DEB_JOIN); @@ -469,7 +486,7 @@ int libertas_cmd_80211_ad_hoc_start(wlan_private * priv, goto done; } - cmd->command = cpu_to_le16(cmd_802_11_ad_hoc_start); + cmd->command = cpu_to_le16(CMD_802_11_AD_HOC_START); /* * Fill in the parameters for 2 data structures: @@ -483,17 +500,17 @@ int libertas_cmd_80211_ad_hoc_start(wlan_private * priv, * and operational rates. 
*/ - memset(adhs->SSID, 0, IW_ESSID_MAX_SIZE); - memcpy(adhs->SSID, assoc_req->ssid, assoc_req->ssid_len); + memset(adhs->ssid, 0, IW_ESSID_MAX_SIZE); + memcpy(adhs->ssid, assoc_req->ssid, assoc_req->ssid_len); lbs_deb_join("ADHOC_S_CMD: SSID '%s', ssid length %u\n", escape_essid(assoc_req->ssid, assoc_req->ssid_len), assoc_req->ssid_len); /* set the BSS type */ - adhs->bsstype = cmd_bss_type_ibss; + adhs->bsstype = CMD_BSS_TYPE_IBSS; adapter->mode = IW_MODE_ADHOC; - adhs->beaconperiod = cpu_to_le16(adapter->beaconperiod); + adhs->beaconperiod = cpu_to_le16(MRVDRV_BEACON_INTERVAL); /* set Physical param set */ #define DS_PARA_IE_ID 3 @@ -515,45 +532,36 @@ int libertas_cmd_80211_ad_hoc_start(wlan_private * priv, adhs->ssparamset.ibssparamset.elementid = IBSS_PARA_IE_ID; adhs->ssparamset.ibssparamset.len = IBSS_PARA_IE_LEN; - adhs->ssparamset.ibssparamset.atimwindow = cpu_to_le16(adapter->atimwindow); + adhs->ssparamset.ibssparamset.atimwindow = 0; /* set capability info */ - adhs->cap.ess = 0; - adhs->cap.ibss = 1; - - /* probedelay */ - adhs->probedelay = cpu_to_le16(cmd_scan_probe_delay_time); - - /* set up privacy in adapter->scantable[i] */ + tmpcap = WLAN_CAPABILITY_IBSS; if (assoc_req->secinfo.wep_enabled) { lbs_deb_join("ADHOC_S_CMD: WEP enabled, setting privacy on\n"); - adhs->cap.privacy = AD_HOC_CAP_PRIVACY_ON; + tmpcap |= WLAN_CAPABILITY_PRIVACY; } else { lbs_deb_join("ADHOC_S_CMD: WEP disabled, setting privacy off\n"); } + adhs->capability = cpu_to_le16(tmpcap); - memset(adhs->datarate, 0, sizeof(adhs->datarate)); - - if (adapter->adhoc_grate_enabled) { - memcpy(adhs->datarate, libertas_adhoc_rates_g, - min(sizeof(adhs->datarate), sizeof(libertas_adhoc_rates_g))); - } else { - memcpy(adhs->datarate, libertas_adhoc_rates_b, - min(sizeof(adhs->datarate), sizeof(libertas_adhoc_rates_b))); - } - - /* Find the last non zero */ - for (i = 0; i < sizeof(adhs->datarate) && adhs->datarate[i]; i++) ; + /* probedelay */ + adhs->probedelay = cpu_to_le16(CMD_SCAN_PROBE_DELAY_TIME); - adapter->curbssparams.numofrates = i; + memset(adhs->rates, 0, sizeof(adhs->rates)); + ratesize = min(sizeof(adhs->rates), sizeof(libertas_bg_rates)); + memcpy(adhs->rates, libertas_bg_rates, ratesize); /* Copy the ad-hoc creating rates into Current BSS state structure */ - memcpy(&adapter->curbssparams.datarates, - &adhs->datarate, adapter->curbssparams.numofrates); + memset(&adapter->curbssparams.rates, 0, sizeof(adapter->curbssparams.rates)); + memcpy(&adapter->curbssparams.rates, &adhs->rates, ratesize); + + /* Set MSB on basic rates as the firmware requires, but _after_ + * copying to current bss rates. 
+ */ + libertas_set_basic_rate_flags(adhs->rates, ratesize); lbs_deb_join("ADHOC_S_CMD: rates=%02x %02x %02x %02x \n", - adhs->datarate[0], adhs->datarate[1], - adhs->datarate[2], adhs->datarate[3]); + adhs->rates[0], adhs->rates[1], adhs->rates[2], adhs->rates[3]); lbs_deb_join("ADHOC_S_CMD: AD HOC Start command is ready\n"); @@ -575,7 +583,7 @@ int libertas_cmd_80211_ad_hoc_start(wlan_private * priv, int libertas_cmd_80211_ad_hoc_stop(wlan_private * priv, struct cmd_ds_command *cmd) { - cmd->command = cpu_to_le16(cmd_802_11_ad_hoc_stop); + cmd->command = cpu_to_le16(CMD_802_11_AD_HOC_STOP); cmd->size = cpu_to_le16(S_DS_GEN); return 0; @@ -585,101 +593,84 @@ int libertas_cmd_80211_ad_hoc_join(wlan_private * priv, struct cmd_ds_command *cmd, void *pdata_buf) { wlan_adapter *adapter = priv->adapter; - struct cmd_ds_802_11_ad_hoc_join *padhocjoin = &cmd->params.adj; + struct cmd_ds_802_11_ad_hoc_join *join_cmd = &cmd->params.adj; struct assoc_request * assoc_req = pdata_buf; struct bss_descriptor *bss = &assoc_req->bss; int cmdappendsize = 0; int ret = 0; - u8 *card_rates; - int card_rates_size; - u16 tmpcap; - int i; + u16 ratesize = 0; + DECLARE_MAC_BUF(mac); lbs_deb_enter(LBS_DEB_JOIN); - cmd->command = cpu_to_le16(cmd_802_11_ad_hoc_join); + cmd->command = cpu_to_le16(CMD_802_11_AD_HOC_JOIN); - padhocjoin->bssdescriptor.bsstype = cmd_bss_type_ibss; + join_cmd->bss.type = CMD_BSS_TYPE_IBSS; + join_cmd->bss.beaconperiod = cpu_to_le16(bss->beaconperiod); - padhocjoin->bssdescriptor.beaconperiod = cpu_to_le16(bss->beaconperiod); + memcpy(&join_cmd->bss.bssid, &bss->bssid, ETH_ALEN); + memcpy(&join_cmd->bss.ssid, &bss->ssid, bss->ssid_len); - memcpy(&padhocjoin->bssdescriptor.BSSID, &bss->bssid, ETH_ALEN); - memcpy(&padhocjoin->bssdescriptor.SSID, &bss->ssid, bss->ssid_len); + memcpy(&join_cmd->bss.phyparamset, &bss->phyparamset, + sizeof(union ieeetypes_phyparamset)); - memcpy(&padhocjoin->bssdescriptor.phyparamset, - &bss->phyparamset, sizeof(union ieeetypes_phyparamset)); - - memcpy(&padhocjoin->bssdescriptor.ssparamset, - &bss->ssparamset, sizeof(union IEEEtypes_ssparamset)); - - memcpy(&tmpcap, &bss->cap, sizeof(struct ieeetypes_capinfo)); - tmpcap &= CAPINFO_MASK; + memcpy(&join_cmd->bss.ssparamset, &bss->ssparamset, + sizeof(union IEEEtypes_ssparamset)); + join_cmd->bss.capability = cpu_to_le16(bss->capability & CAPINFO_MASK); lbs_deb_join("ADHOC_J_CMD: tmpcap=%4X CAPINFO_MASK=%4X\n", - tmpcap, CAPINFO_MASK); - memcpy(&padhocjoin->bssdescriptor.cap, &tmpcap, - sizeof(struct ieeetypes_capinfo)); + bss->capability, CAPINFO_MASK); /* information on BSSID descriptor passed to FW */ lbs_deb_join( - "ADHOC_J_CMD: BSSID = " MAC_FMT ", SSID = '%s'\n", - MAC_ARG(padhocjoin->bssdescriptor.BSSID), - padhocjoin->bssdescriptor.SSID); + "ADHOC_J_CMD: BSSID = %s, SSID = '%s'\n", + print_mac(mac, join_cmd->bss.bssid), + join_cmd->bss.ssid); /* failtimeout */ - padhocjoin->failtimeout = cpu_to_le16(MRVDRV_ASSOCIATION_TIME_OUT); + join_cmd->failtimeout = cpu_to_le16(MRVDRV_ASSOCIATION_TIME_OUT); /* probedelay */ - padhocjoin->probedelay = cpu_to_le16(cmd_scan_probe_delay_time); - - /* Copy Data rates from the rates recorded in scan response */ - memset(padhocjoin->bssdescriptor.datarates, 0, - sizeof(padhocjoin->bssdescriptor.datarates)); - memcpy(padhocjoin->bssdescriptor.datarates, bss->datarates, - min(sizeof(padhocjoin->bssdescriptor.datarates), - sizeof(bss->datarates))); - - card_rates = libertas_supported_rates; - card_rates_size = sizeof(libertas_supported_rates); + join_cmd->probedelay = 
cpu_to_le16(CMD_SCAN_PROBE_DELAY_TIME); adapter->curbssparams.channel = bss->channel; - if (get_common_rates(adapter, padhocjoin->bssdescriptor.datarates, - sizeof(padhocjoin->bssdescriptor.datarates), - card_rates, card_rates_size)) { + /* Copy Data rates from the rates recorded in scan response */ + memset(join_cmd->bss.rates, 0, sizeof(join_cmd->bss.rates)); + ratesize = min_t(u16, sizeof(join_cmd->bss.rates), MAX_RATES); + memcpy(join_cmd->bss.rates, bss->rates, ratesize); + if (get_common_rates(adapter, join_cmd->bss.rates, &ratesize)) { lbs_deb_join("ADHOC_J_CMD: get_common_rates returns error.\n"); ret = -1; goto done; } - /* Find the last non zero */ - for (i = 0; i < sizeof(padhocjoin->bssdescriptor.datarates) - && padhocjoin->bssdescriptor.datarates[i]; i++) ; - - adapter->curbssparams.numofrates = i; + /* Copy the ad-hoc creating rates into Current BSS state structure */ + memset(&adapter->curbssparams.rates, 0, sizeof(adapter->curbssparams.rates)); + memcpy(&adapter->curbssparams.rates, join_cmd->bss.rates, ratesize); - /* - * Copy the adhoc joining rates to Current BSS State structure + /* Set MSB on basic rates as the firmware requires, but _after_ + * copying to current bss rates. */ - memcpy(adapter->curbssparams.datarates, - padhocjoin->bssdescriptor.datarates, - adapter->curbssparams.numofrates); + libertas_set_basic_rate_flags(join_cmd->bss.rates, ratesize); - padhocjoin->bssdescriptor.ssparamset.ibssparamset.atimwindow = + join_cmd->bss.ssparamset.ibssparamset.atimwindow = cpu_to_le16(bss->atimwindow); if (assoc_req->secinfo.wep_enabled) { - padhocjoin->bssdescriptor.cap.privacy = AD_HOC_CAP_PRIVACY_ON; + u16 tmp = le16_to_cpu(join_cmd->bss.capability); + tmp |= WLAN_CAPABILITY_PRIVACY; + join_cmd->bss.capability = cpu_to_le16(tmp); } - if (adapter->psmode == wlan802_11powermodemax_psp) { + if (adapter->psmode == WLAN802_11POWERMODEMAX_PSP) { /* wake up first */ __le32 Localpsmode; - Localpsmode = cpu_to_le32(wlan802_11powermodecam); + Localpsmode = cpu_to_le32(WLAN802_11POWERMODECAM); ret = libertas_prepare_and_send_command(priv, - cmd_802_11_ps_mode, - cmd_act_set, + CMD_802_11_PS_MODE, + CMD_ACT_SET, 0, 0, &Localpsmode); if (ret) { @@ -709,6 +700,7 @@ int libertas_ret_80211_associate(wlan_private * priv, union iwreq_data wrqu; struct ieeetypes_assocrsp *passocrsp; struct bss_descriptor * bss; + u16 status_code; lbs_deb_enter(LBS_DEB_JOIN); @@ -721,21 +713,65 @@ int libertas_ret_80211_associate(wlan_private * priv, passocrsp = (struct ieeetypes_assocrsp *) & resp->params; - if (le16_to_cpu(passocrsp->statuscode)) { - libertas_mac_event_disconnected(priv); + /* + * Older FW versions map the IEEE 802.11 Status Code in the association + * response to the following values returned in passocrsp->statuscode: + * + * IEEE Status Code Marvell Status Code + * 0 -> 0x0000 ASSOC_RESULT_SUCCESS + * 13 -> 0x0004 ASSOC_RESULT_AUTH_REFUSED + * 14 -> 0x0004 ASSOC_RESULT_AUTH_REFUSED + * 15 -> 0x0004 ASSOC_RESULT_AUTH_REFUSED + * 16 -> 0x0004 ASSOC_RESULT_AUTH_REFUSED + * others -> 0x0003 ASSOC_RESULT_REFUSED + * + * Other response codes: + * 0x0001 -> ASSOC_RESULT_INVALID_PARAMETERS (unused) + * 0x0002 -> ASSOC_RESULT_TIMEOUT (internal timer expired waiting for + * association response from the AP) + */ - lbs_deb_join("ASSOC_RESP: Association failed, status code = %d\n", - le16_to_cpu(passocrsp->statuscode)); + status_code = le16_to_cpu(passocrsp->statuscode); + switch (status_code) { + case 0x00: + lbs_deb_join("ASSOC_RESP: Association succeeded\n"); + break; + case 0x01: + 
lbs_deb_join("ASSOC_RESP: Association failed; invalid " + "parameters (status code %d)\n", status_code); + break; + case 0x02: + lbs_deb_join("ASSOC_RESP: Association failed; internal timer " + "expired while waiting for the AP (status code %d)" + "\n", status_code); + break; + case 0x03: + lbs_deb_join("ASSOC_RESP: Association failed; association " + "was refused by the AP (status code %d)\n", + status_code); + break; + case 0x04: + lbs_deb_join("ASSOC_RESP: Association failed; authentication " + "was refused by the AP (status code %d)\n", + status_code); + break; + default: + lbs_deb_join("ASSOC_RESP: Association failed; reason unknown " + "(status code %d)\n", status_code); + break; + } + if (status_code) { + libertas_mac_event_disconnected(priv); ret = -1; goto done; } - lbs_dbg_hex("ASSOC_RESP:", (void *)&resp->params, + lbs_deb_hex(LBS_DEB_JOIN, "ASSOC_RESP", (void *)&resp->params, le16_to_cpu(resp->size) - S_DS_GEN); /* Send a Media Connected event, according to the Spec */ - adapter->connect_status = libertas_connected; + adapter->connect_status = LIBERTAS_CONNECTED; lbs_deb_join("ASSOC_RESP: assocated to '%s'\n", escape_essid(bss->ssid, bss->ssid_len)); @@ -759,10 +795,10 @@ int libertas_ret_80211_associate(wlan_private * priv, netif_carrier_on(priv->dev); netif_wake_queue(priv->dev); - netif_carrier_on(priv->mesh_dev); - netif_wake_queue(priv->mesh_dev); - - lbs_deb_join("ASSOC_RESP: Associated \n"); + if (priv->mesh_dev) { + netif_carrier_on(priv->mesh_dev); + netif_wake_queue(priv->mesh_dev); + } memcpy(wrqu.ap_addr.sa_data, adapter->curbssparams.bssid, ETH_ALEN); wrqu.ap_addr.sa_family = ARPHRD_ETHER; @@ -794,6 +830,7 @@ int libertas_ret_80211_ad_hoc_start(wlan_private * priv, struct cmd_ds_802_11_ad_hoc_result *padhocresult; union iwreq_data wrqu; struct bss_descriptor *bss; + DECLARE_MAC_BUF(mac); lbs_deb_enter(LBS_DEB_JOIN); @@ -815,7 +852,7 @@ int libertas_ret_80211_ad_hoc_start(wlan_private * priv, */ if (result) { lbs_deb_join("ADHOC_RESP: failed\n"); - if (adapter->connect_status == libertas_connected) { + if (adapter->connect_status == LIBERTAS_CONNECTED) { libertas_mac_event_disconnected(priv); } ret = -1; @@ -830,11 +867,11 @@ int libertas_ret_80211_ad_hoc_start(wlan_private * priv, escape_essid(bss->ssid, bss->ssid_len)); /* Send a Media Connected event, according to the Spec */ - adapter->connect_status = libertas_connected; + adapter->connect_status = LIBERTAS_CONNECTED; - if (command == cmd_ret_802_11_ad_hoc_start) { + if (command == CMD_RET(CMD_802_11_AD_HOC_START)) { /* Update the created network descriptor with the new BSSID */ - memcpy(bss->bssid, padhocresult->BSSID, ETH_ALEN); + memcpy(bss->bssid, padhocresult->bssid, ETH_ALEN); } /* Set the BSSID from the joined/started descriptor */ @@ -847,8 +884,10 @@ int libertas_ret_80211_ad_hoc_start(wlan_private * priv, netif_carrier_on(priv->dev); netif_wake_queue(priv->dev); - netif_carrier_on(priv->mesh_dev); - netif_wake_queue(priv->mesh_dev); + if (priv->mesh_dev) { + netif_carrier_on(priv->mesh_dev); + netif_wake_queue(priv->mesh_dev); + } memset(&wrqu, 0, sizeof(wrqu)); memcpy(wrqu.ap_addr.sa_data, adapter->curbssparams.bssid, ETH_ALEN); @@ -857,8 +896,8 @@ int libertas_ret_80211_ad_hoc_start(wlan_private * priv, lbs_deb_join("ADHOC_RESP: - Joined/Started Ad Hoc\n"); lbs_deb_join("ADHOC_RESP: channel = %d\n", adapter->curbssparams.channel); - lbs_deb_join("ADHOC_RESP: BSSID = " MAC_FMT "\n", - MAC_ARG(padhocresult->BSSID)); + lbs_deb_join("ADHOC_RESP: BSSID = %s\n", + print_mac(mac, padhocresult->bssid)); 
done: lbs_deb_leave_args(LBS_DEB_JOIN, "ret %d", ret); diff --git a/drivers/net/wireless/libertas/join.h b/drivers/net/wireless/libertas/join.h index d522630ff8cf0a76c55124eff395aeeb1418c8ce..894a072b9f8d4e6ab8164a422deec0aa68e462df 100644 --- a/drivers/net/wireless/libertas/join.h +++ b/drivers/net/wireless/libertas/join.h @@ -12,45 +12,42 @@ #include "dev.h" struct cmd_ds_command; -extern int libertas_cmd_80211_authenticate(wlan_private * priv, +int libertas_cmd_80211_authenticate(wlan_private * priv, struct cmd_ds_command *cmd, void *pdata_buf); -extern int libertas_cmd_80211_ad_hoc_join(wlan_private * priv, +int libertas_cmd_80211_ad_hoc_join(wlan_private * priv, struct cmd_ds_command *cmd, void *pdata_buf); -extern int libertas_cmd_80211_ad_hoc_stop(wlan_private * priv, +int libertas_cmd_80211_ad_hoc_stop(wlan_private * priv, struct cmd_ds_command *cmd); -extern int libertas_cmd_80211_ad_hoc_start(wlan_private * priv, +int libertas_cmd_80211_ad_hoc_start(wlan_private * priv, struct cmd_ds_command *cmd, void *pdata_buf); -extern int libertas_cmd_80211_deauthenticate(wlan_private * priv, +int libertas_cmd_80211_deauthenticate(wlan_private * priv, struct cmd_ds_command *cmd); -extern int libertas_cmd_80211_associate(wlan_private * priv, +int libertas_cmd_80211_associate(wlan_private * priv, struct cmd_ds_command *cmd, void *pdata_buf); -extern int libertas_ret_80211_ad_hoc_start(wlan_private * priv, +int libertas_ret_80211_ad_hoc_start(wlan_private * priv, struct cmd_ds_command *resp); -extern int libertas_ret_80211_ad_hoc_stop(wlan_private * priv, +int libertas_ret_80211_ad_hoc_stop(wlan_private * priv, struct cmd_ds_command *resp); -extern int libertas_ret_80211_disassociate(wlan_private * priv, +int libertas_ret_80211_disassociate(wlan_private * priv, struct cmd_ds_command *resp); -extern int libertas_ret_80211_associate(wlan_private * priv, +int libertas_ret_80211_associate(wlan_private * priv, struct cmd_ds_command *resp); -extern int libertas_reassociation_thread(void *data); - -extern int libertas_start_adhoc_network(wlan_private * priv, +int libertas_start_adhoc_network(wlan_private * priv, struct assoc_request * assoc_req); -extern int libertas_join_adhoc_network(wlan_private * priv, +int libertas_join_adhoc_network(wlan_private * priv, struct assoc_request * assoc_req); -extern int libertas_stop_adhoc_network(wlan_private * priv); - -extern int libertas_send_deauthentication(wlan_private * priv); -extern int libertas_send_deauth(wlan_private * priv); +int libertas_stop_adhoc_network(wlan_private * priv); -extern int libertas_do_adhocstop_ioctl(wlan_private * priv); +int libertas_send_deauthentication(wlan_private * priv); int wlan_associate(wlan_private * priv, struct assoc_request * assoc_req); +void libertas_unset_basic_rate_flags(u8 * rates, size_t len); + #endif diff --git a/drivers/net/wireless/libertas/main.c b/drivers/net/wireless/libertas/main.c index 9f366242c392c6afe3e8518129028d89be76a0e4..5ead08312e1e48b4443ec9cd3fe830dc1288afdd 100644 --- a/drivers/net/wireless/libertas/main.c +++ b/drivers/net/wireless/libertas/main.c @@ -10,6 +10,7 @@ #include #include #include +#include #include #include @@ -20,8 +21,9 @@ #include "wext.h" #include "debugfs.h" #include "assoc.h" +#include "join.h" -#define DRIVER_RELEASE_VERSION "322.p0" +#define DRIVER_RELEASE_VERSION "323.p0" const char libertas_driver_version[] = "COMM-USB8388-" DRIVER_RELEASE_VERSION #ifdef DEBUG "-dbg" @@ -121,57 +123,88 @@ struct region_cfp_table { static struct region_cfp_table region_cfp_table[] = { 
{0x10, /*US FCC */ channel_freq_power_US_BG, - sizeof(channel_freq_power_US_BG) / sizeof(struct chan_freq_power), + ARRAY_SIZE(channel_freq_power_US_BG), } , {0x20, /*CANADA IC */ channel_freq_power_US_BG, - sizeof(channel_freq_power_US_BG) / sizeof(struct chan_freq_power), + ARRAY_SIZE(channel_freq_power_US_BG), } , {0x30, /*EU*/ channel_freq_power_EU_BG, - sizeof(channel_freq_power_EU_BG) / sizeof(struct chan_freq_power), + ARRAY_SIZE(channel_freq_power_EU_BG), } , {0x31, /*SPAIN*/ channel_freq_power_SPN_BG, - sizeof(channel_freq_power_SPN_BG) / sizeof(struct chan_freq_power), + ARRAY_SIZE(channel_freq_power_SPN_BG), } , {0x32, /*FRANCE*/ channel_freq_power_FR_BG, - sizeof(channel_freq_power_FR_BG) / sizeof(struct chan_freq_power), + ARRAY_SIZE(channel_freq_power_FR_BG), } , {0x40, /*JAPAN*/ channel_freq_power_JPN_BG, - sizeof(channel_freq_power_JPN_BG) / sizeof(struct chan_freq_power), + ARRAY_SIZE(channel_freq_power_JPN_BG), } , /*Add new region here */ }; /** - * the rates supported + * the table to keep region code */ -u8 libertas_supported_rates[G_SUPPORTED_RATES] = - { 0x82, 0x84, 0x8b, 0x96, 0x0c, 0x12, 0x18, 0x24, 0x30, 0x48, 0x60, 0x6c, -0 }; +u16 libertas_region_code_to_index[MRVDRV_MAX_REGION_CODE] = + { 0x10, 0x20, 0x30, 0x31, 0x32, 0x40 }; /** - * the rates supported for ad-hoc G mode + * 802.11b/g supported bitrates (in 500Kb/s units) */ -u8 libertas_adhoc_rates_g[G_SUPPORTED_RATES] = - { 0x82, 0x84, 0x8b, 0x96, 0x0c, 0x12, 0x18, 0x24, 0x30, 0x48, 0x60, 0x6c, -0 }; +u8 libertas_bg_rates[MAX_RATES] = + { 0x02, 0x04, 0x0b, 0x16, 0x0c, 0x12, 0x18, 0x24, 0x30, 0x48, 0x60, 0x6c, +0x00, 0x00 }; /** - * the rates supported for ad-hoc B mode + * FW rate table. FW refers to rates by their index in this table, not by the + * rate value itself. Values of 0x00 are + * reserved positions. 
*/ -u8 libertas_adhoc_rates_b[4] = { 0x82, 0x84, 0x8b, 0x96 }; +static u8 fw_data_rates[MAX_RATES] = + { 0x02, 0x04, 0x0B, 0x16, 0x00, 0x0C, 0x12, + 0x18, 0x24, 0x30, 0x48, 0x60, 0x6C, 0x00 +}; /** - * the table to keep region code + * @brief use index to get the data rate + * + * @param idx The index of data rate + * @return data rate or 0 */ -u16 libertas_region_code_to_index[MRVDRV_MAX_REGION_CODE] = - { 0x10, 0x20, 0x30, 0x31, 0x32, 0x40 }; +u32 libertas_fw_index_to_data_rate(u8 idx) +{ + if (idx >= sizeof(fw_data_rates)) + idx = 0; + return fw_data_rates[idx]; +} + +/** + * @brief use rate to get the index + * + * @param rate data rate + * @return index or 0 + */ +u8 libertas_data_rate_to_fw_index(u32 rate) +{ + u8 i; + + if (!rate) + return 0; + + for (i = 0; i < sizeof(fw_data_rates); i++) { + if (rate == fw_data_rates[i]) + return i; + } + return 0; +} /** * Attributes exported through sysfs @@ -187,9 +220,9 @@ static ssize_t libertas_anycast_get(struct device * dev, memset(&mesh_access, 0, sizeof(mesh_access)); libertas_prepare_and_send_command(to_net_dev(dev)->priv, - cmd_mesh_access, - cmd_act_mesh_get_anycast, - cmd_option_waitforrsp, 0, (void *)&mesh_access); + CMD_MESH_ACCESS, + CMD_ACT_MESH_GET_ANYCAST, + CMD_OPTION_WAITFORRSP, 0, (void *)&mesh_access); return snprintf(buf, 12, "0x%X\n", le32_to_cpu(mesh_access.data[0])); } @@ -208,18 +241,127 @@ static ssize_t libertas_anycast_set(struct device * dev, mesh_access.data[0] = cpu_to_le32(datum); libertas_prepare_and_send_command((to_net_dev(dev))->priv, - cmd_mesh_access, - cmd_act_mesh_set_anycast, - cmd_option_waitforrsp, 0, (void *)&mesh_access); + CMD_MESH_ACCESS, + CMD_ACT_MESH_SET_ANYCAST, + CMD_OPTION_WAITFORRSP, 0, (void *)&mesh_access); + return strlen(buf); +} + +int libertas_add_rtap(wlan_private *priv); +void libertas_remove_rtap(wlan_private *priv); + +/** + * Get function for sysfs attribute rtap + */ +static ssize_t libertas_rtap_get(struct device * dev, + struct device_attribute *attr, char * buf) +{ + wlan_private *priv = (wlan_private *) (to_net_dev(dev))->priv; + wlan_adapter *adapter = priv->adapter; + return snprintf(buf, 5, "0x%X\n", adapter->monitormode); +} + +/** + * Set function for sysfs attribute rtap + */ +static ssize_t libertas_rtap_set(struct device * dev, + struct device_attribute *attr, const char * buf, size_t count) +{ + int monitor_mode; + wlan_private *priv = (wlan_private *) (to_net_dev(dev))->priv; + wlan_adapter *adapter = priv->adapter; + + sscanf(buf, "%x", &monitor_mode); + if (monitor_mode != WLAN_MONITOR_OFF) { + if(adapter->monitormode == monitor_mode) + return strlen(buf); + if (adapter->monitormode == WLAN_MONITOR_OFF) { + if (adapter->mode == IW_MODE_INFRA) + libertas_send_deauthentication(priv); + else if (adapter->mode == IW_MODE_ADHOC) + libertas_stop_adhoc_network(priv); + libertas_add_rtap(priv); + } + adapter->monitormode = monitor_mode; + } + + else { + if(adapter->monitormode == WLAN_MONITOR_OFF) + return strlen(buf); + adapter->monitormode = WLAN_MONITOR_OFF; + libertas_remove_rtap(priv); + netif_wake_queue(priv->dev); + netif_wake_queue(priv->mesh_dev); + } + + libertas_prepare_and_send_command(priv, + CMD_802_11_MONITOR_MODE, CMD_ACT_SET, + CMD_OPTION_WAITFORRSP, 0, &adapter->monitormode); return strlen(buf); } +/** + * libertas_rtap attribute to be exported per mshX interface + * through sysfs (/sys/class/net/mshX/libertas-rtap) + */ +static DEVICE_ATTR(libertas_rtap, 0644, libertas_rtap_get, + libertas_rtap_set ); + /** * anycast_mask attribute to be exported per 
mshX interface * through sysfs (/sys/class/net/mshX/anycast_mask) */ static DEVICE_ATTR(anycast_mask, 0644, libertas_anycast_get, libertas_anycast_set); +static ssize_t libertas_autostart_enabled_get(struct device * dev, + struct device_attribute *attr, char * buf) +{ + struct cmd_ds_mesh_access mesh_access; + + memset(&mesh_access, 0, sizeof(mesh_access)); + libertas_prepare_and_send_command(to_net_dev(dev)->priv, + CMD_MESH_ACCESS, + CMD_ACT_MESH_GET_AUTOSTART_ENABLED, + CMD_OPTION_WAITFORRSP, 0, (void *)&mesh_access); + + return sprintf(buf, "%d\n", le32_to_cpu(mesh_access.data[0])); +} + +static ssize_t libertas_autostart_enabled_set(struct device * dev, + struct device_attribute *attr, const char * buf, size_t count) +{ + struct cmd_ds_mesh_access mesh_access; + uint32_t datum; + wlan_private * priv = (to_net_dev(dev))->priv; + int ret; + + memset(&mesh_access, 0, sizeof(mesh_access)); + sscanf(buf, "%d", &datum); + mesh_access.data[0] = cpu_to_le32(datum); + + ret = libertas_prepare_and_send_command(priv, + CMD_MESH_ACCESS, + CMD_ACT_MESH_SET_AUTOSTART_ENABLED, + CMD_OPTION_WAITFORRSP, 0, (void *)&mesh_access); + if (ret == 0) + priv->mesh_autostart_enabled = datum ? 1 : 0; + + return strlen(buf); +} + +static DEVICE_ATTR(autostart_enabled, 0644, + libertas_autostart_enabled_get, libertas_autostart_enabled_set); + +static struct attribute *libertas_mesh_sysfs_entries[] = { + &dev_attr_anycast_mask.attr, + &dev_attr_autostart_enabled.attr, + NULL, +}; + +static struct attribute_group libertas_mesh_attr_group = { + .attrs = libertas_mesh_sysfs_entries, +}; + /** * @brief Check if the device can be open and wait if necessary. * @@ -255,7 +397,7 @@ static int pre_open_check(struct net_device *dev) * @param dev A pointer to net_device structure * @return 0 */ -static int wlan_dev_open(struct net_device *dev) +static int libertas_dev_open(struct net_device *dev) { wlan_private *priv = (wlan_private *) dev->priv; wlan_adapter *adapter = priv->adapter; @@ -264,12 +406,14 @@ static int wlan_dev_open(struct net_device *dev) priv->open = 1; - if (adapter->connect_status == libertas_connected) { + if (adapter->connect_status == LIBERTAS_CONNECTED) { netif_carrier_on(priv->dev); - netif_carrier_on(priv->mesh_dev); + if (priv->mesh_dev) + netif_carrier_on(priv->mesh_dev); } else { netif_carrier_off(priv->dev); - netif_carrier_off(priv->mesh_dev); + if (priv->mesh_dev) + netif_carrier_off(priv->mesh_dev); } lbs_deb_leave(LBS_DEB_NET); @@ -281,7 +425,7 @@ static int wlan_dev_open(struct net_device *dev) * @param dev A pointer to net_device structure * @return 0 */ -static int mesh_open(struct net_device *dev) +static int libertas_mesh_open(struct net_device *dev) { wlan_private *priv = (wlan_private *) dev->priv ; @@ -290,7 +434,7 @@ static int mesh_open(struct net_device *dev) priv->mesh_open = 1 ; netif_wake_queue(priv->mesh_dev); if (priv->infra_open == 0) - return wlan_dev_open(priv->dev) ; + return libertas_dev_open(priv->dev) ; return 0; } @@ -300,7 +444,7 @@ static int mesh_open(struct net_device *dev) * @param dev A pointer to net_device structure * @return 0 */ -static int wlan_open(struct net_device *dev) +static int libertas_open(struct net_device *dev) { wlan_private *priv = (wlan_private *) dev->priv ; @@ -309,11 +453,11 @@ static int wlan_open(struct net_device *dev) priv->infra_open = 1 ; netif_wake_queue(priv->dev); if (priv->open == 0) - return wlan_dev_open(priv->dev) ; + return libertas_dev_open(priv->dev) ; return 0; } -static int wlan_dev_close(struct net_device *dev) +static 
int libertas_dev_close(struct net_device *dev) { wlan_private *priv = dev->priv; @@ -332,14 +476,14 @@ static int wlan_dev_close(struct net_device *dev) * @param dev A pointer to net_device structure * @return 0 */ -static int mesh_close(struct net_device *dev) +static int libertas_mesh_close(struct net_device *dev) { wlan_private *priv = (wlan_private *) (dev->priv); priv->mesh_open = 0; netif_stop_queue(priv->mesh_dev); if (priv->infra_open == 0) - return wlan_dev_close(dev); + return libertas_dev_close(dev); else return 0; } @@ -350,20 +494,20 @@ static int mesh_close(struct net_device *dev) * @param dev A pointer to net_device structure * @return 0 */ -static int wlan_close(struct net_device *dev) +static int libertas_close(struct net_device *dev) { wlan_private *priv = (wlan_private *) dev->priv; netif_stop_queue(dev); priv->infra_open = 0; if (priv->mesh_open == 0) - return wlan_dev_close(dev); + return libertas_dev_close(dev); else return 0; } -static int wlan_hard_start_xmit(struct sk_buff *skb, struct net_device *dev) +static int libertas_hard_start_xmit(struct sk_buff *skb, struct net_device *dev) { int ret = 0; wlan_private *priv = dev->priv; @@ -376,7 +520,8 @@ static int wlan_hard_start_xmit(struct sk_buff *skb, struct net_device *dev) } netif_stop_queue(priv->dev); - netif_stop_queue(priv->mesh_dev); + if (priv->mesh_dev) + netif_stop_queue(priv->mesh_dev); if (libertas_process_tx(priv, skb) == 0) dev->trans_start = jiffies; @@ -386,41 +531,52 @@ static int wlan_hard_start_xmit(struct sk_buff *skb, struct net_device *dev) } /** - * @brief Mark mesh packets and handover them to wlan_hard_start_xmit + * @brief Mark mesh packets and handover them to libertas_hard_start_xmit * */ -static int mesh_pre_start_xmit(struct sk_buff *skb, struct net_device *dev) +static int libertas_mesh_pre_start_xmit(struct sk_buff *skb, + struct net_device *dev) { wlan_private *priv = dev->priv; int ret; lbs_deb_enter(LBS_DEB_MESH); + if(priv->adapter->monitormode != WLAN_MONITOR_OFF) { + netif_stop_queue(dev); + return -EOPNOTSUPP; + } SET_MESH_FRAME(skb); - ret = wlan_hard_start_xmit(skb, priv->dev); + ret = libertas_hard_start_xmit(skb, priv->dev); lbs_deb_leave_args(LBS_DEB_MESH, "ret %d", ret); return ret; } /** - * @brief Mark non-mesh packets and handover them to wlan_hard_start_xmit + * @brief Mark non-mesh packets and handover them to libertas_hard_start_xmit * */ -static int wlan_pre_start_xmit(struct sk_buff *skb, struct net_device *dev) +static int libertas_pre_start_xmit(struct sk_buff *skb, struct net_device *dev) { + wlan_private *priv = dev->priv; int ret; lbs_deb_enter(LBS_DEB_NET); + if(priv->adapter->monitormode != WLAN_MONITOR_OFF) { + netif_stop_queue(dev); + return -EOPNOTSUPP; + } + UNSET_MESH_FRAME(skb); - ret = wlan_hard_start_xmit(skb, dev); + ret = libertas_hard_start_xmit(skb, dev); lbs_deb_leave_args(LBS_DEB_NET, "ret %d", ret); return ret; } -static void wlan_tx_timeout(struct net_device *dev) +static void libertas_tx_timeout(struct net_device *dev) { wlan_private *priv = (wlan_private *) dev->priv; @@ -432,16 +588,17 @@ static void wlan_tx_timeout(struct net_device *dev) dev->trans_start = jiffies; if (priv->adapter->currenttxskb) { - if (priv->adapter->radiomode == WLAN_RADIOMODE_RADIOTAP) { + if (priv->adapter->monitormode != WLAN_MONITOR_OFF) { /* If we are here, we have not received feedback from the previous packet. Assume TX_FAIL and move on. 
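			   In monitor mode the handler fakes the event cause
			   (0x01000000) and calls libertas_send_tx_feedback()
			   itself; otherwise it just wakes the main thread
			   below.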
*/ priv->adapter->eventcause = 0x01000000; libertas_send_tx_feedback(priv); } else - wake_up_interruptible(&priv->mainthread.waitq); - } else if (priv->adapter->connect_status == libertas_connected) { + wake_up_interruptible(&priv->waitq); + } else if (priv->adapter->connect_status == LIBERTAS_CONNECTED) { netif_wake_queue(priv->dev); - netif_wake_queue(priv->mesh_dev); + if (priv->mesh_dev) + netif_wake_queue(priv->mesh_dev); } lbs_deb_leave(LBS_DEB_TX); @@ -453,14 +610,14 @@ static void wlan_tx_timeout(struct net_device *dev) * @param dev A pointer to wlan_private structure * @return A pointer to net_device_stats structure */ -static struct net_device_stats *wlan_get_stats(struct net_device *dev) +static struct net_device_stats *libertas_get_stats(struct net_device *dev) { wlan_private *priv = (wlan_private *) dev->priv; return &priv->stats; } -static int wlan_set_mac_address(struct net_device *dev, void *addr) +static int libertas_set_mac_address(struct net_device *dev, void *addr) { int ret = 0; wlan_private *priv = (wlan_private *) dev->priv; @@ -475,14 +632,14 @@ static int wlan_set_mac_address(struct net_device *dev, void *addr) memset(adapter->current_addr, 0, ETH_ALEN); /* dev->dev_addr is 8 bytes */ - lbs_dbg_hex("dev->dev_addr:", dev->dev_addr, ETH_ALEN); + lbs_deb_hex(LBS_DEB_NET, "dev->dev_addr", dev->dev_addr, ETH_ALEN); - lbs_dbg_hex("addr:", phwaddr->sa_data, ETH_ALEN); + lbs_deb_hex(LBS_DEB_NET, "addr", phwaddr->sa_data, ETH_ALEN); memcpy(adapter->current_addr, phwaddr->sa_data, ETH_ALEN); - ret = libertas_prepare_and_send_command(priv, cmd_802_11_mac_address, - cmd_act_set, - cmd_option_waitforrsp, 0, NULL); + ret = libertas_prepare_and_send_command(priv, CMD_802_11_MAC_ADDRESS, + CMD_ACT_SET, + CMD_OPTION_WAITFORRSP, 0, NULL); if (ret) { lbs_deb_net("set MAC address failed\n"); @@ -490,7 +647,7 @@ static int wlan_set_mac_address(struct net_device *dev, void *addr) goto done; } - lbs_dbg_hex("adapter->macaddr:", adapter->current_addr, ETH_ALEN); + lbs_deb_hex(LBS_DEB_NET, "adapter->macaddr", adapter->current_addr, ETH_ALEN); memcpy(dev->dev_addr, adapter->current_addr, ETH_ALEN); if (priv->mesh_dev) memcpy(priv->mesh_dev->dev_addr, adapter->current_addr, ETH_ALEN); @@ -500,7 +657,7 @@ static int wlan_set_mac_address(struct net_device *dev, void *addr) return ret; } -static int wlan_copy_multicast_address(wlan_adapter * adapter, +static int libertas_copy_multicast_address(wlan_adapter * adapter, struct net_device *dev) { int i = 0; @@ -515,11 +672,12 @@ static int wlan_copy_multicast_address(wlan_adapter * adapter, } -static void wlan_set_multicast_list(struct net_device *dev) +static void libertas_set_multicast_list(struct net_device *dev) { wlan_private *priv = dev->priv; wlan_adapter *adapter = priv->adapter; int oldpacketfilter; + DECLARE_MAC_BUF(mac); lbs_deb_enter(LBS_DEB_NET); @@ -528,57 +686,52 @@ static void wlan_set_multicast_list(struct net_device *dev) if (dev->flags & IFF_PROMISC) { lbs_deb_net("enable promiscuous mode\n"); adapter->currentpacketfilter |= - cmd_act_mac_promiscuous_enable; + CMD_ACT_MAC_PROMISCUOUS_ENABLE; adapter->currentpacketfilter &= - ~(cmd_act_mac_all_multicast_enable | - cmd_act_mac_multicast_enable); + ~(CMD_ACT_MAC_ALL_MULTICAST_ENABLE | + CMD_ACT_MAC_MULTICAST_ENABLE); } else { /* Multicast */ adapter->currentpacketfilter &= - ~cmd_act_mac_promiscuous_enable; + ~CMD_ACT_MAC_PROMISCUOUS_ENABLE; if (dev->flags & IFF_ALLMULTI || dev->mc_count > MRVDRV_MAX_MULTICAST_LIST_SIZE) { lbs_deb_net( "enabling all multicast\n"); 
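			/* Either IFF_ALLMULTI is requested or the list is
			   longer than MRVDRV_MAX_MULTICAST_LIST_SIZE: fall
			   back to the catch-all multicast filter and clear
			   the per-address filter bit below. */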
adapter->currentpacketfilter |= - cmd_act_mac_all_multicast_enable; + CMD_ACT_MAC_ALL_MULTICAST_ENABLE; adapter->currentpacketfilter &= - ~cmd_act_mac_multicast_enable; + ~CMD_ACT_MAC_MULTICAST_ENABLE; } else { adapter->currentpacketfilter &= - ~cmd_act_mac_all_multicast_enable; + ~CMD_ACT_MAC_ALL_MULTICAST_ENABLE; if (!dev->mc_count) { lbs_deb_net("no multicast addresses, " "disabling multicast\n"); adapter->currentpacketfilter &= - ~cmd_act_mac_multicast_enable; + ~CMD_ACT_MAC_MULTICAST_ENABLE; } else { int i; adapter->currentpacketfilter |= - cmd_act_mac_multicast_enable; + CMD_ACT_MAC_MULTICAST_ENABLE; adapter->nr_of_multicastmacaddr = - wlan_copy_multicast_address(adapter, dev); + libertas_copy_multicast_address(adapter, dev); lbs_deb_net("multicast addresses: %d\n", dev->mc_count); for (i = 0; i < dev->mc_count; i++) { - lbs_deb_net("Multicast address %d:" - MAC_FMT "\n", i, - adapter->multicastlist[i][0], - adapter->multicastlist[i][1], - adapter->multicastlist[i][2], - adapter->multicastlist[i][3], - adapter->multicastlist[i][4], - adapter->multicastlist[i][5]); + lbs_deb_net("Multicast address %d:%s\n", + i, print_mac(mac, + adapter->multicastlist[i])); } /* send multicast addresses to firmware */ libertas_prepare_and_send_command(priv, - cmd_mac_multicast_adr, - cmd_act_set, 0, 0, + CMD_MAC_MULTICAST_ADR, + CMD_ACT_SET, 0, 0, NULL); } } @@ -599,18 +752,16 @@ static void wlan_set_multicast_list(struct net_device *dev) * @param data A pointer to wlan_thread structure * @return 0 */ -static int wlan_service_main_thread(void *data) +static int libertas_thread(void *data) { - struct wlan_thread *thread = data; - wlan_private *priv = thread->priv; + struct net_device *dev = data; + wlan_private *priv = dev->priv; wlan_adapter *adapter = priv->adapter; wait_queue_t wait; u8 ireg = 0; lbs_deb_enter(LBS_DEB_THREAD); - wlan_activate_thread(thread); - init_waitqueue_entry(&wait, current); set_freezable(); @@ -620,7 +771,7 @@ static int wlan_service_main_thread(void *data) adapter->intcounter, adapter->currenttxskb, priv->dnld_sent); - add_wait_queue(&thread->waitq, &wait); + add_wait_queue(&priv->waitq, &wait); set_current_state(TASK_INTERRUPTIBLE); spin_lock_irq(&adapter->driver_lock); if ((adapter->psstate == PS_STATE_SLEEP) || @@ -636,14 +787,13 @@ static int wlan_service_main_thread(void *data) } else spin_unlock_irq(&adapter->driver_lock); - lbs_deb_thread( "main-thread 222 (waking up): intcounter=%d currenttxskb=%p " "dnld_sent=%d\n", adapter->intcounter, adapter->currenttxskb, priv->dnld_sent); set_current_state(TASK_RUNNING); - remove_wait_queue(&thread->waitq, &wait); + remove_wait_queue(&priv->waitq, &wait); try_to_freeze(); lbs_deb_thread("main-thread 333: intcounter=%d currenttxskb=%p " @@ -681,20 +831,20 @@ static int wlan_service_main_thread(void *data) adapter->currenttxskb, priv->dnld_sent); /* command response? 
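		   That is, the cached interrupt status has the
		   command-upload-ready bit set; it is cleared and
		   libertas_process_rx_command() runs with driver_lock
		   dropped.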
*/ - if (adapter->hisregcpy & his_cmdupldrdy) { + if (adapter->hisregcpy & MRVDRV_CMD_UPLD_RDY) { lbs_deb_thread("main-thread: cmd response ready\n"); - adapter->hisregcpy &= ~his_cmdupldrdy; + adapter->hisregcpy &= ~MRVDRV_CMD_UPLD_RDY; spin_unlock_irq(&adapter->driver_lock); libertas_process_rx_command(priv); spin_lock_irq(&adapter->driver_lock); } /* Any Card Event */ - if (adapter->hisregcpy & his_cardevent) { + if (adapter->hisregcpy & MRVDRV_CARDEVENT) { lbs_deb_thread("main-thread: Card Event Activity\n"); - adapter->hisregcpy &= ~his_cardevent; + adapter->hisregcpy &= ~MRVDRV_CARDEVENT; if (priv->hw_read_event_cause(priv)) { lbs_pr_alert( @@ -711,7 +861,7 @@ static int wlan_service_main_thread(void *data) if (adapter->psstate == PS_STATE_PRE_SLEEP) { if (!priv->dnld_sent && !adapter->cur_cmd) { if (adapter->connect_status == - libertas_connected) { + LIBERTAS_CONNECTED) { lbs_deb_thread( "main_thread: PRE_SLEEP--intcounter=%d currenttxskb=%p " "dnld_sent=%d cur_cmd=%p, confirm now\n", @@ -758,12 +908,213 @@ static int wlan_service_main_thread(void *data) del_timer(&adapter->command_timer); adapter->nr_cmd_pending = 0; wake_up_all(&adapter->cmd_pending); - wlan_deactivate_thread(thread); lbs_deb_leave(LBS_DEB_THREAD); return 0; } +/** + * @brief This function downloads firmware image, gets + * HW spec from firmware and set basic parameters to + * firmware. + * + * @param priv A pointer to wlan_private structure + * @return 0 or -1 + */ +static int wlan_setup_firmware(wlan_private * priv) +{ + int ret = -1; + wlan_adapter *adapter = priv->adapter; + struct cmd_ds_mesh_access mesh_access; + + lbs_deb_enter(LBS_DEB_FW); + + /* + * Read MAC address from HW + */ + memset(adapter->current_addr, 0xff, ETH_ALEN); + + ret = libertas_prepare_and_send_command(priv, CMD_GET_HW_SPEC, + 0, CMD_OPTION_WAITFORRSP, 0, NULL); + + if (ret) { + ret = -1; + goto done; + } + + libertas_set_mac_packet_filter(priv); + + /* Get the supported Data rates */ + ret = libertas_prepare_and_send_command(priv, CMD_802_11_DATA_RATE, + CMD_ACT_GET_TX_RATE, + CMD_OPTION_WAITFORRSP, 0, NULL); + + if (ret) { + ret = -1; + goto done; + } + + /* Disable mesh autostart */ + if (priv->mesh_dev) { + memset(&mesh_access, 0, sizeof(mesh_access)); + mesh_access.data[0] = cpu_to_le32(0); + ret = libertas_prepare_and_send_command(priv, + CMD_MESH_ACCESS, + CMD_ACT_MESH_SET_AUTOSTART_ENABLED, + CMD_OPTION_WAITFORRSP, 0, (void *)&mesh_access); + if (ret) { + ret = -1; + goto done; + } + priv->mesh_autostart_enabled = 0; + } + + /* Set the boot2 version in firmware */ + ret = libertas_prepare_and_send_command(priv, CMD_SET_BOOT2_VER, + 0, CMD_OPTION_WAITFORRSP, 0, NULL); + + ret = 0; +done: + lbs_deb_leave_args(LBS_DEB_FW, "ret %d", ret); + return ret; +} + +/** + * This function handles the timeout of command sending. + * It will re-send the same command again. 
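+ * The timed-out node is detached from cur_cmd under driver_lock, re-queued
+ * with libertas_queue_cmd(), and the main thread is woken through
+ * priv->waitq; the timer itself is set up in libertas_init_adapter().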
+ */ +static void command_timer_fn(unsigned long data) +{ + wlan_private *priv = (wlan_private *)data; + wlan_adapter *adapter = priv->adapter; + struct cmd_ctrl_node *ptempnode; + struct cmd_ds_command *cmd; + unsigned long flags; + + ptempnode = adapter->cur_cmd; + if (ptempnode == NULL) { + lbs_deb_fw("ptempnode empty\n"); + return; + } + + cmd = (struct cmd_ds_command *)ptempnode->bufvirtualaddr; + if (!cmd) { + lbs_deb_fw("cmd is NULL\n"); + return; + } + + lbs_deb_fw("command_timer_fn fired, cmd %x\n", cmd->command); + + if (!adapter->fw_ready) + return; + + spin_lock_irqsave(&adapter->driver_lock, flags); + adapter->cur_cmd = NULL; + spin_unlock_irqrestore(&adapter->driver_lock, flags); + + lbs_deb_fw("re-sending same command because of timeout\n"); + libertas_queue_cmd(adapter, ptempnode, 0); + + wake_up_interruptible(&priv->waitq); + + return; +} + +static int libertas_init_adapter(wlan_private * priv) +{ + wlan_adapter *adapter = priv->adapter; + size_t bufsize; + int i, ret = 0; + + /* Allocate buffer to store the BSSID list */ + bufsize = MAX_NETWORK_COUNT * sizeof(struct bss_descriptor); + adapter->networks = kzalloc(bufsize, GFP_KERNEL); + if (!adapter->networks) { + lbs_pr_err("Out of memory allocating beacons\n"); + ret = -1; + goto out; + } + + /* Initialize scan result lists */ + INIT_LIST_HEAD(&adapter->network_free_list); + INIT_LIST_HEAD(&adapter->network_list); + for (i = 0; i < MAX_NETWORK_COUNT; i++) { + list_add_tail(&adapter->networks[i].list, + &adapter->network_free_list); + } + + adapter->libertas_ps_confirm_sleep.seqnum = cpu_to_le16(++adapter->seqnum); + adapter->libertas_ps_confirm_sleep.command = + cpu_to_le16(CMD_802_11_PS_MODE); + adapter->libertas_ps_confirm_sleep.size = + cpu_to_le16(sizeof(struct PS_CMD_ConfirmSleep)); + adapter->libertas_ps_confirm_sleep.action = + cpu_to_le16(CMD_SUBCMD_SLEEP_CONFIRMED); + + memset(adapter->current_addr, 0xff, ETH_ALEN); + + adapter->connect_status = LIBERTAS_DISCONNECTED; + adapter->secinfo.auth_mode = IW_AUTH_ALG_OPEN_SYSTEM; + adapter->mode = IW_MODE_INFRA; + adapter->curbssparams.channel = DEFAULT_AD_HOC_CHANNEL; + adapter->currentpacketfilter = CMD_ACT_MAC_RX_ON | CMD_ACT_MAC_TX_ON; + adapter->radioon = RADIO_ON; + adapter->auto_rate = 1; + adapter->capability = WLAN_CAPABILITY_SHORT_PREAMBLE; + adapter->psmode = WLAN802_11POWERMODECAM; + adapter->psstate = PS_STATE_FULL_POWER; + + mutex_init(&adapter->lock); + + memset(&adapter->tx_queue_ps, 0, NR_TX_QUEUE*sizeof(struct sk_buff*)); + adapter->tx_queue_idx = 0; + spin_lock_init(&adapter->txqueue_lock); + + setup_timer(&adapter->command_timer, command_timer_fn, + (unsigned long)priv); + + INIT_LIST_HEAD(&adapter->cmdfreeq); + INIT_LIST_HEAD(&adapter->cmdpendingq); + + spin_lock_init(&adapter->driver_lock); + init_waitqueue_head(&adapter->cmd_pending); + adapter->nr_cmd_pending = 0; + + /* Allocate the command buffers */ + if (libertas_allocate_cmd_buffer(priv)) { + lbs_pr_err("Out of memory allocating command buffers\n"); + ret = -1; + } + +out: + return ret; +} + +static void libertas_free_adapter(wlan_private * priv) +{ + wlan_adapter *adapter = priv->adapter; + + if (!adapter) { + lbs_deb_fw("why double free adapter?\n"); + return; + } + + lbs_deb_fw("free command buffer\n"); + libertas_free_cmd_buffer(priv); + + lbs_deb_fw("free command_timer\n"); + del_timer(&adapter->command_timer); + + lbs_deb_fw("free scan results table\n"); + kfree(adapter->networks); + adapter->networks = NULL; + + /* Free the adapter object itself */ + lbs_deb_fw("free adapter\n"); + 
kfree(adapter); + priv->adapter = NULL; +} + /** * @brief This function adds the card. it will probe the * card, allocate the wlan_priv and initialize the device. @@ -781,7 +1132,7 @@ wlan_private *libertas_add_card(void *card, struct device *dmdev) /* Allocate an Ethernet device and register it */ if (!(dev = alloc_etherdev(sizeof(wlan_private)))) { lbs_pr_err("init ethX device failed\n"); - return NULL; + goto done; } priv = dev->priv; @@ -791,20 +1142,24 @@ wlan_private *libertas_add_card(void *card, struct device *dmdev) goto err_kzalloc; } + if (libertas_init_adapter(priv)) { + lbs_pr_err("failed to initialize adapter structure.\n"); + goto err_init_adapter; + } + priv->dev = dev; priv->card = card; priv->mesh_open = 0; priv->infra_open = 0; - - SET_MODULE_OWNER(dev); + priv->hotplug_device = dmdev; /* Setup the OS Interface to our functions */ - dev->open = wlan_open; - dev->hard_start_xmit = wlan_pre_start_xmit; - dev->stop = wlan_close; - dev->set_mac_address = wlan_set_mac_address; - dev->tx_timeout = wlan_tx_timeout; - dev->get_stats = wlan_get_stats; + dev->open = libertas_open; + dev->hard_start_xmit = libertas_pre_start_xmit; + dev->stop = libertas_close; + dev->set_mac_address = libertas_set_mac_address; + dev->tx_timeout = libertas_tx_timeout; + dev->get_stats = libertas_get_stats; dev->watchdog_timeo = 5 * HZ; dev->ethtool_ops = &libertas_ethtool_ops; #ifdef WIRELESS_EXT @@ -813,84 +1168,148 @@ wlan_private *libertas_add_card(void *card, struct device *dmdev) #define NETIF_F_DYNALLOC 16 dev->features |= NETIF_F_DYNALLOC; dev->flags |= IFF_BROADCAST | IFF_MULTICAST; - dev->set_multicast_list = wlan_set_multicast_list; + dev->set_multicast_list = libertas_set_multicast_list; SET_NETDEV_DEV(dev, dmdev); - INIT_LIST_HEAD(&priv->adapter->cmdfreeq); - INIT_LIST_HEAD(&priv->adapter->cmdpendingq); + priv->rtap_net_dev = NULL; + if (device_create_file(dmdev, &dev_attr_libertas_rtap)) + goto err_init_adapter; + + lbs_deb_thread("Starting main thread...\n"); + init_waitqueue_head(&priv->waitq); + priv->main_thread = kthread_run(libertas_thread, dev, "libertas_main"); + if (IS_ERR(priv->main_thread)) { + lbs_deb_thread("Error creating main thread.\n"); + goto err_kthread_run; + } + + priv->work_thread = create_singlethread_workqueue("libertas_worker"); + INIT_DELAYED_WORK(&priv->assoc_work, libertas_association_worker); + INIT_DELAYED_WORK(&priv->scan_work, libertas_scan_worker); + INIT_WORK(&priv->sync_channel, libertas_sync_channel); - spin_lock_init(&priv->adapter->driver_lock); - init_waitqueue_head(&priv->adapter->cmd_pending); - priv->adapter->nr_cmd_pending = 0; goto done; +err_kthread_run: + device_remove_file(dmdev, &dev_attr_libertas_rtap); + +err_init_adapter: + libertas_free_adapter(priv); + err_kzalloc: free_netdev(dev); priv = NULL; + done: lbs_deb_leave_args(LBS_DEB_NET, "priv %p", priv); return priv; } EXPORT_SYMBOL_GPL(libertas_add_card); -int libertas_activate_card(wlan_private *priv, char *fw_name) + +int libertas_remove_card(wlan_private *priv) { + wlan_adapter *adapter = priv->adapter; struct net_device *dev = priv->dev; - int ret = -1; + union iwreq_data wrqu; lbs_deb_enter(LBS_DEB_MAIN); - lbs_deb_thread("Starting kthread...\n"); - priv->mainthread.priv = priv; - wlan_create_thread(wlan_service_main_thread, - &priv->mainthread, "wlan_main_service"); + libertas_remove_rtap(priv); - priv->assoc_thread = - create_singlethread_workqueue("libertas_assoc"); - INIT_DELAYED_WORK(&priv->assoc_work, libertas_association_worker); - INIT_WORK(&priv->sync_channel, 
libertas_sync_channel); + dev = priv->dev; + device_remove_file(priv->hotplug_device, &dev_attr_libertas_rtap); - /* - * Register the device. Fillup the private data structure with - * relevant information from the card and request for the required - * IRQ. - */ - if (priv->hw_register_dev(priv) < 0) { - lbs_pr_err("failed to register WLAN device\n"); - goto err_registerdev; - } + cancel_delayed_work(&priv->scan_work); + cancel_delayed_work(&priv->assoc_work); + destroy_workqueue(priv->work_thread); - /* init FW and HW */ - if (fw_name && libertas_init_fw(priv, fw_name)) { - lbs_pr_err("firmware init failed\n"); - goto err_registerdev; + if (adapter->psmode == WLAN802_11POWERMODEMAX_PSP) { + adapter->psmode = WLAN802_11POWERMODECAM; + libertas_ps_wakeup(priv, CMD_OPTION_WAITFORRSP); } + memset(wrqu.ap_addr.sa_data, 0xaa, ETH_ALEN); + wrqu.ap_addr.sa_family = ARPHRD_ETHER; + wireless_send_event(priv->dev, SIOCGIWAP, &wrqu, NULL); + + /* Stop the thread servicing the interrupts */ + adapter->surpriseremoved = 1; + kthread_stop(priv->main_thread); + + libertas_free_adapter(priv); + + priv->dev = NULL; + free_netdev(dev); + + lbs_deb_leave(LBS_DEB_MAIN); + return 0; +} +EXPORT_SYMBOL_GPL(libertas_remove_card); + + +int libertas_start_card(wlan_private *priv) +{ + struct net_device *dev = priv->dev; + int ret = -1; + + lbs_deb_enter(LBS_DEB_MAIN); + + /* poke the firmware */ + ret = wlan_setup_firmware(priv); + if (ret) + goto done; + + /* init 802.11d */ + libertas_init_11d(priv); + if (register_netdev(dev)) { lbs_pr_err("cannot register ethX device\n"); - goto err_init_fw; + goto done; } - lbs_pr_info("%s: Marvell WLAN 802.11 adapter\n", dev->name); - libertas_debugfs_init_one(priv, dev); + lbs_pr_info("%s: Marvell WLAN 802.11 adapter\n", dev->name); + ret = 0; - goto done; -err_init_fw: - priv->hw_unregister_dev(priv); -err_registerdev: - destroy_workqueue(priv->assoc_thread); - /* Stop the thread servicing the interrupts */ - wake_up_interruptible(&priv->mainthread.waitq); - wlan_terminate_thread(&priv->mainthread); done: - lbs_deb_leave_args(LBS_DEB_NET, "ret %d", ret); + lbs_deb_leave_args(LBS_DEB_MAIN, "ret %d", ret); return ret; } -EXPORT_SYMBOL_GPL(libertas_activate_card); +EXPORT_SYMBOL_GPL(libertas_start_card); + + +int libertas_stop_card(wlan_private *priv) +{ + struct net_device *dev = priv->dev; + int ret = -1; + struct cmd_ctrl_node *cmdnode; + unsigned long flags; + + lbs_deb_enter(LBS_DEB_MAIN); + + netif_stop_queue(priv->dev); + netif_carrier_off(priv->dev); + + libertas_debugfs_remove_one(priv); + + /* Flush pending command nodes */ + spin_lock_irqsave(&priv->adapter->driver_lock, flags); + list_for_each_entry(cmdnode, &priv->adapter->cmdpendingq, list) { + cmdnode->cmdwaitqwoken = 1; + wake_up_interruptible(&cmdnode->cmdwait_q); + } + spin_unlock_irqrestore(&priv->adapter->driver_lock, flags); + + unregister_netdev(dev); + + lbs_deb_leave_args(LBS_DEB_MAIN, "ret %d", ret); + return ret; +} +EXPORT_SYMBOL_GPL(libertas_stop_card); /** @@ -915,13 +1334,11 @@ int libertas_add_mesh(wlan_private *priv, struct device *dev) mesh_dev->priv = priv; priv->mesh_dev = mesh_dev; - SET_MODULE_OWNER(mesh_dev); - - mesh_dev->open = mesh_open; - mesh_dev->hard_start_xmit = mesh_pre_start_xmit; - mesh_dev->stop = mesh_close; - mesh_dev->get_stats = wlan_get_stats; - mesh_dev->set_mac_address = wlan_set_mac_address; + mesh_dev->open = libertas_mesh_open; + mesh_dev->hard_start_xmit = libertas_mesh_pre_start_xmit; + mesh_dev->stop = libertas_mesh_close; + mesh_dev->get_stats = 
libertas_get_stats; + mesh_dev->set_mac_address = libertas_set_mac_address; mesh_dev->ethtool_ops = &libertas_ethtool_ops; memcpy(mesh_dev->dev_addr, priv->dev->dev_addr, sizeof(priv->dev->dev_addr)); @@ -940,7 +1357,7 @@ int libertas_add_mesh(wlan_private *priv, struct device *dev) goto err_free; } - ret = device_create_file(&(mesh_dev->dev), &dev_attr_anycast_mask); + ret = sysfs_create_group(&(mesh_dev->dev.kobj), &libertas_mesh_attr_group); if (ret) goto err_unregister; @@ -948,7 +1365,6 @@ int libertas_add_mesh(wlan_private *priv, struct device *dev) ret = 0; goto done; - err_unregister: unregister_netdev(mesh_dev); @@ -961,86 +1377,12 @@ int libertas_add_mesh(wlan_private *priv, struct device *dev) } EXPORT_SYMBOL_GPL(libertas_add_mesh); -static void wake_pending_cmdnodes(wlan_private *priv) -{ - struct cmd_ctrl_node *cmdnode; - unsigned long flags; - - lbs_deb_enter(LBS_DEB_CMD); - - spin_lock_irqsave(&priv->adapter->driver_lock, flags); - list_for_each_entry(cmdnode, &priv->adapter->cmdpendingq, list) { - cmdnode->cmdwaitqwoken = 1; - wake_up_interruptible(&cmdnode->cmdwait_q); - } - spin_unlock_irqrestore(&priv->adapter->driver_lock, flags); -} - - -int libertas_remove_card(wlan_private *priv) -{ - wlan_adapter *adapter; - struct net_device *dev; - union iwreq_data wrqu; - - lbs_deb_enter(LBS_DEB_NET); - - if (!priv) - goto out; - - adapter = priv->adapter; - - if (!adapter) - goto out; - - dev = priv->dev; - - netif_stop_queue(priv->dev); - netif_carrier_off(priv->dev); - - wake_pending_cmdnodes(priv); - - unregister_netdev(dev); - - cancel_delayed_work(&priv->assoc_work); - destroy_workqueue(priv->assoc_thread); - - if (adapter->psmode == wlan802_11powermodemax_psp) { - adapter->psmode = wlan802_11powermodecam; - libertas_ps_wakeup(priv, cmd_option_waitforrsp); - } - - memset(wrqu.ap_addr.sa_data, 0xaa, ETH_ALEN); - wrqu.ap_addr.sa_family = ARPHRD_ETHER; - wireless_send_event(priv->dev, SIOCGIWAP, &wrqu, NULL); - - adapter->surpriseremoved = 1; - - /* Stop the thread servicing the interrupts */ - wlan_terminate_thread(&priv->mainthread); - - libertas_debugfs_remove_one(priv); - - lbs_deb_net("free adapter\n"); - libertas_free_adapter(priv); - - lbs_deb_net("unregister finish\n"); - - priv->dev = NULL; - free_netdev(dev); - -out: - lbs_deb_leave(LBS_DEB_NET); - return 0; -} -EXPORT_SYMBOL_GPL(libertas_remove_card); - void libertas_remove_mesh(wlan_private *priv) { struct net_device *mesh_dev; - lbs_deb_enter(LBS_DEB_NET); + lbs_deb_enter(LBS_DEB_MAIN); if (!priv) goto out; @@ -1050,14 +1392,14 @@ void libertas_remove_mesh(wlan_private *priv) netif_stop_queue(mesh_dev); netif_carrier_off(priv->mesh_dev); - device_remove_file(&(mesh_dev->dev), &dev_attr_anycast_mask); + sysfs_remove_group(&(mesh_dev->dev.kobj), &libertas_mesh_attr_group); unregister_netdev(mesh_dev); priv->mesh_dev = NULL ; free_netdev(mesh_dev); out: - lbs_deb_leave(LBS_DEB_NET); + lbs_deb_leave(LBS_DEB_MAIN); } EXPORT_SYMBOL_GPL(libertas_remove_mesh); @@ -1076,7 +1418,7 @@ struct chan_freq_power *libertas_get_region_cfp_table(u8 region, u8 band, int *c lbs_deb_enter(LBS_DEB_MAIN); - end = sizeof(region_cfp_table)/sizeof(struct region_cfp_table); + end = ARRAY_SIZE(region_cfp_table); for (i = 0; i < end ; i++) { lbs_deb_main("region_cfp_table[i].region=%d\n", @@ -1148,15 +1490,30 @@ void libertas_interrupt(struct net_device *dev) if (priv->adapter->psstate == PS_STATE_SLEEP) { priv->adapter->psstate = PS_STATE_AWAKE; netif_wake_queue(dev); - netif_wake_queue(priv->mesh_dev); + if (priv->mesh_dev) + 
netif_wake_queue(priv->mesh_dev); } - wake_up_interruptible(&priv->mainthread.waitq); + wake_up_interruptible(&priv->waitq); lbs_deb_leave(LBS_DEB_THREAD); } EXPORT_SYMBOL_GPL(libertas_interrupt); +int libertas_reset_device(wlan_private *priv) +{ + int ret; + + lbs_deb_enter(LBS_DEB_MAIN); + ret = libertas_prepare_and_send_command(priv, CMD_802_11_RESET, + CMD_ACT_HALT, 0, 0, NULL); + msleep_interruptible(10); + + lbs_deb_leave_args(LBS_DEB_MAIN, "ret %d", ret); + return ret; +} +EXPORT_SYMBOL_GPL(libertas_reset_device); + static int libertas_init_module(void) { lbs_deb_enter(LBS_DEB_MAIN); @@ -1174,6 +1531,81 @@ static void libertas_exit_module(void) lbs_deb_leave(LBS_DEB_MAIN); } +/* + * rtap interface support fuctions + */ + +static int libertas_rtap_open(struct net_device *dev) +{ + netif_carrier_off(dev); + netif_stop_queue(dev); + return 0; +} + +static int libertas_rtap_stop(struct net_device *dev) +{ + return 0; +} + +static int libertas_rtap_hard_start_xmit(struct sk_buff *skb, struct net_device *dev) +{ + netif_stop_queue(dev); + return -EOPNOTSUPP; +} + +static struct net_device_stats *libertas_rtap_get_stats(struct net_device *dev) +{ + wlan_private *priv = dev->priv; + return &priv->ieee->stats; +} + + +void libertas_remove_rtap(wlan_private *priv) +{ + if (priv->rtap_net_dev == NULL) + return; + unregister_netdev(priv->rtap_net_dev); + free_ieee80211(priv->rtap_net_dev); + priv->rtap_net_dev = NULL; +} + +int libertas_add_rtap(wlan_private *priv) +{ + int rc = 0; + + if (priv->rtap_net_dev) + return -EPERM; + + priv->rtap_net_dev = alloc_ieee80211(0); + if (priv->rtap_net_dev == NULL) + return -ENOMEM; + + + priv->ieee = netdev_priv(priv->rtap_net_dev); + + strcpy(priv->rtap_net_dev->name, "rtap%d"); + + priv->rtap_net_dev->type = ARPHRD_IEEE80211_RADIOTAP; + priv->rtap_net_dev->open = libertas_rtap_open; + priv->rtap_net_dev->stop = libertas_rtap_stop; + priv->rtap_net_dev->get_stats = libertas_rtap_get_stats; + priv->rtap_net_dev->hard_start_xmit = libertas_rtap_hard_start_xmit; + priv->rtap_net_dev->set_multicast_list = libertas_set_multicast_list; + priv->rtap_net_dev->priv = priv; + + priv->ieee->iw_mode = IW_MODE_MONITOR; + + rc = register_netdev(priv->rtap_net_dev); + if (rc) { + free_ieee80211(priv->rtap_net_dev); + priv->rtap_net_dev = NULL; + return rc; + } + + return 0; +} + + module_init(libertas_init_module); module_exit(libertas_exit_module); diff --git a/drivers/net/wireless/libertas/rx.c b/drivers/net/wireless/libertas/rx.c index 769c86fb95096a99e60e74c39900e6f6da2cf907..0420e5b9ff9bbde84e72eecba277718cb9e1497e 100644 --- a/drivers/net/wireless/libertas/rx.c +++ b/drivers/net/wireless/libertas/rx.c @@ -85,12 +85,12 @@ static u8 wlan_getavgnf(wlan_private * priv) static void wlan_save_rawSNRNF(wlan_private * priv, struct rxpd *p_rx_pd) { wlan_adapter *adapter = priv->adapter; - if (adapter->numSNRNF < adapter->data_avg_factor) + if (adapter->numSNRNF < DEFAULT_DATA_AVG_FACTOR) adapter->numSNRNF++; adapter->rawSNR[adapter->nextSNRNF] = p_rx_pd->snr; adapter->rawNF[adapter->nextSNRNF] = p_rx_pd->nf; adapter->nextSNRNF++; - if (adapter->nextSNRNF >= adapter->data_avg_factor) + if (adapter->nextSNRNF >= DEFAULT_DATA_AVG_FACTOR) adapter->nextSNRNF = 0; return; } @@ -117,8 +117,6 @@ static void wlan_compute_rssi(wlan_private * priv, struct rxpd *p_rx_pd) adapter->NF[TYPE_RXPD][TYPE_NOAVG] = p_rx_pd->nf; wlan_save_rawSNRNF(priv, p_rx_pd); - adapter->rxpd_rate = p_rx_pd->rx_rate; - adapter->SNR[TYPE_RXPD][TYPE_AVG] = wlan_getavgsnr(priv) * AVG_SCALE; 
adapter->NF[TYPE_RXPD][TYPE_AVG] = wlan_getavgnf(priv) * AVG_SCALE; lbs_deb_rx("after computing SNR: SNR-avg = %d, NF-avg = %d\n", @@ -140,12 +138,15 @@ void libertas_upload_rx_packet(wlan_private * priv, struct sk_buff *skb) { lbs_deb_rx("skb->data %p\n", skb->data); - if (priv->mesh_dev && IS_MESH_FRAME(skb)) - skb->protocol = eth_type_trans(skb, priv->mesh_dev); - else - skb->protocol = eth_type_trans(skb, priv->dev); + if (priv->adapter->monitormode != WLAN_MONITOR_OFF) { + skb->protocol = eth_type_trans(skb, priv->rtap_net_dev); + } else { + if (priv->mesh_dev && IS_MESH_FRAME(skb)) + skb->protocol = eth_type_trans(skb, priv->mesh_dev); + else + skb->protocol = eth_type_trans(skb, priv->dev); + } skb->ip_summed = CHECKSUM_UNNECESSARY; - netif_rx(skb); } @@ -172,11 +173,7 @@ int libertas_process_rxed_packet(wlan_private * priv, struct sk_buff *skb) lbs_deb_enter(LBS_DEB_RX); - if (priv->adapter->debugmode & MRVDRV_DEBUG_RX_PATH) - lbs_dbg_hex("RX packet: ", skb->data, - min_t(unsigned int, skb->len, 100)); - - if (priv->adapter->linkmode == WLAN_LINKMODE_802_11) + if (priv->adapter->monitormode != WLAN_MONITOR_OFF) return process_rxed_802_11_packet(priv, skb); p_rx_pkt = (struct rxpackethdr *) skb->data; @@ -186,7 +183,7 @@ int libertas_process_rxed_packet(wlan_private * priv, struct sk_buff *skb) else UNSET_MESH_FRAME(skb); - lbs_dbg_hex("RX Data: Before chop rxpd", skb->data, + lbs_deb_hex(LBS_DEB_RX, "RX Data: Before chop rxpd", skb->data, min_t(unsigned int, skb->len, 100)); if (skb->len < (ETH_HLEN + 8 + sizeof(struct rxpd))) { @@ -210,9 +207,9 @@ int libertas_process_rxed_packet(wlan_private * priv, struct sk_buff *skb) lbs_deb_rx("rx data: skb->len-sizeof(RxPd) = %d-%zd = %zd\n", skb->len, sizeof(struct rxpd), skb->len - sizeof(struct rxpd)); - lbs_dbg_hex("RX Data: Dest", p_rx_pkt->eth803_hdr.dest_addr, + lbs_deb_hex(LBS_DEB_RX, "RX Data: Dest", p_rx_pkt->eth803_hdr.dest_addr, sizeof(p_rx_pkt->eth803_hdr.dest_addr)); - lbs_dbg_hex("RX Data: Src", p_rx_pkt->eth803_hdr.src_addr, + lbs_deb_hex(LBS_DEB_RX, "RX Data: Src", p_rx_pkt->eth803_hdr.src_addr, sizeof(p_rx_pkt->eth803_hdr.src_addr)); if (memcmp(&p_rx_pkt->rfc1042_hdr, @@ -244,7 +241,7 @@ int libertas_process_rxed_packet(wlan_private * priv, struct sk_buff *skb) */ hdrchop = (u8 *) p_ethhdr - (u8 *) p_rx_pkt; } else { - lbs_dbg_hex("RX Data: LLC/SNAP", + lbs_deb_hex(LBS_DEB_RX, "RX Data: LLC/SNAP", (u8 *) & p_rx_pkt->rfc1042_hdr, sizeof(p_rx_pkt->rfc1042_hdr)); @@ -260,8 +257,8 @@ int libertas_process_rxed_packet(wlan_private * priv, struct sk_buff *skb) /* Take the data rate from the rxpd structure * only if the rate is auto */ - if (adapter->is_datarate_auto) - adapter->datarate = libertas_index_to_data_rate(p_rx_pd->rx_rate); + if (adapter->auto_rate) + adapter->cur_rate = libertas_fw_index_to_data_rate(p_rx_pd->rx_rate); wlan_compute_rssi(priv, p_rx_pd); @@ -296,21 +293,22 @@ static u8 convert_mv_rate_to_radiotap(u8 rate) return 11; case 3: /* 11 Mbps */ return 22; - case 4: /* 6 Mbps */ + /* case 4: reserved */ + case 5: /* 6 Mbps */ return 12; - case 5: /* 9 Mbps */ + case 6: /* 9 Mbps */ return 18; - case 6: /* 12 Mbps */ + case 7: /* 12 Mbps */ return 24; - case 7: /* 18 Mbps */ + case 8: /* 18 Mbps */ return 36; - case 8: /* 24 Mbps */ + case 9: /* 24 Mbps */ return 48; - case 9: /* 36 Mbps */ + case 10: /* 36 Mbps */ return 72; - case 10: /* 48 Mbps */ + case 11: /* 48 Mbps */ return 96; - case 11: /* 54 Mbps */ + case 12: /* 54 Mbps */ return 108; } lbs_pr_alert("Invalid Marvell WLAN rate %i\n", rate); @@ -340,7 
+338,7 @@ static int process_rxed_802_11_packet(wlan_private * priv, struct sk_buff *skb) p_rx_pkt = (struct rx80211packethdr *) skb->data; prxpd = &p_rx_pkt->rx_pd; - // lbs_dbg_hex("RX Data: Before chop rxpd", skb->data, min(skb->len, 100)); + // lbs_deb_hex(LBS_DEB_RX, "RX Data: Before chop rxpd", skb->data, min(skb->len, 100)); if (skb->len < (ETH_HLEN + 8 + sizeof(struct rxpd))) { lbs_deb_rx("rx err: frame received wit bad length\n"); @@ -361,20 +359,19 @@ static int process_rxed_802_11_packet(wlan_private * priv, struct sk_buff *skb) skb->len, sizeof(struct rxpd), skb->len - sizeof(struct rxpd)); /* create the exported radio header */ - switch (priv->adapter->radiomode) { - case WLAN_RADIOMODE_NONE: + if(priv->adapter->monitormode == WLAN_MONITOR_OFF) { /* no radio header */ /* chop the rxpd */ skb_pull(skb, sizeof(struct rxpd)); - break; + } - case WLAN_RADIOMODE_RADIOTAP: + else { /* radiotap header */ radiotap_hdr.hdr.it_version = 0; /* XXX must check this value for pad */ radiotap_hdr.hdr.it_pad = 0; - radiotap_hdr.hdr.it_len = sizeof(struct rx_radiotap_hdr); - radiotap_hdr.hdr.it_present = RX_RADIOTAP_PRESENT; + radiotap_hdr.hdr.it_len = cpu_to_le16 (sizeof(struct rx_radiotap_hdr)); + radiotap_hdr.hdr.it_present = cpu_to_le32 (RX_RADIOTAP_PRESENT); /* unknown values */ radiotap_hdr.flags = 0; radiotap_hdr.chan_freq = 0; @@ -389,8 +386,6 @@ static int process_rxed_802_11_packet(wlan_private * priv, struct sk_buff *skb) radiotap_hdr.rx_flags |= IEEE80211_RADIOTAP_F_RX_BADFCS; //memset(radiotap_hdr.pad, 0x11, IEEE80211_RADIOTAP_HDRLEN - 18); - // lbs_dbg_hex1("RX radiomode packet BEF: ", skb->data, min(skb->len, 100)); - /* chop the rxpd */ skb_pull(skb, sizeof(struct rxpd)); @@ -408,25 +403,13 @@ static int process_rxed_802_11_packet(wlan_private * priv, struct sk_buff *skb) rx_radiotap_hdr)); memcpy(pradiotap_hdr, &radiotap_hdr, sizeof(struct rx_radiotap_hdr)); - //lbs_dbg_hex1("RX radiomode packet AFT: ", skb->data, min(skb->len, 100)); - break; - - default: - /* unknown header */ - lbs_pr_alert("Unknown radiomode %i\n", - priv->adapter->radiomode); - /* don't export any header */ - /* chop the rxpd */ - skb_pull(skb, sizeof(struct rxpd)); - break; } /* Take the data rate from the rxpd structure * only if the rate is auto */ - if (adapter->is_datarate_auto) { - adapter->datarate = libertas_index_to_data_rate(prxpd->rx_rate); - } + if (adapter->auto_rate) + adapter->cur_rate = libertas_fw_index_to_data_rate(prxpd->rx_rate); wlan_compute_rssi(priv, prxpd); diff --git a/drivers/net/wireless/libertas/scan.c b/drivers/net/wireless/libertas/scan.c index c3043dcb541ea0c1cb129f0b1a8d9988ebf56cc2..ad1e67d984ce70e9b2a59b4a97cfbe61f71c13c4 100644 --- a/drivers/net/wireless/libertas/scan.c +++ b/drivers/net/wireless/libertas/scan.c @@ -13,10 +13,13 @@ #include #include +#include + #include "host.h" #include "decl.h" #include "dev.h" #include "scan.h" +#include "join.h" //! 
Approximate amount of data needed to pass a scan result back to iwlist #define MAX_SCAN_CELL_SIZE (IW_EV_ADDR_LEN \ @@ -62,6 +65,15 @@ static const u8 zeromac[ETH_ALEN] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }; static const u8 bcastmac[ETH_ALEN] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF }; + + + +/*********************************************************************/ +/* */ +/* Misc helper functions */ +/* */ +/*********************************************************************/ + static inline void clear_bss_descriptor (struct bss_descriptor * bss) { /* Don't blow away ->list, just BSS data */ @@ -74,9 +86,9 @@ static inline int match_bss_no_security(struct wlan_802_11_security * secinfo, if ( !secinfo->wep_enabled && !secinfo->WPAenabled && !secinfo->WPA2enabled - && match_bss->wpa_ie[0] != WPA_IE - && match_bss->rsn_ie[0] != WPA2_IE - && !match_bss->privacy) { + && match_bss->wpa_ie[0] != MFIE_TYPE_GENERIC + && match_bss->rsn_ie[0] != MFIE_TYPE_RSN + && !(match_bss->capability & WLAN_CAPABILITY_PRIVACY)) { return 1; } return 0; @@ -88,7 +100,7 @@ static inline int match_bss_static_wep(struct wlan_802_11_security * secinfo, if ( secinfo->wep_enabled && !secinfo->WPAenabled && !secinfo->WPA2enabled - && match_bss->privacy) { + && (match_bss->capability & WLAN_CAPABILITY_PRIVACY)) { return 1; } return 0; @@ -99,9 +111,10 @@ static inline int match_bss_wpa(struct wlan_802_11_security * secinfo, { if ( !secinfo->wep_enabled && secinfo->WPAenabled - && (match_bss->wpa_ie[0] == WPA_IE) + && (match_bss->wpa_ie[0] == MFIE_TYPE_GENERIC) /* privacy bit may NOT be set in some APs like LinkSys WRT54G - && bss->privacy */ + && (match_bss->capability & WLAN_CAPABILITY_PRIVACY)) { + */ ) { return 1; } @@ -113,9 +126,10 @@ static inline int match_bss_wpa2(struct wlan_802_11_security * secinfo, { if ( !secinfo->wep_enabled && secinfo->WPA2enabled - && (match_bss->rsn_ie[0] == WPA2_IE) + && (match_bss->rsn_ie[0] == MFIE_TYPE_RSN) /* privacy bit may NOT be set in some APs like LinkSys WRT54G - && bss->privacy */ + && (match_bss->capability & WLAN_CAPABILITY_PRIVACY)) { + */ ) { return 1; } @@ -128,9 +142,9 @@ static inline int match_bss_dynamic_wep(struct wlan_802_11_security * secinfo, if ( !secinfo->wep_enabled && !secinfo->WPAenabled && !secinfo->WPA2enabled - && (match_bss->wpa_ie[0] != WPA_IE) - && (match_bss->rsn_ie[0] != WPA2_IE) - && match_bss->privacy) { + && (match_bss->wpa_ie[0] != MFIE_TYPE_GENERIC) + && (match_bss->rsn_ie[0] != MFIE_TYPE_RSN) + && (match_bss->capability & WLAN_CAPABILITY_PRIVACY)) { return 1; } return 0; @@ -160,7 +174,7 @@ static int is_network_compatible(wlan_adapter * adapter, { int matched = 0; - lbs_deb_enter(LBS_DEB_ASSOC); + lbs_deb_enter(LBS_DEB_SCAN); if (bss->mode != mode) goto done; @@ -177,7 +191,7 @@ static int is_network_compatible(wlan_adapter * adapter, adapter->secinfo.wep_enabled ? "e" : "d", adapter->secinfo.WPAenabled ? "e" : "d", adapter->secinfo.WPA2enabled ? "e" : "d", - bss->privacy); + (bss->capability & WLAN_CAPABILITY_PRIVACY)); goto done; } else if ((matched = match_bss_wpa2(&adapter->secinfo, bss))) { lbs_deb_scan( @@ -187,15 +201,14 @@ static int is_network_compatible(wlan_adapter * adapter, adapter->secinfo.wep_enabled ? "e" : "d", adapter->secinfo.WPAenabled ? "e" : "d", adapter->secinfo.WPA2enabled ? 
"e" : "d", - bss->privacy); + (bss->capability & WLAN_CAPABILITY_PRIVACY)); goto done; } else if ((matched = match_bss_dynamic_wep(&adapter->secinfo, bss))) { lbs_deb_scan( "is_network_compatible() dynamic WEP: " "wpa_ie=%#x wpa2_ie=%#x privacy=%#x\n", - bss->wpa_ie[0], - bss->rsn_ie[0], - bss->privacy); + bss->wpa_ie[0], bss->rsn_ie[0], + (bss->capability & WLAN_CAPABILITY_PRIVACY)); goto done; } @@ -207,16 +220,44 @@ static int is_network_compatible(wlan_adapter * adapter, adapter->secinfo.wep_enabled ? "e" : "d", adapter->secinfo.WPAenabled ? "e" : "d", adapter->secinfo.WPA2enabled ? "e" : "d", - bss->privacy); + (bss->capability & WLAN_CAPABILITY_PRIVACY)); done: - lbs_deb_leave(LBS_DEB_SCAN); + lbs_deb_leave_args(LBS_DEB_SCAN, "matched: %d", matched); return matched; } +/** + * @brief Compare two SSIDs + * + * @param ssid1 A pointer to ssid to compare + * @param ssid2 A pointer to ssid to compare + * + * @return 0--ssid is same, otherwise is different + */ +int libertas_ssid_cmp(u8 *ssid1, u8 ssid1_len, u8 *ssid2, u8 ssid2_len) +{ + if (ssid1_len != ssid2_len) + return -1; + + return memcmp(ssid1, ssid2, ssid1_len); +} + + + + +/*********************************************************************/ +/* */ +/* Main scanning support */ +/* */ +/*********************************************************************/ + + /** * @brief Create a channel list for the driver to scan based on region info * + * Only used from wlan_scan_setup_scan_config() + * * Use the driver region/band information to construct a comprehensive list * of channels to scan. This routine is used for any scan that is not * provided a specific channel list to scan. @@ -244,17 +285,19 @@ static void wlan_scan_create_channel_list(wlan_private * priv, int nextchan; u8 scantype; + lbs_deb_enter_args(LBS_DEB_SCAN, "filteredscan %d", filteredscan); + chanidx = 0; /* Set the default scan type to the user specified type, will later * be changed to passive on a per channel basis if restricted by * regulatory requirements (11d or 11h) */ - scantype = adapter->scantype; + scantype = CMD_SCAN_TYPE_ACTIVE; for (rgnidx = 0; rgnidx < ARRAY_SIZE(adapter->region_channel); rgnidx++) { if (priv->adapter->enable11d && - adapter->connect_status != libertas_connected) { + adapter->connect_status != LIBERTAS_CONNECTED) { /* Scan all the supported chan for the first scan */ if (!adapter->universal_channel[rgnidx].valid) continue; @@ -286,11 +329,11 @@ static void wlan_scan_create_channel_list(wlan_private * priv, case BAND_G: default: scanchanlist[chanidx].radiotype = - cmd_scan_radio_type_bg; + CMD_SCAN_RADIO_TYPE_BG; break; } - if (scantype == cmd_scan_type_passive) { + if (scantype == CMD_SCAN_TYPE_PASSIVE) { scanchanlist[chanidx].maxscantime = cpu_to_le16(MRVDRV_PASSIVE_SCAN_CHAN_TIME); scanchanlist[chanidx].chanscanmode.passivescan = @@ -312,6 +355,16 @@ static void wlan_scan_create_channel_list(wlan_private * priv, } } + +/* Delayed partial scan worker */ +void libertas_scan_worker(struct work_struct *work) +{ + wlan_private *priv = container_of(work, wlan_private, scan_work.work); + + wlan_scan_networks(priv, NULL, 0); +} + + /** * @brief Construct a wlan_scan_cmd_config structure to use in issue scan cmds * @@ -359,7 +412,6 @@ wlan_scan_setup_scan_config(wlan_private * priv, u8 * pfilteredscan, u8 * pscancurrentonly) { - wlan_adapter *adapter = priv->adapter; struct mrvlietypes_numprobes *pnumprobestlv; struct mrvlietypes_ssidparamset *pssidtlv; struct wlan_scan_cmd_config * pscancfgout = NULL; @@ -371,6 +423,8 @@ 
wlan_scan_setup_scan_config(wlan_private * priv, int channel; int radiotype; + lbs_deb_enter(LBS_DEB_SCAN); + pscancfgout = kzalloc(MAX_SCAN_CFG_ALLOC, GFP_KERNEL); if (pscancfgout == NULL) goto out; @@ -407,15 +461,12 @@ wlan_scan_setup_scan_config(wlan_private * priv, *pscancurrentonly = 0; if (puserscanin) { - /* Set the bss type scan filter, use adapter setting if unset */ pscancfgout->bsstype = - (puserscanin->bsstype ? puserscanin->bsstype : adapter-> - scanmode); + puserscanin->bsstype ? puserscanin->bsstype : CMD_BSS_TYPE_ANY; /* Set the number of probes to send, use adapter setting if unset */ - numprobes = (puserscanin->numprobes ? puserscanin->numprobes : - adapter->scanprobes); + numprobes = puserscanin->numprobes ? puserscanin->numprobes : 0; /* * Set the BSSID filter to the incoming configuration, @@ -447,8 +498,8 @@ wlan_scan_setup_scan_config(wlan_private * priv, *pfilteredscan = 1; } } else { - pscancfgout->bsstype = adapter->scanmode; - numprobes = adapter->scanprobes; + pscancfgout->bsstype = CMD_BSS_TYPE_ANY; + numprobes = 0; } /* If the input config or adapter has the number of Probes set, add tlv */ @@ -469,59 +520,56 @@ wlan_scan_setup_scan_config(wlan_private * priv, */ *ppchantlvout = (struct mrvlietypes_chanlistparamset *) ptlvpos; - if (puserscanin && puserscanin->chanlist[0].channumber) { - - lbs_deb_scan("Scan: Using supplied channel list\n"); + if (!puserscanin || !puserscanin->chanlist[0].channumber) { + /* Create a default channel scan list */ + lbs_deb_scan("creating full region channel list\n"); + wlan_scan_create_channel_list(priv, pscanchanlist, + *pfilteredscan); + goto out; + } - for (chanidx = 0; - chanidx < WLAN_IOCTL_USER_SCAN_CHAN_MAX - && puserscanin->chanlist[chanidx].channumber; chanidx++) { + for (chanidx = 0; + chanidx < WLAN_IOCTL_USER_SCAN_CHAN_MAX + && puserscanin->chanlist[chanidx].channumber; chanidx++) { - channel = puserscanin->chanlist[chanidx].channumber; - (pscanchanlist + chanidx)->channumber = channel; + channel = puserscanin->chanlist[chanidx].channumber; + (pscanchanlist + chanidx)->channumber = channel; - radiotype = puserscanin->chanlist[chanidx].radiotype; - (pscanchanlist + chanidx)->radiotype = radiotype; + radiotype = puserscanin->chanlist[chanidx].radiotype; + (pscanchanlist + chanidx)->radiotype = radiotype; - scantype = puserscanin->chanlist[chanidx].scantype; + scantype = puserscanin->chanlist[chanidx].scantype; - if (scantype == cmd_scan_type_passive) { - (pscanchanlist + - chanidx)->chanscanmode.passivescan = 1; - } else { - (pscanchanlist + - chanidx)->chanscanmode.passivescan = 0; - } + if (scantype == CMD_SCAN_TYPE_PASSIVE) { + (pscanchanlist + + chanidx)->chanscanmode.passivescan = 1; + } else { + (pscanchanlist + + chanidx)->chanscanmode.passivescan = 0; + } - if (puserscanin->chanlist[chanidx].scantime) { - scandur = - puserscanin->chanlist[chanidx].scantime; + if (puserscanin->chanlist[chanidx].scantime) { + scandur = puserscanin->chanlist[chanidx].scantime; + } else { + if (scantype == CMD_SCAN_TYPE_PASSIVE) { + scandur = MRVDRV_PASSIVE_SCAN_CHAN_TIME; } else { - if (scantype == cmd_scan_type_passive) { - scandur = MRVDRV_PASSIVE_SCAN_CHAN_TIME; - } else { - scandur = MRVDRV_ACTIVE_SCAN_CHAN_TIME; - } + scandur = MRVDRV_ACTIVE_SCAN_CHAN_TIME; } - - (pscanchanlist + chanidx)->minscantime = - cpu_to_le16(scandur); - (pscanchanlist + chanidx)->maxscantime = - cpu_to_le16(scandur); } - /* Check if we are only scanning the current channel */ - if ((chanidx == 1) && (puserscanin->chanlist[0].channumber - == - 
priv->adapter->curbssparams.channel)) { - *pscancurrentonly = 1; - lbs_deb_scan("Scan: Scanning current channel only"); - } + (pscanchanlist + chanidx)->minscantime = + cpu_to_le16(scandur); + (pscanchanlist + chanidx)->maxscantime = + cpu_to_le16(scandur); + } - } else { - lbs_deb_scan("Scan: Creating full region channel list\n"); - wlan_scan_create_channel_list(priv, pscanchanlist, - *pfilteredscan); + /* Check if we are only scanning the current channel */ + if ((chanidx == 1) && + (puserscanin->chanlist[0].channumber == + priv->adapter->curbssparams.channel)) { + *pscancurrentonly = 1; + lbs_deb_scan("scanning current channel only"); } out: @@ -531,6 +579,8 @@ wlan_scan_setup_scan_config(wlan_private * priv, /** * @brief Construct and send multiple scan config commands to the firmware * + * Only used from wlan_scan_networks() + * * Previous routines have created a wlan_scan_cmd_config with any requested * TLVs. This function splits the channel TLV into maxchanperscan lists * and sends the portion of the channel TLV along with the other TLVs @@ -568,12 +618,14 @@ static int wlan_scan_channel_list(wlan_private * priv, int scanned = 0; union iwreq_data wrqu; - lbs_deb_enter(LBS_DEB_ASSOC); + lbs_deb_enter_args(LBS_DEB_SCAN, "maxchanperscan %d, filteredscan %d, " + "full_scan %d", maxchanperscan, filteredscan, full_scan); - if (pscancfgout == 0 || pchantlvout == 0 || pscanchanlist == 0) { - lbs_deb_scan("Scan: Null detect: %p, %p, %p\n", - pscancfgout, pchantlvout, pscanchanlist); - return -1; + if (!pscancfgout || !pchantlvout || !pscanchanlist) { + lbs_deb_scan("pscancfgout, pchantlvout or " + "pscanchanlist is NULL\n"); + ret = -1; + goto out; } pchantlvout->header.type = cpu_to_le16(TLV_TYPE_CHANLIST); @@ -605,12 +657,13 @@ static int wlan_scan_channel_list(wlan_private * priv, while (tlvidx < maxchanperscan && ptmpchan->channumber && !doneearly && scanned < 2) { - lbs_deb_scan( - "Scan: Chan(%3d), Radio(%d), mode(%d,%d), Dur(%d)\n", - ptmpchan->channumber, ptmpchan->radiotype, - ptmpchan->chanscanmode.passivescan, - ptmpchan->chanscanmode.disablechanfilt, - ptmpchan->maxscantime); + lbs_deb_scan("channel %d, radio %d, passive %d, " + "dischanflt %d, maxscantime %d\n", + ptmpchan->channumber, + ptmpchan->radiotype, + ptmpchan->chanscanmode.passivescan, + ptmpchan->chanscanmode.disablechanfilt, + ptmpchan->maxscantime); /* Copy the current channel TLV to the command being prepared */ memcpy(pchantlvout->chanscanparam + tlvidx, @@ -667,7 +720,7 @@ static int wlan_scan_channel_list(wlan_private * priv, } /* Send the scan command to the firmware with the specified cfg */ - ret = libertas_prepare_and_send_command(priv, cmd_802_11_scan, 0, + ret = libertas_prepare_and_send_command(priv, CMD_802_11_SCAN, 0, 0, 0, pscancfgout); if (scanned >= 2 && !full_scan) { ret = 0; @@ -679,24 +732,38 @@ static int wlan_scan_channel_list(wlan_private * priv, done: priv->adapter->last_scanned_channel = ptmpchan->channumber; - /* Tell userspace the scan table has been updated */ - memset(&wrqu, 0, sizeof(union iwreq_data)); - wireless_send_event(priv->dev, SIOCGIWSCAN, &wrqu, NULL); + if (priv->adapter->last_scanned_channel) { + /* Schedule the next part of the partial scan */ + if (!full_scan && !priv->adapter->surpriseremoved) { + cancel_delayed_work(&priv->scan_work); + queue_delayed_work(priv->work_thread, &priv->scan_work, + msecs_to_jiffies(300)); + } + } else { + /* All done, tell userspace the scan table has been updated */ + memset(&wrqu, 0, sizeof(union iwreq_data)); + 
wireless_send_event(priv->dev, SIOCGIWSCAN, &wrqu, NULL); + } +out: lbs_deb_leave_args(LBS_DEB_SCAN, "ret %d", ret); return ret; } -static void -clear_selected_scan_list_entries(wlan_adapter * adapter, - const struct wlan_ioctl_user_scan_cfg * scan_cfg) +/* + * Only used from wlan_scan_networks() +*/ +static void clear_selected_scan_list_entries(wlan_adapter *adapter, + const struct wlan_ioctl_user_scan_cfg *scan_cfg) { - struct bss_descriptor * bss; - struct bss_descriptor * safe; + struct bss_descriptor *bss; + struct bss_descriptor *safe; u32 clear_ssid_flag = 0, clear_bssid_flag = 0; + lbs_deb_enter(LBS_DEB_SCAN); + if (!scan_cfg) - return; + goto out; if (scan_cfg->clear_ssid && scan_cfg->ssid_len) clear_ssid_flag = 1; @@ -708,7 +775,7 @@ clear_selected_scan_list_entries(wlan_adapter * adapter, } if (!clear_ssid_flag && !clear_bssid_flag) - return; + goto out; mutex_lock(&adapter->lock); list_for_each_entry_safe (bss, safe, &adapter->network_list, list) { @@ -731,12 +798,16 @@ clear_selected_scan_list_entries(wlan_adapter * adapter, } } mutex_unlock(&adapter->lock); +out: + lbs_deb_leave(LBS_DEB_SCAN); } /** * @brief Internal function used to start a scan based on an input config * + * Also used from debugfs + * * Use the input user scan configuration information when provided in * order to send the appropriate scan commands to firmware to populate or * update the internal driver scan table @@ -744,12 +815,13 @@ clear_selected_scan_list_entries(wlan_adapter * adapter, * @param priv A pointer to wlan_private structure * @param puserscanin Pointer to the input configuration for the requested * scan. + * @param full_scan ??? * * @return 0 or < 0 if error */ int wlan_scan_networks(wlan_private * priv, - const struct wlan_ioctl_user_scan_cfg * puserscanin, - int full_scan) + const struct wlan_ioctl_user_scan_cfg * puserscanin, + int full_scan) { wlan_adapter * adapter = priv->adapter; struct mrvlietypes_chanlistparamset *pchantlvout; @@ -762,9 +834,16 @@ int wlan_scan_networks(wlan_private * priv, #ifdef CONFIG_LIBERTAS_DEBUG struct bss_descriptor * iter_bss; int i = 0; + DECLARE_MAC_BUF(mac); #endif - lbs_deb_enter(LBS_DEB_ASSOC); + lbs_deb_enter_args(LBS_DEB_SCAN, "full_scan %d", full_scan); + + /* Cancel any partial outstanding partial scans if this scan + * is a full scan. 
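+	 * Partial scans are re-queued from wlan_scan_channel_list() on
+	 * priv->work_thread every 300 ms (see libertas_scan_worker()), so a
+	 * full scan must first cancel any fragment still pending.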
+ */ + if (full_scan && delayed_work_pending(&priv->scan_work)) + cancel_delayed_work(&priv->scan_work); scan_chan_list = kzalloc(sizeof(struct chanscanparamset) * WLAN_IOCTL_USER_SCAN_CHAN_MAX, GFP_KERNEL); @@ -791,8 +870,10 @@ int wlan_scan_networks(wlan_private * priv, if (!scancurrentchanonly) { netif_stop_queue(priv->dev); netif_carrier_off(priv->dev); - netif_stop_queue(priv->mesh_dev); - netif_carrier_off(priv->mesh_dev); + if (priv->mesh_dev) { + netif_stop_queue(priv->mesh_dev); + netif_carrier_off(priv->mesh_dev); + } } ret = wlan_scan_channel_list(priv, @@ -807,19 +888,22 @@ int wlan_scan_networks(wlan_private * priv, #ifdef CONFIG_LIBERTAS_DEBUG /* Dump the scan table */ mutex_lock(&adapter->lock); + lbs_deb_scan("The scan table contains:\n"); list_for_each_entry (iter_bss, &adapter->network_list, list) { - lbs_deb_scan("Scan:(%02d) " MAC_FMT ", RSSI[%03d], SSID[%s]\n", - i++, MAC_ARG(iter_bss->bssid), (s32) iter_bss->rssi, + lbs_deb_scan("scan %02d, %s, RSSI, %d, SSID '%s'\n", + i++, print_mac(mac, iter_bss->bssid), (s32) iter_bss->rssi, escape_essid(iter_bss->ssid, iter_bss->ssid_len)); } mutex_unlock(&adapter->lock); #endif - if (priv->adapter->connect_status == libertas_connected) { + if (priv->adapter->connect_status == LIBERTAS_CONNECTED) { netif_carrier_on(priv->dev); netif_wake_queue(priv->dev); - netif_carrier_on(priv->mesh_dev); - netif_wake_queue(priv->mesh_dev); + if (priv->mesh_dev) { + netif_carrier_on(priv->mesh_dev); + netif_wake_queue(priv->mesh_dev); + } } out: @@ -833,58 +917,6 @@ int wlan_scan_networks(wlan_private * priv, return ret; } -/** - * @brief Inspect the scan response buffer for pointers to expected TLVs - * - * TLVs can be included at the end of the scan response BSS information. - * Parse the data in the buffer for pointers to TLVs that can potentially - * be passed back in the response - * - * @param ptlv Pointer to the start of the TLV buffer to parse - * @param tlvbufsize size of the TLV buffer - * @param ptsftlv Output parameter: Pointer to the TSF TLV if found - * - * @return void - */ -static -void wlan_ret_802_11_scan_get_tlv_ptrs(struct mrvlietypes_data * ptlv, - int tlvbufsize, - struct mrvlietypes_tsftimestamp ** ptsftlv) -{ - struct mrvlietypes_data *pcurrenttlv; - int tlvbufleft; - u16 tlvtype; - u16 tlvlen; - - pcurrenttlv = ptlv; - tlvbufleft = tlvbufsize; - *ptsftlv = NULL; - - lbs_deb_scan("SCAN_RESP: tlvbufsize = %d\n", tlvbufsize); - lbs_dbg_hex("SCAN_RESP: TLV Buf", (u8 *) ptlv, tlvbufsize); - - while (tlvbufleft >= sizeof(struct mrvlietypesheader)) { - tlvtype = le16_to_cpu(pcurrenttlv->header.type); - tlvlen = le16_to_cpu(pcurrenttlv->header.len); - - switch (tlvtype) { - case TLV_TYPE_TSFTIMESTAMP: - *ptsftlv = (struct mrvlietypes_tsftimestamp *) pcurrenttlv; - break; - - default: - lbs_deb_scan("SCAN_RESP: Unhandled TLV = %d\n", - tlvtype); - /* Give up, this seems corrupted */ - return; - } /* switch */ - - tlvbufleft -= (sizeof(ptlv->header) + tlvlen); - pcurrenttlv = - (struct mrvlietypes_data *) (pcurrenttlv->Data + tlvlen); - } /* while */ -} - /** * @brief Interpret a BSS scan response returned from the firmware * @@ -899,67 +931,49 @@ void wlan_ret_802_11_scan_get_tlv_ptrs(struct mrvlietypes_data * ptlv, static int libertas_process_bss(struct bss_descriptor * bss, u8 ** pbeaconinfo, int *bytesleft) { - enum ieeetypes_elementid elemID; struct ieeetypes_fhparamset *pFH; struct ieeetypes_dsparamset *pDS; struct ieeetypes_cfparamset *pCF; struct ieeetypes_ibssparamset *pibss; - struct ieeetypes_capinfo *pcap; - struct 
WLAN_802_11_FIXED_IEs fixedie; - u8 *pcurrentptr; - u8 *pRate; - u8 elemlen; - u8 bytestocopy; - u8 ratesize; - u16 beaconsize; - u8 founddatarateie; - int bytesleftforcurrentbeacon; - int ret; - - struct IE_WPA *pIe; - const u8 oui01[4] = { 0x00, 0x50, 0xf2, 0x01 }; - + DECLARE_MAC_BUF(mac); struct ieeetypes_countryinfoset *pcountryinfo; + u8 *pos, *end, *p; + u8 n_ex_rates = 0, got_basic_rates = 0, n_basic_rates = 0; + u16 beaconsize = 0; + int ret; - lbs_deb_enter(LBS_DEB_ASSOC); - - founddatarateie = 0; - ratesize = 0; - beaconsize = 0; + lbs_deb_enter(LBS_DEB_SCAN); if (*bytesleft >= sizeof(beaconsize)) { /* Extract & convert beacon size from the command buffer */ - beaconsize = le16_to_cpup((void *)*pbeaconinfo); + beaconsize = le16_to_cpu(get_unaligned((u16 *)*pbeaconinfo)); *bytesleft -= sizeof(beaconsize); *pbeaconinfo += sizeof(beaconsize); } if (beaconsize == 0 || beaconsize > *bytesleft) { - *pbeaconinfo += *bytesleft; *bytesleft = 0; - - return -1; + ret = -1; + goto done; } /* Initialize the current working beacon pointer for this BSS iteration */ - pcurrentptr = *pbeaconinfo; + pos = *pbeaconinfo; + end = pos + beaconsize; /* Advance the return beacon pointer past the current beacon */ *pbeaconinfo += beaconsize; *bytesleft -= beaconsize; - bytesleftforcurrentbeacon = beaconsize; - - memcpy(bss->bssid, pcurrentptr, ETH_ALEN); - lbs_deb_scan("process_bss: AP BSSID " MAC_FMT "\n", MAC_ARG(bss->bssid)); + memcpy(bss->bssid, pos, ETH_ALEN); + lbs_deb_scan("process_bss: AP BSSID %s\n", print_mac(mac, bss->bssid)); + pos += ETH_ALEN; - pcurrentptr += ETH_ALEN; - bytesleftforcurrentbeacon -= ETH_ALEN; - - if (bytesleftforcurrentbeacon < 12) { + if ((end - pos) < 12) { lbs_deb_scan("process_bss: Not enough bytes left\n"); - return -1; + ret = -1; + goto done; } /* @@ -968,85 +982,61 @@ static int libertas_process_bss(struct bss_descriptor * bss, */ /* RSSI is 1 byte long */ - bss->rssi = *pcurrentptr; - lbs_deb_scan("process_bss: RSSI=%02X\n", *pcurrentptr); - pcurrentptr += 1; - bytesleftforcurrentbeacon -= 1; + bss->rssi = *pos; + lbs_deb_scan("process_bss: RSSI=%02X\n", *pos); + pos++; /* time stamp is 8 bytes long */ - fixedie.timestamp = bss->timestamp = le64_to_cpup((void *)pcurrentptr); - pcurrentptr += 8; - bytesleftforcurrentbeacon -= 8; + pos += 8; /* beacon interval is 2 bytes long */ - fixedie.beaconinterval = bss->beaconperiod = le16_to_cpup((void *)pcurrentptr); - pcurrentptr += 2; - bytesleftforcurrentbeacon -= 2; + bss->beaconperiod = le16_to_cpup((void *) pos); + pos += 2; /* capability information is 2 bytes long */ - memcpy(&fixedie.capabilities, pcurrentptr, 2); - lbs_deb_scan("process_bss: fixedie.capabilities=0x%X\n", - fixedie.capabilities); - pcap = (struct ieeetypes_capinfo *) & fixedie.capabilities; - memcpy(&bss->cap, pcap, sizeof(struct ieeetypes_capinfo)); - pcurrentptr += 2; - bytesleftforcurrentbeacon -= 2; - - /* rest of the current buffer are IE's */ - lbs_deb_scan("process_bss: IE length for this AP = %d\n", - bytesleftforcurrentbeacon); + bss->capability = le16_to_cpup((void *) pos); + lbs_deb_scan("process_bss: capabilities = 0x%4X\n", bss->capability); + pos += 2; - lbs_dbg_hex("process_bss: IE info", (u8 *) pcurrentptr, - bytesleftforcurrentbeacon); - - if (pcap->privacy) { + if (bss->capability & WLAN_CAPABILITY_PRIVACY) lbs_deb_scan("process_bss: AP WEP enabled\n"); - bss->privacy = wlan802_11privfilter8021xWEP; - } else { - bss->privacy = wlan802_11privfilteracceptall; - } - - if (pcap->ibss == 1) { + if (bss->capability & 
WLAN_CAPABILITY_IBSS) bss->mode = IW_MODE_ADHOC; - } else { + else bss->mode = IW_MODE_INFRA; - } + + /* rest of the current buffer are IE's */ + lbs_deb_scan("process_bss: IE length for this AP = %zd\n", end - pos); + lbs_deb_hex(LBS_DEB_SCAN, "process_bss: IE info", pos, end - pos); /* process variable IE */ - while (bytesleftforcurrentbeacon >= 2) { - elemID = (enum ieeetypes_elementid) (*((u8 *) pcurrentptr)); - elemlen = *((u8 *) pcurrentptr + 1); + while (pos <= end - 2) { + struct ieee80211_info_element * elem = + (struct ieee80211_info_element *) pos; - if (bytesleftforcurrentbeacon < elemlen) { + if (pos + elem->len > end) { lbs_deb_scan("process_bss: error in processing IE, " "bytes left < IE length\n"); - bytesleftforcurrentbeacon = 0; - continue; + break; } - switch (elemID) { - case SSID: - bss->ssid_len = elemlen; - memcpy(bss->ssid, (pcurrentptr + 2), elemlen); + switch (elem->id) { + case MFIE_TYPE_SSID: + bss->ssid_len = elem->len; + memcpy(bss->ssid, elem->data, elem->len); lbs_deb_scan("ssid '%s', ssid length %u\n", escape_essid(bss->ssid, bss->ssid_len), bss->ssid_len); break; - case SUPPORTED_RATES: - memcpy(bss->datarates, (pcurrentptr + 2), elemlen); - memmove(bss->libertas_supported_rates, (pcurrentptr + 2), - elemlen); - ratesize = elemlen; - founddatarateie = 1; + case MFIE_TYPE_RATES: + n_basic_rates = min_t(u8, MAX_RATES, elem->len); + memcpy(bss->rates, elem->data, n_basic_rates); + got_basic_rates = 1; break; - case EXTRA_IE: - lbs_deb_scan("process_bss: EXTRA_IE Found!\n"); - break; - - case FH_PARAM_SET: - pFH = (struct ieeetypes_fhparamset *) pcurrentptr; + case MFIE_TYPE_FH_SET: + pFH = (struct ieeetypes_fhparamset *) pos; memmove(&bss->phyparamset.fhparamset, pFH, sizeof(struct ieeetypes_fhparamset)); #if 0 /* I think we can store these LE */ @@ -1055,21 +1045,21 @@ static int libertas_process_bss(struct bss_descriptor * bss, #endif break; - case DS_PARAM_SET: - pDS = (struct ieeetypes_dsparamset *) pcurrentptr; + case MFIE_TYPE_DS_SET: + pDS = (struct ieeetypes_dsparamset *) pos; bss->channel = pDS->currentchan; memcpy(&bss->phyparamset.dsparamset, pDS, sizeof(struct ieeetypes_dsparamset)); break; - case CF_PARAM_SET: - pCF = (struct ieeetypes_cfparamset *) pcurrentptr; + case MFIE_TYPE_CF_SET: + pCF = (struct ieeetypes_cfparamset *) pos; memcpy(&bss->ssparamset.cfparamset, pCF, sizeof(struct ieeetypes_cfparamset)); break; - case IBSS_PARAM_SET: - pibss = (struct ieeetypes_ibssparamset *) pcurrentptr; + case MFIE_TYPE_IBSS_SET: + pibss = (struct ieeetypes_ibssparamset *) pos; bss->atimwindow = le32_to_cpu(pibss->atimwindow); memmove(&bss->ssparamset.ibssparamset, pibss, sizeof(struct ieeetypes_ibssparamset)); @@ -1079,9 +1069,8 @@ static int libertas_process_bss(struct bss_descriptor * bss, #endif break; - /* Handle Country Info IE */ - case COUNTRY_INFO: - pcountryinfo = (struct ieeetypes_countryinfoset *) pcurrentptr; + case MFIE_TYPE_COUNTRY: + pcountryinfo = (struct ieeetypes_countryinfoset *) pos; if (pcountryinfo->len < sizeof(pcountryinfo->countrycode) || pcountryinfo->len > 254) { lbs_deb_scan("process_bss: 11D- Err " @@ -1094,70 +1083,63 @@ static int libertas_process_bss(struct bss_descriptor * bss, memcpy(&bss->countryinfo, pcountryinfo, pcountryinfo->len + 2); - lbs_dbg_hex("process_bss: 11D- CountryInfo:", + lbs_deb_hex(LBS_DEB_SCAN, "process_bss: 11d countryinfo", (u8 *) pcountryinfo, (u32) (pcountryinfo->len + 2)); break; - case EXTENDED_SUPPORTED_RATES: - /* - * only process extended supported rate - * if data rate is already found. 
- * data rate IE should come before + case MFIE_TYPE_RATES_EX: + /* only process extended supported rate if data rate is + * already found. Data rate IE should come before * extended supported rate IE */ - if (founddatarateie) { - if ((elemlen + ratesize) > WLAN_SUPPORTED_RATES) { - bytestocopy = - (WLAN_SUPPORTED_RATES - ratesize); - } else { - bytestocopy = elemlen; - } - - pRate = (u8 *) bss->datarates; - pRate += ratesize; - memmove(pRate, (pcurrentptr + 2), bytestocopy); - pRate = (u8 *) bss->libertas_supported_rates; - pRate += ratesize; - memmove(pRate, (pcurrentptr + 2), bytestocopy); - } - break; - - case VENDOR_SPECIFIC_221: -#define IE_ID_LEN_FIELDS_BYTES 2 - pIe = (struct IE_WPA *)pcurrentptr; - - if (memcmp(pIe->oui, oui01, sizeof(oui01))) + if (!got_basic_rates) break; - bss->wpa_ie_len = min(elemlen + IE_ID_LEN_FIELDS_BYTES, - MAX_WPA_IE_LEN); - memcpy(bss->wpa_ie, pcurrentptr, bss->wpa_ie_len); - lbs_dbg_hex("process_bss: WPA IE", bss->wpa_ie, elemlen); + n_ex_rates = elem->len; + if (n_basic_rates + n_ex_rates > MAX_RATES) + n_ex_rates = MAX_RATES - n_basic_rates; + + p = bss->rates + n_basic_rates; + memcpy(p, elem->data, n_ex_rates); break; - case WPA2_IE: - pIe = (struct IE_WPA *)pcurrentptr; - bss->rsn_ie_len = min(elemlen + IE_ID_LEN_FIELDS_BYTES, - MAX_WPA_IE_LEN); - memcpy(bss->rsn_ie, pcurrentptr, bss->rsn_ie_len); - lbs_dbg_hex("process_bss: RSN_IE", bss->rsn_ie, elemlen); + + case MFIE_TYPE_GENERIC: + if (elem->len >= 4 && + elem->data[0] == 0x00 && + elem->data[1] == 0x50 && + elem->data[2] == 0xf2 && + elem->data[3] == 0x01) { + bss->wpa_ie_len = min(elem->len + 2, + MAX_WPA_IE_LEN); + memcpy(bss->wpa_ie, elem, bss->wpa_ie_len); + lbs_deb_hex(LBS_DEB_SCAN, "process_bss: WPA IE", bss->wpa_ie, + elem->len); + } else if (elem->len >= MARVELL_MESH_IE_LENGTH && + elem->data[0] == 0x00 && + elem->data[1] == 0x50 && + elem->data[2] == 0x43 && + elem->data[3] == 0x04) { + bss->mesh = 1; + } break; - case TIM: + + case MFIE_TYPE_RSN: + bss->rsn_ie_len = min(elem->len + 2, MAX_WPA_IE_LEN); + memcpy(bss->rsn_ie, elem, bss->rsn_ie_len); + lbs_deb_hex(LBS_DEB_SCAN, "process_bss: RSN_IE", bss->rsn_ie, elem->len); break; - case CHALLENGE_TEXT: + default: break; } - pcurrentptr += elemlen + 2; - - /* need to account for IE ID and IE len */ - bytesleftforcurrentbeacon -= (elemlen + 2); - - } /* while (bytesleftforcurrentbeacon > 2) */ + pos += elem->len + 2; + } /* Timestamp */ bss->last_scanned = jiffies; + libertas_unset_basic_rate_flags(bss->rates, sizeof(bss->rates)); ret = 0; @@ -1166,41 +1148,29 @@ static int libertas_process_bss(struct bss_descriptor * bss, return ret; } -/** - * @brief Compare two SSIDs - * - * @param ssid1 A pointer to ssid to compare - * @param ssid2 A pointer to ssid to compare - * - * @return 0--ssid is same, otherwise is different - */ -int libertas_ssid_cmp(u8 *ssid1, u8 ssid1_len, u8 *ssid2, u8 ssid2_len) -{ - if (ssid1_len != ssid2_len) - return -1; - - return memcmp(ssid1, ssid2, ssid1_len); -} - /** * @brief This function finds a specific compatible BSSID in the scan list * + * Used in association code + * * @param adapter A pointer to wlan_adapter * @param bssid BSSID to find in the scan list * @param mode Network mode: Infrastructure or IBSS * * @return index in BSSID list, or error return code (< 0) */ -struct bss_descriptor * libertas_find_bssid_in_list(wlan_adapter * adapter, +struct bss_descriptor *libertas_find_bssid_in_list(wlan_adapter * adapter, u8 * bssid, u8 mode) { struct bss_descriptor * iter_bss; struct bss_descriptor * 
found_bss = NULL; + lbs_deb_enter(LBS_DEB_SCAN); + if (!bssid) - return NULL; + goto out; - lbs_dbg_hex("libertas_find_BSSID_in_list: looking for ", + lbs_deb_hex(LBS_DEB_SCAN, "looking for", bssid, ETH_ALEN); /* Look through the scan table for a compatible match. The loop will @@ -1225,12 +1195,16 @@ struct bss_descriptor * libertas_find_bssid_in_list(wlan_adapter * adapter, } mutex_unlock(&adapter->lock); +out: + lbs_deb_leave_args(LBS_DEB_SCAN, "found_bss %p", found_bss); return found_bss; } /** * @brief This function finds ssid in ssid list. * + * Used in association code + * * @param adapter A pointer to wlan_adapter * @param ssid SSID to find in the list * @param bssid BSSID to qualify the SSID selection (if provided) @@ -1247,6 +1221,8 @@ struct bss_descriptor * libertas_find_ssid_in_list(wlan_adapter * adapter, struct bss_descriptor * found_bss = NULL; struct bss_descriptor * tmp_oldest = NULL; + lbs_deb_enter(LBS_DEB_SCAN); + mutex_lock(&adapter->lock); list_for_each_entry (iter_bss, &adapter->network_list, list) { @@ -1291,6 +1267,7 @@ struct bss_descriptor * libertas_find_ssid_in_list(wlan_adapter * adapter, out: mutex_unlock(&adapter->lock); + lbs_deb_leave_args(LBS_DEB_SCAN, "found_bss %p", found_bss); return found_bss; } @@ -1304,13 +1281,15 @@ struct bss_descriptor * libertas_find_ssid_in_list(wlan_adapter * adapter, * * @return index in BSSID list */ -struct bss_descriptor * libertas_find_best_ssid_in_list(wlan_adapter * adapter, +static struct bss_descriptor * libertas_find_best_ssid_in_list(wlan_adapter * adapter, u8 mode) { u8 bestrssi = 0; struct bss_descriptor * iter_bss; struct bss_descriptor * best_bss = NULL; + lbs_deb_enter(LBS_DEB_SCAN); + mutex_lock(&adapter->lock); list_for_each_entry (iter_bss, &adapter->network_list, list) { @@ -1335,12 +1314,15 @@ struct bss_descriptor * libertas_find_best_ssid_in_list(wlan_adapter * adapter, } mutex_unlock(&adapter->lock); + lbs_deb_leave_args(LBS_DEB_SCAN, "best_bss %p", best_bss); return best_bss; } /** * @brief Find the AP with specific ssid in the scan list * + * Used from association worker. + * * @param priv A pointer to wlan_private structure * @param pSSID A pointer to AP's ssid * @@ -1353,11 +1335,11 @@ int libertas_find_best_network_ssid(wlan_private * priv, int ret = -1; struct bss_descriptor * found; - lbs_deb_enter(LBS_DEB_ASSOC); + lbs_deb_enter(LBS_DEB_SCAN); wlan_scan_networks(priv, NULL, 1); if (adapter->surpriseremoved) - return -1; + goto out; wait_event_interruptible(adapter->cmd_pending, !adapter->nr_cmd_pending); @@ -1369,6 +1351,7 @@ int libertas_find_best_network_ssid(wlan_private * priv, ret = 0; } +out: lbs_deb_leave_args(LBS_DEB_SCAN, "ret %d", ret); return ret; } @@ -1391,7 +1374,10 @@ int libertas_set_scan(struct net_device *dev, struct iw_request_info *info, lbs_deb_enter(LBS_DEB_SCAN); - wlan_scan_networks(priv, NULL, 0); + if (!delayed_work_pending(&priv->scan_work)) { + queue_delayed_work(priv->work_thread, &priv->scan_work, + msecs_to_jiffies(50)); + } if (adapter->surpriseremoved) return -1; @@ -1400,10 +1386,17 @@ int libertas_set_scan(struct net_device *dev, struct iw_request_info *info, return 0; } + /** * @brief Send a scan command for all available channels filtered on a spec * + * Used in association code and from debugfs + * * @param priv A pointer to wlan_private structure + * @param ssid A pointer to the SSID to scan for + * @param ssid_len Length of the SSID + * @param clear_ssid Should existing scan results with this SSID + * be cleared? 
* @param prequestedssid A pointer to AP's ssid * @param keeppreviousscan Flag used to save/clear scan table before scan * @@ -1416,7 +1409,8 @@ int libertas_send_specific_ssid_scan(wlan_private * priv, struct wlan_ioctl_user_scan_cfg scancfg; int ret = 0; - lbs_deb_enter(LBS_DEB_ASSOC); + lbs_deb_enter_args(LBS_DEB_SCAN, "SSID '%s', clear %d", + escape_essid(ssid, ssid_len), clear_ssid); if (!ssid_len) goto out; @@ -1427,47 +1421,27 @@ int libertas_send_specific_ssid_scan(wlan_private * priv, scancfg.clear_ssid = clear_ssid; wlan_scan_networks(priv, &scancfg, 1); - if (adapter->surpriseremoved) - return -1; + if (adapter->surpriseremoved) { + ret = -1; + goto out; + } wait_event_interruptible(adapter->cmd_pending, !adapter->nr_cmd_pending); out: - lbs_deb_leave(LBS_DEB_ASSOC); + lbs_deb_leave_args(LBS_DEB_SCAN, "ret %d", ret); return ret; } -/** - * @brief scan an AP with specific BSSID - * - * @param priv A pointer to wlan_private structure - * @param bssid A pointer to AP's bssid - * @param keeppreviousscan Flag used to save/clear scan table before scan - * - * @return 0-success, otherwise fail - */ -int libertas_send_specific_bssid_scan(wlan_private * priv, u8 * bssid, u8 clear_bssid) -{ - struct wlan_ioctl_user_scan_cfg scancfg; - - lbs_deb_enter(LBS_DEB_ASSOC); - if (bssid == NULL) - goto out; - memset(&scancfg, 0x00, sizeof(scancfg)); - memcpy(scancfg.bssid, bssid, ETH_ALEN); - scancfg.clear_bssid = clear_bssid; - wlan_scan_networks(priv, &scancfg, 1); - if (priv->adapter->surpriseremoved) - return -1; - wait_event_interruptible(priv->adapter->cmd_pending, - !priv->adapter->nr_cmd_pending); +/*********************************************************************/ +/* */ +/* Support for Wireless Extensions */ +/* */ +/*********************************************************************/ -out: - lbs_deb_leave(LBS_DEB_ASSOC); - return 0; -} +#define MAX_CUSTOM_LEN 64 static inline char *libertas_translate_scan(wlan_private *priv, char *start, char *stop, @@ -1483,10 +1457,13 @@ static inline char *libertas_translate_scan(wlan_private *priv, #define RSSI_DIFF ((u8)(PERFECT_RSSI - WORST_RSSI)) u8 rssi; + lbs_deb_enter(LBS_DEB_SCAN); + cfp = libertas_find_cfp_by_band_and_channel(adapter, 0, bss->channel); if (!cfp) { lbs_deb_scan("Invalid channel number %d\n", bss->channel); - return NULL; + start = NULL; + goto out; } /* First entry *MUST* be the AP BSSID */ @@ -1550,7 +1527,7 @@ static inline char *libertas_translate_scan(wlan_private *priv, /* Add encryption capability */ iwe.cmd = SIOCGIWENCODE; - if (bss->privacy) { + if (bss->capability & WLAN_CAPABILITY_PRIVACY) { iwe.u.data.flags = IW_ENCODE_ENABLED | IW_ENCODE_NOKEY; } else { iwe.u.data.flags = IW_ENCODE_DISABLED; @@ -1565,12 +1542,9 @@ static inline char *libertas_translate_scan(wlan_private *priv, iwe.u.bitrate.disabled = 0; iwe.u.bitrate.value = 0; - for (j = 0; j < sizeof(bss->libertas_supported_rates); j++) { - u8 rate = bss->libertas_supported_rates[j]; - if (rate == 0) - break; /* no more rates */ - /* Bit rate given in 500 kb/s units (+ 0x80) */ - iwe.u.bitrate.value = (rate & 0x7f) * 500000; + for (j = 0; bss->rates[j] && (j < sizeof(bss->rates)); j++) { + /* Bit rate given in 500 kb/s units */ + iwe.u.bitrate.value = bss->rates[j] * 500000; current_val = iwe_stream_add_value(start, current_val, stop, &iwe, IW_EV_PARAM_LEN); } @@ -1605,11 +1579,25 @@ static inline char *libertas_translate_scan(wlan_private *priv, start = iwe_stream_add_point(start, stop, &iwe, buf); } + if (bss->mesh) { + char custom[MAX_CUSTOM_LEN]; + 
char *p = custom; + + iwe.cmd = IWEVCUSTOM; + p += snprintf(p, MAX_CUSTOM_LEN - (p - custom), + "mesh-type: olpc"); + iwe.u.data.length = p - custom; + if (iwe.u.data.length) + start = iwe_stream_add_point(start, stop, &iwe, custom); + } + +out: + lbs_deb_leave_args(LBS_DEB_SCAN, "start %p", start); return start; } /** - * @brief Retrieve the scan table entries via wireless tools IOCTL call + * @brief Handle the retrieve scan table ioctl * * @param dev A pointer to net_device structure * @param info A pointer to iw_request_info structure @@ -1630,16 +1618,12 @@ int libertas_get_scan(struct net_device *dev, struct iw_request_info *info, struct bss_descriptor * iter_bss; struct bss_descriptor * safe; - lbs_deb_enter(LBS_DEB_ASSOC); - - /* If we've got an uncompleted scan, schedule the next part */ - if (!adapter->nr_cmd_pending && adapter->last_scanned_channel) - wlan_scan_networks(priv, NULL, 0); + lbs_deb_enter(LBS_DEB_SCAN); /* Update RSSI if current BSS is a locally created ad-hoc BSS */ if ((adapter->mode == IW_MODE_ADHOC) && adapter->adhoccreate) { - libertas_prepare_and_send_command(priv, cmd_802_11_rssi, 0, - cmd_option_waitforrsp, 0, NULL); + libertas_prepare_and_send_command(priv, CMD_802_11_RSSI, 0, + CMD_OPTION_WAITFORRSP, 0, NULL); } mutex_lock(&adapter->lock); @@ -1652,6 +1636,10 @@ int libertas_get_scan(struct net_device *dev, struct iw_request_info *info, break; } + /* For mesh device, list only mesh networks */ + if (dev == priv->mesh_dev && !iter_bss->mesh) + continue; + /* Prune old an old scan result */ stale_time = iter_bss->last_scanned + DEFAULT_MAX_SCAN_AGE; if (time_after(jiffies, stale_time)) { @@ -1672,19 +1660,27 @@ int libertas_get_scan(struct net_device *dev, struct iw_request_info *info, dwrq->length = (ev - extra); dwrq->flags = 0; - lbs_deb_leave(LBS_DEB_ASSOC); + lbs_deb_leave_args(LBS_DEB_SCAN, "ret %d", err); return err; } + + + +/*********************************************************************/ +/* */ +/* Command execution */ +/* */ +/*********************************************************************/ + + /** * @brief Prepare a scan command to be sent to the firmware * - * Use the wlan_scan_cmd_config sent to the command processing module in - * the libertas_prepare_and_send_command to configure a cmd_ds_802_11_scan command - * struct to send to firmware. + * Called from libertas_prepare_and_send_command() in cmd.c * - * The fixed fields specifying the BSS type and BSSID filters as well as a - * variable number/length of TLVs are sent in the command to firmware. + * Sends a fixed length data part (specifying the BSS type and BSSID filters) + * as well as a variable number/length of TLVs to the firmware. 
* * @param priv A pointer to wlan_private structure * @param cmd A pointer to cmd_ds_command structure to be sent to @@ -1693,36 +1689,31 @@ int libertas_get_scan(struct net_device *dev, struct iw_request_info *info, * to set the fields/TLVs for the command sent to firmware * * @return 0 or -1 - * - * @sa wlan_scan_create_channel_list */ int libertas_cmd_80211_scan(wlan_private * priv, struct cmd_ds_command *cmd, void *pdata_buf) { struct cmd_ds_802_11_scan *pscan = &cmd->params.scan; - struct wlan_scan_cmd_config *pscancfg; + struct wlan_scan_cmd_config *pscancfg = pdata_buf; - lbs_deb_enter(LBS_DEB_ASSOC); - - pscancfg = pdata_buf; + lbs_deb_enter(LBS_DEB_SCAN); /* Set fixed field variables in scan command */ pscan->bsstype = pscancfg->bsstype; - memcpy(pscan->BSSID, pscancfg->bssid, sizeof(pscan->BSSID)); + memcpy(pscan->bssid, pscancfg->bssid, ETH_ALEN); memcpy(pscan->tlvbuffer, pscancfg->tlvbuffer, pscancfg->tlvbufferlen); - cmd->command = cpu_to_le16(cmd_802_11_scan); + cmd->command = cpu_to_le16(CMD_802_11_SCAN); /* size is equal to the sizeof(fixed portions) + the TLV len + header */ - cmd->size = cpu_to_le16(sizeof(pscan->bsstype) - + sizeof(pscan->BSSID) - + pscancfg->tlvbufferlen + S_DS_GEN); + cmd->size = cpu_to_le16(sizeof(pscan->bsstype) + ETH_ALEN + + pscancfg->tlvbufferlen + S_DS_GEN); - lbs_deb_scan("SCAN_CMD: command=%x, size=%x, seqnum=%x\n", + lbs_deb_scan("SCAN_CMD: command 0x%04x, size %d, seqnum %d\n", le16_to_cpu(cmd->command), le16_to_cpu(cmd->size), le16_to_cpu(cmd->seqnum)); - lbs_deb_leave(LBS_DEB_ASSOC); + lbs_deb_leave(LBS_DEB_SCAN); return 0; } @@ -1741,6 +1732,8 @@ static inline int is_same_network(struct bss_descriptor *src, /** * @brief This function handles the command response of scan * + * Called from handle_cmd_response() in cmdresp.c. + * * The response buffer for the scan command has the following * memory layout: * @@ -1766,8 +1759,6 @@ int libertas_ret_80211_scan(wlan_private * priv, struct cmd_ds_command *resp) { wlan_adapter *adapter = priv->adapter; struct cmd_ds_802_11_scan_rsp *pscan; - struct mrvlietypes_data *ptlv; - struct mrvlietypes_tsftimestamp *ptsftlv; struct bss_descriptor * iter_bss; struct bss_descriptor * safe; u8 *pbssinfo; @@ -1777,7 +1768,7 @@ int libertas_ret_80211_scan(wlan_private * priv, struct cmd_ds_command *resp) int tlvbufsize; int ret; - lbs_deb_enter(LBS_DEB_ASSOC); + lbs_deb_enter(LBS_DEB_SCAN); /* Prune old entries from scan table */ list_for_each_entry_safe (iter_bss, safe, &adapter->network_list, list) { @@ -1798,10 +1789,10 @@ int libertas_ret_80211_scan(wlan_private * priv, struct cmd_ds_command *resp) goto done; } - bytesleft = le16_to_cpu(pscan->bssdescriptsize); + bytesleft = le16_to_cpu(get_unaligned((u16*)&pscan->bssdescriptsize)); lbs_deb_scan("SCAN_RESP: bssdescriptsize %d\n", bytesleft); - scanrespsize = le16_to_cpu(resp->size); + scanrespsize = le16_to_cpu(get_unaligned((u16*)&resp->size)); lbs_deb_scan("SCAN_RESP: returned %d AP before parsing\n", pscan->nr_sets); @@ -1816,11 +1807,6 @@ int libertas_ret_80211_scan(wlan_private * priv, struct cmd_ds_command *resp) + sizeof(pscan->nr_sets) + S_DS_GEN); - ptlv = (struct mrvlietypes_data *) (pscan->bssdesc_and_tlvbuffer + bytesleft); - - /* Search the TLV buffer space in the scan response for any valid TLVs */ - wlan_ret_802_11_scan_get_tlv_ptrs(ptlv, tlvbufsize, &ptsftlv); - /* * Process each scan response returned (pscan->nr_sets). 
Save * the information in the newbssentry and then insert into the @@ -1831,6 +1817,7 @@ int libertas_ret_80211_scan(wlan_private * priv, struct cmd_ds_command *resp) struct bss_descriptor new; struct bss_descriptor * found = NULL; struct bss_descriptor * oldest = NULL; + DECLARE_MAC_BUF(mac); /* Process the data fields and IEs returned for this BSS */ memset(&new, 0, sizeof (struct bss_descriptor)); @@ -1869,19 +1856,8 @@ int libertas_ret_80211_scan(wlan_private * priv, struct cmd_ds_command *resp) continue; } - lbs_deb_scan("SCAN_RESP: BSSID = " MAC_FMT "\n", - new.bssid[0], new.bssid[1], new.bssid[2], - new.bssid[3], new.bssid[4], new.bssid[5]); - - /* - * If the TSF TLV was appended to the scan results, save the - * this entries TSF value in the networktsf field. The - * networktsf is the firmware's TSF value at the time the - * beacon or probe response was received. - */ - if (ptsftlv) { - new.networktsf = le64_to_cpup(&ptsftlv->tsftable[idx]); - } + lbs_deb_scan("SCAN_RESP: BSSID = %s\n", + print_mac(mac, new.bssid)); /* Copy the locally created newbssentry to the scan table */ memcpy(found, &new, offsetof(struct bss_descriptor, list)); diff --git a/drivers/net/wireless/libertas/scan.h b/drivers/net/wireless/libertas/scan.h index bd019e5ff1eb24f294fb7c12d0874017ecef7136..c29c031bef8cf8a9798ee9bf31e9314e0acc0676 100644 --- a/drivers/net/wireless/libertas/scan.h +++ b/drivers/net/wireless/libertas/scan.h @@ -140,8 +140,7 @@ struct bss_descriptor { u8 ssid[IW_ESSID_MAX_SIZE + 1]; u8 ssid_len; - /* WEP encryption requirement */ - u32 privacy; + u16 capability; /* receive signal strength in dBm */ long rssi; @@ -152,18 +151,16 @@ struct bss_descriptor { u32 atimwindow; + /* IW_MODE_AUTO, IW_MODE_ADHOC, IW_MODE_INFRA */ u8 mode; - u8 libertas_supported_rates[WLAN_SUPPORTED_RATES]; - __le64 timestamp; //!< TSF value included in the beacon/probe response + /* zero-terminated array of supported data rates */ + u8 rates[MAX_RATES + 1]; + unsigned long last_scanned; union ieeetypes_phyparamset phyparamset; union IEEEtypes_ssparamset ssparamset; - struct ieeetypes_capinfo cap; - u8 datarates[WLAN_SUPPORTED_RATES]; - - u64 networktsf; //!< TSF timestamp from the current firmware TSF struct ieeetypes_countryinfofullset countryinfo; @@ -172,34 +169,31 @@ struct bss_descriptor { u8 rsn_ie[MAX_WPA_IE_LEN]; size_t rsn_ie_len; + u8 mesh; + struct list_head list; }; -extern int libertas_ssid_cmp(u8 *ssid1, u8 ssid1_len, u8 *ssid2, u8 ssid2_len); +int libertas_ssid_cmp(u8 *ssid1, u8 ssid1_len, u8 *ssid2, u8 ssid2_len); struct bss_descriptor * libertas_find_ssid_in_list(wlan_adapter * adapter, u8 *ssid, u8 ssid_len, u8 * bssid, u8 mode, int channel); -struct bss_descriptor * libertas_find_best_ssid_in_list(wlan_adapter * adapter, - u8 mode); - -extern struct bss_descriptor * libertas_find_bssid_in_list(wlan_adapter * adapter, +struct bss_descriptor * libertas_find_bssid_in_list(wlan_adapter * adapter, u8 * bssid, u8 mode); int libertas_find_best_network_ssid(wlan_private * priv, u8 *out_ssid, u8 *out_ssid_len, u8 preferred_mode, u8 *out_mode); -extern int libertas_send_specific_ssid_scan(wlan_private * priv, u8 *ssid, +int libertas_send_specific_ssid_scan(wlan_private * priv, u8 *ssid, u8 ssid_len, u8 clear_ssid); -extern int libertas_send_specific_bssid_scan(wlan_private * priv, - u8 * bssid, u8 clear_bssid); -extern int libertas_cmd_80211_scan(wlan_private * priv, +int libertas_cmd_80211_scan(wlan_private * priv, struct cmd_ds_command *cmd, void *pdata_buf); -extern int 
libertas_ret_80211_scan(wlan_private * priv, +int libertas_ret_80211_scan(wlan_private * priv, struct cmd_ds_command *resp); int wlan_scan_networks(wlan_private * priv, @@ -211,9 +205,11 @@ struct ifreq; struct iw_point; struct iw_param; struct iw_request_info; -extern int libertas_get_scan(struct net_device *dev, struct iw_request_info *info, +int libertas_get_scan(struct net_device *dev, struct iw_request_info *info, struct iw_point *dwrq, char *extra); -extern int libertas_set_scan(struct net_device *dev, struct iw_request_info *info, +int libertas_set_scan(struct net_device *dev, struct iw_request_info *info, struct iw_param *vwrq, char *extra); +void libertas_scan_worker(struct work_struct *work); + #endif /* _WLAN_SCAN_H */ diff --git a/drivers/net/wireless/libertas/thread.h b/drivers/net/wireless/libertas/thread.h deleted file mode 100644 index b1f34d92ff3e3ca81592d35199b4fe04a25a7966..0000000000000000000000000000000000000000 --- a/drivers/net/wireless/libertas/thread.h +++ /dev/null @@ -1,52 +0,0 @@ -#ifndef __WLAN_THREAD_H_ -#define __WLAN_THREAD_H_ - -#include - -struct wlan_thread { - struct task_struct *task; - wait_queue_head_t waitq; - pid_t pid; - void *priv; -}; - -static inline void wlan_activate_thread(struct wlan_thread * thr) -{ - /** Record the thread pid */ - thr->pid = current->pid; - - /** Initialize the wait queue */ - init_waitqueue_head(&thr->waitq); -} - -static inline void wlan_deactivate_thread(struct wlan_thread * thr) -{ - lbs_deb_enter(LBS_DEB_THREAD); - - thr->pid = 0; - - lbs_deb_leave(LBS_DEB_THREAD); -} - -static inline void wlan_create_thread(int (*wlanfunc) (void *), - struct wlan_thread * thr, char *name) -{ - thr->task = kthread_run(wlanfunc, thr, "%s", name); -} - -static inline int wlan_terminate_thread(struct wlan_thread * thr) -{ - lbs_deb_enter(LBS_DEB_THREAD); - - /* Check if the thread is active or not */ - if (!thr->pid) { - printk(KERN_ERR "Thread does not exist\n"); - return -1; - } - kthread_stop(thr->task); - - lbs_deb_leave(LBS_DEB_THREAD); - return 0; -} - -#endif diff --git a/drivers/net/wireless/libertas/tx.c b/drivers/net/wireless/libertas/tx.c index 17c437635a00845932231337c5d8aac1a00a89cf..fbec06c10dd7c227941811f739c5ee820bb8e1f3 100644 --- a/drivers/net/wireless/libertas/tx.c +++ b/drivers/net/wireless/libertas/tx.c @@ -58,7 +58,6 @@ static u32 convert_radiotap_rate_to_mv(u8 rate) */ static int SendSinglePacket(wlan_private * priv, struct sk_buff *skb) { - wlan_adapter *adapter = priv->adapter; int ret = 0; struct txpd localtxpd; struct txpd *plocaltxpd = &localtxpd; @@ -72,10 +71,6 @@ static int SendSinglePacket(wlan_private * priv, struct sk_buff *skb) if (priv->adapter->surpriseremoved) return -1; - if ((priv->adapter->debugmode & MRVDRV_DEBUG_TX_PATH) != 0) - lbs_dbg_hex("TX packet: ", skb->data, - min_t(unsigned int, skb->len, 100)); - if (!skb->len || (skb->len > MRVDRV_ETH_TX_PACKET_BUFFER_SIZE)) { lbs_deb_tx("tx err: skb length %d 0 or > %zd\n", skb->len, MRVDRV_ETH_TX_PACKET_BUFFER_SIZE); @@ -90,11 +85,8 @@ static int SendSinglePacket(wlan_private * priv, struct sk_buff *skb) /* offset of actual data */ plocaltxpd->tx_packet_location = cpu_to_le32(sizeof(struct txpd)); - /* TxCtrl set by user or default */ - plocaltxpd->tx_control = cpu_to_le32(adapter->pkttxctrl); - p802x_hdr = skb->data; - if (priv->adapter->radiomode == WLAN_RADIOMODE_RADIOTAP) { + if (priv->adapter->monitormode != WLAN_MONITOR_OFF) { /* locate radiotap header */ pradiotap_hdr = (struct tx_radiotap_hdr *)skb->data; @@ -103,7 +95,6 @@ static int 
SendSinglePacket(wlan_private * priv, struct sk_buff *skb) new_rate = convert_radiotap_rate_to_mv(pradiotap_hdr->rate); if (new_rate != 0) { /* use new tx_control[4:0] */ - new_rate |= (adapter->pkttxctrl & ~0x1f); plocaltxpd->tx_control = cpu_to_le32(new_rate); } @@ -115,12 +106,12 @@ static int SendSinglePacket(wlan_private * priv, struct sk_buff *skb) } /* copy destination address from 802.3 or 802.11 header */ - if (priv->adapter->linkmode == WLAN_LINKMODE_802_11) + if (priv->adapter->monitormode != WLAN_MONITOR_OFF) memcpy(plocaltxpd->tx_dest_addr_high, p802x_hdr + 4, ETH_ALEN); else memcpy(plocaltxpd->tx_dest_addr_high, p802x_hdr, ETH_ALEN); - lbs_dbg_hex("txpd", (u8 *) plocaltxpd, sizeof(struct txpd)); + lbs_deb_hex(LBS_DEB_TX, "txpd", (u8 *) plocaltxpd, sizeof(struct txpd)); if (IS_MESH_FRAME(skb)) { plocaltxpd->tx_control |= cpu_to_le32(TxPD_MESH_FRAME); @@ -130,7 +121,7 @@ static int SendSinglePacket(wlan_private * priv, struct sk_buff *skb) ptr += sizeof(struct txpd); - lbs_dbg_hex("Tx Data", (u8 *) p802x_hdr, le16_to_cpu(plocaltxpd->tx_packet_length)); + lbs_deb_hex(LBS_DEB_TX, "Tx Data", (u8 *) p802x_hdr, le16_to_cpu(plocaltxpd->tx_packet_length)); memcpy(ptr, p802x_hdr, le16_to_cpu(plocaltxpd->tx_packet_length)); ret = priv->hw_host_to_card(priv, MVMS_DAT, priv->adapter->tmptxbuf, @@ -153,13 +144,14 @@ static int SendSinglePacket(wlan_private * priv, struct sk_buff *skb) priv->stats.tx_errors++; } - if (!ret && priv->adapter->radiomode == WLAN_RADIOMODE_RADIOTAP) { + if (!ret && priv->adapter->monitormode != WLAN_MONITOR_OFF) { /* Keep the skb to echo it back once Tx feedback is received from FW */ skb_orphan(skb); /* stop processing outgoing pkts */ netif_stop_queue(priv->dev); - netif_stop_queue(priv->mesh_dev); + if (priv->mesh_dev) + netif_stop_queue(priv->mesh_dev); /* freeze any packets already in our queues */ priv->adapter->TxLockFlag = 1; } else { @@ -198,10 +190,12 @@ static void wlan_tx_queue(wlan_private *priv, struct sk_buff *skb) adapter->tx_queue_ps[adapter->tx_queue_idx++] = skb; if (adapter->tx_queue_idx == NR_TX_QUEUE) { netif_stop_queue(priv->dev); - netif_stop_queue(priv->mesh_dev); + if (priv->mesh_dev) + netif_stop_queue(priv->mesh_dev); } else { netif_start_queue(priv->dev); - netif_start_queue(priv->mesh_dev); + if (priv->mesh_dev) + netif_start_queue(priv->mesh_dev); } spin_unlock(&adapter->txqueue_lock); @@ -219,7 +213,7 @@ int libertas_process_tx(wlan_private * priv, struct sk_buff *skb) int ret = -1; lbs_deb_enter(LBS_DEB_TX); - lbs_dbg_hex("TX Data", skb->data, min_t(unsigned int, skb->len, 100)); + lbs_deb_hex(LBS_DEB_TX, "TX Data", skb->data, min_t(unsigned int, skb->len, 100)); if (priv->dnld_sent) { lbs_pr_alert( "TX error: dnld_sent = %d, not sending\n", @@ -258,16 +252,12 @@ void libertas_send_tx_feedback(wlan_private * priv) int txfail; int try_count; - if (adapter->radiomode != WLAN_RADIOMODE_RADIOTAP || + if (adapter->monitormode == WLAN_MONITOR_OFF || adapter->currenttxskb == NULL) return; radiotap_hdr = (struct tx_radiotap_hdr *)adapter->currenttxskb->data; - if ((adapter->debugmode & MRVDRV_DEBUG_TX_PATH) != 0) - lbs_dbg_hex("TX feedback: ", (u8 *) radiotap_hdr, - min_t(unsigned int, adapter->currenttxskb->len, 100)); - txfail = (status >> 24); #if 0 @@ -283,9 +273,10 @@ void libertas_send_tx_feedback(wlan_private * priv) libertas_upload_rx_packet(priv, adapter->currenttxskb); adapter->currenttxskb = NULL; priv->adapter->TxLockFlag = 0; - if (priv->adapter->connect_status == libertas_connected) { + if (priv->adapter->connect_status == 
LIBERTAS_CONNECTED) { netif_wake_queue(priv->dev); - netif_wake_queue(priv->mesh_dev); + if (priv->mesh_dev) + netif_wake_queue(priv->mesh_dev); } } EXPORT_SYMBOL_GPL(libertas_send_tx_feedback); diff --git a/drivers/net/wireless/libertas/types.h b/drivers/net/wireless/libertas/types.h index 028e2f3b53d6e0768c2a901bc3e861e10d9add75..a43a5f63c879c81d44a4d87834c8d21671c194df 100644 --- a/drivers/net/wireless/libertas/types.h +++ b/drivers/net/wireless/libertas/types.h @@ -7,71 +7,6 @@ #include #include -/** IEEE type definitions */ -enum ieeetypes_elementid { - SSID = 0, - SUPPORTED_RATES, - FH_PARAM_SET, - DS_PARAM_SET, - CF_PARAM_SET, - TIM, - IBSS_PARAM_SET, - COUNTRY_INFO = 7, - - CHALLENGE_TEXT = 16, - - EXTENDED_SUPPORTED_RATES = 50, - - VENDOR_SPECIFIC_221 = 221, - - WPA_IE = 221, - WPA2_IE = 48, - - EXTRA_IE = 133, -} __attribute__ ((packed)); - -#ifdef __BIG_ENDIAN -#define CAPINFO_MASK (~(0xda00)) -#else -#define CAPINFO_MASK (~(0x00da)) -#endif - -struct ieeetypes_capinfo { -#ifdef __BIG_ENDIAN_BITFIELD - u8 chanagility:1; - u8 pbcc:1; - u8 shortpreamble:1; - u8 privacy:1; - u8 cfpollrqst:1; - u8 cfpollable:1; - u8 ibss:1; - u8 ess:1; - u8 rsrvd1:2; - u8 dsssofdm:1; - u8 rsvrd2:1; - u8 apsd:1; - u8 shortslottime:1; - u8 rsrvd3:1; - u8 spectrummgmt:1; -#else - u8 ess:1; - u8 ibss:1; - u8 cfpollable:1; - u8 cfpollrqst:1; - u8 privacy:1; - u8 shortpreamble:1; - u8 pbcc:1; - u8 chanagility:1; - u8 spectrummgmt:1; - u8 rsrvd3:1; - u8 shortslottime:1; - u8 apsd:1; - u8 rsvrd2:1; - u8 dsssofdm:1; - u8 rsrvd1:2; -#endif -} __attribute__ ((packed)); - struct ieeetypes_cfparamset { u8 elementid; u8 len; @@ -114,7 +49,7 @@ union ieeetypes_phyparamset { } __attribute__ ((packed)); struct ieeetypes_assocrsp { - struct ieeetypes_capinfo capability; + __le16 capability; __le16 statuscode; __le16 aid; u8 iebuffer[1]; diff --git a/drivers/net/wireless/libertas/wext.c b/drivers/net/wireless/libertas/wext.c index 2fcc3bf210817623a8e96dc3a526df968dfa69e7..c6f5aa3cb465c4394d2140559eb1cec9c0e1de8b 100644 --- a/drivers/net/wireless/libertas/wext.c +++ b/drivers/net/wireless/libertas/wext.c @@ -21,60 +21,6 @@ #include "assoc.h" -/** - * the rates supported by the card - */ -static u8 libertas_wlan_data_rates[WLAN_SUPPORTED_RATES] = - { 0x02, 0x04, 0x0B, 0x16, 0x00, 0x0C, 0x12, - 0x18, 0x24, 0x30, 0x48, 0x60, 0x6C, 0x00 -}; - -/** - * @brief Convert mw value to dbm value - * - * @param mw the value of mw - * @return the value of dbm - */ -static int mw_to_dbm(int mw) -{ - if (mw < 2) - return 0; - else if (mw < 3) - return 3; - else if (mw < 4) - return 5; - else if (mw < 6) - return 7; - else if (mw < 7) - return 8; - else if (mw < 8) - return 9; - else if (mw < 10) - return 10; - else if (mw < 13) - return 11; - else if (mw < 16) - return 12; - else if (mw < 20) - return 13; - else if (mw < 25) - return 14; - else if (mw < 32) - return 15; - else if (mw < 40) - return 16; - else if (mw < 50) - return 17; - else if (mw < 63) - return 18; - else if (mw < 79) - return 19; - else if (mw < 100) - return 20; - else - return 21; -} - /** * @brief Find the channel frequency power info with specific channel * @@ -165,7 +111,7 @@ static struct chan_freq_power *find_cfp_by_band_and_freq(wlan_adapter * adapter, * @option Radio Option * @return 0 --success, otherwise fail */ -int wlan_radio_ioctl(wlan_private * priv, u8 option) +static int wlan_radio_ioctl(wlan_private * priv, u8 option) { int ret = 0; wlan_adapter *adapter = priv->adapter; @@ -177,9 +123,9 @@ int wlan_radio_ioctl(wlan_private * priv, u8 option) 
adapter->radioon = option; ret = libertas_prepare_and_send_command(priv, - cmd_802_11_radio_control, - cmd_act_set, - cmd_option_waitforrsp, 0, NULL); + CMD_802_11_RADIO_CONTROL, + CMD_ACT_SET, + CMD_OPTION_WAITFORRSP, 0, NULL); } lbs_deb_leave_args(LBS_DEB_WEXT, "ret %d", ret); @@ -187,84 +133,31 @@ int wlan_radio_ioctl(wlan_private * priv, u8 option) } /** - * @brief Copy rates - * - * @param dest A pointer to Dest Buf - * @param src A pointer to Src Buf - * @param len The len of Src Buf - * @return Number of rates copyed - */ -static inline int copyrates(u8 * dest, int pos, u8 * src, int len) -{ - int i; - - for (i = 0; i < len && src[i]; i++, pos++) { - if (pos >= sizeof(u8) * WLAN_SUPPORTED_RATES) - break; - dest[pos] = src[i]; - } - - return pos; -} - -/** - * @brief Get active data rates + * @brief Copy active data rates based on adapter mode and status * * @param adapter A pointer to wlan_adapter structure * @param rate The buf to return the active rates - * @return The number of rates */ -static int get_active_data_rates(wlan_adapter * adapter, - u8* rates) +static void copy_active_data_rates(wlan_adapter * adapter, u8 * rates) { - int k = 0; - lbs_deb_enter(LBS_DEB_WEXT); - if (adapter->connect_status != libertas_connected) { - if (adapter->mode == IW_MODE_INFRA) { - lbs_deb_wext("infra\n"); - k = copyrates(rates, k, libertas_supported_rates, - sizeof(libertas_supported_rates)); - } else { - lbs_deb_wext("Adhoc G\n"); - k = copyrates(rates, k, libertas_adhoc_rates_g, - sizeof(libertas_adhoc_rates_g)); - } - } else { - k = copyrates(rates, 0, adapter->curbssparams.datarates, - adapter->curbssparams.numofrates); - } + if (adapter->connect_status != LIBERTAS_CONNECTED) + memcpy(rates, libertas_bg_rates, MAX_RATES); + else + memcpy(rates, adapter->curbssparams.rates, MAX_RATES); - lbs_deb_leave_args(LBS_DEB_WEXT, "ret %d", k); - return k; + lbs_deb_leave(LBS_DEB_WEXT); } static int wlan_get_name(struct net_device *dev, struct iw_request_info *info, char *cwrq, char *extra) { - const char *cp; - char comm[6] = { "COMM-" }; - char mrvl[6] = { "MRVL-" }; - int cnt; lbs_deb_enter(LBS_DEB_WEXT); - strcpy(cwrq, mrvl); - - cp = strstr(libertas_driver_version, comm); - if (cp == libertas_driver_version) //skip leading "COMM-" - cp = libertas_driver_version + strlen(comm); - else - cp = libertas_driver_version; - - cnt = strlen(mrvl); - cwrq += cnt; - while (cnt < 16 && (*cp != '-')) { - *cwrq++ = toupper(*cp++); - cnt++; - } - *cwrq = '\0'; + /* We could add support for 802.11n here as needed. Jean II */ + snprintf(cwrq, IFNAMSIZ, "IEEE 802.11b/g"); lbs_deb_leave(LBS_DEB_WEXT); return 0; @@ -305,7 +198,7 @@ static int wlan_get_wap(struct net_device *dev, struct iw_request_info *info, lbs_deb_enter(LBS_DEB_WEXT); - if (adapter->connect_status == libertas_connected) { + if (adapter->connect_status == LIBERTAS_CONNECTED) { memcpy(awrq->sa_data, adapter->curbssparams.bssid, ETH_ALEN); } else { memset(awrq->sa_data, 0, ETH_ALEN); @@ -349,24 +242,11 @@ static int wlan_get_nick(struct net_device *dev, struct iw_request_info *info, lbs_deb_enter(LBS_DEB_WEXT); - /* - * Get the Nick Name saved - */ - - mutex_lock(&adapter->lock); - strncpy(extra, adapter->nodename, 16); - mutex_unlock(&adapter->lock); - - extra[16] = '\0'; + dwrq->length = strlen(adapter->nodename); + memcpy(extra, adapter->nodename, dwrq->length); + extra[dwrq->length] = '\0'; - /* - * If none, we may want to get the one that was set - */ - - /* - * Push it out ! 
- */ - dwrq->length = strlen(extra) + 1; + dwrq->flags = 1; /* active */ lbs_deb_leave(LBS_DEB_WEXT); return 0; @@ -382,20 +262,21 @@ static int mesh_get_nick(struct net_device *dev, struct iw_request_info *info, /* Use nickname to indicate that mesh is on */ - if (adapter->connect_status == libertas_connected) { + if (adapter->connect_status == LIBERTAS_CONNECTED) { strncpy(extra, "Mesh", 12); extra[12] = '\0'; - dwrq->length = strlen(extra) + 1; + dwrq->length = strlen(extra); } else { extra[0] = '\0'; - dwrq->length = 1 ; + dwrq->length = 0; } lbs_deb_leave(LBS_DEB_WEXT); return 0; } + static int wlan_set_rts(struct net_device *dev, struct iw_request_info *info, struct iw_param *vwrq, char *extra) { @@ -414,8 +295,8 @@ static int wlan_set_rts(struct net_device *dev, struct iw_request_info *info, adapter->rtsthsd = rthr; } - ret = libertas_prepare_and_send_command(priv, cmd_802_11_snmp_mib, - cmd_act_set, cmd_option_waitforrsp, + ret = libertas_prepare_and_send_command(priv, CMD_802_11_SNMP_MIB, + CMD_ACT_SET, CMD_OPTION_WAITFORRSP, OID_802_11_RTS_THRESHOLD, &rthr); lbs_deb_leave_args(LBS_DEB_WEXT, "ret %d", ret); @@ -432,8 +313,8 @@ static int wlan_get_rts(struct net_device *dev, struct iw_request_info *info, lbs_deb_enter(LBS_DEB_WEXT); adapter->rtsthsd = 0; - ret = libertas_prepare_and_send_command(priv, cmd_802_11_snmp_mib, - cmd_act_get, cmd_option_waitforrsp, + ret = libertas_prepare_and_send_command(priv, CMD_802_11_SNMP_MIB, + CMD_ACT_GET, CMD_OPTION_WAITFORRSP, OID_802_11_RTS_THRESHOLD, NULL); if (ret) goto out; @@ -467,8 +348,8 @@ static int wlan_set_frag(struct net_device *dev, struct iw_request_info *info, adapter->fragthsd = fthr; } - ret = libertas_prepare_and_send_command(priv, cmd_802_11_snmp_mib, - cmd_act_set, cmd_option_waitforrsp, + ret = libertas_prepare_and_send_command(priv, CMD_802_11_SNMP_MIB, + CMD_ACT_SET, CMD_OPTION_WAITFORRSP, OID_802_11_FRAGMENTATION_THRESHOLD, &fthr); lbs_deb_leave_args(LBS_DEB_WEXT, "ret %d", ret); @@ -486,8 +367,8 @@ static int wlan_get_frag(struct net_device *dev, struct iw_request_info *info, adapter->fragthsd = 0; ret = libertas_prepare_and_send_command(priv, - cmd_802_11_snmp_mib, - cmd_act_get, cmd_option_waitforrsp, + CMD_802_11_SNMP_MIB, + CMD_ACT_GET, CMD_OPTION_WAITFORRSP, OID_802_11_FRAGMENTATION_THRESHOLD, NULL); if (ret) goto out; @@ -539,9 +420,9 @@ static int wlan_get_txpow(struct net_device *dev, lbs_deb_enter(LBS_DEB_WEXT); ret = libertas_prepare_and_send_command(priv, - cmd_802_11_rf_tx_power, - cmd_act_tx_power_opt_get, - cmd_option_waitforrsp, 0, NULL); + CMD_802_11_RF_TX_POWER, + CMD_ACT_TX_POWER_OPT_GET, + CMD_OPTION_WAITFORRSP, 0, NULL); if (ret) goto out; @@ -581,9 +462,9 @@ static int wlan_set_retry(struct net_device *dev, struct iw_request_info *info, /* Adding 1 to convert retry count to try count */ adapter->txretrycount = vwrq->value + 1; - ret = libertas_prepare_and_send_command(priv, cmd_802_11_snmp_mib, - cmd_act_set, - cmd_option_waitforrsp, + ret = libertas_prepare_and_send_command(priv, CMD_802_11_SNMP_MIB, + CMD_ACT_SET, + CMD_OPTION_WAITFORRSP, OID_802_11_TX_RETRYCOUNT, NULL); if (ret) @@ -608,8 +489,8 @@ static int wlan_get_retry(struct net_device *dev, struct iw_request_info *info, adapter->txretrycount = 0; ret = libertas_prepare_and_send_command(priv, - cmd_802_11_snmp_mib, - cmd_act_get, cmd_option_waitforrsp, + CMD_802_11_SNMP_MIB, + CMD_ACT_GET, CMD_OPTION_WAITFORRSP, OID_802_11_TX_RETRYCOUNT, NULL); if (ret) goto out; @@ -673,7 +554,7 @@ static int wlan_get_range(struct net_device *dev, struct 
iw_request_info *info, wlan_adapter *adapter = priv->adapter; struct iw_range *range = (struct iw_range *)extra; struct chan_freq_power *cfp; - u8 rates[WLAN_SUPPORTED_RATES]; + u8 rates[MAX_RATES + 1]; u8 flag = 0; @@ -686,19 +567,17 @@ static int wlan_get_range(struct net_device *dev, struct iw_request_info *info, range->max_nwid = 0; memset(rates, 0, sizeof(rates)); - range->num_bitrates = get_active_data_rates(adapter, rates); - - for (i = 0; i < min_t(__u8, range->num_bitrates, IW_MAX_BITRATES) && rates[i]; - i++) { - range->bitrate[i] = (rates[i] & 0x7f) * 500000; - } + copy_active_data_rates(adapter, rates); + range->num_bitrates = strnlen(rates, IW_MAX_BITRATES); + for (i = 0; i < range->num_bitrates; i++) + range->bitrate[i] = rates[i] * 500000; range->num_bitrates = i; lbs_deb_wext("IW_MAX_BITRATES %d, num_bitrates %d\n", IW_MAX_BITRATES, range->num_bitrates); range->num_frequency = 0; if (priv->adapter->enable11d && - adapter->connect_status == libertas_connected) { + adapter->connect_status == LIBERTAS_CONNECTED) { u8 chan_no; u8 band; @@ -858,9 +737,9 @@ static int wlan_set_power(struct net_device *dev, struct iw_request_info *info, */ if (vwrq->disabled) { - adapter->psmode = wlan802_11powermodecam; + adapter->psmode = WLAN802_11POWERMODECAM; if (adapter->psstate != PS_STATE_FULL_POWER) { - libertas_ps_wakeup(priv, cmd_option_waitforrsp); + libertas_ps_wakeup(priv, CMD_OPTION_WAITFORRSP); } return 0; @@ -875,14 +754,14 @@ static int wlan_set_power(struct net_device *dev, struct iw_request_info *info, return -EINVAL; } - if (adapter->psmode != wlan802_11powermodecam) { + if (adapter->psmode != WLAN802_11POWERMODECAM) { return 0; } - adapter->psmode = wlan802_11powermodemax_psp; + adapter->psmode = WLAN802_11POWERMODEMAX_PSP; - if (adapter->connect_status == libertas_connected) { - libertas_ps_sleep(priv, cmd_option_waitforrsp); + if (adapter->connect_status == LIBERTAS_CONNECTED) { + libertas_ps_sleep(priv, CMD_OPTION_WAITFORRSP); } lbs_deb_leave(LBS_DEB_WEXT); @@ -900,8 +779,8 @@ static int wlan_get_power(struct net_device *dev, struct iw_request_info *info, mode = adapter->psmode; - if ((vwrq->disabled = (mode == wlan802_11powermodecam)) - || adapter->connect_status == libertas_disconnected) + if ((vwrq->disabled = (mode == WLAN802_11POWERMODECAM)) + || adapter->connect_status == LIBERTAS_DISCONNECTED) { goto out; } @@ -937,7 +816,7 @@ static struct iw_statistics *wlan_get_wireless_stats(struct net_device *dev) priv->wstats.status = adapter->mode; /* If we're not associated, all quality values are meaningless */ - if (adapter->connect_status != libertas_connected) + if (adapter->connect_status != LIBERTAS_CONNECTED) goto out; /* Quality by RSSI */ @@ -973,7 +852,7 @@ static struct iw_statistics *wlan_get_wireless_stats(struct net_device *dev) /* Quality by TX errors */ priv->wstats.discard.retries = priv->stats.tx_errors; - tx_retries = le16_to_cpu(adapter->logmsg.retry); + tx_retries = le32_to_cpu(adapter->logmsg.retry); if (tx_retries > 75) tx_qual = (90 - tx_retries) * POOR / 15; @@ -989,20 +868,20 @@ static struct iw_statistics *wlan_get_wireless_stats(struct net_device *dev) (PERFECT - VERY_GOOD) / 50 + VERY_GOOD; quality = min(quality, tx_qual); - priv->wstats.discard.code = le16_to_cpu(adapter->logmsg.wepundecryptable); - priv->wstats.discard.fragment = le16_to_cpu(adapter->logmsg.rxfrag); + priv->wstats.discard.code = le32_to_cpu(adapter->logmsg.wepundecryptable); + priv->wstats.discard.fragment = le32_to_cpu(adapter->logmsg.rxfrag); priv->wstats.discard.retries = 
tx_retries; - priv->wstats.discard.misc = le16_to_cpu(adapter->logmsg.ackfailure); + priv->wstats.discard.misc = le32_to_cpu(adapter->logmsg.ackfailure); /* Calculate quality */ - priv->wstats.qual.qual = max(quality, (u32)100); + priv->wstats.qual.qual = min_t(u8, quality, 100); priv->wstats.qual.updated = IW_QUAL_ALL_UPDATED | IW_QUAL_DBM; stats_valid = 1; /* update stats asynchronously for future calls */ - libertas_prepare_and_send_command(priv, cmd_802_11_rssi, 0, + libertas_prepare_and_send_command(priv, CMD_802_11_RSSI, 0, 0, 0, NULL); - libertas_prepare_and_send_command(priv, cmd_802_11_get_log, 0, + libertas_prepare_and_send_command(priv, CMD_802_11_GET_LOG, 0, 0, 0, NULL); out: if (!stats_valid) { @@ -1080,88 +959,46 @@ static int wlan_set_freq(struct net_device *dev, struct iw_request_info *info, return ret; } -/** - * @brief use index to get the data rate - * - * @param index The index of data rate - * @return data rate or 0 - */ -u32 libertas_index_to_data_rate(u8 index) -{ - if (index >= sizeof(libertas_wlan_data_rates)) - index = 0; - - return libertas_wlan_data_rates[index]; -} - -/** - * @brief use rate to get the index - * - * @param rate data rate - * @return index or 0 - */ -u8 libertas_data_rate_to_index(u32 rate) -{ - u8 *ptr; - - if (rate) - if ((ptr = memchr(libertas_wlan_data_rates, (u8) rate, - sizeof(libertas_wlan_data_rates)))) - return (ptr - libertas_wlan_data_rates); - - return 0; -} - static int wlan_set_rate(struct net_device *dev, struct iw_request_info *info, struct iw_param *vwrq, char *extra) { wlan_private *priv = dev->priv; wlan_adapter *adapter = priv->adapter; - u32 data_rate; + u32 new_rate; u16 action; - int ret = 0; - u8 rates[WLAN_SUPPORTED_RATES]; - u8 *rate; + int ret = -EINVAL; + u8 rates[MAX_RATES + 1]; lbs_deb_enter(LBS_DEB_WEXT); - lbs_deb_wext("vwrq->value %d\n", vwrq->value); + /* Auto rate? 
*/ if (vwrq->value == -1) { - action = cmd_act_set_tx_auto; // Auto - adapter->is_datarate_auto = 1; - adapter->datarate = 0; + action = CMD_ACT_SET_TX_AUTO; + adapter->auto_rate = 1; + adapter->cur_rate = 0; } else { - if (vwrq->value % 100000) { - return -EINVAL; - } - - data_rate = vwrq->value / 500000; + if (vwrq->value % 100000) + goto out; memset(rates, 0, sizeof(rates)); - get_active_data_rates(adapter, rates); - rate = rates; - while (*rate) { - lbs_deb_wext("rate=0x%X, wanted data_rate 0x%X\n", *rate, - data_rate); - if ((*rate & 0x7f) == (data_rate & 0x7f)) - break; - rate++; - } - if (!*rate) { - lbs_pr_alert("fixed data rate 0x%X out " - "of range\n", data_rate); - return -EINVAL; + copy_active_data_rates(adapter, rates); + new_rate = vwrq->value / 500000; + if (!memchr(rates, new_rate, sizeof(rates))) { + lbs_pr_alert("fixed data rate 0x%X out of range\n", + new_rate); + goto out; } - adapter->datarate = data_rate; - action = cmd_act_set_tx_fix_rate; - adapter->is_datarate_auto = 0; + adapter->cur_rate = new_rate; + action = CMD_ACT_SET_TX_FIX_RATE; + adapter->auto_rate = 0; } - ret = libertas_prepare_and_send_command(priv, cmd_802_11_data_rate, - action, cmd_option_waitforrsp, 0, NULL); + ret = libertas_prepare_and_send_command(priv, CMD_802_11_DATA_RATE, + action, CMD_OPTION_WAITFORRSP, 0, NULL); +out: lbs_deb_leave_args(LBS_DEB_WEXT, "ret %d", ret); return ret; } @@ -1174,14 +1011,19 @@ static int wlan_get_rate(struct net_device *dev, struct iw_request_info *info, lbs_deb_enter(LBS_DEB_WEXT); - if (adapter->is_datarate_auto) { - vwrq->fixed = 0; + if (adapter->connect_status == LIBERTAS_CONNECTED) { + vwrq->value = adapter->cur_rate * 500000; + + if (adapter->auto_rate) + vwrq->fixed = 0; + else + vwrq->fixed = 1; + } else { - vwrq->fixed = 1; + vwrq->fixed = 0; + vwrq->value = 0; } - vwrq->value = adapter->datarate * 500000; - lbs_deb_leave(LBS_DEB_WEXT); return 0; } @@ -1298,7 +1140,7 @@ static int wlan_get_encode(struct net_device *dev, dwrq->flags |= IW_ENCODE_NOKEY; - lbs_deb_wext("key: " MAC_FMT ", keylen %d\n", + lbs_deb_wext("key: %02x:%02x:%02x:%02x:%02x:%02x, keylen %d\n", extra[0], extra[1], extra[2], extra[3], extra[4], extra[5], dwrq->length); @@ -1325,7 +1167,7 @@ static int wlan_set_wep_key(struct assoc_request *assoc_req, int set_tx_key) { int ret = 0; - struct WLAN_802_11_KEY *pkey; + struct enc_key *pkey; lbs_deb_enter(LBS_DEB_WEXT); @@ -1344,7 +1186,7 @@ static int wlan_set_wep_key(struct assoc_request *assoc_req, pkey = &assoc_req->wep_keys[index]; if (key_length > 0) { - memset(pkey, 0, sizeof(struct WLAN_802_11_KEY)); + memset(pkey, 0, sizeof(struct enc_key)); pkey->type = KEY_TYPE_ID_WEP; /* Standardize the key length */ @@ -1412,11 +1254,11 @@ static void disable_wpa(struct assoc_request *assoc_req) { lbs_deb_enter(LBS_DEB_WEXT); - memset(&assoc_req->wpa_mcast_key, 0, sizeof (struct WLAN_802_11_KEY)); + memset(&assoc_req->wpa_mcast_key, 0, sizeof (struct enc_key)); assoc_req->wpa_mcast_key.flags = KEY_INFO_WPA_MCAST; set_bit(ASSOC_FLAG_WPA_MCAST_KEY, &assoc_req->flags); - memset(&assoc_req->wpa_unicast_key, 0, sizeof (struct WLAN_802_11_KEY)); + memset(&assoc_req->wpa_unicast_key, 0, sizeof (struct enc_key)); assoc_req->wpa_unicast_key.flags = KEY_INFO_WPA_UNICAST; set_bit(ASSOC_FLAG_WPA_UCAST_KEY, &assoc_req->flags); @@ -1567,7 +1409,7 @@ static int wlan_get_encodeext(struct net_device *dev, && (adapter->secinfo.WPAenabled || adapter->secinfo.WPA2enabled)) { /* WPA */ - struct WLAN_802_11_KEY * pkey = NULL; + struct enc_key * pkey = NULL; if ( 
adapter->wpa_mcast_key.len && (adapter->wpa_mcast_key.flags & KEY_INFO_WPA_ENABLED)) @@ -1679,7 +1521,7 @@ static int wlan_set_encodeext(struct net_device *dev, if (set_tx_key) set_bit(ASSOC_FLAG_WEP_TX_KEYIDX, &assoc_req->flags); } else if ((alg == IW_ENCODE_ALG_TKIP) || (alg == IW_ENCODE_ALG_CCMP)) { - struct WLAN_802_11_KEY * pkey; + struct enc_key * pkey; /* validate key length */ if (((alg == IW_ENCODE_ALG_TKIP) @@ -1702,7 +1544,7 @@ static int wlan_set_encodeext(struct net_device *dev, set_bit(ASSOC_FLAG_WPA_UCAST_KEY, &assoc_req->flags); } - memset(pkey, 0, sizeof (struct WLAN_802_11_KEY)); + memset(pkey, 0, sizeof (struct enc_key)); memcpy(pkey->key, ext->key, ext->key_len); pkey->len = ext->key_len; if (pkey->len) @@ -1976,12 +1818,14 @@ static int wlan_set_txpow(struct net_device *dev, struct iw_request_info *info, return 0; } - adapter->preamble = cmd_type_auto_preamble; + adapter->preamble = CMD_TYPE_AUTO_PREAMBLE; wlan_radio_ioctl(priv, RADIO_ON); + /* Userspace check in iwrange if it should use dBm or mW, + * therefore this should never happen... Jean II */ if ((vwrq->flags & IW_TXPOW_TYPE) == IW_TXPOW_MWATT) { - dbm = (u16) mw_to_dbm(vwrq->value); + return -EOPNOTSUPP; } else dbm = (u16) vwrq->value; @@ -1993,9 +1837,9 @@ static int wlan_set_txpow(struct net_device *dev, struct iw_request_info *info, lbs_deb_wext("txpower set %d dbm\n", dbm); ret = libertas_prepare_and_send_command(priv, - cmd_802_11_rf_tx_power, - cmd_act_tx_power_opt_set_low, - cmd_option_waitforrsp, 0, (void *)&dbm); + CMD_802_11_RF_TX_POWER, + CMD_ACT_TX_POWER_OPT_SET_LOW, + CMD_OPTION_WAITFORRSP, 0, (void *)&dbm); lbs_deb_leave_args(LBS_DEB_WEXT, "ret %d", ret); return ret; @@ -2017,7 +1861,7 @@ static int wlan_get_essid(struct net_device *dev, struct iw_request_info *info, /* * Get the current SSID */ - if (adapter->connect_status == libertas_connected) { + if (adapter->connect_status == LIBERTAS_CONNECTED) { memcpy(extra, adapter->curbssparams.ssid, adapter->curbssparams.ssid_len); extra[adapter->curbssparams.ssid_len] = '\0'; @@ -2029,12 +1873,7 @@ static int wlan_get_essid(struct net_device *dev, struct iw_request_info *info, * If none, we may want to get the one that was set */ - /* To make the driver backward compatible with WPA supplicant v0.2.4 */ - if (dwrq->length == 32) /* check with WPA supplicant buffer size */ - dwrq->length = min_t(size_t, adapter->curbssparams.ssid_len, - IW_ESSID_MAX_SIZE); - else - dwrq->length = adapter->curbssparams.ssid_len + 1; + dwrq->length = adapter->curbssparams.ssid_len; dwrq->flags = 1; /* active */ @@ -2055,14 +1894,6 @@ static int wlan_set_essid(struct net_device *dev, struct iw_request_info *info, lbs_deb_enter(LBS_DEB_WEXT); - /* - * WE-20 and earlier NULL pad the end of the SSID and increment - * SSID length so it can be used like a string. WE-21 and later don't, - * but some userspace tools aren't able to cope with the change. 
- */ - if ((in_ssid_len > 0) && (extra[in_ssid_len - 1] == '\0')) - in_ssid_len--; - /* Check the size of the string */ if (in_ssid_len > IW_ESSID_MAX_SIZE) { ret = -E2BIG; @@ -2129,13 +1960,14 @@ static int wlan_set_wap(struct net_device *dev, struct iw_request_info *info, wlan_adapter *adapter = priv->adapter; struct assoc_request * assoc_req; int ret = 0; + DECLARE_MAC_BUF(mac); lbs_deb_enter(LBS_DEB_WEXT); if (awrq->sa_family != ARPHRD_ETHER) return -EINVAL; - lbs_deb_wext("ASSOC: WAP: sa_data " MAC_FMT "\n", MAC_ARG(awrq->sa_data)); + lbs_deb_wext("ASSOC: WAP: sa_data %s\n", print_mac(mac, awrq->sa_data)); mutex_lock(&adapter->lock); @@ -2298,13 +2130,13 @@ static const iw_handler mesh_wlan_handler[] = { (iw_handler) NULL, /* SIOCSIWPMKSA */ }; struct iw_handler_def libertas_handler_def = { - .num_standard = sizeof(wlan_handler) / sizeof(iw_handler), + .num_standard = ARRAY_SIZE(wlan_handler), .standard = (iw_handler *) wlan_handler, .get_wireless_stats = wlan_get_wireless_stats, }; struct iw_handler_def mesh_handler_def = { - .num_standard = sizeof(mesh_wlan_handler) / sizeof(iw_handler), + .num_standard = ARRAY_SIZE(mesh_wlan_handler), .standard = (iw_handler *) mesh_wlan_handler, .get_wireless_stats = wlan_get_wireless_stats, }; diff --git a/drivers/net/wireless/libertas/wext.h b/drivers/net/wireless/libertas/wext.h index 3d5196c9553aa6228cdade43ffd95364ad91a1c7..6aa444c7de8d6d43fc144ee5beb4397beca880b4 100644 --- a/drivers/net/wireless/libertas/wext.h +++ b/drivers/net/wireless/libertas/wext.h @@ -4,9 +4,6 @@ #ifndef _WLAN_WEXT_H_ #define _WLAN_WEXT_H_ -#define SUBCMD_OFFSET 4 -#define SUBCMD_DATA(x) *((int *)(x->u.name + SUBCMD_OFFSET)) - /** wlan_ioctl_regrdwr */ struct wlan_ioctl_regrdwr { /** Which register to access */ @@ -18,13 +15,9 @@ struct wlan_ioctl_regrdwr { u32 value; }; -#define WLAN_LINKMODE_802_3 0 -#define WLAN_LINKMODE_802_11 2 -#define WLAN_RADIOMODE_NONE 0 -#define WLAN_RADIOMODE_RADIOTAP 2 +#define WLAN_MONITOR_OFF 0 extern struct iw_handler_def libertas_handler_def; extern struct iw_handler_def mesh_handler_def; -int wlan_radio_ioctl(wlan_private * priv, u8 option); #endif /* _WLAN_WEXT_H_ */ diff --git a/drivers/net/wireless/net2280.h b/drivers/net/wireless/net2280.h new file mode 100644 index 0000000000000000000000000000000000000000..120eb831b287c4c50cbc2f545b39370fa2526bfb --- /dev/null +++ b/drivers/net/wireless/net2280.h @@ -0,0 +1,452 @@ +#ifndef NET2280_H +#define NET2280_H +/* + * NetChip 2280 high/full speed USB device controller. + * Unlike many such controllers, this one talks PCI. + */ + +/* + * Copyright (C) 2002 NetChip Technology, Inc. (http://www.netchip.com) + * Copyright (C) 2003 David Brownell + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +/*-------------------------------------------------------------------------*/ + +/* NET2280 MEMORY MAPPED REGISTERS + * + * The register layout came from the chip documentation, and the bit + * number definitions were extracted from chip specification. + * + * Use the shift operator ('<<') to build bit masks, with readl/writel + * to access the registers through PCI. + */ + +/* main registers, BAR0 + 0x0000 */ +struct net2280_regs { + // offset 0x0000 + __le32 devinit; +#define LOCAL_CLOCK_FREQUENCY 8 +#define FORCE_PCI_RESET 7 +#define PCI_ID 6 +#define PCI_ENABLE 5 +#define FIFO_SOFT_RESET 4 +#define CFG_SOFT_RESET 3 +#define PCI_SOFT_RESET 2 +#define USB_SOFT_RESET 1 +#define M8051_RESET 0 + __le32 eectl; +#define EEPROM_ADDRESS_WIDTH 23 +#define EEPROM_CHIP_SELECT_ACTIVE 22 +#define EEPROM_PRESENT 21 +#define EEPROM_VALID 20 +#define EEPROM_BUSY 19 +#define EEPROM_CHIP_SELECT_ENABLE 18 +#define EEPROM_BYTE_READ_START 17 +#define EEPROM_BYTE_WRITE_START 16 +#define EEPROM_READ_DATA 8 +#define EEPROM_WRITE_DATA 0 + __le32 eeclkfreq; + u32 _unused0; + // offset 0x0010 + + __le32 pciirqenb0; /* interrupt PCI master ... */ +#define SETUP_PACKET_INTERRUPT_ENABLE 7 +#define ENDPOINT_F_INTERRUPT_ENABLE 6 +#define ENDPOINT_E_INTERRUPT_ENABLE 5 +#define ENDPOINT_D_INTERRUPT_ENABLE 4 +#define ENDPOINT_C_INTERRUPT_ENABLE 3 +#define ENDPOINT_B_INTERRUPT_ENABLE 2 +#define ENDPOINT_A_INTERRUPT_ENABLE 1 +#define ENDPOINT_0_INTERRUPT_ENABLE 0 + __le32 pciirqenb1; +#define PCI_INTERRUPT_ENABLE 31 +#define POWER_STATE_CHANGE_INTERRUPT_ENABLE 27 +#define PCI_ARBITER_TIMEOUT_INTERRUPT_ENABLE 26 +#define PCI_PARITY_ERROR_INTERRUPT_ENABLE 25 +#define PCI_MASTER_ABORT_RECEIVED_INTERRUPT_ENABLE 20 +#define PCI_TARGET_ABORT_RECEIVED_INTERRUPT_ENABLE 19 +#define PCI_TARGET_ABORT_ASSERTED_INTERRUPT_ENABLE 18 +#define PCI_RETRY_ABORT_INTERRUPT_ENABLE 17 +#define PCI_MASTER_CYCLE_DONE_INTERRUPT_ENABLE 16 +#define GPIO_INTERRUPT_ENABLE 13 +#define DMA_D_INTERRUPT_ENABLE 12 +#define DMA_C_INTERRUPT_ENABLE 11 +#define DMA_B_INTERRUPT_ENABLE 10 +#define DMA_A_INTERRUPT_ENABLE 9 +#define EEPROM_DONE_INTERRUPT_ENABLE 8 +#define VBUS_INTERRUPT_ENABLE 7 +#define CONTROL_STATUS_INTERRUPT_ENABLE 6 +#define ROOT_PORT_RESET_INTERRUPT_ENABLE 4 +#define SUSPEND_REQUEST_INTERRUPT_ENABLE 3 +#define SUSPEND_REQUEST_CHANGE_INTERRUPT_ENABLE 2 +#define RESUME_INTERRUPT_ENABLE 1 +#define SOF_INTERRUPT_ENABLE 0 + __le32 cpu_irqenb0; /* ... 
or onboard 8051 */ +#define SETUP_PACKET_INTERRUPT_ENABLE 7 +#define ENDPOINT_F_INTERRUPT_ENABLE 6 +#define ENDPOINT_E_INTERRUPT_ENABLE 5 +#define ENDPOINT_D_INTERRUPT_ENABLE 4 +#define ENDPOINT_C_INTERRUPT_ENABLE 3 +#define ENDPOINT_B_INTERRUPT_ENABLE 2 +#define ENDPOINT_A_INTERRUPT_ENABLE 1 +#define ENDPOINT_0_INTERRUPT_ENABLE 0 + __le32 cpu_irqenb1; +#define CPU_INTERRUPT_ENABLE 31 +#define POWER_STATE_CHANGE_INTERRUPT_ENABLE 27 +#define PCI_ARBITER_TIMEOUT_INTERRUPT_ENABLE 26 +#define PCI_PARITY_ERROR_INTERRUPT_ENABLE 25 +#define PCI_INTA_INTERRUPT_ENABLE 24 +#define PCI_PME_INTERRUPT_ENABLE 23 +#define PCI_SERR_INTERRUPT_ENABLE 22 +#define PCI_PERR_INTERRUPT_ENABLE 21 +#define PCI_MASTER_ABORT_RECEIVED_INTERRUPT_ENABLE 20 +#define PCI_TARGET_ABORT_RECEIVED_INTERRUPT_ENABLE 19 +#define PCI_RETRY_ABORT_INTERRUPT_ENABLE 17 +#define PCI_MASTER_CYCLE_DONE_INTERRUPT_ENABLE 16 +#define GPIO_INTERRUPT_ENABLE 13 +#define DMA_D_INTERRUPT_ENABLE 12 +#define DMA_C_INTERRUPT_ENABLE 11 +#define DMA_B_INTERRUPT_ENABLE 10 +#define DMA_A_INTERRUPT_ENABLE 9 +#define EEPROM_DONE_INTERRUPT_ENABLE 8 +#define VBUS_INTERRUPT_ENABLE 7 +#define CONTROL_STATUS_INTERRUPT_ENABLE 6 +#define ROOT_PORT_RESET_INTERRUPT_ENABLE 4 +#define SUSPEND_REQUEST_INTERRUPT_ENABLE 3 +#define SUSPEND_REQUEST_CHANGE_INTERRUPT_ENABLE 2 +#define RESUME_INTERRUPT_ENABLE 1 +#define SOF_INTERRUPT_ENABLE 0 + + // offset 0x0020 + u32 _unused1; + __le32 usbirqenb1; +#define USB_INTERRUPT_ENABLE 31 +#define POWER_STATE_CHANGE_INTERRUPT_ENABLE 27 +#define PCI_ARBITER_TIMEOUT_INTERRUPT_ENABLE 26 +#define PCI_PARITY_ERROR_INTERRUPT_ENABLE 25 +#define PCI_INTA_INTERRUPT_ENABLE 24 +#define PCI_PME_INTERRUPT_ENABLE 23 +#define PCI_SERR_INTERRUPT_ENABLE 22 +#define PCI_PERR_INTERRUPT_ENABLE 21 +#define PCI_MASTER_ABORT_RECEIVED_INTERRUPT_ENABLE 20 +#define PCI_TARGET_ABORT_RECEIVED_INTERRUPT_ENABLE 19 +#define PCI_RETRY_ABORT_INTERRUPT_ENABLE 17 +#define PCI_MASTER_CYCLE_DONE_INTERRUPT_ENABLE 16 +#define GPIO_INTERRUPT_ENABLE 13 +#define DMA_D_INTERRUPT_ENABLE 12 +#define DMA_C_INTERRUPT_ENABLE 11 +#define DMA_B_INTERRUPT_ENABLE 10 +#define DMA_A_INTERRUPT_ENABLE 9 +#define EEPROM_DONE_INTERRUPT_ENABLE 8 +#define VBUS_INTERRUPT_ENABLE 7 +#define CONTROL_STATUS_INTERRUPT_ENABLE 6 +#define ROOT_PORT_RESET_INTERRUPT_ENABLE 4 +#define SUSPEND_REQUEST_INTERRUPT_ENABLE 3 +#define SUSPEND_REQUEST_CHANGE_INTERRUPT_ENABLE 2 +#define RESUME_INTERRUPT_ENABLE 1 +#define SOF_INTERRUPT_ENABLE 0 + __le32 irqstat0; +#define INTA_ASSERTED 12 +#define SETUP_PACKET_INTERRUPT 7 +#define ENDPOINT_F_INTERRUPT 6 +#define ENDPOINT_E_INTERRUPT 5 +#define ENDPOINT_D_INTERRUPT 4 +#define ENDPOINT_C_INTERRUPT 3 +#define ENDPOINT_B_INTERRUPT 2 +#define ENDPOINT_A_INTERRUPT 1 +#define ENDPOINT_0_INTERRUPT 0 + __le32 irqstat1; +#define POWER_STATE_CHANGE_INTERRUPT 27 +#define PCI_ARBITER_TIMEOUT_INTERRUPT 26 +#define PCI_PARITY_ERROR_INTERRUPT 25 +#define PCI_INTA_INTERRUPT 24 +#define PCI_PME_INTERRUPT 23 +#define PCI_SERR_INTERRUPT 22 +#define PCI_PERR_INTERRUPT 21 +#define PCI_MASTER_ABORT_RECEIVED_INTERRUPT 20 +#define PCI_TARGET_ABORT_RECEIVED_INTERRUPT 19 +#define PCI_RETRY_ABORT_INTERRUPT 17 +#define PCI_MASTER_CYCLE_DONE_INTERRUPT 16 +#define GPIO_INTERRUPT 13 +#define DMA_D_INTERRUPT 12 +#define DMA_C_INTERRUPT 11 +#define DMA_B_INTERRUPT 10 +#define DMA_A_INTERRUPT 9 +#define EEPROM_DONE_INTERRUPT 8 +#define VBUS_INTERRUPT 7 +#define CONTROL_STATUS_INTERRUPT 6 +#define ROOT_PORT_RESET_INTERRUPT 4 +#define SUSPEND_REQUEST_INTERRUPT 3 +#define 
SUSPEND_REQUEST_CHANGE_INTERRUPT 2 +#define RESUME_INTERRUPT 1 +#define SOF_INTERRUPT 0 + // offset 0x0030 + __le32 idxaddr; + __le32 idxdata; + __le32 fifoctl; +#define PCI_BASE2_RANGE 16 +#define IGNORE_FIFO_AVAILABILITY 3 +#define PCI_BASE2_SELECT 2 +#define FIFO_CONFIGURATION_SELECT 0 + u32 _unused2; + // offset 0x0040 + __le32 memaddr; +#define START 28 +#define DIRECTION 27 +#define FIFO_DIAGNOSTIC_SELECT 24 +#define MEMORY_ADDRESS 0 + __le32 memdata0; + __le32 memdata1; + u32 _unused3; + // offset 0x0050 + __le32 gpioctl; +#define GPIO3_LED_SELECT 12 +#define GPIO3_INTERRUPT_ENABLE 11 +#define GPIO2_INTERRUPT_ENABLE 10 +#define GPIO1_INTERRUPT_ENABLE 9 +#define GPIO0_INTERRUPT_ENABLE 8 +#define GPIO3_OUTPUT_ENABLE 7 +#define GPIO2_OUTPUT_ENABLE 6 +#define GPIO1_OUTPUT_ENABLE 5 +#define GPIO0_OUTPUT_ENABLE 4 +#define GPIO3_DATA 3 +#define GPIO2_DATA 2 +#define GPIO1_DATA 1 +#define GPIO0_DATA 0 + __le32 gpiostat; +#define GPIO3_INTERRUPT 3 +#define GPIO2_INTERRUPT 2 +#define GPIO1_INTERRUPT 1 +#define GPIO0_INTERRUPT 0 +} __attribute__ ((packed)); + +/* usb control, BAR0 + 0x0080 */ +struct net2280_usb_regs { + // offset 0x0080 + __le32 stdrsp; +#define STALL_UNSUPPORTED_REQUESTS 31 +#define SET_TEST_MODE 16 +#define GET_OTHER_SPEED_CONFIGURATION 15 +#define GET_DEVICE_QUALIFIER 14 +#define SET_ADDRESS 13 +#define ENDPOINT_SET_CLEAR_HALT 12 +#define DEVICE_SET_CLEAR_DEVICE_REMOTE_WAKEUP 11 +#define GET_STRING_DESCRIPTOR_2 10 +#define GET_STRING_DESCRIPTOR_1 9 +#define GET_STRING_DESCRIPTOR_0 8 +#define GET_SET_INTERFACE 6 +#define GET_SET_CONFIGURATION 5 +#define GET_CONFIGURATION_DESCRIPTOR 4 +#define GET_DEVICE_DESCRIPTOR 3 +#define GET_ENDPOINT_STATUS 2 +#define GET_INTERFACE_STATUS 1 +#define GET_DEVICE_STATUS 0 + __le32 prodvendid; +#define PRODUCT_ID 16 +#define VENDOR_ID 0 + __le32 relnum; + __le32 usbctl; +#define SERIAL_NUMBER_INDEX 16 +#define PRODUCT_ID_STRING_ENABLE 13 +#define VENDOR_ID_STRING_ENABLE 12 +#define USB_ROOT_PORT_WAKEUP_ENABLE 11 +#define VBUS_PIN 10 +#define TIMED_DISCONNECT 9 +#define SUSPEND_IMMEDIATELY 7 +#define SELF_POWERED_USB_DEVICE 6 +#define REMOTE_WAKEUP_SUPPORT 5 +#define PME_POLARITY 4 +#define USB_DETECT_ENABLE 3 +#define PME_WAKEUP_ENABLE 2 +#define DEVICE_REMOTE_WAKEUP_ENABLE 1 +#define SELF_POWERED_STATUS 0 + // offset 0x0090 + __le32 usbstat; +#define HIGH_SPEED 7 +#define FULL_SPEED 6 +#define GENERATE_RESUME 5 +#define GENERATE_DEVICE_REMOTE_WAKEUP 4 + __le32 xcvrdiag; +#define FORCE_HIGH_SPEED_MODE 31 +#define FORCE_FULL_SPEED_MODE 30 +#define USB_TEST_MODE 24 +#define LINE_STATE 16 +#define TRANSCEIVER_OPERATION_MODE 2 +#define TRANSCEIVER_SELECT 1 +#define TERMINATION_SELECT 0 + __le32 setup0123; + __le32 setup4567; + // offset 0x0090 + u32 _unused0; + __le32 ouraddr; +#define FORCE_IMMEDIATE 7 +#define OUR_USB_ADDRESS 0 + __le32 ourconfig; +} __attribute__ ((packed)); + +/* pci control, BAR0 + 0x0100 */ +struct net2280_pci_regs { + // offset 0x0100 + __le32 pcimstctl; +#define PCI_ARBITER_PARK_SELECT 13 +#define PCI_MULTI LEVEL_ARBITER 12 +#define PCI_RETRY_ABORT_ENABLE 11 +#define DMA_MEMORY_WRITE_AND_INVALIDATE_ENABLE 10 +#define DMA_READ_MULTIPLE_ENABLE 9 +#define DMA_READ_LINE_ENABLE 8 +#define PCI_MASTER_COMMAND_SELECT 6 +#define MEM_READ_OR_WRITE 0 +#define IO_READ_OR_WRITE 1 +#define CFG_READ_OR_WRITE 2 +#define PCI_MASTER_START 5 +#define PCI_MASTER_READ_WRITE 4 +#define PCI_MASTER_WRITE 0 +#define PCI_MASTER_READ 1 +#define PCI_MASTER_BYTE_WRITE_ENABLES 0 + __le32 pcimstaddr; + __le32 pcimstdata; + __le32 pcimststat; +#define 
PCI_ARBITER_CLEAR 2 +#define PCI_EXTERNAL_ARBITER 1 +#define PCI_HOST_MODE 0 +} __attribute__ ((packed)); + +/* dma control, BAR0 + 0x0180 ... array of four structs like this, + * for channels 0..3. see also struct net2280_dma: descriptor + * that can be loaded into some of these registers. + */ +struct net2280_dma_regs { /* [11.7] */ + // offset 0x0180, 0x01a0, 0x01c0, 0x01e0, + __le32 dmactl; +#define DMA_SCATTER_GATHER_DONE_INTERRUPT_ENABLE 25 +#define DMA_CLEAR_COUNT_ENABLE 21 +#define DESCRIPTOR_POLLING_RATE 19 +#define POLL_CONTINUOUS 0 +#define POLL_1_USEC 1 +#define POLL_100_USEC 2 +#define POLL_1_MSEC 3 +#define DMA_VALID_BIT_POLLING_ENABLE 18 +#define DMA_VALID_BIT_ENABLE 17 +#define DMA_SCATTER_GATHER_ENABLE 16 +#define DMA_OUT_AUTO_START_ENABLE 4 +#define DMA_PREEMPT_ENABLE 3 +#define DMA_FIFO_VALIDATE 2 +#define DMA_ENABLE 1 +#define DMA_ADDRESS_HOLD 0 + __le32 dmastat; +#define DMA_SCATTER_GATHER_DONE_INTERRUPT 25 +#define DMA_TRANSACTION_DONE_INTERRUPT 24 +#define DMA_ABORT 1 +#define DMA_START 0 + u32 _unused0[2]; + // offset 0x0190, 0x01b0, 0x01d0, 0x01f0, + __le32 dmacount; +#define VALID_BIT 31 +#define DMA_DIRECTION 30 +#define DMA_DONE_INTERRUPT_ENABLE 29 +#define END_OF_CHAIN 28 +#define DMA_BYTE_COUNT_MASK ((1<<24)-1) +#define DMA_BYTE_COUNT 0 + __le32 dmaaddr; + __le32 dmadesc; + u32 _unused1; +} __attribute__ ((packed)); + +/* dedicated endpoint registers, BAR0 + 0x0200 */ + +struct net2280_dep_regs { /* [11.8] */ + // offset 0x0200, 0x0210, 0x220, 0x230, 0x240 + __le32 dep_cfg; + // offset 0x0204, 0x0214, 0x224, 0x234, 0x244 + __le32 dep_rsp; + u32 _unused[2]; +} __attribute__ ((packed)); + +/* configurable endpoint registers, BAR0 + 0x0300 ... array of seven structs + * like this, for ep0 then the configurable endpoints A..F + * ep0 reserved for control; E and F have only 64 bytes of fifo + */ +struct net2280_ep_regs { /* [11.9] */ + // offset 0x0300, 0x0320, 0x0340, 0x0360, 0x0380, 0x03a0, 0x03c0 + __le32 ep_cfg; +#define ENDPOINT_BYTE_COUNT 16 +#define ENDPOINT_ENABLE 10 +#define ENDPOINT_TYPE 8 +#define ENDPOINT_DIRECTION 7 +#define ENDPOINT_NUMBER 0 + __le32 ep_rsp; +#define SET_NAK_OUT_PACKETS 15 +#define SET_EP_HIDE_STATUS_PHASE 14 +#define SET_EP_FORCE_CRC_ERROR 13 +#define SET_INTERRUPT_MODE 12 +#define SET_CONTROL_STATUS_PHASE_HANDSHAKE 11 +#define SET_NAK_OUT_PACKETS_MODE 10 +#define SET_ENDPOINT_TOGGLE 9 +#define SET_ENDPOINT_HALT 8 +#define CLEAR_NAK_OUT_PACKETS 7 +#define CLEAR_EP_HIDE_STATUS_PHASE 6 +#define CLEAR_EP_FORCE_CRC_ERROR 5 +#define CLEAR_INTERRUPT_MODE 4 +#define CLEAR_CONTROL_STATUS_PHASE_HANDSHAKE 3 +#define CLEAR_NAK_OUT_PACKETS_MODE 2 +#define CLEAR_ENDPOINT_TOGGLE 1 +#define CLEAR_ENDPOINT_HALT 0 + __le32 ep_irqenb; +#define SHORT_PACKET_OUT_DONE_INTERRUPT_ENABLE 6 +#define SHORT_PACKET_TRANSFERRED_INTERRUPT_ENABLE 5 +#define DATA_PACKET_RECEIVED_INTERRUPT_ENABLE 3 +#define DATA_PACKET_TRANSMITTED_INTERRUPT_ENABLE 2 +#define DATA_OUT_PING_TOKEN_INTERRUPT_ENABLE 1 +#define DATA_IN_TOKEN_INTERRUPT_ENABLE 0 + __le32 ep_stat; +#define FIFO_VALID_COUNT 24 +#define HIGH_BANDWIDTH_OUT_TRANSACTION_PID 22 +#define TIMEOUT 21 +#define USB_STALL_SENT 20 +#define USB_IN_NAK_SENT 19 +#define USB_IN_ACK_RCVD 18 +#define USB_OUT_PING_NAK_SENT 17 +#define USB_OUT_ACK_SENT 16 +#define FIFO_OVERFLOW 13 +#define FIFO_UNDERFLOW 12 +#define FIFO_FULL 11 +#define FIFO_EMPTY 10 +#define FIFO_FLUSH 9 +#define SHORT_PACKET_OUT_DONE_INTERRUPT 6 +#define SHORT_PACKET_TRANSFERRED_INTERRUPT 5 +#define NAK_OUT_PACKETS 4 +#define DATA_PACKET_RECEIVED_INTERRUPT 3 
+#define DATA_PACKET_TRANSMITTED_INTERRUPT	2
+#define DATA_OUT_PING_TOKEN_INTERRUPT		1
+#define DATA_IN_TOKEN_INTERRUPT			0
+	// offset 0x0310, 0x0330, 0x0350, 0x0370, 0x0390, 0x03b0, 0x03d0
+	__le32			ep_avail;
+	__le32			ep_data;
+	u32			_unused0[2];
+} __attribute__ ((packed));
+
+struct net2280_reg_write {
+	__le16 port;
+	__le32 addr;
+	__le32 val;
+} __attribute__ ((packed));
+
+struct net2280_reg_read {
+	__le16 port;
+	__le32 addr;
+} __attribute__ ((packed));
+#endif /* NET2280_H */
diff --git a/drivers/net/wireless/netwave_cs.c b/drivers/net/wireless/netwave_cs.c
index 45b00e13ab2b3e670584b9dd26a77a307bd32e3c..c2d71afd57e5c6196a83cc620f0adb67222dba2a 100644
--- a/drivers/net/wireless/netwave_cs.c
+++ b/drivers/net/wireless/netwave_cs.c
@@ -412,7 +412,6 @@ static int netwave_probe(struct pcmcia_device *link)
     spin_lock_init(&priv->spinlock);
 
     /* Netwave specific entries in the device structure */
-    SET_MODULE_OWNER(dev);
     dev->hard_start_xmit = &netwave_start_xmit;
     dev->get_stats = &netwave_get_stats;
     dev->set_multicast_list = &set_multicast_list;
@@ -710,9 +709,9 @@ static const iw_handler netwave_private_handler[] =
 
 static const struct iw_handler_def netwave_handler_def =
 {
-	.num_standard	= sizeof(netwave_handler)/sizeof(iw_handler),
-	.num_private	= sizeof(netwave_private_handler)/sizeof(iw_handler),
-	.num_private_args = sizeof(netwave_private_args)/sizeof(struct iw_priv_args),
+	.num_standard	= ARRAY_SIZE(netwave_handler),
+	.num_private	= ARRAY_SIZE(netwave_private_handler),
+	.num_private_args = ARRAY_SIZE(netwave_private_args),
 	.standard	= (iw_handler *) netwave_handler,
 	.private	= (iw_handler *) netwave_private_handler,
 	.private_args	= (struct iw_priv_args *) netwave_private_args,
@@ -738,6 +737,7 @@ static int netwave_pcmcia_config(struct pcmcia_device *link) {
     win_req_t req;
     memreq_t mem;
     u_char __iomem *ramBase = NULL;
+    DECLARE_MAC_BUF(mac);
 
     DEBUG(0, "netwave_pcmcia_config(0x%p)\n", link);
 
@@ -806,12 +806,13 @@ static int netwave_pcmcia_config(struct pcmcia_device *link) {
     for (i = 0; i < 6; i++)
 	dev->dev_addr[i] = readb(ramBase + NETWAVE_EREG_PA + i);
 
-    printk(KERN_INFO "%s: Netwave: port %#3lx, irq %d, mem %lx id "
-	   "%c%c, hw_addr ", dev->name, dev->base_addr, dev->irq,
-	   (u_long) ramBase, (int) readb(ramBase+NETWAVE_EREG_NI),
-	   (int) readb(ramBase+NETWAVE_EREG_NI+1));
-    for (i = 0; i < 6; i++)
-	printk("%02X%s", dev->dev_addr[i], ((i<5) ?
":" : "\n")); + printk(KERN_INFO "%s: Netwave: port %#3lx, irq %d, mem %lx" + "id %c%c, hw_addr %s\n", + dev->name, dev->base_addr, dev->irq, + (u_long) ramBase, + (int) readb(ramBase+NETWAVE_EREG_NI), + (int) readb(ramBase+NETWAVE_EREG_NI+1), + print_mac(mac, dev->dev_addr)); /* get revision words */ printk(KERN_DEBUG "Netwave_reset: revision %04x %04x\n", diff --git a/drivers/net/wireless/orinoco.c b/drivers/net/wireless/orinoco.c index 062286dc8e152370da4d2f1ed6f9cff8e8646132..ca6c2da7bc5d3acc727e9037c59e75174752b253 100644 --- a/drivers/net/wireless/orinoco.c +++ b/drivers/net/wireless/orinoco.c @@ -2232,6 +2232,7 @@ static int orinoco_init(struct net_device *dev) struct hermes_idstring nickbuf; u16 reclen; int len; + DECLARE_MAC_BUF(mac); /* No need to lock, the hw_unavailable flag is already set in * alloc_orinocodev() */ @@ -2274,10 +2275,8 @@ static int orinoco_init(struct net_device *dev) goto out; } - printk(KERN_DEBUG "%s: MAC address %02X:%02X:%02X:%02X:%02X:%02X\n", - dev->name, dev->dev_addr[0], dev->dev_addr[1], - dev->dev_addr[2], dev->dev_addr[3], dev->dev_addr[4], - dev->dev_addr[5]); + printk(KERN_DEBUG "%s: MAC address %s\n", + dev->name, print_mac(mac, dev->dev_addr)); /* Get the station name */ err = hermes_read_ltv(hw, USER_BAP, HERMES_RID_CNFOWNNAME, diff --git a/drivers/net/wireless/orinoco_cs.c b/drivers/net/wireless/orinoco_cs.c index d1e502236b2a782fc928890bb07ed8ceb5ad4fa2..8b7f5768a10307d0ebd81446e8b78b4c71758e0a 100644 --- a/drivers/net/wireless/orinoco_cs.c +++ b/drivers/net/wireless/orinoco_cs.c @@ -313,7 +313,6 @@ orinoco_cs_config(struct pcmcia_device *link) /* Ok, we have the configuration, prepare to register the netdev */ dev->base_addr = link->io.BasePort1; dev->irq = link->irq.AssignedIRQ; - SET_MODULE_OWNER(dev); card->node.major = card->node.minor = 0; SET_NETDEV_DEV(dev, &handle_to_dev(link)); diff --git a/drivers/net/wireless/orinoco_nortel.c b/drivers/net/wireless/orinoco_nortel.c index eaf3d13b851cc2288a75736bc4b349c2dae74b76..35ec5fcf81a613d5d4404b92b21e38f74f3486f1 100644 --- a/drivers/net/wireless/orinoco_nortel.c +++ b/drivers/net/wireless/orinoco_nortel.c @@ -193,7 +193,6 @@ static int orinoco_nortel_init_one(struct pci_dev *pdev, card = priv->card; card->bridge_io = bridge_io; card->attr_io = attr_io; - SET_MODULE_OWNER(dev); SET_NETDEV_DEV(dev, &pdev->dev); hermes_struct_init(&priv->hw, hermes_io, HERMES_16BIT_REGSPACING); diff --git a/drivers/net/wireless/orinoco_pci.c b/drivers/net/wireless/orinoco_pci.c index 97a8b4ff32bd88d59b87a3deeb87d060385cb168..2547d5dac0d338752439e14be5db194871b5baa2 100644 --- a/drivers/net/wireless/orinoco_pci.c +++ b/drivers/net/wireless/orinoco_pci.c @@ -148,7 +148,6 @@ static int orinoco_pci_init_one(struct pci_dev *pdev, priv = netdev_priv(dev); card = priv->card; - SET_MODULE_OWNER(dev); SET_NETDEV_DEV(dev, &pdev->dev); hermes_struct_init(&priv->hw, hermes_io, HERMES_32BIT_REGSPACING); diff --git a/drivers/net/wireless/orinoco_plx.c b/drivers/net/wireless/orinoco_plx.c index 31162ac25a92fe32d7d4ddfbe26d8a2e961f04f3..98fe165337d1ee5330b64db71f48266116d31b4a 100644 --- a/drivers/net/wireless/orinoco_plx.c +++ b/drivers/net/wireless/orinoco_plx.c @@ -232,7 +232,6 @@ static int orinoco_plx_init_one(struct pci_dev *pdev, card = priv->card; card->bridge_io = bridge_io; card->attr_io = attr_io; - SET_MODULE_OWNER(dev); SET_NETDEV_DEV(dev, &pdev->dev); hermes_struct_init(&priv->hw, hermes_io, HERMES_16BIT_REGSPACING); diff --git a/drivers/net/wireless/orinoco_tmd.c b/drivers/net/wireless/orinoco_tmd.c index 
7c7b960c91dfe057a2459d97f78ccf8a12204065..df493185a4af65a2b815b4542fbd9f55e71904ba 100644
--- a/drivers/net/wireless/orinoco_tmd.c
+++ b/drivers/net/wireless/orinoco_tmd.c
@@ -134,7 +134,6 @@ static int orinoco_tmd_init_one(struct pci_dev *pdev,
 	priv = netdev_priv(dev);
 	card = priv->card;
 	card->bridge_io = bridge_io;
-	SET_MODULE_OWNER(dev);
 	SET_NETDEV_DEV(dev, &pdev->dev);
 
 	hermes_struct_init(&priv->hw, hermes_io, HERMES_16BIT_REGSPACING);
@@ -190,7 +189,7 @@ static int orinoco_tmd_init_one(struct pci_dev *pdev,
 
 static void __devexit orinoco_tmd_remove_one(struct pci_dev *pdev)
 {
 	struct net_device *dev = pci_get_drvdata(pdev);
-	struct orinoco_private *priv = dev->priv;
+	struct orinoco_private *priv = netdev_priv(dev);
 	struct orinoco_pci_card *card = priv->card;
 
 	unregister_netdev(dev);
diff --git a/drivers/net/wireless/p54.h b/drivers/net/wireless/p54.h
new file mode 100644
index 0000000000000000000000000000000000000000..744c866066c5fccd4fd54c9d9acd31c4e6ef52a0
--- /dev/null
+++ b/drivers/net/wireless/p54.h
@@ -0,0 +1,81 @@
+#ifndef PRISM54_H
+#define PRISM54_H
+
+/*
+ * Shared defines for all mac80211 Prism54 code
+ *
+ * Copyright (c) 2006, Michael Wu
+ *
+ * Based on the islsm (softmac prism54) driver, which is:
+ * Copyright 2004-2006 Jean-Baptiste Note , et al.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+enum control_frame_types {
+	P54_CONTROL_TYPE_FILTER_SET = 0,
+	P54_CONTROL_TYPE_CHANNEL_CHANGE,
+	P54_CONTROL_TYPE_FREQDONE,
+	P54_CONTROL_TYPE_DCFINIT,
+	P54_CONTROL_TYPE_FREEQUEUE = 7,
+	P54_CONTROL_TYPE_TXDONE,
+	P54_CONTROL_TYPE_PING,
+	P54_CONTROL_TYPE_STAT_READBACK,
+	P54_CONTROL_TYPE_BBP,
+	P54_CONTROL_TYPE_EEPROM_READBACK,
+	P54_CONTROL_TYPE_LED
+};
+
+struct p54_control_hdr {
+	__le16 magic1;
+	__le16 len;
+	__le32 req_id;
+	__le16 type;	/* enum control_frame_types */
+	u8 retry1;
+	u8 retry2;
+	u8 data[0];
+} __attribute__ ((packed));
+
+#define EEPROM_READBACK_LEN (sizeof(struct p54_control_hdr) + 4 /* p54_eeprom_lm86 */)
+#define MAX_RX_SIZE (IEEE80211_MAX_RTS_THRESHOLD + sizeof(struct p54_control_hdr) + 20 /* length of struct p54_rx_hdr */ + 16 )
+
+#define ISL38XX_DEV_FIRMWARE_ADDR 0x20000
+
+struct p54_common {
+	u32 rx_start;
+	u32 rx_end;
+	struct sk_buff_head tx_queue;
+	void (*tx)(struct ieee80211_hw *dev, struct p54_control_hdr *data,
+		   size_t len, int free_on_tx);
+	int (*open)(struct ieee80211_hw *dev);
+	void (*stop)(struct ieee80211_hw *dev);
+	int mode;
+	u8 mac_addr[ETH_ALEN];
+	u8 bssid[ETH_ALEN];
+	struct pda_iq_autocal_entry *iq_autocal;
+	unsigned int iq_autocal_len;
+	struct pda_channel_output_limit *output_limit;
+	unsigned int output_limit_len;
+	struct pda_pa_curve_data *curve_data;
+	__le16 rxhw;
+	u8 version;
+	unsigned int tx_hdr_len;
+	void *cached_vdcf;
+	unsigned int fw_var;
+	/* FIXME: this channels/modes/rates stuff sucks */
+	struct ieee80211_channel channels[14];
+	struct ieee80211_rate rates[12];
+	struct ieee80211_hw_mode modes[2];
+	struct ieee80211_tx_queue_stats tx_stats;
+};
+
+int p54_rx(struct ieee80211_hw *dev, struct sk_buff *skb);
+void p54_parse_firmware(struct ieee80211_hw *dev, const struct firmware *fw);
+int p54_parse_eeprom(struct ieee80211_hw *dev, void *eeprom, int len);
+void p54_fill_eeprom_readback(struct p54_control_hdr *hdr);
+struct ieee80211_hw *p54_init_common(size_t priv_data_len);
+void p54_free_common(struct ieee80211_hw *dev);
+
+#endif /* PRISM54_H */
diff --git a/drivers/net/wireless/p54common.c b/drivers/net/wireless/p54common.c new file mode 100644 index 0000000000000000000000000000000000000000..2c63cf0ad2cdb3cfe11936e40147fd89b390f0da --- /dev/null +++ b/drivers/net/wireless/p54common.c @@ -0,0 +1,1019 @@ + +/* + * Common code for mac80211 Prism54 drivers + * + * Copyright (c) 2006, Michael Wu + * Copyright (c) 2007, Christian Lamparter + * + * Based on the islsm (softmac prism54) driver, which is: + * Copyright 2004-2006 Jean-Baptiste Note , et al. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include +#include +#include + +#include + +#include "p54.h" +#include "p54common.h" + +MODULE_AUTHOR("Michael Wu "); +MODULE_DESCRIPTION("Softmac Prism54 common code"); +MODULE_LICENSE("GPL"); +MODULE_ALIAS("prism54common"); + +void p54_parse_firmware(struct ieee80211_hw *dev, const struct firmware *fw) +{ + struct p54_common *priv = dev->priv; + struct bootrec_exp_if *exp_if; + struct bootrec *bootrec; + u32 *data = (u32 *)fw->data; + u32 *end_data = (u32 *)fw->data + (fw->size >> 2); + u8 *fw_version = NULL; + size_t len; + int i; + + if (priv->rx_start) + return; + + while (data < end_data && *data) + data++; + + while (data < end_data && !*data) + data++; + + bootrec = (struct bootrec *) data; + + while (bootrec->data <= end_data && + (bootrec->data + (len = le32_to_cpu(bootrec->len))) <= end_data) { + u32 code = le32_to_cpu(bootrec->code); + switch (code) { + case BR_CODE_COMPONENT_ID: + switch (be32_to_cpu(*bootrec->data)) { + case FW_FMAC: + printk(KERN_INFO "p54: FreeMAC firmware\n"); + break; + case FW_LM20: + printk(KERN_INFO "p54: LM20 firmware\n"); + break; + case FW_LM86: + printk(KERN_INFO "p54: LM86 firmware\n"); + break; + case FW_LM87: + printk(KERN_INFO "p54: LM87 firmware - not supported yet!\n"); + break; + default: + printk(KERN_INFO "p54: unknown firmware\n"); + break; + } + break; + case BR_CODE_COMPONENT_VERSION: + /* 24 bytes should be enough for all firmwares */ + if (strnlen((unsigned char*)bootrec->data, 24) < 24) + fw_version = (unsigned char*)bootrec->data; + break; + case BR_CODE_DESCR: + priv->rx_start = le32_to_cpu(bootrec->data[1]); + /* FIXME add sanity checking */ + priv->rx_end = le32_to_cpu(bootrec->data[2]) - 0x3500; + break; + case BR_CODE_EXPOSED_IF: + exp_if = (struct bootrec_exp_if *) bootrec->data; + for (i = 0; i < (len * sizeof(*exp_if) / 4); i++) + if (exp_if[i].if_id == 0x1a) + priv->fw_var = le16_to_cpu(exp_if[i].variant); + break; + case BR_CODE_DEPENDENT_IF: + break; + case BR_CODE_END_OF_BRA: + case LEGACY_BR_CODE_END_OF_BRA: + end_data = NULL; + break; + default: + break; + } + bootrec = (struct bootrec *)&bootrec->data[len]; + } + + if (fw_version) + printk(KERN_INFO "p54: FW rev %s - Softmac protocol %x.%x\n", + fw_version, priv->fw_var >> 8, priv->fw_var & 0xff); + + if (priv->fw_var >= 0x300) { + /* Firmware supports QoS, use it! 
*/ + priv->tx_stats.data[0].limit = 3; + priv->tx_stats.data[1].limit = 4; + priv->tx_stats.data[2].limit = 3; + priv->tx_stats.data[3].limit = 1; + dev->queues = 4; + } +} +EXPORT_SYMBOL_GPL(p54_parse_firmware); + +static int p54_convert_rev0_to_rev1(struct ieee80211_hw *dev, + struct pda_pa_curve_data *curve_data) +{ + struct p54_common *priv = dev->priv; + struct pda_pa_curve_data_sample_rev1 *rev1; + struct pda_pa_curve_data_sample_rev0 *rev0; + size_t cd_len = sizeof(*curve_data) + + (curve_data->points_per_channel*sizeof(*rev1) + 2) * + curve_data->channels; + unsigned int i, j; + void *source, *target; + + priv->curve_data = kmalloc(cd_len, GFP_KERNEL); + if (!priv->curve_data) + return -ENOMEM; + + memcpy(priv->curve_data, curve_data, sizeof(*curve_data)); + source = curve_data->data; + target = priv->curve_data->data; + for (i = 0; i < curve_data->channels; i++) { + __le16 *freq = source; + source += sizeof(__le16); + *((__le16 *)target) = *freq; + target += sizeof(__le16); + for (j = 0; j < curve_data->points_per_channel; j++) { + rev1 = target; + rev0 = source; + + rev1->rf_power = rev0->rf_power; + rev1->pa_detector = rev0->pa_detector; + rev1->data_64qam = rev0->pcv; + /* "invent" the points for the other modulations */ +#define SUB(x,y) (u8)((x) - (y)) > (x) ? 0 : (x) - (y) + rev1->data_16qam = SUB(rev0->pcv, 12); + rev1->data_qpsk = SUB(rev1->data_16qam, 12); + rev1->data_bpsk = SUB(rev1->data_qpsk, 12); + rev1->data_barker= SUB(rev1->data_bpsk, 14); +#undef SUB + target += sizeof(*rev1); + source += sizeof(*rev0); + } + } + + return 0; +} + +int p54_parse_eeprom(struct ieee80211_hw *dev, void *eeprom, int len) +{ + struct p54_common *priv = dev->priv; + struct eeprom_pda_wrap *wrap = NULL; + struct pda_entry *entry; + int i = 0; + unsigned int data_len, entry_len; + void *tmp; + int err; + + wrap = (struct eeprom_pda_wrap *) eeprom; + entry = (void *)wrap->data + wrap->len; + i += 2; + i += le16_to_cpu(entry->len)*2; + while (i < len) { + entry_len = le16_to_cpu(entry->len); + data_len = ((entry_len - 1) << 1); + switch (le16_to_cpu(entry->code)) { + case PDR_MAC_ADDRESS: + SET_IEEE80211_PERM_ADDR(dev, entry->data); + break; + case PDR_PRISM_PA_CAL_OUTPUT_POWER_LIMITS: + if (data_len < 2) { + err = -EINVAL; + goto err; + } + + if (2 + entry->data[1]*sizeof(*priv->output_limit) > data_len) { + err = -EINVAL; + goto err; + } + + priv->output_limit = kmalloc(entry->data[1] * + sizeof(*priv->output_limit), GFP_KERNEL); + + if (!priv->output_limit) { + err = -ENOMEM; + goto err; + } + + memcpy(priv->output_limit, &entry->data[2], + entry->data[1]*sizeof(*priv->output_limit)); + priv->output_limit_len = entry->data[1]; + break; + case PDR_PRISM_PA_CAL_CURVE_DATA: + if (data_len < sizeof(struct pda_pa_curve_data)) { + err = -EINVAL; + goto err; + } + + if (((struct pda_pa_curve_data *)entry->data)->cal_method_rev) { + priv->curve_data = kmalloc(data_len, GFP_KERNEL); + if (!priv->curve_data) { + err = -ENOMEM; + goto err; + } + + memcpy(priv->curve_data, entry->data, data_len); + } else { + err = p54_convert_rev0_to_rev1(dev, (struct pda_pa_curve_data *)entry->data); + if (err) + goto err; + } + + break; + case PDR_PRISM_ZIF_TX_IQ_CALIBRATION: + priv->iq_autocal = kmalloc(data_len, GFP_KERNEL); + if (!priv->iq_autocal) { + err = -ENOMEM; + goto err; + } + + memcpy(priv->iq_autocal, entry->data, data_len); + priv->iq_autocal_len = data_len / sizeof(struct pda_iq_autocal_entry); + break; + case PDR_INTERFACE_LIST: + tmp = entry->data; + while ((u8 *)tmp < entry->data + data_len) { + 
struct bootrec_exp_if *exp_if = tmp; + if (le16_to_cpu(exp_if->if_id) == 0xF) + priv->rxhw = exp_if->variant & cpu_to_le16(0x07); + tmp += sizeof(struct bootrec_exp_if); + } + break; + case PDR_HARDWARE_PLATFORM_COMPONENT_ID: + priv->version = *(u8 *)(entry->data + 1); + break; + case PDR_END: + i = len; + break; + } + + entry = (void *)entry + (entry_len + 1)*2; + i += 2; + i += entry_len*2; + } + + if (!priv->iq_autocal || !priv->output_limit || !priv->curve_data) { + printk(KERN_ERR "p54: not all required entries found in eeprom!\n"); + err = -EINVAL; + goto err; + } + + return 0; + + err: + if (priv->iq_autocal) { + kfree(priv->iq_autocal); + priv->iq_autocal = NULL; + } + + if (priv->output_limit) { + kfree(priv->output_limit); + priv->output_limit = NULL; + } + + if (priv->curve_data) { + kfree(priv->curve_data); + priv->curve_data = NULL; + } + + printk(KERN_ERR "p54: eeprom parse failed!\n"); + return err; +} +EXPORT_SYMBOL_GPL(p54_parse_eeprom); + +void p54_fill_eeprom_readback(struct p54_control_hdr *hdr) +{ + struct p54_eeprom_lm86 *eeprom_hdr; + + hdr->magic1 = cpu_to_le16(0x8000); + hdr->len = cpu_to_le16(sizeof(*eeprom_hdr) + 0x2000); + hdr->type = cpu_to_le16(P54_CONTROL_TYPE_EEPROM_READBACK); + hdr->retry1 = hdr->retry2 = 0; + eeprom_hdr = (struct p54_eeprom_lm86 *) hdr->data; + eeprom_hdr->offset = 0x0; + eeprom_hdr->len = cpu_to_le16(0x2000); +} +EXPORT_SYMBOL_GPL(p54_fill_eeprom_readback); + +static void p54_rx_data(struct ieee80211_hw *dev, struct sk_buff *skb) +{ + struct p54_rx_hdr *hdr = (struct p54_rx_hdr *) skb->data; + struct ieee80211_rx_status rx_status = {0}; + u16 freq = le16_to_cpu(hdr->freq); + + rx_status.ssi = hdr->rssi; + rx_status.rate = hdr->rate & 0x1f; /* report short preambles & CCK too */ + rx_status.channel = freq == 2484 ? 14 : (freq - 2407)/5; + rx_status.freq = freq; + rx_status.phymode = MODE_IEEE80211G; + rx_status.antenna = hdr->antenna; + rx_status.mactime = le64_to_cpu(hdr->timestamp); + + skb_pull(skb, sizeof(*hdr)); + skb_trim(skb, le16_to_cpu(hdr->len)); + + ieee80211_rx_irqsafe(dev, skb, &rx_status); +} + +static void inline p54_wake_free_queues(struct ieee80211_hw *dev) +{ + struct p54_common *priv = dev->priv; + int i; + + /* ieee80211_start_queues is great if all queues are really empty. + * But, what if some are full? 
*/ + + for (i = 0; i < dev->queues; i++) + if (priv->tx_stats.data[i].len < priv->tx_stats.data[i].limit) + ieee80211_wake_queue(dev, i); +} + +static void p54_rx_frame_sent(struct ieee80211_hw *dev, struct sk_buff *skb) +{ + struct p54_common *priv = dev->priv; + struct p54_control_hdr *hdr = (struct p54_control_hdr *) skb->data; + struct p54_frame_sent_hdr *payload = (struct p54_frame_sent_hdr *) hdr->data; + struct sk_buff *entry = (struct sk_buff *) priv->tx_queue.next; + u32 addr = le32_to_cpu(hdr->req_id) - 0x70; + struct memrecord *range = NULL; + u32 freed = 0; + u32 last_addr = priv->rx_start; + + while (entry != (struct sk_buff *)&priv->tx_queue) { + range = (struct memrecord *)&entry->cb; + if (range->start_addr == addr) { + struct ieee80211_tx_status status = {{0}}; + struct p54_control_hdr *entry_hdr; + struct p54_tx_control_allocdata *entry_data; + int pad = 0; + + if (entry->next != (struct sk_buff *)&priv->tx_queue) + freed = ((struct memrecord *)&entry->next->cb)->start_addr - last_addr; + else + freed = priv->rx_end - last_addr; + + last_addr = range->end_addr; + __skb_unlink(entry, &priv->tx_queue); + if (!range->control) { + kfree_skb(entry); + break; + } + memcpy(&status.control, range->control, + sizeof(status.control)); + kfree(range->control); + priv->tx_stats.data[status.control.queue].len--; + + entry_hdr = (struct p54_control_hdr *) entry->data; + entry_data = (struct p54_tx_control_allocdata *) entry_hdr->data; + if ((entry_hdr->magic1 & cpu_to_le16(0x4000)) != 0) + pad = entry_data->align[0]; + + if (!status.control.flags & IEEE80211_TXCTL_NO_ACK) { + if (!(payload->status & 0x01)) + status.flags |= IEEE80211_TX_STATUS_ACK; + else + status.excessive_retries = 1; + } + status.retry_count = payload->retries - 1; + status.ack_signal = le16_to_cpu(payload->ack_rssi); + skb_pull(entry, sizeof(*hdr) + pad + sizeof(*entry_data)); + ieee80211_tx_status_irqsafe(dev, entry, &status); + break; + } else + last_addr = range->end_addr; + entry = entry->next; + } + + if (freed >= IEEE80211_MAX_RTS_THRESHOLD + 0x170 + + sizeof(struct p54_control_hdr)) + p54_wake_free_queues(dev); +} + +static void p54_rx_control(struct ieee80211_hw *dev, struct sk_buff *skb) +{ + struct p54_control_hdr *hdr = (struct p54_control_hdr *) skb->data; + + switch (le16_to_cpu(hdr->type)) { + case P54_CONTROL_TYPE_TXDONE: + p54_rx_frame_sent(dev, skb); + break; + case P54_CONTROL_TYPE_BBP: + break; + default: + printk(KERN_DEBUG "%s: not handling 0x%02x type control frame\n", + wiphy_name(dev->wiphy), le16_to_cpu(hdr->type)); + break; + } +} + +/* returns zero if skb can be reused */ +int p54_rx(struct ieee80211_hw *dev, struct sk_buff *skb) +{ + u8 type = le16_to_cpu(*((__le16 *)skb->data)) >> 8; + switch (type) { + case 0x00: + case 0x01: + p54_rx_data(dev, skb); + return -1; + case 0x4d: + /* TODO: do something better... but then again, I've never seen this happen */ + printk(KERN_ERR "%s: Received fault. Probably need to restart hardware now..\n", + wiphy_name(dev->wiphy)); + break; + case 0x80: + p54_rx_control(dev, skb); + break; + default: + printk(KERN_ERR "%s: unknown frame RXed (0x%02x)\n", + wiphy_name(dev->wiphy), type); + break; + } + return 0; +} +EXPORT_SYMBOL_GPL(p54_rx); + +/* + * So, the firmware is somewhat stupid and doesn't know what places in its + * memory incoming data should go to. By poking around in the firmware, we + * can find some unused memory to upload our packets to. 
However, data that we + * want the card to TX needs to stay intact until the card has told us that + * it is done with it. This function finds empty places we can upload to and + * marks allocated areas as reserved if necessary. p54_rx_frame_sent frees + * allocated areas. + */ +static void p54_assign_address(struct ieee80211_hw *dev, struct sk_buff *skb, + struct p54_control_hdr *data, u32 len, + struct ieee80211_tx_control *control) +{ + struct p54_common *priv = dev->priv; + struct sk_buff *entry = priv->tx_queue.next; + struct sk_buff *target_skb = NULL; + struct memrecord *range; + u32 last_addr = priv->rx_start; + u32 largest_hole = 0; + u32 target_addr = priv->rx_start; + unsigned long flags; + unsigned int left; + len = (len + 0x170 + 3) & ~0x3; /* 0x70 headroom, 0x100 tailroom */ + + spin_lock_irqsave(&priv->tx_queue.lock, flags); + left = skb_queue_len(&priv->tx_queue); + while (left--) { + u32 hole_size; + range = (struct memrecord *)&entry->cb; + hole_size = range->start_addr - last_addr; + if (!target_skb && hole_size >= len) { + target_skb = entry->prev; + hole_size -= len; + target_addr = last_addr; + } + largest_hole = max(largest_hole, hole_size); + last_addr = range->end_addr; + entry = entry->next; + } + if (!target_skb && priv->rx_end - last_addr >= len) { + target_skb = priv->tx_queue.prev; + largest_hole = max(largest_hole, priv->rx_end - last_addr - len); + if (!skb_queue_empty(&priv->tx_queue)) { + range = (struct memrecord *)&target_skb->cb; + target_addr = range->end_addr; + } + } else + largest_hole = max(largest_hole, priv->rx_end - last_addr); + + if (skb) { + range = (struct memrecord *)&skb->cb; + range->start_addr = target_addr; + range->end_addr = target_addr + len; + range->control = control; + __skb_queue_after(&priv->tx_queue, target_skb, skb); + if (largest_hole < IEEE80211_MAX_RTS_THRESHOLD + 0x170 + + sizeof(struct p54_control_hdr)) + ieee80211_stop_queues(dev); + } + spin_unlock_irqrestore(&priv->tx_queue.lock, flags); + + data->req_id = cpu_to_le32(target_addr + 0x70); +} + +static int p54_tx(struct ieee80211_hw *dev, struct sk_buff *skb, + struct ieee80211_tx_control *control) +{ + struct ieee80211_tx_queue_stats_data *current_queue; + struct p54_common *priv = dev->priv; + struct p54_control_hdr *hdr; + struct p54_tx_control_allocdata *txhdr; + struct ieee80211_tx_control *control_copy; + size_t padding, len; + u8 rate; + + current_queue = &priv->tx_stats.data[control->queue]; + if (unlikely(current_queue->len > current_queue->limit)) + return NETDEV_TX_BUSY; + current_queue->len++; + current_queue->count++; + if (current_queue->len == current_queue->limit) + ieee80211_stop_queue(dev, control->queue); + + padding = (unsigned long)(skb->data - (sizeof(*hdr) + sizeof(*txhdr))) & 3; + len = skb->len; + + control_copy = kmalloc(sizeof(*control), GFP_ATOMIC); + if (control_copy) + memcpy(control_copy, control, sizeof(*control)); + + txhdr = (struct p54_tx_control_allocdata *) + skb_push(skb, sizeof(*txhdr) + padding); + hdr = (struct p54_control_hdr *) skb_push(skb, sizeof(*hdr)); + + if (padding) + hdr->magic1 = cpu_to_le16(0x4010); + else + hdr->magic1 = cpu_to_le16(0x0010); + hdr->len = cpu_to_le16(len); + hdr->type = (control->flags & IEEE80211_TXCTL_NO_ACK) ? 
0 : cpu_to_le16(1); + hdr->retry1 = hdr->retry2 = control->retry_limit; + p54_assign_address(dev, skb, hdr, skb->len, control_copy); + + memset(txhdr->wep_key, 0x0, 16); + txhdr->padding = 0; + txhdr->padding2 = 0; + + /* TODO: add support for alternate retry TX rates */ + rate = control->tx_rate; + if (control->flags & IEEE80211_TXCTL_USE_RTS_CTS) + rate |= 0x40; + else if (control->flags & IEEE80211_TXCTL_USE_CTS_PROTECT) + rate |= 0x20; + memset(txhdr->rateset, rate, 8); + txhdr->wep_key_present = 0; + txhdr->wep_key_len = 0; + txhdr->frame_type = cpu_to_le32(control->queue + 4); + txhdr->magic4 = 0; + txhdr->antenna = (control->antenna_sel_tx == 0) ? + 2 : control->antenna_sel_tx - 1; + txhdr->output_power = 0x7f; // HW Maximum + txhdr->magic5 = (control->flags & IEEE80211_TXCTL_NO_ACK) ? + 0 : ((rate > 0x3) ? cpu_to_le32(0x33) : cpu_to_le32(0x23)); + if (padding) + txhdr->align[0] = padding; + + priv->tx(dev, hdr, skb->len, 0); + return 0; +} + +static int p54_set_filter(struct ieee80211_hw *dev, u16 filter_type, + const u8 *dst, const u8 *src, u8 antenna, + u32 magic3, u32 magic8, u32 magic9) +{ + struct p54_common *priv = dev->priv; + struct p54_control_hdr *hdr; + struct p54_tx_control_filter *filter; + + hdr = kzalloc(sizeof(*hdr) + sizeof(*filter) + + priv->tx_hdr_len, GFP_KERNEL); + if (!hdr) + return -ENOMEM; + + hdr = (void *)hdr + priv->tx_hdr_len; + + filter = (struct p54_tx_control_filter *) hdr->data; + hdr->magic1 = cpu_to_le16(0x8001); + hdr->len = cpu_to_le16(sizeof(*filter)); + p54_assign_address(dev, NULL, hdr, sizeof(*hdr) + sizeof(*filter), NULL); + hdr->type = cpu_to_le16(P54_CONTROL_TYPE_FILTER_SET); + + filter->filter_type = cpu_to_le16(filter_type); + memcpy(filter->dst, dst, ETH_ALEN); + if (!src) + memset(filter->src, ~0, ETH_ALEN); + else + memcpy(filter->src, src, ETH_ALEN); + filter->antenna = antenna; + filter->magic3 = cpu_to_le32(magic3); + filter->rx_addr = cpu_to_le32(priv->rx_end); + filter->max_rx = cpu_to_le16(0x0620); /* FIXME: for usb ver 1.. 
maybe */ + filter->rxhw = priv->rxhw; + filter->magic8 = cpu_to_le16(magic8); + filter->magic9 = cpu_to_le16(magic9); + + priv->tx(dev, hdr, sizeof(*hdr) + sizeof(*filter), 1); + return 0; +} + +static int p54_set_freq(struct ieee80211_hw *dev, __le16 freq) +{ + struct p54_common *priv = dev->priv; + struct p54_control_hdr *hdr; + struct p54_tx_control_channel *chan; + unsigned int i; + size_t payload_len = sizeof(*chan) + sizeof(u32)*2 + + sizeof(*chan->curve_data) * + priv->curve_data->points_per_channel; + void *entry; + + hdr = kzalloc(sizeof(*hdr) + payload_len + + priv->tx_hdr_len, GFP_KERNEL); + if (!hdr) + return -ENOMEM; + + hdr = (void *)hdr + priv->tx_hdr_len; + + chan = (struct p54_tx_control_channel *) hdr->data; + + hdr->magic1 = cpu_to_le16(0x8001); + hdr->len = cpu_to_le16(sizeof(*chan)); + hdr->type = cpu_to_le16(P54_CONTROL_TYPE_CHANNEL_CHANGE); + p54_assign_address(dev, NULL, hdr, sizeof(*hdr) + payload_len, NULL); + + chan->magic1 = cpu_to_le16(0x1); + chan->magic2 = cpu_to_le16(0x0); + + for (i = 0; i < priv->iq_autocal_len; i++) { + if (priv->iq_autocal[i].freq != freq) + continue; + + memcpy(&chan->iq_autocal, &priv->iq_autocal[i], + sizeof(*priv->iq_autocal)); + break; + } + if (i == priv->iq_autocal_len) + goto err; + + for (i = 0; i < priv->output_limit_len; i++) { + if (priv->output_limit[i].freq != freq) + continue; + + chan->val_barker = 0x38; + chan->val_bpsk = priv->output_limit[i].val_bpsk; + chan->val_qpsk = priv->output_limit[i].val_qpsk; + chan->val_16qam = priv->output_limit[i].val_16qam; + chan->val_64qam = priv->output_limit[i].val_64qam; + break; + } + if (i == priv->output_limit_len) + goto err; + + chan->pa_points_per_curve = priv->curve_data->points_per_channel; + + entry = priv->curve_data->data; + for (i = 0; i < priv->curve_data->channels; i++) { + if (*((__le16 *)entry) != freq) { + entry += sizeof(__le16); + entry += sizeof(struct pda_pa_curve_data_sample_rev1) * + chan->pa_points_per_curve; + continue; + } + + entry += sizeof(__le16); + memcpy(chan->curve_data, entry, sizeof(*chan->curve_data) * + chan->pa_points_per_curve); + break; + } + + memcpy(hdr->data + payload_len - 4, &chan->val_bpsk, 4); + + priv->tx(dev, hdr, sizeof(*hdr) + payload_len, 1); + return 0; + + err: + printk(KERN_ERR "%s: frequency change failed\n", wiphy_name(dev->wiphy)); + kfree(hdr); + return -EINVAL; +} + +static int p54_set_leds(struct ieee80211_hw *dev, int mode, int link, int act) +{ + struct p54_common *priv = dev->priv; + struct p54_control_hdr *hdr; + struct p54_tx_control_led *led; + + hdr = kzalloc(sizeof(*hdr) + sizeof(*led) + + priv->tx_hdr_len, GFP_KERNEL); + if (!hdr) + return -ENOMEM; + + hdr = (void *)hdr + priv->tx_hdr_len; + hdr->magic1 = cpu_to_le16(0x8001); + hdr->len = cpu_to_le16(sizeof(*led)); + hdr->type = cpu_to_le16(P54_CONTROL_TYPE_LED); + p54_assign_address(dev, NULL, hdr, sizeof(*hdr) + sizeof(*led), NULL); + + led = (struct p54_tx_control_led *) hdr->data; + led->mode = cpu_to_le16(mode); + led->led_permanent = cpu_to_le16(link); + led->led_temporary = cpu_to_le16(act); + led->duration = cpu_to_le16(1000); + + priv->tx(dev, hdr, sizeof(*hdr) + sizeof(*led), 1); + + return 0; +} + +#define P54_SET_QUEUE(queue, ai_fs, cw_min, cw_max, burst) \ +do { \ + queue.aifs = cpu_to_le16(ai_fs); \ + queue.cwmin = cpu_to_le16(cw_min); \ + queue.cwmax = cpu_to_le16(cw_max); \ + queue.txop = (burst == 0) ? 
\ + 0 : cpu_to_le16((burst * 100) / 32 + 1); \ +} while(0) + +static void p54_init_vdcf(struct ieee80211_hw *dev) +{ + struct p54_common *priv = dev->priv; + struct p54_control_hdr *hdr; + struct p54_tx_control_vdcf *vdcf; + + /* all USB V1 adapters need a extra headroom */ + hdr = (void *)priv->cached_vdcf + priv->tx_hdr_len; + hdr->magic1 = cpu_to_le16(0x8001); + hdr->len = cpu_to_le16(sizeof(*vdcf)); + hdr->type = cpu_to_le16(P54_CONTROL_TYPE_DCFINIT); + hdr->req_id = cpu_to_le32(priv->rx_start); + + vdcf = (struct p54_tx_control_vdcf *) hdr->data; + + P54_SET_QUEUE(vdcf->queue[0], 0x0002, 0x0003, 0x0007, 0x000f); + P54_SET_QUEUE(vdcf->queue[1], 0x0002, 0x0007, 0x000f, 0x001e); + P54_SET_QUEUE(vdcf->queue[2], 0x0002, 0x000f, 0x03ff, 0x0014); + P54_SET_QUEUE(vdcf->queue[3], 0x0007, 0x000f, 0x03ff, 0x0000); +} + +static void p54_set_vdcf(struct ieee80211_hw *dev) +{ + struct p54_common *priv = dev->priv; + struct p54_control_hdr *hdr; + struct p54_tx_control_vdcf *vdcf; + + hdr = (void *)priv->cached_vdcf + priv->tx_hdr_len; + + p54_assign_address(dev, NULL, hdr, sizeof(*hdr) + sizeof(*vdcf), NULL); + + vdcf = (struct p54_tx_control_vdcf *) hdr->data; + + if (dev->conf.flags & IEEE80211_CONF_SHORT_SLOT_TIME) { + vdcf->slottime = 9; + vdcf->magic1 = 0x00; + vdcf->magic2 = 0x10; + } else { + vdcf->slottime = 20; + vdcf->magic1 = 0x0a; + vdcf->magic2 = 0x06; + } + + /* (see prism54/isl_oid.h for further details) */ + vdcf->frameburst = cpu_to_le16(0); + + priv->tx(dev, hdr, sizeof(*hdr) + sizeof(*vdcf), 0); +} + +static int p54_start(struct ieee80211_hw *dev) +{ + struct p54_common *priv = dev->priv; + int err; + + err = priv->open(dev); + if (!err) + priv->mode = IEEE80211_IF_TYPE_MNTR; + + return err; +} + +static void p54_stop(struct ieee80211_hw *dev) +{ + struct p54_common *priv = dev->priv; + struct sk_buff *skb; + while ((skb = skb_dequeue(&priv->tx_queue))) { + struct memrecord *range = (struct memrecord *)&skb->cb; + if (range->control) + kfree(range->control); + kfree_skb(skb); + } + priv->stop(dev); + priv->mode = IEEE80211_IF_TYPE_INVALID; +} + +static int p54_add_interface(struct ieee80211_hw *dev, + struct ieee80211_if_init_conf *conf) +{ + struct p54_common *priv = dev->priv; + + if (priv->mode != IEEE80211_IF_TYPE_MNTR) + return -EOPNOTSUPP; + + switch (conf->type) { + case IEEE80211_IF_TYPE_STA: + priv->mode = conf->type; + break; + default: + return -EOPNOTSUPP; + } + + memcpy(priv->mac_addr, conf->mac_addr, ETH_ALEN); + + p54_set_filter(dev, 0, priv->mac_addr, NULL, 0, 1, 0, 0xF642); + p54_set_filter(dev, 0, priv->mac_addr, NULL, 1, 0, 0, 0xF642); + + switch (conf->type) { + case IEEE80211_IF_TYPE_STA: + p54_set_filter(dev, 1, priv->mac_addr, NULL, 0, 0x15F, 0x1F4, 0); + break; + default: + BUG(); /* impossible */ + break; + } + + p54_set_leds(dev, 1, 0, 0); + + return 0; +} + +static void p54_remove_interface(struct ieee80211_hw *dev, + struct ieee80211_if_init_conf *conf) +{ + struct p54_common *priv = dev->priv; + priv->mode = IEEE80211_IF_TYPE_MNTR; + memset(priv->mac_addr, 0, ETH_ALEN); + p54_set_filter(dev, 0, priv->mac_addr, NULL, 2, 0, 0, 0); +} + +static int p54_config(struct ieee80211_hw *dev, struct ieee80211_conf *conf) +{ + int ret; + + ret = p54_set_freq(dev, cpu_to_le16(conf->freq)); + p54_set_vdcf(dev); + return ret; +} + +static int p54_config_interface(struct ieee80211_hw *dev, int if_id, + struct ieee80211_if_conf *conf) +{ + struct p54_common *priv = dev->priv; + + p54_set_filter(dev, 0, priv->mac_addr, conf->bssid, 0, 1, 0, 0xF642); + 
p54_set_filter(dev, 0, priv->mac_addr, conf->bssid, 2, 0, 0, 0); + p54_set_leds(dev, 1, !is_multicast_ether_addr(conf->bssid), 0); + memcpy(priv->bssid, conf->bssid, ETH_ALEN); + return 0; +} + +static void p54_configure_filter(struct ieee80211_hw *dev, + unsigned int changed_flags, + unsigned int *total_flags, + int mc_count, struct dev_mc_list *mclist) +{ + struct p54_common *priv = dev->priv; + + *total_flags &= FIF_BCN_PRBRESP_PROMISC; + + if (changed_flags & FIF_BCN_PRBRESP_PROMISC) { + if (*total_flags & FIF_BCN_PRBRESP_PROMISC) + p54_set_filter(dev, 0, priv->mac_addr, + NULL, 2, 0, 0, 0); + else + p54_set_filter(dev, 0, priv->mac_addr, + priv->bssid, 2, 0, 0, 0); + } +} + +static int p54_conf_tx(struct ieee80211_hw *dev, int queue, + const struct ieee80211_tx_queue_params *params) +{ + struct p54_common *priv = dev->priv; + struct p54_tx_control_vdcf *vdcf; + + vdcf = (struct p54_tx_control_vdcf *)(((struct p54_control_hdr *) + ((void *)priv->cached_vdcf + priv->tx_hdr_len))->data); + + if ((params) && !((queue < 0) || (queue > 4))) { + P54_SET_QUEUE(vdcf->queue[queue], params->aifs, + params->cw_min, params->cw_max, params->burst_time); + } else + return -EINVAL; + + p54_set_vdcf(dev); + + return 0; +} + +static int p54_get_stats(struct ieee80211_hw *dev, + struct ieee80211_low_level_stats *stats) +{ + /* TODO */ + return 0; +} + +static int p54_get_tx_stats(struct ieee80211_hw *dev, + struct ieee80211_tx_queue_stats *stats) +{ + struct p54_common *priv = dev->priv; + unsigned int i; + + for (i = 0; i < dev->queues; i++) + memcpy(&stats->data[i], &priv->tx_stats.data[i], + sizeof(stats->data[i])); + + return 0; +} + +static const struct ieee80211_ops p54_ops = { + .tx = p54_tx, + .start = p54_start, + .stop = p54_stop, + .add_interface = p54_add_interface, + .remove_interface = p54_remove_interface, + .config = p54_config, + .config_interface = p54_config_interface, + .configure_filter = p54_configure_filter, + .conf_tx = p54_conf_tx, + .get_stats = p54_get_stats, + .get_tx_stats = p54_get_tx_stats +}; + +struct ieee80211_hw *p54_init_common(size_t priv_data_len) +{ + struct ieee80211_hw *dev; + struct p54_common *priv; + int i; + + dev = ieee80211_alloc_hw(priv_data_len, &p54_ops); + if (!dev) + return NULL; + + priv = dev->priv; + priv->mode = IEEE80211_IF_TYPE_INVALID; + skb_queue_head_init(&priv->tx_queue); + memcpy(priv->channels, p54_channels, sizeof(p54_channels)); + memcpy(priv->rates, p54_rates, sizeof(p54_rates)); + priv->modes[1].mode = MODE_IEEE80211B; + priv->modes[1].num_rates = 4; + priv->modes[1].rates = priv->rates; + priv->modes[1].num_channels = ARRAY_SIZE(p54_channels); + priv->modes[1].channels = priv->channels; + priv->modes[0].mode = MODE_IEEE80211G; + priv->modes[0].num_rates = ARRAY_SIZE(p54_rates); + priv->modes[0].rates = priv->rates; + priv->modes[0].num_channels = ARRAY_SIZE(p54_channels); + priv->modes[0].channels = priv->channels; + dev->flags = IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING | /* not sure */ + IEEE80211_HW_RX_INCLUDES_FCS; + dev->channel_change_time = 1000; /* TODO: find actual value */ + dev->max_rssi = 127; + + priv->tx_stats.data[0].limit = 5; + dev->queues = 1; + + dev->extra_tx_headroom = sizeof(struct p54_control_hdr) + 4 + + sizeof(struct p54_tx_control_allocdata); + + priv->cached_vdcf = kzalloc(sizeof(struct p54_tx_control_vdcf) + + priv->tx_hdr_len + sizeof(struct p54_control_hdr), GFP_KERNEL); + + if (!priv->cached_vdcf) { + ieee80211_free_hw(dev); + return NULL; + } + + p54_init_vdcf(dev); + + for (i = 0; i < 2; i++) { + if 
(ieee80211_register_hwmode(dev, &priv->modes[i])) { + kfree(priv->cached_vdcf); + ieee80211_free_hw(dev); + return NULL; + } + } + + return dev; +} +EXPORT_SYMBOL_GPL(p54_init_common); + +void p54_free_common(struct ieee80211_hw *dev) +{ + struct p54_common *priv = dev->priv; + kfree(priv->iq_autocal); + kfree(priv->output_limit); + kfree(priv->curve_data); + kfree(priv->cached_vdcf); +} +EXPORT_SYMBOL_GPL(p54_free_common); + +static int __init p54_init(void) +{ + return 0; +} + +static void __exit p54_exit(void) +{ +} + +module_init(p54_init); +module_exit(p54_exit); diff --git a/drivers/net/wireless/p54common.h b/drivers/net/wireless/p54common.h new file mode 100644 index 0000000000000000000000000000000000000000..a721334e20d96db17af5e4dd760fdaf90f00c4a0 --- /dev/null +++ b/drivers/net/wireless/p54common.h @@ -0,0 +1,329 @@ +#ifndef PRISM54COMMON_H +#define PRISM54COMMON_H + +/* + * Common code specific definitions for mac80211 Prism54 drivers + * + * Copyright (c) 2006, Michael Wu + * Copyright (c) 2007, Christian Lamparter + * + * Based on the islsm (softmac prism54) driver, which is: + * Copyright 2004-2006 Jean-Baptiste Note , et al. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +struct bootrec { + __le32 code; + __le32 len; + u32 data[0]; +} __attribute__((packed)); + +struct bootrec_exp_if { + __le16 role; + __le16 if_id; + __le16 variant; + __le16 btm_compat; + __le16 top_compat; +} __attribute__((packed)); + +#define BR_CODE_MIN 0x80000000 +#define BR_CODE_COMPONENT_ID 0x80000001 +#define BR_CODE_COMPONENT_VERSION 0x80000002 +#define BR_CODE_DEPENDENT_IF 0x80000003 +#define BR_CODE_EXPOSED_IF 0x80000004 +#define BR_CODE_DESCR 0x80000101 +#define BR_CODE_MAX 0x8FFFFFFF +#define BR_CODE_END_OF_BRA 0xFF0000FF +#define LEGACY_BR_CODE_END_OF_BRA 0xFFFFFFFF + +#define FW_FMAC 0x464d4143 +#define FW_LM86 0x4c4d3836 +#define FW_LM87 0x4c4d3837 +#define FW_LM20 0x4c4d3230 + +/* PDA defines are Copyright (C) 2005 Nokia Corporation (taken from islsm_pda.h) */ + +struct pda_entry { + __le16 len; /* includes both code and data */ + __le16 code; + u8 data[0]; +} __attribute__ ((packed)); + +struct eeprom_pda_wrap { + u32 magic; + u16 pad; + u16 len; + u32 arm_opcode; + u8 data[0]; +} __attribute__ ((packed)); + +struct pda_iq_autocal_entry { + __le16 freq; + __le16 iq_param[4]; +} __attribute__ ((packed)); + +struct pda_channel_output_limit { + __le16 freq; + u8 val_bpsk; + u8 val_qpsk; + u8 val_16qam; + u8 val_64qam; + u8 rate_set_mask; + u8 rate_set_size; +} __attribute__ ((packed)); + +struct pda_pa_curve_data_sample_rev0 { + u8 rf_power; + u8 pa_detector; + u8 pcv; +} __attribute__ ((packed)); + +struct pda_pa_curve_data_sample_rev1 { + u8 rf_power; + u8 pa_detector; + u8 data_barker; + u8 data_bpsk; + u8 data_qpsk; + u8 data_16qam; + u8 data_64qam; + u8 padding; +} __attribute__ ((packed)); + +struct pda_pa_curve_data { + u8 cal_method_rev; + u8 channels; + u8 points_per_channel; + u8 padding; + u8 data[0]; +} __attribute__ ((packed)); + +/* + * this defines the PDR codes used to build PDAs as defined in document + * number 553155. The current implementation mirrors version 1.1 of the + * document and lists only PDRs supported by the ARM platform. 
+ */ + +/* common and choice range (0x0000 - 0x0fff) */ +#define PDR_END 0x0000 +#define PDR_MANUFACTURING_PART_NUMBER 0x0001 +#define PDR_PDA_VERSION 0x0002 +#define PDR_NIC_SERIAL_NUMBER 0x0003 + +#define PDR_MAC_ADDRESS 0x0101 +#define PDR_REGULATORY_DOMAIN_LIST 0x0103 +#define PDR_TEMPERATURE_TYPE 0x0107 + +#define PDR_PRISM_PCI_IDENTIFIER 0x0402 + +/* ARM range (0x1000 - 0x1fff) */ +#define PDR_COUNTRY_INFORMATION 0x1000 +#define PDR_INTERFACE_LIST 0x1001 +#define PDR_HARDWARE_PLATFORM_COMPONENT_ID 0x1002 +#define PDR_OEM_NAME 0x1003 +#define PDR_PRODUCT_NAME 0x1004 +#define PDR_UTF8_OEM_NAME 0x1005 +#define PDR_UTF8_PRODUCT_NAME 0x1006 +#define PDR_COUNTRY_LIST 0x1007 +#define PDR_DEFAULT_COUNTRY 0x1008 + +#define PDR_ANTENNA_GAIN 0x1100 + +#define PDR_PRISM_INDIGO_PA_CALIBRATION_DATA 0x1901 +#define PDR_RSSI_LINEAR_APPROXIMATION 0x1902 +#define PDR_PRISM_PA_CAL_OUTPUT_POWER_LIMITS 0x1903 +#define PDR_PRISM_PA_CAL_CURVE_DATA 0x1904 +#define PDR_RSSI_LINEAR_APPROXIMATION_DUAL_BAND 0x1905 +#define PDR_PRISM_ZIF_TX_IQ_CALIBRATION 0x1906 +#define PDR_REGULATORY_POWER_LIMITS 0x1907 +#define PDR_RSSI_LINEAR_APPROXIMATION_EXTENDED 0x1908 +#define PDR_RADIATED_TRANSMISSION_CORRECTION 0x1909 +#define PDR_PRISM_TX_IQ_CALIBRATION 0x190a + +/* reserved range (0x2000 - 0x7fff) */ + +/* customer range (0x8000 - 0xffff) */ +#define PDR_BASEBAND_REGISTERS 0x8000 +#define PDR_PER_CHANNEL_BASEBAND_REGISTERS 0x8001 + +/* stored in skb->cb */ +struct memrecord { + u32 start_addr; + u32 end_addr; + struct ieee80211_tx_control *control; +}; + +struct p54_eeprom_lm86 { + __le16 offset; + __le16 len; + u8 data[0]; +} __attribute__ ((packed)); + +struct p54_rx_hdr { + __le16 magic; + __le16 len; + __le16 freq; + u8 antenna; + u8 rate; + u8 rssi; + u8 quality; + u16 unknown2; + __le64 timestamp; + u8 data[0]; +} __attribute__ ((packed)); + +struct p54_frame_sent_hdr { + u8 status; + u8 retries; + __le16 ack_rssi; + __le16 seq; + u16 rate; +} __attribute__ ((packed)); + +struct p54_tx_control_allocdata { + u8 rateset[8]; + u16 padding; + u8 wep_key_present; + u8 wep_key_len; + u8 wep_key[16]; + __le32 frame_type; + u32 padding2; + __le16 magic4; + u8 antenna; + u8 output_power; + __le32 magic5; + u8 align[0]; +} __attribute__ ((packed)); + +struct p54_tx_control_filter { + __le16 filter_type; + u8 dst[ETH_ALEN]; + u8 src[ETH_ALEN]; + u8 antenna; + u8 debug; + __le32 magic3; + u8 rates[8]; // FIXME: what's this for? 
+ __le32 rx_addr; + __le16 max_rx; + __le16 rxhw; + __le16 magic8; + __le16 magic9; +} __attribute__ ((packed)); + +struct p54_tx_control_channel { + __le16 magic1; + __le16 magic2; + u8 padding1[20]; + struct pda_iq_autocal_entry iq_autocal; + u8 pa_points_per_curve; + u8 val_barker; + u8 val_bpsk; + u8 val_qpsk; + u8 val_16qam; + u8 val_64qam; + struct pda_pa_curve_data_sample_rev1 curve_data[0]; + /* additional padding/data after curve_data */ +} __attribute__ ((packed)); + +struct p54_tx_control_led { + __le16 mode; + __le16 led_temporary; + __le16 led_permanent; + __le16 duration; +} __attribute__ ((packed)); + +struct p54_tx_vdcf_queues { + __le16 aifs; + __le16 cwmin; + __le16 cwmax; + __le16 txop; +} __attribute__ ((packed)); + +struct p54_tx_control_vdcf { + u8 padding; + u8 slottime; + u8 magic1; + u8 magic2; + struct p54_tx_vdcf_queues queue[8]; + u8 pad2[4]; + __le16 frameburst; +} __attribute__ ((packed)); + +static const struct ieee80211_rate p54_rates[] = { + { .rate = 10, + .val = 0, + .val2 = 0x10, + .flags = IEEE80211_RATE_CCK_2 }, + { .rate = 20, + .val = 1, + .val2 = 0x11, + .flags = IEEE80211_RATE_CCK_2 }, + { .rate = 55, + .val = 2, + .val2 = 0x12, + .flags = IEEE80211_RATE_CCK_2 }, + { .rate = 110, + .val = 3, + .val2 = 0x13, + .flags = IEEE80211_RATE_CCK_2 }, + { .rate = 60, + .val = 4, + .flags = IEEE80211_RATE_OFDM }, + { .rate = 90, + .val = 5, + .flags = IEEE80211_RATE_OFDM }, + { .rate = 120, + .val = 6, + .flags = IEEE80211_RATE_OFDM }, + { .rate = 180, + .val = 7, + .flags = IEEE80211_RATE_OFDM }, + { .rate = 240, + .val = 8, + .flags = IEEE80211_RATE_OFDM }, + { .rate = 360, + .val = 9, + .flags = IEEE80211_RATE_OFDM }, + { .rate = 480, + .val = 10, + .flags = IEEE80211_RATE_OFDM }, + { .rate = 540, + .val = 11, + .flags = IEEE80211_RATE_OFDM }, +}; + +// TODO: just generate this.. +static const struct ieee80211_channel p54_channels[] = { + { .chan = 1, + .freq = 2412}, + { .chan = 2, + .freq = 2417}, + { .chan = 3, + .freq = 2422}, + { .chan = 4, + .freq = 2427}, + { .chan = 5, + .freq = 2432}, + { .chan = 6, + .freq = 2437}, + { .chan = 7, + .freq = 2442}, + { .chan = 8, + .freq = 2447}, + { .chan = 9, + .freq = 2452}, + { .chan = 10, + .freq = 2457}, + { .chan = 11, + .freq = 2462}, + { .chan = 12, + .freq = 2467}, + { .chan = 13, + .freq = 2472}, + { .chan = 14, + .freq = 2484} +}; + +#endif /* PRISM54COMMON_H */ diff --git a/drivers/net/wireless/p54pci.c b/drivers/net/wireless/p54pci.c new file mode 100644 index 0000000000000000000000000000000000000000..410b54387f23672f04225d4e11c020f0830825b0 --- /dev/null +++ b/drivers/net/wireless/p54pci.c @@ -0,0 +1,692 @@ + +/* + * Linux device driver for PCI based Prism54 + * + * Copyright (c) 2006, Michael Wu + * + * Based on the islsm (softmac prism54) driver, which is: + * Copyright 2004-2006 Jean-Baptiste Note , et al. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +#include +#include +#include +#include +#include +#include +#include + +#include "p54.h" +#include "p54pci.h" + +MODULE_AUTHOR("Michael Wu "); +MODULE_DESCRIPTION("Prism54 PCI wireless driver"); +MODULE_LICENSE("GPL"); +MODULE_ALIAS("prism54pci"); + +static struct pci_device_id p54p_table[] __devinitdata = { + /* Intersil PRISM Duette/Prism GT Wireless LAN adapter */ + { PCI_DEVICE(0x1260, 0x3890) }, + /* 3COM 3CRWE154G72 Wireless LAN adapter */ + { PCI_DEVICE(0x10b7, 0x6001) }, + /* Intersil PRISM Indigo Wireless LAN adapter */ + { PCI_DEVICE(0x1260, 0x3877) }, + /* Intersil PRISM Javelin/Xbow Wireless LAN adapter */ + { PCI_DEVICE(0x1260, 0x3886) }, + { }, +}; + +MODULE_DEVICE_TABLE(pci, p54p_table); + +static int p54p_upload_firmware(struct ieee80211_hw *dev) +{ + struct p54p_priv *priv = dev->priv; + const struct firmware *fw_entry = NULL; + __le32 reg; + int err; + u32 *data; + u32 remains, left, device_addr; + + P54P_WRITE(int_enable, 0); + P54P_READ(int_enable); + udelay(10); + + reg = P54P_READ(ctrl_stat); + reg &= cpu_to_le32(~ISL38XX_CTRL_STAT_RESET); + reg &= cpu_to_le32(~ISL38XX_CTRL_STAT_RAMBOOT); + P54P_WRITE(ctrl_stat, reg); + P54P_READ(ctrl_stat); + udelay(10); + + reg |= cpu_to_le32(ISL38XX_CTRL_STAT_RESET); + P54P_WRITE(ctrl_stat, reg); + wmb(); + udelay(10); + + reg &= cpu_to_le32(~ISL38XX_CTRL_STAT_RESET); + P54P_WRITE(ctrl_stat, reg); + wmb(); + + mdelay(50); + + err = request_firmware(&fw_entry, "isl3886", &priv->pdev->dev); + if (err) { + printk(KERN_ERR "%s (prism54pci): cannot find firmware " + "(isl3886)\n", pci_name(priv->pdev)); + return err; + } + + p54_parse_firmware(dev, fw_entry); + + data = (u32 *) fw_entry->data; + remains = fw_entry->size; + device_addr = ISL38XX_DEV_FIRMWARE_ADDR; + while (remains) { + u32 i = 0; + left = min((u32)0x1000, remains); + P54P_WRITE(direct_mem_base, cpu_to_le32(device_addr)); + P54P_READ(int_enable); + + device_addr += 0x1000; + while (i < left) { + P54P_WRITE(direct_mem_win[i], *data++); + i += sizeof(u32); + } + + remains -= left; + P54P_READ(int_enable); + } + + release_firmware(fw_entry); + + reg = P54P_READ(ctrl_stat); + reg &= cpu_to_le32(~ISL38XX_CTRL_STAT_CLKRUN); + reg &= cpu_to_le32(~ISL38XX_CTRL_STAT_RESET); + reg |= cpu_to_le32(ISL38XX_CTRL_STAT_RAMBOOT); + P54P_WRITE(ctrl_stat, reg); + P54P_READ(ctrl_stat); + udelay(10); + + reg |= cpu_to_le32(ISL38XX_CTRL_STAT_RESET); + P54P_WRITE(ctrl_stat, reg); + wmb(); + udelay(10); + + reg &= cpu_to_le32(~ISL38XX_CTRL_STAT_RESET); + P54P_WRITE(ctrl_stat, reg); + wmb(); + udelay(10); + + return 0; +} + +static irqreturn_t p54p_simple_interrupt(int irq, void *dev_id) +{ + struct p54p_priv *priv = (struct p54p_priv *) dev_id; + __le32 reg; + + reg = P54P_READ(int_ident); + P54P_WRITE(int_ack, reg); + + if (reg & P54P_READ(int_enable)) + complete(&priv->boot_comp); + + return IRQ_HANDLED; +} + +static int p54p_read_eeprom(struct ieee80211_hw *dev) +{ + struct p54p_priv *priv = dev->priv; + int err; + struct p54_control_hdr *hdr; + void *eeprom; + dma_addr_t rx_mapping, tx_mapping; + u16 alen; + + init_completion(&priv->boot_comp); + err = request_irq(priv->pdev->irq, &p54p_simple_interrupt, + IRQF_SHARED, "prism54pci", priv); + if (err) { + printk(KERN_ERR "%s (prism54pci): failed to register IRQ handler\n", + pci_name(priv->pdev)); + return err; + } + + eeprom = kmalloc(0x2010 + EEPROM_READBACK_LEN, GFP_KERNEL); + if (!eeprom) { + printk(KERN_ERR "%s (prism54pci): no memory for eeprom!\n", + pci_name(priv->pdev)); + err = -ENOMEM; + goto out; + } + + 
memset(priv->ring_control, 0, sizeof(*priv->ring_control)); + P54P_WRITE(ring_control_base, priv->ring_control_dma); + P54P_READ(ring_control_base); + udelay(10); + + P54P_WRITE(int_enable, cpu_to_le32(ISL38XX_INT_IDENT_INIT)); + P54P_READ(int_enable); + udelay(10); + + P54P_WRITE(dev_int, cpu_to_le32(ISL38XX_DEV_INT_RESET)); + + if (!wait_for_completion_interruptible_timeout(&priv->boot_comp, HZ)) { + printk(KERN_ERR "%s (prism54pci): Cannot boot firmware!\n", + pci_name(priv->pdev)); + err = -EINVAL; + goto out; + } + + P54P_WRITE(int_enable, cpu_to_le32(ISL38XX_INT_IDENT_UPDATE)); + P54P_READ(int_enable); + + hdr = eeprom + 0x2010; + p54_fill_eeprom_readback(hdr); + hdr->req_id = cpu_to_le32(priv->common.rx_start); + + rx_mapping = pci_map_single(priv->pdev, eeprom, + 0x2010, PCI_DMA_FROMDEVICE); + tx_mapping = pci_map_single(priv->pdev, (void *)hdr, + EEPROM_READBACK_LEN, PCI_DMA_TODEVICE); + + priv->ring_control->rx_mgmt[0].host_addr = cpu_to_le32(rx_mapping); + priv->ring_control->rx_mgmt[0].len = cpu_to_le16(0x2010); + priv->ring_control->tx_data[0].host_addr = cpu_to_le32(tx_mapping); + priv->ring_control->tx_data[0].device_addr = hdr->req_id; + priv->ring_control->tx_data[0].len = cpu_to_le16(EEPROM_READBACK_LEN); + + priv->ring_control->host_idx[2] = cpu_to_le32(1); + priv->ring_control->host_idx[1] = cpu_to_le32(1); + + wmb(); + mdelay(100); + P54P_WRITE(dev_int, cpu_to_le32(ISL38XX_DEV_INT_UPDATE)); + + wait_for_completion_interruptible_timeout(&priv->boot_comp, HZ); + wait_for_completion_interruptible_timeout(&priv->boot_comp, HZ); + + pci_unmap_single(priv->pdev, tx_mapping, + EEPROM_READBACK_LEN, PCI_DMA_TODEVICE); + pci_unmap_single(priv->pdev, rx_mapping, + 0x2010, PCI_DMA_FROMDEVICE); + + alen = le16_to_cpu(priv->ring_control->rx_mgmt[0].len); + if (le32_to_cpu(priv->ring_control->device_idx[2]) != 1 || + alen < 0x10) { + printk(KERN_ERR "%s (prism54pci): Cannot read eeprom!\n", + pci_name(priv->pdev)); + err = -EINVAL; + goto out; + } + + p54_parse_eeprom(dev, (u8 *)eeprom + 0x10, alen - 0x10); + + out: + kfree(eeprom); + P54P_WRITE(int_enable, 0); + P54P_READ(int_enable); + udelay(10); + free_irq(priv->pdev->irq, priv); + P54P_WRITE(dev_int, cpu_to_le32(ISL38XX_DEV_INT_RESET)); + return err; +} + +static void p54p_refill_rx_ring(struct ieee80211_hw *dev) +{ + struct p54p_priv *priv = dev->priv; + u32 limit, host_idx, idx; + + host_idx = le32_to_cpu(priv->ring_control->host_idx[0]); + limit = host_idx; + limit -= le32_to_cpu(priv->ring_control->device_idx[0]); + limit = ARRAY_SIZE(priv->ring_control->rx_data) - limit; + + idx = host_idx % ARRAY_SIZE(priv->ring_control->rx_data); + while (limit-- > 1) { + struct p54p_desc *desc = &priv->ring_control->rx_data[idx]; + + if (!desc->host_addr) { + struct sk_buff *skb; + dma_addr_t mapping; + skb = dev_alloc_skb(MAX_RX_SIZE); + if (!skb) + break; + + mapping = pci_map_single(priv->pdev, + skb_tail_pointer(skb), + MAX_RX_SIZE, + PCI_DMA_FROMDEVICE); + desc->host_addr = cpu_to_le32(mapping); + desc->device_addr = 0; // FIXME: necessary? 
+ desc->len = cpu_to_le16(MAX_RX_SIZE); + desc->flags = 0; + priv->rx_buf[idx] = skb; + } + + idx++; + host_idx++; + idx %= ARRAY_SIZE(priv->ring_control->rx_data); + } + + wmb(); + priv->ring_control->host_idx[0] = cpu_to_le32(host_idx); +} + +static irqreturn_t p54p_interrupt(int irq, void *dev_id) +{ + struct ieee80211_hw *dev = dev_id; + struct p54p_priv *priv = dev->priv; + __le32 reg; + + spin_lock(&priv->lock); + reg = P54P_READ(int_ident); + if (unlikely(reg == 0xFFFFFFFF)) { + spin_unlock(&priv->lock); + return IRQ_HANDLED; + } + + P54P_WRITE(int_ack, reg); + + reg &= P54P_READ(int_enable); + + if (reg & cpu_to_le32(ISL38XX_INT_IDENT_UPDATE)) { + struct p54p_desc *desc; + u32 idx, i; + i = priv->tx_idx; + i %= ARRAY_SIZE(priv->ring_control->tx_data); + priv->tx_idx = idx = le32_to_cpu(priv->ring_control->device_idx[1]); + idx %= ARRAY_SIZE(priv->ring_control->tx_data); + + while (i != idx) { + desc = &priv->ring_control->tx_data[i]; + if (priv->tx_buf[i]) { + kfree(priv->tx_buf[i]); + priv->tx_buf[i] = NULL; + } + + pci_unmap_single(priv->pdev, le32_to_cpu(desc->host_addr), + le16_to_cpu(desc->len), PCI_DMA_TODEVICE); + + desc->host_addr = 0; + desc->device_addr = 0; + desc->len = 0; + desc->flags = 0; + + i++; + i %= ARRAY_SIZE(priv->ring_control->tx_data); + } + + i = priv->rx_idx; + i %= ARRAY_SIZE(priv->ring_control->rx_data); + priv->rx_idx = idx = le32_to_cpu(priv->ring_control->device_idx[0]); + idx %= ARRAY_SIZE(priv->ring_control->rx_data); + while (i != idx) { + u16 len; + struct sk_buff *skb; + desc = &priv->ring_control->rx_data[i]; + len = le16_to_cpu(desc->len); + skb = priv->rx_buf[i]; + + skb_put(skb, len); + + if (p54_rx(dev, skb)) { + pci_unmap_single(priv->pdev, + le32_to_cpu(desc->host_addr), + MAX_RX_SIZE, PCI_DMA_FROMDEVICE); + + priv->rx_buf[i] = NULL; + desc->host_addr = 0; + } else { + skb_trim(skb, 0); + desc->len = cpu_to_le16(MAX_RX_SIZE); + } + + i++; + i %= ARRAY_SIZE(priv->ring_control->rx_data); + } + + p54p_refill_rx_ring(dev); + + wmb(); + P54P_WRITE(dev_int, cpu_to_le32(ISL38XX_DEV_INT_UPDATE)); + } else if (reg & cpu_to_le32(ISL38XX_INT_IDENT_INIT)) + complete(&priv->boot_comp); + + spin_unlock(&priv->lock); + + return reg ? 
IRQ_HANDLED : IRQ_NONE; +} + +static void p54p_tx(struct ieee80211_hw *dev, struct p54_control_hdr *data, + size_t len, int free_on_tx) +{ + struct p54p_priv *priv = dev->priv; + unsigned long flags; + struct p54p_desc *desc; + dma_addr_t mapping; + u32 device_idx, idx, i; + + spin_lock_irqsave(&priv->lock, flags); + + device_idx = le32_to_cpu(priv->ring_control->device_idx[1]); + idx = le32_to_cpu(priv->ring_control->host_idx[1]); + i = idx % ARRAY_SIZE(priv->ring_control->tx_data); + + mapping = pci_map_single(priv->pdev, data, len, PCI_DMA_TODEVICE); + desc = &priv->ring_control->tx_data[i]; + desc->host_addr = cpu_to_le32(mapping); + desc->device_addr = data->req_id; + desc->len = cpu_to_le16(len); + desc->flags = 0; + + wmb(); + priv->ring_control->host_idx[1] = cpu_to_le32(idx + 1); + + if (free_on_tx) + priv->tx_buf[i] = data; + + spin_unlock_irqrestore(&priv->lock, flags); + + P54P_WRITE(dev_int, cpu_to_le32(ISL38XX_DEV_INT_UPDATE)); + P54P_READ(dev_int); + + /* FIXME: unlikely to happen because the device usually runs out of + memory before we fill the ring up, but we can make it impossible */ + if (idx - device_idx > ARRAY_SIZE(priv->ring_control->tx_data) - 2) + printk(KERN_INFO "%s: tx overflow.\n", wiphy_name(dev->wiphy)); +} + +static int p54p_open(struct ieee80211_hw *dev) +{ + struct p54p_priv *priv = dev->priv; + int err; + + init_completion(&priv->boot_comp); + err = request_irq(priv->pdev->irq, &p54p_interrupt, + IRQF_SHARED, "prism54pci", dev); + if (err) { + printk(KERN_ERR "%s: failed to register IRQ handler\n", + wiphy_name(dev->wiphy)); + return err; + } + + memset(priv->ring_control, 0, sizeof(*priv->ring_control)); + priv->rx_idx = priv->tx_idx = 0; + p54p_refill_rx_ring(dev); + + p54p_upload_firmware(dev); + + P54P_WRITE(ring_control_base, priv->ring_control_dma); + P54P_READ(ring_control_base); + wmb(); + udelay(10); + + P54P_WRITE(int_enable, cpu_to_le32(ISL38XX_INT_IDENT_INIT)); + P54P_READ(int_enable); + wmb(); + udelay(10); + + P54P_WRITE(dev_int, cpu_to_le32(ISL38XX_DEV_INT_RESET)); + P54P_READ(dev_int); + + if (!wait_for_completion_interruptible_timeout(&priv->boot_comp, HZ)) { + printk(KERN_ERR "%s: Cannot boot firmware!\n", + wiphy_name(dev->wiphy)); + free_irq(priv->pdev->irq, dev); + return -ETIMEDOUT; + } + + P54P_WRITE(int_enable, cpu_to_le32(ISL38XX_INT_IDENT_UPDATE)); + P54P_READ(int_enable); + wmb(); + udelay(10); + + P54P_WRITE(dev_int, cpu_to_le32(ISL38XX_DEV_INT_UPDATE)); + P54P_READ(dev_int); + wmb(); + udelay(10); + + return 0; +} + +static void p54p_stop(struct ieee80211_hw *dev) +{ + struct p54p_priv *priv = dev->priv; + unsigned int i; + struct p54p_desc *desc; + + P54P_WRITE(int_enable, 0); + P54P_READ(int_enable); + udelay(10); + + free_irq(priv->pdev->irq, dev); + + P54P_WRITE(dev_int, cpu_to_le32(ISL38XX_DEV_INT_RESET)); + + for (i = 0; i < ARRAY_SIZE(priv->rx_buf); i++) { + desc = &priv->ring_control->rx_data[i]; + if (desc->host_addr) + pci_unmap_single(priv->pdev, le32_to_cpu(desc->host_addr), + MAX_RX_SIZE, PCI_DMA_FROMDEVICE); + kfree_skb(priv->rx_buf[i]); + priv->rx_buf[i] = NULL; + } + + for (i = 0; i < ARRAY_SIZE(priv->tx_buf); i++) { + desc = &priv->ring_control->tx_data[i]; + if (desc->host_addr) + pci_unmap_single(priv->pdev, le32_to_cpu(desc->host_addr), + le16_to_cpu(desc->len), PCI_DMA_TODEVICE); + + kfree(priv->tx_buf[i]); + priv->tx_buf[i] = NULL; + } + + memset(priv->ring_control, 0, sizeof(*priv->ring_control)); +} + +static int __devinit p54p_probe(struct pci_dev *pdev, + const struct pci_device_id *id) +{ + struct 
p54p_priv *priv; + struct ieee80211_hw *dev; + unsigned long mem_addr, mem_len; + int err; + DECLARE_MAC_BUF(mac); + + err = pci_enable_device(pdev); + if (err) { + printk(KERN_ERR "%s (prism54pci): Cannot enable new PCI device\n", + pci_name(pdev)); + return err; + } + + mem_addr = pci_resource_start(pdev, 0); + mem_len = pci_resource_len(pdev, 0); + if (mem_len < sizeof(struct p54p_csr)) { + printk(KERN_ERR "%s (prism54pci): Too short PCI resources\n", + pci_name(pdev)); + pci_disable_device(pdev); + return err; + } + + err = pci_request_regions(pdev, "prism54pci"); + if (err) { + printk(KERN_ERR "%s (prism54pci): Cannot obtain PCI resources\n", + pci_name(pdev)); + return err; + } + + if (pci_set_dma_mask(pdev, DMA_32BIT_MASK) || + pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK)) { + printk(KERN_ERR "%s (prism54pci): No suitable DMA available\n", + pci_name(pdev)); + goto err_free_reg; + } + + pci_set_master(pdev); + pci_try_set_mwi(pdev); + + pci_write_config_byte(pdev, 0x40, 0); + pci_write_config_byte(pdev, 0x41, 0); + + dev = p54_init_common(sizeof(*priv)); + if (!dev) { + printk(KERN_ERR "%s (prism54pci): ieee80211 alloc failed\n", + pci_name(pdev)); + err = -ENOMEM; + goto err_free_reg; + } + + priv = dev->priv; + priv->pdev = pdev; + + SET_IEEE80211_DEV(dev, &pdev->dev); + pci_set_drvdata(pdev, dev); + + priv->map = ioremap(mem_addr, mem_len); + if (!priv->map) { + printk(KERN_ERR "%s (prism54pci): Cannot map device memory\n", + pci_name(pdev)); + err = -EINVAL; // TODO: use a better error code? + goto err_free_dev; + } + + priv->ring_control = pci_alloc_consistent(pdev, sizeof(*priv->ring_control), + &priv->ring_control_dma); + if (!priv->ring_control) { + printk(KERN_ERR "%s (prism54pci): Cannot allocate rings\n", + pci_name(pdev)); + err = -ENOMEM; + goto err_iounmap; + } + memset(priv->ring_control, 0, sizeof(*priv->ring_control)); + + err = p54p_upload_firmware(dev); + if (err) + goto err_free_desc; + + err = p54p_read_eeprom(dev); + if (err) + goto err_free_desc; + + priv->common.open = p54p_open; + priv->common.stop = p54p_stop; + priv->common.tx = p54p_tx; + + spin_lock_init(&priv->lock); + + err = ieee80211_register_hw(dev); + if (err) { + printk(KERN_ERR "%s (prism54pci): Cannot register netdevice\n", + pci_name(pdev)); + goto err_free_common; + } + + printk(KERN_INFO "%s: hwaddr %s, isl38%02x\n", + wiphy_name(dev->wiphy), + print_mac(mac, dev->wiphy->perm_addr), + priv->common.version); + + return 0; + + err_free_common: + p54_free_common(dev); + + err_free_desc: + pci_free_consistent(pdev, sizeof(*priv->ring_control), + priv->ring_control, priv->ring_control_dma); + + err_iounmap: + iounmap(priv->map); + + err_free_dev: + pci_set_drvdata(pdev, NULL); + ieee80211_free_hw(dev); + + err_free_reg: + pci_release_regions(pdev); + pci_disable_device(pdev); + return err; +} + +static void __devexit p54p_remove(struct pci_dev *pdev) +{ + struct ieee80211_hw *dev = pci_get_drvdata(pdev); + struct p54p_priv *priv; + + if (!dev) + return; + + ieee80211_unregister_hw(dev); + priv = dev->priv; + pci_free_consistent(pdev, sizeof(*priv->ring_control), + priv->ring_control, priv->ring_control_dma); + p54_free_common(dev); + iounmap(priv->map); + pci_release_regions(pdev); + pci_disable_device(pdev); + ieee80211_free_hw(dev); +} + +#ifdef CONFIG_PM +static int p54p_suspend(struct pci_dev *pdev, pm_message_t state) +{ + struct ieee80211_hw *dev = pci_get_drvdata(pdev); + struct p54p_priv *priv = dev->priv; + + if (priv->common.mode != IEEE80211_IF_TYPE_INVALID) { + 
ieee80211_stop_queues(dev); + p54p_stop(dev); + } + + pci_save_state(pdev); + pci_set_power_state(pdev, pci_choose_state(pdev, state)); + return 0; +} + +static int p54p_resume(struct pci_dev *pdev) +{ + struct ieee80211_hw *dev = pci_get_drvdata(pdev); + struct p54p_priv *priv = dev->priv; + + pci_set_power_state(pdev, PCI_D0); + pci_restore_state(pdev); + + if (priv->common.mode != IEEE80211_IF_TYPE_INVALID) { + p54p_open(dev); + ieee80211_start_queues(dev); + } + + return 0; +} +#endif /* CONFIG_PM */ + +static struct pci_driver p54p_driver = { + .name = "prism54pci", + .id_table = p54p_table, + .probe = p54p_probe, + .remove = __devexit_p(p54p_remove), +#ifdef CONFIG_PM + .suspend = p54p_suspend, + .resume = p54p_resume, +#endif /* CONFIG_PM */ +}; + +static int __init p54p_init(void) +{ + return pci_register_driver(&p54p_driver); +} + +static void __exit p54p_exit(void) +{ + pci_unregister_driver(&p54p_driver); +} + +module_init(p54p_init); +module_exit(p54p_exit); diff --git a/drivers/net/wireless/p54pci.h b/drivers/net/wireless/p54pci.h new file mode 100644 index 0000000000000000000000000000000000000000..52feb597dc4a8a4cb6c6b964df0e49929cd2f6af --- /dev/null +++ b/drivers/net/wireless/p54pci.h @@ -0,0 +1,106 @@ +#ifndef PRISM54PCI_H +#define PRISM54PCI_H + +/* + * Defines for PCI based mac80211 Prism54 driver + * + * Copyright (c) 2006, Michael Wu + * + * Based on the islsm (softmac prism54) driver, which is: + * Copyright 2004-2006 Jean-Baptiste Note , et al. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +/* Device Interrupt register bits */ +#define ISL38XX_DEV_INT_RESET 0x0001 +#define ISL38XX_DEV_INT_UPDATE 0x0002 +#define ISL38XX_DEV_INT_WAKEUP 0x0008 +#define ISL38XX_DEV_INT_SLEEP 0x0010 +#define ISL38XX_DEV_INT_ABORT 0x0020 +/* these two only used in USB */ +#define ISL38XX_DEV_INT_DATA 0x0040 +#define ISL38XX_DEV_INT_MGMT 0x0080 + +#define ISL38XX_DEV_INT_PCIUART_CTS 0x4000 +#define ISL38XX_DEV_INT_PCIUART_DR 0x8000 + +/* Interrupt Identification/Acknowledge/Enable register bits */ +#define ISL38XX_INT_IDENT_UPDATE 0x0002 +#define ISL38XX_INT_IDENT_INIT 0x0004 +#define ISL38XX_INT_IDENT_WAKEUP 0x0008 +#define ISL38XX_INT_IDENT_SLEEP 0x0010 +#define ISL38XX_INT_IDENT_PCIUART_CTS 0x4000 +#define ISL38XX_INT_IDENT_PCIUART_DR 0x8000 + +/* Control/Status register bits */ +#define ISL38XX_CTRL_STAT_SLEEPMODE 0x00000200 +#define ISL38XX_CTRL_STAT_CLKRUN 0x00800000 +#define ISL38XX_CTRL_STAT_RESET 0x10000000 +#define ISL38XX_CTRL_STAT_RAMBOOT 0x20000000 +#define ISL38XX_CTRL_STAT_STARTHALTED 0x40000000 +#define ISL38XX_CTRL_STAT_HOST_OVERRIDE 0x80000000 + +struct p54p_csr { + __le32 dev_int; + u8 unused_1[12]; + __le32 int_ident; + __le32 int_ack; + __le32 int_enable; + u8 unused_2[4]; + union { + __le32 ring_control_base; + __le32 gen_purp_com[2]; + }; + u8 unused_3[8]; + __le32 direct_mem_base; + u8 unused_4[44]; + __le32 dma_addr; + __le32 dma_len; + __le32 dma_ctrl; + u8 unused_5[12]; + __le32 ctrl_stat; + u8 unused_6[1924]; + u8 cardbus_cis[0x800]; + u8 direct_mem_win[0x1000]; +} __attribute__ ((packed)); + +/* usb backend only needs the register defines above */ +#ifndef PRISM54USB_H +struct p54p_desc { + __le32 host_addr; + __le32 device_addr; + __le16 len; + __le16 flags; +} __attribute__ ((packed)); + +struct p54p_ring_control { + __le32 host_idx[4]; + __le32 device_idx[4]; + struct p54p_desc rx_data[8]; + struct 
p54p_desc tx_data[32]; + struct p54p_desc rx_mgmt[4]; + struct p54p_desc tx_mgmt[4]; +} __attribute__ ((packed)); + +#define P54P_READ(r) __raw_readl(&priv->map->r) +#define P54P_WRITE(r, val) __raw_writel((__force u32)(val), &priv->map->r) + +struct p54p_priv { + struct p54_common common; + struct pci_dev *pdev; + struct p54p_csr __iomem *map; + + spinlock_t lock; + struct p54p_ring_control *ring_control; + dma_addr_t ring_control_dma; + u32 rx_idx, tx_idx; + struct sk_buff *rx_buf[8]; + void *tx_buf[32]; + struct completion boot_comp; +}; + +#endif /* PRISM54USB_H */ +#endif /* PRISM54PCI_H */ diff --git a/drivers/net/wireless/p54usb.c b/drivers/net/wireless/p54usb.c new file mode 100644 index 0000000000000000000000000000000000000000..755482a5a9382793b6f942d702871ffe2ddb530f --- /dev/null +++ b/drivers/net/wireless/p54usb.c @@ -0,0 +1,907 @@ + +/* + * Linux device driver for USB based Prism54 + * + * Copyright (c) 2006, Michael Wu + * + * Based on the islsm (softmac prism54) driver, which is: + * Copyright 2004-2006 Jean-Baptiste Note , et al. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "p54.h" +#include "p54usb.h" + +MODULE_AUTHOR("Michael Wu "); +MODULE_DESCRIPTION("Prism54 USB wireless driver"); +MODULE_LICENSE("GPL"); +MODULE_ALIAS("prism54usb"); + +static struct usb_device_id p54u_table[] __devinitdata = { + /* Version 1 devices (pci chip + net2280) */ + {USB_DEVICE(0x0506, 0x0a11)}, /* 3COM 3CRWE254G72 */ + {USB_DEVICE(0x0707, 0xee06)}, /* SMC 2862W-G */ + {USB_DEVICE(0x083a, 0x4501)}, /* Accton 802.11g WN4501 USB */ + {USB_DEVICE(0x083a, 0x4502)}, /* Siemens Gigaset USB Adapter */ + {USB_DEVICE(0x0846, 0x4200)}, /* Netgear WG121 */ + {USB_DEVICE(0x0846, 0x4210)}, /* Netgear WG121 the second ? */ + {USB_DEVICE(0x0846, 0x4220)}, /* Netgear WG111 */ + {USB_DEVICE(0x0cde, 0x0006)}, /* Medion 40900, Roper Europe */ + {USB_DEVICE(0x124a, 0x4023)}, /* Shuttle PN15, Airvast WM168g, IOGear GWU513 */ + {USB_DEVICE(0x1915, 0x2234)}, /* Linksys WUSB54G OEM */ + {USB_DEVICE(0x1915, 0x2235)}, /* Linksys WUSB54G Portable OEM */ + {USB_DEVICE(0x2001, 0x3701)}, /* DLink DWL-G120 Spinnaker */ + {USB_DEVICE(0x2001, 0x3703)}, /* DLink DWL-G122 */ + {USB_DEVICE(0x5041, 0x2234)}, /* Linksys WUSB54G */ + {USB_DEVICE(0x5041, 0x2235)}, /* Linksys WUSB54G Portable */ + + /* Version 2 devices (3887) */ + {USB_DEVICE(0x050d, 0x7050)}, /* Belkin F5D7050 ver 1000 */ + {USB_DEVICE(0x0572, 0x2000)}, /* Cohiba Proto board */ + {USB_DEVICE(0x0572, 0x2002)}, /* Cohiba Proto board */ + {USB_DEVICE(0x0707, 0xee13)}, /* SMC 2862W-G version 2 */ + {USB_DEVICE(0x083a, 0x4521)}, /* Siemens Gigaset USB Adapter 54 version 2 */ + {USB_DEVICE(0x0846, 0x4240)}, /* Netgear WG111 (v2) */ + {USB_DEVICE(0x0915, 0x2000)}, /* Cohiba Proto board */ + {USB_DEVICE(0x0915, 0x2002)}, /* Cohiba Proto board */ + {USB_DEVICE(0x0baf, 0x0118)}, /* U.S. 
Robotics U5 802.11g Adapter*/ + {USB_DEVICE(0x0bf8, 0x1009)}, /* FUJITSU E-5400 USB D1700*/ + {USB_DEVICE(0x0cde, 0x0006)}, /* Medion MD40900 */ + {USB_DEVICE(0x0cde, 0x0008)}, /* Sagem XG703A */ + {USB_DEVICE(0x0d8e, 0x3762)}, /* DLink DWL-G120 Cohiba */ + {USB_DEVICE(0x09aa, 0x1000)}, /* Spinnaker Proto board */ + {USB_DEVICE(0x13B1, 0x000C)}, /* Linksys WUSB54AG */ + {USB_DEVICE(0x1435, 0x0427)}, /* Inventel UR054G */ + {USB_DEVICE(0x2001, 0x3704)}, /* DLink DWL-G122 rev A2 */ + {USB_DEVICE(0x413c, 0x8102)}, /* Spinnaker DUT */ + {USB_DEVICE(0x413c, 0x8104)}, /* Cohiba Proto board */ + {} +}; + +MODULE_DEVICE_TABLE(usb, p54u_table); + +static void p54u_rx_cb(struct urb *urb) +{ + struct sk_buff *skb = (struct sk_buff *) urb->context; + struct p54u_rx_info *info = (struct p54u_rx_info *)skb->cb; + struct ieee80211_hw *dev = info->dev; + struct p54u_priv *priv = dev->priv; + + if (unlikely(urb->status)) { + info->urb = NULL; + usb_free_urb(urb); + return; + } + + skb_unlink(skb, &priv->rx_queue); + skb_put(skb, urb->actual_length); + if (!priv->hw_type) + skb_pull(skb, sizeof(struct net2280_tx_hdr)); + + if (p54_rx(dev, skb)) { + skb = dev_alloc_skb(MAX_RX_SIZE); + if (unlikely(!skb)) { + usb_free_urb(urb); + /* TODO check rx queue length and refill *somewhere* */ + return; + } + + info = (struct p54u_rx_info *) skb->cb; + info->urb = urb; + info->dev = dev; + urb->transfer_buffer = skb_tail_pointer(skb); + urb->context = skb; + skb_queue_tail(&priv->rx_queue, skb); + } else { + skb_trim(skb, 0); + skb_queue_tail(&priv->rx_queue, skb); + } + + usb_submit_urb(urb, GFP_ATOMIC); +} + +static void p54u_tx_cb(struct urb *urb) +{ + usb_free_urb(urb); +} + +static void p54u_tx_free_cb(struct urb *urb) +{ + kfree(urb->transfer_buffer); + usb_free_urb(urb); +} + +static int p54u_init_urbs(struct ieee80211_hw *dev) +{ + struct p54u_priv *priv = dev->priv; + struct urb *entry; + struct sk_buff *skb; + struct p54u_rx_info *info; + + while (skb_queue_len(&priv->rx_queue) < 32) { + skb = __dev_alloc_skb(MAX_RX_SIZE, GFP_KERNEL); + if (!skb) + break; + entry = usb_alloc_urb(0, GFP_KERNEL); + if (!entry) { + kfree_skb(skb); + break; + } + usb_fill_bulk_urb(entry, priv->udev, usb_rcvbulkpipe(priv->udev, P54U_PIPE_DATA), skb_tail_pointer(skb), MAX_RX_SIZE, p54u_rx_cb, skb); + info = (struct p54u_rx_info *) skb->cb; + info->urb = entry; + info->dev = dev; + skb_queue_tail(&priv->rx_queue, skb); + usb_submit_urb(entry, GFP_KERNEL); + } + + return 0; +} + +static void p54u_free_urbs(struct ieee80211_hw *dev) +{ + struct p54u_priv *priv = dev->priv; + struct p54u_rx_info *info; + struct sk_buff *skb; + + while ((skb = skb_dequeue(&priv->rx_queue))) { + info = (struct p54u_rx_info *) skb->cb; + if (!info->urb) + continue; + + usb_kill_urb(info->urb); + kfree_skb(skb); + } +} + +static void p54u_tx_3887(struct ieee80211_hw *dev, struct p54_control_hdr *data, + size_t len, int free_on_tx) +{ + struct p54u_priv *priv = dev->priv; + struct urb *addr_urb, *data_urb; + + addr_urb = usb_alloc_urb(0, GFP_ATOMIC); + if (!addr_urb) + return; + + data_urb = usb_alloc_urb(0, GFP_ATOMIC); + if (!data_urb) { + usb_free_urb(addr_urb); + return; + } + + usb_fill_bulk_urb(addr_urb, priv->udev, + usb_sndbulkpipe(priv->udev, P54U_PIPE_DATA), &data->req_id, + sizeof(data->req_id), p54u_tx_cb, dev); + usb_fill_bulk_urb(data_urb, priv->udev, + usb_sndbulkpipe(priv->udev, P54U_PIPE_DATA), data, len, + free_on_tx ? 
p54u_tx_free_cb : p54u_tx_cb, dev); + + usb_submit_urb(addr_urb, GFP_ATOMIC); + usb_submit_urb(data_urb, GFP_ATOMIC); +} + +static void p54u_tx_net2280(struct ieee80211_hw *dev, struct p54_control_hdr *data, + size_t len, int free_on_tx) +{ + struct p54u_priv *priv = dev->priv; + struct urb *int_urb, *data_urb; + struct net2280_tx_hdr *hdr; + struct net2280_reg_write *reg; + + reg = kmalloc(sizeof(*reg), GFP_ATOMIC); + if (!reg) + return; + + int_urb = usb_alloc_urb(0, GFP_ATOMIC); + if (!int_urb) { + kfree(reg); + return; + } + + data_urb = usb_alloc_urb(0, GFP_ATOMIC); + if (!data_urb) { + kfree(reg); + usb_free_urb(int_urb); + return; + } + + reg->port = cpu_to_le16(NET2280_DEV_U32); + reg->addr = cpu_to_le32(P54U_DEV_BASE); + reg->val = cpu_to_le32(ISL38XX_DEV_INT_DATA); + + len += sizeof(*data); + hdr = (void *)data - sizeof(*hdr); + memset(hdr, 0, sizeof(*hdr)); + hdr->device_addr = data->req_id; + hdr->len = cpu_to_le16(len); + + usb_fill_bulk_urb(int_urb, priv->udev, + usb_sndbulkpipe(priv->udev, P54U_PIPE_DEV), reg, sizeof(*reg), + p54u_tx_free_cb, dev); + usb_submit_urb(int_urb, GFP_ATOMIC); + + usb_fill_bulk_urb(data_urb, priv->udev, + usb_sndbulkpipe(priv->udev, P54U_PIPE_DATA), hdr, len + sizeof(*hdr), + free_on_tx ? p54u_tx_free_cb : p54u_tx_cb, dev); + usb_submit_urb(data_urb, GFP_ATOMIC); +} + +static int p54u_write(struct p54u_priv *priv, + struct net2280_reg_write *buf, + enum net2280_op_type type, + __le32 addr, __le32 val) +{ + unsigned int ep; + int alen; + + if (type & 0x0800) + ep = usb_sndbulkpipe(priv->udev, P54U_PIPE_DEV); + else + ep = usb_sndbulkpipe(priv->udev, P54U_PIPE_BRG); + + buf->port = cpu_to_le16(type); + buf->addr = addr; + buf->val = val; + + return usb_bulk_msg(priv->udev, ep, buf, sizeof(*buf), &alen, 1000); +} + +static int p54u_read(struct p54u_priv *priv, void *buf, + enum net2280_op_type type, + __le32 addr, __le32 *val) +{ + struct net2280_reg_read *read = buf; + __le32 *reg = buf; + unsigned int ep; + int alen, err; + + if (type & 0x0800) + ep = P54U_PIPE_DEV; + else + ep = P54U_PIPE_BRG; + + read->port = cpu_to_le16(type); + read->addr = addr; + + err = usb_bulk_msg(priv->udev, usb_sndbulkpipe(priv->udev, ep), + read, sizeof(*read), &alen, 1000); + if (err) + return err; + + err = usb_bulk_msg(priv->udev, usb_rcvbulkpipe(priv->udev, ep), + reg, sizeof(*reg), &alen, 1000); + if (err) + return err; + + *val = *reg; + return 0; +} + +static int p54u_bulk_msg(struct p54u_priv *priv, unsigned int ep, + void *data, size_t len) +{ + int alen; + return usb_bulk_msg(priv->udev, usb_sndbulkpipe(priv->udev, ep), + data, len, &alen, 2000); +} + +static int p54u_read_eeprom(struct ieee80211_hw *dev) +{ + struct p54u_priv *priv = dev->priv; + void *buf; + struct p54_control_hdr *hdr; + int err, alen; + size_t offset = priv->hw_type ? 
0x10 : 0x20; + + buf = kmalloc(0x2020, GFP_KERNEL); + if (!buf) { + printk(KERN_ERR "prism54usb: cannot allocate memory for" + "eeprom readback!\n"); + return -ENOMEM; + } + + if (priv->hw_type) { + *((u32 *) buf) = priv->common.rx_start; + err = p54u_bulk_msg(priv, P54U_PIPE_DATA, buf, sizeof(u32)); + if (err) { + printk(KERN_ERR "prism54usb: addr send failed\n"); + goto fail; + } + } else { + struct net2280_reg_write *reg = buf; + reg->port = cpu_to_le16(NET2280_DEV_U32); + reg->addr = cpu_to_le32(P54U_DEV_BASE); + reg->val = cpu_to_le32(ISL38XX_DEV_INT_DATA); + err = p54u_bulk_msg(priv, P54U_PIPE_DEV, buf, sizeof(*reg)); + if (err) { + printk(KERN_ERR "prism54usb: dev_int send failed\n"); + goto fail; + } + } + + hdr = buf + priv->common.tx_hdr_len; + p54_fill_eeprom_readback(hdr); + hdr->req_id = cpu_to_le32(priv->common.rx_start); + if (priv->common.tx_hdr_len) { + struct net2280_tx_hdr *tx_hdr = buf; + tx_hdr->device_addr = hdr->req_id; + tx_hdr->len = cpu_to_le16(EEPROM_READBACK_LEN); + } + + /* we can just pretend to send 0x2000 bytes of nothing in the headers */ + err = p54u_bulk_msg(priv, P54U_PIPE_DATA, buf, + EEPROM_READBACK_LEN + priv->common.tx_hdr_len); + if (err) { + printk(KERN_ERR "prism54usb: eeprom req send failed\n"); + goto fail; + } + + err = usb_bulk_msg(priv->udev, + usb_rcvbulkpipe(priv->udev, P54U_PIPE_DATA), + buf, 0x2020, &alen, 1000); + if (!err && alen > offset) { + p54_parse_eeprom(dev, (u8 *)buf + offset, alen - offset); + } else { + printk(KERN_ERR "prism54usb: eeprom read failed!\n"); + err = -EINVAL; + goto fail; + } + + fail: + kfree(buf); + return err; +} + +static int p54u_upload_firmware_3887(struct ieee80211_hw *dev) +{ + static char start_string[] = "~~~~<\r"; + struct p54u_priv *priv = dev->priv; + const struct firmware *fw_entry = NULL; + int err, alen; + u8 carry = 0; + u8 *buf, *tmp, *data; + unsigned int left, remains, block_size; + struct x2_header *hdr; + unsigned long timeout; + + tmp = buf = kmalloc(P54U_FW_BLOCK, GFP_KERNEL); + if (!buf) { + printk(KERN_ERR "p54usb: cannot allocate firmware upload buffer!\n"); + err = -ENOMEM; + goto err_bufalloc; + } + + memcpy(buf, start_string, 4); + err = p54u_bulk_msg(priv, P54U_PIPE_DATA, buf, 4); + if (err) { + printk(KERN_ERR "p54usb: reset failed! 
(%d)\n", err); + goto err_reset; + } + + err = request_firmware(&fw_entry, "isl3887usb_bare", &priv->udev->dev); + if (err) { + printk(KERN_ERR "p54usb: cannot find firmware (isl3887usb_bare)!\n"); + goto err_req_fw_failed; + } + + p54_parse_firmware(dev, fw_entry); + + left = block_size = min((size_t)P54U_FW_BLOCK, fw_entry->size); + strcpy(buf, start_string); + left -= strlen(start_string); + tmp += strlen(start_string); + + data = fw_entry->data; + remains = fw_entry->size; + + hdr = (struct x2_header *)(buf + strlen(start_string)); + memcpy(hdr->signature, X2_SIGNATURE, X2_SIGNATURE_SIZE); + hdr->fw_load_addr = cpu_to_le32(ISL38XX_DEV_FIRMWARE_ADDR); + hdr->fw_length = cpu_to_le32(fw_entry->size); + hdr->crc = cpu_to_le32(~crc32_le(~0, (void *)&hdr->fw_load_addr, + sizeof(u32)*2)); + left -= sizeof(*hdr); + tmp += sizeof(*hdr); + + while (remains) { + while (left--) { + if (carry) { + *tmp++ = carry; + carry = 0; + remains--; + continue; + } + switch (*data) { + case '~': + *tmp++ = '}'; + carry = '^'; + break; + case '}': + *tmp++ = '}'; + carry = ']'; + break; + default: + *tmp++ = *data; + remains--; + break; + } + data++; + } + + err = p54u_bulk_msg(priv, P54U_PIPE_DATA, buf, block_size); + if (err) { + printk(KERN_ERR "prism54usb: firmware upload failed!\n"); + goto err_upload_failed; + } + + tmp = buf; + left = block_size = min((unsigned int)P54U_FW_BLOCK, remains); + } + + *((__le32 *)buf) = cpu_to_le32(~crc32_le(~0, fw_entry->data, fw_entry->size)); + err = p54u_bulk_msg(priv, P54U_PIPE_DATA, buf, sizeof(u32)); + if (err) { + printk(KERN_ERR "prism54usb: firmware upload failed!\n"); + goto err_upload_failed; + } + + timeout = jiffies + msecs_to_jiffies(1000); + while (!(err = usb_bulk_msg(priv->udev, + usb_rcvbulkpipe(priv->udev, P54U_PIPE_DATA), buf, 128, &alen, 1000))) { + if (alen > 2 && !memcmp(buf, "OK", 2)) + break; + + if (alen > 5 && !memcmp(buf, "ERROR", 5)) { + printk(KERN_INFO "prism54usb: firmware upload failed!\n"); + err = -EINVAL; + break; + } + + if (time_after(jiffies, timeout)) { + printk(KERN_ERR "prism54usb: firmware boot timed out!\n"); + err = -ETIMEDOUT; + break; + } + } + if (err) + goto err_upload_failed; + + buf[0] = 'g'; + buf[1] = '\r'; + err = p54u_bulk_msg(priv, P54U_PIPE_DATA, buf, 2); + if (err) { + printk(KERN_ERR "prism54usb: firmware boot failed!\n"); + goto err_upload_failed; + } + + timeout = jiffies + msecs_to_jiffies(1000); + while (!(err = usb_bulk_msg(priv->udev, + usb_rcvbulkpipe(priv->udev, P54U_PIPE_DATA), buf, 128, &alen, 1000))) { + if (alen > 0 && buf[0] == 'g') + break; + + if (time_after(jiffies, timeout)) { + err = -ETIMEDOUT; + break; + } + } + if (err) + goto err_upload_failed; + + err_upload_failed: + release_firmware(fw_entry); + err_req_fw_failed: + err_reset: + kfree(buf); + err_bufalloc: + return err; +} + +static int p54u_upload_firmware_net2280(struct ieee80211_hw *dev) +{ + struct p54u_priv *priv = dev->priv; + const struct firmware *fw_entry = NULL; + const struct p54p_csr *devreg = (const struct p54p_csr *) P54U_DEV_BASE; + int err, alen; + void *buf; + __le32 reg; + unsigned int remains, offset; + u8 *data; + + buf = kmalloc(512, GFP_KERNEL); + if (!buf) { + printk(KERN_ERR "p54usb: firmware buffer alloc failed!\n"); + return -ENOMEM; + } + + err = request_firmware(&fw_entry, "isl3890usb", &priv->udev->dev); + if (err) { + printk(KERN_ERR "p54usb: cannot find firmware (isl3890usb)!\n"); + kfree(buf); + return err; + } + + p54_parse_firmware(dev, fw_entry); + +#define P54U_WRITE(type, addr, data) \ + do {\ + err = 
p54u_write(priv, buf, type,\ + cpu_to_le32((u32)(unsigned long)addr), data);\ + if (err) \ + goto fail;\ + } while (0) + +#define P54U_READ(type, addr) \ + do {\ + err = p54u_read(priv, buf, type,\ + cpu_to_le32((u32)(unsigned long)addr), ®);\ + if (err)\ + goto fail;\ + } while (0) + + /* power down net2280 bridge */ + P54U_READ(NET2280_BRG_U32, NET2280_GPIOCTL); + reg |= cpu_to_le32(P54U_BRG_POWER_DOWN); + reg &= cpu_to_le32(~P54U_BRG_POWER_UP); + P54U_WRITE(NET2280_BRG_U32, NET2280_GPIOCTL, reg); + + mdelay(100); + + /* power up bridge */ + reg |= cpu_to_le32(P54U_BRG_POWER_UP); + reg &= cpu_to_le32(~P54U_BRG_POWER_DOWN); + P54U_WRITE(NET2280_BRG_U32, NET2280_GPIOCTL, reg); + + mdelay(100); + + P54U_WRITE(NET2280_BRG_U32, NET2280_DEVINIT, + cpu_to_le32(NET2280_CLK_30Mhz | + NET2280_PCI_ENABLE | + NET2280_PCI_SOFT_RESET)); + + mdelay(20); + + P54U_WRITE(NET2280_BRG_CFG_U16, PCI_COMMAND, + cpu_to_le32(PCI_COMMAND_MEMORY | + PCI_COMMAND_MASTER)); + + P54U_WRITE(NET2280_BRG_CFG_U32, PCI_BASE_ADDRESS_0, + cpu_to_le32(NET2280_BASE)); + + P54U_READ(NET2280_BRG_CFG_U16, PCI_STATUS); + reg |= cpu_to_le32(PCI_STATUS_REC_MASTER_ABORT); + P54U_WRITE(NET2280_BRG_CFG_U16, PCI_STATUS, reg); + + // TODO: we really need this? + P54U_READ(NET2280_BRG_U32, NET2280_RELNUM); + + P54U_WRITE(NET2280_BRG_U32, NET2280_EPA_RSP, + cpu_to_le32(NET2280_CLEAR_NAK_OUT_PACKETS_MODE)); + P54U_WRITE(NET2280_BRG_U32, NET2280_EPC_RSP, + cpu_to_le32(NET2280_CLEAR_NAK_OUT_PACKETS_MODE)); + + P54U_WRITE(NET2280_BRG_CFG_U32, PCI_BASE_ADDRESS_2, + cpu_to_le32(NET2280_BASE2)); + + /* finally done setting up the bridge */ + + P54U_WRITE(NET2280_DEV_CFG_U16, 0x10000 | PCI_COMMAND, + cpu_to_le32(PCI_COMMAND_MEMORY | + PCI_COMMAND_MASTER)); + + P54U_WRITE(NET2280_DEV_CFG_U16, 0x10000 | 0x40 /* TRDY timeout */, 0); + P54U_WRITE(NET2280_DEV_CFG_U32, 0x10000 | PCI_BASE_ADDRESS_0, + cpu_to_le32(P54U_DEV_BASE)); + + P54U_WRITE(NET2280_BRG_U32, NET2280_USBIRQENB1, 0); + P54U_WRITE(NET2280_BRG_U32, NET2280_IRQSTAT1, + cpu_to_le32(NET2280_PCI_INTA_INTERRUPT)); + + /* do romboot */ + P54U_WRITE(NET2280_DEV_U32, &devreg->int_enable, 0); + + P54U_READ(NET2280_DEV_U32, &devreg->ctrl_stat); + reg &= cpu_to_le32(~ISL38XX_CTRL_STAT_RESET); + reg &= cpu_to_le32(~ISL38XX_CTRL_STAT_RAMBOOT); + reg &= cpu_to_le32(~ISL38XX_CTRL_STAT_CLKRUN); + P54U_WRITE(NET2280_DEV_U32, &devreg->ctrl_stat, reg); + + mdelay(20); + + reg |= cpu_to_le32(ISL38XX_CTRL_STAT_RESET); + P54U_WRITE(NET2280_DEV_U32, &devreg->ctrl_stat, reg); + + mdelay(20); + + reg &= cpu_to_le32(~ISL38XX_CTRL_STAT_RESET); + P54U_WRITE(NET2280_DEV_U32, &devreg->ctrl_stat, reg); + + mdelay(100); + + P54U_READ(NET2280_DEV_U32, &devreg->int_ident); + P54U_WRITE(NET2280_DEV_U32, &devreg->int_ack, reg); + + /* finally, we can upload firmware now! 
*/ + remains = fw_entry->size; + data = fw_entry->data; + offset = ISL38XX_DEV_FIRMWARE_ADDR; + + while (remains) { + unsigned int block_len = min(remains, (unsigned int)512); + memcpy(buf, data, block_len); + + err = p54u_bulk_msg(priv, P54U_PIPE_DATA, buf, block_len); + if (err) { + printk(KERN_ERR "prism54usb: firmware block upload " + "failed\n"); + goto fail; + } + + P54U_WRITE(NET2280_DEV_U32, &devreg->direct_mem_base, + cpu_to_le32(0xc0000f00)); + + P54U_WRITE(NET2280_DEV_U32, + 0x0020 | (unsigned long)&devreg->direct_mem_win, 0); + P54U_WRITE(NET2280_DEV_U32, + 0x0020 | (unsigned long)&devreg->direct_mem_win, + cpu_to_le32(1)); + + P54U_WRITE(NET2280_DEV_U32, + 0x0024 | (unsigned long)&devreg->direct_mem_win, + cpu_to_le32(block_len)); + P54U_WRITE(NET2280_DEV_U32, + 0x0028 | (unsigned long)&devreg->direct_mem_win, + cpu_to_le32(offset)); + + P54U_WRITE(NET2280_DEV_U32, &devreg->dma_addr, + cpu_to_le32(NET2280_EPA_FIFO_PCI_ADDR)); + P54U_WRITE(NET2280_DEV_U32, &devreg->dma_len, + cpu_to_le32(block_len >> 2)); + P54U_WRITE(NET2280_DEV_U32, &devreg->dma_ctrl, + cpu_to_le32(ISL38XX_DMA_MASTER_CONTROL_TRIGGER)); + + mdelay(10); + + P54U_READ(NET2280_DEV_U32, + 0x002C | (unsigned long)&devreg->direct_mem_win); + if (!(reg & cpu_to_le32(ISL38XX_DMA_STATUS_DONE)) || + !(reg & cpu_to_le32(ISL38XX_DMA_STATUS_READY))) { + printk(KERN_ERR "prism54usb: firmware DMA transfer " + "failed\n"); + goto fail; + } + + P54U_WRITE(NET2280_BRG_U32, NET2280_EPA_STAT, + cpu_to_le32(NET2280_FIFO_FLUSH)); + + remains -= block_len; + data += block_len; + offset += block_len; + } + + /* do ramboot */ + P54U_READ(NET2280_DEV_U32, &devreg->ctrl_stat); + reg &= cpu_to_le32(~ISL38XX_CTRL_STAT_RESET); + reg &= cpu_to_le32(~ISL38XX_CTRL_STAT_CLKRUN); + reg |= cpu_to_le32(ISL38XX_CTRL_STAT_RAMBOOT); + P54U_WRITE(NET2280_DEV_U32, &devreg->ctrl_stat, reg); + + mdelay(20); + + reg |= cpu_to_le32(ISL38XX_CTRL_STAT_RESET); + P54U_WRITE(NET2280_DEV_U32, &devreg->ctrl_stat, reg); + + reg &= cpu_to_le32(~ISL38XX_CTRL_STAT_RESET); + P54U_WRITE(NET2280_DEV_U32, &devreg->ctrl_stat, reg); + + mdelay(100); + + P54U_READ(NET2280_DEV_U32, &devreg->int_ident); + P54U_WRITE(NET2280_DEV_U32, &devreg->int_ack, reg); + + /* start up the firmware */ + P54U_WRITE(NET2280_DEV_U32, &devreg->int_enable, + cpu_to_le32(ISL38XX_INT_IDENT_INIT)); + + P54U_WRITE(NET2280_BRG_U32, NET2280_IRQSTAT1, + cpu_to_le32(NET2280_PCI_INTA_INTERRUPT)); + + P54U_WRITE(NET2280_BRG_U32, NET2280_USBIRQENB1, + cpu_to_le32(NET2280_PCI_INTA_INTERRUPT_ENABLE | + NET2280_USB_INTERRUPT_ENABLE)); + + P54U_WRITE(NET2280_DEV_U32, &devreg->dev_int, + cpu_to_le32(ISL38XX_DEV_INT_RESET)); + + err = usb_interrupt_msg(priv->udev, + usb_rcvbulkpipe(priv->udev, P54U_PIPE_INT), + buf, sizeof(__le32), &alen, 1000); + if (err || alen != sizeof(__le32)) + goto fail; + + P54U_READ(NET2280_DEV_U32, &devreg->int_ident); + P54U_WRITE(NET2280_DEV_U32, &devreg->int_ack, reg); + + if (!(reg & cpu_to_le32(ISL38XX_INT_IDENT_INIT))) + err = -EINVAL; + + P54U_WRITE(NET2280_BRG_U32, NET2280_USBIRQENB1, 0); + P54U_WRITE(NET2280_BRG_U32, NET2280_IRQSTAT1, + cpu_to_le32(NET2280_PCI_INTA_INTERRUPT)); + +#undef P54U_WRITE +#undef P54U_READ + + fail: + release_firmware(fw_entry); + kfree(buf); + return err; +} + +static int p54u_open(struct ieee80211_hw *dev) +{ + struct p54u_priv *priv = dev->priv; + int err; + + err = p54u_init_urbs(dev); + if (err) { + return err; + } + + priv->common.open = p54u_init_urbs; + + return 0; +} + +static void p54u_stop(struct ieee80211_hw *dev) +{ + /* TODO: figure 
out how to reliably stop the 3887 and net2280 so + the hardware is still usable next time we want to start it. + until then, we just stop listening to the hardware.. */ + p54u_free_urbs(dev); + return; +} + +static int __devinit p54u_probe(struct usb_interface *intf, + const struct usb_device_id *id) +{ + struct usb_device *udev = interface_to_usbdev(intf); + struct ieee80211_hw *dev; + struct p54u_priv *priv; + int err; + unsigned int i, recognized_pipes; + DECLARE_MAC_BUF(mac); + + dev = p54_init_common(sizeof(*priv)); + if (!dev) { + printk(KERN_ERR "prism54usb: ieee80211 alloc failed\n"); + return -ENOMEM; + } + + priv = dev->priv; + + SET_IEEE80211_DEV(dev, &intf->dev); + usb_set_intfdata(intf, dev); + priv->udev = udev; + + usb_get_dev(udev); + + /* really lazy and simple way of figuring out if we're a 3887 */ + /* TODO: should just stick the identification in the device table */ + i = intf->altsetting->desc.bNumEndpoints; + recognized_pipes = 0; + while (i--) { + switch (intf->altsetting->endpoint[i].desc.bEndpointAddress) { + case P54U_PIPE_DATA: + case P54U_PIPE_MGMT: + case P54U_PIPE_BRG: + case P54U_PIPE_DEV: + case P54U_PIPE_DATA | USB_DIR_IN: + case P54U_PIPE_MGMT | USB_DIR_IN: + case P54U_PIPE_BRG | USB_DIR_IN: + case P54U_PIPE_DEV | USB_DIR_IN: + case P54U_PIPE_INT | USB_DIR_IN: + recognized_pipes++; + } + } + priv->common.open = p54u_open; + + if (recognized_pipes < P54U_PIPE_NUMBER) { + priv->hw_type = P54U_3887; + priv->common.tx = p54u_tx_3887; + } else { + dev->extra_tx_headroom += sizeof(struct net2280_tx_hdr); + priv->common.tx_hdr_len = sizeof(struct net2280_tx_hdr); + priv->common.tx = p54u_tx_net2280; + } + priv->common.stop = p54u_stop; + + if (priv->hw_type) + err = p54u_upload_firmware_3887(dev); + else + err = p54u_upload_firmware_net2280(dev); + if (err) + goto err_free_dev; + + err = p54u_read_eeprom(dev); + if (err) + goto err_free_dev; + + if (!is_valid_ether_addr(dev->wiphy->perm_addr)) { + u8 perm_addr[ETH_ALEN]; + + printk(KERN_WARNING "prism54usb: Invalid hwaddr! 
Using randomly generated MAC addr\n"); + random_ether_addr(perm_addr); + SET_IEEE80211_PERM_ADDR(dev, perm_addr); + } + + skb_queue_head_init(&priv->rx_queue); + + err = ieee80211_register_hw(dev); + if (err) { + printk(KERN_ERR "prism54usb: Cannot register netdevice\n"); + goto err_free_dev; + } + + printk(KERN_INFO "%s: hwaddr %s, isl38%02x\n", + wiphy_name(dev->wiphy), + print_mac(mac, dev->wiphy->perm_addr), + priv->common.version); + + return 0; + + err_free_dev: + ieee80211_free_hw(dev); + usb_set_intfdata(intf, NULL); + usb_put_dev(udev); + return err; +} + +static void __devexit p54u_disconnect(struct usb_interface *intf) +{ + struct ieee80211_hw *dev = usb_get_intfdata(intf); + struct p54u_priv *priv; + + if (!dev) + return; + + ieee80211_unregister_hw(dev); + + priv = dev->priv; + usb_put_dev(interface_to_usbdev(intf)); + p54_free_common(dev); + ieee80211_free_hw(dev); +} + +static struct usb_driver p54u_driver = { + .name = "prism54usb", + .id_table = p54u_table, + .probe = p54u_probe, + .disconnect = p54u_disconnect, +}; + +static int __init p54u_init(void) +{ + return usb_register(&p54u_driver); +} + +static void __exit p54u_exit(void) +{ + usb_deregister(&p54u_driver); +} + +module_init(p54u_init); +module_exit(p54u_exit); diff --git a/drivers/net/wireless/p54usb.h b/drivers/net/wireless/p54usb.h new file mode 100644 index 0000000000000000000000000000000000000000..d1896b396c1c128d3285dc27026855349ae2bbd5 --- /dev/null +++ b/drivers/net/wireless/p54usb.h @@ -0,0 +1,133 @@ +#ifndef PRISM54USB_H +#define PRISM54USB_H + +/* + * Defines for USB based mac80211 Prism54 driver + * + * Copyright (c) 2006, Michael Wu + * + * Based on the islsm (softmac prism54) driver, which is: + * Copyright 2004-2006 Jean-Baptiste Note , et al. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +/* for isl3886 register definitions used on ver 1 devices */ +#include "p54pci.h" +#include "net2280.h" + +/* pci */ +#define NET2280_BASE 0x10000000 +#define NET2280_BASE2 0x20000000 + +/* gpio */ +#define P54U_BRG_POWER_UP (1 << GPIO0_DATA) +#define P54U_BRG_POWER_DOWN (1 << GPIO1_DATA) + +/* devinit */ +#define NET2280_CLK_4Mhz (15 << LOCAL_CLOCK_FREQUENCY) +#define NET2280_CLK_30Mhz (2 << LOCAL_CLOCK_FREQUENCY) +#define NET2280_CLK_60Mhz (1 << LOCAL_CLOCK_FREQUENCY) +#define NET2280_CLK_STOP (0 << LOCAL_CLOCK_FREQUENCY) +#define NET2280_PCI_ENABLE (1 << PCI_ENABLE) +#define NET2280_PCI_SOFT_RESET (1 << PCI_SOFT_RESET) + +/* endpoints */ +#define NET2280_CLEAR_NAK_OUT_PACKETS_MODE (1 << CLEAR_NAK_OUT_PACKETS_MODE) +#define NET2280_FIFO_FLUSH (1 << FIFO_FLUSH) + +/* irq */ +#define NET2280_USB_INTERRUPT_ENABLE (1 << USB_INTERRUPT_ENABLE) +#define NET2280_PCI_INTA_INTERRUPT (1 << PCI_INTA_INTERRUPT) +#define NET2280_PCI_INTA_INTERRUPT_ENABLE (1 << PCI_INTA_INTERRUPT_ENABLE) + +/* registers */ +#define NET2280_DEVINIT 0x00 +#define NET2280_USBIRQENB1 0x24 +#define NET2280_IRQSTAT1 0x2c +#define NET2280_FIFOCTL 0x38 +#define NET2280_GPIOCTL 0x50 +#define NET2280_RELNUM 0x88 +#define NET2280_EPA_RSP 0x324 +#define NET2280_EPA_STAT 0x32c +#define NET2280_EPB_STAT 0x34c +#define NET2280_EPC_RSP 0x364 +#define NET2280_EPC_STAT 0x36c +#define NET2280_EPD_STAT 0x38c + +#define NET2280_EPA_CFG 0x320 +#define NET2280_EPB_CFG 0x340 +#define NET2280_EPC_CFG 0x360 +#define NET2280_EPD_CFG 0x380 +#define NET2280_EPE_CFG 0x3A0 +#define NET2280_EPF_CFG 0x3C0 +#define P54U_DEV_BASE 0x40000000 + +struct net2280_tx_hdr { + __le32 device_addr; + __le16 len; + __le16 follower; /* ? */ + u8 padding[8]; +} __attribute__((packed)); + +/* Some flags for the isl hardware registers controlling DMA inside the + * chip */ +#define ISL38XX_DMA_STATUS_DONE 0x00000001 +#define ISL38XX_DMA_STATUS_READY 0x00000002 +#define NET2280_EPA_FIFO_PCI_ADDR 0x20000000 +#define ISL38XX_DMA_MASTER_CONTROL_TRIGGER 0x00000004 + +enum net2280_op_type { + NET2280_BRG_U32 = 0x001F, + NET2280_BRG_CFG_U32 = 0x000F, + NET2280_BRG_CFG_U16 = 0x0003, + NET2280_DEV_U32 = 0x080F, + NET2280_DEV_CFG_U32 = 0x088F, + NET2280_DEV_CFG_U16 = 0x0883 +}; + +#define P54U_FW_BLOCK 2048 + +#define X2_SIGNATURE "x2 " +#define X2_SIGNATURE_SIZE 4 + +struct x2_header { + u8 signature[X2_SIGNATURE_SIZE]; + __le32 fw_load_addr; + __le32 fw_length; + __le32 crc; +} __attribute__((packed)); + +/* pipes 3 and 4 are not used by the driver */ +#define P54U_PIPE_NUMBER 9 + +enum p54u_pipe_addr { + P54U_PIPE_DATA = 0x01, + P54U_PIPE_MGMT = 0x02, + P54U_PIPE_3 = 0x03, + P54U_PIPE_4 = 0x04, + P54U_PIPE_BRG = 0x0d, + P54U_PIPE_DEV = 0x0e, + P54U_PIPE_INT = 0x0f +}; + +struct p54u_rx_info { + struct urb *urb; + struct ieee80211_hw *dev; +}; + +struct p54u_priv { + struct p54_common common; + struct usb_device *udev; + enum { + P54U_NET2280 = 0, + P54U_3887 + } hw_type; + + spinlock_t lock; + struct sk_buff_head rx_queue; +}; + +#endif /* PRISM54USB_H */ diff --git a/drivers/net/wireless/prism54/isl_ioctl.c b/drivers/net/wireless/prism54/isl_ioctl.c index 585f5996d292d75a3b3265b4e17c20866eb9a488..6d80ca421cf0a2c1974176cc0eac33fd0470f725 100644 --- a/drivers/net/wireless/prism54/isl_ioctl.c +++ b/drivers/net/wireless/prism54/isl_ioctl.c @@ -1753,7 +1753,7 @@ prism54_get_oid(struct net_device *ndev, struct iw_request_info *info, int rvalue; enum oid_num_t n = dwrq->flags; - rvalue = mgt_get_request((islpci_private *) ndev->priv, n, 0, NULL, &r); + rvalue = 
mgt_get_request(netdev_priv(ndev), n, 0, NULL, &r); dwrq->length = mgt_response_to_str(n, &r, extra); if ((isl_oid[n].flags & OID_FLAG_TYPE) != OID_TYPE_U32) kfree(r.ptr); @@ -1766,7 +1766,7 @@ prism54_set_u32(struct net_device *ndev, struct iw_request_info *info, { u32 oid = uwrq[0], u = uwrq[1]; - return mgt_set_request((islpci_private *) ndev->priv, oid, 0, &u); + return mgt_set_request(netdev_priv(ndev), oid, 0, &u); } static int @@ -1775,7 +1775,7 @@ prism54_set_raw(struct net_device *ndev, struct iw_request_info *info, { u32 oid = dwrq->flags; - return mgt_set_request((islpci_private *) ndev->priv, oid, 0, extra); + return mgt_set_request(netdev_priv(ndev), oid, 0, extra); } void @@ -2029,12 +2029,12 @@ static void format_event(islpci_private *priv, char *dest, const char *str, const struct obj_mlme *mlme, u16 *length, int error) { - const u8 *a = mlme->address; + DECLARE_MAC_BUF(mac); int n = snprintf(dest, IW_CUSTOM_MAX, - "%s %s %2.2X:%2.2X:%2.2X:%2.2X:%2.2X:%2.2X %s (%2.2X)", + "%s %s %s %s (%2.2X)", str, ((priv->iw_mode == IW_MODE_MASTER) ? "from" : "to"), - a[0], a[1], a[2], a[3], a[4], a[5], + print_mac(mac, mlme->address), (error ? (mlme->code ? " : REJECTED " : " : ACCEPTED ") : ""), mlme->code); BUG_ON(n > IW_CUSTOM_MAX); @@ -2105,15 +2105,13 @@ struct ieee80211_beacon_phdr { #define WLAN_EID_GENERIC 0xdd static u8 wpa_oid[4] = { 0x00, 0x50, 0xf2, 1 }; -#define MAC2STR(a) (a)[0], (a)[1], (a)[2], (a)[3], (a)[4], (a)[5] -#define MACSTR "%02x:%02x:%02x:%02x:%02x:%02x" - static void prism54_wpa_bss_ie_add(islpci_private *priv, u8 *bssid, u8 *wpa_ie, size_t wpa_ie_len) { struct list_head *ptr; struct islpci_bss_wpa_ie *bss = NULL; + DECLARE_MAC_BUF(mac); if (wpa_ie_len > MAX_WPA_IE_LEN) wpa_ie_len = MAX_WPA_IE_LEN; @@ -2154,8 +2152,8 @@ prism54_wpa_bss_ie_add(islpci_private *priv, u8 *bssid, bss->wpa_ie_len = wpa_ie_len; bss->last_update = jiffies; } else { - printk(KERN_DEBUG "Failed to add BSS WPA entry for " MACSTR - "\n", MAC2STR(bssid)); + printk(KERN_DEBUG "Failed to add BSS WPA entry for " + "%s\n", print_mac(mac, bssid)); } /* expire old entries from WPA list */ @@ -2221,6 +2219,7 @@ prism54_process_bss_data(islpci_private *priv, u32 oid, u8 *addr, { struct ieee80211_beacon_phdr *hdr; u8 *pos, *end; + DECLARE_MAC_BUF(mac); if (!priv->wpa) return; @@ -2231,7 +2230,7 @@ prism54_process_bss_data(islpci_private *priv, u32 oid, u8 *addr, while (pos < end) { if (pos + 2 + pos[1] > end) { printk(KERN_DEBUG "Parsing Beacon/ProbeResp failed " - "for " MACSTR "\n", MAC2STR(addr)); + "for %s\n", print_mac(mac, addr)); return; } if (pos[0] == WLAN_EID_GENERIC && pos[1] >= 4 && @@ -2270,6 +2269,7 @@ prism54_process_trap_helper(islpci_private *priv, enum oid_num_t oid, size_t len = 0; /* u16, better? */ u8 *payload = NULL, *pos = NULL; int ret; + DECLARE_MAC_BUF(mac); /* I think all trapable objects are listed here. * Some oids have a EX version. The difference is that they are emitted @@ -2358,14 +2358,8 @@ prism54_process_trap_helper(islpci_private *priv, enum oid_num_t oid, break; memcpy(&confirm->address, mlmeex->address, ETH_ALEN); - printk(KERN_DEBUG "Authenticate from: address:\t%02x:%02x:%02x:%02x:%02x:%02x\n", - mlmeex->address[0], - mlmeex->address[1], - mlmeex->address[2], - mlmeex->address[3], - mlmeex->address[4], - mlmeex->address[5] - ); + printk(KERN_DEBUG "Authenticate from: address:\t%s\n", + print_mac(mac, mlmeex->address)); confirm->id = -1; /* or mlmeex->id ? 
*/ confirm->state = 0; /* not used */ confirm->code = 0; @@ -2410,15 +2404,8 @@ prism54_process_trap_helper(islpci_private *priv, enum oid_num_t oid, wpa_ie_len = prism54_wpa_bss_ie_get(priv, mlmeex->address, wpa_ie); if (!wpa_ie_len) { - printk(KERN_DEBUG "No WPA IE found from " - "address:\t%02x:%02x:%02x:%02x:%02x:%02x\n", - mlmeex->address[0], - mlmeex->address[1], - mlmeex->address[2], - mlmeex->address[3], - mlmeex->address[4], - mlmeex->address[5] - ); + printk(KERN_DEBUG "No WPA IE found from address:\t%s\n", + print_mac(mac, mlmeex->address)); kfree(confirm); break; } @@ -2454,15 +2441,8 @@ prism54_process_trap_helper(islpci_private *priv, enum oid_num_t oid, wpa_ie_len = prism54_wpa_bss_ie_get(priv, mlmeex->address, wpa_ie); if (!wpa_ie_len) { - printk(KERN_DEBUG "No WPA IE found from " - "address:\t%02x:%02x:%02x:%02x:%02x:%02x\n", - mlmeex->address[0], - mlmeex->address[1], - mlmeex->address[2], - mlmeex->address[3], - mlmeex->address[4], - mlmeex->address[5] - ); + printk(KERN_DEBUG "No WPA IE found from address:\t%s\n", + print_mac(mac, mlmeex->address)); kfree(confirm); break; } @@ -3239,10 +3219,9 @@ static const iw_handler prism54_private_handler[] = { }; const struct iw_handler_def prism54_handler_def = { - .num_standard = sizeof (prism54_handler) / sizeof (iw_handler), - .num_private = sizeof (prism54_private_handler) / sizeof (iw_handler), - .num_private_args = - sizeof (prism54_private_args) / sizeof (struct iw_priv_args), + .num_standard = ARRAY_SIZE(prism54_handler), + .num_private = ARRAY_SIZE(prism54_private_handler), + .num_private_args = ARRAY_SIZE(prism54_private_args), .standard = (iw_handler *) prism54_handler, .private = (iw_handler *) prism54_private_handler, .private_args = (struct iw_priv_args *) prism54_private_args, diff --git a/drivers/net/wireless/prism54/islpci_dev.c b/drivers/net/wireless/prism54/islpci_dev.c index 084795355b74a312cac2bdf29c10b6a01088e528..219dd651dc41067b4ecd66083b9d23a172ffd261 100644 --- a/drivers/net/wireless/prism54/islpci_dev.c +++ b/drivers/net/wireless/prism54/islpci_dev.c @@ -808,7 +808,6 @@ islpci_setup(struct pci_dev *pdev) if (!ndev) return ndev; - SET_MODULE_OWNER(ndev); pci_set_drvdata(pdev, ndev); #if defined(SET_NETDEV_DEV) SET_NETDEV_DEV(ndev, &pdev->dev); diff --git a/drivers/net/wireless/prism54/oid_mgt.c b/drivers/net/wireless/prism54/oid_mgt.c index 42780320cd5cbef6ab1a2b9b13f91877b996c719..57a4ac34bed6cd2eefcb9034ae5a85ec75bfc642 100644 --- a/drivers/net/wireless/prism54/oid_mgt.c +++ b/drivers/net/wireless/prism54/oid_mgt.c @@ -244,13 +244,11 @@ mgt_init(islpci_private *priv) /* Alloc the cache */ for (i = 0; i < OID_NUM_LAST; i++) { if (isl_oid[i].flags & OID_FLAG_CACHED) { - priv->mib[i] = kmalloc(isl_oid[i].size * + priv->mib[i] = kzalloc(isl_oid[i].size * (isl_oid[i].range + 1), GFP_KERNEL); if (!priv->mib[i]) return -ENOMEM; - memset(priv->mib[i], 0, - isl_oid[i].size * (isl_oid[i].range + 1)); } else priv->mib[i] = NULL; } diff --git a/drivers/net/wireless/ray_cs.c b/drivers/net/wireless/ray_cs.c index 3be624295a1f79d1a3da70bcbc37620c688b6a8f..f87fe10059ae0f40319372cee58488fd0c9e899c 100644 --- a/drivers/net/wireless/ray_cs.c +++ b/drivers/net/wireless/ray_cs.c @@ -314,7 +314,7 @@ static int ray_probe(struct pcmcia_device *p_dev) if (!dev) goto fail_alloc_dev; - local = dev->priv; + local = netdev_priv(dev); local->finder = p_dev; /* The io structure describes IO port mapping. 
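/*
 * [Editor's aside -- illustrative sketch, not part of the patch.]
 * Two more mechanical cleanups appear above: kzalloc() folds the old
 * kmalloc()+memset(0) pair into one call, and ARRAY_SIZE() replaces the
 * hand-written "sizeof(array) / sizeof(element)" divisions in the
 * iw_handler_def initialisers.  In miniature (names are hypothetical):
 */
static const int example_rates[] = { 10, 20, 55, 110 };

static u8 *example_alloc_zeroed(size_t per_rate_bytes)
{
        /* one zeroed slot per rate; the count tracks the table automatically */
        return kzalloc(ARRAY_SIZE(example_rates) * per_rate_bytes, GFP_KERNEL);
}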
None used here */ @@ -356,7 +356,6 @@ static int ray_probe(struct pcmcia_device *p_dev) dev->set_multicast_list = &set_multicast_list; DEBUG(2,"ray_cs ray_attach calling ether_setup.)\n"); - SET_MODULE_OWNER(dev); dev->init = &ray_dev_init; dev->open = &ray_open; dev->stop = &ray_dev_close; @@ -388,7 +387,7 @@ static void ray_detach(struct pcmcia_device *link) ray_release(link); - local = (ray_dev_t *)dev->priv; + local = netdev_priv(dev); del_timer(&local->timer); if (link->priv) { @@ -412,7 +411,8 @@ static int ray_config(struct pcmcia_device *link) win_req_t req; memreq_t mem; struct net_device *dev = (struct net_device *)link->priv; - ray_dev_t *local = (ray_dev_t *)dev->priv; + ray_dev_t *local = netdev_priv(dev); + DECLARE_MAC_BUF(mac); DEBUG(1, "ray_config(0x%p)\n", link); @@ -483,10 +483,8 @@ static int ray_config(struct pcmcia_device *link) strcpy(local->node.dev_name, dev->name); link->dev_node = &local->node; - printk(KERN_INFO "%s: RayLink, irq %d, hw_addr ", - dev->name, dev->irq); - for (i = 0; i < 6; i++) - printk("%02X%s", dev->dev_addr[i], ((i<5) ? ":" : "\n")); + printk(KERN_INFO "%s: RayLink, irq %d, hw_addr %s\n", + dev->name, dev->irq, print_mac(mac, dev->dev_addr)); return 0; @@ -520,7 +518,7 @@ static int ray_init(struct net_device *dev) int i; UCHAR *p; struct ccs __iomem *pccs; - ray_dev_t *local = (ray_dev_t *)dev->priv; + ray_dev_t *local = netdev_priv(dev); struct pcmcia_device *link = local->finder; DEBUG(1, "ray_init(0x%p)\n", dev); if (!(pcmcia_dev_present(link))) { @@ -581,7 +579,7 @@ static int ray_init(struct net_device *dev) static int dl_startup_params(struct net_device *dev) { int ccsindex; - ray_dev_t *local = (ray_dev_t *)dev->priv; + ray_dev_t *local = netdev_priv(dev); struct ccs __iomem *pccs; struct pcmcia_device *link = local->finder; @@ -786,7 +784,7 @@ static void join_net(u_long data) static void ray_release(struct pcmcia_device *link) { struct net_device *dev = link->priv; - ray_dev_t *local = dev->priv; + ray_dev_t *local = netdev_priv(dev); int i; DEBUG(1, "ray_release(0x%p)\n", link); @@ -834,7 +832,7 @@ int ray_dev_init(struct net_device *dev) #ifdef RAY_IMMEDIATE_INIT int i; #endif /* RAY_IMMEDIATE_INIT */ - ray_dev_t *local = dev->priv; + ray_dev_t *local = netdev_priv(dev); struct pcmcia_device *link = local->finder; DEBUG(1,"ray_dev_init(dev=%p)\n",dev); @@ -868,7 +866,7 @@ int ray_dev_init(struct net_device *dev) /*===========================================================================*/ static int ray_dev_config(struct net_device *dev, struct ifmap *map) { - ray_dev_t *local = dev->priv; + ray_dev_t *local = netdev_priv(dev); struct pcmcia_device *link = local->finder; /* Dummy routine to satisfy device structure */ DEBUG(1,"ray_dev_config(dev=%p,ifmap=%p)\n",dev,map); @@ -882,7 +880,7 @@ static int ray_dev_config(struct net_device *dev, struct ifmap *map) /*===========================================================================*/ static int ray_dev_start_xmit(struct sk_buff *skb, struct net_device *dev) { - ray_dev_t *local = dev->priv; + ray_dev_t *local = netdev_priv(dev); struct pcmcia_device *link = local->finder; short length = skb->len; @@ -925,7 +923,7 @@ static int ray_dev_start_xmit(struct sk_buff *skb, struct net_device *dev) static int ray_hw_xmit(unsigned char* data, int len, struct net_device* dev, UCHAR msg_type) { - ray_dev_t *local = (ray_dev_t *)dev->priv; + ray_dev_t *local = netdev_priv(dev); struct ccs __iomem *pccs; int ccsindex; int offset; @@ -1099,7 +1097,7 @@ static int ray_set_freq(struct 
net_device *dev, struct iw_freq *fwrq, char *extra) { - ray_dev_t *local = (ray_dev_t *)dev->priv; + ray_dev_t *local = netdev_priv(dev); int err = -EINPROGRESS; /* Call commit handler */ /* Reject if card is already initialised */ @@ -1124,7 +1122,7 @@ static int ray_get_freq(struct net_device *dev, struct iw_freq *fwrq, char *extra) { - ray_dev_t *local = (ray_dev_t *)dev->priv; + ray_dev_t *local = netdev_priv(dev); fwrq->m = local->sparm.b5.a_hop_pattern; fwrq->e = 0; @@ -1140,7 +1138,7 @@ static int ray_set_essid(struct net_device *dev, struct iw_point *dwrq, char *extra) { - ray_dev_t *local = (ray_dev_t *)dev->priv; + ray_dev_t *local = netdev_priv(dev); /* Reject if card is already initialised */ if(local->card_status != CARD_AWAITING_PARAM) @@ -1173,7 +1171,7 @@ static int ray_get_essid(struct net_device *dev, struct iw_point *dwrq, char *extra) { - ray_dev_t *local = (ray_dev_t *)dev->priv; + ray_dev_t *local = netdev_priv(dev); /* Get the essid that was set */ memcpy(extra, local->sparm.b5.a_current_ess_id, IW_ESSID_MAX_SIZE); @@ -1194,7 +1192,7 @@ static int ray_get_wap(struct net_device *dev, struct sockaddr *awrq, char *extra) { - ray_dev_t *local = (ray_dev_t *)dev->priv; + ray_dev_t *local = netdev_priv(dev); memcpy(awrq->sa_data, local->bss_id, ETH_ALEN); awrq->sa_family = ARPHRD_ETHER; @@ -1211,7 +1209,7 @@ static int ray_set_rate(struct net_device *dev, struct iw_param *vwrq, char *extra) { - ray_dev_t *local = (ray_dev_t *)dev->priv; + ray_dev_t *local = netdev_priv(dev); /* Reject if card is already initialised */ if(local->card_status != CARD_AWAITING_PARAM) @@ -1240,7 +1238,7 @@ static int ray_get_rate(struct net_device *dev, struct iw_param *vwrq, char *extra) { - ray_dev_t *local = (ray_dev_t *)dev->priv; + ray_dev_t *local = netdev_priv(dev); if(local->net_default_tx_rate == 3) vwrq->value = 2000000; /* Hum... 
*/ @@ -1260,7 +1258,7 @@ static int ray_set_rts(struct net_device *dev, struct iw_param *vwrq, char *extra) { - ray_dev_t *local = (ray_dev_t *)dev->priv; + ray_dev_t *local = netdev_priv(dev); int rthr = vwrq->value; /* Reject if card is already initialised */ @@ -1290,7 +1288,7 @@ static int ray_get_rts(struct net_device *dev, struct iw_param *vwrq, char *extra) { - ray_dev_t *local = (ray_dev_t *)dev->priv; + ray_dev_t *local = netdev_priv(dev); vwrq->value = (local->sparm.b5.a_rts_threshold[0] << 8) + local->sparm.b5.a_rts_threshold[1]; @@ -1309,7 +1307,7 @@ static int ray_set_frag(struct net_device *dev, struct iw_param *vwrq, char *extra) { - ray_dev_t *local = (ray_dev_t *)dev->priv; + ray_dev_t *local = netdev_priv(dev); int fthr = vwrq->value; /* Reject if card is already initialised */ @@ -1338,7 +1336,7 @@ static int ray_get_frag(struct net_device *dev, struct iw_param *vwrq, char *extra) { - ray_dev_t *local = (ray_dev_t *)dev->priv; + ray_dev_t *local = netdev_priv(dev); vwrq->value = (local->sparm.b5.a_frag_threshold[0] << 8) + local->sparm.b5.a_frag_threshold[1]; @@ -1357,7 +1355,7 @@ static int ray_set_mode(struct net_device *dev, __u32 *uwrq, char *extra) { - ray_dev_t *local = (ray_dev_t *)dev->priv; + ray_dev_t *local = netdev_priv(dev); int err = -EINPROGRESS; /* Call commit handler */ char card_mode = 1; @@ -1389,7 +1387,7 @@ static int ray_get_mode(struct net_device *dev, __u32 *uwrq, char *extra) { - ray_dev_t *local = (ray_dev_t *)dev->priv; + ray_dev_t *local = netdev_priv(dev); if(local->sparm.b5.a_network_type) *uwrq = IW_MODE_INFRA; @@ -1492,7 +1490,7 @@ static int ray_commit(struct net_device *dev, */ static iw_stats * ray_get_wireless_stats(struct net_device * dev) { - ray_dev_t * local = (ray_dev_t *) dev->priv; + ray_dev_t * local = netdev_priv(dev); struct pcmcia_device *link = local->finder; struct status __iomem *p = local->sram + STATUS_BASE; @@ -1568,9 +1566,9 @@ static const struct iw_priv_args ray_private_args[] = { static const struct iw_handler_def ray_handler_def = { - .num_standard = sizeof(ray_handler)/sizeof(iw_handler), - .num_private = sizeof(ray_private_handler)/sizeof(iw_handler), - .num_private_args = sizeof(ray_private_args)/sizeof(struct iw_priv_args), + .num_standard = ARRAY_SIZE(ray_handler), + .num_private = ARRAY_SIZE(ray_private_handler), + .num_private_args = ARRAY_SIZE(ray_private_args), .standard = ray_handler, .private = ray_private_handler, .private_args = ray_private_args, @@ -1580,7 +1578,7 @@ static const struct iw_handler_def ray_handler_def = /*===========================================================================*/ static int ray_open(struct net_device *dev) { - ray_dev_t *local = (ray_dev_t *)dev->priv; + ray_dev_t *local = netdev_priv(dev); struct pcmcia_device *link; link = local->finder; @@ -1614,7 +1612,7 @@ static int ray_open(struct net_device *dev) /*===========================================================================*/ static int ray_dev_close(struct net_device *dev) { - ray_dev_t *local = (ray_dev_t *)dev->priv; + ray_dev_t *local = netdev_priv(dev); struct pcmcia_device *link; link = local->finder; @@ -1773,7 +1771,7 @@ static int parse_addr(char *in_str, UCHAR *out) /*===========================================================================*/ static struct net_device_stats *ray_get_stats(struct net_device *dev) { - ray_dev_t *local = (ray_dev_t *)dev->priv; + ray_dev_t *local = netdev_priv(dev); struct pcmcia_device *link = local->finder; struct status __iomem *p = local->sram + STATUS_BASE; if 
(!(pcmcia_dev_present(link))) { @@ -1803,7 +1801,7 @@ static struct net_device_stats *ray_get_stats(struct net_device *dev) /*===========================================================================*/ static void ray_update_parm(struct net_device *dev, UCHAR objid, UCHAR *value, int len) { - ray_dev_t *local = (ray_dev_t *)dev->priv; + ray_dev_t *local = netdev_priv(dev); struct pcmcia_device *link = local->finder; int ccsindex; int i; @@ -1840,7 +1838,7 @@ static void ray_update_multi_list(struct net_device *dev, int all) int ccsindex; struct ccs __iomem *pccs; int i = 0; - ray_dev_t *local = (ray_dev_t *)dev->priv; + ray_dev_t *local = netdev_priv(dev); struct pcmcia_device *link = local->finder; void __iomem *p = local->sram + HOST_TO_ECF_BASE; @@ -1884,7 +1882,7 @@ static void ray_update_multi_list(struct net_device *dev, int all) /*===========================================================================*/ static void set_multicast_list(struct net_device *dev) { - ray_dev_t *local = (ray_dev_t *)dev->priv; + ray_dev_t *local = netdev_priv(dev); UCHAR promisc; DEBUG(2,"ray_cs set_multicast_list(%p)\n",dev); @@ -1935,7 +1933,7 @@ static irqreturn_t ray_interrupt(int irq, void *dev_id) DEBUG(4,"ray_cs: interrupt for *dev=%p\n",dev); - local = (ray_dev_t *)dev->priv; + local = netdev_priv(dev); link = (struct pcmcia_device *)local->finder; if (!pcmcia_dev_present(link)) { DEBUG(2,"ray_cs interrupt from device not present or suspended.\n"); @@ -2165,7 +2163,7 @@ static void rx_data(struct net_device *dev, struct rcs __iomem *prcs, unsigned i { struct sk_buff *skb = NULL; struct rcs __iomem *prcslink = prcs; - ray_dev_t *local = dev->priv; + ray_dev_t *local = netdev_priv(dev); UCHAR *rx_ptr; int total_len; int tmp; @@ -2611,6 +2609,7 @@ static int ray_cs_proc_read(char *buf, char **start, off_t offset, int len) UCHAR *p; struct freq_hop_element *pfh; UCHAR c[33]; + DECLARE_MAC_BUF(mac); link = this_device; if (!link) @@ -2618,7 +2617,7 @@ static int ray_cs_proc_read(char *buf, char **start, off_t offset, int len) dev = (struct net_device *)link->priv; if (!dev) return 0; - local = (ray_dev_t *)dev->priv; + local = netdev_priv(dev); if (!local) return 0; @@ -2640,9 +2639,8 @@ static int ray_cs_proc_read(char *buf, char **start, off_t offset, int len) nettype[local->sparm.b5.a_network_type], c); p = local->bss_id; - len += sprintf(buf + len, - "BSSID = %02x:%02x:%02x:%02x:%02x:%02x\n", - p[0],p[1],p[2],p[3],p[4],p[5]); + len += sprintf(buf + len, "BSSID = %s\n", + print_mac(mac, p)); len += sprintf(buf + len, "Country code = %d\n", local->sparm.b5.a_curr_country_code); diff --git a/drivers/net/wireless/rt2x00/Kconfig b/drivers/net/wireless/rt2x00/Kconfig new file mode 100644 index 0000000000000000000000000000000000000000..da05b1faf60d359ff21dcb4a39620e83d6f4a2f7 --- /dev/null +++ b/drivers/net/wireless/rt2x00/Kconfig @@ -0,0 +1,130 @@ +config RT2X00 + tristate "Ralink driver support" + depends on MAC80211 && WLAN_80211 && EXPERIMENTAL + ---help--- + This will enable the experimental support for the Ralink drivers, + developed in the rt2x00 project . + + These drivers will make use of the Devicescape ieee80211 stack. + + When building one of the individual drivers, the rt2x00 library + will also be created. That library (when the driver is built as + a module) will be called "rt2x00lib.ko". 
+ +config RT2X00_LIB + tristate + depends on RT2X00 + +config RT2X00_LIB_PCI + tristate + depends on RT2X00 + select RT2X00_LIB + +config RT2X00_LIB_USB + tristate + depends on RT2X00 + select RT2X00_LIB + +config RT2X00_LIB_FIRMWARE + boolean + depends on RT2X00_LIB + select CRC_ITU_T + select FW_LOADER + +config RT2X00_LIB_RFKILL + boolean + depends on RT2X00_LIB + select RFKILL + select INPUT_POLLDEV + +config RT2400PCI + tristate "Ralink rt2400 pci/pcmcia support" + depends on RT2X00 && PCI + select RT2X00_LIB_PCI + select EEPROM_93CX6 + ---help--- + This is an experimental driver for the Ralink rt2400 wireless chip. + + When compiled as a module, this driver will be called "rt2400pci.ko". + +config RT2400PCI_RFKILL + bool "RT2400 rfkill support" + depends on RT2400PCI + select RT2X00_LIB_RFKILL + ---help--- + This adds support for integrated rt2400 devices that feature a + hardware button to control the radio state. + This feature depends on the RF switch subsystem rfkill. + +config RT2500PCI + tristate "Ralink rt2500 pci/pcmcia support" + depends on RT2X00 && PCI + select RT2X00_LIB_PCI + select EEPROM_93CX6 + ---help--- + This is an experimental driver for the Ralink rt2500 wireless chip. + + When compiled as a module, this driver will be called "rt2500pci.ko". + +config RT2500PCI_RFKILL + bool "RT2500 rfkill support" + depends on RT2500PCI + select RT2X00_LIB_RFKILL + ---help--- + This adds support for integrated rt2500 devices that feature a + hardware button to control the radio state. + This feature depends on the RF switch subsystem rfkill. + +config RT61PCI + tristate "Ralink rt61 pci/pcmcia support" + depends on RT2X00 && PCI + select RT2X00_LIB_PCI + select RT2X00_LIB_FIRMWARE + select EEPROM_93CX6 + ---help--- + This is an experimental driver for the Ralink rt61 wireless chip. + + When compiled as a module, this driver will be called "rt61pci.ko". + +config RT61PCI_RFKILL + bool "RT61 rfkill support" + depends on RT61PCI + select RT2X00_LIB_RFKILL + ---help--- + This adds support for integrated rt61 devices that feature a + hardware button to control the radio state. + This feature depends on the RF switch subsystem rfkill. + +config RT2500USB + tristate "Ralink rt2500 usb support" + depends on RT2X00 && USB + select RT2X00_LIB_USB + ---help--- + This is an experimental driver for the Ralink rt2500 wireless chip. + + When compiled as a module, this driver will be called "rt2500usb.ko". + +config RT73USB + tristate "Ralink rt73 usb support" + depends on RT2X00 && USB + select RT2X00_LIB_USB + select RT2X00_LIB_FIRMWARE + ---help--- + This is an experimental driver for the Ralink rt73 wireless chip. + + When compiled as a module, this driver will be called "rt73usb.ko". + +config RT2X00_LIB_DEBUGFS + bool "Ralink debugfs support" + depends on RT2X00_LIB && MAC80211_DEBUGFS + ---help--- + Enable creation of debugfs files for the rt2x00 drivers. + These debugfs files support both reading and writing of the + most important register types of the rt2x00 devices. 
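/*
 * [Editor's aside -- illustrative sketch, not part of the patch.]
 * Note how the Kconfig entries above are layered: the user-visible chipset
 * options (RT2400PCI, RT2500PCI, RT61PCI, ...) "select" the invisible helper
 * symbols (RT2X00_LIB_PCI, RT2X00_LIB_RFKILL, RT2X00_LIB_FIRMWARE, ...), and
 * the C code then compiles the optional hooks in or out.  The shape used by
 * the drivers further down, reduced to a sketch:
 */
struct rt2x00_dev;

#ifdef CONFIG_RT2400PCI_RFKILL
static int example_rfkill_poll(struct rt2x00_dev *rt2x00dev)
{
        return 0;       /* would read the radio-switch GPIO here */
}
#else
#define example_rfkill_poll     NULL    /* the callback slot simply stays empty */
#endif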
+ +config RT2X00_DEBUG + bool "Ralink debug output" + depends on RT2X00_LIB + ---help--- + Enable debugging output for all rt2x00 modules + diff --git a/drivers/net/wireless/rt2x00/Makefile b/drivers/net/wireless/rt2x00/Makefile new file mode 100644 index 0000000000000000000000000000000000000000..30d654a42eeab77b1270dc2479df83a99e1783fb --- /dev/null +++ b/drivers/net/wireless/rt2x00/Makefile @@ -0,0 +1,22 @@ +rt2x00lib-objs := rt2x00dev.o rt2x00mac.o rt2x00config.o + +ifeq ($(CONFIG_RT2X00_LIB_DEBUGFS),y) + rt2x00lib-objs += rt2x00debug.o +endif + +ifeq ($(CONFIG_RT2X00_LIB_RFKILL),y) + rt2x00lib-objs += rt2x00rfkill.o +endif + +ifeq ($(CONFIG_RT2X00_LIB_FIRMWARE),y) + rt2x00lib-objs += rt2x00firmware.o +endif + +obj-$(CONFIG_RT2X00_LIB) += rt2x00lib.o +obj-$(CONFIG_RT2X00_LIB_PCI) += rt2x00pci.o +obj-$(CONFIG_RT2X00_LIB_USB) += rt2x00usb.o +obj-$(CONFIG_RT2400PCI) += rt2400pci.o +obj-$(CONFIG_RT2500PCI) += rt2500pci.o +obj-$(CONFIG_RT61PCI) += rt61pci.o +obj-$(CONFIG_RT2500USB) += rt2500usb.o +obj-$(CONFIG_RT73USB) += rt73usb.o diff --git a/drivers/net/wireless/rt2x00/rt2400pci.c b/drivers/net/wireless/rt2x00/rt2400pci.c new file mode 100644 index 0000000000000000000000000000000000000000..31c1dd271627191813d37278d23abc08bcb48455 --- /dev/null +++ b/drivers/net/wireless/rt2x00/rt2400pci.c @@ -0,0 +1,1664 @@ +/* + Copyright (C) 2004 - 2007 rt2x00 SourceForge Project + + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the + Free Software Foundation, Inc., + 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + */ + +/* + Module: rt2400pci + Abstract: rt2400pci device specific routines. + Supported chipsets: RT2460. + */ + +/* + * Set enviroment defines for rt2x00.h + */ +#define DRV_NAME "rt2400pci" + +#include +#include +#include +#include +#include +#include +#include + +#include "rt2x00.h" +#include "rt2x00pci.h" +#include "rt2400pci.h" + +/* + * Register access. + * All access to the CSR registers will go through the methods + * rt2x00pci_register_read and rt2x00pci_register_write. + * BBP and RF register require indirect register access, + * and use the CSR registers BBPCSR and RFCSR to achieve this. + * These indirect registers work with busy bits, + * and we will try maximal REGISTER_BUSY_COUNT times to access + * the register while taking a REGISTER_BUSY_DELAY us delay + * between each attampt. When the busy bit is still set at that time, + * the access attempt is considered to have failed, + * and we will print an error. + */ +static u32 rt2400pci_bbp_check(const struct rt2x00_dev *rt2x00dev) +{ + u32 reg; + unsigned int i; + + for (i = 0; i < REGISTER_BUSY_COUNT; i++) { + rt2x00pci_register_read(rt2x00dev, BBPCSR, ®); + if (!rt2x00_get_field32(reg, BBPCSR_BUSY)) + break; + udelay(REGISTER_BUSY_DELAY); + } + + return reg; +} + +static void rt2400pci_bbp_write(const struct rt2x00_dev *rt2x00dev, + const unsigned int word, const u8 value) +{ + u32 reg; + + /* + * Wait until the BBP becomes ready. 
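/*
 * [Editor's aside -- illustrative sketch, not part of the patch.]
 * Nearly every CSR access below follows the same shape: a u32 register image
 * is assembled field by field with rt2x00_set_field32() and pushed out in a
 * single rt2x00pci_register_write(), with the BUSY bit acting as the
 * handshake for the indirect BBP/RF access described above.  For example, a
 * BBP write request is packed like this (the helper is hypothetical, the
 * field names are the ones used below):
 */
static inline u32 example_pack_bbp_write(u8 regnum, u8 value)
{
        u32 reg = 0;

        rt2x00_set_field32(&reg, BBPCSR_VALUE, value);          /* data byte      */
        rt2x00_set_field32(&reg, BBPCSR_REGNUM, regnum);        /* BBP register   */
        rt2x00_set_field32(&reg, BBPCSR_BUSY, 1);               /* start transfer */
        rt2x00_set_field32(&reg, BBPCSR_WRITE_CONTROL, 1);      /* 1 = write      */
        return reg;
}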
+ */ + reg = rt2400pci_bbp_check(rt2x00dev); + if (rt2x00_get_field32(reg, BBPCSR_BUSY)) { + ERROR(rt2x00dev, "BBPCSR register busy. Write failed.\n"); + return; + } + + /* + * Write the data into the BBP. + */ + reg = 0; + rt2x00_set_field32(®, BBPCSR_VALUE, value); + rt2x00_set_field32(®, BBPCSR_REGNUM, word); + rt2x00_set_field32(®, BBPCSR_BUSY, 1); + rt2x00_set_field32(®, BBPCSR_WRITE_CONTROL, 1); + + rt2x00pci_register_write(rt2x00dev, BBPCSR, reg); +} + +static void rt2400pci_bbp_read(const struct rt2x00_dev *rt2x00dev, + const unsigned int word, u8 *value) +{ + u32 reg; + + /* + * Wait until the BBP becomes ready. + */ + reg = rt2400pci_bbp_check(rt2x00dev); + if (rt2x00_get_field32(reg, BBPCSR_BUSY)) { + ERROR(rt2x00dev, "BBPCSR register busy. Read failed.\n"); + return; + } + + /* + * Write the request into the BBP. + */ + reg = 0; + rt2x00_set_field32(®, BBPCSR_REGNUM, word); + rt2x00_set_field32(®, BBPCSR_BUSY, 1); + rt2x00_set_field32(®, BBPCSR_WRITE_CONTROL, 0); + + rt2x00pci_register_write(rt2x00dev, BBPCSR, reg); + + /* + * Wait until the BBP becomes ready. + */ + reg = rt2400pci_bbp_check(rt2x00dev); + if (rt2x00_get_field32(reg, BBPCSR_BUSY)) { + ERROR(rt2x00dev, "BBPCSR register busy. Read failed.\n"); + *value = 0xff; + return; + } + + *value = rt2x00_get_field32(reg, BBPCSR_VALUE); +} + +static void rt2400pci_rf_write(const struct rt2x00_dev *rt2x00dev, + const unsigned int word, const u32 value) +{ + u32 reg; + unsigned int i; + + if (!word) + return; + + for (i = 0; i < REGISTER_BUSY_COUNT; i++) { + rt2x00pci_register_read(rt2x00dev, RFCSR, ®); + if (!rt2x00_get_field32(reg, RFCSR_BUSY)) + goto rf_write; + udelay(REGISTER_BUSY_DELAY); + } + + ERROR(rt2x00dev, "RFCSR register busy. Write failed.\n"); + return; + +rf_write: + reg = 0; + rt2x00_set_field32(®, RFCSR_VALUE, value); + rt2x00_set_field32(®, RFCSR_NUMBER_OF_BITS, 20); + rt2x00_set_field32(®, RFCSR_IF_SELECT, 0); + rt2x00_set_field32(®, RFCSR_BUSY, 1); + + rt2x00pci_register_write(rt2x00dev, RFCSR, reg); + rt2x00_rf_write(rt2x00dev, word, value); +} + +static void rt2400pci_eepromregister_read(struct eeprom_93cx6 *eeprom) +{ + struct rt2x00_dev *rt2x00dev = eeprom->data; + u32 reg; + + rt2x00pci_register_read(rt2x00dev, CSR21, ®); + + eeprom->reg_data_in = !!rt2x00_get_field32(reg, CSR21_EEPROM_DATA_IN); + eeprom->reg_data_out = !!rt2x00_get_field32(reg, CSR21_EEPROM_DATA_OUT); + eeprom->reg_data_clock = + !!rt2x00_get_field32(reg, CSR21_EEPROM_DATA_CLOCK); + eeprom->reg_chip_select = + !!rt2x00_get_field32(reg, CSR21_EEPROM_CHIP_SELECT); +} + +static void rt2400pci_eepromregister_write(struct eeprom_93cx6 *eeprom) +{ + struct rt2x00_dev *rt2x00dev = eeprom->data; + u32 reg = 0; + + rt2x00_set_field32(®, CSR21_EEPROM_DATA_IN, !!eeprom->reg_data_in); + rt2x00_set_field32(®, CSR21_EEPROM_DATA_OUT, !!eeprom->reg_data_out); + rt2x00_set_field32(®, CSR21_EEPROM_DATA_CLOCK, + !!eeprom->reg_data_clock); + rt2x00_set_field32(®, CSR21_EEPROM_CHIP_SELECT, + !!eeprom->reg_chip_select); + + rt2x00pci_register_write(rt2x00dev, CSR21, reg); +} + +#ifdef CONFIG_RT2X00_LIB_DEBUGFS +#define CSR_OFFSET(__word) ( CSR_REG_BASE + ((__word) * sizeof(u32)) ) + +static void rt2400pci_read_csr(const struct rt2x00_dev *rt2x00dev, + const unsigned int word, u32 *data) +{ + rt2x00pci_register_read(rt2x00dev, CSR_OFFSET(word), data); +} + +static void rt2400pci_write_csr(const struct rt2x00_dev *rt2x00dev, + const unsigned int word, u32 data) +{ + rt2x00pci_register_write(rt2x00dev, CSR_OFFSET(word), data); +} + +static const struct 
rt2x00debug rt2400pci_rt2x00debug = { + .owner = THIS_MODULE, + .csr = { + .read = rt2400pci_read_csr, + .write = rt2400pci_write_csr, + .word_size = sizeof(u32), + .word_count = CSR_REG_SIZE / sizeof(u32), + }, + .eeprom = { + .read = rt2x00_eeprom_read, + .write = rt2x00_eeprom_write, + .word_size = sizeof(u16), + .word_count = EEPROM_SIZE / sizeof(u16), + }, + .bbp = { + .read = rt2400pci_bbp_read, + .write = rt2400pci_bbp_write, + .word_size = sizeof(u8), + .word_count = BBP_SIZE / sizeof(u8), + }, + .rf = { + .read = rt2x00_rf_read, + .write = rt2400pci_rf_write, + .word_size = sizeof(u32), + .word_count = RF_SIZE / sizeof(u32), + }, +}; +#endif /* CONFIG_RT2X00_LIB_DEBUGFS */ + +#ifdef CONFIG_RT2400PCI_RFKILL +static int rt2400pci_rfkill_poll(struct rt2x00_dev *rt2x00dev) +{ + u32 reg; + + rt2x00pci_register_read(rt2x00dev, GPIOCSR, ®); + return rt2x00_get_field32(reg, GPIOCSR_BIT0); +} +#else +#define rt2400pci_rfkill_poll NULL +#endif /* CONFIG_RT2400PCI_RFKILL */ + +/* + * Configuration handlers. + */ +static void rt2400pci_config_mac_addr(struct rt2x00_dev *rt2x00dev, + __le32 *mac) +{ + rt2x00pci_register_multiwrite(rt2x00dev, CSR3, mac, + (2 * sizeof(__le32))); +} + +static void rt2400pci_config_bssid(struct rt2x00_dev *rt2x00dev, + __le32 *bssid) +{ + rt2x00pci_register_multiwrite(rt2x00dev, CSR5, bssid, + (2 * sizeof(__le32))); +} + +static void rt2400pci_config_type(struct rt2x00_dev *rt2x00dev, const int type, + const int tsf_sync) +{ + u32 reg; + + rt2x00pci_register_write(rt2x00dev, CSR14, 0); + + /* + * Enable beacon config + */ + rt2x00pci_register_read(rt2x00dev, BCNCSR1, ®); + rt2x00_set_field32(®, BCNCSR1_PRELOAD, + PREAMBLE + get_duration(IEEE80211_HEADER, 20)); + rt2x00pci_register_write(rt2x00dev, BCNCSR1, reg); + + /* + * Enable synchronisation. 
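/*
 * [Editor's aside -- illustrative sketch, not part of the patch.]
 * rt2400pci_config_mac_addr()/_config_bssid() above hand the 6-byte address
 * to the hardware as two little-endian 32-bit words, hence the
 * "2 * sizeof(__le32)" multiwrite into CSR3 resp. CSR5.  The packing itself
 * is done by rt2x00lib before these hooks run; the byte order below is only
 * the editor's illustration of such a packing, not taken from this patch.
 */
static void example_pack_addr(const u8 *addr, __le32 word[2])
{
        word[0] = cpu_to_le32((u32)addr[0] | ((u32)addr[1] << 8) |
                              ((u32)addr[2] << 16) | ((u32)addr[3] << 24));
        word[1] = cpu_to_le32((u32)addr[4] | ((u32)addr[5] << 8));
}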
+ */ + rt2x00pci_register_read(rt2x00dev, CSR14, ®); + rt2x00_set_field32(®, CSR14_TSF_COUNT, 1); + rt2x00_set_field32(®, CSR14_TBCN, 1); + rt2x00_set_field32(®, CSR14_BEACON_GEN, 0); + rt2x00_set_field32(®, CSR14_TSF_SYNC, tsf_sync); + rt2x00pci_register_write(rt2x00dev, CSR14, reg); +} + +static void rt2400pci_config_preamble(struct rt2x00_dev *rt2x00dev, + const int short_preamble, + const int ack_timeout, + const int ack_consume_time) +{ + int preamble_mask; + u32 reg; + + /* + * When short preamble is enabled, we should set bit 0x08 + */ + preamble_mask = short_preamble << 3; + + rt2x00pci_register_read(rt2x00dev, TXCSR1, ®); + rt2x00_set_field32(®, TXCSR1_ACK_TIMEOUT, ack_timeout); + rt2x00_set_field32(®, TXCSR1_ACK_CONSUME_TIME, ack_consume_time); + rt2x00pci_register_write(rt2x00dev, TXCSR1, reg); + + rt2x00pci_register_read(rt2x00dev, ARCSR2, ®); + rt2x00_set_field32(®, ARCSR2_SIGNAL, 0x00 | preamble_mask); + rt2x00_set_field32(®, ARCSR2_SERVICE, 0x04); + rt2x00_set_field32(®, ARCSR2_LENGTH, get_duration(ACK_SIZE, 10)); + rt2x00pci_register_write(rt2x00dev, ARCSR2, reg); + + rt2x00pci_register_read(rt2x00dev, ARCSR3, ®); + rt2x00_set_field32(®, ARCSR3_SIGNAL, 0x01 | preamble_mask); + rt2x00_set_field32(®, ARCSR3_SERVICE, 0x04); + rt2x00_set_field32(®, ARCSR2_LENGTH, get_duration(ACK_SIZE, 20)); + rt2x00pci_register_write(rt2x00dev, ARCSR3, reg); + + rt2x00pci_register_read(rt2x00dev, ARCSR4, ®); + rt2x00_set_field32(®, ARCSR4_SIGNAL, 0x02 | preamble_mask); + rt2x00_set_field32(®, ARCSR4_SERVICE, 0x04); + rt2x00_set_field32(®, ARCSR2_LENGTH, get_duration(ACK_SIZE, 55)); + rt2x00pci_register_write(rt2x00dev, ARCSR4, reg); + + rt2x00pci_register_read(rt2x00dev, ARCSR5, ®); + rt2x00_set_field32(®, ARCSR5_SIGNAL, 0x03 | preamble_mask); + rt2x00_set_field32(®, ARCSR5_SERVICE, 0x84); + rt2x00_set_field32(®, ARCSR2_LENGTH, get_duration(ACK_SIZE, 110)); + rt2x00pci_register_write(rt2x00dev, ARCSR5, reg); +} + +static void rt2400pci_config_phymode(struct rt2x00_dev *rt2x00dev, + const int basic_rate_mask) +{ + rt2x00pci_register_write(rt2x00dev, ARCSR1, basic_rate_mask); +} + +static void rt2400pci_config_channel(struct rt2x00_dev *rt2x00dev, + struct rf_channel *rf) +{ + /* + * Switch on tuning bits. + */ + rt2x00_set_field32(&rf->rf1, RF1_TUNER, 1); + rt2x00_set_field32(&rf->rf3, RF3_TUNER, 1); + + rt2400pci_rf_write(rt2x00dev, 1, rf->rf1); + rt2400pci_rf_write(rt2x00dev, 2, rf->rf2); + rt2400pci_rf_write(rt2x00dev, 3, rf->rf3); + + /* + * RF2420 chipset don't need any additional actions. + */ + if (rt2x00_rf(&rt2x00dev->chip, RF2420)) + return; + + /* + * For the RT2421 chipsets we need to write an invalid + * reference clock rate to activate auto_tune. + * After that we set the value back to the correct channel. + */ + rt2400pci_rf_write(rt2x00dev, 1, rf->rf1); + rt2400pci_rf_write(rt2x00dev, 2, 0x000c2a32); + rt2400pci_rf_write(rt2x00dev, 3, rf->rf3); + + msleep(1); + + rt2400pci_rf_write(rt2x00dev, 1, rf->rf1); + rt2400pci_rf_write(rt2x00dev, 2, rf->rf2); + rt2400pci_rf_write(rt2x00dev, 3, rf->rf3); + + msleep(1); + + /* + * Switch off tuning bits. + */ + rt2x00_set_field32(&rf->rf1, RF1_TUNER, 0); + rt2x00_set_field32(&rf->rf3, RF3_TUNER, 0); + + rt2400pci_rf_write(rt2x00dev, 1, rf->rf1); + rt2400pci_rf_write(rt2x00dev, 3, rf->rf3); + + /* + * Clear false CRC during channel switch. 
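/*
 * [Editor's aside -- illustrative sketch, not part of the patch.]
 * The ARCSR2..ARCSR5 writes above preload the auto-responder with the PLCP
 * SIGNAL/SERVICE/LENGTH of an ACK at the four 802.11b rates; the 10/20/55/110
 * passed to get_duration() read like the rate in 100 kbit/s units.  Assuming
 * get_duration() is the usual "airtime for N bytes at that rate" helper (its
 * body is not part of this patch), it reduces to roughly:
 */
static inline unsigned int example_duration_us(unsigned int bytes,
                                               unsigned int rate_100kbps)
{
        /* bytes * 8 bits, divided by rate_100kbps * 0.1 bits per microsecond */
        return DIV_ROUND_UP(bytes * 8 * 10, rate_100kbps);
}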
+ */ + rt2x00pci_register_read(rt2x00dev, CNT0, &rf->rf1); +} + +static void rt2400pci_config_txpower(struct rt2x00_dev *rt2x00dev, int txpower) +{ + rt2400pci_bbp_write(rt2x00dev, 3, TXPOWER_TO_DEV(txpower)); +} + +static void rt2400pci_config_antenna(struct rt2x00_dev *rt2x00dev, + int antenna_tx, int antenna_rx) +{ + u8 r1; + u8 r4; + + rt2400pci_bbp_read(rt2x00dev, 4, &r4); + rt2400pci_bbp_read(rt2x00dev, 1, &r1); + + /* + * Configure the TX antenna. + */ + switch (antenna_tx) { + case ANTENNA_SW_DIVERSITY: + case ANTENNA_HW_DIVERSITY: + rt2x00_set_field8(&r1, BBP_R1_TX_ANTENNA, 1); + break; + case ANTENNA_A: + rt2x00_set_field8(&r1, BBP_R1_TX_ANTENNA, 0); + break; + case ANTENNA_B: + rt2x00_set_field8(&r1, BBP_R1_TX_ANTENNA, 2); + break; + } + + /* + * Configure the RX antenna. + */ + switch (antenna_rx) { + case ANTENNA_SW_DIVERSITY: + case ANTENNA_HW_DIVERSITY: + rt2x00_set_field8(&r4, BBP_R4_RX_ANTENNA, 1); + break; + case ANTENNA_A: + rt2x00_set_field8(&r4, BBP_R4_RX_ANTENNA, 0); + break; + case ANTENNA_B: + rt2x00_set_field8(&r4, BBP_R4_RX_ANTENNA, 2); + break; + } + + rt2400pci_bbp_write(rt2x00dev, 4, r4); + rt2400pci_bbp_write(rt2x00dev, 1, r1); +} + +static void rt2400pci_config_duration(struct rt2x00_dev *rt2x00dev, + struct rt2x00lib_conf *libconf) +{ + u32 reg; + + rt2x00pci_register_read(rt2x00dev, CSR11, ®); + rt2x00_set_field32(®, CSR11_SLOT_TIME, libconf->slot_time); + rt2x00pci_register_write(rt2x00dev, CSR11, reg); + + rt2x00pci_register_read(rt2x00dev, CSR18, ®); + rt2x00_set_field32(®, CSR18_SIFS, libconf->sifs); + rt2x00_set_field32(®, CSR18_PIFS, libconf->pifs); + rt2x00pci_register_write(rt2x00dev, CSR18, reg); + + rt2x00pci_register_read(rt2x00dev, CSR19, ®); + rt2x00_set_field32(®, CSR19_DIFS, libconf->difs); + rt2x00_set_field32(®, CSR19_EIFS, libconf->eifs); + rt2x00pci_register_write(rt2x00dev, CSR19, reg); + + rt2x00pci_register_read(rt2x00dev, TXCSR1, ®); + rt2x00_set_field32(®, TXCSR1_TSF_OFFSET, IEEE80211_HEADER); + rt2x00_set_field32(®, TXCSR1_AUTORESPONDER, 1); + rt2x00pci_register_write(rt2x00dev, TXCSR1, reg); + + rt2x00pci_register_read(rt2x00dev, CSR12, ®); + rt2x00_set_field32(®, CSR12_BEACON_INTERVAL, + libconf->conf->beacon_int * 16); + rt2x00_set_field32(®, CSR12_CFP_MAX_DURATION, + libconf->conf->beacon_int * 16); + rt2x00pci_register_write(rt2x00dev, CSR12, reg); +} + +static void rt2400pci_config(struct rt2x00_dev *rt2x00dev, + const unsigned int flags, + struct rt2x00lib_conf *libconf) +{ + if (flags & CONFIG_UPDATE_PHYMODE) + rt2400pci_config_phymode(rt2x00dev, libconf->basic_rates); + if (flags & CONFIG_UPDATE_CHANNEL) + rt2400pci_config_channel(rt2x00dev, &libconf->rf); + if (flags & CONFIG_UPDATE_TXPOWER) + rt2400pci_config_txpower(rt2x00dev, + libconf->conf->power_level); + if (flags & CONFIG_UPDATE_ANTENNA) + rt2400pci_config_antenna(rt2x00dev, + libconf->conf->antenna_sel_tx, + libconf->conf->antenna_sel_rx); + if (flags & (CONFIG_UPDATE_SLOT_TIME | CONFIG_UPDATE_BEACON_INT)) + rt2400pci_config_duration(rt2x00dev, libconf); +} + +static void rt2400pci_config_cw(struct rt2x00_dev *rt2x00dev, + struct ieee80211_tx_queue_params *params) +{ + u32 reg; + + rt2x00pci_register_read(rt2x00dev, CSR11, ®); + rt2x00_set_field32(®, CSR11_CWMIN, params->cw_min); + rt2x00_set_field32(®, CSR11_CWMAX, params->cw_max); + rt2x00pci_register_write(rt2x00dev, CSR11, reg); +} + +/* + * LED functions. 
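/*
 * [Editor's aside -- illustrative sketch, not part of the patch.]
 * rt2400pci_config_duration() above only copies precomputed values from
 * rt2x00lib_conf into CSR11/CSR18/CSR19.  For readers unfamiliar with the
 * terms: 802.11 derives the longer inter-frame spaces from SIFS and the slot
 * time, which is the relationship the library side is expected to maintain:
 */
static inline void example_derive_ifs(unsigned int sifs, unsigned int slot,
                                      unsigned int *pifs, unsigned int *difs)
{
        *pifs = sifs + slot;            /* PIFS = SIFS + one slot  */
        *difs = sifs + 2 * slot;        /* DIFS = SIFS + two slots */
}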
+ */ +static void rt2400pci_enable_led(struct rt2x00_dev *rt2x00dev) +{ + u32 reg; + + rt2x00pci_register_read(rt2x00dev, LEDCSR, &reg); + + rt2x00_set_field32(&reg, LEDCSR_ON_PERIOD, 70); + rt2x00_set_field32(&reg, LEDCSR_OFF_PERIOD, 30); + + if (rt2x00dev->led_mode == LED_MODE_TXRX_ACTIVITY) { + rt2x00_set_field32(&reg, LEDCSR_LINK, 1); + rt2x00_set_field32(&reg, LEDCSR_ACTIVITY, 0); + } else if (rt2x00dev->led_mode == LED_MODE_ASUS) { + rt2x00_set_field32(&reg, LEDCSR_LINK, 0); + rt2x00_set_field32(&reg, LEDCSR_ACTIVITY, 1); + } else { + rt2x00_set_field32(&reg, LEDCSR_LINK, 1); + rt2x00_set_field32(&reg, LEDCSR_ACTIVITY, 1); + } + + rt2x00pci_register_write(rt2x00dev, LEDCSR, reg); +} + +static void rt2400pci_disable_led(struct rt2x00_dev *rt2x00dev) +{ + u32 reg; + + rt2x00pci_register_read(rt2x00dev, LEDCSR, &reg); + rt2x00_set_field32(&reg, LEDCSR_LINK, 0); + rt2x00_set_field32(&reg, LEDCSR_ACTIVITY, 0); + rt2x00pci_register_write(rt2x00dev, LEDCSR, reg); +} + +/* + * Link tuning + */ +static void rt2400pci_link_stats(struct rt2x00_dev *rt2x00dev) +{ + u32 reg; + u8 bbp; + + /* + * Update FCS error count from register. + */ + rt2x00pci_register_read(rt2x00dev, CNT0, &reg); + rt2x00dev->link.rx_failed = rt2x00_get_field32(reg, CNT0_FCS_ERROR); + + /* + * Update False CCA count from register. + */ + rt2400pci_bbp_read(rt2x00dev, 39, &bbp); + rt2x00dev->link.false_cca = bbp; +} + +static void rt2400pci_reset_tuner(struct rt2x00_dev *rt2x00dev) +{ + rt2400pci_bbp_write(rt2x00dev, 13, 0x08); + rt2x00dev->link.vgc_level = 0x08; +} + +static void rt2400pci_link_tuner(struct rt2x00_dev *rt2x00dev) +{ + u8 reg; + + /* + * The link tuner should not run longer than 60 seconds, + * and should run once every 2 seconds. + */ + if (rt2x00dev->link.count > 60 || !(rt2x00dev->link.count & 1)) + return; + + /* + * Base r13 link tuning on the false cca count. + */ + rt2400pci_bbp_read(rt2x00dev, 13, &reg); + + if (rt2x00dev->link.false_cca > 512 && reg < 0x20) { + rt2400pci_bbp_write(rt2x00dev, 13, ++reg); + rt2x00dev->link.vgc_level = reg; + } else if (rt2x00dev->link.false_cca < 100 && reg > 0x08) { + rt2400pci_bbp_write(rt2x00dev, 13, --reg); + rt2x00dev->link.vgc_level = reg; + } +} + +/* + * Initialization functions.
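/*
 * [Editor's aside -- illustrative sketch, not part of the patch.]
 * The early return in rt2400pci_link_tuner() above packs two rules into one
 * test: stop adjusting once link.count has passed 60, and only act on odd
 * counts.  Assuming rt2x00lib advances link.count roughly once per second
 * (that code is not shown in this patch), the intent is simply:
 */
static inline int example_tuner_should_run(u32 count)
{
        if (count > 60)         /* link considered settled -- leave R13 alone */
                return 0;
        return count & 1;       /* run on every other tick, i.e. about every 2 s */
}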
+ */ +static void rt2400pci_init_rxring(struct rt2x00_dev *rt2x00dev) +{ + struct data_ring *ring = rt2x00dev->rx; + struct data_desc *rxd; + unsigned int i; + u32 word; + + memset(ring->data_addr, 0x00, rt2x00_get_ring_size(ring)); + + for (i = 0; i < ring->stats.limit; i++) { + rxd = ring->entry[i].priv; + + rt2x00_desc_read(rxd, 2, &word); + rt2x00_set_field32(&word, RXD_W2_BUFFER_LENGTH, + ring->data_size); + rt2x00_desc_write(rxd, 2, word); + + rt2x00_desc_read(rxd, 1, &word); + rt2x00_set_field32(&word, RXD_W1_BUFFER_ADDRESS, + ring->entry[i].data_dma); + rt2x00_desc_write(rxd, 1, word); + + rt2x00_desc_read(rxd, 0, &word); + rt2x00_set_field32(&word, RXD_W0_OWNER_NIC, 1); + rt2x00_desc_write(rxd, 0, word); + } + + rt2x00_ring_index_clear(rt2x00dev->rx); +} + +static void rt2400pci_init_txring(struct rt2x00_dev *rt2x00dev, const int queue) +{ + struct data_ring *ring = rt2x00lib_get_ring(rt2x00dev, queue); + struct data_desc *txd; + unsigned int i; + u32 word; + + memset(ring->data_addr, 0x00, rt2x00_get_ring_size(ring)); + + for (i = 0; i < ring->stats.limit; i++) { + txd = ring->entry[i].priv; + + rt2x00_desc_read(txd, 1, &word); + rt2x00_set_field32(&word, TXD_W1_BUFFER_ADDRESS, + ring->entry[i].data_dma); + rt2x00_desc_write(txd, 1, word); + + rt2x00_desc_read(txd, 2, &word); + rt2x00_set_field32(&word, TXD_W2_BUFFER_LENGTH, + ring->data_size); + rt2x00_desc_write(txd, 2, word); + + rt2x00_desc_read(txd, 0, &word); + rt2x00_set_field32(&word, TXD_W0_VALID, 0); + rt2x00_set_field32(&word, TXD_W0_OWNER_NIC, 0); + rt2x00_desc_write(txd, 0, word); + } + + rt2x00_ring_index_clear(ring); +} + +static int rt2400pci_init_rings(struct rt2x00_dev *rt2x00dev) +{ + u32 reg; + + /* + * Initialize rings. + */ + rt2400pci_init_rxring(rt2x00dev); + rt2400pci_init_txring(rt2x00dev, IEEE80211_TX_QUEUE_DATA0); + rt2400pci_init_txring(rt2x00dev, IEEE80211_TX_QUEUE_DATA1); + rt2400pci_init_txring(rt2x00dev, IEEE80211_TX_QUEUE_AFTER_BEACON); + rt2400pci_init_txring(rt2x00dev, IEEE80211_TX_QUEUE_BEACON); + + /* + * Initialize registers. 
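/*
 * [Editor's aside -- illustrative sketch, not part of the patch.]
 * The ring setup above uses the usual ownership handshake: RX descriptors
 * are handed straight to the NIC (RXD_W0_OWNER_NIC = 1) and are expected
 * back with the bit cleared once a frame has been DMA'd in, while TX
 * descriptors start out owned by the host (OWNER_NIC = 0, VALID = 0) and are
 * flipped per packet.  Checking whether an RX entry is back in host hands is
 * therefore a single field read (hypothetical helper):
 */
static inline int example_rxd_for_host(struct data_desc *rxd)
{
        u32 word;

        rt2x00_desc_read(rxd, 0, &word);
        return !rt2x00_get_field32(word, RXD_W0_OWNER_NIC);
}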
+ */ + rt2x00pci_register_read(rt2x00dev, TXCSR2, ®); + rt2x00_set_field32(®, TXCSR2_TXD_SIZE, + rt2x00dev->tx[IEEE80211_TX_QUEUE_DATA0].desc_size); + rt2x00_set_field32(®, TXCSR2_NUM_TXD, + rt2x00dev->tx[IEEE80211_TX_QUEUE_DATA1].stats.limit); + rt2x00_set_field32(®, TXCSR2_NUM_ATIM, + rt2x00dev->bcn[1].stats.limit); + rt2x00_set_field32(®, TXCSR2_NUM_PRIO, + rt2x00dev->tx[IEEE80211_TX_QUEUE_DATA0].stats.limit); + rt2x00pci_register_write(rt2x00dev, TXCSR2, reg); + + rt2x00pci_register_read(rt2x00dev, TXCSR3, ®); + rt2x00_set_field32(®, TXCSR3_TX_RING_REGISTER, + rt2x00dev->tx[IEEE80211_TX_QUEUE_DATA1].data_dma); + rt2x00pci_register_write(rt2x00dev, TXCSR3, reg); + + rt2x00pci_register_read(rt2x00dev, TXCSR5, ®); + rt2x00_set_field32(®, TXCSR5_PRIO_RING_REGISTER, + rt2x00dev->tx[IEEE80211_TX_QUEUE_DATA0].data_dma); + rt2x00pci_register_write(rt2x00dev, TXCSR5, reg); + + rt2x00pci_register_read(rt2x00dev, TXCSR4, ®); + rt2x00_set_field32(®, TXCSR4_ATIM_RING_REGISTER, + rt2x00dev->bcn[1].data_dma); + rt2x00pci_register_write(rt2x00dev, TXCSR4, reg); + + rt2x00pci_register_read(rt2x00dev, TXCSR6, ®); + rt2x00_set_field32(®, TXCSR6_BEACON_RING_REGISTER, + rt2x00dev->bcn[0].data_dma); + rt2x00pci_register_write(rt2x00dev, TXCSR6, reg); + + rt2x00pci_register_read(rt2x00dev, RXCSR1, ®); + rt2x00_set_field32(®, RXCSR1_RXD_SIZE, rt2x00dev->rx->desc_size); + rt2x00_set_field32(®, RXCSR1_NUM_RXD, rt2x00dev->rx->stats.limit); + rt2x00pci_register_write(rt2x00dev, RXCSR1, reg); + + rt2x00pci_register_read(rt2x00dev, RXCSR2, ®); + rt2x00_set_field32(®, RXCSR2_RX_RING_REGISTER, + rt2x00dev->rx->data_dma); + rt2x00pci_register_write(rt2x00dev, RXCSR2, reg); + + return 0; +} + +static int rt2400pci_init_registers(struct rt2x00_dev *rt2x00dev) +{ + u32 reg; + + rt2x00pci_register_write(rt2x00dev, PSCSR0, 0x00020002); + rt2x00pci_register_write(rt2x00dev, PSCSR1, 0x00000002); + rt2x00pci_register_write(rt2x00dev, PSCSR2, 0x00023f20); + rt2x00pci_register_write(rt2x00dev, PSCSR3, 0x00000002); + + rt2x00pci_register_read(rt2x00dev, TIMECSR, ®); + rt2x00_set_field32(®, TIMECSR_US_COUNT, 33); + rt2x00_set_field32(®, TIMECSR_US_64_COUNT, 63); + rt2x00_set_field32(®, TIMECSR_BEACON_EXPECT, 0); + rt2x00pci_register_write(rt2x00dev, TIMECSR, reg); + + rt2x00pci_register_read(rt2x00dev, CSR9, ®); + rt2x00_set_field32(®, CSR9_MAX_FRAME_UNIT, + (rt2x00dev->rx->data_size / 128)); + rt2x00pci_register_write(rt2x00dev, CSR9, reg); + + rt2x00pci_register_write(rt2x00dev, CNT3, 0x3f080000); + + rt2x00pci_register_read(rt2x00dev, ARCSR0, ®); + rt2x00_set_field32(®, ARCSR0_AR_BBP_DATA0, 133); + rt2x00_set_field32(®, ARCSR0_AR_BBP_ID0, 134); + rt2x00_set_field32(®, ARCSR0_AR_BBP_DATA1, 136); + rt2x00_set_field32(®, ARCSR0_AR_BBP_ID1, 135); + rt2x00pci_register_write(rt2x00dev, ARCSR0, reg); + + rt2x00pci_register_read(rt2x00dev, RXCSR3, ®); + rt2x00_set_field32(®, RXCSR3_BBP_ID0, 3); /* Tx power.*/ + rt2x00_set_field32(®, RXCSR3_BBP_ID0_VALID, 1); + rt2x00_set_field32(®, RXCSR3_BBP_ID1, 32); /* Signal */ + rt2x00_set_field32(®, RXCSR3_BBP_ID1_VALID, 1); + rt2x00_set_field32(®, RXCSR3_BBP_ID2, 36); /* Rssi */ + rt2x00_set_field32(®, RXCSR3_BBP_ID2_VALID, 1); + rt2x00pci_register_write(rt2x00dev, RXCSR3, reg); + + rt2x00pci_register_write(rt2x00dev, PWRCSR0, 0x3f3b3100); + + if (rt2x00dev->ops->lib->set_device_state(rt2x00dev, STATE_AWAKE)) + return -EBUSY; + + rt2x00pci_register_write(rt2x00dev, MACCSR0, 0x00217223); + rt2x00pci_register_write(rt2x00dev, MACCSR1, 0x00235518); + + rt2x00pci_register_read(rt2x00dev, MACCSR2, 
®); + rt2x00_set_field32(®, MACCSR2_DELAY, 64); + rt2x00pci_register_write(rt2x00dev, MACCSR2, reg); + + rt2x00pci_register_read(rt2x00dev, RALINKCSR, ®); + rt2x00_set_field32(®, RALINKCSR_AR_BBP_DATA0, 17); + rt2x00_set_field32(®, RALINKCSR_AR_BBP_ID0, 154); + rt2x00_set_field32(®, RALINKCSR_AR_BBP_DATA1, 0); + rt2x00_set_field32(®, RALINKCSR_AR_BBP_ID1, 154); + rt2x00pci_register_write(rt2x00dev, RALINKCSR, reg); + + rt2x00pci_register_read(rt2x00dev, CSR1, ®); + rt2x00_set_field32(®, CSR1_SOFT_RESET, 1); + rt2x00_set_field32(®, CSR1_BBP_RESET, 0); + rt2x00_set_field32(®, CSR1_HOST_READY, 0); + rt2x00pci_register_write(rt2x00dev, CSR1, reg); + + rt2x00pci_register_read(rt2x00dev, CSR1, ®); + rt2x00_set_field32(®, CSR1_SOFT_RESET, 0); + rt2x00_set_field32(®, CSR1_HOST_READY, 1); + rt2x00pci_register_write(rt2x00dev, CSR1, reg); + + /* + * We must clear the FCS and FIFO error count. + * These registers are cleared on read, + * so we may pass a useless variable to store the value. + */ + rt2x00pci_register_read(rt2x00dev, CNT0, ®); + rt2x00pci_register_read(rt2x00dev, CNT4, ®); + + return 0; +} + +static int rt2400pci_init_bbp(struct rt2x00_dev *rt2x00dev) +{ + unsigned int i; + u16 eeprom; + u8 reg_id; + u8 value; + + for (i = 0; i < REGISTER_BUSY_COUNT; i++) { + rt2400pci_bbp_read(rt2x00dev, 0, &value); + if ((value != 0xff) && (value != 0x00)) + goto continue_csr_init; + NOTICE(rt2x00dev, "Waiting for BBP register.\n"); + udelay(REGISTER_BUSY_DELAY); + } + + ERROR(rt2x00dev, "BBP register access failed, aborting.\n"); + return -EACCES; + +continue_csr_init: + rt2400pci_bbp_write(rt2x00dev, 1, 0x00); + rt2400pci_bbp_write(rt2x00dev, 3, 0x27); + rt2400pci_bbp_write(rt2x00dev, 4, 0x08); + rt2400pci_bbp_write(rt2x00dev, 10, 0x0f); + rt2400pci_bbp_write(rt2x00dev, 15, 0x72); + rt2400pci_bbp_write(rt2x00dev, 16, 0x74); + rt2400pci_bbp_write(rt2x00dev, 17, 0x20); + rt2400pci_bbp_write(rt2x00dev, 18, 0x72); + rt2400pci_bbp_write(rt2x00dev, 19, 0x0b); + rt2400pci_bbp_write(rt2x00dev, 20, 0x00); + rt2400pci_bbp_write(rt2x00dev, 28, 0x11); + rt2400pci_bbp_write(rt2x00dev, 29, 0x04); + rt2400pci_bbp_write(rt2x00dev, 30, 0x21); + rt2400pci_bbp_write(rt2x00dev, 31, 0x00); + + DEBUG(rt2x00dev, "Start initialization from EEPROM...\n"); + for (i = 0; i < EEPROM_BBP_SIZE; i++) { + rt2x00_eeprom_read(rt2x00dev, EEPROM_BBP_START + i, &eeprom); + + if (eeprom != 0xffff && eeprom != 0x0000) { + reg_id = rt2x00_get_field16(eeprom, EEPROM_BBP_REG_ID); + value = rt2x00_get_field16(eeprom, EEPROM_BBP_VALUE); + DEBUG(rt2x00dev, "BBP: 0x%02x, value: 0x%02x.\n", + reg_id, value); + rt2400pci_bbp_write(rt2x00dev, reg_id, value); + } + } + DEBUG(rt2x00dev, "...End initialization from EEPROM.\n"); + + return 0; +} + +/* + * Device state switch handlers. + */ +static void rt2400pci_toggle_rx(struct rt2x00_dev *rt2x00dev, + enum dev_state state) +{ + u32 reg; + + rt2x00pci_register_read(rt2x00dev, RXCSR0, ®); + rt2x00_set_field32(®, RXCSR0_DISABLE_RX, + state == STATE_RADIO_RX_OFF); + rt2x00pci_register_write(rt2x00dev, RXCSR0, reg); +} + +static void rt2400pci_toggle_irq(struct rt2x00_dev *rt2x00dev, + enum dev_state state) +{ + int mask = (state == STATE_RADIO_IRQ_OFF); + u32 reg; + + /* + * When interrupts are being enabled, the interrupt registers + * should clear the register to assure a clean state. + */ + if (state == STATE_RADIO_IRQ_ON) { + rt2x00pci_register_read(rt2x00dev, CSR7, ®); + rt2x00pci_register_write(rt2x00dev, CSR7, reg); + } + + /* + * Only toggle the interrupts bits we are going to use. 
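/*
 * [Editor's aside -- illustrative sketch, not part of the patch.]
 * Each EEPROM_BBP word read in the loop above packs one vendor-provided BBP
 * override as a <register id, value> pair, with 0x0000 and 0xffff marking
 * unused slots.  Unpacking is just two field extractions:
 */
static inline void example_unpack_bbp_word(u16 eeprom, u8 *reg_id, u8 *value)
{
        *reg_id = rt2x00_get_field16(eeprom, EEPROM_BBP_REG_ID);
        *value = rt2x00_get_field16(eeprom, EEPROM_BBP_VALUE);
}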
+ * Non-checked interrupt bits are disabled by default. + */ + rt2x00pci_register_read(rt2x00dev, CSR8, ®); + rt2x00_set_field32(®, CSR8_TBCN_EXPIRE, mask); + rt2x00_set_field32(®, CSR8_TXDONE_TXRING, mask); + rt2x00_set_field32(®, CSR8_TXDONE_ATIMRING, mask); + rt2x00_set_field32(®, CSR8_TXDONE_PRIORING, mask); + rt2x00_set_field32(®, CSR8_RXDONE, mask); + rt2x00pci_register_write(rt2x00dev, CSR8, reg); +} + +static int rt2400pci_enable_radio(struct rt2x00_dev *rt2x00dev) +{ + /* + * Initialize all registers. + */ + if (rt2400pci_init_rings(rt2x00dev) || + rt2400pci_init_registers(rt2x00dev) || + rt2400pci_init_bbp(rt2x00dev)) { + ERROR(rt2x00dev, "Register initialization failed.\n"); + return -EIO; + } + + /* + * Enable interrupts. + */ + rt2400pci_toggle_irq(rt2x00dev, STATE_RADIO_IRQ_ON); + + /* + * Enable LED + */ + rt2400pci_enable_led(rt2x00dev); + + return 0; +} + +static void rt2400pci_disable_radio(struct rt2x00_dev *rt2x00dev) +{ + u32 reg; + + /* + * Disable LED + */ + rt2400pci_disable_led(rt2x00dev); + + rt2x00pci_register_write(rt2x00dev, PWRCSR0, 0); + + /* + * Disable synchronisation. + */ + rt2x00pci_register_write(rt2x00dev, CSR14, 0); + + /* + * Cancel RX and TX. + */ + rt2x00pci_register_read(rt2x00dev, TXCSR0, ®); + rt2x00_set_field32(®, TXCSR0_ABORT, 1); + rt2x00pci_register_write(rt2x00dev, TXCSR0, reg); + + /* + * Disable interrupts. + */ + rt2400pci_toggle_irq(rt2x00dev, STATE_RADIO_IRQ_OFF); +} + +static int rt2400pci_set_state(struct rt2x00_dev *rt2x00dev, + enum dev_state state) +{ + u32 reg; + unsigned int i; + char put_to_sleep; + char bbp_state; + char rf_state; + + put_to_sleep = (state != STATE_AWAKE); + + rt2x00pci_register_read(rt2x00dev, PWRCSR1, ®); + rt2x00_set_field32(®, PWRCSR1_SET_STATE, 1); + rt2x00_set_field32(®, PWRCSR1_BBP_DESIRE_STATE, state); + rt2x00_set_field32(®, PWRCSR1_RF_DESIRE_STATE, state); + rt2x00_set_field32(®, PWRCSR1_PUT_TO_SLEEP, put_to_sleep); + rt2x00pci_register_write(rt2x00dev, PWRCSR1, reg); + + /* + * Device is not guaranteed to be in the requested state yet. + * We must wait until the register indicates that the + * device has entered the correct state. 
+ */ + for (i = 0; i < REGISTER_BUSY_COUNT; i++) { + rt2x00pci_register_read(rt2x00dev, PWRCSR1, ®); + bbp_state = rt2x00_get_field32(reg, PWRCSR1_BBP_CURR_STATE); + rf_state = rt2x00_get_field32(reg, PWRCSR1_RF_CURR_STATE); + if (bbp_state == state && rf_state == state) + return 0; + msleep(10); + } + + NOTICE(rt2x00dev, "Device failed to enter state %d, " + "current device state: bbp %d and rf %d.\n", + state, bbp_state, rf_state); + + return -EBUSY; +} + +static int rt2400pci_set_device_state(struct rt2x00_dev *rt2x00dev, + enum dev_state state) +{ + int retval = 0; + + switch (state) { + case STATE_RADIO_ON: + retval = rt2400pci_enable_radio(rt2x00dev); + break; + case STATE_RADIO_OFF: + rt2400pci_disable_radio(rt2x00dev); + break; + case STATE_RADIO_RX_ON: + case STATE_RADIO_RX_OFF: + rt2400pci_toggle_rx(rt2x00dev, state); + break; + case STATE_DEEP_SLEEP: + case STATE_SLEEP: + case STATE_STANDBY: + case STATE_AWAKE: + retval = rt2400pci_set_state(rt2x00dev, state); + break; + default: + retval = -ENOTSUPP; + break; + } + + return retval; +} + +/* + * TX descriptor initialization + */ +static void rt2400pci_write_tx_desc(struct rt2x00_dev *rt2x00dev, + struct data_desc *txd, + struct txdata_entry_desc *desc, + struct ieee80211_hdr *ieee80211hdr, + unsigned int length, + struct ieee80211_tx_control *control) +{ + u32 word; + u32 signal = 0; + u32 service = 0; + u32 length_high = 0; + u32 length_low = 0; + + /* + * The PLCP values should be treated as if they + * were BBP values. + */ + rt2x00_set_field32(&signal, BBPCSR_VALUE, desc->signal); + rt2x00_set_field32(&signal, BBPCSR_REGNUM, 5); + rt2x00_set_field32(&signal, BBPCSR_BUSY, 1); + + rt2x00_set_field32(&service, BBPCSR_VALUE, desc->service); + rt2x00_set_field32(&service, BBPCSR_REGNUM, 6); + rt2x00_set_field32(&service, BBPCSR_BUSY, 1); + + rt2x00_set_field32(&length_high, BBPCSR_VALUE, desc->length_high); + rt2x00_set_field32(&length_high, BBPCSR_REGNUM, 7); + rt2x00_set_field32(&length_high, BBPCSR_BUSY, 1); + + rt2x00_set_field32(&length_low, BBPCSR_VALUE, desc->length_low); + rt2x00_set_field32(&length_low, BBPCSR_REGNUM, 8); + rt2x00_set_field32(&length_low, BBPCSR_BUSY, 1); + + /* + * Start writing the descriptor words. 
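/*
 * [Editor's aside -- illustrative sketch, not part of the patch.]
 * The PWRCSR1 loop above is the driver's standard "request a state, then
 * poll until the hardware reports it" shape, bounded by REGISTER_BUSY_COUNT
 * iterations with a 10 ms sleep in between.  Reduced to its skeleton, with a
 * hypothetical predicate callback:
 */
static int example_poll_state(struct rt2x00_dev *rt2x00dev,
                              int (*reached)(struct rt2x00_dev *))
{
        unsigned int i;

        for (i = 0; i < REGISTER_BUSY_COUNT; i++) {
                if (reached(rt2x00dev))
                        return 0;
                msleep(10);     /* same pacing as the loop above */
        }
        return -EBUSY;          /* matches the error returned above */
}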
+ */ + rt2x00_desc_read(txd, 2, &word); + rt2x00_set_field32(&word, TXD_W2_DATABYTE_COUNT, length); + rt2x00_desc_write(txd, 2, word); + + rt2x00_desc_read(txd, 3, &word); + rt2x00_set_field32(&word, TXD_W3_PLCP_SIGNAL, signal); + rt2x00_set_field32(&word, TXD_W3_PLCP_SERVICE, service); + rt2x00_desc_write(txd, 3, word); + + rt2x00_desc_read(txd, 4, &word); + rt2x00_set_field32(&word, TXD_W4_PLCP_LENGTH_LOW, length_low); + rt2x00_set_field32(&word, TXD_W4_PLCP_LENGTH_HIGH, length_high); + rt2x00_desc_write(txd, 4, word); + + rt2x00_desc_read(txd, 0, &word); + rt2x00_set_field32(&word, TXD_W0_OWNER_NIC, 1); + rt2x00_set_field32(&word, TXD_W0_VALID, 1); + rt2x00_set_field32(&word, TXD_W0_MORE_FRAG, + test_bit(ENTRY_TXD_MORE_FRAG, &desc->flags)); + rt2x00_set_field32(&word, TXD_W0_ACK, + !(control->flags & IEEE80211_TXCTL_NO_ACK)); + rt2x00_set_field32(&word, TXD_W0_TIMESTAMP, + test_bit(ENTRY_TXD_REQ_TIMESTAMP, &desc->flags)); + rt2x00_set_field32(&word, TXD_W0_RTS, + test_bit(ENTRY_TXD_RTS_FRAME, &desc->flags)); + rt2x00_set_field32(&word, TXD_W0_IFS, desc->ifs); + rt2x00_set_field32(&word, TXD_W0_RETRY_MODE, + !!(control->flags & + IEEE80211_TXCTL_LONG_RETRY_LIMIT)); + rt2x00_desc_write(txd, 0, word); +} + +/* + * TX data initialization + */ +static void rt2400pci_kick_tx_queue(struct rt2x00_dev *rt2x00dev, + unsigned int queue) +{ + u32 reg; + + if (queue == IEEE80211_TX_QUEUE_BEACON) { + rt2x00pci_register_read(rt2x00dev, CSR14, ®); + if (!rt2x00_get_field32(reg, CSR14_BEACON_GEN)) { + rt2x00_set_field32(®, CSR14_BEACON_GEN, 1); + rt2x00pci_register_write(rt2x00dev, CSR14, reg); + } + return; + } + + rt2x00pci_register_read(rt2x00dev, TXCSR0, ®); + if (queue == IEEE80211_TX_QUEUE_DATA0) + rt2x00_set_field32(®, TXCSR0_KICK_PRIO, 1); + else if (queue == IEEE80211_TX_QUEUE_DATA1) + rt2x00_set_field32(®, TXCSR0_KICK_TX, 1); + else if (queue == IEEE80211_TX_QUEUE_AFTER_BEACON) + rt2x00_set_field32(®, TXCSR0_KICK_ATIM, 1); + rt2x00pci_register_write(rt2x00dev, TXCSR0, reg); +} + +/* + * RX control handlers + */ +static void rt2400pci_fill_rxdone(struct data_entry *entry, + struct rxdata_entry_desc *desc) +{ + struct data_desc *rxd = entry->priv; + u32 word0; + u32 word2; + + rt2x00_desc_read(rxd, 0, &word0); + rt2x00_desc_read(rxd, 2, &word2); + + desc->flags = 0; + if (rt2x00_get_field32(word0, RXD_W0_CRC_ERROR)) + desc->flags |= RX_FLAG_FAILED_FCS_CRC; + if (rt2x00_get_field32(word0, RXD_W0_PHYSICAL_ERROR)) + desc->flags |= RX_FLAG_FAILED_PLCP_CRC; + + /* + * Obtain the status about this packet. + */ + desc->signal = rt2x00_get_field32(word2, RXD_W2_SIGNAL); + desc->rssi = rt2x00_get_field32(word2, RXD_W2_RSSI) - + entry->ring->rt2x00dev->rssi_offset; + desc->ofdm = 0; + desc->size = rt2x00_get_field32(word0, RXD_W0_DATABYTE_COUNT); +} + +/* + * Interrupt functions. + */ +static void rt2400pci_txdone(struct rt2x00_dev *rt2x00dev, const int queue) +{ + struct data_ring *ring = rt2x00lib_get_ring(rt2x00dev, queue); + struct data_entry *entry; + struct data_desc *txd; + u32 word; + int tx_status; + int retry; + + while (!rt2x00_ring_empty(ring)) { + entry = rt2x00_get_data_entry_done(ring); + txd = entry->priv; + rt2x00_desc_read(txd, 0, &word); + + if (rt2x00_get_field32(word, TXD_W0_OWNER_NIC) || + !rt2x00_get_field32(word, TXD_W0_VALID)) + break; + + /* + * Obtain the status about this packet. 
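/*
 * [Editor's aside -- illustrative sketch, not part of the patch.]
 * rt2400pci_kick_tx_queue() above makes the queue-to-ring mapping explicit:
 * DATA0 is the priority ring (TXCSR0_KICK_PRIO), DATA1 the regular TX ring
 * (TXCSR0_KICK_TX), AFTER_BEACON the ATIM ring (TXCSR0_KICK_ATIM), and the
 * beacon queue is driven by the CSR14 beacon generator rather than a kick
 * bit.  A debug helper could mirror that mapping (hypothetical):
 */
static inline const char *example_queue_name(unsigned int queue)
{
        switch (queue) {
        case IEEE80211_TX_QUEUE_DATA0:          return "prio";
        case IEEE80211_TX_QUEUE_DATA1:          return "tx";
        case IEEE80211_TX_QUEUE_AFTER_BEACON:   return "atim";
        case IEEE80211_TX_QUEUE_BEACON:         return "beacon";
        default:                                return "unknown";
        }
}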
+ */ + tx_status = rt2x00_get_field32(word, TXD_W0_RESULT); + retry = rt2x00_get_field32(word, TXD_W0_RETRY_COUNT); + + rt2x00lib_txdone(entry, tx_status, retry); + + /* + * Make this entry available for reuse. + */ + entry->flags = 0; + rt2x00_set_field32(&word, TXD_W0_VALID, 0); + rt2x00_desc_write(txd, 0, word); + rt2x00_ring_index_done_inc(ring); + } + + /* + * If the data ring was full before the txdone handler + * we must make sure the packet queue in the mac80211 stack + * is reenabled when the txdone handler has finished. + */ + entry = ring->entry; + if (!rt2x00_ring_full(ring)) + ieee80211_wake_queue(rt2x00dev->hw, + entry->tx_status.control.queue); +} + +static irqreturn_t rt2400pci_interrupt(int irq, void *dev_instance) +{ + struct rt2x00_dev *rt2x00dev = dev_instance; + u32 reg; + + /* + * Get the interrupt sources & saved to local variable. + * Write register value back to clear pending interrupts. + */ + rt2x00pci_register_read(rt2x00dev, CSR7, ®); + rt2x00pci_register_write(rt2x00dev, CSR7, reg); + + if (!reg) + return IRQ_NONE; + + if (!test_bit(DEVICE_ENABLED_RADIO, &rt2x00dev->flags)) + return IRQ_HANDLED; + + /* + * Handle interrupts, walk through all bits + * and run the tasks, the bits are checked in order of + * priority. + */ + + /* + * 1 - Beacon timer expired interrupt. + */ + if (rt2x00_get_field32(reg, CSR7_TBCN_EXPIRE)) + rt2x00lib_beacondone(rt2x00dev); + + /* + * 2 - Rx ring done interrupt. + */ + if (rt2x00_get_field32(reg, CSR7_RXDONE)) + rt2x00pci_rxdone(rt2x00dev); + + /* + * 3 - Atim ring transmit done interrupt. + */ + if (rt2x00_get_field32(reg, CSR7_TXDONE_ATIMRING)) + rt2400pci_txdone(rt2x00dev, IEEE80211_TX_QUEUE_AFTER_BEACON); + + /* + * 4 - Priority ring transmit done interrupt. + */ + if (rt2x00_get_field32(reg, CSR7_TXDONE_PRIORING)) + rt2400pci_txdone(rt2x00dev, IEEE80211_TX_QUEUE_DATA0); + + /* + * 5 - Tx ring transmit done interrupt. + */ + if (rt2x00_get_field32(reg, CSR7_TXDONE_TXRING)) + rt2400pci_txdone(rt2x00dev, IEEE80211_TX_QUEUE_DATA1); + + return IRQ_HANDLED; +} + +/* + * Device probe functions. + */ +static int rt2400pci_validate_eeprom(struct rt2x00_dev *rt2x00dev) +{ + struct eeprom_93cx6 eeprom; + u32 reg; + u16 word; + u8 *mac; + + rt2x00pci_register_read(rt2x00dev, CSR21, ®); + + eeprom.data = rt2x00dev; + eeprom.register_read = rt2400pci_eepromregister_read; + eeprom.register_write = rt2400pci_eepromregister_write; + eeprom.width = rt2x00_get_field32(reg, CSR21_TYPE_93C46) ? + PCI_EEPROM_WIDTH_93C46 : PCI_EEPROM_WIDTH_93C66; + eeprom.reg_data_in = 0; + eeprom.reg_data_out = 0; + eeprom.reg_data_clock = 0; + eeprom.reg_chip_select = 0; + + eeprom_93cx6_multiread(&eeprom, EEPROM_BASE, rt2x00dev->eeprom, + EEPROM_SIZE / sizeof(u16)); + + /* + * Start validation of the data that has been read. + */ + mac = rt2x00_eeprom_addr(rt2x00dev, EEPROM_MAC_ADDR_0); + if (!is_valid_ether_addr(mac)) { + DECLARE_MAC_BUF(macbuf); + + random_ether_addr(mac); + EEPROM(rt2x00dev, "MAC: %s\n", print_mac(macbuf, mac)); + } + + rt2x00_eeprom_read(rt2x00dev, EEPROM_ANTENNA, &word); + if (word == 0xffff) { + ERROR(rt2x00dev, "Invalid EEPROM data detected.\n"); + return -EINVAL; + } + + return 0; +} + +static int rt2400pci_init_eeprom(struct rt2x00_dev *rt2x00dev) +{ + u32 reg; + u16 value; + u16 eeprom; + + /* + * Read EEPROM word for configuration. + */ + rt2x00_eeprom_read(rt2x00dev, EEPROM_ANTENNA, &eeprom); + + /* + * Identify RF chipset. 
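/*
 * [Editor's aside -- illustrative sketch, not part of the patch.]
 * The first two register accesses in rt2400pci_interrupt() above implement
 * the usual write-to-acknowledge scheme: reading CSR7 yields the pending
 * sources, and writing the very same value back clears exactly those bits,
 * so anything that arrives in between is kept pending.  As a stand-alone
 * helper:
 */
static inline u32 example_ack_pending_irqs(struct rt2x00_dev *rt2x00dev)
{
        u32 pending;

        rt2x00pci_register_read(rt2x00dev, CSR7, &pending);
        rt2x00pci_register_write(rt2x00dev, CSR7, pending);     /* ack what was read */
        return pending;
}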
+ */ + value = rt2x00_get_field16(eeprom, EEPROM_ANTENNA_RF_TYPE); + rt2x00pci_register_read(rt2x00dev, CSR0, ®); + rt2x00_set_chip(rt2x00dev, RT2460, value, reg); + + if (!rt2x00_rf(&rt2x00dev->chip, RF2420) && + !rt2x00_rf(&rt2x00dev->chip, RF2421)) { + ERROR(rt2x00dev, "Invalid RF chipset detected.\n"); + return -ENODEV; + } + + /* + * Identify default antenna configuration. + */ + rt2x00dev->hw->conf.antenna_sel_tx = + rt2x00_get_field16(eeprom, EEPROM_ANTENNA_TX_DEFAULT); + rt2x00dev->hw->conf.antenna_sel_rx = + rt2x00_get_field16(eeprom, EEPROM_ANTENNA_RX_DEFAULT); + + /* + * Store led mode, for correct led behaviour. + */ + rt2x00dev->led_mode = + rt2x00_get_field16(eeprom, EEPROM_ANTENNA_LED_MODE); + + /* + * Detect if this device has an hardware controlled radio. + */ +#ifdef CONFIG_RT2400PCI_RFKILL + if (rt2x00_get_field16(eeprom, EEPROM_ANTENNA_HARDWARE_RADIO)) + __set_bit(CONFIG_SUPPORT_HW_BUTTON, &rt2x00dev->flags); +#endif /* CONFIG_RT2400PCI_RFKILL */ + + /* + * Check if the BBP tuning should be enabled. + */ + if (!rt2x00_get_field16(eeprom, EEPROM_ANTENNA_RX_AGCVGC_TUNING)) + __set_bit(CONFIG_DISABLE_LINK_TUNING, &rt2x00dev->flags); + + return 0; +} + +/* + * RF value list for RF2420 & RF2421 + * Supports: 2.4 GHz + */ +static const struct rf_channel rf_vals_bg[] = { + { 1, 0x00022058, 0x000c1fda, 0x00000101, 0 }, + { 2, 0x00022058, 0x000c1fee, 0x00000101, 0 }, + { 3, 0x00022058, 0x000c2002, 0x00000101, 0 }, + { 4, 0x00022058, 0x000c2016, 0x00000101, 0 }, + { 5, 0x00022058, 0x000c202a, 0x00000101, 0 }, + { 6, 0x00022058, 0x000c203e, 0x00000101, 0 }, + { 7, 0x00022058, 0x000c2052, 0x00000101, 0 }, + { 8, 0x00022058, 0x000c2066, 0x00000101, 0 }, + { 9, 0x00022058, 0x000c207a, 0x00000101, 0 }, + { 10, 0x00022058, 0x000c208e, 0x00000101, 0 }, + { 11, 0x00022058, 0x000c20a2, 0x00000101, 0 }, + { 12, 0x00022058, 0x000c20b6, 0x00000101, 0 }, + { 13, 0x00022058, 0x000c20ca, 0x00000101, 0 }, + { 14, 0x00022058, 0x000c20fa, 0x00000101, 0 }, +}; + +static void rt2400pci_probe_hw_mode(struct rt2x00_dev *rt2x00dev) +{ + struct hw_mode_spec *spec = &rt2x00dev->spec; + u8 *txpower; + unsigned int i; + + /* + * Initialize all hw fields. + */ + rt2x00dev->hw->flags = IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING; + rt2x00dev->hw->extra_tx_headroom = 0; + rt2x00dev->hw->max_signal = MAX_SIGNAL; + rt2x00dev->hw->max_rssi = MAX_RX_SSI; + rt2x00dev->hw->queues = 2; + + SET_IEEE80211_DEV(rt2x00dev->hw, &rt2x00dev_pci(rt2x00dev)->dev); + SET_IEEE80211_PERM_ADDR(rt2x00dev->hw, + rt2x00_eeprom_addr(rt2x00dev, + EEPROM_MAC_ADDR_0)); + + /* + * Convert tx_power array in eeprom. + */ + txpower = rt2x00_eeprom_addr(rt2x00dev, EEPROM_TXPOWER_START); + for (i = 0; i < 14; i++) + txpower[i] = TXPOWER_FROM_DEV(txpower[i]); + + /* + * Initialize hw_mode information. + */ + spec->num_modes = 1; + spec->num_rates = 4; + spec->tx_power_a = NULL; + spec->tx_power_bg = txpower; + spec->tx_power_default = DEFAULT_TXPOWER; + + spec->num_channels = ARRAY_SIZE(rf_vals_bg); + spec->channels = rf_vals_bg; +} + +static int rt2400pci_probe_hw(struct rt2x00_dev *rt2x00dev) +{ + int retval; + + /* + * Allocate eeprom data. + */ + retval = rt2400pci_validate_eeprom(rt2x00dev); + if (retval) + return retval; + + retval = rt2400pci_init_eeprom(rt2x00dev); + if (retval) + return retval; + + /* + * Initialize hw specifications. + */ + rt2400pci_probe_hw_mode(rt2x00dev); + + /* + * This device requires the beacon ring + */ + __set_bit(DRIVER_REQUIRE_BEACON_RING, &rt2x00dev->flags); + + /* + * Set the rssi offset. 
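+	 * The offset is used in the RX path (rt2400pci_fill_rxdone) for
+	 * the RSSI <-> dBm conversion.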
+	 */
+	rt2x00dev->rssi_offset = DEFAULT_RSSI_OFFSET;
+
+	return 0;
+}
+
+/*
+ * IEEE80211 stack callback functions.
+ */
+static void rt2400pci_configure_filter(struct ieee80211_hw *hw,
+				       unsigned int changed_flags,
+				       unsigned int *total_flags,
+				       int mc_count,
+				       struct dev_addr_list *mc_list)
+{
+	struct rt2x00_dev *rt2x00dev = hw->priv;
+	struct interface *intf = &rt2x00dev->interface;
+	u32 reg;
+
+	/*
+	 * Mask off any flags we are going to ignore from
+	 * the total_flags field.
+	 */
+	*total_flags &=
+	    FIF_ALLMULTI |
+	    FIF_FCSFAIL |
+	    FIF_PLCPFAIL |
+	    FIF_CONTROL |
+	    FIF_OTHER_BSS |
+	    FIF_PROMISC_IN_BSS;
+
+	/*
+	 * Apply some rules to the filters:
+	 * - Some filters imply different filters to be set.
+	 * - Some things we can't filter out at all.
+	 * - Some filters are set based on interface type.
+	 */
+	*total_flags |= FIF_ALLMULTI;
+	if (*total_flags & FIF_OTHER_BSS ||
+	    *total_flags & FIF_PROMISC_IN_BSS)
+		*total_flags |= FIF_PROMISC_IN_BSS | FIF_OTHER_BSS;
+	if (is_interface_type(intf, IEEE80211_IF_TYPE_AP))
+		*total_flags |= FIF_PROMISC_IN_BSS;
+
+	/*
+	 * Check if there is any work left for us.
+	 */
+	if (intf->filter == *total_flags)
+		return;
+	intf->filter = *total_flags;
+
+	/*
+	 * Start configuration steps.
+	 * Note that the version error will always be dropped
+	 * since there is no filter for it at this time.
+	 */
+	rt2x00pci_register_read(rt2x00dev, RXCSR0, &reg);
+	rt2x00_set_field32(&reg, RXCSR0_DROP_CRC,
+			   !(*total_flags & FIF_FCSFAIL));
+	rt2x00_set_field32(&reg, RXCSR0_DROP_PHYSICAL,
+			   !(*total_flags & FIF_PLCPFAIL));
+	rt2x00_set_field32(&reg, RXCSR0_DROP_CONTROL,
+			   !(*total_flags & FIF_CONTROL));
+	rt2x00_set_field32(&reg, RXCSR0_DROP_NOT_TO_ME,
+			   !(*total_flags & FIF_PROMISC_IN_BSS));
+	rt2x00_set_field32(&reg, RXCSR0_DROP_TODS,
+			   !(*total_flags & FIF_PROMISC_IN_BSS));
+	rt2x00_set_field32(&reg, RXCSR0_DROP_VERSION_ERROR, 1);
+	rt2x00pci_register_write(rt2x00dev, RXCSR0, reg);
+}
+
+static int rt2400pci_set_retry_limit(struct ieee80211_hw *hw,
+				     u32 short_retry, u32 long_retry)
+{
+	struct rt2x00_dev *rt2x00dev = hw->priv;
+	u32 reg;
+
+	rt2x00pci_register_read(rt2x00dev, CSR11, &reg);
+	rt2x00_set_field32(&reg, CSR11_LONG_RETRY, long_retry);
+	rt2x00_set_field32(&reg, CSR11_SHORT_RETRY, short_retry);
+	rt2x00pci_register_write(rt2x00dev, CSR11, reg);
+
+	return 0;
+}
+
+static int rt2400pci_conf_tx(struct ieee80211_hw *hw,
+			     int queue,
+			     const struct ieee80211_tx_queue_params *params)
+{
+	struct rt2x00_dev *rt2x00dev = hw->priv;
+
+	/*
+	 * We don't support varying cw_min and cw_max
+	 * per queue. So by default we only configure the TX queue,
+	 * and ignore all other configurations.
+	 */
+	if (queue != IEEE80211_TX_QUEUE_DATA0)
+		return -EINVAL;
+
+	if (rt2x00mac_conf_tx(hw, queue, params))
+		return -EINVAL;
+
+	/*
+	 * Write configuration to register.
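+	 * rt2400pci_config_cw() takes the cw_min/cw_max values stored in
+	 * the TX ring parameters and programs them into the device.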
+ */ + rt2400pci_config_cw(rt2x00dev, &rt2x00dev->tx->tx_params); + + return 0; +} + +static u64 rt2400pci_get_tsf(struct ieee80211_hw *hw) +{ + struct rt2x00_dev *rt2x00dev = hw->priv; + u64 tsf; + u32 reg; + + rt2x00pci_register_read(rt2x00dev, CSR17, ®); + tsf = (u64) rt2x00_get_field32(reg, CSR17_HIGH_TSFTIMER) << 32; + rt2x00pci_register_read(rt2x00dev, CSR16, ®); + tsf |= rt2x00_get_field32(reg, CSR16_LOW_TSFTIMER); + + return tsf; +} + +static void rt2400pci_reset_tsf(struct ieee80211_hw *hw) +{ + struct rt2x00_dev *rt2x00dev = hw->priv; + + rt2x00pci_register_write(rt2x00dev, CSR16, 0); + rt2x00pci_register_write(rt2x00dev, CSR17, 0); +} + +static int rt2400pci_tx_last_beacon(struct ieee80211_hw *hw) +{ + struct rt2x00_dev *rt2x00dev = hw->priv; + u32 reg; + + rt2x00pci_register_read(rt2x00dev, CSR15, ®); + return rt2x00_get_field32(reg, CSR15_BEACON_SENT); +} + +static const struct ieee80211_ops rt2400pci_mac80211_ops = { + .tx = rt2x00mac_tx, + .start = rt2x00mac_start, + .stop = rt2x00mac_stop, + .add_interface = rt2x00mac_add_interface, + .remove_interface = rt2x00mac_remove_interface, + .config = rt2x00mac_config, + .config_interface = rt2x00mac_config_interface, + .configure_filter = rt2400pci_configure_filter, + .get_stats = rt2x00mac_get_stats, + .set_retry_limit = rt2400pci_set_retry_limit, + .erp_ie_changed = rt2x00mac_erp_ie_changed, + .conf_tx = rt2400pci_conf_tx, + .get_tx_stats = rt2x00mac_get_tx_stats, + .get_tsf = rt2400pci_get_tsf, + .reset_tsf = rt2400pci_reset_tsf, + .beacon_update = rt2x00pci_beacon_update, + .tx_last_beacon = rt2400pci_tx_last_beacon, +}; + +static const struct rt2x00lib_ops rt2400pci_rt2x00_ops = { + .irq_handler = rt2400pci_interrupt, + .probe_hw = rt2400pci_probe_hw, + .initialize = rt2x00pci_initialize, + .uninitialize = rt2x00pci_uninitialize, + .set_device_state = rt2400pci_set_device_state, + .rfkill_poll = rt2400pci_rfkill_poll, + .link_stats = rt2400pci_link_stats, + .reset_tuner = rt2400pci_reset_tuner, + .link_tuner = rt2400pci_link_tuner, + .write_tx_desc = rt2400pci_write_tx_desc, + .write_tx_data = rt2x00pci_write_tx_data, + .kick_tx_queue = rt2400pci_kick_tx_queue, + .fill_rxdone = rt2400pci_fill_rxdone, + .config_mac_addr = rt2400pci_config_mac_addr, + .config_bssid = rt2400pci_config_bssid, + .config_type = rt2400pci_config_type, + .config_preamble = rt2400pci_config_preamble, + .config = rt2400pci_config, +}; + +static const struct rt2x00_ops rt2400pci_ops = { + .name = DRV_NAME, + .rxd_size = RXD_DESC_SIZE, + .txd_size = TXD_DESC_SIZE, + .eeprom_size = EEPROM_SIZE, + .rf_size = RF_SIZE, + .lib = &rt2400pci_rt2x00_ops, + .hw = &rt2400pci_mac80211_ops, +#ifdef CONFIG_RT2X00_LIB_DEBUGFS + .debugfs = &rt2400pci_rt2x00debug, +#endif /* CONFIG_RT2X00_LIB_DEBUGFS */ +}; + +/* + * RT2400pci module information. 
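+ * The PCI device table below binds the supported RT2460 based cards
+ * (vendor id 0x1814) to rt2400pci_ops through PCI_DEVICE_DATA.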
+ */ +static struct pci_device_id rt2400pci_device_table[] = { + { PCI_DEVICE(0x1814, 0x0101), PCI_DEVICE_DATA(&rt2400pci_ops) }, + { 0, } +}; + +MODULE_AUTHOR(DRV_PROJECT); +MODULE_VERSION(DRV_VERSION); +MODULE_DESCRIPTION("Ralink RT2400 PCI & PCMCIA Wireless LAN driver."); +MODULE_SUPPORTED_DEVICE("Ralink RT2460 PCI & PCMCIA chipset based cards"); +MODULE_DEVICE_TABLE(pci, rt2400pci_device_table); +MODULE_LICENSE("GPL"); + +static struct pci_driver rt2400pci_driver = { + .name = DRV_NAME, + .id_table = rt2400pci_device_table, + .probe = rt2x00pci_probe, + .remove = __devexit_p(rt2x00pci_remove), + .suspend = rt2x00pci_suspend, + .resume = rt2x00pci_resume, +}; + +static int __init rt2400pci_init(void) +{ + return pci_register_driver(&rt2400pci_driver); +} + +static void __exit rt2400pci_exit(void) +{ + pci_unregister_driver(&rt2400pci_driver); +} + +module_init(rt2400pci_init); +module_exit(rt2400pci_exit); diff --git a/drivers/net/wireless/rt2x00/rt2400pci.h b/drivers/net/wireless/rt2x00/rt2400pci.h new file mode 100644 index 0000000000000000000000000000000000000000..ae22501f085d20ea42db5cc93160912ad037b9d1 --- /dev/null +++ b/drivers/net/wireless/rt2x00/rt2400pci.h @@ -0,0 +1,943 @@ +/* + Copyright (C) 2004 - 2007 rt2x00 SourceForge Project + + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the + Free Software Foundation, Inc., + 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + */ + +/* + Module: rt2400pci + Abstract: Data structures and registers for the rt2400pci module. + Supported chipsets: RT2460. + */ + +#ifndef RT2400PCI_H +#define RT2400PCI_H + +/* + * RF chip defines. + */ +#define RF2420 0x0000 +#define RF2421 0x0001 + +/* + * Signal information. + * Defaul offset is required for RSSI <-> dBm conversion. + */ +#define MAX_SIGNAL 100 +#define MAX_RX_SSI -1 +#define DEFAULT_RSSI_OFFSET 100 + +/* + * Register layout information. + */ +#define CSR_REG_BASE 0x0000 +#define CSR_REG_SIZE 0x014c +#define EEPROM_BASE 0x0000 +#define EEPROM_SIZE 0x0100 +#define BBP_SIZE 0x0020 +#define RF_SIZE 0x0010 + +/* + * Control/Status Registers(CSR). + * Some values are set in TU, whereas 1 TU == 1024 us. + */ + +/* + * CSR0: ASIC revision number. + */ +#define CSR0 0x0000 + +/* + * CSR1: System control register. + * SOFT_RESET: Software reset, 1: reset, 0: normal. + * BBP_RESET: Hardware reset, 1: reset, 0, release. + * HOST_READY: Host ready after initialization. + */ +#define CSR1 0x0004 +#define CSR1_SOFT_RESET FIELD32(0x00000001) +#define CSR1_BBP_RESET FIELD32(0x00000002) +#define CSR1_HOST_READY FIELD32(0x00000004) + +/* + * CSR2: System admin status register (invalid). + */ +#define CSR2 0x0008 + +/* + * CSR3: STA MAC address register 0. + */ +#define CSR3 0x000c +#define CSR3_BYTE0 FIELD32(0x000000ff) +#define CSR3_BYTE1 FIELD32(0x0000ff00) +#define CSR3_BYTE2 FIELD32(0x00ff0000) +#define CSR3_BYTE3 FIELD32(0xff000000) + +/* + * CSR4: STA MAC address register 1. 
+ */ +#define CSR4 0x0010 +#define CSR4_BYTE4 FIELD32(0x000000ff) +#define CSR4_BYTE5 FIELD32(0x0000ff00) + +/* + * CSR5: BSSID register 0. + */ +#define CSR5 0x0014 +#define CSR5_BYTE0 FIELD32(0x000000ff) +#define CSR5_BYTE1 FIELD32(0x0000ff00) +#define CSR5_BYTE2 FIELD32(0x00ff0000) +#define CSR5_BYTE3 FIELD32(0xff000000) + +/* + * CSR6: BSSID register 1. + */ +#define CSR6 0x0018 +#define CSR6_BYTE4 FIELD32(0x000000ff) +#define CSR6_BYTE5 FIELD32(0x0000ff00) + +/* + * CSR7: Interrupt source register. + * Write 1 to clear interrupt. + * TBCN_EXPIRE: Beacon timer expired interrupt. + * TWAKE_EXPIRE: Wakeup timer expired interrupt. + * TATIMW_EXPIRE: Timer of atim window expired interrupt. + * TXDONE_TXRING: Tx ring transmit done interrupt. + * TXDONE_ATIMRING: Atim ring transmit done interrupt. + * TXDONE_PRIORING: Priority ring transmit done interrupt. + * RXDONE: Receive done interrupt. + */ +#define CSR7 0x001c +#define CSR7_TBCN_EXPIRE FIELD32(0x00000001) +#define CSR7_TWAKE_EXPIRE FIELD32(0x00000002) +#define CSR7_TATIMW_EXPIRE FIELD32(0x00000004) +#define CSR7_TXDONE_TXRING FIELD32(0x00000008) +#define CSR7_TXDONE_ATIMRING FIELD32(0x00000010) +#define CSR7_TXDONE_PRIORING FIELD32(0x00000020) +#define CSR7_RXDONE FIELD32(0x00000040) + +/* + * CSR8: Interrupt mask register. + * Write 1 to mask interrupt. + * TBCN_EXPIRE: Beacon timer expired interrupt. + * TWAKE_EXPIRE: Wakeup timer expired interrupt. + * TATIMW_EXPIRE: Timer of atim window expired interrupt. + * TXDONE_TXRING: Tx ring transmit done interrupt. + * TXDONE_ATIMRING: Atim ring transmit done interrupt. + * TXDONE_PRIORING: Priority ring transmit done interrupt. + * RXDONE: Receive done interrupt. + */ +#define CSR8 0x0020 +#define CSR8_TBCN_EXPIRE FIELD32(0x00000001) +#define CSR8_TWAKE_EXPIRE FIELD32(0x00000002) +#define CSR8_TATIMW_EXPIRE FIELD32(0x00000004) +#define CSR8_TXDONE_TXRING FIELD32(0x00000008) +#define CSR8_TXDONE_ATIMRING FIELD32(0x00000010) +#define CSR8_TXDONE_PRIORING FIELD32(0x00000020) +#define CSR8_RXDONE FIELD32(0x00000040) + +/* + * CSR9: Maximum frame length register. + * MAX_FRAME_UNIT: Maximum frame length in 128b unit, default: 12. + */ +#define CSR9 0x0024 +#define CSR9_MAX_FRAME_UNIT FIELD32(0x00000f80) + +/* + * CSR11: Back-off control register. + * CWMIN: CWmin. Default cwmin is 31 (2^5 - 1). + * CWMAX: CWmax. Default cwmax is 1023 (2^10 - 1). + * SLOT_TIME: Slot time, default is 20us for 802.11b. + * LONG_RETRY: Long retry count. + * SHORT_RETRY: Short retry count. + */ +#define CSR11 0x002c +#define CSR11_CWMIN FIELD32(0x0000000f) +#define CSR11_CWMAX FIELD32(0x000000f0) +#define CSR11_SLOT_TIME FIELD32(0x00001f00) +#define CSR11_LONG_RETRY FIELD32(0x00ff0000) +#define CSR11_SHORT_RETRY FIELD32(0xff000000) + +/* + * CSR12: Synchronization configuration register 0. + * All units in 1/16 TU. + * BEACON_INTERVAL: Beacon interval, default is 100 TU. + * CFPMAX_DURATION: Cfp maximum duration, default is 100 TU. + */ +#define CSR12 0x0030 +#define CSR12_BEACON_INTERVAL FIELD32(0x0000ffff) +#define CSR12_CFP_MAX_DURATION FIELD32(0xffff0000) + +/* + * CSR13: Synchronization configuration register 1. + * All units in 1/16 TU. + * ATIMW_DURATION: Atim window duration. + * CFP_PERIOD: Cfp period, default is 0 TU. + */ +#define CSR13 0x0034 +#define CSR13_ATIMW_DURATION FIELD32(0x0000ffff) +#define CSR13_CFP_PERIOD FIELD32(0x00ff0000) + +/* + * CSR14: Synchronization control register. + * TSF_COUNT: Enable tsf auto counting. + * TSF_SYNC: Tsf sync, 0: disable, 1: infra, 2: ad-hoc/master mode. 
+ * TBCN: Enable tbcn with reload value. + * TCFP: Enable tcfp & cfp / cp switching. + * TATIMW: Enable tatimw & atim window switching. + * BEACON_GEN: Enable beacon generator. + * CFP_COUNT_PRELOAD: Cfp count preload value. + * TBCM_PRELOAD: Tbcn preload value in units of 64us. + */ +#define CSR14 0x0038 +#define CSR14_TSF_COUNT FIELD32(0x00000001) +#define CSR14_TSF_SYNC FIELD32(0x00000006) +#define CSR14_TBCN FIELD32(0x00000008) +#define CSR14_TCFP FIELD32(0x00000010) +#define CSR14_TATIMW FIELD32(0x00000020) +#define CSR14_BEACON_GEN FIELD32(0x00000040) +#define CSR14_CFP_COUNT_PRELOAD FIELD32(0x0000ff00) +#define CSR14_TBCM_PRELOAD FIELD32(0xffff0000) + +/* + * CSR15: Synchronization status register. + * CFP: ASIC is in contention-free period. + * ATIMW: ASIC is in ATIM window. + * BEACON_SENT: Beacon is send. + */ +#define CSR15 0x003c +#define CSR15_CFP FIELD32(0x00000001) +#define CSR15_ATIMW FIELD32(0x00000002) +#define CSR15_BEACON_SENT FIELD32(0x00000004) + +/* + * CSR16: TSF timer register 0. + */ +#define CSR16 0x0040 +#define CSR16_LOW_TSFTIMER FIELD32(0xffffffff) + +/* + * CSR17: TSF timer register 1. + */ +#define CSR17 0x0044 +#define CSR17_HIGH_TSFTIMER FIELD32(0xffffffff) + +/* + * CSR18: IFS timer register 0. + * SIFS: Sifs, default is 10 us. + * PIFS: Pifs, default is 30 us. + */ +#define CSR18 0x0048 +#define CSR18_SIFS FIELD32(0x0000ffff) +#define CSR18_PIFS FIELD32(0xffff0000) + +/* + * CSR19: IFS timer register 1. + * DIFS: Difs, default is 50 us. + * EIFS: Eifs, default is 364 us. + */ +#define CSR19 0x004c +#define CSR19_DIFS FIELD32(0x0000ffff) +#define CSR19_EIFS FIELD32(0xffff0000) + +/* + * CSR20: Wakeup timer register. + * DELAY_AFTER_TBCN: Delay after tbcn expired in units of 1/16 TU. + * TBCN_BEFORE_WAKEUP: Number of beacon before wakeup. + * AUTOWAKE: Enable auto wakeup / sleep mechanism. + */ +#define CSR20 0x0050 +#define CSR20_DELAY_AFTER_TBCN FIELD32(0x0000ffff) +#define CSR20_TBCN_BEFORE_WAKEUP FIELD32(0x00ff0000) +#define CSR20_AUTOWAKE FIELD32(0x01000000) + +/* + * CSR21: EEPROM control register. + * RELOAD: Write 1 to reload eeprom content. + * TYPE_93C46: 1: 93c46, 0:93c66. + */ +#define CSR21 0x0054 +#define CSR21_RELOAD FIELD32(0x00000001) +#define CSR21_EEPROM_DATA_CLOCK FIELD32(0x00000002) +#define CSR21_EEPROM_CHIP_SELECT FIELD32(0x00000004) +#define CSR21_EEPROM_DATA_IN FIELD32(0x00000008) +#define CSR21_EEPROM_DATA_OUT FIELD32(0x00000010) +#define CSR21_TYPE_93C46 FIELD32(0x00000020) + +/* + * CSR22: CFP control register. + * CFP_DURATION_REMAIN: Cfp duration remain, in units of TU. + * RELOAD_CFP_DURATION: Write 1 to reload cfp duration remain. + */ +#define CSR22 0x0058 +#define CSR22_CFP_DURATION_REMAIN FIELD32(0x0000ffff) +#define CSR22_RELOAD_CFP_DURATION FIELD32(0x00010000) + +/* + * Transmit related CSRs. + * Some values are set in TU, whereas 1 TU == 1024 us. + */ + +/* + * TXCSR0: TX Control Register. + * KICK_TX: Kick tx ring. + * KICK_ATIM: Kick atim ring. + * KICK_PRIO: Kick priority ring. + * ABORT: Abort all transmit related ring operation. + */ +#define TXCSR0 0x0060 +#define TXCSR0_KICK_TX FIELD32(0x00000001) +#define TXCSR0_KICK_ATIM FIELD32(0x00000002) +#define TXCSR0_KICK_PRIO FIELD32(0x00000004) +#define TXCSR0_ABORT FIELD32(0x00000008) + +/* + * TXCSR1: TX Configuration Register. + * ACK_TIMEOUT: Ack timeout, default = sifs + 2*slottime + acktime @ 1mbps. + * ACK_CONSUME_TIME: Ack consume time, default = sifs + acktime @ 1mbps. + * TSF_OFFSET: Insert tsf offset. 
+ * AUTORESPONDER: Enable auto responder which include ack & cts. + */ +#define TXCSR1 0x0064 +#define TXCSR1_ACK_TIMEOUT FIELD32(0x000001ff) +#define TXCSR1_ACK_CONSUME_TIME FIELD32(0x0003fe00) +#define TXCSR1_TSF_OFFSET FIELD32(0x00fc0000) +#define TXCSR1_AUTORESPONDER FIELD32(0x01000000) + +/* + * TXCSR2: Tx descriptor configuration register. + * TXD_SIZE: Tx descriptor size, default is 48. + * NUM_TXD: Number of tx entries in ring. + * NUM_ATIM: Number of atim entries in ring. + * NUM_PRIO: Number of priority entries in ring. + */ +#define TXCSR2 0x0068 +#define TXCSR2_TXD_SIZE FIELD32(0x000000ff) +#define TXCSR2_NUM_TXD FIELD32(0x0000ff00) +#define TXCSR2_NUM_ATIM FIELD32(0x00ff0000) +#define TXCSR2_NUM_PRIO FIELD32(0xff000000) + +/* + * TXCSR3: TX Ring Base address register. + */ +#define TXCSR3 0x006c +#define TXCSR3_TX_RING_REGISTER FIELD32(0xffffffff) + +/* + * TXCSR4: TX Atim Ring Base address register. + */ +#define TXCSR4 0x0070 +#define TXCSR4_ATIM_RING_REGISTER FIELD32(0xffffffff) + +/* + * TXCSR5: TX Prio Ring Base address register. + */ +#define TXCSR5 0x0074 +#define TXCSR5_PRIO_RING_REGISTER FIELD32(0xffffffff) + +/* + * TXCSR6: Beacon Base address register. + */ +#define TXCSR6 0x0078 +#define TXCSR6_BEACON_RING_REGISTER FIELD32(0xffffffff) + +/* + * TXCSR7: Auto responder control register. + * AR_POWERMANAGEMENT: Auto responder power management bit. + */ +#define TXCSR7 0x007c +#define TXCSR7_AR_POWERMANAGEMENT FIELD32(0x00000001) + +/* + * Receive related CSRs. + * Some values are set in TU, whereas 1 TU == 1024 us. + */ + +/* + * RXCSR0: RX Control Register. + * DISABLE_RX: Disable rx engine. + * DROP_CRC: Drop crc error. + * DROP_PHYSICAL: Drop physical error. + * DROP_CONTROL: Drop control frame. + * DROP_NOT_TO_ME: Drop not to me unicast frame. + * DROP_TODS: Drop frame tods bit is true. + * DROP_VERSION_ERROR: Drop version error frame. + * PASS_CRC: Pass all packets with crc attached. + */ +#define RXCSR0 0x0080 +#define RXCSR0_DISABLE_RX FIELD32(0x00000001) +#define RXCSR0_DROP_CRC FIELD32(0x00000002) +#define RXCSR0_DROP_PHYSICAL FIELD32(0x00000004) +#define RXCSR0_DROP_CONTROL FIELD32(0x00000008) +#define RXCSR0_DROP_NOT_TO_ME FIELD32(0x00000010) +#define RXCSR0_DROP_TODS FIELD32(0x00000020) +#define RXCSR0_DROP_VERSION_ERROR FIELD32(0x00000040) +#define RXCSR0_PASS_CRC FIELD32(0x00000080) + +/* + * RXCSR1: RX descriptor configuration register. + * RXD_SIZE: Rx descriptor size, default is 32b. + * NUM_RXD: Number of rx entries in ring. + */ +#define RXCSR1 0x0084 +#define RXCSR1_RXD_SIZE FIELD32(0x000000ff) +#define RXCSR1_NUM_RXD FIELD32(0x0000ff00) + +/* + * RXCSR2: RX Ring base address register. + */ +#define RXCSR2 0x0088 +#define RXCSR2_RX_RING_REGISTER FIELD32(0xffffffff) + +/* + * RXCSR3: BBP ID register for Rx operation. + * BBP_ID#: BBP register # id. + * BBP_ID#_VALID: BBP register # id is valid or not. + */ +#define RXCSR3 0x0090 +#define RXCSR3_BBP_ID0 FIELD32(0x0000007f) +#define RXCSR3_BBP_ID0_VALID FIELD32(0x00000080) +#define RXCSR3_BBP_ID1 FIELD32(0x00007f00) +#define RXCSR3_BBP_ID1_VALID FIELD32(0x00008000) +#define RXCSR3_BBP_ID2 FIELD32(0x007f0000) +#define RXCSR3_BBP_ID2_VALID FIELD32(0x00800000) +#define RXCSR3_BBP_ID3 FIELD32(0x7f000000) +#define RXCSR3_BBP_ID3_VALID FIELD32(0x80000000) + +/* + * RXCSR4: BBP ID register for Rx operation. + * BBP_ID#: BBP register # id. + * BBP_ID#_VALID: BBP register # id is valid or not. 
+ */ +#define RXCSR4 0x0094 +#define RXCSR4_BBP_ID4 FIELD32(0x0000007f) +#define RXCSR4_BBP_ID4_VALID FIELD32(0x00000080) +#define RXCSR4_BBP_ID5 FIELD32(0x00007f00) +#define RXCSR4_BBP_ID5_VALID FIELD32(0x00008000) + +/* + * ARCSR0: Auto Responder PLCP config register 0. + * ARCSR0_AR_BBP_DATA#: Auto responder BBP register # data. + * ARCSR0_AR_BBP_ID#: Auto responder BBP register # Id. + */ +#define ARCSR0 0x0098 +#define ARCSR0_AR_BBP_DATA0 FIELD32(0x000000ff) +#define ARCSR0_AR_BBP_ID0 FIELD32(0x0000ff00) +#define ARCSR0_AR_BBP_DATA1 FIELD32(0x00ff0000) +#define ARCSR0_AR_BBP_ID1 FIELD32(0xff000000) + +/* + * ARCSR1: Auto Responder PLCP config register 1. + * ARCSR0_AR_BBP_DATA#: Auto responder BBP register # data. + * ARCSR0_AR_BBP_ID#: Auto responder BBP register # Id. + */ +#define ARCSR1 0x009c +#define ARCSR1_AR_BBP_DATA2 FIELD32(0x000000ff) +#define ARCSR1_AR_BBP_ID2 FIELD32(0x0000ff00) +#define ARCSR1_AR_BBP_DATA3 FIELD32(0x00ff0000) +#define ARCSR1_AR_BBP_ID3 FIELD32(0xff000000) + +/* + * Miscellaneous Registers. + * Some values are set in TU, whereas 1 TU == 1024 us. + */ + +/* + * PCICSR: PCI control register. + * BIG_ENDIAN: 1: big endian, 0: little endian. + * RX_TRESHOLD: Rx threshold in dw to start pci access + * 0: 16dw (default), 1: 8dw, 2: 4dw, 3: 32dw. + * TX_TRESHOLD: Tx threshold in dw to start pci access + * 0: 0dw (default), 1: 1dw, 2: 4dw, 3: forward. + * BURST_LENTH: Pci burst length 0: 4dw (default, 1: 8dw, 2: 16dw, 3:32dw. + * ENABLE_CLK: Enable clk_run, pci clock can't going down to non-operational. + */ +#define PCICSR 0x008c +#define PCICSR_BIG_ENDIAN FIELD32(0x00000001) +#define PCICSR_RX_TRESHOLD FIELD32(0x00000006) +#define PCICSR_TX_TRESHOLD FIELD32(0x00000018) +#define PCICSR_BURST_LENTH FIELD32(0x00000060) +#define PCICSR_ENABLE_CLK FIELD32(0x00000080) + +/* + * CNT0: FCS error count. + * FCS_ERROR: FCS error count, cleared when read. + */ +#define CNT0 0x00a0 +#define CNT0_FCS_ERROR FIELD32(0x0000ffff) + +/* + * Statistic Register. + * CNT1: PLCP error count. + * CNT2: Long error count. + * CNT3: CCA false alarm count. + * CNT4: Rx FIFO overflow count. + * CNT5: Tx FIFO underrun count. + */ +#define TIMECSR2 0x00a8 +#define CNT1 0x00ac +#define CNT2 0x00b0 +#define TIMECSR3 0x00b4 +#define CNT3 0x00b8 +#define CNT4 0x00bc +#define CNT5 0x00c0 + +/* + * Baseband Control Register. + */ + +/* + * PWRCSR0: Power mode configuration register. + */ +#define PWRCSR0 0x00c4 + +/* + * Power state transition time registers. + */ +#define PSCSR0 0x00c8 +#define PSCSR1 0x00cc +#define PSCSR2 0x00d0 +#define PSCSR3 0x00d4 + +/* + * PWRCSR1: Manual power control / status register. + * Allowed state: 0 deep_sleep, 1: sleep, 2: standby, 3: awake. + * SET_STATE: Set state. Write 1 to trigger, self cleared. + * BBP_DESIRE_STATE: BBP desired state. + * RF_DESIRE_STATE: RF desired state. + * BBP_CURR_STATE: BBP current state. + * RF_CURR_STATE: RF current state. + * PUT_TO_SLEEP: Put to sleep. Write 1 to trigger, self cleared. + */ +#define PWRCSR1 0x00d8 +#define PWRCSR1_SET_STATE FIELD32(0x00000001) +#define PWRCSR1_BBP_DESIRE_STATE FIELD32(0x00000006) +#define PWRCSR1_RF_DESIRE_STATE FIELD32(0x00000018) +#define PWRCSR1_BBP_CURR_STATE FIELD32(0x00000060) +#define PWRCSR1_RF_CURR_STATE FIELD32(0x00000180) +#define PWRCSR1_PUT_TO_SLEEP FIELD32(0x00000200) + +/* + * TIMECSR: Timer control register. + * US_COUNT: 1 us timer count in units of clock cycles. + * US_64_COUNT: 64 us timer count in units of 1 us timer. + * BEACON_EXPECT: Beacon expect window. 
+ */ +#define TIMECSR 0x00dc +#define TIMECSR_US_COUNT FIELD32(0x000000ff) +#define TIMECSR_US_64_COUNT FIELD32(0x0000ff00) +#define TIMECSR_BEACON_EXPECT FIELD32(0x00070000) + +/* + * MACCSR0: MAC configuration register 0. + */ +#define MACCSR0 0x00e0 + +/* + * MACCSR1: MAC configuration register 1. + * KICK_RX: Kick one-shot rx in one-shot rx mode. + * ONESHOT_RXMODE: Enable one-shot rx mode for debugging. + * BBPRX_RESET_MODE: Ralink bbp rx reset mode. + * AUTO_TXBBP: Auto tx logic access bbp control register. + * AUTO_RXBBP: Auto rx logic access bbp control register. + * LOOPBACK: Loopback mode. 0: normal, 1: internal, 2: external, 3:rsvd. + * INTERSIL_IF: Intersil if calibration pin. + */ +#define MACCSR1 0x00e4 +#define MACCSR1_KICK_RX FIELD32(0x00000001) +#define MACCSR1_ONESHOT_RXMODE FIELD32(0x00000002) +#define MACCSR1_BBPRX_RESET_MODE FIELD32(0x00000004) +#define MACCSR1_AUTO_TXBBP FIELD32(0x00000008) +#define MACCSR1_AUTO_RXBBP FIELD32(0x00000010) +#define MACCSR1_LOOPBACK FIELD32(0x00000060) +#define MACCSR1_INTERSIL_IF FIELD32(0x00000080) + +/* + * RALINKCSR: Ralink Rx auto-reset BBCR. + * AR_BBP_DATA#: Auto reset BBP register # data. + * AR_BBP_ID#: Auto reset BBP register # id. + */ +#define RALINKCSR 0x00e8 +#define RALINKCSR_AR_BBP_DATA0 FIELD32(0x000000ff) +#define RALINKCSR_AR_BBP_ID0 FIELD32(0x0000ff00) +#define RALINKCSR_AR_BBP_DATA1 FIELD32(0x00ff0000) +#define RALINKCSR_AR_BBP_ID1 FIELD32(0xff000000) + +/* + * BCNCSR: Beacon interval control register. + * CHANGE: Write one to change beacon interval. + * DELTATIME: The delta time value. + * NUM_BEACON: Number of beacon according to mode. + * MODE: Please refer to asic specs. + * PLUS: Plus or minus delta time value. + */ +#define BCNCSR 0x00ec +#define BCNCSR_CHANGE FIELD32(0x00000001) +#define BCNCSR_DELTATIME FIELD32(0x0000001e) +#define BCNCSR_NUM_BEACON FIELD32(0x00001fe0) +#define BCNCSR_MODE FIELD32(0x00006000) +#define BCNCSR_PLUS FIELD32(0x00008000) + +/* + * BBP / RF / IF Control Register. + */ + +/* + * BBPCSR: BBP serial control register. + * VALUE: Register value to program into BBP. + * REGNUM: Selected BBP register. + * BUSY: 1: asic is busy execute BBP programming. + * WRITE_CONTROL: 1: write BBP, 0: read BBP. + */ +#define BBPCSR 0x00f0 +#define BBPCSR_VALUE FIELD32(0x000000ff) +#define BBPCSR_REGNUM FIELD32(0x00007f00) +#define BBPCSR_BUSY FIELD32(0x00008000) +#define BBPCSR_WRITE_CONTROL FIELD32(0x00010000) + +/* + * RFCSR: RF serial control register. + * VALUE: Register value + id to program into rf/if. + * NUMBER_OF_BITS: Number of bits used in value (i:20, rfmd:22). + * IF_SELECT: Chip to program: 0: rf, 1: if. + * PLL_LD: Rf pll_ld status. + * BUSY: 1: asic is busy execute rf programming. + */ +#define RFCSR 0x00f4 +#define RFCSR_VALUE FIELD32(0x00ffffff) +#define RFCSR_NUMBER_OF_BITS FIELD32(0x1f000000) +#define RFCSR_IF_SELECT FIELD32(0x20000000) +#define RFCSR_PLL_LD FIELD32(0x40000000) +#define RFCSR_BUSY FIELD32(0x80000000) + +/* + * LEDCSR: LED control register. + * ON_PERIOD: On period, default 70ms. + * OFF_PERIOD: Off period, default 30ms. + * LINK: 0: linkoff, 1: linkup. + * ACTIVITY: 0: idle, 1: active. + */ +#define LEDCSR 0x00f8 +#define LEDCSR_ON_PERIOD FIELD32(0x000000ff) +#define LEDCSR_OFF_PERIOD FIELD32(0x0000ff00) +#define LEDCSR_LINK FIELD32(0x00010000) +#define LEDCSR_ACTIVITY FIELD32(0x00020000) + +/* + * ASIC pointer information. + * RXPTR: Current RX ring address. + * TXPTR: Current Tx ring address. + * PRIPTR: Current Priority ring address. 
+ * ATIMPTR: Current ATIM ring address. + */ +#define RXPTR 0x0100 +#define TXPTR 0x0104 +#define PRIPTR 0x0108 +#define ATIMPTR 0x010c + +/* + * GPIO and others. + */ + +/* + * GPIOCSR: GPIO control register. + */ +#define GPIOCSR 0x0120 +#define GPIOCSR_BIT0 FIELD32(0x00000001) +#define GPIOCSR_BIT1 FIELD32(0x00000002) +#define GPIOCSR_BIT2 FIELD32(0x00000004) +#define GPIOCSR_BIT3 FIELD32(0x00000008) +#define GPIOCSR_BIT4 FIELD32(0x00000010) +#define GPIOCSR_BIT5 FIELD32(0x00000020) +#define GPIOCSR_BIT6 FIELD32(0x00000040) +#define GPIOCSR_BIT7 FIELD32(0x00000080) + +/* + * BBPPCSR: BBP Pin control register. + */ +#define BBPPCSR 0x0124 + +/* + * BCNCSR1: Tx BEACON offset time control register. + * PRELOAD: Beacon timer offset in units of usec. + */ +#define BCNCSR1 0x0130 +#define BCNCSR1_PRELOAD FIELD32(0x0000ffff) + +/* + * MACCSR2: TX_PE to RX_PE turn-around time control register + * DELAY: RX_PE low width, in units of pci clock cycle. + */ +#define MACCSR2 0x0134 +#define MACCSR2_DELAY FIELD32(0x000000ff) + +/* + * ARCSR2: 1 Mbps ACK/CTS PLCP. + */ +#define ARCSR2 0x013c +#define ARCSR2_SIGNAL FIELD32(0x000000ff) +#define ARCSR2_SERVICE FIELD32(0x0000ff00) +#define ARCSR2_LENGTH_LOW FIELD32(0x00ff0000) +#define ARCSR2_LENGTH FIELD32(0xffff0000) + +/* + * ARCSR3: 2 Mbps ACK/CTS PLCP. + */ +#define ARCSR3 0x0140 +#define ARCSR3_SIGNAL FIELD32(0x000000ff) +#define ARCSR3_SERVICE FIELD32(0x0000ff00) +#define ARCSR3_LENGTH FIELD32(0xffff0000) + +/* + * ARCSR4: 5.5 Mbps ACK/CTS PLCP. + */ +#define ARCSR4 0x0144 +#define ARCSR4_SIGNAL FIELD32(0x000000ff) +#define ARCSR4_SERVICE FIELD32(0x0000ff00) +#define ARCSR4_LENGTH FIELD32(0xffff0000) + +/* + * ARCSR5: 11 Mbps ACK/CTS PLCP. + */ +#define ARCSR5 0x0148 +#define ARCSR5_SIGNAL FIELD32(0x000000ff) +#define ARCSR5_SERVICE FIELD32(0x0000ff00) +#define ARCSR5_LENGTH FIELD32(0xffff0000) + +/* + * BBP registers. + * The wordsize of the BBP is 8 bits. + */ + +/* + * R1: TX antenna control + */ +#define BBP_R1_TX_ANTENNA FIELD8(0x03) + +/* + * R4: RX antenna control + */ +#define BBP_R4_RX_ANTENNA FIELD8(0x06) + +/* + * RF registers + */ + +/* + * RF 1 + */ +#define RF1_TUNER FIELD32(0x00020000) + +/* + * RF 3 + */ +#define RF3_TUNER FIELD32(0x00000100) +#define RF3_TXPOWER FIELD32(0x00003e00) + +/* + * EEPROM content. + * The wordsize of the EEPROM is 16 bits. + */ + +/* + * HW MAC address. + */ +#define EEPROM_MAC_ADDR_0 0x0002 +#define EEPROM_MAC_ADDR_BYTE0 FIELD16(0x00ff) +#define EEPROM_MAC_ADDR_BYTE1 FIELD16(0xff00) +#define EEPROM_MAC_ADDR1 0x0003 +#define EEPROM_MAC_ADDR_BYTE2 FIELD16(0x00ff) +#define EEPROM_MAC_ADDR_BYTE3 FIELD16(0xff00) +#define EEPROM_MAC_ADDR_2 0x0004 +#define EEPROM_MAC_ADDR_BYTE4 FIELD16(0x00ff) +#define EEPROM_MAC_ADDR_BYTE5 FIELD16(0xff00) + +/* + * EEPROM antenna. + * ANTENNA_NUM: Number of antenna's. + * TX_DEFAULT: Default antenna 0: diversity, 1: A, 2: B. + * RX_DEFAULT: Default antenna 0: diversity, 1: A, 2: B. + * RF_TYPE: Rf_type of this adapter. + * LED_MODE: 0: default, 1: TX/RX activity,2: Single (ignore link), 3: rsvd. + * RX_AGCVGC: 0: disable, 1:enable BBP R13 tuning. + * HARDWARE_RADIO: 1: Hardware controlled radio. Read GPIO0. 
+ */ +#define EEPROM_ANTENNA 0x0b +#define EEPROM_ANTENNA_NUM FIELD16(0x0003) +#define EEPROM_ANTENNA_TX_DEFAULT FIELD16(0x000c) +#define EEPROM_ANTENNA_RX_DEFAULT FIELD16(0x0030) +#define EEPROM_ANTENNA_RF_TYPE FIELD16(0x0040) +#define EEPROM_ANTENNA_LED_MODE FIELD16(0x0180) +#define EEPROM_ANTENNA_RX_AGCVGC_TUNING FIELD16(0x0200) +#define EEPROM_ANTENNA_HARDWARE_RADIO FIELD16(0x0400) + +/* + * EEPROM BBP. + */ +#define EEPROM_BBP_START 0x0c +#define EEPROM_BBP_SIZE 7 +#define EEPROM_BBP_VALUE FIELD16(0x00ff) +#define EEPROM_BBP_REG_ID FIELD16(0xff00) + +/* + * EEPROM TXPOWER + */ +#define EEPROM_TXPOWER_START 0x13 +#define EEPROM_TXPOWER_SIZE 7 +#define EEPROM_TXPOWER_1 FIELD16(0x00ff) +#define EEPROM_TXPOWER_2 FIELD16(0xff00) + +/* + * DMA descriptor defines. + */ +#define TXD_DESC_SIZE ( 8 * sizeof(struct data_desc) ) +#define RXD_DESC_SIZE ( 8 * sizeof(struct data_desc) ) + +/* + * TX descriptor format for TX, PRIO, ATIM and Beacon Ring. + */ + +/* + * Word0 + */ +#define TXD_W0_OWNER_NIC FIELD32(0x00000001) +#define TXD_W0_VALID FIELD32(0x00000002) +#define TXD_W0_RESULT FIELD32(0x0000001c) +#define TXD_W0_RETRY_COUNT FIELD32(0x000000e0) +#define TXD_W0_MORE_FRAG FIELD32(0x00000100) +#define TXD_W0_ACK FIELD32(0x00000200) +#define TXD_W0_TIMESTAMP FIELD32(0x00000400) +#define TXD_W0_RTS FIELD32(0x00000800) +#define TXD_W0_IFS FIELD32(0x00006000) +#define TXD_W0_RETRY_MODE FIELD32(0x00008000) +#define TXD_W0_AGC FIELD32(0x00ff0000) +#define TXD_W0_R2 FIELD32(0xff000000) + +/* + * Word1 + */ +#define TXD_W1_BUFFER_ADDRESS FIELD32(0xffffffff) + +/* + * Word2 + */ +#define TXD_W2_BUFFER_LENGTH FIELD32(0x0000ffff) +#define TXD_W2_DATABYTE_COUNT FIELD32(0xffff0000) + +/* + * Word3 & 4: PLCP information + */ +#define TXD_W3_PLCP_SIGNAL FIELD32(0x0000ffff) +#define TXD_W3_PLCP_SERVICE FIELD32(0xffff0000) +#define TXD_W4_PLCP_LENGTH_LOW FIELD32(0x0000ffff) +#define TXD_W4_PLCP_LENGTH_HIGH FIELD32(0xffff0000) + +/* + * Word5 + */ +#define TXD_W5_BBCR4 FIELD32(0x0000ffff) +#define TXD_W5_AGC_REG FIELD32(0x007f0000) +#define TXD_W5_AGC_REG_VALID FIELD32(0x00800000) +#define TXD_W5_XXX_REG FIELD32(0x7f000000) +#define TXD_W5_XXX_REG_VALID FIELD32(0x80000000) + +/* + * Word6 + */ +#define TXD_W6_SK_BUFF FIELD32(0xffffffff) + +/* + * Word7 + */ +#define TXD_W7_RESERVED FIELD32(0xffffffff) + +/* + * RX descriptor format for RX Ring. + */ + +/* + * Word0 + */ +#define RXD_W0_OWNER_NIC FIELD32(0x00000001) +#define RXD_W0_UNICAST_TO_ME FIELD32(0x00000002) +#define RXD_W0_MULTICAST FIELD32(0x00000004) +#define RXD_W0_BROADCAST FIELD32(0x00000008) +#define RXD_W0_MY_BSS FIELD32(0x00000010) +#define RXD_W0_CRC_ERROR FIELD32(0x00000020) +#define RXD_W0_PHYSICAL_ERROR FIELD32(0x00000080) +#define RXD_W0_DATABYTE_COUNT FIELD32(0xffff0000) + +/* + * Word1 + */ +#define RXD_W1_BUFFER_ADDRESS FIELD32(0xffffffff) + +/* + * Word2 + */ +#define RXD_W2_BUFFER_LENGTH FIELD32(0x0000ffff) +#define RXD_W2_SIGNAL FIELD32(0x00ff0000) +#define RXD_W2_RSSI FIELD32(0xff000000) + +/* + * Word3 + */ +#define RXD_W3_BBR2 FIELD32(0x000000ff) +#define RXD_W3_BBR3 FIELD32(0x0000ff00) +#define RXD_W3_BBR4 FIELD32(0x00ff0000) +#define RXD_W3_BBR5 FIELD32(0xff000000) + +/* + * Word4 + */ +#define RXD_W4_RX_END_TIME FIELD32(0xffffffff) + +/* + * Word5 & 6 & 7: Reserved + */ +#define RXD_W5_RESERVED FIELD32(0xffffffff) +#define RXD_W6_RESERVED FIELD32(0xffffffff) +#define RXD_W7_RESERVED FIELD32(0xffffffff) + +/* + * Macro's for converting txpower from EEPROM to dscape value + * and from dscape value to register value. 
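+ * Device values range from MIN_TXPOWER (31) to MAX_TXPOWER (62);
+ * EEPROM values outside that range are replaced by a value derived
+ * from DEFAULT_TXPOWER.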
+ * NOTE: Logics in rt2400pci for txpower are reversed + * compared to the other rt2x00 drivers. A higher txpower + * value means that the txpower must be lowered. This is + * important when converting the value coming from the + * dscape stack to the rt2400 acceptable value. + */ +#define MIN_TXPOWER 31 +#define MAX_TXPOWER 62 +#define DEFAULT_TXPOWER 39 + +#define TXPOWER_FROM_DEV(__txpower) \ +({ \ + ((__txpower) > MAX_TXPOWER) ? DEFAULT_TXPOWER - MIN_TXPOWER : \ + ((__txpower) < MIN_TXPOWER) ? DEFAULT_TXPOWER - MIN_TXPOWER : \ + (((__txpower) - MAX_TXPOWER) + MIN_TXPOWER); \ +}) + +#define TXPOWER_TO_DEV(__txpower) \ +({ \ + (__txpower) += MIN_TXPOWER; \ + ((__txpower) <= MIN_TXPOWER) ? MAX_TXPOWER : \ + (((__txpower) >= MAX_TXPOWER) ? MIN_TXPOWER : \ + (MAX_TXPOWER - ((__txpower) - MIN_TXPOWER))); \ +}) + +#endif /* RT2400PCI_H */ diff --git a/drivers/net/wireless/rt2x00/rt2500pci.c b/drivers/net/wireless/rt2x00/rt2500pci.c new file mode 100644 index 0000000000000000000000000000000000000000..ff2d63267b19e8acb41b356e8524e3451014f291 --- /dev/null +++ b/drivers/net/wireless/rt2x00/rt2500pci.c @@ -0,0 +1,1971 @@ +/* + Copyright (C) 2004 - 2007 rt2x00 SourceForge Project + + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the + Free Software Foundation, Inc., + 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + */ + +/* + Module: rt2500pci + Abstract: rt2500pci device specific routines. + Supported chipsets: RT2560. + */ + +/* + * Set enviroment defines for rt2x00.h + */ +#define DRV_NAME "rt2500pci" + +#include +#include +#include +#include +#include +#include +#include + +#include "rt2x00.h" +#include "rt2x00pci.h" +#include "rt2500pci.h" + +/* + * Register access. + * All access to the CSR registers will go through the methods + * rt2x00pci_register_read and rt2x00pci_register_write. + * BBP and RF register require indirect register access, + * and use the CSR registers BBPCSR and RFCSR to achieve this. + * These indirect registers work with busy bits, + * and we will try maximal REGISTER_BUSY_COUNT times to access + * the register while taking a REGISTER_BUSY_DELAY us delay + * between each attampt. When the busy bit is still set at that time, + * the access attempt is considered to have failed, + * and we will print an error. + */ +static u32 rt2500pci_bbp_check(const struct rt2x00_dev *rt2x00dev) +{ + u32 reg; + unsigned int i; + + for (i = 0; i < REGISTER_BUSY_COUNT; i++) { + rt2x00pci_register_read(rt2x00dev, BBPCSR, ®); + if (!rt2x00_get_field32(reg, BBPCSR_BUSY)) + break; + udelay(REGISTER_BUSY_DELAY); + } + + return reg; +} + +static void rt2500pci_bbp_write(const struct rt2x00_dev *rt2x00dev, + const unsigned int word, const u8 value) +{ + u32 reg; + + /* + * Wait until the BBP becomes ready. + */ + reg = rt2500pci_bbp_check(rt2x00dev); + if (rt2x00_get_field32(reg, BBPCSR_BUSY)) { + ERROR(rt2x00dev, "BBPCSR register busy. Write failed.\n"); + return; + } + + /* + * Write the data into the BBP. 
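+	 * The register number and value are combined into a single BBPCSR
+	 * write; setting BBPCSR_BUSY together with BBPCSR_WRITE_CONTROL
+	 * triggers the indirect write.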
+ */ + reg = 0; + rt2x00_set_field32(®, BBPCSR_VALUE, value); + rt2x00_set_field32(®, BBPCSR_REGNUM, word); + rt2x00_set_field32(®, BBPCSR_BUSY, 1); + rt2x00_set_field32(®, BBPCSR_WRITE_CONTROL, 1); + + rt2x00pci_register_write(rt2x00dev, BBPCSR, reg); +} + +static void rt2500pci_bbp_read(const struct rt2x00_dev *rt2x00dev, + const unsigned int word, u8 *value) +{ + u32 reg; + + /* + * Wait until the BBP becomes ready. + */ + reg = rt2500pci_bbp_check(rt2x00dev); + if (rt2x00_get_field32(reg, BBPCSR_BUSY)) { + ERROR(rt2x00dev, "BBPCSR register busy. Read failed.\n"); + return; + } + + /* + * Write the request into the BBP. + */ + reg = 0; + rt2x00_set_field32(®, BBPCSR_REGNUM, word); + rt2x00_set_field32(®, BBPCSR_BUSY, 1); + rt2x00_set_field32(®, BBPCSR_WRITE_CONTROL, 0); + + rt2x00pci_register_write(rt2x00dev, BBPCSR, reg); + + /* + * Wait until the BBP becomes ready. + */ + reg = rt2500pci_bbp_check(rt2x00dev); + if (rt2x00_get_field32(reg, BBPCSR_BUSY)) { + ERROR(rt2x00dev, "BBPCSR register busy. Read failed.\n"); + *value = 0xff; + return; + } + + *value = rt2x00_get_field32(reg, BBPCSR_VALUE); +} + +static void rt2500pci_rf_write(const struct rt2x00_dev *rt2x00dev, + const unsigned int word, const u32 value) +{ + u32 reg; + unsigned int i; + + if (!word) + return; + + for (i = 0; i < REGISTER_BUSY_COUNT; i++) { + rt2x00pci_register_read(rt2x00dev, RFCSR, ®); + if (!rt2x00_get_field32(reg, RFCSR_BUSY)) + goto rf_write; + udelay(REGISTER_BUSY_DELAY); + } + + ERROR(rt2x00dev, "RFCSR register busy. Write failed.\n"); + return; + +rf_write: + reg = 0; + rt2x00_set_field32(®, RFCSR_VALUE, value); + rt2x00_set_field32(®, RFCSR_NUMBER_OF_BITS, 20); + rt2x00_set_field32(®, RFCSR_IF_SELECT, 0); + rt2x00_set_field32(®, RFCSR_BUSY, 1); + + rt2x00pci_register_write(rt2x00dev, RFCSR, reg); + rt2x00_rf_write(rt2x00dev, word, value); +} + +static void rt2500pci_eepromregister_read(struct eeprom_93cx6 *eeprom) +{ + struct rt2x00_dev *rt2x00dev = eeprom->data; + u32 reg; + + rt2x00pci_register_read(rt2x00dev, CSR21, ®); + + eeprom->reg_data_in = !!rt2x00_get_field32(reg, CSR21_EEPROM_DATA_IN); + eeprom->reg_data_out = !!rt2x00_get_field32(reg, CSR21_EEPROM_DATA_OUT); + eeprom->reg_data_clock = + !!rt2x00_get_field32(reg, CSR21_EEPROM_DATA_CLOCK); + eeprom->reg_chip_select = + !!rt2x00_get_field32(reg, CSR21_EEPROM_CHIP_SELECT); +} + +static void rt2500pci_eepromregister_write(struct eeprom_93cx6 *eeprom) +{ + struct rt2x00_dev *rt2x00dev = eeprom->data; + u32 reg = 0; + + rt2x00_set_field32(®, CSR21_EEPROM_DATA_IN, !!eeprom->reg_data_in); + rt2x00_set_field32(®, CSR21_EEPROM_DATA_OUT, !!eeprom->reg_data_out); + rt2x00_set_field32(®, CSR21_EEPROM_DATA_CLOCK, + !!eeprom->reg_data_clock); + rt2x00_set_field32(®, CSR21_EEPROM_CHIP_SELECT, + !!eeprom->reg_chip_select); + + rt2x00pci_register_write(rt2x00dev, CSR21, reg); +} + +#ifdef CONFIG_RT2X00_LIB_DEBUGFS +#define CSR_OFFSET(__word) ( CSR_REG_BASE + ((__word) * sizeof(u32)) ) + +static void rt2500pci_read_csr(const struct rt2x00_dev *rt2x00dev, + const unsigned int word, u32 *data) +{ + rt2x00pci_register_read(rt2x00dev, CSR_OFFSET(word), data); +} + +static void rt2500pci_write_csr(const struct rt2x00_dev *rt2x00dev, + const unsigned int word, u32 data) +{ + rt2x00pci_register_write(rt2x00dev, CSR_OFFSET(word), data); +} + +static const struct rt2x00debug rt2500pci_rt2x00debug = { + .owner = THIS_MODULE, + .csr = { + .read = rt2500pci_read_csr, + .write = rt2500pci_write_csr, + .word_size = sizeof(u32), + .word_count = CSR_REG_SIZE / sizeof(u32), + }, 
+ .eeprom = { + .read = rt2x00_eeprom_read, + .write = rt2x00_eeprom_write, + .word_size = sizeof(u16), + .word_count = EEPROM_SIZE / sizeof(u16), + }, + .bbp = { + .read = rt2500pci_bbp_read, + .write = rt2500pci_bbp_write, + .word_size = sizeof(u8), + .word_count = BBP_SIZE / sizeof(u8), + }, + .rf = { + .read = rt2x00_rf_read, + .write = rt2500pci_rf_write, + .word_size = sizeof(u32), + .word_count = RF_SIZE / sizeof(u32), + }, +}; +#endif /* CONFIG_RT2X00_LIB_DEBUGFS */ + +#ifdef CONFIG_RT2500PCI_RFKILL +static int rt2500pci_rfkill_poll(struct rt2x00_dev *rt2x00dev) +{ + u32 reg; + + rt2x00pci_register_read(rt2x00dev, GPIOCSR, ®); + return rt2x00_get_field32(reg, GPIOCSR_BIT0); +} +#else +#define rt2500pci_rfkill_poll NULL +#endif /* CONFIG_RT2500PCI_RFKILL */ + +/* + * Configuration handlers. + */ +static void rt2500pci_config_mac_addr(struct rt2x00_dev *rt2x00dev, + __le32 *mac) +{ + rt2x00pci_register_multiwrite(rt2x00dev, CSR3, mac, + (2 * sizeof(__le32))); +} + +static void rt2500pci_config_bssid(struct rt2x00_dev *rt2x00dev, + __le32 *bssid) +{ + rt2x00pci_register_multiwrite(rt2x00dev, CSR5, bssid, + (2 * sizeof(__le32))); +} + +static void rt2500pci_config_type(struct rt2x00_dev *rt2x00dev, const int type, + const int tsf_sync) +{ + u32 reg; + + rt2x00pci_register_write(rt2x00dev, CSR14, 0); + + /* + * Enable beacon config + */ + rt2x00pci_register_read(rt2x00dev, BCNCSR1, ®); + rt2x00_set_field32(®, BCNCSR1_PRELOAD, + PREAMBLE + get_duration(IEEE80211_HEADER, 20)); + rt2x00_set_field32(®, BCNCSR1_BEACON_CWMIN, + rt2x00lib_get_ring(rt2x00dev, + IEEE80211_TX_QUEUE_BEACON) + ->tx_params.cw_min); + rt2x00pci_register_write(rt2x00dev, BCNCSR1, reg); + + /* + * Enable synchronisation. + */ + rt2x00pci_register_read(rt2x00dev, CSR14, ®); + rt2x00_set_field32(®, CSR14_TSF_COUNT, 1); + rt2x00_set_field32(®, CSR14_TBCN, 1); + rt2x00_set_field32(®, CSR14_BEACON_GEN, 0); + rt2x00_set_field32(®, CSR14_TSF_SYNC, tsf_sync); + rt2x00pci_register_write(rt2x00dev, CSR14, reg); +} + +static void rt2500pci_config_preamble(struct rt2x00_dev *rt2x00dev, + const int short_preamble, + const int ack_timeout, + const int ack_consume_time) +{ + int preamble_mask; + u32 reg; + + /* + * When short preamble is enabled, we should set bit 0x08 + */ + preamble_mask = short_preamble << 3; + + rt2x00pci_register_read(rt2x00dev, TXCSR1, ®); + rt2x00_set_field32(®, TXCSR1_ACK_TIMEOUT, ack_timeout); + rt2x00_set_field32(®, TXCSR1_ACK_CONSUME_TIME, ack_consume_time); + rt2x00pci_register_write(rt2x00dev, TXCSR1, reg); + + rt2x00pci_register_read(rt2x00dev, ARCSR2, ®); + rt2x00_set_field32(®, ARCSR2_SIGNAL, 0x00 | preamble_mask); + rt2x00_set_field32(®, ARCSR2_SERVICE, 0x04); + rt2x00_set_field32(®, ARCSR2_LENGTH, get_duration(ACK_SIZE, 10)); + rt2x00pci_register_write(rt2x00dev, ARCSR2, reg); + + rt2x00pci_register_read(rt2x00dev, ARCSR3, ®); + rt2x00_set_field32(®, ARCSR3_SIGNAL, 0x01 | preamble_mask); + rt2x00_set_field32(®, ARCSR3_SERVICE, 0x04); + rt2x00_set_field32(®, ARCSR2_LENGTH, get_duration(ACK_SIZE, 20)); + rt2x00pci_register_write(rt2x00dev, ARCSR3, reg); + + rt2x00pci_register_read(rt2x00dev, ARCSR4, ®); + rt2x00_set_field32(®, ARCSR4_SIGNAL, 0x02 | preamble_mask); + rt2x00_set_field32(®, ARCSR4_SERVICE, 0x04); + rt2x00_set_field32(®, ARCSR2_LENGTH, get_duration(ACK_SIZE, 55)); + rt2x00pci_register_write(rt2x00dev, ARCSR4, reg); + + rt2x00pci_register_read(rt2x00dev, ARCSR5, ®); + rt2x00_set_field32(®, ARCSR5_SIGNAL, 0x03 | preamble_mask); + rt2x00_set_field32(®, ARCSR5_SERVICE, 0x84); + 
rt2x00_set_field32(®, ARCSR2_LENGTH, get_duration(ACK_SIZE, 110)); + rt2x00pci_register_write(rt2x00dev, ARCSR5, reg); +} + +static void rt2500pci_config_phymode(struct rt2x00_dev *rt2x00dev, + const int basic_rate_mask) +{ + rt2x00pci_register_write(rt2x00dev, ARCSR1, basic_rate_mask); +} + +static void rt2500pci_config_channel(struct rt2x00_dev *rt2x00dev, + struct rf_channel *rf, const int txpower) +{ + u8 r70; + + /* + * Set TXpower. + */ + rt2x00_set_field32(&rf->rf3, RF3_TXPOWER, TXPOWER_TO_DEV(txpower)); + + /* + * Switch on tuning bits. + * For RT2523 devices we do not need to update the R1 register. + */ + if (!rt2x00_rf(&rt2x00dev->chip, RF2523)) + rt2x00_set_field32(&rf->rf1, RF1_TUNER, 1); + rt2x00_set_field32(&rf->rf3, RF3_TUNER, 1); + + /* + * For RT2525 we should first set the channel to half band higher. + */ + if (rt2x00_rf(&rt2x00dev->chip, RF2525)) { + static const u32 vals[] = { + 0x00080cbe, 0x00080d02, 0x00080d06, 0x00080d0a, + 0x00080d0e, 0x00080d12, 0x00080d16, 0x00080d1a, + 0x00080d1e, 0x00080d22, 0x00080d26, 0x00080d2a, + 0x00080d2e, 0x00080d3a + }; + + rt2500pci_rf_write(rt2x00dev, 1, rf->rf1); + rt2500pci_rf_write(rt2x00dev, 2, vals[rf->channel - 1]); + rt2500pci_rf_write(rt2x00dev, 3, rf->rf3); + if (rf->rf4) + rt2500pci_rf_write(rt2x00dev, 4, rf->rf4); + } + + rt2500pci_rf_write(rt2x00dev, 1, rf->rf1); + rt2500pci_rf_write(rt2x00dev, 2, rf->rf2); + rt2500pci_rf_write(rt2x00dev, 3, rf->rf3); + if (rf->rf4) + rt2500pci_rf_write(rt2x00dev, 4, rf->rf4); + + /* + * Channel 14 requires the Japan filter bit to be set. + */ + r70 = 0x46; + rt2x00_set_field8(&r70, BBP_R70_JAPAN_FILTER, rf->channel == 14); + rt2500pci_bbp_write(rt2x00dev, 70, r70); + + msleep(1); + + /* + * Switch off tuning bits. + * For RT2523 devices we do not need to update the R1 register. + */ + if (!rt2x00_rf(&rt2x00dev->chip, RF2523)) { + rt2x00_set_field32(&rf->rf1, RF1_TUNER, 0); + rt2500pci_rf_write(rt2x00dev, 1, rf->rf1); + } + + rt2x00_set_field32(&rf->rf3, RF3_TUNER, 0); + rt2500pci_rf_write(rt2x00dev, 3, rf->rf3); + + /* + * Clear false CRC during channel switch. + */ + rt2x00pci_register_read(rt2x00dev, CNT0, &rf->rf1); +} + +static void rt2500pci_config_txpower(struct rt2x00_dev *rt2x00dev, + const int txpower) +{ + u32 rf3; + + rt2x00_rf_read(rt2x00dev, 3, &rf3); + rt2x00_set_field32(&rf3, RF3_TXPOWER, TXPOWER_TO_DEV(txpower)); + rt2500pci_rf_write(rt2x00dev, 3, rf3); +} + +static void rt2500pci_config_antenna(struct rt2x00_dev *rt2x00dev, + const int antenna_tx, const int antenna_rx) +{ + u32 reg; + u8 r14; + u8 r2; + + rt2x00pci_register_read(rt2x00dev, BBPCSR1, ®); + rt2500pci_bbp_read(rt2x00dev, 14, &r14); + rt2500pci_bbp_read(rt2x00dev, 2, &r2); + + /* + * Configure the TX antenna. + */ + switch (antenna_tx) { + case ANTENNA_SW_DIVERSITY: + case ANTENNA_HW_DIVERSITY: + rt2x00_set_field8(&r2, BBP_R2_TX_ANTENNA, 2); + rt2x00_set_field32(®, BBPCSR1_CCK, 2); + rt2x00_set_field32(®, BBPCSR1_OFDM, 2); + break; + case ANTENNA_A: + rt2x00_set_field8(&r2, BBP_R2_TX_ANTENNA, 0); + rt2x00_set_field32(®, BBPCSR1_CCK, 0); + rt2x00_set_field32(®, BBPCSR1_OFDM, 0); + break; + case ANTENNA_B: + rt2x00_set_field8(&r2, BBP_R2_TX_ANTENNA, 2); + rt2x00_set_field32(®, BBPCSR1_CCK, 2); + rt2x00_set_field32(®, BBPCSR1_OFDM, 2); + break; + } + + /* + * Configure the RX antenna. 
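+	 * The RX antenna is selected through BBP register 14; both
+	 * diversity modes use the same register value as antenna B.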
+ */ + switch (antenna_rx) { + case ANTENNA_SW_DIVERSITY: + case ANTENNA_HW_DIVERSITY: + rt2x00_set_field8(&r14, BBP_R14_RX_ANTENNA, 2); + break; + case ANTENNA_A: + rt2x00_set_field8(&r14, BBP_R14_RX_ANTENNA, 0); + break; + case ANTENNA_B: + rt2x00_set_field8(&r14, BBP_R14_RX_ANTENNA, 2); + break; + } + + /* + * RT2525E and RT5222 need to flip TX I/Q + */ + if (rt2x00_rf(&rt2x00dev->chip, RF2525E) || + rt2x00_rf(&rt2x00dev->chip, RF5222)) { + rt2x00_set_field8(&r2, BBP_R2_TX_IQ_FLIP, 1); + rt2x00_set_field32(®, BBPCSR1_CCK_FLIP, 1); + rt2x00_set_field32(®, BBPCSR1_OFDM_FLIP, 1); + + /* + * RT2525E does not need RX I/Q Flip. + */ + if (rt2x00_rf(&rt2x00dev->chip, RF2525E)) + rt2x00_set_field8(&r14, BBP_R14_RX_IQ_FLIP, 0); + } else { + rt2x00_set_field32(®, BBPCSR1_CCK_FLIP, 0); + rt2x00_set_field32(®, BBPCSR1_OFDM_FLIP, 0); + } + + rt2x00pci_register_write(rt2x00dev, BBPCSR1, reg); + rt2500pci_bbp_write(rt2x00dev, 14, r14); + rt2500pci_bbp_write(rt2x00dev, 2, r2); +} + +static void rt2500pci_config_duration(struct rt2x00_dev *rt2x00dev, + struct rt2x00lib_conf *libconf) +{ + u32 reg; + + rt2x00pci_register_read(rt2x00dev, CSR11, ®); + rt2x00_set_field32(®, CSR11_SLOT_TIME, libconf->slot_time); + rt2x00pci_register_write(rt2x00dev, CSR11, reg); + + rt2x00pci_register_read(rt2x00dev, CSR18, ®); + rt2x00_set_field32(®, CSR18_SIFS, libconf->sifs); + rt2x00_set_field32(®, CSR18_PIFS, libconf->pifs); + rt2x00pci_register_write(rt2x00dev, CSR18, reg); + + rt2x00pci_register_read(rt2x00dev, CSR19, ®); + rt2x00_set_field32(®, CSR19_DIFS, libconf->difs); + rt2x00_set_field32(®, CSR19_EIFS, libconf->eifs); + rt2x00pci_register_write(rt2x00dev, CSR19, reg); + + rt2x00pci_register_read(rt2x00dev, TXCSR1, ®); + rt2x00_set_field32(®, TXCSR1_TSF_OFFSET, IEEE80211_HEADER); + rt2x00_set_field32(®, TXCSR1_AUTORESPONDER, 1); + rt2x00pci_register_write(rt2x00dev, TXCSR1, reg); + + rt2x00pci_register_read(rt2x00dev, CSR12, ®); + rt2x00_set_field32(®, CSR12_BEACON_INTERVAL, + libconf->conf->beacon_int * 16); + rt2x00_set_field32(®, CSR12_CFP_MAX_DURATION, + libconf->conf->beacon_int * 16); + rt2x00pci_register_write(rt2x00dev, CSR12, reg); +} + +static void rt2500pci_config(struct rt2x00_dev *rt2x00dev, + const unsigned int flags, + struct rt2x00lib_conf *libconf) +{ + if (flags & CONFIG_UPDATE_PHYMODE) + rt2500pci_config_phymode(rt2x00dev, libconf->basic_rates); + if (flags & CONFIG_UPDATE_CHANNEL) + rt2500pci_config_channel(rt2x00dev, &libconf->rf, + libconf->conf->power_level); + if ((flags & CONFIG_UPDATE_TXPOWER) && !(flags & CONFIG_UPDATE_CHANNEL)) + rt2500pci_config_txpower(rt2x00dev, + libconf->conf->power_level); + if (flags & CONFIG_UPDATE_ANTENNA) + rt2500pci_config_antenna(rt2x00dev, + libconf->conf->antenna_sel_tx, + libconf->conf->antenna_sel_rx); + if (flags & (CONFIG_UPDATE_SLOT_TIME | CONFIG_UPDATE_BEACON_INT)) + rt2500pci_config_duration(rt2x00dev, libconf); +} + +/* + * LED functions. 
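+ * The LED on/off periods in LEDCSR are fixed at 70ms/30ms; whether the
+ * LED follows the link state, the activity state, or both depends on
+ * the configured led_mode.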
+ */
+static void rt2500pci_enable_led(struct rt2x00_dev *rt2x00dev)
+{
+	u32 reg;
+
+	rt2x00pci_register_read(rt2x00dev, LEDCSR, &reg);
+
+	rt2x00_set_field32(&reg, LEDCSR_ON_PERIOD, 70);
+	rt2x00_set_field32(&reg, LEDCSR_OFF_PERIOD, 30);
+
+	if (rt2x00dev->led_mode == LED_MODE_TXRX_ACTIVITY) {
+		rt2x00_set_field32(&reg, LEDCSR_LINK, 1);
+		rt2x00_set_field32(&reg, LEDCSR_ACTIVITY, 0);
+	} else if (rt2x00dev->led_mode == LED_MODE_ASUS) {
+		rt2x00_set_field32(&reg, LEDCSR_LINK, 0);
+		rt2x00_set_field32(&reg, LEDCSR_ACTIVITY, 1);
+	} else {
+		rt2x00_set_field32(&reg, LEDCSR_LINK, 1);
+		rt2x00_set_field32(&reg, LEDCSR_ACTIVITY, 1);
+	}
+
+	rt2x00pci_register_write(rt2x00dev, LEDCSR, reg);
+}
+
+static void rt2500pci_disable_led(struct rt2x00_dev *rt2x00dev)
+{
+	u32 reg;
+
+	rt2x00pci_register_read(rt2x00dev, LEDCSR, &reg);
+	rt2x00_set_field32(&reg, LEDCSR_LINK, 0);
+	rt2x00_set_field32(&reg, LEDCSR_ACTIVITY, 0);
+	rt2x00pci_register_write(rt2x00dev, LEDCSR, reg);
+}
+
+/*
+ * Link tuning
+ */
+static void rt2500pci_link_stats(struct rt2x00_dev *rt2x00dev)
+{
+	u32 reg;
+
+	/*
+	 * Update FCS error count from register.
+	 */
+	rt2x00pci_register_read(rt2x00dev, CNT0, &reg);
+	rt2x00dev->link.rx_failed = rt2x00_get_field32(reg, CNT0_FCS_ERROR);
+
+	/*
+	 * Update False CCA count from register.
+	 */
+	rt2x00pci_register_read(rt2x00dev, CNT3, &reg);
+	rt2x00dev->link.false_cca = rt2x00_get_field32(reg, CNT3_FALSE_CCA);
+}
+
+static void rt2500pci_reset_tuner(struct rt2x00_dev *rt2x00dev)
+{
+	rt2500pci_bbp_write(rt2x00dev, 17, 0x48);
+	rt2x00dev->link.vgc_level = 0x48;
+}
+
+static void rt2500pci_link_tuner(struct rt2x00_dev *rt2x00dev)
+{
+	int rssi = rt2x00_get_link_rssi(&rt2x00dev->link);
+	u8 r17;
+
+	/*
+	 * To prevent collisions with the MAC ASIC, link tuning
+	 * on chipsets up to version C should halt after 20
+	 * seconds.
+	 */
+	if (rt2x00_get_rev(&rt2x00dev->chip) < RT2560_VERSION_D &&
+	    rt2x00dev->link.count > 20)
+		return;
+
+	rt2500pci_bbp_read(rt2x00dev, 17, &r17);
+
+	/*
+	 * Chipset versions C and lower should directly continue
+	 * to the dynamic CCA tuning.
+	 */
+	if (rt2x00_get_rev(&rt2x00dev->chip) < RT2560_VERSION_D)
+		goto dynamic_cca_tune;
+
+	/*
+	 * A too low RSSI will cause too much false CCA, which will
+	 * then corrupt the R17 tuning. To remedy this the tuning should
+	 * be stopped (while making sure the R17 value will not exceed
+	 * its limits).
+	 */
+	if (rssi < -80 && rt2x00dev->link.count > 20) {
+		if (r17 >= 0x41) {
+			r17 = rt2x00dev->link.vgc_level;
+			rt2500pci_bbp_write(rt2x00dev, 17, r17);
+		}
+		return;
+	}
+
+	/*
+	 * Special big-R17 for short distance
+	 */
+	if (rssi >= -58) {
+		if (r17 != 0x50)
+			rt2500pci_bbp_write(rt2x00dev, 17, 0x50);
+		return;
+	}
+
+	/*
+	 * Special mid-R17 for middle distance
+	 */
+	if (rssi >= -74) {
+		if (r17 != 0x41)
+			rt2500pci_bbp_write(rt2x00dev, 17, 0x41);
+		return;
+	}
+
+	/*
+	 * Leave short or middle distance condition, restore r17
+	 * to the dynamic tuning range.
+	 */
+	if (r17 >= 0x41) {
+		rt2500pci_bbp_write(rt2x00dev, 17, rt2x00dev->link.vgc_level);
+		return;
+	}
+
+dynamic_cca_tune:
+
+	/*
+	 * R17 is inside the dynamic tuning range,
+	 * start tuning the link based on the false cca counter.
+	 */
+	if (rt2x00dev->link.false_cca > 512 && r17 < 0x40) {
+		rt2500pci_bbp_write(rt2x00dev, 17, ++r17);
+		rt2x00dev->link.vgc_level = r17;
+	} else if (rt2x00dev->link.false_cca < 100 && r17 > 0x32) {
+		rt2500pci_bbp_write(rt2x00dev, 17, --r17);
+		rt2x00dev->link.vgc_level = r17;
+	}
+}
+
+/*
+ * Initialization functions.
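+ * Ring initialization stores the DMA address of each entry in
+ * descriptor word 1; RX descriptors are handed to the NIC by setting
+ * the OWNER_NIC bit, while TX descriptors start out owned by the host
+ * with the VALID bit cleared.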
+ */ +static void rt2500pci_init_rxring(struct rt2x00_dev *rt2x00dev) +{ + struct data_ring *ring = rt2x00dev->rx; + struct data_desc *rxd; + unsigned int i; + u32 word; + + memset(ring->data_addr, 0x00, rt2x00_get_ring_size(ring)); + + for (i = 0; i < ring->stats.limit; i++) { + rxd = ring->entry[i].priv; + + rt2x00_desc_read(rxd, 1, &word); + rt2x00_set_field32(&word, RXD_W1_BUFFER_ADDRESS, + ring->entry[i].data_dma); + rt2x00_desc_write(rxd, 1, word); + + rt2x00_desc_read(rxd, 0, &word); + rt2x00_set_field32(&word, RXD_W0_OWNER_NIC, 1); + rt2x00_desc_write(rxd, 0, word); + } + + rt2x00_ring_index_clear(rt2x00dev->rx); +} + +static void rt2500pci_init_txring(struct rt2x00_dev *rt2x00dev, const int queue) +{ + struct data_ring *ring = rt2x00lib_get_ring(rt2x00dev, queue); + struct data_desc *txd; + unsigned int i; + u32 word; + + memset(ring->data_addr, 0x00, rt2x00_get_ring_size(ring)); + + for (i = 0; i < ring->stats.limit; i++) { + txd = ring->entry[i].priv; + + rt2x00_desc_read(txd, 1, &word); + rt2x00_set_field32(&word, TXD_W1_BUFFER_ADDRESS, + ring->entry[i].data_dma); + rt2x00_desc_write(txd, 1, word); + + rt2x00_desc_read(txd, 0, &word); + rt2x00_set_field32(&word, TXD_W0_VALID, 0); + rt2x00_set_field32(&word, TXD_W0_OWNER_NIC, 0); + rt2x00_desc_write(txd, 0, word); + } + + rt2x00_ring_index_clear(ring); +} + +static int rt2500pci_init_rings(struct rt2x00_dev *rt2x00dev) +{ + u32 reg; + + /* + * Initialize rings. + */ + rt2500pci_init_rxring(rt2x00dev); + rt2500pci_init_txring(rt2x00dev, IEEE80211_TX_QUEUE_DATA0); + rt2500pci_init_txring(rt2x00dev, IEEE80211_TX_QUEUE_DATA1); + rt2500pci_init_txring(rt2x00dev, IEEE80211_TX_QUEUE_AFTER_BEACON); + rt2500pci_init_txring(rt2x00dev, IEEE80211_TX_QUEUE_BEACON); + + /* + * Initialize registers. + */ + rt2x00pci_register_read(rt2x00dev, TXCSR2, ®); + rt2x00_set_field32(®, TXCSR2_TXD_SIZE, + rt2x00dev->tx[IEEE80211_TX_QUEUE_DATA0].desc_size); + rt2x00_set_field32(®, TXCSR2_NUM_TXD, + rt2x00dev->tx[IEEE80211_TX_QUEUE_DATA1].stats.limit); + rt2x00_set_field32(®, TXCSR2_NUM_ATIM, + rt2x00dev->bcn[1].stats.limit); + rt2x00_set_field32(®, TXCSR2_NUM_PRIO, + rt2x00dev->tx[IEEE80211_TX_QUEUE_DATA0].stats.limit); + rt2x00pci_register_write(rt2x00dev, TXCSR2, reg); + + rt2x00pci_register_read(rt2x00dev, TXCSR3, ®); + rt2x00_set_field32(®, TXCSR3_TX_RING_REGISTER, + rt2x00dev->tx[IEEE80211_TX_QUEUE_DATA1].data_dma); + rt2x00pci_register_write(rt2x00dev, TXCSR3, reg); + + rt2x00pci_register_read(rt2x00dev, TXCSR5, ®); + rt2x00_set_field32(®, TXCSR5_PRIO_RING_REGISTER, + rt2x00dev->tx[IEEE80211_TX_QUEUE_DATA0].data_dma); + rt2x00pci_register_write(rt2x00dev, TXCSR5, reg); + + rt2x00pci_register_read(rt2x00dev, TXCSR4, ®); + rt2x00_set_field32(®, TXCSR4_ATIM_RING_REGISTER, + rt2x00dev->bcn[1].data_dma); + rt2x00pci_register_write(rt2x00dev, TXCSR4, reg); + + rt2x00pci_register_read(rt2x00dev, TXCSR6, ®); + rt2x00_set_field32(®, TXCSR6_BEACON_RING_REGISTER, + rt2x00dev->bcn[0].data_dma); + rt2x00pci_register_write(rt2x00dev, TXCSR6, reg); + + rt2x00pci_register_read(rt2x00dev, RXCSR1, ®); + rt2x00_set_field32(®, RXCSR1_RXD_SIZE, rt2x00dev->rx->desc_size); + rt2x00_set_field32(®, RXCSR1_NUM_RXD, rt2x00dev->rx->stats.limit); + rt2x00pci_register_write(rt2x00dev, RXCSR1, reg); + + rt2x00pci_register_read(rt2x00dev, RXCSR2, ®); + rt2x00_set_field32(®, RXCSR2_RX_RING_REGISTER, + rt2x00dev->rx->data_dma); + rt2x00pci_register_write(rt2x00dev, RXCSR2, reg); + + return 0; +} + +static int rt2500pci_init_registers(struct rt2x00_dev *rt2x00dev) +{ + u32 reg; + + 
rt2x00pci_register_write(rt2x00dev, PSCSR0, 0x00020002); + rt2x00pci_register_write(rt2x00dev, PSCSR1, 0x00000002); + rt2x00pci_register_write(rt2x00dev, PSCSR2, 0x00020002); + rt2x00pci_register_write(rt2x00dev, PSCSR3, 0x00000002); + + rt2x00pci_register_read(rt2x00dev, TIMECSR, ®); + rt2x00_set_field32(®, TIMECSR_US_COUNT, 33); + rt2x00_set_field32(®, TIMECSR_US_64_COUNT, 63); + rt2x00_set_field32(®, TIMECSR_BEACON_EXPECT, 0); + rt2x00pci_register_write(rt2x00dev, TIMECSR, reg); + + rt2x00pci_register_read(rt2x00dev, CSR9, ®); + rt2x00_set_field32(®, CSR9_MAX_FRAME_UNIT, + rt2x00dev->rx->data_size / 128); + rt2x00pci_register_write(rt2x00dev, CSR9, reg); + + /* + * Always use CWmin and CWmax set in descriptor. + */ + rt2x00pci_register_read(rt2x00dev, CSR11, ®); + rt2x00_set_field32(®, CSR11_CW_SELECT, 0); + rt2x00pci_register_write(rt2x00dev, CSR11, reg); + + rt2x00pci_register_write(rt2x00dev, CNT3, 0); + + rt2x00pci_register_read(rt2x00dev, TXCSR8, ®); + rt2x00_set_field32(®, TXCSR8_BBP_ID0, 10); + rt2x00_set_field32(®, TXCSR8_BBP_ID0_VALID, 1); + rt2x00_set_field32(®, TXCSR8_BBP_ID1, 11); + rt2x00_set_field32(®, TXCSR8_BBP_ID1_VALID, 1); + rt2x00_set_field32(®, TXCSR8_BBP_ID2, 13); + rt2x00_set_field32(®, TXCSR8_BBP_ID2_VALID, 1); + rt2x00_set_field32(®, TXCSR8_BBP_ID3, 12); + rt2x00_set_field32(®, TXCSR8_BBP_ID3_VALID, 1); + rt2x00pci_register_write(rt2x00dev, TXCSR8, reg); + + rt2x00pci_register_read(rt2x00dev, ARTCSR0, ®); + rt2x00_set_field32(®, ARTCSR0_ACK_CTS_1MBS, 112); + rt2x00_set_field32(®, ARTCSR0_ACK_CTS_2MBS, 56); + rt2x00_set_field32(®, ARTCSR0_ACK_CTS_5_5MBS, 20); + rt2x00_set_field32(®, ARTCSR0_ACK_CTS_11MBS, 10); + rt2x00pci_register_write(rt2x00dev, ARTCSR0, reg); + + rt2x00pci_register_read(rt2x00dev, ARTCSR1, ®); + rt2x00_set_field32(®, ARTCSR1_ACK_CTS_6MBS, 45); + rt2x00_set_field32(®, ARTCSR1_ACK_CTS_9MBS, 37); + rt2x00_set_field32(®, ARTCSR1_ACK_CTS_12MBS, 33); + rt2x00_set_field32(®, ARTCSR1_ACK_CTS_18MBS, 29); + rt2x00pci_register_write(rt2x00dev, ARTCSR1, reg); + + rt2x00pci_register_read(rt2x00dev, ARTCSR2, ®); + rt2x00_set_field32(®, ARTCSR2_ACK_CTS_24MBS, 29); + rt2x00_set_field32(®, ARTCSR2_ACK_CTS_36MBS, 25); + rt2x00_set_field32(®, ARTCSR2_ACK_CTS_48MBS, 25); + rt2x00_set_field32(®, ARTCSR2_ACK_CTS_54MBS, 25); + rt2x00pci_register_write(rt2x00dev, ARTCSR2, reg); + + rt2x00pci_register_read(rt2x00dev, RXCSR3, ®); + rt2x00_set_field32(®, RXCSR3_BBP_ID0, 47); /* CCK Signal */ + rt2x00_set_field32(®, RXCSR3_BBP_ID0_VALID, 1); + rt2x00_set_field32(®, RXCSR3_BBP_ID1, 51); /* Rssi */ + rt2x00_set_field32(®, RXCSR3_BBP_ID1_VALID, 1); + rt2x00_set_field32(®, RXCSR3_BBP_ID2, 42); /* OFDM Rate */ + rt2x00_set_field32(®, RXCSR3_BBP_ID2_VALID, 1); + rt2x00_set_field32(®, RXCSR3_BBP_ID3, 51); /* RSSI */ + rt2x00_set_field32(®, RXCSR3_BBP_ID3_VALID, 1); + rt2x00pci_register_write(rt2x00dev, RXCSR3, reg); + + rt2x00pci_register_read(rt2x00dev, PCICSR, ®); + rt2x00_set_field32(®, PCICSR_BIG_ENDIAN, 0); + rt2x00_set_field32(®, PCICSR_RX_TRESHOLD, 0); + rt2x00_set_field32(®, PCICSR_TX_TRESHOLD, 3); + rt2x00_set_field32(®, PCICSR_BURST_LENTH, 1); + rt2x00_set_field32(®, PCICSR_ENABLE_CLK, 1); + rt2x00_set_field32(®, PCICSR_READ_MULTIPLE, 1); + rt2x00_set_field32(®, PCICSR_WRITE_INVALID, 1); + rt2x00pci_register_write(rt2x00dev, PCICSR, reg); + + rt2x00pci_register_write(rt2x00dev, PWRCSR0, 0x3f3b3100); + + rt2x00pci_register_write(rt2x00dev, GPIOCSR, 0x0000ff00); + rt2x00pci_register_write(rt2x00dev, TESTCSR, 0x000000f0); + + if 
(rt2x00dev->ops->lib->set_device_state(rt2x00dev, STATE_AWAKE)) + return -EBUSY; + + rt2x00pci_register_write(rt2x00dev, MACCSR0, 0x00213223); + rt2x00pci_register_write(rt2x00dev, MACCSR1, 0x00235518); + + rt2x00pci_register_read(rt2x00dev, MACCSR2, ®); + rt2x00_set_field32(®, MACCSR2_DELAY, 64); + rt2x00pci_register_write(rt2x00dev, MACCSR2, reg); + + rt2x00pci_register_read(rt2x00dev, RALINKCSR, ®); + rt2x00_set_field32(®, RALINKCSR_AR_BBP_DATA0, 17); + rt2x00_set_field32(®, RALINKCSR_AR_BBP_ID0, 26); + rt2x00_set_field32(®, RALINKCSR_AR_BBP_VALID0, 1); + rt2x00_set_field32(®, RALINKCSR_AR_BBP_DATA1, 0); + rt2x00_set_field32(®, RALINKCSR_AR_BBP_ID1, 26); + rt2x00_set_field32(®, RALINKCSR_AR_BBP_VALID1, 1); + rt2x00pci_register_write(rt2x00dev, RALINKCSR, reg); + + rt2x00pci_register_write(rt2x00dev, BBPCSR1, 0x82188200); + + rt2x00pci_register_write(rt2x00dev, TXACKCSR0, 0x00000020); + + rt2x00pci_register_read(rt2x00dev, CSR1, ®); + rt2x00_set_field32(®, CSR1_SOFT_RESET, 1); + rt2x00_set_field32(®, CSR1_BBP_RESET, 0); + rt2x00_set_field32(®, CSR1_HOST_READY, 0); + rt2x00pci_register_write(rt2x00dev, CSR1, reg); + + rt2x00pci_register_read(rt2x00dev, CSR1, ®); + rt2x00_set_field32(®, CSR1_SOFT_RESET, 0); + rt2x00_set_field32(®, CSR1_HOST_READY, 1); + rt2x00pci_register_write(rt2x00dev, CSR1, reg); + + /* + * We must clear the FCS and FIFO error count. + * These registers are cleared on read, + * so we may pass a useless variable to store the value. + */ + rt2x00pci_register_read(rt2x00dev, CNT0, ®); + rt2x00pci_register_read(rt2x00dev, CNT4, ®); + + return 0; +} + +static int rt2500pci_init_bbp(struct rt2x00_dev *rt2x00dev) +{ + unsigned int i; + u16 eeprom; + u8 reg_id; + u8 value; + + for (i = 0; i < REGISTER_BUSY_COUNT; i++) { + rt2500pci_bbp_read(rt2x00dev, 0, &value); + if ((value != 0xff) && (value != 0x00)) + goto continue_csr_init; + NOTICE(rt2x00dev, "Waiting for BBP register.\n"); + udelay(REGISTER_BUSY_DELAY); + } + + ERROR(rt2x00dev, "BBP register access failed, aborting.\n"); + return -EACCES; + +continue_csr_init: + rt2500pci_bbp_write(rt2x00dev, 3, 0x02); + rt2500pci_bbp_write(rt2x00dev, 4, 0x19); + rt2500pci_bbp_write(rt2x00dev, 14, 0x1c); + rt2500pci_bbp_write(rt2x00dev, 15, 0x30); + rt2500pci_bbp_write(rt2x00dev, 16, 0xac); + rt2500pci_bbp_write(rt2x00dev, 18, 0x18); + rt2500pci_bbp_write(rt2x00dev, 19, 0xff); + rt2500pci_bbp_write(rt2x00dev, 20, 0x1e); + rt2500pci_bbp_write(rt2x00dev, 21, 0x08); + rt2500pci_bbp_write(rt2x00dev, 22, 0x08); + rt2500pci_bbp_write(rt2x00dev, 23, 0x08); + rt2500pci_bbp_write(rt2x00dev, 24, 0x70); + rt2500pci_bbp_write(rt2x00dev, 25, 0x40); + rt2500pci_bbp_write(rt2x00dev, 26, 0x08); + rt2500pci_bbp_write(rt2x00dev, 27, 0x23); + rt2500pci_bbp_write(rt2x00dev, 30, 0x10); + rt2500pci_bbp_write(rt2x00dev, 31, 0x2b); + rt2500pci_bbp_write(rt2x00dev, 32, 0xb9); + rt2500pci_bbp_write(rt2x00dev, 34, 0x12); + rt2500pci_bbp_write(rt2x00dev, 35, 0x50); + rt2500pci_bbp_write(rt2x00dev, 39, 0xc4); + rt2500pci_bbp_write(rt2x00dev, 40, 0x02); + rt2500pci_bbp_write(rt2x00dev, 41, 0x60); + rt2500pci_bbp_write(rt2x00dev, 53, 0x10); + rt2500pci_bbp_write(rt2x00dev, 54, 0x18); + rt2500pci_bbp_write(rt2x00dev, 56, 0x08); + rt2500pci_bbp_write(rt2x00dev, 57, 0x10); + rt2500pci_bbp_write(rt2x00dev, 58, 0x08); + rt2500pci_bbp_write(rt2x00dev, 61, 0x6d); + rt2500pci_bbp_write(rt2x00dev, 62, 0x10); + + DEBUG(rt2x00dev, "Start initialization from EEPROM...\n"); + for (i = 0; i < EEPROM_BBP_SIZE; i++) { + rt2x00_eeprom_read(rt2x00dev, EEPROM_BBP_START + i, &eeprom); 
+ + if (eeprom != 0xffff && eeprom != 0x0000) { + reg_id = rt2x00_get_field16(eeprom, EEPROM_BBP_REG_ID); + value = rt2x00_get_field16(eeprom, EEPROM_BBP_VALUE); + DEBUG(rt2x00dev, "BBP: 0x%02x, value: 0x%02x.\n", + reg_id, value); + rt2500pci_bbp_write(rt2x00dev, reg_id, value); + } + } + DEBUG(rt2x00dev, "...End initialization from EEPROM.\n"); + + return 0; +} + +/* + * Device state switch handlers. + */ +static void rt2500pci_toggle_rx(struct rt2x00_dev *rt2x00dev, + enum dev_state state) +{ + u32 reg; + + rt2x00pci_register_read(rt2x00dev, RXCSR0, ®); + rt2x00_set_field32(®, RXCSR0_DISABLE_RX, + state == STATE_RADIO_RX_OFF); + rt2x00pci_register_write(rt2x00dev, RXCSR0, reg); +} + +static void rt2500pci_toggle_irq(struct rt2x00_dev *rt2x00dev, + enum dev_state state) +{ + int mask = (state == STATE_RADIO_IRQ_OFF); + u32 reg; + + /* + * When interrupts are being enabled, the interrupt registers + * should clear the register to assure a clean state. + */ + if (state == STATE_RADIO_IRQ_ON) { + rt2x00pci_register_read(rt2x00dev, CSR7, ®); + rt2x00pci_register_write(rt2x00dev, CSR7, reg); + } + + /* + * Only toggle the interrupts bits we are going to use. + * Non-checked interrupt bits are disabled by default. + */ + rt2x00pci_register_read(rt2x00dev, CSR8, ®); + rt2x00_set_field32(®, CSR8_TBCN_EXPIRE, mask); + rt2x00_set_field32(®, CSR8_TXDONE_TXRING, mask); + rt2x00_set_field32(®, CSR8_TXDONE_ATIMRING, mask); + rt2x00_set_field32(®, CSR8_TXDONE_PRIORING, mask); + rt2x00_set_field32(®, CSR8_RXDONE, mask); + rt2x00pci_register_write(rt2x00dev, CSR8, reg); +} + +static int rt2500pci_enable_radio(struct rt2x00_dev *rt2x00dev) +{ + /* + * Initialize all registers. + */ + if (rt2500pci_init_rings(rt2x00dev) || + rt2500pci_init_registers(rt2x00dev) || + rt2500pci_init_bbp(rt2x00dev)) { + ERROR(rt2x00dev, "Register initialization failed.\n"); + return -EIO; + } + + /* + * Enable interrupts. + */ + rt2500pci_toggle_irq(rt2x00dev, STATE_RADIO_IRQ_ON); + + /* + * Enable LED + */ + rt2500pci_enable_led(rt2x00dev); + + return 0; +} + +static void rt2500pci_disable_radio(struct rt2x00_dev *rt2x00dev) +{ + u32 reg; + + /* + * Disable LED + */ + rt2500pci_disable_led(rt2x00dev); + + rt2x00pci_register_write(rt2x00dev, PWRCSR0, 0); + + /* + * Disable synchronisation. + */ + rt2x00pci_register_write(rt2x00dev, CSR14, 0); + + /* + * Cancel RX and TX. + */ + rt2x00pci_register_read(rt2x00dev, TXCSR0, ®); + rt2x00_set_field32(®, TXCSR0_ABORT, 1); + rt2x00pci_register_write(rt2x00dev, TXCSR0, reg); + + /* + * Disable interrupts. + */ + rt2500pci_toggle_irq(rt2x00dev, STATE_RADIO_IRQ_OFF); +} + +static int rt2500pci_set_state(struct rt2x00_dev *rt2x00dev, + enum dev_state state) +{ + u32 reg; + unsigned int i; + char put_to_sleep; + char bbp_state; + char rf_state; + + put_to_sleep = (state != STATE_AWAKE); + + rt2x00pci_register_read(rt2x00dev, PWRCSR1, ®); + rt2x00_set_field32(®, PWRCSR1_SET_STATE, 1); + rt2x00_set_field32(®, PWRCSR1_BBP_DESIRE_STATE, state); + rt2x00_set_field32(®, PWRCSR1_RF_DESIRE_STATE, state); + rt2x00_set_field32(®, PWRCSR1_PUT_TO_SLEEP, put_to_sleep); + rt2x00pci_register_write(rt2x00dev, PWRCSR1, reg); + + /* + * Device is not guaranteed to be in the requested state yet. + * We must wait until the register indicates that the + * device has entered the correct state. 
+ */ + for (i = 0; i < REGISTER_BUSY_COUNT; i++) { + rt2x00pci_register_read(rt2x00dev, PWRCSR1, ®); + bbp_state = rt2x00_get_field32(reg, PWRCSR1_BBP_CURR_STATE); + rf_state = rt2x00_get_field32(reg, PWRCSR1_RF_CURR_STATE); + if (bbp_state == state && rf_state == state) + return 0; + msleep(10); + } + + NOTICE(rt2x00dev, "Device failed to enter state %d, " + "current device state: bbp %d and rf %d.\n", + state, bbp_state, rf_state); + + return -EBUSY; +} + +static int rt2500pci_set_device_state(struct rt2x00_dev *rt2x00dev, + enum dev_state state) +{ + int retval = 0; + + switch (state) { + case STATE_RADIO_ON: + retval = rt2500pci_enable_radio(rt2x00dev); + break; + case STATE_RADIO_OFF: + rt2500pci_disable_radio(rt2x00dev); + break; + case STATE_RADIO_RX_ON: + case STATE_RADIO_RX_OFF: + rt2500pci_toggle_rx(rt2x00dev, state); + break; + case STATE_DEEP_SLEEP: + case STATE_SLEEP: + case STATE_STANDBY: + case STATE_AWAKE: + retval = rt2500pci_set_state(rt2x00dev, state); + break; + default: + retval = -ENOTSUPP; + break; + } + + return retval; +} + +/* + * TX descriptor initialization + */ +static void rt2500pci_write_tx_desc(struct rt2x00_dev *rt2x00dev, + struct data_desc *txd, + struct txdata_entry_desc *desc, + struct ieee80211_hdr *ieee80211hdr, + unsigned int length, + struct ieee80211_tx_control *control) +{ + u32 word; + + /* + * Start writing the descriptor words. + */ + rt2x00_desc_read(txd, 2, &word); + rt2x00_set_field32(&word, TXD_W2_IV_OFFSET, IEEE80211_HEADER); + rt2x00_set_field32(&word, TXD_W2_AIFS, desc->aifs); + rt2x00_set_field32(&word, TXD_W2_CWMIN, desc->cw_min); + rt2x00_set_field32(&word, TXD_W2_CWMAX, desc->cw_max); + rt2x00_desc_write(txd, 2, word); + + rt2x00_desc_read(txd, 3, &word); + rt2x00_set_field32(&word, TXD_W3_PLCP_SIGNAL, desc->signal); + rt2x00_set_field32(&word, TXD_W3_PLCP_SERVICE, desc->service); + rt2x00_set_field32(&word, TXD_W3_PLCP_LENGTH_LOW, desc->length_low); + rt2x00_set_field32(&word, TXD_W3_PLCP_LENGTH_HIGH, desc->length_high); + rt2x00_desc_write(txd, 3, word); + + rt2x00_desc_read(txd, 10, &word); + rt2x00_set_field32(&word, TXD_W10_RTS, + test_bit(ENTRY_TXD_RTS_FRAME, &desc->flags)); + rt2x00_desc_write(txd, 10, word); + + rt2x00_desc_read(txd, 0, &word); + rt2x00_set_field32(&word, TXD_W0_OWNER_NIC, 1); + rt2x00_set_field32(&word, TXD_W0_VALID, 1); + rt2x00_set_field32(&word, TXD_W0_MORE_FRAG, + test_bit(ENTRY_TXD_MORE_FRAG, &desc->flags)); + rt2x00_set_field32(&word, TXD_W0_ACK, + !(control->flags & IEEE80211_TXCTL_NO_ACK)); + rt2x00_set_field32(&word, TXD_W0_TIMESTAMP, + test_bit(ENTRY_TXD_REQ_TIMESTAMP, &desc->flags)); + rt2x00_set_field32(&word, TXD_W0_OFDM, + test_bit(ENTRY_TXD_OFDM_RATE, &desc->flags)); + rt2x00_set_field32(&word, TXD_W0_CIPHER_OWNER, 1); + rt2x00_set_field32(&word, TXD_W0_IFS, desc->ifs); + rt2x00_set_field32(&word, TXD_W0_RETRY_MODE, + !!(control->flags & + IEEE80211_TXCTL_LONG_RETRY_LIMIT)); + rt2x00_set_field32(&word, TXD_W0_DATABYTE_COUNT, length); + rt2x00_set_field32(&word, TXD_W0_CIPHER_ALG, CIPHER_NONE); + rt2x00_desc_write(txd, 0, word); +} + +/* + * TX data initialization + */ +static void rt2500pci_kick_tx_queue(struct rt2x00_dev *rt2x00dev, + unsigned int queue) +{ + u32 reg; + + if (queue == IEEE80211_TX_QUEUE_BEACON) { + rt2x00pci_register_read(rt2x00dev, CSR14, ®); + if (!rt2x00_get_field32(reg, CSR14_BEACON_GEN)) { + rt2x00_set_field32(®, CSR14_BEACON_GEN, 1); + rt2x00pci_register_write(rt2x00dev, CSR14, reg); + } + return; + } + + rt2x00pci_register_read(rt2x00dev, TXCSR0, ®); + if (queue == 
IEEE80211_TX_QUEUE_DATA0) + rt2x00_set_field32(®, TXCSR0_KICK_PRIO, 1); + else if (queue == IEEE80211_TX_QUEUE_DATA1) + rt2x00_set_field32(®, TXCSR0_KICK_TX, 1); + else if (queue == IEEE80211_TX_QUEUE_AFTER_BEACON) + rt2x00_set_field32(®, TXCSR0_KICK_ATIM, 1); + rt2x00pci_register_write(rt2x00dev, TXCSR0, reg); +} + +/* + * RX control handlers + */ +static void rt2500pci_fill_rxdone(struct data_entry *entry, + struct rxdata_entry_desc *desc) +{ + struct data_desc *rxd = entry->priv; + u32 word0; + u32 word2; + + rt2x00_desc_read(rxd, 0, &word0); + rt2x00_desc_read(rxd, 2, &word2); + + desc->flags = 0; + if (rt2x00_get_field32(word0, RXD_W0_CRC_ERROR)) + desc->flags |= RX_FLAG_FAILED_FCS_CRC; + if (rt2x00_get_field32(word0, RXD_W0_PHYSICAL_ERROR)) + desc->flags |= RX_FLAG_FAILED_PLCP_CRC; + + desc->signal = rt2x00_get_field32(word2, RXD_W2_SIGNAL); + desc->rssi = rt2x00_get_field32(word2, RXD_W2_RSSI) - + entry->ring->rt2x00dev->rssi_offset; + desc->ofdm = rt2x00_get_field32(word0, RXD_W0_OFDM); + desc->size = rt2x00_get_field32(word0, RXD_W0_DATABYTE_COUNT); +} + +/* + * Interrupt functions. + */ +static void rt2500pci_txdone(struct rt2x00_dev *rt2x00dev, const int queue) +{ + struct data_ring *ring = rt2x00lib_get_ring(rt2x00dev, queue); + struct data_entry *entry; + struct data_desc *txd; + u32 word; + int tx_status; + int retry; + + while (!rt2x00_ring_empty(ring)) { + entry = rt2x00_get_data_entry_done(ring); + txd = entry->priv; + rt2x00_desc_read(txd, 0, &word); + + if (rt2x00_get_field32(word, TXD_W0_OWNER_NIC) || + !rt2x00_get_field32(word, TXD_W0_VALID)) + break; + + /* + * Obtain the status about this packet. + */ + tx_status = rt2x00_get_field32(word, TXD_W0_RESULT); + retry = rt2x00_get_field32(word, TXD_W0_RETRY_COUNT); + + rt2x00lib_txdone(entry, tx_status, retry); + + /* + * Make this entry available for reuse. + */ + entry->flags = 0; + rt2x00_set_field32(&word, TXD_W0_VALID, 0); + rt2x00_desc_write(txd, 0, word); + rt2x00_ring_index_done_inc(ring); + } + + /* + * If the data ring was full before the txdone handler + * we must make sure the packet queue in the mac80211 stack + * is reenabled when the txdone handler has finished. + */ + entry = ring->entry; + if (!rt2x00_ring_full(ring)) + ieee80211_wake_queue(rt2x00dev->hw, + entry->tx_status.control.queue); +} + +static irqreturn_t rt2500pci_interrupt(int irq, void *dev_instance) +{ + struct rt2x00_dev *rt2x00dev = dev_instance; + u32 reg; + + /* + * Get the interrupt sources & saved to local variable. + * Write register value back to clear pending interrupts. + */ + rt2x00pci_register_read(rt2x00dev, CSR7, ®); + rt2x00pci_register_write(rt2x00dev, CSR7, reg); + + if (!reg) + return IRQ_NONE; + + if (!test_bit(DEVICE_ENABLED_RADIO, &rt2x00dev->flags)) + return IRQ_HANDLED; + + /* + * Handle interrupts, walk through all bits + * and run the tasks, the bits are checked in order of + * priority. + */ + + /* + * 1 - Beacon timer expired interrupt. + */ + if (rt2x00_get_field32(reg, CSR7_TBCN_EXPIRE)) + rt2x00lib_beacondone(rt2x00dev); + + /* + * 2 - Rx ring done interrupt. + */ + if (rt2x00_get_field32(reg, CSR7_RXDONE)) + rt2x00pci_rxdone(rt2x00dev); + + /* + * 3 - Atim ring transmit done interrupt. + */ + if (rt2x00_get_field32(reg, CSR7_TXDONE_ATIMRING)) + rt2500pci_txdone(rt2x00dev, IEEE80211_TX_QUEUE_AFTER_BEACON); + + /* + * 4 - Priority ring transmit done interrupt. 
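rt2500pci_fill_rxdone() above reports RSSI as the raw RXD_W2_RSSI reading minus the per-device offset (taken from the EEPROM calibration word, with DEFAULT_RSSI_OFFSET = 121 as the fallback). As a worked example, a raw reading of 60 maps to 60 - 121 = -61 dBm; a minimal sketch with an illustrative helper name:

static inline int example_raw_rssi_to_dbm(u8 raw_rssi, u16 rssi_offset)
{
	// raw_rssi comes from RXD_W2_RSSI, rssi_offset from rt2x00dev->rssi_offset
	return (int)raw_rssi - (int)rssi_offset;
}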
+ */ + if (rt2x00_get_field32(reg, CSR7_TXDONE_PRIORING)) + rt2500pci_txdone(rt2x00dev, IEEE80211_TX_QUEUE_DATA0); + + /* + * 5 - Tx ring transmit done interrupt. + */ + if (rt2x00_get_field32(reg, CSR7_TXDONE_TXRING)) + rt2500pci_txdone(rt2x00dev, IEEE80211_TX_QUEUE_DATA1); + + return IRQ_HANDLED; +} + +/* + * Device probe functions. + */ +static int rt2500pci_validate_eeprom(struct rt2x00_dev *rt2x00dev) +{ + struct eeprom_93cx6 eeprom; + u32 reg; + u16 word; + u8 *mac; + + rt2x00pci_register_read(rt2x00dev, CSR21, ®); + + eeprom.data = rt2x00dev; + eeprom.register_read = rt2500pci_eepromregister_read; + eeprom.register_write = rt2500pci_eepromregister_write; + eeprom.width = rt2x00_get_field32(reg, CSR21_TYPE_93C46) ? + PCI_EEPROM_WIDTH_93C46 : PCI_EEPROM_WIDTH_93C66; + eeprom.reg_data_in = 0; + eeprom.reg_data_out = 0; + eeprom.reg_data_clock = 0; + eeprom.reg_chip_select = 0; + + eeprom_93cx6_multiread(&eeprom, EEPROM_BASE, rt2x00dev->eeprom, + EEPROM_SIZE / sizeof(u16)); + + /* + * Start validation of the data that has been read. + */ + mac = rt2x00_eeprom_addr(rt2x00dev, EEPROM_MAC_ADDR_0); + if (!is_valid_ether_addr(mac)) { + DECLARE_MAC_BUF(macbuf); + + random_ether_addr(mac); + EEPROM(rt2x00dev, "MAC: %s\n", + print_mac(macbuf, mac)); + } + + rt2x00_eeprom_read(rt2x00dev, EEPROM_ANTENNA, &word); + if (word == 0xffff) { + rt2x00_set_field16(&word, EEPROM_ANTENNA_NUM, 2); + rt2x00_set_field16(&word, EEPROM_ANTENNA_TX_DEFAULT, 0); + rt2x00_set_field16(&word, EEPROM_ANTENNA_RX_DEFAULT, 0); + rt2x00_set_field16(&word, EEPROM_ANTENNA_LED_MODE, 0); + rt2x00_set_field16(&word, EEPROM_ANTENNA_DYN_TXAGC, 0); + rt2x00_set_field16(&word, EEPROM_ANTENNA_HARDWARE_RADIO, 0); + rt2x00_set_field16(&word, EEPROM_ANTENNA_RF_TYPE, RF2522); + rt2x00_eeprom_write(rt2x00dev, EEPROM_ANTENNA, word); + EEPROM(rt2x00dev, "Antenna: 0x%04x\n", word); + } + + rt2x00_eeprom_read(rt2x00dev, EEPROM_NIC, &word); + if (word == 0xffff) { + rt2x00_set_field16(&word, EEPROM_NIC_CARDBUS_ACCEL, 0); + rt2x00_set_field16(&word, EEPROM_NIC_DYN_BBP_TUNE, 0); + rt2x00_set_field16(&word, EEPROM_NIC_CCK_TX_POWER, 0); + rt2x00_eeprom_write(rt2x00dev, EEPROM_NIC, word); + EEPROM(rt2x00dev, "NIC: 0x%04x\n", word); + } + + rt2x00_eeprom_read(rt2x00dev, EEPROM_CALIBRATE_OFFSET, &word); + if (word == 0xffff) { + rt2x00_set_field16(&word, EEPROM_CALIBRATE_OFFSET_RSSI, + DEFAULT_RSSI_OFFSET); + rt2x00_eeprom_write(rt2x00dev, EEPROM_CALIBRATE_OFFSET, word); + EEPROM(rt2x00dev, "Calibrate offset: 0x%04x\n", word); + } + + return 0; +} + +static int rt2500pci_init_eeprom(struct rt2x00_dev *rt2x00dev) +{ + u32 reg; + u16 value; + u16 eeprom; + + /* + * Read EEPROM word for configuration. + */ + rt2x00_eeprom_read(rt2x00dev, EEPROM_ANTENNA, &eeprom); + + /* + * Identify RF chipset. + */ + value = rt2x00_get_field16(eeprom, EEPROM_ANTENNA_RF_TYPE); + rt2x00pci_register_read(rt2x00dev, CSR0, ®); + rt2x00_set_chip(rt2x00dev, RT2560, value, reg); + + if (!rt2x00_rf(&rt2x00dev->chip, RF2522) && + !rt2x00_rf(&rt2x00dev->chip, RF2523) && + !rt2x00_rf(&rt2x00dev->chip, RF2524) && + !rt2x00_rf(&rt2x00dev->chip, RF2525) && + !rt2x00_rf(&rt2x00dev->chip, RF2525E) && + !rt2x00_rf(&rt2x00dev->chip, RF5222)) { + ERROR(rt2x00dev, "Invalid RF chipset detected.\n"); + return -ENODEV; + } + + /* + * Identify default antenna configuration. 
+ */ + rt2x00dev->hw->conf.antenna_sel_tx = + rt2x00_get_field16(eeprom, EEPROM_ANTENNA_TX_DEFAULT); + rt2x00dev->hw->conf.antenna_sel_rx = + rt2x00_get_field16(eeprom, EEPROM_ANTENNA_RX_DEFAULT); + + /* + * Store led mode, for correct led behaviour. + */ + rt2x00dev->led_mode = + rt2x00_get_field16(eeprom, EEPROM_ANTENNA_LED_MODE); + + /* + * Detect if this device has an hardware controlled radio. + */ +#ifdef CONFIG_RT2500PCI_RFKILL + if (rt2x00_get_field16(eeprom, EEPROM_ANTENNA_HARDWARE_RADIO)) + __set_bit(CONFIG_SUPPORT_HW_BUTTON, &rt2x00dev->flags); +#endif /* CONFIG_RT2500PCI_RFKILL */ + + /* + * Check if the BBP tuning should be enabled. + */ + rt2x00_eeprom_read(rt2x00dev, EEPROM_NIC, &eeprom); + + if (rt2x00_get_field16(eeprom, EEPROM_NIC_DYN_BBP_TUNE)) + __set_bit(CONFIG_DISABLE_LINK_TUNING, &rt2x00dev->flags); + + /* + * Read the RSSI <-> dBm offset information. + */ + rt2x00_eeprom_read(rt2x00dev, EEPROM_CALIBRATE_OFFSET, &eeprom); + rt2x00dev->rssi_offset = + rt2x00_get_field16(eeprom, EEPROM_CALIBRATE_OFFSET_RSSI); + + return 0; +} + +/* + * RF value list for RF2522 + * Supports: 2.4 GHz + */ +static const struct rf_channel rf_vals_bg_2522[] = { + { 1, 0x00002050, 0x000c1fda, 0x00000101, 0 }, + { 2, 0x00002050, 0x000c1fee, 0x00000101, 0 }, + { 3, 0x00002050, 0x000c2002, 0x00000101, 0 }, + { 4, 0x00002050, 0x000c2016, 0x00000101, 0 }, + { 5, 0x00002050, 0x000c202a, 0x00000101, 0 }, + { 6, 0x00002050, 0x000c203e, 0x00000101, 0 }, + { 7, 0x00002050, 0x000c2052, 0x00000101, 0 }, + { 8, 0x00002050, 0x000c2066, 0x00000101, 0 }, + { 9, 0x00002050, 0x000c207a, 0x00000101, 0 }, + { 10, 0x00002050, 0x000c208e, 0x00000101, 0 }, + { 11, 0x00002050, 0x000c20a2, 0x00000101, 0 }, + { 12, 0x00002050, 0x000c20b6, 0x00000101, 0 }, + { 13, 0x00002050, 0x000c20ca, 0x00000101, 0 }, + { 14, 0x00002050, 0x000c20fa, 0x00000101, 0 }, +}; + +/* + * RF value list for RF2523 + * Supports: 2.4 GHz + */ +static const struct rf_channel rf_vals_bg_2523[] = { + { 1, 0x00022010, 0x00000c9e, 0x000e0111, 0x00000a1b }, + { 2, 0x00022010, 0x00000ca2, 0x000e0111, 0x00000a1b }, + { 3, 0x00022010, 0x00000ca6, 0x000e0111, 0x00000a1b }, + { 4, 0x00022010, 0x00000caa, 0x000e0111, 0x00000a1b }, + { 5, 0x00022010, 0x00000cae, 0x000e0111, 0x00000a1b }, + { 6, 0x00022010, 0x00000cb2, 0x000e0111, 0x00000a1b }, + { 7, 0x00022010, 0x00000cb6, 0x000e0111, 0x00000a1b }, + { 8, 0x00022010, 0x00000cba, 0x000e0111, 0x00000a1b }, + { 9, 0x00022010, 0x00000cbe, 0x000e0111, 0x00000a1b }, + { 10, 0x00022010, 0x00000d02, 0x000e0111, 0x00000a1b }, + { 11, 0x00022010, 0x00000d06, 0x000e0111, 0x00000a1b }, + { 12, 0x00022010, 0x00000d0a, 0x000e0111, 0x00000a1b }, + { 13, 0x00022010, 0x00000d0e, 0x000e0111, 0x00000a1b }, + { 14, 0x00022010, 0x00000d1a, 0x000e0111, 0x00000a03 }, +}; + +/* + * RF value list for RF2524 + * Supports: 2.4 GHz + */ +static const struct rf_channel rf_vals_bg_2524[] = { + { 1, 0x00032020, 0x00000c9e, 0x00000101, 0x00000a1b }, + { 2, 0x00032020, 0x00000ca2, 0x00000101, 0x00000a1b }, + { 3, 0x00032020, 0x00000ca6, 0x00000101, 0x00000a1b }, + { 4, 0x00032020, 0x00000caa, 0x00000101, 0x00000a1b }, + { 5, 0x00032020, 0x00000cae, 0x00000101, 0x00000a1b }, + { 6, 0x00032020, 0x00000cb2, 0x00000101, 0x00000a1b }, + { 7, 0x00032020, 0x00000cb6, 0x00000101, 0x00000a1b }, + { 8, 0x00032020, 0x00000cba, 0x00000101, 0x00000a1b }, + { 9, 0x00032020, 0x00000cbe, 0x00000101, 0x00000a1b }, + { 10, 0x00032020, 0x00000d02, 0x00000101, 0x00000a1b }, + { 11, 0x00032020, 0x00000d06, 0x00000101, 0x00000a1b }, + { 12, 0x00032020, 
0x00000d0a, 0x00000101, 0x00000a1b }, + { 13, 0x00032020, 0x00000d0e, 0x00000101, 0x00000a1b }, + { 14, 0x00032020, 0x00000d1a, 0x00000101, 0x00000a03 }, +}; + +/* + * RF value list for RF2525 + * Supports: 2.4 GHz + */ +static const struct rf_channel rf_vals_bg_2525[] = { + { 1, 0x00022020, 0x00080c9e, 0x00060111, 0x00000a1b }, + { 2, 0x00022020, 0x00080ca2, 0x00060111, 0x00000a1b }, + { 3, 0x00022020, 0x00080ca6, 0x00060111, 0x00000a1b }, + { 4, 0x00022020, 0x00080caa, 0x00060111, 0x00000a1b }, + { 5, 0x00022020, 0x00080cae, 0x00060111, 0x00000a1b }, + { 6, 0x00022020, 0x00080cb2, 0x00060111, 0x00000a1b }, + { 7, 0x00022020, 0x00080cb6, 0x00060111, 0x00000a1b }, + { 8, 0x00022020, 0x00080cba, 0x00060111, 0x00000a1b }, + { 9, 0x00022020, 0x00080cbe, 0x00060111, 0x00000a1b }, + { 10, 0x00022020, 0x00080d02, 0x00060111, 0x00000a1b }, + { 11, 0x00022020, 0x00080d06, 0x00060111, 0x00000a1b }, + { 12, 0x00022020, 0x00080d0a, 0x00060111, 0x00000a1b }, + { 13, 0x00022020, 0x00080d0e, 0x00060111, 0x00000a1b }, + { 14, 0x00022020, 0x00080d1a, 0x00060111, 0x00000a03 }, +}; + +/* + * RF value list for RF2525e + * Supports: 2.4 GHz + */ +static const struct rf_channel rf_vals_bg_2525e[] = { + { 1, 0x00022020, 0x00081136, 0x00060111, 0x00000a0b }, + { 2, 0x00022020, 0x0008113a, 0x00060111, 0x00000a0b }, + { 3, 0x00022020, 0x0008113e, 0x00060111, 0x00000a0b }, + { 4, 0x00022020, 0x00081182, 0x00060111, 0x00000a0b }, + { 5, 0x00022020, 0x00081186, 0x00060111, 0x00000a0b }, + { 6, 0x00022020, 0x0008118a, 0x00060111, 0x00000a0b }, + { 7, 0x00022020, 0x0008118e, 0x00060111, 0x00000a0b }, + { 8, 0x00022020, 0x00081192, 0x00060111, 0x00000a0b }, + { 9, 0x00022020, 0x00081196, 0x00060111, 0x00000a0b }, + { 10, 0x00022020, 0x0008119a, 0x00060111, 0x00000a0b }, + { 11, 0x00022020, 0x0008119e, 0x00060111, 0x00000a0b }, + { 12, 0x00022020, 0x000811a2, 0x00060111, 0x00000a0b }, + { 13, 0x00022020, 0x000811a6, 0x00060111, 0x00000a0b }, + { 14, 0x00022020, 0x000811ae, 0x00060111, 0x00000a1b }, +}; + +/* + * RF value list for RF5222 + * Supports: 2.4 GHz & 5.2 GHz + */ +static const struct rf_channel rf_vals_5222[] = { + { 1, 0x00022020, 0x00001136, 0x00000101, 0x00000a0b }, + { 2, 0x00022020, 0x0000113a, 0x00000101, 0x00000a0b }, + { 3, 0x00022020, 0x0000113e, 0x00000101, 0x00000a0b }, + { 4, 0x00022020, 0x00001182, 0x00000101, 0x00000a0b }, + { 5, 0x00022020, 0x00001186, 0x00000101, 0x00000a0b }, + { 6, 0x00022020, 0x0000118a, 0x00000101, 0x00000a0b }, + { 7, 0x00022020, 0x0000118e, 0x00000101, 0x00000a0b }, + { 8, 0x00022020, 0x00001192, 0x00000101, 0x00000a0b }, + { 9, 0x00022020, 0x00001196, 0x00000101, 0x00000a0b }, + { 10, 0x00022020, 0x0000119a, 0x00000101, 0x00000a0b }, + { 11, 0x00022020, 0x0000119e, 0x00000101, 0x00000a0b }, + { 12, 0x00022020, 0x000011a2, 0x00000101, 0x00000a0b }, + { 13, 0x00022020, 0x000011a6, 0x00000101, 0x00000a0b }, + { 14, 0x00022020, 0x000011ae, 0x00000101, 0x00000a1b }, + + /* 802.11 UNI / HyperLan 2 */ + { 36, 0x00022010, 0x00018896, 0x00000101, 0x00000a1f }, + { 40, 0x00022010, 0x0001889a, 0x00000101, 0x00000a1f }, + { 44, 0x00022010, 0x0001889e, 0x00000101, 0x00000a1f }, + { 48, 0x00022010, 0x000188a2, 0x00000101, 0x00000a1f }, + { 52, 0x00022010, 0x000188a6, 0x00000101, 0x00000a1f }, + { 66, 0x00022010, 0x000188aa, 0x00000101, 0x00000a1f }, + { 60, 0x00022010, 0x000188ae, 0x00000101, 0x00000a1f }, + { 64, 0x00022010, 0x000188b2, 0x00000101, 0x00000a1f }, + + /* 802.11 HyperLan 2 */ + { 100, 0x00022010, 0x00008802, 0x00000101, 0x00000a0f }, + { 104, 0x00022010, 0x00008806, 
0x00000101, 0x00000a0f }, + { 108, 0x00022010, 0x0000880a, 0x00000101, 0x00000a0f }, + { 112, 0x00022010, 0x0000880e, 0x00000101, 0x00000a0f }, + { 116, 0x00022010, 0x00008812, 0x00000101, 0x00000a0f }, + { 120, 0x00022010, 0x00008816, 0x00000101, 0x00000a0f }, + { 124, 0x00022010, 0x0000881a, 0x00000101, 0x00000a0f }, + { 128, 0x00022010, 0x0000881e, 0x00000101, 0x00000a0f }, + { 132, 0x00022010, 0x00008822, 0x00000101, 0x00000a0f }, + { 136, 0x00022010, 0x00008826, 0x00000101, 0x00000a0f }, + + /* 802.11 UNII */ + { 140, 0x00022010, 0x0000882a, 0x00000101, 0x00000a0f }, + { 149, 0x00022020, 0x000090a6, 0x00000101, 0x00000a07 }, + { 153, 0x00022020, 0x000090ae, 0x00000101, 0x00000a07 }, + { 157, 0x00022020, 0x000090b6, 0x00000101, 0x00000a07 }, + { 161, 0x00022020, 0x000090be, 0x00000101, 0x00000a07 }, +}; + +static void rt2500pci_probe_hw_mode(struct rt2x00_dev *rt2x00dev) +{ + struct hw_mode_spec *spec = &rt2x00dev->spec; + u8 *txpower; + unsigned int i; + + /* + * Initialize all hw fields. + */ + rt2x00dev->hw->flags = IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING; + rt2x00dev->hw->extra_tx_headroom = 0; + rt2x00dev->hw->max_signal = MAX_SIGNAL; + rt2x00dev->hw->max_rssi = MAX_RX_SSI; + rt2x00dev->hw->queues = 2; + + SET_IEEE80211_DEV(rt2x00dev->hw, &rt2x00dev_pci(rt2x00dev)->dev); + SET_IEEE80211_PERM_ADDR(rt2x00dev->hw, + rt2x00_eeprom_addr(rt2x00dev, + EEPROM_MAC_ADDR_0)); + + /* + * Convert tx_power array in eeprom. + */ + txpower = rt2x00_eeprom_addr(rt2x00dev, EEPROM_TXPOWER_START); + for (i = 0; i < 14; i++) + txpower[i] = TXPOWER_FROM_DEV(txpower[i]); + + /* + * Initialize hw_mode information. + */ + spec->num_modes = 2; + spec->num_rates = 12; + spec->tx_power_a = NULL; + spec->tx_power_bg = txpower; + spec->tx_power_default = DEFAULT_TXPOWER; + + if (rt2x00_rf(&rt2x00dev->chip, RF2522)) { + spec->num_channels = ARRAY_SIZE(rf_vals_bg_2522); + spec->channels = rf_vals_bg_2522; + } else if (rt2x00_rf(&rt2x00dev->chip, RF2523)) { + spec->num_channels = ARRAY_SIZE(rf_vals_bg_2523); + spec->channels = rf_vals_bg_2523; + } else if (rt2x00_rf(&rt2x00dev->chip, RF2524)) { + spec->num_channels = ARRAY_SIZE(rf_vals_bg_2524); + spec->channels = rf_vals_bg_2524; + } else if (rt2x00_rf(&rt2x00dev->chip, RF2525)) { + spec->num_channels = ARRAY_SIZE(rf_vals_bg_2525); + spec->channels = rf_vals_bg_2525; + } else if (rt2x00_rf(&rt2x00dev->chip, RF2525E)) { + spec->num_channels = ARRAY_SIZE(rf_vals_bg_2525e); + spec->channels = rf_vals_bg_2525e; + } else if (rt2x00_rf(&rt2x00dev->chip, RF5222)) { + spec->num_channels = ARRAY_SIZE(rf_vals_5222); + spec->channels = rf_vals_5222; + spec->num_modes = 3; + } +} + +static int rt2500pci_probe_hw(struct rt2x00_dev *rt2x00dev) +{ + int retval; + + /* + * Allocate eeprom data. + */ + retval = rt2500pci_validate_eeprom(rt2x00dev); + if (retval) + return retval; + + retval = rt2500pci_init_eeprom(rt2x00dev); + if (retval) + return retval; + + /* + * Initialize hw specifications. + */ + rt2500pci_probe_hw_mode(rt2x00dev); + + /* + * This device requires the beacon ring + */ + __set_bit(DRIVER_REQUIRE_BEACON_RING, &rt2x00dev->flags); + + /* + * Set the rssi offset. + */ + rt2x00dev->rssi_offset = DEFAULT_RSSI_OFFSET; + + return 0; +} + +/* + * IEEE80211 stack callback functions. 
+ */ +static void rt2500pci_configure_filter(struct ieee80211_hw *hw, + unsigned int changed_flags, + unsigned int *total_flags, + int mc_count, + struct dev_addr_list *mc_list) +{ + struct rt2x00_dev *rt2x00dev = hw->priv; + struct interface *intf = &rt2x00dev->interface; + u32 reg; + + /* + * Mask off any flags we are going to ignore from + * the total_flags field. + */ + *total_flags &= + FIF_ALLMULTI | + FIF_FCSFAIL | + FIF_PLCPFAIL | + FIF_CONTROL | + FIF_OTHER_BSS | + FIF_PROMISC_IN_BSS; + + /* + * Apply some rules to the filters: + * - Some filters imply different filters to be set. + * - Some things we can't filter out at all. + * - Some filters are set based on interface type. + */ + if (mc_count) + *total_flags |= FIF_ALLMULTI; + if (*total_flags & FIF_OTHER_BSS || + *total_flags & FIF_PROMISC_IN_BSS) + *total_flags |= FIF_PROMISC_IN_BSS | FIF_OTHER_BSS; + if (is_interface_type(intf, IEEE80211_IF_TYPE_AP)) + *total_flags |= FIF_PROMISC_IN_BSS; + + /* + * Check if there is any work left for us. + */ + if (intf->filter == *total_flags) + return; + intf->filter = *total_flags; + + /* + * Start configuration steps. + * Note that the version error will always be dropped + * and broadcast frames will always be accepted since + * there is no filter for it at this time. + */ + rt2x00pci_register_read(rt2x00dev, RXCSR0, ®); + rt2x00_set_field32(®, RXCSR0_DROP_CRC, + !(*total_flags & FIF_FCSFAIL)); + rt2x00_set_field32(®, RXCSR0_DROP_PHYSICAL, + !(*total_flags & FIF_PLCPFAIL)); + rt2x00_set_field32(®, RXCSR0_DROP_CONTROL, + !(*total_flags & FIF_CONTROL)); + rt2x00_set_field32(®, RXCSR0_DROP_NOT_TO_ME, + !(*total_flags & FIF_PROMISC_IN_BSS)); + rt2x00_set_field32(®, RXCSR0_DROP_TODS, + !(*total_flags & FIF_PROMISC_IN_BSS)); + rt2x00_set_field32(®, RXCSR0_DROP_VERSION_ERROR, 1); + rt2x00_set_field32(®, RXCSR0_DROP_MCAST, + !(*total_flags & FIF_ALLMULTI)); + rt2x00_set_field32(®, RXCSR0_DROP_BCAST, 0); + rt2x00pci_register_write(rt2x00dev, RXCSR0, reg); +} + +static int rt2500pci_set_retry_limit(struct ieee80211_hw *hw, + u32 short_retry, u32 long_retry) +{ + struct rt2x00_dev *rt2x00dev = hw->priv; + u32 reg; + + rt2x00pci_register_read(rt2x00dev, CSR11, ®); + rt2x00_set_field32(®, CSR11_LONG_RETRY, long_retry); + rt2x00_set_field32(®, CSR11_SHORT_RETRY, short_retry); + rt2x00pci_register_write(rt2x00dev, CSR11, reg); + + return 0; +} + +static u64 rt2500pci_get_tsf(struct ieee80211_hw *hw) +{ + struct rt2x00_dev *rt2x00dev = hw->priv; + u64 tsf; + u32 reg; + + rt2x00pci_register_read(rt2x00dev, CSR17, ®); + tsf = (u64) rt2x00_get_field32(reg, CSR17_HIGH_TSFTIMER) << 32; + rt2x00pci_register_read(rt2x00dev, CSR16, ®); + tsf |= rt2x00_get_field32(reg, CSR16_LOW_TSFTIMER); + + return tsf; +} + +static void rt2500pci_reset_tsf(struct ieee80211_hw *hw) +{ + struct rt2x00_dev *rt2x00dev = hw->priv; + + rt2x00pci_register_write(rt2x00dev, CSR16, 0); + rt2x00pci_register_write(rt2x00dev, CSR17, 0); +} + +static int rt2500pci_tx_last_beacon(struct ieee80211_hw *hw) +{ + struct rt2x00_dev *rt2x00dev = hw->priv; + u32 reg; + + rt2x00pci_register_read(rt2x00dev, CSR15, ®); + return rt2x00_get_field32(reg, CSR15_BEACON_SENT); +} + +static const struct ieee80211_ops rt2500pci_mac80211_ops = { + .tx = rt2x00mac_tx, + .start = rt2x00mac_start, + .stop = rt2x00mac_stop, + .add_interface = rt2x00mac_add_interface, + .remove_interface = rt2x00mac_remove_interface, + .config = rt2x00mac_config, + .config_interface = rt2x00mac_config_interface, + .configure_filter = rt2500pci_configure_filter, + .get_stats = 
rt2x00mac_get_stats, + .set_retry_limit = rt2500pci_set_retry_limit, + .erp_ie_changed = rt2x00mac_erp_ie_changed, + .conf_tx = rt2x00mac_conf_tx, + .get_tx_stats = rt2x00mac_get_tx_stats, + .get_tsf = rt2500pci_get_tsf, + .reset_tsf = rt2500pci_reset_tsf, + .beacon_update = rt2x00pci_beacon_update, + .tx_last_beacon = rt2500pci_tx_last_beacon, +}; + +static const struct rt2x00lib_ops rt2500pci_rt2x00_ops = { + .irq_handler = rt2500pci_interrupt, + .probe_hw = rt2500pci_probe_hw, + .initialize = rt2x00pci_initialize, + .uninitialize = rt2x00pci_uninitialize, + .set_device_state = rt2500pci_set_device_state, + .rfkill_poll = rt2500pci_rfkill_poll, + .link_stats = rt2500pci_link_stats, + .reset_tuner = rt2500pci_reset_tuner, + .link_tuner = rt2500pci_link_tuner, + .write_tx_desc = rt2500pci_write_tx_desc, + .write_tx_data = rt2x00pci_write_tx_data, + .kick_tx_queue = rt2500pci_kick_tx_queue, + .fill_rxdone = rt2500pci_fill_rxdone, + .config_mac_addr = rt2500pci_config_mac_addr, + .config_bssid = rt2500pci_config_bssid, + .config_type = rt2500pci_config_type, + .config_preamble = rt2500pci_config_preamble, + .config = rt2500pci_config, +}; + +static const struct rt2x00_ops rt2500pci_ops = { + .name = DRV_NAME, + .rxd_size = RXD_DESC_SIZE, + .txd_size = TXD_DESC_SIZE, + .eeprom_size = EEPROM_SIZE, + .rf_size = RF_SIZE, + .lib = &rt2500pci_rt2x00_ops, + .hw = &rt2500pci_mac80211_ops, +#ifdef CONFIG_RT2X00_LIB_DEBUGFS + .debugfs = &rt2500pci_rt2x00debug, +#endif /* CONFIG_RT2X00_LIB_DEBUGFS */ +}; + +/* + * RT2500pci module information. + */ +static struct pci_device_id rt2500pci_device_table[] = { + { PCI_DEVICE(0x1814, 0x0201), PCI_DEVICE_DATA(&rt2500pci_ops) }, + { 0, } +}; + +MODULE_AUTHOR(DRV_PROJECT); +MODULE_VERSION(DRV_VERSION); +MODULE_DESCRIPTION("Ralink RT2500 PCI & PCMCIA Wireless LAN driver."); +MODULE_SUPPORTED_DEVICE("Ralink RT2560 PCI & PCMCIA chipset based cards"); +MODULE_DEVICE_TABLE(pci, rt2500pci_device_table); +MODULE_LICENSE("GPL"); + +static struct pci_driver rt2500pci_driver = { + .name = DRV_NAME, + .id_table = rt2500pci_device_table, + .probe = rt2x00pci_probe, + .remove = __devexit_p(rt2x00pci_remove), + .suspend = rt2x00pci_suspend, + .resume = rt2x00pci_resume, +}; + +static int __init rt2500pci_init(void) +{ + return pci_register_driver(&rt2500pci_driver); +} + +static void __exit rt2500pci_exit(void) +{ + pci_unregister_driver(&rt2500pci_driver); +} + +module_init(rt2500pci_init); +module_exit(rt2500pci_exit); diff --git a/drivers/net/wireless/rt2x00/rt2500pci.h b/drivers/net/wireless/rt2x00/rt2500pci.h new file mode 100644 index 0000000000000000000000000000000000000000..d92aa56b2f4b01f54fcea7feec61ca347717341d --- /dev/null +++ b/drivers/net/wireless/rt2x00/rt2500pci.h @@ -0,0 +1,1236 @@ +/* + Copyright (C) 2004 - 2007 rt2x00 SourceForge Project + + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the + Free Software Foundation, Inc., + 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
+ */ + +/* + Module: rt2500pci + Abstract: Data structures and registers for the rt2500pci module. + Supported chipsets: RT2560. + */ + +#ifndef RT2500PCI_H +#define RT2500PCI_H + +/* + * RF chip defines. + */ +#define RF2522 0x0000 +#define RF2523 0x0001 +#define RF2524 0x0002 +#define RF2525 0x0003 +#define RF2525E 0x0004 +#define RF5222 0x0010 + +/* + * RT2560 version + */ +#define RT2560_VERSION_B 2 +#define RT2560_VERSION_C 3 +#define RT2560_VERSION_D 4 + +/* + * Signal information. + * Defaul offset is required for RSSI <-> dBm conversion. + */ +#define MAX_SIGNAL 100 +#define MAX_RX_SSI -1 +#define DEFAULT_RSSI_OFFSET 121 + +/* + * Register layout information. + */ +#define CSR_REG_BASE 0x0000 +#define CSR_REG_SIZE 0x0174 +#define EEPROM_BASE 0x0000 +#define EEPROM_SIZE 0x0200 +#define BBP_SIZE 0x0040 +#define RF_SIZE 0x0014 + +/* + * Control/Status Registers(CSR). + * Some values are set in TU, whereas 1 TU == 1024 us. + */ + +/* + * CSR0: ASIC revision number. + */ +#define CSR0 0x0000 + +/* + * CSR1: System control register. + * SOFT_RESET: Software reset, 1: reset, 0: normal. + * BBP_RESET: Hardware reset, 1: reset, 0, release. + * HOST_READY: Host ready after initialization. + */ +#define CSR1 0x0004 +#define CSR1_SOFT_RESET FIELD32(0x00000001) +#define CSR1_BBP_RESET FIELD32(0x00000002) +#define CSR1_HOST_READY FIELD32(0x00000004) + +/* + * CSR2: System admin status register (invalid). + */ +#define CSR2 0x0008 + +/* + * CSR3: STA MAC address register 0. + */ +#define CSR3 0x000c +#define CSR3_BYTE0 FIELD32(0x000000ff) +#define CSR3_BYTE1 FIELD32(0x0000ff00) +#define CSR3_BYTE2 FIELD32(0x00ff0000) +#define CSR3_BYTE3 FIELD32(0xff000000) + +/* + * CSR4: STA MAC address register 1. + */ +#define CSR4 0x0010 +#define CSR4_BYTE4 FIELD32(0x000000ff) +#define CSR4_BYTE5 FIELD32(0x0000ff00) + +/* + * CSR5: BSSID register 0. + */ +#define CSR5 0x0014 +#define CSR5_BYTE0 FIELD32(0x000000ff) +#define CSR5_BYTE1 FIELD32(0x0000ff00) +#define CSR5_BYTE2 FIELD32(0x00ff0000) +#define CSR5_BYTE3 FIELD32(0xff000000) + +/* + * CSR6: BSSID register 1. + */ +#define CSR6 0x0018 +#define CSR6_BYTE4 FIELD32(0x000000ff) +#define CSR6_BYTE5 FIELD32(0x0000ff00) + +/* + * CSR7: Interrupt source register. + * Write 1 to clear. + * TBCN_EXPIRE: Beacon timer expired interrupt. + * TWAKE_EXPIRE: Wakeup timer expired interrupt. + * TATIMW_EXPIRE: Timer of atim window expired interrupt. + * TXDONE_TXRING: Tx ring transmit done interrupt. + * TXDONE_ATIMRING: Atim ring transmit done interrupt. + * TXDONE_PRIORING: Priority ring transmit done interrupt. + * RXDONE: Receive done interrupt. + * DECRYPTION_DONE: Decryption done interrupt. + * ENCRYPTION_DONE: Encryption done interrupt. + * UART1_TX_TRESHOLD: UART1 TX reaches threshold. + * UART1_RX_TRESHOLD: UART1 RX reaches threshold. + * UART1_IDLE_TRESHOLD: UART1 IDLE over threshold. + * UART1_TX_BUFF_ERROR: UART1 TX buffer error. + * UART1_RX_BUFF_ERROR: UART1 RX buffer error. + * UART2_TX_TRESHOLD: UART2 TX reaches threshold. + * UART2_RX_TRESHOLD: UART2 RX reaches threshold. + * UART2_IDLE_TRESHOLD: UART2 IDLE over threshold. + * UART2_TX_BUFF_ERROR: UART2 TX buffer error. + * UART2_RX_BUFF_ERROR: UART2 RX buffer error. + * TIMER_CSR3_EXPIRE: TIMECSR3 timer expired (802.1H quiet period). 
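The FIELD32() masks defined throughout this header are consumed by the rt2x00_set_field32()/rt2x00_get_field32() helpers used in the .c file; those helpers live elsewhere in the rt2x00 tree, but their effect is plain mask-and-shift field access. A rough illustrative equivalent for the read side (not the driver's actual implementation):

static inline u32 example_get_field32(u32 reg, u32 mask)
{
	// e.g. with the CSR7_RXDONE mask (0x00000040) this extracts bit 6
	return (reg & mask) >> (ffs(mask) - 1);
}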
+ + */ +#define CSR7 0x001c +#define CSR7_TBCN_EXPIRE FIELD32(0x00000001) +#define CSR7_TWAKE_EXPIRE FIELD32(0x00000002) +#define CSR7_TATIMW_EXPIRE FIELD32(0x00000004) +#define CSR7_TXDONE_TXRING FIELD32(0x00000008) +#define CSR7_TXDONE_ATIMRING FIELD32(0x00000010) +#define CSR7_TXDONE_PRIORING FIELD32(0x00000020) +#define CSR7_RXDONE FIELD32(0x00000040) +#define CSR7_DECRYPTION_DONE FIELD32(0x00000080) +#define CSR7_ENCRYPTION_DONE FIELD32(0x00000100) +#define CSR7_UART1_TX_TRESHOLD FIELD32(0x00000200) +#define CSR7_UART1_RX_TRESHOLD FIELD32(0x00000400) +#define CSR7_UART1_IDLE_TRESHOLD FIELD32(0x00000800) +#define CSR7_UART1_TX_BUFF_ERROR FIELD32(0x00001000) +#define CSR7_UART1_RX_BUFF_ERROR FIELD32(0x00002000) +#define CSR7_UART2_TX_TRESHOLD FIELD32(0x00004000) +#define CSR7_UART2_RX_TRESHOLD FIELD32(0x00008000) +#define CSR7_UART2_IDLE_TRESHOLD FIELD32(0x00010000) +#define CSR7_UART2_TX_BUFF_ERROR FIELD32(0x00020000) +#define CSR7_UART2_RX_BUFF_ERROR FIELD32(0x00040000) +#define CSR7_TIMER_CSR3_EXPIRE FIELD32(0x00080000) + +/* + * CSR8: Interrupt mask register. + * Write 1 to mask interrupt. + * TBCN_EXPIRE: Beacon timer expired interrupt. + * TWAKE_EXPIRE: Wakeup timer expired interrupt. + * TATIMW_EXPIRE: Timer of atim window expired interrupt. + * TXDONE_TXRING: Tx ring transmit done interrupt. + * TXDONE_ATIMRING: Atim ring transmit done interrupt. + * TXDONE_PRIORING: Priority ring transmit done interrupt. + * RXDONE: Receive done interrupt. + * DECRYPTION_DONE: Decryption done interrupt. + * ENCRYPTION_DONE: Encryption done interrupt. + * UART1_TX_TRESHOLD: UART1 TX reaches threshold. + * UART1_RX_TRESHOLD: UART1 RX reaches threshold. + * UART1_IDLE_TRESHOLD: UART1 IDLE over threshold. + * UART1_TX_BUFF_ERROR: UART1 TX buffer error. + * UART1_RX_BUFF_ERROR: UART1 RX buffer error. + * UART2_TX_TRESHOLD: UART2 TX reaches threshold. + * UART2_RX_TRESHOLD: UART2 RX reaches threshold. + * UART2_IDLE_TRESHOLD: UART2 IDLE over threshold. + * UART2_TX_BUFF_ERROR: UART2 TX buffer error. + * UART2_RX_BUFF_ERROR: UART2 RX buffer error. + * TIMER_CSR3_EXPIRE: TIMECSR3 timer expired (802.1H quiet period). + */ +#define CSR8 0x0020 +#define CSR8_TBCN_EXPIRE FIELD32(0x00000001) +#define CSR8_TWAKE_EXPIRE FIELD32(0x00000002) +#define CSR8_TATIMW_EXPIRE FIELD32(0x00000004) +#define CSR8_TXDONE_TXRING FIELD32(0x00000008) +#define CSR8_TXDONE_ATIMRING FIELD32(0x00000010) +#define CSR8_TXDONE_PRIORING FIELD32(0x00000020) +#define CSR8_RXDONE FIELD32(0x00000040) +#define CSR8_DECRYPTION_DONE FIELD32(0x00000080) +#define CSR8_ENCRYPTION_DONE FIELD32(0x00000100) +#define CSR8_UART1_TX_TRESHOLD FIELD32(0x00000200) +#define CSR8_UART1_RX_TRESHOLD FIELD32(0x00000400) +#define CSR8_UART1_IDLE_TRESHOLD FIELD32(0x00000800) +#define CSR8_UART1_TX_BUFF_ERROR FIELD32(0x00001000) +#define CSR8_UART1_RX_BUFF_ERROR FIELD32(0x00002000) +#define CSR8_UART2_TX_TRESHOLD FIELD32(0x00004000) +#define CSR8_UART2_RX_TRESHOLD FIELD32(0x00008000) +#define CSR8_UART2_IDLE_TRESHOLD FIELD32(0x00010000) +#define CSR8_UART2_TX_BUFF_ERROR FIELD32(0x00020000) +#define CSR8_UART2_RX_BUFF_ERROR FIELD32(0x00040000) +#define CSR8_TIMER_CSR3_EXPIRE FIELD32(0x00080000) + +/* + * CSR9: Maximum frame length register. + * MAX_FRAME_UNIT: Maximum frame length in 128b unit, default: 12. + */ +#define CSR9 0x0024 +#define CSR9_MAX_FRAME_UNIT FIELD32(0x00000f80) + +/* + * SECCSR0: WEP control register. + * KICK_DECRYPT: Kick decryption engine, self-clear. + * ONE_SHOT: 0: ring mode, 1: One shot only mode. 
+ * DESC_ADDRESS: Descriptor physical address of frame. + */ +#define SECCSR0 0x0028 +#define SECCSR0_KICK_DECRYPT FIELD32(0x00000001) +#define SECCSR0_ONE_SHOT FIELD32(0x00000002) +#define SECCSR0_DESC_ADDRESS FIELD32(0xfffffffc) + +/* + * CSR11: Back-off control register. + * CWMIN: CWmin. Default cwmin is 31 (2^5 - 1). + * CWMAX: CWmax. Default cwmax is 1023 (2^10 - 1). + * SLOT_TIME: Slot time, default is 20us for 802.11b + * CW_SELECT: CWmin/CWmax selection, 1: Register, 0: TXD. + * LONG_RETRY: Long retry count. + * SHORT_RETRY: Short retry count. + */ +#define CSR11 0x002c +#define CSR11_CWMIN FIELD32(0x0000000f) +#define CSR11_CWMAX FIELD32(0x000000f0) +#define CSR11_SLOT_TIME FIELD32(0x00001f00) +#define CSR11_CW_SELECT FIELD32(0x00002000) +#define CSR11_LONG_RETRY FIELD32(0x00ff0000) +#define CSR11_SHORT_RETRY FIELD32(0xff000000) + +/* + * CSR12: Synchronization configuration register 0. + * All units in 1/16 TU. + * BEACON_INTERVAL: Beacon interval, default is 100 TU. + * CFP_MAX_DURATION: Cfp maximum duration, default is 100 TU. + */ +#define CSR12 0x0030 +#define CSR12_BEACON_INTERVAL FIELD32(0x0000ffff) +#define CSR12_CFP_MAX_DURATION FIELD32(0xffff0000) + +/* + * CSR13: Synchronization configuration register 1. + * All units in 1/16 TU. + * ATIMW_DURATION: Atim window duration. + * CFP_PERIOD: Cfp period, default is 0 TU. + */ +#define CSR13 0x0034 +#define CSR13_ATIMW_DURATION FIELD32(0x0000ffff) +#define CSR13_CFP_PERIOD FIELD32(0x00ff0000) + +/* + * CSR14: Synchronization control register. + * TSF_COUNT: Enable tsf auto counting. + * TSF_SYNC: Tsf sync, 0: disable, 1: infra, 2: ad-hoc/master mode. + * TBCN: Enable tbcn with reload value. + * TCFP: Enable tcfp & cfp / cp switching. + * TATIMW: Enable tatimw & atim window switching. + * BEACON_GEN: Enable beacon generator. + * CFP_COUNT_PRELOAD: Cfp count preload value. + * TBCM_PRELOAD: Tbcn preload value in units of 64us. + */ +#define CSR14 0x0038 +#define CSR14_TSF_COUNT FIELD32(0x00000001) +#define CSR14_TSF_SYNC FIELD32(0x00000006) +#define CSR14_TBCN FIELD32(0x00000008) +#define CSR14_TCFP FIELD32(0x00000010) +#define CSR14_TATIMW FIELD32(0x00000020) +#define CSR14_BEACON_GEN FIELD32(0x00000040) +#define CSR14_CFP_COUNT_PRELOAD FIELD32(0x0000ff00) +#define CSR14_TBCM_PRELOAD FIELD32(0xffff0000) + +/* + * CSR15: Synchronization status register. + * CFP: ASIC is in contention-free period. + * ATIMW: ASIC is in ATIM window. + * BEACON_SENT: Beacon is send. + */ +#define CSR15 0x003c +#define CSR15_CFP FIELD32(0x00000001) +#define CSR15_ATIMW FIELD32(0x00000002) +#define CSR15_BEACON_SENT FIELD32(0x00000004) + +/* + * CSR16: TSF timer register 0. + */ +#define CSR16 0x0040 +#define CSR16_LOW_TSFTIMER FIELD32(0xffffffff) + +/* + * CSR17: TSF timer register 1. + */ +#define CSR17 0x0044 +#define CSR17_HIGH_TSFTIMER FIELD32(0xffffffff) + +/* + * CSR18: IFS timer register 0. + * SIFS: Sifs, default is 10 us. + * PIFS: Pifs, default is 30 us. + */ +#define CSR18 0x0048 +#define CSR18_SIFS FIELD32(0x000001ff) +#define CSR18_PIFS FIELD32(0x001f0000) + +/* + * CSR19: IFS timer register 1. + * DIFS: Difs, default is 50 us. + * EIFS: Eifs, default is 364 us. + */ +#define CSR19 0x004c +#define CSR19_DIFS FIELD32(0x0000ffff) +#define CSR19_EIFS FIELD32(0xffff0000) + +/* + * CSR20: Wakeup timer register. + * DELAY_AFTER_TBCN: Delay after tbcn expired in units of 1/16 TU. + * TBCN_BEFORE_WAKEUP: Number of beacon before wakeup. + * AUTOWAKE: Enable auto wakeup / sleep mechanism. 
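CSR16 and CSR17 above expose the 64-bit TSF as two 32-bit halves, which rt2500pci_get_tsf() earlier in this patch stitches back together. A standalone illustration of that assembly (helper name and parameters are illustrative only):

static inline u64 example_assemble_tsf(u32 csr17_high, u32 csr16_low)
{
	// high word occupies bits 63:32, low word fills bits 31:0
	return ((u64)csr17_high << 32) | csr16_low;
}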
+ */ +#define CSR20 0x0050 +#define CSR20_DELAY_AFTER_TBCN FIELD32(0x0000ffff) +#define CSR20_TBCN_BEFORE_WAKEUP FIELD32(0x00ff0000) +#define CSR20_AUTOWAKE FIELD32(0x01000000) + +/* + * CSR21: EEPROM control register. + * RELOAD: Write 1 to reload eeprom content. + * TYPE_93C46: 1: 93c46, 0:93c66. + */ +#define CSR21 0x0054 +#define CSR21_RELOAD FIELD32(0x00000001) +#define CSR21_EEPROM_DATA_CLOCK FIELD32(0x00000002) +#define CSR21_EEPROM_CHIP_SELECT FIELD32(0x00000004) +#define CSR21_EEPROM_DATA_IN FIELD32(0x00000008) +#define CSR21_EEPROM_DATA_OUT FIELD32(0x00000010) +#define CSR21_TYPE_93C46 FIELD32(0x00000020) + +/* + * CSR22: CFP control register. + * CFP_DURATION_REMAIN: Cfp duration remain, in units of TU. + * RELOAD_CFP_DURATION: Write 1 to reload cfp duration remain. + */ +#define CSR22 0x0058 +#define CSR22_CFP_DURATION_REMAIN FIELD32(0x0000ffff) +#define CSR22_RELOAD_CFP_DURATION FIELD32(0x00010000) + +/* + * Transmit related CSRs. + * Some values are set in TU, whereas 1 TU == 1024 us. + */ + +/* + * TXCSR0: TX Control Register. + * KICK_TX: Kick tx ring. + * KICK_ATIM: Kick atim ring. + * KICK_PRIO: Kick priority ring. + * ABORT: Abort all transmit related ring operation. + */ +#define TXCSR0 0x0060 +#define TXCSR0_KICK_TX FIELD32(0x00000001) +#define TXCSR0_KICK_ATIM FIELD32(0x00000002) +#define TXCSR0_KICK_PRIO FIELD32(0x00000004) +#define TXCSR0_ABORT FIELD32(0x00000008) + +/* + * TXCSR1: TX Configuration Register. + * ACK_TIMEOUT: Ack timeout, default = sifs + 2*slottime + acktime @ 1mbps. + * ACK_CONSUME_TIME: Ack consume time, default = sifs + acktime @ 1mbps. + * TSF_OFFSET: Insert tsf offset. + * AUTORESPONDER: Enable auto responder which include ack & cts. + */ +#define TXCSR1 0x0064 +#define TXCSR1_ACK_TIMEOUT FIELD32(0x000001ff) +#define TXCSR1_ACK_CONSUME_TIME FIELD32(0x0003fe00) +#define TXCSR1_TSF_OFFSET FIELD32(0x00fc0000) +#define TXCSR1_AUTORESPONDER FIELD32(0x01000000) + +/* + * TXCSR2: Tx descriptor configuration register. + * TXD_SIZE: Tx descriptor size, default is 48. + * NUM_TXD: Number of tx entries in ring. + * NUM_ATIM: Number of atim entries in ring. + * NUM_PRIO: Number of priority entries in ring. + */ +#define TXCSR2 0x0068 +#define TXCSR2_TXD_SIZE FIELD32(0x000000ff) +#define TXCSR2_NUM_TXD FIELD32(0x0000ff00) +#define TXCSR2_NUM_ATIM FIELD32(0x00ff0000) +#define TXCSR2_NUM_PRIO FIELD32(0xff000000) + +/* + * TXCSR3: TX Ring Base address register. + */ +#define TXCSR3 0x006c +#define TXCSR3_TX_RING_REGISTER FIELD32(0xffffffff) + +/* + * TXCSR4: TX Atim Ring Base address register. + */ +#define TXCSR4 0x0070 +#define TXCSR4_ATIM_RING_REGISTER FIELD32(0xffffffff) + +/* + * TXCSR5: TX Prio Ring Base address register. + */ +#define TXCSR5 0x0074 +#define TXCSR5_PRIO_RING_REGISTER FIELD32(0xffffffff) + +/* + * TXCSR6: Beacon Base address register. + */ +#define TXCSR6 0x0078 +#define TXCSR6_BEACON_RING_REGISTER FIELD32(0xffffffff) + +/* + * TXCSR7: Auto responder control register. + * AR_POWERMANAGEMENT: Auto responder power management bit. + */ +#define TXCSR7 0x007c +#define TXCSR7_AR_POWERMANAGEMENT FIELD32(0x00000001) + +/* + * TXCSR8: CCK Tx BBP register. 
+ */ +#define TXCSR8 0x0098 +#define TXCSR8_BBP_ID0 FIELD32(0x0000007f) +#define TXCSR8_BBP_ID0_VALID FIELD32(0x00000080) +#define TXCSR8_BBP_ID1 FIELD32(0x00007f00) +#define TXCSR8_BBP_ID1_VALID FIELD32(0x00008000) +#define TXCSR8_BBP_ID2 FIELD32(0x007f0000) +#define TXCSR8_BBP_ID2_VALID FIELD32(0x00800000) +#define TXCSR8_BBP_ID3 FIELD32(0x7f000000) +#define TXCSR8_BBP_ID3_VALID FIELD32(0x80000000) + +/* + * TXCSR9: OFDM TX BBP registers + * OFDM_SIGNAL: BBP rate field address for OFDM. + * OFDM_SERVICE: BBP service field address for OFDM. + * OFDM_LENGTH_LOW: BBP length low byte address for OFDM. + * OFDM_LENGTH_HIGH: BBP length high byte address for OFDM. + */ +#define TXCSR9 0x0094 +#define TXCSR9_OFDM_RATE FIELD32(0x000000ff) +#define TXCSR9_OFDM_SERVICE FIELD32(0x0000ff00) +#define TXCSR9_OFDM_LENGTH_LOW FIELD32(0x00ff0000) +#define TXCSR9_OFDM_LENGTH_HIGH FIELD32(0xff000000) + +/* + * Receive related CSRs. + * Some values are set in TU, whereas 1 TU == 1024 us. + */ + +/* + * RXCSR0: RX Control Register. + * DISABLE_RX: Disable rx engine. + * DROP_CRC: Drop crc error. + * DROP_PHYSICAL: Drop physical error. + * DROP_CONTROL: Drop control frame. + * DROP_NOT_TO_ME: Drop not to me unicast frame. + * DROP_TODS: Drop frame tods bit is true. + * DROP_VERSION_ERROR: Drop version error frame. + * PASS_CRC: Pass all packets with crc attached. + * PASS_CRC: Pass all packets with crc attached. + * PASS_PLCP: Pass all packets with 4 bytes PLCP attached. + * DROP_MCAST: Drop multicast frames. + * DROP_BCAST: Drop broadcast frames. + * ENABLE_QOS: Accept QOS data frame and parse QOS field. + */ +#define RXCSR0 0x0080 +#define RXCSR0_DISABLE_RX FIELD32(0x00000001) +#define RXCSR0_DROP_CRC FIELD32(0x00000002) +#define RXCSR0_DROP_PHYSICAL FIELD32(0x00000004) +#define RXCSR0_DROP_CONTROL FIELD32(0x00000008) +#define RXCSR0_DROP_NOT_TO_ME FIELD32(0x00000010) +#define RXCSR0_DROP_TODS FIELD32(0x00000020) +#define RXCSR0_DROP_VERSION_ERROR FIELD32(0x00000040) +#define RXCSR0_PASS_CRC FIELD32(0x00000080) +#define RXCSR0_PASS_PLCP FIELD32(0x00000100) +#define RXCSR0_DROP_MCAST FIELD32(0x00000200) +#define RXCSR0_DROP_BCAST FIELD32(0x00000400) +#define RXCSR0_ENABLE_QOS FIELD32(0x00000800) + +/* + * RXCSR1: RX descriptor configuration register. + * RXD_SIZE: Rx descriptor size, default is 32b. + * NUM_RXD: Number of rx entries in ring. + */ +#define RXCSR1 0x0084 +#define RXCSR1_RXD_SIZE FIELD32(0x000000ff) +#define RXCSR1_NUM_RXD FIELD32(0x0000ff00) + +/* + * RXCSR2: RX Ring base address register. + */ +#define RXCSR2 0x0088 +#define RXCSR2_RX_RING_REGISTER FIELD32(0xffffffff) + +/* + * RXCSR3: BBP ID register for Rx operation. + * BBP_ID#: BBP register # id. + * BBP_ID#_VALID: BBP register # id is valid or not. + */ +#define RXCSR3 0x0090 +#define RXCSR3_BBP_ID0 FIELD32(0x0000007f) +#define RXCSR3_BBP_ID0_VALID FIELD32(0x00000080) +#define RXCSR3_BBP_ID1 FIELD32(0x00007f00) +#define RXCSR3_BBP_ID1_VALID FIELD32(0x00008000) +#define RXCSR3_BBP_ID2 FIELD32(0x007f0000) +#define RXCSR3_BBP_ID2_VALID FIELD32(0x00800000) +#define RXCSR3_BBP_ID3 FIELD32(0x7f000000) +#define RXCSR3_BBP_ID3_VALID FIELD32(0x80000000) + +/* + * ARCSR1: Auto Responder PLCP config register 1. + * AR_BBP_DATA#: Auto responder BBP register # data. + * AR_BBP_ID#: Auto responder BBP register # Id. 
+ */ +#define ARCSR1 0x009c +#define ARCSR1_AR_BBP_DATA2 FIELD32(0x000000ff) +#define ARCSR1_AR_BBP_ID2 FIELD32(0x0000ff00) +#define ARCSR1_AR_BBP_DATA3 FIELD32(0x00ff0000) +#define ARCSR1_AR_BBP_ID3 FIELD32(0xff000000) + +/* + * Miscellaneous Registers. + * Some values are set in TU, whereas 1 TU == 1024 us. + + */ + +/* + * PCICSR: PCI control register. + * BIG_ENDIAN: 1: big endian, 0: little endian. + * RX_TRESHOLD: Rx threshold in dw to start pci access + * 0: 16dw (default), 1: 8dw, 2: 4dw, 3: 32dw. + * TX_TRESHOLD: Tx threshold in dw to start pci access + * 0: 0dw (default), 1: 1dw, 2: 4dw, 3: forward. + * BURST_LENTH: Pci burst length 0: 4dw (default, 1: 8dw, 2: 16dw, 3:32dw. + * ENABLE_CLK: Enable clk_run, pci clock can't going down to non-operational. + * READ_MULTIPLE: Enable memory read multiple. + * WRITE_INVALID: Enable memory write & invalid. + */ +#define PCICSR 0x008c +#define PCICSR_BIG_ENDIAN FIELD32(0x00000001) +#define PCICSR_RX_TRESHOLD FIELD32(0x00000006) +#define PCICSR_TX_TRESHOLD FIELD32(0x00000018) +#define PCICSR_BURST_LENTH FIELD32(0x00000060) +#define PCICSR_ENABLE_CLK FIELD32(0x00000080) +#define PCICSR_READ_MULTIPLE FIELD32(0x00000100) +#define PCICSR_WRITE_INVALID FIELD32(0x00000200) + +/* + * CNT0: FCS error count. + * FCS_ERROR: FCS error count, cleared when read. + */ +#define CNT0 0x00a0 +#define CNT0_FCS_ERROR FIELD32(0x0000ffff) + +/* + * Statistic Register. + * CNT1: PLCP error count. + * CNT2: Long error count. + */ +#define TIMECSR2 0x00a8 +#define CNT1 0x00ac +#define CNT2 0x00b0 +#define TIMECSR3 0x00b4 + +/* + * CNT3: CCA false alarm count. + */ +#define CNT3 0x00b8 +#define CNT3_FALSE_CCA FIELD32(0x0000ffff) + +/* + * Statistic Register. + * CNT4: Rx FIFO overflow count. + * CNT5: Tx FIFO underrun count. + */ +#define CNT4 0x00bc +#define CNT5 0x00c0 + +/* + * Baseband Control Register. + */ + +/* + * PWRCSR0: Power mode configuration register. + */ +#define PWRCSR0 0x00c4 + +/* + * Power state transition time registers. + */ +#define PSCSR0 0x00c8 +#define PSCSR1 0x00cc +#define PSCSR2 0x00d0 +#define PSCSR3 0x00d4 + +/* + * PWRCSR1: Manual power control / status register. + * Allowed state: 0 deep_sleep, 1: sleep, 2: standby, 3: awake. + * SET_STATE: Set state. Write 1 to trigger, self cleared. + * BBP_DESIRE_STATE: BBP desired state. + * RF_DESIRE_STATE: RF desired state. + * BBP_CURR_STATE: BBP current state. + * RF_CURR_STATE: RF current state. + * PUT_TO_SLEEP: Put to sleep. Write 1 to trigger, self cleared. + */ +#define PWRCSR1 0x00d8 +#define PWRCSR1_SET_STATE FIELD32(0x00000001) +#define PWRCSR1_BBP_DESIRE_STATE FIELD32(0x00000006) +#define PWRCSR1_RF_DESIRE_STATE FIELD32(0x00000018) +#define PWRCSR1_BBP_CURR_STATE FIELD32(0x00000060) +#define PWRCSR1_RF_CURR_STATE FIELD32(0x00000180) +#define PWRCSR1_PUT_TO_SLEEP FIELD32(0x00000200) + +/* + * TIMECSR: Timer control register. + * US_COUNT: 1 us timer count in units of clock cycles. + * US_64_COUNT: 64 us timer count in units of 1 us timer. + * BEACON_EXPECT: Beacon expect window. + */ +#define TIMECSR 0x00dc +#define TIMECSR_US_COUNT FIELD32(0x000000ff) +#define TIMECSR_US_64_COUNT FIELD32(0x0000ff00) +#define TIMECSR_BEACON_EXPECT FIELD32(0x00070000) + +/* + * MACCSR0: MAC configuration register 0. + */ +#define MACCSR0 0x00e0 + +/* + * MACCSR1: MAC configuration register 1. + * KICK_RX: Kick one-shot rx in one-shot rx mode. + * ONESHOT_RXMODE: Enable one-shot rx mode for debugging. + * BBPRX_RESET_MODE: Ralink bbp rx reset mode. 
+ * AUTO_TXBBP: Auto tx logic access bbp control register. + * AUTO_RXBBP: Auto rx logic access bbp control register. + * LOOPBACK: Loopback mode. 0: normal, 1: internal, 2: external, 3:rsvd. + * INTERSIL_IF: Intersil if calibration pin. + */ +#define MACCSR1 0x00e4 +#define MACCSR1_KICK_RX FIELD32(0x00000001) +#define MACCSR1_ONESHOT_RXMODE FIELD32(0x00000002) +#define MACCSR1_BBPRX_RESET_MODE FIELD32(0x00000004) +#define MACCSR1_AUTO_TXBBP FIELD32(0x00000008) +#define MACCSR1_AUTO_RXBBP FIELD32(0x00000010) +#define MACCSR1_LOOPBACK FIELD32(0x00000060) +#define MACCSR1_INTERSIL_IF FIELD32(0x00000080) + +/* + * RALINKCSR: Ralink Rx auto-reset BBCR. + * AR_BBP_DATA#: Auto reset BBP register # data. + * AR_BBP_ID#: Auto reset BBP register # id. + */ +#define RALINKCSR 0x00e8 +#define RALINKCSR_AR_BBP_DATA0 FIELD32(0x000000ff) +#define RALINKCSR_AR_BBP_ID0 FIELD32(0x00007f00) +#define RALINKCSR_AR_BBP_VALID0 FIELD32(0x00008000) +#define RALINKCSR_AR_BBP_DATA1 FIELD32(0x00ff0000) +#define RALINKCSR_AR_BBP_ID1 FIELD32(0x7f000000) +#define RALINKCSR_AR_BBP_VALID1 FIELD32(0x80000000) + +/* + * BCNCSR: Beacon interval control register. + * CHANGE: Write one to change beacon interval. + * DELTATIME: The delta time value. + * NUM_BEACON: Number of beacon according to mode. + * MODE: Please refer to asic specs. + * PLUS: Plus or minus delta time value. + */ +#define BCNCSR 0x00ec +#define BCNCSR_CHANGE FIELD32(0x00000001) +#define BCNCSR_DELTATIME FIELD32(0x0000001e) +#define BCNCSR_NUM_BEACON FIELD32(0x00001fe0) +#define BCNCSR_MODE FIELD32(0x00006000) +#define BCNCSR_PLUS FIELD32(0x00008000) + +/* + * BBP / RF / IF Control Register. + */ + +/* + * BBPCSR: BBP serial control register. + * VALUE: Register value to program into BBP. + * REGNUM: Selected BBP register. + * BUSY: 1: asic is busy execute BBP programming. + * WRITE_CONTROL: 1: write BBP, 0: read BBP. + */ +#define BBPCSR 0x00f0 +#define BBPCSR_VALUE FIELD32(0x000000ff) +#define BBPCSR_REGNUM FIELD32(0x00007f00) +#define BBPCSR_BUSY FIELD32(0x00008000) +#define BBPCSR_WRITE_CONTROL FIELD32(0x00010000) + +/* + * RFCSR: RF serial control register. + * VALUE: Register value + id to program into rf/if. + * NUMBER_OF_BITS: Number of bits used in value (i:20, rfmd:22). + * IF_SELECT: Chip to program: 0: rf, 1: if. + * PLL_LD: Rf pll_ld status. + * BUSY: 1: asic is busy execute rf programming. + */ +#define RFCSR 0x00f4 +#define RFCSR_VALUE FIELD32(0x00ffffff) +#define RFCSR_NUMBER_OF_BITS FIELD32(0x1f000000) +#define RFCSR_IF_SELECT FIELD32(0x20000000) +#define RFCSR_PLL_LD FIELD32(0x40000000) +#define RFCSR_BUSY FIELD32(0x80000000) + +/* + * LEDCSR: LED control register. + * ON_PERIOD: On period, default 70ms. + * OFF_PERIOD: Off period, default 30ms. + * LINK: 0: linkoff, 1: linkup. + * ACTIVITY: 0: idle, 1: active. + * LINK_POLARITY: 0: active low, 1: active high. + * ACTIVITY_POLARITY: 0: active low, 1: active high. + * LED_DEFAULT: LED state for "enable" 0: ON, 1: OFF. + */ +#define LEDCSR 0x00f8 +#define LEDCSR_ON_PERIOD FIELD32(0x000000ff) +#define LEDCSR_OFF_PERIOD FIELD32(0x0000ff00) +#define LEDCSR_LINK FIELD32(0x00010000) +#define LEDCSR_ACTIVITY FIELD32(0x00020000) +#define LEDCSR_LINK_POLARITY FIELD32(0x00040000) +#define LEDCSR_ACTIVITY_POLARITY FIELD32(0x00080000) +#define LEDCSR_LED_DEFAULT FIELD32(0x00100000) + +/* + * AES control register. + */ +#define SECCSR3 0x00fc + +/* + * ASIC pointer information. + * RXPTR: Current RX ring address. + * TXPTR: Current Tx ring address. + * PRIPTR: Current Priority ring address. 
+ * ATIMPTR: Current ATIM ring address. + */ +#define RXPTR 0x0100 +#define TXPTR 0x0104 +#define PRIPTR 0x0108 +#define ATIMPTR 0x010c + +/* + * TXACKCSR0: TX ACK timeout. + */ +#define TXACKCSR0 0x0110 + +/* + * ACK timeout count registers. + * ACKCNT0: TX ACK timeout count. + * ACKCNT1: RX ACK timeout count. + */ +#define ACKCNT0 0x0114 +#define ACKCNT1 0x0118 + +/* + * GPIO and others. + */ + +/* + * GPIOCSR: GPIO control register. + */ +#define GPIOCSR 0x0120 +#define GPIOCSR_BIT0 FIELD32(0x00000001) +#define GPIOCSR_BIT1 FIELD32(0x00000002) +#define GPIOCSR_BIT2 FIELD32(0x00000004) +#define GPIOCSR_BIT3 FIELD32(0x00000008) +#define GPIOCSR_BIT4 FIELD32(0x00000010) +#define GPIOCSR_BIT5 FIELD32(0x00000020) +#define GPIOCSR_BIT6 FIELD32(0x00000040) +#define GPIOCSR_BIT7 FIELD32(0x00000080) +#define GPIOCSR_DIR0 FIELD32(0x00000100) +#define GPIOCSR_DIR1 FIELD32(0x00000200) +#define GPIOCSR_DIR2 FIELD32(0x00000400) +#define GPIOCSR_DIR3 FIELD32(0x00000800) +#define GPIOCSR_DIR4 FIELD32(0x00001000) +#define GPIOCSR_DIR5 FIELD32(0x00002000) +#define GPIOCSR_DIR6 FIELD32(0x00004000) +#define GPIOCSR_DIR7 FIELD32(0x00008000) + +/* + * FIFO pointer registers. + * FIFOCSR0: TX FIFO pointer. + * FIFOCSR1: RX FIFO pointer. + */ +#define FIFOCSR0 0x0128 +#define FIFOCSR1 0x012c + +/* + * BCNCSR1: Tx BEACON offset time control register. + * PRELOAD: Beacon timer offset in units of usec. + * BEACON_CWMIN: 2^CwMin. + */ +#define BCNCSR1 0x0130 +#define BCNCSR1_PRELOAD FIELD32(0x0000ffff) +#define BCNCSR1_BEACON_CWMIN FIELD32(0x000f0000) + +/* + * MACCSR2: TX_PE to RX_PE turn-around time control register + * DELAY: RX_PE low width, in units of pci clock cycle. + */ +#define MACCSR2 0x0134 +#define MACCSR2_DELAY FIELD32(0x000000ff) + +/* + * TESTCSR: TEST mode selection register. + */ +#define TESTCSR 0x0138 + +/* + * ARCSR2: 1 Mbps ACK/CTS PLCP. + */ +#define ARCSR2 0x013c +#define ARCSR2_SIGNAL FIELD32(0x000000ff) +#define ARCSR2_SERVICE FIELD32(0x0000ff00) +#define ARCSR2_LENGTH FIELD32(0xffff0000) + +/* + * ARCSR3: 2 Mbps ACK/CTS PLCP. + */ +#define ARCSR3 0x0140 +#define ARCSR3_SIGNAL FIELD32(0x000000ff) +#define ARCSR3_SERVICE FIELD32(0x0000ff00) +#define ARCSR3_LENGTH FIELD32(0xffff0000) + +/* + * ARCSR4: 5.5 Mbps ACK/CTS PLCP. + */ +#define ARCSR4 0x0144 +#define ARCSR4_SIGNAL FIELD32(0x000000ff) +#define ARCSR4_SERVICE FIELD32(0x0000ff00) +#define ARCSR4_LENGTH FIELD32(0xffff0000) + +/* + * ARCSR5: 11 Mbps ACK/CTS PLCP. + */ +#define ARCSR5 0x0148 +#define ARCSR5_SIGNAL FIELD32(0x000000ff) +#define ARCSR5_SERVICE FIELD32(0x0000ff00) +#define ARCSR5_LENGTH FIELD32(0xffff0000) + +/* + * ARTCSR0: CCK ACK/CTS payload consumed time for 1/2/5.5/11 mbps. + */ +#define ARTCSR0 0x014c +#define ARTCSR0_ACK_CTS_11MBS FIELD32(0x000000ff) +#define ARTCSR0_ACK_CTS_5_5MBS FIELD32(0x0000ff00) +#define ARTCSR0_ACK_CTS_2MBS FIELD32(0x00ff0000) +#define ARTCSR0_ACK_CTS_1MBS FIELD32(0xff000000) + + +/* + * ARTCSR1: OFDM ACK/CTS payload consumed time for 6/9/12/18 mbps. + */ +#define ARTCSR1 0x0150 +#define ARTCSR1_ACK_CTS_6MBS FIELD32(0x000000ff) +#define ARTCSR1_ACK_CTS_9MBS FIELD32(0x0000ff00) +#define ARTCSR1_ACK_CTS_12MBS FIELD32(0x00ff0000) +#define ARTCSR1_ACK_CTS_18MBS FIELD32(0xff000000) + +/* + * ARTCSR2: OFDM ACK/CTS payload consumed time for 24/36/48/54 mbps. 
+ */ +#define ARTCSR2 0x0154 +#define ARTCSR2_ACK_CTS_24MBS FIELD32(0x000000ff) +#define ARTCSR2_ACK_CTS_36MBS FIELD32(0x0000ff00) +#define ARTCSR2_ACK_CTS_48MBS FIELD32(0x00ff0000) +#define ARTCSR2_ACK_CTS_54MBS FIELD32(0xff000000) + +/* + * SECCSR1_RT2509: WEP control register. + * KICK_ENCRYPT: Kick encryption engine, self-clear. + * ONE_SHOT: 0: ring mode, 1: One shot only mode. + * DESC_ADDRESS: Descriptor physical address of frame. + */ +#define SECCSR1 0x0158 +#define SECCSR1_KICK_ENCRYPT FIELD32(0x00000001) +#define SECCSR1_ONE_SHOT FIELD32(0x00000002) +#define SECCSR1_DESC_ADDRESS FIELD32(0xfffffffc) + +/* + * BBPCSR1: BBP TX configuration. + */ +#define BBPCSR1 0x015c +#define BBPCSR1_CCK FIELD32(0x00000003) +#define BBPCSR1_CCK_FLIP FIELD32(0x00000004) +#define BBPCSR1_OFDM FIELD32(0x00030000) +#define BBPCSR1_OFDM_FLIP FIELD32(0x00040000) + +/* + * Dual band configuration registers. + * DBANDCSR0: Dual band configuration register 0. + * DBANDCSR1: Dual band configuration register 1. + */ +#define DBANDCSR0 0x0160 +#define DBANDCSR1 0x0164 + +/* + * BBPPCSR: BBP Pin control register. + */ +#define BBPPCSR 0x0168 + +/* + * MAC special debug mode selection registers. + * DBGSEL0: MAC special debug mode selection register 0. + * DBGSEL1: MAC special debug mode selection register 1. + */ +#define DBGSEL0 0x016c +#define DBGSEL1 0x0170 + +/* + * BISTCSR: BBP BIST register. + */ +#define BISTCSR 0x0174 + +/* + * Multicast filter registers. + * MCAST0: Multicast filter register 0. + * MCAST1: Multicast filter register 1. + */ +#define MCAST0 0x0178 +#define MCAST1 0x017c + +/* + * UART registers. + * UARTCSR0: UART1 TX register. + * UARTCSR1: UART1 RX register. + * UARTCSR3: UART1 frame control register. + * UARTCSR4: UART1 buffer control register. + * UART2CSR0: UART2 TX register. + * UART2CSR1: UART2 RX register. + * UART2CSR3: UART2 frame control register. + * UART2CSR4: UART2 buffer control register. + */ +#define UARTCSR0 0x0180 +#define UARTCSR1 0x0184 +#define UARTCSR3 0x0188 +#define UARTCSR4 0x018c +#define UART2CSR0 0x0190 +#define UART2CSR1 0x0194 +#define UART2CSR3 0x0198 +#define UART2CSR4 0x019c + +/* + * BBP registers. + * The wordsize of the BBP is 8 bits. + */ + +/* + * R2: TX antenna control + */ +#define BBP_R2_TX_ANTENNA FIELD8(0x03) +#define BBP_R2_TX_IQ_FLIP FIELD8(0x04) + +/* + * R14: RX antenna control + */ +#define BBP_R14_RX_ANTENNA FIELD8(0x03) +#define BBP_R14_RX_IQ_FLIP FIELD8(0x04) + +/* + * BBP_R70 + */ +#define BBP_R70_JAPAN_FILTER FIELD8(0x08) + +/* + * RF registers + */ + +/* + * RF 1 + */ +#define RF1_TUNER FIELD32(0x00020000) + +/* + * RF 3 + */ +#define RF3_TUNER FIELD32(0x00000100) +#define RF3_TXPOWER FIELD32(0x00003e00) + +/* + * EEPROM content. + * The wordsize of the EEPROM is 16 bits. + */ + +/* + * HW MAC address. + */ +#define EEPROM_MAC_ADDR_0 0x0002 +#define EEPROM_MAC_ADDR_BYTE0 FIELD16(0x00ff) +#define EEPROM_MAC_ADDR_BYTE1 FIELD16(0xff00) +#define EEPROM_MAC_ADDR1 0x0003 +#define EEPROM_MAC_ADDR_BYTE2 FIELD16(0x00ff) +#define EEPROM_MAC_ADDR_BYTE3 FIELD16(0xff00) +#define EEPROM_MAC_ADDR_2 0x0004 +#define EEPROM_MAC_ADDR_BYTE4 FIELD16(0x00ff) +#define EEPROM_MAC_ADDR_BYTE5 FIELD16(0xff00) + +/* + * EEPROM antenna. + * ANTENNA_NUM: Number of antenna's. + * TX_DEFAULT: Default antenna 0: diversity, 1: A, 2: B. + * RX_DEFAULT: Default antenna 0: diversity, 1: A, 2: B. + * LED_MODE: 0: default, 1: TX/RX activity,2: Single (ignore link), 3: rsvd. + * DYN_TXAGC: Dynamic TX AGC control. + * HARDWARE_RADIO: 1: Hardware controlled radio. 
Read GPIO0. + * RF_TYPE: Rf_type of this adapter. + */ +#define EEPROM_ANTENNA 0x10 +#define EEPROM_ANTENNA_NUM FIELD16(0x0003) +#define EEPROM_ANTENNA_TX_DEFAULT FIELD16(0x000c) +#define EEPROM_ANTENNA_RX_DEFAULT FIELD16(0x0030) +#define EEPROM_ANTENNA_LED_MODE FIELD16(0x01c0) +#define EEPROM_ANTENNA_DYN_TXAGC FIELD16(0x0200) +#define EEPROM_ANTENNA_HARDWARE_RADIO FIELD16(0x0400) +#define EEPROM_ANTENNA_RF_TYPE FIELD16(0xf800) + +/* + * EEPROM NIC config. + * CARDBUS_ACCEL: 0: enable, 1: disable. + * DYN_BBP_TUNE: 0: enable, 1: disable. + * CCK_TX_POWER: CCK TX power compensation. + */ +#define EEPROM_NIC 0x11 +#define EEPROM_NIC_CARDBUS_ACCEL FIELD16(0x0001) +#define EEPROM_NIC_DYN_BBP_TUNE FIELD16(0x0002) +#define EEPROM_NIC_CCK_TX_POWER FIELD16(0x000c) + +/* + * EEPROM geography. + * GEO: Default geography setting for device. + */ +#define EEPROM_GEOGRAPHY 0x12 +#define EEPROM_GEOGRAPHY_GEO FIELD16(0x0f00) + +/* + * EEPROM BBP. + */ +#define EEPROM_BBP_START 0x13 +#define EEPROM_BBP_SIZE 16 +#define EEPROM_BBP_VALUE FIELD16(0x00ff) +#define EEPROM_BBP_REG_ID FIELD16(0xff00) + +/* + * EEPROM TXPOWER + */ +#define EEPROM_TXPOWER_START 0x23 +#define EEPROM_TXPOWER_SIZE 7 +#define EEPROM_TXPOWER_1 FIELD16(0x00ff) +#define EEPROM_TXPOWER_2 FIELD16(0xff00) + +/* + * RSSI <-> dBm offset calibration + */ +#define EEPROM_CALIBRATE_OFFSET 0x3e +#define EEPROM_CALIBRATE_OFFSET_RSSI FIELD16(0x00ff) + +/* + * DMA descriptor defines. + */ +#define TXD_DESC_SIZE ( 11 * sizeof(struct data_desc) ) +#define RXD_DESC_SIZE ( 11 * sizeof(struct data_desc) ) + +/* + * TX descriptor format for TX, PRIO, ATIM and Beacon Ring. + */ + +/* + * Word0 + */ +#define TXD_W0_OWNER_NIC FIELD32(0x00000001) +#define TXD_W0_VALID FIELD32(0x00000002) +#define TXD_W0_RESULT FIELD32(0x0000001c) +#define TXD_W0_RETRY_COUNT FIELD32(0x000000e0) +#define TXD_W0_MORE_FRAG FIELD32(0x00000100) +#define TXD_W0_ACK FIELD32(0x00000200) +#define TXD_W0_TIMESTAMP FIELD32(0x00000400) +#define TXD_W0_OFDM FIELD32(0x00000800) +#define TXD_W0_CIPHER_OWNER FIELD32(0x00001000) +#define TXD_W0_IFS FIELD32(0x00006000) +#define TXD_W0_RETRY_MODE FIELD32(0x00008000) +#define TXD_W0_DATABYTE_COUNT FIELD32(0x0fff0000) +#define TXD_W0_CIPHER_ALG FIELD32(0xe0000000) + +/* + * Word1 + */ +#define TXD_W1_BUFFER_ADDRESS FIELD32(0xffffffff) + +/* + * Word2 + */ +#define TXD_W2_IV_OFFSET FIELD32(0x0000003f) +#define TXD_W2_AIFS FIELD32(0x000000c0) +#define TXD_W2_CWMIN FIELD32(0x00000f00) +#define TXD_W2_CWMAX FIELD32(0x0000f000) + +/* + * Word3: PLCP information + */ +#define TXD_W3_PLCP_SIGNAL FIELD32(0x000000ff) +#define TXD_W3_PLCP_SERVICE FIELD32(0x0000ff00) +#define TXD_W3_PLCP_LENGTH_LOW FIELD32(0x00ff0000) +#define TXD_W3_PLCP_LENGTH_HIGH FIELD32(0xff000000) + +/* + * Word4 + */ +#define TXD_W4_IV FIELD32(0xffffffff) + +/* + * Word5 + */ +#define TXD_W5_EIV FIELD32(0xffffffff) + +/* + * Word6-9: Key + */ +#define TXD_W6_KEY FIELD32(0xffffffff) +#define TXD_W7_KEY FIELD32(0xffffffff) +#define TXD_W8_KEY FIELD32(0xffffffff) +#define TXD_W9_KEY FIELD32(0xffffffff) + +/* + * Word10 + */ +#define TXD_W10_RTS FIELD32(0x00000001) +#define TXD_W10_TX_RATE FIELD32(0x000000fe) + +/* + * RX descriptor format for RX Ring. 
+ */ + +/* + * Word0 + */ +#define RXD_W0_OWNER_NIC FIELD32(0x00000001) +#define RXD_W0_UNICAST_TO_ME FIELD32(0x00000002) +#define RXD_W0_MULTICAST FIELD32(0x00000004) +#define RXD_W0_BROADCAST FIELD32(0x00000008) +#define RXD_W0_MY_BSS FIELD32(0x00000010) +#define RXD_W0_CRC_ERROR FIELD32(0x00000020) +#define RXD_W0_OFDM FIELD32(0x00000040) +#define RXD_W0_PHYSICAL_ERROR FIELD32(0x00000080) +#define RXD_W0_CIPHER_OWNER FIELD32(0x00000100) +#define RXD_W0_ICV_ERROR FIELD32(0x00000200) +#define RXD_W0_IV_OFFSET FIELD32(0x0000fc00) +#define RXD_W0_DATABYTE_COUNT FIELD32(0x0fff0000) +#define RXD_W0_CIPHER_ALG FIELD32(0xe0000000) + +/* + * Word1 + */ +#define RXD_W1_BUFFER_ADDRESS FIELD32(0xffffffff) + +/* + * Word2 + */ +#define RXD_W2_SIGNAL FIELD32(0x000000ff) +#define RXD_W2_RSSI FIELD32(0x0000ff00) +#define RXD_W2_TA FIELD32(0xffff0000) + +/* + * Word3 + */ +#define RXD_W3_TA FIELD32(0xffffffff) + +/* + * Word4 + */ +#define RXD_W4_IV FIELD32(0xffffffff) + +/* + * Word5 + */ +#define RXD_W5_EIV FIELD32(0xffffffff) + +/* + * Word6-9: Key + */ +#define RXD_W6_KEY FIELD32(0xffffffff) +#define RXD_W7_KEY FIELD32(0xffffffff) +#define RXD_W8_KEY FIELD32(0xffffffff) +#define RXD_W9_KEY FIELD32(0xffffffff) + +/* + * Word10 + */ +#define RXD_W10_DROP FIELD32(0x00000001) + +/* + * Macro's for converting txpower from EEPROM to dscape value + * and from dscape value to register value. + */ +#define MIN_TXPOWER 0 +#define MAX_TXPOWER 31 +#define DEFAULT_TXPOWER 24 + +#define TXPOWER_FROM_DEV(__txpower) \ +({ \ + ((__txpower) > MAX_TXPOWER) ? \ + DEFAULT_TXPOWER : (__txpower); \ +}) + +#define TXPOWER_TO_DEV(__txpower) \ +({ \ + ((__txpower) <= MIN_TXPOWER) ? MIN_TXPOWER : \ + (((__txpower) >= MAX_TXPOWER) ? MAX_TXPOWER : \ + (__txpower)); \ +}) + +#endif /* RT2500PCI_H */ diff --git a/drivers/net/wireless/rt2x00/rt2500usb.c b/drivers/net/wireless/rt2x00/rt2500usb.c new file mode 100644 index 0000000000000000000000000000000000000000..7cdc80a122bb9aaa10b9f9eb514fad6c1d0005de --- /dev/null +++ b/drivers/net/wireless/rt2x00/rt2500usb.c @@ -0,0 +1,1832 @@ +/* + Copyright (C) 2004 - 2007 rt2x00 SourceForge Project + + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the + Free Software Foundation, Inc., + 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + */ + +/* + Module: rt2500usb + Abstract: rt2500usb device specific routines. + Supported chipsets: RT2570. + */ + +/* + * Set enviroment defines for rt2x00.h + */ +#define DRV_NAME "rt2500usb" + +#include +#include +#include +#include +#include +#include + +#include "rt2x00.h" +#include "rt2x00usb.h" +#include "rt2500usb.h" + +/* + * Register access. + * All access to the CSR registers will go through the methods + * rt2500usb_register_read and rt2500usb_register_write. + * BBP and RF register require indirect register access, + * and use the CSR registers BBPCSR and RFCSR to achieve this. 
+ * These indirect registers work with busy bits,
+ * and we will try at most REGISTER_BUSY_COUNT times to access
+ * the register while taking a REGISTER_BUSY_DELAY us delay
+ * between each attempt. When the busy bit is still set at that time,
+ * the access attempt is considered to have failed,
+ * and we will print an error.
+ */
+static inline void rt2500usb_register_read(const struct rt2x00_dev *rt2x00dev,
+					   const unsigned int offset,
+					   u16 *value)
+{
+	__le16 reg;
+	rt2x00usb_vendor_request_buff(rt2x00dev, USB_MULTI_READ,
+				      USB_VENDOR_REQUEST_IN, offset,
+				      &reg, sizeof(u16), REGISTER_TIMEOUT);
+	*value = le16_to_cpu(reg);
+}
+
+static inline void rt2500usb_register_multiread(const struct rt2x00_dev
+						*rt2x00dev,
+						const unsigned int offset,
+						void *value, const u16 length)
+{
+	int timeout = REGISTER_TIMEOUT * (length / sizeof(u16));
+	rt2x00usb_vendor_request_buff(rt2x00dev, USB_MULTI_READ,
+				      USB_VENDOR_REQUEST_IN, offset,
+				      value, length, timeout);
+}
+
+static inline void rt2500usb_register_write(const struct rt2x00_dev *rt2x00dev,
+					    const unsigned int offset,
+					    u16 value)
+{
+	__le16 reg = cpu_to_le16(value);
+	rt2x00usb_vendor_request_buff(rt2x00dev, USB_MULTI_WRITE,
+				      USB_VENDOR_REQUEST_OUT, offset,
+				      &reg, sizeof(u16), REGISTER_TIMEOUT);
+}
+
+static inline void rt2500usb_register_multiwrite(const struct rt2x00_dev
+						 *rt2x00dev,
+						 const unsigned int offset,
+						 void *value, const u16 length)
+{
+	int timeout = REGISTER_TIMEOUT * (length / sizeof(u16));
+	rt2x00usb_vendor_request_buff(rt2x00dev, USB_MULTI_WRITE,
+				      USB_VENDOR_REQUEST_OUT, offset,
+				      value, length, timeout);
+}
+
+static u16 rt2500usb_bbp_check(const struct rt2x00_dev *rt2x00dev)
+{
+	u16 reg;
+	unsigned int i;
+
+	for (i = 0; i < REGISTER_BUSY_COUNT; i++) {
+		rt2500usb_register_read(rt2x00dev, PHY_CSR8, &reg);
+		if (!rt2x00_get_field16(reg, PHY_CSR8_BUSY))
+			break;
+		udelay(REGISTER_BUSY_DELAY);
+	}
+
+	return reg;
+}
+
+static void rt2500usb_bbp_write(const struct rt2x00_dev *rt2x00dev,
+				const unsigned int word, const u8 value)
+{
+	u16 reg;
+
+	/*
+	 * Wait until the BBP becomes ready.
+	 */
+	reg = rt2500usb_bbp_check(rt2x00dev);
+	if (rt2x00_get_field16(reg, PHY_CSR8_BUSY)) {
+		ERROR(rt2x00dev, "PHY_CSR8 register busy. Write failed.\n");
+		return;
+	}
+
+	/*
+	 * Write the data into the BBP.
+	 */
+	reg = 0;
+	rt2x00_set_field16(&reg, PHY_CSR7_DATA, value);
+	rt2x00_set_field16(&reg, PHY_CSR7_REG_ID, word);
+	rt2x00_set_field16(&reg, PHY_CSR7_READ_CONTROL, 0);
+
+	rt2500usb_register_write(rt2x00dev, PHY_CSR7, reg);
+}
+
+static void rt2500usb_bbp_read(const struct rt2x00_dev *rt2x00dev,
+			       const unsigned int word, u8 *value)
+{
+	u16 reg;
+
+	/*
+	 * Wait until the BBP becomes ready.
+	 */
+	reg = rt2500usb_bbp_check(rt2x00dev);
+	if (rt2x00_get_field16(reg, PHY_CSR8_BUSY)) {
+		ERROR(rt2x00dev, "PHY_CSR8 register busy. Read failed.\n");
+		return;
+	}
+
+	/*
+	 * Write the request into the BBP.
+	 */
+	reg = 0;
+	rt2x00_set_field16(&reg, PHY_CSR7_REG_ID, word);
+	rt2x00_set_field16(&reg, PHY_CSR7_READ_CONTROL, 1);
+
+	rt2500usb_register_write(rt2x00dev, PHY_CSR7, reg);
+
+	/*
+	 * Wait until the BBP becomes ready.
+	 */
+	reg = rt2500usb_bbp_check(rt2x00dev);
+	if (rt2x00_get_field16(reg, PHY_CSR8_BUSY)) {
+		ERROR(rt2x00dev, "PHY_CSR8 register busy. Read failed.\n");
+		*value = 0xff;
+		return;
+	}
+
+	rt2500usb_register_read(rt2x00dev, PHY_CSR7, &reg);
+	*value = rt2x00_get_field16(reg, PHY_CSR7_DATA);
+}
+
+static void rt2500usb_rf_write(const struct rt2x00_dev *rt2x00dev,
+			       const unsigned int word, const u32 value)
+{
+	u16 reg;
+	unsigned int i;
+
+	if (!word)
+		return;
+
+	for (i = 0; i < REGISTER_BUSY_COUNT; i++) {
+		rt2500usb_register_read(rt2x00dev, PHY_CSR10, &reg);
+		if (!rt2x00_get_field16(reg, PHY_CSR10_RF_BUSY))
+			goto rf_write;
+		udelay(REGISTER_BUSY_DELAY);
+	}
+
+	ERROR(rt2x00dev, "PHY_CSR10 register busy. Write failed.\n");
+	return;
+
+rf_write:
+	reg = 0;
+	rt2x00_set_field16(&reg, PHY_CSR9_RF_VALUE, value);
+	rt2500usb_register_write(rt2x00dev, PHY_CSR9, reg);
+
+	reg = 0;
+	rt2x00_set_field16(&reg, PHY_CSR10_RF_VALUE, value >> 16);
+	rt2x00_set_field16(&reg, PHY_CSR10_RF_NUMBER_OF_BITS, 20);
+	rt2x00_set_field16(&reg, PHY_CSR10_RF_IF_SELECT, 0);
+	rt2x00_set_field16(&reg, PHY_CSR10_RF_BUSY, 1);
+
+	rt2500usb_register_write(rt2x00dev, PHY_CSR10, reg);
+	rt2x00_rf_write(rt2x00dev, word, value);
+}
+
+#ifdef CONFIG_RT2X00_LIB_DEBUGFS
+#define CSR_OFFSET(__word)	( CSR_REG_BASE + ((__word) * sizeof(u16)) )
+
+static void rt2500usb_read_csr(const struct rt2x00_dev *rt2x00dev,
+			       const unsigned int word, u32 *data)
+{
+	rt2500usb_register_read(rt2x00dev, CSR_OFFSET(word), (u16 *) data);
+}
+
+static void rt2500usb_write_csr(const struct rt2x00_dev *rt2x00dev,
+				const unsigned int word, u32 data)
+{
+	rt2500usb_register_write(rt2x00dev, CSR_OFFSET(word), data);
+}
+
+static const struct rt2x00debug rt2500usb_rt2x00debug = {
+	.owner	= THIS_MODULE,
+	.csr	= {
+		.read		= rt2500usb_read_csr,
+		.write		= rt2500usb_write_csr,
+		.word_size	= sizeof(u16),
+		.word_count	= CSR_REG_SIZE / sizeof(u16),
+	},
+	.eeprom	= {
+		.read		= rt2x00_eeprom_read,
+		.write		= rt2x00_eeprom_write,
+		.word_size	= sizeof(u16),
+		.word_count	= EEPROM_SIZE / sizeof(u16),
+	},
+	.bbp	= {
+		.read		= rt2500usb_bbp_read,
+		.write		= rt2500usb_bbp_write,
+		.word_size	= sizeof(u8),
+		.word_count	= BBP_SIZE / sizeof(u8),
+	},
+	.rf	= {
+		.read		= rt2x00_rf_read,
+		.write		= rt2500usb_rf_write,
+		.word_size	= sizeof(u32),
+		.word_count	= RF_SIZE / sizeof(u32),
+	},
+};
+#endif /* CONFIG_RT2X00_LIB_DEBUGFS */
+
+/*
+ * Configuration handlers.
+ */
+static void rt2500usb_config_mac_addr(struct rt2x00_dev *rt2x00dev,
+				      __le32 *mac)
+{
+	rt2500usb_register_multiwrite(rt2x00dev, MAC_CSR2, &mac,
+				      (3 * sizeof(__le16)));
+}
+
+static void rt2500usb_config_bssid(struct rt2x00_dev *rt2x00dev,
+				   __le32 *bssid)
+{
+	rt2500usb_register_multiwrite(rt2x00dev, MAC_CSR5, bssid,
+				      (3 * sizeof(__le16)));
+}
+
+static void rt2500usb_config_type(struct rt2x00_dev *rt2x00dev, const int type,
+				  const int tsf_sync)
+{
+	u16 reg;
+
+	rt2500usb_register_write(rt2x00dev, TXRX_CSR19, 0);
+
+	/*
+	 * Enable beacon config
+	 */
+	rt2500usb_register_read(rt2x00dev, TXRX_CSR20, &reg);
+	rt2x00_set_field16(&reg, TXRX_CSR20_OFFSET,
+			   (PREAMBLE + get_duration(IEEE80211_HEADER, 20)) >> 6);
+	if (type == IEEE80211_IF_TYPE_STA)
+		rt2x00_set_field16(&reg, TXRX_CSR20_BCN_EXPECT_WINDOW, 0);
+	else
+		rt2x00_set_field16(&reg, TXRX_CSR20_BCN_EXPECT_WINDOW, 2);
+	rt2500usb_register_write(rt2x00dev, TXRX_CSR20, reg);
+
+	/*
+	 * Enable synchronisation.
+ */ + rt2500usb_register_read(rt2x00dev, TXRX_CSR18, ®); + rt2x00_set_field16(®, TXRX_CSR18_OFFSET, 0); + rt2500usb_register_write(rt2x00dev, TXRX_CSR18, reg); + + rt2500usb_register_read(rt2x00dev, TXRX_CSR19, ®); + rt2x00_set_field16(®, TXRX_CSR19_TSF_COUNT, 1); + rt2x00_set_field16(®, TXRX_CSR19_TBCN, 1); + rt2x00_set_field16(®, TXRX_CSR19_BEACON_GEN, 0); + rt2x00_set_field16(®, TXRX_CSR19_TSF_SYNC, tsf_sync); + rt2500usb_register_write(rt2x00dev, TXRX_CSR19, reg); +} + +static void rt2500usb_config_preamble(struct rt2x00_dev *rt2x00dev, + const int short_preamble, + const int ack_timeout, + const int ack_consume_time) +{ + u16 reg; + + /* + * When in atomic context, reschedule and let rt2x00lib + * call this function again. + */ + if (in_atomic()) { + queue_work(rt2x00dev->hw->workqueue, &rt2x00dev->config_work); + return; + } + + rt2500usb_register_read(rt2x00dev, TXRX_CSR1, ®); + rt2x00_set_field16(®, TXRX_CSR1_ACK_TIMEOUT, ack_timeout); + rt2500usb_register_write(rt2x00dev, TXRX_CSR1, reg); + + rt2500usb_register_read(rt2x00dev, TXRX_CSR10, ®); + rt2x00_set_field16(®, TXRX_CSR10_AUTORESPOND_PREAMBLE, + !!short_preamble); + rt2500usb_register_write(rt2x00dev, TXRX_CSR10, reg); +} + +static void rt2500usb_config_phymode(struct rt2x00_dev *rt2x00dev, + const int phymode, + const int basic_rate_mask) +{ + rt2500usb_register_write(rt2x00dev, TXRX_CSR11, basic_rate_mask); + + if (phymode == HWMODE_B) { + rt2500usb_register_write(rt2x00dev, MAC_CSR11, 0x000b); + rt2500usb_register_write(rt2x00dev, MAC_CSR12, 0x0040); + } else { + rt2500usb_register_write(rt2x00dev, MAC_CSR11, 0x0005); + rt2500usb_register_write(rt2x00dev, MAC_CSR12, 0x016c); + } +} + +static void rt2500usb_config_channel(struct rt2x00_dev *rt2x00dev, + struct rf_channel *rf, const int txpower) +{ + /* + * Set TXpower. + */ + rt2x00_set_field32(&rf->rf3, RF3_TXPOWER, TXPOWER_TO_DEV(txpower)); + + /* + * For RT2525E we should first set the channel to half band higher. + */ + if (rt2x00_rf(&rt2x00dev->chip, RF2525E)) { + static const u32 vals[] = { + 0x000008aa, 0x000008ae, 0x000008ae, 0x000008b2, + 0x000008b2, 0x000008b6, 0x000008b6, 0x000008ba, + 0x000008ba, 0x000008be, 0x000008b7, 0x00000902, + 0x00000902, 0x00000906 + }; + + rt2500usb_rf_write(rt2x00dev, 2, vals[rf->channel - 1]); + if (rf->rf4) + rt2500usb_rf_write(rt2x00dev, 4, rf->rf4); + } + + rt2500usb_rf_write(rt2x00dev, 1, rf->rf1); + rt2500usb_rf_write(rt2x00dev, 2, rf->rf2); + rt2500usb_rf_write(rt2x00dev, 3, rf->rf3); + if (rf->rf4) + rt2500usb_rf_write(rt2x00dev, 4, rf->rf4); +} + +static void rt2500usb_config_txpower(struct rt2x00_dev *rt2x00dev, + const int txpower) +{ + u32 rf3; + + rt2x00_rf_read(rt2x00dev, 3, &rf3); + rt2x00_set_field32(&rf3, RF3_TXPOWER, TXPOWER_TO_DEV(txpower)); + rt2500usb_rf_write(rt2x00dev, 3, rf3); +} + +static void rt2500usb_config_antenna(struct rt2x00_dev *rt2x00dev, + const int antenna_tx, const int antenna_rx) +{ + u8 r2; + u8 r14; + u16 csr5; + u16 csr6; + + rt2500usb_bbp_read(rt2x00dev, 2, &r2); + rt2500usb_bbp_read(rt2x00dev, 14, &r14); + rt2500usb_register_read(rt2x00dev, PHY_CSR5, &csr5); + rt2500usb_register_read(rt2x00dev, PHY_CSR6, &csr6); + + /* + * Configure the TX antenna. 
+ */ + switch (antenna_tx) { + case ANTENNA_SW_DIVERSITY: + case ANTENNA_HW_DIVERSITY: + rt2x00_set_field8(&r2, BBP_R2_TX_ANTENNA, 1); + rt2x00_set_field16(&csr5, PHY_CSR5_CCK, 1); + rt2x00_set_field16(&csr6, PHY_CSR6_OFDM, 1); + break; + case ANTENNA_A: + rt2x00_set_field8(&r2, BBP_R2_TX_ANTENNA, 0); + rt2x00_set_field16(&csr5, PHY_CSR5_CCK, 0); + rt2x00_set_field16(&csr6, PHY_CSR6_OFDM, 0); + break; + case ANTENNA_B: + rt2x00_set_field8(&r2, BBP_R2_TX_ANTENNA, 2); + rt2x00_set_field16(&csr5, PHY_CSR5_CCK, 2); + rt2x00_set_field16(&csr6, PHY_CSR6_OFDM, 2); + break; + } + + /* + * Configure the RX antenna. + */ + switch (antenna_rx) { + case ANTENNA_SW_DIVERSITY: + case ANTENNA_HW_DIVERSITY: + rt2x00_set_field8(&r14, BBP_R14_RX_ANTENNA, 1); + break; + case ANTENNA_A: + rt2x00_set_field8(&r14, BBP_R14_RX_ANTENNA, 0); + break; + case ANTENNA_B: + rt2x00_set_field8(&r14, BBP_R14_RX_ANTENNA, 2); + break; + } + + /* + * RT2525E and RT5222 need to flip TX I/Q + */ + if (rt2x00_rf(&rt2x00dev->chip, RF2525E) || + rt2x00_rf(&rt2x00dev->chip, RF5222)) { + rt2x00_set_field8(&r2, BBP_R2_TX_IQ_FLIP, 1); + rt2x00_set_field16(&csr5, PHY_CSR5_CCK_FLIP, 1); + rt2x00_set_field16(&csr6, PHY_CSR6_OFDM_FLIP, 1); + + /* + * RT2525E does not need RX I/Q Flip. + */ + if (rt2x00_rf(&rt2x00dev->chip, RF2525E)) + rt2x00_set_field8(&r14, BBP_R14_RX_IQ_FLIP, 0); + } else { + rt2x00_set_field16(&csr5, PHY_CSR5_CCK_FLIP, 0); + rt2x00_set_field16(&csr6, PHY_CSR6_OFDM_FLIP, 0); + } + + rt2500usb_bbp_write(rt2x00dev, 2, r2); + rt2500usb_bbp_write(rt2x00dev, 14, r14); + rt2500usb_register_write(rt2x00dev, PHY_CSR5, csr5); + rt2500usb_register_write(rt2x00dev, PHY_CSR6, csr6); +} + +static void rt2500usb_config_duration(struct rt2x00_dev *rt2x00dev, + struct rt2x00lib_conf *libconf) +{ + u16 reg; + + rt2500usb_register_write(rt2x00dev, MAC_CSR10, libconf->slot_time); + + rt2500usb_register_read(rt2x00dev, TXRX_CSR18, ®); + rt2x00_set_field16(®, TXRX_CSR18_INTERVAL, + libconf->conf->beacon_int * 4); + rt2500usb_register_write(rt2x00dev, TXRX_CSR18, reg); +} + +static void rt2500usb_config(struct rt2x00_dev *rt2x00dev, + const unsigned int flags, + struct rt2x00lib_conf *libconf) +{ + if (flags & CONFIG_UPDATE_PHYMODE) + rt2500usb_config_phymode(rt2x00dev, libconf->phymode, + libconf->basic_rates); + if (flags & CONFIG_UPDATE_CHANNEL) + rt2500usb_config_channel(rt2x00dev, &libconf->rf, + libconf->conf->power_level); + if ((flags & CONFIG_UPDATE_TXPOWER) && !(flags & CONFIG_UPDATE_CHANNEL)) + rt2500usb_config_txpower(rt2x00dev, + libconf->conf->power_level); + if (flags & CONFIG_UPDATE_ANTENNA) + rt2500usb_config_antenna(rt2x00dev, + libconf->conf->antenna_sel_tx, + libconf->conf->antenna_sel_rx); + if (flags & (CONFIG_UPDATE_SLOT_TIME | CONFIG_UPDATE_BEACON_INT)) + rt2500usb_config_duration(rt2x00dev, libconf); +} + +/* + * LED functions. 
+ */ +static void rt2500usb_enable_led(struct rt2x00_dev *rt2x00dev) +{ + u16 reg; + + rt2500usb_register_read(rt2x00dev, MAC_CSR21, ®); + rt2x00_set_field16(®, MAC_CSR21_ON_PERIOD, 70); + rt2x00_set_field16(®, MAC_CSR21_OFF_PERIOD, 30); + rt2500usb_register_write(rt2x00dev, MAC_CSR21, reg); + + rt2500usb_register_read(rt2x00dev, MAC_CSR20, ®); + + if (rt2x00dev->led_mode == LED_MODE_TXRX_ACTIVITY) { + rt2x00_set_field16(®, MAC_CSR20_LINK, 1); + rt2x00_set_field16(®, MAC_CSR20_ACTIVITY, 0); + } else if (rt2x00dev->led_mode == LED_MODE_ASUS) { + rt2x00_set_field16(®, MAC_CSR20_LINK, 0); + rt2x00_set_field16(®, MAC_CSR20_ACTIVITY, 1); + } else { + rt2x00_set_field16(®, MAC_CSR20_LINK, 1); + rt2x00_set_field16(®, MAC_CSR20_ACTIVITY, 1); + } + + rt2500usb_register_write(rt2x00dev, MAC_CSR20, reg); +} + +static void rt2500usb_disable_led(struct rt2x00_dev *rt2x00dev) +{ + u16 reg; + + rt2500usb_register_read(rt2x00dev, MAC_CSR20, ®); + rt2x00_set_field16(®, MAC_CSR20_LINK, 0); + rt2x00_set_field16(®, MAC_CSR20_ACTIVITY, 0); + rt2500usb_register_write(rt2x00dev, MAC_CSR20, reg); +} + +/* + * Link tuning + */ +static void rt2500usb_link_stats(struct rt2x00_dev *rt2x00dev) +{ + u16 reg; + + /* + * Update FCS error count from register. + */ + rt2500usb_register_read(rt2x00dev, STA_CSR0, ®); + rt2x00dev->link.rx_failed = rt2x00_get_field16(reg, STA_CSR0_FCS_ERROR); + + /* + * Update False CCA count from register. + */ + rt2500usb_register_read(rt2x00dev, STA_CSR3, ®); + rt2x00dev->link.false_cca = + rt2x00_get_field16(reg, STA_CSR3_FALSE_CCA_ERROR); +} + +static void rt2500usb_reset_tuner(struct rt2x00_dev *rt2x00dev) +{ + u16 eeprom; + u16 value; + + rt2x00_eeprom_read(rt2x00dev, EEPROM_BBPTUNE_R24, &eeprom); + value = rt2x00_get_field16(eeprom, EEPROM_BBPTUNE_R24_LOW); + rt2500usb_bbp_write(rt2x00dev, 24, value); + + rt2x00_eeprom_read(rt2x00dev, EEPROM_BBPTUNE_R25, &eeprom); + value = rt2x00_get_field16(eeprom, EEPROM_BBPTUNE_R25_LOW); + rt2500usb_bbp_write(rt2x00dev, 25, value); + + rt2x00_eeprom_read(rt2x00dev, EEPROM_BBPTUNE_R61, &eeprom); + value = rt2x00_get_field16(eeprom, EEPROM_BBPTUNE_R61_LOW); + rt2500usb_bbp_write(rt2x00dev, 61, value); + + rt2x00_eeprom_read(rt2x00dev, EEPROM_BBPTUNE_VGC, &eeprom); + value = rt2x00_get_field16(eeprom, EEPROM_BBPTUNE_VGCUPPER); + rt2500usb_bbp_write(rt2x00dev, 17, value); + + rt2x00dev->link.vgc_level = value; +} + +static void rt2500usb_link_tuner(struct rt2x00_dev *rt2x00dev) +{ + int rssi = rt2x00_get_link_rssi(&rt2x00dev->link); + u16 bbp_thresh; + u16 vgc_bound; + u16 sens; + u16 r24; + u16 r25; + u16 r61; + u16 r17_sens; + u8 r17; + u8 up_bound; + u8 low_bound; + + /* + * Determine the BBP tuning threshold and correctly + * set BBP 24, 25 and 61. 
+	 */
+	rt2x00_eeprom_read(rt2x00dev, EEPROM_BBPTUNE, &bbp_thresh);
+	bbp_thresh = rt2x00_get_field16(bbp_thresh, EEPROM_BBPTUNE_THRESHOLD);
+
+	rt2x00_eeprom_read(rt2x00dev, EEPROM_BBPTUNE_R24, &r24);
+	rt2x00_eeprom_read(rt2x00dev, EEPROM_BBPTUNE_R25, &r25);
+	rt2x00_eeprom_read(rt2x00dev, EEPROM_BBPTUNE_R61, &r61);
+
+	if ((rssi + bbp_thresh) > 0) {
+		r24 = rt2x00_get_field16(r24, EEPROM_BBPTUNE_R24_HIGH);
+		r25 = rt2x00_get_field16(r25, EEPROM_BBPTUNE_R25_HIGH);
+		r61 = rt2x00_get_field16(r61, EEPROM_BBPTUNE_R61_HIGH);
+	} else {
+		r24 = rt2x00_get_field16(r24, EEPROM_BBPTUNE_R24_LOW);
+		r25 = rt2x00_get_field16(r25, EEPROM_BBPTUNE_R25_LOW);
+		r61 = rt2x00_get_field16(r61, EEPROM_BBPTUNE_R61_LOW);
+	}
+
+	rt2500usb_bbp_write(rt2x00dev, 24, r24);
+	rt2500usb_bbp_write(rt2x00dev, 25, r25);
+	rt2500usb_bbp_write(rt2x00dev, 61, r61);
+
+	/*
+	 * Read current r17 value, as well as the sensitivity values
+	 * for the r17 register.
+	 */
+	rt2500usb_bbp_read(rt2x00dev, 17, &r17);
+	rt2x00_eeprom_read(rt2x00dev, EEPROM_BBPTUNE_R17, &r17_sens);
+
+	/*
+	 * An RSSI that is too low causes too many false CCA events, which
+	 * corrupt the R17 tuning. To remedy this the tuning should be
+	 * stopped (while making sure the R17 value will not exceed its limits).
+	 */
+	if (rssi >= -40) {
+		if (r17 != 0x60)
+			rt2500usb_bbp_write(rt2x00dev, 17, 0x60);
+		return;
+	}
+
+	/*
+	 * Special big-R17 for short distance
+	 */
+	if (rssi >= -58) {
+		sens = rt2x00_get_field16(r17_sens, EEPROM_BBPTUNE_R17_LOW);
+		if (r17 != sens)
+			rt2500usb_bbp_write(rt2x00dev, 17, sens);
+		return;
+	}
+
+	/*
+	 * Special mid-R17 for middle distance
+	 */
+	if (rssi >= -74) {
+		sens = rt2x00_get_field16(r17_sens, EEPROM_BBPTUNE_R17_HIGH);
+		if (r17 != sens)
+			rt2500usb_bbp_write(rt2x00dev, 17, sens);
+		return;
+	}
+
+	/*
+	 * Leave short or middle distance condition, restore r17
+	 * to the dynamic tuning range.
+	 */
+	rt2x00_eeprom_read(rt2x00dev, EEPROM_BBPTUNE_VGC, &vgc_bound);
+	vgc_bound = rt2x00_get_field16(vgc_bound, EEPROM_BBPTUNE_VGCUPPER);
+
+	low_bound = 0x32;
+	if (rssi >= -77)
+		up_bound = vgc_bound;
+	else
+		up_bound = vgc_bound - (-77 - rssi);
+
+	if (up_bound < low_bound)
+		up_bound = low_bound;
+
+	if (r17 > up_bound) {
+		rt2500usb_bbp_write(rt2x00dev, 17, up_bound);
+		rt2x00dev->link.vgc_level = up_bound;
+	} else if (rt2x00dev->link.false_cca > 512 && r17 < up_bound) {
+		rt2500usb_bbp_write(rt2x00dev, 17, ++r17);
+		rt2x00dev->link.vgc_level = r17;
+	} else if (rt2x00dev->link.false_cca < 100 && r17 > low_bound) {
+		rt2500usb_bbp_write(rt2x00dev, 17, --r17);
+		rt2x00dev->link.vgc_level = r17;
+	}
+}
+
+/*
+ * Initialization functions.
+ */ +static int rt2500usb_init_registers(struct rt2x00_dev *rt2x00dev) +{ + u16 reg; + + rt2x00usb_vendor_request_sw(rt2x00dev, USB_DEVICE_MODE, 0x0001, + USB_MODE_TEST, REGISTER_TIMEOUT); + rt2x00usb_vendor_request_sw(rt2x00dev, USB_SINGLE_WRITE, 0x0308, + 0x00f0, REGISTER_TIMEOUT); + + rt2500usb_register_read(rt2x00dev, TXRX_CSR2, ®); + rt2x00_set_field16(®, TXRX_CSR2_DISABLE_RX, 1); + rt2500usb_register_write(rt2x00dev, TXRX_CSR2, reg); + + rt2500usb_register_write(rt2x00dev, MAC_CSR13, 0x1111); + rt2500usb_register_write(rt2x00dev, MAC_CSR14, 0x1e11); + + rt2500usb_register_read(rt2x00dev, MAC_CSR1, ®); + rt2x00_set_field16(®, MAC_CSR1_SOFT_RESET, 1); + rt2x00_set_field16(®, MAC_CSR1_BBP_RESET, 1); + rt2x00_set_field16(®, MAC_CSR1_HOST_READY, 0); + rt2500usb_register_write(rt2x00dev, MAC_CSR1, reg); + + rt2500usb_register_read(rt2x00dev, MAC_CSR1, ®); + rt2x00_set_field16(®, MAC_CSR1_SOFT_RESET, 0); + rt2x00_set_field16(®, MAC_CSR1_BBP_RESET, 0); + rt2x00_set_field16(®, MAC_CSR1_HOST_READY, 0); + rt2500usb_register_write(rt2x00dev, MAC_CSR1, reg); + + rt2500usb_register_read(rt2x00dev, TXRX_CSR5, ®); + rt2x00_set_field16(®, TXRX_CSR5_BBP_ID0, 13); + rt2x00_set_field16(®, TXRX_CSR5_BBP_ID0_VALID, 1); + rt2x00_set_field16(®, TXRX_CSR5_BBP_ID1, 12); + rt2x00_set_field16(®, TXRX_CSR5_BBP_ID1_VALID, 1); + rt2500usb_register_write(rt2x00dev, TXRX_CSR5, reg); + + rt2500usb_register_read(rt2x00dev, TXRX_CSR6, ®); + rt2x00_set_field16(®, TXRX_CSR6_BBP_ID0, 10); + rt2x00_set_field16(®, TXRX_CSR6_BBP_ID0_VALID, 1); + rt2x00_set_field16(®, TXRX_CSR6_BBP_ID1, 11); + rt2x00_set_field16(®, TXRX_CSR6_BBP_ID1_VALID, 1); + rt2500usb_register_write(rt2x00dev, TXRX_CSR6, reg); + + rt2500usb_register_read(rt2x00dev, TXRX_CSR7, ®); + rt2x00_set_field16(®, TXRX_CSR7_BBP_ID0, 7); + rt2x00_set_field16(®, TXRX_CSR7_BBP_ID0_VALID, 1); + rt2x00_set_field16(®, TXRX_CSR7_BBP_ID1, 6); + rt2x00_set_field16(®, TXRX_CSR7_BBP_ID1_VALID, 1); + rt2500usb_register_write(rt2x00dev, TXRX_CSR7, reg); + + rt2500usb_register_read(rt2x00dev, TXRX_CSR8, ®); + rt2x00_set_field16(®, TXRX_CSR8_BBP_ID0, 5); + rt2x00_set_field16(®, TXRX_CSR8_BBP_ID0_VALID, 1); + rt2x00_set_field16(®, TXRX_CSR8_BBP_ID1, 0); + rt2x00_set_field16(®, TXRX_CSR8_BBP_ID1_VALID, 0); + rt2500usb_register_write(rt2x00dev, TXRX_CSR8, reg); + + rt2500usb_register_write(rt2x00dev, TXRX_CSR21, 0xe78f); + rt2500usb_register_write(rt2x00dev, MAC_CSR9, 0xff1d); + + if (rt2x00dev->ops->lib->set_device_state(rt2x00dev, STATE_AWAKE)) + return -EBUSY; + + rt2500usb_register_read(rt2x00dev, MAC_CSR1, ®); + rt2x00_set_field16(®, MAC_CSR1_SOFT_RESET, 0); + rt2x00_set_field16(®, MAC_CSR1_BBP_RESET, 0); + rt2x00_set_field16(®, MAC_CSR1_HOST_READY, 1); + rt2500usb_register_write(rt2x00dev, MAC_CSR1, reg); + + if (rt2x00_get_rev(&rt2x00dev->chip) >= RT2570_VERSION_C) { + rt2500usb_register_read(rt2x00dev, PHY_CSR2, ®); + reg &= ~0x0002; + } else { + reg = 0x3002; + } + rt2500usb_register_write(rt2x00dev, PHY_CSR2, reg); + + rt2500usb_register_write(rt2x00dev, MAC_CSR11, 0x0002); + rt2500usb_register_write(rt2x00dev, MAC_CSR22, 0x0053); + rt2500usb_register_write(rt2x00dev, MAC_CSR15, 0x01ee); + rt2500usb_register_write(rt2x00dev, MAC_CSR16, 0x0000); + + rt2500usb_register_read(rt2x00dev, MAC_CSR8, ®); + rt2x00_set_field16(®, MAC_CSR8_MAX_FRAME_UNIT, + rt2x00dev->rx->data_size); + rt2500usb_register_write(rt2x00dev, MAC_CSR8, reg); + + rt2500usb_register_read(rt2x00dev, TXRX_CSR0, ®); + rt2x00_set_field16(®, TXRX_CSR0_IV_OFFSET, IEEE80211_HEADER); + rt2x00_set_field16(®, 
TXRX_CSR0_KEY_ID, 0xff); + rt2500usb_register_write(rt2x00dev, TXRX_CSR0, reg); + + rt2500usb_register_read(rt2x00dev, MAC_CSR18, ®); + rt2x00_set_field16(®, MAC_CSR18_DELAY_AFTER_BEACON, 90); + rt2500usb_register_write(rt2x00dev, MAC_CSR18, reg); + + rt2500usb_register_read(rt2x00dev, PHY_CSR4, ®); + rt2x00_set_field16(®, PHY_CSR4_LOW_RF_LE, 1); + rt2500usb_register_write(rt2x00dev, PHY_CSR4, reg); + + rt2500usb_register_read(rt2x00dev, TXRX_CSR1, ®); + rt2x00_set_field16(®, TXRX_CSR1_AUTO_SEQUENCE, 1); + rt2500usb_register_write(rt2x00dev, TXRX_CSR1, reg); + + return 0; +} + +static int rt2500usb_init_bbp(struct rt2x00_dev *rt2x00dev) +{ + unsigned int i; + u16 eeprom; + u8 value; + u8 reg_id; + + for (i = 0; i < REGISTER_BUSY_COUNT; i++) { + rt2500usb_bbp_read(rt2x00dev, 0, &value); + if ((value != 0xff) && (value != 0x00)) + goto continue_csr_init; + NOTICE(rt2x00dev, "Waiting for BBP register.\n"); + udelay(REGISTER_BUSY_DELAY); + } + + ERROR(rt2x00dev, "BBP register access failed, aborting.\n"); + return -EACCES; + +continue_csr_init: + rt2500usb_bbp_write(rt2x00dev, 3, 0x02); + rt2500usb_bbp_write(rt2x00dev, 4, 0x19); + rt2500usb_bbp_write(rt2x00dev, 14, 0x1c); + rt2500usb_bbp_write(rt2x00dev, 15, 0x30); + rt2500usb_bbp_write(rt2x00dev, 16, 0xac); + rt2500usb_bbp_write(rt2x00dev, 18, 0x18); + rt2500usb_bbp_write(rt2x00dev, 19, 0xff); + rt2500usb_bbp_write(rt2x00dev, 20, 0x1e); + rt2500usb_bbp_write(rt2x00dev, 21, 0x08); + rt2500usb_bbp_write(rt2x00dev, 22, 0x08); + rt2500usb_bbp_write(rt2x00dev, 23, 0x08); + rt2500usb_bbp_write(rt2x00dev, 24, 0x80); + rt2500usb_bbp_write(rt2x00dev, 25, 0x50); + rt2500usb_bbp_write(rt2x00dev, 26, 0x08); + rt2500usb_bbp_write(rt2x00dev, 27, 0x23); + rt2500usb_bbp_write(rt2x00dev, 30, 0x10); + rt2500usb_bbp_write(rt2x00dev, 31, 0x2b); + rt2500usb_bbp_write(rt2x00dev, 32, 0xb9); + rt2500usb_bbp_write(rt2x00dev, 34, 0x12); + rt2500usb_bbp_write(rt2x00dev, 35, 0x50); + rt2500usb_bbp_write(rt2x00dev, 39, 0xc4); + rt2500usb_bbp_write(rt2x00dev, 40, 0x02); + rt2500usb_bbp_write(rt2x00dev, 41, 0x60); + rt2500usb_bbp_write(rt2x00dev, 53, 0x10); + rt2500usb_bbp_write(rt2x00dev, 54, 0x18); + rt2500usb_bbp_write(rt2x00dev, 56, 0x08); + rt2500usb_bbp_write(rt2x00dev, 57, 0x10); + rt2500usb_bbp_write(rt2x00dev, 58, 0x08); + rt2500usb_bbp_write(rt2x00dev, 61, 0x60); + rt2500usb_bbp_write(rt2x00dev, 62, 0x10); + rt2500usb_bbp_write(rt2x00dev, 75, 0xff); + + DEBUG(rt2x00dev, "Start initialization from EEPROM...\n"); + for (i = 0; i < EEPROM_BBP_SIZE; i++) { + rt2x00_eeprom_read(rt2x00dev, EEPROM_BBP_START + i, &eeprom); + + if (eeprom != 0xffff && eeprom != 0x0000) { + reg_id = rt2x00_get_field16(eeprom, EEPROM_BBP_REG_ID); + value = rt2x00_get_field16(eeprom, EEPROM_BBP_VALUE); + DEBUG(rt2x00dev, "BBP: 0x%02x, value: 0x%02x.\n", + reg_id, value); + rt2500usb_bbp_write(rt2x00dev, reg_id, value); + } + } + DEBUG(rt2x00dev, "...End initialization from EEPROM.\n"); + + return 0; +} + +/* + * Device state switch handlers. + */ +static void rt2500usb_toggle_rx(struct rt2x00_dev *rt2x00dev, + enum dev_state state) +{ + u16 reg; + + rt2500usb_register_read(rt2x00dev, TXRX_CSR2, ®); + rt2x00_set_field16(®, TXRX_CSR2_DISABLE_RX, + state == STATE_RADIO_RX_OFF); + rt2500usb_register_write(rt2x00dev, TXRX_CSR2, reg); +} + +static int rt2500usb_enable_radio(struct rt2x00_dev *rt2x00dev) +{ + /* + * Initialize all registers. 
+ */ + if (rt2500usb_init_registers(rt2x00dev) || + rt2500usb_init_bbp(rt2x00dev)) { + ERROR(rt2x00dev, "Register initialization failed.\n"); + return -EIO; + } + + rt2x00usb_enable_radio(rt2x00dev); + + /* + * Enable LED + */ + rt2500usb_enable_led(rt2x00dev); + + return 0; +} + +static void rt2500usb_disable_radio(struct rt2x00_dev *rt2x00dev) +{ + /* + * Disable LED + */ + rt2500usb_disable_led(rt2x00dev); + + rt2500usb_register_write(rt2x00dev, MAC_CSR13, 0x2121); + rt2500usb_register_write(rt2x00dev, MAC_CSR14, 0x2121); + + /* + * Disable synchronisation. + */ + rt2500usb_register_write(rt2x00dev, TXRX_CSR19, 0); + + rt2x00usb_disable_radio(rt2x00dev); +} + +static int rt2500usb_set_state(struct rt2x00_dev *rt2x00dev, + enum dev_state state) +{ + u16 reg; + u16 reg2; + unsigned int i; + char put_to_sleep; + char bbp_state; + char rf_state; + + put_to_sleep = (state != STATE_AWAKE); + + reg = 0; + rt2x00_set_field16(®, MAC_CSR17_BBP_DESIRE_STATE, state); + rt2x00_set_field16(®, MAC_CSR17_RF_DESIRE_STATE, state); + rt2x00_set_field16(®, MAC_CSR17_PUT_TO_SLEEP, put_to_sleep); + rt2500usb_register_write(rt2x00dev, MAC_CSR17, reg); + rt2x00_set_field16(®, MAC_CSR17_SET_STATE, 1); + rt2500usb_register_write(rt2x00dev, MAC_CSR17, reg); + + /* + * Device is not guaranteed to be in the requested state yet. + * We must wait until the register indicates that the + * device has entered the correct state. + */ + for (i = 0; i < REGISTER_BUSY_COUNT; i++) { + rt2500usb_register_read(rt2x00dev, MAC_CSR17, ®2); + bbp_state = rt2x00_get_field16(reg2, MAC_CSR17_BBP_CURR_STATE); + rf_state = rt2x00_get_field16(reg2, MAC_CSR17_RF_CURR_STATE); + if (bbp_state == state && rf_state == state) + return 0; + rt2500usb_register_write(rt2x00dev, MAC_CSR17, reg); + msleep(30); + } + + NOTICE(rt2x00dev, "Device failed to enter state %d, " + "current device state: bbp %d and rf %d.\n", + state, bbp_state, rf_state); + + return -EBUSY; +} + +static int rt2500usb_set_device_state(struct rt2x00_dev *rt2x00dev, + enum dev_state state) +{ + int retval = 0; + + switch (state) { + case STATE_RADIO_ON: + retval = rt2500usb_enable_radio(rt2x00dev); + break; + case STATE_RADIO_OFF: + rt2500usb_disable_radio(rt2x00dev); + break; + case STATE_RADIO_RX_ON: + case STATE_RADIO_RX_OFF: + rt2500usb_toggle_rx(rt2x00dev, state); + break; + case STATE_DEEP_SLEEP: + case STATE_SLEEP: + case STATE_STANDBY: + case STATE_AWAKE: + retval = rt2500usb_set_state(rt2x00dev, state); + break; + default: + retval = -ENOTSUPP; + break; + } + + return retval; +} + +/* + * TX descriptor initialization + */ +static void rt2500usb_write_tx_desc(struct rt2x00_dev *rt2x00dev, + struct data_desc *txd, + struct txdata_entry_desc *desc, + struct ieee80211_hdr *ieee80211hdr, + unsigned int length, + struct ieee80211_tx_control *control) +{ + u32 word; + + /* + * Start writing the descriptor words. 
+ */ + rt2x00_desc_read(txd, 1, &word); + rt2x00_set_field32(&word, TXD_W1_IV_OFFSET, IEEE80211_HEADER); + rt2x00_set_field32(&word, TXD_W1_AIFS, desc->aifs); + rt2x00_set_field32(&word, TXD_W1_CWMIN, desc->cw_min); + rt2x00_set_field32(&word, TXD_W1_CWMAX, desc->cw_max); + rt2x00_desc_write(txd, 1, word); + + rt2x00_desc_read(txd, 2, &word); + rt2x00_set_field32(&word, TXD_W2_PLCP_SIGNAL, desc->signal); + rt2x00_set_field32(&word, TXD_W2_PLCP_SERVICE, desc->service); + rt2x00_set_field32(&word, TXD_W2_PLCP_LENGTH_LOW, desc->length_low); + rt2x00_set_field32(&word, TXD_W2_PLCP_LENGTH_HIGH, desc->length_high); + rt2x00_desc_write(txd, 2, word); + + rt2x00_desc_read(txd, 0, &word); + rt2x00_set_field32(&word, TXD_W0_RETRY_LIMIT, control->retry_limit); + rt2x00_set_field32(&word, TXD_W0_MORE_FRAG, + test_bit(ENTRY_TXD_MORE_FRAG, &desc->flags)); + rt2x00_set_field32(&word, TXD_W0_ACK, + !(control->flags & IEEE80211_TXCTL_NO_ACK)); + rt2x00_set_field32(&word, TXD_W0_TIMESTAMP, + test_bit(ENTRY_TXD_REQ_TIMESTAMP, &desc->flags)); + rt2x00_set_field32(&word, TXD_W0_OFDM, + test_bit(ENTRY_TXD_OFDM_RATE, &desc->flags)); + rt2x00_set_field32(&word, TXD_W0_NEW_SEQ, + !!(control->flags & IEEE80211_TXCTL_FIRST_FRAGMENT)); + rt2x00_set_field32(&word, TXD_W0_IFS, desc->ifs); + rt2x00_set_field32(&word, TXD_W0_DATABYTE_COUNT, length); + rt2x00_set_field32(&word, TXD_W0_CIPHER, CIPHER_NONE); + rt2x00_desc_write(txd, 0, word); +} + +static int rt2500usb_get_tx_data_len(struct rt2x00_dev *rt2x00dev, + int maxpacket, struct sk_buff *skb) +{ + int length; + + /* + * The length _must_ be a multiple of 2, + * but it must _not_ be a multiple of the USB packet size. + */ + length = roundup(skb->len, 2); + length += (2 * !(length % maxpacket)); + + return length; +} + +/* + * TX data initialization + */ +static void rt2500usb_kick_tx_queue(struct rt2x00_dev *rt2x00dev, + unsigned int queue) +{ + u16 reg; + + if (queue != IEEE80211_TX_QUEUE_BEACON) + return; + + rt2500usb_register_read(rt2x00dev, TXRX_CSR19, ®); + if (!rt2x00_get_field16(reg, TXRX_CSR19_BEACON_GEN)) { + rt2x00_set_field16(®, TXRX_CSR19_BEACON_GEN, 1); + /* + * Beacon generation will fail initially. + * To prevent this we need to register the TXRX_CSR19 + * register several times. + */ + rt2500usb_register_write(rt2x00dev, TXRX_CSR19, reg); + rt2500usb_register_write(rt2x00dev, TXRX_CSR19, 0); + rt2500usb_register_write(rt2x00dev, TXRX_CSR19, reg); + rt2500usb_register_write(rt2x00dev, TXRX_CSR19, 0); + rt2500usb_register_write(rt2x00dev, TXRX_CSR19, reg); + } +} + +/* + * RX control handlers + */ +static void rt2500usb_fill_rxdone(struct data_entry *entry, + struct rxdata_entry_desc *desc) +{ + struct urb *urb = entry->priv; + struct data_desc *rxd = (struct data_desc *)(entry->skb->data + + (urb->actual_length - + entry->ring->desc_size)); + u32 word0; + u32 word1; + + rt2x00_desc_read(rxd, 0, &word0); + rt2x00_desc_read(rxd, 1, &word1); + + desc->flags = 0; + if (rt2x00_get_field32(word0, RXD_W0_CRC_ERROR)) + desc->flags |= RX_FLAG_FAILED_FCS_CRC; + if (rt2x00_get_field32(word0, RXD_W0_PHYSICAL_ERROR)) + desc->flags |= RX_FLAG_FAILED_PLCP_CRC; + + /* + * Obtain the status about this packet. + */ + desc->signal = rt2x00_get_field32(word1, RXD_W1_SIGNAL); + desc->rssi = rt2x00_get_field32(word1, RXD_W1_RSSI) - + entry->ring->rt2x00dev->rssi_offset; + desc->ofdm = rt2x00_get_field32(word0, RXD_W0_OFDM); + desc->size = rt2x00_get_field32(word0, RXD_W0_DATABYTE_COUNT); + + return; +} + +/* + * Interrupt functions. 
+ */ +static void rt2500usb_beacondone(struct urb *urb) +{ + struct data_entry *entry = (struct data_entry *)urb->context; + struct data_ring *ring = entry->ring; + + if (!test_bit(DEVICE_ENABLED_RADIO, &ring->rt2x00dev->flags)) + return; + + /* + * Check if this was the guardian beacon, + * if that was the case we need to send the real beacon now. + * Otherwise we should free the sk_buffer, the device + * should be doing the rest of the work now. + */ + if (ring->index == 1) { + rt2x00_ring_index_done_inc(ring); + entry = rt2x00_get_data_entry(ring); + usb_submit_urb(entry->priv, GFP_ATOMIC); + rt2x00_ring_index_inc(ring); + } else if (ring->index_done == 1) { + entry = rt2x00_get_data_entry_done(ring); + if (entry->skb) { + dev_kfree_skb(entry->skb); + entry->skb = NULL; + } + rt2x00_ring_index_done_inc(ring); + } +} + +/* + * Device probe functions. + */ +static int rt2500usb_validate_eeprom(struct rt2x00_dev *rt2x00dev) +{ + u16 word; + u8 *mac; + + rt2x00usb_eeprom_read(rt2x00dev, rt2x00dev->eeprom, EEPROM_SIZE); + + /* + * Start validation of the data that has been read. + */ + mac = rt2x00_eeprom_addr(rt2x00dev, EEPROM_MAC_ADDR_0); + if (!is_valid_ether_addr(mac)) { + DECLARE_MAC_BUF(macbuf); + + random_ether_addr(mac); + EEPROM(rt2x00dev, "MAC: %s\n", print_mac(macbuf, mac)); + } + + rt2x00_eeprom_read(rt2x00dev, EEPROM_ANTENNA, &word); + if (word == 0xffff) { + rt2x00_set_field16(&word, EEPROM_ANTENNA_NUM, 2); + rt2x00_set_field16(&word, EEPROM_ANTENNA_TX_DEFAULT, 0); + rt2x00_set_field16(&word, EEPROM_ANTENNA_RX_DEFAULT, 0); + rt2x00_set_field16(&word, EEPROM_ANTENNA_LED_MODE, 0); + rt2x00_set_field16(&word, EEPROM_ANTENNA_DYN_TXAGC, 0); + rt2x00_set_field16(&word, EEPROM_ANTENNA_HARDWARE_RADIO, 0); + rt2x00_set_field16(&word, EEPROM_ANTENNA_RF_TYPE, RF2522); + rt2x00_eeprom_write(rt2x00dev, EEPROM_ANTENNA, word); + EEPROM(rt2x00dev, "Antenna: 0x%04x\n", word); + } + + rt2x00_eeprom_read(rt2x00dev, EEPROM_NIC, &word); + if (word == 0xffff) { + rt2x00_set_field16(&word, EEPROM_NIC_CARDBUS_ACCEL, 0); + rt2x00_set_field16(&word, EEPROM_NIC_DYN_BBP_TUNE, 0); + rt2x00_set_field16(&word, EEPROM_NIC_CCK_TX_POWER, 0); + rt2x00_eeprom_write(rt2x00dev, EEPROM_NIC, word); + EEPROM(rt2x00dev, "NIC: 0x%04x\n", word); + } + + rt2x00_eeprom_read(rt2x00dev, EEPROM_CALIBRATE_OFFSET, &word); + if (word == 0xffff) { + rt2x00_set_field16(&word, EEPROM_CALIBRATE_OFFSET_RSSI, + DEFAULT_RSSI_OFFSET); + rt2x00_eeprom_write(rt2x00dev, EEPROM_CALIBRATE_OFFSET, word); + EEPROM(rt2x00dev, "Calibrate offset: 0x%04x\n", word); + } + + rt2x00_eeprom_read(rt2x00dev, EEPROM_BBPTUNE, &word); + if (word == 0xffff) { + rt2x00_set_field16(&word, EEPROM_BBPTUNE_THRESHOLD, 45); + rt2x00_eeprom_write(rt2x00dev, EEPROM_BBPTUNE, word); + EEPROM(rt2x00dev, "BBPtune: 0x%04x\n", word); + } + + rt2x00_eeprom_read(rt2x00dev, EEPROM_BBPTUNE_VGC, &word); + if (word == 0xffff) { + rt2x00_set_field16(&word, EEPROM_BBPTUNE_VGCUPPER, 0x40); + rt2x00_eeprom_write(rt2x00dev, EEPROM_BBPTUNE_VGC, word); + EEPROM(rt2x00dev, "BBPtune vgc: 0x%04x\n", word); + } + + rt2x00_eeprom_read(rt2x00dev, EEPROM_BBPTUNE_R17, &word); + if (word == 0xffff) { + rt2x00_set_field16(&word, EEPROM_BBPTUNE_R17_LOW, 0x48); + rt2x00_set_field16(&word, EEPROM_BBPTUNE_R17_HIGH, 0x41); + rt2x00_eeprom_write(rt2x00dev, EEPROM_BBPTUNE_R17, word); + EEPROM(rt2x00dev, "BBPtune r17: 0x%04x\n", word); + } + + rt2x00_eeprom_read(rt2x00dev, EEPROM_BBPTUNE_R24, &word); + if (word == 0xffff) { + rt2x00_set_field16(&word, EEPROM_BBPTUNE_R24_LOW, 0x40); + 
rt2x00_set_field16(&word, EEPROM_BBPTUNE_R24_HIGH, 0x80); + rt2x00_eeprom_write(rt2x00dev, EEPROM_BBPTUNE_R24, word); + EEPROM(rt2x00dev, "BBPtune r24: 0x%04x\n", word); + } + + rt2x00_eeprom_read(rt2x00dev, EEPROM_BBPTUNE_R25, &word); + if (word == 0xffff) { + rt2x00_set_field16(&word, EEPROM_BBPTUNE_R25_LOW, 0x40); + rt2x00_set_field16(&word, EEPROM_BBPTUNE_R25_HIGH, 0x50); + rt2x00_eeprom_write(rt2x00dev, EEPROM_BBPTUNE_R25, word); + EEPROM(rt2x00dev, "BBPtune r25: 0x%04x\n", word); + } + + rt2x00_eeprom_read(rt2x00dev, EEPROM_BBPTUNE_R61, &word); + if (word == 0xffff) { + rt2x00_set_field16(&word, EEPROM_BBPTUNE_R61_LOW, 0x60); + rt2x00_set_field16(&word, EEPROM_BBPTUNE_R61_HIGH, 0x6d); + rt2x00_eeprom_write(rt2x00dev, EEPROM_BBPTUNE_R61, word); + EEPROM(rt2x00dev, "BBPtune r61: 0x%04x\n", word); + } + + return 0; +} + +static int rt2500usb_init_eeprom(struct rt2x00_dev *rt2x00dev) +{ + u16 reg; + u16 value; + u16 eeprom; + + /* + * Read EEPROM word for configuration. + */ + rt2x00_eeprom_read(rt2x00dev, EEPROM_ANTENNA, &eeprom); + + /* + * Identify RF chipset. + */ + value = rt2x00_get_field16(eeprom, EEPROM_ANTENNA_RF_TYPE); + rt2500usb_register_read(rt2x00dev, MAC_CSR0, ®); + rt2x00_set_chip(rt2x00dev, RT2570, value, reg); + + if (rt2x00_rev(&rt2x00dev->chip, 0xffff0)) { + ERROR(rt2x00dev, "Invalid RT chipset detected.\n"); + return -ENODEV; + } + + if (!rt2x00_rf(&rt2x00dev->chip, RF2522) && + !rt2x00_rf(&rt2x00dev->chip, RF2523) && + !rt2x00_rf(&rt2x00dev->chip, RF2524) && + !rt2x00_rf(&rt2x00dev->chip, RF2525) && + !rt2x00_rf(&rt2x00dev->chip, RF2525E) && + !rt2x00_rf(&rt2x00dev->chip, RF5222)) { + ERROR(rt2x00dev, "Invalid RF chipset detected.\n"); + return -ENODEV; + } + + /* + * Identify default antenna configuration. + */ + rt2x00dev->hw->conf.antenna_sel_tx = + rt2x00_get_field16(eeprom, EEPROM_ANTENNA_TX_DEFAULT); + rt2x00dev->hw->conf.antenna_sel_rx = + rt2x00_get_field16(eeprom, EEPROM_ANTENNA_RX_DEFAULT); + + /* + * Store led mode, for correct led behaviour. + */ + rt2x00dev->led_mode = + rt2x00_get_field16(eeprom, EEPROM_ANTENNA_LED_MODE); + + /* + * Check if the BBP tuning should be disabled. + */ + rt2x00_eeprom_read(rt2x00dev, EEPROM_NIC, &eeprom); + if (rt2x00_get_field16(eeprom, EEPROM_NIC_DYN_BBP_TUNE)) + __set_bit(CONFIG_DISABLE_LINK_TUNING, &rt2x00dev->flags); + + /* + * Read the RSSI <-> dBm offset information. 
+ */ + rt2x00_eeprom_read(rt2x00dev, EEPROM_CALIBRATE_OFFSET, &eeprom); + rt2x00dev->rssi_offset = + rt2x00_get_field16(eeprom, EEPROM_CALIBRATE_OFFSET_RSSI); + + return 0; +} + +/* + * RF value list for RF2522 + * Supports: 2.4 GHz + */ +static const struct rf_channel rf_vals_bg_2522[] = { + { 1, 0x00002050, 0x000c1fda, 0x00000101, 0 }, + { 2, 0x00002050, 0x000c1fee, 0x00000101, 0 }, + { 3, 0x00002050, 0x000c2002, 0x00000101, 0 }, + { 4, 0x00002050, 0x000c2016, 0x00000101, 0 }, + { 5, 0x00002050, 0x000c202a, 0x00000101, 0 }, + { 6, 0x00002050, 0x000c203e, 0x00000101, 0 }, + { 7, 0x00002050, 0x000c2052, 0x00000101, 0 }, + { 8, 0x00002050, 0x000c2066, 0x00000101, 0 }, + { 9, 0x00002050, 0x000c207a, 0x00000101, 0 }, + { 10, 0x00002050, 0x000c208e, 0x00000101, 0 }, + { 11, 0x00002050, 0x000c20a2, 0x00000101, 0 }, + { 12, 0x00002050, 0x000c20b6, 0x00000101, 0 }, + { 13, 0x00002050, 0x000c20ca, 0x00000101, 0 }, + { 14, 0x00002050, 0x000c20fa, 0x00000101, 0 }, +}; + +/* + * RF value list for RF2523 + * Supports: 2.4 GHz + */ +static const struct rf_channel rf_vals_bg_2523[] = { + { 1, 0x00022010, 0x00000c9e, 0x000e0111, 0x00000a1b }, + { 2, 0x00022010, 0x00000ca2, 0x000e0111, 0x00000a1b }, + { 3, 0x00022010, 0x00000ca6, 0x000e0111, 0x00000a1b }, + { 4, 0x00022010, 0x00000caa, 0x000e0111, 0x00000a1b }, + { 5, 0x00022010, 0x00000cae, 0x000e0111, 0x00000a1b }, + { 6, 0x00022010, 0x00000cb2, 0x000e0111, 0x00000a1b }, + { 7, 0x00022010, 0x00000cb6, 0x000e0111, 0x00000a1b }, + { 8, 0x00022010, 0x00000cba, 0x000e0111, 0x00000a1b }, + { 9, 0x00022010, 0x00000cbe, 0x000e0111, 0x00000a1b }, + { 10, 0x00022010, 0x00000d02, 0x000e0111, 0x00000a1b }, + { 11, 0x00022010, 0x00000d06, 0x000e0111, 0x00000a1b }, + { 12, 0x00022010, 0x00000d0a, 0x000e0111, 0x00000a1b }, + { 13, 0x00022010, 0x00000d0e, 0x000e0111, 0x00000a1b }, + { 14, 0x00022010, 0x00000d1a, 0x000e0111, 0x00000a03 }, +}; + +/* + * RF value list for RF2524 + * Supports: 2.4 GHz + */ +static const struct rf_channel rf_vals_bg_2524[] = { + { 1, 0x00032020, 0x00000c9e, 0x00000101, 0x00000a1b }, + { 2, 0x00032020, 0x00000ca2, 0x00000101, 0x00000a1b }, + { 3, 0x00032020, 0x00000ca6, 0x00000101, 0x00000a1b }, + { 4, 0x00032020, 0x00000caa, 0x00000101, 0x00000a1b }, + { 5, 0x00032020, 0x00000cae, 0x00000101, 0x00000a1b }, + { 6, 0x00032020, 0x00000cb2, 0x00000101, 0x00000a1b }, + { 7, 0x00032020, 0x00000cb6, 0x00000101, 0x00000a1b }, + { 8, 0x00032020, 0x00000cba, 0x00000101, 0x00000a1b }, + { 9, 0x00032020, 0x00000cbe, 0x00000101, 0x00000a1b }, + { 10, 0x00032020, 0x00000d02, 0x00000101, 0x00000a1b }, + { 11, 0x00032020, 0x00000d06, 0x00000101, 0x00000a1b }, + { 12, 0x00032020, 0x00000d0a, 0x00000101, 0x00000a1b }, + { 13, 0x00032020, 0x00000d0e, 0x00000101, 0x00000a1b }, + { 14, 0x00032020, 0x00000d1a, 0x00000101, 0x00000a03 }, +}; + +/* + * RF value list for RF2525 + * Supports: 2.4 GHz + */ +static const struct rf_channel rf_vals_bg_2525[] = { + { 1, 0x00022020, 0x00080c9e, 0x00060111, 0x00000a1b }, + { 2, 0x00022020, 0x00080ca2, 0x00060111, 0x00000a1b }, + { 3, 0x00022020, 0x00080ca6, 0x00060111, 0x00000a1b }, + { 4, 0x00022020, 0x00080caa, 0x00060111, 0x00000a1b }, + { 5, 0x00022020, 0x00080cae, 0x00060111, 0x00000a1b }, + { 6, 0x00022020, 0x00080cb2, 0x00060111, 0x00000a1b }, + { 7, 0x00022020, 0x00080cb6, 0x00060111, 0x00000a1b }, + { 8, 0x00022020, 0x00080cba, 0x00060111, 0x00000a1b }, + { 9, 0x00022020, 0x00080cbe, 0x00060111, 0x00000a1b }, + { 10, 0x00022020, 0x00080d02, 0x00060111, 0x00000a1b }, + { 11, 0x00022020, 0x00080d06, 0x00060111, 
0x00000a1b }, + { 12, 0x00022020, 0x00080d0a, 0x00060111, 0x00000a1b }, + { 13, 0x00022020, 0x00080d0e, 0x00060111, 0x00000a1b }, + { 14, 0x00022020, 0x00080d1a, 0x00060111, 0x00000a03 }, +}; + +/* + * RF value list for RF2525e + * Supports: 2.4 GHz + */ +static const struct rf_channel rf_vals_bg_2525e[] = { + { 1, 0x00022010, 0x0000089a, 0x00060111, 0x00000e1b }, + { 2, 0x00022010, 0x0000089e, 0x00060111, 0x00000e07 }, + { 3, 0x00022010, 0x0000089e, 0x00060111, 0x00000e1b }, + { 4, 0x00022010, 0x000008a2, 0x00060111, 0x00000e07 }, + { 5, 0x00022010, 0x000008a2, 0x00060111, 0x00000e1b }, + { 6, 0x00022010, 0x000008a6, 0x00060111, 0x00000e07 }, + { 7, 0x00022010, 0x000008a6, 0x00060111, 0x00000e1b }, + { 8, 0x00022010, 0x000008aa, 0x00060111, 0x00000e07 }, + { 9, 0x00022010, 0x000008aa, 0x00060111, 0x00000e1b }, + { 10, 0x00022010, 0x000008ae, 0x00060111, 0x00000e07 }, + { 11, 0x00022010, 0x000008ae, 0x00060111, 0x00000e1b }, + { 12, 0x00022010, 0x000008b2, 0x00060111, 0x00000e07 }, + { 13, 0x00022010, 0x000008b2, 0x00060111, 0x00000e1b }, + { 14, 0x00022010, 0x000008b6, 0x00060111, 0x00000e23 }, +}; + +/* + * RF value list for RF5222 + * Supports: 2.4 GHz & 5.2 GHz + */ +static const struct rf_channel rf_vals_5222[] = { + { 1, 0x00022020, 0x00001136, 0x00000101, 0x00000a0b }, + { 2, 0x00022020, 0x0000113a, 0x00000101, 0x00000a0b }, + { 3, 0x00022020, 0x0000113e, 0x00000101, 0x00000a0b }, + { 4, 0x00022020, 0x00001182, 0x00000101, 0x00000a0b }, + { 5, 0x00022020, 0x00001186, 0x00000101, 0x00000a0b }, + { 6, 0x00022020, 0x0000118a, 0x00000101, 0x00000a0b }, + { 7, 0x00022020, 0x0000118e, 0x00000101, 0x00000a0b }, + { 8, 0x00022020, 0x00001192, 0x00000101, 0x00000a0b }, + { 9, 0x00022020, 0x00001196, 0x00000101, 0x00000a0b }, + { 10, 0x00022020, 0x0000119a, 0x00000101, 0x00000a0b }, + { 11, 0x00022020, 0x0000119e, 0x00000101, 0x00000a0b }, + { 12, 0x00022020, 0x000011a2, 0x00000101, 0x00000a0b }, + { 13, 0x00022020, 0x000011a6, 0x00000101, 0x00000a0b }, + { 14, 0x00022020, 0x000011ae, 0x00000101, 0x00000a1b }, + + /* 802.11 UNI / HyperLan 2 */ + { 36, 0x00022010, 0x00018896, 0x00000101, 0x00000a1f }, + { 40, 0x00022010, 0x0001889a, 0x00000101, 0x00000a1f }, + { 44, 0x00022010, 0x0001889e, 0x00000101, 0x00000a1f }, + { 48, 0x00022010, 0x000188a2, 0x00000101, 0x00000a1f }, + { 52, 0x00022010, 0x000188a6, 0x00000101, 0x00000a1f }, + { 66, 0x00022010, 0x000188aa, 0x00000101, 0x00000a1f }, + { 60, 0x00022010, 0x000188ae, 0x00000101, 0x00000a1f }, + { 64, 0x00022010, 0x000188b2, 0x00000101, 0x00000a1f }, + + /* 802.11 HyperLan 2 */ + { 100, 0x00022010, 0x00008802, 0x00000101, 0x00000a0f }, + { 104, 0x00022010, 0x00008806, 0x00000101, 0x00000a0f }, + { 108, 0x00022010, 0x0000880a, 0x00000101, 0x00000a0f }, + { 112, 0x00022010, 0x0000880e, 0x00000101, 0x00000a0f }, + { 116, 0x00022010, 0x00008812, 0x00000101, 0x00000a0f }, + { 120, 0x00022010, 0x00008816, 0x00000101, 0x00000a0f }, + { 124, 0x00022010, 0x0000881a, 0x00000101, 0x00000a0f }, + { 128, 0x00022010, 0x0000881e, 0x00000101, 0x00000a0f }, + { 132, 0x00022010, 0x00008822, 0x00000101, 0x00000a0f }, + { 136, 0x00022010, 0x00008826, 0x00000101, 0x00000a0f }, + + /* 802.11 UNII */ + { 140, 0x00022010, 0x0000882a, 0x00000101, 0x00000a0f }, + { 149, 0x00022020, 0x000090a6, 0x00000101, 0x00000a07 }, + { 153, 0x00022020, 0x000090ae, 0x00000101, 0x00000a07 }, + { 157, 0x00022020, 0x000090b6, 0x00000101, 0x00000a07 }, + { 161, 0x00022020, 0x000090be, 0x00000101, 0x00000a07 }, +}; + +static void rt2500usb_probe_hw_mode(struct rt2x00_dev *rt2x00dev) +{ + 
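+	/*
+	 * Fill the hw_mode_spec structure based on the RF chipset that was
+	 * identified from the EEPROM: all supported RF types provide the
+	 * 2.4GHz band, only the RF5222 additionally registers the 5.2GHz
+	 * channels from the rf_vals_5222 table above.
+	 */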
struct hw_mode_spec *spec = &rt2x00dev->spec; + u8 *txpower; + unsigned int i; + + /* + * Initialize all hw fields. + */ + rt2x00dev->hw->flags = + IEEE80211_HW_HOST_GEN_BEACON_TEMPLATE | + IEEE80211_HW_RX_INCLUDES_FCS | + IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING; + rt2x00dev->hw->extra_tx_headroom = TXD_DESC_SIZE; + rt2x00dev->hw->max_signal = MAX_SIGNAL; + rt2x00dev->hw->max_rssi = MAX_RX_SSI; + rt2x00dev->hw->queues = 2; + + SET_IEEE80211_DEV(rt2x00dev->hw, &rt2x00dev_usb(rt2x00dev)->dev); + SET_IEEE80211_PERM_ADDR(rt2x00dev->hw, + rt2x00_eeprom_addr(rt2x00dev, + EEPROM_MAC_ADDR_0)); + + /* + * Convert tx_power array in eeprom. + */ + txpower = rt2x00_eeprom_addr(rt2x00dev, EEPROM_TXPOWER_START); + for (i = 0; i < 14; i++) + txpower[i] = TXPOWER_FROM_DEV(txpower[i]); + + /* + * Initialize hw_mode information. + */ + spec->num_modes = 2; + spec->num_rates = 12; + spec->tx_power_a = NULL; + spec->tx_power_bg = txpower; + spec->tx_power_default = DEFAULT_TXPOWER; + + if (rt2x00_rf(&rt2x00dev->chip, RF2522)) { + spec->num_channels = ARRAY_SIZE(rf_vals_bg_2522); + spec->channels = rf_vals_bg_2522; + } else if (rt2x00_rf(&rt2x00dev->chip, RF2523)) { + spec->num_channels = ARRAY_SIZE(rf_vals_bg_2523); + spec->channels = rf_vals_bg_2523; + } else if (rt2x00_rf(&rt2x00dev->chip, RF2524)) { + spec->num_channels = ARRAY_SIZE(rf_vals_bg_2524); + spec->channels = rf_vals_bg_2524; + } else if (rt2x00_rf(&rt2x00dev->chip, RF2525)) { + spec->num_channels = ARRAY_SIZE(rf_vals_bg_2525); + spec->channels = rf_vals_bg_2525; + } else if (rt2x00_rf(&rt2x00dev->chip, RF2525E)) { + spec->num_channels = ARRAY_SIZE(rf_vals_bg_2525e); + spec->channels = rf_vals_bg_2525e; + } else if (rt2x00_rf(&rt2x00dev->chip, RF5222)) { + spec->num_channels = ARRAY_SIZE(rf_vals_5222); + spec->channels = rf_vals_5222; + spec->num_modes = 3; + } +} + +static int rt2500usb_probe_hw(struct rt2x00_dev *rt2x00dev) +{ + int retval; + + /* + * Allocate eeprom data. + */ + retval = rt2500usb_validate_eeprom(rt2x00dev); + if (retval) + return retval; + + retval = rt2500usb_init_eeprom(rt2x00dev); + if (retval) + return retval; + + /* + * Initialize hw specifications. + */ + rt2500usb_probe_hw_mode(rt2x00dev); + + /* + * This device requires the beacon ring + */ + __set_bit(DRIVER_REQUIRE_BEACON_RING, &rt2x00dev->flags); + + /* + * Set the rssi offset. + */ + rt2x00dev->rssi_offset = DEFAULT_RSSI_OFFSET; + + return 0; +} + +/* + * IEEE80211 stack callback functions. + */ +static void rt2500usb_configure_filter(struct ieee80211_hw *hw, + unsigned int changed_flags, + unsigned int *total_flags, + int mc_count, + struct dev_addr_list *mc_list) +{ + struct rt2x00_dev *rt2x00dev = hw->priv; + struct interface *intf = &rt2x00dev->interface; + u16 reg; + + /* + * Mask off any flags we are going to ignore from + * the total_flags field. + */ + *total_flags &= + FIF_ALLMULTI | + FIF_FCSFAIL | + FIF_PLCPFAIL | + FIF_CONTROL | + FIF_OTHER_BSS | + FIF_PROMISC_IN_BSS; + + /* + * Apply some rules to the filters: + * - Some filters imply different filters to be set. + * - Some things we can't filter out at all. + * - Some filters are set based on interface type. + */ + if (mc_count) + *total_flags |= FIF_ALLMULTI; + if (*total_flags & FIF_OTHER_BSS || + *total_flags & FIF_PROMISC_IN_BSS) + *total_flags |= FIF_PROMISC_IN_BSS | FIF_OTHER_BSS; + if (is_interface_type(intf, IEEE80211_IF_TYPE_AP)) + *total_flags |= FIF_PROMISC_IN_BSS; + + /* + * Check if there is any work left for us. 
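+	 * If the requested filter equals the filter that was programmed
+	 * earlier there is nothing to reconfigure and we can return
+	 * before touching any registers.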
+ */ + if (intf->filter == *total_flags) + return; + intf->filter = *total_flags; + + /* + * When in atomic context, reschedule and let rt2x00lib + * call this function again. + */ + if (in_atomic()) { + queue_work(rt2x00dev->hw->workqueue, &rt2x00dev->filter_work); + return; + } + + /* + * Start configuration steps. + * Note that the version error will always be dropped + * and broadcast frames will always be accepted since + * there is no filter for it at this time. + */ + rt2500usb_register_read(rt2x00dev, TXRX_CSR2, ®); + rt2x00_set_field16(®, TXRX_CSR2_DROP_CRC, + !(*total_flags & FIF_FCSFAIL)); + rt2x00_set_field16(®, TXRX_CSR2_DROP_PHYSICAL, + !(*total_flags & FIF_PLCPFAIL)); + rt2x00_set_field16(®, TXRX_CSR2_DROP_CONTROL, + !(*total_flags & FIF_CONTROL)); + rt2x00_set_field16(®, TXRX_CSR2_DROP_NOT_TO_ME, + !(*total_flags & FIF_PROMISC_IN_BSS)); + rt2x00_set_field16(®, TXRX_CSR2_DROP_TODS, + !(*total_flags & FIF_PROMISC_IN_BSS)); + rt2x00_set_field16(®, TXRX_CSR2_DROP_VERSION_ERROR, 1); + rt2x00_set_field16(®, TXRX_CSR2_DROP_MULTICAST, + !(*total_flags & FIF_ALLMULTI)); + rt2x00_set_field16(®, TXRX_CSR2_DROP_BROADCAST, 0); + rt2500usb_register_write(rt2x00dev, TXRX_CSR2, reg); +} + +static int rt2500usb_beacon_update(struct ieee80211_hw *hw, + struct sk_buff *skb, + struct ieee80211_tx_control *control) +{ + struct rt2x00_dev *rt2x00dev = hw->priv; + struct usb_device *usb_dev = + interface_to_usbdev(rt2x00dev_usb(rt2x00dev)); + struct data_ring *ring = + rt2x00lib_get_ring(rt2x00dev, IEEE80211_TX_QUEUE_BEACON); + struct data_entry *beacon; + struct data_entry *guardian; + int pipe = usb_sndbulkpipe(usb_dev, 1); + int max_packet = usb_maxpacket(usb_dev, pipe, 1); + int length; + + /* + * Just in case the ieee80211 doesn't set this, + * but we need this queue set for the descriptor + * initialization. + */ + control->queue = IEEE80211_TX_QUEUE_BEACON; + + /* + * Obtain 2 entries, one for the guardian byte, + * the second for the actual beacon. + */ + guardian = rt2x00_get_data_entry(ring); + rt2x00_ring_index_inc(ring); + beacon = rt2x00_get_data_entry(ring); + + /* + * First we create the beacon. + */ + skb_push(skb, ring->desc_size); + memset(skb->data, 0, ring->desc_size); + + rt2x00lib_write_tx_desc(rt2x00dev, (struct data_desc *)skb->data, + (struct ieee80211_hdr *)(skb->data + + ring->desc_size), + skb->len - ring->desc_size, control); + + length = rt2500usb_get_tx_data_len(rt2x00dev, max_packet, skb); + + usb_fill_bulk_urb(beacon->priv, usb_dev, pipe, + skb->data, length, rt2500usb_beacondone, beacon); + + beacon->skb = skb; + + /* + * Second we need to create the guardian byte. + * We only need a single byte, so lets recycle + * the 'flags' field we are not using for beacons. + */ + guardian->flags = 0; + usb_fill_bulk_urb(guardian->priv, usb_dev, pipe, + &guardian->flags, 1, rt2500usb_beacondone, guardian); + + /* + * Send out the guardian byte. + */ + usb_submit_urb(guardian->priv, GFP_ATOMIC); + + /* + * Enable beacon generation. 
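+	 * Kicking the beacon queue should make the driver enable the
+	 * beacon generator (TXRX_CSR19_BEACON_GEN) so the hardware starts
+	 * transmitting the beacon that was just uploaded.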
+ */ + rt2500usb_kick_tx_queue(rt2x00dev, IEEE80211_TX_QUEUE_BEACON); + + return 0; +} + +static const struct ieee80211_ops rt2500usb_mac80211_ops = { + .tx = rt2x00mac_tx, + .start = rt2x00mac_start, + .stop = rt2x00mac_stop, + .add_interface = rt2x00mac_add_interface, + .remove_interface = rt2x00mac_remove_interface, + .config = rt2x00mac_config, + .config_interface = rt2x00mac_config_interface, + .configure_filter = rt2500usb_configure_filter, + .get_stats = rt2x00mac_get_stats, + .erp_ie_changed = rt2x00mac_erp_ie_changed, + .conf_tx = rt2x00mac_conf_tx, + .get_tx_stats = rt2x00mac_get_tx_stats, + .beacon_update = rt2500usb_beacon_update, +}; + +static const struct rt2x00lib_ops rt2500usb_rt2x00_ops = { + .probe_hw = rt2500usb_probe_hw, + .initialize = rt2x00usb_initialize, + .uninitialize = rt2x00usb_uninitialize, + .set_device_state = rt2500usb_set_device_state, + .link_stats = rt2500usb_link_stats, + .reset_tuner = rt2500usb_reset_tuner, + .link_tuner = rt2500usb_link_tuner, + .write_tx_desc = rt2500usb_write_tx_desc, + .write_tx_data = rt2x00usb_write_tx_data, + .get_tx_data_len = rt2500usb_get_tx_data_len, + .kick_tx_queue = rt2500usb_kick_tx_queue, + .fill_rxdone = rt2500usb_fill_rxdone, + .config_mac_addr = rt2500usb_config_mac_addr, + .config_bssid = rt2500usb_config_bssid, + .config_type = rt2500usb_config_type, + .config_preamble = rt2500usb_config_preamble, + .config = rt2500usb_config, +}; + +static const struct rt2x00_ops rt2500usb_ops = { + .name = DRV_NAME, + .rxd_size = RXD_DESC_SIZE, + .txd_size = TXD_DESC_SIZE, + .eeprom_size = EEPROM_SIZE, + .rf_size = RF_SIZE, + .lib = &rt2500usb_rt2x00_ops, + .hw = &rt2500usb_mac80211_ops, +#ifdef CONFIG_RT2X00_LIB_DEBUGFS + .debugfs = &rt2500usb_rt2x00debug, +#endif /* CONFIG_RT2X00_LIB_DEBUGFS */ +}; + +/* + * rt2500usb module information. 
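+ * The USB ID table below binds each supported vendor/product ID to
+ * &rt2500usb_ops through USB_DEVICE_DATA(); support for additional
+ * RT2570 based devices can be added by extending this table.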
+ */ +static struct usb_device_id rt2500usb_device_table[] = { + /* ASUS */ + { USB_DEVICE(0x0b05, 0x1706), USB_DEVICE_DATA(&rt2500usb_ops) }, + { USB_DEVICE(0x0b05, 0x1707), USB_DEVICE_DATA(&rt2500usb_ops) }, + /* Belkin */ + { USB_DEVICE(0x050d, 0x7050), USB_DEVICE_DATA(&rt2500usb_ops) }, + { USB_DEVICE(0x050d, 0x7051), USB_DEVICE_DATA(&rt2500usb_ops) }, + { USB_DEVICE(0x050d, 0x705a), USB_DEVICE_DATA(&rt2500usb_ops) }, + /* Cisco Systems */ + { USB_DEVICE(0x13b1, 0x000d), USB_DEVICE_DATA(&rt2500usb_ops) }, + { USB_DEVICE(0x13b1, 0x0011), USB_DEVICE_DATA(&rt2500usb_ops) }, + { USB_DEVICE(0x13b1, 0x001a), USB_DEVICE_DATA(&rt2500usb_ops) }, + /* Conceptronic */ + { USB_DEVICE(0x14b2, 0x3c02), USB_DEVICE_DATA(&rt2500usb_ops) }, + /* D-LINK */ + { USB_DEVICE(0x2001, 0x3c00), USB_DEVICE_DATA(&rt2500usb_ops) }, + /* Gigabyte */ + { USB_DEVICE(0x1044, 0x8001), USB_DEVICE_DATA(&rt2500usb_ops) }, + { USB_DEVICE(0x1044, 0x8007), USB_DEVICE_DATA(&rt2500usb_ops) }, + /* Hercules */ + { USB_DEVICE(0x06f8, 0xe000), USB_DEVICE_DATA(&rt2500usb_ops) }, + /* Melco */ + { USB_DEVICE(0x0411, 0x0066), USB_DEVICE_DATA(&rt2500usb_ops) }, + { USB_DEVICE(0x0411, 0x0067), USB_DEVICE_DATA(&rt2500usb_ops) }, + { USB_DEVICE(0x0411, 0x008b), USB_DEVICE_DATA(&rt2500usb_ops) }, + { USB_DEVICE(0x0411, 0x0097), USB_DEVICE_DATA(&rt2500usb_ops) }, + + /* MSI */ + { USB_DEVICE(0x0db0, 0x6861), USB_DEVICE_DATA(&rt2500usb_ops) }, + { USB_DEVICE(0x0db0, 0x6865), USB_DEVICE_DATA(&rt2500usb_ops) }, + { USB_DEVICE(0x0db0, 0x6869), USB_DEVICE_DATA(&rt2500usb_ops) }, + /* Ralink */ + { USB_DEVICE(0x148f, 0x1706), USB_DEVICE_DATA(&rt2500usb_ops) }, + { USB_DEVICE(0x148f, 0x2570), USB_DEVICE_DATA(&rt2500usb_ops) }, + { USB_DEVICE(0x148f, 0x2573), USB_DEVICE_DATA(&rt2500usb_ops) }, + { USB_DEVICE(0x148f, 0x9020), USB_DEVICE_DATA(&rt2500usb_ops) }, + /* Siemens */ + { USB_DEVICE(0x0681, 0x3c06), USB_DEVICE_DATA(&rt2500usb_ops) }, + /* SMC */ + { USB_DEVICE(0x0707, 0xee13), USB_DEVICE_DATA(&rt2500usb_ops) }, + /* Spairon */ + { USB_DEVICE(0x114b, 0x0110), USB_DEVICE_DATA(&rt2500usb_ops) }, + /* Trust */ + { USB_DEVICE(0x0eb0, 0x9020), USB_DEVICE_DATA(&rt2500usb_ops) }, + /* Zinwell */ + { USB_DEVICE(0x5a57, 0x0260), USB_DEVICE_DATA(&rt2500usb_ops) }, + { 0, } +}; + +MODULE_AUTHOR(DRV_PROJECT); +MODULE_VERSION(DRV_VERSION); +MODULE_DESCRIPTION("Ralink RT2500 USB Wireless LAN driver."); +MODULE_SUPPORTED_DEVICE("Ralink RT2570 USB chipset based cards"); +MODULE_DEVICE_TABLE(usb, rt2500usb_device_table); +MODULE_LICENSE("GPL"); + +static struct usb_driver rt2500usb_driver = { + .name = DRV_NAME, + .id_table = rt2500usb_device_table, + .probe = rt2x00usb_probe, + .disconnect = rt2x00usb_disconnect, + .suspend = rt2x00usb_suspend, + .resume = rt2x00usb_resume, +}; + +static int __init rt2500usb_init(void) +{ + return usb_register(&rt2500usb_driver); +} + +static void __exit rt2500usb_exit(void) +{ + usb_deregister(&rt2500usb_driver); +} + +module_init(rt2500usb_init); +module_exit(rt2500usb_exit); diff --git a/drivers/net/wireless/rt2x00/rt2500usb.h b/drivers/net/wireless/rt2x00/rt2500usb.h new file mode 100644 index 0000000000000000000000000000000000000000..b18d56e73cf1cf8c5a5ed7d2316bf9f67e8802d1 --- /dev/null +++ b/drivers/net/wireless/rt2x00/rt2500usb.h @@ -0,0 +1,798 @@ +/* + Copyright (C) 2004 - 2007 rt2x00 SourceForge Project + + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or 
+ (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the + Free Software Foundation, Inc., + 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + */ + +/* + Module: rt2500usb + Abstract: Data structures and registers for the rt2500usb module. + Supported chipsets: RT2570. + */ + +#ifndef RT2500USB_H +#define RT2500USB_H + +/* + * RF chip defines. + */ +#define RF2522 0x0000 +#define RF2523 0x0001 +#define RF2524 0x0002 +#define RF2525 0x0003 +#define RF2525E 0x0005 +#define RF5222 0x0010 + +/* + * RT2570 version + */ +#define RT2570_VERSION_B 2 +#define RT2570_VERSION_C 3 +#define RT2570_VERSION_D 4 + +/* + * Signal information. + * Defaul offset is required for RSSI <-> dBm conversion. + */ +#define MAX_SIGNAL 100 +#define MAX_RX_SSI -1 +#define DEFAULT_RSSI_OFFSET 120 + +/* + * Register layout information. + */ +#define CSR_REG_BASE 0x0400 +#define CSR_REG_SIZE 0x0100 +#define EEPROM_BASE 0x0000 +#define EEPROM_SIZE 0x006a +#define BBP_SIZE 0x0060 +#define RF_SIZE 0x0014 + +/* + * Control/Status Registers(CSR). + * Some values are set in TU, whereas 1 TU == 1024 us. + */ + +/* + * MAC_CSR0: ASIC revision number. + */ +#define MAC_CSR0 0x0400 + +/* + * MAC_CSR1: System control. + * SOFT_RESET: Software reset, 1: reset, 0: normal. + * BBP_RESET: Hardware reset, 1: reset, 0, release. + * HOST_READY: Host ready after initialization. + */ +#define MAC_CSR1 0x0402 +#define MAC_CSR1_SOFT_RESET FIELD16(0x00000001) +#define MAC_CSR1_BBP_RESET FIELD16(0x00000002) +#define MAC_CSR1_HOST_READY FIELD16(0x00000004) + +/* + * MAC_CSR2: STA MAC register 0. + */ +#define MAC_CSR2 0x0404 +#define MAC_CSR2_BYTE0 FIELD16(0x00ff) +#define MAC_CSR2_BYTE1 FIELD16(0xff00) + +/* + * MAC_CSR3: STA MAC register 1. + */ +#define MAC_CSR3 0x0406 +#define MAC_CSR3_BYTE2 FIELD16(0x00ff) +#define MAC_CSR3_BYTE3 FIELD16(0xff00) + +/* + * MAC_CSR4: STA MAC register 2. + */ +#define MAC_CSR4 0X0408 +#define MAC_CSR4_BYTE4 FIELD16(0x00ff) +#define MAC_CSR4_BYTE5 FIELD16(0xff00) + +/* + * MAC_CSR5: BSSID register 0. + */ +#define MAC_CSR5 0x040a +#define MAC_CSR5_BYTE0 FIELD16(0x00ff) +#define MAC_CSR5_BYTE1 FIELD16(0xff00) + +/* + * MAC_CSR6: BSSID register 1. + */ +#define MAC_CSR6 0x040c +#define MAC_CSR6_BYTE2 FIELD16(0x00ff) +#define MAC_CSR6_BYTE3 FIELD16(0xff00) + +/* + * MAC_CSR7: BSSID register 2. + */ +#define MAC_CSR7 0x040e +#define MAC_CSR7_BYTE4 FIELD16(0x00ff) +#define MAC_CSR7_BYTE5 FIELD16(0xff00) + +/* + * MAC_CSR8: Max frame length. + */ +#define MAC_CSR8 0x0410 +#define MAC_CSR8_MAX_FRAME_UNIT FIELD16(0x0fff) + +/* + * Misc MAC_CSR registers. + * MAC_CSR9: Timer control. + * MAC_CSR10: Slot time. + * MAC_CSR11: IFS. + * MAC_CSR12: EIFS. + * MAC_CSR13: Power mode0. + * MAC_CSR14: Power mode1. + * MAC_CSR15: Power saving transition0 + * MAC_CSR16: Power saving transition1 + */ +#define MAC_CSR9 0x0412 +#define MAC_CSR10 0x0414 +#define MAC_CSR11 0x0416 +#define MAC_CSR12 0x0418 +#define MAC_CSR13 0x041a +#define MAC_CSR14 0x041c +#define MAC_CSR15 0x041e +#define MAC_CSR16 0x0420 + +/* + * MAC_CSR17: Manual power control / status register. + * Allowed state: 0 deep_sleep, 1: sleep, 2: standby, 3: awake. + * SET_STATE: Set state. Write 1 to trigger, self cleared. 
+ * BBP_DESIRE_STATE: BBP desired state. + * RF_DESIRE_STATE: RF desired state. + * BBP_CURRENT_STATE: BBP current state. + * RF_CURRENT_STATE: RF current state. + * PUT_TO_SLEEP: Put to sleep. Write 1 to trigger, self cleared. + */ +#define MAC_CSR17 0x0422 +#define MAC_CSR17_SET_STATE FIELD16(0x0001) +#define MAC_CSR17_BBP_DESIRE_STATE FIELD16(0x0006) +#define MAC_CSR17_RF_DESIRE_STATE FIELD16(0x0018) +#define MAC_CSR17_BBP_CURR_STATE FIELD16(0x0060) +#define MAC_CSR17_RF_CURR_STATE FIELD16(0x0180) +#define MAC_CSR17_PUT_TO_SLEEP FIELD16(0x0200) + +/* + * MAC_CSR18: Wakeup timer register. + * DELAY_AFTER_BEACON: Delay after Tbcn expired in units of 1/16 TU. + * BEACONS_BEFORE_WAKEUP: Number of beacon before wakeup. + * AUTO_WAKE: Enable auto wakeup / sleep mechanism. + */ +#define MAC_CSR18 0x0424 +#define MAC_CSR18_DELAY_AFTER_BEACON FIELD16(0x00ff) +#define MAC_CSR18_BEACONS_BEFORE_WAKEUP FIELD16(0x7f00) +#define MAC_CSR18_AUTO_WAKE FIELD16(0x8000) + +/* + * MAC_CSR19: GPIO control register. + */ +#define MAC_CSR19 0x0426 + +/* + * MAC_CSR20: LED control register. + * ACTIVITY: 0: idle, 1: active. + * LINK: 0: linkoff, 1: linkup. + * ACTIVITY_POLARITY: 0: active low, 1: active high. + */ +#define MAC_CSR20 0x0428 +#define MAC_CSR20_ACTIVITY FIELD16(0x0001) +#define MAC_CSR20_LINK FIELD16(0x0002) +#define MAC_CSR20_ACTIVITY_POLARITY FIELD16(0x0004) + +/* + * MAC_CSR21: LED control register. + * ON_PERIOD: On period, default 70ms. + * OFF_PERIOD: Off period, default 30ms. + */ +#define MAC_CSR21 0x042a +#define MAC_CSR21_ON_PERIOD FIELD16(0x00ff) +#define MAC_CSR21_OFF_PERIOD FIELD16(0xff00) + +/* + * Collision window control register. + */ +#define MAC_CSR22 0x042c + +/* + * Transmit related CSRs. + * Some values are set in TU, whereas 1 TU == 1024 us. + */ + +/* + * TXRX_CSR0: Security control register. + */ +#define TXRX_CSR0 0x0440 +#define TXRX_CSR0_ALGORITHM FIELD16(0x0007) +#define TXRX_CSR0_IV_OFFSET FIELD16(0x01f8) +#define TXRX_CSR0_KEY_ID FIELD16(0x1e00) + +/* + * TXRX_CSR1: TX configuration. + * ACK_TIMEOUT: ACK Timeout in unit of 1-us. + * TSF_OFFSET: TSF offset in MAC header. + * AUTO_SEQUENCE: Let ASIC control frame sequence number. + */ +#define TXRX_CSR1 0x0442 +#define TXRX_CSR1_ACK_TIMEOUT FIELD16(0x00ff) +#define TXRX_CSR1_TSF_OFFSET FIELD16(0x7f00) +#define TXRX_CSR1_AUTO_SEQUENCE FIELD16(0x8000) + +/* + * TXRX_CSR2: RX control. + * DISABLE_RX: Disable rx engine. + * DROP_CRC: Drop crc error. + * DROP_PHYSICAL: Drop physical error. + * DROP_CONTROL: Drop control frame. + * DROP_NOT_TO_ME: Drop not to me unicast frame. + * DROP_TODS: Drop frame tods bit is true. + * DROP_VERSION_ERROR: Drop version error frame. + * DROP_MCAST: Drop multicast frames. + * DROP_BCAST: Drop broadcast frames. + */ +#define TXRX_CSR2 0x0444 +#define TXRX_CSR2_DISABLE_RX FIELD16(0x0001) +#define TXRX_CSR2_DROP_CRC FIELD16(0x0002) +#define TXRX_CSR2_DROP_PHYSICAL FIELD16(0x0004) +#define TXRX_CSR2_DROP_CONTROL FIELD16(0x0008) +#define TXRX_CSR2_DROP_NOT_TO_ME FIELD16(0x0010) +#define TXRX_CSR2_DROP_TODS FIELD16(0x0020) +#define TXRX_CSR2_DROP_VERSION_ERROR FIELD16(0x0040) +#define TXRX_CSR2_DROP_MULTICAST FIELD16(0x0200) +#define TXRX_CSR2_DROP_BROADCAST FIELD16(0x0400) + +/* + * RX BBP ID registers + * TXRX_CSR3: CCK RX BBP ID. + * TXRX_CSR4: OFDM RX BBP ID. + */ +#define TXRX_CSR3 0x0446 +#define TXRX_CSR4 0x0448 + +/* + * TXRX_CSR5: CCK TX BBP ID0. 
+ */ +#define TXRX_CSR5 0x044a +#define TXRX_CSR5_BBP_ID0 FIELD16(0x007f) +#define TXRX_CSR5_BBP_ID0_VALID FIELD16(0x0080) +#define TXRX_CSR5_BBP_ID1 FIELD16(0x7f00) +#define TXRX_CSR5_BBP_ID1_VALID FIELD16(0x8000) + +/* + * TXRX_CSR6: CCK TX BBP ID1. + */ +#define TXRX_CSR6 0x044c +#define TXRX_CSR6_BBP_ID0 FIELD16(0x007f) +#define TXRX_CSR6_BBP_ID0_VALID FIELD16(0x0080) +#define TXRX_CSR6_BBP_ID1 FIELD16(0x7f00) +#define TXRX_CSR6_BBP_ID1_VALID FIELD16(0x8000) + +/* + * TXRX_CSR7: OFDM TX BBP ID0. + */ +#define TXRX_CSR7 0x044e +#define TXRX_CSR7_BBP_ID0 FIELD16(0x007f) +#define TXRX_CSR7_BBP_ID0_VALID FIELD16(0x0080) +#define TXRX_CSR7_BBP_ID1 FIELD16(0x7f00) +#define TXRX_CSR7_BBP_ID1_VALID FIELD16(0x8000) + +/* + * TXRX_CSR5: OFDM TX BBP ID1. + */ +#define TXRX_CSR8 0x0450 +#define TXRX_CSR8_BBP_ID0 FIELD16(0x007f) +#define TXRX_CSR8_BBP_ID0_VALID FIELD16(0x0080) +#define TXRX_CSR8_BBP_ID1 FIELD16(0x7f00) +#define TXRX_CSR8_BBP_ID1_VALID FIELD16(0x8000) + +/* + * TXRX_CSR9: TX ACK time-out. + */ +#define TXRX_CSR9 0x0452 + +/* + * TXRX_CSR10: Auto responder control. + */ +#define TXRX_CSR10 0x0454 +#define TXRX_CSR10_AUTORESPOND_PREAMBLE FIELD16(0x0004) + +/* + * TXRX_CSR11: Auto responder basic rate. + */ +#define TXRX_CSR11 0x0456 + +/* + * ACK/CTS time registers. + */ +#define TXRX_CSR12 0x0458 +#define TXRX_CSR13 0x045a +#define TXRX_CSR14 0x045c +#define TXRX_CSR15 0x045e +#define TXRX_CSR16 0x0460 +#define TXRX_CSR17 0x0462 + +/* + * TXRX_CSR18: Synchronization control register. + */ +#define TXRX_CSR18 0x0464 +#define TXRX_CSR18_OFFSET FIELD16(0x000f) +#define TXRX_CSR18_INTERVAL FIELD16(0xfff0) + +/* + * TXRX_CSR19: Synchronization control register. + * TSF_COUNT: Enable TSF auto counting. + * TSF_SYNC: Tsf sync, 0: disable, 1: infra, 2: ad-hoc/master mode. + * TBCN: Enable Tbcn with reload value. + * BEACON_GEN: Enable beacon generator. + */ +#define TXRX_CSR19 0x0466 +#define TXRX_CSR19_TSF_COUNT FIELD16(0x0001) +#define TXRX_CSR19_TSF_SYNC FIELD16(0x0006) +#define TXRX_CSR19_TBCN FIELD16(0x0008) +#define TXRX_CSR19_BEACON_GEN FIELD16(0x0010) + +/* + * TXRX_CSR20: Tx BEACON offset time control register. + * OFFSET: In units of usec. + * BCN_EXPECT_WINDOW: Default: 2^CWmin + */ +#define TXRX_CSR20 0x0468 +#define TXRX_CSR20_OFFSET FIELD16(0x1fff) +#define TXRX_CSR20_BCN_EXPECT_WINDOW FIELD16(0xe000) + +/* + * TXRX_CSR21 + */ +#define TXRX_CSR21 0x046a + +/* + * Encryption related CSRs. + * + */ + +/* + * SEC_CSR0-SEC_CSR7: Shared key 0, word 0-7 + */ +#define SEC_CSR0 0x0480 +#define SEC_CSR1 0x0482 +#define SEC_CSR2 0x0484 +#define SEC_CSR3 0x0486 +#define SEC_CSR4 0x0488 +#define SEC_CSR5 0x048a +#define SEC_CSR6 0x048c +#define SEC_CSR7 0x048e + +/* + * SEC_CSR8-SEC_CSR15: Shared key 1, word 0-7 + */ +#define SEC_CSR8 0x0490 +#define SEC_CSR9 0x0492 +#define SEC_CSR10 0x0494 +#define SEC_CSR11 0x0496 +#define SEC_CSR12 0x0498 +#define SEC_CSR13 0x049a +#define SEC_CSR14 0x049c +#define SEC_CSR15 0x049e + +/* + * SEC_CSR16-SEC_CSR23: Shared key 2, word 0-7 + */ +#define SEC_CSR16 0x04a0 +#define SEC_CSR17 0x04a2 +#define SEC_CSR18 0X04A4 +#define SEC_CSR19 0x04a6 +#define SEC_CSR20 0x04a8 +#define SEC_CSR21 0x04aa +#define SEC_CSR22 0x04ac +#define SEC_CSR23 0x04ae + +/* + * SEC_CSR24-SEC_CSR31: Shared key 3, word 0-7 + */ +#define SEC_CSR24 0x04b0 +#define SEC_CSR25 0x04b2 +#define SEC_CSR26 0x04b4 +#define SEC_CSR27 0x04b6 +#define SEC_CSR28 0x04b8 +#define SEC_CSR29 0x04ba +#define SEC_CSR30 0x04bc +#define SEC_CSR31 0x04be + +/* + * PHY control registers. 
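+ * The BBP is programmed indirectly through PHY_CSR7 (data and
+ * register id) and PHY_CSR8 (busy flag), the RF chip through
+ * PHY_CSR9 and PHY_CSR10, see the field definitions below.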
+ */ + +/* + * PHY_CSR0: RF switching timing control. + */ +#define PHY_CSR0 0x04c0 + +/* + * PHY_CSR1: TX PA configuration. + */ +#define PHY_CSR1 0x04c2 + +/* + * MAC configuration registers. + * PHY_CSR2: TX MAC configuration. + * PHY_CSR3: RX MAC configuration. + */ +#define PHY_CSR2 0x04c4 +#define PHY_CSR3 0x04c6 + +/* + * PHY_CSR4: Interface configuration. + */ +#define PHY_CSR4 0x04c8 +#define PHY_CSR4_LOW_RF_LE FIELD16(0x0001) + +/* + * BBP pre-TX registers. + * PHY_CSR5: BBP pre-TX CCK. + */ +#define PHY_CSR5 0x04ca +#define PHY_CSR5_CCK FIELD16(0x0003) +#define PHY_CSR5_CCK_FLIP FIELD16(0x0004) + +/* + * BBP pre-TX registers. + * PHY_CSR6: BBP pre-TX OFDM. + */ +#define PHY_CSR6 0x04cc +#define PHY_CSR6_OFDM FIELD16(0x0003) +#define PHY_CSR6_OFDM_FLIP FIELD16(0x0004) + +/* + * PHY_CSR7: BBP access register 0. + * BBP_DATA: BBP data. + * BBP_REG_ID: BBP register ID. + * BBP_READ_CONTROL: 0: write, 1: read. + */ +#define PHY_CSR7 0x04ce +#define PHY_CSR7_DATA FIELD16(0x00ff) +#define PHY_CSR7_REG_ID FIELD16(0x7f00) +#define PHY_CSR7_READ_CONTROL FIELD16(0x8000) + +/* + * PHY_CSR8: BBP access register 1. + * BBP_BUSY: ASIC is busy execute BBP programming. + */ +#define PHY_CSR8 0x04d0 +#define PHY_CSR8_BUSY FIELD16(0x0001) + +/* + * PHY_CSR9: RF access register. + * RF_VALUE: Register value + id to program into rf/if. + */ +#define PHY_CSR9 0x04d2 +#define PHY_CSR9_RF_VALUE FIELD16(0xffff) + +/* + * PHY_CSR10: RF access register. + * RF_VALUE: Register value + id to program into rf/if. + * RF_NUMBER_OF_BITS: Number of bits used in value (i:20, rfmd:22). + * RF_IF_SELECT: Chip to program: 0: rf, 1: if. + * RF_PLL_LD: Rf pll_ld status. + * RF_BUSY: 1: asic is busy execute rf programming. + */ +#define PHY_CSR10 0x04d4 +#define PHY_CSR10_RF_VALUE FIELD16(0x00ff) +#define PHY_CSR10_RF_NUMBER_OF_BITS FIELD16(0x1f00) +#define PHY_CSR10_RF_IF_SELECT FIELD16(0x2000) +#define PHY_CSR10_RF_PLL_LD FIELD16(0x4000) +#define PHY_CSR10_RF_BUSY FIELD16(0x8000) + +/* + * STA_CSR0: FCS error count. + * FCS_ERROR: FCS error count, cleared when read. + */ +#define STA_CSR0 0x04e0 +#define STA_CSR0_FCS_ERROR FIELD16(0xffff) + +/* + * STA_CSR1: PLCP error count. + */ +#define STA_CSR1 0x04e2 + +/* + * STA_CSR2: LONG error count. + */ +#define STA_CSR2 0x04e4 + +/* + * STA_CSR3: CCA false alarm. + * FALSE_CCA_ERROR: False CCA error count, cleared when read. + */ +#define STA_CSR3 0x04e6 +#define STA_CSR3_FALSE_CCA_ERROR FIELD16(0xffff) + +/* + * STA_CSR4: RX FIFO overflow. + */ +#define STA_CSR4 0x04e8 + +/* + * STA_CSR5: Beacon sent counter. + */ +#define STA_CSR5 0x04ea + +/* + * Statistics registers + */ +#define STA_CSR6 0x04ec +#define STA_CSR7 0x04ee +#define STA_CSR8 0x04f0 +#define STA_CSR9 0x04f2 +#define STA_CSR10 0x04f4 + +/* + * BBP registers. + * The wordsize of the BBP is 8 bits. + */ + +/* + * R2: TX antenna control + */ +#define BBP_R2_TX_ANTENNA FIELD8(0x03) +#define BBP_R2_TX_IQ_FLIP FIELD8(0x04) + +/* + * R14: RX antenna control + */ +#define BBP_R14_RX_ANTENNA FIELD8(0x03) +#define BBP_R14_RX_IQ_FLIP FIELD8(0x04) + +/* + * RF registers. + */ + +/* + * RF 1 + */ +#define RF1_TUNER FIELD32(0x00020000) + +/* + * RF 3 + */ +#define RF3_TUNER FIELD32(0x00000100) +#define RF3_TXPOWER FIELD32(0x00003e00) + +/* + * EEPROM contents. + */ + +/* + * HW MAC address. 
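+ * The EEPROM is addressed in 16bit words, so the 6 byte MAC address
+ * is spread over the 3 words at offsets 0x0002 - 0x0004.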
+ */ +#define EEPROM_MAC_ADDR_0 0x0002 +#define EEPROM_MAC_ADDR_BYTE0 FIELD16(0x00ff) +#define EEPROM_MAC_ADDR_BYTE1 FIELD16(0xff00) +#define EEPROM_MAC_ADDR1 0x0003 +#define EEPROM_MAC_ADDR_BYTE2 FIELD16(0x00ff) +#define EEPROM_MAC_ADDR_BYTE3 FIELD16(0xff00) +#define EEPROM_MAC_ADDR_2 0x0004 +#define EEPROM_MAC_ADDR_BYTE4 FIELD16(0x00ff) +#define EEPROM_MAC_ADDR_BYTE5 FIELD16(0xff00) + +/* + * EEPROM antenna. + * ANTENNA_NUM: Number of antenna's. + * TX_DEFAULT: Default antenna 0: diversity, 1: A, 2: B. + * RX_DEFAULT: Default antenna 0: diversity, 1: A, 2: B. + * LED_MODE: 0: default, 1: TX/RX activity, 2: Single (ignore link), 3: rsvd. + * DYN_TXAGC: Dynamic TX AGC control. + * HARDWARE_RADIO: 1: Hardware controlled radio. Read GPIO0. + * RF_TYPE: Rf_type of this adapter. + */ +#define EEPROM_ANTENNA 0x000b +#define EEPROM_ANTENNA_NUM FIELD16(0x0003) +#define EEPROM_ANTENNA_TX_DEFAULT FIELD16(0x000c) +#define EEPROM_ANTENNA_RX_DEFAULT FIELD16(0x0030) +#define EEPROM_ANTENNA_LED_MODE FIELD16(0x01c0) +#define EEPROM_ANTENNA_DYN_TXAGC FIELD16(0x0200) +#define EEPROM_ANTENNA_HARDWARE_RADIO FIELD16(0x0400) +#define EEPROM_ANTENNA_RF_TYPE FIELD16(0xf800) + +/* + * EEPROM NIC config. + * CARDBUS_ACCEL: 0: enable, 1: disable. + * DYN_BBP_TUNE: 0: enable, 1: disable. + * CCK_TX_POWER: CCK TX power compensation. + */ +#define EEPROM_NIC 0x000c +#define EEPROM_NIC_CARDBUS_ACCEL FIELD16(0x0001) +#define EEPROM_NIC_DYN_BBP_TUNE FIELD16(0x0002) +#define EEPROM_NIC_CCK_TX_POWER FIELD16(0x000c) + +/* + * EEPROM geography. + * GEO: Default geography setting for device. + */ +#define EEPROM_GEOGRAPHY 0x000d +#define EEPROM_GEOGRAPHY_GEO FIELD16(0x0f00) + +/* + * EEPROM BBP. + */ +#define EEPROM_BBP_START 0x000e +#define EEPROM_BBP_SIZE 16 +#define EEPROM_BBP_VALUE FIELD16(0x00ff) +#define EEPROM_BBP_REG_ID FIELD16(0xff00) + +/* + * EEPROM TXPOWER + */ +#define EEPROM_TXPOWER_START 0x001e +#define EEPROM_TXPOWER_SIZE 7 +#define EEPROM_TXPOWER_1 FIELD16(0x00ff) +#define EEPROM_TXPOWER_2 FIELD16(0xff00) + +/* + * EEPROM Tuning threshold + */ +#define EEPROM_BBPTUNE 0x0030 +#define EEPROM_BBPTUNE_THRESHOLD FIELD16(0x00ff) + +/* + * EEPROM BBP R24 Tuning. + */ +#define EEPROM_BBPTUNE_R24 0x0031 +#define EEPROM_BBPTUNE_R24_LOW FIELD16(0x00ff) +#define EEPROM_BBPTUNE_R24_HIGH FIELD16(0xff00) + +/* + * EEPROM BBP R25 Tuning. + */ +#define EEPROM_BBPTUNE_R25 0x0032 +#define EEPROM_BBPTUNE_R25_LOW FIELD16(0x00ff) +#define EEPROM_BBPTUNE_R25_HIGH FIELD16(0xff00) + +/* + * EEPROM BBP R24 Tuning. + */ +#define EEPROM_BBPTUNE_R61 0x0033 +#define EEPROM_BBPTUNE_R61_LOW FIELD16(0x00ff) +#define EEPROM_BBPTUNE_R61_HIGH FIELD16(0xff00) + +/* + * EEPROM BBP VGC Tuning. + */ +#define EEPROM_BBPTUNE_VGC 0x0034 +#define EEPROM_BBPTUNE_VGCUPPER FIELD16(0x00ff) + +/* + * EEPROM BBP R17 Tuning. + */ +#define EEPROM_BBPTUNE_R17 0x0035 +#define EEPROM_BBPTUNE_R17_LOW FIELD16(0x00ff) +#define EEPROM_BBPTUNE_R17_HIGH FIELD16(0xff00) + +/* + * RSSI <-> dBm offset calibration + */ +#define EEPROM_CALIBRATE_OFFSET 0x0036 +#define EEPROM_CALIBRATE_OFFSET_RSSI FIELD16(0x00ff) + +/* + * DMA descriptor defines. + */ +#define TXD_DESC_SIZE ( 5 * sizeof(struct data_desc) ) +#define RXD_DESC_SIZE ( 4 * sizeof(struct data_desc) ) + +/* + * TX descriptor format for TX, PRIO, ATIM and Beacon Ring. 
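+ * The descriptor is made up of 5 words (TXD_DESC_SIZE above); each
+ * FIELD32 definition below is a bitmask within one of the 32bit
+ * words Word0 - Word4.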
+ */ + +/* + * Word0 + */ +#define TXD_W0_PACKET_ID FIELD32(0x0000000f) +#define TXD_W0_RETRY_LIMIT FIELD32(0x000000f0) +#define TXD_W0_MORE_FRAG FIELD32(0x00000100) +#define TXD_W0_ACK FIELD32(0x00000200) +#define TXD_W0_TIMESTAMP FIELD32(0x00000400) +#define TXD_W0_OFDM FIELD32(0x00000800) +#define TXD_W0_NEW_SEQ FIELD32(0x00001000) +#define TXD_W0_IFS FIELD32(0x00006000) +#define TXD_W0_DATABYTE_COUNT FIELD32(0x0fff0000) +#define TXD_W0_CIPHER FIELD32(0x20000000) +#define TXD_W0_KEY_ID FIELD32(0xc0000000) + +/* + * Word1 + */ +#define TXD_W1_IV_OFFSET FIELD32(0x0000003f) +#define TXD_W1_AIFS FIELD32(0x000000c0) +#define TXD_W1_CWMIN FIELD32(0x00000f00) +#define TXD_W1_CWMAX FIELD32(0x0000f000) + +/* + * Word2: PLCP information + */ +#define TXD_W2_PLCP_SIGNAL FIELD32(0x000000ff) +#define TXD_W2_PLCP_SERVICE FIELD32(0x0000ff00) +#define TXD_W2_PLCP_LENGTH_LOW FIELD32(0x00ff0000) +#define TXD_W2_PLCP_LENGTH_HIGH FIELD32(0xff000000) + +/* + * Word3 + */ +#define TXD_W3_IV FIELD32(0xffffffff) + +/* + * Word4 + */ +#define TXD_W4_EIV FIELD32(0xffffffff) + +/* + * RX descriptor format for RX Ring. + */ + +/* + * Word0 + */ +#define RXD_W0_UNICAST_TO_ME FIELD32(0x00000002) +#define RXD_W0_MULTICAST FIELD32(0x00000004) +#define RXD_W0_BROADCAST FIELD32(0x00000008) +#define RXD_W0_MY_BSS FIELD32(0x00000010) +#define RXD_W0_CRC_ERROR FIELD32(0x00000020) +#define RXD_W0_OFDM FIELD32(0x00000040) +#define RXD_W0_PHYSICAL_ERROR FIELD32(0x00000080) +#define RXD_W0_CIPHER FIELD32(0x00000100) +#define RXD_W0_CIPHER_ERROR FIELD32(0x00000200) +#define RXD_W0_DATABYTE_COUNT FIELD32(0x0fff0000) + +/* + * Word1 + */ +#define RXD_W1_RSSI FIELD32(0x000000ff) +#define RXD_W1_SIGNAL FIELD32(0x0000ff00) + +/* + * Word2 + */ +#define RXD_W2_IV FIELD32(0xffffffff) + +/* + * Word3 + */ +#define RXD_W3_EIV FIELD32(0xffffffff) + +/* + * Macro's for converting txpower from EEPROM to dscape value + * and from dscape value to register value. + */ +#define MIN_TXPOWER 0 +#define MAX_TXPOWER 31 +#define DEFAULT_TXPOWER 24 + +#define TXPOWER_FROM_DEV(__txpower) \ +({ \ + ((__txpower) > MAX_TXPOWER) ? \ + DEFAULT_TXPOWER : (__txpower); \ +}) + +#define TXPOWER_TO_DEV(__txpower) \ +({ \ + ((__txpower) <= MIN_TXPOWER) ? MIN_TXPOWER : \ + (((__txpower) >= MAX_TXPOWER) ? MAX_TXPOWER : \ + (__txpower)); \ +}) + +#endif /* RT2500USB_H */ diff --git a/drivers/net/wireless/rt2x00/rt2x00.h b/drivers/net/wireless/rt2x00/rt2x00.h new file mode 100644 index 0000000000000000000000000000000000000000..9845e584b731a48454ebf0ccf08ef606dffb8ea8 --- /dev/null +++ b/drivers/net/wireless/rt2x00/rt2x00.h @@ -0,0 +1,838 @@ +/* + Copyright (C) 2004 - 2007 rt2x00 SourceForge Project + + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the + Free Software Foundation, Inc., + 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + */ + +/* + Module: rt2x00 + Abstract: rt2x00 global information. 
+ */ + +#ifndef RT2X00_H +#define RT2X00_H + +#include +#include +#include +#include +#include + +#include + +#include "rt2x00debug.h" +#include "rt2x00reg.h" +#include "rt2x00ring.h" + +/* + * Module information. + * DRV_NAME should be set within the individual module source files. + */ +#define DRV_VERSION "2.0.10" +#define DRV_PROJECT "http://rt2x00.serialmonkey.com" + +/* + * Debug definitions. + * Debug output has to be enabled during compile time. + */ +#define DEBUG_PRINTK_MSG(__dev, __kernlvl, __lvl, __msg, __args...) \ + printk(__kernlvl "%s -> %s: %s - " __msg, \ + wiphy_name((__dev)->hw->wiphy), __FUNCTION__, __lvl, ##__args) + +#define DEBUG_PRINTK_PROBE(__kernlvl, __lvl, __msg, __args...) \ + printk(__kernlvl "%s -> %s: %s - " __msg, \ + DRV_NAME, __FUNCTION__, __lvl, ##__args) + +#ifdef CONFIG_RT2X00_DEBUG +#define DEBUG_PRINTK(__dev, __kernlvl, __lvl, __msg, __args...) \ + DEBUG_PRINTK_MSG(__dev, __kernlvl, __lvl, __msg, ##__args); +#else +#define DEBUG_PRINTK(__dev, __kernlvl, __lvl, __msg, __args...) \ + do { } while (0) +#endif /* CONFIG_RT2X00_DEBUG */ + +/* + * Various debug levels. + * The debug levels PANIC and ERROR both indicate serious problems, + * for this reason they should never be ignored. + * The special ERROR_PROBE message is for messages that are generated + * when the rt2x00_dev is not yet initialized. + */ +#define PANIC(__dev, __msg, __args...) \ + DEBUG_PRINTK_MSG(__dev, KERN_CRIT, "Panic", __msg, ##__args) +#define ERROR(__dev, __msg, __args...) \ + DEBUG_PRINTK_MSG(__dev, KERN_ERR, "Error", __msg, ##__args) +#define ERROR_PROBE(__msg, __args...) \ + DEBUG_PRINTK_PROBE(KERN_ERR, "Error", __msg, ##__args) +#define WARNING(__dev, __msg, __args...) \ + DEBUG_PRINTK(__dev, KERN_WARNING, "Warning", __msg, ##__args) +#define NOTICE(__dev, __msg, __args...) \ + DEBUG_PRINTK(__dev, KERN_NOTICE, "Notice", __msg, ##__args) +#define INFO(__dev, __msg, __args...) \ + DEBUG_PRINTK(__dev, KERN_INFO, "Info", __msg, ##__args) +#define DEBUG(__dev, __msg, __args...) \ + DEBUG_PRINTK(__dev, KERN_DEBUG, "Debug", __msg, ##__args) +#define EEPROM(__dev, __msg, __args...) \ + DEBUG_PRINTK(__dev, KERN_DEBUG, "EEPROM recovery", __msg, ##__args) + +/* + * Ring sizes. + * Ralink PCI devices demand the Frame size to be a multiple of 128 bytes. + * DATA_FRAME_SIZE is used for TX, RX, ATIM and PRIO rings. + * MGMT_FRAME_SIZE is used for the BEACON ring. + */ +#define DATA_FRAME_SIZE 2432 +#define MGMT_FRAME_SIZE 256 + +/* + * Number of entries in a packet ring. + * PCI devices only need 1 Beacon entry, + * but USB devices require a second because they + * have to send a Guardian byte first. + */ +#define RX_ENTRIES 12 +#define TX_ENTRIES 12 +#define ATIM_ENTRIES 1 +#define BEACON_ENTRIES 2 + +/* + * Standard timing and size defines. + * These values should follow the ieee80211 specifications. 
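+ *
+ * For example, the derived values below work out as:
+ * PIFS = 10 + 20 = 30us and DIFS = 30 + 20 = 50us with the long
+ * slot time, SHORT_PIFS = 19us and SHORT_DIFS = 28us with the
+ * short slot time.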
+ */ +#define ACK_SIZE 14 +#define IEEE80211_HEADER 24 +#define PLCP 48 +#define BEACON 100 +#define PREAMBLE 144 +#define SHORT_PREAMBLE 72 +#define SLOT_TIME 20 +#define SHORT_SLOT_TIME 9 +#define SIFS 10 +#define PIFS ( SIFS + SLOT_TIME ) +#define SHORT_PIFS ( SIFS + SHORT_SLOT_TIME ) +#define DIFS ( PIFS + SLOT_TIME ) +#define SHORT_DIFS ( SHORT_PIFS + SHORT_SLOT_TIME ) +#define EIFS ( SIFS + (8 * (IEEE80211_HEADER + ACK_SIZE)) ) + +/* + * IEEE802.11 header defines + */ +static inline int is_rts_frame(u16 fc) +{ + return !!(((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_CTL) && + ((fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_RTS)); +} + +static inline int is_cts_frame(u16 fc) +{ + return !!(((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_CTL) && + ((fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_CTS)); +} + +static inline int is_probe_resp(u16 fc) +{ + return !!(((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_MGMT) && + ((fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_PROBE_RESP)); +} + +/* + * Chipset identification + * The chipset on the device is composed of a RT and RF chip. + * The chipset combination is important for determining device capabilities. + */ +struct rt2x00_chip { + u16 rt; +#define RT2460 0x0101 +#define RT2560 0x0201 +#define RT2570 0x1201 +#define RT2561s 0x0301 /* Turbo */ +#define RT2561 0x0302 +#define RT2661 0x0401 +#define RT2571 0x1300 + + u16 rf; + u32 rev; +}; + +/* + * RF register values that belong to a particular channel. + */ +struct rf_channel { + int channel; + u32 rf1; + u32 rf2; + u32 rf3; + u32 rf4; +}; + +/* + * To optimize the quality of the link we need to store + * the quality of received frames and periodically + * optimize the link. + */ +struct link { + /* + * Link tuner counter + * The number of times the link has been tuned + * since the radio has been switched on. + */ + u32 count; + + /* + * Statistics required for Link tuning. + * For the average RSSI value we use the "Walking average" approach. + * When adding RSSI to the average value the following calculation + * is needed: + * + * avg_rssi = ((avg_rssi * 7) + rssi) / 8; + * + * The advantage of this approach is that we only need 1 variable + * to store the average in (No need for a count and a total). + * But more importantly, normal average values will over time + * move less and less towards newly added values this results + * that with link tuning, the device can have a very good RSSI + * for a few minutes but when the device is moved away from the AP + * the average will not decrease fast enough to compensate. + * The walking average compensates this and will move towards + * the new values correctly allowing a effective link tuning. + */ + int avg_rssi; + int vgc_level; + int false_cca; + + /* + * Statistics required for Signal quality calculation. + * For calculating the Signal quality we have to determine + * the total number of success and failed RX and TX frames. + * After that we also use the average RSSI value to help + * determining the signal quality. + * For the calculation we will use the following algorithm: + * + * rssi_percentage = (avg_rssi * 100) / rssi_offset + * rx_percentage = (rx_success * 100) / rx_total + * tx_percentage = (tx_success * 100) / tx_total + * avg_signal = ((WEIGHT_RSSI * avg_rssi) + + * (WEIGHT_TX * tx_percentage) + + * (WEIGHT_RX * rx_percentage)) / 100 + * + * This value should then be checked to not be greated then 100. 
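+	 *
+	 * As a worked example of the walking average above: with
+	 * avg_rssi = 40 and a new sample of 48 the new average becomes
+	 * ((40 * 7) + 48) / 8 = 41, so a single outlier only moves the
+	 * average by 1/8th of the difference.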
+ */ + int rx_percentage; + int rx_success; + int rx_failed; + int tx_percentage; + int tx_success; + int tx_failed; +#define WEIGHT_RSSI 20 +#define WEIGHT_RX 40 +#define WEIGHT_TX 40 + + /* + * Work structure for scheduling periodic link tuning. + */ + struct delayed_work work; +}; + +/* + * Clear all counters inside the link structure. + * This can be easiest achieved by memsetting everything + * except for the work structure at the end. + */ +static inline void rt2x00_clear_link(struct link *link) +{ + memset(link, 0x00, sizeof(*link) - sizeof(link->work)); + link->rx_percentage = 50; + link->tx_percentage = 50; +} + +/* + * Update the rssi using the walking average approach. + */ +static inline void rt2x00_update_link_rssi(struct link *link, int rssi) +{ + if (!link->avg_rssi) + link->avg_rssi = rssi; + else + link->avg_rssi = ((link->avg_rssi * 7) + rssi) / 8; +} + +/* + * When the avg_rssi is unset or no frames have been received), + * we need to return the default value which needs to be less + * than -80 so the device will select the maximum sensitivity. + */ +static inline int rt2x00_get_link_rssi(struct link *link) +{ + return (link->avg_rssi && link->rx_success) ? link->avg_rssi : -128; +} + +/* + * Interface structure + * Configuration details about the current interface. + */ +struct interface { + /* + * Interface identification. The value is assigned + * to us by the 80211 stack, and is used to request + * new beacons. + */ + int id; + + /* + * Current working type (IEEE80211_IF_TYPE_*). + * When set to INVALID_INTERFACE, no interface is configured. + */ + int type; +#define INVALID_INTERFACE IEEE80211_IF_TYPE_INVALID + + /* + * MAC of the device. + */ + u8 mac[ETH_ALEN]; + + /* + * BBSID of the AP to associate with. + */ + u8 bssid[ETH_ALEN]; + + /* + * Store the packet filter mode for the current interface. + */ + unsigned int filter; +}; + +static inline int is_interface_present(struct interface *intf) +{ + return !!intf->id; +} + +static inline int is_interface_type(struct interface *intf, int type) +{ + return intf->type == type; +} + +/* + * Details about the supported modes, rates and channels + * of a particular chipset. This is used by rt2x00lib + * to build the ieee80211_hw_mode array for mac80211. + */ +struct hw_mode_spec { + /* + * Number of modes, rates and channels. + */ + int num_modes; + int num_rates; + int num_channels; + + /* + * txpower values. + */ + const u8 *tx_power_a; + const u8 *tx_power_bg; + u8 tx_power_default; + + /* + * Device/chipset specific value. + */ + const struct rf_channel *channels; +}; + +/* + * Configuration structure wrapper around the + * mac80211 configuration structure. + * When mac80211 configures the driver, rt2x00lib + * can precalculate values which are equal for all + * rt2x00 drivers. Those values can be stored in here. + */ +struct rt2x00lib_conf { + struct ieee80211_conf *conf; + struct rf_channel rf; + + int phymode; + + int basic_rates; + int slot_time; + + short sifs; + short pifs; + short difs; + short eifs; +}; + +/* + * rt2x00lib callback functions. + */ +struct rt2x00lib_ops { + /* + * Interrupt handlers. + */ + irq_handler_t irq_handler; + + /* + * Device init handlers. + */ + int (*probe_hw) (struct rt2x00_dev *rt2x00dev); + char *(*get_firmware_name) (struct rt2x00_dev *rt2x00dev); + int (*load_firmware) (struct rt2x00_dev *rt2x00dev, void *data, + const size_t len); + + /* + * Device initialization/deinitialization handlers. 
+ */ + int (*initialize) (struct rt2x00_dev *rt2x00dev); + void (*uninitialize) (struct rt2x00_dev *rt2x00dev); + + /* + * Radio control handlers. + */ + int (*set_device_state) (struct rt2x00_dev *rt2x00dev, + enum dev_state state); + int (*rfkill_poll) (struct rt2x00_dev *rt2x00dev); + void (*link_stats) (struct rt2x00_dev *rt2x00dev); + void (*reset_tuner) (struct rt2x00_dev *rt2x00dev); + void (*link_tuner) (struct rt2x00_dev *rt2x00dev); + + /* + * TX control handlers + */ + void (*write_tx_desc) (struct rt2x00_dev *rt2x00dev, + struct data_desc *txd, + struct txdata_entry_desc *desc, + struct ieee80211_hdr *ieee80211hdr, + unsigned int length, + struct ieee80211_tx_control *control); + int (*write_tx_data) (struct rt2x00_dev *rt2x00dev, + struct data_ring *ring, struct sk_buff *skb, + struct ieee80211_tx_control *control); + int (*get_tx_data_len) (struct rt2x00_dev *rt2x00dev, int maxpacket, + struct sk_buff *skb); + void (*kick_tx_queue) (struct rt2x00_dev *rt2x00dev, + unsigned int queue); + + /* + * RX control handlers + */ + void (*fill_rxdone) (struct data_entry *entry, + struct rxdata_entry_desc *desc); + + /* + * Configuration handlers. + */ + void (*config_mac_addr) (struct rt2x00_dev *rt2x00dev, __le32 *mac); + void (*config_bssid) (struct rt2x00_dev *rt2x00dev, __le32 *bssid); + void (*config_type) (struct rt2x00_dev *rt2x00dev, const int type, + const int tsf_sync); + void (*config_preamble) (struct rt2x00_dev *rt2x00dev, + const int short_preamble, + const int ack_timeout, + const int ack_consume_time); + void (*config) (struct rt2x00_dev *rt2x00dev, const unsigned int flags, + struct rt2x00lib_conf *libconf); +#define CONFIG_UPDATE_PHYMODE ( 1 << 1 ) +#define CONFIG_UPDATE_CHANNEL ( 1 << 2 ) +#define CONFIG_UPDATE_TXPOWER ( 1 << 3 ) +#define CONFIG_UPDATE_ANTENNA ( 1 << 4 ) +#define CONFIG_UPDATE_SLOT_TIME ( 1 << 5 ) +#define CONFIG_UPDATE_BEACON_INT ( 1 << 6 ) +#define CONFIG_UPDATE_ALL 0xffff +}; + +/* + * rt2x00 driver callback operation structure. + */ +struct rt2x00_ops { + const char *name; + const unsigned int rxd_size; + const unsigned int txd_size; + const unsigned int eeprom_size; + const unsigned int rf_size; + const struct rt2x00lib_ops *lib; + const struct ieee80211_ops *hw; +#ifdef CONFIG_RT2X00_LIB_DEBUGFS + const struct rt2x00debug *debugfs; +#endif /* CONFIG_RT2X00_LIB_DEBUGFS */ +}; + +/* + * rt2x00 device flags + */ +enum rt2x00_flags { + /* + * Device state flags + */ + DEVICE_PRESENT, + DEVICE_REGISTERED_HW, + DEVICE_INITIALIZED, + DEVICE_STARTED, + DEVICE_STARTED_SUSPEND, + DEVICE_ENABLED_RADIO, + DEVICE_DISABLED_RADIO_HW, + + /* + * Driver features + */ + DRIVER_REQUIRE_FIRMWARE, + DRIVER_REQUIRE_BEACON_RING, + + /* + * Driver configuration + */ + CONFIG_SUPPORT_HW_BUTTON, + CONFIG_FRAME_TYPE, + CONFIG_RF_SEQUENCE, + CONFIG_EXTERNAL_LNA_A, + CONFIG_EXTERNAL_LNA_BG, + CONFIG_DOUBLE_ANTENNA, + CONFIG_DISABLE_LINK_TUNING, + CONFIG_SHORT_PREAMBLE, +}; + +/* + * rt2x00 device structure. + */ +struct rt2x00_dev { + /* + * Device structure. + * The structure stored in here depends on the + * system bus (PCI or USB). + * When accessing this variable, the rt2x00dev_{pci,usb} + * macro's should be used for correct typecasting. + */ + void *dev; +#define rt2x00dev_pci(__dev) ( (struct pci_dev*)(__dev)->dev ) +#define rt2x00dev_usb(__dev) ( (struct usb_interface*)(__dev)->dev ) + + /* + * Callback functions. + */ + const struct rt2x00_ops *ops; + + /* + * IEEE80211 control structure. 
+ */ + struct ieee80211_hw *hw; + struct ieee80211_hw_mode *hwmodes; + unsigned int curr_hwmode; +#define HWMODE_B 0 +#define HWMODE_G 1 +#define HWMODE_A 2 + + /* + * rfkill structure for RF state switching support. + * This will only be compiled in when required. + */ +#ifdef CONFIG_RT2X00_LIB_RFKILL + struct rfkill *rfkill; + struct input_polled_dev *poll_dev; +#endif /* CONFIG_RT2X00_LIB_RFKILL */ + + /* + * If enabled, the debugfs interface structures + * required for deregistration of debugfs. + */ +#ifdef CONFIG_RT2X00_LIB_DEBUGFS + const struct rt2x00debug_intf *debugfs_intf; +#endif /* CONFIG_RT2X00_LIB_DEBUGFS */ + + /* + * Device flags. + * In these flags the current status and some + * of the device capabilities are stored. + */ + unsigned long flags; + + /* + * Chipset identification. + */ + struct rt2x00_chip chip; + + /* + * hw capability specifications. + */ + struct hw_mode_spec spec; + + /* + * Register pointers + * csr_addr: Base register address. (PCI) + * csr_cache: CSR cache for usb_control_msg. (USB) + */ + void __iomem *csr_addr; + void *csr_cache; + + /* + * Interface configuration. + */ + struct interface interface; + + /* + * Link quality + */ + struct link link; + + /* + * EEPROM data. + */ + __le16 *eeprom; + + /* + * Active RF register values. + * These are stored here so we don't need + * to read the rf registers and can directly + * use this value instead. + * This field should be accessed by using + * rt2x00_rf_read() and rt2x00_rf_write(). + */ + u32 *rf; + + /* + * Current TX power value. + */ + u16 tx_power; + + /* + * LED register (for rt61pci & rt73usb). + */ + u16 led_reg; + + /* + * Led mode (LED_MODE_*) + */ + u8 led_mode; + + /* + * Rssi <-> Dbm offset + */ + u8 rssi_offset; + + /* + * Frequency offset (for rt61pci & rt73usb). + */ + u8 freq_offset; + + /* + * Low level statistics which will have + * to be kept up to date while device is running. + */ + struct ieee80211_low_level_stats low_level_stats; + + /* + * RX configuration information. + */ + struct ieee80211_rx_status rx_status; + + /* + * Scheduled work. + */ + struct work_struct beacon_work; + struct work_struct filter_work; + struct work_struct config_work; + + /* + * Data ring arrays for RX, TX and Beacon. + * The Beacon array also contains the Atim ring + * if that is supported by the device. + */ + int data_rings; + struct data_ring *rx; + struct data_ring *tx; + struct data_ring *bcn; + + /* + * Firmware image. + */ + const struct firmware *fw; +}; + +/* + * For-each loop for the ring array. + * All rings have been allocated as a single array, + * this means we can create a very simply loop macro + * that is capable of looping through all rings. + * ring_end(), txring_end() and ring_loop() are helper macro's which + * should not be used directly. 
Instead the following should be used: + * ring_for_each() - Loops through all rings (RX, TX, Beacon & Atim) + * txring_for_each() - Loops through TX data rings (TX only) + * txringall_for_each() - Loops through all TX rings (TX, Beacon & Atim) + */ +#define ring_end(__dev) \ + &(__dev)->rx[(__dev)->data_rings] + +#define txring_end(__dev) \ + &(__dev)->tx[(__dev)->hw->queues] + +#define ring_loop(__entry, __start, __end) \ + for ((__entry) = (__start); \ + prefetch(&(__entry)[1]), (__entry) != (__end); \ + (__entry) = &(__entry)[1]) + +#define ring_for_each(__dev, __entry) \ + ring_loop(__entry, (__dev)->rx, ring_end(__dev)) + +#define txring_for_each(__dev, __entry) \ + ring_loop(__entry, (__dev)->tx, txring_end(__dev)) + +#define txringall_for_each(__dev, __entry) \ + ring_loop(__entry, (__dev)->tx, ring_end(__dev)) + +/* + * Generic RF access. + * The RF is being accessed by word index. + */ +static inline void rt2x00_rf_read(const struct rt2x00_dev *rt2x00dev, + const unsigned int word, u32 *data) +{ + *data = rt2x00dev->rf[word]; +} + +static inline void rt2x00_rf_write(const struct rt2x00_dev *rt2x00dev, + const unsigned int word, u32 data) +{ + rt2x00dev->rf[word] = data; +} + +/* + * Generic EEPROM access. + * The EEPROM is being accessed by word index. + */ +static inline void *rt2x00_eeprom_addr(const struct rt2x00_dev *rt2x00dev, + const unsigned int word) +{ + return (void *)&rt2x00dev->eeprom[word]; +} + +static inline void rt2x00_eeprom_read(const struct rt2x00_dev *rt2x00dev, + const unsigned int word, u16 *data) +{ + *data = le16_to_cpu(rt2x00dev->eeprom[word]); +} + +static inline void rt2x00_eeprom_write(const struct rt2x00_dev *rt2x00dev, + const unsigned int word, u16 data) +{ + rt2x00dev->eeprom[word] = cpu_to_le16(data); +} + +/* + * Chipset handlers + */ +static inline void rt2x00_set_chip(struct rt2x00_dev *rt2x00dev, + const u16 rt, const u16 rf, const u32 rev) +{ + INFO(rt2x00dev, + "Chipset detected - rt: %04x, rf: %04x, rev: %08x.\n", + rt, rf, rev); + + rt2x00dev->chip.rt = rt; + rt2x00dev->chip.rf = rf; + rt2x00dev->chip.rev = rev; +} + +static inline char rt2x00_rt(const struct rt2x00_chip *chipset, const u16 chip) +{ + return (chipset->rt == chip); +} + +static inline char rt2x00_rf(const struct rt2x00_chip *chipset, const u16 chip) +{ + return (chipset->rf == chip); +} + +static inline u16 rt2x00_get_rev(const struct rt2x00_chip *chipset) +{ + return chipset->rev; +} + +static inline u16 rt2x00_rev(const struct rt2x00_chip *chipset, const u32 mask) +{ + return chipset->rev & mask; +} + +/* + * Duration calculations + * The rate variable passed is: 100kbs. + * To convert from bytes to bits we multiply size with 8, + * then the size is multiplied with 10 to make the + * real rate -> rate argument correction. + */ +static inline u16 get_duration(const unsigned int size, const u8 rate) +{ + return ((size * 8 * 10) / rate); +} + +static inline u16 get_duration_res(const unsigned int size, const u8 rate) +{ + return ((size * 8 * 10) % rate); +} + +/* + * Library functions. + */ +struct data_ring *rt2x00lib_get_ring(struct rt2x00_dev *rt2x00dev, + const unsigned int queue); + +/* + * Interrupt context handlers. 
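+ * These are typically called from interrupt or URB completion
+ * context once a beacon, TX or RX frame has been handled by the
+ * hardware, to report the result back to rt2x00lib.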
+ */ +void rt2x00lib_beacondone(struct rt2x00_dev *rt2x00dev); +void rt2x00lib_txdone(struct data_entry *entry, + const int status, const int retry); +void rt2x00lib_rxdone(struct data_entry *entry, struct sk_buff *skb, + struct rxdata_entry_desc *desc); + +/* + * TX descriptor initializer + */ +void rt2x00lib_write_tx_desc(struct rt2x00_dev *rt2x00dev, + struct data_desc *txd, + struct ieee80211_hdr *ieee80211hdr, + unsigned int length, + struct ieee80211_tx_control *control); + +/* + * mac80211 handlers. + */ +int rt2x00mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb, + struct ieee80211_tx_control *control); +int rt2x00mac_start(struct ieee80211_hw *hw); +void rt2x00mac_stop(struct ieee80211_hw *hw); +int rt2x00mac_add_interface(struct ieee80211_hw *hw, + struct ieee80211_if_init_conf *conf); +void rt2x00mac_remove_interface(struct ieee80211_hw *hw, + struct ieee80211_if_init_conf *conf); +int rt2x00mac_config(struct ieee80211_hw *hw, struct ieee80211_conf *conf); +int rt2x00mac_config_interface(struct ieee80211_hw *hw, int if_id, + struct ieee80211_if_conf *conf); +int rt2x00mac_get_stats(struct ieee80211_hw *hw, + struct ieee80211_low_level_stats *stats); +int rt2x00mac_get_tx_stats(struct ieee80211_hw *hw, + struct ieee80211_tx_queue_stats *stats); +void rt2x00mac_erp_ie_changed(struct ieee80211_hw *hw, u8 changes, + int cts_protection, int preamble); +int rt2x00mac_conf_tx(struct ieee80211_hw *hw, int queue, + const struct ieee80211_tx_queue_params *params); + +/* + * Driver allocation handlers. + */ +int rt2x00lib_probe_dev(struct rt2x00_dev *rt2x00dev); +void rt2x00lib_remove_dev(struct rt2x00_dev *rt2x00dev); +#ifdef CONFIG_PM +int rt2x00lib_suspend(struct rt2x00_dev *rt2x00dev, pm_message_t state); +int rt2x00lib_resume(struct rt2x00_dev *rt2x00dev); +#endif /* CONFIG_PM */ + +#endif /* RT2X00_H */ diff --git a/drivers/net/wireless/rt2x00/rt2x00config.c b/drivers/net/wireless/rt2x00/rt2x00config.c new file mode 100644 index 0000000000000000000000000000000000000000..12914cf7156cb4fc6582dd6cbfe06b96ed4fb4bb --- /dev/null +++ b/drivers/net/wireless/rt2x00/rt2x00config.c @@ -0,0 +1,205 @@ +/* + Copyright (C) 2004 - 2007 rt2x00 SourceForge Project + + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the + Free Software Foundation, Inc., + 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + */ + +/* + Module: rt2x00lib + Abstract: rt2x00 generic configuration routines. + */ + +/* + * Set enviroment defines for rt2x00.h + */ +#define DRV_NAME "rt2x00lib" + +#include +#include + +#include "rt2x00.h" +#include "rt2x00lib.h" + + +/* + * The MAC and BSSID addressess are simple array of bytes, + * these arrays are little endian, so when sending the addressess + * to the drivers, copy the it into a endian-signed variable. + * + * Note that all devices (except rt2500usb) have 32 bits + * register word sizes. This means that whatever variable we + * pass _must_ be a multiple of 32 bits. 
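
A minimal userspace sketch (editor's illustration, not part of this patch) of the padding described in the comment above: the six address bytes are copied into two zeroed 32-bit words, shown here on an assumed little-endian host.

#include <inttypes.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	uint8_t mac[6] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };
	uint32_t reg[2];

	memset(reg, 0, sizeof(reg));	/* unused bytes become zero padding */
	memcpy(reg, mac, sizeof(mac));	/* six bytes fill word 0 and half of word 1 */

	/* On a little-endian host this prints 0x33221100 0x00005544. */
	printf("0x%08" PRIx32 " 0x%08" PRIx32 "\n", reg[0], reg[1]);
	return 0;
}
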
Otherwise the device + * might not accept what we are sending to it. + * This will also make it easier for the driver to write + * the data to the device. + * + * Also note that when NULL is passed as address the + * we will send 00:00:00:00:00 to the device to clear the address. + * This will prevent the device being confused when it wants + * to ACK frames or consideres itself associated. + */ +void rt2x00lib_config_mac_addr(struct rt2x00_dev *rt2x00dev, u8 *mac) +{ + __le32 reg[2]; + + memset(®, 0, sizeof(reg)); + if (mac) + memcpy(®, mac, ETH_ALEN); + + rt2x00dev->ops->lib->config_mac_addr(rt2x00dev, ®[0]); +} + +void rt2x00lib_config_bssid(struct rt2x00_dev *rt2x00dev, u8 *bssid) +{ + __le32 reg[2]; + + memset(®, 0, sizeof(reg)); + if (bssid) + memcpy(®, bssid, ETH_ALEN); + + rt2x00dev->ops->lib->config_bssid(rt2x00dev, ®[0]); +} + +void rt2x00lib_config_type(struct rt2x00_dev *rt2x00dev, const int type) +{ + int tsf_sync; + + switch (type) { + case IEEE80211_IF_TYPE_IBSS: + case IEEE80211_IF_TYPE_AP: + tsf_sync = TSF_SYNC_BEACON; + break; + case IEEE80211_IF_TYPE_STA: + tsf_sync = TSF_SYNC_INFRA; + break; + default: + tsf_sync = TSF_SYNC_NONE; + break; + } + + rt2x00dev->ops->lib->config_type(rt2x00dev, type, tsf_sync); +} + +void rt2x00lib_config(struct rt2x00_dev *rt2x00dev, + struct ieee80211_conf *conf, const int force_config) +{ + struct rt2x00lib_conf libconf; + struct ieee80211_hw_mode *mode; + struct ieee80211_rate *rate; + int flags = 0; + int short_slot_time; + + /* + * In some situations we want to force all configurations + * to be reloaded (When resuming for instance). + */ + if (force_config) { + flags = CONFIG_UPDATE_ALL; + goto config; + } + + /* + * Check which configuration options have been + * updated and should be send to the device. + */ + if (rt2x00dev->rx_status.phymode != conf->phymode) + flags |= CONFIG_UPDATE_PHYMODE; + if (rt2x00dev->rx_status.channel != conf->channel) + flags |= CONFIG_UPDATE_CHANNEL; + if (rt2x00dev->tx_power != conf->power_level) + flags |= CONFIG_UPDATE_TXPOWER; + if (rt2x00dev->rx_status.antenna == conf->antenna_sel_rx) + flags |= CONFIG_UPDATE_ANTENNA; + + /* + * The following configuration options are never + * stored anywhere and will always be updated. + */ + flags |= CONFIG_UPDATE_SLOT_TIME; + flags |= CONFIG_UPDATE_BEACON_INT; + + /* + * We have determined what options should be updated, + * now precalculate device configuration values depending + * on what configuration options need to be updated. + */ +config: + memset(&libconf, 0, sizeof(libconf)); + + if (flags & CONFIG_UPDATE_PHYMODE) { + switch (conf->phymode) { + case MODE_IEEE80211A: + libconf.phymode = HWMODE_A; + break; + case MODE_IEEE80211B: + libconf.phymode = HWMODE_B; + break; + case MODE_IEEE80211G: + libconf.phymode = HWMODE_G; + break; + default: + ERROR(rt2x00dev, + "Attempt to configure unsupported mode (%d)" + "Defaulting to 802.11b", conf->phymode); + libconf.phymode = HWMODE_B; + } + + mode = &rt2x00dev->hwmodes[libconf.phymode]; + rate = &mode->rates[mode->num_rates - 1]; + + libconf.basic_rates = + DEVICE_GET_RATE_FIELD(rate->val, RATEMASK) & DEV_BASIC_RATEMASK; + } + + if (flags & CONFIG_UPDATE_CHANNEL) { + memcpy(&libconf.rf, + &rt2x00dev->spec.channels[conf->channel_val], + sizeof(libconf.rf)); + } + + if (flags & CONFIG_UPDATE_SLOT_TIME) { + short_slot_time = conf->flags & IEEE80211_CONF_SHORT_SLOT_TIME; + + libconf.slot_time = + short_slot_time ? SHORT_SLOT_TIME : SLOT_TIME; + libconf.sifs = SIFS; + libconf.pifs = short_slot_time ? 
SHORT_PIFS : PIFS; + libconf.difs = short_slot_time ? SHORT_DIFS : DIFS; + libconf.eifs = EIFS; + } + + libconf.conf = conf; + + /* + * Start configuration. + */ + rt2x00dev->ops->lib->config(rt2x00dev, flags, &libconf); + + /* + * Some configuration changes affect the link quality + * which means we need to reset the link tuner. + */ + if (flags & (CONFIG_UPDATE_CHANNEL | CONFIG_UPDATE_ANTENNA)) + rt2x00lib_reset_link_tuner(rt2x00dev); + + rt2x00dev->curr_hwmode = libconf.phymode; + rt2x00dev->rx_status.phymode = conf->phymode; + rt2x00dev->rx_status.freq = conf->freq; + rt2x00dev->rx_status.channel = conf->channel; + rt2x00dev->tx_power = conf->power_level; + rt2x00dev->rx_status.antenna = conf->antenna_sel_rx; +} diff --git a/drivers/net/wireless/rt2x00/rt2x00debug.c b/drivers/net/wireless/rt2x00/rt2x00debug.c new file mode 100644 index 0000000000000000000000000000000000000000..9275d6f9517e32c714a179923f2b162c157cbd8a --- /dev/null +++ b/drivers/net/wireless/rt2x00/rt2x00debug.c @@ -0,0 +1,368 @@ +/* + Copyright (C) 2004 - 2007 rt2x00 SourceForge Project + + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the + Free Software Foundation, Inc., + 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + */ + +/* + Module: rt2x00lib + Abstract: rt2x00 debugfs specific routines. + */ + +/* + * Set enviroment defines for rt2x00.h + */ +#define DRV_NAME "rt2x00lib" + +#include +#include +#include +#include + +#include "rt2x00.h" +#include "rt2x00lib.h" + +#define PRINT_LINE_LEN_MAX 32 + +struct rt2x00debug_intf { + /* + * Pointer to driver structure where + * this debugfs entry belongs to. + */ + struct rt2x00_dev *rt2x00dev; + + /* + * Reference to the rt2x00debug structure + * which can be used to communicate with + * the registers. + */ + const struct rt2x00debug *debug; + + /* + * Debugfs entries for: + * - driver folder + * - driver file + * - chipset file + * - device flags file + * - register offset/value files + * - eeprom offset/value files + * - bbp offset/value files + * - rf offset/value files + */ + struct dentry *driver_folder; + struct dentry *driver_entry; + struct dentry *chipset_entry; + struct dentry *dev_flags; + struct dentry *csr_off_entry; + struct dentry *csr_val_entry; + struct dentry *eeprom_off_entry; + struct dentry *eeprom_val_entry; + struct dentry *bbp_off_entry; + struct dentry *bbp_val_entry; + struct dentry *rf_off_entry; + struct dentry *rf_val_entry; + + /* + * Driver and chipset files will use a data buffer + * that has been created in advance. This will simplify + * the code since we can use the debugfs functions. + */ + struct debugfs_blob_wrapper driver_blob; + struct debugfs_blob_wrapper chipset_blob; + + /* + * Requested offset for each register type. 
+ */ + unsigned int offset_csr; + unsigned int offset_eeprom; + unsigned int offset_bbp; + unsigned int offset_rf; +}; + +static int rt2x00debug_file_open(struct inode *inode, struct file *file) +{ + struct rt2x00debug_intf *intf = inode->i_private; + + file->private_data = inode->i_private; + + if (!try_module_get(intf->debug->owner)) + return -EBUSY; + + return 0; +} + +static int rt2x00debug_file_release(struct inode *inode, struct file *file) +{ + struct rt2x00debug_intf *intf = file->private_data; + + module_put(intf->debug->owner); + + return 0; +} + +#define RT2X00DEBUGFS_OPS_READ(__name, __format, __type) \ +static ssize_t rt2x00debug_read_##__name(struct file *file, \ + char __user *buf, \ + size_t length, \ + loff_t *offset) \ +{ \ + struct rt2x00debug_intf *intf = file->private_data; \ + const struct rt2x00debug *debug = intf->debug; \ + char line[16]; \ + size_t size; \ + __type value; \ + \ + if (*offset) \ + return 0; \ + \ + if (intf->offset_##__name >= debug->__name.word_count) \ + return -EINVAL; \ + \ + debug->__name.read(intf->rt2x00dev, \ + intf->offset_##__name, &value); \ + \ + size = sprintf(line, __format, value); \ + \ + if (copy_to_user(buf, line, size)) \ + return -EFAULT; \ + \ + *offset += size; \ + return size; \ +} + +#define RT2X00DEBUGFS_OPS_WRITE(__name, __type) \ +static ssize_t rt2x00debug_write_##__name(struct file *file, \ + const char __user *buf,\ + size_t length, \ + loff_t *offset) \ +{ \ + struct rt2x00debug_intf *intf = file->private_data; \ + const struct rt2x00debug *debug = intf->debug; \ + char line[16]; \ + size_t size; \ + __type value; \ + \ + if (*offset) \ + return 0; \ + \ + if (!capable(CAP_NET_ADMIN)) \ + return -EPERM; \ + \ + if (intf->offset_##__name >= debug->__name.word_count) \ + return -EINVAL; \ + \ + if (copy_from_user(line, buf, length)) \ + return -EFAULT; \ + \ + size = strlen(line); \ + value = simple_strtoul(line, NULL, 0); \ + \ + debug->__name.write(intf->rt2x00dev, \ + intf->offset_##__name, value); \ + \ + *offset += size; \ + return size; \ +} + +#define RT2X00DEBUGFS_OPS(__name, __format, __type) \ +RT2X00DEBUGFS_OPS_READ(__name, __format, __type); \ +RT2X00DEBUGFS_OPS_WRITE(__name, __type); \ + \ +static const struct file_operations rt2x00debug_fop_##__name = {\ + .owner = THIS_MODULE, \ + .read = rt2x00debug_read_##__name, \ + .write = rt2x00debug_write_##__name, \ + .open = rt2x00debug_file_open, \ + .release = rt2x00debug_file_release, \ +}; + +RT2X00DEBUGFS_OPS(csr, "0x%.8x\n", u32); +RT2X00DEBUGFS_OPS(eeprom, "0x%.4x\n", u16); +RT2X00DEBUGFS_OPS(bbp, "0x%.2x\n", u8); +RT2X00DEBUGFS_OPS(rf, "0x%.8x\n", u32); + +static ssize_t rt2x00debug_read_dev_flags(struct file *file, + char __user *buf, + size_t length, + loff_t *offset) +{ + struct rt2x00debug_intf *intf = file->private_data; + char line[16]; + size_t size; + + if (*offset) + return 0; + + size = sprintf(line, "0x%.8x\n", (unsigned int)intf->rt2x00dev->flags); + + if (copy_to_user(buf, line, size)) + return -EFAULT; + + *offset += size; + return size; +} + +static const struct file_operations rt2x00debug_fop_dev_flags = { + .owner = THIS_MODULE, + .read = rt2x00debug_read_dev_flags, + .open = rt2x00debug_file_open, + .release = rt2x00debug_file_release, +}; + +static struct dentry *rt2x00debug_create_file_driver(const char *name, + struct rt2x00debug_intf + *intf, + struct debugfs_blob_wrapper + *blob) +{ + char *data; + + data = kzalloc(3 * PRINT_LINE_LEN_MAX, GFP_KERNEL); + if (!data) + return NULL; + + blob->data = data; + data += sprintf(data, 
"driver: %s\n", intf->rt2x00dev->ops->name); + data += sprintf(data, "version: %s\n", DRV_VERSION); + data += sprintf(data, "compiled: %s %s\n", __DATE__, __TIME__); + blob->size = strlen(blob->data); + + return debugfs_create_blob(name, S_IRUGO, intf->driver_folder, blob); +} + +static struct dentry *rt2x00debug_create_file_chipset(const char *name, + struct rt2x00debug_intf + *intf, + struct + debugfs_blob_wrapper + *blob) +{ + const struct rt2x00debug *debug = intf->debug; + char *data; + + data = kzalloc(4 * PRINT_LINE_LEN_MAX, GFP_KERNEL); + if (!data) + return NULL; + + blob->data = data; + data += sprintf(data, "csr length: %d\n", debug->csr.word_count); + data += sprintf(data, "eeprom length: %d\n", debug->eeprom.word_count); + data += sprintf(data, "bbp length: %d\n", debug->bbp.word_count); + data += sprintf(data, "rf length: %d\n", debug->rf.word_count); + blob->size = strlen(blob->data); + + return debugfs_create_blob(name, S_IRUGO, intf->driver_folder, blob); +} + +void rt2x00debug_register(struct rt2x00_dev *rt2x00dev) +{ + const struct rt2x00debug *debug = rt2x00dev->ops->debugfs; + struct rt2x00debug_intf *intf; + + intf = kzalloc(sizeof(struct rt2x00debug_intf), GFP_KERNEL); + if (!intf) { + ERROR(rt2x00dev, "Failed to allocate debug handler.\n"); + return; + } + + intf->debug = debug; + intf->rt2x00dev = rt2x00dev; + rt2x00dev->debugfs_intf = intf; + + intf->driver_folder = + debugfs_create_dir(intf->rt2x00dev->ops->name, + rt2x00dev->hw->wiphy->debugfsdir); + if (IS_ERR(intf->driver_folder)) + goto exit; + + intf->driver_entry = + rt2x00debug_create_file_driver("driver", intf, &intf->driver_blob); + if (IS_ERR(intf->driver_entry)) + goto exit; + + intf->chipset_entry = + rt2x00debug_create_file_chipset("chipset", + intf, &intf->chipset_blob); + if (IS_ERR(intf->chipset_entry)) + goto exit; + + intf->dev_flags = debugfs_create_file("dev_flags", S_IRUGO, + intf->driver_folder, intf, + &rt2x00debug_fop_dev_flags); + if (IS_ERR(intf->dev_flags)) + goto exit; + +#define RT2X00DEBUGFS_CREATE_ENTRY(__intf, __name) \ +({ \ + (__intf)->__name##_off_entry = \ + debugfs_create_u32(__stringify(__name) "_offset", \ + S_IRUGO | S_IWUSR, \ + (__intf)->driver_folder, \ + &(__intf)->offset_##__name); \ + if (IS_ERR((__intf)->__name##_off_entry)) \ + goto exit; \ + \ + (__intf)->__name##_val_entry = \ + debugfs_create_file(__stringify(__name) "_value", \ + S_IRUGO | S_IWUSR, \ + (__intf)->driver_folder, \ + (__intf), &rt2x00debug_fop_##__name);\ + if (IS_ERR((__intf)->__name##_val_entry)) \ + goto exit; \ +}) + + RT2X00DEBUGFS_CREATE_ENTRY(intf, csr); + RT2X00DEBUGFS_CREATE_ENTRY(intf, eeprom); + RT2X00DEBUGFS_CREATE_ENTRY(intf, bbp); + RT2X00DEBUGFS_CREATE_ENTRY(intf, rf); + +#undef RT2X00DEBUGFS_CREATE_ENTRY + + return; + +exit: + rt2x00debug_deregister(rt2x00dev); + ERROR(rt2x00dev, "Failed to register debug handler.\n"); + + return; +} + +void rt2x00debug_deregister(struct rt2x00_dev *rt2x00dev) +{ + const struct rt2x00debug_intf *intf = rt2x00dev->debugfs_intf; + + if (unlikely(!intf)) + return; + + debugfs_remove(intf->rf_val_entry); + debugfs_remove(intf->rf_off_entry); + debugfs_remove(intf->bbp_val_entry); + debugfs_remove(intf->bbp_off_entry); + debugfs_remove(intf->eeprom_val_entry); + debugfs_remove(intf->eeprom_off_entry); + debugfs_remove(intf->csr_val_entry); + debugfs_remove(intf->csr_off_entry); + debugfs_remove(intf->dev_flags); + debugfs_remove(intf->chipset_entry); + debugfs_remove(intf->driver_entry); + debugfs_remove(intf->driver_folder); + 
kfree(intf->chipset_blob.data); + kfree(intf->driver_blob.data); + kfree(intf); + + rt2x00dev->debugfs_intf = NULL; +} diff --git a/drivers/net/wireless/rt2x00/rt2x00debug.h b/drivers/net/wireless/rt2x00/rt2x00debug.h new file mode 100644 index 0000000000000000000000000000000000000000..860e8fa3a0da7a285c675206f28575692efb5bc1 --- /dev/null +++ b/drivers/net/wireless/rt2x00/rt2x00debug.h @@ -0,0 +1,57 @@ +/* + Copyright (C) 2004 - 2007 rt2x00 SourceForge Project + + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the + Free Software Foundation, Inc., + 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + */ + +/* + Module: rt2x00debug + Abstract: Data structures for the rt2x00debug. + */ + +#ifndef RT2X00DEBUG_H +#define RT2X00DEBUG_H + +struct rt2x00_dev; + +#define RT2X00DEBUGFS_REGISTER_ENTRY(__name, __type) \ +struct reg##__name { \ + void (*read)(const struct rt2x00_dev *rt2x00dev, \ + const unsigned int word, __type *data); \ + void (*write)(const struct rt2x00_dev *rt2x00dev, \ + const unsigned int word, __type data); \ + \ + unsigned int word_size; \ + unsigned int word_count; \ +} __name + +struct rt2x00debug { + /* + * Reference to the modules structure. + */ + struct module *owner; + + /* + * Register access entries. + */ + RT2X00DEBUGFS_REGISTER_ENTRY(csr, u32); + RT2X00DEBUGFS_REGISTER_ENTRY(eeprom, u16); + RT2X00DEBUGFS_REGISTER_ENTRY(bbp, u8); + RT2X00DEBUGFS_REGISTER_ENTRY(rf, u32); +}; + +#endif /* RT2X00DEBUG_H */ diff --git a/drivers/net/wireless/rt2x00/rt2x00dev.c b/drivers/net/wireless/rt2x00/rt2x00dev.c new file mode 100644 index 0000000000000000000000000000000000000000..bb6f46cfbb9f87e27dc792d52a1aed4ba5218706 --- /dev/null +++ b/drivers/net/wireless/rt2x00/rt2x00dev.c @@ -0,0 +1,1202 @@ +/* + Copyright (C) 2004 - 2007 rt2x00 SourceForge Project + + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the + Free Software Foundation, Inc., + 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + */ + +/* + Module: rt2x00lib + Abstract: rt2x00 generic device routines. + */ + +/* + * Set enviroment defines for rt2x00.h + */ +#define DRV_NAME "rt2x00lib" + +#include +#include + +#include "rt2x00.h" +#include "rt2x00lib.h" + +/* + * Ring handler. 
+ */ +struct data_ring *rt2x00lib_get_ring(struct rt2x00_dev *rt2x00dev, + const unsigned int queue) +{ + int beacon = test_bit(DRIVER_REQUIRE_BEACON_RING, &rt2x00dev->flags); + + /* + * Check if we are requesting a reqular TX ring, + * or if we are requesting a Beacon or Atim ring. + * For Atim rings, we should check if it is supported. + */ + if (queue < rt2x00dev->hw->queues && rt2x00dev->tx) + return &rt2x00dev->tx[queue]; + + if (!rt2x00dev->bcn || !beacon) + return NULL; + + if (queue == IEEE80211_TX_QUEUE_BEACON) + return &rt2x00dev->bcn[0]; + else if (queue == IEEE80211_TX_QUEUE_AFTER_BEACON) + return &rt2x00dev->bcn[1]; + + return NULL; +} +EXPORT_SYMBOL_GPL(rt2x00lib_get_ring); + +/* + * Link tuning handlers + */ +static void rt2x00lib_start_link_tuner(struct rt2x00_dev *rt2x00dev) +{ + rt2x00_clear_link(&rt2x00dev->link); + + /* + * Reset the link tuner. + */ + rt2x00dev->ops->lib->reset_tuner(rt2x00dev); + + queue_delayed_work(rt2x00dev->hw->workqueue, + &rt2x00dev->link.work, LINK_TUNE_INTERVAL); +} + +static void rt2x00lib_stop_link_tuner(struct rt2x00_dev *rt2x00dev) +{ + cancel_delayed_work_sync(&rt2x00dev->link.work); +} + +void rt2x00lib_reset_link_tuner(struct rt2x00_dev *rt2x00dev) +{ + if (!test_bit(DEVICE_ENABLED_RADIO, &rt2x00dev->flags)) + return; + + rt2x00lib_stop_link_tuner(rt2x00dev); + rt2x00lib_start_link_tuner(rt2x00dev); +} + +/* + * Radio control handlers. + */ +int rt2x00lib_enable_radio(struct rt2x00_dev *rt2x00dev) +{ + int status; + + /* + * Don't enable the radio twice. + * And check if the hardware button has been disabled. + */ + if (test_bit(DEVICE_ENABLED_RADIO, &rt2x00dev->flags) || + test_bit(DEVICE_DISABLED_RADIO_HW, &rt2x00dev->flags)) + return 0; + + /* + * Enable radio. + */ + status = rt2x00dev->ops->lib->set_device_state(rt2x00dev, + STATE_RADIO_ON); + if (status) + return status; + + __set_bit(DEVICE_ENABLED_RADIO, &rt2x00dev->flags); + + /* + * Enable RX. + */ + rt2x00lib_toggle_rx(rt2x00dev, STATE_RADIO_RX_ON); + + /* + * Start the TX queues. + */ + ieee80211_start_queues(rt2x00dev->hw); + + return 0; +} + +void rt2x00lib_disable_radio(struct rt2x00_dev *rt2x00dev) +{ + if (!__test_and_clear_bit(DEVICE_ENABLED_RADIO, &rt2x00dev->flags)) + return; + + /* + * Stop all scheduled work. + */ + if (work_pending(&rt2x00dev->beacon_work)) + cancel_work_sync(&rt2x00dev->beacon_work); + if (work_pending(&rt2x00dev->filter_work)) + cancel_work_sync(&rt2x00dev->filter_work); + if (work_pending(&rt2x00dev->config_work)) + cancel_work_sync(&rt2x00dev->config_work); + + /* + * Stop the TX queues. + */ + ieee80211_stop_queues(rt2x00dev->hw); + + /* + * Disable RX. + */ + rt2x00lib_toggle_rx(rt2x00dev, STATE_RADIO_RX_OFF); + + /* + * Disable radio. + */ + rt2x00dev->ops->lib->set_device_state(rt2x00dev, STATE_RADIO_OFF); +} + +void rt2x00lib_toggle_rx(struct rt2x00_dev *rt2x00dev, enum dev_state state) +{ + /* + * When we are disabling the RX, we should also stop the link tuner. + */ + if (state == STATE_RADIO_RX_OFF) + rt2x00lib_stop_link_tuner(rt2x00dev); + + rt2x00dev->ops->lib->set_device_state(rt2x00dev, state); + + /* + * When we are enabling the RX, we should also start the link tuner. 
+ */ + if (state == STATE_RADIO_RX_ON && + is_interface_present(&rt2x00dev->interface)) + rt2x00lib_start_link_tuner(rt2x00dev); +} + +static void rt2x00lib_precalculate_link_signal(struct link *link) +{ + if (link->rx_failed || link->rx_success) + link->rx_percentage = + (link->rx_success * 100) / + (link->rx_failed + link->rx_success); + else + link->rx_percentage = 50; + + if (link->tx_failed || link->tx_success) + link->tx_percentage = + (link->tx_success * 100) / + (link->tx_failed + link->tx_success); + else + link->tx_percentage = 50; + + link->rx_success = 0; + link->rx_failed = 0; + link->tx_success = 0; + link->tx_failed = 0; +} + +static int rt2x00lib_calculate_link_signal(struct rt2x00_dev *rt2x00dev, + int rssi) +{ + int rssi_percentage = 0; + int signal; + + /* + * We need a positive value for the RSSI. + */ + if (rssi < 0) + rssi += rt2x00dev->rssi_offset; + + /* + * Calculate the different percentages, + * which will be used for the signal. + */ + if (rt2x00dev->rssi_offset) + rssi_percentage = (rssi * 100) / rt2x00dev->rssi_offset; + + /* + * Add the individual percentages and use the WEIGHT + * defines to calculate the current link signal. + */ + signal = ((WEIGHT_RSSI * rssi_percentage) + + (WEIGHT_TX * rt2x00dev->link.tx_percentage) + + (WEIGHT_RX * rt2x00dev->link.rx_percentage)) / 100; + + return (signal > 100) ? 100 : signal; +} + +static void rt2x00lib_link_tuner(struct work_struct *work) +{ + struct rt2x00_dev *rt2x00dev = + container_of(work, struct rt2x00_dev, link.work.work); + + /* + * When the radio is shutting down we should + * immediately cease all link tuning. + */ + if (!test_bit(DEVICE_ENABLED_RADIO, &rt2x00dev->flags)) + return; + + /* + * Update statistics. + */ + rt2x00dev->ops->lib->link_stats(rt2x00dev); + + rt2x00dev->low_level_stats.dot11FCSErrorCount += + rt2x00dev->link.rx_failed; + + /* + * Only perform the link tuning when Link tuning + * has been enabled (This could have been disabled from the EEPROM). + */ + if (!test_bit(CONFIG_DISABLE_LINK_TUNING, &rt2x00dev->flags)) + rt2x00dev->ops->lib->link_tuner(rt2x00dev); + + /* + * Precalculate a portion of the link signal which is + * in based on the tx/rx success/failure counters. + */ + rt2x00lib_precalculate_link_signal(&rt2x00dev->link); + + /* + * Increase tuner counter, and reschedule the next link tuner run. + */ + rt2x00dev->link.count++; + queue_delayed_work(rt2x00dev->hw->workqueue, &rt2x00dev->link.work, + LINK_TUNE_INTERVAL); +} + +static void rt2x00lib_packetfilter_scheduled(struct work_struct *work) +{ + struct rt2x00_dev *rt2x00dev = + container_of(work, struct rt2x00_dev, filter_work); + unsigned int filter = rt2x00dev->interface.filter; + + /* + * Since we had stored the filter inside interface.filter, + * we should now clear that field. Otherwise the driver will + * assume nothing has changed (*total_flags will be compared + * to interface.filter to determine if any action is required). + */ + rt2x00dev->interface.filter = 0; + + rt2x00dev->ops->hw->configure_filter(rt2x00dev->hw, + filter, &filter, 0, NULL); +} + +static void rt2x00lib_configuration_scheduled(struct work_struct *work) +{ + struct rt2x00_dev *rt2x00dev = + container_of(work, struct rt2x00_dev, config_work); + int preamble = !test_bit(CONFIG_SHORT_PREAMBLE, &rt2x00dev->flags); + + rt2x00mac_erp_ie_changed(rt2x00dev->hw, + IEEE80211_ERP_CHANGE_PREAMBLE, 0, preamble); +} + +/* + * Interrupt context handlers. 
+ */ +static void rt2x00lib_beacondone_scheduled(struct work_struct *work) +{ + struct rt2x00_dev *rt2x00dev = + container_of(work, struct rt2x00_dev, beacon_work); + struct data_ring *ring = + rt2x00lib_get_ring(rt2x00dev, IEEE80211_TX_QUEUE_BEACON); + struct data_entry *entry = rt2x00_get_data_entry(ring); + struct sk_buff *skb; + + skb = ieee80211_beacon_get(rt2x00dev->hw, + rt2x00dev->interface.id, + &entry->tx_status.control); + if (!skb) + return; + + rt2x00dev->ops->hw->beacon_update(rt2x00dev->hw, skb, + &entry->tx_status.control); + + dev_kfree_skb(skb); +} + +void rt2x00lib_beacondone(struct rt2x00_dev *rt2x00dev) +{ + if (!test_bit(DEVICE_ENABLED_RADIO, &rt2x00dev->flags)) + return; + + queue_work(rt2x00dev->hw->workqueue, &rt2x00dev->beacon_work); +} +EXPORT_SYMBOL_GPL(rt2x00lib_beacondone); + +void rt2x00lib_txdone(struct data_entry *entry, + const int status, const int retry) +{ + struct rt2x00_dev *rt2x00dev = entry->ring->rt2x00dev; + struct ieee80211_tx_status *tx_status = &entry->tx_status; + struct ieee80211_low_level_stats *stats = &rt2x00dev->low_level_stats; + int success = !!(status == TX_SUCCESS || status == TX_SUCCESS_RETRY); + int fail = !!(status == TX_FAIL_RETRY || status == TX_FAIL_INVALID || + status == TX_FAIL_OTHER); + + /* + * Update TX statistics. + */ + tx_status->flags = 0; + tx_status->ack_signal = 0; + tx_status->excessive_retries = (status == TX_FAIL_RETRY); + tx_status->retry_count = retry; + rt2x00dev->link.tx_success += success; + rt2x00dev->link.tx_failed += retry + fail; + + if (!(tx_status->control.flags & IEEE80211_TXCTL_NO_ACK)) { + if (success) + tx_status->flags |= IEEE80211_TX_STATUS_ACK; + else + stats->dot11ACKFailureCount++; + } + + tx_status->queue_length = entry->ring->stats.limit; + tx_status->queue_number = tx_status->control.queue; + + if (tx_status->control.flags & IEEE80211_TXCTL_USE_RTS_CTS) { + if (success) + stats->dot11RTSSuccessCount++; + else + stats->dot11RTSFailureCount++; + } + + /* + * Send the tx_status to mac80211, + * that method also cleans up the skb structure. + */ + ieee80211_tx_status_irqsafe(rt2x00dev->hw, entry->skb, tx_status); + entry->skb = NULL; +} +EXPORT_SYMBOL_GPL(rt2x00lib_txdone); + +void rt2x00lib_rxdone(struct data_entry *entry, struct sk_buff *skb, + struct rxdata_entry_desc *desc) +{ + struct rt2x00_dev *rt2x00dev = entry->ring->rt2x00dev; + struct ieee80211_rx_status *rx_status = &rt2x00dev->rx_status; + struct ieee80211_hw_mode *mode; + struct ieee80211_rate *rate; + unsigned int i; + int val = 0; + + /* + * Update RX statistics. + */ + mode = &rt2x00dev->hwmodes[rt2x00dev->curr_hwmode]; + for (i = 0; i < mode->num_rates; i++) { + rate = &mode->rates[i]; + + /* + * When frame was received with an OFDM bitrate, + * the signal is the PLCP value. If it was received with + * a CCK bitrate the signal is the rate in 0.5kbit/s. 
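
Editor's illustration of the rate lookup performed by the loop that follows, using values the patch programs into the rate table with rt2x00lib_rate(): CCK entries are matched on the bitrate field (stored in 100 kbit/s units, e.g. 110 for 11 Mbit/s), OFDM entries on their PLCP code.

#include <stdio.h>

/* Standalone sketch; the table is a subset of the rt2x00lib_rate() calls below. */
struct sketch_rate {
	int bitrate;	/* 100 kbit/s units */
	int plcp;
};

static const struct sketch_rate sketch_rates[] = {
	{ 10, 0x00 }, { 20, 0x01 }, { 55, 0x02 }, { 110, 0x03 },	/* CCK  */
	{ 60, 0x0b }, { 120, 0x0a }, { 240, 0x09 }, { 540, 0x0c },	/* OFDM */
};

/* Return the matching bitrate for a RX descriptor, or 0 if nothing matches. */
static int sketch_lookup(int signal, int ofdm)
{
	unsigned int i;

	for (i = 0; i < sizeof(sketch_rates) / sizeof(sketch_rates[0]); i++) {
		int key = ofdm ? sketch_rates[i].plcp : sketch_rates[i].bitrate;

		if (key == signal)
			return sketch_rates[i].bitrate;
	}
	return 0;
}

int main(void)
{
	printf("CCK signal 110 -> %d\n", sketch_lookup(110, 0));	/* 110 */
	printf("OFDM plcp 0x0c -> %d\n", sketch_lookup(0x0c, 1));	/* 540 */
	return 0;
}
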
+ */ + if (!desc->ofdm) + val = DEVICE_GET_RATE_FIELD(rate->val, RATE); + else + val = DEVICE_GET_RATE_FIELD(rate->val, PLCP); + + if (val == desc->signal) { + val = rate->val; + break; + } + } + + rt2x00_update_link_rssi(&rt2x00dev->link, desc->rssi); + rt2x00dev->link.rx_success++; + rx_status->rate = val; + rx_status->signal = + rt2x00lib_calculate_link_signal(rt2x00dev, desc->rssi); + rx_status->ssi = desc->rssi; + rx_status->flag = desc->flags; + + /* + * Send frame to mac80211 + */ + ieee80211_rx_irqsafe(rt2x00dev->hw, skb, rx_status); +} +EXPORT_SYMBOL_GPL(rt2x00lib_rxdone); + +/* + * TX descriptor initializer + */ +void rt2x00lib_write_tx_desc(struct rt2x00_dev *rt2x00dev, + struct data_desc *txd, + struct ieee80211_hdr *ieee80211hdr, + unsigned int length, + struct ieee80211_tx_control *control) +{ + struct txdata_entry_desc desc; + struct data_ring *ring; + int tx_rate; + int bitrate; + int duration; + int residual; + u16 frame_control; + u16 seq_ctrl; + + /* + * Make sure the descriptor is properly cleared. + */ + memset(&desc, 0x00, sizeof(desc)); + + /* + * Get ring pointer, if we fail to obtain the + * correct ring, then use the first TX ring. + */ + ring = rt2x00lib_get_ring(rt2x00dev, control->queue); + if (!ring) + ring = rt2x00lib_get_ring(rt2x00dev, IEEE80211_TX_QUEUE_DATA0); + + desc.cw_min = ring->tx_params.cw_min; + desc.cw_max = ring->tx_params.cw_max; + desc.aifs = ring->tx_params.aifs; + + /* + * Identify queue + */ + if (control->queue < rt2x00dev->hw->queues) + desc.queue = control->queue; + else if (control->queue == IEEE80211_TX_QUEUE_BEACON || + control->queue == IEEE80211_TX_QUEUE_AFTER_BEACON) + desc.queue = QUEUE_MGMT; + else + desc.queue = QUEUE_OTHER; + + /* + * Read required fields from ieee80211 header. + */ + frame_control = le16_to_cpu(ieee80211hdr->frame_control); + seq_ctrl = le16_to_cpu(ieee80211hdr->seq_ctrl); + + tx_rate = control->tx_rate; + + /* + * Check if this is a RTS/CTS frame + */ + if (is_rts_frame(frame_control) || is_cts_frame(frame_control)) { + __set_bit(ENTRY_TXD_BURST, &desc.flags); + if (is_rts_frame(frame_control)) + __set_bit(ENTRY_TXD_RTS_FRAME, &desc.flags); + if (control->rts_cts_rate) + tx_rate = control->rts_cts_rate; + } + + /* + * Check for OFDM + */ + if (DEVICE_GET_RATE_FIELD(tx_rate, RATEMASK) & DEV_OFDM_RATEMASK) + __set_bit(ENTRY_TXD_OFDM_RATE, &desc.flags); + + /* + * Check if more fragments are pending + */ + if (ieee80211_get_morefrag(ieee80211hdr)) { + __set_bit(ENTRY_TXD_BURST, &desc.flags); + __set_bit(ENTRY_TXD_MORE_FRAG, &desc.flags); + } + + /* + * Beacons and probe responses require the tsf timestamp + * to be inserted into the frame. + */ + if (control->queue == IEEE80211_TX_QUEUE_BEACON || + is_probe_resp(frame_control)) + __set_bit(ENTRY_TXD_REQ_TIMESTAMP, &desc.flags); + + /* + * Determine with what IFS priority this frame should be send. + * Set ifs to IFS_SIFS when the this is not the first fragment, + * or this fragment came after RTS/CTS. + */ + if ((seq_ctrl & IEEE80211_SCTL_FRAG) > 0 || + test_bit(ENTRY_TXD_RTS_FRAME, &desc.flags)) + desc.ifs = IFS_SIFS; + else + desc.ifs = IFS_BACKOFF; + + /* + * PLCP setup + * Length calculation depends on OFDM/CCK rate. + */ + desc.signal = DEVICE_GET_RATE_FIELD(tx_rate, PLCP); + desc.service = 0x04; + + if (test_bit(ENTRY_TXD_OFDM_RATE, &desc.flags)) { + desc.length_high = ((length + FCS_LEN) >> 6) & 0x3f; + desc.length_low = ((length + FCS_LEN) & 0x3f); + } else { + bitrate = DEVICE_GET_RATE_FIELD(tx_rate, RATE); + + /* + * Convert length to microseconds. 
+ */ + residual = get_duration_res(length + FCS_LEN, bitrate); + duration = get_duration(length + FCS_LEN, bitrate); + + if (residual != 0) { + duration++; + + /* + * Check if we need to set the Length Extension + */ + if (bitrate == 110 && residual <= 3) + desc.service |= 0x80; + } + + desc.length_high = (duration >> 8) & 0xff; + desc.length_low = duration & 0xff; + + /* + * When preamble is enabled we should set the + * preamble bit for the signal. + */ + if (DEVICE_GET_RATE_FIELD(tx_rate, PREAMBLE)) + desc.signal |= 0x08; + } + + rt2x00dev->ops->lib->write_tx_desc(rt2x00dev, txd, &desc, + ieee80211hdr, length, control); +} +EXPORT_SYMBOL_GPL(rt2x00lib_write_tx_desc); + +/* + * Driver initialization handlers. + */ +static void rt2x00lib_channel(struct ieee80211_channel *entry, + const int channel, const int tx_power, + const int value) +{ + entry->chan = channel; + if (channel <= 14) + entry->freq = 2407 + (5 * channel); + else + entry->freq = 5000 + (5 * channel); + entry->val = value; + entry->flag = + IEEE80211_CHAN_W_IBSS | + IEEE80211_CHAN_W_ACTIVE_SCAN | + IEEE80211_CHAN_W_SCAN; + entry->power_level = tx_power; + entry->antenna_max = 0xff; +} + +static void rt2x00lib_rate(struct ieee80211_rate *entry, + const int rate, const int mask, + const int plcp, const int flags) +{ + entry->rate = rate; + entry->val = + DEVICE_SET_RATE_FIELD(rate, RATE) | + DEVICE_SET_RATE_FIELD(mask, RATEMASK) | + DEVICE_SET_RATE_FIELD(plcp, PLCP); + entry->flags = flags; + entry->val2 = entry->val; + if (entry->flags & IEEE80211_RATE_PREAMBLE2) + entry->val2 |= DEVICE_SET_RATE_FIELD(1, PREAMBLE); + entry->min_rssi_ack = 0; + entry->min_rssi_ack_delta = 0; +} + +static int rt2x00lib_probe_hw_modes(struct rt2x00_dev *rt2x00dev, + struct hw_mode_spec *spec) +{ + struct ieee80211_hw *hw = rt2x00dev->hw; + struct ieee80211_hw_mode *hwmodes; + struct ieee80211_channel *channels; + struct ieee80211_rate *rates; + unsigned int i; + unsigned char tx_power; + + hwmodes = kzalloc(sizeof(*hwmodes) * spec->num_modes, GFP_KERNEL); + if (!hwmodes) + goto exit; + + channels = kzalloc(sizeof(*channels) * spec->num_channels, GFP_KERNEL); + if (!channels) + goto exit_free_modes; + + rates = kzalloc(sizeof(*rates) * spec->num_rates, GFP_KERNEL); + if (!rates) + goto exit_free_channels; + + /* + * Initialize Rate list. + */ + rt2x00lib_rate(&rates[0], 10, DEV_RATEMASK_1MB, + 0x00, IEEE80211_RATE_CCK); + rt2x00lib_rate(&rates[1], 20, DEV_RATEMASK_2MB, + 0x01, IEEE80211_RATE_CCK_2); + rt2x00lib_rate(&rates[2], 55, DEV_RATEMASK_5_5MB, + 0x02, IEEE80211_RATE_CCK_2); + rt2x00lib_rate(&rates[3], 110, DEV_RATEMASK_11MB, + 0x03, IEEE80211_RATE_CCK_2); + + if (spec->num_rates > 4) { + rt2x00lib_rate(&rates[4], 60, DEV_RATEMASK_6MB, + 0x0b, IEEE80211_RATE_OFDM); + rt2x00lib_rate(&rates[5], 90, DEV_RATEMASK_9MB, + 0x0f, IEEE80211_RATE_OFDM); + rt2x00lib_rate(&rates[6], 120, DEV_RATEMASK_12MB, + 0x0a, IEEE80211_RATE_OFDM); + rt2x00lib_rate(&rates[7], 180, DEV_RATEMASK_18MB, + 0x0e, IEEE80211_RATE_OFDM); + rt2x00lib_rate(&rates[8], 240, DEV_RATEMASK_24MB, + 0x09, IEEE80211_RATE_OFDM); + rt2x00lib_rate(&rates[9], 360, DEV_RATEMASK_36MB, + 0x0d, IEEE80211_RATE_OFDM); + rt2x00lib_rate(&rates[10], 480, DEV_RATEMASK_48MB, + 0x08, IEEE80211_RATE_OFDM); + rt2x00lib_rate(&rates[11], 540, DEV_RATEMASK_54MB, + 0x0c, IEEE80211_RATE_OFDM); + } + + /* + * Initialize Channel list. 
+ */ + for (i = 0; i < spec->num_channels; i++) { + if (spec->channels[i].channel <= 14) + tx_power = spec->tx_power_bg[i]; + else if (spec->tx_power_a) + tx_power = spec->tx_power_a[i]; + else + tx_power = spec->tx_power_default; + + rt2x00lib_channel(&channels[i], + spec->channels[i].channel, tx_power, i); + } + + /* + * Intitialize 802.11b + * Rates: CCK. + * Channels: OFDM. + */ + if (spec->num_modes > HWMODE_B) { + hwmodes[HWMODE_B].mode = MODE_IEEE80211B; + hwmodes[HWMODE_B].num_channels = 14; + hwmodes[HWMODE_B].num_rates = 4; + hwmodes[HWMODE_B].channels = channels; + hwmodes[HWMODE_B].rates = rates; + } + + /* + * Intitialize 802.11g + * Rates: CCK, OFDM. + * Channels: OFDM. + */ + if (spec->num_modes > HWMODE_G) { + hwmodes[HWMODE_G].mode = MODE_IEEE80211G; + hwmodes[HWMODE_G].num_channels = 14; + hwmodes[HWMODE_G].num_rates = spec->num_rates; + hwmodes[HWMODE_G].channels = channels; + hwmodes[HWMODE_G].rates = rates; + } + + /* + * Intitialize 802.11a + * Rates: OFDM. + * Channels: OFDM, UNII, HiperLAN2. + */ + if (spec->num_modes > HWMODE_A) { + hwmodes[HWMODE_A].mode = MODE_IEEE80211A; + hwmodes[HWMODE_A].num_channels = spec->num_channels - 14; + hwmodes[HWMODE_A].num_rates = spec->num_rates - 4; + hwmodes[HWMODE_A].channels = &channels[14]; + hwmodes[HWMODE_A].rates = &rates[4]; + } + + if (spec->num_modes > HWMODE_G && + ieee80211_register_hwmode(hw, &hwmodes[HWMODE_G])) + goto exit_free_rates; + + if (spec->num_modes > HWMODE_B && + ieee80211_register_hwmode(hw, &hwmodes[HWMODE_B])) + goto exit_free_rates; + + if (spec->num_modes > HWMODE_A && + ieee80211_register_hwmode(hw, &hwmodes[HWMODE_A])) + goto exit_free_rates; + + rt2x00dev->hwmodes = hwmodes; + + return 0; + +exit_free_rates: + kfree(rates); + +exit_free_channels: + kfree(channels); + +exit_free_modes: + kfree(hwmodes); + +exit: + ERROR(rt2x00dev, "Allocation ieee80211 modes failed.\n"); + return -ENOMEM; +} + +static void rt2x00lib_remove_hw(struct rt2x00_dev *rt2x00dev) +{ + if (test_bit(DEVICE_REGISTERED_HW, &rt2x00dev->flags)) + ieee80211_unregister_hw(rt2x00dev->hw); + + if (likely(rt2x00dev->hwmodes)) { + kfree(rt2x00dev->hwmodes->channels); + kfree(rt2x00dev->hwmodes->rates); + kfree(rt2x00dev->hwmodes); + rt2x00dev->hwmodes = NULL; + } +} + +static int rt2x00lib_probe_hw(struct rt2x00_dev *rt2x00dev) +{ + struct hw_mode_spec *spec = &rt2x00dev->spec; + int status; + + /* + * Initialize HW modes. + */ + status = rt2x00lib_probe_hw_modes(rt2x00dev, spec); + if (status) + return status; + + /* + * Register HW. + */ + status = ieee80211_register_hw(rt2x00dev->hw); + if (status) { + rt2x00lib_remove_hw(rt2x00dev); + return status; + } + + __set_bit(DEVICE_REGISTERED_HW, &rt2x00dev->flags); + + return 0; +} + +/* + * Initialization/uninitialization handlers. + */ +static int rt2x00lib_alloc_entries(struct data_ring *ring, + const u16 max_entries, const u16 data_size, + const u16 desc_size) +{ + struct data_entry *entry; + unsigned int i; + + ring->stats.limit = max_entries; + ring->data_size = data_size; + ring->desc_size = desc_size; + + /* + * Allocate all ring entries. + */ + entry = kzalloc(ring->stats.limit * sizeof(*entry), GFP_KERNEL); + if (!entry) + return -ENOMEM; + + for (i = 0; i < ring->stats.limit; i++) { + entry[i].flags = 0; + entry[i].ring = ring; + entry[i].skb = NULL; + } + + ring->entry = entry; + + return 0; +} + +static int rt2x00lib_alloc_ring_entries(struct rt2x00_dev *rt2x00dev) +{ + struct data_ring *ring; + + /* + * Allocate the RX ring. 
+ */ + if (rt2x00lib_alloc_entries(rt2x00dev->rx, RX_ENTRIES, DATA_FRAME_SIZE, + rt2x00dev->ops->rxd_size)) + return -ENOMEM; + + /* + * First allocate the TX rings. + */ + txring_for_each(rt2x00dev, ring) { + if (rt2x00lib_alloc_entries(ring, TX_ENTRIES, DATA_FRAME_SIZE, + rt2x00dev->ops->txd_size)) + return -ENOMEM; + } + + if (!test_bit(DRIVER_REQUIRE_BEACON_RING, &rt2x00dev->flags)) + return 0; + + /* + * Allocate the BEACON ring. + */ + if (rt2x00lib_alloc_entries(&rt2x00dev->bcn[0], BEACON_ENTRIES, + MGMT_FRAME_SIZE, rt2x00dev->ops->txd_size)) + return -ENOMEM; + + /* + * Allocate the Atim ring. + */ + if (rt2x00lib_alloc_entries(&rt2x00dev->bcn[1], ATIM_ENTRIES, + DATA_FRAME_SIZE, rt2x00dev->ops->txd_size)) + return -ENOMEM; + + return 0; +} + +static void rt2x00lib_free_ring_entries(struct rt2x00_dev *rt2x00dev) +{ + struct data_ring *ring; + + ring_for_each(rt2x00dev, ring) { + kfree(ring->entry); + ring->entry = NULL; + } +} + +void rt2x00lib_uninitialize(struct rt2x00_dev *rt2x00dev) +{ + if (!__test_and_clear_bit(DEVICE_INITIALIZED, &rt2x00dev->flags)) + return; + + /* + * Unregister rfkill. + */ + rt2x00rfkill_unregister(rt2x00dev); + + /* + * Allow the HW to uninitialize. + */ + rt2x00dev->ops->lib->uninitialize(rt2x00dev); + + /* + * Free allocated ring entries. + */ + rt2x00lib_free_ring_entries(rt2x00dev); +} + +int rt2x00lib_initialize(struct rt2x00_dev *rt2x00dev) +{ + int status; + + if (test_bit(DEVICE_INITIALIZED, &rt2x00dev->flags)) + return 0; + + /* + * Allocate all ring entries. + */ + status = rt2x00lib_alloc_ring_entries(rt2x00dev); + if (status) { + ERROR(rt2x00dev, "Ring entries allocation failed.\n"); + return status; + } + + /* + * Initialize the device. + */ + status = rt2x00dev->ops->lib->initialize(rt2x00dev); + if (status) + goto exit; + + __set_bit(DEVICE_INITIALIZED, &rt2x00dev->flags); + + /* + * Register the rfkill handler. + */ + status = rt2x00rfkill_register(rt2x00dev); + if (status) + goto exit_unitialize; + + return 0; + +exit_unitialize: + rt2x00lib_uninitialize(rt2x00dev); + +exit: + rt2x00lib_free_ring_entries(rt2x00dev); + + return status; +} + +/* + * driver allocation handlers. + */ +static int rt2x00lib_alloc_rings(struct rt2x00_dev *rt2x00dev) +{ + struct data_ring *ring; + + /* + * We need the following rings: + * RX: 1 + * TX: hw->queues + * Beacon: 1 (if required) + * Atim: 1 (if required) + */ + rt2x00dev->data_rings = 1 + rt2x00dev->hw->queues + + (2 * test_bit(DRIVER_REQUIRE_BEACON_RING, &rt2x00dev->flags)); + + ring = kzalloc(rt2x00dev->data_rings * sizeof(*ring), GFP_KERNEL); + if (!ring) { + ERROR(rt2x00dev, "Ring allocation failed.\n"); + return -ENOMEM; + } + + /* + * Initialize pointers + */ + rt2x00dev->rx = ring; + rt2x00dev->tx = &rt2x00dev->rx[1]; + if (test_bit(DRIVER_REQUIRE_BEACON_RING, &rt2x00dev->flags)) + rt2x00dev->bcn = &rt2x00dev->tx[rt2x00dev->hw->queues]; + + /* + * Initialize ring parameters. + * cw_min: 2^5 = 32. + * cw_max: 2^10 = 1024. + */ + ring_for_each(rt2x00dev, ring) { + ring->rt2x00dev = rt2x00dev; + ring->tx_params.aifs = 2; + ring->tx_params.cw_min = 5; + ring->tx_params.cw_max = 10; + } + + return 0; +} + +static void rt2x00lib_free_rings(struct rt2x00_dev *rt2x00dev) +{ + kfree(rt2x00dev->rx); + rt2x00dev->rx = NULL; + rt2x00dev->tx = NULL; + rt2x00dev->bcn = NULL; +} + +int rt2x00lib_probe_dev(struct rt2x00_dev *rt2x00dev) +{ + int retval = -ENOMEM; + + /* + * Let the driver probe the device to detect the capabilities. 
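
Editor's sketch (not part of the patch) of the single-array ring layout set up by rt2x00lib_alloc_rings() above, for a device with four TX queues that also needs Beacon and Atim rings.

#include <stdio.h>

int main(void)
{
	int queues = 4, require_beacon_ring = 1;
	int data_rings = 1 + queues + 2 * require_beacon_ring;

	/* Index 0 is RX, 1..queues are TX, the last two are Beacon and Atim. */
	printf("total rings: %d\n", data_rings);	/* 7 */
	printf("rx = ring[0], tx = ring[1..%d], bcn = ring[%d..%d]\n",
	       queues, queues + 1, data_rings - 1);
	return 0;
}
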
+ */ + retval = rt2x00dev->ops->lib->probe_hw(rt2x00dev); + if (retval) { + ERROR(rt2x00dev, "Failed to allocate device.\n"); + goto exit; + } + + /* + * Initialize configuration work. + */ + INIT_WORK(&rt2x00dev->beacon_work, rt2x00lib_beacondone_scheduled); + INIT_WORK(&rt2x00dev->filter_work, rt2x00lib_packetfilter_scheduled); + INIT_WORK(&rt2x00dev->config_work, rt2x00lib_configuration_scheduled); + INIT_DELAYED_WORK(&rt2x00dev->link.work, rt2x00lib_link_tuner); + + /* + * Reset current working type. + */ + rt2x00dev->interface.type = INVALID_INTERFACE; + + /* + * Allocate ring array. + */ + retval = rt2x00lib_alloc_rings(rt2x00dev); + if (retval) + goto exit; + + /* + * Initialize ieee80211 structure. + */ + retval = rt2x00lib_probe_hw(rt2x00dev); + if (retval) { + ERROR(rt2x00dev, "Failed to initialize hw.\n"); + goto exit; + } + + /* + * Allocatie rfkill. + */ + retval = rt2x00rfkill_allocate(rt2x00dev); + if (retval) + goto exit; + + /* + * Open the debugfs entry. + */ + rt2x00debug_register(rt2x00dev); + + __set_bit(DEVICE_PRESENT, &rt2x00dev->flags); + + return 0; + +exit: + rt2x00lib_remove_dev(rt2x00dev); + + return retval; +} +EXPORT_SYMBOL_GPL(rt2x00lib_probe_dev); + +void rt2x00lib_remove_dev(struct rt2x00_dev *rt2x00dev) +{ + __clear_bit(DEVICE_PRESENT, &rt2x00dev->flags); + + /* + * Disable radio. + */ + rt2x00lib_disable_radio(rt2x00dev); + + /* + * Uninitialize device. + */ + rt2x00lib_uninitialize(rt2x00dev); + + /* + * Close debugfs entry. + */ + rt2x00debug_deregister(rt2x00dev); + + /* + * Free rfkill + */ + rt2x00rfkill_free(rt2x00dev); + + /* + * Free ieee80211_hw memory. + */ + rt2x00lib_remove_hw(rt2x00dev); + + /* + * Free firmware image. + */ + rt2x00lib_free_firmware(rt2x00dev); + + /* + * Free ring structures. + */ + rt2x00lib_free_rings(rt2x00dev); +} +EXPORT_SYMBOL_GPL(rt2x00lib_remove_dev); + +/* + * Device state handlers + */ +#ifdef CONFIG_PM +int rt2x00lib_suspend(struct rt2x00_dev *rt2x00dev, pm_message_t state) +{ + int retval; + + NOTICE(rt2x00dev, "Going to sleep.\n"); + __clear_bit(DEVICE_PRESENT, &rt2x00dev->flags); + + /* + * Only continue if mac80211 has open interfaces. + */ + if (!test_bit(DEVICE_STARTED, &rt2x00dev->flags)) + goto exit; + __set_bit(DEVICE_STARTED_SUSPEND, &rt2x00dev->flags); + + /* + * Disable radio and unitialize all items + * that must be recreated on resume. + */ + rt2x00mac_stop(rt2x00dev->hw); + rt2x00lib_uninitialize(rt2x00dev); + rt2x00debug_deregister(rt2x00dev); + +exit: + /* + * Set device mode to sleep for power management. + */ + retval = rt2x00dev->ops->lib->set_device_state(rt2x00dev, STATE_SLEEP); + if (retval) + return retval; + + return 0; +} +EXPORT_SYMBOL_GPL(rt2x00lib_suspend); + +int rt2x00lib_resume(struct rt2x00_dev *rt2x00dev) +{ + struct interface *intf = &rt2x00dev->interface; + int retval; + + NOTICE(rt2x00dev, "Waking up.\n"); + __set_bit(DEVICE_PRESENT, &rt2x00dev->flags); + + /* + * Open the debugfs entry. + */ + rt2x00debug_register(rt2x00dev); + + /* + * Only continue if mac80211 had open interfaces. + */ + if (!__test_and_clear_bit(DEVICE_STARTED_SUSPEND, &rt2x00dev->flags)) + return 0; + + /* + * Reinitialize device and all active interfaces. + */ + retval = rt2x00mac_start(rt2x00dev->hw); + if (retval) + goto exit; + + /* + * Reconfigure device. 
+ */ + rt2x00lib_config(rt2x00dev, &rt2x00dev->hw->conf, 1); + if (!rt2x00dev->hw->conf.radio_enabled) + rt2x00lib_disable_radio(rt2x00dev); + + rt2x00lib_config_mac_addr(rt2x00dev, intf->mac); + rt2x00lib_config_bssid(rt2x00dev, intf->bssid); + rt2x00lib_config_type(rt2x00dev, intf->type); + + /* + * It is possible that during that mac80211 has attempted + * to send frames while we were suspending or resuming. + * In that case we have disabled the TX queue and should + * now enable it again + */ + ieee80211_start_queues(rt2x00dev->hw); + + /* + * When in Master or Ad-hoc mode, + * restart Beacon transmitting by faking a beacondone event. + */ + if (intf->type == IEEE80211_IF_TYPE_AP || + intf->type == IEEE80211_IF_TYPE_IBSS) + rt2x00lib_beacondone(rt2x00dev); + + return 0; + +exit: + rt2x00lib_disable_radio(rt2x00dev); + rt2x00lib_uninitialize(rt2x00dev); + rt2x00debug_deregister(rt2x00dev); + + return retval; +} +EXPORT_SYMBOL_GPL(rt2x00lib_resume); +#endif /* CONFIG_PM */ + +/* + * rt2x00lib module information. + */ +MODULE_AUTHOR(DRV_PROJECT); +MODULE_VERSION(DRV_VERSION); +MODULE_DESCRIPTION("rt2x00 library"); +MODULE_LICENSE("GPL"); diff --git a/drivers/net/wireless/rt2x00/rt2x00firmware.c b/drivers/net/wireless/rt2x00/rt2x00firmware.c new file mode 100644 index 0000000000000000000000000000000000000000..236025f8b90f8834f491ee4d02be075904d21bcb --- /dev/null +++ b/drivers/net/wireless/rt2x00/rt2x00firmware.c @@ -0,0 +1,124 @@ +/* + Copyright (C) 2004 - 2007 rt2x00 SourceForge Project + + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the + Free Software Foundation, Inc., + 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + */ + +/* + Module: rt2x00lib + Abstract: rt2x00 firmware loading routines. + */ + +/* + * Set enviroment defines for rt2x00.h + */ +#define DRV_NAME "rt2x00lib" + +#include +#include +#include + +#include "rt2x00.h" +#include "rt2x00lib.h" + +static int rt2x00lib_request_firmware(struct rt2x00_dev *rt2x00dev) +{ + struct device *device = wiphy_dev(rt2x00dev->hw->wiphy); + const struct firmware *fw; + char *fw_name; + int retval; + u16 crc; + u16 tmp; + + /* + * Read correct firmware from harddisk. + */ + fw_name = rt2x00dev->ops->lib->get_firmware_name(rt2x00dev); + if (!fw_name) { + ERROR(rt2x00dev, + "Invalid firmware filename.\n" + "Please file bug report to %s.\n", DRV_PROJECT); + return -EINVAL; + } + + INFO(rt2x00dev, "Loading firmware file '%s'.\n", fw_name); + + retval = request_firmware(&fw, fw_name, device); + if (retval) { + ERROR(rt2x00dev, "Failed to request Firmware.\n"); + return retval; + } + + if (!fw || !fw->size || !fw->data) { + ERROR(rt2x00dev, "Failed to read Firmware.\n"); + return -ENOENT; + } + + /* + * Validate the firmware using 16 bit CRC. + * The last 2 bytes of the firmware are the CRC + * so substract those 2 bytes from the CRC checksum, + * and set those 2 bytes to 0 when calculating CRC. 
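
A userspace illustration (editor's sketch, not the kernel helper) of the firmware CRC check described above: a CRC-ITU-T (polynomial 0x1021, MSB first, initial value 0) computed over the image with its last two bytes treated as zero must equal the big-endian CRC stored in those two bytes. The bit-by-bit routine below is assumed to match the kernel's table-driven crc_itu_t().

#include <stddef.h>
#include <stdint.h>

static uint16_t crc_itu_t_bit(uint16_t crc, uint8_t data)
{
	int i;

	crc ^= (uint16_t)data << 8;
	for (i = 0; i < 8; i++)
		crc = (crc & 0x8000) ? (crc << 1) ^ 0x1021 : crc << 1;
	return crc;
}

/* Returns 1 when the trailing CRC matches, 0 otherwise. */
static int firmware_crc_ok(const uint8_t *data, size_t size)
{
	uint16_t crc = 0;
	size_t i;

	if (size < 2)
		return 0;

	for (i = 0; i < size - 2; i++)	/* image without the stored CRC */
		crc = crc_itu_t_bit(crc, data[i]);
	crc = crc_itu_t_bit(crc, 0);	/* the CRC bytes count as zero */
	crc = crc_itu_t_bit(crc, 0);

	return crc == (uint16_t)((data[size - 2] << 8) | data[size - 1]);
}
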
+ */ + tmp = 0; + crc = crc_itu_t(0, fw->data, fw->size - 2); + crc = crc_itu_t(crc, (u8 *)&tmp, 2); + + if (crc != (fw->data[fw->size - 2] << 8 | fw->data[fw->size - 1])) { + ERROR(rt2x00dev, "Firmware CRC error.\n"); + retval = -ENOENT; + goto exit; + } + + INFO(rt2x00dev, "Firmware detected - version: %d.%d.\n", + fw->data[fw->size - 4], fw->data[fw->size - 3]); + + rt2x00dev->fw = fw; + + return 0; + +exit: + release_firmware(fw); + + return retval; +} + +int rt2x00lib_load_firmware(struct rt2x00_dev *rt2x00dev) +{ + int retval; + + if (!rt2x00dev->fw) { + retval = rt2x00lib_request_firmware(rt2x00dev); + if (retval) + return retval; + } + + /* + * Send firmware to the device. + */ + retval = rt2x00dev->ops->lib->load_firmware(rt2x00dev, + rt2x00dev->fw->data, + rt2x00dev->fw->size); + return retval; +} + +void rt2x00lib_free_firmware(struct rt2x00_dev *rt2x00dev) +{ + release_firmware(rt2x00dev->fw); + rt2x00dev->fw = NULL; +} + diff --git a/drivers/net/wireless/rt2x00/rt2x00lib.h b/drivers/net/wireless/rt2x00/rt2x00lib.h new file mode 100644 index 0000000000000000000000000000000000000000..298faa9d3f61520a79565eff5194c744b835d4a9 --- /dev/null +++ b/drivers/net/wireless/rt2x00/rt2x00lib.h @@ -0,0 +1,119 @@ +/* + Copyright (C) 2004 - 2007 rt2x00 SourceForge Project + + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the + Free Software Foundation, Inc., + 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + */ + +/* + Module: rt2x00lib + Abstract: Data structures and definitions for the rt2x00lib module. + */ + +#ifndef RT2X00LIB_H +#define RT2X00LIB_H + +/* + * Interval defines + * Both the link tuner as the rfkill will be called once per second. + */ +#define LINK_TUNE_INTERVAL ( round_jiffies(HZ) ) +#define RFKILL_POLL_INTERVAL ( 1000 ) + +/* + * Radio control handlers. + */ +int rt2x00lib_enable_radio(struct rt2x00_dev *rt2x00dev); +void rt2x00lib_disable_radio(struct rt2x00_dev *rt2x00dev); +void rt2x00lib_toggle_rx(struct rt2x00_dev *rt2x00dev, enum dev_state state); +void rt2x00lib_reset_link_tuner(struct rt2x00_dev *rt2x00dev); + +/* + * Initialization handlers. + */ +int rt2x00lib_initialize(struct rt2x00_dev *rt2x00dev); +void rt2x00lib_uninitialize(struct rt2x00_dev *rt2x00dev); + +/* + * Configuration handlers. + */ +void rt2x00lib_config_mac_addr(struct rt2x00_dev *rt2x00dev, u8 *mac); +void rt2x00lib_config_bssid(struct rt2x00_dev *rt2x00dev, u8 *bssid); +void rt2x00lib_config_type(struct rt2x00_dev *rt2x00dev, const int type); +void rt2x00lib_config(struct rt2x00_dev *rt2x00dev, + struct ieee80211_conf *conf, const int force_config); + +/* + * Firmware handlers. 
+ */ +#ifdef CONFIG_RT2X00_LIB_FIRMWARE +int rt2x00lib_load_firmware(struct rt2x00_dev *rt2x00dev); +void rt2x00lib_free_firmware(struct rt2x00_dev *rt2x00dev); +#else +static inline int rt2x00lib_load_firmware(struct rt2x00_dev *rt2x00dev) +{ + return 0; +} +static inline void rt2x00lib_free_firmware(struct rt2x00_dev *rt2x00dev) +{ +} +#endif /* CONFIG_RT2X00_LIB_FIRMWARE */ + +/* + * Debugfs handlers. + */ +#ifdef CONFIG_RT2X00_LIB_DEBUGFS +void rt2x00debug_register(struct rt2x00_dev *rt2x00dev); +void rt2x00debug_deregister(struct rt2x00_dev *rt2x00dev); +#else +static inline void rt2x00debug_register(struct rt2x00_dev *rt2x00dev) +{ +} + +static inline void rt2x00debug_deregister(struct rt2x00_dev *rt2x00dev) +{ +} +#endif /* CONFIG_RT2X00_LIB_DEBUGFS */ + +/* + * RFkill handlers. + */ +#ifdef CONFIG_RT2X00_LIB_RFKILL +int rt2x00rfkill_register(struct rt2x00_dev *rt2x00dev); +void rt2x00rfkill_unregister(struct rt2x00_dev *rt2x00dev); +int rt2x00rfkill_allocate(struct rt2x00_dev *rt2x00dev); +void rt2x00rfkill_free(struct rt2x00_dev *rt2x00dev); +#else +static inline int rt2x00rfkill_register(struct rt2x00_dev *rt2x00dev) +{ + return 0; +} + +static inline void rt2x00rfkill_unregister(struct rt2x00_dev *rt2x00dev) +{ +} + +static inline int rt2x00rfkill_allocate(struct rt2x00_dev *rt2x00dev) +{ + return 0; +} + +static inline void rt2x00rfkill_free(struct rt2x00_dev *rt2x00dev) +{ +} +#endif /* CONFIG_RT2X00_LIB_RFKILL */ + +#endif /* RT2X00LIB_H */ diff --git a/drivers/net/wireless/rt2x00/rt2x00mac.c b/drivers/net/wireless/rt2x00/rt2x00mac.c new file mode 100644 index 0000000000000000000000000000000000000000..4a6a0bd01ff192ead09ab3b929f33f57f7c18882 --- /dev/null +++ b/drivers/net/wireless/rt2x00/rt2x00mac.c @@ -0,0 +1,438 @@ +/* + Copyright (C) 2004 - 2007 rt2x00 SourceForge Project + + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the + Free Software Foundation, Inc., + 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + */ + +/* + Module: rt2x00mac + Abstract: rt2x00 generic mac80211 routines. 
+ */ + +/* + * Set enviroment defines for rt2x00.h + */ +#define DRV_NAME "rt2x00lib" + +#include +#include + +#include "rt2x00.h" +#include "rt2x00lib.h" + +static int rt2x00mac_tx_rts_cts(struct rt2x00_dev *rt2x00dev, + struct data_ring *ring, + struct sk_buff *frag_skb, + struct ieee80211_tx_control *control) +{ + struct sk_buff *skb; + int size; + + if (control->flags & IEEE80211_TXCTL_USE_CTS_PROTECT) + size = sizeof(struct ieee80211_cts); + else + size = sizeof(struct ieee80211_rts); + + skb = dev_alloc_skb(size + rt2x00dev->hw->extra_tx_headroom); + if (!skb) { + WARNING(rt2x00dev, "Failed to create RTS/CTS frame.\n"); + return NETDEV_TX_BUSY; + } + + skb_reserve(skb, rt2x00dev->hw->extra_tx_headroom); + skb_put(skb, size); + + if (control->flags & IEEE80211_TXCTL_USE_CTS_PROTECT) + ieee80211_ctstoself_get(rt2x00dev->hw, rt2x00dev->interface.id, + frag_skb->data, frag_skb->len, control, + (struct ieee80211_cts *)(skb->data)); + else + ieee80211_rts_get(rt2x00dev->hw, rt2x00dev->interface.id, + frag_skb->data, frag_skb->len, control, + (struct ieee80211_rts *)(skb->data)); + + if (rt2x00dev->ops->lib->write_tx_data(rt2x00dev, ring, skb, control)) { + WARNING(rt2x00dev, "Failed to send RTS/CTS frame.\n"); + return NETDEV_TX_BUSY; + } + + return NETDEV_TX_OK; +} + +int rt2x00mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb, + struct ieee80211_tx_control *control) +{ + struct rt2x00_dev *rt2x00dev = hw->priv; + struct ieee80211_hdr *ieee80211hdr = (struct ieee80211_hdr *)skb->data; + struct data_ring *ring; + u16 frame_control; + + /* + * Mac80211 might be calling this function while we are trying + * to remove the device or perhaps suspending it. + * Note that we can only stop the TX queues inside the TX path + * due to possible race conditions in mac80211. + */ + if (!test_bit(DEVICE_PRESENT, &rt2x00dev->flags)) { + ieee80211_stop_queues(hw); + return 0; + } + + /* + * Determine which ring to put packet on. + */ + ring = rt2x00lib_get_ring(rt2x00dev, control->queue); + if (unlikely(!ring)) { + ERROR(rt2x00dev, + "Attempt to send packet over invalid queue %d.\n" + "Please file bug report to %s.\n", + control->queue, DRV_PROJECT); + dev_kfree_skb_any(skb); + return NETDEV_TX_OK; + } + + /* + * If CTS/RTS is required. and this frame is not CTS or RTS, + * create and queue that frame first. But make sure we have + * at least enough entries available to send this CTS/RTS + * frame as well as the data frame. + */ + frame_control = le16_to_cpu(ieee80211hdr->frame_control); + if (!is_rts_frame(frame_control) && !is_cts_frame(frame_control) && + (control->flags & (IEEE80211_TXCTL_USE_RTS_CTS | + IEEE80211_TXCTL_USE_CTS_PROTECT))) { + if (rt2x00_ring_free(ring) <= 1) + return NETDEV_TX_BUSY; + + if (rt2x00mac_tx_rts_cts(rt2x00dev, ring, skb, control)) + return NETDEV_TX_BUSY; + } + + if (rt2x00dev->ops->lib->write_tx_data(rt2x00dev, ring, skb, control)) + return NETDEV_TX_BUSY; + + if (rt2x00dev->ops->lib->kick_tx_queue) + rt2x00dev->ops->lib->kick_tx_queue(rt2x00dev, control->queue); + + return NETDEV_TX_OK; +} +EXPORT_SYMBOL_GPL(rt2x00mac_tx); + +int rt2x00mac_start(struct ieee80211_hw *hw) +{ + struct rt2x00_dev *rt2x00dev = hw->priv; + int status; + + if (!test_bit(DEVICE_PRESENT, &rt2x00dev->flags) || + test_bit(DEVICE_STARTED, &rt2x00dev->flags)) + return 0; + + /* + * If this is the first interface which is added, + * we should load the firmware now. 
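
Editor's sketch of the queueing rule applied in rt2x00mac_tx() above: when a data frame asks for RTS/CTS protection, the protection frame is written to the ring first, so the ring must have room for at least two entries before the data frame is accepted.

#include <stdbool.h>

static bool ring_has_room(bool frame_is_rts_or_cts, bool wants_protection,
			  unsigned int free_entries)
{
	if (frame_is_rts_or_cts || !wants_protection)
		return free_entries >= 1;	/* only the frame itself */

	return free_entries >= 2;		/* RTS/CTS frame plus data frame */
}
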
+ */ + if (test_bit(DRIVER_REQUIRE_FIRMWARE, &rt2x00dev->flags)) { + status = rt2x00lib_load_firmware(rt2x00dev); + if (status) + return status; + } + + /* + * Initialize the device. + */ + status = rt2x00lib_initialize(rt2x00dev); + if (status) + return status; + + /* + * Enable radio. + */ + status = rt2x00lib_enable_radio(rt2x00dev); + if (status) { + rt2x00lib_uninitialize(rt2x00dev); + return status; + } + + __set_bit(DEVICE_STARTED, &rt2x00dev->flags); + + return 0; +} +EXPORT_SYMBOL_GPL(rt2x00mac_start); + +void rt2x00mac_stop(struct ieee80211_hw *hw) +{ + struct rt2x00_dev *rt2x00dev = hw->priv; + + if (!test_bit(DEVICE_PRESENT, &rt2x00dev->flags)) + return; + + /* + * Perhaps we can add something smarter here, + * but for now just disabling the radio should do. + */ + rt2x00lib_disable_radio(rt2x00dev); + + __clear_bit(DEVICE_STARTED, &rt2x00dev->flags); +} +EXPORT_SYMBOL_GPL(rt2x00mac_stop); + +int rt2x00mac_add_interface(struct ieee80211_hw *hw, + struct ieee80211_if_init_conf *conf) +{ + struct rt2x00_dev *rt2x00dev = hw->priv; + struct interface *intf = &rt2x00dev->interface; + + /* + * Don't allow interfaces to be added while + * either the device has disappeared or when + * another interface is already present. + */ + if (!test_bit(DEVICE_PRESENT, &rt2x00dev->flags) || + is_interface_present(intf)) + return -ENOBUFS; + + intf->id = conf->if_id; + intf->type = conf->type; + if (conf->type == IEEE80211_IF_TYPE_AP) + memcpy(&intf->bssid, conf->mac_addr, ETH_ALEN); + memcpy(&intf->mac, conf->mac_addr, ETH_ALEN); + + /* + * The MAC adddress must be configured after the device + * has been initialized. Otherwise the device can reset + * the MAC registers. + */ + rt2x00lib_config_mac_addr(rt2x00dev, intf->mac); + rt2x00lib_config_type(rt2x00dev, conf->type); + + return 0; +} +EXPORT_SYMBOL_GPL(rt2x00mac_add_interface); + +void rt2x00mac_remove_interface(struct ieee80211_hw *hw, + struct ieee80211_if_init_conf *conf) +{ + struct rt2x00_dev *rt2x00dev = hw->priv; + struct interface *intf = &rt2x00dev->interface; + + /* + * Don't allow interfaces to be remove while + * either the device has disappeared or when + * no interface is present. + */ + if (!test_bit(DEVICE_PRESENT, &rt2x00dev->flags) || + !is_interface_present(intf)) + return; + + intf->id = 0; + intf->type = INVALID_INTERFACE; + memset(&intf->bssid, 0x00, ETH_ALEN); + memset(&intf->mac, 0x00, ETH_ALEN); + + /* + * Make sure the bssid and mac address registers + * are cleared to prevent false ACKing of frames. + */ + rt2x00lib_config_mac_addr(rt2x00dev, intf->mac); + rt2x00lib_config_bssid(rt2x00dev, intf->bssid); + rt2x00lib_config_type(rt2x00dev, intf->type); +} +EXPORT_SYMBOL_GPL(rt2x00mac_remove_interface); + +int rt2x00mac_config(struct ieee80211_hw *hw, struct ieee80211_conf *conf) +{ + struct rt2x00_dev *rt2x00dev = hw->priv; + + /* + * Mac80211 might be calling this function while we are trying + * to remove the device or perhaps suspending it. + */ + if (!test_bit(DEVICE_PRESENT, &rt2x00dev->flags)) + return 0; + + /* + * Check if we need to disable the radio, + * if this is not the case, at least the RX must be disabled. + */ + if (test_bit(DEVICE_ENABLED_RADIO, &rt2x00dev->flags)) { + if (!conf->radio_enabled) + rt2x00lib_disable_radio(rt2x00dev); + else + rt2x00lib_toggle_rx(rt2x00dev, STATE_RADIO_RX_OFF); + } + + rt2x00lib_config(rt2x00dev, conf, 0); + + /* + * Reenable RX only if the radio should be on. 
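+	 * If the radio is already up, only the RX path has to be restarted
+	 * below; if mac80211 asked for the radio while it was down, the
+	 * whole radio is brought up instead (the else branch).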
+ */ + if (test_bit(DEVICE_ENABLED_RADIO, &rt2x00dev->flags)) + rt2x00lib_toggle_rx(rt2x00dev, STATE_RADIO_RX_ON); + else if (conf->radio_enabled) + return rt2x00lib_enable_radio(rt2x00dev); + + return 0; +} +EXPORT_SYMBOL_GPL(rt2x00mac_config); + +int rt2x00mac_config_interface(struct ieee80211_hw *hw, int if_id, + struct ieee80211_if_conf *conf) +{ + struct rt2x00_dev *rt2x00dev = hw->priv; + struct interface *intf = &rt2x00dev->interface; + int status; + + /* + * Mac80211 might be calling this function while we are trying + * to remove the device or perhaps suspending it. + */ + if (!test_bit(DEVICE_PRESENT, &rt2x00dev->flags)) + return 0; + + /* + * If the given type does not match the configured type, + * there has been a problem. + */ + if (conf->type != intf->type) + return -EINVAL; + + /* + * If the interface does not work in master mode, + * then the bssid value in the interface structure + * should now be set. + */ + if (conf->type != IEEE80211_IF_TYPE_AP) + memcpy(&intf->bssid, conf->bssid, ETH_ALEN); + rt2x00lib_config_bssid(rt2x00dev, intf->bssid); + + /* + * We only need to initialize the beacon when master mode is enabled. + */ + if (conf->type != IEEE80211_IF_TYPE_AP || !conf->beacon) + return 0; + + status = rt2x00dev->ops->hw->beacon_update(rt2x00dev->hw, + conf->beacon, + conf->beacon_control); + if (status) + dev_kfree_skb(conf->beacon); + + return status; +} +EXPORT_SYMBOL_GPL(rt2x00mac_config_interface); + +int rt2x00mac_get_stats(struct ieee80211_hw *hw, + struct ieee80211_low_level_stats *stats) +{ + struct rt2x00_dev *rt2x00dev = hw->priv; + + /* + * The dot11ACKFailureCount, dot11RTSFailureCount and + * dot11RTSSuccessCount are updated in interrupt time. + * dot11FCSErrorCount is updated in the link tuner. + */ + memcpy(stats, &rt2x00dev->low_level_stats, sizeof(*stats)); + + return 0; +} +EXPORT_SYMBOL_GPL(rt2x00mac_get_stats); + +int rt2x00mac_get_tx_stats(struct ieee80211_hw *hw, + struct ieee80211_tx_queue_stats *stats) +{ + struct rt2x00_dev *rt2x00dev = hw->priv; + unsigned int i; + + for (i = 0; i < hw->queues; i++) + memcpy(&stats->data[i], &rt2x00dev->tx[i].stats, + sizeof(rt2x00dev->tx[i].stats)); + + return 0; +} +EXPORT_SYMBOL_GPL(rt2x00mac_get_tx_stats); + +void rt2x00mac_erp_ie_changed(struct ieee80211_hw *hw, u8 changes, + int cts_protection, int preamble) +{ + struct rt2x00_dev *rt2x00dev = hw->priv; + int short_preamble; + int ack_timeout; + int ack_consume_time; + int difs; + + /* + * We only support changing preamble mode. + */ + if (!(changes & IEEE80211_ERP_CHANGE_PREAMBLE)) + return; + + short_preamble = !preamble; + preamble = !!(preamble) ? PREAMBLE : SHORT_PREAMBLE; + + difs = (hw->conf.flags & IEEE80211_CONF_SHORT_SLOT_TIME) ? + SHORT_DIFS : DIFS; + ack_timeout = difs + PLCP + preamble + get_duration(ACK_SIZE, 10); + + ack_consume_time = SIFS + PLCP + preamble + get_duration(ACK_SIZE, 10); + + if (short_preamble) + __set_bit(CONFIG_SHORT_PREAMBLE, &rt2x00dev->flags); + else + __clear_bit(CONFIG_SHORT_PREAMBLE, &rt2x00dev->flags); + + rt2x00dev->ops->lib->config_preamble(rt2x00dev, short_preamble, + ack_timeout, ack_consume_time); +} +EXPORT_SYMBOL_GPL(rt2x00mac_erp_ie_changed); + +int rt2x00mac_conf_tx(struct ieee80211_hw *hw, int queue, + const struct ieee80211_tx_queue_params *params) +{ + struct rt2x00_dev *rt2x00dev = hw->priv; + struct data_ring *ring; + + ring = rt2x00lib_get_ring(rt2x00dev, queue); + if (unlikely(!ring)) + return -EINVAL; + + /* + * The passed variables are stored as real value ((2^n)-1). 
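+	 * For an illustrative cw_min of 31, i.e. (2^5)-1, the interesting
+	 * quantity is the exponent 5, which is exactly what fls(31)
+	 * returns in the assignments below.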
+ * Ralink registers require to know the bit number 'n'. + */ + if (params->cw_min) + ring->tx_params.cw_min = fls(params->cw_min); + else + ring->tx_params.cw_min = 5; /* cw_min: 2^5 = 32. */ + + if (params->cw_max) + ring->tx_params.cw_max = fls(params->cw_max); + else + ring->tx_params.cw_max = 10; /* cw_min: 2^10 = 1024. */ + + if (params->aifs) + ring->tx_params.aifs = params->aifs; + else + ring->tx_params.aifs = 2; + + INFO(rt2x00dev, + "Configured TX ring %d - CWmin: %d, CWmax: %d, Aifs: %d.\n", + queue, ring->tx_params.cw_min, ring->tx_params.cw_max, + ring->tx_params.aifs); + + return 0; +} +EXPORT_SYMBOL_GPL(rt2x00mac_conf_tx); diff --git a/drivers/net/wireless/rt2x00/rt2x00pci.c b/drivers/net/wireless/rt2x00/rt2x00pci.c new file mode 100644 index 0000000000000000000000000000000000000000..2780df00623c4f9392e40087868326fbd8bb7522 --- /dev/null +++ b/drivers/net/wireless/rt2x00/rt2x00pci.c @@ -0,0 +1,474 @@ +/* + Copyright (C) 2004 - 2007 rt2x00 SourceForge Project + + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the + Free Software Foundation, Inc., + 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + */ + +/* + Module: rt2x00pci + Abstract: rt2x00 generic pci device routines. + */ + +/* + * Set enviroment defines for rt2x00.h + */ +#define DRV_NAME "rt2x00pci" + +#include +#include +#include +#include + +#include "rt2x00.h" +#include "rt2x00pci.h" + +/* + * Beacon handlers. + */ +int rt2x00pci_beacon_update(struct ieee80211_hw *hw, struct sk_buff *skb, + struct ieee80211_tx_control *control) +{ + struct rt2x00_dev *rt2x00dev = hw->priv; + struct data_ring *ring = + rt2x00lib_get_ring(rt2x00dev, IEEE80211_TX_QUEUE_BEACON); + struct data_entry *entry = rt2x00_get_data_entry(ring); + + /* + * Just in case mac80211 doesn't set this correctly, + * but we need this queue set for the descriptor + * initialization. + */ + control->queue = IEEE80211_TX_QUEUE_BEACON; + + /* + * Update the beacon entry. + */ + memcpy(entry->data_addr, skb->data, skb->len); + rt2x00lib_write_tx_desc(rt2x00dev, entry->priv, + (struct ieee80211_hdr *)skb->data, + skb->len, control); + + /* + * Enable beacon generation. + */ + rt2x00dev->ops->lib->kick_tx_queue(rt2x00dev, control->queue); + + return 0; +} +EXPORT_SYMBOL_GPL(rt2x00pci_beacon_update); + +/* + * TX data handlers. 
+ */ +int rt2x00pci_write_tx_data(struct rt2x00_dev *rt2x00dev, + struct data_ring *ring, struct sk_buff *skb, + struct ieee80211_tx_control *control) +{ + struct ieee80211_hdr *ieee80211hdr = (struct ieee80211_hdr *)skb->data; + struct data_entry *entry = rt2x00_get_data_entry(ring); + struct data_desc *txd = entry->priv; + u32 word; + + if (rt2x00_ring_full(ring)) { + ieee80211_stop_queue(rt2x00dev->hw, control->queue); + return -EINVAL; + } + + rt2x00_desc_read(txd, 0, &word); + + if (rt2x00_get_field32(word, TXD_ENTRY_OWNER_NIC) || + rt2x00_get_field32(word, TXD_ENTRY_VALID)) { + ERROR(rt2x00dev, + "Arrived at non-free entry in the non-full queue %d.\n" + "Please file bug report to %s.\n", + control->queue, DRV_PROJECT); + ieee80211_stop_queue(rt2x00dev->hw, control->queue); + return -EINVAL; + } + + entry->skb = skb; + memcpy(&entry->tx_status.control, control, sizeof(*control)); + memcpy(entry->data_addr, skb->data, skb->len); + rt2x00lib_write_tx_desc(rt2x00dev, txd, ieee80211hdr, + skb->len, control); + + rt2x00_ring_index_inc(ring); + + if (rt2x00_ring_full(ring)) + ieee80211_stop_queue(rt2x00dev->hw, control->queue); + + return 0; +} +EXPORT_SYMBOL_GPL(rt2x00pci_write_tx_data); + +/* + * RX data handlers. + */ +void rt2x00pci_rxdone(struct rt2x00_dev *rt2x00dev) +{ + struct data_ring *ring = rt2x00dev->rx; + struct data_entry *entry; + struct data_desc *rxd; + struct sk_buff *skb; + struct rxdata_entry_desc desc; + u32 word; + + while (1) { + entry = rt2x00_get_data_entry(ring); + rxd = entry->priv; + rt2x00_desc_read(rxd, 0, &word); + + if (rt2x00_get_field32(word, RXD_ENTRY_OWNER_NIC)) + break; + + memset(&desc, 0x00, sizeof(desc)); + rt2x00dev->ops->lib->fill_rxdone(entry, &desc); + + /* + * Allocate the sk_buffer, initialize it and copy + * all data into it. + */ + skb = dev_alloc_skb(desc.size + NET_IP_ALIGN); + if (!skb) + return; + + skb_reserve(skb, NET_IP_ALIGN); + skb_put(skb, desc.size); + memcpy(skb->data, entry->data_addr, desc.size); + + /* + * Send the frame to rt2x00lib for further processing. + */ + rt2x00lib_rxdone(entry, skb, &desc); + + if (test_bit(DEVICE_ENABLED_RADIO, &ring->rt2x00dev->flags)) { + rt2x00_set_field32(&word, RXD_ENTRY_OWNER_NIC, 1); + rt2x00_desc_write(rxd, 0, word); + } + + rt2x00_ring_index_inc(ring); + } +} +EXPORT_SYMBOL_GPL(rt2x00pci_rxdone); + +/* + * Device initialization handlers. + */ +#define priv_offset(__ring, __i) \ +({ \ + ring->data_addr + (i * ring->desc_size); \ +}) + +#define data_addr_offset(__ring, __i) \ +({ \ + (__ring)->data_addr + \ + ((__ring)->stats.limit * (__ring)->desc_size) + \ + ((__i) * (__ring)->data_size); \ +}) + +#define data_dma_offset(__ring, __i) \ +({ \ + (__ring)->data_dma + \ + ((__ring)->stats.limit * (__ring)->desc_size) + \ + ((__i) * (__ring)->data_size); \ +}) + +static int rt2x00pci_alloc_dma(struct rt2x00_dev *rt2x00dev, + struct data_ring *ring) +{ + unsigned int i; + + /* + * Allocate DMA memory for descriptor and buffer. + */ + ring->data_addr = pci_alloc_consistent(rt2x00dev_pci(rt2x00dev), + rt2x00_get_ring_size(ring), + &ring->data_dma); + if (!ring->data_addr) + return -ENOMEM; + + /* + * Initialize all ring entries to contain valid + * addresses. 
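+	 * The consistent DMA block is laid out with all descriptors first,
+	 * followed by all data buffers, so in pseudo-code:
+	 *	desc(i) = data_addr + i * desc_size
+	 *	data(i) = data_addr + limit * desc_size + i * data_size
+	 * which is what the priv_offset() and data_addr_offset() helpers
+	 * above compute.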
+ */ + for (i = 0; i < ring->stats.limit; i++) { + ring->entry[i].priv = priv_offset(ring, i); + ring->entry[i].data_addr = data_addr_offset(ring, i); + ring->entry[i].data_dma = data_dma_offset(ring, i); + } + + return 0; +} + +static void rt2x00pci_free_dma(struct rt2x00_dev *rt2x00dev, + struct data_ring *ring) +{ + if (ring->data_addr) + pci_free_consistent(rt2x00dev_pci(rt2x00dev), + rt2x00_get_ring_size(ring), + ring->data_addr, ring->data_dma); + ring->data_addr = NULL; +} + +int rt2x00pci_initialize(struct rt2x00_dev *rt2x00dev) +{ + struct pci_dev *pci_dev = rt2x00dev_pci(rt2x00dev); + struct data_ring *ring; + int status; + + /* + * Allocate DMA + */ + ring_for_each(rt2x00dev, ring) { + status = rt2x00pci_alloc_dma(rt2x00dev, ring); + if (status) + goto exit; + } + + /* + * Register interrupt handler. + */ + status = request_irq(pci_dev->irq, rt2x00dev->ops->lib->irq_handler, + IRQF_SHARED, pci_name(pci_dev), rt2x00dev); + if (status) { + ERROR(rt2x00dev, "IRQ %d allocation failed (error %d).\n", + pci_dev->irq, status); + return status; + } + + return 0; + +exit: + rt2x00pci_uninitialize(rt2x00dev); + + return status; +} +EXPORT_SYMBOL_GPL(rt2x00pci_initialize); + +void rt2x00pci_uninitialize(struct rt2x00_dev *rt2x00dev) +{ + struct data_ring *ring; + + /* + * Free irq line. + */ + free_irq(rt2x00dev_pci(rt2x00dev)->irq, rt2x00dev); + + /* + * Free DMA + */ + ring_for_each(rt2x00dev, ring) + rt2x00pci_free_dma(rt2x00dev, ring); +} +EXPORT_SYMBOL_GPL(rt2x00pci_uninitialize); + +/* + * PCI driver handlers. + */ +static void rt2x00pci_free_reg(struct rt2x00_dev *rt2x00dev) +{ + kfree(rt2x00dev->rf); + rt2x00dev->rf = NULL; + + kfree(rt2x00dev->eeprom); + rt2x00dev->eeprom = NULL; + + if (rt2x00dev->csr_addr) { + iounmap(rt2x00dev->csr_addr); + rt2x00dev->csr_addr = NULL; + } +} + +static int rt2x00pci_alloc_reg(struct rt2x00_dev *rt2x00dev) +{ + struct pci_dev *pci_dev = rt2x00dev_pci(rt2x00dev); + + rt2x00dev->csr_addr = ioremap(pci_resource_start(pci_dev, 0), + pci_resource_len(pci_dev, 0)); + if (!rt2x00dev->csr_addr) + goto exit; + + rt2x00dev->eeprom = kzalloc(rt2x00dev->ops->eeprom_size, GFP_KERNEL); + if (!rt2x00dev->eeprom) + goto exit; + + rt2x00dev->rf = kzalloc(rt2x00dev->ops->rf_size, GFP_KERNEL); + if (!rt2x00dev->rf) + goto exit; + + return 0; + +exit: + ERROR_PROBE("Failed to allocate registers.\n"); + + rt2x00pci_free_reg(rt2x00dev); + + return -ENOMEM; +} + +int rt2x00pci_probe(struct pci_dev *pci_dev, const struct pci_device_id *id) +{ + struct rt2x00_ops *ops = (struct rt2x00_ops *)id->driver_data; + struct ieee80211_hw *hw; + struct rt2x00_dev *rt2x00dev; + int retval; + + retval = pci_request_regions(pci_dev, pci_name(pci_dev)); + if (retval) { + ERROR_PROBE("PCI request regions failed.\n"); + return retval; + } + + retval = pci_enable_device(pci_dev); + if (retval) { + ERROR_PROBE("Enable device failed.\n"); + goto exit_release_regions; + } + + pci_set_master(pci_dev); + + if (pci_set_mwi(pci_dev)) + ERROR_PROBE("MWI not available.\n"); + + if (pci_set_dma_mask(pci_dev, DMA_64BIT_MASK) && + pci_set_dma_mask(pci_dev, DMA_32BIT_MASK)) { + ERROR_PROBE("PCI DMA not supported.\n"); + retval = -EIO; + goto exit_disable_device; + } + + hw = ieee80211_alloc_hw(sizeof(struct rt2x00_dev), ops->hw); + if (!hw) { + ERROR_PROBE("Failed to allocate hardware.\n"); + retval = -ENOMEM; + goto exit_disable_device; + } + + pci_set_drvdata(pci_dev, hw); + + rt2x00dev = hw->priv; + rt2x00dev->dev = pci_dev; + rt2x00dev->ops = ops; + rt2x00dev->hw = hw; + + retval = 
rt2x00pci_alloc_reg(rt2x00dev); + if (retval) + goto exit_free_device; + + retval = rt2x00lib_probe_dev(rt2x00dev); + if (retval) + goto exit_free_reg; + + return 0; + +exit_free_reg: + rt2x00pci_free_reg(rt2x00dev); + +exit_free_device: + ieee80211_free_hw(hw); + +exit_disable_device: + if (retval != -EBUSY) + pci_disable_device(pci_dev); + +exit_release_regions: + pci_release_regions(pci_dev); + + pci_set_drvdata(pci_dev, NULL); + + return retval; +} +EXPORT_SYMBOL_GPL(rt2x00pci_probe); + +void rt2x00pci_remove(struct pci_dev *pci_dev) +{ + struct ieee80211_hw *hw = pci_get_drvdata(pci_dev); + struct rt2x00_dev *rt2x00dev = hw->priv; + + /* + * Free all allocated data. + */ + rt2x00lib_remove_dev(rt2x00dev); + rt2x00pci_free_reg(rt2x00dev); + ieee80211_free_hw(hw); + + /* + * Free the PCI device data. + */ + pci_set_drvdata(pci_dev, NULL); + pci_disable_device(pci_dev); + pci_release_regions(pci_dev); +} +EXPORT_SYMBOL_GPL(rt2x00pci_remove); + +#ifdef CONFIG_PM +int rt2x00pci_suspend(struct pci_dev *pci_dev, pm_message_t state) +{ + struct ieee80211_hw *hw = pci_get_drvdata(pci_dev); + struct rt2x00_dev *rt2x00dev = hw->priv; + int retval; + + retval = rt2x00lib_suspend(rt2x00dev, state); + if (retval) + return retval; + + rt2x00pci_free_reg(rt2x00dev); + + pci_save_state(pci_dev); + pci_disable_device(pci_dev); + return pci_set_power_state(pci_dev, pci_choose_state(pci_dev, state)); +} +EXPORT_SYMBOL_GPL(rt2x00pci_suspend); + +int rt2x00pci_resume(struct pci_dev *pci_dev) +{ + struct ieee80211_hw *hw = pci_get_drvdata(pci_dev); + struct rt2x00_dev *rt2x00dev = hw->priv; + int retval; + + if (pci_set_power_state(pci_dev, PCI_D0) || + pci_enable_device(pci_dev) || + pci_restore_state(pci_dev)) { + ERROR(rt2x00dev, "Failed to resume device.\n"); + return -EIO; + } + + retval = rt2x00pci_alloc_reg(rt2x00dev); + if (retval) + return retval; + + retval = rt2x00lib_resume(rt2x00dev); + if (retval) + goto exit_free_reg; + + return 0; + +exit_free_reg: + rt2x00pci_free_reg(rt2x00dev); + + return retval; +} +EXPORT_SYMBOL_GPL(rt2x00pci_resume); +#endif /* CONFIG_PM */ + +/* + * rt2x00pci module information. + */ +MODULE_AUTHOR(DRV_PROJECT); +MODULE_VERSION(DRV_VERSION); +MODULE_DESCRIPTION("rt2x00 library"); +MODULE_LICENSE("GPL"); diff --git a/drivers/net/wireless/rt2x00/rt2x00pci.h b/drivers/net/wireless/rt2x00/rt2x00pci.h new file mode 100644 index 0000000000000000000000000000000000000000..82adeac061d0d309c0f8a475db4dcb2cc6175e9e --- /dev/null +++ b/drivers/net/wireless/rt2x00/rt2x00pci.h @@ -0,0 +1,127 @@ +/* + Copyright (C) 2004 - 2007 rt2x00 SourceForge Project + + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the + Free Software Foundation, Inc., + 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + */ + +/* + Module: rt2x00pci + Abstract: Data structures for the rt2x00pci module. + */ + +#ifndef RT2X00PCI_H +#define RT2X00PCI_H + +#include + +/* + * This variable should be used with the + * pci_driver structure initialization. 
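+ * A typical device table entry would then look something like
+ * (IDs and ops name purely illustrative):
+ *	{ PCI_DEVICE(0x1814, 0x0301), PCI_DEVICE_DATA(&rt61pci_ops) },
+ * which lets rt2x00pci_probe() recover the rt2x00_ops pointer from
+ * id->driver_data.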
+ */ +#define PCI_DEVICE_DATA(__ops) .driver_data = (kernel_ulong_t)(__ops) + +/* + * Register defines. + * Some registers require multiple attempts before success, + * in those cases REGISTER_BUSY_COUNT attempts should be + * taken with a REGISTER_BUSY_DELAY interval. + */ +#define REGISTER_BUSY_COUNT 5 +#define REGISTER_BUSY_DELAY 100 + +/* + * Descriptor availability flags. + * All PCI device descriptors have these 2 flags + * with the exact same definition. + * By storing them here we can use them inside rt2x00pci + * for some simple entry availability checking. + */ +#define TXD_ENTRY_OWNER_NIC FIELD32(0x00000001) +#define TXD_ENTRY_VALID FIELD32(0x00000002) +#define RXD_ENTRY_OWNER_NIC FIELD32(0x00000001) + +/* + * Register access. + */ +static inline void rt2x00pci_register_read(const struct rt2x00_dev *rt2x00dev, + const unsigned long offset, + u32 *value) +{ + *value = readl(rt2x00dev->csr_addr + offset); +} + +static inline void +rt2x00pci_register_multiread(const struct rt2x00_dev *rt2x00dev, + const unsigned long offset, + void *value, const u16 length) +{ + memcpy_fromio(value, rt2x00dev->csr_addr + offset, length); +} + +static inline void rt2x00pci_register_write(const struct rt2x00_dev *rt2x00dev, + const unsigned long offset, + u32 value) +{ + writel(value, rt2x00dev->csr_addr + offset); +} + +static inline void +rt2x00pci_register_multiwrite(const struct rt2x00_dev *rt2x00dev, + const unsigned long offset, + void *value, const u16 length) +{ + memcpy_toio(rt2x00dev->csr_addr + offset, value, length); +} + +/* + * Beacon handlers. + */ +int rt2x00pci_beacon_update(struct ieee80211_hw *hw, struct sk_buff *skb, + struct ieee80211_tx_control *control); + +/* + * TX data handlers. + */ +int rt2x00pci_write_tx_data(struct rt2x00_dev *rt2x00dev, + struct data_ring *ring, struct sk_buff *skb, + struct ieee80211_tx_control *control); + +/* + * RX data handlers. + */ +void rt2x00pci_rxdone(struct rt2x00_dev *rt2x00dev); + +/* + * Device initialization handlers. + */ +int rt2x00pci_initialize(struct rt2x00_dev *rt2x00dev); +void rt2x00pci_uninitialize(struct rt2x00_dev *rt2x00dev); + +/* + * PCI driver handlers. + */ +int rt2x00pci_probe(struct pci_dev *pci_dev, const struct pci_device_id *id); +void rt2x00pci_remove(struct pci_dev *pci_dev); +#ifdef CONFIG_PM +int rt2x00pci_suspend(struct pci_dev *pci_dev, pm_message_t state); +int rt2x00pci_resume(struct pci_dev *pci_dev); +#else +#define rt2x00pci_suspend NULL +#define rt2x00pci_resume NULL +#endif /* CONFIG_PM */ + +#endif /* RT2X00PCI_H */ diff --git a/drivers/net/wireless/rt2x00/rt2x00reg.h b/drivers/net/wireless/rt2x00/rt2x00reg.h new file mode 100644 index 0000000000000000000000000000000000000000..838421216da0d52a0d7aedd0f78ec56a09ec9756 --- /dev/null +++ b/drivers/net/wireless/rt2x00/rt2x00reg.h @@ -0,0 +1,292 @@ +/* + Copyright (C) 2004 - 2007 rt2x00 SourceForge Project + + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. 
+ + You should have received a copy of the GNU General Public License + along with this program; if not, write to the + Free Software Foundation, Inc., + 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + */ + +/* + Module: rt2x00 + Abstract: rt2x00 generic register information. + */ + +#ifndef RT2X00REG_H +#define RT2X00REG_H + +/* + * TX result flags. + */ +enum TX_STATUS { + TX_SUCCESS = 0, + TX_SUCCESS_RETRY = 1, + TX_FAIL_RETRY = 2, + TX_FAIL_INVALID = 3, + TX_FAIL_OTHER = 4, +}; + +/* + * Antenna values + */ +enum antenna { + ANTENNA_SW_DIVERSITY = 0, + ANTENNA_A = 1, + ANTENNA_B = 2, + ANTENNA_HW_DIVERSITY = 3, +}; + +/* + * Led mode values. + */ +enum led_mode { + LED_MODE_DEFAULT = 0, + LED_MODE_TXRX_ACTIVITY = 1, + LED_MODE_SIGNAL_STRENGTH = 2, + LED_MODE_ASUS = 3, + LED_MODE_ALPHA = 4, +}; + +/* + * TSF sync values + */ +enum tsf_sync { + TSF_SYNC_NONE = 0, + TSF_SYNC_INFRA = 1, + TSF_SYNC_BEACON = 2, +}; + +/* + * Device states + */ +enum dev_state { + STATE_DEEP_SLEEP = 0, + STATE_SLEEP = 1, + STATE_STANDBY = 2, + STATE_AWAKE = 3, + +/* + * Additional device states, these values are + * not strict since they are not directly passed + * into the device. + */ + STATE_RADIO_ON, + STATE_RADIO_OFF, + STATE_RADIO_RX_ON, + STATE_RADIO_RX_OFF, + STATE_RADIO_IRQ_ON, + STATE_RADIO_IRQ_OFF, +}; + +/* + * IFS backoff values + */ +enum ifs { + IFS_BACKOFF = 0, + IFS_SIFS = 1, + IFS_NEW_BACKOFF = 2, + IFS_NONE = 3, +}; + +/* + * Cipher types for hardware encryption + */ +enum cipher { + CIPHER_NONE = 0, + CIPHER_WEP64 = 1, + CIPHER_WEP128 = 2, + CIPHER_TKIP = 3, + CIPHER_AES = 4, +/* + * The following fields were added by rt61pci and rt73usb. + */ + CIPHER_CKIP64 = 5, + CIPHER_CKIP128 = 6, + CIPHER_TKIP_NO_MIC = 7, +}; + +/* + * Register handlers. + * We store the position of a register field inside a field structure, + * This will simplify the process of setting and reading a certain field + * inside the register while making sure the process remains byte order safe. + */ +struct rt2x00_field8 { + u8 bit_offset; + u8 bit_mask; +}; + +struct rt2x00_field16 { + u16 bit_offset; + u16 bit_mask; +}; + +struct rt2x00_field32 { + u32 bit_offset; + u32 bit_mask; +}; + +/* + * Power of two check, this will check + * if the mask that has been given contains + * and contiguous set of bits. 
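+ * As a worked example: for the mask 0x00fff000, low_bit_mask() yields
+ * 0x00000fff and 1 + 0x00fff000 + 0x00000fff equals 0x01000000, a
+ * power of two, so the mask is accepted; a non-contiguous mask such
+ * as 0x00000005 fails the same test.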
+ */ +#define is_power_of_two(x) ( !((x) & ((x)-1)) ) +#define low_bit_mask(x) ( ((x)-1) & ~(x) ) +#define is_valid_mask(x) is_power_of_two(1 + (x) + low_bit_mask(x)) + +#define FIELD8(__mask) \ +({ \ + BUILD_BUG_ON(!(__mask) || \ + !is_valid_mask(__mask) || \ + (__mask) != (u8)(__mask)); \ + (struct rt2x00_field8) { \ + __ffs(__mask), (__mask) \ + }; \ +}) + +#define FIELD16(__mask) \ +({ \ + BUILD_BUG_ON(!(__mask) || \ + !is_valid_mask(__mask) || \ + (__mask) != (u16)(__mask));\ + (struct rt2x00_field16) { \ + __ffs(__mask), (__mask) \ + }; \ +}) + +#define FIELD32(__mask) \ +({ \ + BUILD_BUG_ON(!(__mask) || \ + !is_valid_mask(__mask) || \ + (__mask) != (u32)(__mask));\ + (struct rt2x00_field32) { \ + __ffs(__mask), (__mask) \ + }; \ +}) + +static inline void rt2x00_set_field32(u32 *reg, + const struct rt2x00_field32 field, + const u32 value) +{ + *reg &= ~(field.bit_mask); + *reg |= (value << field.bit_offset) & field.bit_mask; +} + +static inline u32 rt2x00_get_field32(const u32 reg, + const struct rt2x00_field32 field) +{ + return (reg & field.bit_mask) >> field.bit_offset; +} + +static inline void rt2x00_set_field16(u16 *reg, + const struct rt2x00_field16 field, + const u16 value) +{ + *reg &= ~(field.bit_mask); + *reg |= (value << field.bit_offset) & field.bit_mask; +} + +static inline u16 rt2x00_get_field16(const u16 reg, + const struct rt2x00_field16 field) +{ + return (reg & field.bit_mask) >> field.bit_offset; +} + +static inline void rt2x00_set_field8(u8 *reg, + const struct rt2x00_field8 field, + const u8 value) +{ + *reg &= ~(field.bit_mask); + *reg |= (value << field.bit_offset) & field.bit_mask; +} + +static inline u8 rt2x00_get_field8(const u8 reg, + const struct rt2x00_field8 field) +{ + return (reg & field.bit_mask) >> field.bit_offset; +} + +/* + * Device specific rate value. + * We will have to create the device specific rate value + * passed to the ieee80211 kernel. We need to make it a consist of + * multiple fields because we want to store more then 1 device specific + * values inside the value. + * 1 - rate, stored as 100 kbit/s. + * 2 - preamble, short_preamble enabled flag. + * 3 - MASK_RATE, which rates are enabled in this mode, this mask + * corresponds with the TX register format for the current device. + * 4 - plcp, 802.11b rates are device specific, + * 802.11g rates are set according to the ieee802.11a-1999 p.14. + * The bit to enable preamble is set in a seperate define. 
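+ * As an illustration (not a value taken from a driver), a 54 Mbps
+ * OFDM entry could be composed as
+ *	DEVICE_SET_RATE_FIELD(540, RATE) |
+ *	DEVICE_SET_RATE_FIELD(DEV_OFDM_RATEMASK, RATEMASK)
+ * with the PLCP and PREAMBLE fields filled in the same way by the
+ * chipset specific driver.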
+ */ +#define DEV_RATE FIELD32(0x000007ff) +#define DEV_PREAMBLE FIELD32(0x00000800) +#define DEV_RATEMASK FIELD32(0x00fff000) +#define DEV_PLCP FIELD32(0xff000000) + +/* + * Bitfields + */ +#define DEV_RATEBIT_1MB ( 1 << 0 ) +#define DEV_RATEBIT_2MB ( 1 << 1 ) +#define DEV_RATEBIT_5_5MB ( 1 << 2 ) +#define DEV_RATEBIT_11MB ( 1 << 3 ) +#define DEV_RATEBIT_6MB ( 1 << 4 ) +#define DEV_RATEBIT_9MB ( 1 << 5 ) +#define DEV_RATEBIT_12MB ( 1 << 6 ) +#define DEV_RATEBIT_18MB ( 1 << 7 ) +#define DEV_RATEBIT_24MB ( 1 << 8 ) +#define DEV_RATEBIT_36MB ( 1 << 9 ) +#define DEV_RATEBIT_48MB ( 1 << 10 ) +#define DEV_RATEBIT_54MB ( 1 << 11 ) + +/* + * Bitmasks for DEV_RATEMASK + */ +#define DEV_RATEMASK_1MB ( (DEV_RATEBIT_1MB << 1) -1 ) +#define DEV_RATEMASK_2MB ( (DEV_RATEBIT_2MB << 1) -1 ) +#define DEV_RATEMASK_5_5MB ( (DEV_RATEBIT_5_5MB << 1) -1 ) +#define DEV_RATEMASK_11MB ( (DEV_RATEBIT_11MB << 1) -1 ) +#define DEV_RATEMASK_6MB ( (DEV_RATEBIT_6MB << 1) -1 ) +#define DEV_RATEMASK_9MB ( (DEV_RATEBIT_9MB << 1) -1 ) +#define DEV_RATEMASK_12MB ( (DEV_RATEBIT_12MB << 1) -1 ) +#define DEV_RATEMASK_18MB ( (DEV_RATEBIT_18MB << 1) -1 ) +#define DEV_RATEMASK_24MB ( (DEV_RATEBIT_24MB << 1) -1 ) +#define DEV_RATEMASK_36MB ( (DEV_RATEBIT_36MB << 1) -1 ) +#define DEV_RATEMASK_48MB ( (DEV_RATEBIT_48MB << 1) -1 ) +#define DEV_RATEMASK_54MB ( (DEV_RATEBIT_54MB << 1) -1 ) + +/* + * Bitmask groups of bitrates + */ +#define DEV_BASIC_RATEMASK \ + ( DEV_RATEMASK_11MB | \ + DEV_RATEBIT_6MB | DEV_RATEBIT_12MB | DEV_RATEBIT_24MB ) + +#define DEV_CCK_RATEMASK ( DEV_RATEMASK_11MB ) +#define DEV_OFDM_RATEMASK ( DEV_RATEMASK_54MB & ~DEV_CCK_RATEMASK ) + +/* + * Macro's to set and get specific fields from the device specific val and val2 + * fields inside the ieee80211_rate entry. + */ +#define DEVICE_SET_RATE_FIELD(__value, __mask) \ + (int)( ((__value) << DEV_##__mask.bit_offset) & DEV_##__mask.bit_mask ) + +#define DEVICE_GET_RATE_FIELD(__value, __mask) \ + (int)( ((__value) & DEV_##__mask.bit_mask) >> DEV_##__mask.bit_offset ) + +#endif /* RT2X00REG_H */ diff --git a/drivers/net/wireless/rt2x00/rt2x00rfkill.c b/drivers/net/wireless/rt2x00/rt2x00rfkill.c new file mode 100644 index 0000000000000000000000000000000000000000..a0f8b8e0a24b52dc274267e4db22c4aa056191e4 --- /dev/null +++ b/drivers/net/wireless/rt2x00/rt2x00rfkill.c @@ -0,0 +1,146 @@ +/* + Copyright (C) 2004 - 2007 rt2x00 SourceForge Project + + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the + Free Software Foundation, Inc., + 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + */ + +/* + Module: rt2x00rfkill + Abstract: rt2x00 rfkill routines. 
+ */ + +/* + * Set enviroment defines for rt2x00.h + */ +#define DRV_NAME "rt2x00lib" + +#include +#include +#include +#include + +#include "rt2x00.h" +#include "rt2x00lib.h" + +static int rt2x00rfkill_toggle_radio(void *data, enum rfkill_state state) +{ + struct rt2x00_dev *rt2x00dev = data; + int retval = 0; + + if (unlikely(!rt2x00dev)) + return 0; + + /* + * Only continue if there are enabled interfaces. + */ + if (!test_bit(DEVICE_STARTED, &rt2x00dev->flags)) + return 0; + + if (state == RFKILL_STATE_ON) { + INFO(rt2x00dev, "Hardware button pressed, enabling radio.\n"); + __clear_bit(DEVICE_DISABLED_RADIO_HW, &rt2x00dev->flags); + retval = rt2x00lib_enable_radio(rt2x00dev); + } else if (state == RFKILL_STATE_OFF) { + INFO(rt2x00dev, "Hardware button pressed, disabling radio.\n"); + __set_bit(DEVICE_DISABLED_RADIO_HW, &rt2x00dev->flags); + rt2x00lib_disable_radio(rt2x00dev); + } + + return retval; +} + +static void rt2x00rfkill_poll(struct input_polled_dev *poll_dev) +{ + struct rt2x00_dev *rt2x00dev = poll_dev->private; + int state = rt2x00dev->ops->lib->rfkill_poll(rt2x00dev); + + if (rt2x00dev->rfkill->state != state) + input_report_key(poll_dev->input, KEY_WLAN, 1); +} + +int rt2x00rfkill_register(struct rt2x00_dev *rt2x00dev) +{ + int retval; + + if (!test_bit(CONFIG_SUPPORT_HW_BUTTON, &rt2x00dev->flags)) + return 0; + + retval = rfkill_register(rt2x00dev->rfkill); + if (retval) { + ERROR(rt2x00dev, "Failed to register rfkill handler.\n"); + return retval; + } + + retval = input_register_polled_device(rt2x00dev->poll_dev); + if (retval) { + ERROR(rt2x00dev, "Failed to register polled device.\n"); + rfkill_unregister(rt2x00dev->rfkill); + return retval; + } + + return 0; +} + +void rt2x00rfkill_unregister(struct rt2x00_dev *rt2x00dev) +{ + if (!test_bit(CONFIG_SUPPORT_HW_BUTTON, &rt2x00dev->flags)) + return; + + input_unregister_polled_device(rt2x00dev->poll_dev); + rfkill_unregister(rt2x00dev->rfkill); +} + +int rt2x00rfkill_allocate(struct rt2x00_dev *rt2x00dev) +{ + struct device *device = wiphy_dev(rt2x00dev->hw->wiphy); + + if (!test_bit(CONFIG_SUPPORT_HW_BUTTON, &rt2x00dev->flags)) + return 0; + + rt2x00dev->rfkill = rfkill_allocate(device, RFKILL_TYPE_WLAN); + if (!rt2x00dev->rfkill) { + ERROR(rt2x00dev, "Failed to allocate rfkill handler.\n"); + return -ENOMEM; + } + + rt2x00dev->rfkill->name = rt2x00dev->ops->name; + rt2x00dev->rfkill->data = rt2x00dev; + rt2x00dev->rfkill->state = rt2x00dev->ops->lib->rfkill_poll(rt2x00dev); + rt2x00dev->rfkill->toggle_radio = rt2x00rfkill_toggle_radio; + + rt2x00dev->poll_dev = input_allocate_polled_device(); + if (!rt2x00dev->poll_dev) { + ERROR(rt2x00dev, "Failed to allocate polled device.\n"); + rfkill_free(rt2x00dev->rfkill); + return -ENOMEM; + } + + rt2x00dev->poll_dev->private = rt2x00dev; + rt2x00dev->poll_dev->poll = rt2x00rfkill_poll; + rt2x00dev->poll_dev->poll_interval = RFKILL_POLL_INTERVAL; + + return 0; +} + +void rt2x00rfkill_free(struct rt2x00_dev *rt2x00dev) +{ + if (!test_bit(CONFIG_SUPPORT_HW_BUTTON, &rt2x00dev->flags)) + return; + + input_free_polled_device(rt2x00dev->poll_dev); + rfkill_free(rt2x00dev->rfkill); +} diff --git a/drivers/net/wireless/rt2x00/rt2x00ring.h b/drivers/net/wireless/rt2x00/rt2x00ring.h new file mode 100644 index 0000000000000000000000000000000000000000..1a864d32cfbd2a9615169c87d7967d2877f95378 --- /dev/null +++ b/drivers/net/wireless/rt2x00/rt2x00ring.h @@ -0,0 +1,268 @@ +/* + Copyright (C) 2004 - 2007 rt2x00 SourceForge Project + + + This program is free software; you can redistribute it 
and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the + Free Software Foundation, Inc., + 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + */ + +/* + Module: rt2x00 + Abstract: rt2x00 ring datastructures and routines + */ + +#ifndef RT2X00RING_H +#define RT2X00RING_H + +/* + * data_desc + * Each data entry also contains a descriptor which is used by the + * device to determine what should be done with the packet and + * what the current status is. + * This structure is greatly simplified, but the descriptors + * are basically a list of little endian 32 bit values. + * Make the array by default 1 word big, this will allow us + * to use sizeof() correctly. + */ +struct data_desc { + __le32 word[1]; +}; + +/* + * rxdata_entry_desc + * Summary of information that has been read from the + * RX frame descriptor. + */ +struct rxdata_entry_desc { + int signal; + int rssi; + int ofdm; + int size; + int flags; +}; + +/* + * txdata_entry_desc + * Summary of information that should be written into the + * descriptor for sending a TX frame. + */ +struct txdata_entry_desc { + unsigned long flags; +#define ENTRY_TXDONE 1 +#define ENTRY_TXD_RTS_FRAME 2 +#define ENTRY_TXD_OFDM_RATE 3 +#define ENTRY_TXD_MORE_FRAG 4 +#define ENTRY_TXD_REQ_TIMESTAMP 5 +#define ENTRY_TXD_BURST 6 + +/* + * Queue ID. ID's 0-4 are data TX rings + */ + int queue; +#define QUEUE_MGMT 13 +#define QUEUE_RX 14 +#define QUEUE_OTHER 15 + + /* + * PLCP values. + */ + u16 length_high; + u16 length_low; + u16 signal; + u16 service; + + /* + * Timing information + */ + int aifs; + int ifs; + int cw_min; + int cw_max; +}; + +/* + * data_entry + * The data ring is a list of data entries. + * Each entry holds a reference to the descriptor + * and the data buffer. For TX rings the reference to the + * sk_buff of the packet being transmitted is also stored here. + */ +struct data_entry { + /* + * Status flags + */ + unsigned long flags; +#define ENTRY_OWNER_NIC 1 + + /* + * Ring we belong to. + */ + struct data_ring *ring; + + /* + * sk_buff for the packet which is being transmitted + * in this entry (Only used with TX related rings). + */ + struct sk_buff *skb; + + /* + * Store a ieee80211_tx_status structure in each + * ring entry, this will optimize the txdone + * handler. + */ + struct ieee80211_tx_status tx_status; + + /* + * private pointer specific to driver. + */ + void *priv; + + /* + * Data address for this entry. + */ + void *data_addr; + dma_addr_t data_dma; +}; + +/* + * data_ring + * Data rings are used by the device to send and receive packets. + * The data_addr is the base address of the data memory. + * To determine at which point in the ring we are, + * have to use the rt2x00_ring_index_*() functions. + */ +struct data_ring { + /* + * Pointer to main rt2x00dev structure where this + * ring belongs to. + */ + struct rt2x00_dev *rt2x00dev; + + /* + * Base address for the device specific data entries. + */ + struct data_entry *entry; + + /* + * TX queue statistic info. 
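+	 * Besides being reported to mac80211, stats.limit doubles as the
+	 * number of entries in this ring and stats.len as its current
+	 * fill level; the rt2x00_ring_*() helpers below rely on that.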
+ */ + struct ieee80211_tx_queue_stats_data stats; + + /* + * TX Queue parameters. + */ + struct ieee80211_tx_queue_params tx_params; + + /* + * Base address for data ring. + */ + dma_addr_t data_dma; + void *data_addr; + + /* + * Index variables. + */ + u16 index; + u16 index_done; + + /* + * Size of packet and descriptor in bytes. + */ + u16 data_size; + u16 desc_size; +}; + +/* + * Handlers to determine the address of the current device specific + * data entry, where either index or index_done points to. + */ +static inline struct data_entry *rt2x00_get_data_entry(struct data_ring *ring) +{ + return &ring->entry[ring->index]; +} + +static inline struct data_entry *rt2x00_get_data_entry_done(struct data_ring + *ring) +{ + return &ring->entry[ring->index_done]; +} + +/* + * Total ring memory + */ +static inline int rt2x00_get_ring_size(struct data_ring *ring) +{ + return ring->stats.limit * (ring->desc_size + ring->data_size); +} + +/* + * Ring index manipulation functions. + */ +static inline void rt2x00_ring_index_inc(struct data_ring *ring) +{ + ring->index++; + if (ring->index >= ring->stats.limit) + ring->index = 0; + ring->stats.len++; +} + +static inline void rt2x00_ring_index_done_inc(struct data_ring *ring) +{ + ring->index_done++; + if (ring->index_done >= ring->stats.limit) + ring->index_done = 0; + ring->stats.len--; + ring->stats.count++; +} + +static inline void rt2x00_ring_index_clear(struct data_ring *ring) +{ + ring->index = 0; + ring->index_done = 0; + ring->stats.len = 0; + ring->stats.count = 0; +} + +static inline int rt2x00_ring_empty(struct data_ring *ring) +{ + return ring->stats.len == 0; +} + +static inline int rt2x00_ring_full(struct data_ring *ring) +{ + return ring->stats.len == ring->stats.limit; +} + +static inline int rt2x00_ring_free(struct data_ring *ring) +{ + return ring->stats.limit - ring->stats.len; +} + +/* + * TX/RX Descriptor access functions. + */ +static inline void rt2x00_desc_read(struct data_desc *desc, + const u8 word, u32 *value) +{ + *value = le32_to_cpu(desc->word[word]); +} + +static inline void rt2x00_desc_write(struct data_desc *desc, + const u8 word, const u32 value) +{ + desc->word[word] = cpu_to_le32(value); +} + +#endif /* RT2X00RING_H */ diff --git a/drivers/net/wireless/rt2x00/rt2x00usb.c b/drivers/net/wireless/rt2x00/rt2x00usb.c new file mode 100644 index 0000000000000000000000000000000000000000..73cc726c4046724f01f6e5b9e1b88b5eaf8dc0d2 --- /dev/null +++ b/drivers/net/wireless/rt2x00/rt2x00usb.c @@ -0,0 +1,592 @@ +/* + Copyright (C) 2004 - 2007 rt2x00 SourceForge Project + + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the + Free Software Foundation, Inc., + 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + */ + +/* + Module: rt2x00usb + Abstract: rt2x00 generic usb device routines. 
+ */ + +/* + * Set enviroment defines for rt2x00.h + */ +#define DRV_NAME "rt2x00usb" + +#include +#include +#include + +#include "rt2x00.h" +#include "rt2x00usb.h" + +/* + * Interfacing with the HW. + */ +int rt2x00usb_vendor_request(const struct rt2x00_dev *rt2x00dev, + const u8 request, const u8 requesttype, + const u16 offset, const u16 value, + void *buffer, const u16 buffer_length, + const int timeout) +{ + struct usb_device *usb_dev = + interface_to_usbdev(rt2x00dev_usb(rt2x00dev)); + int status; + unsigned int i; + unsigned int pipe = + (requesttype == USB_VENDOR_REQUEST_IN) ? + usb_rcvctrlpipe(usb_dev, 0) : usb_sndctrlpipe(usb_dev, 0); + + for (i = 0; i < REGISTER_BUSY_COUNT; i++) { + status = usb_control_msg(usb_dev, pipe, request, requesttype, + value, offset, buffer, buffer_length, + timeout); + if (status >= 0) + return 0; + + /* + * Check for errors + * -ENODEV: Device has disappeared, no point continuing. + * All other errors: Try again. + */ + else if (status == -ENODEV) + break; + } + + ERROR(rt2x00dev, + "Vendor Request 0x%02x failed for offset 0x%04x with error %d.\n", + request, offset, status); + + return status; +} +EXPORT_SYMBOL_GPL(rt2x00usb_vendor_request); + +int rt2x00usb_vendor_request_buff(const struct rt2x00_dev *rt2x00dev, + const u8 request, const u8 requesttype, + const u16 offset, void *buffer, + const u16 buffer_length, const int timeout) +{ + int status; + + /* + * Check for Cache availability. + */ + if (unlikely(!rt2x00dev->csr_cache || buffer_length > CSR_CACHE_SIZE)) { + ERROR(rt2x00dev, "CSR cache not available.\n"); + return -ENOMEM; + } + + if (requesttype == USB_VENDOR_REQUEST_OUT) + memcpy(rt2x00dev->csr_cache, buffer, buffer_length); + + status = rt2x00usb_vendor_request(rt2x00dev, request, requesttype, + offset, 0, rt2x00dev->csr_cache, + buffer_length, timeout); + + if (!status && requesttype == USB_VENDOR_REQUEST_IN) + memcpy(buffer, rt2x00dev->csr_cache, buffer_length); + + return status; +} +EXPORT_SYMBOL_GPL(rt2x00usb_vendor_request_buff); + +/* + * TX data handlers. + */ +static void rt2x00usb_interrupt_txdone(struct urb *urb) +{ + struct data_entry *entry = (struct data_entry *)urb->context; + struct data_ring *ring = entry->ring; + struct rt2x00_dev *rt2x00dev = ring->rt2x00dev; + struct data_desc *txd = (struct data_desc *)entry->skb->data; + u32 word; + int tx_status; + + if (!test_bit(DEVICE_ENABLED_RADIO, &rt2x00dev->flags) || + !__test_and_clear_bit(ENTRY_OWNER_NIC, &entry->flags)) + return; + + rt2x00_desc_read(txd, 0, &word); + + /* + * Remove the descriptor data from the buffer. + */ + skb_pull(entry->skb, ring->desc_size); + + /* + * Obtain the status about this packet. + */ + tx_status = !urb->status ? TX_SUCCESS : TX_FAIL_RETRY; + + rt2x00lib_txdone(entry, tx_status, 0); + + /* + * Make this entry available for reuse. + */ + entry->flags = 0; + rt2x00_ring_index_done_inc(entry->ring); + + /* + * If the data ring was full before the txdone handler + * we must make sure the packet queue in the mac80211 stack + * is reenabled when the txdone handler has finished. 
+ */ + if (!rt2x00_ring_full(ring)) + ieee80211_wake_queue(rt2x00dev->hw, + entry->tx_status.control.queue); +} + +int rt2x00usb_write_tx_data(struct rt2x00_dev *rt2x00dev, + struct data_ring *ring, struct sk_buff *skb, + struct ieee80211_tx_control *control) +{ + struct usb_device *usb_dev = + interface_to_usbdev(rt2x00dev_usb(rt2x00dev)); + struct data_entry *entry = rt2x00_get_data_entry(ring); + int pipe = usb_sndbulkpipe(usb_dev, 1); + int max_packet = usb_maxpacket(usb_dev, pipe, 1); + u32 length; + + if (rt2x00_ring_full(ring)) { + ieee80211_stop_queue(rt2x00dev->hw, control->queue); + return -EINVAL; + } + + if (test_bit(ENTRY_OWNER_NIC, &entry->flags)) { + ERROR(rt2x00dev, + "Arrived at non-free entry in the non-full queue %d.\n" + "Please file bug report to %s.\n", + control->queue, DRV_PROJECT); + ieee80211_stop_queue(rt2x00dev->hw, control->queue); + return -EINVAL; + } + + /* + * Add the descriptor in front of the skb. + */ + skb_push(skb, ring->desc_size); + memset(skb->data, 0, ring->desc_size); + + rt2x00lib_write_tx_desc(rt2x00dev, (struct data_desc *)skb->data, + (struct ieee80211_hdr *)(skb->data + + ring->desc_size), + skb->len - ring->desc_size, control); + memcpy(&entry->tx_status.control, control, sizeof(*control)); + entry->skb = skb; + + /* + * USB devices cannot blindly pass the skb->len as the + * length of the data to usb_fill_bulk_urb. Pass the skb + * to the driver to determine what the length should be. + */ + length = rt2x00dev->ops->lib->get_tx_data_len(rt2x00dev, + max_packet, skb); + + /* + * Initialize URB and send the frame to the device. + */ + __set_bit(ENTRY_OWNER_NIC, &entry->flags); + usb_fill_bulk_urb(entry->priv, usb_dev, pipe, + skb->data, length, rt2x00usb_interrupt_txdone, entry); + usb_submit_urb(entry->priv, GFP_ATOMIC); + + rt2x00_ring_index_inc(ring); + + if (rt2x00_ring_full(ring)) + ieee80211_stop_queue(rt2x00dev->hw, control->queue); + + return 0; +} +EXPORT_SYMBOL_GPL(rt2x00usb_write_tx_data); + +/* + * RX data handlers. + */ +static void rt2x00usb_interrupt_rxdone(struct urb *urb) +{ + struct data_entry *entry = (struct data_entry *)urb->context; + struct data_ring *ring = entry->ring; + struct rt2x00_dev *rt2x00dev = ring->rt2x00dev; + struct sk_buff *skb; + struct rxdata_entry_desc desc; + int frame_size; + + if (!test_bit(DEVICE_ENABLED_RADIO, &rt2x00dev->flags) || + !test_and_clear_bit(ENTRY_OWNER_NIC, &entry->flags)) + return; + + /* + * Check if the received data is simply too small + * to be actually valid, or if the urb is signaling + * a problem. + */ + if (urb->actual_length < entry->ring->desc_size || urb->status) + goto skip_entry; + + memset(&desc, 0x00, sizeof(desc)); + rt2x00dev->ops->lib->fill_rxdone(entry, &desc); + + /* + * Allocate a new sk buffer to replace the current one. + * If allocation fails, we should drop the current frame + * so we can recycle the existing sk buffer for the new frame. + */ + frame_size = entry->ring->data_size + entry->ring->desc_size; + skb = dev_alloc_skb(frame_size + NET_IP_ALIGN); + if (!skb) + goto skip_entry; + + skb_reserve(skb, NET_IP_ALIGN); + skb_put(skb, frame_size); + + /* + * Trim the skb_buffer to only contain the valid + * frame data (so ignore the device's descriptor). + */ + skb_trim(entry->skb, desc.size); + + /* + * Send the frame to rt2x00lib for further processing. + */ + rt2x00lib_rxdone(entry, entry->skb, &desc); + + /* + * Replace current entry's skb with the newly allocated one, + * and reinitialize the urb. 
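+	 * The old skb has been handed to rt2x00lib_rxdone() above and is
+	 * assumed to be owned by the rx path from here on, which is why a
+	 * fresh buffer has to take its place for the next URB.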
+ */ + entry->skb = skb; + urb->transfer_buffer = entry->skb->data; + urb->transfer_buffer_length = entry->skb->len; + +skip_entry: + if (test_bit(DEVICE_ENABLED_RADIO, &ring->rt2x00dev->flags)) { + __set_bit(ENTRY_OWNER_NIC, &entry->flags); + usb_submit_urb(urb, GFP_ATOMIC); + } + + rt2x00_ring_index_inc(ring); +} + +/* + * Radio handlers + */ +void rt2x00usb_enable_radio(struct rt2x00_dev *rt2x00dev) +{ + struct usb_device *usb_dev = + interface_to_usbdev(rt2x00dev_usb(rt2x00dev)); + struct data_ring *ring; + struct data_entry *entry; + unsigned int i; + + /* + * Initialize the TX rings + */ + txringall_for_each(rt2x00dev, ring) { + for (i = 0; i < ring->stats.limit; i++) + ring->entry[i].flags = 0; + + rt2x00_ring_index_clear(ring); + } + + /* + * Initialize and start the RX ring. + */ + rt2x00_ring_index_clear(rt2x00dev->rx); + + for (i = 0; i < rt2x00dev->rx->stats.limit; i++) { + entry = &rt2x00dev->rx->entry[i]; + + usb_fill_bulk_urb(entry->priv, usb_dev, + usb_rcvbulkpipe(usb_dev, 1), + entry->skb->data, entry->skb->len, + rt2x00usb_interrupt_rxdone, entry); + + __set_bit(ENTRY_OWNER_NIC, &entry->flags); + usb_submit_urb(entry->priv, GFP_ATOMIC); + } +} +EXPORT_SYMBOL_GPL(rt2x00usb_enable_radio); + +void rt2x00usb_disable_radio(struct rt2x00_dev *rt2x00dev) +{ + struct data_ring *ring; + unsigned int i; + + rt2x00usb_vendor_request_sw(rt2x00dev, USB_RX_CONTROL, 0x0000, 0x0000, + REGISTER_TIMEOUT); + + /* + * Cancel all rings. + */ + ring_for_each(rt2x00dev, ring) { + for (i = 0; i < ring->stats.limit; i++) + usb_kill_urb(ring->entry[i].priv); + } +} +EXPORT_SYMBOL_GPL(rt2x00usb_disable_radio); + +/* + * Device initialization handlers. + */ +static int rt2x00usb_alloc_urb(struct rt2x00_dev *rt2x00dev, + struct data_ring *ring) +{ + unsigned int i; + + /* + * Allocate the URB's + */ + for (i = 0; i < ring->stats.limit; i++) { + ring->entry[i].priv = usb_alloc_urb(0, GFP_KERNEL); + if (!ring->entry[i].priv) + return -ENOMEM; + } + + return 0; +} + +static void rt2x00usb_free_urb(struct rt2x00_dev *rt2x00dev, + struct data_ring *ring) +{ + unsigned int i; + + if (!ring->entry) + return; + + for (i = 0; i < ring->stats.limit; i++) { + usb_kill_urb(ring->entry[i].priv); + usb_free_urb(ring->entry[i].priv); + if (ring->entry[i].skb) + kfree_skb(ring->entry[i].skb); + } +} + +int rt2x00usb_initialize(struct rt2x00_dev *rt2x00dev) +{ + struct data_ring *ring; + struct sk_buff *skb; + unsigned int entry_size; + unsigned int i; + int status; + + /* + * Allocate DMA + */ + ring_for_each(rt2x00dev, ring) { + status = rt2x00usb_alloc_urb(rt2x00dev, ring); + if (status) + goto exit; + } + + /* + * For the RX ring, skb's should be allocated. + */ + entry_size = rt2x00dev->rx->data_size + rt2x00dev->rx->desc_size; + for (i = 0; i < rt2x00dev->rx->stats.limit; i++) { + skb = dev_alloc_skb(NET_IP_ALIGN + entry_size); + if (!skb) + goto exit; + + skb_reserve(skb, NET_IP_ALIGN); + skb_put(skb, entry_size); + + rt2x00dev->rx->entry[i].skb = skb; + } + + return 0; + +exit: + rt2x00usb_uninitialize(rt2x00dev); + + return status; +} +EXPORT_SYMBOL_GPL(rt2x00usb_initialize); + +void rt2x00usb_uninitialize(struct rt2x00_dev *rt2x00dev) +{ + struct data_ring *ring; + + ring_for_each(rt2x00dev, ring) + rt2x00usb_free_urb(rt2x00dev, ring); +} +EXPORT_SYMBOL_GPL(rt2x00usb_uninitialize); + +/* + * USB driver handlers. 
+ */ +static void rt2x00usb_free_reg(struct rt2x00_dev *rt2x00dev) +{ + kfree(rt2x00dev->rf); + rt2x00dev->rf = NULL; + + kfree(rt2x00dev->eeprom); + rt2x00dev->eeprom = NULL; + + kfree(rt2x00dev->csr_cache); + rt2x00dev->csr_cache = NULL; +} + +static int rt2x00usb_alloc_reg(struct rt2x00_dev *rt2x00dev) +{ + rt2x00dev->csr_cache = kzalloc(CSR_CACHE_SIZE, GFP_KERNEL); + if (!rt2x00dev->csr_cache) + goto exit; + + rt2x00dev->eeprom = kzalloc(rt2x00dev->ops->eeprom_size, GFP_KERNEL); + if (!rt2x00dev->eeprom) + goto exit; + + rt2x00dev->rf = kzalloc(rt2x00dev->ops->rf_size, GFP_KERNEL); + if (!rt2x00dev->rf) + goto exit; + + return 0; + +exit: + ERROR_PROBE("Failed to allocate registers.\n"); + + rt2x00usb_free_reg(rt2x00dev); + + return -ENOMEM; +} + +int rt2x00usb_probe(struct usb_interface *usb_intf, + const struct usb_device_id *id) +{ + struct usb_device *usb_dev = interface_to_usbdev(usb_intf); + struct rt2x00_ops *ops = (struct rt2x00_ops *)id->driver_info; + struct ieee80211_hw *hw; + struct rt2x00_dev *rt2x00dev; + int retval; + + usb_dev = usb_get_dev(usb_dev); + + hw = ieee80211_alloc_hw(sizeof(struct rt2x00_dev), ops->hw); + if (!hw) { + ERROR_PROBE("Failed to allocate hardware.\n"); + retval = -ENOMEM; + goto exit_put_device; + } + + usb_set_intfdata(usb_intf, hw); + + rt2x00dev = hw->priv; + rt2x00dev->dev = usb_intf; + rt2x00dev->ops = ops; + rt2x00dev->hw = hw; + + retval = rt2x00usb_alloc_reg(rt2x00dev); + if (retval) + goto exit_free_device; + + retval = rt2x00lib_probe_dev(rt2x00dev); + if (retval) + goto exit_free_reg; + + return 0; + +exit_free_reg: + rt2x00usb_free_reg(rt2x00dev); + +exit_free_device: + ieee80211_free_hw(hw); + +exit_put_device: + usb_put_dev(usb_dev); + + usb_set_intfdata(usb_intf, NULL); + + return retval; +} +EXPORT_SYMBOL_GPL(rt2x00usb_probe); + +void rt2x00usb_disconnect(struct usb_interface *usb_intf) +{ + struct ieee80211_hw *hw = usb_get_intfdata(usb_intf); + struct rt2x00_dev *rt2x00dev = hw->priv; + + /* + * Free all allocated data. + */ + rt2x00lib_remove_dev(rt2x00dev); + rt2x00usb_free_reg(rt2x00dev); + ieee80211_free_hw(hw); + + /* + * Free the USB device data. + */ + usb_set_intfdata(usb_intf, NULL); + usb_put_dev(interface_to_usbdev(usb_intf)); +} +EXPORT_SYMBOL_GPL(rt2x00usb_disconnect); + +#ifdef CONFIG_PM +int rt2x00usb_suspend(struct usb_interface *usb_intf, pm_message_t state) +{ + struct ieee80211_hw *hw = usb_get_intfdata(usb_intf); + struct rt2x00_dev *rt2x00dev = hw->priv; + int retval; + + retval = rt2x00lib_suspend(rt2x00dev, state); + if (retval) + return retval; + + rt2x00usb_free_reg(rt2x00dev); + + /* + * Decrease usbdev refcount. + */ + usb_put_dev(interface_to_usbdev(usb_intf)); + + return 0; +} +EXPORT_SYMBOL_GPL(rt2x00usb_suspend); + +int rt2x00usb_resume(struct usb_interface *usb_intf) +{ + struct ieee80211_hw *hw = usb_get_intfdata(usb_intf); + struct rt2x00_dev *rt2x00dev = hw->priv; + int retval; + + usb_get_dev(interface_to_usbdev(usb_intf)); + + retval = rt2x00usb_alloc_reg(rt2x00dev); + if (retval) + return retval; + + retval = rt2x00lib_resume(rt2x00dev); + if (retval) + goto exit_free_reg; + + return 0; + +exit_free_reg: + rt2x00usb_free_reg(rt2x00dev); + + return retval; +} +EXPORT_SYMBOL_GPL(rt2x00usb_resume); +#endif /* CONFIG_PM */ + +/* + * rt2x00pci module information. 
+ */ +MODULE_AUTHOR(DRV_PROJECT); +MODULE_VERSION(DRV_VERSION); +MODULE_DESCRIPTION("rt2x00 library"); +MODULE_LICENSE("GPL"); diff --git a/drivers/net/wireless/rt2x00/rt2x00usb.h b/drivers/net/wireless/rt2x00/rt2x00usb.h new file mode 100644 index 0000000000000000000000000000000000000000..2681abe4d49eb7a4f595cbdadce556c160b4ad42 --- /dev/null +++ b/drivers/net/wireless/rt2x00/rt2x00usb.h @@ -0,0 +1,180 @@ +/* + Copyright (C) 2004 - 2007 rt2x00 SourceForge Project + + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the + Free Software Foundation, Inc., + 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + */ + +/* + Module: rt2x00usb + Abstract: Data structures for the rt2x00usb module. + */ + +#ifndef RT2X00USB_H +#define RT2X00USB_H + +/* + * This variable should be used with the + * usb_driver structure initialization. + */ +#define USB_DEVICE_DATA(__ops) .driver_info = (kernel_ulong_t)(__ops) + +/* + * Register defines. + * Some registers require multiple attempts before success, + * in those cases REGISTER_BUSY_COUNT attempts should be + * taken with a REGISTER_BUSY_DELAY interval. + * For USB vendor requests we need to pass a timeout + * time in ms, for this we use the REGISTER_TIMEOUT, + * however when loading firmware a higher value is + * required. In that case we use the REGISTER_TIMEOUT_FIRMWARE. + */ +#define REGISTER_BUSY_COUNT 5 +#define REGISTER_BUSY_DELAY 100 +#define REGISTER_TIMEOUT 500 +#define REGISTER_TIMEOUT_FIRMWARE 1000 + +/* + * Cache size + */ +#define CSR_CACHE_SIZE 8 +#define CSR_CACHE_SIZE_FIRMWARE 64 + +/* + * USB request types. + */ +#define USB_VENDOR_REQUEST ( USB_TYPE_VENDOR | USB_RECIP_DEVICE ) +#define USB_VENDOR_REQUEST_IN ( USB_DIR_IN | USB_VENDOR_REQUEST ) +#define USB_VENDOR_REQUEST_OUT ( USB_DIR_OUT | USB_VENDOR_REQUEST ) + +/* + * USB vendor commands. + */ +#define USB_DEVICE_MODE 0x01 +#define USB_SINGLE_WRITE 0x02 +#define USB_SINGLE_READ 0x03 +#define USB_MULTI_WRITE 0x06 +#define USB_MULTI_READ 0x07 +#define USB_EEPROM_WRITE 0x08 +#define USB_EEPROM_READ 0x09 +#define USB_LED_CONTROL 0x0a /* RT73USB */ +#define USB_RX_CONTROL 0x0c + +/* + * Device modes offset + */ +#define USB_MODE_RESET 0x01 +#define USB_MODE_UNPLUG 0x02 +#define USB_MODE_FUNCTION 0x03 +#define USB_MODE_TEST 0x04 +#define USB_MODE_SLEEP 0x07 /* RT73USB */ +#define USB_MODE_FIRMWARE 0x08 /* RT73USB */ +#define USB_MODE_WAKEUP 0x09 /* RT73USB */ + +/* + * Used to read/write from/to the device. + * This is the main function to communicate with the device, + * the buffer argument _must_ either be NULL or point to + * a buffer allocated by kmalloc. Failure to do so can lead + * to unexpected behavior depending on the architecture. + */ +int rt2x00usb_vendor_request(const struct rt2x00_dev *rt2x00dev, + const u8 request, const u8 requesttype, + const u16 offset, const u16 value, + void *buffer, const u16 buffer_length, + const int timeout); + +/* + * Used to read/write from/to the device. 
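+ * A typical multi-register read through the wrapper declared below
+ * might look like (offset and destination illustrative):
+ *	rt2x00usb_vendor_request_buff(rt2x00dev, USB_MULTI_READ,
+ *				      USB_VENDOR_REQUEST_IN, offset,
+ *				      &reg, sizeof(reg), REGISTER_TIMEOUT);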
+ * This function will use a cache that was previously allocated with
+ * kmalloc to communicate with the device. The contents of the buffer pointer
+ * will be copied to this cache when writing, or read from the cache
+ * when reading.
+ * Buffers sent to rt2x00usb_vendor_request _must_ be allocated with
+ * kmalloc. Hence the use of a cache that is known to have been
+ * allocated correctly.
+ */
+int rt2x00usb_vendor_request_buff(const struct rt2x00_dev *rt2x00dev,
+				   const u8 request, const u8 requesttype,
+				   const u16 offset, void *buffer,
+				   const u16 buffer_length, const int timeout);
+
+/*
+ * Simple wrapper around rt2x00usb_vendor_request to write a single
+ * command to the device. Since we don't use the buffer argument we
+ * don't have to worry about kmalloc here.
+ */
+static inline int rt2x00usb_vendor_request_sw(const struct rt2x00_dev
+					      *rt2x00dev,
+					      const u8 request,
+					      const u16 offset,
+					      const u16 value,
+					      const int timeout)
+{
+	return rt2x00usb_vendor_request(rt2x00dev, request,
+					USB_VENDOR_REQUEST_OUT, offset,
+					value, NULL, 0, timeout);
+}
+
+/*
+ * Simple wrapper around rt2x00usb_vendor_request to read the eeprom
+ * from the device. Note that the eeprom argument _must_ be allocated using
+ * kmalloc for correct handling inside the kernel USB layer.
+ */
+static inline int rt2x00usb_eeprom_read(const struct rt2x00_dev *rt2x00dev,
+					__le16 *eeprom, const u16 length)
+{
+	int timeout = REGISTER_TIMEOUT * (length / sizeof(u16));
+
+	return rt2x00usb_vendor_request(rt2x00dev, USB_EEPROM_READ,
+					USB_VENDOR_REQUEST_IN, 0x0000,
+					0x0000, eeprom, length, timeout);
+}
+
+/*
+ * Radio handlers
+ */
+void rt2x00usb_enable_radio(struct rt2x00_dev *rt2x00dev);
+void rt2x00usb_disable_radio(struct rt2x00_dev *rt2x00dev);
+
+/*
+ * TX data handlers.
+ */
+int rt2x00usb_write_tx_data(struct rt2x00_dev *rt2x00dev,
+			    struct data_ring *ring, struct sk_buff *skb,
+			    struct ieee80211_tx_control *control);
+
+/*
+ * Device initialization handlers.
+ */
+int rt2x00usb_initialize(struct rt2x00_dev *rt2x00dev);
+void rt2x00usb_uninitialize(struct rt2x00_dev *rt2x00dev);
+
+/*
+ * USB driver handlers.
+ */
+int rt2x00usb_probe(struct usb_interface *usb_intf,
+		    const struct usb_device_id *id);
+void rt2x00usb_disconnect(struct usb_interface *usb_intf);
+#ifdef CONFIG_PM
+int rt2x00usb_suspend(struct usb_interface *usb_intf, pm_message_t state);
+int rt2x00usb_resume(struct usb_interface *usb_intf);
+#else
+#define rt2x00usb_suspend	NULL
+#define rt2x00usb_resume	NULL
+#endif /* CONFIG_PM */
+
+#endif /* RT2X00USB_H */
diff --git a/drivers/net/wireless/rt2x00/rt61pci.c b/drivers/net/wireless/rt2x00/rt61pci.c
new file mode 100644
index 0000000000000000000000000000000000000000..01dbef19d651f14f902805f451afa20246fc01a4
--- /dev/null
+++ b/drivers/net/wireless/rt2x00/rt61pci.c
@@ -0,0 +1,2557 @@
+/*
+	Copyright (C) 2004 - 2007 rt2x00 SourceForge Project
+
+
+	This program is free software; you can redistribute it and/or modify
+	it under the terms of the GNU General Public License as published by
+	the Free Software Foundation; either version 2 of the License, or
+	(at your option) any later version.
+
+	This program is distributed in the hope that it will be useful,
+	but WITHOUT ANY WARRANTY; without even the implied warranty of
+	MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+	GNU General Public License for more details.
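Because the USB core may DMA straight into the buffer, the helpers declared in rt2x00usb.h above insist on kmalloc'ed memory; stack or vmalloc'ed buffers are not acceptable. A sketch of honouring that rule with a heap bounce buffer (example_read_eeprom is hypothetical, and it assumes the wrapper returns 0 on success):

#include <linux/slab.h>
#include <linux/string.h>

static int example_read_eeprom(struct rt2x00_dev *rt2x00dev,
			       __le16 *dest, const u16 length)
{
	__le16 *buf = kmalloc(length, GFP_KERNEL);	/* DMA-safe buffer */
	int retval;

	if (!buf)
		return -ENOMEM;

	retval = rt2x00usb_eeprom_read(rt2x00dev, buf, length);
	if (retval == 0)
		memcpy(dest, buf, length);	/* copy to any destination */

	kfree(buf);
	return retval;
}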
+
+	You should have received a copy of the GNU General Public License
+	along with this program; if not, write to the
+	Free Software Foundation, Inc.,
+	59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+/*
+	Module: rt61pci
+	Abstract: rt61pci device specific routines.
+	Supported chipsets: RT2561, RT2561s, RT2661.
+ */
+
+/*
+ * Set environment defines for rt2x00.h
+ */
+#define DRV_NAME "rt61pci"
+
+#include <linux/delay.h>
+#include <linux/etherdevice.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/eeprom_93cx6.h>
+
+#include "rt2x00.h"
+#include "rt2x00pci.h"
+#include "rt61pci.h"
+
+/*
+ * Register access.
+ * BBP and RF registers require indirect register access,
+ * and use the CSR registers PHY_CSR3 and PHY_CSR4 to achieve this.
+ * These indirect registers work with busy bits,
+ * and we will try maximal REGISTER_BUSY_COUNT times to access
+ * the register while taking a REGISTER_BUSY_DELAY us delay
+ * between each attempt. When the busy bit is still set at that time,
+ * the access attempt is considered to have failed,
+ * and we will print an error.
+ */
+static u32 rt61pci_bbp_check(const struct rt2x00_dev *rt2x00dev)
+{
+	u32 reg;
+	unsigned int i;
+
+	for (i = 0; i < REGISTER_BUSY_COUNT; i++) {
+		rt2x00pci_register_read(rt2x00dev, PHY_CSR3, &reg);
+		if (!rt2x00_get_field32(reg, PHY_CSR3_BUSY))
+			break;
+		udelay(REGISTER_BUSY_DELAY);
+	}
+
+	return reg;
+}
+
+static void rt61pci_bbp_write(const struct rt2x00_dev *rt2x00dev,
+			      const unsigned int word, const u8 value)
+{
+	u32 reg;
+
+	/*
+	 * Wait until the BBP becomes ready.
+	 */
+	reg = rt61pci_bbp_check(rt2x00dev);
+	if (rt2x00_get_field32(reg, PHY_CSR3_BUSY)) {
+		ERROR(rt2x00dev, "PHY_CSR3 register busy. Write failed.\n");
+		return;
+	}
+
+	/*
+	 * Write the data into the BBP.
+	 */
+	reg = 0;
+	rt2x00_set_field32(&reg, PHY_CSR3_VALUE, value);
+	rt2x00_set_field32(&reg, PHY_CSR3_REGNUM, word);
+	rt2x00_set_field32(&reg, PHY_CSR3_BUSY, 1);
+	rt2x00_set_field32(&reg, PHY_CSR3_READ_CONTROL, 0);
+
+	rt2x00pci_register_write(rt2x00dev, PHY_CSR3, reg);
+}
+
+static void rt61pci_bbp_read(const struct rt2x00_dev *rt2x00dev,
+			     const unsigned int word, u8 *value)
+{
+	u32 reg;
+
+	/*
+	 * Wait until the BBP becomes ready.
+	 */
+	reg = rt61pci_bbp_check(rt2x00dev);
+	if (rt2x00_get_field32(reg, PHY_CSR3_BUSY)) {
+		ERROR(rt2x00dev, "PHY_CSR3 register busy. Read failed.\n");
+		return;
+	}
+
+	/*
+	 * Write the request into the BBP.
+	 */
+	reg = 0;
+	rt2x00_set_field32(&reg, PHY_CSR3_REGNUM, word);
+	rt2x00_set_field32(&reg, PHY_CSR3_BUSY, 1);
+	rt2x00_set_field32(&reg, PHY_CSR3_READ_CONTROL, 1);
+
+	rt2x00pci_register_write(rt2x00dev, PHY_CSR3, reg);
+
+	/*
+	 * Wait until the BBP becomes ready.
+	 */
+	reg = rt61pci_bbp_check(rt2x00dev);
+	if (rt2x00_get_field32(reg, PHY_CSR3_BUSY)) {
+		ERROR(rt2x00dev, "PHY_CSR3 register busy. Read failed.\n");
+		*value = 0xff;
+		return;
+	}
+
+	*value = rt2x00_get_field32(reg, PHY_CSR3_VALUE);
+}
+
+static void rt61pci_rf_write(const struct rt2x00_dev *rt2x00dev,
+			     const unsigned int word, const u32 value)
+{
+	u32 reg;
+	unsigned int i;
+
+	if (!word)
+		return;
+
+	for (i = 0; i < REGISTER_BUSY_COUNT; i++) {
+		rt2x00pci_register_read(rt2x00dev, PHY_CSR4, &reg);
+		if (!rt2x00_get_field32(reg, PHY_CSR4_BUSY))
+			goto rf_write;
+		udelay(REGISTER_BUSY_DELAY);
+	}
+
+	ERROR(rt2x00dev, "PHY_CSR4 register busy.
Write failed.\n"); + return; + +rf_write: + reg = 0; + rt2x00_set_field32(®, PHY_CSR4_VALUE, value); + rt2x00_set_field32(®, PHY_CSR4_NUMBER_OF_BITS, 21); + rt2x00_set_field32(®, PHY_CSR4_IF_SELECT, 0); + rt2x00_set_field32(®, PHY_CSR4_BUSY, 1); + + rt2x00pci_register_write(rt2x00dev, PHY_CSR4, reg); + rt2x00_rf_write(rt2x00dev, word, value); +} + +static void rt61pci_mcu_request(const struct rt2x00_dev *rt2x00dev, + const u8 command, const u8 token, + const u8 arg0, const u8 arg1) +{ + u32 reg; + + rt2x00pci_register_read(rt2x00dev, H2M_MAILBOX_CSR, ®); + + if (rt2x00_get_field32(reg, H2M_MAILBOX_CSR_OWNER)) { + ERROR(rt2x00dev, "mcu request error. " + "Request 0x%02x failed for token 0x%02x.\n", + command, token); + return; + } + + rt2x00_set_field32(®, H2M_MAILBOX_CSR_OWNER, 1); + rt2x00_set_field32(®, H2M_MAILBOX_CSR_CMD_TOKEN, token); + rt2x00_set_field32(®, H2M_MAILBOX_CSR_ARG0, arg0); + rt2x00_set_field32(®, H2M_MAILBOX_CSR_ARG1, arg1); + rt2x00pci_register_write(rt2x00dev, H2M_MAILBOX_CSR, reg); + + rt2x00pci_register_read(rt2x00dev, HOST_CMD_CSR, ®); + rt2x00_set_field32(®, HOST_CMD_CSR_HOST_COMMAND, command); + rt2x00_set_field32(®, HOST_CMD_CSR_INTERRUPT_MCU, 1); + rt2x00pci_register_write(rt2x00dev, HOST_CMD_CSR, reg); +} + +static void rt61pci_eepromregister_read(struct eeprom_93cx6 *eeprom) +{ + struct rt2x00_dev *rt2x00dev = eeprom->data; + u32 reg; + + rt2x00pci_register_read(rt2x00dev, E2PROM_CSR, ®); + + eeprom->reg_data_in = !!rt2x00_get_field32(reg, E2PROM_CSR_DATA_IN); + eeprom->reg_data_out = !!rt2x00_get_field32(reg, E2PROM_CSR_DATA_OUT); + eeprom->reg_data_clock = + !!rt2x00_get_field32(reg, E2PROM_CSR_DATA_CLOCK); + eeprom->reg_chip_select = + !!rt2x00_get_field32(reg, E2PROM_CSR_CHIP_SELECT); +} + +static void rt61pci_eepromregister_write(struct eeprom_93cx6 *eeprom) +{ + struct rt2x00_dev *rt2x00dev = eeprom->data; + u32 reg = 0; + + rt2x00_set_field32(®, E2PROM_CSR_DATA_IN, !!eeprom->reg_data_in); + rt2x00_set_field32(®, E2PROM_CSR_DATA_OUT, !!eeprom->reg_data_out); + rt2x00_set_field32(®, E2PROM_CSR_DATA_CLOCK, + !!eeprom->reg_data_clock); + rt2x00_set_field32(®, E2PROM_CSR_CHIP_SELECT, + !!eeprom->reg_chip_select); + + rt2x00pci_register_write(rt2x00dev, E2PROM_CSR, reg); +} + +#ifdef CONFIG_RT2X00_LIB_DEBUGFS +#define CSR_OFFSET(__word) ( CSR_REG_BASE + ((__word) * sizeof(u32)) ) + +static void rt61pci_read_csr(const struct rt2x00_dev *rt2x00dev, + const unsigned int word, u32 *data) +{ + rt2x00pci_register_read(rt2x00dev, CSR_OFFSET(word), data); +} + +static void rt61pci_write_csr(const struct rt2x00_dev *rt2x00dev, + const unsigned int word, u32 data) +{ + rt2x00pci_register_write(rt2x00dev, CSR_OFFSET(word), data); +} + +static const struct rt2x00debug rt61pci_rt2x00debug = { + .owner = THIS_MODULE, + .csr = { + .read = rt61pci_read_csr, + .write = rt61pci_write_csr, + .word_size = sizeof(u32), + .word_count = CSR_REG_SIZE / sizeof(u32), + }, + .eeprom = { + .read = rt2x00_eeprom_read, + .write = rt2x00_eeprom_write, + .word_size = sizeof(u16), + .word_count = EEPROM_SIZE / sizeof(u16), + }, + .bbp = { + .read = rt61pci_bbp_read, + .write = rt61pci_bbp_write, + .word_size = sizeof(u8), + .word_count = BBP_SIZE / sizeof(u8), + }, + .rf = { + .read = rt2x00_rf_read, + .write = rt61pci_rf_write, + .word_size = sizeof(u32), + .word_count = RF_SIZE / sizeof(u32), + }, +}; +#endif /* CONFIG_RT2X00_LIB_DEBUGFS */ + +#ifdef CONFIG_RT61PCI_RFKILL +static int rt61pci_rfkill_poll(struct rt2x00_dev *rt2x00dev) +{ + u32 reg; + + rt2x00pci_register_read(rt2x00dev, 
MAC_CSR13, ®); + return rt2x00_get_field32(reg, MAC_CSR13_BIT5);; +} +#else +#define rt61pci_rfkill_poll NULL +#endif /* CONFIG_RT61PCI_RFKILL */ + +/* + * Configuration handlers. + */ +static void rt61pci_config_mac_addr(struct rt2x00_dev *rt2x00dev, __le32 *mac) +{ + u32 tmp; + + tmp = le32_to_cpu(mac[1]); + rt2x00_set_field32(&tmp, MAC_CSR3_UNICAST_TO_ME_MASK, 0xff); + mac[1] = cpu_to_le32(tmp); + + rt2x00pci_register_multiwrite(rt2x00dev, MAC_CSR2, mac, + (2 * sizeof(__le32))); +} + +static void rt61pci_config_bssid(struct rt2x00_dev *rt2x00dev, __le32 *bssid) +{ + u32 tmp; + + tmp = le32_to_cpu(bssid[1]); + rt2x00_set_field32(&tmp, MAC_CSR5_BSS_ID_MASK, 3); + bssid[1] = cpu_to_le32(tmp); + + rt2x00pci_register_multiwrite(rt2x00dev, MAC_CSR4, bssid, + (2 * sizeof(__le32))); +} + +static void rt61pci_config_type(struct rt2x00_dev *rt2x00dev, const int type, + const int tsf_sync) +{ + u32 reg; + + /* + * Clear current synchronisation setup. + * For the Beacon base registers we only need to clear + * the first byte since that byte contains the VALID and OWNER + * bits which (when set to 0) will invalidate the entire beacon. + */ + rt2x00pci_register_write(rt2x00dev, TXRX_CSR9, 0); + rt2x00pci_register_write(rt2x00dev, HW_BEACON_BASE0, 0); + rt2x00pci_register_write(rt2x00dev, HW_BEACON_BASE1, 0); + rt2x00pci_register_write(rt2x00dev, HW_BEACON_BASE2, 0); + rt2x00pci_register_write(rt2x00dev, HW_BEACON_BASE3, 0); + + /* + * Enable synchronisation. + */ + rt2x00pci_register_read(rt2x00dev, TXRX_CSR9, ®); + rt2x00_set_field32(®, TXRX_CSR9_TSF_TICKING, 1); + rt2x00_set_field32(®, TXRX_CSR9_TBTT_ENABLE, 1); + rt2x00_set_field32(®, TXRX_CSR9_BEACON_GEN, 0); + rt2x00_set_field32(®, TXRX_CSR9_TSF_SYNC, tsf_sync); + rt2x00pci_register_write(rt2x00dev, TXRX_CSR9, reg); +} + +static void rt61pci_config_preamble(struct rt2x00_dev *rt2x00dev, + const int short_preamble, + const int ack_timeout, + const int ack_consume_time) +{ + u32 reg; + + rt2x00pci_register_read(rt2x00dev, TXRX_CSR0, ®); + rt2x00_set_field32(®, TXRX_CSR0_RX_ACK_TIMEOUT, ack_timeout); + rt2x00pci_register_write(rt2x00dev, TXRX_CSR0, reg); + + rt2x00pci_register_read(rt2x00dev, TXRX_CSR4, ®); + rt2x00_set_field32(®, TXRX_CSR4_AUTORESPOND_PREAMBLE, + !!short_preamble); + rt2x00pci_register_write(rt2x00dev, TXRX_CSR4, reg); +} + +static void rt61pci_config_phymode(struct rt2x00_dev *rt2x00dev, + const int basic_rate_mask) +{ + rt2x00pci_register_write(rt2x00dev, TXRX_CSR5, basic_rate_mask); +} + +static void rt61pci_config_channel(struct rt2x00_dev *rt2x00dev, + struct rf_channel *rf, const int txpower) +{ + u8 r3; + u8 r94; + u8 smart; + + rt2x00_set_field32(&rf->rf3, RF3_TXPOWER, TXPOWER_TO_DEV(txpower)); + rt2x00_set_field32(&rf->rf4, RF4_FREQ_OFFSET, rt2x00dev->freq_offset); + + smart = !(rt2x00_rf(&rt2x00dev->chip, RF5225) || + rt2x00_rf(&rt2x00dev->chip, RF2527)); + + rt61pci_bbp_read(rt2x00dev, 3, &r3); + rt2x00_set_field8(&r3, BBP_R3_SMART_MODE, smart); + rt61pci_bbp_write(rt2x00dev, 3, r3); + + r94 = 6; + if (txpower > MAX_TXPOWER && txpower <= (MAX_TXPOWER + r94)) + r94 += txpower - MAX_TXPOWER; + else if (txpower < MIN_TXPOWER && txpower >= (MIN_TXPOWER - r94)) + r94 += txpower; + rt61pci_bbp_write(rt2x00dev, 94, r94); + + rt61pci_rf_write(rt2x00dev, 1, rf->rf1); + rt61pci_rf_write(rt2x00dev, 2, rf->rf2); + rt61pci_rf_write(rt2x00dev, 3, rf->rf3 & ~0x00000004); + rt61pci_rf_write(rt2x00dev, 4, rf->rf4); + + udelay(200); + + rt61pci_rf_write(rt2x00dev, 1, rf->rf1); + rt61pci_rf_write(rt2x00dev, 2, rf->rf2); + 
rt61pci_rf_write(rt2x00dev, 3, rf->rf3 | 0x00000004); + rt61pci_rf_write(rt2x00dev, 4, rf->rf4); + + udelay(200); + + rt61pci_rf_write(rt2x00dev, 1, rf->rf1); + rt61pci_rf_write(rt2x00dev, 2, rf->rf2); + rt61pci_rf_write(rt2x00dev, 3, rf->rf3 & ~0x00000004); + rt61pci_rf_write(rt2x00dev, 4, rf->rf4); + + msleep(1); +} + +static void rt61pci_config_txpower(struct rt2x00_dev *rt2x00dev, + const int txpower) +{ + struct rf_channel rf; + + rt2x00_rf_read(rt2x00dev, 1, &rf.rf1); + rt2x00_rf_read(rt2x00dev, 2, &rf.rf2); + rt2x00_rf_read(rt2x00dev, 3, &rf.rf3); + rt2x00_rf_read(rt2x00dev, 4, &rf.rf4); + + rt61pci_config_channel(rt2x00dev, &rf, txpower); +} + +static void rt61pci_config_antenna_5x(struct rt2x00_dev *rt2x00dev, + const int antenna_tx, + const int antenna_rx) +{ + u8 r3; + u8 r4; + u8 r77; + + rt61pci_bbp_read(rt2x00dev, 3, &r3); + rt61pci_bbp_read(rt2x00dev, 4, &r4); + rt61pci_bbp_read(rt2x00dev, 77, &r77); + + rt2x00_set_field8(&r3, BBP_R3_SMART_MODE, + !rt2x00_rf(&rt2x00dev->chip, RF5225)); + + switch (antenna_rx) { + case ANTENNA_SW_DIVERSITY: + case ANTENNA_HW_DIVERSITY: + rt2x00_set_field8(&r4, BBP_R4_RX_ANTENNA, 2); + rt2x00_set_field8(&r4, BBP_R4_RX_FRAME_END, + !!(rt2x00dev->curr_hwmode != HWMODE_A)); + break; + case ANTENNA_A: + rt2x00_set_field8(&r4, BBP_R4_RX_ANTENNA, 1); + rt2x00_set_field8(&r4, BBP_R4_RX_FRAME_END, 0); + + if (rt2x00dev->curr_hwmode == HWMODE_A) + rt2x00_set_field8(&r77, BBP_R77_PAIR, 0); + else + rt2x00_set_field8(&r77, BBP_R77_PAIR, 3); + break; + case ANTENNA_B: + rt2x00_set_field8(&r4, BBP_R4_RX_ANTENNA, 1); + rt2x00_set_field8(&r4, BBP_R4_RX_FRAME_END, 0); + + if (rt2x00dev->curr_hwmode == HWMODE_A) + rt2x00_set_field8(&r77, BBP_R77_PAIR, 3); + else + rt2x00_set_field8(&r77, BBP_R77_PAIR, 0); + break; + } + + rt61pci_bbp_write(rt2x00dev, 77, r77); + rt61pci_bbp_write(rt2x00dev, 3, r3); + rt61pci_bbp_write(rt2x00dev, 4, r4); +} + +static void rt61pci_config_antenna_2x(struct rt2x00_dev *rt2x00dev, + const int antenna_tx, + const int antenna_rx) +{ + u8 r3; + u8 r4; + u8 r77; + + rt61pci_bbp_read(rt2x00dev, 3, &r3); + rt61pci_bbp_read(rt2x00dev, 4, &r4); + rt61pci_bbp_read(rt2x00dev, 77, &r77); + + rt2x00_set_field8(&r3, BBP_R3_SMART_MODE, + !rt2x00_rf(&rt2x00dev->chip, RF2527)); + rt2x00_set_field8(&r4, BBP_R4_RX_FRAME_END, + !test_bit(CONFIG_FRAME_TYPE, &rt2x00dev->flags)); + + switch (antenna_rx) { + case ANTENNA_SW_DIVERSITY: + case ANTENNA_HW_DIVERSITY: + rt2x00_set_field8(&r4, BBP_R4_RX_ANTENNA, 2); + break; + case ANTENNA_A: + rt2x00_set_field8(&r4, BBP_R4_RX_ANTENNA, 1); + rt2x00_set_field8(&r77, BBP_R77_PAIR, 3); + break; + case ANTENNA_B: + rt2x00_set_field8(&r4, BBP_R4_RX_ANTENNA, 1); + rt2x00_set_field8(&r77, BBP_R77_PAIR, 0); + break; + } + + rt61pci_bbp_write(rt2x00dev, 77, r77); + rt61pci_bbp_write(rt2x00dev, 3, r3); + rt61pci_bbp_write(rt2x00dev, 4, r4); +} + +static void rt61pci_config_antenna_2529_rx(struct rt2x00_dev *rt2x00dev, + const int p1, const int p2) +{ + u32 reg; + + rt2x00pci_register_read(rt2x00dev, MAC_CSR13, ®); + + if (p1 != 0xff) { + rt2x00_set_field32(®, MAC_CSR13_BIT4, !!p1); + rt2x00_set_field32(®, MAC_CSR13_BIT12, 0); + rt2x00pci_register_write(rt2x00dev, MAC_CSR13, reg); + } + if (p2 != 0xff) { + rt2x00_set_field32(®, MAC_CSR13_BIT3, !p2); + rt2x00_set_field32(®, MAC_CSR13_BIT11, 0); + rt2x00pci_register_write(rt2x00dev, MAC_CSR13, reg); + } +} + +static void rt61pci_config_antenna_2529(struct rt2x00_dev *rt2x00dev, + const int antenna_tx, + const int antenna_rx) +{ + u16 eeprom; + u8 r3; + u8 r4; + u8 r77; + + 
rt61pci_bbp_read(rt2x00dev, 3, &r3); + rt61pci_bbp_read(rt2x00dev, 4, &r4); + rt61pci_bbp_read(rt2x00dev, 77, &r77); + rt2x00_eeprom_read(rt2x00dev, EEPROM_NIC, &eeprom); + + rt2x00_set_field8(&r3, BBP_R3_SMART_MODE, 0); + + if (rt2x00_get_field16(eeprom, EEPROM_NIC_ENABLE_DIVERSITY) && + rt2x00_get_field16(eeprom, EEPROM_NIC_TX_DIVERSITY)) { + rt2x00_set_field8(&r4, BBP_R4_RX_ANTENNA, 2); + rt2x00_set_field8(&r4, BBP_R4_RX_FRAME_END, 1); + rt61pci_config_antenna_2529_rx(rt2x00dev, 0, 1); + } else if (rt2x00_get_field16(eeprom, EEPROM_NIC_ENABLE_DIVERSITY)) { + if (rt2x00_get_field16(eeprom, EEPROM_NIC_TX_RX_FIXED) >= 2) { + rt2x00_set_field8(&r77, BBP_R77_PAIR, 3); + rt61pci_bbp_write(rt2x00dev, 77, r77); + } + rt2x00_set_field8(&r4, BBP_R4_RX_ANTENNA, 1); + rt61pci_config_antenna_2529_rx(rt2x00dev, 1, 1); + } else if (!rt2x00_get_field16(eeprom, EEPROM_NIC_ENABLE_DIVERSITY) && + rt2x00_get_field16(eeprom, EEPROM_NIC_TX_DIVERSITY)) { + rt2x00_set_field8(&r4, BBP_R4_RX_ANTENNA, 2); + rt2x00_set_field8(&r4, BBP_R4_RX_FRAME_END, 0); + + switch (rt2x00_get_field16(eeprom, EEPROM_NIC_TX_RX_FIXED)) { + case 0: + rt61pci_config_antenna_2529_rx(rt2x00dev, 0, 1); + break; + case 1: + rt61pci_config_antenna_2529_rx(rt2x00dev, 1, 0); + break; + case 2: + rt61pci_config_antenna_2529_rx(rt2x00dev, 0, 0); + break; + case 3: + rt61pci_config_antenna_2529_rx(rt2x00dev, 1, 1); + break; + } + } else if (!rt2x00_get_field16(eeprom, EEPROM_NIC_ENABLE_DIVERSITY) && + !rt2x00_get_field16(eeprom, EEPROM_NIC_TX_DIVERSITY)) { + rt2x00_set_field8(&r4, BBP_R4_RX_ANTENNA, 1); + rt2x00_set_field8(&r4, BBP_R4_RX_FRAME_END, 0); + + switch (rt2x00_get_field16(eeprom, EEPROM_NIC_TX_RX_FIXED)) { + case 0: + rt2x00_set_field8(&r77, BBP_R77_PAIR, 0); + rt61pci_bbp_write(rt2x00dev, 77, r77); + rt61pci_config_antenna_2529_rx(rt2x00dev, 0, 1); + break; + case 1: + rt2x00_set_field8(&r77, BBP_R77_PAIR, 0); + rt61pci_bbp_write(rt2x00dev, 77, r77); + rt61pci_config_antenna_2529_rx(rt2x00dev, 1, 0); + break; + case 2: + rt2x00_set_field8(&r77, BBP_R77_PAIR, 3); + rt61pci_bbp_write(rt2x00dev, 77, r77); + rt61pci_config_antenna_2529_rx(rt2x00dev, 0, 0); + break; + case 3: + rt2x00_set_field8(&r77, BBP_R77_PAIR, 3); + rt61pci_bbp_write(rt2x00dev, 77, r77); + rt61pci_config_antenna_2529_rx(rt2x00dev, 1, 1); + break; + } + } + + rt61pci_bbp_write(rt2x00dev, 3, r3); + rt61pci_bbp_write(rt2x00dev, 4, r4); +} + +struct antenna_sel { + u8 word; + /* + * value[0] -> non-LNA + * value[1] -> LNA + */ + u8 value[2]; +}; + +static const struct antenna_sel antenna_sel_a[] = { + { 96, { 0x58, 0x78 } }, + { 104, { 0x38, 0x48 } }, + { 75, { 0xfe, 0x80 } }, + { 86, { 0xfe, 0x80 } }, + { 88, { 0xfe, 0x80 } }, + { 35, { 0x60, 0x60 } }, + { 97, { 0x58, 0x58 } }, + { 98, { 0x58, 0x58 } }, +}; + +static const struct antenna_sel antenna_sel_bg[] = { + { 96, { 0x48, 0x68 } }, + { 104, { 0x2c, 0x3c } }, + { 75, { 0xfe, 0x80 } }, + { 86, { 0xfe, 0x80 } }, + { 88, { 0xfe, 0x80 } }, + { 35, { 0x50, 0x50 } }, + { 97, { 0x48, 0x48 } }, + { 98, { 0x48, 0x48 } }, +}; + +static void rt61pci_config_antenna(struct rt2x00_dev *rt2x00dev, + const int antenna_tx, const int antenna_rx) +{ + const struct antenna_sel *sel; + unsigned int lna; + unsigned int i; + u32 reg; + + rt2x00pci_register_read(rt2x00dev, PHY_CSR0, ®); + + if (rt2x00dev->curr_hwmode == HWMODE_A) { + sel = antenna_sel_a; + lna = test_bit(CONFIG_EXTERNAL_LNA_A, &rt2x00dev->flags); + + rt2x00_set_field32(®, PHY_CSR0_PA_PE_BG, 0); + rt2x00_set_field32(®, PHY_CSR0_PA_PE_A, 1); + } else { + sel = 
antenna_sel_bg; + lna = test_bit(CONFIG_EXTERNAL_LNA_BG, &rt2x00dev->flags); + + rt2x00_set_field32(®, PHY_CSR0_PA_PE_BG, 1); + rt2x00_set_field32(®, PHY_CSR0_PA_PE_A, 0); + } + + for (i = 0; i < ARRAY_SIZE(antenna_sel_a); i++) + rt61pci_bbp_write(rt2x00dev, sel[i].word, sel[i].value[lna]); + + rt2x00pci_register_write(rt2x00dev, PHY_CSR0, reg); + + if (rt2x00_rf(&rt2x00dev->chip, RF5225) || + rt2x00_rf(&rt2x00dev->chip, RF5325)) + rt61pci_config_antenna_5x(rt2x00dev, antenna_tx, antenna_rx); + else if (rt2x00_rf(&rt2x00dev->chip, RF2527)) + rt61pci_config_antenna_2x(rt2x00dev, antenna_tx, antenna_rx); + else if (rt2x00_rf(&rt2x00dev->chip, RF2529)) { + if (test_bit(CONFIG_DOUBLE_ANTENNA, &rt2x00dev->flags)) + rt61pci_config_antenna_2x(rt2x00dev, antenna_tx, + antenna_rx); + else + rt61pci_config_antenna_2529(rt2x00dev, antenna_tx, + antenna_rx); + } +} + +static void rt61pci_config_duration(struct rt2x00_dev *rt2x00dev, + struct rt2x00lib_conf *libconf) +{ + u32 reg; + + rt2x00pci_register_read(rt2x00dev, MAC_CSR9, ®); + rt2x00_set_field32(®, MAC_CSR9_SLOT_TIME, libconf->slot_time); + rt2x00pci_register_write(rt2x00dev, MAC_CSR9, reg); + + rt2x00pci_register_read(rt2x00dev, MAC_CSR8, ®); + rt2x00_set_field32(®, MAC_CSR8_SIFS, libconf->sifs); + rt2x00_set_field32(®, MAC_CSR8_SIFS_AFTER_RX_OFDM, 3); + rt2x00_set_field32(®, MAC_CSR8_EIFS, libconf->eifs); + rt2x00pci_register_write(rt2x00dev, MAC_CSR8, reg); + + rt2x00pci_register_read(rt2x00dev, TXRX_CSR0, ®); + rt2x00_set_field32(®, TXRX_CSR0_TSF_OFFSET, IEEE80211_HEADER); + rt2x00pci_register_write(rt2x00dev, TXRX_CSR0, reg); + + rt2x00pci_register_read(rt2x00dev, TXRX_CSR4, ®); + rt2x00_set_field32(®, TXRX_CSR4_AUTORESPOND_ENABLE, 1); + rt2x00pci_register_write(rt2x00dev, TXRX_CSR4, reg); + + rt2x00pci_register_read(rt2x00dev, TXRX_CSR9, ®); + rt2x00_set_field32(®, TXRX_CSR9_BEACON_INTERVAL, + libconf->conf->beacon_int * 16); + rt2x00pci_register_write(rt2x00dev, TXRX_CSR9, reg); +} + +static void rt61pci_config(struct rt2x00_dev *rt2x00dev, + const unsigned int flags, + struct rt2x00lib_conf *libconf) +{ + if (flags & CONFIG_UPDATE_PHYMODE) + rt61pci_config_phymode(rt2x00dev, libconf->basic_rates); + if (flags & CONFIG_UPDATE_CHANNEL) + rt61pci_config_channel(rt2x00dev, &libconf->rf, + libconf->conf->power_level); + if ((flags & CONFIG_UPDATE_TXPOWER) && !(flags & CONFIG_UPDATE_CHANNEL)) + rt61pci_config_txpower(rt2x00dev, libconf->conf->power_level); + if (flags & CONFIG_UPDATE_ANTENNA) + rt61pci_config_antenna(rt2x00dev, libconf->conf->antenna_sel_tx, + libconf->conf->antenna_sel_rx); + if (flags & (CONFIG_UPDATE_SLOT_TIME | CONFIG_UPDATE_BEACON_INT)) + rt61pci_config_duration(rt2x00dev, libconf); +} + +/* + * LED functions. 
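rt61pci_config() above is a pure dispatcher: the caller ORs together the CONFIG_UPDATE_* bits for whatever changed and the handler touches only those sub-configurations. The illustrative same-file caller below is not actual rt2x00lib code (the library reaches this handler through the chipset's registered ops); it only shows how a combined channel and TX power change would be requested:

static void example_apply_channel_change(struct rt2x00_dev *rt2x00dev,
					 struct rt2x00lib_conf *libconf)
{
	unsigned int flags = CONFIG_UPDATE_CHANNEL | CONFIG_UPDATE_TXPOWER;

	/*
	 * CONFIG_UPDATE_TXPOWER is ignored when the channel is updated in
	 * the same call, because rt61pci_config_channel() already programs
	 * the TX power for the new channel.
	 */
	rt61pci_config(rt2x00dev, flags, libconf);
}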
+ */ +static void rt61pci_enable_led(struct rt2x00_dev *rt2x00dev) +{ + u32 reg; + u16 led_reg; + u8 arg0; + u8 arg1; + + rt2x00pci_register_read(rt2x00dev, MAC_CSR14, ®); + rt2x00_set_field32(®, MAC_CSR14_ON_PERIOD, 70); + rt2x00_set_field32(®, MAC_CSR14_OFF_PERIOD, 30); + rt2x00pci_register_write(rt2x00dev, MAC_CSR14, reg); + + led_reg = rt2x00dev->led_reg; + rt2x00_set_field16(&led_reg, MCU_LEDCS_RADIO_STATUS, 1); + if (rt2x00dev->rx_status.phymode == MODE_IEEE80211A) + rt2x00_set_field16(&led_reg, MCU_LEDCS_LINK_A_STATUS, 1); + else + rt2x00_set_field16(&led_reg, MCU_LEDCS_LINK_BG_STATUS, 1); + + arg0 = led_reg & 0xff; + arg1 = (led_reg >> 8) & 0xff; + + rt61pci_mcu_request(rt2x00dev, MCU_LED, 0xff, arg0, arg1); +} + +static void rt61pci_disable_led(struct rt2x00_dev *rt2x00dev) +{ + u16 led_reg; + u8 arg0; + u8 arg1; + + led_reg = rt2x00dev->led_reg; + rt2x00_set_field16(&led_reg, MCU_LEDCS_RADIO_STATUS, 0); + rt2x00_set_field16(&led_reg, MCU_LEDCS_LINK_BG_STATUS, 0); + rt2x00_set_field16(&led_reg, MCU_LEDCS_LINK_A_STATUS, 0); + + arg0 = led_reg & 0xff; + arg1 = (led_reg >> 8) & 0xff; + + rt61pci_mcu_request(rt2x00dev, MCU_LED, 0xff, arg0, arg1); +} + +static void rt61pci_activity_led(struct rt2x00_dev *rt2x00dev, int rssi) +{ + u8 led; + + if (rt2x00dev->led_mode != LED_MODE_SIGNAL_STRENGTH) + return; + + /* + * Led handling requires a positive value for the rssi, + * to do that correctly we need to add the correction. + */ + rssi += rt2x00dev->rssi_offset; + + if (rssi <= 30) + led = 0; + else if (rssi <= 39) + led = 1; + else if (rssi <= 49) + led = 2; + else if (rssi <= 53) + led = 3; + else if (rssi <= 63) + led = 4; + else + led = 5; + + rt61pci_mcu_request(rt2x00dev, MCU_LED_STRENGTH, 0xff, led, 0); +} + +/* + * Link tuning + */ +static void rt61pci_link_stats(struct rt2x00_dev *rt2x00dev) +{ + u32 reg; + + /* + * Update FCS error count from register. + */ + rt2x00pci_register_read(rt2x00dev, STA_CSR0, ®); + rt2x00dev->link.rx_failed = rt2x00_get_field32(reg, STA_CSR0_FCS_ERROR); + + /* + * Update False CCA count from register. + */ + rt2x00pci_register_read(rt2x00dev, STA_CSR1, ®); + rt2x00dev->link.false_cca = + rt2x00_get_field32(reg, STA_CSR1_FALSE_CCA_ERROR); +} + +static void rt61pci_reset_tuner(struct rt2x00_dev *rt2x00dev) +{ + rt61pci_bbp_write(rt2x00dev, 17, 0x20); + rt2x00dev->link.vgc_level = 0x20; +} + +static void rt61pci_link_tuner(struct rt2x00_dev *rt2x00dev) +{ + int rssi = rt2x00_get_link_rssi(&rt2x00dev->link); + u8 r17; + u8 up_bound; + u8 low_bound; + + /* + * Update Led strength + */ + rt61pci_activity_led(rt2x00dev, rssi); + + rt61pci_bbp_read(rt2x00dev, 17, &r17); + + /* + * Determine r17 bounds. 
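rt61pci_activity_led() above maps the offset-corrected RSSI onto six LED brightness levels before handing the result to the MCU_LED_STRENGTH command. A standalone restatement of that mapping, with a hypothetical helper name:

#include <stdint.h>

static uint8_t example_rssi_to_led_level(int rssi, int rssi_offset)
{
	static const int thresholds[] = { 30, 39, 49, 53, 63 };
	uint8_t level;

	rssi += rssi_offset;	/* make the RSSI positive, as in the driver */
	for (level = 0; level < 5; level++)
		if (rssi <= thresholds[level])
			break;

	return level;		/* 0..5, passed as the MCU argument */
}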
+ */ + if (rt2x00dev->rx_status.phymode == MODE_IEEE80211A) { + low_bound = 0x28; + up_bound = 0x48; + if (test_bit(CONFIG_EXTERNAL_LNA_A, &rt2x00dev->flags)) { + low_bound += 0x10; + up_bound += 0x10; + } + } else { + low_bound = 0x20; + up_bound = 0x40; + if (test_bit(CONFIG_EXTERNAL_LNA_BG, &rt2x00dev->flags)) { + low_bound += 0x10; + up_bound += 0x10; + } + } + + /* + * Special big-R17 for very short distance + */ + if (rssi >= -35) { + if (r17 != 0x60) + rt61pci_bbp_write(rt2x00dev, 17, 0x60); + return; + } + + /* + * Special big-R17 for short distance + */ + if (rssi >= -58) { + if (r17 != up_bound) + rt61pci_bbp_write(rt2x00dev, 17, up_bound); + return; + } + + /* + * Special big-R17 for middle-short distance + */ + if (rssi >= -66) { + low_bound += 0x10; + if (r17 != low_bound) + rt61pci_bbp_write(rt2x00dev, 17, low_bound); + return; + } + + /* + * Special mid-R17 for middle distance + */ + if (rssi >= -74) { + low_bound += 0x08; + if (r17 != low_bound) + rt61pci_bbp_write(rt2x00dev, 17, low_bound); + return; + } + + /* + * Special case: Change up_bound based on the rssi. + * Lower up_bound when rssi is weaker then -74 dBm. + */ + up_bound -= 2 * (-74 - rssi); + if (low_bound > up_bound) + up_bound = low_bound; + + if (r17 > up_bound) { + rt61pci_bbp_write(rt2x00dev, 17, up_bound); + return; + } + + /* + * r17 does not yet exceed upper limit, continue and base + * the r17 tuning on the false CCA count. + */ + if (rt2x00dev->link.false_cca > 512 && r17 < up_bound) { + if (++r17 > up_bound) + r17 = up_bound; + rt61pci_bbp_write(rt2x00dev, 17, r17); + } else if (rt2x00dev->link.false_cca < 100 && r17 > low_bound) { + if (--r17 < low_bound) + r17 = low_bound; + rt61pci_bbp_write(rt2x00dev, 17, r17); + } +} + +/* + * Firmware name function. + */ +static char *rt61pci_get_firmware_name(struct rt2x00_dev *rt2x00dev) +{ + char *fw_name; + + switch (rt2x00dev->chip.rt) { + case RT2561: + fw_name = FIRMWARE_RT2561; + break; + case RT2561s: + fw_name = FIRMWARE_RT2561s; + break; + case RT2661: + fw_name = FIRMWARE_RT2661; + break; + default: + fw_name = NULL; + break; + } + + return fw_name; +} + +/* + * Initialization functions. + */ +static int rt61pci_load_firmware(struct rt2x00_dev *rt2x00dev, void *data, + const size_t len) +{ + int i; + u32 reg; + + /* + * Wait for stable hardware. + */ + for (i = 0; i < 100; i++) { + rt2x00pci_register_read(rt2x00dev, MAC_CSR0, ®); + if (reg) + break; + msleep(1); + } + + if (!reg) { + ERROR(rt2x00dev, "Unstable hardware.\n"); + return -EBUSY; + } + + /* + * Prepare MCU and mailbox for firmware loading. + */ + reg = 0; + rt2x00_set_field32(®, MCU_CNTL_CSR_RESET, 1); + rt2x00pci_register_write(rt2x00dev, MCU_CNTL_CSR, reg); + rt2x00pci_register_write(rt2x00dev, M2H_CMD_DONE_CSR, 0xffffffff); + rt2x00pci_register_write(rt2x00dev, H2M_MAILBOX_CSR, 0); + rt2x00pci_register_write(rt2x00dev, HOST_CMD_CSR, 0); + + /* + * Write firmware to device. 
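The tail of rt61pci_link_tuner() above is a simple hysteresis on the false-CCA count: raise the BBP R17 gain when the medium looks noisy, lower it again when it has been quiet, always clamped to the band- and LNA-dependent bounds. The same step in isolation (hypothetical helper):

#include <stdint.h>

static uint8_t example_tune_r17(uint8_t r17, unsigned int false_cca,
				uint8_t low_bound, uint8_t up_bound)
{
	if (false_cca > 512 && r17 < up_bound)
		r17++;			/* too many false carrier detects */
	else if (false_cca < 100 && r17 > low_bound)
		r17--;			/* quiet medium, increase sensitivity */

	return r17;			/* written back to BBP register 17 */
}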
+ */ + reg = 0; + rt2x00_set_field32(®, MCU_CNTL_CSR_RESET, 1); + rt2x00_set_field32(®, MCU_CNTL_CSR_SELECT_BANK, 1); + rt2x00pci_register_write(rt2x00dev, MCU_CNTL_CSR, reg); + + rt2x00pci_register_multiwrite(rt2x00dev, FIRMWARE_IMAGE_BASE, + data, len); + + rt2x00_set_field32(®, MCU_CNTL_CSR_SELECT_BANK, 0); + rt2x00pci_register_write(rt2x00dev, MCU_CNTL_CSR, reg); + + rt2x00_set_field32(®, MCU_CNTL_CSR_RESET, 0); + rt2x00pci_register_write(rt2x00dev, MCU_CNTL_CSR, reg); + + for (i = 0; i < 100; i++) { + rt2x00pci_register_read(rt2x00dev, MCU_CNTL_CSR, ®); + if (rt2x00_get_field32(reg, MCU_CNTL_CSR_READY)) + break; + msleep(1); + } + + if (i == 100) { + ERROR(rt2x00dev, "MCU Control register not ready.\n"); + return -EBUSY; + } + + /* + * Reset MAC and BBP registers. + */ + reg = 0; + rt2x00_set_field32(®, MAC_CSR1_SOFT_RESET, 1); + rt2x00_set_field32(®, MAC_CSR1_BBP_RESET, 1); + rt2x00pci_register_write(rt2x00dev, MAC_CSR1, reg); + + rt2x00pci_register_read(rt2x00dev, MAC_CSR1, ®); + rt2x00_set_field32(®, MAC_CSR1_SOFT_RESET, 0); + rt2x00_set_field32(®, MAC_CSR1_BBP_RESET, 0); + rt2x00pci_register_write(rt2x00dev, MAC_CSR1, reg); + + rt2x00pci_register_read(rt2x00dev, MAC_CSR1, ®); + rt2x00_set_field32(®, MAC_CSR1_HOST_READY, 1); + rt2x00pci_register_write(rt2x00dev, MAC_CSR1, reg); + + return 0; +} + +static void rt61pci_init_rxring(struct rt2x00_dev *rt2x00dev) +{ + struct data_ring *ring = rt2x00dev->rx; + struct data_desc *rxd; + unsigned int i; + u32 word; + + memset(ring->data_addr, 0x00, rt2x00_get_ring_size(ring)); + + for (i = 0; i < ring->stats.limit; i++) { + rxd = ring->entry[i].priv; + + rt2x00_desc_read(rxd, 5, &word); + rt2x00_set_field32(&word, RXD_W5_BUFFER_PHYSICAL_ADDRESS, + ring->entry[i].data_dma); + rt2x00_desc_write(rxd, 5, word); + + rt2x00_desc_read(rxd, 0, &word); + rt2x00_set_field32(&word, RXD_W0_OWNER_NIC, 1); + rt2x00_desc_write(rxd, 0, word); + } + + rt2x00_ring_index_clear(rt2x00dev->rx); +} + +static void rt61pci_init_txring(struct rt2x00_dev *rt2x00dev, const int queue) +{ + struct data_ring *ring = rt2x00lib_get_ring(rt2x00dev, queue); + struct data_desc *txd; + unsigned int i; + u32 word; + + memset(ring->data_addr, 0x00, rt2x00_get_ring_size(ring)); + + for (i = 0; i < ring->stats.limit; i++) { + txd = ring->entry[i].priv; + + rt2x00_desc_read(txd, 1, &word); + rt2x00_set_field32(&word, TXD_W1_BUFFER_COUNT, 1); + rt2x00_desc_write(txd, 1, word); + + rt2x00_desc_read(txd, 5, &word); + rt2x00_set_field32(&word, TXD_W5_PID_TYPE, queue); + rt2x00_set_field32(&word, TXD_W5_PID_SUBTYPE, i); + rt2x00_desc_write(txd, 5, word); + + rt2x00_desc_read(txd, 6, &word); + rt2x00_set_field32(&word, TXD_W6_BUFFER_PHYSICAL_ADDRESS, + ring->entry[i].data_dma); + rt2x00_desc_write(txd, 6, word); + + rt2x00_desc_read(txd, 0, &word); + rt2x00_set_field32(&word, TXD_W0_VALID, 0); + rt2x00_set_field32(&word, TXD_W0_OWNER_NIC, 0); + rt2x00_desc_write(txd, 0, word); + } + + rt2x00_ring_index_clear(ring); +} + +static int rt61pci_init_rings(struct rt2x00_dev *rt2x00dev) +{ + u32 reg; + + /* + * Initialize rings. + */ + rt61pci_init_rxring(rt2x00dev); + rt61pci_init_txring(rt2x00dev, IEEE80211_TX_QUEUE_DATA0); + rt61pci_init_txring(rt2x00dev, IEEE80211_TX_QUEUE_DATA1); + rt61pci_init_txring(rt2x00dev, IEEE80211_TX_QUEUE_DATA2); + rt61pci_init_txring(rt2x00dev, IEEE80211_TX_QUEUE_DATA3); + rt61pci_init_txring(rt2x00dev, IEEE80211_TX_QUEUE_DATA4); + + /* + * Initialize registers. 
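The ring initialisation above encodes the ownership hand-off: a descriptor with RXD_W0_OWNER_NIC set belongs to the hardware, and software may only touch it again once the NIC has cleared that bit. A small illustrative check (the real RX-done path lives in rt2x00pci.c, not here):

static int example_rx_entry_owned_by_driver(struct data_desc *rxd)
{
	u32 word;

	rt2x00_desc_read(rxd, 0, &word);

	/* 1 = NIC still owns the descriptor, 0 = driver may process it. */
	return !rt2x00_get_field32(word, RXD_W0_OWNER_NIC);
}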
+ */ + rt2x00pci_register_read(rt2x00dev, TX_RING_CSR0, ®); + rt2x00_set_field32(®, TX_RING_CSR0_AC0_RING_SIZE, + rt2x00dev->tx[IEEE80211_TX_QUEUE_DATA0].stats.limit); + rt2x00_set_field32(®, TX_RING_CSR0_AC1_RING_SIZE, + rt2x00dev->tx[IEEE80211_TX_QUEUE_DATA1].stats.limit); + rt2x00_set_field32(®, TX_RING_CSR0_AC2_RING_SIZE, + rt2x00dev->tx[IEEE80211_TX_QUEUE_DATA2].stats.limit); + rt2x00_set_field32(®, TX_RING_CSR0_AC3_RING_SIZE, + rt2x00dev->tx[IEEE80211_TX_QUEUE_DATA3].stats.limit); + rt2x00pci_register_write(rt2x00dev, TX_RING_CSR0, reg); + + rt2x00pci_register_read(rt2x00dev, TX_RING_CSR1, ®); + rt2x00_set_field32(®, TX_RING_CSR1_MGMT_RING_SIZE, + rt2x00dev->tx[IEEE80211_TX_QUEUE_DATA4].stats.limit); + rt2x00_set_field32(®, TX_RING_CSR1_TXD_SIZE, + rt2x00dev->tx[IEEE80211_TX_QUEUE_DATA0].desc_size / + 4); + rt2x00pci_register_write(rt2x00dev, TX_RING_CSR1, reg); + + rt2x00pci_register_read(rt2x00dev, AC0_BASE_CSR, ®); + rt2x00_set_field32(®, AC0_BASE_CSR_RING_REGISTER, + rt2x00dev->tx[IEEE80211_TX_QUEUE_DATA0].data_dma); + rt2x00pci_register_write(rt2x00dev, AC0_BASE_CSR, reg); + + rt2x00pci_register_read(rt2x00dev, AC1_BASE_CSR, ®); + rt2x00_set_field32(®, AC1_BASE_CSR_RING_REGISTER, + rt2x00dev->tx[IEEE80211_TX_QUEUE_DATA1].data_dma); + rt2x00pci_register_write(rt2x00dev, AC1_BASE_CSR, reg); + + rt2x00pci_register_read(rt2x00dev, AC2_BASE_CSR, ®); + rt2x00_set_field32(®, AC2_BASE_CSR_RING_REGISTER, + rt2x00dev->tx[IEEE80211_TX_QUEUE_DATA2].data_dma); + rt2x00pci_register_write(rt2x00dev, AC2_BASE_CSR, reg); + + rt2x00pci_register_read(rt2x00dev, AC3_BASE_CSR, ®); + rt2x00_set_field32(®, AC3_BASE_CSR_RING_REGISTER, + rt2x00dev->tx[IEEE80211_TX_QUEUE_DATA3].data_dma); + rt2x00pci_register_write(rt2x00dev, AC3_BASE_CSR, reg); + + rt2x00pci_register_read(rt2x00dev, MGMT_BASE_CSR, ®); + rt2x00_set_field32(®, MGMT_BASE_CSR_RING_REGISTER, + rt2x00dev->tx[IEEE80211_TX_QUEUE_DATA4].data_dma); + rt2x00pci_register_write(rt2x00dev, MGMT_BASE_CSR, reg); + + rt2x00pci_register_read(rt2x00dev, RX_RING_CSR, ®); + rt2x00_set_field32(®, RX_RING_CSR_RING_SIZE, + rt2x00dev->rx->stats.limit); + rt2x00_set_field32(®, RX_RING_CSR_RXD_SIZE, + rt2x00dev->rx->desc_size / 4); + rt2x00_set_field32(®, RX_RING_CSR_RXD_WRITEBACK_SIZE, 4); + rt2x00pci_register_write(rt2x00dev, RX_RING_CSR, reg); + + rt2x00pci_register_read(rt2x00dev, RX_BASE_CSR, ®); + rt2x00_set_field32(®, RX_BASE_CSR_RING_REGISTER, + rt2x00dev->rx->data_dma); + rt2x00pci_register_write(rt2x00dev, RX_BASE_CSR, reg); + + rt2x00pci_register_read(rt2x00dev, TX_DMA_DST_CSR, ®); + rt2x00_set_field32(®, TX_DMA_DST_CSR_DEST_AC0, 2); + rt2x00_set_field32(®, TX_DMA_DST_CSR_DEST_AC1, 2); + rt2x00_set_field32(®, TX_DMA_DST_CSR_DEST_AC2, 2); + rt2x00_set_field32(®, TX_DMA_DST_CSR_DEST_AC3, 2); + rt2x00_set_field32(®, TX_DMA_DST_CSR_DEST_MGMT, 0); + rt2x00pci_register_write(rt2x00dev, TX_DMA_DST_CSR, reg); + + rt2x00pci_register_read(rt2x00dev, LOAD_TX_RING_CSR, ®); + rt2x00_set_field32(®, LOAD_TX_RING_CSR_LOAD_TXD_AC0, 1); + rt2x00_set_field32(®, LOAD_TX_RING_CSR_LOAD_TXD_AC1, 1); + rt2x00_set_field32(®, LOAD_TX_RING_CSR_LOAD_TXD_AC2, 1); + rt2x00_set_field32(®, LOAD_TX_RING_CSR_LOAD_TXD_AC3, 1); + rt2x00_set_field32(®, LOAD_TX_RING_CSR_LOAD_TXD_MGMT, 1); + rt2x00pci_register_write(rt2x00dev, LOAD_TX_RING_CSR, reg); + + rt2x00pci_register_read(rt2x00dev, RX_CNTL_CSR, ®); + rt2x00_set_field32(®, RX_CNTL_CSR_LOAD_RXD, 1); + rt2x00pci_register_write(rt2x00dev, RX_CNTL_CSR, reg); + + return 0; +} + +static int rt61pci_init_registers(struct rt2x00_dev 
*rt2x00dev) +{ + u32 reg; + + rt2x00pci_register_read(rt2x00dev, TXRX_CSR0, ®); + rt2x00_set_field32(®, TXRX_CSR0_AUTO_TX_SEQ, 1); + rt2x00_set_field32(®, TXRX_CSR0_DISABLE_RX, 0); + rt2x00_set_field32(®, TXRX_CSR0_TX_WITHOUT_WAITING, 0); + rt2x00pci_register_write(rt2x00dev, TXRX_CSR0, reg); + + rt2x00pci_register_read(rt2x00dev, TXRX_CSR1, ®); + rt2x00_set_field32(®, TXRX_CSR1_BBP_ID0, 47); /* CCK Signal */ + rt2x00_set_field32(®, TXRX_CSR1_BBP_ID0_VALID, 1); + rt2x00_set_field32(®, TXRX_CSR1_BBP_ID1, 30); /* Rssi */ + rt2x00_set_field32(®, TXRX_CSR1_BBP_ID1_VALID, 1); + rt2x00_set_field32(®, TXRX_CSR1_BBP_ID2, 42); /* OFDM Rate */ + rt2x00_set_field32(®, TXRX_CSR1_BBP_ID2_VALID, 1); + rt2x00_set_field32(®, TXRX_CSR1_BBP_ID3, 30); /* Rssi */ + rt2x00_set_field32(®, TXRX_CSR1_BBP_ID3_VALID, 1); + rt2x00pci_register_write(rt2x00dev, TXRX_CSR1, reg); + + /* + * CCK TXD BBP registers + */ + rt2x00pci_register_read(rt2x00dev, TXRX_CSR2, ®); + rt2x00_set_field32(®, TXRX_CSR2_BBP_ID0, 13); + rt2x00_set_field32(®, TXRX_CSR2_BBP_ID0_VALID, 1); + rt2x00_set_field32(®, TXRX_CSR2_BBP_ID1, 12); + rt2x00_set_field32(®, TXRX_CSR2_BBP_ID1_VALID, 1); + rt2x00_set_field32(®, TXRX_CSR2_BBP_ID2, 11); + rt2x00_set_field32(®, TXRX_CSR2_BBP_ID2_VALID, 1); + rt2x00_set_field32(®, TXRX_CSR2_BBP_ID3, 10); + rt2x00_set_field32(®, TXRX_CSR2_BBP_ID3_VALID, 1); + rt2x00pci_register_write(rt2x00dev, TXRX_CSR2, reg); + + /* + * OFDM TXD BBP registers + */ + rt2x00pci_register_read(rt2x00dev, TXRX_CSR3, ®); + rt2x00_set_field32(®, TXRX_CSR3_BBP_ID0, 7); + rt2x00_set_field32(®, TXRX_CSR3_BBP_ID0_VALID, 1); + rt2x00_set_field32(®, TXRX_CSR3_BBP_ID1, 6); + rt2x00_set_field32(®, TXRX_CSR3_BBP_ID1_VALID, 1); + rt2x00_set_field32(®, TXRX_CSR3_BBP_ID2, 5); + rt2x00_set_field32(®, TXRX_CSR3_BBP_ID2_VALID, 1); + rt2x00pci_register_write(rt2x00dev, TXRX_CSR3, reg); + + rt2x00pci_register_read(rt2x00dev, TXRX_CSR7, ®); + rt2x00_set_field32(®, TXRX_CSR7_ACK_CTS_6MBS, 59); + rt2x00_set_field32(®, TXRX_CSR7_ACK_CTS_9MBS, 53); + rt2x00_set_field32(®, TXRX_CSR7_ACK_CTS_12MBS, 49); + rt2x00_set_field32(®, TXRX_CSR7_ACK_CTS_18MBS, 46); + rt2x00pci_register_write(rt2x00dev, TXRX_CSR7, reg); + + rt2x00pci_register_read(rt2x00dev, TXRX_CSR8, ®); + rt2x00_set_field32(®, TXRX_CSR8_ACK_CTS_24MBS, 44); + rt2x00_set_field32(®, TXRX_CSR8_ACK_CTS_36MBS, 42); + rt2x00_set_field32(®, TXRX_CSR8_ACK_CTS_48MBS, 42); + rt2x00_set_field32(®, TXRX_CSR8_ACK_CTS_54MBS, 42); + rt2x00pci_register_write(rt2x00dev, TXRX_CSR8, reg); + + rt2x00pci_register_write(rt2x00dev, TXRX_CSR15, 0x0000000f); + + rt2x00pci_register_write(rt2x00dev, MAC_CSR6, 0x00000fff); + + rt2x00pci_register_read(rt2x00dev, MAC_CSR9, ®); + rt2x00_set_field32(®, MAC_CSR9_CW_SELECT, 0); + rt2x00pci_register_write(rt2x00dev, MAC_CSR9, reg); + + rt2x00pci_register_write(rt2x00dev, MAC_CSR10, 0x0000071c); + + if (rt2x00dev->ops->lib->set_device_state(rt2x00dev, STATE_AWAKE)) + return -EBUSY; + + rt2x00pci_register_write(rt2x00dev, MAC_CSR13, 0x0000e000); + + /* + * Invalidate all Shared Keys (SEC_CSR0), + * and clear the Shared key Cipher algorithms (SEC_CSR1 & SEC_CSR5) + */ + rt2x00pci_register_write(rt2x00dev, SEC_CSR0, 0x00000000); + rt2x00pci_register_write(rt2x00dev, SEC_CSR1, 0x00000000); + rt2x00pci_register_write(rt2x00dev, SEC_CSR5, 0x00000000); + + rt2x00pci_register_write(rt2x00dev, PHY_CSR1, 0x000023b0); + rt2x00pci_register_write(rt2x00dev, PHY_CSR5, 0x060a100c); + rt2x00pci_register_write(rt2x00dev, PHY_CSR6, 0x00080606); + rt2x00pci_register_write(rt2x00dev, PHY_CSR7, 
0x00000a08); + + rt2x00pci_register_write(rt2x00dev, PCI_CFG_CSR, 0x28ca4404); + + rt2x00pci_register_write(rt2x00dev, TEST_MODE_CSR, 0x00000200); + + rt2x00pci_register_write(rt2x00dev, M2H_CMD_DONE_CSR, 0xffffffff); + + rt2x00pci_register_read(rt2x00dev, AC_TXOP_CSR0, ®); + rt2x00_set_field32(®, AC_TXOP_CSR0_AC0_TX_OP, 0); + rt2x00_set_field32(®, AC_TXOP_CSR0_AC1_TX_OP, 0); + rt2x00pci_register_write(rt2x00dev, AC_TXOP_CSR0, reg); + + rt2x00pci_register_read(rt2x00dev, AC_TXOP_CSR1, ®); + rt2x00_set_field32(®, AC_TXOP_CSR1_AC2_TX_OP, 192); + rt2x00_set_field32(®, AC_TXOP_CSR1_AC3_TX_OP, 48); + rt2x00pci_register_write(rt2x00dev, AC_TXOP_CSR1, reg); + + /* + * We must clear the error counters. + * These registers are cleared on read, + * so we may pass a useless variable to store the value. + */ + rt2x00pci_register_read(rt2x00dev, STA_CSR0, ®); + rt2x00pci_register_read(rt2x00dev, STA_CSR1, ®); + rt2x00pci_register_read(rt2x00dev, STA_CSR2, ®); + + /* + * Reset MAC and BBP registers. + */ + rt2x00pci_register_read(rt2x00dev, MAC_CSR1, ®); + rt2x00_set_field32(®, MAC_CSR1_SOFT_RESET, 1); + rt2x00_set_field32(®, MAC_CSR1_BBP_RESET, 1); + rt2x00pci_register_write(rt2x00dev, MAC_CSR1, reg); + + rt2x00pci_register_read(rt2x00dev, MAC_CSR1, ®); + rt2x00_set_field32(®, MAC_CSR1_SOFT_RESET, 0); + rt2x00_set_field32(®, MAC_CSR1_BBP_RESET, 0); + rt2x00pci_register_write(rt2x00dev, MAC_CSR1, reg); + + rt2x00pci_register_read(rt2x00dev, MAC_CSR1, ®); + rt2x00_set_field32(®, MAC_CSR1_HOST_READY, 1); + rt2x00pci_register_write(rt2x00dev, MAC_CSR1, reg); + + return 0; +} + +static int rt61pci_init_bbp(struct rt2x00_dev *rt2x00dev) +{ + unsigned int i; + u16 eeprom; + u8 reg_id; + u8 value; + + for (i = 0; i < REGISTER_BUSY_COUNT; i++) { + rt61pci_bbp_read(rt2x00dev, 0, &value); + if ((value != 0xff) && (value != 0x00)) + goto continue_csr_init; + NOTICE(rt2x00dev, "Waiting for BBP register.\n"); + udelay(REGISTER_BUSY_DELAY); + } + + ERROR(rt2x00dev, "BBP register access failed, aborting.\n"); + return -EACCES; + +continue_csr_init: + rt61pci_bbp_write(rt2x00dev, 3, 0x00); + rt61pci_bbp_write(rt2x00dev, 15, 0x30); + rt61pci_bbp_write(rt2x00dev, 21, 0xc8); + rt61pci_bbp_write(rt2x00dev, 22, 0x38); + rt61pci_bbp_write(rt2x00dev, 23, 0x06); + rt61pci_bbp_write(rt2x00dev, 24, 0xfe); + rt61pci_bbp_write(rt2x00dev, 25, 0x0a); + rt61pci_bbp_write(rt2x00dev, 26, 0x0d); + rt61pci_bbp_write(rt2x00dev, 34, 0x12); + rt61pci_bbp_write(rt2x00dev, 37, 0x07); + rt61pci_bbp_write(rt2x00dev, 39, 0xf8); + rt61pci_bbp_write(rt2x00dev, 41, 0x60); + rt61pci_bbp_write(rt2x00dev, 53, 0x10); + rt61pci_bbp_write(rt2x00dev, 54, 0x18); + rt61pci_bbp_write(rt2x00dev, 60, 0x10); + rt61pci_bbp_write(rt2x00dev, 61, 0x04); + rt61pci_bbp_write(rt2x00dev, 62, 0x04); + rt61pci_bbp_write(rt2x00dev, 75, 0xfe); + rt61pci_bbp_write(rt2x00dev, 86, 0xfe); + rt61pci_bbp_write(rt2x00dev, 88, 0xfe); + rt61pci_bbp_write(rt2x00dev, 90, 0x0f); + rt61pci_bbp_write(rt2x00dev, 99, 0x00); + rt61pci_bbp_write(rt2x00dev, 102, 0x16); + rt61pci_bbp_write(rt2x00dev, 107, 0x04); + + DEBUG(rt2x00dev, "Start initialization from EEPROM...\n"); + for (i = 0; i < EEPROM_BBP_SIZE; i++) { + rt2x00_eeprom_read(rt2x00dev, EEPROM_BBP_START + i, &eeprom); + + if (eeprom != 0xffff && eeprom != 0x0000) { + reg_id = rt2x00_get_field16(eeprom, EEPROM_BBP_REG_ID); + value = rt2x00_get_field16(eeprom, EEPROM_BBP_VALUE); + DEBUG(rt2x00dev, "BBP: 0x%02x, value: 0x%02x.\n", + reg_id, value); + rt61pci_bbp_write(rt2x00dev, reg_id, value); + } + } + DEBUG(rt2x00dev, "...End 
initialization from EEPROM.\n"); + + return 0; +} + +/* + * Device state switch handlers. + */ +static void rt61pci_toggle_rx(struct rt2x00_dev *rt2x00dev, + enum dev_state state) +{ + u32 reg; + + rt2x00pci_register_read(rt2x00dev, TXRX_CSR0, ®); + rt2x00_set_field32(®, TXRX_CSR0_DISABLE_RX, + state == STATE_RADIO_RX_OFF); + rt2x00pci_register_write(rt2x00dev, TXRX_CSR0, reg); +} + +static void rt61pci_toggle_irq(struct rt2x00_dev *rt2x00dev, + enum dev_state state) +{ + int mask = (state == STATE_RADIO_IRQ_OFF); + u32 reg; + + /* + * When interrupts are being enabled, the interrupt registers + * should clear the register to assure a clean state. + */ + if (state == STATE_RADIO_IRQ_ON) { + rt2x00pci_register_read(rt2x00dev, INT_SOURCE_CSR, ®); + rt2x00pci_register_write(rt2x00dev, INT_SOURCE_CSR, reg); + + rt2x00pci_register_read(rt2x00dev, MCU_INT_SOURCE_CSR, ®); + rt2x00pci_register_write(rt2x00dev, MCU_INT_SOURCE_CSR, reg); + } + + /* + * Only toggle the interrupts bits we are going to use. + * Non-checked interrupt bits are disabled by default. + */ + rt2x00pci_register_read(rt2x00dev, INT_MASK_CSR, ®); + rt2x00_set_field32(®, INT_MASK_CSR_TXDONE, mask); + rt2x00_set_field32(®, INT_MASK_CSR_RXDONE, mask); + rt2x00_set_field32(®, INT_MASK_CSR_ENABLE_MITIGATION, mask); + rt2x00_set_field32(®, INT_MASK_CSR_MITIGATION_PERIOD, 0xff); + rt2x00pci_register_write(rt2x00dev, INT_MASK_CSR, reg); + + rt2x00pci_register_read(rt2x00dev, MCU_INT_MASK_CSR, ®); + rt2x00_set_field32(®, MCU_INT_MASK_CSR_0, mask); + rt2x00_set_field32(®, MCU_INT_MASK_CSR_1, mask); + rt2x00_set_field32(®, MCU_INT_MASK_CSR_2, mask); + rt2x00_set_field32(®, MCU_INT_MASK_CSR_3, mask); + rt2x00_set_field32(®, MCU_INT_MASK_CSR_4, mask); + rt2x00_set_field32(®, MCU_INT_MASK_CSR_5, mask); + rt2x00_set_field32(®, MCU_INT_MASK_CSR_6, mask); + rt2x00_set_field32(®, MCU_INT_MASK_CSR_7, mask); + rt2x00pci_register_write(rt2x00dev, MCU_INT_MASK_CSR, reg); +} + +static int rt61pci_enable_radio(struct rt2x00_dev *rt2x00dev) +{ + u32 reg; + + /* + * Initialize all registers. + */ + if (rt61pci_init_rings(rt2x00dev) || + rt61pci_init_registers(rt2x00dev) || + rt61pci_init_bbp(rt2x00dev)) { + ERROR(rt2x00dev, "Register initialization failed.\n"); + return -EIO; + } + + /* + * Enable interrupts. + */ + rt61pci_toggle_irq(rt2x00dev, STATE_RADIO_IRQ_ON); + + /* + * Enable RX. + */ + rt2x00pci_register_read(rt2x00dev, RX_CNTL_CSR, ®); + rt2x00_set_field32(®, RX_CNTL_CSR_ENABLE_RX_DMA, 1); + rt2x00pci_register_write(rt2x00dev, RX_CNTL_CSR, reg); + + /* + * Enable LED + */ + rt61pci_enable_led(rt2x00dev); + + return 0; +} + +static void rt61pci_disable_radio(struct rt2x00_dev *rt2x00dev) +{ + u32 reg; + + /* + * Disable LED + */ + rt61pci_disable_led(rt2x00dev); + + rt2x00pci_register_write(rt2x00dev, MAC_CSR10, 0x00001818); + + /* + * Disable synchronisation. + */ + rt2x00pci_register_write(rt2x00dev, TXRX_CSR9, 0); + + /* + * Cancel RX and TX. + */ + rt2x00pci_register_read(rt2x00dev, TX_CNTL_CSR, ®); + rt2x00_set_field32(®, TX_CNTL_CSR_ABORT_TX_AC0, 1); + rt2x00_set_field32(®, TX_CNTL_CSR_ABORT_TX_AC1, 1); + rt2x00_set_field32(®, TX_CNTL_CSR_ABORT_TX_AC2, 1); + rt2x00_set_field32(®, TX_CNTL_CSR_ABORT_TX_AC3, 1); + rt2x00_set_field32(®, TX_CNTL_CSR_ABORT_TX_MGMT, 1); + rt2x00pci_register_write(rt2x00dev, TX_CNTL_CSR, reg); + + /* + * Disable interrupts. 
+ */ + rt61pci_toggle_irq(rt2x00dev, STATE_RADIO_IRQ_OFF); +} + +static int rt61pci_set_state(struct rt2x00_dev *rt2x00dev, enum dev_state state) +{ + u32 reg; + unsigned int i; + char put_to_sleep; + char current_state; + + put_to_sleep = (state != STATE_AWAKE); + + rt2x00pci_register_read(rt2x00dev, MAC_CSR12, ®); + rt2x00_set_field32(®, MAC_CSR12_FORCE_WAKEUP, !put_to_sleep); + rt2x00_set_field32(®, MAC_CSR12_PUT_TO_SLEEP, put_to_sleep); + rt2x00pci_register_write(rt2x00dev, MAC_CSR12, reg); + + /* + * Device is not guaranteed to be in the requested state yet. + * We must wait until the register indicates that the + * device has entered the correct state. + */ + for (i = 0; i < REGISTER_BUSY_COUNT; i++) { + rt2x00pci_register_read(rt2x00dev, MAC_CSR12, ®); + current_state = + rt2x00_get_field32(reg, MAC_CSR12_BBP_CURRENT_STATE); + if (current_state == !put_to_sleep) + return 0; + msleep(10); + } + + NOTICE(rt2x00dev, "Device failed to enter state %d, " + "current device state %d.\n", !put_to_sleep, current_state); + + return -EBUSY; +} + +static int rt61pci_set_device_state(struct rt2x00_dev *rt2x00dev, + enum dev_state state) +{ + int retval = 0; + + switch (state) { + case STATE_RADIO_ON: + retval = rt61pci_enable_radio(rt2x00dev); + break; + case STATE_RADIO_OFF: + rt61pci_disable_radio(rt2x00dev); + break; + case STATE_RADIO_RX_ON: + case STATE_RADIO_RX_OFF: + rt61pci_toggle_rx(rt2x00dev, state); + break; + case STATE_DEEP_SLEEP: + case STATE_SLEEP: + case STATE_STANDBY: + case STATE_AWAKE: + retval = rt61pci_set_state(rt2x00dev, state); + break; + default: + retval = -ENOTSUPP; + break; + } + + return retval; +} + +/* + * TX descriptor initialization + */ +static void rt61pci_write_tx_desc(struct rt2x00_dev *rt2x00dev, + struct data_desc *txd, + struct txdata_entry_desc *desc, + struct ieee80211_hdr *ieee80211hdr, + unsigned int length, + struct ieee80211_tx_control *control) +{ + u32 word; + + /* + * Start writing the descriptor words. 
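rt61pci_set_state() above cannot assume the radio reaches the requested power state immediately, so it polls MAC_CSR12 up to REGISTER_BUSY_COUNT times with a 10 ms sleep in between. The generic shape of that wait loop, with a hypothetical check() callback:

#include <linux/delay.h>
#include <linux/errno.h>

static int example_wait_for_state(struct rt2x00_dev *rt2x00dev, int wanted,
				  int (*check)(struct rt2x00_dev *rt2x00dev))
{
	unsigned int i;

	for (i = 0; i < REGISTER_BUSY_COUNT; i++) {
		if (check(rt2x00dev) == wanted)
			return 0;
		msleep(10);
	}

	return -EBUSY;
}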
+ */ + rt2x00_desc_read(txd, 1, &word); + rt2x00_set_field32(&word, TXD_W1_HOST_Q_ID, desc->queue); + rt2x00_set_field32(&word, TXD_W1_AIFSN, desc->aifs); + rt2x00_set_field32(&word, TXD_W1_CWMIN, desc->cw_min); + rt2x00_set_field32(&word, TXD_W1_CWMAX, desc->cw_max); + rt2x00_set_field32(&word, TXD_W1_IV_OFFSET, IEEE80211_HEADER); + rt2x00_set_field32(&word, TXD_W1_HW_SEQUENCE, 1); + rt2x00_desc_write(txd, 1, word); + + rt2x00_desc_read(txd, 2, &word); + rt2x00_set_field32(&word, TXD_W2_PLCP_SIGNAL, desc->signal); + rt2x00_set_field32(&word, TXD_W2_PLCP_SERVICE, desc->service); + rt2x00_set_field32(&word, TXD_W2_PLCP_LENGTH_LOW, desc->length_low); + rt2x00_set_field32(&word, TXD_W2_PLCP_LENGTH_HIGH, desc->length_high); + rt2x00_desc_write(txd, 2, word); + + rt2x00_desc_read(txd, 5, &word); + rt2x00_set_field32(&word, TXD_W5_TX_POWER, + TXPOWER_TO_DEV(control->power_level)); + rt2x00_set_field32(&word, TXD_W5_WAITING_DMA_DONE_INT, 1); + rt2x00_desc_write(txd, 5, word); + + rt2x00_desc_read(txd, 11, &word); + rt2x00_set_field32(&word, TXD_W11_BUFFER_LENGTH0, length); + rt2x00_desc_write(txd, 11, word); + + rt2x00_desc_read(txd, 0, &word); + rt2x00_set_field32(&word, TXD_W0_OWNER_NIC, 1); + rt2x00_set_field32(&word, TXD_W0_VALID, 1); + rt2x00_set_field32(&word, TXD_W0_MORE_FRAG, + test_bit(ENTRY_TXD_MORE_FRAG, &desc->flags)); + rt2x00_set_field32(&word, TXD_W0_ACK, + !(control->flags & IEEE80211_TXCTL_NO_ACK)); + rt2x00_set_field32(&word, TXD_W0_TIMESTAMP, + test_bit(ENTRY_TXD_REQ_TIMESTAMP, &desc->flags)); + rt2x00_set_field32(&word, TXD_W0_OFDM, + test_bit(ENTRY_TXD_OFDM_RATE, &desc->flags)); + rt2x00_set_field32(&word, TXD_W0_IFS, desc->ifs); + rt2x00_set_field32(&word, TXD_W0_RETRY_MODE, + !!(control->flags & + IEEE80211_TXCTL_LONG_RETRY_LIMIT)); + rt2x00_set_field32(&word, TXD_W0_TKIP_MIC, 0); + rt2x00_set_field32(&word, TXD_W0_DATABYTE_COUNT, length); + rt2x00_set_field32(&word, TXD_W0_BURST, + test_bit(ENTRY_TXD_BURST, &desc->flags)); + rt2x00_set_field32(&word, TXD_W0_CIPHER_ALG, CIPHER_NONE); + rt2x00_desc_write(txd, 0, word); +} + +/* + * TX data initialization + */ +static void rt61pci_kick_tx_queue(struct rt2x00_dev *rt2x00dev, + unsigned int queue) +{ + u32 reg; + + if (queue == IEEE80211_TX_QUEUE_BEACON) { + /* + * For Wi-Fi faily generated beacons between participating + * stations. Set TBTT phase adaptive adjustment step to 8us. 
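Note the ordering in rt61pci_write_tx_desc() above: every payload word is written first, and word 0, which carries the VALID and OWNER_NIC bits, is written last, so the hardware can never pick up a half-initialised descriptor. The hand-off step in isolation (example_publish_txd is hypothetical):

static void example_publish_txd(struct data_desc *txd, u32 word0)
{
	/* ...words 1..11 have already been written via rt2x00_desc_write()... */
	rt2x00_set_field32(&word0, TXD_W0_VALID, 1);
	rt2x00_set_field32(&word0, TXD_W0_OWNER_NIC, 1);
	rt2x00_desc_write(txd, 0, word0);	/* publish to the NIC last */
}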
+ */ + rt2x00pci_register_write(rt2x00dev, TXRX_CSR10, 0x00001008); + + rt2x00pci_register_read(rt2x00dev, TXRX_CSR9, ®); + if (!rt2x00_get_field32(reg, TXRX_CSR9_BEACON_GEN)) { + rt2x00_set_field32(®, TXRX_CSR9_BEACON_GEN, 1); + rt2x00pci_register_write(rt2x00dev, TXRX_CSR9, reg); + } + return; + } + + rt2x00pci_register_read(rt2x00dev, TX_CNTL_CSR, ®); + if (queue == IEEE80211_TX_QUEUE_DATA0) + rt2x00_set_field32(®, TX_CNTL_CSR_KICK_TX_AC0, 1); + else if (queue == IEEE80211_TX_QUEUE_DATA1) + rt2x00_set_field32(®, TX_CNTL_CSR_KICK_TX_AC1, 1); + else if (queue == IEEE80211_TX_QUEUE_DATA2) + rt2x00_set_field32(®, TX_CNTL_CSR_KICK_TX_AC2, 1); + else if (queue == IEEE80211_TX_QUEUE_DATA3) + rt2x00_set_field32(®, TX_CNTL_CSR_KICK_TX_AC3, 1); + else if (queue == IEEE80211_TX_QUEUE_DATA4) + rt2x00_set_field32(®, TX_CNTL_CSR_KICK_TX_MGMT, 1); + rt2x00pci_register_write(rt2x00dev, TX_CNTL_CSR, reg); +} + +/* + * RX control handlers + */ +static int rt61pci_agc_to_rssi(struct rt2x00_dev *rt2x00dev, int rxd_w1) +{ + u16 eeprom; + u8 offset; + u8 lna; + + lna = rt2x00_get_field32(rxd_w1, RXD_W1_RSSI_LNA); + switch (lna) { + case 3: + offset = 90; + break; + case 2: + offset = 74; + break; + case 1: + offset = 64; + break; + default: + return 0; + } + + if (rt2x00dev->rx_status.phymode == MODE_IEEE80211A) { + if (test_bit(CONFIG_EXTERNAL_LNA_A, &rt2x00dev->flags)) + offset += 14; + + if (lna == 3 || lna == 2) + offset += 10; + + rt2x00_eeprom_read(rt2x00dev, EEPROM_RSSI_OFFSET_A, &eeprom); + offset -= rt2x00_get_field16(eeprom, EEPROM_RSSI_OFFSET_A_1); + } else { + if (test_bit(CONFIG_EXTERNAL_LNA_BG, &rt2x00dev->flags)) + offset += 14; + + rt2x00_eeprom_read(rt2x00dev, EEPROM_RSSI_OFFSET_BG, &eeprom); + offset -= rt2x00_get_field16(eeprom, EEPROM_RSSI_OFFSET_BG_1); + } + + return rt2x00_get_field32(rxd_w1, RXD_W1_RSSI_AGC) * 2 - offset; +} + +static void rt61pci_fill_rxdone(struct data_entry *entry, + struct rxdata_entry_desc *desc) +{ + struct data_desc *rxd = entry->priv; + u32 word0; + u32 word1; + + rt2x00_desc_read(rxd, 0, &word0); + rt2x00_desc_read(rxd, 1, &word1); + + desc->flags = 0; + if (rt2x00_get_field32(word0, RXD_W0_CRC_ERROR)) + desc->flags |= RX_FLAG_FAILED_FCS_CRC; + + /* + * Obtain the status about this packet. + */ + desc->signal = rt2x00_get_field32(word1, RXD_W1_SIGNAL); + desc->rssi = rt61pci_agc_to_rssi(entry->ring->rt2x00dev, word1); + desc->ofdm = rt2x00_get_field32(word0, RXD_W0_OFDM); + desc->size = rt2x00_get_field32(word0, RXD_W0_DATABYTE_COUNT); + + return; +} + +/* + * Interrupt functions. + */ +static void rt61pci_txdone(struct rt2x00_dev *rt2x00dev) +{ + struct data_ring *ring; + struct data_entry *entry; + struct data_desc *txd; + u32 word; + u32 reg; + u32 old_reg; + int type; + int index; + int tx_status; + int retry; + + /* + * During each loop we will compare the freshly read + * STA_CSR4 register value with the value read from + * the previous loop. If the 2 values are equal then + * we should stop processing because the chance it + * quite big that the device has been unplugged and + * we risk going into an endless loop. + */ + old_reg = 0; + + while (1) { + rt2x00pci_register_read(rt2x00dev, STA_CSR4, ®); + if (!rt2x00_get_field32(reg, STA_CSR4_VALID)) + break; + + if (old_reg == reg) + break; + old_reg = reg; + + /* + * Skip this entry when it contains an invalid + * ring identication number. 
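rt61pci_agc_to_rssi() above converts the AGC code from RX descriptor word 1 into an approximate dBm value as "AGC * 2 - offset", where the offset depends on the active LNA plus the per-board EEPROM correction. For example, with LNA code 2 (base offset 74) and an AGC reading of 20 the reported RSSI is 20 * 2 - 74 = -34. The bare formula:

#include <stdint.h>

static int example_agc_to_rssi(uint8_t agc, int offset)
{
	return (int)agc * 2 - offset;	/* offset: LNA base + EEPROM correction */
}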
+ */ + type = rt2x00_get_field32(reg, STA_CSR4_PID_TYPE); + ring = rt2x00lib_get_ring(rt2x00dev, type); + if (unlikely(!ring)) + continue; + + /* + * Skip this entry when it contains an invalid + * index number. + */ + index = rt2x00_get_field32(reg, STA_CSR4_PID_SUBTYPE); + if (unlikely(index >= ring->stats.limit)) + continue; + + entry = &ring->entry[index]; + txd = entry->priv; + rt2x00_desc_read(txd, 0, &word); + + if (rt2x00_get_field32(word, TXD_W0_OWNER_NIC) || + !rt2x00_get_field32(word, TXD_W0_VALID)) + return; + + /* + * Obtain the status about this packet. + */ + tx_status = rt2x00_get_field32(reg, STA_CSR4_TX_RESULT); + retry = rt2x00_get_field32(reg, STA_CSR4_RETRY_COUNT); + + rt2x00lib_txdone(entry, tx_status, retry); + + /* + * Make this entry available for reuse. + */ + entry->flags = 0; + rt2x00_set_field32(&word, TXD_W0_VALID, 0); + rt2x00_desc_write(txd, 0, word); + rt2x00_ring_index_done_inc(entry->ring); + + /* + * If the data ring was full before the txdone handler + * we must make sure the packet queue in the mac80211 stack + * is reenabled when the txdone handler has finished. + */ + if (!rt2x00_ring_full(ring)) + ieee80211_wake_queue(rt2x00dev->hw, + entry->tx_status.control.queue); + } +} + +static irqreturn_t rt61pci_interrupt(int irq, void *dev_instance) +{ + struct rt2x00_dev *rt2x00dev = dev_instance; + u32 reg_mcu; + u32 reg; + + /* + * Get the interrupt sources & saved to local variable. + * Write register value back to clear pending interrupts. + */ + rt2x00pci_register_read(rt2x00dev, MCU_INT_SOURCE_CSR, ®_mcu); + rt2x00pci_register_write(rt2x00dev, MCU_INT_SOURCE_CSR, reg_mcu); + + rt2x00pci_register_read(rt2x00dev, INT_SOURCE_CSR, ®); + rt2x00pci_register_write(rt2x00dev, INT_SOURCE_CSR, reg); + + if (!reg && !reg_mcu) + return IRQ_NONE; + + if (!test_bit(DEVICE_ENABLED_RADIO, &rt2x00dev->flags)) + return IRQ_HANDLED; + + /* + * Handle interrupts, walk through all bits + * and run the tasks, the bits are checked in order of + * priority. + */ + + /* + * 1 - Rx ring done interrupt. + */ + if (rt2x00_get_field32(reg, INT_SOURCE_CSR_RXDONE)) + rt2x00pci_rxdone(rt2x00dev); + + /* + * 2 - Tx ring done interrupt. + */ + if (rt2x00_get_field32(reg, INT_SOURCE_CSR_TXDONE)) + rt61pci_txdone(rt2x00dev); + + /* + * 3 - Handle MCU command done. + */ + if (reg_mcu) + rt2x00pci_register_write(rt2x00dev, + M2H_CMD_DONE_CSR, 0xffffffff); + + return IRQ_HANDLED; +} + +/* + * Device probe functions. + */ +static int rt61pci_validate_eeprom(struct rt2x00_dev *rt2x00dev) +{ + struct eeprom_93cx6 eeprom; + u32 reg; + u16 word; + u8 *mac; + s8 value; + + rt2x00pci_register_read(rt2x00dev, E2PROM_CSR, ®); + + eeprom.data = rt2x00dev; + eeprom.register_read = rt61pci_eepromregister_read; + eeprom.register_write = rt61pci_eepromregister_write; + eeprom.width = rt2x00_get_field32(reg, E2PROM_CSR_TYPE_93C46) ? + PCI_EEPROM_WIDTH_93C46 : PCI_EEPROM_WIDTH_93C66; + eeprom.reg_data_in = 0; + eeprom.reg_data_out = 0; + eeprom.reg_data_clock = 0; + eeprom.reg_chip_select = 0; + + eeprom_93cx6_multiread(&eeprom, EEPROM_BASE, rt2x00dev->eeprom, + EEPROM_SIZE / sizeof(u16)); + + /* + * Start validation of the data that has been read. 
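The old_reg comparison in rt61pci_txdone() above is an unplug guard: reads from a surprise-removed PCI device tend to return the same value forever, so two identical consecutive STA_CSR4 reads are taken as a signal to stop instead of looping endlessly. Restated as a tiny helper (hypothetical name):

#include <stdint.h>

static int example_txdone_should_stop(uint32_t reg, uint32_t *old_reg)
{
	if (reg == *old_reg)
		return 1;	/* device is probably gone, bail out */

	*old_reg = reg;		/* remember this read for the next iteration */
	return 0;
}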
+ */ + mac = rt2x00_eeprom_addr(rt2x00dev, EEPROM_MAC_ADDR_0); + if (!is_valid_ether_addr(mac)) { + DECLARE_MAC_BUF(macbuf); + + random_ether_addr(mac); + EEPROM(rt2x00dev, "MAC: %s\n", print_mac(macbuf, mac)); + } + + rt2x00_eeprom_read(rt2x00dev, EEPROM_ANTENNA, &word); + if (word == 0xffff) { + rt2x00_set_field16(&word, EEPROM_ANTENNA_NUM, 2); + rt2x00_set_field16(&word, EEPROM_ANTENNA_TX_DEFAULT, 2); + rt2x00_set_field16(&word, EEPROM_ANTENNA_RX_DEFAULT, 2); + rt2x00_set_field16(&word, EEPROM_ANTENNA_FRAME_TYPE, 0); + rt2x00_set_field16(&word, EEPROM_ANTENNA_DYN_TXAGC, 0); + rt2x00_set_field16(&word, EEPROM_ANTENNA_HARDWARE_RADIO, 0); + rt2x00_set_field16(&word, EEPROM_ANTENNA_RF_TYPE, RF5225); + rt2x00_eeprom_write(rt2x00dev, EEPROM_ANTENNA, word); + EEPROM(rt2x00dev, "Antenna: 0x%04x\n", word); + } + + rt2x00_eeprom_read(rt2x00dev, EEPROM_NIC, &word); + if (word == 0xffff) { + rt2x00_set_field16(&word, EEPROM_NIC_ENABLE_DIVERSITY, 0); + rt2x00_set_field16(&word, EEPROM_NIC_TX_DIVERSITY, 0); + rt2x00_set_field16(&word, EEPROM_NIC_TX_RX_FIXED, 0); + rt2x00_set_field16(&word, EEPROM_NIC_EXTERNAL_LNA_BG, 0); + rt2x00_set_field16(&word, EEPROM_NIC_CARDBUS_ACCEL, 0); + rt2x00_set_field16(&word, EEPROM_NIC_EXTERNAL_LNA_A, 0); + rt2x00_eeprom_write(rt2x00dev, EEPROM_NIC, word); + EEPROM(rt2x00dev, "NIC: 0x%04x\n", word); + } + + rt2x00_eeprom_read(rt2x00dev, EEPROM_LED, &word); + if (word == 0xffff) { + rt2x00_set_field16(&word, EEPROM_LED_LED_MODE, + LED_MODE_DEFAULT); + rt2x00_eeprom_write(rt2x00dev, EEPROM_LED, word); + EEPROM(rt2x00dev, "Led: 0x%04x\n", word); + } + + rt2x00_eeprom_read(rt2x00dev, EEPROM_FREQ, &word); + if (word == 0xffff) { + rt2x00_set_field16(&word, EEPROM_FREQ_OFFSET, 0); + rt2x00_set_field16(&word, EEPROM_FREQ_SEQ, 0); + rt2x00_eeprom_write(rt2x00dev, EEPROM_FREQ, word); + EEPROM(rt2x00dev, "Freq: 0x%04x\n", word); + } + + rt2x00_eeprom_read(rt2x00dev, EEPROM_RSSI_OFFSET_BG, &word); + if (word == 0xffff) { + rt2x00_set_field16(&word, EEPROM_RSSI_OFFSET_BG_1, 0); + rt2x00_set_field16(&word, EEPROM_RSSI_OFFSET_BG_2, 0); + rt2x00_eeprom_write(rt2x00dev, EEPROM_RSSI_OFFSET_BG, word); + EEPROM(rt2x00dev, "RSSI OFFSET BG: 0x%04x\n", word); + } else { + value = rt2x00_get_field16(word, EEPROM_RSSI_OFFSET_BG_1); + if (value < -10 || value > 10) + rt2x00_set_field16(&word, EEPROM_RSSI_OFFSET_BG_1, 0); + value = rt2x00_get_field16(word, EEPROM_RSSI_OFFSET_BG_2); + if (value < -10 || value > 10) + rt2x00_set_field16(&word, EEPROM_RSSI_OFFSET_BG_2, 0); + rt2x00_eeprom_write(rt2x00dev, EEPROM_RSSI_OFFSET_BG, word); + } + + rt2x00_eeprom_read(rt2x00dev, EEPROM_RSSI_OFFSET_A, &word); + if (word == 0xffff) { + rt2x00_set_field16(&word, EEPROM_RSSI_OFFSET_A_1, 0); + rt2x00_set_field16(&word, EEPROM_RSSI_OFFSET_A_2, 0); + rt2x00_eeprom_write(rt2x00dev, EEPROM_RSSI_OFFSET_A, word); + EEPROM(rt2x00dev, "RSSI OFFSET BG: 0x%04x\n", word); + } else { + value = rt2x00_get_field16(word, EEPROM_RSSI_OFFSET_A_1); + if (value < -10 || value > 10) + rt2x00_set_field16(&word, EEPROM_RSSI_OFFSET_A_1, 0); + value = rt2x00_get_field16(word, EEPROM_RSSI_OFFSET_A_2); + if (value < -10 || value > 10) + rt2x00_set_field16(&word, EEPROM_RSSI_OFFSET_A_2, 0); + rt2x00_eeprom_write(rt2x00dev, EEPROM_RSSI_OFFSET_A, word); + } + + return 0; +} + +static int rt61pci_init_eeprom(struct rt2x00_dev *rt2x00dev) +{ + u32 reg; + u16 value; + u16 eeprom; + u16 device; + + /* + * Read EEPROM word for configuration. + */ + rt2x00_eeprom_read(rt2x00dev, EEPROM_ANTENNA, &eeprom); + + /* + * Identify RF chipset. 
+ * To determine the RT chip we have to read the + * PCI header of the device. + */ + pci_read_config_word(rt2x00dev_pci(rt2x00dev), + PCI_CONFIG_HEADER_DEVICE, &device); + value = rt2x00_get_field16(eeprom, EEPROM_ANTENNA_RF_TYPE); + rt2x00pci_register_read(rt2x00dev, MAC_CSR0, ®); + rt2x00_set_chip(rt2x00dev, device, value, reg); + + if (!rt2x00_rf(&rt2x00dev->chip, RF5225) && + !rt2x00_rf(&rt2x00dev->chip, RF5325) && + !rt2x00_rf(&rt2x00dev->chip, RF2527) && + !rt2x00_rf(&rt2x00dev->chip, RF2529)) { + ERROR(rt2x00dev, "Invalid RF chipset detected.\n"); + return -ENODEV; + } + + /* + * Identify default antenna configuration. + */ + rt2x00dev->hw->conf.antenna_sel_tx = + rt2x00_get_field16(eeprom, EEPROM_ANTENNA_TX_DEFAULT); + rt2x00dev->hw->conf.antenna_sel_rx = + rt2x00_get_field16(eeprom, EEPROM_ANTENNA_RX_DEFAULT); + + /* + * Read the Frame type. + */ + if (rt2x00_get_field16(eeprom, EEPROM_ANTENNA_FRAME_TYPE)) + __set_bit(CONFIG_FRAME_TYPE, &rt2x00dev->flags); + + /* + * Determine number of antenna's. + */ + if (rt2x00_get_field16(eeprom, EEPROM_ANTENNA_NUM) == 2) + __set_bit(CONFIG_DOUBLE_ANTENNA, &rt2x00dev->flags); + + /* + * Detect if this device has an hardware controlled radio. + */ +#ifdef CONFIG_RT61PCI_RFKILL + if (rt2x00_get_field16(eeprom, EEPROM_ANTENNA_HARDWARE_RADIO)) + __set_bit(CONFIG_SUPPORT_HW_BUTTON, &rt2x00dev->flags); +#endif /* CONFIG_RT61PCI_RFKILL */ + + /* + * Read frequency offset and RF programming sequence. + */ + rt2x00_eeprom_read(rt2x00dev, EEPROM_FREQ, &eeprom); + if (rt2x00_get_field16(eeprom, EEPROM_FREQ_SEQ)) + __set_bit(CONFIG_RF_SEQUENCE, &rt2x00dev->flags); + + rt2x00dev->freq_offset = rt2x00_get_field16(eeprom, EEPROM_FREQ_OFFSET); + + /* + * Read external LNA informations. + */ + rt2x00_eeprom_read(rt2x00dev, EEPROM_NIC, &eeprom); + + if (rt2x00_get_field16(eeprom, EEPROM_NIC_EXTERNAL_LNA_A)) + __set_bit(CONFIG_EXTERNAL_LNA_A, &rt2x00dev->flags); + if (rt2x00_get_field16(eeprom, EEPROM_NIC_EXTERNAL_LNA_BG)) + __set_bit(CONFIG_EXTERNAL_LNA_BG, &rt2x00dev->flags); + + /* + * Store led settings, for correct led behaviour. + * If the eeprom value is invalid, + * switch to default led mode. 
+ */ + rt2x00_eeprom_read(rt2x00dev, EEPROM_LED, &eeprom); + + rt2x00dev->led_mode = rt2x00_get_field16(eeprom, EEPROM_LED_LED_MODE); + + rt2x00_set_field16(&rt2x00dev->led_reg, MCU_LEDCS_LED_MODE, + rt2x00dev->led_mode); + rt2x00_set_field16(&rt2x00dev->led_reg, MCU_LEDCS_POLARITY_GPIO_0, + rt2x00_get_field16(eeprom, + EEPROM_LED_POLARITY_GPIO_0)); + rt2x00_set_field16(&rt2x00dev->led_reg, MCU_LEDCS_POLARITY_GPIO_1, + rt2x00_get_field16(eeprom, + EEPROM_LED_POLARITY_GPIO_1)); + rt2x00_set_field16(&rt2x00dev->led_reg, MCU_LEDCS_POLARITY_GPIO_2, + rt2x00_get_field16(eeprom, + EEPROM_LED_POLARITY_GPIO_2)); + rt2x00_set_field16(&rt2x00dev->led_reg, MCU_LEDCS_POLARITY_GPIO_3, + rt2x00_get_field16(eeprom, + EEPROM_LED_POLARITY_GPIO_3)); + rt2x00_set_field16(&rt2x00dev->led_reg, MCU_LEDCS_POLARITY_GPIO_4, + rt2x00_get_field16(eeprom, + EEPROM_LED_POLARITY_GPIO_4)); + rt2x00_set_field16(&rt2x00dev->led_reg, MCU_LEDCS_POLARITY_ACT, + rt2x00_get_field16(eeprom, EEPROM_LED_POLARITY_ACT)); + rt2x00_set_field16(&rt2x00dev->led_reg, MCU_LEDCS_POLARITY_READY_BG, + rt2x00_get_field16(eeprom, + EEPROM_LED_POLARITY_RDY_G)); + rt2x00_set_field16(&rt2x00dev->led_reg, MCU_LEDCS_POLARITY_READY_A, + rt2x00_get_field16(eeprom, + EEPROM_LED_POLARITY_RDY_A)); + + return 0; +} + +/* + * RF value list for RF5225 & RF5325 + * Supports: 2.4 GHz & 5.2 GHz, rf_sequence disabled + */ +static const struct rf_channel rf_vals_noseq[] = { + { 1, 0x00002ccc, 0x00004786, 0x00068455, 0x000ffa0b }, + { 2, 0x00002ccc, 0x00004786, 0x00068455, 0x000ffa1f }, + { 3, 0x00002ccc, 0x0000478a, 0x00068455, 0x000ffa0b }, + { 4, 0x00002ccc, 0x0000478a, 0x00068455, 0x000ffa1f }, + { 5, 0x00002ccc, 0x0000478e, 0x00068455, 0x000ffa0b }, + { 6, 0x00002ccc, 0x0000478e, 0x00068455, 0x000ffa1f }, + { 7, 0x00002ccc, 0x00004792, 0x00068455, 0x000ffa0b }, + { 8, 0x00002ccc, 0x00004792, 0x00068455, 0x000ffa1f }, + { 9, 0x00002ccc, 0x00004796, 0x00068455, 0x000ffa0b }, + { 10, 0x00002ccc, 0x00004796, 0x00068455, 0x000ffa1f }, + { 11, 0x00002ccc, 0x0000479a, 0x00068455, 0x000ffa0b }, + { 12, 0x00002ccc, 0x0000479a, 0x00068455, 0x000ffa1f }, + { 13, 0x00002ccc, 0x0000479e, 0x00068455, 0x000ffa0b }, + { 14, 0x00002ccc, 0x000047a2, 0x00068455, 0x000ffa13 }, + + /* 802.11 UNI / HyperLan 2 */ + { 36, 0x00002ccc, 0x0000499a, 0x0009be55, 0x000ffa23 }, + { 40, 0x00002ccc, 0x000049a2, 0x0009be55, 0x000ffa03 }, + { 44, 0x00002ccc, 0x000049a6, 0x0009be55, 0x000ffa0b }, + { 48, 0x00002ccc, 0x000049aa, 0x0009be55, 0x000ffa13 }, + { 52, 0x00002ccc, 0x000049ae, 0x0009ae55, 0x000ffa1b }, + { 56, 0x00002ccc, 0x000049b2, 0x0009ae55, 0x000ffa23 }, + { 60, 0x00002ccc, 0x000049ba, 0x0009ae55, 0x000ffa03 }, + { 64, 0x00002ccc, 0x000049be, 0x0009ae55, 0x000ffa0b }, + + /* 802.11 HyperLan 2 */ + { 100, 0x00002ccc, 0x00004a2a, 0x000bae55, 0x000ffa03 }, + { 104, 0x00002ccc, 0x00004a2e, 0x000bae55, 0x000ffa0b }, + { 108, 0x00002ccc, 0x00004a32, 0x000bae55, 0x000ffa13 }, + { 112, 0x00002ccc, 0x00004a36, 0x000bae55, 0x000ffa1b }, + { 116, 0x00002ccc, 0x00004a3a, 0x000bbe55, 0x000ffa23 }, + { 120, 0x00002ccc, 0x00004a82, 0x000bbe55, 0x000ffa03 }, + { 124, 0x00002ccc, 0x00004a86, 0x000bbe55, 0x000ffa0b }, + { 128, 0x00002ccc, 0x00004a8a, 0x000bbe55, 0x000ffa13 }, + { 132, 0x00002ccc, 0x00004a8e, 0x000bbe55, 0x000ffa1b }, + { 136, 0x00002ccc, 0x00004a92, 0x000bbe55, 0x000ffa23 }, + + /* 802.11 UNII */ + { 140, 0x00002ccc, 0x00004a9a, 0x000bbe55, 0x000ffa03 }, + { 149, 0x00002ccc, 0x00004aa2, 0x000bbe55, 0x000ffa1f }, + { 153, 0x00002ccc, 0x00004aa6, 0x000bbe55, 0x000ffa27 }, + { 157, 
0x00002ccc, 0x00004aae, 0x000bbe55, 0x000ffa07 }, + { 161, 0x00002ccc, 0x00004ab2, 0x000bbe55, 0x000ffa0f }, + { 165, 0x00002ccc, 0x00004ab6, 0x000bbe55, 0x000ffa17 }, + + /* MMAC(Japan)J52 ch 34,38,42,46 */ + { 34, 0x00002ccc, 0x0000499a, 0x0009be55, 0x000ffa0b }, + { 38, 0x00002ccc, 0x0000499e, 0x0009be55, 0x000ffa13 }, + { 42, 0x00002ccc, 0x000049a2, 0x0009be55, 0x000ffa1b }, + { 46, 0x00002ccc, 0x000049a6, 0x0009be55, 0x000ffa23 }, +}; + +/* + * RF value list for RF5225 & RF5325 + * Supports: 2.4 GHz & 5.2 GHz, rf_sequence enabled + */ +static const struct rf_channel rf_vals_seq[] = { + { 1, 0x00002ccc, 0x00004786, 0x00068455, 0x000ffa0b }, + { 2, 0x00002ccc, 0x00004786, 0x00068455, 0x000ffa1f }, + { 3, 0x00002ccc, 0x0000478a, 0x00068455, 0x000ffa0b }, + { 4, 0x00002ccc, 0x0000478a, 0x00068455, 0x000ffa1f }, + { 5, 0x00002ccc, 0x0000478e, 0x00068455, 0x000ffa0b }, + { 6, 0x00002ccc, 0x0000478e, 0x00068455, 0x000ffa1f }, + { 7, 0x00002ccc, 0x00004792, 0x00068455, 0x000ffa0b }, + { 8, 0x00002ccc, 0x00004792, 0x00068455, 0x000ffa1f }, + { 9, 0x00002ccc, 0x00004796, 0x00068455, 0x000ffa0b }, + { 10, 0x00002ccc, 0x00004796, 0x00068455, 0x000ffa1f }, + { 11, 0x00002ccc, 0x0000479a, 0x00068455, 0x000ffa0b }, + { 12, 0x00002ccc, 0x0000479a, 0x00068455, 0x000ffa1f }, + { 13, 0x00002ccc, 0x0000479e, 0x00068455, 0x000ffa0b }, + { 14, 0x00002ccc, 0x000047a2, 0x00068455, 0x000ffa13 }, + + /* 802.11 UNI / HyperLan 2 */ + { 36, 0x00002cd4, 0x0004481a, 0x00098455, 0x000c0a03 }, + { 40, 0x00002cd0, 0x00044682, 0x00098455, 0x000c0a03 }, + { 44, 0x00002cd0, 0x00044686, 0x00098455, 0x000c0a1b }, + { 48, 0x00002cd0, 0x0004468e, 0x00098655, 0x000c0a0b }, + { 52, 0x00002cd0, 0x00044692, 0x00098855, 0x000c0a23 }, + { 56, 0x00002cd0, 0x0004469a, 0x00098c55, 0x000c0a13 }, + { 60, 0x00002cd0, 0x000446a2, 0x00098e55, 0x000c0a03 }, + { 64, 0x00002cd0, 0x000446a6, 0x00099255, 0x000c0a1b }, + + /* 802.11 HyperLan 2 */ + { 100, 0x00002cd4, 0x0004489a, 0x000b9855, 0x000c0a03 }, + { 104, 0x00002cd4, 0x000448a2, 0x000b9855, 0x000c0a03 }, + { 108, 0x00002cd4, 0x000448aa, 0x000b9855, 0x000c0a03 }, + { 112, 0x00002cd4, 0x000448b2, 0x000b9a55, 0x000c0a03 }, + { 116, 0x00002cd4, 0x000448ba, 0x000b9a55, 0x000c0a03 }, + { 120, 0x00002cd0, 0x00044702, 0x000b9a55, 0x000c0a03 }, + { 124, 0x00002cd0, 0x00044706, 0x000b9a55, 0x000c0a1b }, + { 128, 0x00002cd0, 0x0004470e, 0x000b9c55, 0x000c0a0b }, + { 132, 0x00002cd0, 0x00044712, 0x000b9c55, 0x000c0a23 }, + { 136, 0x00002cd0, 0x0004471a, 0x000b9e55, 0x000c0a13 }, + + /* 802.11 UNII */ + { 140, 0x00002cd0, 0x00044722, 0x000b9e55, 0x000c0a03 }, + { 149, 0x00002cd0, 0x0004472e, 0x000ba255, 0x000c0a1b }, + { 153, 0x00002cd0, 0x00044736, 0x000ba255, 0x000c0a0b }, + { 157, 0x00002cd4, 0x0004490a, 0x000ba255, 0x000c0a17 }, + { 161, 0x00002cd4, 0x00044912, 0x000ba255, 0x000c0a17 }, + { 165, 0x00002cd4, 0x0004491a, 0x000ba255, 0x000c0a17 }, + + /* MMAC(Japan)J52 ch 34,38,42,46 */ + { 34, 0x00002ccc, 0x0000499a, 0x0009be55, 0x000c0a0b }, + { 38, 0x00002ccc, 0x0000499e, 0x0009be55, 0x000c0a13 }, + { 42, 0x00002ccc, 0x000049a2, 0x0009be55, 0x000c0a1b }, + { 46, 0x00002ccc, 0x000049a6, 0x0009be55, 0x000c0a23 }, +}; + +static void rt61pci_probe_hw_mode(struct rt2x00_dev *rt2x00dev) +{ + struct hw_mode_spec *spec = &rt2x00dev->spec; + u8 *txpower; + unsigned int i; + + /* + * Initialize all hw fields. 
+ */ + rt2x00dev->hw->flags = + IEEE80211_HW_HOST_GEN_BEACON_TEMPLATE | + IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING; + rt2x00dev->hw->extra_tx_headroom = 0; + rt2x00dev->hw->max_signal = MAX_SIGNAL; + rt2x00dev->hw->max_rssi = MAX_RX_SSI; + rt2x00dev->hw->queues = 5; + + SET_IEEE80211_DEV(rt2x00dev->hw, &rt2x00dev_pci(rt2x00dev)->dev); + SET_IEEE80211_PERM_ADDR(rt2x00dev->hw, + rt2x00_eeprom_addr(rt2x00dev, + EEPROM_MAC_ADDR_0)); + + /* + * Convert tx_power array in eeprom. + */ + txpower = rt2x00_eeprom_addr(rt2x00dev, EEPROM_TXPOWER_G_START); + for (i = 0; i < 14; i++) + txpower[i] = TXPOWER_FROM_DEV(txpower[i]); + + /* + * Initialize hw_mode information. + */ + spec->num_modes = 2; + spec->num_rates = 12; + spec->tx_power_a = NULL; + spec->tx_power_bg = txpower; + spec->tx_power_default = DEFAULT_TXPOWER; + + if (!test_bit(CONFIG_RF_SEQUENCE, &rt2x00dev->flags)) { + spec->num_channels = 14; + spec->channels = rf_vals_noseq; + } else { + spec->num_channels = 14; + spec->channels = rf_vals_seq; + } + + if (rt2x00_rf(&rt2x00dev->chip, RF5225) || + rt2x00_rf(&rt2x00dev->chip, RF5325)) { + spec->num_modes = 3; + spec->num_channels = ARRAY_SIZE(rf_vals_seq); + + txpower = rt2x00_eeprom_addr(rt2x00dev, EEPROM_TXPOWER_A_START); + for (i = 0; i < 14; i++) + txpower[i] = TXPOWER_FROM_DEV(txpower[i]); + + spec->tx_power_a = txpower; + } +} + +static int rt61pci_probe_hw(struct rt2x00_dev *rt2x00dev) +{ + int retval; + + /* + * Allocate eeprom data. + */ + retval = rt61pci_validate_eeprom(rt2x00dev); + if (retval) + return retval; + + retval = rt61pci_init_eeprom(rt2x00dev); + if (retval) + return retval; + + /* + * Initialize hw specifications. + */ + rt61pci_probe_hw_mode(rt2x00dev); + + /* + * This device requires firmware + */ + __set_bit(DRIVER_REQUIRE_FIRMWARE, &rt2x00dev->flags); + + /* + * Set the rssi offset. + */ + rt2x00dev->rssi_offset = DEFAULT_RSSI_OFFSET; + + return 0; +} + +/* + * IEEE80211 stack callback functions. + */ +static void rt61pci_configure_filter(struct ieee80211_hw *hw, + unsigned int changed_flags, + unsigned int *total_flags, + int mc_count, + struct dev_addr_list *mc_list) +{ + struct rt2x00_dev *rt2x00dev = hw->priv; + struct interface *intf = &rt2x00dev->interface; + u32 reg; + + /* + * Mask off any flags we are going to ignore from + * the total_flags field. + */ + *total_flags &= + FIF_ALLMULTI | + FIF_FCSFAIL | + FIF_PLCPFAIL | + FIF_CONTROL | + FIF_OTHER_BSS | + FIF_PROMISC_IN_BSS; + + /* + * Apply some rules to the filters: + * - Some filters imply different filters to be set. + * - Some things we can't filter out at all. + * - Some filters are set based on interface type. + */ + if (mc_count) + *total_flags |= FIF_ALLMULTI; + if (*total_flags & FIF_OTHER_BSS || + *total_flags & FIF_PROMISC_IN_BSS) + *total_flags |= FIF_PROMISC_IN_BSS | FIF_OTHER_BSS; + if (is_interface_type(intf, IEEE80211_IF_TYPE_AP)) + *total_flags |= FIF_PROMISC_IN_BSS; + + /* + * Check if there is any work left for us. + */ + if (intf->filter == *total_flags) + return; + intf->filter = *total_flags; + + /* + * Start configuration steps. + * Note that the version error will always be dropped + * and broadcast frames will always be accepted since + * there is no filter for it at this time. 
+ */ + rt2x00pci_register_read(rt2x00dev, TXRX_CSR0, ®); + rt2x00_set_field32(®, TXRX_CSR0_DROP_CRC, + !(*total_flags & FIF_FCSFAIL)); + rt2x00_set_field32(®, TXRX_CSR0_DROP_PHYSICAL, + !(*total_flags & FIF_PLCPFAIL)); + rt2x00_set_field32(®, TXRX_CSR0_DROP_CONTROL, + !(*total_flags & FIF_CONTROL)); + rt2x00_set_field32(®, TXRX_CSR0_DROP_NOT_TO_ME, + !(*total_flags & FIF_PROMISC_IN_BSS)); + rt2x00_set_field32(®, TXRX_CSR0_DROP_TO_DS, + !(*total_flags & FIF_PROMISC_IN_BSS)); + rt2x00_set_field32(®, TXRX_CSR0_DROP_VERSION_ERROR, 1); + rt2x00_set_field32(®, TXRX_CSR0_DROP_MULTICAST, + !(*total_flags & FIF_ALLMULTI)); + rt2x00_set_field32(®, TXRX_CSR0_DROP_BORADCAST, 0); + rt2x00_set_field32(®, TXRX_CSR0_DROP_ACK_CTS, 1); + rt2x00pci_register_write(rt2x00dev, TXRX_CSR0, reg); +} + +static int rt61pci_set_retry_limit(struct ieee80211_hw *hw, + u32 short_retry, u32 long_retry) +{ + struct rt2x00_dev *rt2x00dev = hw->priv; + u32 reg; + + rt2x00pci_register_read(rt2x00dev, TXRX_CSR4, ®); + rt2x00_set_field32(®, TXRX_CSR4_LONG_RETRY_LIMIT, long_retry); + rt2x00_set_field32(®, TXRX_CSR4_SHORT_RETRY_LIMIT, short_retry); + rt2x00pci_register_write(rt2x00dev, TXRX_CSR4, reg); + + return 0; +} + +static u64 rt61pci_get_tsf(struct ieee80211_hw *hw) +{ + struct rt2x00_dev *rt2x00dev = hw->priv; + u64 tsf; + u32 reg; + + rt2x00pci_register_read(rt2x00dev, TXRX_CSR13, ®); + tsf = (u64) rt2x00_get_field32(reg, TXRX_CSR13_HIGH_TSFTIMER) << 32; + rt2x00pci_register_read(rt2x00dev, TXRX_CSR12, ®); + tsf |= rt2x00_get_field32(reg, TXRX_CSR12_LOW_TSFTIMER); + + return tsf; +} + +static void rt61pci_reset_tsf(struct ieee80211_hw *hw) +{ + struct rt2x00_dev *rt2x00dev = hw->priv; + + rt2x00pci_register_write(rt2x00dev, TXRX_CSR12, 0); + rt2x00pci_register_write(rt2x00dev, TXRX_CSR13, 0); +} + +static int rt61pci_beacon_update(struct ieee80211_hw *hw, struct sk_buff *skb, + struct ieee80211_tx_control *control) +{ + struct rt2x00_dev *rt2x00dev = hw->priv; + + /* + * Just in case the ieee80211 doesn't set this, + * but we need this queue set for the descriptor + * initialization. + */ + control->queue = IEEE80211_TX_QUEUE_BEACON; + + /* + * We need to append the descriptor in front of the + * beacon frame. + */ + if (skb_headroom(skb) < TXD_DESC_SIZE) { + if (pskb_expand_head(skb, TXD_DESC_SIZE, 0, GFP_ATOMIC)) { + dev_kfree_skb(skb); + return -ENOMEM; + } + } + + /* + * First we create the beacon. + */ + skb_push(skb, TXD_DESC_SIZE); + memset(skb->data, 0, TXD_DESC_SIZE); + + rt2x00lib_write_tx_desc(rt2x00dev, (struct data_desc *)skb->data, + (struct ieee80211_hdr *)(skb->data + + TXD_DESC_SIZE), + skb->len - TXD_DESC_SIZE, control); + + /* + * Write entire beacon with descriptor to register, + * and kick the beacon generator. 
+ */ + rt2x00pci_register_multiwrite(rt2x00dev, HW_BEACON_BASE0, + skb->data, skb->len); + rt61pci_kick_tx_queue(rt2x00dev, IEEE80211_TX_QUEUE_BEACON); + + return 0; +} + +static const struct ieee80211_ops rt61pci_mac80211_ops = { + .tx = rt2x00mac_tx, + .start = rt2x00mac_start, + .stop = rt2x00mac_stop, + .add_interface = rt2x00mac_add_interface, + .remove_interface = rt2x00mac_remove_interface, + .config = rt2x00mac_config, + .config_interface = rt2x00mac_config_interface, + .configure_filter = rt61pci_configure_filter, + .get_stats = rt2x00mac_get_stats, + .set_retry_limit = rt61pci_set_retry_limit, + .erp_ie_changed = rt2x00mac_erp_ie_changed, + .conf_tx = rt2x00mac_conf_tx, + .get_tx_stats = rt2x00mac_get_tx_stats, + .get_tsf = rt61pci_get_tsf, + .reset_tsf = rt61pci_reset_tsf, + .beacon_update = rt61pci_beacon_update, +}; + +static const struct rt2x00lib_ops rt61pci_rt2x00_ops = { + .irq_handler = rt61pci_interrupt, + .probe_hw = rt61pci_probe_hw, + .get_firmware_name = rt61pci_get_firmware_name, + .load_firmware = rt61pci_load_firmware, + .initialize = rt2x00pci_initialize, + .uninitialize = rt2x00pci_uninitialize, + .set_device_state = rt61pci_set_device_state, + .rfkill_poll = rt61pci_rfkill_poll, + .link_stats = rt61pci_link_stats, + .reset_tuner = rt61pci_reset_tuner, + .link_tuner = rt61pci_link_tuner, + .write_tx_desc = rt61pci_write_tx_desc, + .write_tx_data = rt2x00pci_write_tx_data, + .kick_tx_queue = rt61pci_kick_tx_queue, + .fill_rxdone = rt61pci_fill_rxdone, + .config_mac_addr = rt61pci_config_mac_addr, + .config_bssid = rt61pci_config_bssid, + .config_type = rt61pci_config_type, + .config_preamble = rt61pci_config_preamble, + .config = rt61pci_config, +}; + +static const struct rt2x00_ops rt61pci_ops = { + .name = DRV_NAME, + .rxd_size = RXD_DESC_SIZE, + .txd_size = TXD_DESC_SIZE, + .eeprom_size = EEPROM_SIZE, + .rf_size = RF_SIZE, + .lib = &rt61pci_rt2x00_ops, + .hw = &rt61pci_mac80211_ops, +#ifdef CONFIG_RT2X00_LIB_DEBUGFS + .debugfs = &rt61pci_rt2x00debug, +#endif /* CONFIG_RT2X00_LIB_DEBUGFS */ +}; + +/* + * RT61pci module information. 
+ */ +static struct pci_device_id rt61pci_device_table[] = { + /* RT2561s */ + { PCI_DEVICE(0x1814, 0x0301), PCI_DEVICE_DATA(&rt61pci_ops) }, + /* RT2561 v2 */ + { PCI_DEVICE(0x1814, 0x0302), PCI_DEVICE_DATA(&rt61pci_ops) }, + /* RT2661 */ + { PCI_DEVICE(0x1814, 0x0401), PCI_DEVICE_DATA(&rt61pci_ops) }, + { 0, } +}; + +MODULE_AUTHOR(DRV_PROJECT); +MODULE_VERSION(DRV_VERSION); +MODULE_DESCRIPTION("Ralink RT61 PCI & PCMCIA Wireless LAN driver."); +MODULE_SUPPORTED_DEVICE("Ralink RT2561, RT2561s & RT2661 " + "PCI & PCMCIA chipset based cards"); +MODULE_DEVICE_TABLE(pci, rt61pci_device_table); +MODULE_FIRMWARE(FIRMWARE_RT2561); +MODULE_FIRMWARE(FIRMWARE_RT2561s); +MODULE_FIRMWARE(FIRMWARE_RT2661); +MODULE_LICENSE("GPL"); + +static struct pci_driver rt61pci_driver = { + .name = DRV_NAME, + .id_table = rt61pci_device_table, + .probe = rt2x00pci_probe, + .remove = __devexit_p(rt2x00pci_remove), + .suspend = rt2x00pci_suspend, + .resume = rt2x00pci_resume, +}; + +static int __init rt61pci_init(void) +{ + return pci_register_driver(&rt61pci_driver); +} + +static void __exit rt61pci_exit(void) +{ + pci_unregister_driver(&rt61pci_driver); +} + +module_init(rt61pci_init); +module_exit(rt61pci_exit); diff --git a/drivers/net/wireless/rt2x00/rt61pci.h b/drivers/net/wireless/rt2x00/rt61pci.h new file mode 100644 index 0000000000000000000000000000000000000000..6721d7dd32bc67222f2feae5730fc6f2050cc610 --- /dev/null +++ b/drivers/net/wireless/rt2x00/rt61pci.h @@ -0,0 +1,1457 @@ +/* + Copyright (C) 2004 - 2007 rt2x00 SourceForge Project + + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the + Free Software Foundation, Inc., + 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + */ + +/* + Module: rt61pci + Abstract: Data structures and registers for the rt61pci module. + Supported chipsets: RT2561, RT2561s, RT2661. + */ + +#ifndef RT61PCI_H +#define RT61PCI_H + +/* + * RF chip defines. + */ +#define RF5225 0x0001 +#define RF5325 0x0002 +#define RF2527 0x0003 +#define RF2529 0x0004 + +/* + * Signal information. + * Defaul offset is required for RSSI <-> dBm conversion. + */ +#define MAX_SIGNAL 100 +#define MAX_RX_SSI -1 +#define DEFAULT_RSSI_OFFSET 120 + +/* + * Register layout information. + */ +#define CSR_REG_BASE 0x3000 +#define CSR_REG_SIZE 0x04b0 +#define EEPROM_BASE 0x0000 +#define EEPROM_SIZE 0x0100 +#define BBP_SIZE 0x0080 +#define RF_SIZE 0x0014 + +/* + * PCI registers. + */ + +/* + * PCI Configuration Header + */ +#define PCI_CONFIG_HEADER_VENDOR 0x0000 +#define PCI_CONFIG_HEADER_DEVICE 0x0002 + +/* + * HOST_CMD_CSR: For HOST to interrupt embedded processor + */ +#define HOST_CMD_CSR 0x0008 +#define HOST_CMD_CSR_HOST_COMMAND FIELD32(0x0000007f) +#define HOST_CMD_CSR_INTERRUPT_MCU FIELD32(0x00000080) + +/* + * MCU_CNTL_CSR + * SELECT_BANK: Select 8051 program bank. + * RESET: Enable 8051 reset state. + * READY: Ready state for 8051. 
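[Editorial note] Nearly every register in this header is described with FIELD32()/FIELD16() bit masks, and the driver manipulates them through rt2x00_get_field32()/rt2x00_set_field32(). Those helpers belong to the rt2x00 library and are not part of this file; the standalone fragment below merely illustrates the mask-and-shift idea that such contiguous-bitmask field definitions rely on, deriving the shift from the lowest set bit of the mask. It is not the rt2x00 implementation.

#include <stdint.h>

/* Illustration only, not the rt2x00 implementation. */
static inline uint32_t field32_get(uint32_t reg, uint32_t mask)
{
	return (reg & mask) / (mask & ~(mask - 1));	/* divide == shift right by the field offset */
}

static inline void field32_set(uint32_t *reg, uint32_t mask, uint32_t value)
{
	*reg &= ~mask;
	*reg |= (value * (mask & ~(mask - 1))) & mask;	/* multiply == shift left into the field */
}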
+ */ +#define MCU_CNTL_CSR 0x000c +#define MCU_CNTL_CSR_SELECT_BANK FIELD32(0x00000001) +#define MCU_CNTL_CSR_RESET FIELD32(0x00000002) +#define MCU_CNTL_CSR_READY FIELD32(0x00000004) + +/* + * SOFT_RESET_CSR + */ +#define SOFT_RESET_CSR 0x0010 + +/* + * MCU_INT_SOURCE_CSR: MCU interrupt source/mask register. + */ +#define MCU_INT_SOURCE_CSR 0x0014 +#define MCU_INT_SOURCE_CSR_0 FIELD32(0x00000001) +#define MCU_INT_SOURCE_CSR_1 FIELD32(0x00000002) +#define MCU_INT_SOURCE_CSR_2 FIELD32(0x00000004) +#define MCU_INT_SOURCE_CSR_3 FIELD32(0x00000008) +#define MCU_INT_SOURCE_CSR_4 FIELD32(0x00000010) +#define MCU_INT_SOURCE_CSR_5 FIELD32(0x00000020) +#define MCU_INT_SOURCE_CSR_6 FIELD32(0x00000040) +#define MCU_INT_SOURCE_CSR_7 FIELD32(0x00000080) +#define MCU_INT_SOURCE_CSR_TWAKEUP FIELD32(0x00000100) +#define MCU_INT_SOURCE_CSR_TBTT_EXPIRE FIELD32(0x00000200) + +/* + * MCU_INT_MASK_CSR: MCU interrupt source/mask register. + */ +#define MCU_INT_MASK_CSR 0x0018 +#define MCU_INT_MASK_CSR_0 FIELD32(0x00000001) +#define MCU_INT_MASK_CSR_1 FIELD32(0x00000002) +#define MCU_INT_MASK_CSR_2 FIELD32(0x00000004) +#define MCU_INT_MASK_CSR_3 FIELD32(0x00000008) +#define MCU_INT_MASK_CSR_4 FIELD32(0x00000010) +#define MCU_INT_MASK_CSR_5 FIELD32(0x00000020) +#define MCU_INT_MASK_CSR_6 FIELD32(0x00000040) +#define MCU_INT_MASK_CSR_7 FIELD32(0x00000080) +#define MCU_INT_MASK_CSR_TWAKEUP FIELD32(0x00000100) +#define MCU_INT_MASK_CSR_TBTT_EXPIRE FIELD32(0x00000200) + +/* + * PCI_USEC_CSR + */ +#define PCI_USEC_CSR 0x001c + +/* + * Security key table memory. + * 16 entries 32-byte for shared key table + * 64 entries 32-byte for pairwise key table + * 64 entries 8-byte for pairwise ta key table + */ +#define SHARED_KEY_TABLE_BASE 0x1000 +#define PAIRWISE_KEY_TABLE_BASE 0x1200 +#define PAIRWISE_TA_TABLE_BASE 0x1a00 + +struct hw_key_entry { + u8 key[16]; + u8 tx_mic[8]; + u8 rx_mic[8]; +} __attribute__ ((packed)); + +struct hw_pairwise_ta_entry { + u8 address[6]; + u8 reserved[2]; +} __attribute__ ((packed)); + +/* + * Other on-chip shared memory space. + */ +#define HW_CIS_BASE 0x2000 +#define HW_NULL_BASE 0x2b00 + +/* + * Since NULL frame won't be that long (256 byte), + * We steal 16 tail bytes to save debugging settings. + */ +#define HW_DEBUG_SETTING_BASE 0x2bf0 + +/* + * On-chip BEACON frame space. + */ +#define HW_BEACON_BASE0 0x2c00 +#define HW_BEACON_BASE1 0x2d00 +#define HW_BEACON_BASE2 0x2e00 +#define HW_BEACON_BASE3 0x2f00 +#define HW_BEACON_OFFSET 0x0100 + +/* + * HOST-MCU shared memory. + */ + +/* + * H2M_MAILBOX_CSR: Host-to-MCU Mailbox. + */ +#define H2M_MAILBOX_CSR 0x2100 +#define H2M_MAILBOX_CSR_ARG0 FIELD32(0x000000ff) +#define H2M_MAILBOX_CSR_ARG1 FIELD32(0x0000ff00) +#define H2M_MAILBOX_CSR_CMD_TOKEN FIELD32(0x00ff0000) +#define H2M_MAILBOX_CSR_OWNER FIELD32(0xff000000) + +/* + * MCU_LEDCS: LED control for MCU Mailbox. + */ +#define MCU_LEDCS_LED_MODE FIELD16(0x001f) +#define MCU_LEDCS_RADIO_STATUS FIELD16(0x0020) +#define MCU_LEDCS_LINK_BG_STATUS FIELD16(0x0040) +#define MCU_LEDCS_LINK_A_STATUS FIELD16(0x0080) +#define MCU_LEDCS_POLARITY_GPIO_0 FIELD16(0x0100) +#define MCU_LEDCS_POLARITY_GPIO_1 FIELD16(0x0200) +#define MCU_LEDCS_POLARITY_GPIO_2 FIELD16(0x0400) +#define MCU_LEDCS_POLARITY_GPIO_3 FIELD16(0x0800) +#define MCU_LEDCS_POLARITY_GPIO_4 FIELD16(0x1000) +#define MCU_LEDCS_POLARITY_ACT FIELD16(0x2000) +#define MCU_LEDCS_POLARITY_READY_BG FIELD16(0x4000) +#define MCU_LEDCS_POLARITY_READY_A FIELD16(0x8000) + +/* + * M2H_CMD_DONE_CSR. 
+ */ +#define M2H_CMD_DONE_CSR 0x2104 + +/* + * MCU_TXOP_ARRAY_BASE. + */ +#define MCU_TXOP_ARRAY_BASE 0x2110 + +/* + * MAC Control/Status Registers(CSR). + * Some values are set in TU, whereas 1 TU == 1024 us. + */ + +/* + * MAC_CSR0: ASIC revision number. + */ +#define MAC_CSR0 0x3000 + +/* + * MAC_CSR1: System control register. + * SOFT_RESET: Software reset bit, 1: reset, 0: normal. + * BBP_RESET: Hardware reset BBP. + * HOST_READY: Host is ready after initialization, 1: ready. + */ +#define MAC_CSR1 0x3004 +#define MAC_CSR1_SOFT_RESET FIELD32(0x00000001) +#define MAC_CSR1_BBP_RESET FIELD32(0x00000002) +#define MAC_CSR1_HOST_READY FIELD32(0x00000004) + +/* + * MAC_CSR2: STA MAC register 0. + */ +#define MAC_CSR2 0x3008 +#define MAC_CSR2_BYTE0 FIELD32(0x000000ff) +#define MAC_CSR2_BYTE1 FIELD32(0x0000ff00) +#define MAC_CSR2_BYTE2 FIELD32(0x00ff0000) +#define MAC_CSR2_BYTE3 FIELD32(0xff000000) + +/* + * MAC_CSR3: STA MAC register 1. + */ +#define MAC_CSR3 0x300c +#define MAC_CSR3_BYTE4 FIELD32(0x000000ff) +#define MAC_CSR3_BYTE5 FIELD32(0x0000ff00) +#define MAC_CSR3_UNICAST_TO_ME_MASK FIELD32(0x00ff0000) + +/* + * MAC_CSR4: BSSID register 0. + */ +#define MAC_CSR4 0x3010 +#define MAC_CSR4_BYTE0 FIELD32(0x000000ff) +#define MAC_CSR4_BYTE1 FIELD32(0x0000ff00) +#define MAC_CSR4_BYTE2 FIELD32(0x00ff0000) +#define MAC_CSR4_BYTE3 FIELD32(0xff000000) + +/* + * MAC_CSR5: BSSID register 1. + * BSS_ID_MASK: 3: one BSSID, 0: 4 BSSID, 2 or 1: 2 BSSID. + */ +#define MAC_CSR5 0x3014 +#define MAC_CSR5_BYTE4 FIELD32(0x000000ff) +#define MAC_CSR5_BYTE5 FIELD32(0x0000ff00) +#define MAC_CSR5_BSS_ID_MASK FIELD32(0x00ff0000) + +/* + * MAC_CSR6: Maximum frame length register. + */ +#define MAC_CSR6 0x3018 +#define MAC_CSR6_MAX_FRAME_UNIT FIELD32(0x00000fff) + +/* + * MAC_CSR7: Reserved + */ +#define MAC_CSR7 0x301c + +/* + * MAC_CSR8: SIFS/EIFS register. + * All units are in US. + */ +#define MAC_CSR8 0x3020 +#define MAC_CSR8_SIFS FIELD32(0x000000ff) +#define MAC_CSR8_SIFS_AFTER_RX_OFDM FIELD32(0x0000ff00) +#define MAC_CSR8_EIFS FIELD32(0xffff0000) + +/* + * MAC_CSR9: Back-Off control register. + * SLOT_TIME: Slot time, default is 20us for 802.11BG. + * CWMIN: Bit for Cwmin. default Cwmin is 31 (2^5 - 1). + * CWMAX: Bit for Cwmax, default Cwmax is 1023 (2^10 - 1). + * CW_SELECT: 1: CWmin/Cwmax select from register, 0:select from TxD. + */ +#define MAC_CSR9 0x3024 +#define MAC_CSR9_SLOT_TIME FIELD32(0x000000ff) +#define MAC_CSR9_CWMIN FIELD32(0x00000f00) +#define MAC_CSR9_CWMAX FIELD32(0x0000f000) +#define MAC_CSR9_CW_SELECT FIELD32(0x00010000) + +/* + * MAC_CSR10: Power state configuration. + */ +#define MAC_CSR10 0x3028 + +/* + * MAC_CSR11: Power saving transition time register. + * DELAY_AFTER_TBCN: Delay after Tbcn expired in units of TU. + * TBCN_BEFORE_WAKEUP: Number of beacon before wakeup. + * WAKEUP_LATENCY: In unit of TU. + */ +#define MAC_CSR11 0x302c +#define MAC_CSR11_DELAY_AFTER_TBCN FIELD32(0x000000ff) +#define MAC_CSR11_TBCN_BEFORE_WAKEUP FIELD32(0x00007f00) +#define MAC_CSR11_AUTOWAKE FIELD32(0x00008000) +#define MAC_CSR11_WAKEUP_LATENCY FIELD32(0x000f0000) + +/* + * MAC_CSR12: Manual power control / status register (merge CSR20 & PWRCSR1). + * CURRENT_STATE: 0:sleep, 1:awake. + * FORCE_WAKEUP: This has higher priority than PUT_TO_SLEEP. + * BBP_CURRENT_STATE: 0: BBP sleep, 1: BBP awake. 
+ */ +#define MAC_CSR12 0x3030 +#define MAC_CSR12_CURRENT_STATE FIELD32(0x00000001) +#define MAC_CSR12_PUT_TO_SLEEP FIELD32(0x00000002) +#define MAC_CSR12_FORCE_WAKEUP FIELD32(0x00000004) +#define MAC_CSR12_BBP_CURRENT_STATE FIELD32(0x00000008) + +/* + * MAC_CSR13: GPIO. + */ +#define MAC_CSR13 0x3034 +#define MAC_CSR13_BIT0 FIELD32(0x00000001) +#define MAC_CSR13_BIT1 FIELD32(0x00000002) +#define MAC_CSR13_BIT2 FIELD32(0x00000004) +#define MAC_CSR13_BIT3 FIELD32(0x00000008) +#define MAC_CSR13_BIT4 FIELD32(0x00000010) +#define MAC_CSR13_BIT5 FIELD32(0x00000020) +#define MAC_CSR13_BIT6 FIELD32(0x00000040) +#define MAC_CSR13_BIT7 FIELD32(0x00000080) +#define MAC_CSR13_BIT8 FIELD32(0x00000100) +#define MAC_CSR13_BIT9 FIELD32(0x00000200) +#define MAC_CSR13_BIT10 FIELD32(0x00000400) +#define MAC_CSR13_BIT11 FIELD32(0x00000800) +#define MAC_CSR13_BIT12 FIELD32(0x00001000) + +/* + * MAC_CSR14: LED control register. + * ON_PERIOD: On period, default 70ms. + * OFF_PERIOD: Off period, default 30ms. + * HW_LED: HW TX activity, 1: normal OFF, 0: normal ON. + * SW_LED: s/w LED, 1: ON, 0: OFF. + * HW_LED_POLARITY: 0: active low, 1: active high. + */ +#define MAC_CSR14 0x3038 +#define MAC_CSR14_ON_PERIOD FIELD32(0x000000ff) +#define MAC_CSR14_OFF_PERIOD FIELD32(0x0000ff00) +#define MAC_CSR14_HW_LED FIELD32(0x00010000) +#define MAC_CSR14_SW_LED FIELD32(0x00020000) +#define MAC_CSR14_HW_LED_POLARITY FIELD32(0x00040000) +#define MAC_CSR14_SW_LED2 FIELD32(0x00080000) + +/* + * MAC_CSR15: NAV control. + */ +#define MAC_CSR15 0x303c + +/* + * TXRX control registers. + * Some values are set in TU, whereas 1 TU == 1024 us. + */ + +/* + * TXRX_CSR0: TX/RX configuration register. + * TSF_OFFSET: Default is 24. + * AUTO_TX_SEQ: 1: ASIC auto replace sequence nr in outgoing frame. + * DISABLE_RX: Disable Rx engine. + * DROP_CRC: Drop CRC error. + * DROP_PHYSICAL: Drop physical error. + * DROP_CONTROL: Drop control frame. + * DROP_NOT_TO_ME: Drop not to me unicast frame. + * DROP_TO_DS: Drop fram ToDs bit is true. + * DROP_VERSION_ERROR: Drop version error frame. + * DROP_MULTICAST: Drop multicast frames. + * DROP_BORADCAST: Drop broadcast frames. + * ROP_ACK_CTS: Drop received ACK and CTS. 
+ */ +#define TXRX_CSR0 0x3040 +#define TXRX_CSR0_RX_ACK_TIMEOUT FIELD32(0x000001ff) +#define TXRX_CSR0_TSF_OFFSET FIELD32(0x00007e00) +#define TXRX_CSR0_AUTO_TX_SEQ FIELD32(0x00008000) +#define TXRX_CSR0_DISABLE_RX FIELD32(0x00010000) +#define TXRX_CSR0_DROP_CRC FIELD32(0x00020000) +#define TXRX_CSR0_DROP_PHYSICAL FIELD32(0x00040000) +#define TXRX_CSR0_DROP_CONTROL FIELD32(0x00080000) +#define TXRX_CSR0_DROP_NOT_TO_ME FIELD32(0x00100000) +#define TXRX_CSR0_DROP_TO_DS FIELD32(0x00200000) +#define TXRX_CSR0_DROP_VERSION_ERROR FIELD32(0x00400000) +#define TXRX_CSR0_DROP_MULTICAST FIELD32(0x00800000) +#define TXRX_CSR0_DROP_BORADCAST FIELD32(0x01000000) +#define TXRX_CSR0_DROP_ACK_CTS FIELD32(0x02000000) +#define TXRX_CSR0_TX_WITHOUT_WAITING FIELD32(0x04000000) + +/* + * TXRX_CSR1 + */ +#define TXRX_CSR1 0x3044 +#define TXRX_CSR1_BBP_ID0 FIELD32(0x0000007f) +#define TXRX_CSR1_BBP_ID0_VALID FIELD32(0x00000080) +#define TXRX_CSR1_BBP_ID1 FIELD32(0x00007f00) +#define TXRX_CSR1_BBP_ID1_VALID FIELD32(0x00008000) +#define TXRX_CSR1_BBP_ID2 FIELD32(0x007f0000) +#define TXRX_CSR1_BBP_ID2_VALID FIELD32(0x00800000) +#define TXRX_CSR1_BBP_ID3 FIELD32(0x7f000000) +#define TXRX_CSR1_BBP_ID3_VALID FIELD32(0x80000000) + +/* + * TXRX_CSR2 + */ +#define TXRX_CSR2 0x3048 +#define TXRX_CSR2_BBP_ID0 FIELD32(0x0000007f) +#define TXRX_CSR2_BBP_ID0_VALID FIELD32(0x00000080) +#define TXRX_CSR2_BBP_ID1 FIELD32(0x00007f00) +#define TXRX_CSR2_BBP_ID1_VALID FIELD32(0x00008000) +#define TXRX_CSR2_BBP_ID2 FIELD32(0x007f0000) +#define TXRX_CSR2_BBP_ID2_VALID FIELD32(0x00800000) +#define TXRX_CSR2_BBP_ID3 FIELD32(0x7f000000) +#define TXRX_CSR2_BBP_ID3_VALID FIELD32(0x80000000) + +/* + * TXRX_CSR3 + */ +#define TXRX_CSR3 0x304c +#define TXRX_CSR3_BBP_ID0 FIELD32(0x0000007f) +#define TXRX_CSR3_BBP_ID0_VALID FIELD32(0x00000080) +#define TXRX_CSR3_BBP_ID1 FIELD32(0x00007f00) +#define TXRX_CSR3_BBP_ID1_VALID FIELD32(0x00008000) +#define TXRX_CSR3_BBP_ID2 FIELD32(0x007f0000) +#define TXRX_CSR3_BBP_ID2_VALID FIELD32(0x00800000) +#define TXRX_CSR3_BBP_ID3 FIELD32(0x7f000000) +#define TXRX_CSR3_BBP_ID3_VALID FIELD32(0x80000000) + +/* + * TXRX_CSR4: Auto-Responder/Tx-retry register. + * AUTORESPOND_PREAMBLE: 0:long, 1:short preamble. + * OFDM_TX_RATE_DOWN: 1:enable. + * OFDM_TX_RATE_STEP: 0:1-step, 1: 2-step, 2:3-step, 3:4-step. + * OFDM_TX_FALLBACK_CCK: 0: Fallback to OFDM 6M only, 1: Fallback to CCK 1M,2M. + */ +#define TXRX_CSR4 0x3050 +#define TXRX_CSR4_TX_ACK_TIMEOUT FIELD32(0x000000ff) +#define TXRX_CSR4_CNTL_ACK_POLICY FIELD32(0x00000700) +#define TXRX_CSR4_ACK_CTS_PSM FIELD32(0x00010000) +#define TXRX_CSR4_AUTORESPOND_ENABLE FIELD32(0x00020000) +#define TXRX_CSR4_AUTORESPOND_PREAMBLE FIELD32(0x00040000) +#define TXRX_CSR4_OFDM_TX_RATE_DOWN FIELD32(0x00080000) +#define TXRX_CSR4_OFDM_TX_RATE_STEP FIELD32(0x00300000) +#define TXRX_CSR4_OFDM_TX_FALLBACK_CCK FIELD32(0x00400000) +#define TXRX_CSR4_LONG_RETRY_LIMIT FIELD32(0x0f000000) +#define TXRX_CSR4_SHORT_RETRY_LIMIT FIELD32(0xf0000000) + +/* + * TXRX_CSR5 + */ +#define TXRX_CSR5 0x3054 + +/* + * TXRX_CSR6: ACK/CTS payload consumed time + */ +#define TXRX_CSR6 0x3058 + +/* + * TXRX_CSR7: OFDM ACK/CTS payload consumed time for 6/9/12/18 mbps. + */ +#define TXRX_CSR7 0x305c +#define TXRX_CSR7_ACK_CTS_6MBS FIELD32(0x000000ff) +#define TXRX_CSR7_ACK_CTS_9MBS FIELD32(0x0000ff00) +#define TXRX_CSR7_ACK_CTS_12MBS FIELD32(0x00ff0000) +#define TXRX_CSR7_ACK_CTS_18MBS FIELD32(0xff000000) + +/* + * TXRX_CSR8: OFDM ACK/CTS payload consumed time for 24/36/48/54 mbps. 
+ */ +#define TXRX_CSR8 0x3060 +#define TXRX_CSR8_ACK_CTS_24MBS FIELD32(0x000000ff) +#define TXRX_CSR8_ACK_CTS_36MBS FIELD32(0x0000ff00) +#define TXRX_CSR8_ACK_CTS_48MBS FIELD32(0x00ff0000) +#define TXRX_CSR8_ACK_CTS_54MBS FIELD32(0xff000000) + +/* + * TXRX_CSR9: Synchronization control register. + * BEACON_INTERVAL: In unit of 1/16 TU. + * TSF_TICKING: Enable TSF auto counting. + * TSF_SYNC: Tsf sync, 0: disable, 1: infra, 2: ad-hoc/master mode. + * BEACON_GEN: Enable beacon generator. + */ +#define TXRX_CSR9 0x3064 +#define TXRX_CSR9_BEACON_INTERVAL FIELD32(0x0000ffff) +#define TXRX_CSR9_TSF_TICKING FIELD32(0x00010000) +#define TXRX_CSR9_TSF_SYNC FIELD32(0x00060000) +#define TXRX_CSR9_TBTT_ENABLE FIELD32(0x00080000) +#define TXRX_CSR9_BEACON_GEN FIELD32(0x00100000) +#define TXRX_CSR9_TIMESTAMP_COMPENSATE FIELD32(0xff000000) + +/* + * TXRX_CSR10: BEACON alignment. + */ +#define TXRX_CSR10 0x3068 + +/* + * TXRX_CSR11: AES mask. + */ +#define TXRX_CSR11 0x306c + +/* + * TXRX_CSR12: TSF low 32. + */ +#define TXRX_CSR12 0x3070 +#define TXRX_CSR12_LOW_TSFTIMER FIELD32(0xffffffff) + +/* + * TXRX_CSR13: TSF high 32. + */ +#define TXRX_CSR13 0x3074 +#define TXRX_CSR13_HIGH_TSFTIMER FIELD32(0xffffffff) + +/* + * TXRX_CSR14: TBTT timer. + */ +#define TXRX_CSR14 0x3078 + +/* + * TXRX_CSR15: TKIP MIC priority byte "AND" mask. + */ +#define TXRX_CSR15 0x307c + +/* + * PHY control registers. + * Some values are set in TU, whereas 1 TU == 1024 us. + */ + +/* + * PHY_CSR0: RF/PS control. + */ +#define PHY_CSR0 0x3080 +#define PHY_CSR0_PA_PE_BG FIELD32(0x00010000) +#define PHY_CSR0_PA_PE_A FIELD32(0x00020000) + +/* + * PHY_CSR1 + */ +#define PHY_CSR1 0x3084 + +/* + * PHY_CSR2: Pre-TX BBP control. + */ +#define PHY_CSR2 0x3088 + +/* + * PHY_CSR3: BBP serial control register. + * VALUE: Register value to program into BBP. + * REG_NUM: Selected BBP register. + * READ_CONTROL: 0: Write BBP, 1: Read BBP. + * BUSY: 1: ASIC is busy execute BBP programming. + */ +#define PHY_CSR3 0x308c +#define PHY_CSR3_VALUE FIELD32(0x000000ff) +#define PHY_CSR3_REGNUM FIELD32(0x00007f00) +#define PHY_CSR3_READ_CONTROL FIELD32(0x00008000) +#define PHY_CSR3_BUSY FIELD32(0x00010000) + +/* + * PHY_CSR4: RF serial control register + * VALUE: Register value (include register id) serial out to RF/IF chip. + * NUMBER_OF_BITS: Number of bits used in RFRegValue (I:20, RFMD:22). + * IF_SELECT: 1: select IF to program, 0: select RF to program. + * PLL_LD: RF PLL_LD status. + * BUSY: 1: ASIC is busy execute RF programming. + */ +#define PHY_CSR4 0x3090 +#define PHY_CSR4_VALUE FIELD32(0x00ffffff) +#define PHY_CSR4_NUMBER_OF_BITS FIELD32(0x1f000000) +#define PHY_CSR4_IF_SELECT FIELD32(0x20000000) +#define PHY_CSR4_PLL_LD FIELD32(0x40000000) +#define PHY_CSR4_BUSY FIELD32(0x80000000) + +/* + * PHY_CSR5: RX to TX signal switch timing control. + */ +#define PHY_CSR5 0x3094 +#define PHY_CSR5_IQ_FLIP FIELD32(0x00000004) + +/* + * PHY_CSR6: TX to RX signal timing control. + */ +#define PHY_CSR6 0x3098 +#define PHY_CSR6_IQ_FLIP FIELD32(0x00000004) + +/* + * PHY_CSR7: TX DAC switching timing control. + */ +#define PHY_CSR7 0x309c + +/* + * Security control register. + */ + +/* + * SEC_CSR0: Shared key table control. 
+ */ +#define SEC_CSR0 0x30a0 +#define SEC_CSR0_BSS0_KEY0_VALID FIELD32(0x00000001) +#define SEC_CSR0_BSS0_KEY1_VALID FIELD32(0x00000002) +#define SEC_CSR0_BSS0_KEY2_VALID FIELD32(0x00000004) +#define SEC_CSR0_BSS0_KEY3_VALID FIELD32(0x00000008) +#define SEC_CSR0_BSS1_KEY0_VALID FIELD32(0x00000010) +#define SEC_CSR0_BSS1_KEY1_VALID FIELD32(0x00000020) +#define SEC_CSR0_BSS1_KEY2_VALID FIELD32(0x00000040) +#define SEC_CSR0_BSS1_KEY3_VALID FIELD32(0x00000080) +#define SEC_CSR0_BSS2_KEY0_VALID FIELD32(0x00000100) +#define SEC_CSR0_BSS2_KEY1_VALID FIELD32(0x00000200) +#define SEC_CSR0_BSS2_KEY2_VALID FIELD32(0x00000400) +#define SEC_CSR0_BSS2_KEY3_VALID FIELD32(0x00000800) +#define SEC_CSR0_BSS3_KEY0_VALID FIELD32(0x00001000) +#define SEC_CSR0_BSS3_KEY1_VALID FIELD32(0x00002000) +#define SEC_CSR0_BSS3_KEY2_VALID FIELD32(0x00004000) +#define SEC_CSR0_BSS3_KEY3_VALID FIELD32(0x00008000) + +/* + * SEC_CSR1: Shared key table security mode register. + */ +#define SEC_CSR1 0x30a4 +#define SEC_CSR1_BSS0_KEY0_CIPHER_ALG FIELD32(0x00000007) +#define SEC_CSR1_BSS0_KEY1_CIPHER_ALG FIELD32(0x00000070) +#define SEC_CSR1_BSS0_KEY2_CIPHER_ALG FIELD32(0x00000700) +#define SEC_CSR1_BSS0_KEY3_CIPHER_ALG FIELD32(0x00007000) +#define SEC_CSR1_BSS1_KEY0_CIPHER_ALG FIELD32(0x00070000) +#define SEC_CSR1_BSS1_KEY1_CIPHER_ALG FIELD32(0x00700000) +#define SEC_CSR1_BSS1_KEY2_CIPHER_ALG FIELD32(0x07000000) +#define SEC_CSR1_BSS1_KEY3_CIPHER_ALG FIELD32(0x70000000) + +/* + * Pairwise key table valid bitmap registers. + * SEC_CSR2: pairwise key table valid bitmap 0. + * SEC_CSR3: pairwise key table valid bitmap 1. + */ +#define SEC_CSR2 0x30a8 +#define SEC_CSR3 0x30ac + +/* + * SEC_CSR4: Pairwise key table lookup control. + */ +#define SEC_CSR4 0x30b0 + +/* + * SEC_CSR5: shared key table security mode register. + */ +#define SEC_CSR5 0x30b4 +#define SEC_CSR5_BSS2_KEY0_CIPHER_ALG FIELD32(0x00000007) +#define SEC_CSR5_BSS2_KEY1_CIPHER_ALG FIELD32(0x00000070) +#define SEC_CSR5_BSS2_KEY2_CIPHER_ALG FIELD32(0x00000700) +#define SEC_CSR5_BSS2_KEY3_CIPHER_ALG FIELD32(0x00007000) +#define SEC_CSR5_BSS3_KEY0_CIPHER_ALG FIELD32(0x00070000) +#define SEC_CSR5_BSS3_KEY1_CIPHER_ALG FIELD32(0x00700000) +#define SEC_CSR5_BSS3_KEY2_CIPHER_ALG FIELD32(0x07000000) +#define SEC_CSR5_BSS3_KEY3_CIPHER_ALG FIELD32(0x70000000) + +/* + * STA control registers. + */ + +/* + * STA_CSR0: RX PLCP error count & RX FCS error count. + */ +#define STA_CSR0 0x30c0 +#define STA_CSR0_FCS_ERROR FIELD32(0x0000ffff) +#define STA_CSR0_PLCP_ERROR FIELD32(0xffff0000) + +/* + * STA_CSR1: RX False CCA count & RX LONG frame count. + */ +#define STA_CSR1 0x30c4 +#define STA_CSR1_PHYSICAL_ERROR FIELD32(0x0000ffff) +#define STA_CSR1_FALSE_CCA_ERROR FIELD32(0xffff0000) + +/* + * STA_CSR2: TX Beacon count and RX FIFO overflow count. + */ +#define STA_CSR2 0x30c8 +#define STA_CSR2_RX_FIFO_OVERFLOW_COUNT FIELD32(0x0000ffff) +#define STA_CSR2_RX_OVERFLOW_COUNT FIELD32(0xffff0000) + +/* + * STA_CSR3: TX Beacon count. + */ +#define STA_CSR3 0x30cc +#define STA_CSR3_TX_BEACON_COUNT FIELD32(0x0000ffff) + +/* + * STA_CSR4: TX Result status register. + * VALID: 1:This register contains a valid TX result. + */ +#define STA_CSR4 0x30d0 +#define STA_CSR4_VALID FIELD32(0x00000001) +#define STA_CSR4_TX_RESULT FIELD32(0x0000000e) +#define STA_CSR4_RETRY_COUNT FIELD32(0x000000f0) +#define STA_CSR4_PID_SUBTYPE FIELD32(0x00001f00) +#define STA_CSR4_PID_TYPE FIELD32(0x0000e000) +#define STA_CSR4_TXRATE FIELD32(0x000f0000) + +/* + * QOS control registers. 
+ */ + +/* + * QOS_CSR0: TXOP holder MAC address register. + */ +#define QOS_CSR0 0x30e0 +#define QOS_CSR0_BYTE0 FIELD32(0x000000ff) +#define QOS_CSR0_BYTE1 FIELD32(0x0000ff00) +#define QOS_CSR0_BYTE2 FIELD32(0x00ff0000) +#define QOS_CSR0_BYTE3 FIELD32(0xff000000) + +/* + * QOS_CSR1: TXOP holder MAC address register. + */ +#define QOS_CSR1 0x30e4 +#define QOS_CSR1_BYTE4 FIELD32(0x000000ff) +#define QOS_CSR1_BYTE5 FIELD32(0x0000ff00) + +/* + * QOS_CSR2: TXOP holder timeout register. + */ +#define QOS_CSR2 0x30e8 + +/* + * RX QOS-CFPOLL MAC address register. + * QOS_CSR3: RX QOS-CFPOLL MAC address 0. + * QOS_CSR4: RX QOS-CFPOLL MAC address 1. + */ +#define QOS_CSR3 0x30ec +#define QOS_CSR4 0x30f0 + +/* + * QOS_CSR5: "QosControl" field of the RX QOS-CFPOLL. + */ +#define QOS_CSR5 0x30f4 + +/* + * Host DMA registers. + */ + +/* + * AC0_BASE_CSR: AC_BK base address. + */ +#define AC0_BASE_CSR 0x3400 +#define AC0_BASE_CSR_RING_REGISTER FIELD32(0xffffffff) + +/* + * AC1_BASE_CSR: AC_BE base address. + */ +#define AC1_BASE_CSR 0x3404 +#define AC1_BASE_CSR_RING_REGISTER FIELD32(0xffffffff) + +/* + * AC2_BASE_CSR: AC_VI base address. + */ +#define AC2_BASE_CSR 0x3408 +#define AC2_BASE_CSR_RING_REGISTER FIELD32(0xffffffff) + +/* + * AC3_BASE_CSR: AC_VO base address. + */ +#define AC3_BASE_CSR 0x340c +#define AC3_BASE_CSR_RING_REGISTER FIELD32(0xffffffff) + +/* + * MGMT_BASE_CSR: MGMT ring base address. + */ +#define MGMT_BASE_CSR 0x3410 +#define MGMT_BASE_CSR_RING_REGISTER FIELD32(0xffffffff) + +/* + * TX_RING_CSR0: TX Ring size for AC_BK, AC_BE, AC_VI, AC_VO. + */ +#define TX_RING_CSR0 0x3418 +#define TX_RING_CSR0_AC0_RING_SIZE FIELD32(0x000000ff) +#define TX_RING_CSR0_AC1_RING_SIZE FIELD32(0x0000ff00) +#define TX_RING_CSR0_AC2_RING_SIZE FIELD32(0x00ff0000) +#define TX_RING_CSR0_AC3_RING_SIZE FIELD32(0xff000000) + +/* + * TX_RING_CSR1: TX Ring size for MGMT Ring, HCCA Ring + * TXD_SIZE: In unit of 32-bit. + */ +#define TX_RING_CSR1 0x341c +#define TX_RING_CSR1_MGMT_RING_SIZE FIELD32(0x000000ff) +#define TX_RING_CSR1_HCCA_RING_SIZE FIELD32(0x0000ff00) +#define TX_RING_CSR1_TXD_SIZE FIELD32(0x003f0000) + +/* + * AIFSN_CSR: AIFSN for each EDCA AC. + * AIFSN0: For AC_BK. + * AIFSN1: For AC_BE. + * AIFSN2: For AC_VI. + * AIFSN3: For AC_VO. + */ +#define AIFSN_CSR 0x3420 +#define AIFSN_CSR_AIFSN0 FIELD32(0x0000000f) +#define AIFSN_CSR_AIFSN1 FIELD32(0x000000f0) +#define AIFSN_CSR_AIFSN2 FIELD32(0x00000f00) +#define AIFSN_CSR_AIFSN3 FIELD32(0x0000f000) + +/* + * CWMIN_CSR: CWmin for each EDCA AC. + * CWMIN0: For AC_BK. + * CWMIN1: For AC_BE. + * CWMIN2: For AC_VI. + * CWMIN3: For AC_VO. + */ +#define CWMIN_CSR 0x3424 +#define CWMIN_CSR_CWMIN0 FIELD32(0x0000000f) +#define CWMIN_CSR_CWMIN1 FIELD32(0x000000f0) +#define CWMIN_CSR_CWMIN2 FIELD32(0x00000f00) +#define CWMIN_CSR_CWMIN3 FIELD32(0x0000f000) + +/* + * CWMAX_CSR: CWmax for each EDCA AC. + * CWMAX0: For AC_BK. + * CWMAX1: For AC_BE. + * CWMAX2: For AC_VI. + * CWMAX3: For AC_VO. 
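[Editorial note] A small aside on the contention-window fields: MAC_CSR9 earlier in this header documents its 4-bit CWMIN field with "default Cwmin is 31 (2^5 - 1)", and the per-AC CWMIN_CSR/CWMAX_CSR fields described here are 4 bits wide as well, which suggests these fields hold the exponent of the contention window rather than the window itself. That is an inference from the register comments, not something this patch states explicitly. As plain arithmetic:

	u32 cw_exponent = 5;				/* value programmed into a 4-bit CW field */
	u32 contention_window = (1 << cw_exponent) - 1;	/* 31 slots */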
+ */ +#define CWMAX_CSR 0x3428 +#define CWMAX_CSR_CWMAX0 FIELD32(0x0000000f) +#define CWMAX_CSR_CWMAX1 FIELD32(0x000000f0) +#define CWMAX_CSR_CWMAX2 FIELD32(0x00000f00) +#define CWMAX_CSR_CWMAX3 FIELD32(0x0000f000) + +/* + * TX_DMA_DST_CSR: TX DMA destination + * 0: TX ring0, 1: TX ring1, 2: TX ring2 3: invalid + */ +#define TX_DMA_DST_CSR 0x342c +#define TX_DMA_DST_CSR_DEST_AC0 FIELD32(0x00000003) +#define TX_DMA_DST_CSR_DEST_AC1 FIELD32(0x0000000c) +#define TX_DMA_DST_CSR_DEST_AC2 FIELD32(0x00000030) +#define TX_DMA_DST_CSR_DEST_AC3 FIELD32(0x000000c0) +#define TX_DMA_DST_CSR_DEST_MGMT FIELD32(0x00000300) + +/* + * TX_CNTL_CSR: KICK/Abort TX. + * KICK_TX_AC0: For AC_BK. + * KICK_TX_AC1: For AC_BE. + * KICK_TX_AC2: For AC_VI. + * KICK_TX_AC3: For AC_VO. + * ABORT_TX_AC0: For AC_BK. + * ABORT_TX_AC1: For AC_BE. + * ABORT_TX_AC2: For AC_VI. + * ABORT_TX_AC3: For AC_VO. + */ +#define TX_CNTL_CSR 0x3430 +#define TX_CNTL_CSR_KICK_TX_AC0 FIELD32(0x00000001) +#define TX_CNTL_CSR_KICK_TX_AC1 FIELD32(0x00000002) +#define TX_CNTL_CSR_KICK_TX_AC2 FIELD32(0x00000004) +#define TX_CNTL_CSR_KICK_TX_AC3 FIELD32(0x00000008) +#define TX_CNTL_CSR_KICK_TX_MGMT FIELD32(0x00000010) +#define TX_CNTL_CSR_ABORT_TX_AC0 FIELD32(0x00010000) +#define TX_CNTL_CSR_ABORT_TX_AC1 FIELD32(0x00020000) +#define TX_CNTL_CSR_ABORT_TX_AC2 FIELD32(0x00040000) +#define TX_CNTL_CSR_ABORT_TX_AC3 FIELD32(0x00080000) +#define TX_CNTL_CSR_ABORT_TX_MGMT FIELD32(0x00100000) + +/* + * LOAD_TX_RING_CSR: Load RX de + */ +#define LOAD_TX_RING_CSR 0x3434 +#define LOAD_TX_RING_CSR_LOAD_TXD_AC0 FIELD32(0x00000001) +#define LOAD_TX_RING_CSR_LOAD_TXD_AC1 FIELD32(0x00000002) +#define LOAD_TX_RING_CSR_LOAD_TXD_AC2 FIELD32(0x00000004) +#define LOAD_TX_RING_CSR_LOAD_TXD_AC3 FIELD32(0x00000008) +#define LOAD_TX_RING_CSR_LOAD_TXD_MGMT FIELD32(0x00000010) + +/* + * Several read-only registers, for debugging. + */ +#define AC0_TXPTR_CSR 0x3438 +#define AC1_TXPTR_CSR 0x343c +#define AC2_TXPTR_CSR 0x3440 +#define AC3_TXPTR_CSR 0x3444 +#define MGMT_TXPTR_CSR 0x3448 + +/* + * RX_BASE_CSR + */ +#define RX_BASE_CSR 0x3450 +#define RX_BASE_CSR_RING_REGISTER FIELD32(0xffffffff) + +/* + * RX_RING_CSR. + * RXD_SIZE: In unit of 32-bit. + */ +#define RX_RING_CSR 0x3454 +#define RX_RING_CSR_RING_SIZE FIELD32(0x000000ff) +#define RX_RING_CSR_RXD_SIZE FIELD32(0x00003f00) +#define RX_RING_CSR_RXD_WRITEBACK_SIZE FIELD32(0x00070000) + +/* + * RX_CNTL_CSR + */ +#define RX_CNTL_CSR 0x3458 +#define RX_CNTL_CSR_ENABLE_RX_DMA FIELD32(0x00000001) +#define RX_CNTL_CSR_LOAD_RXD FIELD32(0x00000002) + +/* + * RXPTR_CSR: Read-only, for debugging. + */ +#define RXPTR_CSR 0x345c + +/* + * PCI_CFG_CSR + */ +#define PCI_CFG_CSR 0x3460 + +/* + * BUF_FORMAT_CSR + */ +#define BUF_FORMAT_CSR 0x3464 + +/* + * INT_SOURCE_CSR: Interrupt source register. + * Write one to clear corresponding bit. + */ +#define INT_SOURCE_CSR 0x3468 +#define INT_SOURCE_CSR_TXDONE FIELD32(0x00000001) +#define INT_SOURCE_CSR_RXDONE FIELD32(0x00000002) +#define INT_SOURCE_CSR_BEACON_DONE FIELD32(0x00000004) +#define INT_SOURCE_CSR_TX_ABORT_DONE FIELD32(0x00000010) +#define INT_SOURCE_CSR_AC0_DMA_DONE FIELD32(0x00010000) +#define INT_SOURCE_CSR_AC1_DMA_DONE FIELD32(0x00020000) +#define INT_SOURCE_CSR_AC2_DMA_DONE FIELD32(0x00040000) +#define INT_SOURCE_CSR_AC3_DMA_DONE FIELD32(0x00080000) +#define INT_SOURCE_CSR_MGMT_DMA_DONE FIELD32(0x00100000) +#define INT_SOURCE_CSR_HCCA_DMA_DONE FIELD32(0x00200000) + +/* + * INT_MASK_CSR: Interrupt MASK register. 1: the interrupt is mask OFF. 
+ * MITIGATION_PERIOD: Interrupt mitigation in unit of 32 PCI clock. + */ +#define INT_MASK_CSR 0x346c +#define INT_MASK_CSR_TXDONE FIELD32(0x00000001) +#define INT_MASK_CSR_RXDONE FIELD32(0x00000002) +#define INT_MASK_CSR_BEACON_DONE FIELD32(0x00000004) +#define INT_MASK_CSR_TX_ABORT_DONE FIELD32(0x00000010) +#define INT_MASK_CSR_ENABLE_MITIGATION FIELD32(0x00000080) +#define INT_MASK_CSR_MITIGATION_PERIOD FIELD32(0x0000ff00) +#define INT_MASK_CSR_AC0_DMA_DONE FIELD32(0x00010000) +#define INT_MASK_CSR_AC1_DMA_DONE FIELD32(0x00020000) +#define INT_MASK_CSR_AC2_DMA_DONE FIELD32(0x00040000) +#define INT_MASK_CSR_AC3_DMA_DONE FIELD32(0x00080000) +#define INT_MASK_CSR_MGMT_DMA_DONE FIELD32(0x00100000) +#define INT_MASK_CSR_HCCA_DMA_DONE FIELD32(0x00200000) + +/* + * E2PROM_CSR: EEPROM control register. + * RELOAD: Write 1 to reload eeprom content. + * TYPE_93C46: 1: 93c46, 0:93c66. + * LOAD_STATUS: 1:loading, 0:done. + */ +#define E2PROM_CSR 0x3470 +#define E2PROM_CSR_RELOAD FIELD32(0x00000001) +#define E2PROM_CSR_DATA_CLOCK FIELD32(0x00000002) +#define E2PROM_CSR_CHIP_SELECT FIELD32(0x00000004) +#define E2PROM_CSR_DATA_IN FIELD32(0x00000008) +#define E2PROM_CSR_DATA_OUT FIELD32(0x00000010) +#define E2PROM_CSR_TYPE_93C46 FIELD32(0x00000020) +#define E2PROM_CSR_LOAD_STATUS FIELD32(0x00000040) + +/* + * AC_TXOP_CSR0: AC_BK/AC_BE TXOP register. + * AC0_TX_OP: For AC_BK, in unit of 32us. + * AC1_TX_OP: For AC_BE, in unit of 32us. + */ +#define AC_TXOP_CSR0 0x3474 +#define AC_TXOP_CSR0_AC0_TX_OP FIELD32(0x0000ffff) +#define AC_TXOP_CSR0_AC1_TX_OP FIELD32(0xffff0000) + +/* + * AC_TXOP_CSR1: AC_VO/AC_VI TXOP register. + * AC2_TX_OP: For AC_VI, in unit of 32us. + * AC3_TX_OP: For AC_VO, in unit of 32us. + */ +#define AC_TXOP_CSR1 0x3478 +#define AC_TXOP_CSR1_AC2_TX_OP FIELD32(0x0000ffff) +#define AC_TXOP_CSR1_AC3_TX_OP FIELD32(0xffff0000) + +/* + * DMA_STATUS_CSR + */ +#define DMA_STATUS_CSR 0x3480 + +/* + * TEST_MODE_CSR + */ +#define TEST_MODE_CSR 0x3484 + +/* + * UART0_TX_CSR + */ +#define UART0_TX_CSR 0x3488 + +/* + * UART0_RX_CSR + */ +#define UART0_RX_CSR 0x348c + +/* + * UART0_FRAME_CSR + */ +#define UART0_FRAME_CSR 0x3490 + +/* + * UART0_BUFFER_CSR + */ +#define UART0_BUFFER_CSR 0x3494 + +/* + * IO_CNTL_CSR + */ +#define IO_CNTL_CSR 0x3498 + +/* + * UART_INT_SOURCE_CSR + */ +#define UART_INT_SOURCE_CSR 0x34a8 + +/* + * UART_INT_MASK_CSR + */ +#define UART_INT_MASK_CSR 0x34ac + +/* + * PBF_QUEUE_CSR + */ +#define PBF_QUEUE_CSR 0x34b0 + +/* + * Firmware DMA registers. + * Firmware DMA registers are dedicated for MCU usage + * and should not be touched by host driver. + * Therefore we skip the definition of these registers. + */ +#define FW_TX_BASE_CSR 0x34c0 +#define FW_TX_START_CSR 0x34c4 +#define FW_TX_LAST_CSR 0x34c8 +#define FW_MODE_CNTL_CSR 0x34cc +#define FW_TXPTR_CSR 0x34d0 + +/* + * 8051 firmware image. + */ +#define FIRMWARE_RT2561 "rt2561.bin" +#define FIRMWARE_RT2561s "rt2561s.bin" +#define FIRMWARE_RT2661 "rt2661.bin" +#define FIRMWARE_IMAGE_BASE 0x4000 + +/* + * BBP registers. + * The wordsize of the BBP is 8 bits. 
+ */ + +/* + * R2 + */ +#define BBP_R2_BG_MODE FIELD8(0x20) + +/* + * R3 + */ +#define BBP_R3_SMART_MODE FIELD8(0x01) + +/* + * R4: RX antenna control + * FRAME_END: 1 - DPDT, 0 - SPDT (Only valid for 802.11G, RF2527 & RF2529) + */ +#define BBP_R4_RX_ANTENNA FIELD8(0x03) +#define BBP_R4_RX_FRAME_END FIELD8(0x20) + +/* + * R77 + */ +#define BBP_R77_PAIR FIELD8(0x03) + +/* + * RF registers + */ + +/* + * RF 3 + */ +#define RF3_TXPOWER FIELD32(0x00003e00) + +/* + * RF 4 + */ +#define RF4_FREQ_OFFSET FIELD32(0x0003f000) + +/* + * EEPROM content. + * The wordsize of the EEPROM is 16 bits. + */ + +/* + * HW MAC address. + */ +#define EEPROM_MAC_ADDR_0 0x0002 +#define EEPROM_MAC_ADDR_BYTE0 FIELD16(0x00ff) +#define EEPROM_MAC_ADDR_BYTE1 FIELD16(0xff00) +#define EEPROM_MAC_ADDR1 0x0004 +#define EEPROM_MAC_ADDR_BYTE2 FIELD16(0x00ff) +#define EEPROM_MAC_ADDR_BYTE3 FIELD16(0xff00) +#define EEPROM_MAC_ADDR_2 0x0006 +#define EEPROM_MAC_ADDR_BYTE4 FIELD16(0x00ff) +#define EEPROM_MAC_ADDR_BYTE5 FIELD16(0xff00) + +/* + * EEPROM antenna. + * ANTENNA_NUM: Number of antenna's. + * TX_DEFAULT: Default antenna 0: diversity, 1: A, 2: B. + * RX_DEFAULT: Default antenna 0: diversity, 1: A, 2: B. + * FRAME_TYPE: 0: DPDT , 1: SPDT , noted this bit is valid for g only. + * DYN_TXAGC: Dynamic TX AGC control. + * HARDWARE_RADIO: 1: Hardware controlled radio. Read GPIO0. + * RF_TYPE: Rf_type of this adapter. + */ +#define EEPROM_ANTENNA 0x0010 +#define EEPROM_ANTENNA_NUM FIELD16(0x0003) +#define EEPROM_ANTENNA_TX_DEFAULT FIELD16(0x000c) +#define EEPROM_ANTENNA_RX_DEFAULT FIELD16(0x0030) +#define EEPROM_ANTENNA_FRAME_TYPE FIELD16(0x0040) +#define EEPROM_ANTENNA_DYN_TXAGC FIELD16(0x0200) +#define EEPROM_ANTENNA_HARDWARE_RADIO FIELD16(0x0400) +#define EEPROM_ANTENNA_RF_TYPE FIELD16(0xf800) + +/* + * EEPROM NIC config. + * ENABLE_DIVERSITY: 1:enable, 0:disable. + * EXTERNAL_LNA_BG: External LNA enable for 2.4G. + * CARDBUS_ACCEL: 0:enable, 1:disable. + * EXTERNAL_LNA_A: External LNA enable for 5G. + */ +#define EEPROM_NIC 0x0011 +#define EEPROM_NIC_ENABLE_DIVERSITY FIELD16(0x0001) +#define EEPROM_NIC_TX_DIVERSITY FIELD16(0x0002) +#define EEPROM_NIC_TX_RX_FIXED FIELD16(0x000c) +#define EEPROM_NIC_EXTERNAL_LNA_BG FIELD16(0x0010) +#define EEPROM_NIC_CARDBUS_ACCEL FIELD16(0x0020) +#define EEPROM_NIC_EXTERNAL_LNA_A FIELD16(0x0040) + +/* + * EEPROM geography. + * GEO_A: Default geographical setting for 5GHz band + * GEO: Default geographical setting. + */ +#define EEPROM_GEOGRAPHY 0x0012 +#define EEPROM_GEOGRAPHY_GEO_A FIELD16(0x00ff) +#define EEPROM_GEOGRAPHY_GEO FIELD16(0xff00) + +/* + * EEPROM BBP. + */ +#define EEPROM_BBP_START 0x0013 +#define EEPROM_BBP_SIZE 16 +#define EEPROM_BBP_VALUE FIELD16(0x00ff) +#define EEPROM_BBP_REG_ID FIELD16(0xff00) + +/* + * EEPROM TXPOWER 802.11G + */ +#define EEPROM_TXPOWER_G_START 0x0023 +#define EEPROM_TXPOWER_G_SIZE 7 +#define EEPROM_TXPOWER_G_1 FIELD16(0x00ff) +#define EEPROM_TXPOWER_G_2 FIELD16(0xff00) + +/* + * EEPROM Frequency + */ +#define EEPROM_FREQ 0x002f +#define EEPROM_FREQ_OFFSET FIELD16(0x00ff) +#define EEPROM_FREQ_SEQ_MASK FIELD16(0xff00) +#define EEPROM_FREQ_SEQ FIELD16(0x0300) + +/* + * EEPROM LED. + * POLARITY_RDY_G: Polarity RDY_G setting. + * POLARITY_RDY_A: Polarity RDY_A setting. + * POLARITY_ACT: Polarity ACT setting. + * POLARITY_GPIO_0: Polarity GPIO0 setting. + * POLARITY_GPIO_1: Polarity GPIO1 setting. + * POLARITY_GPIO_2: Polarity GPIO2 setting. + * POLARITY_GPIO_3: Polarity GPIO3 setting. + * POLARITY_GPIO_4: Polarity GPIO4 setting. + * LED_MODE: Led mode. 
+ */ +#define EEPROM_LED 0x0030 +#define EEPROM_LED_POLARITY_RDY_G FIELD16(0x0001) +#define EEPROM_LED_POLARITY_RDY_A FIELD16(0x0002) +#define EEPROM_LED_POLARITY_ACT FIELD16(0x0004) +#define EEPROM_LED_POLARITY_GPIO_0 FIELD16(0x0008) +#define EEPROM_LED_POLARITY_GPIO_1 FIELD16(0x0010) +#define EEPROM_LED_POLARITY_GPIO_2 FIELD16(0x0020) +#define EEPROM_LED_POLARITY_GPIO_3 FIELD16(0x0040) +#define EEPROM_LED_POLARITY_GPIO_4 FIELD16(0x0080) +#define EEPROM_LED_LED_MODE FIELD16(0x1f00) + +/* + * EEPROM TXPOWER 802.11A + */ +#define EEPROM_TXPOWER_A_START 0x0031 +#define EEPROM_TXPOWER_A_SIZE 12 +#define EEPROM_TXPOWER_A_1 FIELD16(0x00ff) +#define EEPROM_TXPOWER_A_2 FIELD16(0xff00) + +/* + * EEPROM RSSI offset 802.11BG + */ +#define EEPROM_RSSI_OFFSET_BG 0x004d +#define EEPROM_RSSI_OFFSET_BG_1 FIELD16(0x00ff) +#define EEPROM_RSSI_OFFSET_BG_2 FIELD16(0xff00) + +/* + * EEPROM RSSI offset 802.11A + */ +#define EEPROM_RSSI_OFFSET_A 0x004e +#define EEPROM_RSSI_OFFSET_A_1 FIELD16(0x00ff) +#define EEPROM_RSSI_OFFSET_A_2 FIELD16(0xff00) + +/* + * MCU mailbox commands. + */ +#define MCU_SLEEP 0x30 +#define MCU_WAKEUP 0x31 +#define MCU_LED 0x50 +#define MCU_LED_STRENGTH 0x52 + +/* + * DMA descriptor defines. + */ +#define TXD_DESC_SIZE ( 16 * sizeof(struct data_desc) ) +#define RXD_DESC_SIZE ( 16 * sizeof(struct data_desc) ) + +/* + * TX descriptor format for TX, PRIO and Beacon Ring. + */ + +/* + * Word0 + * TKIP_MIC: ASIC appends TKIP MIC if TKIP is used. + * KEY_TABLE: Use per-client pairwise KEY table. + * KEY_INDEX: + * Key index (0~31) to the pairwise KEY table. + * 0~3 to shared KEY table 0 (BSS0). + * 4~7 to shared KEY table 1 (BSS1). + * 8~11 to shared KEY table 2 (BSS2). + * 12~15 to shared KEY table 3 (BSS3). + * BURST: Next frame belongs to same "burst" event. + */ +#define TXD_W0_OWNER_NIC FIELD32(0x00000001) +#define TXD_W0_VALID FIELD32(0x00000002) +#define TXD_W0_MORE_FRAG FIELD32(0x00000004) +#define TXD_W0_ACK FIELD32(0x00000008) +#define TXD_W0_TIMESTAMP FIELD32(0x00000010) +#define TXD_W0_OFDM FIELD32(0x00000020) +#define TXD_W0_IFS FIELD32(0x00000040) +#define TXD_W0_RETRY_MODE FIELD32(0x00000080) +#define TXD_W0_TKIP_MIC FIELD32(0x00000100) +#define TXD_W0_KEY_TABLE FIELD32(0x00000200) +#define TXD_W0_KEY_INDEX FIELD32(0x0000fc00) +#define TXD_W0_DATABYTE_COUNT FIELD32(0x0fff0000) +#define TXD_W0_BURST FIELD32(0x10000000) +#define TXD_W0_CIPHER_ALG FIELD32(0xe0000000) + +/* + * Word1 + * HOST_Q_ID: EDCA/HCCA queue ID. + * HW_SEQUENCE: MAC overwrites the frame sequence number. + * BUFFER_COUNT: Number of buffers in this TXD. + */ +#define TXD_W1_HOST_Q_ID FIELD32(0x0000000f) +#define TXD_W1_AIFSN FIELD32(0x000000f0) +#define TXD_W1_CWMIN FIELD32(0x00000f00) +#define TXD_W1_CWMAX FIELD32(0x0000f000) +#define TXD_W1_IV_OFFSET FIELD32(0x003f0000) +#define TXD_W1_PIGGY_BACK FIELD32(0x01000000) +#define TXD_W1_HW_SEQUENCE FIELD32(0x10000000) +#define TXD_W1_BUFFER_COUNT FIELD32(0xe0000000) + +/* + * Word2: PLCP information + */ +#define TXD_W2_PLCP_SIGNAL FIELD32(0x000000ff) +#define TXD_W2_PLCP_SERVICE FIELD32(0x0000ff00) +#define TXD_W2_PLCP_LENGTH_LOW FIELD32(0x00ff0000) +#define TXD_W2_PLCP_LENGTH_HIGH FIELD32(0xff000000) + +/* + * Word3 + */ +#define TXD_W3_IV FIELD32(0xffffffff) + +/* + * Word4 + */ +#define TXD_W4_EIV FIELD32(0xffffffff) + +/* + * Word5 + * FRAME_OFFSET: Frame start offset inside ASIC TXFIFO (after TXINFO field). + * TXD_W5_PID_SUBTYPE: Driver assigned packet ID index for txdone handler. + * TXD_W5_PID_TYPE: Driver assigned packet ID type for txdone handler. 
+ * WAITING_DMA_DONE_INT: TXD been filled with data + * and waiting for TxDoneISR housekeeping. + */ +#define TXD_W5_FRAME_OFFSET FIELD32(0x000000ff) +#define TXD_W5_PID_SUBTYPE FIELD32(0x00001f00) +#define TXD_W5_PID_TYPE FIELD32(0x0000e000) +#define TXD_W5_TX_POWER FIELD32(0x00ff0000) +#define TXD_W5_WAITING_DMA_DONE_INT FIELD32(0x01000000) + +/* + * the above 24-byte is called TXINFO and will be DMAed to MAC block + * through TXFIFO. MAC block use this TXINFO to control the transmission + * behavior of this frame. + * The following fields are not used by MAC block. + * They are used by DMA block and HOST driver only. + * Once a frame has been DMA to ASIC, all the following fields are useless + * to ASIC. + */ + +/* + * Word6-10: Buffer physical address + */ +#define TXD_W6_BUFFER_PHYSICAL_ADDRESS FIELD32(0xffffffff) +#define TXD_W7_BUFFER_PHYSICAL_ADDRESS FIELD32(0xffffffff) +#define TXD_W8_BUFFER_PHYSICAL_ADDRESS FIELD32(0xffffffff) +#define TXD_W9_BUFFER_PHYSICAL_ADDRESS FIELD32(0xffffffff) +#define TXD_W10_BUFFER_PHYSICAL_ADDRESS FIELD32(0xffffffff) + +/* + * Word11-13: Buffer length + */ +#define TXD_W11_BUFFER_LENGTH0 FIELD32(0x00000fff) +#define TXD_W11_BUFFER_LENGTH1 FIELD32(0x0fff0000) +#define TXD_W12_BUFFER_LENGTH2 FIELD32(0x00000fff) +#define TXD_W12_BUFFER_LENGTH3 FIELD32(0x0fff0000) +#define TXD_W13_BUFFER_LENGTH4 FIELD32(0x00000fff) + +/* + * Word14 + */ +#define TXD_W14_SK_BUFFER FIELD32(0xffffffff) + +/* + * Word15 + */ +#define TXD_W15_NEXT_SK_BUFFER FIELD32(0xffffffff) + +/* + * RX descriptor format for RX Ring. + */ + +/* + * Word0 + * CIPHER_ERROR: 1:ICV error, 2:MIC error, 3:invalid key. + * KEY_INDEX: Decryption key actually used. + */ +#define RXD_W0_OWNER_NIC FIELD32(0x00000001) +#define RXD_W0_DROP FIELD32(0x00000002) +#define RXD_W0_UNICAST_TO_ME FIELD32(0x00000004) +#define RXD_W0_MULTICAST FIELD32(0x00000008) +#define RXD_W0_BROADCAST FIELD32(0x00000010) +#define RXD_W0_MY_BSS FIELD32(0x00000020) +#define RXD_W0_CRC_ERROR FIELD32(0x00000040) +#define RXD_W0_OFDM FIELD32(0x00000080) +#define RXD_W0_CIPHER_ERROR FIELD32(0x00000300) +#define RXD_W0_KEY_INDEX FIELD32(0x0000fc00) +#define RXD_W0_DATABYTE_COUNT FIELD32(0x0fff0000) +#define RXD_W0_CIPHER_ALG FIELD32(0xe0000000) + +/* + * Word1 + * SIGNAL: RX raw data rate reported by BBP. + */ +#define RXD_W1_SIGNAL FIELD32(0x000000ff) +#define RXD_W1_RSSI_AGC FIELD32(0x00001f00) +#define RXD_W1_RSSI_LNA FIELD32(0x00006000) +#define RXD_W1_FRAME_OFFSET FIELD32(0x7f000000) + +/* + * Word2 + * IV: Received IV of originally encrypted. + */ +#define RXD_W2_IV FIELD32(0xffffffff) + +/* + * Word3 + * EIV: Received EIV of originally encrypted. + */ +#define RXD_W3_EIV FIELD32(0xffffffff) + +/* + * Word4 + */ +#define RXD_W4_RESERVED FIELD32(0xffffffff) + +/* + * the above 20-byte is called RXINFO and will be DMAed to MAC RX block + * and passed to the HOST driver. + * The following fields are for DMA block and HOST usage only. + * Can't be touched by ASIC MAC block. 
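+ * Word5 holds the RX buffer physical address; words 6-15 are reserved.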
+ */ + +/* + * Word5 + */ +#define RXD_W5_BUFFER_PHYSICAL_ADDRESS FIELD32(0xffffffff) + +/* + * Word6-15: Reserved + */ +#define RXD_W6_RESERVED FIELD32(0xffffffff) +#define RXD_W7_RESERVED FIELD32(0xffffffff) +#define RXD_W8_RESERVED FIELD32(0xffffffff) +#define RXD_W9_RESERVED FIELD32(0xffffffff) +#define RXD_W10_RESERVED FIELD32(0xffffffff) +#define RXD_W11_RESERVED FIELD32(0xffffffff) +#define RXD_W12_RESERVED FIELD32(0xffffffff) +#define RXD_W13_RESERVED FIELD32(0xffffffff) +#define RXD_W14_RESERVED FIELD32(0xffffffff) +#define RXD_W15_RESERVED FIELD32(0xffffffff) + +/* + * Macro's for converting txpower from EEPROM to dscape value + * and from dscape value to register value. + */ +#define MIN_TXPOWER 0 +#define MAX_TXPOWER 31 +#define DEFAULT_TXPOWER 24 + +#define TXPOWER_FROM_DEV(__txpower) \ +({ \ + ((__txpower) > MAX_TXPOWER) ? \ + DEFAULT_TXPOWER : (__txpower); \ +}) + +#define TXPOWER_TO_DEV(__txpower) \ +({ \ + ((__txpower) <= MIN_TXPOWER) ? MIN_TXPOWER : \ + (((__txpower) >= MAX_TXPOWER) ? MAX_TXPOWER : \ + (__txpower)); \ +}) + +#endif /* RT61PCI_H */ diff --git a/drivers/net/wireless/rt2x00/rt73usb.c b/drivers/net/wireless/rt2x00/rt73usb.c new file mode 100644 index 0000000000000000000000000000000000000000..3e42759473c3d1e0041f975ae54656fe8b33f88d --- /dev/null +++ b/drivers/net/wireless/rt2x00/rt73usb.c @@ -0,0 +1,2110 @@ +/* + Copyright (C) 2004 - 2007 rt2x00 SourceForge Project + + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the + Free Software Foundation, Inc., + 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + */ + +/* + Module: rt73usb + Abstract: rt73usb device specific routines. + Supported chipsets: rt2571W & rt2671. + */ + +/* + * Set enviroment defines for rt2x00.h + */ +#define DRV_NAME "rt73usb" + +#include +#include +#include +#include +#include +#include + +#include "rt2x00.h" +#include "rt2x00usb.h" +#include "rt73usb.h" + +/* + * Register access. + * All access to the CSR registers will go through the methods + * rt73usb_register_read and rt73usb_register_write. + * BBP and RF register require indirect register access, + * and use the CSR registers BBPCSR and RFCSR to achieve this. + * These indirect registers work with busy bits, + * and we will try maximal REGISTER_BUSY_COUNT times to access + * the register while taking a REGISTER_BUSY_DELAY us delay + * between each attampt. When the busy bit is still set at that time, + * the access attempt is considered to have failed, + * and we will print an error. 
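+ * For multi-register transfers the timeout scales with the transfer size:
+ * REGISTER_TIMEOUT is multiplied by the number of 32-bit words moved.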
+ */ +static inline void rt73usb_register_read(const struct rt2x00_dev *rt2x00dev, + const unsigned int offset, u32 *value) +{ + __le32 reg; + rt2x00usb_vendor_request_buff(rt2x00dev, USB_MULTI_READ, + USB_VENDOR_REQUEST_IN, offset, + ®, sizeof(u32), REGISTER_TIMEOUT); + *value = le32_to_cpu(reg); +} + +static inline void rt73usb_register_multiread(const struct rt2x00_dev + *rt2x00dev, + const unsigned int offset, + void *value, const u32 length) +{ + int timeout = REGISTER_TIMEOUT * (length / sizeof(u32)); + rt2x00usb_vendor_request_buff(rt2x00dev, USB_MULTI_READ, + USB_VENDOR_REQUEST_IN, offset, + value, length, timeout); +} + +static inline void rt73usb_register_write(const struct rt2x00_dev *rt2x00dev, + const unsigned int offset, u32 value) +{ + __le32 reg = cpu_to_le32(value); + rt2x00usb_vendor_request_buff(rt2x00dev, USB_MULTI_WRITE, + USB_VENDOR_REQUEST_OUT, offset, + ®, sizeof(u32), REGISTER_TIMEOUT); +} + +static inline void rt73usb_register_multiwrite(const struct rt2x00_dev + *rt2x00dev, + const unsigned int offset, + void *value, const u32 length) +{ + int timeout = REGISTER_TIMEOUT * (length / sizeof(u32)); + rt2x00usb_vendor_request_buff(rt2x00dev, USB_MULTI_WRITE, + USB_VENDOR_REQUEST_OUT, offset, + value, length, timeout); +} + +static u32 rt73usb_bbp_check(const struct rt2x00_dev *rt2x00dev) +{ + u32 reg; + unsigned int i; + + for (i = 0; i < REGISTER_BUSY_COUNT; i++) { + rt73usb_register_read(rt2x00dev, PHY_CSR3, ®); + if (!rt2x00_get_field32(reg, PHY_CSR3_BUSY)) + break; + udelay(REGISTER_BUSY_DELAY); + } + + return reg; +} + +static void rt73usb_bbp_write(const struct rt2x00_dev *rt2x00dev, + const unsigned int word, const u8 value) +{ + u32 reg; + + /* + * Wait until the BBP becomes ready. + */ + reg = rt73usb_bbp_check(rt2x00dev); + if (rt2x00_get_field32(reg, PHY_CSR3_BUSY)) { + ERROR(rt2x00dev, "PHY_CSR3 register busy. Write failed.\n"); + return; + } + + /* + * Write the data into the BBP. + */ + reg = 0; + rt2x00_set_field32(®, PHY_CSR3_VALUE, value); + rt2x00_set_field32(®, PHY_CSR3_REGNUM, word); + rt2x00_set_field32(®, PHY_CSR3_BUSY, 1); + rt2x00_set_field32(®, PHY_CSR3_READ_CONTROL, 0); + + rt73usb_register_write(rt2x00dev, PHY_CSR3, reg); +} + +static void rt73usb_bbp_read(const struct rt2x00_dev *rt2x00dev, + const unsigned int word, u8 *value) +{ + u32 reg; + + /* + * Wait until the BBP becomes ready. + */ + reg = rt73usb_bbp_check(rt2x00dev); + if (rt2x00_get_field32(reg, PHY_CSR3_BUSY)) { + ERROR(rt2x00dev, "PHY_CSR3 register busy. Read failed.\n"); + return; + } + + /* + * Write the request into the BBP. + */ + reg = 0; + rt2x00_set_field32(®, PHY_CSR3_REGNUM, word); + rt2x00_set_field32(®, PHY_CSR3_BUSY, 1); + rt2x00_set_field32(®, PHY_CSR3_READ_CONTROL, 1); + + rt73usb_register_write(rt2x00dev, PHY_CSR3, reg); + + /* + * Wait until the BBP becomes ready. + */ + reg = rt73usb_bbp_check(rt2x00dev); + if (rt2x00_get_field32(reg, PHY_CSR3_BUSY)) { + ERROR(rt2x00dev, "PHY_CSR3 register busy. Read failed.\n"); + *value = 0xff; + return; + } + + *value = rt2x00_get_field32(reg, PHY_CSR3_VALUE); +} + +static void rt73usb_rf_write(const struct rt2x00_dev *rt2x00dev, + const unsigned int word, const u32 value) +{ + u32 reg; + unsigned int i; + + if (!word) + return; + + for (i = 0; i < REGISTER_BUSY_COUNT; i++) { + rt73usb_register_read(rt2x00dev, PHY_CSR4, ®); + if (!rt2x00_get_field32(reg, PHY_CSR4_BUSY)) + goto rf_write; + udelay(REGISTER_BUSY_DELAY); + } + + ERROR(rt2x00dev, "PHY_CSR4 register busy. 
Write failed.\n"); + return; + +rf_write: + reg = 0; + rt2x00_set_field32(®, PHY_CSR4_VALUE, value); + + /* + * RF5225 and RF2527 contain 21 bits per RF register value, + * all others contain 20 bits. + */ + rt2x00_set_field32(®, PHY_CSR4_NUMBER_OF_BITS, + 20 + !!(rt2x00_rf(&rt2x00dev->chip, RF5225) || + rt2x00_rf(&rt2x00dev->chip, RF2527))); + rt2x00_set_field32(®, PHY_CSR4_IF_SELECT, 0); + rt2x00_set_field32(®, PHY_CSR4_BUSY, 1); + + rt73usb_register_write(rt2x00dev, PHY_CSR4, reg); + rt2x00_rf_write(rt2x00dev, word, value); +} + +#ifdef CONFIG_RT2X00_LIB_DEBUGFS +#define CSR_OFFSET(__word) ( CSR_REG_BASE + ((__word) * sizeof(u32)) ) + +static void rt73usb_read_csr(const struct rt2x00_dev *rt2x00dev, + const unsigned int word, u32 *data) +{ + rt73usb_register_read(rt2x00dev, CSR_OFFSET(word), data); +} + +static void rt73usb_write_csr(const struct rt2x00_dev *rt2x00dev, + const unsigned int word, u32 data) +{ + rt73usb_register_write(rt2x00dev, CSR_OFFSET(word), data); +} + +static const struct rt2x00debug rt73usb_rt2x00debug = { + .owner = THIS_MODULE, + .csr = { + .read = rt73usb_read_csr, + .write = rt73usb_write_csr, + .word_size = sizeof(u32), + .word_count = CSR_REG_SIZE / sizeof(u32), + }, + .eeprom = { + .read = rt2x00_eeprom_read, + .write = rt2x00_eeprom_write, + .word_size = sizeof(u16), + .word_count = EEPROM_SIZE / sizeof(u16), + }, + .bbp = { + .read = rt73usb_bbp_read, + .write = rt73usb_bbp_write, + .word_size = sizeof(u8), + .word_count = BBP_SIZE / sizeof(u8), + }, + .rf = { + .read = rt2x00_rf_read, + .write = rt73usb_rf_write, + .word_size = sizeof(u32), + .word_count = RF_SIZE / sizeof(u32), + }, +}; +#endif /* CONFIG_RT2X00_LIB_DEBUGFS */ + +/* + * Configuration handlers. + */ +static void rt73usb_config_mac_addr(struct rt2x00_dev *rt2x00dev, __le32 *mac) +{ + u32 tmp; + + tmp = le32_to_cpu(mac[1]); + rt2x00_set_field32(&tmp, MAC_CSR3_UNICAST_TO_ME_MASK, 0xff); + mac[1] = cpu_to_le32(tmp); + + rt73usb_register_multiwrite(rt2x00dev, MAC_CSR2, mac, + (2 * sizeof(__le32))); +} + +static void rt73usb_config_bssid(struct rt2x00_dev *rt2x00dev, __le32 *bssid) +{ + u32 tmp; + + tmp = le32_to_cpu(bssid[1]); + rt2x00_set_field32(&tmp, MAC_CSR5_BSS_ID_MASK, 3); + bssid[1] = cpu_to_le32(tmp); + + rt73usb_register_multiwrite(rt2x00dev, MAC_CSR4, bssid, + (2 * sizeof(__le32))); +} + +static void rt73usb_config_type(struct rt2x00_dev *rt2x00dev, const int type, + const int tsf_sync) +{ + u32 reg; + + /* + * Clear current synchronisation setup. + * For the Beacon base registers we only need to clear + * the first byte since that byte contains the VALID and OWNER + * bits which (when set to 0) will invalidate the entire beacon. + */ + rt73usb_register_write(rt2x00dev, TXRX_CSR9, 0); + rt73usb_register_write(rt2x00dev, HW_BEACON_BASE0, 0); + rt73usb_register_write(rt2x00dev, HW_BEACON_BASE1, 0); + rt73usb_register_write(rt2x00dev, HW_BEACON_BASE2, 0); + rt73usb_register_write(rt2x00dev, HW_BEACON_BASE3, 0); + + /* + * Enable synchronisation. 
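+ * TXRX_CSR9_BEACON_GEN stays disabled here; beacon generation is only
+ * enabled from rt73usb_kick_tx_queue() for the beacon queue.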
+ */ + rt73usb_register_read(rt2x00dev, TXRX_CSR9, ®); + rt2x00_set_field32(®, TXRX_CSR9_TSF_TICKING, 1); + rt2x00_set_field32(®, TXRX_CSR9_TBTT_ENABLE, 1); + rt2x00_set_field32(®, TXRX_CSR9_BEACON_GEN, 0); + rt2x00_set_field32(®, TXRX_CSR9_TSF_SYNC, tsf_sync); + rt73usb_register_write(rt2x00dev, TXRX_CSR9, reg); +} + +static void rt73usb_config_preamble(struct rt2x00_dev *rt2x00dev, + const int short_preamble, + const int ack_timeout, + const int ack_consume_time) +{ + u32 reg; + + /* + * When in atomic context, reschedule and let rt2x00lib + * call this function again. + */ + if (in_atomic()) { + queue_work(rt2x00dev->hw->workqueue, &rt2x00dev->config_work); + return; + } + + rt73usb_register_read(rt2x00dev, TXRX_CSR0, ®); + rt2x00_set_field32(®, TXRX_CSR0_RX_ACK_TIMEOUT, ack_timeout); + rt73usb_register_write(rt2x00dev, TXRX_CSR0, reg); + + rt73usb_register_read(rt2x00dev, TXRX_CSR4, ®); + rt2x00_set_field32(®, TXRX_CSR4_AUTORESPOND_PREAMBLE, + !!short_preamble); + rt73usb_register_write(rt2x00dev, TXRX_CSR4, reg); +} + +static void rt73usb_config_phymode(struct rt2x00_dev *rt2x00dev, + const int basic_rate_mask) +{ + rt73usb_register_write(rt2x00dev, TXRX_CSR5, basic_rate_mask); +} + +static void rt73usb_config_channel(struct rt2x00_dev *rt2x00dev, + struct rf_channel *rf, const int txpower) +{ + u8 r3; + u8 r94; + u8 smart; + + rt2x00_set_field32(&rf->rf3, RF3_TXPOWER, TXPOWER_TO_DEV(txpower)); + rt2x00_set_field32(&rf->rf4, RF4_FREQ_OFFSET, rt2x00dev->freq_offset); + + smart = !(rt2x00_rf(&rt2x00dev->chip, RF5225) || + rt2x00_rf(&rt2x00dev->chip, RF2527)); + + rt73usb_bbp_read(rt2x00dev, 3, &r3); + rt2x00_set_field8(&r3, BBP_R3_SMART_MODE, smart); + rt73usb_bbp_write(rt2x00dev, 3, r3); + + r94 = 6; + if (txpower > MAX_TXPOWER && txpower <= (MAX_TXPOWER + r94)) + r94 += txpower - MAX_TXPOWER; + else if (txpower < MIN_TXPOWER && txpower >= (MIN_TXPOWER - r94)) + r94 += txpower; + rt73usb_bbp_write(rt2x00dev, 94, r94); + + rt73usb_rf_write(rt2x00dev, 1, rf->rf1); + rt73usb_rf_write(rt2x00dev, 2, rf->rf2); + rt73usb_rf_write(rt2x00dev, 3, rf->rf3 & ~0x00000004); + rt73usb_rf_write(rt2x00dev, 4, rf->rf4); + + rt73usb_rf_write(rt2x00dev, 1, rf->rf1); + rt73usb_rf_write(rt2x00dev, 2, rf->rf2); + rt73usb_rf_write(rt2x00dev, 3, rf->rf3 | 0x00000004); + rt73usb_rf_write(rt2x00dev, 4, rf->rf4); + + rt73usb_rf_write(rt2x00dev, 1, rf->rf1); + rt73usb_rf_write(rt2x00dev, 2, rf->rf2); + rt73usb_rf_write(rt2x00dev, 3, rf->rf3 & ~0x00000004); + rt73usb_rf_write(rt2x00dev, 4, rf->rf4); + + udelay(10); +} + +static void rt73usb_config_txpower(struct rt2x00_dev *rt2x00dev, + const int txpower) +{ + struct rf_channel rf; + + rt2x00_rf_read(rt2x00dev, 1, &rf.rf1); + rt2x00_rf_read(rt2x00dev, 2, &rf.rf2); + rt2x00_rf_read(rt2x00dev, 3, &rf.rf3); + rt2x00_rf_read(rt2x00dev, 4, &rf.rf4); + + rt73usb_config_channel(rt2x00dev, &rf, txpower); +} + +static void rt73usb_config_antenna_5x(struct rt2x00_dev *rt2x00dev, + const int antenna_tx, + const int antenna_rx) +{ + u8 r3; + u8 r4; + u8 r77; + + rt73usb_bbp_read(rt2x00dev, 3, &r3); + rt73usb_bbp_read(rt2x00dev, 4, &r4); + rt73usb_bbp_read(rt2x00dev, 77, &r77); + + rt2x00_set_field8(&r3, BBP_R3_SMART_MODE, 0); + + switch (antenna_rx) { + case ANTENNA_SW_DIVERSITY: + case ANTENNA_HW_DIVERSITY: + rt2x00_set_field8(&r4, BBP_R4_RX_ANTENNA, 2); + rt2x00_set_field8(&r4, BBP_R4_RX_FRAME_END, + !!(rt2x00dev->curr_hwmode != HWMODE_A)); + break; + case ANTENNA_A: + rt2x00_set_field8(&r4, BBP_R4_RX_ANTENNA, 1); + rt2x00_set_field8(&r4, BBP_R4_RX_FRAME_END, 0); + + if 
(rt2x00dev->curr_hwmode == HWMODE_A) + rt2x00_set_field8(&r77, BBP_R77_PAIR, 0); + else + rt2x00_set_field8(&r77, BBP_R77_PAIR, 3); + break; + case ANTENNA_B: + rt2x00_set_field8(&r4, BBP_R4_RX_ANTENNA, 1); + rt2x00_set_field8(&r4, BBP_R4_RX_FRAME_END, 0); + + if (rt2x00dev->curr_hwmode == HWMODE_A) + rt2x00_set_field8(&r77, BBP_R77_PAIR, 3); + else + rt2x00_set_field8(&r77, BBP_R77_PAIR, 0); + break; + } + + rt73usb_bbp_write(rt2x00dev, 77, r77); + rt73usb_bbp_write(rt2x00dev, 3, r3); + rt73usb_bbp_write(rt2x00dev, 4, r4); +} + +static void rt73usb_config_antenna_2x(struct rt2x00_dev *rt2x00dev, + const int antenna_tx, + const int antenna_rx) +{ + u8 r3; + u8 r4; + u8 r77; + + rt73usb_bbp_read(rt2x00dev, 3, &r3); + rt73usb_bbp_read(rt2x00dev, 4, &r4); + rt73usb_bbp_read(rt2x00dev, 77, &r77); + + rt2x00_set_field8(&r3, BBP_R3_SMART_MODE, 0); + rt2x00_set_field8(&r4, BBP_R4_RX_FRAME_END, + !test_bit(CONFIG_FRAME_TYPE, &rt2x00dev->flags)); + + switch (antenna_rx) { + case ANTENNA_SW_DIVERSITY: + case ANTENNA_HW_DIVERSITY: + rt2x00_set_field8(&r4, BBP_R4_RX_ANTENNA, 2); + break; + case ANTENNA_A: + rt2x00_set_field8(&r4, BBP_R4_RX_ANTENNA, 1); + rt2x00_set_field8(&r77, BBP_R77_PAIR, 3); + break; + case ANTENNA_B: + rt2x00_set_field8(&r4, BBP_R4_RX_ANTENNA, 1); + rt2x00_set_field8(&r77, BBP_R77_PAIR, 0); + break; + } + + rt73usb_bbp_write(rt2x00dev, 77, r77); + rt73usb_bbp_write(rt2x00dev, 3, r3); + rt73usb_bbp_write(rt2x00dev, 4, r4); +} + +struct antenna_sel { + u8 word; + /* + * value[0] -> non-LNA + * value[1] -> LNA + */ + u8 value[2]; +}; + +static const struct antenna_sel antenna_sel_a[] = { + { 96, { 0x58, 0x78 } }, + { 104, { 0x38, 0x48 } }, + { 75, { 0xfe, 0x80 } }, + { 86, { 0xfe, 0x80 } }, + { 88, { 0xfe, 0x80 } }, + { 35, { 0x60, 0x60 } }, + { 97, { 0x58, 0x58 } }, + { 98, { 0x58, 0x58 } }, +}; + +static const struct antenna_sel antenna_sel_bg[] = { + { 96, { 0x48, 0x68 } }, + { 104, { 0x2c, 0x3c } }, + { 75, { 0xfe, 0x80 } }, + { 86, { 0xfe, 0x80 } }, + { 88, { 0xfe, 0x80 } }, + { 35, { 0x50, 0x50 } }, + { 97, { 0x48, 0x48 } }, + { 98, { 0x48, 0x48 } }, +}; + +static void rt73usb_config_antenna(struct rt2x00_dev *rt2x00dev, + const int antenna_tx, const int antenna_rx) +{ + const struct antenna_sel *sel; + unsigned int lna; + unsigned int i; + u32 reg; + + rt73usb_register_read(rt2x00dev, PHY_CSR0, ®); + + if (rt2x00dev->curr_hwmode == HWMODE_A) { + sel = antenna_sel_a; + lna = test_bit(CONFIG_EXTERNAL_LNA_A, &rt2x00dev->flags); + + rt2x00_set_field32(®, PHY_CSR0_PA_PE_BG, 0); + rt2x00_set_field32(®, PHY_CSR0_PA_PE_A, 1); + } else { + sel = antenna_sel_bg; + lna = test_bit(CONFIG_EXTERNAL_LNA_BG, &rt2x00dev->flags); + + rt2x00_set_field32(®, PHY_CSR0_PA_PE_BG, 1); + rt2x00_set_field32(®, PHY_CSR0_PA_PE_A, 0); + } + + for (i = 0; i < ARRAY_SIZE(antenna_sel_a); i++) + rt73usb_bbp_write(rt2x00dev, sel[i].word, sel[i].value[lna]); + + rt73usb_register_write(rt2x00dev, PHY_CSR0, reg); + + if (rt2x00_rf(&rt2x00dev->chip, RF5226) || + rt2x00_rf(&rt2x00dev->chip, RF5225)) + rt73usb_config_antenna_5x(rt2x00dev, antenna_tx, antenna_rx); + else if (rt2x00_rf(&rt2x00dev->chip, RF2528) || + rt2x00_rf(&rt2x00dev->chip, RF2527)) + rt73usb_config_antenna_2x(rt2x00dev, antenna_tx, antenna_rx); +} + +static void rt73usb_config_duration(struct rt2x00_dev *rt2x00dev, + struct rt2x00lib_conf *libconf) +{ + u32 reg; + + rt73usb_register_read(rt2x00dev, MAC_CSR9, ®); + rt2x00_set_field32(®, MAC_CSR9_SLOT_TIME, libconf->slot_time); + rt73usb_register_write(rt2x00dev, MAC_CSR9, reg); + + 
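/* MAC_CSR8 carries the SIFS, SIFS-after-OFDM-RX and EIFS timings. */ +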
rt73usb_register_read(rt2x00dev, MAC_CSR8, ®); + rt2x00_set_field32(®, MAC_CSR8_SIFS, libconf->sifs); + rt2x00_set_field32(®, MAC_CSR8_SIFS_AFTER_RX_OFDM, 3); + rt2x00_set_field32(®, MAC_CSR8_EIFS, libconf->eifs); + rt73usb_register_write(rt2x00dev, MAC_CSR8, reg); + + rt73usb_register_read(rt2x00dev, TXRX_CSR0, ®); + rt2x00_set_field32(®, TXRX_CSR0_TSF_OFFSET, IEEE80211_HEADER); + rt73usb_register_write(rt2x00dev, TXRX_CSR0, reg); + + rt73usb_register_read(rt2x00dev, TXRX_CSR4, ®); + rt2x00_set_field32(®, TXRX_CSR4_AUTORESPOND_ENABLE, 1); + rt73usb_register_write(rt2x00dev, TXRX_CSR4, reg); + + rt73usb_register_read(rt2x00dev, TXRX_CSR9, ®); + rt2x00_set_field32(®, TXRX_CSR9_BEACON_INTERVAL, + libconf->conf->beacon_int * 16); + rt73usb_register_write(rt2x00dev, TXRX_CSR9, reg); +} + +static void rt73usb_config(struct rt2x00_dev *rt2x00dev, + const unsigned int flags, + struct rt2x00lib_conf *libconf) +{ + if (flags & CONFIG_UPDATE_PHYMODE) + rt73usb_config_phymode(rt2x00dev, libconf->basic_rates); + if (flags & CONFIG_UPDATE_CHANNEL) + rt73usb_config_channel(rt2x00dev, &libconf->rf, + libconf->conf->power_level); + if ((flags & CONFIG_UPDATE_TXPOWER) && !(flags & CONFIG_UPDATE_CHANNEL)) + rt73usb_config_txpower(rt2x00dev, libconf->conf->power_level); + if (flags & CONFIG_UPDATE_ANTENNA) + rt73usb_config_antenna(rt2x00dev, libconf->conf->antenna_sel_tx, + libconf->conf->antenna_sel_rx); + if (flags & (CONFIG_UPDATE_SLOT_TIME | CONFIG_UPDATE_BEACON_INT)) + rt73usb_config_duration(rt2x00dev, libconf); +} + +/* + * LED functions. + */ +static void rt73usb_enable_led(struct rt2x00_dev *rt2x00dev) +{ + u32 reg; + + rt73usb_register_read(rt2x00dev, MAC_CSR14, ®); + rt2x00_set_field32(®, MAC_CSR14_ON_PERIOD, 70); + rt2x00_set_field32(®, MAC_CSR14_OFF_PERIOD, 30); + rt73usb_register_write(rt2x00dev, MAC_CSR14, reg); + + rt2x00_set_field16(&rt2x00dev->led_reg, MCU_LEDCS_RADIO_STATUS, 1); + if (rt2x00dev->rx_status.phymode == MODE_IEEE80211A) + rt2x00_set_field16(&rt2x00dev->led_reg, + MCU_LEDCS_LINK_A_STATUS, 1); + else + rt2x00_set_field16(&rt2x00dev->led_reg, + MCU_LEDCS_LINK_BG_STATUS, 1); + + rt2x00usb_vendor_request_sw(rt2x00dev, USB_LED_CONTROL, 0x0000, + rt2x00dev->led_reg, REGISTER_TIMEOUT); +} + +static void rt73usb_disable_led(struct rt2x00_dev *rt2x00dev) +{ + rt2x00_set_field16(&rt2x00dev->led_reg, MCU_LEDCS_RADIO_STATUS, 0); + rt2x00_set_field16(&rt2x00dev->led_reg, MCU_LEDCS_LINK_BG_STATUS, 0); + rt2x00_set_field16(&rt2x00dev->led_reg, MCU_LEDCS_LINK_A_STATUS, 0); + + rt2x00usb_vendor_request_sw(rt2x00dev, USB_LED_CONTROL, 0x0000, + rt2x00dev->led_reg, REGISTER_TIMEOUT); +} + +static void rt73usb_activity_led(struct rt2x00_dev *rt2x00dev, int rssi) +{ + u32 led; + + if (rt2x00dev->led_mode != LED_MODE_SIGNAL_STRENGTH) + return; + + /* + * Led handling requires a positive value for the rssi, + * to do that correctly we need to add the correction. + */ + rssi += rt2x00dev->rssi_offset; + + if (rssi <= 30) + led = 0; + else if (rssi <= 39) + led = 1; + else if (rssi <= 49) + led = 2; + else if (rssi <= 53) + led = 3; + else if (rssi <= 63) + led = 4; + else + led = 5; + + rt2x00usb_vendor_request_sw(rt2x00dev, USB_LED_CONTROL, led, + rt2x00dev->led_reg, REGISTER_TIMEOUT); +} + +/* + * Link tuning + */ +static void rt73usb_link_stats(struct rt2x00_dev *rt2x00dev) +{ + u32 reg; + + /* + * Update FCS error count from register. 
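+ * STA_CSR0 and STA_CSR1 are cleared on read (see rt73usb_init_registers).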
+ */ + rt73usb_register_read(rt2x00dev, STA_CSR0, ®); + rt2x00dev->link.rx_failed = rt2x00_get_field32(reg, STA_CSR0_FCS_ERROR); + + /* + * Update False CCA count from register. + */ + rt73usb_register_read(rt2x00dev, STA_CSR1, ®); + reg = rt2x00_get_field32(reg, STA_CSR1_FALSE_CCA_ERROR); + rt2x00dev->link.false_cca = + rt2x00_get_field32(reg, STA_CSR1_FALSE_CCA_ERROR); +} + +static void rt73usb_reset_tuner(struct rt2x00_dev *rt2x00dev) +{ + rt73usb_bbp_write(rt2x00dev, 17, 0x20); + rt2x00dev->link.vgc_level = 0x20; +} + +static void rt73usb_link_tuner(struct rt2x00_dev *rt2x00dev) +{ + int rssi = rt2x00_get_link_rssi(&rt2x00dev->link); + u8 r17; + u8 up_bound; + u8 low_bound; + + /* + * Update Led strength + */ + rt73usb_activity_led(rt2x00dev, rssi); + + rt73usb_bbp_read(rt2x00dev, 17, &r17); + + /* + * Determine r17 bounds. + */ + if (rt2x00dev->rx_status.phymode == MODE_IEEE80211A) { + low_bound = 0x28; + up_bound = 0x48; + + if (test_bit(CONFIG_EXTERNAL_LNA_A, &rt2x00dev->flags)) { + low_bound += 0x10; + up_bound += 0x10; + } + } else { + if (rssi > -82) { + low_bound = 0x1c; + up_bound = 0x40; + } else if (rssi > -84) { + low_bound = 0x1c; + up_bound = 0x20; + } else { + low_bound = 0x1c; + up_bound = 0x1c; + } + + if (test_bit(CONFIG_EXTERNAL_LNA_BG, &rt2x00dev->flags)) { + low_bound += 0x14; + up_bound += 0x10; + } + } + + /* + * Special big-R17 for very short distance + */ + if (rssi > -35) { + if (r17 != 0x60) + rt73usb_bbp_write(rt2x00dev, 17, 0x60); + return; + } + + /* + * Special big-R17 for short distance + */ + if (rssi >= -58) { + if (r17 != up_bound) + rt73usb_bbp_write(rt2x00dev, 17, up_bound); + return; + } + + /* + * Special big-R17 for middle-short distance + */ + if (rssi >= -66) { + low_bound += 0x10; + if (r17 != low_bound) + rt73usb_bbp_write(rt2x00dev, 17, low_bound); + return; + } + + /* + * Special mid-R17 for middle distance + */ + if (rssi >= -74) { + if (r17 != (low_bound + 0x10)) + rt73usb_bbp_write(rt2x00dev, 17, low_bound + 0x08); + return; + } + + /* + * Special case: Change up_bound based on the rssi. + * Lower up_bound when rssi is weaker then -74 dBm. + */ + up_bound -= 2 * (-74 - rssi); + if (low_bound > up_bound) + up_bound = low_bound; + + if (r17 > up_bound) { + rt73usb_bbp_write(rt2x00dev, 17, up_bound); + return; + } + + /* + * r17 does not yet exceed upper limit, continue and base + * the r17 tuning on the false CCA count. + */ + if (rt2x00dev->link.false_cca > 512 && r17 < up_bound) { + r17 += 4; + if (r17 > up_bound) + r17 = up_bound; + rt73usb_bbp_write(rt2x00dev, 17, r17); + } else if (rt2x00dev->link.false_cca < 100 && r17 > low_bound) { + r17 -= 4; + if (r17 < low_bound) + r17 = low_bound; + rt73usb_bbp_write(rt2x00dev, 17, r17); + } +} + +/* + * Firmware name function. + */ +static char *rt73usb_get_firmware_name(struct rt2x00_dev *rt2x00dev) +{ + return FIRMWARE_RT2571; +} + +/* + * Initialization functions. + */ +static int rt73usb_load_firmware(struct rt2x00_dev *rt2x00dev, void *data, + const size_t len) +{ + unsigned int i; + int status; + u32 reg; + char *ptr = data; + char *cache; + int buflen; + int timeout; + + /* + * Wait for stable hardware. + */ + for (i = 0; i < 100; i++) { + rt73usb_register_read(rt2x00dev, MAC_CSR0, ®); + if (reg) + break; + msleep(1); + } + + if (!reg) { + ERROR(rt2x00dev, "Unstable hardware.\n"); + return -EBUSY; + } + + /* + * Write firmware to device. + * We setup a seperate cache for this action, + * since we are going to write larger chunks of data + * then normally used cache size. 
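+ * The image is pushed in CSR_CACHE_SIZE_FIRMWARE sized chunks through
+ * USB_MULTI_WRITE vendor requests, each with a proportional timeout.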
+ */ + cache = kmalloc(CSR_CACHE_SIZE_FIRMWARE, GFP_KERNEL); + if (!cache) { + ERROR(rt2x00dev, "Failed to allocate firmware cache.\n"); + return -ENOMEM; + } + + for (i = 0; i < len; i += CSR_CACHE_SIZE_FIRMWARE) { + buflen = min_t(int, len - i, CSR_CACHE_SIZE_FIRMWARE); + timeout = REGISTER_TIMEOUT * (buflen / sizeof(u32)); + + memcpy(cache, ptr, buflen); + + rt2x00usb_vendor_request(rt2x00dev, USB_MULTI_WRITE, + USB_VENDOR_REQUEST_OUT, + FIRMWARE_IMAGE_BASE + i, 0x0000, + cache, buflen, timeout); + + ptr += buflen; + } + + kfree(cache); + + /* + * Send firmware request to device to load firmware, + * we need to specify a long timeout time. + */ + status = rt2x00usb_vendor_request_sw(rt2x00dev, USB_DEVICE_MODE, + 0x0000, USB_MODE_FIRMWARE, + REGISTER_TIMEOUT_FIRMWARE); + if (status < 0) { + ERROR(rt2x00dev, "Failed to write Firmware to device.\n"); + return status; + } + + rt73usb_disable_led(rt2x00dev); + + return 0; +} + +static int rt73usb_init_registers(struct rt2x00_dev *rt2x00dev) +{ + u32 reg; + + rt73usb_register_read(rt2x00dev, TXRX_CSR0, ®); + rt2x00_set_field32(®, TXRX_CSR0_AUTO_TX_SEQ, 1); + rt2x00_set_field32(®, TXRX_CSR0_DISABLE_RX, 0); + rt2x00_set_field32(®, TXRX_CSR0_TX_WITHOUT_WAITING, 0); + rt73usb_register_write(rt2x00dev, TXRX_CSR0, reg); + + rt73usb_register_read(rt2x00dev, TXRX_CSR1, ®); + rt2x00_set_field32(®, TXRX_CSR1_BBP_ID0, 47); /* CCK Signal */ + rt2x00_set_field32(®, TXRX_CSR1_BBP_ID0_VALID, 1); + rt2x00_set_field32(®, TXRX_CSR1_BBP_ID1, 30); /* Rssi */ + rt2x00_set_field32(®, TXRX_CSR1_BBP_ID1_VALID, 1); + rt2x00_set_field32(®, TXRX_CSR1_BBP_ID2, 42); /* OFDM Rate */ + rt2x00_set_field32(®, TXRX_CSR1_BBP_ID2_VALID, 1); + rt2x00_set_field32(®, TXRX_CSR1_BBP_ID3, 30); /* Rssi */ + rt2x00_set_field32(®, TXRX_CSR1_BBP_ID3_VALID, 1); + rt73usb_register_write(rt2x00dev, TXRX_CSR1, reg); + + /* + * CCK TXD BBP registers + */ + rt73usb_register_read(rt2x00dev, TXRX_CSR2, ®); + rt2x00_set_field32(®, TXRX_CSR2_BBP_ID0, 13); + rt2x00_set_field32(®, TXRX_CSR2_BBP_ID0_VALID, 1); + rt2x00_set_field32(®, TXRX_CSR2_BBP_ID1, 12); + rt2x00_set_field32(®, TXRX_CSR2_BBP_ID1_VALID, 1); + rt2x00_set_field32(®, TXRX_CSR2_BBP_ID2, 11); + rt2x00_set_field32(®, TXRX_CSR2_BBP_ID2_VALID, 1); + rt2x00_set_field32(®, TXRX_CSR2_BBP_ID3, 10); + rt2x00_set_field32(®, TXRX_CSR2_BBP_ID3_VALID, 1); + rt73usb_register_write(rt2x00dev, TXRX_CSR2, reg); + + /* + * OFDM TXD BBP registers + */ + rt73usb_register_read(rt2x00dev, TXRX_CSR3, ®); + rt2x00_set_field32(®, TXRX_CSR3_BBP_ID0, 7); + rt2x00_set_field32(®, TXRX_CSR3_BBP_ID0_VALID, 1); + rt2x00_set_field32(®, TXRX_CSR3_BBP_ID1, 6); + rt2x00_set_field32(®, TXRX_CSR3_BBP_ID1_VALID, 1); + rt2x00_set_field32(®, TXRX_CSR3_BBP_ID2, 5); + rt2x00_set_field32(®, TXRX_CSR3_BBP_ID2_VALID, 1); + rt73usb_register_write(rt2x00dev, TXRX_CSR3, reg); + + rt73usb_register_read(rt2x00dev, TXRX_CSR7, ®); + rt2x00_set_field32(®, TXRX_CSR7_ACK_CTS_6MBS, 59); + rt2x00_set_field32(®, TXRX_CSR7_ACK_CTS_9MBS, 53); + rt2x00_set_field32(®, TXRX_CSR7_ACK_CTS_12MBS, 49); + rt2x00_set_field32(®, TXRX_CSR7_ACK_CTS_18MBS, 46); + rt73usb_register_write(rt2x00dev, TXRX_CSR7, reg); + + rt73usb_register_read(rt2x00dev, TXRX_CSR8, ®); + rt2x00_set_field32(®, TXRX_CSR8_ACK_CTS_24MBS, 44); + rt2x00_set_field32(®, TXRX_CSR8_ACK_CTS_36MBS, 42); + rt2x00_set_field32(®, TXRX_CSR8_ACK_CTS_48MBS, 42); + rt2x00_set_field32(®, TXRX_CSR8_ACK_CTS_54MBS, 42); + rt73usb_register_write(rt2x00dev, TXRX_CSR8, reg); + + rt73usb_register_write(rt2x00dev, TXRX_CSR15, 0x0000000f); + + 
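/* Set the maximum frame unit (MAC_CSR6) to 0xfff. */ +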
rt73usb_register_read(rt2x00dev, MAC_CSR6, ®); + rt2x00_set_field32(®, MAC_CSR6_MAX_FRAME_UNIT, 0xfff); + rt73usb_register_write(rt2x00dev, MAC_CSR6, reg); + + rt73usb_register_write(rt2x00dev, MAC_CSR10, 0x00000718); + + if (rt2x00dev->ops->lib->set_device_state(rt2x00dev, STATE_AWAKE)) + return -EBUSY; + + rt73usb_register_write(rt2x00dev, MAC_CSR13, 0x00007f00); + + /* + * Invalidate all Shared Keys (SEC_CSR0), + * and clear the Shared key Cipher algorithms (SEC_CSR1 & SEC_CSR5) + */ + rt73usb_register_write(rt2x00dev, SEC_CSR0, 0x00000000); + rt73usb_register_write(rt2x00dev, SEC_CSR1, 0x00000000); + rt73usb_register_write(rt2x00dev, SEC_CSR5, 0x00000000); + + reg = 0x000023b0; + if (rt2x00_rf(&rt2x00dev->chip, RF5225) || + rt2x00_rf(&rt2x00dev->chip, RF2527)) + rt2x00_set_field32(®, PHY_CSR1_RF_RPI, 1); + rt73usb_register_write(rt2x00dev, PHY_CSR1, reg); + + rt73usb_register_write(rt2x00dev, PHY_CSR5, 0x00040a06); + rt73usb_register_write(rt2x00dev, PHY_CSR6, 0x00080606); + rt73usb_register_write(rt2x00dev, PHY_CSR7, 0x00000408); + + rt73usb_register_read(rt2x00dev, AC_TXOP_CSR0, ®); + rt2x00_set_field32(®, AC_TXOP_CSR0_AC0_TX_OP, 0); + rt2x00_set_field32(®, AC_TXOP_CSR0_AC1_TX_OP, 0); + rt73usb_register_write(rt2x00dev, AC_TXOP_CSR0, reg); + + rt73usb_register_read(rt2x00dev, AC_TXOP_CSR1, ®); + rt2x00_set_field32(®, AC_TXOP_CSR1_AC2_TX_OP, 192); + rt2x00_set_field32(®, AC_TXOP_CSR1_AC3_TX_OP, 48); + rt73usb_register_write(rt2x00dev, AC_TXOP_CSR1, reg); + + rt73usb_register_read(rt2x00dev, MAC_CSR9, ®); + rt2x00_set_field32(®, MAC_CSR9_CW_SELECT, 0); + rt73usb_register_write(rt2x00dev, MAC_CSR9, reg); + + /* + * We must clear the error counters. + * These registers are cleared on read, + * so we may pass a useless variable to store the value. + */ + rt73usb_register_read(rt2x00dev, STA_CSR0, ®); + rt73usb_register_read(rt2x00dev, STA_CSR1, ®); + rt73usb_register_read(rt2x00dev, STA_CSR2, ®); + + /* + * Reset MAC and BBP registers. 
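+ * The SOFT_RESET and BBP_RESET bits are toggled, after which HOST_READY is raised.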
+ */ + rt73usb_register_read(rt2x00dev, MAC_CSR1, ®); + rt2x00_set_field32(®, MAC_CSR1_SOFT_RESET, 1); + rt2x00_set_field32(®, MAC_CSR1_BBP_RESET, 1); + rt73usb_register_write(rt2x00dev, MAC_CSR1, reg); + + rt73usb_register_read(rt2x00dev, MAC_CSR1, ®); + rt2x00_set_field32(®, MAC_CSR1_SOFT_RESET, 0); + rt2x00_set_field32(®, MAC_CSR1_BBP_RESET, 0); + rt73usb_register_write(rt2x00dev, MAC_CSR1, reg); + + rt73usb_register_read(rt2x00dev, MAC_CSR1, ®); + rt2x00_set_field32(®, MAC_CSR1_HOST_READY, 1); + rt73usb_register_write(rt2x00dev, MAC_CSR1, reg); + + return 0; +} + +static int rt73usb_init_bbp(struct rt2x00_dev *rt2x00dev) +{ + unsigned int i; + u16 eeprom; + u8 reg_id; + u8 value; + + for (i = 0; i < REGISTER_BUSY_COUNT; i++) { + rt73usb_bbp_read(rt2x00dev, 0, &value); + if ((value != 0xff) && (value != 0x00)) + goto continue_csr_init; + NOTICE(rt2x00dev, "Waiting for BBP register.\n"); + udelay(REGISTER_BUSY_DELAY); + } + + ERROR(rt2x00dev, "BBP register access failed, aborting.\n"); + return -EACCES; + +continue_csr_init: + rt73usb_bbp_write(rt2x00dev, 3, 0x80); + rt73usb_bbp_write(rt2x00dev, 15, 0x30); + rt73usb_bbp_write(rt2x00dev, 21, 0xc8); + rt73usb_bbp_write(rt2x00dev, 22, 0x38); + rt73usb_bbp_write(rt2x00dev, 23, 0x06); + rt73usb_bbp_write(rt2x00dev, 24, 0xfe); + rt73usb_bbp_write(rt2x00dev, 25, 0x0a); + rt73usb_bbp_write(rt2x00dev, 26, 0x0d); + rt73usb_bbp_write(rt2x00dev, 32, 0x0b); + rt73usb_bbp_write(rt2x00dev, 34, 0x12); + rt73usb_bbp_write(rt2x00dev, 37, 0x07); + rt73usb_bbp_write(rt2x00dev, 39, 0xf8); + rt73usb_bbp_write(rt2x00dev, 41, 0x60); + rt73usb_bbp_write(rt2x00dev, 53, 0x10); + rt73usb_bbp_write(rt2x00dev, 54, 0x18); + rt73usb_bbp_write(rt2x00dev, 60, 0x10); + rt73usb_bbp_write(rt2x00dev, 61, 0x04); + rt73usb_bbp_write(rt2x00dev, 62, 0x04); + rt73usb_bbp_write(rt2x00dev, 75, 0xfe); + rt73usb_bbp_write(rt2x00dev, 86, 0xfe); + rt73usb_bbp_write(rt2x00dev, 88, 0xfe); + rt73usb_bbp_write(rt2x00dev, 90, 0x0f); + rt73usb_bbp_write(rt2x00dev, 99, 0x00); + rt73usb_bbp_write(rt2x00dev, 102, 0x16); + rt73usb_bbp_write(rt2x00dev, 107, 0x04); + + DEBUG(rt2x00dev, "Start initialization from EEPROM...\n"); + for (i = 0; i < EEPROM_BBP_SIZE; i++) { + rt2x00_eeprom_read(rt2x00dev, EEPROM_BBP_START + i, &eeprom); + + if (eeprom != 0xffff && eeprom != 0x0000) { + reg_id = rt2x00_get_field16(eeprom, EEPROM_BBP_REG_ID); + value = rt2x00_get_field16(eeprom, EEPROM_BBP_VALUE); + DEBUG(rt2x00dev, "BBP: 0x%02x, value: 0x%02x.\n", + reg_id, value); + rt73usb_bbp_write(rt2x00dev, reg_id, value); + } + } + DEBUG(rt2x00dev, "...End initialization from EEPROM.\n"); + + return 0; +} + +/* + * Device state switch handlers. + */ +static void rt73usb_toggle_rx(struct rt2x00_dev *rt2x00dev, + enum dev_state state) +{ + u32 reg; + + rt73usb_register_read(rt2x00dev, TXRX_CSR0, ®); + rt2x00_set_field32(®, TXRX_CSR0_DISABLE_RX, + state == STATE_RADIO_RX_OFF); + rt73usb_register_write(rt2x00dev, TXRX_CSR0, reg); +} + +static int rt73usb_enable_radio(struct rt2x00_dev *rt2x00dev) +{ + /* + * Initialize all registers. 
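+ * The radio is only brought up when both register and BBP initialization succeed.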
+ */ + if (rt73usb_init_registers(rt2x00dev) || + rt73usb_init_bbp(rt2x00dev)) { + ERROR(rt2x00dev, "Register initialization failed.\n"); + return -EIO; + } + + rt2x00usb_enable_radio(rt2x00dev); + + /* + * Enable LED + */ + rt73usb_enable_led(rt2x00dev); + + return 0; +} + +static void rt73usb_disable_radio(struct rt2x00_dev *rt2x00dev) +{ + /* + * Disable LED + */ + rt73usb_disable_led(rt2x00dev); + + rt73usb_register_write(rt2x00dev, MAC_CSR10, 0x00001818); + + /* + * Disable synchronisation. + */ + rt73usb_register_write(rt2x00dev, TXRX_CSR9, 0); + + rt2x00usb_disable_radio(rt2x00dev); +} + +static int rt73usb_set_state(struct rt2x00_dev *rt2x00dev, enum dev_state state) +{ + u32 reg; + unsigned int i; + char put_to_sleep; + char current_state; + + put_to_sleep = (state != STATE_AWAKE); + + rt73usb_register_read(rt2x00dev, MAC_CSR12, ®); + rt2x00_set_field32(®, MAC_CSR12_FORCE_WAKEUP, !put_to_sleep); + rt2x00_set_field32(®, MAC_CSR12_PUT_TO_SLEEP, put_to_sleep); + rt73usb_register_write(rt2x00dev, MAC_CSR12, reg); + + /* + * Device is not guaranteed to be in the requested state yet. + * We must wait until the register indicates that the + * device has entered the correct state. + */ + for (i = 0; i < REGISTER_BUSY_COUNT; i++) { + rt73usb_register_read(rt2x00dev, MAC_CSR12, ®); + current_state = + rt2x00_get_field32(reg, MAC_CSR12_BBP_CURRENT_STATE); + if (current_state == !put_to_sleep) + return 0; + msleep(10); + } + + NOTICE(rt2x00dev, "Device failed to enter state %d, " + "current device state %d.\n", !put_to_sleep, current_state); + + return -EBUSY; +} + +static int rt73usb_set_device_state(struct rt2x00_dev *rt2x00dev, + enum dev_state state) +{ + int retval = 0; + + switch (state) { + case STATE_RADIO_ON: + retval = rt73usb_enable_radio(rt2x00dev); + break; + case STATE_RADIO_OFF: + rt73usb_disable_radio(rt2x00dev); + break; + case STATE_RADIO_RX_ON: + case STATE_RADIO_RX_OFF: + rt73usb_toggle_rx(rt2x00dev, state); + break; + case STATE_DEEP_SLEEP: + case STATE_SLEEP: + case STATE_STANDBY: + case STATE_AWAKE: + retval = rt73usb_set_state(rt2x00dev, state); + break; + default: + retval = -ENOTSUPP; + break; + } + + return retval; +} + +/* + * TX descriptor initialization + */ +static void rt73usb_write_tx_desc(struct rt2x00_dev *rt2x00dev, + struct data_desc *txd, + struct txdata_entry_desc *desc, + struct ieee80211_hdr *ieee80211hdr, + unsigned int length, + struct ieee80211_tx_control *control) +{ + u32 word; + + /* + * Start writing the descriptor words. 
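+ * Word 0, which carries the VALID flag, is written last.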
+ */ + rt2x00_desc_read(txd, 1, &word); + rt2x00_set_field32(&word, TXD_W1_HOST_Q_ID, desc->queue); + rt2x00_set_field32(&word, TXD_W1_AIFSN, desc->aifs); + rt2x00_set_field32(&word, TXD_W1_CWMIN, desc->cw_min); + rt2x00_set_field32(&word, TXD_W1_CWMAX, desc->cw_max); + rt2x00_set_field32(&word, TXD_W1_IV_OFFSET, IEEE80211_HEADER); + rt2x00_set_field32(&word, TXD_W1_HW_SEQUENCE, 1); + rt2x00_desc_write(txd, 1, word); + + rt2x00_desc_read(txd, 2, &word); + rt2x00_set_field32(&word, TXD_W2_PLCP_SIGNAL, desc->signal); + rt2x00_set_field32(&word, TXD_W2_PLCP_SERVICE, desc->service); + rt2x00_set_field32(&word, TXD_W2_PLCP_LENGTH_LOW, desc->length_low); + rt2x00_set_field32(&word, TXD_W2_PLCP_LENGTH_HIGH, desc->length_high); + rt2x00_desc_write(txd, 2, word); + + rt2x00_desc_read(txd, 5, &word); + rt2x00_set_field32(&word, TXD_W5_TX_POWER, + TXPOWER_TO_DEV(control->power_level)); + rt2x00_set_field32(&word, TXD_W5_WAITING_DMA_DONE_INT, 1); + rt2x00_desc_write(txd, 5, word); + + rt2x00_desc_read(txd, 0, &word); + rt2x00_set_field32(&word, TXD_W0_BURST, + test_bit(ENTRY_TXD_BURST, &desc->flags)); + rt2x00_set_field32(&word, TXD_W0_VALID, 1); + rt2x00_set_field32(&word, TXD_W0_MORE_FRAG, + test_bit(ENTRY_TXD_MORE_FRAG, &desc->flags)); + rt2x00_set_field32(&word, TXD_W0_ACK, + !(control->flags & IEEE80211_TXCTL_NO_ACK)); + rt2x00_set_field32(&word, TXD_W0_TIMESTAMP, + test_bit(ENTRY_TXD_REQ_TIMESTAMP, &desc->flags)); + rt2x00_set_field32(&word, TXD_W0_OFDM, + test_bit(ENTRY_TXD_OFDM_RATE, &desc->flags)); + rt2x00_set_field32(&word, TXD_W0_IFS, desc->ifs); + rt2x00_set_field32(&word, TXD_W0_RETRY_MODE, + !!(control->flags & + IEEE80211_TXCTL_LONG_RETRY_LIMIT)); + rt2x00_set_field32(&word, TXD_W0_TKIP_MIC, 0); + rt2x00_set_field32(&word, TXD_W0_DATABYTE_COUNT, length); + rt2x00_set_field32(&word, TXD_W0_BURST2, + test_bit(ENTRY_TXD_BURST, &desc->flags)); + rt2x00_set_field32(&word, TXD_W0_CIPHER_ALG, CIPHER_NONE); + rt2x00_desc_write(txd, 0, word); +} + +static int rt73usb_get_tx_data_len(struct rt2x00_dev *rt2x00dev, + int maxpacket, struct sk_buff *skb) +{ + int length; + + /* + * The length _must_ be a multiple of 4, + * but it must _not_ be a multiple of the USB packet size. + */ + length = roundup(skb->len, 4); + length += (4 * !(length % maxpacket)); + + return length; +} + +/* + * TX data initialization + */ +static void rt73usb_kick_tx_queue(struct rt2x00_dev *rt2x00dev, + unsigned int queue) +{ + u32 reg; + + if (queue != IEEE80211_TX_QUEUE_BEACON) + return; + + /* + * For Wi-Fi faily generated beacons between participating stations. 
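+ * (i.e. distributed beaconing between IBSS/ad-hoc stations)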
+ * Set TBTT phase adaptive adjustment step to 8us (default 16us) + */ + rt73usb_register_write(rt2x00dev, TXRX_CSR10, 0x00001008); + + rt73usb_register_read(rt2x00dev, TXRX_CSR9, ®); + if (!rt2x00_get_field32(reg, TXRX_CSR9_BEACON_GEN)) { + rt2x00_set_field32(®, TXRX_CSR9_BEACON_GEN, 1); + rt73usb_register_write(rt2x00dev, TXRX_CSR9, reg); + } +} + +/* + * RX control handlers + */ +static int rt73usb_agc_to_rssi(struct rt2x00_dev *rt2x00dev, int rxd_w1) +{ + u16 eeprom; + u8 offset; + u8 lna; + + lna = rt2x00_get_field32(rxd_w1, RXD_W1_RSSI_LNA); + switch (lna) { + case 3: + offset = 90; + break; + case 2: + offset = 74; + break; + case 1: + offset = 64; + break; + default: + return 0; + } + + if (rt2x00dev->rx_status.phymode == MODE_IEEE80211A) { + if (test_bit(CONFIG_EXTERNAL_LNA_A, &rt2x00dev->flags)) { + if (lna == 3 || lna == 2) + offset += 10; + } else { + if (lna == 3) + offset += 6; + else if (lna == 2) + offset += 8; + } + + rt2x00_eeprom_read(rt2x00dev, EEPROM_RSSI_OFFSET_A, &eeprom); + offset -= rt2x00_get_field16(eeprom, EEPROM_RSSI_OFFSET_A_1); + } else { + if (test_bit(CONFIG_EXTERNAL_LNA_BG, &rt2x00dev->flags)) + offset += 14; + + rt2x00_eeprom_read(rt2x00dev, EEPROM_RSSI_OFFSET_BG, &eeprom); + offset -= rt2x00_get_field16(eeprom, EEPROM_RSSI_OFFSET_BG_1); + } + + return rt2x00_get_field32(rxd_w1, RXD_W1_RSSI_AGC) * 2 - offset; +} + +static void rt73usb_fill_rxdone(struct data_entry *entry, + struct rxdata_entry_desc *desc) +{ + struct data_desc *rxd = (struct data_desc *)entry->skb->data; + u32 word0; + u32 word1; + + rt2x00_desc_read(rxd, 0, &word0); + rt2x00_desc_read(rxd, 1, &word1); + + desc->flags = 0; + if (rt2x00_get_field32(word0, RXD_W0_CRC_ERROR)) + desc->flags |= RX_FLAG_FAILED_FCS_CRC; + + /* + * Obtain the status about this packet. + */ + desc->signal = rt2x00_get_field32(word1, RXD_W1_SIGNAL); + desc->rssi = rt73usb_agc_to_rssi(entry->ring->rt2x00dev, word1); + desc->ofdm = rt2x00_get_field32(word0, RXD_W0_OFDM); + desc->size = rt2x00_get_field32(word0, RXD_W0_DATABYTE_COUNT); + + /* + * Pull the skb to clear the descriptor area. + */ + skb_pull(entry->skb, entry->ring->desc_size); + + return; +} + +/* + * Device probe functions. + */ +static int rt73usb_validate_eeprom(struct rt2x00_dev *rt2x00dev) +{ + u16 word; + u8 *mac; + s8 value; + + rt2x00usb_eeprom_read(rt2x00dev, rt2x00dev->eeprom, EEPROM_SIZE); + + /* + * Start validation of the data that has been read. 
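+ * Words that read back blank (0xffff) are rewritten with sane defaults.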
+ */ + mac = rt2x00_eeprom_addr(rt2x00dev, EEPROM_MAC_ADDR_0); + if (!is_valid_ether_addr(mac)) { + DECLARE_MAC_BUF(macbuf); + + random_ether_addr(mac); + EEPROM(rt2x00dev, "MAC: %s\n", print_mac(macbuf, mac)); + } + + rt2x00_eeprom_read(rt2x00dev, EEPROM_ANTENNA, &word); + if (word == 0xffff) { + rt2x00_set_field16(&word, EEPROM_ANTENNA_NUM, 2); + rt2x00_set_field16(&word, EEPROM_ANTENNA_TX_DEFAULT, 2); + rt2x00_set_field16(&word, EEPROM_ANTENNA_RX_DEFAULT, 2); + rt2x00_set_field16(&word, EEPROM_ANTENNA_FRAME_TYPE, 0); + rt2x00_set_field16(&word, EEPROM_ANTENNA_DYN_TXAGC, 0); + rt2x00_set_field16(&word, EEPROM_ANTENNA_HARDWARE_RADIO, 0); + rt2x00_set_field16(&word, EEPROM_ANTENNA_RF_TYPE, RF5226); + rt2x00_eeprom_write(rt2x00dev, EEPROM_ANTENNA, word); + EEPROM(rt2x00dev, "Antenna: 0x%04x\n", word); + } + + rt2x00_eeprom_read(rt2x00dev, EEPROM_NIC, &word); + if (word == 0xffff) { + rt2x00_set_field16(&word, EEPROM_NIC_EXTERNAL_LNA, 0); + rt2x00_eeprom_write(rt2x00dev, EEPROM_NIC, word); + EEPROM(rt2x00dev, "NIC: 0x%04x\n", word); + } + + rt2x00_eeprom_read(rt2x00dev, EEPROM_LED, &word); + if (word == 0xffff) { + rt2x00_set_field16(&word, EEPROM_LED_POLARITY_RDY_G, 0); + rt2x00_set_field16(&word, EEPROM_LED_POLARITY_RDY_A, 0); + rt2x00_set_field16(&word, EEPROM_LED_POLARITY_ACT, 0); + rt2x00_set_field16(&word, EEPROM_LED_POLARITY_GPIO_0, 0); + rt2x00_set_field16(&word, EEPROM_LED_POLARITY_GPIO_1, 0); + rt2x00_set_field16(&word, EEPROM_LED_POLARITY_GPIO_2, 0); + rt2x00_set_field16(&word, EEPROM_LED_POLARITY_GPIO_3, 0); + rt2x00_set_field16(&word, EEPROM_LED_POLARITY_GPIO_4, 0); + rt2x00_set_field16(&word, EEPROM_LED_LED_MODE, + LED_MODE_DEFAULT); + rt2x00_eeprom_write(rt2x00dev, EEPROM_LED, word); + EEPROM(rt2x00dev, "Led: 0x%04x\n", word); + } + + rt2x00_eeprom_read(rt2x00dev, EEPROM_FREQ, &word); + if (word == 0xffff) { + rt2x00_set_field16(&word, EEPROM_FREQ_OFFSET, 0); + rt2x00_set_field16(&word, EEPROM_FREQ_SEQ, 0); + rt2x00_eeprom_write(rt2x00dev, EEPROM_FREQ, word); + EEPROM(rt2x00dev, "Freq: 0x%04x\n", word); + } + + rt2x00_eeprom_read(rt2x00dev, EEPROM_RSSI_OFFSET_BG, &word); + if (word == 0xffff) { + rt2x00_set_field16(&word, EEPROM_RSSI_OFFSET_BG_1, 0); + rt2x00_set_field16(&word, EEPROM_RSSI_OFFSET_BG_2, 0); + rt2x00_eeprom_write(rt2x00dev, EEPROM_RSSI_OFFSET_BG, word); + EEPROM(rt2x00dev, "RSSI OFFSET BG: 0x%04x\n", word); + } else { + value = rt2x00_get_field16(word, EEPROM_RSSI_OFFSET_BG_1); + if (value < -10 || value > 10) + rt2x00_set_field16(&word, EEPROM_RSSI_OFFSET_BG_1, 0); + value = rt2x00_get_field16(word, EEPROM_RSSI_OFFSET_BG_2); + if (value < -10 || value > 10) + rt2x00_set_field16(&word, EEPROM_RSSI_OFFSET_BG_2, 0); + rt2x00_eeprom_write(rt2x00dev, EEPROM_RSSI_OFFSET_BG, word); + } + + rt2x00_eeprom_read(rt2x00dev, EEPROM_RSSI_OFFSET_A, &word); + if (word == 0xffff) { + rt2x00_set_field16(&word, EEPROM_RSSI_OFFSET_A_1, 0); + rt2x00_set_field16(&word, EEPROM_RSSI_OFFSET_A_2, 0); + rt2x00_eeprom_write(rt2x00dev, EEPROM_RSSI_OFFSET_A, word); + EEPROM(rt2x00dev, "RSSI OFFSET BG: 0x%04x\n", word); + } else { + value = rt2x00_get_field16(word, EEPROM_RSSI_OFFSET_A_1); + if (value < -10 || value > 10) + rt2x00_set_field16(&word, EEPROM_RSSI_OFFSET_A_1, 0); + value = rt2x00_get_field16(word, EEPROM_RSSI_OFFSET_A_2); + if (value < -10 || value > 10) + rt2x00_set_field16(&word, EEPROM_RSSI_OFFSET_A_2, 0); + rt2x00_eeprom_write(rt2x00dev, EEPROM_RSSI_OFFSET_A, word); + } + + return 0; +} + +static int rt73usb_init_eeprom(struct rt2x00_dev *rt2x00dev) +{ + u32 reg; + u16 
value; + u16 eeprom; + + /* + * Read EEPROM word for configuration. + */ + rt2x00_eeprom_read(rt2x00dev, EEPROM_ANTENNA, &eeprom); + + /* + * Identify RF chipset. + */ + value = rt2x00_get_field16(eeprom, EEPROM_ANTENNA_RF_TYPE); + rt73usb_register_read(rt2x00dev, MAC_CSR0, ®); + rt2x00_set_chip(rt2x00dev, RT2571, value, reg); + + if (!rt2x00_rev(&rt2x00dev->chip, 0x25730)) { + ERROR(rt2x00dev, "Invalid RT chipset detected.\n"); + return -ENODEV; + } + + if (!rt2x00_rf(&rt2x00dev->chip, RF5226) && + !rt2x00_rf(&rt2x00dev->chip, RF2528) && + !rt2x00_rf(&rt2x00dev->chip, RF5225) && + !rt2x00_rf(&rt2x00dev->chip, RF2527)) { + ERROR(rt2x00dev, "Invalid RF chipset detected.\n"); + return -ENODEV; + } + + /* + * Identify default antenna configuration. + */ + rt2x00dev->hw->conf.antenna_sel_tx = + rt2x00_get_field16(eeprom, EEPROM_ANTENNA_TX_DEFAULT); + rt2x00dev->hw->conf.antenna_sel_rx = + rt2x00_get_field16(eeprom, EEPROM_ANTENNA_RX_DEFAULT); + + /* + * Read the Frame type. + */ + if (rt2x00_get_field16(eeprom, EEPROM_ANTENNA_FRAME_TYPE)) + __set_bit(CONFIG_FRAME_TYPE, &rt2x00dev->flags); + + /* + * Read frequency offset. + */ + rt2x00_eeprom_read(rt2x00dev, EEPROM_FREQ, &eeprom); + rt2x00dev->freq_offset = rt2x00_get_field16(eeprom, EEPROM_FREQ_OFFSET); + + /* + * Read external LNA informations. + */ + rt2x00_eeprom_read(rt2x00dev, EEPROM_NIC, &eeprom); + + if (rt2x00_get_field16(eeprom, EEPROM_NIC_EXTERNAL_LNA)) { + __set_bit(CONFIG_EXTERNAL_LNA_A, &rt2x00dev->flags); + __set_bit(CONFIG_EXTERNAL_LNA_BG, &rt2x00dev->flags); + } + + /* + * Store led settings, for correct led behaviour. + */ + rt2x00_eeprom_read(rt2x00dev, EEPROM_LED, &eeprom); + + rt2x00_set_field16(&rt2x00dev->led_reg, MCU_LEDCS_LED_MODE, + rt2x00dev->led_mode); + rt2x00_set_field16(&rt2x00dev->led_reg, MCU_LEDCS_POLARITY_GPIO_0, + rt2x00_get_field16(eeprom, + EEPROM_LED_POLARITY_GPIO_0)); + rt2x00_set_field16(&rt2x00dev->led_reg, MCU_LEDCS_POLARITY_GPIO_1, + rt2x00_get_field16(eeprom, + EEPROM_LED_POLARITY_GPIO_1)); + rt2x00_set_field16(&rt2x00dev->led_reg, MCU_LEDCS_POLARITY_GPIO_2, + rt2x00_get_field16(eeprom, + EEPROM_LED_POLARITY_GPIO_2)); + rt2x00_set_field16(&rt2x00dev->led_reg, MCU_LEDCS_POLARITY_GPIO_3, + rt2x00_get_field16(eeprom, + EEPROM_LED_POLARITY_GPIO_3)); + rt2x00_set_field16(&rt2x00dev->led_reg, MCU_LEDCS_POLARITY_GPIO_4, + rt2x00_get_field16(eeprom, + EEPROM_LED_POLARITY_GPIO_4)); + rt2x00_set_field16(&rt2x00dev->led_reg, MCU_LEDCS_POLARITY_ACT, + rt2x00_get_field16(eeprom, EEPROM_LED_POLARITY_ACT)); + rt2x00_set_field16(&rt2x00dev->led_reg, MCU_LEDCS_POLARITY_READY_BG, + rt2x00_get_field16(eeprom, + EEPROM_LED_POLARITY_RDY_G)); + rt2x00_set_field16(&rt2x00dev->led_reg, MCU_LEDCS_POLARITY_READY_A, + rt2x00_get_field16(eeprom, + EEPROM_LED_POLARITY_RDY_A)); + + return 0; +} + +/* + * RF value list for RF2528 + * Supports: 2.4 GHz + */ +static const struct rf_channel rf_vals_bg_2528[] = { + { 1, 0x00002c0c, 0x00000786, 0x00068255, 0x000fea0b }, + { 2, 0x00002c0c, 0x00000786, 0x00068255, 0x000fea1f }, + { 3, 0x00002c0c, 0x0000078a, 0x00068255, 0x000fea0b }, + { 4, 0x00002c0c, 0x0000078a, 0x00068255, 0x000fea1f }, + { 5, 0x00002c0c, 0x0000078e, 0x00068255, 0x000fea0b }, + { 6, 0x00002c0c, 0x0000078e, 0x00068255, 0x000fea1f }, + { 7, 0x00002c0c, 0x00000792, 0x00068255, 0x000fea0b }, + { 8, 0x00002c0c, 0x00000792, 0x00068255, 0x000fea1f }, + { 9, 0x00002c0c, 0x00000796, 0x00068255, 0x000fea0b }, + { 10, 0x00002c0c, 0x00000796, 0x00068255, 0x000fea1f }, + { 11, 0x00002c0c, 0x0000079a, 0x00068255, 0x000fea0b }, + { 
12, 0x00002c0c, 0x0000079a, 0x00068255, 0x000fea1f }, + { 13, 0x00002c0c, 0x0000079e, 0x00068255, 0x000fea0b }, + { 14, 0x00002c0c, 0x000007a2, 0x00068255, 0x000fea13 }, +}; + +/* + * RF value list for RF5226 + * Supports: 2.4 GHz & 5.2 GHz + */ +static const struct rf_channel rf_vals_5226[] = { + { 1, 0x00002c0c, 0x00000786, 0x00068255, 0x000fea0b }, + { 2, 0x00002c0c, 0x00000786, 0x00068255, 0x000fea1f }, + { 3, 0x00002c0c, 0x0000078a, 0x00068255, 0x000fea0b }, + { 4, 0x00002c0c, 0x0000078a, 0x00068255, 0x000fea1f }, + { 5, 0x00002c0c, 0x0000078e, 0x00068255, 0x000fea0b }, + { 6, 0x00002c0c, 0x0000078e, 0x00068255, 0x000fea1f }, + { 7, 0x00002c0c, 0x00000792, 0x00068255, 0x000fea0b }, + { 8, 0x00002c0c, 0x00000792, 0x00068255, 0x000fea1f }, + { 9, 0x00002c0c, 0x00000796, 0x00068255, 0x000fea0b }, + { 10, 0x00002c0c, 0x00000796, 0x00068255, 0x000fea1f }, + { 11, 0x00002c0c, 0x0000079a, 0x00068255, 0x000fea0b }, + { 12, 0x00002c0c, 0x0000079a, 0x00068255, 0x000fea1f }, + { 13, 0x00002c0c, 0x0000079e, 0x00068255, 0x000fea0b }, + { 14, 0x00002c0c, 0x000007a2, 0x00068255, 0x000fea13 }, + + /* 802.11 UNI / HyperLan 2 */ + { 36, 0x00002c0c, 0x0000099a, 0x00098255, 0x000fea23 }, + { 40, 0x00002c0c, 0x000009a2, 0x00098255, 0x000fea03 }, + { 44, 0x00002c0c, 0x000009a6, 0x00098255, 0x000fea0b }, + { 48, 0x00002c0c, 0x000009aa, 0x00098255, 0x000fea13 }, + { 52, 0x00002c0c, 0x000009ae, 0x00098255, 0x000fea1b }, + { 56, 0x00002c0c, 0x000009b2, 0x00098255, 0x000fea23 }, + { 60, 0x00002c0c, 0x000009ba, 0x00098255, 0x000fea03 }, + { 64, 0x00002c0c, 0x000009be, 0x00098255, 0x000fea0b }, + + /* 802.11 HyperLan 2 */ + { 100, 0x00002c0c, 0x00000a2a, 0x000b8255, 0x000fea03 }, + { 104, 0x00002c0c, 0x00000a2e, 0x000b8255, 0x000fea0b }, + { 108, 0x00002c0c, 0x00000a32, 0x000b8255, 0x000fea13 }, + { 112, 0x00002c0c, 0x00000a36, 0x000b8255, 0x000fea1b }, + { 116, 0x00002c0c, 0x00000a3a, 0x000b8255, 0x000fea23 }, + { 120, 0x00002c0c, 0x00000a82, 0x000b8255, 0x000fea03 }, + { 124, 0x00002c0c, 0x00000a86, 0x000b8255, 0x000fea0b }, + { 128, 0x00002c0c, 0x00000a8a, 0x000b8255, 0x000fea13 }, + { 132, 0x00002c0c, 0x00000a8e, 0x000b8255, 0x000fea1b }, + { 136, 0x00002c0c, 0x00000a92, 0x000b8255, 0x000fea23 }, + + /* 802.11 UNII */ + { 140, 0x00002c0c, 0x00000a9a, 0x000b8255, 0x000fea03 }, + { 149, 0x00002c0c, 0x00000aa2, 0x000b8255, 0x000fea1f }, + { 153, 0x00002c0c, 0x00000aa6, 0x000b8255, 0x000fea27 }, + { 157, 0x00002c0c, 0x00000aae, 0x000b8255, 0x000fea07 }, + { 161, 0x00002c0c, 0x00000ab2, 0x000b8255, 0x000fea0f }, + { 165, 0x00002c0c, 0x00000ab6, 0x000b8255, 0x000fea17 }, + + /* MMAC(Japan)J52 ch 34,38,42,46 */ + { 34, 0x00002c0c, 0x0008099a, 0x000da255, 0x000d3a0b }, + { 38, 0x00002c0c, 0x0008099e, 0x000da255, 0x000d3a13 }, + { 42, 0x00002c0c, 0x000809a2, 0x000da255, 0x000d3a1b }, + { 46, 0x00002c0c, 0x000809a6, 0x000da255, 0x000d3a23 }, +}; + +/* + * RF value list for RF5225 & RF2527 + * Supports: 2.4 GHz & 5.2 GHz + */ +static const struct rf_channel rf_vals_5225_2527[] = { + { 1, 0x00002ccc, 0x00004786, 0x00068455, 0x000ffa0b }, + { 2, 0x00002ccc, 0x00004786, 0x00068455, 0x000ffa1f }, + { 3, 0x00002ccc, 0x0000478a, 0x00068455, 0x000ffa0b }, + { 4, 0x00002ccc, 0x0000478a, 0x00068455, 0x000ffa1f }, + { 5, 0x00002ccc, 0x0000478e, 0x00068455, 0x000ffa0b }, + { 6, 0x00002ccc, 0x0000478e, 0x00068455, 0x000ffa1f }, + { 7, 0x00002ccc, 0x00004792, 0x00068455, 0x000ffa0b }, + { 8, 0x00002ccc, 0x00004792, 0x00068455, 0x000ffa1f }, + { 9, 0x00002ccc, 0x00004796, 0x00068455, 0x000ffa0b }, + { 10, 0x00002ccc, 0x00004796, 
0x00068455, 0x000ffa1f }, + { 11, 0x00002ccc, 0x0000479a, 0x00068455, 0x000ffa0b }, + { 12, 0x00002ccc, 0x0000479a, 0x00068455, 0x000ffa1f }, + { 13, 0x00002ccc, 0x0000479e, 0x00068455, 0x000ffa0b }, + { 14, 0x00002ccc, 0x000047a2, 0x00068455, 0x000ffa13 }, + + /* 802.11 UNI / HyperLan 2 */ + { 36, 0x00002ccc, 0x0000499a, 0x0009be55, 0x000ffa23 }, + { 40, 0x00002ccc, 0x000049a2, 0x0009be55, 0x000ffa03 }, + { 44, 0x00002ccc, 0x000049a6, 0x0009be55, 0x000ffa0b }, + { 48, 0x00002ccc, 0x000049aa, 0x0009be55, 0x000ffa13 }, + { 52, 0x00002ccc, 0x000049ae, 0x0009ae55, 0x000ffa1b }, + { 56, 0x00002ccc, 0x000049b2, 0x0009ae55, 0x000ffa23 }, + { 60, 0x00002ccc, 0x000049ba, 0x0009ae55, 0x000ffa03 }, + { 64, 0x00002ccc, 0x000049be, 0x0009ae55, 0x000ffa0b }, + + /* 802.11 HyperLan 2 */ + { 100, 0x00002ccc, 0x00004a2a, 0x000bae55, 0x000ffa03 }, + { 104, 0x00002ccc, 0x00004a2e, 0x000bae55, 0x000ffa0b }, + { 108, 0x00002ccc, 0x00004a32, 0x000bae55, 0x000ffa13 }, + { 112, 0x00002ccc, 0x00004a36, 0x000bae55, 0x000ffa1b }, + { 116, 0x00002ccc, 0x00004a3a, 0x000bbe55, 0x000ffa23 }, + { 120, 0x00002ccc, 0x00004a82, 0x000bbe55, 0x000ffa03 }, + { 124, 0x00002ccc, 0x00004a86, 0x000bbe55, 0x000ffa0b }, + { 128, 0x00002ccc, 0x00004a8a, 0x000bbe55, 0x000ffa13 }, + { 132, 0x00002ccc, 0x00004a8e, 0x000bbe55, 0x000ffa1b }, + { 136, 0x00002ccc, 0x00004a92, 0x000bbe55, 0x000ffa23 }, + + /* 802.11 UNII */ + { 140, 0x00002ccc, 0x00004a9a, 0x000bbe55, 0x000ffa03 }, + { 149, 0x00002ccc, 0x00004aa2, 0x000bbe55, 0x000ffa1f }, + { 153, 0x00002ccc, 0x00004aa6, 0x000bbe55, 0x000ffa27 }, + { 157, 0x00002ccc, 0x00004aae, 0x000bbe55, 0x000ffa07 }, + { 161, 0x00002ccc, 0x00004ab2, 0x000bbe55, 0x000ffa0f }, + { 165, 0x00002ccc, 0x00004ab6, 0x000bbe55, 0x000ffa17 }, + + /* MMAC(Japan)J52 ch 34,38,42,46 */ + { 34, 0x00002ccc, 0x0000499a, 0x0009be55, 0x000ffa0b }, + { 38, 0x00002ccc, 0x0000499e, 0x0009be55, 0x000ffa13 }, + { 42, 0x00002ccc, 0x000049a2, 0x0009be55, 0x000ffa1b }, + { 46, 0x00002ccc, 0x000049a6, 0x0009be55, 0x000ffa23 }, +}; + + +static void rt73usb_probe_hw_mode(struct rt2x00_dev *rt2x00dev) +{ + struct hw_mode_spec *spec = &rt2x00dev->spec; + u8 *txpower; + unsigned int i; + + /* + * Initialize all hw fields. + */ + rt2x00dev->hw->flags = + IEEE80211_HW_HOST_GEN_BEACON_TEMPLATE | + IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING; + rt2x00dev->hw->extra_tx_headroom = TXD_DESC_SIZE; + rt2x00dev->hw->max_signal = MAX_SIGNAL; + rt2x00dev->hw->max_rssi = MAX_RX_SSI; + rt2x00dev->hw->queues = 5; + + SET_IEEE80211_DEV(rt2x00dev->hw, &rt2x00dev_usb(rt2x00dev)->dev); + SET_IEEE80211_PERM_ADDR(rt2x00dev->hw, + rt2x00_eeprom_addr(rt2x00dev, + EEPROM_MAC_ADDR_0)); + + /* + * Convert tx_power array in eeprom. + */ + txpower = rt2x00_eeprom_addr(rt2x00dev, EEPROM_TXPOWER_G_START); + for (i = 0; i < 14; i++) + txpower[i] = TXPOWER_FROM_DEV(txpower[i]); + + /* + * Initialize hw_mode information. 
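+ * The 802.11a mode and its TX power table are only enabled further
+ * down for the RF5225 and RF5226 chipsets.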
+ */ + spec->num_modes = 2; + spec->num_rates = 12; + spec->tx_power_a = NULL; + spec->tx_power_bg = txpower; + spec->tx_power_default = DEFAULT_TXPOWER; + + if (rt2x00_rf(&rt2x00dev->chip, RF2528)) { + spec->num_channels = ARRAY_SIZE(rf_vals_bg_2528); + spec->channels = rf_vals_bg_2528; + } else if (rt2x00_rf(&rt2x00dev->chip, RF5226)) { + spec->num_channels = ARRAY_SIZE(rf_vals_5226); + spec->channels = rf_vals_5226; + } else if (rt2x00_rf(&rt2x00dev->chip, RF2527)) { + spec->num_channels = 14; + spec->channels = rf_vals_5225_2527; + } else if (rt2x00_rf(&rt2x00dev->chip, RF5225)) { + spec->num_channels = ARRAY_SIZE(rf_vals_5225_2527); + spec->channels = rf_vals_5225_2527; + } + + if (rt2x00_rf(&rt2x00dev->chip, RF5225) || + rt2x00_rf(&rt2x00dev->chip, RF5226)) { + spec->num_modes = 3; + + txpower = rt2x00_eeprom_addr(rt2x00dev, EEPROM_TXPOWER_A_START); + for (i = 0; i < 14; i++) + txpower[i] = TXPOWER_FROM_DEV(txpower[i]); + + spec->tx_power_a = txpower; + } +} + +static int rt73usb_probe_hw(struct rt2x00_dev *rt2x00dev) +{ + int retval; + + /* + * Allocate eeprom data. + */ + retval = rt73usb_validate_eeprom(rt2x00dev); + if (retval) + return retval; + + retval = rt73usb_init_eeprom(rt2x00dev); + if (retval) + return retval; + + /* + * Initialize hw specifications. + */ + rt73usb_probe_hw_mode(rt2x00dev); + + /* + * This device requires firmware + */ + __set_bit(DRIVER_REQUIRE_FIRMWARE, &rt2x00dev->flags); + + /* + * Set the rssi offset. + */ + rt2x00dev->rssi_offset = DEFAULT_RSSI_OFFSET; + + return 0; +} + +/* + * IEEE80211 stack callback functions. + */ +static void rt73usb_configure_filter(struct ieee80211_hw *hw, + unsigned int changed_flags, + unsigned int *total_flags, + int mc_count, + struct dev_addr_list *mc_list) +{ + struct rt2x00_dev *rt2x00dev = hw->priv; + struct interface *intf = &rt2x00dev->interface; + u32 reg; + + /* + * Mask off any flags we are going to ignore from + * the total_flags field. + */ + *total_flags &= + FIF_ALLMULTI | + FIF_FCSFAIL | + FIF_PLCPFAIL | + FIF_CONTROL | + FIF_OTHER_BSS | + FIF_PROMISC_IN_BSS; + + /* + * Apply some rules to the filters: + * - Some filters imply different filters to be set. + * - Some things we can't filter out at all. + * - Some filters are set based on interface type. + */ + if (mc_count) + *total_flags |= FIF_ALLMULTI; + if (*total_flags & FIF_OTHER_BSS || + *total_flags & FIF_PROMISC_IN_BSS) + *total_flags |= FIF_PROMISC_IN_BSS | FIF_OTHER_BSS; + if (is_interface_type(intf, IEEE80211_IF_TYPE_AP)) + *total_flags |= FIF_PROMISC_IN_BSS; + + /* + * Check if there is any work left for us. + */ + if (intf->filter == *total_flags) + return; + intf->filter = *total_flags; + + /* + * When in atomic context, reschedule and let rt2x00lib + * call this function again. + */ + if (in_atomic()) { + queue_work(rt2x00dev->hw->workqueue, &rt2x00dev->filter_work); + return; + } + + /* + * Start configuration steps. + * Note that the version error will always be dropped + * and broadcast frames will always be accepted since + * there is no filter for it at this time. 
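+ * Each FIF_* flag that is not requested sets the corresponding
+ * TXRX_CSR0 DROP_* bit below.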
+ */ + rt73usb_register_read(rt2x00dev, TXRX_CSR0, &reg); + rt2x00_set_field32(&reg, TXRX_CSR0_DROP_CRC, + !(*total_flags & FIF_FCSFAIL)); + rt2x00_set_field32(&reg, TXRX_CSR0_DROP_PHYSICAL, + !(*total_flags & FIF_PLCPFAIL)); + rt2x00_set_field32(&reg, TXRX_CSR0_DROP_CONTROL, + !(*total_flags & FIF_CONTROL)); + rt2x00_set_field32(&reg, TXRX_CSR0_DROP_NOT_TO_ME, + !(*total_flags & FIF_PROMISC_IN_BSS)); + rt2x00_set_field32(&reg, TXRX_CSR0_DROP_TO_DS, + !(*total_flags & FIF_PROMISC_IN_BSS)); + rt2x00_set_field32(&reg, TXRX_CSR0_DROP_VERSION_ERROR, 1); + rt2x00_set_field32(&reg, TXRX_CSR0_DROP_MULTICAST, + !(*total_flags & FIF_ALLMULTI)); + rt2x00_set_field32(&reg, TXRX_CSR0_DROP_BROADCAST, 0); + rt2x00_set_field32(&reg, TXRX_CSR0_DROP_ACK_CTS, 1); + rt73usb_register_write(rt2x00dev, TXRX_CSR0, reg); +} + +static int rt73usb_set_retry_limit(struct ieee80211_hw *hw, + u32 short_retry, u32 long_retry) +{ + struct rt2x00_dev *rt2x00dev = hw->priv; + u32 reg; + + rt73usb_register_read(rt2x00dev, TXRX_CSR4, &reg); + rt2x00_set_field32(&reg, TXRX_CSR4_LONG_RETRY_LIMIT, long_retry); + rt2x00_set_field32(&reg, TXRX_CSR4_SHORT_RETRY_LIMIT, short_retry); + rt73usb_register_write(rt2x00dev, TXRX_CSR4, reg); + + return 0; +} + +#if 0 +/* + * Mac80211 demands get_tsf must be atomic. + * This is not possible for rt73usb since all register access + * functions require sleeping. Until mac80211 no longer needs + * get_tsf to be atomic, this function should be disabled. + */ +static u64 rt73usb_get_tsf(struct ieee80211_hw *hw) +{ + struct rt2x00_dev *rt2x00dev = hw->priv; + u64 tsf; + u32 reg; + + rt73usb_register_read(rt2x00dev, TXRX_CSR13, &reg); + tsf = (u64) rt2x00_get_field32(reg, TXRX_CSR13_HIGH_TSFTIMER) << 32; + rt73usb_register_read(rt2x00dev, TXRX_CSR12, &reg); + tsf |= rt2x00_get_field32(reg, TXRX_CSR12_LOW_TSFTIMER); + + return tsf; +} +#else +#define rt73usb_get_tsf NULL +#endif + +static void rt73usb_reset_tsf(struct ieee80211_hw *hw) +{ + struct rt2x00_dev *rt2x00dev = hw->priv; + + rt73usb_register_write(rt2x00dev, TXRX_CSR12, 0); + rt73usb_register_write(rt2x00dev, TXRX_CSR13, 0); +} + +static int rt73usb_beacon_update(struct ieee80211_hw *hw, struct sk_buff *skb, + struct ieee80211_tx_control *control) +{ + struct rt2x00_dev *rt2x00dev = hw->priv; + int timeout; + + /* + * Just in case the ieee80211 doesn't set this, + * but we need this queue set for the descriptor + * initialization. + */ + control->queue = IEEE80211_TX_QUEUE_BEACON; + + /* + * First we create the beacon. + */ + skb_push(skb, TXD_DESC_SIZE); + memset(skb->data, 0, TXD_DESC_SIZE); + + rt2x00lib_write_tx_desc(rt2x00dev, (struct data_desc *)skb->data, + (struct ieee80211_hdr *)(skb->data + + TXD_DESC_SIZE), + skb->len - TXD_DESC_SIZE, control); + + /* + * Write entire beacon with descriptor to register, + * and kick the beacon generator.
+ */ + timeout = REGISTER_TIMEOUT * (skb->len / sizeof(u32)); + rt2x00usb_vendor_request(rt2x00dev, USB_MULTI_WRITE, + USB_VENDOR_REQUEST_OUT, + HW_BEACON_BASE0, 0x0000, + skb->data, skb->len, timeout); + rt73usb_kick_tx_queue(rt2x00dev, IEEE80211_TX_QUEUE_BEACON); + + return 0; +} + +static const struct ieee80211_ops rt73usb_mac80211_ops = { + .tx = rt2x00mac_tx, + .start = rt2x00mac_start, + .stop = rt2x00mac_stop, + .add_interface = rt2x00mac_add_interface, + .remove_interface = rt2x00mac_remove_interface, + .config = rt2x00mac_config, + .config_interface = rt2x00mac_config_interface, + .configure_filter = rt73usb_configure_filter, + .get_stats = rt2x00mac_get_stats, + .set_retry_limit = rt73usb_set_retry_limit, + .erp_ie_changed = rt2x00mac_erp_ie_changed, + .conf_tx = rt2x00mac_conf_tx, + .get_tx_stats = rt2x00mac_get_tx_stats, + .get_tsf = rt73usb_get_tsf, + .reset_tsf = rt73usb_reset_tsf, + .beacon_update = rt73usb_beacon_update, +}; + +static const struct rt2x00lib_ops rt73usb_rt2x00_ops = { + .probe_hw = rt73usb_probe_hw, + .get_firmware_name = rt73usb_get_firmware_name, + .load_firmware = rt73usb_load_firmware, + .initialize = rt2x00usb_initialize, + .uninitialize = rt2x00usb_uninitialize, + .set_device_state = rt73usb_set_device_state, + .link_stats = rt73usb_link_stats, + .reset_tuner = rt73usb_reset_tuner, + .link_tuner = rt73usb_link_tuner, + .write_tx_desc = rt73usb_write_tx_desc, + .write_tx_data = rt2x00usb_write_tx_data, + .get_tx_data_len = rt73usb_get_tx_data_len, + .kick_tx_queue = rt73usb_kick_tx_queue, + .fill_rxdone = rt73usb_fill_rxdone, + .config_mac_addr = rt73usb_config_mac_addr, + .config_bssid = rt73usb_config_bssid, + .config_type = rt73usb_config_type, + .config_preamble = rt73usb_config_preamble, + .config = rt73usb_config, +}; + +static const struct rt2x00_ops rt73usb_ops = { + .name = DRV_NAME, + .rxd_size = RXD_DESC_SIZE, + .txd_size = TXD_DESC_SIZE, + .eeprom_size = EEPROM_SIZE, + .rf_size = RF_SIZE, + .lib = &rt73usb_rt2x00_ops, + .hw = &rt73usb_mac80211_ops, +#ifdef CONFIG_RT2X00_LIB_DEBUGFS + .debugfs = &rt73usb_rt2x00debug, +#endif /* CONFIG_RT2X00_LIB_DEBUGFS */ +}; + +/* + * rt73usb module information. 
+ */ +static struct usb_device_id rt73usb_device_table[] = { + /* AboCom */ + { USB_DEVICE(0x07b8, 0xb21d), USB_DEVICE_DATA(&rt73usb_ops) }, + /* Askey */ + { USB_DEVICE(0x1690, 0x0722), USB_DEVICE_DATA(&rt73usb_ops) }, + /* ASUS */ + { USB_DEVICE(0x0b05, 0x1723), USB_DEVICE_DATA(&rt73usb_ops) }, + { USB_DEVICE(0x0b05, 0x1724), USB_DEVICE_DATA(&rt73usb_ops) }, + /* Belkin */ + { USB_DEVICE(0x050d, 0x7050), USB_DEVICE_DATA(&rt73usb_ops) }, + { USB_DEVICE(0x050d, 0x705a), USB_DEVICE_DATA(&rt73usb_ops) }, + { USB_DEVICE(0x050d, 0x905b), USB_DEVICE_DATA(&rt73usb_ops) }, + /* Billionton */ + { USB_DEVICE(0x1631, 0xc019), USB_DEVICE_DATA(&rt73usb_ops) }, + /* Buffalo */ + { USB_DEVICE(0x0411, 0x00f4), USB_DEVICE_DATA(&rt73usb_ops) }, + /* CNet */ + { USB_DEVICE(0x1371, 0x9022), USB_DEVICE_DATA(&rt73usb_ops) }, + { USB_DEVICE(0x1371, 0x9032), USB_DEVICE_DATA(&rt73usb_ops) }, + /* Conceptronic */ + { USB_DEVICE(0x14b2, 0x3c22), USB_DEVICE_DATA(&rt73usb_ops) }, + /* D-Link */ + { USB_DEVICE(0x07d1, 0x3c03), USB_DEVICE_DATA(&rt73usb_ops) }, + { USB_DEVICE(0x07d1, 0x3c04), USB_DEVICE_DATA(&rt73usb_ops) }, + /* Gemtek */ + { USB_DEVICE(0x15a9, 0x0004), USB_DEVICE_DATA(&rt73usb_ops) }, + /* Gigabyte */ + { USB_DEVICE(0x1044, 0x8008), USB_DEVICE_DATA(&rt73usb_ops) }, + { USB_DEVICE(0x1044, 0x800a), USB_DEVICE_DATA(&rt73usb_ops) }, + /* Huawei-3Com */ + { USB_DEVICE(0x1472, 0x0009), USB_DEVICE_DATA(&rt73usb_ops) }, + /* Hercules */ + { USB_DEVICE(0x06f8, 0xe010), USB_DEVICE_DATA(&rt73usb_ops) }, + { USB_DEVICE(0x06f8, 0xe020), USB_DEVICE_DATA(&rt73usb_ops) }, + /* Linksys */ + { USB_DEVICE(0x13b1, 0x0020), USB_DEVICE_DATA(&rt73usb_ops) }, + { USB_DEVICE(0x13b1, 0x0023), USB_DEVICE_DATA(&rt73usb_ops) }, + /* MSI */ + { USB_DEVICE(0x0db0, 0x6877), USB_DEVICE_DATA(&rt73usb_ops) }, + { USB_DEVICE(0x0db0, 0x6874), USB_DEVICE_DATA(&rt73usb_ops) }, + { USB_DEVICE(0x0db0, 0xa861), USB_DEVICE_DATA(&rt73usb_ops) }, + { USB_DEVICE(0x0db0, 0xa874), USB_DEVICE_DATA(&rt73usb_ops) }, + /* Ralink */ + { USB_DEVICE(0x148f, 0x2573), USB_DEVICE_DATA(&rt73usb_ops) }, + { USB_DEVICE(0x148f, 0x2671), USB_DEVICE_DATA(&rt73usb_ops) }, + /* Qcom */ + { USB_DEVICE(0x18e8, 0x6196), USB_DEVICE_DATA(&rt73usb_ops) }, + { USB_DEVICE(0x18e8, 0x6229), USB_DEVICE_DATA(&rt73usb_ops) }, + { USB_DEVICE(0x18e8, 0x6238), USB_DEVICE_DATA(&rt73usb_ops) }, + /* Senao */ + { USB_DEVICE(0x1740, 0x7100), USB_DEVICE_DATA(&rt73usb_ops) }, + /* Sitecom */ + { USB_DEVICE(0x0df6, 0x9712), USB_DEVICE_DATA(&rt73usb_ops) }, + { USB_DEVICE(0x0df6, 0x90ac), USB_DEVICE_DATA(&rt73usb_ops) }, + /* Surecom */ + { USB_DEVICE(0x0769, 0x31f3), USB_DEVICE_DATA(&rt73usb_ops) }, + /* Planex */ + { USB_DEVICE(0x2019, 0xab01), USB_DEVICE_DATA(&rt73usb_ops) }, + { USB_DEVICE(0x2019, 0xab50), USB_DEVICE_DATA(&rt73usb_ops) }, + { 0, } +}; + +MODULE_AUTHOR(DRV_PROJECT); +MODULE_VERSION(DRV_VERSION); +MODULE_DESCRIPTION("Ralink RT73 USB Wireless LAN driver."); +MODULE_SUPPORTED_DEVICE("Ralink RT2571W & RT2671 USB chipset based cards"); +MODULE_DEVICE_TABLE(usb, rt73usb_device_table); +MODULE_FIRMWARE(FIRMWARE_RT2571); +MODULE_LICENSE("GPL"); + +static struct usb_driver rt73usb_driver = { + .name = DRV_NAME, + .id_table = rt73usb_device_table, + .probe = rt2x00usb_probe, + .disconnect = rt2x00usb_disconnect, + .suspend = rt2x00usb_suspend, + .resume = rt2x00usb_resume, +}; + +static int __init rt73usb_init(void) +{ + return usb_register(&rt73usb_driver); +} + +static void __exit rt73usb_exit(void) +{ + usb_deregister(&rt73usb_driver); +} + +module_init(rt73usb_init); 
+module_exit(rt73usb_exit); diff --git a/drivers/net/wireless/rt2x00/rt73usb.h b/drivers/net/wireless/rt2x00/rt73usb.h new file mode 100644 index 0000000000000000000000000000000000000000..f0951519f74b32592077587d22dfa304d5b9e9fe --- /dev/null +++ b/drivers/net/wireless/rt2x00/rt73usb.h @@ -0,0 +1,1024 @@ +/* + Copyright (C) 2004 - 2007 rt2x00 SourceForge Project + + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the + Free Software Foundation, Inc., + 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + */ + +/* + Module: rt73usb + Abstract: Data structures and registers for the rt73usb module. + Supported chipsets: rt2571W & rt2671. + */ + +#ifndef RT73USB_H +#define RT73USB_H + +/* + * RF chip defines. + */ +#define RF5226 0x0001 +#define RF2528 0x0002 +#define RF5225 0x0003 +#define RF2527 0x0004 + +/* + * Signal information. + * Default offset is required for RSSI <-> dBm conversion. + */ +#define MAX_SIGNAL 100 +#define MAX_RX_SSI -1 +#define DEFAULT_RSSI_OFFSET 120 + +/* + * Register layout information. + */ +#define CSR_REG_BASE 0x3000 +#define CSR_REG_SIZE 0x04b0 +#define EEPROM_BASE 0x0000 +#define EEPROM_SIZE 0x0100 +#define BBP_SIZE 0x0080 +#define RF_SIZE 0x0014 + +/* + * USB registers. + */ + +/* + * MCU_LEDCS: LED control for MCU Mailbox. + */ +#define MCU_LEDCS_LED_MODE FIELD16(0x001f) +#define MCU_LEDCS_RADIO_STATUS FIELD16(0x0020) +#define MCU_LEDCS_LINK_BG_STATUS FIELD16(0x0040) +#define MCU_LEDCS_LINK_A_STATUS FIELD16(0x0080) +#define MCU_LEDCS_POLARITY_GPIO_0 FIELD16(0x0100) +#define MCU_LEDCS_POLARITY_GPIO_1 FIELD16(0x0200) +#define MCU_LEDCS_POLARITY_GPIO_2 FIELD16(0x0400) +#define MCU_LEDCS_POLARITY_GPIO_3 FIELD16(0x0800) +#define MCU_LEDCS_POLARITY_GPIO_4 FIELD16(0x1000) +#define MCU_LEDCS_POLARITY_ACT FIELD16(0x2000) +#define MCU_LEDCS_POLARITY_READY_BG FIELD16(0x4000) +#define MCU_LEDCS_POLARITY_READY_A FIELD16(0x8000) + +/* + * 8051 firmware image. + */ +#define FIRMWARE_RT2571 "rt73.bin" +#define FIRMWARE_IMAGE_BASE 0x0800 + +/* + * Security key table memory. + * 16 entries 32-byte for shared key table + * 64 entries 32-byte for pairwise key table + * 64 entries 8-byte for pairwise ta key table + */ +#define SHARED_KEY_TABLE_BASE 0x1000 +#define PAIRWISE_KEY_TABLE_BASE 0x1200 +#define PAIRWISE_TA_TABLE_BASE 0x1a00 + +struct hw_key_entry { + u8 key[16]; + u8 tx_mic[8]; + u8 rx_mic[8]; +} __attribute__ ((packed)); + +struct hw_pairwise_ta_entry { + u8 address[6]; + u8 reserved[2]; +} __attribute__ ((packed)); + +/* + * Since a NULL frame won't be that long (256 bytes), + * we steal 16 tail bytes to save debugging settings. + */ +#define HW_DEBUG_SETTING_BASE 0x2bf0 + +/* + * On-chip BEACON frame space. + */ +#define HW_BEACON_BASE0 0x2400 +#define HW_BEACON_BASE1 0x2500 +#define HW_BEACON_BASE2 0x2600 +#define HW_BEACON_BASE3 0x2700 + +/* + * MAC Control/Status Registers (CSR). + * Some values are set in TU, whereas 1 TU == 1024 us. + */ + +/* + * MAC_CSR0: ASIC revision number.
+ */ +#define MAC_CSR0 0x3000 + +/* + * MAC_CSR1: System control register. + * SOFT_RESET: Software reset bit, 1: reset, 0: normal. + * BBP_RESET: Hardware reset BBP. + * HOST_READY: Host is ready after initialization, 1: ready. + */ +#define MAC_CSR1 0x3004 +#define MAC_CSR1_SOFT_RESET FIELD32(0x00000001) +#define MAC_CSR1_BBP_RESET FIELD32(0x00000002) +#define MAC_CSR1_HOST_READY FIELD32(0x00000004) + +/* + * MAC_CSR2: STA MAC register 0. + */ +#define MAC_CSR2 0x3008 +#define MAC_CSR2_BYTE0 FIELD32(0x000000ff) +#define MAC_CSR2_BYTE1 FIELD32(0x0000ff00) +#define MAC_CSR2_BYTE2 FIELD32(0x00ff0000) +#define MAC_CSR2_BYTE3 FIELD32(0xff000000) + +/* + * MAC_CSR3: STA MAC register 1. + */ +#define MAC_CSR3 0x300c +#define MAC_CSR3_BYTE4 FIELD32(0x000000ff) +#define MAC_CSR3_BYTE5 FIELD32(0x0000ff00) +#define MAC_CSR3_UNICAST_TO_ME_MASK FIELD32(0x00ff0000) + +/* + * MAC_CSR4: BSSID register 0. + */ +#define MAC_CSR4 0x3010 +#define MAC_CSR4_BYTE0 FIELD32(0x000000ff) +#define MAC_CSR4_BYTE1 FIELD32(0x0000ff00) +#define MAC_CSR4_BYTE2 FIELD32(0x00ff0000) +#define MAC_CSR4_BYTE3 FIELD32(0xff000000) + +/* + * MAC_CSR5: BSSID register 1. + * BSS_ID_MASK: 3: one BSSID, 0: 4 BSSID, 2 or 1: 2 BSSID. + */ +#define MAC_CSR5 0x3014 +#define MAC_CSR5_BYTE4 FIELD32(0x000000ff) +#define MAC_CSR5_BYTE5 FIELD32(0x0000ff00) +#define MAC_CSR5_BSS_ID_MASK FIELD32(0x00ff0000) + +/* + * MAC_CSR6: Maximum frame length register. + */ +#define MAC_CSR6 0x3018 +#define MAC_CSR6_MAX_FRAME_UNIT FIELD32(0x00000fff) + +/* + * MAC_CSR7: Reserved + */ +#define MAC_CSR7 0x301c + +/* + * MAC_CSR8: SIFS/EIFS register. + * All units are in US. + */ +#define MAC_CSR8 0x3020 +#define MAC_CSR8_SIFS FIELD32(0x000000ff) +#define MAC_CSR8_SIFS_AFTER_RX_OFDM FIELD32(0x0000ff00) +#define MAC_CSR8_EIFS FIELD32(0xffff0000) + +/* + * MAC_CSR9: Back-Off control register. + * SLOT_TIME: Slot time, default is 20us for 802.11BG. + * CWMIN: Bit for Cwmin. default Cwmin is 31 (2^5 - 1). + * CWMAX: Bit for Cwmax, default Cwmax is 1023 (2^10 - 1). + * CW_SELECT: 1: CWmin/Cwmax select from register, 0:select from TxD. + */ +#define MAC_CSR9 0x3024 +#define MAC_CSR9_SLOT_TIME FIELD32(0x000000ff) +#define MAC_CSR9_CWMIN FIELD32(0x00000f00) +#define MAC_CSR9_CWMAX FIELD32(0x0000f000) +#define MAC_CSR9_CW_SELECT FIELD32(0x00010000) + +/* + * MAC_CSR10: Power state configuration. + */ +#define MAC_CSR10 0x3028 + +/* + * MAC_CSR11: Power saving transition time register. + * DELAY_AFTER_TBCN: Delay after Tbcn expired in units of TU. + * TBCN_BEFORE_WAKEUP: Number of beacon before wakeup. + * WAKEUP_LATENCY: In unit of TU. + */ +#define MAC_CSR11 0x302c +#define MAC_CSR11_DELAY_AFTER_TBCN FIELD32(0x000000ff) +#define MAC_CSR11_TBCN_BEFORE_WAKEUP FIELD32(0x00007f00) +#define MAC_CSR11_AUTOWAKE FIELD32(0x00008000) +#define MAC_CSR11_WAKEUP_LATENCY FIELD32(0x000f0000) + +/* + * MAC_CSR12: Manual power control / status register (merge CSR20 & PWRCSR1). + * CURRENT_STATE: 0:sleep, 1:awake. + * FORCE_WAKEUP: This has higher priority than PUT_TO_SLEEP. + * BBP_CURRENT_STATE: 0: BBP sleep, 1: BBP awake. + */ +#define MAC_CSR12 0x3030 +#define MAC_CSR12_CURRENT_STATE FIELD32(0x00000001) +#define MAC_CSR12_PUT_TO_SLEEP FIELD32(0x00000002) +#define MAC_CSR12_FORCE_WAKEUP FIELD32(0x00000004) +#define MAC_CSR12_BBP_CURRENT_STATE FIELD32(0x00000008) + +/* + * MAC_CSR13: GPIO. + */ +#define MAC_CSR13 0x3034 + +/* + * MAC_CSR14: LED control register. + * ON_PERIOD: On period, default 70ms. + * OFF_PERIOD: Off period, default 30ms. 
+ * HW_LED: HW TX activity, 1: normal OFF, 0: normal ON. + * SW_LED: s/w LED, 1: ON, 0: OFF. + * HW_LED_POLARITY: 0: active low, 1: active high. + */ +#define MAC_CSR14 0x3038 +#define MAC_CSR14_ON_PERIOD FIELD32(0x000000ff) +#define MAC_CSR14_OFF_PERIOD FIELD32(0x0000ff00) +#define MAC_CSR14_HW_LED FIELD32(0x00010000) +#define MAC_CSR14_SW_LED FIELD32(0x00020000) +#define MAC_CSR14_HW_LED_POLARITY FIELD32(0x00040000) +#define MAC_CSR14_SW_LED2 FIELD32(0x00080000) + +/* + * MAC_CSR15: NAV control. + */ +#define MAC_CSR15 0x303c + +/* + * TXRX control registers. + * Some values are set in TU, whereas 1 TU == 1024 us. + */ + +/* + * TXRX_CSR0: TX/RX configuration register. + * TSF_OFFSET: Default is 24. + * AUTO_TX_SEQ: 1: ASIC automatically replaces the sequence number in outgoing frames. + * DISABLE_RX: Disable Rx engine. + * DROP_CRC: Drop CRC error. + * DROP_PHYSICAL: Drop physical error. + * DROP_CONTROL: Drop control frame. + * DROP_NOT_TO_ME: Drop not to me unicast frame. + * DROP_TO_DS: Drop frame if the ToDs bit is true. + * DROP_VERSION_ERROR: Drop version error frame. + * DROP_MULTICAST: Drop multicast frames. + * DROP_BROADCAST: Drop broadcast frames. + * DROP_ACK_CTS: Drop received ACK and CTS. + */ +#define TXRX_CSR0 0x3040 +#define TXRX_CSR0_RX_ACK_TIMEOUT FIELD32(0x000001ff) +#define TXRX_CSR0_TSF_OFFSET FIELD32(0x00007e00) +#define TXRX_CSR0_AUTO_TX_SEQ FIELD32(0x00008000) +#define TXRX_CSR0_DISABLE_RX FIELD32(0x00010000) +#define TXRX_CSR0_DROP_CRC FIELD32(0x00020000) +#define TXRX_CSR0_DROP_PHYSICAL FIELD32(0x00040000) +#define TXRX_CSR0_DROP_CONTROL FIELD32(0x00080000) +#define TXRX_CSR0_DROP_NOT_TO_ME FIELD32(0x00100000) +#define TXRX_CSR0_DROP_TO_DS FIELD32(0x00200000) +#define TXRX_CSR0_DROP_VERSION_ERROR FIELD32(0x00400000) +#define TXRX_CSR0_DROP_MULTICAST FIELD32(0x00800000) +#define TXRX_CSR0_DROP_BROADCAST FIELD32(0x01000000) +#define TXRX_CSR0_DROP_ACK_CTS FIELD32(0x02000000) +#define TXRX_CSR0_TX_WITHOUT_WAITING FIELD32(0x04000000) + +/* + * TXRX_CSR1 + */ +#define TXRX_CSR1 0x3044 +#define TXRX_CSR1_BBP_ID0 FIELD32(0x0000007f) +#define TXRX_CSR1_BBP_ID0_VALID FIELD32(0x00000080) +#define TXRX_CSR1_BBP_ID1 FIELD32(0x00007f00) +#define TXRX_CSR1_BBP_ID1_VALID FIELD32(0x00008000) +#define TXRX_CSR1_BBP_ID2 FIELD32(0x007f0000) +#define TXRX_CSR1_BBP_ID2_VALID FIELD32(0x00800000) +#define TXRX_CSR1_BBP_ID3 FIELD32(0x7f000000) +#define TXRX_CSR1_BBP_ID3_VALID FIELD32(0x80000000) + +/* + * TXRX_CSR2 + */ +#define TXRX_CSR2 0x3048 +#define TXRX_CSR2_BBP_ID0 FIELD32(0x0000007f) +#define TXRX_CSR2_BBP_ID0_VALID FIELD32(0x00000080) +#define TXRX_CSR2_BBP_ID1 FIELD32(0x00007f00) +#define TXRX_CSR2_BBP_ID1_VALID FIELD32(0x00008000) +#define TXRX_CSR2_BBP_ID2 FIELD32(0x007f0000) +#define TXRX_CSR2_BBP_ID2_VALID FIELD32(0x00800000) +#define TXRX_CSR2_BBP_ID3 FIELD32(0x7f000000) +#define TXRX_CSR2_BBP_ID3_VALID FIELD32(0x80000000) + +/* + * TXRX_CSR3 + */ +#define TXRX_CSR3 0x304c +#define TXRX_CSR3_BBP_ID0 FIELD32(0x0000007f) +#define TXRX_CSR3_BBP_ID0_VALID FIELD32(0x00000080) +#define TXRX_CSR3_BBP_ID1 FIELD32(0x00007f00) +#define TXRX_CSR3_BBP_ID1_VALID FIELD32(0x00008000) +#define TXRX_CSR3_BBP_ID2 FIELD32(0x007f0000) +#define TXRX_CSR3_BBP_ID2_VALID FIELD32(0x00800000) +#define TXRX_CSR3_BBP_ID3 FIELD32(0x7f000000) +#define TXRX_CSR3_BBP_ID3_VALID FIELD32(0x80000000) + +/* + * TXRX_CSR4: Auto-Responder/Tx-retry register. + * AUTORESPOND_PREAMBLE: 0:long, 1:short preamble. + * OFDM_TX_RATE_DOWN: 1:enable. + * OFDM_TX_RATE_STEP: 0:1-step, 1: 2-step, 2:3-step, 3:4-step.
+ * OFDM_TX_FALLBACK_CCK: 0: Fallback to OFDM 6M only, 1: Fallback to CCK 1M,2M. + */ +#define TXRX_CSR4 0x3050 +#define TXRX_CSR4_TX_ACK_TIMEOUT FIELD32(0x000000ff) +#define TXRX_CSR4_CNTL_ACK_POLICY FIELD32(0x00000700) +#define TXRX_CSR4_ACK_CTS_PSM FIELD32(0x00010000) +#define TXRX_CSR4_AUTORESPOND_ENABLE FIELD32(0x00020000) +#define TXRX_CSR4_AUTORESPOND_PREAMBLE FIELD32(0x00040000) +#define TXRX_CSR4_OFDM_TX_RATE_DOWN FIELD32(0x00080000) +#define TXRX_CSR4_OFDM_TX_RATE_STEP FIELD32(0x00300000) +#define TXRX_CSR4_OFDM_TX_FALLBACK_CCK FIELD32(0x00400000) +#define TXRX_CSR4_LONG_RETRY_LIMIT FIELD32(0x0f000000) +#define TXRX_CSR4_SHORT_RETRY_LIMIT FIELD32(0xf0000000) + +/* + * TXRX_CSR5 + */ +#define TXRX_CSR5 0x3054 + +/* + * TXRX_CSR6: ACK/CTS payload consumed time + */ +#define TXRX_CSR6 0x3058 + +/* + * TXRX_CSR7: OFDM ACK/CTS payload consumed time for 6/9/12/18 mbps. + */ +#define TXRX_CSR7 0x305c +#define TXRX_CSR7_ACK_CTS_6MBS FIELD32(0x000000ff) +#define TXRX_CSR7_ACK_CTS_9MBS FIELD32(0x0000ff00) +#define TXRX_CSR7_ACK_CTS_12MBS FIELD32(0x00ff0000) +#define TXRX_CSR7_ACK_CTS_18MBS FIELD32(0xff000000) + +/* + * TXRX_CSR8: OFDM ACK/CTS payload consumed time for 24/36/48/54 mbps. + */ +#define TXRX_CSR8 0x3060 +#define TXRX_CSR8_ACK_CTS_24MBS FIELD32(0x000000ff) +#define TXRX_CSR8_ACK_CTS_36MBS FIELD32(0x0000ff00) +#define TXRX_CSR8_ACK_CTS_48MBS FIELD32(0x00ff0000) +#define TXRX_CSR8_ACK_CTS_54MBS FIELD32(0xff000000) + +/* + * TXRX_CSR9: Synchronization control register. + * BEACON_INTERVAL: In unit of 1/16 TU. + * TSF_TICKING: Enable TSF auto counting. + * TSF_SYNC: Tsf sync, 0: disable, 1: infra, 2: ad-hoc/master mode. + * BEACON_GEN: Enable beacon generator. + */ +#define TXRX_CSR9 0x3064 +#define TXRX_CSR9_BEACON_INTERVAL FIELD32(0x0000ffff) +#define TXRX_CSR9_TSF_TICKING FIELD32(0x00010000) +#define TXRX_CSR9_TSF_SYNC FIELD32(0x00060000) +#define TXRX_CSR9_TBTT_ENABLE FIELD32(0x00080000) +#define TXRX_CSR9_BEACON_GEN FIELD32(0x00100000) +#define TXRX_CSR9_TIMESTAMP_COMPENSATE FIELD32(0xff000000) + +/* + * TXRX_CSR10: BEACON alignment. + */ +#define TXRX_CSR10 0x3068 + +/* + * TXRX_CSR11: AES mask. + */ +#define TXRX_CSR11 0x306c + +/* + * TXRX_CSR12: TSF low 32. + */ +#define TXRX_CSR12 0x3070 +#define TXRX_CSR12_LOW_TSFTIMER FIELD32(0xffffffff) + +/* + * TXRX_CSR13: TSF high 32. + */ +#define TXRX_CSR13 0x3074 +#define TXRX_CSR13_HIGH_TSFTIMER FIELD32(0xffffffff) + +/* + * TXRX_CSR14: TBTT timer. + */ +#define TXRX_CSR14 0x3078 + +/* + * TXRX_CSR15: TKIP MIC priority byte "AND" mask. + */ +#define TXRX_CSR15 0x307c + +/* + * PHY control registers. + * Some values are set in TU, whereas 1 TU == 1024 us. + */ + +/* + * PHY_CSR0: RF/PS control. + */ +#define PHY_CSR0 0x3080 +#define PHY_CSR0_PA_PE_BG FIELD32(0x00010000) +#define PHY_CSR0_PA_PE_A FIELD32(0x00020000) + +/* + * PHY_CSR1 + */ +#define PHY_CSR1 0x3084 +#define PHY_CSR1_RF_RPI FIELD32(0x00010000) + +/* + * PHY_CSR2: Pre-TX BBP control. + */ +#define PHY_CSR2 0x3088 + +/* + * PHY_CSR3: BBP serial control register. + * VALUE: Register value to program into BBP. + * REG_NUM: Selected BBP register. + * READ_CONTROL: 0: Write BBP, 1: Read BBP. + * BUSY: 1: ASIC is busy execute BBP programming. + */ +#define PHY_CSR3 0x308c +#define PHY_CSR3_VALUE FIELD32(0x000000ff) +#define PHY_CSR3_REGNUM FIELD32(0x00007f00) +#define PHY_CSR3_READ_CONTROL FIELD32(0x00008000) +#define PHY_CSR3_BUSY FIELD32(0x00010000) + +/* + * PHY_CSR4: RF serial control register + * VALUE: Register value (include register id) serial out to RF/IF chip. 
+ * NUMBER_OF_BITS: Number of bits used in RFRegValue (I:20, RFMD:22). + * IF_SELECT: 1: select IF to program, 0: select RF to program. + * PLL_LD: RF PLL_LD status. + * BUSY: 1: ASIC is busy execute RF programming. + */ +#define PHY_CSR4 0x3090 +#define PHY_CSR4_VALUE FIELD32(0x00ffffff) +#define PHY_CSR4_NUMBER_OF_BITS FIELD32(0x1f000000) +#define PHY_CSR4_IF_SELECT FIELD32(0x20000000) +#define PHY_CSR4_PLL_LD FIELD32(0x40000000) +#define PHY_CSR4_BUSY FIELD32(0x80000000) + +/* + * PHY_CSR5: RX to TX signal switch timing control. + */ +#define PHY_CSR5 0x3094 +#define PHY_CSR5_IQ_FLIP FIELD32(0x00000004) + +/* + * PHY_CSR6: TX to RX signal timing control. + */ +#define PHY_CSR6 0x3098 +#define PHY_CSR6_IQ_FLIP FIELD32(0x00000004) + +/* + * PHY_CSR7: TX DAC switching timing control. + */ +#define PHY_CSR7 0x309c + +/* + * Security control register. + */ + +/* + * SEC_CSR0: Shared key table control. + */ +#define SEC_CSR0 0x30a0 +#define SEC_CSR0_BSS0_KEY0_VALID FIELD32(0x00000001) +#define SEC_CSR0_BSS0_KEY1_VALID FIELD32(0x00000002) +#define SEC_CSR0_BSS0_KEY2_VALID FIELD32(0x00000004) +#define SEC_CSR0_BSS0_KEY3_VALID FIELD32(0x00000008) +#define SEC_CSR0_BSS1_KEY0_VALID FIELD32(0x00000010) +#define SEC_CSR0_BSS1_KEY1_VALID FIELD32(0x00000020) +#define SEC_CSR0_BSS1_KEY2_VALID FIELD32(0x00000040) +#define SEC_CSR0_BSS1_KEY3_VALID FIELD32(0x00000080) +#define SEC_CSR0_BSS2_KEY0_VALID FIELD32(0x00000100) +#define SEC_CSR0_BSS2_KEY1_VALID FIELD32(0x00000200) +#define SEC_CSR0_BSS2_KEY2_VALID FIELD32(0x00000400) +#define SEC_CSR0_BSS2_KEY3_VALID FIELD32(0x00000800) +#define SEC_CSR0_BSS3_KEY0_VALID FIELD32(0x00001000) +#define SEC_CSR0_BSS3_KEY1_VALID FIELD32(0x00002000) +#define SEC_CSR0_BSS3_KEY2_VALID FIELD32(0x00004000) +#define SEC_CSR0_BSS3_KEY3_VALID FIELD32(0x00008000) + +/* + * SEC_CSR1: Shared key table security mode register. + */ +#define SEC_CSR1 0x30a4 +#define SEC_CSR1_BSS0_KEY0_CIPHER_ALG FIELD32(0x00000007) +#define SEC_CSR1_BSS0_KEY1_CIPHER_ALG FIELD32(0x00000070) +#define SEC_CSR1_BSS0_KEY2_CIPHER_ALG FIELD32(0x00000700) +#define SEC_CSR1_BSS0_KEY3_CIPHER_ALG FIELD32(0x00007000) +#define SEC_CSR1_BSS1_KEY0_CIPHER_ALG FIELD32(0x00070000) +#define SEC_CSR1_BSS1_KEY1_CIPHER_ALG FIELD32(0x00700000) +#define SEC_CSR1_BSS1_KEY2_CIPHER_ALG FIELD32(0x07000000) +#define SEC_CSR1_BSS1_KEY3_CIPHER_ALG FIELD32(0x70000000) + +/* + * Pairwise key table valid bitmap registers. + * SEC_CSR2: pairwise key table valid bitmap 0. + * SEC_CSR3: pairwise key table valid bitmap 1. + */ +#define SEC_CSR2 0x30a8 +#define SEC_CSR3 0x30ac + +/* + * SEC_CSR4: Pairwise key table lookup control. + */ +#define SEC_CSR4 0x30b0 + +/* + * SEC_CSR5: shared key table security mode register. + */ +#define SEC_CSR5 0x30b4 +#define SEC_CSR5_BSS2_KEY0_CIPHER_ALG FIELD32(0x00000007) +#define SEC_CSR5_BSS2_KEY1_CIPHER_ALG FIELD32(0x00000070) +#define SEC_CSR5_BSS2_KEY2_CIPHER_ALG FIELD32(0x00000700) +#define SEC_CSR5_BSS2_KEY3_CIPHER_ALG FIELD32(0x00007000) +#define SEC_CSR5_BSS3_KEY0_CIPHER_ALG FIELD32(0x00070000) +#define SEC_CSR5_BSS3_KEY1_CIPHER_ALG FIELD32(0x00700000) +#define SEC_CSR5_BSS3_KEY2_CIPHER_ALG FIELD32(0x07000000) +#define SEC_CSR5_BSS3_KEY3_CIPHER_ALG FIELD32(0x70000000) + +/* + * STA control registers. + */ + +/* + * STA_CSR0: RX PLCP error count & RX FCS error count. + */ +#define STA_CSR0 0x30c0 +#define STA_CSR0_FCS_ERROR FIELD32(0x0000ffff) +#define STA_CSR0_PLCP_ERROR FIELD32(0xffff0000) + +/* + * STA_CSR1: RX False CCA count & RX LONG frame count. 
+ */ +#define STA_CSR1 0x30c4 +#define STA_CSR1_PHYSICAL_ERROR FIELD32(0x0000ffff) +#define STA_CSR1_FALSE_CCA_ERROR FIELD32(0xffff0000) + +/* + * STA_CSR2: TX Beacon count and RX FIFO overflow count. + */ +#define STA_CSR2 0x30c8 +#define STA_CSR2_RX_FIFO_OVERFLOW_COUNT FIELD32(0x0000ffff) +#define STA_CSR2_RX_OVERFLOW_COUNT FIELD32(0xffff0000) + +/* + * STA_CSR3: TX Beacon count. + */ +#define STA_CSR3 0x30cc +#define STA_CSR3_TX_BEACON_COUNT FIELD32(0x0000ffff) + +/* + * STA_CSR4: TX Retry count. + */ +#define STA_CSR4 0x30d0 +#define STA_CSR4_TX_NO_RETRY_COUNT FIELD32(0x0000ffff) +#define STA_CSR4_TX_ONE_RETRY_COUNT FIELD32(0xffff0000) + +/* + * STA_CSR5: TX Retry count. + */ +#define STA_CSR5 0x30d4 +#define STA_CSR4_TX_MULTI_RETRY_COUNT FIELD32(0x0000ffff) +#define STA_CSR4_TX_RETRY_FAIL_COUNT FIELD32(0xffff0000) + +/* + * QOS control registers. + */ + +/* + * QOS_CSR1: TXOP holder MAC address register. + */ +#define QOS_CSR1 0x30e4 +#define QOS_CSR1_BYTE4 FIELD32(0x000000ff) +#define QOS_CSR1_BYTE5 FIELD32(0x0000ff00) + +/* + * QOS_CSR2: TXOP holder timeout register. + */ +#define QOS_CSR2 0x30e8 + +/* + * RX QOS-CFPOLL MAC address register. + * QOS_CSR3: RX QOS-CFPOLL MAC address 0. + * QOS_CSR4: RX QOS-CFPOLL MAC address 1. + */ +#define QOS_CSR3 0x30ec +#define QOS_CSR4 0x30f0 + +/* + * QOS_CSR5: "QosControl" field of the RX QOS-CFPOLL. + */ +#define QOS_CSR5 0x30f4 + +/* + * WMM Scheduler Register + */ + +/* + * AIFSN_CSR: AIFSN for each EDCA AC. + * AIFSN0: For AC_BK. + * AIFSN1: For AC_BE. + * AIFSN2: For AC_VI. + * AIFSN3: For AC_VO. + */ +#define AIFSN_CSR 0x0400 +#define AIFSN_CSR_AIFSN0 FIELD32(0x0000000f) +#define AIFSN_CSR_AIFSN1 FIELD32(0x000000f0) +#define AIFSN_CSR_AIFSN2 FIELD32(0x00000f00) +#define AIFSN_CSR_AIFSN3 FIELD32(0x0000f000) + +/* + * CWMIN_CSR: CWmin for each EDCA AC. + * CWMIN0: For AC_BK. + * CWMIN1: For AC_BE. + * CWMIN2: For AC_VI. + * CWMIN3: For AC_VO. + */ +#define CWMIN_CSR 0x0404 +#define CWMIN_CSR_CWMIN0 FIELD32(0x0000000f) +#define CWMIN_CSR_CWMIN1 FIELD32(0x000000f0) +#define CWMIN_CSR_CWMIN2 FIELD32(0x00000f00) +#define CWMIN_CSR_CWMIN3 FIELD32(0x0000f000) + +/* + * CWMAX_CSR: CWmax for each EDCA AC. + * CWMAX0: For AC_BK. + * CWMAX1: For AC_BE. + * CWMAX2: For AC_VI. + * CWMAX3: For AC_VO. + */ +#define CWMAX_CSR 0x0408 +#define CWMAX_CSR_CWMAX0 FIELD32(0x0000000f) +#define CWMAX_CSR_CWMAX1 FIELD32(0x000000f0) +#define CWMAX_CSR_CWMAX2 FIELD32(0x00000f00) +#define CWMAX_CSR_CWMAX3 FIELD32(0x0000f000) + +/* + * AC_TXOP_CSR0: AC_BK/AC_BE TXOP register. + * AC0_TX_OP: For AC_BK, in unit of 32us. + * AC1_TX_OP: For AC_BE, in unit of 32us. + */ +#define AC_TXOP_CSR0 0x040c +#define AC_TXOP_CSR0_AC0_TX_OP FIELD32(0x0000ffff) +#define AC_TXOP_CSR0_AC1_TX_OP FIELD32(0xffff0000) + +/* + * AC_TXOP_CSR1: AC_VO/AC_VI TXOP register. + * AC2_TX_OP: For AC_VI, in unit of 32us. + * AC3_TX_OP: For AC_VO, in unit of 32us. + */ +#define AC_TXOP_CSR1 0x0410 +#define AC_TXOP_CSR1_AC2_TX_OP FIELD32(0x0000ffff) +#define AC_TXOP_CSR1_AC3_TX_OP FIELD32(0xffff0000) + +/* + * BBP registers. + * The wordsize of the BBP is 8 bits. 
+ */ + +/* + * R2 + */ +#define BBP_R2_BG_MODE FIELD8(0x20) + +/* + * R3 + */ +#define BBP_R3_SMART_MODE FIELD8(0x01) + +/* + * R4: RX antenna control + * FRAME_END: 1 - DPDT, 0 - SPDT (Only valid for 802.11G, RF2527 & RF2529) + */ +#define BBP_R4_RX_ANTENNA FIELD8(0x03) +#define BBP_R4_RX_FRAME_END FIELD8(0x20) + +/* + * R77 + */ +#define BBP_R77_PAIR FIELD8(0x03) + +/* + * RF registers + */ + +/* + * RF 3 + */ +#define RF3_TXPOWER FIELD32(0x00003e00) + +/* + * RF 4 + */ +#define RF4_FREQ_OFFSET FIELD32(0x0003f000) + +/* + * EEPROM content. + * The wordsize of the EEPROM is 16 bits. + */ + +/* + * HW MAC address. + */ +#define EEPROM_MAC_ADDR_0 0x0002 +#define EEPROM_MAC_ADDR_BYTE0 FIELD16(0x00ff) +#define EEPROM_MAC_ADDR_BYTE1 FIELD16(0xff00) +#define EEPROM_MAC_ADDR1 0x0003 +#define EEPROM_MAC_ADDR_BYTE2 FIELD16(0x00ff) +#define EEPROM_MAC_ADDR_BYTE3 FIELD16(0xff00) +#define EEPROM_MAC_ADDR_2 0x0004 +#define EEPROM_MAC_ADDR_BYTE4 FIELD16(0x00ff) +#define EEPROM_MAC_ADDR_BYTE5 FIELD16(0xff00) + +/* + * EEPROM antenna. + * ANTENNA_NUM: Number of antennas. + * TX_DEFAULT: Default antenna 0: diversity, 1: A, 2: B. + * RX_DEFAULT: Default antenna 0: diversity, 1: A, 2: B. + * FRAME_TYPE: 0: DPDT, 1: SPDT; note this bit is valid for 802.11g only. + * DYN_TXAGC: Dynamic TX AGC control. + * HARDWARE_RADIO: 1: Hardware controlled radio. Read GPIO0. + * RF_TYPE: Rf_type of this adapter. + */ +#define EEPROM_ANTENNA 0x0010 +#define EEPROM_ANTENNA_NUM FIELD16(0x0003) +#define EEPROM_ANTENNA_TX_DEFAULT FIELD16(0x000c) +#define EEPROM_ANTENNA_RX_DEFAULT FIELD16(0x0030) +#define EEPROM_ANTENNA_FRAME_TYPE FIELD16(0x0040) +#define EEPROM_ANTENNA_DYN_TXAGC FIELD16(0x0200) +#define EEPROM_ANTENNA_HARDWARE_RADIO FIELD16(0x0400) +#define EEPROM_ANTENNA_RF_TYPE FIELD16(0xf800) + +/* + * EEPROM NIC config. + * EXTERNAL_LNA: External LNA. + */ +#define EEPROM_NIC 0x0011 +#define EEPROM_NIC_EXTERNAL_LNA FIELD16(0x0010) + +/* + * EEPROM geography. + * GEO_A: Default geographical setting for the 5GHz band. + * GEO: Default geographical setting. + */ +#define EEPROM_GEOGRAPHY 0x0012 +#define EEPROM_GEOGRAPHY_GEO_A FIELD16(0x00ff) +#define EEPROM_GEOGRAPHY_GEO FIELD16(0xff00) + +/* + * EEPROM BBP. + */ +#define EEPROM_BBP_START 0x0013 +#define EEPROM_BBP_SIZE 16 +#define EEPROM_BBP_VALUE FIELD16(0x00ff) +#define EEPROM_BBP_REG_ID FIELD16(0xff00) + +/* + * EEPROM TXPOWER 802.11G + */ +#define EEPROM_TXPOWER_G_START 0x0023 +#define EEPROM_TXPOWER_G_SIZE 7 +#define EEPROM_TXPOWER_G_1 FIELD16(0x00ff) +#define EEPROM_TXPOWER_G_2 FIELD16(0xff00) + +/* + * EEPROM Frequency + */ +#define EEPROM_FREQ 0x002f +#define EEPROM_FREQ_OFFSET FIELD16(0x00ff) +#define EEPROM_FREQ_SEQ_MASK FIELD16(0xff00) +#define EEPROM_FREQ_SEQ FIELD16(0x0300) + +/* + * EEPROM LED. + * POLARITY_RDY_G: Polarity RDY_G setting. + * POLARITY_RDY_A: Polarity RDY_A setting. + * POLARITY_ACT: Polarity ACT setting. + * POLARITY_GPIO_0: Polarity GPIO0 setting. + * POLARITY_GPIO_1: Polarity GPIO1 setting. + * POLARITY_GPIO_2: Polarity GPIO2 setting. + * POLARITY_GPIO_3: Polarity GPIO3 setting. + * POLARITY_GPIO_4: Polarity GPIO4 setting. + * LED_MODE: Led mode.
+ */ +#define EEPROM_LED 0x0030 +#define EEPROM_LED_POLARITY_RDY_G FIELD16(0x0001) +#define EEPROM_LED_POLARITY_RDY_A FIELD16(0x0002) +#define EEPROM_LED_POLARITY_ACT FIELD16(0x0004) +#define EEPROM_LED_POLARITY_GPIO_0 FIELD16(0x0008) +#define EEPROM_LED_POLARITY_GPIO_1 FIELD16(0x0010) +#define EEPROM_LED_POLARITY_GPIO_2 FIELD16(0x0020) +#define EEPROM_LED_POLARITY_GPIO_3 FIELD16(0x0040) +#define EEPROM_LED_POLARITY_GPIO_4 FIELD16(0x0080) +#define EEPROM_LED_LED_MODE FIELD16(0x1f00) + +/* + * EEPROM TXPOWER 802.11A + */ +#define EEPROM_TXPOWER_A_START 0x0031 +#define EEPROM_TXPOWER_A_SIZE 12 +#define EEPROM_TXPOWER_A_1 FIELD16(0x00ff) +#define EEPROM_TXPOWER_A_2 FIELD16(0xff00) + +/* + * EEPROM RSSI offset 802.11BG + */ +#define EEPROM_RSSI_OFFSET_BG 0x004d +#define EEPROM_RSSI_OFFSET_BG_1 FIELD16(0x00ff) +#define EEPROM_RSSI_OFFSET_BG_2 FIELD16(0xff00) + +/* + * EEPROM RSSI offset 802.11A + */ +#define EEPROM_RSSI_OFFSET_A 0x004e +#define EEPROM_RSSI_OFFSET_A_1 FIELD16(0x00ff) +#define EEPROM_RSSI_OFFSET_A_2 FIELD16(0xff00) + +/* + * DMA descriptor defines. + */ +#define TXD_DESC_SIZE ( 6 * sizeof(struct data_desc) ) +#define RXD_DESC_SIZE ( 6 * sizeof(struct data_desc) ) + +/* + * TX descriptor format for TX, PRIO and Beacon Ring. + */ + +/* + * Word0 + * BURST: Next frame belongs to same "burst" event. + * TKIP_MIC: ASIC appends TKIP MIC if TKIP is used. + * KEY_TABLE: Use per-client pairwise KEY table. + * KEY_INDEX: + * Key index (0~31) to the pairwise KEY table. + * 0~3 to shared KEY table 0 (BSS0). + * 4~7 to shared KEY table 1 (BSS1). + * 8~11 to shared KEY table 2 (BSS2). + * 12~15 to shared KEY table 3 (BSS3). + * BURST2: For backward compatibility, set to same value as BURST. + */ +#define TXD_W0_BURST FIELD32(0x00000001) +#define TXD_W0_VALID FIELD32(0x00000002) +#define TXD_W0_MORE_FRAG FIELD32(0x00000004) +#define TXD_W0_ACK FIELD32(0x00000008) +#define TXD_W0_TIMESTAMP FIELD32(0x00000010) +#define TXD_W0_OFDM FIELD32(0x00000020) +#define TXD_W0_IFS FIELD32(0x00000040) +#define TXD_W0_RETRY_MODE FIELD32(0x00000080) +#define TXD_W0_TKIP_MIC FIELD32(0x00000100) +#define TXD_W0_KEY_TABLE FIELD32(0x00000200) +#define TXD_W0_KEY_INDEX FIELD32(0x0000fc00) +#define TXD_W0_DATABYTE_COUNT FIELD32(0x0fff0000) +#define TXD_W0_BURST2 FIELD32(0x10000000) +#define TXD_W0_CIPHER_ALG FIELD32(0xe0000000) + +/* + * Word1 + * HOST_Q_ID: EDCA/HCCA queue ID. + * HW_SEQUENCE: MAC overwrites the frame sequence number. + * BUFFER_COUNT: Number of buffers in this TXD. + */ +#define TXD_W1_HOST_Q_ID FIELD32(0x0000000f) +#define TXD_W1_AIFSN FIELD32(0x000000f0) +#define TXD_W1_CWMIN FIELD32(0x00000f00) +#define TXD_W1_CWMAX FIELD32(0x0000f000) +#define TXD_W1_IV_OFFSET FIELD32(0x003f0000) +#define TXD_W1_HW_SEQUENCE FIELD32(0x10000000) +#define TXD_W1_BUFFER_COUNT FIELD32(0xe0000000) + +/* + * Word2: PLCP information + */ +#define TXD_W2_PLCP_SIGNAL FIELD32(0x000000ff) +#define TXD_W2_PLCP_SERVICE FIELD32(0x0000ff00) +#define TXD_W2_PLCP_LENGTH_LOW FIELD32(0x00ff0000) +#define TXD_W2_PLCP_LENGTH_HIGH FIELD32(0xff000000) + +/* + * Word3 + */ +#define TXD_W3_IV FIELD32(0xffffffff) + +/* + * Word4 + */ +#define TXD_W4_EIV FIELD32(0xffffffff) + +/* + * Word5 + * FRAME_OFFSET: Frame start offset inside ASIC TXFIFO (after TXINFO field). + * PACKET_ID: Driver assigned packet ID to categorize TXResult in interrupt. + * WAITING_DMA_DONE_INT: TXD been filled with data + * and waiting for TxDoneISR housekeeping. 
+ */ +#define TXD_W5_FRAME_OFFSET FIELD32(0x000000ff) +#define TXD_W5_PACKET_ID FIELD32(0x0000ff00) +#define TXD_W5_TX_POWER FIELD32(0x00ff0000) +#define TXD_W5_WAITING_DMA_DONE_INT FIELD32(0x01000000) + +/* + * RX descriptor format for RX Ring. + */ + +/* + * Word0 + * CIPHER_ERROR: 1:ICV error, 2:MIC error, 3:invalid key. + * KEY_INDEX: Decryption key actually used. + */ +#define RXD_W0_OWNER_NIC FIELD32(0x00000001) +#define RXD_W0_DROP FIELD32(0x00000002) +#define RXD_W0_UNICAST_TO_ME FIELD32(0x00000004) +#define RXD_W0_MULTICAST FIELD32(0x00000008) +#define RXD_W0_BROADCAST FIELD32(0x00000010) +#define RXD_W0_MY_BSS FIELD32(0x00000020) +#define RXD_W0_CRC_ERROR FIELD32(0x00000040) +#define RXD_W0_OFDM FIELD32(0x00000080) +#define RXD_W0_CIPHER_ERROR FIELD32(0x00000300) +#define RXD_W0_KEY_INDEX FIELD32(0x0000fc00) +#define RXD_W0_DATABYTE_COUNT FIELD32(0x0fff0000) +#define RXD_W0_CIPHER_ALG FIELD32(0xe0000000) + +/* + * WORD1 + * SIGNAL: RX raw data rate reported by BBP. + * RSSI: RSSI reported by BBP. + */ +#define RXD_W1_SIGNAL FIELD32(0x000000ff) +#define RXD_W1_RSSI_AGC FIELD32(0x00001f00) +#define RXD_W1_RSSI_LNA FIELD32(0x00006000) +#define RXD_W1_FRAME_OFFSET FIELD32(0x7f000000) + +/* + * Word2 + * IV: Received IV of the originally encrypted frame. + */ +#define RXD_W2_IV FIELD32(0xffffffff) + +/* + * Word3 + * EIV: Received EIV of the originally encrypted frame. + */ +#define RXD_W3_EIV FIELD32(0xffffffff) + +/* + * Word4 + */ +#define RXD_W4_RESERVED FIELD32(0xffffffff) + +/* + * The above 20 bytes are called RXINFO and will be DMAed to the MAC RX block + * and passed to the HOST driver. + * The following fields are for DMA block and HOST usage only. + * They can't be touched by the ASIC MAC block. + */ + +/* + * Word5 + */ +#define RXD_W5_RESERVED FIELD32(0xffffffff) + +/* + * Macros for converting txpower from EEPROM to dscape value + * and from dscape value to register value. + */ +#define MIN_TXPOWER 0 +#define MAX_TXPOWER 31 +#define DEFAULT_TXPOWER 24 + +#define TXPOWER_FROM_DEV(__txpower) \ +({ \ + ((__txpower) > MAX_TXPOWER) ? \ + DEFAULT_TXPOWER : (__txpower); \ +}) + +#define TXPOWER_TO_DEV(__txpower) \ +({ \ + ((__txpower) <= MIN_TXPOWER) ? MIN_TXPOWER : \ + (((__txpower) >= MAX_TXPOWER) ?
MAX_TXPOWER : \ + (__txpower)); \ +}) + +#endif /* RT73USB_H */ diff --git a/drivers/net/wireless/rtl8187.h b/drivers/net/wireless/rtl8187.h index 6124e467b156467d2f7101806a78d29d5edbf531..6ad322ef0da13a974a8cb6c7793239213fb9123b 100644 --- a/drivers/net/wireless/rtl8187.h +++ b/drivers/net/wireless/rtl8187.h @@ -36,8 +36,7 @@ struct rtl8187_rx_info { }; struct rtl8187_rx_hdr { - __le16 len; - __le16 rate; + __le32 flags; u8 noise; u8 signal; u8 agc; @@ -67,13 +66,14 @@ struct rtl8187_priv { struct rtl818x_csr *map; void (*rf_init)(struct ieee80211_hw *); int mode; + int if_id; /* rtl8187 specific */ struct ieee80211_channel channels[14]; struct ieee80211_rate rates[12]; struct ieee80211_hw_mode modes[2]; struct usb_device *udev; - u8 *hwaddr; + u32 rx_conf; u16 txpwr_base; u8 asic_rev; struct sk_buff_head rx_queue; diff --git a/drivers/net/wireless/rtl8187_dev.c b/drivers/net/wireless/rtl8187_dev.c index e61c6d5ba1a93d26261369dfacffde1c8c5231da..0ef887dd286795fe0cf22a24b019b845362dae48 100644 --- a/drivers/net/wireless/rtl8187_dev.c +++ b/drivers/net/wireless/rtl8187_dev.c @@ -36,11 +36,64 @@ static struct usb_device_id rtl8187_table[] __devinitdata = { /* Netgear */ {USB_DEVICE(0x0846, 0x6100)}, {USB_DEVICE(0x0846, 0x6a00)}, + /* HP */ + {USB_DEVICE(0x03f0, 0xca02)}, {} }; MODULE_DEVICE_TABLE(usb, rtl8187_table); +static void rtl8187_iowrite_async_cb(struct urb *urb) +{ + kfree(urb->context); + usb_free_urb(urb); +} + +static void rtl8187_iowrite_async(struct rtl8187_priv *priv, __le16 addr, + void *data, u16 len) +{ + struct usb_ctrlrequest *dr; + struct urb *urb; + struct rtl8187_async_write_data { + u8 data[4]; + struct usb_ctrlrequest dr; + } *buf; + + buf = kmalloc(sizeof(*buf), GFP_ATOMIC); + if (!buf) + return; + + urb = usb_alloc_urb(0, GFP_ATOMIC); + if (!urb) { + kfree(buf); + return; + } + + dr = &buf->dr; + + dr->bRequestType = RTL8187_REQT_WRITE; + dr->bRequest = RTL8187_REQ_SET_REG; + dr->wValue = addr; + dr->wIndex = 0; + dr->wLength = cpu_to_le16(len); + + memcpy(buf, data, len); + + usb_fill_control_urb(urb, priv->udev, usb_sndctrlpipe(priv->udev, 0), + (unsigned char *)dr, buf, len, + rtl8187_iowrite_async_cb, buf); + usb_submit_urb(urb, GFP_ATOMIC); +} + +static inline void rtl818x_iowrite32_async(struct rtl8187_priv *priv, + __le32 *addr, u32 val) +{ + __le32 buf = cpu_to_le32(val); + + rtl8187_iowrite_async(priv, cpu_to_le16((unsigned long)addr), + &buf, sizeof(buf)); +} + void rtl8187_write_phy(struct ieee80211_hw *dev, u8 addr, u32 data) { struct rtl8187_priv *priv = dev->priv; @@ -96,7 +149,7 @@ static int rtl8187_tx(struct ieee80211_hw *dev, struct sk_buff *skb, if (control->flags & IEEE80211_TXCTL_USE_RTS_CTS) { tmp |= RTL8187_TX_FLAG_RTS; hdr->rts_duration = - ieee80211_rts_duration(dev, skb->len, control); + ieee80211_rts_duration(dev, priv->if_id, skb->len, control); } if (control->flags & IEEE80211_TXCTL_USE_CTS_PROTECT) tmp |= RTL8187_TX_FLAG_CTS; @@ -125,6 +178,7 @@ static void rtl8187_rx_cb(struct urb *urb) struct rtl8187_rx_hdr *hdr; struct ieee80211_rx_status rx_status = { 0 }; int rate, signal; + u32 flags; spin_lock(&priv->rx_queue.lock); if (skb->next) @@ -143,10 +197,11 @@ static void rtl8187_rx_cb(struct urb *urb) skb_put(skb, urb->actual_length); hdr = (struct rtl8187_rx_hdr *)(skb_tail_pointer(skb) - sizeof(*hdr)); - skb_trim(skb, le16_to_cpu(hdr->len) & 0x0FFF); + flags = le32_to_cpu(hdr->flags); + skb_trim(skb, flags & 0x0FFF); signal = hdr->agc >> 1; - rate = (le16_to_cpu(hdr->rate) >> 4) & 0xF; + rate = (flags >> 20) & 0xF; if (rate > 3) { /* 
OFDM rate */ if (signal > 90) signal = 90; @@ -169,6 +224,8 @@ static void rtl8187_rx_cb(struct urb *urb) rx_status.channel = dev->conf.channel; rx_status.phymode = dev->conf.phymode; rx_status.mactime = le64_to_cpu(hdr->mac_time); + if (flags & (1 << 13)) + rx_status.flag |= RX_FLAG_FAILED_FCS_CRC; ieee80211_rx_irqsafe(dev, skb, &rx_status); skb = dev_alloc_skb(RTL8187_MAX_RX); @@ -293,8 +350,6 @@ static int rtl8187_init_hw(struct ieee80211_hw *dev) rtl818x_iowrite8(priv, &priv->map->GP_ENABLE, 0); rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD, RTL818X_EEPROM_CMD_CONFIG); - for (i = 0; i < ETH_ALEN; i++) - rtl818x_iowrite8(priv, &priv->map->MAC[i], priv->hwaddr[i]); rtl818x_iowrite16(priv, (__le16 *)0xFFF4, 0xFFFF); reg = rtl818x_ioread8(priv, &priv->map->CONFIG1); @@ -365,7 +420,7 @@ static void rtl8187_set_channel(struct ieee80211_hw *dev, int channel) rtl818x_iowrite32(priv, &priv->map->TX_CONF, reg); } -static int rtl8187_open(struct ieee80211_hw *dev) +static int rtl8187_start(struct ieee80211_hw *dev) { struct rtl8187_priv *priv = dev->priv; u32 reg; @@ -383,16 +438,13 @@ static int rtl8187_open(struct ieee80211_hw *dev) RTL818X_RX_CONF_RX_AUTORESETPHY | RTL818X_RX_CONF_BSSID | RTL818X_RX_CONF_MGMT | - RTL818X_RX_CONF_CTRL | RTL818X_RX_CONF_DATA | (7 << 13 /* RX FIFO threshold NONE */) | (7 << 10 /* MAX RX DMA */) | RTL818X_RX_CONF_BROADCAST | - RTL818X_RX_CONF_MULTICAST | RTL818X_RX_CONF_NICMAC; - if (priv->mode == IEEE80211_IF_TYPE_MNTR) - reg |= RTL818X_RX_CONF_MONITOR; + priv->rx_conf = reg; rtl818x_iowrite32(priv, &priv->map->RX_CONF, reg); reg = rtl818x_ioread8(priv, &priv->map->CW_CONF); @@ -419,7 +471,7 @@ static int rtl8187_open(struct ieee80211_hw *dev) return 0; } -static int rtl8187_stop(struct ieee80211_hw *dev) +static void rtl8187_stop(struct ieee80211_hw *dev) { struct rtl8187_priv *priv = dev->priv; struct rtl8187_rx_info *info; @@ -445,28 +497,31 @@ static int rtl8187_stop(struct ieee80211_hw *dev) usb_kill_urb(info->urb); kfree_skb(skb); } - return 0; + return; } static int rtl8187_add_interface(struct ieee80211_hw *dev, struct ieee80211_if_init_conf *conf) { struct rtl8187_priv *priv = dev->priv; + int i; - /* NOTE: using IEEE80211_IF_TYPE_MGMT to indicate no mode selected */ - if (priv->mode != IEEE80211_IF_TYPE_MGMT) - return -1; + if (priv->mode != IEEE80211_IF_TYPE_MNTR) + return -EOPNOTSUPP; switch (conf->type) { case IEEE80211_IF_TYPE_STA: - case IEEE80211_IF_TYPE_MNTR: priv->mode = conf->type; break; default: return -EOPNOTSUPP; } - priv->hwaddr = conf->mac_addr ? 
conf->mac_addr : dev->wiphy->perm_addr; + rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD, RTL818X_EEPROM_CMD_CONFIG); + for (i = 0; i < ETH_ALEN; i++) + rtl818x_iowrite8(priv, &priv->map->MAC[i], + ((u8 *)conf->mac_addr)[i]); + rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD, RTL818X_EEPROM_CMD_NORMAL); return 0; } @@ -475,7 +530,7 @@ static void rtl8187_remove_interface(struct ieee80211_hw *dev, struct ieee80211_if_init_conf *conf) { struct rtl8187_priv *priv = dev->priv; - priv->mode = IEEE80211_IF_TYPE_MGMT; + priv->mode = IEEE80211_IF_TYPE_MNTR; } static int rtl8187_config(struct ieee80211_hw *dev, struct ieee80211_conf *conf) @@ -510,6 +565,8 @@ static int rtl8187_config_interface(struct ieee80211_hw *dev, int if_id, struct rtl8187_priv *priv = dev->priv; int i; + priv->if_id = if_id; + for (i = 0; i < ETH_ALEN; i++) rtl818x_iowrite8(priv, &priv->map->BSSID[i], conf->bssid[i]); @@ -521,14 +578,52 @@ static int rtl8187_config_interface(struct ieee80211_hw *dev, int if_id, return 0; } +static void rtl8187_configure_filter(struct ieee80211_hw *dev, + unsigned int changed_flags, + unsigned int *total_flags, + int mc_count, struct dev_addr_list *mc_list) +{ + struct rtl8187_priv *priv = dev->priv; + + *total_flags = 0; + + if (changed_flags & FIF_PROMISC_IN_BSS) + priv->rx_conf ^= RTL818X_RX_CONF_NICMAC; + if (changed_flags & FIF_ALLMULTI) + priv->rx_conf ^= RTL818X_RX_CONF_MULTICAST; + if (changed_flags & FIF_FCSFAIL) + priv->rx_conf ^= RTL818X_RX_CONF_FCS; + if (changed_flags & FIF_CONTROL) + priv->rx_conf ^= RTL818X_RX_CONF_CTRL; + if (changed_flags & FIF_OTHER_BSS) + priv->rx_conf ^= RTL818X_RX_CONF_MONITOR; + + if (mc_count > 0) + priv->rx_conf |= RTL818X_RX_CONF_MULTICAST; + + if (priv->rx_conf & RTL818X_RX_CONF_NICMAC) + *total_flags |= FIF_PROMISC_IN_BSS; + if (priv->rx_conf & RTL818X_RX_CONF_MULTICAST) + *total_flags |= FIF_ALLMULTI; + if (priv->rx_conf & RTL818X_RX_CONF_FCS) + *total_flags |= FIF_FCSFAIL; + if (priv->rx_conf & RTL818X_RX_CONF_CTRL) + *total_flags |= FIF_CONTROL; + if (priv->rx_conf & RTL818X_RX_CONF_MONITOR) + *total_flags |= FIF_OTHER_BSS; + + rtl818x_iowrite32_async(priv, &priv->map->RX_CONF, priv->rx_conf); +} + static const struct ieee80211_ops rtl8187_ops = { .tx = rtl8187_tx, - .open = rtl8187_open, + .start = rtl8187_start, .stop = rtl8187_stop, .add_interface = rtl8187_add_interface, .remove_interface = rtl8187_remove_interface, .config = rtl8187_config, .config_interface = rtl8187_config_interface, + .configure_filter = rtl8187_configure_filter, }; static void rtl8187_eeprom_register_read(struct eeprom_93cx6 *eeprom) @@ -572,6 +667,7 @@ static int __devinit rtl8187_probe(struct usb_interface *intf, struct ieee80211_channel *channel; u16 txpwr, reg; int err, i; + DECLARE_MAC_BUF(mac); dev = ieee80211_alloc_hw(sizeof(*priv), &rtl8187_ops); if (!dev) { @@ -601,11 +697,9 @@ static int __devinit rtl8187_probe(struct usb_interface *intf, priv->modes[1].rates = priv->rates; priv->modes[1].num_channels = ARRAY_SIZE(rtl818x_channels); priv->modes[1].channels = priv->channels; - priv->mode = IEEE80211_IF_TYPE_MGMT; + priv->mode = IEEE80211_IF_TYPE_MNTR; dev->flags = IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING | - IEEE80211_HW_RX_INCLUDES_FCS | - IEEE80211_HW_WEP_INCLUDE_IV | - IEEE80211_HW_DATA_NULLFUNC_ACK; + IEEE80211_HW_RX_INCLUDES_FCS; dev->extra_tx_headroom = sizeof(struct rtl8187_tx_hdr); dev->queues = 1; dev->max_rssi = 65; @@ -681,8 +775,8 @@ static int __devinit rtl8187_probe(struct usb_interface *intf, goto err_free_dev; } - printk(KERN_INFO "%s: hwaddr " 
MAC_FMT ", rtl8187 V%d + %s\n", - wiphy_name(dev->wiphy), MAC_ARG(dev->wiphy->perm_addr), + printk(KERN_INFO "%s: hwaddr %s, rtl8187 V%d + %s\n", + wiphy_name(dev->wiphy), print_mac(mac, dev->wiphy->perm_addr), priv->asic_rev, priv->rf_init == rtl8225_rf_init ? "rtl8225" : "rtl8225z2"); diff --git a/drivers/net/wireless/rtl818x.h b/drivers/net/wireless/rtl818x.h index 283de30628e1fbc4969c5ca779586783f3824b09..880d4becae312b92de01681762c919ec44730824 100644 --- a/drivers/net/wireless/rtl818x.h +++ b/drivers/net/wireless/rtl818x.h @@ -71,6 +71,7 @@ struct rtl818x_csr { #define RTL818X_RX_CONF_NICMAC (1 << 1) #define RTL818X_RX_CONF_MULTICAST (1 << 2) #define RTL818X_RX_CONF_BROADCAST (1 << 3) +#define RTL818X_RX_CONF_FCS (1 << 5) #define RTL818X_RX_CONF_DATA (1 << 18) #define RTL818X_RX_CONF_CTRL (1 << 19) #define RTL818X_RX_CONF_MGMT (1 << 20) diff --git a/drivers/net/wireless/spectrum_cs.c b/drivers/net/wireless/spectrum_cs.c index af70460f008a6a251ca85adb25ff943f20056c0a..98df9bc7836a095ffe4bb42242a0665a20c629b3 100644 --- a/drivers/net/wireless/spectrum_cs.c +++ b/drivers/net/wireless/spectrum_cs.c @@ -782,7 +782,6 @@ spectrum_cs_config(struct pcmcia_device *link) /* Ok, we have the configuration, prepare to register the netdev */ dev->base_addr = link->io.BasePort1; dev->irq = link->irq.AssignedIRQ; - SET_MODULE_OWNER(dev); card->node.major = card->node.minor = 0; /* Reset card and download firmware */ diff --git a/drivers/net/wireless/strip.c b/drivers/net/wireless/strip.c index ef32a5c1e8182a68e507907b61fda5b0463033cd..4bd14b331862948f558a74955b9c191b7f4502f4 100644 --- a/drivers/net/wireless/strip.c +++ b/drivers/net/wireless/strip.c @@ -107,6 +107,7 @@ static const char StripVersion[] = "1.3A-STUART.CHESHIRE"; #include #include #include +#include #include #include @@ -1630,8 +1631,8 @@ static void strip_IdleTask(unsigned long parameter) */ static int strip_header(struct sk_buff *skb, struct net_device *dev, - unsigned short type, void *daddr, void *saddr, - unsigned len) + unsigned short type, const void *daddr, + const void *saddr, unsigned len) { struct strip *strip_info = netdev_priv(dev); STRIP_Header *header = (STRIP_Header *) skb_push(skb, sizeof(STRIP_Header)); @@ -1971,7 +1972,7 @@ static struct net_device *get_strip_dev(struct strip *strip_info) sizeof(zero_address))) { struct net_device *dev; read_lock_bh(&dev_base_lock); - for_each_netdev(dev) { + for_each_netdev(&init_net, dev) { if (dev->type == strip_info->dev->type && !memcmp(dev->dev_addr, &strip_info->true_dev_addr, @@ -2496,6 +2497,11 @@ static int strip_close_low(struct net_device *dev) return 0; } +static const struct header_ops strip_header_ops = { + .create = strip_header, + .rebuild = strip_rebuild_header, +}; + /* * This routine is called by DDI when the * (dynamically assigned) device is registered @@ -2507,8 +2513,6 @@ static void strip_dev_setup(struct net_device *dev) * Finish setting up the DEVICE info. 
*/ - SET_MODULE_OWNER(dev); - dev->trans_start = 0; dev->last_rx = 0; dev->tx_queue_len = 30; /* Drop after 30 frames queued */ @@ -2532,8 +2536,8 @@ static void strip_dev_setup(struct net_device *dev) dev->open = strip_open_low; dev->stop = strip_close_low; dev->hard_start_xmit = strip_xmit; - dev->hard_header = strip_header; - dev->rebuild_header = strip_rebuild_header; + dev->header_ops = &strip_header_ops; + dev->set_mac_address = strip_set_mac_address; dev->get_stats = strip_get_stats; dev->change_mtu = strip_change_mtu; @@ -2571,7 +2575,7 @@ static struct strip *strip_alloc(void) return NULL; /* If no more memory, return */ - strip_info = dev->priv; + strip_info = netdev_priv(dev); strip_info->dev = dev; strip_info->magic = STRIP_MAGIC; @@ -2787,7 +2791,7 @@ static int __init strip_init_driver(void) /* * Register the status file with /proc */ - proc_net_fops_create("strip", S_IFREG | S_IRUGO, &strip_seq_fops); + proc_net_fops_create(&init_net, "strip", S_IFREG | S_IRUGO, &strip_seq_fops); return status; } @@ -2809,7 +2813,7 @@ static void __exit strip_exit_driver(void) } /* Unregister with the /proc/net file here. */ - proc_net_remove("strip"); + proc_net_remove(&init_net, "strip"); if ((i = tty_unregister_ldisc(N_STRIP))) printk(KERN_ERR "STRIP: can't unregister line discipline (err = %d)\n", i); diff --git a/drivers/net/wireless/wavelan.c b/drivers/net/wireless/wavelan.c index 1cf090d60edca03edc22382141f56254cc049776..a1f8a16878429717e4e0d628e7451ea1a324c742 100644 --- a/drivers/net/wireless/wavelan.c +++ b/drivers/net/wireless/wavelan.c @@ -880,6 +880,8 @@ static void wv_82586_reconfig(struct net_device * dev) */ static void wv_psa_show(psa_t * p) { + DECLARE_MAC_BUF(mac); + printk(KERN_DEBUG "##### WaveLAN PSA contents: #####\n"); printk(KERN_DEBUG "psa_io_base_addr_1: 0x%02X %02X %02X %02X\n", p->psa_io_base_addr_1, @@ -891,22 +893,13 @@ static void wv_psa_show(psa_t * p) printk(KERN_DEBUG "psa_holi_params: 0x%02x, ", p->psa_holi_params); printk("psa_int_req_no: %d\n", p->psa_int_req_no); #ifdef DEBUG_SHOW_UNUSED - printk(KERN_DEBUG - "psa_unused0[]: %02X:%02X:%02X:%02X:%02X:%02X:%02X\n", - p->psa_unused0[0], p->psa_unused0[1], p->psa_unused0[2], - p->psa_unused0[3], p->psa_unused0[4], p->psa_unused0[5], - p->psa_unused0[6]); + printk(KERN_DEBUG "psa_unused0[]: %s\n", + print_mac(mac, p->psa_unused0)); #endif /* DEBUG_SHOW_UNUSED */ - printk(KERN_DEBUG - "psa_univ_mac_addr[]: %02x:%02x:%02x:%02x:%02x:%02x\n", - p->psa_univ_mac_addr[0], p->psa_univ_mac_addr[1], - p->psa_univ_mac_addr[2], p->psa_univ_mac_addr[3], - p->psa_univ_mac_addr[4], p->psa_univ_mac_addr[5]); - printk(KERN_DEBUG - "psa_local_mac_addr[]: %02x:%02x:%02x:%02x:%02x:%02x\n", - p->psa_local_mac_addr[0], p->psa_local_mac_addr[1], - p->psa_local_mac_addr[2], p->psa_local_mac_addr[3], - p->psa_local_mac_addr[4], p->psa_local_mac_addr[5]); + printk(KERN_DEBUG "psa_univ_mac_addr[]: %s\n", + print_mac(mac, p->psa_univ_mac_addr)); + printk(KERN_DEBUG "psa_local_mac_addr[]: %s\n", + print_mac(mac, p->psa_local_mac_addr)); printk(KERN_DEBUG "psa_univ_local_sel: %d, ", p->psa_univ_local_sel); printk("psa_comp_number: %d, ", p->psa_comp_number); @@ -1248,14 +1241,14 @@ static inline void wv_packet_info(u8 * p, /* Packet to dump */ { /* Name of the function */ int i; int maxi; + DECLARE_MAC_BUF(mac); printk(KERN_DEBUG - "%s: %s(): dest %02X:%02X:%02X:%02X:%02X:%02X, length %d\n", - msg1, msg2, p[0], p[1], p[2], p[3], p[4], p[5], length); + "%s: %s(): dest %s, length %d\n", + msg1, msg2, print_mac(mac, p), length); 
printk(KERN_DEBUG - "%s: %s(): src %02X:%02X:%02X:%02X:%02X:%02X, type 0x%02X%02X\n", - msg1, msg2, p[6], p[7], p[8], p[9], p[10], p[11], p[12], - p[13]); + "%s: %s(): src %s, type 0x%02X%02X\n", + msg1, msg2, print_mac(mac, &p[6]), p[12], p[13]); #ifdef DEBUG_PACKET_DUMP @@ -1286,7 +1279,9 @@ static void wv_init_info(struct net_device * dev) short ioaddr = dev->base_addr; net_local *lp = (net_local *) dev->priv; psa_t psa; - int i; +#ifdef DEBUG_BASIC_SHOW + DECLARE_MAC_BUF(mac); +#endif /* Read the parameter storage area */ psa_read(ioaddr, lp->hacr, 0, (unsigned char *) &psa, sizeof(psa)); @@ -1303,10 +1298,8 @@ static void wv_init_info(struct net_device * dev) #ifdef DEBUG_BASIC_SHOW /* Now, let's go for the basic stuff. */ - printk(KERN_NOTICE "%s: WaveLAN at %#x,", dev->name, ioaddr); - for (i = 0; i < WAVELAN_ADDR_SIZE; i++) - printk("%s%02X", (i == 0) ? " " : ":", dev->dev_addr[i]); - printk(", IRQ %d", dev->irq); + printk(KERN_NOTICE "%s: WaveLAN at %#x, %s, IRQ %d", + dev->name, ioaddr, print_mac(mac, dev->dev_addr), dev->irq); /* Print current network ID. */ if (psa.psa_nwid_select) @@ -2400,9 +2393,9 @@ static const struct iw_priv_args wavelan_private_args[] = { static const struct iw_handler_def wavelan_handler_def = { - .num_standard = sizeof(wavelan_handler)/sizeof(iw_handler), - .num_private = sizeof(wavelan_private_handler)/sizeof(iw_handler), - .num_private_args = sizeof(wavelan_private_args)/sizeof(struct iw_priv_args), + .num_standard = ARRAY_SIZE(wavelan_handler), + .num_private = ARRAY_SIZE(wavelan_private_handler), + .num_private_args = ARRAY_SIZE(wavelan_private_args), .standard = wavelan_handler, .private = wavelan_private_handler, .private_args = wavelan_private_args, @@ -3596,15 +3589,15 @@ static void wv_82586_config(struct net_device * dev) WAVELAN_ADDR_SIZE >> 1); #ifdef DEBUG_CONFIG_INFO + { + DECLARE_MAC_BUF(mac); printk(KERN_DEBUG "%s: wv_82586_config(): set %d multicast addresses:\n", dev->name, lp->mc_count); for (dmi = dev->mc_list; dmi; dmi = dmi->next) - printk(KERN_DEBUG - " %02x:%02x:%02x:%02x:%02x:%02x\n", - dmi->dmi_addr[0], dmi->dmi_addr[1], - dmi->dmi_addr[2], dmi->dmi_addr[3], - dmi->dmi_addr[4], dmi->dmi_addr[5]); + printk(KERN_DEBUG " %s\n", + print_mac(mac, dmi->dmi_addr)); + } #endif } @@ -4177,7 +4170,6 @@ static int __init wavelan_config(struct net_device *dev, unsigned short ioaddr) /* Init spinlock */ spin_lock_init(&lp->spinlock); - SET_MODULE_OWNER(dev); dev->open = wavelan_open; dev->stop = wavelan_close; dev->hard_start_xmit = wavelan_packet_xmit; diff --git a/drivers/net/wireless/wavelan_cs.c b/drivers/net/wireless/wavelan_cs.c index 5740d4d4267c657b762d90db692c9a2ff45f19ab..577c647824fecd902447c561780db11a82b09caa 100644 --- a/drivers/net/wireless/wavelan_cs.c +++ b/drivers/net/wireless/wavelan_cs.c @@ -1042,6 +1042,7 @@ wv_82593_reconfig(struct net_device * dev) static void wv_psa_show(psa_t * p) { + DECLARE_MAC_BUF(mac); printk(KERN_DEBUG "##### wavelan psa contents: #####\n"); printk(KERN_DEBUG "psa_io_base_addr_1: 0x%02X %02X %02X %02X\n", p->psa_io_base_addr_1, @@ -1055,29 +1056,13 @@ wv_psa_show(psa_t * p) printk(KERN_DEBUG "psa_holi_params: 0x%02x, ", p->psa_holi_params); printk("psa_int_req_no: %d\n", p->psa_int_req_no); #ifdef DEBUG_SHOW_UNUSED - printk(KERN_DEBUG "psa_unused0[]: %02X:%02X:%02X:%02X:%02X:%02X:%02X\n", - p->psa_unused0[0], - p->psa_unused0[1], - p->psa_unused0[2], - p->psa_unused0[3], - p->psa_unused0[4], - p->psa_unused0[5], - p->psa_unused0[6]); + printk(KERN_DEBUG "psa_unused0[]: %s\n", + print_mac(mac, 
p->psa_unused0)); #endif /* DEBUG_SHOW_UNUSED */ - printk(KERN_DEBUG "psa_univ_mac_addr[]: %02x:%02x:%02x:%02x:%02x:%02x\n", - p->psa_univ_mac_addr[0], - p->psa_univ_mac_addr[1], - p->psa_univ_mac_addr[2], - p->psa_univ_mac_addr[3], - p->psa_univ_mac_addr[4], - p->psa_univ_mac_addr[5]); - printk(KERN_DEBUG "psa_local_mac_addr[]: %02x:%02x:%02x:%02x:%02x:%02x\n", - p->psa_local_mac_addr[0], - p->psa_local_mac_addr[1], - p->psa_local_mac_addr[2], - p->psa_local_mac_addr[3], - p->psa_local_mac_addr[4], - p->psa_local_mac_addr[5]); + printk(KERN_DEBUG "psa_univ_mac_addr[]: %s\n", + print_mac(mac, p->psa_univ_mac_addr)); + printk(KERN_DEBUG "psa_local_mac_addr[]: %s\n", + print_mac(mac, p->psa_local_mac_addr)); printk(KERN_DEBUG "psa_univ_local_sel: %d, ", p->psa_univ_local_sel); printk("psa_comp_number: %d, ", p->psa_comp_number); printk("psa_thr_pre_set: 0x%02x\n", p->psa_thr_pre_set); @@ -1277,11 +1262,12 @@ wv_packet_info(u_char * p, /* Packet to dump */ { int i; int maxi; + DECLARE_MAC_BUF(mac); - printk(KERN_DEBUG "%s: %s(): dest %02X:%02X:%02X:%02X:%02X:%02X, length %d\n", - msg1, msg2, p[0], p[1], p[2], p[3], p[4], p[5], length); - printk(KERN_DEBUG "%s: %s(): src %02X:%02X:%02X:%02X:%02X:%02X, type 0x%02X%02X\n", - msg1, msg2, p[6], p[7], p[8], p[9], p[10], p[11], p[12], p[13]); + printk(KERN_DEBUG "%s: %s(): dest %s, length %d\n", + msg1, msg2, print_mac(mac, p), length); + printk(KERN_DEBUG "%s: %s(): src %s, type 0x%02X%02X\n", + msg1, msg2, print_mac(mac, &p[6]), p[12], p[13]); #ifdef DEBUG_PACKET_DUMP @@ -1312,7 +1298,7 @@ wv_init_info(struct net_device * dev) { kio_addr_t base = dev->base_addr; psa_t psa; - int i; + DECLARE_MAC_BUF(mac); /* Read the parameter storage area */ psa_read(dev, 0, (unsigned char *) &psa, sizeof(psa)); @@ -1329,10 +1315,10 @@ wv_init_info(struct net_device * dev) #ifdef DEBUG_BASIC_SHOW /* Now, let's go for the basic stuff */ - printk(KERN_NOTICE "%s: WaveLAN: port %#lx, irq %d, hw_addr", - dev->name, base, dev->irq); - for(i = 0; i < WAVELAN_ADDR_SIZE; i++) - printk("%s%02X", (i == 0) ? 
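These hunks replace the open-coded "%02X:%02X:%02X:..." printk chains with the DECLARE_MAC_BUF()/print_mac() helper pair. A minimal sketch of the pattern as it is used throughout this series; my_show_hw_addr() is a hypothetical caller, kernel driver context assumed:

static void my_show_hw_addr(struct net_device *dev)
{
	DECLARE_MAC_BUF(mac);	/* 18-byte scratch buffer on the stack */

	/* print_mac() formats the six address octets as
	 * "xx:xx:xx:xx:xx:xx" into 'mac' and returns the buffer,
	 * so a single %s replaces six separate %02X arguments. */
	printk(KERN_INFO "%s: hw address %s\n",
	       dev->name, print_mac(mac, dev->dev_addr));
}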
" " : ":", dev->dev_addr[i]); + printk(KERN_NOTICE "%s: WaveLAN: port %#lx, irq %d, " + "hw_addr %s", + dev->name, base, dev->irq, + print_mac(mac, dev->dev_addr)); /* Print current network id */ if(psa.psa_nwid_select) @@ -2719,9 +2705,9 @@ static const iw_handler wavelan_private_handler[] = static const struct iw_handler_def wavelan_handler_def = { - .num_standard = sizeof(wavelan_handler)/sizeof(iw_handler), - .num_private = sizeof(wavelan_private_handler)/sizeof(iw_handler), - .num_private_args = sizeof(wavelan_private_args)/sizeof(struct iw_priv_args), + .num_standard = ARRAY_SIZE(wavelan_handler), + .num_private = ARRAY_SIZE(wavelan_private_handler), + .num_private_args = ARRAY_SIZE(wavelan_private_args), .standard = wavelan_handler, .private = wavelan_private_handler, .private_args = wavelan_private_args, @@ -3691,12 +3677,12 @@ wv_82593_config(struct net_device * dev) int addrs_len = WAVELAN_ADDR_SIZE * lp->mc_count; #ifdef DEBUG_CONFIG_INFO + DECLARE_MAC_BUF(mac); printk(KERN_DEBUG "%s: wv_hw_config(): set %d multicast addresses:\n", dev->name, lp->mc_count); for(dmi=dev->mc_list; dmi; dmi=dmi->next) - printk(KERN_DEBUG " %02x:%02x:%02x:%02x:%02x:%02x\n", - dmi->dmi_addr[0], dmi->dmi_addr[1], dmi->dmi_addr[2], - dmi->dmi_addr[3], dmi->dmi_addr[4], dmi->dmi_addr[5] ); + printk(KERN_DEBUG " %s\n", + print_mac(mac, dmi->dmi_addr)); #endif /* Initialize adapter's ethernet multicast addresses */ @@ -4577,7 +4563,6 @@ wavelan_probe(struct pcmcia_device *p_dev) lp->dev = dev; /* wavelan NET3 callbacks */ - SET_MODULE_OWNER(dev); dev->open = &wavelan_open; dev->stop = &wavelan_close; dev->hard_start_xmit = &wavelan_packet_xmit; diff --git a/drivers/net/wireless/wl3501_cs.c b/drivers/net/wireless/wl3501_cs.c index c8b5c2271938348cb0890419effcb3160cb32ff6..42a36b3f3ff78789feb1510dbfca4c4bc13b8fe3 100644 --- a/drivers/net/wireless/wl3501_cs.c +++ b/drivers/net/wireless/wl3501_cs.c @@ -859,12 +859,11 @@ static int wl3501_esbq_confirm(struct wl3501_card *this) static void wl3501_online(struct net_device *dev) { - struct wl3501_card *this = dev->priv; + struct wl3501_card *this = netdev_priv(dev); + DECLARE_MAC_BUF(mac); - printk(KERN_INFO "%s: Wireless LAN online. BSSID: " - "%02X %02X %02X %02X %02X %02X\n", dev->name, - this->bssid[0], this->bssid[1], this->bssid[2], - this->bssid[3], this->bssid[4], this->bssid[5]); + printk(KERN_INFO "%s: Wireless LAN online. 
BSSID: %s\n", + dev->name, print_mac(mac, this->bssid)); netif_wake_queue(dev); } @@ -907,7 +906,7 @@ static int wl3501_mgmt_association(struct wl3501_card *this) static void wl3501_mgmt_join_confirm(struct net_device *dev, u16 addr) { - struct wl3501_card *this = dev->priv; + struct wl3501_card *this = netdev_priv(dev); struct wl3501_join_confirm sig; dprintk(3, "entry"); @@ -1046,7 +1045,7 @@ static inline void wl3501_start_confirm_interrupt(struct net_device *dev, static inline void wl3501_assoc_confirm_interrupt(struct net_device *dev, u16 addr) { - struct wl3501_card *this = dev->priv; + struct wl3501_card *this = netdev_priv(dev); struct wl3501_assoc_confirm sig; dprintk(3, "entry"); @@ -1075,7 +1074,7 @@ static inline void wl3501_rx_interrupt(struct net_device *dev) int morepkts; u16 addr; u8 sig_id; - struct wl3501_card *this = dev->priv; + struct wl3501_card *this = netdev_priv(dev); dprintk(3, "entry"); loop: @@ -1257,7 +1256,7 @@ static int wl3501_init_firmware(struct wl3501_card *this) static int wl3501_close(struct net_device *dev) { - struct wl3501_card *this = dev->priv; + struct wl3501_card *this = netdev_priv(dev); int rc = -ENODEV; unsigned long flags; struct pcmcia_device *link; @@ -1289,7 +1288,7 @@ static int wl3501_close(struct net_device *dev) */ static int wl3501_reset(struct net_device *dev) { - struct wl3501_card *this = dev->priv; + struct wl3501_card *this = netdev_priv(dev); int rc = -ENODEV; wl3501_block_interrupt(this); @@ -1318,7 +1317,7 @@ static int wl3501_reset(struct net_device *dev) static void wl3501_tx_timeout(struct net_device *dev) { - struct wl3501_card *this = dev->priv; + struct wl3501_card *this = netdev_priv(dev); struct net_device_stats *stats = &this->stats; unsigned long flags; int rc; @@ -1344,7 +1343,7 @@ static void wl3501_tx_timeout(struct net_device *dev) static int wl3501_hard_start_xmit(struct sk_buff *skb, struct net_device *dev) { int enabled, rc; - struct wl3501_card *this = dev->priv; + struct wl3501_card *this = netdev_priv(dev); unsigned long flags; spin_lock_irqsave(&this->lock, flags); @@ -1371,7 +1370,7 @@ static int wl3501_hard_start_xmit(struct sk_buff *skb, struct net_device *dev) static int wl3501_open(struct net_device *dev) { int rc = -ENODEV; - struct wl3501_card *this = dev->priv; + struct wl3501_card *this = netdev_priv(dev); unsigned long flags; struct pcmcia_device *link; link = this->p_dev; @@ -1410,14 +1409,14 @@ static int wl3501_open(struct net_device *dev) static struct net_device_stats *wl3501_get_stats(struct net_device *dev) { - struct wl3501_card *this = dev->priv; + struct wl3501_card *this = netdev_priv(dev); return &this->stats; } static struct iw_statistics *wl3501_get_wireless_stats(struct net_device *dev) { - struct wl3501_card *this = dev->priv; + struct wl3501_card *this = netdev_priv(dev); struct iw_statistics *wstats = &this->wstats; u32 value; /* size checked: it is u32 */ @@ -1497,7 +1496,7 @@ static int wl3501_get_name(struct net_device *dev, struct iw_request_info *info, static int wl3501_set_freq(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { - struct wl3501_card *this = dev->priv; + struct wl3501_card *this = netdev_priv(dev); int channel = wrqu->freq.m; int rc = -EINVAL; @@ -1511,7 +1510,7 @@ static int wl3501_set_freq(struct net_device *dev, struct iw_request_info *info, static int wl3501_get_freq(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { - struct wl3501_card *this = dev->priv; + struct 
wl3501_card *this = netdev_priv(dev); wrqu->freq.m = wl3501_chan2freq[this->chan - 1] * 100000; wrqu->freq.e = 1; @@ -1526,7 +1525,7 @@ static int wl3501_set_mode(struct net_device *dev, struct iw_request_info *info, if (wrqu->mode == IW_MODE_INFRA || wrqu->mode == IW_MODE_ADHOC || wrqu->mode == IW_MODE_AUTO) { - struct wl3501_card *this = dev->priv; + struct wl3501_card *this = netdev_priv(dev); this->net_type = wrqu->mode; rc = wl3501_reset(dev); @@ -1537,7 +1536,7 @@ static int wl3501_set_mode(struct net_device *dev, struct iw_request_info *info, static int wl3501_get_mode(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { - struct wl3501_card *this = dev->priv; + struct wl3501_card *this = netdev_priv(dev); wrqu->mode = this->net_type; return 0; @@ -1546,7 +1545,7 @@ static int wl3501_get_mode(struct net_device *dev, struct iw_request_info *info, static int wl3501_get_sens(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { - struct wl3501_card *this = dev->priv; + struct wl3501_card *this = netdev_priv(dev); wrqu->sens.value = this->rssi; wrqu->sens.disabled = !wrqu->sens.value; @@ -1577,7 +1576,7 @@ static int wl3501_get_range(struct net_device *dev, static int wl3501_set_wap(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { - struct wl3501_card *this = dev->priv; + struct wl3501_card *this = netdev_priv(dev); static const u8 bcast[ETH_ALEN] = { 255, 255, 255, 255, 255, 255 }; int rc = -EINVAL; @@ -1597,7 +1596,7 @@ static int wl3501_set_wap(struct net_device *dev, struct iw_request_info *info, static int wl3501_get_wap(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { - struct wl3501_card *this = dev->priv; + struct wl3501_card *this = netdev_priv(dev); wrqu->ap_addr.sa_family = ARPHRD_ETHER; memcpy(wrqu->ap_addr.sa_data, this->bssid, ETH_ALEN); @@ -1616,7 +1615,7 @@ static int wl3501_set_scan(struct net_device *dev, struct iw_request_info *info, static int wl3501_get_scan(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { - struct wl3501_card *this = dev->priv; + struct wl3501_card *this = netdev_priv(dev); int i; char *current_ev = extra; struct iw_event iwe; @@ -1666,7 +1665,7 @@ static int wl3501_set_essid(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { - struct wl3501_card *this = dev->priv; + struct wl3501_card *this = netdev_priv(dev); if (wrqu->data.flags) { iw_set_mgmt_info_element(IW_MGMT_INFO_ELEMENT_SSID, @@ -1683,7 +1682,7 @@ static int wl3501_get_essid(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { - struct wl3501_card *this = dev->priv; + struct wl3501_card *this = netdev_priv(dev); unsigned long flags; spin_lock_irqsave(&this->lock, flags); @@ -1697,7 +1696,7 @@ static int wl3501_get_essid(struct net_device *dev, static int wl3501_set_nick(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { - struct wl3501_card *this = dev->priv; + struct wl3501_card *this = netdev_priv(dev); if (wrqu->data.length > sizeof(this->nick)) return -E2BIG; @@ -1708,7 +1707,7 @@ static int wl3501_set_nick(struct net_device *dev, struct iw_request_info *info, static int wl3501_get_nick(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { - struct wl3501_card *this = dev->priv; + struct wl3501_card *this = 
netdev_priv(dev); strlcpy(extra, this->nick, 32); wrqu->data.length = strlen(extra); @@ -1733,7 +1732,7 @@ static int wl3501_get_rts_threshold(struct net_device *dev, union iwreq_data *wrqu, char *extra) { u16 threshold; /* size checked: it is u16 */ - struct wl3501_card *this = dev->priv; + struct wl3501_card *this = netdev_priv(dev); int rc = wl3501_get_mib_value(this, WL3501_MIB_ATTR_RTS_THRESHOLD, &threshold, sizeof(threshold)); if (!rc) { @@ -1749,7 +1748,7 @@ static int wl3501_get_frag_threshold(struct net_device *dev, union iwreq_data *wrqu, char *extra) { u16 threshold; /* size checked: it is u16 */ - struct wl3501_card *this = dev->priv; + struct wl3501_card *this = netdev_priv(dev); int rc = wl3501_get_mib_value(this, WL3501_MIB_ATTR_FRAG_THRESHOLD, &threshold, sizeof(threshold)); if (!rc) { @@ -1765,7 +1764,7 @@ static int wl3501_get_txpow(struct net_device *dev, union iwreq_data *wrqu, char *extra) { u16 txpow; - struct wl3501_card *this = dev->priv; + struct wl3501_card *this = netdev_priv(dev); int rc = wl3501_get_mib_value(this, WL3501_MIB_ATTR_CURRENT_TX_PWR_LEVEL, &txpow, sizeof(txpow)); @@ -1787,7 +1786,7 @@ static int wl3501_get_retry(struct net_device *dev, union iwreq_data *wrqu, char *extra) { u8 retry; /* size checked: it is u8 */ - struct wl3501_card *this = dev->priv; + struct wl3501_card *this = netdev_priv(dev); int rc = wl3501_get_mib_value(this, WL3501_MIB_ATTR_LONG_RETRY_LIMIT, &retry, sizeof(retry)); @@ -1814,7 +1813,7 @@ static int wl3501_get_encode(struct net_device *dev, union iwreq_data *wrqu, char *extra) { u8 implemented, restricted, keys[100], len_keys, tocopy; - struct wl3501_card *this = dev->priv; + struct wl3501_card *this = netdev_priv(dev); int rc = wl3501_get_mib_value(this, WL3501_MIB_ATTR_PRIV_OPT_IMPLEMENTED, &implemented, sizeof(implemented)); @@ -1841,7 +1840,6 @@ static int wl3501_get_encode(struct net_device *dev, tocopy = min_t(u8, len_keys, wrqu->encoding.length); tocopy = min_t(u8, tocopy, 100); wrqu->encoding.length = tocopy; - memset(extra, 0, tocopy); memcpy(extra, keys, tocopy); out: return rc; @@ -1852,7 +1850,7 @@ static int wl3501_get_power(struct net_device *dev, union iwreq_data *wrqu, char *extra) { u8 pwr_state; - struct wl3501_card *this = dev->priv; + struct wl3501_card *this = netdev_priv(dev); int rc = wl3501_get_mib_value(this, WL3501_MIB_ATTR_CURRENT_PWR_STATE, &pwr_state, sizeof(pwr_state)); @@ -1894,7 +1892,7 @@ static const iw_handler wl3501_handler[] = { }; static const struct iw_handler_def wl3501_handler_def = { - .num_standard = sizeof(wl3501_handler) / sizeof(iw_handler), + .num_standard = ARRAY_SIZE(wl3501_handler), .standard = (iw_handler *)wl3501_handler, .get_wireless_stats = wl3501_get_wireless_stats, }; @@ -1937,7 +1935,7 @@ static int wl3501_probe(struct pcmcia_device *p_dev) dev->tx_timeout = wl3501_tx_timeout; dev->watchdog_timeo = 5 * HZ; dev->get_stats = wl3501_get_stats; - this = dev->priv; + this = netdev_priv(dev); this->wireless_data.spy_data = &this->spy_data; this->p_dev = p_dev; dev->wireless_data = &this->wireless_data; @@ -1967,6 +1965,7 @@ static int wl3501_config(struct pcmcia_device *link) struct net_device *dev = link->priv; int i = 0, j, last_fn, last_ret; struct wl3501_card *this; + DECLARE_MAC_BUF(mac); /* Try allocating IO ports. This tries a few fixed addresses. 
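All of the dev->priv dereferences in wl3501_cs are converted to the netdev_priv() accessor here. A minimal sketch of the idiom, assuming a hypothetical struct my_card; netdev_priv() returns the private area that alloc_etherdev() reserves directly behind struct net_device:

#include <linux/netdevice.h>
#include <linux/etherdevice.h>

struct my_card {		/* hypothetical per-device state */
	spinlock_t lock;
};

static struct net_device *my_alloc_dev(void)
{
	struct net_device *dev = alloc_etherdev(sizeof(struct my_card));
	struct my_card *this;

	if (!dev)
		return NULL;

	/* Same memory dev->priv used to point at, but reached through
	 * the official helper instead of the raw pointer. */
	this = netdev_priv(dev);
	spin_lock_init(&this->lock);
	return dev;
}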
If you * want, you can also read the card's config table to pick addresses -- @@ -2004,9 +2003,7 @@ static int wl3501_config(struct pcmcia_device *link) goto failed; } - SET_MODULE_OWNER(dev); - - this = dev->priv; + this = netdev_priv(dev); /* * At this point, the dev_node_t structure(s) should be initialized and * arranged in a linked list at link->dev_node. @@ -2022,14 +2019,14 @@ static int wl3501_config(struct pcmcia_device *link) } strcpy(this->node.dev_name, dev->name); - /* print probe information */ - printk(KERN_INFO "%s: wl3501 @ 0x%3.3x, IRQ %d, MAC addr in flash ROM:", - dev->name, this->base_addr, (int)dev->irq); - for (i = 0; i < 6; i++) { + for (i = 0; i < 6; i++) dev->dev_addr[i] = ((char *)&this->mac_addr)[i]; - printk("%c%02x", i ? ':' : ' ', dev->dev_addr[i]); - } - printk("\n"); + + /* print probe information */ + printk(KERN_INFO "%s: wl3501 @ 0x%3.3x, IRQ %d, " + "MAC addr in flash ROM:%s\n", + dev->name, this->base_addr, (int)dev->irq, + print_mac(mac, dev->dev_addr)); /* * Initialize card parameters - added by jss */ @@ -2079,7 +2076,7 @@ static int wl3501_suspend(struct pcmcia_device *link) { struct net_device *dev = link->priv; - wl3501_pwr_mgmt(dev->priv, WL3501_SUSPEND); + wl3501_pwr_mgmt(netdev_priv(dev), WL3501_SUSPEND); if (link->open) netif_device_detach(dev); @@ -2090,7 +2087,7 @@ static int wl3501_resume(struct pcmcia_device *link) { struct net_device *dev = link->priv; - wl3501_pwr_mgmt(dev->priv, WL3501_RESUME); + wl3501_pwr_mgmt(netdev_priv(dev), WL3501_RESUME); if (link->open) { wl3501_reset(dev); netif_device_attach(dev); diff --git a/drivers/net/wireless/zd1211rw/Makefile b/drivers/net/wireless/zd1211rw/Makefile index 4d505903352c3f7235f494dd01196ce2bf200221..7a2f2a98edab61cfce719d163861c45041969823 100644 --- a/drivers/net/wireless/zd1211rw/Makefile +++ b/drivers/net/wireless/zd1211rw/Makefile @@ -4,7 +4,7 @@ zd1211rw-objs := zd_chip.o zd_ieee80211.o \ zd_mac.o zd_netdev.o \ zd_rf_al2230.o zd_rf_rf2959.o \ zd_rf_al7230b.o zd_rf_uw2453.o \ - zd_rf.o zd_usb.o zd_util.o + zd_rf.o zd_usb.o ifeq ($(CONFIG_ZD1211RW_DEBUG),y) EXTRA_CFLAGS += -DDEBUG diff --git a/drivers/net/wireless/zd1211rw/zd_chip.c b/drivers/net/wireless/zd1211rw/zd_chip.c index c39f1984b84df33d35747f8badd7eff5ea266c7b..f831b68f1b9cb7b562f8299cb90fb70ba8712a69 100644 --- a/drivers/net/wireless/zd1211rw/zd_chip.c +++ b/drivers/net/wireless/zd1211rw/zd_chip.c @@ -28,7 +28,6 @@ #include "zd_ieee80211.h" #include "zd_mac.h" #include "zd_rf.h" -#include "zd_util.h" void zd_chip_init(struct zd_chip *chip, struct net_device *netdev, @@ -106,7 +105,7 @@ int zd_ioread32v_locked(struct zd_chip *chip, u32 *values, const zd_addr_t *addr { int r; int i; - zd_addr_t *a16 = (zd_addr_t *)NULL; + zd_addr_t *a16; u16 *v16; unsigned int count16; @@ -377,6 +376,7 @@ int zd_write_mac_addr(struct zd_chip *chip, const u8 *mac_addr) [0] = { .addr = CR_MAC_ADDR_P1 }, [1] = { .addr = CR_MAC_ADDR_P2 }, }; + DECLARE_MAC_BUF(mac); reqs[0].value = (mac_addr[3] << 24) | (mac_addr[2] << 16) @@ -386,7 +386,7 @@ int zd_write_mac_addr(struct zd_chip *chip, const u8 *mac_addr) | mac_addr[4]; dev_dbg_f(zd_chip_dev(chip), - "mac addr " MAC_FMT "\n", MAC_ARG(mac_addr)); + "mac addr %s\n", print_mac(mac, mac_addr)); mutex_lock(&chip->mutex); r = zd_iowrite32a_locked(chip, reqs, ARRAY_SIZE(reqs)); @@ -500,8 +500,6 @@ int zd_chip_lock_phy_regs(struct zd_chip *chip) return r; } - dev_dbg_f(zd_chip_dev(chip), - "CR_REG1: 0x%02x -> 0x%02x\n", tmp, tmp & ~UNLOCK_PHY_REGS); tmp &= ~UNLOCK_PHY_REGS; r = zd_iowrite32_locked(chip, 
tmp, CR_REG1); @@ -523,8 +521,6 @@ int zd_chip_unlock_phy_regs(struct zd_chip *chip) return r; } - dev_dbg_f(zd_chip_dev(chip), - "CR_REG1: 0x%02x -> 0x%02x\n", tmp, tmp | UNLOCK_PHY_REGS); tmp |= UNLOCK_PHY_REGS; r = zd_iowrite32_locked(chip, tmp, CR_REG1); @@ -841,8 +837,6 @@ static int get_aw_pt_bi(struct zd_chip *chip, struct aw_pt_bi *s) s->atim_wnd_period = values[0]; s->pre_tbtt = values[1]; s->beacon_interval = values[2]; - dev_dbg_f(zd_chip_dev(chip), "aw %u pt %u bi %u\n", - s->atim_wnd_period, s->pre_tbtt, s->beacon_interval); return 0; } @@ -864,9 +858,6 @@ static int set_aw_pt_bi(struct zd_chip *chip, struct aw_pt_bi *s) reqs[2].addr = CR_BCN_INTERVAL; reqs[2].value = s->beacon_interval; - dev_dbg_f(zd_chip_dev(chip), - "aw %u pt %u bi %u\n", s->atim_wnd_period, s->pre_tbtt, - s->beacon_interval); return zd_iowrite32a_locked(chip, reqs, ARRAY_SIZE(reqs)); } @@ -1018,19 +1009,19 @@ int zd_chip_set_rts_cts_rate_locked(struct zd_chip *chip, u32 value = 0; /* Modulation bit */ - if (ZD_CS_TYPE(rts_rate) == ZD_CS_OFDM) + if (ZD_MODULATION_TYPE(rts_rate) == ZD_OFDM) rts_mod = ZD_RX_OFDM; dev_dbg_f(zd_chip_dev(chip), "rts_rate=%x preamble=%x\n", rts_rate, preamble); - value |= rts_rate << RTSCTS_SH_RTS_RATE; + value |= ZD_PURE_RATE(rts_rate) << RTSCTS_SH_RTS_RATE; value |= rts_mod << RTSCTS_SH_RTS_MOD_TYPE; value |= preamble << RTSCTS_SH_RTS_PMB_TYPE; value |= preamble << RTSCTS_SH_CTS_PMB_TYPE; /* We always send 11M self-CTS messages, like the vendor driver. */ - value |= ZD_CCK_RATE_11M << RTSCTS_SH_CTS_RATE; + value |= ZD_PURE_RATE(ZD_CCK_RATE_11M) << RTSCTS_SH_CTS_RATE; value |= ZD_RX_CCK << RTSCTS_SH_CTS_MOD_TYPE; return zd_iowrite32_locked(chip, value, CR_RTS_CTS_RATE); @@ -1160,16 +1151,12 @@ int zd_chip_init_hw(struct zd_chip *chip) static int update_pwr_int(struct zd_chip *chip, u8 channel) { u8 value = chip->pwr_int_values[channel - 1]; - dev_dbg_f(zd_chip_dev(chip), "channel %d pwr_int %#04x\n", - channel, value); return zd_iowrite16_locked(chip, value, CR31); } static int update_pwr_cal(struct zd_chip *chip, u8 channel) { u8 value = chip->pwr_cal_values[channel-1]; - dev_dbg_f(zd_chip_dev(chip), "channel %d pwr_cal %#04x\n", - channel, value); return zd_iowrite16_locked(chip, value, CR68); } @@ -1184,9 +1171,6 @@ static int update_ofdm_cal(struct zd_chip *chip, u8 channel) ioreqs[2].addr = CR65; ioreqs[2].value = chip->ofdm_cal_values[OFDM_54M_INDEX][channel-1]; - dev_dbg_f(zd_chip_dev(chip), - "channel %d ofdm_cal 36M %#04x 48M %#04x 54M %#04x\n", - channel, ioreqs[0].value, ioreqs[1].value, ioreqs[2].value); return zd_iowrite16a_locked(chip, ioreqs, ARRAY_SIZE(ioreqs)); } @@ -1344,7 +1328,7 @@ int zd_chip_set_basic_rates_locked(struct zd_chip *chip, u16 cr_rates) return zd_iowrite32_locked(chip, cr_rates, CR_BASIC_RATE_TBL); } -static int ofdm_qual_db(u8 status_quality, u8 rate, unsigned int size) +static int ofdm_qual_db(u8 status_quality, u8 zd_rate, unsigned int size) { static const u16 constants[] = { 715, 655, 585, 540, 470, 410, 360, 315, @@ -1358,7 +1342,7 @@ static int ofdm_qual_db(u8 status_quality, u8 rate, unsigned int size) /* It seems that their quality parameter is somehow per signal * and is now transferred per bit. 
*/ - switch (rate) { + switch (zd_rate) { case ZD_OFDM_RATE_6M: case ZD_OFDM_RATE_12M: case ZD_OFDM_RATE_24M: @@ -1385,7 +1369,7 @@ static int ofdm_qual_db(u8 status_quality, u8 rate, unsigned int size) break; } - switch (rate) { + switch (zd_rate) { case ZD_OFDM_RATE_6M: case ZD_OFDM_RATE_9M: i += 3; @@ -1409,11 +1393,11 @@ static int ofdm_qual_db(u8 status_quality, u8 rate, unsigned int size) return i; } -static int ofdm_qual_percent(u8 status_quality, u8 rate, unsigned int size) +static int ofdm_qual_percent(u8 status_quality, u8 zd_rate, unsigned int size) { int r; - r = ofdm_qual_db(status_quality, rate, size); + r = ofdm_qual_db(status_quality, zd_rate, size); ZD_ASSERT(r >= 0); if (r < 0) r = 0; @@ -1474,12 +1458,17 @@ static int cck_qual_percent(u8 status_quality) return r <= 100 ? r : 100; } +static inline u8 zd_rate_from_ofdm_plcp_header(const void *rx_frame) +{ + return ZD_OFDM | zd_ofdm_plcp_header_rate(rx_frame); +} + u8 zd_rx_qual_percent(const void *rx_frame, unsigned int size, const struct rx_status *status) { return (status->frame_status&ZD_RX_OFDM) ? ofdm_qual_percent(status->signal_quality_ofdm, - zd_ofdm_plcp_header_rate(rx_frame), + zd_rate_from_ofdm_plcp_header(rx_frame), size) : cck_qual_percent(status->signal_quality_cck); } @@ -1495,32 +1484,32 @@ u8 zd_rx_strength_percent(u8 rssi) u16 zd_rx_rate(const void *rx_frame, const struct rx_status *status) { static const u16 ofdm_rates[] = { - [ZD_OFDM_RATE_6M] = 60, - [ZD_OFDM_RATE_9M] = 90, - [ZD_OFDM_RATE_12M] = 120, - [ZD_OFDM_RATE_18M] = 180, - [ZD_OFDM_RATE_24M] = 240, - [ZD_OFDM_RATE_36M] = 360, - [ZD_OFDM_RATE_48M] = 480, - [ZD_OFDM_RATE_54M] = 540, + [ZD_OFDM_PLCP_RATE_6M] = 60, + [ZD_OFDM_PLCP_RATE_9M] = 90, + [ZD_OFDM_PLCP_RATE_12M] = 120, + [ZD_OFDM_PLCP_RATE_18M] = 180, + [ZD_OFDM_PLCP_RATE_24M] = 240, + [ZD_OFDM_PLCP_RATE_36M] = 360, + [ZD_OFDM_PLCP_RATE_48M] = 480, + [ZD_OFDM_PLCP_RATE_54M] = 540, }; u16 rate; if (status->frame_status & ZD_RX_OFDM) { + /* Deals with PLCP OFDM rate (not zd_rates) */ u8 ofdm_rate = zd_ofdm_plcp_header_rate(rx_frame); rate = ofdm_rates[ofdm_rate & 0xf]; } else { - u8 cck_rate = zd_cck_plcp_header_rate(rx_frame); - switch (cck_rate) { - case ZD_CCK_SIGNAL_1M: + switch (zd_cck_plcp_header_signal(rx_frame)) { + case ZD_CCK_PLCP_SIGNAL_1M: rate = 10; break; - case ZD_CCK_SIGNAL_2M: + case ZD_CCK_PLCP_SIGNAL_2M: rate = 20; break; - case ZD_CCK_SIGNAL_5M5: + case ZD_CCK_PLCP_SIGNAL_5M5: rate = 55; break; - case ZD_CCK_SIGNAL_11M: + case ZD_CCK_PLCP_SIGNAL_11M: rate = 110; break; default: @@ -1638,7 +1627,5 @@ int zd_chip_set_multicast_hash(struct zd_chip *chip, { CR_GROUP_HASH_P2, hash->high }, }; - dev_dbg_f(zd_chip_dev(chip), "hash l 0x%08x h 0x%08x\n", - ioreqs[0].value, ioreqs[1].value); return zd_iowrite32a(chip, ioreqs, ARRAY_SIZE(ioreqs)); } diff --git a/drivers/net/wireless/zd1211rw/zd_chip.h b/drivers/net/wireless/zd1211rw/zd_chip.h index f4698576ab7106e267fb4e0a98e495df0ac0c71f..8009b70213e2d21abbda4a7842cb4d078d0cddc7 100644 --- a/drivers/net/wireless/zd1211rw/zd_chip.h +++ b/drivers/net/wireless/zd1211rw/zd_chip.h @@ -871,11 +871,6 @@ static inline int zd_chip_set_basic_rates(struct zd_chip *chip, u16 cr_rates) return r; } -static inline int zd_chip_set_rx_filter(struct zd_chip *chip, u32 filter) -{ - return zd_iowrite32(chip, CR_RX_FILTER, filter); -} - int zd_chip_lock_phy_regs(struct zd_chip *chip); int zd_chip_unlock_phy_regs(struct zd_chip *chip); diff --git a/drivers/net/wireless/zd1211rw/zd_def.h b/drivers/net/wireless/zd1211rw/zd_def.h index 
deb99d1eaa77941fb5594635fde5fdb4b5976663..505b4d7dd0e29003f7ce2e825f469a5a895e77f1 100644 --- a/drivers/net/wireless/zd1211rw/zd_def.h +++ b/drivers/net/wireless/zd1211rw/zd_def.h @@ -21,7 +21,6 @@ #include #include #include -#include typedef u16 __nocast zd_addr_t; diff --git a/drivers/net/wireless/zd1211rw/zd_ieee80211.h b/drivers/net/wireless/zd1211rw/zd_ieee80211.h index c4f36d39642b1bdeccf8106415da7a337448acf9..fbf6491dce7e00d45a0ecdaca36ca76a3f0b0a0e 100644 --- a/drivers/net/wireless/zd1211rw/zd_ieee80211.h +++ b/drivers/net/wireless/zd1211rw/zd_ieee80211.h @@ -43,21 +43,25 @@ struct ofdm_plcp_header { __le16 service; } __attribute__((packed)); -static inline u8 zd_ofdm_plcp_header_rate( - const struct ofdm_plcp_header *header) +static inline u8 zd_ofdm_plcp_header_rate(const struct ofdm_plcp_header *header) { return header->prefix[0] & 0xf; } -/* These are referred to as zd_rates */ -#define ZD_OFDM_RATE_6M 0xb -#define ZD_OFDM_RATE_9M 0xf -#define ZD_OFDM_RATE_12M 0xa -#define ZD_OFDM_RATE_18M 0xe -#define ZD_OFDM_RATE_24M 0x9 -#define ZD_OFDM_RATE_36M 0xd -#define ZD_OFDM_RATE_48M 0x8 -#define ZD_OFDM_RATE_54M 0xc +/* The following defines give the encoding of the 4-bit rate field in the + * OFDM (802.11a/802.11g) PLCP header. Notify that these values are used to + * define the zd-rate values for OFDM. + * + * See the struct zd_ctrlset definition in zd_mac.h. + */ +#define ZD_OFDM_PLCP_RATE_6M 0xb +#define ZD_OFDM_PLCP_RATE_9M 0xf +#define ZD_OFDM_PLCP_RATE_12M 0xa +#define ZD_OFDM_PLCP_RATE_18M 0xe +#define ZD_OFDM_PLCP_RATE_24M 0x9 +#define ZD_OFDM_PLCP_RATE_36M 0xd +#define ZD_OFDM_PLCP_RATE_48M 0x8 +#define ZD_OFDM_PLCP_RATE_54M 0xc struct cck_plcp_header { u8 signal; @@ -66,15 +70,22 @@ struct cck_plcp_header { __le16 crc16; } __attribute__((packed)); -static inline u8 zd_cck_plcp_header_rate(const struct cck_plcp_header *header) +static inline u8 zd_cck_plcp_header_signal(const struct cck_plcp_header *header) { return header->signal; } -#define ZD_CCK_SIGNAL_1M 0x0a -#define ZD_CCK_SIGNAL_2M 0x14 -#define ZD_CCK_SIGNAL_5M5 0x37 -#define ZD_CCK_SIGNAL_11M 0x6e +/* These defines give the encodings of the signal field in the 802.11b PLCP + * header. The signal field gives the bit rate of the following packet. Even + * if technically wrong we use CCK here also for the 1 MBit/s and 2 MBit/s + * rate to stay consistent with Zydas and our use of the term. + * + * Notify that these values are *not* used in the zd-rates. + */ +#define ZD_CCK_PLCP_SIGNAL_1M 0x0a +#define ZD_CCK_PLCP_SIGNAL_2M 0x14 +#define ZD_CCK_PLCP_SIGNAL_5M5 0x37 +#define ZD_CCK_PLCP_SIGNAL_11M 0x6e enum ieee80211_std { IEEE80211B = 0x01, diff --git a/drivers/net/wireless/zd1211rw/zd_mac.c b/drivers/net/wireless/zd1211rw/zd_mac.c index 26869d107e52acbc4c04b322272b0806cea74ca2..a903645e157a70b62111f26f7159fd4b02bf137a 100644 --- a/drivers/net/wireless/zd1211rw/zd_mac.c +++ b/drivers/net/wireless/zd1211rw/zd_mac.c @@ -28,7 +28,6 @@ #include "zd_ieee80211.h" #include "zd_netdev.h" #include "zd_rf.h" -#include "zd_util.h" static void ieee_init(struct ieee80211_device *ieee); static void softmac_init(struct ieee80211softmac_device *sm); @@ -161,13 +160,33 @@ void zd_mac_clear(struct zd_mac *mac) ZD_MEMCLEAR(mac, sizeof(struct zd_mac)); } -static int reset_mode(struct zd_mac *mac) +static int set_rx_filter(struct zd_mac *mac) { struct ieee80211_device *ieee = zd_mac_to_ieee80211(mac); u32 filter = (ieee->iw_mode == IW_MODE_MONITOR) ? 
~0 : STA_RX_FILTER; return zd_iowrite32(&mac->chip, CR_RX_FILTER, filter); } +static int set_sniffer(struct zd_mac *mac) +{ + struct ieee80211_device *ieee = zd_mac_to_ieee80211(mac); + return zd_iowrite32(&mac->chip, CR_SNIFFER_ON, + ieee->iw_mode == IW_MODE_MONITOR ? 1 : 0); + return 0; +} + +static int set_mc_hash(struct zd_mac *mac) +{ + struct zd_mc_hash hash; + struct ieee80211_device *ieee = zd_mac_to_ieee80211(mac); + + zd_mc_clear(&hash); + if (ieee->iw_mode == IW_MODE_MONITOR) + zd_mc_add_all(&hash); + + return zd_chip_set_multicast_hash(&mac->chip, &hash); +} + int zd_mac_open(struct net_device *netdev) { struct zd_mac *mac = zd_netdev_mac(netdev); @@ -194,7 +213,13 @@ int zd_mac_open(struct net_device *netdev) r = zd_chip_set_basic_rates(chip, CR_RATES_80211B | CR_RATES_80211G); if (r < 0) goto disable_int; - r = reset_mode(mac); + r = set_rx_filter(mac); + if (r) + goto disable_int; + r = set_sniffer(mac); + if (r) + goto disable_int; + r = set_mc_hash(mac); if (r) goto disable_int; r = zd_chip_switch_radio_on(chip); @@ -263,12 +288,13 @@ int zd_mac_set_mac_address(struct net_device *netdev, void *p) struct sockaddr *addr = p; struct zd_mac *mac = zd_netdev_mac(netdev); struct zd_chip *chip = &mac->chip; + DECLARE_MAC_BUF(mac2); if (!is_valid_ether_addr(addr->sa_data)) return -EADDRNOTAVAIL; dev_dbg_f(zd_mac_dev(mac), - "Setting MAC to " MAC_FMT "\n", MAC_ARG(addr->sa_data)); + "Setting MAC to %s\n", print_mac(mac2, addr->sa_data)); if (netdev->flags & IFF_UP) { r = zd_write_mac_addr(chip, addr->sa_data); @@ -298,18 +324,21 @@ static void set_multicast_hash_handler(struct work_struct *work) void zd_mac_set_multicast_list(struct net_device *dev) { - struct zd_mc_hash hash; struct zd_mac *mac = zd_netdev_mac(dev); + struct ieee80211_device *ieee = zd_mac_to_ieee80211(mac); + struct zd_mc_hash hash; struct dev_mc_list *mc; unsigned long flags; + DECLARE_MAC_BUF(mac2); - if (dev->flags & (IFF_PROMISC|IFF_ALLMULTI)) { + if (dev->flags & (IFF_PROMISC|IFF_ALLMULTI) || + ieee->iw_mode == IW_MODE_MONITOR) { zd_mc_add_all(&hash); } else { zd_mc_clear(&hash); for (mc = dev->mc_list; mc; mc = mc->next) { - dev_dbg_f(zd_mac_dev(mac), "mc addr " MAC_FMT "\n", - MAC_ARG(mc->dmi_addr)); + dev_dbg_f(zd_mac_dev(mac), "mc addr %s\n", + print_mac(mac2, mc->dmi_addr)); zd_mc_add_addr(&hash, mc->dmi_addr); } } @@ -582,28 +611,6 @@ u8 zd_mac_get_channel(struct zd_mac *mac) return channel; } -/* If wrong rate is given, we are falling back to the slowest rate: 1MBit/s */ -static u8 zd_rate_typed(u8 zd_rate) -{ - static const u8 typed_rates[16] = { - [ZD_CCK_RATE_1M] = ZD_CS_CCK|ZD_CCK_RATE_1M, - [ZD_CCK_RATE_2M] = ZD_CS_CCK|ZD_CCK_RATE_2M, - [ZD_CCK_RATE_5_5M] = ZD_CS_CCK|ZD_CCK_RATE_5_5M, - [ZD_CCK_RATE_11M] = ZD_CS_CCK|ZD_CCK_RATE_11M, - [ZD_OFDM_RATE_6M] = ZD_CS_OFDM|ZD_OFDM_RATE_6M, - [ZD_OFDM_RATE_9M] = ZD_CS_OFDM|ZD_OFDM_RATE_9M, - [ZD_OFDM_RATE_12M] = ZD_CS_OFDM|ZD_OFDM_RATE_12M, - [ZD_OFDM_RATE_18M] = ZD_CS_OFDM|ZD_OFDM_RATE_18M, - [ZD_OFDM_RATE_24M] = ZD_CS_OFDM|ZD_OFDM_RATE_24M, - [ZD_OFDM_RATE_36M] = ZD_CS_OFDM|ZD_OFDM_RATE_36M, - [ZD_OFDM_RATE_48M] = ZD_CS_OFDM|ZD_OFDM_RATE_48M, - [ZD_OFDM_RATE_54M] = ZD_CS_OFDM|ZD_OFDM_RATE_54M, - }; - - ZD_ASSERT(ZD_CS_RATE_MASK == 0x0f); - return typed_rates[zd_rate & ZD_CS_RATE_MASK]; -} - int zd_mac_set_mode(struct zd_mac *mac, u32 mode) { struct ieee80211_device *ieee; @@ -628,8 +635,12 @@ int zd_mac_set_mode(struct zd_mac *mac, u32 mode) ieee->iw_mode = mode; spin_unlock_irq(&ieee->lock); - if (netif_running(mac->netdev)) - return reset_mode(mac); + 
if (netif_running(mac->netdev)) { + int r = set_rx_filter(mac); + if (r) + return r; + return set_sniffer(mac); + } return 0; } @@ -707,25 +718,30 @@ int zd_mac_get_range(struct zd_mac *mac, struct iw_range *range) static int zd_calc_tx_length_us(u8 *service, u8 zd_rate, u16 tx_length) { + /* ZD_PURE_RATE() must be used to remove the modulation type flag of + * the zd-rate values. */ static const u8 rate_divisor[] = { - [ZD_CCK_RATE_1M] = 1, - [ZD_CCK_RATE_2M] = 2, - [ZD_CCK_RATE_5_5M] = 11, /* bits must be doubled */ - [ZD_CCK_RATE_11M] = 11, - [ZD_OFDM_RATE_6M] = 6, - [ZD_OFDM_RATE_9M] = 9, - [ZD_OFDM_RATE_12M] = 12, - [ZD_OFDM_RATE_18M] = 18, - [ZD_OFDM_RATE_24M] = 24, - [ZD_OFDM_RATE_36M] = 36, - [ZD_OFDM_RATE_48M] = 48, - [ZD_OFDM_RATE_54M] = 54, + [ZD_PURE_RATE(ZD_CCK_RATE_1M)] = 1, + [ZD_PURE_RATE(ZD_CCK_RATE_2M)] = 2, + + /* bits must be doubled */ + [ZD_PURE_RATE(ZD_CCK_RATE_5_5M)] = 11, + + [ZD_PURE_RATE(ZD_CCK_RATE_11M)] = 11, + [ZD_PURE_RATE(ZD_OFDM_RATE_6M)] = 6, + [ZD_PURE_RATE(ZD_OFDM_RATE_9M)] = 9, + [ZD_PURE_RATE(ZD_OFDM_RATE_12M)] = 12, + [ZD_PURE_RATE(ZD_OFDM_RATE_18M)] = 18, + [ZD_PURE_RATE(ZD_OFDM_RATE_24M)] = 24, + [ZD_PURE_RATE(ZD_OFDM_RATE_36M)] = 36, + [ZD_PURE_RATE(ZD_OFDM_RATE_48M)] = 48, + [ZD_PURE_RATE(ZD_OFDM_RATE_54M)] = 54, }; u32 bits = (u32)tx_length * 8; u32 divisor; - divisor = rate_divisor[zd_rate]; + divisor = rate_divisor[ZD_PURE_RATE(zd_rate)]; if (divisor == 0) return -EINVAL; @@ -748,52 +764,24 @@ static int zd_calc_tx_length_us(u8 *service, u8 zd_rate, u16 tx_length) return bits/divisor; } -enum { - R2M_SHORT_PREAMBLE = 0x01, - R2M_11A = 0x02, -}; - -static u8 zd_rate_to_modulation(u8 zd_rate, int flags) -{ - u8 modulation; - - modulation = zd_rate_typed(zd_rate); - if (flags & R2M_SHORT_PREAMBLE) { - switch (ZD_CS_RATE(modulation)) { - case ZD_CCK_RATE_2M: - case ZD_CCK_RATE_5_5M: - case ZD_CCK_RATE_11M: - modulation |= ZD_CS_CCK_PREA_SHORT; - return modulation; - } - } - if (flags & R2M_11A) { - if (ZD_CS_TYPE(modulation) == ZD_CS_OFDM) - modulation |= ZD_CS_OFDM_MODE_11A; - } - return modulation; -} - static void cs_set_modulation(struct zd_mac *mac, struct zd_ctrlset *cs, struct ieee80211_hdr_4addr *hdr) { struct ieee80211softmac_device *softmac = ieee80211_priv(mac->netdev); u16 ftype = WLAN_FC_GET_TYPE(le16_to_cpu(hdr->frame_ctl)); - u8 rate, zd_rate; + u8 rate; int is_mgt = (ftype == IEEE80211_FTYPE_MGMT) != 0; int is_multicast = is_multicast_ether_addr(hdr->addr1); int short_preamble = ieee80211softmac_short_preamble_ok(softmac, is_multicast, is_mgt); - int flags = 0; - /* FIXME: 802.11a? */ rate = ieee80211softmac_suggest_txrate(softmac, is_multicast, is_mgt); + cs->modulation = rate_to_zd_rate(rate); - if (short_preamble) - flags |= R2M_SHORT_PREAMBLE; - - zd_rate = rate_to_zd_rate(rate); - cs->modulation = zd_rate_to_modulation(zd_rate, flags); + /* Set short preamble bit when appropriate */ + if (short_preamble && ZD_MODULATION_TYPE(cs->modulation) == ZD_CCK + && cs->modulation != ZD_CCK_RATE_1M) + cs->modulation |= ZD_CCK_PREA_SHORT; } static void cs_set_control(struct zd_mac *mac, struct zd_ctrlset *cs, @@ -832,7 +820,7 @@ static void cs_set_control(struct zd_mac *mac, struct zd_ctrlset *cs, cs->control |= ZD_CS_RTS; /* Use CTS-to-self protection if required */ - if (ZD_CS_TYPE(cs->modulation) == ZD_CS_OFDM && + if (ZD_MODULATION_TYPE(cs->modulation) == ZD_OFDM && ieee80211softmac_protection_needed(softmac)) { /* FIXME: avoid sending RTS *and* self-CTS, is that correct? 
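In zd_calc_tx_length_us() the table above only supplies the divisor; the doubling hinted at by the "bits must be doubled" comment for 5.5M and the 11M service-bit fixup live in the part of the function this hunk does not show. For the plain OFDM path the result is simply bits/divisor; a hypothetical stand-alone check of the arithmetic:

#include <stdio.h>

int main(void)
{
	unsigned int tx_length = 1500;		/* frame length in bytes */
	unsigned int bits = tx_length * 8;	/* 12000 bits on the air */
	unsigned int divisor = 24;		/* rate_divisor[] entry for
						 * ZD_PURE_RATE(ZD_OFDM_RATE_24M) */

	printf("airtime = %u us\n", bits / divisor);	/* prints 500 us */
	return 0;
}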
*/ cs->control &= ~ZD_CS_RTS; @@ -893,7 +881,7 @@ static int fill_ctrlset(struct zd_mac *mac, * - see line 53 of zdinlinef.h */ cs->service = 0; - r = zd_calc_tx_length_us(&cs->service, ZD_CS_RATE(cs->modulation), + r = zd_calc_tx_length_us(&cs->service, ZD_RATE(cs->modulation), le16_to_cpu(cs->tx_length)); if (r < 0) return r; @@ -902,7 +890,7 @@ static int fill_ctrlset(struct zd_mac *mac, if (next_frag_len == 0) { cs->next_frame_length = 0; } else { - r = zd_calc_tx_length_us(NULL, ZD_CS_RATE(cs->modulation), + r = zd_calc_tx_length_us(NULL, ZD_RATE(cs->modulation), next_frag_len); if (r < 0) return r; @@ -1077,7 +1065,8 @@ static int fill_rx_stats(struct ieee80211_rx_stats *stats, { const struct rx_status *status; - *pstatus = status = zd_tail(buffer, length, sizeof(struct rx_status)); + *pstatus = status = (struct rx_status *) + (buffer + (length - sizeof(struct rx_status))); if (status->frame_status & ZD_RX_ERROR) { struct ieee80211_device *ieee = zd_mac_to_ieee80211(mac); ieee->stats.rx_errors++; diff --git a/drivers/net/wireless/zd1211rw/zd_mac.h b/drivers/net/wireless/zd1211rw/zd_mac.h index 9f9344eb50f943a922b3a7a09eddb1bb4fe22db4..1b15bde3ff60a35599a86361778605e926b71963 100644 --- a/drivers/net/wireless/zd1211rw/zd_mac.h +++ b/drivers/net/wireless/zd1211rw/zd_mac.h @@ -40,28 +40,51 @@ struct zd_ctrlset { #define ZD_CS_RESERVED_SIZE 25 -/* zd_crtlset field modulation */ -#define ZD_CS_RATE_MASK 0x0f -#define ZD_CS_TYPE_MASK 0x10 -#define ZD_CS_RATE(modulation) ((modulation) & ZD_CS_RATE_MASK) -#define ZD_CS_TYPE(modulation) ((modulation) & ZD_CS_TYPE_MASK) - -#define ZD_CS_CCK 0x00 -#define ZD_CS_OFDM 0x10 - -/* These are referred to as zd_rates */ -#define ZD_CCK_RATE_1M 0x00 -#define ZD_CCK_RATE_2M 0x01 -#define ZD_CCK_RATE_5_5M 0x02 -#define ZD_CCK_RATE_11M 0x03 -/* The rates for OFDM are encoded as in the PLCP header. Use ZD_OFDM_RATE_*. +/* The field modulation of struct zd_ctrlset controls the bit rate, the use + * of short or long preambles in 802.11b (CCK mode) or the use of 802.11a or + * 802.11g in OFDM mode. + * + * The term zd-rate is used for the combination of the modulation type flag + * and the "pure" rate value. */ - -/* bit 5 is preamble (when in CCK mode), or a/g selection (when in OFDM mode) */ -#define ZD_CS_CCK_PREA_LONG 0x00 -#define ZD_CS_CCK_PREA_SHORT 0x20 -#define ZD_CS_OFDM_MODE_11G 0x00 -#define ZD_CS_OFDM_MODE_11A 0x20 +#define ZD_PURE_RATE_MASK 0x0f +#define ZD_MODULATION_TYPE_MASK 0x10 +#define ZD_RATE_MASK (ZD_PURE_RATE_MASK|ZD_MODULATION_TYPE_MASK) +#define ZD_PURE_RATE(modulation) ((modulation) & ZD_PURE_RATE_MASK) +#define ZD_MODULATION_TYPE(modulation) ((modulation) & ZD_MODULATION_TYPE_MASK) +#define ZD_RATE(modulation) ((modulation) & ZD_RATE_MASK) + +/* The two possible modulation types. Notify that 802.11b doesn't use the CCK + * codeing for the 1 and 2 MBit/s rate. We stay with the term here to remain + * consistent with uses the term at other places. + */ +#define ZD_CCK 0x00 +#define ZD_OFDM 0x10 + +/* The ZD1211 firmware uses proprietary encodings of the 802.11b (CCK) rates. + * For OFDM the PLCP rate encodings are used. We combine these "pure" rates + * with the modulation type flag and call the resulting values zd-rates. 
+ */ +#define ZD_CCK_RATE_1M (ZD_CCK|0x00) +#define ZD_CCK_RATE_2M (ZD_CCK|0x01) +#define ZD_CCK_RATE_5_5M (ZD_CCK|0x02) +#define ZD_CCK_RATE_11M (ZD_CCK|0x03) +#define ZD_OFDM_RATE_6M (ZD_OFDM|ZD_OFDM_PLCP_RATE_6M) +#define ZD_OFDM_RATE_9M (ZD_OFDM|ZD_OFDM_PLCP_RATE_9M) +#define ZD_OFDM_RATE_12M (ZD_OFDM|ZD_OFDM_PLCP_RATE_12M) +#define ZD_OFDM_RATE_18M (ZD_OFDM|ZD_OFDM_PLCP_RATE_18M) +#define ZD_OFDM_RATE_24M (ZD_OFDM|ZD_OFDM_PLCP_RATE_24M) +#define ZD_OFDM_RATE_36M (ZD_OFDM|ZD_OFDM_PLCP_RATE_36M) +#define ZD_OFDM_RATE_48M (ZD_OFDM|ZD_OFDM_PLCP_RATE_48M) +#define ZD_OFDM_RATE_54M (ZD_OFDM|ZD_OFDM_PLCP_RATE_54M) + +/* The bit 5 of the zd_ctrlset modulation field controls the preamble in CCK + * mode or the 802.11a/802.11g selection in OFDM mode. + */ +#define ZD_CCK_PREA_LONG 0x00 +#define ZD_CCK_PREA_SHORT 0x20 +#define ZD_OFDM_MODE_11G 0x00 +#define ZD_OFDM_MODE_11A 0x20 /* zd_ctrlset control field */ #define ZD_CS_NEED_RANDOM_BACKOFF 0x01 diff --git a/drivers/net/wireless/zd1211rw/zd_netdev.c b/drivers/net/wireless/zd1211rw/zd_netdev.c index 8bda48de31ef7f58da1a329d4a017a23b716b688..047cab3d87dfc3454af1e5a2d8ee985b881beb47 100644 --- a/drivers/net/wireless/zd1211rw/zd_netdev.c +++ b/drivers/net/wireless/zd1211rw/zd_netdev.c @@ -233,7 +233,6 @@ struct net_device *zd_netdev_alloc(struct usb_interface *intf) return NULL; } - SET_MODULE_OWNER(netdev); SET_NETDEV_DEV(netdev, &intf->dev); dev_dbg_f(&intf->dev, "netdev->flags %#06hx\n", netdev->flags); diff --git a/drivers/net/wireless/zd1211rw/zd_usb.c b/drivers/net/wireless/zd1211rw/zd_usb.c index a9c339ef116a185384ae406aa1f14ad8c635f347..b0684f965761fec054bfc2ffeab014c1e329d810 100644 --- a/drivers/net/wireless/zd1211rw/zd_usb.c +++ b/drivers/net/wireless/zd1211rw/zd_usb.c @@ -31,7 +31,6 @@ #include "zd_netdev.h" #include "zd_mac.h" #include "zd_usb.h" -#include "zd_util.h" static struct usb_device_id usb_ids[] = { /* ZD1211 */ @@ -55,6 +54,7 @@ static struct usb_device_id usb_ids[] = { { USB_DEVICE(0x14ea, 0xab13), .driver_info = DEVICE_ZD1211 }, { USB_DEVICE(0x13b1, 0x001e), .driver_info = DEVICE_ZD1211 }, { USB_DEVICE(0x0586, 0x3407), .driver_info = DEVICE_ZD1211 }, + { USB_DEVICE(0x129b, 0x1666), .driver_info = DEVICE_ZD1211 }, /* ZD1211B */ { USB_DEVICE(0x0ace, 0x1215), .driver_info = DEVICE_ZD1211B }, { USB_DEVICE(0x157e, 0x300d), .driver_info = DEVICE_ZD1211B }, @@ -74,6 +74,9 @@ static struct usb_device_id usb_ids[] = { { USB_DEVICE(0x0411, 0x00da), .driver_info = DEVICE_ZD1211B }, { USB_DEVICE(0x2019, 0x5303), .driver_info = DEVICE_ZD1211B }, { USB_DEVICE(0x129b, 0x1667), .driver_info = DEVICE_ZD1211B }, + { USB_DEVICE(0x0cde, 0x001a), .driver_info = DEVICE_ZD1211B }, + { USB_DEVICE(0x0586, 0x340a), .driver_info = DEVICE_ZD1211B }, + { USB_DEVICE(0x0471, 0x1237), .driver_info = DEVICE_ZD1211B }, /* "Driverless" devices that need ejecting */ { USB_DEVICE(0x0ace, 0x2011), .driver_info = DEVICE_INSTALLER }, { USB_DEVICE(0x0ace, 0x20ff), .driver_info = DEVICE_INSTALLER }, diff --git a/drivers/net/wireless/zd1211rw/zd_util.c b/drivers/net/wireless/zd1211rw/zd_util.c deleted file mode 100644 index d20036c15d11bdb1a0d5532fd46e7ff2d328515f..0000000000000000000000000000000000000000 --- a/drivers/net/wireless/zd1211rw/zd_util.c +++ /dev/null @@ -1,82 +0,0 @@ -/* zd_util.c - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. 
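A worked decoding of the zd-rate encoding introduced above may help; every value below follows directly from the masks and defines in this header:

/*   ZD_OFDM_RATE_24M                      == 0x19  (ZD_OFDM | 0x9)
 *   ZD_MODULATION_TYPE(ZD_OFDM_RATE_24M)  == ZD_OFDM                  (0x10)
 *   ZD_PURE_RATE(ZD_OFDM_RATE_24M)        == ZD_OFDM_PLCP_RATE_24M    (0x09)
 *
 *   u8 mod = ZD_CCK_RATE_11M | ZD_CCK_PREA_SHORT;    (0x03 | 0x20 == 0x23)
 *   ZD_RATE(mod)                          == ZD_CCK_RATE_11M          (0x03)
 *   ZD_MODULATION_TYPE(mod)               == ZD_CCK                   (0x00)
 */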
- * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - * - * Utility program - */ - -#include "zd_def.h" -#include "zd_util.h" - -#ifdef DEBUG -static char hex(u8 v) -{ - v &= 0xf; - return (v < 10 ? '0' : 'a' - 10) + v; -} - -static char hex_print(u8 c) -{ - return (0x20 <= c && c < 0x7f) ? c : '.'; -} - -static void dump_line(const u8 *bytes, size_t size) -{ - char c; - size_t i; - - size = size <= 8 ? size : 8; - printk(KERN_DEBUG "zd1211 %p ", bytes); - for (i = 0; i < 8; i++) { - switch (i) { - case 1: - case 5: - c = '.'; - break; - case 3: - c = ':'; - break; - default: - c = ' '; - } - if (i < size) { - printk("%c%c%c", hex(bytes[i] >> 4), hex(bytes[i]), c); - } else { - printk(" %c", c); - } - } - - for (i = 0; i < size; i++) - printk("%c", hex_print(bytes[i])); - printk("\n"); -} - -void zd_hexdump(const void *bytes, size_t size) -{ - size_t i = 0; - - do { - dump_line((u8 *)bytes + i, size-i); - i += 8; - } while (i < size); -} -#endif /* DEBUG */ - -void *zd_tail(const void *buffer, size_t buffer_size, size_t tail_size) -{ - if (buffer_size < tail_size) - return NULL; - return (u8 *)buffer + (buffer_size - tail_size); -} diff --git a/drivers/net/wireless/zd1211rw/zd_util.h b/drivers/net/wireless/zd1211rw/zd_util.h deleted file mode 100644 index ce26f7adea92e7385fdcb462264563eca3141ff2..0000000000000000000000000000000000000000 --- a/drivers/net/wireless/zd1211rw/zd_util.h +++ /dev/null @@ -1,29 +0,0 @@ -/* zd_util.h - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
- * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - */ - -#ifndef _ZD_UTIL_H -#define _ZD_UTIL_H - -void *zd_tail(const void *buffer, size_t buffer_size, size_t tail_size); - -#ifdef DEBUG -void zd_hexdump(const void *bytes, size_t size); -#else -#define zd_hexdump(bytes, size) -#endif /* DEBUG */ - -#endif /* _ZD_UTIL_H */ diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c index 4445810335a8ef7dff90f4c01683308d12e66c73..f464b82c7d5fe3f7795397b217d2459bdd85e211 100644 --- a/drivers/net/xen-netfront.c +++ b/drivers/net/xen-netfront.c @@ -72,7 +72,7 @@ struct netfront_info { struct list_head list; struct net_device *netdev; - struct net_device_stats stats; + struct napi_struct napi; struct xen_netif_tx_front_ring tx; struct xen_netif_rx_front_ring rx; @@ -185,7 +185,8 @@ static int xennet_can_sg(struct net_device *dev) static void rx_refill_timeout(unsigned long data) { struct net_device *dev = (struct net_device *)data; - netif_rx_schedule(dev); + struct netfront_info *np = netdev_priv(dev); + netif_rx_schedule(dev, &np->napi); } static int netfront_tx_slot_available(struct netfront_info *np) @@ -212,11 +213,9 @@ static void xennet_alloc_rx_buffers(struct net_device *dev) struct page *page; int i, batch_target, notify; RING_IDX req_prod = np->rx.req_prod_pvt; - struct xen_memory_reservation reservation; grant_ref_t ref; unsigned long pfn; void *vaddr; - int nr_flips; struct xen_netif_rx_request *req; if (unlikely(!netif_carrier_ok(dev))) @@ -266,7 +265,7 @@ static void xennet_alloc_rx_buffers(struct net_device *dev) np->rx_target = np->rx_max_target; refill: - for (nr_flips = i = 0; ; i++) { + for (i = 0; ; i++) { skb = __skb_dequeue(&np->rx_batch); if (skb == NULL) break; @@ -295,38 +294,7 @@ static void xennet_alloc_rx_buffers(struct net_device *dev) req->gref = ref; } - if (nr_flips != 0) { - reservation.extent_start = np->rx_pfn_array; - reservation.nr_extents = nr_flips; - reservation.extent_order = 0; - reservation.address_bits = 0; - reservation.domid = DOMID_SELF; - - if (!xen_feature(XENFEAT_auto_translated_physmap)) { - /* After all PTEs have been zapped, flush the TLB. */ - np->rx_mcl[i-1].args[MULTI_UVMFLAGS_INDEX] = - UVMF_TLB_FLUSH|UVMF_ALL; - - /* Give away a batch of pages. */ - np->rx_mcl[i].op = __HYPERVISOR_memory_op; - np->rx_mcl[i].args[0] = XENMEM_decrease_reservation; - np->rx_mcl[i].args[1] = (unsigned long)&reservation; - - /* Zap PTEs and give away pages in one big - * multicall. */ - (void)HYPERVISOR_multicall(np->rx_mcl, i+1); - - /* Check return status of HYPERVISOR_memory_op(). */ - if (unlikely(np->rx_mcl[i].result != i)) - panic("Unable to reduce memory reservation\n"); - } else { - if (HYPERVISOR_memory_op(XENMEM_decrease_reservation, - &reservation) != i) - panic("Unable to reduce memory reservation\n"); - } - } else { - wmb(); /* barrier so backend seens requests */ - } + wmb(); /* barrier so backend seens requests */ /* Above is a suitable barrier to ensure backend will see requests. 
*/ np->rx.req_prod_pvt = req_prod + i; @@ -340,14 +308,14 @@ static int xennet_open(struct net_device *dev) { struct netfront_info *np = netdev_priv(dev); - memset(&np->stats, 0, sizeof(np->stats)); + napi_enable(&np->napi); spin_lock_bh(&np->rx_lock); if (netif_carrier_ok(dev)) { xennet_alloc_rx_buffers(dev); np->rx.sring->rsp_event = np->rx.rsp_cons + 1; if (RING_HAS_UNCONSUMED_RESPONSES(&np->rx)) - netif_rx_schedule(dev); + netif_rx_schedule(dev, &np->napi); } spin_unlock_bh(&np->rx_lock); @@ -566,8 +534,8 @@ static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev) if (notify) notify_remote_via_irq(np->netdev->irq); - np->stats.tx_bytes += skb->len; - np->stats.tx_packets++; + dev->stats.tx_bytes += skb->len; + dev->stats.tx_packets++; /* Note: It is not safe to access skb after xennet_tx_buf_gc()! */ xennet_tx_buf_gc(dev); @@ -580,7 +548,7 @@ static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev) return 0; drop: - np->stats.tx_dropped++; + dev->stats.tx_dropped++; dev_kfree_skb(skb); return 0; } @@ -589,15 +557,10 @@ static int xennet_close(struct net_device *dev) { struct netfront_info *np = netdev_priv(dev); netif_stop_queue(np->netdev); + napi_disable(&np->napi); return 0; } -static struct net_device_stats *xennet_get_stats(struct net_device *dev) -{ - struct netfront_info *np = netdev_priv(dev); - return &np->stats; -} - static void xennet_move_rx_slot(struct netfront_info *np, struct sk_buff *skb, grant_ref_t ref) { @@ -832,9 +795,8 @@ static int skb_checksum_setup(struct sk_buff *skb) } static int handle_incoming_queue(struct net_device *dev, - struct sk_buff_head *rxq) + struct sk_buff_head *rxq) { - struct netfront_info *np = netdev_priv(dev); int packets_dropped = 0; struct sk_buff *skb; @@ -856,13 +818,13 @@ static int handle_incoming_queue(struct net_device *dev, if (skb_checksum_setup(skb)) { kfree_skb(skb); packets_dropped++; - np->stats.rx_errors++; + dev->stats.rx_errors++; continue; } } - np->stats.rx_packets++; - np->stats.rx_bytes += skb->len; + dev->stats.rx_packets++; + dev->stats.rx_bytes += skb->len; /* Pass it up. */ netif_receive_skb(skb); @@ -872,15 +834,16 @@ static int handle_incoming_queue(struct net_device *dev, return packets_dropped; } -static int xennet_poll(struct net_device *dev, int *pbudget) +static int xennet_poll(struct napi_struct *napi, int budget) { - struct netfront_info *np = netdev_priv(dev); + struct netfront_info *np = container_of(napi, struct netfront_info, napi); + struct net_device *dev = np->netdev; struct sk_buff *skb; struct netfront_rx_info rinfo; struct xen_netif_rx_response *rx = &rinfo.rx; struct xen_netif_extra_info *extras = rinfo.extras; RING_IDX i, rp; - int work_done, budget, more_to_do = 1; + int work_done; struct sk_buff_head rxq; struct sk_buff_head errq; struct sk_buff_head tmpq; @@ -899,9 +862,6 @@ static int xennet_poll(struct net_device *dev, int *pbudget) skb_queue_head_init(&errq); skb_queue_head_init(&tmpq); - budget = *pbudget; - if (budget > dev->quota) - budget = dev->quota; rp = np->rx.sring->rsp_prod; rmb(); /* Ensure we see queued responses up to 'rp'. 
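xen-netfront is converted here from the old dev->poll/quota interface to the napi_struct API. A minimal sketch of the target shape, assuming hypothetical my_priv/my_clean_rx/my_setup names; the napi_* and netif_rx_* calls are the ones used in these hunks (the driver itself calls the __netif_rx_complete() variant under its own local_irq_save()):

#include <linux/netdevice.h>

struct my_priv {
	struct net_device *netdev;
	struct napi_struct napi;
};

/* Assumed to exist elsewhere: passes at most 'budget' frames to
 * netif_receive_skb() and returns how many it actually handled. */
static int my_clean_rx(struct my_priv *priv, int budget);

static int my_poll(struct napi_struct *napi, int budget)
{
	struct my_priv *priv = container_of(napi, struct my_priv, napi);
	int work_done = my_clean_rx(priv, budget);

	if (work_done < budget)		/* ring drained: leave polled mode */
		netif_rx_complete(priv->netdev, napi);

	return work_done;
}

static void my_setup(struct net_device *netdev, struct my_priv *priv)
{
	/* replaces the old netdev->poll and netdev->weight fields */
	netif_napi_add(netdev, &priv->napi, my_poll, 64);
}

/* open():  napi_enable(&priv->napi);
 * close(): napi_disable(&priv->napi);
 * IRQ:     netif_rx_schedule(netdev, &priv->napi) when RX work is pending. */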
*/ @@ -917,7 +877,7 @@ static int xennet_poll(struct net_device *dev, int *pbudget) err: while ((skb = __skb_dequeue(&tmpq))) __skb_queue_tail(&errq, skb); - np->stats.rx_errors++; + dev->stats.rx_errors++; i = np->rx.rsp_cons; continue; } @@ -1006,22 +966,21 @@ static int xennet_poll(struct net_device *dev, int *pbudget) xennet_alloc_rx_buffers(dev); - *pbudget -= work_done; - dev->quota -= work_done; - if (work_done < budget) { + int more_to_do = 0; + local_irq_save(flags); RING_FINAL_CHECK_FOR_RESPONSES(&np->rx, more_to_do); if (!more_to_do) - __netif_rx_complete(dev); + __netif_rx_complete(dev, napi); local_irq_restore(flags); } spin_unlock(&np->rx_lock); - return more_to_do; + return work_done; } static int xennet_change_mtu(struct net_device *dev, int mtu) @@ -1200,15 +1159,12 @@ static struct net_device * __devinit xennet_create_dev(struct xenbus_device *dev netdev->open = xennet_open; netdev->hard_start_xmit = xennet_start_xmit; netdev->stop = xennet_close; - netdev->get_stats = xennet_get_stats; - netdev->poll = xennet_poll; + netif_napi_add(netdev, &np->napi, xennet_poll, 64); netdev->uninit = xennet_uninit; netdev->change_mtu = xennet_change_mtu; - netdev->weight = 64; netdev->features = NETIF_F_IP_CSUM; SET_ETHTOOL_OPS(netdev, &xennet_ethtool_ops); - SET_MODULE_OWNER(netdev); SET_NETDEV_DEV(netdev, &dev->dev); np->netdev = netdev; @@ -1349,7 +1305,7 @@ static irqreturn_t xennet_interrupt(int irq, void *dev_id) xennet_tx_buf_gc(dev); /* Under tx_lock: protects access to rx shared-ring indexes. */ if (RING_HAS_UNCONSUMED_RESPONSES(&np->rx)) - netif_rx_schedule(dev); + netif_rx_schedule(dev, &np->napi); } spin_unlock_irqrestore(&np->tx_lock, flags); @@ -1661,11 +1617,8 @@ static void backend_changed(struct xenbus_device *dev, static struct ethtool_ops xennet_ethtool_ops = { - .get_tx_csum = ethtool_op_get_tx_csum, .set_tx_csum = ethtool_op_set_tx_csum, - .get_sg = ethtool_op_get_sg, .set_sg = xennet_set_sg, - .get_tso = ethtool_op_get_tso, .set_tso = xennet_set_tso, .get_link = ethtool_op_get_link, }; diff --git a/drivers/net/yellowfin.c b/drivers/net/yellowfin.c index 870c5393c21a0c4ecf7d926355705350d889fda4..87f002ade531150ca3554ebdedaf97e05385599c 100644 --- a/drivers/net/yellowfin.c +++ b/drivers/net/yellowfin.c @@ -318,7 +318,6 @@ struct yellowfin_private { dma_addr_t tx_status_dma; struct timer_list timer; /* Media selection timer. */ - struct net_device_stats stats; /* Frequently used and paired value: keep adjacent for cache effect. 
*/ int chip_id, drv_flags; struct pci_dev *pci_dev; @@ -353,7 +352,6 @@ static irqreturn_t yellowfin_interrupt(int irq, void *dev_instance); static int yellowfin_rx(struct net_device *dev); static void yellowfin_error(struct net_device *dev, int intr_status); static int yellowfin_close(struct net_device *dev); -static struct net_device_stats *yellowfin_get_stats(struct net_device *dev); static void set_rx_mode(struct net_device *dev); static const struct ethtool_ops ethtool_ops; @@ -376,6 +374,7 @@ static int __devinit yellowfin_init_one(struct pci_dev *pdev, #else int bar = 1; #endif + DECLARE_MAC_BUF(mac); /* when built into the kernel, we only print version if device is found */ #ifndef MODULE @@ -392,7 +391,6 @@ static int __devinit yellowfin_init_one(struct pci_dev *pdev, printk (KERN_ERR PFX "cannot allocate ethernet device\n"); return -ENOMEM; } - SET_MODULE_OWNER(dev); SET_NETDEV_DEV(dev, &pdev->dev); np = netdev_priv(dev); @@ -470,7 +468,6 @@ static int __devinit yellowfin_init_one(struct pci_dev *pdev, dev->open = &yellowfin_open; dev->hard_start_xmit = &yellowfin_start_xmit; dev->stop = &yellowfin_close; - dev->get_stats = &yellowfin_get_stats; dev->set_multicast_list = &set_rx_mode; dev->do_ioctl = &netdev_ioctl; SET_ETHTOOL_OPS(dev, ðtool_ops); @@ -484,12 +481,10 @@ static int __devinit yellowfin_init_one(struct pci_dev *pdev, if (i) goto err_out_unmap_status; - printk(KERN_INFO "%s: %s type %8x at %p, ", + printk(KERN_INFO "%s: %s type %8x at %p, %s, IRQ %d.\n", dev->name, pci_id_tbl[chip_idx].name, - ioread32(ioaddr + ChipRev), ioaddr); - for (i = 0; i < 5; i++) - printk("%2.2x:", dev->dev_addr[i]); - printk("%2.2x, IRQ %d.\n", dev->dev_addr[i], irq); + ioread32(ioaddr + ChipRev), ioaddr, + print_mac(mac, dev->dev_addr), irq); if (np->drv_flags & HasMII) { int phy, phy_idx = 0; @@ -718,7 +713,7 @@ static void yellowfin_tx_timeout(struct net_device *dev) netif_wake_queue (dev); /* Typical path */ dev->trans_start = jiffies; - yp->stats.tx_errors++; + dev->stats.tx_errors++; } /* Initialize the Rx and Tx rings, along with various 'dev' bits. */ @@ -924,8 +919,8 @@ static irqreturn_t yellowfin_interrupt(int irq, void *dev_instance) if (yp->tx_ring[entry].result_status == 0) break; skb = yp->tx_skbuff[entry]; - yp->stats.tx_packets++; - yp->stats.tx_bytes += skb->len; + dev->stats.tx_packets++; + dev->stats.tx_bytes += skb->len; /* Free the original skb. */ pci_unmap_single(yp->pci_dev, yp->tx_ring[entry].addr, skb->len, PCI_DMA_TODEVICE); @@ -969,20 +964,20 @@ static irqreturn_t yellowfin_interrupt(int irq, void *dev_instance) printk(KERN_DEBUG "%s: Transmit error, Tx status %4.4x.\n", dev->name, tx_errs); #endif - yp->stats.tx_errors++; - if (tx_errs & 0xF800) yp->stats.tx_aborted_errors++; - if (tx_errs & 0x0800) yp->stats.tx_carrier_errors++; - if (tx_errs & 0x2000) yp->stats.tx_window_errors++; - if (tx_errs & 0x8000) yp->stats.tx_fifo_errors++; + dev->stats.tx_errors++; + if (tx_errs & 0xF800) dev->stats.tx_aborted_errors++; + if (tx_errs & 0x0800) dev->stats.tx_carrier_errors++; + if (tx_errs & 0x2000) dev->stats.tx_window_errors++; + if (tx_errs & 0x8000) dev->stats.tx_fifo_errors++; } else { #ifndef final_version if (yellowfin_debug > 4) printk(KERN_DEBUG "%s: Normal transmit, Tx status %4.4x.\n", dev->name, tx_errs); #endif - yp->stats.tx_bytes += skb->len; - yp->stats.collisions += tx_errs & 15; - yp->stats.tx_packets++; + dev->stats.tx_bytes += skb->len; + dev->stats.collisions += tx_errs & 15; + dev->stats.tx_packets++; } /* Free the original skb. 
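The yellowfin hunks here (and the znet ones below) drop the driver-private struct net_device_stats together with its get_stats() method, and instead bump the dev->stats counters that struct net_device already embeds. A minimal sketch with a hypothetical my_rx_one():

#include <linux/netdevice.h>

static void my_rx_one(struct net_device *dev, struct sk_buff *skb)
{
	dev->stats.rx_packets++;
	dev->stats.rx_bytes += skb->len;
	netif_rx(skb);
}

/* No dev->get_stats hook is installed; the networking core of this
 * kernel generation hands out &dev->stats by default, so the duplicate
 * per-driver counters and the accessor can simply be removed. */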
*/ pci_unmap_single(yp->pci_dev, @@ -1077,26 +1072,26 @@ static int yellowfin_rx(struct net_device *dev) if (data_size != 0) printk(KERN_WARNING "%s: Oversized Ethernet frame spanned multiple buffers," " status %4.4x, data_size %d!\n", dev->name, desc_status, data_size); - yp->stats.rx_length_errors++; + dev->stats.rx_length_errors++; } else if ((yp->drv_flags & IsGigabit) && (frame_status & 0x0038)) { /* There was a error. */ if (yellowfin_debug > 3) printk(KERN_DEBUG " yellowfin_rx() Rx error was %4.4x.\n", frame_status); - yp->stats.rx_errors++; - if (frame_status & 0x0060) yp->stats.rx_length_errors++; - if (frame_status & 0x0008) yp->stats.rx_frame_errors++; - if (frame_status & 0x0010) yp->stats.rx_crc_errors++; - if (frame_status < 0) yp->stats.rx_dropped++; + dev->stats.rx_errors++; + if (frame_status & 0x0060) dev->stats.rx_length_errors++; + if (frame_status & 0x0008) dev->stats.rx_frame_errors++; + if (frame_status & 0x0010) dev->stats.rx_crc_errors++; + if (frame_status < 0) dev->stats.rx_dropped++; } else if ( !(yp->drv_flags & IsGigabit) && ((buf_addr[data_size-1] & 0x85) || buf_addr[data_size-2] & 0xC0)) { u8 status1 = buf_addr[data_size-2]; u8 status2 = buf_addr[data_size-1]; - yp->stats.rx_errors++; - if (status1 & 0xC0) yp->stats.rx_length_errors++; - if (status2 & 0x03) yp->stats.rx_frame_errors++; - if (status2 & 0x04) yp->stats.rx_crc_errors++; - if (status2 & 0x80) yp->stats.rx_dropped++; + dev->stats.rx_errors++; + if (status1 & 0xC0) dev->stats.rx_length_errors++; + if (status2 & 0x03) dev->stats.rx_frame_errors++; + if (status2 & 0x04) dev->stats.rx_crc_errors++; + if (status2 & 0x80) dev->stats.rx_dropped++; #ifdef YF_PROTOTYPE /* Support for prototype hardware errata. */ } else if ((yp->flags & HasMACAddrBug) && memcmp(le32_to_cpu(yp->rx_ring_dma + @@ -1105,11 +1100,11 @@ static int yellowfin_rx(struct net_device *dev) memcmp(le32_to_cpu(yp->rx_ring_dma + entry*sizeof(struct yellowfin_desc)), "\377\377\377\377\377\377", 6) != 0) { - if (bogus_rx++ == 0) - printk(KERN_WARNING "%s: Bad frame to %2.2x:%2.2x:%2.2x:%2.2x:" - "%2.2x:%2.2x.\n", - dev->name, buf_addr[0], buf_addr[1], buf_addr[2], - buf_addr[3], buf_addr[4], buf_addr[5]); + if (bogus_rx++ == 0) { + DECLARE_MAC_BUF(mac); + printk(KERN_WARNING "%s: Bad frame to %s\n", + dev->name, print_mac(mac, buf_addr)); + } #endif } else { struct sk_buff *skb; @@ -1146,8 +1141,8 @@ static int yellowfin_rx(struct net_device *dev) skb->protocol = eth_type_trans(skb, dev); netif_rx(skb); dev->last_rx = jiffies; - yp->stats.rx_packets++; - yp->stats.rx_bytes += pkt_len; + dev->stats.rx_packets++; + dev->stats.rx_bytes += pkt_len; } entry = (++yp->cur_rx) % RX_RING_SIZE; } @@ -1181,15 +1176,13 @@ static int yellowfin_rx(struct net_device *dev) static void yellowfin_error(struct net_device *dev, int intr_status) { - struct yellowfin_private *yp = netdev_priv(dev); - printk(KERN_ERR "%s: Something Wicked happened! %4.4x.\n", dev->name, intr_status); /* Hmmmmm, it's not clear what to do here. 
*/ if (intr_status & (IntrTxPCIErr | IntrTxPCIFault)) - yp->stats.tx_errors++; + dev->stats.tx_errors++; if (intr_status & (IntrRxPCIErr | IntrRxPCIFault)) - yp->stats.rx_errors++; + dev->stats.rx_errors++; } static int yellowfin_close(struct net_device *dev) @@ -1281,12 +1274,6 @@ static int yellowfin_close(struct net_device *dev) return 0; } -static struct net_device_stats *yellowfin_get_stats(struct net_device *dev) -{ - struct yellowfin_private *yp = netdev_priv(dev); - return &yp->stats; -} - /* Set or clear the multicast filter for this adaptor. */ static void set_rx_mode(struct net_device *dev) diff --git a/drivers/net/znet.c b/drivers/net/znet.c index 4032e9f6f9b072f4c0f26acbd1d92fb8b3e1c13b..a86c022d6a9494e4f96822095b06366d7531054b 100644 --- a/drivers/net/znet.c +++ b/drivers/net/znet.c @@ -128,7 +128,6 @@ MODULE_LICENSE("GPL"); struct znet_private { int rx_dma, tx_dma; - struct net_device_stats stats; spinlock_t lock; short sia_base, sia_size, io_size; struct i82593_conf_block i593_init; @@ -161,7 +160,6 @@ static int znet_send_packet(struct sk_buff *skb, struct net_device *dev); static irqreturn_t znet_interrupt(int irq, void *dev_id); static void znet_rx(struct net_device *dev); static int znet_close(struct net_device *dev); -static struct net_device_stats *net_get_stats(struct net_device *dev); static void hardware_init(struct net_device *dev); static void update_stop_hit(short ioaddr, unsigned short rx_stop_offset); static void znet_tx_timeout (struct net_device *dev); @@ -372,6 +370,7 @@ static int __init znet_probe (void) struct net_device *dev; char *p; int err = -ENOMEM; + DECLARE_MAC_BUF(mac); /* This code scans the region 0xf0000 to 0xfffff for a "NETIDBLK". */ for(p = (char *)phys_to_virt(0xf0000); p < (char *)phys_to_virt(0x100000); p++) @@ -388,22 +387,20 @@ static int __init znet_probe (void) if (!dev) return -ENOMEM; - SET_MODULE_OWNER (dev); - znet = dev->priv; netinfo = (struct netidblk *)p; dev->base_addr = netinfo->iobase1; dev->irq = netinfo->irq1; - printk(KERN_INFO "%s: ZNET at %#3lx,", dev->name, dev->base_addr); - /* The station address is in the "netidblk" at 0x0f0000. */ for (i = 0; i < 6; i++) - printk(" %2.2x", dev->dev_addr[i] = netinfo->netid[i]); + dev->dev_addr[i] = netinfo->netid[i]; - printk(", using IRQ %d DMA %d and %d.\n", dev->irq, netinfo->dma1, - netinfo->dma2); + printk(KERN_INFO "%s: ZNET at %#3lx, %s" + ", using IRQ %d DMA %d and %d.\n", + dev->name, dev->base_addr, print_mac(mac, dev->dev_addr), + dev->irq, netinfo->dma1, netinfo->dma2); if (znet_debug > 1) { printk(KERN_INFO "%s: vendor '%16.16s' IRQ1 %d IRQ2 %d DMA1 %d DMA2 %d.\n", @@ -447,7 +444,6 @@ static int __init znet_probe (void) dev->open = &znet_open; dev->hard_start_xmit = &znet_send_packet; dev->stop = &znet_close; - dev->get_stats = net_get_stats; dev->set_multicast_list = &znet_set_multicast_list; dev->tx_timeout = znet_tx_timeout; dev->watchdog_timeo = TX_TIMEOUT; @@ -566,7 +562,7 @@ static int znet_send_packet(struct sk_buff *skb, struct net_device *dev) ushort *tx_link = znet->tx_cur - 1; ushort rnd_len = (length + 1)>>1; - znet->stats.tx_bytes+=length; + dev->stats.tx_bytes+=length; if (znet->tx_cur >= znet->tx_end) znet->tx_cur = znet->tx_start; @@ -641,20 +637,20 @@ static irqreturn_t znet_interrupt(int irq, void *dev_id) tx_status = inw(ioaddr); /* It's undocumented, but tx_status seems to match the i82586. 
*/ if (tx_status & TX_OK) { - znet->stats.tx_packets++; - znet->stats.collisions += tx_status & TX_NCOL_MASK; + dev->stats.tx_packets++; + dev->stats.collisions += tx_status & TX_NCOL_MASK; } else { if (tx_status & (TX_LOST_CTS | TX_LOST_CRS)) - znet->stats.tx_carrier_errors++; + dev->stats.tx_carrier_errors++; if (tx_status & TX_UND_RUN) - znet->stats.tx_fifo_errors++; + dev->stats.tx_fifo_errors++; if (!(tx_status & TX_HRT_BEAT)) - znet->stats.tx_heartbeat_errors++; + dev->stats.tx_heartbeat_errors++; if (tx_status & TX_MAX_COL) - znet->stats.tx_aborted_errors++; + dev->stats.tx_aborted_errors++; /* ...and the catch-all. */ if ((tx_status | (TX_LOST_CRS | TX_LOST_CTS | TX_UND_RUN | TX_HRT_BEAT | TX_MAX_COL)) != (TX_LOST_CRS | TX_LOST_CTS | TX_UND_RUN | TX_HRT_BEAT | TX_MAX_COL)) - znet->stats.tx_errors++; + dev->stats.tx_errors++; /* Transceiver may be stuck if cable * was removed while emiting a @@ -750,19 +746,19 @@ static void znet_rx(struct net_device *dev) this_rfp_ptr[-3]<<1); /* Once again we must assume that the i82586 docs apply. */ if ( ! (status & RX_RCV_OK)) { /* There was an error. */ - znet->stats.rx_errors++; - if (status & RX_CRC_ERR) znet->stats.rx_crc_errors++; - if (status & RX_ALG_ERR) znet->stats.rx_frame_errors++; + dev->stats.rx_errors++; + if (status & RX_CRC_ERR) dev->stats.rx_crc_errors++; + if (status & RX_ALG_ERR) dev->stats.rx_frame_errors++; #if 0 - if (status & 0x0200) znet->stats.rx_over_errors++; /* Wrong. */ - if (status & 0x0100) znet->stats.rx_fifo_errors++; + if (status & 0x0200) dev->stats.rx_over_errors++; /* Wrong. */ + if (status & 0x0100) dev->stats.rx_fifo_errors++; #else /* maz : Wild guess... */ - if (status & RX_OVRRUN) znet->stats.rx_over_errors++; + if (status & RX_OVRRUN) dev->stats.rx_over_errors++; #endif - if (status & RX_SRT_FRM) znet->stats.rx_length_errors++; + if (status & RX_SRT_FRM) dev->stats.rx_length_errors++; } else if (pkt_len > 1536) { - znet->stats.rx_length_errors++; + dev->stats.rx_length_errors++; } else { /* Malloc up new buffer. */ struct sk_buff *skb; @@ -771,7 +767,7 @@ static void znet_rx(struct net_device *dev) if (skb == NULL) { if (znet_debug) printk(KERN_WARNING "%s: Memory squeeze, dropping packet.\n", dev->name); - znet->stats.rx_dropped++; + dev->stats.rx_dropped++; break; } @@ -791,8 +787,8 @@ static void znet_rx(struct net_device *dev) skb->protocol=eth_type_trans(skb,dev); netif_rx(skb); dev->last_rx = jiffies; - znet->stats.rx_packets++; - znet->stats.rx_bytes += pkt_len; + dev->stats.rx_packets++; + dev->stats.rx_bytes += pkt_len; } znet->rx_cur = this_rfp_ptr; if (znet->rx_cur >= znet->rx_end) @@ -829,15 +825,6 @@ static int znet_close(struct net_device *dev) return 0; } -/* Get the current statistics. This may be called with the card open or - closed. 
*/ -static struct net_device_stats *net_get_stats(struct net_device *dev) -{ - struct znet_private *znet = dev->priv; - - return &znet->stats; -} - static void show_dma(struct net_device *dev) { short ioaddr = dev->base_addr; diff --git a/drivers/net/zorro8390.c b/drivers/net/zorro8390.c index d85e2ea0b6af6122a4e6b4cbe761d44b18d850d0..3926b2aa9cca4c7aaa761fbc486e65d727bf762b 100644 --- a/drivers/net/zorro8390.c +++ b/drivers/net/zorro8390.c @@ -125,7 +125,6 @@ static int __devinit zorro8390_init_one(struct zorro_dev *z, dev = ____alloc_ei_netdev(0); if (!dev) return -ENOMEM; - SET_MODULE_OWNER(dev); if (!request_mem_region(ioaddr, NE_IO_EXTENT*2, DRV_NAME)) { free_netdev(dev); return -EBUSY; @@ -152,6 +151,7 @@ static int __devinit zorro8390_init(struct net_device *dev, 0x00, 0x02, 0x04, 0x06, 0x08, 0x0a, 0x0c, 0x0e, 0x10, 0x12, 0x14, 0x16, 0x18, 0x1a, 0x1c, 0x1e, }; + DECLARE_MAC_BUF(mac); /* Reset card. Who knows what dain-bramaged state it was left in. */ { @@ -191,7 +191,7 @@ static int __devinit zorro8390_init(struct net_device *dev, {0x00, NE_EN0_RSARHI}, {E8390_RREAD+E8390_START, NE_CMD}, }; - for (i = 0; i < sizeof(program_seq)/sizeof(program_seq[0]); i++) { + for (i = 0; i < ARRAY_SIZE(program_seq); i++) { z_writeb(program_seq[i].value, ioaddr + program_seq[i].offset); } } @@ -212,12 +212,12 @@ static int __devinit zorro8390_init(struct net_device *dev, i = request_irq(IRQ_AMIGA_PORTS, __ei_interrupt, IRQF_SHARED, DRV_NAME, dev); if (i) return i; - for(i = 0; i < ETHER_ADDR_LEN; i++) { + for(i = 0; i < ETHER_ADDR_LEN; i++) + dev->dev_addr[i] = SA_prom[i]; + #ifdef DEBUG - printk(" %2.2x", SA_prom[i]); + printk("%s", print_mac(mac, dev->dev_addr)); #endif - dev->dev_addr[i] = SA_prom[i]; - } ei_status.name = name; ei_status.tx_start_page = start_page; @@ -244,10 +244,8 @@ static int __devinit zorro8390_init(struct net_device *dev, return err; } - printk(KERN_INFO "%s: %s at 0x%08lx, Ethernet Address " - "%02x:%02x:%02x:%02x:%02x:%02x\n", dev->name, name, board, - dev->dev_addr[0], dev->dev_addr[1], dev->dev_addr[2], - dev->dev_addr[3], dev->dev_addr[4], dev->dev_addr[5]); + printk(KERN_INFO "%s: %s at 0x%08lx, Ethernet Address %s\n", + dev->name, name, board, print_mac(mac, dev->dev_addr)); return 0; } diff --git a/drivers/parisc/led.c b/drivers/parisc/led.c index e5d7ed92d6f780ba746b177c14cf6eab9b814bfe..a6d6b2488ffc267c475a09c849f5f18c73e53cbb 100644 --- a/drivers/parisc/led.c +++ b/drivers/parisc/led.c @@ -359,7 +359,7 @@ static __inline__ int led_get_net_activity(void) * for reading should be OK */ read_lock(&dev_base_lock); rcu_read_lock(); - for_each_netdev(dev) { + for_each_netdev(&init_net, dev) { struct net_device_stats *stats; struct in_device *in_dev = __in_dev_get_rcu(dev); if (!in_dev || !in_dev->ifa_list) diff --git a/drivers/s390/net/claw.c b/drivers/s390/net/claw.c index 023455a0b34a06466b5f88daeebf7ad011cc80ad..399695f7b1af147e083d60026bb49a6f11bd2389 100644 --- a/drivers/s390/net/claw.c +++ b/drivers/s390/net/claw.c @@ -3891,7 +3891,6 @@ claw_init_netdevice(struct net_device * dev) dev->type = ARPHRD_SLIP; dev->tx_queue_len = 1300; dev->flags = IFF_POINTOPOINT | IFF_NOARP; - SET_MODULE_OWNER(dev); #ifdef FUNCTRACE printk(KERN_INFO "%s:%s Exit\n",dev->name,__FUNCTION__); #endif diff --git a/drivers/s390/net/ctcmain.c b/drivers/s390/net/ctcmain.c index 92e8a37b50220f85517bc5367667f1db526f653c..449937233732b5b7a054565de71e1c4cd565ead9 100644 --- a/drivers/s390/net/ctcmain.c +++ b/drivers/s390/net/ctcmain.c @@ -2823,7 +2823,6 @@ ctc_init_netdevice(struct net_device * 
dev, int alloc_device, dev->type = ARPHRD_SLIP; dev->tx_queue_len = 100; dev->flags = IFF_POINTOPOINT | IFF_NOARP; - SET_MODULE_OWNER(dev); return dev; } diff --git a/drivers/s390/net/lcs.c b/drivers/s390/net/lcs.c index 08a994fdd1a4f27bd0a226306f9fe2ad8e136cd4..0fd663b23d767c9a6e81777d51df467af90631e6 100644 --- a/drivers/s390/net/lcs.c +++ b/drivers/s390/net/lcs.c @@ -1400,11 +1400,14 @@ lcs_irq(struct ccw_device *cdev, unsigned long intparm, struct irb *irb) PRINT_WARN("check on device %s, dstat=0x%X, cstat=0x%X \n", cdev->dev.bus_id, dstat, cstat); if (rc) { - lcs_schedule_recovery(card); - wake_up(&card->wait_q); - return; + channel->state = LCS_CH_STATE_ERROR; } } + if (channel->state == LCS_CH_STATE_ERROR) { + lcs_schedule_recovery(card); + wake_up(&card->wait_q); + return; + } /* How far in the ccw chain have we processed? */ if ((channel->state != LCS_CH_STATE_INIT) && (irb->scsw.fctl & SCSW_FCTL_START_FUNC)) { @@ -1708,6 +1711,8 @@ lcs_stopcard(struct lcs_card *card) if (card->read.state != LCS_CH_STATE_STOPPED && card->write.state != LCS_CH_STATE_STOPPED && + card->read.state != LCS_CH_STATE_ERROR && + card->write.state != LCS_CH_STATE_ERROR && card->state == DEV_STATE_UP) { lcs_clear_multicast_list(card); rc = lcs_send_stoplan(card,LCS_INITIATOR_TCPIP); @@ -2145,7 +2150,6 @@ lcs_new_device(struct ccwgroup_device *ccwgdev) card->dev->stop = lcs_stop_device; card->dev->hard_start_xmit = lcs_start_xmit; card->dev->get_stats = lcs_getstats; - SET_MODULE_OWNER(dev); memcpy(card->dev->dev_addr, card->mac, LCS_MAC_LENGTH); #ifdef CONFIG_IP_MULTICAST if (!lcs_check_multicast_support(card)) diff --git a/drivers/s390/net/lcs.h b/drivers/s390/net/lcs.h index 0e1e4a0a88f04fac497d5d280f7329d47821f824..8976fb0b070a73944b102da520ca4f691461587f 100644 --- a/drivers/s390/net/lcs.h +++ b/drivers/s390/net/lcs.h @@ -138,6 +138,7 @@ enum lcs_channel_states { LCS_CH_STATE_RUNNING, LCS_CH_STATE_SUSPENDED, LCS_CH_STATE_CLEARED, + LCS_CH_STATE_ERROR, }; /** diff --git a/drivers/s390/net/netiucv.c b/drivers/s390/net/netiucv.c index 2688894743396c429196bfdd93e6c4e0d3d06498..4d18d6419ddc016fd08560bdef7f46af58335b5b 100644 --- a/drivers/s390/net/netiucv.c +++ b/drivers/s390/net/netiucv.c @@ -1904,7 +1904,6 @@ static void netiucv_setup_netdevice(struct net_device *dev) dev->type = ARPHRD_SLIP; dev->tx_queue_len = NETIUCV_QUEUELEN_DEFAULT; dev->flags = IFF_POINTOPOINT | IFF_NOARP; - SET_MODULE_OWNER(dev); } /** diff --git a/drivers/s390/net/qeth.h b/drivers/s390/net/qeth.h index 6d4959807abcc0e291f217bfcccb6898360c2de9..8c6b72d05b1d63e9655bf19223704182f74a18f7 100644 --- a/drivers/s390/net/qeth.h +++ b/drivers/s390/net/qeth.h @@ -833,8 +833,7 @@ struct qeth_card { struct qeth_qdio_info qdio; struct qeth_perf_stats perf_stats; int use_hard_stop; - int (*orig_hard_header)(struct sk_buff *,struct net_device *, - unsigned short,void *,void *,unsigned); + const struct header_ops *orig_header_ops; struct qeth_osn_info osn_info; atomic_t force_alloc_skb; }; diff --git a/drivers/s390/net/qeth_eddp.c b/drivers/s390/net/qeth_eddp.c index 70108fb1690653f58c08d8a4e09e0f79834041af..e3c268cfbffee855eae1c17a8274b9929c37390e 100644 --- a/drivers/s390/net/qeth_eddp.c +++ b/drivers/s390/net/qeth_eddp.c @@ -159,13 +159,15 @@ qeth_eddp_fill_buffer(struct qeth_qdio_out_q *queue, buffer = buf->buffer; /* fill one skb into buffer */ for (i = 0; i < ctx->elements_per_skb; ++i){ - buffer->element[buf->next_element_to_fill].addr = - ctx->elements[element].addr; - buffer->element[buf->next_element_to_fill].length = - 
ctx->elements[element].length; - buffer->element[buf->next_element_to_fill].flags = - ctx->elements[element].flags; - buf->next_element_to_fill++; + if (ctx->elements[element].length != 0) { + buffer->element[buf->next_element_to_fill]. + addr = ctx->elements[element].addr; + buffer->element[buf->next_element_to_fill]. + length = ctx->elements[element].length; + buffer->element[buf->next_element_to_fill]. + flags = ctx->elements[element].flags; + buf->next_element_to_fill++; + } element++; elements--; } diff --git a/drivers/s390/net/qeth_main.c b/drivers/s390/net/qeth_main.c index f3e6fbeb2123192d4f1b279e1bcaedfb60a30c23..a2d08c9ba3c4a9fe49885445c599353d8d2d8085 100644 --- a/drivers/s390/net/qeth_main.c +++ b/drivers/s390/net/qeth_main.c @@ -160,6 +160,9 @@ qeth_set_multicast_list(struct net_device *); static void qeth_setadp_promisc_mode(struct qeth_card *); +static int +qeth_hard_header_parse(const struct sk_buff *skb, unsigned char *haddr); + static void qeth_notify_processes(void) { @@ -820,14 +823,15 @@ __qeth_delete_all_mc(struct qeth_card *card, unsigned long *flags) again: list_for_each_entry_safe(addr, tmp, &card->ip_list, entry) { if (addr->is_multicast) { + list_del(&addr->entry); spin_unlock_irqrestore(&card->ip_lock, *flags); rc = qeth_deregister_addr_entry(card, addr); spin_lock_irqsave(&card->ip_lock, *flags); if (!rc) { - list_del(&addr->entry); kfree(addr); goto again; - } + } else + list_add(&addr->entry, &card->ip_list); } } } @@ -2698,10 +2702,15 @@ qeth_process_inbound_buffer(struct qeth_card *card, qeth_layer2_rebuild_skb(card, skb, hdr); else if (hdr->hdr.l3.id == QETH_HEADER_TYPE_LAYER3) vlan_tag = qeth_rebuild_skb(card, skb, hdr); - else { /*in case of OSN*/ + else if (hdr->hdr.osn.id == QETH_HEADER_TYPE_OSN) { skb_push(skb, sizeof(struct qeth_hdr)); skb_copy_to_linear_data(skb, hdr, sizeof(struct qeth_hdr)); + } else { /* unknown header type */ + dev_kfree_skb_any(skb); + QETH_DBF_TEXT(trace, 3, "inbunkno"); + QETH_DBF_HEX(control, 3, hdr, QETH_DBF_CONTROL_LEN); + continue; } /* is device UP ? 
*/ if (!(card->dev->flags & IFF_UP)){ @@ -3787,8 +3796,8 @@ qeth_get_netdevice(enum qeth_card_types type, enum qeth_link_types linktype) /*hard_header fake function; used in case fake_ll is set */ static int qeth_fake_header(struct sk_buff *skb, struct net_device *dev, - unsigned short type, void *daddr, void *saddr, - unsigned len) + unsigned short type, const void *daddr, const void *saddr, + unsigned len) { if(dev->type == ARPHRD_IEEE802_TR){ struct trh_hdr *hdr; @@ -3811,6 +3820,11 @@ qeth_fake_header(struct sk_buff *skb, struct net_device *dev, } } +static const struct header_ops qeth_fake_ops = { + .create = qeth_fake_header, + .parse = qeth_hard_header_parse, +}; + static int qeth_send_packet(struct qeth_card *, struct sk_buff *); @@ -4500,7 +4514,8 @@ qeth_do_send_packet(struct qeth_card *card, struct qeth_qdio_out_q *queue, /* check if we have enough elements (including following * free buffers) to handle eddp context */ if (qeth_eddp_check_buffers_for_context(queue,ctx) < 0){ - printk("eddp tx_dropped 1\n"); + if (net_ratelimit()) + PRINT_WARN("eddp tx_dropped 1\n"); rc = -EBUSY; goto out; } @@ -4649,7 +4664,7 @@ qeth_send_packet(struct qeth_card *card, struct sk_buff *skb) [qeth_get_priority_queue(card, skb, ipv, cast_type)]; if (!card->options.layer2) { ipv = qeth_get_ip_version(skb); - if ((card->dev->hard_header == qeth_fake_header) && ipv) { + if ((card->dev->header_ops == &qeth_fake_ops) && ipv) { new_skb = qeth_pskb_unshare(skb, GFP_ATOMIC); if (!new_skb) return -ENOMEM; @@ -4711,8 +4726,8 @@ qeth_send_packet(struct qeth_card *card, struct sk_buff *skb) rc = qeth_do_send_packet(card, queue, new_skb, hdr, elements_needed, ctx); else { - if ((skb->protocol == htons(ETH_P_ARP)) && - (card->dev->flags & IFF_NOARP)) { + if ((!card->options.layer2) && + (ipv == 0)) { __qeth_free_new_skb(skb, new_skb); return -EPERM; } @@ -6561,12 +6576,16 @@ static struct ethtool_ops qeth_ethtool_ops = { }; static int -qeth_hard_header_parse(struct sk_buff *skb, unsigned char *haddr) +qeth_hard_header_parse(const struct sk_buff *skb, unsigned char *haddr) { - struct qeth_card *card; - struct ethhdr *eth; + const struct qeth_card *card; + const struct ethhdr *eth; + struct net_device *dev = skb->dev; - card = qeth_get_card_from_dev(skb->dev); + if (dev->type != ARPHRD_IEEE802_TR) + return 0; + + card = qeth_get_card_from_dev(dev); if (card->options.layer2) goto haveheader; #ifdef CONFIG_QETH_IPV6 @@ -6596,6 +6615,10 @@ qeth_hard_header_parse(struct sk_buff *skb, unsigned char *haddr) return ETH_ALEN; } +static const struct header_ops qeth_null_ops = { + .parse = qeth_hard_header_parse, +}; + static int qeth_netdev_init(struct net_device *dev) { @@ -6620,12 +6643,8 @@ qeth_netdev_init(struct net_device *dev) dev->vlan_rx_kill_vid = qeth_vlan_rx_kill_vid; dev->vlan_rx_add_vid = qeth_vlan_rx_add_vid; #endif - if (qeth_get_netdev_flags(card) & IFF_NOARP) { - dev->rebuild_header = NULL; - dev->hard_header = NULL; - dev->header_cache_update = NULL; - dev->hard_header_cache = NULL; - } + dev->header_ops = &qeth_null_ops; + #ifdef CONFIG_QETH_IPV6 /*IPv6 address autoconfiguration stuff*/ if (!(card->info.unique_id & UNIQUE_ID_NOT_BY_CARD)) @@ -6633,11 +6652,8 @@ qeth_netdev_init(struct net_device *dev) #endif if (card->options.fake_ll && (qeth_get_netdev_flags(card) & IFF_NOARP)) - dev->hard_header = qeth_fake_header; - if (dev->type == ARPHRD_IEEE802_TR) - dev->hard_header_parse = NULL; - else - dev->hard_header_parse = qeth_hard_header_parse; + dev->header_ops = &qeth_fake_ops; + dev->set_mac_address = 
qeth_layer2_set_mac_address; dev->flags |= qeth_get_netdev_flags(card); if ((card->options.fake_broadcast) || @@ -6649,7 +6665,6 @@ qeth_netdev_init(struct net_device *dev) dev->mtu = card->info.initial_mtu; if (card->info.type != QETH_CARD_TYPE_OSN) SET_ETHTOOL_OPS(dev, &qeth_ethtool_ops); - SET_MODULE_OWNER(dev); return 0; } @@ -6741,10 +6756,10 @@ qeth_hardsetup_card(struct qeth_card *card) } /*network device will be recovered*/ if (card->dev) { - card->dev->hard_header = card->orig_hard_header; + card->dev->header_ops = card->orig_header_ops; if (card->options.fake_ll && (qeth_get_netdev_flags(card) & IFF_NOARP)) - card->dev->hard_header = qeth_fake_header; + card->dev->header_ops = &qeth_fake_ops; return 0; } /* at first set_online allocate netdev */ @@ -6758,7 +6773,7 @@ qeth_hardsetup_card(struct qeth_card *card) goto out; } card->dev->priv = card; - card->orig_hard_header = card->dev->hard_header; + card->orig_header_ops = card->dev->header_ops; card->dev->type = qeth_get_arphdr_type(card->info.type, card->info.link_type); card->dev->init = qeth_netdev_init; @@ -8309,7 +8324,7 @@ qeth_arp_constructor(struct neighbour *neigh) if (card == NULL) goto out; if((card->options.layer2) || - (card->dev->hard_header == qeth_fake_header)) + (card->dev->header_ops == &qeth_fake_ops)) goto out; rcu_read_lock(); diff --git a/drivers/scsi/scsi_netlink.c b/drivers/scsi/scsi_netlink.c index 4bf9aa547c78cebbce7096bef1d4632c9a9abe8f..40579edca101a465f997266343a5862ccebcb2c0 100644 --- a/drivers/scsi/scsi_netlink.c +++ b/drivers/scsi/scsi_netlink.c @@ -64,7 +64,7 @@ scsi_nl_rcv_msg(struct sk_buff *skb) if (nlh->nlmsg_type != SCSI_TRANSPORT_MSG) { err = -EBADMSG; - goto next_msg; + return; } hdr = NLMSG_DATA(nlh); @@ -98,27 +98,6 @@ scsi_nl_rcv_msg(struct sk_buff *skb) } -/** - * scsi_nl_rcv_msg - - * Receive handler for a socket. Extracts a received message buffer from - * the socket, and starts message processing. - * - * @sk: socket - * @len: unused - * - **/ -static void -scsi_nl_rcv(struct sock *sk, int len) -{ - struct sk_buff *skb; - - while ((skb = skb_dequeue(&sk->sk_receive_queue))) { - scsi_nl_rcv_msg(skb); - kfree_skb(skb); - } -} - - /** * scsi_nl_rcv_event - * Event handler for a netlink socket. @@ -167,8 +146,8 @@ scsi_netlink_init(void) return; } - scsi_nl_sock = netlink_kernel_create(NETLINK_SCSITRANSPORT, - SCSI_NL_GRP_CNT, scsi_nl_rcv, NULL, + scsi_nl_sock = netlink_kernel_create(&init_net, NETLINK_SCSITRANSPORT, + SCSI_NL_GRP_CNT, scsi_nl_rcv_msg, NULL, THIS_MODULE); if (!scsi_nl_sock) { printk(KERN_ERR "%s: register of recieve handler failed\n", diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c index 34c1860a259d8ac5ea458db279e263c13ed0deed..5428d15f23c617d5bfc579baea6391664c1c47c4 100644 --- a/drivers/scsi/scsi_transport_iscsi.c +++ b/drivers/scsi/scsi_transport_iscsi.c @@ -1097,61 +1097,49 @@ iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh) } /* - * Get message from skb (based on rtnetlink_rcv_skb). Each message is - * processed by iscsi_if_recv_msg. Malformed skbs with wrong lengths or - * invalid creds are discarded silently. + * Get message from skb. Each message is processed by iscsi_if_recv_msg. + * Malformed skbs with wrong lengths or invalid creds are not processed. 
*/ static void -iscsi_if_rx(struct sock *sk, int len) +iscsi_if_rx(struct sk_buff *skb) { - struct sk_buff *skb; - mutex_lock(&rx_queue_mutex); - while ((skb = skb_dequeue(&sk->sk_receive_queue)) != NULL) { - if (NETLINK_CREDS(skb)->uid) { - skb_pull(skb, skb->len); - goto free_skb; + while (skb->len >= NLMSG_SPACE(0)) { + int err; + uint32_t rlen; + struct nlmsghdr *nlh; + struct iscsi_uevent *ev; + + nlh = nlmsg_hdr(skb); + if (nlh->nlmsg_len < sizeof(*nlh) || + skb->len < nlh->nlmsg_len) { + break; } - while (skb->len >= NLMSG_SPACE(0)) { - int err; - uint32_t rlen; - struct nlmsghdr *nlh; - struct iscsi_uevent *ev; + ev = NLMSG_DATA(nlh); + rlen = NLMSG_ALIGN(nlh->nlmsg_len); + if (rlen > skb->len) + rlen = skb->len; - nlh = nlmsg_hdr(skb); - if (nlh->nlmsg_len < sizeof(*nlh) || - skb->len < nlh->nlmsg_len) { - break; - } - - ev = NLMSG_DATA(nlh); - rlen = NLMSG_ALIGN(nlh->nlmsg_len); - if (rlen > skb->len) - rlen = skb->len; - - err = iscsi_if_recv_msg(skb, nlh); - if (err) { - ev->type = ISCSI_KEVENT_IF_ERROR; - ev->iferror = err; - } - do { - /* - * special case for GET_STATS: - * on success - sending reply and stats from - * inside of if_recv_msg(), - * on error - fall through. - */ - if (ev->type == ISCSI_UEVENT_GET_STATS && !err) - break; - err = iscsi_if_send_reply( - NETLINK_CREDS(skb)->pid, nlh->nlmsg_seq, - nlh->nlmsg_type, 0, 0, ev, sizeof(*ev)); - } while (err < 0 && err != -ECONNREFUSED); - skb_pull(skb, rlen); + err = iscsi_if_recv_msg(skb, nlh); + if (err) { + ev->type = ISCSI_KEVENT_IF_ERROR; + ev->iferror = err; } -free_skb: - kfree_skb(skb); + do { + /* + * special case for GET_STATS: + * on success - sending reply and stats from + * inside of if_recv_msg(), + * on error - fall through. + */ + if (ev->type == ISCSI_UEVENT_GET_STATS && !err) + break; + err = iscsi_if_send_reply( + NETLINK_CREDS(skb)->pid, nlh->nlmsg_seq, + nlh->nlmsg_type, 0, 0, ev, sizeof(*ev)); + } while (err < 0 && err != -ECONNREFUSED); + skb_pull(skb, rlen); } mutex_unlock(&rx_queue_mutex); } @@ -1523,7 +1511,7 @@ static __init int iscsi_transport_init(void) if (err) goto unregister_conn_class; - nls = netlink_kernel_create(NETLINK_ISCSI, 1, iscsi_if_rx, NULL, + nls = netlink_kernel_create(&init_net, NETLINK_ISCSI, 1, iscsi_if_rx, NULL, THIS_MODULE); if (!nls) { err = -ENOBUFS; diff --git a/drivers/ssb/Kconfig b/drivers/ssb/Kconfig new file mode 100644 index 0000000000000000000000000000000000000000..b4a5e5e9d9fc4bd15ae52a9d78a9700ff3ee2cf8 --- /dev/null +++ b/drivers/ssb/Kconfig @@ -0,0 +1,117 @@ +menu "Sonics Silicon Backplane" + +config SSB_POSSIBLE + bool + depends on HAS_IOMEM + default y + +config SSB + tristate "Sonics Silicon Backplane support" + depends on SSB_POSSIBLE + help + Support for the Sonics Silicon Backplane bus. + You only need to enable this option, if you are + configuring a kernel for an embedded system with + this bus. + It will be auto-selected if needed in other + environments. + + The module will be called ssb. + + If unsure, say N. + +config SSB_PCIHOST_POSSIBLE + bool + depends on SSB && PCI + default y + +config SSB_PCIHOST + bool "Support for SSB on PCI-bus host" + depends on SSB_PCIHOST_POSSIBLE + default y + help + Support for a Sonics Silicon Backplane on top + of a PCI device. 
+ + If unsure, say Y + +config SSB_PCMCIAHOST_POSSIBLE + bool + depends on SSB && PCMCIA && EXPERIMENTAL + default y + +config SSB_PCMCIAHOST + bool "Support for SSB on PCMCIA-bus host (EXPERIMENTAL)" + depends on SSB_PCMCIAHOST_POSSIBLE + help + Support for a Sonics Silicon Backplane on top + of a PCMCIA device. + + If unsure, say N + +config SSB_SILENT + bool "No SSB kernel messages" + depends on SSB && EMBEDDED + help + This option turns off all Sonics Silicon Backplane printks. + Note that you won't be able to identify problems, once + messages are turned off. + This might only be desired for production kernels on + embedded devices to reduce the kernel size. + + Say N + +config SSB_DEBUG + bool "SSB debugging" + depends on SSB && !SSB_SILENT + help + This turns on additional runtime checks and debugging + messages. Turn this on for SSB troubleshooting. + + If unsure, say N + +config SSB_SERIAL + bool + depends on SSB + # ChipCommon and ExtIf serial support routines. + +config SSB_DRIVER_PCICORE_POSSIBLE + bool + depends on SSB_PCIHOST + default y + +config SSB_DRIVER_PCICORE + bool "SSB PCI core driver" + depends on SSB_DRIVER_PCICORE_POSSIBLE + help + Driver for the Sonics Silicon Backplane attached + Broadcom PCI core. + + If unsure, say Y + +config SSB_PCICORE_HOSTMODE + bool "Hostmode support for SSB PCI core (EXPERIMENTAL)" + depends on SSB_DRIVER_PCICORE && SSB_DRIVER_MIPS && EXPERIMENTAL + help + PCIcore hostmode operation (external PCI bus). + +config SSB_DRIVER_MIPS + bool "SSB Broadcom MIPS core driver (EXPERIMENTAL)" + depends on SSB && MIPS && EXPERIMENTAL + select SSB_SERIAL + help + Driver for the Sonics Silicon Backplane attached + Broadcom MIPS core. + + If unsure, say N + +config SSB_DRIVER_EXTIF + bool "SSB Broadcom EXTIF core driver (EXPERIMENTAL)" + depends on SSB_DRIVER_MIPS && EXPERIMENTAL + help + Driver for the Sonics Silicon Backplane attached + Broadcom EXTIF core. + + If unsure, say N + +endmenu diff --git a/drivers/ssb/Makefile b/drivers/ssb/Makefile new file mode 100644 index 0000000000000000000000000000000000000000..7be3975958058539f7a9175175d6ef72a7fc53ab --- /dev/null +++ b/drivers/ssb/Makefile @@ -0,0 +1,18 @@ +# core +ssb-y += main.o scan.o + +# host support +ssb-$(CONFIG_SSB_PCIHOST) += pci.o pcihost_wrapper.o +ssb-$(CONFIG_SSB_PCMCIAHOST) += pcmcia.o + +# built-in drivers +ssb-y += driver_chipcommon.o +ssb-$(CONFIG_SSB_DRIVER_MIPS) += driver_mipscore.o +ssb-$(CONFIG_SSB_DRIVER_EXTIF) += driver_extif.o +ssb-$(CONFIG_SSB_DRIVER_PCICORE) += driver_pcicore.o + +# b43 pci-ssb-bridge driver +# Not strictly a part of SSB, but kept here for convenience +ssb-$(CONFIG_SSB_PCIHOST) += b43_pci_bridge.o + +obj-$(CONFIG_SSB) += ssb.o diff --git a/drivers/ssb/b43_pci_bridge.c b/drivers/ssb/b43_pci_bridge.c new file mode 100644 index 0000000000000000000000000000000000000000..f145d8a4cfdeb0b629f5d36f7e64df3007db181b --- /dev/null +++ b/drivers/ssb/b43_pci_bridge.c @@ -0,0 +1,48 @@ +/* + * Broadcom 43xx PCI-SSB bridge module + * + * This technically is a seperate PCI driver module, but + * because of its small size we include it in the SSB core + * instead of creating a standalone module. + * + * Copyright 2007 Michael Buesch + * + * Licensed under the GNU/GPL. See COPYING for details. 
+ */ + +#include +#include + +#include "ssb_private.h" + + +static const struct pci_device_id b43_pci_bridge_tbl[] = { + { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4301) }, + { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4307) }, + { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4311) }, + { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4312) }, + { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4318) }, + { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4319) }, + { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4320) }, + { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4321) }, + { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4324) }, + { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4325) }, + { 0, }, +}; +MODULE_DEVICE_TABLE(pci, b43_pci_bridge_tbl); + +static struct pci_driver b43_pci_bridge_driver = { + .name = "b43-pci-bridge", + .id_table = b43_pci_bridge_tbl, +}; + + +int __init b43_pci_ssb_bridge_init(void) +{ + return ssb_pcihost_register(&b43_pci_bridge_driver); +} + +void __exit b43_pci_ssb_bridge_exit(void) +{ + ssb_pcihost_unregister(&b43_pci_bridge_driver); +} diff --git a/drivers/ssb/driver_chipcommon.c b/drivers/ssb/driver_chipcommon.c new file mode 100644 index 0000000000000000000000000000000000000000..6fbf1c53b6f29a9c1ec796e267ec966211b3118e --- /dev/null +++ b/drivers/ssb/driver_chipcommon.c @@ -0,0 +1,445 @@ +/* + * Sonics Silicon Backplane + * Broadcom ChipCommon core driver + * + * Copyright 2005, Broadcom Corporation + * Copyright 2006, 2007, Michael Buesch + * + * Licensed under the GNU/GPL. See COPYING for details. + */ + +#include +#include +#include + +#include "ssb_private.h" + + +/* Clock sources */ +enum ssb_clksrc { + /* PCI clock */ + SSB_CHIPCO_CLKSRC_PCI, + /* Crystal slow clock oscillator */ + SSB_CHIPCO_CLKSRC_XTALOS, + /* Low power oscillator */ + SSB_CHIPCO_CLKSRC_LOPWROS, +}; + + +static inline u32 chipco_read32(struct ssb_chipcommon *cc, + u16 offset) +{ + return ssb_read32(cc->dev, offset); +} + +static inline void chipco_write32(struct ssb_chipcommon *cc, + u16 offset, + u32 value) +{ + ssb_write32(cc->dev, offset, value); +} + +static inline void chipco_write32_masked(struct ssb_chipcommon *cc, u16 offset, + u32 mask, u32 value) +{ + value &= mask; + value |= chipco_read32(cc, offset) & ~mask; + chipco_write32(cc, offset, value); +} + +void ssb_chipco_set_clockmode(struct ssb_chipcommon *cc, + enum ssb_clkmode mode) +{ + struct ssb_device *ccdev = cc->dev; + struct ssb_bus *bus; + u32 tmp; + + if (!ccdev) + return; + bus = ccdev->bus; + /* chipcommon cores prior to rev6 don't support dynamic clock control */ + if (ccdev->id.revision < 6) + return; + /* chipcommon cores rev10 are a whole new ball game */ + if (ccdev->id.revision >= 10) + return; + if (!(cc->capabilities & SSB_CHIPCO_CAP_PCTL)) + return; + + switch (mode) { + case SSB_CLKMODE_SLOW: + tmp = chipco_read32(cc, SSB_CHIPCO_SLOWCLKCTL); + tmp |= SSB_CHIPCO_SLOWCLKCTL_FSLOW; + chipco_write32(cc, SSB_CHIPCO_SLOWCLKCTL, tmp); + break; + case SSB_CLKMODE_FAST: + ssb_pci_xtal(bus, SSB_GPIO_XTAL, 1); /* Force crystal on */ + tmp = chipco_read32(cc, SSB_CHIPCO_SLOWCLKCTL); + tmp &= ~SSB_CHIPCO_SLOWCLKCTL_FSLOW; + tmp |= SSB_CHIPCO_SLOWCLKCTL_IPLL; + chipco_write32(cc, SSB_CHIPCO_SLOWCLKCTL, tmp); + break; + case SSB_CLKMODE_DYNAMIC: + tmp = chipco_read32(cc, SSB_CHIPCO_SLOWCLKCTL); + tmp &= ~SSB_CHIPCO_SLOWCLKCTL_FSLOW; + tmp &= ~SSB_CHIPCO_SLOWCLKCTL_IPLL; + tmp &= ~SSB_CHIPCO_SLOWCLKCTL_ENXTAL; + if ((tmp & SSB_CHIPCO_SLOWCLKCTL_SRC) != SSB_CHIPCO_SLOWCLKCTL_SRC_XTAL) + tmp |= SSB_CHIPCO_SLOWCLKCTL_ENXTAL; + chipco_write32(cc, SSB_CHIPCO_SLOWCLKCTL, tmp); + + /* for dynamic 
control, we have to release our xtal_pu "force on" */ + if (tmp & SSB_CHIPCO_SLOWCLKCTL_ENXTAL) + ssb_pci_xtal(bus, SSB_GPIO_XTAL, 0); + break; + default: + SSB_WARN_ON(1); + } +} + +/* Get the Slow Clock Source */ +static enum ssb_clksrc chipco_pctl_get_slowclksrc(struct ssb_chipcommon *cc) +{ + struct ssb_bus *bus = cc->dev->bus; + u32 uninitialized_var(tmp); + + if (cc->dev->id.revision < 6) { + if (bus->bustype == SSB_BUSTYPE_SSB || + bus->bustype == SSB_BUSTYPE_PCMCIA) + return SSB_CHIPCO_CLKSRC_XTALOS; + if (bus->bustype == SSB_BUSTYPE_PCI) { + pci_read_config_dword(bus->host_pci, SSB_GPIO_OUT, &tmp); + if (tmp & 0x10) + return SSB_CHIPCO_CLKSRC_PCI; + return SSB_CHIPCO_CLKSRC_XTALOS; + } + } + if (cc->dev->id.revision < 10) { + tmp = chipco_read32(cc, SSB_CHIPCO_SLOWCLKCTL); + tmp &= 0x7; + if (tmp == 0) + return SSB_CHIPCO_CLKSRC_LOPWROS; + if (tmp == 1) + return SSB_CHIPCO_CLKSRC_XTALOS; + if (tmp == 2) + return SSB_CHIPCO_CLKSRC_PCI; + } + + return SSB_CHIPCO_CLKSRC_XTALOS; +} + +/* Get maximum or minimum (depending on get_max flag) slowclock frequency. */ +static int chipco_pctl_clockfreqlimit(struct ssb_chipcommon *cc, int get_max) +{ + int uninitialized_var(limit); + enum ssb_clksrc clocksrc; + int divisor = 1; + u32 tmp; + + clocksrc = chipco_pctl_get_slowclksrc(cc); + if (cc->dev->id.revision < 6) { + switch (clocksrc) { + case SSB_CHIPCO_CLKSRC_PCI: + divisor = 64; + break; + case SSB_CHIPCO_CLKSRC_XTALOS: + divisor = 32; + break; + default: + SSB_WARN_ON(1); + } + } else if (cc->dev->id.revision < 10) { + switch (clocksrc) { + case SSB_CHIPCO_CLKSRC_LOPWROS: + break; + case SSB_CHIPCO_CLKSRC_XTALOS: + case SSB_CHIPCO_CLKSRC_PCI: + tmp = chipco_read32(cc, SSB_CHIPCO_SLOWCLKCTL); + divisor = (tmp >> 16) + 1; + divisor *= 4; + break; + } + } else { + tmp = chipco_read32(cc, SSB_CHIPCO_SYSCLKCTL); + divisor = (tmp >> 16) + 1; + divisor *= 4; + } + + switch (clocksrc) { + case SSB_CHIPCO_CLKSRC_LOPWROS: + if (get_max) + limit = 43000; + else + limit = 25000; + break; + case SSB_CHIPCO_CLKSRC_XTALOS: + if (get_max) + limit = 20200000; + else + limit = 19800000; + break; + case SSB_CHIPCO_CLKSRC_PCI: + if (get_max) + limit = 34000000; + else + limit = 25000000; + break; + } + limit /= divisor; + + return limit; +} + +static void chipco_powercontrol_init(struct ssb_chipcommon *cc) +{ + struct ssb_bus *bus = cc->dev->bus; + + if (bus->chip_id == 0x4321) { + if (bus->chip_rev == 0) + chipco_write32(cc, SSB_CHIPCO_CHIPCTL, 0x3A4); + else if (bus->chip_rev == 1) + chipco_write32(cc, SSB_CHIPCO_CHIPCTL, 0xA4); + } + + if (!(cc->capabilities & SSB_CHIPCO_CAP_PCTL)) + return; + + if (cc->dev->id.revision >= 10) { + /* Set Idle Power clock rate to 1Mhz */ + chipco_write32(cc, SSB_CHIPCO_SYSCLKCTL, + (chipco_read32(cc, SSB_CHIPCO_SYSCLKCTL) & + 0x0000FFFF) | 0x00040000); + } else { + int maxfreq; + + maxfreq = chipco_pctl_clockfreqlimit(cc, 1); + chipco_write32(cc, SSB_CHIPCO_PLLONDELAY, + (maxfreq * 150 + 999999) / 1000000); + chipco_write32(cc, SSB_CHIPCO_FREFSELDELAY, + (maxfreq * 15 + 999999) / 1000000); + } +} + +static void calc_fast_powerup_delay(struct ssb_chipcommon *cc) +{ + struct ssb_bus *bus = cc->dev->bus; + int minfreq; + unsigned int tmp; + u32 pll_on_delay; + + if (bus->bustype != SSB_BUSTYPE_PCI) + return; + if (!(cc->capabilities & SSB_CHIPCO_CAP_PCTL)) + return; + + minfreq = chipco_pctl_clockfreqlimit(cc, 0); + pll_on_delay = chipco_read32(cc, SSB_CHIPCO_PLLONDELAY); + tmp = (((pll_on_delay + 2) * 1000000) + (minfreq - 1)) / minfreq; + SSB_WARN_ON(tmp & ~0xFFFF); + + 
cc->fast_pwrup_delay = tmp; +} + +void ssb_chipcommon_init(struct ssb_chipcommon *cc) +{ + if (!cc->dev) + return; /* We don't have a ChipCommon */ + chipco_powercontrol_init(cc); + ssb_chipco_set_clockmode(cc, SSB_CLKMODE_FAST); + calc_fast_powerup_delay(cc); +} + +void ssb_chipco_suspend(struct ssb_chipcommon *cc, pm_message_t state) +{ + if (!cc->dev) + return; + ssb_chipco_set_clockmode(cc, SSB_CLKMODE_SLOW); +} + +void ssb_chipco_resume(struct ssb_chipcommon *cc) +{ + if (!cc->dev) + return; + chipco_powercontrol_init(cc); + ssb_chipco_set_clockmode(cc, SSB_CLKMODE_FAST); +} + +/* Get the processor clock */ +void ssb_chipco_get_clockcpu(struct ssb_chipcommon *cc, + u32 *plltype, u32 *n, u32 *m) +{ + *n = chipco_read32(cc, SSB_CHIPCO_CLOCK_N); + *plltype = (cc->capabilities & SSB_CHIPCO_CAP_PLLT); + switch (*plltype) { + case SSB_PLLTYPE_2: + case SSB_PLLTYPE_4: + case SSB_PLLTYPE_6: + case SSB_PLLTYPE_7: + *m = chipco_read32(cc, SSB_CHIPCO_CLOCK_MIPS); + break; + case SSB_PLLTYPE_3: + /* 5350 uses m2 to control mips */ + *m = chipco_read32(cc, SSB_CHIPCO_CLOCK_M2); + break; + default: + *m = chipco_read32(cc, SSB_CHIPCO_CLOCK_SB); + break; + } +} + +/* Get the bus clock */ +void ssb_chipco_get_clockcontrol(struct ssb_chipcommon *cc, + u32 *plltype, u32 *n, u32 *m) +{ + *n = chipco_read32(cc, SSB_CHIPCO_CLOCK_N); + *plltype = (cc->capabilities & SSB_CHIPCO_CAP_PLLT); + switch (*plltype) { + case SSB_PLLTYPE_6: /* 100/200 or 120/240 only */ + *m = chipco_read32(cc, SSB_CHIPCO_CLOCK_MIPS); + break; + case SSB_PLLTYPE_3: /* 25Mhz, 2 dividers */ + if (cc->dev->bus->chip_id != 0x5365) { + *m = chipco_read32(cc, SSB_CHIPCO_CLOCK_M2); + break; + } + /* Fallthough */ + default: + *m = chipco_read32(cc, SSB_CHIPCO_CLOCK_SB); + } +} + +void ssb_chipco_timing_init(struct ssb_chipcommon *cc, + unsigned long ns) +{ + struct ssb_device *dev = cc->dev; + struct ssb_bus *bus = dev->bus; + u32 tmp; + + /* set register for external IO to control LED. 
*/ + chipco_write32(cc, SSB_CHIPCO_PROG_CFG, 0x11); + tmp = DIV_ROUND_UP(10, ns) << SSB_PROG_WCNT_3_SHIFT; /* Waitcount-3 = 10ns */ + tmp |= DIV_ROUND_UP(40, ns) << SSB_PROG_WCNT_1_SHIFT; /* Waitcount-1 = 40ns */ + tmp |= DIV_ROUND_UP(240, ns); /* Waitcount-0 = 240ns */ + chipco_write32(cc, SSB_CHIPCO_PROG_WAITCNT, tmp); /* 0x01020a0c for a 100Mhz clock */ + + /* Set timing for the flash */ + tmp = DIV_ROUND_UP(10, ns) << SSB_FLASH_WCNT_3_SHIFT; /* Waitcount-3 = 10nS */ + tmp |= DIV_ROUND_UP(10, ns) << SSB_FLASH_WCNT_1_SHIFT; /* Waitcount-1 = 10nS */ + tmp |= DIV_ROUND_UP(120, ns); /* Waitcount-0 = 120nS */ + if ((bus->chip_id == 0x5365) || + (dev->id.revision < 9)) + chipco_write32(cc, SSB_CHIPCO_FLASH_WAITCNT, tmp); + if ((bus->chip_id == 0x5365) || + (dev->id.revision < 9) || + ((bus->chip_id == 0x5350) && (bus->chip_rev == 0))) + chipco_write32(cc, SSB_CHIPCO_PCMCIA_MEMWAIT, tmp); + + if (bus->chip_id == 0x5350) { + /* Enable EXTIF */ + tmp = DIV_ROUND_UP(10, ns) << SSB_PROG_WCNT_3_SHIFT; /* Waitcount-3 = 10ns */ + tmp |= DIV_ROUND_UP(20, ns) << SSB_PROG_WCNT_2_SHIFT; /* Waitcount-2 = 20ns */ + tmp |= DIV_ROUND_UP(100, ns) << SSB_PROG_WCNT_1_SHIFT; /* Waitcount-1 = 100ns */ + tmp |= DIV_ROUND_UP(120, ns); /* Waitcount-0 = 120ns */ + chipco_write32(cc, SSB_CHIPCO_PROG_WAITCNT, tmp); /* 0x01020a0c for a 100Mhz clock */ + } +} + +/* Set chip watchdog reset timer to fire in 'ticks' backplane cycles */ +void ssb_chipco_watchdog_timer_set(struct ssb_chipcommon *cc, u32 ticks) +{ + /* instant NMI */ + chipco_write32(cc, SSB_CHIPCO_WATCHDOG, ticks); +} + +u32 ssb_chipco_gpio_in(struct ssb_chipcommon *cc, u32 mask) +{ + return chipco_read32(cc, SSB_CHIPCO_GPIOIN) & mask; +} + +void ssb_chipco_gpio_out(struct ssb_chipcommon *cc, u32 mask, u32 value) +{ + chipco_write32_masked(cc, SSB_CHIPCO_GPIOOUT, mask, value); +} + +void ssb_chipco_gpio_outen(struct ssb_chipcommon *cc, u32 mask, u32 value) +{ + chipco_write32_masked(cc, SSB_CHIPCO_GPIOOUTEN, mask, value); +} + +#ifdef CONFIG_SSB_SERIAL +int ssb_chipco_serial_init(struct ssb_chipcommon *cc, + struct ssb_serial_port *ports) +{ + struct ssb_bus *bus = cc->dev->bus; + int nr_ports = 0; + u32 plltype; + unsigned int irq; + u32 baud_base, div; + u32 i, n; + + plltype = (cc->capabilities & SSB_CHIPCO_CAP_PLLT); + irq = ssb_mips_irq(cc->dev); + + if (plltype == SSB_PLLTYPE_1) { + /* PLL clock */ + baud_base = ssb_calc_clock_rate(plltype, + chipco_read32(cc, SSB_CHIPCO_CLOCK_N), + chipco_read32(cc, SSB_CHIPCO_CLOCK_M2)); + div = 1; + } else { + if (cc->dev->id.revision >= 11) { + /* Fixed ALP clock */ + baud_base = 20000000; + div = 1; + /* Set the override bit so we don't divide it */ + chipco_write32(cc, SSB_CHIPCO_CORECTL, + SSB_CHIPCO_CORECTL_UARTCLK0); + } else if (cc->dev->id.revision >= 3) { + /* Internal backplane clock */ + baud_base = ssb_clockspeed(bus); + div = chipco_read32(cc, SSB_CHIPCO_CLKDIV) + & SSB_CHIPCO_CLKDIV_UART; + } else { + /* Fixed internal backplane clock */ + baud_base = 88000000; + div = 48; + } + + /* Clock source depends on strapping if UartClkOverride is unset */ + if ((cc->dev->id.revision > 0) && + !(chipco_read32(cc, SSB_CHIPCO_CORECTL) & SSB_CHIPCO_CORECTL_UARTCLK0)) { + if ((cc->capabilities & SSB_CHIPCO_CAP_UARTCLK) == + SSB_CHIPCO_CAP_UARTCLK_INT) { + /* Internal divided backplane clock */ + baud_base /= div; + } else { + /* Assume external clock of 1.8432 MHz */ + baud_base = 1843200; + } + } + } + + /* Determine the registers of the UARTs */ + n = (cc->capabilities & SSB_CHIPCO_CAP_NRUART); + for (i = 0; i < 
n; i++) { + void __iomem *cc_mmio; + void __iomem *uart_regs; + + cc_mmio = cc->dev->bus->mmio + (cc->dev->core_index * SSB_CORE_SIZE); + uart_regs = cc_mmio + SSB_CHIPCO_UART0_DATA; + /* Offset changed at after rev 0 */ + if (cc->dev->id.revision == 0) + uart_regs += (i * 8); + else + uart_regs += (i * 256); + + nr_ports++; + ports[i].regs = uart_regs; + ports[i].irq = irq; + ports[i].baud_base = baud_base; + ports[i].reg_shift = 0; + } + + return nr_ports; +} +#endif /* CONFIG_SSB_SERIAL */ diff --git a/drivers/ssb/driver_extif.c b/drivers/ssb/driver_extif.c new file mode 100644 index 0000000000000000000000000000000000000000..fe55eb8b038a9e5d469a6936887c6c5fbcd96b33 --- /dev/null +++ b/drivers/ssb/driver_extif.c @@ -0,0 +1,129 @@ +/* + * Sonics Silicon Backplane + * Broadcom EXTIF core driver + * + * Copyright 2005, Broadcom Corporation + * Copyright 2006, 2007, Michael Buesch + * Copyright 2006, 2007, Felix Fietkau + * Copyright 2007, Aurelien Jarno + * + * Licensed under the GNU/GPL. See COPYING for details. + */ + +#include +#include +#include + +#include "ssb_private.h" + + +static inline u32 extif_read32(struct ssb_extif *extif, u16 offset) +{ + return ssb_read32(extif->dev, offset); +} + +static inline void extif_write32(struct ssb_extif *extif, u16 offset, u32 value) +{ + ssb_write32(extif->dev, offset, value); +} + +static inline void extif_write32_masked(struct ssb_extif *extif, u16 offset, + u32 mask, u32 value) +{ + value &= mask; + value |= extif_read32(extif, offset) & ~mask; + extif_write32(extif, offset, value); +} + +#ifdef CONFIG_SSB_SERIAL +static bool serial_exists(u8 *regs) +{ + u8 save_mcr, msr = 0; + + if (regs) { + save_mcr = regs[UART_MCR]; + regs[UART_MCR] = (UART_MCR_LOOP | UART_MCR_OUT2 | UART_MCR_RTS); + msr = regs[UART_MSR] & (UART_MSR_DCD | UART_MSR_RI + | UART_MSR_CTS | UART_MSR_DSR); + regs[UART_MCR] = save_mcr; + } + return (msr == (UART_MSR_DCD | UART_MSR_CTS)); +} + +int ssb_extif_serial_init(struct ssb_extif *extif, struct ssb_serial_port *ports) +{ + u32 i, nr_ports = 0; + + /* Disable GPIO interrupt initially */ + extif_write32(extif, SSB_EXTIF_GPIO_INTPOL, 0); + extif_write32(extif, SSB_EXTIF_GPIO_INTMASK, 0); + + for (i = 0; i < 2; i++) { + void __iomem *uart_regs; + + uart_regs = ioremap_nocache(SSB_EUART, 16); + if (uart_regs) { + uart_regs += (i * 8); + + if (serial_exists(uart_regs) && ports) { + extif_write32(extif, SSB_EXTIF_GPIO_INTMASK, 2); + + nr_ports++; + ports[i].regs = uart_regs; + ports[i].irq = 2; + ports[i].baud_base = 13500000; + ports[i].reg_shift = 0; + } + iounmap(uart_regs); + } + } + return nr_ports; +} +#endif /* CONFIG_SSB_SERIAL */ + +void ssb_extif_timing_init(struct ssb_extif *extif, unsigned long ns) +{ + u32 tmp; + + /* Initialize extif so we can get to the LEDs and external UART */ + extif_write32(extif, SSB_EXTIF_PROG_CFG, SSB_EXTCFG_EN); + + /* Set timing for the flash */ + tmp = DIV_ROUND_UP(10, ns) << SSB_PROG_WCNT_3_SHIFT; + tmp |= DIV_ROUND_UP(40, ns) << SSB_PROG_WCNT_1_SHIFT; + tmp |= DIV_ROUND_UP(120, ns); + extif_write32(extif, SSB_EXTIF_PROG_WAITCNT, tmp); + + /* Set programmable interface timing for external uart */ + tmp = DIV_ROUND_UP(10, ns) << SSB_PROG_WCNT_3_SHIFT; + tmp |= DIV_ROUND_UP(20, ns) << SSB_PROG_WCNT_2_SHIFT; + tmp |= DIV_ROUND_UP(100, ns) << SSB_PROG_WCNT_1_SHIFT; + tmp |= DIV_ROUND_UP(120, ns); + extif_write32(extif, SSB_EXTIF_PROG_WAITCNT, tmp); +} + +void ssb_extif_get_clockcontrol(struct ssb_extif *extif, + u32 *pll_type, u32 *n, u32 *m) +{ + *pll_type = SSB_PLLTYPE_1; + *n = 
extif_read32(extif, SSB_EXTIF_CLOCK_N); + *m = extif_read32(extif, SSB_EXTIF_CLOCK_SB); +} + +u32 ssb_extif_gpio_in(struct ssb_extif *extif, u32 mask) +{ + return extif_read32(extif, SSB_EXTIF_GPIO_IN) & mask; +} + +void ssb_extif_gpio_out(struct ssb_extif *extif, u32 mask, u32 value) +{ + return extif_write32_masked(extif, SSB_EXTIF_GPIO_OUT(0), + mask, value); +} + +void ssb_extif_gpio_outen(struct ssb_extif *extif, u32 mask, u32 value) +{ + return extif_write32_masked(extif, SSB_EXTIF_GPIO_OUTEN(0), + mask, value); +} + diff --git a/drivers/ssb/driver_mipscore.c b/drivers/ssb/driver_mipscore.c new file mode 100644 index 0000000000000000000000000000000000000000..ab8691a325802e19d374e601e6a3c5f3f60fc55a --- /dev/null +++ b/drivers/ssb/driver_mipscore.c @@ -0,0 +1,223 @@ +/* + * Sonics Silicon Backplane + * Broadcom MIPS core driver + * + * Copyright 2005, Broadcom Corporation + * Copyright 2006, 2007, Michael Buesch + * + * Licensed under the GNU/GPL. See COPYING for details. + */ + +#include + +#include +#include +#include +#include + +#include "ssb_private.h" + + +static inline u32 mips_read32(struct ssb_mipscore *mcore, + u16 offset) +{ + return ssb_read32(mcore->dev, offset); +} + +static inline void mips_write32(struct ssb_mipscore *mcore, + u16 offset, + u32 value) +{ + ssb_write32(mcore->dev, offset, value); +} + +static const u32 ipsflag_irq_mask[] = { + 0, + SSB_IPSFLAG_IRQ1, + SSB_IPSFLAG_IRQ2, + SSB_IPSFLAG_IRQ3, + SSB_IPSFLAG_IRQ4, +}; + +static const u32 ipsflag_irq_shift[] = { + 0, + SSB_IPSFLAG_IRQ1_SHIFT, + SSB_IPSFLAG_IRQ2_SHIFT, + SSB_IPSFLAG_IRQ3_SHIFT, + SSB_IPSFLAG_IRQ4_SHIFT, +}; + +static inline u32 ssb_irqflag(struct ssb_device *dev) +{ + return ssb_read32(dev, SSB_TPSFLAG) & SSB_TPSFLAG_BPFLAG; +} + +/* Get the MIPS IRQ assignment for a specified device. + * If unassigned, 0 is returned. 
+ */ +unsigned int ssb_mips_irq(struct ssb_device *dev) +{ + struct ssb_bus *bus = dev->bus; + u32 irqflag; + u32 ipsflag; + u32 tmp; + unsigned int irq; + + irqflag = ssb_irqflag(dev); + ipsflag = ssb_read32(bus->mipscore.dev, SSB_IPSFLAG); + for (irq = 1; irq <= 4; irq++) { + tmp = ((ipsflag & ipsflag_irq_mask[irq]) >> ipsflag_irq_shift[irq]); + if (tmp == irqflag) + break; + } + if (irq == 5) + irq = 0; + + return irq; +} + +static void clear_irq(struct ssb_bus *bus, unsigned int irq) +{ + struct ssb_device *dev = bus->mipscore.dev; + + /* Clear the IRQ in the MIPScore backplane registers */ + if (irq == 0) { + ssb_write32(dev, SSB_INTVEC, 0); + } else { + ssb_write32(dev, SSB_IPSFLAG, + ssb_read32(dev, SSB_IPSFLAG) | + ipsflag_irq_mask[irq]); + } +} + +static void set_irq(struct ssb_device *dev, unsigned int irq) +{ + unsigned int oldirq = ssb_mips_irq(dev); + struct ssb_bus *bus = dev->bus; + struct ssb_device *mdev = bus->mipscore.dev; + u32 irqflag = ssb_irqflag(dev); + + dev->irq = irq + 2; + + ssb_dprintk(KERN_INFO PFX + "set_irq: core 0x%04x, irq %d => %d\n", + dev->id.coreid, oldirq, irq); + /* clear the old irq */ + if (oldirq == 0) + ssb_write32(mdev, SSB_INTVEC, (~(1 << irqflag) & ssb_read32(mdev, SSB_INTVEC))); + else + clear_irq(bus, oldirq); + + /* assign the new one */ + if (irq == 0) + ssb_write32(mdev, SSB_INTVEC, ((1 << irqflag) & ssb_read32(mdev, SSB_INTVEC))); + + irqflag <<= ipsflag_irq_shift[irq]; + irqflag |= (ssb_read32(mdev, SSB_IPSFLAG) & ~ipsflag_irq_mask[irq]); + ssb_write32(mdev, SSB_IPSFLAG, irqflag); +} + +static void ssb_mips_serial_init(struct ssb_mipscore *mcore) +{ + struct ssb_bus *bus = mcore->dev->bus; + + if (bus->extif.dev) + mcore->nr_serial_ports = ssb_extif_serial_init(&bus->extif, mcore->serial_ports); + else if (bus->chipco.dev) + mcore->nr_serial_ports = ssb_chipco_serial_init(&bus->chipco, mcore->serial_ports); + else + mcore->nr_serial_ports = 0; +} + +static void ssb_mips_flash_detect(struct ssb_mipscore *mcore) +{ + struct ssb_bus *bus = mcore->dev->bus; + + mcore->flash_buswidth = 2; + if (bus->chipco.dev) { + mcore->flash_window = 0x1c000000; + mcore->flash_window_size = 0x02000000; + if ((ssb_read32(bus->chipco.dev, SSB_CHIPCO_FLASH_CFG) + & SSB_CHIPCO_CFG_DS16) == 0) + mcore->flash_buswidth = 1; + } else { + mcore->flash_window = 0x1fc00000; + mcore->flash_window_size = 0x00400000; + } +} + +u32 ssb_cpu_clock(struct ssb_mipscore *mcore) +{ + struct ssb_bus *bus = mcore->dev->bus; + u32 pll_type, n, m, rate = 0; + + if (bus->extif.dev) { + ssb_extif_get_clockcontrol(&bus->extif, &pll_type, &n, &m); + } else if (bus->chipco.dev) { + ssb_chipco_get_clockcpu(&bus->chipco, &pll_type, &n, &m); + } else + return 0; + + if ((pll_type == SSB_PLLTYPE_5) || (bus->chip_id == 0x5365)) { + rate = 200000000; + } else { + rate = ssb_calc_clock_rate(pll_type, n, m); + } + + if (pll_type == SSB_PLLTYPE_6) { + rate *= 2; + } + + return rate; +} + +void ssb_mipscore_init(struct ssb_mipscore *mcore) +{ + struct ssb_bus *bus = mcore->dev->bus; + struct ssb_device *dev; + unsigned long hz, ns; + unsigned int irq, i; + + if (!mcore->dev) + return; /* We don't have a MIPS core */ + + ssb_dprintk(KERN_INFO PFX "Initializing MIPS core...\n"); + + hz = ssb_clockspeed(bus); + if (!hz) + hz = 100000000; + ns = 1000000000 / hz; + + if (bus->extif.dev) + ssb_extif_timing_init(&bus->extif, ns); + else if (bus->chipco.dev) + ssb_chipco_timing_init(&bus->chipco, ns); + + /* Assign IRQs to all cores on the bus, start with irq line 2, because serial usually takes 1 */ + 
for (irq = 2, i = 0; i < bus->nr_devices; i++) { + dev = &(bus->devices[i]); + dev->irq = ssb_mips_irq(dev) + 2; + switch (dev->id.coreid) { + case SSB_DEV_USB11_HOST: + /* shouldn't need a separate irq line for non-4710, most of them have a proper + * external usb controller on the pci */ + if ((bus->chip_id == 0x4710) && (irq <= 4)) { + set_irq(dev, irq++); + break; + } + /* fallthrough */ + case SSB_DEV_PCI: + case SSB_DEV_ETHERNET: + case SSB_DEV_80211: + case SSB_DEV_USB20_HOST: + /* These devices get their own IRQ line if available, the rest goes on IRQ0 */ + if (irq <= 4) { + set_irq(dev, irq++); + break; + } + } + } + + ssb_mips_serial_init(mcore); + ssb_mips_flash_detect(mcore); +} diff --git a/drivers/ssb/driver_pcicore.c b/drivers/ssb/driver_pcicore.c new file mode 100644 index 0000000000000000000000000000000000000000..2faaa906d5d6bf98375728ee9af2b47f10e29e7c --- /dev/null +++ b/drivers/ssb/driver_pcicore.c @@ -0,0 +1,576 @@ +/* + * Sonics Silicon Backplane + * Broadcom PCI-core driver + * + * Copyright 2005, Broadcom Corporation + * Copyright 2006, 2007, Michael Buesch + * + * Licensed under the GNU/GPL. See COPYING for details. + */ + +#include +#include +#include + +#include "ssb_private.h" + + +static inline +u32 pcicore_read32(struct ssb_pcicore *pc, u16 offset) +{ + return ssb_read32(pc->dev, offset); +} + +static inline +void pcicore_write32(struct ssb_pcicore *pc, u16 offset, u32 value) +{ + ssb_write32(pc->dev, offset, value); +} + +/************************************************** + * Code for hostmode operation. + **************************************************/ + +#ifdef CONFIG_SSB_PCICORE_HOSTMODE + +#include +/* Probe a 32bit value on the bus and catch bus exceptions. + * Returns nonzero on a bus exception. + * This is MIPS specific */ +#define mips_busprobe32(val, addr) get_dbe((val), ((u32 *)(addr))) + +/* Assume one-hot slot wiring */ +#define SSB_PCI_SLOT_MAX 16 + +/* Global lock is OK, as we won't have more than one extpci anyway. */ +static DEFINE_SPINLOCK(cfgspace_lock); +/* Core to access the external PCI config space. Can only have one. 
*/ +static struct ssb_pcicore *extpci_core; + +static u32 ssb_pcicore_pcibus_iobase = 0x100; +static u32 ssb_pcicore_pcibus_membase = SSB_PCI_DMA; + +int pcibios_plat_dev_init(struct pci_dev *d) +{ + struct resource *res; + int pos, size; + u32 *base; + + ssb_printk(KERN_INFO "PCI: Fixing up device %s\n", + pci_name(d)); + + /* Fix up resource bases */ + for (pos = 0; pos < 6; pos++) { + res = &d->resource[pos]; + if (res->flags & IORESOURCE_IO) + base = &ssb_pcicore_pcibus_iobase; + else + base = &ssb_pcicore_pcibus_membase; + if (res->end) { + size = res->end - res->start + 1; + if (*base & (size - 1)) + *base = (*base + size) & ~(size - 1); + res->start = *base; + res->end = res->start + size - 1; + *base += size; + pci_write_config_dword(d, PCI_BASE_ADDRESS_0 + (pos << 2), res->start); + } + /* Fix up PCI bridge BAR0 only */ + if (d->bus->number == 0 && PCI_SLOT(d->devfn) == 0) + break; + } + /* Fix up interrupt lines */ + d->irq = ssb_mips_irq(extpci_core->dev) + 2; + pci_write_config_byte(d, PCI_INTERRUPT_LINE, d->irq); + + return 0; +} + +static void __init ssb_fixup_pcibridge(struct pci_dev *dev) +{ + if (dev->bus->number != 0 || PCI_SLOT(dev->devfn) != 0) + return; + + ssb_printk(KERN_INFO "PCI: fixing up bridge\n"); + + /* Enable PCI bridge bus mastering and memory space */ + pci_set_master(dev); + pcibios_enable_device(dev, ~0); + + /* Enable PCI bridge BAR1 prefetch and burst */ + pci_write_config_dword(dev, SSB_BAR1_CONTROL, 3); + + /* Make sure our latency is high enough to handle the devices behind us */ + pci_write_config_byte(dev, PCI_LATENCY_TIMER, 0xa8); +} +DECLARE_PCI_FIXUP_EARLY(PCI_ANY_ID, PCI_ANY_ID, ssb_fixup_pcibridge); + +int __init pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) +{ + return ssb_mips_irq(extpci_core->dev) + 2; +} + +static u32 get_cfgspace_addr(struct ssb_pcicore *pc, + unsigned int bus, unsigned int dev, + unsigned int func, unsigned int off) +{ + u32 addr = 0; + u32 tmp; + + if (unlikely(pc->cardbusmode && dev > 1)) + goto out; + if (bus == 0) { + /* Type 0 transaction */ + if (unlikely(dev >= SSB_PCI_SLOT_MAX)) + goto out; + /* Slide the window */ + tmp = SSB_PCICORE_SBTOPCI_CFG0; + tmp |= ((1 << (dev + 16)) & SSB_PCICORE_SBTOPCI1_MASK); + pcicore_write32(pc, SSB_PCICORE_SBTOPCI1, tmp); + /* Calculate the address */ + addr = SSB_PCI_CFG; + addr |= ((1 << (dev + 16)) & ~SSB_PCICORE_SBTOPCI1_MASK); + addr |= (func << 8); + addr |= (off & ~3); + } else { + /* Type 1 transaction */ + pcicore_write32(pc, SSB_PCICORE_SBTOPCI1, + SSB_PCICORE_SBTOPCI_CFG1); + /* Calculate the address */ + addr = SSB_PCI_CFG; + addr |= (bus << 16); + addr |= (dev << 11); + addr |= (func << 8); + addr |= (off & ~3); + } +out: + return addr; +} + +static int ssb_extpci_read_config(struct ssb_pcicore *pc, + unsigned int bus, unsigned int dev, + unsigned int func, unsigned int off, + void *buf, int len) +{ + int err = -EINVAL; + u32 addr, val; + void __iomem *mmio; + + SSB_WARN_ON(!pc->hostmode); + if (unlikely(len != 1 && len != 2 && len != 4)) + goto out; + addr = get_cfgspace_addr(pc, bus, dev, func, off); + if (unlikely(!addr)) + goto out; + err = -ENOMEM; + mmio = ioremap_nocache(addr, len); + if (!mmio) + goto out; + + if (mips_busprobe32(val, mmio)) { + val = 0xffffffff; + goto unmap; + } + + val = readl(mmio); + val >>= (8 * (off & 3)); + + switch (len) { + case 1: + *((u8 *)buf) = (u8)val; + break; + case 2: + *((u16 *)buf) = (u16)val; + break; + case 4: + *((u32 *)buf) = (u32)val; + break; + } + err = 0; +unmap: + iounmap(mmio); +out: + return err; +} 
+ +static int ssb_extpci_write_config(struct ssb_pcicore *pc, + unsigned int bus, unsigned int dev, + unsigned int func, unsigned int off, + const void *buf, int len) +{ + int err = -EINVAL; + u32 addr, val = 0; + void __iomem *mmio; + + SSB_WARN_ON(!pc->hostmode); + if (unlikely(len != 1 && len != 2 && len != 4)) + goto out; + addr = get_cfgspace_addr(pc, bus, dev, func, off); + if (unlikely(!addr)) + goto out; + err = -ENOMEM; + mmio = ioremap_nocache(addr, len); + if (!mmio) + goto out; + + if (mips_busprobe32(val, mmio)) { + val = 0xffffffff; + goto unmap; + } + + switch (len) { + case 1: + val = readl(mmio); + val &= ~(0xFF << (8 * (off & 3))); + val |= *((const u8 *)buf) << (8 * (off & 3)); + break; + case 2: + val = readl(mmio); + val &= ~(0xFFFF << (8 * (off & 3))); + val |= *((const u16 *)buf) << (8 * (off & 3)); + break; + case 4: + val = *((const u32 *)buf); + break; + } + writel(val, mmio); + + err = 0; +unmap: + iounmap(mmio); +out: + return err; +} + +static int ssb_pcicore_read_config(struct pci_bus *bus, unsigned int devfn, + int reg, int size, u32 *val) +{ + unsigned long flags; + int err; + + spin_lock_irqsave(&cfgspace_lock, flags); + err = ssb_extpci_read_config(extpci_core, bus->number, PCI_SLOT(devfn), + PCI_FUNC(devfn), reg, val, size); + spin_unlock_irqrestore(&cfgspace_lock, flags); + + return err ? PCIBIOS_DEVICE_NOT_FOUND : PCIBIOS_SUCCESSFUL; +} + +static int ssb_pcicore_write_config(struct pci_bus *bus, unsigned int devfn, + int reg, int size, u32 val) +{ + unsigned long flags; + int err; + + spin_lock_irqsave(&cfgspace_lock, flags); + err = ssb_extpci_write_config(extpci_core, bus->number, PCI_SLOT(devfn), + PCI_FUNC(devfn), reg, &val, size); + spin_unlock_irqrestore(&cfgspace_lock, flags); + + return err ? PCIBIOS_DEVICE_NOT_FOUND : PCIBIOS_SUCCESSFUL; +} + +static struct pci_ops ssb_pcicore_pciops = { + .read = ssb_pcicore_read_config, + .write = ssb_pcicore_write_config, +}; + +static struct resource ssb_pcicore_mem_resource = { + .name = "SSB PCIcore external memory", + .start = SSB_PCI_DMA, + .end = SSB_PCI_DMA + SSB_PCI_DMA_SZ - 1, + .flags = IORESOURCE_MEM, +}; + +static struct resource ssb_pcicore_io_resource = { + .name = "SSB PCIcore external I/O", + .start = 0x100, + .end = 0x7FF, + .flags = IORESOURCE_IO, +}; + +static struct pci_controller ssb_pcicore_controller = { + .pci_ops = &ssb_pcicore_pciops, + .io_resource = &ssb_pcicore_io_resource, + .mem_resource = &ssb_pcicore_mem_resource, + .mem_offset = 0x24000000, +}; + +static void ssb_pcicore_init_hostmode(struct ssb_pcicore *pc) +{ + u32 val; + + if (WARN_ON(extpci_core)) + return; + extpci_core = pc; + + ssb_dprintk(KERN_INFO PFX "PCIcore in host mode found\n"); + /* Reset devices on the external PCI bus */ + val = SSB_PCICORE_CTL_RST_OE; + val |= SSB_PCICORE_CTL_CLK_OE; + pcicore_write32(pc, SSB_PCICORE_CTL, val); + val |= SSB_PCICORE_CTL_CLK; /* Clock on */ + pcicore_write32(pc, SSB_PCICORE_CTL, val); + udelay(150); /* Assertion time demanded by the PCI standard */ + val |= SSB_PCICORE_CTL_RST; /* Deassert RST# */ + pcicore_write32(pc, SSB_PCICORE_CTL, val); + val = SSB_PCICORE_ARBCTL_INTERN; + pcicore_write32(pc, SSB_PCICORE_ARBCTL, val); + udelay(1); /* Assertion time demanded by the PCI standard */ + + /*TODO cardbus mode */ + + /* 64MB I/O window */ + pcicore_write32(pc, SSB_PCICORE_SBTOPCI0, + SSB_PCICORE_SBTOPCI_IO); + /* 64MB config space */ + pcicore_write32(pc, SSB_PCICORE_SBTOPCI1, + SSB_PCICORE_SBTOPCI_CFG0); + /* 1GB memory window */ + pcicore_write32(pc, SSB_PCICORE_SBTOPCI2, + 
			SSB_PCICORE_SBTOPCI_MEM | SSB_PCI_DMA);
+
+	/* Enable PCI bridge bus mastering and memory space */
+	val = PCI_COMMAND_MASTER | PCI_COMMAND_MEMORY;
+	ssb_extpci_write_config(pc, 0, 0, 0, PCI_COMMAND, &val, 2);
+	/* Clear error conditions */
+	val = 0;
+	ssb_extpci_write_config(pc, 0, 0, 0, PCI_STATUS, &val, 2);
+
+	/* Enable PCI interrupts */
+	pcicore_write32(pc, SSB_PCICORE_IMASK,
+			SSB_PCICORE_IMASK_INTA);
+
+	/* OK, ready to run; register it with the system.
+	 * The following needs to change if we want to port hostmode
+	 * to a non-MIPS platform. */
+	set_io_port_base((unsigned long)ioremap_nocache(SSB_PCI_MEM, 0x04000000));
+	/* Give the PCI controller some time to configure itself with the new
+	 * values. Not waiting at this point causes crashes of the machine. */
+	mdelay(10);
+	register_pci_controller(&ssb_pcicore_controller);
+}
+
+static int pcicore_is_in_hostmode(struct ssb_pcicore *pc)
+{
+	struct ssb_bus *bus = pc->dev->bus;
+	u16 chipid_top;
+	u32 tmp;
+
+	chipid_top = (bus->chip_id & 0xFF00);
+	if (chipid_top != 0x4700 &&
+	    chipid_top != 0x5300)
+		return 0;
+
+	if (bus->sprom.r1.boardflags_lo & SSB_PCICORE_BFL_NOPCI)
+		return 0;
+
+	/* The 200-pin BCM4712 package does not bond out PCI. Even when
+	 * PCI is bonded out, some boards may leave the pins floating. */
+	if (bus->chip_id == 0x4712) {
+		if (bus->chip_package == SSB_CHIPPACK_BCM4712S)
+			return 0;
+		if (bus->chip_package == SSB_CHIPPACK_BCM4712M)
+			return 0;
+	}
+	if (bus->chip_id == 0x5350)
+		return 0;
+
+	return !mips_busprobe32(tmp, (bus->mmio + (pc->dev->core_index * SSB_CORE_SIZE)));
+}
+#endif /* CONFIG_SSB_PCICORE_HOSTMODE */
+
+
+/**************************************************
+ * Generic and Clientmode operation code.
+ **************************************************/
+
+static void ssb_pcicore_init_clientmode(struct ssb_pcicore *pc)
+{
+	/* Disable PCI interrupts.
*/ + ssb_write32(pc->dev, SSB_INTVEC, 0); +} + +void ssb_pcicore_init(struct ssb_pcicore *pc) +{ + struct ssb_device *dev = pc->dev; + struct ssb_bus *bus; + + if (!dev) + return; + bus = dev->bus; + if (!ssb_device_is_enabled(dev)) + ssb_device_enable(dev, 0); + +#ifdef CONFIG_SSB_PCICORE_HOSTMODE + pc->hostmode = pcicore_is_in_hostmode(pc); + if (pc->hostmode) + ssb_pcicore_init_hostmode(pc); +#endif /* CONFIG_SSB_PCICORE_HOSTMODE */ + if (!pc->hostmode) + ssb_pcicore_init_clientmode(pc); +} + +static u32 ssb_pcie_read(struct ssb_pcicore *pc, u32 address) +{ + pcicore_write32(pc, 0x130, address); + return pcicore_read32(pc, 0x134); +} + +static void ssb_pcie_write(struct ssb_pcicore *pc, u32 address, u32 data) +{ + pcicore_write32(pc, 0x130, address); + pcicore_write32(pc, 0x134, data); +} + +static void ssb_pcie_mdio_write(struct ssb_pcicore *pc, u8 device, + u8 address, u16 data) +{ + const u16 mdio_control = 0x128; + const u16 mdio_data = 0x12C; + u32 v; + int i; + + v = 0x80; /* Enable Preamble Sequence */ + v |= 0x2; /* MDIO Clock Divisor */ + pcicore_write32(pc, mdio_control, v); + + v = (1 << 30); /* Start of Transaction */ + v |= (1 << 28); /* Write Transaction */ + v |= (1 << 17); /* Turnaround */ + v |= (u32)device << 22; + v |= (u32)address << 18; + v |= data; + pcicore_write32(pc, mdio_data, v); + /* Wait for the device to complete the transaction */ + udelay(10); + for (i = 0; i < 10; i++) { + v = pcicore_read32(pc, mdio_control); + if (v & 0x100 /* Trans complete */) + break; + msleep(1); + } + pcicore_write32(pc, mdio_control, 0); +} + +static void ssb_broadcast_value(struct ssb_device *dev, + u32 address, u32 data) +{ + /* This is used for both, PCI and ChipCommon core, so be careful. */ + BUILD_BUG_ON(SSB_PCICORE_BCAST_ADDR != SSB_CHIPCO_BCAST_ADDR); + BUILD_BUG_ON(SSB_PCICORE_BCAST_DATA != SSB_CHIPCO_BCAST_DATA); + + ssb_write32(dev, SSB_PCICORE_BCAST_ADDR, address); + ssb_read32(dev, SSB_PCICORE_BCAST_ADDR); /* flush */ + ssb_write32(dev, SSB_PCICORE_BCAST_DATA, data); + ssb_read32(dev, SSB_PCICORE_BCAST_DATA); /* flush */ +} + +static void ssb_commit_settings(struct ssb_bus *bus) +{ + struct ssb_device *dev; + + dev = bus->chipco.dev ? bus->chipco.dev : bus->pcicore.dev; + if (WARN_ON(!dev)) + return; + /* This forces an update of the cached registers. */ + ssb_broadcast_value(dev, 0xFD8, 0); +} + +int ssb_pcicore_dev_irqvecs_enable(struct ssb_pcicore *pc, + struct ssb_device *dev) +{ + struct ssb_device *pdev = pc->dev; + struct ssb_bus *bus; + int err = 0; + u32 tmp; + + might_sleep(); + + if (!pdev) + goto out; + bus = pdev->bus; + + /* Enable interrupts for this device. */ + if (bus->host_pci && + ((pdev->id.revision >= 6) || (pdev->id.coreid == SSB_DEV_PCIE))) { + u32 coremask; + + /* Calculate the "coremask" for the device. */ + coremask = (1 << dev->core_index); + + err = pci_read_config_dword(bus->host_pci, SSB_PCI_IRQMASK, &tmp); + if (err) + goto out; + tmp |= coremask << 8; + err = pci_write_config_dword(bus->host_pci, SSB_PCI_IRQMASK, tmp); + if (err) + goto out; + } else { + u32 intvec; + + intvec = ssb_read32(pdev, SSB_INTVEC); + if ((bus->chip_id & 0xFF00) == 0x4400) { + /* Workaround: On the BCM44XX the BPFLAG routing + * bit is wrong. Use a hardcoded constant. */ + intvec |= 0x00000002; + } else { + tmp = ssb_read32(dev, SSB_TPSFLAG); + tmp &= SSB_TPSFLAG_BPFLAG; + intvec |= tmp; + } + ssb_write32(pdev, SSB_INTVEC, intvec); + } + + /* Setup PCIcore operation. 
*/ + if (pc->setup_done) + goto out; + if (pdev->id.coreid == SSB_DEV_PCI) { + tmp = pcicore_read32(pc, SSB_PCICORE_SBTOPCI2); + tmp |= SSB_PCICORE_SBTOPCI_PREF; + tmp |= SSB_PCICORE_SBTOPCI_BURST; + pcicore_write32(pc, SSB_PCICORE_SBTOPCI2, tmp); + + if (pdev->id.revision < 5) { + tmp = ssb_read32(pdev, SSB_IMCFGLO); + tmp &= ~SSB_IMCFGLO_SERTO; + tmp |= 2; + tmp &= ~SSB_IMCFGLO_REQTO; + tmp |= 3 << SSB_IMCFGLO_REQTO_SHIFT; + ssb_write32(pdev, SSB_IMCFGLO, tmp); + ssb_commit_settings(bus); + } else if (pdev->id.revision >= 11) { + tmp = pcicore_read32(pc, SSB_PCICORE_SBTOPCI2); + tmp |= SSB_PCICORE_SBTOPCI_MRM; + pcicore_write32(pc, SSB_PCICORE_SBTOPCI2, tmp); + } + } else { + WARN_ON(pdev->id.coreid != SSB_DEV_PCIE); + //TODO: Better make defines for all these magic PCIE values. + if ((pdev->id.revision == 0) || (pdev->id.revision == 1)) { + /* TLP Workaround register. */ + tmp = ssb_pcie_read(pc, 0x4); + tmp |= 0x8; + ssb_pcie_write(pc, 0x4, tmp); + } + if (pdev->id.revision == 0) { + const u8 serdes_rx_device = 0x1F; + + ssb_pcie_mdio_write(pc, serdes_rx_device, + 2 /* Timer */, 0x8128); + ssb_pcie_mdio_write(pc, serdes_rx_device, + 6 /* CDR */, 0x0100); + ssb_pcie_mdio_write(pc, serdes_rx_device, + 7 /* CDR BW */, 0x1466); + } else if (pdev->id.revision == 1) { + /* DLLP Link Control register. */ + tmp = ssb_pcie_read(pc, 0x100); + tmp |= 0x40; + ssb_pcie_write(pc, 0x100, tmp); + } + } + pc->setup_done = 1; +out: + return err; +} +EXPORT_SYMBOL(ssb_pcicore_dev_irqvecs_enable); diff --git a/drivers/ssb/main.c b/drivers/ssb/main.c new file mode 100644 index 0000000000000000000000000000000000000000..74d5182db4b2b5fb2ea970b97777364e60984bec --- /dev/null +++ b/drivers/ssb/main.c @@ -0,0 +1,1162 @@ +/* + * Sonics Silicon Backplane + * Subsystem core + * + * Copyright 2005, Broadcom Corporation + * Copyright 2006, 2007, Michael Buesch + * + * Licensed under the GNU/GPL. See COPYING for details. + */ + +#include "ssb_private.h" + +#include +#include +#include +#include +#include + +#include +#include +#include +#include + + +MODULE_DESCRIPTION("Sonics Silicon Backplane driver"); +MODULE_LICENSE("GPL"); + + +/* Temporary list of yet-to-be-attached buses */ +static LIST_HEAD(attach_queue); +/* List if running buses */ +static LIST_HEAD(buses); +/* Software ID counter */ +static unsigned int next_busnumber; +/* buses_mutes locks the two buslists and the next_busnumber. + * Don't lock this directly, but use ssb_buses_[un]lock() below. */ +static DEFINE_MUTEX(buses_mutex); + +/* There are differences in the codeflow, if the bus is + * initialized from early boot, as various needed services + * are not available early. This is a mechanism to delay + * these initializations to after early boot has finished. + * It's also used to avoid mutex locking, as that's not + * available and needed early. 
*/ +static bool ssb_is_early_boot = 1; + +static void ssb_buses_lock(void); +static void ssb_buses_unlock(void); + + +#ifdef CONFIG_SSB_PCIHOST +struct ssb_bus *ssb_pci_dev_to_bus(struct pci_dev *pdev) +{ + struct ssb_bus *bus; + + ssb_buses_lock(); + list_for_each_entry(bus, &buses, list) { + if (bus->bustype == SSB_BUSTYPE_PCI && + bus->host_pci == pdev) + goto found; + } + bus = NULL; +found: + ssb_buses_unlock(); + + return bus; +} +#endif /* CONFIG_SSB_PCIHOST */ + +static struct ssb_device *ssb_device_get(struct ssb_device *dev) +{ + if (dev) + get_device(dev->dev); + return dev; +} + +static void ssb_device_put(struct ssb_device *dev) +{ + if (dev) + put_device(dev->dev); +} + +static int ssb_bus_resume(struct ssb_bus *bus) +{ + int err; + + ssb_pci_xtal(bus, SSB_GPIO_XTAL | SSB_GPIO_PLL, 1); + err = ssb_pcmcia_init(bus); + if (err) { + /* No need to disable XTAL, as we don't have one on PCMCIA. */ + return err; + } + ssb_chipco_resume(&bus->chipco); + + return 0; +} + +static int ssb_device_resume(struct device *dev) +{ + struct ssb_device *ssb_dev = dev_to_ssb_dev(dev); + struct ssb_driver *ssb_drv; + struct ssb_bus *bus; + int err = 0; + + bus = ssb_dev->bus; + if (bus->suspend_cnt == bus->nr_devices) { + err = ssb_bus_resume(bus); + if (err) + return err; + } + bus->suspend_cnt--; + if (dev->driver) { + ssb_drv = drv_to_ssb_drv(dev->driver); + if (ssb_drv && ssb_drv->resume) + err = ssb_drv->resume(ssb_dev); + if (err) + goto out; + } +out: + return err; +} + +static void ssb_bus_suspend(struct ssb_bus *bus, pm_message_t state) +{ + ssb_chipco_suspend(&bus->chipco, state); + ssb_pci_xtal(bus, SSB_GPIO_XTAL | SSB_GPIO_PLL, 0); + + /* Reset HW state information in memory, so that HW is + * completely reinitialized on resume. */ + bus->mapped_device = NULL; +#ifdef CONFIG_SSB_DRIVER_PCICORE + bus->pcicore.setup_done = 0; +#endif +#ifdef CONFIG_SSB_DEBUG + bus->powered_up = 0; +#endif +} + +static int ssb_device_suspend(struct device *dev, pm_message_t state) +{ + struct ssb_device *ssb_dev = dev_to_ssb_dev(dev); + struct ssb_driver *ssb_drv; + struct ssb_bus *bus; + int err = 0; + + if (dev->driver) { + ssb_drv = drv_to_ssb_drv(dev->driver); + if (ssb_drv && ssb_drv->suspend) + err = ssb_drv->suspend(ssb_dev, state); + if (err) + goto out; + } + + bus = ssb_dev->bus; + bus->suspend_cnt++; + if (bus->suspend_cnt == bus->nr_devices) { + /* All devices suspended. Shutdown the bus. */ + ssb_bus_suspend(bus, state); + } + +out: + return err; +} + +#ifdef CONFIG_SSB_PCIHOST +int ssb_devices_freeze(struct ssb_bus *bus) +{ + struct ssb_device *dev; + struct ssb_driver *drv; + int err = 0; + int i; + pm_message_t state = PMSG_FREEZE; + + /* First check that we are capable to freeze all devices. */ + for (i = 0; i < bus->nr_devices; i++) { + dev = &(bus->devices[i]); + if (!dev->dev || + !dev->dev->driver || + !device_is_registered(dev->dev)) + continue; + drv = drv_to_ssb_drv(dev->dev->driver); + if (!drv) + continue; + if (!drv->suspend) { + /* Nope, can't suspend this one. 
*/ + return -EOPNOTSUPP; + } + } + /* Now suspend all devices */ + for (i = 0; i < bus->nr_devices; i++) { + dev = &(bus->devices[i]); + if (!dev->dev || + !dev->dev->driver || + !device_is_registered(dev->dev)) + continue; + drv = drv_to_ssb_drv(dev->dev->driver); + if (!drv) + continue; + err = drv->suspend(dev, state); + if (err) { + ssb_printk(KERN_ERR PFX "Failed to freeze device %s\n", + dev->dev->bus_id); + goto err_unwind; + } + } + + return 0; +err_unwind: + for (i--; i >= 0; i--) { + dev = &(bus->devices[i]); + if (!dev->dev || + !dev->dev->driver || + !device_is_registered(dev->dev)) + continue; + drv = drv_to_ssb_drv(dev->dev->driver); + if (!drv) + continue; + if (drv->resume) + drv->resume(dev); + } + return err; +} + +int ssb_devices_thaw(struct ssb_bus *bus) +{ + struct ssb_device *dev; + struct ssb_driver *drv; + int err; + int i; + + for (i = 0; i < bus->nr_devices; i++) { + dev = &(bus->devices[i]); + if (!dev->dev || + !dev->dev->driver || + !device_is_registered(dev->dev)) + continue; + drv = drv_to_ssb_drv(dev->dev->driver); + if (!drv) + continue; + if (SSB_WARN_ON(!drv->resume)) + continue; + err = drv->resume(dev); + if (err) { + ssb_printk(KERN_ERR PFX "Failed to thaw device %s\n", + dev->dev->bus_id); + } + } + + return 0; +} +#endif /* CONFIG_SSB_PCIHOST */ + +static void ssb_device_shutdown(struct device *dev) +{ + struct ssb_device *ssb_dev = dev_to_ssb_dev(dev); + struct ssb_driver *ssb_drv; + + if (!dev->driver) + return; + ssb_drv = drv_to_ssb_drv(dev->driver); + if (ssb_drv && ssb_drv->shutdown) + ssb_drv->shutdown(ssb_dev); +} + +static int ssb_device_remove(struct device *dev) +{ + struct ssb_device *ssb_dev = dev_to_ssb_dev(dev); + struct ssb_driver *ssb_drv = drv_to_ssb_drv(dev->driver); + + if (ssb_drv && ssb_drv->remove) + ssb_drv->remove(ssb_dev); + ssb_device_put(ssb_dev); + + return 0; +} + +static int ssb_device_probe(struct device *dev) +{ + struct ssb_device *ssb_dev = dev_to_ssb_dev(dev); + struct ssb_driver *ssb_drv = drv_to_ssb_drv(dev->driver); + int err = 0; + + ssb_device_get(ssb_dev); + if (ssb_drv && ssb_drv->probe) + err = ssb_drv->probe(ssb_dev, &ssb_dev->id); + if (err) + ssb_device_put(ssb_dev); + + return err; +} + +static int ssb_match_devid(const struct ssb_device_id *tabid, + const struct ssb_device_id *devid) +{ + if ((tabid->vendor != devid->vendor) && + tabid->vendor != SSB_ANY_VENDOR) + return 0; + if ((tabid->coreid != devid->coreid) && + tabid->coreid != SSB_ANY_ID) + return 0; + if ((tabid->revision != devid->revision) && + tabid->revision != SSB_ANY_REV) + return 0; + return 1; +} + +static int ssb_bus_match(struct device *dev, struct device_driver *drv) +{ + struct ssb_device *ssb_dev = dev_to_ssb_dev(dev); + struct ssb_driver *ssb_drv = drv_to_ssb_drv(drv); + const struct ssb_device_id *id; + + for (id = ssb_drv->id_table; + id->vendor || id->coreid || id->revision; + id++) { + if (ssb_match_devid(id, &ssb_dev->id)) + return 1; /* found */ + } + + return 0; +} + +static int ssb_device_uevent(struct device *dev, char **envp, int num_envp, + char *buffer, int buffer_size) +{ + struct ssb_device *ssb_dev = dev_to_ssb_dev(dev); + int ret, i = 0, length = 0; + + if (!dev) + return -ENODEV; + + ret = add_uevent_var(envp, num_envp, &i, + buffer, buffer_size, &length, + "MODALIAS=ssb:v%04Xid%04Xrev%02X", + ssb_dev->id.vendor, ssb_dev->id.coreid, + ssb_dev->id.revision); + envp[i] = NULL; + + return ret; +} + +static struct bus_type ssb_bustype = { + .name = "ssb", + .match = ssb_bus_match, + .probe = ssb_device_probe, + 
.remove = ssb_device_remove, + .shutdown = ssb_device_shutdown, + .suspend = ssb_device_suspend, + .resume = ssb_device_resume, + .uevent = ssb_device_uevent, +}; + +static void ssb_buses_lock(void) +{ + /* See the comment at the ssb_is_early_boot definition */ + if (!ssb_is_early_boot) + mutex_lock(&buses_mutex); +} + +static void ssb_buses_unlock(void) +{ + /* See the comment at the ssb_is_early_boot definition */ + if (!ssb_is_early_boot) + mutex_unlock(&buses_mutex); +} + +static void ssb_devices_unregister(struct ssb_bus *bus) +{ + struct ssb_device *sdev; + int i; + + for (i = bus->nr_devices - 1; i >= 0; i--) { + sdev = &(bus->devices[i]); + if (sdev->dev) + device_unregister(sdev->dev); + } +} + +void ssb_bus_unregister(struct ssb_bus *bus) +{ + ssb_buses_lock(); + ssb_devices_unregister(bus); + list_del(&bus->list); + ssb_buses_unlock(); + + /* ssb_pcmcia_exit(bus); */ + ssb_pci_exit(bus); + ssb_iounmap(bus); +} +EXPORT_SYMBOL(ssb_bus_unregister); + +static void ssb_release_dev(struct device *dev) +{ + struct __ssb_dev_wrapper *devwrap; + + devwrap = container_of(dev, struct __ssb_dev_wrapper, dev); + kfree(devwrap); +} + +static int ssb_devices_register(struct ssb_bus *bus) +{ + struct ssb_device *sdev; + struct device *dev; + struct __ssb_dev_wrapper *devwrap; + int i, err = 0; + int dev_idx = 0; + + for (i = 0; i < bus->nr_devices; i++) { + sdev = &(bus->devices[i]); + + /* We don't register SSB-system devices to the kernel, + * as the drivers for them are built into SSB. */ + switch (sdev->id.coreid) { + case SSB_DEV_CHIPCOMMON: + case SSB_DEV_PCI: + case SSB_DEV_PCIE: + case SSB_DEV_PCMCIA: + case SSB_DEV_MIPS: + case SSB_DEV_MIPS_3302: + case SSB_DEV_EXTIF: + continue; + } + + devwrap = kzalloc(sizeof(*devwrap), GFP_KERNEL); + if (!devwrap) { + ssb_printk(KERN_ERR PFX + "Could not allocate device\n"); + err = -ENOMEM; + goto error; + } + dev = &devwrap->dev; + devwrap->sdev = sdev; + + dev->release = ssb_release_dev; + dev->bus = &ssb_bustype; + snprintf(dev->bus_id, sizeof(dev->bus_id), + "ssb%u:%d", bus->busnumber, dev_idx); + + switch (bus->bustype) { + case SSB_BUSTYPE_PCI: +#ifdef CONFIG_SSB_PCIHOST + sdev->irq = bus->host_pci->irq; + dev->parent = &bus->host_pci->dev; +#endif + break; + case SSB_BUSTYPE_PCMCIA: +#ifdef CONFIG_SSB_PCMCIAHOST + dev->parent = &bus->host_pcmcia->dev; +#endif + break; + case SSB_BUSTYPE_SSB: + break; + } + + sdev->dev = dev; + err = device_register(dev); + if (err) { + ssb_printk(KERN_ERR PFX + "Could not register %s\n", + dev->bus_id); + /* Set dev to NULL to not unregister + * dev on error unwinding. */ + sdev->dev = NULL; + kfree(devwrap); + goto error; + } + dev_idx++; + } + + return 0; +error: + /* Unwind the already registered devices. */ + ssb_devices_unregister(bus); + return err; +} + +/* Needs ssb_buses_lock() */ +static int ssb_attach_queued_buses(void) +{ + struct ssb_bus *bus, *n; + int err = 0; + int drop_them_all = 0; + + list_for_each_entry_safe(bus, n, &attach_queue, list) { + if (drop_them_all) { + list_del(&bus->list); + continue; + } + /* Can't init the PCIcore in ssb_bus_register(), as that + * is too early in boot for embedded systems + * (no udelay() available). So do it here in attach stage. 
+ */ + err = ssb_bus_powerup(bus, 0); + if (err) + goto error; + ssb_pcicore_init(&bus->pcicore); + ssb_bus_may_powerdown(bus); + + err = ssb_devices_register(bus); +error: + if (err) { + drop_them_all = 1; + list_del(&bus->list); + continue; + } + list_move_tail(&bus->list, &buses); + } + + return err; +} + +static u16 ssb_ssb_read16(struct ssb_device *dev, u16 offset) +{ + struct ssb_bus *bus = dev->bus; + + offset += dev->core_index * SSB_CORE_SIZE; + return readw(bus->mmio + offset); +} + +static u32 ssb_ssb_read32(struct ssb_device *dev, u16 offset) +{ + struct ssb_bus *bus = dev->bus; + + offset += dev->core_index * SSB_CORE_SIZE; + return readl(bus->mmio + offset); +} + +static void ssb_ssb_write16(struct ssb_device *dev, u16 offset, u16 value) +{ + struct ssb_bus *bus = dev->bus; + + offset += dev->core_index * SSB_CORE_SIZE; + writew(value, bus->mmio + offset); +} + +static void ssb_ssb_write32(struct ssb_device *dev, u16 offset, u32 value) +{ + struct ssb_bus *bus = dev->bus; + + offset += dev->core_index * SSB_CORE_SIZE; + writel(value, bus->mmio + offset); +} + +/* Ops for the plain SSB bus without a host-device (no PCI or PCMCIA). */ +static const struct ssb_bus_ops ssb_ssb_ops = { + .read16 = ssb_ssb_read16, + .read32 = ssb_ssb_read32, + .write16 = ssb_ssb_write16, + .write32 = ssb_ssb_write32, +}; + +static int ssb_fetch_invariants(struct ssb_bus *bus, + ssb_invariants_func_t get_invariants) +{ + struct ssb_init_invariants iv; + int err; + + memset(&iv, 0, sizeof(iv)); + err = get_invariants(bus, &iv); + if (err) + goto out; + memcpy(&bus->boardinfo, &iv.boardinfo, sizeof(iv.boardinfo)); + memcpy(&bus->sprom, &iv.sprom, sizeof(iv.sprom)); +out: + return err; +} + +static int ssb_bus_register(struct ssb_bus *bus, + ssb_invariants_func_t get_invariants, + unsigned long baseaddr) +{ + int err; + + spin_lock_init(&bus->bar_lock); + INIT_LIST_HEAD(&bus->list); + + /* Powerup the bus */ + err = ssb_pci_xtal(bus, SSB_GPIO_XTAL | SSB_GPIO_PLL, 1); + if (err) + goto out; + ssb_buses_lock(); + bus->busnumber = next_busnumber; + /* Scan for devices (cores) */ + err = ssb_bus_scan(bus, baseaddr); + if (err) + goto err_disable_xtal; + + /* Init PCI-host device (if any) */ + err = ssb_pci_init(bus); + if (err) + goto err_unmap; + /* Init PCMCIA-host device (if any) */ + err = ssb_pcmcia_init(bus); + if (err) + goto err_pci_exit; + + /* Initialize basic system devices (if available) */ + err = ssb_bus_powerup(bus, 0); + if (err) + goto err_pcmcia_exit; + ssb_chipcommon_init(&bus->chipco); + ssb_mipscore_init(&bus->mipscore); + err = ssb_fetch_invariants(bus, get_invariants); + if (err) { + ssb_bus_may_powerdown(bus); + goto err_pcmcia_exit; + } + ssb_bus_may_powerdown(bus); + + /* Queue it for attach. + * See the comment at the ssb_is_early_boot definition. 
*/ + list_add_tail(&bus->list, &attach_queue); + if (!ssb_is_early_boot) { + /* This is not early boot, so we must attach the bus now */ + err = ssb_attach_queued_buses(); + if (err) + goto err_dequeue; + } + next_busnumber++; + ssb_buses_unlock(); + +out: + return err; + +err_dequeue: + list_del(&bus->list); +err_pcmcia_exit: +/* ssb_pcmcia_exit(bus); */ +err_pci_exit: + ssb_pci_exit(bus); +err_unmap: + ssb_iounmap(bus); +err_disable_xtal: + ssb_buses_unlock(); + ssb_pci_xtal(bus, SSB_GPIO_XTAL | SSB_GPIO_PLL, 0); + return err; +} + +#ifdef CONFIG_SSB_PCIHOST +int ssb_bus_pcibus_register(struct ssb_bus *bus, + struct pci_dev *host_pci) +{ + int err; + + bus->bustype = SSB_BUSTYPE_PCI; + bus->host_pci = host_pci; + bus->ops = &ssb_pci_ops; + + err = ssb_bus_register(bus, ssb_pci_get_invariants, 0); + if (!err) { + ssb_printk(KERN_INFO PFX "Sonics Silicon Backplane found on " + "PCI device %s\n", host_pci->dev.bus_id); + } + + return err; +} +EXPORT_SYMBOL(ssb_bus_pcibus_register); +#endif /* CONFIG_SSB_PCIHOST */ + +#ifdef CONFIG_SSB_PCMCIAHOST +int ssb_bus_pcmciabus_register(struct ssb_bus *bus, + struct pcmcia_device *pcmcia_dev, + unsigned long baseaddr) +{ + int err; + + bus->bustype = SSB_BUSTYPE_PCMCIA; + bus->host_pcmcia = pcmcia_dev; + bus->ops = &ssb_pcmcia_ops; + + err = ssb_bus_register(bus, ssb_pcmcia_get_invariants, baseaddr); + if (!err) { + ssb_printk(KERN_INFO PFX "Sonics Silicon Backplane found on " + "PCMCIA device %s\n", pcmcia_dev->devname); + } + + return err; +} +EXPORT_SYMBOL(ssb_bus_pcmciabus_register); +#endif /* CONFIG_SSB_PCMCIAHOST */ + +int ssb_bus_ssbbus_register(struct ssb_bus *bus, + unsigned long baseaddr, + ssb_invariants_func_t get_invariants) +{ + int err; + + bus->bustype = SSB_BUSTYPE_SSB; + bus->ops = &ssb_ssb_ops; + + err = ssb_bus_register(bus, get_invariants, baseaddr); + if (!err) { + ssb_printk(KERN_INFO PFX "Sonics Silicon Backplane found at " + "address 0x%08lX\n", baseaddr); + } + + return err; +} + +int __ssb_driver_register(struct ssb_driver *drv, struct module *owner) +{ + drv->drv.name = drv->name; + drv->drv.bus = &ssb_bustype; + drv->drv.owner = owner; + + return driver_register(&drv->drv); +} +EXPORT_SYMBOL(__ssb_driver_register); + +void ssb_driver_unregister(struct ssb_driver *drv) +{ + driver_unregister(&drv->drv); +} +EXPORT_SYMBOL(ssb_driver_unregister); + +void ssb_set_devtypedata(struct ssb_device *dev, void *data) +{ + struct ssb_bus *bus = dev->bus; + struct ssb_device *ent; + int i; + + for (i = 0; i < bus->nr_devices; i++) { + ent = &(bus->devices[i]); + if (ent->id.vendor != dev->id.vendor) + continue; + if (ent->id.coreid != dev->id.coreid) + continue; + + ent->devtypedata = data; + } +} +EXPORT_SYMBOL(ssb_set_devtypedata); + +static u32 clkfactor_f6_resolve(u32 v) +{ + /* map the magic values */ + switch (v) { + case SSB_CHIPCO_CLK_F6_2: + return 2; + case SSB_CHIPCO_CLK_F6_3: + return 3; + case SSB_CHIPCO_CLK_F6_4: + return 4; + case SSB_CHIPCO_CLK_F6_5: + return 5; + case SSB_CHIPCO_CLK_F6_6: + return 6; + case SSB_CHIPCO_CLK_F6_7: + return 7; + } + return 0; +} + +/* Calculate the speed the backplane would run at a given set of clockcontrol values */ +u32 ssb_calc_clock_rate(u32 plltype, u32 n, u32 m) +{ + u32 n1, n2, clock, m1, m2, m3, mc; + + n1 = (n & SSB_CHIPCO_CLK_N1); + n2 = ((n & SSB_CHIPCO_CLK_N2) >> SSB_CHIPCO_CLK_N2_SHIFT); + + switch (plltype) { + case SSB_PLLTYPE_6: /* 100/200 or 120/240 only */ + if (m & SSB_CHIPCO_CLK_T6_MMASK) + return SSB_CHIPCO_CLK_T6_M0; + return SSB_CHIPCO_CLK_T6_M1; + case 
SSB_PLLTYPE_1: /* 48Mhz base, 3 dividers */ + case SSB_PLLTYPE_3: /* 25Mhz, 2 dividers */ + case SSB_PLLTYPE_4: /* 48Mhz, 4 dividers */ + case SSB_PLLTYPE_7: /* 25Mhz, 4 dividers */ + n1 = clkfactor_f6_resolve(n1); + n2 += SSB_CHIPCO_CLK_F5_BIAS; + break; + case SSB_PLLTYPE_2: /* 48Mhz, 4 dividers */ + n1 += SSB_CHIPCO_CLK_T2_BIAS; + n2 += SSB_CHIPCO_CLK_T2_BIAS; + SSB_WARN_ON(!((n1 >= 2) && (n1 <= 7))); + SSB_WARN_ON(!((n2 >= 5) && (n2 <= 23))); + break; + case SSB_PLLTYPE_5: /* 25Mhz, 4 dividers */ + return 100000000; + default: + SSB_WARN_ON(1); + } + + switch (plltype) { + case SSB_PLLTYPE_3: /* 25Mhz, 2 dividers */ + case SSB_PLLTYPE_7: /* 25Mhz, 4 dividers */ + clock = SSB_CHIPCO_CLK_BASE2 * n1 * n2; + break; + default: + clock = SSB_CHIPCO_CLK_BASE1 * n1 * n2; + } + if (!clock) + return 0; + + m1 = (m & SSB_CHIPCO_CLK_M1); + m2 = ((m & SSB_CHIPCO_CLK_M2) >> SSB_CHIPCO_CLK_M2_SHIFT); + m3 = ((m & SSB_CHIPCO_CLK_M3) >> SSB_CHIPCO_CLK_M3_SHIFT); + mc = ((m & SSB_CHIPCO_CLK_MC) >> SSB_CHIPCO_CLK_MC_SHIFT); + + switch (plltype) { + case SSB_PLLTYPE_1: /* 48Mhz base, 3 dividers */ + case SSB_PLLTYPE_3: /* 25Mhz, 2 dividers */ + case SSB_PLLTYPE_4: /* 48Mhz, 4 dividers */ + case SSB_PLLTYPE_7: /* 25Mhz, 4 dividers */ + m1 = clkfactor_f6_resolve(m1); + if ((plltype == SSB_PLLTYPE_1) || + (plltype == SSB_PLLTYPE_3)) + m2 += SSB_CHIPCO_CLK_F5_BIAS; + else + m2 = clkfactor_f6_resolve(m2); + m3 = clkfactor_f6_resolve(m3); + + switch (mc) { + case SSB_CHIPCO_CLK_MC_BYPASS: + return clock; + case SSB_CHIPCO_CLK_MC_M1: + return (clock / m1); + case SSB_CHIPCO_CLK_MC_M1M2: + return (clock / (m1 * m2)); + case SSB_CHIPCO_CLK_MC_M1M2M3: + return (clock / (m1 * m2 * m3)); + case SSB_CHIPCO_CLK_MC_M1M3: + return (clock / (m1 * m3)); + } + return 0; + case SSB_PLLTYPE_2: + m1 += SSB_CHIPCO_CLK_T2_BIAS; + m2 += SSB_CHIPCO_CLK_T2M2_BIAS; + m3 += SSB_CHIPCO_CLK_T2_BIAS; + SSB_WARN_ON(!((m1 >= 2) && (m1 <= 7))); + SSB_WARN_ON(!((m2 >= 3) && (m2 <= 10))); + SSB_WARN_ON(!((m3 >= 2) && (m3 <= 7))); + + if (!(mc & SSB_CHIPCO_CLK_T2MC_M1BYP)) + clock /= m1; + if (!(mc & SSB_CHIPCO_CLK_T2MC_M2BYP)) + clock /= m2; + if (!(mc & SSB_CHIPCO_CLK_T2MC_M3BYP)) + clock /= m3; + return clock; + default: + SSB_WARN_ON(1); + } + return 0; +} + +/* Get the current speed the backplane is running at */ +u32 ssb_clockspeed(struct ssb_bus *bus) +{ + u32 rate; + u32 plltype; + u32 clkctl_n, clkctl_m; + + if (ssb_extif_available(&bus->extif)) + ssb_extif_get_clockcontrol(&bus->extif, &plltype, + &clkctl_n, &clkctl_m); + else if (bus->chipco.dev) + ssb_chipco_get_clockcontrol(&bus->chipco, &plltype, + &clkctl_n, &clkctl_m); + else + return 0; + + if (bus->chip_id == 0x5365) { + rate = 100000000; + } else { + rate = ssb_calc_clock_rate(plltype, clkctl_n, clkctl_m); + if (plltype == SSB_PLLTYPE_3) /* 25Mhz, 2 dividers */ + rate /= 2; + } + + return rate; +} +EXPORT_SYMBOL(ssb_clockspeed); + +static u32 ssb_tmslow_reject_bitmask(struct ssb_device *dev) +{ + /* The REJECT bit changed position in TMSLOW between + * Backplane revisions. 
*/ + switch (ssb_read32(dev, SSB_IDLOW) & SSB_IDLOW_SSBREV) { + case SSB_IDLOW_SSBREV_22: + return SSB_TMSLOW_REJECT_22; + case SSB_IDLOW_SSBREV_23: + return SSB_TMSLOW_REJECT_23; + default: + WARN_ON(1); + } + return (SSB_TMSLOW_REJECT_22 | SSB_TMSLOW_REJECT_23); +} + +int ssb_device_is_enabled(struct ssb_device *dev) +{ + u32 val; + u32 reject; + + reject = ssb_tmslow_reject_bitmask(dev); + val = ssb_read32(dev, SSB_TMSLOW); + val &= SSB_TMSLOW_CLOCK | SSB_TMSLOW_RESET | reject; + + return (val == SSB_TMSLOW_CLOCK); +} +EXPORT_SYMBOL(ssb_device_is_enabled); + +static void ssb_flush_tmslow(struct ssb_device *dev) +{ + /* Make _really_ sure the device has finished the TMSLOW + * register write transaction, as we risk running into + * a machine check exception otherwise. + * Do this by reading the register back to commit the + * PCI write and delay an additional usec for the device + * to react to the change. */ + ssb_read32(dev, SSB_TMSLOW); + udelay(1); +} + +void ssb_device_enable(struct ssb_device *dev, u32 core_specific_flags) +{ + u32 val; + + ssb_device_disable(dev, core_specific_flags); + ssb_write32(dev, SSB_TMSLOW, + SSB_TMSLOW_RESET | SSB_TMSLOW_CLOCK | + SSB_TMSLOW_FGC | core_specific_flags); + ssb_flush_tmslow(dev); + + /* Clear SERR if set. This is a hw bug workaround. */ + if (ssb_read32(dev, SSB_TMSHIGH) & SSB_TMSHIGH_SERR) + ssb_write32(dev, SSB_TMSHIGH, 0); + + val = ssb_read32(dev, SSB_IMSTATE); + if (val & (SSB_IMSTATE_IBE | SSB_IMSTATE_TO)) { + val &= ~(SSB_IMSTATE_IBE | SSB_IMSTATE_TO); + ssb_write32(dev, SSB_IMSTATE, val); + } + + ssb_write32(dev, SSB_TMSLOW, + SSB_TMSLOW_CLOCK | SSB_TMSLOW_FGC | + core_specific_flags); + ssb_flush_tmslow(dev); + + ssb_write32(dev, SSB_TMSLOW, SSB_TMSLOW_CLOCK | + core_specific_flags); + ssb_flush_tmslow(dev); +} +EXPORT_SYMBOL(ssb_device_enable); + +/* Wait for a bit in a register to get set or unset. + * timeout is in units of ten-microseconds */ +static int ssb_wait_bit(struct ssb_device *dev, u16 reg, u32 bitmask, + int timeout, int set) +{ + int i; + u32 val; + + for (i = 0; i < timeout; i++) { + val = ssb_read32(dev, reg); + if (set) { + if (val & bitmask) + return 0; + } else { + if (!(val & bitmask)) + return 0; + } + udelay(10); + } + printk(KERN_ERR PFX "Timeout waiting for bitmask %08X on " + "register %04X to %s.\n", + bitmask, reg, (set ? 
"set" : "clear")); + + return -ETIMEDOUT; +} + +void ssb_device_disable(struct ssb_device *dev, u32 core_specific_flags) +{ + u32 reject; + + if (ssb_read32(dev, SSB_TMSLOW) & SSB_TMSLOW_RESET) + return; + + reject = ssb_tmslow_reject_bitmask(dev); + ssb_write32(dev, SSB_TMSLOW, reject | SSB_TMSLOW_CLOCK); + ssb_wait_bit(dev, SSB_TMSLOW, reject, 1000, 1); + ssb_wait_bit(dev, SSB_TMSHIGH, SSB_TMSHIGH_BUSY, 1000, 0); + ssb_write32(dev, SSB_TMSLOW, + SSB_TMSLOW_FGC | SSB_TMSLOW_CLOCK | + reject | SSB_TMSLOW_RESET | + core_specific_flags); + ssb_flush_tmslow(dev); + + ssb_write32(dev, SSB_TMSLOW, + reject | SSB_TMSLOW_RESET | + core_specific_flags); + ssb_flush_tmslow(dev); +} +EXPORT_SYMBOL(ssb_device_disable); + +u32 ssb_dma_translation(struct ssb_device *dev) +{ + switch (dev->bus->bustype) { + case SSB_BUSTYPE_SSB: + return 0; + case SSB_BUSTYPE_PCI: + case SSB_BUSTYPE_PCMCIA: + return SSB_PCI_DMA; + } + return 0; +} +EXPORT_SYMBOL(ssb_dma_translation); + +int ssb_dma_set_mask(struct ssb_device *ssb_dev, u64 mask) +{ + struct device *dev = ssb_dev->dev; + +#ifdef CONFIG_SSB_PCIHOST + if (ssb_dev->bus->bustype == SSB_BUSTYPE_PCI && + !dma_supported(dev, mask)) + return -EIO; +#endif + dev->coherent_dma_mask = mask; + dev->dma_mask = &dev->coherent_dma_mask; + + return 0; +} +EXPORT_SYMBOL(ssb_dma_set_mask); + +int ssb_bus_may_powerdown(struct ssb_bus *bus) +{ + struct ssb_chipcommon *cc; + int err = 0; + + /* On buses where more than one core may be working + * at a time, we must not powerdown stuff if there are + * still cores that may want to run. */ + if (bus->bustype == SSB_BUSTYPE_SSB) + goto out; + + cc = &bus->chipco; + ssb_chipco_set_clockmode(cc, SSB_CLKMODE_SLOW); + err = ssb_pci_xtal(bus, SSB_GPIO_XTAL | SSB_GPIO_PLL, 0); + if (err) + goto error; +out: +#ifdef CONFIG_SSB_DEBUG + bus->powered_up = 0; +#endif + return err; +error: + ssb_printk(KERN_ERR PFX "Bus powerdown failed\n"); + goto out; +} +EXPORT_SYMBOL(ssb_bus_may_powerdown); + +int ssb_bus_powerup(struct ssb_bus *bus, bool dynamic_pctl) +{ + struct ssb_chipcommon *cc; + int err; + enum ssb_clkmode mode; + + err = ssb_pci_xtal(bus, SSB_GPIO_XTAL | SSB_GPIO_PLL, 1); + if (err) + goto error; + cc = &bus->chipco; + mode = dynamic_pctl ? 
SSB_CLKMODE_DYNAMIC : SSB_CLKMODE_FAST; + ssb_chipco_set_clockmode(cc, mode); + +#ifdef CONFIG_SSB_DEBUG + bus->powered_up = 1; +#endif + return 0; +error: + ssb_printk(KERN_ERR PFX "Bus powerup failed\n"); + return err; +} +EXPORT_SYMBOL(ssb_bus_powerup); + +u32 ssb_admatch_base(u32 adm) +{ + u32 base = 0; + + switch (adm & SSB_ADM_TYPE) { + case SSB_ADM_TYPE0: + base = (adm & SSB_ADM_BASE0); + break; + case SSB_ADM_TYPE1: + SSB_WARN_ON(adm & SSB_ADM_NEG); /* unsupported */ + base = (adm & SSB_ADM_BASE1); + break; + case SSB_ADM_TYPE2: + SSB_WARN_ON(adm & SSB_ADM_NEG); /* unsupported */ + base = (adm & SSB_ADM_BASE2); + break; + default: + SSB_WARN_ON(1); + } + + return base; +} +EXPORT_SYMBOL(ssb_admatch_base); + +u32 ssb_admatch_size(u32 adm) +{ + u32 size = 0; + + switch (adm & SSB_ADM_TYPE) { + case SSB_ADM_TYPE0: + size = ((adm & SSB_ADM_SZ0) >> SSB_ADM_SZ0_SHIFT); + break; + case SSB_ADM_TYPE1: + SSB_WARN_ON(adm & SSB_ADM_NEG); /* unsupported */ + size = ((adm & SSB_ADM_SZ1) >> SSB_ADM_SZ1_SHIFT); + break; + case SSB_ADM_TYPE2: + SSB_WARN_ON(adm & SSB_ADM_NEG); /* unsupported */ + size = ((adm & SSB_ADM_SZ2) >> SSB_ADM_SZ2_SHIFT); + break; + default: + SSB_WARN_ON(1); + } + size = (1 << (size + 1)); + + return size; +} +EXPORT_SYMBOL(ssb_admatch_size); + +static int __init ssb_modinit(void) +{ + int err; + + /* See the comment at the ssb_is_early_boot definition */ + ssb_is_early_boot = 0; + err = bus_register(&ssb_bustype); + if (err) + return err; + + /* Maybe we already registered some buses at early boot. + * Check for this and attach them + */ + ssb_buses_lock(); + err = ssb_attach_queued_buses(); + ssb_buses_unlock(); + if (err) + bus_unregister(&ssb_bustype); + + err = b43_pci_ssb_bridge_init(); + if (err) { + ssb_printk(KERN_ERR "Broadcom 43xx PCI-SSB-bridge " + "initialization failed"); + /* don't fail SSB init because of this */ + err = 0; + } + + return err; +} +subsys_initcall(ssb_modinit); + +static void __exit ssb_modexit(void) +{ + b43_pci_ssb_bridge_exit(); + bus_unregister(&ssb_bustype); +} +module_exit(ssb_modexit) diff --git a/drivers/ssb/pci.c b/drivers/ssb/pci.c new file mode 100644 index 0000000000000000000000000000000000000000..0ab095c6581afb2a06ca8403374be1ef11ab17a6 --- /dev/null +++ b/drivers/ssb/pci.c @@ -0,0 +1,740 @@ +/* + * Sonics Silicon Backplane PCI-Hostbus related functions. + * + * Copyright (C) 2005-2006 Michael Buesch + * Copyright (C) 2005 Martin Langer + * Copyright (C) 2005 Stefano Brivio + * Copyright (C) 2005 Danny van Dyk + * Copyright (C) 2005 Andreas Jaggi + * + * Derived from the Broadcom 4400 device driver. + * Copyright (C) 2002 David S. Miller (davem@redhat.com) + * Fixed by Pekka Pietikainen (pp@ee.oulu.fi) + * Copyright (C) 2006 Broadcom Corporation. + * + * Licensed under the GNU/GPL. See COPYING for details. + */ + +#include +#include +#include +#include + +#include "ssb_private.h" + + +/* Define the following to 1 to enable a printk on each coreswitch. 
*/ +#define SSB_VERBOSE_PCICORESWITCH_DEBUG 0 + + +/* Lowlevel coreswitching */ +int ssb_pci_switch_coreidx(struct ssb_bus *bus, u8 coreidx) +{ + int err; + int attempts = 0; + u32 cur_core; + + while (1) { + err = pci_write_config_dword(bus->host_pci, SSB_BAR0_WIN, + (coreidx * SSB_CORE_SIZE) + + SSB_ENUM_BASE); + if (err) + goto error; + err = pci_read_config_dword(bus->host_pci, SSB_BAR0_WIN, + &cur_core); + if (err) + goto error; + cur_core = (cur_core - SSB_ENUM_BASE) + / SSB_CORE_SIZE; + if (cur_core == coreidx) + break; + + if (attempts++ > SSB_BAR0_MAX_RETRIES) + goto error; + udelay(10); + } + return 0; +error: + ssb_printk(KERN_ERR PFX "Failed to switch to core %u\n", coreidx); + return -ENODEV; +} + +int ssb_pci_switch_core(struct ssb_bus *bus, + struct ssb_device *dev) +{ + int err; + unsigned long flags; + +#if SSB_VERBOSE_PCICORESWITCH_DEBUG + ssb_printk(KERN_INFO PFX + "Switching to %s core, index %d\n", + ssb_core_name(dev->id.coreid), + dev->core_index); +#endif + + spin_lock_irqsave(&bus->bar_lock, flags); + err = ssb_pci_switch_coreidx(bus, dev->core_index); + if (!err) + bus->mapped_device = dev; + spin_unlock_irqrestore(&bus->bar_lock, flags); + + return err; +} + +/* Enable/disable the on board crystal oscillator and/or PLL. */ +int ssb_pci_xtal(struct ssb_bus *bus, u32 what, int turn_on) +{ + int err; + u32 in, out, outenable; + u16 pci_status; + + if (bus->bustype != SSB_BUSTYPE_PCI) + return 0; + + err = pci_read_config_dword(bus->host_pci, SSB_GPIO_IN, &in); + if (err) + goto err_pci; + err = pci_read_config_dword(bus->host_pci, SSB_GPIO_OUT, &out); + if (err) + goto err_pci; + err = pci_read_config_dword(bus->host_pci, SSB_GPIO_OUT_ENABLE, &outenable); + if (err) + goto err_pci; + + outenable |= what; + + if (turn_on) { + /* Avoid glitching the clock if GPRS is already using it. + * We can't actually read the state of the PLLPD so we infer it + * by the value of XTAL_PU which *is* readable via gpioin. + */ + if (!(in & SSB_GPIO_XTAL)) { + if (what & SSB_GPIO_XTAL) { + /* Turn the crystal on */ + out |= SSB_GPIO_XTAL; + if (what & SSB_GPIO_PLL) + out |= SSB_GPIO_PLL; + err = pci_write_config_dword(bus->host_pci, SSB_GPIO_OUT, out); + if (err) + goto err_pci; + err = pci_write_config_dword(bus->host_pci, SSB_GPIO_OUT_ENABLE, + outenable); + if (err) + goto err_pci; + msleep(1); + } + if (what & SSB_GPIO_PLL) { + /* Turn the PLL on */ + out &= ~SSB_GPIO_PLL; + err = pci_write_config_dword(bus->host_pci, SSB_GPIO_OUT, out); + if (err) + goto err_pci; + msleep(5); + } + } + + err = pci_read_config_word(bus->host_pci, PCI_STATUS, &pci_status); + if (err) + goto err_pci; + pci_status &= ~PCI_STATUS_SIG_TARGET_ABORT; + err = pci_write_config_word(bus->host_pci, PCI_STATUS, pci_status); + if (err) + goto err_pci; + } else { + if (what & SSB_GPIO_XTAL) { + /* Turn the crystal off */ + out &= ~SSB_GPIO_XTAL; + } + if (what & SSB_GPIO_PLL) { + /* Turn the PLL off */ + out |= SSB_GPIO_PLL; + } + err = pci_write_config_dword(bus->host_pci, SSB_GPIO_OUT, out); + if (err) + goto err_pci; + err = pci_write_config_dword(bus->host_pci, SSB_GPIO_OUT_ENABLE, outenable); + if (err) + goto err_pci; + } + +out: + return err; + +err_pci: + printk(KERN_ERR PFX "Error: ssb_pci_xtal() could not access PCI config space!\n"); + err = -EBUSY; + goto out; +} + +/* Get the word-offset for a SSB_SPROM_XXX define. */ +#define SPOFF(offset) (((offset) - SSB_SPROM_BASE) / sizeof(u16)) +/* Helper to extract some _offset, which is one of the SSB_SPROM_XXX defines. 
*/ +#define SPEX(_outvar, _offset, _mask, _shift) \ + out->_outvar = ((in[SPOFF(_offset)] & (_mask)) >> (_shift)) + +static inline u8 ssb_crc8(u8 crc, u8 data) +{ + /* Polynomial: x^8 + x^7 + x^6 + x^4 + x^2 + 1 */ + static const u8 t[] = { + 0x00, 0xF7, 0xB9, 0x4E, 0x25, 0xD2, 0x9C, 0x6B, + 0x4A, 0xBD, 0xF3, 0x04, 0x6F, 0x98, 0xD6, 0x21, + 0x94, 0x63, 0x2D, 0xDA, 0xB1, 0x46, 0x08, 0xFF, + 0xDE, 0x29, 0x67, 0x90, 0xFB, 0x0C, 0x42, 0xB5, + 0x7F, 0x88, 0xC6, 0x31, 0x5A, 0xAD, 0xE3, 0x14, + 0x35, 0xC2, 0x8C, 0x7B, 0x10, 0xE7, 0xA9, 0x5E, + 0xEB, 0x1C, 0x52, 0xA5, 0xCE, 0x39, 0x77, 0x80, + 0xA1, 0x56, 0x18, 0xEF, 0x84, 0x73, 0x3D, 0xCA, + 0xFE, 0x09, 0x47, 0xB0, 0xDB, 0x2C, 0x62, 0x95, + 0xB4, 0x43, 0x0D, 0xFA, 0x91, 0x66, 0x28, 0xDF, + 0x6A, 0x9D, 0xD3, 0x24, 0x4F, 0xB8, 0xF6, 0x01, + 0x20, 0xD7, 0x99, 0x6E, 0x05, 0xF2, 0xBC, 0x4B, + 0x81, 0x76, 0x38, 0xCF, 0xA4, 0x53, 0x1D, 0xEA, + 0xCB, 0x3C, 0x72, 0x85, 0xEE, 0x19, 0x57, 0xA0, + 0x15, 0xE2, 0xAC, 0x5B, 0x30, 0xC7, 0x89, 0x7E, + 0x5F, 0xA8, 0xE6, 0x11, 0x7A, 0x8D, 0xC3, 0x34, + 0xAB, 0x5C, 0x12, 0xE5, 0x8E, 0x79, 0x37, 0xC0, + 0xE1, 0x16, 0x58, 0xAF, 0xC4, 0x33, 0x7D, 0x8A, + 0x3F, 0xC8, 0x86, 0x71, 0x1A, 0xED, 0xA3, 0x54, + 0x75, 0x82, 0xCC, 0x3B, 0x50, 0xA7, 0xE9, 0x1E, + 0xD4, 0x23, 0x6D, 0x9A, 0xF1, 0x06, 0x48, 0xBF, + 0x9E, 0x69, 0x27, 0xD0, 0xBB, 0x4C, 0x02, 0xF5, + 0x40, 0xB7, 0xF9, 0x0E, 0x65, 0x92, 0xDC, 0x2B, + 0x0A, 0xFD, 0xB3, 0x44, 0x2F, 0xD8, 0x96, 0x61, + 0x55, 0xA2, 0xEC, 0x1B, 0x70, 0x87, 0xC9, 0x3E, + 0x1F, 0xE8, 0xA6, 0x51, 0x3A, 0xCD, 0x83, 0x74, + 0xC1, 0x36, 0x78, 0x8F, 0xE4, 0x13, 0x5D, 0xAA, + 0x8B, 0x7C, 0x32, 0xC5, 0xAE, 0x59, 0x17, 0xE0, + 0x2A, 0xDD, 0x93, 0x64, 0x0F, 0xF8, 0xB6, 0x41, + 0x60, 0x97, 0xD9, 0x2E, 0x45, 0xB2, 0xFC, 0x0B, + 0xBE, 0x49, 0x07, 0xF0, 0x9B, 0x6C, 0x22, 0xD5, + 0xF4, 0x03, 0x4D, 0xBA, 0xD1, 0x26, 0x68, 0x9F, + }; + return t[crc ^ data]; +} + +static u8 ssb_sprom_crc(const u16 *sprom) +{ + int word; + u8 crc = 0xFF; + + for (word = 0; word < SSB_SPROMSIZE_WORDS - 1; word++) { + crc = ssb_crc8(crc, sprom[word] & 0x00FF); + crc = ssb_crc8(crc, (sprom[word] & 0xFF00) >> 8); + } + crc = ssb_crc8(crc, sprom[SPOFF(SSB_SPROM_REVISION)] & 0x00FF); + crc ^= 0xFF; + + return crc; +} + +static int sprom_check_crc(const u16 *sprom) +{ + u8 crc; + u8 expected_crc; + u16 tmp; + + crc = ssb_sprom_crc(sprom); + tmp = sprom[SPOFF(SSB_SPROM_REVISION)] & SSB_SPROM_REVISION_CRC; + expected_crc = tmp >> SSB_SPROM_REVISION_CRC_SHIFT; + if (crc != expected_crc) + return -EPROTO; + + return 0; +} + +static void sprom_do_read(struct ssb_bus *bus, u16 *sprom) +{ + int i; + + for (i = 0; i < SSB_SPROMSIZE_WORDS; i++) + sprom[i] = readw(bus->mmio + SSB_SPROM_BASE + (i * 2)); +} + +static int sprom_do_write(struct ssb_bus *bus, const u16 *sprom) +{ + struct pci_dev *pdev = bus->host_pci; + int i, err; + u32 spromctl; + + ssb_printk(KERN_NOTICE PFX "Writing SPROM. Do NOT turn off the power! 
Please stand by...\n"); + err = pci_read_config_dword(pdev, SSB_SPROMCTL, &spromctl); + if (err) + goto err_ctlreg; + spromctl |= SSB_SPROMCTL_WE; + err = pci_write_config_dword(pdev, SSB_SPROMCTL, spromctl); + if (err) + goto err_ctlreg; + ssb_printk(KERN_NOTICE PFX "[ 0%%"); + msleep(500); + for (i = 0; i < SSB_SPROMSIZE_WORDS; i++) { + if (i == SSB_SPROMSIZE_WORDS / 4) + ssb_printk("25%%"); + else if (i == SSB_SPROMSIZE_WORDS / 2) + ssb_printk("50%%"); + else if (i == (SSB_SPROMSIZE_WORDS / 4) * 3) + ssb_printk("75%%"); + else if (i % 2) + ssb_printk("."); + writew(sprom[i], bus->mmio + SSB_SPROM_BASE + (i * 2)); + mmiowb(); + msleep(20); + } + err = pci_read_config_dword(pdev, SSB_SPROMCTL, &spromctl); + if (err) + goto err_ctlreg; + spromctl &= ~SSB_SPROMCTL_WE; + err = pci_write_config_dword(pdev, SSB_SPROMCTL, spromctl); + if (err) + goto err_ctlreg; + msleep(500); + ssb_printk("100%% ]\n"); + ssb_printk(KERN_NOTICE PFX "SPROM written.\n"); + + return 0; +err_ctlreg: + ssb_printk(KERN_ERR PFX "Could not access SPROM control register.\n"); + return err; +} + +static void sprom_extract_r1(struct ssb_sprom_r1 *out, const u16 *in) +{ + int i; + u16 v; + + SPEX(pci_spid, SSB_SPROM1_SPID, 0xFFFF, 0); + SPEX(pci_svid, SSB_SPROM1_SVID, 0xFFFF, 0); + SPEX(pci_pid, SSB_SPROM1_PID, 0xFFFF, 0); + for (i = 0; i < 3; i++) { + v = in[SPOFF(SSB_SPROM1_IL0MAC) + i]; + *(((__be16 *)out->il0mac) + i) = cpu_to_be16(v); + } + for (i = 0; i < 3; i++) { + v = in[SPOFF(SSB_SPROM1_ET0MAC) + i]; + *(((__be16 *)out->et0mac) + i) = cpu_to_be16(v); + } + for (i = 0; i < 3; i++) { + v = in[SPOFF(SSB_SPROM1_ET1MAC) + i]; + *(((__be16 *)out->et1mac) + i) = cpu_to_be16(v); + } + SPEX(et0phyaddr, SSB_SPROM1_ETHPHY, SSB_SPROM1_ETHPHY_ET0A, 0); + SPEX(et1phyaddr, SSB_SPROM1_ETHPHY, SSB_SPROM1_ETHPHY_ET1A, + SSB_SPROM1_ETHPHY_ET1A_SHIFT); + SPEX(et0mdcport, SSB_SPROM1_ETHPHY, SSB_SPROM1_ETHPHY_ET0M, 14); + SPEX(et1mdcport, SSB_SPROM1_ETHPHY, SSB_SPROM1_ETHPHY_ET1M, 15); + SPEX(board_rev, SSB_SPROM1_BINF, SSB_SPROM1_BINF_BREV, 0); + SPEX(country_code, SSB_SPROM1_BINF, SSB_SPROM1_BINF_CCODE, + SSB_SPROM1_BINF_CCODE_SHIFT); + SPEX(antenna_a, SSB_SPROM1_BINF, SSB_SPROM1_BINF_ANTA, + SSB_SPROM1_BINF_ANTA_SHIFT); + SPEX(antenna_bg, SSB_SPROM1_BINF, SSB_SPROM1_BINF_ANTBG, + SSB_SPROM1_BINF_ANTBG_SHIFT); + SPEX(pa0b0, SSB_SPROM1_PA0B0, 0xFFFF, 0); + SPEX(pa0b1, SSB_SPROM1_PA0B1, 0xFFFF, 0); + SPEX(pa0b2, SSB_SPROM1_PA0B2, 0xFFFF, 0); + SPEX(pa1b0, SSB_SPROM1_PA1B0, 0xFFFF, 0); + SPEX(pa1b1, SSB_SPROM1_PA1B1, 0xFFFF, 0); + SPEX(pa1b2, SSB_SPROM1_PA1B2, 0xFFFF, 0); + SPEX(gpio0, SSB_SPROM1_GPIOA, SSB_SPROM1_GPIOA_P0, 0); + SPEX(gpio1, SSB_SPROM1_GPIOA, SSB_SPROM1_GPIOA_P1, + SSB_SPROM1_GPIOA_P1_SHIFT); + SPEX(gpio2, SSB_SPROM1_GPIOB, SSB_SPROM1_GPIOB_P2, 0); + SPEX(gpio3, SSB_SPROM1_GPIOB, SSB_SPROM1_GPIOB_P3, + SSB_SPROM1_GPIOB_P3_SHIFT); + SPEX(maxpwr_a, SSB_SPROM1_MAXPWR, SSB_SPROM1_MAXPWR_A, + SSB_SPROM1_MAXPWR_A_SHIFT); + SPEX(maxpwr_bg, SSB_SPROM1_MAXPWR, SSB_SPROM1_MAXPWR_BG, 0); + SPEX(itssi_a, SSB_SPROM1_ITSSI, SSB_SPROM1_ITSSI_A, + SSB_SPROM1_ITSSI_A_SHIFT); + SPEX(itssi_bg, SSB_SPROM1_ITSSI, SSB_SPROM1_ITSSI_BG, 0); + SPEX(boardflags_lo, SSB_SPROM1_BFLLO, 0xFFFF, 0); + SPEX(antenna_gain_a, SSB_SPROM1_AGAIN, SSB_SPROM1_AGAIN_A, 0); + SPEX(antenna_gain_bg, SSB_SPROM1_AGAIN, SSB_SPROM1_AGAIN_BG, + SSB_SPROM1_AGAIN_BG_SHIFT); + for (i = 0; i < 4; i++) { + v = in[SPOFF(SSB_SPROM1_OEM) + i]; + *(((__le16 *)out->oem) + i) = cpu_to_le16(v); + } +} + +static void sprom_extract_r2(struct ssb_sprom_r2 *out, const u16 *in) +{ + 
int i; + u16 v; + + SPEX(boardflags_hi, SSB_SPROM2_BFLHI, 0xFFFF, 0); + SPEX(maxpwr_a_hi, SSB_SPROM2_MAXP_A, SSB_SPROM2_MAXP_A_HI, 0); + SPEX(maxpwr_a_lo, SSB_SPROM2_MAXP_A, SSB_SPROM2_MAXP_A_LO, + SSB_SPROM2_MAXP_A_LO_SHIFT); + SPEX(pa1lob0, SSB_SPROM2_PA1LOB0, 0xFFFF, 0); + SPEX(pa1lob1, SSB_SPROM2_PA1LOB1, 0xFFFF, 0); + SPEX(pa1lob2, SSB_SPROM2_PA1LOB2, 0xFFFF, 0); + SPEX(pa1hib0, SSB_SPROM2_PA1HIB0, 0xFFFF, 0); + SPEX(pa1hib1, SSB_SPROM2_PA1HIB1, 0xFFFF, 0); + SPEX(pa1hib2, SSB_SPROM2_PA1HIB2, 0xFFFF, 0); + SPEX(ofdm_pwr_off, SSB_SPROM2_OPO, SSB_SPROM2_OPO_VALUE, 0); + for (i = 0; i < 4; i++) { + v = in[SPOFF(SSB_SPROM2_CCODE) + i]; + *(((__le16 *)out->country_str) + i) = cpu_to_le16(v); + } +} + +static void sprom_extract_r3(struct ssb_sprom_r3 *out, const u16 *in) +{ + out->ofdmapo = (in[SPOFF(SSB_SPROM3_OFDMAPO) + 0] & 0xFF00) >> 8; + out->ofdmapo |= (in[SPOFF(SSB_SPROM3_OFDMAPO) + 0] & 0x00FF) << 8; + out->ofdmapo <<= 16; + out->ofdmapo |= (in[SPOFF(SSB_SPROM3_OFDMAPO) + 1] & 0xFF00) >> 8; + out->ofdmapo |= (in[SPOFF(SSB_SPROM3_OFDMAPO) + 1] & 0x00FF) << 8; + + out->ofdmalpo = (in[SPOFF(SSB_SPROM3_OFDMALPO) + 0] & 0xFF00) >> 8; + out->ofdmalpo |= (in[SPOFF(SSB_SPROM3_OFDMALPO) + 0] & 0x00FF) << 8; + out->ofdmalpo <<= 16; + out->ofdmalpo |= (in[SPOFF(SSB_SPROM3_OFDMALPO) + 1] & 0xFF00) >> 8; + out->ofdmalpo |= (in[SPOFF(SSB_SPROM3_OFDMALPO) + 1] & 0x00FF) << 8; + + out->ofdmahpo = (in[SPOFF(SSB_SPROM3_OFDMAHPO) + 0] & 0xFF00) >> 8; + out->ofdmahpo |= (in[SPOFF(SSB_SPROM3_OFDMAHPO) + 0] & 0x00FF) << 8; + out->ofdmahpo <<= 16; + out->ofdmahpo |= (in[SPOFF(SSB_SPROM3_OFDMAHPO) + 1] & 0xFF00) >> 8; + out->ofdmahpo |= (in[SPOFF(SSB_SPROM3_OFDMAHPO) + 1] & 0x00FF) << 8; + + SPEX(gpioldc_on_cnt, SSB_SPROM3_GPIOLDC, SSB_SPROM3_GPIOLDC_ON, + SSB_SPROM3_GPIOLDC_ON_SHIFT); + SPEX(gpioldc_off_cnt, SSB_SPROM3_GPIOLDC, SSB_SPROM3_GPIOLDC_OFF, + SSB_SPROM3_GPIOLDC_OFF_SHIFT); + SPEX(cckpo_1M, SSB_SPROM3_CCKPO, SSB_SPROM3_CCKPO_1M, 0); + SPEX(cckpo_2M, SSB_SPROM3_CCKPO, SSB_SPROM3_CCKPO_2M, + SSB_SPROM3_CCKPO_2M_SHIFT); + SPEX(cckpo_55M, SSB_SPROM3_CCKPO, SSB_SPROM3_CCKPO_55M, + SSB_SPROM3_CCKPO_55M_SHIFT); + SPEX(cckpo_11M, SSB_SPROM3_CCKPO, SSB_SPROM3_CCKPO_11M, + SSB_SPROM3_CCKPO_11M_SHIFT); + + out->ofdmgpo = (in[SPOFF(SSB_SPROM3_OFDMGPO) + 0] & 0xFF00) >> 8; + out->ofdmgpo |= (in[SPOFF(SSB_SPROM3_OFDMGPO) + 0] & 0x00FF) << 8; + out->ofdmgpo <<= 16; + out->ofdmgpo |= (in[SPOFF(SSB_SPROM3_OFDMGPO) + 1] & 0xFF00) >> 8; + out->ofdmgpo |= (in[SPOFF(SSB_SPROM3_OFDMGPO) + 1] & 0x00FF) << 8; +} + +static int sprom_extract(struct ssb_bus *bus, + struct ssb_sprom *out, const u16 *in) +{ + memset(out, 0, sizeof(*out)); + + SPEX(revision, SSB_SPROM_REVISION, SSB_SPROM_REVISION_REV, 0); + SPEX(crc, SSB_SPROM_REVISION, SSB_SPROM_REVISION_CRC, + SSB_SPROM_REVISION_CRC_SHIFT); + + if ((bus->chip_id & 0xFF00) == 0x4400) { + /* Workaround: The BCM44XX chip has a stupid revision + * number stored in the SPROM. + * Always extract r1. */ + sprom_extract_r1(&out->r1, in); + } else { + if (out->revision == 0) + goto unsupported; + if (out->revision >= 1 && out->revision <= 3) + sprom_extract_r1(&out->r1, in); + if (out->revision >= 2 && out->revision <= 3) + sprom_extract_r2(&out->r2, in); + if (out->revision == 3) + sprom_extract_r3(&out->r3, in); + if (out->revision >= 4) + goto unsupported; + } + + return 0; +unsupported: + ssb_printk(KERN_WARNING PFX "Unsupported SPROM revision %d " + "detected. 
Will extract v1\n", out->revision); + sprom_extract_r1(&out->r1, in); + return 0; +} + +static int ssb_pci_sprom_get(struct ssb_bus *bus, + struct ssb_sprom *sprom) +{ + int err = -ENOMEM; + u16 *buf; + + buf = kcalloc(SSB_SPROMSIZE_WORDS, sizeof(u16), GFP_KERNEL); + if (!buf) + goto out; + sprom_do_read(bus, buf); + err = sprom_check_crc(buf); + if (err) { + ssb_printk(KERN_WARNING PFX + "WARNING: Invalid SPROM CRC (corrupt SPROM)\n"); + } + err = sprom_extract(bus, sprom, buf); + + kfree(buf); +out: + return err; +} + +static void ssb_pci_get_boardinfo(struct ssb_bus *bus, + struct ssb_boardinfo *bi) +{ + pci_read_config_word(bus->host_pci, PCI_SUBSYSTEM_VENDOR_ID, + &bi->vendor); + pci_read_config_word(bus->host_pci, PCI_SUBSYSTEM_ID, + &bi->type); + pci_read_config_word(bus->host_pci, PCI_REVISION_ID, + &bi->rev); +} + +int ssb_pci_get_invariants(struct ssb_bus *bus, + struct ssb_init_invariants *iv) +{ + int err; + + err = ssb_pci_sprom_get(bus, &iv->sprom); + if (err) + goto out; + ssb_pci_get_boardinfo(bus, &iv->boardinfo); + +out: + return err; +} + +#ifdef CONFIG_SSB_DEBUG +static int ssb_pci_assert_buspower(struct ssb_bus *bus) +{ + if (likely(bus->powered_up)) + return 0; + + printk(KERN_ERR PFX "FATAL ERROR: Bus powered down " + "while accessing PCI MMIO space\n"); + if (bus->power_warn_count <= 10) { + bus->power_warn_count++; + dump_stack(); + } + + return -ENODEV; +} +#else /* DEBUG */ +static inline int ssb_pci_assert_buspower(struct ssb_bus *bus) +{ + return 0; +} +#endif /* DEBUG */ + +static u16 ssb_pci_read16(struct ssb_device *dev, u16 offset) +{ + struct ssb_bus *bus = dev->bus; + + if (unlikely(ssb_pci_assert_buspower(bus))) + return 0xFFFF; + if (unlikely(bus->mapped_device != dev)) { + if (unlikely(ssb_pci_switch_core(bus, dev))) + return 0xFFFF; + } + return ioread16(bus->mmio + offset); +} + +static u32 ssb_pci_read32(struct ssb_device *dev, u16 offset) +{ + struct ssb_bus *bus = dev->bus; + + if (unlikely(ssb_pci_assert_buspower(bus))) + return 0xFFFFFFFF; + if (unlikely(bus->mapped_device != dev)) { + if (unlikely(ssb_pci_switch_core(bus, dev))) + return 0xFFFFFFFF; + } + return ioread32(bus->mmio + offset); +} + +static void ssb_pci_write16(struct ssb_device *dev, u16 offset, u16 value) +{ + struct ssb_bus *bus = dev->bus; + + if (unlikely(ssb_pci_assert_buspower(bus))) + return; + if (unlikely(bus->mapped_device != dev)) { + if (unlikely(ssb_pci_switch_core(bus, dev))) + return; + } + iowrite16(value, bus->mmio + offset); +} + +static void ssb_pci_write32(struct ssb_device *dev, u16 offset, u32 value) +{ + struct ssb_bus *bus = dev->bus; + + if (unlikely(ssb_pci_assert_buspower(bus))) + return; + if (unlikely(bus->mapped_device != dev)) { + if (unlikely(ssb_pci_switch_core(bus, dev))) + return; + } + iowrite32(value, bus->mmio + offset); +} + +/* Not "static", as it's used in main.c */ +const struct ssb_bus_ops ssb_pci_ops = { + .read16 = ssb_pci_read16, + .read32 = ssb_pci_read32, + .write16 = ssb_pci_write16, + .write32 = ssb_pci_write32, +}; + +static int sprom2hex(const u16 *sprom, char *buf, size_t buf_len) +{ + int i, pos = 0; + + for (i = 0; i < SSB_SPROMSIZE_WORDS; i++) { + pos += snprintf(buf + pos, buf_len - pos - 1, + "%04X", swab16(sprom[i]) & 0xFFFF); + } + pos += snprintf(buf + pos, buf_len - pos - 1, "\n"); + + return pos + 1; +} + +static int hex2sprom(u16 *sprom, const char *dump, size_t len) +{ + char tmp[5] = { 0 }; + int cnt = 0; + unsigned long parsed; + + if (len < SSB_SPROMSIZE_BYTES * 2) + return -EINVAL; + + while (cnt < 
SSB_SPROMSIZE_WORDS) { + memcpy(tmp, dump, 4); + dump += 4; + parsed = simple_strtoul(tmp, NULL, 16); + sprom[cnt++] = swab16((u16)parsed); + } + + return 0; +} + +static ssize_t ssb_pci_attr_sprom_show(struct device *pcidev, + struct device_attribute *attr, + char *buf) +{ + struct pci_dev *pdev = container_of(pcidev, struct pci_dev, dev); + struct ssb_bus *bus; + u16 *sprom; + int err = -ENODEV; + ssize_t count = 0; + + bus = ssb_pci_dev_to_bus(pdev); + if (!bus) + goto out; + err = -ENOMEM; + sprom = kcalloc(SSB_SPROMSIZE_WORDS, sizeof(u16), GFP_KERNEL); + if (!sprom) + goto out; + + /* Use interruptible locking, as the SPROM write might + * be holding the lock for several seconds. So allow userspace + * to cancel operation. */ + err = -ERESTARTSYS; + if (mutex_lock_interruptible(&bus->pci_sprom_mutex)) + goto out_kfree; + sprom_do_read(bus, sprom); + mutex_unlock(&bus->pci_sprom_mutex); + + count = sprom2hex(sprom, buf, PAGE_SIZE); + err = 0; + +out_kfree: + kfree(sprom); +out: + return err ? err : count; +} + +static ssize_t ssb_pci_attr_sprom_store(struct device *pcidev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct pci_dev *pdev = container_of(pcidev, struct pci_dev, dev); + struct ssb_bus *bus; + u16 *sprom; + int res = 0, err = -ENODEV; + + bus = ssb_pci_dev_to_bus(pdev); + if (!bus) + goto out; + err = -ENOMEM; + sprom = kcalloc(SSB_SPROMSIZE_WORDS, sizeof(u16), GFP_KERNEL); + if (!sprom) + goto out; + err = hex2sprom(sprom, buf, count); + if (err) { + err = -EINVAL; + goto out_kfree; + } + err = sprom_check_crc(sprom); + if (err) { + err = -EINVAL; + goto out_kfree; + } + + /* Use interruptible locking, as the SPROM write might + * be holding the lock for several seconds. So allow userspace + * to cancel operation. */ + err = -ERESTARTSYS; + if (mutex_lock_interruptible(&bus->pci_sprom_mutex)) + goto out_kfree; + err = ssb_devices_freeze(bus); + if (err == -EOPNOTSUPP) { + ssb_printk(KERN_ERR PFX "SPROM write: Could not freeze devices. " + "No suspend support. Is CONFIG_PM enabled?\n"); + goto out_unlock; + } + if (err) { + ssb_printk(KERN_ERR PFX "SPROM write: Could not freeze all devices\n"); + goto out_unlock; + } + res = sprom_do_write(bus, sprom); + err = ssb_devices_thaw(bus); + if (err) + ssb_printk(KERN_ERR PFX "SPROM write: Could not thaw all devices\n"); +out_unlock: + mutex_unlock(&bus->pci_sprom_mutex); +out_kfree: + kfree(sprom); +out: + if (res) + return res; + return err ? 
err : count; +} + +static DEVICE_ATTR(ssb_sprom, 0600, + ssb_pci_attr_sprom_show, + ssb_pci_attr_sprom_store); + +void ssb_pci_exit(struct ssb_bus *bus) +{ + struct pci_dev *pdev; + + if (bus->bustype != SSB_BUSTYPE_PCI) + return; + + pdev = bus->host_pci; + device_remove_file(&pdev->dev, &dev_attr_ssb_sprom); +} + +int ssb_pci_init(struct ssb_bus *bus) +{ + struct pci_dev *pdev; + int err; + + if (bus->bustype != SSB_BUSTYPE_PCI) + return 0; + + pdev = bus->host_pci; + mutex_init(&bus->pci_sprom_mutex); + err = device_create_file(&pdev->dev, &dev_attr_ssb_sprom); + if (err) + goto out; + +out: + return err; +} diff --git a/drivers/ssb/pcihost_wrapper.c b/drivers/ssb/pcihost_wrapper.c new file mode 100644 index 0000000000000000000000000000000000000000..82a10abef64045053a6acfcfd6c5d45c5c4c156f --- /dev/null +++ b/drivers/ssb/pcihost_wrapper.c @@ -0,0 +1,104 @@ +/* + * Sonics Silicon Backplane + * PCI Hostdevice wrapper + * + * Copyright (c) 2005 Martin Langer + * Copyright (c) 2005 Stefano Brivio + * Copyright (c) 2005 Danny van Dyk + * Copyright (c) 2005 Andreas Jaggi + * Copyright (c) 2005-2007 Michael Buesch + * + * Licensed under the GNU/GPL. See COPYING for details. + */ + +#include +#include + + +#ifdef CONFIG_PM +static int ssb_pcihost_suspend(struct pci_dev *dev, pm_message_t state) +{ + pci_save_state(dev); + pci_disable_device(dev); + pci_set_power_state(dev, pci_choose_state(dev, state)); + + return 0; +} + +static int ssb_pcihost_resume(struct pci_dev *dev) +{ + int err; + + pci_set_power_state(dev, 0); + err = pci_enable_device(dev); + if (err) + return err; + pci_restore_state(dev); + + return 0; +} +#else /* CONFIG_PM */ +# define ssb_pcihost_suspend NULL +# define ssb_pcihost_resume NULL +#endif /* CONFIG_PM */ + +static int ssb_pcihost_probe(struct pci_dev *dev, + const struct pci_device_id *id) +{ + struct ssb_bus *ssb; + int err = -ENOMEM; + const char *name; + + ssb = kzalloc(sizeof(*ssb), GFP_KERNEL); + if (!ssb) + goto out; + err = pci_enable_device(dev); + if (err) + goto err_kfree_ssb; + name = dev->dev.bus_id; + if (dev->driver && dev->driver->name) + name = dev->driver->name; + err = pci_request_regions(dev, name); + if (err) + goto err_pci_disable; + pci_set_master(dev); + + err = ssb_bus_pcibus_register(ssb, dev); + if (err) + goto err_pci_release_regions; + + pci_set_drvdata(dev, ssb); + +out: + return err; + +err_pci_release_regions: + pci_release_regions(dev); +err_pci_disable: + pci_disable_device(dev); +err_kfree_ssb: + kfree(ssb); + return err; +} + +static void ssb_pcihost_remove(struct pci_dev *dev) +{ + struct ssb_bus *ssb = pci_get_drvdata(dev); + + ssb_bus_unregister(ssb); + pci_release_regions(dev); + pci_disable_device(dev); + kfree(ssb); + pci_set_drvdata(dev, NULL); +} + +int ssb_pcihost_register(struct pci_driver *driver) +{ + driver->probe = ssb_pcihost_probe; + driver->remove = ssb_pcihost_remove; + driver->suspend = ssb_pcihost_suspend; + driver->resume = ssb_pcihost_resume; + + return pci_register_driver(driver); +} +EXPORT_SYMBOL(ssb_pcihost_register); diff --git a/drivers/ssb/pcmcia.c b/drivers/ssb/pcmcia.c new file mode 100644 index 0000000000000000000000000000000000000000..7c773603b4025f41eacba761bc08484b60a470dd --- /dev/null +++ b/drivers/ssb/pcmcia.c @@ -0,0 +1,271 @@ +/* + * Sonics Silicon Backplane + * PCMCIA-Hostbus related functions + * + * Copyright 2006 Johannes Berg + * Copyright 2007 Michael Buesch + * + * Licensed under the GNU/GPL. See COPYING for details. 
+ */ + +#include +#include + +#include +#include +#include +#include +#include +#include + +#include "ssb_private.h" + + +/* Define the following to 1 to enable a printk on each coreswitch. */ +#define SSB_VERBOSE_PCMCIACORESWITCH_DEBUG 0 + + +int ssb_pcmcia_switch_coreidx(struct ssb_bus *bus, + u8 coreidx) +{ + struct pcmcia_device *pdev = bus->host_pcmcia; + int err; + int attempts = 0; + u32 cur_core; + conf_reg_t reg; + u32 addr; + u32 read_addr; + + addr = (coreidx * SSB_CORE_SIZE) + SSB_ENUM_BASE; + while (1) { + reg.Action = CS_WRITE; + reg.Offset = 0x2E; + reg.Value = (addr & 0x0000F000) >> 12; + err = pcmcia_access_configuration_register(pdev, ®); + if (err != CS_SUCCESS) + goto error; + reg.Offset = 0x30; + reg.Value = (addr & 0x00FF0000) >> 16; + err = pcmcia_access_configuration_register(pdev, ®); + if (err != CS_SUCCESS) + goto error; + reg.Offset = 0x32; + reg.Value = (addr & 0xFF000000) >> 24; + err = pcmcia_access_configuration_register(pdev, ®); + if (err != CS_SUCCESS) + goto error; + + read_addr = 0; + + reg.Action = CS_READ; + reg.Offset = 0x2E; + err = pcmcia_access_configuration_register(pdev, ®); + if (err != CS_SUCCESS) + goto error; + read_addr |= (reg.Value & 0xF) << 12; + reg.Offset = 0x30; + err = pcmcia_access_configuration_register(pdev, ®); + if (err != CS_SUCCESS) + goto error; + read_addr |= reg.Value << 16; + reg.Offset = 0x32; + err = pcmcia_access_configuration_register(pdev, ®); + if (err != CS_SUCCESS) + goto error; + read_addr |= reg.Value << 24; + + cur_core = (read_addr - SSB_ENUM_BASE) / SSB_CORE_SIZE; + if (cur_core == coreidx) + break; + + if (attempts++ > SSB_BAR0_MAX_RETRIES) + goto error; + udelay(10); + } + + return 0; +error: + ssb_printk(KERN_ERR PFX "Failed to switch to core %u\n", coreidx); + return -ENODEV; +} + +int ssb_pcmcia_switch_core(struct ssb_bus *bus, + struct ssb_device *dev) +{ + int err; + unsigned long flags; + +#if SSB_VERBOSE_PCMCIACORESWITCH_DEBUG + ssb_printk(KERN_INFO PFX + "Switching to %s core, index %d\n", + ssb_core_name(dev->id.coreid), + dev->core_index); +#endif + + spin_lock_irqsave(&bus->bar_lock, flags); + err = ssb_pcmcia_switch_coreidx(bus, dev->core_index); + if (!err) + bus->mapped_device = dev; + spin_unlock_irqrestore(&bus->bar_lock, flags); + + return err; +} + +int ssb_pcmcia_switch_segment(struct ssb_bus *bus, u8 seg) +{ + int attempts = 0; + unsigned long flags; + conf_reg_t reg; + int res, err = 0; + + SSB_WARN_ON((seg != 0) && (seg != 1)); + reg.Offset = 0x34; + reg.Function = 0; + spin_lock_irqsave(&bus->bar_lock, flags); + while (1) { + reg.Action = CS_WRITE; + reg.Value = seg; + res = pcmcia_access_configuration_register(bus->host_pcmcia, ®); + if (unlikely(res != CS_SUCCESS)) + goto error; + reg.Value = 0xFF; + reg.Action = CS_READ; + res = pcmcia_access_configuration_register(bus->host_pcmcia, ®); + if (unlikely(res != CS_SUCCESS)) + goto error; + + if (reg.Value == seg) + break; + + if (unlikely(attempts++ > SSB_BAR0_MAX_RETRIES)) + goto error; + udelay(10); + } + bus->mapped_pcmcia_seg = seg; +out_unlock: + spin_unlock_irqrestore(&bus->bar_lock, flags); + return err; +error: + ssb_printk(KERN_ERR PFX "Failed to switch pcmcia segment\n"); + err = -ENODEV; + goto out_unlock; +} + +/* These are the main device register access functions. + * do_select_core is inline to have the likely hotpath inline. + * All unlikely codepaths are out-of-line. */ +static inline int do_select_core(struct ssb_bus *bus, + struct ssb_device *dev, + u16 *offset) +{ + int err; + u8 need_seg = (*offset >= 0x800) ? 
1 : 0; + + if (unlikely(dev != bus->mapped_device)) { + err = ssb_pcmcia_switch_core(bus, dev); + if (unlikely(err)) + return err; + } + if (unlikely(need_seg != bus->mapped_pcmcia_seg)) { + err = ssb_pcmcia_switch_segment(bus, need_seg); + if (unlikely(err)) + return err; + } + if (need_seg == 1) + *offset -= 0x800; + + return 0; +} + +static u16 ssb_pcmcia_read16(struct ssb_device *dev, u16 offset) +{ + struct ssb_bus *bus = dev->bus; + u16 x; + + if (unlikely(do_select_core(bus, dev, &offset))) + return 0xFFFF; + x = readw(bus->mmio + offset); + + return x; +} + +static u32 ssb_pcmcia_read32(struct ssb_device *dev, u16 offset) +{ + struct ssb_bus *bus = dev->bus; + u32 x; + + if (unlikely(do_select_core(bus, dev, &offset))) + return 0xFFFFFFFF; + x = readl(bus->mmio + offset); + + return x; +} + +static void ssb_pcmcia_write16(struct ssb_device *dev, u16 offset, u16 value) +{ + struct ssb_bus *bus = dev->bus; + + if (unlikely(do_select_core(bus, dev, &offset))) + return; + writew(value, bus->mmio + offset); +} + +static void ssb_pcmcia_write32(struct ssb_device *dev, u16 offset, u32 value) +{ + struct ssb_bus *bus = dev->bus; + + if (unlikely(do_select_core(bus, dev, &offset))) + return; + readw(bus->mmio + offset); + writew(value >> 16, bus->mmio + offset + 2); + readw(bus->mmio + offset); + writew(value, bus->mmio + offset); +} + +/* Not "static", as it's used in main.c */ +const struct ssb_bus_ops ssb_pcmcia_ops = { + .read16 = ssb_pcmcia_read16, + .read32 = ssb_pcmcia_read32, + .write16 = ssb_pcmcia_write16, + .write32 = ssb_pcmcia_write32, +}; + +int ssb_pcmcia_get_invariants(struct ssb_bus *bus, + struct ssb_init_invariants *iv) +{ + //TODO + return 0; +} + +int ssb_pcmcia_init(struct ssb_bus *bus) +{ + conf_reg_t reg; + int err; + + if (bus->bustype != SSB_BUSTYPE_PCMCIA) + return 0; + + /* Switch segment to a known state and sync + * bus->mapped_pcmcia_seg with hardware state. */ + ssb_pcmcia_switch_segment(bus, 0); + + /* Init IRQ routing */ + reg.Action = CS_READ; + reg.Function = 0; + if (bus->chip_id == 0x4306) + reg.Offset = 0x00; + else + reg.Offset = 0x80; + err = pcmcia_access_configuration_register(bus->host_pcmcia, ®); + if (err != CS_SUCCESS) + goto error; + reg.Action = CS_WRITE; + reg.Value |= 0x04 | 0x01; + err = pcmcia_access_configuration_register(bus->host_pcmcia, ®); + if (err != CS_SUCCESS) + goto error; + + return 0; +error: + return -ENODEV; +} diff --git a/drivers/ssb/scan.c b/drivers/ssb/scan.c new file mode 100644 index 0000000000000000000000000000000000000000..96258c60919da86f84edd4896b0760f2a67ec13a --- /dev/null +++ b/drivers/ssb/scan.c @@ -0,0 +1,413 @@ +/* + * Sonics Silicon Backplane + * Bus scanning + * + * Copyright (C) 2005-2007 Michael Buesch + * Copyright (C) 2005 Martin Langer + * Copyright (C) 2005 Stefano Brivio + * Copyright (C) 2005 Danny van Dyk + * Copyright (C) 2005 Andreas Jaggi + * Copyright (C) 2006 Broadcom Corporation. + * + * Licensed under the GNU/GPL. See COPYING for details. 
+ */ + +#include +#include +#include +#include + +#include +#include +#include +#include + +#include "ssb_private.h" + + +const char *ssb_core_name(u16 coreid) +{ + switch (coreid) { + case SSB_DEV_CHIPCOMMON: + return "ChipCommon"; + case SSB_DEV_ILINE20: + return "ILine 20"; + case SSB_DEV_SDRAM: + return "SDRAM"; + case SSB_DEV_PCI: + return "PCI"; + case SSB_DEV_MIPS: + return "MIPS"; + case SSB_DEV_ETHERNET: + return "Fast Ethernet"; + case SSB_DEV_V90: + return "V90"; + case SSB_DEV_USB11_HOSTDEV: + return "USB 1.1 Hostdev"; + case SSB_DEV_ADSL: + return "ADSL"; + case SSB_DEV_ILINE100: + return "ILine 100"; + case SSB_DEV_IPSEC: + return "IPSEC"; + case SSB_DEV_PCMCIA: + return "PCMCIA"; + case SSB_DEV_INTERNAL_MEM: + return "Internal Memory"; + case SSB_DEV_MEMC_SDRAM: + return "MEMC SDRAM"; + case SSB_DEV_EXTIF: + return "EXTIF"; + case SSB_DEV_80211: + return "IEEE 802.11"; + case SSB_DEV_MIPS_3302: + return "MIPS 3302"; + case SSB_DEV_USB11_HOST: + return "USB 1.1 Host"; + case SSB_DEV_USB11_DEV: + return "USB 1.1 Device"; + case SSB_DEV_USB20_HOST: + return "USB 2.0 Host"; + case SSB_DEV_USB20_DEV: + return "USB 2.0 Device"; + case SSB_DEV_SDIO_HOST: + return "SDIO Host"; + case SSB_DEV_ROBOSWITCH: + return "Roboswitch"; + case SSB_DEV_PARA_ATA: + return "PATA"; + case SSB_DEV_SATA_XORDMA: + return "SATA XOR-DMA"; + case SSB_DEV_ETHERNET_GBIT: + return "GBit Ethernet"; + case SSB_DEV_PCIE: + return "PCI-E"; + case SSB_DEV_MIMO_PHY: + return "MIMO PHY"; + case SSB_DEV_SRAM_CTRLR: + return "SRAM Controller"; + case SSB_DEV_MINI_MACPHY: + return "Mini MACPHY"; + case SSB_DEV_ARM_1176: + return "ARM 1176"; + case SSB_DEV_ARM_7TDMI: + return "ARM 7TDMI"; + } + return "UNKNOWN"; +} + +static u16 pcidev_to_chipid(struct pci_dev *pci_dev) +{ + u16 chipid_fallback = 0; + + switch (pci_dev->device) { + case 0x4301: + chipid_fallback = 0x4301; + break; + case 0x4305 ... 0x4307: + chipid_fallback = 0x4307; + break; + case 0x4403: + chipid_fallback = 0x4402; + break; + case 0x4610 ... 0x4615: + chipid_fallback = 0x4610; + break; + case 0x4710 ... 0x4715: + chipid_fallback = 0x4710; + break; + case 0x4320 ... 
0x4325: + chipid_fallback = 0x4309; + break; + case PCI_DEVICE_ID_BCM4401: + case PCI_DEVICE_ID_BCM4401B0: + case PCI_DEVICE_ID_BCM4401B1: + chipid_fallback = 0x4401; + break; + default: + ssb_printk(KERN_ERR PFX + "PCI-ID not in fallback list\n"); + } + + return chipid_fallback; +} + +static u8 chipid_to_nrcores(u16 chipid) +{ + switch (chipid) { + case 0x5365: + return 7; + case 0x4306: + return 6; + case 0x4310: + return 8; + case 0x4307: + case 0x4301: + return 5; + case 0x4401: + case 0x4402: + return 3; + case 0x4710: + case 0x4610: + case 0x4704: + return 9; + default: + ssb_printk(KERN_ERR PFX + "CHIPID not in nrcores fallback list\n"); + } + + return 1; +} + +static u32 scan_read32(struct ssb_bus *bus, u8 current_coreidx, + u16 offset) +{ + switch (bus->bustype) { + case SSB_BUSTYPE_SSB: + offset += current_coreidx * SSB_CORE_SIZE; + break; + case SSB_BUSTYPE_PCI: + break; + case SSB_BUSTYPE_PCMCIA: + if (offset >= 0x800) { + ssb_pcmcia_switch_segment(bus, 1); + offset -= 0x800; + } else + ssb_pcmcia_switch_segment(bus, 0); + break; + } + return readl(bus->mmio + offset); +} + +static int scan_switchcore(struct ssb_bus *bus, u8 coreidx) +{ + switch (bus->bustype) { + case SSB_BUSTYPE_SSB: + break; + case SSB_BUSTYPE_PCI: + return ssb_pci_switch_coreidx(bus, coreidx); + case SSB_BUSTYPE_PCMCIA: + return ssb_pcmcia_switch_coreidx(bus, coreidx); + } + return 0; +} + +void ssb_iounmap(struct ssb_bus *bus) +{ + switch (bus->bustype) { + case SSB_BUSTYPE_SSB: + case SSB_BUSTYPE_PCMCIA: + iounmap(bus->mmio); + break; + case SSB_BUSTYPE_PCI: +#ifdef CONFIG_SSB_PCIHOST + pci_iounmap(bus->host_pci, bus->mmio); +#else + SSB_BUG_ON(1); /* Can't reach this code. */ +#endif + break; + } + bus->mmio = NULL; + bus->mapped_device = NULL; +} + +static void __iomem *ssb_ioremap(struct ssb_bus *bus, + unsigned long baseaddr) +{ + void __iomem *mmio = NULL; + + switch (bus->bustype) { + case SSB_BUSTYPE_SSB: + /* Only map the first core for now. */ + /* fallthrough... */ + case SSB_BUSTYPE_PCMCIA: + mmio = ioremap(baseaddr, SSB_CORE_SIZE); + break; + case SSB_BUSTYPE_PCI: +#ifdef CONFIG_SSB_PCIHOST + mmio = pci_iomap(bus->host_pci, 0, ~0UL); +#else + SSB_BUG_ON(1); /* Can't reach this code. */ +#endif + break; + } + + return mmio; +} + +static int we_support_multiple_80211_cores(struct ssb_bus *bus) +{ + /* More than one 802.11 core is only supported by special chips. + * There are chips with two 802.11 cores, but with dangling + * pins on the second core. Be careful and reject them here. 
+ */ + +#ifdef CONFIG_SSB_PCIHOST + if (bus->bustype == SSB_BUSTYPE_PCI) { + if (bus->host_pci->vendor == PCI_VENDOR_ID_BROADCOM && + bus->host_pci->device == 0x4324) + return 1; + } +#endif /* CONFIG_SSB_PCIHOST */ + return 0; +} + +int ssb_bus_scan(struct ssb_bus *bus, + unsigned long baseaddr) +{ + int err = -ENOMEM; + void __iomem *mmio; + u32 idhi, cc, rev, tmp; + int dev_i, i; + struct ssb_device *dev; + int nr_80211_cores = 0; + + mmio = ssb_ioremap(bus, baseaddr); + if (!mmio) + goto out; + bus->mmio = mmio; + + err = scan_switchcore(bus, 0); /* Switch to first core */ + if (err) + goto err_unmap; + + idhi = scan_read32(bus, 0, SSB_IDHIGH); + cc = (idhi & SSB_IDHIGH_CC) >> SSB_IDHIGH_CC_SHIFT; + rev = (idhi & SSB_IDHIGH_RCLO); + rev |= (idhi & SSB_IDHIGH_RCHI) >> SSB_IDHIGH_RCHI_SHIFT; + + bus->nr_devices = 0; + if (cc == SSB_DEV_CHIPCOMMON) { + tmp = scan_read32(bus, 0, SSB_CHIPCO_CHIPID); + + bus->chip_id = (tmp & SSB_CHIPCO_IDMASK); + bus->chip_rev = (tmp & SSB_CHIPCO_REVMASK) >> + SSB_CHIPCO_REVSHIFT; + bus->chip_package = (tmp & SSB_CHIPCO_PACKMASK) >> + SSB_CHIPCO_PACKSHIFT; + if (rev >= 4) { + bus->nr_devices = (tmp & SSB_CHIPCO_NRCORESMASK) >> + SSB_CHIPCO_NRCORESSHIFT; + } + tmp = scan_read32(bus, 0, SSB_CHIPCO_CAP); + bus->chipco.capabilities = tmp; + } else { + if (bus->bustype == SSB_BUSTYPE_PCI) { + bus->chip_id = pcidev_to_chipid(bus->host_pci); + pci_read_config_word(bus->host_pci, PCI_REVISION_ID, + &bus->chip_rev); + bus->chip_package = 0; + } else { + bus->chip_id = 0x4710; + bus->chip_rev = 0; + bus->chip_package = 0; + } + } + if (!bus->nr_devices) + bus->nr_devices = chipid_to_nrcores(bus->chip_id); + if (bus->nr_devices > ARRAY_SIZE(bus->devices)) { + ssb_printk(KERN_ERR PFX + "More than %d ssb cores found (%d)\n", + SSB_MAX_NR_CORES, bus->nr_devices); + goto err_unmap; + } + if (bus->bustype == SSB_BUSTYPE_SSB) { + /* Now that we know the number of cores, + * remap the whole IO space for all cores. 
+ */ + err = -ENOMEM; + iounmap(mmio); + mmio = ioremap(baseaddr, SSB_CORE_SIZE * bus->nr_devices); + if (!mmio) + goto out; + bus->mmio = mmio; + } + + /* Fetch basic information about each core/device */ + for (i = 0, dev_i = 0; i < bus->nr_devices; i++) { + err = scan_switchcore(bus, i); + if (err) + goto err_unmap; + dev = &(bus->devices[dev_i]); + + idhi = scan_read32(bus, i, SSB_IDHIGH); + dev->id.coreid = (idhi & SSB_IDHIGH_CC) >> SSB_IDHIGH_CC_SHIFT; + dev->id.revision = (idhi & SSB_IDHIGH_RCLO); + dev->id.revision |= (idhi & SSB_IDHIGH_RCHI) >> SSB_IDHIGH_RCHI_SHIFT; + dev->id.vendor = (idhi & SSB_IDHIGH_VC) >> SSB_IDHIGH_VC_SHIFT; + dev->core_index = i; + dev->bus = bus; + dev->ops = bus->ops; + + ssb_dprintk(KERN_INFO PFX + "Core %d found: %s " + "(cc 0x%03X, rev 0x%02X, vendor 0x%04X)\n", + i, ssb_core_name(dev->id.coreid), + dev->id.coreid, dev->id.revision, dev->id.vendor); + + switch (dev->id.coreid) { + case SSB_DEV_80211: + nr_80211_cores++; + if (nr_80211_cores > 1) { + if (!we_support_multiple_80211_cores(bus)) { + ssb_dprintk(KERN_INFO PFX "Ignoring additional " + "802.11 core\n"); + continue; + } + } + break; + case SSB_DEV_EXTIF: +#ifdef CONFIG_SSB_DRIVER_EXTIF + if (bus->extif.dev) { + ssb_printk(KERN_WARNING PFX + "WARNING: Multiple EXTIFs found\n"); + break; + } + bus->extif.dev = dev; +#endif /* CONFIG_SSB_DRIVER_EXTIF */ + break; + case SSB_DEV_CHIPCOMMON: + if (bus->chipco.dev) { + ssb_printk(KERN_WARNING PFX + "WARNING: Multiple ChipCommon found\n"); + break; + } + bus->chipco.dev = dev; + break; + case SSB_DEV_MIPS: + case SSB_DEV_MIPS_3302: +#ifdef CONFIG_SSB_DRIVER_MIPS + if (bus->mipscore.dev) { + ssb_printk(KERN_WARNING PFX + "WARNING: Multiple MIPS cores found\n"); + break; + } + bus->mipscore.dev = dev; +#endif /* CONFIG_SSB_DRIVER_MIPS */ + break; + case SSB_DEV_PCI: + case SSB_DEV_PCIE: +#ifdef CONFIG_SSB_DRIVER_PCICORE + if (bus->pcicore.dev) { + ssb_printk(KERN_WARNING PFX + "WARNING: Multiple PCI(E) cores found\n"); + break; + } + bus->pcicore.dev = dev; +#endif /* CONFIG_SSB_DRIVER_PCICORE */ + break; + default: + break; + } + + dev_i++; + } + bus->nr_devices = dev_i; + + err = 0; +out: + return err; +err_unmap: + ssb_iounmap(bus); + goto out; +} diff --git a/drivers/ssb/ssb_private.h b/drivers/ssb/ssb_private.h new file mode 100644 index 0000000000000000000000000000000000000000..a789364264a6bd81db1220ac380bae94ef2a477f --- /dev/null +++ b/drivers/ssb/ssb_private.h @@ -0,0 +1,136 @@ +#ifndef LINUX_SSB_PRIVATE_H_ +#define LINUX_SSB_PRIVATE_H_ + +#include +#include + + +#define PFX "ssb: " + +#ifdef CONFIG_SSB_SILENT +# define ssb_printk(fmt, x...) do { /* nothing */ } while (0) +#else +# define ssb_printk printk +#endif /* CONFIG_SSB_SILENT */ + +/* dprintk: Debugging printk; vanishes for non-debug compilation */ +#ifdef CONFIG_SSB_DEBUG +# define ssb_dprintk(fmt, x...) ssb_printk(fmt , ##x) +#else +# define ssb_dprintk(fmt, x...) 
do { /* nothing */ } while (0) +#endif + +#ifdef CONFIG_SSB_DEBUG +# define SSB_WARN_ON(x) WARN_ON(x) +# define SSB_BUG_ON(x) BUG_ON(x) +#else +static inline int __ssb_do_nothing(int x) { return x; } +# define SSB_WARN_ON(x) __ssb_do_nothing(unlikely(!!(x))) +# define SSB_BUG_ON(x) __ssb_do_nothing(unlikely(!!(x))) +#endif + + +/* pci.c */ +#ifdef CONFIG_SSB_PCIHOST +extern int ssb_pci_switch_core(struct ssb_bus *bus, + struct ssb_device *dev); +extern int ssb_pci_switch_coreidx(struct ssb_bus *bus, + u8 coreidx); +extern int ssb_pci_xtal(struct ssb_bus *bus, u32 what, + int turn_on); +extern int ssb_pci_get_invariants(struct ssb_bus *bus, + struct ssb_init_invariants *iv); +extern void ssb_pci_exit(struct ssb_bus *bus); +extern int ssb_pci_init(struct ssb_bus *bus); +extern const struct ssb_bus_ops ssb_pci_ops; + +#else /* CONFIG_SSB_PCIHOST */ + +static inline int ssb_pci_switch_core(struct ssb_bus *bus, + struct ssb_device *dev) +{ + return 0; +} +static inline int ssb_pci_switch_coreidx(struct ssb_bus *bus, + u8 coreidx) +{ + return 0; +} +static inline int ssb_pci_xtal(struct ssb_bus *bus, u32 what, + int turn_on) +{ + return 0; +} +static inline void ssb_pci_exit(struct ssb_bus *bus) +{ +} +static inline int ssb_pci_init(struct ssb_bus *bus) +{ + return 0; +} +#endif /* CONFIG_SSB_PCIHOST */ + + +/* pcmcia.c */ +#ifdef CONFIG_SSB_PCMCIAHOST +extern int ssb_pcmcia_switch_core(struct ssb_bus *bus, + struct ssb_device *dev); +extern int ssb_pcmcia_switch_coreidx(struct ssb_bus *bus, + u8 coreidx); +extern int ssb_pcmcia_switch_segment(struct ssb_bus *bus, + u8 seg); +extern int ssb_pcmcia_get_invariants(struct ssb_bus *bus, + struct ssb_init_invariants *iv); +extern int ssb_pcmcia_init(struct ssb_bus *bus); +extern const struct ssb_bus_ops ssb_pcmcia_ops; +#else /* CONFIG_SSB_PCMCIAHOST */ +static inline int ssb_pcmcia_switch_core(struct ssb_bus *bus, + struct ssb_device *dev) +{ + return 0; +} +static inline int ssb_pcmcia_switch_coreidx(struct ssb_bus *bus, + u8 coreidx) +{ + return 0; +} +static inline int ssb_pcmcia_switch_segment(struct ssb_bus *bus, + u8 seg) +{ + return 0; +} +static inline int ssb_pcmcia_init(struct ssb_bus *bus) +{ + return 0; +} +#endif /* CONFIG_SSB_PCMCIAHOST */ + + +/* scan.c */ +extern const char *ssb_core_name(u16 coreid); +extern int ssb_bus_scan(struct ssb_bus *bus, + unsigned long baseaddr); +extern void ssb_iounmap(struct ssb_bus *ssb); + + +/* core.c */ +extern u32 ssb_calc_clock_rate(u32 plltype, u32 n, u32 m); +extern int ssb_devices_freeze(struct ssb_bus *bus); +extern int ssb_devices_thaw(struct ssb_bus *bus); +extern struct ssb_bus *ssb_pci_dev_to_bus(struct pci_dev *pdev); + +/* b43_pci_bridge.c */ +#ifdef CONFIG_SSB_PCIHOST +extern int __init b43_pci_ssb_bridge_init(void); +extern void __exit b43_pci_ssb_bridge_exit(void); +#else /* CONFIG_SSB_PCIHOST */ +static inline int b43_pci_ssb_bridge_init(void) +{ + return 0; +} +static inline void b43_pci_ssb_bridge_exit(void) +{ +} +#endif /* CONFIG_SSB_PCIHOST */ + +#endif /* LINUX_SSB_PRIVATE_H_ */ diff --git a/drivers/usb/gadget/ether.c b/drivers/usb/gadget/ether.c index 593e23507b1a79cff9b63fc0986d0ae088ac3d93..f70055473a00b25b4b83cf873de8301d7264449b 100644 --- a/drivers/usb/gadget/ether.c +++ b/drivers/usb/gadget/ether.c @@ -2484,7 +2484,6 @@ eth_bind (struct usb_gadget *gadget) /* network device setup */ dev->net = net; - SET_MODULE_OWNER (net); strcpy (net->name, "usb%d"); dev->cdc = cdc; dev->zlp = zlp; diff --git a/fs/afs/netdevices.c b/fs/afs/netdevices.c index 
fc27d4b52e5fdf0d461348ff8cf588109e7081bb..49f189423063d6c760a35f5e2e63bf188cf2bd48 100644 --- a/fs/afs/netdevices.c +++ b/fs/afs/netdevices.c @@ -8,6 +8,7 @@ #include #include #include +#include #include "internal.h" /* @@ -23,7 +24,7 @@ int afs_get_MAC_address(u8 *mac, size_t maclen) BUG(); rtnl_lock(); - dev = __dev_getfirstbyhwtype(ARPHRD_ETHER); + dev = __dev_getfirstbyhwtype(&init_net, ARPHRD_ETHER); if (dev) { memcpy(mac, dev->dev_addr, maclen); ret = 0; @@ -47,7 +48,7 @@ int afs_get_ipv4_interfaces(struct afs_interface *bufs, size_t maxbufs, ASSERT(maxbufs > 0); rtnl_lock(); - for_each_netdev(dev) { + for_each_netdev(&init_net, dev) { if (dev->type == ARPHRD_LOOPBACK && !wantloopback) continue; idev = __in_dev_get_rtnl(dev); diff --git a/fs/compat_ioctl.c b/fs/compat_ioctl.c index b9e3357bcc2ead562e138ea71b89d13120d03f41..9c3fd07f35e0f25b04763a021c9c636e22ce43e5 100644 --- a/fs/compat_ioctl.c +++ b/fs/compat_ioctl.c @@ -319,22 +319,21 @@ struct ifconf32 { static int dev_ifname32(unsigned int fd, unsigned int cmd, unsigned long arg) { - struct net_device *dev; - struct ifreq32 ifr32; + struct ifreq __user *uifr; int err; - if (copy_from_user(&ifr32, compat_ptr(arg), sizeof(ifr32))) + uifr = compat_alloc_user_space(sizeof(struct ifreq)); + if (copy_in_user(uifr, compat_ptr(arg), sizeof(struct ifreq32))) return -EFAULT; - dev = dev_get_by_index(ifr32.ifr_ifindex); - if (!dev) - return -ENODEV; + err = sys_ioctl(fd, SIOCGIFNAME, (unsigned long)uifr); + if (err) + return err; - strlcpy(ifr32.ifr_name, dev->name, sizeof(ifr32.ifr_name)); - dev_put(dev); - - err = copy_to_user(compat_ptr(arg), &ifr32, sizeof(ifr32)); - return (err ? -EFAULT : 0); + if (copy_in_user(compat_ptr(arg), uifr, sizeof(struct ifreq32))) + return -EFAULT; + + return 0; } static int dev_ifconf(unsigned int fd, unsigned int cmd, unsigned long arg) diff --git a/fs/ecryptfs/netlink.c b/fs/ecryptfs/netlink.c index fe9186312d7c26c5349a01181fec25acc82e392e..9aa345121e09cf2bf2f600259f95a03557f85de3 100644 --- a/fs/ecryptfs/netlink.c +++ b/fs/ecryptfs/netlink.c @@ -165,22 +165,10 @@ static int ecryptfs_process_nl_quit(struct sk_buff *skb) * it to its desired netlink context element and wake up the process * that is waiting for a response.
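The eCryptfs hunk here tracks two related interface changes visible in this series: netlink_kernel_create() now takes the network namespace as its first argument, and the input callback is handed each sk_buff directly instead of looping over skb_recv_datagram(). A minimal sketch of the new callback shape (not part of this patch; the protocol number and handler body are placeholders):

#include <linux/module.h>
#include <linux/netlink.h>
#include <linux/skbuff.h>
#include <net/net_namespace.h>

static struct sock *example_nl_sock;

/* New-style input callback: invoked once per received skb. */
static void example_nl_input(struct sk_buff *skb)
{
	struct nlmsghdr *nlh = nlmsg_hdr(skb);

	if (!NLMSG_OK(nlh, skb->len))
		return;	/* corrupt or truncated message */
	/* ... dispatch on nlh->nlmsg_type ... */
}

static int __init example_nl_init(void)
{
	example_nl_sock = netlink_kernel_create(&init_net, NETLINK_USERSOCK, 0,
						example_nl_input, NULL,
						THIS_MODULE);
	return example_nl_sock ? 0 : -ENOMEM;
}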
*/ -static void ecryptfs_receive_nl_message(struct sock *sk, int len) +static void ecryptfs_receive_nl_message(struct sk_buff *skb) { - struct sk_buff *skb; struct nlmsghdr *nlh; - int rc = 0; /* skb_recv_datagram requires this */ -receive: - skb = skb_recv_datagram(sk, 0, 0, &rc); - if (rc == -EINTR) - goto receive; - else if (rc < 0) { - ecryptfs_printk(KERN_ERR, "Error occurred while " - "receiving eCryptfs netlink message; " - "rc = [%d]\n", rc); - return; - } nlh = nlmsg_hdr(skb); if (!NLMSG_OK(nlh, skb->len)) { ecryptfs_printk(KERN_ERR, "Received corrupt netlink " @@ -227,7 +215,7 @@ int ecryptfs_init_netlink(void) { int rc; - ecryptfs_nl_sock = netlink_kernel_create(NETLINK_ECRYPTFS, 0, + ecryptfs_nl_sock = netlink_kernel_create(&init_net, NETLINK_ECRYPTFS, 0, ecryptfs_receive_nl_message, NULL, THIS_MODULE); if (!ecryptfs_nl_sock) { diff --git a/fs/proc/Makefile b/fs/proc/Makefile index bce38e3f06cbcf346d83335db673985df88f85f0..ebaba0213546a9cabc11539bb5cf047124e3a32e 100644 --- a/fs/proc/Makefile +++ b/fs/proc/Makefile @@ -11,6 +11,7 @@ proc-y += inode.o root.o base.o generic.o array.o \ proc_tty.o proc_misc.o proc-$(CONFIG_PROC_SYSCTL) += proc_sysctl.o +proc-$(CONFIG_NET) += proc_net.o proc-$(CONFIG_PROC_KCORE) += kcore.o proc-$(CONFIG_PROC_VMCORE) += vmcore.o proc-$(CONFIG_PROC_DEVICETREE) += proc_devtree.o diff --git a/fs/proc/internal.h b/fs/proc/internal.h index b215c3524fa685c572c874966128241d6747e9dd..1820eb2ef7623a2c222e9be9b61d0e749f259187 100644 --- a/fs/proc/internal.h +++ b/fs/proc/internal.h @@ -16,6 +16,11 @@ extern int proc_sys_init(void); #else static inline void proc_sys_init(void) { } #endif +#ifdef CONFIG_NET +extern int proc_net_init(void); +#else +static inline int proc_net_init(void) { return 0; } +#endif struct vmalloc_info { unsigned long used; diff --git a/fs/proc/proc_net.c b/fs/proc/proc_net.c new file mode 100644 index 0000000000000000000000000000000000000000..2e91fb756e9ad27be30488bd88a5e5ac8856bf34 --- /dev/null +++ b/fs/proc/proc_net.c @@ -0,0 +1,200 @@ +/* + * linux/fs/proc/net.c + * + * Copyright (C) 2007 + * + * Author: Eric Biederman + * + * proc net directory handling functions + */ + +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "internal.h" + + +struct proc_dir_entry *proc_net_create(struct net *net, + const char *name, mode_t mode, get_info_t *get_info) +{ + return create_proc_info_entry(name,mode, net->proc_net, get_info); +} +EXPORT_SYMBOL_GPL(proc_net_create); + +struct proc_dir_entry *proc_net_fops_create(struct net *net, + const char *name, mode_t mode, const struct file_operations *fops) +{ + struct proc_dir_entry *res; + + res = create_proc_entry(name, mode, net->proc_net); + if (res) + res->proc_fops = fops; + return res; +} +EXPORT_SYMBOL_GPL(proc_net_fops_create); + +void proc_net_remove(struct net *net, const char *name) +{ + remove_proc_entry(name, net->proc_net); +} +EXPORT_SYMBOL_GPL(proc_net_remove); + +struct net *get_proc_net(const struct inode *inode) +{ + return maybe_get_net(PDE_NET(PDE(inode))); +} +EXPORT_SYMBOL_GPL(get_proc_net); + +static struct proc_dir_entry *proc_net_shadow; + +static struct dentry *proc_net_shadow_dentry(struct dentry *parent, + struct proc_dir_entry *de) +{ + struct dentry *shadow = NULL; + struct inode *inode; + if (!de) + goto out; + de_get(de); + inode = proc_get_inode(parent->d_inode->i_sb, de->low_ino, de); + if (!inode) + goto out_de_put; + shadow = d_alloc_name(parent, de->name); + if (!shadow) 
+ goto out_iput; + shadow->d_op = parent->d_op; /* proc_dentry_operations */ + d_instantiate(shadow, inode); +out: + return shadow; +out_iput: + iput(inode); +out_de_put: + de_put(de); + goto out; +} + +static void *proc_net_follow_link(struct dentry *parent, struct nameidata *nd) +{ + struct net *net = current->nsproxy->net_ns; + struct dentry *shadow; + shadow = proc_net_shadow_dentry(parent, net->proc_net); + if (!shadow) + return ERR_PTR(-ENOENT); + + dput(nd->dentry); + /* My dentry count is 1 and that should be enough as the + * shadow dentry is thrown away immediately. + */ + nd->dentry = shadow; + return NULL; +} + +static struct dentry *proc_net_lookup(struct inode *dir, struct dentry *dentry, + struct nameidata *nd) +{ + struct net *net = current->nsproxy->net_ns; + struct dentry *shadow; + + shadow = proc_net_shadow_dentry(nd->dentry, net->proc_net); + if (!shadow) + return ERR_PTR(-ENOENT); + + dput(nd->dentry); + nd->dentry = shadow; + + return shadow->d_inode->i_op->lookup(shadow->d_inode, dentry, nd); +} + +static int proc_net_setattr(struct dentry *dentry, struct iattr *iattr) +{ + struct net *net = current->nsproxy->net_ns; + struct dentry *shadow; + int ret; + + shadow = proc_net_shadow_dentry(dentry->d_parent, net->proc_net); + if (!shadow) + return -ENOENT; + ret = shadow->d_inode->i_op->setattr(shadow, iattr); + dput(shadow); + return ret; +} + +static const struct file_operations proc_net_dir_operations = { + .read = generic_read_dir, +}; + +static struct inode_operations proc_net_dir_inode_operations = { + .follow_link = proc_net_follow_link, + .lookup = proc_net_lookup, + .setattr = proc_net_setattr, +}; + +static __net_init int proc_net_ns_init(struct net *net) +{ + struct proc_dir_entry *root, *netd, *net_statd; + int err; + + err = -ENOMEM; + root = kzalloc(sizeof(*root), GFP_KERNEL); + if (!root) + goto out; + + err = -EEXIST; + netd = proc_mkdir("net", root); + if (!netd) + goto free_root; + + err = -EEXIST; + net_statd = proc_mkdir("stat", netd); + if (!net_statd) + goto free_net; + + root->data = net; + netd->data = net; + net_statd->data = net; + + net->proc_net_root = root; + net->proc_net = netd; + net->proc_net_stat = net_statd; + err = 0; + +out: + return err; +free_net: + remove_proc_entry("net", root); +free_root: + kfree(root); + goto out; +} + +static __net_exit void proc_net_ns_exit(struct net *net) +{ + remove_proc_entry("stat", net->proc_net); + remove_proc_entry("net", net->proc_net_root); + kfree(net->proc_net_root); +} + +struct pernet_operations __net_initdata proc_net_ns_ops = { + .init = proc_net_ns_init, + .exit = proc_net_ns_exit, +}; + +int __init proc_net_init(void) +{ + proc_net_shadow = proc_mkdir("net", NULL); + proc_net_shadow->proc_iops = &proc_net_dir_inode_operations; + proc_net_shadow->proc_fops = &proc_net_dir_operations; + + return register_pernet_subsys(&proc_net_ns_ops); +} diff --git a/fs/proc/root.c b/fs/proc/root.c index 41f17037f73890ad5551651bc776446c3798ac68..cf3046638b09406947198ba81ac4c93aa01ec7f8 100644 --- a/fs/proc/root.c +++ b/fs/proc/root.c @@ -21,7 +21,7 @@ #include "internal.h" -struct proc_dir_entry *proc_net, *proc_net_stat, *proc_bus, *proc_root_fs, *proc_root_driver; +struct proc_dir_entry *proc_bus, *proc_root_fs, *proc_root_driver; static int proc_get_sb(struct file_system_type *fs_type, int flags, const char *dev_name, void *data, struct vfsmount *mnt) @@ -61,8 +61,8 @@ void __init proc_root_init(void) return; } proc_misc_init(); - proc_net = proc_mkdir("net", NULL); - proc_net_stat = 
proc_mkdir("net/stat", NULL); + + proc_net_init(); #ifdef CONFIG_SYSVIPC proc_mkdir("sysvipc", NULL); @@ -159,7 +159,5 @@ EXPORT_SYMBOL(create_proc_entry); EXPORT_SYMBOL(remove_proc_entry); EXPORT_SYMBOL(proc_root); EXPORT_SYMBOL(proc_root_fs); -EXPORT_SYMBOL(proc_net); -EXPORT_SYMBOL(proc_net_stat); EXPORT_SYMBOL(proc_bus); EXPORT_SYMBOL(proc_root_driver); diff --git a/fs/seq_file.c b/fs/seq_file.c index bbb19be260ced1be701c3651f8e5b6a94a0bb593..ca71c115bdaa4f9494a2e2bc0b9f5cbb463fd9ce 100644 --- a/fs/seq_file.c +++ b/fs/seq_file.c @@ -429,6 +429,39 @@ int seq_release_private(struct inode *inode, struct file *file) } EXPORT_SYMBOL(seq_release_private); +void *__seq_open_private(struct file *f, const struct seq_operations *ops, + int psize) +{ + int rc; + void *private; + struct seq_file *seq; + + private = kzalloc(psize, GFP_KERNEL); + if (private == NULL) + goto out; + + rc = seq_open(f, ops); + if (rc < 0) + goto out_free; + + seq = f->private_data; + seq->private = private; + return private; + +out_free: + kfree(private); +out: + return NULL; +} +EXPORT_SYMBOL(__seq_open_private); + +int seq_open_private(struct file *filp, const struct seq_operations *ops, + int psize) +{ + return __seq_open_private(filp, ops, psize) ? 0 : -ENOMEM; +} +EXPORT_SYMBOL(seq_open_private); + int seq_putc(struct seq_file *m, char c) { if (m->count < m->size) { diff --git a/include/crypto/algapi.h b/include/crypto/algapi.h index b2b1e6efd812793ffaf6442ee7cb9d4424067f66..b9b05d399d2b49bbbe304fbb608336db94d58784 100644 --- a/include/crypto/algapi.h +++ b/include/crypto/algapi.h @@ -91,9 +91,11 @@ struct blkcipher_walk { u8 *iv; int flags; + unsigned int blocksize; }; extern const struct crypto_type crypto_ablkcipher_type; +extern const struct crypto_type crypto_aead_type; extern const struct crypto_type crypto_blkcipher_type; extern const struct crypto_type crypto_hash_type; @@ -111,7 +113,8 @@ struct crypto_tfm *crypto_spawn_tfm(struct crypto_spawn *spawn, u32 type, struct crypto_attr_type *crypto_get_attr_type(struct rtattr **tb); int crypto_check_attr_type(struct rtattr **tb, u32 type); -struct crypto_alg *crypto_get_attr_alg(struct rtattr **tb, u32 type, u32 mask); +struct crypto_alg *crypto_attr_alg(struct rtattr *rta, u32 type, u32 mask); +int crypto_attr_u32(struct rtattr *rta, u32 *num); struct crypto_instance *crypto_alloc_instance(const char *name, struct crypto_alg *alg); @@ -127,6 +130,9 @@ int blkcipher_walk_virt(struct blkcipher_desc *desc, struct blkcipher_walk *walk); int blkcipher_walk_phys(struct blkcipher_desc *desc, struct blkcipher_walk *walk); +int blkcipher_walk_virt_block(struct blkcipher_desc *desc, + struct blkcipher_walk *walk, + unsigned int blocksize); static inline void *crypto_tfm_ctx_aligned(struct crypto_tfm *tfm) { @@ -160,6 +166,36 @@ static inline void *crypto_ablkcipher_ctx(struct crypto_ablkcipher *tfm) return crypto_tfm_ctx(&tfm->base); } +static inline void *crypto_ablkcipher_ctx_aligned(struct crypto_ablkcipher *tfm) +{ + return crypto_tfm_ctx_aligned(&tfm->base); +} + +static inline struct aead_alg *crypto_aead_alg(struct crypto_aead *tfm) +{ + return &crypto_aead_tfm(tfm)->__crt_alg->cra_aead; +} + +static inline void *crypto_aead_ctx(struct crypto_aead *tfm) +{ + return crypto_tfm_ctx(&tfm->base); +} + +static inline struct crypto_instance *crypto_aead_alg_instance( + struct crypto_aead *aead) +{ + return crypto_tfm_alg_instance(&aead->base); +} + +static inline struct crypto_ablkcipher *crypto_spawn_ablkcipher( + struct crypto_spawn *spawn) +{ + u32 type = 
CRYPTO_ALG_TYPE_BLKCIPHER; + u32 mask = CRYPTO_ALG_TYPE_MASK; + + return __crypto_ablkcipher_cast(crypto_spawn_tfm(spawn, type, mask)); +} + static inline struct crypto_blkcipher *crypto_spawn_blkcipher( struct crypto_spawn *spawn) { @@ -223,16 +259,16 @@ static inline struct crypto_async_request *crypto_get_backlog( container_of(queue->backlog, struct crypto_async_request, list); } -static inline int ablkcipher_enqueue_request(struct ablkcipher_alg *alg, +static inline int ablkcipher_enqueue_request(struct crypto_queue *queue, struct ablkcipher_request *request) { - return crypto_enqueue_request(alg->queue, &request->base); + return crypto_enqueue_request(queue, &request->base); } static inline struct ablkcipher_request *ablkcipher_dequeue_request( - struct ablkcipher_alg *alg) + struct crypto_queue *queue) { - return ablkcipher_request_cast(crypto_dequeue_request(alg->queue)); + return ablkcipher_request_cast(crypto_dequeue_request(queue)); } static inline void *ablkcipher_request_ctx(struct ablkcipher_request *req) @@ -240,10 +276,31 @@ static inline void *ablkcipher_request_ctx(struct ablkcipher_request *req) return req->__ctx; } -static inline int ablkcipher_tfm_in_queue(struct crypto_ablkcipher *tfm) +static inline int ablkcipher_tfm_in_queue(struct crypto_queue *queue, + struct crypto_ablkcipher *tfm) +{ + return crypto_tfm_in_queue(queue, crypto_ablkcipher_tfm(tfm)); +} + +static inline void *aead_request_ctx(struct aead_request *req) +{ + return req->__ctx; +} + +static inline void aead_request_complete(struct aead_request *req, int err) +{ + req->base.complete(&req->base, err); +} + +static inline u32 aead_request_flags(struct aead_request *req) +{ + return req->base.flags; +} + +static inline struct crypto_alg *crypto_get_attr_alg(struct rtattr **tb, + u32 type, u32 mask) { - return crypto_tfm_in_queue(crypto_ablkcipher_alg(tfm)->queue, - crypto_ablkcipher_tfm(tfm)); + return crypto_attr_alg(tb[1], type, mask); } #endif /* _CRYPTO_ALGAPI_H */ diff --git a/include/crypto/gf128mul.h b/include/crypto/gf128mul.h index 4fd3152024426d6b154b14a855521257a3828918..4086b8ebfafe196ea6320dab05f552eaa49cf3d8 100644 --- a/include/crypto/gf128mul.h +++ b/include/crypto/gf128mul.h @@ -161,6 +161,8 @@ void gf128mul_lle(be128 *a, const be128 *b); void gf128mul_bbe(be128 *a, const be128 *b); +/* multiply by x in ble format, needed by XTS */ +void gf128mul_x_ble(be128 *a, const be128 *b); /* 4k table optimization */ diff --git a/include/crypto/sha.h b/include/crypto/sha.h new file mode 100644 index 0000000000000000000000000000000000000000..0686e1f7a24b0375a421af0cf79da7288f56fdee --- /dev/null +++ b/include/crypto/sha.h @@ -0,0 +1,53 @@ +/* + * Common values for SHA algorithms + */ + +#ifndef _CRYPTO_SHA_H +#define _CRYPTO_SHA_H + +#define SHA1_DIGEST_SIZE 20 +#define SHA1_BLOCK_SIZE 64 + +#define SHA256_DIGEST_SIZE 32 +#define SHA256_BLOCK_SIZE 64 + +#define SHA384_DIGEST_SIZE 48 +#define SHA384_BLOCK_SIZE 128 + +#define SHA512_DIGEST_SIZE 64 +#define SHA512_BLOCK_SIZE 128 + +#define SHA1_H0 0x67452301UL +#define SHA1_H1 0xefcdab89UL +#define SHA1_H2 0x98badcfeUL +#define SHA1_H3 0x10325476UL +#define SHA1_H4 0xc3d2e1f0UL + +#define SHA256_H0 0x6a09e667UL +#define SHA256_H1 0xbb67ae85UL +#define SHA256_H2 0x3c6ef372UL +#define SHA256_H3 0xa54ff53aUL +#define SHA256_H4 0x510e527fUL +#define SHA256_H5 0x9b05688cUL +#define SHA256_H6 0x1f83d9abUL +#define SHA256_H7 0x5be0cd19UL + +#define SHA384_H0 0xcbbb9d5dc1059ed8ULL +#define SHA384_H1 0x629a292a367cd507ULL +#define SHA384_H2 0x9159015a3070dd17ULL 
+#define SHA384_H3 0x152fecd8f70e5939ULL +#define SHA384_H4 0x67332667ffc00b31ULL +#define SHA384_H5 0x8eb44a8768581511ULL +#define SHA384_H6 0xdb0c2e0d64f98fa7ULL +#define SHA384_H7 0x47b5481dbefa4fa4ULL + +#define SHA512_H0 0x6a09e667f3bcc908ULL +#define SHA512_H1 0xbb67ae8584caa73bULL +#define SHA512_H2 0x3c6ef372fe94f82bULL +#define SHA512_H3 0xa54ff53a5f1d36f1ULL +#define SHA512_H4 0x510e527fade682d1ULL +#define SHA512_H5 0x9b05688c2b3e6c1fULL +#define SHA512_H6 0x1f83d9abfb41bd6bULL +#define SHA512_H7 0x5be0cd19137e2179ULL + +#endif diff --git a/include/linux/arcdevice.h b/include/linux/arcdevice.h index 2f85049cfb3d1ca94b1b9680bf3db2802b590acf..fde675872c56407f5fdb3c8d84629a793be5b732 100644 --- a/include/linux/arcdevice.h +++ b/include/linux/arcdevice.h @@ -214,7 +214,7 @@ extern struct ArcProto *arc_proto_map[256], *arc_proto_default, */ struct Incoming { struct sk_buff *skb; /* packet data buffer */ - uint16_t sequence; /* sequence number of assembly */ + __be16 sequence; /* sequence number of assembly */ uint8_t lastpacket, /* number of last packet (from 1) */ numpackets; /* number of packets in split */ }; @@ -292,7 +292,7 @@ struct arcnet_local { struct { uint16_t sequence; /* sequence number (incs with each packet) */ - uint16_t aborted_seq; + __be16 aborted_seq; struct Incoming incoming[256]; /* one from each address */ } rfc1201; diff --git a/include/linux/audit.h b/include/linux/audit.h index d6579df8dadf629968c704c774081fa4ef422586..9ae740936a65268548acd512062dfc5c135f477a 100644 --- a/include/linux/audit.h +++ b/include/linux/audit.h @@ -108,10 +108,11 @@ #define AUDIT_MAC_CIPSOV4_DEL 1408 /* NetLabel: del CIPSOv4 DOI entry */ #define AUDIT_MAC_MAP_ADD 1409 /* NetLabel: add LSM domain mapping */ #define AUDIT_MAC_MAP_DEL 1410 /* NetLabel: del LSM domain mapping */ -#define AUDIT_MAC_IPSEC_ADDSA 1411 /* Add a XFRM state */ -#define AUDIT_MAC_IPSEC_DELSA 1412 /* Delete a XFRM state */ -#define AUDIT_MAC_IPSEC_ADDSPD 1413 /* Add a XFRM policy */ -#define AUDIT_MAC_IPSEC_DELSPD 1414 /* Delete a XFRM policy */ +#define AUDIT_MAC_IPSEC_ADDSA 1411 /* Not used */ +#define AUDIT_MAC_IPSEC_DELSA 1412 /* Not used */ +#define AUDIT_MAC_IPSEC_ADDSPD 1413 /* Not used */ +#define AUDIT_MAC_IPSEC_DELSPD 1414 /* Not used */ +#define AUDIT_MAC_IPSEC_EVENT 1415 /* Audit an IPSec event */ #define AUDIT_FIRST_KERN_ANOM_MSG 1700 #define AUDIT_LAST_KERN_ANOM_MSG 1799 diff --git a/include/linux/connector.h b/include/linux/connector.h index 10eb56b2940ad8c8b7f6775de247d552a99cc6fe..b62f823e90cf631537a23ec7e5f17f87c6ce64e9 100644 --- a/include/linux/connector.h +++ b/include/linux/connector.h @@ -153,7 +153,7 @@ struct cn_dev { u32 seq, groups; struct sock *nls; - void (*input) (struct sock * sk, int len); + void (*input) (struct sk_buff *skb); struct cn_queue_dev *cbdev; }; diff --git a/include/linux/crypto.h b/include/linux/crypto.h index 357e8cfedc375674f27887f48c597cc2222e5287..fc32694287e21cee1b2508f0456bbd29552479a1 100644 --- a/include/linux/crypto.h +++ b/include/linux/crypto.h @@ -34,6 +34,7 @@ #define CRYPTO_ALG_TYPE_HASH 0x00000003 #define CRYPTO_ALG_TYPE_BLKCIPHER 0x00000004 #define CRYPTO_ALG_TYPE_COMPRESS 0x00000005 +#define CRYPTO_ALG_TYPE_AEAD 0x00000006 #define CRYPTO_ALG_TYPE_HASH_MASK 0x0000000e @@ -91,9 +92,9 @@ struct scatterlist; struct crypto_ablkcipher; struct crypto_async_request; +struct crypto_aead; struct crypto_blkcipher; struct crypto_hash; -struct crypto_queue; struct crypto_tfm; struct crypto_type; @@ -121,6 +122,32 @@ struct ablkcipher_request { void *__ctx[] 
CRYPTO_MINALIGN_ATTR; }; +/** + * struct aead_request - AEAD request + * @base: Common attributes for async crypto requests + * @assoclen: Length in bytes of associated data for authentication + * @cryptlen: Length of data to be encrypted or decrypted + * @iv: Initialisation vector + * @assoc: Associated data + * @src: Source data + * @dst: Destination data + * @__ctx: Start of private context data + */ +struct aead_request { + struct crypto_async_request base; + + unsigned int assoclen; + unsigned int cryptlen; + + u8 *iv; + + struct scatterlist *assoc; + struct scatterlist *src; + struct scatterlist *dst; + + void *__ctx[] CRYPTO_MINALIGN_ATTR; +}; + struct blkcipher_desc { struct crypto_blkcipher *tfm; void *info; @@ -150,13 +177,21 @@ struct ablkcipher_alg { int (*encrypt)(struct ablkcipher_request *req); int (*decrypt)(struct ablkcipher_request *req); - struct crypto_queue *queue; - unsigned int min_keysize; unsigned int max_keysize; unsigned int ivsize; }; +struct aead_alg { + int (*setkey)(struct crypto_aead *tfm, const u8 *key, + unsigned int keylen); + int (*encrypt)(struct aead_request *req); + int (*decrypt)(struct aead_request *req); + + unsigned int ivsize; + unsigned int authsize; +}; + struct blkcipher_alg { int (*setkey)(struct crypto_tfm *tfm, const u8 *key, unsigned int keylen); @@ -212,6 +247,7 @@ struct compress_alg { }; #define cra_ablkcipher cra_u.ablkcipher +#define cra_aead cra_u.aead #define cra_blkcipher cra_u.blkcipher #define cra_cipher cra_u.cipher #define cra_digest cra_u.digest @@ -237,6 +273,7 @@ struct crypto_alg { union { struct ablkcipher_alg ablkcipher; + struct aead_alg aead; struct blkcipher_alg blkcipher; struct cipher_alg cipher; struct digest_alg digest; @@ -284,6 +321,16 @@ struct ablkcipher_tfm { unsigned int reqsize; }; +struct aead_tfm { + int (*setkey)(struct crypto_aead *tfm, const u8 *key, + unsigned int keylen); + int (*encrypt)(struct aead_request *req); + int (*decrypt)(struct aead_request *req); + unsigned int ivsize; + unsigned int authsize; + unsigned int reqsize; +}; + struct blkcipher_tfm { void *iv; int (*setkey)(struct crypto_tfm *tfm, const u8 *key, @@ -323,6 +370,7 @@ struct compress_tfm { }; #define crt_ablkcipher crt_u.ablkcipher +#define crt_aead crt_u.aead #define crt_blkcipher crt_u.blkcipher #define crt_cipher crt_u.cipher #define crt_hash crt_u.hash @@ -334,6 +382,7 @@ struct crypto_tfm { union { struct ablkcipher_tfm ablkcipher; + struct aead_tfm aead; struct blkcipher_tfm blkcipher; struct cipher_tfm cipher; struct hash_tfm hash; @@ -349,6 +398,10 @@ struct crypto_ablkcipher { struct crypto_tfm base; }; +struct crypto_aead { + struct crypto_tfm base; +}; + struct crypto_blkcipher { struct crypto_tfm base; }; @@ -369,11 +422,15 @@ enum { CRYPTOA_UNSPEC, CRYPTOA_ALG, CRYPTOA_TYPE, + CRYPTOA_U32, __CRYPTOA_MAX, }; #define CRYPTOA_MAX (__CRYPTOA_MAX - 1) +/* Maximum number of (rtattr) parameters for each template. */ +#define CRYPTO_MAX_ATTRS 32 + struct crypto_attr_alg { char name[CRYPTO_MAX_ALG_NAME]; }; @@ -383,6 +440,10 @@ struct crypto_attr_type { u32 mask; }; +struct crypto_attr_u32 { + u32 num; +}; + /* * Transform user interface. 
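For the new AEAD transform type declared in this header (the crypto_aead handle and the inline helpers added further below), a rough caller-side sketch follows. The algorithm name, the in-place scatterlist setup and the synchronous error handling are assumptions for illustration, not taken from this patch; a real caller must size the destination for the authentication tag and handle asynchronous completion.

#include <linux/crypto.h>
#include <linux/err.h>
#include <linux/scatterlist.h>

static int example_aead_encrypt(struct scatterlist *assoc, unsigned int assoclen,
				struct scatterlist *sg, unsigned int cryptlen,
				u8 *iv, const u8 *key, unsigned int keylen)
{
	struct crypto_aead *tfm;
	struct aead_request *req;
	int err;

	/* Algorithm name is an assumption; any registered AEAD works here. */
	tfm = crypto_alloc_aead("authenc(hmac(sha1),cbc(aes))", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	err = crypto_aead_setkey(tfm, key, keylen);
	if (err)
		goto out_free_tfm;

	req = aead_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		err = -ENOMEM;
		goto out_free_tfm;
	}

	/* No completion callback: assumes a synchronous implementation. */
	aead_request_set_callback(req, 0, NULL, NULL);
	aead_request_set_assoc(req, assoc, assoclen);
	/* In-place operation; sg must have room for cryptlen plus the tag. */
	aead_request_set_crypt(req, sg, sg, cryptlen, iv);

	err = crypto_aead_encrypt(req);

	aead_request_free(req);
out_free_tfm:
	crypto_free_aead(tfm);
	return err;
}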
*/ @@ -563,7 +624,8 @@ static inline int crypto_ablkcipher_decrypt(struct ablkcipher_request *req) return crt->decrypt(req); } -static inline int crypto_ablkcipher_reqsize(struct crypto_ablkcipher *tfm) +static inline unsigned int crypto_ablkcipher_reqsize( + struct crypto_ablkcipher *tfm) { return crypto_ablkcipher_crt(tfm)->reqsize; } @@ -619,6 +681,150 @@ static inline void ablkcipher_request_set_crypt( req->info = iv; } +static inline struct crypto_aead *__crypto_aead_cast(struct crypto_tfm *tfm) +{ + return (struct crypto_aead *)tfm; +} + +static inline struct crypto_aead *crypto_alloc_aead(const char *alg_name, + u32 type, u32 mask) +{ + type &= ~CRYPTO_ALG_TYPE_MASK; + type |= CRYPTO_ALG_TYPE_AEAD; + mask |= CRYPTO_ALG_TYPE_MASK; + + return __crypto_aead_cast(crypto_alloc_base(alg_name, type, mask)); +} + +static inline struct crypto_tfm *crypto_aead_tfm(struct crypto_aead *tfm) +{ + return &tfm->base; +} + +static inline void crypto_free_aead(struct crypto_aead *tfm) +{ + crypto_free_tfm(crypto_aead_tfm(tfm)); +} + +static inline struct aead_tfm *crypto_aead_crt(struct crypto_aead *tfm) +{ + return &crypto_aead_tfm(tfm)->crt_aead; +} + +static inline unsigned int crypto_aead_ivsize(struct crypto_aead *tfm) +{ + return crypto_aead_crt(tfm)->ivsize; +} + +static inline unsigned int crypto_aead_authsize(struct crypto_aead *tfm) +{ + return crypto_aead_crt(tfm)->authsize; +} + +static inline unsigned int crypto_aead_blocksize(struct crypto_aead *tfm) +{ + return crypto_tfm_alg_blocksize(crypto_aead_tfm(tfm)); +} + +static inline unsigned int crypto_aead_alignmask(struct crypto_aead *tfm) +{ + return crypto_tfm_alg_alignmask(crypto_aead_tfm(tfm)); +} + +static inline u32 crypto_aead_get_flags(struct crypto_aead *tfm) +{ + return crypto_tfm_get_flags(crypto_aead_tfm(tfm)); +} + +static inline void crypto_aead_set_flags(struct crypto_aead *tfm, u32 flags) +{ + crypto_tfm_set_flags(crypto_aead_tfm(tfm), flags); +} + +static inline void crypto_aead_clear_flags(struct crypto_aead *tfm, u32 flags) +{ + crypto_tfm_clear_flags(crypto_aead_tfm(tfm), flags); +} + +static inline int crypto_aead_setkey(struct crypto_aead *tfm, const u8 *key, + unsigned int keylen) +{ + return crypto_aead_crt(tfm)->setkey(tfm, key, keylen); +} + +static inline struct crypto_aead *crypto_aead_reqtfm(struct aead_request *req) +{ + return __crypto_aead_cast(req->base.tfm); +} + +static inline int crypto_aead_encrypt(struct aead_request *req) +{ + return crypto_aead_crt(crypto_aead_reqtfm(req))->encrypt(req); +} + +static inline int crypto_aead_decrypt(struct aead_request *req) +{ + return crypto_aead_crt(crypto_aead_reqtfm(req))->decrypt(req); +} + +static inline unsigned int crypto_aead_reqsize(struct crypto_aead *tfm) +{ + return crypto_aead_crt(tfm)->reqsize; +} + +static inline void aead_request_set_tfm(struct aead_request *req, + struct crypto_aead *tfm) +{ + req->base.tfm = crypto_aead_tfm(tfm); +} + +static inline struct aead_request *aead_request_alloc(struct crypto_aead *tfm, + gfp_t gfp) +{ + struct aead_request *req; + + req = kmalloc(sizeof(*req) + crypto_aead_reqsize(tfm), gfp); + + if (likely(req)) + aead_request_set_tfm(req, tfm); + + return req; +} + +static inline void aead_request_free(struct aead_request *req) +{ + kfree(req); +} + +static inline void aead_request_set_callback(struct aead_request *req, + u32 flags, + crypto_completion_t complete, + void *data) +{ + req->base.complete = complete; + req->base.data = data; + req->base.flags = flags; +} + +static inline void aead_request_set_crypt(struct 
aead_request *req, + struct scatterlist *src, + struct scatterlist *dst, + unsigned int cryptlen, u8 *iv) +{ + req->src = src; + req->dst = dst; + req->cryptlen = cryptlen; + req->iv = iv; +} + +static inline void aead_request_set_assoc(struct aead_request *req, + struct scatterlist *assoc, + unsigned int assoclen) +{ + req->assoc = assoc; + req->assoclen = assoclen; +} + static inline struct crypto_blkcipher *__crypto_blkcipher_cast( struct crypto_tfm *tfm) { diff --git a/include/linux/dccp.h b/include/linux/dccp.h index fda2148d8c85cf3b58407f47b50ef0d8bdb6ec20..f3fc4392e93d74a470c94c40813851e5042d3518 100644 --- a/include/linux/dccp.h +++ b/include/linux/dccp.h @@ -56,10 +56,9 @@ struct dccp_hdr_ext { }; /** - * struct dccp_hdr_request - Conection initiation request header + * struct dccp_hdr_request - Connection initiation request header * * @dccph_req_service - Service to which the client app wants to connect - * @dccph_req_options - list of options (must be a multiple of 32 bits */ struct dccp_hdr_request { __be32 dccph_req_service; @@ -76,12 +75,10 @@ struct dccp_hdr_ack_bits { __be32 dccph_ack_nr_low; }; /** - * struct dccp_hdr_response - Conection initiation response header + * struct dccp_hdr_response - Connection initiation response header * - * @dccph_resp_ack_nr_high - 48 bit ack number high order bits, contains GSR - * @dccph_resp_ack_nr_low - 48 bit ack number low order bits, contains GSR + * @dccph_resp_ack - 48 bit Acknowledgment Number Subheader (5.3) * @dccph_resp_service - Echoes the Service Code on a received DCCP-Request - * @dccph_resp_options - list of options (must be a multiple of 32 bits */ struct dccp_hdr_response { struct dccp_hdr_ack_bits dccph_resp_ack; @@ -91,8 +88,9 @@ struct dccp_hdr_response { /** * struct dccp_hdr_reset - Unconditionally shut down a connection * - * @dccph_reset_service - Echoes the Service Code on a received DCCP-Request - * @dccph_reset_options - list of options (must be a multiple of 32 bits + * @dccph_reset_ack - 48 bit Acknowledgment Number Subheader (5.6) + * @dccph_reset_code - one of %dccp_reset_codes + * @dccph_reset_data - the Data 1 ... 
Data 3 fields from 5.6 */ struct dccp_hdr_reset { struct dccp_hdr_ack_bits dccph_reset_ack; @@ -204,6 +202,7 @@ struct dccp_so_feat { #define DCCP_SOCKOPT_SERVICE 2 #define DCCP_SOCKOPT_CHANGE_L 3 #define DCCP_SOCKOPT_CHANGE_R 4 +#define DCCP_SOCKOPT_GET_CUR_MPS 5 #define DCCP_SOCKOPT_SEND_CSCOV 10 #define DCCP_SOCKOPT_RECV_CSCOV 11 #define DCCP_SOCKOPT_CCID_RX_INFO 128 @@ -215,6 +214,7 @@ struct dccp_so_feat { #ifdef __KERNEL__ #include +#include #include #include #include @@ -391,7 +391,6 @@ struct dccp_opt_pend { struct dccp_opt_conf *dccpop_sc; }; -extern void __dccp_minisock_init(struct dccp_minisock *dmsk); extern void dccp_minisock_init(struct dccp_minisock *dmsk); extern int dccp_parse_options(struct sock *sk, struct sk_buff *skb); @@ -471,6 +470,7 @@ struct dccp_ackvec; * @dccps_pcrlen - receiver partial checksum coverage (via sockopt) * @dccps_ndp_count - number of Non Data Packets since last data packet * @dccps_mss_cache - current value of MSS (path MTU minus header sizes) + * @dccps_rate_last - timestamp for rate-limiting DCCP-Sync (RFC 4340, 7.5.4) * @dccps_minisock - associated minisock (accessed via dccp_msk) * @dccps_hc_rx_ackvec - rx half connection ack vector * @dccps_hc_rx_ccid - CCID used for the receiver (or receiving half-connection) @@ -498,7 +498,7 @@ struct dccp_sock { __u64 dccps_gar; __be32 dccps_service; struct dccp_service_list *dccps_service_list; - struct timeval dccps_timestamp_time; + ktime_t dccps_timestamp_time; __u32 dccps_timestamp_echo; __u16 dccps_l_ack_ratio; __u16 dccps_r_ack_ratio; @@ -506,12 +506,12 @@ struct dccp_sock { __u16 dccps_pcrlen; unsigned long dccps_ndp_count; __u32 dccps_mss_cache; + unsigned long dccps_rate_last; struct dccp_minisock dccps_minisock; struct dccp_ackvec *dccps_hc_rx_ackvec; struct ccid *dccps_hc_rx_ccid; struct ccid *dccps_hc_tx_ccid; struct dccp_options_received dccps_options_received; - struct timeval dccps_epoch; enum dccp_role dccps_role:2; __u8 dccps_hc_rx_insert_options:1; __u8 dccps_hc_tx_insert_options:1; diff --git a/include/linux/eeprom_93cx6.h b/include/linux/eeprom_93cx6.h index d774b7778c91896f3bbd93e18e863781d2f01809..a55c873e8b6667bcd79796b9439743fad7b655c2 100644 --- a/include/linux/eeprom_93cx6.h +++ b/include/linux/eeprom_93cx6.h @@ -21,13 +21,14 @@ /* Module: eeprom_93cx6 Abstract: EEPROM reader datastructures for 93cx6 chipsets. - Supported chipsets: 93c46 & 93c66. + Supported chipsets: 93c46, 93c56 and 93c66. */ /* * EEPROM operation defines. 
*/ #define PCI_EEPROM_WIDTH_93C46 6 +#define PCI_EEPROM_WIDTH_93C56 8 #define PCI_EEPROM_WIDTH_93C66 8 #define PCI_EEPROM_WIDTH_OPCODE 3 #define PCI_EEPROM_WRITE_OPCODE 0x05 diff --git a/include/linux/etherdevice.h b/include/linux/etherdevice.h index 6cdb97365e4734abde2ab88a04f9745661385912..b7558ec81ed58117042520f8cb7d764028252c60 100644 --- a/include/linux/etherdevice.h +++ b/include/linux/etherdevice.h @@ -29,15 +29,19 @@ #include #ifdef __KERNEL__ -extern int eth_header(struct sk_buff *skb, struct net_device *dev, - unsigned short type, void *daddr, - void *saddr, unsigned len); -extern int eth_rebuild_header(struct sk_buff *skb); extern __be16 eth_type_trans(struct sk_buff *skb, struct net_device *dev); -extern void eth_header_cache_update(struct hh_cache *hh, struct net_device *dev, - unsigned char * haddr); -extern int eth_header_cache(struct neighbour *neigh, - struct hh_cache *hh); +extern const struct header_ops eth_header_ops; + +extern int eth_header(struct sk_buff *skb, struct net_device *dev, + unsigned short type, + const void *daddr, const void *saddr, unsigned len); +extern int eth_rebuild_header(struct sk_buff *skb); +extern int eth_header_parse(const struct sk_buff *skb, unsigned char *haddr); +extern int eth_header_cache(const struct neighbour *neigh, struct hh_cache *hh); +extern void eth_header_cache_update(struct hh_cache *hh, + const struct net_device *dev, + const unsigned char *haddr); + extern struct net_device *alloc_etherdev_mq(int sizeof_priv, unsigned int queue_count); #define alloc_etherdev(sizeof_priv) alloc_etherdev_mq(sizeof_priv, 1) diff --git a/include/linux/ethtool.h b/include/linux/ethtool.h index 23ccea81129738095fddd0fcc2138e13d3832104..71d4ada6f31549342d75dbc50dd10045c43e3e17 100644 --- a/include/linux/ethtool.h +++ b/include/linux/ethtool.h @@ -39,7 +39,8 @@ struct ethtool_drvinfo { char bus_info[ETHTOOL_BUSINFO_LEN]; /* Bus info for this IF. */ /* For PCI devices, use pci_name(pci_dev). */ char reserved1[32]; - char reserved2[16]; + char reserved2[12]; + __u32 n_priv_flags; /* number of flags valid in ETHTOOL_GPFLAGS */ __u32 n_stats; /* number of u64's from ETHTOOL_GSTATS */ __u32 testinfo_len; __u32 eedump_len; /* Size of data from ETHTOOL_GEEPROM (bytes) */ @@ -219,6 +220,7 @@ struct ethtool_pauseparam { enum ethtool_stringset { ETH_SS_TEST = 0, ETH_SS_STATS, + ETH_SS_PRIV_FLAGS, }; /* for passing string sets for data tagging */ @@ -256,6 +258,19 @@ struct ethtool_perm_addr { __u8 data[0]; }; +/* boolean flags controlling per-interface behavior characteristics. + * When reading, the flag indicates whether or not a certain behavior + * is enabled/present. When writing, the flag indicates whether + * or not the driver should turn on (set) or off (clear) a behavior. + * + * Some behaviors may read-only (unconditionally absent or present). + * If such is the case, return EINVAL in the set-flags operation if the + * flag differs from the read-only value. 
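The ethtool changes in this hunk and the one that follows replace the per-set count hooks with a single get_sset_count() operation and add generic 32-bit flag accessors. A small sketch of a driver wiring these up (names and counts are placeholders; ethtool_op_get_flags/ethtool_op_set_flags are the generic helpers declared in the next hunk):

#include <linux/ethtool.h>
#include <linux/netdevice.h>

#define EXAMPLE_NUM_STATS	4
#define EXAMPLE_NUM_TESTS	2

/* One hook answers for every string set, replacing the obsolete
 * get_stats_count()/self_test_count() hooks. */
static int example_get_sset_count(struct net_device *dev, int sset)
{
	switch (sset) {
	case ETH_SS_STATS:
		return EXAMPLE_NUM_STATS;
	case ETH_SS_TEST:
		return EXAMPLE_NUM_TESTS;
	default:
		return -EOPNOTSUPP;
	}
}

static const struct ethtool_ops example_ethtool_ops = {
	.get_sset_count	= example_get_sset_count,
	.get_flags	= ethtool_op_get_flags,
	.set_flags	= ethtool_op_set_flags,
};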
+ */ +enum ethtool_flags { + ETH_FLAG_LRO = (1 << 15), /* LRO is enabled */ +}; + #ifdef __KERNEL__ struct net_device; @@ -272,6 +287,8 @@ u32 ethtool_op_get_tso(struct net_device *dev); int ethtool_op_set_tso(struct net_device *dev, u32 data); u32 ethtool_op_get_ufo(struct net_device *dev); int ethtool_op_set_ufo(struct net_device *dev, u32 data); +u32 ethtool_op_get_flags(struct net_device *dev); +int ethtool_op_set_flags(struct net_device *dev, u32 data); /** * ðtool_ops - Alter and report network device settings @@ -307,6 +324,8 @@ int ethtool_op_set_ufo(struct net_device *dev, u32 data); * get_strings: Return a set of strings that describe the requested objects * phys_id: Identify the device * get_stats: Return statistics about the device + * get_flags: get 32-bit flags bitmap + * set_flags: set 32-bit flags bitmap * * Description: * @@ -359,16 +378,23 @@ struct ethtool_ops { int (*set_sg)(struct net_device *, u32); u32 (*get_tso)(struct net_device *); int (*set_tso)(struct net_device *, u32); - int (*self_test_count)(struct net_device *); void (*self_test)(struct net_device *, struct ethtool_test *, u64 *); void (*get_strings)(struct net_device *, u32 stringset, u8 *); int (*phys_id)(struct net_device *, u32); - int (*get_stats_count)(struct net_device *); void (*get_ethtool_stats)(struct net_device *, struct ethtool_stats *, u64 *); int (*begin)(struct net_device *); void (*complete)(struct net_device *); u32 (*get_ufo)(struct net_device *); int (*set_ufo)(struct net_device *, u32); + u32 (*get_flags)(struct net_device *); + int (*set_flags)(struct net_device *, u32); + u32 (*get_priv_flags)(struct net_device *); + int (*set_priv_flags)(struct net_device *, u32); + int (*get_sset_count)(struct net_device *, int); + + /* the following hooks are obsolete */ + int (*self_test_count)(struct net_device *);/* use get_sset_count */ + int (*get_stats_count)(struct net_device *);/* use get_sset_count */ }; #endif /* __KERNEL__ */ @@ -410,6 +436,10 @@ struct ethtool_ops { #define ETHTOOL_SUFO 0x00000022 /* Set UFO enable (ethtool_value) */ #define ETHTOOL_GGSO 0x00000023 /* Get GSO enable (ethtool_value) */ #define ETHTOOL_SGSO 0x00000024 /* Set GSO enable (ethtool_value) */ +#define ETHTOOL_GFLAGS 0x00000025 /* Get flags bitmap(ethtool_value) */ +#define ETHTOOL_SFLAGS 0x00000026 /* Set flags bitmap(ethtool_value) */ +#define ETHTOOL_GPFLAGS 0x00000027 /* Get driver-private flags bitmap */ +#define ETHTOOL_SPFLAGS 0x00000028 /* Set driver-private flags bitmap */ /* compatibility with older code */ #define SPARC_ETH_GSET ETHTOOL_GSET diff --git a/include/linux/fs_enet_pd.h b/include/linux/fs_enet_pd.h index 543cd3cd9e779eb3694bbb74ecbd585c0e14e921..9bc045b8c4789a3c621d2eaf4251801796a78d42 100644 --- a/include/linux/fs_enet_pd.h +++ b/include/linux/fs_enet_pd.h @@ -16,6 +16,7 @@ #ifndef FS_ENET_PD_H #define FS_ENET_PD_H +#include #include #define FS_ENET_NAME "fs_enet" @@ -119,6 +120,7 @@ struct fs_platform_info { u32 cp_page; /* CPM page */ u32 cp_block; /* CPM sblock */ + u32 cp_command; /* CPM page/sblock/mcn */ u32 clk_trx; /* some stuff for pins & mux configuration*/ u32 clk_rx; @@ -133,7 +135,11 @@ struct fs_platform_info { u32 device_flags; int phy_addr; /* the phy address (-1 no phy) */ +#ifdef CONFIG_PPC_CPM_NEW_BINDING + char bus_id[16]; +#else const char* bus_id; +#endif int phy_irq; /* the phy irq (if it exists) */ const struct fs_mii_bus_info *bus_info; diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h index 
272f8c8c90da609da135879020be6a7c3a582f4d..30621c27159fda57e3946ba82a0daa0bdcea6b5a 100644 --- a/include/linux/ieee80211.h +++ b/include/linux/ieee80211.h @@ -16,6 +16,7 @@ #define IEEE80211_H #include +#include #define FCS_LEN 4 @@ -350,4 +351,64 @@ enum ieee80211_eid { #define WLAN_MAX_KEY_LEN 32 +/** + * ieee80211_get_SA - get pointer to SA + * + * Given an 802.11 frame, this function returns the offset + * to the source address (SA). It does not verify that the + * header is long enough to contain the address, and the + * header must be long enough to contain the frame control + * field. + * + * @hdr: the frame + */ +static inline u8 *ieee80211_get_SA(struct ieee80211_hdr *hdr) +{ + u8 *raw = (u8 *) hdr; + u8 tofrom = (*(raw+1)) & 3; /* get the TODS and FROMDS bits */ + + switch (tofrom) { + case 2: + return hdr->addr3; + case 3: + return hdr->addr4; + } + return hdr->addr2; +} + +/** + * ieee80211_get_DA - get pointer to DA + * + * Given an 802.11 frame, this function returns the offset + * to the destination address (DA). It does not verify that + * the header is long enough to contain the address, and the + * header must be long enough to contain the frame control + * field. + * + * @hdr: the frame + */ +static inline u8 *ieee80211_get_DA(struct ieee80211_hdr *hdr) +{ + u8 *raw = (u8 *) hdr; + u8 to_ds = (*(raw+1)) & 1; /* get the TODS bit */ + + if (to_ds) + return hdr->addr3; + return hdr->addr1; +} + +/** + * ieee80211_get_morefrag - determine whether the MOREFRAGS bit is set + * + * This function determines whether the "more fragments" bit is set + * in the frame. + * + * @hdr: the frame + */ +static inline int ieee80211_get_morefrag(struct ieee80211_hdr *hdr) +{ + return (le16_to_cpu(hdr->frame_control) & + IEEE80211_FCTL_MOREFRAGS) != 0; +} + #endif /* IEEE80211_H */ diff --git a/include/linux/if_arcnet.h b/include/linux/if_arcnet.h index af380cb876a05a87119e9c27af95b776555997e8..27ea2ac445ad11f727258f152e3d013bb75a96e5 100644 --- a/include/linux/if_arcnet.h +++ b/include/linux/if_arcnet.h @@ -59,7 +59,7 @@ struct arc_rfc1201 { uint8_t proto; /* protocol ID field - varies */ uint8_t split_flag; /* for use with split packets */ - uint16_t sequence; /* sequence number */ + __be16 sequence; /* sequence number */ uint8_t payload[0]; /* space remaining in packet (504 bytes)*/ }; #define RFC1201_HDR_SIZE 4 diff --git a/include/linux/if_bridge.h b/include/linux/if_bridge.h index 4ff211d98769d532d3c1be2a7f5e6194ebac1390..99e3a1a0009955472f4eacb84040613f75e21cc9 100644 --- a/include/linux/if_bridge.h +++ b/include/linux/if_bridge.h @@ -104,7 +104,7 @@ struct __fdb_entry #include -extern void brioctl_set(int (*ioctl_hook)(unsigned int, void __user *)); +extern void brioctl_set(int (*ioctl_hook)(struct net *, unsigned int, void __user *)); extern struct sk_buff *(*br_handle_frame_hook)(struct net_bridge_port *p, struct sk_buff *skb); extern int (*br_should_route_hook)(struct sk_buff **pskb); diff --git a/include/linux/if_eql.h b/include/linux/if_eql.h index b68752fdc5c4cbf5481e42a531413ea5c92a8cff..79c4f268410d2223ee31986cfae0dda8c0442fb4 100644 --- a/include/linux/if_eql.h +++ b/include/linux/if_eql.h @@ -58,7 +58,6 @@ typedef struct equalizer { slave_queue_t queue; int min_slaves; int max_slaves; - struct net_device_stats stats; struct timer_list timer; } equalizer_t; diff --git a/include/linux/if_ether.h b/include/linux/if_ether.h index 3213f6f4aa580972f154a7d21791711d0d0e3eb9..5f9297793661784f05d2e8567f6f4eddc36d43b8 100644 --- a/include/linux/if_ether.h +++ b/include/linux/if_ether.h 
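
A short, hedged sketch of the address helpers added to ieee80211.h above: given a buffer known to hold at least the full 802.11 header, a hypothetical receive routine can postpone delivery until the last fragment and then pick up source and destination addresses. The example_* name is illustrative only:

	static int example_frame_ready(struct ieee80211_hdr *hdr,
				       const u8 **sa, const u8 **da)
	{
		/* Wait for the final fragment of the MSDU before delivering. */
		if (ieee80211_get_morefrag(hdr))
			return 0;

		*sa = ieee80211_get_SA(hdr);	/* station that sent the frame */
		*da = ieee80211_get_DA(hdr);	/* final destination */
		return 1;
	}
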
@@ -117,9 +117,19 @@ static inline struct ethhdr *eth_hdr(const struct sk_buff *skb) return (struct ethhdr *)skb_mac_header(skb); } +int eth_header_parse(const struct sk_buff *skb, unsigned char *haddr); + #ifdef CONFIG_SYSCTL extern struct ctl_table ether_table[]; #endif + +/* + * Display a 6 byte device address (MAC) in a readable format. + */ +#define MAC_FMT "%02x:%02x:%02x:%02x:%02x:%02x" +extern char *print_mac(char *buf, const u8 *addr); +#define DECLARE_MAC_BUF(var) char var[18] __maybe_unused + #endif #endif /* _LINUX_IF_ETHER_H */ diff --git a/include/linux/if_link.h b/include/linux/if_link.h index 422084d18ce1af9162d4c40ceead599f1094bcd4..84c3492ae5cb510604c50dc4ba09941ac95f7300 100644 --- a/include/linux/if_link.h +++ b/include/linux/if_link.h @@ -78,6 +78,7 @@ enum IFLA_LINKMODE, IFLA_LINKINFO, #define IFLA_LINKINFO IFLA_LINKINFO + IFLA_NET_NS_PID, __IFLA_MAX }; diff --git a/include/linux/if_pppox.h b/include/linux/if_pppox.h index 25652545ba6e19683a9efff8172ec83d4b98efd4..40743e0328457116f44c8427a686ca740381a3cc 100644 --- a/include/linux/if_pppox.h +++ b/include/linux/if_pppox.h @@ -40,7 +40,7 @@ /************************************************************************ * PPPoE addressing definition */ -typedef __u16 sid_t; +typedef __be16 sid_t; struct pppoe_addr{ sid_t sid; /* Session identifier */ unsigned char remote[ETH_ALEN]; /* Remote address */ @@ -90,8 +90,8 @@ struct sockaddr_pppol2tp { #define PADS_CODE 0x65 #define PADT_CODE 0xa7 struct pppoe_tag { - __u16 tag_type; - __u16 tag_len; + __be16 tag_type; + __be16 tag_len; char tag_data[0]; } __attribute ((packed)); @@ -118,8 +118,8 @@ struct pppoe_hdr { #error "Please fix " #endif __u8 code; - __u16 sid; - __u16 length; + __be16 sid; + __be16 length; struct pppoe_tag tag[0]; } __attribute__ ((packed)); @@ -152,7 +152,7 @@ struct pppox_sock { union { struct pppoe_opt pppoe; } proto; - unsigned short num; + __be16 num; }; #define pppoe_dev proto.pppoe.dev #define pppoe_ifindex proto.pppoe.ifindex @@ -172,7 +172,7 @@ static inline struct sock *sk_pppox(struct pppox_sock *po) struct module; struct pppox_proto { - int (*create)(struct socket *sock); + int (*create)(struct net *net, struct socket *sock); int (*ioctl)(struct socket *sock, unsigned int cmd, unsigned long arg); struct module *owner; diff --git a/include/linux/if_shaper.h b/include/linux/if_shaper.h index 68c896a36a3441b9da82fc65713b334201b06215..3b1b7ba198250955db9e53da9d7006e0616149f9 100644 --- a/include/linux/if_shaper.h +++ b/include/linux/if_shaper.h @@ -24,19 +24,7 @@ struct shaper unsigned long recovery; /* Time we can next clock a packet out on an empty queue */ spinlock_t lock; - struct net_device_stats stats; struct net_device *dev; - int (*hard_start_xmit) (struct sk_buff *skb, - struct net_device *dev); - int (*hard_header) (struct sk_buff *skb, - struct net_device *dev, - unsigned short type, - void *daddr, - void *saddr, - unsigned len); - int (*rebuild_header)(struct sk_buff *skb); - int (*hard_header_cache)(struct neighbour *neigh, struct hh_cache *hh); - void (*header_cache_update)(struct hh_cache *hh, struct net_device *dev, unsigned char * haddr); struct net_device_stats* (*get_stats)(struct net_device *dev); struct timer_list timer; }; diff --git a/include/linux/if_tun.h b/include/linux/if_tun.h index 42eb6945b93ede5bc06463e1b5cfda9e5e940109..33e489d5bb33ed4e476360577474a90f580e2b2c 100644 --- a/include/linux/if_tun.h +++ b/include/linux/if_tun.h @@ -42,7 +42,6 @@ struct tun_struct { struct sk_buff_head readq; struct net_device *dev; - 
struct net_device_stats stats; struct fasync_struct *fasync; diff --git a/include/linux/if_vlan.h b/include/linux/if_vlan.h index f8443fdb124a7fc2e6ebc824fc8aac5b1423a80e..976d4b1067d1fb80ac72abc23f9e66e466e523f9 100644 --- a/include/linux/if_vlan.h +++ b/include/linux/if_vlan.h @@ -62,7 +62,7 @@ struct vlan_hdr { #define VLAN_VID_MASK 0xfff /* found in socket.c */ -extern void vlan_ioctl_set(int (*hook)(void __user *)); +extern void vlan_ioctl_set(int (*hook)(struct net *, void __user *)); #define VLAN_NAME "vlan" diff --git a/include/linux/inet_lro.h b/include/linux/inet_lro.h new file mode 100644 index 0000000000000000000000000000000000000000..e1fc1d16d3cde35a0c150773cb950e0b09cbaebc --- /dev/null +++ b/include/linux/inet_lro.h @@ -0,0 +1,177 @@ +/* + * linux/include/linux/inet_lro.h + * + * Large Receive Offload (ipv4 / tcp) + * + * (C) Copyright IBM Corp. 2007 + * + * Authors: + * Jan-Bernd Themann + * Christoph Raisch + * + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2, or (at your option) + * any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + */ + +#ifndef __INET_LRO_H_ +#define __INET_LRO_H_ + +#include +#include + +/* + * LRO statistics + */ + +struct net_lro_stats { + unsigned long aggregated; + unsigned long flushed; + unsigned long no_desc; +}; + +/* + * LRO descriptor for a tcp session + */ +struct net_lro_desc { + struct sk_buff *parent; + struct sk_buff *last_skb; + struct skb_frag_struct *next_frag; + struct iphdr *iph; + struct tcphdr *tcph; + struct vlan_group *vgrp; + __wsum data_csum; + u32 tcp_rcv_tsecr; + u32 tcp_rcv_tsval; + u32 tcp_ack; + u32 tcp_next_seq; + u32 skb_tot_frags_len; + u16 ip_tot_len; + u16 tcp_saw_tstamp; /* timestamps enabled */ + u16 tcp_window; + u16 vlan_tag; + int pkt_aggr_cnt; /* counts aggregated packets */ + int vlan_packet; + int mss; + int active; +}; + +/* + * Large Receive Offload (LRO) Manager + * + * Fields must be set by driver + */ + +struct net_lro_mgr { + struct net_device *dev; + struct net_lro_stats stats; + + /* LRO features */ + unsigned long features; +#define LRO_F_NAPI 1 /* Pass packets to stack via NAPI */ +#define LRO_F_EXTRACT_VLAN_ID 2 /* Set flag if VLAN IDs are extracted + from received packets and eth protocol + is still ETH_P_8021Q */ + + u32 ip_summed; /* Set in non generated SKBs in page mode */ + u32 ip_summed_aggr; /* Set in aggregated SKBs: CHECKSUM_UNNECESSARY + * or CHECKSUM_NONE */ + + int max_desc; /* Max number of LRO descriptors */ + int max_aggr; /* Max number of LRO packets to be aggregated */ + + struct net_lro_desc *lro_arr; /* Array of LRO descriptors */ + + /* + * Optimized driver functions + * + * get_skb_header: returns tcp and ip header for packet in SKB + */ + int (*get_skb_header)(struct sk_buff *skb, void **ip_hdr, + void **tcpudp_hdr, u64 *hdr_flags, void *priv); + + /* hdr_flags: */ +#define LRO_IPV4 1 /* ip_hdr is IPv4 header */ +#define LRO_TCP 2 /* tcpudp_hdr is TCP header */ + + /* + * get_frag_header: returns mac, tcp and ip 
header for packet in SKB + * + * @hdr_flags: Indicate what kind of LRO has to be done + * (IPv4/IPv6/TCP/UDP) + */ + int (*get_frag_header)(struct skb_frag_struct *frag, void **mac_hdr, + void **ip_hdr, void **tcpudp_hdr, u64 *hdr_flags, + void *priv); +}; + +/* + * Processes a SKB + * + * @lro_mgr: LRO manager to use + * @skb: SKB to aggregate + * @priv: Private data that may be used by driver functions + * (for example get_tcp_ip_hdr) + */ + +void lro_receive_skb(struct net_lro_mgr *lro_mgr, + struct sk_buff *skb, + void *priv); + +/* + * Processes a SKB with VLAN HW acceleration support + */ + +void lro_vlan_hwaccel_receive_skb(struct net_lro_mgr *lro_mgr, + struct sk_buff *skb, + struct vlan_group *vgrp, + u16 vlan_tag, + void *priv); + +/* + * Processes a fragment list + * + * This functions aggregate fragments and generate SKBs do pass + * the packets to the stack. + * + * @lro_mgr: LRO manager to use + * @frags: Fragment to be processed. Must contain entire header in first + * element. + * @len: Length of received data + * @true_size: Actual size of memory the fragment is consuming + * @priv: Private data that may be used by driver functions + * (for example get_tcp_ip_hdr) + */ + +void lro_receive_frags(struct net_lro_mgr *lro_mgr, + struct skb_frag_struct *frags, + int len, int true_size, void *priv, __wsum sum); + +void lro_vlan_hwaccel_receive_frags(struct net_lro_mgr *lro_mgr, + struct skb_frag_struct *frags, + int len, int true_size, + struct vlan_group *vgrp, + u16 vlan_tag, + void *priv, __wsum sum); + +/* + * Forward all aggregated SKBs held by lro_mgr to network stack + */ + +void lro_flush_all(struct net_lro_mgr *lro_mgr); + +void lro_flush_pkt(struct net_lro_mgr *lro_mgr, + struct iphdr *iph, struct tcphdr *tcph); + +#endif diff --git a/include/linux/init.h b/include/linux/init.h index 74b1f43bf9825b260be74c1bf71be116f9bd40fc..f8d9d0b5cffcad16fecacaafaca541a338aec951 100644 --- a/include/linux/init.h +++ b/include/linux/init.h @@ -57,6 +57,7 @@ * The markers follow same syntax rules as __init / __initdata. 
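
To make the inet_lro fields above concrete, here is a hedged sketch of how a NAPI driver might wire up the LRO manager. The example_adapter structure, the 8-descriptor sizing and the aggregation limit are invented for this sketch; real drivers differ. The get_skb_header callback assumes eth_type_trans() has already run, so skb->data points at the IP header:

	#include <linux/inet_lro.h>
	#include <linux/netdevice.h>
	#include <linux/skbuff.h>
	#include <linux/if_ether.h>
	#include <linux/ip.h>
	#include <linux/tcp.h>

	#define EXAMPLE_LRO_MAX_DESC	8	/* arbitrary for this sketch */

	struct example_adapter {		/* hypothetical driver state */
		struct net_device *netdev;
		struct net_lro_mgr lro_mgr;
		struct net_lro_desc lro_desc[EXAMPLE_LRO_MAX_DESC];
	};

	/* Locate the IPv4/TCP headers of a candidate skb, or refuse it. */
	static int example_get_skb_header(struct sk_buff *skb, void **iphdr,
					  void **tcph, u64 *hdr_flags, void *priv)
	{
		struct iphdr *iph;

		if (skb->protocol != htons(ETH_P_IP))
			return -1;

		iph = (struct iphdr *)skb->data;
		if (iph->protocol != IPPROTO_TCP)
			return -1;

		*iphdr = iph;
		*tcph = (u8 *)iph + iph->ihl * 4;
		*hdr_flags = LRO_IPV4 | LRO_TCP;
		return 0;
	}

	static void example_lro_setup(struct example_adapter *ap)
	{
		ap->lro_mgr.dev		   = ap->netdev;
		ap->lro_mgr.features	   = LRO_F_NAPI;
		ap->lro_mgr.ip_summed_aggr = CHECKSUM_UNNECESSARY;
		ap->lro_mgr.max_desc	   = EXAMPLE_LRO_MAX_DESC;
		ap->lro_mgr.max_aggr	   = 32;
		ap->lro_mgr.lro_arr	   = ap->lro_desc;
		ap->lro_mgr.get_skb_header = example_get_skb_header;
	}

In the receive path each candidate skb is then fed to lro_receive_skb(&ap->lro_mgr, skb, NULL), and lro_flush_all(&ap->lro_mgr) is called once per poll round before interrupts are re-enabled, so that partially aggregated sessions are pushed up the stack.
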
*/ #define __init_refok noinline __attribute__ ((__section__ (".text.init.refok"))) #define __initdata_refok __attribute__ ((__section__ (".data.init.refok"))) +#define __exit_refok noinline __attribute__ ((__section__ (".exit.text.refok"))) #ifdef MODULE #define __exit __attribute__ ((__section__(".exit.text"))) __cold diff --git a/include/linux/init_task.h b/include/linux/init_task.h index f8abfa349ef924344a11b1fd9739e68bd4d72b24..513bc3e489f018f33495867568d708a9335b0c06 100644 --- a/include/linux/init_task.h +++ b/include/linux/init_task.h @@ -9,6 +9,7 @@ #include #include #include +#include #define INIT_FDTABLE \ { \ @@ -78,6 +79,7 @@ extern struct nsproxy init_nsproxy; .nslock = __SPIN_LOCK_UNLOCKED(nsproxy.nslock), \ .uts_ns = &init_uts_ns, \ .mnt_ns = NULL, \ + INIT_NET_NS(net_ns) \ INIT_IPC_NS(ipc_ns) \ .user_ns = &init_user_ns, \ } diff --git a/include/linux/input.h b/include/linux/input.h index 36e00aa6f03bcf84705ba49ed0cefc7f3729257b..6eb3aead7f1d93775ba4020e9d252df5210c757a 100644 --- a/include/linux/input.h +++ b/include/linux/input.h @@ -360,6 +360,7 @@ struct input_absinfo { #define KEY_BLUETOOTH 237 #define KEY_WLAN 238 +#define KEY_UWB 239 #define KEY_UNKNOWN 240 diff --git a/include/linux/ipv6.h b/include/linux/ipv6.h index 4ca60c3320fb6d89b0870c9a2d0cec2031847f73..5d35a4cc3bfff3a965132151db9f0ccd1c13df7e 100644 --- a/include/linux/ipv6.h +++ b/include/linux/ipv6.h @@ -96,27 +96,6 @@ struct ipv6_destopt_hao { struct in6_addr addr; } __attribute__ ((__packed__)); -struct ipv6_auth_hdr { - __u8 nexthdr; - __u8 hdrlen; /* This one is measured in 32 bit units! */ - __be16 reserved; - __be32 spi; - __be32 seq_no; /* Sequence number */ - __u8 auth_data[0]; /* Length variable but >=4. Mind the 64 bit alignment! */ -}; - -struct ipv6_esp_hdr { - __be32 spi; - __be32 seq_no; /* Sequence number */ - __u8 enc_data[0]; /* Length variable but >=8. Mind the 64 bit alignment! */ -}; - -struct ipv6_comp_hdr { - __u8 nexthdr; - __u8 flags; - __be16 cpi; -}; - /* * IPv6 fixed header * diff --git a/include/linux/isdn.h b/include/linux/isdn.h index 3c7875b7ab5be00cd0ee375cf61904be3c6ca122..ad09506554a3a8faca20749746075b74fe2b9198 100644 --- a/include/linux/isdn.h +++ b/include/linux/isdn.h @@ -353,13 +353,6 @@ typedef struct isdn_net_local_s { /* a particular channel (including */ /* the frame_cnt */ - int (*org_hhc)( - struct neighbour *neigh, - struct hh_cache *hh); - /* Ptr to orig. header_cache_update */ - void (*org_hcu)(struct hh_cache *, - struct net_device *, - unsigned char *); int pppbind; /* ippp device for bindings */ int dialtimeout; /* How long shall we try on dialing? (jiffies) */ int dialwait; /* How long shall we wait after failed attempt? 
(jiffies) */ @@ -389,7 +382,7 @@ typedef struct isdn_net_dev_s { online */ spinlock_t queue_lock; /* lock to protect queue */ void *next; /* Pointer to next isdn-interface */ - struct net_device dev; /* interface to upper levels */ + struct net_device *dev; /* interface to upper levels */ #ifdef CONFIG_ISDN_PPP ippp_bundle * pb; /* pointer to the common bundle structure * with the per-bundle data */ diff --git a/include/linux/ktime.h b/include/linux/ktime.h index dae7143644fe8b87c18aa1fb57e175d6f61865c2..a6ddec141f96c83752ddb060079d0efa4e517aa4 100644 --- a/include/linux/ktime.h +++ b/include/linux/ktime.h @@ -102,6 +102,13 @@ static inline ktime_t ktime_set(const long secs, const unsigned long nsecs) #define ktime_add_ns(kt, nsval) \ ({ (ktime_t){ .tv64 = (kt).tv64 + (nsval) }; }) +/* + * Subtract a scalar nanosecod from a ktime_t variable + * res = kt - nsval: + */ +#define ktime_sub_ns(kt, nsval) \ + ({ (ktime_t){ .tv64 = (kt).tv64 - (nsval) }; }) + /* convert a timespec to ktime_t format: */ static inline ktime_t timespec_to_ktime(struct timespec ts) { @@ -199,6 +206,15 @@ static inline ktime_t ktime_add(const ktime_t add1, const ktime_t add2) */ extern ktime_t ktime_add_ns(const ktime_t kt, u64 nsec); +/** + * ktime_sub_ns - Subtract a scalar nanoseconds value from a ktime_t variable + * @kt: minuend + * @nsec: the scalar nsec value to subtract + * + * Returns the subtraction of @nsec from @kt in ktime_t format + */ +extern ktime_t ktime_sub_ns(const ktime_t kt, u64 nsec); + /** * timespec_to_ktime - convert a timespec to ktime_t format * @ts: the timespec variable to convert @@ -289,6 +305,11 @@ static inline ktime_t ktime_add_us(const ktime_t kt, const u64 usec) return ktime_add_ns(kt, usec * 1000); } +static inline ktime_t ktime_sub_us(const ktime_t kt, const u64 usec) +{ + return ktime_sub_ns(kt, usec * 1000); +} + /* * The resolution of the clocks. The resolution value is returned in * the clock_getres() system call to give application programmers an diff --git a/include/linux/list.h b/include/linux/list.h index f29fc9c1a964f299e525c65e2ffa847db35ec07c..ad9dcb9e337523337d5d2bacaa7c2e9f679f786a 100644 --- a/include/linux/list.h +++ b/include/linux/list.h @@ -524,6 +524,20 @@ static inline void list_splice_init_rcu(struct list_head *list, prefetch(pos->member.next), &pos->member != (head); \ pos = list_entry(pos->member.next, typeof(*pos), member)) +/** + * list_for_each_entry_continue_reverse - iterate backwards from the given point + * @pos: the type * to use as a loop cursor. + * @head: the head for your list. + * @member: the name of the list_struct within the struct. + * + * Start to iterate over list of given type backwards, continuing after + * the current position. + */ +#define list_for_each_entry_continue_reverse(pos, head, member) \ + for (pos = list_entry(pos->member.prev, typeof(*pos), member); \ + prefetch(pos->member.prev), &pos->member != (head); \ + pos = list_entry(pos->member.prev, typeof(*pos), member)) + /** * list_for_each_entry_from - iterate over list of given type from the current point * @pos: the type * to use as a loop cursor. 
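
A small, hedged usage sketch of the new list_for_each_entry_continue_reverse() iterator added above (example_item and example_sum_before are invented names): starting from a cursor that already points at a known entry, the loop visits the entries preceding it, walking towards the list head, and never revisits the cursor entry itself:

	struct example_item {
		struct list_head link;
		int value;
	};

	static int example_sum_before(struct list_head *head,
				      struct example_item *pos)
	{
		int sum = 0;

		/* Iteration starts at the entry preceding 'pos' and stops
		 * when the list head is reached. */
		list_for_each_entry_continue_reverse(pos, head, link)
			sum += pos->value;

		return sum;
	}
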
diff --git a/include/linux/mdio-bitbang.h b/include/linux/mdio-bitbang.h new file mode 100644 index 0000000000000000000000000000000000000000..8ea9a42a4c02c6702f562fd46e60137f5e4f7a0a --- /dev/null +++ b/include/linux/mdio-bitbang.h @@ -0,0 +1,42 @@ +#ifndef __LINUX_MDIO_BITBANG_H +#define __LINUX_MDIO_BITBANG_H + +#include +#include + +struct mdiobb_ctrl; + +struct mdiobb_ops { + struct module *owner; + + /* Set the Management Data Clock high if level is one, + * low if level is zero. + */ + void (*set_mdc)(struct mdiobb_ctrl *ctrl, int level); + + /* Configure the Management Data I/O pin as an input if + * "output" is zero, or an output if "output" is one. + */ + void (*set_mdio_dir)(struct mdiobb_ctrl *ctrl, int output); + + /* Set the Management Data I/O pin high if value is one, + * low if "value" is zero. This may only be called + * when the MDIO pin is configured as an output. + */ + void (*set_mdio_data)(struct mdiobb_ctrl *ctrl, int value); + + /* Retrieve the state Management Data I/O pin. */ + int (*get_mdio_data)(struct mdiobb_ctrl *ctrl); +}; + +struct mdiobb_ctrl { + const struct mdiobb_ops *ops; +}; + +/* The returned bus is not yet registered with the phy layer. */ +struct mii_bus *alloc_mdio_bitbang(struct mdiobb_ctrl *ctrl); + +/* The bus must already have been unregistered. */ +void free_mdio_bitbang(struct mii_bus *bus); + +#endif diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h index e47e5951058be53593f68e9c8d955418a0e82ccf..74523d999f7a9a99f4e5890102e653c49f094ade 100644 --- a/include/linux/mod_devicetable.h +++ b/include/linux/mod_devicetable.h @@ -351,4 +351,19 @@ struct sdio_device_id { kernel_ulong_t driver_data; /* Data private to the driver */ }; +/* SSB core, see drivers/ssb/ */ +struct ssb_device_id { + __u16 vendor; + __u16 coreid; + __u8 revision; +}; +#define SSB_DEVICE(_vendor, _coreid, _revision) \ + { .vendor = _vendor, .coreid = _coreid, .revision = _revision, } +#define SSB_DEVTABLE_END \ + { 0, }, + +#define SSB_ANY_VENDOR 0xFFFF +#define SSB_ANY_ID 0xFFFF +#define SSB_ANY_REV 0xFF + #endif /* LINUX_MOD_DEVICETABLE_H */ diff --git a/include/linux/net.h b/include/linux/net.h index efc45177b50359cf6cccb218ab75f6a3cb580e0c..c136abce7ef628291325c5ce00a49a1763db6101 100644 --- a/include/linux/net.h +++ b/include/linux/net.h @@ -23,6 +23,7 @@ struct poll_table_struct; struct inode; +struct net; #define NPROTO 34 /* should be enough for now.. 
*/ @@ -169,7 +170,7 @@ struct proto_ops { struct net_proto_family { int family; - int (*create)(struct socket *sock, int protocol); + int (*create)(struct net *net, struct socket *sock, int protocol); struct module *owner; }; diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index e679b27516659699280c42b1e6e15839cfedb787..5a11f889e56a7f0780eb44572e4b9e2b68239c70 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -31,6 +31,7 @@ #ifdef __KERNEL__ #include +#include #include #include #include @@ -38,6 +39,9 @@ #include #include #include +#include + +#include struct vlan_group; struct ethtool_ops; @@ -246,6 +250,19 @@ struct hh_cache #define LL_RESERVED_SPACE_EXTRA(dev,extra) \ ((((dev)->hard_header_len+extra)&~(HH_DATA_MOD - 1)) + HH_DATA_MOD) +struct header_ops { + int (*create) (struct sk_buff *skb, struct net_device *dev, + unsigned short type, const void *daddr, + const void *saddr, unsigned len); + int (*parse)(const struct sk_buff *skb, unsigned char *haddr); + int (*rebuild)(struct sk_buff *skb); +#define HAVE_HEADER_CACHE + int (*cache)(const struct neighbour *neigh, struct hh_cache *hh); + void (*cache_update)(struct hh_cache *hh, + const struct net_device *dev, + const unsigned char *haddr); +}; + /* These flag bits are private to the generic network queueing * layer, they may not be explicitly referenced by any other * code. @@ -258,7 +275,6 @@ enum netdev_state_t __LINK_STATE_PRESENT, __LINK_STATE_SCHED, __LINK_STATE_NOCARRIER, - __LINK_STATE_RX_SCHED, __LINK_STATE_LINKWATCH_PENDING, __LINK_STATE_DORMANT, __LINK_STATE_QDISC_RUNNING, @@ -277,6 +293,120 @@ struct netdev_boot_setup { extern int __init netdev_boot_setup(char *str); +/* + * Structure for NAPI scheduling similar to tasklet but with weighting + */ +struct napi_struct { + /* The poll_list must only be managed by the entity which + * changes the state of the NAPI_STATE_SCHED bit. This means + * whoever atomically sets that bit can add this napi_struct + * to the per-cpu poll_list, and whoever clears that bit + * can remove from the list right before clearing the bit. + */ + struct list_head poll_list; + + unsigned long state; + int weight; + int (*poll)(struct napi_struct *, int); +#ifdef CONFIG_NETPOLL + spinlock_t poll_lock; + int poll_owner; + struct net_device *dev; + struct list_head dev_list; +#endif +}; + +enum +{ + NAPI_STATE_SCHED, /* Poll is scheduled */ +}; + +extern void FASTCALL(__napi_schedule(struct napi_struct *n)); + +/** + * napi_schedule_prep - check if napi can be scheduled + * @n: napi context + * + * Test if NAPI routine is already running, and if not mark + * it as running. This is used as a condition variable + * insure only one NAPI poll instance runs + */ +static inline int napi_schedule_prep(struct napi_struct *n) +{ + return !test_and_set_bit(NAPI_STATE_SCHED, &n->state); +} + +/** + * napi_schedule - schedule NAPI poll + * @n: napi context + * + * Schedule NAPI poll routine to be called if it is not already + * running. + */ +static inline void napi_schedule(struct napi_struct *n) +{ + if (napi_schedule_prep(n)) + __napi_schedule(n); +} + +/* Try to reschedule poll. Called by dev->poll() after napi_complete(). */ +static inline int napi_reschedule(struct napi_struct *napi) +{ + if (napi_schedule_prep(napi)) { + __napi_schedule(napi); + return 1; + } + return 0; +} + +/** + * napi_complete - NAPI processing complete + * @n: napi context + * + * Mark NAPI processing as complete. 
+ */ +static inline void __napi_complete(struct napi_struct *n) +{ + BUG_ON(!test_bit(NAPI_STATE_SCHED, &n->state)); + list_del(&n->poll_list); + smp_mb__before_clear_bit(); + clear_bit(NAPI_STATE_SCHED, &n->state); +} + +static inline void napi_complete(struct napi_struct *n) +{ + local_irq_disable(); + __napi_complete(n); + local_irq_enable(); +} + +/** + * napi_disable - prevent NAPI from scheduling + * @n: napi context + * + * Stop NAPI from being scheduled on this context. + * Waits till any outstanding processing completes. + */ +static inline void napi_disable(struct napi_struct *n) +{ + while (test_and_set_bit(NAPI_STATE_SCHED, &n->state)) + msleep_interruptible(1); +} + +/** + * napi_enable - enable NAPI scheduling + * @n: napi context + * + * Resume NAPI from being scheduled on this context. + * Must be paired with napi_disable. + */ +static inline void napi_enable(struct napi_struct *n) +{ + BUG_ON(!test_bit(NAPI_STATE_SCHED, &n->state)); + smp_mb__before_clear_bit(); + clear_bit(NAPI_STATE_SCHED, &n->state); +} + /* * The DEVICE structure. * Actually, this whole structure is a big mistake. It mixes I/O @@ -319,6 +449,9 @@ struct net_device unsigned long state; struct list_head dev_list; +#ifdef CONFIG_NETPOLL + struct list_head napi_list; +#endif /* The device initialization function. Called only once. */ int (*init)(struct net_device *dev); @@ -339,8 +472,11 @@ struct net_device #define NETIF_F_HW_VLAN_FILTER 512 /* Receive filtering on VLAN */ #define NETIF_F_VLAN_CHALLENGED 1024 /* Device cannot handle VLAN packets */ #define NETIF_F_GSO 2048 /* Enable software GSO. */ -#define NETIF_F_LLTX 4096 /* LockLess TX */ +#define NETIF_F_LLTX 4096 /* LockLess TX - deprecated. Please */ + /* do not use LLTX in new drivers */ +#define NETIF_F_NETNS_LOCAL 8192 /* Does not change network namespaces */ #define NETIF_F_MULTI_QUEUE 16384 /* Has multiple TX/RX queues */ +#define NETIF_F_LRO 32768 /* large receive offload */ /* Segmentation offload features */ #define NETIF_F_GSO_SHIFT 16 @@ -379,6 +515,9 @@ struct net_device #endif const struct ethtool_ops *ethtool_ops; + /* Hardware header description */ + const struct header_ops *header_ops; + /* * This marks the end of the "visible" part of the structure. 
All * fields hereafter are internal to the system, and may change at @@ -430,12 +569,6 @@ struct net_device /* * Cache line mostly used on receive path (including eth_type_trans()) */ - struct list_head poll_list ____cacheline_aligned_in_smp; - /* Link to poll list */ - - int (*poll) (struct net_device *dev, int *quota); - int quota; - int weight; unsigned long last_rx; /* Time of last Rx */ /* Interface address info used in eth_type_trans() */ unsigned char dev_addr[MAX_ADDR_LEN]; /* hw address, (before bcast @@ -508,13 +641,6 @@ struct net_device int (*open)(struct net_device *dev); int (*stop)(struct net_device *dev); #define HAVE_NETDEV_POLL - int (*hard_header) (struct sk_buff *skb, - struct net_device *dev, - unsigned short type, - void *daddr, - void *saddr, - unsigned len); - int (*rebuild_header)(struct sk_buff *skb); #define HAVE_CHANGE_RX_FLAGS void (*change_rx_flags)(struct net_device *dev, int flags); @@ -531,12 +657,6 @@ struct net_device #define HAVE_SET_CONFIG int (*set_config)(struct net_device *dev, struct ifmap *map); -#define HAVE_HEADER_CACHE - int (*hard_header_cache)(struct neighbour *neigh, - struct hh_cache *hh); - void (*header_cache_update)(struct hh_cache *hh, - struct net_device *dev, - unsigned char * haddr); #define HAVE_CHANGE_MTU int (*change_mtu)(struct net_device *dev, int new_mtu); @@ -550,8 +670,6 @@ struct net_device void (*vlan_rx_kill_vid)(struct net_device *dev, unsigned short vid); - int (*hard_header_parse)(struct sk_buff *skb, - unsigned char *haddr); int (*neigh_setup)(struct net_device *dev, struct neigh_parms *); #ifdef CONFIG_NETPOLL struct netpoll_info *npinfo; @@ -560,6 +678,9 @@ struct net_device void (*poll_controller)(struct net_device *dev); #endif + /* Network namespace this network device is inside */ + struct net *nd_net; + /* bridge stuff */ struct net_bridge_port *br_port; /* macvlan */ @@ -582,17 +703,39 @@ struct net_device #define NETDEV_ALIGN 32 #define NETDEV_ALIGN_CONST (NETDEV_ALIGN - 1) +/** + * netdev_priv - access network device private data + * @dev: network device + * + * Get network device private data + */ static inline void *netdev_priv(const struct net_device *dev) { return dev->priv; } -#define SET_MODULE_OWNER(dev) do { } while (0) /* Set the sysfs physical device reference for the network logical device * if set prior to registration will cause a symlink during initialization. */ #define SET_NETDEV_DEV(net, pdev) ((net)->dev.parent = (pdev)) +static inline void netif_napi_add(struct net_device *dev, + struct napi_struct *napi, + int (*poll)(struct napi_struct *, int), + int weight) +{ + INIT_LIST_HEAD(&napi->poll_list); + napi->poll = poll; + napi->weight = weight; +#ifdef CONFIG_NETPOLL + napi->dev = dev; + list_add(&napi->dev_list, &dev->napi_list); + spin_lock_init(&napi->poll_lock); + napi->poll_owner = -1; +#endif + set_bit(NAPI_STATE_SCHED, &napi->state); +} + struct packet_type { __be16 type; /* This is really htons(ether_type). 
*/ struct net_device *dev; /* NULL is wildcarded here */ @@ -610,45 +753,46 @@ struct packet_type { #include #include -extern struct net_device loopback_dev; /* The loopback */ -extern struct list_head dev_base_head; /* All devices */ extern rwlock_t dev_base_lock; /* Device list lock */ -#define for_each_netdev(d) \ - list_for_each_entry(d, &dev_base_head, dev_list) -#define for_each_netdev_safe(d, n) \ - list_for_each_entry_safe(d, n, &dev_base_head, dev_list) -#define for_each_netdev_continue(d) \ - list_for_each_entry_continue(d, &dev_base_head, dev_list) + +#define for_each_netdev(net, d) \ + list_for_each_entry(d, &(net)->dev_base_head, dev_list) +#define for_each_netdev_safe(net, d, n) \ + list_for_each_entry_safe(d, n, &(net)->dev_base_head, dev_list) +#define for_each_netdev_continue(net, d) \ + list_for_each_entry_continue(d, &(net)->dev_base_head, dev_list) #define net_device_entry(lh) list_entry(lh, struct net_device, dev_list) static inline struct net_device *next_net_device(struct net_device *dev) { struct list_head *lh; + struct net *net; + net = dev->nd_net; lh = dev->dev_list.next; - return lh == &dev_base_head ? NULL : net_device_entry(lh); + return lh == &net->dev_base_head ? NULL : net_device_entry(lh); } -static inline struct net_device *first_net_device(void) +static inline struct net_device *first_net_device(struct net *net) { - return list_empty(&dev_base_head) ? NULL : - net_device_entry(dev_base_head.next); + return list_empty(&net->dev_base_head) ? NULL : + net_device_entry(net->dev_base_head.next); } extern int netdev_boot_setup_check(struct net_device *dev); extern unsigned long netdev_boot_base(const char *prefix, int unit); -extern struct net_device *dev_getbyhwaddr(unsigned short type, char *hwaddr); -extern struct net_device *dev_getfirstbyhwtype(unsigned short type); -extern struct net_device *__dev_getfirstbyhwtype(unsigned short type); +extern struct net_device *dev_getbyhwaddr(struct net *net, unsigned short type, char *hwaddr); +extern struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type); +extern struct net_device *__dev_getfirstbyhwtype(struct net *net, unsigned short type); extern void dev_add_pack(struct packet_type *pt); extern void dev_remove_pack(struct packet_type *pt); extern void __dev_remove_pack(struct packet_type *pt); -extern struct net_device *dev_get_by_flags(unsigned short flags, +extern struct net_device *dev_get_by_flags(struct net *net, unsigned short flags, unsigned short mask); -extern struct net_device *dev_get_by_name(const char *name); -extern struct net_device *__dev_get_by_name(const char *name); +extern struct net_device *dev_get_by_name(struct net *net, const char *name); +extern struct net_device *__dev_get_by_name(struct net *net, const char *name); extern int dev_alloc_name(struct net_device *dev, const char *name); extern int dev_open(struct net_device *dev); extern int dev_close(struct net_device *dev); @@ -659,14 +803,35 @@ extern void free_netdev(struct net_device *dev); extern void synchronize_net(void); extern int register_netdevice_notifier(struct notifier_block *nb); extern int unregister_netdevice_notifier(struct notifier_block *nb); -extern int call_netdevice_notifiers(unsigned long val, void *v); -extern struct net_device *dev_get_by_index(int ifindex); -extern struct net_device *__dev_get_by_index(int ifindex); +extern int call_netdevice_notifiers(unsigned long val, struct net_device *dev); +extern struct net_device *dev_get_by_index(struct net *net, int ifindex); +extern struct 
net_device *__dev_get_by_index(struct net *net, int ifindex); extern int dev_restart(struct net_device *dev); #ifdef CONFIG_NETPOLL_TRAP extern int netpoll_trap(void); #endif +static inline int dev_hard_header(struct sk_buff *skb, struct net_device *dev, + unsigned short type, + const void *daddr, const void *saddr, + unsigned len) +{ + if (!dev->header_ops) + return 0; + + return dev->header_ops->create(skb, dev, type, daddr, saddr, len); +} + +static inline int dev_parse_header(const struct sk_buff *skb, + unsigned char *haddr) +{ + const struct net_device *dev = skb->dev; + + if (!dev->header_ops->parse) + return 0; + return dev->header_ops->parse(skb, haddr); +} + typedef int gifconf_func_t(struct net_device * dev, char __user * bufptr, int len); extern int register_gifconf(unsigned int family, gifconf_func_t * gifconf); static inline int unregister_gifconf(unsigned int family) @@ -678,7 +843,6 @@ static inline int unregister_gifconf(unsigned int family) * Incoming packets are placed on per-cpu queues so that * no locking is needed. */ - struct softnet_data { struct net_device *output_queue; @@ -686,7 +850,7 @@ struct softnet_data struct list_head poll_list; struct sk_buff *completion_queue; - struct net_device backlog_dev; /* Sorry. 8) */ + struct napi_struct backlog; #ifdef CONFIG_NET_DMA struct dma_chan *net_dma; #endif @@ -704,11 +868,24 @@ static inline void netif_schedule(struct net_device *dev) __netif_schedule(dev); } +/** + * netif_start_queue - allow transmit + * @dev: network device + * + * Allow upper layers to call the device hard_start_xmit routine. + */ static inline void netif_start_queue(struct net_device *dev) { clear_bit(__LINK_STATE_XOFF, &dev->state); } +/** + * netif_wake_queue - restart transmit + * @dev: network device + * + * Allow upper layers to call the device hard_start_xmit routine. + * Used for flow control when transmit resources are available. + */ static inline void netif_wake_queue(struct net_device *dev) { #ifdef CONFIG_NETPOLL_TRAP @@ -721,16 +898,35 @@ static inline void netif_wake_queue(struct net_device *dev) __netif_schedule(dev); } +/** + * netif_stop_queue - stop transmitted packets + * @dev: network device + * + * Stop upper layers calling the device hard_start_xmit routine. + * Used for flow control when transmit resources are unavailable. + */ static inline void netif_stop_queue(struct net_device *dev) { set_bit(__LINK_STATE_XOFF, &dev->state); } +/** + * netif_queue_stopped - test if transmit queue is flowblocked + * @dev: network device + * + * Test if transmit queue on device is currently unable to send. + */ static inline int netif_queue_stopped(const struct net_device *dev) { return test_bit(__LINK_STATE_XOFF, &dev->state); } +/** + * netif_running - test if up + * @dev: network device + * + * Test if the device has been brought up. + */ static inline int netif_running(const struct net_device *dev) { return test_bit(__LINK_STATE_START, &dev->state); @@ -742,6 +938,14 @@ static inline int netif_running(const struct net_device *dev) * done at the overall netdevice level. * Also test the device if we're multiqueue. */ + +/** + * netif_start_subqueue - allow sending packets on subqueue + * @dev: network device + * @queue_index: sub queue index + * + * Start individual transmit queue of a device with multiple transmit queues. 
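
The header construction hooks that used to live directly in struct net_device are now grouped behind dev->header_ops, and callers go through the dev_hard_header()/dev_parse_header() wrappers shown above. A hedged sketch follows; the example_* names are invented, and eth_header_ops is the shared Ethernet implementation exported from etherdevice.h earlier in this patch:

	static void example_ether_setup(struct net_device *dev)
	{
		/* Instead of filling in dev->hard_header and friends one by
		 * one, point the device at the shared Ethernet header ops. */
		dev->header_ops = &eth_header_ops;
	}

	static int example_push_header(struct sk_buff *skb, struct net_device *dev,
				       const u8 *dest)
	{
		/* Builds a link-layer header for an IPv4 payload; with a
		 * known destination, eth_header() returns the header length. */
		return dev_hard_header(skb, dev, ETH_P_IP, dest, dev->dev_addr,
				       skb->len);
	}
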
+ */ static inline void netif_start_subqueue(struct net_device *dev, u16 queue_index) { #ifdef CONFIG_NETDEVICES_MULTIQUEUE @@ -749,6 +953,13 @@ static inline void netif_start_subqueue(struct net_device *dev, u16 queue_index) #endif } +/** + * netif_stop_subqueue - stop sending packets on subqueue + * @dev: network device + * @queue_index: sub queue index + * + * Stop individual transmit queue of a device with multiple transmit queues. + */ static inline void netif_stop_subqueue(struct net_device *dev, u16 queue_index) { #ifdef CONFIG_NETDEVICES_MULTIQUEUE @@ -760,6 +971,13 @@ static inline void netif_stop_subqueue(struct net_device *dev, u16 queue_index) #endif } +/** + * netif_subqueue_stopped - test status of subqueue + * @dev: network device + * @queue_index: sub queue index + * + * Check individual transmit queue of a device with multiple transmit queues. + */ static inline int netif_subqueue_stopped(const struct net_device *dev, u16 queue_index) { @@ -771,6 +989,14 @@ static inline int netif_subqueue_stopped(const struct net_device *dev, #endif } + +/** + * netif_wake_subqueue - allow sending packets on subqueue + * @dev: network device + * @queue_index: sub queue index + * + * Resume individual transmit queue of a device with multiple transmit queues. + */ static inline void netif_wake_subqueue(struct net_device *dev, u16 queue_index) { #ifdef CONFIG_NETDEVICES_MULTIQUEUE @@ -784,6 +1010,13 @@ static inline void netif_wake_subqueue(struct net_device *dev, u16 queue_index) #endif } +/** + * netif_is_multiqueue - test if device has multiple transmit queues + * @dev: network device + * + * Check if device has multiple transmit queues + * Always falls if NETDEVICE_MULTIQUEUE is not configured + */ static inline int netif_is_multiqueue(const struct net_device *dev) { #ifdef CONFIG_NETDEVICES_MULTIQUEUE @@ -796,20 +1029,7 @@ static inline int netif_is_multiqueue(const struct net_device *dev) /* Use this variant when it is known for sure that it * is executing from interrupt context. */ -static inline void dev_kfree_skb_irq(struct sk_buff *skb) -{ - if (atomic_dec_and_test(&skb->users)) { - struct softnet_data *sd; - unsigned long flags; - - local_irq_save(flags); - sd = &__get_cpu_var(softnet_data); - skb->next = sd->completion_queue; - sd->completion_queue = skb; - raise_softirq_irqoff(NET_TX_SOFTIRQ); - local_irq_restore(flags); - } -} +extern void dev_kfree_skb_irq(struct sk_buff *skb); /* Use this variant in places where it could be invoked * either from interrupt or non-interrupt context. 
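
Taken together, the napi_struct plumbing above replaces the old per-device poll/quota/weight fields. Below is a hedged sketch of the resulting driver pattern; the example_* names and the weight of 64 are purely illustrative, the hardware-specific helpers are stubs, and the netif_rx_schedule()/netif_rx_complete() wrappers it relies on appear a little further down in this header:

	#include <linux/interrupt.h>
	#include <linux/netdevice.h>

	struct example_adapter {		/* hypothetical driver state */
		struct net_device *netdev;
		struct napi_struct napi;
	};

	/* Hardware-specific stubs, bodies omitted in this sketch. */
	static int example_clean_rx_ring(struct example_adapter *ap, int budget)
	{
		return 0;
	}
	static void example_enable_rx_irq(struct example_adapter *ap) { }
	static void example_disable_rx_irq(struct example_adapter *ap) { }

	static int example_poll(struct napi_struct *napi, int budget)
	{
		struct example_adapter *ap =
			container_of(napi, struct example_adapter, napi);
		int work_done;

		work_done = example_clean_rx_ring(ap, budget);

		if (work_done < budget) {
			/* All pending work handled: leave polled mode and
			 * let the device interrupt again. */
			netif_rx_complete(ap->netdev, napi);
			example_enable_rx_irq(ap);
		}
		return work_done;
	}

	static irqreturn_t example_interrupt(int irq, void *dev_id)
	{
		struct example_adapter *ap = dev_id;

		if (netif_rx_schedule_prep(ap->netdev, &ap->napi)) {
			example_disable_rx_irq(ap);
			__netif_rx_schedule(ap->netdev, &ap->napi);
		}
		return IRQ_HANDLED;
	}

	static void example_setup_napi(struct example_adapter *ap)
	{
		/* Replaces the old "dev->poll = ...; dev->weight = 64;". */
		netif_napi_add(ap->netdev, &ap->napi, example_poll, 64);
	}
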
@@ -822,29 +1042,41 @@ extern int netif_rx_ni(struct sk_buff *skb); #define HAVE_NETIF_RECEIVE_SKB 1 extern int netif_receive_skb(struct sk_buff *skb); extern int dev_valid_name(const char *name); -extern int dev_ioctl(unsigned int cmd, void __user *); -extern int dev_ethtool(struct ifreq *); +extern int dev_ioctl(struct net *net, unsigned int cmd, void __user *); +extern int dev_ethtool(struct net *net, struct ifreq *); extern unsigned dev_get_flags(const struct net_device *); extern int dev_change_flags(struct net_device *, unsigned); extern int dev_change_name(struct net_device *, char *); +extern int dev_change_net_namespace(struct net_device *, + struct net *, const char *); extern int dev_set_mtu(struct net_device *, int); extern int dev_set_mac_address(struct net_device *, struct sockaddr *); extern int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev); -extern void dev_init(void); - extern int netdev_budget; /* Called by rtnetlink.c:rtnl_unlock() */ extern void netdev_run_todo(void); +/** + * dev_put - release reference to device + * @dev: network device + * + * Release reference to device to allow it to be freed. + */ static inline void dev_put(struct net_device *dev) { atomic_dec(&dev->refcnt); } +/** + * dev_hold - get reference to device + * @dev: network device + * + * Hold reference to device to keep it from being freed. + */ static inline void dev_hold(struct net_device *dev) { atomic_inc(&dev->refcnt); @@ -861,6 +1093,12 @@ static inline void dev_hold(struct net_device *dev) extern void linkwatch_fire_event(struct net_device *dev); +/** + * netif_carrier_ok - test if carrier present + * @dev: network device + * + * Check if carrier is present on device + */ static inline int netif_carrier_ok(const struct net_device *dev) { return !test_bit(__LINK_STATE_NOCARRIER, &dev->state); @@ -872,30 +1110,66 @@ extern void netif_carrier_on(struct net_device *dev); extern void netif_carrier_off(struct net_device *dev); +/** + * netif_dormant_on - mark device as dormant. + * @dev: network device + * + * Mark device as dormant (as per RFC2863). + * + * The dormant state indicates that the relevant interface is not + * actually in a condition to pass packets (i.e., it is not 'up') but is + * in a "pending" state, waiting for some external event. For "on- + * demand" interfaces, this new state identifies the situation where the + * interface is waiting for events to place it in the up state. + * + */ static inline void netif_dormant_on(struct net_device *dev) { if (!test_and_set_bit(__LINK_STATE_DORMANT, &dev->state)) linkwatch_fire_event(dev); } +/** + * netif_dormant_off - set device as not dormant. + * @dev: network device + * + * Device is not in dormant state. + */ static inline void netif_dormant_off(struct net_device *dev) { if (test_and_clear_bit(__LINK_STATE_DORMANT, &dev->state)) linkwatch_fire_event(dev); } +/** + * netif_dormant - test if carrier present + * @dev: network device + * + * Check if carrier is present on device + */ static inline int netif_dormant(const struct net_device *dev) { return test_bit(__LINK_STATE_DORMANT, &dev->state); } +/** + * netif_oper_up - test if device is operational + * @dev: network device + * + * Check if carrier is operational + */ static inline int netif_oper_up(const struct net_device *dev) { return (dev->operstate == IF_OPER_UP || dev->operstate == IF_OPER_UNKNOWN /* backward compat */); } -/* Hot-plugging. 
*/ +/** + * netif_device_present - is device available or removed + * @dev: network device + * + * Check if device has not been removed from system. + */ static inline int netif_device_present(struct net_device *dev) { return test_bit(__LINK_STATE_PRESENT, &dev->state); @@ -955,46 +1229,38 @@ static inline u32 netif_msg_init(int debug_value, int default_msg_enable_bits) return (1 << debug_value) - 1; } -/* Test if receive needs to be scheduled */ -static inline int __netif_rx_schedule_prep(struct net_device *dev) -{ - return !test_and_set_bit(__LINK_STATE_RX_SCHED, &dev->state); -} - /* Test if receive needs to be scheduled but only if up */ -static inline int netif_rx_schedule_prep(struct net_device *dev) +static inline int netif_rx_schedule_prep(struct net_device *dev, + struct napi_struct *napi) { - return netif_running(dev) && __netif_rx_schedule_prep(dev); + return netif_running(dev) && napi_schedule_prep(napi); } /* Add interface to tail of rx poll list. This assumes that _prep has * already been called and returned 1. */ - -extern void __netif_rx_schedule(struct net_device *dev); +static inline void __netif_rx_schedule(struct net_device *dev, + struct napi_struct *napi) +{ + dev_hold(dev); + __napi_schedule(napi); +} /* Try to reschedule poll. Called by irq handler. */ -static inline void netif_rx_schedule(struct net_device *dev) +static inline void netif_rx_schedule(struct net_device *dev, + struct napi_struct *napi) { - if (netif_rx_schedule_prep(dev)) - __netif_rx_schedule(dev); + if (netif_rx_schedule_prep(dev, napi)) + __netif_rx_schedule(dev, napi); } -/* Try to reschedule poll. Called by dev->poll() after netif_rx_complete(). - * Do not inline this? - */ -static inline int netif_rx_reschedule(struct net_device *dev, int undo) +/* Try to reschedule poll. Called by dev->poll() after netif_rx_complete(). */ +static inline int netif_rx_reschedule(struct net_device *dev, + struct napi_struct *napi) { - if (netif_rx_schedule_prep(dev)) { - unsigned long flags; - - dev->quota += undo; - - local_irq_save(flags); - list_add_tail(&dev->poll_list, &__get_cpu_var(softnet_data).poll_list); - __raise_softirq_irqoff(NET_RX_SOFTIRQ); - local_irq_restore(flags); + if (napi_schedule_prep(napi)) { + __netif_rx_schedule(dev, napi); return 1; } return 0; @@ -1003,12 +1269,11 @@ static inline int netif_rx_reschedule(struct net_device *dev, int undo) /* same as netif_rx_complete, except that local_irq_save(flags) * has already been issued */ -static inline void __netif_rx_complete(struct net_device *dev) +static inline void __netif_rx_complete(struct net_device *dev, + struct napi_struct *napi) { - BUG_ON(!test_bit(__LINK_STATE_RX_SCHED, &dev->state)); - list_del(&dev->poll_list); - smp_mb__before_clear_bit(); - clear_bit(__LINK_STATE_RX_SCHED, &dev->state); + __napi_complete(napi); + dev_put(dev); } /* Remove interface from poll list: it must be in the poll list @@ -1016,32 +1281,31 @@ static inline void __netif_rx_complete(struct net_device *dev) * it completes the work. The device cannot be out of poll list at this * moment, it is BUG(). */ -static inline void netif_rx_complete(struct net_device *dev) +static inline void netif_rx_complete(struct net_device *dev, + struct napi_struct *napi) { unsigned long flags; local_irq_save(flags); - __netif_rx_complete(dev); + __netif_rx_complete(dev, napi); local_irq_restore(flags); } -static inline void netif_poll_disable(struct net_device *dev) -{ - while (test_and_set_bit(__LINK_STATE_RX_SCHED, &dev->state)) - /* No hurry. 
*/ - schedule_timeout_interruptible(1); -} - -static inline void netif_poll_enable(struct net_device *dev) +/** + * netif_tx_lock - grab network device transmit lock + * @dev: network device + * + * Get network device transmit lock + */ +static inline void __netif_tx_lock(struct net_device *dev, int cpu) { - smp_mb__before_clear_bit(); - clear_bit(__LINK_STATE_RX_SCHED, &dev->state); + spin_lock(&dev->_xmit_lock); + dev->xmit_lock_owner = cpu; } static inline void netif_tx_lock(struct net_device *dev) { - spin_lock(&dev->_xmit_lock); - dev->xmit_lock_owner = smp_processor_id(); + __netif_tx_lock(dev, smp_processor_id()); } static inline void netif_tx_lock_bh(struct net_device *dev) @@ -1070,6 +1334,18 @@ static inline void netif_tx_unlock_bh(struct net_device *dev) spin_unlock_bh(&dev->_xmit_lock); } +#define HARD_TX_LOCK(dev, cpu) { \ + if ((dev->features & NETIF_F_LLTX) == 0) { \ + __netif_tx_lock(dev, cpu); \ + } \ +} + +#define HARD_TX_UNLOCK(dev) { \ + if ((dev->features & NETIF_F_LLTX) == 0) { \ + netif_tx_unlock(dev); \ + } \ +} + static inline void netif_tx_disable(struct net_device *dev) { netif_tx_lock_bh(dev); @@ -1105,7 +1381,7 @@ extern void dev_set_allmulti(struct net_device *dev, int inc); extern void netdev_state_change(struct net_device *dev); extern void netdev_features_change(struct net_device *dev); /* Load a device via the kmod */ -extern void dev_load(const char *name); +extern void dev_load(struct net *net, const char *name); extern void dev_mcast_init(void); extern int netdev_max_backlog; extern int weight_p; diff --git a/include/linux/netfilter/Kbuild b/include/linux/netfilter/Kbuild index ab57cb7d7c61067fd0bf4f7d0a74a6d348886d60..f2eaea2234ec3a16702fd0d881f4285d48300f73 100644 --- a/include/linux/netfilter/Kbuild +++ b/include/linux/netfilter/Kbuild @@ -40,5 +40,6 @@ unifdef-y += nf_conntrack_common.h unifdef-y += nf_conntrack_ftp.h unifdef-y += nf_conntrack_tcp.h unifdef-y += nfnetlink.h +unifdef-y += nfnetlink_compat.h unifdef-y += x_tables.h unifdef-y += xt_physdev.h diff --git a/include/linux/netfilter/nfnetlink.h b/include/linux/netfilter/nfnetlink.h index 0f9311df1559a212d7b2cd7b73e781e93190ccbc..0d8424f76899f11d5c96dce8d525c1f54fbe4b1a 100644 --- a/include/linux/netfilter/nfnetlink.h +++ b/include/linux/netfilter/nfnetlink.h @@ -1,16 +1,7 @@ #ifndef _NFNETLINK_H #define _NFNETLINK_H #include - -#ifndef __KERNEL__ -/* nfnetlink groups: Up to 32 maximum - backwards compatibility for userspace */ -#define NF_NETLINK_CONNTRACK_NEW 0x00000001 -#define NF_NETLINK_CONNTRACK_UPDATE 0x00000002 -#define NF_NETLINK_CONNTRACK_DESTROY 0x00000004 -#define NF_NETLINK_CONNTRACK_EXP_NEW 0x00000008 -#define NF_NETLINK_CONNTRACK_EXP_UPDATE 0x00000010 -#define NF_NETLINK_CONNTRACK_EXP_DESTROY 0x00000020 -#endif +#include enum nfnetlink_groups { NFNLGRP_NONE, @@ -31,48 +22,6 @@ enum nfnetlink_groups { }; #define NFNLGRP_MAX (__NFNLGRP_MAX - 1) -/* Generic structure for encapsulation optional netfilter information. - * It is reminiscent of sockaddr, but with sa_family replaced - * with attribute type. - * ! This should someday be put somewhere generic as now rtnetlink and - * ! nfnetlink use the same attributes methods. - J. Schulist. 
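
For reference, the HARD_TX_LOCK/HARD_TX_UNLOCK pair added to netdevice.h above is intended to bracket the driver transmit hook, roughly as sketched below. This is a hedged outline only (the real call site in net/core/dev.c carries more error handling), and the caller is assumed to run with bottom halves disabled so that smp_processor_id() is safe:

	static int example_xmit_one(struct sk_buff *skb, struct net_device *dev)
	{
		int rc = NETDEV_TX_BUSY;
		int cpu = smp_processor_id();

		/* Drivers advertising NETIF_F_LLTX do their own locking, so
		 * the macros only take _xmit_lock for everyone else. */
		HARD_TX_LOCK(dev, cpu);
		if (!netif_queue_stopped(dev))
			rc = dev->hard_start_xmit(skb, dev);
		HARD_TX_UNLOCK(dev);

		return rc;
	}
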
- */ - -struct nfattr -{ - u_int16_t nfa_len; - u_int16_t nfa_type; /* we use 15 bits for the type, and the highest - * bit to indicate whether the payload is nested */ -}; - -/* FIXME: Apart from NFNL_NFA_NESTED shamelessly copy and pasted from - * rtnetlink.h, it's time to put this in a generic file */ - -#define NFNL_NFA_NEST 0x8000 -#define NFA_TYPE(attr) ((attr)->nfa_type & 0x7fff) - -#define NFA_ALIGNTO 4 -#define NFA_ALIGN(len) (((len) + NFA_ALIGNTO - 1) & ~(NFA_ALIGNTO - 1)) -#define NFA_OK(nfa,len) ((len) > 0 && (nfa)->nfa_len >= sizeof(struct nfattr) \ - && (nfa)->nfa_len <= (len)) -#define NFA_NEXT(nfa,attrlen) ((attrlen) -= NFA_ALIGN((nfa)->nfa_len), \ - (struct nfattr *)(((char *)(nfa)) + NFA_ALIGN((nfa)->nfa_len))) -#define NFA_LENGTH(len) (NFA_ALIGN(sizeof(struct nfattr)) + (len)) -#define NFA_SPACE(len) NFA_ALIGN(NFA_LENGTH(len)) -#define NFA_DATA(nfa) ((void *)(((char *)(nfa)) + NFA_LENGTH(0))) -#define NFA_PAYLOAD(nfa) ((int)((nfa)->nfa_len) - NFA_LENGTH(0)) -#define NFA_NEST(skb, type) \ -({ struct nfattr *__start = (struct nfattr *)skb_tail_pointer(skb); \ - NFA_PUT(skb, (NFNL_NFA_NEST | type), 0, NULL); \ - __start; }) -#define NFA_NEST_END(skb, start) \ -({ (start)->nfa_len = skb_tail_pointer(skb) - (unsigned char *)(start); \ - (skb)->len; }) -#define NFA_NEST_CANCEL(skb, start) \ -({ if (start) \ - skb_trim(skb, (unsigned char *) (start) - (skb)->data); \ - -1; }) - /* General form of address family dependent message. */ struct nfgenmsg { @@ -83,10 +32,6 @@ struct nfgenmsg { #define NFNETLINK_V0 0 -#define NFM_NFA(n) ((struct nfattr *)(((char *)(n)) \ - + NLMSG_ALIGN(sizeof(struct nfgenmsg)))) -#define NFM_PAYLOAD(n) NLMSG_PAYLOAD(n, sizeof(struct nfgenmsg)) - /* netfilter netlink message types are split in two pieces: * 8 bit subsystem, 8bit operation. 
*/ @@ -107,49 +52,26 @@ struct nfgenmsg { #include #include +#include struct nfnl_callback { int (*call)(struct sock *nl, struct sk_buff *skb, - struct nlmsghdr *nlh, struct nfattr *cda[]); - u_int16_t attr_count; /* number of nfattr's */ + struct nlmsghdr *nlh, struct nlattr *cda[]); + const struct nla_policy *policy; /* netlink attribute policy */ + const u_int16_t attr_count; /* number of nlattr's */ }; struct nfnetlink_subsystem { const char *name; - __u8 subsys_id; /* nfnetlink subsystem ID */ - __u8 cb_count; /* number of callbacks */ - struct nfnl_callback *cb; /* callback for individual types */ + __u8 subsys_id; /* nfnetlink subsystem ID */ + __u8 cb_count; /* number of callbacks */ + const struct nfnl_callback *cb; /* callback for individual types */ }; -extern void __nfa_fill(struct sk_buff *skb, int attrtype, - int attrlen, const void *data); -#define NFA_PUT(skb, attrtype, attrlen, data) \ -({ if (skb_tailroom(skb) < (int)NFA_SPACE(attrlen)) goto nfattr_failure; \ - __nfa_fill(skb, attrtype, attrlen, data); }) - -extern int nfnetlink_subsys_register(struct nfnetlink_subsystem *n); -extern int nfnetlink_subsys_unregister(struct nfnetlink_subsystem *n); - -extern void nfattr_parse(struct nfattr *tb[], int maxattr, - struct nfattr *nfa, int len); - -#define nfattr_parse_nested(tb, max, nfa) \ - nfattr_parse((tb), (max), NFA_DATA((nfa)), NFA_PAYLOAD((nfa))) - -#define nfattr_bad_size(tb, max, cta_min) \ -({ int __i, __res = 0; \ - for (__i=0; __infa_type & 0x7fff) + +#define NFA_ALIGNTO 4 +#define NFA_ALIGN(len) (((len) + NFA_ALIGNTO - 1) & ~(NFA_ALIGNTO - 1)) +#define NFA_OK(nfa,len) ((len) > 0 && (nfa)->nfa_len >= sizeof(struct nfattr) \ + && (nfa)->nfa_len <= (len)) +#define NFA_NEXT(nfa,attrlen) ((attrlen) -= NFA_ALIGN((nfa)->nfa_len), \ + (struct nfattr *)(((char *)(nfa)) + NFA_ALIGN((nfa)->nfa_len))) +#define NFA_LENGTH(len) (NFA_ALIGN(sizeof(struct nfattr)) + (len)) +#define NFA_SPACE(len) NFA_ALIGN(NFA_LENGTH(len)) +#define NFA_DATA(nfa) ((void *)(((char *)(nfa)) + NFA_LENGTH(0))) +#define NFA_PAYLOAD(nfa) ((int)((nfa)->nfa_len) - NFA_LENGTH(0)) +#define NFA_NEST(skb, type) \ +({ struct nfattr *__start = (struct nfattr *)skb_tail_pointer(skb); \ + NFA_PUT(skb, (NFNL_NFA_NEST | type), 0, NULL); \ + __start; }) +#define NFA_NEST_END(skb, start) \ +({ (start)->nfa_len = skb_tail_pointer(skb) - (unsigned char *)(start); \ + (skb)->len; }) +#define NFA_NEST_CANCEL(skb, start) \ +({ if (start) \ + skb_trim(skb, (unsigned char *) (start) - (skb)->data); \ + -1; }) + +#define NFM_NFA(n) ((struct nfattr *)(((char *)(n)) \ + + NLMSG_ALIGN(sizeof(struct nfgenmsg)))) +#define NFM_PAYLOAD(n) NLMSG_PAYLOAD(n, sizeof(struct nfgenmsg)) + +#endif /* ! 
__KERNEL__ */ +#endif /* _NFNETLINK_COMPAT_H */ diff --git a/include/linux/netfilter/nfnetlink_conntrack.h b/include/linux/netfilter/nfnetlink_conntrack.h index d7c35039721eb5c0cb117af01b13396cd845a21f..4affa3fe78e0dbd0cd68e0fb3a2afc0d5c40675b 100644 --- a/include/linux/netfilter/nfnetlink_conntrack.h +++ b/include/linux/netfilter/nfnetlink_conntrack.h @@ -36,6 +36,7 @@ enum ctattr_type { CTA_USE, CTA_ID, CTA_NAT_DST, + CTA_TUPLE_MASTER, __CTA_MAX }; #define CTA_MAX (__CTA_MAX - 1) diff --git a/include/linux/netfilter/xt_time.h b/include/linux/netfilter/xt_time.h new file mode 100644 index 0000000000000000000000000000000000000000..14b6df412c9fa1674267be11f0c3ca698b5411aa --- /dev/null +++ b/include/linux/netfilter/xt_time.h @@ -0,0 +1,25 @@ +#ifndef _XT_TIME_H +#define _XT_TIME_H 1 + +struct xt_time_info { + u_int32_t date_start; + u_int32_t date_stop; + u_int32_t daytime_start; + u_int32_t daytime_stop; + u_int32_t monthdays_match; + u_int8_t weekdays_match; + u_int8_t flags; +}; + +enum { + /* Match against local time (instead of UTC) */ + XT_TIME_LOCAL_TZ = 1 << 0, + + /* Shortcuts */ + XT_TIME_ALL_MONTHDAYS = 0xFFFFFFFE, + XT_TIME_ALL_WEEKDAYS = 0xFE, + XT_TIME_MIN_DAYTIME = 0, + XT_TIME_MAX_DAYTIME = 24 * 60 * 60 - 1, +}; + +#endif /* _XT_TIME_H */ diff --git a/include/linux/netlink.h b/include/linux/netlink.h index 83d8239f0cce60ae79b79b7adaab7670073839cc..7c1f3b1d2ee5a3e3b6c6cda0794a4847e6e044ae 100644 --- a/include/linux/netlink.h +++ b/include/linux/netlink.h @@ -27,6 +27,8 @@ #define MAX_LINKS 32 +struct net; + struct sockaddr_nl { sa_family_t nl_family; /* AF_NETLINK */ @@ -129,6 +131,20 @@ struct nlattr __u16 nla_type; }; +/* + * nla_type (16 bits) + * +---+---+-------------------------------+ + * | N | O | Attribute Type | + * +---+---+-------------------------------+ + * N := Carries nested attributes + * O := Payload stored in network byte order + * + * Note: The N and O flag are mutually exclusive. + */ +#define NLA_F_NESTED (1 << 15) +#define NLA_F_NET_BYTEORDER (1 << 14) +#define NLA_TYPE_MASK ~(NLA_F_NESTED | NLA_F_NET_BYTEORDER) + #define NLA_ALIGNTO 4 #define NLA_ALIGN(len) (((len) + NLA_ALIGNTO - 1) & ~(NLA_ALIGNTO - 1)) #define NLA_HDRLEN ((int) NLA_ALIGN(sizeof(struct nlattr))) @@ -157,8 +173,9 @@ struct netlink_skb_parms #define NETLINK_CREDS(skb) (&NETLINK_CB((skb)).creds) -extern struct sock *netlink_kernel_create(int unit, unsigned int groups, - void (*input)(struct sock *sk, int len), +extern struct sock *netlink_kernel_create(struct net *net, + int unit,unsigned int groups, + void (*input)(struct sk_buff *skb), struct mutex *cb_mutex, struct module *module); extern int netlink_change_ngroups(struct sock *sk, unsigned int groups); @@ -177,7 +194,7 @@ struct sock *netlink_getsockbyfilp(struct file *filp); int netlink_attachskb(struct sock *sk, struct sk_buff *skb, int nonblock, long timeo, struct sock *ssk); void netlink_detachskb(struct sock *sk, struct sk_buff *skb); -int netlink_sendskb(struct sock *sk, struct sk_buff *skb, int protocol); +int netlink_sendskb(struct sock *sk, struct sk_buff *skb); /* * skb should fit one page. This choice is good for headerless malloc. 
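As a rough usage sketch of the attribute flag bits added to netlink.h above: the two high bits of nla_type carry the flags, so code comparing attribute types is expected to mask them off with NLA_TYPE_MASK first. The helper names here are illustrative only.

#include <linux/netlink.h>

/* Illustrative only: NLA_TYPE_MASK clears NLA_F_NESTED and
 * NLA_F_NET_BYTEORDER, leaving the per-family attribute type. */
static int example_attr_type(const struct nlattr *nla)
{
	return nla->nla_type & NLA_TYPE_MASK;
}

static int example_attr_is_nested(const struct nlattr *nla)
{
	return !!(nla->nla_type & NLA_F_NESTED);
}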
@@ -206,6 +223,7 @@ struct netlink_callback struct netlink_notify { + struct net *net; int pid; int protocol; }; diff --git a/include/linux/netpoll.h b/include/linux/netpoll.h index 29930b71a9aab0b263a470c74ba49171c466d31c..20250d963d72014a81a687ca7652bad4a873bceb 100644 --- a/include/linux/netpoll.h +++ b/include/linux/netpoll.h @@ -25,8 +25,6 @@ struct netpoll { struct netpoll_info { atomic_t refcnt; - spinlock_t poll_lock; - int poll_owner; int rx_flags; spinlock_t rx_lock; struct netpoll *rx_np; /* netpoll that registered an rx_hook */ @@ -37,6 +35,7 @@ struct netpoll_info { void netpoll_poll(struct netpoll *np); void netpoll_send_udp(struct netpoll *np, const char *msg, int len); +void netpoll_print_options(struct netpoll *np); int netpoll_parse_options(struct netpoll *np, char *opt); int netpoll_setup(struct netpoll *np); int netpoll_trap(void); @@ -64,32 +63,61 @@ static inline int netpoll_rx(struct sk_buff *skb) return ret; } -static inline void *netpoll_poll_lock(struct net_device *dev) +static inline int netpoll_receive_skb(struct sk_buff *skb) { + if (!list_empty(&skb->dev->napi_list)) + return netpoll_rx(skb); + return 0; +} + +static inline void *netpoll_poll_lock(struct napi_struct *napi) +{ + struct net_device *dev = napi->dev; + rcu_read_lock(); /* deal with race on ->npinfo */ - if (dev->npinfo) { - spin_lock(&dev->npinfo->poll_lock); - dev->npinfo->poll_owner = smp_processor_id(); - return dev->npinfo; + if (dev && dev->npinfo) { + spin_lock(&napi->poll_lock); + napi->poll_owner = smp_processor_id(); + return napi; } return NULL; } static inline void netpoll_poll_unlock(void *have) { - struct netpoll_info *npi = have; + struct napi_struct *napi = have; - if (npi) { - npi->poll_owner = -1; - spin_unlock(&npi->poll_lock); + if (napi) { + napi->poll_owner = -1; + spin_unlock(&napi->poll_lock); } rcu_read_unlock(); } +static inline void netpoll_netdev_init(struct net_device *dev) +{ + INIT_LIST_HEAD(&dev->napi_list); +} + #else -#define netpoll_rx(a) 0 -#define netpoll_poll_lock(a) NULL -#define netpoll_poll_unlock(a) +static inline int netpoll_rx(struct sk_buff *skb) +{ + return 0; +} +static inline int netpoll_receive_skb(struct sk_buff *skb) +{ + return 0; +} +static inline void *netpoll_poll_lock(struct napi_struct *napi) +{ + return NULL; +} +static inline void netpoll_poll_unlock(void *have) +{ +} +static inline void netpoll_netdev_init(struct net_device *dev) +{ +} #endif #endif diff --git a/include/linux/nl80211.h b/include/linux/nl80211.h index 9a30ba2ca75eed3dcdb1709010fe20204e81335e..538ee1dd3d0af43959a44d1a7b37a549eb128162 100644 --- a/include/linux/nl80211.h +++ b/include/linux/nl80211.h @@ -6,8 +6,98 @@ * Copyright 2006, 2007 Johannes Berg */ +/** + * enum nl80211_commands - supported nl80211 commands + * + * @NL80211_CMD_UNSPEC: unspecified command to catch errors + * + * @NL80211_CMD_GET_WIPHY: request information about a wiphy or dump request + * to get a list of all present wiphys. + * @NL80211_CMD_SET_WIPHY: set wiphy name, needs %NL80211_ATTR_WIPHY and + * %NL80211_ATTR_WIPHY_NAME. + * @NL80211_CMD_NEW_WIPHY: Newly created wiphy, response to get request + * or rename notification. Has attributes %NL80211_ATTR_WIPHY and + * %NL80211_ATTR_WIPHY_NAME. + * @NL80211_CMD_DEL_WIPHY: Wiphy deleted. Has attributes + * %NL80211_ATTR_WIPHY and %NL80211_ATTR_WIPHY_NAME. + * + * @NL80211_CMD_GET_INTERFACE: Request an interface's configuration; + * either a dump request on a %NL80211_ATTR_WIPHY or a specific get + * on an %NL80211_ATTR_IFINDEX is supported. 
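A minimal sketch of how the per-NAPI netpoll lock above is meant to bracket a poll call, assuming a napi_struct with the dev, poll_lock, poll_owner and poll members implied by the inline helpers; budget accounting and scheduling are simplified away.

#include <linux/netdevice.h>
#include <linux/netpoll.h>

/* Illustrative only: take the netpoll lock for this napi_struct,
 * run its poll handler, then release the lock token. */
static int example_run_poll(struct napi_struct *napi, int budget)
{
	void *have = netpoll_poll_lock(napi);	/* NULL when netpoll is unused */
	int work = napi->poll(napi, budget);

	netpoll_poll_unlock(have);
	return work;
}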
+ * @NL80211_CMD_SET_INTERFACE: Set type of a virtual interface, requires + * %NL80211_ATTR_IFINDEX and %NL80211_ATTR_IFTYPE. + * @NL80211_CMD_NEW_INTERFACE: Newly created virtual interface or response + * to %NL80211_CMD_GET_INTERFACE. Has %NL80211_ATTR_IFINDEX, + * %NL80211_ATTR_WIPHY and %NL80211_ATTR_IFTYPE attributes. Can also + * be sent from userspace to request creation of a new virtual interface, + * then requires attributes %NL80211_ATTR_WIPHY, %NL80211_ATTR_IFTYPE and + * %NL80211_ATTR_IFNAME. + * @NL80211_CMD_DEL_INTERFACE: Virtual interface was deleted, has attributes + * %NL80211_ATTR_IFINDEX and %NL80211_ATTR_WIPHY. Can also be sent from + * userspace to request deletion of a virtual interface, then requires + * attribute %NL80211_ATTR_IFINDEX. + * + * @NL80211_CMD_MAX: highest used command number + * @__NL80211_CMD_AFTER_LAST: internal use + */ +enum nl80211_commands { +/* don't change the order or add anything inbetween, this is ABI! */ + NL80211_CMD_UNSPEC, + + NL80211_CMD_GET_WIPHY, /* can dump */ + NL80211_CMD_SET_WIPHY, + NL80211_CMD_NEW_WIPHY, + NL80211_CMD_DEL_WIPHY, + + NL80211_CMD_GET_INTERFACE, /* can dump */ + NL80211_CMD_SET_INTERFACE, + NL80211_CMD_NEW_INTERFACE, + NL80211_CMD_DEL_INTERFACE, + + /* add commands here */ + + /* used to define NL80211_CMD_MAX below */ + __NL80211_CMD_AFTER_LAST, + NL80211_CMD_MAX = __NL80211_CMD_AFTER_LAST - 1 +}; + + +/** + * enum nl80211_attrs - nl80211 netlink attributes + * + * @NL80211_ATTR_UNSPEC: unspecified attribute to catch errors + * + * @NL80211_ATTR_WIPHY: index of wiphy to operate on, cf. + * /sys/class/ieee80211//index + * @NL80211_ATTR_WIPHY_NAME: wiphy name (used for renaming) + * + * @NL80211_ATTR_IFINDEX: network interface index of the device to operate on + * @NL80211_ATTR_IFNAME: network interface name + * @NL80211_ATTR_IFTYPE: type of virtual interface, see &enum nl80211_iftype + * + * @NL80211_ATTR_MAX: highest attribute number currently defined + * @__NL80211_ATTR_AFTER_LAST: internal use + */ +enum nl80211_attrs { +/* don't change the order or add anything inbetween, this is ABI! */ + NL80211_ATTR_UNSPEC, + + NL80211_ATTR_WIPHY, + NL80211_ATTR_WIPHY_NAME, + + NL80211_ATTR_IFINDEX, + NL80211_ATTR_IFNAME, + NL80211_ATTR_IFTYPE, + + /* add attributes here, update the policy in nl80211.c */ + + __NL80211_ATTR_AFTER_LAST, + NL80211_ATTR_MAX = __NL80211_ATTR_AFTER_LAST - 1 +}; + /** * enum nl80211_iftype - (virtual) interface types + * * @NL80211_IFTYPE_UNSPECIFIED: unspecified type, driver decides * @NL80211_IFTYPE_ADHOC: independent BSS member * @NL80211_IFTYPE_STATION: managed BSS member @@ -15,9 +105,10 @@ * @NL80211_IFTYPE_AP_VLAN: VLAN interface for access points * @NL80211_IFTYPE_WDS: wireless distribution interface * @NL80211_IFTYPE_MONITOR: monitor interface receiving all frames + * @NL80211_IFTYPE_MAX: highest interface type number currently defined * @__NL80211_IFTYPE_AFTER_LAST: internal use * - * These values are used with the NL80211_ATTR_IFTYPE + * These values are used with the %NL80211_ATTR_IFTYPE * to set the type of an interface. 
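The "update the policy in nl80211.c" comment above refers to an nla_policy table keyed by these attributes; a purely illustrative sketch follows (the real table lives in net/wireless/nl80211.c and may differ, e.g. in maximum string lengths).

#include <net/netlink.h>
#include <linux/if.h>
#include <linux/nl80211.h>

/* Illustrative policy table for the attributes defined above. */
static const struct nla_policy example_nl80211_policy[NL80211_ATTR_MAX + 1] = {
	[NL80211_ATTR_WIPHY]      = { .type = NLA_U32 },
	[NL80211_ATTR_WIPHY_NAME] = { .type = NLA_NUL_STRING },
	[NL80211_ATTR_IFINDEX]    = { .type = NLA_U32 },
	[NL80211_ATTR_IFNAME]     = { .type = NLA_NUL_STRING, .len = IFNAMSIZ - 1 },
	[NL80211_ATTR_IFTYPE]     = { .type = NLA_U32 },
};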
* */ @@ -31,8 +122,8 @@ enum nl80211_iftype { NL80211_IFTYPE_MONITOR, /* keep last */ - __NL80211_IFTYPE_AFTER_LAST + __NL80211_IFTYPE_AFTER_LAST, + NL80211_IFTYPE_MAX = __NL80211_IFTYPE_AFTER_LAST - 1 }; -#define NL80211_IFTYPE_MAX (__NL80211_IFTYPE_AFTER_LAST - 1) #endif /* __LINUX_NL80211_H */ diff --git a/include/linux/nsproxy.h b/include/linux/nsproxy.h index ce06188b7a56796cb63736a7d03f62f7c623b10c..bec4485e3d76d0a91cba665b4716e5a6480bb42e 100644 --- a/include/linux/nsproxy.h +++ b/include/linux/nsproxy.h @@ -29,6 +29,7 @@ struct nsproxy { struct mnt_namespace *mnt_ns; struct pid_namespace *pid_ns; struct user_namespace *user_ns; + struct net *net_ns; }; extern struct nsproxy init_nsproxy; diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h index cfee06bca2d3e615c7bfcc6c2b3d6d670f666496..8acae4eeaa763373f6908c52189218c228e5f23d 100644 --- a/include/linux/pci_ids.h +++ b/include/linux/pci_ids.h @@ -1848,6 +1848,8 @@ #define PCI_VENDOR_ID_ABOCOM 0x13D1 #define PCI_DEVICE_ID_ABOCOM_2BD1 0x2BD1 +#define PCI_VENDOR_ID_SUNDANCE 0x13f0 + #define PCI_VENDOR_ID_CMEDIA 0x13f6 #define PCI_DEVICE_ID_CMEDIA_CM8338A 0x0100 #define PCI_DEVICE_ID_CMEDIA_CM8338B 0x0101 @@ -1955,8 +1957,12 @@ #define PCI_DEVICE_ID_TIGON3_5751M 0x167d #define PCI_DEVICE_ID_TIGON3_5751F 0x167e #define PCI_DEVICE_ID_TIGON3_5787F 0x167f +#define PCI_DEVICE_ID_TIGON3_5761E 0x1680 +#define PCI_DEVICE_ID_TIGON3_5761 0x1681 +#define PCI_DEVICE_ID_TIGON3_5764 0x1684 #define PCI_DEVICE_ID_TIGON3_5787M 0x1693 #define PCI_DEVICE_ID_TIGON3_5782 0x1696 +#define PCI_DEVICE_ID_TIGON3_5784 0x1698 #define PCI_DEVICE_ID_TIGON3_5786 0x169a #define PCI_DEVICE_ID_TIGON3_5787 0x169b #define PCI_DEVICE_ID_TIGON3_5788 0x169c @@ -2130,6 +2136,11 @@ #define PCI_VENDOR_ID_TEKRAM 0x1de1 #define PCI_DEVICE_ID_TEKRAM_DC290 0xdc29 +#define PCI_VENDOR_ID_TEHUTI 0x1fc9 +#define PCI_DEVICE_ID_TEHUTI_3009 0x3009 +#define PCI_DEVICE_ID_TEHUTI_3010 0x3010 +#define PCI_DEVICE_ID_TEHUTI_3014 0x3014 + #define PCI_VENDOR_ID_HINT 0x3388 #define PCI_DEVICE_ID_HINT_VXPROII_IDE 0x8013 diff --git a/include/linux/pci_regs.h b/include/linux/pci_regs.h index 495d368390e0b00b69b3492ed1ee25338c5c56a6..423d592c55d5a6827e6b3719b0a7b7e570e2099d 100644 --- a/include/linux/pci_regs.h +++ b/include/linux/pci_regs.h @@ -316,7 +316,20 @@ #define PCI_X_CMD 2 /* Modes & Features */ #define PCI_X_CMD_DPERR_E 0x0001 /* Data Parity Error Recovery Enable */ #define PCI_X_CMD_ERO 0x0002 /* Enable Relaxed Ordering */ +#define PCI_X_CMD_READ_512 0x0000 /* 512 byte maximum read byte count */ +#define PCI_X_CMD_READ_1K 0x0004 /* 1Kbyte maximum read byte count */ +#define PCI_X_CMD_READ_2K 0x0008 /* 2Kbyte maximum read byte count */ +#define PCI_X_CMD_READ_4K 0x000c /* 4Kbyte maximum read byte count */ #define PCI_X_CMD_MAX_READ 0x000c /* Max Memory Read Byte Count */ + /* Max # of outstanding split transactions */ +#define PCI_X_CMD_SPLIT_1 0x0000 /* Max 1 */ +#define PCI_X_CMD_SPLIT_2 0x0010 /* Max 2 */ +#define PCI_X_CMD_SPLIT_3 0x0020 /* Max 3 */ +#define PCI_X_CMD_SPLIT_4 0x0030 /* Max 4 */ +#define PCI_X_CMD_SPLIT_8 0x0040 /* Max 8 */ +#define PCI_X_CMD_SPLIT_12 0x0050 /* Max 12 */ +#define PCI_X_CMD_SPLIT_16 0x0060 /* Max 16 */ +#define PCI_X_CMD_SPLIT_32 0x0070 /* Max 32 */ #define PCI_X_CMD_MAX_SPLIT 0x0070 /* Max Outstanding Split Transactions */ #define PCI_X_CMD_VERSION(x) (((x) >> 12) & 3) /* Version */ #define PCI_X_STATUS 4 /* PCI-X capabilities */ diff --git a/include/linux/phy.h b/include/linux/phy.h index 
2a659789f9ca10bcca00497dcfde170023906475..f0742b6aaa649ba49b6a9a8543bcd4083bb12b1e 100644 --- a/include/linux/phy.h +++ b/include/linux/phy.h @@ -25,6 +25,8 @@ #include #include +#include + #define PHY_BASIC_FEATURES (SUPPORTED_10baseT_Half | \ SUPPORTED_10baseT_Full | \ SUPPORTED_100baseT_Half | \ @@ -281,6 +283,7 @@ struct phy_device { /* Interrupt and Polling infrastructure */ struct work_struct phy_queue; struct timer_list phy_timer; + atomic_t irq_disable; spinlock_t lock; diff --git a/include/linux/phy_fixed.h b/include/linux/phy_fixed.h new file mode 100644 index 0000000000000000000000000000000000000000..04ba70d49fb8a99c17b966d7171428c020d59361 --- /dev/null +++ b/include/linux/phy_fixed.h @@ -0,0 +1,38 @@ +#ifndef __PHY_FIXED_H +#define __PHY_FIXED_H + +#define MII_REGS_NUM 29 + +/* max number of virtual phy stuff */ +#define MAX_PHY_AMNT 10 +/* + The idea is to emulate normal phy behavior by responding with + pre-defined values to mii BMCR read, so that read_status hook could + take all the needed info. +*/ + +struct fixed_phy_status { + u8 link; + u16 speed; + u8 duplex; +}; + +/*----------------------------------------------------------------------------- + * Private information hoder for mii_bus + *-----------------------------------------------------------------------------*/ +struct fixed_info { + u16 *regs; + u8 regs_num; + struct fixed_phy_status phy_status; + struct phy_device *phydev; /* pointer to the container */ + /* link & speed cb */ + int (*link_update) (struct net_device *, struct fixed_phy_status *); + +}; + + +int fixed_mdio_set_link_update(struct phy_device *, + int (*link_update) (struct net_device *, struct fixed_phy_status *)); +struct fixed_info *fixed_mdio_get_phydev (int phydev_ind); + +#endif /* __PHY_FIXED_H */ diff --git a/include/linux/pkt_sched.h b/include/linux/pkt_sched.h index 268c51599eb8dc9343ffcdc6ef76e161858f6772..919af93b70596e70a06bdd10363c1d57a1aa50f4 100644 --- a/include/linux/pkt_sched.h +++ b/include/linux/pkt_sched.h @@ -77,8 +77,8 @@ struct tc_ratespec { unsigned char cell_log; unsigned char __reserved; - unsigned short feature; - short addend; + unsigned short overhead; + short cell_align; unsigned short mpu; __u32 rate; }; diff --git a/include/linux/proc_fs.h b/include/linux/proc_fs.h index cd13a78c5db84584c59c130899604c2dfeb40d73..20741f668f7bebbba1cd018a07bab5810ae93428 100644 --- a/include/linux/proc_fs.h +++ b/include/linux/proc_fs.h @@ -7,6 +7,7 @@ #include #include +struct net; struct completion; /* @@ -97,8 +98,6 @@ struct vmcore { extern struct proc_dir_entry proc_root; extern struct proc_dir_entry *proc_root_fs; -extern struct proc_dir_entry *proc_net; -extern struct proc_dir_entry *proc_net_stat; extern struct proc_dir_entry *proc_bus; extern struct proc_dir_entry *proc_root_driver; extern struct proc_dir_entry *proc_root_kcore; @@ -192,36 +191,21 @@ static inline struct proc_dir_entry *create_proc_info_entry(const char *name, if (res) res->get_info=get_info; return res; } - -static inline struct proc_dir_entry *proc_net_create(const char *name, - mode_t mode, get_info_t *get_info) -{ - return create_proc_info_entry(name,mode,proc_net,get_info); -} -static inline struct proc_dir_entry *proc_net_fops_create(const char *name, - mode_t mode, const struct file_operations *fops) -{ - struct proc_dir_entry *res = create_proc_entry(name, mode, proc_net); - if (res) - res->proc_fops = fops; - return res; -} - -static inline void proc_net_remove(const char *name) -{ - remove_proc_entry(name,proc_net); -} +extern struct proc_dir_entry 
*proc_net_create(struct net *net, + const char *name, mode_t mode, get_info_t *get_info); +extern struct proc_dir_entry *proc_net_fops_create(struct net *net, + const char *name, mode_t mode, const struct file_operations *fops); +extern void proc_net_remove(struct net *net, const char *name); #else #define proc_root_driver NULL -#define proc_net NULL #define proc_bus NULL -#define proc_net_fops_create(name, mode, fops) ({ (void)(mode), NULL; }) -#define proc_net_create(name, mode, info) ({ (void)(mode), NULL; }) -static inline void proc_net_remove(const char *name) {} +#define proc_net_fops_create(net, name, mode, fops) ({ (void)(mode), NULL; }) +#define proc_net_create(net, name, mode, info) ({ (void)(mode), NULL; }) +static inline void proc_net_remove(struct net *net, const char *name) {} static inline void proc_flush_task(struct task_struct *task) { } @@ -281,6 +265,13 @@ static inline struct proc_dir_entry *PDE(const struct inode *inode) return PROC_I(inode)->pde; } +static inline struct net *PDE_NET(struct proc_dir_entry *pde) +{ + return pde->parent->data; +} + +struct net *get_proc_net(const struct inode *inode); + struct proc_maps_private { struct pid *pid; struct task_struct *task; diff --git a/include/linux/rfkill.h b/include/linux/rfkill.h index a8a6ea809da096d19fbb0d164c986be671dcef12..0ce5e0b52dbdef816607be8beae4c1b620b8b95a 100644 --- a/include/linux/rfkill.h +++ b/include/linux/rfkill.h @@ -2,7 +2,7 @@ #define __RFKILL_H /* - * Copyright (C) 2006 Ivo van Doorn + * Copyright (C) 2006 - 2007 Ivo van Doorn * Copyright (C) 2007 Dmitry Torokhov * * This program is free software; you can redistribute it and/or modify @@ -26,18 +26,19 @@ #include #include #include +#include /** * enum rfkill_type - type of rfkill switch. - * RFKILL_TYPE_WLAN: switch is no a Wireless network devices. - * RFKILL_TYPE_BlUETOOTH: switch is on a bluetooth device. - * RFKILL_TYPE_IRDA: switch is on an infrared devices. + * RFKILL_TYPE_WLAN: switch is on a 802.11 wireless network device. + * RFKILL_TYPE_BLUETOOTH: switch is on a bluetooth device. + * RFKILL_TYPE_UWB: switch is on a ultra wideband device. */ enum rfkill_type { - RFKILL_TYPE_WLAN = 0, - RFKILL_TYPE_BLUETOOTH = 1, - RFKILL_TYPE_IRDA = 2, - RFKILL_TYPE_MAX = 3, + RFKILL_TYPE_WLAN , + RFKILL_TYPE_BLUETOOTH, + RFKILL_TYPE_UWB, + RFKILL_TYPE_MAX, }; enum rfkill_state { @@ -51,11 +52,14 @@ enum rfkill_state { * @type: Radio type which the button controls, the value stored * here should be a value from enum rfkill_type. * @state: State of the switch (on/off). + * @user_claim_unsupported: Whether the hardware supports exclusive + * RF-kill control by userspace. Set this before registering. * @user_claim: Set when the switch is controlled exlusively by userspace. * @mutex: Guards switch state transitions * @data: Pointer to the RF button drivers private data which will be * passed along when toggling radio state. * @toggle_radio(): Mandatory handler to control state of the radio. + * @led_trigger: A LED trigger for this button's LED. * @dev: Device structure integrating the switch into device tree. * @node: Used to place switch into list of all switches known to the * the system. 
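A minimal sketch of how a caller adapts to the per-namespace proc helpers declared above; init_net (from net/net_namespace.h) and the entry name are assumptions for the common single-namespace case, and the seq_file fops are left empty.

#include <linux/init.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/proc_fs.h>
#include <net/net_namespace.h>

static const struct file_operations example_fops;	/* seq_file ops omitted */

static int __init example_proc_init(void)
{
	/* The entry now hangs off the /proc/net of the given namespace. */
	if (!proc_net_fops_create(&init_net, "example_stats", 0444, &example_fops))
		return -ENOMEM;
	return 0;
}

static void __exit example_proc_exit(void)
{
	proc_net_remove(&init_net, "example_stats");
}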
@@ -67,6 +71,7 @@ struct rfkill { enum rfkill_type type; enum rfkill_state state; + bool user_claim_unsupported; bool user_claim; struct mutex mutex; @@ -74,6 +79,10 @@ struct rfkill { void *data; int (*toggle_radio)(void *data, enum rfkill_state state); +#ifdef CONFIG_RFKILL_LEDS + struct led_trigger led_trigger; +#endif + struct device dev; struct list_head node; }; @@ -84,6 +93,19 @@ void rfkill_free(struct rfkill *rfkill); int rfkill_register(struct rfkill *rfkill); void rfkill_unregister(struct rfkill *rfkill); -void rfkill_switch_all(enum rfkill_type type, enum rfkill_state state); +/** + * rfkill_get_led_name - Get the LED trigger name for the button's LED. + * This function might return a NULL pointer if registering of the + * LED trigger failed. + * Use this as "default_trigger" for the LED. + */ +static inline char *rfkill_get_led_name(struct rfkill *rfkill) +{ +#ifdef CONFIG_RFKILL_LEDS + return (char *)(rfkill->led_trigger.name); +#else + return NULL; +#endif +} #endif /* RFKILL_H */ diff --git a/include/linux/rtnetlink.h b/include/linux/rtnetlink.h index dff3192374f8f0af665715a12e309d471ae70b97..5bf618241ab910eaf1c92a53026ccc672bf6165a 100644 --- a/include/linux/rtnetlink.h +++ b/include/linux/rtnetlink.h @@ -97,6 +97,9 @@ enum { RTM_SETNEIGHTBL, #define RTM_SETNEIGHTBL RTM_SETNEIGHTBL + RTM_NEWNDUSEROPT = 68, +#define RTM_NEWNDUSEROPT RTM_NEWNDUSEROPT + __RTM_MAX, #define RTM_MAX (((__RTM_MAX + 3) & ~3) - 1) }; @@ -479,6 +482,30 @@ enum #define TCA_RTA(r) ((struct rtattr*)(((char*)(r)) + NLMSG_ALIGN(sizeof(struct tcmsg)))) #define TCA_PAYLOAD(n) NLMSG_PAYLOAD(n,sizeof(struct tcmsg)) +/******************************************************************** + * Neighbor Discovery userland options + ****/ + +struct nduseroptmsg +{ + unsigned char nduseropt_family; + unsigned char nduseropt_pad1; + unsigned short nduseropt_opts_len; /* Total length of options */ + __u8 nduseropt_icmp_type; + __u8 nduseropt_icmp_code; + unsigned short nduseropt_pad2; + /* Followed by one or more ND options */ +}; + +enum +{ + NDUSEROPT_UNSPEC, + NDUSEROPT_SRCADDR, + __NDUSEROPT_MAX +}; + +#define NDUSEROPT_MAX (__NDUSEROPT_MAX - 1) + #ifndef __KERNEL__ /* RTnetlink multicast groups - backwards compatibility for userspace */ #define RTMGRP_LINK 1 @@ -542,6 +569,8 @@ enum rtnetlink_groups { #define RTNLGRP_IPV6_PREFIX RTNLGRP_IPV6_PREFIX RTNLGRP_IPV6_RULE, #define RTNLGRP_IPV6_RULE RTNLGRP_IPV6_RULE + RTNLGRP_ND_USEROPT, +#define RTNLGRP_ND_USEROPT RTNLGRP_ND_USEROPT __RTNLGRP_MAX }; #define RTNLGRP_MAX (__RTNLGRP_MAX - 1) diff --git a/include/linux/sched.h b/include/linux/sched.h index 313c6b6e774f887438c3926c539186ed03e6939e..833f7dc2b8de7ae5a822e15d1ce6c7d97564c9a4 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -27,6 +27,7 @@ #define CLONE_NEWUTS 0x04000000 /* New utsname group? 
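A small sketch of the intended pairing between the rfkill LED trigger above and a LED class device, assuming CONFIG_RFKILL_LEDS and LED triggers are enabled; the function name is illustrative.

#include <linux/rfkill.h>
#include <linux/leds.h>

/* Point the LED at the rfkill trigger so it follows radio state.
 * rfkill_get_led_name() may return NULL if trigger registration
 * failed, which simply leaves the LED without a default trigger. */
static void example_hook_up_led(struct rfkill *rfkill,
				struct led_classdev *led)
{
	led->default_trigger = rfkill_get_led_name(rfkill);
}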
*/ #define CLONE_NEWIPC 0x08000000 /* New ipcs */ #define CLONE_NEWUSER 0x10000000 /* New user namespace */ +#define CLONE_NEWNET 0x40000000 /* New network namespace */ /* * Scheduling policies diff --git a/include/linux/sctp.h b/include/linux/sctp.h index d70df61a029f78a8c2234bde6f6625b4c0f71cf2..5eb38cc0e5a41626a567267aca3809cb5ec114b4 100644 --- a/include/linux/sctp.h +++ b/include/linux/sctp.h @@ -102,6 +102,9 @@ typedef enum { SCTP_CID_ECN_CWR = 13, SCTP_CID_SHUTDOWN_COMPLETE = 14, + /* AUTH Extension Section 4.1 */ + SCTP_CID_AUTH = 0x0F, + /* PR-SCTP Sec 3.2 */ SCTP_CID_FWD_TSN = 0xC0, @@ -180,6 +183,14 @@ typedef enum { SCTP_PARAM_SUPPORTED_ADDRESS_TYPES = __constant_htons(12), SCTP_PARAM_ECN_CAPABLE = __constant_htons(0x8000), + /* AUTH Extension Section 3 */ + SCTP_PARAM_RANDOM = __constant_htons(0x8002), + SCTP_PARAM_CHUNKS = __constant_htons(0x8003), + SCTP_PARAM_HMAC_ALGO = __constant_htons(0x8004), + + /* Add-IP: Supported Extensions, Section 4.2 */ + SCTP_PARAM_SUPPORTED_EXT = __constant_htons(0x8008), + /* PR-SCTP Sec 3.1 */ SCTP_PARAM_FWD_TSN_SUPPORT = __constant_htons(0xc000), @@ -296,6 +307,30 @@ typedef struct sctp_adaptation_ind_param { __be32 adaptation_ind; } __attribute__((packed)) sctp_adaptation_ind_param_t; +/* ADDIP Section 4.2.7 Supported Extensions Parameter */ +typedef struct sctp_supported_ext_param { + struct sctp_paramhdr param_hdr; + __u8 chunks[0]; +} __attribute__((packed)) sctp_supported_ext_param_t; + +/* AUTH Section 3.1 Random */ +typedef struct sctp_random_param { + sctp_paramhdr_t param_hdr; + __u8 random_val[0]; +} __attribute__((packed)) sctp_random_param_t; + +/* AUTH Section 3.2 Chunk List */ +typedef struct sctp_chunks_param { + sctp_paramhdr_t param_hdr; + __u8 chunks[0]; +} __attribute__((packed)) sctp_chunks_param_t; + +/* AUTH Section 3.3 HMAC Algorithm */ +typedef struct sctp_hmac_algo_param { + sctp_paramhdr_t param_hdr; + __be16 hmac_ids[0]; +} __attribute__((packed)) sctp_hmac_algo_param_t; + /* RFC 2960. Section 3.3.3 Initiation Acknowledgement (INIT ACK) (2): * The INIT ACK chunk is used to acknowledge the initiation of an SCTP * association. @@ -462,7 +497,19 @@ typedef enum { SCTP_ERROR_RSRC_LOW = __constant_htons(0x0101), SCTP_ERROR_DEL_SRC_IP = __constant_htons(0x0102), SCTP_ERROR_ASCONF_ACK = __constant_htons(0x0103), - SCTP_ERROR_REQ_REFUSED = __constant_htons(0x0104) + SCTP_ERROR_REQ_REFUSED = __constant_htons(0x0104), + + /* AUTH Section 4. New Error Cause + * + * This section defines a new error cause that will be sent if an AUTH + * chunk is received with an unsupported HMAC identifier. + * illustrates the new error cause. + * + * Cause Code Error Cause Name + * -------------------------------------------------------------- + * 0x0105 Unsupported HMAC Identifier + */ + SCTP_ERROR_UNSUP_HMAC = __constant_htons(0x0105) } sctp_error_t; @@ -600,4 +647,64 @@ typedef struct sctp_addip_chunk { sctp_addiphdr_t addip_hdr; } __attribute__((packed)) sctp_addip_chunk_t; +/* AUTH + * Section 4.1 Authentication Chunk (AUTH) + * + * This chunk is used to hold the result of the HMAC calculation. 
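For the CLONE_NEWNET flag added to sched.h above, a userspace sketch of requesting a fresh network namespace; whether unshare() accepts the flag depends on kernel and libc support, so this is illustrative rather than normative.

#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>

#ifndef CLONE_NEWNET
#define CLONE_NEWNET 0x40000000	/* matches the kernel definition above */
#endif

int main(void)
{
	if (unshare(CLONE_NEWNET) != 0) {
		perror("unshare(CLONE_NEWNET)");
		return 1;
	}
	/* This process now has its own network namespace, initially
	 * containing only a down loopback device. */
	return 0;
}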
+ * + * 0 1 2 3 + * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + * | Type = 0x0F | Flags=0 | Length | + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + * | Shared Key Identifier | HMAC Identifier | + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + * | | + * \ HMAC / + * / \ + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + * + * Type: 1 byte (unsigned integer) + * This value MUST be set to 0x0F for all AUTH-chunks. + * + * Flags: 1 byte (unsigned integer) + * Set to zero on transmit and ignored on receipt. + * + * Length: 2 bytes (unsigned integer) + * This value holds the length of the HMAC in bytes plus 8. + * + * Shared Key Identifier: 2 bytes (unsigned integer) + * This value describes which endpoint pair shared key is used. + * + * HMAC Identifier: 2 bytes (unsigned integer) + * This value describes which message digest is being used. Table 2 + * shows the currently defined values. + * + * The following Table 2 shows the currently defined values for HMAC + * identifiers. + * + * +-----------------+--------------------------+ + * | HMAC Identifier | Message Digest Algorithm | + * +-----------------+--------------------------+ + * | 0 | Reserved | + * | 1 | SHA-1 defined in [8] | + * | 2 | Reserved | + * | 3 | SHA-256 defined in [8] | + * +-----------------+--------------------------+ + * + * + * HMAC: n bytes (unsigned integer) This hold the result of the HMAC + * calculation. + */ +typedef struct sctp_authhdr { + __be16 shkey_id; + __be16 hmac_id; + __u8 hmac[0]; +} __attribute__((packed)) sctp_authhdr_t; + +typedef struct sctp_auth_chunk { + sctp_chunkhdr_t chunk_hdr; + sctp_authhdr_t auth_hdr; +} __attribute__((packed)) sctp_auth_chunk_t; + #endif /* __LINUX_SCTP_H__ */ diff --git a/include/linux/seq_file.h b/include/linux/seq_file.h index 83783ab0f5524d7ad5e4e3092aa1790cdec527c9..8bf1e05115b42c341de6e9c1e0af615c897e656c 100644 --- a/include/linux/seq_file.h +++ b/include/linux/seq_file.h @@ -46,6 +46,8 @@ int seq_path(struct seq_file *, struct vfsmount *, struct dentry *, char *); int single_open(struct file *, int (*)(struct seq_file *, void *), void *); int single_release(struct inode *, struct file *); +void *__seq_open_private(struct file *, const struct seq_operations *, int); +int seq_open_private(struct file *, const struct seq_operations *, int); int seq_release_private(struct inode *, struct file *); #define SEQ_START_TOKEN ((void *)1) diff --git a/include/linux/snmp.h b/include/linux/snmp.h index 802b3a38b0416e3f00d8057eb6182307e0a74be4..89f0c2b5f405d27e15d9defd989a859f2434772c 100644 --- a/include/linux/snmp.h +++ b/include/linux/snmp.h @@ -82,6 +82,8 @@ enum __ICMP_MIB_MAX }; +#define __ICMPMSG_MIB_MAX 512 /* Out+In for all 8-bit ICMP types */ + /* icmp6 mib definitions */ /* * RFC 2466: ICMPv6-MIB @@ -91,35 +93,12 @@ enum ICMP6_MIB_NUM = 0, ICMP6_MIB_INMSGS, /* InMsgs */ ICMP6_MIB_INERRORS, /* InErrors */ - ICMP6_MIB_INDESTUNREACHS, /* InDestUnreachs */ - ICMP6_MIB_INPKTTOOBIGS, /* InPktTooBigs */ - ICMP6_MIB_INTIMEEXCDS, /* InTimeExcds */ - ICMP6_MIB_INPARMPROBLEMS, /* InParmProblems */ - ICMP6_MIB_INECHOS, /* InEchos */ - ICMP6_MIB_INECHOREPLIES, /* InEchoReplies */ - ICMP6_MIB_INGROUPMEMBQUERIES, /* InGroupMembQueries */ - ICMP6_MIB_INGROUPMEMBRESPONSES, /* InGroupMembResponses */ - ICMP6_MIB_INGROUPMEMBREDUCTIONS, /* InGroupMembReductions */ - ICMP6_MIB_INROUTERSOLICITS, /* InRouterSolicits */ - 
ICMP6_MIB_INROUTERADVERTISEMENTS, /* InRouterAdvertisements */ - ICMP6_MIB_INNEIGHBORSOLICITS, /* InNeighborSolicits */ - ICMP6_MIB_INNEIGHBORADVERTISEMENTS, /* InNeighborAdvertisements */ - ICMP6_MIB_INREDIRECTS, /* InRedirects */ ICMP6_MIB_OUTMSGS, /* OutMsgs */ - ICMP6_MIB_OUTDESTUNREACHS, /* OutDestUnreachs */ - ICMP6_MIB_OUTPKTTOOBIGS, /* OutPktTooBigs */ - ICMP6_MIB_OUTTIMEEXCDS, /* OutTimeExcds */ - ICMP6_MIB_OUTPARMPROBLEMS, /* OutParmProblems */ - ICMP6_MIB_OUTECHOREPLIES, /* OutEchoReplies */ - ICMP6_MIB_OUTROUTERSOLICITS, /* OutRouterSolicits */ - ICMP6_MIB_OUTNEIGHBORSOLICITS, /* OutNeighborSolicits */ - ICMP6_MIB_OUTNEIGHBORADVERTISEMENTS, /* OutNeighborAdvertisements */ - ICMP6_MIB_OUTREDIRECTS, /* OutRedirects */ - ICMP6_MIB_OUTGROUPMEMBRESPONSES, /* OutGroupMembResponses */ - ICMP6_MIB_OUTGROUPMEMBREDUCTIONS, /* OutGroupMembReductions */ __ICMP6_MIB_MAX }; +#define __ICMP6MSG_MIB_MAX 512 /* Out+In for all 8-bit ICMPv6 types */ + /* tcp mib definitions */ /* * RFC 1213: MIB-II TCP group @@ -231,6 +210,10 @@ enum LINUX_MIB_TCPABORTONLINGER, /* TCPAbortOnLinger */ LINUX_MIB_TCPABORTFAILED, /* TCPAbortFailed */ LINUX_MIB_TCPMEMORYPRESSURES, /* TCPMemoryPressures */ + LINUX_MIB_TCPSACKDISCARD, /* TCPSACKDiscard */ + LINUX_MIB_TCPDSACKIGNOREDOLD, /* TCPSACKIgnoredOld */ + LINUX_MIB_TCPDSACKIGNOREDNOUNDO, /* TCPSACKIgnoredNoUndo */ + LINUX_MIB_TCPSPURIOUSRTOS, /* TCPSpuriousRTOs */ __LINUX_MIB_MAX }; diff --git a/include/linux/ssb/ssb.h b/include/linux/ssb/ssb.h new file mode 100644 index 0000000000000000000000000000000000000000..2b5c312c496057bc9ffdb09ed292f16075952a0a --- /dev/null +++ b/include/linux/ssb/ssb.h @@ -0,0 +1,424 @@ +#ifndef LINUX_SSB_H_ +#define LINUX_SSB_H_ + +#include +#include +#include +#include +#include +#include + +#include + + +struct pcmcia_device; +struct ssb_bus; +struct ssb_driver; + + +struct ssb_sprom_r1 { + u16 pci_spid; /* Subsystem Product ID for PCI */ + u16 pci_svid; /* Subsystem Vendor ID for PCI */ + u16 pci_pid; /* Product ID for PCI */ + u8 il0mac[6]; /* MAC address for 802.11b/g */ + u8 et0mac[6]; /* MAC address for Ethernet */ + u8 et1mac[6]; /* MAC address for 802.11a */ + u8 et0phyaddr:5; /* MII address for enet0 */ + u8 et1phyaddr:5; /* MII address for enet1 */ + u8 et0mdcport:1; /* MDIO for enet0 */ + u8 et1mdcport:1; /* MDIO for enet1 */ + u8 board_rev; /* Board revision */ + u8 country_code:4; /* Country Code */ + u8 antenna_a:2; /* Antenna 0/1 available for A-PHY */ + u8 antenna_bg:2; /* Antenna 0/1 available for B-PHY and G-PHY */ + u16 pa0b0; + u16 pa0b1; + u16 pa0b2; + u16 pa1b0; + u16 pa1b1; + u16 pa1b2; + u8 gpio0; /* GPIO pin 0 */ + u8 gpio1; /* GPIO pin 1 */ + u8 gpio2; /* GPIO pin 2 */ + u8 gpio3; /* GPIO pin 3 */ + u16 maxpwr_a; /* A-PHY Power Amplifier Max Power (in dBm Q5.2) */ + u16 maxpwr_bg; /* B/G-PHY Power Amplifier Max Power (in dBm Q5.2) */ + u8 itssi_a; /* Idle TSSI Target for A-PHY */ + u8 itssi_bg; /* Idle TSSI Target for B/G-PHY */ + u16 boardflags_lo; /* Boardflags (low 16 bits) */ + u8 antenna_gain_a; /* A-PHY Antenna gain (in dBm Q5.2) */ + u8 antenna_gain_bg; /* B/G-PHY Antenna gain (in dBm Q5.2) */ + u8 oem[8]; /* OEM string (rev 1 only) */ +}; + +struct ssb_sprom_r2 { + u16 boardflags_hi; /* Boardflags (high 16 bits) */ + u8 maxpwr_a_lo; /* A-PHY Max Power Low */ + u8 maxpwr_a_hi; /* A-PHY Max Power High */ + u16 pa1lob0; /* A-PHY PA Low Settings */ + u16 pa1lob1; /* A-PHY PA Low Settings */ + u16 pa1lob2; /* A-PHY PA Low Settings */ + u16 pa1hib0; /* A-PHY PA High Settings */ + u16 pa1hib1; /* A-PHY PA 
High Settings */ + u16 pa1hib2; /* A-PHY PA High Settings */ + u8 ofdm_pwr_off; /* OFDM Power Offset from CCK Level */ + u8 country_str[2]; /* Two char Country Code */ +}; + +struct ssb_sprom_r3 { + u32 ofdmapo; /* A-PHY OFDM Mid Power Offset */ + u32 ofdmalpo; /* A-PHY OFDM Low Power Offset */ + u32 ofdmahpo; /* A-PHY OFDM High Power Offset */ + u8 gpioldc_on_cnt; /* GPIO LED Powersave Duty Cycle ON count */ + u8 gpioldc_off_cnt; /* GPIO LED Powersave Duty Cycle OFF count */ + u8 cckpo_1M:4; /* CCK Power Offset for Rate 1M */ + u8 cckpo_2M:4; /* CCK Power Offset for Rate 2M */ + u8 cckpo_55M:4; /* CCK Power Offset for Rate 5.5M */ + u8 cckpo_11M:4; /* CCK Power Offset for Rate 11M */ + u32 ofdmgpo; /* G-PHY OFDM Power Offset */ +}; + +struct ssb_sprom_r4 { + /* TODO */ +}; + +struct ssb_sprom { + u8 revision; + u8 crc; + /* The valid r# fields are selected by the "revision". + * Revision 3 and lower inherit from lower revisions. + */ + union { + struct { + struct ssb_sprom_r1 r1; + struct ssb_sprom_r2 r2; + struct ssb_sprom_r3 r3; + }; + struct ssb_sprom_r4 r4; + }; +}; + +/* Information about the PCB the circuitry is soldered on. */ +struct ssb_boardinfo { + u16 vendor; + u16 type; + u16 rev; +}; + + +struct ssb_device; +/* Lowlevel read/write operations on the device MMIO. + * Internal, don't use that outside of ssb. */ +struct ssb_bus_ops { + u16 (*read16)(struct ssb_device *dev, u16 offset); + u32 (*read32)(struct ssb_device *dev, u16 offset); + void (*write16)(struct ssb_device *dev, u16 offset, u16 value); + void (*write32)(struct ssb_device *dev, u16 offset, u32 value); +}; + + +/* Core-ID values. */ +#define SSB_DEV_CHIPCOMMON 0x800 +#define SSB_DEV_ILINE20 0x801 +#define SSB_DEV_SDRAM 0x803 +#define SSB_DEV_PCI 0x804 +#define SSB_DEV_MIPS 0x805 +#define SSB_DEV_ETHERNET 0x806 +#define SSB_DEV_V90 0x807 +#define SSB_DEV_USB11_HOSTDEV 0x808 +#define SSB_DEV_ADSL 0x809 +#define SSB_DEV_ILINE100 0x80A +#define SSB_DEV_IPSEC 0x80B +#define SSB_DEV_PCMCIA 0x80D +#define SSB_DEV_INTERNAL_MEM 0x80E +#define SSB_DEV_MEMC_SDRAM 0x80F +#define SSB_DEV_EXTIF 0x811 +#define SSB_DEV_80211 0x812 +#define SSB_DEV_MIPS_3302 0x816 +#define SSB_DEV_USB11_HOST 0x817 +#define SSB_DEV_USB11_DEV 0x818 +#define SSB_DEV_USB20_HOST 0x819 +#define SSB_DEV_USB20_DEV 0x81A +#define SSB_DEV_SDIO_HOST 0x81B +#define SSB_DEV_ROBOSWITCH 0x81C +#define SSB_DEV_PARA_ATA 0x81D +#define SSB_DEV_SATA_XORDMA 0x81E +#define SSB_DEV_ETHERNET_GBIT 0x81F +#define SSB_DEV_PCIE 0x820 +#define SSB_DEV_MIMO_PHY 0x821 +#define SSB_DEV_SRAM_CTRLR 0x822 +#define SSB_DEV_MINI_MACPHY 0x823 +#define SSB_DEV_ARM_1176 0x824 +#define SSB_DEV_ARM_7TDMI 0x825 + +/* Vendor-ID values */ +#define SSB_VENDOR_BROADCOM 0x4243 + +/* Some kernel subsystems poke with dev->drvdata, so we must use the + * following ugly workaround to get from struct device to struct ssb_device */ +struct __ssb_dev_wrapper { + struct device dev; + struct ssb_device *sdev; +}; + +struct ssb_device { + /* Having a copy of the ops pointer in each dev struct + * is an optimization. */ + const struct ssb_bus_ops *ops; + + struct device *dev; + struct ssb_bus *bus; + struct ssb_device_id id; + + u8 core_index; + unsigned int irq; + + /* Internal-only stuff follows. */ + void *drvdata; /* Per-device data */ + void *devtypedata; /* Per-devicetype (eg 802.11) data */ +}; + +/* Go from struct device to struct ssb_device. 
*/ +static inline +struct ssb_device * dev_to_ssb_dev(struct device *dev) +{ + struct __ssb_dev_wrapper *wrap; + wrap = container_of(dev, struct __ssb_dev_wrapper, dev); + return wrap->sdev; +} + +/* Device specific user data */ +static inline +void ssb_set_drvdata(struct ssb_device *dev, void *data) +{ + dev->drvdata = data; +} +static inline +void * ssb_get_drvdata(struct ssb_device *dev) +{ + return dev->drvdata; +} + +/* Devicetype specific user data. This is per device-type (not per device) */ +void ssb_set_devtypedata(struct ssb_device *dev, void *data); +static inline +void * ssb_get_devtypedata(struct ssb_device *dev) +{ + return dev->devtypedata; +} + + +struct ssb_driver { + const char *name; + const struct ssb_device_id *id_table; + + int (*probe)(struct ssb_device *dev, const struct ssb_device_id *id); + void (*remove)(struct ssb_device *dev); + int (*suspend)(struct ssb_device *dev, pm_message_t state); + int (*resume)(struct ssb_device *dev); + void (*shutdown)(struct ssb_device *dev); + + struct device_driver drv; +}; +#define drv_to_ssb_drv(_drv) container_of(_drv, struct ssb_driver, drv) + +extern int __ssb_driver_register(struct ssb_driver *drv, struct module *owner); +static inline int ssb_driver_register(struct ssb_driver *drv) +{ + return __ssb_driver_register(drv, THIS_MODULE); +} +extern void ssb_driver_unregister(struct ssb_driver *drv); + + + + +enum ssb_bustype { + SSB_BUSTYPE_SSB, /* This SSB bus is the system bus */ + SSB_BUSTYPE_PCI, /* SSB is connected to PCI bus */ + SSB_BUSTYPE_PCMCIA, /* SSB is connected to PCMCIA bus */ +}; + +/* board_vendor */ +#define SSB_BOARDVENDOR_BCM 0x14E4 /* Broadcom */ +#define SSB_BOARDVENDOR_DELL 0x1028 /* Dell */ +#define SSB_BOARDVENDOR_HP 0x0E11 /* HP */ +/* board_type */ +#define SSB_BOARD_BCM94306MP 0x0418 +#define SSB_BOARD_BCM4309G 0x0421 +#define SSB_BOARD_BCM4306CB 0x0417 +#define SSB_BOARD_BCM4309MP 0x040C +#define SSB_BOARD_MP4318 0x044A +#define SSB_BOARD_BU4306 0x0416 +#define SSB_BOARD_BU4309 0x040A +/* chip_package */ +#define SSB_CHIPPACK_BCM4712S 1 /* Small 200pin 4712 */ +#define SSB_CHIPPACK_BCM4712M 2 /* Medium 225pin 4712 */ +#define SSB_CHIPPACK_BCM4712L 0 /* Large 340pin 4712 */ + +#include +#include +#include +#include + +struct ssb_bus { + /* The MMIO area. */ + void __iomem *mmio; + + const struct ssb_bus_ops *ops; + + /* The core in the basic address register window. (PCI bus only) */ + struct ssb_device *mapped_device; + /* Currently mapped PCMCIA segment. (bustype == SSB_BUSTYPE_PCMCIA only) */ + u8 mapped_pcmcia_seg; + /* Lock for core and segment switching. */ + spinlock_t bar_lock; + + /* The bus this backplane is running on. */ + enum ssb_bustype bustype; + /* Pointer to the PCI bus (only valid if bustype == SSB_BUSTYPE_PCI). */ + struct pci_dev *host_pci; + /* Pointer to the PCMCIA device (only if bustype == SSB_BUSTYPE_PCMCIA). */ + struct pcmcia_device *host_pcmcia; + +#ifdef CONFIG_SSB_PCIHOST + /* Mutex to protect the SPROM writing. */ + struct mutex pci_sprom_mutex; +#endif + + /* ID information about the Chip. */ + u16 chip_id; + u16 chip_rev; + u8 chip_package; + + /* List of devices (cores) on the backplane. */ + struct ssb_device devices[SSB_MAX_NR_CORES]; + u8 nr_devices; + + /* Reference count. Number of suspended devices. */ + u8 suspend_cnt; + + /* Software ID number for this bus. */ + unsigned int busnumber; + + /* The ChipCommon device (if available). */ + struct ssb_chipcommon chipco; + /* The PCI-core device (if available). 
*/ + struct ssb_pcicore pcicore; + /* The MIPS-core device (if available). */ + struct ssb_mipscore mipscore; + /* The EXTif-core device (if available). */ + struct ssb_extif extif; + + /* The following structure elements are not available in early + * SSB initialization. Though, they are available for regular + * registered drivers at any stage. So be careful when + * using them in the ssb core code. */ + + /* ID information about the PCB. */ + struct ssb_boardinfo boardinfo; + /* Contents of the SPROM. */ + struct ssb_sprom sprom; + + /* Internal-only stuff follows. Do not touch. */ + struct list_head list; +#ifdef CONFIG_SSB_DEBUG + /* Is the bus already powered up? */ + bool powered_up; + int power_warn_count; +#endif /* DEBUG */ +}; + +/* The initialization-invariants. */ +struct ssb_init_invariants { + struct ssb_boardinfo boardinfo; + struct ssb_sprom sprom; +}; +/* Type of function to fetch the invariants. */ +typedef int (*ssb_invariants_func_t)(struct ssb_bus *bus, + struct ssb_init_invariants *iv); + +/* Register a SSB system bus. get_invariants() is called after the + * basic system devices are initialized. + * The invariants are usually fetched from some NVRAM. + * Put the invariants into the struct pointed to by iv. */ +extern int ssb_bus_ssbbus_register(struct ssb_bus *bus, + unsigned long baseaddr, + ssb_invariants_func_t get_invariants); +#ifdef CONFIG_SSB_PCIHOST +extern int ssb_bus_pcibus_register(struct ssb_bus *bus, + struct pci_dev *host_pci); +#endif /* CONFIG_SSB_PCIHOST */ +#ifdef CONFIG_SSB_PCMCIAHOST +extern int ssb_bus_pcmciabus_register(struct ssb_bus *bus, + struct pcmcia_device *pcmcia_dev, + unsigned long baseaddr); +#endif /* CONFIG_SSB_PCMCIAHOST */ + +extern void ssb_bus_unregister(struct ssb_bus *bus); + +extern u32 ssb_clockspeed(struct ssb_bus *bus); + +/* Is the device enabled in hardware? */ +int ssb_device_is_enabled(struct ssb_device *dev); +/* Enable a device and pass device-specific SSB_TMSLOW flags. + * If no device-specific flags are available, use 0. */ +void ssb_device_enable(struct ssb_device *dev, u32 core_specific_flags); +/* Disable a device in hardware and pass SSB_TMSLOW flags (if any). */ +void ssb_device_disable(struct ssb_device *dev, u32 core_specific_flags); + + +/* Device MMIO register read/write functions. */ +static inline u16 ssb_read16(struct ssb_device *dev, u16 offset) +{ + return dev->ops->read16(dev, offset); +} +static inline u32 ssb_read32(struct ssb_device *dev, u16 offset) +{ + return dev->ops->read32(dev, offset); +} +static inline void ssb_write16(struct ssb_device *dev, u16 offset, u16 value) +{ + dev->ops->write16(dev, offset, value); +} +static inline void ssb_write32(struct ssb_device *dev, u16 offset, u32 value) +{ + dev->ops->write32(dev, offset, value); +} + + +/* Translation (routing) bits that need to be ORed to DMA + * addresses before they are given to a device. */ +extern u32 ssb_dma_translation(struct ssb_device *dev); +#define SSB_DMA_TRANSLATION_MASK 0xC0000000 +#define SSB_DMA_TRANSLATION_SHIFT 30 + +extern int ssb_dma_set_mask(struct ssb_device *ssb_dev, u64 mask); + + +#ifdef CONFIG_SSB_PCIHOST +/* PCI-host wrapper driver */ +extern int ssb_pcihost_register(struct pci_driver *driver); +static inline void ssb_pcihost_unregister(struct pci_driver *driver) +{ + pci_unregister_driver(driver); +} +#endif /* CONFIG_SSB_PCIHOST */ + + +/* If a driver is shutdown or suspended, call this to signal + * that the bus may be completely powered down. 
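A hedged sketch of a minimal SSB client driver built on the API declared above: match a core by ID, enable it in probe, and use the MMIO accessors. SSB_DEVICE, SSB_DEVTABLE_END and SSB_ANY_REV are assumed to come from the ssb additions to linux/mod_devicetable.h, and EXAMPLE_REG is a hypothetical register offset.

#include <linux/module.h>
#include <linux/ssb/ssb.h>

#define EXAMPLE_REG	0x0000	/* hypothetical core register offset */

static const struct ssb_device_id example_ssb_ids[] = {
	SSB_DEVICE(SSB_VENDOR_BROADCOM, SSB_DEV_ETHERNET, SSB_ANY_REV),
	SSB_DEVTABLE_END
};

static int example_probe(struct ssb_device *dev,
			 const struct ssb_device_id *id)
{
	u32 val;

	ssb_device_enable(dev, 0);	/* no core-specific TMSLOW flags */
	val = ssb_read32(dev, EXAMPLE_REG);
	(void)val;			/* a real driver would act on this */
	return 0;
}

static void example_remove(struct ssb_device *dev)
{
	ssb_device_disable(dev, 0);
}

static struct ssb_driver example_driver = {
	.name		= "ssb-example",
	.id_table	= example_ssb_ids,
	.probe		= example_probe,
	.remove		= example_remove,
};

static int __init example_init(void)
{
	return ssb_driver_register(&example_driver);
}

static void __exit example_exit(void)
{
	ssb_driver_unregister(&example_driver);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");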
SSB will decide, + * if it's really time to power down the bus, based on if there + * are other devices that want to run. */ +extern int ssb_bus_may_powerdown(struct ssb_bus *bus); +/* Before initializing and enabling a device, call this to power-up the bus. + * If you want to allow use of dynamic-power-control, pass the flag. + * Otherwise static always-on powercontrol will be used. */ +extern int ssb_bus_powerup(struct ssb_bus *bus, bool dynamic_pctl); + + +/* Various helper functions */ +extern u32 ssb_admatch_base(u32 adm); +extern u32 ssb_admatch_size(u32 adm); + + +#endif /* LINUX_SSB_H_ */ diff --git a/include/linux/ssb/ssb_driver_chipcommon.h b/include/linux/ssb/ssb_driver_chipcommon.h new file mode 100644 index 0000000000000000000000000000000000000000..4cb99549466297a9021a482f14dafdab79048e73 --- /dev/null +++ b/include/linux/ssb/ssb_driver_chipcommon.h @@ -0,0 +1,396 @@ +#ifndef LINUX_SSB_CHIPCO_H_ +#define LINUX_SSB_CHIPCO_H_ + +/* SonicsSiliconBackplane CHIPCOMMON core hardware definitions + * + * The chipcommon core provides chip identification, SB control, + * jtag, 0/1/2 uarts, clock frequency control, a watchdog interrupt timer, + * gpio interface, extbus, and support for serial and parallel flashes. + * + * Copyright 2005, Broadcom Corporation + * Copyright 2006, Michael Buesch + * + * Licensed under the GPL version 2. See COPYING for details. + */ + +/** ChipCommon core registers. **/ + +#define SSB_CHIPCO_CHIPID 0x0000 +#define SSB_CHIPCO_IDMASK 0x0000FFFF +#define SSB_CHIPCO_REVMASK 0x000F0000 +#define SSB_CHIPCO_REVSHIFT 16 +#define SSB_CHIPCO_PACKMASK 0x00F00000 +#define SSB_CHIPCO_PACKSHIFT 20 +#define SSB_CHIPCO_NRCORESMASK 0x0F000000 +#define SSB_CHIPCO_NRCORESSHIFT 24 +#define SSB_CHIPCO_CAP 0x0004 /* Capabilities */ +#define SSB_CHIPCO_CAP_NRUART 0x00000003 /* # of UARTs */ +#define SSB_CHIPCO_CAP_MIPSEB 0x00000004 /* MIPS in BigEndian Mode */ +#define SSB_CHIPCO_CAP_UARTCLK 0x00000018 /* UART clock select */ +#define SSB_CHIPCO_CAP_UARTCLK_INT 0x00000008 /* UARTs are driven by internal divided clock */ +#define SSB_CHIPCO_CAP_UARTGPIO 0x00000020 /* UARTs on GPIO 15-12 */ +#define SSB_CHIPCO_CAP_EXTBUS 0x000000C0 /* External buses present */ +#define SSB_CHIPCO_CAP_FLASHT 0x00000700 /* Flash Type */ +#define SSB_CHIPCO_FLASHT_NONE 0x00000000 /* No flash */ +#define SSB_CHIPCO_FLASHT_STSER 0x00000100 /* ST serial flash */ +#define SSB_CHIPCO_FLASHT_ATSER 0x00000200 /* Atmel serial flash */ +#define SSB_CHIPCO_FLASHT_PARA 0x00000700 /* Parallel flash */ +#define SSB_CHIPCO_CAP_PLLT 0x00038000 /* PLL Type */ +#define SSB_PLLTYPE_NONE 0x00000000 +#define SSB_PLLTYPE_1 0x00010000 /* 48Mhz base, 3 dividers */ +#define SSB_PLLTYPE_2 0x00020000 /* 48Mhz, 4 dividers */ +#define SSB_PLLTYPE_3 0x00030000 /* 25Mhz, 2 dividers */ +#define SSB_PLLTYPE_4 0x00008000 /* 48Mhz, 4 dividers */ +#define SSB_PLLTYPE_5 0x00018000 /* 25Mhz, 4 dividers */ +#define SSB_PLLTYPE_6 0x00028000 /* 100/200 or 120/240 only */ +#define SSB_PLLTYPE_7 0x00038000 /* 25Mhz, 4 dividers */ +#define SSB_CHIPCO_CAP_PCTL 0x00040000 /* Power Control */ +#define SSB_CHIPCO_CAP_OTPS 0x00380000 /* OTP size */ +#define SSB_CHIPCO_CAP_OTPS_SHIFT 19 +#define SSB_CHIPCO_CAP_OTPS_BASE 5 +#define SSB_CHIPCO_CAP_JTAGM 0x00400000 /* JTAG master present */ +#define SSB_CHIPCO_CAP_BROM 0x00800000 /* Internal boot ROM active */ +#define SSB_CHIPCO_CAP_64BIT 0x08000000 /* 64-bit Backplane */ +#define SSB_CHIPCO_CORECTL 0x0008 +#define SSB_CHIPCO_CORECTL_UARTCLK0 0x00000001 /* Drive UART with internal clock */ +#define 
SSB_CHIPCO_CORECTL_SE 0x00000002 /* sync clk out enable (corerev >= 3) */ +#define SSB_CHIPCO_BIST 0x000C +#define SSB_CHIPCO_OTPS 0x0010 /* OTP status */ +#define SSB_CHIPCO_OTPS_PROGFAIL 0x80000000 +#define SSB_CHIPCO_OTPS_PROTECT 0x00000007 +#define SSB_CHIPCO_OTPS_HW_PROTECT 0x00000001 +#define SSB_CHIPCO_OTPS_SW_PROTECT 0x00000002 +#define SSB_CHIPCO_OTPS_CID_PROTECT 0x00000004 +#define SSB_CHIPCO_OTPC 0x0014 /* OTP control */ +#define SSB_CHIPCO_OTPC_RECWAIT 0xFF000000 +#define SSB_CHIPCO_OTPC_PROGWAIT 0x00FFFF00 +#define SSB_CHIPCO_OTPC_PRW_SHIFT 8 +#define SSB_CHIPCO_OTPC_MAXFAIL 0x00000038 +#define SSB_CHIPCO_OTPC_VSEL 0x00000006 +#define SSB_CHIPCO_OTPC_SELVL 0x00000001 +#define SSB_CHIPCO_OTPP 0x0018 /* OTP prog */ +#define SSB_CHIPCO_OTPP_COL 0x000000FF +#define SSB_CHIPCO_OTPP_ROW 0x0000FF00 +#define SSB_CHIPCO_OTPP_ROW_SHIFT 8 +#define SSB_CHIPCO_OTPP_READERR 0x10000000 +#define SSB_CHIPCO_OTPP_VALUE 0x20000000 +#define SSB_CHIPCO_OTPP_READ 0x40000000 +#define SSB_CHIPCO_OTPP_START 0x80000000 +#define SSB_CHIPCO_OTPP_BUSY 0x80000000 +#define SSB_CHIPCO_IRQSTAT 0x0020 +#define SSB_CHIPCO_IRQMASK 0x0024 +#define SSB_CHIPCO_IRQ_GPIO 0x00000001 /* gpio intr */ +#define SSB_CHIPCO_IRQ_EXT 0x00000002 /* ro: ext intr pin (corerev >= 3) */ +#define SSB_CHIPCO_IRQ_WDRESET 0x80000000 /* watchdog reset occurred */ +#define SSB_CHIPCO_CHIPCTL 0x0028 /* Rev >= 11 only */ +#define SSB_CHIPCO_CHIPSTAT 0x002C /* Rev >= 11 only */ +#define SSB_CHIPCO_JCMD 0x0030 /* Rev >= 10 only */ +#define SSB_CHIPCO_JCMD_START 0x80000000 +#define SSB_CHIPCO_JCMD_BUSY 0x80000000 +#define SSB_CHIPCO_JCMD_PAUSE 0x40000000 +#define SSB_CHIPCO_JCMD0_ACC_MASK 0x0000F000 +#define SSB_CHIPCO_JCMD0_ACC_IRDR 0x00000000 +#define SSB_CHIPCO_JCMD0_ACC_DR 0x00001000 +#define SSB_CHIPCO_JCMD0_ACC_IR 0x00002000 +#define SSB_CHIPCO_JCMD0_ACC_RESET 0x00003000 +#define SSB_CHIPCO_JCMD0_ACC_IRPDR 0x00004000 +#define SSB_CHIPCO_JCMD0_ACC_PDR 0x00005000 +#define SSB_CHIPCO_JCMD0_IRW_MASK 0x00000F00 +#define SSB_CHIPCO_JCMD_ACC_MASK 0x000F0000 /* Changes for corerev 11 */ +#define SSB_CHIPCO_JCMD_ACC_IRDR 0x00000000 +#define SSB_CHIPCO_JCMD_ACC_DR 0x00010000 +#define SSB_CHIPCO_JCMD_ACC_IR 0x00020000 +#define SSB_CHIPCO_JCMD_ACC_RESET 0x00030000 +#define SSB_CHIPCO_JCMD_ACC_IRPDR 0x00040000 +#define SSB_CHIPCO_JCMD_ACC_PDR 0x00050000 +#define SSB_CHIPCO_JCMD_IRW_MASK 0x00001F00 +#define SSB_CHIPCO_JCMD_IRW_SHIFT 8 +#define SSB_CHIPCO_JCMD_DRW_MASK 0x0000003F +#define SSB_CHIPCO_JIR 0x0034 /* Rev >= 10 only */ +#define SSB_CHIPCO_JDR 0x0038 /* Rev >= 10 only */ +#define SSB_CHIPCO_JCTL 0x003C /* Rev >= 10 only */ +#define SSB_CHIPCO_JCTL_FORCE_CLK 4 /* Force clock */ +#define SSB_CHIPCO_JCTL_EXT_EN 2 /* Enable external targets */ +#define SSB_CHIPCO_JCTL_EN 1 /* Enable Jtag master */ +#define SSB_CHIPCO_FLASHCTL 0x0040 +#define SSB_CHIPCO_FLASHCTL_START 0x80000000 +#define SSB_CHIPCO_FLASHCTL_BUSY SSB_CHIPCO_FLASHCTL_START +#define SSB_CHIPCO_FLASHADDR 0x0044 +#define SSB_CHIPCO_FLASHDATA 0x0048 +#define SSB_CHIPCO_BCAST_ADDR 0x0050 +#define SSB_CHIPCO_BCAST_DATA 0x0054 +#define SSB_CHIPCO_GPIOIN 0x0060 +#define SSB_CHIPCO_GPIOOUT 0x0064 +#define SSB_CHIPCO_GPIOOUTEN 0x0068 +#define SSB_CHIPCO_GPIOCTL 0x006C +#define SSB_CHIPCO_GPIOPOL 0x0070 +#define SSB_CHIPCO_GPIOIRQ 0x0074 +#define SSB_CHIPCO_WATCHDOG 0x0080 +#define SSB_CHIPCO_GPIOTIMER 0x0088 /* LED powersave (corerev >= 16) */ +#define SSB_CHIPCO_GPIOTIMER_ONTIME_SHIFT 16 +#define SSB_CHIPCO_GPIOTOUTM 0x008C /* LED powersave (corerev >= 16) */ +#define SSB_CHIPCO_CLOCK_N 
0x0090 +#define SSB_CHIPCO_CLOCK_SB 0x0094 +#define SSB_CHIPCO_CLOCK_PCI 0x0098 +#define SSB_CHIPCO_CLOCK_M2 0x009C +#define SSB_CHIPCO_CLOCK_MIPS 0x00A0 +#define SSB_CHIPCO_CLKDIV 0x00A4 /* Rev >= 3 only */ +#define SSB_CHIPCO_CLKDIV_SFLASH 0x0F000000 +#define SSB_CHIPCO_CLKDIV_SFLASH_SHIFT 24 +#define SSB_CHIPCO_CLKDIV_OTP 0x000F0000 +#define SSB_CHIPCO_CLKDIV_OTP_SHIFT 16 +#define SSB_CHIPCO_CLKDIV_JTAG 0x00000F00 +#define SSB_CHIPCO_CLKDIV_JTAG_SHIFT 8 +#define SSB_CHIPCO_CLKDIV_UART 0x000000FF +#define SSB_CHIPCO_PLLONDELAY 0x00B0 /* Rev >= 4 only */ +#define SSB_CHIPCO_FREFSELDELAY 0x00B4 /* Rev >= 4 only */ +#define SSB_CHIPCO_SLOWCLKCTL 0x00B8 /* 6 <= Rev <= 9 only */ +#define SSB_CHIPCO_SLOWCLKCTL_SRC 0x00000007 /* slow clock source mask */ +#define SSB_CHIPCO_SLOWCLKCTL_SRC_LPO 0x00000000 /* source of slow clock is LPO */ +#define SSB_CHIPCO_SLOWCLKCTL_SRC_XTAL 0x00000001 /* source of slow clock is crystal */ +#define SSB_CHIPCO_SLOECLKCTL_SRC_PCI 0x00000002 /* source of slow clock is PCI */ +#define SSB_CHIPCO_SLOWCLKCTL_LPOFREQ 0x00000200 /* LPOFreqSel, 1: 160Khz, 0: 32KHz */ +#define SSB_CHIPCO_SLOWCLKCTL_LPOPD 0x00000400 /* LPOPowerDown, 1: LPO is disabled, 0: LPO is enabled */ +#define SSB_CHIPCO_SLOWCLKCTL_FSLOW 0x00000800 /* ForceSlowClk, 1: sb/cores running on slow clock, 0: power logic control */ +#define SSB_CHIPCO_SLOWCLKCTL_IPLL 0x00001000 /* IgnorePllOffReq, 1/0: power logic ignores/honors PLL clock disable requests from core */ +#define SSB_CHIPCO_SLOWCLKCTL_ENXTAL 0x00002000 /* XtalControlEn, 1/0: power logic does/doesn't disable crystal when appropriate */ +#define SSB_CHIPCO_SLOWCLKCTL_XTALPU 0x00004000 /* XtalPU (RO), 1/0: crystal running/disabled */ +#define SSB_CHIPCO_SLOWCLKCTL_CLKDIV 0xFFFF0000 /* ClockDivider (SlowClk = 1/(4+divisor)) */ +#define SSB_CHIPCO_SLOWCLKCTL_CLKDIV_SHIFT 16 +#define SSB_CHIPCO_SYSCLKCTL 0x00C0 /* Rev >= 3 only */ +#define SSB_CHIPCO_SYSCLKCTL_IDLPEN 0x00000001 /* ILPen: Enable Idle Low Power */ +#define SSB_CHIPCO_SYSCLKCTL_ALPEN 0x00000002 /* ALPen: Enable Active Low Power */ +#define SSB_CHIPCO_SYSCLKCTL_PLLEN 0x00000004 /* ForcePLLOn */ +#define SSB_CHIPCO_SYSCLKCTL_FORCEALP 0x00000008 /* Force ALP (or HT if ALPen is not set */ +#define SSB_CHIPCO_SYSCLKCTL_FORCEHT 0x00000010 /* Force HT */ +#define SSB_CHIPCO_SYSCLKCTL_CLKDIV 0xFFFF0000 /* ClkDiv (ILP = 1/(4+divisor)) */ +#define SSB_CHIPCO_SYSCLKCTL_CLKDIV_SHIFT 16 +#define SSB_CHIPCO_CLKSTSTR 0x00C4 /* Rev >= 3 only */ +#define SSB_CHIPCO_PCMCIA_CFG 0x0100 +#define SSB_CHIPCO_PCMCIA_MEMWAIT 0x0104 +#define SSB_CHIPCO_PCMCIA_ATTRWAIT 0x0108 +#define SSB_CHIPCO_PCMCIA_IOWAIT 0x010C +#define SSB_CHIPCO_IDE_CFG 0x0110 +#define SSB_CHIPCO_IDE_MEMWAIT 0x0114 +#define SSB_CHIPCO_IDE_ATTRWAIT 0x0118 +#define SSB_CHIPCO_IDE_IOWAIT 0x011C +#define SSB_CHIPCO_PROG_CFG 0x0120 +#define SSB_CHIPCO_PROG_WAITCNT 0x0124 +#define SSB_CHIPCO_FLASH_CFG 0x0128 +#define SSB_CHIPCO_FLASH_WAITCNT 0x012C +#define SSB_CHIPCO_UART0_DATA 0x0300 +#define SSB_CHIPCO_UART0_IMR 0x0304 +#define SSB_CHIPCO_UART0_FCR 0x0308 +#define SSB_CHIPCO_UART0_LCR 0x030C +#define SSB_CHIPCO_UART0_MCR 0x0310 +#define SSB_CHIPCO_UART0_LSR 0x0314 +#define SSB_CHIPCO_UART0_MSR 0x0318 +#define SSB_CHIPCO_UART0_SCRATCH 0x031C +#define SSB_CHIPCO_UART1_DATA 0x0400 +#define SSB_CHIPCO_UART1_IMR 0x0404 +#define SSB_CHIPCO_UART1_FCR 0x0408 +#define SSB_CHIPCO_UART1_LCR 0x040C +#define SSB_CHIPCO_UART1_MCR 0x0410 +#define SSB_CHIPCO_UART1_LSR 0x0414 +#define SSB_CHIPCO_UART1_MSR 0x0418 +#define SSB_CHIPCO_UART1_SCRATCH 0x041C + + 
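Purely as an illustration of the ChipCommon ID masks defined above, the fields of the SSB_CHIPCO_CHIPID register can be pulled apart as follows; dev is assumed to be the ChipCommon core's ssb_device.

#include <linux/kernel.h>
#include <linux/ssb/ssb.h>
#include <linux/ssb/ssb_driver_chipcommon.h>

/* Illustrative only: decode chip ID, revision, package and core count. */
static void example_decode_chipid(struct ssb_device *dev)
{
	u32 id = ssb_read32(dev, SSB_CHIPCO_CHIPID);
	unsigned int chip_id  = id & SSB_CHIPCO_IDMASK;
	unsigned int chip_rev = (id & SSB_CHIPCO_REVMASK) >> SSB_CHIPCO_REVSHIFT;
	unsigned int chip_pkg = (id & SSB_CHIPCO_PACKMASK) >> SSB_CHIPCO_PACKSHIFT;
	unsigned int nr_cores = (id & SSB_CHIPCO_NRCORESMASK) >> SSB_CHIPCO_NRCORESSHIFT;

	printk(KERN_DEBUG "chipcommon: chip 0x%04X rev %u pkg %u, %u cores\n",
	       chip_id, chip_rev, chip_pkg, nr_cores);
}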
+ +/** Clockcontrol masks and values **/ + +/* SSB_CHIPCO_CLOCK_N */ +#define SSB_CHIPCO_CLK_N1 0x0000003F /* n1 control */ +#define SSB_CHIPCO_CLK_N2 0x00003F00 /* n2 control */ +#define SSB_CHIPCO_CLK_N2_SHIFT 8 +#define SSB_CHIPCO_CLK_PLLC 0x000F0000 /* pll control */ +#define SSB_CHIPCO_CLK_PLLC_SHIFT 16 + +/* SSB_CHIPCO_CLOCK_SB/PCI/UART */ +#define SSB_CHIPCO_CLK_M1 0x0000003F /* m1 control */ +#define SSB_CHIPCO_CLK_M2 0x00003F00 /* m2 control */ +#define SSB_CHIPCO_CLK_M2_SHIFT 8 +#define SSB_CHIPCO_CLK_M3 0x003F0000 /* m3 control */ +#define SSB_CHIPCO_CLK_M3_SHIFT 16 +#define SSB_CHIPCO_CLK_MC 0x1F000000 /* mux control */ +#define SSB_CHIPCO_CLK_MC_SHIFT 24 + +/* N3M Clock control magic field values */ +#define SSB_CHIPCO_CLK_F6_2 0x02 /* A factor of 2 in */ +#define SSB_CHIPCO_CLK_F6_3 0x03 /* 6-bit fields like */ +#define SSB_CHIPCO_CLK_F6_4 0x05 /* N1, M1 or M3 */ +#define SSB_CHIPCO_CLK_F6_5 0x09 +#define SSB_CHIPCO_CLK_F6_6 0x11 +#define SSB_CHIPCO_CLK_F6_7 0x21 + +#define SSB_CHIPCO_CLK_F5_BIAS 5 /* 5-bit fields get this added */ + +#define SSB_CHIPCO_CLK_MC_BYPASS 0x08 +#define SSB_CHIPCO_CLK_MC_M1 0x04 +#define SSB_CHIPCO_CLK_MC_M1M2 0x02 +#define SSB_CHIPCO_CLK_MC_M1M2M3 0x01 +#define SSB_CHIPCO_CLK_MC_M1M3 0x11 + +/* Type 2 Clock control magic field values */ +#define SSB_CHIPCO_CLK_T2_BIAS 2 /* n1, n2, m1 & m3 bias */ +#define SSB_CHIPCO_CLK_T2M2_BIAS 3 /* m2 bias */ + +#define SSB_CHIPCO_CLK_T2MC_M1BYP 1 +#define SSB_CHIPCO_CLK_T2MC_M2BYP 2 +#define SSB_CHIPCO_CLK_T2MC_M3BYP 4 + +/* Type 6 Clock control magic field values */ +#define SSB_CHIPCO_CLK_T6_MMASK 1 /* bits of interest in m */ +#define SSB_CHIPCO_CLK_T6_M0 120000000 /* sb clock for m = 0 */ +#define SSB_CHIPCO_CLK_T6_M1 100000000 /* sb clock for m = 1 */ +#define SSB_CHIPCO_CLK_SB2MIPS_T6(sb) (2 * (sb)) + +/* Common clock base */ +#define SSB_CHIPCO_CLK_BASE1 24000000 /* Half the clock freq */ +#define SSB_CHIPCO_CLK_BASE2 12500000 /* Alternate crystal on some PLL's */ + +/* Clock control values for 200Mhz in 5350 */ +#define SSB_CHIPCO_CLK_5350_N 0x0311 +#define SSB_CHIPCO_CLK_5350_M 0x04020009 + + +/** Bits in the config registers **/ + +#define SSB_CHIPCO_CFG_EN 0x0001 /* Enable */ +#define SSB_CHIPCO_CFG_EXTM 0x000E /* Extif Mode */ +#define SSB_CHIPCO_CFG_EXTM_ASYNC 0x0002 /* Async/Parallel flash */ +#define SSB_CHIPCO_CFG_EXTM_SYNC 0x0004 /* Synchronous */ +#define SSB_CHIPCO_CFG_EXTM_PCMCIA 0x0008 /* PCMCIA */ +#define SSB_CHIPCO_CFG_EXTM_IDE 0x000A /* IDE */ +#define SSB_CHIPCO_CFG_DS16 0x0010 /* Data size, 0=8bit, 1=16bit */ +#define SSB_CHIPCO_CFG_CLKDIV 0x0060 /* Sync: Clock divisor */ +#define SSB_CHIPCO_CFG_CLKEN 0x0080 /* Sync: Clock enable */ +#define SSB_CHIPCO_CFG_BSTRO 0x0100 /* Sync: Size/Bytestrobe */ + + +/** Flash-specific control/status values */ + +/* flashcontrol opcodes for ST flashes */ +#define SSB_CHIPCO_FLASHCTL_ST_WREN 0x0006 /* Write Enable */ +#define SSB_CHIPCO_FLASHCTL_ST_WRDIS 0x0004 /* Write Disable */ +#define SSB_CHIPCO_FLASHCTL_ST_RDSR 0x0105 /* Read Status Register */ +#define SSB_CHIPCO_FLASHCTL_ST_WRSR 0x0101 /* Write Status Register */ +#define SSB_CHIPCO_FLASHCTL_ST_READ 0x0303 /* Read Data Bytes */ +#define SSB_CHIPCO_FLASHCTL_ST_PP 0x0302 /* Page Program */ +#define SSB_CHIPCO_FLASHCTL_ST_SE 0x02D8 /* Sector Erase */ +#define SSB_CHIPCO_FLASHCTL_ST_BE 0x00C7 /* Bulk Erase */ +#define SSB_CHIPCO_FLASHCTL_ST_DP 0x00B9 /* Deep Power-down */ +#define SSB_CHIPCO_FLASHCTL_ST_RSIG 0x03AB /* Read Electronic Signature */ + +/* Status register bits for ST flashes */ 
+#define SSB_CHIPCO_FLASHSTA_ST_WIP 0x01 /* Write In Progress */ +#define SSB_CHIPCO_FLASHSTA_ST_WEL 0x02 /* Write Enable Latch */ +#define SSB_CHIPCO_FLASHSTA_ST_BP 0x1C /* Block Protect */ +#define SSB_CHIPCO_FLASHSTA_ST_BP_SHIFT 2 +#define SSB_CHIPCO_FLASHSTA_ST_SRWD 0x80 /* Status Register Write Disable */ + +/* flashcontrol opcodes for Atmel flashes */ +#define SSB_CHIPCO_FLASHCTL_AT_READ 0x07E8 +#define SSB_CHIPCO_FLASHCTL_AT_PAGE_READ 0x07D2 +#define SSB_CHIPCO_FLASHCTL_AT_BUF1_READ /* FIXME */ +#define SSB_CHIPCO_FLASHCTL_AT_BUF2_READ /* FIXME */ +#define SSB_CHIPCO_FLASHCTL_AT_STATUS 0x01D7 +#define SSB_CHIPCO_FLASHCTL_AT_BUF1_WRITE 0x0384 +#define SSB_CHIPCO_FLASHCTL_AT_BUF2_WRITE 0x0387 +#define SSB_CHIPCO_FLASHCTL_AT_BUF1_ERASE_PRGM 0x0283 /* Erase program */ +#define SSB_CHIPCO_FLASHCTL_AT_BUF2_ERASE_PRGM 0x0286 /* Erase program */ +#define SSB_CHIPCO_FLASHCTL_AT_BUF1_PROGRAM 0x0288 +#define SSB_CHIPCO_FLASHCTL_AT_BUF2_PROGRAM 0x0289 +#define SSB_CHIPCO_FLASHCTL_AT_PAGE_ERASE 0x0281 +#define SSB_CHIPCO_FLASHCTL_AT_BLOCK_ERASE 0x0250 +#define SSB_CHIPCO_FLASHCTL_AT_BUF1_WRER_PRGM 0x0382 /* Write erase program */ +#define SSB_CHIPCO_FLASHCTL_AT_BUF2_WRER_PRGM 0x0385 /* Write erase program */ +#define SSB_CHIPCO_FLASHCTL_AT_BUF1_LOAD 0x0253 +#define SSB_CHIPCO_FLASHCTL_AT_BUF2_LOAD 0x0255 +#define SSB_CHIPCO_FLASHCTL_AT_BUF1_COMPARE 0x0260 +#define SSB_CHIPCO_FLASHCTL_AT_BUF2_COMPARE 0x0261 +#define SSB_CHIPCO_FLASHCTL_AT_BUF1_REPROGRAM 0x0258 +#define SSB_CHIPCO_FLASHCTL_AT_BUF2_REPROGRAM 0x0259 + +/* Status register bits for Atmel flashes */ +#define SSB_CHIPCO_FLASHSTA_AT_READY 0x80 +#define SSB_CHIPCO_FLASHSTA_AT_MISMATCH 0x40 +#define SSB_CHIPCO_FLASHSTA_AT_ID 0x38 +#define SSB_CHIPCO_FLASHSTA_AT_ID_SHIFT 3 + + +/** OTP **/ + +/* OTP regions */ +#define SSB_CHIPCO_OTP_HW_REGION SSB_CHIPCO_OTPS_HW_PROTECT +#define SSB_CHIPCO_OTP_SW_REGION SSB_CHIPCO_OTPS_SW_PROTECT +#define SSB_CHIPCO_OTP_CID_REGION SSB_CHIPCO_OTPS_CID_PROTECT + +/* OTP regions (Byte offsets from otp size) */ +#define SSB_CHIPCO_OTP_SWLIM_OFF (-8) +#define SSB_CHIPCO_OTP_CIDBASE_OFF 0 +#define SSB_CHIPCO_OTP_CIDLIM_OFF 8 + +/* Predefined OTP words (Word offset from otp size) */ +#define SSB_CHIPCO_OTP_BOUNDARY_OFF (-4) +#define SSB_CHIPCO_OTP_HWSIGN_OFF (-3) +#define SSB_CHIPCO_OTP_SWSIGN_OFF (-2) +#define SSB_CHIPCO_OTP_CIDSIGN_OFF (-1) + +#define SSB_CHIPCO_OTP_CID_OFF 0 +#define SSB_CHIPCO_OTP_PKG_OFF 1 +#define SSB_CHIPCO_OTP_FID_OFF 2 +#define SSB_CHIPCO_OTP_RSV_OFF 3 +#define SSB_CHIPCO_OTP_LIM_OFF 4 + +#define SSB_CHIPCO_OTP_SIGNATURE 0x578A +#define SSB_CHIPCO_OTP_MAGIC 0x4E56 + + +struct ssb_device; +struct ssb_serial_port; + +struct ssb_chipcommon { + struct ssb_device *dev; + u32 capabilities; + /* Fast Powerup Delay constant */ + u16 fast_pwrup_delay; +}; + +extern void ssb_chipcommon_init(struct ssb_chipcommon *cc); + +#include +extern void ssb_chipco_suspend(struct ssb_chipcommon *cc, pm_message_t state); +extern void ssb_chipco_resume(struct ssb_chipcommon *cc); + +extern void ssb_chipco_get_clockcpu(struct ssb_chipcommon *cc, + u32 *plltype, u32 *n, u32 *m); +extern void ssb_chipco_get_clockcontrol(struct ssb_chipcommon *cc, + u32 *plltype, u32 *n, u32 *m); +extern void ssb_chipco_timing_init(struct ssb_chipcommon *cc, + unsigned long ns_per_cycle); + +enum ssb_clkmode { + SSB_CLKMODE_SLOW, + SSB_CLKMODE_FAST, + SSB_CLKMODE_DYNAMIC, +}; + +extern void ssb_chipco_set_clockmode(struct ssb_chipcommon *cc, + enum ssb_clkmode mode); + +extern void ssb_chipco_watchdog_timer_set(struct ssb_chipcommon 
*cc, + u32 ticks); + +u32 ssb_chipco_gpio_in(struct ssb_chipcommon *cc, u32 mask); + +void ssb_chipco_gpio_out(struct ssb_chipcommon *cc, u32 mask, u32 value); + +void ssb_chipco_gpio_outen(struct ssb_chipcommon *cc, u32 mask, u32 value); + +#ifdef CONFIG_SSB_SERIAL +extern int ssb_chipco_serial_init(struct ssb_chipcommon *cc, + struct ssb_serial_port *ports); +#endif /* CONFIG_SSB_SERIAL */ + +#endif /* LINUX_SSB_CHIPCO_H_ */ diff --git a/include/linux/ssb/ssb_driver_extif.h b/include/linux/ssb/ssb_driver_extif.h new file mode 100644 index 0000000000000000000000000000000000000000..a9164357b5ae7325bb6bcfee861c7ff941aeace4 --- /dev/null +++ b/include/linux/ssb/ssb_driver_extif.h @@ -0,0 +1,204 @@ +/* + * Hardware-specific External Interface I/O core definitions + * for the BCM47xx family of SiliconBackplane-based chips. + * + * The External Interface core supports a total of three external chip selects + * supporting external interfaces. One of the external chip selects is + * used for Flash, one is used for PCMCIA, and the other may be + * programmed to support either a synchronous interface or an + * asynchronous interface. The asynchronous interface can be used to + * support external devices such as UARTs and the BCM2019 Bluetooth + * baseband processor. + * The external interface core also contains 2 on-chip 16550 UARTs, clock + * frequency control, a watchdog interrupt timer, and a GPIO interface. + * + * Copyright 2005, Broadcom Corporation + * Copyright 2006, Michael Buesch + * + * Licensed under the GPL version 2. See COPYING for details. + */ +#ifndef LINUX_SSB_EXTIFCORE_H_ +#define LINUX_SSB_EXTIFCORE_H_ + +/* external interface address space */ +#define SSB_EXTIF_PCMCIA_MEMBASE(x) (x) +#define SSB_EXTIF_PCMCIA_IOBASE(x) ((x) + 0x100000) +#define SSB_EXTIF_PCMCIA_CFGBASE(x) ((x) + 0x200000) +#define SSB_EXTIF_CFGIF_BASE(x) ((x) + 0x800000) +#define SSB_EXTIF_FLASH_BASE(x) ((x) + 0xc00000) + +#define SSB_EXTIF_NR_GPIOOUT 5 +/* GPIO NOTE: + * The multiple instances of output and output enable registers + * are present to allow driver software for multiple cores to control + * gpio outputs without needing to share a single register pair. + * Use the following helper macro to get a register offset value. 
+ */ +#define SSB_EXTIF_GPIO_OUT(index) ({ \ + BUILD_BUG_ON(index >= SSB_EXTIF_NR_GPIOOUT); \ + SSB_EXTIF_GPIO_OUT_BASE + ((index) * 8); \ + }) +#define SSB_EXTIF_GPIO_OUTEN(index) ({ \ + BUILD_BUG_ON(index >= SSB_EXTIF_NR_GPIOOUT); \ + SSB_EXTIF_GPIO_OUTEN_BASE + ((index) * 8); \ + }) + +/** EXTIF core registers **/ + +#define SSB_EXTIF_CTL 0x0000 +#define SSB_EXTIF_CTL_UARTEN (1 << 0) /* UART enable */ +#define SSB_EXTIF_EXTSTAT 0x0004 +#define SSB_EXTIF_EXTSTAT_EMODE (1 << 0) /* Endian mode (ro) */ +#define SSB_EXTIF_EXTSTAT_EIRQPIN (1 << 1) /* External interrupt pin (ro) */ +#define SSB_EXTIF_EXTSTAT_GPIOIRQPIN (1 << 2) /* GPIO interrupt pin (ro) */ +#define SSB_EXTIF_PCMCIA_CFG 0x0010 +#define SSB_EXTIF_PCMCIA_MEMWAIT 0x0014 +#define SSB_EXTIF_PCMCIA_ATTRWAIT 0x0018 +#define SSB_EXTIF_PCMCIA_IOWAIT 0x001C +#define SSB_EXTIF_PROG_CFG 0x0020 +#define SSB_EXTIF_PROG_WAITCNT 0x0024 +#define SSB_EXTIF_FLASH_CFG 0x0028 +#define SSB_EXTIF_FLASH_WAITCNT 0x002C +#define SSB_EXTIF_WATCHDOG 0x0040 +#define SSB_EXTIF_CLOCK_N 0x0044 +#define SSB_EXTIF_CLOCK_SB 0x0048 +#define SSB_EXTIF_CLOCK_PCI 0x004C +#define SSB_EXTIF_CLOCK_MII 0x0050 +#define SSB_EXTIF_GPIO_IN 0x0060 +#define SSB_EXTIF_GPIO_OUT_BASE 0x0064 +#define SSB_EXTIF_GPIO_OUTEN_BASE 0x0068 +#define SSB_EXTIF_EJTAG_OUTEN 0x0090 +#define SSB_EXTIF_GPIO_INTPOL 0x0094 +#define SSB_EXTIF_GPIO_INTMASK 0x0098 +#define SSB_EXTIF_UART_DATA 0x0300 +#define SSB_EXTIF_UART_TIMER 0x0310 +#define SSB_EXTIF_UART_FCR 0x0320 +#define SSB_EXTIF_UART_LCR 0x0330 +#define SSB_EXTIF_UART_MCR 0x0340 +#define SSB_EXTIF_UART_LSR 0x0350 +#define SSB_EXTIF_UART_MSR 0x0360 +#define SSB_EXTIF_UART_SCRATCH 0x0370 + + + + +/* pcmcia/prog/flash_config */ +#define SSB_EXTCFG_EN (1 << 0) /* enable */ +#define SSB_EXTCFG_MODE 0xE /* mode */ +#define SSB_EXTCFG_MODE_SHIFT 1 +#define SSB_EXTCFG_MODE_FLASH 0x0 /* flash/asynchronous mode */ +#define SSB_EXTCFG_MODE_SYNC 0x2 /* synchronous mode */ +#define SSB_EXTCFG_MODE_PCMCIA 0x4 /* pcmcia mode */ +#define SSB_EXTCFG_DS16 (1 << 4) /* destsize: 0=8bit, 1=16bit */ +#define SSB_EXTCFG_BSWAP (1 << 5) /* byteswap */ +#define SSB_EXTCFG_CLKDIV 0xC0 /* clock divider */ +#define SSB_EXTCFG_CLKDIV_SHIFT 6 +#define SSB_EXTCFG_CLKDIV_2 0x0 /* backplane/2 */ +#define SSB_EXTCFG_CLKDIV_3 0x40 /* backplane/3 */ +#define SSB_EXTCFG_CLKDIV_4 0x80 /* backplane/4 */ +#define SSB_EXTCFG_CLKEN (1 << 8) /* clock enable */ +#define SSB_EXTCFG_STROBE (1 << 9) /* size/bytestrobe (synch only) */ + +/* pcmcia_memwait */ +#define SSB_PCMCIA_MEMW_0 0x0000003F /* waitcount0 */ +#define SSB_PCMCIA_MEMW_1 0x00001F00 /* waitcount1 */ +#define SSB_PCMCIA_MEMW_1_SHIFT 8 +#define SSB_PCMCIA_MEMW_2 0x001F0000 /* waitcount2 */ +#define SSB_PCMCIA_MEMW_2_SHIFT 16 +#define SSB_PCMCIA_MEMW_3 0x1F000000 /* waitcount3 */ +#define SSB_PCMCIA_MEMW_3_SHIFT 24 + +/* pcmcia_attrwait */ +#define SSB_PCMCIA_ATTW_0 0x0000003F /* waitcount0 */ +#define SSB_PCMCIA_ATTW_1 0x00001F00 /* waitcount1 */ +#define SSB_PCMCIA_ATTW_1_SHIFT 8 +#define SSB_PCMCIA_ATTW_2 0x001F0000 /* waitcount2 */ +#define SSB_PCMCIA_ATTW_2_SHIFT 16 +#define SSB_PCMCIA_ATTW_3 0x1F000000 /* waitcount3 */ +#define SSB_PCMCIA_ATTW_3_SHIFT 24 + +/* pcmcia_iowait */ +#define SSB_PCMCIA_IOW_0 0x0000003F /* waitcount0 */ +#define SSB_PCMCIA_IOW_1 0x00001F00 /* waitcount1 */ +#define SSB_PCMCIA_IOW_1_SHIFT 8 +#define SSB_PCMCIA_IOW_2 0x001F0000 /* waitcount2 */ +#define SSB_PCMCIA_IOW_2_SHIFT 16 +#define SSB_PCMCIA_IOW_3 0x1F000000 /* waitcount3 */ +#define SSB_PCMCIA_IOW_3_SHIFT 24 + +/* prog_waitcount */ 
+#define SSB_PROG_WCNT_0 0x0000001F /* waitcount0 */ +#define SSB_PROG_WCNT_1 0x00001F00 /* waitcount1 */ +#define SSB_PROG_WCNT_1_SHIFT 8 +#define SSB_PROG_WCNT_2 0x001F0000 /* waitcount2 */ +#define SSB_PROG_WCNT_2_SHIFT 16 +#define SSB_PROG_WCNT_3 0x1F000000 /* waitcount3 */ +#define SSB_PROG_WCNT_3_SHIFT 24 + +#define SSB_PROG_W0 0x0000000C +#define SSB_PROG_W1 0x00000A00 +#define SSB_PROG_W2 0x00020000 +#define SSB_PROG_W3 0x01000000 + +/* flash_waitcount */ +#define SSB_FLASH_WCNT_0 0x0000001F /* waitcount0 */ +#define SSB_FLASH_WCNT_1 0x00001F00 /* waitcount1 */ +#define SSB_FLASH_WCNT_1_SHIFT 8 +#define SSB_FLASH_WCNT_2 0x001F0000 /* waitcount2 */ +#define SSB_FLASH_WCNT_2_SHIFT 16 +#define SSB_FLASH_WCNT_3 0x1F000000 /* waitcount3 */ +#define SSB_FLASH_WCNT_3_SHIFT 24 + +/* watchdog */ +#define SSB_EXTIF_WATCHDOG_CLK 48000000 /* Hz */ + + + +#ifdef CONFIG_SSB_DRIVER_EXTIF + +struct ssb_extif { + struct ssb_device *dev; +}; + +static inline bool ssb_extif_available(struct ssb_extif *extif) +{ + return (extif->dev != NULL); +} + +extern void ssb_extif_get_clockcontrol(struct ssb_extif *extif, + u32 *plltype, u32 *n, u32 *m); + +extern void ssb_extif_timing_init(struct ssb_extif *extif, + unsigned long ns); + +u32 ssb_extif_gpio_in(struct ssb_extif *extif, u32 mask); + +void ssb_extif_gpio_out(struct ssb_extif *extif, u32 mask, u32 value); + +void ssb_extif_gpio_outen(struct ssb_extif *extif, u32 mask, u32 value); + +#ifdef CONFIG_SSB_SERIAL +extern int ssb_extif_serial_init(struct ssb_extif *extif, + struct ssb_serial_port *ports); +#endif /* CONFIG_SSB_SERIAL */ + + +#else /* CONFIG_SSB_DRIVER_EXTIF */ +/* extif disabled */ + +struct ssb_extif { +}; + +static inline bool ssb_extif_available(struct ssb_extif *extif) +{ + return 0; +} + +static inline +void ssb_extif_get_clockcontrol(struct ssb_extif *extif, + u32 *plltype, u32 *n, u32 *m) +{ +} + +#endif /* CONFIG_SSB_DRIVER_EXTIF */ +#endif /* LINUX_SSB_EXTIFCORE_H_ */ diff --git a/include/linux/ssb/ssb_driver_mips.h b/include/linux/ssb/ssb_driver_mips.h new file mode 100644 index 0000000000000000000000000000000000000000..5f44e9740cd2a3453da6767778911fb3aa59eab1 --- /dev/null +++ b/include/linux/ssb/ssb_driver_mips.h @@ -0,0 +1,46 @@ +#ifndef LINUX_SSB_MIPSCORE_H_ +#define LINUX_SSB_MIPSCORE_H_ + +#ifdef CONFIG_SSB_DRIVER_MIPS + +struct ssb_device; + +struct ssb_serial_port { + void *regs; + unsigned long clockspeed; + unsigned int irq; + unsigned int baud_base; + unsigned int reg_shift; +}; + + +struct ssb_mipscore { + struct ssb_device *dev; + + int nr_serial_ports; + struct ssb_serial_port serial_ports[4]; + + u8 flash_buswidth; + u32 flash_window; + u32 flash_window_size; +}; + +extern void ssb_mipscore_init(struct ssb_mipscore *mcore); +extern u32 ssb_cpu_clock(struct ssb_mipscore *mcore); + +extern unsigned int ssb_mips_irq(struct ssb_device *dev); + + +#else /* CONFIG_SSB_DRIVER_MIPS */ + +struct ssb_mipscore { +}; + +static inline +void ssb_mipscore_init(struct ssb_mipscore *mcore) +{ +} + +#endif /* CONFIG_SSB_DRIVER_MIPS */ + +#endif /* LINUX_SSB_MIPSCORE_H_ */ diff --git a/include/linux/ssb/ssb_driver_pci.h b/include/linux/ssb/ssb_driver_pci.h new file mode 100644 index 0000000000000000000000000000000000000000..9cfffb7b1a27f6aaab74b0bc5dd5795c7e2aab85 --- /dev/null +++ b/include/linux/ssb/ssb_driver_pci.h @@ -0,0 +1,106 @@ +#ifndef LINUX_SSB_PCICORE_H_ +#define LINUX_SSB_PCICORE_H_ + +#ifdef CONFIG_SSB_DRIVER_PCICORE + +/* PCI core registers. 
*/ +#define SSB_PCICORE_CTL 0x0000 /* PCI Control */ +#define SSB_PCICORE_CTL_RST_OE 0x00000001 /* PCI_RESET Output Enable */ +#define SSB_PCICORE_CTL_RST 0x00000002 /* PCI_RESET driven out to pin */ +#define SSB_PCICORE_CTL_CLK_OE 0x00000004 /* Clock gate Output Enable */ +#define SSB_PCICORE_CTL_CLK 0x00000008 /* Gate for clock driven out to pin */ +#define SSB_PCICORE_ARBCTL 0x0010 /* PCI Arbiter Control */ +#define SSB_PCICORE_ARBCTL_INTERN 0x00000001 /* Use internal arbiter */ +#define SSB_PCICORE_ARBCTL_EXTERN 0x00000002 /* Use external arbiter */ +#define SSB_PCICORE_ARBCTL_PARKID 0x00000006 /* Mask, selects which agent is parked on an idle bus */ +#define SSB_PCICORE_ARBCTL_PARKID_LAST 0x00000000 /* Last requestor */ +#define SSB_PCICORE_ARBCTL_PARKID_4710 0x00000002 /* 4710 */ +#define SSB_PCICORE_ARBCTL_PARKID_EXT0 0x00000004 /* External requestor 0 */ +#define SSB_PCICORE_ARBCTL_PARKID_EXT1 0x00000006 /* External requestor 1 */ +#define SSB_PCICORE_ISTAT 0x0020 /* Interrupt status */ +#define SSB_PCICORE_ISTAT_INTA 0x00000001 /* PCI INTA# */ +#define SSB_PCICORE_ISTAT_INTB 0x00000002 /* PCI INTB# */ +#define SSB_PCICORE_ISTAT_SERR 0x00000004 /* PCI SERR# (write to clear) */ +#define SSB_PCICORE_ISTAT_PERR 0x00000008 /* PCI PERR# (write to clear) */ +#define SSB_PCICORE_ISTAT_PME 0x00000010 /* PCI PME# */ +#define SSB_PCICORE_IMASK 0x0024 /* Interrupt mask */ +#define SSB_PCICORE_IMASK_INTA 0x00000001 /* PCI INTA# */ +#define SSB_PCICORE_IMASK_INTB 0x00000002 /* PCI INTB# */ +#define SSB_PCICORE_IMASK_SERR 0x00000004 /* PCI SERR# */ +#define SSB_PCICORE_IMASK_PERR 0x00000008 /* PCI PERR# */ +#define SSB_PCICORE_IMASK_PME 0x00000010 /* PCI PME# */ +#define SSB_PCICORE_MBOX 0x0028 /* Backplane to PCI Mailbox */ +#define SSB_PCICORE_MBOX_F0_0 0x00000100 /* PCI function 0, INT 0 */ +#define SSB_PCICORE_MBOX_F0_1 0x00000200 /* PCI function 0, INT 1 */ +#define SSB_PCICORE_MBOX_F1_0 0x00000400 /* PCI function 1, INT 0 */ +#define SSB_PCICORE_MBOX_F1_1 0x00000800 /* PCI function 1, INT 1 */ +#define SSB_PCICORE_MBOX_F2_0 0x00001000 /* PCI function 2, INT 0 */ +#define SSB_PCICORE_MBOX_F2_1 0x00002000 /* PCI function 2, INT 1 */ +#define SSB_PCICORE_MBOX_F3_0 0x00004000 /* PCI function 3, INT 0 */ +#define SSB_PCICORE_MBOX_F3_1 0x00008000 /* PCI function 3, INT 1 */ +#define SSB_PCICORE_BCAST_ADDR 0x0050 /* Backplane Broadcast Address */ +#define SSB_PCICORE_BCAST_ADDR_MASK 0x000000FF +#define SSB_PCICORE_BCAST_DATA 0x0054 /* Backplane Broadcast Data */ +#define SSB_PCICORE_GPIO_IN 0x0060 /* rev >= 2 only */ +#define SSB_PCICORE_GPIO_OUT 0x0064 /* rev >= 2 only */ +#define SSB_PCICORE_GPIO_ENABLE 0x0068 /* rev >= 2 only */ +#define SSB_PCICORE_GPIO_CTL 0x006C /* rev >= 2 only */ +#define SSB_PCICORE_SBTOPCI0 0x0100 /* Backplane to PCI translation 0 (sbtopci0) */ +#define SSB_PCICORE_SBTOPCI0_MASK 0xFC000000 +#define SSB_PCICORE_SBTOPCI1 0x0104 /* Backplane to PCI translation 1 (sbtopci1) */ +#define SSB_PCICORE_SBTOPCI1_MASK 0xFC000000 +#define SSB_PCICORE_SBTOPCI2 0x0108 /* Backplane to PCI translation 2 (sbtopci2) */ +#define SSB_PCICORE_SBTOPCI2_MASK 0xC0000000 + +/* SBtoPCIx */ +#define SSB_PCICORE_SBTOPCI_MEM 0x00000000 +#define SSB_PCICORE_SBTOPCI_IO 0x00000001 +#define SSB_PCICORE_SBTOPCI_CFG0 0x00000002 +#define SSB_PCICORE_SBTOPCI_CFG1 0x00000003 +#define SSB_PCICORE_SBTOPCI_PREF 0x00000004 /* Prefetch enable */ +#define SSB_PCICORE_SBTOPCI_BURST 0x00000008 /* Burst enable */ +#define SSB_PCICORE_SBTOPCI_MRM 0x00000020 /* Memory Read Multiple */ +#define SSB_PCICORE_SBTOPCI_RC 
0x00000030 /* Read Command mask (rev >= 11) */ +#define SSB_PCICORE_SBTOPCI_RC_READ 0x00000000 /* Memory read */ +#define SSB_PCICORE_SBTOPCI_RC_READL 0x00000010 /* Memory read line */ +#define SSB_PCICORE_SBTOPCI_RC_READM 0x00000020 /* Memory read multiple */ + + +/* PCIcore specific boardflags */ +#define SSB_PCICORE_BFL_NOPCI 0x00000400 /* Board leaves PCI floating */ + + +struct ssb_pcicore { + struct ssb_device *dev; + u8 setup_done:1; + u8 hostmode:1; + u8 cardbusmode:1; +}; + +extern void ssb_pcicore_init(struct ssb_pcicore *pc); + +/* Enable IRQ routing for a specific device */ +extern int ssb_pcicore_dev_irqvecs_enable(struct ssb_pcicore *pc, + struct ssb_device *dev); + + +#else /* CONFIG_SSB_DRIVER_PCICORE */ + + +struct ssb_pcicore { +}; + +static inline +void ssb_pcicore_init(struct ssb_pcicore *pc) +{ +} + +static inline +int ssb_pcicore_dev_irqvecs_enable(struct ssb_pcicore *pc, + struct ssb_device *dev) +{ + return 0; +} + +#endif /* CONFIG_SSB_DRIVER_PCICORE */ +#endif /* LINUX_SSB_PCICORE_H_ */ diff --git a/include/linux/ssb/ssb_regs.h b/include/linux/ssb/ssb_regs.h new file mode 100644 index 0000000000000000000000000000000000000000..47c7c71a5acf844d5dbb92e7cfba2f2dfb7d3b2a --- /dev/null +++ b/include/linux/ssb/ssb_regs.h @@ -0,0 +1,292 @@ +#ifndef LINUX_SSB_REGS_H_ +#define LINUX_SSB_REGS_H_ + + +/* SiliconBackplane Address Map. + * All regions may not exist on all chips. + */ +#define SSB_SDRAM_BASE 0x00000000U /* Physical SDRAM */ +#define SSB_PCI_MEM 0x08000000U /* Host Mode sb2pcitranslation0 (64 MB) */ +#define SSB_PCI_CFG 0x0c000000U /* Host Mode sb2pcitranslation1 (64 MB) */ +#define SSB_SDRAM_SWAPPED 0x10000000U /* Byteswapped Physical SDRAM */ +#define SSB_ENUM_BASE 0x18000000U /* Enumeration space base */ +#define SSB_ENUM_LIMIT 0x18010000U /* Enumeration space limit */ + +#define SSB_FLASH2 0x1c000000U /* Flash Region 2 (region 1 shadowed here) */ +#define SSB_FLASH2_SZ 0x02000000U /* Size of Flash Region 2 */ + +#define SSB_EXTIF_BASE 0x1f000000U /* External Interface region base address */ +#define SSB_FLASH1 0x1fc00000U /* Flash Region 1 */ +#define SSB_FLASH1_SZ 0x00400000U /* Size of Flash Region 1 */ + +#define SSB_PCI_DMA 0x40000000U /* Client Mode sb2pcitranslation2 (1 GB) */ +#define SSB_PCI_DMA_SZ 0x40000000U /* Client Mode sb2pcitranslation2 size in bytes */ +#define SSB_PCIE_DMA_L32 0x00000000U /* PCIE Client Mode sb2pcitranslation2 (2 ZettaBytes), low 32 bits */ +#define SSB_PCIE_DMA_H32 0x80000000U /* PCIE Client Mode sb2pcitranslation2 (2 ZettaBytes), high 32 bits */ +#define SSB_EUART (SSB_EXTIF_BASE + 0x00800000) +#define SSB_LED (SSB_EXTIF_BASE + 0x00900000) + + +/* Enumeration space constants */ +#define SSB_CORE_SIZE 0x1000 /* Size of a core MMIO area */ +#define SSB_MAX_NR_CORES ((SSB_ENUM_LIMIT - SSB_ENUM_BASE) / SSB_CORE_SIZE) + + +/* mips address */ +#define SSB_EJTAG 0xff200000 /* MIPS EJTAG space (2M) */ + + +/* SSB PCI config space registers. 
*/ +#define SSB_PMCSR 0x44 +#define SSB_PE 0x100 +#define SSB_BAR0_WIN 0x80 /* Backplane address space 0 */ +#define SSB_BAR1_WIN 0x84 /* Backplane address space 1 */ +#define SSB_SPROMCTL 0x88 /* SPROM control */ +#define SSB_SPROMCTL_WE 0x10 /* SPROM write enable */ +#define SSB_BAR1_CONTROL 0x8c /* Address space 1 burst control */ +#define SSB_PCI_IRQS 0x90 /* PCI interrupts */ +#define SSB_PCI_IRQMASK 0x94 /* PCI IRQ control and mask (pcirev >= 6 only) */ +#define SSB_BACKPLANE_IRQS 0x98 /* Backplane Interrupts */ +#define SSB_GPIO_IN 0xB0 /* GPIO Input (pcirev >= 3 only) */ +#define SSB_GPIO_OUT 0xB4 /* GPIO Output (pcirev >= 3 only) */ +#define SSB_GPIO_OUT_ENABLE 0xB8 /* GPIO Output Enable/Disable (pcirev >= 3 only) */ +#define SSB_GPIO_SCS 0x10 /* PCI config space bit 4 for 4306c0 slow clock source */ +#define SSB_GPIO_HWRAD 0x20 /* PCI config space GPIO 13 for hw radio disable */ +#define SSB_GPIO_XTAL 0x40 /* PCI config space GPIO 14 for Xtal powerup */ +#define SSB_GPIO_PLL 0x80 /* PCI config space GPIO 15 for PLL powerdown */ + + +#define SSB_BAR0_MAX_RETRIES 50 + +/* Silicon backplane configuration register definitions */ +#define SSB_IPSFLAG 0x0F08 +#define SSB_IPSFLAG_IRQ1 0x0000003F /* which sbflags get routed to mips interrupt 1 */ +#define SSB_IPSFLAG_IRQ1_SHIFT 0 +#define SSB_IPSFLAG_IRQ2 0x00003F00 /* which sbflags get routed to mips interrupt 2 */ +#define SSB_IPSFLAG_IRQ2_SHIFT 8 +#define SSB_IPSFLAG_IRQ3 0x003F0000 /* which sbflags get routed to mips interrupt 3 */ +#define SSB_IPSFLAG_IRQ3_SHIFT 16 +#define SSB_IPSFLAG_IRQ4 0x3F000000 /* which sbflags get routed to mips interrupt 4 */ +#define SSB_IPSFLAG_IRQ4_SHIFT 24 +#define SSB_TPSFLAG 0x0F18 +#define SSB_TPSFLAG_BPFLAG 0x0000003F /* Backplane flag # */ +#define SSB_TPSFLAG_ALWAYSIRQ 0x00000040 /* IRQ is always sent on the Backplane */ +#define SSB_TMERRLOGA 0x0F48 +#define SSB_TMERRLOG 0x0F50 +#define SSB_ADMATCH3 0x0F60 +#define SSB_ADMATCH2 0x0F68 +#define SSB_ADMATCH1 0x0F70 +#define SSB_IMSTATE 0x0F90 /* SB Initiator Agent State */ +#define SSB_IMSTATE_PC 0x0000000f /* Pipe Count */ +#define SSB_IMSTATE_AP_MASK 0x00000030 /* Arbitration Priority */ +#define SSB_IMSTATE_AP_BOTH 0x00000000 /* Use both timeslices and token */ +#define SSB_IMSTATE_AP_TS 0x00000010 /* Use timeslices only */ +#define SSB_IMSTATE_AP_TK 0x00000020 /* Use token only */ +#define SSB_IMSTATE_AP_RSV 0x00000030 /* Reserved */ +#define SSB_IMSTATE_IBE 0x00020000 /* In Band Error */ +#define SSB_IMSTATE_TO 0x00040000 /* Timeout */ +#define SSB_INTVEC 0x0F94 /* SB Interrupt Mask */ +#define SSB_INTVEC_PCI 0x00000001 /* Enable interrupts for PCI */ +#define SSB_INTVEC_ENET0 0x00000002 /* Enable interrupts for enet 0 */ +#define SSB_INTVEC_ILINE20 0x00000004 /* Enable interrupts for iline20 */ +#define SSB_INTVEC_CODEC 0x00000008 /* Enable interrupts for v90 codec */ +#define SSB_INTVEC_USB 0x00000010 /* Enable interrupts for usb */ +#define SSB_INTVEC_EXTIF 0x00000020 /* Enable interrupts for external i/f */ +#define SSB_INTVEC_ENET1 0x00000040 /* Enable interrupts for enet 1 */ +#define SSB_TMSLOW 0x0F98 /* SB Target State Low */ +#define SSB_TMSLOW_RESET 0x00000001 /* Reset */ +#define SSB_TMSLOW_REJECT_22 0x00000002 /* Reject (Backplane rev 2.2) */ +#define SSB_TMSLOW_REJECT_23 0x00000004 /* Reject (Backplane rev 2.3) */ +#define SSB_TMSLOW_CLOCK 0x00010000 /* Clock Enable */ +#define SSB_TMSLOW_FGC 0x00020000 /* Force Gated Clocks On */ +#define SSB_TMSLOW_PE 0x40000000 /* Power Management Enable */ +#define SSB_TMSLOW_BE 0x80000000 /* 
BIST Enable */ +#define SSB_TMSHIGH 0x0F9C /* SB Target State High */ +#define SSB_TMSHIGH_SERR 0x00000001 /* S-error */ +#define SSB_TMSHIGH_INT 0x00000002 /* Interrupt */ +#define SSB_TMSHIGH_BUSY 0x00000004 /* Busy */ +#define SSB_TMSHIGH_TO 0x00000020 /* Timeout. Backplane rev >= 2.3 only */ +#define SSB_TMSHIGH_COREFL 0x1FFF0000 /* Core specific flags */ +#define SSB_TMSHIGH_COREFL_SHIFT 16 +#define SSB_TMSHIGH_DMA64 0x10000000 /* 64bit DMA supported */ +#define SSB_TMSHIGH_GCR 0x20000000 /* Gated Clock Request */ +#define SSB_TMSHIGH_BISTF 0x40000000 /* BIST Failed */ +#define SSB_TMSHIGH_BISTD 0x80000000 /* BIST Done */ +#define SSB_BWA0 0x0FA0 +#define SSB_IMCFGLO 0x0FA8 +#define SSB_IMCFGLO_SERTO 0x00000007 /* Service timeout */ +#define SSB_IMCFGLO_REQTO 0x00000070 /* Request timeout */ +#define SSB_IMCFGLO_REQTO_SHIFT 4 +#define SSB_IMCFGLO_CONNID 0x00FF0000 /* Connection ID */ +#define SSB_IMCFGLO_CONNID_SHIFT 16 +#define SSB_IMCFGHI 0x0FAC +#define SSB_ADMATCH0 0x0FB0 +#define SSB_TMCFGLO 0x0FB8 +#define SSB_TMCFGHI 0x0FBC +#define SSB_BCONFIG 0x0FC0 +#define SSB_BSTATE 0x0FC8 +#define SSB_ACTCFG 0x0FD8 +#define SSB_FLAGST 0x0FE8 +#define SSB_IDLOW 0x0FF8 +#define SSB_IDLOW_CFGSP 0x00000003 /* Config Space */ +#define SSB_IDLOW_ADDRNGE 0x00000038 /* Address Ranges supported */ +#define SSB_IDLOW_ADDRNGE_SHIFT 3 +#define SSB_IDLOW_SYNC 0x00000040 +#define SSB_IDLOW_INITIATOR 0x00000080 +#define SSB_IDLOW_MIBL 0x00000F00 /* Minimum Backplane latency */ +#define SSB_IDLOW_MIBL_SHIFT 8 +#define SSB_IDLOW_MABL 0x0000F000 /* Maximum Backplane latency */ +#define SSB_IDLOW_MABL_SHIFT 12 +#define SSB_IDLOW_TIF 0x00010000 /* This Initiator is first */ +#define SSB_IDLOW_CCW 0x000C0000 /* Cycle counter width */ +#define SSB_IDLOW_CCW_SHIFT 18 +#define SSB_IDLOW_TPT 0x00F00000 /* Target ports */ +#define SSB_IDLOW_TPT_SHIFT 20 +#define SSB_IDLOW_INITP 0x0F000000 /* Initiator ports */ +#define SSB_IDLOW_INITP_SHIFT 24 +#define SSB_IDLOW_SSBREV 0xF0000000 /* Sonics Backplane Revision code */ +#define SSB_IDLOW_SSBREV_22 0x00000000 /* <= 2.2 */ +#define SSB_IDLOW_SSBREV_23 0x10000000 /* 2.3 */ +#define SSB_IDHIGH 0x0FFC /* SB Identification High */ +#define SSB_IDHIGH_RCLO 0x0000000F /* Revision Code (low part) */ +#define SSB_IDHIGH_CC 0x00008FF0 /* Core Code */ +#define SSB_IDHIGH_CC_SHIFT 4 +#define SSB_IDHIGH_RCHI 0x00007000 /* Revision Code (high part) */ +#define SSB_IDHIGH_RCHI_SHIFT 8 /* yes, shift 8 is right */ +#define SSB_IDHIGH_VC 0xFFFF0000 /* Vendor Code */ +#define SSB_IDHIGH_VC_SHIFT 16 + +/* SPROM shadow area. If not otherwise noted, fields are + * two bytes wide. Note that the SPROM can _only_ be read + * in two-byte quantinies. 
+ */ +#define SSB_SPROMSIZE_WORDS 64 +#define SSB_SPROMSIZE_BYTES (SSB_SPROMSIZE_WORDS * sizeof(u16)) +#define SSB_SPROM_BASE 0x1000 +#define SSB_SPROM_REVISION 0x107E +#define SSB_SPROM_REVISION_REV 0x00FF /* SPROM Revision number */ +#define SSB_SPROM_REVISION_CRC 0xFF00 /* SPROM CRC8 value */ +#define SSB_SPROM_REVISION_CRC_SHIFT 8 +/* SPROM Revision 1 */ +#define SSB_SPROM1_SPID 0x1004 /* Subsystem Product ID for PCI */ +#define SSB_SPROM1_SVID 0x1006 /* Subsystem Vendor ID for PCI */ +#define SSB_SPROM1_PID 0x1008 /* Product ID for PCI */ +#define SSB_SPROM1_IL0MAC 0x1048 /* 6 bytes MAC address for 802.11b/g */ +#define SSB_SPROM1_ET0MAC 0x104E /* 6 bytes MAC address for Ethernet */ +#define SSB_SPROM1_ET1MAC 0x1054 /* 6 bytes MAC address for 802.11a */ +#define SSB_SPROM1_ETHPHY 0x105A /* Ethernet PHY settings */ +#define SSB_SPROM1_ETHPHY_ET0A 0x001F /* MII Address for enet0 */ +#define SSB_SPROM1_ETHPHY_ET1A 0x03E0 /* MII Address for enet1 */ +#define SSB_SPROM1_ETHPHY_ET1A_SHIFT 5 +#define SSB_SPROM1_ETHPHY_ET0M (1<<14) /* MDIO for enet0 */ +#define SSB_SPROM1_ETHPHY_ET1M (1<<15) /* MDIO for enet1 */ +#define SSB_SPROM1_BINF 0x105C /* Board info */ +#define SSB_SPROM1_BINF_BREV 0x00FF /* Board Revision */ +#define SSB_SPROM1_BINF_CCODE 0x0F00 /* Country Code */ +#define SSB_SPROM1_BINF_CCODE_SHIFT 8 +#define SSB_SPROM1_BINF_ANTA 0x3000 /* Available A-PHY antennas */ +#define SSB_SPROM1_BINF_ANTA_SHIFT 12 +#define SSB_SPROM1_BINF_ANTBG 0xC000 /* Available B-PHY antennas */ +#define SSB_SPROM1_BINF_ANTBG_SHIFT 14 +#define SSB_SPROM1_PA0B0 0x105E +#define SSB_SPROM1_PA0B1 0x1060 +#define SSB_SPROM1_PA0B2 0x1062 +#define SSB_SPROM1_GPIOA 0x1064 /* General Purpose IO pins 0 and 1 */ +#define SSB_SPROM1_GPIOA_P0 0x00FF /* Pin 0 */ +#define SSB_SPROM1_GPIOA_P1 0xFF00 /* Pin 1 */ +#define SSB_SPROM1_GPIOA_P1_SHIFT 8 +#define SSB_SPROM1_GPIOB 0x1066 /* General Purpuse IO pins 2 and 3 */ +#define SSB_SPROM1_GPIOB_P2 0x00FF /* Pin 2 */ +#define SSB_SPROM1_GPIOB_P3 0xFF00 /* Pin 3 */ +#define SSB_SPROM1_GPIOB_P3_SHIFT 8 +#define SSB_SPROM1_MAXPWR 0x1068 /* Power Amplifier Max Power */ +#define SSB_SPROM1_MAXPWR_BG 0x00FF /* B-PHY and G-PHY (in dBm Q5.2) */ +#define SSB_SPROM1_MAXPWR_A 0xFF00 /* A-PHY (in dBm Q5.2) */ +#define SSB_SPROM1_MAXPWR_A_SHIFT 8 +#define SSB_SPROM1_PA1B0 0x106A +#define SSB_SPROM1_PA1B1 0x106C +#define SSB_SPROM1_PA1B2 0x106E +#define SSB_SPROM1_ITSSI 0x1070 /* Idle TSSI Target */ +#define SSB_SPROM1_ITSSI_BG 0x00FF /* B-PHY and G-PHY*/ +#define SSB_SPROM1_ITSSI_A 0xFF00 /* A-PHY */ +#define SSB_SPROM1_ITSSI_A_SHIFT 8 +#define SSB_SPROM1_BFLLO 0x1072 /* Boardflags (low 16 bits) */ +#define SSB_SPROM1_AGAIN 0x1074 /* Antenna Gain (in dBm Q5.2) */ +#define SSB_SPROM1_AGAIN_A 0x00FF /* A-PHY */ +#define SSB_SPROM1_AGAIN_BG 0xFF00 /* B-PHY and G-PHY */ +#define SSB_SPROM1_AGAIN_BG_SHIFT 8 +#define SSB_SPROM1_OEM 0x1076 /* 8 bytes OEM string (rev 1 only) */ +/* SPROM Revision 2 (inherits from rev 1) */ +#define SSB_SPROM2_BFLHI 0x1038 /* Boardflags (high 16 bits) */ +#define SSB_SPROM2_MAXP_A 0x103A /* A-PHY Max Power */ +#define SSB_SPROM2_MAXP_A_HI 0x00FF /* Max Power High */ +#define SSB_SPROM2_MAXP_A_LO 0xFF00 /* Max Power Low */ +#define SSB_SPROM2_MAXP_A_LO_SHIFT 8 +#define SSB_SPROM2_PA1LOB0 0x103C /* A-PHY PowerAmplifier Low Settings */ +#define SSB_SPROM2_PA1LOB1 0x103E /* A-PHY PowerAmplifier Low Settings */ +#define SSB_SPROM2_PA1LOB2 0x1040 /* A-PHY PowerAmplifier Low Settings */ +#define SSB_SPROM2_PA1HIB0 0x1042 /* A-PHY PowerAmplifier High Settings */ 
+#define SSB_SPROM2_PA1HIB1 0x1044 /* A-PHY PowerAmplifier High Settings */ +#define SSB_SPROM2_PA1HIB2 0x1046 /* A-PHY PowerAmplifier High Settings */ +#define SSB_SPROM2_OPO 0x1078 /* OFDM Power Offset from CCK Level */ +#define SSB_SPROM2_OPO_VALUE 0x00FF +#define SSB_SPROM2_OPO_UNUSED 0xFF00 +#define SSB_SPROM2_CCODE 0x107C /* Two char Country Code */ +/* SPROM Revision 3 (inherits from rev 2) */ +#define SSB_SPROM3_OFDMAPO 0x102C /* A-PHY OFDM Mid Power Offset (4 bytes, BigEndian) */ +#define SSB_SPROM3_OFDMALPO 0x1030 /* A-PHY OFDM Low Power Offset (4 bytes, BigEndian) */ +#define SSB_SPROM3_OFDMAHPO 0x1034 /* A-PHY OFDM High Power Offset (4 bytes, BigEndian) */ +#define SSB_SPROM3_GPIOLDC 0x1042 /* GPIO LED Powersave Duty Cycle (4 bytes, BigEndian) */ +#define SSB_SPROM3_GPIOLDC_OFF 0x0000FF00 /* Off Count */ +#define SSB_SPROM3_GPIOLDC_OFF_SHIFT 8 +#define SSB_SPROM3_GPIOLDC_ON 0x00FF0000 /* On Count */ +#define SSB_SPROM3_GPIOLDC_ON_SHIFT 16 +#define SSB_SPROM3_CCKPO 0x1078 /* CCK Power Offset */ +#define SSB_SPROM3_CCKPO_1M 0x000F /* 1M Rate PO */ +#define SSB_SPROM3_CCKPO_2M 0x00F0 /* 2M Rate PO */ +#define SSB_SPROM3_CCKPO_2M_SHIFT 4 +#define SSB_SPROM3_CCKPO_55M 0x0F00 /* 5.5M Rate PO */ +#define SSB_SPROM3_CCKPO_55M_SHIFT 8 +#define SSB_SPROM3_CCKPO_11M 0xF000 /* 11M Rate PO */ +#define SSB_SPROM3_CCKPO_11M_SHIFT 12 +#define SSB_SPROM3_OFDMGPO 0x107A /* G-PHY OFDM Power Offset (4 bytes, BigEndian) */ + +/* Values for SSB_SPROM1_BINF_CCODE */ +enum { + SSB_SPROM1CCODE_WORLD = 0, + SSB_SPROM1CCODE_THAILAND, + SSB_SPROM1CCODE_ISRAEL, + SSB_SPROM1CCODE_JORDAN, + SSB_SPROM1CCODE_CHINA, + SSB_SPROM1CCODE_JAPAN, + SSB_SPROM1CCODE_USA_CANADA_ANZ, + SSB_SPROM1CCODE_EUROPE, + SSB_SPROM1CCODE_USA_LOW, + SSB_SPROM1CCODE_JAPAN_HIGH, + SSB_SPROM1CCODE_ALL, + SSB_SPROM1CCODE_NONE, +}; + +/* Address-Match values and masks (SSB_ADMATCHxxx) */ +#define SSB_ADM_TYPE 0x00000003 /* Address type */ +#define SSB_ADM_TYPE0 0 +#define SSB_ADM_TYPE1 1 +#define SSB_ADM_TYPE2 2 +#define SSB_ADM_AD64 0x00000004 +#define SSB_ADM_SZ0 0x000000F8 /* Type0 size */ +#define SSB_ADM_SZ0_SHIFT 3 +#define SSB_ADM_SZ1 0x000001F8 /* Type1 size */ +#define SSB_ADM_SZ1_SHIFT 3 +#define SSB_ADM_SZ2 0x000001F8 /* Type2 size */ +#define SSB_ADM_SZ2_SHIFT 3 +#define SSB_ADM_EN 0x00000400 /* Enable */ +#define SSB_ADM_NEG 0x00000800 /* Negative decode */ +#define SSB_ADM_BASE0 0xFFFFFF00 /* Type0 base address */ +#define SSB_ADM_BASE0_SHIFT 8 +#define SSB_ADM_BASE1 0xFFFFF000 /* Type1 base address for the core */ +#define SSB_ADM_BASE1_SHIFT 12 +#define SSB_ADM_BASE2 0xFFFF0000 /* Type2 base address for the core */ +#define SSB_ADM_BASE2_SHIFT 16 + + +#endif /* LINUX_SSB_REGS_H_ */ diff --git a/include/linux/tc_act/tc_nat.h b/include/linux/tc_act/tc_nat.h new file mode 100644 index 0000000000000000000000000000000000000000..e7cf31e8ba796d1a9bd42b71da8196e95540d3b4 --- /dev/null +++ b/include/linux/tc_act/tc_nat.h @@ -0,0 +1,29 @@ +#ifndef __LINUX_TC_NAT_H +#define __LINUX_TC_NAT_H + +#include +#include + +#define TCA_ACT_NAT 9 + +enum +{ + TCA_NAT_UNSPEC, + TCA_NAT_PARMS, + TCA_NAT_TM, + __TCA_NAT_MAX +}; +#define TCA_NAT_MAX (__TCA_NAT_MAX - 1) + +#define TCA_NAT_FLAG_EGRESS 1 + +struct tc_nat +{ + tc_gen; + __be32 old_addr; + __be32 new_addr; + __be32 mask; + __u32 flags; +}; + +#endif diff --git a/include/linux/tcp.h b/include/linux/tcp.h index c6b9f92e8289949bde61f0ea384f48349d5a083f..c5b94c1a5ee288885e639182a34484aa090ceeb1 100644 --- a/include/linux/tcp.h +++ b/include/linux/tcp.h @@ -304,7 +304,6 @@ struct tcp_sock 
{ u32 rtt_seq; /* sequence number to update rttvar */ u32 packets_out; /* Packets which are "in flight" */ - u32 left_out; /* Packets which leaved network */ u32 retrans_out; /* Retransmitted packets out */ /* * Options received (usually on last packet, some only on SYN packets). @@ -333,6 +332,9 @@ struct tcp_sock { struct tcp_sack_block_wire recv_sack_cache[4]; + u32 highest_sack; /* Start seq of globally highest revd SACK + * (validity guaranteed only if sacked_out > 0) */ + /* from STCP, retrans queue hinting */ struct sk_buff* lost_skb_hint; @@ -341,10 +343,12 @@ struct tcp_sock { struct sk_buff *forward_skb_hint; struct sk_buff *fastpath_skb_hint; - int fastpath_cnt_hint; + int fastpath_cnt_hint; /* Lags behind by current skb's pcount + * compared to respective fackets_out */ int lost_cnt_hint; int retransmit_cnt_hint; - int forward_cnt_hint; + + u32 lost_retrans_low; /* Sent seq after any rxmit (lowest) */ u16 advmss; /* Advertised MSS */ u16 prior_ssthresh; /* ssthresh saved at recovery start */ diff --git a/include/linux/zlib.h b/include/linux/zlib.h index 9e3192a7dc6fa7ca7bf6746766234cf9261ed382..40c49cb3eb518355f14d05ec8d043e00f1bb45c3 100644 --- a/include/linux/zlib.h +++ b/include/linux/zlib.h @@ -82,7 +82,7 @@ struct internal_state; typedef struct z_stream_s { - Byte *next_in; /* next input byte */ + const Byte *next_in; /* next input byte */ uInt avail_in; /* number of bytes available at next_in */ uLong total_in; /* total nb of input bytes read so far */ @@ -699,4 +699,8 @@ extern int zlib_inflateInit2(z_streamp strm, int windowBits); struct internal_state {int dummy;}; /* hack for buggy compilers */ #endif +/* Utility function: initialize zlib, unpack binary blob, clean up zlib, + * return len or negative error code. */ +extern int zlib_inflate_blob(void *dst, unsigned dst_sz, const void *src, unsigned src_sz); + #endif /* _ZLIB_H */ diff --git a/include/net/9p/9p.h b/include/net/9p/9p.h index 88884d39f28f76616554f7d9804bb43084658677..7726ff41c3e68722026bd91469fe2148c633c1e2 100644 --- a/include/net/9p/9p.h +++ b/include/net/9p/9p.h @@ -412,6 +412,18 @@ int p9_idpool_check(int id, struct p9_idpool *p); int p9_error_init(void); int p9_errstr2errno(char *, int); + +#ifdef CONFIG_SYSCTL int __init p9_sysctl_register(void); void __exit p9_sysctl_unregister(void); +#else +static inline int p9_sysctl_register(void) +{ + return 0; +} +static inline void p9_sysctl_unregister(void) +{ +} +#endif + #endif /* NET_9P_H */ diff --git a/include/net/ah.h b/include/net/ah.h index 8f257c1599028e94128e2734f3ad2caa2082dd57..ae1c322f42429100a21eb623691a293b71b20da0 100644 --- a/include/net/ah.h +++ b/include/net/ah.h @@ -9,8 +9,6 @@ struct ah_data { - u8 *key; - int key_len; u8 *work_icv; int icv_full_len; int icv_trunc_len; @@ -40,4 +38,11 @@ static inline int ah_mac_digest(struct ah_data *ahp, struct sk_buff *skb, return err; } +struct ip_auth_hdr; + +static inline struct ip_auth_hdr *ip_auth_hdr(const struct sk_buff *skb) +{ + return (struct ip_auth_hdr *)skb_transport_header(skb); +} + #endif diff --git a/include/net/ax25.h b/include/net/ax25.h index 99a4e364c74ae6db4efa55849890617e11cc7258..4e3cd93f81fcf0418f1fea085afcc91008d7f992 100644 --- a/include/net/ax25.h +++ b/include/net/ax25.h @@ -363,8 +363,11 @@ extern int ax25_rx_iframe(ax25_cb *, struct sk_buff *); extern int ax25_kiss_rcv(struct sk_buff *, struct net_device *, struct packet_type *, struct net_device *); /* ax25_ip.c */ -extern int ax25_hard_header(struct sk_buff *, struct net_device *, unsigned short, void *, void *, 
unsigned int); +extern int ax25_hard_header(struct sk_buff *, struct net_device *, + unsigned short, const void *, + const void *, unsigned int); extern int ax25_rebuild_header(struct sk_buff *); +extern const struct header_ops ax25_header_ops; /* ax25_out.c */ extern ax25_cb *ax25_send_frame(struct sk_buff *, int, ax25_address *, ax25_address *, ax25_digi *, struct net_device *); diff --git a/include/net/ax88796.h b/include/net/ax88796.h index ee786a043b3d0ae48d35a622db9f8f41a56bbea4..51329dae44e6db615baf9d86cfa2010aa7004391 100644 --- a/include/net/ax88796.h +++ b/include/net/ax88796.h @@ -14,6 +14,7 @@ #define AXFLG_HAS_EEPROM (1<<0) #define AXFLG_MAC_FROMDEV (1<<1) /* device already has MAC */ +#define AXFLG_HAS_93CX6 (1<<2) /* use eeprom_93cx6 driver */ struct ax_plat_data { unsigned int flags; diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h index 7edaef6b29d63b4c6a5fd7af2c4c45a0e1ce5098..d30960e1755c64d32bbd651b195265c1aacca0d5 100644 --- a/include/net/cfg80211.h +++ b/include/net/cfg80211.h @@ -3,15 +3,15 @@ #include #include +#include #include /* * 802.11 configuration in-kernel interface * - * Copyright 2006 Johannes Berg + * Copyright 2006, 2007 Johannes Berg */ - /* Radiotap header iteration * implemented in net/wireless/radiotap.c * docs in Documentation/networking/radiotap-headers.txt @@ -68,11 +68,16 @@ struct wiphy; * @add_virtual_intf: create a new virtual interface with the given name * * @del_virtual_intf: remove the virtual interface determined by ifindex. + * + * @change_virtual_intf: change type of virtual interface + * */ struct cfg80211_ops { int (*add_virtual_intf)(struct wiphy *wiphy, char *name, - unsigned int type); + enum nl80211_iftype type); int (*del_virtual_intf)(struct wiphy *wiphy, int ifindex); + int (*change_virtual_intf)(struct wiphy *wiphy, int ifindex, + enum nl80211_iftype type); }; #endif /* __NET_CFG80211_H */ diff --git a/include/net/dn_route.h b/include/net/dn_route.h index c10e8e7e59a7a2b0a5c3aa4adc28f2b79d20036b..60c9f22d86943f0aa19ecdc18d513a24e763b9b1 100644 --- a/include/net/dn_route.h +++ b/include/net/dn_route.h @@ -100,8 +100,7 @@ static inline void dn_rt_finish_output(struct sk_buff *skb, char *dst, char *src if ((dev->type != ARPHRD_ETHER) && (dev->type != ARPHRD_LOOPBACK)) dst = NULL; - if (!dev->hard_header || (dev->hard_header(skb, dev, ETH_P_DNA_RT, - dst, src, skb->len) >= 0)) + if (dev_hard_header(skb, dev, ETH_P_DNA_RT, dst, src, skb->len) >= 0) dn_rt_send(skb); else kfree_skb(skb); diff --git a/include/net/esp.h b/include/net/esp.h index d05d8d2c78f460dda1ff1fde442083ff3c0795dc..c1bc529809daad155865ed444049fdb1396b1b21 100644 --- a/include/net/esp.h +++ b/include/net/esp.h @@ -13,8 +13,6 @@ struct esp_data /* Confidentiality */ struct { - u8 *key; /* Key */ - int key_len; /* Key length */ int padlen; /* 0..255 */ /* ivlen is offset from enc_data, where encrypted data start. * It is logically different of crypto_tfm_alg_ivsize(tfm). @@ -28,14 +26,9 @@ struct esp_data /* Integrity. 
It is active when icv_full_len != 0 */ struct { - u8 *key; /* Key */ - int key_len; /* Length of the key */ u8 *work_icv; int icv_full_len; int icv_trunc_len; - void (*icv)(struct esp_data*, - struct sk_buff *skb, - int offset, int len, u8 *icv); struct crypto_hash *tfm; } auth; }; @@ -60,4 +53,11 @@ static inline int esp_mac_digest(struct esp_data *esp, struct sk_buff *skb, return crypto_hash_final(&desc, esp->auth.work_icv); } +struct ip_esp_hdr; + +static inline struct ip_esp_hdr *ip_esp_hdr(const struct sk_buff *skb) +{ + return (struct ip_esp_hdr *)skb_transport_header(skb); +} + #endif diff --git a/include/net/fib_rules.h b/include/net/fib_rules.h index 83e41dd15ccde6e88b304687dac539462815d7dd..017aebd9068317f387df8f62de160cc7bad36a08 100644 --- a/include/net/fib_rules.h +++ b/include/net/fib_rules.h @@ -65,7 +65,7 @@ struct fib_rules_ops int nlgroup; const struct nla_policy *policy; - struct list_head *rules_list; + struct list_head rules_list; struct module *owner; }; diff --git a/include/net/icmp.h b/include/net/icmp.h index dc09474efcf38e2cb02329a4ef0bf2fd8633bf7f..9f7ef3c8baef162bb99171824e120ea0248aa222 100644 --- a/include/net/icmp.h +++ b/include/net/icmp.h @@ -30,9 +30,16 @@ struct icmp_err { extern struct icmp_err icmp_err_convert[]; DECLARE_SNMP_STAT(struct icmp_mib, icmp_statistics); +DECLARE_SNMP_STAT(struct icmpmsg_mib, icmpmsg_statistics); #define ICMP_INC_STATS(field) SNMP_INC_STATS(icmp_statistics, field) #define ICMP_INC_STATS_BH(field) SNMP_INC_STATS_BH(icmp_statistics, field) #define ICMP_INC_STATS_USER(field) SNMP_INC_STATS_USER(icmp_statistics, field) +#define ICMPMSGOUT_INC_STATS(field) SNMP_INC_STATS(icmpmsg_statistics, field+256) +#define ICMPMSGOUT_INC_STATS_BH(field) SNMP_INC_STATS_BH(icmpmsg_statistics, field+256) +#define ICMPMSGOUT_INC_STATS_USER(field) SNMP_INC_STATS_USER(icmpmsg_statistics, field+256) +#define ICMPMSGIN_INC_STATS(field) SNMP_INC_STATS(icmpmsg_statistics, field) +#define ICMPMSGIN_INC_STATS_BH(field) SNMP_INC_STATS_BH(icmpmsg_statistics, field) +#define ICMPMSGIN_INC_STATS_USER(field) SNMP_INC_STATS_USER(icmpmsg_statistics, field) struct dst_entry; struct net_proto_family; @@ -42,6 +49,7 @@ extern void icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info); extern int icmp_rcv(struct sk_buff *skb); extern int icmp_ioctl(struct sock *sk, int cmd, unsigned long arg); extern void icmp_init(struct net_proto_family *ops); +extern void icmp_out_count(unsigned char type); /* Move into dst.h ? */ extern int xrlim_allow(struct dst_entry *dst, int timeout); diff --git a/include/net/ieee80211.h b/include/net/ieee80211.h index bbd85cd61ed5d57482ed0180998f808414078054..164d13211165422ef32ed5e206025d8daf060ed0 100644 --- a/include/net/ieee80211.h +++ b/include/net/ieee80211.h @@ -119,11 +119,6 @@ do { if (ieee80211_debug_level & (level)) \ #define IEEE80211_DEBUG(level, fmt, args...) do {} while (0) #endif /* CONFIG_IEEE80211_DEBUG */ -/* debug macros not dependent on CONFIG_IEEE80211_DEBUG */ - -#define MAC_FMT "%02x:%02x:%02x:%02x:%02x:%02x" -#define MAC_ARG(x) ((u8*)(x))[0],((u8*)(x))[1],((u8*)(x))[2],((u8*)(x))[3],((u8*)(x))[4],((u8*)(x))[5] - /* escape_essid() is intended to be used in debug (and possibly error) * messages. It should never be used for passing essid to user space. 
*/ const char *escape_essid(const char *essid, u8 essid_len); diff --git a/include/net/ieee80211_radiotap.h b/include/net/ieee80211_radiotap.h index a0c2b41a24d7d2174cd7ca9d7f8c98d8ca1ea56c..dfd8bf66ce274f99e486cf80794980f255b398ae 100644 --- a/include/net/ieee80211_radiotap.h +++ b/include/net/ieee80211_radiotap.h @@ -40,6 +40,7 @@ #include #include +#include /* Radiotap header version (from official NetBSD feed) */ #define IEEE80211RADIOTAP_VERSION "1.5" @@ -255,4 +256,13 @@ enum ieee80211_radiotap_type { (((x) == 14) ? 2484 : ((x) * 5) + 2407) : \ ((x) + 1000) * 5) +/* helpers */ +static inline int ieee80211_get_radiotap_len(unsigned char *data) +{ + struct ieee80211_radiotap_header *hdr = + (struct ieee80211_radiotap_header *)data; + + return le16_to_cpu(get_unaligned(&hdr->it_len)); +} + #endif /* IEEE80211_RADIOTAP_H */ diff --git a/include/net/ieee80211softmac.h b/include/net/ieee80211softmac.h index 89119277553dbb8dbe90085088d4242c27f3c023..1ef6282fdded66520c5a15956848a77dfaaecbfa 100644 --- a/include/net/ieee80211softmac.h +++ b/include/net/ieee80211softmac.h @@ -229,6 +229,8 @@ struct ieee80211softmac_device { /* this lock protects this structure */ spinlock_t lock; + struct workqueue_struct *wq; + u8 running; /* SoftMAC started? */ u8 scanning; diff --git a/include/net/if_inet6.h b/include/net/if_inet6.h index 3ec7d07346d63d7a192b8668f5f4f896d1fd0bd0..448eccb206384f541a43c530fa5ac3add38eb329 100644 --- a/include/net/if_inet6.h +++ b/include/net/if_inet6.h @@ -154,6 +154,7 @@ struct ipv6_devstat { struct proc_dir_entry *proc_dir_entry; DEFINE_SNMP_STAT(struct ipstats_mib, ipv6); DEFINE_SNMP_STAT(struct icmpv6_mib, icmpv6); + DEFINE_SNMP_STAT(struct icmpv6msg_mib, icmpv6msg); }; struct inet6_dev diff --git a/include/net/inet_hashtables.h b/include/net/inet_hashtables.h index d27ee8c0da3f2cc5ac84f29a642118734eade381..8228b57eb18f544aebec20063b18c91ab5d1e323 100644 --- a/include/net/inet_hashtables.h +++ b/include/net/inet_hashtables.h @@ -107,7 +107,7 @@ struct inet_hashinfo { */ struct inet_bind_hashbucket *bhash; - int bhash_size; + unsigned int bhash_size; unsigned int ehash_size; /* All sockets in TCP_LISTEN state will be in here. 
This is the only diff --git a/include/net/inet_timewait_sock.h b/include/net/inet_timewait_sock.h index 47d52b2414dbac4b1c5a294c46483e9db36901ef..abaff05972707feac79c5ca7199996e3f252167b 100644 --- a/include/net/inet_timewait_sock.h +++ b/include/net/inet_timewait_sock.h @@ -115,6 +115,7 @@ struct inet_timewait_sock { #define tw_refcnt __tw_common.skc_refcnt #define tw_hash __tw_common.skc_hash #define tw_prot __tw_common.skc_prot +#define tw_net __tw_common.skc_net volatile unsigned char tw_substate; /* 3 bits hole, try to pack */ unsigned char tw_rcv_wscale; diff --git a/include/net/ip.h b/include/net/ip.h index abf2820a1125d73af2a83b8dab7a6e66509c88e5..3af3ed9d320bbdeb1e996dfceb7b8cd10cdcfc21 100644 --- a/include/net/ip.h +++ b/include/net/ip.h @@ -171,7 +171,8 @@ extern unsigned long snmp_fold_field(void *mib[], int offt); extern int snmp_mib_init(void *ptr[2], size_t mibsize, size_t mibalign); extern void snmp_mib_free(void *ptr[2]); -extern int sysctl_local_port_range[2]; +extern void inet_get_local_port_range(int *low, int *high); + extern int sysctl_ip_default_ttl; extern int sysctl_ip_nonlocal_bind; diff --git a/include/net/ip6_fib.h b/include/net/ip6_fib.h index c48ea873f1e091271d3e377fe16f67c4860ba453..857821360bb6cf99af1d5694722d588044b6dd5e 100644 --- a/include/net/ip6_fib.h +++ b/include/net/ip6_fib.h @@ -105,6 +105,10 @@ struct rt6_info struct rt6key rt6i_src; u8 rt6i_protocol; + +#ifdef CONFIG_XFRM + u32 rt6i_flow_cache_genid; +#endif }; static inline struct inet6_dev *ip6_dst_idev(struct dst_entry *dst) diff --git a/include/net/ipcomp.h b/include/net/ipcomp.h index 87c1af3e5e8243be480e0cec57860242c722a914..330b74e813a9b834d2cb8b7e155fac7e02e8685b 100644 --- a/include/net/ipcomp.h +++ b/include/net/ipcomp.h @@ -1,14 +1,23 @@ #ifndef _NET_IPCOMP_H #define _NET_IPCOMP_H -#include #include #define IPCOMP_SCRATCH_SIZE 65400 +struct crypto_comp; + struct ipcomp_data { u16 threshold; struct crypto_comp **tfms; }; +struct ip_comp_hdr; +struct sk_buff; + +static inline struct ip_comp_hdr *ip_comp_hdr(const struct sk_buff *skb) +{ + return (struct ip_comp_hdr *)skb_transport_header(skb); +} + #endif diff --git a/include/net/ipv6.h b/include/net/ipv6.h index 9059e0ed7fe375e70d12740a1703526d89aba944..31b3f1b45a2bfd1479bd685224e2a22238aa2ee6 100644 --- a/include/net/ipv6.h +++ b/include/net/ipv6.h @@ -112,45 +112,28 @@ struct frag_hdr { extern int sysctl_ipv6_bindv6only; extern int sysctl_mld_max_msf; -/* MIBs */ -DECLARE_SNMP_STAT(struct ipstats_mib, ipv6_statistics); -#define IP6_INC_STATS(idev,field) ({ \ - struct inet6_dev *_idev = (idev); \ - if (likely(_idev != NULL)) \ - SNMP_INC_STATS(_idev->stats.ipv6, field); \ - SNMP_INC_STATS(ipv6_statistics, field); \ -}) -#define IP6_INC_STATS_BH(idev,field) ({ \ - struct inet6_dev *_idev = (idev); \ - if (likely(_idev != NULL)) \ - SNMP_INC_STATS_BH(_idev->stats.ipv6, field); \ - SNMP_INC_STATS_BH(ipv6_statistics, field); \ -}) -#define IP6_INC_STATS_USER(idev,field) ({ \ +#define _DEVINC(statname, modifier, idev, field) \ +({ \ struct inet6_dev *_idev = (idev); \ if (likely(_idev != NULL)) \ - SNMP_INC_STATS_USER(_idev->stats.ipv6, field); \ - SNMP_INC_STATS_USER(ipv6_statistics, field); \ + SNMP_INC_STATS##modifier((_idev)->stats.statname, (field)); \ + SNMP_INC_STATS##modifier(statname##_statistics, (field)); \ }) + +/* MIBs */ +DECLARE_SNMP_STAT(struct ipstats_mib, ipv6_statistics); + +#define IP6_INC_STATS(idev,field) _DEVINC(ipv6, , idev, field) +#define IP6_INC_STATS_BH(idev,field) _DEVINC(ipv6, _BH, idev, field) +#define 
IP6_INC_STATS_USER(idev,field) _DEVINC(ipv6, _USER, idev, field) + DECLARE_SNMP_STAT(struct icmpv6_mib, icmpv6_statistics); -#define ICMP6_INC_STATS(idev, field) ({ \ - struct inet6_dev *_idev = (idev); \ - if (likely(_idev != NULL)) \ - SNMP_INC_STATS(idev->stats.icmpv6, field); \ - SNMP_INC_STATS(icmpv6_statistics, field); \ -}) -#define ICMP6_INC_STATS_BH(idev, field) ({ \ - struct inet6_dev *_idev = (idev); \ - if (likely(_idev != NULL)) \ - SNMP_INC_STATS_BH((_idev)->stats.icmpv6, field); \ - SNMP_INC_STATS_BH(icmpv6_statistics, field); \ -}) -#define ICMP6_INC_STATS_USER(idev, field) ({ \ - struct inet6_dev *_idev = (idev); \ - if (likely(_idev != NULL)) \ - SNMP_INC_STATS_USER(_idev->stats.icmpv6, field); \ - SNMP_INC_STATS_USER(icmpv6_statistics, field); \ -}) +DECLARE_SNMP_STAT(struct icmpv6msg_mib, icmpv6msg_statistics); + +#define ICMP6_INC_STATS(idev, field) _DEVINC(icmpv6, , idev, field) +#define ICMP6_INC_STATS_BH(idev, field) _DEVINC(icmpv6, _BH, idev, field) +#define ICMP6_INC_STATS_USER(idev, field) _DEVINC(icmpv6, _USER, idev, field) + #define ICMP6_INC_STATS_OFFSET_BH(idev, field, offset) ({ \ struct inet6_dev *_idev = idev; \ __typeof__(offset) _offset = (offset); \ @@ -158,6 +141,20 @@ DECLARE_SNMP_STAT(struct icmpv6_mib, icmpv6_statistics); SNMP_INC_STATS_OFFSET_BH(_idev->stats.icmpv6, field, _offset); \ SNMP_INC_STATS_OFFSET_BH(icmpv6_statistics, field, _offset); \ }) + +#define ICMP6MSGOUT_INC_STATS(idev, field) \ + _DEVINC(icmpv6msg, , idev, field +256) +#define ICMP6MSGOUT_INC_STATS_BH(idev, field) \ + _DEVINC(icmpv6msg, _BH, idev, field +256) +#define ICMP6MSGOUT_INC_STATS_USER(idev, field) \ + _DEVINC(icmpv6msg, _USER, idev, field +256) +#define ICMP6MSGIN_INC_STATS(idev, field) \ + _DEVINC(icmpv6msg, , idev, field) +#define ICMP6MSGIN_INC_STATS_BH(idev, field) \ + _DEVINC(icmpv6msg, _BH, idev, field) +#define ICMP6MSGIN_INC_STATS_USER(idev, field) \ + _DEVINC(icmpv6msg, _USER, idev, field) + DECLARE_SNMP_STAT(struct udp_mib, udp_stats_in6); DECLARE_SNMP_STAT(struct udp_mib, udplite_stats_in6); #define UDP6_INC_STATS_BH(field, is_udplite) do { \ @@ -377,6 +374,12 @@ static inline int ipv6_addr_any(const struct in6_addr *a) a->s6_addr32[2] | a->s6_addr32[3] ) == 0); } +static inline int ipv6_addr_v4mapped(const struct in6_addr *a) +{ + return ((a->s6_addr32[0] | a->s6_addr32[1]) == 0 && + a->s6_addr32[2] == htonl(0x0000ffff)); +} + /* * find the first different bit between two addresses * length of address must be a multiple of 32bits diff --git a/include/net/iucv/af_iucv.h b/include/net/iucv/af_iucv.h index b6c468cd7f5ba9fb61f2627fd15a54cd135206af..85f80eadfa35fae702a8377dddebf9e92d6aaf12 100644 --- a/include/net/iucv/af_iucv.h +++ b/include/net/iucv/af_iucv.h @@ -50,6 +50,12 @@ struct sockaddr_iucv { /* Common socket structures and functions */ +struct sock_msg_q { + struct iucv_path *path; + struct iucv_message msg; + struct list_head list; + spinlock_t lock; +}; #define iucv_sk(__sk) ((struct iucv_sock *) __sk) @@ -65,6 +71,7 @@ struct iucv_sock { struct iucv_path *path; struct sk_buff_head send_skb_q; struct sk_buff_head backlog_skb_q; + struct sock_msg_q message_q; unsigned int send_tag; }; @@ -74,29 +81,8 @@ struct iucv_sock_list { atomic_t autobind_name; }; -static void iucv_sock_destruct(struct sock *sk); -static void iucv_sock_cleanup_listen(struct sock *parent); -static void iucv_sock_kill(struct sock *sk); -static void iucv_sock_close(struct sock *sk); -static int iucv_sock_create(struct socket *sock, int proto); -static int iucv_sock_bind(struct 
socket *sock, struct sockaddr *addr, - int addr_len); -static int iucv_sock_connect(struct socket *sock, struct sockaddr *addr, - int alen, int flags); -static int iucv_sock_listen(struct socket *sock, int backlog); -static int iucv_sock_accept(struct socket *sock, struct socket *newsock, - int flags); -static int iucv_sock_getname(struct socket *sock, struct sockaddr *addr, - int *len, int peer); -static int iucv_sock_sendmsg(struct kiocb *iocb, struct socket *sock, - struct msghdr *msg, size_t len); -static int iucv_sock_recvmsg(struct kiocb *iocb, struct socket *sock, - struct msghdr *msg, size_t len, int flags); unsigned int iucv_sock_poll(struct file *file, struct socket *sock, poll_table *wait); -static int iucv_sock_release(struct socket *sock); -static int iucv_sock_shutdown(struct socket *sock, int how); - void iucv_sock_link(struct iucv_sock_list *l, struct sock *s); void iucv_sock_unlink(struct iucv_sock_list *l, struct sock *s); int iucv_sock_wait_state(struct sock *sk, int state, int state2, diff --git a/include/net/iw_handler.h b/include/net/iw_handler.h index f23d07ca7c59e060c38a8c4a1aa6a9c6c2e38474..369d50e08b99619874ad8d4b6588228871728c53 100644 --- a/include/net/iw_handler.h +++ b/include/net/iw_handler.h @@ -431,7 +431,13 @@ struct iw_public_data { * Those may be called only within the kernel. */ -/* functions that may be called by driver modules */ +/* First : function strictly used inside the kernel */ + +/* Handle /proc/net/wireless, called in net/code/dev.c */ +extern int dev_get_wireless_info(char * buffer, char **start, off_t offset, + int length); + +/* Second : functions that may be called by driver modules */ /* Send a single event to user space */ extern void wireless_send_event(struct net_device * dev, diff --git a/include/net/llc_conn.h b/include/net/llc_conn.h index 00730d21b522cc8edc9f1d14de0d73e883833d91..e2374e34989f07d9c39d7bdf30c44266bc1f5ad0 100644 --- a/include/net/llc_conn.h +++ b/include/net/llc_conn.h @@ -93,7 +93,7 @@ static __inline__ char llc_backlog_type(struct sk_buff *skb) return skb->cb[sizeof(skb->cb) - 1]; } -extern struct sock *llc_sk_alloc(int family, gfp_t priority, +extern struct sock *llc_sk_alloc(struct net *net, int family, gfp_t priority, struct proto *prot); extern void llc_sk_free(struct sock *sk); diff --git a/include/net/mac80211.h b/include/net/mac80211.h index c34fd9a6160a8ce2a67689728a59368277a89ccb..5fcc4c1043401cac93ab97a96a5fcb97da0ca031 100644 --- a/include/net/mac80211.h +++ b/include/net/mac80211.h @@ -1,7 +1,9 @@ /* - * Low-level hardware driver -- IEEE 802.11 driver (80211.o) interface + * mac80211 <-> driver interface + * * Copyright 2002-2005, Devicescape Software, Inc. * Copyright 2006-2007 Jiri Benc + * Copyright 2007 Johannes Berg * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as @@ -20,29 +22,51 @@ #include #include -/* Note! Only ieee80211_tx_status_irqsafe() and ieee80211_rx_irqsafe() can be +/** + * DOC: Introduction + * + * mac80211 is the Linux stack for 802.11 hardware that implements + * only partial functionality in hard- or firmware. This document + * defines the interface between mac80211 and low-level hardware + * drivers. + */ + +/** + * DOC: Calling mac80211 from interrupts + * + * Only ieee80211_tx_status_irqsafe() and ieee80211_rx_irqsafe() can be * called in hardware interrupt context. The low-level driver must not call any * other functions in hardware interrupt context. 
If there is a need for such * call, the low-level driver should first ACK the interrupt and perform the - * IEEE 802.11 code call after this, e.g., from a scheduled tasklet (in - * software interrupt context). + * IEEE 802.11 code call after this, e.g. from a scheduled workqueue function. */ -/* - * Frame format used when passing frame between low-level hardware drivers - * and IEEE 802.11 driver the same as used in the wireless media, i.e., - * buffers start with IEEE 802.11 header and include the same octets that - * are sent over air. - * - * If hardware uses IEEE 802.3 headers (and perform 802.3 <-> 802.11 - * conversion in firmware), upper layer 802.11 code needs to be changed to - * support this. - * - * If the receive frame format is not the same as the real frame sent - * on the wireless media (e.g., due to padding etc.), upper layer 802.11 code - * could be updated to provide support for such format assuming this would - * optimize the performance, e.g., by removing need to re-allocation and - * copying of the data. +/** + * DOC: Warning + * + * If you're reading this document and not the header file itself, it will + * be incomplete because not all documentation has been converted yet. + */ + +/** + * DOC: Frame format + * + * As a general rule, when frames are passed between mac80211 and the driver, + * they start with the IEEE 802.11 header and include the same octets that are + * sent over the air except for the FCS which should be calculated by the + * hardware. + * + * There are, however, various exceptions to this rule for advanced features: + * + * The first exception is for hardware encryption and decryption offload + * where the IV/ICV may or may not be generated in hardware. + * + * Secondly, when the hardware handles fragmentation, the frame handed to + * the driver from mac80211 is the MSDU, not the MPDU. + * + * Finally, for received frames, the driver is able to indicate that it has + * filled a radiotap header and put that in front of the frame; if it does + * not do so then mac80211 may add this under certain circumstances. */ #define IEEE80211_CHAN_W_SCAN 0x00000001 @@ -71,14 +95,13 @@ struct ieee80211_channel { #define IEEE80211_RATE_SUPPORTED 0x00000010 #define IEEE80211_RATE_OFDM 0x00000020 #define IEEE80211_RATE_CCK 0x00000040 -#define IEEE80211_RATE_TURBO 0x00000080 #define IEEE80211_RATE_MANDATORY 0x00000100 #define IEEE80211_RATE_CCK_2 (IEEE80211_RATE_CCK | IEEE80211_RATE_PREAMBLE2) #define IEEE80211_RATE_MODULATION(f) \ (f & (IEEE80211_RATE_CCK | IEEE80211_RATE_OFDM)) -/* Low-level driver should set PREAMBLE2, OFDM, CCK, and TURBO flags. +/* Low-level driver should set PREAMBLE2, OFDM and CCK flags. * BASIC, SUPPORTED, ERP, and MANDATORY flags are set in 80211.o based on the * configuration. */ struct ieee80211_rate { @@ -97,44 +120,96 @@ struct ieee80211_rate { * optimizing channel utilization estimates */ }; -/* 802.11g is backwards-compatible with 802.11b, so a wlan card can - * actually be both in 11b and 11g modes at the same time. 
*/ -enum { - MODE_IEEE80211A, /* IEEE 802.11a */ - MODE_IEEE80211B, /* IEEE 802.11b only */ - MODE_ATHEROS_TURBO, /* Atheros Turbo mode (2x.11a at 5 GHz) */ - MODE_IEEE80211G, /* IEEE 802.11g (and 802.11b compatibility) */ - MODE_ATHEROS_TURBOG, /* Atheros Turbo mode (2x.11g at 2.4 GHz) */ +/** + * enum ieee80211_phymode - PHY modes + * + * @MODE_IEEE80211A: 5GHz as defined by 802.11a/802.11h + * @MODE_IEEE80211B: 2.4 GHz as defined by 802.11b + * @MODE_IEEE80211G: 2.4 GHz as defined by 802.11g (with OFDM), + * backwards compatible with 11b mode + * @NUM_IEEE80211_MODES: internal + */ +enum ieee80211_phymode { + MODE_IEEE80211A, + MODE_IEEE80211B, + MODE_IEEE80211G, /* keep last */ NUM_IEEE80211_MODES }; +/** + * struct ieee80211_hw_mode - PHY mode definition + * + * This structure describes the capabilities supported by the device + * in a single PHY mode. + * + * @mode: the PHY mode for this definition + * @num_channels: number of supported channels + * @channels: pointer to array of supported channels + * @num_rates: number of supported bitrates + * @rates: pointer to array of supported bitrates + * @list: internal + */ struct ieee80211_hw_mode { - int mode; /* MODE_IEEE80211... */ - int num_channels; /* Number of channels (below) */ - struct ieee80211_channel *channels; /* Array of supported channels */ - int num_rates; /* Number of rates (below) */ - struct ieee80211_rate *rates; /* Array of supported rates */ - - struct list_head list; /* Internal, don't touch */ + struct list_head list; + struct ieee80211_channel *channels; + struct ieee80211_rate *rates; + enum ieee80211_phymode mode; + int num_channels; + int num_rates; }; +/** + * struct ieee80211_tx_queue_params - transmit queue configuration + * + * The information provided in this structure is required for QoS + * transmit queue configuration. + * + * @aifs: arbitration interface space [0..255, -1: use default] + * @cw_min: minimum contention window [will be a value of the form + * 2^n-1 in the range 1..1023; 0: use default] + * @cw_max: maximum contention window [like @cw_min] + * @burst_time: maximum burst time in units of 0.1ms, 0 meaning disabled + */ struct ieee80211_tx_queue_params { - int aifs; /* 0 .. 255; -1 = use default */ - int cw_min; /* 2^n-1: 1, 3, 7, .. , 1023; 0 = use default */ - int cw_max; /* 2^n-1: 1, 3, 7, .. , 1023; 0 = use default */ - int burst_time; /* maximum burst time in 0.1 ms (i.e., 10 = 1 ms); - * 0 = disabled */ + int aifs; + int cw_min; + int cw_max; + int burst_time; }; +/** + * struct ieee80211_tx_queue_stats_data - transmit queue statistics + * + * @len: number of packets in queue + * @limit: queue length limit + * @count: number of frames sent + */ struct ieee80211_tx_queue_stats_data { - unsigned int len; /* num packets in queue */ - unsigned int limit; /* queue len (soft) limit */ - unsigned int count; /* total num frames sent */ + unsigned int len; + unsigned int limit; + unsigned int count; }; -enum { +/** + * enum ieee80211_tx_queue - transmit queue number + * + * These constants are used with some callbacks that take a + * queue number to set parameters for a queue. + * + * @IEEE80211_TX_QUEUE_DATA0: data queue 0 + * @IEEE80211_TX_QUEUE_DATA1: data queue 1 + * @IEEE80211_TX_QUEUE_DATA2: data queue 2 + * @IEEE80211_TX_QUEUE_DATA3: data queue 3 + * @IEEE80211_TX_QUEUE_DATA4: data queue 4 + * @IEEE80211_TX_QUEUE_SVP: ?? 
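/*
 * Illustrative sketch only, not part of this patch: filling a
 * struct ieee80211_hw_mode as documented above and registering it with
 * ieee80211_register_hwmode() (declared further down in this header).
 * The foo_* channel and rate tables are assumptions; a real driver
 * populates them from EEPROM or hardware capabilities.
 */
#include <linux/kernel.h>
#include <net/mac80211.h>

static struct ieee80211_channel foo_2ghz_channels[14];	/* filled elsewhere */
static struct ieee80211_rate foo_g_rates[12];		/* filled elsewhere */
static struct ieee80211_hw_mode foo_mode_g;

static int foo_register_modes(struct ieee80211_hw *hw)
{
	foo_mode_g.mode = MODE_IEEE80211G;
	foo_mode_g.channels = foo_2ghz_channels;
	foo_mode_g.num_channels = ARRAY_SIZE(foo_2ghz_channels);
	foo_mode_g.rates = foo_g_rates;
	foo_mode_g.num_rates = ARRAY_SIZE(foo_g_rates);

	return ieee80211_register_hwmode(hw, &foo_mode_g);
}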
+ @NUM_TX_DATA_QUEUES: number of data queues + * @IEEE80211_TX_QUEUE_AFTER_BEACON: transmit queue for frames to be + * sent after a beacon + * @IEEE80211_TX_QUEUE_BEACON: transmit queue for beacon frames + */ +enum ieee80211_tx_queue { IEEE80211_TX_QUEUE_DATA0, IEEE80211_TX_QUEUE_DATA1, IEEE80211_TX_QUEUE_DATA2, @@ -165,7 +240,6 @@ struct ieee80211_low_level_stats { /* Transmit control fields. This data structure is passed to low-level driver * with each TX frame. The low-level driver is responsible for configuring * the hardware to use given values (depending on what is supported). */ -#define HW_KEY_IDX_INVALID -1 struct ieee80211_tx_control { int tx_rate; /* Transmit rate, given as the hw specific value for the @@ -191,22 +265,23 @@ struct ieee80211_tx_control { #define IEEE80211_TXCTL_REQUEUE (1<<7) #define IEEE80211_TXCTL_FIRST_FRAGMENT (1<<8) /* this is a first fragment of * the frame */ -#define IEEE80211_TXCTL_TKIP_NEW_PHASE1_KEY (1<<9) +#define IEEE80211_TXCTL_LONG_RETRY_LIMIT (1<<10) /* this frame should be sent + * using the long retry value + * configured through + * set_retry_limit */ u32 flags; /* tx control flags defined * above */ - u8 retry_limit; /* 1 = only first attempt, 2 = one retry, .. */ + u8 key_idx; /* keyidx from hw->set_key(), undefined if + * IEEE80211_TXCTL_DO_NOT_ENCRYPT is set */ + u8 retry_limit; /* 1 = only first attempt, 2 = one retry, .. + * This could be used when set_retry_limit + * is not implemented by the driver */ u8 power_level; /* per-packet transmit power level, in dBm */ u8 antenna_sel_tx; /* 0 = default/diversity, 1 = Ant0, 2 = Ant1 */ - s8 key_idx; /* -1 = do not encrypt, >= 0 keyidx from - * hw->set_key() */ u8 icv_len; /* length of the ICV/MIC field in octets */ u8 iv_len; /* length of the IV field in octets */ - u8 tkip_key[16]; /* generated phase2/phase1 key for hw TKIP */ u8 queue; /* hardware queue to use for this frame; * 0 = highest, hw->queues-1 = lowest */ - u8 sw_retry_attempt; /* number of times hw has tried to - * transmit frame (not incl. hw retries) */ - struct ieee80211_rate *rate; /* internal 80211.o rate */ struct ieee80211_rate *rts_rate; /* internal 80211.o rate * for RTS/CTS */ @@ -219,44 +294,124 @@ struct ieee80211_tx_control { int ifindex; /* internal */ }; -/* Receive status. The low-level driver should provide this information - * (the subset supported by hardware) to the 802.11 code with each received - * frame. */ + +/** + * enum mac80211_rx_flags - receive flags + * + * These flags are used with the @flag member of &struct ieee80211_rx_status. + * @RX_FLAG_MMIC_ERROR: Michael MIC error was reported on this frame. + * Use together with %RX_FLAG_MMIC_STRIPPED. + * @RX_FLAG_DECRYPTED: This frame was decrypted in hardware. + * @RX_FLAG_RADIOTAP: This frame starts with a radiotap header. + * @RX_FLAG_MMIC_STRIPPED: the Michael MIC is stripped off this frame, + * verification has been done by the hardware. + * @RX_FLAG_IV_STRIPPED: The IV/ICV are stripped from this frame. + * If this flag is set, the stack cannot do any replay detection + * hence the driver or hardware will have to do that. + * @RX_FLAG_FAILED_FCS_CRC: Set this flag if the FCS check failed on + * the frame. + * @RX_FLAG_FAILED_PLCP_CRC: Set this flag if the PLCP check failed on + * the frame.
+ */ +enum mac80211_rx_flags { + RX_FLAG_MMIC_ERROR = 1<<0, + RX_FLAG_DECRYPTED = 1<<1, + RX_FLAG_RADIOTAP = 1<<2, + RX_FLAG_MMIC_STRIPPED = 1<<3, + RX_FLAG_IV_STRIPPED = 1<<4, + RX_FLAG_FAILED_FCS_CRC = 1<<5, + RX_FLAG_FAILED_PLCP_CRC = 1<<6, +}; + +/** + * struct ieee80211_rx_status - receive status + * + * The low-level driver should provide this information (the subset + * supported by hardware) to the 802.11 code with each received + * frame. + * @mactime: MAC timestamp as defined by 802.11 + * @freq: frequency the radio was tuned to when receiving this frame, in MHz + * @channel: channel the radio was tuned to + * @phymode: active PHY mode + * @ssi: signal strength when receiving this frame + * @signal: used as 'qual' in statistics reporting + * @noise: PHY noise when receiving this frame + * @antenna: antenna used + * @rate: data rate + * @flag: %RX_FLAG_* + */ struct ieee80211_rx_status { u64 mactime; - int freq; /* receive frequency in Mhz */ + int freq; int channel; - int phymode; + enum ieee80211_phymode phymode; int ssi; - int signal; /* used as qual in statistics reporting */ + int signal; int noise; int antenna; int rate; -#define RX_FLAG_MMIC_ERROR (1<<0) -#define RX_FLAG_DECRYPTED (1<<1) -#define RX_FLAG_RADIOTAP (1<<2) int flag; }; -/* Transmit status. The low-level driver should provide this information - * (the subset supported by hardware) to the 802.11 code for each transmit - * frame. */ +/** + * enum ieee80211_tx_status_flags - transmit status flags + * + * Status flags to indicate various transmit conditions. + * + * @IEEE80211_TX_STATUS_TX_FILTERED: The frame was not transmitted + * because the destination STA was in powersave mode. + * + * @IEEE80211_TX_STATUS_ACK: Frame was acknowledged + */ +enum ieee80211_tx_status_flags { + IEEE80211_TX_STATUS_TX_FILTERED = 1<<0, + IEEE80211_TX_STATUS_ACK = 1<<1, +}; + +/** + * struct ieee80211_tx_status - transmit status + * + * As much information as possible should be provided for each transmitted + * frame with ieee80211_tx_status(). + * + * @control: a copy of the &struct ieee80211_tx_control passed to the driver + * in the tx() callback. + * + * @flags: transmit status flags, defined above + * + * @ack_signal: signal strength of the ACK frame + * + * @excessive_retries: set to 1 if the frame was retried many times + * but not acknowledged + * + * @retry_count: number of retries + * + * @queue_length: ?? REMOVE + * @queue_number: ?? 
REMOVE + */ struct ieee80211_tx_status { - /* copied ieee80211_tx_control structure */ struct ieee80211_tx_control control; - -#define IEEE80211_TX_STATUS_TX_FILTERED (1<<0) -#define IEEE80211_TX_STATUS_ACK (1<<1) /* whether the TX frame was ACKed */ - u32 flags; /* tx staus flags defined above */ - - int ack_signal; /* measured signal strength of the ACK frame */ - int excessive_retries; - int retry_count; - - int queue_length; /* information about TX queue */ + u8 flags; + bool excessive_retries; + u8 retry_count; + int ack_signal; + int queue_length; int queue_number; }; +/** + * enum ieee80211_conf_flags - configuration flags + * + * Flags to define PHY configuration options + * + * @IEEE80211_CONF_SHORT_SLOT_TIME: use 802.11g short slot time + * @IEEE80211_CONF_RADIOTAP: add radiotap header at receive time (if supported) + * + */ +enum ieee80211_conf_flags { + IEEE80211_CONF_SHORT_SLOT_TIME = 1<<0, + IEEE80211_CONF_RADIOTAP = 1<<1, +}; /** * struct ieee80211_conf - configuration of the device @@ -264,57 +419,46 @@ struct ieee80211_tx_status { * This struct indicates how the driver shall configure the hardware. * * @radio_enabled: when zero, driver is required to switch off the radio. + * TODO make a flag + * @channel: IEEE 802.11 channel number + * @freq: frequency in MHz + * @channel_val: hardware specific channel value for the channel + * @phymode: PHY mode to activate (REMOVE) + * @chan: channel to switch to, pointer to the channel information + * @mode: pointer to mode definition + * @regulatory_domain: ?? + * @beacon_int: beacon interval (TODO make interface config) + * @flags: configuration flags defined above + * @power_level: transmit power limit for current regulatory domain in dBm + * @antenna_max: maximum antenna gain + * @antenna_sel_tx: transmit antenna selection, 0: default/diversity, + * 1/2: antenna 0/1 + * @antenna_sel_rx: receive antenna selection, like @antenna_sel_tx */ struct ieee80211_conf { int channel; /* IEEE 802.11 channel number */ int freq; /* MHz */ int channel_val; /* hw specific value for the channel */ - int phymode; /* MODE_IEEE80211A, .. */ + enum ieee80211_phymode phymode; struct ieee80211_channel *chan; struct ieee80211_hw_mode *mode; unsigned int regulatory_domain; int radio_enabled; int beacon_int; - -#define IEEE80211_CONF_SHORT_SLOT_TIME (1<<0) /* use IEEE 802.11g Short Slot - * Time */ -#define IEEE80211_CONF_SSID_HIDDEN (1<<1) /* do not broadcast the ssid */ -#define IEEE80211_CONF_RADIOTAP (1<<2) /* use radiotap if supported - check this bit at RX time */ - u32 flags; /* configuration flags defined above */ - - u8 power_level; /* transmit power limit for current - * regulatory domain; in dBm */ - u8 antenna_max; /* maximum antenna gain */ - short tx_power_reduction; /* in 0.1 dBm */ - - /* 0 = default/diversity, 1 = Ant0, 2 = Ant1 */ + u32 flags; + u8 power_level; + u8 antenna_max; u8 antenna_sel_tx; u8 antenna_sel_rx; - - int antenna_def; - int antenna_mode; - - /* Following five fields are used for IEEE 802.11H */ - unsigned int radar_detect; - unsigned int spect_mgmt; - /* All following fields are currently unused. 
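/*
 * Illustrative sketch only, not part of this patch: reporting a completed
 * frame from a hypothetical "foo" driver's TX-done path using
 * struct ieee80211_tx_status above and ieee80211_tx_status() (declared
 * further down).  struct foo_tx_info and its members are assumptions
 * standing in for whatever the hardware reports.
 */
#include <linux/string.h>
#include <net/mac80211.h>

static void foo_report_tx_done(struct ieee80211_hw *hw, struct sk_buff *skb,
			       struct foo_tx_info *info)
{
	struct ieee80211_tx_status status;

	memset(&status, 0, sizeof(status));
	/* hand back a copy of the control data that came with the tx() call */
	memcpy(&status.control, &info->control, sizeof(status.control));

	if (info->acked)
		status.flags |= IEEE80211_TX_STATUS_ACK;
	status.retry_count = info->retries;
	status.excessive_retries = !info->acked &&
				   info->retries >= info->retry_limit;
	status.ack_signal = info->ack_rssi;

	/* not from hard IRQ context; use the _irqsafe variant there */
	ieee80211_tx_status(hw, skb, &status);
}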
*/ - unsigned int quiet_duration; /* duration of quiet period */ - unsigned int quiet_offset; /* how far into the beacon is the quiet - * period */ - unsigned int quiet_period; - u8 radar_firpwr_threshold; - u8 radar_rssi_threshold; - u8 pulse_height_threshold; - u8 pulse_rssi_threshold; - u8 pulse_inband_threshold; }; /** * enum ieee80211_if_types - types of 802.11 network interfaces * + * @IEEE80211_IF_TYPE_INVALID: invalid interface type, not used + * by mac80211 itself * @IEEE80211_IF_TYPE_AP: interface in AP mode. * @IEEE80211_IF_TYPE_MGMT: special interface for communication with hostap * daemon. Drivers should never see this type. @@ -322,16 +466,17 @@ struct ieee80211_conf { * @IEEE80211_IF_TYPE_IBSS: interface in IBSS (ad-hoc) mode. * @IEEE80211_IF_TYPE_MNTR: interface in monitor (rfmon) mode. * @IEEE80211_IF_TYPE_WDS: interface in WDS mode. - * @IEEE80211_IF_TYPE_VLAN: not used. + * @IEEE80211_IF_TYPE_VLAN: VLAN interface bound to an AP, drivers + * will never see this type. */ enum ieee80211_if_types { - IEEE80211_IF_TYPE_AP = 0x00000000, - IEEE80211_IF_TYPE_MGMT = 0x00000001, - IEEE80211_IF_TYPE_STA = 0x00000002, - IEEE80211_IF_TYPE_IBSS = 0x00000003, - IEEE80211_IF_TYPE_MNTR = 0x00000004, - IEEE80211_IF_TYPE_WDS = 0x5A580211, - IEEE80211_IF_TYPE_VLAN = 0x00080211, + IEEE80211_IF_TYPE_INVALID, + IEEE80211_IF_TYPE_AP, + IEEE80211_IF_TYPE_STA, + IEEE80211_IF_TYPE_IBSS, + IEEE80211_IF_TYPE_MNTR, + IEEE80211_IF_TYPE_WDS, + IEEE80211_IF_TYPE_VLAN, }; /** @@ -347,7 +492,6 @@ enum ieee80211_if_types { * @mac_addr: pointer to MAC address of the interface. This pointer is valid * until the interface is removed (i.e. it cannot be used after * remove_interface() callback was called for this interface). - * This pointer will be %NULL for monitor interfaces, be careful. * * This structure is used in add_interface() and remove_interface() * callbacks of &struct ieee80211_hw. @@ -360,7 +504,7 @@ enum ieee80211_if_types { */ struct ieee80211_if_init_conf { int if_id; - int type; + enum ieee80211_if_types type; void *mac_addr; }; @@ -377,11 +521,6 @@ struct ieee80211_if_init_conf { * config_interface() call, so copy the value somewhere if you need * it. * @ssid_len: length of the @ssid field. - * @generic_elem: used (together with @generic_elem_len) by drivers for - * hardware that generate beacons independently. The pointer is valid - * only during the config_interface() call, so copy the value somewhere - * if you need it. - * @generic_elem_len: length of the generic element. * @beacon: beacon template. Valid only if @host_gen_beacon_template in * &struct ieee80211_hw is set. The driver is responsible of freeing * the sk_buff. @@ -396,368 +535,571 @@ struct ieee80211_if_conf { u8 *bssid; u8 *ssid; size_t ssid_len; - u8 *generic_elem; - size_t generic_elem_len; struct sk_buff *beacon; struct ieee80211_tx_control *beacon_control; }; -typedef enum { ALG_NONE, ALG_WEP, ALG_TKIP, ALG_CCMP, ALG_NULL } -ieee80211_key_alg; +/** + * enum ieee80211_key_alg - key algorithm + * @ALG_WEP: WEP40 or WEP104 + * @ALG_TKIP: TKIP + * @ALG_CCMP: CCMP (AES) + */ +enum ieee80211_key_alg { + ALG_WEP, + ALG_TKIP, + ALG_CCMP, +}; -struct ieee80211_key_conf { +/** + * enum ieee80211_key_flags - key flags + * + * These flags are used for communication about keys between the driver + * and mac80211, with the @flags parameter of &struct ieee80211_key_conf. + * + * @IEEE80211_KEY_FLAG_WMM_STA: Set by mac80211, this flag indicates + * that the STA this key will be used with could be using QoS. 
+ * @IEEE80211_KEY_FLAG_GENERATE_IV: This flag should be set by the + * driver to indicate that it requires IV generation for this + * particular key. + * @IEEE80211_KEY_FLAG_GENERATE_MMIC: This flag should be set by + * the driver for a TKIP key if it requires Michael MIC + * generation in software. + */ +enum ieee80211_key_flags { + IEEE80211_KEY_FLAG_WMM_STA = 1<<0, + IEEE80211_KEY_FLAG_GENERATE_IV = 1<<1, + IEEE80211_KEY_FLAG_GENERATE_MMIC= 1<<2, +}; - int hw_key_idx; /* filled + used by low-level driver */ - ieee80211_key_alg alg; - int keylen; - -#define IEEE80211_KEY_FORCE_SW_ENCRYPT (1<<0) /* to be cleared by low-level - driver */ -#define IEEE80211_KEY_DEFAULT_TX_KEY (1<<1) /* This key is the new default TX - key (used only for broadcast - keys). */ -#define IEEE80211_KEY_DEFAULT_WEP_ONLY (1<<2) /* static WEP is the only - configured security policy; - this allows some low-level - drivers to determine when - hwaccel can be used */ - u32 flags; /* key configuration flags defined above */ - - s8 keyidx; /* WEP key index */ +/** + * struct ieee80211_key_conf - key information + * + * This key information is given by mac80211 to the driver by + * the set_key() callback in &struct ieee80211_ops. + * + * @hw_key_idx: To be set by the driver, this is the key index the driver + * wants to be given when a frame is transmitted and needs to be + * encrypted in hardware. + * @alg: The key algorithm. + * @flags: key flags, see &enum ieee80211_key_flags. + * @keyidx: the key index (0-3) + * @keylen: key material length + * @key: key material + */ +struct ieee80211_key_conf { + enum ieee80211_key_alg alg; + u8 hw_key_idx; + u8 flags; + s8 keyidx; + u8 keylen; u8 key[0]; }; #define IEEE80211_SEQ_COUNTER_RX 0 #define IEEE80211_SEQ_COUNTER_TX 1 -typedef enum { - SET_KEY, DISABLE_KEY, REMOVE_ALL_KEYS, -} set_key_cmd; +/** + * enum set_key_cmd - key command + * + * Used with the set_key() callback in &struct ieee80211_ops, this + * indicates whether a key is being removed or added. + * + * @SET_KEY: a key is set + * @DISABLE_KEY: a key must be disabled + */ +enum set_key_cmd { + SET_KEY, DISABLE_KEY, +}; -/* This is driver-visible part of the per-hw state the stack keeps. */ -struct ieee80211_hw { - /* points to the cfg80211 wiphy for this piece. Note - * that you must fill in the perm_addr and dev fields - * of this structure, use the macros provided below. */ - struct wiphy *wiphy; +/** + * enum sta_notify_cmd - sta notify command + * + * Used with the sta_notify() callback in &struct ieee80211_ops, this + * indicates addition and removal of a station to station table + * + * @STA_NOTIFY_ADD: a station was added to the station table + * @STA_NOTIFY_REMOVE: a station being removed from the station table + */ +enum sta_notify_cmd { + STA_NOTIFY_ADD, STA_NOTIFY_REMOVE +}; - /* assigned by mac80211, don't write */ - struct ieee80211_conf conf; +/** + * enum ieee80211_hw_flags - hardware flags + * + * These flags are used to indicate hardware capabilities to + * the stack. Generally, flags here should have their meaning + * done in a way that the simplest hardware doesn't need setting + * any particular flags. There are some exceptions to this rule, + * however, so you are advised to review these flags carefully. + * + * @IEEE80211_HW_HOST_GEN_BEACON_TEMPLATE: + * The device only needs to be supplied with a beacon template. + * If you need the host to generate each beacon then don't use + * this flag and call ieee80211_beacon_get() when you need the + * next beacon frame. 
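/*
 * Illustrative sketch only, not part of this patch: a minimal set_key()
 * implementation for a hypothetical "foo" device that can offload WEP and
 * TKIP but not CCMP, and that wants mac80211 to generate IVs (and the
 * Michael MIC for TKIP) in software.  foo_hw_add_key(), foo_hw_del_key()
 * and struct foo_priv are assumptions.
 */
#include <linux/errno.h>
#include <net/mac80211.h>

static int foo_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
		       const u8 *local_address, const u8 *address,
		       struct ieee80211_key_conf *key)
{
	struct foo_priv *priv = hw->priv;
	int slot;

	switch (cmd) {
	case SET_KEY:
		if (key->alg == ALG_CCMP)
			return -EOPNOTSUPP;	/* let mac80211 do CCMP in software */
		slot = foo_hw_add_key(priv, address, key);
		if (slot < 0)
			return -ENOSPC;		/* no free hardware key slot */
		key->hw_key_idx = slot;		/* used later in TX control data */
		/* hardware only ciphers: ask the stack for IV/MMIC generation */
		key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
		if (key->alg == ALG_TKIP)
			key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC;
		return 0;
	case DISABLE_KEY:
		foo_hw_del_key(priv, key->hw_key_idx);
		return 0;
	}
	return -EINVAL;
}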
Note that if you set this flag, you must + * implement the set_tim() callback for powersave mode to work + * properly. + * This flag is only relevant for access-point mode. + * + * @IEEE80211_HW_RX_INCLUDES_FCS: + * Indicates that received frames passed to the stack include + * the FCS at the end. + * + * @IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING: + * Some wireless LAN chipsets buffer broadcast/multicast frames + * for power saving stations in the hardware/firmware and others + * rely on the host system for such buffering. This option is used + * to configure the IEEE 802.11 upper layer to buffer broadcast and + * multicast frames when there are power saving stations so that + * the driver can fetch them with ieee80211_get_buffered_bc(). Note + * that not setting this flag works properly only when the + * %IEEE80211_HW_HOST_GEN_BEACON_TEMPLATE is also not set because + * otherwise the stack will not know when the DTIM beacon was sent. + * + * @IEEE80211_HW_DEFAULT_REG_DOMAIN_CONFIGURED: + * Channels are already configured to the default regulatory domain + * specified in the device's EEPROM + */ +enum ieee80211_hw_flags { + IEEE80211_HW_HOST_GEN_BEACON_TEMPLATE = 1<<0, + IEEE80211_HW_RX_INCLUDES_FCS = 1<<1, + IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING = 1<<2, + IEEE80211_HW_DEFAULT_REG_DOMAIN_CONFIGURED = 1<<3, +}; - /* Single thread workqueue available for driver use - * Allocated by mac80211 on registration */ +/** + * struct ieee80211_hw - hardware information and state + * + * This structure contains the configuration and hardware + * information for an 802.11 PHY. + * + * @wiphy: This points to the &struct wiphy allocated for this + * 802.11 PHY. You must fill in the @perm_addr and @dev + * members of this structure using SET_IEEE80211_DEV() + * and SET_IEEE80211_PERM_ADDR(). + * + * @conf: &struct ieee80211_conf, device configuration, don't use. + * + * @workqueue: single threaded workqueue available for driver use, + * allocated by mac80211 on registration and flushed on + * unregistration. + * + * @priv: pointer to private area that was allocated for driver use + * along with this structure. + * + * @flags: hardware flags, see &enum ieee80211_hw_flags. + * + * @extra_tx_headroom: headroom to reserve in each transmit skb + * for use by the driver (e.g. for transmit headers.) + * + * @channel_change_time: time (in microseconds) it takes to change channels. + * + * @max_rssi: Maximum value for ssi in RX information, use + * negative numbers for dBm and 0 to indicate no support. + * + * @max_signal: like @max_rssi, but for the signal value. + * + * @max_noise: like @max_rssi, but for the noise value. + * + * @queues: number of available hardware transmit queues for + * data packets. WMM/QoS requires at least four. + */ +struct ieee80211_hw { + struct ieee80211_conf conf; + struct wiphy *wiphy; struct workqueue_struct *workqueue; - - /* Pointer to the private area that was - * allocated with this struct for you. */ void *priv; - - /* The rest is information about your hardware */ - - /* TODO: frame_type 802.11/802.3, sw_encryption requirements */ - - /* Some wireless LAN chipsets generate beacons in the hardware/firmware - * and others rely on host generated beacons. This option is used to - * configure the upper layer IEEE 802.11 module to generate beacons. - * The low-level driver can use ieee80211_beacon_get() to fetch the - * next beacon frame. */ -#define IEEE80211_HW_HOST_GEN_BEACON (1<<0) - - /* The device needs to be supplied with a beacon template only. 
*/ -#define IEEE80211_HW_HOST_GEN_BEACON_TEMPLATE (1<<1) - - /* Some devices handle decryption internally and do not - * indicate whether the frame was encrypted (unencrypted frames - * will be dropped by the hardware, unless specifically allowed - * through) */ -#define IEEE80211_HW_DEVICE_HIDES_WEP (1<<2) - - /* Whether RX frames passed to ieee80211_rx() include FCS in the end */ -#define IEEE80211_HW_RX_INCLUDES_FCS (1<<3) - - /* Some wireless LAN chipsets buffer broadcast/multicast frames for - * power saving stations in the hardware/firmware and others rely on - * the host system for such buffering. This option is used to - * configure the IEEE 802.11 upper layer to buffer broadcast/multicast - * frames when there are power saving stations so that low-level driver - * can fetch them with ieee80211_get_buffered_bc(). */ -#define IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING (1<<4) - -#define IEEE80211_HW_WEP_INCLUDE_IV (1<<5) - - /* will data nullfunc frames get proper TX status callback */ -#define IEEE80211_HW_DATA_NULLFUNC_ACK (1<<6) - - /* Force software encryption for TKIP packets if WMM is enabled. */ -#define IEEE80211_HW_NO_TKIP_WMM_HWACCEL (1<<7) - - /* Some devices handle Michael MIC internally and do not include MIC in - * the received packets passed up. device_strips_mic must be set - * for such devices. The 'encryption' frame control bit is expected to - * be still set in the IEEE 802.11 header with this option unlike with - * the device_hides_wep configuration option. - */ -#define IEEE80211_HW_DEVICE_STRIPS_MIC (1<<8) - - /* Device is capable of performing full monitor mode even during - * normal operation. */ -#define IEEE80211_HW_MONITOR_DURING_OPER (1<<9) - - /* Device does not need BSSID filter set to broadcast in order to - * receive all probe responses while scanning */ -#define IEEE80211_HW_NO_PROBE_FILTERING (1<<10) - - /* Channels are already configured to the default regulatory domain - * specified in the device's EEPROM */ -#define IEEE80211_HW_DEFAULT_REG_DOMAIN_CONFIGURED (1<<11) - - /* calculate Michael MIC for an MSDU when doing hwcrypto */ -#define IEEE80211_HW_TKIP_INCLUDE_MMIC (1<<12) - /* Do TKIP phase1 key mixing in stack to support cards only do - * phase2 key mixing when doing hwcrypto */ -#define IEEE80211_HW_TKIP_REQ_PHASE1_KEY (1<<13) - /* Do TKIP phase1 and phase2 key mixing in stack and send the generated - * per-packet RC4 key with each TX frame when doing hwcrypto */ -#define IEEE80211_HW_TKIP_REQ_PHASE2_KEY (1<<14) - - u32 flags; /* hardware flags defined above */ - - /* Set to the size of a needed device specific skb headroom for TX skbs. */ + u32 flags; unsigned int extra_tx_headroom; - - /* This is the time in us to change channels - */ int channel_change_time; - /* Maximum values for various statistics. - * Leave at 0 to indicate no support. Use negative numbers for dBm. */ + u8 queues; s8 max_rssi; s8 max_signal; s8 max_noise; - - /* Number of available hardware TX queues for data packets. - * WMM requires at least four queues. 
- int queues; }; +/** + * SET_IEEE80211_DEV - set device for 802.11 hardware + * + * @hw: the &struct ieee80211_hw to set the device for + * @dev: the &struct device of this 802.11 device + */ static inline void SET_IEEE80211_DEV(struct ieee80211_hw *hw, struct device *dev) { set_wiphy_dev(hw->wiphy, dev); } +/** + * SET_IEEE80211_PERM_ADDR - set the permanent MAC address for 802.11 hardware + * + * @hw: the &struct ieee80211_hw to set the MAC address for + * @addr: the address to set + */ static inline void SET_IEEE80211_PERM_ADDR(struct ieee80211_hw *hw, u8 *addr) { memcpy(hw->wiphy->perm_addr, addr, ETH_ALEN); } -/* Configuration block used by the low-level driver to tell the 802.11 code - * about supported hardware features and to pass function pointers to callback - * functions. */ +/** + * DOC: Hardware crypto acceleration + * + * mac80211 is capable of taking advantage of many hardware + * acceleration designs for encryption and decryption operations. + * + * The set_key() callback in the &struct ieee80211_ops for a given + * device is called to enable hardware acceleration of encryption and + * decryption. The callback takes an @address parameter that will be + * the broadcast address for default keys, the other station's hardware + * address for individual keys or the zero address for keys that will + * be used only for transmission. + * Multiple transmission keys with the same key index may be used when + * VLANs are configured for an access point. + * + * The @local_address parameter will always be set to our own address, + * this is only relevant if you support multiple local addresses. + * + * When transmitting, the TX control data will use the @hw_key_idx + * selected by the driver by modifying the &struct ieee80211_key_conf + * pointed to by the @key parameter to the set_key() function. + * + * The set_key() call for the %SET_KEY command should return 0 if + * the key is now in use, -%EOPNOTSUPP or -%ENOSPC if it couldn't be + * added; if you return 0 then hw_key_idx must be assigned to the + * hardware key index, you are free to use the full u8 range. + * + * When the cmd is %DISABLE_KEY then it must succeed. + * + * Note that it is permissible to not decrypt a frame even if a key + * for it has been uploaded to hardware, the stack will not make any + * decision based on whether a key has been uploaded or not but rather + * based on the receive flags. + * + * The &struct ieee80211_key_conf structure pointed to by the @key + * parameter is guaranteed to be valid until another call to set_key() + * removes it, but it can only be used as a cookie to differentiate + * keys. + */ + +/** + * DOC: Frame filtering + * + * mac80211 needs to see many management frames for proper + * operation, and users may want to see many more frames when + * in monitor mode. However, for best CPU usage and power consumption, + * having as few frames as possible percolate through the stack is + * desirable. Hence, the hardware should filter as much as possible. + * + * To achieve this, mac80211 uses filter flags (see below) to tell + * the driver's configure_filter() function which frames should be + * passed to mac80211 and which should be filtered out. + * + * The configure_filter() callback is invoked with the parameters + * @mc_count and @mc_list for the combined multicast address list + * of all virtual interfaces, @changed_flags telling which flags + * were changed and @total_flags with the new flag states.
+ * + * If your device has no multicast address filters your driver will + * need to check both the %FIF_ALLMULTI flag and the @mc_count + * parameter to see whether multicast frames should be accepted + * or dropped. + * + * All unsupported flags in @total_flags must be cleared, i.e. you + * should clear all bits except those you honoured. + */ + +/** + * enum ieee80211_filter_flags - hardware filter flags + * + * These flags determine what the filter in hardware should be + * programmed to let through and what should not be passed to the + * stack. It is always safe to pass more frames than requested, + * but this has negative impact on power consumption. + * + * @FIF_PROMISC_IN_BSS: promiscuous mode within your BSS, + * think of the BSS as your network segment and then this corresponds + * to the regular ethernet device promiscuous mode. + * + * @FIF_ALLMULTI: pass all multicast frames, this is used if requested + * by the user or if the hardware is not capable of filtering by + * multicast address. + * + * @FIF_FCSFAIL: pass frames with failed FCS (but you need to set the + * %RX_FLAG_FAILED_FCS_CRC for them) + * + * @FIF_PLCPFAIL: pass frames with failed PLCP CRC (but you need to set + * the %RX_FLAG_FAILED_PLCP_CRC for them + * + * @FIF_BCN_PRBRESP_PROMISC: This flag is set during scanning to indicate + * to the hardware that it should not filter beacons or probe responses + * by BSSID. Filtering them can greatly reduce the amount of processing + * mac80211 needs to do and the amount of CPU wakeups, so you should + * honour this flag if possible. + * + * @FIF_CONTROL: pass control frames, if PROMISC_IN_BSS is not set then + * only those addressed to this station + * + * @FIF_OTHER_BSS: pass frames destined to other BSSes + */ +enum ieee80211_filter_flags { + FIF_PROMISC_IN_BSS = 1<<0, + FIF_ALLMULTI = 1<<1, + FIF_FCSFAIL = 1<<2, + FIF_PLCPFAIL = 1<<3, + FIF_BCN_PRBRESP_PROMISC = 1<<4, + FIF_CONTROL = 1<<5, + FIF_OTHER_BSS = 1<<6, +}; + +/** + * enum ieee80211_erp_change_flags - erp change flags + * + * These flags are used with the erp_ie_changed() callback in + * &struct ieee80211_ops to indicate which parameter(s) changed. + * @IEEE80211_ERP_CHANGE_PROTECTION: protection changed + * @IEEE80211_ERP_CHANGE_PREAMBLE: barker preamble mode changed + */ +enum ieee80211_erp_change_flags { + IEEE80211_ERP_CHANGE_PROTECTION = 1<<0, + IEEE80211_ERP_CHANGE_PREAMBLE = 1<<1, +}; + + +/** + * struct ieee80211_ops - callbacks from mac80211 to the driver + * + * This structure contains various callbacks that the driver may + * handle or, in some cases, must handle, for example to configure + * the hardware to a new channel or to transmit a frame. + * + * @tx: Handler that 802.11 module calls for each transmitted frame. + * skb contains the buffer starting from the IEEE 802.11 header. + * The low-level driver should send the frame out based on + * configuration in the TX control data. Must be implemented and + * atomic. + * + * @start: Called before the first netdevice attached to the hardware + * is enabled. This should turn on the hardware and must turn on + * frame reception (for possibly enabled monitor interfaces.) + * Returns negative error codes, these may be seen in userspace, + * or zero. + * When the device is started it should not have a MAC address + * to avoid acknowledging frames before a non-monitor device + * is added. + * Must be implemented. + * + * @stop: Called after last netdevice attached to the hardware + * is disabled. 
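/*
 * Illustrative sketch only, not part of this patch: a configure_filter()
 * implementation for hardware without a per-address multicast filter,
 * following the "Frame filtering" rules above.  FOO_RX_* register bits,
 * foo_write_rx_filter() and struct foo_priv are assumptions.
 */
#include <linux/types.h>
#include <net/mac80211.h>

static void foo_configure_filter(struct ieee80211_hw *hw,
				 unsigned int changed_flags,
				 unsigned int *total_flags,
				 int mc_count, struct dev_addr_list *mc_list)
{
	struct foo_priv *priv = hw->priv;
	u32 rxfilter = 0;

	/* no multicast address filter: accept all multicast whenever any
	 * address is configured or FIF_ALLMULTI is requested (passing more
	 * frames than requested is always safe) */
	if ((*total_flags & FIF_ALLMULTI) || mc_count > 0)
		rxfilter |= FOO_RX_ALLMULTI;
	if (*total_flags & FIF_FCSFAIL)
		rxfilter |= FOO_RX_BAD_FCS;
	if (*total_flags & FIF_BCN_PRBRESP_PROMISC)
		rxfilter |= FOO_RX_ANY_BSSID;

	/* clear every flag we do not honour, as required */
	*total_flags &= FIF_ALLMULTI | FIF_FCSFAIL | FIF_BCN_PRBRESP_PROMISC;

	foo_write_rx_filter(priv, rxfilter);	/* callback must be atomic */
}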
This should turn off the hardware (at least + * it must turn off frame reception.) + * May be called right after add_interface if that rejects + * an interface. + * Must be implemented. + * + * @add_interface: Called when a netdevice attached to the hardware is + * enabled. Because it is not called for monitor mode devices, @open + * and @stop must be implemented. + * The driver should perform any initialization it needs before + * the device can be enabled. The initial configuration for the + * interface is given in the conf parameter. + * The callback may refuse to add an interface by returning a + * negative error code (which will be seen in userspace.) + * Must be implemented. + * + * @remove_interface: Notifies a driver that an interface is going down. + * The @stop callback is called after this if it is the last interface + * and no monitor interfaces are present. + * When all interfaces are removed, the MAC address in the hardware + * must be cleared so the device no longer acknowledges packets, + * the mac_addr member of the conf structure is, however, set to the + * MAC address of the device going away. + * Hence, this callback must be implemented. + * + * @config: Handler for configuration requests. IEEE 802.11 code calls this + * function to change hardware configuration, e.g., channel. + * + * @config_interface: Handler for configuration requests related to interfaces + * (e.g. BSSID changes.) + * + * @configure_filter: Configure the device's RX filter. + * See the section "Frame filtering" for more information. + * This callback must be implemented and atomic. + * + * @set_tim: Set TIM bit. If the hardware/firmware takes care of beacon + * generation (that is, %IEEE80211_HW_HOST_GEN_BEACON_TEMPLATE is set) + * mac80211 calls this function when a TIM bit must be set or cleared + * for a given AID. Must be atomic. + * + * @set_key: See the section "Hardware crypto acceleration" + * This callback can sleep, and is only called between add_interface + * and remove_interface calls, i.e. while the interface with the + * given local_address is enabled. + * + * @set_ieee8021x: Enable/disable IEEE 802.1X. This item requests wlan card + * to pass unencrypted EAPOL-Key frames even when encryption is + * configured. If the wlan card does not require such a configuration, + * this function pointer can be set to NULL. + * + * @set_port_auth: Set port authorization state (IEEE 802.1X PAE) to be + * authorized (@authorized=1) or unauthorized (=0). This function can be + * used if the wlan hardware or low-level driver implements PAE. + * mac80211 will filter frames based on authorization state in any case, + * so this function pointer can be NULL if low-level driver does not + * require event notification about port state changes. + * + * @hw_scan: Ask the hardware to service the scan request, no need to start + * the scan state machine in stack. + * + * @get_stats: return low-level statistics + * + * @set_privacy_invoked: For devices that generate their own beacons and probe + * response or association responses this updates the state of privacy_invoked + * returns 0 for success or an error number. + * + * @get_sequence_counter: For devices that have internal sequence counters this + * callback allows mac80211 to access the current value of a counter. + * This callback seems not well-defined, tell us if you need it. + * + * @set_rts_threshold: Configuration of RTS threshold (if device needs it) + * + * @set_frag_threshold: Configuration of fragmentation threshold. 
Assign this if + * the device does fragmentation by itself; if this method is assigned then + * the stack will not do fragmentation. + * + * @set_retry_limit: Configuration of retry limits (if device needs it) + * + * @sta_notify: Notifies low level driver about addition or removal + * of assocaited station or AP. + * + * @erp_ie_changed: Handle ERP IE change notifications. Must be atomic. + * + * @conf_tx: Configure TX queue parameters (EDCF (aifs, cw_min, cw_max), + * bursting) for a hardware TX queue. The @queue parameter uses the + * %IEEE80211_TX_QUEUE_* constants. Must be atomic. + * + * @get_tx_stats: Get statistics of the current TX queue status. This is used + * to get number of currently queued packets (queue length), maximum queue + * size (limit), and total number of packets sent using each TX queue + * (count). This information is used for WMM to find out which TX + * queues have room for more packets and by hostapd to provide + * statistics about the current queueing state to external programs. + * + * @get_tsf: Get the current TSF timer value from firmware/hardware. Currently, + * this is only used for IBSS mode debugging and, as such, is not a + * required function. Must be atomic. + * + * @reset_tsf: Reset the TSF timer and allow firmware/hardware to synchronize + * with other STAs in the IBSS. This is only used in IBSS mode. This + * function is optional if the firmware/hardware takes full care of + * TSF synchronization. + * + * @beacon_update: Setup beacon data for IBSS beacons. Unlike access point, + * IBSS uses a fixed beacon frame which is configured using this + * function. + * If the driver returns success (0) from this callback, it owns + * the skb. That means the driver is responsible to kfree_skb() it. + * The control structure is not dynamically allocated. That means the + * driver does not own the pointer and if it needs it somewhere + * outside of the context of this function, it must copy it + * somewhere else. + * This handler is required only for IBSS mode. + * + * @tx_last_beacon: Determine whether the last IBSS beacon was sent by us. + * This is needed only for IBSS mode and the result of this function is + * used to determine whether to reply to Probe Requests. + */ struct ieee80211_ops { - /* Handler that 802.11 module calls for each transmitted frame. - * skb contains the buffer starting from the IEEE 802.11 header. - * The low-level driver should send the frame out based on - * configuration in the TX control data. - * Must be atomic. */ int (*tx)(struct ieee80211_hw *hw, struct sk_buff *skb, struct ieee80211_tx_control *control); - - /* Handler for performing hardware reset. */ - int (*reset)(struct ieee80211_hw *hw); - - /* Handler that is called when any netdevice attached to the hardware - * device is set UP for the first time. This can be used, e.g., to - * enable interrupts and beacon sending. */ - int (*open)(struct ieee80211_hw *hw); - - /* Handler that is called when the last netdevice attached to the - * hardware device is set DOWN. This can be used, e.g., to disable - * interrupts and beacon sending. */ - int (*stop)(struct ieee80211_hw *hw); - - /* Handler for asking a driver if a new interface can be added (or, - * more exactly, set UP). If the handler returns zero, the interface - * is added. Driver should perform any initialization it needs prior - * to returning zero. By returning non-zero addition of the interface - * is inhibited. 
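/*
 * Illustrative sketch only, not part of this patch: add_interface() and
 * remove_interface() for a hypothetical single-MAC "foo" device,
 * programming and clearing the hardware address filter as the callback
 * documentation above requires.  foo_set_hw_addr(), struct foo_priv and
 * its has_if/if_id members are assumptions.
 */
#include <linux/errno.h>
#include <net/mac80211.h>

static int foo_add_interface(struct ieee80211_hw *hw,
			     struct ieee80211_if_init_conf *conf)
{
	struct foo_priv *priv = hw->priv;

	/* this simple hardware supports a single interface */
	if (priv->has_if)
		return -EBUSY;

	switch (conf->type) {
	case IEEE80211_IF_TYPE_STA:
	case IEEE80211_IF_TYPE_IBSS:
		priv->has_if = 1;
		priv->if_id = conf->if_id;
		/* start acknowledging frames sent to this address */
		foo_set_hw_addr(priv, conf->mac_addr);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}

static void foo_remove_interface(struct ieee80211_hw *hw,
				 struct ieee80211_if_init_conf *conf)
{
	struct foo_priv *priv = hw->priv;

	if (conf->if_id == priv->if_id) {
		priv->has_if = 0;
		/* clear the address so the hardware stops ACKing */
		foo_set_hw_addr(priv, NULL);
	}
}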
Unless monitor_during_oper is set, it is guaranteed - * that monitor interfaces and normal interfaces are mutually - * exclusive. If assigned, the open() handler is called after - * add_interface() if this is the first device added. The - * add_interface() callback has to be assigned because it is the only - * way to obtain the requested MAC address for any interface. - */ + int (*start)(struct ieee80211_hw *hw); + void (*stop)(struct ieee80211_hw *hw); int (*add_interface)(struct ieee80211_hw *hw, struct ieee80211_if_init_conf *conf); - - /* Notify a driver that an interface is going down. The stop() handler - * is called prior to this if this is a last interface. */ void (*remove_interface)(struct ieee80211_hw *hw, struct ieee80211_if_init_conf *conf); - - /* Handler for configuration requests. IEEE 802.11 code calls this - * function to change hardware configuration, e.g., channel. */ int (*config)(struct ieee80211_hw *hw, struct ieee80211_conf *conf); - - /* Handler for configuration requests related to interfaces (e.g. - * BSSID). */ int (*config_interface)(struct ieee80211_hw *hw, int if_id, struct ieee80211_if_conf *conf); - - /* ieee80211 drivers do not have access to the &struct net_device - * that is (are) connected with their device. Hence (and because - * we need to combine the multicast lists and flags for multiple - * virtual interfaces), they cannot assign set_multicast_list. - * The parameters here replace dev->flags and dev->mc_count, - * dev->mc_list is replaced by calling ieee80211_get_mc_list_item. - * Must be atomic. */ - void (*set_multicast_list)(struct ieee80211_hw *hw, - unsigned short flags, int mc_count); - - /* Set TIM bit handler. If the hardware/firmware takes care of beacon - * generation, IEEE 802.11 code uses this function to tell the - * low-level to set (or clear if set==0) TIM bit for the given aid. If - * host system is used to generate beacons, this handler is not used - * and low-level driver should set it to NULL. - * Must be atomic. */ + void (*configure_filter)(struct ieee80211_hw *hw, + unsigned int changed_flags, + unsigned int *total_flags, + int mc_count, struct dev_addr_list *mc_list); int (*set_tim)(struct ieee80211_hw *hw, int aid, int set); - - /* Set encryption key. IEEE 802.11 module calls this function to set - * encryption keys. addr is ff:ff:ff:ff:ff:ff for default keys and - * station hwaddr for individual keys. aid of the station is given - * to help low-level driver in selecting which key->hw_key_idx to use - * for this key. TX control data will use the hw_key_idx selected by - * the low-level driver. - * Must be atomic. */ - int (*set_key)(struct ieee80211_hw *hw, set_key_cmd cmd, - u8 *addr, struct ieee80211_key_conf *key, int aid); - - /* Set TX key index for default/broadcast keys. This is needed in cases - * where wlan card is doing full WEP/TKIP encapsulation (wep_include_iv - * is not set), in other cases, this function pointer can be set to - * NULL since the IEEE 802. 11 module takes care of selecting the key - * index for each TX frame. */ - int (*set_key_idx)(struct ieee80211_hw *hw, int idx); - - /* Enable/disable IEEE 802.1X. This item requests wlan card to pass - * unencrypted EAPOL-Key frames even when encryption is configured. - * If the wlan card does not require such a configuration, this - * function pointer can be set to NULL. 
*/ + int (*set_key)(struct ieee80211_hw *hw, enum set_key_cmd cmd, + const u8 *local_address, const u8 *address, + struct ieee80211_key_conf *key); int (*set_ieee8021x)(struct ieee80211_hw *hw, int use_ieee8021x); - - /* Set port authorization state (IEEE 802.1X PAE) to be authorized - * (authorized=1) or unauthorized (authorized=0). This function can be - * used if the wlan hardware or low-level driver implements PAE. - * 80211.o module will anyway filter frames based on authorization - * state, so this function pointer can be NULL if low-level driver does - * not require event notification about port state changes. - * Currently unused. */ int (*set_port_auth)(struct ieee80211_hw *hw, u8 *addr, int authorized); - - /* Ask the hardware to service the scan request, no need to start - * the scan state machine in stack. */ int (*hw_scan)(struct ieee80211_hw *hw, u8 *ssid, size_t len); - - /* return low-level statistics */ int (*get_stats)(struct ieee80211_hw *hw, struct ieee80211_low_level_stats *stats); - - /* For devices that generate their own beacons and probe response - * or association responses this updates the state of privacy_invoked - * returns 0 for success or an error number */ int (*set_privacy_invoked)(struct ieee80211_hw *hw, int privacy_invoked); - - /* For devices that have internal sequence counters, allow 802.11 - * code to access the current value of a counter */ int (*get_sequence_counter)(struct ieee80211_hw *hw, u8* addr, u8 keyidx, u8 txrx, u32* iv32, u16* iv16); - - /* Configuration of RTS threshold (if device needs it) */ int (*set_rts_threshold)(struct ieee80211_hw *hw, u32 value); - - /* Configuration of fragmentation threshold. - * Assign this if the device does fragmentation by itself, - * if this method is assigned then the stack will not do - * fragmentation. */ int (*set_frag_threshold)(struct ieee80211_hw *hw, u32 value); - - /* Configuration of retry limits (if device needs it) */ int (*set_retry_limit)(struct ieee80211_hw *hw, u32 short_retry, u32 long_retr); - - /* Number of STAs in STA table notification (NULL = disabled). - * Must be atomic. */ - void (*sta_table_notification)(struct ieee80211_hw *hw, - int num_sta); - - /* Configure TX queue parameters (EDCF (aifs, cw_min, cw_max), - * bursting) for a hardware TX queue. - * queue = IEEE80211_TX_QUEUE_*. - * Must be atomic. */ + void (*sta_notify)(struct ieee80211_hw *hw, int if_id, + enum sta_notify_cmd, const u8 *addr); + void (*erp_ie_changed)(struct ieee80211_hw *hw, u8 changes, + int cts_protection, int preamble); int (*conf_tx)(struct ieee80211_hw *hw, int queue, const struct ieee80211_tx_queue_params *params); - - /* Get statistics of the current TX queue status. This is used to get - * number of currently queued packets (queue length), maximum queue - * size (limit), and total number of packets sent using each TX queue - * (count). - * Currently unused. */ int (*get_tx_stats)(struct ieee80211_hw *hw, struct ieee80211_tx_queue_stats *stats); - - /* Get the current TSF timer value from firmware/hardware. Currently, - * this is only used for IBSS mode debugging and, as such, is not a - * required function. - * Must be atomic. */ u64 (*get_tsf)(struct ieee80211_hw *hw); - - /* Reset the TSF timer and allow firmware/hardware to synchronize with - * other STAs in the IBSS. This is only used in IBSS mode. This - * function is optional if the firmware/hardware takes full care of - * TSF synchronization. */ void (*reset_tsf)(struct ieee80211_hw *hw); - - /* Setup beacon data for IBSS beacons. 
Unlike access point (Master), - * IBSS uses a fixed beacon frame which is configured using this - * function. This handler is required only for IBSS mode. */ int (*beacon_update)(struct ieee80211_hw *hw, struct sk_buff *skb, struct ieee80211_tx_control *control); - - /* Determine whether the last IBSS beacon was sent by us. This is - * needed only for IBSS mode and the result of this function is used to - * determine whether to reply to Probe Requests. */ int (*tx_last_beacon)(struct ieee80211_hw *hw); }; -/* Allocate a new hardware device. This must be called once for each - * hardware device. The returned pointer must be used to refer to this - * device when calling other functions. 802.11 code allocates a private data - * area for the low-level driver. The size of this area is given as - * priv_data_len. +/** + * ieee80211_alloc_hw - Allocate a new hardware device + * + * This must be called once for each hardware device. The returned pointer + * must be used to refer to this device when calling other functions. + * mac80211 allocates a private data area for the driver pointed to by + * @priv in &struct ieee80211_hw, the size of this area is given as + * @priv_data_len. + * + * @priv_data_len: length of private data + * @ops: callbacks for this device */ struct ieee80211_hw *ieee80211_alloc_hw(size_t priv_data_len, const struct ieee80211_ops *ops); -/* Register hardware device to the IEEE 802.11 code and kernel. Low-level - * drivers must call this function before using any other IEEE 802.11 - * function except ieee80211_register_hwmode. */ +/** + * ieee80211_register_hw - Register hardware device + * + * You must call this function before any other functions + * except ieee80211_register_hwmode. + * + * @hw: the device to register as returned by ieee80211_alloc_hw() + */ int ieee80211_register_hw(struct ieee80211_hw *hw); -/* driver can use this and ieee80211_get_rx_led_name to get the - * name of the registered LEDs after ieee80211_register_hw - * was called. - * This is useful to set the default trigger on the LED class - * device that your driver should export for each LED the device - * has, that way the default behaviour will be as expected but - * the user can still change it/turn off the LED etc. - */ #ifdef CONFIG_MAC80211_LEDS extern char *__ieee80211_get_tx_led_name(struct ieee80211_hw *hw); extern char *__ieee80211_get_rx_led_name(struct ieee80211_hw *hw); +extern char *__ieee80211_get_assoc_led_name(struct ieee80211_hw *hw); #endif +/** + * ieee80211_get_tx_led_name - get name of TX LED + * + * mac80211 creates a transmit LED trigger for each wireless hardware + * that can be used to drive LEDs if your driver registers a LED device. + * This function returns the name (or %NULL if not configured for LEDs) + * of the trigger so you can automatically link the LED device. + * + * @hw: the hardware to get the LED trigger name for + */ static inline char *ieee80211_get_tx_led_name(struct ieee80211_hw *hw) { #ifdef CONFIG_MAC80211_LEDS @@ -767,6 +1109,16 @@ static inline char *ieee80211_get_tx_led_name(struct ieee80211_hw *hw) #endif } +/** + * ieee80211_get_rx_led_name - get name of RX LED + * + * mac80211 creates a receive LED trigger for each wireless hardware + * that can be used to drive LEDs if your driver registers a LED device. + * This function returns the name (or %NULL if not configured for LEDs) + * of the trigger so you can automatically link the LED device. 
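/*
 * Illustrative sketch only, not part of this patch: typical probe-time use
 * of ieee80211_alloc_hw() and ieee80211_register_hw() for a hypothetical
 * PCI driver "foo".  The flag choices, queue count, foo_ops, struct
 * foo_priv and struct foo_tx_desc are assumptions.
 */
#include <linux/pci.h>
#include <linux/errno.h>
#include <net/mac80211.h>

static int foo_probe_mac80211(struct pci_dev *pdev, const u8 *perm_addr)
{
	struct ieee80211_hw *hw;
	struct foo_priv *priv;
	int err;

	/* foo_ops is an assumed "static const struct ieee80211_ops" */
	hw = ieee80211_alloc_hw(sizeof(*priv), &foo_ops);
	if (!hw)
		return -ENOMEM;
	priv = hw->priv;
	priv->hw = hw;

	SET_IEEE80211_DEV(hw, &pdev->dev);
	SET_IEEE80211_PERM_ADDR(hw, (u8 *)perm_addr);

	hw->flags = IEEE80211_HW_RX_INCLUDES_FCS |
		    IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING;
	hw->extra_tx_headroom = sizeof(struct foo_tx_desc);
	hw->queues = 4;			/* WMM/QoS requires at least four */

	err = ieee80211_register_hw(hw);
	if (err) {
		ieee80211_free_hw(hw);
		return err;
	}
	return 0;
}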
+ * + * @hw: the hardware to get the LED trigger name for + */ static inline char *ieee80211_get_rx_led_name(struct ieee80211_hw *hw) { #ifdef CONFIG_MAC80211_LEDS @@ -776,33 +1128,94 @@ static inline char *ieee80211_get_rx_led_name(struct ieee80211_hw *hw) #endif } +static inline char *ieee80211_get_assoc_led_name(struct ieee80211_hw *hw) +{ +#ifdef CONFIG_MAC80211_LEDS + return __ieee80211_get_assoc_led_name(hw); +#else + return NULL; +#endif +} + + /* Register a new hardware PHYMODE capability to the stack. */ int ieee80211_register_hwmode(struct ieee80211_hw *hw, struct ieee80211_hw_mode *mode); -/* Unregister a hardware device. This function instructs 802.11 code to free - * allocated resources and unregister netdevices from the kernel. */ +/** + * ieee80211_unregister_hw - Unregister a hardware device + * + * This function instructs mac80211 to free allocated resources + * and unregister netdevices from the networking subsystem. + * + * @hw: the hardware to unregister + */ void ieee80211_unregister_hw(struct ieee80211_hw *hw); -/* Free everything that was allocated including private data of a driver. */ +/** + * ieee80211_free_hw - free hardware descriptor + * + * This function frees everything that was allocated, including the + * private data for the driver. You must call ieee80211_unregister_hw() + * before calling this function + * + * @hw: the hardware to free + */ void ieee80211_free_hw(struct ieee80211_hw *hw); -/* Receive frame callback function. The low-level driver uses this function to - * send received frames to the IEEE 802.11 code. Receive buffer (skb) must - * start with IEEE 802.11 header. */ +/* trick to avoid symbol clashes with the ieee80211 subsystem */ void __ieee80211_rx(struct ieee80211_hw *hw, struct sk_buff *skb, struct ieee80211_rx_status *status); + +/** + * ieee80211_rx - receive frame + * + * Use this function to hand received frames to mac80211. The receive + * buffer in @skb must start with an IEEE 802.11 header or a radiotap + * header if %RX_FLAG_RADIOTAP is set in the @status flags. + * + * This function may not be called in IRQ context. + * + * @hw: the hardware this frame came in on + * @skb: the buffer to receive, owned by mac80211 after this call + * @status: status of this frame; the status pointer need not be valid + * after this function returns + */ +static inline void ieee80211_rx(struct ieee80211_hw *hw, struct sk_buff *skb, + struct ieee80211_rx_status *status) +{ + __ieee80211_rx(hw, skb, status); +} + +/** + * ieee80211_rx_irqsafe - receive frame + * + * Like ieee80211_rx() but can be called in IRQ context + * (internally defers to a workqueue.) + * + * @hw: the hardware this frame came in on + * @skb: the buffer to receive, owned by mac80211 after this call + * @status: status of this frame; the status pointer need not be valid + * after this function returns and is not freed by mac80211, + * it is recommended that it points to a stack area + */ void ieee80211_rx_irqsafe(struct ieee80211_hw *hw, struct sk_buff *skb, struct ieee80211_rx_status *status); -/* Transmit status callback function. The low-level driver must call this - * function to report transmit status for all the TX frames that had - * req_tx_status set in the transmit control fields. In addition, this should - * be called at least for all unicast frames to provide information for TX rate - * control algorithm. In order to maintain all statistics, this function is - * recommended to be called after each frame, including multicast/broadcast, is - * sent. 
*/ +/** + * ieee80211_tx_status - transmit status callback + * + * Call this function for all transmitted frames after they have been + * transmitted. It is permissible to not call this function for + * multicast frames but this can affect statistics. + * + * @hw: the hardware the frame was transmitted by + * @skb: the frame that was transmitted, owned by mac80211 after this call + * @status: status information for this frame; the status pointer need not + * be valid after this function returns and is not freed by mac80211, + * it is recommended that it points to a stack area + */ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb, struct ieee80211_tx_status *status); @@ -830,6 +1243,7 @@ struct sk_buff *ieee80211_beacon_get(struct ieee80211_hw *hw, /** * ieee80211_rts_get - RTS frame generation function * @hw: pointer obtained from ieee80211_alloc_hw(). + * @if_id: interface ID from &struct ieee80211_if_init_conf. * @frame: pointer to the frame that is going to be protected by the RTS. * @frame_len: the frame length (in octets). * @frame_txctl: &struct ieee80211_tx_control of the frame. @@ -840,7 +1254,7 @@ struct sk_buff *ieee80211_beacon_get(struct ieee80211_hw *hw, * the next RTS frame from the 802.11 code. The low-level is responsible * for calling this function before and RTS frame is needed. */ -void ieee80211_rts_get(struct ieee80211_hw *hw, +void ieee80211_rts_get(struct ieee80211_hw *hw, int if_id, const void *frame, size_t frame_len, const struct ieee80211_tx_control *frame_txctl, struct ieee80211_rts *rts); @@ -848,6 +1262,7 @@ void ieee80211_rts_get(struct ieee80211_hw *hw, /** * ieee80211_rts_duration - Get the duration field for an RTS frame * @hw: pointer obtained from ieee80211_alloc_hw(). + * @if_id: interface ID from &struct ieee80211_if_init_conf. * @frame_len: the length of the frame that is going to be protected by the RTS. * @frame_txctl: &struct ieee80211_tx_control of the frame. * @@ -855,13 +1270,14 @@ void ieee80211_rts_get(struct ieee80211_hw *hw, * the duration field, the low-level driver uses this function to receive * the duration field value in little-endian byteorder. */ -__le16 ieee80211_rts_duration(struct ieee80211_hw *hw, +__le16 ieee80211_rts_duration(struct ieee80211_hw *hw, int if_id, size_t frame_len, const struct ieee80211_tx_control *frame_txctl); /** * ieee80211_ctstoself_get - CTS-to-self frame generation function * @hw: pointer obtained from ieee80211_alloc_hw(). + * @if_id: interface ID from &struct ieee80211_if_init_conf. * @frame: pointer to the frame that is going to be protected by the CTS-to-self. * @frame_len: the frame length (in octets). * @frame_txctl: &struct ieee80211_tx_control of the frame. @@ -872,7 +1288,7 @@ __le16 ieee80211_rts_duration(struct ieee80211_hw *hw, * the next CTS-to-self frame from the 802.11 code. The low-level is responsible * for calling this function before and CTS-to-self frame is needed. */ -void ieee80211_ctstoself_get(struct ieee80211_hw *hw, +void ieee80211_ctstoself_get(struct ieee80211_hw *hw, int if_id, const void *frame, size_t frame_len, const struct ieee80211_tx_control *frame_txctl, struct ieee80211_cts *cts); @@ -880,6 +1296,7 @@ void ieee80211_ctstoself_get(struct ieee80211_hw *hw, /** * ieee80211_ctstoself_duration - Get the duration field for a CTS-to-self frame * @hw: pointer obtained from ieee80211_alloc_hw(). + * @if_id: interface ID from &struct ieee80211_if_init_conf. * @frame_len: the length of the frame that is going to be protected by the CTS-to-self. 
* @frame_txctl: &struct ieee80211_tx_control of the frame. * @@ -887,20 +1304,21 @@ void ieee80211_ctstoself_get(struct ieee80211_hw *hw, * the duration field, the low-level driver uses this function to receive * the duration field value in little-endian byteorder. */ -__le16 ieee80211_ctstoself_duration(struct ieee80211_hw *hw, +__le16 ieee80211_ctstoself_duration(struct ieee80211_hw *hw, int if_id, size_t frame_len, const struct ieee80211_tx_control *frame_txctl); /** * ieee80211_generic_frame_duration - Calculate the duration field for a frame * @hw: pointer obtained from ieee80211_alloc_hw(). + * @if_id: interface ID from &struct ieee80211_if_init_conf. * @frame_len: the length of the frame. * @rate: the rate (in 100kbps) at which the frame is going to be transmitted. * * Calculate the duration field of some generic frame, given its * length and transmission rate (in 100kbps). */ -__le16 ieee80211_generic_frame_duration(struct ieee80211_hw *hw, +__le16 ieee80211_generic_frame_duration(struct ieee80211_hw *hw, int if_id, size_t frame_len, int rate); @@ -929,14 +1347,26 @@ struct sk_buff * ieee80211_get_buffered_bc(struct ieee80211_hw *hw, int if_id, struct ieee80211_tx_control *control); -/* Given an sk_buff with a raw 802.11 header at the data pointer this function +/** + * ieee80211_get_hdrlen_from_skb - get header length from data + * + * Given an skb with a raw 802.11 header at the data pointer this function * returns the 802.11 header length in bytes (not including encryption * headers). If the data in the sk_buff is too short to contain a valid 802.11 * header the function returns 0. + * + * @skb: the frame */ int ieee80211_get_hdrlen_from_skb(const struct sk_buff *skb); -/* Like ieee80211_get_hdrlen_from_skb() but takes a FC in CPU order. */ +/** + * ieee80211_get_hdrlen - get header length from frame control + * + * This function returns the 802.11 header length in bytes (not including + * encryption headers.) + * + * @fc: the frame control field (in CPU endianness) + */ int ieee80211_get_hdrlen(u16 fc); /** @@ -982,66 +1412,14 @@ void ieee80211_stop_queues(struct ieee80211_hw *hw); void ieee80211_wake_queues(struct ieee80211_hw *hw); /** - * ieee80211_get_mc_list_item - iteration over items in multicast list - * @hw: pointer as obtained from ieee80211_alloc_hw(). - * @prev: value returned by previous call to ieee80211_get_mc_list_item() or - * NULL to start a new iteration. - * @ptr: pointer to buffer of void * type for internal usage of - * ieee80211_get_mc_list_item(). - * - * Iterates over items in multicast list of given device. To get the first - * item, pass NULL in @prev and in *@ptr. In subsequent calls, pass the - * value returned by previous call in @prev. Don't alter *@ptr during - * iteration. When there are no more items, NULL is returned. + * ieee80211_scan_completed - completed hardware scan + * + * When hardware scan offload is used (i.e. the hw_scan() callback is + * assigned) this function needs to be called by the driver to notify + * mac80211 that the scan finished. + * + * @hw: the hardware that finished the scan */ -struct dev_mc_list * -ieee80211_get_mc_list_item(struct ieee80211_hw *hw, - struct dev_mc_list *prev, - void **ptr); - -/* called by driver to notify scan status completed */ void ieee80211_scan_completed(struct ieee80211_hw *hw); -/* Function to indicate Radar Detection. The low level driver must call this - * function to indicate the presence of radar in the current channel. 
- * Additionally the radar type also could be sent */ -int ieee80211_radar_status(struct ieee80211_hw *hw, int channel, - int radar, int radar_type); - -/* return a pointer to the source address (SA) */ -static inline u8 *ieee80211_get_SA(struct ieee80211_hdr *hdr) -{ - u8 *raw = (u8 *) hdr; - u8 tofrom = (*(raw+1)) & 3; /* get the TODS and FROMDS bits */ - - switch (tofrom) { - case 2: - return hdr->addr3; - case 3: - return hdr->addr4; - } - return hdr->addr2; -} - -/* return a pointer to the destination address (DA) */ -static inline u8 *ieee80211_get_DA(struct ieee80211_hdr *hdr) -{ - u8 *raw = (u8 *) hdr; - u8 to_ds = (*(raw+1)) & 1; /* get the TODS bit */ - - if (to_ds) - return hdr->addr3; - return hdr->addr1; -} - -static inline int ieee80211_get_morefrag(struct ieee80211_hdr *hdr) -{ - return (le16_to_cpu(hdr->frame_control) & - IEEE80211_FCTL_MOREFRAGS) != 0; -} - -#define MAC_FMT "%02x:%02x:%02x:%02x:%02x:%02x" -#define MAC_ARG(x) ((u8*)(x))[0], ((u8*)(x))[1], ((u8*)(x))[2], \ - ((u8*)(x))[3], ((u8*)(x))[4], ((u8*)(x))[5] - #endif /* MAC80211_H */ diff --git a/include/net/ndisc.h b/include/net/ndisc.h index 475b10c575b38ec1c137ebab2953adb21278aad0..6684f7efbeeb84add2716337c8d8628fc6449bf8 100644 --- a/include/net/ndisc.h +++ b/include/net/ndisc.h @@ -24,6 +24,7 @@ enum { ND_OPT_MTU = 5, /* RFC2461 */ __ND_OPT_ARRAY_MAX, ND_OPT_ROUTE_INFO = 24, /* RFC4191 */ + ND_OPT_RDNSS = 25, /* RFC5006 */ __ND_OPT_MAX }; diff --git a/include/net/net_namespace.h b/include/net/net_namespace.h new file mode 100644 index 0000000000000000000000000000000000000000..93aa87d328040b71877a31ee400e7adebd3e2073 --- /dev/null +++ b/include/net/net_namespace.h @@ -0,0 +1,123 @@ +/* + * Operations on the network namespace + */ +#ifndef __NET_NET_NAMESPACE_H +#define __NET_NET_NAMESPACE_H + +#include +#include +#include + +struct proc_dir_entry; +struct net_device; +struct net { + atomic_t count; /* To decided when the network + * namespace should be freed. + */ + atomic_t use_count; /* To track references we + * destroy on demand + */ + struct list_head list; /* list of network namespaces */ + struct work_struct work; /* work struct for freeing */ + + struct proc_dir_entry *proc_net; + struct proc_dir_entry *proc_net_stat; + struct proc_dir_entry *proc_net_root; + + struct net_device *loopback_dev; /* The loopback */ + + struct list_head dev_base_head; + struct hlist_head *dev_name_head; + struct hlist_head *dev_index_head; +}; + +#ifdef CONFIG_NET +/* Init's network namespace */ +extern struct net init_net; +#define INIT_NET_NS(net_ns) .net_ns = &init_net, +#else +#define INIT_NET_NS(net_ns) +#endif + +extern struct list_head net_namespace_list; + +#ifdef CONFIG_NET +extern struct net *copy_net_ns(unsigned long flags, struct net *net_ns); +#else +static inline struct net *copy_net_ns(unsigned long flags, struct net *net_ns) +{ + /* There is nothing to copy so this is a noop */ + return net_ns; +} +#endif + +extern void __put_net(struct net *net); + +static inline struct net *get_net(struct net *net) +{ +#ifdef CONFIG_NET + atomic_inc(&net->count); +#endif + return net; +} + +static inline struct net *maybe_get_net(struct net *net) +{ + /* Used when we know struct net exists but we + * aren't guaranteed a previous reference count + * exists. If the reference count is zero this + * function fails and returns NULL. 
+ */ + if (!atomic_inc_not_zero(&net->count)) + net = NULL; + return net; +} + +static inline void put_net(struct net *net) +{ +#ifdef CONFIG_NET + if (atomic_dec_and_test(&net->count)) + __put_net(net); +#endif +} + +static inline struct net *hold_net(struct net *net) +{ +#ifdef CONFIG_NET + atomic_inc(&net->use_count); +#endif + return net; +} + +static inline void release_net(struct net *net) +{ +#ifdef CONFIG_NET + atomic_dec(&net->use_count); +#endif +} + +#define for_each_net(VAR) \ + list_for_each_entry(VAR, &net_namespace_list, list) + +#ifdef CONFIG_NET_NS +#define __net_init +#define __net_exit +#define __net_initdata +#else +#define __net_init __init +#define __net_exit __exit_refok +#define __net_initdata __initdata +#endif + +struct pernet_operations { + struct list_head list; + int (*init)(struct net *net); + void (*exit)(struct net *net); +}; + +extern int register_pernet_subsys(struct pernet_operations *); +extern void unregister_pernet_subsys(struct pernet_operations *); +extern int register_pernet_device(struct pernet_operations *); +extern void unregister_pernet_device(struct pernet_operations *); + +#endif /* __NET_NET_NAMESPACE_H */ diff --git a/include/net/netfilter/nf_conntrack.h b/include/net/netfilter/nf_conntrack.h index 810020ec345dcc835615d6ffaf6cb41707e61b71..90fb66d99d0c1a25ead5470b74e2c153cc4b08ec 100644 --- a/include/net/netfilter/nf_conntrack.h +++ b/include/net/netfilter/nf_conntrack.h @@ -116,9 +116,6 @@ struct nf_conn struct ip_conntrack_counter counters[IP_CT_DIR_MAX]; #endif - /* Unique ID that identifies this conntrack*/ - unsigned int id; - #if defined(CONFIG_NF_CONNTRACK_MARK) u_int32_t mark; #endif diff --git a/include/net/netfilter/nf_conntrack_expect.h b/include/net/netfilter/nf_conntrack_expect.h index cae1a0dce36555e80c5451a1e92098fb6de96d6f..b47c04f12dbe19d17da9efc445b31f2f749e310f 100644 --- a/include/net/netfilter/nf_conntrack_expect.h +++ b/include/net/netfilter/nf_conntrack_expect.h @@ -38,9 +38,6 @@ struct nf_conntrack_expect /* Usage count. */ atomic_t use; - /* Unique ID */ - unsigned int id; - /* Flags */ unsigned int flags; diff --git a/include/net/netfilter/nf_conntrack_helper.h b/include/net/netfilter/nf_conntrack_helper.h index d04f99964d94bad80d86342fa9b4b645c982b676..0dcc4c828ce94bff389248223584a992872f063a 100644 --- a/include/net/netfilter/nf_conntrack_helper.h +++ b/include/net/netfilter/nf_conntrack_helper.h @@ -36,7 +36,7 @@ struct nf_conntrack_helper void (*destroy)(struct nf_conn *ct); - int (*to_nfattr)(struct sk_buff *skb, const struct nf_conn *ct); + int (*to_nlattr)(struct sk_buff *skb, const struct nf_conn *ct); }; extern struct nf_conntrack_helper * diff --git a/include/net/netfilter/nf_conntrack_l3proto.h b/include/net/netfilter/nf_conntrack_l3proto.h index 3c58a2c4df283e98014c16a35089b7581f1f8e7c..15888fc7b72dc5ab49d702886fc7fa083bbe0a90 100644 --- a/include/net/netfilter/nf_conntrack_l3proto.h +++ b/include/net/netfilter/nf_conntrack_l3proto.h @@ -11,11 +11,11 @@ #ifndef _NF_CONNTRACK_L3PROTO_H #define _NF_CONNTRACK_L3PROTO_H +#include +#include #include #include -struct nfattr; - struct nf_conntrack_l3proto { /* L3 Protocol Family number. 
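/*
 * Illustrative sketch (hypothetical "mysubsys" names): registering
 * per-network-namespace init/exit hooks with the pernet_operations API
 * declared above. init() runs for the initial namespace and for every
 * namespace created later; exit() runs when a namespace is torn down.
 */
#include <linux/init.h>
#include <linux/module.h>
#include <net/net_namespace.h>

static int mysubsys_net_init(struct net *net)
{
	/* allocate and attach per-namespace state here */
	return 0;
}

static void mysubsys_net_exit(struct net *net)
{
	/* release per-namespace state here */
}

static struct pernet_operations mysubsys_net_ops = {
	.init = mysubsys_net_init,
	.exit = mysubsys_net_exit,
};

static int __init mysubsys_init(void)
{
	return register_pernet_subsys(&mysubsys_net_ops);
}

static void __exit mysubsys_exit(void)
{
	unregister_pernet_subsys(&mysubsys_net_ops);
}

module_init(mysubsys_init);
module_exit(mysubsys_exit);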
ex) PF_INET */ @@ -64,11 +64,12 @@ struct nf_conntrack_l3proto int (*get_l4proto)(const struct sk_buff *skb, unsigned int nhoff, unsigned int *dataoff, u_int8_t *protonum); - int (*tuple_to_nfattr)(struct sk_buff *skb, + int (*tuple_to_nlattr)(struct sk_buff *skb, const struct nf_conntrack_tuple *t); - int (*nfattr_to_tuple)(struct nfattr *tb[], + int (*nlattr_to_tuple)(struct nlattr *tb[], struct nf_conntrack_tuple *t); + const struct nla_policy *nla_policy; #ifdef CONFIG_SYSCTL struct ctl_table_header *ctl_table_header; diff --git a/include/net/netfilter/nf_conntrack_l4proto.h b/include/net/netfilter/nf_conntrack_l4proto.h index f46cb930414cc8a1cdd27c5d688a688a154fc301..fb50c217ba0af56f447ec44d84255bacd2d1061f 100644 --- a/include/net/netfilter/nf_conntrack_l4proto.h +++ b/include/net/netfilter/nf_conntrack_l4proto.h @@ -9,10 +9,11 @@ #ifndef _NF_CONNTRACK_L4PROTO_H #define _NF_CONNTRACK_L4PROTO_H +#include +#include #include struct seq_file; -struct nfattr; struct nf_conntrack_l4proto { @@ -65,16 +66,17 @@ struct nf_conntrack_l4proto int pf, unsigned int hooknum); /* convert protoinfo to nfnetink attributes */ - int (*to_nfattr)(struct sk_buff *skb, struct nfattr *nfa, + int (*to_nlattr)(struct sk_buff *skb, struct nlattr *nla, const struct nf_conn *ct); /* convert nfnetlink attributes to protoinfo */ - int (*from_nfattr)(struct nfattr *tb[], struct nf_conn *ct); + int (*from_nlattr)(struct nlattr *tb[], struct nf_conn *ct); - int (*tuple_to_nfattr)(struct sk_buff *skb, + int (*tuple_to_nlattr)(struct sk_buff *skb, const struct nf_conntrack_tuple *t); - int (*nfattr_to_tuple)(struct nfattr *tb[], + int (*nlattr_to_tuple)(struct nlattr *tb[], struct nf_conntrack_tuple *t); + const struct nla_policy *nla_policy; #ifdef CONFIG_SYSCTL struct ctl_table_header **ctl_table_header; @@ -111,10 +113,11 @@ extern int nf_conntrack_l4proto_register(struct nf_conntrack_l4proto *proto); extern void nf_conntrack_l4proto_unregister(struct nf_conntrack_l4proto *proto); /* Generic netlink helpers */ -extern int nf_ct_port_tuple_to_nfattr(struct sk_buff *skb, +extern int nf_ct_port_tuple_to_nlattr(struct sk_buff *skb, const struct nf_conntrack_tuple *tuple); -extern int nf_ct_port_nfattr_to_tuple(struct nfattr *tb[], +extern int nf_ct_port_nlattr_to_tuple(struct nlattr *tb[], struct nf_conntrack_tuple *t); +extern const struct nla_policy nf_ct_port_nla_policy[]; /* Log invalid packets */ extern unsigned int nf_ct_log_invalid; diff --git a/include/net/netfilter/nf_nat_protocol.h b/include/net/netfilter/nf_nat_protocol.h index a9ec5ef61468c7c35b6c2c3e89db8b9eb3c424fa..14c7b2d7263cc2be8ae4f5c4a9a0aef73708b6ff 100644 --- a/include/net/netfilter/nf_nat_protocol.h +++ b/include/net/netfilter/nf_nat_protocol.h @@ -38,10 +38,10 @@ struct nf_nat_protocol enum nf_nat_manip_type maniptype, const struct nf_conn *ct); - int (*range_to_nfattr)(struct sk_buff *skb, + int (*range_to_nlattr)(struct sk_buff *skb, const struct nf_nat_range *range); - int (*nfattr_to_range)(struct nfattr *tb[], + int (*nlattr_to_range)(struct nlattr *tb[], struct nf_nat_range *range); }; @@ -62,9 +62,9 @@ extern int init_protocols(void) __init; extern void cleanup_protocols(void); extern struct nf_nat_protocol *find_nat_proto(u_int16_t protonum); -extern int nf_nat_port_range_to_nfattr(struct sk_buff *skb, +extern int nf_nat_port_range_to_nlattr(struct sk_buff *skb, const struct nf_nat_range *range); -extern int nf_nat_port_nfattr_to_range(struct nfattr *tb[], +extern int nf_nat_port_nlattr_to_range(struct nlattr *tb[], struct nf_nat_range 
*range); #endif /*_NF_NAT_PROTO_H*/ diff --git a/include/net/netlink.h b/include/net/netlink.h index d7b824be5422146b73f13e4758995507d153ec9f..9298218c07f9afa7fda027fcbb07a8b4684b673f 100644 --- a/include/net/netlink.h +++ b/include/net/netlink.h @@ -84,7 +84,7 @@ * nla_next(nla)-----------------------------' * * Data Structures: - * struct nlattr netlink attribtue header + * struct nlattr netlink attribute header * * Attribute Construction: * nla_reserve(skb, type, len) reserve room for an attribute @@ -220,9 +220,9 @@ struct nl_info { u32 pid; }; -extern void netlink_run_queue(struct sock *sk, unsigned int *qlen, - int (*cb)(struct sk_buff *, - struct nlmsghdr *)); +extern int netlink_rcv_skb(struct sk_buff *skb, + int (*cb)(struct sk_buff *, + struct nlmsghdr *)); extern int nlmsg_notify(struct sock *sk, struct sk_buff *skb, u32 pid, unsigned int group, int report, gfp_t flags); @@ -666,6 +666,15 @@ static inline int nla_padlen(int payload) return nla_total_size(payload) - nla_attr_size(payload); } +/** + * nla_type - attribute type + * @nla: netlink attribute + */ +static inline int nla_type(const struct nlattr *nla) +{ + return nla->nla_type & NLA_TYPE_MASK; +} + /** * nla_data - head of payload * @nla: netlink attribute @@ -697,7 +706,7 @@ static inline int nla_ok(const struct nlattr *nla, int remaining) } /** - * nla_next - next netlink attribte in attribute stream + * nla_next - next netlink attribute in attribute stream * @nla: netlink attribute * @remaining: number of bytes remaining in attribute stream * @@ -773,7 +782,7 @@ static inline int __nla_parse_nested_compat(struct nlattr *tb[], int maxtype, ({ data = nla_len(nla) >= len ? nla_data(nla) : NULL; \ __nla_parse_nested_compat(tb, maxtype, nla, policy, len); }) /** - * nla_put_u8 - Add a u16 netlink attribute to a socket buffer + * nla_put_u8 - Add a u8 netlink attribute to a socket buffer * @skb: socket buffer to add attribute to * @attrtype: attribute type * @value: numeric value @@ -989,7 +998,7 @@ static inline struct nlattr *nla_nest_start(struct sk_buff *skb, int attrtype) /** * nla_nest_end - Finalize nesting of attributes - * @skb: socket buffer the attribtues are stored in + * @skb: socket buffer the attributes are stored in * @start: container attribute * * Corrects the container attribute header to include the all @@ -1032,7 +1041,7 @@ static inline struct nlattr *nla_nest_compat_start(struct sk_buff *skb, /** * nla_nest_compat_end - Finalize nesting of compat attributes - * @skb: socket buffer the attribtues are stored in + * @skb: socket buffer the attributes are stored in * @start: container attribute * * Corrects the container attribute header to include the all diff --git a/include/net/pkt_cls.h b/include/net/pkt_cls.h index 7968b1d66369abd3a7efbbb1a732b4affc066987..f285de69c615edeb003eb3e3e5ca9746cdcb7fda 100644 --- a/include/net/pkt_cls.h +++ b/include/net/pkt_cls.h @@ -2,6 +2,7 @@ #define __NET_PKT_CLS_H #include +#include #include #include @@ -351,7 +352,7 @@ tcf_match_indev(struct sk_buff *skb, char *indev) if (indev[0]) { if (!skb->iif) return 0; - dev = __dev_get_by_index(skb->iif); + dev = __dev_get_by_index(&init_net, skb->iif); if (!dev || strcmp(indev, dev->name)) return 0; } diff --git a/include/net/pkt_sched.h b/include/net/pkt_sched.h index 9e22526e80e72b646c2a1f5eab446a88025731d0..ab61809a9616af402d8d197da3fc58a89b05e69c 100644 --- a/include/net/pkt_sched.h +++ b/include/net/pkt_sched.h @@ -97,10 +97,9 @@ extern int tc_classify(struct sk_buff *skb, struct tcf_proto *tp, /* Calculate maximal 
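/*
 * Illustrative sketch (hypothetical helper): walking a netlink attribute
 * stream and classifying attributes with the nla_type() accessor added
 * to netlink.h above, which masks off flag bits such as NLA_F_NESTED
 * before the value is compared against attribute-type constants.
 */
#include <linux/kernel.h>
#include <net/netlink.h>

static void mydrv_dump_attrs(const struct nlattr *head, int len)
{
	const struct nlattr *nla;
	int rem;

	nla_for_each_attr(nla, head, len, rem)
		printk(KERN_DEBUG "attr type %d, %d byte payload\n",
		       nla_type(nla), nla_len(nla));
}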
size of packet seen by hard_start_xmit routine of this device. */ -static inline unsigned psched_mtu(struct net_device *dev) +static inline unsigned psched_mtu(const struct net_device *dev) { - unsigned mtu = dev->mtu; - return dev->hard_header ? mtu + dev->hard_header_len : mtu; + return dev->mtu + dev->hard_header_len; } #endif diff --git a/include/net/rtnetlink.h b/include/net/rtnetlink.h index 3861c05cdf0fafe41d1dbe692b8fe5a2d5aa5bf7..793863e09c69c8e35ea6a1f729f67763ce40b717 100644 --- a/include/net/rtnetlink.h +++ b/include/net/rtnetlink.h @@ -78,6 +78,10 @@ extern void __rtnl_link_unregister(struct rtnl_link_ops *ops); extern int rtnl_link_register(struct rtnl_link_ops *ops); extern void rtnl_link_unregister(struct rtnl_link_ops *ops); +extern struct net_device *rtnl_create_link(struct net *net, char *ifname, + const struct rtnl_link_ops *ops, struct nlattr *tb[]); +extern const struct nla_policy ifla_policy[IFLA_MAX+1]; + #define MODULE_ALIAS_RTNL_LINK(kind) MODULE_ALIAS("rtnl-link-" kind) #endif diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h index 8a67f24cbe0218596c663007439a654dc20c7fb4..a02ec9e5fea51934666d4b18085b0bcbd682f86a 100644 --- a/include/net/sch_generic.h +++ b/include/net/sch_generic.h @@ -302,4 +302,18 @@ static inline int qdisc_reshape_fail(struct sk_buff *skb, struct Qdisc *sch) return NET_XMIT_DROP; } +/* Length to Time (L2T) lookup in a qdisc_rate_table, to determine how + long it will take to send a packet given its size. + */ +static inline u32 qdisc_l2t(struct qdisc_rate_table* rtab, unsigned int pktlen) +{ + int slot = pktlen + rtab->rate.cell_align + rtab->rate.overhead; + if (slot < 0) + slot = 0; + slot >>= rtab->rate.cell_log; + if (slot > 255) + return (rtab->data[255]*(slot >> 8) + rtab->data[slot & 0xFF]); + return rtab->data[slot]; +} + #endif diff --git a/include/net/sctp/auth.h b/include/net/sctp/auth.h new file mode 100644 index 0000000000000000000000000000000000000000..4945954a16af7305ebb6218adaed0f4c2f947410 --- /dev/null +++ b/include/net/sctp/auth.h @@ -0,0 +1,128 @@ +/* SCTP kernel reference Implementation + * (C) Copyright 2007 Hewlett-Packard Development Company, L.P. + * + * This file is part of the SCTP kernel reference Implementation + * + * The SCTP reference implementation is free software; + * you can redistribute it and/or modify it under the terms of + * the GNU General Public License as published by + * the Free Software Foundation; either version 2, or (at your option) + * any later version. + * + * The SCTP reference implementation is distributed in the hope that it + * will be useful, but WITHOUT ANY WARRANTY; without even the implied + * ************************ + * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * See the GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with GNU CC; see the file COPYING. If not, write to + * the Free Software Foundation, 59 Temple Place - Suite 330, + * Boston, MA 02111-1307, USA. + * + * Please send any bug reports or fixes you make to the + * email address(es): + * lksctp developers + * + * Or submit a bug report through the following website: + * http://www.sf.net/projects/lksctp + * + * Written or modified by: + * Vlad Yasevich + * + * Any bugs reported given to us we will try to fix... any fixes shared will + * be incorporated into the next SCTP release. 
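/*
 * Illustrative sketch (hypothetical helper): using the qdisc_l2t()
 * lookup added to sch_generic.h above to turn a packet length into the
 * transmission time recorded in a qdisc_rate_table, the way rate-based
 * qdiscs such as TBF consume their tables.
 */
#include <linux/skbuff.h>
#include <net/sch_generic.h>

static inline u32 mydrv_pkt_xmit_ticks(struct qdisc_rate_table *rtab,
				       const struct sk_buff *skb)
{
	/* qdisc_l2t() applies cell_align/overhead and cell_log itself,
	 * so callers only pass the raw packet length. */
	return qdisc_l2t(rtab, skb->len);
}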
+ */ + +#ifndef __sctp_auth_h__ +#define __sctp_auth_h__ + +#include +#include + +struct sctp_endpoint; +struct sctp_association; +struct sctp_authkey; +struct sctp_hmacalgo; + +/* + * Define a generic struct that will hold all the info + * necessary for an HMAC transform + */ +struct sctp_hmac { + __u16 hmac_id; /* one of the above ids */ + char *hmac_name; /* name for loading */ + __u16 hmac_len; /* length of the signature */ +}; + +/* This is generic structure that containst authentication bytes used + * as keying material. It's a what is referred to as byte-vector all + * over SCTP-AUTH + */ +struct sctp_auth_bytes { + atomic_t refcnt; + __u32 len; + __u8 data[]; +}; + +/* Definition for a shared key, weather endpoint or association */ +struct sctp_shared_key { + struct list_head key_list; + __u16 key_id; + struct sctp_auth_bytes *key; +}; + +#define key_for_each(__key, __list_head) \ + list_for_each_entry(__key, __list_head, key_list) + +#define key_for_each_safe(__key, __tmp, __list_head) \ + list_for_each_entry_safe(__key, __tmp, __list_head, key_list) + +static inline void sctp_auth_key_hold(struct sctp_auth_bytes *key) +{ + if (!key) + return; + + atomic_inc(&key->refcnt); +} + +void sctp_auth_key_put(struct sctp_auth_bytes *key); +struct sctp_shared_key *sctp_auth_shkey_create(__u16 key_id, gfp_t gfp); +void sctp_auth_shkey_free(struct sctp_shared_key *sh_key); +void sctp_auth_destroy_keys(struct list_head *keys); +int sctp_auth_asoc_init_active_key(struct sctp_association *asoc, gfp_t gfp); +struct sctp_shared_key *sctp_auth_get_shkey( + const struct sctp_association *asoc, + __u16 key_id); +int sctp_auth_asoc_copy_shkeys(const struct sctp_endpoint *ep, + struct sctp_association *asoc, + gfp_t gfp); +int sctp_auth_init_hmacs(struct sctp_endpoint *ep, gfp_t gfp); +void sctp_auth_destroy_hmacs(struct crypto_hash *auth_hmacs[]); +struct sctp_hmac *sctp_auth_get_hmac(__u16 hmac_id); +struct sctp_hmac *sctp_auth_asoc_get_hmac(const struct sctp_association *asoc); +void sctp_auth_asoc_set_default_hmac(struct sctp_association *asoc, + struct sctp_hmac_algo_param *hmacs); +int sctp_auth_asoc_verify_hmac_id(const struct sctp_association *asoc, + __u16 hmac_id); +int sctp_auth_send_cid(sctp_cid_t chunk, const struct sctp_association *asoc); +int sctp_auth_recv_cid(sctp_cid_t chunk, const struct sctp_association *asoc); +void sctp_auth_calculate_hmac(const struct sctp_association *asoc, + struct sk_buff *skb, + struct sctp_auth_chunk *auth, gfp_t gfp); + +/* API Helpers */ +int sctp_auth_ep_add_chunkid(struct sctp_endpoint *ep, __u8 chunk_id); +int sctp_auth_ep_set_hmacs(struct sctp_endpoint *ep, + struct sctp_hmacalgo *hmacs); +int sctp_auth_set_key(struct sctp_endpoint *ep, + struct sctp_association *asoc, + struct sctp_authkey *auth_key); +int sctp_auth_set_active_key(struct sctp_endpoint *ep, + struct sctp_association *asoc, + __u16 key_id); +int sctp_auth_del_key_id(struct sctp_endpoint *ep, + struct sctp_association *asoc, + __u16 key_id); + +#endif diff --git a/include/net/sctp/command.h b/include/net/sctp/command.h index f56c8d695a826908fd24735e40c6bb8c4e05121c..b8733364557ff6d78403584c383a2a2e4334044c 100644 --- a/include/net/sctp/command.h +++ b/include/net/sctp/command.h @@ -102,6 +102,7 @@ typedef enum { SCTP_CMD_SET_SK_ERR, /* Set sk_err */ SCTP_CMD_ASSOC_CHANGE, /* generate and send assoc_change event */ SCTP_CMD_ADAPTATION_IND, /* generate and send adaptation event */ + SCTP_CMD_ASSOC_SHKEY, /* generate the association shared keys */ SCTP_CMD_LAST } sctp_verb_t; diff --git 
a/include/net/sctp/constants.h b/include/net/sctp/constants.h index bb37724495a55ba425ce503ef4ee2f5b2b06ae34..da8354e8e33c5269fa92ab4a1dd4a8f4d4971ad8 100644 --- a/include/net/sctp/constants.h +++ b/include/net/sctp/constants.h @@ -64,12 +64,18 @@ enum { SCTP_DEFAULT_INSTREAMS = SCTP_MAX_STREAM }; #define SCTP_CID_MAX SCTP_CID_ASCONF_ACK #define SCTP_NUM_BASE_CHUNK_TYPES (SCTP_CID_BASE_MAX + 1) -#define SCTP_NUM_CHUNK_TYPES (SCTP_NUM_BASE_CHUNKTYPES + 2) #define SCTP_NUM_ADDIP_CHUNK_TYPES 2 #define SCTP_NUM_PRSCTP_CHUNK_TYPES 1 +#define SCTP_NUM_AUTH_CHUNK_TYPES 1 + +#define SCTP_NUM_CHUNK_TYPES (SCTP_NUM_BASE_CHUNK_TYPES + \ + SCTP_NUM_ADDIP_CHUNK_TYPES +\ + SCTP_NUM_PRSCTP_CHUNK_TYPES +\ + SCTP_NUM_AUTH_CHUNK_TYPES) + /* These are the different flavours of event. */ typedef enum { @@ -177,7 +183,9 @@ typedef enum { SCTP_IERROR_NO_DATA, SCTP_IERROR_BAD_STREAM, SCTP_IERROR_BAD_PORTS, - + SCTP_IERROR_AUTH_BAD_HMAC, + SCTP_IERROR_AUTH_BAD_KEYID, + SCTP_IERROR_PROTO_VIOLATION, } sctp_ierror_t; @@ -409,4 +417,45 @@ typedef enum { SCTP_LOWER_CWND_INACTIVE, } sctp_lower_cwnd_t; + +/* SCTP-AUTH Necessary constants */ + +/* SCTP-AUTH, Section 3.3 + * + * The following Table 2 shows the currently defined values for HMAC + * identifiers. + * + * +-----------------+--------------------------+ + * | HMAC Identifier | Message Digest Algorithm | + * +-----------------+--------------------------+ + * | 0 | Reserved | + * | 1 | SHA-1 defined in [8] | + * | 2 | Reserved | + * | 3 | SHA-256 defined in [8] | + * +-----------------+--------------------------+ + */ +enum { + SCTP_AUTH_HMAC_ID_RESERVED_0, + SCTP_AUTH_HMAC_ID_SHA1, + SCTP_AUTH_HMAC_ID_RESERVED_2, + SCTP_AUTH_HMAC_ID_SHA256 +}; + +#define SCTP_AUTH_HMAC_ID_MAX SCTP_AUTH_HMAC_ID_SHA256 +#define SCTP_AUTH_NUM_HMACS (SCTP_AUTH_HMAC_ID_SHA256 + 1) +#define SCTP_SHA1_SIG_SIZE 20 +#define SCTP_SHA256_SIG_SIZE 32 + +/* SCTP-AUTH, Section 3.2 + * The chunk types for INIT, INIT-ACK, SHUTDOWN-COMPLETE and AUTH chunks + * MUST NOT be listed in the CHUNKS parameter + */ +#define SCTP_NUM_NOAUTH_CHUNKS 4 +#define SCTP_AUTH_MAX_CHUNKS (SCTP_NUM_CHUNK_TYPES - SCTP_NUM_NOAUTH_CHUNKS) + +/* SCTP-AUTH Section 6.1 + * The RANDOM parameter MUST contain a 32 byte random number. + */ +#define SCTP_AUTH_RANDOM_LENGTH 32 + #endif /* __sctp_constants_h__ */ diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h index c9cc00c857821f328a751d7b3ddaf3209c142d27..119f5a1ed499ee5253e0a751033087aa079d0b7c 100644 --- a/include/net/sctp/sctp.h +++ b/include/net/sctp/sctp.h @@ -341,6 +341,7 @@ extern atomic_t sctp_dbg_objcnt_bind_bucket; extern atomic_t sctp_dbg_objcnt_addr; extern atomic_t sctp_dbg_objcnt_ssnmap; extern atomic_t sctp_dbg_objcnt_datamsg; +extern atomic_t sctp_dbg_objcnt_keys; /* Macros to atomically increment/decrement objcnt counters. */ #define SCTP_DBG_OBJCNT_INC(name) \ @@ -469,6 +470,11 @@ static inline void sctp_skb_set_owner_r(struct sk_buff *skb, struct sock *sk) skb->sk = sk; skb->destructor = sctp_sock_rfree; atomic_add(event->rmem_len, &sk->sk_rmem_alloc); + /* + * This mimics the behavior of + * sk_stream_set_owner_r + */ + sk->sk_forward_alloc -= event->rmem_len; } /* Tests if the list has one and only one entry. 
*/ diff --git a/include/net/sctp/sm.h b/include/net/sctp/sm.h index e8e3a64eb32254d50a86d87e7028042aae8578d4..bf2f5ed69c1507466dfb1e44926e2e94f4e9433c 100644 --- a/include/net/sctp/sm.h +++ b/include/net/sctp/sm.h @@ -143,6 +143,7 @@ sctp_state_fn_t sctp_sf_do_asconf_ack; sctp_state_fn_t sctp_sf_do_9_2_reshutack; sctp_state_fn_t sctp_sf_eat_fwd_tsn; sctp_state_fn_t sctp_sf_eat_fwd_tsn_fast; +sctp_state_fn_t sctp_sf_eat_auth; /* Prototypes for primitive event state functions. */ sctp_state_fn_t sctp_sf_do_prm_asoc; @@ -256,6 +257,7 @@ int sctp_process_asconf_ack(struct sctp_association *asoc, struct sctp_chunk *sctp_make_fwdtsn(const struct sctp_association *asoc, __u32 new_cum_tsn, size_t nstreams, struct sctp_fwdtsn_skip *skiplist); +struct sctp_chunk *sctp_make_auth(const struct sctp_association *asoc); void sctp_chunk_assign_tsn(struct sctp_chunk *); void sctp_chunk_assign_ssn(struct sctp_chunk *); diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h index baff49dfcdbd488ef21c922be48eacf3be17eea2..ef892e00c83365acdccb3810158277364b5ddae9 100644 --- a/include/net/sctp/structs.h +++ b/include/net/sctp/structs.h @@ -64,6 +64,7 @@ #include /* We need sk_buff_head. */ #include /* We need tq_struct. */ #include /* We need sctp* header structs. */ +#include /* We need auth specific structs */ /* A convenience structure for handling sockaddr structures. * We should wean ourselves off this. @@ -196,8 +197,6 @@ extern struct sctp_globals { /* This is the sctp port control hash. */ int port_hashsize; - int port_rover; - spinlock_t port_alloc_lock; /* Protects port_rover. */ struct sctp_bind_hashbucket *port_hashtable; /* This is the global local address list. @@ -216,6 +215,9 @@ extern struct sctp_globals { /* Flag to indicate if PR-SCTP is enabled. */ int prsctp_enable; + + /* Flag to indicate if SCTP-AUTH is enabled */ + int auth_enable; } sctp_globals; #define sctp_rto_initial (sctp_globals.rto_initial) @@ -248,6 +250,7 @@ extern struct sctp_globals { #define sctp_local_addr_lock (sctp_globals.addr_list_lock) #define sctp_addip_enable (sctp_globals.addip_enable) #define sctp_prsctp_enable (sctp_globals.prsctp_enable) +#define sctp_auth_enable (sctp_globals.auth_enable) /* SCTP Socket type: UDP or TCP style. */ typedef enum { @@ -397,6 +400,9 @@ struct sctp_cookie { __u32 adaptation_ind; + __u8 auth_random[sizeof(sctp_paramhdr_t) + SCTP_AUTH_RANDOM_LENGTH]; + __u8 auth_hmacs[SCTP_AUTH_NUM_HMACS + 2]; + __u8 auth_chunks[sizeof(sctp_paramhdr_t) + SCTP_AUTH_MAX_CHUNKS]; /* This is a shim for my peer's INIT packet, followed by * a copy of the raw address list of the association. @@ -440,6 +446,10 @@ union sctp_params { struct sctp_ipv6addr_param *v6; union sctp_addr_param *addr; struct sctp_adaptation_ind_param *aind; + struct sctp_supported_ext_param *ext; + struct sctp_random_param *random; + struct sctp_chunks_param *chunks; + struct sctp_hmac_algo_param *hmac_algo; }; /* RFC 2960. Section 3.3.5 Heartbeat. @@ -678,6 +688,7 @@ struct sctp_chunk { struct sctp_errhdr *err_hdr; struct sctp_addiphdr *addip_hdr; struct sctp_fwdtsn_hdr *fwdtsn_hdr; + struct sctp_authhdr *auth_hdr; } subh; __u8 *chunk_end; @@ -711,6 +722,13 @@ struct sctp_chunk { */ struct sctp_transport *transport; + /* SCTP-AUTH: For the special case inbound processing of COOKIE-ECHO + * we need to save a pointer to the AUTH chunk, since the SCTP-AUTH + * spec violates the principal premise that all chunks are processed + * in order.
+ */ + struct sk_buff *auth_chunk; + __u8 rtt_in_progress; /* Is this chunk used for RTT calculation? */ __u8 resent; /* Has this chunk ever been retransmitted. */ __u8 has_tsn; /* Does this chunk have a TSN yet? */ @@ -723,6 +741,7 @@ struct sctp_chunk { __s8 fast_retransmit; /* Is this chunk fast retransmitted? */ __u8 tsn_missing_report; /* Data chunk missing counter. */ __u8 data_accepted; /* At least 1 chunk in this packet accepted */ + __u8 auth; /* IN: was auth'ed | OUT: needs auth */ }; void sctp_chunk_hold(struct sctp_chunk *); @@ -731,7 +750,6 @@ int sctp_user_addto_chunk(struct sctp_chunk *chunk, int off, int len, struct iovec *data); void sctp_chunk_free(struct sctp_chunk *); void *sctp_addto_chunk(struct sctp_chunk *, int len, const void *data); -void *sctp_addto_param(struct sctp_chunk *, int len, const void *data); struct sctp_chunk *sctp_chunkify(struct sk_buff *, const struct sctp_association *, struct sock *); @@ -773,16 +791,25 @@ struct sctp_packet { */ struct sctp_transport *transport; + /* pointer to the auth chunk for this packet */ + struct sctp_chunk *auth; + /* This packet contains a COOKIE-ECHO chunk. */ - char has_cookie_echo; + __u8 has_cookie_echo; + + /* This packet contains a SACK chunk. */ + __u8 has_sack; + + /* This packet contains an AUTH chunk */ + __u8 has_auth; - /* This packet containsa SACK chunk. */ - char has_sack; + /* This packet contains at least 1 DATA chunk */ + __u8 has_data; /* SCTP cannot fragment this packet. So let ip fragment it. */ - char ipfragok; + __u8 ipfragok; - int malloced; + __u8 malloced; }; struct sctp_packet *sctp_packet_init(struct sctp_packet *, @@ -1045,6 +1072,7 @@ void sctp_inq_init(struct sctp_inq *); void sctp_inq_free(struct sctp_inq *); void sctp_inq_push(struct sctp_inq *, struct sctp_chunk *packet); struct sctp_chunk *sctp_inq_pop(struct sctp_inq *); +struct sctp_chunkhdr *sctp_inq_peek(struct sctp_inq *); void sctp_inq_set_th_handler(struct sctp_inq *, work_func_t); /* This is the structure we use to hold outbound chunks. You push @@ -1291,6 +1319,21 @@ struct sctp_endpoint { /* rcvbuf acct. policy. */ __u32 rcvbuf_policy; + + /* SCTP AUTH: array of the HMACs that will be allocated + * we need this per association so that we don't serialize + */ + struct crypto_hash **auth_hmacs; + + /* SCTP-AUTH: hmacs for the endpoint encoded into parameter */ + struct sctp_hmac_algo_param *auth_hmacs_list; + + /* SCTP-AUTH: chunks to authenticate encoded into parameter */ + struct sctp_chunks_param *auth_chunk_list; + + /* SCTP-AUTH: endpoint shared keys */ + struct list_head endpoint_shared_keys; + __u16 active_key_id; }; /* Recover the outter endpoint structure. */ @@ -1497,6 +1540,8 @@ struct sctp_association { __u8 hostname_address;/* Peer understands DNS addresses? */ __u8 asconf_capable; /* Does peer support ADDIP? */ __u8 prsctp_capable; /* Can peer do PR-SCTP? */ + __u8 auth_capable; /* Is peer doing SCTP-AUTH? */ + __u8 addip_capable; /* Can peer do ADD-IP */ __u32 adaptation_ind; /* Adaptation Code point. */ @@ -1514,6 +1559,14 @@ struct sctp_association { * Initial TSN Value minus 1 */ __u32 addip_serial; + + /* SCTP-AUTH: We need to know pears random number, hmac list + * and authenticated chunk list. 
All that is part of the + * cookie and these are just pointers to those locations + */ + sctp_random_param_t *peer_random; + sctp_chunks_param_t *peer_chunks; + sctp_hmac_algo_param_t *peer_hmacs; } peer; /* State : A state variable indicating what state the @@ -1797,6 +1850,24 @@ struct sctp_association { */ __u32 addip_serial; + /* SCTP AUTH: list of the endpoint shared keys. These + * keys are provided out of band by the user applicaton + * and can't change during the lifetime of the association + */ + struct list_head endpoint_shared_keys; + + /* SCTP AUTH: + * The current generated assocaition shared key (secret) + */ + struct sctp_auth_bytes *asoc_shared_key; + + /* SCTP AUTH: hmac id of the first peer requested algorithm + * that we support. + */ + __u16 default_hmac_id; + + __u16 active_key_id; + /* Need to send an ECNE Chunk? */ char need_ecne; diff --git a/include/net/sctp/ulpevent.h b/include/net/sctp/ulpevent.h index de88ed5b0ba689ebb4c9163baf1e6670c497e93e..922a151eb93c1ea7e89b712f2db0ea031d7097ec 100644 --- a/include/net/sctp/ulpevent.h +++ b/include/net/sctp/ulpevent.h @@ -128,6 +128,10 @@ struct sctp_ulpevent *sctp_ulpevent_make_rcvmsg(struct sctp_association *asoc, struct sctp_chunk *chunk, gfp_t gfp); +struct sctp_ulpevent *sctp_ulpevent_make_authkey( + const struct sctp_association *asoc, __u16 key_id, + __u32 indication, gfp_t gfp); + void sctp_ulpevent_read_sndrcvinfo(const struct sctp_ulpevent *event, struct msghdr *); __u16 sctp_ulpevent_get_notification_type(const struct sctp_ulpevent *event); diff --git a/include/net/sctp/user.h b/include/net/sctp/user.h index 6d2b57758cca0d1f26c5283e1c6e39af16b2fb5c..00848b641f59d592d933452ee309ab22ee499047 100644 --- a/include/net/sctp/user.h +++ b/include/net/sctp/user.h @@ -103,6 +103,21 @@ enum sctp_optname { #define SCTP_PARTIAL_DELIVERY_POINT SCTP_PARTIAL_DELIVERY_POINT SCTP_MAX_BURST, /* Set/Get max burst */ #define SCTP_MAX_BURST SCTP_MAX_BURST + SCTP_AUTH_CHUNK, /* Set only: add a chunk type to authenticat */ +#define SCTP_AUTH_CHUNK SCTP_AUTH_CHUNK + SCTP_HMAC_IDENT, +#define SCTP_HMAC_IDENT SCTP_HMAC_IDENT + SCTP_AUTH_KEY, +#define SCTP_AUTH_KEY SCTP_AUTH_KEY + SCTP_AUTH_ACTIVE_KEY, +#define SCTP_AUTH_ACTIVE_KEY SCTP_AUTH_ACTIVE_KEY + SCTP_AUTH_DELETE_KEY, +#define SCTP_AUTH_DELETE_KEY SCTP_AUTH_DELETE_KEY + SCTP_PEER_AUTH_CHUNKS, /* Read only */ +#define SCTP_PEER_AUTH_CHUNKS SCTP_PEER_AUTH_CHUNKS + SCTP_LOCAL_AUTH_CHUNKS, /* Read only */ +#define SCTP_LOCAL_AUTH_CHUNKS SCTP_LOCAL_AUTH_CHUNKS + /* Internal Socket Options. Some of the sctp library functions are * implemented using these socket options. 
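/*
 * Illustrative user-space sketch (assumes a libc/lksctp header such as
 * <netinet/sctp.h> exporting these definitions): requiring
 * authentication for DATA chunks and installing shared key number 1
 * through the SCTP_AUTH_* socket options and sctp_authchunk/
 * sctp_authkey/sctp_authkeyid structures introduced in this header;
 * key bytes and error handling are placeholders.
 */
#include <stdlib.h>
#include <string.h>
#include <stdint.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <netinet/sctp.h>

static int enable_sctp_auth(int sd, const unsigned char *keydata, uint16_t keylen)
{
	struct sctp_authchunk ac;
	struct sctp_authkey *ak;
	struct sctp_authkeyid active;
	int ret;

	ac.sauth_chunk = 0;	/* chunk type 0 == DATA */
	ret = setsockopt(sd, IPPROTO_SCTP, SCTP_AUTH_CHUNK, &ac, sizeof(ac));
	if (ret)
		return ret;

	ak = malloc(sizeof(*ak) + keylen);
	if (!ak)
		return -1;
	ak->sca_assoc_id = 0;		/* endpoint-wide key (assumption) */
	ak->sca_keynumber = 1;
	ak->sca_keylen = keylen;
	memcpy(ak->sca_key, keydata, keylen);
	ret = setsockopt(sd, IPPROTO_SCTP, SCTP_AUTH_KEY, ak, sizeof(*ak) + keylen);
	free(ak);
	if (ret)
		return ret;

	active.scact_assoc_id = 0;
	active.scact_keynumber = 1;
	return setsockopt(sd, IPPROTO_SCTP, SCTP_AUTH_ACTIVE_KEY,
			  &active, sizeof(active));
}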
@@ -370,6 +385,19 @@ struct sctp_pdapi_event { enum { SCTP_PARTIAL_DELIVERY_ABORTED=0, }; +struct sctp_authkey_event { + __u16 auth_type; + __u16 auth_flags; + __u32 auth_length; + __u16 auth_keynumber; + __u16 auth_altkeynumber; + __u32 auth_indication; + sctp_assoc_t auth_assoc_id; +}; + +enum { SCTP_AUTH_NEWKEY = 0, }; + + /* * Described in Section 7.3 * Ancillary Data and Notification Interest Options @@ -405,6 +433,7 @@ union sctp_notification { struct sctp_shutdown_event sn_shutdown_event; struct sctp_adaptation_event sn_adaptation_event; struct sctp_pdapi_event sn_pdapi_event; + struct sctp_authkey_event sn_authkey_event; }; /* Section 5.3.1 @@ -421,6 +450,7 @@ enum sctp_sn_type { SCTP_SHUTDOWN_EVENT, SCTP_PARTIAL_DELIVERY_EVENT, SCTP_ADAPTATION_INDICATION, + SCTP_AUTHENTICATION_EVENT, }; /* Notification error codes used to fill up the error fields in some @@ -539,6 +569,54 @@ struct sctp_paddrparams { __u32 spp_flags; } __attribute__((packed, aligned(4))); +/* + * 7.1.18. Add a chunk that must be authenticated (SCTP_AUTH_CHUNK) + * + * This set option adds a chunk type that the user is requesting to be + * received only in an authenticated way. Changes to the list of chunks + * will only effect future associations on the socket. + */ +struct sctp_authchunk { + __u8 sauth_chunk; +}; + +/* + * 7.1.19. Get or set the list of supported HMAC Identifiers (SCTP_HMAC_IDENT) + * + * This option gets or sets the list of HMAC algorithms that the local + * endpoint requires the peer to use. +*/ +struct sctp_hmacalgo { + __u16 shmac_num_idents; + __u16 shmac_idents[]; +}; + +/* + * 7.1.20. Set a shared key (SCTP_AUTH_KEY) + * + * This option will set a shared secret key which is used to build an + * association shared key. + */ +struct sctp_authkey { + sctp_assoc_t sca_assoc_id; + __u16 sca_keynumber; + __u16 sca_keylen; + __u8 sca_key[]; +}; + +/* + * 7.1.21. Get or set the active shared key (SCTP_AUTH_ACTIVE_KEY) + * + * This option will get or set the active shared key to be used to build + * the association shared key. + */ + +struct sctp_authkeyid { + sctp_assoc_t scact_assoc_id; + __u16 scact_keynumber; +}; + + /* 7.1.23. Delayed Ack Timer (SCTP_DELAYED_ACK_TIME) * * This options will get or set the delayed ack timer. The time is set @@ -607,6 +685,18 @@ struct sctp_status { struct sctp_paddrinfo sstat_primary; }; +/* + * 7.2.3. Get the list of chunks the peer requires to be authenticated + * (SCTP_PEER_AUTH_CHUNKS) + * + * This option gets a list of chunks for a specified association that + * the peer requires to be received authenticated only. + */ +struct sctp_authchunks { + sctp_assoc_t gauth_assoc_id; + uint8_t gauth_chunks[]; +}; + /* * 8.3, 8.5 get all peer/local addresses in an association. 
* This parameter struct is used by SCTP_GET_PEER_ADDRS and diff --git a/include/net/snmp.h b/include/net/snmp.h index 464970e39ec087f334cc448f073e4552f804b26a..ea206bff0dc4c01e8fa6cfa5f00d82add2c96743 100644 --- a/include/net/snmp.h +++ b/include/net/snmp.h @@ -82,12 +82,23 @@ struct icmp_mib { unsigned long mibs[ICMP_MIB_MAX]; } __SNMP_MIB_ALIGN__; +#define ICMPMSG_MIB_MAX __ICMPMSG_MIB_MAX +struct icmpmsg_mib { + unsigned long mibs[ICMPMSG_MIB_MAX]; +} __SNMP_MIB_ALIGN__; + /* ICMP6 (IPv6-ICMP) */ #define ICMP6_MIB_MAX __ICMP6_MIB_MAX struct icmpv6_mib { unsigned long mibs[ICMP6_MIB_MAX]; } __SNMP_MIB_ALIGN__; +#define ICMP6MSG_MIB_MAX __ICMP6MSG_MIB_MAX +struct icmpv6msg_mib { + unsigned long mibs[ICMP6MSG_MIB_MAX]; +} __SNMP_MIB_ALIGN__; + + /* TCP */ #define TCP_MIB_MAX __TCP_MIB_MAX struct tcp_mib { diff --git a/include/net/sock.h b/include/net/sock.h index dfeb8b13024f8827febf8cedbcee48666106d4b3..453c79d0915ba00ca8b33ff160ba64cb7c58a521 100644 --- a/include/net/sock.h +++ b/include/net/sock.h @@ -40,6 +40,7 @@ #ifndef _SOCK_H #define _SOCK_H +#include #include #include #include @@ -55,6 +56,7 @@ #include #include #include +#include /* * This structure really needs to be cleaned up. @@ -75,10 +77,9 @@ * between user contexts and software interrupt processing, whereas the * mini-semaphore synchronizes multiple users amongst themselves. */ -struct sock_iocb; typedef struct { spinlock_t slock; - struct sock_iocb *owner; + int owned; wait_queue_head_t wq; /* * We express the mutex-alike socket_lock semantics @@ -105,6 +106,7 @@ struct proto; * @skc_refcnt: reference count * @skc_hash: hash value used with various protocol lookup tables * @skc_prot: protocol handlers inside a network family + * @skc_net: reference to the network namespace of this socket * * This is the minimal network layer representation of sockets, the header * for struct sock and struct inet_timewait_sock. @@ -119,6 +121,7 @@ struct sock_common { atomic_t skc_refcnt; unsigned int skc_hash; struct proto *skc_prot; + struct net *skc_net; }; /** @@ -195,6 +198,7 @@ struct sock { #define sk_refcnt __sk_common.skc_refcnt #define sk_hash __sk_common.skc_hash #define sk_prot __sk_common.skc_prot +#define sk_net __sk_common.skc_net unsigned char sk_shutdown : 2, sk_no_check : 2, sk_userlocks : 4; @@ -481,17 +485,17 @@ static inline void sk_add_backlog(struct sock *sk, struct sk_buff *skb) skb->next = NULL; } -#define sk_wait_event(__sk, __timeo, __condition) \ -({ int rc; \ - release_sock(__sk); \ - rc = __condition; \ - if (!rc) { \ - *(__timeo) = schedule_timeout(*(__timeo)); \ - } \ - lock_sock(__sk); \ - rc = __condition; \ - rc; \ -}) +#define sk_wait_event(__sk, __timeo, __condition) \ + ({ int __rc; \ + release_sock(__sk); \ + __rc = __condition; \ + if (!__rc) { \ + *(__timeo) = schedule_timeout(*(__timeo)); \ + } \ + lock_sock(__sk); \ + __rc = __condition; \ + __rc; \ + }) extern int sk_stream_wait_connect(struct sock *sk, long *timeo_p); extern int sk_stream_wait_memory(struct sock *sk, long *timeo_p); @@ -702,7 +706,7 @@ extern int sk_stream_mem_schedule(struct sock *sk, int size, int kind); static inline int sk_stream_pages(int amt) { - return (amt + SK_STREAM_MEM_QUANTUM - 1) / SK_STREAM_MEM_QUANTUM; + return DIV_ROUND_UP(amt, SK_STREAM_MEM_QUANTUM); } static inline void sk_stream_mem_reclaim(struct sock *sk) @@ -736,7 +740,7 @@ static inline int sk_stream_wmem_schedule(struct sock *sk, int size) * Since ~2.3.5 it is also exclusive sleep lock serializing * accesses from user process context. 
*/ -#define sock_owned_by_user(sk) ((sk)->sk_lock.owner) +#define sock_owned_by_user(sk) ((sk)->sk_lock.owned) /* * Macro so as to not evaluate some arguments when @@ -747,7 +751,7 @@ static inline int sk_stream_wmem_schedule(struct sock *sk, int size) */ #define sock_lock_init_class_and_name(sk, sname, skey, name, key) \ do { \ - sk->sk_lock.owner = NULL; \ + sk->sk_lock.owned = 0; \ init_waitqueue_head(&sk->sk_lock.wq); \ spin_lock_init(&(sk)->sk_lock.slock); \ debug_check_no_locks_freed((void *)&(sk)->sk_lock, \ @@ -773,7 +777,7 @@ extern void FASTCALL(release_sock(struct sock *sk)); SINGLE_DEPTH_NESTING) #define bh_unlock_sock(__sk) spin_unlock(&((__sk)->sk_lock.slock)) -extern struct sock *sk_alloc(int family, +extern struct sock *sk_alloc(struct net *net, int family, gfp_t priority, struct proto *prot, int zero_it); extern void sk_free(struct sock *sk); @@ -1002,6 +1006,7 @@ static inline void sock_copy(struct sock *nsk, const struct sock *osk) #endif memcpy(nsk, osk, osk->sk_prot->obj_size); + get_net(nsk->sk_net); #ifdef CONFIG_SECURITY_NETWORK nsk->sk_security = sptr; security_sk_clone(osk, nsk); diff --git a/include/net/tc_act/tc_nat.h b/include/net/tc_act/tc_nat.h new file mode 100644 index 0000000000000000000000000000000000000000..4a691f34d7035c2eea28a941cf701c82832d7050 --- /dev/null +++ b/include/net/tc_act/tc_nat.h @@ -0,0 +1,21 @@ +#ifndef __NET_TC_NAT_H +#define __NET_TC_NAT_H + +#include +#include + +struct tcf_nat { + struct tcf_common common; + + __be32 old_addr; + __be32 new_addr; + __be32 mask; + u32 flags; +}; + +static inline struct tcf_nat *to_tcf_nat(struct tcf_common *pc) +{ + return container_of(pc, struct tcf_nat, common); +} + +#endif /* __NET_TC_NAT_H */ diff --git a/include/net/tcp.h b/include/net/tcp.h index 54053de0bdd7c12f2f3a0269f2ddf5b77c33ec47..92049e681258a4d43c1e3bd7ccee8ac8af8dab2d 100644 --- a/include/net/tcp.h +++ b/include/net/tcp.h @@ -39,6 +39,7 @@ #include #include #include +#include #include @@ -330,6 +331,17 @@ static inline void tcp_clear_options(struct tcp_options_received *rx_opt) rx_opt->tstamp_ok = rx_opt->sack_ok = rx_opt->wscale_ok = rx_opt->snd_wscale = 0; } +#define TCP_ECN_OK 1 +#define TCP_ECN_QUEUE_CWR 2 +#define TCP_ECN_DEMAND_CWR 4 + +static __inline__ void +TCP_ECN_create_request(struct request_sock *req, struct tcphdr *th) +{ + if (sysctl_tcp_ecn && th->ece && th->cwr) + inet_rsk(req)->ecn_ok = 1; +} + enum tcp_tw_status { TCP_TW_SUCCESS = 0, @@ -573,8 +585,6 @@ struct tcp_skb_cb { #define TCP_SKB_CB(__skb) ((struct tcp_skb_cb *)&((__skb)->cb[0])) -#include - /* Due to TSO, an SKB can be composed of multiple actual * packets. To keep these tracked properly, we use this. 
*/ @@ -589,32 +599,19 @@ static inline int tcp_skb_mss(const struct sk_buff *skb) return skb_shinfo(skb)->gso_size; } -static inline void tcp_dec_pcount_approx(__u32 *count, - const struct sk_buff *skb) +static inline void tcp_dec_pcount_approx_int(__u32 *count, const int decr) { if (*count) { - *count -= tcp_skb_pcount(skb); + *count -= decr; if ((int)*count < 0) *count = 0; } } -static inline void tcp_packets_out_inc(struct sock *sk, - const struct sk_buff *skb) -{ - struct tcp_sock *tp = tcp_sk(sk); - int orig = tp->packets_out; - - tp->packets_out += tcp_skb_pcount(skb); - if (!orig) - inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, - inet_csk(sk)->icsk_rto, TCP_RTO_MAX); -} - -static inline void tcp_packets_out_dec(struct tcp_sock *tp, - const struct sk_buff *skb) +static inline void tcp_dec_pcount_approx(__u32 *count, + const struct sk_buff *skb) { - tp->packets_out -= tcp_skb_pcount(skb); + tcp_dec_pcount_approx_int(count, tcp_skb_pcount(skb)); } /* Events passed to congestion control interface */ @@ -704,6 +701,39 @@ static inline void tcp_ca_event(struct sock *sk, const enum tcp_ca_event event) icsk->icsk_ca_ops->cwnd_event(sk, event); } +/* These functions determine how the current flow behaves in respect of SACK + * handling. SACK is negotiated with the peer, and therefore it can vary + * between different flows. + * + * tcp_is_sack - SACK enabled + * tcp_is_reno - No SACK + * tcp_is_fack - FACK enabled, implies SACK enabled + */ +static inline int tcp_is_sack(const struct tcp_sock *tp) +{ + return tp->rx_opt.sack_ok; +} + +static inline int tcp_is_reno(const struct tcp_sock *tp) +{ + return !tcp_is_sack(tp); +} + +static inline int tcp_is_fack(const struct tcp_sock *tp) +{ + return tp->rx_opt.sack_ok & 2; +} + +static inline void tcp_enable_fack(struct tcp_sock *tp) +{ + tp->rx_opt.sack_ok |= 2; +} + +static inline unsigned int tcp_left_out(const struct tcp_sock *tp) +{ + return tp->sacked_out + tp->lost_out; +} + /* This determines how many packets are "in the network" to the best * of our knowledge. In many cases it is conservative, but where * detailed information is available from the receiver (via SACK @@ -720,7 +750,7 @@ static inline void tcp_ca_event(struct sock *sk, const enum tcp_ca_event event) */ static inline unsigned int tcp_packets_in_flight(const struct tcp_sock *tp) { - return (tp->packets_out - tp->left_out + tp->retrans_out); + return tp->packets_out - tcp_left_out(tp) + tp->retrans_out; } /* If cwnd > ssthresh, we may raise ssthresh to be half-way to cwnd. 
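/*
 * Illustrative sketch (didactic only): the accounting behind the SACK
 * helpers above. tcp_left_out() is sacked_out + lost_out, and
 * tcp_packets_in_flight() is packets_out - tcp_left_out() + retrans_out.
 */
#include <net/tcp.h>

static inline unsigned int mydrv_segs_in_network(const struct tcp_sock *tp)
{
	/* Worked example: packets_out=10, sacked_out=3, lost_out=1,
	 * retrans_out=1 gives tcp_left_out()=4 and 10 - 4 + 1 = 7
	 * segments still in the network. When tcp_is_reno(tp) is true,
	 * sacked_out holds the duplicate-ACK estimate instead of real
	 * SACK information, but the arithmetic is unchanged. */
	return tcp_packets_in_flight(tp);
}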
@@ -738,12 +768,8 @@ static inline __u32 tcp_current_ssthresh(const struct sock *sk) (tp->snd_cwnd >> 2))); } -static inline void tcp_sync_left_out(struct tcp_sock *tp) -{ - BUG_ON(tp->rx_opt.sack_ok && - (tp->sacked_out + tp->lost_out > tp->packets_out)); - tp->left_out = tp->sacked_out + tp->lost_out; -} +/* Use define here intentionally to get WARN_ON location shown at the caller */ +#define tcp_verify_left_out(tp) WARN_ON(tcp_left_out(tp) > tp->packets_out) extern void tcp_enter_cwr(struct sock *sk, const int set_ssthresh); extern __u32 tcp_init_cwnd(struct tcp_sock *tp, struct dst_entry *dst); @@ -1040,12 +1066,18 @@ static inline void tcp_mib_init(void) TCP_ADD_STATS_USER(TCP_MIB_MAXCONN, -1); } -/*from STCP */ -static inline void clear_all_retrans_hints(struct tcp_sock *tp){ +/* from STCP */ +static inline void tcp_clear_retrans_hints_partial(struct tcp_sock *tp) +{ tp->lost_skb_hint = NULL; tp->scoreboard_skb_hint = NULL; tp->retransmit_skb_hint = NULL; tp->forward_skb_hint = NULL; +} + +static inline void tcp_clear_all_retrans_hints(struct tcp_sock *tp) +{ + tcp_clear_retrans_hints_partial(tp); tp->fastpath_skb_hint = NULL; } diff --git a/include/net/tcp_ecn.h b/include/net/tcp_ecn.h deleted file mode 100644 index 89eb3e05116de5a373faccaf14e0e9d7bc8a589b..0000000000000000000000000000000000000000 --- a/include/net/tcp_ecn.h +++ /dev/null @@ -1,130 +0,0 @@ -#ifndef _NET_TCP_ECN_H_ -#define _NET_TCP_ECN_H_ 1 - -#include -#include - -#define TCP_HP_BITS (~(TCP_RESERVED_BITS|TCP_FLAG_PSH)) - -#define TCP_ECN_OK 1 -#define TCP_ECN_QUEUE_CWR 2 -#define TCP_ECN_DEMAND_CWR 4 - -static inline void TCP_ECN_queue_cwr(struct tcp_sock *tp) -{ - if (tp->ecn_flags&TCP_ECN_OK) - tp->ecn_flags |= TCP_ECN_QUEUE_CWR; -} - - -/* Output functions */ - -static inline void TCP_ECN_send_synack(struct tcp_sock *tp, - struct sk_buff *skb) -{ - TCP_SKB_CB(skb)->flags &= ~TCPCB_FLAG_CWR; - if (!(tp->ecn_flags&TCP_ECN_OK)) - TCP_SKB_CB(skb)->flags &= ~TCPCB_FLAG_ECE; -} - -static inline void TCP_ECN_send_syn(struct sock *sk, struct sk_buff *skb) -{ - struct tcp_sock *tp = tcp_sk(sk); - - tp->ecn_flags = 0; - if (sysctl_tcp_ecn) { - TCP_SKB_CB(skb)->flags |= TCPCB_FLAG_ECE|TCPCB_FLAG_CWR; - tp->ecn_flags = TCP_ECN_OK; - } -} - -static __inline__ void -TCP_ECN_make_synack(struct request_sock *req, struct tcphdr *th) -{ - if (inet_rsk(req)->ecn_ok) - th->ece = 1; -} - -static inline void TCP_ECN_send(struct sock *sk, struct sk_buff *skb, - int tcp_header_len) -{ - struct tcp_sock *tp = tcp_sk(sk); - - if (tp->ecn_flags & TCP_ECN_OK) { - /* Not-retransmitted data segment: set ECT and inject CWR. 
*/ - if (skb->len != tcp_header_len && - !before(TCP_SKB_CB(skb)->seq, tp->snd_nxt)) { - INET_ECN_xmit(sk); - if (tp->ecn_flags&TCP_ECN_QUEUE_CWR) { - tp->ecn_flags &= ~TCP_ECN_QUEUE_CWR; - tcp_hdr(skb)->cwr = 1; - skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN; - } - } else { - /* ACK or retransmitted segment: clear ECT|CE */ - INET_ECN_dontxmit(sk); - } - if (tp->ecn_flags & TCP_ECN_DEMAND_CWR) - tcp_hdr(skb)->ece = 1; - } -} - -/* Input functions */ - -static inline void TCP_ECN_accept_cwr(struct tcp_sock *tp, struct sk_buff *skb) -{ - if (tcp_hdr(skb)->cwr) - tp->ecn_flags &= ~TCP_ECN_DEMAND_CWR; -} - -static inline void TCP_ECN_withdraw_cwr(struct tcp_sock *tp) -{ - tp->ecn_flags &= ~TCP_ECN_DEMAND_CWR; -} - -static inline void TCP_ECN_check_ce(struct tcp_sock *tp, struct sk_buff *skb) -{ - if (tp->ecn_flags&TCP_ECN_OK) { - if (INET_ECN_is_ce(TCP_SKB_CB(skb)->flags)) - tp->ecn_flags |= TCP_ECN_DEMAND_CWR; - /* Funny extension: if ECT is not set on a segment, - * it is surely retransmit. It is not in ECN RFC, - * but Linux follows this rule. */ - else if (INET_ECN_is_not_ect((TCP_SKB_CB(skb)->flags))) - tcp_enter_quickack_mode((struct sock *)tp); - } -} - -static inline void TCP_ECN_rcv_synack(struct tcp_sock *tp, struct tcphdr *th) -{ - if ((tp->ecn_flags&TCP_ECN_OK) && (!th->ece || th->cwr)) - tp->ecn_flags &= ~TCP_ECN_OK; -} - -static inline void TCP_ECN_rcv_syn(struct tcp_sock *tp, struct tcphdr *th) -{ - if ((tp->ecn_flags&TCP_ECN_OK) && (!th->ece || !th->cwr)) - tp->ecn_flags &= ~TCP_ECN_OK; -} - -static inline int TCP_ECN_rcv_ecn_echo(struct tcp_sock *tp, struct tcphdr *th) -{ - if (th->ece && !th->syn && (tp->ecn_flags&TCP_ECN_OK)) - return 1; - return 0; -} - -static inline void TCP_ECN_openreq_child(struct tcp_sock *tp, - struct request_sock *req) -{ - tp->ecn_flags = inet_rsk(req)->ecn_ok ? 
TCP_ECN_OK : 0; -} - -static __inline__ void -TCP_ECN_create_request(struct request_sock *req, struct tcphdr *th) -{ - if (sysctl_tcp_ecn && th->ece && th->cwr) - inet_rsk(req)->ecn_ok = 1; -} - -#endif diff --git a/include/net/veth.h b/include/net/veth.h new file mode 100644 index 0000000000000000000000000000000000000000..3354c1eb424e6a7813ef1f1f6eb90c899cded6fe --- /dev/null +++ b/include/net/veth.h @@ -0,0 +1,12 @@ +#ifndef __NET_VETH_H_ +#define __NET_VETH_H_ + +enum { + VETH_INFO_UNSPEC, + VETH_INFO_PEER, + + __VETH_INFO_MAX +#define VETH_INFO_MAX (__VETH_INFO_MAX - 1) +}; + +#endif diff --git a/include/net/wext.h b/include/net/wext.h index c02b8decf3af8324b104bfeb5f9593785ec2f321..80b31d826b7a8a3f3765bf0dc6fc569eefc930da 100644 --- a/include/net/wext.h +++ b/include/net/wext.h @@ -5,16 +5,23 @@ * wireless extensions interface to the core code */ +struct net; + #ifdef CONFIG_WIRELESS_EXT -extern int wext_proc_init(void); -extern int wext_handle_ioctl(struct ifreq *ifr, unsigned int cmd, +extern int wext_proc_init(struct net *net); +extern void wext_proc_exit(struct net *net); +extern int wext_handle_ioctl(struct net *net, struct ifreq *ifr, unsigned int cmd, void __user *arg); #else -static inline int wext_proc_init(void) +static inline int wext_proc_init(struct net *net) { return 0; } -static inline int wext_handle_ioctl(struct ifreq *ifr, unsigned int cmd, +static inline void wext_proc_exit(struct net *net) +{ + return; +} +static inline int wext_handle_ioctl(struct net *net, struct ifreq *ifr, unsigned int cmd, void __user *arg) { return -EINVAL; diff --git a/include/net/xfrm.h b/include/net/xfrm.h index a5f80bfbaaa467757f1ee4d45fe610c38622e652..77be396ca633c93888767178915d1bd6f5e5a2ca 100644 --- a/include/net/xfrm.h +++ b/include/net/xfrm.h @@ -2,7 +2,6 @@ #define _NET_XFRM_H #include -#include #include #include #include @@ -12,9 +11,11 @@ #include #include #include +#include #include #include +#include #include #include #include @@ -278,6 +279,7 @@ struct xfrm_type __u8 proto; __u8 flags; #define XFRM_TYPE_NON_FRAGMENT 1 +#define XFRM_TYPE_REPLAY_PROT 2 int (*init_state)(struct xfrm_state *x); void (*destructor)(struct xfrm_state *); @@ -298,6 +300,18 @@ extern void xfrm_put_type(struct xfrm_type *type); struct xfrm_mode { int (*input)(struct xfrm_state *x, struct sk_buff *skb); + + /* + * Add encapsulation header. + * + * On exit, the transport header will be set to the start of the + * encapsulation header to be filled in by x->type->output and + * the mac header will be set to the nextheader (protocol for + * IPv4) field of the extension header directly preceding the + * encapsulation header, or in its absence, that of the top IP + * header. The value of the network header will always point + * to the top IP header while skb->data will point to the payload. + */ int (*output)(struct xfrm_state *x,struct sk_buff *skb); struct module *owner; @@ -418,18 +432,66 @@ extern int xfrm_unregister_km(struct xfrm_mgr *km); extern unsigned int xfrm_policy_count[XFRM_POLICY_MAX*2]; +/* + * This structure is used for the duration where packets are being + * transformed by IPsec. As soon as the packet leaves IPsec the + * area beyond the generic IP part may be overwritten. + */ +struct xfrm_skb_cb { + union { + struct inet_skb_parm h4; + struct inet6_skb_parm h6; + } header; + + /* Sequence number for replay protection. 
*/ + u64 seq; +}; + +#define XFRM_SKB_CB(__skb) ((struct xfrm_skb_cb *)&((__skb)->cb[0])) + /* Audit Information */ struct xfrm_audit { - uid_t loginuid; + u32 loginuid; u32 secid; }; #ifdef CONFIG_AUDITSYSCALL -extern void xfrm_audit_log(uid_t auid, u32 secid, int type, int result, - struct xfrm_policy *xp, struct xfrm_state *x); +static inline struct audit_buffer *xfrm_audit_start(u32 auid, u32 sid) +{ + struct audit_buffer *audit_buf = NULL; + char *secctx; + u32 secctx_len; + + audit_buf = audit_log_start(current->audit_context, GFP_ATOMIC, + AUDIT_MAC_IPSEC_EVENT); + if (audit_buf == NULL) + return NULL; + + audit_log_format(audit_buf, "auid=%u", auid); + + if (sid != 0 && + security_secid_to_secctx(sid, &secctx, &secctx_len) == 0) { + audit_log_format(audit_buf, " subj=%s", secctx); + security_release_secctx(secctx, secctx_len); + } else + audit_log_task_context(audit_buf); + return audit_buf; +} + +extern void xfrm_audit_policy_add(struct xfrm_policy *xp, int result, + u32 auid, u32 sid); +extern void xfrm_audit_policy_delete(struct xfrm_policy *xp, int result, + u32 auid, u32 sid); +extern void xfrm_audit_state_add(struct xfrm_state *x, int result, + u32 auid, u32 sid); +extern void xfrm_audit_state_delete(struct xfrm_state *x, int result, + u32 auid, u32 sid); #else -#define xfrm_audit_log(a,s,t,r,p,x) do { ; } while (0) +#define xfrm_audit_policy_add(x, r, a, s) do { ; } while (0) +#define xfrm_audit_policy_delete(x, r, a, s) do { ; } while (0) +#define xfrm_audit_state_add(x, r, a, s) do { ; } while (0) +#define xfrm_audit_state_delete(x, r, a, s) do { ; } while (0) #endif /* CONFIG_AUDITSYSCALL */ static inline void xfrm_pol_hold(struct xfrm_policy *policy) @@ -981,9 +1043,9 @@ extern void xfrm_spd_getinfo(struct xfrmk_spdinfo *si); extern int xfrm_replay_check(struct xfrm_state *x, __be32 seq); extern void xfrm_replay_advance(struct xfrm_state *x, __be32 seq); extern void xfrm_replay_notify(struct xfrm_state *x, int event); -extern int xfrm_state_check(struct xfrm_state *x, struct sk_buff *skb); extern int xfrm_state_mtu(struct xfrm_state *x, int mtu); extern int xfrm_init_state(struct xfrm_state *x); +extern int xfrm_output(struct sk_buff *skb); extern int xfrm4_rcv(struct sk_buff *skb); extern int xfrm4_output(struct sk_buff *skb); extern int xfrm4_tunnel_register(struct xfrm_tunnel *handler, unsigned short family); @@ -1034,7 +1096,7 @@ struct xfrm_policy *xfrm_policy_bysel_ctx(u8 type, int dir, struct xfrm_policy *xfrm_policy_byid(u8, int dir, u32 id, int delete, int *err); int xfrm_policy_flush(u8 type, struct xfrm_audit *audit_info); u32 xfrm_get_acqseq(void); -void xfrm_alloc_spi(struct xfrm_state *x, __be32 minspi, __be32 maxspi); +extern int xfrm_alloc_spi(struct xfrm_state *x, u32 minspi, u32 maxspi); struct xfrm_state * xfrm_find_acq(u8 mode, u32 reqid, u8 proto, xfrm_address_t *daddr, xfrm_address_t *saddr, int create, unsigned short family); @@ -1113,12 +1175,6 @@ static inline int xfrm_aevent_is_on(void) return ret; } -static inline void xfrm_aevent_doreplay(struct xfrm_state *x) -{ - if (xfrm_aevent_is_on()) - xfrm_replay_notify(x, XFRM_REPLAY_UPDATE); -} - #ifdef CONFIG_XFRM_MIGRATE static inline struct xfrm_algo *xfrm_algo_clone(struct xfrm_algo *orig) { diff --git a/ipc/mqueue.c b/ipc/mqueue.c index 145d5a0d299f77d8cb328b9deb292f55c69f5a7d..24df3347ad4b4ca47d60a8f6c33f4e4d9dea1c40 100644 --- a/ipc/mqueue.c +++ b/ipc/mqueue.c @@ -521,8 +521,7 @@ static void __do_notify(struct mqueue_inode_info *info) break; case SIGEV_THREAD: set_cookie(info->notify_cookie, 
NOTIFY_WOKENUP); - netlink_sendskb(info->notify_sock, - info->notify_cookie, 0); + netlink_sendskb(info->notify_sock, info->notify_cookie); break; } /* after notification unregisters process */ @@ -568,7 +567,7 @@ static void remove_notification(struct mqueue_inode_info *info) if (info->notify_owner != NULL && info->notify.sigev_notify == SIGEV_THREAD) { set_cookie(info->notify_cookie, NOTIFY_REMOVED); - netlink_sendskb(info->notify_sock, info->notify_cookie, 0); + netlink_sendskb(info->notify_sock, info->notify_cookie); } put_pid(info->notify_owner); info->notify_owner = NULL; diff --git a/kernel/audit.c b/kernel/audit.c index eb0f9165b401bcf4375baf4b233ab6a1401b448d..2924251a6547bebb25d952dc64ea5590848b1bce 100644 --- a/kernel/audit.c +++ b/kernel/audit.c @@ -847,18 +847,10 @@ static void audit_receive_skb(struct sk_buff *skb) } /* Receive messages from netlink socket. */ -static void audit_receive(struct sock *sk, int length) +static void audit_receive(struct sk_buff *skb) { - struct sk_buff *skb; - unsigned int qlen; - mutex_lock(&audit_cmd_mutex); - - for (qlen = skb_queue_len(&sk->sk_receive_queue); qlen; qlen--) { - skb = skb_dequeue(&sk->sk_receive_queue); - audit_receive_skb(skb); - kfree_skb(skb); - } + audit_receive_skb(skb); mutex_unlock(&audit_cmd_mutex); } @@ -876,8 +868,8 @@ static int __init audit_init(void) printk(KERN_INFO "audit: initializing netlink socket (%s)\n", audit_default ? "enabled" : "disabled"); - audit_sock = netlink_kernel_create(NETLINK_AUDIT, 0, audit_receive, - NULL, THIS_MODULE); + audit_sock = netlink_kernel_create(&init_net, NETLINK_AUDIT, 0, + audit_receive, NULL, THIS_MODULE); if (!audit_sock) audit_panic("cannot initialize netlink socket"); else diff --git a/kernel/fork.c b/kernel/fork.c index 33f12f48684a0053a82d72506c1d7ec1e7ca6a15..5e67f90a1694b46fc752b0a5b48fa3701bc15ec2 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -1608,7 +1608,8 @@ asmlinkage long sys_unshare(unsigned long unshare_flags) err = -EINVAL; if (unshare_flags & ~(CLONE_THREAD|CLONE_FS|CLONE_NEWNS|CLONE_SIGHAND| CLONE_VM|CLONE_FILES|CLONE_SYSVSEM| - CLONE_NEWUTS|CLONE_NEWIPC|CLONE_NEWUSER)) + CLONE_NEWUTS|CLONE_NEWIPC|CLONE_NEWUSER| + CLONE_NEWNET)) goto bad_unshare_out; if ((err = unshare_thread(unshare_flags))) diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c index c21ca6bfaa6600dd864ef1b32798dc1a4ae8c081..dc8a4451d79b15d64a37caf55c9dc4595c6ec5d2 100644 --- a/kernel/hrtimer.c +++ b/kernel/hrtimer.c @@ -277,6 +277,30 @@ ktime_t ktime_add_ns(const ktime_t kt, u64 nsec) } EXPORT_SYMBOL_GPL(ktime_add_ns); + +/** + * ktime_sub_ns - Subtract a scalar nanoseconds value from a ktime_t variable + * @kt: minuend + * @nsec: the scalar nsec value to subtract + * + * Returns the subtraction of @nsec from @kt in ktime_t format + */ +ktime_t ktime_sub_ns(const ktime_t kt, u64 nsec) +{ + ktime_t tmp; + + if (likely(nsec < NSEC_PER_SEC)) { + tmp.tv64 = nsec; + } else { + unsigned long rem = do_div(nsec, NSEC_PER_SEC); + + tmp = ktime_set((long)nsec, rem); + } + + return ktime_sub(kt, tmp); +} + +EXPORT_SYMBOL_GPL(ktime_sub_ns); # endif /* !CONFIG_KTIME_SCALAR */ /* diff --git a/kernel/nsproxy.c b/kernel/nsproxy.c index a4fb7d46971f48a8655ba8e5087025593cf6e79e..f1decd21a534c248054bef7c1c6f9f6ec7da072c 100644 --- a/kernel/nsproxy.c +++ b/kernel/nsproxy.c @@ -20,6 +20,7 @@ #include #include #include +#include static struct kmem_cache *nsproxy_cachep; @@ -98,8 +99,17 @@ static struct nsproxy *create_new_namespaces(unsigned long flags, goto out_user; } + new_nsp->net_ns = copy_net_ns(flags, 
tsk->nsproxy->net_ns); + if (IS_ERR(new_nsp->net_ns)) { + err = PTR_ERR(new_nsp->net_ns); + goto out_net; + } + return new_nsp; +out_net: + if (new_nsp->user_ns) + put_user_ns(new_nsp->user_ns); out_user: if (new_nsp->pid_ns) put_pid_ns(new_nsp->pid_ns); @@ -132,7 +142,7 @@ int copy_namespaces(unsigned long flags, struct task_struct *tsk) get_nsproxy(old_ns); - if (!(flags & (CLONE_NEWNS | CLONE_NEWUTS | CLONE_NEWIPC | CLONE_NEWUSER))) + if (!(flags & (CLONE_NEWNS | CLONE_NEWUTS | CLONE_NEWIPC | CLONE_NEWUSER | CLONE_NEWNET))) return 0; if (!capable(CAP_SYS_ADMIN)) { @@ -164,6 +174,7 @@ void free_nsproxy(struct nsproxy *ns) put_pid_ns(ns->pid_ns); if (ns->user_ns) put_user_ns(ns->user_ns); + put_net(ns->net_ns); kmem_cache_free(nsproxy_cachep, ns); } @@ -177,7 +188,7 @@ int unshare_nsproxy_namespaces(unsigned long unshare_flags, int err = 0; if (!(unshare_flags & (CLONE_NEWNS | CLONE_NEWUTS | CLONE_NEWIPC | - CLONE_NEWUSER))) + CLONE_NEWUSER | CLONE_NEWNET))) return 0; if (!capable(CAP_SYS_ADMIN)) diff --git a/kernel/softirq.c b/kernel/softirq.c index 0f546ddea43df41debcf42507ede2ac6873c80a9..bd89bc4eb0b9c0baa8070678d3cb099a8b40bb1c 100644 --- a/kernel/softirq.c +++ b/kernel/softirq.c @@ -271,8 +271,6 @@ asmlinkage void do_softirq(void) local_irq_restore(flags); } -EXPORT_SYMBOL(do_softirq); - #endif /* @@ -332,8 +330,6 @@ inline fastcall void raise_softirq_irqoff(unsigned int nr) wakeup_softirqd(); } -EXPORT_SYMBOL(raise_softirq_irqoff); - void fastcall raise_softirq(unsigned int nr) { unsigned long flags; diff --git a/lib/kobject_uevent.c b/lib/kobject_uevent.c index df02814699d747bc484b89cb22100467f82e0a91..e06a8dcec0f04ce9e566901be3fa52c786a74e41 100644 --- a/lib/kobject_uevent.c +++ b/lib/kobject_uevent.c @@ -280,9 +280,8 @@ EXPORT_SYMBOL_GPL(add_uevent_var); #if defined(CONFIG_NET) static int __init kobject_uevent_init(void) { - uevent_sock = netlink_kernel_create(NETLINK_KOBJECT_UEVENT, 1, NULL, - NULL, THIS_MODULE); - + uevent_sock = netlink_kernel_create(&init_net, NETLINK_KOBJECT_UEVENT, + 1, NULL, NULL, THIS_MODULE); if (!uevent_sock) { printk(KERN_ERR "kobject_uevent: unable to create netlink socket!\n"); diff --git a/lib/zlib_inflate/inffast.c b/lib/zlib_inflate/inffast.c index d84560c076d83cfca8a0d2d19b6214ed1cac23ca..8550b0c05d00191d97521356ed7d6ed2e1a84f3f 100644 --- a/lib/zlib_inflate/inffast.c +++ b/lib/zlib_inflate/inffast.c @@ -69,22 +69,22 @@ void inflate_fast(z_streamp strm, unsigned start) { struct inflate_state *state; - unsigned char *in; /* local strm->next_in */ - unsigned char *last; /* while in < last, enough input available */ - unsigned char *out; /* local strm->next_out */ - unsigned char *beg; /* inflate()'s initial strm->next_out */ - unsigned char *end; /* while out < end, enough space available */ + const unsigned char *in; /* local strm->next_in */ + const unsigned char *last; /* while in < last, enough input available */ + unsigned char *out; /* local strm->next_out */ + unsigned char *beg; /* inflate()'s initial strm->next_out */ + unsigned char *end; /* while out < end, enough space available */ #ifdef INFLATE_STRICT unsigned dmax; /* maximum distance from zlib header */ #endif unsigned wsize; /* window size or zero if not using window */ unsigned whave; /* valid bytes in the window */ unsigned write; /* window write index */ - unsigned char *window; /* allocated sliding window, if wsize != 0 */ + unsigned char *window; /* allocated sliding window, if wsize != 0 */ unsigned long hold; /* local strm->hold */ unsigned bits; /* local strm->bits */ - 
code const *lcode; /* local strm->lencode */ - code const *dcode; /* local strm->distcode */ + code const *lcode; /* local strm->lencode */ + code const *dcode; /* local strm->distcode */ unsigned lmask; /* mask for first level of length codes */ unsigned dmask; /* mask for first level of distance codes */ code this; /* retrieved table entry */ @@ -92,7 +92,7 @@ void inflate_fast(z_streamp strm, unsigned start) /* window position, window bytes to copy */ unsigned len; /* match length, unused bytes */ unsigned dist; /* match distance */ - unsigned char *from; /* where to copy match from */ + unsigned char *from; /* where to copy match from */ /* copy state to local variables */ state = (struct inflate_state *)strm->state; diff --git a/lib/zlib_inflate/inflate.c b/lib/zlib_inflate/inflate.c index 7e1e3114a73e1943ee054478571016145bc0f214..0ad1ebf00947a1e7aa5bfb2889bc9d39e298ebe6 100644 --- a/lib/zlib_inflate/inflate.c +++ b/lib/zlib_inflate/inflate.c @@ -332,14 +332,14 @@ static int zlib_inflateSyncPacket(z_streamp strm) int zlib_inflate(z_streamp strm, int flush) { struct inflate_state *state; - unsigned char *next; /* next input */ - unsigned char *put; /* next output */ + const unsigned char *next; /* next input */ + unsigned char *put; /* next output */ unsigned have, left; /* available input and output */ unsigned long hold; /* bit buffer */ unsigned bits; /* bits in bit buffer */ unsigned in, out; /* save starting available input and output */ unsigned copy; /* number of stored or match bytes to copy */ - unsigned char *from; /* where to copy match bytes from */ + unsigned char *from; /* where to copy match bytes from */ code this; /* current decoding table entry */ code last; /* parent table entry */ unsigned len; /* length to copy for repeats, bits to drop */ @@ -897,7 +897,7 @@ int zlib_inflateIncomp(z_stream *z) /* Setup some variables to allow misuse of updateWindow */ z->avail_out = 0; - z->next_out = z->next_in + z->avail_in; + z->next_out = (unsigned char*)z->next_in + z->avail_in; zlib_updatewindow(z, z->avail_in); @@ -916,3 +916,50 @@ int zlib_inflateIncomp(z_stream *z) return Z_OK; } + +#include +#include +#include + +/* Utility function: initialize zlib, unpack binary blob, clean up zlib, + * return len or negative error code. */ +int zlib_inflate_blob(void *gunzip_buf, unsigned sz, const void *buf, unsigned len) +{ + const u8 *zbuf = buf; + struct z_stream_s *strm; + int rc; + + rc = -ENOMEM; + strm = kmalloc(sizeof(*strm), GFP_KERNEL); + if (strm == NULL) + goto gunzip_nomem1; + strm->workspace = kmalloc(zlib_inflate_workspacesize(), GFP_KERNEL); + if (strm->workspace == NULL) + goto gunzip_nomem2; + + /* gzip header (1f,8b,08... 
10 bytes total + possible asciz filename) + * expected to be stripped from input */ + + strm->next_in = zbuf; + strm->avail_in = len; + strm->next_out = gunzip_buf; + strm->avail_out = sz; + + rc = zlib_inflateInit2(strm, -MAX_WBITS); + if (rc == Z_OK) { + rc = zlib_inflate(strm, Z_FINISH); + /* after Z_FINISH, only Z_STREAM_END is "we unpacked it all" */ + if (rc == Z_STREAM_END) + rc = sz - strm->avail_out; + else + rc = -EINVAL; + zlib_inflateEnd(strm); + } else + rc = -EINVAL; + + kfree(strm->workspace); +gunzip_nomem2: + kfree(strm); +gunzip_nomem1: + return rc; /* returns Z_OK (0) if successful */ +} diff --git a/lib/zlib_inflate/inflate_syms.c b/lib/zlib_inflate/inflate_syms.c index 2061d4f06765e614875c2d16f386c42a9dde0686..67329fe9907e5ca43425c45bde1824d7282fd998 100644 --- a/lib/zlib_inflate/inflate_syms.c +++ b/lib/zlib_inflate/inflate_syms.c @@ -16,4 +16,5 @@ EXPORT_SYMBOL(zlib_inflateInit2); EXPORT_SYMBOL(zlib_inflateEnd); EXPORT_SYMBOL(zlib_inflateReset); EXPORT_SYMBOL(zlib_inflateIncomp); +EXPORT_SYMBOL(zlib_inflate_blob); MODULE_LICENSE("GPL"); diff --git a/net/802/fc.c b/net/802/fc.c index 675d9ba8e591bf78803a09ecd3fcfa32596224b7..cb3475ea6fdada58337e7bf6e39773478afb0b7b 100644 --- a/net/802/fc.c +++ b/net/802/fc.c @@ -35,7 +35,7 @@ static int fc_header(struct sk_buff *skb, struct net_device *dev, unsigned short type, - void *daddr, void *saddr, unsigned len) + const void *daddr, const void *saddr, unsigned len) { struct fch_hdr *fch; int hdr_len; @@ -95,11 +95,14 @@ static int fc_rebuild_header(struct sk_buff *skb) #endif } +static const struct header_ops fc_header_ops = { + .create = fc_header, + .rebuild = fc_rebuild_header, +}; + static void fc_setup(struct net_device *dev) { - dev->hard_header = fc_header; - dev->rebuild_header = fc_rebuild_header; - + dev->header_ops = &fc_header_ops; dev->type = ARPHRD_IEEE802; dev->hard_header_len = FC_HLEN; dev->mtu = 2024; diff --git a/net/802/fddi.c b/net/802/fddi.c index 91dde41b54819998d2db2e603611910da1604b77..0549317b935699f76c604bb06dabbee90492fec4 100644 --- a/net/802/fddi.c +++ b/net/802/fddi.c @@ -52,7 +52,7 @@ static int fddi_header(struct sk_buff *skb, struct net_device *dev, unsigned short type, - void *daddr, void *saddr, unsigned len) + const void *daddr, const void *saddr, unsigned len) { int hl = FDDI_K_SNAP_HLEN; struct fddihdr *fddi; @@ -175,11 +175,15 @@ static int fddi_change_mtu(struct net_device *dev, int new_mtu) return(0); } +static const struct header_ops fddi_header_ops = { + .create = fddi_header, + .rebuild = fddi_rebuild_header, +}; + static void fddi_setup(struct net_device *dev) { dev->change_mtu = fddi_change_mtu; - dev->hard_header = fddi_header; - dev->rebuild_header = fddi_rebuild_header; + dev->header_ops = &fddi_header_ops; dev->type = ARPHRD_FDDI; dev->hard_header_len = FDDI_K_SNAP_HLEN+3; /* Assume 802.2 SNAP hdr len + 3 pad bytes */ diff --git a/net/802/hippi.c b/net/802/hippi.c index 87ffc12b6891826232ebba946f29c101e3b4b2dc..e35dc1e0915d27ad3b99da056541689327a7a70c 100644 --- a/net/802/hippi.c +++ b/net/802/hippi.c @@ -45,8 +45,8 @@ */ static int hippi_header(struct sk_buff *skb, struct net_device *dev, - unsigned short type, void *daddr, void *saddr, - unsigned len) + unsigned short type, + const void *daddr, const void *saddr, unsigned len) { struct hippi_hdr *hip = (struct hippi_hdr *)skb_push(skb, HIPPI_HLEN); struct hippi_cb *hcb = (struct hippi_cb *) skb->cb; @@ -182,16 +182,18 @@ static int hippi_neigh_setup_dev(struct net_device *dev, struct neigh_parms *p) return 0; } +static const 
struct header_ops hippi_header_ops = { + .create = hippi_header, + .rebuild = hippi_rebuild_header, +}; + + static void hippi_setup(struct net_device *dev) { dev->set_multicast_list = NULL; dev->change_mtu = hippi_change_mtu; - dev->hard_header = hippi_header; - dev->rebuild_header = hippi_rebuild_header; + dev->header_ops = &hippi_header_ops; dev->set_mac_address = hippi_mac_addr; - dev->hard_header_parse = NULL; - dev->hard_header_cache = NULL; - dev->header_cache_update = NULL; dev->neigh_setup = hippi_neigh_setup_dev; /* diff --git a/net/802/p8023.c b/net/802/p8023.c index 53cf05709283662f16578fab5cd06972c07eec16..6ab1835041a7485f1d4e23cadb7e3b35f6fcc631 100644 --- a/net/802/p8023.c +++ b/net/802/p8023.c @@ -31,7 +31,7 @@ static int p8023_request(struct datalink_proto *dl, { struct net_device *dev = skb->dev; - dev->hard_header(skb, dev, ETH_P_802_3, dest_node, NULL, skb->len); + dev_hard_header(skb, dev, ETH_P_802_3, dest_node, NULL, skb->len); return dev_queue_xmit(skb); } diff --git a/net/802/tr.c b/net/802/tr.c index e56e61a7f5450ad783aa92a510dafab0a7309069..a2bd0f2e3af84c85e5e0077963b2bd3616b69d93 100644 --- a/net/802/tr.c +++ b/net/802/tr.c @@ -36,6 +36,7 @@ #include #include #include +#include static void tr_add_rif_info(struct trh_hdr *trh, struct net_device *dev); static void rif_check_expire(unsigned long dummy); @@ -99,7 +100,7 @@ static inline unsigned long rif_hash(const unsigned char *addr) static int tr_header(struct sk_buff *skb, struct net_device *dev, unsigned short type, - void *daddr, void *saddr, unsigned len) + const void *daddr, const void *saddr, unsigned len) { struct trh_hdr *trh; int hdr_len; @@ -141,7 +142,7 @@ static int tr_header(struct sk_buff *skb, struct net_device *dev, if(daddr) { memcpy(trh->daddr,daddr,dev->addr_len); - tr_source_route(skb,trh,dev); + tr_source_route(skb, trh, dev); return(hdr_len); } @@ -246,7 +247,8 @@ __be16 tr_type_trans(struct sk_buff *skb, struct net_device *dev) * We try to do source routing... */ -void tr_source_route(struct sk_buff *skb,struct trh_hdr *trh,struct net_device *dev) +void tr_source_route(struct sk_buff *skb,struct trh_hdr *trh, + struct net_device *dev) { int slack; unsigned int hash; @@ -282,8 +284,10 @@ void tr_source_route(struct sk_buff *skb,struct trh_hdr *trh,struct net_device * if(entry) { #if TR_SR_DEBUG -printk("source routing for %02X:%02X:%02X:%02X:%02X:%02X\n",trh->daddr[0], - trh->daddr[1],trh->daddr[2],trh->daddr[3],trh->daddr[4],trh->daddr[5]); +{ +DECLARE_MAC_BUF(mac); +printk("source routing for %s\n",print_mac(mac, trh->daddr)); +} #endif if(!entry->local_ring && (ntohs(entry->rcf) & TR_RCF_LEN_MASK) >> 8) { @@ -365,10 +369,9 @@ static void tr_add_rif_info(struct trh_hdr *trh, struct net_device *dev) if(entry==NULL) { #if TR_SR_DEBUG -printk("adding rif_entry: addr:%02X:%02X:%02X:%02X:%02X:%02X rcf:%04X\n", - trh->saddr[0],trh->saddr[1],trh->saddr[2], - trh->saddr[3],trh->saddr[4],trh->saddr[5], - ntohs(trh->rcf)); + DECLARE_MAC_BUF(mac); + printk("adding rif_entry: addr:%s rcf:%04X\n", + print_mac(mac, trh->saddr), ntohs(trh->rcf)); #endif /* * Allocate our new entry. 
A failure to allocate loses @@ -413,10 +416,11 @@ printk("adding rif_entry: addr:%02X:%02X:%02X:%02X:%02X:%02X rcf:%04X\n", !(trh->rcf & htons(TR_RCF_BROADCAST_MASK))) { #if TR_SR_DEBUG -printk("updating rif_entry: addr:%02X:%02X:%02X:%02X:%02X:%02X rcf:%04X\n", - trh->saddr[0],trh->saddr[1],trh->saddr[2], - trh->saddr[3],trh->saddr[4],trh->saddr[5], - ntohs(trh->rcf)); +{ +DECLARE_MAC_BUF(mac); +printk("updating rif_entry: addr:%s rcf:%04X\n", + print_mac(mac, trh->saddr), ntohs(trh->rcf)); +} #endif entry->rcf = trh->rcf & htons((unsigned short)~TR_RCF_BROADCAST_MASK); memcpy(&(entry->rseg[0]),&(trh->rseg[0]),8*sizeof(unsigned short)); @@ -527,19 +531,19 @@ static int rif_seq_show(struct seq_file *seq, void *v) { int j, rcf_len, segment, brdgnmb; struct rif_cache *entry = v; + DECLARE_MAC_BUF(mac); if (v == SEQ_START_TOKEN) seq_puts(seq, "if TR address TTL rcf routing segments\n"); else { - struct net_device *dev = dev_get_by_index(entry->iface); + struct net_device *dev = dev_get_by_index(&init_net, entry->iface); long ttl = (long) (entry->last_used + sysctl_tr_rif_timeout) - (long) jiffies; - seq_printf(seq, "%s %02X:%02X:%02X:%02X:%02X:%02X %7li ", + seq_printf(seq, "%s %s %7li ", dev?dev->name:"?", - entry->addr[0],entry->addr[1],entry->addr[2], - entry->addr[3],entry->addr[4],entry->addr[5], + print_mac(mac, entry->addr), ttl/HZ); if (entry->local_ring) @@ -589,14 +593,18 @@ static const struct file_operations rif_seq_fops = { #endif +static const struct header_ops tr_header_ops = { + .create = tr_header, + .rebuild= tr_rebuild_header, +}; + static void tr_setup(struct net_device *dev) { /* * Configure and register */ - dev->hard_header = tr_header; - dev->rebuild_header = tr_rebuild_header; + dev->header_ops = &tr_header_ops; dev->type = ARPHRD_IEEE802_TR; dev->hard_header_len = TR_HLEN; @@ -639,7 +647,7 @@ static int __init rif_init(void) rif_timer.function = rif_check_expire; add_timer(&rif_timer); - proc_net_fops_create("tr_rif", S_IRUGO, &rif_seq_fops); + proc_net_fops_create(&init_net, "tr_rif", S_IRUGO, &rif_seq_fops); return 0; } diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c index 2a546919d6fbd72e10ea6c01dc908e84eb012aa4..3fe4fc86055faa4765701c3e36f88fed043cfaeb 100644 --- a/net/8021q/vlan.c +++ b/net/8021q/vlan.c @@ -31,6 +31,7 @@ #include #include #include +#include #include #include "vlan.h" @@ -50,7 +51,7 @@ static char vlan_copyright[] = "Ben Greear "; static char vlan_buggyright[] = "David S. 
Miller "; static int vlan_device_event(struct notifier_block *, unsigned long, void *); -static int vlan_ioctl_handler(void __user *); +static int vlan_ioctl_handler(struct net *net, void __user *); static int unregister_vlan_dev(struct net_device *, unsigned short ); static struct notifier_block vlan_notifier_block = { @@ -313,6 +314,12 @@ int unregister_vlan_device(struct net_device *dev) */ static struct lock_class_key vlan_netdev_xmit_lock_key; +static const struct header_ops vlan_header_ops = { + .create = vlan_dev_hard_header, + .rebuild = vlan_dev_rebuild_header, + .parse = eth_header_parse, +}; + static int vlan_dev_init(struct net_device *dev) { struct net_device *real_dev = VLAN_DEV_INFO(dev)->real_dev; @@ -324,24 +331,23 @@ static int vlan_dev_init(struct net_device *dev) (1<<__LINK_STATE_DORMANT))) | (1<<__LINK_STATE_PRESENT); + /* ipv6 shared card related stuff */ + dev->dev_id = real_dev->dev_id; + if (is_zero_ether_addr(dev->dev_addr)) memcpy(dev->dev_addr, real_dev->dev_addr, dev->addr_len); if (is_zero_ether_addr(dev->broadcast)) memcpy(dev->broadcast, real_dev->broadcast, dev->addr_len); if (real_dev->features & NETIF_F_HW_VLAN_TX) { - dev->hard_header = real_dev->hard_header; + dev->header_ops = real_dev->header_ops; dev->hard_header_len = real_dev->hard_header_len; dev->hard_start_xmit = vlan_dev_hwaccel_hard_start_xmit; - dev->rebuild_header = real_dev->rebuild_header; } else { - dev->hard_header = vlan_dev_hard_header; + dev->header_ops = &vlan_header_ops; dev->hard_header_len = real_dev->hard_header_len + VLAN_HLEN; dev->hard_start_xmit = vlan_dev_hard_start_xmit; - dev->rebuild_header = vlan_dev_rebuild_header; } - dev->hard_header_parse = real_dev->hard_header_parse; - dev->hard_header_cache = NULL; lockdep_set_class(&dev->_xmit_lock, &vlan_netdev_xmit_lock_key); return 0; @@ -349,8 +355,6 @@ static int vlan_dev_init(struct net_device *dev) void vlan_setup(struct net_device *new_dev) { - SET_MODULE_OWNER(new_dev); - ether_setup(new_dev); /* new_dev->ifindex = 0; it will be set when added to @@ -603,6 +607,9 @@ static int vlan_device_event(struct notifier_block *unused, unsigned long event, int i, flgs; struct net_device *vlandev; + if (dev->nd_net != &init_net) + return NOTIFY_DONE; + if (!grp) goto out; @@ -693,7 +700,7 @@ static int vlan_device_event(struct notifier_block *unused, unsigned long event, * o execute requested action or pass command to the device driver * arg is really a struct vlan_ioctl_args __user *. 
*/ -static int vlan_ioctl_handler(void __user *arg) +static int vlan_ioctl_handler(struct net *net, void __user *arg) { int err; unsigned short vid = 0; @@ -722,7 +729,7 @@ static int vlan_ioctl_handler(void __user *arg) case GET_VLAN_REALDEV_NAME_CMD: case GET_VLAN_VID_CMD: err = -ENODEV; - dev = __dev_get_by_name(args.device1); + dev = __dev_get_by_name(&init_net, args.device1); if (!dev) goto out; diff --git a/net/8021q/vlan.h b/net/8021q/vlan.h index 7df5b29355795382e6ceb5d334768f87c37fe9b6..cf4a80d06b3526f1ca2caf4e938561ec57c4dca9 100644 --- a/net/8021q/vlan.h +++ b/net/8021q/vlan.h @@ -53,8 +53,8 @@ int vlan_dev_rebuild_header(struct sk_buff *skb); int vlan_skb_recv(struct sk_buff *skb, struct net_device *dev, struct packet_type *ptype, struct net_device *orig_dev); int vlan_dev_hard_header(struct sk_buff *skb, struct net_device *dev, - unsigned short type, void *daddr, void *saddr, - unsigned len); + unsigned short type, const void *daddr, + const void *saddr, unsigned len); int vlan_dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev); int vlan_dev_hwaccel_hard_start_xmit(struct sk_buff *skb, struct net_device *dev); int vlan_dev_change_mtu(struct net_device *dev, int new_mtu); diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c index 328759c32d616195ff634ad12369667d16e4f80e..1a1740aa9a8b52ced5b71c4b2443b2eea773c0a7 100644 --- a/net/8021q/vlan_dev.c +++ b/net/8021q/vlan_dev.c @@ -122,6 +122,11 @@ int vlan_skb_recv(struct sk_buff *skb, struct net_device *dev, unsigned short vlan_TCI; __be16 proto; + if (dev->nd_net != &init_net) { + kfree_skb(skb); + return -1; + } + if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL) return -1; @@ -338,8 +343,8 @@ static inline unsigned short vlan_dev_get_egress_qos_mask(struct net_device* dev * physical devices. */ int vlan_dev_hard_header(struct sk_buff *skb, struct net_device *dev, - unsigned short type, void *daddr, void *saddr, - unsigned len) + unsigned short type, + const void *daddr, const void *saddr, unsigned len) { struct vlan_hdr *vhdr; unsigned short veth_TCI = 0; @@ -429,21 +434,19 @@ int vlan_dev_hard_header(struct sk_buff *skb, struct net_device *dev, if (build_vlan_header) { /* Now make the underlying real hard header */ - rc = dev->hard_header(skb, dev, ETH_P_8021Q, daddr, saddr, len + VLAN_HLEN); - - if (rc > 0) { + rc = dev_hard_header(skb, dev, ETH_P_8021Q, daddr, saddr, + len + VLAN_HLEN); + if (rc > 0) rc += VLAN_HLEN; - } else if (rc < 0) { + else if (rc < 0) rc -= VLAN_HLEN; - } - } else { + } else /* If here, then we'll just make a normal looking ethernet frame, * but, the hard_start_xmit method will insert the tag (it has to * be able to do this for bridged and other skbs that don't come * down the protocol stack in an orderly manner. 
*/ - rc = dev->hard_header(skb, dev, type, daddr, saddr, len); - } + rc = dev_hard_header(skb, dev, type, daddr, saddr, len); return rc; } diff --git a/net/8021q/vlan_netlink.c b/net/8021q/vlan_netlink.c index 6cdd1e015e2d6f17dfaa419e1a7b49428d88cd91..0996185e2ed5dd816946df9fdcf10a1d4a1d4b13 100644 --- a/net/8021q/vlan_netlink.c +++ b/net/8021q/vlan_netlink.c @@ -11,6 +11,7 @@ #include #include #include +#include #include #include #include "vlan.h" @@ -112,7 +113,7 @@ static int vlan_newlink(struct net_device *dev, if (!tb[IFLA_LINK]) return -EINVAL; - real_dev = __dev_get_by_index(nla_get_u32(tb[IFLA_LINK])); + real_dev = __dev_get_by_index(&init_net, nla_get_u32(tb[IFLA_LINK])); if (!real_dev) return -ENODEV; diff --git a/net/8021q/vlanproc.c b/net/8021q/vlanproc.c index bd08aa090763ad469e7e9c795f8057d8370e46f0..6cefdf8e381a3b40bf9d6d78c5535e25bab6f612 100644 --- a/net/8021q/vlanproc.c +++ b/net/8021q/vlanproc.c @@ -33,6 +33,7 @@ #include #include #include +#include #include "vlanproc.h" #include "vlan.h" @@ -143,7 +144,7 @@ void vlan_proc_cleanup(void) remove_proc_entry(name_conf, proc_vlan_dir); if (proc_vlan_dir) - proc_net_remove(name_root); + proc_net_remove(&init_net, name_root); /* Dynamically added entries should be cleaned up as their vlan_device * is removed, so we should not have to take care of it here... @@ -156,7 +157,7 @@ void vlan_proc_cleanup(void) int __init vlan_proc_init(void) { - proc_vlan_dir = proc_mkdir(name_root, proc_net); + proc_vlan_dir = proc_mkdir(name_root, init_net.proc_net); if (proc_vlan_dir) { proc_vlan_conf = create_proc_entry(name_conf, S_IFREG|S_IRUSR|S_IWUSR, @@ -253,7 +254,7 @@ static void *vlan_seq_start(struct seq_file *seq, loff_t *pos) if (*pos == 0) return SEQ_START_TOKEN; - for_each_netdev(dev) { + for_each_netdev(&init_net, dev) { if (!is_vlan_dev(dev)) continue; @@ -272,9 +273,9 @@ static void *vlan_seq_next(struct seq_file *seq, void *v, loff_t *pos) dev = (struct net_device *)v; if (v == SEQ_START_TOKEN) - dev = net_device_entry(&dev_base_head); + dev = net_device_entry(&init_net.dev_base_head); - for_each_netdev_continue(dev) { + for_each_netdev_continue(&init_net, dev) { if (!is_vlan_dev(dev)) continue; diff --git a/net/Kconfig b/net/Kconfig index cdba08ca2efe6145464c1f55516c672dd303892c..ab4e6da5012f1e1d2ef45436fa863d80655f75e2 100644 --- a/net/Kconfig +++ b/net/Kconfig @@ -27,6 +27,14 @@ if NET menu "Networking options" +config NET_NS + bool "Network namespace support" + default n + depends on EXPERIMENTAL && !SYSFS + help + Allow user space to create what appear to be multiple instances + of the network stack. 
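/*
 * Illustrative sketch, not part of this patch: with CONFIG_NET_NS enabled
 * and CLONE_NEWNET accepted by sys_unshare() (see the kernel/fork.c and
 * kernel/nsproxy.c hunks above), a privileged process can move itself into
 * a fresh network namespace roughly as below. The CLONE_NEWNET fallback
 * definition is an assumption for libc headers that predate the flag;
 * check it against <linux/sched.h>.
 */
#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>
#include <stdlib.h>

#ifndef CLONE_NEWNET
#define CLONE_NEWNET 0x40000000	/* new network namespace */
#endif

int main(void)
{
	/* Requires CAP_SYS_ADMIN; gives this process its own network stack. */
	if (unshare(CLONE_NEWNET) < 0) {
		perror("unshare(CLONE_NEWNET)");
		return EXIT_FAILURE;
	}
	/* Only a private (and still down) loopback device is visible now. */
	return system("ip link show") == 0 ? EXIT_SUCCESS : EXIT_FAILURE;
}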
+ source "net/packet/Kconfig" source "net/unix/Kconfig" source "net/xfrm/Kconfig" diff --git a/net/appletalk/aarp.c b/net/appletalk/aarp.c index 3d1655f983887593e8541994c17b59e502446667..6c5c6dc098ec3c34814202979d84ec4dcc951376 100644 --- a/net/appletalk/aarp.c +++ b/net/appletalk/aarp.c @@ -330,15 +330,19 @@ static void aarp_expire_timeout(unsigned long unused) static int aarp_device_event(struct notifier_block *this, unsigned long event, void *ptr) { + struct net_device *dev = ptr; int ct; + if (dev->nd_net != &init_net) + return NOTIFY_DONE; + if (event == NETDEV_DOWN) { write_lock_bh(&aarp_lock); for (ct = 0; ct < AARP_HASH_SIZE; ct++) { - __aarp_expire_device(&resolved[ct], ptr); - __aarp_expire_device(&unresolved[ct], ptr); - __aarp_expire_device(&proxies[ct], ptr); + __aarp_expire_device(&resolved[ct], dev); + __aarp_expire_device(&unresolved[ct], dev); + __aarp_expire_device(&proxies[ct], dev); } write_unlock_bh(&aarp_lock); @@ -712,6 +716,9 @@ static int aarp_rcv(struct sk_buff *skb, struct net_device *dev, struct atalk_addr sa, *ma, da; struct atalk_iface *ifa; + if (dev->nd_net != &init_net) + goto out0; + /* We only do Ethernet SNAP AARP. */ if (dev->type != ARPHRD_ETHER) goto out0; @@ -815,8 +822,6 @@ static int aarp_rcv(struct sk_buff *skb, struct net_device *dev, * address. So as a precaution flush any * entries we have for this address. */ - struct aarp_entry *a; - a = __aarp_find_entry(resolved[sa.s_node % (AARP_HASH_SIZE - 1)], skb->dev, &sa); @@ -990,6 +995,7 @@ static int aarp_seq_show(struct seq_file *seq, void *v) struct aarp_iter_state *iter = seq->private; struct aarp_entry *entry = v; unsigned long now = jiffies; + DECLARE_MAC_BUF(mac); if (v == SEQ_START_TOKEN) seq_puts(seq, @@ -1000,13 +1006,7 @@ static int aarp_seq_show(struct seq_file *seq, void *v) ntohs(entry->target_addr.s_net), (unsigned int) entry->target_addr.s_node, entry->dev ? 
entry->dev->name : "????"); - seq_printf(seq, "%02X:%02X:%02X:%02X:%02X:%02X", - entry->hwaddr[0] & 0xFF, - entry->hwaddr[1] & 0xFF, - entry->hwaddr[2] & 0xFF, - entry->hwaddr[3] & 0xFF, - entry->hwaddr[4] & 0xFF, - entry->hwaddr[5] & 0xFF); + seq_printf(seq, "%s", print_mac(mac, entry->hwaddr)); seq_printf(seq, " %8s", dt2str((long)entry->expires_at - (long)now)); if (iter->table == unresolved) diff --git a/net/appletalk/atalk_proc.c b/net/appletalk/atalk_proc.c index 87a582cc811195482a3e2ddd638525db6ae4f4fd..05d9652afcb6a57708219094b1f849d1207668fb 100644 --- a/net/appletalk/atalk_proc.c +++ b/net/appletalk/atalk_proc.c @@ -11,6 +11,7 @@ #include #include #include +#include #include #include @@ -271,7 +272,7 @@ int __init atalk_proc_init(void) struct proc_dir_entry *p; int rc = -ENOMEM; - atalk_proc_dir = proc_mkdir("atalk", proc_net); + atalk_proc_dir = proc_mkdir("atalk", init_net.proc_net); if (!atalk_proc_dir) goto out; atalk_proc_dir->owner = THIS_MODULE; @@ -306,7 +307,7 @@ int __init atalk_proc_init(void) out_route: remove_proc_entry("interface", atalk_proc_dir); out_interface: - remove_proc_entry("atalk", proc_net); + remove_proc_entry("atalk", init_net.proc_net); goto out; } @@ -316,5 +317,5 @@ void __exit atalk_proc_exit(void) remove_proc_entry("route", atalk_proc_dir); remove_proc_entry("socket", atalk_proc_dir); remove_proc_entry("arp", atalk_proc_dir); - remove_proc_entry("atalk", proc_net); + remove_proc_entry("atalk", init_net.proc_net); } diff --git a/net/appletalk/ddp.c b/net/appletalk/ddp.c index fbdfb1224ae1a7a908a9e155564c7f56aa8a7df1..7c0b5151d5265408614fda8c25e489aff50c5b41 100644 --- a/net/appletalk/ddp.c +++ b/net/appletalk/ddp.c @@ -647,9 +647,14 @@ static inline void atalk_dev_down(struct net_device *dev) static int ddp_device_event(struct notifier_block *this, unsigned long event, void *ptr) { + struct net_device *dev = ptr; + + if (dev->nd_net != &init_net) + return NOTIFY_DONE; + if (event == NETDEV_DOWN) /* Discard any use of this */ - atalk_dev_down(ptr); + atalk_dev_down(dev); return NOTIFY_DONE; } @@ -672,7 +677,7 @@ static int atif_ioctl(int cmd, void __user *arg) if (copy_from_user(&atreq, arg, sizeof(atreq))) return -EFAULT; - dev = __dev_get_by_name(atreq.ifr_name); + dev = __dev_get_by_name(&init_net, atreq.ifr_name); if (!dev) return -ENODEV; @@ -896,7 +901,7 @@ static int atrtr_ioctl(unsigned int cmd, void __user *arg) if (copy_from_user(name, rt.rt_dev, IFNAMSIZ-1)) return -EFAULT; name[IFNAMSIZ-1] = '\0'; - dev = __dev_get_by_name(name); + dev = __dev_get_by_name(&init_net, name); if (!dev) return -ENODEV; } @@ -1024,11 +1029,14 @@ static struct proto ddp_proto = { * Create a socket. Initialise the socket, blank the addresses * set the state. */ -static int atalk_create(struct socket *sock, int protocol) +static int atalk_create(struct net *net, struct socket *sock, int protocol) { struct sock *sk; int rc = -ESOCKTNOSUPPORT; + if (net != &init_net) + return -EAFNOSUPPORT; + /* * We permit SOCK_DGRAM and RAW is an extension. It is trivial to do * and gives you the full ELAP frame. 
Should be handy for CAP 8) @@ -1036,7 +1044,7 @@ static int atalk_create(struct socket *sock, int protocol) if (sock->type != SOCK_RAW && sock->type != SOCK_DGRAM) goto out; rc = -ENOMEM; - sk = sk_alloc(PF_APPLETALK, GFP_KERNEL, &ddp_proto, 1); + sk = sk_alloc(net, PF_APPLETALK, GFP_KERNEL, &ddp_proto, 1); if (!sk) goto out; rc = 0; @@ -1265,7 +1273,7 @@ static __inline__ int is_ip_over_ddp(struct sk_buff *skb) static int handle_ip_over_ddp(struct sk_buff *skb) { - struct net_device *dev = __dev_get_by_name("ipddp0"); + struct net_device *dev = __dev_get_by_name(&init_net, "ipddp0"); struct net_device_stats *stats; /* This needs to be able to handle ipddp"N" devices */ @@ -1398,6 +1406,9 @@ static int atalk_rcv(struct sk_buff *skb, struct net_device *dev, int origlen; __u16 len_hops; + if (dev->nd_net != &init_net) + goto freeit; + /* Don't mangle buffer if shared */ if (!(skb = skb_share_check(skb, GFP_ATOMIC))) goto out; @@ -1483,6 +1494,9 @@ static int atalk_rcv(struct sk_buff *skb, struct net_device *dev, static int ltalk_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt, struct net_device *orig_dev) { + if (dev->nd_net != &init_net) + goto freeit; + /* Expand any short form frames */ if (skb_mac_header(skb)[2] == 1) { struct ddpehdr *ddp; diff --git a/net/appletalk/dev.c b/net/appletalk/dev.c index 9e4dffc1e42359b91027868f1fc73bd8062c7368..d856a62ab50fda448896689ff210e235dc8d0e6d 100644 --- a/net/appletalk/dev.c +++ b/net/appletalk/dev.c @@ -24,11 +24,7 @@ static void ltalk_setup(struct net_device *dev) /* Fill in the fields of the device structure with localtalk-generic values. */ dev->change_mtu = ltalk_change_mtu; - dev->hard_header = NULL; - dev->rebuild_header = NULL; dev->set_mac_address = ltalk_mac_addr; - dev->hard_header_cache = NULL; - dev->header_cache_update= NULL; dev->type = ARPHRD_LOCALTLK; dev->hard_header_len = LTALK_HLEN; diff --git a/net/atm/br2684.c b/net/atm/br2684.c index c0f6861eefe35061061d7ae39d4ecde87d8777da..c742d37bfb972c631d45fda6bd1580d5216c5d3b 100644 --- a/net/atm/br2684.c +++ b/net/atm/br2684.c @@ -34,12 +34,6 @@ Author: Marcell GAL, 2000, XDSL Ltd, Hungary */ /* #define FASTER_VERSION */ -#ifdef DEBUG -#define DPRINTK(format, args...) printk(KERN_DEBUG "br2684: " format, ##args) -#else -#define DPRINTK(format, args...) -#endif - #ifdef SKB_DEBUG static void skb_debug(const struct sk_buff *skb) { @@ -180,7 +174,7 @@ static int br2684_xmit_vcc(struct sk_buff *skb, struct br2684_dev *brdev, skb_debug(skb); ATM_SKB(skb)->vcc = atmvcc = brvcc->atmvcc; - DPRINTK("atm_skb(%p)->vcc(%p)->dev(%p)\n", skb, atmvcc, atmvcc->dev); + pr_debug("atm_skb(%p)->vcc(%p)->dev(%p)\n", skb, atmvcc, atmvcc->dev); if (!atm_may_send(atmvcc, skb->truesize)) { /* we free this here for now, because we cannot know in a higher layer whether the skb point it supplied wasn't freed yet. 
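/*
 * Illustrative sketch, not part of this patch: the DPRINTK -> pr_debug
 * conversions in the ATM code above lean on the generic pr_debug() helper
 * from <linux/kernel.h>, which in kernels of this vintage reduces to
 * roughly the following -- messages compile away entirely unless the file
 * defines DEBUG (for example via "#define DEBUG 1" before the includes, or
 * a per-object CFLAGS_br2684.o += -DDEBUG in the Makefile; treat the exact
 * kbuild spelling as an assumption to verify).
 */
#ifdef DEBUG
#define pr_debug(fmt, arg...) \
	printk(KERN_DEBUG fmt, ##arg)
#else
#define pr_debug(fmt, arg...) \
	do { } while (0)
#endif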
@@ -209,11 +203,11 @@ static int br2684_start_xmit(struct sk_buff *skb, struct net_device *dev) struct br2684_dev *brdev = BRPRIV(dev); struct br2684_vcc *brvcc; - DPRINTK("br2684_start_xmit, skb->dst=%p\n", skb->dst); + pr_debug("br2684_start_xmit, skb->dst=%p\n", skb->dst); read_lock(&devs_lock); brvcc = pick_outgoing_vcc(skb, brdev); if (brvcc == NULL) { - DPRINTK("no vcc attached to dev %s\n", dev->name); + pr_debug("no vcc attached to dev %s\n", dev->name); brdev->stats.tx_errors++; brdev->stats.tx_carrier_errors++; /* netif_stop_queue(dev); */ @@ -239,7 +233,7 @@ static int br2684_start_xmit(struct sk_buff *skb, struct net_device *dev) static struct net_device_stats *br2684_get_stats(struct net_device *dev) { - DPRINTK("br2684_get_stats\n"); + pr_debug("br2684_get_stats\n"); return &BRPRIV(dev)->stats; } @@ -390,7 +384,7 @@ packet_fails_filter(__be16 type, struct br2684_vcc *brvcc, struct sk_buff *skb) static void br2684_close_vcc(struct br2684_vcc *brvcc) { - DPRINTK("removing VCC %p from dev %p\n", brvcc, brvcc->device); + pr_debug("removing VCC %p from dev %p\n", brvcc, brvcc->device); write_lock_irq(&devs_lock); list_del(&brvcc->brvccs); write_unlock_irq(&devs_lock); @@ -408,7 +402,7 @@ static void br2684_push(struct atm_vcc *atmvcc, struct sk_buff *skb) struct br2684_dev *brdev = BRPRIV(net_dev); int plen = sizeof(llc_oui_pid_pad) + ETH_HLEN; - DPRINTK("br2684_push\n"); + pr_debug("br2684_push\n"); if (unlikely(skb == NULL)) { /* skb==NULL means VCC is being destroyed */ @@ -425,7 +419,7 @@ static void br2684_push(struct atm_vcc *atmvcc, struct sk_buff *skb) skb_debug(skb); atm_return(atmvcc, skb->truesize); - DPRINTK("skb from brdev %p\n", brdev); + pr_debug("skb from brdev %p\n", brdev); if (brvcc->encaps == e_llc) { /* let us waste some time for checking the encapsulation. Note, that only 7 char is checked so frames with a valid FCS @@ -474,7 +468,7 @@ static void br2684_push(struct atm_vcc *atmvcc, struct sk_buff *skb) #endif /* CONFIG_ATM_BR2684_IPFILTER */ skb->dev = net_dev; ATM_SKB(skb)->vcc = atmvcc; /* needed ? */ - DPRINTK("received packet's protocol: %x\n", ntohs(skb->protocol)); + pr_debug("received packet's protocol: %x\n", ntohs(skb->protocol)); skb_debug(skb); if (unlikely(!(net_dev->flags & IFF_UP))) { /* sigh, interface is down */ @@ -532,7 +526,7 @@ Note: we do not have explicit unassign, but look at _push() err = -EINVAL; goto error; } - DPRINTK("br2684_regvcc vcc=%p, encaps=%d, brvcc=%p\n", atmvcc, be.encaps, + pr_debug("br2684_regvcc vcc=%p, encaps=%d, brvcc=%p\n", atmvcc, be.encaps, brvcc); if (list_empty(&brdev->brvccs) && !brdev->mac_was_set) { unsigned char *esi = atmvcc->dev->esi; @@ -612,7 +606,7 @@ static int br2684_create(void __user *arg) struct br2684_dev *brdev; struct atm_newif_br2684 ni; - DPRINTK("br2684_create\n"); + pr_debug("br2684_create\n"); if (copy_from_user(&ni, arg, sizeof ni)) { return -EFAULT; @@ -629,7 +623,7 @@ static int br2684_create(void __user *arg) brdev = BRPRIV(netdev); - DPRINTK("registered netdev %s\n", netdev->name); + pr_debug("registered netdev %s\n", netdev->name); /* open, stop, do_ioctl ? 
*/ err = register_netdev(netdev); if (err < 0) { @@ -715,17 +709,13 @@ static int br2684_seq_show(struct seq_file *seq, void *v) br2684_devs); const struct net_device *net_dev = brdev->net_dev; const struct br2684_vcc *brvcc; + DECLARE_MAC_BUF(mac); - seq_printf(seq, "dev %.16s: num=%d, mac=%02X:%02X:" - "%02X:%02X:%02X:%02X (%s)\n", net_dev->name, - brdev->number, - net_dev->dev_addr[0], - net_dev->dev_addr[1], - net_dev->dev_addr[2], - net_dev->dev_addr[3], - net_dev->dev_addr[4], - net_dev->dev_addr[5], - brdev->mac_was_set ? "set" : "auto"); + seq_printf(seq, "dev %.16s: num=%d, mac=%s (%s)\n", + net_dev->name, + brdev->number, + print_mac(mac, net_dev->dev_addr), + brdev->mac_was_set ? "set" : "auto"); list_for_each_entry(brvcc, &brdev->brvccs, brvccs) { seq_printf(seq, " vcc %d.%d.%d: encaps=%s" diff --git a/net/atm/clip.c b/net/atm/clip.c index ecf0f79b94ae0d17f9e061433a7e4a9f328f55d9..741742f0079715604430491a71de39288e429ff0 100644 --- a/net/atm/clip.c +++ b/net/atm/clip.c @@ -40,14 +40,6 @@ #include "resources.h" #include - -#if 0 -#define DPRINTK(format,args...) printk(format,##args) -#else -#define DPRINTK(format,args...) -#endif - - static struct net_device *clip_devs; static struct atm_vcc *atmarpd; static struct neigh_table clip_tbl; @@ -59,7 +51,7 @@ static int to_atmarpd(enum atmarp_ctrl_type type, int itf, __be32 ip) struct atmarp_ctrl *ctrl; struct sk_buff *skb; - DPRINTK("to_atmarpd(%d)\n", type); + pr_debug("to_atmarpd(%d)\n", type); if (!atmarpd) return -EUNATCH; skb = alloc_skb(sizeof(struct atmarp_ctrl),GFP_ATOMIC); @@ -79,7 +71,7 @@ static int to_atmarpd(enum atmarp_ctrl_type type, int itf, __be32 ip) static void link_vcc(struct clip_vcc *clip_vcc, struct atmarp_entry *entry) { - DPRINTK("link_vcc %p to entry %p (neigh %p)\n", clip_vcc, entry, + pr_debug("link_vcc %p to entry %p (neigh %p)\n", clip_vcc, entry, entry->neigh); clip_vcc->entry = entry; clip_vcc->xoff = 0; /* @@@ may overrun buffer by one packet */ @@ -134,7 +126,7 @@ static int neigh_check_cb(struct neighbour *n) unsigned long exp = cv->last_use + cv->idle_timeout; if (cv->idle_timeout && time_after(jiffies, exp)) { - DPRINTK("releasing vcc %p->%p of entry %p\n", + pr_debug("releasing vcc %p->%p of entry %p\n", cv, cv->vcc, entry); vcc_release_async(cv->vcc, -ETIMEDOUT); } @@ -146,7 +138,7 @@ static int neigh_check_cb(struct neighbour *n) if (atomic_read(&n->refcnt) > 1) { struct sk_buff *skb; - DPRINTK("destruction postponed with ref %d\n", + pr_debug("destruction postponed with ref %d\n", atomic_read(&n->refcnt)); while ((skb = skb_dequeue(&n->arp_queue)) != NULL) @@ -155,7 +147,7 @@ static int neigh_check_cb(struct neighbour *n) return 0; } - DPRINTK("expired neigh %p\n", n); + pr_debug("expired neigh %p\n", n); return 1; } @@ -171,14 +163,14 @@ static int clip_arp_rcv(struct sk_buff *skb) { struct atm_vcc *vcc; - DPRINTK("clip_arp_rcv\n"); + pr_debug("clip_arp_rcv\n"); vcc = ATM_SKB(skb)->vcc; if (!vcc || !atm_charge(vcc, skb->truesize)) { dev_kfree_skb_any(skb); return 0; } - DPRINTK("pushing to %p\n", vcc); - DPRINTK("using %p\n", CLIP_VCC(vcc)->old_push); + pr_debug("pushing to %p\n", vcc); + pr_debug("using %p\n", CLIP_VCC(vcc)->old_push); CLIP_VCC(vcc)->old_push(vcc, skb); return 0; } @@ -196,9 +188,9 @@ static void clip_push(struct atm_vcc *vcc, struct sk_buff *skb) { struct clip_vcc *clip_vcc = CLIP_VCC(vcc); - DPRINTK("clip push\n"); + pr_debug("clip push\n"); if (!skb) { - DPRINTK("removing VCC %p\n", clip_vcc); + pr_debug("removing VCC %p\n", clip_vcc); if (clip_vcc->entry) 
unlink_clip_vcc(clip_vcc); clip_vcc->old_push(vcc, NULL); /* pass on the bad news */ @@ -247,7 +239,7 @@ static void clip_pop(struct atm_vcc *vcc, struct sk_buff *skb) int old; unsigned long flags; - DPRINTK("clip_pop(vcc %p)\n", vcc); + pr_debug("clip_pop(vcc %p)\n", vcc); clip_vcc->old_pop(vcc, skb); /* skb->dev == NULL in outbound ARP packets */ if (!dev) @@ -263,7 +255,7 @@ static void clip_pop(struct atm_vcc *vcc, struct sk_buff *skb) static void clip_neigh_solicit(struct neighbour *neigh, struct sk_buff *skb) { - DPRINTK("clip_neigh_solicit (neigh %p, skb %p)\n", neigh, skb); + pr_debug("clip_neigh_solicit (neigh %p, skb %p)\n", neigh, skb); to_atmarpd(act_need, PRIV(neigh->dev)->number, NEIGH2ENTRY(neigh)->ip); } @@ -292,7 +284,7 @@ static int clip_constructor(struct neighbour *neigh) struct in_device *in_dev; struct neigh_parms *parms; - DPRINTK("clip_constructor (neigh %p, entry %p)\n", neigh, entry); + pr_debug("clip_constructor (neigh %p, entry %p)\n", neigh, entry); neigh->type = inet_addr_type(entry->ip); if (neigh->type != RTN_UNICAST) return -EINVAL; @@ -376,7 +368,7 @@ static int clip_start_xmit(struct sk_buff *skb, struct net_device *dev) int old; unsigned long flags; - DPRINTK("clip_start_xmit (skb %p)\n", skb); + pr_debug("clip_start_xmit (skb %p)\n", skb); if (!skb->dst) { printk(KERN_ERR "clip_start_xmit: skb->dst == NULL\n"); dev_kfree_skb(skb); @@ -412,9 +404,9 @@ static int clip_start_xmit(struct sk_buff *skb, struct net_device *dev) } return 0; } - DPRINTK("neigh %p, vccs %p\n", entry, entry->vccs); + pr_debug("neigh %p, vccs %p\n", entry, entry->vccs); ATM_SKB(skb)->vcc = vcc = entry->vccs->vcc; - DPRINTK("using neighbour %p, vcc %p\n", skb->dst->neighbour, vcc); + pr_debug("using neighbour %p, vcc %p\n", skb->dst->neighbour, vcc); if (entry->vccs->encap) { void *here; @@ -425,7 +417,7 @@ static int clip_start_xmit(struct sk_buff *skb, struct net_device *dev) atomic_add(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc); ATM_SKB(skb)->atm_options = vcc->atm_options; entry->vccs->last_use = jiffies; - DPRINTK("atm_skb(%p)->vcc(%p)->dev(%p)\n", skb, vcc, vcc->dev); + pr_debug("atm_skb(%p)->vcc(%p)->dev(%p)\n", skb, vcc, vcc->dev); old = xchg(&entry->vccs->xoff, 1); /* assume XOFF ... 
*/ if (old) { printk(KERN_WARNING "clip_start_xmit: XOFF->XOFF transition\n"); @@ -468,7 +460,7 @@ static int clip_mkip(struct atm_vcc *vcc, int timeout) clip_vcc = kmalloc(sizeof(struct clip_vcc), GFP_KERNEL); if (!clip_vcc) return -ENOMEM; - DPRINTK("mkip clip_vcc %p vcc %p\n", clip_vcc, vcc); + pr_debug("mkip clip_vcc %p vcc %p\n", clip_vcc, vcc); clip_vcc->vcc = vcc; vcc->user_back = clip_vcc; set_bit(ATM_VF_IS_CLIP, &vcc->flags); @@ -538,7 +530,7 @@ static int clip_setentry(struct atm_vcc *vcc, __be32 ip) printk(KERN_ERR "hiding hidden ATMARP entry\n"); return 0; } - DPRINTK("setentry: remove\n"); + pr_debug("setentry: remove\n"); unlink_clip_vcc(clip_vcc); return 0; } @@ -552,9 +544,9 @@ static int clip_setentry(struct atm_vcc *vcc, __be32 ip) entry = NEIGH2ENTRY(neigh); if (entry != clip_vcc->entry) { if (!clip_vcc->entry) - DPRINTK("setentry: add\n"); + pr_debug("setentry: add\n"); else { - DPRINTK("setentry: update\n"); + pr_debug("setentry: update\n"); unlink_clip_vcc(clip_vcc); } link_vcc(clip_vcc, entry); @@ -611,7 +603,7 @@ static int clip_create(int number) } clip_priv->next = clip_devs; clip_devs = dev; - DPRINTK("registered (net:%s)\n", dev->name); + pr_debug("registered (net:%s)\n", dev->name); return number; } @@ -620,6 +612,9 @@ static int clip_device_event(struct notifier_block *this, unsigned long event, { struct net_device *dev = arg; + if (dev->nd_net != &init_net) + return NOTIFY_DONE; + if (event == NETDEV_UNREGISTER) { neigh_ifdown(&clip_tbl, dev); return NOTIFY_DONE; @@ -631,16 +626,16 @@ static int clip_device_event(struct notifier_block *this, unsigned long event, switch (event) { case NETDEV_UP: - DPRINTK("clip_device_event NETDEV_UP\n"); + pr_debug("clip_device_event NETDEV_UP\n"); to_atmarpd(act_up, PRIV(dev)->number, 0); break; case NETDEV_GOING_DOWN: - DPRINTK("clip_device_event NETDEV_DOWN\n"); + pr_debug("clip_device_event NETDEV_DOWN\n"); to_atmarpd(act_down, PRIV(dev)->number, 0); break; case NETDEV_CHANGE: case NETDEV_CHANGEMTU: - DPRINTK("clip_device_event NETDEV_CHANGE*\n"); + pr_debug("clip_device_event NETDEV_CHANGE*\n"); to_atmarpd(act_change, PRIV(dev)->number, 0); break; } @@ -681,14 +676,14 @@ static struct notifier_block clip_inet_notifier = { static void atmarpd_close(struct atm_vcc *vcc) { - DPRINTK("atmarpd_close\n"); + pr_debug("atmarpd_close\n"); rtnl_lock(); atmarpd = NULL; skb_queue_purge(&sk_atm(vcc)->sk_receive_queue); rtnl_unlock(); - DPRINTK("(done)\n"); + pr_debug("(done)\n"); module_put(THIS_MODULE); } diff --git a/net/atm/common.c b/net/atm/common.c index 282d761454baaca115b57ac3995dcb4466949fe5..e166d9e0ffd949f0fe9246a615d7f0a7fbb54b9d 100644 --- a/net/atm/common.c +++ b/net/atm/common.c @@ -30,13 +30,6 @@ #include "addr.h" /* address registry */ #include "signaling.h" /* for WAITING and sigd_attach */ - -#if 0 -#define DPRINTK(format,args...) printk(KERN_DEBUG format,##args) -#else -#define DPRINTK(format,args...) 
-#endif - struct hlist_head vcc_hash[VCC_HTABLE_SIZE]; DEFINE_RWLOCK(vcc_sklist_lock); @@ -70,13 +63,13 @@ static struct sk_buff *alloc_tx(struct atm_vcc *vcc,unsigned int size) struct sock *sk = sk_atm(vcc); if (atomic_read(&sk->sk_wmem_alloc) && !atm_may_send(vcc, size)) { - DPRINTK("Sorry: wmem_alloc = %d, size = %d, sndbuf = %d\n", + pr_debug("Sorry: wmem_alloc = %d, size = %d, sndbuf = %d\n", atomic_read(&sk->sk_wmem_alloc), size, sk->sk_sndbuf); return NULL; } while (!(skb = alloc_skb(size,GFP_KERNEL))) schedule(); - DPRINTK("AlTx %d += %d\n", atomic_read(&sk->sk_wmem_alloc), + pr_debug("AlTx %d += %d\n", atomic_read(&sk->sk_wmem_alloc), skb->truesize); atomic_add(skb->truesize, &sk->sk_wmem_alloc); return skb; @@ -132,7 +125,7 @@ static struct proto vcc_proto = { .obj_size = sizeof(struct atm_vcc), }; -int vcc_create(struct socket *sock, int protocol, int family) +int vcc_create(struct net *net, struct socket *sock, int protocol, int family) { struct sock *sk; struct atm_vcc *vcc; @@ -140,7 +133,7 @@ int vcc_create(struct socket *sock, int protocol, int family) sock->sk = NULL; if (sock->type == SOCK_STREAM) return -EINVAL; - sk = sk_alloc(family, GFP_KERNEL, &vcc_proto, 1); + sk = sk_alloc(net, family, GFP_KERNEL, &vcc_proto, 1); if (!sk) return -ENOMEM; sock_init_data(sock, sk); @@ -392,10 +385,10 @@ static int __vcc_connect(struct atm_vcc *vcc, struct atm_dev *dev, short vpi, if (!error) error = adjust_tp(&vcc->qos.rxtp,vcc->qos.aal); if (error) goto fail; - DPRINTK("VCC %d.%d, AAL %d\n",vpi,vci,vcc->qos.aal); - DPRINTK(" TX: %d, PCR %d..%d, SDU %d\n",vcc->qos.txtp.traffic_class, + pr_debug("VCC %d.%d, AAL %d\n",vpi,vci,vcc->qos.aal); + pr_debug(" TX: %d, PCR %d..%d, SDU %d\n",vcc->qos.txtp.traffic_class, vcc->qos.txtp.min_pcr,vcc->qos.txtp.max_pcr,vcc->qos.txtp.max_sdu); - DPRINTK(" RX: %d, PCR %d..%d, SDU %d\n",vcc->qos.rxtp.traffic_class, + pr_debug(" RX: %d, PCR %d..%d, SDU %d\n",vcc->qos.rxtp.traffic_class, vcc->qos.rxtp.min_pcr,vcc->qos.rxtp.max_pcr,vcc->qos.rxtp.max_sdu); if (dev->ops->open) { @@ -420,7 +413,7 @@ int vcc_connect(struct socket *sock, int itf, short vpi, int vci) struct atm_vcc *vcc = ATM_SD(sock); int error; - DPRINTK("vcc_connect (vpi %d, vci %d)\n",vpi,vci); + pr_debug("vcc_connect (vpi %d, vci %d)\n",vpi,vci); if (sock->state == SS_CONNECTED) return -EISCONN; if (sock->state != SS_UNCONNECTED) @@ -433,7 +426,7 @@ int vcc_connect(struct socket *sock, int itf, short vpi, int vci) else if (test_bit(ATM_VF_PARTIAL,&vcc->flags)) return -EINVAL; - DPRINTK("vcc_connect (TX: cl %d,bw %d-%d,sdu %d; " + pr_debug("vcc_connect (TX: cl %d,bw %d-%d,sdu %d; " "RX: cl %d,bw %d-%d,sdu %d,AAL %s%d)\n", vcc->qos.txtp.traffic_class,vcc->qos.txtp.min_pcr, vcc->qos.txtp.max_pcr,vcc->qos.txtp.max_sdu, @@ -504,7 +497,7 @@ int vcc_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, if (error) return error; sock_recv_timestamp(msg, sk, skb); - DPRINTK("RcvM %d -= %d\n", atomic_read(&sk->rmem_alloc), skb->truesize); + pr_debug("RcvM %d -= %d\n", atomic_read(&sk->sk_rmem_alloc), skb->truesize); atm_return(vcc, skb->truesize); skb_free_datagram(sk, skb); return copied; diff --git a/net/atm/common.h b/net/atm/common.h index ad78c9e1117d2d3d8c2a9fb5fa995c1539b447cb..16f32c1fa1c9314d8bef751f6185a9173727cc62 100644 --- a/net/atm/common.h +++ b/net/atm/common.h @@ -10,7 +10,7 @@ #include /* for poll_table */ -int vcc_create(struct socket *sock, int protocol, int family); +int vcc_create(struct net *net, struct socket *sock, int protocol, int family); int 
vcc_release(struct socket *sock); int vcc_connect(struct socket *sock, int itf, short vpi, int vci); int vcc_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, diff --git a/net/atm/lec.c b/net/atm/lec.c index 59d5aa3366f229636ec7ca7a1ab8d821e8d97b41..7eb1b21a0e9415d0730e4942d9bb129f88067b2a 100644 --- a/net/atm/lec.c +++ b/net/atm/lec.c @@ -48,12 +48,6 @@ static unsigned char bridge_ula_lec[] = { 0x01, 0x80, 0xc2, 0x00, 0x00 }; #include "lec_arpc.h" #include "resources.h" -#if 0 -#define DPRINTK printk -#else -#define DPRINTK(format,args...) -#endif - #define DUMP_PACKETS 0 /* * 0 = None, * 1 = 30 first bytes @@ -272,8 +266,9 @@ static int lec_start_xmit(struct sk_buff *skb, struct net_device *dev) char buf[300]; int i = 0; #endif /* DUMP_PACKETS >0 */ + DECLARE_MAC_BUF(mac); - DPRINTK("lec_start_xmit called\n"); + pr_debug("lec_start_xmit called\n"); if (!priv->lecd) { printk("%s:No lecd attached\n", dev->name); priv->stats.tx_errors++; @@ -281,7 +276,7 @@ static int lec_start_xmit(struct sk_buff *skb, struct net_device *dev) return -EUNATCH; } - DPRINTK("skbuff head:%lx data:%lx tail:%lx end:%lx\n", + pr_debug("skbuff head:%lx data:%lx tail:%lx end:%lx\n", (long)skb->head, (long)skb->data, (long)skb_tail_pointer(skb), (long)skb_end_pointer(skb)); #if defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE) @@ -292,7 +287,7 @@ static int lec_start_xmit(struct sk_buff *skb, struct net_device *dev) /* Make sure we have room for lec_id */ if (skb_headroom(skb) < 2) { - DPRINTK("lec_start_xmit: reallocating skb\n"); + pr_debug("lec_start_xmit: reallocating skb\n"); skb2 = skb_realloc_headroom(skb, LEC_HEADER_LEN); kfree_skb(skb); if (skb2 == NULL) @@ -373,25 +368,21 @@ static int lec_start_xmit(struct sk_buff *skb, struct net_device *dev) #endif entry = NULL; vcc = lec_arp_resolve(priv, dst, is_rdesc, &entry); - DPRINTK("%s:vcc:%p vcc_flags:%x, entry:%p\n", dev->name, + pr_debug("%s:vcc:%p vcc_flags:%lx, entry:%p\n", dev->name, vcc, vcc ? 
vcc->flags : 0, entry); if (!vcc || !test_bit(ATM_VF_READY, &vcc->flags)) { if (entry && (entry->tx_wait.qlen < LEC_UNRES_QUE_LEN)) { - DPRINTK("%s:lec_start_xmit: queuing packet, ", + pr_debug("%s:lec_start_xmit: queuing packet, ", dev->name); - DPRINTK("MAC address 0x%02x:%02x:%02x:%02x:%02x:%02x\n", - lec_h->h_dest[0], lec_h->h_dest[1], - lec_h->h_dest[2], lec_h->h_dest[3], - lec_h->h_dest[4], lec_h->h_dest[5]); + pr_debug("MAC address %s\n", + print_mac(mac, lec_h->h_dest)); skb_queue_tail(&entry->tx_wait, skb); } else { - DPRINTK + pr_debug ("%s:lec_start_xmit: tx queue full or no arp entry, dropping, ", dev->name); - DPRINTK("MAC address 0x%02x:%02x:%02x:%02x:%02x:%02x\n", - lec_h->h_dest[0], lec_h->h_dest[1], - lec_h->h_dest[2], lec_h->h_dest[3], - lec_h->h_dest[4], lec_h->h_dest[5]); + pr_debug("MAC address %s\n", + print_mac(mac, lec_h->h_dest)); priv->stats.tx_dropped++; dev_kfree_skb(skb); } @@ -402,10 +393,9 @@ static int lec_start_xmit(struct sk_buff *skb, struct net_device *dev) #endif /* DUMP_PACKETS > 0 */ while (entry && (skb2 = skb_dequeue(&entry->tx_wait))) { - DPRINTK("lec.c: emptying tx queue, "); - DPRINTK("MAC address 0x%02x:%02x:%02x:%02x:%02x:%02x\n", - lec_h->h_dest[0], lec_h->h_dest[1], lec_h->h_dest[2], - lec_h->h_dest[3], lec_h->h_dest[4], lec_h->h_dest[5]); + pr_debug("lec.c: emptying tx queue, "); + pr_debug("MAC address %s\n", + print_mac(mac, lec_h->h_dest)); lec_send(vcc, skb2, priv); } @@ -459,12 +449,13 @@ static int lec_atm_send(struct atm_vcc *vcc, struct sk_buff *skb) struct lec_arp_table *entry; int i; char *tmp; /* FIXME */ + DECLARE_MAC_BUF(mac); atomic_sub(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc); mesg = (struct atmlec_msg *)skb->data; tmp = skb->data; tmp += sizeof(struct atmlec_msg); - DPRINTK("%s: msg from zeppelin:%d\n", dev->name, mesg->type); + pr_debug("%s: msg from zeppelin:%d\n", dev->name, mesg->type); switch (mesg->type) { case l_set_mac_addr: for (i = 0; i < 6; i++) { @@ -500,9 +491,9 @@ static int lec_atm_send(struct atm_vcc *vcc, struct sk_buff *skb) mesg->content.normal.atm_addr, mesg->content.normal.flag, mesg->content.normal.targetless_le_arp); - DPRINTK("lec: in l_arp_update\n"); + pr_debug("lec: in l_arp_update\n"); if (mesg->sizeoftlvs != 0) { /* LANE2 3.1.5 */ - DPRINTK("lec: LANE2 3.1.5, got tlvs, size %d\n", + pr_debug("lec: LANE2 3.1.5, got tlvs, size %d\n", mesg->sizeoftlvs); lane2_associate_ind(dev, mesg->content.normal.mac_addr, tmp, mesg->sizeoftlvs); @@ -544,14 +535,10 @@ static int lec_atm_send(struct atm_vcc *vcc, struct sk_buff *skb) { struct net_bridge_fdb_entry *f; - DPRINTK - ("%s: bridge zeppelin asks about 0x%02x:%02x:%02x:%02x:%02x:%02x\n", - dev->name, mesg->content.proxy.mac_addr[0], - mesg->content.proxy.mac_addr[1], - mesg->content.proxy.mac_addr[2], - mesg->content.proxy.mac_addr[3], - mesg->content.proxy.mac_addr[4], - mesg->content.proxy.mac_addr[5]); + pr_debug + ("%s: bridge zeppelin asks about %s\n", + dev->name, + print_mac(mac, mesg->content.proxy.mac_addr)); if (br_fdb_get_hook == NULL || dev->br_port == NULL) break; @@ -564,7 +551,7 @@ static int lec_atm_send(struct atm_vcc *vcc, struct sk_buff *skb) struct sk_buff *skb2; struct sock *sk; - DPRINTK + pr_debug ("%s: entry found, responding to zeppelin\n", dev->name); skb2 = @@ -670,7 +657,7 @@ send_to_lecd(struct lec_priv *priv, atmlec_msg_type type, sk->sk_data_ready(sk, skb->len); if (data != NULL) { - DPRINTK("lec: about to send %d bytes of data\n", data->len); + pr_debug("lec: about to send %d bytes of data\n", data->len); 
atm_force_charge(priv->lecd, data->truesize); skb_queue_tail(&sk->sk_receive_queue, data); sk->sk_data_ready(sk, skb->len); @@ -742,7 +729,7 @@ static void lec_push(struct atm_vcc *vcc, struct sk_buff *skb) vcc->vpi, vcc->vci); #endif if (!skb) { - DPRINTK("%s: null skb\n", dev->name); + pr_debug("%s: null skb\n", dev->name); lec_vcc_close(priv, vcc); return; } @@ -766,7 +753,7 @@ static void lec_push(struct atm_vcc *vcc, struct sk_buff *skb) if (memcmp(skb->data, lec_ctrl_magic, 4) == 0) { /* Control frame, to daemon */ struct sock *sk = sk_atm(vcc); - DPRINTK("%s: To daemon\n", dev->name); + pr_debug("%s: To daemon\n", dev->name); skb_queue_tail(&sk->sk_receive_queue, skb); sk->sk_data_ready(sk, skb->len); } else { /* Data frame, queue to protocol handlers */ @@ -780,7 +767,7 @@ static void lec_push(struct atm_vcc *vcc, struct sk_buff *skb) * Probably looping back, or if lecd is missing, * lecd has gone down */ - DPRINTK("Ignoring frame...\n"); + pr_debug("Ignoring frame...\n"); dev_kfree_skb(skb); return; } @@ -1442,9 +1429,9 @@ static void lane2_associate_ind(struct net_device *dev, u8 *mac_addr, #include #if 0 -#define DPRINTK(format,args...) +#define pr_debug(format,args...) /* -#define DPRINTK printk +#define pr_debug printk */ #endif #define DEBUG_ARP_TABLE 0 @@ -1513,7 +1500,7 @@ lec_arp_add(struct lec_priv *priv, struct lec_arp_table *entry) tmp = &priv->lec_arp_tables[HASH(entry->mac_addr[ETH_ALEN - 1])]; hlist_add_head(&entry->next, tmp); - DPRINTK("LEC_ARP: Added entry:%2.2x %2.2x %2.2x %2.2x %2.2x %2.2x\n", + pr_debug("LEC_ARP: Added entry:%2.2x %2.2x %2.2x %2.2x %2.2x %2.2x\n", 0xff & entry->mac_addr[0], 0xff & entry->mac_addr[1], 0xff & entry->mac_addr[2], 0xff & entry->mac_addr[3], 0xff & entry->mac_addr[4], 0xff & entry->mac_addr[5]); @@ -1555,7 +1542,7 @@ lec_arp_remove(struct lec_priv *priv, struct lec_arp_table *to_remove) } skb_queue_purge(&to_remove->tx_wait); /* FIXME: good place for this? */ - DPRINTK("LEC_ARP: Removed entry:%2.2x %2.2x %2.2x %2.2x %2.2x %2.2x\n", + pr_debug("LEC_ARP: Removed entry:%2.2x %2.2x %2.2x %2.2x %2.2x %2.2x\n", 0xff & to_remove->mac_addr[0], 0xff & to_remove->mac_addr[1], 0xff & to_remove->mac_addr[2], 0xff & to_remove->mac_addr[3], 0xff & to_remove->mac_addr[4], 0xff & to_remove->mac_addr[5]); @@ -1777,7 +1764,7 @@ static struct lec_arp_table *lec_arp_find(struct lec_priv *priv, struct hlist_head *head; struct lec_arp_table *entry; - DPRINTK("LEC_ARP: lec_arp_find :%2.2x %2.2x %2.2x %2.2x %2.2x %2.2x\n", + pr_debug("LEC_ARP: lec_arp_find :%2.2x %2.2x %2.2x %2.2x %2.2x %2.2x\n", mac_addr[0] & 0xff, mac_addr[1] & 0xff, mac_addr[2] & 0xff, mac_addr[3] & 0xff, mac_addr[4] & 0xff, mac_addr[5] & 0xff); @@ -1819,7 +1806,7 @@ static void lec_arp_expire_arp(unsigned long data) entry = (struct lec_arp_table *)data; - DPRINTK("lec_arp_expire_arp\n"); + pr_debug("lec_arp_expire_arp\n"); if (entry->status == ESI_ARP_PENDING) { if (entry->no_tries <= entry->priv->max_retry_count) { if (entry->is_rdesc) @@ -1843,7 +1830,7 @@ static void lec_arp_expire_vcc(unsigned long data) del_timer(&to_remove->timer); - DPRINTK("LEC_ARP %p %p: lec_arp_expire_vcc vpi:%d vci:%d\n", + pr_debug("LEC_ARP %p %p: lec_arp_expire_vcc vpi:%d vci:%d\n", to_remove, priv, to_remove->vcc ? to_remove->recv_vcc->vpi : 0, to_remove->vcc ? 
to_remove->recv_vcc->vci : 0); @@ -1883,7 +1870,7 @@ static void lec_arp_check_expire(struct work_struct *work) unsigned long time_to_check; int i; - DPRINTK("lec_arp_check_expire %p\n", priv); + pr_debug("lec_arp_check_expire %p\n", priv); now = jiffies; restart: spin_lock_irqsave(&priv->lec_arp_lock, flags); @@ -1895,13 +1882,13 @@ static void lec_arp_check_expire(struct work_struct *work) else time_to_check = priv->aging_time; - DPRINTK("About to expire: %lx - %lx > %lx\n", + pr_debug("About to expire: %lx - %lx > %lx\n", now, entry->last_used, time_to_check); if (time_after(now, entry->last_used + time_to_check) && !(entry->flags & LEC_PERMANENT_FLAG) && !(entry->mac_addr[0] & 0x01)) { /* LANE2: 7.1.20 */ /* Remove entry */ - DPRINTK("LEC:Entry timed out\n"); + pr_debug("LEC:Entry timed out\n"); lec_arp_remove(priv, entry); lec_arp_put(entry); } else { @@ -1999,7 +1986,7 @@ static struct atm_vcc *lec_arp_resolve(struct lec_priv *priv, entry->packets_flooded < priv->maximum_unknown_frame_count) { entry->packets_flooded++; - DPRINTK("LEC_ARP: Flooding..\n"); + pr_debug("LEC_ARP: Flooding..\n"); found = priv->mcast_vcc; goto out; } @@ -2010,13 +1997,13 @@ static struct atm_vcc *lec_arp_resolve(struct lec_priv *priv, */ lec_arp_hold(entry); *ret_entry = entry; - DPRINTK("lec: entry->status %d entry->vcc %p\n", entry->status, + pr_debug("lec: entry->status %d entry->vcc %p\n", entry->status, entry->vcc); found = NULL; } else { /* No matching entry was found */ entry = make_entry(priv, mac_to_find); - DPRINTK("LEC_ARP: Making entry\n"); + pr_debug("LEC_ARP: Making entry\n"); if (!entry) { found = priv->mcast_vcc; goto out; @@ -2053,7 +2040,7 @@ lec_addr_delete(struct lec_priv *priv, unsigned char *atm_addr, struct lec_arp_table *entry; int i; - DPRINTK("lec_addr_delete\n"); + pr_debug("lec_addr_delete\n"); spin_lock_irqsave(&priv->lec_arp_lock, flags); for (i = 0; i < LEC_ARP_TABLE_SIZE; i++) { hlist_for_each_entry_safe(entry, node, next, &priv->lec_arp_tables[i], next) { @@ -2084,8 +2071,8 @@ lec_arp_update(struct lec_priv *priv, unsigned char *mac_addr, struct lec_arp_table *entry, *tmp; int i; - DPRINTK("lec:%s", (targetless_le_arp) ? "targetless " : " "); - DPRINTK("lec_arp_update mac:%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x\n", + pr_debug("lec:%s", (targetless_le_arp) ? "targetless " : " "); + pr_debug("lec_arp_update mac:%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x\n", mac_addr[0], mac_addr[1], mac_addr[2], mac_addr[3], mac_addr[4], mac_addr[5]); @@ -2122,7 +2109,7 @@ lec_arp_update(struct lec_priv *priv, unsigned char *mac_addr, entry->flags |= LEC_REMOTE_FLAG; else entry->flags &= ~LEC_REMOTE_FLAG; - DPRINTK("After update\n"); + pr_debug("After update\n"); dump_arp_table(priv); goto out; } @@ -2166,7 +2153,7 @@ lec_arp_update(struct lec_priv *priv, unsigned char *mac_addr, entry->status = ESI_VC_PENDING; send_to_lecd(priv, l_svc_setup, entry->mac_addr, atm_addr, NULL); } - DPRINTK("After update2\n"); + pr_debug("After update2\n"); dump_arp_table(priv); out: spin_unlock_irqrestore(&priv->lec_arp_lock, flags); @@ -2189,7 +2176,7 @@ lec_vcc_added(struct lec_priv *priv, struct atmlec_ioc *ioc_data, if (ioc_data->receive == 2) { /* Vcc for Multicast Forward. No timer, LANEv2 7.1.20 and 2.3.5.3 */ - DPRINTK("LEC_ARP: Attaching mcast forward\n"); + pr_debug("LEC_ARP: Attaching mcast forward\n"); #if 0 entry = lec_arp_find(priv, bus_mac); if (!entry) { @@ -2214,7 +2201,7 @@ lec_vcc_added(struct lec_priv *priv, struct atmlec_ioc *ioc_data, * Vcc which we don't want to make default vcc, * attach it anyway. 
*/ - DPRINTK + pr_debug ("LEC_ARP:Attaching data direct, not default: " "%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x\n", ioc_data->atm_addr[0], ioc_data->atm_addr[1], @@ -2242,7 +2229,7 @@ lec_vcc_added(struct lec_priv *priv, struct atmlec_ioc *ioc_data, dump_arp_table(priv); goto out; } - DPRINTK + pr_debug ("LEC_ARP:Attaching data direct, default: " "%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x\n", ioc_data->atm_addr[0], ioc_data->atm_addr[1], @@ -2260,8 +2247,8 @@ lec_vcc_added(struct lec_priv *priv, struct atmlec_ioc *ioc_data, if (memcmp (ioc_data->atm_addr, entry->atm_addr, ATM_ESA_LEN) == 0) { - DPRINTK("LEC_ARP: Attaching data direct\n"); - DPRINTK("Currently -> Vcc: %d, Rvcc:%d\n", + pr_debug("LEC_ARP: Attaching data direct\n"); + pr_debug("Currently -> Vcc: %d, Rvcc:%d\n", entry->vcc ? entry->vcc->vci : 0, entry->recv_vcc ? entry->recv_vcc-> vci : 0); @@ -2303,7 +2290,7 @@ lec_vcc_added(struct lec_priv *priv, struct atmlec_ioc *ioc_data, } } if (found_entry) { - DPRINTK("After vcc was added\n"); + pr_debug("After vcc was added\n"); dump_arp_table(priv); goto out; } @@ -2323,7 +2310,7 @@ lec_vcc_added(struct lec_priv *priv, struct atmlec_ioc *ioc_data, entry->timer.expires = jiffies + priv->vcc_timeout_period; entry->timer.function = lec_arp_expire_vcc; add_timer(&entry->timer); - DPRINTK("After vcc was added\n"); + pr_debug("After vcc was added\n"); dump_arp_table(priv); out: spin_unlock_irqrestore(&priv->lec_arp_lock, flags); @@ -2336,7 +2323,7 @@ static void lec_flush_complete(struct lec_priv *priv, unsigned long tran_id) struct lec_arp_table *entry; int i; - DPRINTK("LEC:lec_flush_complete %lx\n", tran_id); + pr_debug("LEC:lec_flush_complete %lx\n", tran_id); restart: spin_lock_irqsave(&priv->lec_arp_lock, flags); for (i = 0; i < LEC_ARP_TABLE_SIZE; i++) { @@ -2353,7 +2340,7 @@ static void lec_flush_complete(struct lec_priv *priv, unsigned long tran_id) entry->last_used = jiffies; entry->status = ESI_FORWARD_DIRECT; lec_arp_put(entry); - DPRINTK("LEC_ARP: Flushed\n"); + pr_debug("LEC_ARP: Flushed\n"); goto restart; } } @@ -2376,7 +2363,7 @@ lec_set_flush_tran_id(struct lec_priv *priv, hlist_for_each_entry(entry, node, &priv->lec_arp_tables[i], next) { if (!memcmp(atm_addr, entry->atm_addr, ATM_ESA_LEN)) { entry->flush_tran_id = tran_id; - DPRINTK("Set flush transaction id to %lx for %p\n", + pr_debug("Set flush transaction id to %lx for %p\n", tran_id, entry); } } @@ -2427,7 +2414,7 @@ static void lec_vcc_close(struct lec_priv *priv, struct atm_vcc *vcc) struct lec_arp_table *entry; int i; - DPRINTK("LEC_ARP: lec_vcc_close vpi:%d vci:%d\n", vcc->vpi, vcc->vci); + pr_debug("LEC_ARP: lec_vcc_close vpi:%d vci:%d\n", vcc->vpi, vcc->vci); dump_arp_table(priv); spin_lock_irqsave(&priv->lec_arp_lock, flags); @@ -2510,7 +2497,7 @@ lec_arp_check_empties(struct lec_priv *priv, goto out; } } - DPRINTK("LEC_ARP: Arp_check_empties: entry not found!\n"); + pr_debug("LEC_ARP: Arp_check_empties: entry not found!\n"); out: spin_unlock_irqrestore(&priv->lec_arp_lock, flags); } diff --git a/net/atm/mpc.c b/net/atm/mpc.c index 7c85aa551d5e98977e7e9f91a0a6949f31bf9544..2086396de177557a71d9e1da1a7c52bf218ae359 100644 --- a/net/atm/mpc.c +++ b/net/atm/mpc.c @@ -244,7 +244,7 @@ static struct net_device *find_lec_by_itfnum(int itf) char name[IFNAMSIZ]; sprintf(name, "lec%d", itf); - dev = dev_get_by_name(name); + dev = dev_get_by_name(&init_net, name); return dev; } @@ -956,6 +956,10 @@ 
static int mpoa_event_listener(struct notifier_block *mpoa_notifier, unsigned lo struct lec_priv *priv; dev = (struct net_device *)dev_ptr; + + if (dev->nd_net != &init_net) + return NOTIFY_DONE; + if (dev->name == NULL || strncmp(dev->name, "lec", 3)) return NOTIFY_DONE; /* we are only interested in lec:s */ diff --git a/net/atm/pppoatm.c b/net/atm/pppoatm.c index 19d5dfc0702f2477447e4f228801e64794d7e68f..0af84cd4f65bdba5c70888e4aa6ce01cb54cd41b 100644 --- a/net/atm/pppoatm.c +++ b/net/atm/pppoatm.c @@ -46,13 +46,6 @@ #include "common.h" -#if 0 -#define DPRINTK(format, args...) \ - printk(KERN_DEBUG "pppoatm: " format, ##args) -#else -#define DPRINTK(format, args...) -#endif - enum pppoatm_encaps { e_autodetect = PPPOATM_ENCAPS_AUTODETECT, e_vc = PPPOATM_ENCAPS_VC, @@ -139,9 +132,9 @@ static void pppoatm_unassign_vcc(struct atm_vcc *atmvcc) static void pppoatm_push(struct atm_vcc *atmvcc, struct sk_buff *skb) { struct pppoatm_vcc *pvcc = atmvcc_to_pvcc(atmvcc); - DPRINTK("pppoatm push\n"); + pr_debug("pppoatm push\n"); if (skb == NULL) { /* VCC was closed */ - DPRINTK("removing ATMPPP VCC %p\n", pvcc); + pr_debug("removing ATMPPP VCC %p\n", pvcc); pppoatm_unassign_vcc(atmvcc); atmvcc->push(atmvcc, NULL); /* Pass along bad news */ return; @@ -172,9 +165,8 @@ static void pppoatm_push(struct atm_vcc *atmvcc, struct sk_buff *skb) pvcc->chan.mtu += LLC_LEN; break; } - DPRINTK("(unit %d): Couldn't autodetect yet " + pr_debug("Couldn't autodetect yet " "(skb: %02X %02X %02X %02X %02X %02X)\n", - pvcc->chan.unit, skb->data[0], skb->data[1], skb->data[2], skb->data[3], skb->data[4], skb->data[5]); goto error; @@ -202,8 +194,7 @@ static int pppoatm_send(struct ppp_channel *chan, struct sk_buff *skb) { struct pppoatm_vcc *pvcc = chan_to_pvcc(chan); ATM_SKB(skb)->vcc = pvcc->atmvcc; - DPRINTK("(unit %d): pppoatm_send (skb=0x%p, vcc=0x%p)\n", - pvcc->chan.unit, skb, pvcc->atmvcc); + pr_debug("pppoatm_send (skb=0x%p, vcc=0x%p)\n", skb, pvcc->atmvcc); if (skb->data[0] == '\0' && (pvcc->flags & SC_COMP_PROT)) (void) skb_pull(skb, 1); switch (pvcc->encaps) { /* LLC encapsulation needed */ @@ -228,16 +219,14 @@ static int pppoatm_send(struct ppp_channel *chan, struct sk_buff *skb) goto nospace; break; case e_autodetect: - DPRINTK("(unit %d): Trying to send without setting encaps!\n", - pvcc->chan.unit); + pr_debug("Trying to send without setting encaps!\n"); kfree_skb(skb); return 1; } atomic_add(skb->truesize, &sk_atm(ATM_SKB(skb)->vcc)->sk_wmem_alloc); ATM_SKB(skb)->atm_options = ATM_SKB(skb)->vcc->atm_options; - DPRINTK("(unit %d): atm_skb(%p)->vcc(%p)->dev(%p)\n", - pvcc->chan.unit, skb, ATM_SKB(skb)->vcc, + pr_debug("atm_skb(%p)->vcc(%p)->dev(%p)\n", skb, ATM_SKB(skb)->vcc, ATM_SKB(skb)->vcc->dev); return ATM_SKB(skb)->vcc->send(ATM_SKB(skb)->vcc, skb) ? DROP_PACKET : 1; diff --git a/net/atm/proc.c b/net/atm/proc.c index 99fc1fe950eeba01f4a1de204cfbf0b583bdd55f..5d9d5ffba145d5295ff9dab749739547376ca0ca 100644 --- a/net/atm/proc.c +++ b/net/atm/proc.c @@ -22,6 +22,7 @@ #include #include #include /* for __init */ +#include #include #include #include @@ -175,7 +176,7 @@ static void pvc_info(struct seq_file *seq, struct atm_vcc *vcc) seq_printf(seq, "%3d %3d %5d %-3s %7d %-5s %7d %-6s", vcc->dev->number,vcc->vpi,vcc->vci, - vcc->qos.aal >= sizeof(aal_name)/sizeof(aal_name[0]) ? "err" : + vcc->qos.aal >= ARRAY_SIZE(aal_name) ? 
"err" : aal_name[vcc->qos.aal],vcc->qos.rxtp.min_pcr, class_name[vcc->qos.rxtp.traffic_class],vcc->qos.txtp.min_pcr, class_name[vcc->qos.txtp.traffic_class]); @@ -475,7 +476,7 @@ static void atm_proc_dirs_remove(void) if (e->dirent) remove_proc_entry(e->name, atm_proc_root); } - remove_proc_entry("net/atm", NULL); + remove_proc_entry("atm", init_net.proc_net); } int __init atm_proc_init(void) @@ -483,7 +484,7 @@ int __init atm_proc_init(void) static struct atm_proc_entry *e; int ret; - atm_proc_root = proc_mkdir("net/atm",NULL); + atm_proc_root = proc_mkdir("atm", init_net.proc_net); if (!atm_proc_root) goto err_out; for (e = atm_proc_ents; e->name; e++) { diff --git a/net/atm/pvc.c b/net/atm/pvc.c index 848e6e191cc7179f30c3cfdcfb4007f53c5e8a85..43e8bf5ed0019ceceea7737bf7edbe983c4d8892 100644 --- a/net/atm/pvc.c +++ b/net/atm/pvc.c @@ -124,10 +124,13 @@ static const struct proto_ops pvc_proto_ops = { }; -static int pvc_create(struct socket *sock,int protocol) +static int pvc_create(struct net *net, struct socket *sock,int protocol) { + if (net != &init_net) + return -EAFNOSUPPORT; + sock->ops = &pvc_proto_ops; - return vcc_create(sock, protocol, PF_ATMPVC); + return vcc_create(net, sock, protocol, PF_ATMPVC); } diff --git a/net/atm/raw.c b/net/atm/raw.c index 1378f61c5c3178876aeffb0438621efdb91394c7..b0a2d8cb67447d70bed4b87b89b657c1c55d8f2c 100644 --- a/net/atm/raw.c +++ b/net/atm/raw.c @@ -13,14 +13,6 @@ #include "common.h" #include "protocols.h" - -#if 0 -#define DPRINTK(format,args...) printk(KERN_DEBUG format,##args) -#else -#define DPRINTK(format,args...) -#endif - - /* * SKB == NULL indicates that the link is being closed */ @@ -40,8 +32,8 @@ static void atm_pop_raw(struct atm_vcc *vcc,struct sk_buff *skb) { struct sock *sk = sk_atm(vcc); - DPRINTK("APopR (%d) %d -= %d\n", vcc->vci, sk->sk_wmem_alloc, - skb->truesize); + pr_debug("APopR (%d) %d -= %d\n", vcc->vci, + atomic_read(&sk->sk_wmem_alloc), skb->truesize); atomic_sub(skb->truesize, &sk->sk_wmem_alloc); dev_kfree_skb_any(skb); sk->sk_write_space(sk); diff --git a/net/atm/signaling.c b/net/atm/signaling.c index d14baaf1f4c32cda834d8227982331c81c61eed0..22992140052290a92ef783a0ed6bbb9b5eafd861 100644 --- a/net/atm/signaling.c +++ b/net/atm/signaling.c @@ -23,13 +23,6 @@ Danger: may cause nasty hangs if the demon crashes. */ -#if 0 -#define DPRINTK(format,args...) printk(KERN_DEBUG format,##args) -#else -#define DPRINTK(format,args...) 
-#endif - - struct atm_vcc *sigd = NULL; #ifdef WAIT_FOR_DEMON static DECLARE_WAIT_QUEUE_HEAD(sigd_sleep); @@ -44,14 +37,14 @@ static void sigd_put_skb(struct sk_buff *skb) add_wait_queue(&sigd_sleep,&wait); while (!sigd) { set_current_state(TASK_UNINTERRUPTIBLE); - DPRINTK("atmsvc: waiting for signaling demon...\n"); + pr_debug("atmsvc: waiting for signaling demon...\n"); schedule(); } current->state = TASK_RUNNING; remove_wait_queue(&sigd_sleep,&wait); #else if (!sigd) { - DPRINTK("atmsvc: no signaling demon\n"); + pr_debug("atmsvc: no signaling demon\n"); kfree_skb(skb); return; } @@ -96,9 +89,9 @@ static int sigd_send(struct atm_vcc *vcc,struct sk_buff *skb) msg = (struct atmsvc_msg *) skb->data; atomic_sub(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc); - DPRINTK("sigd_send %d (0x%lx)\n",(int) msg->type, - (unsigned long) msg->vcc); vcc = *(struct atm_vcc **) &msg->vcc; + pr_debug("sigd_send %d (0x%lx)\n",(int) msg->type, + (unsigned long) vcc); sk = sk_atm(vcc); switch (msg->type) { @@ -130,7 +123,7 @@ static int sigd_send(struct atm_vcc *vcc,struct sk_buff *skb) case as_indicate: vcc = *(struct atm_vcc **) &msg->listen_vcc; sk = sk_atm(vcc); - DPRINTK("as_indicate!!!\n"); + pr_debug("as_indicate!!!\n"); lock_sock(sk); if (sk_acceptq_is_full(sk)) { sigd_enq(NULL,as_reject,vcc,NULL,NULL); @@ -139,7 +132,7 @@ static int sigd_send(struct atm_vcc *vcc,struct sk_buff *skb) } sk->sk_ack_backlog++; skb_queue_tail(&sk->sk_receive_queue, skb); - DPRINTK("waking sk->sk_sleep 0x%p\n", sk->sk_sleep); + pr_debug("waking sk->sk_sleep 0x%p\n", sk->sk_sleep); sk->sk_state_change(sk); as_indicate_complete: release_sock(sk); @@ -176,7 +169,7 @@ void sigd_enq2(struct atm_vcc *vcc,enum atmsvc_msg_type type, struct atmsvc_msg *msg; static unsigned session = 0; - DPRINTK("sigd_enq %d (0x%p)\n",(int) type,vcc); + pr_debug("sigd_enq %d (0x%p)\n",(int) type,vcc); while (!(skb = alloc_skb(sizeof(struct atmsvc_msg),GFP_KERNEL))) schedule(); msg = (struct atmsvc_msg *) skb_put(skb,sizeof(struct atmsvc_msg)); @@ -226,7 +219,7 @@ static void sigd_close(struct atm_vcc *vcc) struct sock *s; int i; - DPRINTK("sigd_close\n"); + pr_debug("sigd_close\n"); sigd = NULL; if (skb_peek(&sk_atm(vcc)->sk_receive_queue)) printk(KERN_ERR "sigd_close: closing with requests pending\n"); @@ -237,7 +230,7 @@ static void sigd_close(struct atm_vcc *vcc) struct hlist_head *head = &vcc_hash[i]; sk_for_each(s, node, head) { - struct atm_vcc *vcc = atm_sk(s); + vcc = atm_sk(s); purge_vcc(vcc); } @@ -263,7 +256,7 @@ static struct atm_dev sigd_dev = { int sigd_attach(struct atm_vcc *vcc) { if (sigd) return -EADDRINUSE; - DPRINTK("sigd_attach\n"); + pr_debug("sigd_attach\n"); sigd = vcc; vcc->dev = &sigd_dev; vcc_insert_socket(sk_atm(vcc)); diff --git a/net/atm/svc.c b/net/atm/svc.c index 876ec7b47a2f7a8eff8b3882f7e69e9b32cac9ea..daf9a48a7db04454392fda10fd1f8c142f27b930 100644 --- a/net/atm/svc.c +++ b/net/atm/svc.c @@ -25,16 +25,7 @@ #include "signaling.h" #include "addr.h" - -#if 0 -#define DPRINTK(format,args...) printk(KERN_DEBUG format,##args) -#else -#define DPRINTK(format,args...) 
-#endif - - -static int svc_create(struct socket *sock,int protocol); - +static int svc_create(struct net *net, struct socket *sock,int protocol); /* * Note: since all this is still nicely synchronized with the signaling demon, @@ -55,7 +46,7 @@ static void svc_disconnect(struct atm_vcc *vcc) struct sk_buff *skb; struct sock *sk = sk_atm(vcc); - DPRINTK("svc_disconnect %p\n",vcc); + pr_debug("svc_disconnect %p\n",vcc); if (test_bit(ATM_VF_REGIS,&vcc->flags)) { prepare_to_wait(sk->sk_sleep, &wait, TASK_UNINTERRUPTIBLE); sigd_enq(vcc,as_close,NULL,NULL,NULL); @@ -69,7 +60,7 @@ static void svc_disconnect(struct atm_vcc *vcc) as_indicate has been answered */ while ((skb = skb_dequeue(&sk->sk_receive_queue)) != NULL) { atm_return(vcc, skb->truesize); - DPRINTK("LISTEN REL\n"); + pr_debug("LISTEN REL\n"); sigd_enq2(NULL,as_reject,vcc,NULL,NULL,&vcc->qos,0); dev_kfree_skb(skb); } @@ -85,7 +76,7 @@ static int svc_release(struct socket *sock) if (sk) { vcc = ATM_SD(sock); - DPRINTK("svc_release %p\n", vcc); + pr_debug("svc_release %p\n", vcc); clear_bit(ATM_VF_READY, &vcc->flags); /* VCC pointer is used as a reference, so we must not free it (thereby subjecting it to re-use) before all pending connections @@ -162,7 +153,7 @@ static int svc_connect(struct socket *sock,struct sockaddr *sockaddr, struct atm_vcc *vcc = ATM_SD(sock); int error; - DPRINTK("svc_connect %p\n",vcc); + pr_debug("svc_connect %p\n",vcc); lock_sock(sk); if (sockaddr_len != sizeof(struct sockaddr_atmsvc)) { error = -EINVAL; @@ -224,7 +215,7 @@ static int svc_connect(struct socket *sock,struct sockaddr *sockaddr, prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE); continue; } - DPRINTK("*ABORT*\n"); + pr_debug("*ABORT*\n"); /* * This is tricky: * Kernel ---close--> Demon @@ -295,7 +286,7 @@ static int svc_listen(struct socket *sock,int backlog) struct atm_vcc *vcc = ATM_SD(sock); int error; - DPRINTK("svc_listen %p\n",vcc); + pr_debug("svc_listen %p\n",vcc); lock_sock(sk); /* let server handle listen on unbound sockets */ if (test_bit(ATM_VF_SESSION,&vcc->flags)) { @@ -335,13 +326,13 @@ static int svc_accept(struct socket *sock,struct socket *newsock,int flags) lock_sock(sk); - error = svc_create(newsock,0); + error = svc_create(sk->sk_net, newsock,0); if (error) goto out; new_vcc = ATM_SD(newsock); - DPRINTK("svc_accept %p -> %p\n",old_vcc,new_vcc); + pr_debug("svc_accept %p -> %p\n",old_vcc,new_vcc); while (1) { DEFINE_WAIT(wait); @@ -545,7 +536,7 @@ static int svc_addparty(struct socket *sock, struct sockaddr *sockaddr, error = -EINPROGRESS; goto out; } - DPRINTK("svc_addparty added wait queue\n"); + pr_debug("svc_addparty added wait queue\n"); while (test_bit(ATM_VF_WAITING, &vcc->flags) && sigd) { schedule(); prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE); @@ -636,12 +627,15 @@ static const struct proto_ops svc_proto_ops = { }; -static int svc_create(struct socket *sock,int protocol) +static int svc_create(struct net *net, struct socket *sock,int protocol) { int error; + if (net != &init_net) + return -EAFNOSUPPORT; + sock->ops = &svc_proto_ops; - error = vcc_create(sock, protocol, AF_ATMSVC); + error = vcc_create(net, sock, protocol, AF_ATMSVC); if (error) return error; ATM_SD(sock)->local.sas_family = AF_ATMSVC; ATM_SD(sock)->remote.sas_family = AF_ATMSVC; diff --git a/net/ax25/af_ax25.c b/net/ax25/af_ax25.c index dae2a42d3d865a96af92329cedf0b11a186da755..993e5c75e90908c22c079f02ae5646f250fbfa8f 100644 --- a/net/ax25/af_ax25.c +++ b/net/ax25/af_ax25.c @@ -44,6 +44,7 @@ #include #include #include +#include 
#include #include #include @@ -103,6 +104,9 @@ static int ax25_device_event(struct notifier_block *this, unsigned long event, { struct net_device *dev = (struct net_device *)ptr; + if (dev->nd_net != &init_net) + return NOTIFY_DONE; + /* Reject non AX.25 devices */ if (dev->type != ARPHRD_AX25) return NOTIFY_DONE; @@ -627,7 +631,7 @@ static int ax25_setsockopt(struct socket *sock, int level, int optname, break; } - dev = dev_get_by_name(devname); + dev = dev_get_by_name(&init_net, devname); if (dev == NULL) { res = -ENODEV; break; @@ -779,11 +783,14 @@ static struct proto ax25_proto = { .obj_size = sizeof(struct sock), }; -static int ax25_create(struct socket *sock, int protocol) +static int ax25_create(struct net *net, struct socket *sock, int protocol) { struct sock *sk; ax25_cb *ax25; + if (net != &init_net) + return -EAFNOSUPPORT; + switch (sock->type) { case SOCK_DGRAM: if (protocol == 0 || protocol == PF_AX25) @@ -829,7 +836,7 @@ static int ax25_create(struct socket *sock, int protocol) return -ESOCKTNOSUPPORT; } - if ((sk = sk_alloc(PF_AX25, GFP_ATOMIC, &ax25_proto, 1)) == NULL) + if ((sk = sk_alloc(net, PF_AX25, GFP_ATOMIC, &ax25_proto, 1)) == NULL) return -ENOMEM; ax25 = sk->sk_protinfo = ax25_create_cb(); @@ -854,7 +861,7 @@ struct sock *ax25_make_new(struct sock *osk, struct ax25_dev *ax25_dev) struct sock *sk; ax25_cb *ax25, *oax25; - if ((sk = sk_alloc(PF_AX25, GFP_ATOMIC, osk->sk_prot, 1)) == NULL) + if ((sk = sk_alloc(osk->sk_net, PF_AX25, GFP_ATOMIC, osk->sk_prot, 1)) == NULL) return NULL; if ((ax25 = ax25_create_cb()) == NULL) { @@ -1998,9 +2005,9 @@ static int __init ax25_init(void) register_netdevice_notifier(&ax25_dev_notifier); ax25_register_sysctl(); - proc_net_fops_create("ax25_route", S_IRUGO, &ax25_route_fops); - proc_net_fops_create("ax25", S_IRUGO, &ax25_info_fops); - proc_net_fops_create("ax25_calls", S_IRUGO, &ax25_uid_fops); + proc_net_fops_create(&init_net, "ax25_route", S_IRUGO, &ax25_route_fops); + proc_net_fops_create(&init_net, "ax25", S_IRUGO, &ax25_info_fops); + proc_net_fops_create(&init_net, "ax25_calls", S_IRUGO, &ax25_uid_fops); out: return rc; } @@ -2014,9 +2021,9 @@ MODULE_ALIAS_NETPROTO(PF_AX25); static void __exit ax25_exit(void) { - proc_net_remove("ax25_route"); - proc_net_remove("ax25"); - proc_net_remove("ax25_calls"); + proc_net_remove(&init_net, "ax25_route"); + proc_net_remove(&init_net, "ax25"); + proc_net_remove(&init_net, "ax25_calls"); ax25_rt_free(); ax25_uid_free(); ax25_dev_free(); diff --git a/net/ax25/ax25_in.c b/net/ax25/ax25_in.c index 0ddaff0df217d37e58c0e920f0c401e4dde330cb..3b7d1720c2eee2c3d652526f74f92094bb4634b4 100644 --- a/net/ax25/ax25_in.c +++ b/net/ax25/ax25_in.c @@ -451,6 +451,11 @@ int ax25_kiss_rcv(struct sk_buff *skb, struct net_device *dev, skb->sk = NULL; /* Initially we don't know who it's for */ skb->destructor = NULL; /* Who initializes this, dammit?! 
*/ + if (dev->nd_net != &init_net) { + kfree_skb(skb); + return 0; + } + if ((*skb->data & 0x0F) != 0) { kfree_skb(skb); /* Not a KISS data frame */ return 0; diff --git a/net/ax25/ax25_ip.c b/net/ax25/ax25_ip.c index 930e4918037f51031b6e2839c05b4163d29845b2..f047a57aa95c151be44c41a92d9e834a8c35cfb5 100644 --- a/net/ax25/ax25_ip.c +++ b/net/ax25/ax25_ip.c @@ -46,7 +46,9 @@ #ifdef CONFIG_INET -int ax25_hard_header(struct sk_buff *skb, struct net_device *dev, unsigned short type, void *daddr, void *saddr, unsigned len) +int ax25_hard_header(struct sk_buff *skb, struct net_device *dev, + unsigned short type, const void *daddr, + const void *saddr, unsigned len) { unsigned char *buff; @@ -215,7 +217,9 @@ int ax25_rebuild_header(struct sk_buff *skb) #else /* INET */ -int ax25_hard_header(struct sk_buff *skb, struct net_device *dev, unsigned short type, void *daddr, void *saddr, unsigned len) +int ax25_hard_header(struct sk_buff *skb, struct net_device *dev, + unsigned short type, const void *daddr, + const void *saddr, unsigned len) { return -AX25_HEADER_LEN; } @@ -227,5 +231,12 @@ int ax25_rebuild_header(struct sk_buff *skb) #endif +const struct header_ops ax25_header_ops = { + .create = ax25_hard_header, + .rebuild = ax25_rebuild_header, +}; + EXPORT_SYMBOL(ax25_hard_header); EXPORT_SYMBOL(ax25_rebuild_header); +EXPORT_SYMBOL(ax25_header_ops); + diff --git a/net/bluetooth/af_bluetooth.c b/net/bluetooth/af_bluetooth.c index d942b946ba07efb959686d968677a2ad0faf00ef..1220d8a41eb5ff52d6726250238c975f4cef4e8a 100644 --- a/net/bluetooth/af_bluetooth.c +++ b/net/bluetooth/af_bluetooth.c @@ -95,10 +95,13 @@ int bt_sock_unregister(int proto) } EXPORT_SYMBOL(bt_sock_unregister); -static int bt_sock_create(struct socket *sock, int proto) +static int bt_sock_create(struct net *net, struct socket *sock, int proto) { int err; + if (net != &init_net) + return -EAFNOSUPPORT; + if (proto < 0 || proto >= BT_MAX_PROTO) return -EINVAL; @@ -113,7 +116,7 @@ static int bt_sock_create(struct socket *sock, int proto) read_lock(&bt_proto_lock); if (bt_proto[proto] && try_module_get(bt_proto[proto]->owner)) { - err = bt_proto[proto]->create(sock, proto); + err = bt_proto[proto]->create(net, sock, proto); module_put(bt_proto[proto]->owner); } diff --git a/net/bluetooth/bnep/sock.c b/net/bluetooth/bnep/sock.c index 10292e77604612f371e0e26d9b59d446f4897b34..f718965f296c7c3cdd2b46453ffff644fd62fa53 100644 --- a/net/bluetooth/bnep/sock.c +++ b/net/bluetooth/bnep/sock.c @@ -204,7 +204,7 @@ static struct proto bnep_proto = { .obj_size = sizeof(struct bt_sock) }; -static int bnep_sock_create(struct socket *sock, int protocol) +static int bnep_sock_create(struct net *net, struct socket *sock, int protocol) { struct sock *sk; @@ -213,7 +213,7 @@ static int bnep_sock_create(struct socket *sock, int protocol) if (sock->type != SOCK_RAW) return -ESOCKTNOSUPPORT; - sk = sk_alloc(PF_BLUETOOTH, GFP_ATOMIC, &bnep_proto, 1); + sk = sk_alloc(net, PF_BLUETOOTH, GFP_ATOMIC, &bnep_proto, 1); if (!sk) return -ENOMEM; diff --git a/net/bluetooth/cmtp/sock.c b/net/bluetooth/cmtp/sock.c index 19be7861e51ead07db64e1fafb06956cc425e0d1..cf700c20d11eb8915d901914ce3a02e8b546d838 100644 --- a/net/bluetooth/cmtp/sock.c +++ b/net/bluetooth/cmtp/sock.c @@ -195,7 +195,7 @@ static struct proto cmtp_proto = { .obj_size = sizeof(struct bt_sock) }; -static int cmtp_sock_create(struct socket *sock, int protocol) +static int cmtp_sock_create(struct net *net, struct socket *sock, int protocol) { struct sock *sk; @@ -204,7 +204,7 @@ static int 
cmtp_sock_create(struct socket *sock, int protocol) if (sock->type != SOCK_RAW) return -ESOCKTNOSUPPORT; - sk = sk_alloc(PF_BLUETOOTH, GFP_ATOMIC, &cmtp_proto, 1); + sk = sk_alloc(net, PF_BLUETOOTH, GFP_ATOMIC, &cmtp_proto, 1); if (!sk) return -ENOMEM; diff --git a/net/bluetooth/hci_sock.c b/net/bluetooth/hci_sock.c index 5ccea5fbd2363e3ab1780c39d18f68508432ff0b..43dd6373bff906e56859e36c5c572d772addad3b 100644 --- a/net/bluetooth/hci_sock.c +++ b/net/bluetooth/hci_sock.c @@ -634,7 +634,7 @@ static struct proto hci_sk_proto = { .obj_size = sizeof(struct hci_pinfo) }; -static int hci_sock_create(struct socket *sock, int protocol) +static int hci_sock_create(struct net *net, struct socket *sock, int protocol) { struct sock *sk; @@ -645,7 +645,7 @@ static int hci_sock_create(struct socket *sock, int protocol) sock->ops = &hci_sock_ops; - sk = sk_alloc(PF_BLUETOOTH, GFP_ATOMIC, &hci_sk_proto, 1); + sk = sk_alloc(net, PF_BLUETOOTH, GFP_ATOMIC, &hci_sk_proto, 1); if (!sk) return -ENOMEM; diff --git a/net/bluetooth/hidp/core.c b/net/bluetooth/hidp/core.c index 64d89ca2884796ab29365f1ad2b40cebf35a136c..ff5784b440d792b87bf2cd088801b1b6b5b5c0af 100644 --- a/net/bluetooth/hidp/core.c +++ b/net/bluetooth/hidp/core.c @@ -625,7 +625,7 @@ static struct device *hidp_get_device(struct hidp_session *session) return conn ? &conn->dev : NULL; } -static inline void hidp_setup_input(struct hidp_session *session, struct hidp_connadd_req *req) +static inline int hidp_setup_input(struct hidp_session *session, struct hidp_connadd_req *req) { struct input_dev *input = session->input; int i; @@ -667,7 +667,7 @@ static inline void hidp_setup_input(struct hidp_session *session, struct hidp_co input->event = hidp_input_event; - input_register_device(input); + return input_register_device(input); } static int hidp_open(struct hid_device *hid) @@ -820,8 +820,11 @@ int hidp_add_connection(struct hidp_connadd_req *req, struct socket *ctrl_sock, session->flags = req->flags & (1 << HIDP_BLUETOOTH_VENDOR_ID); session->idle_to = req->idle_to; - if (session->input) - hidp_setup_input(session, req); + if (session->input) { + err = hidp_setup_input(session, req); + if (err < 0) + goto failed; + } if (session->hid) hidp_setup_hid(session, req); diff --git a/net/bluetooth/hidp/sock.c b/net/bluetooth/hidp/sock.c index 0c185257e55bbbe65933a57f6991930b149d2c62..1de2b6fbcac0d442a2d9073e145aa82e3eb9c1d4 100644 --- a/net/bluetooth/hidp/sock.c +++ b/net/bluetooth/hidp/sock.c @@ -246,7 +246,7 @@ static struct proto hidp_proto = { .obj_size = sizeof(struct bt_sock) }; -static int hidp_sock_create(struct socket *sock, int protocol) +static int hidp_sock_create(struct net *net, struct socket *sock, int protocol) { struct sock *sk; @@ -255,7 +255,7 @@ static int hidp_sock_create(struct socket *sock, int protocol) if (sock->type != SOCK_RAW) return -ESOCKTNOSUPPORT; - sk = sk_alloc(PF_BLUETOOTH, GFP_ATOMIC, &hidp_proto, 1); + sk = sk_alloc(net, PF_BLUETOOTH, GFP_ATOMIC, &hidp_proto, 1); if (!sk) return -ENOMEM; diff --git a/net/bluetooth/l2cap.c b/net/bluetooth/l2cap.c index c4e4ce4ebb2b1115c6c92b07079fa6be7647c589..36ef27b625db23d19f67c81ae1e1849665d0bc10 100644 --- a/net/bluetooth/l2cap.c +++ b/net/bluetooth/l2cap.c @@ -518,11 +518,11 @@ static struct proto l2cap_proto = { .obj_size = sizeof(struct l2cap_pinfo) }; -static struct sock *l2cap_sock_alloc(struct socket *sock, int proto, gfp_t prio) +static struct sock *l2cap_sock_alloc(struct net *net, struct socket *sock, int proto, gfp_t prio) { struct sock *sk; - sk = sk_alloc(PF_BLUETOOTH, 
prio, &l2cap_proto, 1); + sk = sk_alloc(net, PF_BLUETOOTH, prio, &l2cap_proto, 1); if (!sk) return NULL; @@ -543,7 +543,7 @@ static struct sock *l2cap_sock_alloc(struct socket *sock, int proto, gfp_t prio) return sk; } -static int l2cap_sock_create(struct socket *sock, int protocol) +static int l2cap_sock_create(struct net *net, struct socket *sock, int protocol) { struct sock *sk; @@ -560,7 +560,7 @@ static int l2cap_sock_create(struct socket *sock, int protocol) sock->ops = &l2cap_sock_ops; - sk = l2cap_sock_alloc(sock, protocol, GFP_ATOMIC); + sk = l2cap_sock_alloc(net, sock, protocol, GFP_ATOMIC); if (!sk) return -ENOMEM; @@ -1425,7 +1425,7 @@ static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hd goto response; } - sk = l2cap_sock_alloc(NULL, BTPROTO_L2CAP, GFP_ATOMIC); + sk = l2cap_sock_alloc(parent->sk_net, NULL, BTPROTO_L2CAP, GFP_ATOMIC); if (!sk) goto response; diff --git a/net/bluetooth/rfcomm/sock.c b/net/bluetooth/rfcomm/sock.c index 30586ab9e8783cad490916c367b1099a14103a85..266b6972667d7713899a22681af5f0a8eef21f73 100644 --- a/net/bluetooth/rfcomm/sock.c +++ b/net/bluetooth/rfcomm/sock.c @@ -282,12 +282,12 @@ static struct proto rfcomm_proto = { .obj_size = sizeof(struct rfcomm_pinfo) }; -static struct sock *rfcomm_sock_alloc(struct socket *sock, int proto, gfp_t prio) +static struct sock *rfcomm_sock_alloc(struct net *net, struct socket *sock, int proto, gfp_t prio) { struct rfcomm_dlc *d; struct sock *sk; - sk = sk_alloc(PF_BLUETOOTH, prio, &rfcomm_proto, 1); + sk = sk_alloc(net, PF_BLUETOOTH, prio, &rfcomm_proto, 1); if (!sk) return NULL; @@ -323,7 +323,7 @@ static struct sock *rfcomm_sock_alloc(struct socket *sock, int proto, gfp_t prio return sk; } -static int rfcomm_sock_create(struct socket *sock, int protocol) +static int rfcomm_sock_create(struct net *net, struct socket *sock, int protocol) { struct sock *sk; @@ -336,7 +336,7 @@ static int rfcomm_sock_create(struct socket *sock, int protocol) sock->ops = &rfcomm_sock_ops; - sk = rfcomm_sock_alloc(sock, protocol, GFP_ATOMIC); + sk = rfcomm_sock_alloc(net, sock, protocol, GFP_ATOMIC); if (!sk) return -ENOMEM; @@ -868,7 +868,7 @@ int rfcomm_connect_ind(struct rfcomm_session *s, u8 channel, struct rfcomm_dlc * goto done; } - sk = rfcomm_sock_alloc(NULL, BTPROTO_RFCOMM, GFP_ATOMIC); + sk = rfcomm_sock_alloc(parent->sk_net, NULL, BTPROTO_RFCOMM, GFP_ATOMIC); if (!sk) goto done; diff --git a/net/bluetooth/sco.c b/net/bluetooth/sco.c index 3f5163e725ede1892bdb2912427062f63d881320..65b6fb1c41548c5b508a6b93eb9fe3c0d181857b 100644 --- a/net/bluetooth/sco.c +++ b/net/bluetooth/sco.c @@ -414,11 +414,11 @@ static struct proto sco_proto = { .obj_size = sizeof(struct sco_pinfo) }; -static struct sock *sco_sock_alloc(struct socket *sock, int proto, gfp_t prio) +static struct sock *sco_sock_alloc(struct net *net, struct socket *sock, int proto, gfp_t prio) { struct sock *sk; - sk = sk_alloc(PF_BLUETOOTH, prio, &sco_proto, 1); + sk = sk_alloc(net, PF_BLUETOOTH, prio, &sco_proto, 1); if (!sk) return NULL; @@ -439,7 +439,7 @@ static struct sock *sco_sock_alloc(struct socket *sock, int proto, gfp_t prio) return sk; } -static int sco_sock_create(struct socket *sock, int protocol) +static int sco_sock_create(struct net *net, struct socket *sock, int protocol) { struct sock *sk; @@ -452,7 +452,7 @@ static int sco_sock_create(struct socket *sock, int protocol) sock->ops = &sco_sock_ops; - sk = sco_sock_alloc(sock, protocol, GFP_ATOMIC); + sk = sco_sock_alloc(net, sock, protocol, GFP_ATOMIC); if (!sk) return -ENOMEM; 
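Each address family converted above follows the same pattern: the family's create() hook now receives the target network namespace, refuses anything other than init_net while the family is not yet namespace-aware, and passes the namespace on to sk_alloc() so the new socket is tied to it. A minimal sketch of that pattern, using placeholder names (example_create, example_ops, example_proto and PF_EXAMPLE are illustrative only, not symbols from this patch):

static int example_create(struct net *net, struct socket *sock, int protocol)
{
	struct sock *sk;

	/* Family is not namespace-aware yet: serve only the initial namespace. */
	if (net != &init_net)
		return -EAFNOSUPPORT;

	if (sock->type != SOCK_RAW)
		return -ESOCKTNOSUPPORT;

	sock->ops = &example_ops;

	/* sk_alloc() now takes the owning namespace as its first argument. */
	sk = sk_alloc(net, PF_EXAMPLE, GFP_ATOMIC, &example_proto, 1);
	if (!sk)
		return -ENOMEM;

	sock_init_data(sock, sk);
	return 0;
}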
@@ -807,7 +807,7 @@ static void sco_conn_ready(struct sco_conn *conn) bh_lock_sock(parent); - sk = sco_sock_alloc(NULL, BTPROTO_SCO, GFP_ATOMIC); + sk = sco_sock_alloc(parent->sk_net, NULL, BTPROTO_SCO, GFP_ATOMIC); if (!sk) { bh_unlock_sock(parent); goto done; diff --git a/net/bridge/br_device.c b/net/bridge/br_device.c index 99292e8e1d0f05f2f813d85b08c8497e31c1d98d..c07bac5e3e10486c57b86556d3d45d1f2d3de226 100644 --- a/net/bridge/br_device.c +++ b/net/bridge/br_device.c @@ -150,11 +150,8 @@ static int br_set_tx_csum(struct net_device *dev, u32 data) static struct ethtool_ops br_ethtool_ops = { .get_drvinfo = br_getinfo, .get_link = ethtool_op_get_link, - .get_sg = ethtool_op_get_sg, .set_sg = br_set_sg, - .get_tx_csum = ethtool_op_get_tx_csum, .set_tx_csum = br_set_tx_csum, - .get_tso = ethtool_op_get_tso, .set_tso = br_set_tso, }; @@ -171,7 +168,6 @@ void br_dev_setup(struct net_device *dev) dev->set_multicast_list = br_dev_set_multicast_list; dev->change_mtu = br_change_mtu; dev->destructor = free_netdev; - SET_MODULE_OWNER(dev); SET_ETHTOOL_OPS(dev, &br_ethtool_ops); dev->stop = br_dev_stop; dev->tx_queue_len = 0; diff --git a/net/bridge/br_if.c b/net/bridge/br_if.c index 9272f12f664cfb6a4604e041652c824b30480abf..935784f736b3d0f88cff37c76140a5c22a167dd2 100644 --- a/net/bridge/br_if.c +++ b/net/bridge/br_if.c @@ -303,7 +303,7 @@ int br_del_bridge(const char *name) int ret = 0; rtnl_lock(); - dev = __dev_get_by_name(name); + dev = __dev_get_by_name(&init_net, name); if (dev == NULL) ret = -ENXIO; /* Could not find device */ @@ -444,7 +444,7 @@ void __exit br_cleanup_bridges(void) struct net_device *dev, *nxt; rtnl_lock(); - for_each_netdev_safe(dev, nxt) + for_each_netdev_safe(&init_net, dev, nxt) if (dev->priv_flags & IFF_EBRIDGE) del_br(dev->priv); rtnl_unlock(); diff --git a/net/bridge/br_ioctl.c b/net/bridge/br_ioctl.c index bb15e9e259b12e0bae720bf3fa7ff93ad2eb6afc..0655a5f07f58aeef86a3ef934949cffbda360e6e 100644 --- a/net/bridge/br_ioctl.c +++ b/net/bridge/br_ioctl.c @@ -18,6 +18,7 @@ #include #include #include +#include #include #include "br_private.h" @@ -27,7 +28,7 @@ static int get_bridge_ifindices(int *indices, int num) struct net_device *dev; int i = 0; - for_each_netdev(dev) { + for_each_netdev(&init_net, dev) { if (i >= num) break; if (dev->priv_flags & IFF_EBRIDGE) @@ -90,7 +91,7 @@ static int add_del_if(struct net_bridge *br, int ifindex, int isadd) if (!capable(CAP_NET_ADMIN)) return -EPERM; - dev = dev_get_by_index(ifindex); + dev = dev_get_by_index(&init_net, ifindex); if (dev == NULL) return -EINVAL; @@ -364,7 +365,7 @@ static int old_deviceless(void __user *uarg) return -EOPNOTSUPP; } -int br_ioctl_deviceless_stub(unsigned int cmd, void __user *uarg) +int br_ioctl_deviceless_stub(struct net *net, unsigned int cmd, void __user *uarg) { switch (cmd) { case SIOCGIFBR: diff --git a/net/bridge/br_netfilter.c b/net/bridge/br_netfilter.c index fc13130035e73997dbe5efb95ef3a02f3bd02b0d..8245f051ccbbcb63f95755f663c1503f0cc954b9 100644 --- a/net/bridge/br_netfilter.c +++ b/net/bridge/br_netfilter.c @@ -904,7 +904,6 @@ int brnf_sysctl_call_tables(ctl_table * ctl, int write, struct file *filp, static ctl_table brnf_table[] = { { - .ctl_name = NET_BRIDGE_NF_CALL_ARPTABLES, .procname = "bridge-nf-call-arptables", .data = &brnf_call_arptables, .maxlen = sizeof(int), @@ -912,7 +911,6 @@ static ctl_table brnf_table[] = { .proc_handler = &brnf_sysctl_call_tables, }, { - .ctl_name = NET_BRIDGE_NF_CALL_IPTABLES, .procname = "bridge-nf-call-iptables", .data = &brnf_call_iptables, .maxlen 
= sizeof(int), @@ -920,7 +918,6 @@ static ctl_table brnf_table[] = { .proc_handler = &brnf_sysctl_call_tables, }, { - .ctl_name = NET_BRIDGE_NF_CALL_IP6TABLES, .procname = "bridge-nf-call-ip6tables", .data = &brnf_call_ip6tables, .maxlen = sizeof(int), @@ -928,7 +925,6 @@ static ctl_table brnf_table[] = { .proc_handler = &brnf_sysctl_call_tables, }, { - .ctl_name = NET_BRIDGE_NF_FILTER_VLAN_TAGGED, .procname = "bridge-nf-filter-vlan-tagged", .data = &brnf_filter_vlan_tagged, .maxlen = sizeof(int), @@ -936,7 +932,6 @@ static ctl_table brnf_table[] = { .proc_handler = &brnf_sysctl_call_tables, }, { - .ctl_name = NET_BRIDGE_NF_FILTER_PPPOE_TAGGED, .procname = "bridge-nf-filter-pppoe-tagged", .data = &brnf_filter_pppoe_tagged, .maxlen = sizeof(int), diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c index 0fcf6f073064feb4c2d29753246fe93186782637..53ab8e0cb5189c40a880b92b9b49e9c9e0aad85e 100644 --- a/net/bridge/br_netlink.c +++ b/net/bridge/br_netlink.c @@ -12,6 +12,7 @@ #include #include +#include #include "br_private.h" static inline size_t br_nlmsg_size(void) @@ -110,7 +111,7 @@ static int br_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb) int idx; idx = 0; - for_each_netdev(dev) { + for_each_netdev(&init_net, dev) { /* not a bridge port */ if (dev->br_port == NULL || idx < cb->args[0]) goto skip; @@ -155,7 +156,7 @@ static int br_rtm_setlink(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg) if (new_state > BR_STATE_BLOCKING) return -EINVAL; - dev = __dev_get_by_index(ifm->ifi_index); + dev = __dev_get_by_index(&init_net, ifm->ifi_index); if (!dev) return -ENODEV; diff --git a/net/bridge/br_notify.c b/net/bridge/br_notify.c index c8451d3a070c5f937c72d8409f70e4ca39882e0e..07ac3ae68d8ff089000a78c19024a5d40c3baf9a 100644 --- a/net/bridge/br_notify.c +++ b/net/bridge/br_notify.c @@ -15,6 +15,7 @@ #include #include +#include #include "br_private.h" @@ -36,6 +37,9 @@ static int br_device_event(struct notifier_block *unused, unsigned long event, v struct net_bridge_port *p = dev->br_port; struct net_bridge *br; + if (dev->nd_net != &init_net) + return NOTIFY_DONE; + /* not a port of a bridge */ if (p == NULL) return NOTIFY_DONE; diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h index e6dc6f52990d1946d083ea7a254453a244b7306c..f666f7b28ff584d7222a9763c95288c6d63da11d 100644 --- a/net/bridge/br_private.h +++ b/net/bridge/br_private.h @@ -192,7 +192,7 @@ extern struct sk_buff *br_handle_frame(struct net_bridge_port *p, /* br_ioctl.c */ extern int br_dev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd); -extern int br_ioctl_deviceless_stub(unsigned int cmd, void __user *arg); +extern int br_ioctl_deviceless_stub(struct net *net, unsigned int cmd, void __user *arg); /* br_netfilter.c */ #ifdef CONFIG_BRIDGE_NETFILTER diff --git a/net/bridge/br_stp_bpdu.c b/net/bridge/br_stp_bpdu.c index 60112bce66984343e8ab13121c1304a1cde29739..0edbd2a1c3f3cafe6d2046360f92b5a9084235f3 100644 --- a/net/bridge/br_stp_bpdu.c +++ b/net/bridge/br_stp_bpdu.c @@ -17,6 +17,7 @@ #include #include #include +#include #include #include #include @@ -64,7 +65,7 @@ static inline int br_get_ticks(const unsigned char *src) { unsigned long ticks = ntohs(get_unaligned((__be16 *)src)); - return (ticks * HZ + STP_HZ - 1) / STP_HZ; + return DIV_ROUND_UP(ticks * HZ, STP_HZ); } /* called under bridge lock */ @@ -141,6 +142,9 @@ int br_stp_rcv(struct sk_buff *skb, struct net_device *dev, struct net_bridge *br; const unsigned char *buf; + if (dev->nd_net != &init_net) + goto err; + if (!p) goto 
err; diff --git a/net/bridge/netfilter/ebt_ulog.c b/net/bridge/netfilter/ebt_ulog.c index 204c968fa86dfe6cd8fa96cf7725880548cbb074..e7cfd30bac756f0c97b32aaa1eb6200bcc5c47f8 100644 --- a/net/bridge/netfilter/ebt_ulog.c +++ b/net/bridge/netfilter/ebt_ulog.c @@ -300,8 +300,9 @@ static int __init ebt_ulog_init(void) spin_lock_init(&ulog_buffers[i].lock); } - ebtulognl = netlink_kernel_create(NETLINK_NFLOG, EBT_ULOG_MAXNLGROUPS, - NULL, NULL, THIS_MODULE); + ebtulognl = netlink_kernel_create(&init_net, NETLINK_NFLOG, + EBT_ULOG_MAXNLGROUPS, NULL, NULL, + THIS_MODULE); if (!ebtulognl) ret = -ENOMEM; else if ((ret = ebt_register_watcher(&ulog))) diff --git a/net/core/Makefile b/net/core/Makefile index 4751613e1b59a5c6cb75fbfe45648bd1f36a07ed..b1332f6d0042bab1cc5acad312782e3c2da6fb64 100644 --- a/net/core/Makefile +++ b/net/core/Makefile @@ -3,7 +3,7 @@ # obj-y := sock.o request_sock.o skbuff.o iovec.o datagram.o stream.o scm.o \ - gen_stats.o gen_estimator.o + gen_stats.o gen_estimator.o net_namespace.o obj-$(CONFIG_SYSCTL) += sysctl_net_core.o @@ -11,7 +11,7 @@ obj-y += dev.o ethtool.o dev_mcast.o dst.o netevent.o \ neighbour.o rtnetlink.o utils.o link_watch.o filter.o obj-$(CONFIG_XFRM) += flow.o -obj-$(CONFIG_SYSFS) += net-sysfs.o +obj-y += net-sysfs.o obj-$(CONFIG_NET_PKTGEN) += pktgen.o obj-$(CONFIG_NETPOLL) += netpoll.o obj-$(CONFIG_NET_DMA) += user_dma.o diff --git a/net/core/dev.c b/net/core/dev.c index a76021c71207a6bf4578cc4995412e68eca18bba..1e169a541ce76b617601a462571af418627ac68a 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -92,6 +92,7 @@ #include #include #include +#include #include #include #include @@ -189,25 +190,50 @@ static struct net_dma net_dma = { * unregister_netdevice(), which must be called with the rtnl * semaphore held. */ -LIST_HEAD(dev_base_head); DEFINE_RWLOCK(dev_base_lock); -EXPORT_SYMBOL(dev_base_head); EXPORT_SYMBOL(dev_base_lock); #define NETDEV_HASHBITS 8 -static struct hlist_head dev_name_head[1<<NETDEV_HASHBITS]; -static struct hlist_head dev_index_head[1<<NETDEV_HASHBITS]; -static inline struct hlist_head *dev_name_hash(const char *name) +static inline struct hlist_head *dev_name_hash(struct net *net, const char *name) { unsigned hash = full_name_hash(name, strnlen(name, IFNAMSIZ)); - return &dev_name_head[hash & ((1<<NETDEV_HASHBITS)-1)]; + return &net->dev_name_head[hash & ((1 << NETDEV_HASHBITS) - 1)]; } -static inline struct hlist_head *dev_index_hash(int ifindex) +static inline struct hlist_head *dev_index_hash(struct net *net, int ifindex) { - return &dev_index_head[ifindex & ((1<<NETDEV_HASHBITS)-1)]; + return &net->dev_index_head[ifindex & ((1 << NETDEV_HASHBITS) - 1)]; +} + +/* Device list insertion */ +static int list_netdevice(struct net_device *dev) +{ + struct net *net = dev->nd_net; + + ASSERT_RTNL(); + + write_lock_bh(&dev_base_lock); + list_add_tail(&dev->dev_list, &net->dev_base_head); + hlist_add_head(&dev->name_hlist, dev_name_hash(net, dev->name)); + hlist_add_head(&dev->index_hlist, dev_index_hash(net, dev->ifindex)); + write_unlock_bh(&dev_base_lock); + return 0; +} + +/* Device list removal */ +static void unlist_netdevice(struct net_device *dev) +{ + ASSERT_RTNL(); + + /* Unlink dev from the device chain */ + write_lock_bh(&dev_base_lock); + list_del(&dev->dev_list); + hlist_del(&dev->name_hlist); + hlist_del(&dev->index_hlist); + write_unlock_bh(&dev_base_lock); } /* @@ -220,17 +246,12 @@ static RAW_NOTIFIER_HEAD(netdev_chain); * Device drivers call our routines to queue packets here. We empty the * queue in the local softnet handler.
*/ -DEFINE_PER_CPU(struct softnet_data, softnet_data) = { NULL }; -#ifdef CONFIG_SYSFS -extern int netdev_sysfs_init(void); -extern int netdev_register_sysfs(struct net_device *); -extern void netdev_unregister_sysfs(struct net_device *); -#else -#define netdev_sysfs_init() (0) -#define netdev_register_sysfs(dev) (0) -#define netdev_unregister_sysfs(dev) do { } while(0) -#endif +DEFINE_PER_CPU(struct softnet_data, softnet_data); + +extern int netdev_kobject_init(void); +extern int netdev_register_kobject(struct net_device *); +extern void netdev_unregister_kobject(struct net_device *); #ifdef CONFIG_DEBUG_LOCK_ALLOC /* @@ -490,7 +511,7 @@ unsigned long netdev_boot_base(const char *prefix, int unit) * If device already registered then return base of 1 * to indicate not to probe for this interface */ - if (__dev_get_by_name(name)) + if (__dev_get_by_name(&init_net, name)) return 1; for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) @@ -545,11 +566,11 @@ __setup("netdev=", netdev_boot_setup); * careful with locks. */ -struct net_device *__dev_get_by_name(const char *name) +struct net_device *__dev_get_by_name(struct net *net, const char *name) { struct hlist_node *p; - hlist_for_each(p, dev_name_hash(name)) { + hlist_for_each(p, dev_name_hash(net, name)) { struct net_device *dev = hlist_entry(p, struct net_device, name_hlist); if (!strncmp(dev->name, name, IFNAMSIZ)) @@ -569,12 +590,12 @@ struct net_device *__dev_get_by_name(const char *name) * matching device is found. */ -struct net_device *dev_get_by_name(const char *name) +struct net_device *dev_get_by_name(struct net *net, const char *name) { struct net_device *dev; read_lock(&dev_base_lock); - dev = __dev_get_by_name(name); + dev = __dev_get_by_name(net, name); if (dev) dev_hold(dev); read_unlock(&dev_base_lock); @@ -592,11 +613,11 @@ struct net_device *dev_get_by_name(const char *name) * or @dev_base_lock. */ -struct net_device *__dev_get_by_index(int ifindex) +struct net_device *__dev_get_by_index(struct net *net, int ifindex) { struct hlist_node *p; - hlist_for_each(p, dev_index_hash(ifindex)) { + hlist_for_each(p, dev_index_hash(net, ifindex)) { struct net_device *dev = hlist_entry(p, struct net_device, index_hlist); if (dev->ifindex == ifindex) @@ -616,12 +637,12 @@ struct net_device *__dev_get_by_index(int ifindex) * dev_put to indicate they have finished with it. 
*/ -struct net_device *dev_get_by_index(int ifindex) +struct net_device *dev_get_by_index(struct net *net, int ifindex) { struct net_device *dev; read_lock(&dev_base_lock); - dev = __dev_get_by_index(ifindex); + dev = __dev_get_by_index(net, ifindex); if (dev) dev_hold(dev); read_unlock(&dev_base_lock); @@ -642,13 +663,13 @@ struct net_device *dev_get_by_index(int ifindex) * If the API was consistent this would be __dev_get_by_hwaddr */ -struct net_device *dev_getbyhwaddr(unsigned short type, char *ha) +struct net_device *dev_getbyhwaddr(struct net *net, unsigned short type, char *ha) { struct net_device *dev; ASSERT_RTNL(); - for_each_netdev(dev) + for_each_netdev(&init_net, dev) if (dev->type == type && !memcmp(dev->dev_addr, ha, dev->addr_len)) return dev; @@ -658,12 +679,12 @@ struct net_device *dev_getbyhwaddr(unsigned short type, char *ha) EXPORT_SYMBOL(dev_getbyhwaddr); -struct net_device *__dev_getfirstbyhwtype(unsigned short type) +struct net_device *__dev_getfirstbyhwtype(struct net *net, unsigned short type) { struct net_device *dev; ASSERT_RTNL(); - for_each_netdev(dev) + for_each_netdev(net, dev) if (dev->type == type) return dev; @@ -672,12 +693,12 @@ struct net_device *__dev_getfirstbyhwtype(unsigned short type) EXPORT_SYMBOL(__dev_getfirstbyhwtype); -struct net_device *dev_getfirstbyhwtype(unsigned short type) +struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type) { struct net_device *dev; rtnl_lock(); - dev = __dev_getfirstbyhwtype(type); + dev = __dev_getfirstbyhwtype(net, type); if (dev) dev_hold(dev); rtnl_unlock(); @@ -697,13 +718,13 @@ EXPORT_SYMBOL(dev_getfirstbyhwtype); * dev_put to indicate they have finished with it. */ -struct net_device * dev_get_by_flags(unsigned short if_flags, unsigned short mask) +struct net_device * dev_get_by_flags(struct net *net, unsigned short if_flags, unsigned short mask) { struct net_device *dev, *ret; ret = NULL; read_lock(&dev_base_lock); - for_each_netdev(dev) { + for_each_netdev(net, dev) { if (((dev->flags ^ if_flags) & mask) == 0) { dev_hold(dev); ret = dev; @@ -740,9 +761,10 @@ int dev_valid_name(const char *name) } /** - * dev_alloc_name - allocate a name for a device - * @dev: device + * __dev_alloc_name - allocate a name for a device + * @net: network namespace to allocate the device name in * @name: name format string + * @buf: scratch buffer and result name string * * Passed a format string - eg "lt%d" it will try and find a suitable * id. It scans list of devices to build up a free map, then chooses @@ -753,13 +775,12 @@ int dev_valid_name(const char *name) * Returns the number of the unit assigned or a negative errno code. 
*/ -int dev_alloc_name(struct net_device *dev, const char *name) +static int __dev_alloc_name(struct net *net, const char *name, char *buf) { int i = 0; - char buf[IFNAMSIZ]; const char *p; const int max_netdevices = 8*PAGE_SIZE; - long *inuse; + unsigned long *inuse; struct net_device *d; p = strnchr(name, IFNAMSIZ-1, '%'); @@ -773,18 +794,18 @@ int dev_alloc_name(struct net_device *dev, const char *name) return -EINVAL; /* Use one page as a bit array of possible slots */ - inuse = (long *) get_zeroed_page(GFP_ATOMIC); + inuse = (unsigned long *) get_zeroed_page(GFP_ATOMIC); if (!inuse) return -ENOMEM; - for_each_netdev(d) { + for_each_netdev(net, d) { if (!sscanf(d->name, name, &i)) continue; if (i < 0 || i >= max_netdevices) continue; /* avoid cases where sscanf is not exact inverse of printf */ - snprintf(buf, sizeof(buf), name, i); + snprintf(buf, IFNAMSIZ, name, i); if (!strncmp(buf, d->name, IFNAMSIZ)) set_bit(i, inuse); } @@ -793,11 +814,9 @@ int dev_alloc_name(struct net_device *dev, const char *name) free_page((unsigned long) inuse); } - snprintf(buf, sizeof(buf), name, i); - if (!__dev_get_by_name(buf)) { - strlcpy(dev->name, buf, IFNAMSIZ); + snprintf(buf, IFNAMSIZ, name, i); + if (!__dev_get_by_name(net, buf)) return i; - } /* It is possible to run out of possible slots * when the name is long and there isn't enough space left @@ -806,6 +825,34 @@ int dev_alloc_name(struct net_device *dev, const char *name) return -ENFILE; } +/** + * dev_alloc_name - allocate a name for a device + * @dev: device + * @name: name format string + * + * Passed a format string - eg "lt%d" it will try and find a suitable + * id. It scans list of devices to build up a free map, then chooses + * the first empty slot. The caller must hold the dev_base or rtnl lock + * while allocating the name and adding the device in order to avoid + * duplicates. + * Limited to bits_per_byte * page size devices (ie 32K on most platforms). + * Returns the number of the unit assigned or a negative errno code. 
+ */ + +int dev_alloc_name(struct net_device *dev, const char *name) +{ + char buf[IFNAMSIZ]; + struct net *net; + int ret; + + BUG_ON(!dev->nd_net); + net = dev->nd_net; + ret = __dev_alloc_name(net, name, buf); + if (ret >= 0) + strlcpy(dev->name, buf, IFNAMSIZ); + return ret; +} + /** * dev_change_name - change name of a device @@ -820,9 +867,12 @@ int dev_change_name(struct net_device *dev, char *newname) char oldname[IFNAMSIZ]; int err = 0; int ret; + struct net *net; ASSERT_RTNL(); + BUG_ON(!dev->nd_net); + net = dev->nd_net; if (dev->flags & IFF_UP) return -EBUSY; @@ -837,7 +887,7 @@ int dev_change_name(struct net_device *dev, char *newname) return err; strcpy(newname, dev->name); } - else if (__dev_get_by_name(newname)) + else if (__dev_get_by_name(net, newname)) return -EEXIST; else strlcpy(dev->name, newname, IFNAMSIZ); @@ -847,10 +897,10 @@ int dev_change_name(struct net_device *dev, char *newname) write_lock_bh(&dev_base_lock); hlist_del(&dev->name_hlist); - hlist_add_head(&dev->name_hlist, dev_name_hash(dev->name)); + hlist_add_head(&dev->name_hlist, dev_name_hash(net, dev->name)); write_unlock_bh(&dev_base_lock); - ret = raw_notifier_call_chain(&netdev_chain, NETDEV_CHANGENAME, dev); + ret = call_netdevice_notifiers(NETDEV_CHANGENAME, dev); ret = notifier_to_errno(ret); if (ret) { @@ -876,7 +926,7 @@ int dev_change_name(struct net_device *dev, char *newname) */ void netdev_features_change(struct net_device *dev) { - raw_notifier_call_chain(&netdev_chain, NETDEV_FEAT_CHANGE, dev); + call_netdevice_notifiers(NETDEV_FEAT_CHANGE, dev); } EXPORT_SYMBOL(netdev_features_change); @@ -891,8 +941,7 @@ EXPORT_SYMBOL(netdev_features_change); void netdev_state_change(struct net_device *dev) { if (dev->flags & IFF_UP) { - raw_notifier_call_chain(&netdev_chain, - NETDEV_CHANGE, dev); + call_netdevice_notifiers(NETDEV_CHANGE, dev); rtmsg_ifinfo(RTM_NEWLINK, dev, 0); } } @@ -906,26 +955,18 @@ void netdev_state_change(struct net_device *dev) * available in this kernel then it becomes a nop. */ -void dev_load(const char *name) +void dev_load(struct net *net, const char *name) { struct net_device *dev; read_lock(&dev_base_lock); - dev = __dev_get_by_name(name); + dev = __dev_get_by_name(net, name); read_unlock(&dev_base_lock); if (!dev && capable(CAP_SYS_MODULE)) request_module("%s", name); } -static int default_rebuild_header(struct sk_buff *skb) -{ - printk(KERN_DEBUG "%s: default_rebuild_header called -- BUG!\n", - skb->dev ? skb->dev->name : "NULL!!!"); - kfree_skb(skb); - return 1; -} - /** * dev_open - prepare an interface for use. * @dev: device to open @@ -988,7 +1029,7 @@ int dev_open(struct net_device *dev) /* * ... and announce new interface. */ - raw_notifier_call_chain(&netdev_chain, NETDEV_UP, dev); + call_netdevice_notifiers(NETDEV_UP, dev); } return ret; } @@ -1004,6 +1045,8 @@ int dev_open(struct net_device *dev) */ int dev_close(struct net_device *dev) { + might_sleep(); + if (!(dev->flags & IFF_UP)) return 0; @@ -1011,23 +1054,19 @@ int dev_close(struct net_device *dev) * Tell people we are going down, so that they can * prepare to death, when device is still operating. */ - raw_notifier_call_chain(&netdev_chain, NETDEV_GOING_DOWN, dev); + call_netdevice_notifiers(NETDEV_GOING_DOWN, dev); dev_deactivate(dev); clear_bit(__LINK_STATE_START, &dev->state); /* Synchronize to scheduled poll. We cannot touch poll list, - * it can be even on different cpu. So just clear netif_running(), - * and wait when poll really will happen. 
Actually, the best place - * for this is inside dev->stop() after device stopped its irq - * engine, but this requires more changes in devices. */ - + * it can be even on different cpu. So just clear netif_running(). + * + * dev->stop() will invoke napi_disable() on all of it's + * napi_struct instances on this device. + */ smp_mb__after_clear_bit(); /* Commit netif_running(). */ - while (test_bit(__LINK_STATE_RX_SCHED, &dev->state)) { - /* No hurry. */ - msleep(1); - } /* * Call the device specific close. This cannot fail. @@ -1048,12 +1087,14 @@ int dev_close(struct net_device *dev) /* * Tell people we are down */ - raw_notifier_call_chain(&netdev_chain, NETDEV_DOWN, dev); + call_netdevice_notifiers(NETDEV_DOWN, dev); return 0; } +static int dev_boot_phase = 1; + /* * Device change register/unregister. These are not inline or static * as we export them to the world. @@ -1077,23 +1118,27 @@ int register_netdevice_notifier(struct notifier_block *nb) { struct net_device *dev; struct net_device *last; + struct net *net; int err; rtnl_lock(); err = raw_notifier_chain_register(&netdev_chain, nb); if (err) goto unlock; + if (dev_boot_phase) + goto unlock; + for_each_net(net) { + for_each_netdev(net, dev) { + err = nb->notifier_call(nb, NETDEV_REGISTER, dev); + err = notifier_to_errno(err); + if (err) + goto rollback; + + if (!(dev->flags & IFF_UP)) + continue; - for_each_netdev(dev) { - err = nb->notifier_call(nb, NETDEV_REGISTER, dev); - err = notifier_to_errno(err); - if (err) - goto rollback; - - if (!(dev->flags & IFF_UP)) - continue; - - nb->notifier_call(nb, NETDEV_UP, dev); + nb->notifier_call(nb, NETDEV_UP, dev); + } } unlock: @@ -1102,15 +1147,17 @@ int register_netdevice_notifier(struct notifier_block *nb) rollback: last = dev; - for_each_netdev(dev) { - if (dev == last) - break; + for_each_net(net) { + for_each_netdev(net, dev) { + if (dev == last) + break; - if (dev->flags & IFF_UP) { - nb->notifier_call(nb, NETDEV_GOING_DOWN, dev); - nb->notifier_call(nb, NETDEV_DOWN, dev); + if (dev->flags & IFF_UP) { + nb->notifier_call(nb, NETDEV_GOING_DOWN, dev); + nb->notifier_call(nb, NETDEV_DOWN, dev); + } + nb->notifier_call(nb, NETDEV_UNREGISTER, dev); } - nb->notifier_call(nb, NETDEV_UNREGISTER, dev); } goto unlock; } @@ -1144,9 +1191,9 @@ int unregister_netdevice_notifier(struct notifier_block *nb) * are as for raw_notifier_call_chain(). 
*/ -int call_netdevice_notifiers(unsigned long val, void *v) +int call_netdevice_notifiers(unsigned long val, struct net_device *dev) { - return raw_notifier_call_chain(&netdev_chain, val, v); + return raw_notifier_call_chain(&netdev_chain, val, dev); } /* When > 0 there are consumers of rx skb time stamps */ @@ -1233,21 +1280,21 @@ void __netif_schedule(struct net_device *dev) } EXPORT_SYMBOL(__netif_schedule); -void __netif_rx_schedule(struct net_device *dev) +void dev_kfree_skb_irq(struct sk_buff *skb) { - unsigned long flags; + if (atomic_dec_and_test(&skb->users)) { + struct softnet_data *sd; + unsigned long flags; - local_irq_save(flags); - dev_hold(dev); - list_add_tail(&dev->poll_list, &__get_cpu_var(softnet_data).poll_list); - if (dev->quota < 0) - dev->quota += dev->weight; - else - dev->quota = dev->weight; - __raise_softirq_irqoff(NET_RX_SOFTIRQ); - local_irq_restore(flags); + local_irq_save(flags); + sd = &__get_cpu_var(softnet_data); + skb->next = sd->completion_queue; + sd->completion_queue = skb; + raise_softirq_irqoff(NET_TX_SOFTIRQ); + local_irq_restore(flags); + } } -EXPORT_SYMBOL(__netif_rx_schedule); +EXPORT_SYMBOL(dev_kfree_skb_irq); void dev_kfree_skb_any(struct sk_buff *skb) { @@ -1259,7 +1306,12 @@ void dev_kfree_skb_any(struct sk_buff *skb) EXPORT_SYMBOL(dev_kfree_skb_any); -/* Hot-plugging. */ +/** + * netif_device_detach - mark device as removed + * @dev: network device + * + * Mark device as removed from system and therefore no longer available. + */ void netif_device_detach(struct net_device *dev) { if (test_and_clear_bit(__LINK_STATE_PRESENT, &dev->state) && @@ -1269,6 +1321,12 @@ void netif_device_detach(struct net_device *dev) } EXPORT_SYMBOL(netif_device_detach); +/** + * netif_device_attach - mark device as attached + * @dev: network device + * + * Mark device as attached from system and restart if needed. 
+ */ void netif_device_attach(struct net_device *dev) { if (!test_and_set_bit(__LINK_STATE_PRESENT, &dev->state) && @@ -1501,18 +1559,6 @@ int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev) return 0; } -#define HARD_TX_LOCK(dev, cpu) { \ - if ((dev->features & NETIF_F_LLTX) == 0) { \ - netif_tx_lock(dev); \ - } \ -} - -#define HARD_TX_UNLOCK(dev) { \ - if ((dev->features & NETIF_F_LLTX) == 0) { \ - netif_tx_unlock(dev); \ - } \ -} - /** * dev_queue_xmit - transmit a buffer * @skb: buffer to transmit @@ -1730,7 +1776,7 @@ int netif_rx(struct sk_buff *skb) return NET_RX_SUCCESS; } - netif_rx_schedule(&queue->backlog_dev); + napi_schedule(&queue->backlog); goto enqueue; } @@ -1771,6 +1817,7 @@ static inline struct net_device *skb_bond(struct sk_buff *skb) return dev; } + static void net_tx_action(struct softirq_action *h) { struct softnet_data *sd = &__get_cpu_var(softnet_data); @@ -1927,7 +1974,7 @@ int netif_receive_skb(struct sk_buff *skb) __be16 type; /* if we've gotten here through NAPI, check netpoll */ - if (skb->dev->poll && netpoll_rx(skb)) + if (netpoll_receive_skb(skb)) return NET_RX_DROP; if (!skb->tstamp.tv64) @@ -2017,22 +2064,25 @@ int netif_receive_skb(struct sk_buff *skb) return ret; } -static int process_backlog(struct net_device *backlog_dev, int *budget) +static int process_backlog(struct napi_struct *napi, int quota) { int work = 0; - int quota = min(backlog_dev->quota, *budget); struct softnet_data *queue = &__get_cpu_var(softnet_data); unsigned long start_time = jiffies; - backlog_dev->weight = weight_p; - for (;;) { + napi->weight = weight_p; + do { struct sk_buff *skb; struct net_device *dev; local_irq_disable(); skb = __skb_dequeue(&queue->input_pkt_queue); - if (!skb) - goto job_done; + if (!skb) { + __napi_complete(napi); + local_irq_enable(); + break; + } + local_irq_enable(); dev = skb->dev; @@ -2040,67 +2090,86 @@ static int process_backlog(struct net_device *backlog_dev, int *budget) netif_receive_skb(skb); dev_put(dev); + } while (++work < quota && jiffies == start_time); - work++; - - if (work >= quota || jiffies - start_time > 1) - break; - - } - - backlog_dev->quota -= work; - *budget -= work; - return -1; - -job_done: - backlog_dev->quota -= work; - *budget -= work; + return work; +} - list_del(&backlog_dev->poll_list); - smp_mb__before_clear_bit(); - netif_poll_enable(backlog_dev); +/** + * __napi_schedule - schedule for receive + * @napi: entry to schedule + * + * The entry's receive function will be scheduled to run + */ +void fastcall __napi_schedule(struct napi_struct *n) +{ + unsigned long flags; - local_irq_enable(); - return 0; + local_irq_save(flags); + list_add_tail(&n->poll_list, &__get_cpu_var(softnet_data).poll_list); + __raise_softirq_irqoff(NET_RX_SOFTIRQ); + local_irq_restore(flags); } +EXPORT_SYMBOL(__napi_schedule); + static void net_rx_action(struct softirq_action *h) { - struct softnet_data *queue = &__get_cpu_var(softnet_data); + struct list_head *list = &__get_cpu_var(softnet_data).poll_list; unsigned long start_time = jiffies; int budget = netdev_budget; void *have; local_irq_disable(); - while (!list_empty(&queue->poll_list)) { - struct net_device *dev; + while (!list_empty(list)) { + struct napi_struct *n; + int work, weight; - if (budget <= 0 || jiffies - start_time > 1) + /* If softirq window is exhuasted then punt. + * + * Note that this is a slight policy change from the + * previous NAPI code, which would allow up to 2 + * jiffies to pass before breaking out. 
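process_backlog() above doubles as the reference conversion from the old dev->poll()/quota scheme to struct napi_struct. A driver-side sketch of the same pattern is shown below; the demo_* helpers are assumed device-specific functions, and it relies on the NAPI helpers added alongside this change in netdevice.h (netif_napi_add(), napi_schedule(), napi_complete()) being available.

#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/interrupt.h>

struct demo_priv {
	struct napi_struct napi;
	struct net_device *dev;
};

/* Assumed device-specific helpers, declared only to keep the sketch
 * self-contained. */
static int demo_rx_one(struct demo_priv *priv);
static void demo_enable_rx_irq(struct demo_priv *priv);
static void demo_disable_rx_irq(struct demo_priv *priv);

/* Poll handler: consume up to 'budget' packets and return how many were
 * used.  Returning less than 'budget' means the ring is drained and we
 * left polled mode ourselves, matching the ownership rule enforced by
 * net_rx_action() above. */
static int demo_poll(struct napi_struct *napi, int budget)
{
	struct demo_priv *priv = container_of(napi, struct demo_priv, napi);
	int work = 0;

	/* demo_rx_one(): pull one frame off the RX ring, hand it to
	 * netif_receive_skb(), return 0 when the ring is empty. */
	while (work < budget && demo_rx_one(priv))
		work++;

	if (work < budget) {
		napi_complete(napi);		/* leave the poll list */
		demo_enable_rx_irq(priv);	/* re-arm the device */
	}
	return work;
}

static irqreturn_t demo_interrupt(int irq, void *dev_id)
{
	struct demo_priv *priv = dev_id;

	demo_disable_rx_irq(priv);
	napi_schedule(&priv->napi);	/* queue us for net_rx_action() */
	return IRQ_HANDLED;
}

/* At probe time: netif_napi_add(dev, &priv->napi, demo_poll, 64); */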
The test + * used to be "jiffies - start_time > 1". + */ + if (unlikely(budget <= 0 || jiffies != start_time)) goto softnet_break; local_irq_enable(); - dev = list_entry(queue->poll_list.next, - struct net_device, poll_list); - have = netpoll_poll_lock(dev); + /* Even though interrupts have been re-enabled, this + * access is safe because interrupts can only add new + * entries to the tail of this list, and only ->poll() + * calls can remove this head entry from the list. + */ + n = list_entry(list->next, struct napi_struct, poll_list); - if (dev->quota <= 0 || dev->poll(dev, &budget)) { - netpoll_poll_unlock(have); - local_irq_disable(); - list_move_tail(&dev->poll_list, &queue->poll_list); - if (dev->quota < 0) - dev->quota += dev->weight; - else - dev->quota = dev->weight; - } else { - netpoll_poll_unlock(have); - dev_put(dev); - local_irq_disable(); - } + have = netpoll_poll_lock(n); + + weight = n->weight; + + work = n->poll(n, weight); + + WARN_ON_ONCE(work > weight); + + budget -= work; + + local_irq_disable(); + + /* Drivers must not modify the NAPI state if they + * consume the entire weight. In such cases this code + * still "owns" the NAPI instance and therefore can + * move the instance around on the list at-will. + */ + if (unlikely(work == weight)) + list_move_tail(&n->poll_list, list); + + netpoll_poll_unlock(have); } out: local_irq_enable(); + #ifdef CONFIG_NET_DMA /* * There may not be any more sk_buffs coming right now, so push @@ -2115,6 +2184,7 @@ static void net_rx_action(struct softirq_action *h) } } #endif + return; softnet_break: @@ -2154,7 +2224,7 @@ int register_gifconf(unsigned int family, gifconf_func_t * gifconf) * match. --pb */ -static int dev_ifname(struct ifreq __user *arg) +static int dev_ifname(struct net *net, struct ifreq __user *arg) { struct net_device *dev; struct ifreq ifr; @@ -2167,7 +2237,7 @@ static int dev_ifname(struct ifreq __user *arg) return -EFAULT; read_lock(&dev_base_lock); - dev = __dev_get_by_index(ifr.ifr_ifindex); + dev = __dev_get_by_index(net, ifr.ifr_ifindex); if (!dev) { read_unlock(&dev_base_lock); return -ENODEV; @@ -2187,7 +2257,7 @@ static int dev_ifname(struct ifreq __user *arg) * Thus we will need a 'compatibility mode'. */ -static int dev_ifconf(char __user *arg) +static int dev_ifconf(struct net *net, char __user *arg) { struct ifconf ifc; struct net_device *dev; @@ -2211,7 +2281,7 @@ static int dev_ifconf(char __user *arg) */ total = 0; - for_each_netdev(dev) { + for_each_netdev(net, dev) { for (i = 0; i < NPROTO; i++) { if (gifconf_list[i]) { int done; @@ -2245,6 +2315,7 @@ static int dev_ifconf(char __user *arg) */ void *dev_seq_start(struct seq_file *seq, loff_t *pos) { + struct net *net = seq->private; loff_t off; struct net_device *dev; @@ -2253,7 +2324,7 @@ void *dev_seq_start(struct seq_file *seq, loff_t *pos) return SEQ_START_TOKEN; off = 1; - for_each_netdev(dev) + for_each_netdev(net, dev) if (off++ == *pos) return dev; @@ -2262,9 +2333,10 @@ void *dev_seq_start(struct seq_file *seq, loff_t *pos) void *dev_seq_next(struct seq_file *seq, void *v, loff_t *pos) { + struct net *net = seq->private; ++*pos; return v == SEQ_START_TOKEN ? 
- first_net_device() : next_net_device((struct net_device *)v); + first_net_device(net) : next_net_device((struct net_device *)v); } void dev_seq_stop(struct seq_file *seq, void *v) @@ -2360,7 +2432,26 @@ static const struct seq_operations dev_seq_ops = { static int dev_seq_open(struct inode *inode, struct file *file) { - return seq_open(file, &dev_seq_ops); + struct seq_file *seq; + int res; + res = seq_open(file, &dev_seq_ops); + if (!res) { + seq = file->private_data; + seq->private = get_proc_net(inode); + if (!seq->private) { + seq_release(inode, file); + res = -ENXIO; + } + } + return res; +} + +static int dev_seq_release(struct inode *inode, struct file *file) +{ + struct seq_file *seq = file->private_data; + struct net *net = seq->private; + put_net(net); + return seq_release(inode, file); } static const struct file_operations dev_seq_fops = { @@ -2368,7 +2459,7 @@ static const struct file_operations dev_seq_fops = { .open = dev_seq_open, .read = seq_read, .llseek = seq_lseek, - .release = seq_release, + .release = dev_seq_release, }; static const struct seq_operations softnet_seq_ops = { @@ -2520,30 +2611,49 @@ static const struct file_operations ptype_seq_fops = { }; -static int __init dev_proc_init(void) +static int __net_init dev_proc_net_init(struct net *net) { int rc = -ENOMEM; - if (!proc_net_fops_create("dev", S_IRUGO, &dev_seq_fops)) + if (!proc_net_fops_create(net, "dev", S_IRUGO, &dev_seq_fops)) goto out; - if (!proc_net_fops_create("softnet_stat", S_IRUGO, &softnet_seq_fops)) + if (!proc_net_fops_create(net, "softnet_stat", S_IRUGO, &softnet_seq_fops)) goto out_dev; - if (!proc_net_fops_create("ptype", S_IRUGO, &ptype_seq_fops)) - goto out_dev2; - - if (wext_proc_init()) + if (!proc_net_fops_create(net, "ptype", S_IRUGO, &ptype_seq_fops)) goto out_softnet; + + if (wext_proc_init(net)) + goto out_ptype; rc = 0; out: return rc; +out_ptype: + proc_net_remove(net, "ptype"); out_softnet: - proc_net_remove("ptype"); -out_dev2: - proc_net_remove("softnet_stat"); + proc_net_remove(net, "softnet_stat"); out_dev: - proc_net_remove("dev"); + proc_net_remove(net, "dev"); goto out; } + +static void __net_exit dev_proc_net_exit(struct net *net) +{ + wext_proc_exit(net); + + proc_net_remove(net, "ptype"); + proc_net_remove(net, "softnet_stat"); + proc_net_remove(net, "dev"); +} + +static struct pernet_operations __net_initdata dev_proc_ops = { + .init = dev_proc_net_init, + .exit = dev_proc_net_exit, +}; + +static int __init dev_proc_init(void) +{ + return register_pernet_subsys(&dev_proc_ops); +} #else #define dev_proc_init() 0 #endif /* CONFIG_PROC_FS */ @@ -2906,8 +3016,7 @@ int dev_change_flags(struct net_device *dev, unsigned flags) if (dev->flags & IFF_UP && ((old_flags ^ dev->flags) &~ (IFF_UP | IFF_PROMISC | IFF_ALLMULTI | IFF_VOLATILE))) - raw_notifier_call_chain(&netdev_chain, - NETDEV_CHANGE, dev); + call_netdevice_notifiers(NETDEV_CHANGE, dev); if ((flags ^ dev->gflags) & IFF_PROMISC) { int inc = (flags & IFF_PROMISC) ? 
+1 : -1; @@ -2953,8 +3062,7 @@ int dev_set_mtu(struct net_device *dev, int new_mtu) else dev->mtu = new_mtu; if (!err && dev->flags & IFF_UP) - raw_notifier_call_chain(&netdev_chain, - NETDEV_CHANGEMTU, dev); + call_netdevice_notifiers(NETDEV_CHANGEMTU, dev); return err; } @@ -2970,18 +3078,17 @@ int dev_set_mac_address(struct net_device *dev, struct sockaddr *sa) return -ENODEV; err = dev->set_mac_address(dev, sa); if (!err) - raw_notifier_call_chain(&netdev_chain, - NETDEV_CHANGEADDR, dev); + call_netdevice_notifiers(NETDEV_CHANGEADDR, dev); return err; } /* - * Perform the SIOCxIFxxx calls. + * Perform the SIOCxIFxxx calls, inside read_lock(dev_base_lock) */ -static int dev_ifsioc(struct ifreq *ifr, unsigned int cmd) +static int dev_ifsioc_locked(struct net *net, struct ifreq *ifr, unsigned int cmd) { int err; - struct net_device *dev = __dev_get_by_name(ifr->ifr_name); + struct net_device *dev = __dev_get_by_name(net, ifr->ifr_name); if (!dev) return -ENODEV; @@ -2991,25 +3098,15 @@ static int dev_ifsioc(struct ifreq *ifr, unsigned int cmd) ifr->ifr_flags = dev_get_flags(dev); return 0; - case SIOCSIFFLAGS: /* Set interface flags */ - return dev_change_flags(dev, ifr->ifr_flags); - case SIOCGIFMETRIC: /* Get the metric on the interface (currently unused) */ ifr->ifr_metric = 0; return 0; - case SIOCSIFMETRIC: /* Set the metric on the interface - (currently unused) */ - return -EOPNOTSUPP; - case SIOCGIFMTU: /* Get the MTU of a device */ ifr->ifr_mtu = dev->mtu; return 0; - case SIOCSIFMTU: /* Set the MTU of a device */ - return dev_set_mtu(dev, ifr->ifr_mtu); - case SIOCGIFHWADDR: if (!dev->addr_len) memset(ifr->ifr_hwaddr.sa_data, 0, sizeof ifr->ifr_hwaddr.sa_data); @@ -3019,17 +3116,9 @@ static int dev_ifsioc(struct ifreq *ifr, unsigned int cmd) ifr->ifr_hwaddr.sa_family = dev->type; return 0; - case SIOCSIFHWADDR: - return dev_set_mac_address(dev, &ifr->ifr_hwaddr); - - case SIOCSIFHWBROADCAST: - if (ifr->ifr_hwaddr.sa_family != dev->type) - return -EINVAL; - memcpy(dev->broadcast, ifr->ifr_hwaddr.sa_data, - min(sizeof ifr->ifr_hwaddr.sa_data, (size_t) dev->addr_len)); - raw_notifier_call_chain(&netdev_chain, - NETDEV_CHANGEADDR, dev); - return 0; + case SIOCGIFSLAVE: + err = -EINVAL; + break; case SIOCGIFMAP: ifr->ifr_map.mem_start = dev->mem_start; @@ -3040,6 +3129,59 @@ static int dev_ifsioc(struct ifreq *ifr, unsigned int cmd) ifr->ifr_map.port = dev->if_port; return 0; + case SIOCGIFINDEX: + ifr->ifr_ifindex = dev->ifindex; + return 0; + + case SIOCGIFTXQLEN: + ifr->ifr_qlen = dev->tx_queue_len; + return 0; + + default: + /* dev_ioctl() should ensure this case + * is never reached + */ + WARN_ON(1); + err = -EINVAL; + break; + + } + return err; +} + +/* + * Perform the SIOCxIFxxx calls, inside rtnl_lock() + */ +static int dev_ifsioc(struct net *net, struct ifreq *ifr, unsigned int cmd) +{ + int err; + struct net_device *dev = __dev_get_by_name(net, ifr->ifr_name); + + if (!dev) + return -ENODEV; + + switch (cmd) { + case SIOCSIFFLAGS: /* Set interface flags */ + return dev_change_flags(dev, ifr->ifr_flags); + + case SIOCSIFMETRIC: /* Set the metric on the interface + (currently unused) */ + return -EOPNOTSUPP; + + case SIOCSIFMTU: /* Set the MTU of a device */ + return dev_set_mtu(dev, ifr->ifr_mtu); + + case SIOCSIFHWADDR: + return dev_set_mac_address(dev, &ifr->ifr_hwaddr); + + case SIOCSIFHWBROADCAST: + if (ifr->ifr_hwaddr.sa_family != dev->type) + return -EINVAL; + memcpy(dev->broadcast, ifr->ifr_hwaddr.sa_data, + min(sizeof ifr->ifr_hwaddr.sa_data, (size_t) 
dev->addr_len)); + call_netdevice_notifiers(NETDEV_CHANGEADDR, dev); + return 0; + case SIOCSIFMAP: if (dev->set_config) { if (!netif_device_present(dev)) @@ -3066,14 +3208,6 @@ static int dev_ifsioc(struct ifreq *ifr, unsigned int cmd) return dev_mc_delete(dev, ifr->ifr_hwaddr.sa_data, dev->addr_len, 1); - case SIOCGIFINDEX: - ifr->ifr_ifindex = dev->ifindex; - return 0; - - case SIOCGIFTXQLEN: - ifr->ifr_qlen = dev->tx_queue_len; - return 0; - case SIOCSIFTXQLEN: if (ifr->ifr_qlen < 0) return -EINVAL; @@ -3134,7 +3268,7 @@ static int dev_ifsioc(struct ifreq *ifr, unsigned int cmd) * positive or a negative errno code on error. */ -int dev_ioctl(unsigned int cmd, void __user *arg) +int dev_ioctl(struct net *net, unsigned int cmd, void __user *arg) { struct ifreq ifr; int ret; @@ -3147,12 +3281,12 @@ int dev_ioctl(unsigned int cmd, void __user *arg) if (cmd == SIOCGIFCONF) { rtnl_lock(); - ret = dev_ifconf((char __user *) arg); + ret = dev_ifconf(net, (char __user *) arg); rtnl_unlock(); return ret; } if (cmd == SIOCGIFNAME) - return dev_ifname((struct ifreq __user *)arg); + return dev_ifname(net, (struct ifreq __user *)arg); if (copy_from_user(&ifr, arg, sizeof(struct ifreq))) return -EFAULT; @@ -3182,9 +3316,9 @@ int dev_ioctl(unsigned int cmd, void __user *arg) case SIOCGIFMAP: case SIOCGIFINDEX: case SIOCGIFTXQLEN: - dev_load(ifr.ifr_name); + dev_load(net, ifr.ifr_name); read_lock(&dev_base_lock); - ret = dev_ifsioc(&ifr, cmd); + ret = dev_ifsioc_locked(net, &ifr, cmd); read_unlock(&dev_base_lock); if (!ret) { if (colon) @@ -3196,9 +3330,9 @@ int dev_ioctl(unsigned int cmd, void __user *arg) return ret; case SIOCETHTOOL: - dev_load(ifr.ifr_name); + dev_load(net, ifr.ifr_name); rtnl_lock(); - ret = dev_ethtool(&ifr); + ret = dev_ethtool(net, &ifr); rtnl_unlock(); if (!ret) { if (colon) @@ -3220,9 +3354,9 @@ int dev_ioctl(unsigned int cmd, void __user *arg) case SIOCSIFNAME: if (!capable(CAP_NET_ADMIN)) return -EPERM; - dev_load(ifr.ifr_name); + dev_load(net, ifr.ifr_name); rtnl_lock(); - ret = dev_ifsioc(&ifr, cmd); + ret = dev_ifsioc(net, &ifr, cmd); rtnl_unlock(); if (!ret) { if (colon) @@ -3261,9 +3395,9 @@ int dev_ioctl(unsigned int cmd, void __user *arg) /* fall through */ case SIOCBONDSLAVEINFOQUERY: case SIOCBONDINFOQUERY: - dev_load(ifr.ifr_name); + dev_load(net, ifr.ifr_name); rtnl_lock(); - ret = dev_ifsioc(&ifr, cmd); + ret = dev_ifsioc(net, &ifr, cmd); rtnl_unlock(); return ret; @@ -3283,9 +3417,9 @@ int dev_ioctl(unsigned int cmd, void __user *arg) if (cmd == SIOCWANDEV || (cmd >= SIOCDEVPRIVATE && cmd <= SIOCDEVPRIVATE + 15)) { - dev_load(ifr.ifr_name); + dev_load(net, ifr.ifr_name); rtnl_lock(); - ret = dev_ifsioc(&ifr, cmd); + ret = dev_ifsioc(net, &ifr, cmd); rtnl_unlock(); if (!ret && copy_to_user(arg, &ifr, sizeof(struct ifreq))) @@ -3294,7 +3428,7 @@ int dev_ioctl(unsigned int cmd, void __user *arg) } /* Take care of Wireless Extensions */ if (cmd >= SIOCIWFIRST && cmd <= SIOCIWLAST) - return wext_handle_ioctl(&ifr, cmd, arg); + return wext_handle_ioctl(net, &ifr, cmd, arg); return -EINVAL; } } @@ -3307,19 +3441,17 @@ int dev_ioctl(unsigned int cmd, void __user *arg) * number. The caller must hold the rtnl semaphore or the * dev_base_lock to be sure it remains unique. 
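From user space the read-only half of this split is still reached through an ordinary ioctl(); the small illustrative program below asks for an interface index and is served by dev_ifsioc_locked() under read_lock(&dev_base_lock) rather than under the rtnl.

/* build: cc -o ifindex ifindex.c   run: ./ifindex eth0 */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>

int main(int argc, char **argv)
{
	struct ifreq ifr;
	int fd;

	if (argc < 2)
		return 1;
	fd = socket(AF_INET, SOCK_DGRAM, 0);
	if (fd < 0)
		return 1;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, argv[1], IFNAMSIZ - 1);

	if (ioctl(fd, SIOCGIFINDEX, &ifr) < 0) {
		perror("SIOCGIFINDEX");
		close(fd);
		return 1;
	}
	printf("%s: ifindex %d\n", ifr.ifr_name, ifr.ifr_ifindex);
	close(fd);
	return 0;
}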
*/ -static int dev_new_index(void) +static int dev_new_index(struct net *net) { static int ifindex; for (;;) { if (++ifindex <= 0) ifindex = 1; - if (!__dev_get_by_index(ifindex)) + if (!__dev_get_by_index(net, ifindex)) return ifindex; } } -static int dev_boot_phase = 1; - /* Delayed registration/unregisteration */ static DEFINE_SPINLOCK(net_todo_list_lock); static struct list_head net_todo_list = LIST_HEAD_INIT(net_todo_list); @@ -3353,6 +3485,7 @@ int register_netdevice(struct net_device *dev) struct hlist_head *head; struct hlist_node *p; int ret; + struct net *net; BUG_ON(dev_boot_phase); ASSERT_RTNL(); @@ -3361,6 +3494,8 @@ int register_netdevice(struct net_device *dev) /* When net_device's are persistent, this will be fatal. */ BUG_ON(dev->reg_state != NETREG_UNINITIALIZED); + BUG_ON(!dev->nd_net); + net = dev->nd_net; spin_lock_init(&dev->queue_lock); spin_lock_init(&dev->_xmit_lock); @@ -3385,12 +3520,12 @@ int register_netdevice(struct net_device *dev) goto err_uninit; } - dev->ifindex = dev_new_index(); + dev->ifindex = dev_new_index(net); if (dev->iflink == -1) dev->iflink = dev->ifindex; /* Check for existence of name */ - head = dev_name_hash(dev->name); + head = dev_name_hash(net, dev->name); hlist_for_each(p, head) { struct net_device *d = hlist_entry(p, struct net_device, name_hlist); @@ -3446,15 +3581,7 @@ int register_netdevice(struct net_device *dev) } } - /* - * nil rebuild_header routine, - * that should be never called and used as just bug trap. - */ - - if (!dev->rebuild_header) - dev->rebuild_header = default_rebuild_header; - - ret = netdev_register_sysfs(dev); + ret = netdev_register_kobject(dev); if (ret) goto err_uninit; dev->reg_state = NETREG_REGISTERED; @@ -3467,15 +3594,11 @@ int register_netdevice(struct net_device *dev) set_bit(__LINK_STATE_PRESENT, &dev->state); dev_init_scheduler(dev); - write_lock_bh(&dev_base_lock); - list_add_tail(&dev->dev_list, &dev_base_head); - hlist_add_head(&dev->name_hlist, head); - hlist_add_head(&dev->index_hlist, dev_index_hash(dev->ifindex)); dev_hold(dev); - write_unlock_bh(&dev_base_lock); + list_netdevice(dev); /* Notify protocols, that a new device appeared. 
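A minimal, purely hypothetical virtual-device module showing the registration path that register_netdevice() above implements; all demo_* names are invented, and register_netdev() is the rtnl-taking wrapper around it.

#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>

static struct net_device *demo_dev;

static int demo_open(struct net_device *dev)
{
	netif_start_queue(dev);
	return 0;
}

static int demo_stop(struct net_device *dev)
{
	netif_stop_queue(dev);
	return 0;
}

static int demo_xmit(struct sk_buff *skb, struct net_device *dev)
{
	dev_kfree_skb(skb);		/* pretend the frame was sent */
	return NETDEV_TX_OK;
}

static void demo_setup(struct net_device *dev)
{
	ether_setup(dev);		/* sane Ethernet defaults */
	dev->open = demo_open;
	dev->stop = demo_stop;
	dev->hard_start_xmit = demo_xmit;
	random_ether_addr(dev->dev_addr);
}

static int __init demo_init(void)
{
	int err;

	/* alloc_netdev() leaves dev->nd_net pointing at init_net. */
	demo_dev = alloc_netdev(0, "demo%d", demo_setup);
	if (!demo_dev)
		return -ENOMEM;

	err = register_netdev(demo_dev);
	if (err)
		free_netdev(demo_dev);
	return err;
}

static void __exit demo_exit(void)
{
	unregister_netdev(demo_dev);
	free_netdev(demo_dev);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");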
*/ - ret = raw_notifier_call_chain(&netdev_chain, NETDEV_REGISTER, dev); + ret = call_netdevice_notifiers(NETDEV_REGISTER, dev); ret = notifier_to_errno(ret); if (ret) unregister_netdevice(dev); @@ -3546,8 +3669,7 @@ static void netdev_wait_allrefs(struct net_device *dev) rtnl_lock(); /* Rebroadcast unregister notification */ - raw_notifier_call_chain(&netdev_chain, - NETDEV_UNREGISTER, dev); + call_netdevice_notifiers(NETDEV_UNREGISTER, dev); if (test_bit(__LINK_STATE_LINKWATCH_PENDING, &dev->state)) { @@ -3692,6 +3814,7 @@ struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name, dev = (struct net_device *) (((long)p + NETDEV_ALIGN_CONST) & ~NETDEV_ALIGN_CONST); dev->padded = (char *)dev - (char *)p; + dev->nd_net = &init_net; if (sizeof_priv) { dev->priv = ((char *)dev + @@ -3704,6 +3827,7 @@ struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name, dev->egress_subqueue_count = queue_count; dev->get_stats = internal_stats; + netpoll_netdev_init(dev); setup(dev); strcpy(dev->name, name); return dev; @@ -3720,7 +3844,6 @@ EXPORT_SYMBOL(alloc_netdev_mq); */ void free_netdev(struct net_device *dev) { -#ifdef CONFIG_SYSFS /* Compatibility with error handling in drivers */ if (dev->reg_state == NETREG_UNINITIALIZED) { kfree((char *)dev - dev->padded); @@ -3732,9 +3855,6 @@ void free_netdev(struct net_device *dev) /* will free via device release */ put_device(&dev->dev); -#else - kfree((char *)dev - dev->padded); -#endif } /* Synchronize with packet receive processing. */ @@ -3773,15 +3893,10 @@ void unregister_netdevice(struct net_device *dev) BUG_ON(dev->reg_state != NETREG_REGISTERED); /* If device is running, close it first. */ - if (dev->flags & IFF_UP) - dev_close(dev); + dev_close(dev); /* And unlink it from device chain. */ - write_lock_bh(&dev_base_lock); - list_del(&dev->dev_list); - hlist_del(&dev->name_hlist); - hlist_del(&dev->index_hlist); - write_unlock_bh(&dev_base_lock); + unlist_netdevice(dev); dev->reg_state = NETREG_UNREGISTERING; @@ -3794,7 +3909,7 @@ void unregister_netdevice(struct net_device *dev) /* Notify protocols, that we are about to destroy this device. They should clean all the things. */ - raw_notifier_call_chain(&netdev_chain, NETDEV_UNREGISTER, dev); + call_netdevice_notifiers(NETDEV_UNREGISTER, dev); /* * Flush the unicast and multicast chains @@ -3807,8 +3922,8 @@ void unregister_netdevice(struct net_device *dev) /* Notifier chain MUST detach us from master device. */ BUG_TRAP(!dev->master); - /* Remove entries from sysfs */ - netdev_unregister_sysfs(dev); + /* Remove entries from kobject tree */ + netdev_unregister_kobject(dev); /* Finish processing unregister after unlock */ net_set_todo(dev); @@ -3839,6 +3954,121 @@ void unregister_netdev(struct net_device *dev) EXPORT_SYMBOL(unregister_netdev); +/** + * dev_change_net_namespace - move device to different nethost namespace + * @dev: device + * @net: network namespace + * @pat: If not NULL name pattern to try if the current device name + * is already taken in the destination network namespace. + * + * This function shuts down a device interface and moves it + * to a new network namespace. On success 0 is returned, on + * a failure a netagive errno code is returned. + * + * Callers must hold the rtnl semaphore. + */ + +int dev_change_net_namespace(struct net_device *dev, struct net *net, const char *pat) +{ + char buf[IFNAMSIZ]; + const char *destname; + int err; + + ASSERT_RTNL(); + + /* Don't allow namespace local devices to be moved. 
+	 */
+	err = -EINVAL;
+	if (dev->features & NETIF_F_NETNS_LOCAL)
+		goto out;
+
+	/* Ensure the device has been registered */
+	err = -EINVAL;
+	if (dev->reg_state != NETREG_REGISTERED)
+		goto out;
+
+	/* Get out if there is nothing to do */
+	err = 0;
+	if (dev->nd_net == net)
+		goto out;
+
+	/* Pick the destination device name, and ensure
+	 * we can use it in the destination network namespace.
+	 */
+	err = -EEXIST;
+	destname = dev->name;
+	if (__dev_get_by_name(net, destname)) {
+		/* We get here if we can't use the current device name */
+		if (!pat)
+			goto out;
+		if (!dev_valid_name(pat))
+			goto out;
+		if (strchr(pat, '%')) {
+			if (__dev_alloc_name(net, pat, buf) < 0)
+				goto out;
+			destname = buf;
+		} else
+			destname = pat;
+		if (__dev_get_by_name(net, destname))
+			goto out;
+	}
+
+	/*
+	 * And now a mini version of register_netdevice/unregister_netdevice.
+	 */
+
+	/* If device is running close it first. */
+	dev_close(dev);
+
+	/* And unlink it from device chain */
+	err = -ENODEV;
+	unlist_netdevice(dev);
+
+	synchronize_net();
+
+	/* Shutdown queueing discipline. */
+	dev_shutdown(dev);
+
+	/* Notify protocols, that we are about to destroy
+	   this device. They should clean all the things.
+	*/
+	call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
+
+	/*
+	 *	Flush the unicast and multicast chains
+	 */
+	dev_addr_discard(dev);
+
+	/* Actually switch the network namespace */
+	dev->nd_net = net;
+
+	/* Assign the new device name */
+	if (destname != dev->name)
+		strcpy(dev->name, destname);
+
+	/* If there is an ifindex conflict assign a new one */
+	if (__dev_get_by_index(net, dev->ifindex)) {
+		int iflink = (dev->iflink == dev->ifindex);
+		dev->ifindex = dev_new_index(net);
+		if (iflink)
+			dev->iflink = dev->ifindex;
+	}
+
+	/* Fixup kobjects */
+	err = device_rename(&dev->dev, dev->name);
+	WARN_ON(err);
+
+	/* Add the device back in the hashes */
+	list_netdevice(dev);
+
+	/* Notify protocols, that a new device appeared.
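A sketch of how an rtnl-side caller is expected to use the new helper; the surrounding code and the "eth%d" fallback pattern are assumptions, not part of this patch.

#include <linux/netdevice.h>
#include <linux/rtnetlink.h>
#include <net/net_namespace.h>

/* Move 'dev' into 'net'; if its name is already taken there, let the
 * kernel renumber it using the "eth%d" pattern. */
static int demo_move_dev(struct net_device *dev, struct net *net)
{
	int err;

	rtnl_lock();
	err = dev_change_net_namespace(dev, net, "eth%d");
	rtnl_unlock();
	return err;
}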
*/ + call_netdevice_notifiers(NETDEV_REGISTER, dev); + + synchronize_net(); + err = 0; +out: + return err; +} + static int dev_cpu_callback(struct notifier_block *nfb, unsigned long action, void *ocpu) @@ -4032,6 +4262,82 @@ int netdev_compute_features(unsigned long all, unsigned long one) } EXPORT_SYMBOL(netdev_compute_features); +static struct hlist_head *netdev_create_hash(void) +{ + int i; + struct hlist_head *hash; + + hash = kmalloc(sizeof(*hash) * NETDEV_HASHENTRIES, GFP_KERNEL); + if (hash != NULL) + for (i = 0; i < NETDEV_HASHENTRIES; i++) + INIT_HLIST_HEAD(&hash[i]); + + return hash; +} + +/* Initialize per network namespace state */ +static int __net_init netdev_init(struct net *net) +{ + INIT_LIST_HEAD(&net->dev_base_head); + rwlock_init(&dev_base_lock); + + net->dev_name_head = netdev_create_hash(); + if (net->dev_name_head == NULL) + goto err_name; + + net->dev_index_head = netdev_create_hash(); + if (net->dev_index_head == NULL) + goto err_idx; + + return 0; + +err_idx: + kfree(net->dev_name_head); +err_name: + return -ENOMEM; +} + +static void __net_exit netdev_exit(struct net *net) +{ + kfree(net->dev_name_head); + kfree(net->dev_index_head); +} + +static struct pernet_operations __net_initdata netdev_net_ops = { + .init = netdev_init, + .exit = netdev_exit, +}; + +static void __net_exit default_device_exit(struct net *net) +{ + struct net_device *dev, *next; + /* + * Push all migratable of the network devices back to the + * initial network namespace + */ + rtnl_lock(); + for_each_netdev_safe(net, dev, next) { + int err; + + /* Ignore unmoveable devices (i.e. loopback) */ + if (dev->features & NETIF_F_NETNS_LOCAL) + continue; + + /* Push remaing network devices to init_net */ + err = dev_change_net_namespace(dev, &init_net, "dev%d"); + if (err) { + printk(KERN_WARNING "%s: failed to move %s to init_net: %d\n", + __func__, dev->name, err); + unregister_netdevice(dev); + } + } + rtnl_unlock(); +} + +static struct pernet_operations __net_initdata default_device_ops = { + .exit = default_device_exit, +}; + /* * Initialize the DEV module. At boot time this walks the device list and * unhooks any devices that fail to initialise (normally hardware not @@ -4052,18 +4358,18 @@ static int __init net_dev_init(void) if (dev_proc_init()) goto out; - if (netdev_sysfs_init()) + if (netdev_kobject_init()) goto out; INIT_LIST_HEAD(&ptype_all); for (i = 0; i < 16; i++) INIT_LIST_HEAD(&ptype_base[i]); - for (i = 0; i < ARRAY_SIZE(dev_name_head); i++) - INIT_HLIST_HEAD(&dev_name_head[i]); + if (register_pernet_subsys(&netdev_net_ops)) + goto out; - for (i = 0; i < ARRAY_SIZE(dev_index_head); i++) - INIT_HLIST_HEAD(&dev_index_head[i]); + if (register_pernet_device(&default_device_ops)) + goto out; /* * Initialise the packet receive queues. 
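netdev_net_ops and default_device_ops above follow the generic pernet_operations pattern. A stripped-down, hypothetical subsystem doing the same for its own /proc/net file could look like the sketch below; the demo names and the seq_file implementation behind demo_seq_fops are assumed, as is the unregister counterpart exported by net_namespace.c.

#include <linux/module.h>
#include <linux/proc_fs.h>
#include <net/net_namespace.h>

/* Assumed: an ordinary seq_file implementation defined elsewhere. */
extern const struct file_operations demo_seq_fops;

static int __net_init demo_net_init(struct net *net)
{
	/* One /proc/net/demo per namespace, just like "dev" and "dev_mcast". */
	if (!proc_net_fops_create(net, "demo", S_IRUGO, &demo_seq_fops))
		return -ENOMEM;
	return 0;
}

static void __net_exit demo_net_exit(struct net *net)
{
	proc_net_remove(net, "demo");
}

static struct pernet_operations demo_net_ops = {
	.init = demo_net_init,
	.exit = demo_net_exit,
};

static int __init demo_init(void)
{
	/* init() runs for every namespace that already exists and for each
	 * one created later; exit() runs when a namespace dies or when the
	 * operations are unregistered. */
	return register_pernet_subsys(&demo_net_ops);
}

static void __exit demo_exit(void)
{
	unregister_pernet_subsys(&demo_net_ops);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");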
@@ -4076,10 +4382,9 @@ static int __init net_dev_init(void) skb_queue_head_init(&queue->input_pkt_queue); queue->completion_queue = NULL; INIT_LIST_HEAD(&queue->poll_list); - set_bit(__LINK_STATE_START, &queue->backlog_dev.state); - queue->backlog_dev.weight = weight_p; - queue->backlog_dev.poll = process_backlog; - atomic_set(&queue->backlog_dev.refcnt, 1); + + queue->backlog.poll = process_backlog; + queue->backlog.weight = weight_p; } netdev_dma_register(); diff --git a/net/core/dev_mcast.c b/net/core/dev_mcast.c index 20330c572610968af449837b1f615a59fd42f9df..15241cf48af8a8b835a502978e1777c6c2d8ab12 100644 --- a/net/core/dev_mcast.c +++ b/net/core/dev_mcast.c @@ -41,6 +41,7 @@ #include #include #include +#include #include #include #include @@ -186,11 +187,12 @@ EXPORT_SYMBOL(dev_mc_unsync); #ifdef CONFIG_PROC_FS static void *dev_mc_seq_start(struct seq_file *seq, loff_t *pos) { + struct net *net = seq->private; struct net_device *dev; loff_t off = 0; read_lock(&dev_base_lock); - for_each_netdev(dev) { + for_each_netdev(net, dev) { if (off++ == *pos) return dev; } @@ -239,7 +241,26 @@ static const struct seq_operations dev_mc_seq_ops = { static int dev_mc_seq_open(struct inode *inode, struct file *file) { - return seq_open(file, &dev_mc_seq_ops); + struct seq_file *seq; + int res; + res = seq_open(file, &dev_mc_seq_ops); + if (!res) { + seq = file->private_data; + seq->private = get_proc_net(inode); + if (!seq->private) { + seq_release(inode, file); + res = -ENXIO; + } + } + return res; +} + +static int dev_mc_seq_release(struct inode *inode, struct file *file) +{ + struct seq_file *seq = file->private_data; + struct net *net = seq->private; + put_net(net); + return seq_release(inode, file); } static const struct file_operations dev_mc_seq_fops = { @@ -247,14 +268,31 @@ static const struct file_operations dev_mc_seq_fops = { .open = dev_mc_seq_open, .read = seq_read, .llseek = seq_lseek, - .release = seq_release, + .release = dev_mc_seq_release, }; #endif +static int __net_init dev_mc_net_init(struct net *net) +{ + if (!proc_net_fops_create(net, "dev_mcast", 0, &dev_mc_seq_fops)) + return -ENOMEM; + return 0; +} + +static void __net_exit dev_mc_net_exit(struct net *net) +{ + proc_net_remove(net, "dev_mcast"); +} + +static struct pernet_operations __net_initdata dev_mc_net_ops = { + .init = dev_mc_net_init, + .exit = dev_mc_net_exit, +}; + void __init dev_mcast_init(void) { - proc_net_fops_create("dev_mcast", 0, &dev_mc_seq_fops); + register_pernet_subsys(&dev_mc_net_ops); } EXPORT_SYMBOL(dev_mc_add); diff --git a/net/core/dst.c b/net/core/dst.c index c6a05879d58cd3c87fb854541bb3c3440d858de9..16958e64e577f8c12723f02129a5f1e9b1791fdb 100644 --- a/net/core/dst.c +++ b/net/core/dst.c @@ -9,59 +9,84 @@ #include #include #include +#include #include #include #include #include #include #include +#include +#include #include -/* Locking strategy: - * 1) Garbage collection state of dead destination cache - * entries is protected by dst_lock. - * 2) GC is run only from BH context, and is the only remover - * of entries. - * 3) Entries are added to the garbage list from both BH - * and non-BH context, so local BH disabling is needed. - * 4) All operations modify state, so a spinlock is used. +/* + * Theory of operations: + * 1) We use a list, protected by a spinlock, to add + * new entries from both BH and non-BH context. 
+ * 2) In order to keep spinlock held for a small delay, + * we use a second list where are stored long lived + * entries, that are handled by the garbage collect thread + * fired by a workqueue. + * 3) This list is guarded by a mutex, + * so that the gc_task and dst_dev_event() can be synchronized. */ -static struct dst_entry *dst_garbage_list; #if RT_CACHE_DEBUG >= 2 static atomic_t dst_total = ATOMIC_INIT(0); #endif -static DEFINE_SPINLOCK(dst_lock); -static unsigned long dst_gc_timer_expires; -static unsigned long dst_gc_timer_inc = DST_GC_MAX; -static void dst_run_gc(unsigned long); +/* + * We want to keep lock & list close together + * to dirty as few cache lines as possible in __dst_free(). + * As this is not a very strong hint, we dont force an alignment on SMP. + */ +static struct { + spinlock_t lock; + struct dst_entry *list; + unsigned long timer_inc; + unsigned long timer_expires; +} dst_garbage = { + .lock = __SPIN_LOCK_UNLOCKED(dst_garbage.lock), + .timer_inc = DST_GC_MAX, +}; +static void dst_gc_task(struct work_struct *work); static void ___dst_free(struct dst_entry * dst); -static DEFINE_TIMER(dst_gc_timer, dst_run_gc, DST_GC_MIN, 0); +static DECLARE_DELAYED_WORK(dst_gc_work, dst_gc_task); -static void dst_run_gc(unsigned long dummy) +static DEFINE_MUTEX(dst_gc_mutex); +/* + * long lived entries are maintained in this list, guarded by dst_gc_mutex + */ +static struct dst_entry *dst_busy_list; + +static void dst_gc_task(struct work_struct *work) { int delayed = 0; - int work_performed; - struct dst_entry * dst, **dstp; + int work_performed = 0; + unsigned long expires = ~0L; + struct dst_entry *dst, *next, head; + struct dst_entry *last = &head; +#if RT_CACHE_DEBUG >= 2 + ktime_t time_start = ktime_get(); + struct timespec elapsed; +#endif - if (!spin_trylock(&dst_lock)) { - mod_timer(&dst_gc_timer, jiffies + HZ/10); - return; - } + mutex_lock(&dst_gc_mutex); + next = dst_busy_list; - del_timer(&dst_gc_timer); - dstp = &dst_garbage_list; - work_performed = 0; - while ((dst = *dstp) != NULL) { - if (atomic_read(&dst->__refcnt)) { - dstp = &dst->next; +loop: + while ((dst = next) != NULL) { + next = dst->next; + prefetch(&next->next); + if (likely(atomic_read(&dst->__refcnt))) { + last->next = dst; + last = dst; delayed++; continue; } - *dstp = dst->next; - work_performed = 1; + work_performed++; dst = dst_destroy(dst); if (dst) { @@ -77,38 +102,56 @@ static void dst_run_gc(unsigned long dummy) continue; ___dst_free(dst); - dst->next = *dstp; - *dstp = dst; - dstp = &dst->next; + dst->next = next; + next = dst; } } - if (!dst_garbage_list) { - dst_gc_timer_inc = DST_GC_MAX; - goto out; + + spin_lock_bh(&dst_garbage.lock); + next = dst_garbage.list; + if (next) { + dst_garbage.list = NULL; + spin_unlock_bh(&dst_garbage.lock); + goto loop; } - if (!work_performed) { - if ((dst_gc_timer_expires += dst_gc_timer_inc) > DST_GC_MAX) - dst_gc_timer_expires = DST_GC_MAX; - dst_gc_timer_inc += DST_GC_INC; - } else { - dst_gc_timer_inc = DST_GC_INC; - dst_gc_timer_expires = DST_GC_MIN; + last->next = NULL; + dst_busy_list = head.next; + if (!dst_busy_list) + dst_garbage.timer_inc = DST_GC_MAX; + else { + /* + * if we freed less than 1/10 of delayed entries, + * we can sleep longer. 
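The same shape, reduced to its bones with invented demo_* names: a spinlock-protected inbox that any context may append to cheaply, and a mutex-serialized worker that keeps still-referenced entries on a private busy list and re-arms itself while survivors remain. This is only an illustration of the locking split, not the dst code itself.

#include <linux/workqueue.h>
#include <linux/spinlock.h>
#include <linux/mutex.h>
#include <linux/jiffies.h>
#include <linux/slab.h>
#include <asm/atomic.h>

struct demo_entry {
	struct demo_entry *next;
	atomic_t refcnt;
};

/* Inbox: cheap to touch from BH and non-BH context alike. */
static DEFINE_SPINLOCK(demo_inbox_lock);
static struct demo_entry *demo_inbox;

/* Survivors: only the worker walks this, under the mutex. */
static DEFINE_MUTEX(demo_gc_mutex);
static struct demo_entry *demo_busy_list;

static void demo_gc_work(struct work_struct *work);
static DECLARE_DELAYED_WORK(demo_gc, demo_gc_work);

void demo_free_later(struct demo_entry *e)
{
	spin_lock_bh(&demo_inbox_lock);
	e->next = demo_inbox;
	demo_inbox = e;
	spin_unlock_bh(&demo_inbox_lock);
	schedule_delayed_work(&demo_gc, HZ / 10);
}

static void demo_gc_work(struct work_struct *work)
{
	struct demo_entry *e, *next, *keep = NULL;

	mutex_lock(&demo_gc_mutex);

	/* Grab everything queued so far in one short locked section. */
	spin_lock_bh(&demo_inbox_lock);
	next = demo_inbox;
	demo_inbox = NULL;
	spin_unlock_bh(&demo_inbox_lock);

	/* Append the previous survivors behind the fresh batch. */
	if (next) {
		for (e = next; e->next; e = e->next)
			;
		e->next = demo_busy_list;
	} else {
		next = demo_busy_list;
	}

	while ((e = next) != NULL) {
		next = e->next;
		if (atomic_read(&e->refcnt)) {
			e->next = keep;		/* still in use: keep it */
			keep = e;
			continue;
		}
		kfree(e);
	}

	demo_busy_list = keep;
	if (keep)
		schedule_delayed_work(&demo_gc, HZ);	/* back off, retry */

	mutex_unlock(&demo_gc_mutex);
}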
+ */ + if (work_performed <= delayed/10) { + dst_garbage.timer_expires += dst_garbage.timer_inc; + if (dst_garbage.timer_expires > DST_GC_MAX) + dst_garbage.timer_expires = DST_GC_MAX; + dst_garbage.timer_inc += DST_GC_INC; + } else { + dst_garbage.timer_inc = DST_GC_INC; + dst_garbage.timer_expires = DST_GC_MIN; + } + expires = dst_garbage.timer_expires; + /* + * if the next desired timer is more than 4 seconds in the future + * then round the timer to whole seconds + */ + if (expires > 4*HZ) + expires = round_jiffies_relative(expires); + schedule_delayed_work(&dst_gc_work, expires); } + + spin_unlock_bh(&dst_garbage.lock); + mutex_unlock(&dst_gc_mutex); #if RT_CACHE_DEBUG >= 2 - printk("dst_total: %d/%d %ld\n", - atomic_read(&dst_total), delayed, dst_gc_timer_expires); + elapsed = ktime_to_timespec(ktime_sub(ktime_get(), time_start)); + printk(KERN_DEBUG "dst_total: %d delayed: %d work_perf: %d" + " expires: %lu elapsed: %lu us\n", + atomic_read(&dst_total), delayed, work_performed, + expires, + elapsed.tv_sec * USEC_PER_SEC + elapsed.tv_nsec / NSEC_PER_USEC); #endif - /* if the next desired timer is more than 4 seconds in the future - * then round the timer to whole seconds - */ - if (dst_gc_timer_expires > 4*HZ) - mod_timer(&dst_gc_timer, - round_jiffies(jiffies + dst_gc_timer_expires)); - else - mod_timer(&dst_gc_timer, jiffies + dst_gc_timer_expires); - -out: - spin_unlock(&dst_lock); } static int dst_discard(struct sk_buff *skb) @@ -153,16 +196,16 @@ static void ___dst_free(struct dst_entry * dst) void __dst_free(struct dst_entry * dst) { - spin_lock_bh(&dst_lock); + spin_lock_bh(&dst_garbage.lock); ___dst_free(dst); - dst->next = dst_garbage_list; - dst_garbage_list = dst; - if (dst_gc_timer_inc > DST_GC_INC) { - dst_gc_timer_inc = DST_GC_INC; - dst_gc_timer_expires = DST_GC_MIN; - mod_timer(&dst_gc_timer, jiffies + dst_gc_timer_expires); + dst->next = dst_garbage.list; + dst_garbage.list = dst; + if (dst_garbage.timer_inc > DST_GC_INC) { + dst_garbage.timer_inc = DST_GC_INC; + dst_garbage.timer_expires = DST_GC_MIN; + schedule_delayed_work(&dst_gc_work, dst_garbage.timer_expires); } - spin_unlock_bh(&dst_lock); + spin_unlock_bh(&dst_garbage.lock); } struct dst_entry *dst_destroy(struct dst_entry * dst) @@ -236,13 +279,13 @@ static inline void dst_ifdown(struct dst_entry *dst, struct net_device *dev, if (!unregister) { dst->input = dst->output = dst_discard; } else { - dst->dev = &loopback_dev; - dev_hold(&loopback_dev); + dst->dev = init_net.loopback_dev; + dev_hold(dst->dev); dev_put(dev); if (dst->neighbour && dst->neighbour->dev == dev) { - dst->neighbour->dev = &loopback_dev; + dst->neighbour->dev = init_net.loopback_dev; dev_put(dev); - dev_hold(&loopback_dev); + dev_hold(dst->neighbour->dev); } } } @@ -250,16 +293,33 @@ static inline void dst_ifdown(struct dst_entry *dst, struct net_device *dev, static int dst_dev_event(struct notifier_block *this, unsigned long event, void *ptr) { struct net_device *dev = ptr; - struct dst_entry *dst; + struct dst_entry *dst, *last = NULL; + + if (dev->nd_net != &init_net) + return NOTIFY_DONE; switch (event) { case NETDEV_UNREGISTER: case NETDEV_DOWN: - spin_lock_bh(&dst_lock); - for (dst = dst_garbage_list; dst; dst = dst->next) { + mutex_lock(&dst_gc_mutex); + for (dst = dst_busy_list; dst; dst = dst->next) { + last = dst; + dst_ifdown(dst, dev, event != NETDEV_DOWN); + } + + spin_lock_bh(&dst_garbage.lock); + dst = dst_garbage.list; + dst_garbage.list = NULL; + spin_unlock_bh(&dst_garbage.lock); + + if (last) + last->next = dst; + else 
+ dst_busy_list = dst; + for (; dst; dst = dst->next) { dst_ifdown(dst, dev, event != NETDEV_DOWN); } - spin_unlock_bh(&dst_lock); + mutex_unlock(&dst_gc_mutex); break; } return NOTIFY_DONE; diff --git a/net/core/ethtool.c b/net/core/ethtool.c index c5e059352d43f1e4cfe8f17589825c82a28523a4..1163eb2256d0560b1fb3b4a71db5df5d4a40a072 100644 --- a/net/core/ethtool.c +++ b/net/core/ethtool.c @@ -109,6 +109,32 @@ int ethtool_op_set_ufo(struct net_device *dev, u32 data) return 0; } +/* the following list of flags are the same as their associated + * NETIF_F_xxx values in include/linux/netdevice.h + */ +static const u32 flags_dup_features = + ETH_FLAG_LRO; + +u32 ethtool_op_get_flags(struct net_device *dev) +{ + /* in the future, this function will probably contain additional + * handling for flags which are not so easily handled + * by a simple masking operation + */ + + return dev->features & flags_dup_features; +} + +int ethtool_op_set_flags(struct net_device *dev, u32 data) +{ + if (data & ETH_FLAG_LRO) + dev->features |= NETIF_F_LRO; + else + dev->features &= ~NETIF_F_LRO; + + return 0; +} + /* Handlers for each ethtool command */ static int ethtool_get_settings(struct net_device *dev, void __user *useraddr) @@ -153,10 +179,26 @@ static int ethtool_get_drvinfo(struct net_device *dev, void __user *useraddr) info.cmd = ETHTOOL_GDRVINFO; ops->get_drvinfo(dev, &info); - if (ops->self_test_count) - info.testinfo_len = ops->self_test_count(dev); - if (ops->get_stats_count) - info.n_stats = ops->get_stats_count(dev); + if (ops->get_sset_count) { + int rc; + + rc = ops->get_sset_count(dev, ETH_SS_TEST); + if (rc >= 0) + info.testinfo_len = rc; + rc = ops->get_sset_count(dev, ETH_SS_STATS); + if (rc >= 0) + info.n_stats = rc; + rc = ops->get_sset_count(dev, ETH_SS_PRIV_FLAGS); + if (rc >= 0) + info.n_priv_flags = rc; + } else { + /* code path for obsolete hooks */ + + if (ops->self_test_count) + info.testinfo_len = ops->self_test_count(dev); + if (ops->get_stats_count) + info.n_stats = ops->get_stats_count(dev); + } if (ops->get_regs_len) info.regdump_len = ops->get_regs_len(dev); if (ops->get_eeprom_len) @@ -230,34 +272,6 @@ static int ethtool_set_wol(struct net_device *dev, char __user *useraddr) return dev->ethtool_ops->set_wol(dev, &wol); } -static int ethtool_get_msglevel(struct net_device *dev, char __user *useraddr) -{ - struct ethtool_value edata = { ETHTOOL_GMSGLVL }; - - if (!dev->ethtool_ops->get_msglevel) - return -EOPNOTSUPP; - - edata.data = dev->ethtool_ops->get_msglevel(dev); - - if (copy_to_user(useraddr, &edata, sizeof(edata))) - return -EFAULT; - return 0; -} - -static int ethtool_set_msglevel(struct net_device *dev, char __user *useraddr) -{ - struct ethtool_value edata; - - if (!dev->ethtool_ops->set_msglevel) - return -EOPNOTSUPP; - - if (copy_from_user(&edata, useraddr, sizeof(edata))) - return -EFAULT; - - dev->ethtool_ops->set_msglevel(dev, edata.data); - return 0; -} - static int ethtool_nway_reset(struct net_device *dev) { if (!dev->ethtool_ops->nway_reset) @@ -266,20 +280,6 @@ static int ethtool_nway_reset(struct net_device *dev) return dev->ethtool_ops->nway_reset(dev); } -static int ethtool_get_link(struct net_device *dev, void __user *useraddr) -{ - struct ethtool_value edata = { ETHTOOL_GLINK }; - - if (!dev->ethtool_ops->get_link) - return -EOPNOTSUPP; - - edata.data = dev->ethtool_ops->get_link(dev); - - if (copy_to_user(useraddr, &edata, sizeof(edata))) - return -EFAULT; - return 0; -} - static int ethtool_get_eeprom(struct net_device *dev, void __user *useraddr) { 
struct ethtool_eeprom eeprom; @@ -447,48 +447,6 @@ static int ethtool_set_pauseparam(struct net_device *dev, void __user *useraddr) return dev->ethtool_ops->set_pauseparam(dev, &pauseparam); } -static int ethtool_get_rx_csum(struct net_device *dev, char __user *useraddr) -{ - struct ethtool_value edata = { ETHTOOL_GRXCSUM }; - - if (!dev->ethtool_ops->get_rx_csum) - return -EOPNOTSUPP; - - edata.data = dev->ethtool_ops->get_rx_csum(dev); - - if (copy_to_user(useraddr, &edata, sizeof(edata))) - return -EFAULT; - return 0; -} - -static int ethtool_set_rx_csum(struct net_device *dev, char __user *useraddr) -{ - struct ethtool_value edata; - - if (!dev->ethtool_ops->set_rx_csum) - return -EOPNOTSUPP; - - if (copy_from_user(&edata, useraddr, sizeof(edata))) - return -EFAULT; - - dev->ethtool_ops->set_rx_csum(dev, edata.data); - return 0; -} - -static int ethtool_get_tx_csum(struct net_device *dev, char __user *useraddr) -{ - struct ethtool_value edata = { ETHTOOL_GTXCSUM }; - - if (!dev->ethtool_ops->get_tx_csum) - return -EOPNOTSUPP; - - edata.data = dev->ethtool_ops->get_tx_csum(dev); - - if (copy_to_user(useraddr, &edata, sizeof(edata))) - return -EFAULT; - return 0; -} - static int __ethtool_set_sg(struct net_device *dev, u32 data) { int err; @@ -527,20 +485,6 @@ static int ethtool_set_tx_csum(struct net_device *dev, char __user *useraddr) return dev->ethtool_ops->set_tx_csum(dev, edata.data); } -static int ethtool_get_sg(struct net_device *dev, char __user *useraddr) -{ - struct ethtool_value edata = { ETHTOOL_GSG }; - - if (!dev->ethtool_ops->get_sg) - return -EOPNOTSUPP; - - edata.data = dev->ethtool_ops->get_sg(dev); - - if (copy_to_user(useraddr, &edata, sizeof(edata))) - return -EFAULT; - return 0; -} - static int ethtool_set_sg(struct net_device *dev, char __user *useraddr) { struct ethtool_value edata; @@ -558,20 +502,6 @@ static int ethtool_set_sg(struct net_device *dev, char __user *useraddr) return __ethtool_set_sg(dev, edata.data); } -static int ethtool_get_tso(struct net_device *dev, char __user *useraddr) -{ - struct ethtool_value edata = { ETHTOOL_GTSO }; - - if (!dev->ethtool_ops->get_tso) - return -EOPNOTSUPP; - - edata.data = dev->ethtool_ops->get_tso(dev); - - if (copy_to_user(useraddr, &edata, sizeof(edata))) - return -EFAULT; - return 0; -} - static int ethtool_set_tso(struct net_device *dev, char __user *useraddr) { struct ethtool_value edata; @@ -588,18 +518,6 @@ static int ethtool_set_tso(struct net_device *dev, char __user *useraddr) return dev->ethtool_ops->set_tso(dev, edata.data); } -static int ethtool_get_ufo(struct net_device *dev, char __user *useraddr) -{ - struct ethtool_value edata = { ETHTOOL_GUFO }; - - if (!dev->ethtool_ops->get_ufo) - return -EOPNOTSUPP; - edata.data = dev->ethtool_ops->get_ufo(dev); - if (copy_to_user(useraddr, &edata, sizeof(edata))) - return -EFAULT; - return 0; -} - static int ethtool_set_ufo(struct net_device *dev, char __user *useraddr) { struct ethtool_value edata; @@ -643,16 +561,27 @@ static int ethtool_self_test(struct net_device *dev, char __user *useraddr) struct ethtool_test test; const struct ethtool_ops *ops = dev->ethtool_ops; u64 *data; - int ret; + int ret, test_len; - if (!ops->self_test || !ops->self_test_count) + if (!ops->self_test) return -EOPNOTSUPP; + if (!ops->get_sset_count && !ops->self_test_count) + return -EOPNOTSUPP; + + if (ops->get_sset_count) + test_len = ops->get_sset_count(dev, ETH_SS_TEST); + else + /* code path for obsolete hook */ + test_len = ops->self_test_count(dev); + if (test_len < 0) + return 
test_len; + WARN_ON(test_len == 0); if (copy_from_user(&test, useraddr, sizeof(test))) return -EFAULT; - test.len = ops->self_test_count(dev); - data = kmalloc(test.len * sizeof(u64), GFP_USER); + test.len = test_len; + data = kmalloc(test_len * sizeof(u64), GFP_USER); if (!data) return -ENOMEM; @@ -684,19 +613,29 @@ static int ethtool_get_strings(struct net_device *dev, void __user *useraddr) if (copy_from_user(&gstrings, useraddr, sizeof(gstrings))) return -EFAULT; - switch (gstrings.string_set) { - case ETH_SS_TEST: - if (!ops->self_test_count) - return -EOPNOTSUPP; - gstrings.len = ops->self_test_count(dev); - break; - case ETH_SS_STATS: - if (!ops->get_stats_count) - return -EOPNOTSUPP; - gstrings.len = ops->get_stats_count(dev); - break; - default: - return -EINVAL; + if (ops->get_sset_count) { + ret = ops->get_sset_count(dev, gstrings.string_set); + if (ret < 0) + return ret; + + gstrings.len = ret; + } else { + /* code path for obsolete hooks */ + + switch (gstrings.string_set) { + case ETH_SS_TEST: + if (!ops->self_test_count) + return -EOPNOTSUPP; + gstrings.len = ops->self_test_count(dev); + break; + case ETH_SS_STATS: + if (!ops->get_stats_count) + return -EOPNOTSUPP; + gstrings.len = ops->get_stats_count(dev); + break; + default: + return -EINVAL; + } } data = kmalloc(gstrings.len * ETH_GSTRING_LEN, GFP_USER); @@ -736,16 +675,27 @@ static int ethtool_get_stats(struct net_device *dev, void __user *useraddr) struct ethtool_stats stats; const struct ethtool_ops *ops = dev->ethtool_ops; u64 *data; - int ret; + int ret, n_stats; - if (!ops->get_ethtool_stats || !ops->get_stats_count) + if (!ops->get_ethtool_stats) + return -EOPNOTSUPP; + if (!ops->get_sset_count && !ops->get_stats_count) return -EOPNOTSUPP; + if (ops->get_sset_count) + n_stats = ops->get_sset_count(dev, ETH_SS_STATS); + else + /* code path for obsolete hook */ + n_stats = ops->get_stats_count(dev); + if (n_stats < 0) + return n_stats; + WARN_ON(n_stats == 0); + if (copy_from_user(&stats, useraddr, sizeof(stats))) return -EFAULT; - stats.n_stats = ops->get_stats_count(dev); - data = kmalloc(stats.n_stats * sizeof(u64), GFP_USER); + stats.n_stats = n_stats; + data = kmalloc(n_stats * sizeof(u64), GFP_USER); if (!data) return -ENOMEM; @@ -783,11 +733,55 @@ static int ethtool_get_perm_addr(struct net_device *dev, void __user *useraddr) return 0; } +static int ethtool_get_value(struct net_device *dev, char __user *useraddr, + u32 cmd, u32 (*actor)(struct net_device *)) +{ + struct ethtool_value edata = { cmd }; + + if (!actor) + return -EOPNOTSUPP; + + edata.data = actor(dev); + + if (copy_to_user(useraddr, &edata, sizeof(edata))) + return -EFAULT; + return 0; +} + +static int ethtool_set_value_void(struct net_device *dev, char __user *useraddr, + void (*actor)(struct net_device *, u32)) +{ + struct ethtool_value edata; + + if (!actor) + return -EOPNOTSUPP; + + if (copy_from_user(&edata, useraddr, sizeof(edata))) + return -EFAULT; + + actor(dev, edata.data); + return 0; +} + +static int ethtool_set_value(struct net_device *dev, char __user *useraddr, + int (*actor)(struct net_device *, u32)) +{ + struct ethtool_value edata; + + if (!actor) + return -EOPNOTSUPP; + + if (copy_from_user(&edata, useraddr, sizeof(edata))) + return -EFAULT; + + return actor(dev, edata.data); +} + /* The main entry point in this file. 
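On the driver side this rework means implementing a single ->get_sset_count() in place of the old self_test_count/get_stats_count pair. A sketch with invented statistics follows; demo_priv and its counters are assumptions, and the flags hooks simply reuse the helpers added above.

#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/ethtool.h>
#include <linux/netdevice.h>

struct demo_priv {			/* invented private layout */
	u64 rx_frames;
	u64 tx_frames;
};

static const char demo_stat_names[][ETH_GSTRING_LEN] = {
	"rx_demo_frames",
	"tx_demo_frames",
};

static int demo_get_sset_count(struct net_device *dev, int sset)
{
	switch (sset) {
	case ETH_SS_STATS:
		return ARRAY_SIZE(demo_stat_names);
	default:
		return -EOPNOTSUPP;	/* no self-test, no private flags */
	}
}

static void demo_get_strings(struct net_device *dev, u32 sset, u8 *data)
{
	if (sset == ETH_SS_STATS)
		memcpy(data, demo_stat_names, sizeof(demo_stat_names));
}

static void demo_get_ethtool_stats(struct net_device *dev,
				   struct ethtool_stats *stats, u64 *data)
{
	struct demo_priv *priv = netdev_priv(dev);

	data[0] = priv->rx_frames;
	data[1] = priv->tx_frames;
}

static const struct ethtool_ops demo_ethtool_ops = {
	.get_sset_count		= demo_get_sset_count,
	.get_strings		= demo_get_strings,
	.get_ethtool_stats	= demo_get_ethtool_stats,
	/* LRO on/off via the new ETHTOOL_GFLAGS/SFLAGS path: */
	.get_flags		= ethtool_op_get_flags,
	.set_flags		= ethtool_op_set_flags,
};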
Called from net/core/dev.c */ -int dev_ethtool(struct ifreq *ifr) +int dev_ethtool(struct net *net, struct ifreq *ifr) { - struct net_device *dev = __dev_get_by_name(ifr->ifr_name); + struct net_device *dev = __dev_get_by_name(net, ifr->ifr_name); void __user *useraddr = ifr->ifr_data; u32 ethcmd; int rc; @@ -817,6 +811,8 @@ int dev_ethtool(struct ifreq *ifr) case ETHTOOL_GPERMADDR: case ETHTOOL_GUFO: case ETHTOOL_GGSO: + case ETHTOOL_GFLAGS: + case ETHTOOL_GPFLAGS: break; default: if (!capable(CAP_NET_ADMIN)) @@ -849,16 +845,19 @@ int dev_ethtool(struct ifreq *ifr) rc = ethtool_set_wol(dev, useraddr); break; case ETHTOOL_GMSGLVL: - rc = ethtool_get_msglevel(dev, useraddr); + rc = ethtool_get_value(dev, useraddr, ethcmd, + dev->ethtool_ops->get_msglevel); break; case ETHTOOL_SMSGLVL: - rc = ethtool_set_msglevel(dev, useraddr); + rc = ethtool_set_value_void(dev, useraddr, + dev->ethtool_ops->set_msglevel); break; case ETHTOOL_NWAY_RST: rc = ethtool_nway_reset(dev); break; case ETHTOOL_GLINK: - rc = ethtool_get_link(dev, useraddr); + rc = ethtool_get_value(dev, useraddr, ethcmd, + dev->ethtool_ops->get_link); break; case ETHTOOL_GEEPROM: rc = ethtool_get_eeprom(dev, useraddr); @@ -885,25 +884,36 @@ int dev_ethtool(struct ifreq *ifr) rc = ethtool_set_pauseparam(dev, useraddr); break; case ETHTOOL_GRXCSUM: - rc = ethtool_get_rx_csum(dev, useraddr); + rc = ethtool_get_value(dev, useraddr, ethcmd, + dev->ethtool_ops->get_rx_csum); break; case ETHTOOL_SRXCSUM: - rc = ethtool_set_rx_csum(dev, useraddr); + rc = ethtool_set_value(dev, useraddr, + dev->ethtool_ops->set_rx_csum); break; case ETHTOOL_GTXCSUM: - rc = ethtool_get_tx_csum(dev, useraddr); + rc = ethtool_get_value(dev, useraddr, ethcmd, + (dev->ethtool_ops->get_tx_csum ? + dev->ethtool_ops->get_tx_csum : + ethtool_op_get_tx_csum)); break; case ETHTOOL_STXCSUM: rc = ethtool_set_tx_csum(dev, useraddr); break; case ETHTOOL_GSG: - rc = ethtool_get_sg(dev, useraddr); + rc = ethtool_get_value(dev, useraddr, ethcmd, + (dev->ethtool_ops->get_sg ? + dev->ethtool_ops->get_sg : + ethtool_op_get_sg)); break; case ETHTOOL_SSG: rc = ethtool_set_sg(dev, useraddr); break; case ETHTOOL_GTSO: - rc = ethtool_get_tso(dev, useraddr); + rc = ethtool_get_value(dev, useraddr, ethcmd, + (dev->ethtool_ops->get_tso ? + dev->ethtool_ops->get_tso : + ethtool_op_get_tso)); break; case ETHTOOL_STSO: rc = ethtool_set_tso(dev, useraddr); @@ -924,7 +934,10 @@ int dev_ethtool(struct ifreq *ifr) rc = ethtool_get_perm_addr(dev, useraddr); break; case ETHTOOL_GUFO: - rc = ethtool_get_ufo(dev, useraddr); + rc = ethtool_get_value(dev, useraddr, ethcmd, + (dev->ethtool_ops->get_ufo ? 
+ dev->ethtool_ops->get_ufo : + ethtool_op_get_ufo)); break; case ETHTOOL_SUFO: rc = ethtool_set_ufo(dev, useraddr); @@ -935,6 +948,22 @@ int dev_ethtool(struct ifreq *ifr) case ETHTOOL_SGSO: rc = ethtool_set_gso(dev, useraddr); break; + case ETHTOOL_GFLAGS: + rc = ethtool_get_value(dev, useraddr, ethcmd, + dev->ethtool_ops->get_flags); + break; + case ETHTOOL_SFLAGS: + rc = ethtool_set_value(dev, useraddr, + dev->ethtool_ops->set_flags); + break; + case ETHTOOL_GPFLAGS: + rc = ethtool_get_value(dev, useraddr, ethcmd, + dev->ethtool_ops->get_priv_flags); + break; + case ETHTOOL_SPFLAGS: + rc = ethtool_set_value(dev, useraddr, + dev->ethtool_ops->set_priv_flags); + break; default: rc = -EOPNOTSUPP; } @@ -959,3 +988,5 @@ EXPORT_SYMBOL(ethtool_op_set_tx_hw_csum); EXPORT_SYMBOL(ethtool_op_set_tx_ipv6_csum); EXPORT_SYMBOL(ethtool_op_set_ufo); EXPORT_SYMBOL(ethtool_op_get_ufo); +EXPORT_SYMBOL(ethtool_op_set_flags); +EXPORT_SYMBOL(ethtool_op_get_flags); diff --git a/net/core/fib_rules.c b/net/core/fib_rules.c index 8c5474e16683aa22971b1bed0390354b728a0f64..13de6f53f0981f19bb16e7aef2d542f2d5f8167a 100644 --- a/net/core/fib_rules.c +++ b/net/core/fib_rules.c @@ -11,6 +11,8 @@ #include #include #include +#include +#include #include static LIST_HEAD(rules_ops); @@ -82,7 +84,7 @@ static void cleanup_ops(struct fib_rules_ops *ops) { struct fib_rule *rule, *tmp; - list_for_each_entry_safe(rule, tmp, ops->rules_list, list) { + list_for_each_entry_safe(rule, tmp, &ops->rules_list, list) { list_del_rcu(&rule->list); fib_rule_put(rule); } @@ -137,7 +139,7 @@ int fib_rules_lookup(struct fib_rules_ops *ops, struct flowi *fl, rcu_read_lock(); - list_for_each_entry_rcu(rule, ops->rules_list, list) { + list_for_each_entry_rcu(rule, &ops->rules_list, list) { jumped: if (!fib_rule_match(rule, ops, fl, flags)) continue; @@ -197,6 +199,7 @@ static int validate_rulemsg(struct fib_rule_hdr *frh, struct nlattr **tb, static int fib_nl_newrule(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg) { + struct net *net = skb->sk->sk_net; struct fib_rule_hdr *frh = nlmsg_data(nlh); struct fib_rules_ops *ops = NULL; struct fib_rule *rule, *r, *last = NULL; @@ -234,7 +237,7 @@ static int fib_nl_newrule(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg) rule->ifindex = -1; nla_strlcpy(rule->ifname, tb[FRA_IFNAME], IFNAMSIZ); - dev = __dev_get_by_name(rule->ifname); + dev = __dev_get_by_name(net, rule->ifname); if (dev) rule->ifindex = dev->ifindex; } @@ -268,7 +271,7 @@ static int fib_nl_newrule(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg) if (rule->target <= rule->pref) goto errout_free; - list_for_each_entry(r, ops->rules_list, list) { + list_for_each_entry(r, &ops->rules_list, list) { if (r->pref == rule->target) { rule->ctarget = r; break; @@ -284,7 +287,7 @@ static int fib_nl_newrule(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg) if (err < 0) goto errout_free; - list_for_each_entry(r, ops->rules_list, list) { + list_for_each_entry(r, &ops->rules_list, list) { if (r->pref > rule->pref) break; last = r; @@ -297,7 +300,7 @@ static int fib_nl_newrule(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg) * There are unresolved goto rules in the list, check if * any of them are pointing to this new rule. 
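The mechanical part of the fib_rules conversion in these hunks is rules_list turning from a pointer into a struct list_head embedded in fib_rules_ops; the insert-by-preference walk it performs is the generic pattern below, shown with shortened, purely illustrative types.

#include <linux/list.h>
#include <linux/types.h>

struct demo_ops {
	struct list_head rules_list;	/* embedded, not a pointer */
};

struct demo_rule {
	struct list_head list;
	u32 pref;
};

/* Keep the list sorted by preference: remember the last entry whose pref
 * is not larger than ours and insert right after it. */
static void demo_insert_sorted(struct demo_ops *ops, struct demo_rule *rule)
{
	struct demo_rule *r, *last = NULL;

	list_for_each_entry(r, &ops->rules_list, list) {
		if (r->pref > rule->pref)
			break;
		last = r;
	}

	if (last)
		list_add(&rule->list, &last->list);	/* after 'last' */
	else
		list_add(&rule->list, &ops->rules_list);	/* at the head */
}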
*/ - list_for_each_entry(r, ops->rules_list, list) { + list_for_each_entry(r, &ops->rules_list, list) { if (r->action == FR_ACT_GOTO && r->target == rule->pref) { BUG_ON(r->ctarget != NULL); @@ -317,7 +320,7 @@ static int fib_nl_newrule(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg) if (last) list_add_rcu(&rule->list, &last->list); else - list_add_rcu(&rule->list, ops->rules_list); + list_add_rcu(&rule->list, &ops->rules_list); notify_rule_change(RTM_NEWRULE, rule, ops, nlh, NETLINK_CB(skb).pid); flush_route_cache(ops); @@ -356,7 +359,7 @@ static int fib_nl_delrule(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg) if (err < 0) goto errout; - list_for_each_entry(rule, ops->rules_list, list) { + list_for_each_entry(rule, &ops->rules_list, list) { if (frh->action && (frh->action != rule->action)) continue; @@ -399,7 +402,7 @@ static int fib_nl_delrule(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg) * actually been added. */ if (ops->nr_goto_rules > 0) { - list_for_each_entry(tmp, ops->rules_list, list) { + list_for_each_entry(tmp, &ops->rules_list, list) { if (tmp->ctarget == rule) { rcu_assign_pointer(tmp->ctarget, NULL); ops->unresolved_rules++; @@ -495,7 +498,7 @@ static int dump_rules(struct sk_buff *skb, struct netlink_callback *cb, int idx = 0; struct fib_rule *rule; - list_for_each_entry(rule, ops->rules_list, list) { + list_for_each_entry(rule, &ops->rules_list, list) { if (idx < cb->args[1]) goto skip; @@ -596,18 +599,21 @@ static int fib_rules_event(struct notifier_block *this, unsigned long event, struct net_device *dev = ptr; struct fib_rules_ops *ops; + if (dev->nd_net != &init_net) + return NOTIFY_DONE; + ASSERT_RTNL(); rcu_read_lock(); switch (event) { case NETDEV_REGISTER: list_for_each_entry(ops, &rules_ops, list) - attach_rules(ops->rules_list, dev); + attach_rules(&ops->rules_list, dev); break; case NETDEV_UNREGISTER: list_for_each_entry(ops, &rules_ops, list) - detach_rules(ops->rules_list, dev); + detach_rules(&ops->rules_list, dev); break; } diff --git a/net/core/neighbour.c b/net/core/neighbour.c index f7de8f24d8dd9a63b9d87bbdcd904333e6ba5b09..c52df858d0be10a597109bed466c860d1177e097 100644 --- a/net/core/neighbour.c +++ b/net/core/neighbour.c @@ -25,6 +25,7 @@ #include #endif #include +#include #include #include #include @@ -55,9 +56,8 @@ #define PNEIGH_HASHMASK 0xF static void neigh_timer_handler(unsigned long arg); -#ifdef CONFIG_ARPD -static void neigh_app_notify(struct neighbour *n); -#endif +static void __neigh_notify(struct neighbour *n, int type, int flags); +static void neigh_update_notify(struct neighbour *neigh); static int pneigh_ifdown(struct neigh_table *tbl, struct net_device *dev); void neigh_changeaddr(struct neigh_table *tbl, struct net_device *dev); @@ -105,6 +105,15 @@ static int neigh_blackhole(struct sk_buff *skb) return -ENETDOWN; } +static void neigh_cleanup_and_release(struct neighbour *neigh) +{ + if (neigh->parms->neigh_cleanup) + neigh->parms->neigh_cleanup(neigh); + + __neigh_notify(neigh, RTM_DELNEIGH, 0); + neigh_release(neigh); +} + /* * It is random distribution in the interval (1/2)*base...(3/2)*base. 
* It corresponds to default IPv6 settings and is not overridable, @@ -141,9 +150,7 @@ static int neigh_forced_gc(struct neigh_table *tbl) n->dead = 1; shrunk = 1; write_unlock(&n->lock); - if (n->parms->neigh_cleanup) - n->parms->neigh_cleanup(n); - neigh_release(n); + neigh_cleanup_and_release(n); continue; } write_unlock(&n->lock); @@ -214,9 +221,7 @@ static void neigh_flush_dev(struct neigh_table *tbl, struct net_device *dev) NEIGH_PRINTK2("neigh %p is stray.\n", n); } write_unlock(&n->lock); - if (n->parms->neigh_cleanup) - n->parms->neigh_cleanup(n); - neigh_release(n); + neigh_cleanup_and_release(n); } } } @@ -677,9 +682,7 @@ static void neigh_periodic_timer(unsigned long arg) *np = n->next; n->dead = 1; write_unlock(&n->lock); - if (n->parms->neigh_cleanup) - n->parms->neigh_cleanup(n); - neigh_release(n); + neigh_cleanup_and_release(n); continue; } write_unlock(&n->lock); @@ -828,13 +831,10 @@ static void neigh_timer_handler(unsigned long arg) out: write_unlock(&neigh->lock); } + if (notify) - call_netevent_notifiers(NETEVENT_NEIGH_UPDATE, neigh); + neigh_update_notify(neigh); -#ifdef CONFIG_ARPD - if (notify && neigh->parms->app_probes) - neigh_app_notify(neigh); -#endif neigh_release(neigh); } @@ -897,8 +897,8 @@ int __neigh_event_send(struct neighbour *neigh, struct sk_buff *skb) static void neigh_update_hhs(struct neighbour *neigh) { struct hh_cache *hh; - void (*update)(struct hh_cache*, struct net_device*, unsigned char *) = - neigh->dev->header_cache_update; + void (*update)(struct hh_cache*, const struct net_device*, const unsigned char *) + = neigh->dev->header_ops->cache_update; if (update) { for (hh = neigh->hh; hh; hh = hh->hh_next) { @@ -1063,11 +1063,8 @@ int neigh_update(struct neighbour *neigh, const u8 *lladdr, u8 new, write_unlock_bh(&neigh->lock); if (notify) - call_netevent_notifiers(NETEVENT_NEIGH_UPDATE, neigh); -#ifdef CONFIG_ARPD - if (notify && neigh->parms->app_probes) - neigh_app_notify(neigh); -#endif + neigh_update_notify(neigh); + return err; } @@ -1098,7 +1095,8 @@ static void neigh_hh_init(struct neighbour *n, struct dst_entry *dst, hh->hh_type = protocol; atomic_set(&hh->hh_refcnt, 0); hh->hh_next = NULL; - if (dev->hard_header_cache(n, hh)) { + + if (dev->header_ops->cache(n, hh)) { kfree(hh); hh = NULL; } else { @@ -1128,10 +1126,9 @@ int neigh_compat_output(struct sk_buff *skb) __skb_pull(skb, skb_network_offset(skb)); - if (dev->hard_header && - dev->hard_header(skb, dev, ntohs(skb->protocol), NULL, NULL, - skb->len) < 0 && - dev->rebuild_header(skb)) + if (dev_hard_header(skb, dev, ntohs(skb->protocol), NULL, NULL, + skb->len) < 0 && + dev->header_ops->rebuild(skb)) return 0; return dev_queue_xmit(skb); @@ -1153,17 +1150,17 @@ int neigh_resolve_output(struct sk_buff *skb) if (!neigh_event_send(neigh, skb)) { int err; struct net_device *dev = neigh->dev; - if (dev->hard_header_cache && !dst->hh) { + if (dev->header_ops->cache && !dst->hh) { write_lock_bh(&neigh->lock); if (!dst->hh) neigh_hh_init(neigh, dst, dst->ops->protocol); - err = dev->hard_header(skb, dev, ntohs(skb->protocol), - neigh->ha, NULL, skb->len); + err = dev_hard_header(skb, dev, ntohs(skb->protocol), + neigh->ha, NULL, skb->len); write_unlock_bh(&neigh->lock); } else { read_lock_bh(&neigh->lock); - err = dev->hard_header(skb, dev, ntohs(skb->protocol), - neigh->ha, NULL, skb->len); + err = dev_hard_header(skb, dev, ntohs(skb->protocol), + neigh->ha, NULL, skb->len); read_unlock_bh(&neigh->lock); } if (err >= 0) @@ -1194,8 +1191,8 @@ int neigh_connected_output(struct sk_buff 
*skb) __skb_pull(skb, skb_network_offset(skb)); read_lock_bh(&neigh->lock); - err = dev->hard_header(skb, dev, ntohs(skb->protocol), - neigh->ha, NULL, skb->len); + err = dev_hard_header(skb, dev, ntohs(skb->protocol), + neigh->ha, NULL, skb->len); read_unlock_bh(&neigh->lock); if (err >= 0) err = neigh->ops->queue_xmit(skb); @@ -1354,7 +1351,7 @@ void neigh_table_init_no_netlink(struct neigh_table *tbl) panic("cannot create neighbour cache statistics"); #ifdef CONFIG_PROC_FS - tbl->pde = create_proc_entry(tbl->id, 0, proc_net_stat); + tbl->pde = create_proc_entry(tbl->id, 0, init_net.proc_net_stat); if (!tbl->pde) panic("cannot create neighbour proc dir entry"); tbl->pde->proc_fops = &neigh_stat_seq_fops; @@ -1444,6 +1441,7 @@ int neigh_table_clear(struct neigh_table *tbl) static int neigh_delete(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg) { + struct net *net = skb->sk->sk_net; struct ndmsg *ndm; struct nlattr *dst_attr; struct neigh_table *tbl; @@ -1459,7 +1457,7 @@ static int neigh_delete(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg) ndm = nlmsg_data(nlh); if (ndm->ndm_ifindex) { - dev = dev_get_by_index(ndm->ndm_ifindex); + dev = dev_get_by_index(net, ndm->ndm_ifindex); if (dev == NULL) { err = -ENODEV; goto out; @@ -1509,6 +1507,7 @@ static int neigh_delete(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg) static int neigh_add(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg) { + struct net *net = skb->sk->sk_net; struct ndmsg *ndm; struct nlattr *tb[NDA_MAX+1]; struct neigh_table *tbl; @@ -1525,7 +1524,7 @@ static int neigh_add(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg) ndm = nlmsg_data(nlh); if (ndm->ndm_ifindex) { - dev = dev_get_by_index(ndm->ndm_ifindex); + dev = dev_get_by_index(net, ndm->ndm_ifindex); if (dev == NULL) { err = -ENODEV; goto out; @@ -2000,6 +1999,11 @@ static int neigh_fill_info(struct sk_buff *skb, struct neighbour *neigh, return -EMSGSIZE; } +static void neigh_update_notify(struct neighbour *neigh) +{ + call_netevent_notifiers(NETEVENT_NEIGH_UPDATE, neigh); + __neigh_notify(neigh, RTM_NEWNEIGH, 0); +} static int neigh_dump_table(struct neigh_table *tbl, struct sk_buff *skb, struct netlink_callback *cb) @@ -2095,11 +2099,8 @@ void __neigh_for_each_release(struct neigh_table *tbl, } else np = &n->next; write_unlock(&n->lock); - if (release) { - if (n->parms->neigh_cleanup) - n->parms->neigh_cleanup(n); - neigh_release(n); - } + if (release) + neigh_cleanup_and_release(n); } } } @@ -2422,7 +2423,6 @@ static const struct file_operations neigh_stat_seq_fops = { #endif /* CONFIG_PROC_FS */ -#ifdef CONFIG_ARPD static inline size_t neigh_nlmsg_size(void) { return NLMSG_ALIGN(sizeof(struct ndmsg)) @@ -2454,16 +2454,11 @@ static void __neigh_notify(struct neighbour *n, int type, int flags) rtnl_set_sk_err(RTNLGRP_NEIGH, err); } +#ifdef CONFIG_ARPD void neigh_app_ns(struct neighbour *n) { __neigh_notify(n, RTM_GETNEIGH, NLM_F_REQUEST); } - -static void neigh_app_notify(struct neighbour *n) -{ - __neigh_notify(n, RTM_NEWNEIGH, 0); -} - #endif /* CONFIG_ARPD */ #ifdef CONFIG_SYSCTL diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c index 5c19b0646d7a3e09c0284a93686c2d738fc92fd1..909a03d6c0e9578abf0894a19dd800080aa1ca03 100644 --- a/net/core/net-sysfs.c +++ b/net/core/net-sysfs.c @@ -18,6 +18,7 @@ #include #include +#ifdef CONFIG_SYSFS static const char fmt_hex[] = "%#x\n"; static const char fmt_long_hex[] = "%#lx\n"; static const char fmt_dec[] = "%d\n"; @@ -216,20 +217,6 @@ static ssize_t store_tx_queue_len(struct device *dev, 
return netdev_store(dev, attr, buf, len, change_tx_queue_len); } -NETDEVICE_SHOW(weight, fmt_dec); - -static int change_weight(struct net_device *net, unsigned long new_weight) -{ - net->weight = new_weight; - return 0; -} - -static ssize_t store_weight(struct device *dev, struct device_attribute *attr, - const char *buf, size_t len) -{ - return netdev_store(dev, attr, buf, len, change_weight); -} - static struct device_attribute net_class_attributes[] = { __ATTR(addr_len, S_IRUGO, show_addr_len, NULL), __ATTR(iflink, S_IRUGO, show_iflink, NULL), @@ -246,7 +233,6 @@ static struct device_attribute net_class_attributes[] = { __ATTR(flags, S_IRUGO | S_IWUSR, show_flags, store_flags), __ATTR(tx_queue_len, S_IRUGO | S_IWUSR, show_tx_queue_len, store_tx_queue_len), - __ATTR(weight, S_IRUGO | S_IWUSR, show_weight, store_weight), {} }; @@ -407,6 +393,8 @@ static struct attribute_group wireless_group = { }; #endif +#endif /* CONFIG_SYSFS */ + #ifdef CONFIG_HOTPLUG static int netdev_uevent(struct device *d, char **envp, int num_envp, char *buf, int size) @@ -450,7 +438,9 @@ static void netdev_release(struct device *d) static struct class net_class = { .name = "net", .dev_release = netdev_release, +#ifdef CONFIG_SYSFS .dev_attrs = net_class_attributes, +#endif /* CONFIG_SYSFS */ #ifdef CONFIG_HOTPLUG .dev_uevent = netdev_uevent, #endif @@ -459,7 +449,7 @@ static struct class net_class = { /* Delete sysfs entries but hold kobject reference until after all * netdev references are gone. */ -void netdev_unregister_sysfs(struct net_device * net) +void netdev_unregister_kobject(struct net_device * net) { struct device *dev = &(net->dev); @@ -468,7 +458,7 @@ void netdev_unregister_sysfs(struct net_device * net) } /* Create sysfs entries for network device. */ -int netdev_register_sysfs(struct net_device *net) +int netdev_register_kobject(struct net_device *net) { struct device *dev = &(net->dev); struct attribute_group **groups = net->sysfs_groups; @@ -481,6 +471,7 @@ int netdev_register_sysfs(struct net_device *net) BUILD_BUG_ON(BUS_ID_SIZE < IFNAMSIZ); strlcpy(dev->bus_id, net->name, BUS_ID_SIZE); +#ifdef CONFIG_SYSFS if (net->get_stats) *groups++ = &netstat_group; @@ -488,11 +479,12 @@ int netdev_register_sysfs(struct net_device *net) if (net->wireless_handlers && net->wireless_handlers->get_wireless_stats) *groups++ = &wireless_group; #endif +#endif /* CONFIG_SYSFS */ return device_add(dev); } -int netdev_sysfs_init(void) +int netdev_kobject_init(void) { return class_register(&net_class); } diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c new file mode 100644 index 0000000000000000000000000000000000000000..6f71db8c4428c04a573020a2773b72a97c8e2ff7 --- /dev/null +++ b/net/core/net_namespace.c @@ -0,0 +1,317 @@ +#include +#include +#include +#include +#include +#include +#include +#include + +/* + * Our network namespace constructor/destructor lists + */ + +static LIST_HEAD(pernet_list); +static struct list_head *first_device = &pernet_list; +static DEFINE_MUTEX(net_mutex); + +LIST_HEAD(net_namespace_list); + +static struct kmem_cache *net_cachep; + +struct net init_net; +EXPORT_SYMBOL_GPL(init_net); + +static struct net *net_alloc(void) +{ + return kmem_cache_zalloc(net_cachep, GFP_KERNEL); +} + +static void net_free(struct net *net) +{ + if (!net) + return; + + if (unlikely(atomic_read(&net->use_count) != 0)) { + printk(KERN_EMERG "network namespace not free! 
Usage: %d\n", + atomic_read(&net->use_count)); + return; + } + + kmem_cache_free(net_cachep, net); +} + +static void cleanup_net(struct work_struct *work) +{ + struct pernet_operations *ops; + struct net *net; + + net = container_of(work, struct net, work); + + mutex_lock(&net_mutex); + + /* Don't let anyone else find us. */ + rtnl_lock(); + list_del(&net->list); + rtnl_unlock(); + + /* Run all of the network namespace exit methods */ + list_for_each_entry_reverse(ops, &pernet_list, list) { + if (ops->exit) + ops->exit(net); + } + + mutex_unlock(&net_mutex); + + /* Ensure there are no outstanding rcu callbacks using this + * network namespace. + */ + rcu_barrier(); + + /* Finally it is safe to free my network namespace structure */ + net_free(net); +} + + +void __put_net(struct net *net) +{ + /* Cleanup the network namespace in process context */ + INIT_WORK(&net->work, cleanup_net); + schedule_work(&net->work); +} +EXPORT_SYMBOL_GPL(__put_net); + +/* + * setup_net runs the initializers for the network namespace object. + */ +static int setup_net(struct net *net) +{ + /* Must be called with net_mutex held */ + struct pernet_operations *ops; + int error; + + atomic_set(&net->count, 1); + atomic_set(&net->use_count, 0); + + error = 0; + list_for_each_entry(ops, &pernet_list, list) { + if (ops->init) { + error = ops->init(net); + if (error < 0) + goto out_undo; + } + } +out: + return error; + +out_undo: + /* Walk through the list backwards calling the exit functions + * for the pernet modules whose init functions did not fail. + */ + list_for_each_entry_continue_reverse(ops, &pernet_list, list) { + if (ops->exit) + ops->exit(net); + } + goto out; +} + +struct net *copy_net_ns(unsigned long flags, struct net *old_net) +{ + struct net *new_net = NULL; + int err; + + get_net(old_net); + + if (!(flags & CLONE_NEWNET)) + return old_net; + +#ifndef CONFIG_NET_NS + return ERR_PTR(-EINVAL); +#endif + + err = -ENOMEM; + new_net = net_alloc(); + if (!new_net) + goto out; + + mutex_lock(&net_mutex); + err = setup_net(new_net); + if (err) + goto out_unlock; + + rtnl_lock(); + list_add_tail(&new_net->list, &net_namespace_list); + rtnl_unlock(); + + +out_unlock: + mutex_unlock(&net_mutex); +out: + put_net(old_net); + if (err) { + net_free(new_net); + new_net = ERR_PTR(err); + } + return new_net; +} + +static int __init net_ns_init(void) +{ + int err; + + printk(KERN_INFO "net_namespace: %zd bytes\n", sizeof(struct net)); + net_cachep = kmem_cache_create("net_namespace", sizeof(struct net), + SMP_CACHE_BYTES, + SLAB_PANIC, NULL); + mutex_lock(&net_mutex); + err = setup_net(&init_net); + + rtnl_lock(); + list_add_tail(&init_net.list, &net_namespace_list); + rtnl_unlock(); + + mutex_unlock(&net_mutex); + if (err) + panic("Could not setup the initial network namespace"); + + return 0; +} + +pure_initcall(net_ns_init); + +static int register_pernet_operations(struct list_head *list, + struct pernet_operations *ops) +{ + struct net *net, *undo_net; + int error; + + error = 0; + list_add_tail(&ops->list, list); + for_each_net(net) { + if (ops->init) { + error = ops->init(net); + if (error) + goto out_undo; + } + } +out: + return error; + +out_undo: + /* If I have an error cleanup all namespaces I initialized */ + list_del(&ops->list); + for_each_net(undo_net) { + if (undo_net == net) + goto undone; + if (ops->exit) + ops->exit(undo_net); + } +undone: + goto out; +} + +static void unregister_pernet_operations(struct pernet_operations *ops) +{ + struct net *net; + + list_del(&ops->list); + for_each_net(net) + if 
(ops->exit) + ops->exit(net); +} + +/** + * register_pernet_subsys - register a network namespace subsystem + * @ops: pernet operations structure for the subsystem + * + * Register a subsystem which has init and exit functions + * that are called when network namespaces are created and + * destroyed respectively. + * + * When registered, all network namespace init functions are + * called for every existing network namespace, allowing kernel + * modules to have a race-free view of the set of network namespaces. + * + * When a new network namespace is created all of the init + * methods are called in the order in which they were registered. + * + * When a network namespace is destroyed all of the exit methods + * are called in the reverse of the order with which they were + * registered. + */ +int register_pernet_subsys(struct pernet_operations *ops) +{ + int error; + mutex_lock(&net_mutex); + error = register_pernet_operations(first_device, ops); + mutex_unlock(&net_mutex); + return error; +} +EXPORT_SYMBOL_GPL(register_pernet_subsys); + +/** + * unregister_pernet_subsys - unregister a network namespace subsystem + * @ops: pernet operations structure to manipulate + * + * Remove the pernet operations structure from the list to be + * used when network namespaces are created or destroyed. In + * addition, run the exit method for all existing network + * namespaces. + */ +void unregister_pernet_subsys(struct pernet_operations *module) +{ + mutex_lock(&net_mutex); + unregister_pernet_operations(module); + mutex_unlock(&net_mutex); +} +EXPORT_SYMBOL_GPL(unregister_pernet_subsys); + +/** + * register_pernet_device - register a network namespace device + * @ops: pernet operations structure for the subsystem + * + * Register a device which has init and exit functions + * that are called when network namespaces are created and + * destroyed respectively. + * + * When registered, all network namespace init functions are + * called for every existing network namespace, allowing kernel + * modules to have a race-free view of the set of network namespaces. + * + * When a new network namespace is created all of the init + * methods are called in the order in which they were registered. + * + * When a network namespace is destroyed all of the exit methods + * are called in the reverse of the order with which they were + * registered. + */ +int register_pernet_device(struct pernet_operations *ops) +{ + int error; + mutex_lock(&net_mutex); + error = register_pernet_operations(&pernet_list, ops); + if (!error && (first_device == &pernet_list)) + first_device = &ops->list; + mutex_unlock(&net_mutex); + return error; +} +EXPORT_SYMBOL_GPL(register_pernet_device); + +/** + * unregister_pernet_device - unregister a network namespace device + * @ops: pernet operations structure to manipulate + * + * Remove the pernet operations structure from the list to be + * used when network namespaces are created or destroyed. In + * addition, run the exit method for all existing network + * namespaces.
+ */ +void unregister_pernet_device(struct pernet_operations *ops) +{ + mutex_lock(&net_mutex); + if (&ops->list == first_device) + first_device = first_device->next; + unregister_pernet_operations(ops); + mutex_unlock(&net_mutex); +} +EXPORT_SYMBOL_GPL(unregister_pernet_device); diff --git a/net/core/netpoll.c b/net/core/netpoll.c index de1b26aa5720f011616413d87d852f94d46f9c52..95daba6249676dc8e466303279cadc1223e57d81 100644 --- a/net/core/netpoll.c +++ b/net/core/netpoll.c @@ -119,19 +119,22 @@ static __sum16 checksum_udp(struct sk_buff *skb, struct udphdr *uh, static void poll_napi(struct netpoll *np) { struct netpoll_info *npinfo = np->dev->npinfo; + struct napi_struct *napi; int budget = 16; - if (test_bit(__LINK_STATE_RX_SCHED, &np->dev->state) && - npinfo->poll_owner != smp_processor_id() && - spin_trylock(&npinfo->poll_lock)) { - npinfo->rx_flags |= NETPOLL_RX_DROP; - atomic_inc(&trapped); + list_for_each_entry(napi, &np->dev->napi_list, dev_list) { + if (test_bit(NAPI_STATE_SCHED, &napi->state) && + napi->poll_owner != smp_processor_id() && + spin_trylock(&napi->poll_lock)) { + npinfo->rx_flags |= NETPOLL_RX_DROP; + atomic_inc(&trapped); - np->dev->poll(np->dev, &budget); + napi->poll(napi, budget); - atomic_dec(&trapped); - npinfo->rx_flags &= ~NETPOLL_RX_DROP; - spin_unlock(&npinfo->poll_lock); + atomic_dec(&trapped); + npinfo->rx_flags &= ~NETPOLL_RX_DROP; + spin_unlock(&napi->poll_lock); + } } } @@ -157,7 +160,7 @@ void netpoll_poll(struct netpoll *np) /* Process pending work on NIC */ np->dev->poll_controller(np->dev); - if (np->dev->poll) + if (!list_empty(&np->dev->napi_list)) poll_napi(np); service_arp_queue(np->dev->npinfo); @@ -233,6 +236,17 @@ static struct sk_buff *find_skb(struct netpoll *np, int len, int reserve) return skb; } +static int netpoll_owner_active(struct net_device *dev) +{ + struct napi_struct *napi; + + list_for_each_entry(napi, &dev->napi_list, dev_list) { + if (napi->poll_owner == smp_processor_id()) + return 1; + } + return 0; +} + static void netpoll_send_skb(struct netpoll *np, struct sk_buff *skb) { int status = NETDEV_TX_BUSY; @@ -246,8 +260,7 @@ static void netpoll_send_skb(struct netpoll *np, struct sk_buff *skb) } /* don't get messages out of order, and no recursion */ - if (skb_queue_len(&npinfo->txq) == 0 && - npinfo->poll_owner != smp_processor_id()) { + if (skb_queue_len(&npinfo->txq) == 0 && !netpoll_owner_active(dev)) { unsigned long flags; local_irq_save(flags); @@ -402,11 +415,9 @@ static void arp_reply(struct sk_buff *skb) send_skb->protocol = htons(ETH_P_ARP); /* Fill the device header for the ARP frame */ - - if (np->dev->hard_header && - np->dev->hard_header(send_skb, skb->dev, ptype, - sha, np->local_mac, - send_skb->len) < 0) { + if (dev_hard_header(send_skb, skb->dev, ptype, + sha, np->local_mac, + send_skb->len) < 0) { kfree_skb(send_skb); return; } @@ -519,6 +530,23 @@ int __netpoll_rx(struct sk_buff *skb) return 0; } +void netpoll_print_options(struct netpoll *np) +{ + DECLARE_MAC_BUF(mac); + printk(KERN_INFO "%s: local port %d\n", + np->name, np->local_port); + printk(KERN_INFO "%s: local IP %d.%d.%d.%d\n", + np->name, HIPQUAD(np->local_ip)); + printk(KERN_INFO "%s: interface %s\n", + np->name, np->dev_name); + printk(KERN_INFO "%s: remote port %d\n", + np->name, np->remote_port); + printk(KERN_INFO "%s: remote IP %d.%d.%d.%d\n", + np->name, HIPQUAD(np->remote_ip)); + printk(KERN_INFO "%s: remote ethernet address %s\n", + np->name, print_mac(mac, np->remote_mac)); +} + int netpoll_parse_options(struct netpoll *np, char *opt) 
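The net_namespace.c file added above exposes register_pernet_subsys() and register_pernet_device() so that other code can hook per-namespace setup and teardown. The following is only an illustrative sketch of the calling pattern, assuming an in-kernel module build against the new net_namespace.h header; the demo_* names are hypothetical and not part of this patch.

#include <linux/module.h>
#include <linux/kernel.h>
#include <net/net_namespace.h>

/* ->init runs for every namespace that already exists at registration
 * time and for each namespace created later; ->exit runs in reverse
 * registration order when a namespace is torn down. */
static int demo_pernet_init(struct net *net)
{
	printk(KERN_INFO "demo: init for namespace %p\n", net);
	return 0;	/* a negative return unwinds namespaces already set up */
}

static void demo_pernet_exit(struct net *net)
{
	printk(KERN_INFO "demo: exit for namespace %p\n", net);
}

static struct pernet_operations demo_pernet_ops = {
	.init = demo_pernet_init,
	.exit = demo_pernet_exit,
};

static int __init demo_module_init(void)
{
	/* Subsystem state registers ahead of first_device; netdevice-related
	 * state would use register_pernet_device() instead. */
	return register_pernet_subsys(&demo_pernet_ops);
}

static void __exit demo_module_exit(void)
{
	unregister_pernet_subsys(&demo_pernet_ops);
}

module_init(demo_module_init);
module_exit(demo_module_exit);
MODULE_LICENSE("GPL");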
{ char *cur=opt, *delim; @@ -531,7 +559,6 @@ int netpoll_parse_options(struct netpoll *np, char *opt) cur = delim; } cur++; - printk(KERN_INFO "%s: local port %d\n", np->name, np->local_port); if (*cur != '/') { if ((delim = strchr(cur, '/')) == NULL) @@ -539,9 +566,6 @@ int netpoll_parse_options(struct netpoll *np, char *opt) *delim = 0; np->local_ip = ntohl(in_aton(cur)); cur = delim; - - printk(KERN_INFO "%s: local IP %d.%d.%d.%d\n", - np->name, HIPQUAD(np->local_ip)); } cur++; @@ -555,8 +579,6 @@ int netpoll_parse_options(struct netpoll *np, char *opt) } cur++; - printk(KERN_INFO "%s: interface %s\n", np->name, np->dev_name); - if (*cur != '@') { /* dst port */ if ((delim = strchr(cur, '@')) == NULL) @@ -566,7 +588,6 @@ int netpoll_parse_options(struct netpoll *np, char *opt) cur = delim; } cur++; - printk(KERN_INFO "%s: remote port %d\n", np->name, np->remote_port); /* dst ip */ if ((delim = strchr(cur, '/')) == NULL) @@ -575,9 +596,6 @@ int netpoll_parse_options(struct netpoll *np, char *opt) np->remote_ip = ntohl(in_aton(cur)); cur = delim + 1; - printk(KERN_INFO "%s: remote IP %d.%d.%d.%d\n", - np->name, HIPQUAD(np->remote_ip)); - if (*cur != 0) { /* MAC address */ if ((delim = strchr(cur, ':')) == NULL) @@ -608,15 +626,7 @@ int netpoll_parse_options(struct netpoll *np, char *opt) np->remote_mac[5] = simple_strtol(cur, NULL, 16); } - printk(KERN_INFO "%s: remote ethernet address " - "%02x:%02x:%02x:%02x:%02x:%02x\n", - np->name, - np->remote_mac[0], - np->remote_mac[1], - np->remote_mac[2], - np->remote_mac[3], - np->remote_mac[4], - np->remote_mac[5]); + netpoll_print_options(np); return 0; @@ -635,7 +645,7 @@ int netpoll_setup(struct netpoll *np) int err; if (np->dev_name) - ndev = dev_get_by_name(np->dev_name); + ndev = dev_get_by_name(&init_net, np->dev_name); if (!ndev) { printk(KERN_ERR "%s: %s doesn't exist, aborting.\n", np->name, np->dev_name); @@ -652,8 +662,6 @@ int netpoll_setup(struct netpoll *np) npinfo->rx_flags = 0; npinfo->rx_np = NULL; - spin_lock_init(&npinfo->poll_lock); - npinfo->poll_owner = -1; spin_lock_init(&npinfo->rx_lock); skb_queue_head_init(&npinfo->arp_tx); @@ -820,6 +828,7 @@ void netpoll_set_trap(int trap) EXPORT_SYMBOL(netpoll_set_trap); EXPORT_SYMBOL(netpoll_trap); +EXPORT_SYMBOL(netpoll_print_options); EXPORT_SYMBOL(netpoll_parse_options); EXPORT_SYMBOL(netpoll_setup); EXPORT_SYMBOL(netpoll_cleanup); diff --git a/net/core/pktgen.c b/net/core/pktgen.c index 803d0c8826af006047c19e198e7d242192d02b81..2100c734b102c9bde14ef3e86a362621996f37b9 100644 --- a/net/core/pktgen.c +++ b/net/core/pktgen.c @@ -152,6 +152,7 @@ #include #include #include +#include #include #include #include @@ -167,7 +168,7 @@ #include /* do_div */ #include -#define VERSION "pktgen v2.68: Packet Generator for packet performance testing.\n" +#define VERSION "pktgen v2.69: Packet Generator for packet performance testing.\n" /* The buckets are exponential in 'width' */ #define LAT_BUCKETS_MAX 32 @@ -189,6 +190,7 @@ #define F_SVID_RND (1<<10) /* Random SVLAN ID */ #define F_FLOW_SEQ (1<<11) /* Sequential flows */ #define F_IPSEC_ON (1<<12) /* ipsec on for flows */ +#define F_QUEUE_MAP_RND (1<<13) /* queue map Random */ /* Thread control flag bits */ #define T_TERMINATE (1<<0) @@ -331,6 +333,7 @@ struct pktgen_dev { __be32 cur_daddr; __u16 cur_udp_dst; __u16 cur_udp_src; + __u16 cur_queue_map; __u32 cur_pkt_size; __u8 hh[14]; @@ -358,6 +361,10 @@ struct pktgen_dev { unsigned lflow; /* Flow length (config) */ unsigned nflows; /* accumulated flows (stats) */ unsigned curfl; /* current 
sequenced flow (state)*/ + + u16 queue_map_min; + u16 queue_map_max; + #ifdef CONFIG_XFRM __u8 ipsmode; /* IPSEC mode (config) */ __u8 ipsproto; /* IPSEC type (config) */ @@ -378,7 +385,6 @@ struct pktgen_thread { struct list_head th_list; struct task_struct *tsk; char result[512]; - u32 max_before_softirq; /* We'll call do_softirq to prevent starvation. */ /* Field for thread to receive "posted" events terminate, stop ifs etc. */ @@ -593,11 +599,11 @@ static const struct file_operations pktgen_fops = { static int pktgen_if_show(struct seq_file *seq, void *v) { - int i; struct pktgen_dev *pkt_dev = seq->private; __u64 sa; __u64 stopped; __u64 now = getCurUs(); + DECLARE_MAC_BUF(mac); seq_printf(seq, "Params: count %llu min_pkt_size: %u max_pkt_size: %u\n", @@ -613,6 +619,11 @@ static int pktgen_if_show(struct seq_file *seq, void *v) seq_printf(seq, " flows: %u flowlen: %u\n", pkt_dev->cflows, pkt_dev->lflow); + seq_printf(seq, + " queue_map_min: %u queue_map_max: %u\n", + pkt_dev->queue_map_min, + pkt_dev->queue_map_max); + if (pkt_dev->flags & F_IPV6) { char b1[128], b2[128], b3[128]; fmt_ip6(b1, pkt_dev->in6_saddr.s6_addr); @@ -637,19 +648,12 @@ static int pktgen_if_show(struct seq_file *seq, void *v) seq_puts(seq, " src_mac: "); - if (is_zero_ether_addr(pkt_dev->src_mac)) - for (i = 0; i < 6; i++) - seq_printf(seq, "%02X%s", pkt_dev->odev->dev_addr[i], - i == 5 ? " " : ":"); - else - for (i = 0; i < 6; i++) - seq_printf(seq, "%02X%s", pkt_dev->src_mac[i], - i == 5 ? " " : ":"); + seq_printf(seq, "%s ", + print_mac(mac, is_zero_ether_addr(pkt_dev->src_mac) ? + pkt_dev->odev->dev_addr : pkt_dev->src_mac)); seq_printf(seq, "dst_mac: "); - for (i = 0; i < 6; i++) - seq_printf(seq, "%02X%s", pkt_dev->dst_mac[i], - i == 5 ? "\n" : ":"); + seq_printf(seq, "%s\n", print_mac(mac, pkt_dev->dst_mac)); seq_printf(seq, " udp_src_min: %d udp_src_max: %d udp_dst_min: %d udp_dst_max: %d\n", @@ -709,6 +713,9 @@ static int pktgen_if_show(struct seq_file *seq, void *v) if (pkt_dev->flags & F_MPLS_RND) seq_printf(seq, "MPLS_RND "); + if (pkt_dev->flags & F_QUEUE_MAP_RND) + seq_printf(seq, "QUEUE_MAP_RND "); + if (pkt_dev->cflows) { if (pkt_dev->flags & F_FLOW_SEQ) seq_printf(seq, "FLOW_SEQ "); /*in sequence flows*/ @@ -764,6 +771,8 @@ static int pktgen_if_show(struct seq_file *seq, void *v) seq_printf(seq, " cur_udp_dst: %d cur_udp_src: %d\n", pkt_dev->cur_udp_dst, pkt_dev->cur_udp_src); + seq_printf(seq, " cur_queue_map: %u\n", pkt_dev->cur_queue_map); + seq_printf(seq, " flows: %u\n", pkt_dev->nflows); if (pkt_dev->result[0]) @@ -1215,6 +1224,11 @@ static ssize_t pktgen_if_write(struct file *file, else if (strcmp(f, "FLOW_SEQ") == 0) pkt_dev->flags |= F_FLOW_SEQ; + else if (strcmp(f, "QUEUE_MAP_RND") == 0) + pkt_dev->flags |= F_QUEUE_MAP_RND; + + else if (strcmp(f, "!QUEUE_MAP_RND") == 0) + pkt_dev->flags &= ~F_QUEUE_MAP_RND; #ifdef CONFIG_XFRM else if (strcmp(f, "IPSEC") == 0) pkt_dev->flags |= F_IPSEC_ON; @@ -1526,16 +1540,40 @@ static ssize_t pktgen_if_write(struct file *file, return count; } + if (!strcmp(name, "queue_map_min")) { + len = num_arg(&user_buffer[i], 5, &value); + if (len < 0) { + return len; + } + i += len; + pkt_dev->queue_map_min = value; + sprintf(pg_result, "OK: queue_map_min=%u", pkt_dev->queue_map_min); + return count; + } + + if (!strcmp(name, "queue_map_max")) { + len = num_arg(&user_buffer[i], 5, &value); + if (len < 0) { + return len; + } + i += len; + pkt_dev->queue_map_max = value; + sprintf(pg_result, "OK: queue_map_max=%u", pkt_dev->queue_map_max); + return count; + } + if 
(!strcmp(name, "mpls")) { - unsigned n, offset; + unsigned n, cnt; + len = get_labels(&user_buffer[i], pkt_dev); - if (len < 0) { return len; } + if (len < 0) + return len; i += len; - offset = sprintf(pg_result, "OK: mpls="); + cnt = sprintf(pg_result, "OK: mpls="); for (n = 0; n < pkt_dev->nr_labels; n++) - offset += sprintf(pg_result + offset, - "%08x%s", ntohl(pkt_dev->labels[n]), - n == pkt_dev->nr_labels-1 ? "" : ","); + cnt += sprintf(pg_result + cnt, + "%08x%s", ntohl(pkt_dev->labels[n]), + n == pkt_dev->nr_labels-1 ? "" : ","); if (pkt_dev->nr_labels && pkt_dev->vlan_id != 0xffff) { pkt_dev->vlan_id = 0xffff; /* turn off VLAN/SVLAN */ @@ -1718,9 +1756,6 @@ static int pktgen_thread_show(struct seq_file *seq, void *v) BUG_ON(!t); - seq_printf(seq, "Name: %s max_before_softirq: %d\n", - t->tsk->comm, t->max_before_softirq); - seq_printf(seq, "Running: "); if_lock(t); @@ -1753,7 +1788,6 @@ static ssize_t pktgen_thread_write(struct file *file, int i = 0, max, len, ret; char name[40]; char *pg_result; - unsigned long value = 0; if (count < 1) { // sprintf(pg_result, "Wrong command format"); @@ -1827,12 +1861,8 @@ static ssize_t pktgen_thread_write(struct file *file, } if (!strcmp(name, "max_before_softirq")) { - len = num_arg(&user_buffer[i], 10, &value); - mutex_lock(&pktgen_thread_lock); - t->max_before_softirq = value; - mutex_unlock(&pktgen_thread_lock); + sprintf(pg_result, "OK: Note! max_before_softirq is obsoleted -- Do not use"); ret = count; - sprintf(pg_result, "OK: max_before_softirq=%lu", value); goto out; } @@ -1940,6 +1970,9 @@ static int pktgen_device_event(struct notifier_block *unused, { struct net_device *dev = ptr; + if (dev->nd_net != &init_net) + return NOTIFY_DONE; + /* It is OK that we do not hold the group lock right now, * as we run under the RTNL lock. 
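The mod_cur_headers() hunk above chooses the next TX queue for each packet: uniformly at random over [queue_map_min, queue_map_max] when QUEUE_MAP_RND is set, otherwise round-robin with wrap-around. A stand-alone restatement of that selection rule, as a sketch in user-space C with rand() standing in for the kernel's random32():

#include <stdio.h>
#include <stdlib.h>

/* Next queue index in [min, max]: random when rnd is non-zero,
 * otherwise round-robin wrapping back to min after max. */
static unsigned short next_queue_map(unsigned short cur, unsigned short min,
				     unsigned short max, int rnd)
{
	if (min >= max)
		return cur;		/* single queue: nothing to vary */
	if (rnd)
		return min + rand() % (max - min + 1);
	return cur + 1 > max ? min : cur + 1;
}

int main(void)
{
	unsigned short q = 2;
	int i;

	for (i = 0; i < 8; i++) {
		q = next_queue_map(q, 2, 5, 0);
		printf("%u ", q);	/* prints: 3 4 5 2 3 4 5 2 */
	}
	printf("\n");
	return 0;
}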
*/ @@ -1970,7 +2003,7 @@ static int pktgen_setup_dev(struct pktgen_dev *pkt_dev, const char *ifname) pkt_dev->odev = NULL; } - odev = dev_get_by_name(ifname); + odev = dev_get_by_name(&init_net, ifname); if (!odev) { printk(KERN_ERR "pktgen: no such netdevice: \"%s\"\n", ifname); return -ENODEV; @@ -2111,7 +2144,6 @@ static void spin(struct pktgen_dev *pkt_dev, __u64 spin_until_us) if (spin_until_us - now > jiffies_to_usecs(1) + 1) schedule_timeout_interruptible(1); else if (spin_until_us - now > 100) { - do_softirq(); if (!pkt_dev->running) return; if (need_resched()) @@ -2387,6 +2419,20 @@ static void mod_cur_headers(struct pktgen_dev *pkt_dev) pkt_dev->cur_pkt_size = t; } + if (pkt_dev->queue_map_min < pkt_dev->queue_map_max) { + __u16 t; + if (pkt_dev->flags & F_QUEUE_MAP_RND) { + t = random32() % + (pkt_dev->queue_map_max - pkt_dev->queue_map_min + 1) + + pkt_dev->queue_map_min; + } else { + t = pkt_dev->cur_queue_map + 1; + if (t > pkt_dev->queue_map_max) + t = pkt_dev->queue_map_min; + } + pkt_dev->cur_queue_map = t; + } + pkt_dev->flows[flow].count++; } @@ -2557,6 +2603,7 @@ static struct sk_buff *fill_packet_ipv4(struct net_device *odev, skb->network_header = skb->tail; skb->transport_header = skb->network_header + sizeof(struct iphdr); skb_put(skb, sizeof(struct iphdr) + sizeof(struct udphdr)); + skb->queue_mapping = pkt_dev->cur_queue_map; iph = ip_hdr(skb); udph = udp_hdr(skb); @@ -2686,6 +2733,7 @@ static unsigned int scan_ip6(const char *s, char ip[16]) unsigned int prefixlen = 0; unsigned int suffixlen = 0; __be32 tmp; + char *pos; for (i = 0; i < 16; i++) ip[i] = 0; @@ -2700,12 +2748,9 @@ static unsigned int scan_ip6(const char *s, char ip[16]) } s++; } - { - char *tmp; - u = simple_strtoul(s, &tmp, 16); - i = tmp - s; - } + u = simple_strtoul(s, &pos, 16); + i = pos - s; if (!i) return 0; if (prefixlen == 12 && s[i] == '.') { @@ -2733,11 +2778,9 @@ static unsigned int scan_ip6(const char *s, char ip[16]) len++; } else if (suffixlen != 0) break; - { - char *tmp; - u = simple_strtol(s, &tmp, 16); - i = tmp - s; - } + + u = simple_strtol(s, &pos, 16); + i = pos - s; if (!i) { if (*s) len--; @@ -2898,6 +2941,7 @@ static struct sk_buff *fill_packet_ipv6(struct net_device *odev, skb->network_header = skb->tail; skb->transport_header = skb->network_header + sizeof(struct ipv6hdr); skb_put(skb, sizeof(struct ipv6hdr) + sizeof(struct udphdr)); + skb->queue_mapping = pkt_dev->cur_queue_map; iph = ipv6_hdr(skb); udph = udp_hdr(skb); @@ -3465,8 +3509,6 @@ static int pktgen_thread_worker(void *arg) struct pktgen_thread *t = arg; struct pktgen_dev *pkt_dev = NULL; int cpu = t->cpu; - u32 max_before_softirq; - u32 tx_since_softirq = 0; BUG_ON(smp_processor_id() != cpu); @@ -3474,8 +3516,6 @@ static int pktgen_thread_worker(void *arg) pr_debug("pktgen: starting pktgen/%d: pid=%d\n", cpu, current->pid); - max_before_softirq = t->max_before_softirq; - set_current_state(TASK_INTERRUPTIBLE); set_freezable(); @@ -3494,24 +3534,9 @@ static int pktgen_thread_worker(void *arg) __set_current_state(TASK_RUNNING); - if (pkt_dev) { - + if (pkt_dev) pktgen_xmit(pkt_dev); - /* - * We like to stay RUNNING but must also give - * others fair share. 
- */ - - tx_since_softirq += pkt_dev->last_ok; - - if (tx_since_softirq > max_before_softirq) { - if (local_softirq_pending()) - do_softirq(); - tx_since_softirq = 0; - } - } - if (t->control & T_STOP) { pktgen_stop(t); t->control &= ~(T_STOP); @@ -3778,7 +3803,7 @@ static int __init pg_init(void) printk(KERN_INFO "%s", version); - pg_proc_dir = proc_mkdir(PG_PROC_DIR, proc_net); + pg_proc_dir = proc_mkdir(PG_PROC_DIR, init_net.proc_net); if (!pg_proc_dir) return -ENODEV; pg_proc_dir->owner = THIS_MODULE; @@ -3787,7 +3812,7 @@ static int __init pg_init(void) if (pe == NULL) { printk(KERN_ERR "pktgen: ERROR: cannot create %s " "procfs entry.\n", PGCTRL); - proc_net_remove(PG_PROC_DIR); + proc_net_remove(&init_net, PG_PROC_DIR); return -EINVAL; } @@ -3811,7 +3836,7 @@ static int __init pg_init(void) "all threads\n"); unregister_netdevice_notifier(&pktgen_notifier_block); remove_proc_entry(PGCTRL, pg_proc_dir); - proc_net_remove(PG_PROC_DIR); + proc_net_remove(&init_net, PG_PROC_DIR); return -ENODEV; } @@ -3838,7 +3863,7 @@ static void __exit pg_cleanup(void) /* Clean up proc file system */ remove_proc_entry(PGCTRL, pg_proc_dir); - proc_net_remove(PG_PROC_DIR); + proc_net_remove(&init_net, PG_PROC_DIR); } module_init(pg_init); diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c index 4756d5857abfb9010a55610423f967d129ae45f4..1072d16696c3b651993ef7130bd09a475c072b81 100644 --- a/net/core/rtnetlink.c +++ b/net/core/rtnetlink.c @@ -35,6 +35,7 @@ #include #include #include +#include #include #include @@ -74,8 +75,6 @@ void __rtnl_unlock(void) void rtnl_unlock(void) { mutex_unlock(&rtnl_mutex); - if (rtnl && rtnl->sk_receive_queue.qlen) - rtnl->sk_data_ready(rtnl, 0); netdev_run_todo(); } @@ -306,10 +305,13 @@ EXPORT_SYMBOL_GPL(rtnl_link_register); void __rtnl_link_unregister(struct rtnl_link_ops *ops) { struct net_device *dev, *n; + struct net *net; - for_each_netdev_safe(dev, n) { - if (dev->rtnl_link_ops == ops) - ops->dellink(dev); + for_each_net(net) { + for_each_netdev_safe(net, dev, n) { + if (dev->rtnl_link_ops == ops) + ops->dellink(dev); + } } list_del(&ops->list); } @@ -634,7 +636,6 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev, NLA_PUT_STRING(skb, IFLA_IFNAME, dev->name); NLA_PUT_U32(skb, IFLA_TXQLEN, dev->tx_queue_len); - NLA_PUT_U32(skb, IFLA_WEIGHT, dev->weight); NLA_PUT_U8(skb, IFLA_OPERSTATE, netif_running(dev) ? 
dev->operstate : IF_OPER_DOWN); NLA_PUT_U8(skb, IFLA_LINKMODE, dev->link_mode); @@ -694,12 +695,13 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev, static int rtnl_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb) { + struct net *net = skb->sk->sk_net; int idx; int s_idx = cb->args[0]; struct net_device *dev; idx = 0; - for_each_netdev(dev) { + for_each_netdev(net, dev) { if (idx < s_idx) goto cont; if (rtnl_fill_ifinfo(skb, dev, RTM_NEWLINK, @@ -714,7 +716,7 @@ static int rtnl_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb) return skb->len; } -static const struct nla_policy ifla_policy[IFLA_MAX+1] = { +const struct nla_policy ifla_policy[IFLA_MAX+1] = { [IFLA_IFNAME] = { .type = NLA_STRING, .len = IFNAMSIZ-1 }, [IFLA_ADDRESS] = { .type = NLA_BINARY, .len = MAX_ADDR_LEN }, [IFLA_BROADCAST] = { .type = NLA_BINARY, .len = MAX_ADDR_LEN }, @@ -724,6 +726,7 @@ static const struct nla_policy ifla_policy[IFLA_MAX+1] = { [IFLA_WEIGHT] = { .type = NLA_U32 }, [IFLA_OPERSTATE] = { .type = NLA_U8 }, [IFLA_LINKMODE] = { .type = NLA_U8 }, + [IFLA_NET_NS_PID] = { .type = NLA_U32 }, }; static const struct nla_policy ifla_info_policy[IFLA_INFO_MAX+1] = { @@ -731,12 +734,45 @@ static const struct nla_policy ifla_info_policy[IFLA_INFO_MAX+1] = { [IFLA_INFO_DATA] = { .type = NLA_NESTED }, }; +static struct net *get_net_ns_by_pid(pid_t pid) +{ + struct task_struct *tsk; + struct net *net; + + /* Lookup the network namespace */ + net = ERR_PTR(-ESRCH); + rcu_read_lock(); + tsk = find_task_by_pid(pid); + if (tsk) { + task_lock(tsk); + if (tsk->nsproxy) + net = get_net(tsk->nsproxy->net_ns); + task_unlock(tsk); + } + rcu_read_unlock(); + return net; +} + static int do_setlink(struct net_device *dev, struct ifinfomsg *ifm, struct nlattr **tb, char *ifname, int modified) { int send_addr_notify = 0; int err; + if (tb[IFLA_NET_NS_PID]) { + struct net *net; + net = get_net_ns_by_pid(nla_get_u32(tb[IFLA_NET_NS_PID])); + if (IS_ERR(net)) { + err = PTR_ERR(net); + goto errout; + } + err = dev_change_net_namespace(dev, net, ifname); + put_net(net); + if (err) + goto errout; + modified = 1; + } + if (tb[IFLA_MAP]) { struct rtnl_link_ifmap *u_map; struct ifmap k_map; @@ -834,9 +870,6 @@ static int do_setlink(struct net_device *dev, struct ifinfomsg *ifm, if (tb[IFLA_TXQLEN]) dev->tx_queue_len = nla_get_u32(tb[IFLA_TXQLEN]); - if (tb[IFLA_WEIGHT]) - dev->weight = nla_get_u32(tb[IFLA_WEIGHT]); - if (tb[IFLA_OPERSTATE]) set_operstate(dev, nla_get_u8(tb[IFLA_OPERSTATE])); @@ -862,6 +895,7 @@ static int do_setlink(struct net_device *dev, struct ifinfomsg *ifm, static int rtnl_setlink(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg) { + struct net *net = skb->sk->sk_net; struct ifinfomsg *ifm; struct net_device *dev; int err; @@ -880,9 +914,9 @@ static int rtnl_setlink(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg) err = -EINVAL; ifm = nlmsg_data(nlh); if (ifm->ifi_index > 0) - dev = dev_get_by_index(ifm->ifi_index); + dev = dev_get_by_index(net, ifm->ifi_index); else if (tb[IFLA_IFNAME]) - dev = dev_get_by_name(ifname); + dev = dev_get_by_name(net, ifname); else goto errout; @@ -908,6 +942,7 @@ static int rtnl_setlink(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg) static int rtnl_dellink(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg) { + struct net *net = skb->sk->sk_net; const struct rtnl_link_ops *ops; struct net_device *dev; struct ifinfomsg *ifm; @@ -924,9 +959,9 @@ static int rtnl_dellink(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg) ifm 
= nlmsg_data(nlh); if (ifm->ifi_index > 0) - dev = __dev_get_by_index(ifm->ifi_index); + dev = __dev_get_by_index(net, ifm->ifi_index); else if (tb[IFLA_IFNAME]) - dev = __dev_get_by_name(ifname); + dev = __dev_get_by_name(net, ifname); else return -EINVAL; @@ -941,8 +976,52 @@ static int rtnl_dellink(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg) return 0; } +struct net_device *rtnl_create_link(struct net *net, char *ifname, + const struct rtnl_link_ops *ops, struct nlattr *tb[]) +{ + int err; + struct net_device *dev; + + err = -ENOMEM; + dev = alloc_netdev(ops->priv_size, ifname, ops->setup); + if (!dev) + goto err; + + if (strchr(dev->name, '%')) { + err = dev_alloc_name(dev, dev->name); + if (err < 0) + goto err_free; + } + + dev->nd_net = net; + dev->rtnl_link_ops = ops; + + if (tb[IFLA_MTU]) + dev->mtu = nla_get_u32(tb[IFLA_MTU]); + if (tb[IFLA_ADDRESS]) + memcpy(dev->dev_addr, nla_data(tb[IFLA_ADDRESS]), + nla_len(tb[IFLA_ADDRESS])); + if (tb[IFLA_BROADCAST]) + memcpy(dev->broadcast, nla_data(tb[IFLA_BROADCAST]), + nla_len(tb[IFLA_BROADCAST])); + if (tb[IFLA_TXQLEN]) + dev->tx_queue_len = nla_get_u32(tb[IFLA_TXQLEN]); + if (tb[IFLA_OPERSTATE]) + set_operstate(dev, nla_get_u8(tb[IFLA_OPERSTATE])); + if (tb[IFLA_LINKMODE]) + dev->link_mode = nla_get_u8(tb[IFLA_LINKMODE]); + + return dev; + +err_free: + free_netdev(dev); +err: + return ERR_PTR(err); +} + static int rtnl_newlink(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg) { + struct net *net = skb->sk->sk_net; const struct rtnl_link_ops *ops; struct net_device *dev; struct ifinfomsg *ifm; @@ -966,9 +1045,9 @@ static int rtnl_newlink(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg) ifm = nlmsg_data(nlh); if (ifm->ifi_index > 0) - dev = __dev_get_by_index(ifm->ifi_index); + dev = __dev_get_by_index(net, ifm->ifi_index); else if (ifname[0]) - dev = __dev_get_by_name(ifname); + dev = __dev_get_by_name(net, ifname); else dev = NULL; @@ -1053,40 +1132,17 @@ static int rtnl_newlink(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg) if (!ifname[0]) snprintf(ifname, IFNAMSIZ, "%s%%d", ops->kind); - dev = alloc_netdev(ops->priv_size, ifname, ops->setup); - if (!dev) - return -ENOMEM; - - if (strchr(dev->name, '%')) { - err = dev_alloc_name(dev, dev->name); - if (err < 0) - goto err_free; - } - dev->rtnl_link_ops = ops; - - if (tb[IFLA_MTU]) - dev->mtu = nla_get_u32(tb[IFLA_MTU]); - if (tb[IFLA_ADDRESS]) - memcpy(dev->dev_addr, nla_data(tb[IFLA_ADDRESS]), - nla_len(tb[IFLA_ADDRESS])); - if (tb[IFLA_BROADCAST]) - memcpy(dev->broadcast, nla_data(tb[IFLA_BROADCAST]), - nla_len(tb[IFLA_BROADCAST])); - if (tb[IFLA_TXQLEN]) - dev->tx_queue_len = nla_get_u32(tb[IFLA_TXQLEN]); - if (tb[IFLA_WEIGHT]) - dev->weight = nla_get_u32(tb[IFLA_WEIGHT]); - if (tb[IFLA_OPERSTATE]) - set_operstate(dev, nla_get_u8(tb[IFLA_OPERSTATE])); - if (tb[IFLA_LINKMODE]) - dev->link_mode = nla_get_u8(tb[IFLA_LINKMODE]); - - if (ops->newlink) + + dev = rtnl_create_link(net, ifname, ops, tb); + + if (IS_ERR(dev)) + err = PTR_ERR(dev); + else if (ops->newlink) err = ops->newlink(dev, tb, data); else err = register_netdevice(dev); -err_free: - if (err < 0) + + if (err < 0 && !IS_ERR(dev)) free_netdev(dev); return err; } @@ -1094,6 +1150,7 @@ static int rtnl_newlink(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg) static int rtnl_getlink(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg) { + struct net *net = skb->sk->sk_net; struct ifinfomsg *ifm; struct nlattr *tb[IFLA_MAX+1]; struct net_device *dev = NULL; @@ -1106,7 +1163,7 @@ static int 
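The rtnetlink.c changes above let an RTM_SETLINK request carry the new IFLA_NET_NS_PID attribute, which do_setlink() resolves with get_net_ns_by_pid() and hands to dev_change_net_namespace(). A rough user-space sketch of such a request follows; it assumes userspace headers that already define IFLA_NET_NS_PID (added elsewhere in this series), the ifindex and pid values are placeholders, and error handling is minimal.

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <linux/netlink.h>
#include <linux/rtnetlink.h>

int main(void)
{
	int ifindex = 2;		/* placeholder: device to move */
	unsigned int ns_pid = 1234;	/* placeholder: a task in the target namespace */
	struct {
		struct nlmsghdr nh;
		struct ifinfomsg ifi;
		char attrs[64];
	} req;
	struct rtattr *rta;
	struct sockaddr_nl kernel = { .nl_family = AF_NETLINK };
	int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE);

	if (fd < 0)
		return 1;

	memset(&req, 0, sizeof(req));
	req.nh.nlmsg_len = NLMSG_LENGTH(sizeof(struct ifinfomsg));
	req.nh.nlmsg_type = RTM_SETLINK;
	req.nh.nlmsg_flags = NLM_F_REQUEST | NLM_F_ACK;
	req.ifi.ifi_family = AF_UNSPEC;
	req.ifi.ifi_index = ifindex;

	/* Append the IFLA_NET_NS_PID (u32) attribute after ifinfomsg. */
	rta = (struct rtattr *)((char *)&req + NLMSG_ALIGN(req.nh.nlmsg_len));
	rta->rta_type = IFLA_NET_NS_PID;
	rta->rta_len = RTA_LENGTH(sizeof(ns_pid));
	memcpy(RTA_DATA(rta), &ns_pid, sizeof(ns_pid));
	req.nh.nlmsg_len = NLMSG_ALIGN(req.nh.nlmsg_len) + RTA_ALIGN(rta->rta_len);

	if (sendto(fd, &req, req.nh.nlmsg_len, 0,
		   (struct sockaddr *)&kernel, sizeof(kernel)) < 0)
		perror("sendto");

	close(fd);
	return 0;
}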
rtnl_getlink(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg) ifm = nlmsg_data(nlh); if (ifm->ifi_index > 0) { - dev = dev_get_by_index(ifm->ifi_index); + dev = dev_get_by_index(net, ifm->ifi_index); if (dev == NULL) return -ENODEV; } else @@ -1255,22 +1312,20 @@ static int rtnetlink_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh) return doit(skb, nlh, (void *)&rta_buf[0]); } -static void rtnetlink_rcv(struct sock *sk, int len) +static void rtnetlink_rcv(struct sk_buff *skb) { - unsigned int qlen = 0; - - do { - mutex_lock(&rtnl_mutex); - netlink_run_queue(sk, &qlen, &rtnetlink_rcv_msg); - mutex_unlock(&rtnl_mutex); - - netdev_run_todo(); - } while (qlen); + rtnl_lock(); + netlink_rcv_skb(skb, &rtnetlink_rcv_msg); + rtnl_unlock(); } static int rtnetlink_event(struct notifier_block *this, unsigned long event, void *ptr) { struct net_device *dev = ptr; + + if (dev->nd_net != &init_net) + return NOTIFY_DONE; + switch (event) { case NETDEV_UNREGISTER: rtmsg_ifinfo(RTM_DELLINK, dev, ~0U); @@ -1308,8 +1363,8 @@ void __init rtnetlink_init(void) if (!rta_buf) panic("rtnetlink_init: cannot allocate rta_buf\n"); - rtnl = netlink_kernel_create(NETLINK_ROUTE, RTNLGRP_MAX, rtnetlink_rcv, - &rtnl_mutex, THIS_MODULE); + rtnl = netlink_kernel_create(&init_net, NETLINK_ROUTE, RTNLGRP_MAX, + rtnetlink_rcv, &rtnl_mutex, THIS_MODULE); if (rtnl == NULL) panic("rtnetlink_init: cannot initialize rtnetlink\n"); netlink_set_nonroot(NETLINK_ROUTE, NL_NONROOT_RECV); @@ -1335,3 +1390,5 @@ EXPORT_SYMBOL(rtnl_unlock); EXPORT_SYMBOL(rtnl_unicast); EXPORT_SYMBOL(rtnl_notify); EXPORT_SYMBOL(rtnl_set_sk_err); +EXPORT_SYMBOL(rtnl_create_link); +EXPORT_SYMBOL(ifla_policy); diff --git a/net/core/scm.c b/net/core/scm.c index 44c4ec2c8769b1a922d69a6f0e5882bc6f58f001..530bee8d9ed90448ee2b406b897ada19ec11d28c 100644 --- a/net/core/scm.c +++ b/net/core/scm.c @@ -167,7 +167,8 @@ int __scm_send(struct socket *sock, struct msghdr *msg, struct scm_cookie *p) int put_cmsg(struct msghdr * msg, int level, int type, int len, void *data) { - struct cmsghdr __user *cm = (struct cmsghdr __user *)msg->msg_control; + struct cmsghdr __user *cm + = (__force struct cmsghdr __user *)msg->msg_control; struct cmsghdr cmhdr; int cmlen = CMSG_LEN(len); int err; @@ -202,7 +203,8 @@ int put_cmsg(struct msghdr * msg, int level, int type, int len, void *data) void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm) { - struct cmsghdr __user *cm = (struct cmsghdr __user*)msg->msg_control; + struct cmsghdr __user *cm + = (__force struct cmsghdr __user*)msg->msg_control; int fdmax = 0; int fdnum = scm->fp->count; @@ -222,7 +224,8 @@ void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm) if (fdnum < fdmax) fdmax = fdnum; - for (i=0, cmfptr=(int __user *)CMSG_DATA(cm); isp = secpath_get(old->sp); #endif + new->csum_start = old->csum_start; + new->csum_offset = old->csum_offset; + new->ip_summed = old->ip_summed; new->transport_header = old->transport_header; new->network_header = old->network_header; new->mac_header = old->mac_header; @@ -545,8 +548,6 @@ struct sk_buff *skb_copy(const struct sk_buff *skb, gfp_t gfp_mask) skb_reserve(n, headerlen); /* Set the tail pointer and length */ skb_put(n, skb->len); - n->csum = skb->csum; - n->ip_summed = skb->ip_summed; if (skb_copy_bits(skb, -headerlen, n->head, headerlen + skb->len)) BUG(); @@ -589,8 +590,6 @@ struct sk_buff *pskb_copy(struct sk_buff *skb, gfp_t gfp_mask) skb_put(n, skb_headlen(skb)); /* Copy the bytes */ skb_copy_from_linear_data(skb, n->data, n->len); - n->csum = skb->csum; 
- n->ip_summed = skb->ip_summed; n->truesize += skb->data_len; n->data_len = skb->data_len; @@ -686,6 +685,7 @@ int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail, skb->transport_header += off; skb->network_header += off; skb->mac_header += off; + skb->csum_start += off; skb->cloned = 0; skb->hdr_len = 0; skb->nohdr = 0; @@ -734,9 +734,6 @@ struct sk_buff *skb_realloc_headroom(struct sk_buff *skb, unsigned int headroom) * * You must pass %GFP_ATOMIC as the allocation priority if this function * is called from an interrupt. - * - * BUG ALERT: ip_summed is not copied. Why does this work? Is it used - * only by netfilter in the cases when checksum is recalculated? --ANK */ struct sk_buff *skb_copy_expand(const struct sk_buff *skb, int newheadroom, int newtailroom, @@ -749,7 +746,7 @@ struct sk_buff *skb_copy_expand(const struct sk_buff *skb, gfp_mask); int oldheadroom = skb_headroom(skb); int head_copy_len, head_copy_off; - int off = 0; + int off; if (!n) return NULL; @@ -773,12 +770,13 @@ struct sk_buff *skb_copy_expand(const struct sk_buff *skb, copy_skb_header(n, skb); -#ifdef NET_SKBUFF_DATA_USES_OFFSET off = newheadroom - oldheadroom; -#endif + n->csum_start += off; +#ifdef NET_SKBUFF_DATA_USES_OFFSET n->transport_header += off; n->network_header += off; n->mac_header += off; +#endif return n; } diff --git a/net/core/sock.c b/net/core/sock.c index 190de61cd648db8400ec852e40a652a6897a9e7d..4ed9b507c1e78d519488088e0285785ad1460f9b 100644 --- a/net/core/sock.c +++ b/net/core/sock.c @@ -119,6 +119,7 @@ #include #include #include +#include #include #include #include @@ -366,6 +367,7 @@ static int sock_bindtodevice(struct sock *sk, char __user *optval, int optlen) { int ret = -ENOPROTOOPT; #ifdef CONFIG_NETDEVICES + struct net *net = sk->sk_net; char devname[IFNAMSIZ]; int index; @@ -394,7 +396,7 @@ static int sock_bindtodevice(struct sock *sk, char __user *optval, int optlen) if (devname[0] == '\0') { index = 0; } else { - struct net_device *dev = dev_get_by_name(devname); + struct net_device *dev = dev_get_by_name(net, devname); ret = -ENODEV; if (!dev) @@ -872,7 +874,7 @@ static inline void sock_lock_init(struct sock *sk) * @prot: struct proto associated with this new sock instance * @zero_it: if we should zero the newly allocated sock */ -struct sock *sk_alloc(int family, gfp_t priority, +struct sock *sk_alloc(struct net *net, int family, gfp_t priority, struct proto *prot, int zero_it) { struct sock *sk = NULL; @@ -893,6 +895,7 @@ struct sock *sk_alloc(int family, gfp_t priority, */ sk->sk_prot = sk->sk_prot_creator = prot; sock_lock_init(sk); + sk->sk_net = get_net(net); } if (security_sk_alloc(sk, family, priority)) @@ -932,6 +935,7 @@ void sk_free(struct sock *sk) __FUNCTION__, atomic_read(&sk->sk_omem_alloc)); security_sk_free(sk); + put_net(sk->sk_net); if (sk->sk_prot_creator->slab != NULL) kmem_cache_free(sk->sk_prot_creator->slab, sk); else @@ -941,7 +945,7 @@ void sk_free(struct sock *sk) struct sock *sk_clone(const struct sock *sk, const gfp_t priority) { - struct sock *newsk = sk_alloc(sk->sk_family, priority, sk->sk_prot, 0); + struct sock *newsk = sk_alloc(sk->sk_net, sk->sk_family, priority, sk->sk_prot, 0); if (newsk != NULL) { struct sk_filter *filter; @@ -1585,9 +1589,9 @@ void fastcall lock_sock_nested(struct sock *sk, int subclass) { might_sleep(); spin_lock_bh(&sk->sk_lock.slock); - if (sk->sk_lock.owner) + if (sk->sk_lock.owned) __lock_sock(sk); - sk->sk_lock.owner = (void *)1; + sk->sk_lock.owned = 1; spin_unlock(&sk->sk_lock.slock); /* * The sk_lock has 
mutex_lock() semantics here: @@ -1608,7 +1612,7 @@ void fastcall release_sock(struct sock *sk) spin_lock_bh(&sk->sk_lock.slock); if (sk->sk_backlog.tail) __release_sock(sk); - sk->sk_lock.owner = NULL; + sk->sk_lock.owned = 0; if (waitqueue_active(&sk->sk_lock.wq)) wake_up(&sk->sk_lock.wq); spin_unlock_bh(&sk->sk_lock.slock); @@ -1973,7 +1977,7 @@ static const struct file_operations proto_seq_fops = { static int __init proto_init(void) { /* register /proc/net/protocols */ - return proc_net_fops_create("protocols", S_IRUGO, &proto_seq_fops) == NULL ? -ENOBUFS : 0; + return proc_net_fops_create(&init_net, "protocols", S_IRUGO, &proto_seq_fops) == NULL ? -ENOBUFS : 0; } subsys_initcall(proto_init); diff --git a/net/dccp/ackvec.c b/net/dccp/ackvec.c index 7ac775f9a64b0f27cd2d05455dfe7420ea869faf..83378f379f72c129670fabf8cbe86e06626485b3 100644 --- a/net/dccp/ackvec.c +++ b/net/dccp/ackvec.c @@ -69,21 +69,20 @@ int dccp_insert_option_ackvec(struct sock *sk, struct sk_buff *skb) struct dccp_sock *dp = dccp_sk(sk); struct dccp_ackvec *av = dp->dccps_hc_rx_ackvec; /* Figure out how many options do we need to represent the ackvec */ - const u16 nr_opts = (av->dccpav_vec_len + - DCCP_MAX_ACKVEC_OPT_LEN - 1) / - DCCP_MAX_ACKVEC_OPT_LEN; + const u16 nr_opts = DIV_ROUND_UP(av->dccpav_vec_len, + DCCP_MAX_ACKVEC_OPT_LEN); u16 len = av->dccpav_vec_len + 2 * nr_opts, i; - struct timeval now; u32 elapsed_time; const unsigned char *tail, *from; unsigned char *to; struct dccp_ackvec_record *avr; + suseconds_t delta; if (DCCP_SKB_CB(skb)->dccpd_opt_len + len > DCCP_MAX_OPT_LEN) return -1; - dccp_timestamp(sk, &now); - elapsed_time = timeval_delta(&now, &av->dccpav_time) / 10; + delta = ktime_us_delta(ktime_get_real(), av->dccpav_time); + elapsed_time = delta / 10; if (elapsed_time != 0 && dccp_insert_option_elapsed_time(sk, skb, elapsed_time)) @@ -159,8 +158,7 @@ struct dccp_ackvec *dccp_ackvec_alloc(const gfp_t priority) av->dccpav_buf_head = DCCP_MAX_ACKVEC_LEN - 1; av->dccpav_buf_ackno = UINT48_MAX + 1; av->dccpav_buf_nonce = av->dccpav_buf_nonce = 0; - av->dccpav_time.tv_sec = 0; - av->dccpav_time.tv_usec = 0; + av->dccpav_time = ktime_set(0, 0); av->dccpav_vec_len = 0; INIT_LIST_HEAD(&av->dccpav_records); } @@ -321,7 +319,7 @@ int dccp_ackvec_add(struct dccp_ackvec *av, const struct sock *sk, } av->dccpav_buf_ackno = ackno; - dccp_timestamp(sk, &av->dccpav_time); + av->dccpav_time = ktime_get_real(); out: return 0; diff --git a/net/dccp/ackvec.h b/net/dccp/ackvec.h index 96504a3b16e43dc3a00ada1a3f44f709394c3aae..9ef0737043ee4e226938af84edb1e31edc39f7c8 100644 --- a/net/dccp/ackvec.h +++ b/net/dccp/ackvec.h @@ -12,8 +12,8 @@ */ #include +#include #include -#include #include /* Read about the ECN nonce to see why it is 253 */ @@ -52,7 +52,7 @@ struct dccp_ackvec { u64 dccpav_buf_ackno; struct list_head dccpav_records; - struct timeval dccpav_time; + ktime_t dccpav_time; u16 dccpav_buf_head; u16 dccpav_vec_len; u8 dccpav_buf_nonce; diff --git a/net/dccp/ccids/ccid2.c b/net/dccp/ccids/ccid2.c index d29b88fe723cad5f0dc07d7d2158fac7c214c1b5..426008e3b7e3479a295c0a8ee2954a5b2e5c89c2 100644 --- a/net/dccp/ccids/ccid2.c +++ b/net/dccp/ccids/ccid2.c @@ -59,7 +59,8 @@ static void ccid2_hc_tx_check_sanity(const struct ccid2_hc_tx_sock *hctx) pipe++; /* packets are sent sequentially */ - BUG_ON(seqp->ccid2s_seq <= prev->ccid2s_seq); + BUG_ON(dccp_delta_seqno(seqp->ccid2s_seq, + prev->ccid2s_seq ) >= 0); BUG_ON(time_before(seqp->ccid2s_sent, prev->ccid2s_sent)); @@ -83,8 +84,7 @@ static void 
ccid2_hc_tx_check_sanity(const struct ccid2_hc_tx_sock *hctx) #define ccid2_hc_tx_check_sanity(hctx) #endif -static int ccid2_hc_tx_alloc_seq(struct ccid2_hc_tx_sock *hctx, int num, - gfp_t gfp) +static int ccid2_hc_tx_alloc_seq(struct ccid2_hc_tx_sock *hctx) { struct ccid2_seq *seqp; int i; @@ -95,16 +95,16 @@ static int ccid2_hc_tx_alloc_seq(struct ccid2_hc_tx_sock *hctx, int num, return -ENOMEM; /* allocate buffer and initialize linked list */ - seqp = kmalloc(sizeof(*seqp) * num, gfp); + seqp = kmalloc(CCID2_SEQBUF_LEN * sizeof(struct ccid2_seq), gfp_any()); if (seqp == NULL) return -ENOMEM; - for (i = 0; i < (num - 1); i++) { + for (i = 0; i < (CCID2_SEQBUF_LEN - 1); i++) { seqp[i].ccid2s_next = &seqp[i + 1]; seqp[i + 1].ccid2s_prev = &seqp[i]; } - seqp[num - 1].ccid2s_next = seqp; - seqp->ccid2s_prev = &seqp[num - 1]; + seqp[CCID2_SEQBUF_LEN - 1].ccid2s_next = seqp; + seqp->ccid2s_prev = &seqp[CCID2_SEQBUF_LEN - 1]; /* This is the first allocation. Initiate the head and tail. */ if (hctx->ccid2hctx_seqbufc == 0) @@ -114,8 +114,8 @@ static int ccid2_hc_tx_alloc_seq(struct ccid2_hc_tx_sock *hctx, int num, hctx->ccid2hctx_seqh->ccid2s_next = seqp; seqp->ccid2s_prev = hctx->ccid2hctx_seqh; - hctx->ccid2hctx_seqt->ccid2s_prev = &seqp[num - 1]; - seqp[num - 1].ccid2s_next = hctx->ccid2hctx_seqt; + hctx->ccid2hctx_seqt->ccid2s_prev = &seqp[CCID2_SEQBUF_LEN - 1]; + seqp[CCID2_SEQBUF_LEN - 1].ccid2s_next = hctx->ccid2hctx_seqt; } /* store the original pointer to the buffer so we can free it */ @@ -127,19 +127,7 @@ static int ccid2_hc_tx_alloc_seq(struct ccid2_hc_tx_sock *hctx, int num, static int ccid2_hc_tx_send_packet(struct sock *sk, struct sk_buff *skb) { - struct ccid2_hc_tx_sock *hctx; - - switch (DCCP_SKB_CB(skb)->dccpd_type) { - case 0: /* XXX data packets from userland come through like this */ - case DCCP_PKT_DATA: - case DCCP_PKT_DATAACK: - break; - /* No congestion control on other packets */ - default: - return 0; - } - - hctx = ccid2_hc_tx_sk(sk); + struct ccid2_hc_tx_sock *hctx = ccid2_hc_tx_sk(sk); ccid2_pr_debug("pipe=%d cwnd=%d\n", hctx->ccid2hctx_pipe, hctx->ccid2hctx_cwnd); @@ -180,16 +168,11 @@ static void ccid2_change_l_ack_ratio(struct sock *sk, int val) dp->dccps_l_ack_ratio = val; } -static void ccid2_change_cwnd(struct ccid2_hc_tx_sock *hctx, int val) +static void ccid2_change_cwnd(struct ccid2_hc_tx_sock *hctx, u32 val) { - if (val == 0) - val = 1; - /* XXX do we need to change ack ratio? */ - ccid2_pr_debug("change cwnd to %d\n", val); - - BUG_ON(val < 1); - hctx->ccid2hctx_cwnd = val; + hctx->ccid2hctx_cwnd = val? : 1; + ccid2_pr_debug("changed cwnd to %u\n", hctx->ccid2hctx_cwnd); } static void ccid2_change_srtt(struct ccid2_hc_tx_sock *hctx, long val) @@ -295,12 +278,11 @@ static void ccid2_hc_tx_packet_sent(struct sock *sk, int more, unsigned int len) next = hctx->ccid2hctx_seqh->ccid2s_next; /* check if we need to alloc more space */ if (next == hctx->ccid2hctx_seqt) { - int rc; - - ccid2_pr_debug("allocating more space in history\n"); - rc = ccid2_hc_tx_alloc_seq(hctx, CCID2_SEQBUF_LEN, gfp_any()); - BUG_ON(rc); /* XXX what do we do? 
*/ - + if (ccid2_hc_tx_alloc_seq(hctx)) { + DCCP_CRIT("packet history - out of memory!"); + /* FIXME: find a more graceful way to bail out */ + return; + } next = hctx->ccid2hctx_seqh->ccid2s_next; BUG_ON(next == hctx->ccid2hctx_seqt); } @@ -581,8 +563,8 @@ static void ccid2_hc_tx_packet_recv(struct sock *sk, struct sk_buff *skb) hctx->ccid2hctx_rpseq = seqno; } else { /* check if packet is consecutive */ - if ((hctx->ccid2hctx_rpseq + 1) == seqno) - hctx->ccid2hctx_rpseq++; + if (dccp_delta_seqno(hctx->ccid2hctx_rpseq, seqno) == 1) + hctx->ccid2hctx_rpseq = seqno; /* it's a later packet */ else if (after48(seqno, hctx->ccid2hctx_rpseq)) { hctx->ccid2hctx_rpdupack++; @@ -771,7 +753,7 @@ static int ccid2_hc_tx_init(struct ccid *ccid, struct sock *sk) hctx->ccid2hctx_seqbufc = 0; /* XXX init ~ to window size... */ - if (ccid2_hc_tx_alloc_seq(hctx, CCID2_SEQBUF_LEN, GFP_ATOMIC) != 0) + if (ccid2_hc_tx_alloc_seq(hctx)) return -ENOMEM; hctx->ccid2hctx_sent = 0; @@ -835,7 +817,7 @@ static struct ccid_operations ccid2 = { }; #ifdef CONFIG_IP_DCCP_CCID2_DEBUG -module_param(ccid2_debug, int, 0444); +module_param(ccid2_debug, bool, 0444); MODULE_PARM_DESC(ccid2_debug, "Enable debug messages"); #endif diff --git a/net/dccp/ccids/ccid2.h b/net/dccp/ccids/ccid2.h index ebd79499c85a3b678d566bb56cd72dcecee0be59..d9daa534c9bef108d1435d53b49d16b2975cbda9 100644 --- a/net/dccp/ccids/ccid2.h +++ b/net/dccp/ccids/ccid2.h @@ -50,7 +50,7 @@ struct ccid2_seq { * @ccid2hctx_rpdupack - dupacks since rpseq */ struct ccid2_hc_tx_sock { - int ccid2hctx_cwnd; + u32 ccid2hctx_cwnd; int ccid2hctx_ssacks; int ccid2hctx_acks; unsigned int ccid2hctx_ssthresh; diff --git a/net/dccp/ccids/ccid3.c b/net/dccp/ccids/ccid3.c index e91c2b9dc27b47146be86fc27a41c2efc80aeb25..25772c32617274646f926468e44988f25d4887b3 100644 --- a/net/dccp/ccids/ccid3.c +++ b/net/dccp/ccids/ccid3.c @@ -113,27 +113,24 @@ static inline void ccid3_update_send_interval(struct ccid3_hc_tx_sock *hctx) hctx->ccid3hctx_s, (unsigned)(hctx->ccid3hctx_x >> 6)); } -/* - * Update X by - * If (p > 0) - * X_calc = calcX(s, R, p); - * X = max(min(X_calc, 2 * X_recv), s / t_mbi); - * Else - * If (now - tld >= R) - * X = max(min(2 * X, 2 * X_recv), s / R); - * tld = now; + +/** + * ccid3_hc_tx_update_x - Update allowed sending rate X + * @stamp: most recent time if available - can be left NULL. + * This function tracks draft rfc3448bis, check there for latest details. * * Note: X and X_recv are both stored in units of 64 * bytes/second, to support * fine-grained resolution of sending rates. This requires scaling by 2^6 * throughout the code. Only X_calc is unscaled (in bytes/second). * */ -static void ccid3_hc_tx_update_x(struct sock *sk, struct timeval *now) +static void ccid3_hc_tx_update_x(struct sock *sk, ktime_t *stamp) { struct ccid3_hc_tx_sock *hctx = ccid3_hc_tx_sk(sk); __u64 min_rate = 2 * hctx->ccid3hctx_x_recv; const __u64 old_x = hctx->ccid3hctx_x; + ktime_t now = stamp? 
*stamp : ktime_get_real(); /* * Handle IDLE periods: do not reduce below RFC3390 initial sending rate @@ -153,14 +150,14 @@ static void ccid3_hc_tx_update_x(struct sock *sk, struct timeval *now) (((__u64)hctx->ccid3hctx_s) << 6) / TFRC_T_MBI); - } else if (timeval_delta(now, &hctx->ccid3hctx_t_ld) - - (suseconds_t)hctx->ccid3hctx_rtt >= 0) { + } else if (ktime_us_delta(now, hctx->ccid3hctx_t_ld) + - (s64)hctx->ccid3hctx_rtt >= 0) { hctx->ccid3hctx_x = max(min(2 * hctx->ccid3hctx_x, min_rate), scaled_div(((__u64)hctx->ccid3hctx_s) << 6, hctx->ccid3hctx_rtt)); - hctx->ccid3hctx_t_ld = *now; + hctx->ccid3hctx_t_ld = now; } if (hctx->ccid3hctx_x != old_x) { @@ -214,7 +211,6 @@ static void ccid3_hc_tx_no_feedback_timer(unsigned long data) { struct sock *sk = (struct sock *)data; struct ccid3_hc_tx_sock *hctx = ccid3_hc_tx_sk(sk); - struct timeval now; unsigned long t_nfb = USEC_PER_SEC / 5; bh_lock_sock(sk); @@ -265,15 +261,12 @@ static void ccid3_hc_tx_no_feedback_timer(unsigned long data) max(hctx->ccid3hctx_x_recv / 2, (((__u64)hctx->ccid3hctx_s) << 6) / (2 * TFRC_T_MBI)); - - if (hctx->ccid3hctx_p == 0) - dccp_timestamp(sk, &now); } else { hctx->ccid3hctx_x_recv = hctx->ccid3hctx_x_calc; hctx->ccid3hctx_x_recv <<= 4; } /* Now recalculate X [RFC 3448, 4.3, step (4)] */ - ccid3_hc_tx_update_x(sk, &now); + ccid3_hc_tx_update_x(sk, NULL); /* * Schedule no feedback timer to expire in * max(t_RTO, 2 * s/X) = max(t_RTO, 2 * t_ipi) @@ -309,8 +302,6 @@ static int ccid3_hc_tx_send_packet(struct sock *sk, struct sk_buff *skb) ktime_t now = ktime_get_real(); s64 delay; - BUG_ON(hctx == NULL); - /* * This function is called only for Data and DataAck packets. Sending * zero-sized Data(Ack)s is theoretically possible, but for congestion @@ -341,7 +332,7 @@ static int ccid3_hc_tx_send_packet(struct sock *sk, struct sk_buff *skb) ccid3_pr_debug("SYN RTT = %uus\n", dp->dccps_syn_rtt); hctx->ccid3hctx_rtt = dp->dccps_syn_rtt; hctx->ccid3hctx_x = rfc3390_initial_rate(sk); - hctx->ccid3hctx_t_ld = ktime_to_timeval(now); + hctx->ccid3hctx_t_ld = now; } else { /* Sender does not have RTT sample: X = MSS/second */ hctx->ccid3hctx_x = dp->dccps_mss_cache; @@ -388,11 +379,8 @@ static void ccid3_hc_tx_packet_sent(struct sock *sk, int more, unsigned int len) { struct ccid3_hc_tx_sock *hctx = ccid3_hc_tx_sk(sk); - struct timeval now; struct dccp_tx_hist_entry *packet; - BUG_ON(hctx == NULL); - ccid3_hc_tx_update_s(hctx, len); packet = dccp_tx_hist_entry_new(ccid3_tx_hist, GFP_ATOMIC); @@ -402,8 +390,7 @@ static void ccid3_hc_tx_packet_sent(struct sock *sk, int more, } dccp_tx_hist_add_entry(&hctx->ccid3hctx_hist, packet); - dccp_timestamp(sk, &now); - packet->dccphtx_tstamp = now; + packet->dccphtx_tstamp = ktime_get_real(); packet->dccphtx_seqno = dccp_sk(sk)->dccps_gss; packet->dccphtx_rtt = hctx->ccid3hctx_rtt; packet->dccphtx_sent = 1; @@ -414,12 +401,10 @@ static void ccid3_hc_tx_packet_recv(struct sock *sk, struct sk_buff *skb) struct ccid3_hc_tx_sock *hctx = ccid3_hc_tx_sk(sk); struct ccid3_options_received *opt_recv; struct dccp_tx_hist_entry *packet; - struct timeval now; + ktime_t now; unsigned long t_nfb; u32 pinv, r_sample; - BUG_ON(hctx == NULL); - /* we are only interested in ACKs */ if (!(DCCP_SKB_CB(skb)->dccpd_type == DCCP_PKT_ACK || DCCP_SKB_CB(skb)->dccpd_type == DCCP_PKT_DATAACK)) @@ -452,13 +437,12 @@ static void ccid3_hc_tx_packet_recv(struct sock *sk, struct sk_buff *skb) else /* can not exceed 100% */ hctx->ccid3hctx_p = 1000000 / pinv; - dccp_timestamp(sk, &now); - + now = ktime_get_real(); /* 
* Calculate new round trip sample as per [RFC 3448, 4.3] by * R_sample = (now - t_recvdata) - t_elapsed */ - r_sample = dccp_sample_rtt(sk, &now, &packet->dccphtx_tstamp); + r_sample = dccp_sample_rtt(sk, ktime_us_delta(now, packet->dccphtx_tstamp)); /* * Update RTT estimate by @@ -561,8 +545,6 @@ static int ccid3_hc_tx_parse_options(struct sock *sk, unsigned char option, struct ccid3_hc_tx_sock *hctx = ccid3_hc_tx_sk(sk); struct ccid3_options_received *opt_recv; - BUG_ON(hctx == NULL); - opt_recv = &hctx->ccid3hctx_options_received; if (opt_recv->ccid3or_seqno != dp->dccps_gsr) { @@ -636,8 +618,6 @@ static void ccid3_hc_tx_exit(struct sock *sk) { struct ccid3_hc_tx_sock *hctx = ccid3_hc_tx_sk(sk); - BUG_ON(hctx == NULL); - ccid3_hc_tx_set_state(sk, TFRC_SSTATE_TERM); sk_stop_timer(sk, &hctx->ccid3hctx_no_feedback_timer); @@ -647,14 +627,13 @@ static void ccid3_hc_tx_exit(struct sock *sk) static void ccid3_hc_tx_get_info(struct sock *sk, struct tcp_info *info) { - const struct ccid3_hc_tx_sock *hctx = ccid3_hc_tx_sk(sk); + struct ccid3_hc_tx_sock *hctx; /* Listen socks doesn't have a private CCID block */ if (sk->sk_state == DCCP_LISTEN) return; - BUG_ON(hctx == NULL); - + hctx = ccid3_hc_tx_sk(sk); info->tcpi_rto = hctx->ccid3hctx_t_rto; info->tcpi_rtt = hctx->ccid3hctx_rtt; } @@ -662,13 +641,14 @@ static void ccid3_hc_tx_get_info(struct sock *sk, struct tcp_info *info) static int ccid3_hc_tx_getsockopt(struct sock *sk, const int optname, int len, u32 __user *optval, int __user *optlen) { - const struct ccid3_hc_tx_sock *hctx = ccid3_hc_tx_sk(sk); + const struct ccid3_hc_tx_sock *hctx; const void *val; /* Listen socks doesn't have a private CCID block */ if (sk->sk_state == DCCP_LISTEN) return -EINVAL; + hctx = ccid3_hc_tx_sk(sk); switch (optname) { case DCCP_SOCKOPT_CCID_TX_INFO: if (len < sizeof(hctx->ccid3hctx_tfrc)) @@ -729,20 +709,20 @@ static void ccid3_hc_rx_send_feedback(struct sock *sk) struct ccid3_hc_rx_sock *hcrx = ccid3_hc_rx_sk(sk); struct dccp_sock *dp = dccp_sk(sk); struct dccp_rx_hist_entry *packet; - struct timeval now; + ktime_t now; suseconds_t delta; ccid3_pr_debug("%s(%p) - entry \n", dccp_role(sk), sk); - dccp_timestamp(sk, &now); + now = ktime_get_real(); switch (hcrx->ccid3hcrx_state) { case TFRC_RSTATE_NO_DATA: hcrx->ccid3hcrx_x_recv = 0; break; case TFRC_RSTATE_DATA: - delta = timeval_delta(&now, - &hcrx->ccid3hcrx_tstamp_last_feedback); + delta = ktime_us_delta(now, + hcrx->ccid3hcrx_tstamp_last_feedback); DCCP_BUG_ON(delta < 0); hcrx->ccid3hcrx_x_recv = scaled_div32(hcrx->ccid3hcrx_bytes_recv, delta); @@ -764,7 +744,7 @@ static void ccid3_hc_rx_send_feedback(struct sock *sk) hcrx->ccid3hcrx_bytes_recv = 0; /* Elapsed time information [RFC 4340, 13.2] in units of 10 * usecs */ - delta = timeval_delta(&now, &packet->dccphrx_tstamp); + delta = ktime_us_delta(now, packet->dccphrx_tstamp); DCCP_BUG_ON(delta < 0); hcrx->ccid3hcrx_elapsed_time = delta / 10; @@ -782,14 +762,13 @@ static void ccid3_hc_rx_send_feedback(struct sock *sk) static int ccid3_hc_rx_insert_options(struct sock *sk, struct sk_buff *skb) { - const struct ccid3_hc_rx_sock *hcrx = ccid3_hc_rx_sk(sk); + const struct ccid3_hc_rx_sock *hcrx; __be32 x_recv, pinv; - BUG_ON(hcrx == NULL); - if (!(sk->sk_state == DCCP_OPEN || sk->sk_state == DCCP_PARTOPEN)) return 0; + hcrx = ccid3_hc_rx_sk(sk); DCCP_SKB_CB(skb)->dccpd_ccval = hcrx->ccid3hcrx_ccval_last_counter; if (dccp_packet_without_ack(skb)) @@ -839,7 +818,7 @@ static int ccid3_hc_rx_detect_loss(struct sock *sk, dccp_li_update_li(sk, 
&hcrx->ccid3hcrx_li_hist, &hcrx->ccid3hcrx_hist, - &hcrx->ccid3hcrx_tstamp_last_feedback, + hcrx->ccid3hcrx_tstamp_last_feedback, hcrx->ccid3hcrx_s, hcrx->ccid3hcrx_bytes_recv, hcrx->ccid3hcrx_x_recv, @@ -876,11 +855,9 @@ static void ccid3_hc_rx_packet_recv(struct sock *sk, struct sk_buff *skb) struct ccid3_hc_rx_sock *hcrx = ccid3_hc_rx_sk(sk); const struct dccp_options_received *opt_recv; struct dccp_rx_hist_entry *packet; - struct timeval now; u32 p_prev, r_sample, rtt_prev; int loss, payload_size; - - BUG_ON(hcrx == NULL); + ktime_t now; opt_recv = &dccp_sk(sk)->dccps_options_received; @@ -891,9 +868,9 @@ static void ccid3_hc_rx_packet_recv(struct sock *sk, struct sk_buff *skb) case DCCP_PKT_DATAACK: if (opt_recv->dccpor_timestamp_echo == 0) break; + r_sample = dccp_timestamp() - opt_recv->dccpor_timestamp_echo; rtt_prev = hcrx->ccid3hcrx_rtt; - dccp_timestamp(sk, &now); - r_sample = dccp_sample_rtt(sk, &now, NULL); + r_sample = dccp_sample_rtt(sk, 10 * r_sample); if (hcrx->ccid3hcrx_state == TFRC_RSTATE_NO_DATA) hcrx->ccid3hcrx_rtt = r_sample; @@ -912,7 +889,7 @@ static void ccid3_hc_rx_packet_recv(struct sock *sk, struct sk_buff *skb) return; } - packet = dccp_rx_hist_entry_new(ccid3_rx_hist, sk, opt_recv->dccpor_ndp, + packet = dccp_rx_hist_entry_new(ccid3_rx_hist, opt_recv->dccpor_ndp, skb, GFP_ATOMIC); if (unlikely(packet == NULL)) { DCCP_WARN("%s(%p), Not enough mem to add rx packet " @@ -941,9 +918,9 @@ static void ccid3_hc_rx_packet_recv(struct sock *sk, struct sk_buff *skb) if (loss) break; - dccp_timestamp(sk, &now); - if ((timeval_delta(&now, &hcrx->ccid3hcrx_tstamp_last_ack) - - (suseconds_t)hcrx->ccid3hcrx_rtt) >= 0) { + now = ktime_get_real(); + if ((ktime_us_delta(now, hcrx->ccid3hcrx_tstamp_last_ack) - + (s64)hcrx->ccid3hcrx_rtt) >= 0) { hcrx->ccid3hcrx_tstamp_last_ack = now; ccid3_hc_rx_send_feedback(sk); } @@ -984,8 +961,8 @@ static int ccid3_hc_rx_init(struct ccid *ccid, struct sock *sk) hcrx->ccid3hcrx_state = TFRC_RSTATE_NO_DATA; INIT_LIST_HEAD(&hcrx->ccid3hcrx_hist); INIT_LIST_HEAD(&hcrx->ccid3hcrx_li_hist); - dccp_timestamp(sk, &hcrx->ccid3hcrx_tstamp_last_ack); - hcrx->ccid3hcrx_tstamp_last_feedback = hcrx->ccid3hcrx_tstamp_last_ack; + hcrx->ccid3hcrx_tstamp_last_feedback = + hcrx->ccid3hcrx_tstamp_last_ack = ktime_get_real(); hcrx->ccid3hcrx_s = 0; hcrx->ccid3hcrx_rtt = 0; return 0; @@ -995,8 +972,6 @@ static void ccid3_hc_rx_exit(struct sock *sk) { struct ccid3_hc_rx_sock *hcrx = ccid3_hc_rx_sk(sk); - BUG_ON(hcrx == NULL); - ccid3_hc_rx_set_state(sk, TFRC_RSTATE_TERM); /* Empty packet history */ @@ -1008,14 +983,13 @@ static void ccid3_hc_rx_exit(struct sock *sk) static void ccid3_hc_rx_get_info(struct sock *sk, struct tcp_info *info) { - const struct ccid3_hc_rx_sock *hcrx = ccid3_hc_rx_sk(sk); + const struct ccid3_hc_rx_sock *hcrx; /* Listen socks doesn't have a private CCID block */ if (sk->sk_state == DCCP_LISTEN) return; - BUG_ON(hcrx == NULL); - + hcrx = ccid3_hc_rx_sk(sk); info->tcpi_ca_state = hcrx->ccid3hcrx_state; info->tcpi_options |= TCPI_OPT_TIMESTAMPS; info->tcpi_rcv_rtt = hcrx->ccid3hcrx_rtt; @@ -1024,13 +998,14 @@ static void ccid3_hc_rx_get_info(struct sock *sk, struct tcp_info *info) static int ccid3_hc_rx_getsockopt(struct sock *sk, const int optname, int len, u32 __user *optval, int __user *optlen) { - const struct ccid3_hc_rx_sock *hcrx = ccid3_hc_rx_sk(sk); + const struct ccid3_hc_rx_sock *hcrx; const void *val; /* Listen socks doesn't have a private CCID block */ if (sk->sk_state == DCCP_LISTEN) return -EINVAL; + hcrx = 
ccid3_hc_rx_sk(sk); switch (optname) { case DCCP_SOCKOPT_CCID_RX_INFO: if (len < sizeof(hcrx->ccid3hcrx_tfrc)) @@ -1071,7 +1046,7 @@ static struct ccid_operations ccid3 = { }; #ifdef CONFIG_IP_DCCP_CCID3_DEBUG -module_param(ccid3_debug, int, 0444); +module_param(ccid3_debug, bool, 0444); MODULE_PARM_DESC(ccid3_debug, "Enable debug messages"); #endif diff --git a/net/dccp/ccids/ccid3.h b/net/dccp/ccids/ccid3.h index 51d4b804e3344d8c31f90392be3f74bdef85a958..0cdc982cfe470f46a59210bf06d4a913164aed8e 100644 --- a/net/dccp/ccids/ccid3.h +++ b/net/dccp/ccids/ccid3.h @@ -38,7 +38,6 @@ #include #include -#include #include #include #include "../ccid.h" @@ -111,13 +110,20 @@ struct ccid3_hc_tx_sock { u8 ccid3hctx_idle; ktime_t ccid3hctx_t_last_win_count; struct timer_list ccid3hctx_no_feedback_timer; - struct timeval ccid3hctx_t_ld; + ktime_t ccid3hctx_t_ld; ktime_t ccid3hctx_t_nom; u32 ccid3hctx_delta; struct list_head ccid3hctx_hist; struct ccid3_options_received ccid3hctx_options_received; }; +static inline struct ccid3_hc_tx_sock *ccid3_hc_tx_sk(const struct sock *sk) +{ + struct ccid3_hc_tx_sock *hctx = ccid_priv(dccp_sk(sk)->dccps_hc_tx_ccid); + BUG_ON(hctx == NULL); + return hctx; +} + /* TFRC receiver states */ enum ccid3_hc_rx_states { TFRC_RSTATE_NO_DATA = 1, @@ -153,8 +159,8 @@ struct ccid3_hc_rx_sock { ccid3hcrx_ccval_last_counter:4; enum ccid3_hc_rx_states ccid3hcrx_state:8; u32 ccid3hcrx_bytes_recv; - struct timeval ccid3hcrx_tstamp_last_feedback; - struct timeval ccid3hcrx_tstamp_last_ack; + ktime_t ccid3hcrx_tstamp_last_feedback; + ktime_t ccid3hcrx_tstamp_last_ack; struct list_head ccid3hcrx_hist; struct list_head ccid3hcrx_li_hist; u16 ccid3hcrx_s; @@ -162,14 +168,11 @@ struct ccid3_hc_rx_sock { u32 ccid3hcrx_elapsed_time; }; -static inline struct ccid3_hc_tx_sock *ccid3_hc_tx_sk(const struct sock *sk) -{ - return ccid_priv(dccp_sk(sk)->dccps_hc_tx_ccid); -} - static inline struct ccid3_hc_rx_sock *ccid3_hc_rx_sk(const struct sock *sk) { - return ccid_priv(dccp_sk(sk)->dccps_hc_rx_ccid); + struct ccid3_hc_rx_sock *hcrx = ccid_priv(dccp_sk(sk)->dccps_hc_rx_ccid); + BUG_ON(hcrx == NULL); + return hcrx; } #endif /* _DCCP_CCID3_H_ */ diff --git a/net/dccp/ccids/lib/loss_interval.c b/net/dccp/ccids/lib/loss_interval.c index 174d3f13d93f733bc0322d9283c82a14a72fd57e..40ad428a27f56dcf1aff0eed16b69dfa49cda4e2 100644 --- a/net/dccp/ccids/lib/loss_interval.c +++ b/net/dccp/ccids/lib/loss_interval.c @@ -125,14 +125,14 @@ static int dccp_li_hist_interval_new(struct list_head *list, * returns estimated loss interval in usecs */ static u32 dccp_li_calc_first_li(struct sock *sk, struct list_head *hist_list, - struct timeval *last_feedback, + ktime_t last_feedback, u16 s, u32 bytes_recv, u32 previous_x_recv) { struct dccp_rx_hist_entry *entry, *next, *tail = NULL; u32 x_recv, p; suseconds_t rtt, delta; - struct timeval tstamp = { 0, 0 }; + ktime_t tstamp = ktime_set(0, 0); int interval = 0; int win_count = 0; int step = 0; @@ -176,7 +176,7 @@ static u32 dccp_li_calc_first_li(struct sock *sk, return ~0; } - delta = timeval_delta(&tstamp, &tail->dccphrx_tstamp); + delta = ktime_us_delta(tstamp, tail->dccphrx_tstamp); DCCP_BUG_ON(delta < 0); rtt = delta * 4 / interval; @@ -196,8 +196,7 @@ static u32 dccp_li_calc_first_li(struct sock *sk, return ~0; } - dccp_timestamp(sk, &tstamp); - delta = timeval_delta(&tstamp, last_feedback); + delta = ktime_us_delta(ktime_get_real(), last_feedback); DCCP_BUG_ON(delta <= 0); x_recv = scaled_div32(bytes_recv, delta); @@ -226,7 +225,7 @@ static u32 
dccp_li_calc_first_li(struct sock *sk, void dccp_li_update_li(struct sock *sk, struct list_head *li_hist_list, struct list_head *hist_list, - struct timeval *last_feedback, u16 s, u32 bytes_recv, + ktime_t last_feedback, u16 s, u32 bytes_recv, u32 previous_x_recv, u64 seq_loss, u8 win_loss) { struct dccp_li_hist_entry *head; diff --git a/net/dccp/ccids/lib/loss_interval.h b/net/dccp/ccids/lib/loss_interval.h index 906c806d6d9d107720b9af7d5ba06840608e7868..27bee92dae13481a5299e8e7fb686b6133b956b4 100644 --- a/net/dccp/ccids/lib/loss_interval.h +++ b/net/dccp/ccids/lib/loss_interval.h @@ -13,8 +13,8 @@ * any later version. */ +#include #include -#include extern void dccp_li_hist_purge(struct list_head *list); @@ -23,7 +23,7 @@ extern u32 dccp_li_hist_calc_i_mean(struct list_head *list); extern void dccp_li_update_li(struct sock *sk, struct list_head *li_hist_list, struct list_head *hist_list, - struct timeval *last_feedback, u16 s, + ktime_t last_feedback, u16 s, u32 bytes_recv, u32 previous_x_recv, u64 seq_loss, u8 win_loss); #endif /* _DCCP_LI_HIST_ */ diff --git a/net/dccp/ccids/lib/packet_history.h b/net/dccp/ccids/lib/packet_history.h index 60d00f01539021742a329d212ed0dd84d12c7c7e..032bb61c6e39f833ddd8d9a525dd90921fc22a16 100644 --- a/net/dccp/ccids/lib/packet_history.h +++ b/net/dccp/ccids/lib/packet_history.h @@ -37,9 +37,9 @@ #ifndef _DCCP_PKT_HIST_ #define _DCCP_PKT_HIST_ +#include #include #include -#include #include "../../dccp.h" @@ -57,7 +57,7 @@ struct dccp_tx_hist_entry { u64 dccphtx_seqno:48, dccphtx_sent:1; u32 dccphtx_rtt; - struct timeval dccphtx_tstamp; + ktime_t dccphtx_tstamp; }; struct dccp_tx_hist { @@ -124,7 +124,7 @@ struct dccp_rx_hist_entry { dccphrx_ccval:4, dccphrx_type:4; u32 dccphrx_ndp; /* In fact it is from 8 to 24 bits */ - struct timeval dccphrx_tstamp; + ktime_t dccphrx_tstamp; }; struct dccp_rx_hist { @@ -136,7 +136,6 @@ extern void dccp_rx_hist_delete(struct dccp_rx_hist *hist); static inline struct dccp_rx_hist_entry * dccp_rx_hist_entry_new(struct dccp_rx_hist *hist, - const struct sock *sk, const u32 ndp, const struct sk_buff *skb, const gfp_t prio) @@ -151,7 +150,7 @@ static inline struct dccp_rx_hist_entry * entry->dccphrx_ccval = dh->dccph_ccval; entry->dccphrx_type = dh->dccph_type; entry->dccphrx_ndp = ndp; - dccp_timestamp(sk, &entry->dccphrx_tstamp); + entry->dccphrx_tstamp = ktime_get_real(); } return entry; diff --git a/net/dccp/dccp.h b/net/dccp/dccp.h index e2d74cd7eeeb3557423d3284225146bd6aae1deb..ee97950d77d1993bdc53ddb1609bbd804342b150 100644 --- a/net/dccp/dccp.h +++ b/net/dccp/dccp.h @@ -13,6 +13,7 @@ */ #include +#include #include #include #include @@ -91,6 +92,7 @@ extern int sysctl_dccp_feat_ack_ratio; extern int sysctl_dccp_feat_send_ack_vector; extern int sysctl_dccp_feat_send_ndp_count; extern int sysctl_dccp_tx_qlen; +extern int sysctl_dccp_sync_ratelimit; /* * 48-bit sequence number arithmetic (signed and unsigned) @@ -208,7 +210,6 @@ extern void dccp_v4_send_check(struct sock *sk, int len, struct sk_buff *skb); extern int dccp_retransmit_skb(struct sock *sk, struct sk_buff *skb); extern void dccp_send_ack(struct sock *sk); -extern void dccp_send_delayed_ack(struct sock *sk); extern void dccp_reqsk_send_ack(struct sk_buff *sk, struct request_sock *rsk); extern void dccp_send_sync(struct sock *sk, const u64 seq, @@ -293,11 +294,12 @@ extern unsigned int dccp_poll(struct file *file, struct socket *sock, extern int dccp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len); +extern struct sk_buff 
*dccp_ctl_make_reset(struct socket *ctl, + struct sk_buff *skb); extern int dccp_send_reset(struct sock *sk, enum dccp_reset_codes code); extern void dccp_send_close(struct sock *sk, const int active); extern int dccp_invalid_packet(struct sk_buff *skb); -extern u32 dccp_sample_rtt(struct sock *sk, struct timeval *t_recv, - struct timeval *t_history); +extern u32 dccp_sample_rtt(struct sock *sk, long delta); static inline int dccp_bad_service_code(const struct sock *sk, const __be32 service) @@ -309,10 +311,22 @@ static inline int dccp_bad_service_code(const struct sock *sk, return !dccp_list_has_service(dp->dccps_service_list, service); } +/** + * dccp_skb_cb - DCCP per-packet control information + * @dccpd_type: one of %dccp_pkt_type (or unknown) + * @dccpd_ccval: CCVal field (5.1), see e.g. RFC 4342, 8.1 + * @dccpd_reset_code: one of %dccp_reset_codes + * @dccpd_reset_data: Data1..3 fields (depend on @dccpd_reset_code) + * @dccpd_opt_len: total length of all options (5.8) in the packet + * @dccpd_seq: sequence number + * @dccpd_ack_seq: acknowledgment number subheader field value + * This is used for transmission as well as for reception. + */ struct dccp_skb_cb { __u8 dccpd_type:4; __u8 dccpd_ccval:4; - __u8 dccpd_reset_code; + __u8 dccpd_reset_code, + dccpd_reset_data[3]; __u16 dccpd_opt_len; __u64 dccpd_seq; __u64 dccpd_ack_seq; @@ -395,53 +409,14 @@ extern int dccp_insert_options(struct sock *sk, struct sk_buff *skb); extern int dccp_insert_option_elapsed_time(struct sock *sk, struct sk_buff *skb, u32 elapsed_time); +extern u32 dccp_timestamp(void); +extern void dccp_timestamping_init(void); extern int dccp_insert_option_timestamp(struct sock *sk, struct sk_buff *skb); extern int dccp_insert_option(struct sock *sk, struct sk_buff *skb, unsigned char option, const void *value, unsigned char len); -extern void dccp_timestamp(const struct sock *sk, struct timeval *tv); - -static inline suseconds_t timeval_usecs(const struct timeval *tv) -{ - return tv->tv_sec * USEC_PER_SEC + tv->tv_usec; -} - -static inline suseconds_t timeval_delta(const struct timeval *large, - const struct timeval *small) -{ - time_t secs = large->tv_sec - small->tv_sec; - suseconds_t usecs = large->tv_usec - small->tv_usec; - - if (usecs < 0) { - secs--; - usecs += USEC_PER_SEC; - } - return secs * USEC_PER_SEC + usecs; -} - -static inline void timeval_add_usecs(struct timeval *tv, - const suseconds_t usecs) -{ - tv->tv_usec += usecs; - while (tv->tv_usec >= USEC_PER_SEC) { - tv->tv_sec++; - tv->tv_usec -= USEC_PER_SEC; - } -} - -static inline void timeval_sub_usecs(struct timeval *tv, - const suseconds_t usecs) -{ - tv->tv_usec -= usecs; - while (tv->tv_usec < 0) { - tv->tv_sec--; - tv->tv_usec += USEC_PER_SEC; - } - DCCP_BUG_ON(tv->tv_sec < 0); -} - #ifdef CONFIG_SYSCTL extern int dccp_sysctl_init(void); extern void dccp_sysctl_exit(void); diff --git a/net/dccp/input.c b/net/dccp/input.c index da6ec185ed5b5826e6df5b6d58f7870e26330cb9..19d7e1dbd87e99a98f463e944a6f20f052fd9856 100644 --- a/net/dccp/input.c +++ b/net/dccp/input.c @@ -68,7 +68,8 @@ static int dccp_check_seqno(struct sock *sk, struct sk_buff *skb) { const struct dccp_hdr *dh = dccp_hdr(skb); struct dccp_sock *dp = dccp_sk(sk); - u64 lswl, lawl; + u64 lswl, lawl, seqno = DCCP_SKB_CB(skb)->dccpd_seq, + ackno = DCCP_SKB_CB(skb)->dccpd_ack_seq; /* * Step 5: Prepare sequence numbers for Sync @@ -84,11 +85,9 @@ static int dccp_check_seqno(struct sock *sk, struct sk_buff *skb) */ if (dh->dccph_type == DCCP_PKT_SYNC || dh->dccph_type == DCCP_PKT_SYNCACK) { - 
if (between48(DCCP_SKB_CB(skb)->dccpd_ack_seq, - dp->dccps_awl, dp->dccps_awh) && - dccp_delta_seqno(dp->dccps_swl, - DCCP_SKB_CB(skb)->dccpd_seq) >= 0) - dccp_update_gsr(sk, DCCP_SKB_CB(skb)->dccpd_seq); + if (between48(ackno, dp->dccps_awl, dp->dccps_awh) && + dccp_delta_seqno(dp->dccps_swl, seqno) >= 0) + dccp_update_gsr(sk, seqno); else return -1; } @@ -103,9 +102,6 @@ static int dccp_check_seqno(struct sock *sk, struct sk_buff *skb) * Update S.GSR, S.SWL, S.SWH * If P.type != Sync, * Update S.GAR - * Otherwise, - * Send Sync packet acknowledging P.seqno - * Drop packet and return */ lswl = dp->dccps_swl; lawl = dp->dccps_awl; @@ -113,35 +109,52 @@ static int dccp_check_seqno(struct sock *sk, struct sk_buff *skb) if (dh->dccph_type == DCCP_PKT_CLOSEREQ || dh->dccph_type == DCCP_PKT_CLOSE || dh->dccph_type == DCCP_PKT_RESET) { - lswl = dp->dccps_gsr; - dccp_inc_seqno(&lswl); + lswl = ADD48(dp->dccps_gsr, 1); lawl = dp->dccps_gar; } - if (between48(DCCP_SKB_CB(skb)->dccpd_seq, lswl, dp->dccps_swh) && - (DCCP_SKB_CB(skb)->dccpd_ack_seq == DCCP_PKT_WITHOUT_ACK_SEQ || - between48(DCCP_SKB_CB(skb)->dccpd_ack_seq, - lawl, dp->dccps_awh))) { - dccp_update_gsr(sk, DCCP_SKB_CB(skb)->dccpd_seq); + if (between48(seqno, lswl, dp->dccps_swh) && + (ackno == DCCP_PKT_WITHOUT_ACK_SEQ || + between48(ackno, lawl, dp->dccps_awh))) { + dccp_update_gsr(sk, seqno); if (dh->dccph_type != DCCP_PKT_SYNC && - (DCCP_SKB_CB(skb)->dccpd_ack_seq != - DCCP_PKT_WITHOUT_ACK_SEQ)) - dp->dccps_gar = DCCP_SKB_CB(skb)->dccpd_ack_seq; + (ackno != DCCP_PKT_WITHOUT_ACK_SEQ)) + dp->dccps_gar = ackno; } else { + unsigned long now = jiffies; + /* + * Step 6: Check sequence numbers + * Otherwise, + * If P.type == Reset, + * Send Sync packet acknowledging S.GSR + * Otherwise, + * Send Sync packet acknowledging P.seqno + * Drop packet and return + * + * These Syncs are rate-limited as per RFC 4340, 7.5.4: + * at most 1 / (dccp_sync_rate_limit * HZ) Syncs per second. + */ + if (time_before(now, (dp->dccps_rate_last + + sysctl_dccp_sync_ratelimit))) + return 0; + DCCP_WARN("DCCP: Step 6 failed for %s packet, " "(LSWL(%llu) <= P.seqno(%llu) <= S.SWH(%llu)) and " "(P.ackno %s or LAWL(%llu) <= P.ackno(%llu) <= S.AWH(%llu), " "sending SYNC...\n", dccp_packet_name(dh->dccph_type), - (unsigned long long) lswl, - (unsigned long long) DCCP_SKB_CB(skb)->dccpd_seq, + (unsigned long long) lswl, (unsigned long long) seqno, (unsigned long long) dp->dccps_swh, - (DCCP_SKB_CB(skb)->dccpd_ack_seq == - DCCP_PKT_WITHOUT_ACK_SEQ) ? "doesn't exist" : "exists", - (unsigned long long) lawl, - (unsigned long long) DCCP_SKB_CB(skb)->dccpd_ack_seq, + (ackno == DCCP_PKT_WITHOUT_ACK_SEQ) ? 
"doesn't exist" + : "exists", + (unsigned long long) lawl, (unsigned long long) ackno, (unsigned long long) dp->dccps_awh); - dccp_send_sync(sk, DCCP_SKB_CB(skb)->dccpd_seq, DCCP_PKT_SYNC); + + dp->dccps_rate_last = now; + + if (dh->dccph_type == DCCP_PKT_RESET) + seqno = dp->dccps_gsr; + dccp_send_sync(sk, seqno, DCCP_PKT_SYNC); return -1; } @@ -280,6 +293,7 @@ static int dccp_rcv_request_sent_state_process(struct sock *sk, if (dh->dccph_type == DCCP_PKT_RESPONSE) { const struct inet_connection_sock *icsk = inet_csk(sk); struct dccp_sock *dp = dccp_sk(sk); + long tstamp = dccp_timestamp(); /* Stop the REQUEST timer */ inet_csk_clear_xmit_timer(sk, ICSK_TIME_RETRANS); @@ -300,13 +314,10 @@ static int dccp_rcv_request_sent_state_process(struct sock *sk, if (dccp_parse_options(sk, skb)) goto out_invalid_packet; - /* Obtain RTT sample from SYN exchange (used by CCID 3) */ - if (dp->dccps_options_received.dccpor_timestamp_echo) { - struct timeval now; - - dccp_timestamp(sk, &now); - dp->dccps_syn_rtt = dccp_sample_rtt(sk, &now, NULL); - } + /* Obtain usec RTT sample from SYN exchange (used by CCID 3) */ + if (likely(dp->dccps_options_received.dccpor_timestamp_echo)) + dp->dccps_syn_rtt = dccp_sample_rtt(sk, 10 * (tstamp - + dp->dccps_options_received.dccpor_timestamp_echo)); if (dccp_msk(sk)->dccpms_send_ack_vector && dccp_ackvec_add(dp->dccps_hc_rx_ackvec, sk, @@ -540,11 +551,6 @@ int dccp_rcv_state_process(struct sock *sk, struct sk_buff *skb, return 0; } - if (unlikely(dh->dccph_type == DCCP_PKT_SYNC)) { - dccp_send_sync(sk, dcb->dccpd_seq, DCCP_PKT_SYNCACK); - goto discard; - } - switch (sk->sk_state) { case DCCP_CLOSED: dcb->dccpd_reset_code = DCCP_RESET_CODE_NO_CONNECTION; @@ -575,6 +581,9 @@ int dccp_rcv_state_process(struct sock *sk, struct sk_buff *skb, sk_wake_async(sk, 0, POLL_OUT); break; } + } else if (unlikely(dh->dccph_type == DCCP_PKT_SYNC)) { + dccp_send_sync(sk, dcb->dccpd_seq, DCCP_PKT_SYNCACK); + goto discard; } if (!queued) { @@ -587,37 +596,22 @@ int dccp_rcv_state_process(struct sock *sk, struct sk_buff *skb, EXPORT_SYMBOL_GPL(dccp_rcv_state_process); /** - * dccp_sample_rtt - Sample RTT from packet exchange - * - * @sk: connected dccp_sock - * @t_recv: receive timestamp of packet with timestamp echo - * @t_hist: packet history timestamp or NULL + * dccp_sample_rtt - Validate and finalise computation of RTT sample + * @delta: number of microseconds between packet and acknowledgment + * The routine is kept generic to work in different contexts. It should be + * called immediately when the ACK used for the RTT sample arrives. 
*/ -u32 dccp_sample_rtt(struct sock *sk, struct timeval *t_recv, - struct timeval *t_hist) +u32 dccp_sample_rtt(struct sock *sk, long delta) { - struct dccp_sock *dp = dccp_sk(sk); - struct dccp_options_received *or = &dp->dccps_options_received; - suseconds_t delta; - - if (t_hist == NULL) { - if (!or->dccpor_timestamp_echo) { - DCCP_WARN("packet without timestamp echo\n"); - return DCCP_SANE_RTT_MAX; - } - timeval_sub_usecs(t_recv, or->dccpor_timestamp_echo * 10); - delta = timeval_usecs(t_recv); - } else - delta = timeval_delta(t_recv, t_hist); - - delta -= or->dccpor_elapsed_time * 10; /* either set or 0 */ + /* dccpor_elapsed_time is either zeroed out or set and > 0 */ + delta -= dccp_sk(sk)->dccps_options_received.dccpor_elapsed_time * 10; if (unlikely(delta <= 0)) { - DCCP_WARN("unusable RTT sample %ld, using min\n", (long)delta); + DCCP_WARN("unusable RTT sample %ld, using min\n", delta); return DCCP_SANE_RTT_MIN; } - if (unlikely(delta - (suseconds_t)DCCP_SANE_RTT_MAX > 0)) { - DCCP_WARN("RTT sample %ld too large, using max\n", (long)delta); + if (unlikely(delta > DCCP_SANE_RTT_MAX)) { + DCCP_WARN("RTT sample %ld too large, using max\n", delta); return DCCP_SANE_RTT_MAX; } diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c index 718f2fa923a10621b5ab3f107653a27125e0db15..44f6e17e105f3bc917bad9ffd6cf83fb3d6eb7ce 100644 --- a/net/dccp/ipv4.c +++ b/net/dccp/ipv4.c @@ -381,7 +381,6 @@ struct sock *dccp_v4_request_recv_sock(struct sock *sk, struct sk_buff *skb, { struct inet_request_sock *ireq; struct inet_sock *newinet; - struct dccp_sock *newdp; struct sock *newsk; if (sk_acceptq_is_full(sk)) @@ -396,7 +395,6 @@ struct sock *dccp_v4_request_recv_sock(struct sock *sk, struct sk_buff *skb, sk_setup_caps(newsk, dst); - newdp = dccp_sk(newsk); newinet = inet_sk(newsk); ireq = inet_rsk(req); newinet->daddr = ireq->rmt_addr; @@ -512,17 +510,12 @@ static int dccp_v4_send_response(struct sock *sk, struct request_sock *req, static void dccp_v4_ctl_send_reset(struct sock *sk, struct sk_buff *rxskb) { int err; - struct dccp_hdr *rxdh = dccp_hdr(rxskb), *dh; const struct iphdr *rxiph; - const int dccp_hdr_reset_len = sizeof(struct dccp_hdr) + - sizeof(struct dccp_hdr_ext) + - sizeof(struct dccp_hdr_reset); struct sk_buff *skb; struct dst_entry *dst; - u64 seqno = 0; /* Never send a reset in response to a reset. */ - if (rxdh->dccph_type == DCCP_PKT_RESET) + if (dccp_hdr(rxskb)->dccph_type == DCCP_PKT_RESET) return; if (((struct rtable *)rxskb->dst)->rt_type != RTN_LOCAL) @@ -532,37 +525,14 @@ static void dccp_v4_ctl_send_reset(struct sock *sk, struct sk_buff *rxskb) if (dst == NULL) return; - skb = alloc_skb(dccp_v4_ctl_socket->sk->sk_prot->max_header, - GFP_ATOMIC); + skb = dccp_ctl_make_reset(dccp_v4_ctl_socket, rxskb); if (skb == NULL) goto out; - /* Reserve space for headers. */ - skb_reserve(skb, dccp_v4_ctl_socket->sk->sk_prot->max_header); - skb->dst = dst_clone(dst); - - dh = dccp_zeroed_hdr(skb, dccp_hdr_reset_len); - - /* Build DCCP header and checksum it. */ - dh->dccph_type = DCCP_PKT_RESET; - dh->dccph_sport = rxdh->dccph_dport; - dh->dccph_dport = rxdh->dccph_sport; - dh->dccph_doff = dccp_hdr_reset_len / 4; - dh->dccph_x = 1; - dccp_hdr_reset(skb)->dccph_reset_code = - DCCP_SKB_CB(rxskb)->dccpd_reset_code; - - /* See "8.3.1. 
Abnormal Termination" in RFC 4340 */ - if (DCCP_SKB_CB(rxskb)->dccpd_ack_seq != DCCP_PKT_WITHOUT_ACK_SEQ) - dccp_set_seqno(&seqno, DCCP_SKB_CB(rxskb)->dccpd_ack_seq + 1); - - dccp_hdr_set_seq(dh, seqno); - dccp_hdr_set_ack(dccp_hdr_ack_bits(skb), DCCP_SKB_CB(rxskb)->dccpd_seq); - - dccp_csum_outgoing(skb); rxiph = ip_hdr(rxskb); - dh->dccph_checksum = dccp_v4_csum_finish(skb, rxiph->saddr, - rxiph->daddr); + dccp_hdr(skb)->dccph_checksum = dccp_v4_csum_finish(skb, rxiph->saddr, + rxiph->daddr); + skb->dst = dst_clone(dst); bh_lock_sock(dccp_v4_ctl_socket->sk); err = ip_build_and_send_pkt(skb, dccp_v4_ctl_socket->sk, @@ -598,17 +568,14 @@ int dccp_v4_conn_request(struct sock *sk, struct sk_buff *skb) struct dccp_request_sock *dreq; const __be32 service = dccp_hdr_request(skb)->dccph_req_service; struct dccp_skb_cb *dcb = DCCP_SKB_CB(skb); - __u8 reset_code = DCCP_RESET_CODE_TOO_BUSY; /* Never answer to DCCP_PKT_REQUESTs send to broadcast or multicast */ if (((struct rtable *)skb->dst)->rt_flags & - (RTCF_BROADCAST | RTCF_MULTICAST)) { - reset_code = DCCP_RESET_CODE_NO_CONNECTION; - goto drop; - } + (RTCF_BROADCAST | RTCF_MULTICAST)) + return 0; /* discard, don't send a reset here */ if (dccp_bad_service_code(sk, service)) { - reset_code = DCCP_RESET_CODE_BAD_SERVICE_CODE; + dcb->dccpd_reset_code = DCCP_RESET_CODE_BAD_SERVICE_CODE; goto drop; } /* @@ -616,6 +583,7 @@ int dccp_v4_conn_request(struct sock *sk, struct sk_buff *skb) * limitations, they conserve resources and peer is * evidently real one. */ + dcb->dccpd_reset_code = DCCP_RESET_CODE_TOO_BUSY; if (inet_csk_reqsk_queue_is_full(sk)) goto drop; @@ -668,7 +636,6 @@ int dccp_v4_conn_request(struct sock *sk, struct sk_buff *skb) reqsk_free(req); drop: DCCP_INC_STATS_BH(DCCP_MIB_ATTEMPTFAILS); - dcb->dccpd_reset_code = reset_code; return -1; } diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c index b158c661867bb128d7f182e0c215e6836f710f36..006a3834fbcdec78b0c437f3d9f60ccaa3e694d3 100644 --- a/net/dccp/ipv6.c +++ b/net/dccp/ipv6.c @@ -301,50 +301,23 @@ static void dccp_v6_reqsk_destructor(struct request_sock *req) static void dccp_v6_ctl_send_reset(struct sock *sk, struct sk_buff *rxskb) { - struct dccp_hdr *rxdh = dccp_hdr(rxskb), *dh; struct ipv6hdr *rxip6h; - const u32 dccp_hdr_reset_len = sizeof(struct dccp_hdr) + - sizeof(struct dccp_hdr_ext) + - sizeof(struct dccp_hdr_reset); struct sk_buff *skb; struct flowi fl; - u64 seqno = 0; - if (rxdh->dccph_type == DCCP_PKT_RESET) + if (dccp_hdr(rxskb)->dccph_type == DCCP_PKT_RESET) return; if (!ipv6_unicast_destination(rxskb)) return; - skb = alloc_skb(dccp_v6_ctl_socket->sk->sk_prot->max_header, - GFP_ATOMIC); + skb = dccp_ctl_make_reset(dccp_v6_ctl_socket, rxskb); if (skb == NULL) return; - skb_reserve(skb, dccp_v6_ctl_socket->sk->sk_prot->max_header); - - dh = dccp_zeroed_hdr(skb, dccp_hdr_reset_len); - - /* Swap the send and the receive. */ - dh->dccph_type = DCCP_PKT_RESET; - dh->dccph_sport = rxdh->dccph_dport; - dh->dccph_dport = rxdh->dccph_sport; - dh->dccph_doff = dccp_hdr_reset_len / 4; - dh->dccph_x = 1; - dccp_hdr_reset(skb)->dccph_reset_code = - DCCP_SKB_CB(rxskb)->dccpd_reset_code; - - /* See "8.3.1. 
Abnormal Termination" in RFC 4340 */ - if (DCCP_SKB_CB(rxskb)->dccpd_ack_seq != DCCP_PKT_WITHOUT_ACK_SEQ) - dccp_set_seqno(&seqno, DCCP_SKB_CB(rxskb)->dccpd_ack_seq + 1); - - dccp_hdr_set_seq(dh, seqno); - dccp_hdr_set_ack(dccp_hdr_ack_bits(skb), DCCP_SKB_CB(rxskb)->dccpd_seq); - - dccp_csum_outgoing(skb); rxip6h = ipv6_hdr(rxskb); - dh->dccph_checksum = dccp_v6_csum_finish(skb, &rxip6h->saddr, - &rxip6h->daddr); + dccp_hdr(skb)->dccph_checksum = dccp_v6_csum_finish(skb, &rxip6h->saddr, + &rxip6h->daddr); memset(&fl, 0, sizeof(fl)); ipv6_addr_copy(&fl.fl6_dst, &rxip6h->saddr); @@ -352,8 +325,8 @@ static void dccp_v6_ctl_send_reset(struct sock *sk, struct sk_buff *rxskb) fl.proto = IPPROTO_DCCP; fl.oif = inet6_iif(rxskb); - fl.fl_ip_dport = dh->dccph_dport; - fl.fl_ip_sport = dh->dccph_sport; + fl.fl_ip_dport = dccp_hdr(skb)->dccph_dport; + fl.fl_ip_sport = dccp_hdr(skb)->dccph_sport; security_skb_classify_flow(rxskb, &fl); /* sk = NULL, but it is safe for now. RST socket required. */ @@ -417,21 +390,21 @@ static int dccp_v6_conn_request(struct sock *sk, struct sk_buff *skb) struct ipv6_pinfo *np = inet6_sk(sk); const __be32 service = dccp_hdr_request(skb)->dccph_req_service; struct dccp_skb_cb *dcb = DCCP_SKB_CB(skb); - __u8 reset_code = DCCP_RESET_CODE_TOO_BUSY; if (skb->protocol == htons(ETH_P_IP)) return dccp_v4_conn_request(sk, skb); if (!ipv6_unicast_destination(skb)) - goto drop; + return 0; /* discard, don't send a reset here */ if (dccp_bad_service_code(sk, service)) { - reset_code = DCCP_RESET_CODE_BAD_SERVICE_CODE; + dcb->dccpd_reset_code = DCCP_RESET_CODE_BAD_SERVICE_CODE; goto drop; } /* * There are no SYN attacks on IPv6, yet... */ + dcb->dccpd_reset_code = DCCP_RESET_CODE_TOO_BUSY; if (inet_csk_reqsk_queue_is_full(sk)) goto drop; @@ -491,7 +464,6 @@ static int dccp_v6_conn_request(struct sock *sk, struct sk_buff *skb) reqsk_free(req); drop: DCCP_INC_STATS_BH(DCCP_MIB_ATTEMPTFAILS); - dcb->dccpd_reset_code = reset_code; return -1; } diff --git a/net/dccp/minisocks.c b/net/dccp/minisocks.c index e18e249ac49b5974ed7bc9d6f788988e78fb1ea8..831b76e08d028c87c8a7cb3a4c69afff92a86358 100644 --- a/net/dccp/minisocks.c +++ b/net/dccp/minisocks.c @@ -42,6 +42,16 @@ struct inet_timewait_death_row dccp_death_row = { EXPORT_SYMBOL_GPL(dccp_death_row); +void dccp_minisock_init(struct dccp_minisock *dmsk) +{ + dmsk->dccpms_sequence_window = sysctl_dccp_feat_sequence_window; + dmsk->dccpms_rx_ccid = sysctl_dccp_feat_rx_ccid; + dmsk->dccpms_tx_ccid = sysctl_dccp_feat_tx_ccid; + dmsk->dccpms_ack_ratio = sysctl_dccp_feat_ack_ratio; + dmsk->dccpms_send_ack_vector = sysctl_dccp_feat_send_ack_vector; + dmsk->dccpms_send_ndp_count = sysctl_dccp_feat_send_ndp_count; +} + void dccp_time_wait(struct sock *sk, int state, int timeo) { struct inet_timewait_sock *tw = NULL; @@ -112,7 +122,6 @@ struct sock *dccp_create_openreq_child(struct sock *sk, newdp->dccps_service_list = NULL; newdp->dccps_service = dreq->dreq_service; newicsk->icsk_rto = DCCP_TIMEOUT_INIT; - do_gettimeofday(&newdp->dccps_epoch); if (dccp_feat_clone(sk, newsk)) goto out_free; diff --git a/net/dccp/options.c b/net/dccp/options.c index 34d536d5f1a1f4bafbd4bdf553b5832d4d5dcceb..d361b5533309dc3310564adf483e9e1900c4573f 100644 --- a/net/dccp/options.c +++ b/net/dccp/options.c @@ -29,16 +29,6 @@ int sysctl_dccp_feat_ack_ratio = DCCPF_INITIAL_ACK_RATIO; int sysctl_dccp_feat_send_ack_vector = DCCPF_INITIAL_SEND_ACK_VECTOR; int sysctl_dccp_feat_send_ndp_count = DCCPF_INITIAL_SEND_NDP_COUNT; -void dccp_minisock_init(struct dccp_minisock *dmsk) 
-{ - dmsk->dccpms_sequence_window = sysctl_dccp_feat_sequence_window; - dmsk->dccpms_rx_ccid = sysctl_dccp_feat_rx_ccid; - dmsk->dccpms_tx_ccid = sysctl_dccp_feat_tx_ccid; - dmsk->dccpms_ack_ratio = sysctl_dccp_feat_ack_ratio; - dmsk->dccpms_send_ack_vector = sysctl_dccp_feat_send_ack_vector; - dmsk->dccpms_send_ndp_count = sysctl_dccp_feat_send_ndp_count; -} - static u32 dccp_decode_value_var(const unsigned char *bf, const u8 len) { u32 value = 0; @@ -158,7 +148,7 @@ int dccp_parse_options(struct sock *sk, struct sk_buff *skb) opt_recv->dccpor_timestamp = ntohl(*(__be32 *)value); dp->dccps_timestamp_echo = opt_recv->dccpor_timestamp; - dccp_timestamp(sk, &dp->dccps_timestamp_time); + dp->dccps_timestamp_time = ktime_get_real(); dccp_pr_debug("%s rx opt: TIMESTAMP=%u, ackno=%llu\n", dccp_role(sk), opt_recv->dccpor_timestamp, @@ -189,7 +179,7 @@ int dccp_parse_options(struct sock *sk, struct sk_buff *skb) else elapsed_time = ntohl(*(__be32 *)(value + 4)); - dccp_pr_debug_cat(", ELAPSED_TIME=%d\n", elapsed_time); + dccp_pr_debug_cat(", ELAPSED_TIME=%u\n", elapsed_time); /* Give precedence to the biggest ELAPSED_TIME */ if (elapsed_time > opt_recv->dccpor_elapsed_time) @@ -370,29 +360,9 @@ int dccp_insert_option_elapsed_time(struct sock *sk, struct sk_buff *skb, EXPORT_SYMBOL_GPL(dccp_insert_option_elapsed_time); -void dccp_timestamp(const struct sock *sk, struct timeval *tv) -{ - const struct dccp_sock *dp = dccp_sk(sk); - - do_gettimeofday(tv); - tv->tv_sec -= dp->dccps_epoch.tv_sec; - tv->tv_usec -= dp->dccps_epoch.tv_usec; - - while (tv->tv_usec < 0) { - tv->tv_sec--; - tv->tv_usec += USEC_PER_SEC; - } -} - -EXPORT_SYMBOL_GPL(dccp_timestamp); - int dccp_insert_option_timestamp(struct sock *sk, struct sk_buff *skb) { - struct timeval tv; - __be32 now; - - dccp_timestamp(sk, &tv); - now = htonl(timeval_usecs(&tv) / 10); + __be32 now = htonl(dccp_timestamp()); /* yes this will overflow but that is the point as we want a * 10 usec 32 bit timer which mean it wraps every 11.9 hours */ @@ -405,14 +375,12 @@ static int dccp_insert_option_timestamp_echo(struct sock *sk, struct sk_buff *skb) { struct dccp_sock *dp = dccp_sk(sk); - struct timeval now; __be32 tstamp_echo; - u32 elapsed_time; int len, elapsed_time_len; unsigned char *to; - - dccp_timestamp(sk, &now); - elapsed_time = timeval_delta(&now, &dp->dccps_timestamp_time) / 10; + const suseconds_t delta = ktime_us_delta(ktime_get_real(), + dp->dccps_timestamp_time); + u32 elapsed_time = delta / 10; elapsed_time_len = dccp_elapsed_time_len(elapsed_time); len = 6 + elapsed_time_len; @@ -438,8 +406,7 @@ static int dccp_insert_option_timestamp_echo(struct sock *sk, } dp->dccps_timestamp_echo = 0; - dp->dccps_timestamp_time.tv_sec = 0; - dp->dccps_timestamp_time.tv_usec = 0; + dp->dccps_timestamp_time = ktime_set(0, 0); return 0; } diff --git a/net/dccp/output.c b/net/dccp/output.c index c8d843e983fcd4073f6847f1a68c354398924864..f49544618f2007857dcf31c59512e5d1d2faca2c 100644 --- a/net/dccp/output.c +++ b/net/dccp/output.c @@ -61,6 +61,7 @@ static int dccp_transmit_skb(struct sock *sk, struct sk_buff *skb) set_ack = 0; /* fall through */ case DCCP_PKT_DATAACK: + case DCCP_PKT_RESET: break; case DCCP_PKT_REQUEST: @@ -69,12 +70,14 @@ static int dccp_transmit_skb(struct sock *sk, struct sk_buff *skb) case DCCP_PKT_SYNC: case DCCP_PKT_SYNCACK: - ackno = dcb->dccpd_seq; + ackno = dcb->dccpd_ack_seq; /* fall through */ default: /* - * Only data packets should come through with skb->sk - * set. 
+ * Set owner/destructor: some skbs are allocated via + * alloc_skb (e.g. when retransmission may happen). + * Only Data, DataAck, and Reset packets should come + * through here with skb->sk set. */ WARN_ON(skb->sk); skb_set_owner_w(skb, sk); @@ -174,34 +177,38 @@ void dccp_write_space(struct sock *sk) /** * dccp_wait_for_ccid - Wait for ccid to tell us we can send a packet - * @sk: socket to wait for + * @sk: socket to wait for + * @skb: current skb to pass on for waiting + * @delay: sleep timeout in milliseconds (> 0) + * This function is called by default when the socket is closed, and + * when a non-zero linger time is set on the socket. For consistency */ -static int dccp_wait_for_ccid(struct sock *sk, struct sk_buff *skb) +static int dccp_wait_for_ccid(struct sock *sk, struct sk_buff *skb, int delay) { struct dccp_sock *dp = dccp_sk(sk); DEFINE_WAIT(wait); - unsigned long delay; + unsigned long jiffdelay; int rc; - while (1) { + do { + dccp_pr_debug("delayed send by %d msec\n", delay); + jiffdelay = msecs_to_jiffies(delay); + prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE); + sk->sk_write_pending++; + release_sock(sk); + schedule_timeout(jiffdelay); + lock_sock(sk); + sk->sk_write_pending--; + if (sk->sk_err) goto do_error; if (signal_pending(current)) goto do_interrupted; rc = ccid_hc_tx_send_packet(dp->dccps_hc_tx_ccid, sk, skb); - if (rc <= 0) - break; - dccp_pr_debug("delayed send by %d msec\n", rc); - delay = msecs_to_jiffies(rc); - sk->sk_write_pending++; - release_sock(sk); - schedule_timeout(delay); - lock_sock(sk); - sk->sk_write_pending--; - } + } while ((delay = rc) > 0); out: finish_wait(sk->sk_sleep, &wait); return rc; @@ -228,7 +235,7 @@ void dccp_write_xmit(struct sock *sk, int block) msecs_to_jiffies(err)+jiffies); break; } else - err = dccp_wait_for_ccid(sk, skb); + err = dccp_wait_for_ccid(sk, skb, err); if (err && err != -EINTR) DCCP_BUG("err=%d after dccp_wait_for_ccid", err); } @@ -324,72 +331,81 @@ struct sk_buff *dccp_make_response(struct sock *sk, struct dst_entry *dst, EXPORT_SYMBOL_GPL(dccp_make_response); -static struct sk_buff *dccp_make_reset(struct sock *sk, struct dst_entry *dst, - const enum dccp_reset_codes code) +/* answer offending packet in @rcv_skb with Reset from control socket @ctl */ +struct sk_buff *dccp_ctl_make_reset(struct socket *ctl, struct sk_buff *rcv_skb) { - struct dccp_hdr *dh; - struct dccp_sock *dp = dccp_sk(sk); - const u32 dccp_header_size = sizeof(struct dccp_hdr) + - sizeof(struct dccp_hdr_ext) + - sizeof(struct dccp_hdr_reset); - struct sk_buff *skb = sock_wmalloc(sk, sk->sk_prot->max_header, 1, - GFP_ATOMIC); - if (skb == NULL) - return NULL; - - /* Reserve space for headers. 
*/ - skb_reserve(skb, sk->sk_prot->max_header); - - skb->dst = dst_clone(dst); - - dccp_inc_seqno(&dp->dccps_gss); - - DCCP_SKB_CB(skb)->dccpd_reset_code = code; - DCCP_SKB_CB(skb)->dccpd_type = DCCP_PKT_RESET; - DCCP_SKB_CB(skb)->dccpd_seq = dp->dccps_gss; + struct dccp_hdr *rxdh = dccp_hdr(rcv_skb), *dh; + struct dccp_skb_cb *dcb = DCCP_SKB_CB(rcv_skb); + const u32 dccp_hdr_reset_len = sizeof(struct dccp_hdr) + + sizeof(struct dccp_hdr_ext) + + sizeof(struct dccp_hdr_reset); + struct dccp_hdr_reset *dhr; + struct sk_buff *skb; - if (dccp_insert_options(sk, skb)) { - kfree_skb(skb); + skb = alloc_skb(ctl->sk->sk_prot->max_header, GFP_ATOMIC); + if (skb == NULL) return NULL; - } - dh = dccp_zeroed_hdr(skb, dccp_header_size); + skb_reserve(skb, ctl->sk->sk_prot->max_header); - dh->dccph_sport = inet_sk(sk)->sport; - dh->dccph_dport = inet_sk(sk)->dport; - dh->dccph_doff = (dccp_header_size + - DCCP_SKB_CB(skb)->dccpd_opt_len) / 4; + /* Swap the send and the receive. */ + dh = dccp_zeroed_hdr(skb, dccp_hdr_reset_len); dh->dccph_type = DCCP_PKT_RESET; + dh->dccph_sport = rxdh->dccph_dport; + dh->dccph_dport = rxdh->dccph_sport; + dh->dccph_doff = dccp_hdr_reset_len / 4; dh->dccph_x = 1; - dccp_hdr_set_seq(dh, dp->dccps_gss); - dccp_hdr_set_ack(dccp_hdr_ack_bits(skb), dp->dccps_gsr); - dccp_hdr_reset(skb)->dccph_reset_code = code; - inet_csk(sk)->icsk_af_ops->send_check(sk, 0, skb); + dhr = dccp_hdr_reset(skb); + dhr->dccph_reset_code = dcb->dccpd_reset_code; + + switch (dcb->dccpd_reset_code) { + case DCCP_RESET_CODE_PACKET_ERROR: + dhr->dccph_reset_data[0] = rxdh->dccph_type; + break; + case DCCP_RESET_CODE_OPTION_ERROR: /* fall through */ + case DCCP_RESET_CODE_MANDATORY_ERROR: + memcpy(dhr->dccph_reset_data, dcb->dccpd_reset_data, 3); + break; + } + /* + * From RFC 4340, 8.3.1: + * If P.ackno exists, set R.seqno := P.ackno + 1. + * Else set R.seqno := 0. + */ + if (dcb->dccpd_ack_seq != DCCP_PKT_WITHOUT_ACK_SEQ) + dccp_hdr_set_seq(dh, ADD48(dcb->dccpd_ack_seq, 1)); + dccp_hdr_set_ack(dccp_hdr_ack_bits(skb), dcb->dccpd_seq); - DCCP_INC_STATS(DCCP_MIB_OUTSEGS); + dccp_csum_outgoing(skb); return skb; } +EXPORT_SYMBOL_GPL(dccp_ctl_make_reset); + +/* send Reset on established socket, to close or abort the connection */ int dccp_send_reset(struct sock *sk, enum dccp_reset_codes code) { + struct sk_buff *skb; /* * FIXME: what if rebuild_header fails? * Should we be doing a rebuild_header here? */ int err = inet_sk_rebuild_header(sk); - if (err == 0) { - struct sk_buff *skb = dccp_make_reset(sk, sk->sk_dst_cache, - code); - if (skb != NULL) { - memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt)); - err = inet_csk(sk)->icsk_af_ops->queue_xmit(skb, 0); - return net_xmit_eval(err); - } - } + if (err != 0) + return err; + + skb = sock_wmalloc(sk, sk->sk_prot->max_header, 1, GFP_ATOMIC); + if (skb == NULL) + return -ENOBUFS; - return err; + /* Reserve space for headers and prepare control bits. */ + skb_reserve(skb, sk->sk_prot->max_header); + DCCP_SKB_CB(skb)->dccpd_type = DCCP_PKT_RESET; + DCCP_SKB_CB(skb)->dccpd_reset_code = code; + + return dccp_transmit_skb(sk, skb); } /* @@ -477,6 +493,7 @@ void dccp_send_ack(struct sock *sk) EXPORT_SYMBOL_GPL(dccp_send_ack); +/* FIXME: Is this still necessary (11.3) - currently nowhere used by DCCP. 
*/ void dccp_send_delayed_ack(struct sock *sk) { struct inet_connection_sock *icsk = inet_csk(sk); @@ -507,7 +524,7 @@ void dccp_send_delayed_ack(struct sock *sk) sk_reset_timer(sk, &icsk->icsk_delack_timer, timeout); } -void dccp_send_sync(struct sock *sk, const u64 seq, +void dccp_send_sync(struct sock *sk, const u64 ackno, const enum dccp_pkt_type pkt_type) { /* @@ -517,14 +534,16 @@ void dccp_send_sync(struct sock *sk, const u64 seq, */ struct sk_buff *skb = alloc_skb(sk->sk_prot->max_header, GFP_ATOMIC); - if (skb == NULL) + if (skb == NULL) { /* FIXME: how to make sure the sync is sent? */ + DCCP_CRIT("could not send %s", dccp_packet_name(pkt_type)); return; + } /* Reserve space for headers and prepare control bits. */ skb_reserve(skb, sk->sk_prot->max_header); DCCP_SKB_CB(skb)->dccpd_type = pkt_type; - DCCP_SKB_CB(skb)->dccpd_seq = seq; + DCCP_SKB_CB(skb)->dccpd_ack_seq = ackno; dccp_transmit_skb(sk, skb); } diff --git a/net/dccp/probe.c b/net/dccp/probe.c index bae10b0f2fc3ca5dae81d239245503e034692ad6..7053bb827bc88c34ca6a47bdcf9644425bc74c25 100644 --- a/net/dccp/probe.c +++ b/net/dccp/probe.c @@ -30,6 +30,7 @@ #include #include #include +#include #include "dccp.h" #include "ccid.h" @@ -168,7 +169,7 @@ static __init int dccpprobe_init(void) if (IS_ERR(dccpw.fifo)) return PTR_ERR(dccpw.fifo); - if (!proc_net_fops_create(procname, S_IRUSR, &dccpprobe_fops)) + if (!proc_net_fops_create(&init_net, procname, S_IRUSR, &dccpprobe_fops)) goto err0; ret = register_jprobe(&dccp_send_probe); @@ -178,7 +179,7 @@ static __init int dccpprobe_init(void) pr_info("DCCP watch registered (port=%d)\n", port); return 0; err1: - proc_net_remove(procname); + proc_net_remove(&init_net, procname); err0: kfifo_free(dccpw.fifo); return ret; @@ -188,7 +189,7 @@ module_init(dccpprobe_init); static __exit void dccpprobe_exit(void) { kfifo_free(dccpw.fifo); - proc_net_remove(procname); + proc_net_remove(&init_net, procname); unregister_jprobe(&dccp_send_probe); } diff --git a/net/dccp/proto.c b/net/dccp/proto.c index 04b59ec4f5121834caf429d15f84d5f0b4dde5e6..cc9bf1cb2646aa80d159988a5fcaa08af1b964d8 100644 --- a/net/dccp/proto.c +++ b/net/dccp/proto.c @@ -172,7 +172,6 @@ int dccp_init_sock(struct sock *sk, const __u8 ctl_sock_initialized) struct inet_connection_sock *icsk = inet_csk(sk); dccp_minisock_init(&dp->dccps_minisock); - do_gettimeofday(&dp->dccps_epoch); /* * FIXME: We're hardcoding the CCID, and doing this at this point makes @@ -220,6 +219,7 @@ int dccp_init_sock(struct sock *sk, const __u8 ctl_sock_initialized) sk->sk_write_space = dccp_write_space; icsk->icsk_sync_mss = dccp_sync_mss; dp->dccps_mss_cache = 536; + dp->dccps_rate_last = jiffies; dp->dccps_role = DCCP_ROLE_UNDEFINED; dp->dccps_service = DCCP_SERVICE_CODE_IS_ABSENT; dp->dccps_l_ack_ratio = dp->dccps_r_ack_ratio = 1; @@ -587,6 +587,10 @@ static int do_dccp_getsockopt(struct sock *sk, int level, int optname, case DCCP_SOCKOPT_SERVICE: return dccp_getsockopt_service(sk, len, (__be32 __user *)optval, optlen); + case DCCP_SOCKOPT_GET_CUR_MPS: + val = dp->dccps_mss_cache; + len = sizeof(val); + break; case DCCP_SOCKOPT_SEND_CSCOV: val = dp->dccps_pcslen; len = sizeof(val); @@ -664,7 +668,7 @@ int dccp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, * so that the trick in dccp_rcv_request_sent_state_process. */ /* Wait for a connection to finish. 
*/ - if ((1 << sk->sk_state) & ~(DCCPF_OPEN | DCCPF_PARTOPEN | DCCPF_CLOSING)) + if ((1 << sk->sk_state) & ~(DCCPF_OPEN | DCCPF_PARTOPEN)) if ((rc = sk_stream_wait_connect(sk, &timeo)) != 0) goto out_release; @@ -988,7 +992,7 @@ MODULE_PARM_DESC(thash_entries, "Number of ehash buckets"); #ifdef CONFIG_IP_DCCP_DEBUG int dccp_debug; -module_param(dccp_debug, int, 0444); +module_param(dccp_debug, bool, 0444); MODULE_PARM_DESC(dccp_debug, "Enable debug messages"); EXPORT_SYMBOL_GPL(dccp_debug); @@ -1077,6 +1081,8 @@ static int __init dccp_init(void) rc = dccp_sysctl_init(); if (rc) goto out_ackvec_exit; + + dccp_timestamping_init(); out: return rc; out_ackvec_exit: diff --git a/net/dccp/sysctl.c b/net/dccp/sysctl.c index 1260aabac5e1b1cc17332d01649121b5068ca922..9364b2fb4dbd4ce0e45581e7595c7e2d2a32e6dd 100644 --- a/net/dccp/sysctl.c +++ b/net/dccp/sysctl.c @@ -18,6 +18,9 @@ #error This file should not be compiled without CONFIG_SYSCTL defined #endif +/* rate-limit for syncs in reply to sequence-invalid packets; RFC 4340, 7.5.4 */ +int sysctl_dccp_sync_ratelimit __read_mostly = HZ / 8; + static struct ctl_table dccp_default_table[] = { { .procname = "seq_window", @@ -89,6 +92,13 @@ static struct ctl_table dccp_default_table[] = { .mode = 0644, .proc_handler = proc_dointvec, }, + { + .procname = "sync_ratelimit", + .data = &sysctl_dccp_sync_ratelimit, + .maxlen = sizeof(sysctl_dccp_sync_ratelimit), + .mode = 0644, + .proc_handler = proc_dointvec_ms_jiffies, + }, { .ctl_name = 0, } }; diff --git a/net/dccp/timer.c b/net/dccp/timer.c index 0197a41c256ad72e37499f631f6df40498d4dad6..3af067354bd4ec531ab61f2b22cf4ecd01028552 100644 --- a/net/dccp/timer.c +++ b/net/dccp/timer.c @@ -291,3 +291,24 @@ void dccp_init_xmit_timers(struct sock *sk) inet_csk_init_xmit_timers(sk, &dccp_write_timer, &dccp_delack_timer, &dccp_keepalive_timer); } + +static ktime_t dccp_timestamp_seed; +/** + * dccp_timestamp - 10s of microseconds time source + * Returns the number of 10s of microseconds since loading DCCP. This is native + * DCCP time difference format (RFC 4340, sec. 13). + * Please note: This will wrap around about circa every 11.9 hours. 
+ */ +u32 dccp_timestamp(void) +{ + s64 delta = ktime_us_delta(ktime_get_real(), dccp_timestamp_seed); + + do_div(delta, 10); + return delta; +} +EXPORT_SYMBOL_GPL(dccp_timestamp); + +void __init dccp_timestamping_init(void) +{ + dccp_timestamp_seed = ktime_get_real(); +} diff --git a/net/decnet/af_decnet.c b/net/decnet/af_decnet.c index ed76d4aab4a9dd241295463795ce373a7a317833..aabe98d9402f492bac411e111da9a0cc66a011f8 100644 --- a/net/decnet/af_decnet.c +++ b/net/decnet/af_decnet.c @@ -128,6 +128,7 @@ Version 0.0.6 2.1.110 07-aug-98 Eduardo Marcelo Serrat #include #include #include +#include #include #include #include @@ -470,10 +471,10 @@ static struct proto dn_proto = { .obj_size = sizeof(struct dn_sock), }; -static struct sock *dn_alloc_sock(struct socket *sock, gfp_t gfp) +static struct sock *dn_alloc_sock(struct net *net, struct socket *sock, gfp_t gfp) { struct dn_scp *scp; - struct sock *sk = sk_alloc(PF_DECnet, gfp, &dn_proto, 1); + struct sock *sk = sk_alloc(net, PF_DECnet, gfp, &dn_proto, 1); if (!sk) goto out; @@ -674,10 +675,13 @@ char *dn_addr2asc(__u16 addr, char *buf) -static int dn_create(struct socket *sock, int protocol) +static int dn_create(struct net *net, struct socket *sock, int protocol) { struct sock *sk; + if (net != &init_net) + return -EAFNOSUPPORT; + switch(sock->type) { case SOCK_SEQPACKET: if (protocol != DNPROTO_NSP) @@ -690,7 +694,7 @@ static int dn_create(struct socket *sock, int protocol) } - if ((sk = dn_alloc_sock(sock, GFP_KERNEL)) == NULL) + if ((sk = dn_alloc_sock(net, sock, GFP_KERNEL)) == NULL) return -ENOBUFS; sk->sk_protocol = protocol; @@ -747,7 +751,7 @@ static int dn_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) if (dn_ntohs(saddr->sdn_nodeaddrl)) { read_lock(&dev_base_lock); ldev = NULL; - for_each_netdev(dev) { + for_each_netdev(&init_net, dev) { if (!dev->dn_ptr) continue; if (dn_dev_islocal(dev, dn_saddr2dn(saddr))) { @@ -1090,7 +1094,7 @@ static int dn_accept(struct socket *sock, struct socket *newsock, int flags) cb = DN_SKB_CB(skb); sk->sk_ack_backlog--; - newsk = dn_alloc_sock(newsock, sk->sk_allocation); + newsk = dn_alloc_sock(sk->sk_net, newsock, sk->sk_allocation); if (newsk == NULL) { release_sock(sk); kfree_skb(skb); @@ -2085,6 +2089,9 @@ static int dn_device_event(struct notifier_block *this, unsigned long event, { struct net_device *dev = (struct net_device *)ptr; + if (dev->nd_net != &init_net) + return NOTIFY_DONE; + switch(event) { case NETDEV_UP: dn_dev_up(dev); @@ -2399,7 +2406,7 @@ static int __init decnet_init(void) dev_add_pack(&dn_dix_packet_type); register_netdevice_notifier(&dn_dev_notifier); - proc_net_fops_create("decnet", S_IRUGO, &dn_socket_seq_fops); + proc_net_fops_create(&init_net, "decnet", S_IRUGO, &dn_socket_seq_fops); dn_register_sysctl(); out: return rc; @@ -2428,7 +2435,7 @@ static void __exit decnet_exit(void) dn_neigh_cleanup(); dn_fib_cleanup(); - proc_net_remove("decnet"); + proc_net_remove(&init_net, "decnet"); proto_unregister(&dn_proto); } diff --git a/net/decnet/dn_dev.c b/net/decnet/dn_dev.c index 8def68209edd48e95d23c19651e58c191ab41e60..26130afd80299a32692f74ab794af5d5e95f3b37 100644 --- a/net/decnet/dn_dev.c +++ b/net/decnet/dn_dev.c @@ -42,6 +42,7 @@ #include #include #include +#include #include #include #include @@ -149,7 +150,7 @@ static struct dn_dev_parms dn_dev_list[] = { } }; -#define DN_DEV_LIST_SIZE (sizeof(dn_dev_list)/sizeof(struct dn_dev_parms)) +#define DN_DEV_LIST_SIZE ARRAY_SIZE(dn_dev_list) #define DN_DEV_PARMS_OFFSET(x) ((int) ((char *) &((struct 
dn_dev_parms *)0)->x)) @@ -512,7 +513,7 @@ int dn_dev_ioctl(unsigned int cmd, void __user *arg) ifr->ifr_name[IFNAMSIZ-1] = 0; #ifdef CONFIG_KMOD - dev_load(ifr->ifr_name); + dev_load(&init_net, ifr->ifr_name); #endif switch(cmd) { @@ -530,7 +531,7 @@ int dn_dev_ioctl(unsigned int cmd, void __user *arg) rtnl_lock(); - if ((dev = __dev_get_by_name(ifr->ifr_name)) == NULL) { + if ((dev = __dev_get_by_name(&init_net, ifr->ifr_name)) == NULL) { ret = -ENODEV; goto done; } @@ -628,7 +629,7 @@ static struct dn_dev *dn_dev_by_index(int ifindex) { struct net_device *dev; struct dn_dev *dn_dev = NULL; - dev = dev_get_by_index(ifindex); + dev = dev_get_by_index(&init_net, ifindex); if (dev) { dn_dev = dev->dn_ptr; dev_put(dev); @@ -693,7 +694,7 @@ static int dn_nl_newaddr(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg) return -EINVAL; ifm = nlmsg_data(nlh); - if ((dev = __dev_get_by_index(ifm->ifa_index)) == NULL) + if ((dev = __dev_get_by_index(&init_net, ifm->ifa_index)) == NULL) return -ENODEV; if ((dn_db = dev->dn_ptr) == NULL) { @@ -799,7 +800,7 @@ static int dn_nl_dump_ifaddr(struct sk_buff *skb, struct netlink_callback *cb) skip_naddr = cb->args[1]; idx = 0; - for_each_netdev(dev) { + for_each_netdev(&init_net, dev) { if (idx < skip_ndevs) goto cont; else if (idx > skip_ndevs) { @@ -868,10 +869,10 @@ int dn_dev_bind_default(__le16 *addr) rv = dn_dev_get_first(dev, addr); read_unlock(&dev_base_lock); dev_put(dev); - if (rv == 0 || dev == &loopback_dev) + if (rv == 0 || dev == init_net.loopback_dev) return rv; } - dev = &loopback_dev; + dev = init_net.loopback_dev; dev_hold(dev); goto last_chance; } @@ -1296,7 +1297,7 @@ void dn_dev_devices_off(void) struct net_device *dev; rtnl_lock(); - for_each_netdev(dev) + for_each_netdev(&init_net, dev) dn_dev_down(dev); rtnl_unlock(); @@ -1307,7 +1308,7 @@ void dn_dev_devices_on(void) struct net_device *dev; rtnl_lock(); - for_each_netdev(dev) { + for_each_netdev(&init_net, dev) { if (dev->flags & IFF_UP) dn_dev_up(dev); } @@ -1341,7 +1342,7 @@ static void *dn_dev_seq_start(struct seq_file *seq, loff_t *pos) return SEQ_START_TOKEN; i = 1; - for_each_netdev(dev) { + for_each_netdev(&init_net, dev) { if (!is_dn_dev(dev)) continue; @@ -1360,9 +1361,9 @@ static void *dn_dev_seq_next(struct seq_file *seq, void *v, loff_t *pos) dev = (struct net_device *)v; if (v == SEQ_START_TOKEN) - dev = net_device_entry(&dev_base_head); + dev = net_device_entry(&init_net.dev_base_head); - for_each_netdev_continue(dev) { + for_each_netdev_continue(&init_net, dev) { if (!is_dn_dev(dev)) continue; @@ -1462,7 +1463,7 @@ void __init dn_dev_init(void) rtnl_register(PF_DECnet, RTM_DELADDR, dn_nl_deladdr, NULL); rtnl_register(PF_DECnet, RTM_GETADDR, NULL, dn_nl_dump_ifaddr); - proc_net_fops_create("decnet_dev", S_IRUGO, &dn_dev_seq_fops); + proc_net_fops_create(&init_net, "decnet_dev", S_IRUGO, &dn_dev_seq_fops); #ifdef CONFIG_SYSCTL { @@ -1483,7 +1484,7 @@ void __exit dn_dev_cleanup(void) } #endif /* CONFIG_SYSCTL */ - proc_net_remove("decnet_dev"); + proc_net_remove(&init_net, "decnet_dev"); dn_dev_devices_off(); } diff --git a/net/decnet/dn_fib.c b/net/decnet/dn_fib.c index d2bc19d479506de64ca4bf9a9da803a1ea8586d6..3760a20d10d08b661d5b881164ab1b14cd2f20c5 100644 --- a/net/decnet/dn_fib.c +++ b/net/decnet/dn_fib.c @@ -212,7 +212,7 @@ static int dn_fib_check_nh(const struct rtmsg *r, struct dn_fib_info *fi, struct return -EINVAL; if (dnet_addr_type(nh->nh_gw) != RTN_UNICAST) return -EINVAL; - if ((dev = __dev_get_by_index(nh->nh_oif)) == NULL) + if ((dev = 
__dev_get_by_index(&init_net, nh->nh_oif)) == NULL) return -ENODEV; if (!(dev->flags&IFF_UP)) return -ENETDOWN; @@ -255,7 +255,7 @@ static int dn_fib_check_nh(const struct rtmsg *r, struct dn_fib_info *fi, struct if (nh->nh_flags&(RTNH_F_PERVASIVE|RTNH_F_ONLINK)) return -EINVAL; - dev = __dev_get_by_index(nh->nh_oif); + dev = __dev_get_by_index(&init_net, nh->nh_oif); if (dev == NULL || dev->dn_ptr == NULL) return -ENODEV; if (!(dev->flags&IFF_UP)) @@ -355,7 +355,7 @@ struct dn_fib_info *dn_fib_create_info(const struct rtmsg *r, struct dn_kern_rta if (nhs != 1 || nh->nh_gw) goto err_inval; nh->nh_scope = RT_SCOPE_NOWHERE; - nh->nh_dev = dev_get_by_index(fi->fib_nh->nh_oif); + nh->nh_dev = dev_get_by_index(&init_net, fi->fib_nh->nh_oif); err = -ENODEV; if (nh->nh_dev == NULL) goto failure; @@ -602,7 +602,7 @@ static void dn_fib_del_ifaddr(struct dn_ifaddr *ifa) /* Scan device list */ read_lock(&dev_base_lock); - for_each_netdev(dev) { + for_each_netdev(&init_net, dev) { dn_db = dev->dn_ptr; if (dn_db == NULL) continue; diff --git a/net/decnet/dn_neigh.c b/net/decnet/dn_neigh.c index 174d8a7a6dac7834e3fbcb991388f780e99e2503..e851b143cca3e4284018c14831f8ea492c9e4a44 100644 --- a/net/decnet/dn_neigh.c +++ b/net/decnet/dn_neigh.c @@ -38,6 +38,7 @@ #include #include #include +#include #include #include #include @@ -210,7 +211,8 @@ static int dn_neigh_output_packet(struct sk_buff *skb) char mac_addr[ETH_ALEN]; dn_dn2eth(mac_addr, rt->rt_local_src); - if (!dev->hard_header || dev->hard_header(skb, dev, ntohs(skb->protocol), neigh->ha, mac_addr, skb->len) >= 0) + if (dev_hard_header(skb, dev, ntohs(skb->protocol), neigh->ha, + mac_addr, skb->len) >= 0) return neigh->ops->queue_xmit(skb); if (net_ratelimit()) @@ -578,24 +580,8 @@ static const struct seq_operations dn_neigh_seq_ops = { static int dn_neigh_seq_open(struct inode *inode, struct file *file) { - struct seq_file *seq; - int rc = -ENOMEM; - struct neigh_seq_state *s = kzalloc(sizeof(*s), GFP_KERNEL); - - if (!s) - goto out; - - rc = seq_open(file, &dn_neigh_seq_ops); - if (rc) - goto out_kfree; - - seq = file->private_data; - seq->private = s; -out: - return rc; -out_kfree: - kfree(s); - goto out; + return seq_open_private(file, &dn_neigh_seq_ops, + sizeof(struct neigh_seq_state)); } static const struct file_operations dn_neigh_seq_fops = { @@ -611,11 +597,11 @@ static const struct file_operations dn_neigh_seq_fops = { void __init dn_neigh_init(void) { neigh_table_init(&dn_neigh_table); - proc_net_fops_create("decnet_neigh", S_IRUGO, &dn_neigh_seq_fops); + proc_net_fops_create(&init_net, "decnet_neigh", S_IRUGO, &dn_neigh_seq_fops); } void __exit dn_neigh_cleanup(void) { - proc_net_remove("decnet_neigh"); + proc_net_remove(&init_net, "decnet_neigh"); neigh_table_clear(&dn_neigh_table); } diff --git a/net/decnet/dn_route.c b/net/decnet/dn_route.c index a4a620971ef02b02355f52bb50a373545c11ddad..97eee5e8fbbe35c1ec63ff45d8cb2dedd6aeba73 100644 --- a/net/decnet/dn_route.c +++ b/net/decnet/dn_route.c @@ -77,6 +77,7 @@ #include #include #include +#include #include #include #include @@ -583,6 +584,9 @@ int dn_route_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type struct dn_dev *dn = (struct dn_dev *)dev->dn_ptr; unsigned char padlen = 0; + if (dev->nd_net != &init_net) + goto dump_it; + if (dn == NULL) goto dump_it; @@ -883,7 +887,7 @@ static int dn_route_output_slow(struct dst_entry **pprt, const struct flowi *old .scope = RT_SCOPE_UNIVERSE, } }, .mark = oldflp->mark, - .iif = loopback_dev.ifindex, + .iif = 
init_net.loopback_dev->ifindex, .oif = oldflp->oif }; struct dn_route *rt = NULL; struct net_device *dev_out = NULL, *dev; @@ -900,11 +904,11 @@ static int dn_route_output_slow(struct dst_entry **pprt, const struct flowi *old "dn_route_output_slow: dst=%04x src=%04x mark=%d" " iif=%d oif=%d\n", dn_ntohs(oldflp->fld_dst), dn_ntohs(oldflp->fld_src), - oldflp->mark, loopback_dev.ifindex, oldflp->oif); + oldflp->mark, init_net.loopback_dev->ifindex, oldflp->oif); /* If we have an output interface, verify its a DECnet device */ if (oldflp->oif) { - dev_out = dev_get_by_index(oldflp->oif); + dev_out = dev_get_by_index(&init_net, oldflp->oif); err = -ENODEV; if (dev_out && dev_out->dn_ptr == NULL) { dev_put(dev_out); @@ -925,7 +929,7 @@ static int dn_route_output_slow(struct dst_entry **pprt, const struct flowi *old goto out; } read_lock(&dev_base_lock); - for_each_netdev(dev) { + for_each_netdev(&init_net, dev) { if (!dev->dn_ptr) continue; if (!dn_dev_islocal(dev, oldflp->fld_src)) @@ -953,7 +957,7 @@ static int dn_route_output_slow(struct dst_entry **pprt, const struct flowi *old err = -EADDRNOTAVAIL; if (dev_out) dev_put(dev_out); - dev_out = &loopback_dev; + dev_out = init_net.loopback_dev; dev_hold(dev_out); if (!fl.fld_dst) { fl.fld_dst = @@ -962,7 +966,7 @@ static int dn_route_output_slow(struct dst_entry **pprt, const struct flowi *old if (!fl.fld_dst) goto out; } - fl.oif = loopback_dev.ifindex; + fl.oif = init_net.loopback_dev->ifindex; res.type = RTN_LOCAL; goto make_route; } @@ -1008,7 +1012,7 @@ static int dn_route_output_slow(struct dst_entry **pprt, const struct flowi *old if (dev_out) dev_put(dev_out); if (dn_dev_islocal(neigh->dev, fl.fld_dst)) { - dev_out = &loopback_dev; + dev_out = init_net.loopback_dev; res.type = RTN_LOCAL; } else { dev_out = neigh->dev; @@ -1029,7 +1033,7 @@ static int dn_route_output_slow(struct dst_entry **pprt, const struct flowi *old /* Possible improvement - check all devices for local addr */ if (dn_dev_islocal(dev_out, fl.fld_dst)) { dev_put(dev_out); - dev_out = &loopback_dev; + dev_out = init_net.loopback_dev; dev_hold(dev_out); res.type = RTN_LOCAL; goto select_source; @@ -1065,7 +1069,7 @@ static int dn_route_output_slow(struct dst_entry **pprt, const struct flowi *old fl.fld_src = fl.fld_dst; if (dev_out) dev_put(dev_out); - dev_out = &loopback_dev; + dev_out = init_net.loopback_dev; dev_hold(dev_out); fl.oif = dev_out->ifindex; if (res.fi) @@ -1552,7 +1556,7 @@ static int dn_cache_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh, void if (fl.iif) { struct net_device *dev; - if ((dev = dev_get_by_index(fl.iif)) == NULL) { + if ((dev = dev_get_by_index(&init_net, fl.iif)) == NULL) { kfree_skb(skb); return -ENODEV; } @@ -1735,23 +1739,8 @@ static const struct seq_operations dn_rt_cache_seq_ops = { static int dn_rt_cache_seq_open(struct inode *inode, struct file *file) { - struct seq_file *seq; - int rc = -ENOMEM; - struct dn_rt_cache_iter_state *s; - - s = kzalloc(sizeof(*s), GFP_KERNEL); - if (!s) - goto out; - rc = seq_open(file, &dn_rt_cache_seq_ops); - if (rc) - goto out_kfree; - seq = file->private_data; - seq->private = s; -out: - return rc; -out_kfree: - kfree(s); - goto out; + return seq_open_private(file, &dn_rt_cache_seq_ops, + sizeof(struct dn_rt_cache_iter_state)); } static const struct file_operations dn_rt_cache_seq_fops = { @@ -1814,7 +1803,7 @@ void __init dn_route_init(void) dn_dst_ops.gc_thresh = (dn_rt_hash_mask + 1); - proc_net_fops_create("decnet_cache", S_IRUGO, &dn_rt_cache_seq_fops); + proc_net_fops_create(&init_net, 
"decnet_cache", S_IRUGO, &dn_rt_cache_seq_fops); #ifdef CONFIG_DECNET_ROUTER rtnl_register(PF_DECnet, RTM_GETROUTE, dn_cache_getroute, dn_fib_dump); @@ -1829,6 +1818,6 @@ void __exit dn_route_cleanup(void) del_timer(&dn_route_timer); dn_run_flush(0); - proc_net_remove("decnet_cache"); + proc_net_remove(&init_net, "decnet_cache"); } diff --git a/net/decnet/dn_rules.c b/net/decnet/dn_rules.c index 84ff3dd370709fc8c8c7f2927d1e6f9861019ea5..ddd3f04f0919ee880dba236ee1516df96d3a6bee 100644 --- a/net/decnet/dn_rules.c +++ b/net/decnet/dn_rules.c @@ -57,8 +57,6 @@ static struct dn_fib_rule default_rule = { }, }; -static LIST_HEAD(dn_fib_rules); - int dn_fib_lookup(struct flowi *flp, struct dn_fib_res *res) { @@ -228,9 +226,9 @@ static u32 dn_fib_rule_default_pref(void) struct list_head *pos; struct fib_rule *rule; - if (!list_empty(&dn_fib_rules)) { - pos = dn_fib_rules.next; - if (pos->next != &dn_fib_rules) { + if (!list_empty(&dn_fib_rules_ops.rules_list)) { + pos = dn_fib_rules_ops.rules_list.next; + if (pos->next != &dn_fib_rules_ops.rules_list) { rule = list_entry(pos->next, struct fib_rule, list); if (rule->pref) return rule->pref - 1; @@ -258,13 +256,14 @@ static struct fib_rules_ops dn_fib_rules_ops = { .flush_cache = dn_fib_rule_flush_cache, .nlgroup = RTNLGRP_DECnet_RULE, .policy = dn_fib_rule_policy, - .rules_list = &dn_fib_rules, + .rules_list = LIST_HEAD_INIT(dn_fib_rules_ops.rules_list), .owner = THIS_MODULE, }; void __init dn_fib_rules_init(void) { - list_add_tail(&default_rule.common.list, &dn_fib_rules); + list_add_tail(&default_rule.common.list, + &dn_fib_rules_ops.rules_list); fib_rules_register(&dn_fib_rules_ops); } diff --git a/net/decnet/netfilter/dn_rtmsg.c b/net/decnet/netfilter/dn_rtmsg.c index 696234688cf6fb7003613749a1c98b15f3623e8b..f7fba7721e639812e89f1c75d00b4ebaca9a1581 100644 --- a/net/decnet/netfilter/dn_rtmsg.c +++ b/net/decnet/netfilter/dn_rtmsg.c @@ -115,17 +115,6 @@ static inline void dnrmg_receive_user_skb(struct sk_buff *skb) RCV_SKB_FAIL(-EINVAL); } -static void dnrmg_receive_user_sk(struct sock *sk, int len) -{ - struct sk_buff *skb; - unsigned int qlen = skb_queue_len(&sk->sk_receive_queue); - - for (; qlen && (skb = skb_dequeue(&sk->sk_receive_queue)); qlen--) { - dnrmg_receive_user_skb(skb); - kfree_skb(skb); - } -} - static struct nf_hook_ops dnrmg_ops = { .hook = dnrmg_hook, .pf = PF_DECnet, @@ -137,8 +126,10 @@ static int __init dn_rtmsg_init(void) { int rv = 0; - dnrmg = netlink_kernel_create(NETLINK_DNRTMSG, DNRNG_NLGRP_MAX, - dnrmg_receive_user_sk, NULL, THIS_MODULE); + dnrmg = netlink_kernel_create(&init_net, + NETLINK_DNRTMSG, DNRNG_NLGRP_MAX, + dnrmg_receive_user_skb, + NULL, THIS_MODULE); if (dnrmg == NULL) { printk(KERN_ERR "dn_rtmsg: Cannot create netlink socket"); return -ENOMEM; diff --git a/net/decnet/sysctl_net_decnet.c b/net/decnet/sysctl_net_decnet.c index 52e40d7eb22d9f583c045175ae1d64aa4dde2e16..ae354a43fb97af91d7f8c0250585051e0aa2b186 100644 --- a/net/decnet/sysctl_net_decnet.c +++ b/net/decnet/sysctl_net_decnet.c @@ -259,7 +259,7 @@ static int dn_def_dev_strategy(ctl_table *table, int __user *name, int nlen, devname[newlen] = 0; - dev = dev_get_by_name(devname); + dev = dev_get_by_name(&init_net, devname); if (dev == NULL) return -ENODEV; @@ -299,7 +299,7 @@ static int dn_def_dev_handler(ctl_table *table, int write, devname[*lenp] = 0; strip_it(devname); - dev = dev_get_by_name(devname); + dev = dev_get_by_name(&init_net, devname); if (dev == NULL) return -ENODEV; diff --git a/net/econet/af_econet.c b/net/econet/af_econet.c index 
35c96bcc0f32d445649cc67901814ad11b003ead..9cae16b4e0b7cea737c88a763d34a8eb49d079a8 100644 --- a/net/econet/af_econet.c +++ b/net/econet/af_econet.c @@ -336,6 +336,7 @@ static int econet_sendmsg(struct kiocb *iocb, struct socket *sock, /* Real hardware Econet. We're not worthy etc. */ #ifdef CONFIG_ECONET_NATIVE unsigned short proto = 0; + int res; dev_hold(dev); @@ -354,12 +355,12 @@ static int econet_sendmsg(struct kiocb *iocb, struct socket *sock, eb->sec = *saddr; eb->sent = ec_tx_done; - if (dev->hard_header) { - int res; + err = -EINVAL; + res = dev_hard_header(skb, dev, ntohs(proto), &addr, NULL, len); + if (res < 0) + goto out_free; + if (res > 0) { struct ec_framehdr *fh; - err = -EINVAL; - res = dev->hard_header(skb, dev, ntohs(proto), - &addr, NULL, len); /* Poke in our control byte and port number. Hack, hack. */ fh = (struct ec_framehdr *)(skb->data); @@ -368,8 +369,7 @@ static int econet_sendmsg(struct kiocb *iocb, struct socket *sock, if (sock->type != SOCK_DGRAM) { skb_reset_tail_pointer(skb); skb->len = 0; - } else if (res < 0) - goto out_free; + } } /* Copy the data. Returns -EFAULT on error */ @@ -608,12 +608,15 @@ static struct proto econet_proto = { * Create an Econet socket */ -static int econet_create(struct socket *sock, int protocol) +static int econet_create(struct net *net, struct socket *sock, int protocol) { struct sock *sk; struct econet_sock *eo; int err; + if (net != &init_net) + return -EAFNOSUPPORT; + /* Econet only provides datagram services. */ if (sock->type != SOCK_DGRAM) return -ESOCKTNOSUPPORT; @@ -621,7 +624,7 @@ static int econet_create(struct socket *sock, int protocol) sock->state = SS_UNCONNECTED; err = -ENOBUFS; - sk = sk_alloc(PF_ECONET, GFP_KERNEL, &econet_proto, 1); + sk = sk_alloc(net, PF_ECONET, GFP_KERNEL, &econet_proto, 1); if (sk == NULL) goto out; @@ -659,7 +662,7 @@ static int ec_dev_ioctl(struct socket *sock, unsigned int cmd, void __user *arg) if (copy_from_user(&ifr, arg, sizeof(struct ifreq))) return -EFAULT; - if ((dev = dev_get_by_name(ifr.ifr_name)) == NULL) + if ((dev = dev_get_by_name(&init_net, ifr.ifr_name)) == NULL) return -ENODEV; sec = (struct sockaddr_ec *)&ifr.ifr_addr; @@ -1062,6 +1065,9 @@ static int econet_rcv(struct sk_buff *skb, struct net_device *dev, struct packet struct sock *sk; struct ec_device *edev = dev->ec_ptr; + if (dev->nd_net != &init_net) + goto drop; + if (skb->pkt_type == PACKET_OTHERHOST) goto drop; @@ -1116,6 +1122,9 @@ static int econet_notifier(struct notifier_block *this, unsigned long msg, void struct net_device *dev = (struct net_device *)data; struct ec_device *edev; + if (dev->nd_net != &init_net) + return NOTIFY_DONE; + switch (msg) { case NETDEV_UNREGISTER: /* A device has gone down - kill any data we hold for it. */ diff --git a/net/ethernet/eth.c b/net/ethernet/eth.c index 12c765715acfdb102314ebc663d8de1bdbbb0fee..ed8a3d49487dd263282d90e0cc91659b3fea0b7a 100644 --- a/net/ethernet/eth.c +++ b/net/ethernet/eth.c @@ -75,8 +75,9 @@ __setup("ether=", netdev_boot_setup); * Set the protocol type. For a packet of type ETH_P_802_3 we put the length * in here instead. It is up to the 802.2 layer to carry protocol information. 
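
The econet_sendmsg() hunk above, and the pe2.c and arp.c hunks later in this patch, replace open-coded dev->hard_header() calls with dev_hard_header(), which hides the new header_ops indirection and copes with devices that build no link-layer header. A minimal sketch of the calling pattern; xmit_demo() and its use of ETH_P_IP are illustrative and not taken from the patch:

#include <linux/errno.h>
#include <linux/if_ether.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Build the link-layer header, then hand the frame to the qdisc layer.
 * dev_hard_header() returns the header length, 0 if the device needs no
 * header at all, or a negative value if the header could not be built. */
static int xmit_demo(struct sk_buff *skb, struct net_device *dev,
		     const void *dest_hw)
{
	if (dev_hard_header(skb, dev, ETH_P_IP, dest_hw,
			    NULL /* source defaults to dev->dev_addr */,
			    skb->len) < 0) {
		kfree_skb(skb);
		return -EINVAL;
	}
	return dev_queue_xmit(skb);
}
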
*/ -int eth_header(struct sk_buff *skb, struct net_device *dev, unsigned short type, - void *daddr, void *saddr, unsigned len) +int eth_header(struct sk_buff *skb, struct net_device *dev, + unsigned short type, + const void *daddr, const void *saddr, unsigned len) { struct ethhdr *eth = (struct ethhdr *)skb_push(skb, ETH_HLEN); @@ -91,10 +92,10 @@ int eth_header(struct sk_buff *skb, struct net_device *dev, unsigned short type, if (!saddr) saddr = dev->dev_addr; - memcpy(eth->h_source, saddr, dev->addr_len); + memcpy(eth->h_source, saddr, ETH_ALEN); if (daddr) { - memcpy(eth->h_dest, daddr, dev->addr_len); + memcpy(eth->h_dest, daddr, ETH_ALEN); return ETH_HLEN; } @@ -103,12 +104,13 @@ int eth_header(struct sk_buff *skb, struct net_device *dev, unsigned short type, */ if (dev->flags & (IFF_LOOPBACK | IFF_NOARP)) { - memset(eth->h_dest, 0, dev->addr_len); + memset(eth->h_dest, 0, ETH_ALEN); return ETH_HLEN; } return -ETH_HLEN; } +EXPORT_SYMBOL(eth_header); /** * eth_rebuild_header- rebuild the Ethernet MAC header. @@ -135,12 +137,13 @@ int eth_rebuild_header(struct sk_buff *skb) "%s: unable to resolve type %X addresses.\n", dev->name, (int)eth->h_proto); - memcpy(eth->h_source, dev->dev_addr, dev->addr_len); + memcpy(eth->h_source, dev->dev_addr, ETH_ALEN); break; } return 0; } +EXPORT_SYMBOL(eth_rebuild_header); /** * eth_type_trans - determine the packet's protocol ID. @@ -207,12 +210,13 @@ EXPORT_SYMBOL(eth_type_trans); * @skb: packet to extract header from * @haddr: destination buffer */ -static int eth_header_parse(struct sk_buff *skb, unsigned char *haddr) +int eth_header_parse(const struct sk_buff *skb, unsigned char *haddr) { - struct ethhdr *eth = eth_hdr(skb); + const struct ethhdr *eth = eth_hdr(skb); memcpy(haddr, eth->h_source, ETH_ALEN); return ETH_ALEN; } +EXPORT_SYMBOL(eth_header_parse); /** * eth_header_cache - fill cache entry from neighbour @@ -220,11 +224,11 @@ static int eth_header_parse(struct sk_buff *skb, unsigned char *haddr) * @hh: destination cache entry * Create an Ethernet header template from the neighbour. */ -int eth_header_cache(struct neighbour *neigh, struct hh_cache *hh) +int eth_header_cache(const struct neighbour *neigh, struct hh_cache *hh) { __be16 type = hh->hh_type; struct ethhdr *eth; - struct net_device *dev = neigh->dev; + const struct net_device *dev = neigh->dev; eth = (struct ethhdr *) (((u8 *) hh->hh_data) + (HH_DATA_OFF(sizeof(*eth)))); @@ -233,11 +237,12 @@ int eth_header_cache(struct neighbour *neigh, struct hh_cache *hh) return -1; eth->h_proto = type; - memcpy(eth->h_source, dev->dev_addr, dev->addr_len); - memcpy(eth->h_dest, neigh->ha, dev->addr_len); + memcpy(eth->h_source, dev->dev_addr, ETH_ALEN); + memcpy(eth->h_dest, neigh->ha, ETH_ALEN); hh->hh_len = ETH_HLEN; return 0; } +EXPORT_SYMBOL(eth_header_cache); /** * eth_header_cache_update - update cache entry @@ -247,12 +252,14 @@ int eth_header_cache(struct neighbour *neigh, struct hh_cache *hh) * * Called by Address Resolution module to notify changes in address. 
*/ -void eth_header_cache_update(struct hh_cache *hh, struct net_device *dev, - unsigned char *haddr) +void eth_header_cache_update(struct hh_cache *hh, + const struct net_device *dev, + const unsigned char *haddr) { memcpy(((u8 *) hh->hh_data) + HH_DATA_OFF(sizeof(struct ethhdr)), - haddr, dev->addr_len); + haddr, ETH_ALEN); } +EXPORT_SYMBOL(eth_header_cache_update); /** * eth_mac_addr - set new Ethernet hardware address @@ -271,7 +278,7 @@ static int eth_mac_addr(struct net_device *dev, void *p) return -EBUSY; if (!is_valid_ether_addr(addr->sa_data)) return -EADDRNOTAVAIL; - memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); + memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN); return 0; } @@ -291,6 +298,14 @@ static int eth_change_mtu(struct net_device *dev, int new_mtu) return 0; } +const struct header_ops eth_header_ops ____cacheline_aligned = { + .create = eth_header, + .parse = eth_header_parse, + .rebuild = eth_rebuild_header, + .cache = eth_header_cache, + .cache_update = eth_header_cache_update, +}; + /** * ether_setup - setup Ethernet network device * @dev: network device @@ -298,13 +313,10 @@ static int eth_change_mtu(struct net_device *dev, int new_mtu) */ void ether_setup(struct net_device *dev) { + dev->header_ops = ð_header_ops; + dev->change_mtu = eth_change_mtu; - dev->hard_header = eth_header; - dev->rebuild_header = eth_rebuild_header; dev->set_mac_address = eth_mac_addr; - dev->hard_header_cache = eth_header_cache; - dev->header_cache_update= eth_header_cache_update; - dev->hard_header_parse = eth_header_parse; dev->type = ARPHRD_ETHER; dev->hard_header_len = ETH_HLEN; @@ -337,3 +349,11 @@ struct net_device *alloc_etherdev_mq(int sizeof_priv, unsigned int queue_count) return alloc_netdev_mq(sizeof_priv, "eth%d", ether_setup, queue_count); } EXPORT_SYMBOL(alloc_etherdev_mq); + +char *print_mac(char *buf, const u8 *addr) +{ + sprintf(buf, MAC_FMT, + addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]); + return buf; +} +EXPORT_SYMBOL(print_mac); diff --git a/net/ethernet/pe2.c b/net/ethernet/pe2.c index 9d57b4fb6440c74532856b9fc4160821aa19d2b8..d60e15d9365e720b56f823c1842170d12de8a218 100644 --- a/net/ethernet/pe2.c +++ b/net/ethernet/pe2.c @@ -12,9 +12,7 @@ static int pEII_request(struct datalink_proto *dl, struct net_device *dev = skb->dev; skb->protocol = htons(ETH_P_IPX); - if (dev->hard_header) - dev->hard_header(skb, dev, ETH_P_IPX, - dest_node, NULL, skb->len); + dev_hard_header(skb, dev, ETH_P_IPX, dest_node, NULL, skb->len); return dev_queue_xmit(skb); } diff --git a/net/ieee80211/ieee80211_crypt_ccmp.c b/net/ieee80211/ieee80211_crypt_ccmp.c index b016b4104de6d4893ba0dae8546bc7f1347026af..0936a3e0210b4f3cf3bdb8962531adc66491bf7c 100644 --- a/net/ieee80211/ieee80211_crypt_ccmp.c +++ b/net/ieee80211/ieee80211_crypt_ccmp.c @@ -9,6 +9,7 @@ * more details. 
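
The eth.c hunk above adds and exports print_mac(); the ieee80211 hunks that follow convert their MAC_FMT/MAC_ARG printk sites to it, pairing it with a DECLARE_MAC_BUF() scratch buffer. A small sketch of the converted idiom; log_peer() is a made-up helper, not part of the patch:

#include <linux/if_ether.h>
#include <linux/kernel.h>
#include <linux/types.h>

static void log_peer(const u8 *addr)
{
	DECLARE_MAC_BUF(mac);	/* 18-byte stack buffer for "xx:xx:xx:xx:xx:xx" */

	/* formerly: printk(KERN_DEBUG "peer " MAC_FMT "\n", MAC_ARG(addr));
	 * now a single %s argument rendered by print_mac() */
	printk(KERN_DEBUG "peer %s\n", print_mac(mac, addr));
}
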
*/ +#include #include #include #include @@ -241,7 +242,7 @@ static int ieee80211_ccmp_encrypt(struct sk_buff *skb, int hdr_len, void *priv) hdr = (struct ieee80211_hdr_4addr *)skb->data; ccmp_init_blocks(key->tfm, hdr, key->tx_pn, data_len, b0, b, s0); - blocks = (data_len + AES_BLOCK_LEN - 1) / AES_BLOCK_LEN; + blocks = DIV_ROUND_UP(data_len, AES_BLOCK_LEN); last = data_len % AES_BLOCK_LEN; for (i = 1; i <= blocks; i++) { @@ -296,6 +297,7 @@ static int ieee80211_ccmp_decrypt(struct sk_buff *skb, int hdr_len, void *priv) int i, blocks, last, len; size_t data_len = skb->len - hdr_len - CCMP_HDR_LEN - CCMP_MIC_LEN; u8 *mic = skb->data + skb->len - CCMP_MIC_LEN; + DECLARE_MAC_BUF(mac); if (skb->len < hdr_len + CCMP_HDR_LEN + CCMP_MIC_LEN) { key->dot11RSNAStatsCCMPFormatErrors++; @@ -308,7 +310,7 @@ static int ieee80211_ccmp_decrypt(struct sk_buff *skb, int hdr_len, void *priv) if (!(keyidx & (1 << 5))) { if (net_ratelimit()) { printk(KERN_DEBUG "CCMP: received packet without ExtIV" - " flag from " MAC_FMT "\n", MAC_ARG(hdr->addr2)); + " flag from %s\n", print_mac(mac, hdr->addr2)); } key->dot11RSNAStatsCCMPFormatErrors++; return -2; @@ -321,9 +323,9 @@ static int ieee80211_ccmp_decrypt(struct sk_buff *skb, int hdr_len, void *priv) } if (!key->key_set) { if (net_ratelimit()) { - printk(KERN_DEBUG "CCMP: received packet from " MAC_FMT + printk(KERN_DEBUG "CCMP: received packet from %s" " with keyid=%d that does not have a configured" - " key\n", MAC_ARG(hdr->addr2), keyidx); + " key\n", print_mac(mac, hdr->addr2), keyidx); } return -3; } @@ -338,11 +340,13 @@ static int ieee80211_ccmp_decrypt(struct sk_buff *skb, int hdr_len, void *priv) if (ccmp_replay_check(pn, key->rx_pn)) { if (net_ratelimit()) { - IEEE80211_DEBUG_DROP("CCMP: replay detected: STA=" MAC_FMT - " previous PN %02x%02x%02x%02x%02x%02x " - "received PN %02x%02x%02x%02x%02x%02x\n", - MAC_ARG(hdr->addr2), MAC_ARG(key->rx_pn), - MAC_ARG(pn)); + IEEE80211_DEBUG_DROP("CCMP: replay detected: STA=%s " + "previous PN %02x%02x%02x%02x%02x%02x " + "received PN %02x%02x%02x%02x%02x%02x\n", + print_mac(mac, hdr->addr2), + key->rx_pn[0], key->rx_pn[1], key->rx_pn[2], + key->rx_pn[3], key->rx_pn[4], key->rx_pn[5], + pn[0], pn[1], pn[2], pn[3], pn[4], pn[5]); } key->dot11RSNAStatsCCMPReplays++; return -4; @@ -351,7 +355,7 @@ static int ieee80211_ccmp_decrypt(struct sk_buff *skb, int hdr_len, void *priv) ccmp_init_blocks(key->tfm, hdr, pn, data_len, b0, a, b); xor_block(mic, b, CCMP_MIC_LEN); - blocks = (data_len + AES_BLOCK_LEN - 1) / AES_BLOCK_LEN; + blocks = DIV_ROUND_UP(data_len, AES_BLOCK_LEN); last = data_len % AES_BLOCK_LEN; for (i = 1; i <= blocks; i++) { @@ -370,7 +374,7 @@ static int ieee80211_ccmp_decrypt(struct sk_buff *skb, int hdr_len, void *priv) if (memcmp(mic, a, CCMP_MIC_LEN) != 0) { if (net_ratelimit()) { printk(KERN_DEBUG "CCMP: decrypt failed: STA=" - MAC_FMT "\n", MAC_ARG(hdr->addr2)); + "%s\n", print_mac(mac, hdr->addr2)); } key->dot11RSNAStatsCCMPDecryptErrors++; return -5; @@ -442,12 +446,16 @@ static int ieee80211_ccmp_get_key(void *key, int len, u8 * seq, void *priv) static char *ieee80211_ccmp_print_stats(char *p, void *priv) { struct ieee80211_ccmp_data *ccmp = priv; + p += sprintf(p, "key[%d] alg=CCMP key_set=%d " "tx_pn=%02x%02x%02x%02x%02x%02x " "rx_pn=%02x%02x%02x%02x%02x%02x " "format_errors=%d replays=%d decrypt_errors=%d\n", ccmp->key_idx, ccmp->key_set, - MAC_ARG(ccmp->tx_pn), MAC_ARG(ccmp->rx_pn), + ccmp->tx_pn[0], ccmp->tx_pn[1], ccmp->tx_pn[2], + ccmp->tx_pn[3], ccmp->tx_pn[4], ccmp->tx_pn[5], + 
ccmp->rx_pn[0], ccmp->rx_pn[1], ccmp->rx_pn[2], + ccmp->rx_pn[3], ccmp->rx_pn[4], ccmp->rx_pn[5], ccmp->dot11RSNAStatsCCMPFormatErrors, ccmp->dot11RSNAStatsCCMPReplays, ccmp->dot11RSNAStatsCCMPDecryptErrors); diff --git a/net/ieee80211/ieee80211_crypt_tkip.c b/net/ieee80211/ieee80211_crypt_tkip.c index 5a48d8e0aec1a98a73761cfe9626d32c4fd47383..6cc54eeca3edf54b11f76f6d23ca7b43d2adeec5 100644 --- a/net/ieee80211/ieee80211_crypt_tkip.c +++ b/net/ieee80211/ieee80211_crypt_tkip.c @@ -359,14 +359,15 @@ static int ieee80211_tkip_encrypt(struct sk_buff *skb, int hdr_len, void *priv) u8 rc4key[16], *pos, *icv; u32 crc; struct scatterlist sg; + DECLARE_MAC_BUF(mac); if (tkey->flags & IEEE80211_CRYPTO_TKIP_COUNTERMEASURES) { if (net_ratelimit()) { struct ieee80211_hdr_4addr *hdr = (struct ieee80211_hdr_4addr *)skb->data; printk(KERN_DEBUG ": TKIP countermeasures: dropped " - "TX packet to " MAC_FMT "\n", - MAC_ARG(hdr->addr1)); + "TX packet to %s\n", + print_mac(mac, hdr->addr1)); } return -1; } @@ -421,14 +422,15 @@ static int ieee80211_tkip_decrypt(struct sk_buff *skb, int hdr_len, void *priv) u32 crc; struct scatterlist sg; int plen; + DECLARE_MAC_BUF(mac); hdr = (struct ieee80211_hdr_4addr *)skb->data; if (tkey->flags & IEEE80211_CRYPTO_TKIP_COUNTERMEASURES) { if (net_ratelimit()) { printk(KERN_DEBUG ": TKIP countermeasures: dropped " - "received packet from " MAC_FMT "\n", - MAC_ARG(hdr->addr2)); + "received packet from %s\n", + print_mac(mac, hdr->addr2)); } return -1; } @@ -441,7 +443,7 @@ static int ieee80211_tkip_decrypt(struct sk_buff *skb, int hdr_len, void *priv) if (!(keyidx & (1 << 5))) { if (net_ratelimit()) { printk(KERN_DEBUG "TKIP: received packet without ExtIV" - " flag from " MAC_FMT "\n", MAC_ARG(hdr->addr2)); + " flag from %s\n", print_mac(mac, hdr->addr2)); } return -2; } @@ -453,9 +455,9 @@ static int ieee80211_tkip_decrypt(struct sk_buff *skb, int hdr_len, void *priv) } if (!tkey->key_set) { if (net_ratelimit()) { - printk(KERN_DEBUG "TKIP: received packet from " MAC_FMT + printk(KERN_DEBUG "TKIP: received packet from %s" " with keyid=%d that does not have a configured" - " key\n", MAC_ARG(hdr->addr2), keyidx); + " key\n", print_mac(mac, hdr->addr2), keyidx); } return -3; } @@ -465,9 +467,9 @@ static int ieee80211_tkip_decrypt(struct sk_buff *skb, int hdr_len, void *priv) if (tkip_replay_check(iv32, iv16, tkey->rx_iv32, tkey->rx_iv16)) { if (net_ratelimit()) { - IEEE80211_DEBUG_DROP("TKIP: replay detected: STA=" MAC_FMT + IEEE80211_DEBUG_DROP("TKIP: replay detected: STA=%s" " previous TSC %08x%04x received TSC " - "%08x%04x\n", MAC_ARG(hdr->addr2), + "%08x%04x\n", print_mac(mac, hdr->addr2), tkey->rx_iv32, tkey->rx_iv16, iv32, iv16); } tkey->dot11RSNAStatsTKIPReplays++; @@ -489,8 +491,8 @@ static int ieee80211_tkip_decrypt(struct sk_buff *skb, int hdr_len, void *priv) if (crypto_blkcipher_decrypt(&desc, &sg, &sg, plen + 4)) { if (net_ratelimit()) { printk(KERN_DEBUG ": TKIP: failed to decrypt " - "received packet from " MAC_FMT "\n", - MAC_ARG(hdr->addr2)); + "received packet from %s\n", + print_mac(mac, hdr->addr2)); } return -7; } @@ -508,7 +510,7 @@ static int ieee80211_tkip_decrypt(struct sk_buff *skb, int hdr_len, void *priv) } if (net_ratelimit()) { IEEE80211_DEBUG_DROP("TKIP: ICV error detected: STA=" - MAC_FMT "\n", MAC_ARG(hdr->addr2)); + "%s\n", print_mac(mac, hdr->addr2)); } tkey->dot11RSNAStatsTKIPICVErrors++; return -5; @@ -639,6 +641,7 @@ static int ieee80211_michael_mic_verify(struct sk_buff *skb, int keyidx, { struct ieee80211_tkip_data *tkey = priv; u8 
mic[8]; + DECLARE_MAC_BUF(mac); if (!tkey->key_set) return -1; @@ -651,8 +654,8 @@ static int ieee80211_michael_mic_verify(struct sk_buff *skb, int keyidx, struct ieee80211_hdr_4addr *hdr; hdr = (struct ieee80211_hdr_4addr *)skb->data; printk(KERN_DEBUG "%s: Michael MIC verification failed for " - "MSDU from " MAC_FMT " keyidx=%d\n", - skb->dev ? skb->dev->name : "N/A", MAC_ARG(hdr->addr2), + "MSDU from %s keyidx=%d\n", + skb->dev ? skb->dev->name : "N/A", print_mac(mac, hdr->addr2), keyidx); if (skb->dev) ieee80211_michael_mic_failure(skb->dev, hdr, keyidx); diff --git a/net/ieee80211/ieee80211_module.c b/net/ieee80211/ieee80211_module.c index 17ad278696edfada4765d6ae540903c1ef2a565a..69cb6aad25be0b6bf760492d6ca580c63883a026 100644 --- a/net/ieee80211/ieee80211_module.c +++ b/net/ieee80211/ieee80211_module.c @@ -47,6 +47,7 @@ #include #include #include +#include #include #include @@ -264,7 +265,7 @@ static int __init ieee80211_init(void) struct proc_dir_entry *e; ieee80211_debug_level = debug; - ieee80211_proc = proc_mkdir(DRV_NAME, proc_net); + ieee80211_proc = proc_mkdir(DRV_NAME, init_net.proc_net); if (ieee80211_proc == NULL) { IEEE80211_ERROR("Unable to create " DRV_NAME " proc directory\n"); @@ -273,7 +274,7 @@ static int __init ieee80211_init(void) e = create_proc_entry("debug_level", S_IFREG | S_IRUGO | S_IWUSR, ieee80211_proc); if (!e) { - remove_proc_entry(DRV_NAME, proc_net); + remove_proc_entry(DRV_NAME, init_net.proc_net); ieee80211_proc = NULL; return -EIO; } @@ -293,7 +294,7 @@ static void __exit ieee80211_exit(void) #ifdef CONFIG_IEEE80211_DEBUG if (ieee80211_proc) { remove_proc_entry("debug_level", ieee80211_proc); - remove_proc_entry(DRV_NAME, proc_net); + remove_proc_entry(DRV_NAME, init_net.proc_net); ieee80211_proc = NULL; } #endif /* CONFIG_IEEE80211_DEBUG */ diff --git a/net/ieee80211/ieee80211_rx.c b/net/ieee80211/ieee80211_rx.c index 6284c99b456eeb7fc992ffd9063d28d609fdce0f..21c0fadde03b1fc7a86b083b8f5194ecec1a8617 100644 --- a/net/ieee80211/ieee80211_rx.c +++ b/net/ieee80211/ieee80211_rx.c @@ -271,6 +271,7 @@ ieee80211_rx_frame_decrypt(struct ieee80211_device *ieee, struct sk_buff *skb, { struct ieee80211_hdr_3addr *hdr; int res, hdrlen; + DECLARE_MAC_BUF(mac); if (crypt == NULL || crypt->ops->decrypt_mpdu == NULL) return 0; @@ -282,8 +283,8 @@ ieee80211_rx_frame_decrypt(struct ieee80211_device *ieee, struct sk_buff *skb, res = crypt->ops->decrypt_mpdu(skb, hdrlen, crypt->priv); atomic_dec(&crypt->refcnt); if (res < 0) { - IEEE80211_DEBUG_DROP("decryption failed (SA=" MAC_FMT - ") res=%d\n", MAC_ARG(hdr->addr2), res); + IEEE80211_DEBUG_DROP("decryption failed (SA=%s" + ") res=%d\n", print_mac(mac, hdr->addr2), res); if (res == -2) IEEE80211_DEBUG_DROP("Decryption failed ICV " "mismatch (key %d)\n", @@ -303,6 +304,7 @@ ieee80211_rx_frame_decrypt_msdu(struct ieee80211_device *ieee, { struct ieee80211_hdr_3addr *hdr; int res, hdrlen; + DECLARE_MAC_BUF(mac); if (crypt == NULL || crypt->ops->decrypt_msdu == NULL) return 0; @@ -315,8 +317,8 @@ ieee80211_rx_frame_decrypt_msdu(struct ieee80211_device *ieee, atomic_dec(&crypt->refcnt); if (res < 0) { printk(KERN_DEBUG "%s: MSDU decryption/MIC verification failed" - " (SA=" MAC_FMT " keyidx=%d)\n", - ieee->dev->name, MAC_ARG(hdr->addr2), keyidx); + " (SA=%s keyidx=%d)\n", + ieee->dev->name, print_mac(mac, hdr->addr2), keyidx); return -1; } @@ -350,6 +352,7 @@ int ieee80211_rx(struct ieee80211_device *ieee, struct sk_buff *skb, struct ieee80211_crypt_data *crypt = NULL; int keyidx = 0; int can_be_decrypted = 0; + 
DECLARE_MAC_BUF(mac); hdr = (struct ieee80211_hdr_4addr *)skb->data; stats = &ieee->stats; @@ -459,8 +462,8 @@ int ieee80211_rx(struct ieee80211_device *ieee, struct sk_buff *skb, * frames silently instead of filling system log with * these reports. */ IEEE80211_DEBUG_DROP("Decryption failed (not set)" - " (SA=" MAC_FMT ")\n", - MAC_ARG(hdr->addr2)); + " (SA=%s)\n", + print_mac(mac, hdr->addr2)); ieee->ieee_stats.rx_discards_undecryptable++; goto rx_dropped; } @@ -471,8 +474,8 @@ int ieee80211_rx(struct ieee80211_device *ieee, struct sk_buff *skb, fc & IEEE80211_FCTL_PROTECTED && ieee->host_decrypt && (keyidx = hostap_rx_frame_decrypt(ieee, skb, crypt)) < 0) { printk(KERN_DEBUG "%s: failed to decrypt mgmt::auth " - "from " MAC_FMT "\n", dev->name, - MAC_ARG(hdr->addr2)); + "from %s\n", dev->name, + print_mac(mac, hdr->addr2)); /* TODO: could inform hostapd about this so that it * could send auth failure report */ goto rx_dropped; @@ -650,8 +653,8 @@ int ieee80211_rx(struct ieee80211_device *ieee, struct sk_buff *skb, * configured */ } else { IEEE80211_DEBUG_DROP("encryption configured, but RX " - "frame not encrypted (SA=" MAC_FMT - ")\n", MAC_ARG(hdr->addr2)); + "frame not encrypted (SA=%s" + ")\n", print_mac(mac, hdr->addr2)); goto rx_dropped; } } @@ -659,9 +662,9 @@ int ieee80211_rx(struct ieee80211_device *ieee, struct sk_buff *skb, if (crypt && !(fc & IEEE80211_FCTL_PROTECTED) && !ieee->open_wep && !ieee80211_is_eapol_frame(ieee, skb)) { IEEE80211_DEBUG_DROP("dropped unencrypted RX data " - "frame from " MAC_FMT + "frame from %s" " (drop_unencrypted=1)\n", - MAC_ARG(hdr->addr2)); + print_mac(mac, hdr->addr2)); goto rx_dropped; } @@ -1411,6 +1414,8 @@ static int ieee80211_network_init(struct ieee80211_device *ieee, struct ieee8021 struct ieee80211_network *network, struct ieee80211_rx_stats *stats) { + DECLARE_MAC_BUF(mac); + network->qos_data.active = 0; network->qos_data.supported = 0; network->qos_data.param_count = 0; @@ -1457,11 +1462,11 @@ static int ieee80211_network_init(struct ieee80211_device *ieee, struct ieee8021 } if (network->mode == 0) { - IEEE80211_DEBUG_SCAN("Filtered out '%s (" MAC_FMT ")' " + IEEE80211_DEBUG_SCAN("Filtered out '%s (%s)' " "network.\n", escape_essid(network->ssid, network->ssid_len), - MAC_ARG(network->bssid)); + print_mac(mac, network->bssid)); return 1; } @@ -1490,6 +1495,7 @@ static void update_network(struct ieee80211_network *dst, { int qos_active; u8 old_param; + DECLARE_MAC_BUF(mac); ieee80211_network_reset(dst); dst->ibss_dfs = src->ibss_dfs; @@ -1503,8 +1509,8 @@ static void update_network(struct ieee80211_network *dst, memcpy(&dst->stats, &src->stats, sizeof(struct ieee80211_rx_stats)); else - IEEE80211_DEBUG_SCAN("Network " MAC_FMT " info received " - "off channel (%d vs. %d)\n", MAC_ARG(src->bssid), + IEEE80211_DEBUG_SCAN("Network %s info received " + "off channel (%d vs. %d)\n", print_mac(mac, src->bssid), dst->channel, src->stats.received_channel); dst->capability = src->capability; @@ -1576,12 +1582,13 @@ static void ieee80211_process_probe_response(struct ieee80211_device struct ieee80211_info_element *info_element = beacon->info_element; #endif unsigned long flags; + DECLARE_MAC_BUF(mac); - IEEE80211_DEBUG_SCAN("'%s' (" MAC_FMT + IEEE80211_DEBUG_SCAN("'%s' (%s" "): %c%c%c%c %c%c%c%c-%c%c%c%c %c%c%c%c\n", escape_essid(info_element->data, info_element->len), - MAC_ARG(beacon->header.addr3), + print_mac(mac, beacon->header.addr3), (beacon->capability & (1 << 0xf)) ? '1' : '0', (beacon->capability & (1 << 0xe)) ? 
'1' : '0', (beacon->capability & (1 << 0xd)) ? '1' : '0', @@ -1600,10 +1607,10 @@ static void ieee80211_process_probe_response(struct ieee80211_device (beacon->capability & (1 << 0x0)) ? '1' : '0'); if (ieee80211_network_init(ieee, beacon, &network, stats)) { - IEEE80211_DEBUG_SCAN("Dropped '%s' (" MAC_FMT ") via %s.\n", + IEEE80211_DEBUG_SCAN("Dropped '%s' (%s) via %s.\n", escape_essid(info_element->data, info_element->len), - MAC_ARG(beacon->header.addr3), + print_mac(mac, beacon->header.addr3), is_beacon(beacon->header.frame_ctl) ? "BEACON" : "PROBE RESPONSE"); return; @@ -1637,11 +1644,11 @@ static void ieee80211_process_probe_response(struct ieee80211_device /* If there are no more slots, expire the oldest */ list_del(&oldest->list); target = oldest; - IEEE80211_DEBUG_SCAN("Expired '%s' (" MAC_FMT ") from " + IEEE80211_DEBUG_SCAN("Expired '%s' (%s) from " "network list.\n", escape_essid(target->ssid, target->ssid_len), - MAC_ARG(target->bssid)); + print_mac(mac, target->bssid)); ieee80211_network_reset(target); } else { /* Otherwise just pull from the free list */ @@ -1651,10 +1658,10 @@ static void ieee80211_process_probe_response(struct ieee80211_device } #ifdef CONFIG_IEEE80211_DEBUG - IEEE80211_DEBUG_SCAN("Adding '%s' (" MAC_FMT ") via %s.\n", + IEEE80211_DEBUG_SCAN("Adding '%s' (%s) via %s.\n", escape_essid(network.ssid, network.ssid_len), - MAC_ARG(network.bssid), + print_mac(mac, network.bssid), is_beacon(beacon->header.frame_ctl) ? "BEACON" : "PROBE RESPONSE"); #endif @@ -1662,10 +1669,10 @@ static void ieee80211_process_probe_response(struct ieee80211_device network.ibss_dfs = NULL; list_add_tail(&target->list, &ieee->network_list); } else { - IEEE80211_DEBUG_SCAN("Updating '%s' (" MAC_FMT ") via %s.\n", + IEEE80211_DEBUG_SCAN("Updating '%s' (%s) via %s.\n", escape_essid(target->ssid, target->ssid_len), - MAC_ARG(target->bssid), + print_mac(mac, target->bssid), is_beacon(beacon->header.frame_ctl) ? 
"BEACON" : "PROBE RESPONSE"); update_network(target, &network); diff --git a/net/ieee80211/ieee80211_wx.c b/net/ieee80211/ieee80211_wx.c index 465b73d505321e0fefecbb499d963f90ac58f4ea..9b58dd67acb6c34f0a43970e929a34cc24282d93 100644 --- a/net/ieee80211/ieee80211_wx.c +++ b/net/ieee80211/ieee80211_wx.c @@ -257,6 +257,7 @@ int ieee80211_wx_get_scan(struct ieee80211_device *ieee, char *ev = extra; char *stop = ev + wrqu->data.length; int i = 0; + DECLARE_MAC_BUF(mac); IEEE80211_DEBUG_WX("Getting scan\n"); @@ -274,10 +275,10 @@ int ieee80211_wx_get_scan(struct ieee80211_device *ieee, ev = ieee80211_translate_scan(ieee, ev, stop, network); else IEEE80211_DEBUG_SCAN("Not showing network '%s (" - MAC_FMT ")' due to age (%dms).\n", + "%s)' due to age (%dms).\n", escape_essid(network->ssid, network->ssid_len), - MAC_ARG(network->bssid), + print_mac(mac, network->bssid), jiffies_to_msecs(jiffies - network-> last_scanned)); diff --git a/net/ieee80211/softmac/ieee80211softmac_assoc.c b/net/ieee80211/softmac/ieee80211softmac_assoc.c index e475f2e1be1375e0125a6a51159833b70bbf6e4e..c4d122ddd72c76d76397c1ce244200df368670be 100644 --- a/net/ieee80211/softmac/ieee80211softmac_assoc.c +++ b/net/ieee80211/softmac/ieee80211softmac_assoc.c @@ -53,7 +53,7 @@ ieee80211softmac_assoc(struct ieee80211softmac_device *mac, struct ieee80211soft /* Set a timer for timeout */ /* FIXME: make timeout configurable */ if (likely(mac->running)) - schedule_delayed_work(&mac->associnfo.timeout, 5 * HZ); + queue_delayed_work(mac->wq, &mac->associnfo.timeout, 5 * HZ); spin_unlock_irqrestore(&mac->lock, flags); } @@ -372,6 +372,7 @@ ieee80211softmac_handle_assoc_response(struct net_device * dev, u16 status = le16_to_cpup(&resp->status); struct ieee80211softmac_network *network = NULL; unsigned long flags; + DECLARE_MAC_BUF(mac2); if (unlikely(!mac->running)) return -ENODEV; @@ -388,7 +389,8 @@ ieee80211softmac_handle_assoc_response(struct net_device * dev, /* someone sending us things without us knowing him? Ignore. */ if (!network) { - dprintk(KERN_INFO PFX "Received unrequested assocation response from " MAC_FMT "\n", MAC_ARG(resp->header.addr3)); + dprintk(KERN_INFO PFX "Received unrequested assocation response from %s\n", + print_mac(mac2, resp->header.addr3)); spin_unlock_irqrestore(&mac->lock, flags); return 0; } @@ -417,7 +419,7 @@ ieee80211softmac_handle_assoc_response(struct net_device * dev, network->authenticated = 0; /* we don't want to do this more than once ... 
*/ network->auth_desynced_once = 1; - schedule_delayed_work(&mac->associnfo.work, 0); + queue_delayed_work(mac->wq, &mac->associnfo.work, 0); break; } default: @@ -439,7 +441,7 @@ ieee80211softmac_try_reassoc(struct ieee80211softmac_device *mac) spin_lock_irqsave(&mac->lock, flags); mac->associnfo.associating = 1; - schedule_delayed_work(&mac->associnfo.work, 0); + queue_delayed_work(mac->wq, &mac->associnfo.work, 0); spin_unlock_irqrestore(&mac->lock, flags); } @@ -481,7 +483,7 @@ ieee80211softmac_handle_reassoc_req(struct net_device * dev, dprintkl(KERN_INFO PFX "reassoc request from unknown network\n"); return 0; } - schedule_delayed_work(&mac->associnfo.work, 0); + queue_delayed_work(mac->wq, &mac->associnfo.work, 0); return 0; } diff --git a/net/ieee80211/softmac/ieee80211softmac_auth.c b/net/ieee80211/softmac/ieee80211softmac_auth.c index 826c32d24461198614b5ba3eb5eb4a0f33951ea2..a53a751d07025e54613175af93d01c62bb179248 100644 --- a/net/ieee80211/softmac/ieee80211softmac_auth.c +++ b/net/ieee80211/softmac/ieee80211softmac_auth.c @@ -35,6 +35,7 @@ ieee80211softmac_auth_req(struct ieee80211softmac_device *mac, { struct ieee80211softmac_auth_queue_item *auth; unsigned long flags; + DECLARE_MAC_BUF(mac2); if (net->authenticating || net->authenticated) return 0; @@ -43,7 +44,7 @@ ieee80211softmac_auth_req(struct ieee80211softmac_device *mac, /* Add the network if it's not already added */ ieee80211softmac_add_network(mac, net); - dprintk(KERN_NOTICE PFX "Queueing Authentication Request to "MAC_FMT"\n", MAC_ARG(net->bssid)); + dprintk(KERN_NOTICE PFX "Queueing Authentication Request to %s\n", print_mac(mac2, net->bssid)); /* Queue the auth request */ auth = (struct ieee80211softmac_auth_queue_item *) kmalloc(sizeof(struct ieee80211softmac_auth_queue_item), GFP_KERNEL); @@ -61,7 +62,7 @@ ieee80211softmac_auth_req(struct ieee80211softmac_device *mac, /* add to list */ list_add_tail(&auth->list, &mac->auth_queue); - schedule_delayed_work(&auth->work, 0); + queue_delayed_work(mac->wq, &auth->work, 0); spin_unlock_irqrestore(&mac->lock, flags); return 0; @@ -76,6 +77,7 @@ ieee80211softmac_auth_queue(struct work_struct *work) struct ieee80211softmac_auth_queue_item *auth; struct ieee80211softmac_network *net; unsigned long flags; + DECLARE_MAC_BUF(mac2); auth = container_of(work, struct ieee80211softmac_auth_queue_item, work.work); @@ -95,17 +97,18 @@ ieee80211softmac_auth_queue(struct work_struct *work) } net->authenticated = 0; /* add a timeout call so we eventually give up waiting for an auth reply */ - schedule_delayed_work(&auth->work, IEEE80211SOFTMAC_AUTH_TIMEOUT); + queue_delayed_work(mac->wq, &auth->work, IEEE80211SOFTMAC_AUTH_TIMEOUT); auth->retry--; spin_unlock_irqrestore(&mac->lock, flags); if (ieee80211softmac_send_mgt_frame(mac, auth->net, IEEE80211_STYPE_AUTH, auth->state)) - dprintk(KERN_NOTICE PFX "Sending Authentication Request to "MAC_FMT" failed (this shouldn't happen, wait for the timeout).\n", MAC_ARG(net->bssid)); + dprintk(KERN_NOTICE PFX "Sending Authentication Request to %s failed (this shouldn't happen, wait for the timeout).\n", + print_mac(mac2, net->bssid)); else - dprintk(KERN_NOTICE PFX "Sent Authentication Request to "MAC_FMT".\n", MAC_ARG(net->bssid)); + dprintk(KERN_NOTICE PFX "Sent Authentication Request to %s.\n", print_mac(mac2, net->bssid)); return; } - printkl(KERN_WARNING PFX "Authentication timed out with "MAC_FMT"\n", MAC_ARG(net->bssid)); + printkl(KERN_WARNING PFX "Authentication timed out with %s\n", print_mac(mac2, net->bssid)); /* Remove this item 
from the queue */ spin_lock_irqsave(&mac->lock, flags); net->authenticating = 0; @@ -142,6 +145,7 @@ ieee80211softmac_auth_resp(struct net_device *dev, struct ieee80211_auth *auth) struct ieee80211softmac_network *net = NULL; unsigned long flags; u8 * data; + DECLARE_MAC_BUF(mac2); if (unlikely(!mac->running)) return -ENODEV; @@ -161,7 +165,7 @@ ieee80211softmac_auth_resp(struct net_device *dev, struct ieee80211_auth *auth) /* Make sure that we've got an auth queue item for this request */ if(aq == NULL) { - dprintkl(KERN_DEBUG PFX "Authentication response received from "MAC_FMT" but no queue item exists.\n", MAC_ARG(auth->header.addr2)); + dprintkl(KERN_DEBUG PFX "Authentication response received from %s but no queue item exists.\n", print_mac(mac2, auth->header.addr2)); /* Error #? */ return -1; } @@ -169,7 +173,7 @@ ieee80211softmac_auth_resp(struct net_device *dev, struct ieee80211_auth *auth) /* Check for out of order authentication */ if(!net->authenticating) { - dprintkl(KERN_DEBUG PFX "Authentication response received from "MAC_FMT" but did not request authentication.\n",MAC_ARG(auth->header.addr2)); + dprintkl(KERN_DEBUG PFX "Authentication response received from %s but did not request authentication.\n",print_mac(mac2, auth->header.addr2)); return -1; } @@ -187,7 +191,7 @@ ieee80211softmac_auth_resp(struct net_device *dev, struct ieee80211_auth *auth) spin_unlock_irqrestore(&mac->lock, flags); /* Send event */ - printkl(KERN_NOTICE PFX "Open Authentication completed with "MAC_FMT"\n", MAC_ARG(net->bssid)); + printkl(KERN_NOTICE PFX "Open Authentication completed with %s\n", print_mac(mac2, net->bssid)); ieee80211softmac_call_events(mac, IEEE80211SOFTMAC_EVENT_AUTHENTICATED, net); break; default: @@ -197,8 +201,8 @@ ieee80211softmac_auth_resp(struct net_device *dev, struct ieee80211_auth *auth) net->authenticating = 0; spin_unlock_irqrestore(&mac->lock, flags); - printkl(KERN_NOTICE PFX "Open Authentication with "MAC_FMT" failed, error code: %i\n", - MAC_ARG(net->bssid), le16_to_cpup(&auth->status)); + printkl(KERN_NOTICE PFX "Open Authentication with %s failed, error code: %i\n", + print_mac(mac2, net->bssid), le16_to_cpup(&auth->status)); /* Count the error? */ break; } @@ -238,7 +242,7 @@ ieee80211softmac_auth_resp(struct net_device *dev, struct ieee80211_auth *auth) * request. 
*/ cancel_delayed_work(&aq->work); INIT_DELAYED_WORK(&aq->work, &ieee80211softmac_auth_challenge_response); - schedule_delayed_work(&aq->work, 0); + queue_delayed_work(mac->wq, &aq->work, 0); spin_unlock_irqrestore(&mac->lock, flags); return 0; case IEEE80211SOFTMAC_AUTH_SHARED_PASS: @@ -253,13 +257,13 @@ ieee80211softmac_auth_resp(struct net_device *dev, struct ieee80211_auth *auth) net->authenticating = 0; net->authenticated = 1; spin_unlock_irqrestore(&mac->lock, flags); - printkl(KERN_NOTICE PFX "Shared Key Authentication completed with "MAC_FMT"\n", - MAC_ARG(net->bssid)); + printkl(KERN_NOTICE PFX "Shared Key Authentication completed with %s\n", + print_mac(mac2, net->bssid)); ieee80211softmac_call_events(mac, IEEE80211SOFTMAC_EVENT_AUTHENTICATED, net); break; default: - printkl(KERN_NOTICE PFX "Shared Key Authentication with "MAC_FMT" failed, error code: %i\n", - MAC_ARG(net->bssid), le16_to_cpup(&auth->status)); + printkl(KERN_NOTICE PFX "Shared Key Authentication with %s failed, error code: %i\n", + print_mac(mac2, net->bssid), le16_to_cpup(&auth->status)); /* Lock and reset flags */ spin_lock_irqsave(&mac->lock, flags); net->authenticating = 0; @@ -375,6 +379,7 @@ ieee80211softmac_deauth_resp(struct net_device *dev, struct ieee80211_deauth *de struct ieee80211softmac_network *net = NULL; struct ieee80211softmac_device *mac = ieee80211_priv(dev); + DECLARE_MAC_BUF(mac2); if (unlikely(!mac->running)) return -ENODEV; @@ -387,8 +392,8 @@ ieee80211softmac_deauth_resp(struct net_device *dev, struct ieee80211_deauth *de net = ieee80211softmac_get_network_by_bssid(mac, deauth->header.addr2); if (net == NULL) { - dprintkl(KERN_DEBUG PFX "Received deauthentication packet from "MAC_FMT", but that network is unknown.\n", - MAC_ARG(deauth->header.addr2)); + dprintkl(KERN_DEBUG PFX "Received deauthentication packet from %s, but that network is unknown.\n", + print_mac(mac2, deauth->header.addr2)); return 0; } @@ -403,6 +408,6 @@ ieee80211softmac_deauth_resp(struct net_device *dev, struct ieee80211_deauth *de ieee80211softmac_deauth_from_net(mac, net); /* let's try to re-associate */ - schedule_delayed_work(&mac->associnfo.work, 0); + queue_delayed_work(mac->wq, &mac->associnfo.work, 0); return 0; } diff --git a/net/ieee80211/softmac/ieee80211softmac_event.c b/net/ieee80211/softmac/ieee80211softmac_event.c index b3e33a4d48691f7ee3400ff9e22e612ebd49ffe4..8cef05b60f16403dc9f3c9b3cfca1436683edf1c 100644 --- a/net/ieee80211/softmac/ieee80211softmac_event.c +++ b/net/ieee80211/softmac/ieee80211softmac_event.c @@ -172,7 +172,7 @@ ieee80211softmac_call_events_locked(struct ieee80211softmac_device *mac, int eve /* User may have subscribed to ANY event, so * we tell them which event triggered it. 
*/ eventptr->event_type = event; - schedule_delayed_work(&eventptr->work, 0); + queue_delayed_work(mac->wq, &eventptr->work, 0); } } } diff --git a/net/ieee80211/softmac/ieee80211softmac_module.c b/net/ieee80211/softmac/ieee80211softmac_module.c index 6398e6e674936af9be7124f897da0478a681e724..07505ca859af7f7c91f69c54b39a731a24d75cbd 100644 --- a/net/ieee80211/softmac/ieee80211softmac_module.c +++ b/net/ieee80211/softmac/ieee80211softmac_module.c @@ -36,8 +36,13 @@ struct net_device *alloc_ieee80211softmac(int sizeof_priv) dev = alloc_ieee80211(sizeof(*softmac) + sizeof_priv); if (!dev) return NULL; - softmac = ieee80211_priv(dev); + softmac->wq = create_freezeable_workqueue("softmac"); + if (!softmac->wq) { + free_ieee80211(dev); + return NULL; + } + softmac->dev = dev; softmac->ieee = netdev_priv(dev); spin_lock_init(&softmac->lock); @@ -105,7 +110,7 @@ ieee80211softmac_clear_pending_work(struct ieee80211softmac_device *sm) cancel_delayed_work(&eventptr->work); spin_unlock_irqrestore(&sm->lock, flags); - flush_scheduled_work(); + flush_workqueue(sm->wq); /* now we should be save and no longer need locking... */ spin_lock_irqsave(&sm->lock, flags); @@ -139,6 +144,7 @@ void free_ieee80211softmac(struct net_device *dev) ieee80211softmac_clear_pending_work(sm); kfree(sm->scaninfo); kfree(sm->wpa.IE); + destroy_workqueue(sm->wq); free_ieee80211(dev); } EXPORT_SYMBOL_GPL(free_ieee80211softmac); diff --git a/net/ieee80211/softmac/ieee80211softmac_scan.c b/net/ieee80211/softmac/ieee80211softmac_scan.c index abea3648680ebf02b978931fa5c20b3a2e0101fc..bfab8d7db88f8426bf260c8f7d4d823814afa9d9 100644 --- a/net/ieee80211/softmac/ieee80211softmac_scan.c +++ b/net/ieee80211/softmac/ieee80211softmac_scan.c @@ -123,7 +123,7 @@ void ieee80211softmac_scan(struct work_struct *work) spin_unlock_irqrestore(&sm->lock, flags); break; } - schedule_delayed_work(&si->softmac_scan, IEEE80211SOFTMAC_PROBE_DELAY); + queue_delayed_work(sm->wq, &si->softmac_scan, IEEE80211SOFTMAC_PROBE_DELAY); spin_unlock_irqrestore(&sm->lock, flags); return; } else { @@ -190,7 +190,7 @@ int ieee80211softmac_start_scan_implementation(struct net_device *dev) sm->scaninfo->started = 1; sm->scaninfo->stop = 0; INIT_COMPLETION(sm->scaninfo->finished); - schedule_delayed_work(&sm->scaninfo->softmac_scan, 0); + queue_delayed_work(sm->wq, &sm->scaninfo->softmac_scan, 0); spin_unlock_irqrestore(&sm->lock, flags); return 0; } diff --git a/net/ieee80211/softmac/ieee80211softmac_wx.c b/net/ieee80211/softmac/ieee80211softmac_wx.c index 5742dc803b796af7423f36cf871ce60e74fe62fc..ac36767b56e892c63d7eb0c0684b4d1b088f835e 100644 --- a/net/ieee80211/softmac/ieee80211softmac_wx.c +++ b/net/ieee80211/softmac/ieee80211softmac_wx.c @@ -72,6 +72,7 @@ ieee80211softmac_wx_set_essid(struct net_device *net_dev, struct ieee80211softmac_device *sm = ieee80211_priv(net_dev); struct ieee80211softmac_auth_queue_item *authptr; int length = 0; + DECLARE_MAC_BUF(mac); check_assoc_again: mutex_lock(&sm->associnfo.mutex); @@ -90,7 +91,7 @@ ieee80211softmac_wx_set_essid(struct net_device *net_dev, /* We must unlock to avoid deadlocks with the assoc workqueue * on the associnfo.mutex */ mutex_unlock(&sm->associnfo.mutex); - flush_scheduled_work(); + flush_workqueue(sm->wq); /* Avoid race! Check assoc status again. Maybe someone started an * association while we flushed. 
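
The softmac hunks around this point stop sharing the global events workqueue: work is queued with queue_delayed_work() on a per-device workqueue that alloc_ieee80211softmac() creates and free_ieee80211softmac() destroys, so flush_workqueue() only waits on softmac's own work. A compressed sketch of that lifecycle, with demo_dev and demo_fn as placeholder names:

#include <linux/errno.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/workqueue.h>

struct demo_dev {
	struct workqueue_struct *wq;	/* private queue, not keventd */
	struct delayed_work work;
};

static void demo_fn(struct work_struct *work)
{
	struct demo_dev *d = container_of(work, struct demo_dev, work.work);
	/* periodic maintenance for d would go here */
	(void)d;
}

static int demo_init(struct demo_dev *d)
{
	d->wq = create_freezeable_workqueue("demo");
	if (!d->wq)
		return -ENOMEM;
	INIT_DELAYED_WORK(&d->work, demo_fn);
	queue_delayed_work(d->wq, &d->work, 5 * HZ);
	return 0;
}

static void demo_exit(struct demo_dev *d)
{
	cancel_delayed_work(&d->work);	/* drop anything still pending */
	flush_workqueue(d->wq);		/* wait for a running instance */
	destroy_workqueue(d->wq);
}
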
*/ goto check_assoc_again; @@ -113,7 +114,7 @@ ieee80211softmac_wx_set_essid(struct net_device *net_dev, sm->associnfo.associating = 1; /* queue lower level code to do work (if necessary) */ - schedule_delayed_work(&sm->associnfo.work, 0); + queue_delayed_work(sm->wq, &sm->associnfo.work, 0); mutex_unlock(&sm->associnfo.mutex); @@ -348,7 +349,7 @@ ieee80211softmac_wx_set_wap(struct net_device *net_dev, /* force reassociation */ mac->associnfo.bssvalid = 0; if (mac->associnfo.associated) - schedule_delayed_work(&mac->associnfo.work, 0); + queue_delayed_work(mac->wq, &mac->associnfo.work, 0); } else if (is_zero_ether_addr(data->ap_addr.sa_data)) { /* the bssid we have is no longer fixed */ mac->associnfo.bssfixed = 0; @@ -365,7 +366,7 @@ ieee80211softmac_wx_set_wap(struct net_device *net_dev, /* tell the other code that this bssid should be used no matter what */ mac->associnfo.bssfixed = 1; /* queue associate if new bssid or (old one again and not associated) */ - schedule_delayed_work(&mac->associnfo.work, 0); + queue_delayed_work(mac->wq, &mac->associnfo.work, 0); } out: diff --git a/net/ipv4/Kconfig b/net/ipv4/Kconfig index fb7909774254f777636744bf086e8f152302e3e2..d894f616c3d63ce732073d90f185441f3411da75 100644 --- a/net/ipv4/Kconfig +++ b/net/ipv4/Kconfig @@ -394,6 +394,14 @@ config INET_XFRM_MODE_BEET If unsure, say Y. +config INET_LRO + tristate "Large Receive Offload (ipv4/tcp)" + + ---help--- + Support for Large Receive Offload (ipv4/tcp). + + If unsure, say Y. + config INET_DIAG tristate "INET: socket monitoring interface" default y diff --git a/net/ipv4/Makefile b/net/ipv4/Makefile index fbf1674e0c2ab1e92cbd23df734b9240e6843841..a02c36d0a13e4581efd11ebbf9a77ab75f7f010f 100644 --- a/net/ipv4/Makefile +++ b/net/ipv4/Makefile @@ -25,6 +25,7 @@ obj-$(CONFIG_INET_ESP) += esp4.o obj-$(CONFIG_INET_IPCOMP) += ipcomp.o obj-$(CONFIG_INET_XFRM_TUNNEL) += xfrm4_tunnel.o obj-$(CONFIG_INET_XFRM_MODE_BEET) += xfrm4_mode_beet.o +obj-$(CONFIG_INET_LRO) += inet_lro.o obj-$(CONFIG_INET_TUNNEL) += tunnel4.o obj-$(CONFIG_INET_XFRM_MODE_TRANSPORT) += xfrm4_mode_transport.o obj-$(CONFIG_INET_XFRM_MODE_TUNNEL) += xfrm4_mode_tunnel.o diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c index e68103475cca2a76a349d270d1db72c62132b4c7..621b128897d7af80015506b4ae7abb8116ad6d23 100644 --- a/net/ipv4/af_inet.c +++ b/net/ipv4/af_inet.c @@ -241,7 +241,7 @@ EXPORT_SYMBOL(build_ehash_secret); * Create an inet socket. 
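
econet_create() earlier in this patch and inet_create() just below both grow a struct net argument: families that are not namespace-aware yet refuse anything other than init_net and forward the namespace to sk_alloc(). The bare pattern looks roughly like the sketch below; demo_create, demo_proto and the PF_INET family value are placeholders, not part of the patch:

#include <linux/errno.h>
#include <linux/module.h>
#include <linux/net.h>
#include <net/net_namespace.h>
#include <net/sock.h>

static struct proto demo_proto = {
	.name	  = "DEMO",
	.owner	  = THIS_MODULE,
	.obj_size = sizeof(struct sock),
};

static int demo_create(struct net *net, struct socket *sock, int protocol)
{
	struct sock *sk;

	if (net != &init_net)		/* only the initial namespace */
		return -EAFNOSUPPORT;
	if (sock->type != SOCK_DGRAM)
		return -ESOCKTNOSUPPORT;

	sk = sk_alloc(net, PF_INET, GFP_KERNEL, &demo_proto, 1);
	if (sk == NULL)
		return -ENOBUFS;
	sock_init_data(sock, sk);
	return 0;
}
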
*/ -static int inet_create(struct socket *sock, int protocol) +static int inet_create(struct net *net, struct socket *sock, int protocol) { struct sock *sk; struct list_head *p; @@ -253,6 +253,9 @@ static int inet_create(struct socket *sock, int protocol) int try_loading_module = 0; int err; + if (net != &init_net) + return -EAFNOSUPPORT; + if (sock->type != SOCK_RAW && sock->type != SOCK_DGRAM && !inet_ehash_secret) @@ -320,7 +323,7 @@ static int inet_create(struct socket *sock, int protocol) BUG_TRAP(answer_prot->slab != NULL); err = -ENOBUFS; - sk = sk_alloc(PF_INET, GFP_KERNEL, answer_prot, 1); + sk = sk_alloc(net, PF_INET, GFP_KERNEL, answer_prot, 1); if (sk == NULL) goto out; @@ -939,7 +942,7 @@ static struct inet_protosw inetsw_array[] = } }; -#define INETSW_ARRAY_LEN (sizeof(inetsw_array) / sizeof(struct inet_protosw)) +#define INETSW_ARRAY_LEN ARRAY_SIZE(inetsw_array) void inet_register_protosw(struct inet_protosw *p) { @@ -1299,6 +1302,10 @@ static int __init init_ipv4_mibs(void) sizeof(struct icmp_mib), __alignof__(struct icmp_mib)) < 0) goto err_icmp_mib; + if (snmp_mib_init((void **)icmpmsg_statistics, + sizeof(struct icmpmsg_mib), + __alignof__(struct icmpmsg_mib)) < 0) + goto err_icmpmsg_mib; if (snmp_mib_init((void **)tcp_statistics, sizeof(struct tcp_mib), __alignof__(struct tcp_mib)) < 0) @@ -1321,6 +1328,8 @@ static int __init init_ipv4_mibs(void) err_udp_mib: snmp_mib_free((void **)tcp_statistics); err_tcp_mib: + snmp_mib_free((void **)icmpmsg_statistics); +err_icmpmsg_mib: snmp_mib_free((void **)icmp_statistics); err_icmp_mib: snmp_mib_free((void **)ip_statistics); diff --git a/net/ipv4/ah4.c b/net/ipv4/ah4.c index 39f6211f1496050957ba6082d36a01799822c21b..4e8e3b079f5b606cf7c70aed28563889b2202aa7 100644 --- a/net/ipv4/ah4.c +++ b/net/ipv4/ah4.c @@ -5,6 +5,7 @@ #include #include #include +#include #include #include #include @@ -65,6 +66,7 @@ static int ah_output(struct xfrm_state *x, struct sk_buff *skb) char buf[60]; } tmp_iph; + skb_push(skb, -skb_network_offset(skb)); top_iph = ip_hdr(skb); iph = &tmp_iph.iph; @@ -80,28 +82,30 @@ static int ah_output(struct xfrm_state *x, struct sk_buff *skb) goto error; } - ah = (struct ip_auth_hdr *)((char *)top_iph+top_iph->ihl*4); - ah->nexthdr = top_iph->protocol; + ah = ip_auth_hdr(skb); + ah->nexthdr = *skb_mac_header(skb); + *skb_mac_header(skb) = IPPROTO_AH; top_iph->tos = 0; top_iph->tot_len = htons(skb->len); top_iph->frag_off = 0; top_iph->ttl = 0; - top_iph->protocol = IPPROTO_AH; top_iph->check = 0; ahp = x->data; - ah->hdrlen = (XFRM_ALIGN8(sizeof(struct ip_auth_hdr) + - ahp->icv_trunc_len) >> 2) - 2; + ah->hdrlen = (XFRM_ALIGN8(sizeof(*ah) + ahp->icv_trunc_len) >> 2) - 2; ah->reserved = 0; ah->spi = x->id.spi; - ah->seq_no = htonl(++x->replay.oseq); - xfrm_aevent_doreplay(x); + ah->seq_no = htonl(XFRM_SKB_CB(skb)->seq); + + spin_lock_bh(&x->lock); err = ah_mac_digest(ahp, skb, ah->auth_data); + memcpy(ah->auth_data, ahp->work_icv, ahp->icv_trunc_len); + spin_unlock_bh(&x->lock); + if (err) goto error; - memcpy(ah->auth_data, ahp->work_icv, ahp->icv_trunc_len); top_iph->tos = iph->tos; top_iph->ttl = iph->ttl; @@ -111,8 +115,6 @@ static int ah_output(struct xfrm_state *x, struct sk_buff *skb) memcpy(top_iph+1, iph+1, top_iph->ihl*4 - sizeof(struct iphdr)); } - ip_send_check(top_iph); - err = 0; error: @@ -123,21 +125,23 @@ static int ah_input(struct xfrm_state *x, struct sk_buff *skb) { int ah_hlen; int ihl; + int nexthdr; int err = -EINVAL; struct iphdr *iph; struct ip_auth_hdr *ah; struct ah_data *ahp; char 
work_buf[60]; - if (!pskb_may_pull(skb, sizeof(struct ip_auth_hdr))) + if (!pskb_may_pull(skb, sizeof(*ah))) goto out; - ah = (struct ip_auth_hdr*)skb->data; + ah = (struct ip_auth_hdr *)skb->data; ahp = x->data; + nexthdr = ah->nexthdr; ah_hlen = (ah->hdrlen + 2) << 2; - if (ah_hlen != XFRM_ALIGN8(sizeof(struct ip_auth_hdr) + ahp->icv_full_len) && - ah_hlen != XFRM_ALIGN8(sizeof(struct ip_auth_hdr) + ahp->icv_trunc_len)) + if (ah_hlen != XFRM_ALIGN8(sizeof(*ah) + ahp->icv_full_len) && + ah_hlen != XFRM_ALIGN8(sizeof(*ah) + ahp->icv_trunc_len)) goto out; if (!pskb_may_pull(skb, ah_hlen)) @@ -151,7 +155,7 @@ static int ah_input(struct xfrm_state *x, struct sk_buff *skb) skb->ip_summed = CHECKSUM_NONE; - ah = (struct ip_auth_hdr*)skb->data; + ah = (struct ip_auth_hdr *)skb->data; iph = ip_hdr(skb); ihl = skb->data - skb_network_header(skb); @@ -180,13 +184,12 @@ static int ah_input(struct xfrm_state *x, struct sk_buff *skb) goto out; } } - ((struct iphdr*)work_buf)->protocol = ah->nexthdr; skb->network_header += ah_hlen; memcpy(skb_network_header(skb), work_buf, ihl); skb->transport_header = skb->network_header; __skb_pull(skb, ah_hlen + ihl); - return 0; + return nexthdr; out: return err; @@ -219,10 +222,6 @@ static int ah_init_state(struct xfrm_state *x) if (!x->aalg) goto error; - /* null auth can use a zero length key */ - if (x->aalg->alg_key_len > 512) - goto error; - if (x->encap) goto error; @@ -230,14 +229,13 @@ static int ah_init_state(struct xfrm_state *x) if (ahp == NULL) return -ENOMEM; - ahp->key = x->aalg->alg_key; - ahp->key_len = (x->aalg->alg_key_len+7)/8; tfm = crypto_alloc_hash(x->aalg->alg_name, 0, CRYPTO_ALG_ASYNC); if (IS_ERR(tfm)) goto error; ahp->tfm = tfm; - if (crypto_hash_setkey(tfm, ahp->key, ahp->key_len)) + if (crypto_hash_setkey(tfm, x->aalg->alg_key, + (x->aalg->alg_key_len + 7) / 8)) goto error; /* @@ -266,7 +264,8 @@ static int ah_init_state(struct xfrm_state *x) if (!ahp->work_icv) goto error; - x->props.header_len = XFRM_ALIGN8(sizeof(struct ip_auth_hdr) + ahp->icv_trunc_len); + x->props.header_len = XFRM_ALIGN8(sizeof(struct ip_auth_hdr) + + ahp->icv_trunc_len); if (x->props.mode == XFRM_MODE_TUNNEL) x->props.header_len += sizeof(struct iphdr); x->data = ahp; @@ -302,6 +301,7 @@ static struct xfrm_type ah_type = .description = "AH4", .owner = THIS_MODULE, .proto = IPPROTO_AH, + .flags = XFRM_TYPE_REPLAY_PROT, .init_state = ah_init_state, .destructor = ah_destroy, .input = ah_input, diff --git a/net/ipv4/arp.c b/net/ipv4/arp.c index 9ab9d534fbac28597f0a5b2b5c0c07d3e4485f29..36d6798947b59026eb400c6b06882099fddd8ba0 100644 --- a/net/ipv4/arp.c +++ b/net/ipv4/arp.c @@ -103,6 +103,7 @@ #include #endif +#include #include #include #include @@ -252,7 +253,7 @@ static int arp_constructor(struct neighbour *neigh) neigh->parms = neigh_parms_clone(parms); rcu_read_unlock(); - if (dev->hard_header == NULL) { + if (!dev->header_ops) { neigh->nud_state = NUD_NOARP; neigh->ops = &arp_direct_ops; neigh->output = neigh->ops->queue_xmit; @@ -309,10 +310,12 @@ static int arp_constructor(struct neighbour *neigh) neigh->nud_state = NUD_NOARP; memcpy(neigh->ha, dev->broadcast, dev->addr_len); } - if (dev->hard_header_cache) + + if (dev->header_ops->cache) neigh->ops = &arp_hh_ops; else neigh->ops = &arp_generic_ops; + if (neigh->nud_state&NUD_VALID) neigh->output = neigh->ops->connected_output; else @@ -590,8 +593,7 @@ struct sk_buff *arp_create(int type, int ptype, __be32 dest_ip, /* * Fill the device header for the ARP frame */ - if (dev->hard_header && - 
dev->hard_header(skb,dev,ptype,dest_hw,src_hw,skb->len) < 0) + if (dev_hard_header(skb, dev, ptype, dest_hw, src_hw, skb->len) < 0) goto out; /* @@ -931,6 +933,9 @@ static int arp_rcv(struct sk_buff *skb, struct net_device *dev, { struct arphdr *arp; + if (dev->nd_net != &init_net) + goto freeskb; + /* ARP header, plus 2 device addresses, plus 2 IP addresses. */ if (!pskb_may_pull(skb, (sizeof(struct arphdr) + (2 * dev->addr_len) + @@ -977,7 +982,7 @@ static int arp_req_set(struct arpreq *r, struct net_device * dev) if (mask && mask != htonl(0xFFFFFFFF)) return -EINVAL; if (!dev && (r->arp_flags & ATF_COM)) { - dev = dev_getbyhwaddr(r->arp_ha.sa_family, r->arp_ha.sa_data); + dev = dev_getbyhwaddr(&init_net, r->arp_ha.sa_family, r->arp_ha.sa_data); if (!dev) return -ENODEV; } @@ -1165,7 +1170,7 @@ int arp_ioctl(unsigned int cmd, void __user *arg) rtnl_lock(); if (r.arp_dev[0]) { err = -ENODEV; - if ((dev = __dev_get_by_name(r.arp_dev)) == NULL) + if ((dev = __dev_get_by_name(&init_net, r.arp_dev)) == NULL) goto out; /* Mmmm... It is wrong... ARPHRD_NETROM==0 */ @@ -1201,6 +1206,9 @@ static int arp_netdev_event(struct notifier_block *this, unsigned long event, vo { struct net_device *dev = ptr; + if (dev->nd_net != &init_net) + return NOTIFY_DONE; + switch (event) { case NETDEV_CHANGEADDR: neigh_changeaddr(&arp_tbl, dev); @@ -1370,24 +1378,8 @@ static const struct seq_operations arp_seq_ops = { static int arp_seq_open(struct inode *inode, struct file *file) { - struct seq_file *seq; - int rc = -ENOMEM; - struct neigh_seq_state *s = kzalloc(sizeof(*s), GFP_KERNEL); - - if (!s) - goto out; - - rc = seq_open(file, &arp_seq_ops); - if (rc) - goto out_kfree; - - seq = file->private_data; - seq->private = s; -out: - return rc; -out_kfree: - kfree(s); - goto out; + return seq_open_private(file, &arp_seq_ops, + sizeof(struct neigh_seq_state)); } static const struct file_operations arp_seq_fops = { @@ -1400,7 +1392,7 @@ static const struct file_operations arp_seq_fops = { static int __init arp_proc_init(void) { - if (!proc_net_fops_create("arp", S_IRUGO, &arp_seq_fops)) + if (!proc_net_fops_create(&init_net, "arp", S_IRUGO, &arp_seq_fops)) return -ENOMEM; return 0; } diff --git a/net/ipv4/cipso_ipv4.c b/net/ipv4/cipso_ipv4.c index ab56a052ce31cb53e873df670a4afdc8dc68972a..805a78e6ed55b0fb497632ae1b88aab4743a82ac 100644 --- a/net/ipv4/cipso_ipv4.c +++ b/net/ipv4/cipso_ipv4.c @@ -1831,67 +1831,74 @@ int cipso_v4_sock_setattr(struct sock *sk, } /** - * cipso_v4_sock_getattr - Get the security attributes from a sock - * @sk: the sock + * cipso_v4_getattr - Helper function for the cipso_v4_*_getattr functions + * @cipso: the CIPSO v4 option * @secattr: the security attributes * * Description: - * Query @sk to see if there is a CIPSO option attached to the sock and if - * there is return the CIPSO security attributes in @secattr. This function - * requires that @sk be locked, or privately held, but it does not do any - * locking itself. Returns zero on success and negative values on failure. + * Inspect @cipso and return the security attributes in @secattr. Returns zero + * on success and negative values on failure. 
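
dn_neigh.c, dn_route.c and arp.c in this patch all collapse the same seq_file boilerplate (kzalloc a per-open state, seq_open(), attach it to seq->private, unwind on error) into a single seq_open_private() call, which allocates a zeroed private area of the requested size. A usage sketch; demo_seq_open, demo_seq_ops and demo_iter_state are placeholder names:

#include <linux/fs.h>
#include <linux/seq_file.h>

struct demo_iter_state {	/* per-open cursor, placeholder */
	int bucket;
};

static const struct seq_operations demo_seq_ops;	/* start/next/stop/show assumed elsewhere */

static int demo_seq_open(struct inode *inode, struct file *file)
{
	/* replaces roughly 15 lines of kzalloc + seq_open + error unwinding */
	return seq_open_private(file, &demo_seq_ops,
				sizeof(struct demo_iter_state));
}
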
* */ -int cipso_v4_sock_getattr(struct sock *sk, struct netlbl_lsm_secattr *secattr) +static int cipso_v4_getattr(const unsigned char *cipso, + struct netlbl_lsm_secattr *secattr) { int ret_val = -ENOMSG; - struct inet_sock *sk_inet; - unsigned char *cipso_ptr; u32 doi; struct cipso_v4_doi *doi_def; - sk_inet = inet_sk(sk); - if (sk_inet->opt == NULL || sk_inet->opt->cipso == 0) - return -ENOMSG; - cipso_ptr = sk_inet->opt->__data + sk_inet->opt->cipso - - sizeof(struct iphdr); - ret_val = cipso_v4_cache_check(cipso_ptr, cipso_ptr[1], secattr); - if (ret_val == 0) - return ret_val; + if (cipso_v4_cache_check(cipso, cipso[1], secattr) == 0) + return 0; - doi = ntohl(get_unaligned((__be32 *)&cipso_ptr[2])); + doi = ntohl(get_unaligned((__be32 *)&cipso[2])); rcu_read_lock(); doi_def = cipso_v4_doi_search(doi); - if (doi_def == NULL) { - rcu_read_unlock(); - return -ENOMSG; - } - + if (doi_def == NULL) + goto getattr_return; /* XXX - This code assumes only one tag per CIPSO option which isn't * really a good assumption to make but since we only support the MAC * tags right now it is a safe assumption. */ - switch (cipso_ptr[6]) { + switch (cipso[6]) { case CIPSO_V4_TAG_RBITMAP: - ret_val = cipso_v4_parsetag_rbm(doi_def, - &cipso_ptr[6], - secattr); + ret_val = cipso_v4_parsetag_rbm(doi_def, &cipso[6], secattr); break; case CIPSO_V4_TAG_ENUM: - ret_val = cipso_v4_parsetag_enum(doi_def, - &cipso_ptr[6], - secattr); + ret_val = cipso_v4_parsetag_enum(doi_def, &cipso[6], secattr); break; case CIPSO_V4_TAG_RANGE: - ret_val = cipso_v4_parsetag_rng(doi_def, - &cipso_ptr[6], - secattr); + ret_val = cipso_v4_parsetag_rng(doi_def, &cipso[6], secattr); break; } - rcu_read_unlock(); +getattr_return: + rcu_read_unlock(); return ret_val; } +/** + * cipso_v4_sock_getattr - Get the security attributes from a sock + * @sk: the sock + * @secattr: the security attributes + * + * Description: + * Query @sk to see if there is a CIPSO option attached to the sock and if + * there is return the CIPSO security attributes in @secattr. This function + * requires that @sk be locked, or privately held, but it does not do any + * locking itself. Returns zero on success and negative values on failure. + * + */ +int cipso_v4_sock_getattr(struct sock *sk, struct netlbl_lsm_secattr *secattr) +{ + struct ip_options *opt; + + opt = inet_sk(sk)->opt; + if (opt == NULL || opt->cipso == 0) + return -ENOMSG; + + return cipso_v4_getattr(opt->__data + opt->cipso - sizeof(struct iphdr), + secattr); +} + /** * cipso_v4_skbuff_getattr - Get the security attributes from the CIPSO option * @skb: the packet @@ -1905,45 +1912,7 @@ int cipso_v4_sock_getattr(struct sock *sk, struct netlbl_lsm_secattr *secattr) int cipso_v4_skbuff_getattr(const struct sk_buff *skb, struct netlbl_lsm_secattr *secattr) { - int ret_val = -ENOMSG; - unsigned char *cipso_ptr; - u32 doi; - struct cipso_v4_doi *doi_def; - - cipso_ptr = CIPSO_V4_OPTPTR(skb); - if (cipso_v4_cache_check(cipso_ptr, cipso_ptr[1], secattr) == 0) - return 0; - - doi = ntohl(get_unaligned((__be32 *)&cipso_ptr[2])); - rcu_read_lock(); - doi_def = cipso_v4_doi_search(doi); - if (doi_def == NULL) - goto skbuff_getattr_return; - - /* XXX - This code assumes only one tag per CIPSO option which isn't - * really a good assumption to make but since we only support the MAC - * tags right now it is a safe assumption. 
*/ - switch (cipso_ptr[6]) { - case CIPSO_V4_TAG_RBITMAP: - ret_val = cipso_v4_parsetag_rbm(doi_def, - &cipso_ptr[6], - secattr); - break; - case CIPSO_V4_TAG_ENUM: - ret_val = cipso_v4_parsetag_enum(doi_def, - &cipso_ptr[6], - secattr); - break; - case CIPSO_V4_TAG_RANGE: - ret_val = cipso_v4_parsetag_rng(doi_def, - &cipso_ptr[6], - secattr); - break; - } - -skbuff_getattr_return: - rcu_read_unlock(); - return ret_val; + return cipso_v4_getattr(CIPSO_V4_OPTPTR(skb), secattr); } /* diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c index 5dbe5803b7d591968a6c1376f37f25692ea1c302..55d199e4ae212813dd737a9cf2706b7f9cccbe38 100644 --- a/net/ipv4/devinet.c +++ b/net/ipv4/devinet.c @@ -203,8 +203,6 @@ static void inetdev_destroy(struct in_device *in_dev) ASSERT_RTNL(); dev = in_dev->dev; - if (dev == &loopback_dev) - return; in_dev->dead = 1; @@ -420,7 +418,7 @@ struct in_device *inetdev_by_index(int ifindex) struct net_device *dev; struct in_device *in_dev = NULL; read_lock(&dev_base_lock); - dev = __dev_get_by_index(ifindex); + dev = __dev_get_by_index(&init_net, ifindex); if (dev) in_dev = in_dev_get(dev); read_unlock(&dev_base_lock); @@ -506,7 +504,7 @@ static struct in_ifaddr *rtm_to_ifaddr(struct nlmsghdr *nlh) goto errout; } - dev = __dev_get_by_index(ifm->ifa_index); + dev = __dev_get_by_index(&init_net, ifm->ifa_index); if (dev == NULL) { err = -ENODEV; goto errout; @@ -628,7 +626,7 @@ int devinet_ioctl(unsigned int cmd, void __user *arg) *colon = 0; #ifdef CONFIG_KMOD - dev_load(ifr.ifr_name); + dev_load(&init_net, ifr.ifr_name); #endif switch (cmd) { @@ -669,7 +667,7 @@ int devinet_ioctl(unsigned int cmd, void __user *arg) rtnl_lock(); ret = -ENODEV; - if ((dev = __dev_get_by_name(ifr.ifr_name)) == NULL) + if ((dev = __dev_get_by_name(&init_net, ifr.ifr_name)) == NULL) goto done; if (colon) @@ -909,7 +907,7 @@ __be32 inet_select_addr(const struct net_device *dev, __be32 dst, int scope) */ read_lock(&dev_base_lock); rcu_read_lock(); - for_each_netdev(dev) { + for_each_netdev(&init_net, dev) { if ((in_dev = __in_dev_get_rcu(dev)) == NULL) continue; @@ -988,7 +986,7 @@ __be32 inet_confirm_addr(const struct net_device *dev, __be32 dst, __be32 local, read_lock(&dev_base_lock); rcu_read_lock(); - for_each_netdev(dev) { + for_each_netdev(&init_net, dev) { if ((in_dev = __in_dev_get_rcu(dev))) { addr = confirm_addr_indev(in_dev, dst, local, scope); if (addr) @@ -1051,6 +1049,9 @@ static int inetdev_event(struct notifier_block *this, unsigned long event, struct net_device *dev = ptr; struct in_device *in_dev = __in_dev_get_rtnl(dev); + if (dev->nd_net != &init_net) + return NOTIFY_DONE; + ASSERT_RTNL(); if (!in_dev) { @@ -1058,7 +1059,7 @@ static int inetdev_event(struct notifier_block *this, unsigned long event, in_dev = inetdev_init(dev); if (!in_dev) return notifier_from_errno(-ENOMEM); - if (dev == &loopback_dev) { + if (dev->flags & IFF_LOOPBACK) { IN_DEV_CONF_SET(in_dev, NOXFRM, 1); IN_DEV_CONF_SET(in_dev, NOPOLICY, 1); } @@ -1074,7 +1075,7 @@ static int inetdev_event(struct notifier_block *this, unsigned long event, case NETDEV_UP: if (dev->mtu < 68) break; - if (dev == &loopback_dev) { + if (dev->flags & IFF_LOOPBACK) { struct in_ifaddr *ifa; if ((ifa = inet_alloc_ifa()) != NULL) { ifa->ifa_local = @@ -1182,7 +1183,7 @@ static int inet_dump_ifaddr(struct sk_buff *skb, struct netlink_callback *cb) s_ip_idx = ip_idx = cb->args[1]; idx = 0; - for_each_netdev(dev) { + for_each_netdev(&init_net, dev) { if (idx < s_idx) goto cont; if (idx > s_idx) @@ -1241,7 +1242,7 @@ static void 
devinet_copy_dflt_conf(int i) struct net_device *dev; read_lock(&dev_base_lock); - for_each_netdev(dev) { + for_each_netdev(&init_net, dev) { struct in_device *in_dev; rcu_read_lock(); in_dev = __in_dev_get_rcu(dev); @@ -1330,7 +1331,7 @@ void inet_forward_change(void) IPV4_DEVCONF_DFLT(FORWARDING) = on; read_lock(&dev_base_lock); - for_each_netdev(dev) { + for_each_netdev(&init_net, dev) { struct in_device *in_dev; rcu_read_lock(); in_dev = __in_dev_get_rcu(dev); diff --git a/net/ipv4/esp4.c b/net/ipv4/esp4.c index 98767a4f1185befb14c519fb73395056066f159f..6b1a31a74cf2add02caf27ee4b9ee5cad35f30cc 100644 --- a/net/ipv4/esp4.c +++ b/net/ipv4/esp4.c @@ -8,6 +8,7 @@ #include #include #include +#include #include #include #include @@ -15,7 +16,6 @@ static int esp_output(struct xfrm_state *x, struct sk_buff *skb) { int err; - struct iphdr *top_iph; struct ip_esp_hdr *esph; struct crypto_blkcipher *tfm; struct blkcipher_desc desc; @@ -27,9 +27,7 @@ static int esp_output(struct xfrm_state *x, struct sk_buff *skb) int alen; int nfrags; - /* Strip IP+ESP header. */ - __skb_pull(skb, skb_transport_offset(skb)); - /* Now skb is pure payload to encrypt */ + /* skb is pure payload to encrypt */ err = -ENOMEM; @@ -59,12 +57,12 @@ static int esp_output(struct xfrm_state *x, struct sk_buff *skb) tail[clen - skb->len - 2] = (clen - skb->len) - 2; pskb_put(skb, trailer, clen - skb->len); - __skb_push(skb, skb->data - skb_network_header(skb)); - top_iph = ip_hdr(skb); - esph = (struct ip_esp_hdr *)(skb_network_header(skb) + - top_iph->ihl * 4); - top_iph->tot_len = htons(skb->len + alen); - *(skb_tail_pointer(trailer) - 1) = top_iph->protocol; + skb_push(skb, -skb_network_offset(skb)); + esph = ip_esp_hdr(skb); + *(skb_tail_pointer(trailer) - 1) = *skb_mac_header(skb); + *skb_mac_header(skb) = IPPROTO_ESP; + + spin_lock_bh(&x->lock); /* this is non-NULL only with UDP Encapsulation */ if (x->encap) { @@ -75,7 +73,7 @@ static int esp_output(struct xfrm_state *x, struct sk_buff *skb) uh = (struct udphdr *)esph; uh->source = encap->encap_sport; uh->dest = encap->encap_dport; - uh->len = htons(skb->len + alen - top_iph->ihl*4); + uh->len = htons(skb->len + alen - skb_transport_offset(skb)); uh->check = 0; switch (encap->encap_type) { @@ -90,13 +88,11 @@ static int esp_output(struct xfrm_state *x, struct sk_buff *skb) break; } - top_iph->protocol = IPPROTO_UDP; - } else - top_iph->protocol = IPPROTO_ESP; + *skb_mac_header(skb) = IPPROTO_UDP; + } esph->spi = x->id.spi; - esph->seq_no = htonl(++x->replay.oseq); - xfrm_aevent_doreplay(x); + esph->seq_no = htonl(XFRM_SKB_CB(skb)->seq); if (esp->conf.ivlen) { if (unlikely(!esp->conf.ivinitted)) { @@ -112,7 +108,7 @@ static int esp_output(struct xfrm_state *x, struct sk_buff *skb) if (unlikely(nfrags > ESP_NUM_FAST_SG)) { sg = kmalloc(sizeof(struct scatterlist)*nfrags, GFP_ATOMIC); if (!sg) - goto error; + goto unlock; } skb_to_sgvec(skb, sg, esph->enc_data+esp->conf.ivlen-skb->data, clen); err = crypto_blkcipher_encrypt(&desc, sg, sg, clen); @@ -121,7 +117,7 @@ static int esp_output(struct xfrm_state *x, struct sk_buff *skb) } while (0); if (unlikely(err)) - goto error; + goto unlock; if (esp->conf.ivlen) { memcpy(esph->enc_data, esp->conf.ivec, esp->conf.ivlen); @@ -134,7 +130,8 @@ static int esp_output(struct xfrm_state *x, struct sk_buff *skb) memcpy(pskb_put(skb, trailer, alen), esp->auth.work_icv, alen); } - ip_send_check(top_iph); +unlock: + spin_unlock_bh(&x->lock); error: return err; @@ -155,7 +152,7 @@ static int esp_input(struct xfrm_state *x, struct sk_buff 
*skb) struct sk_buff *trailer; int blksize = ALIGN(crypto_blkcipher_blocksize(tfm), 4); int alen = esp->auth.icv_trunc_len; - int elen = skb->len - sizeof(struct ip_esp_hdr) - esp->conf.ivlen - alen; + int elen = skb->len - sizeof(*esph) - esp->conf.ivlen - alen; int nfrags; int ihl; u8 nexthdr[2]; @@ -163,7 +160,7 @@ static int esp_input(struct xfrm_state *x, struct sk_buff *skb) int padlen; int err; - if (!pskb_may_pull(skb, sizeof(struct ip_esp_hdr))) + if (!pskb_may_pull(skb, sizeof(*esph))) goto out; if (elen <= 0 || (elen & (blksize-1))) @@ -191,7 +188,7 @@ static int esp_input(struct xfrm_state *x, struct sk_buff *skb) skb->ip_summed = CHECKSUM_NONE; - esph = (struct ip_esp_hdr*)skb->data; + esph = (struct ip_esp_hdr *)skb->data; /* Get ivec. This can be wrong, check against another impls. */ if (esp->conf.ivlen) @@ -204,7 +201,7 @@ static int esp_input(struct xfrm_state *x, struct sk_buff *skb) if (!sg) goto out; } - skb_to_sgvec(skb, sg, sizeof(struct ip_esp_hdr) + esp->conf.ivlen, elen); + skb_to_sgvec(skb, sg, sizeof(*esph) + esp->conf.ivlen, elen); err = crypto_blkcipher_decrypt(&desc, sg, sg, elen); if (unlikely(sg != &esp->sgbuf[0])) kfree(sg); @@ -256,17 +253,15 @@ static int esp_input(struct xfrm_state *x, struct sk_buff *skb) * as per draft-ietf-ipsec-udp-encaps-06, * section 3.1.2 */ - if (x->props.mode == XFRM_MODE_TRANSPORT || - x->props.mode == XFRM_MODE_BEET) + if (x->props.mode == XFRM_MODE_TRANSPORT) skb->ip_summed = CHECKSUM_UNNECESSARY; } - iph->protocol = nexthdr[1]; pskb_trim(skb, skb->len - alen - padlen - 2); __skb_pull(skb, sizeof(*esph) + esp->conf.ivlen); skb_set_transport_header(skb, -ihl); - return 0; + return nexthdr[1]; out: return -EINVAL; @@ -343,11 +338,6 @@ static int esp_init_state(struct xfrm_state *x) struct crypto_blkcipher *tfm; u32 align; - /* null auth and encryption can have zero length keys */ - if (x->aalg) { - if (x->aalg->alg_key_len > 512) - goto error; - } if (x->ealg == NULL) goto error; @@ -359,15 +349,14 @@ static int esp_init_state(struct xfrm_state *x) struct xfrm_algo_desc *aalg_desc; struct crypto_hash *hash; - esp->auth.key = x->aalg->alg_key; - esp->auth.key_len = (x->aalg->alg_key_len+7)/8; hash = crypto_alloc_hash(x->aalg->alg_name, 0, CRYPTO_ALG_ASYNC); if (IS_ERR(hash)) goto error; esp->auth.tfm = hash; - if (crypto_hash_setkey(hash, esp->auth.key, esp->auth.key_len)) + if (crypto_hash_setkey(hash, x->aalg->alg_key, + (x->aalg->alg_key_len + 7) / 8)) goto error; aalg_desc = xfrm_aalg_get_byname(x->aalg->alg_name, 0); @@ -389,8 +378,7 @@ static int esp_init_state(struct xfrm_state *x) if (!esp->auth.work_icv) goto error; } - esp->conf.key = x->ealg->alg_key; - esp->conf.key_len = (x->ealg->alg_key_len+7)/8; + tfm = crypto_alloc_blkcipher(x->ealg->alg_name, 0, CRYPTO_ALG_ASYNC); if (IS_ERR(tfm)) goto error; @@ -403,7 +391,8 @@ static int esp_init_state(struct xfrm_state *x) goto error; esp->conf.ivinitted = 0; } - if (crypto_blkcipher_setkey(tfm, esp->conf.key, esp->conf.key_len)) + if (crypto_blkcipher_setkey(tfm, x->ealg->alg_key, + (x->ealg->alg_key_len + 7) / 8)) goto error; x->props.header_len = sizeof(struct ip_esp_hdr) + esp->conf.ivlen; if (x->props.mode == XFRM_MODE_TUNNEL) @@ -443,6 +432,7 @@ static struct xfrm_type esp_type = .description = "ESP4", .owner = THIS_MODULE, .proto = IPPROTO_ESP, + .flags = XFRM_TYPE_REPLAY_PROT, .init_state = esp_init_state, .destructor = esp_destroy, .get_mtu = esp4_get_mtu, diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c index 
eff6bce453eed8c178f70863a934dc5207c2c17b..78b514ba1414a8ebd22b0b0a423110b61b6326ce 100644 --- a/net/ipv4/fib_frontend.c +++ b/net/ipv4/fib_frontend.c @@ -49,6 +49,8 @@ #define FFprint(a...) printk(KERN_DEBUG a) +static struct sock *fibnl; + #ifndef CONFIG_IP_MULTIPLE_TABLES struct fib_table *ip_fib_local_table; @@ -334,7 +336,7 @@ static int rtentry_to_fib_config(int cmd, struct rtentry *rt, colon = strchr(devname, ':'); if (colon) *colon = 0; - dev = __dev_get_by_name(devname); + dev = __dev_get_by_name(&init_net, devname); if (!dev) return -ENODEV; cfg->fc_oif = dev->ifindex; @@ -487,7 +489,7 @@ static int rtm_to_fib_config(struct sk_buff *skb, struct nlmsghdr *nlh, } nlmsg_for_each_attr(attr, nlh, sizeof(struct rtmsg), remaining) { - switch (attr->nla_type) { + switch (nla_type(attr)) { case RTA_DST: cfg->fc_dst = nla_get_be32(attr); break; @@ -784,17 +786,12 @@ static void nl_fib_lookup(struct fib_result_nl *frn, struct fib_table *tb ) } } -static void nl_fib_input(struct sock *sk, int len) +static void nl_fib_input(struct sk_buff *skb) { - struct sk_buff *skb = NULL; - struct nlmsghdr *nlh = NULL; struct fib_result_nl *frn; - u32 pid; + struct nlmsghdr *nlh; struct fib_table *tb; - - skb = skb_dequeue(&sk->sk_receive_queue); - if (skb == NULL) - return; + u32 pid; nlh = nlmsg_hdr(skb); if (skb->len < NLMSG_SPACE(0) || skb->len < nlh->nlmsg_len || @@ -811,13 +808,13 @@ static void nl_fib_input(struct sock *sk, int len) pid = NETLINK_CB(skb).pid; /* pid of sending process */ NETLINK_CB(skb).pid = 0; /* from kernel */ NETLINK_CB(skb).dst_group = 0; /* unicast */ - netlink_unicast(sk, skb, pid, MSG_DONTWAIT); + netlink_unicast(fibnl, skb, pid, MSG_DONTWAIT); } static void nl_fib_lookup_init(void) { - netlink_kernel_create(NETLINK_FIB_LOOKUP, 0, nl_fib_input, NULL, - THIS_MODULE); + fibnl = netlink_kernel_create(&init_net, NETLINK_FIB_LOOKUP, 0, + nl_fib_input, NULL, THIS_MODULE); } static void fib_disable_ip(struct net_device *dev, int force) @@ -860,6 +857,9 @@ static int fib_netdev_event(struct notifier_block *this, unsigned long event, vo struct net_device *dev = ptr; struct in_device *in_dev = __in_dev_get_rtnl(dev); + if (dev->nd_net != &init_net) + return NOTIFY_DONE; + if (event == NETDEV_UNREGISTER) { fib_disable_ip(dev, 2); return NOTIFY_DONE; diff --git a/net/ipv4/fib_hash.c b/net/ipv4/fib_hash.c index 9ad1d9ff9ce777232b69ad84d083fa3ba3b0b4c9..527a6e0af5b60eb48b9e15459e8877db10c29db8 100644 --- a/net/ipv4/fib_hash.c +++ b/net/ipv4/fib_hash.c @@ -35,6 +35,7 @@ #include #include +#include #include #include #include @@ -1038,24 +1039,8 @@ static const struct seq_operations fib_seq_ops = { static int fib_seq_open(struct inode *inode, struct file *file) { - struct seq_file *seq; - int rc = -ENOMEM; - struct fib_iter_state *s = kzalloc(sizeof(*s), GFP_KERNEL); - - if (!s) - goto out; - - rc = seq_open(file, &fib_seq_ops); - if (rc) - goto out_kfree; - - seq = file->private_data; - seq->private = s; -out: - return rc; -out_kfree: - kfree(s); - goto out; + return seq_open_private(file, &fib_seq_ops, + sizeof(struct fib_iter_state)); } static const struct file_operations fib_seq_fops = { @@ -1068,13 +1053,13 @@ static const struct file_operations fib_seq_fops = { int __init fib_proc_init(void) { - if (!proc_net_fops_create("route", S_IRUGO, &fib_seq_fops)) + if (!proc_net_fops_create(&init_net, "route", S_IRUGO, &fib_seq_fops)) return -ENOMEM; return 0; } void __init fib_proc_exit(void) { - proc_net_remove("route"); + proc_net_remove(&init_net, "route"); } #endif /* CONFIG_PROC_FS */ 
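The seq_file conversions in this series (arp_seq_open and fib_seq_open above, and the fib_trie and igmp proc files further down) all replace the same open-coded kzalloc()/seq_open()/seq->private error-path dance with a single seq_open_private() call, which allocates a zeroed private iterator of the given size or fails with -ENOMEM. A minimal sketch of the resulting shape follows; the foo_* names and the trivial iterator are illustrative only and are not part of this patch.

#include <linux/fs.h>
#include <linux/module.h>
#include <linux/seq_file.h>

struct foo_iter_state {			/* per-open private iterator state */
	int bucket;
};

static void *foo_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct foo_iter_state *state = seq->private;	/* zeroed by seq_open_private() */

	state->bucket = 0;
	return *pos == 0 ? SEQ_START_TOKEN : NULL;
}

static void *foo_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	++*pos;
	return NULL;				/* single record in this sketch */
}

static void foo_seq_stop(struct seq_file *seq, void *v)
{
}

static int foo_seq_show(struct seq_file *seq, void *v)
{
	seq_puts(seq, "example\n");
	return 0;
}

static const struct seq_operations foo_seq_ops = {
	.start	= foo_seq_start,
	.next	= foo_seq_next,
	.stop	= foo_seq_stop,
	.show	= foo_seq_show,
};

static int foo_seq_open(struct inode *inode, struct file *file)
{
	/* One call replaces the old allocate/seq_open/wire-up sequence;
	 * returns 0 on success or -ENOMEM if the state cannot be allocated. */
	return seq_open_private(file, &foo_seq_ops,
				sizeof(struct foo_iter_state));
}

static const struct file_operations foo_seq_fops = {
	.owner		= THIS_MODULE,
	.open		= foo_seq_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release_private,	/* frees the private state on close */
};

As in the converted files, .release stays seq_release_private so the state allocated by seq_open_private() is freed again, and such a fops would be registered with proc_net_fops_create(&init_net, ...) in the per-namespace form this patch introduces.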
diff --git a/net/ipv4/fib_rules.c b/net/ipv4/fib_rules.c index 2a947840210ece1a1e218972725e5bda5b21ea6f..f16839c6a721ab98d2402e16a9aa51087954afad 100644 --- a/net/ipv4/fib_rules.c +++ b/net/ipv4/fib_rules.c @@ -76,8 +76,6 @@ static struct fib4_rule local_rule = { }, }; -static LIST_HEAD(fib4_rules); - #ifdef CONFIG_NET_CLS_ROUTE u32 fib_rules_tclass(struct fib_result *res) { @@ -279,9 +277,9 @@ static u32 fib4_rule_default_pref(void) struct list_head *pos; struct fib_rule *rule; - if (!list_empty(&fib4_rules)) { - pos = fib4_rules.next; - if (pos->next != &fib4_rules) { + if (!list_empty(&fib4_rules_ops.rules_list)) { + pos = fib4_rules_ops.rules_list.next; + if (pos->next != &fib4_rules_ops.rules_list) { rule = list_entry(pos->next, struct fib_rule, list); if (rule->pref) return rule->pref - 1; @@ -317,15 +315,15 @@ static struct fib_rules_ops fib4_rules_ops = { .flush_cache = fib4_rule_flush_cache, .nlgroup = RTNLGRP_IPV4_RULE, .policy = fib4_rule_policy, - .rules_list = &fib4_rules, + .rules_list = LIST_HEAD_INIT(fib4_rules_ops.rules_list), .owner = THIS_MODULE, }; void __init fib4_rules_init(void) { - list_add_tail(&local_rule.common.list, &fib4_rules); - list_add_tail(&main_rule.common.list, &fib4_rules); - list_add_tail(&default_rule.common.list, &fib4_rules); + list_add_tail(&local_rule.common.list, &fib4_rules_ops.rules_list); + list_add_tail(&main_rule.common.list, &fib4_rules_ops.rules_list); + list_add_tail(&default_rule.common.list, &fib4_rules_ops.rules_list); fib_rules_register(&fib4_rules_ops); } diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c index c434119deb527d8e1754dbbdc9b81cb08987e8f7..1351a2617dcef072ee0adebdd59ea1007b9a4bbd 100644 --- a/net/ipv4/fib_semantics.c +++ b/net/ipv4/fib_semantics.c @@ -533,7 +533,7 @@ static int fib_check_nh(struct fib_config *cfg, struct fib_info *fi, return -EINVAL; if (inet_addr_type(nh->nh_gw) != RTN_UNICAST) return -EINVAL; - if ((dev = __dev_get_by_index(nh->nh_oif)) == NULL) + if ((dev = __dev_get_by_index(&init_net, nh->nh_oif)) == NULL) return -ENODEV; if (!(dev->flags&IFF_UP)) return -ENETDOWN; @@ -743,7 +743,7 @@ struct fib_info *fib_create_info(struct fib_config *cfg) int remaining; nla_for_each_attr(nla, cfg->fc_mx, cfg->fc_mx_len, remaining) { - int type = nla->nla_type; + int type = nla_type(nla); if (type) { if (type > RTAX_MAX) @@ -799,7 +799,7 @@ struct fib_info *fib_create_info(struct fib_config *cfg) if (nhs != 1 || nh->nh_gw) goto err_inval; nh->nh_scope = RT_SCOPE_NOWHERE; - nh->nh_dev = dev_get_by_index(fi->fib_nh->nh_oif); + nh->nh_dev = dev_get_by_index(&init_net, fi->fib_nh->nh_oif); err = -ENODEV; if (nh->nh_dev == NULL) goto failure; diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c index 9ca786a6fd3c8a7546b86e00dc81ae1de6fd45c0..81a8285d6d6a4251c7cd166635f7058046078bf6 100644 --- a/net/ipv4/fib_trie.c +++ b/net/ipv4/fib_trie.c @@ -73,6 +73,7 @@ #include #include #include +#include #include #include #include @@ -85,23 +86,14 @@ #define MAX_STAT_DEPTH 32 #define KEYLENGTH (8*sizeof(t_key)) -#define MASK_PFX(k, l) (((l)==0)?0:(k >> (KEYLENGTH-l)) << (KEYLENGTH-l)) -#define TKEY_GET_MASK(offset, bits) (((bits)==0)?0:((t_key)(-1) << (KEYLENGTH - bits) >> offset)) typedef unsigned int t_key; #define T_TNODE 0 #define T_LEAF 1 #define NODE_TYPE_MASK 0x1UL -#define NODE_PARENT(node) \ - ((struct tnode *)rcu_dereference(((node)->parent & ~NODE_TYPE_MASK))) - #define NODE_TYPE(node) ((node)->parent & NODE_TYPE_MASK) -#define NODE_SET_PARENT(node, ptr) \ - rcu_assign_pointer((node)->parent, \ - 
((unsigned long)(ptr)) | NODE_TYPE(node)) - #define IS_TNODE(n) (!(n->parent & T_LEAF)) #define IS_LEAF(n) (n->parent & T_LEAF) @@ -174,6 +166,19 @@ static void tnode_free(struct tnode *tn); static struct kmem_cache *fn_alias_kmem __read_mostly; static struct trie *trie_local = NULL, *trie_main = NULL; +static inline struct tnode *node_parent(struct node *node) +{ + struct tnode *ret; + + ret = (struct tnode *)(node->parent & ~NODE_TYPE_MASK); + return rcu_dereference(ret); +} + +static inline void node_set_parent(struct node *node, struct tnode *ptr) +{ + rcu_assign_pointer(node->parent, + (unsigned long)ptr | NODE_TYPE(node)); +} /* rcu_read_lock needs to be hold by caller from readside */ @@ -189,6 +194,11 @@ static inline int tnode_child_length(const struct tnode *tn) return 1 << tn->bits; } +static inline t_key mask_pfx(t_key k, unsigned short l) +{ + return (l == 0) ? 0 : k >> (KEYLENGTH-l) << (KEYLENGTH-l); +} + static inline t_key tkey_extract_bits(t_key a, int offset, int bits) { if (offset < KEYLENGTH) @@ -446,7 +456,7 @@ static void tnode_put_child_reorg(struct tnode *tn, int i, struct node *n, int w tn->full_children++; if (n) - NODE_SET_PARENT(n, tn); + node_set_parent(n, tn); rcu_assign_pointer(tn->child[i], n); } @@ -481,7 +491,7 @@ static struct node *resize(struct trie *t, struct tnode *tn) continue; /* compress one level */ - NODE_SET_PARENT(n, NULL); + node_set_parent(n, NULL); tnode_free(tn); return n; } @@ -636,7 +646,7 @@ static struct node *resize(struct trie *t, struct tnode *tn) /* compress one level */ - NODE_SET_PARENT(n, NULL); + node_set_parent(n, NULL); tnode_free(tn); return n; } @@ -673,7 +683,7 @@ static struct tnode *inflate(struct trie *t, struct tnode *tn) inode->pos == oldtnode->pos + oldtnode->bits && inode->bits > 1) { struct tnode *left, *right; - t_key m = TKEY_GET_MASK(inode->pos, 1); + t_key m = ~0U << (KEYLENGTH - 1) >> inode->pos; left = tnode_new(inode->key&(~m), inode->pos + 1, inode->bits - 1); @@ -961,24 +971,21 @@ fib_find_node(struct trie *t, u32 key) static struct node *trie_rebalance(struct trie *t, struct tnode *tn) { int wasfull; - t_key cindex, key; - struct tnode *tp = NULL; - - key = tn->key; - - while (tn != NULL && NODE_PARENT(tn) != NULL) { + t_key cindex, key = tn->key; + struct tnode *tp; - tp = NODE_PARENT(tn); + while (tn != NULL && (tp = node_parent((struct node *)tn)) != NULL) { cindex = tkey_extract_bits(key, tp->pos, tp->bits); wasfull = tnode_full(tp, tnode_get_child(tp, cindex)); tn = (struct tnode *) resize (t, (struct tnode *)tn); tnode_put_child_reorg((struct tnode *)tp, cindex,(struct node*)tn, wasfull); - if (!NODE_PARENT(tn)) + tp = node_parent((struct node *) tn); + if (!tp) break; - - tn = NODE_PARENT(tn); + tn = tp; } + /* Handle last (top) tnode */ if (IS_TNODE(tn)) tn = (struct tnode*) resize(t, (struct tnode *)tn); @@ -1031,7 +1038,7 @@ fib_insert_node(struct trie *t, int *err, u32 key, int plen) pos = tn->pos + tn->bits; n = tnode_get_child(tn, tkey_extract_bits(key, tn->pos, tn->bits)); - BUG_ON(n && NODE_PARENT(n) != tn); + BUG_ON(n && node_parent(n) != tn); } else break; } @@ -1083,7 +1090,7 @@ fib_insert_node(struct trie *t, int *err, u32 key, int plen) if (t->trie && n == NULL) { /* Case 2: n is NULL, and will just insert a new leaf */ - NODE_SET_PARENT(l, tp); + node_set_parent((struct node *)l, tp); cindex = tkey_extract_bits(key, tp->pos, tp->bits); put_child(t, (struct tnode *)tp, cindex, (struct node *)l); @@ -1114,7 +1121,7 @@ fib_insert_node(struct trie *t, int *err, u32 key, int plen) goto err; 
} - NODE_SET_PARENT(tn, tp); + node_set_parent((struct node *)tn, tp); missbit = tkey_extract_bits(key, newpos, 1); put_child(t, tn, missbit, (struct node *)l); @@ -1364,7 +1371,8 @@ fn_trie_lookup(struct fib_table *tb, const struct flowi *flp, struct fib_result bits = pn->bits; if (!chopped_off) - cindex = tkey_extract_bits(MASK_PFX(key, current_prefix_length), pos, bits); + cindex = tkey_extract_bits(mask_pfx(key, current_prefix_length), + pos, bits); n = tnode_get_child(pn, cindex); @@ -1450,8 +1458,8 @@ fn_trie_lookup(struct fib_table *tb, const struct flowi *flp, struct fib_result * to find a matching prefix. */ - node_prefix = MASK_PFX(cn->key, cn->pos); - key_prefix = MASK_PFX(key, cn->pos); + node_prefix = mask_pfx(cn->key, cn->pos); + key_prefix = mask_pfx(key, cn->pos); pref_mismatch = key_prefix^node_prefix; mp = 0; @@ -1495,12 +1503,13 @@ fn_trie_lookup(struct fib_table *tb, const struct flowi *flp, struct fib_result if (chopped_off <= pn->bits) { cindex &= ~(1 << (chopped_off-1)); } else { - if (NODE_PARENT(pn) == NULL) + struct tnode *parent = node_parent((struct node *) pn); + if (!parent) goto failed; /* Get Child's index */ - cindex = tkey_extract_bits(pn->key, NODE_PARENT(pn)->pos, NODE_PARENT(pn)->bits); - pn = NODE_PARENT(pn); + cindex = tkey_extract_bits(pn->key, parent->pos, parent->bits); + pn = parent; chopped_off = 0; #ifdef CONFIG_IP_FIB_TRIE_STATS @@ -1536,7 +1545,7 @@ static int trie_leaf_remove(struct trie *t, t_key key) check_tnode(tn); n = tnode_get_child(tn ,tkey_extract_bits(key, tn->pos, tn->bits)); - BUG_ON(n && NODE_PARENT(n) != tn); + BUG_ON(n && node_parent(n) != tn); } l = (struct leaf *) n; @@ -1551,7 +1560,7 @@ static int trie_leaf_remove(struct trie *t, t_key key) t->revision++; t->size--; - tp = NODE_PARENT(n); + tp = node_parent(n); tnode_free((struct tnode *) n); if (tp) { @@ -1703,7 +1712,7 @@ static struct leaf *nextleaf(struct trie *t, struct leaf *thisleaf) p = (struct tnode*) trie; /* Start */ } else - p = (struct tnode *) NODE_PARENT(c); + p = node_parent(c); while (p) { int pos, last; @@ -1740,7 +1749,7 @@ static struct leaf *nextleaf(struct trie *t, struct leaf *thisleaf) up: /* No more children go up one step */ c = (struct node *) p; - p = (struct tnode *) NODE_PARENT(p); + p = node_parent(c); } return NULL; /* Ready. 
Root of trie */ } @@ -2043,7 +2052,7 @@ static struct node *fib_trie_get_next(struct fib_trie_iter *iter) } /* Current node exhausted, pop back up */ - p = NODE_PARENT(tn); + p = node_parent((struct node *)tn); if (p) { cindex = tkey_extract_bits(tn->key, p->pos, p->bits)+1; tn = p; @@ -2317,7 +2326,7 @@ static int fib_trie_seq_show(struct seq_file *seq, void *v) if (v == SEQ_START_TOKEN) return 0; - if (!NODE_PARENT(n)) { + if (!node_parent(n)) { if (iter->trie == trie_local) seq_puts(seq, ":\n"); else @@ -2326,7 +2335,7 @@ static int fib_trie_seq_show(struct seq_file *seq, void *v) if (IS_TNODE(n)) { struct tnode *tn = (struct tnode *) n; - __be32 prf = htonl(MASK_PFX(tn->key, tn->pos)); + __be32 prf = htonl(mask_pfx(tn->key, tn->pos)); seq_indent(seq, iter->depth-1); seq_printf(seq, " +-- %d.%d.%d.%d/%d %d %d %d\n", @@ -2370,25 +2379,8 @@ static const struct seq_operations fib_trie_seq_ops = { static int fib_trie_seq_open(struct inode *inode, struct file *file) { - struct seq_file *seq; - int rc = -ENOMEM; - struct fib_trie_iter *s = kmalloc(sizeof(*s), GFP_KERNEL); - - if (!s) - goto out; - - rc = seq_open(file, &fib_trie_seq_ops); - if (rc) - goto out_kfree; - - seq = file->private_data; - seq->private = s; - memset(s, 0, sizeof(*s)); -out: - return rc; -out_kfree: - kfree(s); - goto out; + return seq_open_private(file, &fib_trie_seq_ops, + sizeof(struct fib_trie_iter)); } static const struct file_operations fib_trie_fops = { @@ -2491,25 +2483,8 @@ static const struct seq_operations fib_route_seq_ops = { static int fib_route_seq_open(struct inode *inode, struct file *file) { - struct seq_file *seq; - int rc = -ENOMEM; - struct fib_trie_iter *s = kmalloc(sizeof(*s), GFP_KERNEL); - - if (!s) - goto out; - - rc = seq_open(file, &fib_route_seq_ops); - if (rc) - goto out_kfree; - - seq = file->private_data; - seq->private = s; - memset(s, 0, sizeof(*s)); -out: - return rc; -out_kfree: - kfree(s); - goto out; + return seq_open_private(file, &fib_route_seq_ops, + sizeof(struct fib_trie_iter)); } static const struct file_operations fib_route_fops = { @@ -2522,30 +2497,30 @@ static const struct file_operations fib_route_fops = { int __init fib_proc_init(void) { - if (!proc_net_fops_create("fib_trie", S_IRUGO, &fib_trie_fops)) + if (!proc_net_fops_create(&init_net, "fib_trie", S_IRUGO, &fib_trie_fops)) goto out1; - if (!proc_net_fops_create("fib_triestat", S_IRUGO, &fib_triestat_fops)) + if (!proc_net_fops_create(&init_net, "fib_triestat", S_IRUGO, &fib_triestat_fops)) goto out2; - if (!proc_net_fops_create("route", S_IRUGO, &fib_route_fops)) + if (!proc_net_fops_create(&init_net, "route", S_IRUGO, &fib_route_fops)) goto out3; return 0; out3: - proc_net_remove("fib_triestat"); + proc_net_remove(&init_net, "fib_triestat"); out2: - proc_net_remove("fib_trie"); + proc_net_remove(&init_net, "fib_trie"); out1: return -ENOMEM; } void __init fib_proc_exit(void) { - proc_net_remove("fib_trie"); - proc_net_remove("fib_triestat"); - proc_net_remove("route"); + proc_net_remove(&init_net, "fib_trie"); + proc_net_remove(&init_net, "fib_triestat"); + proc_net_remove(&init_net, "route"); } #endif /* CONFIG_PROC_FS */ diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c index 02a899bec1969fcb6511722d41fe20009b6f2dbc..272c69e106e9a6a416249fcb77607ac43c16940a 100644 --- a/net/ipv4/icmp.c +++ b/net/ipv4/icmp.c @@ -115,6 +115,7 @@ struct icmp_bxm { * Statistics */ DEFINE_SNMP_STAT(struct icmp_mib, icmp_statistics) __read_mostly; +DEFINE_SNMP_STAT(struct icmpmsg_mib, icmpmsg_statistics) __read_mostly; /* An array of 
errno for error messages from dest unreach. */ /* RFC 1122: 3.2.2.1 States that NET_UNREACH, HOST_UNREACH and SR_FAILED MUST be considered 'transient errs'. */ @@ -214,8 +215,6 @@ int sysctl_icmp_errors_use_inbound_ifaddr __read_mostly; */ struct icmp_control { - int output_entry; /* Field for increment on output */ - int input_entry; /* Field for increment on input */ void (*handler)(struct sk_buff *skb); short error; /* This ICMP is classed as an error message */ }; @@ -316,12 +315,10 @@ static inline int icmpv4_xrlim_allow(struct rtable *rt, int type, int code) /* * Maintain the counters used in the SNMP statistics for outgoing ICMP */ -static void icmp_out_count(int type) +void icmp_out_count(unsigned char type) { - if (type <= NR_ICMP_TYPES) { - ICMP_INC_STATS(icmp_pointers[type].output_entry); - ICMP_INC_STATS(ICMP_MIB_OUTMSGS); - } + ICMPMSGOUT_INC_STATS(type); + ICMP_INC_STATS(ICMP_MIB_OUTMSGS); } /* @@ -390,7 +387,6 @@ static void icmp_reply(struct icmp_bxm *icmp_param, struct sk_buff *skb) return; icmp_param->data.icmph.checksum = 0; - icmp_out_count(icmp_param->data.icmph.type); inet->tos = ip_hdr(skb)->tos; daddr = ipc.addr = rt->rt_src; @@ -517,7 +513,7 @@ void icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info) struct net_device *dev = NULL; if (rt->fl.iif && sysctl_icmp_errors_use_inbound_ifaddr) - dev = dev_get_by_index(rt->fl.iif); + dev = dev_get_by_index(&init_net, rt->fl.iif); if (dev) { saddr = inet_select_addr(dev, 0, RT_SCOPE_LINK); @@ -952,6 +948,7 @@ int icmp_rcv(struct sk_buff *skb) icmph = icmp_hdr(skb); + ICMPMSGIN_INC_STATS_BH(icmph->type); /* * 18 is the highest 'known' ICMP type. Anything else is a mystery * @@ -986,7 +983,6 @@ int icmp_rcv(struct sk_buff *skb) } } - ICMP_INC_STATS_BH(icmp_pointers[icmph->type].input_entry); icmp_pointers[icmph->type].handler(skb); drop: @@ -1002,109 +998,71 @@ int icmp_rcv(struct sk_buff *skb) */ static const struct icmp_control icmp_pointers[NR_ICMP_TYPES + 1] = { [ICMP_ECHOREPLY] = { - .output_entry = ICMP_MIB_OUTECHOREPS, - .input_entry = ICMP_MIB_INECHOREPS, .handler = icmp_discard, }, [1] = { - .output_entry = ICMP_MIB_DUMMY, - .input_entry = ICMP_MIB_INERRORS, .handler = icmp_discard, .error = 1, }, [2] = { - .output_entry = ICMP_MIB_DUMMY, - .input_entry = ICMP_MIB_INERRORS, .handler = icmp_discard, .error = 1, }, [ICMP_DEST_UNREACH] = { - .output_entry = ICMP_MIB_OUTDESTUNREACHS, - .input_entry = ICMP_MIB_INDESTUNREACHS, .handler = icmp_unreach, .error = 1, }, [ICMP_SOURCE_QUENCH] = { - .output_entry = ICMP_MIB_OUTSRCQUENCHS, - .input_entry = ICMP_MIB_INSRCQUENCHS, .handler = icmp_unreach, .error = 1, }, [ICMP_REDIRECT] = { - .output_entry = ICMP_MIB_OUTREDIRECTS, - .input_entry = ICMP_MIB_INREDIRECTS, .handler = icmp_redirect, .error = 1, }, [6] = { - .output_entry = ICMP_MIB_DUMMY, - .input_entry = ICMP_MIB_INERRORS, .handler = icmp_discard, .error = 1, }, [7] = { - .output_entry = ICMP_MIB_DUMMY, - .input_entry = ICMP_MIB_INERRORS, .handler = icmp_discard, .error = 1, }, [ICMP_ECHO] = { - .output_entry = ICMP_MIB_OUTECHOS, - .input_entry = ICMP_MIB_INECHOS, .handler = icmp_echo, }, [9] = { - .output_entry = ICMP_MIB_DUMMY, - .input_entry = ICMP_MIB_INERRORS, .handler = icmp_discard, .error = 1, }, [10] = { - .output_entry = ICMP_MIB_DUMMY, - .input_entry = ICMP_MIB_INERRORS, .handler = icmp_discard, .error = 1, }, [ICMP_TIME_EXCEEDED] = { - .output_entry = ICMP_MIB_OUTTIMEEXCDS, - .input_entry = ICMP_MIB_INTIMEEXCDS, .handler = icmp_unreach, .error = 1, }, [ICMP_PARAMETERPROB] = { - .output_entry = 
ICMP_MIB_OUTPARMPROBS, - .input_entry = ICMP_MIB_INPARMPROBS, .handler = icmp_unreach, .error = 1, }, [ICMP_TIMESTAMP] = { - .output_entry = ICMP_MIB_OUTTIMESTAMPS, - .input_entry = ICMP_MIB_INTIMESTAMPS, .handler = icmp_timestamp, }, [ICMP_TIMESTAMPREPLY] = { - .output_entry = ICMP_MIB_OUTTIMESTAMPREPS, - .input_entry = ICMP_MIB_INTIMESTAMPREPS, .handler = icmp_discard, }, [ICMP_INFO_REQUEST] = { - .output_entry = ICMP_MIB_DUMMY, - .input_entry = ICMP_MIB_DUMMY, .handler = icmp_discard, }, [ICMP_INFO_REPLY] = { - .output_entry = ICMP_MIB_DUMMY, - .input_entry = ICMP_MIB_DUMMY, .handler = icmp_discard, }, [ICMP_ADDRESS] = { - .output_entry = ICMP_MIB_OUTADDRMASKS, - .input_entry = ICMP_MIB_INADDRMASKS, .handler = icmp_address, }, [ICMP_ADDRESSREPLY] = { - .output_entry = ICMP_MIB_OUTADDRMASKREPS, - .input_entry = ICMP_MIB_INADDRMASKREPS, .handler = icmp_address_reply, }, }; @@ -1146,4 +1104,5 @@ void __init icmp_init(struct net_proto_family *ops) EXPORT_SYMBOL(icmp_err_convert); EXPORT_SYMBOL(icmp_send); EXPORT_SYMBOL(icmp_statistics); +EXPORT_SYMBOL(icmpmsg_statistics); EXPORT_SYMBOL(xrlim_allow); diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c index a646409c2d068040ac0054b9f2cf3a29472a495d..7dbc282d4f9f3eb86a363f19f0fa19f29f1ad97b 100644 --- a/net/ipv4/igmp.c +++ b/net/ipv4/igmp.c @@ -91,6 +91,7 @@ #include #include +#include #include #include #include @@ -1694,8 +1695,8 @@ static int ip_mc_add_src(struct in_device *in_dev, __be32 *pmca, int sfmode, (void) ip_mc_del1_src(pmc, sfmode, &psfsrc[i]); } else if (isexclude != (pmc->sfcount[MCAST_EXCLUDE] != 0)) { #ifdef CONFIG_IP_MULTICAST - struct in_device *in_dev = pmc->interface; struct ip_sf_list *psf; + in_dev = pmc->interface; #endif /* filter mode change */ @@ -1798,7 +1799,7 @@ static int ip_mc_leave_src(struct sock *sk, struct ip_mc_socklist *iml, { int err; - if (iml->sflist == 0) { + if (iml->sflist == NULL) { /* any-source empty exclude case */ return ip_mc_del_src(in_dev, &iml->multi.imr_multiaddr.s_addr, iml->sfmode, 0, NULL, 0); @@ -2166,7 +2167,6 @@ int ip_mc_gsfget(struct sock *sk, struct group_filter *gsf, return -EFAULT; } for (i=0; iin_dev = NULL; - for_each_netdev(state->dev) { + for_each_netdev(&init_net, state->dev) { struct in_device *in_dev; in_dev = in_dev_get(state->dev); if (!in_dev) @@ -2410,23 +2410,8 @@ static const struct seq_operations igmp_mc_seq_ops = { static int igmp_mc_seq_open(struct inode *inode, struct file *file) { - struct seq_file *seq; - int rc = -ENOMEM; - struct igmp_mc_iter_state *s = kzalloc(sizeof(*s), GFP_KERNEL); - - if (!s) - goto out; - rc = seq_open(file, &igmp_mc_seq_ops); - if (rc) - goto out_kfree; - - seq = file->private_data; - seq->private = s; -out: - return rc; -out_kfree: - kfree(s); - goto out; + return seq_open_private(file, &igmp_mc_seq_ops, + sizeof(struct igmp_mc_iter_state)); } static const struct file_operations igmp_mc_seq_fops = { @@ -2453,7 +2438,7 @@ static inline struct ip_sf_list *igmp_mcf_get_first(struct seq_file *seq) state->idev = NULL; state->im = NULL; - for_each_netdev(state->dev) { + for_each_netdev(&init_net, state->dev) { struct in_device *idev; idev = in_dev_get(state->dev); if (unlikely(idev == NULL)) @@ -2584,23 +2569,8 @@ static const struct seq_operations igmp_mcf_seq_ops = { static int igmp_mcf_seq_open(struct inode *inode, struct file *file) { - struct seq_file *seq; - int rc = -ENOMEM; - struct igmp_mcf_iter_state *s = kzalloc(sizeof(*s), GFP_KERNEL); - - if (!s) - goto out; - rc = seq_open(file, &igmp_mcf_seq_ops); - if (rc) - goto out_kfree; - - 
seq = file->private_data; - seq->private = s; -out: - return rc; -out_kfree: - kfree(s); - goto out; + return seq_open_private(file, &igmp_mcf_seq_ops, + sizeof(struct igmp_mcf_iter_state)); } static const struct file_operations igmp_mcf_seq_fops = { @@ -2613,8 +2583,8 @@ static const struct file_operations igmp_mcf_seq_fops = { int __init igmp_mc_proc_init(void) { - proc_net_fops_create("igmp", S_IRUGO, &igmp_mc_seq_fops); - proc_net_fops_create("mcfilter", S_IRUGO, &igmp_mcf_seq_fops); + proc_net_fops_create(&init_net, "igmp", S_IRUGO, &igmp_mc_seq_fops); + proc_net_fops_create(&init_net, "mcfilter", S_IRUGO, &igmp_mcf_seq_fops); return 0; } #endif diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c index fbe7714f21d08593beecb3945f2f85848b506be8..3cef12835c4b2bfa86b50a74a788488cd5ac9ea1 100644 --- a/net/ipv4/inet_connection_sock.c +++ b/net/ipv4/inet_connection_sock.c @@ -33,6 +33,19 @@ EXPORT_SYMBOL(inet_csk_timer_bug_msg); * This array holds the first and last local port number. */ int sysctl_local_port_range[2] = { 32768, 61000 }; +DEFINE_SEQLOCK(sysctl_port_range_lock); + +void inet_get_local_port_range(int *low, int *high) +{ + unsigned seq; + do { + seq = read_seqbegin(&sysctl_port_range_lock); + + *low = sysctl_local_port_range[0]; + *high = sysctl_local_port_range[1]; + } while (read_seqretry(&sysctl_port_range_lock, seq)); +} +EXPORT_SYMBOL(inet_get_local_port_range); int inet_csk_bind_conflict(const struct sock *sk, const struct inet_bind_bucket *tb) @@ -77,10 +90,11 @@ int inet_csk_get_port(struct inet_hashinfo *hashinfo, local_bh_disable(); if (!snum) { - int low = sysctl_local_port_range[0]; - int high = sysctl_local_port_range[1]; - int remaining = (high - low) + 1; - int rover = net_random() % (high - low) + low; + int remaining, rover, low, high; + + inet_get_local_port_range(&low, &high); + remaining = high - low; + rover = net_random() % remaining + low; do { head = &hashinfo->bhash[inet_bhashfn(rover, hashinfo->bhash_size)]; diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c index def007ec1d6f85b3f1f9830554c8a4c0be70a905..7eb83ebed2ec5754af310cb17db9a8108bcb0578 100644 --- a/net/ipv4/inet_diag.c +++ b/net/ipv4/inet_diag.c @@ -11,6 +11,7 @@ * 2 of the License, or (at your option) any later version. 
*/ +#include #include #include #include @@ -112,7 +113,7 @@ static int inet_csk_diag_fill(struct sock *sk, } #endif -#define EXPIRES_IN_MS(tmo) ((tmo - jiffies) * 1000 + HZ - 1) / HZ +#define EXPIRES_IN_MS(tmo) DIV_ROUND_UP((tmo - jiffies) * 1000, HZ) if (icsk->icsk_pending == ICSK_TIME_RETRANS) { r->idiag_timer = 1; @@ -190,7 +191,7 @@ static int inet_twsk_diag_fill(struct inet_timewait_sock *tw, r->id.idiag_dst[0] = tw->tw_daddr; r->idiag_state = tw->tw_substate; r->idiag_timer = 3; - r->idiag_expires = (tmo * 1000 + HZ - 1) / HZ; + r->idiag_expires = DIV_ROUND_UP(tmo * 1000, HZ); r->idiag_rqueue = 0; r->idiag_wqueue = 0; r->idiag_uid = 0; @@ -838,15 +839,11 @@ static int inet_diag_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh) static DEFINE_MUTEX(inet_diag_mutex); -static void inet_diag_rcv(struct sock *sk, int len) +static void inet_diag_rcv(struct sk_buff *skb) { - unsigned int qlen = 0; - - do { - mutex_lock(&inet_diag_mutex); - netlink_run_queue(sk, &qlen, &inet_diag_rcv_msg); - mutex_unlock(&inet_diag_mutex); - } while (qlen); + mutex_lock(&inet_diag_mutex); + netlink_rcv_skb(skb, &inet_diag_rcv_msg); + mutex_unlock(&inet_diag_mutex); } static DEFINE_SPINLOCK(inet_diag_register_lock); @@ -896,8 +893,8 @@ static int __init inet_diag_init(void) if (!inet_diag_table) goto out; - idiagnl = netlink_kernel_create(NETLINK_INET_DIAG, 0, inet_diag_rcv, - NULL, THIS_MODULE); + idiagnl = netlink_kernel_create(&init_net, NETLINK_INET_DIAG, 0, + inet_diag_rcv, NULL, THIS_MODULE); if (idiagnl == NULL) goto out_free_table; err = 0; diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c index fb662621c54ecf65a5812726859553b8e03a42d8..fac6398e436709c714b95992c9045e740454d004 100644 --- a/net/ipv4/inet_hashtables.c +++ b/net/ipv4/inet_hashtables.c @@ -279,19 +279,18 @@ int inet_hash_connect(struct inet_timewait_death_row *death_row, int ret; if (!snum) { - int low = sysctl_local_port_range[0]; - int high = sysctl_local_port_range[1]; - int range = high - low; - int i; - int port; + int i, remaining, low, high, port; static u32 hint; u32 offset = hint + inet_sk_port_offset(sk); struct hlist_node *node; struct inet_timewait_sock *tw = NULL; + inet_get_local_port_range(&low, &high); + remaining = high - low; + local_bh_disable(); - for (i = 1; i <= range; i++) { - port = low + (i + offset) % range; + for (i = 1; i <= remaining; i++) { + port = low + (i + offset) % remaining; head = &hinfo->bhash[inet_bhashfn(port, hinfo->bhash_size)]; spin_lock(&head->lock); diff --git a/net/ipv4/inet_lro.c b/net/ipv4/inet_lro.c new file mode 100644 index 0000000000000000000000000000000000000000..4545b64e28151f76e2a7f301f87bc0c322a1829b --- /dev/null +++ b/net/ipv4/inet_lro.c @@ -0,0 +1,600 @@ +/* + * linux/net/ipv4/inet_lro.c + * + * Large Receive Offload (ipv4 / tcp) + * + * (C) Copyright IBM Corp. 2007 + * + * Authors: + * Jan-Bernd Themann + * Christoph Raisch + * + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2, or (at your option) + * any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + */ + + +#include +#include +#include + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Jan-Bernd Themann "); +MODULE_DESCRIPTION("Large Receive Offload (ipv4 / tcp)"); + +#define TCP_HDR_LEN(tcph) (tcph->doff << 2) +#define IP_HDR_LEN(iph) (iph->ihl << 2) +#define TCP_PAYLOAD_LENGTH(iph, tcph) \ + (ntohs(iph->tot_len) - IP_HDR_LEN(iph) - TCP_HDR_LEN(tcph)) + +#define IPH_LEN_WO_OPTIONS 5 +#define TCPH_LEN_WO_OPTIONS 5 +#define TCPH_LEN_W_TIMESTAMP 8 + +#define LRO_MAX_PG_HLEN 64 + +#define LRO_INC_STATS(lro_mgr, attr) { lro_mgr->stats.attr++; } + +/* + * Basic tcp checks whether packet is suitable for LRO + */ + +static int lro_tcp_ip_check(struct iphdr *iph, struct tcphdr *tcph, + int len, struct net_lro_desc *lro_desc) +{ + /* check ip header: don't aggregate padded frames */ + if (ntohs(iph->tot_len) != len) + return -1; + + if (TCP_PAYLOAD_LENGTH(iph, tcph) == 0) + return -1; + + if (iph->ihl != IPH_LEN_WO_OPTIONS) + return -1; + + if (tcph->cwr || tcph->ece || tcph->urg || !tcph->ack + || tcph->rst || tcph->syn || tcph->fin) + return -1; + + if (INET_ECN_is_ce(ipv4_get_dsfield(iph))) + return -1; + + if (tcph->doff != TCPH_LEN_WO_OPTIONS + && tcph->doff != TCPH_LEN_W_TIMESTAMP) + return -1; + + /* check tcp options (only timestamp allowed) */ + if (tcph->doff == TCPH_LEN_W_TIMESTAMP) { + u32 *topt = (u32 *)(tcph + 1); + + if (*topt != htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) + | (TCPOPT_TIMESTAMP << 8) + | TCPOLEN_TIMESTAMP)) + return -1; + + /* timestamp should be in right order */ + topt++; + if (lro_desc && after(ntohl(lro_desc->tcp_rcv_tsval), + ntohl(*topt))) + return -1; + + /* timestamp reply should not be zero */ + topt++; + if (*topt == 0) + return -1; + } + + return 0; +} + +static void lro_update_tcp_ip_header(struct net_lro_desc *lro_desc) +{ + struct iphdr *iph = lro_desc->iph; + struct tcphdr *tcph = lro_desc->tcph; + u32 *p; + __wsum tcp_hdr_csum; + + tcph->ack_seq = lro_desc->tcp_ack; + tcph->window = lro_desc->tcp_window; + + if (lro_desc->tcp_saw_tstamp) { + p = (u32 *)(tcph + 1); + *(p+2) = lro_desc->tcp_rcv_tsecr; + } + + iph->tot_len = htons(lro_desc->ip_tot_len); + + iph->check = 0; + iph->check = ip_fast_csum((u8 *)lro_desc->iph, iph->ihl); + + tcph->check = 0; + tcp_hdr_csum = csum_partial((u8 *)tcph, TCP_HDR_LEN(tcph), 0); + lro_desc->data_csum = csum_add(lro_desc->data_csum, tcp_hdr_csum); + tcph->check = csum_tcpudp_magic(iph->saddr, iph->daddr, + lro_desc->ip_tot_len - + IP_HDR_LEN(iph), IPPROTO_TCP, + lro_desc->data_csum); +} + +static __wsum lro_tcp_data_csum(struct iphdr *iph, struct tcphdr *tcph, int len) +{ + __wsum tcp_csum; + __wsum tcp_hdr_csum; + __wsum tcp_ps_hdr_csum; + + tcp_csum = ~csum_unfold(tcph->check); + tcp_hdr_csum = csum_partial((u8 *)tcph, TCP_HDR_LEN(tcph), tcp_csum); + + tcp_ps_hdr_csum = csum_tcpudp_nofold(iph->saddr, iph->daddr, + len + TCP_HDR_LEN(tcph), + IPPROTO_TCP, 0); + + return csum_sub(csum_sub(tcp_csum, tcp_hdr_csum), + tcp_ps_hdr_csum); +} + +static void lro_init_desc(struct net_lro_desc *lro_desc, struct sk_buff *skb, + struct iphdr *iph, struct tcphdr *tcph, + u16 vlan_tag, struct vlan_group *vgrp) +{ + int nr_frags; + u32 *ptr; + u32 tcp_data_len = TCP_PAYLOAD_LENGTH(iph, tcph); + + nr_frags = skb_shinfo(skb)->nr_frags; + lro_desc->parent = skb; + lro_desc->next_frag = &(skb_shinfo(skb)->frags[nr_frags]); + lro_desc->iph = 
iph; + lro_desc->tcph = tcph; + lro_desc->tcp_next_seq = ntohl(tcph->seq) + tcp_data_len; + lro_desc->tcp_ack = ntohl(tcph->ack_seq); + lro_desc->tcp_window = tcph->window; + + lro_desc->pkt_aggr_cnt = 1; + lro_desc->ip_tot_len = ntohs(iph->tot_len); + + if (tcph->doff == 8) { + ptr = (u32 *)(tcph+1); + lro_desc->tcp_saw_tstamp = 1; + lro_desc->tcp_rcv_tsval = *(ptr+1); + lro_desc->tcp_rcv_tsecr = *(ptr+2); + } + + lro_desc->mss = tcp_data_len; + lro_desc->vgrp = vgrp; + lro_desc->vlan_tag = vlan_tag; + lro_desc->active = 1; + + lro_desc->data_csum = lro_tcp_data_csum(iph, tcph, + tcp_data_len); +} + +static inline void lro_clear_desc(struct net_lro_desc *lro_desc) +{ + memset(lro_desc, 0, sizeof(struct net_lro_desc)); +} + +static void lro_add_common(struct net_lro_desc *lro_desc, struct iphdr *iph, + struct tcphdr *tcph, int tcp_data_len) +{ + struct sk_buff *parent = lro_desc->parent; + u32 *topt; + + lro_desc->pkt_aggr_cnt++; + lro_desc->ip_tot_len += tcp_data_len; + lro_desc->tcp_next_seq += tcp_data_len; + lro_desc->tcp_window = tcph->window; + lro_desc->tcp_ack = tcph->ack_seq; + + /* don't update tcp_rcv_tsval, would not work with PAWS */ + if (lro_desc->tcp_saw_tstamp) { + topt = (u32 *) (tcph + 1); + lro_desc->tcp_rcv_tsecr = *(topt + 2); + } + + lro_desc->data_csum = csum_block_add(lro_desc->data_csum, + lro_tcp_data_csum(iph, tcph, + tcp_data_len), + parent->len); + + parent->len += tcp_data_len; + parent->data_len += tcp_data_len; + if (tcp_data_len > lro_desc->mss) + lro_desc->mss = tcp_data_len; +} + +static void lro_add_packet(struct net_lro_desc *lro_desc, struct sk_buff *skb, + struct iphdr *iph, struct tcphdr *tcph) +{ + struct sk_buff *parent = lro_desc->parent; + int tcp_data_len = TCP_PAYLOAD_LENGTH(iph, tcph); + + lro_add_common(lro_desc, iph, tcph, tcp_data_len); + + skb_pull(skb, (skb->len - tcp_data_len)); + parent->truesize += skb->truesize; + + if (lro_desc->last_skb) + lro_desc->last_skb->next = skb; + else + skb_shinfo(parent)->frag_list = skb; + + lro_desc->last_skb = skb; +} + +static void lro_add_frags(struct net_lro_desc *lro_desc, + int len, int hlen, int truesize, + struct skb_frag_struct *skb_frags, + struct iphdr *iph, struct tcphdr *tcph) +{ + struct sk_buff *skb = lro_desc->parent; + int tcp_data_len = TCP_PAYLOAD_LENGTH(iph, tcph); + + lro_add_common(lro_desc, iph, tcph, tcp_data_len); + + skb->truesize += truesize; + + skb_frags[0].page_offset += hlen; + skb_frags[0].size -= hlen; + + while (tcp_data_len > 0) { + *(lro_desc->next_frag) = *skb_frags; + tcp_data_len -= skb_frags->size; + lro_desc->next_frag++; + skb_frags++; + skb_shinfo(skb)->nr_frags++; + } +} + +static int lro_check_tcp_conn(struct net_lro_desc *lro_desc, + struct iphdr *iph, + struct tcphdr *tcph) +{ + if ((lro_desc->iph->saddr != iph->saddr) + || (lro_desc->iph->daddr != iph->daddr) + || (lro_desc->tcph->source != tcph->source) + || (lro_desc->tcph->dest != tcph->dest)) + return -1; + return 0; +} + +static struct net_lro_desc *lro_get_desc(struct net_lro_mgr *lro_mgr, + struct net_lro_desc *lro_arr, + struct iphdr *iph, + struct tcphdr *tcph) +{ + struct net_lro_desc *lro_desc = NULL; + struct net_lro_desc *tmp; + int max_desc = lro_mgr->max_desc; + int i; + + for (i = 0; i < max_desc; i++) { + tmp = &lro_arr[i]; + if (tmp->active) + if (!lro_check_tcp_conn(tmp, iph, tcph)) { + lro_desc = tmp; + goto out; + } + } + + for (i = 0; i < max_desc; i++) { + if (!lro_arr[i].active) { + lro_desc = &lro_arr[i]; + goto out; + } + } + + LRO_INC_STATS(lro_mgr, no_desc); +out: + return 
lro_desc; +} + +static void lro_flush(struct net_lro_mgr *lro_mgr, + struct net_lro_desc *lro_desc) +{ + if (lro_desc->pkt_aggr_cnt > 1) + lro_update_tcp_ip_header(lro_desc); + + skb_shinfo(lro_desc->parent)->gso_size = lro_desc->mss; + + if (lro_desc->vgrp) { + if (test_bit(LRO_F_NAPI, &lro_mgr->features)) + vlan_hwaccel_receive_skb(lro_desc->parent, + lro_desc->vgrp, + lro_desc->vlan_tag); + else + vlan_hwaccel_rx(lro_desc->parent, + lro_desc->vgrp, + lro_desc->vlan_tag); + + } else { + if (test_bit(LRO_F_NAPI, &lro_mgr->features)) + netif_receive_skb(lro_desc->parent); + else + netif_rx(lro_desc->parent); + } + + LRO_INC_STATS(lro_mgr, flushed); + lro_clear_desc(lro_desc); +} + +static int __lro_proc_skb(struct net_lro_mgr *lro_mgr, struct sk_buff *skb, + struct vlan_group *vgrp, u16 vlan_tag, void *priv) +{ + struct net_lro_desc *lro_desc; + struct iphdr *iph; + struct tcphdr *tcph; + u64 flags; + int vlan_hdr_len = 0; + + if (!lro_mgr->get_skb_header + || lro_mgr->get_skb_header(skb, (void *)&iph, (void *)&tcph, + &flags, priv)) + goto out; + + if (!(flags & LRO_IPV4) || !(flags & LRO_TCP)) + goto out; + + lro_desc = lro_get_desc(lro_mgr, lro_mgr->lro_arr, iph, tcph); + if (!lro_desc) + goto out; + + if ((skb->protocol == htons(ETH_P_8021Q)) + && !test_bit(LRO_F_EXTRACT_VLAN_ID, &lro_mgr->features)) + vlan_hdr_len = VLAN_HLEN; + + if (!lro_desc->active) { /* start new lro session */ + if (lro_tcp_ip_check(iph, tcph, skb->len - vlan_hdr_len, NULL)) + goto out; + + skb->ip_summed = lro_mgr->ip_summed_aggr; + lro_init_desc(lro_desc, skb, iph, tcph, vlan_tag, vgrp); + LRO_INC_STATS(lro_mgr, aggregated); + return 0; + } + + if (lro_desc->tcp_next_seq != ntohl(tcph->seq)) + goto out2; + + if (lro_tcp_ip_check(iph, tcph, skb->len, lro_desc)) + goto out2; + + lro_add_packet(lro_desc, skb, iph, tcph); + LRO_INC_STATS(lro_mgr, aggregated); + + if ((lro_desc->pkt_aggr_cnt >= lro_mgr->max_aggr) || + lro_desc->parent->len > (0xFFFF - lro_mgr->dev->mtu)) + lro_flush(lro_mgr, lro_desc); + + return 0; + +out2: /* send aggregated SKBs to stack */ + lro_flush(lro_mgr, lro_desc); + +out: /* Original SKB has to be posted to stack */ + skb->ip_summed = lro_mgr->ip_summed; + return 1; +} + + +static struct sk_buff *lro_gen_skb(struct net_lro_mgr *lro_mgr, + struct skb_frag_struct *frags, + int len, int true_size, + void *mac_hdr, + int hlen, __wsum sum, + u32 ip_summed) +{ + struct sk_buff *skb; + struct skb_frag_struct *skb_frags; + int data_len = len; + int hdr_len = min(len, hlen); + + skb = netdev_alloc_skb(lro_mgr->dev, hlen); + if (!skb) + return NULL; + + skb->len = len; + skb->data_len = len - hdr_len; + skb->truesize += true_size; + skb->tail += hdr_len; + + memcpy(skb->data, mac_hdr, hdr_len); + + skb_frags = skb_shinfo(skb)->frags; + while (data_len > 0) { + *skb_frags = *frags; + data_len -= frags->size; + skb_frags++; + frags++; + skb_shinfo(skb)->nr_frags++; + } + + skb_shinfo(skb)->frags[0].page_offset += hdr_len; + skb_shinfo(skb)->frags[0].size -= hdr_len; + + skb->ip_summed = ip_summed; + skb->csum = sum; + skb->protocol = eth_type_trans(skb, lro_mgr->dev); + return skb; +} + +static struct sk_buff *__lro_proc_segment(struct net_lro_mgr *lro_mgr, + struct skb_frag_struct *frags, + int len, int true_size, + struct vlan_group *vgrp, + u16 vlan_tag, void *priv, __wsum sum) +{ + struct net_lro_desc *lro_desc; + struct iphdr *iph; + struct tcphdr *tcph; + struct sk_buff *skb; + u64 flags; + void *mac_hdr; + int mac_hdr_len; + int hdr_len = LRO_MAX_PG_HLEN; + int vlan_hdr_len = 0; + + if 
(!lro_mgr->get_frag_header + || lro_mgr->get_frag_header(frags, (void *)&mac_hdr, (void *)&iph, + (void *)&tcph, &flags, priv)) { + mac_hdr = page_address(frags->page) + frags->page_offset; + goto out1; + } + + if (!(flags & LRO_IPV4) || !(flags & LRO_TCP)) + goto out1; + + hdr_len = (int)((void *)(tcph) + TCP_HDR_LEN(tcph) - mac_hdr); + mac_hdr_len = (int)((void *)(iph) - mac_hdr); + + lro_desc = lro_get_desc(lro_mgr, lro_mgr->lro_arr, iph, tcph); + if (!lro_desc) + goto out1; + + if (!lro_desc->active) { /* start new lro session */ + if (lro_tcp_ip_check(iph, tcph, len - mac_hdr_len, NULL)) + goto out1; + + skb = lro_gen_skb(lro_mgr, frags, len, true_size, mac_hdr, + hdr_len, 0, lro_mgr->ip_summed_aggr); + if (!skb) + goto out; + + if ((skb->protocol == htons(ETH_P_8021Q)) + && !test_bit(LRO_F_EXTRACT_VLAN_ID, &lro_mgr->features)) + vlan_hdr_len = VLAN_HLEN; + + iph = (void *)(skb->data + vlan_hdr_len); + tcph = (void *)((u8 *)skb->data + vlan_hdr_len + + IP_HDR_LEN(iph)); + + lro_init_desc(lro_desc, skb, iph, tcph, 0, NULL); + LRO_INC_STATS(lro_mgr, aggregated); + return NULL; + } + + if (lro_desc->tcp_next_seq != ntohl(tcph->seq)) + goto out2; + + if (lro_tcp_ip_check(iph, tcph, len - mac_hdr_len, lro_desc)) + goto out2; + + lro_add_frags(lro_desc, len, hdr_len, true_size, frags, iph, tcph); + LRO_INC_STATS(lro_mgr, aggregated); + + if ((skb_shinfo(lro_desc->parent)->nr_frags >= lro_mgr->max_aggr) || + lro_desc->parent->len > (0xFFFF - lro_mgr->dev->mtu)) + lro_flush(lro_mgr, lro_desc); + + return NULL; + +out2: /* send aggregated packets to the stack */ + lro_flush(lro_mgr, lro_desc); + +out1: /* Original packet has to be posted to the stack */ + skb = lro_gen_skb(lro_mgr, frags, len, true_size, mac_hdr, + hdr_len, sum, lro_mgr->ip_summed); +out: + return skb; +} + +void lro_receive_skb(struct net_lro_mgr *lro_mgr, + struct sk_buff *skb, + void *priv) +{ + if (__lro_proc_skb(lro_mgr, skb, NULL, 0, priv)) { + if (test_bit(LRO_F_NAPI, &lro_mgr->features)) + netif_receive_skb(skb); + else + netif_rx(skb); + } +} +EXPORT_SYMBOL(lro_receive_skb); + +void lro_vlan_hwaccel_receive_skb(struct net_lro_mgr *lro_mgr, + struct sk_buff *skb, + struct vlan_group *vgrp, + u16 vlan_tag, + void *priv) +{ + if (__lro_proc_skb(lro_mgr, skb, vgrp, vlan_tag, priv)) { + if (test_bit(LRO_F_NAPI, &lro_mgr->features)) + vlan_hwaccel_receive_skb(skb, vgrp, vlan_tag); + else + vlan_hwaccel_rx(skb, vgrp, vlan_tag); + } +} +EXPORT_SYMBOL(lro_vlan_hwaccel_receive_skb); + +void lro_receive_frags(struct net_lro_mgr *lro_mgr, + struct skb_frag_struct *frags, + int len, int true_size, void *priv, __wsum sum) +{ + struct sk_buff *skb; + + skb = __lro_proc_segment(lro_mgr, frags, len, true_size, NULL, 0, + priv, sum); + if (!skb) + return; + + if (test_bit(LRO_F_NAPI, &lro_mgr->features)) + netif_receive_skb(skb); + else + netif_rx(skb); +} +EXPORT_SYMBOL(lro_receive_frags); + +void lro_vlan_hwaccel_receive_frags(struct net_lro_mgr *lro_mgr, + struct skb_frag_struct *frags, + int len, int true_size, + struct vlan_group *vgrp, + u16 vlan_tag, void *priv, __wsum sum) +{ + struct sk_buff *skb; + + skb = __lro_proc_segment(lro_mgr, frags, len, true_size, vgrp, + vlan_tag, priv, sum); + if (!skb) + return; + + if (test_bit(LRO_F_NAPI, &lro_mgr->features)) + vlan_hwaccel_receive_skb(skb, vgrp, vlan_tag); + else + vlan_hwaccel_rx(skb, vgrp, vlan_tag); +} +EXPORT_SYMBOL(lro_vlan_hwaccel_receive_frags); + +void lro_flush_all(struct net_lro_mgr *lro_mgr) +{ + int i; + struct net_lro_desc *lro_desc = lro_mgr->lro_arr; + + for (i = 
0; i < lro_mgr->max_desc; i++) { + if (lro_desc[i].active) + lro_flush(lro_mgr, &lro_desc[i]); + } +} +EXPORT_SYMBOL(lro_flush_all); + +void lro_flush_pkt(struct net_lro_mgr *lro_mgr, + struct iphdr *iph, struct tcphdr *tcph) +{ + struct net_lro_desc *lro_desc; + + lro_desc = lro_get_desc(lro_mgr, lro_mgr->lro_arr, iph, tcph); + if (lro_desc->active) + lro_flush(lro_mgr, lro_desc); +} +EXPORT_SYMBOL(lro_flush_pkt); diff --git a/net/ipv4/inet_timewait_sock.c b/net/ipv4/inet_timewait_sock.c index 2586df09b9b6cac0bfe2c5679e02d35038d8449e..4e189e28f3062f45f6aeca63476423ca386e757a 100644 --- a/net/ipv4/inet_timewait_sock.c +++ b/net/ipv4/inet_timewait_sock.c @@ -8,7 +8,7 @@ * From code orinally in TCP */ - +#include #include #include #include @@ -292,7 +292,7 @@ void inet_twsk_schedule(struct inet_timewait_sock *tw, if (timeo >= timewait_len) { slot = INET_TWDR_TWKILL_SLOTS - 1; } else { - slot = (timeo + twdr->period - 1) / twdr->period; + slot = DIV_ROUND_UP(timeo, twdr->period); if (slot >= INET_TWDR_TWKILL_SLOTS) slot = INET_TWDR_TWKILL_SLOTS - 1; } diff --git a/net/ipv4/ip_forward.c b/net/ipv4/ip_forward.c index 8c95cf09f87ac213e81b0a2421d568a4a7a6bd92..afbf938836f52505d4321d6eba99e75d26162789 100644 --- a/net/ipv4/ip_forward.c +++ b/net/ipv4/ip_forward.c @@ -105,7 +105,7 @@ int ip_forward(struct sk_buff *skb) * We now generate an ICMP HOST REDIRECT giving the route * we calculated. */ - if (rt->rt_flags&RTCF_DOREDIRECT && !opt->srr) + if (rt->rt_flags&RTCF_DOREDIRECT && !opt->srr && !skb->sp) ip_rt_send_redirect(skb); skb->priority = rt_tos2priority(iph->tos); diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c index 0231bdcb2ab7620b55c1aaeda45d256c5a0de2ef..fabb86db763b01efb6fde8d29178420e275f9dd7 100644 --- a/net/ipv4/ip_fragment.c +++ b/net/ipv4/ip_fragment.c @@ -292,7 +292,7 @@ static void ip_expire(unsigned long arg) if ((qp->last_in&FIRST_IN) && qp->fragments != NULL) { struct sk_buff *head = qp->fragments; /* Send an ICMP "Fragment Reassembly Timeout" message. 
*/ - if ((head->dev = dev_get_by_index(qp->iif)) != NULL) { + if ((head->dev = dev_get_by_index(&init_net, qp->iif)) != NULL) { icmp_send(head, ICMP_TIME_EXCEEDED, ICMP_EXC_FRAGTIME, 0); dev_put(head->dev); } diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c index 5c14ed63e56c97c8b53f188fd0bfd6997a3e8f8c..f151900efaf936b47fdec786f8bfd8152435b95b 100644 --- a/net/ipv4/ip_gre.c +++ b/net/ipv4/ip_gre.c @@ -262,7 +262,7 @@ static struct ip_tunnel * ipgre_tunnel_locate(struct ip_tunnel_parm *parms, int int i; for (i=1; i<100; i++) { sprintf(name, "gre%d", i); - if (__dev_get_by_name(name) == NULL) + if (__dev_get_by_name(&init_net, name) == NULL) break; } if (i==100) @@ -684,7 +684,7 @@ static int ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev) goto tx_error; } - if (dev->hard_header) { + if (dev->header_ops) { gre_hlen = 0; tiph = (struct iphdr*)skb->data; } else { @@ -1063,8 +1063,9 @@ static int ipgre_tunnel_change_mtu(struct net_device *dev, int new_mtu) */ -static int ipgre_header(struct sk_buff *skb, struct net_device *dev, unsigned short type, - void *daddr, void *saddr, unsigned len) +static int ipgre_header(struct sk_buff *skb, struct net_device *dev, + unsigned short type, + const void *daddr, const void *saddr, unsigned len) { struct ip_tunnel *t = netdev_priv(dev); struct iphdr *iph = (struct iphdr *)skb_push(skb, t->hlen); @@ -1091,6 +1092,10 @@ static int ipgre_header(struct sk_buff *skb, struct net_device *dev, unsigned sh return -t->hlen; } +static const struct header_ops ipgre_header_ops = { + .create = ipgre_header, +}; + static int ipgre_open(struct net_device *dev) { struct ip_tunnel *t = netdev_priv(dev); @@ -1132,7 +1137,6 @@ static int ipgre_close(struct net_device *dev) static void ipgre_tunnel_setup(struct net_device *dev) { - SET_MODULE_OWNER(dev); dev->uninit = ipgre_tunnel_uninit; dev->destructor = free_netdev; dev->hard_start_xmit = ipgre_tunnel_xmit; @@ -1188,7 +1192,7 @@ static int ipgre_tunnel_init(struct net_device *dev) if (!iph->saddr) return -EINVAL; dev->flags = IFF_BROADCAST; - dev->hard_header = ipgre_header; + dev->header_ops = &ipgre_header_ops; dev->open = ipgre_open; dev->stop = ipgre_close; } @@ -1196,7 +1200,7 @@ static int ipgre_tunnel_init(struct net_device *dev) } if (!tdev && tunnel->parms.link) - tdev = __dev_get_by_index(tunnel->parms.link); + tdev = __dev_get_by_index(&init_net, tunnel->parms.link); if (tdev) { hlen = tdev->hard_header_len; diff --git a/net/ipv4/ip_input.c b/net/ipv4/ip_input.c index 97069399d8645ffe3e4af364ba353db7a1011687..41d8964591e785420f6800ab3329d87cc2884704 100644 --- a/net/ipv4/ip_input.c +++ b/net/ipv4/ip_input.c @@ -382,6 +382,9 @@ int ip_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt, struct iphdr *iph; u32 len; + if (dev->nd_net != &init_net) + goto drop; + /* When the interface is in promisc. mode, drop all the crap * that it receives, do not try to analyse it. */ diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c index 0f1d7beacf782042b4ed9115b4441f88214f29f3..699f06781fd891df9882d1c77c0c335f8d7a7252 100644 --- a/net/ipv4/ip_output.c +++ b/net/ipv4/ip_output.c @@ -169,7 +169,7 @@ static inline int ip_finish_output2(struct sk_buff *skb) IP_INC_STATS(IPSTATS_MIB_OUTBCASTPKTS); /* Be paranoid, rather than too clever. 
*/ - if (unlikely(skb_headroom(skb) < hh_len && dev->hard_header)) { + if (unlikely(skb_headroom(skb) < hh_len && dev->header_ops)) { struct sk_buff *skb2; skb2 = skb_realloc_headroom(skb, LL_RESERVED_SPACE(dev)); @@ -1261,6 +1261,10 @@ int ip_push_pending_frames(struct sock *sk) skb->priority = sk->sk_priority; skb->dst = dst_clone(&rt->u.dst); + if (iph->protocol == IPPROTO_ICMP) + icmp_out_count(((struct icmphdr *) + skb_transport_header(skb))->type); + /* Netfilter gets whole the not fragmented skb. */ err = NF_HOOK(PF_INET, NF_IP_LOCAL_OUT, skb, NULL, skb->dst->dev, dst_output); diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c index 6b420aedcdcf55a5578eed3a07c4f54f4ffdb7fd..f51f20e487c8c74ec078781790d35e7302ba6cb3 100644 --- a/net/ipv4/ip_sockglue.c +++ b/net/ipv4/ip_sockglue.c @@ -602,7 +602,7 @@ static int do_ip_setsockopt(struct sock *sk, int level, dev_put(dev); } } else - dev = __dev_get_by_index(mreq.imr_ifindex); + dev = __dev_get_by_index(&init_net, mreq.imr_ifindex); err = -EADDRNOTAVAIL; @@ -659,7 +659,7 @@ static int do_ip_setsockopt(struct sock *sk, int level, break; } msf = kmalloc(optlen, GFP_KERNEL); - if (msf == 0) { + if (!msf) { err = -ENOBUFS; break; } @@ -816,7 +816,7 @@ static int do_ip_setsockopt(struct sock *sk, int level, break; } gsf = kmalloc(optlen,GFP_KERNEL); - if (gsf == 0) { + if (!gsf) { err = -ENOBUFS; break; } @@ -836,7 +836,7 @@ static int do_ip_setsockopt(struct sock *sk, int level, } msize = IP_MSFILTER_SIZE(gsf->gf_numsrc); msf = kmalloc(msize,GFP_KERNEL); - if (msf == 0) { + if (!msf) { err = -ENOBUFS; goto mc_msf_out; } diff --git a/net/ipv4/ipcomp.c b/net/ipv4/ipcomp.c index e787044a85148ebceaafdbb964c7dd15c2e99daa..0bfeb02a5f87b7f99676fd5d73d89a4f9db70ba8 100644 --- a/net/ipv4/ipcomp.c +++ b/net/ipv4/ipcomp.c @@ -75,7 +75,6 @@ static int ipcomp_decompress(struct xfrm_state *x, struct sk_buff *skb) static int ipcomp_input(struct xfrm_state *x, struct sk_buff *skb) { int err = -ENOMEM; - struct iphdr *iph; struct ip_comp_hdr *ipch; if (skb_linearize_cow(skb)) @@ -84,12 +83,14 @@ static int ipcomp_input(struct xfrm_state *x, struct sk_buff *skb) skb->ip_summed = CHECKSUM_NONE; /* Remove ipcomp header and decompress original payload */ - iph = ip_hdr(skb); ipch = (void *)skb->data; - iph->protocol = ipch->nexthdr; skb->transport_header = skb->network_header + sizeof(*ipch); __skb_pull(skb, sizeof(*ipch)); err = ipcomp_decompress(x, skb); + if (err) + goto out; + + err = ipch->nexthdr; out: return err; @@ -98,10 +99,9 @@ static int ipcomp_input(struct xfrm_state *x, struct sk_buff *skb) static int ipcomp_compress(struct xfrm_state *x, struct sk_buff *skb) { struct ipcomp_data *ipcd = x->data; - const int ihlen = ip_hdrlen(skb); - const int plen = skb->len - ihlen; + const int plen = skb->len; int dlen = IPCOMP_SCRATCH_SIZE; - u8 *start = skb->data + ihlen; + u8 *start = skb->data; const int cpu = get_cpu(); u8 *scratch = *per_cpu_ptr(ipcomp_scratches, cpu); struct crypto_comp *tfm = *per_cpu_ptr(ipcd->tfms, cpu); @@ -118,7 +118,7 @@ static int ipcomp_compress(struct xfrm_state *x, struct sk_buff *skb) memcpy(start + sizeof(struct ip_comp_hdr), scratch, dlen); put_cpu(); - pskb_trim(skb, ihlen + dlen + sizeof(struct ip_comp_hdr)); + pskb_trim(skb, dlen + sizeof(struct ip_comp_hdr)); return 0; out: @@ -131,12 +131,8 @@ static int ipcomp_output(struct xfrm_state *x, struct sk_buff *skb) int err; struct ip_comp_hdr *ipch; struct ipcomp_data *ipcd = x->data; - int hdr_len = 0; - struct iphdr *iph = ip_hdr(skb); - iph->tot_len = 
htons(skb->len); - hdr_len = iph->ihl * 4; - if ((skb->len - hdr_len) < ipcd->threshold) { + if (skb->len < ipcd->threshold) { /* Don't bother compressing */ goto out_ok; } @@ -145,25 +141,19 @@ static int ipcomp_output(struct xfrm_state *x, struct sk_buff *skb) goto out_ok; err = ipcomp_compress(x, skb); - iph = ip_hdr(skb); if (err) { goto out_ok; } /* Install ipcomp header, convert into ipcomp datagram. */ - iph->tot_len = htons(skb->len); - ipch = (struct ip_comp_hdr *)((char *)iph + iph->ihl * 4); - ipch->nexthdr = iph->protocol; + ipch = ip_comp_hdr(skb); + ipch->nexthdr = *skb_mac_header(skb); ipch->flags = 0; ipch->cpi = htons((u16 )ntohl(x->id.spi)); - iph->protocol = IPPROTO_COMP; - ip_send_check(iph); - return 0; - + *skb_mac_header(skb) = IPPROTO_COMP; out_ok: - if (x->props.mode == XFRM_MODE_TUNNEL) - ip_send_check(iph); + skb_push(skb, -skb_network_offset(skb)); return 0; } diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c index c5b24707753997db84ddcd310ae876f94d8243eb..c5c107a01823097120b21698ed24021647abb24e 100644 --- a/net/ipv4/ipconfig.c +++ b/net/ipv4/ipconfig.c @@ -55,6 +55,7 @@ #include #include #include +#include #include #include #include @@ -189,11 +190,15 @@ static int __init ic_open_devs(void) rtnl_lock(); /* bring loopback device up first */ - if (dev_change_flags(&loopback_dev, loopback_dev.flags | IFF_UP) < 0) - printk(KERN_ERR "IP-Config: Failed to open %s\n", loopback_dev.name); + for_each_netdev(&init_net, dev) { + if (!(dev->flags & IFF_LOOPBACK)) + continue; + if (dev_change_flags(dev, dev->flags | IFF_UP) < 0) + printk(KERN_ERR "IP-Config: Failed to open %s\n", dev->name); + } - for_each_netdev(dev) { - if (dev == &loopback_dev) + for_each_netdev(&init_net, dev) { + if (dev->flags & IFF_LOOPBACK) continue; if (user_dev_name[0] ? !strcmp(dev->name, user_dev_name) : (!(dev->flags & IFF_LOOPBACK) && @@ -425,6 +430,9 @@ ic_rarp_recv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt unsigned char *sha, *tha; /* s for "source", t for "target" */ struct ic_device *d; + if (dev->nd_net != &init_net) + goto drop; + if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL) return NET_RX_DROP; @@ -749,8 +757,8 @@ static void __init ic_bootp_send_if(struct ic_device *d, unsigned long jiffies_d /* Chain packet down the line... */ skb->dev = dev; skb->protocol = htons(ETH_P_IP); - if ((dev->hard_header && - dev->hard_header(skb, dev, ntohs(skb->protocol), dev->broadcast, dev->dev_addr, skb->len) < 0) || + if (dev_hard_header(skb, dev, ntohs(skb->protocol), + dev->broadcast, dev->dev_addr, skb->len) < 0 || dev_queue_xmit(skb) < 0) printk("E"); } @@ -834,6 +842,9 @@ static int __init ic_bootp_recv(struct sk_buff *skb, struct net_device *dev, str struct ic_device *d; int len, ext_len; + if (dev->nd_net != &init_net) + goto drop; + /* Perform verifications before taking the lock. 
*/ if (skb->pkt_type == PACKET_OTHERHOST) goto drop; @@ -1253,7 +1264,7 @@ static int __init ip_auto_config(void) __be32 addr; #ifdef CONFIG_PROC_FS - proc_net_fops_create("pnp", S_IRUGO, &pnp_seq_fops); + proc_net_fops_create(&init_net, "pnp", S_IRUGO, &pnp_seq_fops); #endif /* CONFIG_PROC_FS */ if (!ic_enable) diff --git a/net/ipv4/ipip.c b/net/ipv4/ipip.c index 396437242a1b36186db17ff5aaa1bf5d60a1734b..5cd5bbe1379aa10c343631bdd05dfa322041d347 100644 --- a/net/ipv4/ipip.c +++ b/net/ipv4/ipip.c @@ -225,7 +225,7 @@ static struct ip_tunnel * ipip_tunnel_locate(struct ip_tunnel_parm *parms, int c int i; for (i=1; i<100; i++) { sprintf(name, "tunl%d", i); - if (__dev_get_by_name(name) == NULL) + if (__dev_get_by_name(&init_net, name) == NULL) break; } if (i==100) @@ -237,7 +237,6 @@ static struct ip_tunnel * ipip_tunnel_locate(struct ip_tunnel_parm *parms, int c return NULL; nt = netdev_priv(dev); - SET_MODULE_OWNER(dev); dev->init = ipip_tunnel_init; nt->parms = *parms; @@ -775,7 +774,6 @@ static int ipip_tunnel_change_mtu(struct net_device *dev, int new_mtu) static void ipip_tunnel_setup(struct net_device *dev) { - SET_MODULE_OWNER(dev); dev->uninit = ipip_tunnel_uninit; dev->hard_start_xmit = ipip_tunnel_xmit; dev->get_stats = ipip_tunnel_get_stats; @@ -822,7 +820,7 @@ static int ipip_tunnel_init(struct net_device *dev) } if (!tdev && tunnel->parms.link) - tdev = __dev_get_by_index(tunnel->parms.link); + tdev = __dev_get_by_index(&init_net, tunnel->parms.link); if (tdev) { dev->hard_header_len = tdev->hard_header_len + sizeof(struct iphdr); diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c index 7003cc1b7fe26c5c87c055d15f489214f26ae356..37bb497d92af2af2d714ed4d4694d25a4b9c5471 100644 --- a/net/ipv4/ipmr.c +++ b/net/ipv4/ipmr.c @@ -49,6 +49,7 @@ #include #include #include +#include #include #include #include @@ -124,7 +125,7 @@ struct net_device *ipmr_new_tunnel(struct vifctl *v) { struct net_device *dev; - dev = __dev_get_by_name("tunl0"); + dev = __dev_get_by_name(&init_net, "tunl0"); if (dev) { int err; @@ -148,7 +149,7 @@ struct net_device *ipmr_new_tunnel(struct vifctl *v) dev = NULL; - if (err == 0 && (dev = __dev_get_by_name(p.name)) != NULL) { + if (err == 0 && (dev = __dev_get_by_name(&init_net, p.name)) != NULL) { dev->flags |= IFF_MULTICAST; in_dev = __in_dev_get_rtnl(dev); @@ -1082,13 +1083,18 @@ int ipmr_ioctl(struct sock *sk, int cmd, void __user *arg) static int ipmr_device_event(struct notifier_block *this, unsigned long event, void *ptr) { + struct net_device *dev = ptr; struct vif_device *v; int ct; + + if (dev->nd_net != &init_net) + return NOTIFY_DONE; + if (event != NETDEV_UNREGISTER) return NOTIFY_DONE; v=&vif_table[0]; for (ct=0;ctdev==ptr) + if (v->dev==dev) vif_delete(ct); } return NOTIFY_DONE; @@ -1708,26 +1714,8 @@ static const struct seq_operations ipmr_vif_seq_ops = { static int ipmr_vif_open(struct inode *inode, struct file *file) { - struct seq_file *seq; - int rc = -ENOMEM; - struct ipmr_vif_iter *s = kmalloc(sizeof(*s), GFP_KERNEL); - - if (!s) - goto out; - - rc = seq_open(file, &ipmr_vif_seq_ops); - if (rc) - goto out_kfree; - - s->ct = 0; - seq = file->private_data; - seq->private = s; -out: - return rc; -out_kfree: - kfree(s); - goto out; - + return seq_open_private(file, &ipmr_vif_seq_ops, + sizeof(struct ipmr_vif_iter)); } static const struct file_operations ipmr_vif_fops = { @@ -1871,25 +1859,8 @@ static const struct seq_operations ipmr_mfc_seq_ops = { static int ipmr_mfc_open(struct inode *inode, struct file *file) { - struct seq_file *seq; - int rc = 
-ENOMEM; - struct ipmr_mfc_iter *s = kmalloc(sizeof(*s), GFP_KERNEL); - - if (!s) - goto out; - - rc = seq_open(file, &ipmr_mfc_seq_ops); - if (rc) - goto out_kfree; - - seq = file->private_data; - seq->private = s; -out: - return rc; -out_kfree: - kfree(s); - goto out; - + return seq_open_private(file, &ipmr_mfc_seq_ops, + sizeof(struct ipmr_mfc_iter)); } static const struct file_operations ipmr_mfc_fops = { @@ -1922,7 +1893,7 @@ void __init ip_mr_init(void) ipmr_expire_timer.function=ipmr_expire_process; register_netdevice_notifier(&ip_mr_notifier); #ifdef CONFIG_PROC_FS - proc_net_fops_create("ip_mr_vif", 0, &ipmr_vif_fops); - proc_net_fops_create("ip_mr_cache", 0, &ipmr_mfc_fops); + proc_net_fops_create(&init_net, "ip_mr_vif", 0, &ipmr_vif_fops); + proc_net_fops_create(&init_net, "ip_mr_cache", 0, &ipmr_mfc_fops); #endif } diff --git a/net/ipv4/ipvs/ip_vs_app.c b/net/ipv4/ipvs/ip_vs_app.c index 8d6901d4e94f897d850a5399214d82e6dd48c3d5..341474eefa557846a9332348bea21cd247106e62 100644 --- a/net/ipv4/ipvs/ip_vs_app.c +++ b/net/ipv4/ipvs/ip_vs_app.c @@ -25,6 +25,7 @@ #include #include #include +#include #include #include #include @@ -616,12 +617,12 @@ int ip_vs_skb_replace(struct sk_buff *skb, gfp_t pri, int ip_vs_app_init(void) { /* we will replace it with proc_net_ipvs_create() soon */ - proc_net_fops_create("ip_vs_app", 0, &ip_vs_app_fops); + proc_net_fops_create(&init_net, "ip_vs_app", 0, &ip_vs_app_fops); return 0; } void ip_vs_app_cleanup(void) { - proc_net_remove("ip_vs_app"); + proc_net_remove(&init_net, "ip_vs_app"); } diff --git a/net/ipv4/ipvs/ip_vs_conn.c b/net/ipv4/ipvs/ip_vs_conn.c index d612a6a5d957e591c27de755d41bc2efaf6c61cd..4b702f708d30637954662a1ccc7f66cd95586569 100644 --- a/net/ipv4/ipvs/ip_vs_conn.c +++ b/net/ipv4/ipvs/ip_vs_conn.c @@ -35,6 +35,7 @@ #include #include +#include #include @@ -922,7 +923,7 @@ int ip_vs_conn_init(void) rwlock_init(&__ip_vs_conntbl_lock_array[idx].l); } - proc_net_fops_create("ip_vs_conn", 0, &ip_vs_conn_fops); + proc_net_fops_create(&init_net, "ip_vs_conn", 0, &ip_vs_conn_fops); /* calculate the random value for connection hash */ get_random_bytes(&ip_vs_conn_rnd, sizeof(ip_vs_conn_rnd)); @@ -938,6 +939,6 @@ void ip_vs_conn_cleanup(void) /* Release the empty cache */ kmem_cache_destroy(ip_vs_conn_cachep); - proc_net_remove("ip_vs_conn"); + proc_net_remove(&init_net, "ip_vs_conn"); vfree(ip_vs_conn_tab); } diff --git a/net/ipv4/ipvs/ip_vs_core.c b/net/ipv4/ipvs/ip_vs_core.c index f005a2f929f4121825c2aa94d15d79b2ea558608..fbca2a2ff29fa5c5435f9023b8da89b36641833d 100644 --- a/net/ipv4/ipvs/ip_vs_core.c +++ b/net/ipv4/ipvs/ip_vs_core.c @@ -961,7 +961,7 @@ ip_vs_in(unsigned int hooknum, struct sk_buff **pskb, * ... don't know why 1st test DOES NOT include 2nd (?) 
*/ if (unlikely(skb->pkt_type != PACKET_HOST - || skb->dev == &loopback_dev || skb->sk)) { + || skb->dev->flags & IFF_LOOPBACK || skb->sk)) { IP_VS_DBG(12, "packet type=%d proto=%d daddr=%d.%d.%d.%d ignored\n", skb->pkt_type, ip_hdr(skb)->protocol, diff --git a/net/ipv4/ipvs/ip_vs_ctl.c b/net/ipv4/ipvs/ip_vs_ctl.c index f656d41d8d419ef626e0e22e9caf812d70eba3aa..7345fc252a2315943b77c084a05ecaa45668d3e5 100644 --- a/net/ipv4/ipvs/ip_vs_ctl.c +++ b/net/ipv4/ipvs/ip_vs_ctl.c @@ -35,6 +35,7 @@ #include #include +#include #include #include #include @@ -1791,24 +1792,8 @@ static const struct seq_operations ip_vs_info_seq_ops = { static int ip_vs_info_open(struct inode *inode, struct file *file) { - struct seq_file *seq; - int rc = -ENOMEM; - struct ip_vs_iter *s = kzalloc(sizeof(*s), GFP_KERNEL); - - if (!s) - goto out; - - rc = seq_open(file, &ip_vs_info_seq_ops); - if (rc) - goto out_kfree; - - seq = file->private_data; - seq->private = s; -out: - return rc; -out_kfree: - kfree(s); - goto out; + return seq_open_private(file, &ip_vs_info_seq_ops, + sizeof(struct ip_vs_iter)); } static const struct file_operations ip_vs_info_fops = { @@ -2356,8 +2341,8 @@ int ip_vs_control_init(void) return ret; } - proc_net_fops_create("ip_vs", 0, &ip_vs_info_fops); - proc_net_fops_create("ip_vs_stats",0, &ip_vs_stats_fops); + proc_net_fops_create(&init_net, "ip_vs", 0, &ip_vs_info_fops); + proc_net_fops_create(&init_net, "ip_vs_stats",0, &ip_vs_stats_fops); sysctl_header = register_sysctl_table(vs_root_table); @@ -2390,8 +2375,8 @@ void ip_vs_control_cleanup(void) cancel_work_sync(&defense_work.work); ip_vs_kill_estimator(&ip_vs_stats); unregister_sysctl_table(sysctl_header); - proc_net_remove("ip_vs_stats"); - proc_net_remove("ip_vs"); + proc_net_remove(&init_net, "ip_vs_stats"); + proc_net_remove(&init_net, "ip_vs"); nf_unregister_sockopt(&ip_vs_sockopts); LeaveFunction(2); } diff --git a/net/ipv4/ipvs/ip_vs_lblcr.c b/net/ipv4/ipvs/ip_vs_lblcr.c index 6225acac7a3baf322e96db668ff0b2d11483b9af..6a1fec416eafc6fc0076dcb06632559238cd6da8 100644 --- a/net/ipv4/ipvs/ip_vs_lblcr.c +++ b/net/ipv4/ipvs/ip_vs_lblcr.c @@ -50,6 +50,7 @@ #include /* for proc_net_create/proc_net_remove */ #include +#include #include @@ -843,7 +844,7 @@ static int __init ip_vs_lblcr_init(void) INIT_LIST_HEAD(&ip_vs_lblcr_scheduler.n_list); sysctl_header = register_sysctl_table(lblcr_root_table); #ifdef CONFIG_IP_VS_LBLCR_DEBUG - proc_net_create("ip_vs_lblcr", 0, ip_vs_lblcr_getinfo); + proc_net_create(&init_net, "ip_vs_lblcr", 0, ip_vs_lblcr_getinfo); #endif return register_ip_vs_scheduler(&ip_vs_lblcr_scheduler); } @@ -852,7 +853,7 @@ static int __init ip_vs_lblcr_init(void) static void __exit ip_vs_lblcr_cleanup(void) { #ifdef CONFIG_IP_VS_LBLCR_DEBUG - proc_net_remove("ip_vs_lblcr"); + proc_net_remove(&init_net, "ip_vs_lblcr"); #endif unregister_sysctl_table(sysctl_header); unregister_ip_vs_scheduler(&ip_vs_lblcr_scheduler); diff --git a/net/ipv4/ipvs/ip_vs_sync.c b/net/ipv4/ipvs/ip_vs_sync.c index 356f067484e393634176357f193b9565cfdb2814..1960747f354c8c89966b920426d7487640476617 100644 --- a/net/ipv4/ipvs/ip_vs_sync.c +++ b/net/ipv4/ipvs/ip_vs_sync.c @@ -387,7 +387,7 @@ static int set_mcast_if(struct sock *sk, char *ifname) struct net_device *dev; struct inet_sock *inet = inet_sk(sk); - if ((dev = __dev_get_by_name(ifname)) == NULL) + if ((dev = __dev_get_by_name(&init_net, ifname)) == NULL) return -ENODEV; if (sk->sk_bound_dev_if && dev->ifindex != sk->sk_bound_dev_if) @@ -412,7 +412,7 @@ static int set_sync_mesg_maxlen(int sync_state) 
int num; if (sync_state == IP_VS_STATE_MASTER) { - if ((dev = __dev_get_by_name(ip_vs_master_mcast_ifn)) == NULL) + if ((dev = __dev_get_by_name(&init_net, ip_vs_master_mcast_ifn)) == NULL) return -ENODEV; num = (dev->mtu - sizeof(struct iphdr) - @@ -423,7 +423,7 @@ static int set_sync_mesg_maxlen(int sync_state) IP_VS_DBG(7, "setting the maximum length of sync sending " "message %d.\n", sync_send_mesg_maxlen); } else if (sync_state == IP_VS_STATE_BACKUP) { - if ((dev = __dev_get_by_name(ip_vs_backup_mcast_ifn)) == NULL) + if ((dev = __dev_get_by_name(&init_net, ip_vs_backup_mcast_ifn)) == NULL) return -ENODEV; sync_recv_mesg_maxlen = dev->mtu - @@ -451,7 +451,7 @@ join_mcast_group(struct sock *sk, struct in_addr *addr, char *ifname) memset(&mreq, 0, sizeof(mreq)); memcpy(&mreq.imr_multiaddr, addr, sizeof(struct in_addr)); - if ((dev = __dev_get_by_name(ifname)) == NULL) + if ((dev = __dev_get_by_name(&init_net, ifname)) == NULL) return -ENODEV; if (sk->sk_bound_dev_if && dev->ifindex != sk->sk_bound_dev_if) return -EINVAL; @@ -472,7 +472,7 @@ static int bind_mcastif_addr(struct socket *sock, char *ifname) __be32 addr; struct sockaddr_in sin; - if ((dev = __dev_get_by_name(ifname)) == NULL) + if ((dev = __dev_get_by_name(&init_net, ifname)) == NULL) return -ENODEV; addr = inet_select_addr(dev, 0, RT_SCOPE_UNIVERSE); diff --git a/net/ipv4/netfilter/ip_queue.c b/net/ipv4/netfilter/ip_queue.c index 702d94db19b9f5edb12d0a8c77379014ace18211..23cbfc7c80fd982f7b1f57a51d7cc2125342432a 100644 --- a/net/ipv4/netfilter/ip_queue.c +++ b/net/ipv4/netfilter/ip_queue.c @@ -24,6 +24,7 @@ #include #include #include +#include #include #include @@ -249,10 +250,8 @@ ipq_build_packet_message(struct ipq_queue_entry *entry, int *errp) if (entry->info->indev && entry->skb->dev) { pmsg->hw_type = entry->skb->dev->type; - if (entry->skb->dev->hard_header_parse) - pmsg->hw_addrlen = - entry->skb->dev->hard_header_parse(entry->skb, - pmsg->hw_addr); + pmsg->hw_addrlen = dev_parse_header(entry->skb, + pmsg->hw_addr); } if (data_len) @@ -476,7 +475,7 @@ ipq_dev_drop(int ifindex) #define RCV_SKB_FAIL(err) do { netlink_ack(skb, nlh, (err)); return; } while (0) static inline void -ipq_rcv_skb(struct sk_buff *skb) +__ipq_rcv_skb(struct sk_buff *skb) { int status, type, pid, flags, nlmsglen, skblen; struct nlmsghdr *nlh; @@ -534,19 +533,10 @@ ipq_rcv_skb(struct sk_buff *skb) } static void -ipq_rcv_sk(struct sock *sk, int len) +ipq_rcv_skb(struct sk_buff *skb) { - struct sk_buff *skb; - unsigned int qlen; - mutex_lock(&ipqnl_mutex); - - for (qlen = skb_queue_len(&sk->sk_receive_queue); qlen; qlen--) { - skb = skb_dequeue(&sk->sk_receive_queue); - ipq_rcv_skb(skb); - kfree_skb(skb); - } - + __ipq_rcv_skb(skb); mutex_unlock(&ipqnl_mutex); } @@ -556,6 +546,9 @@ ipq_rcv_dev_event(struct notifier_block *this, { struct net_device *dev = ptr; + if (dev->nd_net != &init_net) + return NOTIFY_DONE; + /* Drop any packets associated with the downed device */ if (event == NETDEV_DOWN) ipq_dev_drop(dev->ifindex); @@ -575,7 +568,7 @@ ipq_rcv_nl_event(struct notifier_block *this, if (event == NETLINK_URELEASE && n->protocol == NETLINK_FIREWALL && n->pid) { write_lock_bh(&queue_lock); - if (n->pid == peer_pid) + if ((n->net == &init_net) && (n->pid == peer_pid)) __ipq_reset(); write_unlock_bh(&queue_lock); } @@ -667,14 +660,14 @@ static int __init ip_queue_init(void) struct proc_dir_entry *proc; netlink_register_notifier(&ipq_nl_notifier); - ipqnl = netlink_kernel_create(NETLINK_FIREWALL, 0, ipq_rcv_sk, - NULL, THIS_MODULE); + ipqnl = 
netlink_kernel_create(&init_net, NETLINK_FIREWALL, 0, + ipq_rcv_skb, NULL, THIS_MODULE); if (ipqnl == NULL) { printk(KERN_ERR "ip_queue: failed to create netlink socket\n"); goto cleanup_netlink_notifier; } - proc = proc_net_create(IPQ_PROC_FS_NAME, 0, ipq_get_info); + proc = proc_net_create(&init_net, IPQ_PROC_FS_NAME, 0, ipq_get_info); if (proc) proc->owner = THIS_MODULE; else { @@ -695,8 +688,7 @@ static int __init ip_queue_init(void) cleanup_sysctl: unregister_sysctl_table(ipq_sysctl_header); unregister_netdevice_notifier(&ipq_dev_notifier); - proc_net_remove(IPQ_PROC_FS_NAME); - + proc_net_remove(&init_net, IPQ_PROC_FS_NAME); cleanup_ipqnl: sock_release(ipqnl->sk_socket); mutex_lock(&ipqnl_mutex); @@ -715,7 +707,7 @@ static void __exit ip_queue_fini(void) unregister_sysctl_table(ipq_sysctl_header); unregister_netdevice_notifier(&ipq_dev_notifier); - proc_net_remove(IPQ_PROC_FS_NAME); + proc_net_remove(&init_net, IPQ_PROC_FS_NAME); sock_release(ipqnl->sk_socket); mutex_lock(&ipqnl_mutex); diff --git a/net/ipv4/netfilter/ipt_CLUSTERIP.c b/net/ipv4/netfilter/ipt_CLUSTERIP.c index 69bd362b5fa20d8c1ed42f79aed68f93a431b98f..27f14e1ebd8b342ff3a8f556c3d990cb665e6540 100644 --- a/net/ipv4/netfilter/ipt_CLUSTERIP.c +++ b/net/ipv4/netfilter/ipt_CLUSTERIP.c @@ -25,6 +25,7 @@ #include #include #include +#include #include #define CLUSTERIP_VERSION "0.8" @@ -400,7 +401,7 @@ checkentry(const char *tablename, return false; } - dev = dev_get_by_name(e->ip.iniface); + dev = dev_get_by_name(&init_net, e->ip.iniface); if (!dev) { printk(KERN_WARNING "CLUSTERIP: no such interface %s\n", e->ip.iniface); return false; @@ -726,7 +727,7 @@ static int __init ipt_clusterip_init(void) goto cleanup_target; #ifdef CONFIG_PROC_FS - clusterip_procdir = proc_mkdir("ipt_CLUSTERIP", proc_net); + clusterip_procdir = proc_mkdir("ipt_CLUSTERIP", init_net.proc_net); if (!clusterip_procdir) { printk(KERN_ERR "CLUSTERIP: Unable to proc dir entry\n"); ret = -ENOMEM; diff --git a/net/ipv4/netfilter/ipt_MASQUERADE.c b/net/ipv4/netfilter/ipt_MASQUERADE.c index 7c4e4be7c8b3e40de45716346e8abf19022e8eb5..3e0b562b2db704571b044b928e6bea712b9167b7 100644 --- a/net/ipv4/netfilter/ipt_MASQUERADE.c +++ b/net/ipv4/netfilter/ipt_MASQUERADE.c @@ -125,6 +125,9 @@ static int masq_device_event(struct notifier_block *this, { const struct net_device *dev = ptr; + if (dev->nd_net != &init_net) + return NOTIFY_DONE; + if (event == NETDEV_DOWN) { /* Device was downed. 
Search entire table for conntracks which were associated with that device, diff --git a/net/ipv4/netfilter/ipt_ULOG.c b/net/ipv4/netfilter/ipt_ULOG.c index 6ca43e4ca7e31e54c6c9d124ba02c77371ea28da..c636d6d63574315e9602cae61cf3ecb83d263830 100644 --- a/net/ipv4/netfilter/ipt_ULOG.c +++ b/net/ipv4/netfilter/ipt_ULOG.c @@ -409,7 +409,8 @@ static int __init ipt_ulog_init(void) for (i = 0; i < ULOG_MAXNLGROUPS; i++) setup_timer(&ulog_buffers[i].timer, ulog_timer, i); - nflognl = netlink_kernel_create(NETLINK_NFLOG, ULOG_MAXNLGROUPS, NULL, + nflognl = netlink_kernel_create(&init_net, + NETLINK_NFLOG, ULOG_MAXNLGROUPS, NULL, NULL, THIS_MODULE); if (!nflognl) return -ENOMEM; diff --git a/net/ipv4/netfilter/ipt_recent.c b/net/ipv4/netfilter/ipt_recent.c index 6d0c0f7364adbcac35fc4a332e11f6841e0430b3..11d39fb5f38bade83b7ce74aa5a435f6e38da854 100644 --- a/net/ipv4/netfilter/ipt_recent.c +++ b/net/ipv4/netfilter/ipt_recent.c @@ -24,6 +24,7 @@ #include #include #include +#include #include #include @@ -380,25 +381,14 @@ static const struct seq_operations recent_seq_ops = { static int recent_seq_open(struct inode *inode, struct file *file) { struct proc_dir_entry *pde = PDE(inode); - struct seq_file *seq; struct recent_iter_state *st; - int ret; - st = kzalloc(sizeof(*st), GFP_KERNEL); + st = __seq_open_private(file, &recent_seq_ops, sizeof(*st)); if (st == NULL) return -ENOMEM; - ret = seq_open(file, &recent_seq_ops); - if (ret) { - kfree(st); - goto out; - } - st->table = pde->data; - seq = file->private_data; - seq->private = st; -out: - return ret; + return 0; } static ssize_t recent_proc_write(struct file *file, const char __user *input, @@ -487,7 +477,7 @@ static int __init ipt_recent_init(void) #ifdef CONFIG_PROC_FS if (err) return err; - proc_dir = proc_mkdir("ipt_recent", proc_net); + proc_dir = proc_mkdir("ipt_recent", init_net.proc_net); if (proc_dir == NULL) { xt_unregister_match(&recent_match); err = -ENOMEM; @@ -501,7 +491,7 @@ static void __exit ipt_recent_exit(void) BUG_ON(!list_empty(&tables)); xt_unregister_match(&recent_match); #ifdef CONFIG_PROC_FS - remove_proc_entry("ipt_recent", proc_net); + remove_proc_entry("ipt_recent", init_net.proc_net); #endif } diff --git a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c index f813e02aab3022d890887f4ae4a50711305a6a2a..2fcb9249a8da8184f22b17734a34863b8f098e12 100644 --- a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c +++ b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c @@ -360,35 +360,32 @@ getorigdst(struct sock *sk, int optval, void __user *user, int *len) #include #include -static int ipv4_tuple_to_nfattr(struct sk_buff *skb, +static int ipv4_tuple_to_nlattr(struct sk_buff *skb, const struct nf_conntrack_tuple *tuple) { - NFA_PUT(skb, CTA_IP_V4_SRC, sizeof(u_int32_t), + NLA_PUT(skb, CTA_IP_V4_SRC, sizeof(u_int32_t), &tuple->src.u3.ip); - NFA_PUT(skb, CTA_IP_V4_DST, sizeof(u_int32_t), + NLA_PUT(skb, CTA_IP_V4_DST, sizeof(u_int32_t), &tuple->dst.u3.ip); return 0; -nfattr_failure: +nla_put_failure: return -1; } -static const size_t cta_min_ip[CTA_IP_MAX] = { - [CTA_IP_V4_SRC-1] = sizeof(u_int32_t), - [CTA_IP_V4_DST-1] = sizeof(u_int32_t), +static const struct nla_policy ipv4_nla_policy[CTA_IP_MAX+1] = { + [CTA_IP_V4_SRC] = { .type = NLA_U32 }, + [CTA_IP_V4_DST] = { .type = NLA_U32 }, }; -static int ipv4_nfattr_to_tuple(struct nfattr *tb[], +static int ipv4_nlattr_to_tuple(struct nlattr *tb[], struct nf_conntrack_tuple *t) { - if (!tb[CTA_IP_V4_SRC-1] || !tb[CTA_IP_V4_DST-1]) + if 
(!tb[CTA_IP_V4_SRC] || !tb[CTA_IP_V4_DST]) return -EINVAL; - if (nfattr_bad_size(tb, CTA_IP_MAX, cta_min_ip)) - return -EINVAL; - - t->src.u3.ip = *(__be32 *)NFA_DATA(tb[CTA_IP_V4_SRC-1]); - t->dst.u3.ip = *(__be32 *)NFA_DATA(tb[CTA_IP_V4_DST-1]); + t->src.u3.ip = *(__be32 *)nla_data(tb[CTA_IP_V4_SRC]); + t->dst.u3.ip = *(__be32 *)nla_data(tb[CTA_IP_V4_DST]); return 0; } @@ -411,8 +408,9 @@ struct nf_conntrack_l3proto nf_conntrack_l3proto_ipv4 __read_mostly = { .print_conntrack = ipv4_print_conntrack, .get_l4proto = ipv4_get_l4proto, #if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE) - .tuple_to_nfattr = ipv4_tuple_to_nfattr, - .nfattr_to_tuple = ipv4_nfattr_to_tuple, + .tuple_to_nlattr = ipv4_tuple_to_nlattr, + .nlattr_to_tuple = ipv4_nlattr_to_tuple, + .nla_policy = ipv4_nla_policy, #endif #if defined(CONFIG_SYSCTL) && defined(CONFIG_NF_CONNTRACK_PROC_COMPAT) .ctl_table_path = nf_net_ipv4_netfilter_sysctl_path, diff --git a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c index b3dd5de9a25868c26bad3dd97546cbaafbb3fcb7..741f3dfaa5a101efe385dc9c30755a8e8de218dd 100644 --- a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c +++ b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c @@ -11,6 +11,7 @@ #include #include #include +#include #include #include @@ -173,22 +174,8 @@ static const struct seq_operations ct_seq_ops = { static int ct_open(struct inode *inode, struct file *file) { - struct seq_file *seq; - struct ct_iter_state *st; - int ret; - - st = kzalloc(sizeof(struct ct_iter_state), GFP_KERNEL); - if (st == NULL) - return -ENOMEM; - ret = seq_open(file, &ct_seq_ops); - if (ret) - goto out_free; - seq = file->private_data; - seq->private = st; - return ret; -out_free: - kfree(st); - return ret; + return seq_open_private(file, &ct_seq_ops, + sizeof(struct ct_iter_state)); } static const struct file_operations ct_file_ops = { @@ -290,22 +277,8 @@ static const struct seq_operations exp_seq_ops = { static int exp_open(struct inode *inode, struct file *file) { - struct seq_file *seq; - struct ct_expect_iter_state *st; - int ret; - - st = kzalloc(sizeof(struct ct_expect_iter_state), GFP_KERNEL); - if (!st) - return -ENOMEM; - ret = seq_open(file, &exp_seq_ops); - if (ret) - goto out_free; - seq = file->private_data; - seq->private = st; - return ret; -out_free: - kfree(st); - return ret; + return seq_open_private(file, &exp_seq_ops, + sizeof(struct ct_expect_iter_state)); } static const struct file_operations ip_exp_file_ops = { @@ -408,16 +381,16 @@ int __init nf_conntrack_ipv4_compat_init(void) { struct proc_dir_entry *proc, *proc_exp, *proc_stat; - proc = proc_net_fops_create("ip_conntrack", 0440, &ct_file_ops); + proc = proc_net_fops_create(&init_net, "ip_conntrack", 0440, &ct_file_ops); if (!proc) goto err1; - proc_exp = proc_net_fops_create("ip_conntrack_expect", 0440, + proc_exp = proc_net_fops_create(&init_net, "ip_conntrack_expect", 0440, &ip_exp_file_ops); if (!proc_exp) goto err2; - proc_stat = create_proc_entry("ip_conntrack", S_IRUGO, proc_net_stat); + proc_stat = create_proc_entry("ip_conntrack", S_IRUGO, init_net.proc_net_stat); if (!proc_stat) goto err3; @@ -427,16 +400,16 @@ int __init nf_conntrack_ipv4_compat_init(void) return 0; err3: - proc_net_remove("ip_conntrack_expect"); + proc_net_remove(&init_net, "ip_conntrack_expect"); err2: - proc_net_remove("ip_conntrack"); + proc_net_remove(&init_net, "ip_conntrack"); err1: return -ENOMEM; } void __exit nf_conntrack_ipv4_compat_fini(void) 
{ - remove_proc_entry("ip_conntrack", proc_net_stat); - proc_net_remove("ip_conntrack_expect"); - proc_net_remove("ip_conntrack"); + remove_proc_entry("ip_conntrack", init_net.proc_net_stat); + proc_net_remove(&init_net, "ip_conntrack_expect"); + proc_net_remove(&init_net, "ip_conntrack"); } diff --git a/net/ipv4/netfilter/nf_conntrack_proto_icmp.c b/net/ipv4/netfilter/nf_conntrack_proto_icmp.c index 6593fd2c5b109aea332bb95df585617fb25eb305..11fedc73049c109d17fa741ba8ad6222bfd4b61b 100644 --- a/net/ipv4/netfilter/nf_conntrack_proto_icmp.c +++ b/net/ipv4/netfilter/nf_conntrack_proto_icmp.c @@ -232,45 +232,42 @@ icmp_error(struct sk_buff *skb, unsigned int dataoff, #include #include -static int icmp_tuple_to_nfattr(struct sk_buff *skb, +static int icmp_tuple_to_nlattr(struct sk_buff *skb, const struct nf_conntrack_tuple *t) { - NFA_PUT(skb, CTA_PROTO_ICMP_ID, sizeof(u_int16_t), + NLA_PUT(skb, CTA_PROTO_ICMP_ID, sizeof(u_int16_t), &t->src.u.icmp.id); - NFA_PUT(skb, CTA_PROTO_ICMP_TYPE, sizeof(u_int8_t), + NLA_PUT(skb, CTA_PROTO_ICMP_TYPE, sizeof(u_int8_t), &t->dst.u.icmp.type); - NFA_PUT(skb, CTA_PROTO_ICMP_CODE, sizeof(u_int8_t), + NLA_PUT(skb, CTA_PROTO_ICMP_CODE, sizeof(u_int8_t), &t->dst.u.icmp.code); return 0; -nfattr_failure: +nla_put_failure: return -1; } -static const size_t cta_min_proto[CTA_PROTO_MAX] = { - [CTA_PROTO_ICMP_TYPE-1] = sizeof(u_int8_t), - [CTA_PROTO_ICMP_CODE-1] = sizeof(u_int8_t), - [CTA_PROTO_ICMP_ID-1] = sizeof(u_int16_t) +static const struct nla_policy icmp_nla_policy[CTA_PROTO_MAX+1] = { + [CTA_PROTO_ICMP_TYPE] = { .type = NLA_U8 }, + [CTA_PROTO_ICMP_CODE] = { .type = NLA_U8 }, + [CTA_PROTO_ICMP_ID] = { .type = NLA_U16 }, }; -static int icmp_nfattr_to_tuple(struct nfattr *tb[], +static int icmp_nlattr_to_tuple(struct nlattr *tb[], struct nf_conntrack_tuple *tuple) { - if (!tb[CTA_PROTO_ICMP_TYPE-1] - || !tb[CTA_PROTO_ICMP_CODE-1] - || !tb[CTA_PROTO_ICMP_ID-1]) - return -EINVAL; - - if (nfattr_bad_size(tb, CTA_PROTO_MAX, cta_min_proto)) + if (!tb[CTA_PROTO_ICMP_TYPE] + || !tb[CTA_PROTO_ICMP_CODE] + || !tb[CTA_PROTO_ICMP_ID]) return -EINVAL; tuple->dst.u.icmp.type = - *(u_int8_t *)NFA_DATA(tb[CTA_PROTO_ICMP_TYPE-1]); + *(u_int8_t *)nla_data(tb[CTA_PROTO_ICMP_TYPE]); tuple->dst.u.icmp.code = - *(u_int8_t *)NFA_DATA(tb[CTA_PROTO_ICMP_CODE-1]); + *(u_int8_t *)nla_data(tb[CTA_PROTO_ICMP_CODE]); tuple->src.u.icmp.id = - *(__be16 *)NFA_DATA(tb[CTA_PROTO_ICMP_ID-1]); + *(__be16 *)nla_data(tb[CTA_PROTO_ICMP_ID]); if (tuple->dst.u.icmp.type >= sizeof(invmap) || !invmap[tuple->dst.u.icmp.type]) @@ -327,8 +324,9 @@ struct nf_conntrack_l4proto nf_conntrack_l4proto_icmp __read_mostly = .destroy = NULL, .me = NULL, #if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE) - .tuple_to_nfattr = icmp_tuple_to_nfattr, - .nfattr_to_tuple = icmp_nfattr_to_tuple, + .tuple_to_nlattr = icmp_tuple_to_nlattr, + .nlattr_to_tuple = icmp_nlattr_to_tuple, + .nla_policy = icmp_nla_policy, #endif #ifdef CONFIG_SYSCTL .ctl_table_header = &icmp_sysctl_header, diff --git a/net/ipv4/netfilter/nf_nat_core.c b/net/ipv4/netfilter/nf_nat_core.c index deab27facbad5b2822a79aa9ab80ddddc656a211..7221aa20e6ff61175a12c3815aaab3186bc1d8c3 100644 --- a/net/ipv4/netfilter/nf_nat_core.c +++ b/net/ipv4/netfilter/nf_nat_core.c @@ -544,46 +544,46 @@ EXPORT_SYMBOL(nf_nat_protocol_unregister); #if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE) int -nf_nat_port_range_to_nfattr(struct sk_buff *skb, +nf_nat_port_range_to_nlattr(struct sk_buff *skb, const struct nf_nat_range *range) { 
- NFA_PUT(skb, CTA_PROTONAT_PORT_MIN, sizeof(__be16), + NLA_PUT(skb, CTA_PROTONAT_PORT_MIN, sizeof(__be16), &range->min.tcp.port); - NFA_PUT(skb, CTA_PROTONAT_PORT_MAX, sizeof(__be16), + NLA_PUT(skb, CTA_PROTONAT_PORT_MAX, sizeof(__be16), &range->max.tcp.port); return 0; -nfattr_failure: +nla_put_failure: return -1; } -EXPORT_SYMBOL_GPL(nf_nat_port_nfattr_to_range); +EXPORT_SYMBOL_GPL(nf_nat_port_nlattr_to_range); int -nf_nat_port_nfattr_to_range(struct nfattr *tb[], struct nf_nat_range *range) +nf_nat_port_nlattr_to_range(struct nlattr *tb[], struct nf_nat_range *range) { int ret = 0; /* we have to return whether we actually parsed something or not */ - if (tb[CTA_PROTONAT_PORT_MIN-1]) { + if (tb[CTA_PROTONAT_PORT_MIN]) { ret = 1; range->min.tcp.port = - *(__be16 *)NFA_DATA(tb[CTA_PROTONAT_PORT_MIN-1]); + *(__be16 *)nla_data(tb[CTA_PROTONAT_PORT_MIN]); } - if (!tb[CTA_PROTONAT_PORT_MAX-1]) { + if (!tb[CTA_PROTONAT_PORT_MAX]) { if (ret) range->max.tcp.port = range->min.tcp.port; } else { ret = 1; range->max.tcp.port = - *(__be16 *)NFA_DATA(tb[CTA_PROTONAT_PORT_MAX-1]); + *(__be16 *)nla_data(tb[CTA_PROTONAT_PORT_MAX]); } return ret; } -EXPORT_SYMBOL_GPL(nf_nat_port_range_to_nfattr); +EXPORT_SYMBOL_GPL(nf_nat_port_range_to_nlattr); #endif /* Noone using conntrack by the time this called. */ diff --git a/net/ipv4/netfilter/nf_nat_proto_gre.c b/net/ipv4/netfilter/nf_nat_proto_gre.c index 2e40cc83526aaccfb9f27fba36f392cd6061f0cd..d562290b182049ea66ee9cb385547fba737e0ffa 100644 --- a/net/ipv4/netfilter/nf_nat_proto_gre.c +++ b/net/ipv4/netfilter/nf_nat_proto_gre.c @@ -142,8 +142,8 @@ static struct nf_nat_protocol gre __read_mostly = { .in_range = gre_in_range, .unique_tuple = gre_unique_tuple, #if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE) - .range_to_nfattr = nf_nat_port_range_to_nfattr, - .nfattr_to_range = nf_nat_port_nfattr_to_range, + .range_to_nlattr = nf_nat_port_range_to_nlattr, + .nlattr_to_range = nf_nat_port_nlattr_to_range, #endif }; diff --git a/net/ipv4/netfilter/nf_nat_proto_icmp.c b/net/ipv4/netfilter/nf_nat_proto_icmp.c index f71ef9b5f4283f5ec08e824e705f858484074b5a..898d737711552e64b7f39a2022f3b845d5953d79 100644 --- a/net/ipv4/netfilter/nf_nat_proto_icmp.c +++ b/net/ipv4/netfilter/nf_nat_proto_icmp.c @@ -79,7 +79,7 @@ struct nf_nat_protocol nf_nat_protocol_icmp = { .in_range = icmp_in_range, .unique_tuple = icmp_unique_tuple, #if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE) - .range_to_nfattr = nf_nat_port_range_to_nfattr, - .nfattr_to_range = nf_nat_port_nfattr_to_range, + .range_to_nlattr = nf_nat_port_range_to_nlattr, + .nlattr_to_range = nf_nat_port_nlattr_to_range, #endif }; diff --git a/net/ipv4/netfilter/nf_nat_proto_tcp.c b/net/ipv4/netfilter/nf_nat_proto_tcp.c index 123c95913f2815aa6f2cde7071634b9de8c2c7dd..5bbbb2acdc70a0d2a58ac34dc275b5ba546d314d 100644 --- a/net/ipv4/netfilter/nf_nat_proto_tcp.c +++ b/net/ipv4/netfilter/nf_nat_proto_tcp.c @@ -145,7 +145,7 @@ struct nf_nat_protocol nf_nat_protocol_tcp = { .in_range = tcp_in_range, .unique_tuple = tcp_unique_tuple, #if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE) - .range_to_nfattr = nf_nat_port_range_to_nfattr, - .nfattr_to_range = nf_nat_port_nfattr_to_range, + .range_to_nlattr = nf_nat_port_range_to_nlattr, + .nlattr_to_range = nf_nat_port_nlattr_to_range, #endif }; diff --git a/net/ipv4/netfilter/nf_nat_proto_udp.c b/net/ipv4/netfilter/nf_nat_proto_udp.c index 
1c4c70e25cd4bf8135bfd3bcf8b9344ee653bb3b..a0af4fd95584f0c67bc834833aad79ddb168e6d7 100644 --- a/net/ipv4/netfilter/nf_nat_proto_udp.c +++ b/net/ipv4/netfilter/nf_nat_proto_udp.c @@ -135,7 +135,7 @@ struct nf_nat_protocol nf_nat_protocol_udp = { .in_range = udp_in_range, .unique_tuple = udp_unique_tuple, #if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE) - .range_to_nfattr = nf_nat_port_range_to_nfattr, - .nfattr_to_range = nf_nat_port_nfattr_to_range, + .range_to_nlattr = nf_nat_port_range_to_nlattr, + .nlattr_to_range = nf_nat_port_nlattr_to_range, #endif }; diff --git a/net/ipv4/proc.c b/net/ipv4/proc.c index 3b690cf2a4ee62263a16c11c6f69d861f42e1056..e5b05b0391010956b914099ee01166c915c8d7df 100644 --- a/net/ipv4/proc.c +++ b/net/ipv4/proc.c @@ -34,6 +34,7 @@ * 2 of the License, or (at your option) any later version. */ #include +#include #include #include #include @@ -123,33 +124,30 @@ static const struct snmp_mib snmp4_ipextstats_list[] = { static const struct snmp_mib snmp4_icmp_list[] = { SNMP_MIB_ITEM("InMsgs", ICMP_MIB_INMSGS), SNMP_MIB_ITEM("InErrors", ICMP_MIB_INERRORS), - SNMP_MIB_ITEM("InDestUnreachs", ICMP_MIB_INDESTUNREACHS), - SNMP_MIB_ITEM("InTimeExcds", ICMP_MIB_INTIMEEXCDS), - SNMP_MIB_ITEM("InParmProbs", ICMP_MIB_INPARMPROBS), - SNMP_MIB_ITEM("InSrcQuenchs", ICMP_MIB_INSRCQUENCHS), - SNMP_MIB_ITEM("InRedirects", ICMP_MIB_INREDIRECTS), - SNMP_MIB_ITEM("InEchos", ICMP_MIB_INECHOS), - SNMP_MIB_ITEM("InEchoReps", ICMP_MIB_INECHOREPS), - SNMP_MIB_ITEM("InTimestamps", ICMP_MIB_INTIMESTAMPS), - SNMP_MIB_ITEM("InTimestampReps", ICMP_MIB_INTIMESTAMPREPS), - SNMP_MIB_ITEM("InAddrMasks", ICMP_MIB_INADDRMASKS), - SNMP_MIB_ITEM("InAddrMaskReps", ICMP_MIB_INADDRMASKREPS), SNMP_MIB_ITEM("OutMsgs", ICMP_MIB_OUTMSGS), SNMP_MIB_ITEM("OutErrors", ICMP_MIB_OUTERRORS), - SNMP_MIB_ITEM("OutDestUnreachs", ICMP_MIB_OUTDESTUNREACHS), - SNMP_MIB_ITEM("OutTimeExcds", ICMP_MIB_OUTTIMEEXCDS), - SNMP_MIB_ITEM("OutParmProbs", ICMP_MIB_OUTPARMPROBS), - SNMP_MIB_ITEM("OutSrcQuenchs", ICMP_MIB_OUTSRCQUENCHS), - SNMP_MIB_ITEM("OutRedirects", ICMP_MIB_OUTREDIRECTS), - SNMP_MIB_ITEM("OutEchos", ICMP_MIB_OUTECHOS), - SNMP_MIB_ITEM("OutEchoReps", ICMP_MIB_OUTECHOREPS), - SNMP_MIB_ITEM("OutTimestamps", ICMP_MIB_OUTTIMESTAMPS), - SNMP_MIB_ITEM("OutTimestampReps", ICMP_MIB_OUTTIMESTAMPREPS), - SNMP_MIB_ITEM("OutAddrMasks", ICMP_MIB_OUTADDRMASKS), - SNMP_MIB_ITEM("OutAddrMaskReps", ICMP_MIB_OUTADDRMASKREPS), SNMP_MIB_SENTINEL }; +static struct { + char *name; + int index; +} icmpmibmap[] = { + { "DestUnreachs", ICMP_DEST_UNREACH }, + { "TimeExcds", ICMP_TIME_EXCEEDED }, + { "ParmProbs", ICMP_PARAMETERPROB }, + { "SrcQuenchs", ICMP_SOURCE_QUENCH }, + { "Redirects", ICMP_REDIRECT }, + { "Echos", ICMP_ECHO }, + { "EchoReps", ICMP_ECHOREPLY }, + { "Timestamps", ICMP_TIMESTAMP }, + { "TimestampReps", ICMP_TIMESTAMPREPLY }, + { "AddrMasks", ICMP_ADDRESS }, + { "AddrMaskReps", ICMP_ADDRESSREPLY }, + { NULL, 0 } +}; + + static const struct snmp_mib snmp4_tcp_list[] = { SNMP_MIB_ITEM("RtoAlgorithm", TCP_MIB_RTOALGORITHM), SNMP_MIB_ITEM("RtoMin", TCP_MIB_RTOMIN), @@ -244,9 +242,79 @@ static const struct snmp_mib snmp4_net_list[] = { SNMP_MIB_ITEM("TCPAbortOnLinger", LINUX_MIB_TCPABORTONLINGER), SNMP_MIB_ITEM("TCPAbortFailed", LINUX_MIB_TCPABORTFAILED), SNMP_MIB_ITEM("TCPMemoryPressures", LINUX_MIB_TCPMEMORYPRESSURES), + SNMP_MIB_ITEM("TCPSACKDiscard", LINUX_MIB_TCPSACKDISCARD), + SNMP_MIB_ITEM("TCPDSACKIgnoredOld", LINUX_MIB_TCPDSACKIGNOREDOLD), + SNMP_MIB_ITEM("TCPDSACKIgnoredNoUndo", 
LINUX_MIB_TCPDSACKIGNOREDNOUNDO), + SNMP_MIB_ITEM("TCPSpuriousRTOs", LINUX_MIB_TCPSPURIOUSRTOS), SNMP_MIB_SENTINEL }; +static void icmpmsg_put(struct seq_file *seq) +{ +#define PERLINE 16 + + int j, i, count; + static int out[PERLINE]; + + count = 0; + for (i = 0; i < ICMPMSG_MIB_MAX; i++) { + + if (snmp_fold_field((void **) icmpmsg_statistics, i)) + out[count++] = i; + if (count < PERLINE) + continue; + + seq_printf(seq, "\nIcmpMsg:"); + for (j = 0; j < PERLINE; ++j) + seq_printf(seq, " %sType%u", i & 0x100 ? "Out" : "In", + i & 0xff); + seq_printf(seq, "\nIcmpMsg: "); + for (j = 0; j < PERLINE; ++j) + seq_printf(seq, " %lu", + snmp_fold_field((void **) icmpmsg_statistics, + out[j])); + seq_putc(seq, '\n'); + } + if (count) { + seq_printf(seq, "\nIcmpMsg:"); + for (j = 0; j < count; ++j) + seq_printf(seq, " %sType%u", out[j] & 0x100 ? "Out" : + "In", out[j] & 0xff); + seq_printf(seq, "\nIcmpMsg:"); + for (j = 0; j < count; ++j) + seq_printf(seq, " %lu", snmp_fold_field((void **) + icmpmsg_statistics, out[j])); + } + +#undef PERLINE +} + +static void icmp_put(struct seq_file *seq) +{ + int i; + + seq_puts(seq, "\nIcmp: InMsgs InErrors"); + for (i=0; icmpmibmap[i].name != NULL; i++) + seq_printf(seq, " In%s", icmpmibmap[i].name); + seq_printf(seq, " OutMsgs OutErrors"); + for (i=0; icmpmibmap[i].name != NULL; i++) + seq_printf(seq, " Out%s", icmpmibmap[i].name); + seq_printf(seq, "\nIcmp: %lu %lu", + snmp_fold_field((void **) icmp_statistics, ICMP_MIB_INMSGS), + snmp_fold_field((void **) icmp_statistics, ICMP_MIB_INERRORS)); + for (i=0; icmpmibmap[i].name != NULL; i++) + seq_printf(seq, " %lu", + snmp_fold_field((void **) icmpmsg_statistics, + icmpmibmap[i].index)); + seq_printf(seq, " %lu %lu", + snmp_fold_field((void **) icmp_statistics, ICMP_MIB_OUTMSGS), + snmp_fold_field((void **) icmp_statistics, ICMP_MIB_OUTERRORS)); + for (i=0; icmpmibmap[i].name != NULL; i++) + seq_printf(seq, " %lu", + snmp_fold_field((void **) icmpmsg_statistics, + icmpmibmap[i].index)); +} + /* * Called from the PROCfs module. This outputs /proc/net/snmp. 
*/ @@ -267,15 +335,8 @@ static int snmp_seq_show(struct seq_file *seq, void *v) snmp_fold_field((void **)ip_statistics, snmp4_ipstats_list[i].entry)); - seq_puts(seq, "\nIcmp:"); - for (i = 0; snmp4_icmp_list[i].name != NULL; i++) - seq_printf(seq, " %s", snmp4_icmp_list[i].name); - - seq_puts(seq, "\nIcmp:"); - for (i = 0; snmp4_icmp_list[i].name != NULL; i++) - seq_printf(seq, " %lu", - snmp_fold_field((void **)icmp_statistics, - snmp4_icmp_list[i].entry)); + icmp_put(seq); /* RFC 2011 compatibility */ + icmpmsg_put(seq); seq_puts(seq, "\nTcp:"); for (i = 0; snmp4_tcp_list[i].name != NULL; i++) @@ -332,6 +393,8 @@ static const struct file_operations snmp_seq_fops = { .release = single_release, }; + + /* * Output /proc/net/netstat */ @@ -380,20 +443,20 @@ int __init ip_misc_proc_init(void) { int rc = 0; - if (!proc_net_fops_create("netstat", S_IRUGO, &netstat_seq_fops)) + if (!proc_net_fops_create(&init_net, "netstat", S_IRUGO, &netstat_seq_fops)) goto out_netstat; - if (!proc_net_fops_create("snmp", S_IRUGO, &snmp_seq_fops)) + if (!proc_net_fops_create(&init_net, "snmp", S_IRUGO, &snmp_seq_fops)) goto out_snmp; - if (!proc_net_fops_create("sockstat", S_IRUGO, &sockstat_seq_fops)) + if (!proc_net_fops_create(&init_net, "sockstat", S_IRUGO, &sockstat_seq_fops)) goto out_sockstat; out: return rc; out_sockstat: - proc_net_remove("snmp"); + proc_net_remove(&init_net, "snmp"); out_snmp: - proc_net_remove("netstat"); + proc_net_remove(&init_net, "netstat"); out_netstat: rc = -ENOMEM; goto out; diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c index c6d71526f625b47c08c3b026fc25e42c1a43f408..3916faca3afef64fcbc8d583715b384f364c56d7 100644 --- a/net/ipv4/raw.c +++ b/net/ipv4/raw.c @@ -59,6 +59,7 @@ #include #include #include +#include #include #include #include @@ -313,6 +314,9 @@ static int raw_send_hdrinc(struct sock *sk, void *from, size_t length, iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl); } + if (iph->protocol == IPPROTO_ICMP) + icmp_out_count(((struct icmphdr *) + skb_transport_header(skb))->type); err = NF_HOOK(PF_INET, NF_IP_LOCAL_OUT, skb, NULL, rt->u.dst.dev, dst_output); @@ -898,24 +902,8 @@ static const struct seq_operations raw_seq_ops = { static int raw_seq_open(struct inode *inode, struct file *file) { - struct seq_file *seq; - int rc = -ENOMEM; - struct raw_iter_state *s; - - s = kzalloc(sizeof(*s), GFP_KERNEL); - if (!s) - goto out; - rc = seq_open(file, &raw_seq_ops); - if (rc) - goto out_kfree; - - seq = file->private_data; - seq->private = s; -out: - return rc; -out_kfree: - kfree(s); - goto out; + return seq_open_private(file, &raw_seq_ops, + sizeof(struct raw_iter_state)); } static const struct file_operations raw_seq_fops = { @@ -928,13 +916,13 @@ static const struct file_operations raw_seq_fops = { int __init raw_proc_init(void) { - if (!proc_net_fops_create("raw", S_IRUGO, &raw_seq_fops)) + if (!proc_net_fops_create(&init_net, "raw", S_IRUGO, &raw_seq_fops)) return -ENOMEM; return 0; } void __init raw_proc_exit(void) { - proc_net_remove("raw"); + proc_net_remove(&init_net, "raw"); } #endif /* CONFIG_PROC_FS */ diff --git a/net/ipv4/route.c b/net/ipv4/route.c index c7ca94bd152cc945c99c1ab63fcf7d3985d5f639..21b12de9e6536663d5da2b60bd1d94b1d984acdd 100644 --- a/net/ipv4/route.c +++ b/net/ipv4/route.c @@ -81,6 +81,7 @@ #include #include #include +#include #include #include #include @@ -91,6 +92,7 @@ #include #include #include +#include #include #include #include @@ -135,7 +137,8 @@ static unsigned long rt_deadline; #define RTprint(a...) 
printk(KERN_DEBUG a) static struct timer_list rt_flush_timer; -static struct timer_list rt_periodic_timer; +static void rt_check_expire(struct work_struct *work); +static DECLARE_DELAYED_WORK(expires_work, rt_check_expire); static struct timer_list rt_secret_timer; /* @@ -243,7 +246,7 @@ static spinlock_t *rt_hash_locks; static struct rt_hash_bucket *rt_hash_table; static unsigned rt_hash_mask; -static int rt_hash_log; +static unsigned int rt_hash_log; static unsigned int rt_hash_rnd; static DEFINE_PER_CPU(struct rt_cache_stat, rt_cache_stat); @@ -372,23 +375,8 @@ static const struct seq_operations rt_cache_seq_ops = { static int rt_cache_seq_open(struct inode *inode, struct file *file) { - struct seq_file *seq; - int rc = -ENOMEM; - struct rt_cache_iter_state *s; - - s = kzalloc(sizeof(*s), GFP_KERNEL); - if (!s) - goto out; - rc = seq_open(file, &rt_cache_seq_ops); - if (rc) - goto out_kfree; - seq = file->private_data; - seq->private = s; -out: - return rc; -out_kfree: - kfree(s); - goto out; + return seq_open_private(file, &rt_cache_seq_ops, + sizeof(struct rt_cache_iter_state)); } static const struct file_operations rt_cache_seq_fops = { @@ -571,33 +559,32 @@ static inline int compare_keys(struct flowi *fl1, struct flowi *fl2) (fl1->iif ^ fl2->iif)) == 0; } -/* This runs via a timer and thus is always in BH context. */ -static void rt_check_expire(unsigned long dummy) +static void rt_check_expire(struct work_struct *work) { static unsigned int rover; unsigned int i = rover, goal; struct rtable *rth, **rthp; - unsigned long now = jiffies; u64 mult; mult = ((u64)ip_rt_gc_interval) << rt_hash_log; if (ip_rt_gc_timeout > 1) do_div(mult, ip_rt_gc_timeout); goal = (unsigned int)mult; - if (goal > rt_hash_mask) goal = rt_hash_mask + 1; + if (goal > rt_hash_mask) + goal = rt_hash_mask + 1; for (; goal > 0; goal--) { unsigned long tmo = ip_rt_gc_timeout; i = (i + 1) & rt_hash_mask; rthp = &rt_hash_table[i].chain; - if (*rthp == 0) + if (*rthp == NULL) continue; - spin_lock(rt_hash_lock_addr(i)); + spin_lock_bh(rt_hash_lock_addr(i)); while ((rth = *rthp) != NULL) { if (rth->u.dst.expires) { /* Entry is expired even if it is in use */ - if (time_before_eq(now, rth->u.dst.expires)) { + if (time_before_eq(jiffies, rth->u.dst.expires)) { tmo >>= 1; rthp = &rth->u.dst.rt_next; continue; @@ -612,14 +599,10 @@ static void rt_check_expire(unsigned long dummy) *rthp = rth->u.dst.rt_next; rt_free(rth); } - spin_unlock(rt_hash_lock_addr(i)); - - /* Fallback loop breaker. 
*/ - if (time_after(jiffies, now)) - break; + spin_unlock_bh(rt_hash_lock_addr(i)); } rover = i; - mod_timer(&rt_periodic_timer, jiffies + ip_rt_gc_interval); + schedule_delayed_work(&expires_work, ip_rt_gc_interval); } /* This can run from both BH and non-BH contexts, the latter @@ -1404,8 +1387,8 @@ static void ipv4_dst_ifdown(struct dst_entry *dst, struct net_device *dev, { struct rtable *rt = (struct rtable *) dst; struct in_device *idev = rt->idev; - if (dev != &loopback_dev && idev && idev->dev == dev) { - struct in_device *loopback_idev = in_dev_get(&loopback_dev); + if (dev != init_net.loopback_dev && idev && idev->dev == dev) { + struct in_device *loopback_idev = in_dev_get(init_net.loopback_dev); if (loopback_idev) { rt->idev = loopback_idev; in_dev_put(idev); @@ -1557,7 +1540,7 @@ static int ip_route_input_mc(struct sk_buff *skb, __be32 daddr, __be32 saddr, #endif rth->rt_iif = rth->fl.iif = dev->ifindex; - rth->u.dst.dev = &loopback_dev; + rth->u.dst.dev = init_net.loopback_dev; dev_hold(rth->u.dst.dev); rth->idev = in_dev_get(rth->u.dst.dev); rth->fl.oif = 0; @@ -1814,7 +1797,7 @@ static int ip_route_input_slow(struct sk_buff *skb, __be32 daddr, __be32 saddr, if (res.type == RTN_LOCAL) { int result; result = fib_validate_source(saddr, daddr, tos, - loopback_dev.ifindex, + init_net.loopback_dev->ifindex, dev, &spec_dst, &itag); if (result < 0) goto martian_source; @@ -1881,7 +1864,7 @@ out: return err; #endif rth->rt_iif = rth->fl.iif = dev->ifindex; - rth->u.dst.dev = &loopback_dev; + rth->u.dst.dev = init_net.loopback_dev; dev_hold(rth->u.dst.dev); rth->idev = in_dev_get(rth->u.dst.dev); rth->rt_gateway = daddr; @@ -2151,7 +2134,7 @@ static int ip_route_output_slow(struct rtable **rp, const struct flowi *oldflp) RT_SCOPE_UNIVERSE), } }, .mark = oldflp->mark, - .iif = loopback_dev.ifindex, + .iif = init_net.loopback_dev->ifindex, .oif = oldflp->oif }; struct fib_result res; unsigned flags = 0; @@ -2212,7 +2195,7 @@ static int ip_route_output_slow(struct rtable **rp, const struct flowi *oldflp) if (oldflp->oif) { - dev_out = dev_get_by_index(oldflp->oif); + dev_out = dev_get_by_index(&init_net, oldflp->oif); err = -ENODEV; if (dev_out == NULL) goto out; @@ -2245,9 +2228,9 @@ static int ip_route_output_slow(struct rtable **rp, const struct flowi *oldflp) fl.fl4_dst = fl.fl4_src = htonl(INADDR_LOOPBACK); if (dev_out) dev_put(dev_out); - dev_out = &loopback_dev; + dev_out = init_net.loopback_dev; dev_hold(dev_out); - fl.oif = loopback_dev.ifindex; + fl.oif = init_net.loopback_dev->ifindex; res.type = RTN_LOCAL; flags |= RTCF_LOCAL; goto make_route; @@ -2292,7 +2275,7 @@ static int ip_route_output_slow(struct rtable **rp, const struct flowi *oldflp) fl.fl4_src = fl.fl4_dst; if (dev_out) dev_put(dev_out); - dev_out = &loopback_dev; + dev_out = init_net.loopback_dev; dev_hold(dev_out); fl.oif = dev_out->ifindex; if (res.fi) @@ -2591,7 +2574,7 @@ static int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr* nlh, void if (iif) { struct net_device *dev; - dev = __dev_get_by_index(iif); + dev = __dev_get_by_index(&init_net, iif); if (dev == NULL) { err = -ENODEV; goto errout_free; @@ -2992,17 +2975,14 @@ int __init ip_rt_init(void) init_timer(&rt_flush_timer); rt_flush_timer.function = rt_run_flush; - init_timer(&rt_periodic_timer); - rt_periodic_timer.function = rt_check_expire; init_timer(&rt_secret_timer); rt_secret_timer.function = rt_secret_rebuild; /* All the timers, started at system startup tend to synchronize. Perturb it a bit. 
*/ - rt_periodic_timer.expires = jiffies + net_random() % ip_rt_gc_interval + - ip_rt_gc_interval; - add_timer(&rt_periodic_timer); + schedule_delayed_work(&expires_work, + net_random() % ip_rt_gc_interval + ip_rt_gc_interval); rt_secret_timer.expires = jiffies + net_random() % ip_rt_secret_interval + ip_rt_secret_interval; @@ -3011,15 +2991,15 @@ int __init ip_rt_init(void) #ifdef CONFIG_PROC_FS { struct proc_dir_entry *rtstat_pde = NULL; /* keep gcc happy */ - if (!proc_net_fops_create("rt_cache", S_IRUGO, &rt_cache_seq_fops) || + if (!proc_net_fops_create(&init_net, "rt_cache", S_IRUGO, &rt_cache_seq_fops) || !(rtstat_pde = create_proc_entry("rt_cache", S_IRUGO, - proc_net_stat))) { + init_net.proc_net_stat))) { return -ENOMEM; } rtstat_pde->proc_fops = &rt_cpu_seq_fops; } #ifdef CONFIG_NET_CLS_ROUTE - create_proc_read_entry("rt_acct", 0, proc_net, ip_rt_acct_read, NULL); + create_proc_read_entry("rt_acct", 0, init_net.proc_net, ip_rt_acct_read, NULL); #endif #endif #ifdef CONFIG_XFRM diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c index 53ef0f4bbdaa507137d0a99b7a120fe43bddf1bb..eb286abcf5dc7f600e84c1cb43de53e90962fe50 100644 --- a/net/ipv4/sysctl_net_ipv4.c +++ b/net/ipv4/sysctl_net_ipv4.c @@ -12,6 +12,7 @@ #include #include #include +#include #include #include #include @@ -89,6 +90,74 @@ static int ipv4_sysctl_forward_strategy(ctl_table *table, return 1; } +extern seqlock_t sysctl_port_range_lock; +extern int sysctl_local_port_range[2]; + +/* Update system visible IP port range */ +static void set_local_port_range(int range[2]) +{ + write_seqlock(&sysctl_port_range_lock); + sysctl_local_port_range[0] = range[0]; + sysctl_local_port_range[1] = range[1]; + write_sequnlock(&sysctl_port_range_lock); +} + +/* Validate changes from /proc interface. */ +static int ipv4_local_port_range(ctl_table *table, int write, struct file *filp, + void __user *buffer, + size_t *lenp, loff_t *ppos) +{ + int ret; + int range[2] = { sysctl_local_port_range[0], + sysctl_local_port_range[1] }; + ctl_table tmp = { + .data = &range, + .maxlen = sizeof(range), + .mode = table->mode, + .extra1 = &ip_local_port_range_min, + .extra2 = &ip_local_port_range_max, + }; + + ret = proc_dointvec_minmax(&tmp, write, filp, buffer, lenp, ppos); + + if (write && ret == 0) { + if (range[1] <= range[0]) + ret = -EINVAL; + else + set_local_port_range(range); + } + + return ret; +} + +/* Validate changes from sysctl interface. 
*/ +static int ipv4_sysctl_local_port_range(ctl_table *table, int __user *name, + int nlen, void __user *oldval, + size_t __user *oldlenp, + void __user *newval, size_t newlen) +{ + int ret; + int range[2] = { sysctl_local_port_range[0], + sysctl_local_port_range[1] }; + ctl_table tmp = { + .data = &range, + .maxlen = sizeof(range), + .mode = table->mode, + .extra1 = &ip_local_port_range_min, + .extra2 = &ip_local_port_range_max, + }; + + ret = sysctl_intvec(&tmp, name, nlen, oldval, oldlenp, newval, newlen); + if (ret == 0 && newval && newlen) { + if (range[1] <= range[0]) + ret = -EINVAL; + else + set_local_port_range(range); + } + return ret; +} + + static int proc_tcp_congestion_control(ctl_table *ctl, int write, struct file * filp, void __user *buffer, size_t *lenp, loff_t *ppos) { @@ -427,10 +496,8 @@ ctl_table ipv4_table[] = { .data = &sysctl_local_port_range, .maxlen = sizeof(sysctl_local_port_range), .mode = 0644, - .proc_handler = &proc_dointvec_minmax, - .strategy = &sysctl_intvec, - .extra1 = ip_local_port_range_min, - .extra2 = ip_local_port_range_max + .proc_handler = &ipv4_local_port_range, + .strategy = &ipv4_sysctl_local_port_range, }, { .ctl_name = NET_IPV4_ICMP_ECHO_IGNORE_ALL, diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index 7e740112b2383c93b535a0657baf3f5b6fe75d48..4f322003835dc1886b4202259a91bd641d9139c3 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c @@ -247,6 +247,7 @@ * TCP_CLOSE socket is finished */ +#include #include #include #include @@ -2014,7 +2015,7 @@ void tcp_get_info(struct sock *sk, struct tcp_info *info) if (tp->rx_opt.tstamp_ok) info->tcpi_options |= TCPI_OPT_TIMESTAMPS; - if (tp->rx_opt.sack_ok) + if (tcp_is_sack(tp)) info->tcpi_options |= TCPI_OPT_SACK; if (tp->rx_opt.wscale_ok) { info->tcpi_options |= TCPI_OPT_WSCALE; @@ -2030,8 +2031,13 @@ void tcp_get_info(struct sock *sk, struct tcp_info *info) info->tcpi_snd_mss = tp->mss_cache; info->tcpi_rcv_mss = icsk->icsk_ack.rcv_mss; - info->tcpi_unacked = tp->packets_out; - info->tcpi_sacked = tp->sacked_out; + if (sk->sk_state == TCP_LISTEN) { + info->tcpi_unacked = sk->sk_ack_backlog; + info->tcpi_sacked = sk->sk_max_ack_backlog; + } else { + info->tcpi_unacked = tp->packets_out; + info->tcpi_sacked = tp->sacked_out; + } info->tcpi_lost = tp->lost_out; info->tcpi_retrans = tp->retrans_out; info->tcpi_fackets = tp->fackets_out; @@ -2210,7 +2216,7 @@ struct sk_buff *tcp_tso_segment(struct sk_buff *skb, int features) goto out; mss = skb_shinfo(skb)->gso_size; - skb_shinfo(skb)->gso_segs = (skb->len + mss - 1) / mss; + skb_shinfo(skb)->gso_segs = DIV_ROUND_UP(skb->len, mss); segs = NULL; goto out; diff --git a/net/ipv4/tcp_bic.c b/net/ipv4/tcp_bic.c index 4586211e3757f9b30c78b95bdbe1d1a858ea9070..5dba0fc8f579f8b5b31ca47b0e458a440d524de1 100644 --- a/net/ipv4/tcp_bic.c +++ b/net/ipv4/tcp_bic.c @@ -210,7 +210,7 @@ static void bictcp_acked(struct sock *sk, u32 cnt, s32 rtt) { const struct inet_connection_sock *icsk = inet_csk(sk); - if (cnt > 0 && icsk->icsk_ca_state == TCP_CA_Open) { + if (icsk->icsk_ca_state == TCP_CA_Open) { struct bictcp *ca = inet_csk_ca(sk); cnt -= ca->delayed_ack >> ACK_RATIO_SHIFT; ca->delayed_ack += cnt; diff --git a/net/ipv4/tcp_cubic.c b/net/ipv4/tcp_cubic.c index 485d7ea35f75598ed2da741f4a9f376da20284ea..80bd084a9f9198d5b4c31f380ffcd5448a8390b0 100644 --- a/net/ipv4/tcp_cubic.c +++ b/net/ipv4/tcp_cubic.c @@ -314,7 +314,7 @@ static void bictcp_acked(struct sock *sk, u32 cnt, s32 rtt_us) struct bictcp *ca = inet_csk_ca(sk); u32 delay; - if (cnt > 0 && icsk->icsk_ca_state == 
TCP_CA_Open) { + if (icsk->icsk_ca_state == TCP_CA_Open) { cnt -= ca->delayed_ack >> ACK_RATIO_SHIFT; ca->delayed_ack += cnt; } diff --git a/net/ipv4/tcp_diag.c b/net/ipv4/tcp_diag.c index 57c5f0b10e6ccce7b90a51c75782a89bbac8112b..3904d2158a92c016de79ebf6a7da4b18ad867b8d 100644 --- a/net/ipv4/tcp_diag.c +++ b/net/ipv4/tcp_diag.c @@ -25,11 +25,13 @@ static void tcp_diag_get_info(struct sock *sk, struct inet_diag_msg *r, const struct tcp_sock *tp = tcp_sk(sk); struct tcp_info *info = _info; - if (sk->sk_state == TCP_LISTEN) + if (sk->sk_state == TCP_LISTEN) { r->idiag_rqueue = sk->sk_ack_backlog; - else + r->idiag_wqueue = sk->sk_max_ack_backlog; + } else { r->idiag_rqueue = tp->rcv_nxt - tp->copied_seq; - r->idiag_wqueue = tp->write_seq - tp->snd_una; + r->idiag_wqueue = tp->write_seq - tp->snd_una; + } if (info != NULL) tcp_get_info(sk, info); } diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index f893e90061ebfebe87ad359bd94c700db70b02d6..0a42e934034678bfd57cd2936e7700e4ea1ebd1c 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c @@ -85,7 +85,7 @@ int sysctl_tcp_adv_win_scale __read_mostly = 2; int sysctl_tcp_stdurg __read_mostly; int sysctl_tcp_rfc1337 __read_mostly; int sysctl_tcp_max_orphans __read_mostly = NR_FILE; -int sysctl_tcp_frto __read_mostly; +int sysctl_tcp_frto __read_mostly = 2; int sysctl_tcp_frto_response __read_mostly; int sysctl_tcp_nometrics_save __read_mostly; @@ -104,6 +104,7 @@ int sysctl_tcp_abc __read_mostly; #define FLAG_ONLY_ORIG_SACKED 0x200 /* SACKs only non-rexmit sent before RTO */ #define FLAG_SND_UNA_ADVANCED 0x400 /* Snd_una was changed (!= FLAG_DATA_ACKED) */ #define FLAG_DSACKING_ACK 0x800 /* SACK blocks contained DSACK info */ +#define FLAG_NONHEAD_RETRANS_ACKED 0x1000 /* Non-head rexmitted data was ACKed */ #define FLAG_ACKED (FLAG_DATA_ACKED|FLAG_SYN_ACKED) #define FLAG_NOT_DUP (FLAG_DATA|FLAG_WIN_UPDATE|FLAG_ACKED) @@ -111,13 +112,10 @@ int sysctl_tcp_abc __read_mostly; #define FLAG_FORWARD_PROGRESS (FLAG_ACKED|FLAG_DATA_SACKED) #define FLAG_ANY_PROGRESS (FLAG_FORWARD_PROGRESS|FLAG_SND_UNA_ADVANCED) -#define IsReno(tp) ((tp)->rx_opt.sack_ok == 0) -#define IsFack(tp) ((tp)->rx_opt.sack_ok & 2) -#define IsDSack(tp) ((tp)->rx_opt.sack_ok & 4) - #define IsSackFrto() (sysctl_tcp_frto == 0x2) #define TCP_REMNANT (TCP_FLAG_FIN|TCP_FLAG_URG|TCP_FLAG_SYN|TCP_FLAG_PSH) +#define TCP_HP_BITS (~(TCP_RESERVED_BITS|TCP_FLAG_PSH)) /* Adapt the MSS value used to make delayed ack decision to the * real world. @@ -198,6 +196,55 @@ static inline int tcp_in_quickack_mode(const struct sock *sk) return icsk->icsk_ack.quick && !icsk->icsk_ack.pingpong; } +static inline void TCP_ECN_queue_cwr(struct tcp_sock *tp) +{ + if (tp->ecn_flags&TCP_ECN_OK) + tp->ecn_flags |= TCP_ECN_QUEUE_CWR; +} + +static inline void TCP_ECN_accept_cwr(struct tcp_sock *tp, struct sk_buff *skb) +{ + if (tcp_hdr(skb)->cwr) + tp->ecn_flags &= ~TCP_ECN_DEMAND_CWR; +} + +static inline void TCP_ECN_withdraw_cwr(struct tcp_sock *tp) +{ + tp->ecn_flags &= ~TCP_ECN_DEMAND_CWR; +} + +static inline void TCP_ECN_check_ce(struct tcp_sock *tp, struct sk_buff *skb) +{ + if (tp->ecn_flags&TCP_ECN_OK) { + if (INET_ECN_is_ce(TCP_SKB_CB(skb)->flags)) + tp->ecn_flags |= TCP_ECN_DEMAND_CWR; + /* Funny extension: if ECT is not set on a segment, + * it is surely retransmit. It is not in ECN RFC, + * but Linux follows this rule. 
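The TCP_ECN_* helpers added to tcp_input.c here implement a small receiver-side state machine: a CE-marked segment latches TCP_ECN_DEMAND_CWR until the sender echoes CWR, while a non-ECT segment is treated as a likely retransmit and only triggers quick ACKs. A userspace restatement of that flag handling, with illustrative names and values (assumption: these are stand-ins, not the kernel's own constants):

#include <stdio.h>

enum { ECN_OK = 1, ECN_DEMAND_CWR = 2 };        /* stand-ins for TCP_ECN_* */

static unsigned int rx_data_segment(unsigned int flags, int ce_marked)
{
        if ((flags & ECN_OK) && ce_marked)
                flags |= ECN_DEMAND_CWR;        /* keep demanding CWR */
        /* a non-ECT segment is almost surely a retransmit; the kernel
         * switches to quick ACKs there instead of touching the flags */
        return flags;
}

static unsigned int rx_cwr_segment(unsigned int flags)
{
        return flags & ~ECN_DEMAND_CWR;         /* mirrors TCP_ECN_accept_cwr() */
}

int main(void)
{
        unsigned int f = rx_data_segment(ECN_OK, 1);

        printf("demand CWR set:     %u\n", !!(f & ECN_DEMAND_CWR));
        printf("demand CWR cleared: %u\n", !!(rx_cwr_segment(f) & ECN_DEMAND_CWR));
        return 0;
}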
*/ + else if (INET_ECN_is_not_ect((TCP_SKB_CB(skb)->flags))) + tcp_enter_quickack_mode((struct sock *)tp); + } +} + +static inline void TCP_ECN_rcv_synack(struct tcp_sock *tp, struct tcphdr *th) +{ + if ((tp->ecn_flags&TCP_ECN_OK) && (!th->ece || th->cwr)) + tp->ecn_flags &= ~TCP_ECN_OK; +} + +static inline void TCP_ECN_rcv_syn(struct tcp_sock *tp, struct tcphdr *th) +{ + if ((tp->ecn_flags&TCP_ECN_OK) && (!th->ece || !th->cwr)) + tp->ecn_flags &= ~TCP_ECN_OK; +} + +static inline int TCP_ECN_rcv_ecn_echo(struct tcp_sock *tp, struct tcphdr *th) +{ + if (th->ece && !th->syn && (tp->ecn_flags&TCP_ECN_OK)) + return 1; + return 0; +} + /* Buffer size and advertised window tuning. * * 1. Tuning sk->sk_sndbuf, when connection enters established state. @@ -810,6 +857,21 @@ void tcp_enter_cwr(struct sock *sk, const int set_ssthresh) } } +/* + * Packet counting of FACK is based on in-order assumptions, therefore TCP + * disables it when reordering is detected + */ +static void tcp_disable_fack(struct tcp_sock *tp) +{ + tp->rx_opt.sack_ok &= ~2; +} + +/* Take a notice that peer is sending DSACKs */ +static void tcp_dsack_seen(struct tcp_sock *tp) +{ + tp->rx_opt.sack_ok |= 4; +} + /* Initialize metrics on socket. */ static void tcp_init_metrics(struct sock *sk) @@ -831,7 +893,7 @@ static void tcp_init_metrics(struct sock *sk) } if (dst_metric(dst, RTAX_REORDERING) && tp->reordering != dst_metric(dst, RTAX_REORDERING)) { - tp->rx_opt.sack_ok &= ~2; + tcp_disable_fack(tp); tp->reordering = dst_metric(dst, RTAX_REORDERING); } @@ -893,9 +955,9 @@ static void tcp_update_reordering(struct sock *sk, const int metric, /* This exciting event is worth to be remembered. 8) */ if (ts) NET_INC_STATS_BH(LINUX_MIB_TCPTSREORDER); - else if (IsReno(tp)) + else if (tcp_is_reno(tp)) NET_INC_STATS_BH(LINUX_MIB_TCPRENOREORDER); - else if (IsFack(tp)) + else if (tcp_is_fack(tp)) NET_INC_STATS_BH(LINUX_MIB_TCPFACKREORDER); else NET_INC_STATS_BH(LINUX_MIB_TCPSACKREORDER); @@ -907,8 +969,7 @@ static void tcp_update_reordering(struct sock *sk, const int metric, tp->sacked_out, tp->undo_marker ? tp->undo_retrans : 0); #endif - /* Disable FACK yet. */ - tp->rx_opt.sack_ok &= ~2; + tcp_disable_fack(tp); } } @@ -959,7 +1020,216 @@ static void tcp_update_reordering(struct sock *sk, const int metric, * for retransmitted and already SACKed segment -> reordering.. * Both of these heuristics are not used in Loss state, when we cannot * account for retransmits accurately. + * + * SACK block validation. + * ---------------------- + * + * SACK block range validation checks that the received SACK block fits to + * the expected sequence limits, i.e., it is between SND.UNA and SND.NXT. + * Note that SND.UNA is not included to the range though being valid because + * it means that the receiver is rather inconsistent with itself reporting + * SACK reneging when it should advance SND.UNA. Such SACK block this is + * perfectly valid, however, in light of RFC2018 which explicitly states + * that "SACK block MUST reflect the newest segment. Even if the newest + * segment is going to be discarded ...", not that it looks very clever + * in case of head skb. Due to potentional receiver driven attacks, we + * choose to avoid immediate execution of a walk in write queue due to + * reneging and defer head skb's loss recovery to standard loss recovery + * procedure that will eventually trigger (nothing forbids us doing this). + * + * Implements also blockage to start_seq wrap-around. 
Problem lies in the + * fact that though start_seq (s) is before end_seq (i.e., not reversed), + * there's no guarantee that it will be before snd_nxt (n). The problem + * happens when start_seq resides between end_seq wrap (e_w) and snd_nxt + * wrap (s_w): + * + * <- outs wnd -> <- wrapzone -> + * u e n u_w e_w s n_w + * | | | | | | | + * |<------------+------+----- TCP seqno space --------------+---------->| + * ...-- <2^31 ->| |<--------... + * ...---- >2^31 ------>| |<--------... + * + * Current code wouldn't be vulnerable but it's better still to discard such + * crazy SACK blocks. Doing this check for start_seq alone closes somewhat + * similar case (end_seq after snd_nxt wrap) as earlier reversed check in + * snd_nxt wrap -> snd_una region will then become "well defined", i.e., + * equal to the ideal case (infinite seqno space without wrap caused issues). + * + * With D-SACK the lower bound is extended to cover sequence space below + * SND.UNA down to undo_marker, which is the last point of interest. Yet + * again, DSACK block must not to go across snd_una (for the same reason as + * for the normal SACK blocks, explained above). But there all simplicity + * ends, TCP might receive valid D-SACKs below that. As long as they reside + * fully below undo_marker they do not affect behavior in anyway and can + * therefore be safely ignored. In rare cases (which are more or less + * theoretical ones), the D-SACK will nicely cross that boundary due to skb + * fragmentation and packet reordering past skb's retransmission. To consider + * them correctly, the acceptable range must be extended even more though + * the exact amount is rather hard to quantify. However, tp->max_window can + * be used as an exaggerated estimate. + */ +static int tcp_is_sackblock_valid(struct tcp_sock *tp, int is_dsack, + u32 start_seq, u32 end_seq) +{ + /* Too far in future, or reversed (interpretation is ambiguous) */ + if (after(end_seq, tp->snd_nxt) || !before(start_seq, end_seq)) + return 0; + + /* Nasty start_seq wrap-around check (see comments above) */ + if (!before(start_seq, tp->snd_nxt)) + return 0; + + /* In outstanding window? ...This is valid exit for DSACKs too. + * start_seq == snd_una is non-sensical (see comments above) + */ + if (after(start_seq, tp->snd_una)) + return 1; + + if (!is_dsack || !tp->undo_marker) + return 0; + + /* ...Then it's D-SACK, and must reside below snd_una completely */ + if (!after(end_seq, tp->snd_una)) + return 0; + + if (!before(start_seq, tp->undo_marker)) + return 1; + + /* Too old */ + if (!after(end_seq, tp->undo_marker)) + return 0; + + /* Undo_marker boundary crossing (overestimates a lot). Known already: + * start_seq < undo_marker and end_seq >= undo_marker. + */ + return !before(start_seq, end_seq - tp->max_window); +} + +/* Check for lost retransmit. This superb idea is borrowed from "ratehalving". + * Event "C". Later note: FACK people cheated me again 8), we have to account + * for reordering! Ugly, but should help. + * + * Search retransmitted skbs from write_queue that were sent when snd_nxt was + * less than what is now known to be received by the other end (derived from + * SACK blocks by the caller). Also calculate the lowest snd_nxt among the + * remaining retransmitted skbs to avoid some costly processing per ACKs. 
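tcp_is_sackblock_valid() above is built entirely from the kernel's wrap-safe sequence comparisons, so every bound it checks stays correct across the wrap cases drawn in the comment. A compilable userspace restatement of those primitives (the usual signed-difference trick; illustrative only):

#include <stdint.h>
#include <stdio.h>

static inline int before(uint32_t seq1, uint32_t seq2)
{
        return (int32_t)(seq1 - seq2) < 0;
}
#define after(seq2, seq1)       before(seq1, seq2)

int main(void)
{
        uint32_t snd_una = 0xfffffff0u;         /* just below the wrap */
        uint32_t snd_nxt = 0x00000100u;         /* already wrapped */

        /* the window itself is still well ordered across the wrap ... */
        printf("%d\n", before(snd_una, snd_nxt));       /* 1 */
        /* ... and a block ending beyond snd_nxt is still rejected */
        printf("%d\n", after(0x00000200u, snd_nxt));    /* 1 -> invalid block */
        return 0;
}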
*/ +static int tcp_mark_lost_retrans(struct sock *sk, u32 received_upto) +{ + struct tcp_sock *tp = tcp_sk(sk); + struct sk_buff *skb; + int flag = 0; + int cnt = 0; + u32 new_low_seq = 0; + + tcp_for_write_queue(skb, sk) { + u32 ack_seq = TCP_SKB_CB(skb)->ack_seq; + + if (skb == tcp_send_head(sk)) + break; + if (cnt == tp->retrans_out) + break; + if (!after(TCP_SKB_CB(skb)->end_seq, tp->snd_una)) + continue; + + if (!(TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_RETRANS)) + continue; + + if (after(received_upto, ack_seq) && + (tcp_is_fack(tp) || + !before(received_upto, + ack_seq + tp->reordering * tp->mss_cache))) { + TCP_SKB_CB(skb)->sacked &= ~TCPCB_SACKED_RETRANS; + tp->retrans_out -= tcp_skb_pcount(skb); + + /* clear lost hint */ + tp->retransmit_skb_hint = NULL; + + if (!(TCP_SKB_CB(skb)->sacked & (TCPCB_LOST|TCPCB_SACKED_ACKED))) { + tp->lost_out += tcp_skb_pcount(skb); + TCP_SKB_CB(skb)->sacked |= TCPCB_LOST; + flag |= FLAG_DATA_SACKED; + NET_INC_STATS_BH(LINUX_MIB_TCPLOSTRETRANSMIT); + } + } else { + if (!new_low_seq || before(ack_seq, new_low_seq)) + new_low_seq = ack_seq; + cnt += tcp_skb_pcount(skb); + } + } + + if (tp->retrans_out) + tp->lost_retrans_low = new_low_seq; + + return flag; +} + +static int tcp_check_dsack(struct tcp_sock *tp, struct sk_buff *ack_skb, + struct tcp_sack_block_wire *sp, int num_sacks, + u32 prior_snd_una) +{ + u32 start_seq_0 = ntohl(get_unaligned(&sp[0].start_seq)); + u32 end_seq_0 = ntohl(get_unaligned(&sp[0].end_seq)); + int dup_sack = 0; + + if (before(start_seq_0, TCP_SKB_CB(ack_skb)->ack_seq)) { + dup_sack = 1; + tcp_dsack_seen(tp); + NET_INC_STATS_BH(LINUX_MIB_TCPDSACKRECV); + } else if (num_sacks > 1) { + u32 end_seq_1 = ntohl(get_unaligned(&sp[1].end_seq)); + u32 start_seq_1 = ntohl(get_unaligned(&sp[1].start_seq)); + + if (!after(end_seq_0, end_seq_1) && + !before(start_seq_0, start_seq_1)) { + dup_sack = 1; + tcp_dsack_seen(tp); + NET_INC_STATS_BH(LINUX_MIB_TCPDSACKOFORECV); + } + } + + /* D-SACK for already forgotten data... Do dumb counting. */ + if (dup_sack && + !after(end_seq_0, prior_snd_una) && + after(end_seq_0, tp->undo_marker)) + tp->undo_retrans--; + + return dup_sack; +} + +/* Check if skb is fully within the SACK block. In presence of GSO skbs, + * the incoming SACK may not exactly match but we can find smaller MSS + * aligned portion of it that matches. Therefore we might need to fragment + * which may fail and creates some hassle (caller must handle error case + * returns). 
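tcp_check_dsack() above recognises a duplicate SACK in exactly two ways: the first block lies below the cumulative ACK, or it is fully contained in the second block. A compact userspace restatement of that detection test (illustrative only):

#include <stdint.h>

static inline int before(uint32_t a, uint32_t b) { return (int32_t)(a - b) < 0; }
#define after(b, a)     before(a, b)

/* first block below the ACK, or nested inside the second block -> D-SACK */
static int is_dsack(uint32_t ack_seq, int num_sacks,
                    uint32_t s0, uint32_t e0, uint32_t s1, uint32_t e1)
{
        if (before(s0, ack_seq))
                return 1;
        return num_sacks > 1 && !after(e0, e1) && !before(s0, s1);
}

int main(void)
{
        /* block [10,20) reported although the peer already ACKed up to 30 */
        return !is_dsack(30, 1, 10, 20, 0, 0);  /* exits 0: detected */
}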
+ */ +int tcp_match_skb_to_sack(struct sock *sk, struct sk_buff *skb, + u32 start_seq, u32 end_seq) +{ + int in_sack, err; + unsigned int pkt_len; + + in_sack = !after(start_seq, TCP_SKB_CB(skb)->seq) && + !before(end_seq, TCP_SKB_CB(skb)->end_seq); + + if (tcp_skb_pcount(skb) > 1 && !in_sack && + after(TCP_SKB_CB(skb)->end_seq, start_seq)) { + + in_sack = !after(start_seq, TCP_SKB_CB(skb)->seq); + + if (!in_sack) + pkt_len = start_seq - TCP_SKB_CB(skb)->seq; + else + pkt_len = end_seq - TCP_SKB_CB(skb)->seq; + err = tcp_fragment(sk, skb, pkt_len, skb_shinfo(skb)->gso_size); + if (err < 0) + return err; + } + + return in_sack; +} + static int tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb, u32 prior_snd_una) { @@ -972,38 +1242,24 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb, u32 prior_snd_ int num_sacks = (ptr[1] - TCPOLEN_SACK_BASE)>>3; int reord = tp->packets_out; int prior_fackets; - u32 lost_retrans = 0; + u32 highest_sack_end_seq = 0; int flag = 0; int found_dup_sack = 0; int cached_fack_count; int i; int first_sack_index; - if (!tp->sacked_out) - tp->fackets_out = 0; + if (!tp->sacked_out) { + if (WARN_ON(tp->fackets_out)) + tp->fackets_out = 0; + tp->highest_sack = tp->snd_una; + } prior_fackets = tp->fackets_out; - /* Check for D-SACK. */ - if (before(ntohl(sp[0].start_seq), TCP_SKB_CB(ack_skb)->ack_seq)) { + found_dup_sack = tcp_check_dsack(tp, ack_skb, sp, + num_sacks, prior_snd_una); + if (found_dup_sack) flag |= FLAG_DSACKING_ACK; - found_dup_sack = 1; - tp->rx_opt.sack_ok |= 4; - NET_INC_STATS_BH(LINUX_MIB_TCPDSACKRECV); - } else if (num_sacks > 1 && - !after(ntohl(sp[0].end_seq), ntohl(sp[1].end_seq)) && - !before(ntohl(sp[0].start_seq), ntohl(sp[1].start_seq))) { - flag |= FLAG_DSACKING_ACK; - found_dup_sack = 1; - tp->rx_opt.sack_ok |= 4; - NET_INC_STATS_BH(LINUX_MIB_TCPDSACKOFORECV); - } - - /* D-SACK for already forgotten data... - * Do dumb counting. 
*/ - if (found_dup_sack && - !after(ntohl(sp[0].end_seq), prior_snd_una) && - after(ntohl(sp[0].end_seq), tp->undo_marker)) - tp->undo_retrans--; /* Eliminate too old ACKs, but take into * account more or less fresh ones, they can @@ -1083,6 +1339,22 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb, u32 prior_snd_ int fack_count; int dup_sack = (found_dup_sack && (i == first_sack_index)); + if (!tcp_is_sackblock_valid(tp, dup_sack, start_seq, end_seq)) { + if (dup_sack) { + if (!tp->undo_marker) + NET_INC_STATS_BH(LINUX_MIB_TCPDSACKIGNOREDNOUNDO); + else + NET_INC_STATS_BH(LINUX_MIB_TCPDSACKIGNOREDOLD); + } else { + /* Don't count olds caused by ACK reordering */ + if ((TCP_SKB_CB(ack_skb)->ack_seq != tp->snd_una) && + !after(end_seq, tp->snd_una)) + continue; + NET_INC_STATS_BH(LINUX_MIB_TCPSACKDISCARD); + } + continue; + } + skb = cached_skb; fack_count = cached_fack_count; @@ -1091,7 +1363,7 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb, u32 prior_snd_ flag |= FLAG_DATA_LOST; tcp_for_write_queue_from(skb, sk) { - int in_sack, pcount; + int in_sack; u8 sacked; if (skb == tcp_send_head(sk)) @@ -1110,30 +1382,11 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb, u32 prior_snd_ if (!before(TCP_SKB_CB(skb)->seq, end_seq)) break; - in_sack = !after(start_seq, TCP_SKB_CB(skb)->seq) && - !before(end_seq, TCP_SKB_CB(skb)->end_seq); - - pcount = tcp_skb_pcount(skb); - - if (pcount > 1 && !in_sack && - after(TCP_SKB_CB(skb)->end_seq, start_seq)) { - unsigned int pkt_len; - - in_sack = !after(start_seq, - TCP_SKB_CB(skb)->seq); - - if (!in_sack) - pkt_len = (start_seq - - TCP_SKB_CB(skb)->seq); - else - pkt_len = (end_seq - - TCP_SKB_CB(skb)->seq); - if (tcp_fragment(sk, skb, pkt_len, skb_shinfo(skb)->gso_size)) - break; - pcount = tcp_skb_pcount(skb); - } + in_sack = tcp_match_skb_to_sack(sk, skb, start_seq, end_seq); + if (in_sack < 0) + break; - fack_count += pcount; + fack_count += tcp_skb_pcount(skb); sacked = TCP_SKB_CB(skb)->sacked; @@ -1160,11 +1413,6 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb, u32 prior_snd_ continue; } - if ((sacked&TCPCB_SACKED_RETRANS) && - after(end_seq, TCP_SKB_CB(skb)->ack_seq) && - (!lost_retrans || after(end_seq, lost_retrans))) - lost_retrans = end_seq; - if (!in_sack) continue; @@ -1217,6 +1465,11 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb, u32 prior_snd_ if (fack_count > tp->fackets_out) tp->fackets_out = fack_count; + + if (after(TCP_SKB_CB(skb)->seq, tp->highest_sack)) { + tp->highest_sack = TCP_SKB_CB(skb)->seq; + highest_sack_end_seq = TCP_SKB_CB(skb)->end_seq; + } } else { if (dup_sack && (sacked&TCPCB_RETRANS)) reord = min(fack_count, reord); @@ -1236,45 +1489,12 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb, u32 prior_snd_ } } - /* Check for lost retransmit. This superb idea is - * borrowed from "ratehalving". Event "C". - * Later note: FACK people cheated me again 8), - * we have to account for reordering! Ugly, - * but should help. 
- */ - if (lost_retrans && icsk->icsk_ca_state == TCP_CA_Recovery) { - struct sk_buff *skb; - - tcp_for_write_queue(skb, sk) { - if (skb == tcp_send_head(sk)) - break; - if (after(TCP_SKB_CB(skb)->seq, lost_retrans)) - break; - if (!after(TCP_SKB_CB(skb)->end_seq, tp->snd_una)) - continue; - if ((TCP_SKB_CB(skb)->sacked&TCPCB_SACKED_RETRANS) && - after(lost_retrans, TCP_SKB_CB(skb)->ack_seq) && - (IsFack(tp) || - !before(lost_retrans, - TCP_SKB_CB(skb)->ack_seq + tp->reordering * - tp->mss_cache))) { - TCP_SKB_CB(skb)->sacked &= ~TCPCB_SACKED_RETRANS; - tp->retrans_out -= tcp_skb_pcount(skb); - - /* clear lost hint */ - tp->retransmit_skb_hint = NULL; + if (tp->retrans_out && + after(highest_sack_end_seq, tp->lost_retrans_low) && + icsk->icsk_ca_state == TCP_CA_Recovery) + flag |= tcp_mark_lost_retrans(sk, highest_sack_end_seq); - if (!(TCP_SKB_CB(skb)->sacked&(TCPCB_LOST|TCPCB_SACKED_ACKED))) { - tp->lost_out += tcp_skb_pcount(skb); - TCP_SKB_CB(skb)->sacked |= TCPCB_LOST; - flag |= FLAG_DATA_SACKED; - NET_INC_STATS_BH(LINUX_MIB_TCPLOSTRETRANSMIT); - } - } - } - } - - tp->left_out = tp->sacked_out + tp->lost_out; + tcp_verify_left_out(tp); if ((reord < tp->fackets_out) && icsk->icsk_ca_state != TCP_CA_Loss && (!tp->frto_highmark || after(tp->snd_una, tp->frto_highmark))) @@ -1289,6 +1509,56 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb, u32 prior_snd_ return flag; } +/* If we receive more dupacks than we expected counting segments + * in assumption of absent reordering, interpret this as reordering. + * The only another reason could be bug in receiver TCP. + */ +static void tcp_check_reno_reordering(struct sock *sk, const int addend) +{ + struct tcp_sock *tp = tcp_sk(sk); + u32 holes; + + holes = max(tp->lost_out, 1U); + holes = min(holes, tp->packets_out); + + if ((tp->sacked_out + holes) > tp->packets_out) { + tp->sacked_out = tp->packets_out - holes; + tcp_update_reordering(sk, tp->packets_out + addend, 0); + } +} + +/* Emulate SACKs for SACKless connection: account for a new dupack. */ + +static void tcp_add_reno_sack(struct sock *sk) +{ + struct tcp_sock *tp = tcp_sk(sk); + tp->sacked_out++; + tcp_check_reno_reordering(sk, 0); + tcp_verify_left_out(tp); +} + +/* Account for ACK, ACKing some data in Reno Recovery phase. */ + +static void tcp_remove_reno_sacks(struct sock *sk, int acked) +{ + struct tcp_sock *tp = tcp_sk(sk); + + if (acked > 0) { + /* One ACK acked hole. The rest eat duplicate ACKs. */ + if (acked-1 >= tp->sacked_out) + tp->sacked_out = 0; + else + tp->sacked_out -= acked-1; + } + tcp_check_reno_reordering(sk, acked); + tcp_verify_left_out(tp); +} + +static inline void tcp_reset_reno_sack(struct tcp_sock *tp) +{ + tp->sacked_out = 0; +} + /* F-RTO can only be used if TCP has never retransmitted anything other than * head (SACK enhanced variant from Appendix B of RFC4138 is more robust here) */ @@ -1376,11 +1646,13 @@ void tcp_enter_frto(struct sock *sk) tp->undo_retrans = 0; skb = tcp_write_queue_head(sk); + if (TCP_SKB_CB(skb)->sacked & TCPCB_RETRANS) + tp->undo_marker = 0; if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_RETRANS) { TCP_SKB_CB(skb)->sacked &= ~TCPCB_SACKED_RETRANS; tp->retrans_out -= tcp_skb_pcount(skb); } - tcp_sync_left_out(tp); + tcp_verify_left_out(tp); /* Earlier loss recovery underway (see RFC4138; Appendix B). * The last condition is necessary at least in tp->frto_counter case. 
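The Reno (SACKless) dupack emulation is moved up in the file but not changed: every duplicate ACK counts as one fake SACKed segment, clamped so that sacked_out plus the assumed holes never exceeds packets_out (any excess is treated as reordering), and a cumulative ACK first fills one hole and then consumes emulated SACKs. A self-contained sketch of that accounting with hypothetical names:

#include <assert.h>

struct reno { unsigned int packets_out, sacked_out, lost_out; };

static void reno_dupack(struct reno *r)
{
        unsigned int holes = r->lost_out ? r->lost_out : 1;

        if (holes > r->packets_out)
                holes = r->packets_out;
        r->sacked_out++;
        if (r->sacked_out + holes > r->packets_out)
                r->sacked_out = r->packets_out - holes; /* reordering seen */
}

static void reno_cumack(struct reno *r, unsigned int acked)
{
        if (!acked)
                return;
        if (acked - 1 >= r->sacked_out)         /* one ACK fills the hole ... */
                r->sacked_out = 0;
        else                                    /* ... the rest eat dupacks */
                r->sacked_out -= acked - 1;
}

int main(void)
{
        struct reno r = { .packets_out = 10 };

        reno_dupack(&r);
        reno_dupack(&r);
        assert(r.sacked_out == 2);
        reno_cumack(&r, 3);
        assert(r.sacked_out == 0);
        return 0;
}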
@@ -1405,17 +1677,15 @@ static void tcp_enter_frto_loss(struct sock *sk, int allowed_segments, int flag) { struct tcp_sock *tp = tcp_sk(sk); struct sk_buff *skb; - int cnt = 0; - tp->sacked_out = 0; tp->lost_out = 0; - tp->fackets_out = 0; tp->retrans_out = 0; + if (tcp_is_reno(tp)) + tcp_reset_reno_sack(tp); tcp_for_write_queue(skb, sk) { if (skb == tcp_send_head(sk)) break; - cnt += tcp_skb_pcount(skb); /* * Count the retransmission made on RTO correctly (only when * waiting for the first ACK and did not get it)... @@ -1427,30 +1697,25 @@ static void tcp_enter_frto_loss(struct sock *sk, int allowed_segments, int flag) /* ...enter this if branch just for the first segment */ flag |= FLAG_DATA_ACKED; } else { + if (TCP_SKB_CB(skb)->sacked & TCPCB_RETRANS) + tp->undo_marker = 0; TCP_SKB_CB(skb)->sacked &= ~(TCPCB_LOST|TCPCB_SACKED_RETRANS); } - if (!(TCP_SKB_CB(skb)->sacked&TCPCB_SACKED_ACKED)) { - /* Do not mark those segments lost that were - * forward transmitted after RTO - */ - if (!after(TCP_SKB_CB(skb)->end_seq, - tp->frto_highmark)) { - TCP_SKB_CB(skb)->sacked |= TCPCB_LOST; - tp->lost_out += tcp_skb_pcount(skb); - } - } else { - tp->sacked_out += tcp_skb_pcount(skb); - tp->fackets_out = cnt; + /* Don't lost mark skbs that were fwd transmitted after RTO */ + if (!(TCP_SKB_CB(skb)->sacked&TCPCB_SACKED_ACKED) && + !after(TCP_SKB_CB(skb)->end_seq, tp->frto_highmark)) { + TCP_SKB_CB(skb)->sacked |= TCPCB_LOST; + tp->lost_out += tcp_skb_pcount(skb); } } - tcp_sync_left_out(tp); + tcp_verify_left_out(tp); tp->snd_cwnd = tcp_packets_in_flight(tp) + allowed_segments; tp->snd_cwnd_cnt = 0; tp->snd_cwnd_stamp = tcp_time_stamp; - tp->undo_marker = 0; tp->frto_counter = 0; + tp->bytes_acked = 0; tp->reordering = min_t(unsigned int, tp->reordering, sysctl_tcp_reordering); @@ -1458,22 +1723,26 @@ static void tcp_enter_frto_loss(struct sock *sk, int allowed_segments, int flag) tp->high_seq = tp->frto_highmark; TCP_ECN_queue_cwr(tp); - clear_all_retrans_hints(tp); + tcp_clear_retrans_hints_partial(tp); } -void tcp_clear_retrans(struct tcp_sock *tp) +static void tcp_clear_retrans_partial(struct tcp_sock *tp) { - tp->left_out = 0; tp->retrans_out = 0; - - tp->fackets_out = 0; - tp->sacked_out = 0; tp->lost_out = 0; tp->undo_marker = 0; tp->undo_retrans = 0; } +void tcp_clear_retrans(struct tcp_sock *tp) +{ + tcp_clear_retrans_partial(tp); + + tp->fackets_out = 0; + tp->sacked_out = 0; +} + /* Enter Loss state. If "how" is not zero, forget all SACK information * and reset tags completely, otherwise preserve SACKs. If receiver * dropped its ofo queue, we will know this due to reneging detection. @@ -1483,7 +1752,6 @@ void tcp_enter_loss(struct sock *sk, int how) const struct inet_connection_sock *icsk = inet_csk(sk); struct tcp_sock *tp = tcp_sk(sk); struct sk_buff *skb; - int cnt = 0; /* Reduce ssthresh if it has not yet been made inside this window. */ if (icsk->icsk_ca_state <= TCP_CA_Disorder || tp->snd_una == tp->high_seq || @@ -1497,17 +1765,26 @@ void tcp_enter_loss(struct sock *sk, int how) tp->snd_cwnd_stamp = tcp_time_stamp; tp->bytes_acked = 0; - tcp_clear_retrans(tp); + tcp_clear_retrans_partial(tp); + + if (tcp_is_reno(tp)) + tcp_reset_reno_sack(tp); - /* Push undo marker, if it was plain RTO and nothing - * was retransmitted. */ - if (!how) + if (!how) { + /* Push undo marker, if it was plain RTO and nothing + * was retransmitted. 
*/ tp->undo_marker = tp->snd_una; + tcp_clear_retrans_hints_partial(tp); + } else { + tp->sacked_out = 0; + tp->fackets_out = 0; + tcp_clear_all_retrans_hints(tp); + } tcp_for_write_queue(skb, sk) { if (skb == tcp_send_head(sk)) break; - cnt += tcp_skb_pcount(skb); + if (TCP_SKB_CB(skb)->sacked&TCPCB_RETRANS) tp->undo_marker = 0; TCP_SKB_CB(skb)->sacked &= (~TCPCB_TAGBITS)|TCPCB_SACKED_ACKED; @@ -1515,12 +1792,9 @@ void tcp_enter_loss(struct sock *sk, int how) TCP_SKB_CB(skb)->sacked &= ~TCPCB_SACKED_ACKED; TCP_SKB_CB(skb)->sacked |= TCPCB_LOST; tp->lost_out += tcp_skb_pcount(skb); - } else { - tp->sacked_out += tcp_skb_pcount(skb); - tp->fackets_out = cnt; } } - tcp_sync_left_out(tp); + tcp_verify_left_out(tp); tp->reordering = min_t(unsigned int, tp->reordering, sysctl_tcp_reordering); @@ -1529,8 +1803,6 @@ void tcp_enter_loss(struct sock *sk, int how) TCP_ECN_queue_cwr(tp); /* Abort FRTO algorithm if one is in progress */ tp->frto_counter = 0; - - clear_all_retrans_hints(tp); } static int tcp_check_sack_reneging(struct sock *sk) @@ -1560,7 +1832,7 @@ static int tcp_check_sack_reneging(struct sock *sk) static inline int tcp_fackets_out(struct tcp_sock *tp) { - return IsReno(tp) ? tp->sacked_out+1 : tp->fackets_out; + return tcp_is_reno(tp) ? tp->sacked_out+1 : tp->fackets_out; } static inline int tcp_skb_timedout(struct sock *sk, struct sk_buff *skb) @@ -1708,55 +1980,18 @@ static int tcp_time_to_recover(struct sock *sk) return 0; } -/* If we receive more dupacks than we expected counting segments - * in assumption of absent reordering, interpret this as reordering. - * The only another reason could be bug in receiver TCP. +/* RFC: This is from the original, I doubt that this is necessary at all: + * clear xmit_retrans hint if seq of this skb is beyond hint. How could we + * retransmitted past LOST markings in the first place? I'm not fully sure + * about undo and end of connection cases, which can cause R without L? */ -static void tcp_check_reno_reordering(struct sock *sk, const int addend) -{ - struct tcp_sock *tp = tcp_sk(sk); - u32 holes; - - holes = max(tp->lost_out, 1U); - holes = min(holes, tp->packets_out); - - if ((tp->sacked_out + holes) > tp->packets_out) { - tp->sacked_out = tp->packets_out - holes; - tcp_update_reordering(sk, tp->packets_out + addend, 0); - } -} - -/* Emulate SACKs for SACKless connection: account for a new dupack. */ - -static void tcp_add_reno_sack(struct sock *sk) -{ - struct tcp_sock *tp = tcp_sk(sk); - tp->sacked_out++; - tcp_check_reno_reordering(sk, 0); - tcp_sync_left_out(tp); -} - -/* Account for ACK, ACKing some data in Reno Recovery phase. */ - -static void tcp_remove_reno_sacks(struct sock *sk, int acked) -{ - struct tcp_sock *tp = tcp_sk(sk); - - if (acked > 0) { - /* One ACK acked hole. The rest eat duplicate ACKs. */ - if (acked-1 >= tp->sacked_out) - tp->sacked_out = 0; - else - tp->sacked_out -= acked-1; - } - tcp_check_reno_reordering(sk, acked); - tcp_sync_left_out(tp); -} - -static inline void tcp_reset_reno_sack(struct tcp_sock *tp) +static void tcp_verify_retransmit_hint(struct tcp_sock *tp, + struct sk_buff *skb) { - tp->sacked_out = 0; - tp->left_out = tp->lost_out; + if ((tp->retransmit_skb_hint != NULL) && + before(TCP_SKB_CB(skb)->seq, + TCP_SKB_CB(tp->retransmit_skb_hint)->seq)) + tp->retransmit_skb_hint = NULL; } /* Mark head of queue up as lost. 
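tcp_verify_retransmit_hint() above folds the two copies of the "clear xmit_retrans hint if this is beyond hint" check into one helper: the cached retransmit pointer is dropped only when a segment before it is newly marked lost, since the retransmit walker would otherwise skip that segment. A sketch of the rule (illustrative names, wrap-safe before() as in the earlier sketches):

#include <stdint.h>
#include <stddef.h>

static inline int before(uint32_t a, uint32_t b) { return (int32_t)(a - b) < 0; }

struct retrans_hint { const void *skb; uint32_t seq; };

static void verify_retransmit_hint(struct retrans_hint *h, uint32_t newly_lost_seq)
{
        if (h->skb != NULL && before(newly_lost_seq, h->seq))
                h->skb = NULL;  /* force the walker to restart behind us */
}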
*/ @@ -1786,20 +2021,13 @@ static void tcp_mark_head_lost(struct sock *sk, cnt += tcp_skb_pcount(skb); if (cnt > packets || after(TCP_SKB_CB(skb)->end_seq, high_seq)) break; - if (!(TCP_SKB_CB(skb)->sacked&TCPCB_TAGBITS)) { + if (!(TCP_SKB_CB(skb)->sacked & (TCPCB_SACKED_ACKED|TCPCB_LOST))) { TCP_SKB_CB(skb)->sacked |= TCPCB_LOST; tp->lost_out += tcp_skb_pcount(skb); - - /* clear xmit_retransmit_queue hints - * if this is beyond hint */ - if (tp->retransmit_skb_hint != NULL && - before(TCP_SKB_CB(skb)->seq, - TCP_SKB_CB(tp->retransmit_skb_hint)->seq)) - tp->retransmit_skb_hint = NULL; - + tcp_verify_retransmit_hint(tp, skb); } } - tcp_sync_left_out(tp); + tcp_verify_left_out(tp); } /* Account newly detected lost packet(s) */ @@ -1808,7 +2036,7 @@ static void tcp_update_scoreboard(struct sock *sk) { struct tcp_sock *tp = tcp_sk(sk); - if (IsFack(tp)) { + if (tcp_is_fack(tp)) { int lost = tp->fackets_out - tp->reordering; if (lost <= 0) lost = 1; @@ -1822,7 +2050,7 @@ static void tcp_update_scoreboard(struct sock *sk) * Hence, we can detect timed out packets during fast * retransmit without falling to slow start. */ - if (!IsReno(tp) && tcp_head_timedout(sk)) { + if (!tcp_is_reno(tp) && tcp_head_timedout(sk)) { struct sk_buff *skb; skb = tp->scoreboard_skb_hint ? tp->scoreboard_skb_hint @@ -1837,19 +2065,13 @@ static void tcp_update_scoreboard(struct sock *sk) if (!(TCP_SKB_CB(skb)->sacked&TCPCB_TAGBITS)) { TCP_SKB_CB(skb)->sacked |= TCPCB_LOST; tp->lost_out += tcp_skb_pcount(skb); - - /* clear xmit_retrans hint */ - if (tp->retransmit_skb_hint && - before(TCP_SKB_CB(skb)->seq, - TCP_SKB_CB(tp->retransmit_skb_hint)->seq)) - - tp->retransmit_skb_hint = NULL; + tcp_verify_retransmit_hint(tp, skb); } } tp->scoreboard_skb_hint = skb; - tcp_sync_left_out(tp); + tcp_verify_left_out(tp); } } @@ -1880,7 +2102,7 @@ static void tcp_cwnd_down(struct sock *sk, int flag) int decr = tp->snd_cwnd_cnt + 1; if ((flag&(FLAG_ANY_PROGRESS|FLAG_DSACKING_ACK)) || - (IsReno(tp) && !(flag&FLAG_NOT_DUP))) { + (tcp_is_reno(tp) && !(flag&FLAG_NOT_DUP))) { tp->snd_cwnd_cnt = decr&1; decr >>= 1; @@ -1913,7 +2135,7 @@ static void DBGUNDO(struct sock *sk, const char *msg) printk(KERN_DEBUG "Undo %s %u.%u.%u.%u/%u c%u l%u ss%u/%u p%u\n", msg, NIPQUAD(inet->daddr), ntohs(inet->dport), - tp->snd_cwnd, tp->left_out, + tp->snd_cwnd, tcp_left_out(tp), tp->snd_ssthresh, tp->prior_ssthresh, tp->packets_out); } @@ -1945,7 +2167,7 @@ static void tcp_undo_cwr(struct sock *sk, const int undo) /* There is something screwy going on with the retrans hints after an undo */ - clear_all_retrans_hints(tp); + tcp_clear_all_retrans_hints(tp); } static inline int tcp_may_undo(struct tcp_sock *tp) @@ -1971,7 +2193,7 @@ static int tcp_try_undo_recovery(struct sock *sk) NET_INC_STATS_BH(LINUX_MIB_TCPFULLUNDO); tp->undo_marker = 0; } - if (tp->snd_una == tp->high_seq && IsReno(tp)) { + if (tp->snd_una == tp->high_seq && tcp_is_reno(tp)) { /* Hold old state until something *above* high_seq * is ACKed. For Reno it is MUST to prevent false * fast retransmits (RFC2582). SACK TCP is safe. */ @@ -2001,7 +2223,7 @@ static int tcp_try_undo_partial(struct sock *sk, int acked) { struct tcp_sock *tp = tcp_sk(sk); /* Partial ACK arrived. Force Hoe's retransmit. */ - int failed = IsReno(tp) || tp->fackets_out>tp->reordering; + int failed = tcp_is_reno(tp) || tp->fackets_out>tp->reordering; if (tcp_may_undo(tp)) { /* Plain luck! 
Hole if filled with delayed @@ -2038,16 +2260,15 @@ static int tcp_try_undo_loss(struct sock *sk) TCP_SKB_CB(skb)->sacked &= ~TCPCB_LOST; } - clear_all_retrans_hints(tp); + tcp_clear_all_retrans_hints(tp); DBGUNDO(sk, "partial loss"); tp->lost_out = 0; - tp->left_out = tp->sacked_out; tcp_undo_cwr(sk, 1); NET_INC_STATS_BH(LINUX_MIB_TCPLOSSUNDO); inet_csk(sk)->icsk_retransmits = 0; tp->undo_marker = 0; - if (!IsReno(tp)) + if (tcp_is_sack(tp)) tcp_set_ca_state(sk, TCP_CA_Open); return 1; } @@ -2066,7 +2287,7 @@ static void tcp_try_to_open(struct sock *sk, int flag) { struct tcp_sock *tp = tcp_sk(sk); - tcp_sync_left_out(tp); + tcp_verify_left_out(tp); if (tp->retrans_out == 0) tp->retrans_stamp = 0; @@ -2077,7 +2298,7 @@ static void tcp_try_to_open(struct sock *sk, int flag) if (inet_csk(sk)->icsk_ca_state != TCP_CA_CWR) { int state = TCP_CA_Open; - if (tp->left_out || tp->retrans_out || tp->undo_marker) + if (tcp_left_out(tp) || tp->retrans_out || tp->undo_marker) state = TCP_CA_Disorder; if (inet_csk(sk)->icsk_ca_state != state) { @@ -2130,7 +2351,7 @@ static void tcp_mtup_probe_success(struct sock *sk, struct sk_buff *skb) * tcp_xmit_retransmit_queue(). */ static void -tcp_fastretrans_alert(struct sock *sk, int prior_packets, int flag) +tcp_fastretrans_alert(struct sock *sk, int pkts_acked, int flag) { struct inet_connection_sock *icsk = inet_csk(sk); struct tcp_sock *tp = tcp_sk(sk); @@ -2142,8 +2363,8 @@ tcp_fastretrans_alert(struct sock *sk, int prior_packets, int flag) * 1. Reno does not count dupacks (sacked_out) automatically. */ if (!tp->packets_out) tp->sacked_out = 0; - /* 2. SACK counts snd_fack in packets inaccurately. */ - if (tp->sacked_out == 0) + + if (WARN_ON(!tp->sacked_out && tp->fackets_out)) tp->fackets_out = 0; /* Now state machine starts. @@ -2164,8 +2385,8 @@ tcp_fastretrans_alert(struct sock *sk, int prior_packets, int flag) NET_INC_STATS_BH(LINUX_MIB_TCPLOSS); } - /* D. Synchronize left_out to current state. */ - tcp_sync_left_out(tp); + /* D. Check consistency of the current state. */ + tcp_verify_left_out(tp); /* E. Check state exit conditions. State can be terminated * when high_seq is ACKed. */ @@ -2194,14 +2415,14 @@ tcp_fastretrans_alert(struct sock *sk, int prior_packets, int flag) if (!tp->undo_marker || /* For SACK case do not Open to allow to undo * catching for all duplicate ACKs. */ - IsReno(tp) || tp->snd_una != tp->high_seq) { + tcp_is_reno(tp) || tp->snd_una != tp->high_seq) { tp->undo_marker = 0; tcp_set_ca_state(sk, TCP_CA_Open); } break; case TCP_CA_Recovery: - if (IsReno(tp)) + if (tcp_is_reno(tp)) tcp_reset_reno_sack(tp); if (tcp_try_undo_recovery(sk)) return; @@ -2214,14 +2435,10 @@ tcp_fastretrans_alert(struct sock *sk, int prior_packets, int flag) switch (icsk->icsk_ca_state) { case TCP_CA_Recovery: if (!(flag & FLAG_SND_UNA_ADVANCED)) { - if (IsReno(tp) && is_dupack) + if (tcp_is_reno(tp) && is_dupack) tcp_add_reno_sack(sk); - } else { - int acked = prior_packets - tp->packets_out; - if (IsReno(tp)) - tcp_remove_reno_sacks(sk, acked); - do_lost = tcp_try_undo_partial(sk, acked); - } + } else + do_lost = tcp_try_undo_partial(sk, pkts_acked); break; case TCP_CA_Loss: if (flag&FLAG_DATA_ACKED) @@ -2235,7 +2452,7 @@ tcp_fastretrans_alert(struct sock *sk, int prior_packets, int flag) return; /* Loss is undone; fall through to processing in Open state. 
*/ default: - if (IsReno(tp)) { + if (tcp_is_reno(tp)) { if (flag & FLAG_SND_UNA_ADVANCED) tcp_reset_reno_sack(tp); if (is_dupack) @@ -2263,7 +2480,7 @@ tcp_fastretrans_alert(struct sock *sk, int prior_packets, int flag) /* Otherwise enter Recovery state */ - if (IsReno(tp)) + if (tcp_is_reno(tp)) NET_INC_STATS_BH(LINUX_MIB_TCPRENORECOVERY); else NET_INC_STATS_BH(LINUX_MIB_TCPSACKRECOVERY); @@ -2361,8 +2578,7 @@ static void tcp_cong_avoid(struct sock *sk, u32 ack, /* Restart timer after forward progress on connection. * RFC2988 recommends to restart timer to now+rto. */ - -static void tcp_ack_packets_out(struct sock *sk) +static void tcp_rearm_rto(struct sock *sk) { struct tcp_sock *tp = tcp_sk(sk); @@ -2373,158 +2589,143 @@ static void tcp_ack_packets_out(struct sock *sk) } } -static int tcp_tso_acked(struct sock *sk, struct sk_buff *skb, - __u32 now, __s32 *seq_rtt) +/* If we get here, the whole TSO packet has not been acked. */ +static u32 tcp_tso_acked(struct sock *sk, struct sk_buff *skb) { struct tcp_sock *tp = tcp_sk(sk); - struct tcp_skb_cb *scb = TCP_SKB_CB(skb); - __u32 seq = tp->snd_una; - __u32 packets_acked; - int acked = 0; + u32 packets_acked; - /* If we get here, the whole TSO packet has not been - * acked. - */ - BUG_ON(!after(scb->end_seq, seq)); + BUG_ON(!after(TCP_SKB_CB(skb)->end_seq, tp->snd_una)); packets_acked = tcp_skb_pcount(skb); - if (tcp_trim_head(sk, skb, seq - scb->seq)) + if (tcp_trim_head(sk, skb, tp->snd_una - TCP_SKB_CB(skb)->seq)) return 0; packets_acked -= tcp_skb_pcount(skb); if (packets_acked) { - __u8 sacked = scb->sacked; - - acked |= FLAG_DATA_ACKED; - if (sacked) { - if (sacked & TCPCB_RETRANS) { - if (sacked & TCPCB_SACKED_RETRANS) - tp->retrans_out -= packets_acked; - acked |= FLAG_RETRANS_DATA_ACKED; - *seq_rtt = -1; - } else if (*seq_rtt < 0) - *seq_rtt = now - scb->when; - if (sacked & TCPCB_SACKED_ACKED) - tp->sacked_out -= packets_acked; - if (sacked & TCPCB_LOST) - tp->lost_out -= packets_acked; - if (sacked & TCPCB_URG) { - if (tp->urg_mode && - !before(seq, tp->snd_up)) - tp->urg_mode = 0; - } - } else if (*seq_rtt < 0) - *seq_rtt = now - scb->when; - - if (tp->fackets_out) { - __u32 dval = min(tp->fackets_out, packets_acked); - tp->fackets_out -= dval; - } - /* hint's skb might be NULL but we don't need to care */ - tp->fastpath_cnt_hint -= min_t(u32, packets_acked, - tp->fastpath_cnt_hint); - tp->packets_out -= packets_acked; - BUG_ON(tcp_skb_pcount(skb) == 0); - BUG_ON(!before(scb->seq, scb->end_seq)); + BUG_ON(!before(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq)); } - return acked; + return packets_acked; } -/* Remove acknowledged frames from the retransmission queue. */ -static int tcp_clean_rtx_queue(struct sock *sk, __s32 *seq_rtt_p) +/* Remove acknowledged frames from the retransmission queue. If our packet + * is before the ack sequence we can discard it as it's confirmed to have + * arrived at the other end. 
+ */ +static int tcp_clean_rtx_queue(struct sock *sk, s32 *seq_rtt_p) { struct tcp_sock *tp = tcp_sk(sk); const struct inet_connection_sock *icsk = inet_csk(sk); struct sk_buff *skb; - __u32 now = tcp_time_stamp; - int acked = 0; + u32 now = tcp_time_stamp; + int fully_acked = 1; + int flag = 0; int prior_packets = tp->packets_out; - __s32 seq_rtt = -1; + s32 seq_rtt = -1; ktime_t last_ackt = net_invalid_timestamp(); - while ((skb = tcp_write_queue_head(sk)) && - skb != tcp_send_head(sk)) { + while ((skb = tcp_write_queue_head(sk)) && skb != tcp_send_head(sk)) { struct tcp_skb_cb *scb = TCP_SKB_CB(skb); - __u8 sacked = scb->sacked; + u32 end_seq; + u32 packets_acked; + u8 sacked = scb->sacked; - /* If our packet is before the ack sequence we can - * discard it as it's confirmed to have arrived at - * the other end. - */ if (after(scb->end_seq, tp->snd_una)) { - if (tcp_skb_pcount(skb) > 1 && - after(tp->snd_una, scb->seq)) - acked |= tcp_tso_acked(sk, skb, - now, &seq_rtt); - break; - } + if (tcp_skb_pcount(skb) == 1 || + !after(tp->snd_una, scb->seq)) + break; - /* Initial outgoing SYN's get put onto the write_queue - * just like anything else we transmit. It is not - * true data, and if we misinform our callers that - * this ACK acks real data, we will erroneously exit - * connection startup slow start one packet too - * quickly. This is severely frowned upon behavior. - */ - if (!(scb->flags & TCPCB_FLAG_SYN)) { - acked |= FLAG_DATA_ACKED; + packets_acked = tcp_tso_acked(sk, skb); + if (!packets_acked) + break; + + fully_acked = 0; + end_seq = tp->snd_una; } else { - acked |= FLAG_SYN_ACKED; - tp->retrans_stamp = 0; + packets_acked = tcp_skb_pcount(skb); + end_seq = scb->end_seq; } /* MTU probing checks */ - if (icsk->icsk_mtup.probe_size) { - if (!after(tp->mtu_probe.probe_seq_end, TCP_SKB_CB(skb)->end_seq)) { - tcp_mtup_probe_success(sk, skb); - } + if (fully_acked && icsk->icsk_mtup.probe_size && + !after(tp->mtu_probe.probe_seq_end, scb->end_seq)) { + tcp_mtup_probe_success(sk, skb); } if (sacked) { if (sacked & TCPCB_RETRANS) { if (sacked & TCPCB_SACKED_RETRANS) - tp->retrans_out -= tcp_skb_pcount(skb); - acked |= FLAG_RETRANS_DATA_ACKED; + tp->retrans_out -= packets_acked; + flag |= FLAG_RETRANS_DATA_ACKED; seq_rtt = -1; + if ((flag & FLAG_DATA_ACKED) || + (packets_acked > 1)) + flag |= FLAG_NONHEAD_RETRANS_ACKED; } else if (seq_rtt < 0) { seq_rtt = now - scb->when; - last_ackt = skb->tstamp; + if (fully_acked) + last_ackt = skb->tstamp; } + if (sacked & TCPCB_SACKED_ACKED) - tp->sacked_out -= tcp_skb_pcount(skb); + tp->sacked_out -= packets_acked; if (sacked & TCPCB_LOST) - tp->lost_out -= tcp_skb_pcount(skb); - if (sacked & TCPCB_URG) { - if (tp->urg_mode && - !before(scb->end_seq, tp->snd_up)) - tp->urg_mode = 0; - } + tp->lost_out -= packets_acked; + + if ((sacked & TCPCB_URG) && tp->urg_mode && + !before(end_seq, tp->snd_up)) + tp->urg_mode = 0; } else if (seq_rtt < 0) { seq_rtt = now - scb->when; - last_ackt = skb->tstamp; + if (fully_acked) + last_ackt = skb->tstamp; + } + tp->packets_out -= packets_acked; + + /* Initial outgoing SYN's get put onto the write_queue + * just like anything else we transmit. It is not + * true data, and if we misinform our callers that + * this ACK acks real data, we will erroneously exit + * connection startup slow start one packet too + * quickly. This is severely frowned upon behavior. 
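In the rewritten tcp_tso_acked() above, the return value is the number of sub-segments that snd_una now fully covers: the skb's pcount before tcp_trim_head() minus whatever pcount remains afterwards. A rough userspace restatement of that arithmetic, assuming MSS-aligned rounding (the real pcount lives in skb_shinfo(skb)->gso_segs):

#include <stdint.h>

#define DIV_ROUND_UP(n, d)      (((n) + (d) - 1) / (d))

static unsigned int tso_acked_pcount(uint32_t seq, uint32_t end_seq,
                                     uint32_t snd_una, unsigned int mss)
{
        unsigned int before_trim = DIV_ROUND_UP(end_seq - seq, mss);
        unsigned int after_trim  = DIV_ROUND_UP(end_seq - snd_una, mss);

        return before_trim - after_trim;        /* sub-segments fully ACKed */
}

The caller, tcp_clean_rtx_queue(), then treats the skb as only partially acked: it clears fully_acked and stops the walk after accounting for those sub-segments.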
+ */ + if (!(scb->flags & TCPCB_FLAG_SYN)) { + flag |= FLAG_DATA_ACKED; + } else { + flag |= FLAG_SYN_ACKED; + tp->retrans_stamp = 0; } - tcp_dec_pcount_approx(&tp->fackets_out, skb); - tcp_packets_out_dec(tp, skb); + + if (!fully_acked) + break; + tcp_unlink_write_queue(skb, sk); sk_stream_free_skb(sk, skb); - clear_all_retrans_hints(tp); + tcp_clear_all_retrans_hints(tp); } - if (acked&FLAG_ACKED) { + if (flag & FLAG_ACKED) { u32 pkts_acked = prior_packets - tp->packets_out; const struct tcp_congestion_ops *ca_ops = inet_csk(sk)->icsk_ca_ops; - tcp_ack_update_rtt(sk, acked, seq_rtt); - tcp_ack_packets_out(sk); + tcp_ack_update_rtt(sk, flag, seq_rtt); + tcp_rearm_rto(sk); + + tp->fackets_out -= min(pkts_acked, tp->fackets_out); + /* hint's skb might be NULL but we don't need to care */ + tp->fastpath_cnt_hint -= min_t(u32, pkts_acked, + tp->fastpath_cnt_hint); + if (tcp_is_reno(tp)) + tcp_remove_reno_sacks(sk, pkts_acked); if (ca_ops->pkts_acked) { s32 rtt_us = -1; /* Is the ACK triggering packet unambiguous? */ - if (!(acked & FLAG_RETRANS_DATA_ACKED)) { + if (!(flag & FLAG_RETRANS_DATA_ACKED)) { /* High resolution needed and available? */ if (ca_ops->flags & TCP_CONG_RTT_STAMP && !ktime_equal(last_ackt, @@ -2543,8 +2744,8 @@ static int tcp_clean_rtx_queue(struct sock *sk, __s32 *seq_rtt_p) BUG_TRAP((int)tp->sacked_out >= 0); BUG_TRAP((int)tp->lost_out >= 0); BUG_TRAP((int)tp->retrans_out >= 0); - if (!tp->packets_out && tp->rx_opt.sack_ok) { - const struct inet_connection_sock *icsk = inet_csk(sk); + if (!tp->packets_out && tcp_is_sack(tp)) { + icsk = inet_csk(sk); if (tp->lost_out) { printk(KERN_DEBUG "Leak l=%u %d\n", tp->lost_out, icsk->icsk_ca_state); @@ -2563,7 +2764,7 @@ static int tcp_clean_rtx_queue(struct sock *sk, __s32 *seq_rtt_p) } #endif *seq_rtt_p = seq_rtt; - return acked; + return flag; } static void tcp_ack_probe(struct sock *sk) @@ -2658,6 +2859,7 @@ static void tcp_conservative_spur_to_response(struct tcp_sock *tp) { tp->snd_cwnd = min(tp->snd_cwnd, tp->snd_ssthresh); tp->snd_cwnd_cnt = 0; + tp->bytes_acked = 0; TCP_ECN_queue_cwr(tp); tcp_moderate_cwnd(tp); } @@ -2712,18 +2914,22 @@ static int tcp_process_frto(struct sock *sk, int flag) { struct tcp_sock *tp = tcp_sk(sk); - tcp_sync_left_out(tp); + tcp_verify_left_out(tp); /* Duplicate the behavior from Loss state (fastretrans_alert) */ if (flag&FLAG_DATA_ACKED) inet_csk(sk)->icsk_retransmits = 0; + if ((flag & FLAG_NONHEAD_RETRANS_ACKED) || + ((tp->frto_counter >= 2) && (flag & FLAG_RETRANS_DATA_ACKED))) + tp->undo_marker = 0; + if (!before(tp->snd_una, tp->frto_highmark)) { tcp_enter_frto_loss(sk, (tp->frto_counter == 1 ? 2 : 3), flag); return 1; } - if (!IsSackFrto() || IsReno(tp)) { + if (!IsSackFrto() || tcp_is_reno(tp)) { /* RFC4138 shortcoming in step 2; should also have case c): * ACK isn't duplicate nor advances window, e.g., opposite dir * data, winupdate @@ -2782,6 +2988,8 @@ static int tcp_process_frto(struct sock *sk, int flag) break; } tp->frto_counter = 0; + tp->undo_marker = 0; + NET_INC_STATS_BH(LINUX_MIB_TCPSPURIOUSRTOS); } return 0; } @@ -2862,6 +3070,9 @@ static int tcp_ack(struct sock *sk, struct sk_buff *skb, int flag) /* See if we can take anything off of the retransmit queue. 
*/ flag |= tcp_clean_rtx_queue(sk, &seq_rtt); + /* Guarantee sacktag reordering detection against wrap-arounds */ + if (before(tp->frto_highmark, tp->snd_una)) + tp->frto_highmark = 0; if (tp->frto_counter) frto_cwnd = tcp_process_frto(sk, flag); @@ -2870,7 +3081,7 @@ static int tcp_ack(struct sock *sk, struct sk_buff *skb, int flag) if ((flag & FLAG_DATA_ACKED) && !frto_cwnd && tcp_may_raise_cwnd(sk, flag)) tcp_cong_avoid(sk, ack, prior_in_flight, 0); - tcp_fastretrans_alert(sk, prior_packets, flag); + tcp_fastretrans_alert(sk, prior_packets - tp->packets_out, flag); } else { if ((flag & FLAG_DATA_ACKED) && !frto_cwnd) tcp_cong_avoid(sk, ack, prior_in_flight, 1); @@ -3207,7 +3418,7 @@ static void tcp_fin(struct sk_buff *skb, struct sock *sk, struct tcphdr *th) * Probably, we should reset in this case. For now drop them. */ __skb_queue_purge(&tp->out_of_order_queue); - if (tp->rx_opt.sack_ok) + if (tcp_is_sack(tp)) tcp_sack_reset(&tp->rx_opt); sk_stream_mem_reclaim(sk); @@ -3237,7 +3448,7 @@ static inline int tcp_sack_extend(struct tcp_sack_block *sp, u32 seq, u32 end_se static void tcp_dsack_set(struct tcp_sock *tp, u32 seq, u32 end_seq) { - if (tp->rx_opt.sack_ok && sysctl_tcp_dsack) { + if (tcp_is_sack(tp) && sysctl_tcp_dsack) { if (before(seq, tp->rcv_nxt)) NET_INC_STATS_BH(LINUX_MIB_TCPDSACKOLDSENT); else @@ -3267,7 +3478,7 @@ static void tcp_send_dupack(struct sock *sk, struct sk_buff *skb) NET_INC_STATS_BH(LINUX_MIB_DELAYEDACKLOST); tcp_enter_quickack_mode(sk); - if (tp->rx_opt.sack_ok && sysctl_tcp_dsack) { + if (tcp_is_sack(tp) && sysctl_tcp_dsack) { u32 end_seq = TCP_SKB_CB(skb)->end_seq; if (after(TCP_SKB_CB(skb)->end_seq, tp->rcv_nxt)) @@ -3583,7 +3794,7 @@ static void tcp_data_queue(struct sock *sk, struct sk_buff *skb) if (!skb_peek(&tp->out_of_order_queue)) { /* Initial out of order segment, build 1 SACK. */ - if (tp->rx_opt.sack_ok) { + if (tcp_is_sack(tp)) { tp->rx_opt.num_sacks = 1; tp->rx_opt.dsack = 0; tp->rx_opt.eff_sacks = 1; @@ -3648,7 +3859,7 @@ static void tcp_data_queue(struct sock *sk, struct sk_buff *skb) } add_sack: - if (tp->rx_opt.sack_ok) + if (tcp_is_sack(tp)) tcp_sack_new_ofo_skb(sk, seq, end_seq); } } @@ -3837,7 +4048,7 @@ static int tcp_prune_queue(struct sock *sk) * is in a sad state like this, we care only about integrity * of the connection not performance. 
*/ - if (tp->rx_opt.sack_ok) + if (tcp_is_sack(tp)) tcp_sack_reset(&tp->rx_opt); sk_stream_mem_reclaim(sk); } @@ -4538,8 +4749,8 @@ static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb, tp->tcp_header_len = sizeof(struct tcphdr); } - if (tp->rx_opt.sack_ok && sysctl_tcp_fack) - tp->rx_opt.sack_ok |= 2; + if (tcp_is_sack(tp) && sysctl_tcp_fack) + tcp_enable_fack(tp); tcp_mtup_init(sk); tcp_sync_mss(sk, icsk->icsk_pmtu_cookie); diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index e089a978e128057e8b7bc245beaa3aad81be12a0..38cf73a5673144ed40cab33d07501a972c5299be 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c @@ -62,6 +62,7 @@ #include #include +#include #include #include #include @@ -2249,7 +2250,7 @@ int tcp_proc_register(struct tcp_seq_afinfo *afinfo) afinfo->seq_fops->llseek = seq_lseek; afinfo->seq_fops->release = seq_release_private; - p = proc_net_fops_create(afinfo->name, S_IRUGO, afinfo->seq_fops); + p = proc_net_fops_create(&init_net, afinfo->name, S_IRUGO, afinfo->seq_fops); if (p) p->data = afinfo; else @@ -2261,7 +2262,7 @@ void tcp_proc_unregister(struct tcp_seq_afinfo *afinfo) { if (!afinfo) return; - proc_net_remove(afinfo->name); + proc_net_remove(&init_net, afinfo->name); memset(afinfo->seq_fops, 0, sizeof(*afinfo->seq_fops)); } @@ -2469,6 +2470,5 @@ EXPORT_SYMBOL(tcp_v4_syn_recv_sock); EXPORT_SYMBOL(tcp_proc_register); EXPORT_SYMBOL(tcp_proc_unregister); #endif -EXPORT_SYMBOL(sysctl_local_port_range); EXPORT_SYMBOL(sysctl_tcp_low_latency); diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c index a12b08fca5ad013f817b15d0cea589867d074e58..b61b76847ad9355034d1d1352200bb304ebe9a5f 100644 --- a/net/ipv4/tcp_minisocks.c +++ b/net/ipv4/tcp_minisocks.c @@ -368,6 +368,12 @@ void tcp_twsk_destructor(struct sock *sk) EXPORT_SYMBOL_GPL(tcp_twsk_destructor); +static inline void TCP_ECN_openreq_child(struct tcp_sock *tp, + struct request_sock *req) +{ + tp->ecn_flags = inet_rsk(req)->ecn_ok ? TCP_ECN_OK : 0; +} + /* This is not only more efficient than what we used to do, it eliminates * a lot of code duplication between IPv4/IPv6 SYN recv processing. -DaveM * @@ -399,7 +405,6 @@ struct sock *tcp_create_openreq_child(struct sock *sk, struct request_sock *req, newicsk->icsk_rto = TCP_TIMEOUT_INIT; newtp->packets_out = 0; - newtp->left_out = 0; newtp->retrans_out = 0; newtp->sacked_out = 0; newtp->fackets_out = 0; @@ -440,7 +445,7 @@ struct sock *tcp_create_openreq_child(struct sock *sk, struct request_sock *req, newtp->rx_opt.tstamp_ok = ireq->tstamp_ok; if ((newtp->rx_opt.sack_ok = ireq->sack_ok) != 0) { if (sysctl_tcp_fack) - newtp->rx_opt.sack_ok |= 2; + tcp_enable_fack(newtp); } newtp->window_clamp = req->window_clamp; newtp->rcv_ssthresh = req->rcv_wnd; diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c index 666d8a58d14a3d573f11963de87773f25201d573..324b4207254ae411728dd8a3cc1740f5ac0ce267 100644 --- a/net/ipv4/tcp_output.c +++ b/net/ipv4/tcp_output.c @@ -61,6 +61,18 @@ int sysctl_tcp_base_mss __read_mostly = 512; /* By default, RFC2861 behavior. 
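tcp_enable_fack() here, like the tcp_is_sack()/tcp_is_fack()/tcp_is_reno() calls throughout this patch, wraps the bit encoding that the removed IsReno()/IsFack()/IsDSack() macros used on rx_opt.sack_ok (zero meaning plain Reno, bit 1 for FACK, bit 2 for a received D-SACK). A userspace restatement of that encoding with illustrative constant names (assumption: the real helpers live in include/net/tcp.h and are not part of these hunks):

enum {
        SACK_NEGOTIATED = 1,    /* bit 0: peer offered SACK      */
        FACK_ENABLED    = 2,    /* bit 1: FACK heuristics in use */
        DSACK_SEEN      = 4,    /* bit 2: a D-SACK was received  */
};

static int is_reno(unsigned int sack_ok)       { return sack_ok == 0; }
static int is_fack(unsigned int sack_ok)       { return sack_ok & FACK_ENABLED; }
static void enable_fack(unsigned int *sack_ok) { *sack_ok |= FACK_ENABLED; }
static void disable_fack(unsigned int *sack_ok){ *sack_ok &= ~FACK_ENABLED; }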
*/ int sysctl_tcp_slow_start_after_idle __read_mostly = 1; +static inline void tcp_packets_out_inc(struct sock *sk, + const struct sk_buff *skb) +{ + struct tcp_sock *tp = tcp_sk(sk); + int orig = tp->packets_out; + + tp->packets_out += tcp_skb_pcount(skb); + if (!orig) + inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, + inet_csk(sk)->icsk_rto, TCP_RTO_MAX); +} + static void update_send_head(struct sock *sk, struct sk_buff *skb) { struct tcp_sock *tp = tcp_sk(sk); @@ -269,6 +281,56 @@ static u16 tcp_select_window(struct sock *sk) return new_win; } +static inline void TCP_ECN_send_synack(struct tcp_sock *tp, + struct sk_buff *skb) +{ + TCP_SKB_CB(skb)->flags &= ~TCPCB_FLAG_CWR; + if (!(tp->ecn_flags&TCP_ECN_OK)) + TCP_SKB_CB(skb)->flags &= ~TCPCB_FLAG_ECE; +} + +static inline void TCP_ECN_send_syn(struct sock *sk, struct sk_buff *skb) +{ + struct tcp_sock *tp = tcp_sk(sk); + + tp->ecn_flags = 0; + if (sysctl_tcp_ecn) { + TCP_SKB_CB(skb)->flags |= TCPCB_FLAG_ECE|TCPCB_FLAG_CWR; + tp->ecn_flags = TCP_ECN_OK; + } +} + +static __inline__ void +TCP_ECN_make_synack(struct request_sock *req, struct tcphdr *th) +{ + if (inet_rsk(req)->ecn_ok) + th->ece = 1; +} + +static inline void TCP_ECN_send(struct sock *sk, struct sk_buff *skb, + int tcp_header_len) +{ + struct tcp_sock *tp = tcp_sk(sk); + + if (tp->ecn_flags & TCP_ECN_OK) { + /* Not-retransmitted data segment: set ECT and inject CWR. */ + if (skb->len != tcp_header_len && + !before(TCP_SKB_CB(skb)->seq, tp->snd_nxt)) { + INET_ECN_xmit(sk); + if (tp->ecn_flags&TCP_ECN_QUEUE_CWR) { + tp->ecn_flags &= ~TCP_ECN_QUEUE_CWR; + tcp_hdr(skb)->cwr = 1; + skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN; + } + } else { + /* ACK or retransmitted segment: clear ECT|CE */ + INET_ECN_dontxmit(sk); + } + if (tp->ecn_flags & TCP_ECN_DEMAND_CWR) + tcp_hdr(skb)->ece = 1; + } +} + static void tcp_build_and_update_options(__be32 *ptr, struct tcp_sock *tp, __u32 tstamp, __u8 **md5_hash) { @@ -584,16 +646,32 @@ static void tcp_set_skb_tso_segs(struct sock *sk, struct sk_buff *skb, unsigned skb_shinfo(skb)->gso_size = 0; skb_shinfo(skb)->gso_type = 0; } else { - unsigned int factor; - - factor = skb->len + (mss_now - 1); - factor /= mss_now; - skb_shinfo(skb)->gso_segs = factor; + skb_shinfo(skb)->gso_segs = DIV_ROUND_UP(skb->len, mss_now); skb_shinfo(skb)->gso_size = mss_now; skb_shinfo(skb)->gso_type = sk->sk_gso_type; } } +/* When a modification to fackets out becomes necessary, we need to check + * skb is counted to fackets_out or not. Another important thing is to + * tweak SACK fastpath hint too as it would overwrite all changes unless + * hint is also changed. + */ +static void tcp_adjust_fackets_out(struct tcp_sock *tp, struct sk_buff *skb, + int decr) +{ + if (!tp->sacked_out || tcp_is_reno(tp)) + return; + + if (!before(tp->highest_sack, TCP_SKB_CB(skb)->seq)) + tp->fackets_out -= decr; + + /* cnt_hint is "off-by-one" compared with fackets_out (see sacktag) */ + if (tp->fastpath_skb_hint != NULL && + after(TCP_SKB_CB(tp->fastpath_skb_hint)->seq, TCP_SKB_CB(skb)->seq)) + tp->fastpath_cnt_hint -= decr; +} + /* Function to create two new TCP segments. Shrinks the given segment * to the specified size and appends a new segment with the rest of the * packet to the list. This won't be called frequently, I hope. 
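Both tcp_tso_segment() earlier in this patch and tcp_set_skb_tso_segs() above replace the open-coded round-up with the kernel's DIV_ROUND_UP(); the computed gso_segs value is unchanged. A trivial userspace check of the equivalence (arbitrary example values):

#include <assert.h>

#define DIV_ROUND_UP(n, d)      (((n) + (d) - 1) / (d))

int main(void)
{
        unsigned int len = 4000, mss = 1448;
        unsigned int factor = (len + mss - 1) / mss;    /* old open-coded form */

        assert(factor == DIV_ROUND_UP(len, mss));       /* both yield 3 */
        return 0;
}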
@@ -609,7 +687,7 @@ int tcp_fragment(struct sock *sk, struct sk_buff *skb, u32 len, unsigned int mss BUG_ON(len > skb->len); - clear_all_retrans_hints(tp); + tcp_clear_retrans_hints_partial(tp); nsize = skb_headlen(skb) - len; if (nsize < 0) nsize = 0; @@ -634,6 +712,10 @@ int tcp_fragment(struct sock *sk, struct sk_buff *skb, u32 len, unsigned int mss TCP_SKB_CB(buff)->end_seq = TCP_SKB_CB(skb)->end_seq; TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(buff)->seq; + if (tcp_is_sack(tp) && tp->sacked_out && + (TCP_SKB_CB(skb)->seq == tp->highest_sack)) + tp->highest_sack = TCP_SKB_CB(buff)->seq; + /* PSH and FIN should only be set in the second packet. */ flags = TCP_SKB_CB(skb)->flags; TCP_SKB_CB(skb)->flags = flags & ~(TCPCB_FLAG_FIN|TCPCB_FLAG_PSH); @@ -682,32 +764,15 @@ int tcp_fragment(struct sock *sk, struct sk_buff *skb, u32 len, unsigned int mss if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_RETRANS) tp->retrans_out -= diff; - if (TCP_SKB_CB(skb)->sacked & TCPCB_LOST) { + if (TCP_SKB_CB(skb)->sacked & TCPCB_LOST) tp->lost_out -= diff; - tp->left_out -= diff; - } - - if (diff > 0) { - /* Adjust Reno SACK estimate. */ - if (!tp->rx_opt.sack_ok) { - tp->sacked_out -= diff; - if ((int)tp->sacked_out < 0) - tp->sacked_out = 0; - tcp_sync_left_out(tp); - } - tp->fackets_out -= diff; - if ((int)tp->fackets_out < 0) - tp->fackets_out = 0; - /* SACK fastpath might overwrite it unless dealt with */ - if (tp->fastpath_skb_hint != NULL && - after(TCP_SKB_CB(tp->fastpath_skb_hint)->seq, - TCP_SKB_CB(skb)->seq)) { - tp->fastpath_cnt_hint -= diff; - if ((int)tp->fastpath_cnt_hint < 0) - tp->fastpath_cnt_hint = 0; - } + /* Adjust Reno SACK estimate. */ + if (tcp_is_reno(tp) && diff > 0) { + tcp_dec_pcount_approx_int(&tp->sacked_out, diff); + tcp_verify_left_out(tp); } + tcp_adjust_fackets_out(tp, skb, diff); } /* Link BUFF into the send queue. */ @@ -1654,8 +1719,9 @@ static void tcp_retrans_try_collapse(struct sock *sk, struct sk_buff *skb, int m BUG_ON(tcp_skb_pcount(skb) != 1 || tcp_skb_pcount(next_skb) != 1); - /* changing transmit queue under us so clear hints */ - clear_all_retrans_hints(tp); + if (WARN_ON(tcp_is_sack(tp) && tp->sacked_out && + (TCP_SKB_CB(next_skb)->seq == tp->highest_sack))) + return; /* Ok. We will be able to collapse the packet. */ tcp_unlink_write_queue(next_skb, sk); @@ -1683,21 +1749,23 @@ static void tcp_retrans_try_collapse(struct sock *sk, struct sk_buff *skb, int m TCP_SKB_CB(skb)->sacked |= TCP_SKB_CB(next_skb)->sacked&(TCPCB_EVER_RETRANS|TCPCB_AT_TAIL); if (TCP_SKB_CB(next_skb)->sacked&TCPCB_SACKED_RETRANS) tp->retrans_out -= tcp_skb_pcount(next_skb); - if (TCP_SKB_CB(next_skb)->sacked&TCPCB_LOST) { + if (TCP_SKB_CB(next_skb)->sacked&TCPCB_LOST) tp->lost_out -= tcp_skb_pcount(next_skb); - tp->left_out -= tcp_skb_pcount(next_skb); - } /* Reno case is special. Sigh... */ - if (!tp->rx_opt.sack_ok && tp->sacked_out) { + if (tcp_is_reno(tp) && tp->sacked_out) tcp_dec_pcount_approx(&tp->sacked_out, next_skb); - tp->left_out -= tcp_skb_pcount(next_skb); + + tcp_adjust_fackets_out(tp, next_skb, tcp_skb_pcount(next_skb)); + tp->packets_out -= tcp_skb_pcount(next_skb); + + /* changed transmit queue under us so clear hints */ + tcp_clear_retrans_hints_partial(tp); + /* manually tune sacktag skb hint */ + if (tp->fastpath_skb_hint == next_skb) { + tp->fastpath_skb_hint = skb; + tp->fastpath_cnt_hint -= tcp_skb_pcount(skb); } - /* Not quite right: it can be > snd.fack, but - * it is better to underestimate fackets. 
- */ - tcp_dec_pcount_approx(&tp->fackets_out, next_skb); - tcp_packets_out_dec(tp, next_skb); sk_stream_free_skb(sk, next_skb); } } @@ -1731,12 +1799,12 @@ void tcp_simple_retransmit(struct sock *sk) } } - clear_all_retrans_hints(tp); + tcp_clear_all_retrans_hints(tp); if (!lost) return; - tcp_sync_left_out(tp); + tcp_verify_left_out(tp); /* Don't muck with the congestion window here. * Reason is that we do not increase amount of _data_ @@ -1846,6 +1914,8 @@ int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb) printk(KERN_DEBUG "retrans_out leaked.\n"); } #endif + if (!tp->retrans_out) + tp->lost_retrans_low = tp->snd_nxt; TCP_SKB_CB(skb)->sacked |= TCPCB_RETRANS; tp->retrans_out += tcp_skb_pcount(skb); @@ -1938,40 +2008,35 @@ void tcp_xmit_retransmit_queue(struct sock *sk) return; /* No forward retransmissions in Reno are possible. */ - if (!tp->rx_opt.sack_ok) + if (tcp_is_reno(tp)) return; /* Yeah, we have to make difficult choice between forward transmission * and retransmission... Both ways have their merits... * * For now we do not retransmit anything, while we have some new - * segments to send. + * segments to send. In the other cases, follow rule 3 for + * NextSeg() specified in RFC3517. */ if (tcp_may_send_now(sk)) return; - if (tp->forward_skb_hint) { + /* If nothing is SACKed, highest_sack in the loop won't be valid */ + if (!tp->sacked_out) + return; + + if (tp->forward_skb_hint) skb = tp->forward_skb_hint; - packet_cnt = tp->forward_cnt_hint; - } else{ + else skb = tcp_write_queue_head(sk); - packet_cnt = 0; - } tcp_for_write_queue_from(skb, sk) { if (skb == tcp_send_head(sk)) break; - tp->forward_cnt_hint = packet_cnt; tp->forward_skb_hint = skb; - /* Similar to the retransmit loop above we - * can pretend that the retransmitted SKB - * we send out here will be composed of one - * real MSS sized packet because tcp_retransmit_skb() - * will fragment it if necessary. 
- */ - if (++packet_cnt > tp->fackets_out) + if (after(TCP_SKB_CB(skb)->seq, tp->highest_sack)) break; if (tcp_packets_in_flight(tp) >= tp->snd_cwnd) diff --git a/net/ipv4/tcp_probe.c b/net/ipv4/tcp_probe.c index b76398d1b4547d291e52051276c80ff7df36880b..87dd5bff315fcede15cb1bd88f072a48d4df4ca9 100644 --- a/net/ipv4/tcp_probe.c +++ b/net/ipv4/tcp_probe.c @@ -26,6 +26,7 @@ #include #include #include +#include #include @@ -228,7 +229,7 @@ static __init int tcpprobe_init(void) if (!tcp_probe.log) goto err0; - if (!proc_net_fops_create(procname, S_IRUSR, &tcpprobe_fops)) + if (!proc_net_fops_create(&init_net, procname, S_IRUSR, &tcpprobe_fops)) goto err0; ret = register_jprobe(&tcp_jprobe); @@ -238,7 +239,7 @@ static __init int tcpprobe_init(void) pr_info("TCP probe registered (port=%d)\n", port); return 0; err1: - proc_net_remove(procname); + proc_net_remove(&init_net, procname); err0: kfree(tcp_probe.log); return ret; @@ -247,7 +248,7 @@ module_init(tcpprobe_init); static __exit void tcpprobe_exit(void) { - proc_net_remove(procname); + proc_net_remove(&init_net, procname); unregister_jprobe(&tcp_jprobe); kfree(tcp_probe.log); } diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c index e9b151b3a598e46dfd162bdc4645bdb083d48e83..d8970ecfcfc892dd35667dfe540e67e9d49bad30 100644 --- a/net/ipv4/tcp_timer.c +++ b/net/ipv4/tcp_timer.c @@ -315,7 +315,7 @@ static void tcp_retransmit_timer(struct sock *sk) if (icsk->icsk_retransmits == 0) { if (icsk->icsk_ca_state == TCP_CA_Disorder || icsk->icsk_ca_state == TCP_CA_Recovery) { - if (tp->rx_opt.sack_ok) { + if (tcp_is_sack(tp)) { if (icsk->icsk_ca_state == TCP_CA_Recovery) NET_INC_STATS_BH(LINUX_MIB_TCPSACKRECOVERYFAIL); else diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c index 69d4bd10f9c6ef31e5ce3c9762486bd61ab98bda..cb9fc58efb2f1da00ad4789370be1d1ac07f1fcd 100644 --- a/net/ipv4/udp.c +++ b/net/ipv4/udp.c @@ -98,6 +98,7 @@ #include #include #include +#include #include #include #include @@ -113,9 +114,8 @@ DEFINE_SNMP_STAT(struct udp_mib, udp_statistics) __read_mostly; struct hlist_head udp_hash[UDP_HTABLE_SIZE]; DEFINE_RWLOCK(udp_hash_lock); -static int udp_port_rover; - -static inline int __udp_lib_lport_inuse(__u16 num, struct hlist_head udptable[]) +static inline int __udp_lib_lport_inuse(__u16 num, + const struct hlist_head udptable[]) { struct sock *sk; struct hlist_node *node; @@ -132,11 +132,10 @@ static inline int __udp_lib_lport_inuse(__u16 num, struct hlist_head udptable[]) * @sk: socket struct in question * @snum: port number to look up * @udptable: hash list table, must be of UDP_HTABLE_SIZE - * @port_rover: pointer to record of last unallocated port * @saddr_comp: AF-dependent comparison of bound local IP addresses */ int __udp_lib_get_port(struct sock *sk, unsigned short snum, - struct hlist_head udptable[], int *port_rover, + struct hlist_head udptable[], int (*saddr_comp)(const struct sock *sk1, const struct sock *sk2 ) ) { @@ -146,49 +145,56 @@ int __udp_lib_get_port(struct sock *sk, unsigned short snum, int error = 1; write_lock_bh(&udp_hash_lock); - if (snum == 0) { - int best_size_so_far, best, result, i; - - if (*port_rover > sysctl_local_port_range[1] || - *port_rover < sysctl_local_port_range[0]) - *port_rover = sysctl_local_port_range[0]; - best_size_so_far = 32767; - best = result = *port_rover; - for (i = 0; i < UDP_HTABLE_SIZE; i++, result++) { - int size; - - head = &udptable[result & (UDP_HTABLE_SIZE - 1)]; - if (hlist_empty(head)) { - if (result > sysctl_local_port_range[1]) - result = sysctl_local_port_range[0] + - 
((result - sysctl_local_port_range[0]) & - (UDP_HTABLE_SIZE - 1)); + + if (!snum) { + int i, low, high; + unsigned rover, best, best_size_so_far; + + inet_get_local_port_range(&low, &high); + + best_size_so_far = UINT_MAX; + best = rover = net_random() % (high - low) + low; + + /* 1st pass: look for empty (or shortest) hash chain */ + for (i = 0; i < UDP_HTABLE_SIZE; i++) { + int size = 0; + + head = &udptable[rover & (UDP_HTABLE_SIZE - 1)]; + if (hlist_empty(head)) goto gotit; - } - size = 0; + sk_for_each(sk2, node, head) { if (++size >= best_size_so_far) goto next; } best_size_so_far = size; - best = result; + best = rover; next: - ; + /* fold back if end of range */ + if (++rover > high) + rover = low + ((rover - low) + & (UDP_HTABLE_SIZE - 1)); + + } - result = best; - for (i = 0; i < (1 << 16) / UDP_HTABLE_SIZE; - i++, result += UDP_HTABLE_SIZE) { - if (result > sysctl_local_port_range[1]) - result = sysctl_local_port_range[0] - + ((result - sysctl_local_port_range[0]) & - (UDP_HTABLE_SIZE - 1)); - if (! __udp_lib_lport_inuse(result, udptable)) - break; + + /* 2nd pass: find hole in shortest hash chain */ + rover = best; + for (i = 0; i < (1 << 16) / UDP_HTABLE_SIZE; i++) { + if (! __udp_lib_lport_inuse(rover, udptable)) + goto gotit; + rover += UDP_HTABLE_SIZE; + if (rover > high) + rover = low + ((rover - low) + & (UDP_HTABLE_SIZE - 1)); } - if (i >= (1 << 16) / UDP_HTABLE_SIZE) - goto fail; + + + /* All ports in use! */ + goto fail; + gotit: - *port_rover = snum = result; + snum = rover; } else { head = &udptable[snum & (UDP_HTABLE_SIZE - 1)]; @@ -201,6 +207,7 @@ int __udp_lib_get_port(struct sock *sk, unsigned short snum, (*saddr_comp)(sk, sk2) ) goto fail; } + inet_sk(sk)->num = snum; sk->sk_hash = snum; if (sk_unhashed(sk)) { @@ -217,7 +224,7 @@ int __udp_lib_get_port(struct sock *sk, unsigned short snum, int udp_get_port(struct sock *sk, unsigned short snum, int (*scmp)(const struct sock *, const struct sock *)) { - return __udp_lib_get_port(sk, snum, udp_hash, &udp_port_rover, scmp); + return __udp_lib_get_port(sk, snum, udp_hash, scmp); } int ipv4_rcv_saddr_equal(const struct sock *sk1, const struct sock *sk2) @@ -1560,7 +1567,7 @@ int udp_proc_register(struct udp_seq_afinfo *afinfo) afinfo->seq_fops->llseek = seq_lseek; afinfo->seq_fops->release = seq_release_private; - p = proc_net_fops_create(afinfo->name, S_IRUGO, afinfo->seq_fops); + p = proc_net_fops_create(&init_net, afinfo->name, S_IRUGO, afinfo->seq_fops); if (p) p->data = afinfo; else @@ -1572,7 +1579,7 @@ void udp_proc_unregister(struct udp_seq_afinfo *afinfo) { if (!afinfo) return; - proc_net_remove(afinfo->name); + proc_net_remove(&init_net, afinfo->name); memset(afinfo->seq_fops, 0, sizeof(*afinfo->seq_fops)); } diff --git a/net/ipv4/udp_impl.h b/net/ipv4/udp_impl.h index 820a477cfaa6e9899234b03ee8120c9ca158babd..6c55828e41badfb84a1ad4c4852015f3da5f3276 100644 --- a/net/ipv4/udp_impl.h +++ b/net/ipv4/udp_impl.h @@ -9,7 +9,7 @@ extern int __udp4_lib_rcv(struct sk_buff *, struct hlist_head [], int ); extern void __udp4_lib_err(struct sk_buff *, u32, struct hlist_head []); extern int __udp_lib_get_port(struct sock *sk, unsigned short snum, - struct hlist_head udptable[], int *port_rover, + struct hlist_head udptable[], int (*)(const struct sock*,const struct sock*)); extern int ipv4_rcv_saddr_equal(const struct sock *, const struct sock *); diff --git a/net/ipv4/udplite.c b/net/ipv4/udplite.c index f34fd686a8f15ac6e49b687742701c1037ab1417..94977205abb47581686bfd46bc1f5232100f005a 100644 --- a/net/ipv4/udplite.c 
+++ b/net/ipv4/udplite.c @@ -16,12 +16,11 @@ DEFINE_SNMP_STAT(struct udp_mib, udplite_statistics) __read_mostly; struct hlist_head udplite_hash[UDP_HTABLE_SIZE]; -static int udplite_port_rover; int udplite_get_port(struct sock *sk, unsigned short p, int (*c)(const struct sock *, const struct sock *)) { - return __udp_lib_get_port(sk, p, udplite_hash, &udplite_port_rover, c); + return __udp_lib_get_port(sk, p, udplite_hash, c); } static int udplite_v4_get_port(struct sock *sk, unsigned short snum) diff --git a/net/ipv4/xfrm4_input.c b/net/ipv4/xfrm4_input.c index 2fa108245413d38bd7c88fac7c71e570e98e40b9..e9bbfde19ac32420a6dfd77b090e8b22fd97edc6 100644 --- a/net/ipv4/xfrm4_input.c +++ b/net/ipv4/xfrm4_input.c @@ -54,12 +54,14 @@ static int xfrm4_rcv_encap(struct sk_buff *skb, __u16 encap_type) int xfrm_nr = 0; int decaps = 0; int err = xfrm4_parse_spi(skb, ip_hdr(skb)->protocol, &spi, &seq); + unsigned int nhoff = offsetof(struct iphdr, protocol); if (err != 0) goto drop; do { const struct iphdr *iph = ip_hdr(skb); + int nexthdr; if (xfrm_nr == XFRM_MAX_DEPTH) goto drop; @@ -82,9 +84,12 @@ static int xfrm4_rcv_encap(struct sk_buff *skb, __u16 encap_type) if (xfrm_state_check_expire(x)) goto drop_unlock; - if (x->type->input(x, skb)) + nexthdr = x->type->input(x, skb); + if (nexthdr <= 0) goto drop_unlock; + skb_network_header(skb)[nhoff] = nexthdr; + /* only the first xfrm gets the encap type */ encap_type = 0; diff --git a/net/ipv4/xfrm4_mode_beet.c b/net/ipv4/xfrm4_mode_beet.c index a73e710740c22396c4bff28337c53ea6b8624565..73d2338bec55f306985f82897b644b1f6f70df1a 100644 --- a/net/ipv4/xfrm4_mode_beet.c +++ b/net/ipv4/xfrm4_mode_beet.c @@ -20,38 +20,33 @@ /* Add encapsulation header. * * The top IP header will be constructed per draft-nikander-esp-beet-mode-06.txt. - * The following fields in it shall be filled in by x->type->output: - * tot_len - * check - * - * On exit, skb->h will be set to the start of the payload to be processed - * by x->type->output and skb->nh will be set to the top IP header. */ static int xfrm4_beet_output(struct xfrm_state *x, struct sk_buff *skb) { + struct ip_beet_phdr *ph; struct iphdr *iph, *top_iph; int hdrlen, optlen; iph = ip_hdr(skb); - skb->transport_header = skb->network_header; hdrlen = 0; optlen = iph->ihl * 4 - sizeof(*iph); if (unlikely(optlen)) hdrlen += IPV4_BEET_PHMAXLEN - (optlen & 4); - skb_push(skb, x->props.header_len - IPV4_BEET_PHMAXLEN + hdrlen); - skb_reset_network_header(skb); - top_iph = ip_hdr(skb); - skb->transport_header += sizeof(*iph) - hdrlen; + skb_set_network_header(skb, IPV4_BEET_PHMAXLEN - x->props.header_len - + hdrlen); + skb->mac_header = skb->network_header + + offsetof(struct iphdr, protocol); + skb->transport_header = skb->network_header + sizeof(*iph); + + ph = (struct ip_beet_phdr *)__skb_pull(skb, sizeof(*iph) - hdrlen); + top_iph = ip_hdr(skb); memmove(top_iph, iph, sizeof(*iph)); if (unlikely(optlen)) { - struct ip_beet_phdr *ph; - BUG_ON(optlen < 0); - ph = (struct ip_beet_phdr *)skb_transport_header(skb); ph->padlen = 4 - (optlen & 4); ph->hdrlen = optlen / 8; ph->nexthdr = top_iph->protocol; diff --git a/net/ipv4/xfrm4_mode_transport.c b/net/ipv4/xfrm4_mode_transport.c index 601047161ea6102ff80fb4e89ebd25672dae2378..fd840c7d75ea9bc0214bb14dd238e3cee820521a 100644 --- a/net/ipv4/xfrm4_mode_transport.c +++ b/net/ipv4/xfrm4_mode_transport.c @@ -17,18 +17,17 @@ * * The IP header will be moved forward to make space for the encapsulation * header. 
- * - * On exit, skb->h will be set to the start of the payload to be processed - * by x->type->output and skb->nh will be set to the top IP header. */ static int xfrm4_transport_output(struct xfrm_state *x, struct sk_buff *skb) { struct iphdr *iph = ip_hdr(skb); int ihl = iph->ihl * 4; + skb_set_network_header(skb, -x->props.header_len); + skb->mac_header = skb->network_header + + offsetof(struct iphdr, protocol); skb->transport_header = skb->network_header + ihl; - skb_push(skb, x->props.header_len); - skb_reset_network_header(skb); + __skb_pull(skb, ihl); memmove(skb_network_header(skb), iph, ihl); return 0; } diff --git a/net/ipv4/xfrm4_mode_tunnel.c b/net/ipv4/xfrm4_mode_tunnel.c index 9963700e74c1e6185f31ea807b839a0d67a87081..1ae9d32276f0a8daa13cf8abbf743b34c691f0f8 100644 --- a/net/ipv4/xfrm4_mode_tunnel.c +++ b/net/ipv4/xfrm4_mode_tunnel.c @@ -31,13 +31,7 @@ static inline void ipip6_ecn_decapsulate(struct iphdr *iph, struct sk_buff *skb) /* Add encapsulation header. * - * The top IP header will be constructed per RFC 2401. The following fields - * in it shall be filled in by x->type->output: - * tot_len - * check - * - * On exit, skb->h will be set to the start of the payload to be processed - * by x->type->output and skb->nh will be set to the top IP header. + * The top IP header will be constructed per RFC 2401. */ static int xfrm4_tunnel_output(struct xfrm_state *x, struct sk_buff *skb) { @@ -47,10 +41,11 @@ static int xfrm4_tunnel_output(struct xfrm_state *x, struct sk_buff *skb) int flags; iph = ip_hdr(skb); - skb->transport_header = skb->network_header; - skb_push(skb, x->props.header_len); - skb_reset_network_header(skb); + skb_set_network_header(skb, -x->props.header_len); + skb->mac_header = skb->network_header + + offsetof(struct iphdr, protocol); + skb->transport_header = skb->network_header + sizeof(*iph); top_iph = ip_hdr(skb); top_iph->ihl = 5; diff --git a/net/ipv4/xfrm4_output.c b/net/ipv4/xfrm4_output.c index 44ef208a75cb175bfbff21cae090212a6d996b07..434ef302ba83fd3d1bca83f84ec743a152ed4ef3 100644 --- a/net/ipv4/xfrm4_output.c +++ b/net/ipv4/xfrm4_output.c @@ -12,7 +12,6 @@ #include #include #include -#include #include #include #include @@ -41,58 +40,32 @@ static int xfrm4_tunnel_check_size(struct sk_buff *skb) return ret; } -static int xfrm4_output_one(struct sk_buff *skb) +static inline int xfrm4_output_one(struct sk_buff *skb) { struct dst_entry *dst = skb->dst; struct xfrm_state *x = dst->xfrm; + struct iphdr *iph; int err; - if (skb->ip_summed == CHECKSUM_PARTIAL) { - err = skb_checksum_help(skb); - if (err) - goto error_nolock; - } - if (x->props.mode == XFRM_MODE_TUNNEL) { err = xfrm4_tunnel_check_size(skb); if (err) goto error_nolock; } - do { - spin_lock_bh(&x->lock); - err = xfrm_state_check(x, skb); - if (err) - goto error; - - err = x->mode->output(x, skb); - if (err) - goto error; - - err = x->type->output(x, skb); - if (err) - goto error; - - x->curlft.bytes += skb->len; - x->curlft.packets++; + err = xfrm_output(skb); + if (err) + goto error_nolock; - spin_unlock_bh(&x->lock); - - if (!(skb->dst = dst_pop(dst))) { - err = -EHOSTUNREACH; - goto error_nolock; - } - dst = skb->dst; - x = dst->xfrm; - } while (x && (x->props.mode != XFRM_MODE_TUNNEL)); + iph = ip_hdr(skb); + iph->tot_len = htons(skb->len); + ip_send_check(iph); IPCB(skb)->flags |= IPSKB_XFRM_TRANSFORMED; err = 0; out_exit: return err; -error: - spin_unlock_bh(&x->lock); error_nolock: kfree_skb(skb); goto out_exit; diff --git a/net/ipv4/xfrm4_policy.c b/net/ipv4/xfrm4_policy.c index 
4ff8ed30024f3fcf11d83dc1b422d2fc71ecc5a4..329825ca68fe71730387583c9d96129ee3770db2 100644 --- a/net/ipv4/xfrm4_policy.c +++ b/net/ipv4/xfrm4_policy.c @@ -306,7 +306,7 @@ static void xfrm4_dst_ifdown(struct dst_entry *dst, struct net_device *dev, xdst = (struct xfrm_dst *)dst; if (xdst->u.rt.idev->dev == dev) { - struct in_device *loopback_idev = in_dev_get(&loopback_dev); + struct in_device *loopback_idev = in_dev_get(init_net.loopback_dev); BUG_ON(!loopback_idev); do { diff --git a/net/ipv4/xfrm4_tunnel.c b/net/ipv4/xfrm4_tunnel.c index 9275c79119b6f32d9bb69c7c64d9be853378eb7b..1312417608e2a34ba7c6e05746d605b2930d05bf 100644 --- a/net/ipv4/xfrm4_tunnel.c +++ b/net/ipv4/xfrm4_tunnel.c @@ -12,17 +12,13 @@ static int ipip_output(struct xfrm_state *x, struct sk_buff *skb) { - struct iphdr *iph = ip_hdr(skb); - - iph->tot_len = htons(skb->len); - ip_send_check(iph); - + skb_push(skb, -skb_network_offset(skb)); return 0; } static int ipip_xfrm_rcv(struct xfrm_state *x, struct sk_buff *skb) { - return 0; + return IPPROTO_IP; } static int ipip_init_state(struct xfrm_state *x) diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c index 45b4c82148a065b43a4a3e14648032a69f00721d..52d10d213217c0bb8c7be5bbda65d9e31ad45e3d 100644 --- a/net/ipv6/addrconf.c +++ b/net/ipv6/addrconf.c @@ -62,6 +62,7 @@ #include #include +#include #include #include @@ -73,6 +74,7 @@ #include #include #include +#include #include #include @@ -212,6 +214,12 @@ static struct ipv6_devconf ipv6_devconf_dflt __read_mostly = { const struct in6_addr in6addr_any = IN6ADDR_ANY_INIT; const struct in6_addr in6addr_loopback = IN6ADDR_LOOPBACK_INIT; +/* Check if a valid qdisc is available */ +static inline int addrconf_qdisc_ok(struct net_device *dev) +{ + return (dev->qdisc != &noop_qdisc); +} + static void addrconf_del_timer(struct inet6_ifaddr *ifp) { if (del_timer(&ifp->timer)) @@ -260,9 +268,15 @@ static int snmp6_alloc_dev(struct inet6_dev *idev) sizeof(struct icmpv6_mib), __alignof__(struct icmpv6_mib)) < 0) goto err_icmp; + if (snmp_mib_init((void **)idev->stats.icmpv6msg, + sizeof(struct icmpv6msg_mib), + __alignof__(struct icmpv6msg_mib)) < 0) + goto err_icmpmsg; return 0; +err_icmpmsg: + snmp_mib_free((void **)idev->stats.icmpv6); err_icmp: snmp_mib_free((void **)idev->stats.ipv6); err_ip: @@ -271,6 +285,7 @@ static int snmp6_alloc_dev(struct inet6_dev *idev) static int snmp6_free_dev(struct inet6_dev *idev) { + snmp_mib_free((void **)idev->stats.icmpv6msg); snmp_mib_free((void **)idev->stats.icmpv6); snmp_mib_free((void **)idev->stats.ipv6); return 0; @@ -376,7 +391,7 @@ static struct inet6_dev * ipv6_add_dev(struct net_device *dev) } #endif - if (netif_running(dev) && netif_carrier_ok(dev)) + if (netif_running(dev) && addrconf_qdisc_ok(dev)) ndev->if_flags |= IF_READY; ipv6_mc_init_dev(ndev); @@ -449,7 +464,7 @@ static void addrconf_forward_change(void) struct inet6_dev *idev; read_lock(&dev_base_lock); - for_each_netdev(dev) { + for_each_netdev(&init_net, dev) { rcu_read_lock(); idev = __in6_dev_get(dev); if (idev) { @@ -911,7 +926,7 @@ int ipv6_dev_get_saddr(struct net_device *daddr_dev, read_lock(&dev_base_lock); rcu_read_lock(); - for_each_netdev(dev) { + for_each_netdev(&init_net, dev) { struct inet6_dev *idev; struct inet6_ifaddr *ifa; @@ -1857,7 +1872,7 @@ int addrconf_set_dstaddr(void __user *arg) if (copy_from_user(&ireq, arg, sizeof(struct in6_ifreq))) goto err_exit; - dev = __dev_get_by_index(ireq.ifr6_ifindex); + dev = __dev_get_by_index(&init_net, ireq.ifr6_ifindex); err = -ENODEV; if (dev == NULL) @@ -1888,7 
+1903,7 @@ int addrconf_set_dstaddr(void __user *arg) if (err == 0) { err = -ENOBUFS; - if ((dev = __dev_get_by_name(p.name)) == NULL) + if ((dev = __dev_get_by_name(&init_net, p.name)) == NULL) goto err_exit; err = dev_open(dev); } @@ -1918,7 +1933,7 @@ static int inet6_addr_add(int ifindex, struct in6_addr *pfx, int plen, if (!valid_lft || prefered_lft > valid_lft) return -EINVAL; - if ((dev = __dev_get_by_index(ifindex)) == NULL) + if ((dev = __dev_get_by_index(&init_net, ifindex)) == NULL) return -ENODEV; if ((idev = addrconf_add_dev(dev)) == NULL) @@ -1969,7 +1984,7 @@ static int inet6_addr_del(int ifindex, struct in6_addr *pfx, int plen) struct inet6_dev *idev; struct net_device *dev; - if ((dev = __dev_get_by_index(ifindex)) == NULL) + if ((dev = __dev_get_by_index(&init_net, ifindex)) == NULL) return -ENODEV; if ((idev = __in6_dev_get(dev)) == NULL) @@ -2064,7 +2079,7 @@ static void sit_add_v4_addrs(struct inet6_dev *idev) return; } - for_each_netdev(dev) { + for_each_netdev(&init_net, dev) { struct in_device * in_dev = __in_dev_get_rtnl(dev); if (in_dev && (dev->flags & IFF_UP)) { struct in_ifaddr * ifa; @@ -2220,12 +2235,12 @@ static void ip6_tnl_add_linklocal(struct inet6_dev *idev) /* first try to inherit the link-local address from the link device */ if (idev->dev->iflink && - (link_dev = __dev_get_by_index(idev->dev->iflink))) { + (link_dev = __dev_get_by_index(&init_net, idev->dev->iflink))) { if (!ipv6_inherit_linklocal(idev, link_dev)) return; } /* then try to inherit it from any device */ - for_each_netdev(link_dev) { + for_each_netdev(&init_net, link_dev) { if (!ipv6_inherit_linklocal(idev, link_dev)) return; } @@ -2258,6 +2273,9 @@ static int addrconf_notify(struct notifier_block *this, unsigned long event, int run_pending = 0; int err; + if (dev->nd_net != &init_net) + return NOTIFY_DONE; + switch(event) { case NETDEV_REGISTER: if (!idev && dev->mtu >= IPV6_MIN_MTU) { @@ -2272,7 +2290,7 @@ static int addrconf_notify(struct notifier_block *this, unsigned long event, break; if (event == NETDEV_UP) { - if (!netif_carrier_ok(dev)) { + if (!addrconf_qdisc_ok(dev)) { /* device is not ready yet. */ printk(KERN_INFO "ADDRCONF(NETDEV_UP): %s: " @@ -2284,7 +2302,7 @@ static int addrconf_notify(struct notifier_block *this, unsigned long event, if (idev) idev->if_flags |= IF_READY; } else { - if (!netif_carrier_ok(dev)) { + if (!addrconf_qdisc_ok(dev)) { /* device is still not ready. 
*/ break; } @@ -2399,7 +2417,7 @@ static int addrconf_ifdown(struct net_device *dev, int how) ASSERT_RTNL(); - if (dev == &loopback_dev && how == 1) + if (dev == init_net.loopback_dev && how == 1) how = 0; rt6_ifdown(dev); @@ -2491,9 +2509,7 @@ static int addrconf_ifdown(struct net_device *dev, int how) else ipv6_mc_down(idev); - /* Step 5: netlink notification of this interface */ idev->tstamp = jiffies; - inet6_ifinfo_notify(RTM_DELLINK, idev); /* Shot the device (if unregistered) */ @@ -2797,24 +2813,8 @@ static const struct seq_operations if6_seq_ops = { static int if6_seq_open(struct inode *inode, struct file *file) { - struct seq_file *seq; - int rc = -ENOMEM; - struct if6_iter_state *s = kzalloc(sizeof(*s), GFP_KERNEL); - - if (!s) - goto out; - - rc = seq_open(file, &if6_seq_ops); - if (rc) - goto out_kfree; - - seq = file->private_data; - seq->private = s; -out: - return rc; -out_kfree: - kfree(s); - goto out; + return seq_open_private(file, &if6_seq_ops, + sizeof(struct if6_iter_state)); } static const struct file_operations if6_fops = { @@ -2827,14 +2827,14 @@ static const struct file_operations if6_fops = { int __init if6_proc_init(void) { - if (!proc_net_fops_create("if_inet6", S_IRUGO, &if6_fops)) + if (!proc_net_fops_create(&init_net, "if_inet6", S_IRUGO, &if6_fops)) return -ENOMEM; return 0; } void if6_proc_exit(void) { - proc_net_remove("if_inet6"); + proc_net_remove(&init_net, "if_inet6"); } #endif /* CONFIG_PROC_FS */ @@ -3080,7 +3080,7 @@ inet6_rtm_newaddr(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg) valid_lft = INFINITY_LIFE_TIME; } - dev = __dev_get_by_index(ifm->ifa_index); + dev = __dev_get_by_index(&init_net, ifm->ifa_index); if (dev == NULL) return -ENODEV; @@ -3264,7 +3264,7 @@ static int inet6_dump_addr(struct sk_buff *skb, struct netlink_callback *cb, s_ip_idx = ip_idx = cb->args[1]; idx = 0; - for_each_netdev(dev) { + for_each_netdev(&init_net, dev) { if (idx < s_idx) goto cont; if (idx > s_idx) @@ -3373,7 +3373,7 @@ static int inet6_rtm_getaddr(struct sk_buff *in_skb, struct nlmsghdr* nlh, ifm = nlmsg_data(nlh); if (ifm->ifa_index) - dev = __dev_get_by_index(ifm->ifa_index); + dev = __dev_get_by_index(&init_net, ifm->ifa_index); if ((ifa = ipv6_get_ifaddr(addr, dev, 1)) == NULL) { err = -EADDRNOTAVAIL; @@ -3585,7 +3585,7 @@ static int inet6_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb) read_lock(&dev_base_lock); idx = 0; - for_each_netdev(dev) { + for_each_netdev(&init_net, dev) { if (idx < s_idx) goto cont; if ((idev = in6_dev_get(dev)) == NULL) @@ -4203,16 +4203,19 @@ int __init addrconf_init(void) * device and it being up should be removed. */ rtnl_lock(); - if (!ipv6_add_dev(&loopback_dev)) + if (!ipv6_add_dev(init_net.loopback_dev)) err = -ENOMEM; rtnl_unlock(); if (err) return err; - ip6_null_entry.rt6i_idev = in6_dev_get(&loopback_dev); + ip6_null_entry.u.dst.dev = init_net.loopback_dev; + ip6_null_entry.rt6i_idev = in6_dev_get(init_net.loopback_dev); #ifdef CONFIG_IPV6_MULTIPLE_TABLES - ip6_prohibit_entry.rt6i_idev = in6_dev_get(&loopback_dev); - ip6_blk_hole_entry.rt6i_idev = in6_dev_get(&loopback_dev); + ip6_prohibit_entry.u.dst.dev = init_net.loopback_dev; + ip6_prohibit_entry.rt6i_idev = in6_dev_get(init_net.loopback_dev); + ip6_blk_hole_entry.u.dst.dev = init_net.loopback_dev; + ip6_blk_hole_entry.rt6i_idev = in6_dev_get(init_net.loopback_dev); #endif register_netdevice_notifier(&ipv6_dev_notf); @@ -4262,12 +4265,12 @@ void __exit addrconf_cleanup(void) * clean dev list. 
*/ - for_each_netdev(dev) { + for_each_netdev(&init_net, dev) { if (__in6_dev_get(dev) == NULL) continue; addrconf_ifdown(dev, 1); } - addrconf_ifdown(&loopback_dev, 2); + addrconf_ifdown(init_net.loopback_dev, 2); /* * Check hash table. @@ -4293,6 +4296,6 @@ void __exit addrconf_cleanup(void) rtnl_unlock(); #ifdef CONFIG_PROC_FS - proc_net_remove("if_inet6"); + proc_net_remove(&init_net, "if_inet6"); #endif } diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c index b5f96372ad7344b3b815b85a5e29a0b08ac8ad13..bc929381fa46c59926266efde1a6702d2460f1e4 100644 --- a/net/ipv6/af_inet6.c +++ b/net/ipv6/af_inet6.c @@ -81,7 +81,7 @@ static __inline__ struct ipv6_pinfo *inet6_sk_generic(struct sock *sk) return (struct ipv6_pinfo *)(((u8 *)sk) + offset); } -static int inet6_create(struct socket *sock, int protocol) +static int inet6_create(struct net *net, struct socket *sock, int protocol) { struct inet_sock *inet; struct ipv6_pinfo *np; @@ -94,6 +94,9 @@ static int inet6_create(struct socket *sock, int protocol) int try_loading_module = 0; int err; + if (net != &init_net) + return -EAFNOSUPPORT; + if (sock->type != SOCK_RAW && sock->type != SOCK_DGRAM && !inet_ehash_secret) @@ -159,7 +162,7 @@ static int inet6_create(struct socket *sock, int protocol) BUG_TRAP(answer_prot->slab != NULL); err = -ENOBUFS; - sk = sk_alloc(PF_INET6, GFP_KERNEL, answer_prot, 1); + sk = sk_alloc(net, PF_INET6, GFP_KERNEL, answer_prot, 1); if (sk == NULL) goto out; @@ -299,7 +302,7 @@ int inet6_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) err = -EINVAL; goto out; } - dev = dev_get_by_index(sk->sk_bound_dev_if); + dev = dev_get_by_index(&init_net, sk->sk_bound_dev_if); if (!dev) { err = -ENODEV; goto out; @@ -716,6 +719,9 @@ static int __init init_ipv6_mibs(void) if (snmp_mib_init((void **)icmpv6_statistics, sizeof (struct icmpv6_mib), __alignof__(struct icmpv6_mib)) < 0) goto err_icmp_mib; + if (snmp_mib_init((void **)icmpv6msg_statistics, + sizeof (struct icmpv6msg_mib), __alignof__(struct icmpv6_mib)) < 0) + goto err_icmpmsg_mib; if (snmp_mib_init((void **)udp_stats_in6, sizeof (struct udp_mib), __alignof__(struct udp_mib)) < 0) goto err_udp_mib; @@ -727,6 +733,8 @@ static int __init init_ipv6_mibs(void) err_udplite_mib: snmp_mib_free((void **)udp_stats_in6); err_udp_mib: + snmp_mib_free((void **)icmpv6msg_statistics); +err_icmpmsg_mib: snmp_mib_free((void **)icmpv6_statistics); err_icmp_mib: snmp_mib_free((void **)ipv6_statistics); diff --git a/net/ipv6/ah6.c b/net/ipv6/ah6.c index 53f46ab6af7062528e907cf3a2e4676f2fd12afe..f9f689162692e9b6334b9a2d0e04ccbd069d60fb 100644 --- a/net/ipv6/ah6.c +++ b/net/ipv6/ah6.c @@ -29,6 +29,7 @@ #include #include #include +#include #include #include #include @@ -235,11 +236,12 @@ static int ah6_output(struct xfrm_state *x, struct sk_buff *skb) char hdrs[0]; } *tmp_ext; - top_iph = (struct ipv6hdr *)skb->data; + skb_push(skb, -skb_network_offset(skb)); + top_iph = ipv6_hdr(skb); top_iph->payload_len = htons(skb->len - sizeof(*top_iph)); - nexthdr = *skb_network_header(skb); - *skb_network_header(skb) = IPPROTO_AH; + nexthdr = *skb_mac_header(skb); + *skb_mac_header(skb) = IPPROTO_AH; /* When there are no extension headers, we only need to save the first * 8 bytes of the base IP header. 
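The init_ipv6_mibs() hunk above threads the new icmpv6msg counters into the existing goto-based unwind chain: a failure at any step frees, in reverse order, everything set up before it, and adding a step means adding one matching label. A minimal standalone sketch of that idiom (the alloc_a/alloc_b/alloc_c helpers are hypothetical stand-ins, not the kernel's snmp_mib_init):

/* Standalone sketch of the reverse-order unwind idiom; alloc_b models the
 * newly inserted step, alloc_c is forced to fail to exercise the unwind. */
#include <stdio.h>
#include <stdlib.h>

static void *a, *b, *c;

static void *alloc_a(void) { return malloc(16); }
static void *alloc_b(void) { return malloc(16); }
static void *alloc_c(void) { return NULL; /* simulate failure */ }

static int init_chain(void)
{
	if ((a = alloc_a()) == NULL)
		goto err_a;
	if ((b = alloc_b()) == NULL)		/* newly inserted step */
		goto err_b;
	if ((c = alloc_c()) == NULL)
		goto err_c;
	return 0;

err_c:
	free(b);	/* undo the inserted step first ... */
err_b:
	free(a);	/* ... then everything allocated before it */
err_a:
	return -1;
}

int main(void)
{
	printf("init_chain() = %d\n", init_chain());	/* prints -1 here */
	return 0;
}

The same shape appears in snmp6_alloc_dev() earlier in the patch, where the new per-device icmpv6msg MIB gets its own err_icmpmsg label in front of err_icmp.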
@@ -268,7 +270,7 @@ static int ah6_output(struct xfrm_state *x, struct sk_buff *skb) goto error_free_iph; } - ah = (struct ip_auth_hdr *)skb_transport_header(skb); + ah = ip_auth_hdr(skb); ah->nexthdr = nexthdr; top_iph->priority = 0; @@ -278,19 +280,19 @@ static int ah6_output(struct xfrm_state *x, struct sk_buff *skb) top_iph->hop_limit = 0; ahp = x->data; - ah->hdrlen = (XFRM_ALIGN8(sizeof(struct ipv6_auth_hdr) + - ahp->icv_trunc_len) >> 2) - 2; + ah->hdrlen = (XFRM_ALIGN8(sizeof(*ah) + ahp->icv_trunc_len) >> 2) - 2; ah->reserved = 0; ah->spi = x->id.spi; - ah->seq_no = htonl(++x->replay.oseq); - xfrm_aevent_doreplay(x); + ah->seq_no = htonl(XFRM_SKB_CB(skb)->seq); + + spin_lock_bh(&x->lock); err = ah_mac_digest(ahp, skb, ah->auth_data); - if (err) - goto error_free_iph; memcpy(ah->auth_data, ahp->work_icv, ahp->icv_trunc_len); + spin_unlock_bh(&x->lock); - err = 0; + if (err) + goto error_free_iph; memcpy(top_iph, tmp_base, sizeof(tmp_base)); if (tmp_ext) { @@ -324,7 +326,7 @@ static int ah6_input(struct xfrm_state *x, struct sk_buff *skb) * There is offset of AH before IPv6 header after the process. */ - struct ipv6_auth_hdr *ah; + struct ip_auth_hdr *ah; struct ipv6hdr *ip6h; struct ah_data *ahp; unsigned char *tmp_hdr = NULL; @@ -343,13 +345,13 @@ static int ah6_input(struct xfrm_state *x, struct sk_buff *skb) goto out; hdr_len = skb->data - skb_network_header(skb); - ah = (struct ipv6_auth_hdr*)skb->data; + ah = (struct ip_auth_hdr *)skb->data; ahp = x->data; nexthdr = ah->nexthdr; ah_hlen = (ah->hdrlen + 2) << 2; - if (ah_hlen != XFRM_ALIGN8(sizeof(struct ipv6_auth_hdr) + ahp->icv_full_len) && - ah_hlen != XFRM_ALIGN8(sizeof(struct ipv6_auth_hdr) + ahp->icv_trunc_len)) + if (ah_hlen != XFRM_ALIGN8(sizeof(*ah) + ahp->icv_full_len) && + ah_hlen != XFRM_ALIGN8(sizeof(*ah) + ahp->icv_trunc_len)) goto out; if (!pskb_may_pull(skb, ah_hlen)) @@ -429,10 +431,6 @@ static int ah6_init_state(struct xfrm_state *x) if (!x->aalg) goto error; - /* null auth can use a zero length key */ - if (x->aalg->alg_key_len > 512) - goto error; - if (x->encap) goto error; @@ -440,14 +438,13 @@ static int ah6_init_state(struct xfrm_state *x) if (ahp == NULL) return -ENOMEM; - ahp->key = x->aalg->alg_key; - ahp->key_len = (x->aalg->alg_key_len+7)/8; tfm = crypto_alloc_hash(x->aalg->alg_name, 0, CRYPTO_ALG_ASYNC); if (IS_ERR(tfm)) goto error; ahp->tfm = tfm; - if (crypto_hash_setkey(tfm, ahp->key, ahp->key_len)) + if (crypto_hash_setkey(tfm, x->aalg->alg_key, + (x->aalg->alg_key_len + 7) / 8)) goto error; /* @@ -476,7 +473,8 @@ static int ah6_init_state(struct xfrm_state *x) if (!ahp->work_icv) goto error; - x->props.header_len = XFRM_ALIGN8(sizeof(struct ipv6_auth_hdr) + ahp->icv_trunc_len); + x->props.header_len = XFRM_ALIGN8(sizeof(struct ip_auth_hdr) + + ahp->icv_trunc_len); if (x->props.mode == XFRM_MODE_TUNNEL) x->props.header_len += sizeof(struct ipv6hdr); x->data = ahp; @@ -511,6 +509,7 @@ static struct xfrm_type ah6_type = .description = "AH6", .owner = THIS_MODULE, .proto = IPPROTO_AH, + .flags = XFRM_TYPE_REPLAY_PROT, .init_state = ah6_init_state, .destructor = ah6_destroy, .input = ah6_input, diff --git a/net/ipv6/anycast.c b/net/ipv6/anycast.c index b8c533fbdb63f0ab320b7d27735f2e5a6ea2b040..f915c4df9820085fb5a5ff5798c152f89e4f7f4d 100644 --- a/net/ipv6/anycast.c +++ b/net/ipv6/anycast.c @@ -30,6 +30,7 @@ #include #include +#include #include #include @@ -111,10 +112,10 @@ int ipv6_sock_ac_join(struct sock *sk, int ifindex, struct in6_addr *addr) } else { /* router, no matching interface: just pick 
one */ - dev = dev_get_by_flags(IFF_UP, IFF_UP|IFF_LOOPBACK); + dev = dev_get_by_flags(&init_net, IFF_UP, IFF_UP|IFF_LOOPBACK); } } else - dev = dev_get_by_index(ifindex); + dev = dev_get_by_index(&init_net, ifindex); if (dev == NULL) { err = -ENODEV; @@ -195,7 +196,7 @@ int ipv6_sock_ac_drop(struct sock *sk, int ifindex, struct in6_addr *addr) write_unlock_bh(&ipv6_sk_ac_lock); - dev = dev_get_by_index(pac->acl_ifindex); + dev = dev_get_by_index(&init_net, pac->acl_ifindex); if (dev) { ipv6_dev_ac_dec(dev, &pac->acl_addr); dev_put(dev); @@ -223,7 +224,7 @@ void ipv6_sock_ac_close(struct sock *sk) if (pac->acl_ifindex != prev_index) { if (dev) dev_put(dev); - dev = dev_get_by_index(pac->acl_ifindex); + dev = dev_get_by_index(&init_net, pac->acl_ifindex); prev_index = pac->acl_ifindex; } if (dev) @@ -413,7 +414,7 @@ static int ipv6_chk_acast_dev(struct net_device *dev, struct in6_addr *addr) break; read_unlock_bh(&idev->lock); in6_dev_put(idev); - return aca != 0; + return aca != NULL; } return 0; } @@ -428,7 +429,7 @@ int ipv6_chk_acast_addr(struct net_device *dev, struct in6_addr *addr) if (dev) return ipv6_chk_acast_dev(dev, addr); read_lock(&dev_base_lock); - for_each_netdev(dev) + for_each_netdev(&init_net, dev) if (ipv6_chk_acast_dev(dev, addr)) { found = 1; break; @@ -452,7 +453,7 @@ static inline struct ifacaddr6 *ac6_get_first(struct seq_file *seq) struct ac6_iter_state *state = ac6_seq_private(seq); state->idev = NULL; - for_each_netdev(state->dev) { + for_each_netdev(&init_net, state->dev) { struct inet6_dev *idev; idev = in6_dev_get(state->dev); if (!idev) @@ -548,24 +549,8 @@ static const struct seq_operations ac6_seq_ops = { static int ac6_seq_open(struct inode *inode, struct file *file) { - struct seq_file *seq; - int rc = -ENOMEM; - struct ac6_iter_state *s = kzalloc(sizeof(*s), GFP_KERNEL); - - if (!s) - goto out; - - rc = seq_open(file, &ac6_seq_ops); - if (rc) - goto out_kfree; - - seq = file->private_data; - seq->private = s; -out: - return rc; -out_kfree: - kfree(s); - goto out; + return seq_open_private(file, &ac6_seq_ops, + sizeof(struct ac6_iter_state)); } static const struct file_operations ac6_seq_fops = { @@ -578,7 +563,7 @@ static const struct file_operations ac6_seq_fops = { int __init ac6_proc_init(void) { - if (!proc_net_fops_create("anycast6", S_IRUGO, &ac6_seq_fops)) + if (!proc_net_fops_create(&init_net, "anycast6", S_IRUGO, &ac6_seq_fops)) return -ENOMEM; return 0; @@ -586,7 +571,7 @@ int __init ac6_proc_init(void) void ac6_proc_exit(void) { - proc_net_remove("anycast6"); + proc_net_remove(&init_net, "anycast6"); } #endif diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c index fe0f49024a0a213a843cb854404ea428ebcca350..2ed689ac449ec67cd6e0519b1a4ddab4d156e43d 100644 --- a/net/ipv6/datagram.c +++ b/net/ipv6/datagram.c @@ -544,7 +544,7 @@ int datagram_send_ctl(struct msghdr *msg, struct flowi *fl, if (!src_info->ipi6_ifindex) return -EINVAL; else { - dev = dev_get_by_index(src_info->ipi6_ifindex); + dev = dev_get_by_index(&init_net, src_info->ipi6_ifindex); if (!dev) return -ENODEV; } diff --git a/net/ipv6/esp6.c b/net/ipv6/esp6.c index 2db31ce3c7e61302dc66ad12054f009898547e47..9eb92859835171ec22709c04148ce9860ea2acbe 100644 --- a/net/ipv6/esp6.c +++ b/net/ipv6/esp6.c @@ -34,6 +34,7 @@ #include #include #include +#include #include #include #include @@ -42,8 +43,7 @@ static int esp6_output(struct xfrm_state *x, struct sk_buff *skb) { int err; - struct ipv6hdr *top_iph; - struct ipv6_esp_hdr *esph; + struct ip_esp_hdr *esph; struct crypto_blkcipher *tfm; 
struct blkcipher_desc desc; struct sk_buff *trailer; @@ -53,13 +53,8 @@ static int esp6_output(struct xfrm_state *x, struct sk_buff *skb) int nfrags; u8 *tail; struct esp_data *esp = x->data; - int hdr_len = (skb_transport_offset(skb) + - sizeof(*esph) + esp->conf.ivlen); - /* Strip IP+ESP header. */ - __skb_pull(skb, hdr_len); - - /* Now skb is pure payload to encrypt */ + /* skb is pure payload to encrypt */ err = -ENOMEM; /* Round to block size */ @@ -88,15 +83,15 @@ static int esp6_output(struct xfrm_state *x, struct sk_buff *skb) tail[clen-skb->len - 2] = (clen - skb->len) - 2; pskb_put(skb, trailer, clen - skb->len); - top_iph = (struct ipv6hdr *)__skb_push(skb, hdr_len); - esph = (struct ipv6_esp_hdr *)skb_transport_header(skb); - top_iph->payload_len = htons(skb->len + alen - sizeof(*top_iph)); - *(skb_tail_pointer(trailer) - 1) = *skb_network_header(skb); - *skb_network_header(skb) = IPPROTO_ESP; + skb_push(skb, -skb_network_offset(skb)); + esph = ip_esp_hdr(skb); + *(skb_tail_pointer(trailer) - 1) = *skb_mac_header(skb); + *skb_mac_header(skb) = IPPROTO_ESP; esph->spi = x->id.spi; - esph->seq_no = htonl(++x->replay.oseq); - xfrm_aevent_doreplay(x); + esph->seq_no = htonl(XFRM_SKB_CB(skb)->seq); + + spin_lock_bh(&x->lock); if (esp->conf.ivlen) { if (unlikely(!esp->conf.ivinitted)) { @@ -112,7 +107,7 @@ static int esp6_output(struct xfrm_state *x, struct sk_buff *skb) if (unlikely(nfrags > ESP_NUM_FAST_SG)) { sg = kmalloc(sizeof(struct scatterlist)*nfrags, GFP_ATOMIC); if (!sg) - goto error; + goto unlock; } skb_to_sgvec(skb, sg, esph->enc_data+esp->conf.ivlen-skb->data, clen); err = crypto_blkcipher_encrypt(&desc, sg, sg, clen); @@ -121,7 +116,7 @@ static int esp6_output(struct xfrm_state *x, struct sk_buff *skb) } while (0); if (unlikely(err)) - goto error; + goto unlock; if (esp->conf.ivlen) { memcpy(esph->enc_data, esp->conf.ivec, esp->conf.ivlen); @@ -134,6 +129,9 @@ static int esp6_output(struct xfrm_state *x, struct sk_buff *skb) memcpy(pskb_put(skb, trailer, alen), esp->auth.work_icv, alen); } +unlock: + spin_unlock_bh(&x->lock); + error: return err; } @@ -141,19 +139,19 @@ static int esp6_output(struct xfrm_state *x, struct sk_buff *skb) static int esp6_input(struct xfrm_state *x, struct sk_buff *skb) { struct ipv6hdr *iph; - struct ipv6_esp_hdr *esph; + struct ip_esp_hdr *esph; struct esp_data *esp = x->data; struct crypto_blkcipher *tfm = esp->conf.tfm; struct blkcipher_desc desc = { .tfm = tfm }; struct sk_buff *trailer; int blksize = ALIGN(crypto_blkcipher_blocksize(tfm), 4); int alen = esp->auth.icv_trunc_len; - int elen = skb->len - sizeof(struct ipv6_esp_hdr) - esp->conf.ivlen - alen; + int elen = skb->len - sizeof(*esph) - esp->conf.ivlen - alen; int hdr_len = skb_network_header_len(skb); int nfrags; int ret = 0; - if (!pskb_may_pull(skb, sizeof(struct ipv6_esp_hdr))) { + if (!pskb_may_pull(skb, sizeof(*esph))) { ret = -EINVAL; goto out; } @@ -188,7 +186,7 @@ static int esp6_input(struct xfrm_state *x, struct sk_buff *skb) skb->ip_summed = CHECKSUM_NONE; - esph = (struct ipv6_esp_hdr*)skb->data; + esph = (struct ip_esp_hdr *)skb->data; iph = ipv6_hdr(skb); /* Get ivec. This can be wrong, check against another impls. 
*/ @@ -207,7 +205,7 @@ static int esp6_input(struct xfrm_state *x, struct sk_buff *skb) goto out; } } - skb_to_sgvec(skb, sg, sizeof(struct ipv6_esp_hdr) + esp->conf.ivlen, elen); + skb_to_sgvec(skb, sg, sizeof(*esph) + esp->conf.ivlen, elen); ret = crypto_blkcipher_decrypt(&desc, sg, sg, elen); if (unlikely(sg != &esp->sgbuf[0])) kfree(sg); @@ -259,7 +257,7 @@ static void esp6_err(struct sk_buff *skb, struct inet6_skb_parm *opt, int type, int code, int offset, __be32 info) { struct ipv6hdr *iph = (struct ipv6hdr*)skb->data; - struct ipv6_esp_hdr *esph = (struct ipv6_esp_hdr*)(skb->data+offset); + struct ip_esp_hdr *esph = (struct ip_esp_hdr *)(skb->data + offset); struct xfrm_state *x; if (type != ICMPV6_DEST_UNREACH && @@ -297,11 +295,6 @@ static int esp6_init_state(struct xfrm_state *x) struct esp_data *esp = NULL; struct crypto_blkcipher *tfm; - /* null auth and encryption can have zero length keys */ - if (x->aalg) { - if (x->aalg->alg_key_len > 512) - goto error; - } if (x->ealg == NULL) goto error; @@ -316,15 +309,14 @@ static int esp6_init_state(struct xfrm_state *x) struct xfrm_algo_desc *aalg_desc; struct crypto_hash *hash; - esp->auth.key = x->aalg->alg_key; - esp->auth.key_len = (x->aalg->alg_key_len+7)/8; hash = crypto_alloc_hash(x->aalg->alg_name, 0, CRYPTO_ALG_ASYNC); if (IS_ERR(hash)) goto error; esp->auth.tfm = hash; - if (crypto_hash_setkey(hash, esp->auth.key, esp->auth.key_len)) + if (crypto_hash_setkey(hash, x->aalg->alg_key, + (x->aalg->alg_key_len + 7) / 8)) goto error; aalg_desc = xfrm_aalg_get_byname(x->aalg->alg_name, 0); @@ -346,8 +338,6 @@ static int esp6_init_state(struct xfrm_state *x) if (!esp->auth.work_icv) goto error; } - esp->conf.key = x->ealg->alg_key; - esp->conf.key_len = (x->ealg->alg_key_len+7)/8; tfm = crypto_alloc_blkcipher(x->ealg->alg_name, 0, CRYPTO_ALG_ASYNC); if (IS_ERR(tfm)) goto error; @@ -360,9 +350,10 @@ static int esp6_init_state(struct xfrm_state *x) goto error; esp->conf.ivinitted = 0; } - if (crypto_blkcipher_setkey(tfm, esp->conf.key, esp->conf.key_len)) + if (crypto_blkcipher_setkey(tfm, x->ealg->alg_key, + (x->ealg->alg_key_len + 7) / 8)) goto error; - x->props.header_len = sizeof(struct ipv6_esp_hdr) + esp->conf.ivlen; + x->props.header_len = sizeof(struct ip_esp_hdr) + esp->conf.ivlen; if (x->props.mode == XFRM_MODE_TUNNEL) x->props.header_len += sizeof(struct ipv6hdr); x->data = esp; @@ -380,6 +371,7 @@ static struct xfrm_type esp6_type = .description = "ESP6", .owner = THIS_MODULE, .proto = IPPROTO_ESP, + .flags = XFRM_TYPE_REPLAY_PROT, .init_state = esp6_init_state, .destructor = esp6_destroy, .get_mtu = esp6_get_mtu, diff --git a/net/ipv6/fib6_rules.c b/net/ipv6/fib6_rules.c index 53b3998a486ced67d9eae000cff6c9a1269ecf66..706622af206f1f7f5a04479223a1c4aa84334238 100644 --- a/net/ipv6/fib6_rules.c +++ b/net/ipv6/fib6_rules.c @@ -50,8 +50,6 @@ static struct fib6_rule local_rule = { }, }; -static LIST_HEAD(fib6_rules); - struct dst_entry *fib6_rule_lookup(struct flowi *fl, int flags, pol_lookup_t lookup) { @@ -268,14 +266,14 @@ static struct fib_rules_ops fib6_rules_ops = { .nlmsg_payload = fib6_rule_nlmsg_payload, .nlgroup = RTNLGRP_IPV6_RULE, .policy = fib6_rule_policy, - .rules_list = &fib6_rules, + .rules_list = LIST_HEAD_INIT(fib6_rules_ops.rules_list), .owner = THIS_MODULE, }; void __init fib6_rules_init(void) { - list_add_tail(&local_rule.common.list, &fib6_rules); - list_add_tail(&main_rule.common.list, &fib6_rules); + list_add_tail(&local_rule.common.list, &fib6_rules_ops.rules_list); + 
list_add_tail(&main_rule.common.list, &fib6_rules_ops.rules_list); fib_rules_register(&fib6_rules_ops); } diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c index 6a6714d154ed08422141ffecd05032be02fb8a80..47b8ce232e8439cd7a213c035d3c3f17c7131fac 100644 --- a/net/ipv6/icmp.c +++ b/net/ipv6/icmp.c @@ -69,6 +69,8 @@ DEFINE_SNMP_STAT(struct icmpv6_mib, icmpv6_statistics) __read_mostly; EXPORT_SYMBOL(icmpv6_statistics); +DEFINE_SNMP_STAT(struct icmpv6msg_mib, icmpv6msg_statistics) __read_mostly; +EXPORT_SYMBOL(icmpv6msg_statistics); /* * The ICMP socket(s). This is the most convenient way to flow control @@ -456,8 +458,6 @@ void icmpv6_send(struct sk_buff *skb, int type, int code, __u32 info, } err = icmpv6_push_pending_frames(sk, &fl, &tmp_hdr, len + sizeof(struct icmp6hdr)); - if (type >= ICMPV6_DEST_UNREACH && type <= ICMPV6_PARAMPROB) - ICMP6_INC_STATS_OFFSET_BH(idev, ICMP6_MIB_OUTDESTUNREACHS, type - ICMPV6_DEST_UNREACH); ICMP6_INC_STATS_BH(idev, ICMP6_MIB_OUTMSGS); out_put: @@ -547,9 +547,6 @@ static void icmpv6_echo_reply(struct sk_buff *skb) } err = icmpv6_push_pending_frames(sk, &fl, &tmp_hdr, skb->len + sizeof(struct icmp6hdr)); - ICMP6_INC_STATS_BH(idev, ICMP6_MIB_OUTECHOREPLIES); - ICMP6_INC_STATS_BH(idev, ICMP6_MIB_OUTMSGS); - out_put: if (likely(idev != NULL)) in6_dev_put(idev); @@ -656,10 +653,7 @@ static int icmpv6_rcv(struct sk_buff **pskb) type = hdr->icmp6_type; - if (type >= ICMPV6_DEST_UNREACH && type <= ICMPV6_PARAMPROB) - ICMP6_INC_STATS_OFFSET_BH(idev, ICMP6_MIB_INDESTUNREACHS, type - ICMPV6_DEST_UNREACH); - else if (type >= ICMPV6_ECHO_REQUEST && type <= NDISC_REDIRECT) - ICMP6_INC_STATS_OFFSET_BH(idev, ICMP6_MIB_INECHOS, type - ICMPV6_ECHO_REQUEST); + ICMP6MSGIN_INC_STATS_BH(idev, type); switch (type) { case ICMPV6_ECHO_REQUEST: diff --git a/net/ipv6/inet6_connection_sock.c b/net/ipv6/inet6_connection_sock.c index 116f94a49071d22474271fda9c77bf9f1efcb715..25b93170974928dc965cd15157f4110c3a7341db 100644 --- a/net/ipv6/inet6_connection_sock.c +++ b/net/ipv6/inet6_connection_sock.c @@ -139,6 +139,41 @@ void inet6_csk_addr2sockaddr(struct sock *sk, struct sockaddr * uaddr) EXPORT_SYMBOL_GPL(inet6_csk_addr2sockaddr); +static inline +void __inet6_csk_dst_store(struct sock *sk, struct dst_entry *dst, + struct in6_addr *daddr, struct in6_addr *saddr) +{ + __ip6_dst_store(sk, dst, daddr, saddr); + +#ifdef CONFIG_XFRM + if (dst) { + struct rt6_info *rt = (struct rt6_info *)dst; + rt->rt6i_flow_cache_genid = atomic_read(&flow_cache_genid); + } +#endif +} + +static inline +struct dst_entry *__inet6_csk_dst_check(struct sock *sk, u32 cookie) +{ + struct dst_entry *dst; + + dst = __sk_dst_check(sk, cookie); + +#ifdef CONFIG_XFRM + if (dst) { + struct rt6_info *rt = (struct rt6_info *)dst; + if (rt->rt6i_flow_cache_genid != atomic_read(&flow_cache_genid)) { + sk->sk_dst_cache = NULL; + dst_release(dst); + dst = NULL; + } + } +#endif + + return dst; +} + int inet6_csk_xmit(struct sk_buff *skb, int ipfragok) { struct sock *sk = skb->sk; @@ -166,7 +201,7 @@ int inet6_csk_xmit(struct sk_buff *skb, int ipfragok) final_p = &final; } - dst = __sk_dst_check(sk, np->dst_cookie); + dst = __inet6_csk_dst_check(sk, np->dst_cookie); if (dst == NULL) { int err = ip6_dst_lookup(sk, &dst, &fl); @@ -186,7 +221,7 @@ int inet6_csk_xmit(struct sk_buff *skb, int ipfragok) return err; } - __ip6_dst_store(sk, dst, NULL, NULL); + __inet6_csk_dst_store(sk, dst, NULL, NULL); } skb->dst = dst_clone(dst); diff --git a/net/ipv6/inet6_hashtables.c b/net/ipv6/inet6_hashtables.c index 
ae6b0e7eb4885b1722fe6e2a89a5d167b0cdf5ab..1c2c2765543505d7e61dd021fa2fcf2b9482757f 100644 --- a/net/ipv6/inet6_hashtables.c +++ b/net/ipv6/inet6_hashtables.c @@ -254,18 +254,18 @@ int inet6_hash_connect(struct inet_timewait_death_row *death_row, int ret; if (snum == 0) { - const int low = sysctl_local_port_range[0]; - const int high = sysctl_local_port_range[1]; - const int range = high - low; - int i, port; + int i, port, low, high, remaining; static u32 hint; const u32 offset = hint + inet6_sk_port_offset(sk); struct hlist_node *node; struct inet_timewait_sock *tw = NULL; + inet_get_local_port_range(&low, &high); + remaining = high - low; + local_bh_disable(); - for (i = 1; i <= range; i++) { - port = low + (i + offset) % range; + for (i = 1; i <= remaining; i++) { + port = low + (i + offset) % remaining; head = &hinfo->bhash[inet_bhashfn(port, hinfo->bhash_size)]; spin_lock(&head->lock); diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c index 6a612a701eaaa34b35d6d7778b80c7f62c6bde31..946cf389ab95716a27a98fda7ca448b99524993c 100644 --- a/net/ipv6/ip6_fib.c +++ b/net/ipv6/ip6_fib.c @@ -1313,7 +1313,7 @@ static int fib6_clean_node(struct fib6_walker_t *w) { int res; struct rt6_info *rt; - struct fib6_cleaner_t *c = (struct fib6_cleaner_t*)w; + struct fib6_cleaner_t *c = container_of(w, struct fib6_cleaner_t, w); for (rt = w->leaf; rt; rt = rt->u.dst.rt6_next) { res = c->func(rt, c->arg); diff --git a/net/ipv6/ip6_flowlabel.c b/net/ipv6/ip6_flowlabel.c index 413a4ebb195c728ea86990e038412941c7370ad7..217d60f9fc80e89afc4e07a831ec9470c45b07e9 100644 --- a/net/ipv6/ip6_flowlabel.c +++ b/net/ipv6/ip6_flowlabel.c @@ -21,6 +21,7 @@ #include #include +#include #include #include @@ -657,24 +658,8 @@ static const struct seq_operations ip6fl_seq_ops = { static int ip6fl_seq_open(struct inode *inode, struct file *file) { - struct seq_file *seq; - int rc = -ENOMEM; - struct ip6fl_iter_state *s = kzalloc(sizeof(*s), GFP_KERNEL); - - if (!s) - goto out; - - rc = seq_open(file, &ip6fl_seq_ops); - if (rc) - goto out_kfree; - - seq = file->private_data; - seq->private = s; -out: - return rc; -out_kfree: - kfree(s); - goto out; + return seq_open_private(file, &ip6fl_seq_ops, + sizeof(struct ip6fl_iter_state)); } static const struct file_operations ip6fl_seq_fops = { @@ -690,7 +675,7 @@ static const struct file_operations ip6fl_seq_fops = { void ip6_flowlabel_init(void) { #ifdef CONFIG_PROC_FS - proc_net_fops_create("ip6_flowlabel", S_IRUGO, &ip6fl_seq_fops); + proc_net_fops_create(&init_net, "ip6_flowlabel", S_IRUGO, &ip6fl_seq_fops); #endif } @@ -698,6 +683,6 @@ void ip6_flowlabel_cleanup(void) { del_timer(&ip6_fl_gc_timer); #ifdef CONFIG_PROC_FS - proc_net_remove("ip6_flowlabel"); + proc_net_remove(&init_net, "ip6_flowlabel"); #endif } diff --git a/net/ipv6/ip6_input.c b/net/ipv6/ip6_input.c index 30a5cb1b203e1e644c0e3182692b91549c9c9725..9149fc239759972d9ddbde68330a9ca572f2569e 100644 --- a/net/ipv6/ip6_input.c +++ b/net/ipv6/ip6_input.c @@ -61,6 +61,11 @@ int ipv6_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt u32 pkt_len; struct inet6_dev *idev; + if (dev->nd_net != &init_net) { + kfree_skb(skb); + return 0; + } + if (skb->pkt_type == PACKET_OTHERHOST) { kfree_skb(skb); return 0; @@ -86,7 +91,7 @@ int ipv6_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt * * BTW, when we send a packet for our own local address on a * non-loopback interface (e.g. ethX), it is being delivered - * via the loopback interface (lo) here; skb->dev = &loopback_dev. 
+ * via the loopback interface (lo) here; skb->dev = loopback_dev. * It, however, should be considered as if it is being * arrived via the sending interface (ethX), because of the * nature of scoping architecture. --yoshfuji diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c index 26de3c0ea31e516c23670c924b579be43128847b..011082ed921a24c632d79c3790d7ead18408f05a 100644 --- a/net/ipv6/ip6_output.c +++ b/net/ipv6/ip6_output.c @@ -441,8 +441,10 @@ int ip6_forward(struct sk_buff *skb) /* IPv6 specs say nothing about it, but it is clear that we cannot send redirects to source routed frames. + We don't send redirects to frames decapsulated from IPsec. */ - if (skb->dev == dst->dev && dst->neighbour && opt->srcrt == 0) { + if (skb->dev == dst->dev && dst->neighbour && opt->srcrt == 0 && + !skb->sp) { struct in6_addr *target = NULL; struct rt6_info *rt; struct neighbour *n = dst->neighbour; @@ -1397,6 +1399,13 @@ int ip6_push_pending_frames(struct sock *sk) skb->dst = dst_clone(&rt->u.dst); IP6_INC_STATS(rt->rt6i_idev, IPSTATS_MIB_OUTREQUESTS); + if (proto == IPPROTO_ICMPV6) { + struct inet6_dev *idev = ip6_dst_idev(skb->dst); + + ICMP6MSGOUT_INC_STATS_BH(idev, icmp6_hdr(skb)->icmp6_type); + ICMP6_INC_STATS_BH(idev, ICMP6_MIB_OUTMSGS); + } + err = NF_HOOK(PF_INET6, NF_IP6_LOCAL_OUT, skb, NULL, skb->dst->dev, dst_output); if (err) { if (err > 0) diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c index ca774d8e3be35e09c010db9aae493e7db6be997f..2320cc27ff9e305328cc8bc04103851d04b88a9b 100644 --- a/net/ipv6/ip6_tunnel.c +++ b/net/ipv6/ip6_tunnel.c @@ -235,7 +235,7 @@ static struct ip6_tnl *ip6_tnl_create(struct ip6_tnl_parm *p) int i; for (i = 1; i < IP6_TNL_MAX; i++) { sprintf(name, "ip6tnl%d", i); - if (__dev_get_by_name(name) == NULL) + if (__dev_get_by_name(&init_net, name) == NULL) break; } if (i == IP6_TNL_MAX) @@ -650,7 +650,7 @@ static inline int ip6_tnl_rcv_ctl(struct ip6_tnl *t) struct net_device *ldev = NULL; if (p->link) - ldev = dev_get_by_index(p->link); + ldev = dev_get_by_index(&init_net, p->link); if ((ipv6_addr_is_multicast(&p->laddr) || likely(ipv6_chk_addr(&p->laddr, ldev, 0))) && @@ -786,7 +786,7 @@ static inline int ip6_tnl_xmit_ctl(struct ip6_tnl *t) struct net_device *ldev = NULL; if (p->link) - ldev = dev_get_by_index(p->link); + ldev = dev_get_by_index(&init_net, p->link); if (unlikely(!ipv6_chk_addr(&p->laddr, ldev, 0))) printk(KERN_WARNING @@ -1313,7 +1313,6 @@ ip6_tnl_change_mtu(struct net_device *dev, int new_mtu) static void ip6_tnl_dev_setup(struct net_device *dev) { - SET_MODULE_OWNER(dev); dev->uninit = ip6_tnl_dev_uninit; dev->destructor = free_netdev; dev->hard_start_xmit = ip6_tnl_xmit; diff --git a/net/ipv6/ipcomp6.c b/net/ipv6/ipcomp6.c index 473f165310ea79c8e7a23c350126c2aee19d80eb..28fc8edfdc3aa63f24f85df71869809d27df7e71 100644 --- a/net/ipv6/ipcomp6.c +++ b/net/ipv6/ipcomp6.c @@ -65,8 +65,7 @@ static LIST_HEAD(ipcomp6_tfms_list); static int ipcomp6_input(struct xfrm_state *x, struct sk_buff *skb) { int err = -ENOMEM; - struct ipv6hdr *iph; - struct ipv6_comp_hdr *ipch; + struct ip_comp_hdr *ipch; int plen, dlen; struct ipcomp_data *ipcd = x->data; u8 *start, *scratch; @@ -79,7 +78,6 @@ static int ipcomp6_input(struct xfrm_state *x, struct sk_buff *skb) skb->ip_summed = CHECKSUM_NONE; /* Remove ipcomp header and decompress original payload */ - iph = ipv6_hdr(skb); ipch = (void *)skb->data; skb->transport_header = skb->network_header + sizeof(*ipch); __skb_pull(skb, sizeof(*ipch)); @@ -94,12 +92,10 @@ static int ipcomp6_input(struct xfrm_state 
*x, struct sk_buff *skb) tfm = *per_cpu_ptr(ipcd->tfms, cpu); err = crypto_comp_decompress(tfm, start, plen, scratch, &dlen); - if (err) { - err = -EINVAL; + if (err) goto out_put_cpu; - } - if (dlen < (plen + sizeof(struct ipv6_comp_hdr))) { + if (dlen < (plen + sizeof(*ipch))) { err = -EINVAL; goto out_put_cpu; } @@ -123,17 +119,15 @@ static int ipcomp6_input(struct xfrm_state *x, struct sk_buff *skb) static int ipcomp6_output(struct xfrm_state *x, struct sk_buff *skb) { int err; - struct ipv6hdr *top_iph; - struct ipv6_comp_hdr *ipch; + struct ip_comp_hdr *ipch; struct ipcomp_data *ipcd = x->data; int plen, dlen; u8 *start, *scratch; struct crypto_comp *tfm; int cpu; - int hdr_len = skb_transport_offset(skb); /* check whether datagram len is larger than threshold */ - if ((skb->len - hdr_len) < ipcd->threshold) { + if (skb->len < ipcd->threshold) { goto out_ok; } @@ -141,35 +135,33 @@ static int ipcomp6_output(struct xfrm_state *x, struct sk_buff *skb) goto out_ok; /* compression */ - plen = skb->len - hdr_len; + plen = skb->len; dlen = IPCOMP_SCRATCH_SIZE; - start = skb_transport_header(skb); + start = skb->data; cpu = get_cpu(); scratch = *per_cpu_ptr(ipcomp6_scratches, cpu); tfm = *per_cpu_ptr(ipcd->tfms, cpu); err = crypto_comp_compress(tfm, start, plen, scratch, &dlen); - if (err || (dlen + sizeof(struct ipv6_comp_hdr)) >= plen) { + if (err || (dlen + sizeof(*ipch)) >= plen) { put_cpu(); goto out_ok; } memcpy(start + sizeof(struct ip_comp_hdr), scratch, dlen); put_cpu(); - pskb_trim(skb, hdr_len + dlen + sizeof(struct ip_comp_hdr)); + pskb_trim(skb, dlen + sizeof(struct ip_comp_hdr)); /* insert ipcomp header and replace datagram */ - top_iph = (struct ipv6hdr *)skb->data; - - top_iph->payload_len = htons(skb->len - sizeof(struct ipv6hdr)); - - ipch = (struct ipv6_comp_hdr *)start; - ipch->nexthdr = *skb_network_header(skb); + ipch = ip_comp_hdr(skb); + ipch->nexthdr = *skb_mac_header(skb); ipch->flags = 0; ipch->cpi = htons((u16 )ntohl(x->id.spi)); - *skb_network_header(skb) = IPPROTO_COMP; + *skb_mac_header(skb) = IPPROTO_COMP; out_ok: + skb_push(skb, -skb_network_offset(skb)); + return 0; } @@ -178,7 +170,8 @@ static void ipcomp6_err(struct sk_buff *skb, struct inet6_skb_parm *opt, { __be32 spi; struct ipv6hdr *iph = (struct ipv6hdr*)skb->data; - struct ipv6_comp_hdr *ipcomph = (struct ipv6_comp_hdr*)(skb->data+offset); + struct ip_comp_hdr *ipcomph = + (struct ip_comp_hdr *)(skb->data + offset); struct xfrm_state *x; if (type != ICMPV6_DEST_UNREACH && type != ICMPV6_PKT_TOOBIG) diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c index 6b038aa72e88b51fe77554bc4cab65af072ef2e8..1334fc174bcf72de093eaeff65d2ec64a7239229 100644 --- a/net/ipv6/ipv6_sockglue.c +++ b/net/ipv6/ipv6_sockglue.c @@ -249,7 +249,7 @@ static int do_ipv6_setsockopt(struct sock *sk, int level, int optname, } if (ipv6_only_sock(sk) || - !(ipv6_addr_type(&np->daddr) & IPV6_ADDR_MAPPED)) { + !ipv6_addr_v4mapped(&np->daddr)) { retv = -EADDRNOTAVAIL; break; } @@ -539,12 +539,15 @@ static int do_ipv6_setsockopt(struct sock *sk, int level, int optname, case IPV6_MULTICAST_IF: if (sk->sk_type == SOCK_STREAM) goto e_inval; - if (sk->sk_bound_dev_if && sk->sk_bound_dev_if != val) - goto e_inval; - if (__dev_get_by_index(val) == NULL) { - retv = -ENODEV; - break; + if (val) { + if (sk->sk_bound_dev_if && sk->sk_bound_dev_if != val) + goto e_inval; + + if (__dev_get_by_index(&init_net, val) == NULL) { + retv = -ENODEV; + break; + } } np->mcast_oif = val; retv = 0; @@ -663,7 +666,7 @@ static int 
do_ipv6_setsockopt(struct sock *sk, int level, int optname, break; } gsf = kmalloc(optlen,GFP_KERNEL); - if (gsf == 0) { + if (!gsf) { retv = -ENOBUFS; break; } diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c index ae9881832a7ec548ae3c77456dc5089f01b49337..331d728c2035920a8cbe220115ff492abd073660 100644 --- a/net/ipv6/mcast.c +++ b/net/ipv6/mcast.c @@ -49,6 +49,7 @@ #include #include +#include #include #include @@ -214,7 +215,7 @@ int ipv6_sock_mc_join(struct sock *sk, int ifindex, struct in6_addr *addr) dst_release(&rt->u.dst); } } else - dev = dev_get_by_index(ifindex); + dev = dev_get_by_index(&init_net, ifindex); if (dev == NULL) { sock_kfree_s(sk, mc_lst, sizeof(*mc_lst)); @@ -265,7 +266,7 @@ int ipv6_sock_mc_drop(struct sock *sk, int ifindex, struct in6_addr *addr) *lnk = mc_lst->next; write_unlock_bh(&ipv6_sk_mc_lock); - if ((dev = dev_get_by_index(mc_lst->ifindex)) != NULL) { + if ((dev = dev_get_by_index(&init_net, mc_lst->ifindex)) != NULL) { struct inet6_dev *idev = in6_dev_get(dev); (void) ip6_mc_leave_src(sk, mc_lst, idev); @@ -300,7 +301,7 @@ static struct inet6_dev *ip6_mc_find_dev(struct in6_addr *group, int ifindex) dst_release(&rt->u.dst); } } else - dev = dev_get_by_index(ifindex); + dev = dev_get_by_index(&init_net, ifindex); if (!dev) return NULL; @@ -331,7 +332,7 @@ void ipv6_sock_mc_close(struct sock *sk) np->ipv6_mc_list = mc_lst->next; write_unlock_bh(&ipv6_sk_mc_lock); - dev = dev_get_by_index(mc_lst->ifindex); + dev = dev_get_by_index(&init_net, mc_lst->ifindex); if (dev) { struct inet6_dev *idev = in6_dev_get(dev); @@ -1406,7 +1407,7 @@ static struct sk_buff *mld_newpack(struct net_device *dev, int size) /* we assume size > sizeof(ra) here */ skb = sock_alloc_send_skb(sk, size + LL_RESERVED_SPACE(dev), 1, &err); - if (skb == 0) + if (!skb) return NULL; skb_reserve(skb, LL_RESERVED_SPACE(dev)); @@ -1437,17 +1438,12 @@ static struct sk_buff *mld_newpack(struct net_device *dev, int size) static inline int mld_dev_queue_xmit2(struct sk_buff *skb) { struct net_device *dev = skb->dev; + unsigned char ha[MAX_ADDR_LEN]; - if (dev->hard_header) { - unsigned char ha[MAX_ADDR_LEN]; - int err; - - ndisc_mc_map(&ipv6_hdr(skb)->daddr, ha, dev, 1); - err = dev->hard_header(skb, dev, ETH_P_IPV6, ha, NULL, skb->len); - if (err < 0) { - kfree_skb(skb); - return err; - } + ndisc_mc_map(&ipv6_hdr(skb)->daddr, ha, dev, 1); + if (dev_hard_header(skb, dev, ETH_P_IPV6, ha, NULL, skb->len) < 0) { + kfree_skb(skb); + return -EINVAL; } return dev_queue_xmit(skb); } @@ -1478,10 +1474,11 @@ static void mld_sendpack(struct sk_buff *skb) err = NF_HOOK(PF_INET6, NF_IP6_LOCAL_OUT, skb, NULL, skb->dev, mld_dev_queue_xmit); if (!err) { - ICMP6_INC_STATS(idev,ICMP6_MIB_OUTMSGS); - IP6_INC_STATS(idev, IPSTATS_MIB_OUTMCASTPKTS); + ICMP6MSGOUT_INC_STATS_BH(idev, ICMPV6_MLD2_REPORT); + ICMP6_INC_STATS_BH(idev, ICMP6_MIB_OUTMSGS); + IP6_INC_STATS_BH(idev, IPSTATS_MIB_OUTMCASTPKTS); } else - IP6_INC_STATS(idev, IPSTATS_MIB_OUTDISCARDS); + IP6_INC_STATS_BH(idev, IPSTATS_MIB_OUTDISCARDS); if (likely(idev != NULL)) in6_dev_put(idev); @@ -1821,10 +1818,7 @@ static void igmp6_send(struct in6_addr *addr, struct net_device *dev, int type) err = NF_HOOK(PF_INET6, NF_IP6_LOCAL_OUT, skb, NULL, skb->dev, mld_dev_queue_xmit); if (!err) { - if (type == ICMPV6_MGM_REDUCTION) - ICMP6_INC_STATS(idev, ICMP6_MIB_OUTGROUPMEMBREDUCTIONS); - else - ICMP6_INC_STATS(idev, ICMP6_MIB_OUTGROUPMEMBRESPONSES); + ICMP6MSGOUT_INC_STATS(idev, type); ICMP6_INC_STATS(idev, ICMP6_MIB_OUTMSGS); IP6_INC_STATS(idev, 
IPSTATS_MIB_OUTMCASTPKTS); } else @@ -2150,7 +2144,7 @@ static int ip6_mc_leave_src(struct sock *sk, struct ipv6_mc_socklist *iml, /* callers have the socket lock and a write lock on ipv6_sk_mc_lock, * so no other readers or writers of iml or its sflist */ - if (iml->sflist == 0) { + if (!iml->sflist) { /* any-source empty exclude case */ return ip6_mc_del_src(idev, &iml->addr, iml->sfmode, 0, NULL, 0); } @@ -2332,7 +2326,7 @@ static inline struct ifmcaddr6 *igmp6_mc_get_first(struct seq_file *seq) struct igmp6_mc_iter_state *state = igmp6_mc_seq_private(seq); state->idev = NULL; - for_each_netdev(state->dev) { + for_each_netdev(&init_net, state->dev) { struct inet6_dev *idev; idev = in6_dev_get(state->dev); if (!idev) @@ -2432,24 +2426,8 @@ static const struct seq_operations igmp6_mc_seq_ops = { static int igmp6_mc_seq_open(struct inode *inode, struct file *file) { - struct seq_file *seq; - int rc = -ENOMEM; - struct igmp6_mc_iter_state *s = kzalloc(sizeof(*s), GFP_KERNEL); - - if (!s) - goto out; - - rc = seq_open(file, &igmp6_mc_seq_ops); - if (rc) - goto out_kfree; - - seq = file->private_data; - seq->private = s; -out: - return rc; -out_kfree: - kfree(s); - goto out; + return seq_open_private(file, &igmp6_mc_seq_ops, + sizeof(struct igmp6_mc_iter_state)); } static const struct file_operations igmp6_mc_seq_fops = { @@ -2476,7 +2454,7 @@ static inline struct ip6_sf_list *igmp6_mcf_get_first(struct seq_file *seq) state->idev = NULL; state->im = NULL; - for_each_netdev(state->dev) { + for_each_netdev(&init_net, state->dev) { struct inet6_dev *idev; idev = in6_dev_get(state->dev); if (unlikely(idev == NULL)) @@ -2606,24 +2584,8 @@ static const struct seq_operations igmp6_mcf_seq_ops = { static int igmp6_mcf_seq_open(struct inode *inode, struct file *file) { - struct seq_file *seq; - int rc = -ENOMEM; - struct igmp6_mcf_iter_state *s = kzalloc(sizeof(*s), GFP_KERNEL); - - if (!s) - goto out; - - rc = seq_open(file, &igmp6_mcf_seq_ops); - if (rc) - goto out_kfree; - - seq = file->private_data; - seq->private = s; -out: - return rc; -out_kfree: - kfree(s); - goto out; + return seq_open_private(file, &igmp6_mcf_seq_ops, + sizeof(struct igmp6_mcf_iter_state)); } static const struct file_operations igmp6_mcf_seq_fops = { @@ -2658,8 +2620,8 @@ int __init igmp6_init(struct net_proto_family *ops) np->hop_limit = 1; #ifdef CONFIG_PROC_FS - proc_net_fops_create("igmp6", S_IRUGO, &igmp6_mc_seq_fops); - proc_net_fops_create("mcfilter6", S_IRUGO, &igmp6_mcf_seq_fops); + proc_net_fops_create(&init_net, "igmp6", S_IRUGO, &igmp6_mc_seq_fops); + proc_net_fops_create(&init_net, "mcfilter6", S_IRUGO, &igmp6_mcf_seq_fops); #endif return 0; @@ -2671,7 +2633,7 @@ void igmp6_cleanup(void) igmp6_socket = NULL; /* for safety */ #ifdef CONFIG_PROC_FS - proc_net_remove("mcfilter6"); - proc_net_remove("igmp6"); + proc_net_remove(&init_net, "mcfilter6"); + proc_net_remove(&init_net, "igmp6"); #endif } diff --git a/net/ipv6/mip6.c b/net/ipv6/mip6.c index 8a1399ce38ce826ae774a54508382adae905603f..7fd841d410190382fe5ea1c4b31df9ebada0f07f 100644 --- a/net/ipv6/mip6.c +++ b/net/ipv6/mip6.c @@ -153,11 +153,11 @@ static int mip6_destopt_output(struct xfrm_state *x, struct sk_buff *skb) u8 nexthdr; int len; - iph = (struct ipv6hdr *)skb->data; - iph->payload_len = htons(skb->len - sizeof(*iph)); + skb_push(skb, -skb_network_offset(skb)); + iph = ipv6_hdr(skb); - nexthdr = *skb_network_header(skb); - *skb_network_header(skb) = IPPROTO_DSTOPTS; + nexthdr = *skb_mac_header(skb); + *skb_mac_header(skb) = IPPROTO_DSTOPTS; dstopt = 
(struct ipv6_destopt_hdr *)skb_transport_header(skb); dstopt->nexthdr = nexthdr; @@ -172,7 +172,9 @@ static int mip6_destopt_output(struct xfrm_state *x, struct sk_buff *skb) len = ((char *)hao - (char *)dstopt) + sizeof(*hao); memcpy(&hao->addr, &iph->saddr, sizeof(hao->addr)); + spin_lock_bh(&x->lock); memcpy(&iph->saddr, x->coaddr, sizeof(iph->saddr)); + spin_unlock_bh(&x->lock); BUG_TRAP(len == x->props.header_len); dstopt->hdrlen = (x->props.header_len >> 3) - 1; @@ -365,11 +367,11 @@ static int mip6_rthdr_output(struct xfrm_state *x, struct sk_buff *skb) struct rt2_hdr *rt2; u8 nexthdr; - iph = (struct ipv6hdr *)skb->data; - iph->payload_len = htons(skb->len - sizeof(*iph)); + skb_push(skb, -skb_network_offset(skb)); + iph = ipv6_hdr(skb); - nexthdr = *skb_network_header(skb); - *skb_network_header(skb) = IPPROTO_ROUTING; + nexthdr = *skb_mac_header(skb); + *skb_mac_header(skb) = IPPROTO_ROUTING; rt2 = (struct rt2_hdr *)skb_transport_header(skb); rt2->rt_hdr.nexthdr = nexthdr; @@ -381,7 +383,9 @@ static int mip6_rthdr_output(struct xfrm_state *x, struct sk_buff *skb) BUG_TRAP(rt2->rt_hdr.hdrlen == 2); memcpy(&rt2->addr, &iph->daddr, sizeof(rt2->addr)); + spin_lock_bh(&x->lock); memcpy(&iph->daddr, x->coaddr, sizeof(iph->daddr)); + spin_unlock_bh(&x->lock); return 0; } diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c index 5b596659177cbbd8bd5e6c2db62b803af4ca1817..6cc33dc83d1cb354767de626024fd6cd982573c3 100644 --- a/net/ipv6/ndisc.c +++ b/net/ipv6/ndisc.c @@ -15,9 +15,10 @@ /* * Changes: * + * Pierre Ynard : export userland ND options + * through netlink (RDNSS support) * Lars Fenneberg : fixed MTU setting on receipt * of an RA. - * * Janos Farkas : kmalloc failure checks * Alexey Kuznetsov : state machine reworked * and moved to net/core. @@ -78,6 +79,9 @@ #include #include +#include +#include + #include #include #include @@ -161,6 +165,8 @@ struct ndisc_options { struct nd_opt_hdr *nd_opts_ri; struct nd_opt_hdr *nd_opts_ri_end; #endif + struct nd_opt_hdr *nd_useropts; + struct nd_opt_hdr *nd_useropts_end; }; #define nd_opts_src_lladdr nd_opt_array[ND_OPT_SOURCE_LL_ADDR] @@ -225,6 +231,22 @@ static struct nd_opt_hdr *ndisc_next_option(struct nd_opt_hdr *cur, return (cur <= end && cur->nd_opt_type == type ? cur : NULL); } +static inline int ndisc_is_useropt(struct nd_opt_hdr *opt) +{ + return (opt->nd_opt_type == ND_OPT_RDNSS); +} + +static struct nd_opt_hdr *ndisc_next_useropt(struct nd_opt_hdr *cur, + struct nd_opt_hdr *end) +{ + if (!cur || !end || cur >= end) + return NULL; + do { + cur = ((void *)cur) + (cur->nd_opt_len << 3); + } while(cur < end && !ndisc_is_useropt(cur)); + return (cur <= end && ndisc_is_useropt(cur) ? cur : NULL); +} + static struct ndisc_options *ndisc_parse_options(u8 *opt, int opt_len, struct ndisc_options *ndopts) { @@ -256,7 +278,7 @@ static struct ndisc_options *ndisc_parse_options(u8 *opt, int opt_len, break; case ND_OPT_PREFIX_INFO: ndopts->nd_opts_pi_end = nd_opt; - if (ndopts->nd_opt_array[nd_opt->nd_opt_type] == 0) + if (!ndopts->nd_opt_array[nd_opt->nd_opt_type]) ndopts->nd_opt_array[nd_opt->nd_opt_type] = nd_opt; break; #ifdef CONFIG_IPV6_ROUTE_INFO @@ -267,14 +289,21 @@ static struct ndisc_options *ndisc_parse_options(u8 *opt, int opt_len, break; #endif default: - /* - * Unknown options must be silently ignored, - * to accommodate future extension to the protocol. 
- */ - ND_PRINTK2(KERN_NOTICE - "%s(): ignored unsupported option; type=%d, len=%d\n", - __FUNCTION__, - nd_opt->nd_opt_type, nd_opt->nd_opt_len); + if (ndisc_is_useropt(nd_opt)) { + ndopts->nd_useropts_end = nd_opt; + if (!ndopts->nd_useropts) + ndopts->nd_useropts = nd_opt; + } else { + /* + * Unknown options must be silently ignored, + * to accommodate future extension to the + * protocol. + */ + ND_PRINTK2(KERN_NOTICE + "%s(): ignored unsupported option; type=%d, len=%d\n", + __FUNCTION__, + nd_opt->nd_opt_type, nd_opt->nd_opt_len); + } } opt_len -= l; nd_opt = ((void *)nd_opt) + l; @@ -354,7 +383,7 @@ static int ndisc_constructor(struct neighbour *neigh) rcu_read_unlock(); neigh->type = is_multicast ? RTN_MULTICAST : RTN_UNICAST; - if (dev->hard_header == NULL) { + if (!dev->header_ops) { neigh->nud_state = NUD_NOARP; neigh->ops = &ndisc_direct_ops; neigh->output = neigh->ops->queue_xmit; @@ -371,7 +400,7 @@ static int ndisc_constructor(struct neighbour *neigh) neigh->nud_state = NUD_NOARP; memcpy(neigh->ha, dev->broadcast, dev->addr_len); } - if (dev->hard_header_cache) + if (dev->header_ops->cache) neigh->ops = &ndisc_hh_ops; else neigh->ops = &ndisc_generic_ops; @@ -431,7 +460,7 @@ static void __ndisc_send(struct net_device *dev, struct neighbour *neigh, struct in6_addr *daddr, struct in6_addr *saddr, struct icmp6hdr *icmp6h, struct in6_addr *target, - int llinfo, int icmp6_mib_outnd) + int llinfo) { struct flowi fl; struct dst_entry *dst; @@ -441,9 +470,11 @@ static void __ndisc_send(struct net_device *dev, struct inet6_dev *idev; int len; int err; - u8 *opt; + u8 *opt, type; + + type = icmp6h->icmp6_type; - ndisc_flow_init(&fl, icmp6h->icmp6_type, saddr, daddr, + ndisc_flow_init(&fl, type, saddr, daddr, dev->ifindex); dst = ndisc_dst_alloc(dev, neigh, daddr, ip6_output); @@ -504,7 +535,7 @@ static void __ndisc_send(struct net_device *dev, err = NF_HOOK(PF_INET6, NF_IP6_LOCAL_OUT, skb, NULL, dst->dev, dst_output); if (!err) { - ICMP6_INC_STATS(idev, icmp6_mib_outnd); + ICMP6MSGOUT_INC_STATS(idev, type); ICMP6_INC_STATS(idev, ICMP6_MIB_OUTMSGS); } @@ -542,8 +573,7 @@ static void ndisc_send_na(struct net_device *dev, struct neighbour *neigh, __ndisc_send(dev, neigh, daddr, src_addr, &icmp6h, solicited_addr, - inc_opt ? ND_OPT_TARGET_LL_ADDR : 0, - ICMP6_MIB_OUTNEIGHBORADVERTISEMENTS); + inc_opt ? ND_OPT_TARGET_LL_ADDR : 0); } void ndisc_send_ns(struct net_device *dev, struct neighbour *neigh, @@ -564,8 +594,7 @@ void ndisc_send_ns(struct net_device *dev, struct neighbour *neigh, __ndisc_send(dev, neigh, daddr, saddr, &icmp6h, solicit, - !ipv6_addr_any(saddr) ? ND_OPT_SOURCE_LL_ADDR : 0, - ICMP6_MIB_OUTNEIGHBORSOLICITS); + !ipv6_addr_any(saddr) ? ND_OPT_SOURCE_LL_ADDR : 0); } void ndisc_send_rs(struct net_device *dev, struct in6_addr *saddr, @@ -599,8 +628,7 @@ void ndisc_send_rs(struct net_device *dev, struct in6_addr *saddr, #endif __ndisc_send(dev, NULL, daddr, saddr, &icmp6h, NULL, - send_sllao ? ND_OPT_SOURCE_LL_ADDR : 0, - ICMP6_MIB_OUTROUTERSOLICITS); + send_sllao ? 
ND_OPT_SOURCE_LL_ADDR : 0); } @@ -808,7 +836,7 @@ static void ndisc_recv_ns(struct sk_buff *skb) neigh_update(neigh, lladdr, NUD_STALE, NEIGH_UPDATE_F_WEAK_OVERRIDE| NEIGH_UPDATE_F_OVERRIDE); - if (neigh || !dev->hard_header) { + if (neigh || !dev->header_ops) { ndisc_send_na(dev, neigh, saddr, &msg->target, is_router, 1, (ifp != NULL && inc), inc); @@ -985,6 +1013,53 @@ static void ndisc_recv_rs(struct sk_buff *skb) in6_dev_put(idev); } +static void ndisc_ra_useropt(struct sk_buff *ra, struct nd_opt_hdr *opt) +{ + struct icmp6hdr *icmp6h = (struct icmp6hdr *)skb_transport_header(ra); + struct sk_buff *skb; + struct nlmsghdr *nlh; + struct nduseroptmsg *ndmsg; + int err; + int base_size = NLMSG_ALIGN(sizeof(struct nduseroptmsg) + + (opt->nd_opt_len << 3)); + size_t msg_size = base_size + nla_total_size(sizeof(struct in6_addr)); + + skb = nlmsg_new(msg_size, GFP_ATOMIC); + if (skb == NULL) { + err = -ENOBUFS; + goto errout; + } + + nlh = nlmsg_put(skb, 0, 0, RTM_NEWNDUSEROPT, base_size, 0); + if (nlh == NULL) { + goto nla_put_failure; + } + + ndmsg = nlmsg_data(nlh); + ndmsg->nduseropt_family = AF_INET6; + ndmsg->nduseropt_icmp_type = icmp6h->icmp6_type; + ndmsg->nduseropt_icmp_code = icmp6h->icmp6_code; + ndmsg->nduseropt_opts_len = opt->nd_opt_len << 3; + + memcpy(ndmsg + 1, opt, opt->nd_opt_len << 3); + + NLA_PUT(skb, NDUSEROPT_SRCADDR, sizeof(struct in6_addr), + &ipv6_hdr(ra)->saddr); + nlmsg_end(skb, nlh); + + err = rtnl_notify(skb, 0, RTNLGRP_ND_USEROPT, NULL, GFP_ATOMIC); + if (err < 0) + goto errout; + + return; + +nla_put_failure: + nlmsg_free(skb); + err = -EMSGSIZE; +errout: + rtnl_set_sk_err(RTNLGRP_ND_USEROPT, err); +} + static void ndisc_router_discovery(struct sk_buff *skb) { struct ra_msg *ra_msg = (struct ra_msg *)skb_transport_header(skb); @@ -1217,6 +1292,15 @@ static void ndisc_router_discovery(struct sk_buff *skb) } } + if (ndopts.nd_useropts) { + struct nd_opt_hdr *opt; + for (opt = ndopts.nd_useropts; + opt; + opt = ndisc_next_useropt(opt, ndopts.nd_useropts_end)) { + ndisc_ra_useropt(skb, opt); + } + } + if (ndopts.nd_opts_tgt_lladdr || ndopts.nd_opts_rh) { ND_PRINTK2(KERN_WARNING "ICMPv6 RA: invalid RA options"); @@ -1455,7 +1539,7 @@ void ndisc_send_redirect(struct sk_buff *skb, struct neighbour *neigh, IP6_INC_STATS(idev, IPSTATS_MIB_OUTREQUESTS); err = NF_HOOK(PF_INET6, NF_IP6_LOCAL_OUT, buff, NULL, dst->dev, dst_output); if (!err) { - ICMP6_INC_STATS(idev, ICMP6_MIB_OUTREDIRECTS); + ICMP6MSGOUT_INC_STATS(idev, NDISC_REDIRECT); ICMP6_INC_STATS(idev, ICMP6_MIB_OUTMSGS); } @@ -1525,6 +1609,9 @@ static int ndisc_netdev_event(struct notifier_block *this, unsigned long event, { struct net_device *dev = ptr; + if (dev->nd_net != &init_net) + return NOTIFY_DONE; + switch (event) { case NETDEV_CHANGEADDR: neigh_changeaddr(&nd_tbl, dev); diff --git a/net/ipv6/netfilter/ip6_queue.c b/net/ipv6/netfilter/ip6_queue.c index 0004db38af6d8e8abed0451195b30b57814809e6..0473145ac534fd98dc60997418a94b1c68e37d91 100644 --- a/net/ipv6/netfilter/ip6_queue.c +++ b/net/ipv6/netfilter/ip6_queue.c @@ -24,6 +24,7 @@ #include #include #include +#include #include #include #include @@ -247,10 +248,7 @@ ipq_build_packet_message(struct ipq_queue_entry *entry, int *errp) if (entry->info->indev && entry->skb->dev) { pmsg->hw_type = entry->skb->dev->type; - if (entry->skb->dev->hard_header_parse) - pmsg->hw_addrlen = - entry->skb->dev->hard_header_parse(entry->skb, - pmsg->hw_addr); + pmsg->hw_addrlen = dev_parse_header(entry->skb, pmsg->hw_addr); } if (data_len) @@ -466,7 +464,7 @@ 
ipq_dev_drop(int ifindex) #define RCV_SKB_FAIL(err) do { netlink_ack(skb, nlh, (err)); return; } while (0) static inline void -ipq_rcv_skb(struct sk_buff *skb) +__ipq_rcv_skb(struct sk_buff *skb) { int status, type, pid, flags, nlmsglen, skblen; struct nlmsghdr *nlh; @@ -524,19 +522,10 @@ ipq_rcv_skb(struct sk_buff *skb) } static void -ipq_rcv_sk(struct sock *sk, int len) +ipq_rcv_skb(struct sk_buff *skb) { - struct sk_buff *skb; - unsigned int qlen; - mutex_lock(&ipqnl_mutex); - - for (qlen = skb_queue_len(&sk->sk_receive_queue); qlen; qlen--) { - skb = skb_dequeue(&sk->sk_receive_queue); - ipq_rcv_skb(skb); - kfree_skb(skb); - } - + __ipq_rcv_skb(skb); mutex_unlock(&ipqnl_mutex); } @@ -546,6 +535,9 @@ ipq_rcv_dev_event(struct notifier_block *this, { struct net_device *dev = ptr; + if (dev->nd_net != &init_net) + return NOTIFY_DONE; + /* Drop any packets associated with the downed device */ if (event == NETDEV_DOWN) ipq_dev_drop(dev->ifindex); @@ -565,7 +557,7 @@ ipq_rcv_nl_event(struct notifier_block *this, if (event == NETLINK_URELEASE && n->protocol == NETLINK_IP6_FW && n->pid) { write_lock_bh(&queue_lock); - if (n->pid == peer_pid) + if ((n->net == &init_net) && (n->pid == peer_pid)) __ipq_reset(); write_unlock_bh(&queue_lock); } @@ -657,14 +649,14 @@ static int __init ip6_queue_init(void) struct proc_dir_entry *proc; netlink_register_notifier(&ipq_nl_notifier); - ipqnl = netlink_kernel_create(NETLINK_IP6_FW, 0, ipq_rcv_sk, NULL, - THIS_MODULE); + ipqnl = netlink_kernel_create(&init_net, NETLINK_IP6_FW, 0, + ipq_rcv_skb, NULL, THIS_MODULE); if (ipqnl == NULL) { printk(KERN_ERR "ip6_queue: failed to create netlink socket\n"); goto cleanup_netlink_notifier; } - proc = proc_net_create(IPQ_PROC_FS_NAME, 0, ipq_get_info); + proc = proc_net_create(&init_net, IPQ_PROC_FS_NAME, 0, ipq_get_info); if (proc) proc->owner = THIS_MODULE; else { @@ -685,7 +677,7 @@ static int __init ip6_queue_init(void) cleanup_sysctl: unregister_sysctl_table(ipq_sysctl_header); unregister_netdevice_notifier(&ipq_dev_notifier); - proc_net_remove(IPQ_PROC_FS_NAME); + proc_net_remove(&init_net, IPQ_PROC_FS_NAME); cleanup_ipqnl: sock_release(ipqnl->sk_socket); @@ -705,7 +697,7 @@ static void __exit ip6_queue_fini(void) unregister_sysctl_table(ipq_sysctl_header); unregister_netdevice_notifier(&ipq_dev_notifier); - proc_net_remove(IPQ_PROC_FS_NAME); + proc_net_remove(&init_net, IPQ_PROC_FS_NAME); sock_release(ipqnl->sk_socket); mutex_lock(&ipqnl_mutex); diff --git a/net/ipv6/netfilter/ip6t_REJECT.c b/net/ipv6/netfilter/ip6t_REJECT.c index 2f487cda3b6bf346902cad7846aa6e9f3cc59c9a..3fd08d5567a6166be0c557c588078a637748eb32 100644 --- a/net/ipv6/netfilter/ip6t_REJECT.c +++ b/net/ipv6/netfilter/ip6t_REJECT.c @@ -167,7 +167,7 @@ static inline void send_unreach(struct sk_buff *skb_in, unsigned char code, unsigned int hooknum) { if (hooknum == NF_IP6_LOCAL_OUT && skb_in->dev == NULL) - skb_in->dev = &loopback_dev; + skb_in->dev = init_net.loopback_dev; icmpv6_send(skb_in, ICMPV6_DEST_UNREACH, code, 0, NULL); } diff --git a/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c b/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c index 3153e15e0f7c4a5e69e22775cc2097daa12d0985..37a3db9269532ff337dcdd0d1a3f53e803135fa9 100644 --- a/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c +++ b/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c @@ -337,36 +337,33 @@ static ctl_table nf_ct_ipv6_sysctl_table[] = { #include #include -static int ipv6_tuple_to_nfattr(struct sk_buff *skb, +static int ipv6_tuple_to_nlattr(struct sk_buff *skb, const struct 
nf_conntrack_tuple *tuple) { - NFA_PUT(skb, CTA_IP_V6_SRC, sizeof(u_int32_t) * 4, + NLA_PUT(skb, CTA_IP_V6_SRC, sizeof(u_int32_t) * 4, &tuple->src.u3.ip6); - NFA_PUT(skb, CTA_IP_V6_DST, sizeof(u_int32_t) * 4, + NLA_PUT(skb, CTA_IP_V6_DST, sizeof(u_int32_t) * 4, &tuple->dst.u3.ip6); return 0; -nfattr_failure: +nla_put_failure: return -1; } -static const size_t cta_min_ip[CTA_IP_MAX] = { - [CTA_IP_V6_SRC-1] = sizeof(u_int32_t)*4, - [CTA_IP_V6_DST-1] = sizeof(u_int32_t)*4, +static const struct nla_policy ipv6_nla_policy[CTA_IP_MAX+1] = { + [CTA_IP_V6_SRC] = { .len = sizeof(u_int32_t)*4 }, + [CTA_IP_V6_DST] = { .len = sizeof(u_int32_t)*4 }, }; -static int ipv6_nfattr_to_tuple(struct nfattr *tb[], +static int ipv6_nlattr_to_tuple(struct nlattr *tb[], struct nf_conntrack_tuple *t) { - if (!tb[CTA_IP_V6_SRC-1] || !tb[CTA_IP_V6_DST-1]) + if (!tb[CTA_IP_V6_SRC] || !tb[CTA_IP_V6_DST]) return -EINVAL; - if (nfattr_bad_size(tb, CTA_IP_MAX, cta_min_ip)) - return -EINVAL; - - memcpy(&t->src.u3.ip6, NFA_DATA(tb[CTA_IP_V6_SRC-1]), + memcpy(&t->src.u3.ip6, nla_data(tb[CTA_IP_V6_SRC]), sizeof(u_int32_t) * 4); - memcpy(&t->dst.u3.ip6, NFA_DATA(tb[CTA_IP_V6_DST-1]), + memcpy(&t->dst.u3.ip6, nla_data(tb[CTA_IP_V6_DST]), sizeof(u_int32_t) * 4); return 0; @@ -382,8 +379,9 @@ struct nf_conntrack_l3proto nf_conntrack_l3proto_ipv6 __read_mostly = { .print_conntrack = ipv6_print_conntrack, .get_l4proto = ipv6_get_l4proto, #if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE) - .tuple_to_nfattr = ipv6_tuple_to_nfattr, - .nfattr_to_tuple = ipv6_nfattr_to_tuple, + .tuple_to_nlattr = ipv6_tuple_to_nlattr, + .nlattr_to_tuple = ipv6_nlattr_to_tuple, + .nla_policy = ipv6_nla_policy, #endif #ifdef CONFIG_SYSCTL .ctl_table_path = nf_net_netfilter_sysctl_path, diff --git a/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c b/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c index ab154fb90018f5f39eb03e2610f7c95adeaa0ece..fbdc66920de4837a21461be7864fc36fbd5e7cb2 100644 --- a/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c +++ b/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c @@ -210,45 +210,42 @@ icmpv6_error(struct sk_buff *skb, unsigned int dataoff, #include #include -static int icmpv6_tuple_to_nfattr(struct sk_buff *skb, +static int icmpv6_tuple_to_nlattr(struct sk_buff *skb, const struct nf_conntrack_tuple *t) { - NFA_PUT(skb, CTA_PROTO_ICMPV6_ID, sizeof(u_int16_t), + NLA_PUT(skb, CTA_PROTO_ICMPV6_ID, sizeof(u_int16_t), &t->src.u.icmp.id); - NFA_PUT(skb, CTA_PROTO_ICMPV6_TYPE, sizeof(u_int8_t), + NLA_PUT(skb, CTA_PROTO_ICMPV6_TYPE, sizeof(u_int8_t), &t->dst.u.icmp.type); - NFA_PUT(skb, CTA_PROTO_ICMPV6_CODE, sizeof(u_int8_t), + NLA_PUT(skb, CTA_PROTO_ICMPV6_CODE, sizeof(u_int8_t), &t->dst.u.icmp.code); return 0; -nfattr_failure: +nla_put_failure: return -1; } -static const size_t cta_min_proto[CTA_PROTO_MAX] = { - [CTA_PROTO_ICMPV6_TYPE-1] = sizeof(u_int8_t), - [CTA_PROTO_ICMPV6_CODE-1] = sizeof(u_int8_t), - [CTA_PROTO_ICMPV6_ID-1] = sizeof(u_int16_t) +static const struct nla_policy icmpv6_nla_policy[CTA_PROTO_MAX+1] = { + [CTA_PROTO_ICMPV6_TYPE] = { .type = NLA_U8 }, + [CTA_PROTO_ICMPV6_CODE] = { .type = NLA_U8 }, + [CTA_PROTO_ICMPV6_ID] = { .type = NLA_U16 }, }; -static int icmpv6_nfattr_to_tuple(struct nfattr *tb[], +static int icmpv6_nlattr_to_tuple(struct nlattr *tb[], struct nf_conntrack_tuple *tuple) { - if (!tb[CTA_PROTO_ICMPV6_TYPE-1] - || !tb[CTA_PROTO_ICMPV6_CODE-1] - || !tb[CTA_PROTO_ICMPV6_ID-1]) - return -EINVAL; - - if (nfattr_bad_size(tb, CTA_PROTO_MAX, cta_min_proto)) + if 
(!tb[CTA_PROTO_ICMPV6_TYPE] + || !tb[CTA_PROTO_ICMPV6_CODE] + || !tb[CTA_PROTO_ICMPV6_ID]) return -EINVAL; tuple->dst.u.icmp.type = - *(u_int8_t *)NFA_DATA(tb[CTA_PROTO_ICMPV6_TYPE-1]); + *(u_int8_t *)nla_data(tb[CTA_PROTO_ICMPV6_TYPE]); tuple->dst.u.icmp.code = - *(u_int8_t *)NFA_DATA(tb[CTA_PROTO_ICMPV6_CODE-1]); + *(u_int8_t *)nla_data(tb[CTA_PROTO_ICMPV6_CODE]); tuple->src.u.icmp.id = - *(__be16 *)NFA_DATA(tb[CTA_PROTO_ICMPV6_ID-1]); + *(__be16 *)nla_data(tb[CTA_PROTO_ICMPV6_ID]); if (tuple->dst.u.icmp.type < 128 || tuple->dst.u.icmp.type - 128 >= sizeof(invmap) @@ -289,8 +286,9 @@ struct nf_conntrack_l4proto nf_conntrack_l4proto_icmpv6 __read_mostly = .new = icmpv6_new, .error = icmpv6_error, #if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE) - .tuple_to_nfattr = icmpv6_tuple_to_nfattr, - .nfattr_to_tuple = icmpv6_nfattr_to_tuple, + .tuple_to_nlattr = icmpv6_tuple_to_nlattr, + .nlattr_to_tuple = icmpv6_nlattr_to_tuple, + .nla_policy = icmpv6_nla_policy, #endif #ifdef CONFIG_SYSCTL .ctl_table_header = &icmpv6_sysctl_header, diff --git a/net/ipv6/proc.c b/net/ipv6/proc.c index 920dc9cf6a842ed5aec4d277da073cef13b4e161..db945018579eb919ee6dd8c68749269b26cda466 100644 --- a/net/ipv6/proc.c +++ b/net/ipv6/proc.c @@ -23,6 +23,7 @@ #include #include #include +#include #include #include #include @@ -85,47 +86,33 @@ static struct snmp_mib snmp6_ipstats_list[] = { }; static struct snmp_mib snmp6_icmp6_list[] = { -/* icmpv6 mib according to RFC 2466 - - Exceptions: {In|Out}AdminProhibs are removed, because I see - no good reasons to account them separately - of another dest.unreachs. - OutErrs is zero identically. - OutEchos too. - OutRouterAdvertisements too. - OutGroupMembQueries too. - */ +/* icmpv6 mib according to RFC 2466 */ SNMP_MIB_ITEM("Icmp6InMsgs", ICMP6_MIB_INMSGS), SNMP_MIB_ITEM("Icmp6InErrors", ICMP6_MIB_INERRORS), - SNMP_MIB_ITEM("Icmp6InDestUnreachs", ICMP6_MIB_INDESTUNREACHS), - SNMP_MIB_ITEM("Icmp6InPktTooBigs", ICMP6_MIB_INPKTTOOBIGS), - SNMP_MIB_ITEM("Icmp6InTimeExcds", ICMP6_MIB_INTIMEEXCDS), - SNMP_MIB_ITEM("Icmp6InParmProblems", ICMP6_MIB_INPARMPROBLEMS), - SNMP_MIB_ITEM("Icmp6InEchos", ICMP6_MIB_INECHOS), - SNMP_MIB_ITEM("Icmp6InEchoReplies", ICMP6_MIB_INECHOREPLIES), - SNMP_MIB_ITEM("Icmp6InGroupMembQueries", ICMP6_MIB_INGROUPMEMBQUERIES), - SNMP_MIB_ITEM("Icmp6InGroupMembResponses", ICMP6_MIB_INGROUPMEMBRESPONSES), - SNMP_MIB_ITEM("Icmp6InGroupMembReductions", ICMP6_MIB_INGROUPMEMBREDUCTIONS), - SNMP_MIB_ITEM("Icmp6InRouterSolicits", ICMP6_MIB_INROUTERSOLICITS), - SNMP_MIB_ITEM("Icmp6InRouterAdvertisements", ICMP6_MIB_INROUTERADVERTISEMENTS), - SNMP_MIB_ITEM("Icmp6InNeighborSolicits", ICMP6_MIB_INNEIGHBORSOLICITS), - SNMP_MIB_ITEM("Icmp6InNeighborAdvertisements", ICMP6_MIB_INNEIGHBORADVERTISEMENTS), - SNMP_MIB_ITEM("Icmp6InRedirects", ICMP6_MIB_INREDIRECTS), SNMP_MIB_ITEM("Icmp6OutMsgs", ICMP6_MIB_OUTMSGS), - SNMP_MIB_ITEM("Icmp6OutDestUnreachs", ICMP6_MIB_OUTDESTUNREACHS), - SNMP_MIB_ITEM("Icmp6OutPktTooBigs", ICMP6_MIB_OUTPKTTOOBIGS), - SNMP_MIB_ITEM("Icmp6OutTimeExcds", ICMP6_MIB_OUTTIMEEXCDS), - SNMP_MIB_ITEM("Icmp6OutParmProblems", ICMP6_MIB_OUTPARMPROBLEMS), - SNMP_MIB_ITEM("Icmp6OutEchoReplies", ICMP6_MIB_OUTECHOREPLIES), - SNMP_MIB_ITEM("Icmp6OutRouterSolicits", ICMP6_MIB_OUTROUTERSOLICITS), - SNMP_MIB_ITEM("Icmp6OutNeighborSolicits", ICMP6_MIB_OUTNEIGHBORSOLICITS), - SNMP_MIB_ITEM("Icmp6OutNeighborAdvertisements", ICMP6_MIB_OUTNEIGHBORADVERTISEMENTS), - SNMP_MIB_ITEM("Icmp6OutRedirects", ICMP6_MIB_OUTREDIRECTS), - 
SNMP_MIB_ITEM("Icmp6OutGroupMembResponses", ICMP6_MIB_OUTGROUPMEMBRESPONSES), - SNMP_MIB_ITEM("Icmp6OutGroupMembReductions", ICMP6_MIB_OUTGROUPMEMBREDUCTIONS), SNMP_MIB_SENTINEL }; +/* RFC 4293 v6 ICMPMsgStatsTable; named items for RFC 2466 compatibility */ +static char *icmp6type2name[256] = { + [ICMPV6_DEST_UNREACH] = "DestUnreachs", + [ICMPV6_PKT_TOOBIG] = "PktTooBigs", + [ICMPV6_TIME_EXCEED] = "TimeExcds", + [ICMPV6_PARAMPROB] = "ParmProblems", + [ICMPV6_ECHO_REQUEST] = "EchoRequest", + [ICMPV6_ECHO_REPLY] = "EchoReplies", + [ICMPV6_MGM_QUERY] = "GroupMembQueries", + [ICMPV6_MGM_REPORT] = "GroupMembResponses", + [ICMPV6_MGM_REDUCTION] = "GroupMembReductions", + [ICMPV6_MLD2_REPORT] = "MLDv2Reports", + [NDISC_ROUTER_ADVERTISEMENT] = "RouterAdvertisements", + [NDISC_ROUTER_SOLICITATION] = "RouterSolicits", + [NDISC_NEIGHBOUR_ADVERTISEMENT] = "NeighborAdvertisements", + [NDISC_NEIGHBOUR_SOLICITATION] = "NeighborSolicits", + [NDISC_REDIRECT] = "NeighborRedirects", +}; + + static struct snmp_mib snmp6_udp6_list[] = { SNMP_MIB_ITEM("Udp6InDatagrams", UDP_MIB_INDATAGRAMS), SNMP_MIB_ITEM("Udp6NoPorts", UDP_MIB_NOPORTS), @@ -142,6 +129,40 @@ static struct snmp_mib snmp6_udplite6_list[] = { SNMP_MIB_SENTINEL }; +static void snmp6_seq_show_icmpv6msg(struct seq_file *seq, void **mib) +{ + static char name[32]; + int i; + + /* print by name -- deprecated items */ + for (i = 0; i < ICMP6MSG_MIB_MAX; i++) { + int icmptype; + char *p; + + icmptype = i & 0xff; + p = icmp6type2name[icmptype]; + if (!p) /* don't print un-named types here */ + continue; + (void) snprintf(name, sizeof(name)-1, "Icmp6%s%s", + i & 0x100 ? "Out" : "In", p); + seq_printf(seq, "%-32s\t%lu\n", name, + snmp_fold_field(mib, i)); + } + + /* print by number (nonzero only) - ICMPMsgStat format */ + for (i = 0; i < ICMP6MSG_MIB_MAX; i++) { + unsigned long val; + + val = snmp_fold_field(mib, i); + if (!val) + continue; + (void) snprintf(name, sizeof(name)-1, "Icmp6%sType%u", + i & 0x100 ? 
"Out" : "In", i & 0xff); + seq_printf(seq, "%-32s\t%lu\n", name, val); + } + return; +} + static inline void snmp6_seq_show_item(struct seq_file *seq, void **mib, struct snmp_mib *itemlist) { @@ -159,9 +180,11 @@ static int snmp6_seq_show(struct seq_file *seq, void *v) seq_printf(seq, "%-32s\t%u\n", "ifIndex", idev->dev->ifindex); snmp6_seq_show_item(seq, (void **)idev->stats.ipv6, snmp6_ipstats_list); snmp6_seq_show_item(seq, (void **)idev->stats.icmpv6, snmp6_icmp6_list); + snmp6_seq_show_icmpv6msg(seq, (void **)idev->stats.icmpv6msg); } else { snmp6_seq_show_item(seq, (void **)ipv6_statistics, snmp6_ipstats_list); snmp6_seq_show_item(seq, (void **)icmpv6_statistics, snmp6_icmp6_list); + snmp6_seq_show_icmpv6msg(seq, (void **)icmpv6msg_statistics); snmp6_seq_show_item(seq, (void **)udp_stats_in6, snmp6_udp6_list); snmp6_seq_show_item(seq, (void **)udplite_stats_in6, snmp6_udplite6_list); } @@ -231,22 +254,22 @@ int __init ipv6_misc_proc_init(void) { int rc = 0; - if (!proc_net_fops_create("snmp6", S_IRUGO, &snmp6_seq_fops)) + if (!proc_net_fops_create(&init_net, "snmp6", S_IRUGO, &snmp6_seq_fops)) goto proc_snmp6_fail; - proc_net_devsnmp6 = proc_mkdir("dev_snmp6", proc_net); + proc_net_devsnmp6 = proc_mkdir("dev_snmp6", init_net.proc_net); if (!proc_net_devsnmp6) goto proc_dev_snmp6_fail; - if (!proc_net_fops_create("sockstat6", S_IRUGO, &sockstat6_seq_fops)) + if (!proc_net_fops_create(&init_net, "sockstat6", S_IRUGO, &sockstat6_seq_fops)) goto proc_sockstat6_fail; out: return rc; proc_sockstat6_fail: - proc_net_remove("dev_snmp6"); + proc_net_remove(&init_net, "dev_snmp6"); proc_dev_snmp6_fail: - proc_net_remove("snmp6"); + proc_net_remove(&init_net, "snmp6"); proc_snmp6_fail: rc = -ENOMEM; goto out; @@ -254,8 +277,8 @@ int __init ipv6_misc_proc_init(void) void ipv6_misc_proc_exit(void) { - proc_net_remove("sockstat6"); - proc_net_remove("dev_snmp6"); - proc_net_remove("snmp6"); + proc_net_remove(&init_net, "sockstat6"); + proc_net_remove(&init_net, "dev_snmp6"); + proc_net_remove(&init_net, "snmp6"); } diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c index 77167afa3455a162fae611ce7fbbdeec764d949a..ca24ef19cd8f294c35d7d285751a5bb55c5d1838 100644 --- a/net/ipv6/raw.c +++ b/net/ipv6/raw.c @@ -35,6 +35,7 @@ #include #include +#include #include #include #include @@ -282,7 +283,7 @@ static int rawv6_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len) if (!sk->sk_bound_dev_if) goto out; - dev = dev_get_by_index(sk->sk_bound_dev_if); + dev = dev_get_by_index(&init_net, sk->sk_bound_dev_if); if (!dev) { err = -ENODEV; goto out; @@ -1288,21 +1289,8 @@ static const struct seq_operations raw6_seq_ops = { static int raw6_seq_open(struct inode *inode, struct file *file) { - struct seq_file *seq; - int rc = -ENOMEM; - struct raw6_iter_state *s = kzalloc(sizeof(*s), GFP_KERNEL); - if (!s) - goto out; - rc = seq_open(file, &raw6_seq_ops); - if (rc) - goto out_kfree; - seq = file->private_data; - seq->private = s; -out: - return rc; -out_kfree: - kfree(s); - goto out; + return seq_open_private(file, &raw6_seq_ops, + sizeof(struct raw6_iter_state)); } static const struct file_operations raw6_seq_fops = { @@ -1315,13 +1303,13 @@ static const struct file_operations raw6_seq_fops = { int __init raw6_proc_init(void) { - if (!proc_net_fops_create("raw6", S_IRUGO, &raw6_seq_fops)) + if (!proc_net_fops_create(&init_net, "raw6", S_IRUGO, &raw6_seq_fops)) return -ENOMEM; return 0; } void raw6_proc_exit(void) { - proc_net_remove("raw6"); + proc_net_remove(&init_net, "raw6"); } #endif /* CONFIG_PROC_FS */ diff 
--git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c index de795c04e34c1862723eb8100a8f376a5b375ebb..31601c99354107e6487cb4f9f161476a4a392cfb 100644 --- a/net/ipv6/reassembly.c +++ b/net/ipv6/reassembly.c @@ -301,7 +301,7 @@ static void ip6_frag_expire(unsigned long data) fq_kill(fq); - dev = dev_get_by_index(fq->iif); + dev = dev_get_by_index(&init_net, fq->iif); if (!dev) goto out; diff --git a/net/ipv6/route.c b/net/ipv6/route.c index 55ea80fac6016929efc6fcbdabfa0289f03f78fa..6ff19f9eb9ee43c85633ab391fa45cdb42ff5ccf 100644 --- a/net/ipv6/route.c +++ b/net/ipv6/route.c @@ -44,6 +44,7 @@ #include #endif +#include #include #include #include @@ -137,7 +138,6 @@ struct rt6_info ip6_null_entry = { .dst = { .__refcnt = ATOMIC_INIT(1), .__use = 1, - .dev = &loopback_dev, .obsolete = -1, .error = -ENETUNREACH, .metrics = { [RTAX_HOPLIMIT - 1] = 255, }, @@ -163,7 +163,6 @@ struct rt6_info ip6_prohibit_entry = { .dst = { .__refcnt = ATOMIC_INIT(1), .__use = 1, - .dev = &loopback_dev, .obsolete = -1, .error = -EACCES, .metrics = { [RTAX_HOPLIMIT - 1] = 255, }, @@ -183,7 +182,6 @@ struct rt6_info ip6_blk_hole_entry = { .dst = { .__refcnt = ATOMIC_INIT(1), .__use = 1, - .dev = &loopback_dev, .obsolete = -1, .error = -EINVAL, .metrics = { [RTAX_HOPLIMIT - 1] = 255, }, @@ -223,8 +221,8 @@ static void ip6_dst_ifdown(struct dst_entry *dst, struct net_device *dev, struct rt6_info *rt = (struct rt6_info *)dst; struct inet6_dev *idev = rt->rt6i_idev; - if (dev != &loopback_dev && idev != NULL && idev->dev == dev) { - struct inet6_dev *loopback_idev = in6_dev_get(&loopback_dev); + if (dev != init_net.loopback_dev && idev != NULL && idev->dev == dev) { + struct inet6_dev *loopback_idev = in6_dev_get(init_net.loopback_dev); if (loopback_idev != NULL) { rt->rt6i_idev = loopback_idev; in6_dev_put(idev); @@ -1129,7 +1127,7 @@ int ip6_route_add(struct fib6_config *cfg) #endif if (cfg->fc_ifindex) { err = -ENODEV; - dev = dev_get_by_index(cfg->fc_ifindex); + dev = dev_get_by_index(&init_net, cfg->fc_ifindex); if (!dev) goto out; idev = in6_dev_get(dev); @@ -1187,12 +1185,12 @@ int ip6_route_add(struct fib6_config *cfg) if ((cfg->fc_flags & RTF_REJECT) || (dev && (dev->flags&IFF_LOOPBACK) && !(addr_type&IPV6_ADDR_LOOPBACK))) { /* hold loopback dev/idev if we haven't done so. 
*/ - if (dev != &loopback_dev) { + if (dev != init_net.loopback_dev) { if (dev) { dev_put(dev); in6_dev_put(idev); } - dev = &loopback_dev; + dev = init_net.loopback_dev; dev_hold(dev); idev = in6_dev_get(dev); if (!idev) { @@ -1278,7 +1276,7 @@ int ip6_route_add(struct fib6_config *cfg) int remaining; nla_for_each_attr(nla, cfg->fc_mx, cfg->fc_mx_len, remaining) { - int type = nla->nla_type; + int type = nla_type(nla); if (type) { if (type > RTAX_MAX) { @@ -1896,13 +1894,13 @@ struct rt6_info *addrconf_dst_alloc(struct inet6_dev *idev, if (rt == NULL) return ERR_PTR(-ENOMEM); - dev_hold(&loopback_dev); + dev_hold(init_net.loopback_dev); in6_dev_hold(idev); rt->u.dst.flags = DST_HOST; rt->u.dst.input = ip6_input; rt->u.dst.output = ip6_output; - rt->rt6i_dev = &loopback_dev; + rt->rt6i_dev = init_net.loopback_dev; rt->rt6i_idev = idev; rt->u.dst.metrics[RTAX_MTU-1] = ipv6_get_mtu(rt->rt6i_dev); rt->u.dst.metrics[RTAX_ADVMSS-1] = ipv6_advmss(dst_mtu(&rt->u.dst)); @@ -2264,7 +2262,7 @@ static int inet6_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr* nlh, void if (iif) { struct net_device *dev; - dev = __dev_get_by_index(iif); + dev = __dev_get_by_index(&init_net, iif); if (!dev) { err = -ENODEV; goto errout; @@ -2561,11 +2559,11 @@ void __init ip6_route_init(void) fib6_init(); #ifdef CONFIG_PROC_FS - p = proc_net_create("ipv6_route", 0, rt6_proc_info); + p = proc_net_create(&init_net, "ipv6_route", 0, rt6_proc_info); if (p) p->owner = THIS_MODULE; - proc_net_fops_create("rt6_stats", S_IRUGO, &rt6_stats_seq_fops); + proc_net_fops_create(&init_net, "rt6_stats", S_IRUGO, &rt6_stats_seq_fops); #endif #ifdef CONFIG_XFRM xfrm6_init(); @@ -2585,8 +2583,8 @@ void ip6_route_cleanup(void) fib6_rules_cleanup(); #endif #ifdef CONFIG_PROC_FS - proc_net_remove("ipv6_route"); - proc_net_remove("rt6_stats"); + proc_net_remove(&init_net, "ipv6_route"); + proc_net_remove(&init_net, "rt6_stats"); #endif #ifdef CONFIG_XFRM xfrm6_fini(); diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c index eb20bb690abd736e6699f1af7987cb8a1287d3c3..466657a9a8bd813e4f07afbfef261b9200d38c83 100644 --- a/net/ipv6/sit.c +++ b/net/ipv6/sit.c @@ -167,7 +167,7 @@ static struct ip_tunnel * ipip6_tunnel_locate(struct ip_tunnel_parm *parms, int int i; for (i=1; i<100; i++) { sprintf(name, "sit%d", i); - if (__dev_get_by_name(name) == NULL) + if (__dev_get_by_name(&init_net, name) == NULL) break; } if (i==100) @@ -714,7 +714,6 @@ static int ipip6_tunnel_change_mtu(struct net_device *dev, int new_mtu) static void ipip6_tunnel_setup(struct net_device *dev) { - SET_MODULE_OWNER(dev); dev->uninit = ipip6_tunnel_uninit; dev->destructor = free_netdev; dev->hard_start_xmit = ipip6_tunnel_xmit; @@ -761,7 +760,7 @@ static int ipip6_tunnel_init(struct net_device *dev) } if (!tdev && tunnel->parms.link) - tdev = __dev_get_by_index(tunnel->parms.link); + tdev = __dev_get_by_index(&init_net, tunnel->parms.link); if (tdev) { dev->hard_header_len = tdev->hard_header_len + sizeof(struct iphdr); diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c index 3e06799b37a65248047f050072b6208dacdca336..a07b59c528f331181a1c34a873d8e754e4e9c1d1 100644 --- a/net/ipv6/tcp_ipv6.c +++ b/net/ipv6/tcp_ipv6.c @@ -697,7 +697,7 @@ static int tcp_v6_parse_md5_keys (struct sock *sk, char __user *optval, if (!cmd.tcpm_keylen) { if (!tcp_sk(sk)->md5sig_info) return -ENOENT; - if (ipv6_addr_type(&sin6->sin6_addr) & IPV6_ADDR_MAPPED) + if (ipv6_addr_v4mapped(&sin6->sin6_addr)) return tcp_v4_md5_do_del(sk, sin6->sin6_addr.s6_addr32[3]); return tcp_v6_md5_do_del(sk, 
&sin6->sin6_addr); } @@ -720,7 +720,7 @@ static int tcp_v6_parse_md5_keys (struct sock *sk, char __user *optval, newkey = kmemdup(cmd.tcpm_key, cmd.tcpm_keylen, GFP_KERNEL); if (!newkey) return -ENOMEM; - if (ipv6_addr_type(&sin6->sin6_addr) & IPV6_ADDR_MAPPED) { + if (ipv6_addr_v4mapped(&sin6->sin6_addr)) { return tcp_v4_md5_do_add(sk, sin6->sin6_addr.s6_addr32[3], newkey, cmd.tcpm_keylen); } diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c index c347f3e30e2e5ef1ccd3469f6f2ab804649402c4..82ff26dd4470e894c46099f8db7f84eb8e9301d7 100644 --- a/net/ipv6/udp.c +++ b/net/ipv6/udp.c @@ -612,7 +612,7 @@ int udpv6_sendmsg(struct kiocb *iocb, struct sock *sk, daddr = NULL; if (daddr) { - if (ipv6_addr_type(daddr) == IPV6_ADDR_MAPPED) { + if (ipv6_addr_v4mapped(daddr)) { struct sockaddr_in sin; sin.sin_family = AF_INET; sin.sin_port = sin6 ? sin6->sin6_port : inet->dport; diff --git a/net/ipv6/xfrm6_mode_beet.c b/net/ipv6/xfrm6_mode_beet.c index 2e61d6ddece303665ce8209f845c9f25195ce322..13bb1e85676467e8e3a03d19b38518736d44e2ce 100644 --- a/net/ipv6/xfrm6_mode_beet.c +++ b/net/ipv6/xfrm6_mode_beet.c @@ -22,14 +22,6 @@ /* Add encapsulation header. * * The top IP header will be constructed per draft-nikander-esp-beet-mode-06.txt. - * The following fields in it shall be filled in by x->type->output: - * payload_len - * - * On exit, skb->h will be set to the start of the encapsulation header to be - * filled in by x->type->output and skb->nh will be set to the nextheader field - * of the extension header directly preceding the encapsulation header, or in - * its absence, that of the top IP header. The value of skb->data will always - * point to the top IP header. */ static int xfrm6_beet_output(struct xfrm_state *x, struct sk_buff *skb) { @@ -37,19 +29,17 @@ static int xfrm6_beet_output(struct xfrm_state *x, struct sk_buff *skb) u8 *prevhdr; int hdr_len; - skb_push(skb, x->props.header_len); iph = ipv6_hdr(skb); hdr_len = ip6_find_1stfragopt(skb, &prevhdr); - skb_set_network_header(skb, - (prevhdr - x->props.header_len) - skb->data); - skb_set_transport_header(skb, hdr_len); - memmove(skb->data, iph, hdr_len); - skb_reset_network_header(skb); + skb_set_mac_header(skb, (prevhdr - x->props.header_len) - skb->data); + skb_set_network_header(skb, -x->props.header_len); + skb->transport_header = skb->network_header + hdr_len; + __skb_pull(skb, hdr_len); + top_iph = ipv6_hdr(skb); - skb->transport_header = skb->network_header + sizeof(struct ipv6hdr); - skb->network_header += offsetof(struct ipv6hdr, nexthdr); + memmove(top_iph, iph, hdr_len); ipv6_addr_copy(&top_iph->saddr, (struct in6_addr *)&x->props.saddr); ipv6_addr_copy(&top_iph->daddr, (struct in6_addr *)&x->id.daddr); diff --git a/net/ipv6/xfrm6_mode_ro.c b/net/ipv6/xfrm6_mode_ro.c index 6ad6d7ac6bd70e4604354bb466212ee0b5712501..957ae36b66958ab55fbe656e01d20bcba76f78a1 100644 --- a/net/ipv6/xfrm6_mode_ro.c +++ b/net/ipv6/xfrm6_mode_ro.c @@ -29,6 +29,7 @@ #include #include #include +#include #include #include @@ -36,12 +37,6 @@ * * The IP header and mutable extension headers will be moved forward to make * space for the route optimization header. - * - * On exit, skb->h will be set to the start of the encapsulation header to be - * filled in by x->type->output and skb->nh will be set to the nextheader field - * of the extension header directly preceding the encapsulation header, or in - * its absence, that of the top IP header. The value of skb->data will always - * point to the top IP header. 
*/ static int xfrm6_ro_output(struct xfrm_state *x, struct sk_buff *skb) { @@ -49,14 +44,17 @@ static int xfrm6_ro_output(struct xfrm_state *x, struct sk_buff *skb) u8 *prevhdr; int hdr_len; - skb_push(skb, x->props.header_len); iph = ipv6_hdr(skb); hdr_len = x->type->hdr_offset(x, skb, &prevhdr); - skb_set_network_header(skb, - (prevhdr - x->props.header_len) - skb->data); - skb_set_transport_header(skb, hdr_len); - memmove(skb->data, iph, hdr_len); + skb_set_mac_header(skb, (prevhdr - x->props.header_len) - skb->data); + skb_set_network_header(skb, -x->props.header_len); + skb->transport_header = skb->network_header + hdr_len; + __skb_pull(skb, hdr_len); + memmove(ipv6_hdr(skb), iph, hdr_len); + + x->lastused = get_seconds(); + return 0; } diff --git a/net/ipv6/xfrm6_mode_transport.c b/net/ipv6/xfrm6_mode_transport.c index c026bfea820a55366225934187ad56cd0f2d83a5..4e344105b3fddeacfc79ca7492b62f223de2c02e 100644 --- a/net/ipv6/xfrm6_mode_transport.c +++ b/net/ipv6/xfrm6_mode_transport.c @@ -18,12 +18,6 @@ * * The IP header and mutable extension headers will be moved forward to make * space for the encapsulation header. - * - * On exit, skb->h will be set to the start of the encapsulation header to be - * filled in by x->type->output and skb->nh will be set to the nextheader field - * of the extension header directly preceding the encapsulation header, or in - * its absence, that of the top IP header. The value of skb->data will always - * point to the top IP header. */ static int xfrm6_transport_output(struct xfrm_state *x, struct sk_buff *skb) { @@ -31,14 +25,14 @@ static int xfrm6_transport_output(struct xfrm_state *x, struct sk_buff *skb) u8 *prevhdr; int hdr_len; - skb_push(skb, x->props.header_len); iph = ipv6_hdr(skb); hdr_len = x->type->hdr_offset(x, skb, &prevhdr); - skb_set_network_header(skb, - (prevhdr - x->props.header_len) - skb->data); - skb_set_transport_header(skb, hdr_len); - memmove(skb->data, iph, hdr_len); + skb_set_mac_header(skb, (prevhdr - x->props.header_len) - skb->data); + skb_set_network_header(skb, -x->props.header_len); + skb->transport_header = skb->network_header + hdr_len; + __skb_pull(skb, hdr_len); + memmove(ipv6_hdr(skb), iph, hdr_len); return 0; } diff --git a/net/ipv6/xfrm6_mode_tunnel.c b/net/ipv6/xfrm6_mode_tunnel.c index 9fc95bc6509f718b675579ba037adfc5cc0a9065..ea22838791126f1df90aa82f43487ae121a6afc6 100644 --- a/net/ipv6/xfrm6_mode_tunnel.c +++ b/net/ipv6/xfrm6_mode_tunnel.c @@ -33,15 +33,7 @@ static inline void ip6ip_ecn_decapsulate(struct sk_buff *skb) /* Add encapsulation header. * - * The top IP header will be constructed per RFC 2401. The following fields - * in it shall be filled in by x->type->output: - * payload_len - * - * On exit, skb->h will be set to the start of the encapsulation header to be - * filled in by x->type->output and skb->nh will be set to the nextheader field - * of the extension header directly preceding the encapsulation header, or in - * its absence, that of the top IP header. The value of skb->data will always - * point to the top IP header. + * The top IP header will be constructed per RFC 2401. 
*/ static int xfrm6_tunnel_output(struct xfrm_state *x, struct sk_buff *skb) { @@ -50,13 +42,13 @@ static int xfrm6_tunnel_output(struct xfrm_state *x, struct sk_buff *skb) struct ipv6hdr *iph, *top_iph; int dsfield; - skb_push(skb, x->props.header_len); iph = ipv6_hdr(skb); - skb_reset_network_header(skb); + skb_set_network_header(skb, -x->props.header_len); + skb->mac_header = skb->network_header + + offsetof(struct ipv6hdr, nexthdr); + skb->transport_header = skb->network_header + sizeof(*iph); top_iph = ipv6_hdr(skb); - skb->transport_header = skb->network_header + sizeof(struct ipv6hdr); - skb->network_header += offsetof(struct ipv6hdr, nexthdr); top_iph->version = 6; if (xdst->route->ops->family == AF_INET6) { diff --git a/net/ipv6/xfrm6_output.c b/net/ipv6/xfrm6_output.c index 56364a5f676ad71830c9b4118088e29b6da30cc5..4618c18e611dc3bd100809eeaec8910903d950e8 100644 --- a/net/ipv6/xfrm6_output.c +++ b/net/ipv6/xfrm6_output.c @@ -9,9 +9,9 @@ * 2 of the License, or (at your option) any later version. */ +#include #include #include -#include #include #include #include @@ -43,62 +43,31 @@ static int xfrm6_tunnel_check_size(struct sk_buff *skb) return ret; } -static int xfrm6_output_one(struct sk_buff *skb) +static inline int xfrm6_output_one(struct sk_buff *skb) { struct dst_entry *dst = skb->dst; struct xfrm_state *x = dst->xfrm; + struct ipv6hdr *iph; int err; - if (skb->ip_summed == CHECKSUM_PARTIAL) { - err = skb_checksum_help(skb); - if (err) - goto error_nolock; - } - if (x->props.mode == XFRM_MODE_TUNNEL) { err = xfrm6_tunnel_check_size(skb); if (err) goto error_nolock; } - do { - spin_lock_bh(&x->lock); - err = xfrm_state_check(x, skb); - if (err) - goto error; - - err = x->mode->output(x, skb); - if (err) - goto error; + err = xfrm_output(skb); + if (err) + goto error_nolock; - err = x->type->output(x, skb); - if (err) - goto error; - - x->curlft.bytes += skb->len; - x->curlft.packets++; - if (x->props.mode == XFRM_MODE_ROUTEOPTIMIZATION) - x->lastused = get_seconds(); - - spin_unlock_bh(&x->lock); - - skb_reset_network_header(skb); - - if (!(skb->dst = dst_pop(dst))) { - err = -EHOSTUNREACH; - goto error_nolock; - } - dst = skb->dst; - x = dst->xfrm; - } while (x && (x->props.mode != XFRM_MODE_TUNNEL)); + iph = ipv6_hdr(skb); + iph->payload_len = htons(skb->len - sizeof(*iph)); IP6CB(skb)->flags |= IP6SKB_XFRM_TRANSFORMED; err = 0; out_exit: return err; -error: - spin_unlock_bh(&x->lock); error_nolock: kfree_skb(skb); goto out_exit; diff --git a/net/ipv6/xfrm6_policy.c b/net/ipv6/xfrm6_policy.c index 3ec0c4770ee35346693bc52ea35d434aea28d815..15aa4c58c3159ef9ec2a844bcca0305c0b034ec8 100644 --- a/net/ipv6/xfrm6_policy.c +++ b/net/ipv6/xfrm6_policy.c @@ -375,7 +375,7 @@ static void xfrm6_dst_ifdown(struct dst_entry *dst, struct net_device *dev, xdst = (struct xfrm_dst *)dst; if (xdst->u.rt6.rt6i_idev->dev == dev) { - struct inet6_dev *loopback_idev = in6_dev_get(&loopback_dev); + struct inet6_dev *loopback_idev = in6_dev_get(init_net.loopback_dev); BUG_ON(!loopback_idev); do { diff --git a/net/ipv6/xfrm6_tunnel.c b/net/ipv6/xfrm6_tunnel.c index 30f3236c402afd470d7a542d2121e03042a1110c..3f8a3abde67ea47c2c5a9cef9a4137d751477a4a 100644 --- a/net/ipv6/xfrm6_tunnel.c +++ b/net/ipv6/xfrm6_tunnel.c @@ -242,11 +242,7 @@ EXPORT_SYMBOL(xfrm6_tunnel_free_spi); static int xfrm6_tunnel_output(struct xfrm_state *x, struct sk_buff *skb) { - struct ipv6hdr *top_iph; - - top_iph = (struct ipv6hdr *)skb->data; - top_iph->payload_len = htons(skb->len - sizeof(struct ipv6hdr)); - + skb_push(skb, 
-skb_network_offset(skb)); return 0; } diff --git a/net/ipx/af_ipx.c b/net/ipx/af_ipx.c index 8400525177ab4c837df020947bbd74133487023f..29b063d4312052cefd1a9bbdfd02b62d8b212211 100644 --- a/net/ipx/af_ipx.c +++ b/net/ipx/af_ipx.c @@ -347,6 +347,9 @@ static int ipxitf_device_event(struct notifier_block *notifier, struct net_device *dev = ptr; struct ipx_interface *i, *tmp; + if (dev->nd_net != &init_net) + return NOTIFY_DONE; + if (event != NETDEV_DOWN && event != NETDEV_UP) goto out; @@ -986,7 +989,7 @@ static int ipxitf_create(struct ipx_interface_definition *idef) if (intrfc) ipxitf_put(intrfc); - dev = dev_get_by_name(idef->ipx_device); + dev = dev_get_by_name(&init_net, idef->ipx_device); rc = -ENODEV; if (!dev) goto out; @@ -1094,7 +1097,7 @@ static int ipxitf_delete(struct ipx_interface_definition *idef) if (!dlink_type) goto out; - dev = __dev_get_by_name(idef->ipx_device); + dev = __dev_get_by_name(&init_net, idef->ipx_device); rc = -ENODEV; if (!dev) goto out; @@ -1189,7 +1192,7 @@ static int ipxitf_ioctl(unsigned int cmd, void __user *arg) if (copy_from_user(&ifr, arg, sizeof(ifr))) break; sipx = (struct sockaddr_ipx *)&ifr.ifr_addr; - dev = __dev_get_by_name(ifr.ifr_name); + dev = __dev_get_by_name(&init_net, ifr.ifr_name); rc = -ENODEV; if (!dev) break; @@ -1360,11 +1363,14 @@ static struct proto ipx_proto = { .obj_size = sizeof(struct ipx_sock), }; -static int ipx_create(struct socket *sock, int protocol) +static int ipx_create(struct net *net, struct socket *sock, int protocol) { int rc = -ESOCKTNOSUPPORT; struct sock *sk; + if (net != &init_net) + return -EAFNOSUPPORT; + /* * SPX support is not anymore in the kernel sources. If you want to * ressurrect it, completing it and making it understand shared skbs, @@ -1375,7 +1381,7 @@ static int ipx_create(struct socket *sock, int protocol) goto out; rc = -ENOMEM; - sk = sk_alloc(PF_IPX, GFP_KERNEL, &ipx_proto, 1); + sk = sk_alloc(net, PF_IPX, GFP_KERNEL, &ipx_proto, 1); if (!sk) goto out; #ifdef IPX_REFCNT_DEBUG @@ -1644,6 +1650,9 @@ static int ipx_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_ty u16 ipx_pktsize; int rc = 0; + if (dev->nd_net != &init_net) + goto drop; + /* Not ours */ if (skb->pkt_type == PACKET_OTHERHOST) goto drop; diff --git a/net/ipx/ipx_proc.c b/net/ipx/ipx_proc.c index 4226e71ae1e3d04c9642c26d0cd4132f3765b674..d483a00dc427994abf293771391b28a4194c8b03 100644 --- a/net/ipx/ipx_proc.c +++ b/net/ipx/ipx_proc.c @@ -9,6 +9,7 @@ #include #include #include +#include #include #include @@ -353,7 +354,7 @@ int __init ipx_proc_init(void) struct proc_dir_entry *p; int rc = -ENOMEM; - ipx_proc_dir = proc_mkdir("ipx", proc_net); + ipx_proc_dir = proc_mkdir("ipx", init_net.proc_net); if (!ipx_proc_dir) goto out; @@ -381,7 +382,7 @@ int __init ipx_proc_init(void) out_route: remove_proc_entry("interface", ipx_proc_dir); out_interface: - remove_proc_entry("ipx", proc_net); + remove_proc_entry("ipx", init_net.proc_net); goto out; } @@ -390,7 +391,7 @@ void __exit ipx_proc_exit(void) remove_proc_entry("interface", ipx_proc_dir); remove_proc_entry("route", ipx_proc_dir); remove_proc_entry("socket", ipx_proc_dir); - remove_proc_entry("ipx", proc_net); + remove_proc_entry("ipx", init_net.proc_net); } #else /* CONFIG_PROC_FS */ diff --git a/net/irda/af_irda.c b/net/irda/af_irda.c index 4c670cf6aefaeb3c06438aa14e435ba7c11fabe3..0328ae2654f43333fc09980368dec290641d24fb 100644 --- a/net/irda/af_irda.c +++ b/net/irda/af_irda.c @@ -60,7 +60,7 @@ #include -static int irda_create(struct socket *sock, int protocol); 
+static int irda_create(struct net *net, struct socket *sock, int protocol); static const struct proto_ops irda_stream_ops; static const struct proto_ops irda_seqpacket_ops; @@ -831,7 +831,7 @@ static int irda_accept(struct socket *sock, struct socket *newsock, int flags) IRDA_DEBUG(2, "%s()\n", __FUNCTION__); - err = irda_create(newsock, sk->sk_protocol); + err = irda_create(sk->sk_net, newsock, sk->sk_protocol); if (err) return err; @@ -1057,13 +1057,16 @@ static struct proto irda_proto = { * Create IrDA socket * */ -static int irda_create(struct socket *sock, int protocol) +static int irda_create(struct net *net, struct socket *sock, int protocol) { struct sock *sk; struct irda_sock *self; IRDA_DEBUG(2, "%s()\n", __FUNCTION__); + if (net != &init_net) + return -EAFNOSUPPORT; + /* Check for valid socket type */ switch (sock->type) { case SOCK_STREAM: /* For TTP connections with SAR disabled */ @@ -1075,7 +1078,7 @@ static int irda_create(struct socket *sock, int protocol) } /* Allocate networking socket */ - sk = sk_alloc(PF_IRDA, GFP_ATOMIC, &irda_proto, 1); + sk = sk_alloc(net, PF_IRDA, GFP_ATOMIC, &irda_proto, 1); if (sk == NULL) return -ENOMEM; @@ -1245,18 +1248,17 @@ static int irda_sendmsg(struct kiocb *iocb, struct socket *sock, struct sock *sk = sock->sk; struct irda_sock *self; struct sk_buff *skb; - int err; + int err = -EPIPE; IRDA_DEBUG(4, "%s(), len=%zd\n", __FUNCTION__, len); /* Note : socket.c set MSG_EOR on SEQPACKET sockets */ - if (msg->msg_flags & ~(MSG_DONTWAIT|MSG_EOR|MSG_CMSG_COMPAT)) + if (msg->msg_flags & ~(MSG_DONTWAIT | MSG_EOR | MSG_CMSG_COMPAT | + MSG_NOSIGNAL)) return -EINVAL; - if (sk->sk_shutdown & SEND_SHUTDOWN) { - send_sig(SIGPIPE, current, 0); - return -EPIPE; - } + if (sk->sk_shutdown & SEND_SHUTDOWN) + goto out_err; if (sk->sk_state != TCP_ESTABLISHED) return -ENOTCONN; @@ -1283,7 +1285,7 @@ static int irda_sendmsg(struct kiocb *iocb, struct socket *sock, skb = sock_alloc_send_skb(sk, len + self->max_header_size + 16, msg->msg_flags & MSG_DONTWAIT, &err); if (!skb) - return -ENOBUFS; + goto out_err; skb_reserve(skb, self->max_header_size + 16); skb_reset_transport_header(skb); @@ -1291,7 +1293,7 @@ static int irda_sendmsg(struct kiocb *iocb, struct socket *sock, err = memcpy_fromiovec(skb_transport_header(skb), msg->msg_iov, len); if (err) { kfree_skb(skb); - return err; + goto out_err; } /* @@ -1301,10 +1303,14 @@ static int irda_sendmsg(struct kiocb *iocb, struct socket *sock, err = irttp_data_request(self->tsap, skb); if (err) { IRDA_DEBUG(0, "%s(), err=%d\n", __FUNCTION__, err); - return err; + goto out_err; } /* Tell client how much data we actually sent */ return len; + + out_err: + return sk_stream_error(sk, msg->msg_flags, err); + } /* diff --git a/net/irda/irlan/irlan_client.c b/net/irda/irlan/irlan_client.c index a4c1c954582787ced123a932d0c3f8acc5b48f18..87039c2fb6a23cbece6b28825fd998b08fe8ae9c 100644 --- a/net/irda/irlan/irlan_client.c +++ b/net/irda/irlan/irlan_client.c @@ -436,6 +436,7 @@ static void irlan_check_response_param(struct irlan_cb *self, char *param, __u16 tmp_cpu; /* Temporary value in host order */ __u8 *bytes; int i; + DECLARE_MAC_BUF(mac); IRDA_DEBUG(4, "%s(), parm=%s\n", __FUNCTION__ , param); @@ -520,9 +521,8 @@ static void irlan_check_response_param(struct irlan_cb *self, char *param, /* FILTER_ENTRY, have we got an ethernet address? 
*/ if (strcmp(param, "FILTER_ENTRY") == 0) { bytes = value; - IRDA_DEBUG(4, "Ethernet address = %02x:%02x:%02x:%02x:%02x:%02x\n", - bytes[0], bytes[1], bytes[2], bytes[3], bytes[4], - bytes[5]); + IRDA_DEBUG(4, "Ethernet address = %s\n", + print_mac(mac, bytes)); for (i = 0; i < 6; i++) self->dev->dev_addr[i] = bytes[i]; } diff --git a/net/irda/irlan/irlan_eth.c b/net/irda/irlan/irlan_eth.c index c421521c0a998171a8aba1cbf3b666143d45f241..340f04a36b027ba48e8eef9f6460f22ae909289f 100644 --- a/net/irda/irlan/irlan_eth.c +++ b/net/irda/irlan/irlan_eth.c @@ -60,8 +60,6 @@ static void irlan_eth_setup(struct net_device *dev) dev->set_multicast_list = irlan_eth_set_multicast_list; dev->destructor = free_netdev; - SET_MODULE_OWNER(dev); - ether_setup(dev); /* diff --git a/net/irda/irlap.c b/net/irda/irlap.c index 3d76aafdb2e5c7bb25555dcfac05b619a92ba79a..f3236acc8d22e2706e5d1a643c085598cc982352 100644 --- a/net/irda/irlap.c +++ b/net/irda/irlap.c @@ -1219,29 +1219,11 @@ static const struct seq_operations irlap_seq_ops = { static int irlap_seq_open(struct inode *inode, struct file *file) { - struct seq_file *seq; - int rc = -ENOMEM; - struct irlap_iter_state *s = kzalloc(sizeof(*s), GFP_KERNEL); + if (irlap == NULL) + return -EINVAL; - if (!s) - goto out; - - if (irlap == NULL) { - rc = -EINVAL; - goto out_kfree; - } - - rc = seq_open(file, &irlap_seq_ops); - if (rc) - goto out_kfree; - - seq = file->private_data; - seq->private = s; -out: - return rc; -out_kfree: - kfree(s); - goto out; + return seq_open_private(file, &irlap_seq_ops, + sizeof(struct irlap_iter_state)); } const struct file_operations irlap_seq_fops = { diff --git a/net/irda/irlap_frame.c b/net/irda/irlap_frame.c index 25a3444a9234f1faba7ba2e2e88b18e452d1f44f..77ac27e811615a2e801f22c188d59762bf0ae431 100644 --- a/net/irda/irlap_frame.c +++ b/net/irda/irlap_frame.c @@ -1326,6 +1326,9 @@ int irlap_driver_rcv(struct sk_buff *skb, struct net_device *dev, int command; __u8 control; + if (dev->nd_net != &init_net) + goto out; + /* FIXME: should we get our own field? 
*/ self = (struct irlap_cb *) dev->atalk_ptr; diff --git a/net/irda/irlmp.c b/net/irda/irlmp.c index 7efa930ed68418ad6a2b0ebcfcc09e92b14bd047..7db92ced2c02223b0de90bac9f95c1ddfa0939bb 100644 --- a/net/irda/irlmp.c +++ b/net/irda/irlmp.c @@ -2003,27 +2003,10 @@ static const struct seq_operations irlmp_seq_ops = { static int irlmp_seq_open(struct inode *inode, struct file *file) { - struct seq_file *seq; - int rc = -ENOMEM; - struct irlmp_iter_state *s; - IRDA_ASSERT(irlmp != NULL, return -EINVAL;); - s = kmalloc(sizeof(*s), GFP_KERNEL); - if (!s) - goto out; - - rc = seq_open(file, &irlmp_seq_ops); - if (rc) - goto out_kfree; - - seq = file->private_data; - seq->private = s; -out: - return rc; -out_kfree: - kfree(s); - goto out; + return seq_open_private(file, &irlmp_seq_ops, + sizeof(struct irlmp_iter_state)); } const struct file_operations irlmp_seq_fops = { diff --git a/net/irda/irnetlink.c b/net/irda/irnetlink.c index 1e429c92973960fe4be164a62ef4e578f2cdc520..cd9ff176ecde7b81370f0891763fd1352ba240f2 100644 --- a/net/irda/irnetlink.c +++ b/net/irda/irnetlink.c @@ -15,6 +15,7 @@ #include #include +#include #include #include #include @@ -30,7 +31,7 @@ static struct genl_family irda_nl_family = { .maxattr = IRDA_NL_CMD_MAX, }; -static struct net_device * ifname_to_netdev(struct genl_info *info) +static struct net_device * ifname_to_netdev(struct net *net, struct genl_info *info) { char * ifname; @@ -41,7 +42,7 @@ static struct net_device * ifname_to_netdev(struct genl_info *info) IRDA_DEBUG(5, "%s(): Looking for %s\n", __FUNCTION__, ifname); - return dev_get_by_name(ifname); + return dev_get_by_name(net, ifname); } static int irda_nl_set_mode(struct sk_buff *skb, struct genl_info *info) @@ -57,7 +58,7 @@ static int irda_nl_set_mode(struct sk_buff *skb, struct genl_info *info) IRDA_DEBUG(5, "%s(): Switching to mode: %d\n", __FUNCTION__, mode); - dev = ifname_to_netdev(info); + dev = ifname_to_netdev(&init_net, info); if (!dev) return -ENODEV; @@ -82,7 +83,7 @@ static int irda_nl_get_mode(struct sk_buff *skb, struct genl_info *info) void *hdr; int ret = -ENOBUFS; - dev = ifname_to_netdev(info); + dev = ifname_to_netdev(&init_net, info); if (!dev) return -ENODEV; diff --git a/net/irda/irproc.c b/net/irda/irproc.c index 181cb51b48a8b5b50e64820a71581e702aa2f307..cae24fbda966f654d3817dcb38077b334ddd0b1a 100644 --- a/net/irda/irproc.c +++ b/net/irda/irproc.c @@ -28,6 +28,7 @@ #include #include #include +#include #include #include @@ -66,7 +67,7 @@ void __init irda_proc_register(void) int i; struct proc_dir_entry *d; - proc_irda = proc_mkdir("irda", proc_net); + proc_irda = proc_mkdir("irda", init_net.proc_net); if (proc_irda == NULL) return; proc_irda->owner = THIS_MODULE; @@ -92,7 +93,7 @@ void irda_proc_unregister(void) for (i=0; iprivate_data; - seq->private = s; -out: - return rc; -out_kfree: - kfree(s); - goto out; + return seq_open_private(file, &irttp_seq_ops, + sizeof(struct irttp_iter_state)); } const struct file_operations irttp_seq_fops = { diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c index 53ae14c35f70865eb4a224e9b51425d8add8d19a..43e01c8d382b5ab09f8d1c94f7b2572b3c89902b 100644 --- a/net/iucv/af_iucv.c +++ b/net/iucv/af_iucv.c @@ -41,6 +41,9 @@ static struct proto iucv_proto = { .obj_size = sizeof(struct iucv_sock), }; +static void iucv_sock_kill(struct sock *sk); +static void iucv_sock_close(struct sock *sk); + /* Call Back functions */ static void iucv_callback_rx(struct iucv_path *, struct iucv_message *); static void iucv_callback_txdone(struct iucv_path *, struct 
iucv_message *); @@ -213,7 +216,7 @@ static struct sock *iucv_sock_alloc(struct socket *sock, int proto, gfp_t prio) { struct sock *sk; - sk = sk_alloc(PF_IUCV, prio, &iucv_proto, 1); + sk = sk_alloc(&init_net, PF_IUCV, prio, &iucv_proto, 1); if (!sk) return NULL; @@ -221,6 +224,8 @@ static struct sock *iucv_sock_alloc(struct socket *sock, int proto, gfp_t prio) INIT_LIST_HEAD(&iucv_sk(sk)->accept_q); spin_lock_init(&iucv_sk(sk)->accept_q_lock); skb_queue_head_init(&iucv_sk(sk)->send_skb_q); + INIT_LIST_HEAD(&iucv_sk(sk)->message_q.list); + spin_lock_init(&iucv_sk(sk)->message_q.lock); skb_queue_head_init(&iucv_sk(sk)->backlog_skb_q); iucv_sk(sk)->send_tag = 0; @@ -240,7 +245,7 @@ static struct sock *iucv_sock_alloc(struct socket *sock, int proto, gfp_t prio) } /* Create an IUCV socket */ -static int iucv_sock_create(struct socket *sock, int protocol) +static int iucv_sock_create(struct net *net, struct socket *sock, int protocol) { struct sock *sk; @@ -670,6 +675,90 @@ static int iucv_sock_sendmsg(struct kiocb *iocb, struct socket *sock, return err; } +static int iucv_fragment_skb(struct sock *sk, struct sk_buff *skb, int len) +{ + int dataleft, size, copied = 0; + struct sk_buff *nskb; + + dataleft = len; + while (dataleft) { + if (dataleft >= sk->sk_rcvbuf / 4) + size = sk->sk_rcvbuf / 4; + else + size = dataleft; + + nskb = alloc_skb(size, GFP_ATOMIC | GFP_DMA); + if (!nskb) + return -ENOMEM; + + memcpy(nskb->data, skb->data + copied, size); + copied += size; + dataleft -= size; + + skb_reset_transport_header(nskb); + skb_reset_network_header(nskb); + nskb->len = size; + + skb_queue_tail(&iucv_sk(sk)->backlog_skb_q, nskb); + } + + return 0; +} + +static void iucv_process_message(struct sock *sk, struct sk_buff *skb, + struct iucv_path *path, + struct iucv_message *msg) +{ + int rc; + + if (msg->flags & IPRMDATA) { + skb->data = NULL; + skb->len = 0; + } else { + rc = iucv_message_receive(path, msg, 0, skb->data, + msg->length, NULL); + if (rc) { + kfree_skb(skb); + return; + } + if (skb->truesize >= sk->sk_rcvbuf / 4) { + rc = iucv_fragment_skb(sk, skb, msg->length); + kfree_skb(skb); + skb = NULL; + if (rc) { + iucv_path_sever(path, NULL); + return; + } + skb = skb_dequeue(&iucv_sk(sk)->backlog_skb_q); + } else { + skb_reset_transport_header(skb); + skb_reset_network_header(skb); + skb->len = msg->length; + } + } + + if (sock_queue_rcv_skb(sk, skb)) + skb_queue_head(&iucv_sk(sk)->backlog_skb_q, skb); +} + +static void iucv_process_message_q(struct sock *sk) +{ + struct iucv_sock *iucv = iucv_sk(sk); + struct sk_buff *skb; + struct sock_msg_q *p, *n; + + list_for_each_entry_safe(p, n, &iucv->message_q.list, list) { + skb = alloc_skb(p->msg.length, GFP_ATOMIC | GFP_DMA); + if (!skb) + break; + iucv_process_message(sk, skb, p->path, &p->msg); + list_del(&p->list); + kfree(p); + if (!skb_queue_empty(&iucv->backlog_skb_q)) + break; + } +} + static int iucv_sock_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len, int flags) { @@ -681,8 +770,9 @@ static int iucv_sock_recvmsg(struct kiocb *iocb, struct socket *sock, int err = 0; if ((sk->sk_state == IUCV_DISCONN || sk->sk_state == IUCV_SEVERED) && - skb_queue_empty(&iucv->backlog_skb_q) && - skb_queue_empty(&sk->sk_receive_queue)) + skb_queue_empty(&iucv->backlog_skb_q) && + skb_queue_empty(&sk->sk_receive_queue) && + list_empty(&iucv->message_q.list)) return 0; if (flags & (MSG_OOB)) @@ -721,16 +811,23 @@ static int iucv_sock_recvmsg(struct kiocb *iocb, struct socket *sock, kfree_skb(skb); /* Queue backlog skbs 
*/ - rskb = skb_dequeue(&iucv_sk(sk)->backlog_skb_q); + rskb = skb_dequeue(&iucv->backlog_skb_q); while (rskb) { if (sock_queue_rcv_skb(sk, rskb)) { - skb_queue_head(&iucv_sk(sk)->backlog_skb_q, + skb_queue_head(&iucv->backlog_skb_q, rskb); break; } else { - rskb = skb_dequeue(&iucv_sk(sk)->backlog_skb_q); + rskb = skb_dequeue(&iucv->backlog_skb_q); } } + if (skb_queue_empty(&iucv->backlog_skb_q)) { + spin_lock_bh(&iucv->message_q.lock); + if (!list_empty(&iucv->message_q.list)) + iucv_process_message_q(sk); + spin_unlock_bh(&iucv->message_q.lock); + } + } else skb_queue_head(&sk->sk_receive_queue, skb); @@ -972,99 +1069,44 @@ static void iucv_callback_connack(struct iucv_path *path, u8 ipuser[16]) sk->sk_state_change(sk); } -static int iucv_fragment_skb(struct sock *sk, struct sk_buff *skb, int len, - struct sk_buff_head *fragmented_skb_q) -{ - int dataleft, size, copied = 0; - struct sk_buff *nskb; - - dataleft = len; - while (dataleft) { - if (dataleft >= sk->sk_rcvbuf / 4) - size = sk->sk_rcvbuf / 4; - else - size = dataleft; - - nskb = alloc_skb(size, GFP_ATOMIC | GFP_DMA); - if (!nskb) - return -ENOMEM; - - memcpy(nskb->data, skb->data + copied, size); - copied += size; - dataleft -= size; - - skb_reset_transport_header(nskb); - skb_reset_network_header(nskb); - nskb->len = size; - - skb_queue_tail(fragmented_skb_q, nskb); - } - - return 0; -} - static void iucv_callback_rx(struct iucv_path *path, struct iucv_message *msg) { struct sock *sk = path->private; struct iucv_sock *iucv = iucv_sk(sk); - struct sk_buff *skb, *fskb; - struct sk_buff_head fragmented_skb_q; - int rc; - - skb_queue_head_init(&fragmented_skb_q); + struct sk_buff *skb; + struct sock_msg_q *save_msg; + int len; if (sk->sk_shutdown & RCV_SHUTDOWN) return; + if (!list_empty(&iucv->message_q.list) || + !skb_queue_empty(&iucv->backlog_skb_q)) + goto save_message; + + len = atomic_read(&sk->sk_rmem_alloc); + len += msg->length + sizeof(struct sk_buff); + if (len > sk->sk_rcvbuf) + goto save_message; + skb = alloc_skb(msg->length, GFP_ATOMIC | GFP_DMA); - if (!skb) { - iucv_path_sever(path, NULL); - return; - } + if (!skb) + goto save_message; - if (msg->flags & IPRMDATA) { - skb->data = NULL; - skb->len = 0; - } else { - rc = iucv_message_receive(path, msg, 0, skb->data, - msg->length, NULL); - if (rc) { - kfree_skb(skb); - return; - } - if (skb->truesize >= sk->sk_rcvbuf / 4) { - rc = iucv_fragment_skb(sk, skb, msg->length, - &fragmented_skb_q); - kfree_skb(skb); - skb = NULL; - if (rc) { - iucv_path_sever(path, NULL); - return; - } - } else { - skb_reset_transport_header(skb); - skb_reset_network_header(skb); - skb->len = msg->length; - } - } - /* Queue the fragmented skb */ - fskb = skb_dequeue(&fragmented_skb_q); - while (fskb) { - if (!skb_queue_empty(&iucv->backlog_skb_q)) - skb_queue_tail(&iucv->backlog_skb_q, fskb); - else if (sock_queue_rcv_skb(sk, fskb)) - skb_queue_tail(&iucv_sk(sk)->backlog_skb_q, fskb); - fskb = skb_dequeue(&fragmented_skb_q); - } + spin_lock(&iucv->message_q.lock); + iucv_process_message(sk, skb, path, msg); + spin_unlock(&iucv->message_q.lock); - /* Queue the original skb if it exists (was not fragmented) */ - if (skb) { - if (!skb_queue_empty(&iucv->backlog_skb_q)) - skb_queue_tail(&iucv_sk(sk)->backlog_skb_q, skb); - else if (sock_queue_rcv_skb(sk, skb)) - skb_queue_tail(&iucv_sk(sk)->backlog_skb_q, skb); - } + return; + +save_message: + save_msg = kzalloc(sizeof(struct sock_msg_q), GFP_ATOMIC | GFP_DMA); + save_msg->path = path; + save_msg->msg = *msg; + 
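
One thing worth noting in the save_message path just above: kzalloc() with GFP_ATOMIC can fail, and save_msg is dereferenced without a check. A minimal defensive sketch follows; the choice to simply drop the message on allocation failure is an assumption on my part, not something this hunk specifies.

save_msg = kzalloc(sizeof(struct sock_msg_q), GFP_ATOMIC | GFP_DMA);
if (!save_msg)
        return; /* drop; severing the path instead would also be plausible */
save_msg->path = path;
save_msg->msg = *msg;

spin_lock(&iucv->message_q.lock);
list_add_tail(&save_msg->list, &iucv->message_q.list);
spin_unlock(&iucv->message_q.lock);
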
spin_lock(&iucv->message_q.lock); + list_add_tail(&save_msg->list, &iucv->message_q.list); + spin_unlock(&iucv->message_q.lock); } static void iucv_callback_txdone(struct iucv_path *path, diff --git a/net/key/af_key.c b/net/key/af_key.c index 5502df115a633df76ea810bc6b92a8d0baa526cb..7969f8a716df6063dbaaeebad90630b6cd6472cd 100644 --- a/net/key/af_key.c +++ b/net/key/af_key.c @@ -26,8 +26,8 @@ #include #include #include +#include #include -#include #include @@ -136,11 +136,14 @@ static struct proto key_proto = { .obj_size = sizeof(struct pfkey_sock), }; -static int pfkey_create(struct socket *sock, int protocol) +static int pfkey_create(struct net *net, struct socket *sock, int protocol) { struct sock *sk; int err; + if (net != &init_net) + return -EAFNOSUPPORT; + if (!capable(CAP_NET_ADMIN)) return -EPERM; if (sock->type != SOCK_RAW) @@ -149,7 +152,7 @@ static int pfkey_create(struct socket *sock, int protocol) return -EPROTONOSUPPORT; err = -ENOMEM; - sk = sk_alloc(PF_KEY, GFP_KERNEL, &key_proto, 1); + sk = sk_alloc(net, PF_KEY, GFP_KERNEL, &key_proto, 1); if (sk == NULL) goto out; @@ -352,16 +355,14 @@ static int verify_address_len(void *p) switch (addr->sa_family) { case AF_INET: - len = sizeof(*sp) + sizeof(*sin) + (sizeof(uint64_t) - 1); - len /= sizeof(uint64_t); + len = DIV_ROUND_UP(sizeof(*sp) + sizeof(*sin), sizeof(uint64_t)); if (sp->sadb_address_len != len || sp->sadb_address_prefixlen > 32) return -EINVAL; break; #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) case AF_INET6: - len = sizeof(*sp) + sizeof(*sin6) + (sizeof(uint64_t) - 1); - len /= sizeof(uint64_t); + len = DIV_ROUND_UP(sizeof(*sp) + sizeof(*sin6), sizeof(uint64_t)); if (sp->sadb_address_len != len || sp->sadb_address_prefixlen > 128) return -EINVAL; @@ -386,14 +387,9 @@ static int verify_address_len(void *p) static inline int pfkey_sec_ctx_len(struct sadb_x_sec_ctx *sec_ctx) { - int len = 0; - - len += sizeof(struct sadb_x_sec_ctx); - len += sec_ctx->sadb_x_ctx_len; - len += sizeof(uint64_t) - 1; - len /= sizeof(uint64_t); - - return len; + return DIV_ROUND_UP(sizeof(struct sadb_x_sec_ctx) + + sec_ctx->sadb_x_ctx_len, + sizeof(uint64_t)); } static inline int verify_sec_ctx_len(void *p) @@ -659,7 +655,8 @@ static inline int pfkey_mode_to_xfrm(int mode) } } -static struct sk_buff * pfkey_xfrm_state2msg(struct xfrm_state *x, int add_keys, int hsc) +static struct sk_buff *__pfkey_xfrm_state2msg(struct xfrm_state *x, + int add_keys, int hsc) { struct sk_buff *skb; struct sadb_msg *hdr; @@ -1013,6 +1010,24 @@ static struct sk_buff * pfkey_xfrm_state2msg(struct xfrm_state *x, int add_keys, return skb; } + +static inline struct sk_buff *pfkey_xfrm_state2msg(struct xfrm_state *x) +{ + struct sk_buff *skb; + + spin_lock_bh(&x->lock); + skb = __pfkey_xfrm_state2msg(x, 1, 3); + spin_unlock_bh(&x->lock); + + return skb; +} + +static inline struct sk_buff *pfkey_xfrm_state2msg_expire(struct xfrm_state *x, + int hsc) +{ + return __pfkey_xfrm_state2msg(x, 0, hsc); +} + static struct xfrm_state * pfkey_msg2xfrm_state(struct sadb_msg *hdr, void **ext_hdrs) { @@ -1257,8 +1272,11 @@ static int pfkey_getspi(struct sock *sk, struct sk_buff *skb, struct sadb_msg *h struct sadb_x_sa2 *sa2; struct sadb_address *saddr, *daddr; struct sadb_msg *out_hdr; + struct sadb_spirange *range; struct xfrm_state *x = NULL; int mode; + int err; + u32 min_spi, max_spi; u32 reqid; u8 proto; unsigned short family; @@ -1313,25 +1331,17 @@ static int pfkey_getspi(struct sock *sk, struct sk_buff *skb, struct sadb_msg *h if (x == NULL) return 
-ENOENT; - resp_skb = ERR_PTR(-ENOENT); - - spin_lock_bh(&x->lock); - if (x->km.state != XFRM_STATE_DEAD) { - struct sadb_spirange *range = ext_hdrs[SADB_EXT_SPIRANGE-1]; - u32 min_spi, max_spi; + min_spi = 0x100; + max_spi = 0x0fffffff; - if (range != NULL) { - min_spi = range->sadb_spirange_min; - max_spi = range->sadb_spirange_max; - } else { - min_spi = 0x100; - max_spi = 0x0fffffff; - } - xfrm_alloc_spi(x, htonl(min_spi), htonl(max_spi)); - if (x->id.spi) - resp_skb = pfkey_xfrm_state2msg(x, 0, 3); + range = ext_hdrs[SADB_EXT_SPIRANGE-1]; + if (range) { + min_spi = range->sadb_spirange_min; + max_spi = range->sadb_spirange_max; } - spin_unlock_bh(&x->lock); + + err = xfrm_alloc_spi(x, min_spi, max_spi); + resp_skb = err ? ERR_PTR(err) : pfkey_xfrm_state2msg(x); if (IS_ERR(resp_skb)) { xfrm_state_put(x); @@ -1421,12 +1431,8 @@ static int key_notify_sa(struct xfrm_state *x, struct km_event *c) { struct sk_buff *skb; struct sadb_msg *hdr; - int hsc = 3; - - if (c->event == XFRM_MSG_DELSA) - hsc = 0; - skb = pfkey_xfrm_state2msg(x, 0, hsc); + skb = pfkey_xfrm_state2msg(x); if (IS_ERR(skb)) return PTR_ERR(skb); @@ -1461,8 +1467,8 @@ static int pfkey_add(struct sock *sk, struct sk_buff *skb, struct sadb_msg *hdr, else err = xfrm_state_update(x); - xfrm_audit_log(audit_get_loginuid(current->audit_context), 0, - AUDIT_MAC_IPSEC_ADDSA, err ? 0 : 1, NULL, x); + xfrm_audit_state_add(x, err ? 0 : 1, + audit_get_loginuid(current->audit_context), 0); if (err < 0) { x->km.state = XFRM_STATE_DEAD; @@ -1515,8 +1521,8 @@ static int pfkey_delete(struct sock *sk, struct sk_buff *skb, struct sadb_msg *h c.event = XFRM_MSG_DELSA; km_state_notify(x, &c); out: - xfrm_audit_log(audit_get_loginuid(current->audit_context), 0, - AUDIT_MAC_IPSEC_DELSA, err ? 0 : 1, NULL, x); + xfrm_audit_state_delete(x, err ? 0 : 1, + audit_get_loginuid(current->audit_context), 0); xfrm_state_put(x); return err; @@ -1538,7 +1544,7 @@ static int pfkey_get(struct sock *sk, struct sk_buff *skb, struct sadb_msg *hdr, if (x == NULL) return -ESRCH; - out_skb = pfkey_xfrm_state2msg(x, 1, 3); + out_skb = pfkey_xfrm_state2msg(x); proto = x->id.proto; xfrm_state_put(x); if (IS_ERR(out_skb)) @@ -1718,7 +1724,7 @@ static int dump_sa(struct xfrm_state *x, int count, void *ptr) struct sk_buff *out_skb; struct sadb_msg *out_hdr; - out_skb = pfkey_xfrm_state2msg(x, 1, 3); + out_skb = pfkey_xfrm_state2msg(x); if (IS_ERR(out_skb)) return PTR_ERR(out_skb); @@ -2268,8 +2274,8 @@ static int pfkey_spdadd(struct sock *sk, struct sk_buff *skb, struct sadb_msg *h err = xfrm_policy_insert(pol->sadb_x_policy_dir-1, xp, hdr->sadb_msg_type != SADB_X_SPDUPDATE); - xfrm_audit_log(audit_get_loginuid(current->audit_context), 0, - AUDIT_MAC_IPSEC_ADDSPD, err ? 0 : 1, xp, NULL); + xfrm_audit_policy_add(xp, err ? 0 : 1, + audit_get_loginuid(current->audit_context), 0); if (err) goto out; @@ -2352,8 +2358,8 @@ static int pfkey_spddelete(struct sock *sk, struct sk_buff *skb, struct sadb_msg if (xp == NULL) return -ENOENT; - xfrm_audit_log(audit_get_loginuid(current->audit_context), 0, - AUDIT_MAC_IPSEC_DELSPD, err ? 0 : 1, xp, NULL); + xfrm_audit_policy_delete(xp, err ? 0 : 1, + audit_get_loginuid(current->audit_context), 0); if (err) goto out; @@ -2613,8 +2619,8 @@ static int pfkey_spdget(struct sock *sk, struct sk_buff *skb, struct sadb_msg *h return -ENOENT; if (delete) { - xfrm_audit_log(audit_get_loginuid(current->audit_context), 0, - AUDIT_MAC_IPSEC_DELSPD, err ? 0 : 1, xp, NULL); + xfrm_audit_policy_delete(xp, err ? 
0 : 1, + audit_get_loginuid(current->audit_context), 0); if (err) goto out; @@ -2919,7 +2925,7 @@ static int key_notify_sa_expire(struct xfrm_state *x, struct km_event *c) else hsc = 1; - out_skb = pfkey_xfrm_state2msg(x, 0, hsc); + out_skb = pfkey_xfrm_state2msg_expire(x, hsc); if (IS_ERR(out_skb)) return PTR_ERR(out_skb); @@ -3784,7 +3790,7 @@ static struct xfrm_mgr pfkeyv2_mgr = static void __exit ipsec_pfkey_exit(void) { xfrm_unregister_km(&pfkeyv2_mgr); - remove_proc_entry("net/pfkey", NULL); + remove_proc_entry("pfkey", init_net.proc_net); sock_unregister(PF_KEY); proto_unregister(&key_proto); } @@ -3801,7 +3807,7 @@ static int __init ipsec_pfkey_init(void) goto out_unregister_key_proto; #ifdef CONFIG_PROC_FS err = -ENOMEM; - if (create_proc_read_entry("net/pfkey", 0, NULL, pfkey_read_proc, NULL) == NULL) + if (create_proc_read_entry("pfkey", 0, init_net.proc_net, pfkey_read_proc, NULL) == NULL) goto out_sock_unregister; #endif err = xfrm_register_km(&pfkeyv2_mgr); diff --git a/net/llc/af_llc.c b/net/llc/af_llc.c index 6b8a103cf9e66f198984fed8656ddacf9781718a..49eacba824df5bfdf1905c1df6e5503eaf15326d 100644 --- a/net/llc/af_llc.c +++ b/net/llc/af_llc.c @@ -150,14 +150,17 @@ static struct proto llc_proto = { * socket type we have available. * Returns 0 upon success, negative upon failure. */ -static int llc_ui_create(struct socket *sock, int protocol) +static int llc_ui_create(struct net *net, struct socket *sock, int protocol) { struct sock *sk; int rc = -ESOCKTNOSUPPORT; + if (net != &init_net) + return -EAFNOSUPPORT; + if (likely(sock->type == SOCK_DGRAM || sock->type == SOCK_STREAM)) { rc = -ENOMEM; - sk = llc_sk_alloc(PF_LLC, GFP_KERNEL, &llc_proto); + sk = llc_sk_alloc(net, PF_LLC, GFP_KERNEL, &llc_proto); if (sk) { rc = 0; llc_ui_sk_init(sock, sk); @@ -249,7 +252,7 @@ static int llc_ui_autobind(struct socket *sock, struct sockaddr_llc *addr) if (!sock_flag(sk, SOCK_ZAPPED)) goto out; rc = -ENODEV; - llc->dev = dev_getfirstbyhwtype(addr->sllc_arphrd); + llc->dev = dev_getfirstbyhwtype(&init_net, addr->sllc_arphrd); if (!llc->dev) goto out; rc = -EUSERS; @@ -300,7 +303,7 @@ static int llc_ui_bind(struct socket *sock, struct sockaddr *uaddr, int addrlen) goto out; rc = -ENODEV; rtnl_lock(); - llc->dev = dev_getbyhwaddr(addr->sllc_arphrd, addr->sllc_mac); + llc->dev = dev_getbyhwaddr(&init_net, addr->sllc_arphrd, addr->sllc_mac); rtnl_unlock(); if (!llc->dev) goto out; diff --git a/net/llc/llc_conn.c b/net/llc/llc_conn.c index 3b8cfbe029a74ba8d47fb24bd1d1aad8f952d351..8ebc2769dfdabc88c288468002148d5c974fd68f 100644 --- a/net/llc/llc_conn.c +++ b/net/llc/llc_conn.c @@ -700,7 +700,7 @@ static struct sock *llc_create_incoming_sock(struct sock *sk, struct llc_addr *saddr, struct llc_addr *daddr) { - struct sock *newsk = llc_sk_alloc(sk->sk_family, GFP_ATOMIC, + struct sock *newsk = llc_sk_alloc(sk->sk_net, sk->sk_family, GFP_ATOMIC, sk->sk_prot); struct llc_sock *newllc, *llc = llc_sk(sk); @@ -867,9 +867,9 @@ static void llc_sk_init(struct sock* sk) * Allocates a LLC sock and initializes it. 
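
The PF_KEY, af_iucv and LLC changes above all follow the same network-namespace plumbing pattern used throughout this series: socket create handlers gain a struct net * argument, refuse anything other than init_net for now, and thread the namespace down to sk_alloc(). A condensed sketch of that shape is below; PF_EXAMPLE and example_proto are placeholders, not symbols from this patch.

static int example_create(struct net *net, struct socket *sock, int protocol)
{
        struct sock *sk;

        /* only the initial namespace is wired up at this point */
        if (net != &init_net)
                return -EAFNOSUPPORT;

        /* sk_alloc() now takes the namespace as its first argument */
        sk = sk_alloc(net, PF_EXAMPLE, GFP_KERNEL, &example_proto, 1);
        if (!sk)
                return -ENOMEM;

        sock_init_data(sock, sk);
        return 0;
}
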
Returns the new LLC sock * or %NULL if there's no memory available for one */ -struct sock *llc_sk_alloc(int family, gfp_t priority, struct proto *prot) +struct sock *llc_sk_alloc(struct net *net, int family, gfp_t priority, struct proto *prot) { - struct sock *sk = sk_alloc(family, priority, prot, 1); + struct sock *sk = sk_alloc(net, family, priority, prot, 1); if (!sk) goto out; diff --git a/net/llc/llc_core.c b/net/llc/llc_core.c index d4b13a031fd5e0375f3f76c74eb7c676c894fc4b..248b5903bb1348e07f594e5f54119963de82f365 100644 --- a/net/llc/llc_core.c +++ b/net/llc/llc_core.c @@ -19,6 +19,7 @@ #include #include #include +#include #include LIST_HEAD(llc_sap_list); @@ -162,7 +163,7 @@ static int __init llc_init(void) { struct net_device *dev; - dev = first_net_device(); + dev = first_net_device(&init_net); if (dev != NULL) dev = next_net_device(dev); diff --git a/net/llc/llc_input.c b/net/llc/llc_input.c index 099ed8fec1454b3f38822f0e3d8e0870108e52d7..c40c9b2a345aca6058d222ad662f4a830c943d7e 100644 --- a/net/llc/llc_input.c +++ b/net/llc/llc_input.c @@ -12,6 +12,7 @@ * See the GNU General Public License for more details. */ #include +#include #include #include #include @@ -145,6 +146,9 @@ int llc_rcv(struct sk_buff *skb, struct net_device *dev, int (*rcv)(struct sk_buff *, struct net_device *, struct packet_type *, struct net_device *); + if (dev->nd_net != &init_net) + goto drop; + /* * When the interface is in promisc. mode, drop all the crap that it * receives, do not try to analyse it. diff --git a/net/llc/llc_proc.c b/net/llc/llc_proc.c index 49be6c902c8326790898fc982b75ae3b7ca4874c..cb34bc0518e80acf384e10cdcdf58968ab61b215 100644 --- a/net/llc/llc_proc.c +++ b/net/llc/llc_proc.c @@ -17,6 +17,7 @@ #include #include #include +#include #include #include #include @@ -24,10 +25,10 @@ #include #include -static void llc_ui_format_mac(struct seq_file *seq, unsigned char *mac) +static void llc_ui_format_mac(struct seq_file *seq, u8 *addr) { - seq_printf(seq, "%02X:%02X:%02X:%02X:%02X:%02X", - mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]); + DECLARE_MAC_BUF(mac); + seq_printf(seq, "%s", print_mac(mac, addr)); } static struct sock *llc_get_sk_idx(loff_t pos) @@ -127,8 +128,10 @@ static int llc_seq_socket_show(struct seq_file *seq, void *v) if (llc->dev) llc_ui_format_mac(seq, llc->dev->dev_addr); - else - seq_printf(seq, "00:00:00:00:00:00"); + else { + u8 addr[6] = {0,0,0,0,0,0}; + llc_ui_format_mac(seq, addr); + } seq_printf(seq, "@%02X ", llc->sap->laddr.lsap); llc_ui_format_mac(seq, llc->daddr.mac); seq_printf(seq, "@%02X %8d %8d %2d %3d %4d\n", llc->daddr.lsap, @@ -231,7 +234,7 @@ int __init llc_proc_init(void) int rc = -ENOMEM; struct proc_dir_entry *p; - llc_proc_dir = proc_mkdir("llc", proc_net); + llc_proc_dir = proc_mkdir("llc", init_net.proc_net); if (!llc_proc_dir) goto out; llc_proc_dir->owner = THIS_MODULE; @@ -254,7 +257,7 @@ int __init llc_proc_init(void) out_core: remove_proc_entry("socket", llc_proc_dir); out_socket: - remove_proc_entry("llc", proc_net); + remove_proc_entry("llc", init_net.proc_net); goto out; } @@ -262,5 +265,5 @@ void llc_proc_exit(void) { remove_proc_entry("socket", llc_proc_dir); remove_proc_entry("core", llc_proc_dir); - remove_proc_entry("llc", proc_net); + remove_proc_entry("llc", init_net.proc_net); } diff --git a/net/mac80211/Makefile b/net/mac80211/Makefile index a9c2d0787d4aee8feafd7e935e06514ce469bb55..219cd9f9341fe099bc0ce5ba69c2da7442339ab8 100644 --- a/net/mac80211/Makefile +++ b/net/mac80211/Makefile @@ -2,6 +2,7 @@ obj-$(CONFIG_MAC80211) += 
mac80211.o rc80211_simple.o mac80211-objs-$(CONFIG_MAC80211_LEDS) += ieee80211_led.o mac80211-objs-$(CONFIG_MAC80211_DEBUGFS) += debugfs.o debugfs_sta.o debugfs_netdev.o debugfs_key.o +mac80211-objs-$(CONFIG_NET_SCHED) += wme.o mac80211-objs := \ ieee80211.o \ @@ -16,6 +17,10 @@ mac80211-objs := \ regdomain.o \ tkip.o \ aes_ccm.o \ - wme.o \ - ieee80211_cfg.o \ + cfg.o \ + rx.o \ + tx.o \ + key.o \ + util.o \ + event.o \ $(mac80211-objs-y) diff --git a/net/mac80211/aes_ccm.c b/net/mac80211/aes_ccm.c index e55569bee7d081c6a827883cbd900784a7e27066..bf7ba128b963a49c949b9e240209978dd691d2ac 100644 --- a/net/mac80211/aes_ccm.c +++ b/net/mac80211/aes_ccm.c @@ -7,6 +7,7 @@ * published by the Free Software Foundation. */ +#include #include #include #include @@ -63,7 +64,7 @@ void ieee80211_aes_ccm_encrypt(struct crypto_cipher *tfm, u8 *scratch, s_0 = scratch + AES_BLOCK_LEN; e = scratch + 2 * AES_BLOCK_LEN; - num_blocks = (data_len + AES_BLOCK_LEN - 1) / AES_BLOCK_LEN; + num_blocks = DIV_ROUND_UP(data_len, AES_BLOCK_LEN); last_len = data_len % AES_BLOCK_LEN; aes_ccm_prepare(tfm, b_0, aad, b, s_0, b); @@ -102,7 +103,7 @@ int ieee80211_aes_ccm_decrypt(struct crypto_cipher *tfm, u8 *scratch, s_0 = scratch + AES_BLOCK_LEN; a = scratch + 2 * AES_BLOCK_LEN; - num_blocks = (data_len + AES_BLOCK_LEN - 1) / AES_BLOCK_LEN; + num_blocks = DIV_ROUND_UP(data_len, AES_BLOCK_LEN); last_len = data_len % AES_BLOCK_LEN; aes_ccm_prepare(tfm, b_0, aad, b, s_0, a); diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c new file mode 100644 index 0000000000000000000000000000000000000000..9e2bc1fd0237dd83dc1bf923c3cb773bce79cf7d --- /dev/null +++ b/net/mac80211/cfg.c @@ -0,0 +1,106 @@ +/* + * mac80211 configuration hooks for cfg80211 + * + * Copyright 2006 Johannes Berg + * + * This file is GPLv2 as found in COPYING. 
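
For reference, the DIV_ROUND_UP() conversions in af_key.c and aes_ccm.c above are behaviour-preserving: DIV_ROUND_UP(n, d) from linux/kernel.h is ((n) + (d) - 1) / (d), i.e. exactly the open-coded round-up it replaces. A worked example with the values used in aes_ccm.c:

/* data_len == 30, AES_BLOCK_LEN == 16 */
num_blocks = DIV_ROUND_UP(data_len, AES_BLOCK_LEN); /* (30 + 15) / 16 == 2 */
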
+ */ + +#include +#include +#include +#include +#include "ieee80211_i.h" +#include "cfg.h" + +static enum ieee80211_if_types +nl80211_type_to_mac80211_type(enum nl80211_iftype type) +{ + switch (type) { + case NL80211_IFTYPE_UNSPECIFIED: + return IEEE80211_IF_TYPE_STA; + case NL80211_IFTYPE_ADHOC: + return IEEE80211_IF_TYPE_IBSS; + case NL80211_IFTYPE_STATION: + return IEEE80211_IF_TYPE_STA; + case NL80211_IFTYPE_MONITOR: + return IEEE80211_IF_TYPE_MNTR; + default: + return IEEE80211_IF_TYPE_INVALID; + } +} + +static int ieee80211_add_iface(struct wiphy *wiphy, char *name, + enum nl80211_iftype type) +{ + struct ieee80211_local *local = wiphy_priv(wiphy); + enum ieee80211_if_types itype; + + if (unlikely(local->reg_state != IEEE80211_DEV_REGISTERED)) + return -ENODEV; + + itype = nl80211_type_to_mac80211_type(type); + if (itype == IEEE80211_IF_TYPE_INVALID) + return -EINVAL; + + return ieee80211_if_add(local->mdev, name, NULL, itype); +} + +static int ieee80211_del_iface(struct wiphy *wiphy, int ifindex) +{ + struct ieee80211_local *local = wiphy_priv(wiphy); + struct net_device *dev; + char *name; + + if (unlikely(local->reg_state != IEEE80211_DEV_REGISTERED)) + return -ENODEV; + + /* we're under RTNL */ + dev = __dev_get_by_index(&init_net, ifindex); + if (!dev) + return 0; + + name = dev->name; + + return ieee80211_if_remove(local->mdev, name, -1); +} + +static int ieee80211_change_iface(struct wiphy *wiphy, int ifindex, + enum nl80211_iftype type) +{ + struct ieee80211_local *local = wiphy_priv(wiphy); + struct net_device *dev; + enum ieee80211_if_types itype; + struct ieee80211_sub_if_data *sdata; + + if (unlikely(local->reg_state != IEEE80211_DEV_REGISTERED)) + return -ENODEV; + + /* we're under RTNL */ + dev = __dev_get_by_index(&init_net, ifindex); + if (!dev) + return -ENODEV; + + if (netif_running(dev)) + return -EBUSY; + + itype = nl80211_type_to_mac80211_type(type); + if (itype == IEEE80211_IF_TYPE_INVALID) + return -EINVAL; + + sdata = IEEE80211_DEV_TO_SUB_IF(dev); + + if (sdata->type == IEEE80211_IF_TYPE_VLAN) + return -EOPNOTSUPP; + + ieee80211_if_reinit(dev); + ieee80211_if_set_type(dev, itype); + + return 0; +} + +struct cfg80211_ops mac80211_config_ops = { + .add_virtual_intf = ieee80211_add_iface, + .del_virtual_intf = ieee80211_del_iface, + .change_virtual_intf = ieee80211_change_iface, +}; diff --git a/net/mac80211/ieee80211_cfg.h b/net/mac80211/cfg.h similarity index 55% rename from net/mac80211/ieee80211_cfg.h rename to net/mac80211/cfg.h index 85ed2c924878cf4762558977dace9525e3c50859..7d7879f5b00b9d9fb7aa42ca8695d04e330dadb0 100644 --- a/net/mac80211/ieee80211_cfg.h +++ b/net/mac80211/cfg.h @@ -1,9 +1,9 @@ /* * mac80211 configuration hooks for cfg80211 */ -#ifndef __IEEE80211_CFG_H -#define __IEEE80211_CFG_H +#ifndef __CFG_H +#define __CFG_H extern struct cfg80211_ops mac80211_config_ops; -#endif /* __IEEE80211_CFG_H */ +#endif /* __CFG_H */ diff --git a/net/mac80211/debugfs.c b/net/mac80211/debugfs.c index 476c8486f789819518ccdc47b7dfb3c6856f8397..60514b2c97b99c845eac2020a9f865557a59f170 100644 --- a/net/mac80211/debugfs.c +++ b/net/mac80211/debugfs.c @@ -28,8 +28,6 @@ static const char *ieee80211_mode_str(int mode) return "IEEE 802.11b"; case MODE_IEEE80211G: return "IEEE 802.11g"; - case MODE_ATHEROS_TURBO: - return "Atheros Turbo (5 GHz)"; default: return "UNKNOWN"; } @@ -86,16 +84,12 @@ DEBUGFS_READONLY_FILE(channel, 20, "%d", local->hw.conf.channel); DEBUGFS_READONLY_FILE(frequency, 20, "%d", local->hw.conf.freq); -DEBUGFS_READONLY_FILE(radar_detect, 20, "%d", 
- local->hw.conf.radar_detect); DEBUGFS_READONLY_FILE(antenna_sel_tx, 20, "%d", local->hw.conf.antenna_sel_tx); DEBUGFS_READONLY_FILE(antenna_sel_rx, 20, "%d", local->hw.conf.antenna_sel_rx); DEBUGFS_READONLY_FILE(bridge_packets, 20, "%d", local->bridge_packets); -DEBUGFS_READONLY_FILE(key_tx_rx_threshold, 20, "%d", - local->key_tx_rx_threshold); DEBUGFS_READONLY_FILE(rts_threshold, 20, "%d", local->rts_threshold); DEBUGFS_READONLY_FILE(fragmentation_threshold, 20, "%d", @@ -110,9 +104,6 @@ DEBUGFS_READONLY_FILE(mode, 20, "%s", ieee80211_mode_str(local->hw.conf.phymode)); DEBUGFS_READONLY_FILE(wep_iv, 20, "%#06x", local->wep_iv & 0xffffff); -DEBUGFS_READONLY_FILE(tx_power_reduction, 20, "%d.%d dBm", - local->hw.conf.tx_power_reduction / 10, - local->hw.conf.tx_power_reduction % 10); DEBUGFS_READONLY_FILE(rate_ctrl_alg, 100, "%s", local->rate_ctrl ? local->rate_ctrl->ops->name : ""); @@ -305,11 +296,9 @@ void debugfs_hw_add(struct ieee80211_local *local) DEBUGFS_ADD(channel); DEBUGFS_ADD(frequency); - DEBUGFS_ADD(radar_detect); DEBUGFS_ADD(antenna_sel_tx); DEBUGFS_ADD(antenna_sel_rx); DEBUGFS_ADD(bridge_packets); - DEBUGFS_ADD(key_tx_rx_threshold); DEBUGFS_ADD(rts_threshold); DEBUGFS_ADD(fragmentation_threshold); DEBUGFS_ADD(short_retry_limit); @@ -317,7 +306,6 @@ void debugfs_hw_add(struct ieee80211_local *local) DEBUGFS_ADD(total_ps_buffered); DEBUGFS_ADD(mode); DEBUGFS_ADD(wep_iv); - DEBUGFS_ADD(tx_power_reduction); DEBUGFS_ADD(modes); statsd = debugfs_create_dir("statistics", phyd); @@ -370,11 +358,9 @@ void debugfs_hw_del(struct ieee80211_local *local) { DEBUGFS_DEL(channel); DEBUGFS_DEL(frequency); - DEBUGFS_DEL(radar_detect); DEBUGFS_DEL(antenna_sel_tx); DEBUGFS_DEL(antenna_sel_rx); DEBUGFS_DEL(bridge_packets); - DEBUGFS_DEL(key_tx_rx_threshold); DEBUGFS_DEL(rts_threshold); DEBUGFS_DEL(fragmentation_threshold); DEBUGFS_DEL(short_retry_limit); @@ -382,7 +368,6 @@ void debugfs_hw_del(struct ieee80211_local *local) DEBUGFS_DEL(total_ps_buffered); DEBUGFS_DEL(mode); DEBUGFS_DEL(wep_iv); - DEBUGFS_DEL(tx_power_reduction); DEBUGFS_DEL(modes); DEBUGFS_STATS_DEL(transmitted_fragment_count); diff --git a/net/mac80211/debugfs_key.c b/net/mac80211/debugfs_key.c index 7d56dc9e7326fb32deb9fb19b9a84afb4b191b22..c881524c87251ca39a02717663538f381248a836 100644 --- a/net/mac80211/debugfs_key.c +++ b/net/mac80211/debugfs_key.c @@ -14,17 +14,18 @@ #include "debugfs.h" #include "debugfs_key.h" -#define KEY_READ(name, buflen, format_string) \ +#define KEY_READ(name, prop, buflen, format_string) \ static ssize_t key_##name##_read(struct file *file, \ char __user *userbuf, \ size_t count, loff_t *ppos) \ { \ char buf[buflen]; \ struct ieee80211_key *key = file->private_data; \ - int res = scnprintf(buf, buflen, format_string, key->name); \ + int res = scnprintf(buf, buflen, format_string, key->prop); \ return simple_read_from_buffer(userbuf, count, ppos, buf, res); \ } -#define KEY_READ_D(name) KEY_READ(name, 20, "%d\n") +#define KEY_READ_D(name) KEY_READ(name, name, 20, "%d\n") +#define KEY_READ_X(name) KEY_READ(name, name, 20, "0x%x\n") #define KEY_OPS(name) \ static const struct file_operations key_ ##name## _ops = { \ @@ -36,11 +37,27 @@ static const struct file_operations key_ ##name## _ops = { \ KEY_READ_##format(name) \ KEY_OPS(name) -KEY_FILE(keylen, D); -KEY_FILE(force_sw_encrypt, D); -KEY_FILE(keyidx, D); -KEY_FILE(hw_key_idx, D); +#define KEY_CONF_READ(name, buflen, format_string) \ + KEY_READ(conf_##name, conf.name, buflen, format_string) +#define KEY_CONF_READ_D(name) KEY_CONF_READ(name, 20, 
"%d\n") + +#define KEY_CONF_OPS(name) \ +static const struct file_operations key_ ##name## _ops = { \ + .read = key_conf_##name##_read, \ + .open = mac80211_open_file_generic, \ +} + +#define KEY_CONF_FILE(name, format) \ + KEY_CONF_READ_##format(name) \ + KEY_CONF_OPS(name) + +KEY_CONF_FILE(keylen, D); +KEY_CONF_FILE(keyidx, D); +KEY_CONF_FILE(hw_key_idx, D); +KEY_FILE(flags, X); KEY_FILE(tx_rx_count, D); +KEY_READ(ifindex, sdata->dev->ifindex, 20, "%d\n"); +KEY_OPS(ifindex); static ssize_t key_algorithm_read(struct file *file, char __user *userbuf, @@ -49,7 +66,7 @@ static ssize_t key_algorithm_read(struct file *file, char *alg; struct ieee80211_key *key = file->private_data; - switch (key->alg) { + switch (key->conf.alg) { case ALG_WEP: alg = "WEP\n"; break; @@ -74,17 +91,20 @@ static ssize_t key_tx_spec_read(struct file *file, char __user *userbuf, int len; struct ieee80211_key *key = file->private_data; - switch (key->alg) { + switch (key->conf.alg) { case ALG_WEP: len = scnprintf(buf, sizeof(buf), "\n"); + break; case ALG_TKIP: len = scnprintf(buf, sizeof(buf), "%08x %04x\n", key->u.tkip.iv32, key->u.tkip.iv16); + break; case ALG_CCMP: tpn = key->u.ccmp.tx_pn; len = scnprintf(buf, sizeof(buf), "%02x%02x%02x%02x%02x%02x\n", tpn[0], tpn[1], tpn[2], tpn[3], tpn[4], tpn[5]); + break; default: return 0; } @@ -100,9 +120,10 @@ static ssize_t key_rx_spec_read(struct file *file, char __user *userbuf, int i, len; const u8 *rpn; - switch (key->alg) { + switch (key->conf.alg) { case ALG_WEP: len = scnprintf(buf, sizeof(buf), "\n"); + break; case ALG_TKIP: for (i = 0; i < NUM_RX_DATA_QUEUES; i++) p += scnprintf(p, sizeof(buf)+buf-p, @@ -110,6 +131,7 @@ static ssize_t key_rx_spec_read(struct file *file, char __user *userbuf, key->u.tkip.iv32_rx[i], key->u.tkip.iv16_rx[i]); len = p - buf; + break; case ALG_CCMP: for (i = 0; i < NUM_RX_DATA_QUEUES; i++) { rpn = key->u.ccmp.rx_pn[i]; @@ -119,6 +141,7 @@ static ssize_t key_rx_spec_read(struct file *file, char __user *userbuf, rpn[3], rpn[4], rpn[5]); } len = p - buf; + break; default: return 0; } @@ -133,7 +156,7 @@ static ssize_t key_replays_read(struct file *file, char __user *userbuf, char buf[20]; int len; - if (key->alg != ALG_CCMP) + if (key->conf.alg != ALG_CCMP) return 0; len = scnprintf(buf, sizeof(buf), "%u\n", key->u.ccmp.replays); return simple_read_from_buffer(userbuf, count, ppos, buf, len); @@ -144,12 +167,12 @@ static ssize_t key_key_read(struct file *file, char __user *userbuf, size_t count, loff_t *ppos) { struct ieee80211_key *key = file->private_data; - int i, res, bufsize = 2*key->keylen+2; + int i, res, bufsize = 2 * key->conf.keylen + 2; char *buf = kmalloc(bufsize, GFP_KERNEL); char *p = buf; - for (i = 0; i < key->keylen; i++) - p += scnprintf(p, bufsize+buf-p, "%02x", key->key[i]); + for (i = 0; i < key->conf.keylen; i++) + p += scnprintf(p, bufsize + buf - p, "%02x", key->conf.key[i]); p += scnprintf(p, bufsize+buf-p, "\n"); res = simple_read_from_buffer(userbuf, count, ppos, buf, p - buf); kfree(buf); @@ -164,12 +187,14 @@ KEY_OPS(key); void ieee80211_debugfs_key_add(struct ieee80211_local *local, struct ieee80211_key *key) { + static int keycount; char buf[20]; if (!local->debugfs.keys) return; - sprintf(buf, "%d", key->keyidx); + sprintf(buf, "%d", keycount); + keycount++; key->debugfs.dir = debugfs_create_dir(buf, local->debugfs.keys); @@ -177,7 +202,7 @@ void ieee80211_debugfs_key_add(struct ieee80211_local *local, return; DEBUGFS_ADD(keylen); - DEBUGFS_ADD(force_sw_encrypt); + DEBUGFS_ADD(flags); DEBUGFS_ADD(keyidx); 
DEBUGFS_ADD(hw_key_idx); DEBUGFS_ADD(tx_rx_count); @@ -186,6 +211,7 @@ void ieee80211_debugfs_key_add(struct ieee80211_local *local, DEBUGFS_ADD(rx_spec); DEBUGFS_ADD(replays); DEBUGFS_ADD(key); + DEBUGFS_ADD(ifindex); }; #define DEBUGFS_DEL(name) \ @@ -197,7 +223,7 @@ void ieee80211_debugfs_key_remove(struct ieee80211_key *key) return; DEBUGFS_DEL(keylen); - DEBUGFS_DEL(force_sw_encrypt); + DEBUGFS_DEL(flags); DEBUGFS_DEL(keyidx); DEBUGFS_DEL(hw_key_idx); DEBUGFS_DEL(tx_rx_count); @@ -206,6 +232,7 @@ void ieee80211_debugfs_key_remove(struct ieee80211_key *key) DEBUGFS_DEL(rx_spec); DEBUGFS_DEL(replays); DEBUGFS_DEL(key); + DEBUGFS_DEL(ifindex); debugfs_remove(key->debugfs.stalink); key->debugfs.stalink = NULL; @@ -219,7 +246,7 @@ void ieee80211_debugfs_key_add_default(struct ieee80211_sub_if_data *sdata) if (!sdata->debugfsdir) return; - sprintf(buf, "../keys/%d", sdata->default_key->keyidx); + sprintf(buf, "../keys/%d", sdata->default_key->conf.keyidx); sdata->debugfs.default_key = debugfs_create_symlink("default_key", sdata->debugfsdir, buf); } @@ -235,11 +262,12 @@ void ieee80211_debugfs_key_sta_link(struct ieee80211_key *key, struct sta_info *sta) { char buf[50]; + DECLARE_MAC_BUF(mac); if (!key->debugfs.dir) return; - sprintf(buf, "../sta/" MAC_FMT, MAC_ARG(sta->addr)); + sprintf(buf, "../../stations/%s", print_mac(mac, sta->addr)); key->debugfs.stalink = debugfs_create_symlink("station", key->debugfs.dir, buf); } diff --git a/net/mac80211/debugfs_netdev.c b/net/mac80211/debugfs_netdev.c index 095be91829ca63ba2d660ba2a0b76f6d3f340414..f0e6ab7eb624dbd937b5e272b2c3e285c4140ad3 100644 --- a/net/mac80211/debugfs_netdev.c +++ b/net/mac80211/debugfs_netdev.c @@ -66,7 +66,8 @@ static ssize_t ieee80211_if_fmt_##name( \ const struct ieee80211_sub_if_data *sdata, char *buf, \ int buflen) \ { \ - return scnprintf(buf, buflen, MAC_FMT "\n", MAC_ARG(sdata->field));\ + DECLARE_MAC_BUF(mac); \ + return scnprintf(buf, buflen, "%s\n", print_mac(mac, sdata->field));\ } #define __IEEE80211_IF_FILE(name) \ @@ -112,13 +113,13 @@ static ssize_t ieee80211_if_fmt_flags( const struct ieee80211_sub_if_data *sdata, char *buf, int buflen) { return scnprintf(buf, buflen, "%s%s%s%s%s%s%s\n", - sdata->u.sta.ssid_set ? "SSID\n" : "", - sdata->u.sta.bssid_set ? "BSSID\n" : "", - sdata->u.sta.prev_bssid_set ? "prev BSSID\n" : "", - sdata->u.sta.authenticated ? "AUTH\n" : "", - sdata->u.sta.associated ? "ASSOC\n" : "", - sdata->u.sta.probereq_poll ? "PROBEREQ POLL\n" : "", - sdata->use_protection ? "CTS prot\n" : ""); + sdata->u.sta.flags & IEEE80211_STA_SSID_SET ? "SSID\n" : "", + sdata->u.sta.flags & IEEE80211_STA_BSSID_SET ? "BSSID\n" : "", + sdata->u.sta.flags & IEEE80211_STA_PREV_BSSID_SET ? "prev BSSID\n" : "", + sdata->u.sta.flags & IEEE80211_STA_AUTHENTICATED ? "AUTH\n" : "", + sdata->u.sta.flags & IEEE80211_STA_ASSOCIATED ? "ASSOC\n" : "", + sdata->u.sta.flags & IEEE80211_STA_PROBEREQ_POLL ? "PROBEREQ POLL\n" : "", + sdata->flags & IEEE80211_SDATA_USE_PROTECTION ? 
"CTS prot\n" : ""); } __IEEE80211_IF_FILE(flags); @@ -161,23 +162,6 @@ __IEEE80211_IF_FILE(beacon_tail_len); /* WDS attributes */ IEEE80211_IF_FILE(peer, u.wds.remote_addr, MAC); -/* VLAN attributes */ -IEEE80211_IF_FILE(vlan_id, u.vlan.id, DEC); - -/* MONITOR attributes */ -static ssize_t ieee80211_if_fmt_mode( - const struct ieee80211_sub_if_data *sdata, char *buf, int buflen) -{ - struct ieee80211_local *local = sdata->local; - - return scnprintf(buf, buflen, "%s\n", - ((local->hw.flags & IEEE80211_HW_MONITOR_DURING_OPER) || - local->open_count == local->monitors) ? - "hard" : "soft"); -} -__IEEE80211_IF_FILE(mode); - - #define DEBUGFS_ADD(name, type)\ sdata->debugfs.type.name = debugfs_create_file(#name, 0444,\ sdata->debugfsdir, sdata, &name##_ops); @@ -236,12 +220,10 @@ static void add_vlan_files(struct ieee80211_sub_if_data *sdata) DEBUGFS_ADD(drop_unencrypted, vlan); DEBUGFS_ADD(eapol, vlan); DEBUGFS_ADD(ieee8021_x, vlan); - DEBUGFS_ADD(vlan_id, vlan); } static void add_monitor_files(struct ieee80211_sub_if_data *sdata) { - DEBUGFS_ADD(mode, monitor); } static void add_files(struct ieee80211_sub_if_data *sdata) @@ -331,12 +313,10 @@ static void del_vlan_files(struct ieee80211_sub_if_data *sdata) DEBUGFS_DEL(drop_unencrypted, vlan); DEBUGFS_DEL(eapol, vlan); DEBUGFS_DEL(ieee8021_x, vlan); - DEBUGFS_DEL(vlan_id, vlan); } static void del_monitor_files(struct ieee80211_sub_if_data *sdata) { - DEBUGFS_DEL(mode, monitor); } static void del_files(struct ieee80211_sub_if_data *sdata, int type) diff --git a/net/mac80211/debugfs_sta.c b/net/mac80211/debugfs_sta.c index da34ea70276f828d47f69d790e8b201bcc08937c..8f5944c53d4e36166cbf09c0afce26e8e6392c76 100644 --- a/net/mac80211/debugfs_sta.c +++ b/net/mac80211/debugfs_sta.c @@ -60,9 +60,7 @@ static const struct file_operations sta_ ##name## _ops = { \ STA_OPS(name) STA_FILE(aid, aid, D); -STA_FILE(key_idx_compression, key_idx_compression, D); STA_FILE(dev, dev->name, S); -STA_FILE(vlan_id, vlan_id, D); STA_FILE(rx_packets, rx_packets, LU); STA_FILE(tx_packets, tx_packets, LU); STA_FILE(rx_bytes, rx_bytes, LU); @@ -204,15 +202,15 @@ STA_OPS(wme_tx_queue); void ieee80211_sta_debugfs_add(struct sta_info *sta) { - char buf[3*6]; struct dentry *stations_dir = sta->local->debugfs.stations; + DECLARE_MAC_BUF(mac); if (!stations_dir) return; - sprintf(buf, MAC_FMT, MAC_ARG(sta->addr)); + print_mac(mac, sta->addr); - sta->debugfs.dir = debugfs_create_dir(buf, stations_dir); + sta->debugfs.dir = debugfs_create_dir(mac, stations_dir); if (!sta->debugfs.dir) return; diff --git a/net/mac80211/event.c b/net/mac80211/event.c new file mode 100644 index 0000000000000000000000000000000000000000..2280f40b4560ba3c963cc84f35e73a543bb47064 --- /dev/null +++ b/net/mac80211/event.c @@ -0,0 +1,43 @@ +/* + * Copyright 2007 Johannes Berg + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ * + * mac80211 - events + */ + +#include +#include +#include "ieee80211_i.h" + +/* + * indicate a failed Michael MIC to userspace; the passed packet + * (in the variable hdr) must be long enough to extract the TKIP + * fields like TSC + */ +void mac80211_ev_michael_mic_failure(struct net_device *dev, int keyidx, + struct ieee80211_hdr *hdr) +{ + union iwreq_data wrqu; + char *buf = kmalloc(128, GFP_ATOMIC); + DECLARE_MAC_BUF(mac); + + if (buf) { + /* TODO: needed parameters: count, key type, TSC */ + sprintf(buf, "MLME-MICHAELMICFAILURE.indication(" + "keyid=%d %scast addr=%s)", + keyidx, hdr->addr1[0] & 0x01 ? "broad" : "uni", + print_mac(mac, hdr->addr2)); + memset(&wrqu, 0, sizeof(wrqu)); + wrqu.data.length = strlen(buf); + wireless_send_event(dev, IWEVCUSTOM, &wrqu, buf); + kfree(buf); + } + + /* + * TODO: re-add support for sending MIC failure indication + * with all info via nl80211 + */ +} diff --git a/net/mac80211/hostapd_ioctl.h b/net/mac80211/hostapd_ioctl.h deleted file mode 100644 index 52da513f060a4de8394c3953e647730d23c39113..0000000000000000000000000000000000000000 --- a/net/mac80211/hostapd_ioctl.h +++ /dev/null @@ -1,100 +0,0 @@ -/* - * Host AP (software wireless LAN access point) user space daemon for - * Host AP kernel driver - * Copyright 2002-2003, Jouni Malinen - * Copyright 2002-2004, Instant802 Networks, Inc. - * Copyright 2005, Devicescape Software, Inc. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - */ - -#ifndef HOSTAPD_IOCTL_H -#define HOSTAPD_IOCTL_H - -#ifdef __KERNEL__ -#include -#endif /* __KERNEL__ */ - -#define PRISM2_IOCTL_PRISM2_PARAM (SIOCIWFIRSTPRIV + 0) -#define PRISM2_IOCTL_GET_PRISM2_PARAM (SIOCIWFIRSTPRIV + 1) -#define PRISM2_IOCTL_HOSTAPD (SIOCIWFIRSTPRIV + 3) - -/* PRISM2_IOCTL_PRISM2_PARAM ioctl() subtypes: - * This table is no longer added to, the whole sub-ioctl - * mess shall be deleted completely. 
*/ -enum { - PRISM2_PARAM_IEEE_802_1X = 23, - - /* Instant802 additions */ - PRISM2_PARAM_CTS_PROTECT_ERP_FRAMES = 1001, - PRISM2_PARAM_PREAMBLE = 1003, - PRISM2_PARAM_SHORT_SLOT_TIME = 1006, - PRISM2_PARAM_NEXT_MODE = 1008, - PRISM2_PARAM_RADIO_ENABLED = 1010, - PRISM2_PARAM_ANTENNA_MODE = 1013, - PRISM2_PARAM_STAT_TIME = 1016, - PRISM2_PARAM_STA_ANTENNA_SEL = 1017, - PRISM2_PARAM_TX_POWER_REDUCTION = 1022, - PRISM2_PARAM_KEY_TX_RX_THRESHOLD = 1024, - PRISM2_PARAM_DEFAULT_WEP_ONLY = 1026, - PRISM2_PARAM_WIFI_WME_NOACK_TEST = 1033, - PRISM2_PARAM_SCAN_FLAGS = 1035, - PRISM2_PARAM_HW_MODES = 1036, - PRISM2_PARAM_CREATE_IBSS = 1037, - PRISM2_PARAM_WMM_ENABLED = 1038, - PRISM2_PARAM_MIXED_CELL = 1039, - PRISM2_PARAM_RADAR_DETECT = 1043, - PRISM2_PARAM_SPECTRUM_MGMT = 1044, -}; - -enum { - IEEE80211_KEY_MGMT_NONE = 0, - IEEE80211_KEY_MGMT_IEEE8021X = 1, - IEEE80211_KEY_MGMT_WPA_PSK = 2, - IEEE80211_KEY_MGMT_WPA_EAP = 3, -}; - - -/* Data structures used for get_hw_features ioctl */ -struct hostapd_ioctl_hw_modes_hdr { - int mode; - int num_channels; - int num_rates; -}; - -struct ieee80211_channel_data { - short chan; /* channel number (IEEE 802.11) */ - short freq; /* frequency in MHz */ - int flag; /* flag for hostapd use (IEEE80211_CHAN_*) */ -}; - -struct ieee80211_rate_data { - int rate; /* rate in 100 kbps */ - int flags; /* IEEE80211_RATE_ flags */ -}; - - -/* ADD_IF, REMOVE_IF, and UPDATE_IF 'type' argument */ -enum { - HOSTAP_IF_WDS = 1, HOSTAP_IF_VLAN = 2, HOSTAP_IF_BSS = 3, - HOSTAP_IF_STA = 4 -}; - -struct hostapd_if_wds { - u8 remote_addr[ETH_ALEN]; -}; - -struct hostapd_if_vlan { - u8 id; -}; - -struct hostapd_if_bss { - u8 bssid[ETH_ALEN]; -}; - -struct hostapd_if_sta { -}; - -#endif /* HOSTAPD_IOCTL_H */ diff --git a/net/mac80211/ieee80211.c b/net/mac80211/ieee80211.c index ff2172ffd8611758ba00efa2abc349892ca11015..f484ca7ade9ce9ff718015cef06517e7b52d13a4 100644 --- a/net/mac80211/ieee80211.c +++ b/net/mac80211/ieee80211.c @@ -20,42 +20,19 @@ #include #include #include -#include -#include #include +#include #include -#include -#include "ieee80211_common.h" #include "ieee80211_i.h" #include "ieee80211_rate.h" #include "wep.h" -#include "wpa.h" -#include "tkip.h" #include "wme.h" #include "aes_ccm.h" #include "ieee80211_led.h" -#include "ieee80211_cfg.h" +#include "cfg.h" #include "debugfs.h" #include "debugfs_netdev.h" -#include "debugfs_key.h" - -/* privid for wiphys to determine whether they belong to us or not */ -void *mac80211_wiphy_privid = &mac80211_wiphy_privid; - -/* See IEEE 802.1H for LLC/SNAP encapsulation/decapsulation */ -/* Ethernet-II snap header (RFC1042 for most EtherTypes) */ -static const unsigned char rfc1042_header[] = - { 0xaa, 0xaa, 0x03, 0x00, 0x00, 0x00 }; - -/* Bridge-Tunnel header (for EtherTypes ETH_P_AARP and ETH_P_IPX) */ -static const unsigned char bridge_tunnel_header[] = - { 0xaa, 0xaa, 0x03, 0x00, 0x00, 0xf8 }; - -/* No encapsulation header if EtherType < 0x600 (=length) */ -static const unsigned char eapol_header[] = - { 0xaa, 0xaa, 0x03, 0x00, 0x00, 0x00, 0x88, 0x8e }; - /* * For seeing transmitted packets on monitor interfaces @@ -67,4267 +44,516 @@ struct ieee80211_tx_status_rtap_hdr { u8 data_retries; } __attribute__ ((packed)); +/* common interface routines */ -static inline void ieee80211_include_sequence(struct ieee80211_sub_if_data *sdata, - struct ieee80211_hdr *hdr) -{ - /* Set the sequence number for this frame. */ - hdr->seq_ctrl = cpu_to_le16(sdata->sequence); - - /* Increase the sequence number. 
*/ - sdata->sequence = (sdata->sequence + 0x10) & IEEE80211_SCTL_SEQ; -} - -struct ieee80211_key_conf * -ieee80211_key_data2conf(struct ieee80211_local *local, - const struct ieee80211_key *data) +static int header_parse_80211(const struct sk_buff *skb, unsigned char *haddr) { - struct ieee80211_key_conf *conf; - - conf = kmalloc(sizeof(*conf) + data->keylen, GFP_ATOMIC); - if (!conf) - return NULL; - - conf->hw_key_idx = data->hw_key_idx; - conf->alg = data->alg; - conf->keylen = data->keylen; - conf->flags = 0; - if (data->force_sw_encrypt) - conf->flags |= IEEE80211_KEY_FORCE_SW_ENCRYPT; - conf->keyidx = data->keyidx; - if (data->default_tx_key) - conf->flags |= IEEE80211_KEY_DEFAULT_TX_KEY; - if (local->default_wep_only) - conf->flags |= IEEE80211_KEY_DEFAULT_WEP_ONLY; - memcpy(conf->key, data->key, data->keylen); - - return conf; + memcpy(haddr, skb_mac_header(skb) + 10, ETH_ALEN); /* addr2 */ + return ETH_ALEN; } -struct ieee80211_key *ieee80211_key_alloc(struct ieee80211_sub_if_data *sdata, - int idx, size_t key_len, gfp_t flags) +/* must be called under mdev tx lock */ +static void ieee80211_configure_filter(struct ieee80211_local *local) { - struct ieee80211_key *key; + unsigned int changed_flags; + unsigned int new_flags = 0; - key = kzalloc(sizeof(struct ieee80211_key) + key_len, flags); - if (!key) - return NULL; - kref_init(&key->kref); - return key; -} + if (atomic_read(&local->iff_promiscs)) + new_flags |= FIF_PROMISC_IN_BSS; -static void ieee80211_key_release(struct kref *kref) -{ - struct ieee80211_key *key; + if (atomic_read(&local->iff_allmultis)) + new_flags |= FIF_ALLMULTI; - key = container_of(kref, struct ieee80211_key, kref); - if (key->alg == ALG_CCMP) - ieee80211_aes_key_free(key->u.ccmp.tfm); - ieee80211_debugfs_key_remove(key); - kfree(key); -} + if (local->monitors) + new_flags |= FIF_CONTROL | + FIF_OTHER_BSS | + FIF_BCN_PRBRESP_PROMISC; -void ieee80211_key_free(struct ieee80211_key *key) -{ - if (key) - kref_put(&key->kref, ieee80211_key_release); -} + changed_flags = local->filter_flags ^ new_flags; -static int rate_list_match(const int *rate_list, int rate) -{ - int i; + /* be a bit nasty */ + new_flags |= (1<<31); - if (!rate_list) - return 0; + local->ops->configure_filter(local_to_hw(local), + changed_flags, &new_flags, + local->mdev->mc_count, + local->mdev->mc_list); - for (i = 0; rate_list[i] >= 0; i++) - if (rate_list[i] == rate) - return 1; + WARN_ON(new_flags & (1<<31)); - return 0; + local->filter_flags = new_flags & ~(1<<31); } +/* master interface */ -void ieee80211_prepare_rates(struct ieee80211_local *local, - struct ieee80211_hw_mode *mode) +static int ieee80211_master_open(struct net_device *dev) { - int i; - - for (i = 0; i < mode->num_rates; i++) { - struct ieee80211_rate *rate = &mode->rates[i]; - - rate->flags &= ~(IEEE80211_RATE_SUPPORTED | - IEEE80211_RATE_BASIC); - - if (local->supp_rates[mode->mode]) { - if (!rate_list_match(local->supp_rates[mode->mode], - rate->rate)) - continue; - } - - rate->flags |= IEEE80211_RATE_SUPPORTED; - - /* Use configured basic rate set if it is available. If not, - * use defaults that are sane for most cases. 
*/ - if (local->basic_rates[mode->mode]) { - if (rate_list_match(local->basic_rates[mode->mode], - rate->rate)) - rate->flags |= IEEE80211_RATE_BASIC; - } else switch (mode->mode) { - case MODE_IEEE80211A: - if (rate->rate == 60 || rate->rate == 120 || - rate->rate == 240) - rate->flags |= IEEE80211_RATE_BASIC; - break; - case MODE_IEEE80211B: - if (rate->rate == 10 || rate->rate == 20) - rate->flags |= IEEE80211_RATE_BASIC; - break; - case MODE_ATHEROS_TURBO: - if (rate->rate == 120 || rate->rate == 240 || - rate->rate == 480) - rate->flags |= IEEE80211_RATE_BASIC; - break; - case MODE_IEEE80211G: - if (rate->rate == 10 || rate->rate == 20 || - rate->rate == 55 || rate->rate == 110) - rate->flags |= IEEE80211_RATE_BASIC; - break; - } + struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); + struct ieee80211_sub_if_data *sdata; + int res = -EOPNOTSUPP; - /* Set ERP and MANDATORY flags based on phymode */ - switch (mode->mode) { - case MODE_IEEE80211A: - if (rate->rate == 60 || rate->rate == 120 || - rate->rate == 240) - rate->flags |= IEEE80211_RATE_MANDATORY; - break; - case MODE_IEEE80211B: - if (rate->rate == 10) - rate->flags |= IEEE80211_RATE_MANDATORY; - break; - case MODE_ATHEROS_TURBO: - break; - case MODE_IEEE80211G: - if (rate->rate == 10 || rate->rate == 20 || - rate->rate == 55 || rate->rate == 110 || - rate->rate == 60 || rate->rate == 120 || - rate->rate == 240) - rate->flags |= IEEE80211_RATE_MANDATORY; + /* we hold the RTNL here so can safely walk the list */ + list_for_each_entry(sdata, &local->interfaces, list) { + if (sdata->dev != dev && netif_running(sdata->dev)) { + res = 0; break; } - if (ieee80211_is_erp_rate(mode->mode, rate->rate)) - rate->flags |= IEEE80211_RATE_ERP; } + return res; } - -static void ieee80211_key_threshold_notify(struct net_device *dev, - struct ieee80211_key *key, - struct sta_info *sta) +static int ieee80211_master_stop(struct net_device *dev) { struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); - struct sk_buff *skb; - struct ieee80211_msg_key_notification *msg; - - /* if no one will get it anyway, don't even allocate it. - * unlikely because this is only relevant for APs - * where the device must be open... 
*/ - if (unlikely(!local->apdev)) - return; - - skb = dev_alloc_skb(sizeof(struct ieee80211_frame_info) + - sizeof(struct ieee80211_msg_key_notification)); - if (!skb) - return; - - skb_reserve(skb, sizeof(struct ieee80211_frame_info)); - msg = (struct ieee80211_msg_key_notification *) - skb_put(skb, sizeof(struct ieee80211_msg_key_notification)); - msg->tx_rx_count = key->tx_rx_count; - memcpy(msg->ifname, dev->name, IFNAMSIZ); - if (sta) - memcpy(msg->addr, sta->addr, ETH_ALEN); - else - memset(msg->addr, 0xff, ETH_ALEN); - - key->tx_rx_count = 0; - - ieee80211_rx_mgmt(local, skb, NULL, - ieee80211_msg_key_threshold_notification); -} - - -static u8 * ieee80211_get_bssid(struct ieee80211_hdr *hdr, size_t len) -{ - u16 fc; - - if (len < 24) - return NULL; - - fc = le16_to_cpu(hdr->frame_control); - - switch (fc & IEEE80211_FCTL_FTYPE) { - case IEEE80211_FTYPE_DATA: - switch (fc & (IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS)) { - case IEEE80211_FCTL_TODS: - return hdr->addr1; - case (IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS): - return NULL; - case IEEE80211_FCTL_FROMDS: - return hdr->addr2; - case 0: - return hdr->addr3; - } - break; - case IEEE80211_FTYPE_MGMT: - return hdr->addr3; - case IEEE80211_FTYPE_CTL: - if ((fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_PSPOLL) - return hdr->addr1; - else - return NULL; - } - - return NULL; -} - -int ieee80211_get_hdrlen(u16 fc) -{ - int hdrlen = 24; + struct ieee80211_sub_if_data *sdata; - switch (fc & IEEE80211_FCTL_FTYPE) { - case IEEE80211_FTYPE_DATA: - if ((fc & IEEE80211_FCTL_FROMDS) && (fc & IEEE80211_FCTL_TODS)) - hdrlen = 30; /* Addr4 */ - /* - * The QoS Control field is two bytes and its presence is - * indicated by the IEEE80211_STYPE_QOS_DATA bit. Add 2 to - * hdrlen if that bit is set. - * This works by masking out the bit and shifting it to - * bit position 1 so the result has the value 0 or 2. - */ - hdrlen += (fc & IEEE80211_STYPE_QOS_DATA) - >> (ilog2(IEEE80211_STYPE_QOS_DATA)-1); - break; - case IEEE80211_FTYPE_CTL: - /* - * ACK and CTS are 10 bytes, all others 16. 
To see how - * to get this condition consider - * subtype mask: 0b0000000011110000 (0x00F0) - * ACK subtype: 0b0000000011010000 (0x00D0) - * CTS subtype: 0b0000000011000000 (0x00C0) - * bits that matter: ^^^ (0x00E0) - * value of those: 0b0000000011000000 (0x00C0) - */ - if ((fc & 0xE0) == 0xC0) - hdrlen = 10; - else - hdrlen = 16; - break; - } + /* we hold the RTNL here so can safely walk the list */ + list_for_each_entry(sdata, &local->interfaces, list) + if (sdata->dev != dev && netif_running(sdata->dev)) + dev_close(sdata->dev); - return hdrlen; + return 0; } -EXPORT_SYMBOL(ieee80211_get_hdrlen); -int ieee80211_get_hdrlen_from_skb(const struct sk_buff *skb) +static void ieee80211_master_set_multicast_list(struct net_device *dev) { - const struct ieee80211_hdr *hdr = (const struct ieee80211_hdr *) skb->data; - int hdrlen; + struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); - if (unlikely(skb->len < 10)) - return 0; - hdrlen = ieee80211_get_hdrlen(le16_to_cpu(hdr->frame_control)); - if (unlikely(hdrlen > skb->len)) - return 0; - return hdrlen; + ieee80211_configure_filter(local); } -EXPORT_SYMBOL(ieee80211_get_hdrlen_from_skb); - -static int ieee80211_get_radiotap_len(struct sk_buff *skb) -{ - struct ieee80211_radiotap_header *hdr = - (struct ieee80211_radiotap_header *) skb->data; - return le16_to_cpu(hdr->it_len); -} +/* regular interfaces */ -#ifdef CONFIG_MAC80211_LOWTX_FRAME_DUMP -static void ieee80211_dump_frame(const char *ifname, const char *title, - const struct sk_buff *skb) +static int ieee80211_change_mtu(struct net_device *dev, int new_mtu) { - const struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; - u16 fc; - int hdrlen; - - printk(KERN_DEBUG "%s: %s (len=%d)", ifname, title, skb->len); - if (skb->len < 4) { - printk("\n"); - return; + /* FIX: what would be proper limits for MTU? + * This interface uses 802.3 frames. 
*/ + if (new_mtu < 256 || new_mtu > IEEE80211_MAX_DATA_LEN - 24 - 6) { + printk(KERN_WARNING "%s: invalid MTU %d\n", + dev->name, new_mtu); + return -EINVAL; } - fc = le16_to_cpu(hdr->frame_control); - hdrlen = ieee80211_get_hdrlen(fc); - if (hdrlen > skb->len) - hdrlen = skb->len; - if (hdrlen >= 4) - printk(" FC=0x%04x DUR=0x%04x", - fc, le16_to_cpu(hdr->duration_id)); - if (hdrlen >= 10) - printk(" A1=" MAC_FMT, MAC_ARG(hdr->addr1)); - if (hdrlen >= 16) - printk(" A2=" MAC_FMT, MAC_ARG(hdr->addr2)); - if (hdrlen >= 24) - printk(" A3=" MAC_FMT, MAC_ARG(hdr->addr3)); - if (hdrlen >= 30) - printk(" A4=" MAC_FMT, MAC_ARG(hdr->addr4)); - printk("\n"); +#ifdef CONFIG_MAC80211_VERBOSE_DEBUG + printk(KERN_DEBUG "%s: setting MTU %d\n", dev->name, new_mtu); +#endif /* CONFIG_MAC80211_VERBOSE_DEBUG */ + dev->mtu = new_mtu; + return 0; } -#else /* CONFIG_MAC80211_LOWTX_FRAME_DUMP */ -static inline void ieee80211_dump_frame(const char *ifname, const char *title, - struct sk_buff *skb) + +static inline int identical_mac_addr_allowed(int type1, int type2) { + return (type1 == IEEE80211_IF_TYPE_MNTR || + type2 == IEEE80211_IF_TYPE_MNTR || + (type1 == IEEE80211_IF_TYPE_AP && + type2 == IEEE80211_IF_TYPE_WDS) || + (type1 == IEEE80211_IF_TYPE_WDS && + (type2 == IEEE80211_IF_TYPE_WDS || + type2 == IEEE80211_IF_TYPE_AP)) || + (type1 == IEEE80211_IF_TYPE_AP && + type2 == IEEE80211_IF_TYPE_VLAN) || + (type1 == IEEE80211_IF_TYPE_VLAN && + (type2 == IEEE80211_IF_TYPE_AP || + type2 == IEEE80211_IF_TYPE_VLAN))); } -#endif /* CONFIG_MAC80211_LOWTX_FRAME_DUMP */ - -static int ieee80211_is_eapol(const struct sk_buff *skb) +static int ieee80211_open(struct net_device *dev) { - const struct ieee80211_hdr *hdr; - u16 fc; - int hdrlen; - - if (unlikely(skb->len < 10)) - return 0; - - hdr = (const struct ieee80211_hdr *) skb->data; - fc = le16_to_cpu(hdr->frame_control); - - if (unlikely(!WLAN_FC_DATA_PRESENT(fc))) - return 0; - - hdrlen = ieee80211_get_hdrlen(fc); + struct ieee80211_sub_if_data *sdata, *nsdata; + struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); + struct ieee80211_if_init_conf conf; + int res; - if (unlikely(skb->len >= hdrlen + sizeof(eapol_header) && - memcmp(skb->data + hdrlen, eapol_header, - sizeof(eapol_header)) == 0)) - return 1; + sdata = IEEE80211_DEV_TO_SUB_IF(dev); - return 0; -} + /* we hold the RTNL here so can safely walk the list */ + list_for_each_entry(nsdata, &local->interfaces, list) { + struct net_device *ndev = nsdata->dev; + if (ndev != dev && ndev != local->mdev && netif_running(ndev) && + compare_ether_addr(dev->dev_addr, ndev->dev_addr) == 0) { + /* + * check whether it may have the same address + */ + if (!identical_mac_addr_allowed(sdata->type, + nsdata->type)) + return -ENOTUNIQ; -static ieee80211_txrx_result -ieee80211_tx_h_rate_ctrl(struct ieee80211_txrx_data *tx) -{ - struct rate_control_extra extra; - - memset(&extra, 0, sizeof(extra)); - extra.mode = tx->u.tx.mode; - extra.mgmt_data = tx->sdata && - tx->sdata->type == IEEE80211_IF_TYPE_MGMT; - extra.ethertype = tx->ethertype; - - tx->u.tx.rate = rate_control_get_rate(tx->local, tx->dev, tx->skb, - &extra); - if (unlikely(extra.probe != NULL)) { - tx->u.tx.control->flags |= IEEE80211_TXCTL_RATE_CTRL_PROBE; - tx->u.tx.probe_last_frag = 1; - tx->u.tx.control->alt_retry_rate = tx->u.tx.rate->val; - tx->u.tx.rate = extra.probe; - } else { - tx->u.tx.control->alt_retry_rate = -1; - } - if (!tx->u.tx.rate) - return TXRX_DROP; - if (tx->u.tx.mode->mode == MODE_IEEE80211G && - tx->sdata->use_protection && 
tx->fragmented && - extra.nonerp) { - tx->u.tx.last_frag_rate = tx->u.tx.rate; - tx->u.tx.probe_last_frag = extra.probe ? 1 : 0; - - tx->u.tx.rate = extra.nonerp; - tx->u.tx.control->rate = extra.nonerp; - tx->u.tx.control->flags &= ~IEEE80211_TXCTL_RATE_CTRL_PROBE; - } else { - tx->u.tx.last_frag_rate = tx->u.tx.rate; - tx->u.tx.control->rate = tx->u.tx.rate; + /* + * can only add VLANs to enabled APs + */ + if (sdata->type == IEEE80211_IF_TYPE_VLAN && + nsdata->type == IEEE80211_IF_TYPE_AP && + netif_running(nsdata->dev)) + sdata->u.vlan.ap = nsdata; + } } - tx->u.tx.control->tx_rate = tx->u.tx.rate->val; - if ((tx->u.tx.rate->flags & IEEE80211_RATE_PREAMBLE2) && - tx->local->short_preamble && - (!tx->sta || (tx->sta->flags & WLAN_STA_SHORT_PREAMBLE))) { - tx->u.tx.short_preamble = 1; - tx->u.tx.control->tx_rate = tx->u.tx.rate->val2; + + switch (sdata->type) { + case IEEE80211_IF_TYPE_WDS: + if (is_zero_ether_addr(sdata->u.wds.remote_addr)) + return -ENOLINK; + break; + case IEEE80211_IF_TYPE_VLAN: + if (!sdata->u.vlan.ap) + return -ENOLINK; + break; + case IEEE80211_IF_TYPE_AP: + case IEEE80211_IF_TYPE_STA: + case IEEE80211_IF_TYPE_MNTR: + case IEEE80211_IF_TYPE_IBSS: + /* no special treatment */ + break; + case IEEE80211_IF_TYPE_INVALID: + /* cannot happen */ + WARN_ON(1); + break; } - return TXRX_CONTINUE; -} + if (local->open_count == 0) { + res = 0; + if (local->ops->start) + res = local->ops->start(local_to_hw(local)); + if (res) + return res; + } + switch (sdata->type) { + case IEEE80211_IF_TYPE_VLAN: + list_add(&sdata->u.vlan.list, &sdata->u.vlan.ap->u.ap.vlans); + /* no need to tell driver */ + break; + case IEEE80211_IF_TYPE_MNTR: + /* must be before the call to ieee80211_configure_filter */ + local->monitors++; + if (local->monitors == 1) { + netif_tx_lock_bh(local->mdev); + ieee80211_configure_filter(local); + netif_tx_unlock_bh(local->mdev); -static ieee80211_txrx_result -ieee80211_tx_h_select_key(struct ieee80211_txrx_data *tx) -{ - if (tx->sta) - tx->u.tx.control->key_idx = tx->sta->key_idx_compression; - else - tx->u.tx.control->key_idx = HW_KEY_IDX_INVALID; - - if (unlikely(tx->u.tx.control->flags & IEEE80211_TXCTL_DO_NOT_ENCRYPT)) - tx->key = NULL; - else if (tx->sta && tx->sta->key) - tx->key = tx->sta->key; - else if (tx->sdata->default_key) - tx->key = tx->sdata->default_key; - else if (tx->sdata->drop_unencrypted && - !(tx->sdata->eapol && ieee80211_is_eapol(tx->skb))) { - I802_DEBUG_INC(tx->local->tx_handlers_drop_unencrypted); - return TXRX_DROP; - } else - tx->key = NULL; - - if (tx->key) { - tx->key->tx_rx_count++; - if (unlikely(tx->local->key_tx_rx_threshold && - tx->key->tx_rx_count > - tx->local->key_tx_rx_threshold)) { - ieee80211_key_threshold_notify(tx->dev, tx->key, - tx->sta); + local->hw.conf.flags |= IEEE80211_CONF_RADIOTAP; + ieee80211_hw_config(local); } - } - - return TXRX_CONTINUE; -} + break; + case IEEE80211_IF_TYPE_STA: + case IEEE80211_IF_TYPE_IBSS: + sdata->u.sta.flags &= ~IEEE80211_STA_PREV_BSSID_SET; + /* fall through */ + default: + conf.if_id = dev->ifindex; + conf.type = sdata->type; + conf.mac_addr = dev->dev_addr; + res = local->ops->add_interface(local_to_hw(local), &conf); + if (res && !local->open_count && local->ops->stop) + local->ops->stop(local_to_hw(local)); + if (res) + return res; + ieee80211_if_config(dev); + ieee80211_reset_erp_info(dev); + ieee80211_enable_keys(sdata); -static ieee80211_txrx_result -ieee80211_tx_h_fragment(struct ieee80211_txrx_data *tx) -{ - struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) 
tx->skb->data; - size_t hdrlen, per_fragm, num_fragm, payload_len, left; - struct sk_buff **frags, *first, *frag; - int i; - u16 seq; - u8 *pos; - int frag_threshold = tx->local->fragmentation_threshold; - - if (!tx->fragmented) - return TXRX_CONTINUE; - - first = tx->skb; - - hdrlen = ieee80211_get_hdrlen(tx->fc); - payload_len = first->len - hdrlen; - per_fragm = frag_threshold - hdrlen - FCS_LEN; - num_fragm = (payload_len + per_fragm - 1) / per_fragm; - - frags = kzalloc(num_fragm * sizeof(struct sk_buff *), GFP_ATOMIC); - if (!frags) - goto fail; - - hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_MOREFRAGS); - seq = le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_SEQ; - pos = first->data + hdrlen + per_fragm; - left = payload_len - per_fragm; - for (i = 0; i < num_fragm - 1; i++) { - struct ieee80211_hdr *fhdr; - size_t copylen; - - if (left <= 0) - goto fail; - - /* reserve enough extra head and tail room for possible - * encryption */ - frag = frags[i] = - dev_alloc_skb(tx->local->tx_headroom + - frag_threshold + - IEEE80211_ENCRYPT_HEADROOM + - IEEE80211_ENCRYPT_TAILROOM); - if (!frag) - goto fail; - /* Make sure that all fragments use the same priority so - * that they end up using the same TX queue */ - frag->priority = first->priority; - skb_reserve(frag, tx->local->tx_headroom + - IEEE80211_ENCRYPT_HEADROOM); - fhdr = (struct ieee80211_hdr *) skb_put(frag, hdrlen); - memcpy(fhdr, first->data, hdrlen); - if (i == num_fragm - 2) - fhdr->frame_control &= cpu_to_le16(~IEEE80211_FCTL_MOREFRAGS); - fhdr->seq_ctrl = cpu_to_le16(seq | ((i + 1) & IEEE80211_SCTL_FRAG)); - copylen = left > per_fragm ? per_fragm : left; - memcpy(skb_put(frag, copylen), pos, copylen); - - pos += copylen; - left -= copylen; + if (sdata->type == IEEE80211_IF_TYPE_STA && + !(sdata->flags & IEEE80211_SDATA_USERSPACE_MLME)) + netif_carrier_off(dev); + else + netif_carrier_on(dev); } - skb_trim(first, hdrlen + per_fragm); - - tx->u.tx.num_extra_frag = num_fragm - 1; - tx->u.tx.extra_frag = frags; - return TXRX_CONTINUE; - - fail: - printk(KERN_DEBUG "%s: failed to fragment frame\n", tx->dev->name); - if (frags) { - for (i = 0; i < num_fragm - 1; i++) - if (frags[i]) - dev_kfree_skb(frags[i]); - kfree(frags); + if (local->open_count == 0) { + res = dev_open(local->mdev); + WARN_ON(res); + tasklet_enable(&local->tx_pending_tasklet); + tasklet_enable(&local->tasklet); } - I802_DEBUG_INC(tx->local->tx_handlers_drop_fragment); - return TXRX_DROP; -} - -static int wep_encrypt_skb(struct ieee80211_txrx_data *tx, struct sk_buff *skb) -{ - if (tx->key->force_sw_encrypt) { - if (ieee80211_wep_encrypt(tx->local, skb, tx->key)) - return -1; - } else { - tx->u.tx.control->key_idx = tx->key->hw_key_idx; - if (tx->local->hw.flags & IEEE80211_HW_WEP_INCLUDE_IV) { - if (ieee80211_wep_add_iv(tx->local, skb, tx->key) == - NULL) - return -1; - } - } - return 0; -} + local->open_count++; + netif_start_queue(dev); -void ieee80211_tx_set_iswep(struct ieee80211_txrx_data *tx) -{ - struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) tx->skb->data; - - hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_PROTECTED); - if (tx->u.tx.extra_frag) { - struct ieee80211_hdr *fhdr; - int i; - for (i = 0; i < tx->u.tx.num_extra_frag; i++) { - fhdr = (struct ieee80211_hdr *) - tx->u.tx.extra_frag[i]->data; - fhdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_PROTECTED); - } - } + return 0; } - -static ieee80211_txrx_result -ieee80211_tx_h_wep_encrypt(struct ieee80211_txrx_data *tx) +static int ieee80211_stop(struct net_device *dev) { - struct 
ieee80211_hdr *hdr = (struct ieee80211_hdr *) tx->skb->data; - u16 fc; + struct ieee80211_sub_if_data *sdata; + struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); + struct ieee80211_if_init_conf conf; - fc = le16_to_cpu(hdr->frame_control); + sdata = IEEE80211_DEV_TO_SUB_IF(dev); - if (!tx->key || tx->key->alg != ALG_WEP || - ((fc & IEEE80211_FCTL_FTYPE) != IEEE80211_FTYPE_DATA && - ((fc & IEEE80211_FCTL_FTYPE) != IEEE80211_FTYPE_MGMT || - (fc & IEEE80211_FCTL_STYPE) != IEEE80211_STYPE_AUTH))) - return TXRX_CONTINUE; + netif_stop_queue(dev); - tx->u.tx.control->iv_len = WEP_IV_LEN; - tx->u.tx.control->icv_len = WEP_ICV_LEN; - ieee80211_tx_set_iswep(tx); + dev_mc_unsync(local->mdev, dev); - if (wep_encrypt_skb(tx, tx->skb) < 0) { - I802_DEBUG_INC(tx->local->tx_handlers_drop_wep); - return TXRX_DROP; - } + /* down all dependent devices, that is VLANs */ + if (sdata->type == IEEE80211_IF_TYPE_AP) { + struct ieee80211_sub_if_data *vlan, *tmp; - if (tx->u.tx.extra_frag) { - int i; - for (i = 0; i < tx->u.tx.num_extra_frag; i++) { - if (wep_encrypt_skb(tx, tx->u.tx.extra_frag[i]) < 0) { - I802_DEBUG_INC(tx->local-> - tx_handlers_drop_wep); - return TXRX_DROP; - } - } + list_for_each_entry_safe(vlan, tmp, &sdata->u.ap.vlans, + u.vlan.list) + dev_close(vlan->dev); + WARN_ON(!list_empty(&sdata->u.ap.vlans)); } - return TXRX_CONTINUE; -} - - -static int ieee80211_frame_duration(struct ieee80211_local *local, size_t len, - int rate, int erp, int short_preamble) -{ - int dur; + local->open_count--; - /* calculate duration (in microseconds, rounded up to next higher - * integer if it includes a fractional microsecond) to send frame of - * len bytes (does not include FCS) at the given rate. Duration will - * also include SIFS. - * - * rate is in 100 kbps, so divident is multiplied by 10 in the - * DIV_ROUND_UP() operations. - */ + switch (sdata->type) { + case IEEE80211_IF_TYPE_VLAN: + list_del(&sdata->u.vlan.list); + sdata->u.vlan.ap = NULL; + /* no need to tell driver */ + break; + case IEEE80211_IF_TYPE_MNTR: + local->monitors--; + if (local->monitors == 0) { + netif_tx_lock_bh(local->mdev); + ieee80211_configure_filter(local); + netif_tx_unlock_bh(local->mdev); - if (local->hw.conf.phymode == MODE_IEEE80211A || erp || - local->hw.conf.phymode == MODE_ATHEROS_TURBO) { - /* - * OFDM: - * - * N_DBPS = DATARATE x 4 - * N_SYM = Ceiling((16+8xLENGTH+6) / N_DBPS) - * (16 = SIGNAL time, 6 = tail bits) - * TXTIME = T_PREAMBLE + T_SIGNAL + T_SYM x N_SYM + Signal Ext - * - * T_SYM = 4 usec - * 802.11a - 17.5.2: aSIFSTime = 16 usec - * 802.11g - 19.8.4: aSIFSTime = 10 usec + - * signal ext = 6 usec - */ - /* FIX: Atheros Turbo may have different (shorter) duration? */ - dur = 16; /* SIFS + signal ext */ - dur += 16; /* 17.3.2.3: T_PREAMBLE = 16 usec */ - dur += 4; /* 17.3.2.3: T_SIGNAL = 4 usec */ - dur += 4 * DIV_ROUND_UP((16 + 8 * (len + 4) + 6) * 10, - 4 * rate); /* T_SYM x N_SYM */ - } else { + local->hw.conf.flags |= IEEE80211_CONF_RADIOTAP; + ieee80211_hw_config(local); + } + break; + case IEEE80211_IF_TYPE_STA: + case IEEE80211_IF_TYPE_IBSS: + sdata->u.sta.state = IEEE80211_DISABLED; + del_timer_sync(&sdata->u.sta.timer); /* - * 802.11b or 802.11g with 802.11b compatibility: - * 18.3.4: TXTIME = PreambleLength + PLCPHeaderTime + - * Ceiling(((LENGTH+PBCC)x8)/DATARATE). PBCC=0. 
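The airtime math above is easy to check by hand. The short standalone C sketch below (illustrative only, not part of this patch) mirrors the same rounding, with rates in 100 kbps units and the 4-byte FCS added back in, e.g. a 1500-byte frame at 54 Mbps OFDM versus 11 Mbps CCK:

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

/* len excludes the FCS (hence the +4), rate is in 100 kbps units. */
static int frame_duration_us(int len, int rate, int ofdm, int short_preamble)
{
	int dur;

	if (ofdm) {
		/* SIFS + signal extension, preamble, SIGNAL field, then
		 * 4 us per OFDM symbol; the x10 keeps the 100 kbps rate
		 * units integral, as in ieee80211_frame_duration(). */
		dur = 16 + 16 + 4;
		dur += 4 * DIV_ROUND_UP((16 + 8 * (len + 4) + 6) * 10,
					4 * rate);
	} else {
		/* CCK/DSSS: SIFS, long or short preamble + PLCP header,
		 * then ceil(8 x (len + FCS) / rate). */
		dur = 10;
		dur += short_preamble ? (72 + 24) : (144 + 48);
		dur += DIV_ROUND_UP(8 * (len + 4) * 10, rate);
	}
	return dur;
}

int main(void)
{
	printf("1500 bytes at 54 Mbps OFDM: %d us\n",
	       frame_duration_us(1500, 540, 1, 0));
	printf("1500 bytes at 11 Mbps CCK : %d us\n",
	       frame_duration_us(1500, 110, 0, 0));
	return 0;
}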
- * - * 802.11 (DS): 15.3.3, 802.11b: 18.3.4 - * aSIFSTime = 10 usec - * aPreambleLength = 144 usec or 72 usec with short preamble - * aPLCPHeaderLength = 48 usec or 24 usec with short preamble + * When we get here, the interface is marked down. + * Call synchronize_rcu() to wait for the RX path + * should it be using the interface and enqueuing + * frames at this very time on another CPU. */ - dur = 10; /* aSIFSTime = 10 usec */ - dur += short_preamble ? (72 + 24) : (144 + 48); + synchronize_rcu(); + skb_queue_purge(&sdata->u.sta.skb_queue); - dur += DIV_ROUND_UP(8 * (len + 4) * 10, rate); + if (!local->ops->hw_scan && + local->scan_dev == sdata->dev) { + local->sta_scanning = 0; + cancel_delayed_work(&local->scan_work); + } + flush_workqueue(local->hw.workqueue); + /* fall through */ + default: + conf.if_id = dev->ifindex; + conf.type = sdata->type; + conf.mac_addr = dev->dev_addr; + /* disable all keys for as long as this netdev is down */ + ieee80211_disable_keys(sdata); + local->ops->remove_interface(local_to_hw(local), &conf); } - return dur; -} - - -/* Exported duration function for driver use */ -__le16 ieee80211_generic_frame_duration(struct ieee80211_hw *hw, - size_t frame_len, int rate) -{ - struct ieee80211_local *local = hw_to_local(hw); - u16 dur; - int erp; - - erp = ieee80211_is_erp_rate(hw->conf.phymode, rate); - dur = ieee80211_frame_duration(local, frame_len, rate, - erp, local->short_preamble); - - return cpu_to_le16(dur); -} -EXPORT_SYMBOL(ieee80211_generic_frame_duration); - - -static u16 ieee80211_duration(struct ieee80211_txrx_data *tx, int group_addr, - int next_frag_len) -{ - int rate, mrate, erp, dur, i; - struct ieee80211_rate *txrate = tx->u.tx.rate; - struct ieee80211_local *local = tx->local; - struct ieee80211_hw_mode *mode = tx->u.tx.mode; + if (local->open_count == 0) { + if (netif_running(local->mdev)) + dev_close(local->mdev); - erp = txrate->flags & IEEE80211_RATE_ERP; + if (local->ops->stop) + local->ops->stop(local_to_hw(local)); - /* - * data and mgmt (except PS Poll): - * - during CFP: 32768 - * - during contention period: - * if addr1 is group address: 0 - * if more fragments = 0 and addr1 is individual address: time to - * transmit one ACK plus SIFS - * if more fragments = 1 and addr1 is individual address: time to - * transmit next fragment plus 2 x ACK plus 3 x SIFS - * - * IEEE 802.11, 9.6: - * - control response frame (CTS or ACK) shall be transmitted using the - * same rate as the immediately previous frame in the frame exchange - * sequence, if this rate belongs to the PHY mandatory rates, or else - * at the highest possible rate belonging to the PHY rates in the - * BSSBasicRateSet - */ - - if ((tx->fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_CTL) { - /* TODO: These control frames are not currently sent by - * 80211.o, but should they be implemented, this function - * needs to be updated to support duration field calculation. 
- * - * RTS: time needed to transmit pending data/mgmt frame plus - * one CTS frame plus one ACK frame plus 3 x SIFS - * CTS: duration of immediately previous RTS minus time - * required to transmit CTS and its SIFS - * ACK: 0 if immediately previous directed data/mgmt had - * more=0, with more=1 duration in ACK frame is duration - * from previous frame minus time needed to transmit ACK - * and its SIFS - * PS Poll: BIT(15) | BIT(14) | aid - */ - return 0; - } - - /* data/mgmt */ - if (0 /* FIX: data/mgmt during CFP */) - return 32768; - - if (group_addr) /* Group address as the destination - no ACK */ - return 0; - - /* Individual destination address: - * IEEE 802.11, Ch. 9.6 (after IEEE 802.11g changes) - * CTS and ACK frames shall be transmitted using the highest rate in - * basic rate set that is less than or equal to the rate of the - * immediately previous frame and that is using the same modulation - * (CCK or OFDM). If no basic rate set matches with these requirements, - * the highest mandatory rate of the PHY that is less than or equal to - * the rate of the previous frame is used. - * Mandatory rates for IEEE 802.11g PHY: 1, 2, 5.5, 11, 6, 12, 24 Mbps - */ - rate = -1; - mrate = 10; /* use 1 Mbps if everything fails */ - for (i = 0; i < mode->num_rates; i++) { - struct ieee80211_rate *r = &mode->rates[i]; - if (r->rate > txrate->rate) - break; - - if (IEEE80211_RATE_MODULATION(txrate->flags) != - IEEE80211_RATE_MODULATION(r->flags)) - continue; - - if (r->flags & IEEE80211_RATE_BASIC) - rate = r->rate; - else if (r->flags & IEEE80211_RATE_MANDATORY) - mrate = r->rate; - } - if (rate == -1) { - /* No matching basic rate found; use highest suitable mandatory - * PHY rate */ - rate = mrate; - } - - /* Time needed to transmit ACK - * (10 bytes + 4-byte FCS = 112 bits) plus SIFS; rounded up - * to closest integer */ - - dur = ieee80211_frame_duration(local, 10, rate, erp, - local->short_preamble); - - if (next_frag_len) { - /* Frame is fragmented: duration increases with time needed to - * transmit next fragment plus ACK and 2 x SIFS. */ - dur *= 2; /* ACK + SIFS */ - /* next fragment */ - dur += ieee80211_frame_duration(local, next_frag_len, - txrate->rate, erp, - local->short_preamble); - } - - return dur; -} - - -static ieee80211_txrx_result -ieee80211_tx_h_misc(struct ieee80211_txrx_data *tx) -{ - struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) tx->skb->data; - u16 dur; - struct ieee80211_tx_control *control = tx->u.tx.control; - struct ieee80211_hw_mode *mode = tx->u.tx.mode; - - if (!is_multicast_ether_addr(hdr->addr1)) { - if (tx->skb->len + FCS_LEN > tx->local->rts_threshold && - tx->local->rts_threshold < IEEE80211_MAX_RTS_THRESHOLD) { - control->flags |= IEEE80211_TXCTL_USE_RTS_CTS; - control->retry_limit = - tx->local->long_retry_limit; - } else { - control->retry_limit = - tx->local->short_retry_limit; - } - } else { - control->retry_limit = 1; - } - - if (tx->fragmented) { - /* Do not use multiple retry rates when sending fragmented - * frames. - * TODO: The last fragment could still use multiple retry - * rates. */ - control->alt_retry_rate = -1; - } - - /* Use CTS protection for unicast frames sent using extended rates if - * there are associated non-ERP stations and RTS/CTS is not configured - * for the frame. 
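The response-rate rule quoted above (highest basic rate not above the data rate and of the same modulation, else the highest suitable mandatory rate) is what the rate/mrate loop in ieee80211_duration() implements. Below is a standalone sketch of that selection over a made-up 802.11g rate table; the table contents and the chosen basic rate set are assumptions for illustration, not taken from the patch:

#include <stdio.h>

struct toy_rate {
	int rate;	/* 100 kbps units */
	int basic;	/* member of the BSS basic rate set (assumed) */
	int mandatory;	/* PHY mandatory rate */
	int ofdm;	/* modulation class: 1 = OFDM/ERP, 0 = CCK/DSSS */
};

/* Toy 802.11g table, sorted by rate, basic set {1, 2, 12, 24} Mbps. */
static const struct toy_rate rates[] = {
	{  10, 1, 1, 0 }, {  20, 1, 1, 0 }, {  55, 0, 1, 0 },
	{  60, 0, 1, 1 }, { 110, 0, 1, 0 }, { 120, 1, 1, 1 },
	{ 240, 1, 1, 1 }, { 360, 0, 0, 1 }, { 540, 0, 0, 1 },
};

static int response_rate(int txrate, int txrate_ofdm)
{
	int rate = -1, mrate = 10;	/* fall back to 1 Mbps if all else fails */
	unsigned int i;

	for (i = 0; i < sizeof(rates) / sizeof(rates[0]); i++) {
		const struct toy_rate *r = &rates[i];

		if (r->rate > txrate)
			break;
		if (r->ofdm != txrate_ofdm)
			continue;	/* same modulation only */
		if (r->basic)
			rate = r->rate;
		else if (r->mandatory)
			mrate = r->rate;
	}
	return rate == -1 ? mrate : rate;
}

int main(void)
{
	int a = response_rate(540, 1), b = response_rate(110, 0);

	printf("ACK/CTS rate for 54 Mbps data: %d.%d Mbps\n", a / 10, a % 10);
	printf("ACK/CTS rate for 11 Mbps data: %d.%d Mbps\n", b / 10, b % 10);
	return 0;
}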
*/ - if (mode->mode == MODE_IEEE80211G && - (tx->u.tx.rate->flags & IEEE80211_RATE_ERP) && - tx->u.tx.unicast && tx->sdata->use_protection && - !(control->flags & IEEE80211_TXCTL_USE_RTS_CTS)) - control->flags |= IEEE80211_TXCTL_USE_CTS_PROTECT; - - /* Setup duration field for the first fragment of the frame. Duration - * for remaining fragments will be updated when they are being sent - * to low-level driver in ieee80211_tx(). */ - dur = ieee80211_duration(tx, is_multicast_ether_addr(hdr->addr1), - tx->fragmented ? tx->u.tx.extra_frag[0]->len : - 0); - hdr->duration_id = cpu_to_le16(dur); - - if ((control->flags & IEEE80211_TXCTL_USE_RTS_CTS) || - (control->flags & IEEE80211_TXCTL_USE_CTS_PROTECT)) { - struct ieee80211_rate *rate; - - /* Do not use multiple retry rates when using RTS/CTS */ - control->alt_retry_rate = -1; - - /* Use min(data rate, max base rate) as CTS/RTS rate */ - rate = tx->u.tx.rate; - while (rate > mode->rates && - !(rate->flags & IEEE80211_RATE_BASIC)) - rate--; - - control->rts_cts_rate = rate->val; - control->rts_rate = rate; - } - - if (tx->sta) { - tx->sta->tx_packets++; - tx->sta->tx_fragments++; - tx->sta->tx_bytes += tx->skb->len; - if (tx->u.tx.extra_frag) { - int i; - tx->sta->tx_fragments += tx->u.tx.num_extra_frag; - for (i = 0; i < tx->u.tx.num_extra_frag; i++) { - tx->sta->tx_bytes += - tx->u.tx.extra_frag[i]->len; - } - } - } - - return TXRX_CONTINUE; -} - - -static ieee80211_txrx_result -ieee80211_tx_h_check_assoc(struct ieee80211_txrx_data *tx) -{ -#ifdef CONFIG_MAC80211_VERBOSE_DEBUG - struct sk_buff *skb = tx->skb; - struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; -#endif /* CONFIG_MAC80211_VERBOSE_DEBUG */ - u32 sta_flags; - - if (unlikely(tx->local->sta_scanning != 0) && - ((tx->fc & IEEE80211_FCTL_FTYPE) != IEEE80211_FTYPE_MGMT || - (tx->fc & IEEE80211_FCTL_STYPE) != IEEE80211_STYPE_PROBE_REQ)) - return TXRX_DROP; - - if (tx->u.tx.ps_buffered) - return TXRX_CONTINUE; - - sta_flags = tx->sta ? tx->sta->flags : 0; - - if (likely(tx->u.tx.unicast)) { - if (unlikely(!(sta_flags & WLAN_STA_ASSOC) && - tx->sdata->type != IEEE80211_IF_TYPE_IBSS && - (tx->fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_DATA)) { -#ifdef CONFIG_MAC80211_VERBOSE_DEBUG - printk(KERN_DEBUG "%s: dropped data frame to not " - "associated station " MAC_FMT "\n", - tx->dev->name, MAC_ARG(hdr->addr1)); -#endif /* CONFIG_MAC80211_VERBOSE_DEBUG */ - I802_DEBUG_INC(tx->local->tx_handlers_drop_not_assoc); - return TXRX_DROP; - } - } else { - if (unlikely((tx->fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_DATA && - tx->local->num_sta == 0 && - !tx->local->allow_broadcast_always && - tx->sdata->type != IEEE80211_IF_TYPE_IBSS)) { - /* - * No associated STAs - no need to send multicast - * frames. 
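Condensing the two checks in ieee80211_tx_h_misc() above: RTS/CTS is requested for unicast frames whose length plus FCS exceeds the RTS threshold, and CTS-to-self protection is used for ERP-rate unicast frames when protection is needed for associated non-ERP stations and RTS/CTS was not already chosen. A minimal sketch of that ordering follows (simplified field names, not the kernel structures):

#include <stdio.h>

enum prot { PROT_NONE, PROT_RTS_CTS, PROT_CTS_TO_SELF };

static enum prot pick_protection(int unicast, int frame_len_with_fcs,
				 int rts_threshold, int erp_rate,
				 int protect_nonerp_peers)
{
	/* RTS/CTS wins if the frame is a unicast above the RTS threshold. */
	if (unicast && frame_len_with_fcs > rts_threshold)
		return PROT_RTS_CTS;

	/* Otherwise fall back to CTS-to-self when sending at an ERP rate
	 * while non-ERP (802.11b) stations need protection. */
	if (unicast && erp_rate && protect_nonerp_peers)
		return PROT_CTS_TO_SELF;

	return PROT_NONE;
}

int main(void)
{
	printf("%d\n", pick_protection(1, 1600, 1500, 1, 0)); /* 1: RTS/CTS */
	printf("%d\n", pick_protection(1,  400, 1500, 1, 1)); /* 2: CTS-to-self */
	printf("%d\n", pick_protection(0, 1600, 1500, 1, 1)); /* 0: multicast */
	return 0;
}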
- */ - return TXRX_DROP; - } - return TXRX_CONTINUE; - } - - if (unlikely(!tx->u.tx.mgmt_interface && tx->sdata->ieee802_1x && - !(sta_flags & WLAN_STA_AUTHORIZED))) { -#ifdef CONFIG_MAC80211_VERBOSE_DEBUG - printk(KERN_DEBUG "%s: dropped frame to " MAC_FMT - " (unauthorized port)\n", tx->dev->name, - MAC_ARG(hdr->addr1)); -#endif - I802_DEBUG_INC(tx->local->tx_handlers_drop_unauth_port); - return TXRX_DROP; - } - - return TXRX_CONTINUE; -} - -static ieee80211_txrx_result -ieee80211_tx_h_sequence(struct ieee80211_txrx_data *tx) -{ - struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx->skb->data; - - if (ieee80211_get_hdrlen(le16_to_cpu(hdr->frame_control)) >= 24) - ieee80211_include_sequence(tx->sdata, hdr); - - return TXRX_CONTINUE; -} - -/* This function is called whenever the AP is about to exceed the maximum limit - * of buffered frames for power saving STAs. This situation should not really - * happen often during normal operation, so dropping the oldest buffered packet - * from each queue should be OK to make some room for new frames. */ -static void purge_old_ps_buffers(struct ieee80211_local *local) -{ - int total = 0, purged = 0; - struct sk_buff *skb; - struct ieee80211_sub_if_data *sdata; - struct sta_info *sta; - - read_lock(&local->sub_if_lock); - list_for_each_entry(sdata, &local->sub_if_list, list) { - struct ieee80211_if_ap *ap; - if (sdata->dev == local->mdev || - sdata->type != IEEE80211_IF_TYPE_AP) - continue; - ap = &sdata->u.ap; - skb = skb_dequeue(&ap->ps_bc_buf); - if (skb) { - purged++; - dev_kfree_skb(skb); - } - total += skb_queue_len(&ap->ps_bc_buf); - } - read_unlock(&local->sub_if_lock); - - spin_lock_bh(&local->sta_lock); - list_for_each_entry(sta, &local->sta_list, list) { - skb = skb_dequeue(&sta->ps_tx_buf); - if (skb) { - purged++; - dev_kfree_skb(skb); - } - total += skb_queue_len(&sta->ps_tx_buf); - } - spin_unlock_bh(&local->sta_lock); - - local->total_ps_buffered = total; - printk(KERN_DEBUG "%s: PS buffers full - purged %d frames\n", - local->mdev->name, purged); -} - - -static inline ieee80211_txrx_result -ieee80211_tx_h_multicast_ps_buf(struct ieee80211_txrx_data *tx) -{ - /* broadcast/multicast frame */ - /* If any of the associated stations is in power save mode, - * the frame is buffered to be sent after DTIM beacon frame */ - if ((tx->local->hw.flags & IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING) && - tx->sdata->type != IEEE80211_IF_TYPE_WDS && - tx->sdata->bss && atomic_read(&tx->sdata->bss->num_sta_ps) && - !(tx->fc & IEEE80211_FCTL_ORDER)) { - if (tx->local->total_ps_buffered >= TOTAL_MAX_TX_BUFFER) - purge_old_ps_buffers(tx->local); - if (skb_queue_len(&tx->sdata->bss->ps_bc_buf) >= - AP_MAX_BC_BUFFER) { - if (net_ratelimit()) { - printk(KERN_DEBUG "%s: BC TX buffer full - " - "dropping the oldest frame\n", - tx->dev->name); - } - dev_kfree_skb(skb_dequeue(&tx->sdata->bss->ps_bc_buf)); - } else - tx->local->total_ps_buffered++; - skb_queue_tail(&tx->sdata->bss->ps_bc_buf, tx->skb); - return TXRX_QUEUED; - } - - return TXRX_CONTINUE; -} - - -static inline ieee80211_txrx_result -ieee80211_tx_h_unicast_ps_buf(struct ieee80211_txrx_data *tx) -{ - struct sta_info *sta = tx->sta; - - if (unlikely(!sta || - ((tx->fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_MGMT && - (tx->fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_PROBE_RESP))) - return TXRX_CONTINUE; - - if (unlikely((sta->flags & WLAN_STA_PS) && !sta->pspoll)) { - struct ieee80211_tx_packet_data *pkt_data; -#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG - printk(KERN_DEBUG "STA " MAC_FMT " aid %d: 
PS buffer (entries " - "before %d)\n", - MAC_ARG(sta->addr), sta->aid, - skb_queue_len(&sta->ps_tx_buf)); -#endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */ - sta->flags |= WLAN_STA_TIM; - if (tx->local->total_ps_buffered >= TOTAL_MAX_TX_BUFFER) - purge_old_ps_buffers(tx->local); - if (skb_queue_len(&sta->ps_tx_buf) >= STA_MAX_TX_BUFFER) { - struct sk_buff *old = skb_dequeue(&sta->ps_tx_buf); - if (net_ratelimit()) { - printk(KERN_DEBUG "%s: STA " MAC_FMT " TX " - "buffer full - dropping oldest frame\n", - tx->dev->name, MAC_ARG(sta->addr)); - } - dev_kfree_skb(old); - } else - tx->local->total_ps_buffered++; - /* Queue frame to be sent after STA sends an PS Poll frame */ - if (skb_queue_empty(&sta->ps_tx_buf)) { - if (tx->local->ops->set_tim) - tx->local->ops->set_tim(local_to_hw(tx->local), - sta->aid, 1); - if (tx->sdata->bss) - bss_tim_set(tx->local, tx->sdata->bss, sta->aid); - } - pkt_data = (struct ieee80211_tx_packet_data *)tx->skb->cb; - pkt_data->jiffies = jiffies; - skb_queue_tail(&sta->ps_tx_buf, tx->skb); - return TXRX_QUEUED; - } -#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG - else if (unlikely(sta->flags & WLAN_STA_PS)) { - printk(KERN_DEBUG "%s: STA " MAC_FMT " in PS mode, but pspoll " - "set -> send frame\n", tx->dev->name, - MAC_ARG(sta->addr)); - } -#endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */ - sta->pspoll = 0; - - return TXRX_CONTINUE; -} - - -static ieee80211_txrx_result -ieee80211_tx_h_ps_buf(struct ieee80211_txrx_data *tx) -{ - if (unlikely(tx->u.tx.ps_buffered)) - return TXRX_CONTINUE; - - if (tx->u.tx.unicast) - return ieee80211_tx_h_unicast_ps_buf(tx); - else - return ieee80211_tx_h_multicast_ps_buf(tx); -} - - -/* - * deal with packet injection down monitor interface - * with Radiotap Header -- only called for monitor mode interface - */ - -static ieee80211_txrx_result -__ieee80211_parse_tx_radiotap( - struct ieee80211_txrx_data *tx, - struct sk_buff *skb, struct ieee80211_tx_control *control) -{ - /* - * this is the moment to interpret and discard the radiotap header that - * must be at the start of the packet injected in Monitor mode - * - * Need to take some care with endian-ness since radiotap - * args are little-endian - */ - - struct ieee80211_radiotap_iterator iterator; - struct ieee80211_radiotap_header *rthdr = - (struct ieee80211_radiotap_header *) skb->data; - struct ieee80211_hw_mode *mode = tx->local->hw.conf.mode; - int ret = ieee80211_radiotap_iterator_init(&iterator, rthdr, skb->len); - - /* - * default control situation for all injected packets - * FIXME: this does not suit all usage cases, expand to allow control - */ - - control->retry_limit = 1; /* no retry */ - control->key_idx = -1; /* no encryption key */ - control->flags &= ~(IEEE80211_TXCTL_USE_RTS_CTS | - IEEE80211_TXCTL_USE_CTS_PROTECT); - control->flags |= IEEE80211_TXCTL_DO_NOT_ENCRYPT | - IEEE80211_TXCTL_NO_ACK; - control->antenna_sel_tx = 0; /* default to default antenna */ - - /* - * for every radiotap entry that is present - * (ieee80211_radiotap_iterator_next returns -ENOENT when no more - * entries present, or -EINVAL on error) - */ - - while (!ret) { - int i, target_rate; - - ret = ieee80211_radiotap_iterator_next(&iterator); - - if (ret) - continue; - - /* see if this argument is something we can use */ - switch (iterator.this_arg_index) { - /* - * You must take care when dereferencing iterator.this_arg - * for multibyte types... the pointer is not aligned. Use - * get_unaligned((type *)iterator.this_arg) to dereference - * iterator.this_arg for type "type" safely on all arches. 
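Two practical points in the radiotap handling here are worth a tiny standalone example: multibyte radiotap fields are little-endian and may be unaligned, so they are best assembled byte by byte, and the radiotap rate octet counts in 500 kbps units while mac80211 rates use 100 kbps units. The sample header bytes below are hand-made for illustration:

#include <stdio.h>
#include <stdint.h>

/* Read a little-endian u16 from an arbitrarily aligned buffer. */
static uint16_t get_le16(const uint8_t *p)
{
	return (uint16_t)(p[0] | (p[1] << 8));
}

int main(void)
{
	/* Minimal radiotap header: version 0, pad, it_len = 10,
	 * it_present = FLAGS | RATE (bits 1 and 2), then the two
	 * one-byte fields themselves: flags = 0, rate = 0x04. */
	const uint8_t rt[] = {
		0x00, 0x00,		/* it_version, it_pad */
		0x0a, 0x00,		/* it_len (LE) */
		0x06, 0x00, 0x00, 0x00,	/* it_present (LE) */
		0x00,			/* IEEE80211_RADIOTAP_FLAGS */
		0x04,			/* IEEE80211_RADIOTAP_RATE */
	};
	int it_len = get_le16(&rt[2]);
	int rate = rt[9] * 5;	/* 500 kbps units -> 100 kbps units */

	printf("radiotap header length: %d bytes\n", it_len);
	printf("injected rate: %d.%d Mbps\n", rate / 10, rate % 10);
	return 0;
}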
- */ - case IEEE80211_RADIOTAP_RATE: - /* - * radiotap rate u8 is in 500kbps units eg, 0x02=1Mbps - * ieee80211 rate int is in 100kbps units eg, 0x0a=1Mbps - */ - target_rate = (*iterator.this_arg) * 5; - for (i = 0; i < mode->num_rates; i++) { - struct ieee80211_rate *r = &mode->rates[i]; - - if (r->rate > target_rate) - continue; - - control->rate = r; - - if (r->flags & IEEE80211_RATE_PREAMBLE2) - control->tx_rate = r->val2; - else - control->tx_rate = r->val; - - /* end on exact match */ - if (r->rate == target_rate) - i = mode->num_rates; - } - break; - - case IEEE80211_RADIOTAP_ANTENNA: - /* - * radiotap uses 0 for 1st ant, mac80211 is 1 for - * 1st ant - */ - control->antenna_sel_tx = (*iterator.this_arg) + 1; - break; - - case IEEE80211_RADIOTAP_DBM_TX_POWER: - control->power_level = *iterator.this_arg; - break; - - case IEEE80211_RADIOTAP_FLAGS: - if (*iterator.this_arg & IEEE80211_RADIOTAP_F_FCS) { - /* - * this indicates that the skb we have been - * handed has the 32-bit FCS CRC at the end... - * we should react to that by snipping it off - * because it will be recomputed and added - * on transmission - */ - if (skb->len < (iterator.max_length + FCS_LEN)) - return TXRX_DROP; - - skb_trim(skb, skb->len - FCS_LEN); - } - break; - - default: - break; - } - } - - if (ret != -ENOENT) /* ie, if we didn't simply run out of fields */ - return TXRX_DROP; - - /* - * remove the radiotap header - * iterator->max_length was sanity-checked against - * skb->len by iterator init - */ - skb_pull(skb, iterator.max_length); - - return TXRX_CONTINUE; -} - - -static ieee80211_txrx_result inline -__ieee80211_tx_prepare(struct ieee80211_txrx_data *tx, - struct sk_buff *skb, - struct net_device *dev, - struct ieee80211_tx_control *control) -{ - struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); - struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; - struct ieee80211_sub_if_data *sdata; - ieee80211_txrx_result res = TXRX_CONTINUE; - - int hdrlen; - - memset(tx, 0, sizeof(*tx)); - tx->skb = skb; - tx->dev = dev; /* use original interface */ - tx->local = local; - tx->sdata = IEEE80211_DEV_TO_SUB_IF(dev); - tx->sta = sta_info_get(local, hdr->addr1); - tx->fc = le16_to_cpu(hdr->frame_control); - - /* - * set defaults for things that can be set by - * injected radiotap headers - */ - control->power_level = local->hw.conf.power_level; - control->antenna_sel_tx = local->hw.conf.antenna_sel_tx; - if (local->sta_antenna_sel != STA_ANTENNA_SEL_AUTO && tx->sta) - control->antenna_sel_tx = tx->sta->antenna_sel_tx; - - /* process and remove the injection radiotap header */ - sdata = IEEE80211_DEV_TO_SUB_IF(dev); - if (unlikely(sdata->type == IEEE80211_IF_TYPE_MNTR)) { - if (__ieee80211_parse_tx_radiotap(tx, skb, control) == - TXRX_DROP) { - return TXRX_DROP; - } - /* - * we removed the radiotap header after this point, - * we filled control with what we could use - * set to the actual ieee header now - */ - hdr = (struct ieee80211_hdr *) skb->data; - res = TXRX_QUEUED; /* indication it was monitor packet */ - } - - tx->u.tx.control = control; - tx->u.tx.unicast = !is_multicast_ether_addr(hdr->addr1); - if (is_multicast_ether_addr(hdr->addr1)) - control->flags |= IEEE80211_TXCTL_NO_ACK; - else - control->flags &= ~IEEE80211_TXCTL_NO_ACK; - tx->fragmented = local->fragmentation_threshold < - IEEE80211_MAX_FRAG_THRESHOLD && tx->u.tx.unicast && - skb->len + FCS_LEN > local->fragmentation_threshold && - (!local->ops->set_frag_threshold); - if (!tx->sta) - control->flags |= 
IEEE80211_TXCTL_CLEAR_DST_MASK; - else if (tx->sta->clear_dst_mask) { - control->flags |= IEEE80211_TXCTL_CLEAR_DST_MASK; - tx->sta->clear_dst_mask = 0; - } - hdrlen = ieee80211_get_hdrlen(tx->fc); - if (skb->len > hdrlen + sizeof(rfc1042_header) + 2) { - u8 *pos = &skb->data[hdrlen + sizeof(rfc1042_header)]; - tx->ethertype = (pos[0] << 8) | pos[1]; - } - control->flags |= IEEE80211_TXCTL_FIRST_FRAGMENT; - - return res; -} - -static int inline is_ieee80211_device(struct net_device *dev, - struct net_device *master) -{ - return (wdev_priv(dev->ieee80211_ptr) == - wdev_priv(master->ieee80211_ptr)); -} - -/* Device in tx->dev has a reference added; use dev_put(tx->dev) when - * finished with it. */ -static int inline ieee80211_tx_prepare(struct ieee80211_txrx_data *tx, - struct sk_buff *skb, - struct net_device *mdev, - struct ieee80211_tx_control *control) -{ - struct ieee80211_tx_packet_data *pkt_data; - struct net_device *dev; - - pkt_data = (struct ieee80211_tx_packet_data *)skb->cb; - dev = dev_get_by_index(pkt_data->ifindex); - if (unlikely(dev && !is_ieee80211_device(dev, mdev))) { - dev_put(dev); - dev = NULL; - } - if (unlikely(!dev)) - return -ENODEV; - __ieee80211_tx_prepare(tx, skb, dev, control); - return 0; -} - -static inline int __ieee80211_queue_stopped(const struct ieee80211_local *local, - int queue) -{ - return test_bit(IEEE80211_LINK_STATE_XOFF, &local->state[queue]); -} - -static inline int __ieee80211_queue_pending(const struct ieee80211_local *local, - int queue) -{ - return test_bit(IEEE80211_LINK_STATE_PENDING, &local->state[queue]); -} - -#define IEEE80211_TX_OK 0 -#define IEEE80211_TX_AGAIN 1 -#define IEEE80211_TX_FRAG_AGAIN 2 - -static int __ieee80211_tx(struct ieee80211_local *local, struct sk_buff *skb, - struct ieee80211_txrx_data *tx) -{ - struct ieee80211_tx_control *control = tx->u.tx.control; - int ret, i; - - if (!ieee80211_qdisc_installed(local->mdev) && - __ieee80211_queue_stopped(local, 0)) { - netif_stop_queue(local->mdev); - return IEEE80211_TX_AGAIN; - } - if (skb) { - ieee80211_dump_frame(local->mdev->name, "TX to low-level driver", skb); - ret = local->ops->tx(local_to_hw(local), skb, control); - if (ret) - return IEEE80211_TX_AGAIN; - local->mdev->trans_start = jiffies; - ieee80211_led_tx(local, 1); - } - if (tx->u.tx.extra_frag) { - control->flags &= ~(IEEE80211_TXCTL_USE_RTS_CTS | - IEEE80211_TXCTL_USE_CTS_PROTECT | - IEEE80211_TXCTL_CLEAR_DST_MASK | - IEEE80211_TXCTL_FIRST_FRAGMENT); - for (i = 0; i < tx->u.tx.num_extra_frag; i++) { - if (!tx->u.tx.extra_frag[i]) - continue; - if (__ieee80211_queue_stopped(local, control->queue)) - return IEEE80211_TX_FRAG_AGAIN; - if (i == tx->u.tx.num_extra_frag) { - control->tx_rate = tx->u.tx.last_frag_hwrate; - control->rate = tx->u.tx.last_frag_rate; - if (tx->u.tx.probe_last_frag) - control->flags |= - IEEE80211_TXCTL_RATE_CTRL_PROBE; - else - control->flags &= - ~IEEE80211_TXCTL_RATE_CTRL_PROBE; - } - - ieee80211_dump_frame(local->mdev->name, - "TX to low-level driver", - tx->u.tx.extra_frag[i]); - ret = local->ops->tx(local_to_hw(local), - tx->u.tx.extra_frag[i], - control); - if (ret) - return IEEE80211_TX_FRAG_AGAIN; - local->mdev->trans_start = jiffies; - ieee80211_led_tx(local, 1); - tx->u.tx.extra_frag[i] = NULL; - } - kfree(tx->u.tx.extra_frag); - tx->u.tx.extra_frag = NULL; - } - return IEEE80211_TX_OK; -} - -static int ieee80211_tx(struct net_device *dev, struct sk_buff *skb, - struct ieee80211_tx_control *control, int mgmt) -{ - struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); - 
struct sta_info *sta; - ieee80211_tx_handler *handler; - struct ieee80211_txrx_data tx; - ieee80211_txrx_result res = TXRX_DROP, res_prepare; - int ret, i; - - WARN_ON(__ieee80211_queue_pending(local, control->queue)); - - if (unlikely(skb->len < 10)) { - dev_kfree_skb(skb); - return 0; - } - - res_prepare = __ieee80211_tx_prepare(&tx, skb, dev, control); - - if (res_prepare == TXRX_DROP) { - dev_kfree_skb(skb); - return 0; - } - - sta = tx.sta; - tx.u.tx.mgmt_interface = mgmt; - tx.u.tx.mode = local->hw.conf.mode; - - if (res_prepare == TXRX_QUEUED) { /* if it was an injected packet */ - res = TXRX_CONTINUE; - } else { - for (handler = local->tx_handlers; *handler != NULL; - handler++) { - res = (*handler)(&tx); - if (res != TXRX_CONTINUE) - break; - } - } - - skb = tx.skb; /* handlers are allowed to change skb */ - - if (sta) - sta_info_put(sta); - - if (unlikely(res == TXRX_DROP)) { - I802_DEBUG_INC(local->tx_handlers_drop); - goto drop; - } - - if (unlikely(res == TXRX_QUEUED)) { - I802_DEBUG_INC(local->tx_handlers_queued); - return 0; - } - - if (tx.u.tx.extra_frag) { - for (i = 0; i < tx.u.tx.num_extra_frag; i++) { - int next_len, dur; - struct ieee80211_hdr *hdr = - (struct ieee80211_hdr *) - tx.u.tx.extra_frag[i]->data; - - if (i + 1 < tx.u.tx.num_extra_frag) { - next_len = tx.u.tx.extra_frag[i + 1]->len; - } else { - next_len = 0; - tx.u.tx.rate = tx.u.tx.last_frag_rate; - tx.u.tx.last_frag_hwrate = tx.u.tx.rate->val; - } - dur = ieee80211_duration(&tx, 0, next_len); - hdr->duration_id = cpu_to_le16(dur); - } - } - -retry: - ret = __ieee80211_tx(local, skb, &tx); - if (ret) { - struct ieee80211_tx_stored_packet *store = - &local->pending_packet[control->queue]; - - if (ret == IEEE80211_TX_FRAG_AGAIN) - skb = NULL; - set_bit(IEEE80211_LINK_STATE_PENDING, - &local->state[control->queue]); - smp_mb(); - /* When the driver gets out of buffers during sending of - * fragments and calls ieee80211_stop_queue, there is - * a small window between IEEE80211_LINK_STATE_XOFF and - * IEEE80211_LINK_STATE_PENDING flags are set. If a buffer - * gets available in that window (i.e. driver calls - * ieee80211_wake_queue), we would end up with ieee80211_tx - * called with IEEE80211_LINK_STATE_PENDING. Prevent this by - * continuing transmitting here when that situation is - * possible to have happened. 
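The retry shape described in that comment (mark the queue pending, order that store before re-reading the stopped bit, and transmit again if the driver woke the queue in the window) looks roughly like the sketch below. It is a simplified userspace illustration using C11 atomics, not the kernel's set_bit()/smp_mb() primitives:

#include <stdatomic.h>
#include <stdio.h>

#define Q_XOFF     (1u << 0)	/* driver stopped the queue */
#define Q_PENDING  (1u << 1)	/* a frame is parked, waiting for wake */

static _Atomic unsigned int queue_state;

/* Returns 1 if the frame was (re)transmitted, 0 if it stays parked. */
static int park_or_retry(void)
{
	atomic_fetch_or(&queue_state, Q_PENDING);
	/* The seq_cst fetch_or above stands in for the smp_mb() between
	 * setting PENDING and re-reading XOFF. */
	if (!(atomic_load(&queue_state) & Q_XOFF)) {
		/* Queue woke up in the window: undo PENDING and retry. */
		atomic_fetch_and(&queue_state, ~Q_PENDING);
		return 1;
	}
	return 0;
}

int main(void)
{
	atomic_store(&queue_state, Q_XOFF);
	printf("stopped queue: %s\n", park_or_retry() ? "retried" : "parked");

	atomic_store(&queue_state, 0);
	printf("woken queue  : %s\n", park_or_retry() ? "retried" : "parked");
	return 0;
}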
*/ - if (!__ieee80211_queue_stopped(local, control->queue)) { - clear_bit(IEEE80211_LINK_STATE_PENDING, - &local->state[control->queue]); - goto retry; - } - memcpy(&store->control, control, - sizeof(struct ieee80211_tx_control)); - store->skb = skb; - store->extra_frag = tx.u.tx.extra_frag; - store->num_extra_frag = tx.u.tx.num_extra_frag; - store->last_frag_hwrate = tx.u.tx.last_frag_hwrate; - store->last_frag_rate = tx.u.tx.last_frag_rate; - store->last_frag_rate_ctrl_probe = tx.u.tx.probe_last_frag; - } - return 0; - - drop: - if (skb) - dev_kfree_skb(skb); - for (i = 0; i < tx.u.tx.num_extra_frag; i++) - if (tx.u.tx.extra_frag[i]) - dev_kfree_skb(tx.u.tx.extra_frag[i]); - kfree(tx.u.tx.extra_frag); - return 0; -} - -static void ieee80211_tx_pending(unsigned long data) -{ - struct ieee80211_local *local = (struct ieee80211_local *)data; - struct net_device *dev = local->mdev; - struct ieee80211_tx_stored_packet *store; - struct ieee80211_txrx_data tx; - int i, ret, reschedule = 0; - - netif_tx_lock_bh(dev); - for (i = 0; i < local->hw.queues; i++) { - if (__ieee80211_queue_stopped(local, i)) - continue; - if (!__ieee80211_queue_pending(local, i)) { - reschedule = 1; - continue; - } - store = &local->pending_packet[i]; - tx.u.tx.control = &store->control; - tx.u.tx.extra_frag = store->extra_frag; - tx.u.tx.num_extra_frag = store->num_extra_frag; - tx.u.tx.last_frag_hwrate = store->last_frag_hwrate; - tx.u.tx.last_frag_rate = store->last_frag_rate; - tx.u.tx.probe_last_frag = store->last_frag_rate_ctrl_probe; - ret = __ieee80211_tx(local, store->skb, &tx); - if (ret) { - if (ret == IEEE80211_TX_FRAG_AGAIN) - store->skb = NULL; - } else { - clear_bit(IEEE80211_LINK_STATE_PENDING, - &local->state[i]); - reschedule = 1; - } - } - netif_tx_unlock_bh(dev); - if (reschedule) { - if (!ieee80211_qdisc_installed(dev)) { - if (!__ieee80211_queue_stopped(local, 0)) - netif_wake_queue(dev); - } else - netif_schedule(dev); - } -} - -static void ieee80211_clear_tx_pending(struct ieee80211_local *local) -{ - int i, j; - struct ieee80211_tx_stored_packet *store; - - for (i = 0; i < local->hw.queues; i++) { - if (!__ieee80211_queue_pending(local, i)) - continue; - store = &local->pending_packet[i]; - kfree_skb(store->skb); - for (j = 0; j < store->num_extra_frag; j++) - kfree_skb(store->extra_frag[j]); - kfree(store->extra_frag); - clear_bit(IEEE80211_LINK_STATE_PENDING, &local->state[i]); - } -} - -static int ieee80211_master_start_xmit(struct sk_buff *skb, - struct net_device *dev) -{ - struct ieee80211_tx_control control; - struct ieee80211_tx_packet_data *pkt_data; - struct net_device *odev = NULL; - struct ieee80211_sub_if_data *osdata; - int headroom; - int ret; - - /* - * copy control out of the skb so other people can use skb->cb - */ - pkt_data = (struct ieee80211_tx_packet_data *)skb->cb; - memset(&control, 0, sizeof(struct ieee80211_tx_control)); - - if (pkt_data->ifindex) - odev = dev_get_by_index(pkt_data->ifindex); - if (unlikely(odev && !is_ieee80211_device(odev, dev))) { - dev_put(odev); - odev = NULL; - } - if (unlikely(!odev)) { -#ifdef CONFIG_MAC80211_VERBOSE_DEBUG - printk(KERN_DEBUG "%s: Discarded packet with nonexistent " - "originating device\n", dev->name); -#endif - dev_kfree_skb(skb); - return 0; - } - osdata = IEEE80211_DEV_TO_SUB_IF(odev); - - headroom = osdata->local->tx_headroom + IEEE80211_ENCRYPT_HEADROOM; - if (skb_headroom(skb) < headroom) { - if (pskb_expand_head(skb, headroom, 0, GFP_ATOMIC)) { - dev_kfree_skb(skb); - dev_put(odev); - return 0; - } - } - - 
control.ifindex = odev->ifindex; - control.type = osdata->type; - if (pkt_data->req_tx_status) - control.flags |= IEEE80211_TXCTL_REQ_TX_STATUS; - if (pkt_data->do_not_encrypt) - control.flags |= IEEE80211_TXCTL_DO_NOT_ENCRYPT; - if (pkt_data->requeue) - control.flags |= IEEE80211_TXCTL_REQUEUE; - control.queue = pkt_data->queue; - - ret = ieee80211_tx(odev, skb, &control, - control.type == IEEE80211_IF_TYPE_MGMT); - dev_put(odev); - - return ret; -} - - -int ieee80211_monitor_start_xmit(struct sk_buff *skb, - struct net_device *dev) -{ - struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); - struct ieee80211_tx_packet_data *pkt_data; - struct ieee80211_radiotap_header *prthdr = - (struct ieee80211_radiotap_header *)skb->data; - u16 len; - - /* - * there must be a radiotap header at the - * start in this case - */ - if (unlikely(prthdr->it_version)) { - /* only version 0 is supported */ - dev_kfree_skb(skb); - return NETDEV_TX_OK; - } - - skb->dev = local->mdev; - - pkt_data = (struct ieee80211_tx_packet_data *)skb->cb; - memset(pkt_data, 0, sizeof(*pkt_data)); - pkt_data->ifindex = dev->ifindex; - pkt_data->mgmt_iface = 0; - pkt_data->do_not_encrypt = 1; - - /* above needed because we set skb device to master */ - - /* - * fix up the pointers accounting for the radiotap - * header still being in there. We are being given - * a precooked IEEE80211 header so no need for - * normal processing - */ - len = le16_to_cpu(get_unaligned(&prthdr->it_len)); - skb_set_mac_header(skb, len); - skb_set_network_header(skb, len + sizeof(struct ieee80211_hdr)); - skb_set_transport_header(skb, len + sizeof(struct ieee80211_hdr)); - - /* - * pass the radiotap header up to - * the next stage intact - */ - dev_queue_xmit(skb); - - return NETDEV_TX_OK; -} - - -/** - * ieee80211_subif_start_xmit - netif start_xmit function for Ethernet-type - * subinterfaces (wlan#, WDS, and VLAN interfaces) - * @skb: packet to be sent - * @dev: incoming interface - * - * Returns: 0 on success (and frees skb in this case) or 1 on failure (skb will - * not be freed, and caller is responsible for either retrying later or freeing - * skb). - * - * This function takes in an Ethernet header and encapsulates it with suitable - * IEEE 802.11 header based on which interface the packet is coming in. The - * encapsulated packet will then be passed to master interface, wlan#.11, for - * transmission (through low-level driver). 
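For the station case, the encapsulation described above turns a 14-byte Ethernet header into a 24-byte 802.11 data header with ToDS set (addr1 = BSSID, addr2 = SA, addr3 = DA), followed by an RFC 1042 SNAP header carrying the original ethertype. A self-contained sketch of just that transformation (sample addresses are arbitrary, not from the patch):

#include <stdio.h>
#include <string.h>
#include <stdint.h>

#define FC_TYPE_DATA	0x0008	/* IEEE80211_FTYPE_DATA | IEEE80211_STYPE_DATA */
#define FC_TODS		0x0100	/* IEEE80211_FCTL_TODS */

static const uint8_t rfc1042_snap[6] = { 0xaa, 0xaa, 0x03, 0x00, 0x00, 0x00 };

/* Build 802.11 header + SNAP into out[]; returns the bytes written.
 * eth points at a classic 14-byte Ethernet header (DA, SA, ethertype). */
static size_t encap_sta(uint8_t *out, const uint8_t *eth, const uint8_t *bssid)
{
	uint16_t fc = FC_TYPE_DATA | FC_TODS;
	size_t off = 0;

	out[off++] = fc & 0xff;		/* frame control, little-endian */
	out[off++] = fc >> 8;
	out[off++] = 0; out[off++] = 0;	/* duration, filled in later */
	memcpy(out + off, bssid, 6);   off += 6;	/* addr1 = BSSID */
	memcpy(out + off, eth + 6, 6); off += 6;	/* addr2 = SA */
	memcpy(out + off, eth, 6);     off += 6;	/* addr3 = DA */
	out[off++] = 0; out[off++] = 0;	/* sequence control */
	memcpy(out + off, rfc1042_snap, 6); off += 6;	/* LLC/SNAP */
	memcpy(out + off, eth + 12, 2); off += 2;	/* original ethertype */
	return off;
}

int main(void)
{
	const uint8_t eth[14] = {
		0x00, 0x11, 0x22, 0x33, 0x44, 0x55,	/* DA */
		0x66, 0x77, 0x88, 0x99, 0xaa, 0xbb,	/* SA */
		0x08, 0x00,				/* IPv4 */
	};
	const uint8_t bssid[6] = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 };
	uint8_t hdr[64];
	size_t n = encap_sta(hdr, eth, bssid), i;

	for (i = 0; i < n; i++)
		printf("%02x%s", hdr[i], (i % 8 == 7) ? "\n" : " ");
	printf("\n%zu header+SNAP bytes\n", n);
	return 0;
}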
- */ -int ieee80211_subif_start_xmit(struct sk_buff *skb, - struct net_device *dev) -{ - struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); - struct ieee80211_tx_packet_data *pkt_data; - struct ieee80211_sub_if_data *sdata; - int ret = 1, head_need; - u16 ethertype, hdrlen, fc; - struct ieee80211_hdr hdr; - const u8 *encaps_data; - int encaps_len, skip_header_bytes; - int nh_pos, h_pos, no_encrypt = 0; - struct sta_info *sta; - - sdata = IEEE80211_DEV_TO_SUB_IF(dev); - if (unlikely(skb->len < ETH_HLEN)) { - printk(KERN_DEBUG "%s: short skb (len=%d)\n", - dev->name, skb->len); - ret = 0; - goto fail; - } - - nh_pos = skb_network_header(skb) - skb->data; - h_pos = skb_transport_header(skb) - skb->data; - - /* convert Ethernet header to proper 802.11 header (based on - * operation mode) */ - ethertype = (skb->data[12] << 8) | skb->data[13]; - /* TODO: handling for 802.1x authorized/unauthorized port */ - fc = IEEE80211_FTYPE_DATA | IEEE80211_STYPE_DATA; - - if (likely(sdata->type == IEEE80211_IF_TYPE_AP || - sdata->type == IEEE80211_IF_TYPE_VLAN)) { - fc |= IEEE80211_FCTL_FROMDS; - /* DA BSSID SA */ - memcpy(hdr.addr1, skb->data, ETH_ALEN); - memcpy(hdr.addr2, dev->dev_addr, ETH_ALEN); - memcpy(hdr.addr3, skb->data + ETH_ALEN, ETH_ALEN); - hdrlen = 24; - } else if (sdata->type == IEEE80211_IF_TYPE_WDS) { - fc |= IEEE80211_FCTL_FROMDS | IEEE80211_FCTL_TODS; - /* RA TA DA SA */ - memcpy(hdr.addr1, sdata->u.wds.remote_addr, ETH_ALEN); - memcpy(hdr.addr2, dev->dev_addr, ETH_ALEN); - memcpy(hdr.addr3, skb->data, ETH_ALEN); - memcpy(hdr.addr4, skb->data + ETH_ALEN, ETH_ALEN); - hdrlen = 30; - } else if (sdata->type == IEEE80211_IF_TYPE_STA) { - fc |= IEEE80211_FCTL_TODS; - /* BSSID SA DA */ - memcpy(hdr.addr1, sdata->u.sta.bssid, ETH_ALEN); - memcpy(hdr.addr2, skb->data + ETH_ALEN, ETH_ALEN); - memcpy(hdr.addr3, skb->data, ETH_ALEN); - hdrlen = 24; - } else if (sdata->type == IEEE80211_IF_TYPE_IBSS) { - /* DA SA BSSID */ - memcpy(hdr.addr1, skb->data, ETH_ALEN); - memcpy(hdr.addr2, skb->data + ETH_ALEN, ETH_ALEN); - memcpy(hdr.addr3, sdata->u.sta.bssid, ETH_ALEN); - hdrlen = 24; - } else { - ret = 0; - goto fail; - } - - /* receiver is QoS enabled, use a QoS type frame */ - sta = sta_info_get(local, hdr.addr1); - if (sta) { - if (sta->flags & WLAN_STA_WME) { - fc |= IEEE80211_STYPE_QOS_DATA; - hdrlen += 2; - } - sta_info_put(sta); - } - - hdr.frame_control = cpu_to_le16(fc); - hdr.duration_id = 0; - hdr.seq_ctrl = 0; - - skip_header_bytes = ETH_HLEN; - if (ethertype == ETH_P_AARP || ethertype == ETH_P_IPX) { - encaps_data = bridge_tunnel_header; - encaps_len = sizeof(bridge_tunnel_header); - skip_header_bytes -= 2; - } else if (ethertype >= 0x600) { - encaps_data = rfc1042_header; - encaps_len = sizeof(rfc1042_header); - skip_header_bytes -= 2; - } else { - encaps_data = NULL; - encaps_len = 0; - } - - skb_pull(skb, skip_header_bytes); - nh_pos -= skip_header_bytes; - h_pos -= skip_header_bytes; - - /* TODO: implement support for fragments so that there is no need to - * reallocate and copy payload; it might be enough to support one - * extra fragment that would be copied in the beginning of the frame - * data.. 
anyway, it would be nice to include this into skb structure - * somehow - * - * There are few options for this: - * use skb->cb as an extra space for 802.11 header - * allocate new buffer if not enough headroom - * make sure that there is enough headroom in every skb by increasing - * build in headroom in __dev_alloc_skb() (linux/skbuff.h) and - * alloc_skb() (net/core/skbuff.c) - */ - head_need = hdrlen + encaps_len + local->tx_headroom; - head_need -= skb_headroom(skb); - - /* We are going to modify skb data, so make a copy of it if happens to - * be cloned. This could happen, e.g., with Linux bridge code passing - * us broadcast frames. */ - - if (head_need > 0 || skb_cloned(skb)) { -#if 0 - printk(KERN_DEBUG "%s: need to reallocate buffer for %d bytes " - "of headroom\n", dev->name, head_need); -#endif - - if (skb_cloned(skb)) - I802_DEBUG_INC(local->tx_expand_skb_head_cloned); - else - I802_DEBUG_INC(local->tx_expand_skb_head); - /* Since we have to reallocate the buffer, make sure that there - * is enough room for possible WEP IV/ICV and TKIP (8 bytes - * before payload and 12 after). */ - if (pskb_expand_head(skb, (head_need > 0 ? head_need + 8 : 8), - 12, GFP_ATOMIC)) { - printk(KERN_DEBUG "%s: failed to reallocate TX buffer" - "\n", dev->name); - goto fail; - } - } - - if (encaps_data) { - memcpy(skb_push(skb, encaps_len), encaps_data, encaps_len); - nh_pos += encaps_len; - h_pos += encaps_len; - } - memcpy(skb_push(skb, hdrlen), &hdr, hdrlen); - nh_pos += hdrlen; - h_pos += hdrlen; - - pkt_data = (struct ieee80211_tx_packet_data *)skb->cb; - memset(pkt_data, 0, sizeof(struct ieee80211_tx_packet_data)); - pkt_data->ifindex = dev->ifindex; - pkt_data->mgmt_iface = (sdata->type == IEEE80211_IF_TYPE_MGMT); - pkt_data->do_not_encrypt = no_encrypt; - - skb->dev = local->mdev; - sdata->stats.tx_packets++; - sdata->stats.tx_bytes += skb->len; - - /* Update skb pointers to various headers since this modified frame - * is going to go through Linux networking code that may potentially - * need things like pointer to IP header. */ - skb_set_mac_header(skb, 0); - skb_set_network_header(skb, nh_pos); - skb_set_transport_header(skb, h_pos); - - dev->trans_start = jiffies; - dev_queue_xmit(skb); - - return 0; - - fail: - if (!ret) - dev_kfree_skb(skb); - - return ret; -} - - -/* - * This is the transmit routine for the 802.11 type interfaces - * called by upper layers of the linux networking - * stack when it has a frame to transmit - */ -static int -ieee80211_mgmt_start_xmit(struct sk_buff *skb, struct net_device *dev) -{ - struct ieee80211_sub_if_data *sdata; - struct ieee80211_tx_packet_data *pkt_data; - struct ieee80211_hdr *hdr; - u16 fc; - - sdata = IEEE80211_DEV_TO_SUB_IF(dev); - - if (skb->len < 10) { - dev_kfree_skb(skb); - return 0; - } - - if (skb_headroom(skb) < sdata->local->tx_headroom) { - if (pskb_expand_head(skb, sdata->local->tx_headroom, - 0, GFP_ATOMIC)) { - dev_kfree_skb(skb); - return 0; - } - } - - hdr = (struct ieee80211_hdr *) skb->data; - fc = le16_to_cpu(hdr->frame_control); - - pkt_data = (struct ieee80211_tx_packet_data *) skb->cb; - memset(pkt_data, 0, sizeof(struct ieee80211_tx_packet_data)); - pkt_data->ifindex = sdata->dev->ifindex; - pkt_data->mgmt_iface = (sdata->type == IEEE80211_IF_TYPE_MGMT); - - skb->priority = 20; /* use hardcoded priority for mgmt TX queue */ - skb->dev = sdata->local->mdev; - - /* - * We're using the protocol field of the the frame control header - * to request TX callback for hostapd. BIT(1) is checked. 
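The BIT(1) trick mentioned above (hostapd setting an otherwise-unused protocol-version bit in frame_control to request a TX status callback, which mac80211 records and then clears) amounts to the following; the helper name is made up for illustration:

#include <stdio.h>
#include <stdint.h>

#define BIT(n) (1u << (n))

/* Hypothetical helper: report whether the "give me TX status" bit was
 * set, and clear it so the frame goes out with protocol version 0. */
static int take_tx_status_request(uint16_t *frame_control)
{
	int requested = (*frame_control & BIT(1)) == BIT(1);

	if (requested)
		*frame_control &= (uint16_t)~BIT(1);
	return requested;
}

int main(void)
{
	uint16_t fc = 0x00c0 | BIT(1);	/* arbitrary mgmt frame control */
	int req;

	req = take_tx_status_request(&fc);
	printf("first pass : requested=%d, fc now 0x%04x\n", req, fc);
	req = take_tx_status_request(&fc);
	printf("second pass: requested=%d, fc now 0x%04x\n", req, fc);
	return 0;
}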
- */ - if ((fc & BIT(1)) == BIT(1)) { - pkt_data->req_tx_status = 1; - fc &= ~BIT(1); - hdr->frame_control = cpu_to_le16(fc); - } - - pkt_data->do_not_encrypt = !(fc & IEEE80211_FCTL_PROTECTED); - - sdata->stats.tx_packets++; - sdata->stats.tx_bytes += skb->len; - - dev_queue_xmit(skb); - - return 0; -} - - -static void ieee80211_beacon_add_tim(struct ieee80211_local *local, - struct ieee80211_if_ap *bss, - struct sk_buff *skb) -{ - u8 *pos, *tim; - int aid0 = 0; - int i, have_bits = 0, n1, n2; - - /* Generate bitmap for TIM only if there are any STAs in power save - * mode. */ - spin_lock_bh(&local->sta_lock); - if (atomic_read(&bss->num_sta_ps) > 0) - /* in the hope that this is faster than - * checking byte-for-byte */ - have_bits = !bitmap_empty((unsigned long*)bss->tim, - IEEE80211_MAX_AID+1); - - if (bss->dtim_count == 0) - bss->dtim_count = bss->dtim_period - 1; - else - bss->dtim_count--; - - tim = pos = (u8 *) skb_put(skb, 6); - *pos++ = WLAN_EID_TIM; - *pos++ = 4; - *pos++ = bss->dtim_count; - *pos++ = bss->dtim_period; - - if (bss->dtim_count == 0 && !skb_queue_empty(&bss->ps_bc_buf)) - aid0 = 1; - - if (have_bits) { - /* Find largest even number N1 so that bits numbered 1 through - * (N1 x 8) - 1 in the bitmap are 0 and number N2 so that bits - * (N2 + 1) x 8 through 2007 are 0. */ - n1 = 0; - for (i = 0; i < IEEE80211_MAX_TIM_LEN; i++) { - if (bss->tim[i]) { - n1 = i & 0xfe; - break; - } - } - n2 = n1; - for (i = IEEE80211_MAX_TIM_LEN - 1; i >= n1; i--) { - if (bss->tim[i]) { - n2 = i; - break; - } - } - - /* Bitmap control */ - *pos++ = n1 | aid0; - /* Part Virt Bitmap */ - memcpy(pos, bss->tim + n1, n2 - n1 + 1); - - tim[1] = n2 - n1 + 4; - skb_put(skb, n2 - n1); - } else { - *pos++ = aid0; /* Bitmap control */ - *pos++ = 0; /* Part Virt Bitmap */ - } - spin_unlock_bh(&local->sta_lock); -} - - -struct sk_buff * ieee80211_beacon_get(struct ieee80211_hw *hw, int if_id, - struct ieee80211_tx_control *control) -{ - struct ieee80211_local *local = hw_to_local(hw); - struct sk_buff *skb; - struct net_device *bdev; - struct ieee80211_sub_if_data *sdata = NULL; - struct ieee80211_if_ap *ap = NULL; - struct ieee80211_rate *rate; - struct rate_control_extra extra; - u8 *b_head, *b_tail; - int bh_len, bt_len; - - bdev = dev_get_by_index(if_id); - if (bdev) { - sdata = IEEE80211_DEV_TO_SUB_IF(bdev); - ap = &sdata->u.ap; - dev_put(bdev); - } - - if (!ap || sdata->type != IEEE80211_IF_TYPE_AP || - !ap->beacon_head) { -#ifdef CONFIG_MAC80211_VERBOSE_DEBUG - if (net_ratelimit()) - printk(KERN_DEBUG "no beacon data avail for idx=%d " - "(%s)\n", if_id, bdev ? 
bdev->name : "N/A"); -#endif /* CONFIG_MAC80211_VERBOSE_DEBUG */ - return NULL; - } - - /* Assume we are generating the normal beacon locally */ - b_head = ap->beacon_head; - b_tail = ap->beacon_tail; - bh_len = ap->beacon_head_len; - bt_len = ap->beacon_tail_len; - - skb = dev_alloc_skb(local->tx_headroom + - bh_len + bt_len + 256 /* maximum TIM len */); - if (!skb) - return NULL; - - skb_reserve(skb, local->tx_headroom); - memcpy(skb_put(skb, bh_len), b_head, bh_len); - - ieee80211_include_sequence(sdata, (struct ieee80211_hdr *)skb->data); - - ieee80211_beacon_add_tim(local, ap, skb); - - if (b_tail) { - memcpy(skb_put(skb, bt_len), b_tail, bt_len); - } - - if (control) { - memset(&extra, 0, sizeof(extra)); - extra.mode = local->oper_hw_mode; - - rate = rate_control_get_rate(local, local->mdev, skb, &extra); - if (!rate) { - if (net_ratelimit()) { - printk(KERN_DEBUG "%s: ieee80211_beacon_get: no rate " - "found\n", local->mdev->name); - } - dev_kfree_skb(skb); - return NULL; - } - - control->tx_rate = (local->short_preamble && - (rate->flags & IEEE80211_RATE_PREAMBLE2)) ? - rate->val2 : rate->val; - control->antenna_sel_tx = local->hw.conf.antenna_sel_tx; - control->power_level = local->hw.conf.power_level; - control->flags |= IEEE80211_TXCTL_NO_ACK; - control->retry_limit = 1; - control->flags |= IEEE80211_TXCTL_CLEAR_DST_MASK; - } - - ap->num_beacons++; - return skb; -} -EXPORT_SYMBOL(ieee80211_beacon_get); - -__le16 ieee80211_rts_duration(struct ieee80211_hw *hw, - size_t frame_len, - const struct ieee80211_tx_control *frame_txctl) -{ - struct ieee80211_local *local = hw_to_local(hw); - struct ieee80211_rate *rate; - int short_preamble = local->short_preamble; - int erp; - u16 dur; - - rate = frame_txctl->rts_rate; - erp = !!(rate->flags & IEEE80211_RATE_ERP); - - /* CTS duration */ - dur = ieee80211_frame_duration(local, 10, rate->rate, - erp, short_preamble); - /* Data frame duration */ - dur += ieee80211_frame_duration(local, frame_len, rate->rate, - erp, short_preamble); - /* ACK duration */ - dur += ieee80211_frame_duration(local, 10, rate->rate, - erp, short_preamble); - - return cpu_to_le16(dur); -} -EXPORT_SYMBOL(ieee80211_rts_duration); - - -__le16 ieee80211_ctstoself_duration(struct ieee80211_hw *hw, - size_t frame_len, - const struct ieee80211_tx_control *frame_txctl) -{ - struct ieee80211_local *local = hw_to_local(hw); - struct ieee80211_rate *rate; - int short_preamble = local->short_preamble; - int erp; - u16 dur; - - rate = frame_txctl->rts_rate; - erp = !!(rate->flags & IEEE80211_RATE_ERP); - - /* Data frame duration */ - dur = ieee80211_frame_duration(local, frame_len, rate->rate, - erp, short_preamble); - if (!(frame_txctl->flags & IEEE80211_TXCTL_NO_ACK)) { - /* ACK duration */ - dur += ieee80211_frame_duration(local, 10, rate->rate, - erp, short_preamble); - } - - return cpu_to_le16(dur); -} -EXPORT_SYMBOL(ieee80211_ctstoself_duration); - -void ieee80211_rts_get(struct ieee80211_hw *hw, - const void *frame, size_t frame_len, - const struct ieee80211_tx_control *frame_txctl, - struct ieee80211_rts *rts) -{ - const struct ieee80211_hdr *hdr = frame; - u16 fctl; - - fctl = IEEE80211_FTYPE_CTL | IEEE80211_STYPE_RTS; - rts->frame_control = cpu_to_le16(fctl); - rts->duration = ieee80211_rts_duration(hw, frame_len, frame_txctl); - memcpy(rts->ra, hdr->addr1, sizeof(rts->ra)); - memcpy(rts->ta, hdr->addr2, sizeof(rts->ta)); -} -EXPORT_SYMBOL(ieee80211_rts_get); - -void ieee80211_ctstoself_get(struct ieee80211_hw *hw, - const void *frame, size_t frame_len, - const struct 
ieee80211_tx_control *frame_txctl, - struct ieee80211_cts *cts) -{ - const struct ieee80211_hdr *hdr = frame; - u16 fctl; - - fctl = IEEE80211_FTYPE_CTL | IEEE80211_STYPE_CTS; - cts->frame_control = cpu_to_le16(fctl); - cts->duration = ieee80211_ctstoself_duration(hw, frame_len, frame_txctl); - memcpy(cts->ra, hdr->addr1, sizeof(cts->ra)); -} -EXPORT_SYMBOL(ieee80211_ctstoself_get); - -struct sk_buff * -ieee80211_get_buffered_bc(struct ieee80211_hw *hw, int if_id, - struct ieee80211_tx_control *control) -{ - struct ieee80211_local *local = hw_to_local(hw); - struct sk_buff *skb; - struct sta_info *sta; - ieee80211_tx_handler *handler; - struct ieee80211_txrx_data tx; - ieee80211_txrx_result res = TXRX_DROP; - struct net_device *bdev; - struct ieee80211_sub_if_data *sdata; - struct ieee80211_if_ap *bss = NULL; - - bdev = dev_get_by_index(if_id); - if (bdev) { - sdata = IEEE80211_DEV_TO_SUB_IF(bdev); - bss = &sdata->u.ap; - dev_put(bdev); - } - if (!bss || sdata->type != IEEE80211_IF_TYPE_AP || !bss->beacon_head) - return NULL; - - if (bss->dtim_count != 0) - return NULL; /* send buffered bc/mc only after DTIM beacon */ - memset(control, 0, sizeof(*control)); - while (1) { - skb = skb_dequeue(&bss->ps_bc_buf); - if (!skb) - return NULL; - local->total_ps_buffered--; - - if (!skb_queue_empty(&bss->ps_bc_buf) && skb->len >= 2) { - struct ieee80211_hdr *hdr = - (struct ieee80211_hdr *) skb->data; - /* more buffered multicast/broadcast frames ==> set - * MoreData flag in IEEE 802.11 header to inform PS - * STAs */ - hdr->frame_control |= - cpu_to_le16(IEEE80211_FCTL_MOREDATA); - } - - if (ieee80211_tx_prepare(&tx, skb, local->mdev, control) == 0) - break; - dev_kfree_skb_any(skb); - } - sta = tx.sta; - tx.u.tx.ps_buffered = 1; - - for (handler = local->tx_handlers; *handler != NULL; handler++) { - res = (*handler)(&tx); - if (res == TXRX_DROP || res == TXRX_QUEUED) - break; - } - dev_put(tx.dev); - skb = tx.skb; /* handlers are allowed to change skb */ - - if (res == TXRX_DROP) { - I802_DEBUG_INC(local->tx_handlers_drop); - dev_kfree_skb(skb); - skb = NULL; - } else if (res == TXRX_QUEUED) { - I802_DEBUG_INC(local->tx_handlers_queued); - skb = NULL; - } - - if (sta) - sta_info_put(sta); - - return skb; -} -EXPORT_SYMBOL(ieee80211_get_buffered_bc); - -static int __ieee80211_if_config(struct net_device *dev, - struct sk_buff *beacon, - struct ieee80211_tx_control *control) -{ - struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); - struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); - struct ieee80211_if_conf conf; - static u8 scan_bssid[] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }; - - if (!local->ops->config_interface || !netif_running(dev)) - return 0; - - memset(&conf, 0, sizeof(conf)); - conf.type = sdata->type; - if (sdata->type == IEEE80211_IF_TYPE_STA || - sdata->type == IEEE80211_IF_TYPE_IBSS) { - if (local->sta_scanning && - local->scan_dev == dev) - conf.bssid = scan_bssid; - else - conf.bssid = sdata->u.sta.bssid; - conf.ssid = sdata->u.sta.ssid; - conf.ssid_len = sdata->u.sta.ssid_len; - conf.generic_elem = sdata->u.sta.extra_ie; - conf.generic_elem_len = sdata->u.sta.extra_ie_len; - } else if (sdata->type == IEEE80211_IF_TYPE_AP) { - conf.ssid = sdata->u.ap.ssid; - conf.ssid_len = sdata->u.ap.ssid_len; - conf.generic_elem = sdata->u.ap.generic_elem; - conf.generic_elem_len = sdata->u.ap.generic_elem_len; - conf.beacon = beacon; - conf.beacon_control = control; - } - return local->ops->config_interface(local_to_hw(local), - dev->ifindex, &conf); -} - -int 
ieee80211_if_config(struct net_device *dev) -{ - return __ieee80211_if_config(dev, NULL, NULL); -} - -int ieee80211_if_config_beacon(struct net_device *dev) -{ - struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); - struct ieee80211_tx_control control; - struct sk_buff *skb; - - if (!(local->hw.flags & IEEE80211_HW_HOST_GEN_BEACON_TEMPLATE)) - return 0; - skb = ieee80211_beacon_get(local_to_hw(local), dev->ifindex, &control); - if (!skb) - return -ENOMEM; - return __ieee80211_if_config(dev, skb, &control); -} - -int ieee80211_hw_config(struct ieee80211_local *local) -{ - struct ieee80211_hw_mode *mode; - struct ieee80211_channel *chan; - int ret = 0; - - if (local->sta_scanning) { - chan = local->scan_channel; - mode = local->scan_hw_mode; - } else { - chan = local->oper_channel; - mode = local->oper_hw_mode; - } - - local->hw.conf.channel = chan->chan; - local->hw.conf.channel_val = chan->val; - local->hw.conf.power_level = chan->power_level; - local->hw.conf.freq = chan->freq; - local->hw.conf.phymode = mode->mode; - local->hw.conf.antenna_max = chan->antenna_max; - local->hw.conf.chan = chan; - local->hw.conf.mode = mode; - -#ifdef CONFIG_MAC80211_VERBOSE_DEBUG - printk(KERN_DEBUG "HW CONFIG: channel=%d freq=%d " - "phymode=%d\n", local->hw.conf.channel, local->hw.conf.freq, - local->hw.conf.phymode); -#endif /* CONFIG_MAC80211_VERBOSE_DEBUG */ - - if (local->ops->config) - ret = local->ops->config(local_to_hw(local), &local->hw.conf); - - return ret; -} - - -static int ieee80211_change_mtu(struct net_device *dev, int new_mtu) -{ - /* FIX: what would be proper limits for MTU? - * This interface uses 802.3 frames. */ - if (new_mtu < 256 || new_mtu > IEEE80211_MAX_DATA_LEN - 24 - 6) { - printk(KERN_WARNING "%s: invalid MTU %d\n", - dev->name, new_mtu); - return -EINVAL; - } - -#ifdef CONFIG_MAC80211_VERBOSE_DEBUG - printk(KERN_DEBUG "%s: setting MTU %d\n", dev->name, new_mtu); -#endif /* CONFIG_MAC80211_VERBOSE_DEBUG */ - dev->mtu = new_mtu; - return 0; -} - - -static int ieee80211_change_mtu_apdev(struct net_device *dev, int new_mtu) -{ - /* FIX: what would be proper limits for MTU? - * This interface uses 802.11 frames. 
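As a worked check of the MTU bounds in ieee80211_change_mtu() above (a standalone sketch, not kernel code): IEEE80211_MAX_DATA_LEN is assumed to be 2304 here, and the 802.3-style interface additionally reserves room, presumably for the 24-byte 802.11 data header and 6 bytes of encapsulation overhead, giving an upper limit of 2274.

    #include <stdio.h>

    #define MAX_DATA_LEN    2304    /* assumed IEEE80211_MAX_DATA_LEN */

    static int mtu_valid_8023(int new_mtu)
    {
        return new_mtu >= 256 && new_mtu <= MAX_DATA_LEN - 24 - 6;
    }

    int main(void)
    {
        printf("1500 -> %d, 2274 -> %d, 2280 -> %d\n",
               mtu_valid_8023(1500), mtu_valid_8023(2274),
               mtu_valid_8023(2280));   /* prints 1, 1, 0 */
        return 0;
    }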
*/ - if (new_mtu < 256 || new_mtu > IEEE80211_MAX_DATA_LEN) { - printk(KERN_WARNING "%s: invalid MTU %d\n", - dev->name, new_mtu); - return -EINVAL; - } - -#ifdef CONFIG_MAC80211_VERBOSE_DEBUG - printk(KERN_DEBUG "%s: setting MTU %d\n", dev->name, new_mtu); -#endif /* CONFIG_MAC80211_VERBOSE_DEBUG */ - dev->mtu = new_mtu; - return 0; -} - -enum netif_tx_lock_class { - TX_LOCK_NORMAL, - TX_LOCK_MASTER, -}; - -static inline void netif_tx_lock_nested(struct net_device *dev, int subclass) -{ - spin_lock_nested(&dev->_xmit_lock, subclass); - dev->xmit_lock_owner = smp_processor_id(); -} - -static void ieee80211_set_multicast_list(struct net_device *dev) -{ - struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); - struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); - unsigned short flags; - - netif_tx_lock_nested(local->mdev, TX_LOCK_MASTER); - if (((dev->flags & IFF_ALLMULTI) != 0) ^ (sdata->allmulti != 0)) { - if (sdata->allmulti) { - sdata->allmulti = 0; - local->iff_allmultis--; - } else { - sdata->allmulti = 1; - local->iff_allmultis++; - } - } - if (((dev->flags & IFF_PROMISC) != 0) ^ (sdata->promisc != 0)) { - if (sdata->promisc) { - sdata->promisc = 0; - local->iff_promiscs--; - } else { - sdata->promisc = 1; - local->iff_promiscs++; - } - } - if (dev->mc_count != sdata->mc_count) { - local->mc_count = local->mc_count - sdata->mc_count + - dev->mc_count; - sdata->mc_count = dev->mc_count; - } - if (local->ops->set_multicast_list) { - flags = local->mdev->flags; - if (local->iff_allmultis) - flags |= IFF_ALLMULTI; - if (local->iff_promiscs) - flags |= IFF_PROMISC; - read_lock(&local->sub_if_lock); - local->ops->set_multicast_list(local_to_hw(local), flags, - local->mc_count); - read_unlock(&local->sub_if_lock); - } - netif_tx_unlock(local->mdev); -} - -struct dev_mc_list *ieee80211_get_mc_list_item(struct ieee80211_hw *hw, - struct dev_mc_list *prev, - void **ptr) -{ - struct ieee80211_local *local = hw_to_local(hw); - struct ieee80211_sub_if_data *sdata = *ptr; - struct dev_mc_list *mc; - - if (!prev) { - WARN_ON(sdata); - sdata = NULL; - } - if (!prev || !prev->next) { - if (sdata) - sdata = list_entry(sdata->list.next, - struct ieee80211_sub_if_data, list); - else - sdata = list_entry(local->sub_if_list.next, - struct ieee80211_sub_if_data, list); - if (&sdata->list != &local->sub_if_list) - mc = sdata->dev->mc_list; - else - mc = NULL; - } else - mc = prev->next; - - *ptr = sdata; - return mc; -} -EXPORT_SYMBOL(ieee80211_get_mc_list_item); - -static struct net_device_stats *ieee80211_get_stats(struct net_device *dev) -{ - struct ieee80211_sub_if_data *sdata; - sdata = IEEE80211_DEV_TO_SUB_IF(dev); - return &(sdata->stats); -} - -static void ieee80211_if_shutdown(struct net_device *dev) -{ - struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); - struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); - - ASSERT_RTNL(); - switch (sdata->type) { - case IEEE80211_IF_TYPE_STA: - case IEEE80211_IF_TYPE_IBSS: - sdata->u.sta.state = IEEE80211_DISABLED; - del_timer_sync(&sdata->u.sta.timer); - skb_queue_purge(&sdata->u.sta.skb_queue); - if (!local->ops->hw_scan && - local->scan_dev == sdata->dev) { - local->sta_scanning = 0; - cancel_delayed_work(&local->scan_work); - } - flush_workqueue(local->hw.workqueue); - break; - } -} - -static inline int identical_mac_addr_allowed(int type1, int type2) -{ - return (type1 == IEEE80211_IF_TYPE_MNTR || - type2 == IEEE80211_IF_TYPE_MNTR || - (type1 == IEEE80211_IF_TYPE_AP && - type2 == IEEE80211_IF_TYPE_WDS) 
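The allmulti/promisc bookkeeping in ieee80211_set_multicast_list() above follows a simple pattern: each virtual interface remembers its own state, and the shared device keeps a count of how many interfaces requested the mode so it knows when the mode can be dropped. Reduced to its core (plain ints, no locking, simplified struct names):

    struct vif { int allmulti; };
    struct phy { int iff_allmultis; };

    /* Keep the device-wide counter in step with one interface's flag. */
    static void sync_allmulti(struct phy *phy, struct vif *vif, int want)
    {
        if (want == vif->allmulti)
            return;
        vif->allmulti = want;
        phy->iff_allmultis += want ? 1 : -1;
    }

The underlying hardware is then put into ALLMULTI or PROMISC whenever the corresponding counter is non-zero.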
|| - (type1 == IEEE80211_IF_TYPE_WDS && - (type2 == IEEE80211_IF_TYPE_WDS || - type2 == IEEE80211_IF_TYPE_AP)) || - (type1 == IEEE80211_IF_TYPE_AP && - type2 == IEEE80211_IF_TYPE_VLAN) || - (type1 == IEEE80211_IF_TYPE_VLAN && - (type2 == IEEE80211_IF_TYPE_AP || - type2 == IEEE80211_IF_TYPE_VLAN))); -} - -static int ieee80211_master_open(struct net_device *dev) -{ - struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); - struct ieee80211_sub_if_data *sdata; - int res = -EOPNOTSUPP; - - read_lock(&local->sub_if_lock); - list_for_each_entry(sdata, &local->sub_if_list, list) { - if (sdata->dev != dev && netif_running(sdata->dev)) { - res = 0; - break; - } - } - read_unlock(&local->sub_if_lock); - return res; -} - -static int ieee80211_master_stop(struct net_device *dev) -{ - struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); - struct ieee80211_sub_if_data *sdata; - - read_lock(&local->sub_if_lock); - list_for_each_entry(sdata, &local->sub_if_list, list) - if (sdata->dev != dev && netif_running(sdata->dev)) - dev_close(sdata->dev); - read_unlock(&local->sub_if_lock); - - return 0; -} - -static int ieee80211_mgmt_open(struct net_device *dev) -{ - struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); - - if (!netif_running(local->mdev)) - return -EOPNOTSUPP; - return 0; -} - -static int ieee80211_mgmt_stop(struct net_device *dev) -{ - return 0; -} - -/* Check if running monitor interfaces should go to a "soft monitor" mode - * and switch them if necessary. */ -static inline void ieee80211_start_soft_monitor(struct ieee80211_local *local) -{ - struct ieee80211_if_init_conf conf; - - if (local->open_count && local->open_count == local->monitors && - !(local->hw.flags & IEEE80211_HW_MONITOR_DURING_OPER) && - local->ops->remove_interface) { - conf.if_id = -1; - conf.type = IEEE80211_IF_TYPE_MNTR; - conf.mac_addr = NULL; - local->ops->remove_interface(local_to_hw(local), &conf); - } -} - -/* Check if running monitor interfaces should go to a "hard monitor" mode - * and switch them if necessary. 
*/ -static void ieee80211_start_hard_monitor(struct ieee80211_local *local) -{ - struct ieee80211_if_init_conf conf; - - if (local->open_count && local->open_count == local->monitors && - !(local->hw.flags & IEEE80211_HW_MONITOR_DURING_OPER)) { - conf.if_id = -1; - conf.type = IEEE80211_IF_TYPE_MNTR; - conf.mac_addr = NULL; - local->ops->add_interface(local_to_hw(local), &conf); - } -} - -static int ieee80211_open(struct net_device *dev) -{ - struct ieee80211_sub_if_data *sdata, *nsdata; - struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); - struct ieee80211_if_init_conf conf; - int res; - - sdata = IEEE80211_DEV_TO_SUB_IF(dev); - read_lock(&local->sub_if_lock); - list_for_each_entry(nsdata, &local->sub_if_list, list) { - struct net_device *ndev = nsdata->dev; - - if (ndev != dev && ndev != local->mdev && netif_running(ndev) && - compare_ether_addr(dev->dev_addr, ndev->dev_addr) == 0 && - !identical_mac_addr_allowed(sdata->type, nsdata->type)) { - read_unlock(&local->sub_if_lock); - return -ENOTUNIQ; - } - } - read_unlock(&local->sub_if_lock); - - if (sdata->type == IEEE80211_IF_TYPE_WDS && - is_zero_ether_addr(sdata->u.wds.remote_addr)) - return -ENOLINK; - - if (sdata->type == IEEE80211_IF_TYPE_MNTR && local->open_count && - !(local->hw.flags & IEEE80211_HW_MONITOR_DURING_OPER)) { - /* run the interface in a "soft monitor" mode */ - local->monitors++; - local->open_count++; - local->hw.conf.flags |= IEEE80211_CONF_RADIOTAP; - return 0; - } - ieee80211_start_soft_monitor(local); - - conf.if_id = dev->ifindex; - conf.type = sdata->type; - conf.mac_addr = dev->dev_addr; - res = local->ops->add_interface(local_to_hw(local), &conf); - if (res) { - if (sdata->type == IEEE80211_IF_TYPE_MNTR) - ieee80211_start_hard_monitor(local); - return res; - } - - if (local->open_count == 0) { - res = 0; - tasklet_enable(&local->tx_pending_tasklet); - tasklet_enable(&local->tasklet); - if (local->ops->open) - res = local->ops->open(local_to_hw(local)); - if (res == 0) { - res = dev_open(local->mdev); - if (res) { - if (local->ops->stop) - local->ops->stop(local_to_hw(local)); - } else { - res = ieee80211_hw_config(local); - if (res && local->ops->stop) - local->ops->stop(local_to_hw(local)); - else if (!res && local->apdev) - dev_open(local->apdev); - } - } - if (res) { - if (local->ops->remove_interface) - local->ops->remove_interface(local_to_hw(local), - &conf); - return res; - } - } - local->open_count++; - - if (sdata->type == IEEE80211_IF_TYPE_MNTR) { - local->monitors++; - local->hw.conf.flags |= IEEE80211_CONF_RADIOTAP; - } else - ieee80211_if_config(dev); - - if (sdata->type == IEEE80211_IF_TYPE_STA && - !local->user_space_mlme) - netif_carrier_off(dev); - else - netif_carrier_on(dev); - - netif_start_queue(dev); - return 0; -} - - -static int ieee80211_stop(struct net_device *dev) -{ - struct ieee80211_sub_if_data *sdata; - struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); - - sdata = IEEE80211_DEV_TO_SUB_IF(dev); - - if (sdata->type == IEEE80211_IF_TYPE_MNTR && - local->open_count > 1 && - !(local->hw.flags & IEEE80211_HW_MONITOR_DURING_OPER)) { - /* remove "soft monitor" interface */ - local->open_count--; - local->monitors--; - if (!local->monitors) - local->hw.conf.flags &= ~IEEE80211_CONF_RADIOTAP; - return 0; - } - - netif_stop_queue(dev); - ieee80211_if_shutdown(dev); - - if (sdata->type == IEEE80211_IF_TYPE_MNTR) { - local->monitors--; - if (!local->monitors) - local->hw.conf.flags &= ~IEEE80211_CONF_RADIOTAP; - } - - local->open_count--; - if (local->open_count == 
0) { - if (netif_running(local->mdev)) - dev_close(local->mdev); - if (local->apdev) - dev_close(local->apdev); - if (local->ops->stop) - local->ops->stop(local_to_hw(local)); - tasklet_disable(&local->tx_pending_tasklet); - tasklet_disable(&local->tasklet); - } - if (local->ops->remove_interface) { - struct ieee80211_if_init_conf conf; - - conf.if_id = dev->ifindex; - conf.type = sdata->type; - conf.mac_addr = dev->dev_addr; - local->ops->remove_interface(local_to_hw(local), &conf); - } - - ieee80211_start_hard_monitor(local); - - return 0; -} - - -static int header_parse_80211(struct sk_buff *skb, unsigned char *haddr) -{ - memcpy(haddr, skb_mac_header(skb) + 10, ETH_ALEN); /* addr2 */ - return ETH_ALEN; -} - -static inline int ieee80211_bssid_match(const u8 *raddr, const u8 *addr) -{ - return compare_ether_addr(raddr, addr) == 0 || - is_broadcast_ether_addr(raddr); -} - - -static ieee80211_txrx_result -ieee80211_rx_h_data(struct ieee80211_txrx_data *rx) -{ - struct net_device *dev = rx->dev; - struct ieee80211_local *local = rx->local; - struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) rx->skb->data; - u16 fc, hdrlen, ethertype; - u8 *payload; - u8 dst[ETH_ALEN]; - u8 src[ETH_ALEN]; - struct sk_buff *skb = rx->skb, *skb2; - struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); - - fc = rx->fc; - if (unlikely((fc & IEEE80211_FCTL_FTYPE) != IEEE80211_FTYPE_DATA)) - return TXRX_CONTINUE; - - if (unlikely(!WLAN_FC_DATA_PRESENT(fc))) - return TXRX_DROP; - - hdrlen = ieee80211_get_hdrlen(fc); - - /* convert IEEE 802.11 header + possible LLC headers into Ethernet - * header - * IEEE 802.11 address fields: - * ToDS FromDS Addr1 Addr2 Addr3 Addr4 - * 0 0 DA SA BSSID n/a - * 0 1 DA BSSID SA n/a - * 1 0 BSSID SA DA n/a - * 1 1 RA TA DA SA - */ - - switch (fc & (IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS)) { - case IEEE80211_FCTL_TODS: - /* BSSID SA DA */ - memcpy(dst, hdr->addr3, ETH_ALEN); - memcpy(src, hdr->addr2, ETH_ALEN); - - if (unlikely(sdata->type != IEEE80211_IF_TYPE_AP && - sdata->type != IEEE80211_IF_TYPE_VLAN)) { - printk(KERN_DEBUG "%s: dropped ToDS frame (BSSID=" - MAC_FMT " SA=" MAC_FMT " DA=" MAC_FMT ")\n", - dev->name, MAC_ARG(hdr->addr1), - MAC_ARG(hdr->addr2), MAC_ARG(hdr->addr3)); - return TXRX_DROP; - } - break; - case (IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS): - /* RA TA DA SA */ - memcpy(dst, hdr->addr3, ETH_ALEN); - memcpy(src, hdr->addr4, ETH_ALEN); - - if (unlikely(sdata->type != IEEE80211_IF_TYPE_WDS)) { - printk(KERN_DEBUG "%s: dropped FromDS&ToDS frame (RA=" - MAC_FMT " TA=" MAC_FMT " DA=" MAC_FMT " SA=" - MAC_FMT ")\n", - rx->dev->name, MAC_ARG(hdr->addr1), - MAC_ARG(hdr->addr2), MAC_ARG(hdr->addr3), - MAC_ARG(hdr->addr4)); - return TXRX_DROP; - } - break; - case IEEE80211_FCTL_FROMDS: - /* DA BSSID SA */ - memcpy(dst, hdr->addr1, ETH_ALEN); - memcpy(src, hdr->addr3, ETH_ALEN); - - if (sdata->type != IEEE80211_IF_TYPE_STA) { - return TXRX_DROP; - } - break; - case 0: - /* DA SA BSSID */ - memcpy(dst, hdr->addr1, ETH_ALEN); - memcpy(src, hdr->addr2, ETH_ALEN); - - if (sdata->type != IEEE80211_IF_TYPE_IBSS) { - if (net_ratelimit()) { - printk(KERN_DEBUG "%s: dropped IBSS frame (DA=" - MAC_FMT " SA=" MAC_FMT " BSSID=" MAC_FMT - ")\n", - dev->name, MAC_ARG(hdr->addr1), - MAC_ARG(hdr->addr2), - MAC_ARG(hdr->addr3)); - } - return TXRX_DROP; - } - break; - } - - payload = skb->data + hdrlen; - - if (unlikely(skb->len - hdrlen < 8)) { - if (net_ratelimit()) { - printk(KERN_DEBUG "%s: RX too short data frame " - "payload\n", dev->name); - } - return 
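The ToDS/FromDS table in the comment inside ieee80211_rx_h_data() above is worth restating as code, since the four cases are easy to mix up. A self-contained helper that mirrors exactly the DA/SA selection done by the switch statement (generic 4-address header copy, no interface-type checks):

    #include <string.h>

    #define ETH_ALEN 6

    struct hdr80211 {
        unsigned char a1[ETH_ALEN], a2[ETH_ALEN],
                      a3[ETH_ALEN], a4[ETH_ALEN];
    };

    static void get_da_sa(const struct hdr80211 *h, int tods, int fromds,
                          unsigned char *da, unsigned char *sa)
    {
        if (tods && fromds) {           /* WDS:     RA TA DA SA */
            memcpy(da, h->a3, ETH_ALEN);
            memcpy(sa, h->a4, ETH_ALEN);
        } else if (tods) {              /* to AP:   BSSID SA DA */
            memcpy(da, h->a3, ETH_ALEN);
            memcpy(sa, h->a2, ETH_ALEN);
        } else if (fromds) {            /* from AP: DA BSSID SA */
            memcpy(da, h->a1, ETH_ALEN);
            memcpy(sa, h->a3, ETH_ALEN);
        } else {                        /* IBSS:    DA SA BSSID */
            memcpy(da, h->a1, ETH_ALEN);
            memcpy(sa, h->a2, ETH_ALEN);
        }
    }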
TXRX_DROP; - } - - ethertype = (payload[6] << 8) | payload[7]; - - if (likely((compare_ether_addr(payload, rfc1042_header) == 0 && - ethertype != ETH_P_AARP && ethertype != ETH_P_IPX) || - compare_ether_addr(payload, bridge_tunnel_header) == 0)) { - /* remove RFC1042 or Bridge-Tunnel encapsulation and - * replace EtherType */ - skb_pull(skb, hdrlen + 6); - memcpy(skb_push(skb, ETH_ALEN), src, ETH_ALEN); - memcpy(skb_push(skb, ETH_ALEN), dst, ETH_ALEN); - } else { - struct ethhdr *ehdr; - __be16 len; - skb_pull(skb, hdrlen); - len = htons(skb->len); - ehdr = (struct ethhdr *) skb_push(skb, sizeof(struct ethhdr)); - memcpy(ehdr->h_dest, dst, ETH_ALEN); - memcpy(ehdr->h_source, src, ETH_ALEN); - ehdr->h_proto = len; - } - skb->dev = dev; - - skb2 = NULL; - - sdata->stats.rx_packets++; - sdata->stats.rx_bytes += skb->len; - - if (local->bridge_packets && (sdata->type == IEEE80211_IF_TYPE_AP - || sdata->type == IEEE80211_IF_TYPE_VLAN) && rx->u.rx.ra_match) { - if (is_multicast_ether_addr(skb->data)) { - /* send multicast frames both to higher layers in - * local net stack and back to the wireless media */ - skb2 = skb_copy(skb, GFP_ATOMIC); - if (!skb2) - printk(KERN_DEBUG "%s: failed to clone " - "multicast frame\n", dev->name); - } else { - struct sta_info *dsta; - dsta = sta_info_get(local, skb->data); - if (dsta && !dsta->dev) { - printk(KERN_DEBUG "Station with null dev " - "structure!\n"); - } else if (dsta && dsta->dev == dev) { - /* Destination station is associated to this - * AP, so send the frame directly to it and - * do not pass the frame to local net stack. - */ - skb2 = skb; - skb = NULL; - } - if (dsta) - sta_info_put(dsta); - } - } - - if (skb) { - /* deliver to local stack */ - skb->protocol = eth_type_trans(skb, dev); - memset(skb->cb, 0, sizeof(skb->cb)); - netif_rx(skb); - } - - if (skb2) { - /* send to wireless media */ - skb2->protocol = __constant_htons(ETH_P_802_3); - skb_set_network_header(skb2, 0); - skb_set_mac_header(skb2, 0); - dev_queue_xmit(skb2); - } - - return TXRX_QUEUED; -} - - -static struct ieee80211_rate * -ieee80211_get_rate(struct ieee80211_local *local, int phymode, int hw_rate) -{ - struct ieee80211_hw_mode *mode; - int r; - - list_for_each_entry(mode, &local->modes_list, list) { - if (mode->mode != phymode) - continue; - for (r = 0; r < mode->num_rates; r++) { - struct ieee80211_rate *rate = &mode->rates[r]; - if (rate->val == hw_rate || - (rate->flags & IEEE80211_RATE_PREAMBLE2 && - rate->val2 == hw_rate)) - return rate; - } - } - - return NULL; -} - -static void -ieee80211_fill_frame_info(struct ieee80211_local *local, - struct ieee80211_frame_info *fi, - struct ieee80211_rx_status *status) -{ - if (status) { - struct timespec ts; - struct ieee80211_rate *rate; - - jiffies_to_timespec(jiffies, &ts); - fi->hosttime = cpu_to_be64((u64) ts.tv_sec * 1000000 + - ts.tv_nsec / 1000); - fi->mactime = cpu_to_be64(status->mactime); - switch (status->phymode) { - case MODE_IEEE80211A: - fi->phytype = htonl(ieee80211_phytype_ofdm_dot11_a); - break; - case MODE_IEEE80211B: - fi->phytype = htonl(ieee80211_phytype_dsss_dot11_b); - break; - case MODE_IEEE80211G: - fi->phytype = htonl(ieee80211_phytype_pbcc_dot11_g); - break; - case MODE_ATHEROS_TURBO: - fi->phytype = - htonl(ieee80211_phytype_dsss_dot11_turbo); - break; - default: - fi->phytype = htonl(0xAAAAAAAA); - break; - } - fi->channel = htonl(status->channel); - rate = ieee80211_get_rate(local, status->phymode, - status->rate); - if (rate) { - fi->datarate = htonl(rate->rate); - if (rate->flags & 
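The decapsulation above checks for the two well-known LLC/SNAP prefixes before rewriting the frame as Ethernet. A minimal standalone version of that check (the special-casing of AARP and IPX over RFC 1042, which the real code keeps in 802.3 length-style framing, is left out here):

    #include <stdint.h>
    #include <string.h>

    static const uint8_t rfc1042[6]       = { 0xaa, 0xaa, 0x03, 0x00, 0x00, 0x00 };
    static const uint8_t bridge_tunnel[6] = { 0xaa, 0xaa, 0x03, 0x00, 0x00, 0xf8 };

    /* Return the EtherType if the payload is SNAP-encapsulated,
     * or -1 if the frame should keep 802.3 length-style framing. */
    static int snap_ethertype(const uint8_t *payload, size_t len)
    {
        if (len < 8)
            return -1;
        if (memcmp(payload, rfc1042, 6) != 0 &&
            memcmp(payload, bridge_tunnel, 6) != 0)
            return -1;
        return (payload[6] << 8) | payload[7];
    }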
IEEE80211_RATE_PREAMBLE2) { - if (status->rate == rate->val) - fi->preamble = htonl(2); /* long */ - else if (status->rate == rate->val2) - fi->preamble = htonl(1); /* short */ - } else - fi->preamble = htonl(0); - } else { - fi->datarate = htonl(0); - fi->preamble = htonl(0); - } - - fi->antenna = htonl(status->antenna); - fi->priority = htonl(0xffffffff); /* no clue */ - fi->ssi_type = htonl(ieee80211_ssi_raw); - fi->ssi_signal = htonl(status->ssi); - fi->ssi_noise = 0x00000000; - fi->encoding = 0; - } else { - /* clear everything because we really don't know. - * the msg_type field isn't present on monitor frames - * so we don't know whether it will be present or not, - * but it's ok to not clear it since it'll be assigned - * anyway */ - memset(fi, 0, sizeof(*fi) - sizeof(fi->msg_type)); - - fi->ssi_type = htonl(ieee80211_ssi_none); - } - fi->version = htonl(IEEE80211_FI_VERSION); - fi->length = cpu_to_be32(sizeof(*fi) - sizeof(fi->msg_type)); -} - -/* this routine is actually not just for this, but also - * for pushing fake 'management' frames into userspace. - * it shall be replaced by a netlink-based system. */ -void -ieee80211_rx_mgmt(struct ieee80211_local *local, struct sk_buff *skb, - struct ieee80211_rx_status *status, u32 msg_type) -{ - struct ieee80211_frame_info *fi; - const size_t hlen = sizeof(struct ieee80211_frame_info); - struct ieee80211_sub_if_data *sdata; - - skb->dev = local->apdev; - - sdata = IEEE80211_DEV_TO_SUB_IF(local->apdev); - - if (skb_headroom(skb) < hlen) { - I802_DEBUG_INC(local->rx_expand_skb_head); - if (pskb_expand_head(skb, hlen, 0, GFP_ATOMIC)) { - dev_kfree_skb(skb); - return; - } - } - - fi = (struct ieee80211_frame_info *) skb_push(skb, hlen); - - ieee80211_fill_frame_info(local, fi, status); - fi->msg_type = htonl(msg_type); - - sdata->stats.rx_packets++; - sdata->stats.rx_bytes += skb->len; - - skb_set_mac_header(skb, 0); - skb->ip_summed = CHECKSUM_UNNECESSARY; - skb->pkt_type = PACKET_OTHERHOST; - skb->protocol = htons(ETH_P_802_2); - memset(skb->cb, 0, sizeof(skb->cb)); - netif_rx(skb); -} - -static void -ieee80211_rx_monitor(struct net_device *dev, struct sk_buff *skb, - struct ieee80211_rx_status *status) -{ - struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); - struct ieee80211_sub_if_data *sdata; - struct ieee80211_rate *rate; - struct ieee80211_rtap_hdr { - struct ieee80211_radiotap_header hdr; - u8 flags; - u8 rate; - __le16 chan_freq; - __le16 chan_flags; - u8 antsignal; - } __attribute__ ((packed)) *rthdr; - - skb->dev = dev; - - sdata = IEEE80211_DEV_TO_SUB_IF(dev); - - if (status->flag & RX_FLAG_RADIOTAP) - goto out; - - if (skb_headroom(skb) < sizeof(*rthdr)) { - I802_DEBUG_INC(local->rx_expand_skb_head); - if (pskb_expand_head(skb, sizeof(*rthdr), 0, GFP_ATOMIC)) { - dev_kfree_skb(skb); - return; - } - } - - rthdr = (struct ieee80211_rtap_hdr *) skb_push(skb, sizeof(*rthdr)); - memset(rthdr, 0, sizeof(*rthdr)); - rthdr->hdr.it_len = cpu_to_le16(sizeof(*rthdr)); - rthdr->hdr.it_present = - cpu_to_le32((1 << IEEE80211_RADIOTAP_FLAGS) | - (1 << IEEE80211_RADIOTAP_RATE) | - (1 << IEEE80211_RADIOTAP_CHANNEL) | - (1 << IEEE80211_RADIOTAP_DB_ANTSIGNAL)); - rthdr->flags = local->hw.flags & IEEE80211_HW_RX_INCLUDES_FCS ? - IEEE80211_RADIOTAP_F_FCS : 0; - rate = ieee80211_get_rate(local, status->phymode, status->rate); - if (rate) - rthdr->rate = rate->rate / 5; - rthdr->chan_freq = cpu_to_le16(status->freq); - rthdr->chan_flags = - status->phymode == MODE_IEEE80211A ? 
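For reference, the radiotap header built in ieee80211_rx_monitor() above has roughly the on-the-wire shape sketched below; the bit numbers in the present bitmap (FLAGS=1, RATE=2, CHANNEL=3, DB_ANTSIGNAL=12) come from the radiotap specification. This is illustrative only and skips the little-endian conversions the real code performs with cpu_to_le16/32().

    #include <stdint.h>
    #include <string.h>

    struct rtap_hdr {
        uint8_t  version, pad;
        uint16_t len;           /* length of the whole header */
        uint32_t present;       /* bitmap of fields that follow */
        uint8_t  flags;
        uint8_t  rate;          /* in 500 kbit/s units */
        uint16_t chan_freq;
        uint16_t chan_flags;
        uint8_t  antsignal;
    } __attribute__((packed));

    static void fill_rtap(struct rtap_hdr *h, uint8_t rate_500kbps,
                          uint16_t freq_mhz, uint8_t ssi)
    {
        memset(h, 0, sizeof(*h));
        h->len       = sizeof(*h);
        h->present   = (1u << 1) | (1u << 2) | (1u << 3) | (1u << 12);
        h->rate      = rate_500kbps;
        h->chan_freq = freq_mhz;
        h->antsignal = ssi;
    }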
- cpu_to_le16(IEEE80211_CHAN_OFDM | IEEE80211_CHAN_5GHZ) : - cpu_to_le16(IEEE80211_CHAN_DYN | IEEE80211_CHAN_2GHZ); - rthdr->antsignal = status->ssi; - - out: - sdata->stats.rx_packets++; - sdata->stats.rx_bytes += skb->len; - - skb_set_mac_header(skb, 0); - skb->ip_summed = CHECKSUM_UNNECESSARY; - skb->pkt_type = PACKET_OTHERHOST; - skb->protocol = htons(ETH_P_802_2); - memset(skb->cb, 0, sizeof(skb->cb)); - netif_rx(skb); -} - -int ieee80211_radar_status(struct ieee80211_hw *hw, int channel, - int radar, int radar_type) -{ - struct sk_buff *skb; - struct ieee80211_radar_info *msg; - struct ieee80211_local *local = hw_to_local(hw); - - if (!local->apdev) - return 0; - - skb = dev_alloc_skb(sizeof(struct ieee80211_frame_info) + - sizeof(struct ieee80211_radar_info)); - - if (!skb) - return -ENOMEM; - skb_reserve(skb, sizeof(struct ieee80211_frame_info)); - - msg = (struct ieee80211_radar_info *) - skb_put(skb, sizeof(struct ieee80211_radar_info)); - msg->channel = channel; - msg->radar = radar; - msg->radar_type = radar_type; - - ieee80211_rx_mgmt(local, skb, NULL, ieee80211_msg_radar); - return 0; -} -EXPORT_SYMBOL(ieee80211_radar_status); - - -static void ap_sta_ps_start(struct net_device *dev, struct sta_info *sta) -{ - struct ieee80211_sub_if_data *sdata; - sdata = IEEE80211_DEV_TO_SUB_IF(sta->dev); - - if (sdata->bss) - atomic_inc(&sdata->bss->num_sta_ps); - sta->flags |= WLAN_STA_PS; - sta->pspoll = 0; -#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG - printk(KERN_DEBUG "%s: STA " MAC_FMT " aid %d enters power " - "save mode\n", dev->name, MAC_ARG(sta->addr), sta->aid); -#endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */ -} - - -static int ap_sta_ps_end(struct net_device *dev, struct sta_info *sta) -{ - struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); - struct sk_buff *skb; - int sent = 0; - struct ieee80211_sub_if_data *sdata; - struct ieee80211_tx_packet_data *pkt_data; - - sdata = IEEE80211_DEV_TO_SUB_IF(sta->dev); - if (sdata->bss) - atomic_dec(&sdata->bss->num_sta_ps); - sta->flags &= ~(WLAN_STA_PS | WLAN_STA_TIM); - sta->pspoll = 0; - if (!skb_queue_empty(&sta->ps_tx_buf)) { - if (local->ops->set_tim) - local->ops->set_tim(local_to_hw(local), sta->aid, 0); - if (sdata->bss) - bss_tim_clear(local, sdata->bss, sta->aid); - } -#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG - printk(KERN_DEBUG "%s: STA " MAC_FMT " aid %d exits power " - "save mode\n", dev->name, MAC_ARG(sta->addr), sta->aid); -#endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */ - /* Send all buffered frames to the station */ - while ((skb = skb_dequeue(&sta->tx_filtered)) != NULL) { - pkt_data = (struct ieee80211_tx_packet_data *) skb->cb; - sent++; - pkt_data->requeue = 1; - dev_queue_xmit(skb); - } - while ((skb = skb_dequeue(&sta->ps_tx_buf)) != NULL) { - pkt_data = (struct ieee80211_tx_packet_data *) skb->cb; - local->total_ps_buffered--; - sent++; -#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG - printk(KERN_DEBUG "%s: STA " MAC_FMT " aid %d send PS frame " - "since STA not sleeping anymore\n", dev->name, - MAC_ARG(sta->addr), sta->aid); -#endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */ - pkt_data->requeue = 1; - dev_queue_xmit(skb); - } - - return sent; -} - - -static ieee80211_txrx_result -ieee80211_rx_h_ps_poll(struct ieee80211_txrx_data *rx) -{ - struct sk_buff *skb; - int no_pending_pkts; - - if (likely(!rx->sta || - (rx->fc & IEEE80211_FCTL_FTYPE) != IEEE80211_FTYPE_CTL || - (rx->fc & IEEE80211_FCTL_STYPE) != IEEE80211_STYPE_PSPOLL || - !rx->u.rx.ra_match)) - return TXRX_CONTINUE; - - skb = skb_dequeue(&rx->sta->tx_filtered); 
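ap_sta_ps_end() above implements the wake-up side of AP power-save buffering: frames the hardware filtered out are requeued first, then the software PS buffer, all in arrival order, and the caller learns how many frames were pushed out. A schematic version (dequeue() and retransmit() are hypothetical helpers, not kernel APIs):

    struct queue;                           /* opaque FIFO of frames */
    extern void *dequeue(struct queue *q);  /* returns NULL when empty */
    extern void  retransmit(void *frame);

    static int ps_flush(struct queue *hw_filtered, struct queue *ps_buf)
    {
        void *frame;
        int sent = 0;

        while ((frame = dequeue(hw_filtered)) != NULL) {
            retransmit(frame);
            sent++;
        }
        while ((frame = dequeue(ps_buf)) != NULL) {
            retransmit(frame);
            sent++;
        }
        return sent;
    }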
- if (!skb) { - skb = skb_dequeue(&rx->sta->ps_tx_buf); - if (skb) - rx->local->total_ps_buffered--; - } - no_pending_pkts = skb_queue_empty(&rx->sta->tx_filtered) && - skb_queue_empty(&rx->sta->ps_tx_buf); - - if (skb) { - struct ieee80211_hdr *hdr = - (struct ieee80211_hdr *) skb->data; - - /* tell TX path to send one frame even though the STA may - * still remain is PS mode after this frame exchange */ - rx->sta->pspoll = 1; - -#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG - printk(KERN_DEBUG "STA " MAC_FMT " aid %d: PS Poll (entries " - "after %d)\n", - MAC_ARG(rx->sta->addr), rx->sta->aid, - skb_queue_len(&rx->sta->ps_tx_buf)); -#endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */ - - /* Use MoreData flag to indicate whether there are more - * buffered frames for this STA */ - if (no_pending_pkts) { - hdr->frame_control &= cpu_to_le16(~IEEE80211_FCTL_MOREDATA); - rx->sta->flags &= ~WLAN_STA_TIM; - } else - hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_MOREDATA); - - dev_queue_xmit(skb); - - if (no_pending_pkts) { - if (rx->local->ops->set_tim) - rx->local->ops->set_tim(local_to_hw(rx->local), - rx->sta->aid, 0); - if (rx->sdata->bss) - bss_tim_clear(rx->local, rx->sdata->bss, rx->sta->aid); - } -#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG - } else if (!rx->u.rx.sent_ps_buffered) { - printk(KERN_DEBUG "%s: STA " MAC_FMT " sent PS Poll even " - "though there is no buffered frames for it\n", - rx->dev->name, MAC_ARG(rx->sta->addr)); -#endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */ - - } - - /* Free PS Poll skb here instead of returning TXRX_DROP that would - * count as an dropped frame. */ - dev_kfree_skb(rx->skb); - - return TXRX_QUEUED; -} - - -static inline struct ieee80211_fragment_entry * -ieee80211_reassemble_add(struct ieee80211_sub_if_data *sdata, - unsigned int frag, unsigned int seq, int rx_queue, - struct sk_buff **skb) -{ - struct ieee80211_fragment_entry *entry; - int idx; - - idx = sdata->fragment_next; - entry = &sdata->fragments[sdata->fragment_next++]; - if (sdata->fragment_next >= IEEE80211_FRAGMENT_MAX) - sdata->fragment_next = 0; - - if (!skb_queue_empty(&entry->skb_list)) { -#ifdef CONFIG_MAC80211_DEBUG - struct ieee80211_hdr *hdr = - (struct ieee80211_hdr *) entry->skb_list.next->data; - printk(KERN_DEBUG "%s: RX reassembly removed oldest " - "fragment entry (idx=%d age=%lu seq=%d last_frag=%d " - "addr1=" MAC_FMT " addr2=" MAC_FMT "\n", - sdata->dev->name, idx, - jiffies - entry->first_frag_time, entry->seq, - entry->last_frag, MAC_ARG(hdr->addr1), - MAC_ARG(hdr->addr2)); -#endif /* CONFIG_MAC80211_DEBUG */ - __skb_queue_purge(&entry->skb_list); - } - - __skb_queue_tail(&entry->skb_list, *skb); /* no need for locking */ - *skb = NULL; - entry->first_frag_time = jiffies; - entry->seq = seq; - entry->rx_queue = rx_queue; - entry->last_frag = frag; - entry->ccmp = 0; - entry->extra_len = 0; - - return entry; -} - - -static inline struct ieee80211_fragment_entry * -ieee80211_reassemble_find(struct ieee80211_sub_if_data *sdata, - u16 fc, unsigned int frag, unsigned int seq, - int rx_queue, struct ieee80211_hdr *hdr) -{ - struct ieee80211_fragment_entry *entry; - int i, idx; - - idx = sdata->fragment_next; - for (i = 0; i < IEEE80211_FRAGMENT_MAX; i++) { - struct ieee80211_hdr *f_hdr; - u16 f_fc; - - idx--; - if (idx < 0) - idx = IEEE80211_FRAGMENT_MAX - 1; - - entry = &sdata->fragments[idx]; - if (skb_queue_empty(&entry->skb_list) || entry->seq != seq || - entry->rx_queue != rx_queue || - entry->last_frag + 1 != frag) - continue; - - f_hdr = (struct ieee80211_hdr *) 
entry->skb_list.next->data; - f_fc = le16_to_cpu(f_hdr->frame_control); - - if ((fc & IEEE80211_FCTL_FTYPE) != (f_fc & IEEE80211_FCTL_FTYPE) || - compare_ether_addr(hdr->addr1, f_hdr->addr1) != 0 || - compare_ether_addr(hdr->addr2, f_hdr->addr2) != 0) - continue; - - if (entry->first_frag_time + 2 * HZ < jiffies) { - __skb_queue_purge(&entry->skb_list); - continue; - } - return entry; - } - - return NULL; -} - - -static ieee80211_txrx_result -ieee80211_rx_h_defragment(struct ieee80211_txrx_data *rx) -{ - struct ieee80211_hdr *hdr; - u16 sc; - unsigned int frag, seq; - struct ieee80211_fragment_entry *entry; - struct sk_buff *skb; - - hdr = (struct ieee80211_hdr *) rx->skb->data; - sc = le16_to_cpu(hdr->seq_ctrl); - frag = sc & IEEE80211_SCTL_FRAG; - - if (likely((!(rx->fc & IEEE80211_FCTL_MOREFRAGS) && frag == 0) || - (rx->skb)->len < 24 || - is_multicast_ether_addr(hdr->addr1))) { - /* not fragmented */ - goto out; - } - I802_DEBUG_INC(rx->local->rx_handlers_fragments); - - seq = (sc & IEEE80211_SCTL_SEQ) >> 4; - - if (frag == 0) { - /* This is the first fragment of a new frame. */ - entry = ieee80211_reassemble_add(rx->sdata, frag, seq, - rx->u.rx.queue, &(rx->skb)); - if (rx->key && rx->key->alg == ALG_CCMP && - (rx->fc & IEEE80211_FCTL_PROTECTED)) { - /* Store CCMP PN so that we can verify that the next - * fragment has a sequential PN value. */ - entry->ccmp = 1; - memcpy(entry->last_pn, - rx->key->u.ccmp.rx_pn[rx->u.rx.queue], - CCMP_PN_LEN); - } - return TXRX_QUEUED; - } - - /* This is a fragment for a frame that should already be pending in - * fragment cache. Add this fragment to the end of the pending entry. - */ - entry = ieee80211_reassemble_find(rx->sdata, rx->fc, frag, seq, - rx->u.rx.queue, hdr); - if (!entry) { - I802_DEBUG_INC(rx->local->rx_handlers_drop_defrag); - return TXRX_DROP; - } - - /* Verify that MPDUs within one MSDU have sequential PN values. 
- * (IEEE 802.11i, 8.3.3.4.5) */ - if (entry->ccmp) { - int i; - u8 pn[CCMP_PN_LEN], *rpn; - if (!rx->key || rx->key->alg != ALG_CCMP) - return TXRX_DROP; - memcpy(pn, entry->last_pn, CCMP_PN_LEN); - for (i = CCMP_PN_LEN - 1; i >= 0; i--) { - pn[i]++; - if (pn[i]) - break; - } - rpn = rx->key->u.ccmp.rx_pn[rx->u.rx.queue]; - if (memcmp(pn, rpn, CCMP_PN_LEN) != 0) { - printk(KERN_DEBUG "%s: defrag: CCMP PN not sequential" - " A2=" MAC_FMT " PN=%02x%02x%02x%02x%02x%02x " - "(expected %02x%02x%02x%02x%02x%02x)\n", - rx->dev->name, MAC_ARG(hdr->addr2), - rpn[0], rpn[1], rpn[2], rpn[3], rpn[4], rpn[5], - pn[0], pn[1], pn[2], pn[3], pn[4], pn[5]); - return TXRX_DROP; - } - memcpy(entry->last_pn, pn, CCMP_PN_LEN); - } - - skb_pull(rx->skb, ieee80211_get_hdrlen(rx->fc)); - __skb_queue_tail(&entry->skb_list, rx->skb); - entry->last_frag = frag; - entry->extra_len += rx->skb->len; - if (rx->fc & IEEE80211_FCTL_MOREFRAGS) { - rx->skb = NULL; - return TXRX_QUEUED; - } - - rx->skb = __skb_dequeue(&entry->skb_list); - if (skb_tailroom(rx->skb) < entry->extra_len) { - I802_DEBUG_INC(rx->local->rx_expand_skb_head2); - if (unlikely(pskb_expand_head(rx->skb, 0, entry->extra_len, - GFP_ATOMIC))) { - I802_DEBUG_INC(rx->local->rx_handlers_drop_defrag); - __skb_queue_purge(&entry->skb_list); - return TXRX_DROP; - } - } - while ((skb = __skb_dequeue(&entry->skb_list))) { - memcpy(skb_put(rx->skb, skb->len), skb->data, skb->len); - dev_kfree_skb(skb); - } - - /* Complete frame has been reassembled - process it now */ - rx->fragmented = 1; - - out: - if (rx->sta) - rx->sta->rx_packets++; - if (is_multicast_ether_addr(hdr->addr1)) - rx->local->dot11MulticastReceivedFrameCount++; - else - ieee80211_led_rx(rx->local); - return TXRX_CONTINUE; -} - - -static ieee80211_txrx_result -ieee80211_rx_h_monitor(struct ieee80211_txrx_data *rx) -{ - if (rx->sdata->type == IEEE80211_IF_TYPE_MNTR) { - ieee80211_rx_monitor(rx->dev, rx->skb, rx->u.rx.status); - return TXRX_QUEUED; - } - - if (rx->u.rx.status->flag & RX_FLAG_RADIOTAP) - skb_pull(rx->skb, ieee80211_get_radiotap_len(rx->skb)); - - return TXRX_CONTINUE; -} - - -static ieee80211_txrx_result -ieee80211_rx_h_check(struct ieee80211_txrx_data *rx) -{ - struct ieee80211_hdr *hdr; - int always_sta_key; - hdr = (struct ieee80211_hdr *) rx->skb->data; - - /* Drop duplicate 802.11 retransmissions (IEEE 802.11 Chap. 9.2.9) */ - if (rx->sta && !is_multicast_ether_addr(hdr->addr1)) { - if (unlikely(rx->fc & IEEE80211_FCTL_RETRY && - rx->sta->last_seq_ctrl[rx->u.rx.queue] == - hdr->seq_ctrl)) { - if (rx->u.rx.ra_match) { - rx->local->dot11FrameDuplicateCount++; - rx->sta->num_duplicates++; - } - return TXRX_DROP; - } else - rx->sta->last_seq_ctrl[rx->u.rx.queue] = hdr->seq_ctrl; - } - - if ((rx->local->hw.flags & IEEE80211_HW_RX_INCLUDES_FCS) && - rx->skb->len > FCS_LEN) - skb_trim(rx->skb, rx->skb->len - FCS_LEN); - - if (unlikely(rx->skb->len < 16)) { - I802_DEBUG_INC(rx->local->rx_handlers_drop_short); - return TXRX_DROP; - } - - if (!rx->u.rx.ra_match) - rx->skb->pkt_type = PACKET_OTHERHOST; - else if (compare_ether_addr(rx->dev->dev_addr, hdr->addr1) == 0) - rx->skb->pkt_type = PACKET_HOST; - else if (is_multicast_ether_addr(hdr->addr1)) { - if (is_broadcast_ether_addr(hdr->addr1)) - rx->skb->pkt_type = PACKET_BROADCAST; - else - rx->skb->pkt_type = PACKET_MULTICAST; - } else - rx->skb->pkt_type = PACKET_OTHERHOST; - - /* Drop disallowed frame classes based on STA auth/assoc state; - * IEEE 802.11, Chap 5.5. 
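The sequential-PN rule applied during defragmentation above (IEEE 802.11i, 8.3.3.4.5) requires each fragment's CCMP packet number to be exactly the previous one plus one. The increment is a big-endian multi-byte counter with carry, which is easy to get wrong, so here it is as a standalone check mirroring the loop in the code:

    #include <stdint.h>
    #include <string.h>
    #include <stdbool.h>

    #define PN_LEN 6    /* same size as CCMP_PN_LEN */

    static bool pn_is_next(const uint8_t *prev, const uint8_t *cur)
    {
        uint8_t expect[PN_LEN];
        int i;

        memcpy(expect, prev, PN_LEN);
        for (i = PN_LEN - 1; i >= 0; i--) {
            if (++expect[i])
                break;          /* stop once there is no carry */
        }
        return memcmp(expect, cur, PN_LEN) == 0;
    }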
- * - * 80211.o does filtering only based on association state, i.e., it - * drops Class 3 frames from not associated stations. hostapd sends - * deauth/disassoc frames when needed. In addition, hostapd is - * responsible for filtering on both auth and assoc states. - */ - if (unlikely(((rx->fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_DATA || - ((rx->fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_CTL && - (rx->fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_PSPOLL)) && - rx->sdata->type != IEEE80211_IF_TYPE_IBSS && - (!rx->sta || !(rx->sta->flags & WLAN_STA_ASSOC)))) { - if ((!(rx->fc & IEEE80211_FCTL_FROMDS) && - !(rx->fc & IEEE80211_FCTL_TODS) && - (rx->fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_DATA) - || !rx->u.rx.ra_match) { - /* Drop IBSS frames and frames for other hosts - * silently. */ - return TXRX_DROP; - } - - if (!rx->local->apdev) - return TXRX_DROP; - - ieee80211_rx_mgmt(rx->local, rx->skb, rx->u.rx.status, - ieee80211_msg_sta_not_assoc); - return TXRX_QUEUED; - } - - if (rx->sdata->type == IEEE80211_IF_TYPE_STA) - always_sta_key = 0; - else - always_sta_key = 1; - - if (rx->sta && rx->sta->key && always_sta_key) { - rx->key = rx->sta->key; - } else { - if (rx->sta && rx->sta->key) - rx->key = rx->sta->key; - else - rx->key = rx->sdata->default_key; - - if ((rx->local->hw.flags & IEEE80211_HW_WEP_INCLUDE_IV) && - rx->fc & IEEE80211_FCTL_PROTECTED) { - int keyidx = ieee80211_wep_get_keyidx(rx->skb); - - if (keyidx >= 0 && keyidx < NUM_DEFAULT_KEYS && - (!rx->sta || !rx->sta->key || keyidx > 0)) - rx->key = rx->sdata->keys[keyidx]; - - if (!rx->key) { - if (!rx->u.rx.ra_match) - return TXRX_DROP; - printk(KERN_DEBUG "%s: RX WEP frame with " - "unknown keyidx %d (A1=" MAC_FMT " A2=" - MAC_FMT " A3=" MAC_FMT ")\n", - rx->dev->name, keyidx, - MAC_ARG(hdr->addr1), - MAC_ARG(hdr->addr2), - MAC_ARG(hdr->addr3)); - if (!rx->local->apdev) - return TXRX_DROP; - ieee80211_rx_mgmt( - rx->local, rx->skb, rx->u.rx.status, - ieee80211_msg_wep_frame_unknown_key); - return TXRX_QUEUED; - } - } - } - - if (rx->fc & IEEE80211_FCTL_PROTECTED && rx->key && rx->u.rx.ra_match) { - rx->key->tx_rx_count++; - if (unlikely(rx->local->key_tx_rx_threshold && - rx->key->tx_rx_count > - rx->local->key_tx_rx_threshold)) { - ieee80211_key_threshold_notify(rx->dev, rx->key, - rx->sta); - } - } - - return TXRX_CONTINUE; -} - - -static ieee80211_txrx_result -ieee80211_rx_h_sta_process(struct ieee80211_txrx_data *rx) -{ - struct sta_info *sta = rx->sta; - struct net_device *dev = rx->dev; - struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) rx->skb->data; - - if (!sta) - return TXRX_CONTINUE; - - /* Update last_rx only for IBSS packets which are for the current - * BSSID to avoid keeping the current IBSS network alive in cases where - * other STAs are using different BSSID. */ - if (rx->sdata->type == IEEE80211_IF_TYPE_IBSS) { - u8 *bssid = ieee80211_get_bssid(hdr, rx->skb->len); - if (compare_ether_addr(bssid, rx->sdata->u.sta.bssid) == 0) - sta->last_rx = jiffies; - } else - if (!is_multicast_ether_addr(hdr->addr1) || - rx->sdata->type == IEEE80211_IF_TYPE_STA) { - /* Update last_rx only for unicast frames in order to prevent - * the Probe Request frames (the only broadcast frames from a - * STA in infrastructure mode) from keeping a connection alive. 
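The key-selection fallback above relies on the WEP/TKIP key index that is transmitted in the clear: it sits in the top two bits of the fourth byte after the 802.11 header (the byte following the 3-byte IV), which is also how the Michael MIC report handler further down extracts it by hand. As a self-contained helper:

    #include <stdint.h>

    /* Returns 0..3, or -1 if the frame is too short to carry an IV. */
    static int wep_keyidx(const uint8_t *frame, unsigned int hdrlen,
                          unsigned int len)
    {
        if (len < hdrlen + 4)
            return -1;
        return frame[hdrlen + 3] >> 6;
    }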
- */ - sta->last_rx = jiffies; - } - - if (!rx->u.rx.ra_match) - return TXRX_CONTINUE; - - sta->rx_fragments++; - sta->rx_bytes += rx->skb->len; - sta->last_rssi = (sta->last_rssi * 15 + - rx->u.rx.status->ssi) / 16; - sta->last_signal = (sta->last_signal * 15 + - rx->u.rx.status->signal) / 16; - sta->last_noise = (sta->last_noise * 15 + - rx->u.rx.status->noise) / 16; - - if (!(rx->fc & IEEE80211_FCTL_MOREFRAGS)) { - /* Change STA power saving mode only in the end of a frame - * exchange sequence */ - if ((sta->flags & WLAN_STA_PS) && !(rx->fc & IEEE80211_FCTL_PM)) - rx->u.rx.sent_ps_buffered += ap_sta_ps_end(dev, sta); - else if (!(sta->flags & WLAN_STA_PS) && - (rx->fc & IEEE80211_FCTL_PM)) - ap_sta_ps_start(dev, sta); - } - - /* Drop data::nullfunc frames silently, since they are used only to - * control station power saving mode. */ - if ((rx->fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_DATA && - (rx->fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_NULLFUNC) { - I802_DEBUG_INC(rx->local->rx_handlers_drop_nullfunc); - /* Update counter and free packet here to avoid counting this - * as a dropped packed. */ - sta->rx_packets++; - dev_kfree_skb(rx->skb); - return TXRX_QUEUED; - } - - return TXRX_CONTINUE; -} /* ieee80211_rx_h_sta_process */ - - -static ieee80211_txrx_result -ieee80211_rx_h_wep_weak_iv_detection(struct ieee80211_txrx_data *rx) -{ - if (!rx->sta || !(rx->fc & IEEE80211_FCTL_PROTECTED) || - (rx->fc & IEEE80211_FCTL_FTYPE) != IEEE80211_FTYPE_DATA || - !rx->key || rx->key->alg != ALG_WEP || !rx->u.rx.ra_match) - return TXRX_CONTINUE; - - /* Check for weak IVs, if hwaccel did not remove IV from the frame */ - if ((rx->local->hw.flags & IEEE80211_HW_WEP_INCLUDE_IV) || - rx->key->force_sw_encrypt) { - u8 *iv = ieee80211_wep_is_weak_iv(rx->skb, rx->key); - if (iv) { - rx->sta->wep_weak_iv_count++; - } - } - - return TXRX_CONTINUE; -} - - -static ieee80211_txrx_result -ieee80211_rx_h_wep_decrypt(struct ieee80211_txrx_data *rx) -{ - /* If the device handles decryption totally, skip this test */ - if (rx->local->hw.flags & IEEE80211_HW_DEVICE_HIDES_WEP) - return TXRX_CONTINUE; - - if ((rx->key && rx->key->alg != ALG_WEP) || - !(rx->fc & IEEE80211_FCTL_PROTECTED) || - ((rx->fc & IEEE80211_FCTL_FTYPE) != IEEE80211_FTYPE_DATA && - ((rx->fc & IEEE80211_FCTL_FTYPE) != IEEE80211_FTYPE_MGMT || - (rx->fc & IEEE80211_FCTL_STYPE) != IEEE80211_STYPE_AUTH))) - return TXRX_CONTINUE; - - if (!rx->key) { - printk(KERN_DEBUG "%s: RX WEP frame, but no key set\n", - rx->dev->name); - return TXRX_DROP; - } - - if (!(rx->u.rx.status->flag & RX_FLAG_DECRYPTED) || - rx->key->force_sw_encrypt) { - if (ieee80211_wep_decrypt(rx->local, rx->skb, rx->key)) { - printk(KERN_DEBUG "%s: RX WEP frame, decrypt " - "failed\n", rx->dev->name); - return TXRX_DROP; - } - } else if (rx->local->hw.flags & IEEE80211_HW_WEP_INCLUDE_IV) { - ieee80211_wep_remove_iv(rx->local, rx->skb, rx->key); - /* remove ICV */ - skb_trim(rx->skb, rx->skb->len - 4); - } - - return TXRX_CONTINUE; -} - - -static ieee80211_txrx_result -ieee80211_rx_h_802_1x_pae(struct ieee80211_txrx_data *rx) -{ - if (rx->sdata->eapol && ieee80211_is_eapol(rx->skb) && - rx->sdata->type != IEEE80211_IF_TYPE_STA && rx->u.rx.ra_match) { - /* Pass both encrypted and unencrypted EAPOL frames to user - * space for processing. 
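The per-station signal bookkeeping above smooths last_rssi, last_signal and last_noise with the same 15/16 exponential moving average. Isolated, with a worked number:

    /* new_avg = (old_avg * 15 + sample) / 16 */
    static int ewma_15_16(int avg, int sample)
    {
        return (avg * 15 + sample) / 16;
    }

    /* Example: an average of -60 dBm and a fresh -40 dBm sample give
     * (-900 - 40) / 16 = -58 with C integer division, i.e. the average
     * moves roughly 1/16 of the way toward each new reading. */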
*/ - if (!rx->local->apdev) - return TXRX_DROP; - ieee80211_rx_mgmt(rx->local, rx->skb, rx->u.rx.status, - ieee80211_msg_normal); - return TXRX_QUEUED; - } - - if (unlikely(rx->sdata->ieee802_1x && - (rx->fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_DATA && - (rx->fc & IEEE80211_FCTL_STYPE) != IEEE80211_STYPE_NULLFUNC && - (!rx->sta || !(rx->sta->flags & WLAN_STA_AUTHORIZED)) && - !ieee80211_is_eapol(rx->skb))) { -#ifdef CONFIG_MAC80211_DEBUG - struct ieee80211_hdr *hdr = - (struct ieee80211_hdr *) rx->skb->data; - printk(KERN_DEBUG "%s: dropped frame from " MAC_FMT - " (unauthorized port)\n", rx->dev->name, - MAC_ARG(hdr->addr2)); -#endif /* CONFIG_MAC80211_DEBUG */ - return TXRX_DROP; - } - - return TXRX_CONTINUE; -} - - -static ieee80211_txrx_result -ieee80211_rx_h_drop_unencrypted(struct ieee80211_txrx_data *rx) -{ - /* If the device handles decryption totally, skip this test */ - if (rx->local->hw.flags & IEEE80211_HW_DEVICE_HIDES_WEP) - return TXRX_CONTINUE; - - /* Drop unencrypted frames if key is set. */ - if (unlikely(!(rx->fc & IEEE80211_FCTL_PROTECTED) && - (rx->fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_DATA && - (rx->fc & IEEE80211_FCTL_STYPE) != IEEE80211_STYPE_NULLFUNC && - (rx->key || rx->sdata->drop_unencrypted) && - (rx->sdata->eapol == 0 || - !ieee80211_is_eapol(rx->skb)))) { - printk(KERN_DEBUG "%s: RX non-WEP frame, but expected " - "encryption\n", rx->dev->name); - return TXRX_DROP; - } - return TXRX_CONTINUE; -} - - -static ieee80211_txrx_result -ieee80211_rx_h_mgmt(struct ieee80211_txrx_data *rx) -{ - struct ieee80211_sub_if_data *sdata; - - if (!rx->u.rx.ra_match) - return TXRX_DROP; - - sdata = IEEE80211_DEV_TO_SUB_IF(rx->dev); - if ((sdata->type == IEEE80211_IF_TYPE_STA || - sdata->type == IEEE80211_IF_TYPE_IBSS) && - !rx->local->user_space_mlme) { - ieee80211_sta_rx_mgmt(rx->dev, rx->skb, rx->u.rx.status); - } else { - /* Management frames are sent to hostapd for processing */ - if (!rx->local->apdev) - return TXRX_DROP; - ieee80211_rx_mgmt(rx->local, rx->skb, rx->u.rx.status, - ieee80211_msg_normal); - } - return TXRX_QUEUED; -} - - -static ieee80211_txrx_result -ieee80211_rx_h_passive_scan(struct ieee80211_txrx_data *rx) -{ - struct ieee80211_local *local = rx->local; - struct sk_buff *skb = rx->skb; - - if (unlikely(local->sta_scanning != 0)) { - ieee80211_sta_rx_scan(rx->dev, skb, rx->u.rx.status); - return TXRX_QUEUED; - } - - if (unlikely(rx->u.rx.in_scan)) { - /* scanning finished during invoking of handlers */ - I802_DEBUG_INC(local->rx_handlers_drop_passive_scan); - return TXRX_DROP; - } - - return TXRX_CONTINUE; -} - - -static void ieee80211_rx_michael_mic_report(struct net_device *dev, - struct ieee80211_hdr *hdr, - struct sta_info *sta, - struct ieee80211_txrx_data *rx) -{ - int keyidx, hdrlen; - - hdrlen = ieee80211_get_hdrlen_from_skb(rx->skb); - if (rx->skb->len >= hdrlen + 4) - keyidx = rx->skb->data[hdrlen + 3] >> 6; - else - keyidx = -1; - - /* TODO: verify that this is not triggered by fragmented - * frames (hw does not verify MIC for them). */ - printk(KERN_DEBUG "%s: TKIP hwaccel reported Michael MIC " - "failure from " MAC_FMT " to " MAC_FMT " keyidx=%d\n", - dev->name, MAC_ARG(hdr->addr2), MAC_ARG(hdr->addr1), keyidx); - - if (!sta) { - /* Some hardware versions seem to generate incorrect - * Michael MIC reports; ignore them to avoid triggering - * countermeasures. 
*/ - printk(KERN_DEBUG "%s: ignored spurious Michael MIC " - "error for unknown address " MAC_FMT "\n", - dev->name, MAC_ARG(hdr->addr2)); - goto ignore; - } - - if (!(rx->fc & IEEE80211_FCTL_PROTECTED)) { - printk(KERN_DEBUG "%s: ignored spurious Michael MIC " - "error for a frame with no ISWEP flag (src " - MAC_FMT ")\n", dev->name, MAC_ARG(hdr->addr2)); - goto ignore; - } - - if ((rx->local->hw.flags & IEEE80211_HW_WEP_INCLUDE_IV) && - rx->sdata->type == IEEE80211_IF_TYPE_AP) { - keyidx = ieee80211_wep_get_keyidx(rx->skb); - /* AP with Pairwise keys support should never receive Michael - * MIC errors for non-zero keyidx because these are reserved - * for group keys and only the AP is sending real multicast - * frames in BSS. */ - if (keyidx) { - printk(KERN_DEBUG "%s: ignored Michael MIC error for " - "a frame with non-zero keyidx (%d) (src " MAC_FMT - ")\n", dev->name, keyidx, MAC_ARG(hdr->addr2)); - goto ignore; - } - } - - if ((rx->fc & IEEE80211_FCTL_FTYPE) != IEEE80211_FTYPE_DATA && - ((rx->fc & IEEE80211_FCTL_FTYPE) != IEEE80211_FTYPE_MGMT || - (rx->fc & IEEE80211_FCTL_STYPE) != IEEE80211_STYPE_AUTH)) { - printk(KERN_DEBUG "%s: ignored spurious Michael MIC " - "error for a frame that cannot be encrypted " - "(fc=0x%04x) (src " MAC_FMT ")\n", - dev->name, rx->fc, MAC_ARG(hdr->addr2)); - goto ignore; - } - - do { - union iwreq_data wrqu; - char *buf = kmalloc(128, GFP_ATOMIC); - if (!buf) - break; - - /* TODO: needed parameters: count, key type, TSC */ - sprintf(buf, "MLME-MICHAELMICFAILURE.indication(" - "keyid=%d %scast addr=" MAC_FMT ")", - keyidx, hdr->addr1[0] & 0x01 ? "broad" : "uni", - MAC_ARG(hdr->addr2)); - memset(&wrqu, 0, sizeof(wrqu)); - wrqu.data.length = strlen(buf); - wireless_send_event(rx->dev, IWEVCUSTOM, &wrqu, buf); - kfree(buf); - } while (0); - - /* TODO: consider verifying the MIC error report with software - * implementation if we get too many spurious reports from the - * hardware. */ - if (!rx->local->apdev) - goto ignore; - ieee80211_rx_mgmt(rx->local, rx->skb, rx->u.rx.status, - ieee80211_msg_michael_mic_failure); - return; - - ignore: - dev_kfree_skb(rx->skb); - rx->skb = NULL; -} - -static inline ieee80211_txrx_result __ieee80211_invoke_rx_handlers( - struct ieee80211_local *local, - ieee80211_rx_handler *handlers, - struct ieee80211_txrx_data *rx, - struct sta_info *sta) -{ - ieee80211_rx_handler *handler; - ieee80211_txrx_result res = TXRX_DROP; - - for (handler = handlers; *handler != NULL; handler++) { - res = (*handler)(rx); - if (res != TXRX_CONTINUE) { - if (res == TXRX_DROP) { - I802_DEBUG_INC(local->rx_handlers_drop); - if (sta) - sta->rx_dropped++; - } - if (res == TXRX_QUEUED) - I802_DEBUG_INC(local->rx_handlers_queued); - break; - } - } - - if (res == TXRX_DROP) { - dev_kfree_skb(rx->skb); - } - return res; -} - -static inline void ieee80211_invoke_rx_handlers(struct ieee80211_local *local, - ieee80211_rx_handler *handlers, - struct ieee80211_txrx_data *rx, - struct sta_info *sta) -{ - if (__ieee80211_invoke_rx_handlers(local, handlers, rx, sta) == - TXRX_CONTINUE) - dev_kfree_skb(rx->skb); -} - -/* - * This is the receive path handler. It is called by a low level driver when an - * 802.11 MPDU is received from the hardware. 
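__ieee80211_invoke_rx_handlers() above is the generic handler-chain pattern used throughout this file for both RX and TX: walk a NULL-terminated array of handlers until one of them stops returning "continue", then act on the final verdict. Stripped of the statistics and skb bookkeeping:

    enum verdict { CONTINUE, DROP, QUEUED };    /* mirrors TXRX_* */
    typedef enum verdict (*handler_t)(void *frame);

    static enum verdict run_handlers(handler_t *handlers, void *frame)
    {
        enum verdict res = DROP;

        for (; *handlers != NULL; handlers++) {
            res = (*handlers)(frame);
            if (res != CONTINUE)
                break;
        }
        return res;
    }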
- */ -void __ieee80211_rx(struct ieee80211_hw *hw, struct sk_buff *skb, - struct ieee80211_rx_status *status) -{ - struct ieee80211_local *local = hw_to_local(hw); - struct ieee80211_sub_if_data *sdata; - struct sta_info *sta; - struct ieee80211_hdr *hdr; - struct ieee80211_txrx_data rx; - u16 type; - int multicast; - int radiotap_len = 0; - - if (status->flag & RX_FLAG_RADIOTAP) { - radiotap_len = ieee80211_get_radiotap_len(skb); - skb_pull(skb, radiotap_len); - } - - hdr = (struct ieee80211_hdr *) skb->data; - memset(&rx, 0, sizeof(rx)); - rx.skb = skb; - rx.local = local; - - rx.u.rx.status = status; - rx.fc = skb->len >= 2 ? le16_to_cpu(hdr->frame_control) : 0; - type = rx.fc & IEEE80211_FCTL_FTYPE; - if (type == IEEE80211_FTYPE_DATA || type == IEEE80211_FTYPE_MGMT) - local->dot11ReceivedFragmentCount++; - multicast = is_multicast_ether_addr(hdr->addr1); - - if (skb->len >= 16) - sta = rx.sta = sta_info_get(local, hdr->addr2); - else - sta = rx.sta = NULL; - - if (sta) { - rx.dev = sta->dev; - rx.sdata = IEEE80211_DEV_TO_SUB_IF(rx.dev); - } - - if ((status->flag & RX_FLAG_MMIC_ERROR)) { - ieee80211_rx_michael_mic_report(local->mdev, hdr, sta, &rx); - goto end; - } - - if (unlikely(local->sta_scanning)) - rx.u.rx.in_scan = 1; - - if (__ieee80211_invoke_rx_handlers(local, local->rx_pre_handlers, &rx, - sta) != TXRX_CONTINUE) - goto end; - skb = rx.skb; - - skb_push(skb, radiotap_len); - if (sta && !sta->assoc_ap && !(sta->flags & WLAN_STA_WDS) && - !local->iff_promiscs && !multicast) { - rx.u.rx.ra_match = 1; - ieee80211_invoke_rx_handlers(local, local->rx_handlers, &rx, - sta); - } else { - struct ieee80211_sub_if_data *prev = NULL; - struct sk_buff *skb_new; - u8 *bssid = ieee80211_get_bssid(hdr, skb->len - radiotap_len); - - read_lock(&local->sub_if_lock); - list_for_each_entry(sdata, &local->sub_if_list, list) { - rx.u.rx.ra_match = 1; - switch (sdata->type) { - case IEEE80211_IF_TYPE_STA: - if (!bssid) - continue; - if (!ieee80211_bssid_match(bssid, - sdata->u.sta.bssid)) { - if (!rx.u.rx.in_scan) - continue; - rx.u.rx.ra_match = 0; - } else if (!multicast && - compare_ether_addr(sdata->dev->dev_addr, - hdr->addr1) != 0) { - if (!sdata->promisc) - continue; - rx.u.rx.ra_match = 0; - } - break; - case IEEE80211_IF_TYPE_IBSS: - if (!bssid) - continue; - if (!ieee80211_bssid_match(bssid, - sdata->u.sta.bssid)) { - if (!rx.u.rx.in_scan) - continue; - rx.u.rx.ra_match = 0; - } else if (!multicast && - compare_ether_addr(sdata->dev->dev_addr, - hdr->addr1) != 0) { - if (!sdata->promisc) - continue; - rx.u.rx.ra_match = 0; - } else if (!sta) - sta = rx.sta = - ieee80211_ibss_add_sta(sdata->dev, - skb, bssid, - hdr->addr2); - break; - case IEEE80211_IF_TYPE_AP: - if (!bssid) { - if (compare_ether_addr(sdata->dev->dev_addr, - hdr->addr1) != 0) - continue; - } else if (!ieee80211_bssid_match(bssid, - sdata->dev->dev_addr)) { - if (!rx.u.rx.in_scan) - continue; - rx.u.rx.ra_match = 0; - } - if (sdata->dev == local->mdev && - !rx.u.rx.in_scan) - /* do not receive anything via - * master device when not scanning */ - continue; - break; - case IEEE80211_IF_TYPE_WDS: - if (bssid || - (rx.fc & IEEE80211_FCTL_FTYPE) != IEEE80211_FTYPE_DATA) - continue; - if (compare_ether_addr(sdata->u.wds.remote_addr, - hdr->addr2) != 0) - continue; - break; - } - - if (prev) { - skb_new = skb_copy(skb, GFP_ATOMIC); - if (!skb_new) { - if (net_ratelimit()) - printk(KERN_DEBUG "%s: failed to copy " - "multicast frame for %s", - local->mdev->name, prev->dev->name); - continue; - } - rx.skb = skb_new; - rx.dev = 
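The per-interface delivery loop at the end of __ieee80211_rx() above uses a copy-for-all-but-the-last strategy: every matching sub-interface except the final one receives an skb_copy(), the original buffer goes to the last match, and it is freed when nothing matched at all. Schematically (clone_frame(), deliver() and discard() are hypothetical stand-ins, not kernel APIs):

    struct frame;
    struct vif;
    extern struct frame *clone_frame(struct frame *f);  /* may return NULL */
    extern void deliver(struct vif *v, struct frame *f);
    extern void discard(struct frame *f);

    static void fan_out(struct frame *f, struct vif **matches, int n)
    {
        int i;

        for (i = 0; i < n - 1; i++) {
            struct frame *copy = clone_frame(f);

            if (copy)
                deliver(matches[i], copy);
        }
        if (n > 0)
            deliver(matches[n - 1], f);     /* original goes to last match */
        else
            discard(f);
    }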
prev->dev; - rx.sdata = prev; - ieee80211_invoke_rx_handlers(local, - local->rx_handlers, - &rx, sta); - } - prev = sdata; - } - if (prev) { - rx.skb = skb; - rx.dev = prev->dev; - rx.sdata = prev; - ieee80211_invoke_rx_handlers(local, local->rx_handlers, - &rx, sta); - } else - dev_kfree_skb(skb); - read_unlock(&local->sub_if_lock); + tasklet_disable(&local->tx_pending_tasklet); + tasklet_disable(&local->tasklet); } - end: - if (sta) - sta_info_put(sta); + return 0; } -EXPORT_SYMBOL(__ieee80211_rx); -static ieee80211_txrx_result -ieee80211_tx_h_load_stats(struct ieee80211_txrx_data *tx) +static void ieee80211_set_multicast_list(struct net_device *dev) { - struct ieee80211_local *local = tx->local; - struct ieee80211_hw_mode *mode = tx->u.tx.mode; - struct sk_buff *skb = tx->skb; - struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; - u32 load = 0, hdrtime; - - /* TODO: this could be part of tx_status handling, so that the number - * of retries would be known; TX rate should in that case be stored - * somewhere with the packet */ - - /* Estimate total channel use caused by this frame */ - - /* 1 bit at 1 Mbit/s takes 1 usec; in channel_use values, - * 1 usec = 1/8 * (1080 / 10) = 13.5 */ - - if (mode->mode == MODE_IEEE80211A || - mode->mode == MODE_ATHEROS_TURBO || - mode->mode == MODE_ATHEROS_TURBOG || - (mode->mode == MODE_IEEE80211G && - tx->u.tx.rate->flags & IEEE80211_RATE_ERP)) - hdrtime = CHAN_UTIL_HDR_SHORT; - else - hdrtime = CHAN_UTIL_HDR_LONG; - - load = hdrtime; - if (!is_multicast_ether_addr(hdr->addr1)) - load += hdrtime; - - if (tx->u.tx.control->flags & IEEE80211_TXCTL_USE_RTS_CTS) - load += 2 * hdrtime; - else if (tx->u.tx.control->flags & IEEE80211_TXCTL_USE_CTS_PROTECT) - load += hdrtime; - - load += skb->len * tx->u.tx.rate->rate_inv; - - if (tx->u.tx.extra_frag) { - int i; - for (i = 0; i < tx->u.tx.num_extra_frag; i++) { - load += 2 * hdrtime; - load += tx->u.tx.extra_frag[i]->len * - tx->u.tx.rate->rate; - } + struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); + struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); + int allmulti, promisc, sdata_allmulti, sdata_promisc; + + allmulti = !!(dev->flags & IFF_ALLMULTI); + promisc = !!(dev->flags & IFF_PROMISC); + sdata_allmulti = sdata->flags & IEEE80211_SDATA_ALLMULTI; + sdata_promisc = sdata->flags & IEEE80211_SDATA_PROMISC; + + if (allmulti != sdata_allmulti) { + if (dev->flags & IFF_ALLMULTI) + atomic_inc(&local->iff_allmultis); + else + atomic_dec(&local->iff_allmultis); + sdata->flags ^= IEEE80211_SDATA_ALLMULTI; } - /* Divide channel_use by 8 to avoid wrapping around the counter */ - load >>= CHAN_UTIL_SHIFT; - local->channel_use_raw += load; - if (tx->sta) - tx->sta->channel_use_raw += load; - tx->sdata->channel_use_raw += load; + if (promisc != sdata_promisc) { + if (dev->flags & IFF_PROMISC) + atomic_inc(&local->iff_promiscs); + else + atomic_dec(&local->iff_promiscs); + sdata->flags ^= IEEE80211_SDATA_PROMISC; + } + + dev_mc_sync(local->mdev, dev); +} + +static const struct header_ops ieee80211_header_ops = { + .create = eth_header, + .parse = header_parse_80211, + .rebuild = eth_rebuild_header, + .cache = eth_header_cache, + .cache_update = eth_header_cache_update, +}; - return TXRX_CONTINUE; +/* Must not be called for mdev */ +void ieee80211_if_setup(struct net_device *dev) +{ + ether_setup(dev); + dev->header_ops = &ieee80211_header_ops; + dev->hard_start_xmit = ieee80211_subif_start_xmit; + dev->wireless_handlers = &ieee80211_iw_handler_def; + dev->set_multicast_list = 
ieee80211_set_multicast_list; + dev->change_mtu = ieee80211_change_mtu; + dev->open = ieee80211_open; + dev->stop = ieee80211_stop; + dev->destructor = ieee80211_if_free; } +/* WDS specialties */ -static ieee80211_txrx_result -ieee80211_rx_h_load_stats(struct ieee80211_txrx_data *rx) +int ieee80211_if_update_wds(struct net_device *dev, u8 *remote_addr) { - struct ieee80211_local *local = rx->local; - struct sk_buff *skb = rx->skb; - struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; - u32 load = 0, hdrtime; - struct ieee80211_rate *rate; - struct ieee80211_hw_mode *mode = local->hw.conf.mode; - int i; + struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); + struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); + struct sta_info *sta; + DECLARE_MAC_BUF(mac); - /* Estimate total channel use caused by this frame */ + if (compare_ether_addr(remote_addr, sdata->u.wds.remote_addr) == 0) + return 0; - if (unlikely(mode->num_rates < 0)) - return TXRX_CONTINUE; + /* Create STA entry for the new peer */ + sta = sta_info_add(local, dev, remote_addr, GFP_KERNEL); + if (!sta) + return -ENOMEM; + sta_info_put(sta); - rate = &mode->rates[0]; - for (i = 0; i < mode->num_rates; i++) { - if (mode->rates[i].val == rx->u.rx.status->rate) { - rate = &mode->rates[i]; - break; - } + /* Remove STA entry for the old peer */ + sta = sta_info_get(local, sdata->u.wds.remote_addr); + if (sta) { + sta_info_free(sta); + sta_info_put(sta); + } else { + printk(KERN_DEBUG "%s: could not find STA entry for WDS link " + "peer %s\n", + dev->name, print_mac(mac, sdata->u.wds.remote_addr)); } - /* 1 bit at 1 Mbit/s takes 1 usec; in channel_use values, - * 1 usec = 1/8 * (1080 / 10) = 13.5 */ + /* Update WDS link data */ + memcpy(&sdata->u.wds.remote_addr, remote_addr, ETH_ALEN); - if (mode->mode == MODE_IEEE80211A || - mode->mode == MODE_ATHEROS_TURBO || - mode->mode == MODE_ATHEROS_TURBOG || - (mode->mode == MODE_IEEE80211G && - rate->flags & IEEE80211_RATE_ERP)) - hdrtime = CHAN_UTIL_HDR_SHORT; - else - hdrtime = CHAN_UTIL_HDR_LONG; + return 0; +} - load = hdrtime; - if (!is_multicast_ether_addr(hdr->addr1)) - load += hdrtime; +/* everything else */ - load += skb->len * rate->rate_inv; +static int __ieee80211_if_config(struct net_device *dev, + struct sk_buff *beacon, + struct ieee80211_tx_control *control) +{ + struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); + struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); + struct ieee80211_if_conf conf; - /* Divide channel_use by 8 to avoid wrapping around the counter */ - load >>= CHAN_UTIL_SHIFT; - local->channel_use_raw += load; - if (rx->sta) - rx->sta->channel_use_raw += load; - rx->u.rx.load = load; + if (!local->ops->config_interface || !netif_running(dev)) + return 0; - return TXRX_CONTINUE; + memset(&conf, 0, sizeof(conf)); + conf.type = sdata->type; + if (sdata->type == IEEE80211_IF_TYPE_STA || + sdata->type == IEEE80211_IF_TYPE_IBSS) { + conf.bssid = sdata->u.sta.bssid; + conf.ssid = sdata->u.sta.ssid; + conf.ssid_len = sdata->u.sta.ssid_len; + } else if (sdata->type == IEEE80211_IF_TYPE_AP) { + conf.ssid = sdata->u.ap.ssid; + conf.ssid_len = sdata->u.ap.ssid_len; + conf.beacon = beacon; + conf.beacon_control = control; + } + return local->ops->config_interface(local_to_hw(local), + dev->ifindex, &conf); } -static ieee80211_txrx_result -ieee80211_rx_h_if_stats(struct ieee80211_txrx_data *rx) +int ieee80211_if_config(struct net_device *dev) { - rx->sdata->channel_use_raw += rx->u.rx.load; - return 
TXRX_CONTINUE; + return __ieee80211_if_config(dev, NULL, NULL); } -static void ieee80211_stat_refresh(unsigned long data) +int ieee80211_if_config_beacon(struct net_device *dev) { - struct ieee80211_local *local = (struct ieee80211_local *) data; - struct sta_info *sta; - struct ieee80211_sub_if_data *sdata; + struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); + struct ieee80211_tx_control control; + struct sk_buff *skb; - if (!local->stat_time) - return; + if (!(local->hw.flags & IEEE80211_HW_HOST_GEN_BEACON_TEMPLATE)) + return 0; + skb = ieee80211_beacon_get(local_to_hw(local), dev->ifindex, &control); + if (!skb) + return -ENOMEM; + return __ieee80211_if_config(dev, skb, &control); +} + +int ieee80211_hw_config(struct ieee80211_local *local) +{ + struct ieee80211_hw_mode *mode; + struct ieee80211_channel *chan; + int ret = 0; - /* go through all stations */ - spin_lock_bh(&local->sta_lock); - list_for_each_entry(sta, &local->sta_list, list) { - sta->channel_use = (sta->channel_use_raw / local->stat_time) / - CHAN_UTIL_PER_10MS; - sta->channel_use_raw = 0; + if (local->sta_scanning) { + chan = local->scan_channel; + mode = local->scan_hw_mode; + } else { + chan = local->oper_channel; + mode = local->oper_hw_mode; } - spin_unlock_bh(&local->sta_lock); - - /* go through all subinterfaces */ - read_lock(&local->sub_if_lock); - list_for_each_entry(sdata, &local->sub_if_list, list) { - sdata->channel_use = (sdata->channel_use_raw / - local->stat_time) / CHAN_UTIL_PER_10MS; - sdata->channel_use_raw = 0; + + local->hw.conf.channel = chan->chan; + local->hw.conf.channel_val = chan->val; + if (!local->hw.conf.power_level) { + local->hw.conf.power_level = chan->power_level; + } else { + local->hw.conf.power_level = min(chan->power_level, + local->hw.conf.power_level); } - read_unlock(&local->sub_if_lock); + local->hw.conf.freq = chan->freq; + local->hw.conf.phymode = mode->mode; + local->hw.conf.antenna_max = chan->antenna_max; + local->hw.conf.chan = chan; + local->hw.conf.mode = mode; + +#ifdef CONFIG_MAC80211_VERBOSE_DEBUG + printk(KERN_DEBUG "HW CONFIG: channel=%d freq=%d " + "phymode=%d\n", local->hw.conf.channel, local->hw.conf.freq, + local->hw.conf.phymode); +#endif /* CONFIG_MAC80211_VERBOSE_DEBUG */ - /* hardware interface */ - local->channel_use = (local->channel_use_raw / - local->stat_time) / CHAN_UTIL_PER_10MS; - local->channel_use_raw = 0; + if (local->open_count) + ret = local->ops->config(local_to_hw(local), &local->hw.conf); - local->stat_timer.expires = jiffies + HZ * local->stat_time / 100; - add_timer(&local->stat_timer); + return ret; } - -/* This is a version of the rx handler that can be called from hard irq - * context. 
Post the skb on the queue and schedule the tasklet */ -void ieee80211_rx_irqsafe(struct ieee80211_hw *hw, struct sk_buff *skb, - struct ieee80211_rx_status *status) +void ieee80211_erp_info_change_notify(struct net_device *dev, u8 changes) { - struct ieee80211_local *local = hw_to_local(hw); + struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); + struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); + if (local->ops->erp_ie_changed) + local->ops->erp_ie_changed(local_to_hw(local), changes, + !!(sdata->flags & IEEE80211_SDATA_USE_PROTECTION), + !(sdata->flags & IEEE80211_SDATA_SHORT_PREAMBLE)); +} - BUILD_BUG_ON(sizeof(struct ieee80211_rx_status) > sizeof(skb->cb)); +void ieee80211_reset_erp_info(struct net_device *dev) +{ + struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); - skb->dev = local->mdev; - /* copy status into skb->cb for use by tasklet */ - memcpy(skb->cb, status, sizeof(*status)); - skb->pkt_type = IEEE80211_RX_MSG; - skb_queue_tail(&local->skb_queue, skb); - tasklet_schedule(&local->tasklet); + sdata->flags &= ~(IEEE80211_SDATA_USE_PROTECTION | + IEEE80211_SDATA_SHORT_PREAMBLE); + ieee80211_erp_info_change_notify(dev, + IEEE80211_ERP_CHANGE_PROTECTION | + IEEE80211_ERP_CHANGE_PREAMBLE); } -EXPORT_SYMBOL(ieee80211_rx_irqsafe); void ieee80211_tx_status_irqsafe(struct ieee80211_hw *hw, struct sk_buff *skb, @@ -4398,14 +624,13 @@ static void ieee80211_tasklet_handler(unsigned long data) break; default: /* should never get here! */ printk(KERN_ERR "%s: Unknown message type (%d)\n", - local->mdev->name, skb->pkt_type); + wiphy_name(local->hw.wiphy), skb->pkt_type); dev_kfree_skb(skb); break; } } } - /* Remove added headers (e.g., QoS control), encryption header/MIC, etc. to * make a prepared TX frame (one that has been given to hw) to look like brand * new IEEE 802.11 frame that is ready to go through TX processing again. 
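/*
 * [Editor's aside — illustrative sketch only, not part of this patch.]
 * The following hunk replaces the one-bit bitfields in struct
 * ieee80211_tx_packet_data (req_tx_status, do_not_encrypt, requeue)
 * with a single "flags" word tested against IEEE80211_TXPD_* bits, the
 * same pattern the patch applies elsewhere (IEEE80211_TXRXD_*,
 * IEEE80211_SDATA_*, IEEE80211_STA_*).  A minimal, self-contained
 * example of that pattern is sketched below; the EXAMPLE_* names,
 * struct example_pkt_data, and the locally re-defined BIT() macro are
 * hypothetical and exist only for illustration.
 */
#include <stdio.h>

#define BIT(n)                 (1U << (n))
#define EXAMPLE_REQ_TX_STATUS  BIT(0)
#define EXAMPLE_DO_NOT_ENCRYPT BIT(1)
#define EXAMPLE_REQUEUE        BIT(2)

struct example_pkt_data {
	unsigned int flags;	/* replaces three separate ":1" bitfields */
};

int main(void)
{
	struct example_pkt_data pd = { .flags = 0 };

	pd.flags |= EXAMPLE_REQ_TX_STATUS;	/* set a flag */
	pd.flags &= ~EXAMPLE_REQUEUE;		/* clear a flag */

	if (pd.flags & EXAMPLE_REQ_TX_STATUS)	/* test a flag */
		printf("TX status requested\n");

	return 0;
}
/* End of editorial aside; the patch resumes below. */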
@@ -4420,10 +645,13 @@ static void ieee80211_remove_tx_extra(struct ieee80211_local *local, pkt_data = (struct ieee80211_tx_packet_data *)skb->cb; pkt_data->ifindex = control->ifindex; - pkt_data->mgmt_iface = (control->type == IEEE80211_IF_TYPE_MGMT); - pkt_data->req_tx_status = !!(control->flags & IEEE80211_TXCTL_REQ_TX_STATUS); - pkt_data->do_not_encrypt = !!(control->flags & IEEE80211_TXCTL_DO_NOT_ENCRYPT); - pkt_data->requeue = !!(control->flags & IEEE80211_TXCTL_REQUEUE); + pkt_data->flags = 0; + if (control->flags & IEEE80211_TXCTL_REQ_TX_STATUS) + pkt_data->flags |= IEEE80211_TXPD_REQ_TX_STATUS; + if (control->flags & IEEE80211_TXCTL_DO_NOT_ENCRYPT) + pkt_data->flags |= IEEE80211_TXPD_DO_NOT_ENCRYPT; + if (control->flags & IEEE80211_TXCTL_REQUEUE) + pkt_data->flags |= IEEE80211_TXPD_REQUEUE; pkt_data->queue = control->queue; hdrlen = ieee80211_get_hdrlen_from_skb(skb); @@ -4431,7 +659,7 @@ static void ieee80211_remove_tx_extra(struct ieee80211_local *local, if (!key) goto no_key; - switch (key->alg) { + switch (key->conf.alg) { case ALG_WEP: iv_len = WEP_IV_LEN; mic_len = WEP_ICV_LEN; @@ -4448,7 +676,8 @@ static void ieee80211_remove_tx_extra(struct ieee80211_local *local, goto no_key; } - if (skb->len >= mic_len && key->force_sw_encrypt) + if (skb->len >= mic_len && + !(key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE)) skb_trim(skb, skb->len - mic_len); if (skb->len >= iv_len && skb->len > hdrlen) { memmove(skb->data + iv_len, skb->data, hdrlen); @@ -4468,7 +697,6 @@ static void ieee80211_remove_tx_extra(struct ieee80211_local *local, } } - void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb, struct ieee80211_tx_status *status) { @@ -4476,7 +704,6 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb, struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; struct ieee80211_local *local = hw_to_local(hw); u16 frag, type; - u32 msg_type; struct ieee80211_tx_status_rtap_hdr *rthdr; struct ieee80211_sub_if_data *sdata; int monitors; @@ -4484,7 +711,7 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb, if (!status) { printk(KERN_ERR "%s: ieee80211_tx_status called with NULL status\n", - local->mdev->name); + wiphy_name(local->hw.wiphy)); dev_kfree_skb(skb); return; } @@ -4541,7 +768,7 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb, printk(KERN_DEBUG "%s: dropped TX " "filtered frame queue_len=%d " "PS=%d @%lu\n", - local->mdev->name, + wiphy_name(local->hw.wiphy), skb_queue_len( &sta->tx_filtered), !!(sta->flags & WLAN_STA_PS), @@ -4591,29 +818,9 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb, local->dot11FailedCount++; } - msg_type = (status->flags & IEEE80211_TX_STATUS_ACK) ? 
- ieee80211_msg_tx_callback_ack : ieee80211_msg_tx_callback_fail; - /* this was a transmitted frame, but now we want to reuse it */ skb_orphan(skb); - if ((status->control.flags & IEEE80211_TXCTL_REQ_TX_STATUS) && - local->apdev) { - if (local->monitors) { - skb2 = skb_clone(skb, GFP_ATOMIC); - } else { - skb2 = skb; - skb = NULL; - } - - if (skb2) - /* Send frame to hostapd */ - ieee80211_rx_mgmt(local, skb2, NULL, msg_type); - - if (!skb) - return; - } - if (!local->monitors) { dev_kfree_skb(skb); return; @@ -4648,9 +855,9 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb, rthdr->data_retries = status->retry_count; - read_lock(&local->sub_if_lock); + rcu_read_lock(); monitors = local->monitors; - list_for_each_entry(sdata, &local->sub_if_list, list) { + list_for_each_entry_rcu(sdata, &local->interfaces, list) { /* * Using the monitors counter is possibly racy, but * if the value is wrong we simply either clone the skb @@ -4666,7 +873,7 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb, continue; monitors--; if (monitors) - skb2 = skb_clone(skb, GFP_KERNEL); + skb2 = skb_clone(skb, GFP_ATOMIC); else skb2 = NULL; skb->dev = sdata->dev; @@ -4681,170 +888,12 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb, } } out: - read_unlock(&local->sub_if_lock); + rcu_read_unlock(); if (skb) dev_kfree_skb(skb); } EXPORT_SYMBOL(ieee80211_tx_status); -/* TODO: implement register/unregister functions for adding TX/RX handlers - * into ordered list */ - -/* rx_pre handlers don't have dev and sdata fields available in - * ieee80211_txrx_data */ -static ieee80211_rx_handler ieee80211_rx_pre_handlers[] = -{ - ieee80211_rx_h_parse_qos, - ieee80211_rx_h_load_stats, - NULL -}; - -static ieee80211_rx_handler ieee80211_rx_handlers[] = -{ - ieee80211_rx_h_if_stats, - ieee80211_rx_h_monitor, - ieee80211_rx_h_passive_scan, - ieee80211_rx_h_check, - ieee80211_rx_h_sta_process, - ieee80211_rx_h_ccmp_decrypt, - ieee80211_rx_h_tkip_decrypt, - ieee80211_rx_h_wep_weak_iv_detection, - ieee80211_rx_h_wep_decrypt, - ieee80211_rx_h_defragment, - ieee80211_rx_h_ps_poll, - ieee80211_rx_h_michael_mic_verify, - /* this must be after decryption - so header is counted in MPDU mic - * must be before pae and data, so QOS_DATA format frames - * are not passed to user space by these functions - */ - ieee80211_rx_h_remove_qos_control, - ieee80211_rx_h_802_1x_pae, - ieee80211_rx_h_drop_unencrypted, - ieee80211_rx_h_data, - ieee80211_rx_h_mgmt, - NULL -}; - -static ieee80211_tx_handler ieee80211_tx_handlers[] = -{ - ieee80211_tx_h_check_assoc, - ieee80211_tx_h_sequence, - ieee80211_tx_h_ps_buf, - ieee80211_tx_h_select_key, - ieee80211_tx_h_michael_mic_add, - ieee80211_tx_h_fragment, - ieee80211_tx_h_tkip_encrypt, - ieee80211_tx_h_ccmp_encrypt, - ieee80211_tx_h_wep_encrypt, - ieee80211_tx_h_rate_ctrl, - ieee80211_tx_h_misc, - ieee80211_tx_h_load_stats, - NULL -}; - - -int ieee80211_if_update_wds(struct net_device *dev, u8 *remote_addr) -{ - struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); - struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); - struct sta_info *sta; - - if (compare_ether_addr(remote_addr, sdata->u.wds.remote_addr) == 0) - return 0; - - /* Create STA entry for the new peer */ - sta = sta_info_add(local, dev, remote_addr, GFP_KERNEL); - if (!sta) - return -ENOMEM; - sta_info_put(sta); - - /* Remove STA entry for the old peer */ - sta = sta_info_get(local, sdata->u.wds.remote_addr); - if (sta) { - sta_info_put(sta); - 
sta_info_free(sta, 0); - } else { - printk(KERN_DEBUG "%s: could not find STA entry for WDS link " - "peer " MAC_FMT "\n", - dev->name, MAC_ARG(sdata->u.wds.remote_addr)); - } - - /* Update WDS link data */ - memcpy(&sdata->u.wds.remote_addr, remote_addr, ETH_ALEN); - - return 0; -} - -/* Must not be called for mdev and apdev */ -void ieee80211_if_setup(struct net_device *dev) -{ - ether_setup(dev); - dev->hard_start_xmit = ieee80211_subif_start_xmit; - dev->wireless_handlers = &ieee80211_iw_handler_def; - dev->set_multicast_list = ieee80211_set_multicast_list; - dev->change_mtu = ieee80211_change_mtu; - dev->get_stats = ieee80211_get_stats; - dev->open = ieee80211_open; - dev->stop = ieee80211_stop; - dev->uninit = ieee80211_if_reinit; - dev->destructor = ieee80211_if_free; -} - -void ieee80211_if_mgmt_setup(struct net_device *dev) -{ - ether_setup(dev); - dev->hard_start_xmit = ieee80211_mgmt_start_xmit; - dev->change_mtu = ieee80211_change_mtu_apdev; - dev->get_stats = ieee80211_get_stats; - dev->open = ieee80211_mgmt_open; - dev->stop = ieee80211_mgmt_stop; - dev->type = ARPHRD_IEEE80211_PRISM; - dev->hard_header_parse = header_parse_80211; - dev->uninit = ieee80211_if_reinit; - dev->destructor = ieee80211_if_free; -} - -int ieee80211_init_rate_ctrl_alg(struct ieee80211_local *local, - const char *name) -{ - struct rate_control_ref *ref, *old; - - ASSERT_RTNL(); - if (local->open_count || netif_running(local->mdev) || - (local->apdev && netif_running(local->apdev))) - return -EBUSY; - - ref = rate_control_alloc(name, local); - if (!ref) { - printk(KERN_WARNING "%s: Failed to select rate control " - "algorithm\n", local->mdev->name); - return -ENOENT; - } - - old = local->rate_ctrl; - local->rate_ctrl = ref; - if (old) { - rate_control_put(old); - sta_info_flush(local, NULL); - } - - printk(KERN_DEBUG "%s: Selected rate control " - "algorithm '%s'\n", local->mdev->name, - ref->ops->name); - - - return 0; -} - -static void rate_control_deinitialize(struct ieee80211_local *local) -{ - struct rate_control_ref *ref; - - ref = local->rate_ctrl; - local->rate_ctrl = NULL; - rate_control_put(ref); -} - struct ieee80211_hw *ieee80211_alloc_hw(size_t priv_data_len, const struct ieee80211_ops *ops) { @@ -4888,8 +937,12 @@ struct ieee80211_hw *ieee80211_alloc_hw(size_t priv_data_len, NETDEV_ALIGN_CONST) & ~NETDEV_ALIGN_CONST); BUG_ON(!ops->tx); + BUG_ON(!ops->start); + BUG_ON(!ops->stop); BUG_ON(!ops->config); BUG_ON(!ops->add_interface); + BUG_ON(!ops->remove_interface); + BUG_ON(!ops->configure_filter); local->ops = ops; /* for now, mdev needs sub_if_data :/ */ @@ -4919,17 +972,13 @@ struct ieee80211_hw *ieee80211_alloc_hw(size_t priv_data_len, local->long_retry_limit = 4; local->hw.conf.radio_enabled = 1; - local->enabled_modes = (unsigned int) -1; + local->enabled_modes = ~0; INIT_LIST_HEAD(&local->modes_list); - rwlock_init(&local->sub_if_lock); - INIT_LIST_HEAD(&local->sub_if_list); + INIT_LIST_HEAD(&local->interfaces); INIT_DELAYED_WORK(&local->scan_work, ieee80211_sta_scan_work); - init_timer(&local->stat_timer); - local->stat_timer.function = ieee80211_stat_refresh; - local->stat_timer.data = (unsigned long) local; ieee80211_rx_bss_list_init(mdev); sta_info_init(local); @@ -4938,7 +987,8 @@ struct ieee80211_hw *ieee80211_alloc_hw(size_t priv_data_len, mdev->open = ieee80211_master_open; mdev->stop = ieee80211_master_stop; mdev->type = ARPHRD_IEEE80211; - mdev->hard_header_parse = header_parse_80211; + mdev->header_ops = &ieee80211_header_ops; + mdev->set_multicast_list = 
ieee80211_master_set_multicast_list; sdata->type = IEEE80211_IF_TYPE_AP; sdata->dev = mdev; @@ -4946,7 +996,8 @@ struct ieee80211_hw *ieee80211_alloc_hw(size_t priv_data_len, sdata->u.ap.force_unicast_rateidx = -1; sdata->u.ap.max_ratectrl_rateidx = -1; ieee80211_if_sdata_init(sdata); - list_add_tail(&sdata->list, &local->sub_if_list); + /* no RCU needed since we're still during init phase */ + list_add_tail(&sdata->list, &local->interfaces); tasklet_init(&local->tx_pending_tasklet, ieee80211_tx_pending, (unsigned long)local); @@ -5019,11 +1070,12 @@ int ieee80211_register_hw(struct ieee80211_hw *hw) goto fail_dev; ieee80211_debugfs_add_netdev(IEEE80211_DEV_TO_SUB_IF(local->mdev)); + ieee80211_if_set_type(local->mdev, IEEE80211_IF_TYPE_AP); result = ieee80211_init_rate_ctrl_alg(local, NULL); if (result < 0) { printk(KERN_DEBUG "%s: Failed to initialize rate control " - "algorithm\n", local->mdev->name); + "algorithm\n", wiphy_name(local->hw.wiphy)); goto fail_rate; } @@ -5031,7 +1083,7 @@ int ieee80211_register_hw(struct ieee80211_hw *hw) if (result < 0) { printk(KERN_DEBUG "%s: Failed to initialize wep\n", - local->mdev->name); + wiphy_name(local->hw.wiphy)); goto fail_wep; } @@ -5042,7 +1094,7 @@ int ieee80211_register_hw(struct ieee80211_hw *hw) IEEE80211_IF_TYPE_STA); if (result) printk(KERN_WARNING "%s: Failed to add default virtual iface\n", - local->mdev->name); + wiphy_name(local->hw.wiphy)); local->reg_state = IEEE80211_DEV_REGISTERED; rtnl_unlock(); @@ -5105,7 +1157,6 @@ void ieee80211_unregister_hw(struct ieee80211_hw *hw) { struct ieee80211_local *local = hw_to_local(hw); struct ieee80211_sub_if_data *sdata, *tmp; - struct list_head tmp_list; int i; tasklet_kill(&local->tx_pending_tasklet); @@ -5116,20 +1167,30 @@ void ieee80211_unregister_hw(struct ieee80211_hw *hw) BUG_ON(local->reg_state != IEEE80211_DEV_REGISTERED); local->reg_state = IEEE80211_DEV_UNREGISTERED; - if (local->apdev) - ieee80211_if_del_mgmt(local); - write_lock_bh(&local->sub_if_lock); - list_replace_init(&local->sub_if_list, &tmp_list); - write_unlock_bh(&local->sub_if_lock); + /* + * At this point, interface list manipulations are fine + * because the driver cannot be handing us frames any + * more and the tasklet is killed. + */ - list_for_each_entry_safe(sdata, tmp, &tmp_list, list) + /* + * First, we remove all non-master interfaces. Do this because they + * may have bss pointer dependency on the master, and when we free + * the master these would be freed as well, breaking our list + * iteration completely. 
+ */ + list_for_each_entry_safe(sdata, tmp, &local->interfaces, list) { + if (sdata->dev == local->mdev) + continue; + list_del(&sdata->list); __ieee80211_if_del(local, sdata); + } - rtnl_unlock(); + /* then, finally, remove the master interface */ + __ieee80211_if_del(local, IEEE80211_DEV_TO_SUB_IF(local->mdev)); - if (local->stat_time) - del_timer_sync(&local->stat_timer); + rtnl_unlock(); ieee80211_rx_bss_list_deinit(local->mdev); ieee80211_clear_tx_pending(local); @@ -5145,7 +1206,7 @@ void ieee80211_unregister_hw(struct ieee80211_hw *hw) if (skb_queue_len(&local->skb_queue) || skb_queue_len(&local->skb_queue_unreliable)) printk(KERN_WARNING "%s: skb_queue not empty\n", - local->mdev->name); + wiphy_name(local->hw.wiphy)); skb_queue_purge(&local->skb_queue); skb_queue_purge(&local->skb_queue_unreliable); @@ -5165,72 +1226,6 @@ void ieee80211_free_hw(struct ieee80211_hw *hw) } EXPORT_SYMBOL(ieee80211_free_hw); -void ieee80211_wake_queue(struct ieee80211_hw *hw, int queue) -{ - struct ieee80211_local *local = hw_to_local(hw); - - if (test_and_clear_bit(IEEE80211_LINK_STATE_XOFF, - &local->state[queue])) { - if (test_bit(IEEE80211_LINK_STATE_PENDING, - &local->state[queue])) - tasklet_schedule(&local->tx_pending_tasklet); - else - if (!ieee80211_qdisc_installed(local->mdev)) { - if (queue == 0) - netif_wake_queue(local->mdev); - } else - __netif_schedule(local->mdev); - } -} -EXPORT_SYMBOL(ieee80211_wake_queue); - -void ieee80211_stop_queue(struct ieee80211_hw *hw, int queue) -{ - struct ieee80211_local *local = hw_to_local(hw); - - if (!ieee80211_qdisc_installed(local->mdev) && queue == 0) - netif_stop_queue(local->mdev); - set_bit(IEEE80211_LINK_STATE_XOFF, &local->state[queue]); -} -EXPORT_SYMBOL(ieee80211_stop_queue); - -void ieee80211_start_queues(struct ieee80211_hw *hw) -{ - struct ieee80211_local *local = hw_to_local(hw); - int i; - - for (i = 0; i < local->hw.queues; i++) - clear_bit(IEEE80211_LINK_STATE_XOFF, &local->state[i]); - if (!ieee80211_qdisc_installed(local->mdev)) - netif_start_queue(local->mdev); -} -EXPORT_SYMBOL(ieee80211_start_queues); - -void ieee80211_stop_queues(struct ieee80211_hw *hw) -{ - int i; - - for (i = 0; i < hw->queues; i++) - ieee80211_stop_queue(hw, i); -} -EXPORT_SYMBOL(ieee80211_stop_queues); - -void ieee80211_wake_queues(struct ieee80211_hw *hw) -{ - int i; - - for (i = 0; i < hw->queues; i++) - ieee80211_wake_queue(hw, i); -} -EXPORT_SYMBOL(ieee80211_wake_queues); - -struct net_device_stats *ieee80211_dev_stats(struct net_device *dev) -{ - struct ieee80211_sub_if_data *sdata; - sdata = IEEE80211_DEV_TO_SUB_IF(dev); - return &sdata->stats; -} - static int __init ieee80211_init(void) { struct sk_buff *skb; @@ -5251,7 +1246,6 @@ static int __init ieee80211_init(void) return 0; } - static void __exit ieee80211_exit(void) { ieee80211_wme_unregister(); diff --git a/net/mac80211/ieee80211_cfg.c b/net/mac80211/ieee80211_cfg.c deleted file mode 100644 index 509096edb32463f7cae3720c93ad5e39b0c3a0fb..0000000000000000000000000000000000000000 --- a/net/mac80211/ieee80211_cfg.c +++ /dev/null @@ -1,66 +0,0 @@ -/* - * mac80211 configuration hooks for cfg80211 - * - * Copyright 2006 Johannes Berg - * - * This file is GPLv2 as found in COPYING. 
- */ - -#include -#include -#include -#include "ieee80211_i.h" -#include "ieee80211_cfg.h" - -static int ieee80211_add_iface(struct wiphy *wiphy, char *name, - unsigned int type) -{ - struct ieee80211_local *local = wiphy_priv(wiphy); - int itype; - - if (unlikely(local->reg_state != IEEE80211_DEV_REGISTERED)) - return -ENODEV; - - switch (type) { - case NL80211_IFTYPE_UNSPECIFIED: - itype = IEEE80211_IF_TYPE_STA; - break; - case NL80211_IFTYPE_ADHOC: - itype = IEEE80211_IF_TYPE_IBSS; - break; - case NL80211_IFTYPE_STATION: - itype = IEEE80211_IF_TYPE_STA; - break; - case NL80211_IFTYPE_MONITOR: - itype = IEEE80211_IF_TYPE_MNTR; - break; - default: - return -EINVAL; - } - - return ieee80211_if_add(local->mdev, name, NULL, itype); -} - -static int ieee80211_del_iface(struct wiphy *wiphy, int ifindex) -{ - struct ieee80211_local *local = wiphy_priv(wiphy); - struct net_device *dev; - char *name; - - if (unlikely(local->reg_state != IEEE80211_DEV_REGISTERED)) - return -ENODEV; - - dev = dev_get_by_index(ifindex); - if (!dev) - return 0; - - name = dev->name; - dev_put(dev); - - return ieee80211_if_remove(local->mdev, name, -1); -} - -struct cfg80211_ops mac80211_config_ops = { - .add_virtual_intf = ieee80211_add_iface, - .del_virtual_intf = ieee80211_del_iface, -}; diff --git a/net/mac80211/ieee80211_common.h b/net/mac80211/ieee80211_common.h index 77c6afb7f6a802818a975de0675ee5123e3be85c..c15295d43d87faa136584728a84143505a164b49 100644 --- a/net/mac80211/ieee80211_common.h +++ b/net/mac80211/ieee80211_common.h @@ -48,13 +48,13 @@ enum ieee80211_msg_type { ieee80211_msg_tx_callback_ack = 1, ieee80211_msg_tx_callback_fail = 2, /* hole at 3, was ieee80211_msg_passive_scan but unused */ - ieee80211_msg_wep_frame_unknown_key = 4, + /* hole at 4, was ieee80211_msg_wep_frame_unknown_key but now unused */ ieee80211_msg_michael_mic_failure = 5, /* hole at 6, was monitor but never sent to userspace */ ieee80211_msg_sta_not_assoc = 7, /* 8 was ieee80211_msg_set_aid_for_sta */ - ieee80211_msg_key_threshold_notification = 9, - ieee80211_msg_radar = 11, + /* 9 was ieee80211_msg_key_threshold_notification */ + /* 11 was ieee80211_msg_radar */ }; struct ieee80211_msg_key_notification { @@ -73,8 +73,6 @@ enum ieee80211_phytype { ieee80211_phytype_ofdm_dot11_g = 6, ieee80211_phytype_pbcc_dot11_g = 7, ieee80211_phytype_ofdm_dot11_a = 8, - ieee80211_phytype_dsss_dot11_turbog = 255, - ieee80211_phytype_dsss_dot11_turbo = 256, }; enum ieee80211_ssi_type { diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h index 6f7bae7ef9c02d54cbded6ced59745e856260295..d34a9deca67ad25bc36c44c3422dae6128548e42 100644 --- a/net/mac80211/ieee80211_i.h +++ b/net/mac80211/ieee80211_i.h @@ -21,6 +21,7 @@ #include #include #include +#include #include #include "ieee80211_key.h" #include "sta_info.h" @@ -112,6 +113,16 @@ typedef enum { TXRX_CONTINUE, TXRX_DROP, TXRX_QUEUED } ieee80211_txrx_result; +/* flags used in struct ieee80211_txrx_data.flags */ +/* whether the MSDU was fragmented */ +#define IEEE80211_TXRXD_FRAGMENTED BIT(0) +#define IEEE80211_TXRXD_TXUNICAST BIT(1) +#define IEEE80211_TXRXD_TXPS_BUFFERED BIT(2) +#define IEEE80211_TXRXD_TXPROBE_LAST_FRAG BIT(3) +#define IEEE80211_TXRXD_RXIN_SCAN BIT(4) +/* frame is destined to interface currently processed (incl. 
multicast frames) */ +#define IEEE80211_TXRXD_RXRA_MATCH BIT(5) +#define IEEE80211_TXRXD_TX_INJECTED BIT(6) struct ieee80211_txrx_data { struct sk_buff *skb; struct net_device *dev; @@ -120,14 +131,10 @@ struct ieee80211_txrx_data { struct sta_info *sta; u16 fc, ethertype; struct ieee80211_key *key; - unsigned int fragmented:1; /* whether the MSDU was fragmented */ + unsigned int flags; union { struct { struct ieee80211_tx_control *control; - unsigned int unicast:1; - unsigned int ps_buffered:1; - unsigned int short_preamble:1; - unsigned int probe_last_frag:1; struct ieee80211_hw_mode *mode; struct ieee80211_rate *rate; /* use this rate (if set) for last fragment; rate can @@ -135,7 +142,6 @@ struct ieee80211_txrx_data { * when using CTS protection with IEEE 802.11g. */ struct ieee80211_rate *last_frag_rate; int last_frag_hwrate; - int mgmt_interface; /* Extra fragments (in addition to the first fragment * in skb) */ @@ -147,23 +153,22 @@ struct ieee80211_txrx_data { int sent_ps_buffered; int queue; int load; - unsigned int in_scan:1; - /* frame is destined to interface currently processed - * (including multicast frames) */ - unsigned int ra_match:1; + u32 tkip_iv32; + u16 tkip_iv16; } rx; } u; }; +/* flags used in struct ieee80211_tx_packet_data.flags */ +#define IEEE80211_TXPD_REQ_TX_STATUS BIT(0) +#define IEEE80211_TXPD_DO_NOT_ENCRYPT BIT(1) +#define IEEE80211_TXPD_REQUEUE BIT(2) /* Stored in sk_buff->cb */ struct ieee80211_tx_packet_data { int ifindex; unsigned long jiffies; - unsigned int req_tx_status:1; - unsigned int do_not_encrypt:1; - unsigned int requeue:1; - unsigned int mgmt_iface:1; - unsigned int queue:4; + unsigned int flags; + u8 queue; }; struct ieee80211_tx_stored_packet { @@ -174,7 +179,7 @@ struct ieee80211_tx_stored_packet { int last_frag_rateidx; int last_frag_hwrate; struct ieee80211_rate *last_frag_rate; - unsigned int last_frag_rate_ctrl_probe:1; + unsigned int last_frag_rate_ctrl_probe; }; typedef ieee80211_txrx_result (*ieee80211_tx_handler) @@ -187,10 +192,10 @@ struct ieee80211_if_ap { u8 *beacon_head, *beacon_tail; int beacon_head_len, beacon_tail_len; + struct list_head vlans; + u8 ssid[IEEE80211_MAX_SSID_LEN]; size_t ssid_len; - u8 *generic_elem; - size_t generic_elem_len; /* yes, this looks ugly, but guarantees that we can later use * bitmap_empty :) @@ -210,9 +215,23 @@ struct ieee80211_if_wds { }; struct ieee80211_if_vlan { - u8 id; + struct ieee80211_sub_if_data *ap; + struct list_head list; }; +/* flags used in struct ieee80211_if_sta.flags */ +#define IEEE80211_STA_SSID_SET BIT(0) +#define IEEE80211_STA_BSSID_SET BIT(1) +#define IEEE80211_STA_PREV_BSSID_SET BIT(2) +#define IEEE80211_STA_AUTHENTICATED BIT(3) +#define IEEE80211_STA_ASSOCIATED BIT(4) +#define IEEE80211_STA_PROBEREQ_POLL BIT(5) +#define IEEE80211_STA_CREATE_IBSS BIT(6) +#define IEEE80211_STA_MIXED_CELL BIT(7) +#define IEEE80211_STA_WMM_ENABLED BIT(8) +#define IEEE80211_STA_AUTO_SSID_SEL BIT(10) +#define IEEE80211_STA_AUTO_BSSID_SEL BIT(11) +#define IEEE80211_STA_AUTO_CHANNEL_SEL BIT(12) struct ieee80211_if_sta { enum { IEEE80211_DISABLED, IEEE80211_AUTHENTICATE, @@ -235,25 +254,14 @@ struct ieee80211_if_sta { int auth_tries, assoc_tries; - unsigned int ssid_set:1; - unsigned int bssid_set:1; - unsigned int prev_bssid_set:1; - unsigned int authenticated:1; - unsigned int associated:1; - unsigned int probereq_poll:1; - unsigned int create_ibss:1; - unsigned int mixed_cell:1; - unsigned int wmm_enabled:1; - unsigned int auto_ssid_sel:1; - unsigned int auto_bssid_sel:1; - unsigned int 
auto_channel_sel:1; + unsigned int flags; #define IEEE80211_STA_REQ_SCAN 0 #define IEEE80211_STA_REQ_AUTH 1 #define IEEE80211_STA_REQ_RUN 2 unsigned long request; struct sk_buff_head skb_queue; - int key_mgmt; + int key_management_enabled; unsigned long last_probe; #define IEEE80211_AUTH_ALG_OPEN BIT(0) @@ -271,21 +279,29 @@ struct ieee80211_if_sta { }; +/* flags used in struct ieee80211_sub_if_data.flags */ +#define IEEE80211_SDATA_ALLMULTI BIT(0) +#define IEEE80211_SDATA_PROMISC BIT(1) +#define IEEE80211_SDATA_USE_PROTECTION BIT(2) /* CTS protect ERP frames */ +/* use short preamble with IEEE 802.11b: this flag is set when the AP or beacon + * generator reports that there are no present stations that cannot support short + * preambles */ +#define IEEE80211_SDATA_SHORT_PREAMBLE BIT(3) +#define IEEE80211_SDATA_USERSPACE_MLME BIT(4) struct ieee80211_sub_if_data { struct list_head list; - unsigned int type; + enum ieee80211_if_types type; struct wireless_dev wdev; + /* keys */ + struct list_head key_list; + struct net_device *dev; struct ieee80211_local *local; - int mc_count; - unsigned int allmulti:1; - unsigned int promisc:1; - unsigned int use_protection:1; /* CTS protect ERP frames */ + unsigned int flags; - struct net_device_stats stats; int drop_unencrypted; int eapol; /* 0 = process EAPOL frames as normal data frames, * 1 = send EAPOL frames through wlan#ap to hostapd @@ -364,7 +380,6 @@ struct ieee80211_sub_if_data { struct dentry *drop_unencrypted; struct dentry *eapol; struct dentry *ieee8021_x; - struct dentry *vlan_id; } vlan; struct { struct dentry *mode; @@ -393,9 +408,9 @@ struct ieee80211_local { struct list_head modes_list; struct net_device *mdev; /* wmaster# - "master" 802.11 device */ - struct net_device *apdev; /* wlan#ap - management frames (hostapd) */ int open_count; int monitors; + unsigned int filter_flags; /* FIF_* */ struct iw_statistics wstats; u8 wstats_flags; int tx_headroom; /* required headroom for hardware/radiotap */ @@ -416,10 +431,9 @@ struct ieee80211_local { struct sk_buff_head skb_queue_unreliable; /* Station data structures */ - spinlock_t sta_lock; /* mutex for STA data structures */ + rwlock_t sta_lock; /* protects STA data structures */ int num_sta; /* number of stations in sta_list */ struct list_head sta_list; - struct list_head deleted_sta_list; struct sta_info *sta_hash[STA_HASH_SIZE]; struct timer_list sta_cleanup; @@ -427,17 +441,11 @@ struct ieee80211_local { struct ieee80211_tx_stored_packet pending_packet[NUM_TX_DATA_QUEUES]; struct tasklet_struct tx_pending_tasklet; - int mc_count; /* total count of multicast entries in all interfaces */ - int iff_allmultis, iff_promiscs; - /* number of interfaces with corresponding IFF_ flags */ + /* number of interfaces with corresponding IFF_ flags */ + atomic_t iff_allmultis, iff_promiscs; struct rate_control_ref *rate_ctrl; - int next_mode; /* MODE_IEEE80211* - * The mode preference for next channel change. This is - * used to select .11g vs. .11b channels (or 4.9 GHz vs. - * .11a) when the channel number is not unique. */ - /* Supported and basic rate filters for different modes. These are * pointers to -1 terminated lists and rates in 100 kbps units. 
*/ int *supp_rates[NUM_IEEE80211_MODES]; @@ -447,14 +455,10 @@ struct ieee80211_local { int fragmentation_threshold; int short_retry_limit; /* dot11ShortRetryLimit */ int long_retry_limit; /* dot11LongRetryLimit */ - int short_preamble; /* use short preamble with IEEE 802.11b */ struct crypto_blkcipher *wep_tx_tfm; struct crypto_blkcipher *wep_rx_tfm; u32 wep_iv; - int key_tx_rx_threshold; /* number of times any key can be used in TX - * or RX before generating a rekey - * notification; 0 = notification disabled. */ int bridge_packets; /* bridge packets between associated stations and * deliver multicast frames both back to wireless @@ -464,9 +468,8 @@ struct ieee80211_local { ieee80211_rx_handler *rx_handlers; ieee80211_tx_handler *tx_handlers; - rwlock_t sub_if_lock; /* Protects sub_if_list. Cannot be taken under - * sta_bss_lock or sta_lock. */ - struct list_head sub_if_list; + struct list_head interfaces; + int sta_scanning; int scan_channel_idx; enum { SCAN_SET_CHANNEL, SCAN_SEND_PROBE } scan_state; @@ -500,25 +503,17 @@ struct ieee80211_local { #ifdef CONFIG_MAC80211_LEDS int tx_led_counter, rx_led_counter; - struct led_trigger *tx_led, *rx_led; - char tx_led_name[32], rx_led_name[32]; + struct led_trigger *tx_led, *rx_led, *assoc_led; + char tx_led_name[32], rx_led_name[32], assoc_led_name[32]; #endif u32 channel_use; u32 channel_use_raw; - u32 stat_time; - struct timer_list stat_timer; #ifdef CONFIG_MAC80211_DEBUGFS struct work_struct sta_debugfs_add; #endif - enum { - STA_ANTENNA_SEL_AUTO = 0, - STA_ANTENNA_SEL_SW_CTRL = 1, - STA_ANTENNA_SEL_SW_CTRL_DEBUG = 2 - } sta_antenna_sel; - #ifdef CONFIG_MAC80211_DEBUG_COUNTERS /* TX/RX handler statistics */ unsigned int tx_handlers_drop; @@ -548,16 +543,9 @@ struct ieee80211_local { #endif /* CONFIG_MAC80211_DEBUG_COUNTERS */ - int default_wep_only; /* only default WEP keys are used with this - * interface; this is used to decide when hwaccel - * can be used with default keys */ int total_ps_buffered; /* total number of all buffered unicast and * multicast packets for power saving stations */ - int allow_broadcast_always; /* whether to allow TX of broadcast frames - * even when there are no associated STAs - */ - int wifi_wme_noack_test; unsigned int wmm_acm; /* bit field of ACM bits (BIT(802.1D tag)) */ @@ -566,17 +554,13 @@ struct ieee80211_local { unsigned int hw_modes; /* bitfield of supported hardware modes; * (1 << MODE_*) */ - int user_space_mlme; - #ifdef CONFIG_MAC80211_DEBUGFS struct local_debugfsdentries { struct dentry *channel; struct dentry *frequency; - struct dentry *radar_detect; struct dentry *antenna_sel_tx; struct dentry *antenna_sel_rx; struct dentry *bridge_packets; - struct dentry *key_tx_rx_threshold; struct dentry *rts_threshold; struct dentry *fragmentation_threshold; struct dentry *short_retry_limit; @@ -584,7 +568,6 @@ struct ieee80211_local { struct dentry *total_ps_buffered; struct dentry *mode; struct dentry *wep_iv; - struct dentry *tx_power_reduction; struct dentry *modes; struct dentry *statistics; struct local_debugfsdentries_statsdentries { @@ -656,38 +639,38 @@ struct sta_attribute { ssize_t (*store)(struct sta_info *, const char *buf, size_t count); }; -static inline void __bss_tim_set(struct ieee80211_if_ap *bss, int aid) +static inline void __bss_tim_set(struct ieee80211_if_ap *bss, u16 aid) { /* - * This format has ben mandated by the IEEE specifications, + * This format has been mandated by the IEEE specifications, * so this line may not be changed to use the __set_bit() format. 
*/ - bss->tim[(aid)/8] |= 1<<((aid) % 8); + bss->tim[aid / 8] |= (1 << (aid % 8)); } static inline void bss_tim_set(struct ieee80211_local *local, - struct ieee80211_if_ap *bss, int aid) + struct ieee80211_if_ap *bss, u16 aid) { - spin_lock_bh(&local->sta_lock); + read_lock_bh(&local->sta_lock); __bss_tim_set(bss, aid); - spin_unlock_bh(&local->sta_lock); + read_unlock_bh(&local->sta_lock); } -static inline void __bss_tim_clear(struct ieee80211_if_ap *bss, int aid) +static inline void __bss_tim_clear(struct ieee80211_if_ap *bss, u16 aid) { /* - * This format has ben mandated by the IEEE specifications, + * This format has been mandated by the IEEE specifications, * so this line may not be changed to use the __clear_bit() format. */ - bss->tim[(aid)/8] &= !(1<<((aid) % 8)); + bss->tim[aid / 8] &= ~(1 << (aid % 8)); } static inline void bss_tim_clear(struct ieee80211_local *local, - struct ieee80211_if_ap *bss, int aid) + struct ieee80211_if_ap *bss, u16 aid) { - spin_lock_bh(&local->sta_lock); + read_lock_bh(&local->sta_lock); __bss_tim_clear(bss, aid); - spin_unlock_bh(&local->sta_lock); + read_unlock_bh(&local->sta_lock); } /** @@ -707,35 +690,28 @@ static inline int ieee80211_is_erp_rate(int phymode, int rate) return 0; } +static inline int ieee80211_bssid_match(const u8 *raddr, const u8 *addr) +{ + return compare_ether_addr(raddr, addr) == 0 || + is_broadcast_ether_addr(raddr); +} + + /* ieee80211.c */ int ieee80211_hw_config(struct ieee80211_local *local); int ieee80211_if_config(struct net_device *dev); int ieee80211_if_config_beacon(struct net_device *dev); -struct ieee80211_key_conf * -ieee80211_key_data2conf(struct ieee80211_local *local, - const struct ieee80211_key *data); -struct ieee80211_key *ieee80211_key_alloc(struct ieee80211_sub_if_data *sdata, - int idx, size_t key_len, gfp_t flags); -void ieee80211_key_free(struct ieee80211_key *key); -void ieee80211_rx_mgmt(struct ieee80211_local *local, struct sk_buff *skb, - struct ieee80211_rx_status *status, u32 msg_type); void ieee80211_prepare_rates(struct ieee80211_local *local, struct ieee80211_hw_mode *mode); void ieee80211_tx_set_iswep(struct ieee80211_txrx_data *tx); int ieee80211_if_update_wds(struct net_device *dev, u8 *remote_addr); -int ieee80211_monitor_start_xmit(struct sk_buff *skb, struct net_device *dev); -int ieee80211_subif_start_xmit(struct sk_buff *skb, struct net_device *dev); void ieee80211_if_setup(struct net_device *dev); -void ieee80211_if_mgmt_setup(struct net_device *dev); -int ieee80211_init_rate_ctrl_alg(struct ieee80211_local *local, - const char *name); -struct net_device_stats *ieee80211_dev_stats(struct net_device *dev); +struct ieee80211_rate *ieee80211_get_rate(struct ieee80211_local *local, + int phymode, int hwrate); /* ieee80211_ioctl.c */ extern const struct iw_handler_def ieee80211_iw_handler_def; -void ieee80211_update_default_wep_only(struct ieee80211_local *local); - /* Least common multiple of the used rates (in 100 kbps). This is used to * calculate rate_inv values for each rate so that only integers are needed. 
*/ @@ -783,6 +759,8 @@ struct sta_info * ieee80211_ibss_add_sta(struct net_device *dev, u8 *addr); int ieee80211_sta_deauthenticate(struct net_device *dev, u16 reason); int ieee80211_sta_disassociate(struct net_device *dev, u16 reason); +void ieee80211_erp_info_change_notify(struct net_device *dev, u8 changes); +void ieee80211_reset_erp_info(struct net_device *dev); /* ieee80211_iface.c */ int ieee80211_if_add(struct net_device *dev, const char *name, @@ -794,14 +772,32 @@ void __ieee80211_if_del(struct ieee80211_local *local, int ieee80211_if_remove(struct net_device *dev, const char *name, int id); void ieee80211_if_free(struct net_device *dev); void ieee80211_if_sdata_init(struct ieee80211_sub_if_data *sdata); -int ieee80211_if_add_mgmt(struct ieee80211_local *local); -void ieee80211_if_del_mgmt(struct ieee80211_local *local); /* regdomain.c */ void ieee80211_regdomain_init(void); void ieee80211_set_default_regdomain(struct ieee80211_hw_mode *mode); -/* for wiphy privid */ -extern void *mac80211_wiphy_privid; +/* rx handling */ +extern ieee80211_rx_handler ieee80211_rx_pre_handlers[]; +extern ieee80211_rx_handler ieee80211_rx_handlers[]; + +/* tx handling */ +extern ieee80211_tx_handler ieee80211_tx_handlers[]; +void ieee80211_clear_tx_pending(struct ieee80211_local *local); +void ieee80211_tx_pending(unsigned long data); +int ieee80211_master_start_xmit(struct sk_buff *skb, struct net_device *dev); +int ieee80211_monitor_start_xmit(struct sk_buff *skb, struct net_device *dev); +int ieee80211_subif_start_xmit(struct sk_buff *skb, struct net_device *dev); + +/* utility functions/constants */ +extern void *mac80211_wiphy_privid; /* for wiphy privid */ +extern const unsigned char rfc1042_header[6]; +extern const unsigned char bridge_tunnel_header[6]; +u8 *ieee80211_get_bssid(struct ieee80211_hdr *hdr, size_t len); +int ieee80211_is_eapol(const struct sk_buff *skb); +int ieee80211_frame_duration(struct ieee80211_local *local, size_t len, + int rate, int erp, int short_preamble); +void mac80211_ev_michael_mic_failure(struct net_device *dev, int keyidx, + struct ieee80211_hdr *hdr); #endif /* IEEE80211_I_H */ diff --git a/net/mac80211/ieee80211_iface.c b/net/mac80211/ieee80211_iface.c index 8532a5ccdd1ee35003482b7e4a194a5529ee68bd..43e505d294527ca1769c24da69b82c7b42ee6e66 100644 --- a/net/mac80211/ieee80211_iface.c +++ b/net/mac80211/ieee80211_iface.c @@ -25,6 +25,8 @@ void ieee80211_if_sdata_init(struct ieee80211_sub_if_data *sdata) sdata->eapol = 1; for (i = 0; i < IEEE80211_FRAGMENT_MAX; i++) skb_queue_head_init(&sdata->fragments[i].skb_list); + + INIT_LIST_HEAD(&sdata->key_list); } static void ieee80211_if_sdata_deinit(struct ieee80211_sub_if_data *sdata) @@ -77,18 +79,15 @@ int ieee80211_if_add(struct net_device *dev, const char *name, ieee80211_debugfs_add_netdev(sdata); ieee80211_if_set_type(ndev, type); - write_lock_bh(&local->sub_if_lock); + /* we're under RTNL so all this is fine */ if (unlikely(local->reg_state == IEEE80211_DEV_UNREGISTERED)) { - write_unlock_bh(&local->sub_if_lock); __ieee80211_if_del(local, sdata); return -ENODEV; } - list_add(&sdata->list, &local->sub_if_list); + list_add_tail_rcu(&sdata->list, &local->interfaces); + if (new_dev) *new_dev = ndev; - write_unlock_bh(&local->sub_if_lock); - - ieee80211_update_default_wep_only(local); return 0; @@ -97,74 +96,35 @@ int ieee80211_if_add(struct net_device *dev, const char *name, return ret; } -int ieee80211_if_add_mgmt(struct ieee80211_local *local) -{ - struct net_device *ndev; - struct ieee80211_sub_if_data *nsdata; 
- int ret; - - ASSERT_RTNL(); - - ndev = alloc_netdev(sizeof(struct ieee80211_sub_if_data), "wmgmt%d", - ieee80211_if_mgmt_setup); - if (!ndev) - return -ENOMEM; - ret = dev_alloc_name(ndev, ndev->name); - if (ret < 0) - goto fail; - - memcpy(ndev->dev_addr, local->hw.wiphy->perm_addr, ETH_ALEN); - SET_NETDEV_DEV(ndev, wiphy_dev(local->hw.wiphy)); - - nsdata = IEEE80211_DEV_TO_SUB_IF(ndev); - ndev->ieee80211_ptr = &nsdata->wdev; - nsdata->wdev.wiphy = local->hw.wiphy; - nsdata->type = IEEE80211_IF_TYPE_MGMT; - nsdata->dev = ndev; - nsdata->local = local; - ieee80211_if_sdata_init(nsdata); - - ret = register_netdevice(ndev); - if (ret) - goto fail; - - ieee80211_debugfs_add_netdev(nsdata); - - if (local->open_count > 0) - dev_open(ndev); - local->apdev = ndev; - return 0; - -fail: - free_netdev(ndev); - return ret; -} - -void ieee80211_if_del_mgmt(struct ieee80211_local *local) -{ - struct net_device *apdev; - - ASSERT_RTNL(); - apdev = local->apdev; - ieee80211_debugfs_remove_netdev(IEEE80211_DEV_TO_SUB_IF(apdev)); - local->apdev = NULL; - unregister_netdevice(apdev); -} - void ieee80211_if_set_type(struct net_device *dev, int type) { struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); - struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); int oldtype = sdata->type; - dev->hard_start_xmit = ieee80211_subif_start_xmit; - + /* + * We need to call this function on the master interface + * which already has a hard_start_xmit routine assigned + * which must not be changed. + */ + if (dev != sdata->local->mdev) + dev->hard_start_xmit = ieee80211_subif_start_xmit; + + /* + * Called even when register_netdevice fails, it would + * oops if assigned before initialising the rest. + */ + dev->uninit = ieee80211_if_reinit; + + /* most have no BSS pointer */ + sdata->bss = NULL; sdata->type = type; + switch (type) { case IEEE80211_IF_TYPE_WDS: - sdata->bss = NULL; + /* nothing special */ break; case IEEE80211_IF_TYPE_VLAN: + sdata->u.vlan.ap = NULL; break; case IEEE80211_IF_TYPE_AP: sdata->u.ap.dtim_period = 2; @@ -172,6 +132,7 @@ void ieee80211_if_set_type(struct net_device *dev, int type) sdata->u.ap.max_ratectrl_rateidx = -1; skb_queue_head_init(&sdata->u.ap.ps_bc_buf); sdata->bss = &sdata->u.ap; + INIT_LIST_HEAD(&sdata->u.ap.vlans); break; case IEEE80211_IF_TYPE_STA: case IEEE80211_IF_TYPE_IBSS: { @@ -187,10 +148,10 @@ void ieee80211_if_set_type(struct net_device *dev, int type) ifsta->capab = WLAN_CAPABILITY_ESS; ifsta->auth_algs = IEEE80211_AUTH_ALG_OPEN | IEEE80211_AUTH_ALG_SHARED_KEY; - ifsta->create_ibss = 1; - ifsta->wmm_enabled = 1; - ifsta->auto_channel_sel = 1; - ifsta->auto_bssid_sel = 1; + ifsta->flags |= IEEE80211_STA_CREATE_IBSS | + IEEE80211_STA_WMM_ENABLED | + IEEE80211_STA_AUTO_BSSID_SEL | + IEEE80211_STA_AUTO_CHANNEL_SEL; msdata = IEEE80211_DEV_TO_SUB_IF(sdata->local->mdev); sdata->bss = &msdata->u.ap; @@ -205,7 +166,6 @@ void ieee80211_if_set_type(struct net_device *dev, int type) dev->name, __FUNCTION__, type); } ieee80211_debugfs_change_if_type(sdata, oldtype); - ieee80211_update_default_wep_only(local); } /* Must be called with rtnl lock held. 
*/ @@ -214,57 +174,46 @@ void ieee80211_if_reinit(struct net_device *dev) struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); struct sta_info *sta; - int i; + struct sk_buff *skb; ASSERT_RTNL(); + + ieee80211_free_keys(sdata); + ieee80211_if_sdata_deinit(sdata); - for (i = 0; i < NUM_DEFAULT_KEYS; i++) { - if (!sdata->keys[i]) - continue; -#if 0 - /* The interface is down at the moment, so there is not - * really much point in disabling the keys at this point. */ - memset(addr, 0xff, ETH_ALEN); - if (local->ops->set_key) - local->ops->set_key(local_to_hw(local), DISABLE_KEY, addr, - local->keys[i], 0); -#endif - ieee80211_key_free(sdata->keys[i]); - sdata->keys[i] = NULL; - } switch (sdata->type) { + case IEEE80211_IF_TYPE_INVALID: + /* cannot happen */ + WARN_ON(1); + break; case IEEE80211_IF_TYPE_AP: { /* Remove all virtual interfaces that use this BSS * as their sdata->bss */ struct ieee80211_sub_if_data *tsdata, *n; - LIST_HEAD(tmp_list); - write_lock_bh(&local->sub_if_lock); - list_for_each_entry_safe(tsdata, n, &local->sub_if_list, list) { + list_for_each_entry_safe(tsdata, n, &local->interfaces, list) { if (tsdata != sdata && tsdata->bss == &sdata->u.ap) { printk(KERN_DEBUG "%s: removing virtual " "interface %s because its BSS interface" " is being removed\n", sdata->dev->name, tsdata->dev->name); - list_move_tail(&tsdata->list, &tmp_list); + list_del_rcu(&tsdata->list); + /* + * We have lots of time and can afford + * to sync for each interface + */ + synchronize_rcu(); + __ieee80211_if_del(local, tsdata); } } - write_unlock_bh(&local->sub_if_lock); - - list_for_each_entry_safe(tsdata, n, &tmp_list, list) - __ieee80211_if_del(local, tsdata); kfree(sdata->u.ap.beacon_head); kfree(sdata->u.ap.beacon_tail); - kfree(sdata->u.ap.generic_elem); - if (dev != local->mdev) { - struct sk_buff *skb; - while ((skb = skb_dequeue(&sdata->u.ap.ps_bc_buf))) { - local->total_ps_buffered--; - dev_kfree_skb(skb); - } + while ((skb = skb_dequeue(&sdata->u.ap.ps_bc_buf))) { + local->total_ps_buffered--; + dev_kfree_skb(skb); } break; @@ -272,8 +221,8 @@ void ieee80211_if_reinit(struct net_device *dev) case IEEE80211_IF_TYPE_WDS: sta = sta_info_get(local, sdata->u.wds.remote_addr); if (sta) { + sta_info_free(sta); sta_info_put(sta); - sta_info_free(sta, 0); } else { #ifdef CONFIG_MAC80211_VERBOSE_DEBUG printk(KERN_DEBUG "%s: Someone had deleted my STA " @@ -298,6 +247,9 @@ void ieee80211_if_reinit(struct net_device *dev) case IEEE80211_IF_TYPE_MNTR: dev->type = ARPHRD_ETHER; break; + case IEEE80211_IF_TYPE_VLAN: + sdata->u.vlan.ap = NULL; + break; } /* remove all STAs that are bound to this virtual interface */ @@ -327,29 +279,23 @@ int ieee80211_if_remove(struct net_device *dev, const char *name, int id) ASSERT_RTNL(); - write_lock_bh(&local->sub_if_lock); - list_for_each_entry_safe(sdata, n, &local->sub_if_list, list) { + list_for_each_entry_safe(sdata, n, &local->interfaces, list) { if ((sdata->type == id || id == -1) && strcmp(name, sdata->dev->name) == 0 && sdata->dev != local->mdev) { - list_del(&sdata->list); - write_unlock_bh(&local->sub_if_lock); + list_del_rcu(&sdata->list); + synchronize_rcu(); __ieee80211_if_del(local, sdata); - ieee80211_update_default_wep_only(local); return 0; } } - write_unlock_bh(&local->sub_if_lock); return -ENODEV; } void ieee80211_if_free(struct net_device *dev) { - struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); struct ieee80211_sub_if_data *sdata = 
IEEE80211_DEV_TO_SUB_IF(dev); - /* local->apdev must be NULL when freeing management interface */ - BUG_ON(dev == local->apdev); ieee80211_if_sdata_deinit(sdata); free_netdev(dev); } diff --git a/net/mac80211/ieee80211_ioctl.c b/net/mac80211/ieee80211_ioctl.c index e7904db553255eb5cf96697ce671e24ea5cc6af5..f0224c2311d29a9df5826bfe46def0a93cf3866f 100644 --- a/net/mac80211/ieee80211_ioctl.c +++ b/net/mac80211/ieee80211_ioctl.c @@ -21,77 +21,41 @@ #include #include "ieee80211_i.h" -#include "hostapd_ioctl.h" #include "ieee80211_rate.h" #include "wpa.h" #include "aes_ccm.h" -#include "debugfs_key.h" - -static void ieee80211_set_hw_encryption(struct net_device *dev, - struct sta_info *sta, u8 addr[ETH_ALEN], - struct ieee80211_key *key) -{ - struct ieee80211_key_conf *keyconf = NULL; - struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); - - /* default to sw encryption; this will be cleared by low-level - * driver if the hw supports requested encryption */ - if (key) - key->force_sw_encrypt = 1; - - if (key && local->ops->set_key && - (keyconf = ieee80211_key_data2conf(local, key))) { - if (local->ops->set_key(local_to_hw(local), SET_KEY, addr, - keyconf, sta ? sta->aid : 0)) { - key->force_sw_encrypt = 1; - key->hw_key_idx = HW_KEY_IDX_INVALID; - } else { - key->force_sw_encrypt = - !!(keyconf->flags & IEEE80211_KEY_FORCE_SW_ENCRYPT); - key->hw_key_idx = - keyconf->hw_key_idx; - - } - } - kfree(keyconf); -} static int ieee80211_set_encryption(struct net_device *dev, u8 *sta_addr, - int idx, int alg, int set_tx_key, - const u8 *_key, size_t key_len) + int idx, int alg, int remove, + int set_tx_key, const u8 *_key, + size_t key_len) { struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); int ret = 0; struct sta_info *sta; - struct ieee80211_key *key, *old_key; - int try_hwaccel = 1; - struct ieee80211_key_conf *keyconf; + struct ieee80211_key *key; struct ieee80211_sub_if_data *sdata; sdata = IEEE80211_DEV_TO_SUB_IF(dev); + if (idx < 0 || idx >= NUM_DEFAULT_KEYS) { + printk(KERN_DEBUG "%s: set_encrypt - invalid idx=%d\n", + dev->name, idx); + return -EINVAL; + } + if (is_broadcast_ether_addr(sta_addr)) { sta = NULL; - if (idx >= NUM_DEFAULT_KEYS) { - printk(KERN_DEBUG "%s: set_encrypt - invalid idx=%d\n", - dev->name, idx); - return -EINVAL; - } key = sdata->keys[idx]; - - /* TODO: consider adding hwaccel support for these; at least - * Atheros key cache should be able to handle this since AP is - * only transmitting frames with default keys. */ - /* FIX: hw key cache can be used when only one virtual - * STA is associated with each AP. If more than one STA - * is associated to the same AP, software encryption - * must be used. This should be done automatically - * based on configured station devices. For the time - * being, this can be only set at compile time. */ } else { set_tx_key = 0; - if (idx != 0) { + /* + * According to the standard, the key index of a pairwise + * key must be zero. However, some AP are broken when it + * comes to WEP key indices, so we work around this. 
+ */ + if (idx != 0 && alg != ALG_WEP) { printk(KERN_DEBUG "%s: set_encrypt - non-zero idx for " "individual key\n", dev->name); return -EINVAL; @@ -100,9 +64,10 @@ static int ieee80211_set_encryption(struct net_device *dev, u8 *sta_addr, sta = sta_info_get(local, sta_addr); if (!sta) { #ifdef CONFIG_MAC80211_VERBOSE_DEBUG + DECLARE_MAC_BUF(mac); printk(KERN_DEBUG "%s: set_encrypt - unknown addr " - MAC_FMT "\n", - dev->name, MAC_ARG(sta_addr)); + "%s\n", + dev->name, print_mac(mac, sta_addr)); #endif /* CONFIG_MAC80211_VERBOSE_DEBUG */ return -ENOENT; @@ -111,144 +76,25 @@ static int ieee80211_set_encryption(struct net_device *dev, u8 *sta_addr, key = sta->key; } - /* FIX: - * Cannot configure default hwaccel keys with WEP algorithm, if - * any of the virtual interfaces is using static WEP - * configuration because hwaccel would otherwise try to decrypt - * these frames. - * - * For now, just disable WEP hwaccel for broadcast when there is - * possibility of conflict with default keys. This can maybe later be - * optimized by using non-default keys (at least with Atheros ar521x). - */ - if (!sta && alg == ALG_WEP && !local->default_wep_only && - sdata->type != IEEE80211_IF_TYPE_IBSS && - sdata->type != IEEE80211_IF_TYPE_AP) { - try_hwaccel = 0; - } - - if (local->hw.flags & IEEE80211_HW_DEVICE_HIDES_WEP) { - /* Software encryption cannot be used with devices that hide - * encryption from the host system, so always try to use - * hardware acceleration with such devices. */ - try_hwaccel = 1; - } - - if ((local->hw.flags & IEEE80211_HW_NO_TKIP_WMM_HWACCEL) && - alg == ALG_TKIP) { - if (sta && (sta->flags & WLAN_STA_WME)) { - /* Hardware does not support hwaccel with TKIP when using WMM. - */ - try_hwaccel = 0; - } - else if (sdata->type == IEEE80211_IF_TYPE_STA) { - sta = sta_info_get(local, sdata->u.sta.bssid); - if (sta) { - if (sta->flags & WLAN_STA_WME) { - try_hwaccel = 0; - } - sta_info_put(sta); - sta = NULL; - } - } - } - - if (alg == ALG_NONE) { - keyconf = NULL; - if (try_hwaccel && key && - key->hw_key_idx != HW_KEY_IDX_INVALID && - local->ops->set_key && - (keyconf = ieee80211_key_data2conf(local, key)) != NULL && - local->ops->set_key(local_to_hw(local), DISABLE_KEY, - sta_addr, keyconf, sta ? sta->aid : 0)) { - printk(KERN_DEBUG "%s: set_encrypt - low-level disable" - " failed\n", dev->name); - ret = -EINVAL; - } - kfree(keyconf); - - if (set_tx_key || sdata->default_key == key) { - ieee80211_debugfs_key_remove_default(sdata); - sdata->default_key = NULL; - } - ieee80211_debugfs_key_remove(key); - if (sta) - sta->key = NULL; - else - sdata->keys[idx] = NULL; + if (remove) { ieee80211_key_free(key); key = NULL; } else { - old_key = key; - key = ieee80211_key_alloc(sta ? NULL : sdata, idx, key_len, - GFP_KERNEL); + /* + * Automatically frees any old key if present. + */ + key = ieee80211_key_alloc(sdata, sta, alg, idx, key_len, _key); if (!key) { ret = -ENOMEM; goto err_out; } - - /* default to sw encryption; low-level driver sets these if the - * requested encryption is supported */ - key->hw_key_idx = HW_KEY_IDX_INVALID; - key->force_sw_encrypt = 1; - - key->alg = alg; - key->keyidx = idx; - key->keylen = key_len; - memcpy(key->key, _key, key_len); - if (set_tx_key) - key->default_tx_key = 1; - - if (alg == ALG_CCMP) { - /* Initialize AES key state here as an optimization - * so that it does not need to be initialized for every - * packet. 
*/ - key->u.ccmp.tfm = ieee80211_aes_key_setup_encrypt( - key->key); - if (!key->u.ccmp.tfm) { - ret = -ENOMEM; - goto err_free; - } - } - - if (set_tx_key || sdata->default_key == old_key) { - ieee80211_debugfs_key_remove_default(sdata); - sdata->default_key = NULL; - } - ieee80211_debugfs_key_remove(old_key); - if (sta) - sta->key = key; - else - sdata->keys[idx] = key; - ieee80211_key_free(old_key); - ieee80211_debugfs_key_add(local, key); - if (sta) - ieee80211_debugfs_key_sta_link(key, sta); - - if (try_hwaccel && - (alg == ALG_WEP || alg == ALG_TKIP || alg == ALG_CCMP)) - ieee80211_set_hw_encryption(dev, sta, sta_addr, key); } - if (set_tx_key || (!sta && !sdata->default_key && key)) { - sdata->default_key = key; - if (key) - ieee80211_debugfs_key_add_default(sdata); - - if (local->ops->set_key_idx && - local->ops->set_key_idx(local_to_hw(local), idx)) - printk(KERN_DEBUG "%s: failed to set TX key idx for " - "low-level driver\n", dev->name); - } + if (set_tx_key || (!sta && !sdata->default_key && key)) + ieee80211_set_default_key(sdata, idx); - if (sta) - sta_info_put(sta); - - return 0; - -err_free: - ieee80211_key_free(key); -err_out: + ret = 0; + err_out: if (sta) sta_info_put(sta); return ret; @@ -259,44 +105,25 @@ static int ieee80211_ioctl_siwgenie(struct net_device *dev, struct iw_point *data, char *extra) { struct ieee80211_sub_if_data *sdata; - struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); - if (local->user_space_mlme) + sdata = IEEE80211_DEV_TO_SUB_IF(dev); + + if (sdata->flags & IEEE80211_SDATA_USERSPACE_MLME) return -EOPNOTSUPP; - sdata = IEEE80211_DEV_TO_SUB_IF(dev); if (sdata->type == IEEE80211_IF_TYPE_STA || sdata->type == IEEE80211_IF_TYPE_IBSS) { int ret = ieee80211_sta_set_extra_ie(dev, extra, data->length); if (ret) return ret; - sdata->u.sta.auto_bssid_sel = 0; + sdata->u.sta.flags &= ~IEEE80211_STA_AUTO_BSSID_SEL; ieee80211_sta_req_auth(dev, &sdata->u.sta); return 0; } - if (sdata->type == IEEE80211_IF_TYPE_AP) { - kfree(sdata->u.ap.generic_elem); - sdata->u.ap.generic_elem = kmalloc(data->length, GFP_KERNEL); - if (!sdata->u.ap.generic_elem) - return -ENOMEM; - memcpy(sdata->u.ap.generic_elem, extra, data->length); - sdata->u.ap.generic_elem_len = data->length; - return ieee80211_if_config(dev); - } return -EOPNOTSUPP; } -static int ieee80211_ioctl_set_radio_enabled(struct net_device *dev, - int val) -{ - struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); - struct ieee80211_conf *conf = &local->hw.conf; - - conf->radio_enabled = val; - return ieee80211_hw_config(wdev_priv(dev->ieee80211_ptr)); -} - static int ieee80211_ioctl_giwname(struct net_device *dev, struct iw_request_info *info, char *name, char *extra) @@ -313,9 +140,6 @@ static int ieee80211_ioctl_giwname(struct net_device *dev, case MODE_IEEE80211G: strcpy(name, "IEEE 802.11g"); break; - case MODE_ATHEROS_TURBO: - strcpy(name, "5GHz Turbo"); - break; default: strcpy(name, "IEEE 802.11"); break; @@ -480,11 +304,6 @@ int ieee80211_set_channel(struct ieee80211_local *local, int channel, int freq) struct ieee80211_channel *chan = &mode->channels[c]; if (chan->flag & IEEE80211_CHAN_W_SCAN && ((chan->chan == channel) || (chan->freq == freq))) { - /* Use next_mode as the mode preference to - * resolve non-unique channel numbers. 
*/ - if (set && mode->mode != local->next_mode) - continue; - local->oper_channel = chan; local->oper_hw_mode = mode; set++; @@ -512,13 +331,14 @@ static int ieee80211_ioctl_siwfreq(struct net_device *dev, struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); if (sdata->type == IEEE80211_IF_TYPE_STA) - sdata->u.sta.auto_channel_sel = 0; + sdata->u.sta.flags &= ~IEEE80211_STA_AUTO_CHANNEL_SEL; /* freq->e == 0: freq->m = channel; otherwise freq = m * 10^e */ if (freq->e == 0) { if (freq->m < 0) { if (sdata->type == IEEE80211_IF_TYPE_STA) - sdata->u.sta.auto_channel_sel = 1; + sdata->u.sta.flags |= + IEEE80211_STA_AUTO_CHANNEL_SEL; return 0; } else return ieee80211_set_channel(local, freq->m, -1); @@ -554,7 +374,6 @@ static int ieee80211_ioctl_siwessid(struct net_device *dev, struct iw_request_info *info, struct iw_point *data, char *ssid) { - struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); struct ieee80211_sub_if_data *sdata; size_t len = data->length; @@ -566,14 +385,17 @@ static int ieee80211_ioctl_siwessid(struct net_device *dev, if (sdata->type == IEEE80211_IF_TYPE_STA || sdata->type == IEEE80211_IF_TYPE_IBSS) { int ret; - if (local->user_space_mlme) { + if (sdata->flags & IEEE80211_SDATA_USERSPACE_MLME) { if (len > IEEE80211_MAX_SSID_LEN) return -EINVAL; memcpy(sdata->u.sta.ssid, ssid, len); sdata->u.sta.ssid_len = len; return 0; } - sdata->u.sta.auto_ssid_sel = !data->flags; + if (data->flags) + sdata->u.sta.flags &= ~IEEE80211_STA_AUTO_SSID_SEL; + else + sdata->u.sta.flags |= IEEE80211_STA_AUTO_SSID_SEL; ret = ieee80211_sta_set_ssid(dev, ssid, len); if (ret) return ret; @@ -628,25 +450,24 @@ static int ieee80211_ioctl_siwap(struct net_device *dev, struct iw_request_info *info, struct sockaddr *ap_addr, char *extra) { - struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); struct ieee80211_sub_if_data *sdata; sdata = IEEE80211_DEV_TO_SUB_IF(dev); if (sdata->type == IEEE80211_IF_TYPE_STA || sdata->type == IEEE80211_IF_TYPE_IBSS) { int ret; - if (local->user_space_mlme) { + if (sdata->flags & IEEE80211_SDATA_USERSPACE_MLME) { memcpy(sdata->u.sta.bssid, (u8 *) &ap_addr->sa_data, ETH_ALEN); return 0; } - if (is_zero_ether_addr((u8 *) &ap_addr->sa_data)) { - sdata->u.sta.auto_bssid_sel = 1; - sdata->u.sta.auto_channel_sel = 1; - } else if (is_broadcast_ether_addr((u8 *) &ap_addr->sa_data)) - sdata->u.sta.auto_bssid_sel = 1; + if (is_zero_ether_addr((u8 *) &ap_addr->sa_data)) + sdata->u.sta.flags |= IEEE80211_STA_AUTO_BSSID_SEL | + IEEE80211_STA_AUTO_CHANNEL_SEL; + else if (is_broadcast_ether_addr((u8 *) &ap_addr->sa_data)) + sdata->u.sta.flags |= IEEE80211_STA_AUTO_BSSID_SEL; else - sdata->u.sta.auto_bssid_sel = 0; + sdata->u.sta.flags &= ~IEEE80211_STA_AUTO_BSSID_SEL; ret = ieee80211_sta_set_bssid(dev, (u8 *) &ap_addr->sa_data); if (ret) return ret; @@ -762,9 +583,6 @@ static int ieee80211_ioctl_siwrate(struct net_device *dev, struct ieee80211_rate *rates = &mode->rates[i]; int this_rate = rates->rate; - if (mode->mode == MODE_ATHEROS_TURBO || - mode->mode == MODE_ATHEROS_TURBOG) - this_rate *= 2; if (target_rate == this_rate) { sdata->bss->max_ratectrl_rateidx = i; if (rate->fixed) @@ -798,6 +616,52 @@ static int ieee80211_ioctl_giwrate(struct net_device *dev, return 0; } +static int ieee80211_ioctl_siwtxpower(struct net_device *dev, + struct iw_request_info *info, + union iwreq_data *data, char *extra) +{ + struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); + bool need_reconfig = 0; + + if ((data->txpower.flags & IW_TXPOW_TYPE) != 
IW_TXPOW_DBM) + return -EINVAL; + if (data->txpower.flags & IW_TXPOW_RANGE) + return -EINVAL; + if (!data->txpower.fixed) + return -EINVAL; + + if (local->hw.conf.power_level != data->txpower.value) { + local->hw.conf.power_level = data->txpower.value; + need_reconfig = 1; + } + if (local->hw.conf.radio_enabled != !(data->txpower.disabled)) { + local->hw.conf.radio_enabled = !(data->txpower.disabled); + need_reconfig = 1; + } + if (need_reconfig) { + ieee80211_hw_config(local); + /* The return value of hw_config is not of big interest here, + * as it doesn't say that it failed because of _this_ config + * change or something else. Ignore it. */ + } + + return 0; +} + +static int ieee80211_ioctl_giwtxpower(struct net_device *dev, + struct iw_request_info *info, + union iwreq_data *data, char *extra) +{ + struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); + + data->txpower.fixed = 1; + data->txpower.disabled = !(local->hw.conf.radio_enabled); + data->txpower.value = local->hw.conf.power_level; + data->txpower.flags = IW_TXPOW_DBM; + + return 0; +} + static int ieee80211_ioctl_siwrts(struct net_device *dev, struct iw_request_info *info, struct iw_param *rts, char *extra) @@ -930,335 +794,6 @@ static int ieee80211_ioctl_giwretry(struct net_device *dev, return 0; } -static void ieee80211_key_enable_hwaccel(struct ieee80211_local *local, - struct ieee80211_key *key) -{ - struct ieee80211_key_conf *keyconf; - u8 addr[ETH_ALEN]; - - if (!key || key->alg != ALG_WEP || !key->force_sw_encrypt || - (local->hw.flags & IEEE80211_HW_DEVICE_HIDES_WEP)) - return; - - memset(addr, 0xff, ETH_ALEN); - keyconf = ieee80211_key_data2conf(local, key); - if (keyconf && local->ops->set_key && - local->ops->set_key(local_to_hw(local), - SET_KEY, addr, keyconf, 0) == 0) { - key->force_sw_encrypt = - !!(keyconf->flags & IEEE80211_KEY_FORCE_SW_ENCRYPT); - key->hw_key_idx = keyconf->hw_key_idx; - } - kfree(keyconf); -} - - -static void ieee80211_key_disable_hwaccel(struct ieee80211_local *local, - struct ieee80211_key *key) -{ - struct ieee80211_key_conf *keyconf; - u8 addr[ETH_ALEN]; - - if (!key || key->alg != ALG_WEP || key->force_sw_encrypt || - (local->hw.flags & IEEE80211_HW_DEVICE_HIDES_WEP)) - return; - - memset(addr, 0xff, ETH_ALEN); - keyconf = ieee80211_key_data2conf(local, key); - if (keyconf && local->ops->set_key) - local->ops->set_key(local_to_hw(local), DISABLE_KEY, - addr, keyconf, 0); - kfree(keyconf); - key->force_sw_encrypt = 1; -} - - -static int ieee80211_ioctl_default_wep_only(struct ieee80211_local *local, - int value) -{ - int i; - struct ieee80211_sub_if_data *sdata; - - local->default_wep_only = value; - read_lock(&local->sub_if_lock); - list_for_each_entry(sdata, &local->sub_if_list, list) - for (i = 0; i < NUM_DEFAULT_KEYS; i++) - if (value) - ieee80211_key_enable_hwaccel(local, - sdata->keys[i]); - else - ieee80211_key_disable_hwaccel(local, - sdata->keys[i]); - read_unlock(&local->sub_if_lock); - - return 0; -} - - -void ieee80211_update_default_wep_only(struct ieee80211_local *local) -{ - int i = 0; - struct ieee80211_sub_if_data *sdata; - - read_lock(&local->sub_if_lock); - list_for_each_entry(sdata, &local->sub_if_list, list) { - - if (sdata->dev == local->mdev) - continue; - - /* If there is an AP interface then depend on userspace to - set default_wep_only correctly. 
*/ - if (sdata->type == IEEE80211_IF_TYPE_AP) { - read_unlock(&local->sub_if_lock); - return; - } - - i++; - } - - read_unlock(&local->sub_if_lock); - - if (i <= 1) - ieee80211_ioctl_default_wep_only(local, 1); - else - ieee80211_ioctl_default_wep_only(local, 0); -} - - -static int ieee80211_ioctl_prism2_param(struct net_device *dev, - struct iw_request_info *info, - void *wrqu, char *extra) -{ - struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); - struct ieee80211_sub_if_data *sdata; - int *i = (int *) extra; - int param = *i; - int value = *(i + 1); - int ret = 0; - - if (!capable(CAP_NET_ADMIN)) - return -EPERM; - - sdata = IEEE80211_DEV_TO_SUB_IF(dev); - - switch (param) { - case PRISM2_PARAM_IEEE_802_1X: - if (local->ops->set_ieee8021x) - ret = local->ops->set_ieee8021x(local_to_hw(local), - value); - if (ret) - printk(KERN_DEBUG "%s: failed to set IEEE 802.1X (%d) " - "for low-level driver\n", dev->name, value); - else - sdata->ieee802_1x = value; - break; - - case PRISM2_PARAM_CTS_PROTECT_ERP_FRAMES: - if (sdata->type != IEEE80211_IF_TYPE_AP) - ret = -ENOENT; - else - sdata->use_protection = value; - break; - - case PRISM2_PARAM_PREAMBLE: - local->short_preamble = value; - break; - - case PRISM2_PARAM_STAT_TIME: - if (!local->stat_time && value) { - local->stat_timer.expires = jiffies + HZ * value / 100; - add_timer(&local->stat_timer); - } else if (local->stat_time && !value) { - del_timer_sync(&local->stat_timer); - } - local->stat_time = value; - break; - case PRISM2_PARAM_SHORT_SLOT_TIME: - if (value) - local->hw.conf.flags |= IEEE80211_CONF_SHORT_SLOT_TIME; - else - local->hw.conf.flags &= ~IEEE80211_CONF_SHORT_SLOT_TIME; - if (ieee80211_hw_config(local)) - ret = -EINVAL; - break; - - case PRISM2_PARAM_NEXT_MODE: - local->next_mode = value; - break; - - case PRISM2_PARAM_RADIO_ENABLED: - ret = ieee80211_ioctl_set_radio_enabled(dev, value); - break; - - case PRISM2_PARAM_ANTENNA_MODE: - local->hw.conf.antenna_mode = value; - if (ieee80211_hw_config(local)) - ret = -EINVAL; - break; - - case PRISM2_PARAM_STA_ANTENNA_SEL: - local->sta_antenna_sel = value; - break; - - case PRISM2_PARAM_TX_POWER_REDUCTION: - if (value < 0) - ret = -EINVAL; - else - local->hw.conf.tx_power_reduction = value; - break; - - case PRISM2_PARAM_KEY_TX_RX_THRESHOLD: - local->key_tx_rx_threshold = value; - break; - - case PRISM2_PARAM_DEFAULT_WEP_ONLY: - ret = ieee80211_ioctl_default_wep_only(local, value); - break; - - case PRISM2_PARAM_WIFI_WME_NOACK_TEST: - local->wifi_wme_noack_test = value; - break; - - case PRISM2_PARAM_SCAN_FLAGS: - local->scan_flags = value; - break; - - case PRISM2_PARAM_MIXED_CELL: - if (sdata->type != IEEE80211_IF_TYPE_STA && - sdata->type != IEEE80211_IF_TYPE_IBSS) - ret = -EINVAL; - else - sdata->u.sta.mixed_cell = !!value; - break; - - case PRISM2_PARAM_HW_MODES: - local->enabled_modes = value; - break; - - case PRISM2_PARAM_CREATE_IBSS: - if (sdata->type != IEEE80211_IF_TYPE_IBSS) - ret = -EINVAL; - else - sdata->u.sta.create_ibss = !!value; - break; - case PRISM2_PARAM_WMM_ENABLED: - if (sdata->type != IEEE80211_IF_TYPE_STA && - sdata->type != IEEE80211_IF_TYPE_IBSS) - ret = -EINVAL; - else - sdata->u.sta.wmm_enabled = !!value; - break; - case PRISM2_PARAM_RADAR_DETECT: - local->hw.conf.radar_detect = value; - break; - case PRISM2_PARAM_SPECTRUM_MGMT: - local->hw.conf.spect_mgmt = value; - break; - default: - ret = -EOPNOTSUPP; - break; - } - - return ret; -} - - -static int ieee80211_ioctl_get_prism2_param(struct net_device *dev, - struct iw_request_info *info, 
- void *wrqu, char *extra) -{ - struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); - struct ieee80211_sub_if_data *sdata; - int *param = (int *) extra; - int ret = 0; - - sdata = IEEE80211_DEV_TO_SUB_IF(dev); - - switch (*param) { - case PRISM2_PARAM_IEEE_802_1X: - *param = sdata->ieee802_1x; - break; - - case PRISM2_PARAM_CTS_PROTECT_ERP_FRAMES: - *param = sdata->use_protection; - break; - - case PRISM2_PARAM_PREAMBLE: - *param = local->short_preamble; - break; - - case PRISM2_PARAM_STAT_TIME: - *param = local->stat_time; - break; - case PRISM2_PARAM_SHORT_SLOT_TIME: - *param = !!(local->hw.conf.flags & IEEE80211_CONF_SHORT_SLOT_TIME); - break; - - case PRISM2_PARAM_NEXT_MODE: - *param = local->next_mode; - break; - - case PRISM2_PARAM_ANTENNA_MODE: - *param = local->hw.conf.antenna_mode; - break; - - case PRISM2_PARAM_STA_ANTENNA_SEL: - *param = local->sta_antenna_sel; - break; - - case PRISM2_PARAM_TX_POWER_REDUCTION: - *param = local->hw.conf.tx_power_reduction; - break; - - case PRISM2_PARAM_KEY_TX_RX_THRESHOLD: - *param = local->key_tx_rx_threshold; - break; - - case PRISM2_PARAM_DEFAULT_WEP_ONLY: - *param = local->default_wep_only; - break; - - case PRISM2_PARAM_WIFI_WME_NOACK_TEST: - *param = local->wifi_wme_noack_test; - break; - - case PRISM2_PARAM_SCAN_FLAGS: - *param = local->scan_flags; - break; - - case PRISM2_PARAM_HW_MODES: - *param = local->enabled_modes; - break; - - case PRISM2_PARAM_CREATE_IBSS: - if (sdata->type != IEEE80211_IF_TYPE_IBSS) - ret = -EINVAL; - else - *param = !!sdata->u.sta.create_ibss; - break; - - case PRISM2_PARAM_MIXED_CELL: - if (sdata->type != IEEE80211_IF_TYPE_STA && - sdata->type != IEEE80211_IF_TYPE_IBSS) - ret = -EINVAL; - else - *param = !!sdata->u.sta.mixed_cell; - break; - case PRISM2_PARAM_WMM_ENABLED: - if (sdata->type != IEEE80211_IF_TYPE_STA && - sdata->type != IEEE80211_IF_TYPE_IBSS) - ret = -EINVAL; - else - *param = !!sdata->u.sta.wmm_enabled; - break; - default: - ret = -EOPNOTSUPP; - break; - } - - return ret; -} - static int ieee80211_ioctl_siwmlme(struct net_device *dev, struct iw_request_info *info, struct iw_point *data, char *extra) @@ -1291,6 +826,7 @@ static int ieee80211_ioctl_siwencode(struct net_device *dev, struct ieee80211_sub_if_data *sdata; int idx, i, alg = ALG_WEP; u8 bcaddr[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }; + int remove = 0; sdata = IEEE80211_DEV_TO_SUB_IF(dev); @@ -1309,21 +845,16 @@ static int ieee80211_ioctl_siwencode(struct net_device *dev, idx--; if (erq->flags & IW_ENCODE_DISABLED) - alg = ALG_NONE; + remove = 1; else if (erq->length == 0) { /* No key data - just set the default TX key index */ - if (sdata->default_key != sdata->keys[idx]) { - ieee80211_debugfs_key_remove_default(sdata); - sdata->default_key = sdata->keys[idx]; - if (sdata->default_key) - ieee80211_debugfs_key_add_default(sdata); - } + ieee80211_set_default_key(sdata, idx); return 0; } return ieee80211_set_encryption( dev, bcaddr, - idx, alg, + idx, alg, remove, !sdata->default_key, keybuf, erq->length); } @@ -1362,9 +893,9 @@ static int ieee80211_ioctl_giwencode(struct net_device *dev, return 0; } - memcpy(key, sdata->keys[idx]->key, - min((int)erq->length, sdata->keys[idx]->keylen)); - erq->length = sdata->keys[idx]->keylen; + memcpy(key, sdata->keys[idx]->conf.key, + min_t(int, erq->length, sdata->keys[idx]->conf.keylen)); + erq->length = sdata->keys[idx]->conf.keylen; erq->flags |= IW_ENCODE_ENABLED; return 0; @@ -1390,22 +921,12 @@ static int ieee80211_ioctl_siwauth(struct net_device *dev, ret = -EINVAL; else 
{ /* - * TODO: sdata->u.sta.key_mgmt does not match with WE18 - * value completely; could consider modifying this to - * be closer to WE18. For now, this value is not really - * used for anything else than Privacy matching, so the - * current code here should be more or less OK. + * Key management was set by wpa_supplicant, + * we only need this to associate to a network + * that has privacy enabled regardless of not + * having a key. */ - if (data->value & IW_AUTH_KEY_MGMT_802_1X) { - sdata->u.sta.key_mgmt = - IEEE80211_KEY_MGMT_WPA_EAP; - } else if (data->value & IW_AUTH_KEY_MGMT_PSK) { - sdata->u.sta.key_mgmt = - IEEE80211_KEY_MGMT_WPA_PSK; - } else { - sdata->u.sta.key_mgmt = - IEEE80211_KEY_MGMT_NONE; - } + sdata->u.sta.key_management_enabled = !!data->value; } break; case IW_AUTH_80211_AUTH_ALG: @@ -1484,11 +1005,11 @@ static int ieee80211_ioctl_siwencodeext(struct net_device *dev, { struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); struct iw_encode_ext *ext = (struct iw_encode_ext *) extra; - int alg, idx, i; + int uninitialized_var(alg), idx, i, remove = 0; switch (ext->alg) { case IW_ENCODE_ALG_NONE: - alg = ALG_NONE; + remove = 1; break; case IW_ENCODE_ALG_WEP: alg = ALG_WEP; @@ -1504,7 +1025,7 @@ static int ieee80211_ioctl_siwencodeext(struct net_device *dev, } if (erq->flags & IW_ENCODE_DISABLED) - alg = ALG_NONE; + remove = 1; idx = erq->flags & IW_ENCODE_INDEX; if (idx < 1 || idx > 4) { @@ -1523,20 +1044,13 @@ static int ieee80211_ioctl_siwencodeext(struct net_device *dev, idx--; return ieee80211_set_encryption(dev, ext->addr.sa_data, idx, alg, + remove, ext->ext_flags & IW_ENCODE_EXT_SET_TX_KEY, ext->key, ext->key_len); } -static const struct iw_priv_args ieee80211_ioctl_priv[] = { - { PRISM2_IOCTL_PRISM2_PARAM, - IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 2, 0, "param" }, - { PRISM2_IOCTL_GET_PRISM2_PARAM, - IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, - IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "get_param" }, -}; - /* Structures to export the Wireless Handlers */ static const iw_handler ieee80211_handler[] = @@ -1557,10 +1071,10 @@ static const iw_handler ieee80211_handler[] = (iw_handler) NULL /* kernel code */, /* SIOCGIWPRIV */ (iw_handler) NULL /* not used */, /* SIOCSIWSTATS */ (iw_handler) NULL /* kernel code */, /* SIOCGIWSTATS */ - iw_handler_set_spy, /* SIOCSIWSPY */ - iw_handler_get_spy, /* SIOCGIWSPY */ - iw_handler_set_thrspy, /* SIOCSIWTHRSPY */ - iw_handler_get_thrspy, /* SIOCGIWTHRSPY */ + (iw_handler) NULL, /* SIOCSIWSPY */ + (iw_handler) NULL, /* SIOCGIWSPY */ + (iw_handler) NULL, /* SIOCSIWTHRSPY */ + (iw_handler) NULL, /* SIOCGIWTHRSPY */ (iw_handler) ieee80211_ioctl_siwap, /* SIOCSIWAP */ (iw_handler) ieee80211_ioctl_giwap, /* SIOCGIWAP */ (iw_handler) ieee80211_ioctl_siwmlme, /* SIOCSIWMLME */ @@ -1579,8 +1093,8 @@ static const iw_handler ieee80211_handler[] = (iw_handler) ieee80211_ioctl_giwrts, /* SIOCGIWRTS */ (iw_handler) ieee80211_ioctl_siwfrag, /* SIOCSIWFRAG */ (iw_handler) ieee80211_ioctl_giwfrag, /* SIOCGIWFRAG */ - (iw_handler) NULL, /* SIOCSIWTXPOW */ - (iw_handler) NULL, /* SIOCGIWTXPOW */ + (iw_handler) ieee80211_ioctl_siwtxpower, /* SIOCSIWTXPOW */ + (iw_handler) ieee80211_ioctl_giwtxpower, /* SIOCGIWTXPOW */ (iw_handler) ieee80211_ioctl_siwretry, /* SIOCSIWRETRY */ (iw_handler) ieee80211_ioctl_giwretry, /* SIOCGIWRETRY */ (iw_handler) ieee80211_ioctl_siwencode, /* SIOCSIWENCODE */ @@ -1599,19 +1113,9 @@ static const iw_handler ieee80211_handler[] = (iw_handler) NULL, /* -- hole -- */ }; -static const iw_handler 
ieee80211_private_handler[] = -{ /* SIOCIWFIRSTPRIV + */ - (iw_handler) ieee80211_ioctl_prism2_param, /* 0 */ - (iw_handler) ieee80211_ioctl_get_prism2_param, /* 1 */ -}; - const struct iw_handler_def ieee80211_iw_handler_def = { .num_standard = ARRAY_SIZE(ieee80211_handler), - .num_private = ARRAY_SIZE(ieee80211_private_handler), - .num_private_args = ARRAY_SIZE(ieee80211_ioctl_priv), .standard = (iw_handler *) ieee80211_handler, - .private = (iw_handler *) ieee80211_private_handler, - .private_args = (struct iw_priv_args *) ieee80211_ioctl_priv, .get_wireless_stats = ieee80211_get_wireless_stats, }; diff --git a/net/mac80211/ieee80211_key.h b/net/mac80211/ieee80211_key.h index c33384912782f0c71395bc80fae8664ee67ac7f2..fc770e98d47b44f3ba64250600db08823714c423 100644 --- a/net/mac80211/ieee80211_key.h +++ b/net/mac80211/ieee80211_key.h @@ -11,7 +11,7 @@ #define IEEE80211_KEY_H #include -#include +#include #include #include @@ -41,11 +41,21 @@ #define NUM_RX_DATA_QUEUES 17 +struct ieee80211_local; +struct ieee80211_sub_if_data; +struct sta_info; + +#define KEY_FLAG_UPLOADED_TO_HARDWARE (1<<0) + struct ieee80211_key { - struct kref kref; + struct ieee80211_local *local; + struct ieee80211_sub_if_data *sdata; + struct sta_info *sta; + + struct list_head list; + + unsigned int flags; - int hw_key_idx; /* filled and used by low-level driver */ - ieee80211_key_alg alg; union { struct { /* last used TSC */ @@ -73,22 +83,16 @@ struct ieee80211_key { u8 rx_crypto_buf[6 * AES_BLOCK_LEN]; } ccmp; } u; - int tx_rx_count; /* number of times this key has been used */ - int keylen; - /* if the low level driver can provide hardware acceleration it should - * clear this flag */ - unsigned int force_sw_encrypt:1; - unsigned int default_tx_key:1; /* This key is the new default TX key - * (used only for broadcast keys). 
*/ - s8 keyidx; /* WEP key index */ + /* number of times this key has been used */ + int tx_rx_count; #ifdef CONFIG_MAC80211_DEBUGFS struct { struct dentry *stalink; struct dentry *dir; struct dentry *keylen; - struct dentry *force_sw_encrypt; + struct dentry *flags; struct dentry *keyidx; struct dentry *hw_key_idx; struct dentry *tx_rx_count; @@ -97,10 +101,27 @@ struct ieee80211_key { struct dentry *rx_spec; struct dentry *replays; struct dentry *key; + struct dentry *ifindex; } debugfs; #endif - u8 key[0]; + /* + * key config, must be last because it contains key + * material as variable length member + */ + struct ieee80211_key_conf conf; }; +struct ieee80211_key *ieee80211_key_alloc(struct ieee80211_sub_if_data *sdata, + struct sta_info *sta, + enum ieee80211_key_alg alg, + int idx, + size_t key_len, + const u8 *key_data); +void ieee80211_key_free(struct ieee80211_key *key); +void ieee80211_set_default_key(struct ieee80211_sub_if_data *sdata, int idx); +void ieee80211_free_keys(struct ieee80211_sub_if_data *sdata); +void ieee80211_enable_keys(struct ieee80211_sub_if_data *sdata); +void ieee80211_disable_keys(struct ieee80211_sub_if_data *sdata); + #endif /* IEEE80211_KEY_H */ diff --git a/net/mac80211/ieee80211_led.c b/net/mac80211/ieee80211_led.c index 719d75b207073f1224322f7e14b3083b9346c323..4cf89af9d1009ffc528622d2e0f64a550c4614d4 100644 --- a/net/mac80211/ieee80211_led.c +++ b/net/mac80211/ieee80211_led.c @@ -33,33 +33,58 @@ void ieee80211_led_tx(struct ieee80211_local *local, int q) led_trigger_event(local->tx_led, LED_FULL); } +void ieee80211_led_assoc(struct ieee80211_local *local, bool associated) +{ + if (unlikely(!local->assoc_led)) + return; + if (associated) + led_trigger_event(local->assoc_led, LED_FULL); + else + led_trigger_event(local->assoc_led, LED_OFF); +} + void ieee80211_led_init(struct ieee80211_local *local) { local->rx_led = kzalloc(sizeof(struct led_trigger), GFP_KERNEL); - if (!local->rx_led) - return; - snprintf(local->rx_led_name, sizeof(local->rx_led_name), - "%srx", wiphy_name(local->hw.wiphy)); - local->rx_led->name = local->rx_led_name; - if (led_trigger_register(local->rx_led)) { - kfree(local->rx_led); - local->rx_led = NULL; + if (local->rx_led) { + snprintf(local->rx_led_name, sizeof(local->rx_led_name), + "%srx", wiphy_name(local->hw.wiphy)); + local->rx_led->name = local->rx_led_name; + if (led_trigger_register(local->rx_led)) { + kfree(local->rx_led); + local->rx_led = NULL; + } } local->tx_led = kzalloc(sizeof(struct led_trigger), GFP_KERNEL); - if (!local->tx_led) - return; - snprintf(local->tx_led_name, sizeof(local->tx_led_name), - "%stx", wiphy_name(local->hw.wiphy)); - local->tx_led->name = local->tx_led_name; - if (led_trigger_register(local->tx_led)) { - kfree(local->tx_led); - local->tx_led = NULL; + if (local->tx_led) { + snprintf(local->tx_led_name, sizeof(local->tx_led_name), + "%stx", wiphy_name(local->hw.wiphy)); + local->tx_led->name = local->tx_led_name; + if (led_trigger_register(local->tx_led)) { + kfree(local->tx_led); + local->tx_led = NULL; + } + } + + local->assoc_led = kzalloc(sizeof(struct led_trigger), GFP_KERNEL); + if (local->assoc_led) { + snprintf(local->assoc_led_name, sizeof(local->assoc_led_name), + "%sassoc", wiphy_name(local->hw.wiphy)); + local->assoc_led->name = local->assoc_led_name; + if (led_trigger_register(local->assoc_led)) { + kfree(local->assoc_led); + local->assoc_led = NULL; + } } } void ieee80211_led_exit(struct ieee80211_local *local) { + if (local->assoc_led) { + 
led_trigger_unregister(local->assoc_led); + kfree(local->assoc_led); + } if (local->tx_led) { led_trigger_unregister(local->tx_led); kfree(local->tx_led); @@ -70,6 +95,16 @@ void ieee80211_led_exit(struct ieee80211_local *local) } } +char *__ieee80211_get_assoc_led_name(struct ieee80211_hw *hw) +{ + struct ieee80211_local *local = hw_to_local(hw); + + if (local->assoc_led) + return local->assoc_led_name; + return NULL; +} +EXPORT_SYMBOL(__ieee80211_get_assoc_led_name); + char *__ieee80211_get_tx_led_name(struct ieee80211_hw *hw) { struct ieee80211_local *local = hw_to_local(hw); diff --git a/net/mac80211/ieee80211_led.h b/net/mac80211/ieee80211_led.h index 5c8ab8263878f80a08fe830a4e5c4cbe174aaac9..0feb22619835e0230df6e8bf9469d76e0e49f914 100644 --- a/net/mac80211/ieee80211_led.h +++ b/net/mac80211/ieee80211_led.h @@ -14,6 +14,8 @@ #ifdef CONFIG_MAC80211_LEDS extern void ieee80211_led_rx(struct ieee80211_local *local); extern void ieee80211_led_tx(struct ieee80211_local *local, int q); +extern void ieee80211_led_assoc(struct ieee80211_local *local, + bool associated); extern void ieee80211_led_init(struct ieee80211_local *local); extern void ieee80211_led_exit(struct ieee80211_local *local); #else @@ -23,6 +25,10 @@ static inline void ieee80211_led_rx(struct ieee80211_local *local) static inline void ieee80211_led_tx(struct ieee80211_local *local, int q) { } +static inline void ieee80211_led_assoc(struct ieee80211_local *local, + bool associated) +{ +} static inline void ieee80211_led_init(struct ieee80211_local *local) { } diff --git a/net/mac80211/ieee80211_rate.c b/net/mac80211/ieee80211_rate.c index 2118de04fc3542b1ee2323f90ff040013dfa121f..93abb8fff1410f254c9fb6adf04ad34823bc80a5 100644 --- a/net/mac80211/ieee80211_rate.c +++ b/net/mac80211/ieee80211_rate.c @@ -9,6 +9,7 @@ */ #include +#include #include "ieee80211_rate.h" #include "ieee80211_i.h" @@ -137,3 +138,43 @@ void rate_control_put(struct rate_control_ref *ref) { kref_put(&ref->kref, rate_control_release); } + +int ieee80211_init_rate_ctrl_alg(struct ieee80211_local *local, + const char *name) +{ + struct rate_control_ref *ref, *old; + + ASSERT_RTNL(); + if (local->open_count || netif_running(local->mdev)) + return -EBUSY; + + ref = rate_control_alloc(name, local); + if (!ref) { + printk(KERN_WARNING "%s: Failed to select rate control " + "algorithm\n", wiphy_name(local->hw.wiphy)); + return -ENOENT; + } + + old = local->rate_ctrl; + local->rate_ctrl = ref; + if (old) { + rate_control_put(old); + sta_info_flush(local, NULL); + } + + printk(KERN_DEBUG "%s: Selected rate control " + "algorithm '%s'\n", wiphy_name(local->hw.wiphy), + ref->ops->name); + + + return 0; +} + +void rate_control_deinitialize(struct ieee80211_local *local) +{ + struct rate_control_ref *ref; + + ref = local->rate_ctrl; + local->rate_ctrl = NULL; + rate_control_put(ref); +} diff --git a/net/mac80211/ieee80211_rate.h b/net/mac80211/ieee80211_rate.h index f021a028d9d0fa551b55d1b999dccdf0bcdb2be4..7cd1ebab4f8345f4cdf008b17ecc134254c45c39 100644 --- a/net/mac80211/ieee80211_rate.h +++ b/net/mac80211/ieee80211_rate.h @@ -30,8 +30,6 @@ struct rate_control_extra { /* parameters from the caller to rate_control_get_rate(): */ struct ieee80211_hw_mode *mode; - int mgmt_data; /* this is data frame that is used for management - * (e.g., IEEE 802.1X EAPOL) */ u16 ethertype; }; @@ -141,4 +139,10 @@ static inline void rate_control_remove_sta_debugfs(struct sta_info *sta) #endif } + +/* functions for rate control related to a device */ +int 
ieee80211_init_rate_ctrl_alg(struct ieee80211_local *local, + const char *name); +void rate_control_deinitialize(struct ieee80211_local *local); + #endif /* IEEE80211_RATE_H */ diff --git a/net/mac80211/ieee80211_sta.c b/net/mac80211/ieee80211_sta.c index 0d99b685df5f6cf46ea3254b92bc3f103e539d17..1641e8fe44b715e9b8d3832273d90d7df499b4a4 100644 --- a/net/mac80211/ieee80211_sta.c +++ b/net/mac80211/ieee80211_sta.c @@ -31,7 +31,7 @@ #include #include "ieee80211_i.h" #include "ieee80211_rate.h" -#include "hostapd_ioctl.h" +#include "ieee80211_led.h" #define IEEE80211_AUTH_TIMEOUT (HZ / 5) #define IEEE80211_AUTH_MAX_TRIES 3 @@ -108,11 +108,10 @@ struct ieee802_11_elems { u8 wmm_param_len; }; -typedef enum { ParseOK = 0, ParseUnknown = 1, ParseFailed = -1 } ParseRes; +enum ParseRes { ParseOK = 0, ParseUnknown = 1, ParseFailed = -1 }; - -static ParseRes ieee802_11_parse_elems(u8 *start, size_t len, - struct ieee802_11_elems *elems) +static enum ParseRes ieee802_11_parse_elems(u8 *start, size_t len, + struct ieee802_11_elems *elems) { size_t left = len; u8 *pos = start; @@ -234,7 +233,6 @@ static int ecw2cw(int ecw) return cw - 1; } - static void ieee80211_sta_wmm_params(struct net_device *dev, struct ieee80211_if_sta *ifsta, u8 *wmm_param, size_t wmm_param_len) @@ -318,17 +316,43 @@ static void ieee80211_handle_erp_ie(struct net_device *dev, u8 erp_value) struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); struct ieee80211_if_sta *ifsta = &sdata->u.sta; int use_protection = (erp_value & WLAN_ERP_USE_PROTECTION) != 0; + int preamble_mode = (erp_value & WLAN_ERP_BARKER_PREAMBLE) != 0; + u8 changes = 0; + DECLARE_MAC_BUF(mac); - if (use_protection != sdata->use_protection) { + if (use_protection != !!(sdata->flags & IEEE80211_SDATA_USE_PROTECTION)) { if (net_ratelimit()) { printk(KERN_DEBUG "%s: CTS protection %s (BSSID=" - MAC_FMT ")\n", + "%s)\n", dev->name, use_protection ? "enabled" : "disabled", - MAC_ARG(ifsta->bssid)); + print_mac(mac, ifsta->bssid)); + } + if (use_protection) + sdata->flags |= IEEE80211_SDATA_USE_PROTECTION; + else + sdata->flags &= ~IEEE80211_SDATA_USE_PROTECTION; + changes |= IEEE80211_ERP_CHANGE_PROTECTION; + } + + if (preamble_mode != !(sdata->flags & IEEE80211_SDATA_SHORT_PREAMBLE)) { + if (net_ratelimit()) { + printk(KERN_DEBUG "%s: switched to %s barker preamble" + " (BSSID=%s)\n", + dev->name, + (preamble_mode == WLAN_ERP_PREAMBLE_SHORT) ? 
+ "short" : "long", + print_mac(mac, ifsta->bssid)); } - sdata->use_protection = use_protection; + if (preamble_mode) + sdata->flags &= ~IEEE80211_SDATA_SHORT_PREAMBLE; + else + sdata->flags |= IEEE80211_SDATA_SHORT_PREAMBLE; + changes |= IEEE80211_ERP_CHANGE_PREAMBLE; } + + if (changes) + ieee80211_erp_info_change_notify(dev, changes); } @@ -344,7 +368,7 @@ static void ieee80211_sta_send_associnfo(struct net_device *dev, return; buf = kmalloc(50 + 2 * (ifsta->assocreq_ies_len + - ifsta->assocresp_ies_len), GFP_ATOMIC); + ifsta->assocresp_ies_len), GFP_KERNEL); if (!buf) return; @@ -384,19 +408,21 @@ static void ieee80211_sta_send_associnfo(struct net_device *dev, static void ieee80211_set_associated(struct net_device *dev, - struct ieee80211_if_sta *ifsta, int assoc) + struct ieee80211_if_sta *ifsta, + bool assoc) { + struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); union iwreq_data wrqu; - struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); - if (ifsta->associated == assoc) + if (!!(ifsta->flags & IEEE80211_STA_ASSOCIATED) == assoc) return; - ifsta->associated = assoc; - if (assoc) { struct ieee80211_sub_if_data *sdata; struct ieee80211_sta_bss *bss; + + ifsta->flags |= IEEE80211_STA_ASSOCIATED; + sdata = IEEE80211_DEV_TO_SUB_IF(dev); if (sdata->type != IEEE80211_IF_TYPE_STA) return; @@ -409,18 +435,21 @@ static void ieee80211_set_associated(struct net_device *dev, } netif_carrier_on(dev); - ifsta->prev_bssid_set = 1; + ifsta->flags |= IEEE80211_STA_PREV_BSSID_SET; memcpy(ifsta->prev_bssid, sdata->u.sta.bssid, ETH_ALEN); memcpy(wrqu.ap_addr.sa_data, sdata->u.sta.bssid, ETH_ALEN); ieee80211_sta_send_associnfo(dev, ifsta); } else { + ifsta->flags &= ~IEEE80211_STA_ASSOCIATED; + netif_carrier_off(dev); - sdata->use_protection = 0; + ieee80211_reset_erp_info(dev); memset(wrqu.ap_addr.sa_data, 0, ETH_ALEN); } wrqu.ap_addr.sa_family = ARPHRD_ETHER; wireless_send_event(dev, SIOCGIWAP, &wrqu, NULL); ifsta->last_probe = jiffies; + ieee80211_led_assoc(local, assoc); } static void ieee80211_set_disassoc(struct net_device *dev, @@ -447,8 +476,8 @@ static void ieee80211_sta_tx(struct net_device *dev, struct sk_buff *skb, pkt_data = (struct ieee80211_tx_packet_data *) skb->cb; memset(pkt_data, 0, sizeof(struct ieee80211_tx_packet_data)); pkt_data->ifindex = sdata->dev->ifindex; - pkt_data->mgmt_iface = (sdata->type == IEEE80211_IF_TYPE_MGMT); - pkt_data->do_not_encrypt = !encrypt; + if (!encrypt) + pkt_data->flags |= IEEE80211_TXPD_DO_NOT_ENCRYPT; dev_queue_xmit(skb); } @@ -495,18 +524,20 @@ static void ieee80211_send_auth(struct net_device *dev, static void ieee80211_authenticate(struct net_device *dev, struct ieee80211_if_sta *ifsta) { + DECLARE_MAC_BUF(mac); + ifsta->auth_tries++; if (ifsta->auth_tries > IEEE80211_AUTH_MAX_TRIES) { - printk(KERN_DEBUG "%s: authentication with AP " MAC_FMT + printk(KERN_DEBUG "%s: authentication with AP %s" " timed out\n", - dev->name, MAC_ARG(ifsta->bssid)); + dev->name, print_mac(mac, ifsta->bssid)); ifsta->state = IEEE80211_DISABLED; return; } ifsta->state = IEEE80211_AUTHENTICATE; - printk(KERN_DEBUG "%s: authenticate with AP " MAC_FMT "\n", - dev->name, MAC_ARG(ifsta->bssid)); + printk(KERN_DEBUG "%s: authenticate with AP %s\n", + dev->name, print_mac(mac, ifsta->bssid)); ieee80211_send_auth(dev, ifsta, 1, NULL, 0, 0); @@ -559,7 +590,7 @@ static void ieee80211_send_assoc(struct net_device *dev, memcpy(mgmt->sa, dev->dev_addr, ETH_ALEN); memcpy(mgmt->bssid, ifsta->bssid, ETH_ALEN); - if (ifsta->prev_bssid_set) { + if (ifsta->flags & 
IEEE80211_STA_PREV_BSSID_SET) { skb_put(skb, 10); mgmt->frame_control = IEEE80211_FC(IEEE80211_FTYPE_MGMT, IEEE80211_STYPE_REASSOC_REQ); @@ -589,8 +620,6 @@ static void ieee80211_send_assoc(struct net_device *dev, *pos++ = len; for (i = 0; i < len; i++) { int rate = mode->rates[i].rate; - if (mode->mode == MODE_ATHEROS_TURBO) - rate /= 2; *pos++ = (u8) (rate / 5); } @@ -600,8 +629,6 @@ static void ieee80211_send_assoc(struct net_device *dev, *pos++ = mode->num_rates - len; for (i = len; i < mode->num_rates; i++) { int rate = mode->rates[i].rate; - if (mode->mode == MODE_ATHEROS_TURBO) - rate /= 2; *pos++ = (u8) (rate / 5); } } @@ -611,7 +638,7 @@ static void ieee80211_send_assoc(struct net_device *dev, memcpy(pos, ifsta->extra_ie, ifsta->extra_ie_len); } - if (wmm && ifsta->wmm_enabled) { + if (wmm && (ifsta->flags & IEEE80211_STA_WMM_ENABLED)) { pos = skb_put(skb, 9); *pos++ = WLAN_EID_VENDOR_SPECIFIC; *pos++ = 7; /* len */ @@ -626,7 +653,7 @@ static void ieee80211_send_assoc(struct net_device *dev, kfree(ifsta->assocreq_ies); ifsta->assocreq_ies_len = (skb->data + skb->len) - ies; - ifsta->assocreq_ies = kmalloc(ifsta->assocreq_ies_len, GFP_ATOMIC); + ifsta->assocreq_ies = kmalloc(ifsta->assocreq_ies_len, GFP_KERNEL); if (ifsta->assocreq_ies) memcpy(ifsta->assocreq_ies, ies, ifsta->assocreq_ies_len); @@ -698,8 +725,8 @@ static int ieee80211_privacy_mismatch(struct net_device *dev, struct ieee80211_sta_bss *bss; int res = 0; - if (!ifsta || ifsta->mixed_cell || - ifsta->key_mgmt != IEEE80211_KEY_MGMT_NONE) + if (!ifsta || (ifsta->flags & IEEE80211_STA_MIXED_CELL) || + ifsta->key_management_enabled) return 0; bss = ieee80211_rx_bss_get(dev, ifsta->bssid); @@ -719,18 +746,20 @@ static int ieee80211_privacy_mismatch(struct net_device *dev, static void ieee80211_associate(struct net_device *dev, struct ieee80211_if_sta *ifsta) { + DECLARE_MAC_BUF(mac); + ifsta->assoc_tries++; if (ifsta->assoc_tries > IEEE80211_ASSOC_MAX_TRIES) { - printk(KERN_DEBUG "%s: association with AP " MAC_FMT + printk(KERN_DEBUG "%s: association with AP %s" " timed out\n", - dev->name, MAC_ARG(ifsta->bssid)); + dev->name, print_mac(mac, ifsta->bssid)); ifsta->state = IEEE80211_DISABLED; return; } ifsta->state = IEEE80211_ASSOCIATE; - printk(KERN_DEBUG "%s: associate with AP " MAC_FMT "\n", - dev->name, MAC_ARG(ifsta->bssid)); + printk(KERN_DEBUG "%s: associate with AP %s\n", + dev->name, print_mac(mac, ifsta->bssid)); if (ieee80211_privacy_mismatch(dev, ifsta)) { printk(KERN_DEBUG "%s: mismatch in privacy configuration and " "mixed-cell disabled - abort association\n", dev->name); @@ -750,6 +779,7 @@ static void ieee80211_associated(struct net_device *dev, struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); struct sta_info *sta; int disassoc; + DECLARE_MAC_BUF(mac); /* TODO: start monitoring current AP signal quality and number of * missed beacons. 
Scan other channels every now and then and search @@ -760,29 +790,27 @@ static void ieee80211_associated(struct net_device *dev, sta = sta_info_get(local, ifsta->bssid); if (!sta) { - printk(KERN_DEBUG "%s: No STA entry for own AP " MAC_FMT "\n", - dev->name, MAC_ARG(ifsta->bssid)); + printk(KERN_DEBUG "%s: No STA entry for own AP %s\n", + dev->name, print_mac(mac, ifsta->bssid)); disassoc = 1; } else { disassoc = 0; if (time_after(jiffies, sta->last_rx + IEEE80211_MONITORING_INTERVAL)) { - if (ifsta->probereq_poll) { + if (ifsta->flags & IEEE80211_STA_PROBEREQ_POLL) { printk(KERN_DEBUG "%s: No ProbeResp from " - "current AP " MAC_FMT " - assume out of " + "current AP %s - assume out of " "range\n", - dev->name, MAC_ARG(ifsta->bssid)); + dev->name, print_mac(mac, ifsta->bssid)); disassoc = 1; - sta_info_free(sta, 0); - ifsta->probereq_poll = 0; - } else { + sta_info_free(sta); + } else ieee80211_send_probe_req(dev, ifsta->bssid, local->scan_ssid, local->scan_ssid_len); - ifsta->probereq_poll = 1; - } + ifsta->flags ^= IEEE80211_STA_PROBEREQ_POLL; } else { - ifsta->probereq_poll = 0; + ifsta->flags &= ~IEEE80211_STA_PROBEREQ_POLL; if (time_after(jiffies, ifsta->last_probe + IEEE80211_PROBE_INTERVAL)) { ifsta->last_probe = jiffies; @@ -862,10 +890,7 @@ static void ieee80211_send_probe_req(struct net_device *dev, u8 *dst, pos = skb_put(skb, 1); supp_rates[1]++; } - if (mode->mode == MODE_ATHEROS_TURBO) - *pos = rate->rate / 10; - else - *pos = rate->rate / 5; + *pos = rate->rate / 5; } ieee80211_sta_tx(dev, skb, 0); @@ -876,7 +901,7 @@ static int ieee80211_sta_wep_configured(struct net_device *dev) { struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); if (!sdata || !sdata->default_key || - sdata->default_key->alg != ALG_WEP) + sdata->default_key->conf.alg != ALG_WEP) return 0; return 1; } @@ -886,7 +911,7 @@ static void ieee80211_auth_completed(struct net_device *dev, struct ieee80211_if_sta *ifsta) { printk(KERN_DEBUG "%s: authenticated\n", dev->name); - ifsta->authenticated = 1; + ifsta->flags |= IEEE80211_STA_AUTHENTICATED; ieee80211_associate(dev, ifsta); } @@ -924,37 +949,38 @@ static void ieee80211_rx_mgmt_auth(struct net_device *dev, { struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); u16 auth_alg, auth_transaction, status_code; + DECLARE_MAC_BUF(mac); if (ifsta->state != IEEE80211_AUTHENTICATE && sdata->type != IEEE80211_IF_TYPE_IBSS) { printk(KERN_DEBUG "%s: authentication frame received from " - MAC_FMT ", but not in authenticate state - ignored\n", - dev->name, MAC_ARG(mgmt->sa)); + "%s, but not in authenticate state - ignored\n", + dev->name, print_mac(mac, mgmt->sa)); return; } if (len < 24 + 6) { printk(KERN_DEBUG "%s: too short (%zd) authentication frame " - "received from " MAC_FMT " - ignored\n", - dev->name, len, MAC_ARG(mgmt->sa)); + "received from %s - ignored\n", + dev->name, len, print_mac(mac, mgmt->sa)); return; } if (sdata->type != IEEE80211_IF_TYPE_IBSS && memcmp(ifsta->bssid, mgmt->sa, ETH_ALEN) != 0) { printk(KERN_DEBUG "%s: authentication frame received from " - "unknown AP (SA=" MAC_FMT " BSSID=" MAC_FMT ") - " - "ignored\n", dev->name, MAC_ARG(mgmt->sa), - MAC_ARG(mgmt->bssid)); + "unknown AP (SA=%s BSSID=%s) - " + "ignored\n", dev->name, print_mac(mac, mgmt->sa), + print_mac(mac, mgmt->bssid)); return; } if (sdata->type != IEEE80211_IF_TYPE_IBSS && memcmp(ifsta->bssid, mgmt->bssid, ETH_ALEN) != 0) { printk(KERN_DEBUG "%s: authentication frame received from " - "unknown BSSID (SA=" MAC_FMT " BSSID=" MAC_FMT ") - " - "ignored\n", 
dev->name, MAC_ARG(mgmt->sa), - MAC_ARG(mgmt->bssid)); + "unknown BSSID (SA=%s BSSID=%s) - " + "ignored\n", dev->name, print_mac(mac, mgmt->sa), + print_mac(mac, mgmt->bssid)); return; } @@ -962,9 +988,9 @@ static void ieee80211_rx_mgmt_auth(struct net_device *dev, auth_transaction = le16_to_cpu(mgmt->u.auth.auth_transaction); status_code = le16_to_cpu(mgmt->u.auth.status_code); - printk(KERN_DEBUG "%s: RX authentication from " MAC_FMT " (alg=%d " + printk(KERN_DEBUG "%s: RX authentication from %s (alg=%d " "transaction=%d status=%d)\n", - dev->name, MAC_ARG(mgmt->sa), auth_alg, + dev->name, print_mac(mac, mgmt->sa), auth_alg, auth_transaction, status_code); if (sdata->type == IEEE80211_IF_TYPE_IBSS) { @@ -1051,29 +1077,30 @@ static void ieee80211_rx_mgmt_deauth(struct net_device *dev, size_t len) { u16 reason_code; + DECLARE_MAC_BUF(mac); if (len < 24 + 2) { printk(KERN_DEBUG "%s: too short (%zd) deauthentication frame " - "received from " MAC_FMT " - ignored\n", - dev->name, len, MAC_ARG(mgmt->sa)); + "received from %s - ignored\n", + dev->name, len, print_mac(mac, mgmt->sa)); return; } if (memcmp(ifsta->bssid, mgmt->sa, ETH_ALEN) != 0) { printk(KERN_DEBUG "%s: deauthentication frame received from " - "unknown AP (SA=" MAC_FMT " BSSID=" MAC_FMT ") - " - "ignored\n", dev->name, MAC_ARG(mgmt->sa), - MAC_ARG(mgmt->bssid)); + "unknown AP (SA=%s BSSID=%s) - " + "ignored\n", dev->name, print_mac(mac, mgmt->sa), + print_mac(mac, mgmt->bssid)); return; } reason_code = le16_to_cpu(mgmt->u.deauth.reason_code); - printk(KERN_DEBUG "%s: RX deauthentication from " MAC_FMT + printk(KERN_DEBUG "%s: RX deauthentication from %s" " (reason=%d)\n", - dev->name, MAC_ARG(mgmt->sa), reason_code); + dev->name, print_mac(mac, mgmt->sa), reason_code); - if (ifsta->authenticated) { + if (ifsta->flags & IEEE80211_STA_AUTHENTICATED) { printk(KERN_DEBUG "%s: deauthenticated\n", dev->name); } @@ -1086,7 +1113,7 @@ static void ieee80211_rx_mgmt_deauth(struct net_device *dev, } ieee80211_set_disassoc(dev, ifsta, 1); - ifsta->authenticated = 0; + ifsta->flags &= ~IEEE80211_STA_AUTHENTICATED; } @@ -1096,29 +1123,30 @@ static void ieee80211_rx_mgmt_disassoc(struct net_device *dev, size_t len) { u16 reason_code; + DECLARE_MAC_BUF(mac); if (len < 24 + 2) { printk(KERN_DEBUG "%s: too short (%zd) disassociation frame " - "received from " MAC_FMT " - ignored\n", - dev->name, len, MAC_ARG(mgmt->sa)); + "received from %s - ignored\n", + dev->name, len, print_mac(mac, mgmt->sa)); return; } if (memcmp(ifsta->bssid, mgmt->sa, ETH_ALEN) != 0) { printk(KERN_DEBUG "%s: disassociation frame received from " - "unknown AP (SA=" MAC_FMT " BSSID=" MAC_FMT ") - " - "ignored\n", dev->name, MAC_ARG(mgmt->sa), - MAC_ARG(mgmt->bssid)); + "unknown AP (SA=%s BSSID=%s) - " + "ignored\n", dev->name, print_mac(mac, mgmt->sa), + print_mac(mac, mgmt->bssid)); return; } reason_code = le16_to_cpu(mgmt->u.disassoc.reason_code); - printk(KERN_DEBUG "%s: RX disassociation from " MAC_FMT + printk(KERN_DEBUG "%s: RX disassociation from %s" " (reason=%d)\n", - dev->name, MAC_ARG(mgmt->sa), reason_code); + dev->name, print_mac(mac, mgmt->sa), reason_code); - if (ifsta->associated) + if (ifsta->flags & IEEE80211_STA_ASSOCIATED) printk(KERN_DEBUG "%s: disassociated\n", dev->name); if (ifsta->state == IEEE80211_ASSOCIATED) { @@ -1145,29 +1173,30 @@ static void ieee80211_rx_mgmt_assoc_resp(struct net_device *dev, struct ieee802_11_elems elems; u8 *pos; int i, j; + DECLARE_MAC_BUF(mac); /* AssocResp and ReassocResp have identical structure, so process both * of 
them in this function. */ if (ifsta->state != IEEE80211_ASSOCIATE) { printk(KERN_DEBUG "%s: association frame received from " - MAC_FMT ", but not in associate state - ignored\n", - dev->name, MAC_ARG(mgmt->sa)); + "%s, but not in associate state - ignored\n", + dev->name, print_mac(mac, mgmt->sa)); return; } if (len < 24 + 6) { printk(KERN_DEBUG "%s: too short (%zd) association frame " - "received from " MAC_FMT " - ignored\n", - dev->name, len, MAC_ARG(mgmt->sa)); + "received from %s - ignored\n", + dev->name, len, print_mac(mac, mgmt->sa)); return; } if (memcmp(ifsta->bssid, mgmt->sa, ETH_ALEN) != 0) { printk(KERN_DEBUG "%s: association frame received from " - "unknown AP (SA=" MAC_FMT " BSSID=" MAC_FMT ") - " - "ignored\n", dev->name, MAC_ARG(mgmt->sa), - MAC_ARG(mgmt->bssid)); + "unknown AP (SA=%s BSSID=%s) - " + "ignored\n", dev->name, print_mac(mac, mgmt->sa), + print_mac(mac, mgmt->bssid)); return; } @@ -1179,16 +1208,18 @@ static void ieee80211_rx_mgmt_assoc_resp(struct net_device *dev, "set\n", dev->name, aid); aid &= ~(BIT(15) | BIT(14)); - printk(KERN_DEBUG "%s: RX %sssocResp from " MAC_FMT " (capab=0x%x " + printk(KERN_DEBUG "%s: RX %sssocResp from %s (capab=0x%x " "status=%d aid=%d)\n", - dev->name, reassoc ? "Rea" : "A", MAC_ARG(mgmt->sa), + dev->name, reassoc ? "Rea" : "A", print_mac(mac, mgmt->sa), capab_info, status_code, aid); if (status_code != WLAN_STATUS_SUCCESS) { printk(KERN_DEBUG "%s: AP denied association (code=%d)\n", dev->name, status_code); - if (status_code == WLAN_STATUS_REASSOC_NO_ASSOC) - ifsta->prev_bssid_set = 0; + /* if this was a reassociation, ensure we try a "full" + * association next time. This works around some broken APs + * which do not correctly reject reassociation requests. */ + ifsta->flags &= ~IEEE80211_STA_PREV_BSSID_SET; return; } @@ -1224,7 +1255,7 @@ static void ieee80211_rx_mgmt_assoc_resp(struct net_device *dev, kfree(ifsta->assocresp_ies); ifsta->assocresp_ies_len = len - (pos - (u8 *) mgmt); - ifsta->assocresp_ies = kmalloc(ifsta->assocresp_ies_len, GFP_ATOMIC); + ifsta->assocresp_ies = kmalloc(ifsta->assocresp_ies_len, GFP_KERNEL); if (ifsta->assocresp_ies) memcpy(ifsta->assocresp_ies, pos, ifsta->assocresp_ies_len); @@ -1234,7 +1265,7 @@ static void ieee80211_rx_mgmt_assoc_resp(struct net_device *dev, sta = sta_info_get(local, ifsta->bssid); if (!sta) { struct ieee80211_sta_bss *bss; - sta = sta_info_add(local, dev, ifsta->bssid, GFP_ATOMIC); + sta = sta_info_add(local, dev, ifsta->bssid, GFP_KERNEL); if (!sta) { printk(KERN_DEBUG "%s: failed to add STA entry for the" " AP\n", dev->name); @@ -1250,23 +1281,18 @@ static void ieee80211_rx_mgmt_assoc_resp(struct net_device *dev, } sta->dev = dev; - sta->flags |= WLAN_STA_AUTH | WLAN_STA_ASSOC; - sta->assoc_ap = 1; + sta->flags |= WLAN_STA_AUTH | WLAN_STA_ASSOC | WLAN_STA_ASSOC_AP; rates = 0; mode = local->oper_hw_mode; for (i = 0; i < elems.supp_rates_len; i++) { int rate = (elems.supp_rates[i] & 0x7f) * 5; - if (mode->mode == MODE_ATHEROS_TURBO) - rate *= 2; for (j = 0; j < mode->num_rates; j++) if (mode->rates[j].rate == rate) rates |= BIT(j); } for (i = 0; i < elems.ext_supp_rates_len; i++) { int rate = (elems.ext_supp_rates[i] & 0x7f) * 5; - if (mode->mode == MODE_ATHEROS_TURBO) - rate *= 2; for (j = 0; j < mode->num_rates; j++) if (mode->rates[j].rate == rate) rates |= BIT(j); @@ -1275,7 +1301,7 @@ static void ieee80211_rx_mgmt_assoc_resp(struct net_device *dev, rate_control_rate_init(sta, local); - if (elems.wmm_param && ifsta->wmm_enabled) { + if (elems.wmm_param && 
(ifsta->flags & IEEE80211_STA_WMM_ENABLED)) { sta->flags |= WLAN_STA_WME; ieee80211_sta_wmm_params(dev, ifsta, elems.wmm_param, elems.wmm_param_len); @@ -1418,14 +1444,16 @@ static void ieee80211_rx_bss_info(struct net_device *dev, struct sta_info *sta; struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); u64 timestamp; + DECLARE_MAC_BUF(mac); + DECLARE_MAC_BUF(mac2); if (!beacon && memcmp(mgmt->da, dev->dev_addr, ETH_ALEN)) return; /* ignore ProbeResp to foreign address */ #if 0 - printk(KERN_DEBUG "%s: RX %s from " MAC_FMT " to " MAC_FMT "\n", + printk(KERN_DEBUG "%s: RX %s from %s to %s\n", dev->name, beacon ? "Beacon" : "Probe Response", - MAC_ARG(mgmt->sa), MAC_ARG(mgmt->da)); + print_mac(mac, mgmt->sa), print_mac(mac2, mgmt->da)); #endif baselen = (u8 *) mgmt->u.beacon.variable - (u8 *) mgmt; @@ -1444,10 +1472,10 @@ static void ieee80211_rx_bss_info(struct net_device *dev, else tsf = -1LLU; if (time_after(jiffies, last_tsf_debug + 5 * HZ)) { - printk(KERN_DEBUG "RX beacon SA=" MAC_FMT " BSSID=" - MAC_FMT " TSF=0x%llx BCN=0x%llx diff=%lld " + printk(KERN_DEBUG "RX beacon SA=%s BSSID=" + "%s TSF=0x%llx BCN=0x%llx diff=%lld " "@%lu\n", - MAC_ARG(mgmt->sa), MAC_ARG(mgmt->bssid), + print_mac(mac, mgmt->sa), print_mac(mac2, mgmt->bssid), (unsigned long long)tsf, (unsigned long long)timestamp, (unsigned long long)(tsf - timestamp), @@ -1486,8 +1514,6 @@ static void ieee80211_rx_bss_info(struct net_device *dev, rate = elems.ext_supp_rates [i - elems.supp_rates_len]; own_rate = 5 * (rate & 0x7f); - if (mode->mode == MODE_ATHEROS_TURBO) - own_rate *= 2; for (j = 0; j < num_rates; j++) if (rates[j].rate == own_rate) supp_rates |= BIT(j); @@ -1503,9 +1529,9 @@ static void ieee80211_rx_bss_info(struct net_device *dev, } if (sta->supp_rates != prev_rates) { printk(KERN_DEBUG "%s: updated supp_rates set for " - MAC_FMT " based on beacon info (0x%x & 0x%x -> " + "%s based on beacon info (0x%x & 0x%x -> " "0x%x)\n", - dev->name, MAC_ARG(sta->addr), prev_rates, + dev->name, print_mac(mac, sta->addr), prev_rates, supp_rates, sta->supp_rates); } sta_info_put(sta); @@ -1672,7 +1698,7 @@ static void ieee80211_rx_mgmt_beacon(struct net_device *dev, return; ifsta = &sdata->u.sta; - if (!ifsta->associated || + if (!(ifsta->flags & IEEE80211_STA_ASSOCIATED) || memcmp(ifsta->bssid, mgmt->bssid, ETH_ALEN) != 0) return; @@ -1688,7 +1714,7 @@ static void ieee80211_rx_mgmt_beacon(struct net_device *dev, if (elems.erp_info && elems.erp_info_len >= 1) ieee80211_handle_erp_ie(dev, elems.erp_info[0]); - if (elems.wmm_param && ifsta->wmm_enabled) { + if (elems.wmm_param && (ifsta->flags & IEEE80211_STA_WMM_ENABLED)) { ieee80211_sta_wmm_params(dev, ifsta, elems.wmm_param, elems.wmm_param_len); } @@ -1707,6 +1733,11 @@ static void ieee80211_rx_mgmt_probe_req(struct net_device *dev, struct sk_buff *skb; struct ieee80211_mgmt *resp; u8 *pos, *end; + DECLARE_MAC_BUF(mac); +#ifdef CONFIG_MAC80211_IBSS_DEBUG + DECLARE_MAC_BUF(mac2); + DECLARE_MAC_BUF(mac3); +#endif if (sdata->type != IEEE80211_IF_TYPE_IBSS || ifsta->state != IEEE80211_IBSS_JOINED || @@ -1719,10 +1750,10 @@ static void ieee80211_rx_mgmt_probe_req(struct net_device *dev, tx_last_beacon = 1; #ifdef CONFIG_MAC80211_IBSS_DEBUG - printk(KERN_DEBUG "%s: RX ProbeReq SA=" MAC_FMT " DA=" MAC_FMT " BSSID=" - MAC_FMT " (tx_last_beacon=%d)\n", - dev->name, MAC_ARG(mgmt->sa), MAC_ARG(mgmt->da), - MAC_ARG(mgmt->bssid), tx_last_beacon); + printk(KERN_DEBUG "%s: RX ProbeReq SA=%s DA=%s BSSID=" + "%s (tx_last_beacon=%d)\n", + dev->name, print_mac(mac, mgmt->sa), 
print_mac(mac2, mgmt->da), + print_mac(mac3, mgmt->bssid), tx_last_beacon); #endif /* CONFIG_MAC80211_IBSS_DEBUG */ if (!tx_last_beacon) @@ -1738,8 +1769,8 @@ static void ieee80211_rx_mgmt_probe_req(struct net_device *dev, pos + 2 + pos[1] > end) { if (net_ratelimit()) { printk(KERN_DEBUG "%s: Invalid SSID IE in ProbeReq " - "from " MAC_FMT "\n", - dev->name, MAC_ARG(mgmt->sa)); + "from %s\n", + dev->name, print_mac(mac, mgmt->sa)); } return; } @@ -1751,15 +1782,15 @@ static void ieee80211_rx_mgmt_probe_req(struct net_device *dev, } /* Reply with ProbeResp */ - skb = skb_copy(ifsta->probe_resp, GFP_ATOMIC); + skb = skb_copy(ifsta->probe_resp, GFP_KERNEL); if (!skb) return; resp = (struct ieee80211_mgmt *) skb->data; memcpy(resp->da, mgmt->sa, ETH_ALEN); #ifdef CONFIG_MAC80211_IBSS_DEBUG - printk(KERN_DEBUG "%s: Sending ProbeResp to " MAC_FMT "\n", - dev->name, MAC_ARG(resp->da)); + printk(KERN_DEBUG "%s: Sending ProbeResp to %s\n", + dev->name, print_mac(mac, resp->da)); #endif /* CONFIG_MAC80211_IBSS_DEBUG */ ieee80211_sta_tx(dev, skb, 0); } @@ -1890,7 +1921,7 @@ static int ieee80211_sta_active_ibss(struct net_device *dev) int active = 0; struct sta_info *sta; - spin_lock_bh(&local->sta_lock); + read_lock_bh(&local->sta_lock); list_for_each_entry(sta, &local->sta_list, list) { if (sta->dev == dev && time_after(sta->last_rx + IEEE80211_IBSS_MERGE_INTERVAL, @@ -1899,7 +1930,7 @@ static int ieee80211_sta_active_ibss(struct net_device *dev) break; } } - spin_unlock_bh(&local->sta_lock); + read_unlock_bh(&local->sta_lock); return active; } @@ -1909,16 +1940,25 @@ static void ieee80211_sta_expire(struct net_device *dev) { struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); struct sta_info *sta, *tmp; + LIST_HEAD(tmp_list); + DECLARE_MAC_BUF(mac); - spin_lock_bh(&local->sta_lock); + write_lock_bh(&local->sta_lock); list_for_each_entry_safe(sta, tmp, &local->sta_list, list) if (time_after(jiffies, sta->last_rx + IEEE80211_IBSS_INACTIVITY_LIMIT)) { - printk(KERN_DEBUG "%s: expiring inactive STA " MAC_FMT - "\n", dev->name, MAC_ARG(sta->addr)); - sta_info_free(sta, 1); + printk(KERN_DEBUG "%s: expiring inactive STA %s\n", + dev->name, print_mac(mac, sta->addr)); + __sta_info_get(sta); + sta_info_remove(sta); + list_add(&sta->list, &tmp_list); } - spin_unlock_bh(&local->sta_lock); + write_unlock_bh(&local->sta_lock); + + list_for_each_entry_safe(sta, tmp, &tmp_list, list) { + sta_info_free(sta); + sta_info_put(sta); + } } @@ -2047,7 +2087,8 @@ static void ieee80211_sta_reset_auth(struct net_device *dev, printk(KERN_DEBUG "%s: Initial auth_alg=%d\n", dev->name, ifsta->auth_alg); ifsta->auth_transaction = -1; - ifsta->associated = ifsta->auth_tries = ifsta->assoc_tries = 0; + ifsta->flags &= ~IEEE80211_STA_ASSOCIATED; + ifsta->auth_tries = ifsta->assoc_tries = 0; netif_carrier_off(dev); } @@ -2061,8 +2102,10 @@ void ieee80211_sta_req_auth(struct net_device *dev, if (sdata->type != IEEE80211_IF_TYPE_STA) return; - if ((ifsta->bssid_set || ifsta->auto_bssid_sel) && - (ifsta->ssid_set || ifsta->auto_ssid_sel)) { + if ((ifsta->flags & (IEEE80211_STA_BSSID_SET | + IEEE80211_STA_AUTO_BSSID_SEL)) && + (ifsta->flags & (IEEE80211_STA_SSID_SET | + IEEE80211_STA_AUTO_SSID_SEL))) { set_bit(IEEE80211_STA_REQ_AUTH, &ifsta->request); queue_work(local->hw.workqueue, &ifsta->work); } @@ -2076,7 +2119,7 @@ static int ieee80211_sta_match_ssid(struct ieee80211_if_sta *ifsta, if (!memcmp(ifsta->ssid, ssid, ssid_len)) return 1; - if (ifsta->auto_bssid_sel) + if (ifsta->flags & IEEE80211_STA_AUTO_BSSID_SEL) 
return 0; hidden_ssid = 1; @@ -2105,8 +2148,8 @@ static int ieee80211_sta_config_auth(struct net_device *dev, struct ieee80211_sta_bss *bss, *selected = NULL; int top_rssi = 0, freq; - if (!ifsta->auto_channel_sel && !ifsta->auto_bssid_sel && - !ifsta->auto_ssid_sel) { + if (!(ifsta->flags & (IEEE80211_STA_AUTO_SSID_SEL | + IEEE80211_STA_AUTO_BSSID_SEL | IEEE80211_STA_AUTO_CHANNEL_SEL))) { ifsta->state = IEEE80211_AUTHENTICATE; ieee80211_sta_reset_auth(dev, ifsta); return 0; @@ -2122,14 +2165,15 @@ static int ieee80211_sta_config_auth(struct net_device *dev, !!sdata->default_key) continue; - if (!ifsta->auto_channel_sel && bss->freq != freq) + if (!(ifsta->flags & IEEE80211_STA_AUTO_CHANNEL_SEL) && + bss->freq != freq) continue; - if (!ifsta->auto_bssid_sel && + if (!(ifsta->flags & IEEE80211_STA_AUTO_BSSID_SEL) && memcmp(bss->bssid, ifsta->bssid, ETH_ALEN)) continue; - if (!ifsta->auto_ssid_sel && + if (!(ifsta->flags & IEEE80211_STA_AUTO_SSID_SEL) && !ieee80211_sta_match_ssid(ifsta, bss->ssid, bss->ssid_len)) continue; @@ -2144,7 +2188,7 @@ static int ieee80211_sta_config_auth(struct net_device *dev, if (selected) { ieee80211_set_channel(local, -1, selected->freq); - if (!ifsta->ssid_set) + if (!(ifsta->flags & IEEE80211_STA_SSID_SET)) ieee80211_sta_set_ssid(dev, selected->ssid, selected->ssid_len); ieee80211_sta_set_bssid(dev, selected->bssid); @@ -2154,7 +2198,7 @@ static int ieee80211_sta_config_auth(struct net_device *dev, return 0; } else { if (ifsta->state != IEEE80211_AUTHENTICATE) { - if (ifsta->auto_ssid_sel) + if (ifsta->flags & IEEE80211_STA_AUTO_SSID_SEL) ieee80211_sta_start_scan(dev, NULL, 0); else ieee80211_sta_start_scan(dev, ifsta->ssid, @@ -2271,8 +2315,9 @@ static int ieee80211_sta_join_ibss(struct net_device *dev, "for IBSS beacon\n", dev->name); break; } - control.tx_rate = (local->short_preamble && - (rate->flags & IEEE80211_RATE_PREAMBLE2)) ? + control.tx_rate = + ((sdata->flags & IEEE80211_SDATA_SHORT_PREAMBLE) && + (rate->flags & IEEE80211_RATE_PREAMBLE2)) ? rate->val2 : rate->val; control.antenna_sel_tx = local->hw.conf.antenna_sel_tx; control.power_level = local->hw.conf.power_level; @@ -2303,8 +2348,6 @@ static int ieee80211_sta_join_ibss(struct net_device *dev, mode = local->oper_hw_mode; for (i = 0; i < bss->supp_rates_len; i++) { int bitrate = (bss->supp_rates[i] & 0x7f) * 5; - if (mode->mode == MODE_ATHEROS_TURBO) - bitrate *= 2; for (j = 0; j < mode->num_rates; j++) if (mode->rates[j].rate == bitrate) rates |= BIT(j); @@ -2336,6 +2379,7 @@ static int ieee80211_sta_create_ibss(struct net_device *dev, struct ieee80211_hw_mode *mode; u8 bssid[ETH_ALEN], *pos; int i; + DECLARE_MAC_BUF(mac); #if 0 /* Easier testing, use fixed BSSID. 
*/ @@ -2351,8 +2395,8 @@ static int ieee80211_sta_create_ibss(struct net_device *dev, bssid[0] |= 0x02; #endif - printk(KERN_DEBUG "%s: Creating new IBSS network, BSSID " MAC_FMT "\n", - dev->name, MAC_ARG(bssid)); + printk(KERN_DEBUG "%s: Creating new IBSS network, BSSID %s\n", + dev->name, print_mac(mac, bssid)); bss = ieee80211_rx_bss_add(dev, bssid); if (!bss) @@ -2377,8 +2421,6 @@ static int ieee80211_sta_create_ibss(struct net_device *dev, pos = bss->supp_rates; for (i = 0; i < mode->num_rates; i++) { int rate = mode->rates[i].rate; - if (mode->mode == MODE_ATHEROS_TURBO) - rate /= 2; *pos++ = (u8) (rate / 5); } @@ -2394,6 +2436,8 @@ static int ieee80211_sta_find_ibss(struct net_device *dev, int found = 0; u8 bssid[ETH_ALEN]; int active_ibss; + DECLARE_MAC_BUF(mac); + DECLARE_MAC_BUF(mac2); if (ifsta->ssid_len == 0) return -EINVAL; @@ -2410,8 +2454,8 @@ static int ieee80211_sta_find_ibss(struct net_device *dev, || !(bss->capability & WLAN_CAPABILITY_IBSS)) continue; #ifdef CONFIG_MAC80211_IBSS_DEBUG - printk(KERN_DEBUG " bssid=" MAC_FMT " found\n", - MAC_ARG(bss->bssid)); + printk(KERN_DEBUG " bssid=%s found\n", + print_mac(mac, bss->bssid)); #endif /* CONFIG_MAC80211_IBSS_DEBUG */ memcpy(bssid, bss->bssid, ETH_ALEN); found = 1; @@ -2421,14 +2465,14 @@ static int ieee80211_sta_find_ibss(struct net_device *dev, spin_unlock_bh(&local->sta_bss_lock); #ifdef CONFIG_MAC80211_IBSS_DEBUG - printk(KERN_DEBUG " sta_find_ibss: selected " MAC_FMT " current " - MAC_FMT "\n", MAC_ARG(bssid), MAC_ARG(ifsta->bssid)); + printk(KERN_DEBUG " sta_find_ibss: selected %s current " + "%s\n", print_mac(mac, bssid), print_mac(mac2, ifsta->bssid)); #endif /* CONFIG_MAC80211_IBSS_DEBUG */ if (found && memcmp(ifsta->bssid, bssid, ETH_ALEN) != 0 && (bss = ieee80211_rx_bss_get(dev, bssid))) { - printk(KERN_DEBUG "%s: Selected IBSS BSSID " MAC_FMT + printk(KERN_DEBUG "%s: Selected IBSS BSSID %s" " based on configured SSID\n", - dev->name, MAC_ARG(bssid)); + dev->name, print_mac(mac, bssid)); return ieee80211_sta_join_ibss(dev, ifsta, bss); } #ifdef CONFIG_MAC80211_IBSS_DEBUG @@ -2451,10 +2495,10 @@ static int ieee80211_sta_find_ibss(struct net_device *dev, if (time_after(jiffies, ifsta->ibss_join_req + IEEE80211_IBSS_JOIN_TIMEOUT)) { - if (ifsta->create_ibss && + if ((ifsta->flags & IEEE80211_STA_CREATE_IBSS) && local->oper_channel->flag & IEEE80211_CHAN_W_IBSS) return ieee80211_sta_create_ibss(dev, ifsta); - if (ifsta->create_ibss) { + if (ifsta->flags & IEEE80211_STA_CREATE_IBSS) { printk(KERN_DEBUG "%s: IBSS not allowed on the" " configured channel %d (%d MHz)\n", dev->name, local->hw.conf.channel, @@ -2515,13 +2559,17 @@ int ieee80211_sta_set_ssid(struct net_device *dev, char *ssid, size_t len) ifsta = &sdata->u.sta; if (ifsta->ssid_len != len || memcmp(ifsta->ssid, ssid, len) != 0) - ifsta->prev_bssid_set = 0; + ifsta->flags &= ~IEEE80211_STA_PREV_BSSID_SET; memcpy(ifsta->ssid, ssid, len); memset(ifsta->ssid + len, 0, IEEE80211_MAX_SSID_LEN - len); ifsta->ssid_len = len; - ifsta->ssid_set = len ? 
1 : 0; - if (sdata->type == IEEE80211_IF_TYPE_IBSS && !ifsta->bssid_set) { + if (len) + ifsta->flags |= IEEE80211_STA_SSID_SET; + else + ifsta->flags &= ~IEEE80211_STA_SSID_SET; + if (sdata->type == IEEE80211_IF_TYPE_IBSS && + !(ifsta->flags & IEEE80211_STA_BSSID_SET)) { ifsta->ibss_join_req = jiffies; ifsta->state = IEEE80211_IBSS_SEARCH; return ieee80211_sta_find_ibss(dev, ifsta); @@ -2559,10 +2607,11 @@ int ieee80211_sta_set_bssid(struct net_device *dev, u8 *bssid) } } - if (!is_valid_ether_addr(bssid)) - ifsta->bssid_set = 0; + if (is_valid_ether_addr(bssid)) + ifsta->flags |= IEEE80211_STA_BSSID_SET; else - ifsta->bssid_set = 1; + ifsta->flags &= ~IEEE80211_STA_BSSID_SET; + return 0; } @@ -2613,35 +2662,41 @@ void ieee80211_scan_completed(struct ieee80211_hw *hw) printk(KERN_DEBUG "%s: failed to restore operational" "channel after scan\n", dev->name); - if (!(local->hw.flags & IEEE80211_HW_NO_PROBE_FILTERING) && - ieee80211_if_config(dev)) - printk(KERN_DEBUG "%s: failed to restore operational" - "BSSID after scan\n", dev->name); + + netif_tx_lock_bh(local->mdev); + local->filter_flags &= ~FIF_BCN_PRBRESP_PROMISC; + local->ops->configure_filter(local_to_hw(local), + FIF_BCN_PRBRESP_PROMISC, + &local->filter_flags, + local->mdev->mc_count, + local->mdev->mc_list); + + netif_tx_unlock_bh(local->mdev); memset(&wrqu, 0, sizeof(wrqu)); wireless_send_event(dev, SIOCGIWSCAN, &wrqu, NULL); - read_lock(&local->sub_if_lock); - list_for_each_entry(sdata, &local->sub_if_list, list) { + rcu_read_lock(); + list_for_each_entry_rcu(sdata, &local->interfaces, list) { /* No need to wake the master device. */ if (sdata->dev == local->mdev) continue; if (sdata->type == IEEE80211_IF_TYPE_STA) { - if (sdata->u.sta.associated) + if (sdata->u.sta.flags & IEEE80211_STA_ASSOCIATED) ieee80211_send_nullfunc(local, sdata, 0); ieee80211_sta_timer((unsigned long)sdata); } netif_wake_queue(sdata->dev); } - read_unlock(&local->sub_if_lock); + rcu_read_unlock(); sdata = IEEE80211_DEV_TO_SUB_IF(dev); if (sdata->type == IEEE80211_IF_TYPE_IBSS) { struct ieee80211_if_sta *ifsta = &sdata->u.sta; - if (!ifsta->bssid_set || + if (!(ifsta->flags & IEEE80211_STA_BSSID_SET) || (!ifsta->state == IEEE80211_IBSS_JOINED && !ieee80211_sta_active_ibss(dev))) ieee80211_sta_find_ibss(dev, ifsta); @@ -2773,8 +2828,8 @@ static int ieee80211_sta_start_scan(struct net_device *dev, local->sta_scanning = 1; - read_lock(&local->sub_if_lock); - list_for_each_entry(sdata, &local->sub_if_list, list) { + rcu_read_lock(); + list_for_each_entry_rcu(sdata, &local->interfaces, list) { /* Don't stop the master interface, otherwise we can't transmit * probes! 
*/ @@ -2783,10 +2838,10 @@ static int ieee80211_sta_start_scan(struct net_device *dev, netif_stop_queue(sdata->dev); if (sdata->type == IEEE80211_IF_TYPE_STA && - sdata->u.sta.associated) + (sdata->u.sta.flags & IEEE80211_STA_ASSOCIATED)) ieee80211_send_nullfunc(local, sdata, 1); } - read_unlock(&local->sub_if_lock); + rcu_read_unlock(); if (ssid) { local->scan_ssid_len = ssid_len; @@ -2800,10 +2855,14 @@ static int ieee80211_sta_start_scan(struct net_device *dev, local->scan_channel_idx = 0; local->scan_dev = dev; - if (!(local->hw.flags & IEEE80211_HW_NO_PROBE_FILTERING) && - ieee80211_if_config(dev)) - printk(KERN_DEBUG "%s: failed to set BSSID for scan\n", - dev->name); + netif_tx_lock_bh(local->mdev); + local->filter_flags |= FIF_BCN_PRBRESP_PROMISC; + local->ops->configure_filter(local_to_hw(local), + FIF_BCN_PRBRESP_PROMISC, + &local->filter_flags, + local->mdev->mc_count, + local->mdev->mc_list); + netif_tx_unlock_bh(local->mdev); /* TODO: start scan as soon as all nullfunc frames are ACKed */ queue_delayed_work(local->hw.workqueue, &local->scan_work, @@ -3041,19 +3100,20 @@ struct sta_info * ieee80211_ibss_add_sta(struct net_device *dev, struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); struct sta_info *sta; struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); + DECLARE_MAC_BUF(mac); /* TODO: Could consider removing the least recently used entry and * allow new one to be added. */ if (local->num_sta >= IEEE80211_IBSS_MAX_STA_ENTRIES) { if (net_ratelimit()) { printk(KERN_DEBUG "%s: No room for a new IBSS STA " - "entry " MAC_FMT "\n", dev->name, MAC_ARG(addr)); + "entry %s\n", dev->name, print_mac(mac, addr)); } return NULL; } - printk(KERN_DEBUG "%s: Adding new IBSS station " MAC_FMT " (dev=%s)\n", - local->mdev->name, MAC_ARG(addr), dev->name); + printk(KERN_DEBUG "%s: Adding new IBSS station %s (dev=%s)\n", + wiphy_name(local->hw.wiphy), print_mac(mac, addr), dev->name); sta = sta_info_add(local, dev, addr, GFP_ATOMIC); if (!sta) @@ -3096,7 +3156,7 @@ int ieee80211_sta_disassociate(struct net_device *dev, u16 reason) if (sdata->type != IEEE80211_IF_TYPE_STA) return -EINVAL; - if (!ifsta->associated) + if (!(ifsta->flags & IEEE80211_STA_ASSOCIATED)) return -1; ieee80211_send_disassoc(dev, ifsta, reason); diff --git a/net/mac80211/key.c b/net/mac80211/key.c new file mode 100644 index 0000000000000000000000000000000000000000..0b2328f7d67cdb7b3bc90990f50e0450461d309c --- /dev/null +++ b/net/mac80211/key.c @@ -0,0 +1,279 @@ +/* + * Copyright 2002-2005, Instant802 Networks, Inc. + * Copyright 2005-2006, Devicescape Software, Inc. + * Copyright 2006-2007 Jiri Benc + * Copyright 2007 Johannes Berg + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include +#include +#include +#include +#include +#include "ieee80211_i.h" +#include "debugfs_key.h" +#include "aes_ccm.h" + + +/* + * Key handling basics + * + * Key handling in mac80211 is done based on per-interface (sub_if_data) + * keys and per-station keys. Since each station belongs to an interface, + * each station key also belongs to that interface. + * + * Hardware acceleration is done on a best-effort basis, for each key + * that is eligible the hardware is asked to enable that key but if + * it cannot do that they key is simply kept for software encryption. + * There is currently no way of knowing this except by looking into + * debugfs. 
+ * + * All operations here are called under RTNL so no extra locking is + * required. + */ + +static const u8 bcast_addr[ETH_ALEN] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF }; +static const u8 zero_addr[ETH_ALEN]; + +static const u8 *get_mac_for_key(struct ieee80211_key *key) +{ + const u8 *addr = bcast_addr; + + /* + * If we're an AP we won't ever receive frames with a non-WEP + * group key so we tell the driver that by using the zero MAC + * address to indicate a transmit-only key. + */ + if (key->conf.alg != ALG_WEP && + (key->sdata->type == IEEE80211_IF_TYPE_AP || + key->sdata->type == IEEE80211_IF_TYPE_VLAN)) + addr = zero_addr; + + if (key->sta) + addr = key->sta->addr; + + return addr; +} + +static void ieee80211_key_enable_hw_accel(struct ieee80211_key *key) +{ + const u8 *addr; + int ret; + DECLARE_MAC_BUF(mac); + + if (!key->local->ops->set_key) + return; + + addr = get_mac_for_key(key); + + ret = key->local->ops->set_key(local_to_hw(key->local), SET_KEY, + key->sdata->dev->dev_addr, addr, + &key->conf); + + if (!ret) + key->flags |= KEY_FLAG_UPLOADED_TO_HARDWARE; + + if (ret && ret != -ENOSPC && ret != -EOPNOTSUPP) + printk(KERN_ERR "mac80211-%s: failed to set key " + "(%d, %s) to hardware (%d)\n", + wiphy_name(key->local->hw.wiphy), + key->conf.keyidx, print_mac(mac, addr), ret); +} + +static void ieee80211_key_disable_hw_accel(struct ieee80211_key *key) +{ + const u8 *addr; + int ret; + DECLARE_MAC_BUF(mac); + + if (!key->local->ops->set_key) + return; + + if (!(key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE)) + return; + + addr = get_mac_for_key(key); + + ret = key->local->ops->set_key(local_to_hw(key->local), DISABLE_KEY, + key->sdata->dev->dev_addr, addr, + &key->conf); + + if (ret) + printk(KERN_ERR "mac80211-%s: failed to remove key " + "(%d, %s) from hardware (%d)\n", + wiphy_name(key->local->hw.wiphy), + key->conf.keyidx, print_mac(mac, addr), ret); + + key->flags &= ~KEY_FLAG_UPLOADED_TO_HARDWARE; +} + +struct ieee80211_key *ieee80211_key_alloc(struct ieee80211_sub_if_data *sdata, + struct sta_info *sta, + enum ieee80211_key_alg alg, + int idx, + size_t key_len, + const u8 *key_data) +{ + struct ieee80211_key *key; + + BUG_ON(idx < 0 || idx >= NUM_DEFAULT_KEYS); + + key = kzalloc(sizeof(struct ieee80211_key) + key_len, GFP_KERNEL); + if (!key) + return NULL; + + /* + * Default to software encryption; we'll later upload the + * key to the hardware if possible. + */ + key->conf.flags = 0; + key->flags = 0; + + key->conf.alg = alg; + key->conf.keyidx = idx; + key->conf.keylen = key_len; + memcpy(key->conf.key, key_data, key_len); + + key->local = sdata->local; + key->sdata = sdata; + key->sta = sta; + + if (alg == ALG_CCMP) { + /* + * Initialize AES key state here as an optimization so that + * it does not need to be initialized for every packet. + */ + key->u.ccmp.tfm = ieee80211_aes_key_setup_encrypt(key_data); + if (!key->u.ccmp.tfm) { + ieee80211_key_free(key); + return NULL; + } + } + + ieee80211_debugfs_key_add(key->local, key); + + /* remove key first */ + if (sta) + ieee80211_key_free(sta->key); + else + ieee80211_key_free(sdata->keys[idx]); + + if (sta) { + ieee80211_debugfs_key_sta_link(key, sta); + + /* + * some hardware cannot handle TKIP with QoS, so + * we indicate whether QoS could be in use. 
+ */ + if (sta->flags & WLAN_STA_WME) + key->conf.flags |= IEEE80211_KEY_FLAG_WMM_STA; + } else { + if (sdata->type == IEEE80211_IF_TYPE_STA) { + struct sta_info *ap; + + /* same here, the AP could be using QoS */ + ap = sta_info_get(key->local, key->sdata->u.sta.bssid); + if (ap) { + if (ap->flags & WLAN_STA_WME) + key->conf.flags |= + IEEE80211_KEY_FLAG_WMM_STA; + sta_info_put(ap); + } + } + } + + /* enable hwaccel if appropriate */ + if (netif_running(key->sdata->dev)) + ieee80211_key_enable_hw_accel(key); + + if (sta) + rcu_assign_pointer(sta->key, key); + else + rcu_assign_pointer(sdata->keys[idx], key); + + list_add(&key->list, &sdata->key_list); + + return key; +} + +void ieee80211_key_free(struct ieee80211_key *key) +{ + if (!key) + return; + + if (key->sta) { + rcu_assign_pointer(key->sta->key, NULL); + } else { + if (key->sdata->default_key == key) + ieee80211_set_default_key(key->sdata, -1); + if (key->conf.keyidx >= 0 && + key->conf.keyidx < NUM_DEFAULT_KEYS) + rcu_assign_pointer(key->sdata->keys[key->conf.keyidx], + NULL); + else + WARN_ON(1); + } + + /* wait for all key users to complete */ + synchronize_rcu(); + + /* remove from hwaccel if appropriate */ + ieee80211_key_disable_hw_accel(key); + + if (key->conf.alg == ALG_CCMP) + ieee80211_aes_key_free(key->u.ccmp.tfm); + ieee80211_debugfs_key_remove(key); + + list_del(&key->list); + + kfree(key); +} + +void ieee80211_set_default_key(struct ieee80211_sub_if_data *sdata, int idx) +{ + struct ieee80211_key *key = NULL; + + if (idx >= 0 && idx < NUM_DEFAULT_KEYS) + key = sdata->keys[idx]; + + if (sdata->default_key != key) { + ieee80211_debugfs_key_remove_default(sdata); + + rcu_assign_pointer(sdata->default_key, key); + + if (sdata->default_key) + ieee80211_debugfs_key_add_default(sdata); + } +} + +void ieee80211_free_keys(struct ieee80211_sub_if_data *sdata) +{ + struct ieee80211_key *key, *tmp; + + list_for_each_entry_safe(key, tmp, &sdata->key_list, list) + ieee80211_key_free(key); +} + +void ieee80211_enable_keys(struct ieee80211_sub_if_data *sdata) +{ + struct ieee80211_key *key; + + WARN_ON(!netif_running(sdata->dev)); + if (!netif_running(sdata->dev)) + return; + + list_for_each_entry(key, &sdata->key_list, list) + ieee80211_key_enable_hw_accel(key); +} + +void ieee80211_disable_keys(struct ieee80211_sub_if_data *sdata) +{ + struct ieee80211_key *key; + + list_for_each_entry(key, &sdata->key_list, list) + ieee80211_key_disable_hw_accel(key); +} diff --git a/net/mac80211/rc80211_simple.c b/net/mac80211/rc80211_simple.c index 17b9f46bbf2bb6dd912c1b11e1d3b1891ee1208f..314b8de888625c576a02fbd50caa01fc7787d84d 100644 --- a/net/mac80211/rc80211_simple.c +++ b/net/mac80211/rc80211_simple.c @@ -147,14 +147,6 @@ static void rate_control_simple_tx_status(void *priv, struct net_device *dev, srctrl = sta->rate_ctrl_priv; srctrl->tx_num_xmit++; if (status->excessive_retries) { - sta->antenna_sel_tx = sta->antenna_sel_tx == 1 ? 2 : 1; - sta->antenna_sel_rx = sta->antenna_sel_rx == 1 ? 
2 : 1; - if (local->sta_antenna_sel == STA_ANTENNA_SEL_SW_CTRL_DEBUG) { - printk(KERN_DEBUG "%s: " MAC_FMT " TX antenna --> %d " - "RX antenna --> %d (@%lu)\n", - dev->name, MAC_ARG(hdr->addr1), - sta->antenna_sel_tx, sta->antenna_sel_rx, jiffies); - } srctrl->tx_num_failures++; sta->tx_retry_failed++; sta->tx_num_consecutive_failures++; @@ -209,9 +201,10 @@ static void rate_control_simple_tx_status(void *priv, struct net_device *dev, srctrl->avg_rate_update = jiffies; if (srctrl->tx_avg_rate_num > 0) { #ifdef CONFIG_MAC80211_VERBOSE_DEBUG - printk(KERN_DEBUG "%s: STA " MAC_FMT " Average rate: " + DECLARE_MAC_BUF(mac); + printk(KERN_DEBUG "%s: STA %s Average rate: " "%d (%d/%d)\n", - dev->name, MAC_ARG(sta->addr), + dev->name, print_mac(mac, sta->addr), srctrl->tx_avg_rate_sum / srctrl->tx_avg_rate_num, srctrl->tx_avg_rate_sum, diff --git a/net/mac80211/regdomain.c b/net/mac80211/regdomain.c index b697a2afbb4b1d53fc3cb7d1d59200ee9ca39fbd..f42678fa62d1ec6711f0ef666a3859a366110884 100644 --- a/net/mac80211/regdomain.c +++ b/net/mac80211/regdomain.c @@ -82,12 +82,6 @@ static void ieee80211_unmask_channel(int mode, struct ieee80211_channel *chan) chan->flag = 0; - if (ieee80211_regdom == 64 && - (mode == MODE_ATHEROS_TURBO || mode == MODE_ATHEROS_TURBOG)) { - /* Do not allow Turbo modes in Japan. */ - return; - } - for (i = 0; channel_range[i].start_freq; i++) { const struct ieee80211_channel_range *r = &channel_range[i]; if (r->start_freq <= chan->freq && r->end_freq >= chan->freq) { diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c new file mode 100644 index 0000000000000000000000000000000000000000..ece77766ea2bd798552152b67313e8c86ae0aa1c --- /dev/null +++ b/net/mac80211/rx.c @@ -0,0 +1,1588 @@ +/* + * Copyright 2002-2005, Instant802 Networks, Inc. + * Copyright 2005-2006, Devicescape Software, Inc. + * Copyright 2006-2007 Jiri Benc + * Copyright 2007 Johannes Berg + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include +#include +#include +#include +#include +#include +#include + +#include "ieee80211_i.h" +#include "ieee80211_led.h" +#include "wep.h" +#include "wpa.h" +#include "tkip.h" +#include "wme.h" + +/* + * monitor mode reception + * + * This function cleans up the SKB, i.e. it removes all the stuff + * only useful for monitoring. + */ +static struct sk_buff *remove_monitor_info(struct ieee80211_local *local, + struct sk_buff *skb, + int rtap_len) +{ + skb_pull(skb, rtap_len); + + if (local->hw.flags & IEEE80211_HW_RX_INCLUDES_FCS) { + if (likely(skb->len > FCS_LEN)) + skb_trim(skb, skb->len - FCS_LEN); + else { + /* driver bug */ + WARN_ON(1); + dev_kfree_skb(skb); + skb = NULL; + } + } + + return skb; +} + +static inline int should_drop_frame(struct ieee80211_rx_status *status, + struct sk_buff *skb, + int present_fcs_len, + int radiotap_len) +{ + struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; + + if (status->flag & (RX_FLAG_FAILED_FCS_CRC | RX_FLAG_FAILED_PLCP_CRC)) + return 1; + if (unlikely(skb->len < 16 + present_fcs_len + radiotap_len)) + return 1; + if ((hdr->frame_control & cpu_to_le16(IEEE80211_FCTL_FTYPE)) == + cpu_to_le16(IEEE80211_FTYPE_CTL)) + return 1; + return 0; +} + +/* + * This function copies a received frame to all monitor interfaces and + * returns a cleaned-up SKB that no longer includes the FCS nor the + * radiotap header the driver might have added. 
+ */ +static struct sk_buff * +ieee80211_rx_monitor(struct ieee80211_local *local, struct sk_buff *origskb, + struct ieee80211_rx_status *status) +{ + struct ieee80211_sub_if_data *sdata; + struct ieee80211_rate *rate; + int needed_headroom = 0; + struct ieee80211_rtap_hdr { + struct ieee80211_radiotap_header hdr; + u8 flags; + u8 rate; + __le16 chan_freq; + __le16 chan_flags; + u8 antsignal; + u8 padding_for_rxflags; + __le16 rx_flags; + } __attribute__ ((packed)) *rthdr; + struct sk_buff *skb, *skb2; + struct net_device *prev_dev = NULL; + int present_fcs_len = 0; + int rtap_len = 0; + + /* + * First, we may need to make a copy of the skb because + * (1) we need to modify it for radiotap (if not present), and + * (2) the other RX handlers will modify the skb we got. + * + * We don't need to, of course, if we aren't going to return + * the SKB because it has a bad FCS/PLCP checksum. + */ + if (status->flag & RX_FLAG_RADIOTAP) + rtap_len = ieee80211_get_radiotap_len(origskb->data); + else + needed_headroom = sizeof(*rthdr); + + if (local->hw.flags & IEEE80211_HW_RX_INCLUDES_FCS) + present_fcs_len = FCS_LEN; + + if (!local->monitors) { + if (should_drop_frame(status, origskb, present_fcs_len, + rtap_len)) { + dev_kfree_skb(origskb); + return NULL; + } + + return remove_monitor_info(local, origskb, rtap_len); + } + + if (should_drop_frame(status, origskb, present_fcs_len, rtap_len)) { + /* only need to expand headroom if necessary */ + skb = origskb; + origskb = NULL; + + /* + * This shouldn't trigger often because most devices have an + * RX header they pull before we get here, and that should + * be big enough for our radiotap information. We should + * probably export the length to drivers so that we can have + * them allocate enough headroom to start with. + */ + if (skb_headroom(skb) < needed_headroom && + pskb_expand_head(skb, sizeof(*rthdr), 0, GFP_ATOMIC)) { + dev_kfree_skb(skb); + return NULL; + } + } else { + /* + * Need to make a copy and possibly remove radiotap header + * and FCS from the original. + */ + skb = skb_copy_expand(origskb, needed_headroom, 0, GFP_ATOMIC); + + origskb = remove_monitor_info(local, origskb, rtap_len); + + if (!skb) + return origskb; + } + + /* if necessary, prepend radiotap information */ + if (!(status->flag & RX_FLAG_RADIOTAP)) { + rthdr = (void *) skb_push(skb, sizeof(*rthdr)); + memset(rthdr, 0, sizeof(*rthdr)); + rthdr->hdr.it_len = cpu_to_le16(sizeof(*rthdr)); + rthdr->hdr.it_present = + cpu_to_le32((1 << IEEE80211_RADIOTAP_FLAGS) | + (1 << IEEE80211_RADIOTAP_RATE) | + (1 << IEEE80211_RADIOTAP_CHANNEL) | + (1 << IEEE80211_RADIOTAP_DB_ANTSIGNAL) | + (1 << IEEE80211_RADIOTAP_RX_FLAGS)); + rthdr->flags = local->hw.flags & IEEE80211_HW_RX_INCLUDES_FCS ? 
+ IEEE80211_RADIOTAP_F_FCS : 0; + + /* FIXME: when radiotap gets a 'bad PLCP' flag use it here */ + rthdr->rx_flags = 0; + if (status->flag & + (RX_FLAG_FAILED_FCS_CRC | RX_FLAG_FAILED_PLCP_CRC)) + rthdr->rx_flags |= + cpu_to_le16(IEEE80211_RADIOTAP_F_RX_BADFCS); + + rate = ieee80211_get_rate(local, status->phymode, + status->rate); + if (rate) + rthdr->rate = rate->rate / 5; + + rthdr->chan_freq = cpu_to_le16(status->freq); + + if (status->phymode == MODE_IEEE80211A) + rthdr->chan_flags = + cpu_to_le16(IEEE80211_CHAN_OFDM | + IEEE80211_CHAN_5GHZ); + else + rthdr->chan_flags = + cpu_to_le16(IEEE80211_CHAN_DYN | + IEEE80211_CHAN_2GHZ); + + rthdr->antsignal = status->ssi; + } + + skb_set_mac_header(skb, 0); + skb->ip_summed = CHECKSUM_UNNECESSARY; + skb->pkt_type = PACKET_OTHERHOST; + skb->protocol = htons(ETH_P_802_2); + + list_for_each_entry_rcu(sdata, &local->interfaces, list) { + if (!netif_running(sdata->dev)) + continue; + + if (sdata->type != IEEE80211_IF_TYPE_MNTR) + continue; + + if (prev_dev) { + skb2 = skb_clone(skb, GFP_ATOMIC); + if (skb2) { + skb2->dev = prev_dev; + netif_rx(skb2); + } + } + + prev_dev = sdata->dev; + sdata->dev->stats.rx_packets++; + sdata->dev->stats.rx_bytes += skb->len; + } + + if (prev_dev) { + skb->dev = prev_dev; + netif_rx(skb); + } else + dev_kfree_skb(skb); + + return origskb; +} + + +/* pre-rx handlers + * + * these don't have dev/sdata fields in the rx data + * The sta value should also not be used because it may + * be NULL even though a STA (in IBSS mode) will be added. + */ + +static ieee80211_txrx_result +ieee80211_rx_h_parse_qos(struct ieee80211_txrx_data *rx) +{ + u8 *data = rx->skb->data; + int tid; + + /* does the frame have a qos control field? */ + if (WLAN_FC_IS_QOS_DATA(rx->fc)) { + u8 *qc = data + ieee80211_get_hdrlen(rx->fc) - QOS_CONTROL_LEN; + /* frame has qos control */ + tid = qc[0] & QOS_CONTROL_TID_MASK; + } else { + if (unlikely((rx->fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_MGMT)) { + /* Separate TID for management frames */ + tid = NUM_RX_DATA_QUEUES - 1; + } else { + /* no qos control present */ + tid = 0; /* 802.1d - Best Effort */ + } + } + + I802_DEBUG_INC(rx->local->wme_rx_queue[tid]); + /* only a debug counter, sta might not be assigned properly yet */ + if (rx->sta) + I802_DEBUG_INC(rx->sta->wme_rx_queue[tid]); + + rx->u.rx.queue = tid; + /* Set skb->priority to 1d tag if highest order bit of TID is not set. + * For now, set skb->priority to 0 for other cases. */ + rx->skb->priority = (tid > 7) ? 
0 : tid; + + return TXRX_CONTINUE; +} + +static ieee80211_txrx_result +ieee80211_rx_h_load_stats(struct ieee80211_txrx_data *rx) +{ + struct ieee80211_local *local = rx->local; + struct sk_buff *skb = rx->skb; + struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; + u32 load = 0, hdrtime; + struct ieee80211_rate *rate; + struct ieee80211_hw_mode *mode = local->hw.conf.mode; + int i; + + /* Estimate total channel use caused by this frame */ + + if (unlikely(mode->num_rates < 0)) + return TXRX_CONTINUE; + + rate = &mode->rates[0]; + for (i = 0; i < mode->num_rates; i++) { + if (mode->rates[i].val == rx->u.rx.status->rate) { + rate = &mode->rates[i]; + break; + } + } + + /* 1 bit at 1 Mbit/s takes 1 usec; in channel_use values, + * 1 usec = 1/8 * (1080 / 10) = 13.5 */ + + if (mode->mode == MODE_IEEE80211A || + (mode->mode == MODE_IEEE80211G && + rate->flags & IEEE80211_RATE_ERP)) + hdrtime = CHAN_UTIL_HDR_SHORT; + else + hdrtime = CHAN_UTIL_HDR_LONG; + + load = hdrtime; + if (!is_multicast_ether_addr(hdr->addr1)) + load += hdrtime; + + load += skb->len * rate->rate_inv; + + /* Divide channel_use by 8 to avoid wrapping around the counter */ + load >>= CHAN_UTIL_SHIFT; + local->channel_use_raw += load; + rx->u.rx.load = load; + + return TXRX_CONTINUE; +} + +ieee80211_rx_handler ieee80211_rx_pre_handlers[] = +{ + ieee80211_rx_h_parse_qos, + ieee80211_rx_h_load_stats, + NULL +}; + +/* rx handlers */ + +static ieee80211_txrx_result +ieee80211_rx_h_if_stats(struct ieee80211_txrx_data *rx) +{ + if (rx->sta) + rx->sta->channel_use_raw += rx->u.rx.load; + rx->sdata->channel_use_raw += rx->u.rx.load; + return TXRX_CONTINUE; +} + +static ieee80211_txrx_result +ieee80211_rx_h_passive_scan(struct ieee80211_txrx_data *rx) +{ + struct ieee80211_local *local = rx->local; + struct sk_buff *skb = rx->skb; + + if (unlikely(local->sta_scanning != 0)) { + ieee80211_sta_rx_scan(rx->dev, skb, rx->u.rx.status); + return TXRX_QUEUED; + } + + if (unlikely(rx->flags & IEEE80211_TXRXD_RXIN_SCAN)) { + /* scanning finished during invoking of handlers */ + I802_DEBUG_INC(local->rx_handlers_drop_passive_scan); + return TXRX_DROP; + } + + return TXRX_CONTINUE; +} + +static ieee80211_txrx_result +ieee80211_rx_h_check(struct ieee80211_txrx_data *rx) +{ + struct ieee80211_hdr *hdr; + hdr = (struct ieee80211_hdr *) rx->skb->data; + + /* Drop duplicate 802.11 retransmissions (IEEE 802.11 Chap. 9.2.9) */ + if (rx->sta && !is_multicast_ether_addr(hdr->addr1)) { + if (unlikely(rx->fc & IEEE80211_FCTL_RETRY && + rx->sta->last_seq_ctrl[rx->u.rx.queue] == + hdr->seq_ctrl)) { + if (rx->flags & IEEE80211_TXRXD_RXRA_MATCH) { + rx->local->dot11FrameDuplicateCount++; + rx->sta->num_duplicates++; + } + return TXRX_DROP; + } else + rx->sta->last_seq_ctrl[rx->u.rx.queue] = hdr->seq_ctrl; + } + + if (unlikely(rx->skb->len < 16)) { + I802_DEBUG_INC(rx->local->rx_handlers_drop_short); + return TXRX_DROP; + } + + if (!(rx->flags & IEEE80211_TXRXD_RXRA_MATCH)) + rx->skb->pkt_type = PACKET_OTHERHOST; + else if (compare_ether_addr(rx->dev->dev_addr, hdr->addr1) == 0) + rx->skb->pkt_type = PACKET_HOST; + else if (is_multicast_ether_addr(hdr->addr1)) { + if (is_broadcast_ether_addr(hdr->addr1)) + rx->skb->pkt_type = PACKET_BROADCAST; + else + rx->skb->pkt_type = PACKET_MULTICAST; + } else + rx->skb->pkt_type = PACKET_OTHERHOST; + + /* Drop disallowed frame classes based on STA auth/assoc state; + * IEEE 802.11, Chap 5.5. 
+ * + * 80211.o does filtering only based on association state, i.e., it + * drops Class 3 frames from not associated stations. hostapd sends + * deauth/disassoc frames when needed. In addition, hostapd is + * responsible for filtering on both auth and assoc states. + */ + if (unlikely(((rx->fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_DATA || + ((rx->fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_CTL && + (rx->fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_PSPOLL)) && + rx->sdata->type != IEEE80211_IF_TYPE_IBSS && + (!rx->sta || !(rx->sta->flags & WLAN_STA_ASSOC)))) { + if ((!(rx->fc & IEEE80211_FCTL_FROMDS) && + !(rx->fc & IEEE80211_FCTL_TODS) && + (rx->fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_DATA) + || !(rx->flags & IEEE80211_TXRXD_RXRA_MATCH)) { + /* Drop IBSS frames and frames for other hosts + * silently. */ + return TXRX_DROP; + } + + return TXRX_DROP; + } + + return TXRX_CONTINUE; +} + + +static ieee80211_txrx_result +ieee80211_rx_h_decrypt(struct ieee80211_txrx_data *rx) +{ + struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) rx->skb->data; + int keyidx; + int hdrlen; + ieee80211_txrx_result result = TXRX_DROP; + struct ieee80211_key *stakey = NULL; + + /* + * Key selection 101 + * + * There are three types of keys: + * - GTK (group keys) + * - PTK (pairwise keys) + * - STK (station-to-station pairwise keys) + * + * When selecting a key, we have to distinguish between multicast + * (including broadcast) and unicast frames, the latter can only + * use PTKs and STKs while the former always use GTKs. Unless, of + * course, actual WEP keys ("pre-RSNA") are used, then unicast + * frames can also use key indizes like GTKs. Hence, if we don't + * have a PTK/STK we check the key index for a WEP key. + * + * Note that in a regular BSS, multicast frames are sent by the + * AP only, associated stations unicast the frame to the AP first + * which then multicasts it on their behalf. + * + * There is also a slight problem in IBSS mode: GTKs are negotiated + * with each station, that is something we don't currently handle. + * The spec seems to expect that one negotiates the same key with + * every station but there's no such requirement; VLANs could be + * possible. + */ + + if (!(rx->fc & IEEE80211_FCTL_PROTECTED)) + return TXRX_CONTINUE; + + /* + * No point in finding a key and decrypting if the frame is neither + * addressed to us nor a multicast frame. + */ + if (!(rx->flags & IEEE80211_TXRXD_RXRA_MATCH)) + return TXRX_CONTINUE; + + if (rx->sta) + stakey = rcu_dereference(rx->sta->key); + + if (!is_multicast_ether_addr(hdr->addr1) && stakey) { + rx->key = stakey; + } else { + /* + * The device doesn't give us the IV so we won't be + * able to look up the key. That's ok though, we + * don't need to decrypt the frame, we just won't + * be able to keep statistics accurate. + * Except for key threshold notifications, should + * we somehow allow the driver to tell us which key + * the hardware used if this flag is set? + */ + if ((rx->u.rx.status->flag & RX_FLAG_DECRYPTED) && + (rx->u.rx.status->flag & RX_FLAG_IV_STRIPPED)) + return TXRX_CONTINUE; + + hdrlen = ieee80211_get_hdrlen(rx->fc); + + if (rx->skb->len < 8 + hdrlen) + return TXRX_DROP; /* TODO: count this? 
*/ + + /* + * no need to call ieee80211_wep_get_keyidx, + * it verifies a bunch of things we've done already + */ + keyidx = rx->skb->data[hdrlen + 3] >> 6; + + rx->key = rcu_dereference(rx->sdata->keys[keyidx]); + + /* + * RSNA-protected unicast frames should always be sent with + * pairwise or station-to-station keys, but for WEP we allow + * using a key index as well. + */ + if (rx->key && rx->key->conf.alg != ALG_WEP && + !is_multicast_ether_addr(hdr->addr1)) + rx->key = NULL; + } + + if (rx->key) { + rx->key->tx_rx_count++; + /* TODO: add threshold stuff again */ + } else { + if (net_ratelimit()) + printk(KERN_DEBUG "%s: RX protected frame," + " but have no key\n", rx->dev->name); + return TXRX_DROP; + } + + /* Check for weak IVs if possible */ + if (rx->sta && rx->key->conf.alg == ALG_WEP && + ((rx->fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_DATA) && + (!(rx->u.rx.status->flag & RX_FLAG_IV_STRIPPED) || + !(rx->u.rx.status->flag & RX_FLAG_DECRYPTED)) && + ieee80211_wep_is_weak_iv(rx->skb, rx->key)) + rx->sta->wep_weak_iv_count++; + + switch (rx->key->conf.alg) { + case ALG_WEP: + result = ieee80211_crypto_wep_decrypt(rx); + break; + case ALG_TKIP: + result = ieee80211_crypto_tkip_decrypt(rx); + break; + case ALG_CCMP: + result = ieee80211_crypto_ccmp_decrypt(rx); + break; + } + + /* either the frame has been decrypted or will be dropped */ + rx->u.rx.status->flag |= RX_FLAG_DECRYPTED; + + return result; +} + +static void ap_sta_ps_start(struct net_device *dev, struct sta_info *sta) +{ + struct ieee80211_sub_if_data *sdata; + DECLARE_MAC_BUF(mac); + + sdata = IEEE80211_DEV_TO_SUB_IF(sta->dev); + + if (sdata->bss) + atomic_inc(&sdata->bss->num_sta_ps); + sta->flags |= WLAN_STA_PS; + sta->pspoll = 0; +#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG + printk(KERN_DEBUG "%s: STA %s aid %d enters power save mode\n", + dev->name, print_mac(mac, sta->addr), sta->aid); +#endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */ +} + +static int ap_sta_ps_end(struct net_device *dev, struct sta_info *sta) +{ + struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); + struct sk_buff *skb; + int sent = 0; + struct ieee80211_sub_if_data *sdata; + struct ieee80211_tx_packet_data *pkt_data; + DECLARE_MAC_BUF(mac); + + sdata = IEEE80211_DEV_TO_SUB_IF(sta->dev); + if (sdata->bss) + atomic_dec(&sdata->bss->num_sta_ps); + sta->flags &= ~(WLAN_STA_PS | WLAN_STA_TIM); + sta->pspoll = 0; + if (!skb_queue_empty(&sta->ps_tx_buf)) { + if (local->ops->set_tim) + local->ops->set_tim(local_to_hw(local), sta->aid, 0); + if (sdata->bss) + bss_tim_clear(local, sdata->bss, sta->aid); + } +#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG + printk(KERN_DEBUG "%s: STA %s aid %d exits power save mode\n", + dev->name, print_mac(mac, sta->addr), sta->aid); +#endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */ + /* Send all buffered frames to the station */ + while ((skb = skb_dequeue(&sta->tx_filtered)) != NULL) { + pkt_data = (struct ieee80211_tx_packet_data *) skb->cb; + sent++; + pkt_data->flags |= IEEE80211_TXPD_REQUEUE; + dev_queue_xmit(skb); + } + while ((skb = skb_dequeue(&sta->ps_tx_buf)) != NULL) { + pkt_data = (struct ieee80211_tx_packet_data *) skb->cb; + local->total_ps_buffered--; + sent++; +#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG + printk(KERN_DEBUG "%s: STA %s aid %d send PS frame " + "since STA not sleeping anymore\n", dev->name, + print_mac(mac, sta->addr), sta->aid); +#endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */ + pkt_data->flags |= IEEE80211_TXPD_REQUEUE; + dev_queue_xmit(skb); + } + + return sent; +} + +static 
ieee80211_txrx_result +ieee80211_rx_h_sta_process(struct ieee80211_txrx_data *rx) +{ + struct sta_info *sta = rx->sta; + struct net_device *dev = rx->dev; + struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) rx->skb->data; + + if (!sta) + return TXRX_CONTINUE; + + /* Update last_rx only for IBSS packets which are for the current + * BSSID to avoid keeping the current IBSS network alive in cases where + * other STAs are using different BSSID. */ + if (rx->sdata->type == IEEE80211_IF_TYPE_IBSS) { + u8 *bssid = ieee80211_get_bssid(hdr, rx->skb->len); + if (compare_ether_addr(bssid, rx->sdata->u.sta.bssid) == 0) + sta->last_rx = jiffies; + } else + if (!is_multicast_ether_addr(hdr->addr1) || + rx->sdata->type == IEEE80211_IF_TYPE_STA) { + /* Update last_rx only for unicast frames in order to prevent + * the Probe Request frames (the only broadcast frames from a + * STA in infrastructure mode) from keeping a connection alive. + */ + sta->last_rx = jiffies; + } + + if (!(rx->flags & IEEE80211_TXRXD_RXRA_MATCH)) + return TXRX_CONTINUE; + + sta->rx_fragments++; + sta->rx_bytes += rx->skb->len; + sta->last_rssi = rx->u.rx.status->ssi; + sta->last_signal = rx->u.rx.status->signal; + sta->last_noise = rx->u.rx.status->noise; + + if (!(rx->fc & IEEE80211_FCTL_MOREFRAGS)) { + /* Change STA power saving mode only in the end of a frame + * exchange sequence */ + if ((sta->flags & WLAN_STA_PS) && !(rx->fc & IEEE80211_FCTL_PM)) + rx->u.rx.sent_ps_buffered += ap_sta_ps_end(dev, sta); + else if (!(sta->flags & WLAN_STA_PS) && + (rx->fc & IEEE80211_FCTL_PM)) + ap_sta_ps_start(dev, sta); + } + + /* Drop data::nullfunc frames silently, since they are used only to + * control station power saving mode. */ + if ((rx->fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_DATA && + (rx->fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_NULLFUNC) { + I802_DEBUG_INC(rx->local->rx_handlers_drop_nullfunc); + /* Update counter and free packet here to avoid counting this + * as a dropped packed. 
*/ + sta->rx_packets++; + dev_kfree_skb(rx->skb); + return TXRX_QUEUED; + } + + return TXRX_CONTINUE; +} /* ieee80211_rx_h_sta_process */ + +static inline struct ieee80211_fragment_entry * +ieee80211_reassemble_add(struct ieee80211_sub_if_data *sdata, + unsigned int frag, unsigned int seq, int rx_queue, + struct sk_buff **skb) +{ + struct ieee80211_fragment_entry *entry; + int idx; + + idx = sdata->fragment_next; + entry = &sdata->fragments[sdata->fragment_next++]; + if (sdata->fragment_next >= IEEE80211_FRAGMENT_MAX) + sdata->fragment_next = 0; + + if (!skb_queue_empty(&entry->skb_list)) { +#ifdef CONFIG_MAC80211_DEBUG + struct ieee80211_hdr *hdr = + (struct ieee80211_hdr *) entry->skb_list.next->data; + DECLARE_MAC_BUF(mac); + DECLARE_MAC_BUF(mac2); + printk(KERN_DEBUG "%s: RX reassembly removed oldest " + "fragment entry (idx=%d age=%lu seq=%d last_frag=%d " + "addr1=%s addr2=%s\n", + sdata->dev->name, idx, + jiffies - entry->first_frag_time, entry->seq, + entry->last_frag, print_mac(mac, hdr->addr1), + print_mac(mac2, hdr->addr2)); +#endif /* CONFIG_MAC80211_DEBUG */ + __skb_queue_purge(&entry->skb_list); + } + + __skb_queue_tail(&entry->skb_list, *skb); /* no need for locking */ + *skb = NULL; + entry->first_frag_time = jiffies; + entry->seq = seq; + entry->rx_queue = rx_queue; + entry->last_frag = frag; + entry->ccmp = 0; + entry->extra_len = 0; + + return entry; +} + +static inline struct ieee80211_fragment_entry * +ieee80211_reassemble_find(struct ieee80211_sub_if_data *sdata, + u16 fc, unsigned int frag, unsigned int seq, + int rx_queue, struct ieee80211_hdr *hdr) +{ + struct ieee80211_fragment_entry *entry; + int i, idx; + + idx = sdata->fragment_next; + for (i = 0; i < IEEE80211_FRAGMENT_MAX; i++) { + struct ieee80211_hdr *f_hdr; + u16 f_fc; + + idx--; + if (idx < 0) + idx = IEEE80211_FRAGMENT_MAX - 1; + + entry = &sdata->fragments[idx]; + if (skb_queue_empty(&entry->skb_list) || entry->seq != seq || + entry->rx_queue != rx_queue || + entry->last_frag + 1 != frag) + continue; + + f_hdr = (struct ieee80211_hdr *) entry->skb_list.next->data; + f_fc = le16_to_cpu(f_hdr->frame_control); + + if ((fc & IEEE80211_FCTL_FTYPE) != (f_fc & IEEE80211_FCTL_FTYPE) || + compare_ether_addr(hdr->addr1, f_hdr->addr1) != 0 || + compare_ether_addr(hdr->addr2, f_hdr->addr2) != 0) + continue; + + if (entry->first_frag_time + 2 * HZ < jiffies) { + __skb_queue_purge(&entry->skb_list); + continue; + } + return entry; + } + + return NULL; +} + +static ieee80211_txrx_result +ieee80211_rx_h_defragment(struct ieee80211_txrx_data *rx) +{ + struct ieee80211_hdr *hdr; + u16 sc; + unsigned int frag, seq; + struct ieee80211_fragment_entry *entry; + struct sk_buff *skb; + DECLARE_MAC_BUF(mac); + + hdr = (struct ieee80211_hdr *) rx->skb->data; + sc = le16_to_cpu(hdr->seq_ctrl); + frag = sc & IEEE80211_SCTL_FRAG; + + if (likely((!(rx->fc & IEEE80211_FCTL_MOREFRAGS) && frag == 0) || + (rx->skb)->len < 24 || + is_multicast_ether_addr(hdr->addr1))) { + /* not fragmented */ + goto out; + } + I802_DEBUG_INC(rx->local->rx_handlers_fragments); + + seq = (sc & IEEE80211_SCTL_SEQ) >> 4; + + if (frag == 0) { + /* This is the first fragment of a new frame. */ + entry = ieee80211_reassemble_add(rx->sdata, frag, seq, + rx->u.rx.queue, &(rx->skb)); + if (rx->key && rx->key->conf.alg == ALG_CCMP && + (rx->fc & IEEE80211_FCTL_PROTECTED)) { + /* Store CCMP PN so that we can verify that the next + * fragment has a sequential PN value. 
*/ + entry->ccmp = 1; + memcpy(entry->last_pn, + rx->key->u.ccmp.rx_pn[rx->u.rx.queue], + CCMP_PN_LEN); + } + return TXRX_QUEUED; + } + + /* This is a fragment for a frame that should already be pending in + * fragment cache. Add this fragment to the end of the pending entry. + */ + entry = ieee80211_reassemble_find(rx->sdata, rx->fc, frag, seq, + rx->u.rx.queue, hdr); + if (!entry) { + I802_DEBUG_INC(rx->local->rx_handlers_drop_defrag); + return TXRX_DROP; + } + + /* Verify that MPDUs within one MSDU have sequential PN values. + * (IEEE 802.11i, 8.3.3.4.5) */ + if (entry->ccmp) { + int i; + u8 pn[CCMP_PN_LEN], *rpn; + if (!rx->key || rx->key->conf.alg != ALG_CCMP) + return TXRX_DROP; + memcpy(pn, entry->last_pn, CCMP_PN_LEN); + for (i = CCMP_PN_LEN - 1; i >= 0; i--) { + pn[i]++; + if (pn[i]) + break; + } + rpn = rx->key->u.ccmp.rx_pn[rx->u.rx.queue]; + if (memcmp(pn, rpn, CCMP_PN_LEN) != 0) { + if (net_ratelimit()) + printk(KERN_DEBUG "%s: defrag: CCMP PN not " + "sequential A2=%s" + " PN=%02x%02x%02x%02x%02x%02x " + "(expected %02x%02x%02x%02x%02x%02x)\n", + rx->dev->name, print_mac(mac, hdr->addr2), + rpn[0], rpn[1], rpn[2], rpn[3], rpn[4], + rpn[5], pn[0], pn[1], pn[2], pn[3], + pn[4], pn[5]); + return TXRX_DROP; + } + memcpy(entry->last_pn, pn, CCMP_PN_LEN); + } + + skb_pull(rx->skb, ieee80211_get_hdrlen(rx->fc)); + __skb_queue_tail(&entry->skb_list, rx->skb); + entry->last_frag = frag; + entry->extra_len += rx->skb->len; + if (rx->fc & IEEE80211_FCTL_MOREFRAGS) { + rx->skb = NULL; + return TXRX_QUEUED; + } + + rx->skb = __skb_dequeue(&entry->skb_list); + if (skb_tailroom(rx->skb) < entry->extra_len) { + I802_DEBUG_INC(rx->local->rx_expand_skb_head2); + if (unlikely(pskb_expand_head(rx->skb, 0, entry->extra_len, + GFP_ATOMIC))) { + I802_DEBUG_INC(rx->local->rx_handlers_drop_defrag); + __skb_queue_purge(&entry->skb_list); + return TXRX_DROP; + } + } + while ((skb = __skb_dequeue(&entry->skb_list))) { + memcpy(skb_put(rx->skb, skb->len), skb->data, skb->len); + dev_kfree_skb(skb); + } + + /* Complete frame has been reassembled - process it now */ + rx->flags |= IEEE80211_TXRXD_FRAGMENTED; + + out: + if (rx->sta) + rx->sta->rx_packets++; + if (is_multicast_ether_addr(hdr->addr1)) + rx->local->dot11MulticastReceivedFrameCount++; + else + ieee80211_led_rx(rx->local); + return TXRX_CONTINUE; +} + +static ieee80211_txrx_result +ieee80211_rx_h_ps_poll(struct ieee80211_txrx_data *rx) +{ + struct sk_buff *skb; + int no_pending_pkts; + DECLARE_MAC_BUF(mac); + + if (likely(!rx->sta || + (rx->fc & IEEE80211_FCTL_FTYPE) != IEEE80211_FTYPE_CTL || + (rx->fc & IEEE80211_FCTL_STYPE) != IEEE80211_STYPE_PSPOLL || + !(rx->flags & IEEE80211_TXRXD_RXRA_MATCH))) + return TXRX_CONTINUE; + + skb = skb_dequeue(&rx->sta->tx_filtered); + if (!skb) { + skb = skb_dequeue(&rx->sta->ps_tx_buf); + if (skb) + rx->local->total_ps_buffered--; + } + no_pending_pkts = skb_queue_empty(&rx->sta->tx_filtered) && + skb_queue_empty(&rx->sta->ps_tx_buf); + + if (skb) { + struct ieee80211_hdr *hdr = + (struct ieee80211_hdr *) skb->data; + + /* tell TX path to send one frame even though the STA may + * still remain is PS mode after this frame exchange */ + rx->sta->pspoll = 1; + +#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG + printk(KERN_DEBUG "STA %s aid %d: PS Poll (entries after %d)\n", + print_mac(mac, rx->sta->addr), rx->sta->aid, + skb_queue_len(&rx->sta->ps_tx_buf)); +#endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */ + + /* Use MoreData flag to indicate whether there are more + * buffered frames for this STA */ + if 
(no_pending_pkts) { + hdr->frame_control &= cpu_to_le16(~IEEE80211_FCTL_MOREDATA); + rx->sta->flags &= ~WLAN_STA_TIM; + } else + hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_MOREDATA); + + dev_queue_xmit(skb); + + if (no_pending_pkts) { + if (rx->local->ops->set_tim) + rx->local->ops->set_tim(local_to_hw(rx->local), + rx->sta->aid, 0); + if (rx->sdata->bss) + bss_tim_clear(rx->local, rx->sdata->bss, rx->sta->aid); + } +#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG + } else if (!rx->u.rx.sent_ps_buffered) { + printk(KERN_DEBUG "%s: STA %s sent PS Poll even " + "though there is no buffered frames for it\n", + rx->dev->name, print_mac(mac, rx->sta->addr)); +#endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */ + + } + + /* Free PS Poll skb here instead of returning TXRX_DROP that would + * count as an dropped frame. */ + dev_kfree_skb(rx->skb); + + return TXRX_QUEUED; +} + +static ieee80211_txrx_result +ieee80211_rx_h_remove_qos_control(struct ieee80211_txrx_data *rx) +{ + u16 fc = rx->fc; + u8 *data = rx->skb->data; + struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) data; + + if (!WLAN_FC_IS_QOS_DATA(fc)) + return TXRX_CONTINUE; + + /* remove the qos control field, update frame type and meta-data */ + memmove(data + 2, data, ieee80211_get_hdrlen(fc) - 2); + hdr = (struct ieee80211_hdr *) skb_pull(rx->skb, 2); + /* change frame type to non QOS */ + rx->fc = fc &= ~IEEE80211_STYPE_QOS_DATA; + hdr->frame_control = cpu_to_le16(fc); + + return TXRX_CONTINUE; +} + +static ieee80211_txrx_result +ieee80211_rx_h_802_1x_pae(struct ieee80211_txrx_data *rx) +{ + if (rx->sdata->eapol && ieee80211_is_eapol(rx->skb) && + rx->sdata->type != IEEE80211_IF_TYPE_STA && + (rx->flags & IEEE80211_TXRXD_RXRA_MATCH)) + return TXRX_CONTINUE; + + if (unlikely(rx->sdata->ieee802_1x && + (rx->fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_DATA && + (rx->fc & IEEE80211_FCTL_STYPE) != IEEE80211_STYPE_NULLFUNC && + (!rx->sta || !(rx->sta->flags & WLAN_STA_AUTHORIZED)) && + !ieee80211_is_eapol(rx->skb))) { +#ifdef CONFIG_MAC80211_DEBUG + struct ieee80211_hdr *hdr = + (struct ieee80211_hdr *) rx->skb->data; + DECLARE_MAC_BUF(mac); + printk(KERN_DEBUG "%s: dropped frame from %s" + " (unauthorized port)\n", rx->dev->name, + print_mac(mac, hdr->addr2)); +#endif /* CONFIG_MAC80211_DEBUG */ + return TXRX_DROP; + } + + return TXRX_CONTINUE; +} + +static ieee80211_txrx_result +ieee80211_rx_h_drop_unencrypted(struct ieee80211_txrx_data *rx) +{ + /* + * Pass through unencrypted frames if the hardware has + * decrypted them already. + */ + if (rx->u.rx.status->flag & RX_FLAG_DECRYPTED) + return TXRX_CONTINUE; + + /* Drop unencrypted frames if key is set. 
*/ + if (unlikely(!(rx->fc & IEEE80211_FCTL_PROTECTED) && + (rx->fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_DATA && + (rx->fc & IEEE80211_FCTL_STYPE) != IEEE80211_STYPE_NULLFUNC && + rx->sdata->drop_unencrypted && + (rx->sdata->eapol == 0 || !ieee80211_is_eapol(rx->skb)))) { + if (net_ratelimit()) + printk(KERN_DEBUG "%s: RX non-WEP frame, but expected " + "encryption\n", rx->dev->name); + return TXRX_DROP; + } + return TXRX_CONTINUE; +} + +static ieee80211_txrx_result +ieee80211_rx_h_data(struct ieee80211_txrx_data *rx) +{ + struct net_device *dev = rx->dev; + struct ieee80211_local *local = rx->local; + struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) rx->skb->data; + u16 fc, hdrlen, ethertype; + u8 *payload; + u8 dst[ETH_ALEN]; + u8 src[ETH_ALEN]; + struct sk_buff *skb = rx->skb, *skb2; + struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); + DECLARE_MAC_BUF(mac); + DECLARE_MAC_BUF(mac2); + DECLARE_MAC_BUF(mac3); + DECLARE_MAC_BUF(mac4); + + fc = rx->fc; + if (unlikely((fc & IEEE80211_FCTL_FTYPE) != IEEE80211_FTYPE_DATA)) + return TXRX_CONTINUE; + + if (unlikely(!WLAN_FC_DATA_PRESENT(fc))) + return TXRX_DROP; + + hdrlen = ieee80211_get_hdrlen(fc); + + /* convert IEEE 802.11 header + possible LLC headers into Ethernet + * header + * IEEE 802.11 address fields: + * ToDS FromDS Addr1 Addr2 Addr3 Addr4 + * 0 0 DA SA BSSID n/a + * 0 1 DA BSSID SA n/a + * 1 0 BSSID SA DA n/a + * 1 1 RA TA DA SA + */ + + switch (fc & (IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS)) { + case IEEE80211_FCTL_TODS: + /* BSSID SA DA */ + memcpy(dst, hdr->addr3, ETH_ALEN); + memcpy(src, hdr->addr2, ETH_ALEN); + + if (unlikely(sdata->type != IEEE80211_IF_TYPE_AP && + sdata->type != IEEE80211_IF_TYPE_VLAN)) { + if (net_ratelimit()) + printk(KERN_DEBUG "%s: dropped ToDS frame " + "(BSSID=%s SA=%s DA=%s)\n", + dev->name, + print_mac(mac, hdr->addr1), + print_mac(mac2, hdr->addr2), + print_mac(mac3, hdr->addr3)); + return TXRX_DROP; + } + break; + case (IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS): + /* RA TA DA SA */ + memcpy(dst, hdr->addr3, ETH_ALEN); + memcpy(src, hdr->addr4, ETH_ALEN); + + if (unlikely(sdata->type != IEEE80211_IF_TYPE_WDS)) { + if (net_ratelimit()) + printk(KERN_DEBUG "%s: dropped FromDS&ToDS " + "frame (RA=%s TA=%s DA=%s SA=%s)\n", + rx->dev->name, + print_mac(mac, hdr->addr1), + print_mac(mac2, hdr->addr2), + print_mac(mac3, hdr->addr3), + print_mac(mac4, hdr->addr4)); + return TXRX_DROP; + } + break; + case IEEE80211_FCTL_FROMDS: + /* DA BSSID SA */ + memcpy(dst, hdr->addr1, ETH_ALEN); + memcpy(src, hdr->addr3, ETH_ALEN); + + if (sdata->type != IEEE80211_IF_TYPE_STA || + (is_multicast_ether_addr(dst) && + !compare_ether_addr(src, dev->dev_addr))) + return TXRX_DROP; + break; + case 0: + /* DA SA BSSID */ + memcpy(dst, hdr->addr1, ETH_ALEN); + memcpy(src, hdr->addr2, ETH_ALEN); + + if (sdata->type != IEEE80211_IF_TYPE_IBSS) { + if (net_ratelimit()) { + printk(KERN_DEBUG "%s: dropped IBSS frame " + "(DA=%s SA=%s BSSID=%s)\n", + dev->name, + print_mac(mac, hdr->addr1), + print_mac(mac2, hdr->addr2), + print_mac(mac3, hdr->addr3)); + } + return TXRX_DROP; + } + break; + } + + payload = skb->data + hdrlen; + + if (unlikely(skb->len - hdrlen < 8)) { + if (net_ratelimit()) { + printk(KERN_DEBUG "%s: RX too short data frame " + "payload\n", dev->name); + } + return TXRX_DROP; + } + + ethertype = (payload[6] << 8) | payload[7]; + + if (likely((compare_ether_addr(payload, rfc1042_header) == 0 && + ethertype != ETH_P_AARP && ethertype != ETH_P_IPX) || + compare_ether_addr(payload, 
bridge_tunnel_header) == 0)) { + /* remove RFC1042 or Bridge-Tunnel encapsulation and + * replace EtherType */ + skb_pull(skb, hdrlen + 6); + memcpy(skb_push(skb, ETH_ALEN), src, ETH_ALEN); + memcpy(skb_push(skb, ETH_ALEN), dst, ETH_ALEN); + } else { + struct ethhdr *ehdr; + __be16 len; + skb_pull(skb, hdrlen); + len = htons(skb->len); + ehdr = (struct ethhdr *) skb_push(skb, sizeof(struct ethhdr)); + memcpy(ehdr->h_dest, dst, ETH_ALEN); + memcpy(ehdr->h_source, src, ETH_ALEN); + ehdr->h_proto = len; + } + skb->dev = dev; + + skb2 = NULL; + + dev->stats.rx_packets++; + dev->stats.rx_bytes += skb->len; + + if (local->bridge_packets && (sdata->type == IEEE80211_IF_TYPE_AP + || sdata->type == IEEE80211_IF_TYPE_VLAN) && + (rx->flags & IEEE80211_TXRXD_RXRA_MATCH)) { + if (is_multicast_ether_addr(skb->data)) { + /* send multicast frames both to higher layers in + * local net stack and back to the wireless media */ + skb2 = skb_copy(skb, GFP_ATOMIC); + if (!skb2 && net_ratelimit()) + printk(KERN_DEBUG "%s: failed to clone " + "multicast frame\n", dev->name); + } else { + struct sta_info *dsta; + dsta = sta_info_get(local, skb->data); + if (dsta && !dsta->dev) { + if (net_ratelimit()) + printk(KERN_DEBUG "Station with null " + "dev structure!\n"); + } else if (dsta && dsta->dev == dev) { + /* Destination station is associated to this + * AP, so send the frame directly to it and + * do not pass the frame to local net stack. + */ + skb2 = skb; + skb = NULL; + } + if (dsta) + sta_info_put(dsta); + } + } + + if (skb) { + /* deliver to local stack */ + skb->protocol = eth_type_trans(skb, dev); + memset(skb->cb, 0, sizeof(skb->cb)); + netif_rx(skb); + } + + if (skb2) { + /* send to wireless media */ + skb2->protocol = __constant_htons(ETH_P_802_3); + skb_set_network_header(skb2, 0); + skb_set_mac_header(skb2, 0); + dev_queue_xmit(skb2); + } + + return TXRX_QUEUED; +} + +static ieee80211_txrx_result +ieee80211_rx_h_mgmt(struct ieee80211_txrx_data *rx) +{ + struct ieee80211_sub_if_data *sdata; + + if (!(rx->flags & IEEE80211_TXRXD_RXRA_MATCH)) + return TXRX_DROP; + + sdata = IEEE80211_DEV_TO_SUB_IF(rx->dev); + if ((sdata->type == IEEE80211_IF_TYPE_STA || + sdata->type == IEEE80211_IF_TYPE_IBSS) && + !(sdata->flags & IEEE80211_SDATA_USERSPACE_MLME)) + ieee80211_sta_rx_mgmt(rx->dev, rx->skb, rx->u.rx.status); + else + return TXRX_DROP; + + return TXRX_QUEUED; +} + +static inline ieee80211_txrx_result __ieee80211_invoke_rx_handlers( + struct ieee80211_local *local, + ieee80211_rx_handler *handlers, + struct ieee80211_txrx_data *rx, + struct sta_info *sta) +{ + ieee80211_rx_handler *handler; + ieee80211_txrx_result res = TXRX_DROP; + + for (handler = handlers; *handler != NULL; handler++) { + res = (*handler)(rx); + + switch (res) { + case TXRX_CONTINUE: + continue; + case TXRX_DROP: + I802_DEBUG_INC(local->rx_handlers_drop); + if (sta) + sta->rx_dropped++; + break; + case TXRX_QUEUED: + I802_DEBUG_INC(local->rx_handlers_queued); + break; + } + break; + } + + if (res == TXRX_DROP) + dev_kfree_skb(rx->skb); + return res; +} + +static inline void ieee80211_invoke_rx_handlers(struct ieee80211_local *local, + ieee80211_rx_handler *handlers, + struct ieee80211_txrx_data *rx, + struct sta_info *sta) +{ + if (__ieee80211_invoke_rx_handlers(local, handlers, rx, sta) == + TXRX_CONTINUE) + dev_kfree_skb(rx->skb); +} + +static void ieee80211_rx_michael_mic_report(struct net_device *dev, + struct ieee80211_hdr *hdr, + struct sta_info *sta, + struct ieee80211_txrx_data *rx) +{ + int keyidx, hdrlen; + DECLARE_MAC_BUF(mac); 
+ DECLARE_MAC_BUF(mac2); + + hdrlen = ieee80211_get_hdrlen_from_skb(rx->skb); + if (rx->skb->len >= hdrlen + 4) + keyidx = rx->skb->data[hdrlen + 3] >> 6; + else + keyidx = -1; + + if (net_ratelimit()) + printk(KERN_DEBUG "%s: TKIP hwaccel reported Michael MIC " + "failure from %s to %s keyidx=%d\n", + dev->name, print_mac(mac, hdr->addr2), + print_mac(mac2, hdr->addr1), keyidx); + + if (!sta) { + /* + * Some hardware seem to generate incorrect Michael MIC + * reports; ignore them to avoid triggering countermeasures. + */ + if (net_ratelimit()) + printk(KERN_DEBUG "%s: ignored spurious Michael MIC " + "error for unknown address %s\n", + dev->name, print_mac(mac, hdr->addr2)); + goto ignore; + } + + if (!(rx->fc & IEEE80211_FCTL_PROTECTED)) { + if (net_ratelimit()) + printk(KERN_DEBUG "%s: ignored spurious Michael MIC " + "error for a frame with no PROTECTED flag (src " + "%s)\n", dev->name, print_mac(mac, hdr->addr2)); + goto ignore; + } + + if (rx->sdata->type == IEEE80211_IF_TYPE_AP && keyidx) { + /* + * APs with pairwise keys should never receive Michael MIC + * errors for non-zero keyidx because these are reserved for + * group keys and only the AP is sending real multicast + * frames in the BSS. + */ + if (net_ratelimit()) + printk(KERN_DEBUG "%s: ignored Michael MIC error for " + "a frame with non-zero keyidx (%d)" + " (src %s)\n", dev->name, keyidx, + print_mac(mac, hdr->addr2)); + goto ignore; + } + + if ((rx->fc & IEEE80211_FCTL_FTYPE) != IEEE80211_FTYPE_DATA && + ((rx->fc & IEEE80211_FCTL_FTYPE) != IEEE80211_FTYPE_MGMT || + (rx->fc & IEEE80211_FCTL_STYPE) != IEEE80211_STYPE_AUTH)) { + if (net_ratelimit()) + printk(KERN_DEBUG "%s: ignored spurious Michael MIC " + "error for a frame that cannot be encrypted " + "(fc=0x%04x) (src %s)\n", + dev->name, rx->fc, print_mac(mac, hdr->addr2)); + goto ignore; + } + + mac80211_ev_michael_mic_failure(rx->dev, keyidx, hdr); + ignore: + dev_kfree_skb(rx->skb); + rx->skb = NULL; +} + +ieee80211_rx_handler ieee80211_rx_handlers[] = +{ + ieee80211_rx_h_if_stats, + ieee80211_rx_h_passive_scan, + ieee80211_rx_h_check, + ieee80211_rx_h_decrypt, + ieee80211_rx_h_sta_process, + ieee80211_rx_h_defragment, + ieee80211_rx_h_ps_poll, + ieee80211_rx_h_michael_mic_verify, + /* this must be after decryption - so header is counted in MPDU mic + * must be before pae and data, so QOS_DATA format frames + * are not passed to user space by these functions + */ + ieee80211_rx_h_remove_qos_control, + ieee80211_rx_h_802_1x_pae, + ieee80211_rx_h_drop_unencrypted, + ieee80211_rx_h_data, + ieee80211_rx_h_mgmt, + NULL +}; + +/* main receive path */ + +static int prepare_for_handlers(struct ieee80211_sub_if_data *sdata, + u8 *bssid, struct ieee80211_txrx_data *rx, + struct ieee80211_hdr *hdr) +{ + int multicast = is_multicast_ether_addr(hdr->addr1); + + switch (sdata->type) { + case IEEE80211_IF_TYPE_STA: + if (!bssid) + return 0; + if (!ieee80211_bssid_match(bssid, sdata->u.sta.bssid)) { + if (!(rx->flags & IEEE80211_TXRXD_RXIN_SCAN)) + return 0; + rx->flags &= ~IEEE80211_TXRXD_RXRA_MATCH; + } else if (!multicast && + compare_ether_addr(sdata->dev->dev_addr, + hdr->addr1) != 0) { + if (!(sdata->dev->flags & IFF_PROMISC)) + return 0; + rx->flags &= ~IEEE80211_TXRXD_RXRA_MATCH; + } + break; + case IEEE80211_IF_TYPE_IBSS: + if (!bssid) + return 0; + if (!ieee80211_bssid_match(bssid, sdata->u.sta.bssid)) { + if (!(rx->flags & IEEE80211_TXRXD_RXIN_SCAN)) + return 0; + rx->flags &= ~IEEE80211_TXRXD_RXRA_MATCH; + } else if (!multicast && + 
compare_ether_addr(sdata->dev->dev_addr, + hdr->addr1) != 0) { + if (!(sdata->dev->flags & IFF_PROMISC)) + return 0; + rx->flags &= ~IEEE80211_TXRXD_RXRA_MATCH; + } else if (!rx->sta) + rx->sta = ieee80211_ibss_add_sta(sdata->dev, rx->skb, + bssid, hdr->addr2); + break; + case IEEE80211_IF_TYPE_VLAN: + case IEEE80211_IF_TYPE_AP: + if (!bssid) { + if (compare_ether_addr(sdata->dev->dev_addr, + hdr->addr1)) + return 0; + } else if (!ieee80211_bssid_match(bssid, + sdata->dev->dev_addr)) { + if (!(rx->flags & IEEE80211_TXRXD_RXIN_SCAN)) + return 0; + rx->flags &= ~IEEE80211_TXRXD_RXRA_MATCH; + } + if (sdata->dev == sdata->local->mdev && + !(rx->flags & IEEE80211_TXRXD_RXIN_SCAN)) + /* do not receive anything via + * master device when not scanning */ + return 0; + break; + case IEEE80211_IF_TYPE_WDS: + if (bssid || + (rx->fc & IEEE80211_FCTL_FTYPE) != IEEE80211_FTYPE_DATA) + return 0; + if (compare_ether_addr(sdata->u.wds.remote_addr, hdr->addr2)) + return 0; + break; + case IEEE80211_IF_TYPE_MNTR: + /* take everything */ + break; + case IEEE80211_IF_TYPE_INVALID: + /* should never get here */ + WARN_ON(1); + break; + } + + return 1; +} + +/* + * This is the receive path handler. It is called by a low level driver when an + * 802.11 MPDU is received from the hardware. + */ +void __ieee80211_rx(struct ieee80211_hw *hw, struct sk_buff *skb, + struct ieee80211_rx_status *status) +{ + struct ieee80211_local *local = hw_to_local(hw); + struct ieee80211_sub_if_data *sdata; + struct sta_info *sta; + struct ieee80211_hdr *hdr; + struct ieee80211_txrx_data rx; + u16 type; + int prepres; + struct ieee80211_sub_if_data *prev = NULL; + struct sk_buff *skb_new; + u8 *bssid; + + /* + * key references and virtual interfaces are protected using RCU + * and this requires that we are in a read-side RCU section during + * receive processing + */ + rcu_read_lock(); + + /* + * Frames with failed FCS/PLCP checksum are not returned, + * all other frames are returned without radiotap header + * if it was previously present. + * Also, frames with less than 16 bytes are dropped. 
+ */ + skb = ieee80211_rx_monitor(local, skb, status); + if (!skb) { + rcu_read_unlock(); + return; + } + + hdr = (struct ieee80211_hdr *) skb->data; + memset(&rx, 0, sizeof(rx)); + rx.skb = skb; + rx.local = local; + + rx.u.rx.status = status; + rx.fc = le16_to_cpu(hdr->frame_control); + type = rx.fc & IEEE80211_FCTL_FTYPE; + + if (type == IEEE80211_FTYPE_DATA || type == IEEE80211_FTYPE_MGMT) + local->dot11ReceivedFragmentCount++; + + sta = rx.sta = sta_info_get(local, hdr->addr2); + if (sta) { + rx.dev = rx.sta->dev; + rx.sdata = IEEE80211_DEV_TO_SUB_IF(rx.dev); + } + + if ((status->flag & RX_FLAG_MMIC_ERROR)) { + ieee80211_rx_michael_mic_report(local->mdev, hdr, sta, &rx); + goto end; + } + + if (unlikely(local->sta_scanning)) + rx.flags |= IEEE80211_TXRXD_RXIN_SCAN; + + if (__ieee80211_invoke_rx_handlers(local, local->rx_pre_handlers, &rx, + sta) != TXRX_CONTINUE) + goto end; + skb = rx.skb; + + if (sta && !(sta->flags & (WLAN_STA_WDS | WLAN_STA_ASSOC_AP)) && + !atomic_read(&local->iff_promiscs) && + !is_multicast_ether_addr(hdr->addr1)) { + rx.flags |= IEEE80211_TXRXD_RXRA_MATCH; + ieee80211_invoke_rx_handlers(local, local->rx_handlers, &rx, + rx.sta); + sta_info_put(sta); + rcu_read_unlock(); + return; + } + + bssid = ieee80211_get_bssid(hdr, skb->len); + + list_for_each_entry_rcu(sdata, &local->interfaces, list) { + if (!netif_running(sdata->dev)) + continue; + + if (sdata->type == IEEE80211_IF_TYPE_MNTR) + continue; + + rx.flags |= IEEE80211_TXRXD_RXRA_MATCH; + prepres = prepare_for_handlers(sdata, bssid, &rx, hdr); + /* prepare_for_handlers can change sta */ + sta = rx.sta; + + if (!prepres) + continue; + + /* + * frame is destined for this interface, but if it's not + * also for the previous one we handle that after the + * loop to avoid copying the SKB once too much + */ + + if (!prev) { + prev = sdata; + continue; + } + + /* + * frame was destined for the previous interface + * so invoke RX handlers for it + */ + + skb_new = skb_copy(skb, GFP_ATOMIC); + if (!skb_new) { + if (net_ratelimit()) + printk(KERN_DEBUG "%s: failed to copy " + "multicast frame for %s", + wiphy_name(local->hw.wiphy), + prev->dev->name); + continue; + } + rx.skb = skb_new; + rx.dev = prev->dev; + rx.sdata = prev; + ieee80211_invoke_rx_handlers(local, local->rx_handlers, + &rx, sta); + prev = sdata; + } + if (prev) { + rx.skb = skb; + rx.dev = prev->dev; + rx.sdata = prev; + ieee80211_invoke_rx_handlers(local, local->rx_handlers, + &rx, sta); + } else + dev_kfree_skb(skb); + + end: + rcu_read_unlock(); + + if (sta) + sta_info_put(sta); +} +EXPORT_SYMBOL(__ieee80211_rx); + +/* This is a version of the rx handler that can be called from hard irq + * context. 
Post the skb on the queue and schedule the tasklet */ +void ieee80211_rx_irqsafe(struct ieee80211_hw *hw, struct sk_buff *skb, + struct ieee80211_rx_status *status) +{ + struct ieee80211_local *local = hw_to_local(hw); + + BUILD_BUG_ON(sizeof(struct ieee80211_rx_status) > sizeof(skb->cb)); + + skb->dev = local->mdev; + /* copy status into skb->cb for use by tasklet */ + memcpy(skb->cb, status, sizeof(*status)); + skb->pkt_type = IEEE80211_RX_MSG; + skb_queue_tail(&local->skb_queue, skb); + tasklet_schedule(&local->tasklet); +} +EXPORT_SYMBOL(ieee80211_rx_irqsafe); diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c index ab7b1f067c6e3ca29481fae3819de22b95c9696e..e8491554a5dc1129950d25f94022391304102598 100644 --- a/net/mac80211/sta_info.c +++ b/net/mac80211/sta_info.c @@ -19,7 +19,6 @@ #include "ieee80211_i.h" #include "ieee80211_rate.h" #include "sta_info.h" -#include "debugfs_key.h" #include "debugfs_sta.h" /* Caller must hold local->sta_lock */ @@ -32,38 +31,34 @@ static void sta_info_hash_add(struct ieee80211_local *local, /* Caller must hold local->sta_lock */ -static void sta_info_hash_del(struct ieee80211_local *local, - struct sta_info *sta) +static int sta_info_hash_del(struct ieee80211_local *local, + struct sta_info *sta) { struct sta_info *s; s = local->sta_hash[STA_HASH(sta->addr)]; if (!s) - return; - if (memcmp(s->addr, sta->addr, ETH_ALEN) == 0) { + return -ENOENT; + if (s == sta) { local->sta_hash[STA_HASH(sta->addr)] = s->hnext; - return; + return 0; } - while (s->hnext && memcmp(s->hnext->addr, sta->addr, ETH_ALEN) != 0) + while (s->hnext && s->hnext != sta) s = s->hnext; - if (s->hnext) - s->hnext = s->hnext->hnext; - else - printk(KERN_ERR "%s: could not remove STA " MAC_FMT " from " - "hash table\n", local->mdev->name, MAC_ARG(sta->addr)); -} + if (s->hnext) { + s->hnext = sta->hnext; + return 0; + } -static inline void __sta_info_get(struct sta_info *sta) -{ - kref_get(&sta->kref); + return -ENOENT; } struct sta_info *sta_info_get(struct ieee80211_local *local, u8 *addr) { struct sta_info *sta; - spin_lock_bh(&local->sta_lock); + read_lock_bh(&local->sta_lock); sta = local->sta_hash[STA_HASH(addr)]; while (sta) { if (memcmp(sta->addr, addr, ETH_ALEN) == 0) { @@ -72,7 +67,7 @@ struct sta_info *sta_info_get(struct ieee80211_local *local, u8 *addr) } sta = sta->hnext; } - spin_unlock_bh(&local->sta_lock); + read_unlock_bh(&local->sta_lock); return sta; } @@ -85,7 +80,7 @@ int sta_info_min_txrate_get(struct ieee80211_local *local) int min_txrate = 9999999; int i; - spin_lock_bh(&local->sta_lock); + read_lock_bh(&local->sta_lock); mode = local->oper_hw_mode; for (i = 0; i < STA_HASH_SIZE; i++) { sta = local->sta_hash[i]; @@ -95,7 +90,7 @@ int sta_info_min_txrate_get(struct ieee80211_local *local) sta = sta->hnext; } } - spin_unlock_bh(&local->sta_lock); + read_unlock_bh(&local->sta_lock); if (min_txrate == 9999999) min_txrate = 0; @@ -122,8 +117,6 @@ static void sta_info_release(struct kref *kref) } rate_control_free_sta(sta->rate_ctrl, sta->rate_ctrl_priv); rate_control_put(sta->rate_ctrl); - if (sta->key) - ieee80211_debugfs_key_sta_del(sta->key, sta); kfree(sta); } @@ -139,6 +132,7 @@ struct sta_info * sta_info_add(struct ieee80211_local *local, struct net_device *dev, u8 *addr, gfp_t gfp) { struct sta_info *sta; + DECLARE_MAC_BUF(mac); sta = kzalloc(sizeof(*sta), gfp); if (!sta) @@ -150,7 +144,6 @@ struct sta_info * sta_info_add(struct ieee80211_local *local, sta->rate_ctrl_priv = rate_control_alloc_sta(sta->rate_ctrl, gfp); if (!sta->rate_ctrl_priv) { 
rate_control_put(sta->rate_ctrl); - kref_put(&sta->kref, sta_info_release); kfree(sta); return NULL; } @@ -162,63 +155,40 @@ struct sta_info * sta_info_add(struct ieee80211_local *local, skb_queue_head_init(&sta->tx_filtered); __sta_info_get(sta); /* sta used by caller, decremented by * sta_info_put() */ - spin_lock_bh(&local->sta_lock); + write_lock_bh(&local->sta_lock); list_add(&sta->list, &local->sta_list); local->num_sta++; sta_info_hash_add(local, sta); - spin_unlock_bh(&local->sta_lock); - if (local->ops->sta_table_notification) - local->ops->sta_table_notification(local_to_hw(local), - local->num_sta); - sta->key_idx_compression = HW_KEY_IDX_INVALID; + if (local->ops->sta_notify) + local->ops->sta_notify(local_to_hw(local), dev->ifindex, + STA_NOTIFY_ADD, addr); + write_unlock_bh(&local->sta_lock); #ifdef CONFIG_MAC80211_VERBOSE_DEBUG - printk(KERN_DEBUG "%s: Added STA " MAC_FMT "\n", - local->mdev->name, MAC_ARG(addr)); + printk(KERN_DEBUG "%s: Added STA %s\n", + wiphy_name(local->hw.wiphy), print_mac(mac, addr)); #endif /* CONFIG_MAC80211_VERBOSE_DEBUG */ #ifdef CONFIG_MAC80211_DEBUGFS - if (!in_interrupt()) { - sta->debugfs_registered = 1; - ieee80211_sta_debugfs_add(sta); - rate_control_add_sta_debugfs(sta); - } else { - /* debugfs entry adding might sleep, so schedule process - * context task for adding entry for STAs that do not yet - * have one. */ - queue_work(local->hw.workqueue, &local->sta_debugfs_add); - } + /* debugfs entry adding might sleep, so schedule process + * context task for adding entry for STAs that do not yet + * have one. */ + queue_work(local->hw.workqueue, &local->sta_debugfs_add); #endif return sta; } -static void finish_sta_info_free(struct ieee80211_local *local, - struct sta_info *sta) -{ -#ifdef CONFIG_MAC80211_VERBOSE_DEBUG - printk(KERN_DEBUG "%s: Removed STA " MAC_FMT "\n", - local->mdev->name, MAC_ARG(sta->addr)); -#endif /* CONFIG_MAC80211_VERBOSE_DEBUG */ - - if (sta->key) { - ieee80211_debugfs_key_remove(sta->key); - ieee80211_key_free(sta->key); - sta->key = NULL; - } - - rate_control_remove_sta_debugfs(sta); - ieee80211_sta_debugfs_remove(sta); - - sta_info_put(sta); -} - -static void sta_info_remove(struct sta_info *sta) +/* Caller must hold local->sta_lock */ +void sta_info_remove(struct sta_info *sta) { struct ieee80211_local *local = sta->local; struct ieee80211_sub_if_data *sdata; - sta_info_hash_del(local, sta); + /* don't do anything if we've been removed already */ + if (sta_info_hash_del(local, sta)) + return; + list_del(&sta->list); sdata = IEEE80211_DEV_TO_SUB_IF(sta->dev); if (sta->flags & WLAN_STA_PS) { @@ -228,61 +198,45 @@ static void sta_info_remove(struct sta_info *sta) } local->num_sta--; sta_info_remove_aid_ptr(sta); + } -void sta_info_free(struct sta_info *sta, int locked) +void sta_info_free(struct sta_info *sta) { struct sk_buff *skb; struct ieee80211_local *local = sta->local; + DECLARE_MAC_BUF(mac); - if (!locked) { - spin_lock_bh(&local->sta_lock); - sta_info_remove(sta); - spin_unlock_bh(&local->sta_lock); - } else { - sta_info_remove(sta); - } - if (local->ops->sta_table_notification) - local->ops->sta_table_notification(local_to_hw(local), - local->num_sta); + might_sleep(); + + write_lock_bh(&local->sta_lock); + sta_info_remove(sta); + write_unlock_bh(&local->sta_lock); while ((skb = skb_dequeue(&sta->ps_tx_buf)) != NULL) { local->total_ps_buffered--; - dev_kfree_skb_any(skb); + dev_kfree_skb(skb); } while ((skb = skb_dequeue(&sta->tx_filtered)) != NULL) { - dev_kfree_skb_any(skb); + dev_kfree_skb(skb); } - if 
(sta->key) { - if (local->ops->set_key) { - struct ieee80211_key_conf *key; - key = ieee80211_key_data2conf(local, sta->key); - if (key) { - local->ops->set_key(local_to_hw(local), - DISABLE_KEY, - sta->addr, key, sta->aid); - kfree(key); - } - } - } else if (sta->key_idx_compression != HW_KEY_IDX_INVALID) { - struct ieee80211_key_conf conf; - memset(&conf, 0, sizeof(conf)); - conf.hw_key_idx = sta->key_idx_compression; - conf.alg = ALG_NULL; - conf.flags |= IEEE80211_KEY_FORCE_SW_ENCRYPT; - local->ops->set_key(local_to_hw(local), DISABLE_KEY, - sta->addr, &conf, sta->aid); - sta->key_idx_compression = HW_KEY_IDX_INVALID; - } +#ifdef CONFIG_MAC80211_VERBOSE_DEBUG + printk(KERN_DEBUG "%s: Removed STA %s\n", + wiphy_name(local->hw.wiphy), print_mac(mac, sta->addr)); +#endif /* CONFIG_MAC80211_VERBOSE_DEBUG */ -#ifdef CONFIG_MAC80211_DEBUGFS - if (in_atomic()) { - list_add(&sta->list, &local->deleted_sta_list); - queue_work(local->hw.workqueue, &local->sta_debugfs_add); - } else -#endif - finish_sta_info_free(local, sta); + ieee80211_key_free(sta->key); + sta->key = NULL; + + if (local->ops->sta_notify) + local->ops->sta_notify(local_to_hw(local), sta->dev->ifindex, + STA_NOTIFY_REMOVE, sta->addr); + + rate_control_remove_sta_debugfs(sta); + ieee80211_sta_debugfs_remove(sta); + + sta_info_put(sta); } @@ -312,6 +266,7 @@ static void sta_info_cleanup_expire_buffered(struct ieee80211_local *local, { unsigned long flags; struct sk_buff *skb; + DECLARE_MAC_BUF(mac); if (skb_queue_empty(&sta->ps_tx_buf)) return; @@ -330,7 +285,7 @@ static void sta_info_cleanup_expire_buffered(struct ieee80211_local *local, if (skb) { local->total_ps_buffered--; printk(KERN_DEBUG "Buffered frame expired (STA " - MAC_FMT ")\n", MAC_ARG(sta->addr)); + "%s)\n", print_mac(mac, sta->addr)); dev_kfree_skb(skb); } else break; @@ -343,13 +298,13 @@ static void sta_info_cleanup(unsigned long data) struct ieee80211_local *local = (struct ieee80211_local *) data; struct sta_info *sta; - spin_lock_bh(&local->sta_lock); + read_lock_bh(&local->sta_lock); list_for_each_entry(sta, &local->sta_list, list) { __sta_info_get(sta); sta_info_cleanup_expire_buffered(local, sta); sta_info_put(sta); } - spin_unlock_bh(&local->sta_lock); + read_unlock_bh(&local->sta_lock); local->sta_cleanup.expires = jiffies + STA_INFO_CLEANUP_INTERVAL; add_timer(&local->sta_cleanup); @@ -362,36 +317,21 @@ static void sta_info_debugfs_add_task(struct work_struct *work) container_of(work, struct ieee80211_local, sta_debugfs_add); struct sta_info *sta, *tmp; - while (1) { - spin_lock_bh(&local->sta_lock); - if (!list_empty(&local->deleted_sta_list)) { - sta = list_entry(local->deleted_sta_list.next, - struct sta_info, list); - list_del(local->deleted_sta_list.next); - } else - sta = NULL; - spin_unlock_bh(&local->sta_lock); - if (!sta) - break; - finish_sta_info_free(local, sta); - } - while (1) { sta = NULL; - spin_lock_bh(&local->sta_lock); + read_lock_bh(&local->sta_lock); list_for_each_entry(tmp, &local->sta_list, list) { - if (!tmp->debugfs_registered) { + if (!tmp->debugfs.dir) { sta = tmp; __sta_info_get(sta); break; } } - spin_unlock_bh(&local->sta_lock); + read_unlock_bh(&local->sta_lock); if (!sta) break; - sta->debugfs_registered = 1; ieee80211_sta_debugfs_add(sta); rate_control_add_sta_debugfs(sta); sta_info_put(sta); @@ -401,9 +341,8 @@ static void sta_info_debugfs_add_task(struct work_struct *work) void sta_info_init(struct ieee80211_local *local) { - spin_lock_init(&local->sta_lock); + rwlock_init(&local->sta_lock); 
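	/*
	 * Note on locking: sta_lock is a rwlock.  Read-mostly lookups take
	 * it for reading, e.g.
	 *
	 *	read_lock_bh(&local->sta_lock);
	 *	... walk local->sta_hash or local->sta_list ...
	 *	read_unlock_bh(&local->sta_lock);
	 *
	 * as in sta_info_get() and the cleanup timer, while sta_info_add(),
	 * sta_info_free() and sta_info_flush() take write_lock_bh() to
	 * modify the hash table and the station list.
	 */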
INIT_LIST_HEAD(&local->sta_list); - INIT_LIST_HEAD(&local->deleted_sta_list); init_timer(&local->sta_cleanup); local->sta_cleanup.expires = jiffies + STA_INFO_CLEANUP_INTERVAL; @@ -423,17 +362,8 @@ int sta_info_start(struct ieee80211_local *local) void sta_info_stop(struct ieee80211_local *local) { - struct sta_info *sta, *tmp; - del_timer(&local->sta_cleanup); - - list_for_each_entry_safe(sta, tmp, &local->sta_list, list) { - /* sta_info_free must be called with 0 as the last - * parameter to ensure all debugfs sta entries are - * unregistered. We don't need locking at this - * point. */ - sta_info_free(sta, 0); - } + sta_info_flush(local, NULL); } void sta_info_remove_aid_ptr(struct sta_info *sta) @@ -461,10 +391,19 @@ void sta_info_remove_aid_ptr(struct sta_info *sta) void sta_info_flush(struct ieee80211_local *local, struct net_device *dev) { struct sta_info *sta, *tmp; + LIST_HEAD(tmp_list); - spin_lock_bh(&local->sta_lock); + write_lock_bh(&local->sta_lock); list_for_each_entry_safe(sta, tmp, &local->sta_list, list) - if (!dev || dev == sta->dev) - sta_info_free(sta, 1); - spin_unlock_bh(&local->sta_lock); + if (!dev || dev == sta->dev) { + __sta_info_get(sta); + sta_info_remove(sta); + list_add_tail(&sta->list, &tmp_list); + } + write_unlock_bh(&local->sta_lock); + + list_for_each_entry_safe(sta, tmp, &tmp_list, list) { + sta_info_free(sta); + sta_info_put(sta); + } } diff --git a/net/mac80211/sta_info.h b/net/mac80211/sta_info.h index b5591d2f60a4fcef8032bb1cd78a09fe1997c1da..8f7ebe41c0246a91d687a1ac91e79509dc062b5f 100644 --- a/net/mac80211/sta_info.h +++ b/net/mac80211/sta_info.h @@ -26,6 +26,8 @@ * send and receive non-IEEE 802.1X frames */ #define WLAN_STA_SHORT_PREAMBLE BIT(7) +/* whether this is an AP that we are associated with as a client */ +#define WLAN_STA_ASSOC_AP BIT(8) #define WLAN_STA_WME BIT(9) #define WLAN_STA_WDS BIT(27) @@ -90,27 +92,11 @@ struct sta_info { int channel_use; int channel_use_raw; - u8 antenna_sel_tx; - u8 antenna_sel_rx; - - - int key_idx_compression; /* key table index for compression and TX - * filtering; used only if sta->key is not - * set */ - -#ifdef CONFIG_MAC80211_DEBUGFS - int debugfs_registered; -#endif - int assoc_ap; /* whether this is an AP that we are - * associated with as a client */ - #ifdef CONFIG_MAC80211_DEBUG_COUNTERS unsigned int wme_rx_queue[NUM_RX_DATA_QUEUES]; unsigned int wme_tx_queue[NUM_RX_DATA_QUEUES]; #endif /* CONFIG_MAC80211_DEBUG_COUNTERS */ - int vlan_id; - u16 listen_interval; #ifdef CONFIG_MAC80211_DEBUGFS @@ -149,12 +135,18 @@ struct sta_info { */ #define STA_INFO_CLEANUP_INTERVAL (10 * HZ) +static inline void __sta_info_get(struct sta_info *sta) +{ + kref_get(&sta->kref); +} + struct sta_info * sta_info_get(struct ieee80211_local *local, u8 *addr); int sta_info_min_txrate_get(struct ieee80211_local *local); void sta_info_put(struct sta_info *sta); struct sta_info * sta_info_add(struct ieee80211_local *local, struct net_device *dev, u8 *addr, gfp_t gfp); -void sta_info_free(struct sta_info *sta, int locked); +void sta_info_remove(struct sta_info *sta); +void sta_info_free(struct sta_info *sta); void sta_info_init(struct ieee80211_local *local); int sta_info_start(struct ieee80211_local *local); void sta_info_stop(struct ieee80211_local *local); diff --git a/net/mac80211/tkip.c b/net/mac80211/tkip.c index 41621720e5601d4e6344598431fedb7148aa94b8..3abe194e4d55328ae61aba0bdcd1515a299212fd 100644 --- a/net/mac80211/tkip.c +++ b/net/mac80211/tkip.c @@ -182,7 +182,7 @@ u8 * ieee80211_tkip_add_iv(u8 *pos, struct 
ieee80211_key *key, *pos++ = iv0; *pos++ = iv1; *pos++ = iv2; - *pos++ = (key->keyidx << 6) | (1 << 5) /* Ext IV */; + *pos++ = (key->conf.keyidx << 6) | (1 << 5) /* Ext IV */; *pos++ = key->u.tkip.iv32 & 0xff; *pos++ = (key->u.tkip.iv32 >> 8) & 0xff; *pos++ = (key->u.tkip.iv32 >> 16) & 0xff; @@ -194,7 +194,7 @@ u8 * ieee80211_tkip_add_iv(u8 *pos, struct ieee80211_key *key, void ieee80211_tkip_gen_phase1key(struct ieee80211_key *key, u8 *ta, u16 *phase1key) { - tkip_mixing_phase1(ta, &key->key[ALG_TKIP_TEMP_ENCR_KEY], + tkip_mixing_phase1(ta, &key->conf.key[ALG_TKIP_TEMP_ENCR_KEY], key->u.tkip.iv32, phase1key); } @@ -204,12 +204,13 @@ void ieee80211_tkip_gen_rc4key(struct ieee80211_key *key, u8 *ta, /* Calculate per-packet key */ if (key->u.tkip.iv16 == 0 || !key->u.tkip.tx_initialized) { /* IV16 wrapped around - perform TKIP phase 1 */ - tkip_mixing_phase1(ta, &key->key[ALG_TKIP_TEMP_ENCR_KEY], + tkip_mixing_phase1(ta, &key->conf.key[ALG_TKIP_TEMP_ENCR_KEY], key->u.tkip.iv32, key->u.tkip.p1k); key->u.tkip.tx_initialized = 1; } - tkip_mixing_phase2(key->u.tkip.p1k, &key->key[ALG_TKIP_TEMP_ENCR_KEY], + tkip_mixing_phase2(key->u.tkip.p1k, + &key->conf.key[ALG_TKIP_TEMP_ENCR_KEY], key->u.tkip.iv16, rc4key); } @@ -237,7 +238,8 @@ void ieee80211_tkip_encrypt_data(struct crypto_blkcipher *tfm, int ieee80211_tkip_decrypt_data(struct crypto_blkcipher *tfm, struct ieee80211_key *key, u8 *payload, size_t payload_len, u8 *ta, - int only_iv, int queue) + int only_iv, int queue, + u32 *out_iv32, u16 *out_iv16) { u32 iv32; u32 iv16; @@ -266,7 +268,7 @@ int ieee80211_tkip_decrypt_data(struct crypto_blkcipher *tfm, if (!(keyid & (1 << 5))) return TKIP_DECRYPT_NO_EXT_IV; - if ((keyid >> 6) != key->keyidx) + if ((keyid >> 6) != key->conf.keyidx) return TKIP_DECRYPT_INVALID_KEYIDX; if (key->u.tkip.rx_initialized[queue] && @@ -274,9 +276,10 @@ int ieee80211_tkip_decrypt_data(struct crypto_blkcipher *tfm, (iv32 == key->u.tkip.iv32_rx[queue] && iv16 <= key->u.tkip.iv16_rx[queue]))) { #ifdef CONFIG_TKIP_DEBUG + DECLARE_MAC_BUF(mac); printk(KERN_DEBUG "TKIP replay detected for RX frame from " - MAC_FMT " (RX IV (%04x,%02x) <= prev. IV (%04x,%02x)\n", - MAC_ARG(ta), + "%s (RX IV (%04x,%02x) <= prev. 
IV (%04x,%02x)\n", + print_mac(mac, ta), + iv32, iv16, key->u.tkip.iv32_rx[queue], + key->u.tkip.iv16_rx[queue]); #endif /* CONFIG_TKIP_DEBUG */ @@ -293,16 +296,18 @@ int ieee80211_tkip_decrypt_data(struct crypto_blkcipher *tfm, key->u.tkip.iv32_rx[queue] != iv32) { key->u.tkip.rx_initialized[queue] = 1; /* IV16 wrapped around - perform TKIP phase 1 */ - tkip_mixing_phase1(ta, &key->key[ALG_TKIP_TEMP_ENCR_KEY], + tkip_mixing_phase1(ta, &key->conf.key[ALG_TKIP_TEMP_ENCR_KEY], iv32, key->u.tkip.p1k_rx[queue]); #ifdef CONFIG_TKIP_DEBUG { int i; - printk(KERN_DEBUG "TKIP decrypt: Phase1 TA=" MAC_FMT - " TK=", MAC_ARG(ta)); + DECLARE_MAC_BUF(mac); + printk(KERN_DEBUG "TKIP decrypt: Phase1 TA=%s" + " TK=", print_mac(mac, ta)); for (i = 0; i < 16; i++) printk("%02x ", - key->key[ALG_TKIP_TEMP_ENCR_KEY + i]); + key->conf.key[ + ALG_TKIP_TEMP_ENCR_KEY + i]); printk("\n"); printk(KERN_DEBUG "TKIP decrypt: P1K="); for (i = 0; i < 5; i++) @@ -313,7 +318,7 @@ int ieee80211_tkip_decrypt_data(struct crypto_blkcipher *tfm, } tkip_mixing_phase2(key->u.tkip.p1k_rx[queue], - &key->key[ALG_TKIP_TEMP_ENCR_KEY], + &key->conf.key[ALG_TKIP_TEMP_ENCR_KEY], iv16, rc4key); #ifdef CONFIG_TKIP_DEBUG { @@ -328,11 +333,14 @@ int ieee80211_tkip_decrypt_data(struct crypto_blkcipher *tfm, res = ieee80211_wep_decrypt_data(tfm, rc4key, 16, pos, payload_len - 12); done: if (res == TKIP_DECRYPT_OK) { - /* FIX: these should be updated only after Michael MIC has been - * verified */ - /* Record previously received IV */ - key->u.tkip.iv32_rx[queue] = iv32; - key->u.tkip.iv16_rx[queue] = iv16; + /* + * Record previously received IV, will be copied into the + * key information after MIC verification. It is possible + * that we don't catch replays of fragments but that's ok + * because the Michael MIC verification will then fail. + */ + *out_iv32 = iv32; + *out_iv16 = iv16; } return res; diff --git a/net/mac80211/tkip.h b/net/mac80211/tkip.h index a0d181a180495f786156790bbec9dc4470ce7430..73d8ef2a93b0f9ed5821d3a0686b44e65f6e0c63 100644 --- a/net/mac80211/tkip.h +++ b/net/mac80211/tkip.h @@ -31,6 +31,7 @@ enum { int ieee80211_tkip_decrypt_data(struct crypto_blkcipher *tfm, struct ieee80211_key *key, u8 *payload, size_t payload_len, u8 *ta, - int only_iv, int queue); + int only_iv, int queue, + u32 *out_iv32, u16 *out_iv16); #endif /* TKIP_H */ diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c new file mode 100644 index 0000000000000000000000000000000000000000..1a531543bccb79df167148c001ee0c26c45bb079 --- /dev/null +++ b/net/mac80211/tx.c @@ -0,0 +1,1903 @@ +/* + * Copyright 2002-2005, Instant802 Networks, Inc. + * Copyright 2005-2006, Devicescape Software, Inc. + * Copyright 2006-2007 Jiri Benc + * Copyright 2007 Johannes Berg + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * + * Transmit and frame generation functions. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "ieee80211_i.h" +#include "ieee80211_led.h" +#include "wep.h" +#include "wpa.h" +#include "wme.h" +#include "ieee80211_rate.h" + +#define IEEE80211_TX_OK 0 +#define IEEE80211_TX_AGAIN 1 +#define IEEE80211_TX_FRAG_AGAIN 2 + +/* misc utils */ + +static inline void ieee80211_include_sequence(struct ieee80211_sub_if_data *sdata, + struct ieee80211_hdr *hdr) +{ + /* Set the sequence number for this frame.
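 * As a worked example: if sdata->sequence currently holds 0x0120, that
 * value is written into seq_ctrl below and the counter then advances to
 * 0x0130, i.e. by one full sequence number within IEEE80211_SCTL_SEQ,
 * leaving the low four fragment-number bits zero.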
*/ + hdr->seq_ctrl = cpu_to_le16(sdata->sequence); + + /* Increase the sequence number. */ + sdata->sequence = (sdata->sequence + 0x10) & IEEE80211_SCTL_SEQ; +} + +#ifdef CONFIG_MAC80211_LOWTX_FRAME_DUMP +static void ieee80211_dump_frame(const char *ifname, const char *title, + const struct sk_buff *skb) +{ + const struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; + u16 fc; + int hdrlen; + DECLARE_MAC_BUF(mac); + + printk(KERN_DEBUG "%s: %s (len=%d)", ifname, title, skb->len); + if (skb->len < 4) { + printk("\n"); + return; + } + + fc = le16_to_cpu(hdr->frame_control); + hdrlen = ieee80211_get_hdrlen(fc); + if (hdrlen > skb->len) + hdrlen = skb->len; + if (hdrlen >= 4) + printk(" FC=0x%04x DUR=0x%04x", + fc, le16_to_cpu(hdr->duration_id)); + if (hdrlen >= 10) + printk(" A1=%s", print_mac(mac, hdr->addr1)); + if (hdrlen >= 16) + printk(" A2=%s", print_mac(mac, hdr->addr2)); + if (hdrlen >= 24) + printk(" A3=%s", print_mac(mac, hdr->addr3)); + if (hdrlen >= 30) + printk(" A4=%s", print_mac(mac, hdr->addr4)); + printk("\n"); +} +#else /* CONFIG_MAC80211_LOWTX_FRAME_DUMP */ +static inline void ieee80211_dump_frame(const char *ifname, const char *title, + struct sk_buff *skb) +{ +} +#endif /* CONFIG_MAC80211_LOWTX_FRAME_DUMP */ + +static u16 ieee80211_duration(struct ieee80211_txrx_data *tx, int group_addr, + int next_frag_len) +{ + int rate, mrate, erp, dur, i; + struct ieee80211_rate *txrate = tx->u.tx.rate; + struct ieee80211_local *local = tx->local; + struct ieee80211_hw_mode *mode = tx->u.tx.mode; + + erp = txrate->flags & IEEE80211_RATE_ERP; + + /* + * data and mgmt (except PS Poll): + * - during CFP: 32768 + * - during contention period: + * if addr1 is group address: 0 + * if more fragments = 0 and addr1 is individual address: time to + * transmit one ACK plus SIFS + * if more fragments = 1 and addr1 is individual address: time to + * transmit next fragment plus 2 x ACK plus 3 x SIFS + * + * IEEE 802.11, 9.6: + * - control response frame (CTS or ACK) shall be transmitted using the + * same rate as the immediately previous frame in the frame exchange + * sequence, if this rate belongs to the PHY mandatory rates, or else + * at the highest possible rate belonging to the PHY rates in the + * BSSBasicRateSet + */ + + if ((tx->fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_CTL) { + /* TODO: These control frames are not currently sent by + * 80211.o, but should they be implemented, this function + * needs to be updated to support duration field calculation. + * + * RTS: time needed to transmit pending data/mgmt frame plus + * one CTS frame plus one ACK frame plus 3 x SIFS + * CTS: duration of immediately previous RTS minus time + * required to transmit CTS and its SIFS + * ACK: 0 if immediately previous directed data/mgmt had + * more=0, with more=1 duration in ACK frame is duration + * from previous frame minus time needed to transmit ACK + * and its SIFS + * PS Poll: BIT(15) | BIT(14) | aid + */ + return 0; + } + + /* data/mgmt */ + if (0 /* FIX: data/mgmt during CFP */) + return 32768; + + if (group_addr) /* Group address as the destination - no ACK */ + return 0; + + /* Individual destination address: + * IEEE 802.11, Ch. 9.6 (after IEEE 802.11g changes) + * CTS and ACK frames shall be transmitted using the highest rate in + * basic rate set that is less than or equal to the rate of the + * immediately previous frame and that is using the same modulation + * (CCK or OFDM). 
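 * For example, an ACK for an OFDM data frame sent at 48 Mbps in a BSS
 * whose basic rate set is 1, 2, 5.5, 11, 6, 12 and 24 Mbps would be
 * expected at 24 Mbps, the highest OFDM basic rate not above 48 Mbps.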
If no basic rate set matches with these requirements, + * the highest mandatory rate of the PHY that is less than or equal to + * the rate of the previous frame is used. + * Mandatory rates for IEEE 802.11g PHY: 1, 2, 5.5, 11, 6, 12, 24 Mbps + */ + rate = -1; + mrate = 10; /* use 1 Mbps if everything fails */ + for (i = 0; i < mode->num_rates; i++) { + struct ieee80211_rate *r = &mode->rates[i]; + if (r->rate > txrate->rate) + break; + + if (IEEE80211_RATE_MODULATION(txrate->flags) != + IEEE80211_RATE_MODULATION(r->flags)) + continue; + + if (r->flags & IEEE80211_RATE_BASIC) + rate = r->rate; + else if (r->flags & IEEE80211_RATE_MANDATORY) + mrate = r->rate; + } + if (rate == -1) { + /* No matching basic rate found; use highest suitable mandatory + * PHY rate */ + rate = mrate; + } + + /* Time needed to transmit ACK + * (10 bytes + 4-byte FCS = 112 bits) plus SIFS; rounded up + * to closest integer */ + + dur = ieee80211_frame_duration(local, 10, rate, erp, + tx->sdata->flags & IEEE80211_SDATA_SHORT_PREAMBLE); + + if (next_frag_len) { + /* Frame is fragmented: duration increases with time needed to + * transmit next fragment plus ACK and 2 x SIFS. */ + dur *= 2; /* ACK + SIFS */ + /* next fragment */ + dur += ieee80211_frame_duration(local, next_frag_len, + txrate->rate, erp, + tx->sdata->flags & + IEEE80211_SDATA_SHORT_PREAMBLE); + } + + return dur; +} + +static inline int __ieee80211_queue_stopped(const struct ieee80211_local *local, + int queue) +{ + return test_bit(IEEE80211_LINK_STATE_XOFF, &local->state[queue]); +} + +static inline int __ieee80211_queue_pending(const struct ieee80211_local *local, + int queue) +{ + return test_bit(IEEE80211_LINK_STATE_PENDING, &local->state[queue]); +} + +static int inline is_ieee80211_device(struct net_device *dev, + struct net_device *master) +{ + return (wdev_priv(dev->ieee80211_ptr) == + wdev_priv(master->ieee80211_ptr)); +} + +/* tx handlers */ + +static ieee80211_txrx_result +ieee80211_tx_h_check_assoc(struct ieee80211_txrx_data *tx) +{ +#ifdef CONFIG_MAC80211_VERBOSE_DEBUG + struct sk_buff *skb = tx->skb; + struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; +#endif /* CONFIG_MAC80211_VERBOSE_DEBUG */ + u32 sta_flags; + + if (unlikely(tx->flags & IEEE80211_TXRXD_TX_INJECTED)) + return TXRX_CONTINUE; + + if (unlikely(tx->local->sta_scanning != 0) && + ((tx->fc & IEEE80211_FCTL_FTYPE) != IEEE80211_FTYPE_MGMT || + (tx->fc & IEEE80211_FCTL_STYPE) != IEEE80211_STYPE_PROBE_REQ)) + return TXRX_DROP; + + if (tx->flags & IEEE80211_TXRXD_TXPS_BUFFERED) + return TXRX_CONTINUE; + + sta_flags = tx->sta ? tx->sta->flags : 0; + + if (likely(tx->flags & IEEE80211_TXRXD_TXUNICAST)) { + if (unlikely(!(sta_flags & WLAN_STA_ASSOC) && + tx->sdata->type != IEEE80211_IF_TYPE_IBSS && + (tx->fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_DATA)) { +#ifdef CONFIG_MAC80211_VERBOSE_DEBUG + DECLARE_MAC_BUF(mac); + printk(KERN_DEBUG "%s: dropped data frame to not " + "associated station %s\n", + tx->dev->name, print_mac(mac, hdr->addr1)); +#endif /* CONFIG_MAC80211_VERBOSE_DEBUG */ + I802_DEBUG_INC(tx->local->tx_handlers_drop_not_assoc); + return TXRX_DROP; + } + } else { + if (unlikely((tx->fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_DATA && + tx->local->num_sta == 0 && + tx->sdata->type != IEEE80211_IF_TYPE_IBSS)) { + /* + * No associated STAs - no need to send multicast + * frames. 
+ */ + return TXRX_DROP; + } + return TXRX_CONTINUE; + } + + if (unlikely(/* !injected && */ tx->sdata->ieee802_1x && + !(sta_flags & WLAN_STA_AUTHORIZED))) { +#ifdef CONFIG_MAC80211_VERBOSE_DEBUG + DECLARE_MAC_BUF(mac); + printk(KERN_DEBUG "%s: dropped frame to %s" + " (unauthorized port)\n", tx->dev->name, + print_mac(mac, hdr->addr1)); +#endif + I802_DEBUG_INC(tx->local->tx_handlers_drop_unauth_port); + return TXRX_DROP; + } + + return TXRX_CONTINUE; +} + +static ieee80211_txrx_result +ieee80211_tx_h_sequence(struct ieee80211_txrx_data *tx) +{ + struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx->skb->data; + + if (ieee80211_get_hdrlen(le16_to_cpu(hdr->frame_control)) >= 24) + ieee80211_include_sequence(tx->sdata, hdr); + + return TXRX_CONTINUE; +} + +/* This function is called whenever the AP is about to exceed the maximum limit + * of buffered frames for power saving STAs. This situation should not really + * happen often during normal operation, so dropping the oldest buffered packet + * from each queue should be OK to make some room for new frames. */ +static void purge_old_ps_buffers(struct ieee80211_local *local) +{ + int total = 0, purged = 0; + struct sk_buff *skb; + struct ieee80211_sub_if_data *sdata; + struct sta_info *sta; + + /* + * virtual interfaces are protected by RCU + */ + rcu_read_lock(); + + list_for_each_entry_rcu(sdata, &local->interfaces, list) { + struct ieee80211_if_ap *ap; + if (sdata->dev == local->mdev || + sdata->type != IEEE80211_IF_TYPE_AP) + continue; + ap = &sdata->u.ap; + skb = skb_dequeue(&ap->ps_bc_buf); + if (skb) { + purged++; + dev_kfree_skb(skb); + } + total += skb_queue_len(&ap->ps_bc_buf); + } + rcu_read_unlock(); + + read_lock_bh(&local->sta_lock); + list_for_each_entry(sta, &local->sta_list, list) { + skb = skb_dequeue(&sta->ps_tx_buf); + if (skb) { + purged++; + dev_kfree_skb(skb); + } + total += skb_queue_len(&sta->ps_tx_buf); + } + read_unlock_bh(&local->sta_lock); + + local->total_ps_buffered = total; + printk(KERN_DEBUG "%s: PS buffers full - purged %d frames\n", + wiphy_name(local->hw.wiphy), purged); +} + +static inline ieee80211_txrx_result +ieee80211_tx_h_multicast_ps_buf(struct ieee80211_txrx_data *tx) +{ + /* broadcast/multicast frame */ + /* If any of the associated stations is in power save mode, + * the frame is buffered to be sent after DTIM beacon frame */ + if ((tx->local->hw.flags & IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING) && + tx->sdata->type != IEEE80211_IF_TYPE_WDS && + tx->sdata->bss && atomic_read(&tx->sdata->bss->num_sta_ps) && + !(tx->fc & IEEE80211_FCTL_ORDER)) { + if (tx->local->total_ps_buffered >= TOTAL_MAX_TX_BUFFER) + purge_old_ps_buffers(tx->local); + if (skb_queue_len(&tx->sdata->bss->ps_bc_buf) >= + AP_MAX_BC_BUFFER) { + if (net_ratelimit()) { + printk(KERN_DEBUG "%s: BC TX buffer full - " + "dropping the oldest frame\n", + tx->dev->name); + } + dev_kfree_skb(skb_dequeue(&tx->sdata->bss->ps_bc_buf)); + } else + tx->local->total_ps_buffered++; + skb_queue_tail(&tx->sdata->bss->ps_bc_buf, tx->skb); + return TXRX_QUEUED; + } + + return TXRX_CONTINUE; +} + +static inline ieee80211_txrx_result +ieee80211_tx_h_unicast_ps_buf(struct ieee80211_txrx_data *tx) +{ + struct sta_info *sta = tx->sta; + DECLARE_MAC_BUF(mac); + + if (unlikely(!sta || + ((tx->fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_MGMT && + (tx->fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_PROBE_RESP))) + return TXRX_CONTINUE; + + if (unlikely((sta->flags & WLAN_STA_PS) && !sta->pspoll)) { + struct ieee80211_tx_packet_data *pkt_data; +#ifdef 
CONFIG_MAC80211_VERBOSE_PS_DEBUG + printk(KERN_DEBUG "STA %s aid %d: PS buffer (entries " + "before %d)\n", + print_mac(mac, sta->addr), sta->aid, + skb_queue_len(&sta->ps_tx_buf)); +#endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */ + sta->flags |= WLAN_STA_TIM; + if (tx->local->total_ps_buffered >= TOTAL_MAX_TX_BUFFER) + purge_old_ps_buffers(tx->local); + if (skb_queue_len(&sta->ps_tx_buf) >= STA_MAX_TX_BUFFER) { + struct sk_buff *old = skb_dequeue(&sta->ps_tx_buf); + if (net_ratelimit()) { + printk(KERN_DEBUG "%s: STA %s TX " + "buffer full - dropping oldest frame\n", + tx->dev->name, print_mac(mac, sta->addr)); + } + dev_kfree_skb(old); + } else + tx->local->total_ps_buffered++; + /* Queue frame to be sent after STA sends a PS Poll frame */ + if (skb_queue_empty(&sta->ps_tx_buf)) { + if (tx->local->ops->set_tim) + tx->local->ops->set_tim(local_to_hw(tx->local), + sta->aid, 1); + if (tx->sdata->bss) + bss_tim_set(tx->local, tx->sdata->bss, sta->aid); + } + pkt_data = (struct ieee80211_tx_packet_data *)tx->skb->cb; + pkt_data->jiffies = jiffies; + skb_queue_tail(&sta->ps_tx_buf, tx->skb); + return TXRX_QUEUED; + } +#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG + else if (unlikely(sta->flags & WLAN_STA_PS)) { + printk(KERN_DEBUG "%s: STA %s in PS mode, but pspoll " + "set -> send frame\n", tx->dev->name, + print_mac(mac, sta->addr)); + } +#endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */ + sta->pspoll = 0; + + return TXRX_CONTINUE; +} + + +static ieee80211_txrx_result +ieee80211_tx_h_ps_buf(struct ieee80211_txrx_data *tx) +{ + if (unlikely(tx->flags & IEEE80211_TXRXD_TXPS_BUFFERED)) + return TXRX_CONTINUE; + + if (tx->flags & IEEE80211_TXRXD_TXUNICAST) + return ieee80211_tx_h_unicast_ps_buf(tx); + else + return ieee80211_tx_h_multicast_ps_buf(tx); +} + + + + +static ieee80211_txrx_result +ieee80211_tx_h_select_key(struct ieee80211_txrx_data *tx) +{ + struct ieee80211_key *key; + + if (unlikely(tx->u.tx.control->flags & IEEE80211_TXCTL_DO_NOT_ENCRYPT)) + tx->key = NULL; + else if (tx->sta && (key = rcu_dereference(tx->sta->key))) + tx->key = key; + else if ((key = rcu_dereference(tx->sdata->default_key))) + tx->key = key; + else if (tx->sdata->drop_unencrypted && + !(tx->sdata->eapol && ieee80211_is_eapol(tx->skb))) { + I802_DEBUG_INC(tx->local->tx_handlers_drop_unencrypted); + return TXRX_DROP; + } else { + tx->key = NULL; + tx->u.tx.control->flags |= IEEE80211_TXCTL_DO_NOT_ENCRYPT; + } + + if (tx->key) { + tx->key->tx_rx_count++; + /* TODO: add threshold stuff again */ + } + + return TXRX_CONTINUE; +} + +static ieee80211_txrx_result +ieee80211_tx_h_fragment(struct ieee80211_txrx_data *tx) +{ + struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) tx->skb->data; + size_t hdrlen, per_fragm, num_fragm, payload_len, left; + struct sk_buff **frags, *first, *frag; + int i; + u16 seq; + u8 *pos; + int frag_threshold = tx->local->fragmentation_threshold; + + if (!(tx->flags & IEEE80211_TXRXD_FRAGMENTED)) + return TXRX_CONTINUE; + + first = tx->skb; + + hdrlen = ieee80211_get_hdrlen(tx->fc); + payload_len = first->len - hdrlen; + per_fragm = frag_threshold - hdrlen - FCS_LEN; + num_fragm = DIV_ROUND_UP(payload_len, per_fragm); + + frags = kzalloc(num_fragm * sizeof(struct sk_buff *), GFP_ATOMIC); + if (!frags) + goto fail; + + hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_MOREFRAGS); + seq = le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_SEQ; + pos = first->data + hdrlen + per_fragm; + left = payload_len - per_fragm; + for (i = 0; i < num_fragm - 1; i++) { + struct ieee80211_hdr *fhdr; + size_t copylen; + + if
(left <= 0) + goto fail; + + /* reserve enough extra head and tail room for possible + * encryption */ + frag = frags[i] = + dev_alloc_skb(tx->local->tx_headroom + + frag_threshold + + IEEE80211_ENCRYPT_HEADROOM + + IEEE80211_ENCRYPT_TAILROOM); + if (!frag) + goto fail; + /* Make sure that all fragments use the same priority so + * that they end up using the same TX queue */ + frag->priority = first->priority; + skb_reserve(frag, tx->local->tx_headroom + + IEEE80211_ENCRYPT_HEADROOM); + fhdr = (struct ieee80211_hdr *) skb_put(frag, hdrlen); + memcpy(fhdr, first->data, hdrlen); + if (i == num_fragm - 2) + fhdr->frame_control &= cpu_to_le16(~IEEE80211_FCTL_MOREFRAGS); + fhdr->seq_ctrl = cpu_to_le16(seq | ((i + 1) & IEEE80211_SCTL_FRAG)); + copylen = left > per_fragm ? per_fragm : left; + memcpy(skb_put(frag, copylen), pos, copylen); + + pos += copylen; + left -= copylen; + } + skb_trim(first, hdrlen + per_fragm); + + tx->u.tx.num_extra_frag = num_fragm - 1; + tx->u.tx.extra_frag = frags; + + return TXRX_CONTINUE; + + fail: + printk(KERN_DEBUG "%s: failed to fragment frame\n", tx->dev->name); + if (frags) { + for (i = 0; i < num_fragm - 1; i++) + if (frags[i]) + dev_kfree_skb(frags[i]); + kfree(frags); + } + I802_DEBUG_INC(tx->local->tx_handlers_drop_fragment); + return TXRX_DROP; +} + +static ieee80211_txrx_result +ieee80211_tx_h_encrypt(struct ieee80211_txrx_data *tx) +{ + if (!tx->key) + return TXRX_CONTINUE; + + switch (tx->key->conf.alg) { + case ALG_WEP: + return ieee80211_crypto_wep_encrypt(tx); + case ALG_TKIP: + return ieee80211_crypto_tkip_encrypt(tx); + case ALG_CCMP: + return ieee80211_crypto_ccmp_encrypt(tx); + } + + /* not reached */ + WARN_ON(1); + return TXRX_DROP; +} + +static ieee80211_txrx_result +ieee80211_tx_h_rate_ctrl(struct ieee80211_txrx_data *tx) +{ + struct rate_control_extra extra; + + if (likely(!tx->u.tx.rate)) { + memset(&extra, 0, sizeof(extra)); + extra.mode = tx->u.tx.mode; + extra.ethertype = tx->ethertype; + + tx->u.tx.rate = rate_control_get_rate(tx->local, tx->dev, + tx->skb, &extra); + if (unlikely(extra.probe != NULL)) { + tx->u.tx.control->flags |= + IEEE80211_TXCTL_RATE_CTRL_PROBE; + tx->flags |= IEEE80211_TXRXD_TXPROBE_LAST_FRAG; + tx->u.tx.control->alt_retry_rate = tx->u.tx.rate->val; + tx->u.tx.rate = extra.probe; + } else + tx->u.tx.control->alt_retry_rate = -1; + + if (!tx->u.tx.rate) + return TXRX_DROP; + } else + tx->u.tx.control->alt_retry_rate = -1; + + if (tx->u.tx.mode->mode == MODE_IEEE80211G && + (tx->sdata->flags & IEEE80211_SDATA_USE_PROTECTION) && + (tx->flags & IEEE80211_TXRXD_FRAGMENTED) && extra.nonerp) { + tx->u.tx.last_frag_rate = tx->u.tx.rate; + if (extra.probe) + tx->flags &= ~IEEE80211_TXRXD_TXPROBE_LAST_FRAG; + else + tx->flags |= IEEE80211_TXRXD_TXPROBE_LAST_FRAG; + tx->u.tx.rate = extra.nonerp; + tx->u.tx.control->rate = extra.nonerp; + tx->u.tx.control->flags &= ~IEEE80211_TXCTL_RATE_CTRL_PROBE; + } else { + tx->u.tx.last_frag_rate = tx->u.tx.rate; + tx->u.tx.control->rate = tx->u.tx.rate; + } + tx->u.tx.control->tx_rate = tx->u.tx.rate->val; + + return TXRX_CONTINUE; +} + +static ieee80211_txrx_result +ieee80211_tx_h_misc(struct ieee80211_txrx_data *tx) +{ + struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) tx->skb->data; + u16 fc = le16_to_cpu(hdr->frame_control); + u16 dur; + struct ieee80211_tx_control *control = tx->u.tx.control; + struct ieee80211_hw_mode *mode = tx->u.tx.mode; + + if (!control->retry_limit) { + if (!is_multicast_ether_addr(hdr->addr1)) { + if (tx->skb->len + FCS_LEN > tx->local->rts_threshold + 
&& tx->local->rts_threshold < + IEEE80211_MAX_RTS_THRESHOLD) { + control->flags |= + IEEE80211_TXCTL_USE_RTS_CTS; + control->flags |= + IEEE80211_TXCTL_LONG_RETRY_LIMIT; + control->retry_limit = + tx->local->long_retry_limit; + } else { + control->retry_limit = + tx->local->short_retry_limit; + } + } else { + control->retry_limit = 1; + } + } + + if (tx->flags & IEEE80211_TXRXD_FRAGMENTED) { + /* Do not use multiple retry rates when sending fragmented + * frames. + * TODO: The last fragment could still use multiple retry + * rates. */ + control->alt_retry_rate = -1; + } + + /* Use CTS protection for unicast frames sent using extended rates if + * there are associated non-ERP stations and RTS/CTS is not configured + * for the frame. */ + if (mode->mode == MODE_IEEE80211G && + (tx->u.tx.rate->flags & IEEE80211_RATE_ERP) && + (tx->flags & IEEE80211_TXRXD_TXUNICAST) && + (tx->sdata->flags & IEEE80211_SDATA_USE_PROTECTION) && + !(control->flags & IEEE80211_TXCTL_USE_RTS_CTS)) + control->flags |= IEEE80211_TXCTL_USE_CTS_PROTECT; + + /* Transmit data frames using short preambles if the driver supports + * short preambles at the selected rate and short preambles are + * available on the network at the current point in time. */ + if (((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_DATA) && + (tx->u.tx.rate->flags & IEEE80211_RATE_PREAMBLE2) && + (tx->sdata->flags & IEEE80211_SDATA_SHORT_PREAMBLE) && + (!tx->sta || (tx->sta->flags & WLAN_STA_SHORT_PREAMBLE))) { + tx->u.tx.control->tx_rate = tx->u.tx.rate->val2; + } + + /* Setup duration field for the first fragment of the frame. Duration + * for remaining fragments will be updated when they are being sent + * to low-level driver in ieee80211_tx(). */ + dur = ieee80211_duration(tx, is_multicast_ether_addr(hdr->addr1), + (tx->flags & IEEE80211_TXRXD_FRAGMENTED) ? + tx->u.tx.extra_frag[0]->len : 0); + hdr->duration_id = cpu_to_le16(dur); + + if ((control->flags & IEEE80211_TXCTL_USE_RTS_CTS) || + (control->flags & IEEE80211_TXCTL_USE_CTS_PROTECT)) { + struct ieee80211_rate *rate; + + /* Do not use multiple retry rates when using RTS/CTS */ + control->alt_retry_rate = -1; + + /* Use min(data rate, max base rate) as CTS/RTS rate */ + rate = tx->u.tx.rate; + while (rate > mode->rates && + !(rate->flags & IEEE80211_RATE_BASIC)) + rate--; + + control->rts_cts_rate = rate->val; + control->rts_rate = rate; + } + + if (tx->sta) { + tx->sta->tx_packets++; + tx->sta->tx_fragments++; + tx->sta->tx_bytes += tx->skb->len; + if (tx->u.tx.extra_frag) { + int i; + tx->sta->tx_fragments += tx->u.tx.num_extra_frag; + for (i = 0; i < tx->u.tx.num_extra_frag; i++) { + tx->sta->tx_bytes += + tx->u.tx.extra_frag[i]->len; + } + } + } + + /* + * Tell hardware to not encrypt when we had sw crypto. + * Because we use the same flag to internally indicate that + * no (software) encryption should be done, we have to set it + * after all crypto handlers. 
+ */ + if (tx->key && !(tx->key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE)) + tx->u.tx.control->flags |= IEEE80211_TXCTL_DO_NOT_ENCRYPT; + + return TXRX_CONTINUE; +} + +static ieee80211_txrx_result +ieee80211_tx_h_load_stats(struct ieee80211_txrx_data *tx) +{ + struct ieee80211_local *local = tx->local; + struct ieee80211_hw_mode *mode = tx->u.tx.mode; + struct sk_buff *skb = tx->skb; + struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; + u32 load = 0, hdrtime; + + /* TODO: this could be part of tx_status handling, so that the number + * of retries would be known; TX rate should in that case be stored + * somewhere with the packet */ + + /* Estimate total channel use caused by this frame */ + + /* 1 bit at 1 Mbit/s takes 1 usec; in channel_use values, + * 1 usec = 1/8 * (1080 / 10) = 13.5 */ + + if (mode->mode == MODE_IEEE80211A || + (mode->mode == MODE_IEEE80211G && + tx->u.tx.rate->flags & IEEE80211_RATE_ERP)) + hdrtime = CHAN_UTIL_HDR_SHORT; + else + hdrtime = CHAN_UTIL_HDR_LONG; + + load = hdrtime; + if (!is_multicast_ether_addr(hdr->addr1)) + load += hdrtime; + + if (tx->u.tx.control->flags & IEEE80211_TXCTL_USE_RTS_CTS) + load += 2 * hdrtime; + else if (tx->u.tx.control->flags & IEEE80211_TXCTL_USE_CTS_PROTECT) + load += hdrtime; + + load += skb->len * tx->u.tx.rate->rate_inv; + + if (tx->u.tx.extra_frag) { + int i; + for (i = 0; i < tx->u.tx.num_extra_frag; i++) { + load += 2 * hdrtime; + load += tx->u.tx.extra_frag[i]->len * + tx->u.tx.rate->rate; + } + } + + /* Divide channel_use by 8 to avoid wrapping around the counter */ + load >>= CHAN_UTIL_SHIFT; + local->channel_use_raw += load; + if (tx->sta) + tx->sta->channel_use_raw += load; + tx->sdata->channel_use_raw += load; + + return TXRX_CONTINUE; +} + +/* TODO: implement register/unregister functions for adding TX/RX handlers + * into ordered list */ + +ieee80211_tx_handler ieee80211_tx_handlers[] = +{ + ieee80211_tx_h_check_assoc, + ieee80211_tx_h_sequence, + ieee80211_tx_h_ps_buf, + ieee80211_tx_h_select_key, + ieee80211_tx_h_michael_mic_add, + ieee80211_tx_h_fragment, + ieee80211_tx_h_encrypt, + ieee80211_tx_h_rate_ctrl, + ieee80211_tx_h_misc, + ieee80211_tx_h_load_stats, + NULL +}; + +/* actual transmit path */ + +/* + * deal with packet injection down monitor interface + * with Radiotap Header -- only called for monitor mode interface + */ +static ieee80211_txrx_result +__ieee80211_parse_tx_radiotap(struct ieee80211_txrx_data *tx, + struct sk_buff *skb) +{ + /* + * this is the moment to interpret and discard the radiotap header that + * must be at the start of the packet injected in Monitor mode + * + * Need to take some care with endian-ness since radiotap + * args are little-endian + */ + + struct ieee80211_radiotap_iterator iterator; + struct ieee80211_radiotap_header *rthdr = + (struct ieee80211_radiotap_header *) skb->data; + struct ieee80211_hw_mode *mode = tx->local->hw.conf.mode; + int ret = ieee80211_radiotap_iterator_init(&iterator, rthdr, skb->len); + struct ieee80211_tx_control *control = tx->u.tx.control; + + control->flags |= IEEE80211_TXCTL_DO_NOT_ENCRYPT; + tx->flags |= IEEE80211_TXRXD_TX_INJECTED; + tx->flags &= ~IEEE80211_TXRXD_FRAGMENTED; + + /* + * for every radiotap entry that is present + * (ieee80211_radiotap_iterator_next returns -ENOENT when no more + * entries present, or -EINVAL on error) + */ + + while (!ret) { + int i, target_rate; + + ret = ieee80211_radiotap_iterator_next(&iterator); + + if (ret) + continue; + + /* see if this argument is something we can use */ + switch 
(iterator.this_arg_index) { + /* + * You must take care when dereferencing iterator.this_arg + * for multibyte types... the pointer is not aligned. Use + * get_unaligned((type *)iterator.this_arg) to dereference + * iterator.this_arg for type "type" safely on all arches. + */ + case IEEE80211_RADIOTAP_RATE: + /* + * radiotap rate u8 is in 500kbps units eg, 0x02=1Mbps + * ieee80211 rate int is in 100kbps units eg, 0x0a=1Mbps + */ + target_rate = (*iterator.this_arg) * 5; + for (i = 0; i < mode->num_rates; i++) { + struct ieee80211_rate *r = &mode->rates[i]; + + if (r->rate == target_rate) { + tx->u.tx.rate = r; + break; + } + } + break; + + case IEEE80211_RADIOTAP_ANTENNA: + /* + * radiotap uses 0 for 1st ant, mac80211 is 1 for + * 1st ant + */ + control->antenna_sel_tx = (*iterator.this_arg) + 1; + break; + + case IEEE80211_RADIOTAP_DBM_TX_POWER: + control->power_level = *iterator.this_arg; + break; + + case IEEE80211_RADIOTAP_FLAGS: + if (*iterator.this_arg & IEEE80211_RADIOTAP_F_FCS) { + /* + * this indicates that the skb we have been + * handed has the 32-bit FCS CRC at the end... + * we should react to that by snipping it off + * because it will be recomputed and added + * on transmission + */ + if (skb->len < (iterator.max_length + FCS_LEN)) + return TXRX_DROP; + + skb_trim(skb, skb->len - FCS_LEN); + } + if (*iterator.this_arg & IEEE80211_RADIOTAP_F_WEP) + control->flags &= + ~IEEE80211_TXCTL_DO_NOT_ENCRYPT; + if (*iterator.this_arg & IEEE80211_RADIOTAP_F_FRAG) + tx->flags |= IEEE80211_TXRXD_FRAGMENTED; + break; + + /* + * Please update the file + * Documentation/networking/mac80211-injection.txt + * when parsing new fields here. + */ + + default: + break; + } + } + + if (ret != -ENOENT) /* ie, if we didn't simply run out of fields */ + return TXRX_DROP; + + /* + * remove the radiotap header + * iterator->max_length was sanity-checked against + * skb->len by iterator init + */ + skb_pull(skb, iterator.max_length); + + return TXRX_CONTINUE; +} + +/* + * initialises @tx + */ +static ieee80211_txrx_result +__ieee80211_tx_prepare(struct ieee80211_txrx_data *tx, + struct sk_buff *skb, + struct net_device *dev, + struct ieee80211_tx_control *control) +{ + struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); + struct ieee80211_hdr *hdr; + struct ieee80211_sub_if_data *sdata; + ieee80211_txrx_result res = TXRX_CONTINUE; + + int hdrlen; + + memset(tx, 0, sizeof(*tx)); + tx->skb = skb; + tx->dev = dev; /* use original interface */ + tx->local = local; + tx->sdata = IEEE80211_DEV_TO_SUB_IF(dev); + tx->u.tx.control = control; + /* + * Set this flag (used below to indicate "automatic fragmentation"), + * it will be cleared/left by radiotap as desired. + */ + tx->flags |= IEEE80211_TXRXD_FRAGMENTED; + + /* process and remove the injection radiotap header */ + sdata = IEEE80211_DEV_TO_SUB_IF(dev); + if (unlikely(sdata->type == IEEE80211_IF_TYPE_MNTR)) { + if (__ieee80211_parse_tx_radiotap(tx, skb) == TXRX_DROP) + return TXRX_DROP; + + /* + * __ieee80211_parse_tx_radiotap has now removed + * the radiotap header that was present and pre-filled + * 'tx' with tx control information. 
+ */ + } + + hdr = (struct ieee80211_hdr *) skb->data; + + tx->sta = sta_info_get(local, hdr->addr1); + tx->fc = le16_to_cpu(hdr->frame_control); + + if (is_multicast_ether_addr(hdr->addr1)) { + tx->flags &= ~IEEE80211_TXRXD_TXUNICAST; + control->flags |= IEEE80211_TXCTL_NO_ACK; + } else { + tx->flags |= IEEE80211_TXRXD_TXUNICAST; + control->flags &= ~IEEE80211_TXCTL_NO_ACK; + } + + if (tx->flags & IEEE80211_TXRXD_FRAGMENTED) { + if ((tx->flags & IEEE80211_TXRXD_TXUNICAST) && + skb->len + FCS_LEN > local->fragmentation_threshold && + !local->ops->set_frag_threshold) + tx->flags |= IEEE80211_TXRXD_FRAGMENTED; + else + tx->flags &= ~IEEE80211_TXRXD_FRAGMENTED; + } + + if (!tx->sta) + control->flags |= IEEE80211_TXCTL_CLEAR_DST_MASK; + else if (tx->sta->clear_dst_mask) { + control->flags |= IEEE80211_TXCTL_CLEAR_DST_MASK; + tx->sta->clear_dst_mask = 0; + } + + hdrlen = ieee80211_get_hdrlen(tx->fc); + if (skb->len > hdrlen + sizeof(rfc1042_header) + 2) { + u8 *pos = &skb->data[hdrlen + sizeof(rfc1042_header)]; + tx->ethertype = (pos[0] << 8) | pos[1]; + } + control->flags |= IEEE80211_TXCTL_FIRST_FRAGMENT; + + return res; +} + +/* Device in tx->dev has a reference added; use dev_put(tx->dev) when + * finished with it. + * + * NB: @tx is uninitialised when passed in here + */ +static int ieee80211_tx_prepare(struct ieee80211_txrx_data *tx, + struct sk_buff *skb, + struct net_device *mdev, + struct ieee80211_tx_control *control) +{ + struct ieee80211_tx_packet_data *pkt_data; + struct net_device *dev; + + pkt_data = (struct ieee80211_tx_packet_data *)skb->cb; + dev = dev_get_by_index(&init_net, pkt_data->ifindex); + if (unlikely(dev && !is_ieee80211_device(dev, mdev))) { + dev_put(dev); + dev = NULL; + } + if (unlikely(!dev)) + return -ENODEV; + /* initialises tx with control */ + __ieee80211_tx_prepare(tx, skb, dev, control); + return 0; +} + +static int __ieee80211_tx(struct ieee80211_local *local, struct sk_buff *skb, + struct ieee80211_txrx_data *tx) +{ + struct ieee80211_tx_control *control = tx->u.tx.control; + int ret, i; + + if (!ieee80211_qdisc_installed(local->mdev) && + __ieee80211_queue_stopped(local, 0)) { + netif_stop_queue(local->mdev); + return IEEE80211_TX_AGAIN; + } + if (skb) { + ieee80211_dump_frame(wiphy_name(local->hw.wiphy), + "TX to low-level driver", skb); + ret = local->ops->tx(local_to_hw(local), skb, control); + if (ret) + return IEEE80211_TX_AGAIN; + local->mdev->trans_start = jiffies; + ieee80211_led_tx(local, 1); + } + if (tx->u.tx.extra_frag) { + control->flags &= ~(IEEE80211_TXCTL_USE_RTS_CTS | + IEEE80211_TXCTL_USE_CTS_PROTECT | + IEEE80211_TXCTL_CLEAR_DST_MASK | + IEEE80211_TXCTL_FIRST_FRAGMENT); + for (i = 0; i < tx->u.tx.num_extra_frag; i++) { + if (!tx->u.tx.extra_frag[i]) + continue; + if (__ieee80211_queue_stopped(local, control->queue)) + return IEEE80211_TX_FRAG_AGAIN; + if (i == tx->u.tx.num_extra_frag) { + control->tx_rate = tx->u.tx.last_frag_hwrate; + control->rate = tx->u.tx.last_frag_rate; + if (tx->flags & IEEE80211_TXRXD_TXPROBE_LAST_FRAG) + control->flags |= + IEEE80211_TXCTL_RATE_CTRL_PROBE; + else + control->flags &= + ~IEEE80211_TXCTL_RATE_CTRL_PROBE; + } + + ieee80211_dump_frame(wiphy_name(local->hw.wiphy), + "TX to low-level driver", + tx->u.tx.extra_frag[i]); + ret = local->ops->tx(local_to_hw(local), + tx->u.tx.extra_frag[i], + control); + if (ret) + return IEEE80211_TX_FRAG_AGAIN; + local->mdev->trans_start = jiffies; + ieee80211_led_tx(local, 1); + tx->u.tx.extra_frag[i] = NULL; + } + kfree(tx->u.tx.extra_frag); + tx->u.tx.extra_frag = 
NULL; + } + return IEEE80211_TX_OK; +} + +static int ieee80211_tx(struct net_device *dev, struct sk_buff *skb, + struct ieee80211_tx_control *control) +{ + struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); + struct sta_info *sta; + ieee80211_tx_handler *handler; + struct ieee80211_txrx_data tx; + ieee80211_txrx_result res = TXRX_DROP, res_prepare; + int ret, i; + + WARN_ON(__ieee80211_queue_pending(local, control->queue)); + + if (unlikely(skb->len < 10)) { + dev_kfree_skb(skb); + return 0; + } + + /* initialises tx */ + res_prepare = __ieee80211_tx_prepare(&tx, skb, dev, control); + + if (res_prepare == TXRX_DROP) { + dev_kfree_skb(skb); + return 0; + } + + /* + * key references are protected using RCU and this requires that + * we are in a read-side RCU section while processing the frame + */ + rcu_read_lock(); + + sta = tx.sta; + tx.u.tx.mode = local->hw.conf.mode; + + for (handler = local->tx_handlers; *handler != NULL; + handler++) { + res = (*handler)(&tx); + if (res != TXRX_CONTINUE) + break; + } + + skb = tx.skb; /* handlers are allowed to change skb */ + + if (sta) + sta_info_put(sta); + + if (unlikely(res == TXRX_DROP)) { + I802_DEBUG_INC(local->tx_handlers_drop); + goto drop; + } + + if (unlikely(res == TXRX_QUEUED)) { + I802_DEBUG_INC(local->tx_handlers_queued); + rcu_read_unlock(); + return 0; + } + + if (tx.u.tx.extra_frag) { + for (i = 0; i < tx.u.tx.num_extra_frag; i++) { + int next_len, dur; + struct ieee80211_hdr *hdr = + (struct ieee80211_hdr *) + tx.u.tx.extra_frag[i]->data; + + if (i + 1 < tx.u.tx.num_extra_frag) { + next_len = tx.u.tx.extra_frag[i + 1]->len; + } else { + next_len = 0; + tx.u.tx.rate = tx.u.tx.last_frag_rate; + tx.u.tx.last_frag_hwrate = tx.u.tx.rate->val; + } + dur = ieee80211_duration(&tx, 0, next_len); + hdr->duration_id = cpu_to_le16(dur); + } + } + +retry: + ret = __ieee80211_tx(local, skb, &tx); + if (ret) { + struct ieee80211_tx_stored_packet *store = + &local->pending_packet[control->queue]; + + if (ret == IEEE80211_TX_FRAG_AGAIN) + skb = NULL; + set_bit(IEEE80211_LINK_STATE_PENDING, + &local->state[control->queue]); + smp_mb(); + /* When the driver gets out of buffers during sending of + * fragments and calls ieee80211_stop_queue, there is + * a small window between the IEEE80211_LINK_STATE_XOFF and + * IEEE80211_LINK_STATE_PENDING flags being set. If a buffer + * becomes available in that window (i.e. driver calls + * ieee80211_wake_queue), we would end up with ieee80211_tx + * called with IEEE80211_LINK_STATE_PENDING. Prevent this by + * continuing to transmit here when that situation may + * have happened.
*/ + if (!__ieee80211_queue_stopped(local, control->queue)) { + clear_bit(IEEE80211_LINK_STATE_PENDING, + &local->state[control->queue]); + goto retry; + } + memcpy(&store->control, control, + sizeof(struct ieee80211_tx_control)); + store->skb = skb; + store->extra_frag = tx.u.tx.extra_frag; + store->num_extra_frag = tx.u.tx.num_extra_frag; + store->last_frag_hwrate = tx.u.tx.last_frag_hwrate; + store->last_frag_rate = tx.u.tx.last_frag_rate; + store->last_frag_rate_ctrl_probe = + !!(tx.flags & IEEE80211_TXRXD_TXPROBE_LAST_FRAG); + } + rcu_read_unlock(); + return 0; + + drop: + if (skb) + dev_kfree_skb(skb); + for (i = 0; i < tx.u.tx.num_extra_frag; i++) + if (tx.u.tx.extra_frag[i]) + dev_kfree_skb(tx.u.tx.extra_frag[i]); + kfree(tx.u.tx.extra_frag); + rcu_read_unlock(); + return 0; +} + +/* device xmit handlers */ + +int ieee80211_master_start_xmit(struct sk_buff *skb, + struct net_device *dev) +{ + struct ieee80211_tx_control control; + struct ieee80211_tx_packet_data *pkt_data; + struct net_device *odev = NULL; + struct ieee80211_sub_if_data *osdata; + int headroom; + int ret; + + /* + * copy control out of the skb so other people can use skb->cb + */ + pkt_data = (struct ieee80211_tx_packet_data *)skb->cb; + memset(&control, 0, sizeof(struct ieee80211_tx_control)); + + if (pkt_data->ifindex) + odev = dev_get_by_index(&init_net, pkt_data->ifindex); + if (unlikely(odev && !is_ieee80211_device(odev, dev))) { + dev_put(odev); + odev = NULL; + } + if (unlikely(!odev)) { +#ifdef CONFIG_MAC80211_VERBOSE_DEBUG + printk(KERN_DEBUG "%s: Discarded packet with nonexistent " + "originating device\n", dev->name); +#endif + dev_kfree_skb(skb); + return 0; + } + osdata = IEEE80211_DEV_TO_SUB_IF(odev); + + headroom = osdata->local->tx_headroom + IEEE80211_ENCRYPT_HEADROOM; + if (skb_headroom(skb) < headroom) { + if (pskb_expand_head(skb, headroom, 0, GFP_ATOMIC)) { + dev_kfree_skb(skb); + dev_put(odev); + return 0; + } + } + + control.ifindex = odev->ifindex; + control.type = osdata->type; + if (pkt_data->flags & IEEE80211_TXPD_REQ_TX_STATUS) + control.flags |= IEEE80211_TXCTL_REQ_TX_STATUS; + if (pkt_data->flags & IEEE80211_TXPD_DO_NOT_ENCRYPT) + control.flags |= IEEE80211_TXCTL_DO_NOT_ENCRYPT; + if (pkt_data->flags & IEEE80211_TXPD_REQUEUE) + control.flags |= IEEE80211_TXCTL_REQUEUE; + control.queue = pkt_data->queue; + + ret = ieee80211_tx(odev, skb, &control); + dev_put(odev); + + return ret; +} + +int ieee80211_monitor_start_xmit(struct sk_buff *skb, + struct net_device *dev) +{ + struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); + struct ieee80211_tx_packet_data *pkt_data; + struct ieee80211_radiotap_header *prthdr = + (struct ieee80211_radiotap_header *)skb->data; + u16 len_rthdr; + + /* check for not even having the fixed radiotap header part */ + if (unlikely(skb->len < sizeof(struct ieee80211_radiotap_header))) + goto fail; /* too short to be possibly valid */ + + /* is it a header version we can trust to find length from? */ + if (unlikely(prthdr->it_version)) + goto fail; /* only version 0 is supported */ + + /* then there must be a radiotap header with a length we can use */ + len_rthdr = ieee80211_get_radiotap_len(skb->data); + + /* does the skb contain enough to deliver on the alleged length? 
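 * For reference, the smallest valid injected frame starts with a bare
 * 8-byte version-0 radiotap header, i.e. it_version 0, it_len 8 and
 * it_present 0, immediately followed by the plain 802.11 header.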
*/ + if (unlikely(skb->len < len_rthdr)) + goto fail; /* skb too short for claimed rt header extent */ + + skb->dev = local->mdev; + + pkt_data = (struct ieee80211_tx_packet_data *)skb->cb; + memset(pkt_data, 0, sizeof(*pkt_data)); + /* needed because we set skb device to master */ + pkt_data->ifindex = dev->ifindex; + + pkt_data->flags |= IEEE80211_TXPD_DO_NOT_ENCRYPT; + + /* + * fix up the pointers accounting for the radiotap + * header still being in there. We are being given + * a precooked IEEE80211 header so no need for + * normal processing + */ + skb_set_mac_header(skb, len_rthdr); + /* + * these are just fixed to the end of the rt area since we + * don't have any better information and at this point, nobody cares + */ + skb_set_network_header(skb, len_rthdr); + skb_set_transport_header(skb, len_rthdr); + + /* pass the radiotap header up to the next stage intact */ + dev_queue_xmit(skb); + return NETDEV_TX_OK; + +fail: + dev_kfree_skb(skb); + return NETDEV_TX_OK; /* meaning, we dealt with the skb */ +} + +/** + * ieee80211_subif_start_xmit - netif start_xmit function for Ethernet-type + * subinterfaces (wlan#, WDS, and VLAN interfaces) + * @skb: packet to be sent + * @dev: incoming interface + * + * Returns: 0 on success (and frees skb in this case) or 1 on failure (skb will + * not be freed, and caller is responsible for either retrying later or freeing + * skb). + * + * This function takes in an Ethernet header and encapsulates it with suitable + * IEEE 802.11 header based on which interface the packet is coming in. The + * encapsulated packet will then be passed to master interface, wlan#.11, for + * transmission (through low-level driver). + */ +int ieee80211_subif_start_xmit(struct sk_buff *skb, + struct net_device *dev) +{ + struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); + struct ieee80211_tx_packet_data *pkt_data; + struct ieee80211_sub_if_data *sdata; + int ret = 1, head_need; + u16 ethertype, hdrlen, fc; + struct ieee80211_hdr hdr; + const u8 *encaps_data; + int encaps_len, skip_header_bytes; + int nh_pos, h_pos; + struct sta_info *sta; + + sdata = IEEE80211_DEV_TO_SUB_IF(dev); + if (unlikely(skb->len < ETH_HLEN)) { + printk(KERN_DEBUG "%s: short skb (len=%d)\n", + dev->name, skb->len); + ret = 0; + goto fail; + } + + nh_pos = skb_network_header(skb) - skb->data; + h_pos = skb_transport_header(skb) - skb->data; + + /* convert Ethernet header to proper 802.11 header (based on + * operation mode) */ + ethertype = (skb->data[12] << 8) | skb->data[13]; + /* TODO: handling for 802.1x authorized/unauthorized port */ + fc = IEEE80211_FTYPE_DATA | IEEE80211_STYPE_DATA; + + switch (sdata->type) { + case IEEE80211_IF_TYPE_AP: + case IEEE80211_IF_TYPE_VLAN: + fc |= IEEE80211_FCTL_FROMDS; + /* DA BSSID SA */ + memcpy(hdr.addr1, skb->data, ETH_ALEN); + memcpy(hdr.addr2, dev->dev_addr, ETH_ALEN); + memcpy(hdr.addr3, skb->data + ETH_ALEN, ETH_ALEN); + hdrlen = 24; + break; + case IEEE80211_IF_TYPE_WDS: + fc |= IEEE80211_FCTL_FROMDS | IEEE80211_FCTL_TODS; + /* RA TA DA SA */ + memcpy(hdr.addr1, sdata->u.wds.remote_addr, ETH_ALEN); + memcpy(hdr.addr2, dev->dev_addr, ETH_ALEN); + memcpy(hdr.addr3, skb->data, ETH_ALEN); + memcpy(hdr.addr4, skb->data + ETH_ALEN, ETH_ALEN); + hdrlen = 30; + break; + case IEEE80211_IF_TYPE_STA: + fc |= IEEE80211_FCTL_TODS; + /* BSSID SA DA */ + memcpy(hdr.addr1, sdata->u.sta.bssid, ETH_ALEN); + memcpy(hdr.addr2, skb->data + ETH_ALEN, ETH_ALEN); + memcpy(hdr.addr3, skb->data, ETH_ALEN); + hdrlen = 24; + break; + case IEEE80211_IF_TYPE_IBSS: + /* 
DA SA BSSID */ + memcpy(hdr.addr1, skb->data, ETH_ALEN); + memcpy(hdr.addr2, skb->data + ETH_ALEN, ETH_ALEN); + memcpy(hdr.addr3, sdata->u.sta.bssid, ETH_ALEN); + hdrlen = 24; + break; + default: + ret = 0; + goto fail; + } + + /* receiver is QoS enabled, use a QoS type frame */ + sta = sta_info_get(local, hdr.addr1); + if (sta) { + if (sta->flags & WLAN_STA_WME) { + fc |= IEEE80211_STYPE_QOS_DATA; + hdrlen += 2; + } + sta_info_put(sta); + } + + hdr.frame_control = cpu_to_le16(fc); + hdr.duration_id = 0; + hdr.seq_ctrl = 0; + + skip_header_bytes = ETH_HLEN; + if (ethertype == ETH_P_AARP || ethertype == ETH_P_IPX) { + encaps_data = bridge_tunnel_header; + encaps_len = sizeof(bridge_tunnel_header); + skip_header_bytes -= 2; + } else if (ethertype >= 0x600) { + encaps_data = rfc1042_header; + encaps_len = sizeof(rfc1042_header); + skip_header_bytes -= 2; + } else { + encaps_data = NULL; + encaps_len = 0; + } + + skb_pull(skb, skip_header_bytes); + nh_pos -= skip_header_bytes; + h_pos -= skip_header_bytes; + + /* TODO: implement support for fragments so that there is no need to + * reallocate and copy payload; it might be enough to support one + * extra fragment that would be copied in the beginning of the frame + * data.. anyway, it would be nice to include this into skb structure + * somehow + * + * There are few options for this: + * use skb->cb as an extra space for 802.11 header + * allocate new buffer if not enough headroom + * make sure that there is enough headroom in every skb by increasing + * build in headroom in __dev_alloc_skb() (linux/skbuff.h) and + * alloc_skb() (net/core/skbuff.c) + */ + head_need = hdrlen + encaps_len + local->tx_headroom; + head_need -= skb_headroom(skb); + + /* We are going to modify skb data, so make a copy of it if happens to + * be cloned. This could happen, e.g., with Linux bridge code passing + * us broadcast frames. */ + + if (head_need > 0 || skb_cloned(skb)) { +#if 0 + printk(KERN_DEBUG "%s: need to reallocate buffer for %d bytes " + "of headroom\n", dev->name, head_need); +#endif + + if (skb_cloned(skb)) + I802_DEBUG_INC(local->tx_expand_skb_head_cloned); + else + I802_DEBUG_INC(local->tx_expand_skb_head); + /* Since we have to reallocate the buffer, make sure that there + * is enough room for possible WEP IV/ICV and TKIP (8 bytes + * before payload and 12 after). */ + if (pskb_expand_head(skb, (head_need > 0 ? head_need + 8 : 8), + 12, GFP_ATOMIC)) { + printk(KERN_DEBUG "%s: failed to reallocate TX buffer" + "\n", dev->name); + goto fail; + } + } + + if (encaps_data) { + memcpy(skb_push(skb, encaps_len), encaps_data, encaps_len); + nh_pos += encaps_len; + h_pos += encaps_len; + } + + if (fc & IEEE80211_STYPE_QOS_DATA) { + __le16 *qos_control; + + qos_control = (__le16*) skb_push(skb, 2); + memcpy(skb_push(skb, hdrlen - 2), &hdr, hdrlen - 2); + /* + * Maybe we could actually set some fields here, for now just + * initialise to zero to indicate no special operation. + */ + *qos_control = 0; + } else + memcpy(skb_push(skb, hdrlen), &hdr, hdrlen); + + nh_pos += hdrlen; + h_pos += hdrlen; + + pkt_data = (struct ieee80211_tx_packet_data *)skb->cb; + memset(pkt_data, 0, sizeof(struct ieee80211_tx_packet_data)); + pkt_data->ifindex = dev->ifindex; + + skb->dev = local->mdev; + dev->stats.tx_packets++; + dev->stats.tx_bytes += skb->len; + + /* Update skb pointers to various headers since this modified frame + * is going to go through Linux networking code that may potentially + * need things like pointer to IP header. 
*/ + skb_set_mac_header(skb, 0); + skb_set_network_header(skb, nh_pos); + skb_set_transport_header(skb, h_pos); + + dev->trans_start = jiffies; + dev_queue_xmit(skb); + + return 0; + + fail: + if (!ret) + dev_kfree_skb(skb); + + return ret; +} + +/* + * This is the transmit routine for the 802.11 type interfaces + * called by upper layers of the linux networking + * stack when it has a frame to transmit + */ +int ieee80211_mgmt_start_xmit(struct sk_buff *skb, struct net_device *dev) +{ + struct ieee80211_sub_if_data *sdata; + struct ieee80211_tx_packet_data *pkt_data; + struct ieee80211_hdr *hdr; + u16 fc; + + sdata = IEEE80211_DEV_TO_SUB_IF(dev); + + if (skb->len < 10) { + dev_kfree_skb(skb); + return 0; + } + + if (skb_headroom(skb) < sdata->local->tx_headroom) { + if (pskb_expand_head(skb, sdata->local->tx_headroom, + 0, GFP_ATOMIC)) { + dev_kfree_skb(skb); + return 0; + } + } + + hdr = (struct ieee80211_hdr *) skb->data; + fc = le16_to_cpu(hdr->frame_control); + + pkt_data = (struct ieee80211_tx_packet_data *) skb->cb; + memset(pkt_data, 0, sizeof(struct ieee80211_tx_packet_data)); + pkt_data->ifindex = sdata->dev->ifindex; + + skb->priority = 20; /* use hardcoded priority for mgmt TX queue */ + skb->dev = sdata->local->mdev; + + /* + * We're using the protocol field of the the frame control header + * to request TX callback for hostapd. BIT(1) is checked. + */ + if ((fc & BIT(1)) == BIT(1)) { + pkt_data->flags |= IEEE80211_TXPD_REQ_TX_STATUS; + fc &= ~BIT(1); + hdr->frame_control = cpu_to_le16(fc); + } + + if (!(fc & IEEE80211_FCTL_PROTECTED)) + pkt_data->flags |= IEEE80211_TXPD_DO_NOT_ENCRYPT; + + dev->stats.tx_packets++; + dev->stats.tx_bytes += skb->len; + + dev_queue_xmit(skb); + + return 0; +} + +/* helper functions for pending packets for when queues are stopped */ + +void ieee80211_clear_tx_pending(struct ieee80211_local *local) +{ + int i, j; + struct ieee80211_tx_stored_packet *store; + + for (i = 0; i < local->hw.queues; i++) { + if (!__ieee80211_queue_pending(local, i)) + continue; + store = &local->pending_packet[i]; + kfree_skb(store->skb); + for (j = 0; j < store->num_extra_frag; j++) + kfree_skb(store->extra_frag[j]); + kfree(store->extra_frag); + clear_bit(IEEE80211_LINK_STATE_PENDING, &local->state[i]); + } +} + +void ieee80211_tx_pending(unsigned long data) +{ + struct ieee80211_local *local = (struct ieee80211_local *)data; + struct net_device *dev = local->mdev; + struct ieee80211_tx_stored_packet *store; + struct ieee80211_txrx_data tx; + int i, ret, reschedule = 0; + + netif_tx_lock_bh(dev); + for (i = 0; i < local->hw.queues; i++) { + if (__ieee80211_queue_stopped(local, i)) + continue; + if (!__ieee80211_queue_pending(local, i)) { + reschedule = 1; + continue; + } + store = &local->pending_packet[i]; + tx.u.tx.control = &store->control; + tx.u.tx.extra_frag = store->extra_frag; + tx.u.tx.num_extra_frag = store->num_extra_frag; + tx.u.tx.last_frag_hwrate = store->last_frag_hwrate; + tx.u.tx.last_frag_rate = store->last_frag_rate; + tx.flags = 0; + if (store->last_frag_rate_ctrl_probe) + tx.flags |= IEEE80211_TXRXD_TXPROBE_LAST_FRAG; + ret = __ieee80211_tx(local, store->skb, &tx); + if (ret) { + if (ret == IEEE80211_TX_FRAG_AGAIN) + store->skb = NULL; + } else { + clear_bit(IEEE80211_LINK_STATE_PENDING, + &local->state[i]); + reschedule = 1; + } + } + netif_tx_unlock_bh(dev); + if (reschedule) { + if (!ieee80211_qdisc_installed(dev)) { + if (!__ieee80211_queue_stopped(local, 0)) + netif_wake_queue(dev); + } else + netif_schedule(dev); + } +} + +/* functions for 
drivers to get certain frames */ + +static void ieee80211_beacon_add_tim(struct ieee80211_local *local, + struct ieee80211_if_ap *bss, + struct sk_buff *skb) +{ + u8 *pos, *tim; + int aid0 = 0; + int i, have_bits = 0, n1, n2; + + /* Generate bitmap for TIM only if there are any STAs in power save + * mode. */ + read_lock_bh(&local->sta_lock); + if (atomic_read(&bss->num_sta_ps) > 0) + /* in the hope that this is faster than + * checking byte-for-byte */ + have_bits = !bitmap_empty((unsigned long*)bss->tim, + IEEE80211_MAX_AID+1); + + if (bss->dtim_count == 0) + bss->dtim_count = bss->dtim_period - 1; + else + bss->dtim_count--; + + tim = pos = (u8 *) skb_put(skb, 6); + *pos++ = WLAN_EID_TIM; + *pos++ = 4; + *pos++ = bss->dtim_count; + *pos++ = bss->dtim_period; + + if (bss->dtim_count == 0 && !skb_queue_empty(&bss->ps_bc_buf)) + aid0 = 1; + + if (have_bits) { + /* Find largest even number N1 so that bits numbered 1 through + * (N1 x 8) - 1 in the bitmap are 0 and number N2 so that bits + * (N2 + 1) x 8 through 2007 are 0. */ + n1 = 0; + for (i = 0; i < IEEE80211_MAX_TIM_LEN; i++) { + if (bss->tim[i]) { + n1 = i & 0xfe; + break; + } + } + n2 = n1; + for (i = IEEE80211_MAX_TIM_LEN - 1; i >= n1; i--) { + if (bss->tim[i]) { + n2 = i; + break; + } + } + + /* Bitmap control */ + *pos++ = n1 | aid0; + /* Part Virt Bitmap */ + memcpy(pos, bss->tim + n1, n2 - n1 + 1); + + tim[1] = n2 - n1 + 4; + skb_put(skb, n2 - n1); + } else { + *pos++ = aid0; /* Bitmap control */ + *pos++ = 0; /* Part Virt Bitmap */ + } + read_unlock_bh(&local->sta_lock); +} + +struct sk_buff *ieee80211_beacon_get(struct ieee80211_hw *hw, int if_id, + struct ieee80211_tx_control *control) +{ + struct ieee80211_local *local = hw_to_local(hw); + struct sk_buff *skb; + struct net_device *bdev; + struct ieee80211_sub_if_data *sdata = NULL; + struct ieee80211_if_ap *ap = NULL; + struct ieee80211_rate *rate; + struct rate_control_extra extra; + u8 *b_head, *b_tail; + int bh_len, bt_len; + + bdev = dev_get_by_index(&init_net, if_id); + if (bdev) { + sdata = IEEE80211_DEV_TO_SUB_IF(bdev); + ap = &sdata->u.ap; + dev_put(bdev); + } + + if (!ap || sdata->type != IEEE80211_IF_TYPE_AP || + !ap->beacon_head) { +#ifdef CONFIG_MAC80211_VERBOSE_DEBUG + if (net_ratelimit()) + printk(KERN_DEBUG "no beacon data avail for idx=%d " + "(%s)\n", if_id, bdev ? bdev->name : "N/A"); +#endif /* CONFIG_MAC80211_VERBOSE_DEBUG */ + return NULL; + } + + /* Assume we are generating the normal beacon locally */ + b_head = ap->beacon_head; + b_tail = ap->beacon_tail; + bh_len = ap->beacon_head_len; + bt_len = ap->beacon_tail_len; + + skb = dev_alloc_skb(local->tx_headroom + + bh_len + bt_len + 256 /* maximum TIM len */); + if (!skb) + return NULL; + + skb_reserve(skb, local->tx_headroom); + memcpy(skb_put(skb, bh_len), b_head, bh_len); + + ieee80211_include_sequence(sdata, (struct ieee80211_hdr *)skb->data); + + ieee80211_beacon_add_tim(local, ap, skb); + + if (b_tail) { + memcpy(skb_put(skb, bt_len), b_tail, bt_len); + } + + if (control) { + memset(&extra, 0, sizeof(extra)); + extra.mode = local->oper_hw_mode; + + rate = rate_control_get_rate(local, local->mdev, skb, &extra); + if (!rate) { + if (net_ratelimit()) { + printk(KERN_DEBUG "%s: ieee80211_beacon_get: no rate " + "found\n", wiphy_name(local->hw.wiphy)); + } + dev_kfree_skb(skb); + return NULL; + } + + control->tx_rate = + ((sdata->flags & IEEE80211_SDATA_SHORT_PREAMBLE) && + (rate->flags & IEEE80211_RATE_PREAMBLE2)) ? 
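/*
 * Worked example (illustrative only) of the partial virtual bitmap built
 * by ieee80211_beacon_add_tim() above, assuming AID n maps to bit n % 8
 * of octet n / 8 of bss->tim[] (the filling code is not shown here):
 * suppose only AID 9 and AID 44 have buffered frames, so tim[1] = 0x02
 * and tim[5] = 0x10 are the only non-zero octets.  The first non-zero
 * octet index rounded down to an even number gives N1 = 0, the last
 * non-zero octet gives N2 = 5.  Octets 0..5 (six bytes) are copied as
 * the partial bitmap, the element length becomes N2 - N1 + 4 = 9 (DTIM
 * count, DTIM period, bitmap control, plus the six bitmap octets), and
 * the bitmap control byte is N1 | aid0, i.e. bitmap offset N1/2 in bits
 * 1-7 with the multicast/AID 0 flag in bit 0.
 */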
+ rate->val2 : rate->val; + control->antenna_sel_tx = local->hw.conf.antenna_sel_tx; + control->power_level = local->hw.conf.power_level; + control->flags |= IEEE80211_TXCTL_NO_ACK; + control->retry_limit = 1; + control->flags |= IEEE80211_TXCTL_CLEAR_DST_MASK; + } + + ap->num_beacons++; + return skb; +} +EXPORT_SYMBOL(ieee80211_beacon_get); + +void ieee80211_rts_get(struct ieee80211_hw *hw, int if_id, + const void *frame, size_t frame_len, + const struct ieee80211_tx_control *frame_txctl, + struct ieee80211_rts *rts) +{ + const struct ieee80211_hdr *hdr = frame; + u16 fctl; + + fctl = IEEE80211_FTYPE_CTL | IEEE80211_STYPE_RTS; + rts->frame_control = cpu_to_le16(fctl); + rts->duration = ieee80211_rts_duration(hw, if_id, frame_len, frame_txctl); + memcpy(rts->ra, hdr->addr1, sizeof(rts->ra)); + memcpy(rts->ta, hdr->addr2, sizeof(rts->ta)); +} +EXPORT_SYMBOL(ieee80211_rts_get); + +void ieee80211_ctstoself_get(struct ieee80211_hw *hw, int if_id, + const void *frame, size_t frame_len, + const struct ieee80211_tx_control *frame_txctl, + struct ieee80211_cts *cts) +{ + const struct ieee80211_hdr *hdr = frame; + u16 fctl; + + fctl = IEEE80211_FTYPE_CTL | IEEE80211_STYPE_CTS; + cts->frame_control = cpu_to_le16(fctl); + cts->duration = ieee80211_ctstoself_duration(hw, if_id, frame_len, frame_txctl); + memcpy(cts->ra, hdr->addr1, sizeof(cts->ra)); +} +EXPORT_SYMBOL(ieee80211_ctstoself_get); + +struct sk_buff * +ieee80211_get_buffered_bc(struct ieee80211_hw *hw, int if_id, + struct ieee80211_tx_control *control) +{ + struct ieee80211_local *local = hw_to_local(hw); + struct sk_buff *skb; + struct sta_info *sta; + ieee80211_tx_handler *handler; + struct ieee80211_txrx_data tx; + ieee80211_txrx_result res = TXRX_DROP; + struct net_device *bdev; + struct ieee80211_sub_if_data *sdata; + struct ieee80211_if_ap *bss = NULL; + + bdev = dev_get_by_index(&init_net, if_id); + if (bdev) { + sdata = IEEE80211_DEV_TO_SUB_IF(bdev); + bss = &sdata->u.ap; + dev_put(bdev); + } + if (!bss || sdata->type != IEEE80211_IF_TYPE_AP || !bss->beacon_head) + return NULL; + + if (bss->dtim_count != 0) + return NULL; /* send buffered bc/mc only after DTIM beacon */ + memset(control, 0, sizeof(*control)); + while (1) { + skb = skb_dequeue(&bss->ps_bc_buf); + if (!skb) + return NULL; + local->total_ps_buffered--; + + if (!skb_queue_empty(&bss->ps_bc_buf) && skb->len >= 2) { + struct ieee80211_hdr *hdr = + (struct ieee80211_hdr *) skb->data; + /* more buffered multicast/broadcast frames ==> set + * MoreData flag in IEEE 802.11 header to inform PS + * STAs */ + hdr->frame_control |= + cpu_to_le16(IEEE80211_FCTL_MOREDATA); + } + + if (!ieee80211_tx_prepare(&tx, skb, local->mdev, control)) + break; + dev_kfree_skb_any(skb); + } + sta = tx.sta; + tx.flags |= IEEE80211_TXRXD_TXPS_BUFFERED; + tx.u.tx.mode = local->hw.conf.mode; + + for (handler = local->tx_handlers; *handler != NULL; handler++) { + res = (*handler)(&tx); + if (res == TXRX_DROP || res == TXRX_QUEUED) + break; + } + dev_put(tx.dev); + skb = tx.skb; /* handlers are allowed to change skb */ + + if (res == TXRX_DROP) { + I802_DEBUG_INC(local->tx_handlers_drop); + dev_kfree_skb(skb); + skb = NULL; + } else if (res == TXRX_QUEUED) { + I802_DEBUG_INC(local->tx_handlers_queued); + skb = NULL; + } + + if (sta) + sta_info_put(sta); + + return skb; +} +EXPORT_SYMBOL(ieee80211_get_buffered_bc); diff --git a/net/mac80211/util.c b/net/mac80211/util.c new file mode 100644 index 0000000000000000000000000000000000000000..5a0564e1dbd6ca387c5c44d854bcd2b38fc0e84a --- /dev/null +++ 
b/net/mac80211/util.c @@ -0,0 +1,486 @@ +/* + * Copyright 2002-2005, Instant802 Networks, Inc. + * Copyright 2005-2006, Devicescape Software, Inc. + * Copyright 2006-2007 Jiri Benc + * Copyright 2007 Johannes Berg + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * utilities for mac80211 + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "ieee80211_i.h" +#include "ieee80211_rate.h" +#include "wme.h" + +/* privid for wiphys to determine whether they belong to us or not */ +void *mac80211_wiphy_privid = &mac80211_wiphy_privid; + +/* See IEEE 802.1H for LLC/SNAP encapsulation/decapsulation */ +/* Ethernet-II snap header (RFC1042 for most EtherTypes) */ +const unsigned char rfc1042_header[] = + { 0xaa, 0xaa, 0x03, 0x00, 0x00, 0x00 }; + +/* Bridge-Tunnel header (for EtherTypes ETH_P_AARP and ETH_P_IPX) */ +const unsigned char bridge_tunnel_header[] = + { 0xaa, 0xaa, 0x03, 0x00, 0x00, 0xf8 }; + +/* No encapsulation header if EtherType < 0x600 (=length) */ +static const unsigned char eapol_header[] = + { 0xaa, 0xaa, 0x03, 0x00, 0x00, 0x00, 0x88, 0x8e }; + + +static int rate_list_match(const int *rate_list, int rate) +{ + int i; + + if (!rate_list) + return 0; + + for (i = 0; rate_list[i] >= 0; i++) + if (rate_list[i] == rate) + return 1; + + return 0; +} + +void ieee80211_prepare_rates(struct ieee80211_local *local, + struct ieee80211_hw_mode *mode) +{ + int i; + + for (i = 0; i < mode->num_rates; i++) { + struct ieee80211_rate *rate = &mode->rates[i]; + + rate->flags &= ~(IEEE80211_RATE_SUPPORTED | + IEEE80211_RATE_BASIC); + + if (local->supp_rates[mode->mode]) { + if (!rate_list_match(local->supp_rates[mode->mode], + rate->rate)) + continue; + } + + rate->flags |= IEEE80211_RATE_SUPPORTED; + + /* Use configured basic rate set if it is available. If not, + * use defaults that are sane for most cases. 
*/ + if (local->basic_rates[mode->mode]) { + if (rate_list_match(local->basic_rates[mode->mode], + rate->rate)) + rate->flags |= IEEE80211_RATE_BASIC; + } else switch (mode->mode) { + case MODE_IEEE80211A: + if (rate->rate == 60 || rate->rate == 120 || + rate->rate == 240) + rate->flags |= IEEE80211_RATE_BASIC; + break; + case MODE_IEEE80211B: + if (rate->rate == 10 || rate->rate == 20) + rate->flags |= IEEE80211_RATE_BASIC; + break; + case MODE_IEEE80211G: + if (rate->rate == 10 || rate->rate == 20 || + rate->rate == 55 || rate->rate == 110) + rate->flags |= IEEE80211_RATE_BASIC; + break; + case NUM_IEEE80211_MODES: + /* not useful */ + break; + } + + /* Set ERP and MANDATORY flags based on phymode */ + switch (mode->mode) { + case MODE_IEEE80211A: + if (rate->rate == 60 || rate->rate == 120 || + rate->rate == 240) + rate->flags |= IEEE80211_RATE_MANDATORY; + break; + case MODE_IEEE80211B: + if (rate->rate == 10) + rate->flags |= IEEE80211_RATE_MANDATORY; + break; + case MODE_IEEE80211G: + if (rate->rate == 10 || rate->rate == 20 || + rate->rate == 55 || rate->rate == 110 || + rate->rate == 60 || rate->rate == 120 || + rate->rate == 240) + rate->flags |= IEEE80211_RATE_MANDATORY; + break; + case NUM_IEEE80211_MODES: + /* not useful */ + break; + } + if (ieee80211_is_erp_rate(mode->mode, rate->rate)) + rate->flags |= IEEE80211_RATE_ERP; + } +} + +u8 *ieee80211_get_bssid(struct ieee80211_hdr *hdr, size_t len) +{ + u16 fc; + + if (len < 24) + return NULL; + + fc = le16_to_cpu(hdr->frame_control); + + switch (fc & IEEE80211_FCTL_FTYPE) { + case IEEE80211_FTYPE_DATA: + switch (fc & (IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS)) { + case IEEE80211_FCTL_TODS: + return hdr->addr1; + case (IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS): + return NULL; + case IEEE80211_FCTL_FROMDS: + return hdr->addr2; + case 0: + return hdr->addr3; + } + break; + case IEEE80211_FTYPE_MGMT: + return hdr->addr3; + case IEEE80211_FTYPE_CTL: + if ((fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_PSPOLL) + return hdr->addr1; + else + return NULL; + } + + return NULL; +} + +int ieee80211_get_hdrlen(u16 fc) +{ + int hdrlen = 24; + + switch (fc & IEEE80211_FCTL_FTYPE) { + case IEEE80211_FTYPE_DATA: + if ((fc & IEEE80211_FCTL_FROMDS) && (fc & IEEE80211_FCTL_TODS)) + hdrlen = 30; /* Addr4 */ + /* + * The QoS Control field is two bytes and its presence is + * indicated by the IEEE80211_STYPE_QOS_DATA bit. Add 2 to + * hdrlen if that bit is set. + * This works by masking out the bit and shifting it to + * bit position 1 so the result has the value 0 or 2. + */ + hdrlen += (fc & IEEE80211_STYPE_QOS_DATA) + >> (ilog2(IEEE80211_STYPE_QOS_DATA)-1); + break; + case IEEE80211_FTYPE_CTL: + /* + * ACK and CTS are 10 bytes, all others 16. 
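(The QoS adjustment a few lines up works out numerically as follows: IEEE80211_STYPE_QOS_DATA is 0x0080 and ilog2(0x0080) - 1 is 6, so the masked bit is shifted right by six places and the whole expression is either 0 or 2.)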
To see how + * to get this condition consider + * subtype mask: 0b0000000011110000 (0x00F0) + * ACK subtype: 0b0000000011010000 (0x00D0) + * CTS subtype: 0b0000000011000000 (0x00C0) + * bits that matter: ^^^ (0x00E0) + * value of those: 0b0000000011000000 (0x00C0) + */ + if ((fc & 0xE0) == 0xC0) + hdrlen = 10; + else + hdrlen = 16; + break; + } + + return hdrlen; +} +EXPORT_SYMBOL(ieee80211_get_hdrlen); + +int ieee80211_get_hdrlen_from_skb(const struct sk_buff *skb) +{ + const struct ieee80211_hdr *hdr = (const struct ieee80211_hdr *) skb->data; + int hdrlen; + + if (unlikely(skb->len < 10)) + return 0; + hdrlen = ieee80211_get_hdrlen(le16_to_cpu(hdr->frame_control)); + if (unlikely(hdrlen > skb->len)) + return 0; + return hdrlen; +} +EXPORT_SYMBOL(ieee80211_get_hdrlen_from_skb); + +int ieee80211_is_eapol(const struct sk_buff *skb) +{ + const struct ieee80211_hdr *hdr; + u16 fc; + int hdrlen; + + if (unlikely(skb->len < 10)) + return 0; + + hdr = (const struct ieee80211_hdr *) skb->data; + fc = le16_to_cpu(hdr->frame_control); + + if (unlikely(!WLAN_FC_DATA_PRESENT(fc))) + return 0; + + hdrlen = ieee80211_get_hdrlen(fc); + + if (unlikely(skb->len >= hdrlen + sizeof(eapol_header) && + memcmp(skb->data + hdrlen, eapol_header, + sizeof(eapol_header)) == 0)) + return 1; + + return 0; +} + +void ieee80211_tx_set_iswep(struct ieee80211_txrx_data *tx) +{ + struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) tx->skb->data; + + hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_PROTECTED); + if (tx->u.tx.extra_frag) { + struct ieee80211_hdr *fhdr; + int i; + for (i = 0; i < tx->u.tx.num_extra_frag; i++) { + fhdr = (struct ieee80211_hdr *) + tx->u.tx.extra_frag[i]->data; + fhdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_PROTECTED); + } + } +} + +int ieee80211_frame_duration(struct ieee80211_local *local, size_t len, + int rate, int erp, int short_preamble) +{ + int dur; + + /* calculate duration (in microseconds, rounded up to next higher + * integer if it includes a fractional microsecond) to send frame of + * len bytes (does not include FCS) at the given rate. Duration will + * also include SIFS. + * + * rate is in 100 kbps, so divident is multiplied by 10 in the + * DIV_ROUND_UP() operations. + */ + + if (local->hw.conf.phymode == MODE_IEEE80211A || erp) { + /* + * OFDM: + * + * N_DBPS = DATARATE x 4 + * N_SYM = Ceiling((16+8xLENGTH+6) / N_DBPS) + * (16 = SIGNAL time, 6 = tail bits) + * TXTIME = T_PREAMBLE + T_SIGNAL + T_SYM x N_SYM + Signal Ext + * + * T_SYM = 4 usec + * 802.11a - 17.5.2: aSIFSTime = 16 usec + * 802.11g - 19.8.4: aSIFSTime = 10 usec + + * signal ext = 6 usec + */ + dur = 16; /* SIFS + signal ext */ + dur += 16; /* 17.3.2.3: T_PREAMBLE = 16 usec */ + dur += 4; /* 17.3.2.3: T_SIGNAL = 4 usec */ + dur += 4 * DIV_ROUND_UP((16 + 8 * (len + 4) + 6) * 10, + 4 * rate); /* T_SYM x N_SYM */ + } else { + /* + * 802.11b or 802.11g with 802.11b compatibility: + * 18.3.4: TXTIME = PreambleLength + PLCPHeaderTime + + * Ceiling(((LENGTH+PBCC)x8)/DATARATE). PBCC=0. + * + * 802.11 (DS): 15.3.3, 802.11b: 18.3.4 + * aSIFSTime = 10 usec + * aPreambleLength = 144 usec or 72 usec with short preamble + * aPLCPHeaderLength = 48 usec or 24 usec with short preamble + */ + dur = 10; /* aSIFSTime = 10 usec */ + dur += short_preamble ? 
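/*
 * Worked example for the OFDM branch above (illustrative numbers only):
 * a 100-byte MPDU, FCS not included, at 54 Mbps (rate == 540 in
 * 100 kbps units).  N_DBPS = 216, so
 * N_SYM = DIV_ROUND_UP(16 + 8 * (100 + 4) + 6, 216) = DIV_ROUND_UP(854, 216) = 4
 * symbols, i.e. 16 usec of payload time; adding 16 usec preamble,
 * 4 usec SIGNAL and 16 usec SIFS + signal extension gives dur = 52 usec.
 */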
(72 + 24) : (144 + 48); + + dur += DIV_ROUND_UP(8 * (len + 4) * 10, rate); + } + + return dur; +} + +/* Exported duration function for driver use */ +__le16 ieee80211_generic_frame_duration(struct ieee80211_hw *hw, int if_id, + size_t frame_len, int rate) +{ + struct ieee80211_local *local = hw_to_local(hw); + struct net_device *bdev = dev_get_by_index(&init_net, if_id); + struct ieee80211_sub_if_data *sdata; + u16 dur; + int erp; + + if (unlikely(!bdev)) + return 0; + + sdata = IEEE80211_DEV_TO_SUB_IF(bdev); + erp = ieee80211_is_erp_rate(hw->conf.phymode, rate); + dur = ieee80211_frame_duration(local, frame_len, rate, + erp, sdata->flags & IEEE80211_SDATA_SHORT_PREAMBLE); + + dev_put(bdev); + return cpu_to_le16(dur); +} +EXPORT_SYMBOL(ieee80211_generic_frame_duration); + +__le16 ieee80211_rts_duration(struct ieee80211_hw *hw, int if_id, + size_t frame_len, + const struct ieee80211_tx_control *frame_txctl) +{ + struct ieee80211_local *local = hw_to_local(hw); + struct ieee80211_rate *rate; + struct net_device *bdev = dev_get_by_index(&init_net, if_id); + struct ieee80211_sub_if_data *sdata; + int short_preamble; + int erp; + u16 dur; + + if (unlikely(!bdev)) + return 0; + + sdata = IEEE80211_DEV_TO_SUB_IF(bdev); + short_preamble = sdata->flags & IEEE80211_SDATA_SHORT_PREAMBLE; + + rate = frame_txctl->rts_rate; + erp = !!(rate->flags & IEEE80211_RATE_ERP); + + /* CTS duration */ + dur = ieee80211_frame_duration(local, 10, rate->rate, + erp, short_preamble); + /* Data frame duration */ + dur += ieee80211_frame_duration(local, frame_len, rate->rate, + erp, short_preamble); + /* ACK duration */ + dur += ieee80211_frame_duration(local, 10, rate->rate, + erp, short_preamble); + + dev_put(bdev); + return cpu_to_le16(dur); +} +EXPORT_SYMBOL(ieee80211_rts_duration); + +__le16 ieee80211_ctstoself_duration(struct ieee80211_hw *hw, int if_id, + size_t frame_len, + const struct ieee80211_tx_control *frame_txctl) +{ + struct ieee80211_local *local = hw_to_local(hw); + struct ieee80211_rate *rate; + struct net_device *bdev = dev_get_by_index(&init_net, if_id); + struct ieee80211_sub_if_data *sdata; + int short_preamble; + int erp; + u16 dur; + + if (unlikely(!bdev)) + return 0; + + sdata = IEEE80211_DEV_TO_SUB_IF(bdev); + short_preamble = sdata->flags & IEEE80211_SDATA_SHORT_PREAMBLE; + + rate = frame_txctl->rts_rate; + erp = !!(rate->flags & IEEE80211_RATE_ERP); + + /* Data frame duration */ + dur = ieee80211_frame_duration(local, frame_len, rate->rate, + erp, short_preamble); + if (!(frame_txctl->flags & IEEE80211_TXCTL_NO_ACK)) { + /* ACK duration */ + dur += ieee80211_frame_duration(local, 10, rate->rate, + erp, short_preamble); + } + + dev_put(bdev); + return cpu_to_le16(dur); +} +EXPORT_SYMBOL(ieee80211_ctstoself_duration); + +struct ieee80211_rate * +ieee80211_get_rate(struct ieee80211_local *local, int phymode, int hw_rate) +{ + struct ieee80211_hw_mode *mode; + int r; + + list_for_each_entry(mode, &local->modes_list, list) { + if (mode->mode != phymode) + continue; + for (r = 0; r < mode->num_rates; r++) { + struct ieee80211_rate *rate = &mode->rates[r]; + if (rate->val == hw_rate || + (rate->flags & IEEE80211_RATE_PREAMBLE2 && + rate->val2 == hw_rate)) + return rate; + } + } + + return NULL; +} + +void ieee80211_wake_queue(struct ieee80211_hw *hw, int queue) +{ + struct ieee80211_local *local = hw_to_local(hw); + + if (test_and_clear_bit(IEEE80211_LINK_STATE_XOFF, + &local->state[queue])) { + if (test_bit(IEEE80211_LINK_STATE_PENDING, + &local->state[queue])) + 
tasklet_schedule(&local->tx_pending_tasklet); + else + if (!ieee80211_qdisc_installed(local->mdev)) { + if (queue == 0) + netif_wake_queue(local->mdev); + } else + __netif_schedule(local->mdev); + } +} +EXPORT_SYMBOL(ieee80211_wake_queue); + +void ieee80211_stop_queue(struct ieee80211_hw *hw, int queue) +{ + struct ieee80211_local *local = hw_to_local(hw); + + if (!ieee80211_qdisc_installed(local->mdev) && queue == 0) + netif_stop_queue(local->mdev); + set_bit(IEEE80211_LINK_STATE_XOFF, &local->state[queue]); +} +EXPORT_SYMBOL(ieee80211_stop_queue); + +void ieee80211_start_queues(struct ieee80211_hw *hw) +{ + struct ieee80211_local *local = hw_to_local(hw); + int i; + + for (i = 0; i < local->hw.queues; i++) + clear_bit(IEEE80211_LINK_STATE_XOFF, &local->state[i]); + if (!ieee80211_qdisc_installed(local->mdev)) + netif_start_queue(local->mdev); +} +EXPORT_SYMBOL(ieee80211_start_queues); + +void ieee80211_stop_queues(struct ieee80211_hw *hw) +{ + int i; + + for (i = 0; i < hw->queues; i++) + ieee80211_stop_queue(hw, i); +} +EXPORT_SYMBOL(ieee80211_stop_queues); + +void ieee80211_wake_queues(struct ieee80211_hw *hw) +{ + int i; + + for (i = 0; i < hw->queues; i++) + ieee80211_wake_queue(hw, i); +} +EXPORT_SYMBOL(ieee80211_wake_queues); diff --git a/net/mac80211/wep.c b/net/mac80211/wep.c index 1ad3d75281cc9e044b80ec97c5d2a56e4050e6aa..6675261e958f41abb0fc23a8bf3f3178e0364c8f 100644 --- a/net/mac80211/wep.c +++ b/net/mac80211/wep.c @@ -63,11 +63,11 @@ static inline int ieee80211_wep_weak_iv(u32 iv, int keylen) } -void ieee80211_wep_get_iv(struct ieee80211_local *local, - struct ieee80211_key *key, u8 *iv) +static void ieee80211_wep_get_iv(struct ieee80211_local *local, + struct ieee80211_key *key, u8 *iv) { local->wep_iv++; - if (ieee80211_wep_weak_iv(local->wep_iv, key->keylen)) + if (ieee80211_wep_weak_iv(local->wep_iv, key->conf.keylen)) local->wep_iv += 0x0100; if (!iv) @@ -76,13 +76,13 @@ void ieee80211_wep_get_iv(struct ieee80211_local *local, *iv++ = (local->wep_iv >> 16) & 0xff; *iv++ = (local->wep_iv >> 8) & 0xff; *iv++ = local->wep_iv & 0xff; - *iv++ = key->keyidx << 6; + *iv++ = key->conf.keyidx << 6; } -u8 * ieee80211_wep_add_iv(struct ieee80211_local *local, - struct sk_buff *skb, - struct ieee80211_key *key) +static u8 *ieee80211_wep_add_iv(struct ieee80211_local *local, + struct sk_buff *skb, + struct ieee80211_key *key) { struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; u16 fc; @@ -109,9 +109,9 @@ u8 * ieee80211_wep_add_iv(struct ieee80211_local *local, } -void ieee80211_wep_remove_iv(struct ieee80211_local *local, - struct sk_buff *skb, - struct ieee80211_key *key) +static void ieee80211_wep_remove_iv(struct ieee80211_local *local, + struct sk_buff *skb, + struct ieee80211_key *key) { struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; u16 fc; @@ -159,10 +159,10 @@ int ieee80211_wep_encrypt(struct ieee80211_local *local, struct sk_buff *skb, u8 *rc4key, *iv; size_t len; - if (!key || key->alg != ALG_WEP) + if (!key || key->conf.alg != ALG_WEP) return -1; - klen = 3 + key->keylen; + klen = 3 + key->conf.keylen; rc4key = kmalloc(klen, GFP_ATOMIC); if (!rc4key) return -1; @@ -179,7 +179,7 @@ int ieee80211_wep_encrypt(struct ieee80211_local *local, struct sk_buff *skb, memcpy(rc4key, iv, 3); /* Copy rest of the WEP key (the secret part) */ - memcpy(rc4key + 3, key->key, key->keylen); + memcpy(rc4key + 3, key->conf.key, key->conf.keylen); /* Add room for ICV */ skb_put(skb, WEP_ICV_LEN); @@ -251,10 +251,10 @@ int ieee80211_wep_decrypt(struct 
ieee80211_local *local, struct sk_buff *skb, keyidx = skb->data[hdrlen + 3] >> 6; - if (!key || keyidx != key->keyidx || key->alg != ALG_WEP) + if (!key || keyidx != key->conf.keyidx || key->conf.alg != ALG_WEP) return -1; - klen = 3 + key->keylen; + klen = 3 + key->conf.keylen; rc4key = kmalloc(klen, GFP_ATOMIC); if (!rc4key) @@ -264,7 +264,7 @@ int ieee80211_wep_decrypt(struct ieee80211_local *local, struct sk_buff *skb, memcpy(rc4key, skb->data + hdrlen, 3); /* Copy rest of the WEP key (the secret part) */ - memcpy(rc4key + 3, key->key, key->keylen); + memcpy(rc4key + 3, key->conf.key, key->conf.keylen); if (ieee80211_wep_decrypt_data(local->wep_rx_tfm, rc4key, klen, skb->data + hdrlen + WEP_IV_LEN, @@ -286,43 +286,99 @@ int ieee80211_wep_decrypt(struct ieee80211_local *local, struct sk_buff *skb, } -int ieee80211_wep_get_keyidx(struct sk_buff *skb) +u8 * ieee80211_wep_is_weak_iv(struct sk_buff *skb, struct ieee80211_key *key) { struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; u16 fc; int hdrlen; + u8 *ivpos; + u32 iv; fc = le16_to_cpu(hdr->frame_control); if (!(fc & IEEE80211_FCTL_PROTECTED)) - return -1; + return NULL; hdrlen = ieee80211_get_hdrlen(fc); + ivpos = skb->data + hdrlen; + iv = (ivpos[0] << 16) | (ivpos[1] << 8) | ivpos[2]; - if (skb->len < 8 + hdrlen) - return -1; + if (ieee80211_wep_weak_iv(iv, key->conf.keylen)) + return ivpos; - return skb->data[hdrlen + 3] >> 6; + return NULL; } +ieee80211_txrx_result +ieee80211_crypto_wep_decrypt(struct ieee80211_txrx_data *rx) +{ + if ((rx->fc & IEEE80211_FCTL_FTYPE) != IEEE80211_FTYPE_DATA && + ((rx->fc & IEEE80211_FCTL_FTYPE) != IEEE80211_FTYPE_MGMT || + (rx->fc & IEEE80211_FCTL_STYPE) != IEEE80211_STYPE_AUTH)) + return TXRX_CONTINUE; + + if (!(rx->u.rx.status->flag & RX_FLAG_DECRYPTED)) { + if (ieee80211_wep_decrypt(rx->local, rx->skb, rx->key)) { + if (net_ratelimit()) + printk(KERN_DEBUG "%s: RX WEP frame, decrypt " + "failed\n", rx->dev->name); + return TXRX_DROP; + } + } else if (!(rx->u.rx.status->flag & RX_FLAG_IV_STRIPPED)) { + ieee80211_wep_remove_iv(rx->local, rx->skb, rx->key); + /* remove ICV */ + skb_trim(rx->skb, rx->skb->len - 4); + } -u8 * ieee80211_wep_is_weak_iv(struct sk_buff *skb, struct ieee80211_key *key) + return TXRX_CONTINUE; +} + +static int wep_encrypt_skb(struct ieee80211_txrx_data *tx, struct sk_buff *skb) { - struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; + if (!(tx->key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE)) { + if (ieee80211_wep_encrypt(tx->local, skb, tx->key)) + return -1; + } else { + tx->u.tx.control->key_idx = tx->key->conf.hw_key_idx; + if (tx->key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_IV) { + if (!ieee80211_wep_add_iv(tx->local, skb, tx->key)) + return -1; + } + } + return 0; +} + +ieee80211_txrx_result +ieee80211_crypto_wep_encrypt(struct ieee80211_txrx_data *tx) +{ + struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) tx->skb->data; u16 fc; - int hdrlen; - u8 *ivpos; - u32 iv; fc = le16_to_cpu(hdr->frame_control); - if (!(fc & IEEE80211_FCTL_PROTECTED)) - return NULL; - hdrlen = ieee80211_get_hdrlen(fc); - ivpos = skb->data + hdrlen; - iv = (ivpos[0] << 16) | (ivpos[1] << 8) | ivpos[2]; + if (((fc & IEEE80211_FCTL_FTYPE) != IEEE80211_FTYPE_DATA && + ((fc & IEEE80211_FCTL_FTYPE) != IEEE80211_FTYPE_MGMT || + (fc & IEEE80211_FCTL_STYPE) != IEEE80211_STYPE_AUTH))) + return TXRX_CONTINUE; - if (ieee80211_wep_weak_iv(iv, key->keylen)) - return ivpos; + tx->u.tx.control->iv_len = WEP_IV_LEN; + tx->u.tx.control->icv_len = WEP_ICV_LEN; + 
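/*
 * For reference, the per-packet RC4 seed built by ieee80211_wep_encrypt()
 * and ieee80211_wep_decrypt() above is simply the 3-byte IV followed by
 * the secret key, hence klen = 3 + keylen.  A hypothetical sketch for a
 * 104-bit (13-byte) key:
 *
 *	u8 seed[3 + 13];
 *	memcpy(seed, iv, 3);            // IV, transmitted in the clear
 *	memcpy(seed + 3, wep_key, 13);  // secret part, never on the air
 *	// seed[] (16 bytes) keys RC4 for exactly one frame
 *
 * The fourth IV byte in the frame only carries the key index
 * (keyidx << 6) and is not part of the RC4 seed.
 */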
ieee80211_tx_set_iswep(tx); - return NULL; + if (wep_encrypt_skb(tx, tx->skb) < 0) { + I802_DEBUG_INC(tx->local->tx_handlers_drop_wep); + return TXRX_DROP; + } + + if (tx->u.tx.extra_frag) { + int i; + for (i = 0; i < tx->u.tx.num_extra_frag; i++) { + if (wep_encrypt_skb(tx, tx->u.tx.extra_frag[i]) < 0) { + I802_DEBUG_INC(tx->local-> + tx_handlers_drop_wep); + return TXRX_DROP; + } + } + } + + return TXRX_CONTINUE; } diff --git a/net/mac80211/wep.h b/net/mac80211/wep.h index bfe29e8e10aac7dfe4447f8034230494dd9ee89b..785fbb4e0dd70d869777b5ecb38d9cbf958ce8f9 100644 --- a/net/mac80211/wep.h +++ b/net/mac80211/wep.h @@ -18,14 +18,6 @@ int ieee80211_wep_init(struct ieee80211_local *local); void ieee80211_wep_free(struct ieee80211_local *local); -void ieee80211_wep_get_iv(struct ieee80211_local *local, - struct ieee80211_key *key, u8 *iv); -u8 * ieee80211_wep_add_iv(struct ieee80211_local *local, - struct sk_buff *skb, - struct ieee80211_key *key); -void ieee80211_wep_remove_iv(struct ieee80211_local *local, - struct sk_buff *skb, - struct ieee80211_key *key); void ieee80211_wep_encrypt_data(struct crypto_blkcipher *tfm, u8 *rc4key, size_t klen, u8 *data, size_t data_len); int ieee80211_wep_decrypt_data(struct crypto_blkcipher *tfm, u8 *rc4key, @@ -34,7 +26,11 @@ int ieee80211_wep_encrypt(struct ieee80211_local *local, struct sk_buff *skb, struct ieee80211_key *key); int ieee80211_wep_decrypt(struct ieee80211_local *local, struct sk_buff *skb, struct ieee80211_key *key); -int ieee80211_wep_get_keyidx(struct sk_buff *skb); u8 * ieee80211_wep_is_weak_iv(struct sk_buff *skb, struct ieee80211_key *key); +ieee80211_txrx_result +ieee80211_crypto_wep_decrypt(struct ieee80211_txrx_data *rx); +ieee80211_txrx_result +ieee80211_crypto_wep_encrypt(struct ieee80211_txrx_data *tx); + #endif /* WEP_H */ diff --git a/net/mac80211/wme.c b/net/mac80211/wme.c index 7ab82b376e1bc92f8d656bfa30f7a3cf4b2b1152..5b8a157975a35dd452bc6e03e17f5dafcae09788 100644 --- a/net/mac80211/wme.c +++ b/net/mac80211/wme.c @@ -18,70 +18,6 @@ #include "ieee80211_i.h" #include "wme.h" -static inline int WLAN_FC_IS_QOS_DATA(u16 fc) -{ - return (fc & 0x8C) == 0x88; -} - - -ieee80211_txrx_result -ieee80211_rx_h_parse_qos(struct ieee80211_txrx_data *rx) -{ - u8 *data = rx->skb->data; - int tid; - - /* does the frame have a qos control field? */ - if (WLAN_FC_IS_QOS_DATA(rx->fc)) { - u8 *qc = data + ieee80211_get_hdrlen(rx->fc) - QOS_CONTROL_LEN; - /* frame has qos control */ - tid = qc[0] & QOS_CONTROL_TID_MASK; - } else { - if (unlikely((rx->fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_MGMT)) { - /* Separate TID for management frames */ - tid = NUM_RX_DATA_QUEUES - 1; - } else { - /* no qos control present */ - tid = 0; /* 802.1d - Best Effort */ - } - } -#ifdef CONFIG_MAC80211_DEBUG_COUNTERS - I802_DEBUG_INC(rx->local->wme_rx_queue[tid]); - if (rx->sta) { - I802_DEBUG_INC(rx->sta->wme_rx_queue[tid]); - } -#endif /* CONFIG_MAC80211_DEBUG_COUNTERS */ - - rx->u.rx.queue = tid; - /* Set skb->priority to 1d tag if highest order bit of TID is not set. - * For now, set skb->priority to 0 for other cases. */ - rx->skb->priority = (tid > 7) ? 
0 : tid; - - return TXRX_CONTINUE; -} - - -ieee80211_txrx_result -ieee80211_rx_h_remove_qos_control(struct ieee80211_txrx_data *rx) -{ - u16 fc = rx->fc; - u8 *data = rx->skb->data; - struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) data; - - if (!WLAN_FC_IS_QOS_DATA(fc)) - return TXRX_CONTINUE; - - /* remove the qos control field, update frame type and meta-data */ - memmove(data + 2, data, ieee80211_get_hdrlen(fc) - 2); - hdr = (struct ieee80211_hdr *) skb_pull(rx->skb, 2); - /* change frame type to non QOS */ - rx->fc = fc &= ~IEEE80211_STYPE_QOS_DATA; - hdr->frame_control = cpu_to_le16(fc); - - return TXRX_CONTINUE; -} - - -#ifdef CONFIG_NET_SCHED /* maximum number of hardware queues we support. */ #define TC_80211_MAX_QUEUES 8 @@ -158,8 +94,6 @@ static inline int wme_downgrade_ac(struct sk_buff *skb) static inline int classify80211(struct sk_buff *skb, struct Qdisc *qd) { struct ieee80211_local *local = wdev_priv(qd->dev->ieee80211_ptr); - struct ieee80211_tx_packet_data *pkt_data = - (struct ieee80211_tx_packet_data *) skb->cb; struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; unsigned short fc = le16_to_cpu(hdr->frame_control); int qos; @@ -172,12 +106,8 @@ static inline int classify80211(struct sk_buff *skb, struct Qdisc *qd) return IEEE80211_TX_QUEUE_DATA0; } - if (unlikely(pkt_data->mgmt_iface)) { - /* Data frames from hostapd (mainly, EAPOL) use AC_VO - * and they will include QoS control fields if - * the target STA is using WME. */ - skb->priority = 7; - return ieee802_1d_to_ac[skb->priority]; + if (0 /* injected */) { + /* use AC from radiotap */ } /* is this a QoS frame? */ @@ -189,14 +119,13 @@ static inline int classify80211(struct sk_buff *skb, struct Qdisc *qd) } /* use the data classifier to determine what 802.1d tag the - * data frame has */ + * data frame has */ skb->priority = classify_1d(skb, qd); - /* incase we are a client verify acm is not set for this ac */ + /* in case we are a client verify acm is not set for this ac */ while (unlikely(local->wmm_acm & BIT(skb->priority))) { if (wme_downgrade_ac(skb)) { - /* No AC with lower priority has acm=0, - * drop packet. */ + /* No AC with lower priority has acm=0, drop packet. 
*/ return -1; } } @@ -217,7 +146,7 @@ static int wme_qdiscop_enqueue(struct sk_buff *skb, struct Qdisc* qd) struct Qdisc *qdisc; int err, queue; - if (pkt_data->requeue) { + if (pkt_data->flags & IEEE80211_TXPD_REQUEUE) { skb_queue_tail(&q->requeued[pkt_data->queue], skb); qd->q.qlen++; return 0; @@ -675,4 +604,3 @@ void ieee80211_wme_unregister(void) { unregister_qdisc(&wme_qdisc_ops); } -#endif /* CONFIG_NET_SCHED */ diff --git a/net/mac80211/wme.h b/net/mac80211/wme.h index f0bff10f0e08676c94c61df8646885973756245d..76c713a6450c37290f0ba521cbe2e212e06bc514 100644 --- a/net/mac80211/wme.h +++ b/net/mac80211/wme.h @@ -24,11 +24,10 @@ #define QOS_CONTROL_TAG1D_MASK 0x07 -ieee80211_txrx_result -ieee80211_rx_h_parse_qos(struct ieee80211_txrx_data *rx); - -ieee80211_txrx_result -ieee80211_rx_h_remove_qos_control(struct ieee80211_txrx_data *rx); +static inline int WLAN_FC_IS_QOS_DATA(u16 fc) +{ + return (fc & 0x8C) == 0x88; +} #ifdef CONFIG_NET_SCHED void ieee80211_install_qdisc(struct net_device *dev); diff --git a/net/mac80211/wpa.c b/net/mac80211/wpa.c index 783af32c6911d89fe90f933e1169c0db6aba2268..6695efba57ec12c52329c203da57d97d45f7946b 100644 --- a/net/mac80211/wpa.c +++ b/net/mac80211/wpa.c @@ -11,10 +11,8 @@ #include #include #include -#include - #include -#include "ieee80211_common.h" + #include "ieee80211_i.h" #include "michael.h" #include "tkip.h" @@ -84,16 +82,16 @@ ieee80211_tx_h_michael_mic_add(struct ieee80211_txrx_data *tx) fc = tx->fc; - if (!tx->key || tx->key->alg != ALG_TKIP || skb->len < 24 || + if (!tx->key || tx->key->conf.alg != ALG_TKIP || skb->len < 24 || !WLAN_FC_DATA_PRESENT(fc)) return TXRX_CONTINUE; if (ieee80211_get_hdr_info(skb, &sa, &da, &qos_tid, &data, &data_len)) return TXRX_DROP; - if (!tx->key->force_sw_encrypt && - !tx->fragmented && - !(tx->local->hw.flags & IEEE80211_HW_TKIP_INCLUDE_MMIC) && + if ((tx->key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE) && + !(tx->flags & IEEE80211_TXRXD_FRAGMENTED) && + !(tx->key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_MMIC) && !wpa_test) { /* hwaccel - with no need for preallocated room for Michael MIC */ @@ -116,8 +114,8 @@ ieee80211_tx_h_michael_mic_add(struct ieee80211_txrx_data *tx) #else authenticator = 1; #endif - key = &tx->key->key[authenticator ? ALG_TKIP_TEMP_AUTH_TX_MIC_KEY : - ALG_TKIP_TEMP_AUTH_RX_MIC_KEY]; + key = &tx->key->conf.key[authenticator ? ALG_TKIP_TEMP_AUTH_TX_MIC_KEY : + ALG_TKIP_TEMP_AUTH_RX_MIC_KEY]; mic = skb_put(skb, MICHAEL_MIC_LEN); michael_mic(key, da, sa, qos_tid & 0x0f, data, data_len, mic); @@ -134,31 +132,20 @@ ieee80211_rx_h_michael_mic_verify(struct ieee80211_txrx_data *rx) u8 mic[MICHAEL_MIC_LEN]; struct sk_buff *skb = rx->skb; int authenticator = 1, wpa_test = 0; + DECLARE_MAC_BUF(mac); fc = rx->fc; - /* If device handles decryption totally, skip this check */ - if ((rx->local->hw.flags & IEEE80211_HW_DEVICE_HIDES_WEP) || - (rx->local->hw.flags & IEEE80211_HW_DEVICE_STRIPS_MIC)) + /* + * No way to verify the MIC if the hardware stripped it + */ + if (rx->u.rx.status->flag & RX_FLAG_MMIC_STRIPPED) return TXRX_CONTINUE; - if (!rx->key || rx->key->alg != ALG_TKIP || + if (!rx->key || rx->key->conf.alg != ALG_TKIP || !(rx->fc & IEEE80211_FCTL_PROTECTED) || !WLAN_FC_DATA_PRESENT(fc)) return TXRX_CONTINUE; - if ((rx->u.rx.status->flag & RX_FLAG_DECRYPTED) && - !rx->key->force_sw_encrypt) { - if (rx->local->hw.flags & IEEE80211_HW_WEP_INCLUDE_IV) { - if (skb->len < MICHAEL_MIC_LEN) - return TXRX_DROP; - } - /* Need to verify Michael MIC sometimes in software even when - * hwaccel is used. 
Atheros ar5212: fragmented frames and QoS - * frames. */ - if (!rx->fragmented && !wpa_test) - goto remove_mic; - } - if (ieee80211_get_hdr_info(skb, &sa, &da, &qos_tid, &data, &data_len) || data_len < MICHAEL_MIC_LEN) return TXRX_DROP; @@ -170,49 +157,28 @@ ieee80211_rx_h_michael_mic_verify(struct ieee80211_txrx_data *rx) #else authenticator = 1; #endif - key = &rx->key->key[authenticator ? ALG_TKIP_TEMP_AUTH_RX_MIC_KEY : - ALG_TKIP_TEMP_AUTH_TX_MIC_KEY]; + key = &rx->key->conf.key[authenticator ? ALG_TKIP_TEMP_AUTH_RX_MIC_KEY : + ALG_TKIP_TEMP_AUTH_TX_MIC_KEY]; michael_mic(key, da, sa, qos_tid & 0x0f, data, data_len, mic); if (memcmp(mic, data + data_len, MICHAEL_MIC_LEN) != 0 || wpa_test) { - if (!rx->u.rx.ra_match) + if (!(rx->flags & IEEE80211_TXRXD_RXRA_MATCH)) return TXRX_DROP; printk(KERN_DEBUG "%s: invalid Michael MIC in data frame from " - MAC_FMT "\n", rx->dev->name, MAC_ARG(sa)); - - do { - struct ieee80211_hdr *hdr; - union iwreq_data wrqu; - char *buf = kmalloc(128, GFP_ATOMIC); - if (!buf) - break; - - /* TODO: needed parameters: count, key type, TSC */ - hdr = (struct ieee80211_hdr *) skb->data; - sprintf(buf, "MLME-MICHAELMICFAILURE.indication(" - "keyid=%d %scast addr=" MAC_FMT ")", - rx->key->keyidx, - hdr->addr1[0] & 0x01 ? "broad" : "uni", - MAC_ARG(hdr->addr2)); - memset(&wrqu, 0, sizeof(wrqu)); - wrqu.data.length = strlen(buf); - wireless_send_event(rx->dev, IWEVCUSTOM, &wrqu, buf); - kfree(buf); - } while (0); - - if (!rx->local->apdev) - return TXRX_DROP; - - ieee80211_rx_mgmt(rx->local, rx->skb, rx->u.rx.status, - ieee80211_msg_michael_mic_failure); + "%s\n", rx->dev->name, print_mac(mac, sa)); - return TXRX_QUEUED; + mac80211_ev_michael_mic_failure(rx->dev, rx->key->conf.keyidx, + (void *) skb->data); + return TXRX_DROP; } - remove_mic: /* remove Michael MIC from payload */ skb_trim(skb, skb->len - MICHAEL_MIC_LEN); + /* update IV in key information to be able to detect replays */ + rx->key->u.tkip.iv32_rx[rx->u.rx.queue] = rx->u.rx.tkip_iv32; + rx->key->u.tkip.iv16_rx[rx->u.rx.queue] = rx->u.rx.tkip_iv16; + return TXRX_CONTINUE; } @@ -230,7 +196,11 @@ static int tkip_encrypt_skb(struct ieee80211_txrx_data *tx, hdrlen = ieee80211_get_hdrlen(fc); len = skb->len - hdrlen; - tailneed = !tx->key->force_sw_encrypt ? 
0 : TKIP_ICV_LEN; + if (tx->key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE) + tailneed = 0; + else + tailneed = TKIP_ICV_LEN; + if ((skb_headroom(skb) < TKIP_IV_LEN || skb_tailroom(skb) < tailneed)) { I802_DEBUG_INC(tx->local->tx_expand_skb_head); @@ -248,8 +218,7 @@ static int tkip_encrypt_skb(struct ieee80211_txrx_data *tx, if (key->u.tkip.iv16 == 0) key->u.tkip.iv32++; - if (!tx->key->force_sw_encrypt) { - u32 flags = tx->local->hw.flags; + if (tx->key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE) { hdr = (struct ieee80211_hdr *)skb->data; /* hwaccel - with preallocated room for IV */ @@ -259,23 +228,7 @@ static int tkip_encrypt_skb(struct ieee80211_txrx_data *tx, 0x7f), (u8) key->u.tkip.iv16); - if (flags & IEEE80211_HW_TKIP_REQ_PHASE2_KEY) - ieee80211_tkip_gen_rc4key(key, hdr->addr2, - tx->u.tx.control->tkip_key); - else if (flags & IEEE80211_HW_TKIP_REQ_PHASE1_KEY) { - if (key->u.tkip.iv16 == 0 || - !key->u.tkip.tx_initialized) { - ieee80211_tkip_gen_phase1key(key, hdr->addr2, - (u16 *)tx->u.tx.control->tkip_key); - key->u.tkip.tx_initialized = 1; - tx->u.tx.control->flags |= - IEEE80211_TXCTL_TKIP_NEW_PHASE1_KEY; - } else - tx->u.tx.control->flags &= - ~IEEE80211_TXCTL_TKIP_NEW_PHASE1_KEY; - } - - tx->u.tx.control->key_idx = tx->key->hw_key_idx; + tx->u.tx.control->key_idx = tx->key->conf.hw_key_idx; return 0; } @@ -290,28 +243,27 @@ static int tkip_encrypt_skb(struct ieee80211_txrx_data *tx, ieee80211_txrx_result -ieee80211_tx_h_tkip_encrypt(struct ieee80211_txrx_data *tx) +ieee80211_crypto_tkip_encrypt(struct ieee80211_txrx_data *tx) { struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) tx->skb->data; u16 fc; - struct ieee80211_key *key = tx->key; struct sk_buff *skb = tx->skb; int wpa_test = 0, test = 0; fc = le16_to_cpu(hdr->frame_control); - if (!key || key->alg != ALG_TKIP || !WLAN_FC_DATA_PRESENT(fc)) + if (!WLAN_FC_DATA_PRESENT(fc)) return TXRX_CONTINUE; tx->u.tx.control->icv_len = TKIP_ICV_LEN; tx->u.tx.control->iv_len = TKIP_IV_LEN; ieee80211_tx_set_iswep(tx); - if (!tx->key->force_sw_encrypt && - !(tx->local->hw.flags & IEEE80211_HW_WEP_INCLUDE_IV) && + if ((tx->key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE) && + !(tx->key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_IV) && !wpa_test) { /* hwaccel - with no need for preallocated room for IV/ICV */ - tx->u.tx.control->key_idx = tx->key->hw_key_idx; + tx->u.tx.control->key_idx = tx->key->conf.hw_key_idx; return TXRX_CONTINUE; } @@ -332,30 +284,31 @@ ieee80211_tx_h_tkip_encrypt(struct ieee80211_txrx_data *tx) ieee80211_txrx_result -ieee80211_rx_h_tkip_decrypt(struct ieee80211_txrx_data *rx) +ieee80211_crypto_tkip_decrypt(struct ieee80211_txrx_data *rx) { struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) rx->skb->data; u16 fc; int hdrlen, res, hwaccel = 0, wpa_test = 0; struct ieee80211_key *key = rx->key; struct sk_buff *skb = rx->skb; + DECLARE_MAC_BUF(mac); fc = le16_to_cpu(hdr->frame_control); hdrlen = ieee80211_get_hdrlen(fc); - if (!rx->key || rx->key->alg != ALG_TKIP || - !(rx->fc & IEEE80211_FCTL_PROTECTED) || - (rx->fc & IEEE80211_FCTL_FTYPE) != IEEE80211_FTYPE_DATA) + if ((rx->fc & IEEE80211_FCTL_FTYPE) != IEEE80211_FTYPE_DATA) return TXRX_CONTINUE; if (!rx->sta || skb->len - hdrlen < 12) return TXRX_DROP; - if ((rx->u.rx.status->flag & RX_FLAG_DECRYPTED) && - !rx->key->force_sw_encrypt) { - if (!(rx->local->hw.flags & IEEE80211_HW_WEP_INCLUDE_IV)) { - /* Hardware takes care of all processing, including - * replay protection, so no need to continue here. 
*/ + if (rx->u.rx.status->flag & RX_FLAG_DECRYPTED) { + if (rx->u.rx.status->flag & RX_FLAG_IV_STRIPPED) { + /* + * Hardware took care of all processing, including + * replay protection, and stripped the ICV/IV so + * we cannot do any checks here. + */ return TXRX_CONTINUE; } @@ -366,11 +319,13 @@ ieee80211_rx_h_tkip_decrypt(struct ieee80211_txrx_data *rx) res = ieee80211_tkip_decrypt_data(rx->local->wep_rx_tfm, key, skb->data + hdrlen, skb->len - hdrlen, rx->sta->addr, - hwaccel, rx->u.rx.queue); + hwaccel, rx->u.rx.queue, + &rx->u.rx.tkip_iv32, + &rx->u.rx.tkip_iv16); if (res != TKIP_DECRYPT_OK || wpa_test) { printk(KERN_DEBUG "%s: TKIP decrypt failed for RX frame from " - MAC_FMT " (res=%d)\n", - rx->dev->name, MAC_ARG(rx->sta->addr), res); + "%s (res=%d)\n", + rx->dev->name, print_mac(mac, rx->sta->addr), res); return TXRX_DROP; } @@ -496,7 +451,10 @@ static int ccmp_encrypt_skb(struct ieee80211_txrx_data *tx, hdrlen = ieee80211_get_hdrlen(fc); len = skb->len - hdrlen; - tailneed = !key->force_sw_encrypt ? 0 : CCMP_MIC_LEN; + if (key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE) + tailneed = 0; + else + tailneed = CCMP_MIC_LEN; if ((skb_headroom(skb) < CCMP_HDR_LEN || skb_tailroom(skb) < tailneed)) { @@ -520,11 +478,11 @@ static int ccmp_encrypt_skb(struct ieee80211_txrx_data *tx, break; } - ccmp_pn2hdr(pos, pn, key->keyidx); + ccmp_pn2hdr(pos, pn, key->conf.keyidx); - if (!key->force_sw_encrypt) { + if (key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE) { /* hwaccel - with preallocated room for CCMP header */ - tx->u.tx.control->key_idx = key->hw_key_idx; + tx->u.tx.control->key_idx = key->conf.hw_key_idx; return 0; } @@ -538,28 +496,27 @@ static int ccmp_encrypt_skb(struct ieee80211_txrx_data *tx, ieee80211_txrx_result -ieee80211_tx_h_ccmp_encrypt(struct ieee80211_txrx_data *tx) +ieee80211_crypto_ccmp_encrypt(struct ieee80211_txrx_data *tx) { struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) tx->skb->data; - struct ieee80211_key *key = tx->key; u16 fc; struct sk_buff *skb = tx->skb; int test = 0; fc = le16_to_cpu(hdr->frame_control); - if (!key || key->alg != ALG_CCMP || !WLAN_FC_DATA_PRESENT(fc)) + if (!WLAN_FC_DATA_PRESENT(fc)) return TXRX_CONTINUE; tx->u.tx.control->icv_len = CCMP_MIC_LEN; tx->u.tx.control->iv_len = CCMP_HDR_LEN; ieee80211_tx_set_iswep(tx); - if (!tx->key->force_sw_encrypt && - !(tx->local->hw.flags & IEEE80211_HW_WEP_INCLUDE_IV)) { + if ((tx->key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE) && + !(tx->key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_IV)) { /* hwaccel - with no need for preallocated room for CCMP " * header or MIC fields */ - tx->u.tx.control->key_idx = tx->key->hw_key_idx; + tx->u.tx.control->key_idx = tx->key->conf.hw_key_idx; return TXRX_CONTINUE; } @@ -568,7 +525,6 @@ ieee80211_tx_h_ccmp_encrypt(struct ieee80211_txrx_data *tx) if (tx->u.tx.extra_frag) { int i; - for (i = 0; i < tx->u.tx.num_extra_frag; i++) { if (ccmp_encrypt_skb(tx, tx->u.tx.extra_frag[i], test) < 0) @@ -581,7 +537,7 @@ ieee80211_tx_h_ccmp_encrypt(struct ieee80211_txrx_data *tx) ieee80211_txrx_result -ieee80211_rx_h_ccmp_decrypt(struct ieee80211_txrx_data *rx) +ieee80211_crypto_ccmp_decrypt(struct ieee80211_txrx_data *rx) { struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) rx->skb->data; u16 fc; @@ -590,13 +546,12 @@ ieee80211_rx_h_ccmp_decrypt(struct ieee80211_txrx_data *rx) struct sk_buff *skb = rx->skb; u8 pn[CCMP_PN_LEN]; int data_len; + DECLARE_MAC_BUF(mac); fc = le16_to_cpu(hdr->frame_control); hdrlen = ieee80211_get_hdrlen(fc); - if (!key || key->alg != ALG_CCMP || - !(rx->fc & 
IEEE80211_FCTL_PROTECTED) || - (rx->fc & IEEE80211_FCTL_FTYPE) != IEEE80211_FTYPE_DATA) + if ((rx->fc & IEEE80211_FCTL_FTYPE) != IEEE80211_FTYPE_DATA) return TXRX_CONTINUE; data_len = skb->len - hdrlen - CCMP_HDR_LEN - CCMP_MIC_LEN; @@ -604,8 +559,7 @@ ieee80211_rx_h_ccmp_decrypt(struct ieee80211_txrx_data *rx) return TXRX_DROP; if ((rx->u.rx.status->flag & RX_FLAG_DECRYPTED) && - !key->force_sw_encrypt && - !(rx->local->hw.flags & IEEE80211_HW_WEP_INCLUDE_IV)) + (rx->u.rx.status->flag & RX_FLAG_IV_STRIPPED)) return TXRX_CONTINUE; (void) ccmp_hdr2pn(pn, skb->data + hdrlen); @@ -613,10 +567,11 @@ ieee80211_rx_h_ccmp_decrypt(struct ieee80211_txrx_data *rx) if (memcmp(pn, key->u.ccmp.rx_pn[rx->u.rx.queue], CCMP_PN_LEN) <= 0) { #ifdef CONFIG_MAC80211_DEBUG u8 *ppn = key->u.ccmp.rx_pn[rx->u.rx.queue]; + printk(KERN_DEBUG "%s: CCMP replay detected for RX frame from " - MAC_FMT " (RX PN %02x%02x%02x%02x%02x%02x <= prev. PN " + "%s (RX PN %02x%02x%02x%02x%02x%02x <= prev. PN " "%02x%02x%02x%02x%02x%02x)\n", rx->dev->name, - MAC_ARG(rx->sta->addr), + print_mac(mac, rx->sta->addr), pn[0], pn[1], pn[2], pn[3], pn[4], pn[5], ppn[0], ppn[1], ppn[2], ppn[3], ppn[4], ppn[5]); #endif /* CONFIG_MAC80211_DEBUG */ @@ -624,10 +579,8 @@ ieee80211_rx_h_ccmp_decrypt(struct ieee80211_txrx_data *rx) return TXRX_DROP; } - if ((rx->u.rx.status->flag & RX_FLAG_DECRYPTED) && - !key->force_sw_encrypt) { - /* hwaccel has already decrypted frame and verified MIC */ - } else { + if (!(rx->u.rx.status->flag & RX_FLAG_DECRYPTED)) { + /* hardware didn't decrypt/verify MIC */ u8 *scratch, *b_0, *aad; scratch = key->u.ccmp.rx_crypto_buf; @@ -642,8 +595,8 @@ ieee80211_rx_h_ccmp_decrypt(struct ieee80211_txrx_data *rx) skb->data + skb->len - CCMP_MIC_LEN, skb->data + hdrlen + CCMP_HDR_LEN)) { printk(KERN_DEBUG "%s: CCMP decrypt failed for RX " - "frame from " MAC_FMT "\n", rx->dev->name, - MAC_ARG(rx->sta->addr)); + "frame from %s\n", rx->dev->name, + print_mac(mac, rx->sta->addr)); return TXRX_DROP; } } @@ -657,4 +610,3 @@ ieee80211_rx_h_ccmp_decrypt(struct ieee80211_txrx_data *rx) return TXRX_CONTINUE; } - diff --git a/net/mac80211/wpa.h b/net/mac80211/wpa.h index da3b9594f9c3e9cc7f187dc515c27cd37b89a270..49d80cf0cd7569fb5f409121f017b185f0e76bdf 100644 --- a/net/mac80211/wpa.h +++ b/net/mac80211/wpa.h @@ -19,13 +19,13 @@ ieee80211_txrx_result ieee80211_rx_h_michael_mic_verify(struct ieee80211_txrx_data *rx); ieee80211_txrx_result -ieee80211_tx_h_tkip_encrypt(struct ieee80211_txrx_data *tx); +ieee80211_crypto_tkip_encrypt(struct ieee80211_txrx_data *tx); ieee80211_txrx_result -ieee80211_rx_h_tkip_decrypt(struct ieee80211_txrx_data *rx); +ieee80211_crypto_tkip_decrypt(struct ieee80211_txrx_data *rx); ieee80211_txrx_result -ieee80211_tx_h_ccmp_encrypt(struct ieee80211_txrx_data *tx); +ieee80211_crypto_ccmp_encrypt(struct ieee80211_txrx_data *tx); ieee80211_txrx_result -ieee80211_rx_h_ccmp_decrypt(struct ieee80211_txrx_data *rx); +ieee80211_crypto_ccmp_decrypt(struct ieee80211_txrx_data *rx); #endif /* WPA_H */ diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig index 3599770a24730f0782a899205c7a99806955391f..d7a600a5720acaf306c83a67d18210ee39b495c9 100644 --- a/net/netfilter/Kconfig +++ b/net/netfilter/Kconfig @@ -665,6 +665,20 @@ config NETFILTER_XT_MATCH_TCPMSS To compile it as a module, choose M here. If unsure, say N. 
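The CCMP replay check in the mac80211 hunk above relies on the packet number being kept most-significant byte first in pn[], so a plain memcmp() against the last accepted PN for that RX queue (key->u.ccmp.rx_pn[...]) doubles as a numeric comparison; the sketch below illustrates the idea under that assumption (ccmp_hdr2pn() itself is not shown here, and the helper name is hypothetical):

#include <string.h>

/* both arrays hold the 48-bit CCMP packet number, most-significant byte first */
static int ccmp_pn_is_replay(const unsigned char *pn, const unsigned char *last_pn)
{
	return memcmp(pn, last_pn, 6) <= 0;	/* not strictly greater => replay */
}

A frame is only accepted when its PN is strictly larger than the stored one, after which the stored value is advanced.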
+config NETFILTER_XT_MATCH_TIME + tristate '"time" match support' + depends on NETFILTER_XTABLES + ---help--- + This option adds a "time" match, which allows you to match based on + the packet arrival time (at the machine which netfilter is running) + on) or departure time/date (for locally generated packets). + + If you say Y here, try `iptables -m time --help` for + more information. + + If you want to compile it as a module, say M here. + If unsure, say N. + config NETFILTER_XT_MATCH_U32 tristate '"u32" match support' depends on NETFILTER_XTABLES diff --git a/net/netfilter/Makefile b/net/netfilter/Makefile index 0c054bf279739f5f0298357c33df6bbf92f6acfb..93c58f973831f5dd268bf9e114c009faa755350a 100644 --- a/net/netfilter/Makefile +++ b/net/netfilter/Makefile @@ -73,6 +73,7 @@ obj-$(CONFIG_NETFILTER_XT_MATCH_SCTP) += xt_sctp.o obj-$(CONFIG_NETFILTER_XT_MATCH_STATE) += xt_state.o obj-$(CONFIG_NETFILTER_XT_MATCH_STATISTIC) += xt_statistic.o obj-$(CONFIG_NETFILTER_XT_MATCH_STRING) += xt_string.o +obj-$(CONFIG_NETFILTER_XT_MATCH_TIME) += xt_time.o obj-$(CONFIG_NETFILTER_XT_MATCH_TCPMSS) += xt_tcpmss.o obj-$(CONFIG_NETFILTER_XT_MATCH_PHYSDEV) += xt_physdev.o obj-$(CONFIG_NETFILTER_XT_MATCH_U32) += xt_u32.o diff --git a/net/netfilter/core.c b/net/netfilter/core.c index 381a77cf0c9eb83c5900fb775f6e38a5fcd4b7ad..a523fa4136ed53a7f13e227c0ee6bb8a38ae95f4 100644 --- a/net/netfilter/core.c +++ b/net/netfilter/core.c @@ -19,6 +19,7 @@ #include #include #include +#include #include #include "nf_internals.h" @@ -293,7 +294,7 @@ void __init netfilter_init(void) } #ifdef CONFIG_PROC_FS - proc_net_netfilter = proc_mkdir("netfilter", proc_net); + proc_net_netfilter = proc_mkdir("netfilter", init_net.proc_net); if (!proc_net_netfilter) panic("cannot create netfilter proc entry"); #endif diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c index 0fe11889ce141c70f9811a5b8fd89a6e9d267265..83c30b45d170985da545b76c7910d1684175b7cd 100644 --- a/net/netfilter/nf_conntrack_core.c +++ b/net/netfilter/nf_conntrack_core.c @@ -63,7 +63,6 @@ unsigned int nf_ct_log_invalid __read_mostly; HLIST_HEAD(unconfirmed); static int nf_conntrack_vmalloc __read_mostly; static struct kmem_cache *nf_conntrack_cachep __read_mostly; -static unsigned int nf_conntrack_next_id; DEFINE_PER_CPU(struct ip_conntrack_stat, nf_conntrack_stat); EXPORT_PER_CPU_SYMBOL(nf_conntrack_stat); @@ -287,7 +286,6 @@ static void __nf_conntrack_hash_insert(struct nf_conn *ct, unsigned int hash, unsigned int repl_hash) { - ct->id = ++nf_conntrack_next_id; hlist_add_head(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnode, &nf_conntrack_hash[hash]); hlist_add_head(&ct->tuplehash[IP_CT_DIR_REPLY].hnode, @@ -827,44 +825,41 @@ EXPORT_SYMBOL_GPL(__nf_ct_refresh_acct); #include #include - /* Generic function for tcp/udp/sctp/dccp and alike. 
This needs to be * in ip_conntrack_core, since we don't want the protocols to autoload * or depend on ctnetlink */ -int nf_ct_port_tuple_to_nfattr(struct sk_buff *skb, +int nf_ct_port_tuple_to_nlattr(struct sk_buff *skb, const struct nf_conntrack_tuple *tuple) { - NFA_PUT(skb, CTA_PROTO_SRC_PORT, sizeof(u_int16_t), + NLA_PUT(skb, CTA_PROTO_SRC_PORT, sizeof(u_int16_t), &tuple->src.u.tcp.port); - NFA_PUT(skb, CTA_PROTO_DST_PORT, sizeof(u_int16_t), + NLA_PUT(skb, CTA_PROTO_DST_PORT, sizeof(u_int16_t), &tuple->dst.u.tcp.port); return 0; -nfattr_failure: +nla_put_failure: return -1; } -EXPORT_SYMBOL_GPL(nf_ct_port_tuple_to_nfattr); +EXPORT_SYMBOL_GPL(nf_ct_port_tuple_to_nlattr); -static const size_t cta_min_proto[CTA_PROTO_MAX] = { - [CTA_PROTO_SRC_PORT-1] = sizeof(u_int16_t), - [CTA_PROTO_DST_PORT-1] = sizeof(u_int16_t) +const struct nla_policy nf_ct_port_nla_policy[CTA_PROTO_MAX+1] = { + [CTA_PROTO_SRC_PORT] = { .type = NLA_U16 }, + [CTA_PROTO_DST_PORT] = { .type = NLA_U16 }, }; +EXPORT_SYMBOL_GPL(nf_ct_port_nla_policy); -int nf_ct_port_nfattr_to_tuple(struct nfattr *tb[], +int nf_ct_port_nlattr_to_tuple(struct nlattr *tb[], struct nf_conntrack_tuple *t) { - if (!tb[CTA_PROTO_SRC_PORT-1] || !tb[CTA_PROTO_DST_PORT-1]) - return -EINVAL; - - if (nfattr_bad_size(tb, CTA_PROTO_MAX, cta_min_proto)) + if (!tb[CTA_PROTO_SRC_PORT] || !tb[CTA_PROTO_DST_PORT]) return -EINVAL; - t->src.u.tcp.port = *(__be16 *)NFA_DATA(tb[CTA_PROTO_SRC_PORT-1]); - t->dst.u.tcp.port = *(__be16 *)NFA_DATA(tb[CTA_PROTO_DST_PORT-1]); + t->src.u.tcp.port = *(__be16 *)nla_data(tb[CTA_PROTO_SRC_PORT]); + t->dst.u.tcp.port = *(__be16 *)nla_data(tb[CTA_PROTO_DST_PORT]); return 0; } -EXPORT_SYMBOL_GPL(nf_ct_port_nfattr_to_tuple); +EXPORT_SYMBOL_GPL(nf_ct_port_nlattr_to_tuple); #endif /* Used by ipt_REJECT and ip6t_REJECT. 
*/ diff --git a/net/netfilter/nf_conntrack_expect.c b/net/netfilter/nf_conntrack_expect.c index 3ac64e25f10cd45f5629eada35b02a217933159e..175c8d1a199219ee57144b9c1d0574c4374abf01 100644 --- a/net/netfilter/nf_conntrack_expect.c +++ b/net/netfilter/nf_conntrack_expect.c @@ -20,6 +20,7 @@ #include #include #include +#include #include #include @@ -40,7 +41,6 @@ static int nf_ct_expect_hash_rnd_initted __read_mostly; static int nf_ct_expect_vmalloc; static struct kmem_cache *nf_ct_expect_cachep __read_mostly; -static unsigned int nf_ct_expect_next_id; /* nf_conntrack_expect helper functions */ void nf_ct_unlink_expect(struct nf_conntrack_expect *exp) @@ -301,7 +301,6 @@ static void nf_ct_expect_insert(struct nf_conntrack_expect *exp) exp->timeout.expires = jiffies + master_help->helper->timeout * HZ; add_timer(&exp->timeout); - exp->id = ++nf_ct_expect_next_id; atomic_inc(&exp->use); NF_CT_STAT_INC(expect_create); } @@ -473,22 +472,8 @@ static const struct seq_operations exp_seq_ops = { static int exp_open(struct inode *inode, struct file *file) { - struct seq_file *seq; - struct ct_expect_iter_state *st; - int ret; - - st = kzalloc(sizeof(struct ct_expect_iter_state), GFP_KERNEL); - if (!st) - return -ENOMEM; - ret = seq_open(file, &exp_seq_ops); - if (ret) - goto out_free; - seq = file->private_data; - seq->private = st; - return ret; -out_free: - kfree(st); - return ret; + return seq_open_private(file, &exp_seq_ops, + sizeof(struct ct_expect_iter_state)); } static const struct file_operations exp_file_ops = { @@ -505,7 +490,7 @@ static int __init exp_proc_init(void) #ifdef CONFIG_PROC_FS struct proc_dir_entry *proc; - proc = proc_net_fops_create("nf_conntrack_expect", 0440, &exp_file_ops); + proc = proc_net_fops_create(&init_net, "nf_conntrack_expect", 0440, &exp_file_ops); if (!proc) return -ENOMEM; #endif /* CONFIG_PROC_FS */ @@ -515,7 +500,7 @@ static int __init exp_proc_init(void) static void exp_proc_remove(void) { #ifdef CONFIG_PROC_FS - proc_net_remove("nf_conntrack_expect"); + proc_net_remove(&init_net, "nf_conntrack_expect"); #endif /* CONFIG_PROC_FS */ } diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c index 2863e72b40916d74fd7d1ed63a83f1ef16d6bd1d..9be1826e6cdd30ded188df436c604051b67653c5 100644 --- a/net/netfilter/nf_conntrack_netlink.c +++ b/net/netfilter/nf_conntrack_netlink.c @@ -4,7 +4,7 @@ * (C) 2001 by Jay Schulist * (C) 2002-2006 by Harald Welte * (C) 2003 by Patrick Mchardy - * (C) 2005-2006 by Pablo Neira Ayuso + * (C) 2005-2007 by Pablo Neira Ayuso * * Initial connection tracking via netlink development funded and * generally made possible by Network Robots, Inc. 
(www.networkrobots.com) @@ -54,18 +54,21 @@ ctnetlink_dump_tuples_proto(struct sk_buff *skb, struct nf_conntrack_l4proto *l4proto) { int ret = 0; - struct nfattr *nest_parms = NFA_NEST(skb, CTA_TUPLE_PROTO); + struct nlattr *nest_parms; - NFA_PUT(skb, CTA_PROTO_NUM, sizeof(u_int8_t), &tuple->dst.protonum); + nest_parms = nla_nest_start(skb, CTA_TUPLE_PROTO | NLA_F_NESTED); + if (!nest_parms) + goto nla_put_failure; + NLA_PUT(skb, CTA_PROTO_NUM, sizeof(u_int8_t), &tuple->dst.protonum); - if (likely(l4proto->tuple_to_nfattr)) - ret = l4proto->tuple_to_nfattr(skb, tuple); + if (likely(l4proto->tuple_to_nlattr)) + ret = l4proto->tuple_to_nlattr(skb, tuple); - NFA_NEST_END(skb, nest_parms); + nla_nest_end(skb, nest_parms); return ret; -nfattr_failure: +nla_put_failure: return -1; } @@ -75,16 +78,20 @@ ctnetlink_dump_tuples_ip(struct sk_buff *skb, struct nf_conntrack_l3proto *l3proto) { int ret = 0; - struct nfattr *nest_parms = NFA_NEST(skb, CTA_TUPLE_IP); + struct nlattr *nest_parms; - if (likely(l3proto->tuple_to_nfattr)) - ret = l3proto->tuple_to_nfattr(skb, tuple); + nest_parms = nla_nest_start(skb, CTA_TUPLE_IP | NLA_F_NESTED); + if (!nest_parms) + goto nla_put_failure; - NFA_NEST_END(skb, nest_parms); + if (likely(l3proto->tuple_to_nlattr)) + ret = l3proto->tuple_to_nlattr(skb, tuple); + + nla_nest_end(skb, nest_parms); return ret; -nfattr_failure: +nla_put_failure: return -1; } @@ -114,10 +121,10 @@ static inline int ctnetlink_dump_status(struct sk_buff *skb, const struct nf_conn *ct) { __be32 status = htonl((u_int32_t) ct->status); - NFA_PUT(skb, CTA_STATUS, sizeof(status), &status); + NLA_PUT(skb, CTA_STATUS, sizeof(status), &status); return 0; -nfattr_failure: +nla_put_failure: return -1; } @@ -132,10 +139,10 @@ ctnetlink_dump_timeout(struct sk_buff *skb, const struct nf_conn *ct) else timeout = htonl(timeout_l / HZ); - NFA_PUT(skb, CTA_TIMEOUT, sizeof(timeout), &timeout); + NLA_PUT(skb, CTA_TIMEOUT, sizeof(timeout), &timeout); return 0; -nfattr_failure: +nla_put_failure: return -1; } @@ -143,25 +150,27 @@ static inline int ctnetlink_dump_protoinfo(struct sk_buff *skb, const struct nf_conn *ct) { struct nf_conntrack_l4proto *l4proto = nf_ct_l4proto_find_get(ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.l3num, ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.protonum); - struct nfattr *nest_proto; + struct nlattr *nest_proto; int ret; - if (!l4proto->to_nfattr) { + if (!l4proto->to_nlattr) { nf_ct_l4proto_put(l4proto); return 0; } - nest_proto = NFA_NEST(skb, CTA_PROTOINFO); + nest_proto = nla_nest_start(skb, CTA_PROTOINFO | NLA_F_NESTED); + if (!nest_proto) + goto nla_put_failure; - ret = l4proto->to_nfattr(skb, nest_proto, ct); + ret = l4proto->to_nlattr(skb, nest_proto, ct); nf_ct_l4proto_put(l4proto); - NFA_NEST_END(skb, nest_proto); + nla_nest_end(skb, nest_proto); return ret; -nfattr_failure: +nla_put_failure: nf_ct_l4proto_put(l4proto); return -1; } @@ -169,7 +178,7 @@ ctnetlink_dump_protoinfo(struct sk_buff *skb, const struct nf_conn *ct) static inline int ctnetlink_dump_helpinfo(struct sk_buff *skb, const struct nf_conn *ct) { - struct nfattr *nest_helper; + struct nlattr *nest_helper; const struct nf_conn_help *help = nfct_help(ct); struct nf_conntrack_helper *helper; @@ -181,18 +190,20 @@ ctnetlink_dump_helpinfo(struct sk_buff *skb, const struct nf_conn *ct) if (!helper) goto out; - nest_helper = NFA_NEST(skb, CTA_HELP); - NFA_PUT(skb, CTA_HELP_NAME, strlen(helper->name), helper->name); + nest_helper = nla_nest_start(skb, CTA_HELP | NLA_F_NESTED); + if (!nest_helper) + goto 
nla_put_failure; + NLA_PUT(skb, CTA_HELP_NAME, strlen(helper->name), helper->name); - if (helper->to_nfattr) - helper->to_nfattr(skb, ct); + if (helper->to_nlattr) + helper->to_nlattr(skb, ct); - NFA_NEST_END(skb, nest_helper); + nla_nest_end(skb, nest_helper); out: rcu_read_unlock(); return 0; -nfattr_failure: +nla_put_failure: rcu_read_unlock(); return -1; } @@ -203,20 +214,24 @@ ctnetlink_dump_counters(struct sk_buff *skb, const struct nf_conn *ct, enum ip_conntrack_dir dir) { enum ctattr_type type = dir ? CTA_COUNTERS_REPLY: CTA_COUNTERS_ORIG; - struct nfattr *nest_count = NFA_NEST(skb, type); + struct nlattr *nest_count; __be32 tmp; + nest_count = nla_nest_start(skb, type | NLA_F_NESTED); + if (!nest_count) + goto nla_put_failure; + tmp = htonl(ct->counters[dir].packets); - NFA_PUT(skb, CTA_COUNTERS32_PACKETS, sizeof(u_int32_t), &tmp); + NLA_PUT(skb, CTA_COUNTERS32_PACKETS, sizeof(u_int32_t), &tmp); tmp = htonl(ct->counters[dir].bytes); - NFA_PUT(skb, CTA_COUNTERS32_BYTES, sizeof(u_int32_t), &tmp); + NLA_PUT(skb, CTA_COUNTERS32_BYTES, sizeof(u_int32_t), &tmp); - NFA_NEST_END(skb, nest_count); + nla_nest_end(skb, nest_count); return 0; -nfattr_failure: +nla_put_failure: return -1; } #else @@ -229,10 +244,10 @@ ctnetlink_dump_mark(struct sk_buff *skb, const struct nf_conn *ct) { __be32 mark = htonl(ct->mark); - NFA_PUT(skb, CTA_MARK, sizeof(u_int32_t), &mark); + NLA_PUT(skb, CTA_MARK, sizeof(u_int32_t), &mark); return 0; -nfattr_failure: +nla_put_failure: return -1; } #else @@ -242,11 +257,11 @@ ctnetlink_dump_mark(struct sk_buff *skb, const struct nf_conn *ct) static inline int ctnetlink_dump_id(struct sk_buff *skb, const struct nf_conn *ct) { - __be32 id = htonl(ct->id); - NFA_PUT(skb, CTA_ID, sizeof(u_int32_t), &id); + __be32 id = htonl((unsigned long)ct); + NLA_PUT(skb, CTA_ID, sizeof(u_int32_t), &id); return 0; -nfattr_failure: +nla_put_failure: return -1; } @@ -255,10 +270,10 @@ ctnetlink_dump_use(struct sk_buff *skb, const struct nf_conn *ct) { __be32 use = htonl(atomic_read(&ct->ct_general.use)); - NFA_PUT(skb, CTA_USE, sizeof(u_int32_t), &use); + NLA_PUT(skb, CTA_USE, sizeof(u_int32_t), &use); return 0; -nfattr_failure: +nla_put_failure: return -1; } @@ -271,7 +286,7 @@ ctnetlink_fill_info(struct sk_buff *skb, u32 pid, u32 seq, { struct nlmsghdr *nlh; struct nfgenmsg *nfmsg; - struct nfattr *nest_parms; + struct nlattr *nest_parms; unsigned char *b = skb_tail_pointer(skb); event |= NFNL_SUBSYS_CTNETLINK << 8; @@ -284,15 +299,19 @@ ctnetlink_fill_info(struct sk_buff *skb, u32 pid, u32 seq, nfmsg->version = NFNETLINK_V0; nfmsg->res_id = 0; - nest_parms = NFA_NEST(skb, CTA_TUPLE_ORIG); + nest_parms = nla_nest_start(skb, CTA_TUPLE_ORIG | NLA_F_NESTED); + if (!nest_parms) + goto nla_put_failure; if (ctnetlink_dump_tuples(skb, tuple(ct, IP_CT_DIR_ORIGINAL)) < 0) - goto nfattr_failure; - NFA_NEST_END(skb, nest_parms); + goto nla_put_failure; + nla_nest_end(skb, nest_parms); - nest_parms = NFA_NEST(skb, CTA_TUPLE_REPLY); + nest_parms = nla_nest_start(skb, CTA_TUPLE_REPLY | NLA_F_NESTED); + if (!nest_parms) + goto nla_put_failure; if (ctnetlink_dump_tuples(skb, tuple(ct, IP_CT_DIR_REPLY)) < 0) - goto nfattr_failure; - NFA_NEST_END(skb, nest_parms); + goto nla_put_failure; + nla_nest_end(skb, nest_parms); if (ctnetlink_dump_status(skb, ct) < 0 || ctnetlink_dump_timeout(skb, ct) < 0 || @@ -303,13 +322,13 @@ ctnetlink_fill_info(struct sk_buff *skb, u32 pid, u32 seq, ctnetlink_dump_mark(skb, ct) < 0 || ctnetlink_dump_id(skb, ct) < 0 || ctnetlink_dump_use(skb, ct) < 0) - goto nfattr_failure; 
+ goto nla_put_failure; nlh->nlmsg_len = skb_tail_pointer(skb) - b; return skb->len; nlmsg_failure: -nfattr_failure: +nla_put_failure: nlmsg_trim(skb, b); return -1; } @@ -320,7 +339,7 @@ static int ctnetlink_conntrack_event(struct notifier_block *this, { struct nlmsghdr *nlh; struct nfgenmsg *nfmsg; - struct nfattr *nest_parms; + struct nlattr *nest_parms; struct nf_conn *ct = (struct nf_conn *)ptr; struct sk_buff *skb; unsigned int type; @@ -362,45 +381,49 @@ static int ctnetlink_conntrack_event(struct notifier_block *this, nfmsg->version = NFNETLINK_V0; nfmsg->res_id = 0; - nest_parms = NFA_NEST(skb, CTA_TUPLE_ORIG); + nest_parms = nla_nest_start(skb, CTA_TUPLE_ORIG | NLA_F_NESTED); + if (!nest_parms) + goto nla_put_failure; if (ctnetlink_dump_tuples(skb, tuple(ct, IP_CT_DIR_ORIGINAL)) < 0) - goto nfattr_failure; - NFA_NEST_END(skb, nest_parms); + goto nla_put_failure; + nla_nest_end(skb, nest_parms); - nest_parms = NFA_NEST(skb, CTA_TUPLE_REPLY); + nest_parms = nla_nest_start(skb, CTA_TUPLE_REPLY | NLA_F_NESTED); + if (!nest_parms) + goto nla_put_failure; if (ctnetlink_dump_tuples(skb, tuple(ct, IP_CT_DIR_REPLY)) < 0) - goto nfattr_failure; - NFA_NEST_END(skb, nest_parms); + goto nla_put_failure; + nla_nest_end(skb, nest_parms); if (events & IPCT_DESTROY) { if (ctnetlink_dump_counters(skb, ct, IP_CT_DIR_ORIGINAL) < 0 || ctnetlink_dump_counters(skb, ct, IP_CT_DIR_REPLY) < 0) - goto nfattr_failure; + goto nla_put_failure; } else { if (ctnetlink_dump_status(skb, ct) < 0) - goto nfattr_failure; + goto nla_put_failure; if (ctnetlink_dump_timeout(skb, ct) < 0) - goto nfattr_failure; + goto nla_put_failure; if (events & IPCT_PROTOINFO && ctnetlink_dump_protoinfo(skb, ct) < 0) - goto nfattr_failure; + goto nla_put_failure; if ((events & IPCT_HELPER || nfct_help(ct)) && ctnetlink_dump_helpinfo(skb, ct) < 0) - goto nfattr_failure; + goto nla_put_failure; #ifdef CONFIG_NF_CONNTRACK_MARK if ((events & IPCT_MARK || ct->mark) && ctnetlink_dump_mark(skb, ct) < 0) - goto nfattr_failure; + goto nla_put_failure; #endif if (events & IPCT_COUNTER_FILLING && (ctnetlink_dump_counters(skb, ct, IP_CT_DIR_ORIGINAL) < 0 || ctnetlink_dump_counters(skb, ct, IP_CT_DIR_REPLY) < 0)) - goto nfattr_failure; + goto nla_put_failure; } nlh->nlmsg_len = skb->tail - b; @@ -408,7 +431,7 @@ static int ctnetlink_conntrack_event(struct notifier_block *this, return NOTIFY_DONE; nlmsg_failure: -nfattr_failure: +nla_put_failure: kfree_skb(skb); return NOTIFY_DONE; } @@ -479,49 +502,56 @@ ctnetlink_dump_table(struct sk_buff *skb, struct netlink_callback *cb) } static inline int -ctnetlink_parse_tuple_ip(struct nfattr *attr, struct nf_conntrack_tuple *tuple) +ctnetlink_parse_tuple_ip(struct nlattr *attr, struct nf_conntrack_tuple *tuple) { - struct nfattr *tb[CTA_IP_MAX]; + struct nlattr *tb[CTA_IP_MAX+1]; struct nf_conntrack_l3proto *l3proto; int ret = 0; - nfattr_parse_nested(tb, CTA_IP_MAX, attr); + nla_parse_nested(tb, CTA_IP_MAX, attr, NULL); l3proto = nf_ct_l3proto_find_get(tuple->src.l3num); - if (likely(l3proto->nfattr_to_tuple)) - ret = l3proto->nfattr_to_tuple(tb, tuple); + if (likely(l3proto->nlattr_to_tuple)) { + ret = nla_validate_nested(attr, CTA_IP_MAX, + l3proto->nla_policy); + if (ret == 0) + ret = l3proto->nlattr_to_tuple(tb, tuple); + } nf_ct_l3proto_put(l3proto); return ret; } -static const size_t cta_min_proto[CTA_PROTO_MAX] = { - [CTA_PROTO_NUM-1] = sizeof(u_int8_t), +static const struct nla_policy proto_nla_policy[CTA_PROTO_MAX+1] = { + [CTA_PROTO_NUM] = { .type = NLA_U8 }, }; static inline int 
-ctnetlink_parse_tuple_proto(struct nfattr *attr, +ctnetlink_parse_tuple_proto(struct nlattr *attr, struct nf_conntrack_tuple *tuple) { - struct nfattr *tb[CTA_PROTO_MAX]; + struct nlattr *tb[CTA_PROTO_MAX+1]; struct nf_conntrack_l4proto *l4proto; int ret = 0; - nfattr_parse_nested(tb, CTA_PROTO_MAX, attr); - - if (nfattr_bad_size(tb, CTA_PROTO_MAX, cta_min_proto)) - return -EINVAL; + ret = nla_parse_nested(tb, CTA_PROTO_MAX, attr, proto_nla_policy); + if (ret < 0) + return ret; - if (!tb[CTA_PROTO_NUM-1]) + if (!tb[CTA_PROTO_NUM]) return -EINVAL; - tuple->dst.protonum = *(u_int8_t *)NFA_DATA(tb[CTA_PROTO_NUM-1]); + tuple->dst.protonum = *(u_int8_t *)nla_data(tb[CTA_PROTO_NUM]); l4proto = nf_ct_l4proto_find_get(tuple->src.l3num, tuple->dst.protonum); - if (likely(l4proto->nfattr_to_tuple)) - ret = l4proto->nfattr_to_tuple(tb, tuple); + if (likely(l4proto->nlattr_to_tuple)) { + ret = nla_validate_nested(attr, CTA_PROTO_MAX, + l4proto->nla_policy); + if (ret == 0) + ret = l4proto->nlattr_to_tuple(tb, tuple); + } nf_ct_l4proto_put(l4proto); @@ -529,29 +559,29 @@ ctnetlink_parse_tuple_proto(struct nfattr *attr, } static inline int -ctnetlink_parse_tuple(struct nfattr *cda[], struct nf_conntrack_tuple *tuple, +ctnetlink_parse_tuple(struct nlattr *cda[], struct nf_conntrack_tuple *tuple, enum ctattr_tuple type, u_int8_t l3num) { - struct nfattr *tb[CTA_TUPLE_MAX]; + struct nlattr *tb[CTA_TUPLE_MAX+1]; int err; memset(tuple, 0, sizeof(*tuple)); - nfattr_parse_nested(tb, CTA_TUPLE_MAX, cda[type-1]); + nla_parse_nested(tb, CTA_TUPLE_MAX, cda[type], NULL); - if (!tb[CTA_TUPLE_IP-1]) + if (!tb[CTA_TUPLE_IP]) return -EINVAL; tuple->src.l3num = l3num; - err = ctnetlink_parse_tuple_ip(tb[CTA_TUPLE_IP-1], tuple); + err = ctnetlink_parse_tuple_ip(tb[CTA_TUPLE_IP], tuple); if (err < 0) return err; - if (!tb[CTA_TUPLE_PROTO-1]) + if (!tb[CTA_TUPLE_PROTO]) return -EINVAL; - err = ctnetlink_parse_tuple_proto(tb[CTA_TUPLE_PROTO-1], tuple); + err = ctnetlink_parse_tuple_proto(tb[CTA_TUPLE_PROTO], tuple); if (err < 0) return err; @@ -565,32 +595,32 @@ ctnetlink_parse_tuple(struct nfattr *cda[], struct nf_conntrack_tuple *tuple, } #ifdef CONFIG_NF_NAT_NEEDED -static const size_t cta_min_protonat[CTA_PROTONAT_MAX] = { - [CTA_PROTONAT_PORT_MIN-1] = sizeof(u_int16_t), - [CTA_PROTONAT_PORT_MAX-1] = sizeof(u_int16_t), +static const struct nla_policy protonat_nla_policy[CTA_PROTONAT_MAX+1] = { + [CTA_PROTONAT_PORT_MIN] = { .type = NLA_U16 }, + [CTA_PROTONAT_PORT_MAX] = { .type = NLA_U16 }, }; -static int nfnetlink_parse_nat_proto(struct nfattr *attr, +static int nfnetlink_parse_nat_proto(struct nlattr *attr, const struct nf_conn *ct, struct nf_nat_range *range) { - struct nfattr *tb[CTA_PROTONAT_MAX]; + struct nlattr *tb[CTA_PROTONAT_MAX+1]; struct nf_nat_protocol *npt; + int err; - nfattr_parse_nested(tb, CTA_PROTONAT_MAX, attr); - - if (nfattr_bad_size(tb, CTA_PROTONAT_MAX, cta_min_protonat)) - return -EINVAL; + err = nla_parse_nested(tb, CTA_PROTONAT_MAX, attr, protonat_nla_policy); + if (err < 0) + return err; npt = nf_nat_proto_find_get(ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.protonum); - if (!npt->nfattr_to_range) { + if (!npt->nlattr_to_range) { nf_nat_proto_put(npt); return 0; } - /* nfattr_to_range returns 1 if it parsed, 0 if not, neg. on error */ - if (npt->nfattr_to_range(tb, range) > 0) + /* nlattr_to_range returns 1 if it parsed, 0 if not, neg. 
on error */ + if (npt->nlattr_to_range(tb, range) > 0) range->flags |= IP_NAT_RANGE_PROTO_SPECIFIED; nf_nat_proto_put(npt); @@ -598,40 +628,39 @@ static int nfnetlink_parse_nat_proto(struct nfattr *attr, return 0; } -static const size_t cta_min_nat[CTA_NAT_MAX] = { - [CTA_NAT_MINIP-1] = sizeof(u_int32_t), - [CTA_NAT_MAXIP-1] = sizeof(u_int32_t), +static const struct nla_policy nat_nla_policy[CTA_NAT_MAX+1] = { + [CTA_NAT_MINIP] = { .type = NLA_U32 }, + [CTA_NAT_MAXIP] = { .type = NLA_U32 }, }; static inline int -nfnetlink_parse_nat(struct nfattr *nat, +nfnetlink_parse_nat(struct nlattr *nat, const struct nf_conn *ct, struct nf_nat_range *range) { - struct nfattr *tb[CTA_NAT_MAX]; + struct nlattr *tb[CTA_NAT_MAX+1]; int err; memset(range, 0, sizeof(*range)); - nfattr_parse_nested(tb, CTA_NAT_MAX, nat); - - if (nfattr_bad_size(tb, CTA_NAT_MAX, cta_min_nat)) - return -EINVAL; + err = nla_parse_nested(tb, CTA_NAT_MAX, nat, nat_nla_policy); + if (err < 0) + return err; - if (tb[CTA_NAT_MINIP-1]) - range->min_ip = *(__be32 *)NFA_DATA(tb[CTA_NAT_MINIP-1]); + if (tb[CTA_NAT_MINIP]) + range->min_ip = *(__be32 *)nla_data(tb[CTA_NAT_MINIP]); - if (!tb[CTA_NAT_MAXIP-1]) + if (!tb[CTA_NAT_MAXIP]) range->max_ip = range->min_ip; else - range->max_ip = *(__be32 *)NFA_DATA(tb[CTA_NAT_MAXIP-1]); + range->max_ip = *(__be32 *)nla_data(tb[CTA_NAT_MAXIP]); if (range->min_ip) range->flags |= IP_NAT_RANGE_MAP_IPS; - if (!tb[CTA_NAT_PROTO-1]) + if (!tb[CTA_NAT_PROTO]) return 0; - err = nfnetlink_parse_nat_proto(tb[CTA_NAT_PROTO-1], ct, range); + err = nfnetlink_parse_nat_proto(tb[CTA_NAT_PROTO], ct, range); if (err < 0) return err; @@ -640,31 +669,31 @@ nfnetlink_parse_nat(struct nfattr *nat, #endif static inline int -ctnetlink_parse_help(struct nfattr *attr, char **helper_name) +ctnetlink_parse_help(struct nlattr *attr, char **helper_name) { - struct nfattr *tb[CTA_HELP_MAX]; + struct nlattr *tb[CTA_HELP_MAX+1]; - nfattr_parse_nested(tb, CTA_HELP_MAX, attr); + nla_parse_nested(tb, CTA_HELP_MAX, attr, NULL); - if (!tb[CTA_HELP_NAME-1]) + if (!tb[CTA_HELP_NAME]) return -EINVAL; - *helper_name = NFA_DATA(tb[CTA_HELP_NAME-1]); + *helper_name = nla_data(tb[CTA_HELP_NAME]); return 0; } -static const size_t cta_min[CTA_MAX] = { - [CTA_STATUS-1] = sizeof(u_int32_t), - [CTA_TIMEOUT-1] = sizeof(u_int32_t), - [CTA_MARK-1] = sizeof(u_int32_t), - [CTA_USE-1] = sizeof(u_int32_t), - [CTA_ID-1] = sizeof(u_int32_t) +static const struct nla_policy ct_nla_policy[CTA_MAX+1] = { + [CTA_STATUS] = { .type = NLA_U32 }, + [CTA_TIMEOUT] = { .type = NLA_U32 }, + [CTA_MARK] = { .type = NLA_U32 }, + [CTA_USE] = { .type = NLA_U32 }, + [CTA_ID] = { .type = NLA_U32 }, }; static int ctnetlink_del_conntrack(struct sock *ctnl, struct sk_buff *skb, - struct nlmsghdr *nlh, struct nfattr *cda[]) + struct nlmsghdr *nlh, struct nlattr *cda[]) { struct nf_conntrack_tuple_hash *h; struct nf_conntrack_tuple tuple; @@ -673,12 +702,9 @@ ctnetlink_del_conntrack(struct sock *ctnl, struct sk_buff *skb, u_int8_t u3 = nfmsg->nfgen_family; int err = 0; - if (nfattr_bad_size(cda, CTA_MAX, cta_min)) - return -EINVAL; - - if (cda[CTA_TUPLE_ORIG-1]) + if (cda[CTA_TUPLE_ORIG]) err = ctnetlink_parse_tuple(cda, &tuple, CTA_TUPLE_ORIG, u3); - else if (cda[CTA_TUPLE_REPLY-1]) + else if (cda[CTA_TUPLE_REPLY]) err = ctnetlink_parse_tuple(cda, &tuple, CTA_TUPLE_REPLY, u3); else { /* Flush the whole table */ @@ -695,9 +721,9 @@ ctnetlink_del_conntrack(struct sock *ctnl, struct sk_buff *skb, ct = nf_ct_tuplehash_to_ctrack(h); - if (cda[CTA_ID-1]) { - u_int32_t id = 
ntohl(*(__be32 *)NFA_DATA(cda[CTA_ID-1])); - if (ct->id != id) { + if (cda[CTA_ID]) { + u_int32_t id = ntohl(*(__be32 *)nla_data(cda[CTA_ID])); + if (id != (u32)(unsigned long)ct) { nf_ct_put(ct); return -ENOENT; } @@ -712,7 +738,7 @@ ctnetlink_del_conntrack(struct sock *ctnl, struct sk_buff *skb, static int ctnetlink_get_conntrack(struct sock *ctnl, struct sk_buff *skb, - struct nlmsghdr *nlh, struct nfattr *cda[]) + struct nlmsghdr *nlh, struct nlattr *cda[]) { struct nf_conntrack_tuple_hash *h; struct nf_conntrack_tuple tuple; @@ -731,12 +757,9 @@ ctnetlink_get_conntrack(struct sock *ctnl, struct sk_buff *skb, ctnetlink_done); } - if (nfattr_bad_size(cda, CTA_MAX, cta_min)) - return -EINVAL; - - if (cda[CTA_TUPLE_ORIG-1]) + if (cda[CTA_TUPLE_ORIG]) err = ctnetlink_parse_tuple(cda, &tuple, CTA_TUPLE_ORIG, u3); - else if (cda[CTA_TUPLE_REPLY-1]) + else if (cda[CTA_TUPLE_REPLY]) err = ctnetlink_parse_tuple(cda, &tuple, CTA_TUPLE_REPLY, u3); else return -EINVAL; @@ -776,10 +799,10 @@ ctnetlink_get_conntrack(struct sock *ctnl, struct sk_buff *skb, } static inline int -ctnetlink_change_status(struct nf_conn *ct, struct nfattr *cda[]) +ctnetlink_change_status(struct nf_conn *ct, struct nlattr *cda[]) { unsigned long d; - unsigned int status = ntohl(*(__be32 *)NFA_DATA(cda[CTA_STATUS-1])); + unsigned int status = ntohl(*(__be32 *)nla_data(cda[CTA_STATUS])); d = ct->status ^ status; if (d & (IPS_EXPECTED|IPS_CONFIRMED|IPS_DYING)) @@ -795,14 +818,14 @@ ctnetlink_change_status(struct nf_conn *ct, struct nfattr *cda[]) /* ASSURED bit can only be set */ return -EINVAL; - if (cda[CTA_NAT_SRC-1] || cda[CTA_NAT_DST-1]) { + if (cda[CTA_NAT_SRC] || cda[CTA_NAT_DST]) { #ifndef CONFIG_NF_NAT_NEEDED return -EINVAL; #else struct nf_nat_range range; - if (cda[CTA_NAT_DST-1]) { - if (nfnetlink_parse_nat(cda[CTA_NAT_DST-1], ct, + if (cda[CTA_NAT_DST]) { + if (nfnetlink_parse_nat(cda[CTA_NAT_DST], ct, &range) < 0) return -EINVAL; if (nf_nat_initialized(ct, @@ -810,8 +833,8 @@ ctnetlink_change_status(struct nf_conn *ct, struct nfattr *cda[]) return -EEXIST; nf_nat_setup_info(ct, &range, NF_IP_PRE_ROUTING); } - if (cda[CTA_NAT_SRC-1]) { - if (nfnetlink_parse_nat(cda[CTA_NAT_SRC-1], ct, + if (cda[CTA_NAT_SRC]) { + if (nfnetlink_parse_nat(cda[CTA_NAT_SRC], ct, &range) < 0) return -EINVAL; if (nf_nat_initialized(ct, @@ -831,7 +854,7 @@ ctnetlink_change_status(struct nf_conn *ct, struct nfattr *cda[]) static inline int -ctnetlink_change_helper(struct nf_conn *ct, struct nfattr *cda[]) +ctnetlink_change_helper(struct nf_conn *ct, struct nlattr *cda[]) { struct nf_conntrack_helper *helper; struct nf_conn_help *help = nfct_help(ct); @@ -842,7 +865,7 @@ ctnetlink_change_helper(struct nf_conn *ct, struct nfattr *cda[]) if (ct->master) return -EINVAL; - err = ctnetlink_parse_help(cda[CTA_HELP-1], &helpname); + err = ctnetlink_parse_help(cda[CTA_HELP], &helpname); if (err < 0) return err; @@ -879,9 +902,9 @@ ctnetlink_change_helper(struct nf_conn *ct, struct nfattr *cda[]) } static inline int -ctnetlink_change_timeout(struct nf_conn *ct, struct nfattr *cda[]) +ctnetlink_change_timeout(struct nf_conn *ct, struct nlattr *cda[]) { - u_int32_t timeout = ntohl(*(__be32 *)NFA_DATA(cda[CTA_TIMEOUT-1])); + u_int32_t timeout = ntohl(*(__be32 *)nla_data(cda[CTA_TIMEOUT])); if (!del_timer(&ct->timeout)) return -ETIME; @@ -893,66 +916,67 @@ ctnetlink_change_timeout(struct nf_conn *ct, struct nfattr *cda[]) } static inline int -ctnetlink_change_protoinfo(struct nf_conn *ct, struct nfattr *cda[]) +ctnetlink_change_protoinfo(struct nf_conn 
*ct, struct nlattr *cda[]) { - struct nfattr *tb[CTA_PROTOINFO_MAX], *attr = cda[CTA_PROTOINFO-1]; + struct nlattr *tb[CTA_PROTOINFO_MAX+1], *attr = cda[CTA_PROTOINFO]; struct nf_conntrack_l4proto *l4proto; u_int16_t npt = ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.protonum; u_int16_t l3num = ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.l3num; int err = 0; - nfattr_parse_nested(tb, CTA_PROTOINFO_MAX, attr); + nla_parse_nested(tb, CTA_PROTOINFO_MAX, attr, NULL); l4proto = nf_ct_l4proto_find_get(l3num, npt); - if (l4proto->from_nfattr) - err = l4proto->from_nfattr(tb, ct); + if (l4proto->from_nlattr) + err = l4proto->from_nlattr(tb, ct); nf_ct_l4proto_put(l4proto); return err; } static int -ctnetlink_change_conntrack(struct nf_conn *ct, struct nfattr *cda[]) +ctnetlink_change_conntrack(struct nf_conn *ct, struct nlattr *cda[]) { int err; - if (cda[CTA_HELP-1]) { + if (cda[CTA_HELP]) { err = ctnetlink_change_helper(ct, cda); if (err < 0) return err; } - if (cda[CTA_TIMEOUT-1]) { + if (cda[CTA_TIMEOUT]) { err = ctnetlink_change_timeout(ct, cda); if (err < 0) return err; } - if (cda[CTA_STATUS-1]) { + if (cda[CTA_STATUS]) { err = ctnetlink_change_status(ct, cda); if (err < 0) return err; } - if (cda[CTA_PROTOINFO-1]) { + if (cda[CTA_PROTOINFO]) { err = ctnetlink_change_protoinfo(ct, cda); if (err < 0) return err; } #if defined(CONFIG_NF_CONNTRACK_MARK) - if (cda[CTA_MARK-1]) - ct->mark = ntohl(*(__be32 *)NFA_DATA(cda[CTA_MARK-1])); + if (cda[CTA_MARK]) + ct->mark = ntohl(*(__be32 *)nla_data(cda[CTA_MARK])); #endif return 0; } static int -ctnetlink_create_conntrack(struct nfattr *cda[], +ctnetlink_create_conntrack(struct nlattr *cda[], struct nf_conntrack_tuple *otuple, - struct nf_conntrack_tuple *rtuple) + struct nf_conntrack_tuple *rtuple, + struct nf_conn *master_ct) { struct nf_conn *ct; int err = -EINVAL; @@ -963,28 +987,28 @@ ctnetlink_create_conntrack(struct nfattr *cda[], if (ct == NULL || IS_ERR(ct)) return -ENOMEM; - if (!cda[CTA_TIMEOUT-1]) + if (!cda[CTA_TIMEOUT]) goto err; - ct->timeout.expires = ntohl(*(__be32 *)NFA_DATA(cda[CTA_TIMEOUT-1])); + ct->timeout.expires = ntohl(*(__be32 *)nla_data(cda[CTA_TIMEOUT])); ct->timeout.expires = jiffies + ct->timeout.expires * HZ; ct->status |= IPS_CONFIRMED; - if (cda[CTA_STATUS-1]) { + if (cda[CTA_STATUS]) { err = ctnetlink_change_status(ct, cda); if (err < 0) goto err; } - if (cda[CTA_PROTOINFO-1]) { + if (cda[CTA_PROTOINFO]) { err = ctnetlink_change_protoinfo(ct, cda); if (err < 0) goto err; } #if defined(CONFIG_NF_CONNTRACK_MARK) - if (cda[CTA_MARK-1]) - ct->mark = ntohl(*(__be32 *)NFA_DATA(cda[CTA_MARK-1])); + if (cda[CTA_MARK]) + ct->mark = ntohl(*(__be32 *)nla_data(cda[CTA_MARK])); #endif helper = nf_ct_helper_find_get(rtuple); @@ -999,6 +1023,10 @@ ctnetlink_create_conntrack(struct nfattr *cda[], rcu_assign_pointer(help->helper, helper); } + /* setup master conntrack: this is a confirmed expectation */ + if (master_ct) + ct->master = master_ct; + add_timer(&ct->timeout); nf_conntrack_hash_insert(ct); @@ -1014,7 +1042,7 @@ ctnetlink_create_conntrack(struct nfattr *cda[], static int ctnetlink_new_conntrack(struct sock *ctnl, struct sk_buff *skb, - struct nlmsghdr *nlh, struct nfattr *cda[]) + struct nlmsghdr *nlh, struct nlattr *cda[]) { struct nf_conntrack_tuple otuple, rtuple; struct nf_conntrack_tuple_hash *h = NULL; @@ -1022,32 +1050,56 @@ ctnetlink_new_conntrack(struct sock *ctnl, struct sk_buff *skb, u_int8_t u3 = nfmsg->nfgen_family; int err = 0; - if (nfattr_bad_size(cda, CTA_MAX, cta_min)) - return -EINVAL; - - if 
(cda[CTA_TUPLE_ORIG-1]) { + if (cda[CTA_TUPLE_ORIG]) { err = ctnetlink_parse_tuple(cda, &otuple, CTA_TUPLE_ORIG, u3); if (err < 0) return err; } - if (cda[CTA_TUPLE_REPLY-1]) { + if (cda[CTA_TUPLE_REPLY]) { err = ctnetlink_parse_tuple(cda, &rtuple, CTA_TUPLE_REPLY, u3); if (err < 0) return err; } write_lock_bh(&nf_conntrack_lock); - if (cda[CTA_TUPLE_ORIG-1]) + if (cda[CTA_TUPLE_ORIG]) h = __nf_conntrack_find(&otuple, NULL); - else if (cda[CTA_TUPLE_REPLY-1]) + else if (cda[CTA_TUPLE_REPLY]) h = __nf_conntrack_find(&rtuple, NULL); if (h == NULL) { + struct nf_conntrack_tuple master; + struct nf_conntrack_tuple_hash *master_h = NULL; + struct nf_conn *master_ct = NULL; + + if (cda[CTA_TUPLE_MASTER]) { + err = ctnetlink_parse_tuple(cda, + &master, + CTA_TUPLE_MASTER, + u3); + if (err < 0) + return err; + + master_h = __nf_conntrack_find(&master, NULL); + if (master_h == NULL) { + err = -ENOENT; + goto out_unlock; + } + master_ct = nf_ct_tuplehash_to_ctrack(master_h); + atomic_inc(&master_ct->ct_general.use); + } + write_unlock_bh(&nf_conntrack_lock); err = -ENOENT; if (nlh->nlmsg_flags & NLM_F_CREATE) - err = ctnetlink_create_conntrack(cda, &otuple, &rtuple); + err = ctnetlink_create_conntrack(cda, + &otuple, + &rtuple, + master_ct); + if (err < 0 && master_ct) + nf_ct_put(master_ct); + return err; } /* implicit 'else' */ @@ -1057,7 +1109,12 @@ ctnetlink_new_conntrack(struct sock *ctnl, struct sk_buff *skb, err = -EEXIST; if (!(nlh->nlmsg_flags & NLM_F_EXCL)) { /* we only allow nat config for new conntracks */ - if (cda[CTA_NAT_SRC-1] || cda[CTA_NAT_DST-1]) { + if (cda[CTA_NAT_SRC] || cda[CTA_NAT_DST]) { + err = -EINVAL; + goto out_unlock; + } + /* can't link an existing conntrack to a master */ + if (cda[CTA_TUPLE_MASTER]) { err = -EINVAL; goto out_unlock; } @@ -1079,16 +1136,18 @@ ctnetlink_exp_dump_tuple(struct sk_buff *skb, const struct nf_conntrack_tuple *tuple, enum ctattr_expect type) { - struct nfattr *nest_parms = NFA_NEST(skb, type); + struct nlattr *nest_parms; + nest_parms = nla_nest_start(skb, type | NLA_F_NESTED); + if (!nest_parms) + goto nla_put_failure; if (ctnetlink_dump_tuples(skb, tuple) < 0) - goto nfattr_failure; - - NFA_NEST_END(skb, nest_parms); + goto nla_put_failure; + nla_nest_end(skb, nest_parms); return 0; -nfattr_failure: +nla_put_failure: return -1; } @@ -1101,32 +1160,34 @@ ctnetlink_exp_dump_mask(struct sk_buff *skb, struct nf_conntrack_l3proto *l3proto; struct nf_conntrack_l4proto *l4proto; struct nf_conntrack_tuple m; - struct nfattr *nest_parms; + struct nlattr *nest_parms; memset(&m, 0xFF, sizeof(m)); m.src.u.all = mask->src.u.all; memcpy(&m.src.u3, &mask->src.u3, sizeof(m.src.u3)); - nest_parms = NFA_NEST(skb, CTA_EXPECT_MASK); + nest_parms = nla_nest_start(skb, CTA_EXPECT_MASK | NLA_F_NESTED); + if (!nest_parms) + goto nla_put_failure; l3proto = nf_ct_l3proto_find_get(tuple->src.l3num); ret = ctnetlink_dump_tuples_ip(skb, &m, l3proto); nf_ct_l3proto_put(l3proto); if (unlikely(ret < 0)) - goto nfattr_failure; + goto nla_put_failure; l4proto = nf_ct_l4proto_find_get(tuple->src.l3num, tuple->dst.protonum); ret = ctnetlink_dump_tuples_proto(skb, &m, l4proto); nf_ct_l4proto_put(l4proto); if (unlikely(ret < 0)) - goto nfattr_failure; + goto nla_put_failure; - NFA_NEST_END(skb, nest_parms); + nla_nest_end(skb, nest_parms); return 0; -nfattr_failure: +nla_put_failure: return -1; } @@ -1136,23 +1197,23 @@ ctnetlink_exp_dump_expect(struct sk_buff *skb, { struct nf_conn *master = exp->master; __be32 timeout = htonl((exp->timeout.expires - jiffies) / HZ); - __be32 
id = htonl(exp->id); + __be32 id = htonl((unsigned long)exp); if (ctnetlink_exp_dump_tuple(skb, &exp->tuple, CTA_EXPECT_TUPLE) < 0) - goto nfattr_failure; + goto nla_put_failure; if (ctnetlink_exp_dump_mask(skb, &exp->tuple, &exp->mask) < 0) - goto nfattr_failure; + goto nla_put_failure; if (ctnetlink_exp_dump_tuple(skb, &master->tuplehash[IP_CT_DIR_ORIGINAL].tuple, CTA_EXPECT_MASTER) < 0) - goto nfattr_failure; + goto nla_put_failure; - NFA_PUT(skb, CTA_EXPECT_TIMEOUT, sizeof(timeout), &timeout); - NFA_PUT(skb, CTA_EXPECT_ID, sizeof(u_int32_t), &id); + NLA_PUT(skb, CTA_EXPECT_TIMEOUT, sizeof(timeout), &timeout); + NLA_PUT(skb, CTA_EXPECT_ID, sizeof(u_int32_t), &id); return 0; -nfattr_failure: +nla_put_failure: return -1; } @@ -1176,13 +1237,13 @@ ctnetlink_exp_fill_info(struct sk_buff *skb, u32 pid, u32 seq, nfmsg->res_id = 0; if (ctnetlink_exp_dump_expect(skb, exp) < 0) - goto nfattr_failure; + goto nla_put_failure; nlh->nlmsg_len = skb_tail_pointer(skb) - b; return skb->len; nlmsg_failure: -nfattr_failure: +nla_put_failure: nlmsg_trim(skb, b); return -1; } @@ -1224,14 +1285,14 @@ static int ctnetlink_expect_event(struct notifier_block *this, nfmsg->res_id = 0; if (ctnetlink_exp_dump_expect(skb, exp) < 0) - goto nfattr_failure; + goto nla_put_failure; nlh->nlmsg_len = skb->tail - b; nfnetlink_send(skb, 0, NFNLGRP_CONNTRACK_EXP_NEW, 0); return NOTIFY_DONE; nlmsg_failure: -nfattr_failure: +nla_put_failure: kfree_skb(skb); return NOTIFY_DONE; } @@ -1286,14 +1347,14 @@ ctnetlink_exp_dump_table(struct sk_buff *skb, struct netlink_callback *cb) return skb->len; } -static const size_t cta_min_exp[CTA_EXPECT_MAX] = { - [CTA_EXPECT_TIMEOUT-1] = sizeof(u_int32_t), - [CTA_EXPECT_ID-1] = sizeof(u_int32_t) +static const struct nla_policy exp_nla_policy[CTA_EXPECT_MAX+1] = { + [CTA_EXPECT_TIMEOUT] = { .type = NLA_U32 }, + [CTA_EXPECT_ID] = { .type = NLA_U32 }, }; static int ctnetlink_get_expect(struct sock *ctnl, struct sk_buff *skb, - struct nlmsghdr *nlh, struct nfattr *cda[]) + struct nlmsghdr *nlh, struct nlattr *cda[]) { struct nf_conntrack_tuple tuple; struct nf_conntrack_expect *exp; @@ -1302,16 +1363,13 @@ ctnetlink_get_expect(struct sock *ctnl, struct sk_buff *skb, u_int8_t u3 = nfmsg->nfgen_family; int err = 0; - if (nfattr_bad_size(cda, CTA_EXPECT_MAX, cta_min_exp)) - return -EINVAL; - if (nlh->nlmsg_flags & NLM_F_DUMP) { return netlink_dump_start(ctnl, skb, nlh, ctnetlink_exp_dump_table, ctnetlink_exp_done); } - if (cda[CTA_EXPECT_MASTER-1]) + if (cda[CTA_EXPECT_MASTER]) err = ctnetlink_parse_tuple(cda, &tuple, CTA_EXPECT_MASTER, u3); else return -EINVAL; @@ -1323,9 +1381,9 @@ ctnetlink_get_expect(struct sock *ctnl, struct sk_buff *skb, if (!exp) return -ENOENT; - if (cda[CTA_EXPECT_ID-1]) { - __be32 id = *(__be32 *)NFA_DATA(cda[CTA_EXPECT_ID-1]); - if (exp->id != ntohl(id)) { + if (cda[CTA_EXPECT_ID]) { + __be32 id = *(__be32 *)nla_data(cda[CTA_EXPECT_ID]); + if (ntohl(id) != (u32)(unsigned long)exp) { nf_ct_expect_put(exp); return -ENOENT; } @@ -1355,7 +1413,7 @@ ctnetlink_get_expect(struct sock *ctnl, struct sk_buff *skb, static int ctnetlink_del_expect(struct sock *ctnl, struct sk_buff *skb, - struct nlmsghdr *nlh, struct nfattr *cda[]) + struct nlmsghdr *nlh, struct nlattr *cda[]) { struct nf_conntrack_expect *exp; struct nf_conntrack_tuple tuple; @@ -1366,10 +1424,7 @@ ctnetlink_del_expect(struct sock *ctnl, struct sk_buff *skb, unsigned int i; int err; - if (nfattr_bad_size(cda, CTA_EXPECT_MAX, cta_min_exp)) - return -EINVAL; - - if (cda[CTA_EXPECT_TUPLE-1]) { + if 
(cda[CTA_EXPECT_TUPLE]) { /* delete a single expect by tuple */ err = ctnetlink_parse_tuple(cda, &tuple, CTA_EXPECT_TUPLE, u3); if (err < 0) @@ -1380,9 +1435,9 @@ ctnetlink_del_expect(struct sock *ctnl, struct sk_buff *skb, if (!exp) return -ENOENT; - if (cda[CTA_EXPECT_ID-1]) { - __be32 id = *(__be32 *)NFA_DATA(cda[CTA_EXPECT_ID-1]); - if (exp->id != ntohl(id)) { + if (cda[CTA_EXPECT_ID]) { + __be32 id = *(__be32 *)nla_data(cda[CTA_EXPECT_ID]); + if (ntohl(id) != (u32)(unsigned long)exp) { nf_ct_expect_put(exp); return -ENOENT; } @@ -1393,8 +1448,8 @@ ctnetlink_del_expect(struct sock *ctnl, struct sk_buff *skb, /* have to put what we 'get' above. * after this line usage count == 0 */ nf_ct_expect_put(exp); - } else if (cda[CTA_EXPECT_HELP_NAME-1]) { - char *name = NFA_DATA(cda[CTA_EXPECT_HELP_NAME-1]); + } else if (cda[CTA_EXPECT_HELP_NAME]) { + char *name = nla_data(cda[CTA_EXPECT_HELP_NAME]); struct nf_conn_help *m_help; /* delete all expectations for this helper */ @@ -1436,13 +1491,13 @@ ctnetlink_del_expect(struct sock *ctnl, struct sk_buff *skb, return 0; } static int -ctnetlink_change_expect(struct nf_conntrack_expect *x, struct nfattr *cda[]) +ctnetlink_change_expect(struct nf_conntrack_expect *x, struct nlattr *cda[]) { return -EOPNOTSUPP; } static int -ctnetlink_create_expect(struct nfattr *cda[], u_int8_t u3) +ctnetlink_create_expect(struct nlattr *cda[], u_int8_t u3) { struct nf_conntrack_tuple tuple, mask, master_tuple; struct nf_conntrack_tuple_hash *h = NULL; @@ -1499,7 +1554,7 @@ ctnetlink_create_expect(struct nfattr *cda[], u_int8_t u3) static int ctnetlink_new_expect(struct sock *ctnl, struct sk_buff *skb, - struct nlmsghdr *nlh, struct nfattr *cda[]) + struct nlmsghdr *nlh, struct nlattr *cda[]) { struct nf_conntrack_tuple tuple; struct nf_conntrack_expect *exp; @@ -1507,12 +1562,9 @@ ctnetlink_new_expect(struct sock *ctnl, struct sk_buff *skb, u_int8_t u3 = nfmsg->nfgen_family; int err = 0; - if (nfattr_bad_size(cda, CTA_EXPECT_MAX, cta_min_exp)) - return -EINVAL; - - if (!cda[CTA_EXPECT_TUPLE-1] - || !cda[CTA_EXPECT_MASK-1] - || !cda[CTA_EXPECT_MASTER-1]) + if (!cda[CTA_EXPECT_TUPLE] + || !cda[CTA_EXPECT_MASK] + || !cda[CTA_EXPECT_MASTER]) return -EINVAL; err = ctnetlink_parse_tuple(cda, &tuple, CTA_EXPECT_TUPLE, u3); @@ -1548,34 +1600,41 @@ static struct notifier_block ctnl_notifier_exp = { }; #endif -static struct nfnl_callback ctnl_cb[IPCTNL_MSG_MAX] = { +static const struct nfnl_callback ctnl_cb[IPCTNL_MSG_MAX] = { [IPCTNL_MSG_CT_NEW] = { .call = ctnetlink_new_conntrack, - .attr_count = CTA_MAX, }, + .attr_count = CTA_MAX, + .policy = ct_nla_policy }, [IPCTNL_MSG_CT_GET] = { .call = ctnetlink_get_conntrack, - .attr_count = CTA_MAX, }, + .attr_count = CTA_MAX, + .policy = ct_nla_policy }, [IPCTNL_MSG_CT_DELETE] = { .call = ctnetlink_del_conntrack, - .attr_count = CTA_MAX, }, + .attr_count = CTA_MAX, + .policy = ct_nla_policy }, [IPCTNL_MSG_CT_GET_CTRZERO] = { .call = ctnetlink_get_conntrack, - .attr_count = CTA_MAX, }, + .attr_count = CTA_MAX, + .policy = ct_nla_policy }, }; -static struct nfnl_callback ctnl_exp_cb[IPCTNL_MSG_EXP_MAX] = { +static const struct nfnl_callback ctnl_exp_cb[IPCTNL_MSG_EXP_MAX] = { [IPCTNL_MSG_EXP_GET] = { .call = ctnetlink_get_expect, - .attr_count = CTA_EXPECT_MAX, }, + .attr_count = CTA_EXPECT_MAX, + .policy = exp_nla_policy }, [IPCTNL_MSG_EXP_NEW] = { .call = ctnetlink_new_expect, - .attr_count = CTA_EXPECT_MAX, }, + .attr_count = CTA_EXPECT_MAX, + .policy = exp_nla_policy }, [IPCTNL_MSG_EXP_DELETE] = { .call = ctnetlink_del_expect, - 
.attr_count = CTA_EXPECT_MAX, }, + .attr_count = CTA_EXPECT_MAX, + .policy = exp_nla_policy }, }; -static struct nfnetlink_subsystem ctnl_subsys = { +static const struct nfnetlink_subsystem ctnl_subsys = { .name = "conntrack", .subsys_id = NFNL_SUBSYS_CTNETLINK, .cb_count = IPCTNL_MSG_MAX, .cb = ctnl_cb, }; -static struct nfnetlink_subsystem ctnl_exp_subsys = { +static const struct nfnetlink_subsystem ctnl_exp_subsys = { .name = "conntrack_expect", .subsys_id = NFNL_SUBSYS_CTNETLINK_EXP, .cb_count = IPCTNL_MSG_EXP_MAX, diff --git a/net/netfilter/nf_conntrack_proto_gre.c b/net/netfilter/nf_conntrack_proto_gre.c index bdbead8a7a83c60360efe7895d5078aeb2ff23b9..4a185f6aa65a907235c7ac7c2ae9629aa37c242a 100644 --- a/net/netfilter/nf_conntrack_proto_gre.c +++ b/net/netfilter/nf_conntrack_proto_gre.c @@ -274,8 +274,9 @@ static struct nf_conntrack_l4proto nf_conntrack_l4proto_gre4 __read_mostly = { .destroy = gre_destroy, .me = THIS_MODULE, #if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE) - .tuple_to_nfattr = nf_ct_port_tuple_to_nfattr, - .nfattr_to_tuple = nf_ct_port_nfattr_to_tuple, + .tuple_to_nlattr = nf_ct_port_tuple_to_nlattr, + .nlattr_to_tuple = nf_ct_port_nlattr_to_tuple, + .nla_policy = nf_ct_port_nla_policy, #endif }; diff --git a/net/netfilter/nf_conntrack_proto_tcp.c b/net/netfilter/nf_conntrack_proto_tcp.c index eb3fe7401466544483d6356dbd28e3da0fca873c..c7075345971b000d6396fd07b4c710fcefc22227 100644 --- a/net/netfilter/nf_conntrack_proto_tcp.c +++ b/net/netfilter/nf_conntrack_proto_tcp.c @@ -831,6 +831,20 @@ static int tcp_packet(struct nf_conn *conntrack, tuple = &conntrack->tuplehash[dir].tuple; switch (new_state) { + case TCP_CONNTRACK_SYN_SENT: + if (old_state < TCP_CONNTRACK_TIME_WAIT) + break; + if (conntrack->proto.tcp.seen[!dir].flags & + IP_CT_TCP_FLAG_CLOSE_INIT) { + /* Attempt to reopen a closed connection. + * Delete this connection and look up again. */ + write_unlock_bh(&tcp_lock); + if (del_timer(&conntrack->timeout)) + conntrack->timeout.function((unsigned long) + conntrack); + return -NF_REPEAT; + } + /* Fall through */ case TCP_CONNTRACK_IGNORE: /* Ignored packets: * @@ -879,27 +893,6 @@ static int tcp_packet(struct nf_conn *conntrack, nf_log_packet(pf, 0, skb, NULL, NULL, NULL, "nf_ct_tcp: invalid state "); return -NF_ACCEPT; - case TCP_CONNTRACK_SYN_SENT: - if (old_state < TCP_CONNTRACK_TIME_WAIT) - break; - if ((conntrack->proto.tcp.seen[dir].flags & - IP_CT_TCP_FLAG_CLOSE_INIT) - || after(ntohl(th->seq), - conntrack->proto.tcp.seen[dir].td_end)) { - /* Attempt to reopen a closed connection. - * Delete this connection and look up again. 
*/ - write_unlock_bh(&tcp_lock); - if (del_timer(&conntrack->timeout)) - conntrack->timeout.function((unsigned long) - conntrack); - return -NF_REPEAT; - } else { - write_unlock_bh(&tcp_lock); - if (LOG_INVALID(IPPROTO_TCP)) - nf_log_packet(pf, 0, skb, NULL, NULL, - NULL, "nf_ct_tcp: invalid SYN"); - return -NF_ACCEPT; - } case TCP_CONNTRACK_CLOSE: if (index == TCP_RST_SET && ((test_bit(IPS_SEEN_REPLY_BIT, &conntrack->status) @@ -1067,93 +1060,96 @@ static int tcp_new(struct nf_conn *conntrack, #include #include -static int tcp_to_nfattr(struct sk_buff *skb, struct nfattr *nfa, +static int tcp_to_nlattr(struct sk_buff *skb, struct nlattr *nla, const struct nf_conn *ct) { - struct nfattr *nest_parms; + struct nlattr *nest_parms; struct nf_ct_tcp_flags tmp = {}; read_lock_bh(&tcp_lock); - nest_parms = NFA_NEST(skb, CTA_PROTOINFO_TCP); - NFA_PUT(skb, CTA_PROTOINFO_TCP_STATE, sizeof(u_int8_t), + nest_parms = nla_nest_start(skb, CTA_PROTOINFO_TCP | NLA_F_NESTED); + if (!nest_parms) + goto nla_put_failure; + + NLA_PUT(skb, CTA_PROTOINFO_TCP_STATE, sizeof(u_int8_t), &ct->proto.tcp.state); - NFA_PUT(skb, CTA_PROTOINFO_TCP_WSCALE_ORIGINAL, sizeof(u_int8_t), + NLA_PUT(skb, CTA_PROTOINFO_TCP_WSCALE_ORIGINAL, sizeof(u_int8_t), &ct->proto.tcp.seen[0].td_scale); - NFA_PUT(skb, CTA_PROTOINFO_TCP_WSCALE_REPLY, sizeof(u_int8_t), + NLA_PUT(skb, CTA_PROTOINFO_TCP_WSCALE_REPLY, sizeof(u_int8_t), &ct->proto.tcp.seen[1].td_scale); tmp.flags = ct->proto.tcp.seen[0].flags; - NFA_PUT(skb, CTA_PROTOINFO_TCP_FLAGS_ORIGINAL, + NLA_PUT(skb, CTA_PROTOINFO_TCP_FLAGS_ORIGINAL, sizeof(struct nf_ct_tcp_flags), &tmp); tmp.flags = ct->proto.tcp.seen[1].flags; - NFA_PUT(skb, CTA_PROTOINFO_TCP_FLAGS_REPLY, + NLA_PUT(skb, CTA_PROTOINFO_TCP_FLAGS_REPLY, sizeof(struct nf_ct_tcp_flags), &tmp); read_unlock_bh(&tcp_lock); - NFA_NEST_END(skb, nest_parms); + nla_nest_end(skb, nest_parms); return 0; -nfattr_failure: +nla_put_failure: read_unlock_bh(&tcp_lock); return -1; } -static const size_t cta_min_tcp[CTA_PROTOINFO_TCP_MAX] = { - [CTA_PROTOINFO_TCP_STATE-1] = sizeof(u_int8_t), - [CTA_PROTOINFO_TCP_WSCALE_ORIGINAL-1] = sizeof(u_int8_t), - [CTA_PROTOINFO_TCP_WSCALE_REPLY-1] = sizeof(u_int8_t), - [CTA_PROTOINFO_TCP_FLAGS_ORIGINAL-1] = sizeof(struct nf_ct_tcp_flags), - [CTA_PROTOINFO_TCP_FLAGS_REPLY-1] = sizeof(struct nf_ct_tcp_flags) +static const struct nla_policy tcp_nla_policy[CTA_PROTOINFO_TCP_MAX+1] = { + [CTA_PROTOINFO_TCP_STATE] = { .type = NLA_U8 }, + [CTA_PROTOINFO_TCP_WSCALE_ORIGINAL] = { .type = NLA_U8 }, + [CTA_PROTOINFO_TCP_WSCALE_REPLY] = { .type = NLA_U8 }, + [CTA_PROTOINFO_TCP_FLAGS_ORIGINAL] = { .len = sizeof(struct nf_ct_tcp_flags) }, + [CTA_PROTOINFO_TCP_FLAGS_REPLY] = { .len = sizeof(struct nf_ct_tcp_flags) }, }; -static int nfattr_to_tcp(struct nfattr *cda[], struct nf_conn *ct) +static int nlattr_to_tcp(struct nlattr *cda[], struct nf_conn *ct) { - struct nfattr *attr = cda[CTA_PROTOINFO_TCP-1]; - struct nfattr *tb[CTA_PROTOINFO_TCP_MAX]; + struct nlattr *attr = cda[CTA_PROTOINFO_TCP]; + struct nlattr *tb[CTA_PROTOINFO_TCP_MAX+1]; + int err; /* updates could not contain anything about the private * protocol info, in that case skip the parsing */ if (!attr) return 0; - nfattr_parse_nested(tb, CTA_PROTOINFO_TCP_MAX, attr); - - if (nfattr_bad_size(tb, CTA_PROTOINFO_TCP_MAX, cta_min_tcp)) - return -EINVAL; + err = nla_parse_nested(tb, CTA_PROTOINFO_TCP_MAX, attr, tcp_nla_policy); + if (err < 0) + return err; - if (!tb[CTA_PROTOINFO_TCP_STATE-1]) + if (!tb[CTA_PROTOINFO_TCP_STATE]) return -EINVAL; 
write_lock_bh(&tcp_lock); ct->proto.tcp.state = - *(u_int8_t *)NFA_DATA(tb[CTA_PROTOINFO_TCP_STATE-1]); + *(u_int8_t *)nla_data(tb[CTA_PROTOINFO_TCP_STATE]); - if (tb[CTA_PROTOINFO_TCP_FLAGS_ORIGINAL-1]) { + if (tb[CTA_PROTOINFO_TCP_FLAGS_ORIGINAL]) { struct nf_ct_tcp_flags *attr = - NFA_DATA(tb[CTA_PROTOINFO_TCP_FLAGS_ORIGINAL-1]); + nla_data(tb[CTA_PROTOINFO_TCP_FLAGS_ORIGINAL]); ct->proto.tcp.seen[0].flags &= ~attr->mask; ct->proto.tcp.seen[0].flags |= attr->flags & attr->mask; } - if (tb[CTA_PROTOINFO_TCP_FLAGS_REPLY-1]) { + if (tb[CTA_PROTOINFO_TCP_FLAGS_REPLY]) { struct nf_ct_tcp_flags *attr = - NFA_DATA(tb[CTA_PROTOINFO_TCP_FLAGS_REPLY-1]); + nla_data(tb[CTA_PROTOINFO_TCP_FLAGS_REPLY]); ct->proto.tcp.seen[1].flags &= ~attr->mask; ct->proto.tcp.seen[1].flags |= attr->flags & attr->mask; } - if (tb[CTA_PROTOINFO_TCP_WSCALE_ORIGINAL-1] && - tb[CTA_PROTOINFO_TCP_WSCALE_REPLY-1] && + if (tb[CTA_PROTOINFO_TCP_WSCALE_ORIGINAL] && + tb[CTA_PROTOINFO_TCP_WSCALE_REPLY] && ct->proto.tcp.seen[0].flags & IP_CT_TCP_FLAG_WINDOW_SCALE && ct->proto.tcp.seen[1].flags & IP_CT_TCP_FLAG_WINDOW_SCALE) { ct->proto.tcp.seen[0].td_scale = *(u_int8_t *) - NFA_DATA(tb[CTA_PROTOINFO_TCP_WSCALE_ORIGINAL-1]); + nla_data(tb[CTA_PROTOINFO_TCP_WSCALE_ORIGINAL]); ct->proto.tcp.seen[1].td_scale = *(u_int8_t *) - NFA_DATA(tb[CTA_PROTOINFO_TCP_WSCALE_REPLY-1]); + nla_data(tb[CTA_PROTOINFO_TCP_WSCALE_REPLY]); } write_unlock_bh(&tcp_lock); @@ -1384,10 +1380,11 @@ struct nf_conntrack_l4proto nf_conntrack_l4proto_tcp4 __read_mostly = .new = tcp_new, .error = tcp_error, #if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE) - .to_nfattr = tcp_to_nfattr, - .from_nfattr = nfattr_to_tcp, - .tuple_to_nfattr = nf_ct_port_tuple_to_nfattr, - .nfattr_to_tuple = nf_ct_port_nfattr_to_tuple, + .to_nlattr = tcp_to_nlattr, + .from_nlattr = nlattr_to_tcp, + .tuple_to_nlattr = nf_ct_port_tuple_to_nlattr, + .nlattr_to_tuple = nf_ct_port_nlattr_to_tuple, + .nla_policy = nf_ct_port_nla_policy, #endif #ifdef CONFIG_SYSCTL .ctl_table_users = &tcp_sysctl_table_users, @@ -1413,10 +1410,11 @@ struct nf_conntrack_l4proto nf_conntrack_l4proto_tcp6 __read_mostly = .new = tcp_new, .error = tcp_error, #if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE) - .to_nfattr = tcp_to_nfattr, - .from_nfattr = nfattr_to_tcp, - .tuple_to_nfattr = nf_ct_port_tuple_to_nfattr, - .nfattr_to_tuple = nf_ct_port_nfattr_to_tuple, + .to_nlattr = tcp_to_nlattr, + .from_nlattr = nlattr_to_tcp, + .tuple_to_nlattr = nf_ct_port_tuple_to_nlattr, + .nlattr_to_tuple = nf_ct_port_nlattr_to_tuple, + .nla_policy = nf_ct_port_nla_policy, #endif #ifdef CONFIG_SYSCTL .ctl_table_users = &tcp_sysctl_table_users, diff --git a/net/netfilter/nf_conntrack_proto_udp.c b/net/netfilter/nf_conntrack_proto_udp.c index 2a2fd1a764ea129ba34745f0adce87a6176ee145..ba80e1a1ea17fd740fae91cacde263106cec28d2 100644 --- a/net/netfilter/nf_conntrack_proto_udp.c +++ b/net/netfilter/nf_conntrack_proto_udp.c @@ -203,8 +203,9 @@ struct nf_conntrack_l4proto nf_conntrack_l4proto_udp4 __read_mostly = .new = udp_new, .error = udp_error, #if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE) - .tuple_to_nfattr = nf_ct_port_tuple_to_nfattr, - .nfattr_to_tuple = nf_ct_port_nfattr_to_tuple, + .tuple_to_nlattr = nf_ct_port_tuple_to_nlattr, + .nlattr_to_tuple = nf_ct_port_nlattr_to_tuple, + .nla_policy = nf_ct_port_nla_policy, #endif #ifdef CONFIG_SYSCTL .ctl_table_users = &udp_sysctl_table_users, @@ -230,8 +231,9 @@ struct nf_conntrack_l4proto 
nf_conntrack_l4proto_udp6 __read_mostly = .new = udp_new, .error = udp_error, #if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE) - .tuple_to_nfattr = nf_ct_port_tuple_to_nfattr, - .nfattr_to_tuple = nf_ct_port_nfattr_to_tuple, + .tuple_to_nlattr = nf_ct_port_tuple_to_nlattr, + .nlattr_to_tuple = nf_ct_port_nlattr_to_tuple, + .nla_policy = nf_ct_port_nla_policy, #endif #ifdef CONFIG_SYSCTL .ctl_table_users = &udp_sysctl_table_users, diff --git a/net/netfilter/nf_conntrack_proto_udplite.c b/net/netfilter/nf_conntrack_proto_udplite.c index b906b413997c8db589df41f97561c9e668aa9d93..b8981dd922bed6efe20c2b2727e8e673410b4d20 100644 --- a/net/netfilter/nf_conntrack_proto_udplite.c +++ b/net/netfilter/nf_conntrack_proto_udplite.c @@ -203,8 +203,9 @@ static struct nf_conntrack_l4proto nf_conntrack_l4proto_udplite4 __read_mostly = .new = udplite_new, .error = udplite_error, #if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE) - .tuple_to_nfattr = nf_ct_port_tuple_to_nfattr, - .nfattr_to_tuple = nf_ct_port_nfattr_to_tuple, + .tuple_to_nlattr = nf_ct_port_tuple_to_nlattr, + .nlattr_to_tuple = nf_ct_port_nlattr_to_tuple, + .nla_policy = nf_ct_port_nla_policy, #endif #ifdef CONFIG_SYSCTL .ctl_table_users = &udplite_sysctl_table_users, @@ -226,8 +227,9 @@ static struct nf_conntrack_l4proto nf_conntrack_l4proto_udplite6 __read_mostly = .new = udplite_new, .error = udplite_error, #if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE) - .tuple_to_nfattr = nf_ct_port_tuple_to_nfattr, - .nfattr_to_tuple = nf_ct_port_nfattr_to_tuple, + .tuple_to_nlattr = nf_ct_port_tuple_to_nlattr, + .nlattr_to_tuple = nf_ct_port_nlattr_to_tuple, + .nla_policy = nf_ct_port_nla_policy, #endif #ifdef CONFIG_SYSCTL .ctl_table_users = &udplite_sysctl_table_users, diff --git a/net/netfilter/nf_conntrack_standalone.c b/net/netfilter/nf_conntrack_standalone.c index a4ce5e8879974c14e36f969b4d9c787d2ba41362..9efdd37fc19543a1e57b013ff51686ad2b1c406a 100644 --- a/net/netfilter/nf_conntrack_standalone.c +++ b/net/netfilter/nf_conntrack_standalone.c @@ -14,6 +14,7 @@ #include #include #include +#include #ifdef CONFIG_SYSCTL #include #endif @@ -194,22 +195,8 @@ static const struct seq_operations ct_seq_ops = { static int ct_open(struct inode *inode, struct file *file) { - struct seq_file *seq; - struct ct_iter_state *st; - int ret; - - st = kzalloc(sizeof(struct ct_iter_state), GFP_KERNEL); - if (st == NULL) - return -ENOMEM; - ret = seq_open(file, &ct_seq_ops); - if (ret) - goto out_free; - seq = file->private_data; - seq->private = st; - return ret; -out_free: - kfree(st); - return ret; + return seq_open_private(file, &ct_seq_ops, + sizeof(struct ct_iter_state)); } static const struct file_operations ct_file_ops = { @@ -420,10 +407,10 @@ static int __init nf_conntrack_standalone_init(void) return ret; #ifdef CONFIG_PROC_FS - proc = proc_net_fops_create("nf_conntrack", 0440, &ct_file_ops); + proc = proc_net_fops_create(&init_net, "nf_conntrack", 0440, &ct_file_ops); if (!proc) goto cleanup_init; - proc_stat = create_proc_entry("nf_conntrack", S_IRUGO, proc_net_stat); + proc_stat = create_proc_entry("nf_conntrack", S_IRUGO, init_net.proc_net_stat); if (!proc_stat) goto cleanup_proc; @@ -444,9 +431,9 @@ static int __init nf_conntrack_standalone_init(void) cleanup_proc_stat: #endif #ifdef CONFIG_PROC_FS - remove_proc_entry("nf_conntrack", proc_net_stat); + remove_proc_entry("nf_conntrack", init_net. 
proc_net_stat); cleanup_proc: - proc_net_remove("nf_conntrack"); + proc_net_remove(&init_net, "nf_conntrack"); cleanup_init: #endif /* CNFIG_PROC_FS */ nf_conntrack_cleanup(); @@ -459,8 +446,8 @@ static void __exit nf_conntrack_standalone_fini(void) unregister_sysctl_table(nf_ct_sysctl_header); #endif #ifdef CONFIG_PROC_FS - remove_proc_entry("nf_conntrack", proc_net_stat); - proc_net_remove("nf_conntrack"); + remove_proc_entry("nf_conntrack", init_net.proc_net_stat); + proc_net_remove(&init_net, "nf_conntrack"); #endif /* CNFIG_PROC_FS */ nf_conntrack_cleanup(); } diff --git a/net/netfilter/nf_sockopt.c b/net/netfilter/nf_sockopt.c index e32761ce260cbc8b30216370ddf06871da35d221..aa2831587b821d1c3afb65d59f96d0eb739a9377 100644 --- a/net/netfilter/nf_sockopt.c +++ b/net/netfilter/nf_sockopt.c @@ -69,6 +69,9 @@ static int nf_sockopt(struct sock *sk, int pf, int val, struct nf_sockopt_ops *ops; int ret; + if (sk->sk_net != &init_net) + return -ENOPROTOOPT; + if (mutex_lock_interruptible(&nf_sockopt_mutex) != 0) return -EINTR; @@ -125,6 +128,10 @@ static int compat_nf_sockopt(struct sock *sk, int pf, int val, struct nf_sockopt_ops *ops; int ret; + if (sk->sk_net != &init_net) + return -ENOPROTOOPT; + + if (mutex_lock_interruptible(&nf_sockopt_mutex) != 0) return -EINTR; diff --git a/net/netfilter/nfnetlink.c b/net/netfilter/nfnetlink.c index 8797e6953ef25cd7ca98548bf8b3c0355ac57e98..2128542995f79279fb026bcc4688eef46b434305 100644 --- a/net/netfilter/nfnetlink.c +++ b/net/netfilter/nfnetlink.c @@ -41,32 +41,20 @@ MODULE_ALIAS_NET_PF_PROTO(PF_NETLINK, NETLINK_NETFILTER); static char __initdata nfversion[] = "0.30"; static struct sock *nfnl = NULL; -static struct nfnetlink_subsystem *subsys_table[NFNL_SUBSYS_COUNT]; +static const struct nfnetlink_subsystem *subsys_table[NFNL_SUBSYS_COUNT]; static DEFINE_MUTEX(nfnl_mutex); -static void nfnl_lock(void) +static inline void nfnl_lock(void) { mutex_lock(&nfnl_mutex); } -static int nfnl_trylock(void) -{ - return !mutex_trylock(&nfnl_mutex); -} - -static void __nfnl_unlock(void) -{ - mutex_unlock(&nfnl_mutex); -} - -static void nfnl_unlock(void) +static inline void nfnl_unlock(void) { mutex_unlock(&nfnl_mutex); - if (nfnl->sk_receive_queue.qlen) - nfnl->sk_data_ready(nfnl, 0); } -int nfnetlink_subsys_register(struct nfnetlink_subsystem *n) +int nfnetlink_subsys_register(const struct nfnetlink_subsystem *n) { nfnl_lock(); if (subsys_table[n->subsys_id]) { @@ -80,7 +68,7 @@ int nfnetlink_subsys_register(struct nfnetlink_subsystem *n) } EXPORT_SYMBOL_GPL(nfnetlink_subsys_register); -int nfnetlink_subsys_unregister(struct nfnetlink_subsystem *n) +int nfnetlink_subsys_unregister(const struct nfnetlink_subsystem *n) { nfnl_lock(); subsys_table[n->subsys_id] = NULL; @@ -90,7 +78,7 @@ int nfnetlink_subsys_unregister(struct nfnetlink_subsystem *n) } EXPORT_SYMBOL_GPL(nfnetlink_subsys_unregister); -static inline struct nfnetlink_subsystem *nfnetlink_get_subsys(u_int16_t type) +static inline const struct nfnetlink_subsystem *nfnetlink_get_subsys(u_int16_t type) { u_int8_t subsys_id = NFNL_SUBSYS_ID(type); @@ -100,8 +88,8 @@ static inline struct nfnetlink_subsystem *nfnetlink_get_subsys(u_int16_t type) return subsys_table[subsys_id]; } -static inline struct nfnl_callback * -nfnetlink_find_client(u_int16_t type, struct nfnetlink_subsystem *ss) +static inline const struct nfnl_callback * +nfnetlink_find_client(u_int16_t type, const struct nfnetlink_subsystem *ss) { u_int8_t cb_id = NFNL_MSG_TYPE(type); @@ -111,62 +99,6 @@ nfnetlink_find_client(u_int16_t type, struct 
nfnetlink_subsystem *ss) return &ss->cb[cb_id]; } -void __nfa_fill(struct sk_buff *skb, int attrtype, int attrlen, - const void *data) -{ - struct nfattr *nfa; - int size = NFA_LENGTH(attrlen); - - nfa = (struct nfattr *)skb_put(skb, NFA_ALIGN(size)); - nfa->nfa_type = attrtype; - nfa->nfa_len = size; - memcpy(NFA_DATA(nfa), data, attrlen); - memset(NFA_DATA(nfa) + attrlen, 0, NFA_ALIGN(size) - size); -} -EXPORT_SYMBOL_GPL(__nfa_fill); - -void nfattr_parse(struct nfattr *tb[], int maxattr, struct nfattr *nfa, int len) -{ - memset(tb, 0, sizeof(struct nfattr *) * maxattr); - - while (NFA_OK(nfa, len)) { - unsigned flavor = NFA_TYPE(nfa); - if (flavor && flavor <= maxattr) - tb[flavor-1] = nfa; - nfa = NFA_NEXT(nfa, len); - } -} -EXPORT_SYMBOL_GPL(nfattr_parse); - -/** - * nfnetlink_check_attributes - check and parse nfnetlink attributes - * - * subsys: nfnl subsystem for which this message is to be parsed - * nlmsghdr: netlink message to be checked/parsed - * cda: array of pointers, needs to be at least subsys->attr_count big - * - */ -static int -nfnetlink_check_attributes(struct nfnetlink_subsystem *subsys, - struct nlmsghdr *nlh, struct nfattr *cda[]) -{ - int min_len = NLMSG_SPACE(sizeof(struct nfgenmsg)); - u_int8_t cb_id = NFNL_MSG_TYPE(nlh->nlmsg_type); - u_int16_t attr_count = subsys->cb[cb_id].attr_count; - - /* check attribute lengths. */ - if (likely(nlh->nlmsg_len > min_len)) { - struct nfattr *attr = NFM_NFA(NLMSG_DATA(nlh)); - int attrlen = nlh->nlmsg_len - NLMSG_ALIGN(min_len); - nfattr_parse(cda, attr_count, attr, attrlen); - } - - /* implicit: if nlmsg_len == min_len, we return 0, and an empty - * (zeroed) cda[] array. The message is valid, but empty. */ - - return 0; -} - int nfnetlink_has_listeners(unsigned int group) { return netlink_has_listeners(nfnl, group); @@ -175,16 +107,7 @@ EXPORT_SYMBOL_GPL(nfnetlink_has_listeners); int nfnetlink_send(struct sk_buff *skb, u32 pid, unsigned group, int echo) { - int err = 0; - - NETLINK_CB(skb).dst_group = group; - if (echo) - atomic_inc(&skb->users); - netlink_broadcast(nfnl, skb, pid, group, gfp_any()); - if (echo) - err = netlink_unicast(nfnl, skb, pid, MSG_DONTWAIT); - - return err; + return nlmsg_notify(nfnl, skb, pid, group, echo, gfp_any()); } EXPORT_SYMBOL_GPL(nfnetlink_send); @@ -197,8 +120,8 @@ EXPORT_SYMBOL_GPL(nfnetlink_unicast); /* Process one complete nfnetlink message. 
*/ static int nfnetlink_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh) { - struct nfnl_callback *nc; - struct nfnetlink_subsystem *ss; + const struct nfnl_callback *nc; + const struct nfnetlink_subsystem *ss; int type, err; if (security_netlink_recv(skb, CAP_NET_ADMIN)) @@ -212,9 +135,7 @@ static int nfnetlink_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh) ss = nfnetlink_get_subsys(type); if (!ss) { #ifdef CONFIG_KMOD - /* don't call nfnl_unlock, since it would reenter - * with further packet processing */ - __nfnl_unlock(); + nfnl_unlock(); request_module("nfnetlink-subsys-%d", NFNL_SUBSYS_ID(type)); nfnl_lock(); ss = nfnetlink_get_subsys(type); @@ -228,29 +149,31 @@ static int nfnetlink_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh) return -EINVAL; { - u_int16_t attr_count = - ss->cb[NFNL_MSG_TYPE(nlh->nlmsg_type)].attr_count; - struct nfattr *cda[attr_count]; - - memset(cda, 0, sizeof(struct nfattr *) * attr_count); + int min_len = NLMSG_SPACE(sizeof(struct nfgenmsg)); + u_int8_t cb_id = NFNL_MSG_TYPE(nlh->nlmsg_type); + u_int16_t attr_count = ss->cb[cb_id].attr_count; + struct nlattr *cda[attr_count+1]; + + if (likely(nlh->nlmsg_len >= min_len)) { + struct nlattr *attr = (void *)nlh + NLMSG_ALIGN(min_len); + int attrlen = nlh->nlmsg_len - NLMSG_ALIGN(min_len); + + err = nla_parse(cda, attr_count, attr, attrlen, + ss->cb[cb_id].policy); + if (err < 0) + return err; + } else + return -EINVAL; - err = nfnetlink_check_attributes(ss, nlh, cda); - if (err < 0) - return err; return nc->call(nfnl, skb, nlh, cda); } } -static void nfnetlink_rcv(struct sock *sk, int len) +static void nfnetlink_rcv(struct sk_buff *skb) { - unsigned int qlen = 0; - - do { - if (nfnl_trylock()) - return; - netlink_run_queue(sk, &qlen, nfnetlink_rcv_msg); - __nfnl_unlock(); - } while (qlen); + nfnl_lock(); + netlink_rcv_skb(skb, &nfnetlink_rcv_msg); + nfnl_unlock(); } static void __exit nfnetlink_exit(void) @@ -264,7 +187,7 @@ static int __init nfnetlink_init(void) { printk("Netfilter messages via NETLINK v%s.\n", nfversion); - nfnl = netlink_kernel_create(NETLINK_NETFILTER, NFNLGRP_MAX, + nfnl = netlink_kernel_create(&init_net, NETLINK_NETFILTER, NFNLGRP_MAX, nfnetlink_rcv, NULL, THIS_MODULE); if (!nfnl) { printk(KERN_ERR "cannot initialize nfnetlink!\n"); diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c index 2351533a850755bebd1c9ca6b26c9462fd296cf8..2c7bd2eb0294400ed1175749f41281ed4f7b735b 100644 --- a/net/netfilter/nfnetlink_log.c +++ b/net/netfilter/nfnetlink_log.c @@ -37,8 +37,9 @@ #endif #define NFULNL_NLBUFSIZ_DEFAULT NLMSG_GOODSIZE -#define NFULNL_TIMEOUT_DEFAULT 100 /* every second */ +#define NFULNL_TIMEOUT_DEFAULT HZ /* every second */ #define NFULNL_QTHRESH_DEFAULT 100 /* 100 packets */ +#define NFULNL_COPY_RANGE_MAX 0xFFFF /* max packet size is limited by 16-bit struct nfattr nfa_len field */ #define PRINTR(x, args...) 
do { if (net_ratelimit()) \ printk(x, ## args); } while (0); @@ -152,6 +153,11 @@ instance_create(u_int16_t group_num, int pid) if (!inst) goto out_unlock; + if (!try_module_get(THIS_MODULE)) { + kfree(inst); + goto out_unlock; + } + INIT_HLIST_NODE(&inst->hlist); spin_lock_init(&inst->lock); /* needs to be two, since we _put() after creation */ @@ -166,10 +172,7 @@ instance_create(u_int16_t group_num, int pid) inst->flushtimeout = NFULNL_TIMEOUT_DEFAULT; inst->nlbufsiz = NFULNL_NLBUFSIZ_DEFAULT; inst->copy_mode = NFULNL_COPY_PACKET; - inst->copy_range = 0xffff; - - if (!try_module_get(THIS_MODULE)) - goto out_free; + inst->copy_range = NFULNL_COPY_RANGE_MAX; hlist_add_head(&inst->hlist, &instance_table[instance_hashfn(group_num)]); @@ -181,14 +184,12 @@ instance_create(u_int16_t group_num, int pid) return inst; -out_free: - instance_put(inst); out_unlock: write_unlock_bh(&instances_lock); return NULL; } -static int __nfulnl_send(struct nfulnl_instance *inst); +static void __nfulnl_flush(struct nfulnl_instance *inst); static void __instance_destroy(struct nfulnl_instance *inst) @@ -202,17 +203,8 @@ __instance_destroy(struct nfulnl_instance *inst) /* then flush all pending packets from skb */ spin_lock_bh(&inst->lock); - if (inst->skb) { - /* timer "holds" one reference (we have one more) */ - if (del_timer(&inst->timer)) - instance_put(inst); - if (inst->qlen) - __nfulnl_send(inst); - if (inst->skb) { - kfree_skb(inst->skb); - inst->skb = NULL; - } - } + if (inst->skb) + __nfulnl_flush(inst); spin_unlock_bh(&inst->lock); /* and finally put the refcount */ @@ -244,11 +236,8 @@ nfulnl_set_mode(struct nfulnl_instance *inst, u_int8_t mode, case NFULNL_COPY_PACKET: inst->copy_mode = mode; - /* we're using struct nfattr which has 16bit nfa_len */ - if (range > 0xffff) - inst->copy_range = 0xffff; - else - inst->copy_range = range; + inst->copy_range = min_t(unsigned int, + range, NFULNL_COPY_RANGE_MAX); break; default: @@ -310,8 +299,8 @@ nfulnl_set_flags(struct nfulnl_instance *inst, u_int16_t flags) return 0; } -static struct sk_buff *nfulnl_alloc_skb(unsigned int inst_size, - unsigned int pkt_size) +static struct sk_buff * +nfulnl_alloc_skb(unsigned int inst_size, unsigned int pkt_size) { struct sk_buff *skb; unsigned int n; @@ -364,7 +353,18 @@ __nfulnl_send(struct nfulnl_instance *inst) return status; } -static void nfulnl_timer(unsigned long data) +static void +__nfulnl_flush(struct nfulnl_instance *inst) +{ + /* timer holds a reference */ + if (del_timer(&inst->timer)) + instance_put(inst); + if (inst->skb) + __nfulnl_send(inst); +} + +static void +nfulnl_timer(unsigned long data) { struct nfulnl_instance *inst = (struct nfulnl_instance *)data; @@ -409,36 +409,36 @@ __build_packet_message(struct nfulnl_instance *inst, pmsg.hw_protocol = skb->protocol; pmsg.hook = hooknum; - NFA_PUT(inst->skb, NFULA_PACKET_HDR, sizeof(pmsg), &pmsg); + NLA_PUT(inst->skb, NFULA_PACKET_HDR, sizeof(pmsg), &pmsg); if (prefix) - NFA_PUT(inst->skb, NFULA_PREFIX, plen, prefix); + NLA_PUT(inst->skb, NFULA_PREFIX, plen, prefix); if (indev) { tmp_uint = htonl(indev->ifindex); #ifndef CONFIG_BRIDGE_NETFILTER - NFA_PUT(inst->skb, NFULA_IFINDEX_INDEV, sizeof(tmp_uint), + NLA_PUT(inst->skb, NFULA_IFINDEX_INDEV, sizeof(tmp_uint), &tmp_uint); #else if (pf == PF_BRIDGE) { /* Case 1: outdev is physical input device, we need to * look for bridge group (when called from * netfilter_bridge) */ - NFA_PUT(inst->skb, NFULA_IFINDEX_PHYSINDEV, + NLA_PUT(inst->skb, NFULA_IFINDEX_PHYSINDEV, sizeof(tmp_uint), &tmp_uint); /* this is the 
bridge group "brX" */ tmp_uint = htonl(indev->br_port->br->dev->ifindex); - NFA_PUT(inst->skb, NFULA_IFINDEX_INDEV, + NLA_PUT(inst->skb, NFULA_IFINDEX_INDEV, sizeof(tmp_uint), &tmp_uint); } else { /* Case 2: indev is bridge group, we need to look for * physical device (when called from ipv4) */ - NFA_PUT(inst->skb, NFULA_IFINDEX_INDEV, + NLA_PUT(inst->skb, NFULA_IFINDEX_INDEV, sizeof(tmp_uint), &tmp_uint); if (skb->nf_bridge && skb->nf_bridge->physindev) { tmp_uint = htonl(skb->nf_bridge->physindev->ifindex); - NFA_PUT(inst->skb, NFULA_IFINDEX_PHYSINDEV, + NLA_PUT(inst->skb, NFULA_IFINDEX_PHYSINDEV, sizeof(tmp_uint), &tmp_uint); } } @@ -448,28 +448,28 @@ __build_packet_message(struct nfulnl_instance *inst, if (outdev) { tmp_uint = htonl(outdev->ifindex); #ifndef CONFIG_BRIDGE_NETFILTER - NFA_PUT(inst->skb, NFULA_IFINDEX_OUTDEV, sizeof(tmp_uint), + NLA_PUT(inst->skb, NFULA_IFINDEX_OUTDEV, sizeof(tmp_uint), &tmp_uint); #else if (pf == PF_BRIDGE) { /* Case 1: outdev is physical output device, we need to * look for bridge group (when called from * netfilter_bridge) */ - NFA_PUT(inst->skb, NFULA_IFINDEX_PHYSOUTDEV, + NLA_PUT(inst->skb, NFULA_IFINDEX_PHYSOUTDEV, sizeof(tmp_uint), &tmp_uint); /* this is the bridge group "brX" */ tmp_uint = htonl(outdev->br_port->br->dev->ifindex); - NFA_PUT(inst->skb, NFULA_IFINDEX_OUTDEV, + NLA_PUT(inst->skb, NFULA_IFINDEX_OUTDEV, sizeof(tmp_uint), &tmp_uint); } else { /* Case 2: indev is a bridge group, we need to look * for physical device (when called from ipv4) */ - NFA_PUT(inst->skb, NFULA_IFINDEX_OUTDEV, + NLA_PUT(inst->skb, NFULA_IFINDEX_OUTDEV, sizeof(tmp_uint), &tmp_uint); if (skb->nf_bridge && skb->nf_bridge->physoutdev) { tmp_uint = htonl(skb->nf_bridge->physoutdev->ifindex); - NFA_PUT(inst->skb, NFULA_IFINDEX_PHYSOUTDEV, + NLA_PUT(inst->skb, NFULA_IFINDEX_PHYSOUTDEV, sizeof(tmp_uint), &tmp_uint); } } @@ -478,15 +478,16 @@ __build_packet_message(struct nfulnl_instance *inst, if (skb->mark) { tmp_uint = htonl(skb->mark); - NFA_PUT(inst->skb, NFULA_MARK, sizeof(tmp_uint), &tmp_uint); + NLA_PUT(inst->skb, NFULA_MARK, sizeof(tmp_uint), &tmp_uint); } - if (indev && skb->dev && skb->dev->hard_header_parse) { + if (indev && skb->dev) { struct nfulnl_msg_packet_hw phw; - int len = skb->dev->hard_header_parse((struct sk_buff *)skb, - phw.hw_addr); - phw.hw_addrlen = htons(len); - NFA_PUT(inst->skb, NFULA_HWADDR, sizeof(phw), &phw); + int len = dev_parse_header(skb, phw.hw_addr); + if (len > 0) { + phw.hw_addrlen = htons(len); + NLA_PUT(inst->skb, NFULA_HWADDR, sizeof(phw), &phw); + } } if (skb->tstamp.tv64) { @@ -495,7 +496,7 @@ __build_packet_message(struct nfulnl_instance *inst, ts.sec = cpu_to_be64(tv.tv_sec); ts.usec = cpu_to_be64(tv.tv_usec); - NFA_PUT(inst->skb, NFULA_TIMESTAMP, sizeof(ts), &ts); + NLA_PUT(inst->skb, NFULA_TIMESTAMP, sizeof(ts), &ts); } /* UID */ @@ -503,9 +504,9 @@ __build_packet_message(struct nfulnl_instance *inst, read_lock_bh(&skb->sk->sk_callback_lock); if (skb->sk->sk_socket && skb->sk->sk_socket->file) { __be32 uid = htonl(skb->sk->sk_socket->file->f_uid); - /* need to unlock here since NFA_PUT may goto */ + /* need to unlock here since NLA_PUT may goto */ read_unlock_bh(&skb->sk->sk_callback_lock); - NFA_PUT(inst->skb, NFULA_UID, sizeof(uid), &uid); + NLA_PUT(inst->skb, NFULA_UID, sizeof(uid), &uid); } else read_unlock_bh(&skb->sk->sk_callback_lock); } @@ -513,28 +514,28 @@ __build_packet_message(struct nfulnl_instance *inst, /* local sequence number */ if (inst->flags & NFULNL_CFG_F_SEQ) { tmp_uint = htonl(inst->seq++); - 
NFA_PUT(inst->skb, NFULA_SEQ, sizeof(tmp_uint), &tmp_uint); + NLA_PUT(inst->skb, NFULA_SEQ, sizeof(tmp_uint), &tmp_uint); } /* global sequence number */ if (inst->flags & NFULNL_CFG_F_SEQ_GLOBAL) { tmp_uint = htonl(atomic_inc_return(&global_seq)); - NFA_PUT(inst->skb, NFULA_SEQ_GLOBAL, sizeof(tmp_uint), &tmp_uint); + NLA_PUT(inst->skb, NFULA_SEQ_GLOBAL, sizeof(tmp_uint), &tmp_uint); } if (data_len) { - struct nfattr *nfa; - int size = NFA_LENGTH(data_len); + struct nlattr *nla; + int size = nla_attr_size(data_len); - if (skb_tailroom(inst->skb) < (int)NFA_SPACE(data_len)) { + if (skb_tailroom(inst->skb) < nla_total_size(data_len)) { printk(KERN_WARNING "nfnetlink_log: no tailroom!\n"); goto nlmsg_failure; } - nfa = (struct nfattr *)skb_put(inst->skb, NFA_ALIGN(size)); - nfa->nfa_type = NFULA_PAYLOAD; - nfa->nfa_len = size; + nla = (struct nlattr *)skb_put(inst->skb, nla_total_size(data_len)); + nla->nla_type = NFULA_PAYLOAD; + nla->nla_len = size; - if (skb_copy_bits(skb, 0, NFA_DATA(nfa), data_len)) + if (skb_copy_bits(skb, 0, nla_data(nla), data_len)) BUG(); } @@ -543,7 +544,7 @@ __build_packet_message(struct nfulnl_instance *inst, nlmsg_failure: UDEBUG("nlmsg_failure\n"); -nfattr_failure: +nla_put_failure: PRINTR(KERN_ERR "nfnetlink_log: error creating log nlmsg\n"); return -1; } @@ -590,32 +591,31 @@ nfulnl_log_packet(unsigned int pf, if (prefix) plen = strlen(prefix) + 1; - /* all macros expand to constant values at compile time */ /* FIXME: do we want to make the size calculation conditional based on * what is actually present? way more branches and checks, but more * memory efficient... */ - size = NLMSG_SPACE(sizeof(struct nfgenmsg)) - + NFA_SPACE(sizeof(struct nfulnl_msg_packet_hdr)) - + NFA_SPACE(sizeof(u_int32_t)) /* ifindex */ - + NFA_SPACE(sizeof(u_int32_t)) /* ifindex */ + size = NLMSG_ALIGN(sizeof(struct nfgenmsg)) + + nla_total_size(sizeof(struct nfulnl_msg_packet_hdr)) + + nla_total_size(sizeof(u_int32_t)) /* ifindex */ + + nla_total_size(sizeof(u_int32_t)) /* ifindex */ #ifdef CONFIG_BRIDGE_NETFILTER - + NFA_SPACE(sizeof(u_int32_t)) /* ifindex */ - + NFA_SPACE(sizeof(u_int32_t)) /* ifindex */ + + nla_total_size(sizeof(u_int32_t)) /* ifindex */ + + nla_total_size(sizeof(u_int32_t)) /* ifindex */ #endif - + NFA_SPACE(sizeof(u_int32_t)) /* mark */ - + NFA_SPACE(sizeof(u_int32_t)) /* uid */ - + NFA_SPACE(plen) /* prefix */ - + NFA_SPACE(sizeof(struct nfulnl_msg_packet_hw)) - + NFA_SPACE(sizeof(struct nfulnl_msg_packet_timestamp)); + + nla_total_size(sizeof(u_int32_t)) /* mark */ + + nla_total_size(sizeof(u_int32_t)) /* uid */ + + nla_total_size(plen) /* prefix */ + + nla_total_size(sizeof(struct nfulnl_msg_packet_hw)) + + nla_total_size(sizeof(struct nfulnl_msg_packet_timestamp)); UDEBUG("initial size=%u\n", size); spin_lock_bh(&inst->lock); if (inst->flags & NFULNL_CFG_F_SEQ) - size += NFA_SPACE(sizeof(u_int32_t)); + size += nla_total_size(sizeof(u_int32_t)); if (inst->flags & NFULNL_CFG_F_SEQ_GLOBAL) - size += NFA_SPACE(sizeof(u_int32_t)); + size += nla_total_size(sizeof(u_int32_t)); qthreshold = inst->qthreshold; /* per-rule qthreshold overrides per-instance */ @@ -635,7 +635,7 @@ nfulnl_log_packet(unsigned int pf, else data_len = inst->copy_range; - size += NFA_SPACE(data_len); + size += nla_total_size(data_len); UDEBUG("copy_packet, therefore size now %u\n", size); break; @@ -643,17 +643,13 @@ nfulnl_log_packet(unsigned int pf, goto unlock_and_release; } - if (inst->qlen >= qthreshold || - (inst->skb && size > - skb_tailroom(inst->skb) - sizeof(struct nfgenmsg))) { + if 
(inst->skb && + size > skb_tailroom(inst->skb) - sizeof(struct nfgenmsg)) { /* either the queue len is too high or we don't have * enough room in the skb left. flush to userspace. */ UDEBUG("flushing old skb\n"); - /* timer "holds" one reference (we have another one) */ - if (del_timer(&inst->timer)) - instance_put(inst); - __nfulnl_send(inst); + __nfulnl_flush(inst); } if (!inst->skb) { @@ -668,9 +664,11 @@ nfulnl_log_packet(unsigned int pf, __build_packet_message(inst, skb, data_len, pf, hooknum, in, out, li, prefix, plen); + if (inst->qlen >= qthreshold) + __nfulnl_flush(inst); /* timer_pending always called within inst->lock, so there * is no chance of a race here */ - if (!timer_pending(&inst->timer)) { + else if (!timer_pending(&inst->timer)) { instance_get(inst); inst->timer.expires = jiffies + (inst->flushtimeout*HZ/100); add_timer(&inst->timer); @@ -706,7 +704,8 @@ nfulnl_rcv_nl_event(struct notifier_block *this, hlist_for_each_entry_safe(inst, tmp, t2, head, hlist) { UDEBUG("node = %p\n", inst); - if (n->pid == inst->peer_pid) + if ((n->net == &init_net) && + (n->pid == inst->peer_pid)) __instance_destroy(inst); } } @@ -721,7 +720,7 @@ static struct notifier_block nfulnl_rtnl_notifier = { static int nfulnl_recv_unsupp(struct sock *ctnl, struct sk_buff *skb, - struct nlmsghdr *nlh, struct nfattr *nfqa[]) + struct nlmsghdr *nlh, struct nlattr *nfqa[]) { return -ENOTSUPP; } @@ -732,34 +731,18 @@ static struct nf_logger nfulnl_logger = { .me = THIS_MODULE, }; -static const int nfula_min[NFULA_MAX] = { - [NFULA_PACKET_HDR-1] = sizeof(struct nfulnl_msg_packet_hdr), - [NFULA_MARK-1] = sizeof(u_int32_t), - [NFULA_TIMESTAMP-1] = sizeof(struct nfulnl_msg_packet_timestamp), - [NFULA_IFINDEX_INDEV-1] = sizeof(u_int32_t), - [NFULA_IFINDEX_OUTDEV-1]= sizeof(u_int32_t), - [NFULA_IFINDEX_PHYSINDEV-1] = sizeof(u_int32_t), - [NFULA_IFINDEX_PHYSOUTDEV-1] = sizeof(u_int32_t), - [NFULA_HWADDR-1] = sizeof(struct nfulnl_msg_packet_hw), - [NFULA_PAYLOAD-1] = 0, - [NFULA_PREFIX-1] = 0, - [NFULA_UID-1] = sizeof(u_int32_t), - [NFULA_SEQ-1] = sizeof(u_int32_t), - [NFULA_SEQ_GLOBAL-1] = sizeof(u_int32_t), -}; - -static const int nfula_cfg_min[NFULA_CFG_MAX] = { - [NFULA_CFG_CMD-1] = sizeof(struct nfulnl_msg_config_cmd), - [NFULA_CFG_MODE-1] = sizeof(struct nfulnl_msg_config_mode), - [NFULA_CFG_TIMEOUT-1] = sizeof(u_int32_t), - [NFULA_CFG_QTHRESH-1] = sizeof(u_int32_t), - [NFULA_CFG_NLBUFSIZ-1] = sizeof(u_int32_t), - [NFULA_CFG_FLAGS-1] = sizeof(u_int16_t), +static const struct nla_policy nfula_cfg_policy[NFULA_CFG_MAX+1] = { + [NFULA_CFG_CMD] = { .len = sizeof(struct nfulnl_msg_config_cmd) }, + [NFULA_CFG_MODE] = { .len = sizeof(struct nfulnl_msg_config_mode) }, + [NFULA_CFG_TIMEOUT] = { .type = NLA_U32 }, + [NFULA_CFG_QTHRESH] = { .type = NLA_U32 }, + [NFULA_CFG_NLBUFSIZ] = { .type = NLA_U32 }, + [NFULA_CFG_FLAGS] = { .type = NLA_U16 }, }; static int nfulnl_recv_config(struct sock *ctnl, struct sk_buff *skb, - struct nlmsghdr *nlh, struct nfattr *nfula[]) + struct nlmsghdr *nlh, struct nlattr *nfula[]) { struct nfgenmsg *nfmsg = NLMSG_DATA(nlh); u_int16_t group_num = ntohs(nfmsg->res_id); @@ -768,16 +751,11 @@ nfulnl_recv_config(struct sock *ctnl, struct sk_buff *skb, UDEBUG("entering for msg %u\n", NFNL_MSG_TYPE(nlh->nlmsg_type)); - if (nfattr_bad_size(nfula, NFULA_CFG_MAX, nfula_cfg_min)) { - UDEBUG("bad attribute size\n"); - return -EINVAL; - } - inst = instance_lookup_get(group_num); - if (nfula[NFULA_CFG_CMD-1]) { + if (nfula[NFULA_CFG_CMD]) { u_int8_t pf = nfmsg->nfgen_family; struct 
nfulnl_msg_config_cmd *cmd; - cmd = NFA_DATA(nfula[NFULA_CFG_CMD-1]); + cmd = nla_data(nfula[NFULA_CFG_CMD]); UDEBUG("found CFG_CMD for\n"); switch (cmd->command) { @@ -840,38 +818,38 @@ nfulnl_recv_config(struct sock *ctnl, struct sk_buff *skb, } } - if (nfula[NFULA_CFG_MODE-1]) { + if (nfula[NFULA_CFG_MODE]) { struct nfulnl_msg_config_mode *params; - params = NFA_DATA(nfula[NFULA_CFG_MODE-1]); + params = nla_data(nfula[NFULA_CFG_MODE]); nfulnl_set_mode(inst, params->copy_mode, ntohl(params->copy_range)); } - if (nfula[NFULA_CFG_TIMEOUT-1]) { + if (nfula[NFULA_CFG_TIMEOUT]) { __be32 timeout = - *(__be32 *)NFA_DATA(nfula[NFULA_CFG_TIMEOUT-1]); + *(__be32 *)nla_data(nfula[NFULA_CFG_TIMEOUT]); nfulnl_set_timeout(inst, ntohl(timeout)); } - if (nfula[NFULA_CFG_NLBUFSIZ-1]) { + if (nfula[NFULA_CFG_NLBUFSIZ]) { __be32 nlbufsiz = - *(__be32 *)NFA_DATA(nfula[NFULA_CFG_NLBUFSIZ-1]); + *(__be32 *)nla_data(nfula[NFULA_CFG_NLBUFSIZ]); nfulnl_set_nlbufsiz(inst, ntohl(nlbufsiz)); } - if (nfula[NFULA_CFG_QTHRESH-1]) { + if (nfula[NFULA_CFG_QTHRESH]) { __be32 qthresh = - *(__be32 *)NFA_DATA(nfula[NFULA_CFG_QTHRESH-1]); + *(__be32 *)nla_data(nfula[NFULA_CFG_QTHRESH]); nfulnl_set_qthresh(inst, ntohl(qthresh)); } - if (nfula[NFULA_CFG_FLAGS-1]) { + if (nfula[NFULA_CFG_FLAGS]) { __be16 flags = - *(__be16 *)NFA_DATA(nfula[NFULA_CFG_FLAGS-1]); + *(__be16 *)nla_data(nfula[NFULA_CFG_FLAGS]); nfulnl_set_flags(inst, ntohs(flags)); } @@ -881,14 +859,15 @@ nfulnl_recv_config(struct sock *ctnl, struct sk_buff *skb, return ret; } -static struct nfnl_callback nfulnl_cb[NFULNL_MSG_MAX] = { +static const struct nfnl_callback nfulnl_cb[NFULNL_MSG_MAX] = { [NFULNL_MSG_PACKET] = { .call = nfulnl_recv_unsupp, .attr_count = NFULA_MAX, }, [NFULNL_MSG_CONFIG] = { .call = nfulnl_recv_config, - .attr_count = NFULA_CFG_MAX, }, + .attr_count = NFULA_CFG_MAX, + .policy = nfula_cfg_policy }, }; -static struct nfnetlink_subsystem nfulnl_subsys = { +static const struct nfnetlink_subsystem nfulnl_subsys = { .name = "log", .subsys_id = NFNL_SUBSYS_ULOG, .cb_count = NFULNL_MSG_MAX, @@ -972,22 +951,8 @@ static const struct seq_operations nful_seq_ops = { static int nful_open(struct inode *inode, struct file *file) { - struct seq_file *seq; - struct iter_state *is; - int ret; - - is = kzalloc(sizeof(*is), GFP_KERNEL); - if (!is) - return -ENOMEM; - ret = seq_open(file, &nful_seq_ops); - if (ret < 0) - goto out_free; - seq = file->private_data; - seq->private = is; - return ret; -out_free: - kfree(is); - return ret; + return seq_open_private(file, &nful_seq_ops, + sizeof(struct iter_state)); } static const struct file_operations nful_file_ops = { diff --git a/net/netfilter/nfnetlink_queue.c b/net/netfilter/nfnetlink_queue.c index bb65a38c816c79d88561feaacaa6734df57a6a5e..49f0480afe099ac4643bf895eb05d68256b563a6 100644 --- a/net/netfilter/nfnetlink_queue.c +++ b/net/netfilter/nfnetlink_queue.c @@ -299,7 +299,7 @@ __nfqnl_set_mode(struct nfqnl_instance *queue, case NFQNL_COPY_PACKET: queue->copy_mode = mode; - /* we're using struct nfattr which has 16bit nfa_len */ + /* we're using struct nlattr which has 16bit nla_len */ if (range > 0xffff) queue->copy_range = 0xffff; else @@ -353,18 +353,17 @@ nfqnl_build_packet_message(struct nfqnl_instance *queue, QDEBUG("entered\n"); - /* all macros expand to constant values at compile time */ - size = NLMSG_SPACE(sizeof(struct nfgenmsg)) + - + NFA_SPACE(sizeof(struct nfqnl_msg_packet_hdr)) - + NFA_SPACE(sizeof(u_int32_t)) /* ifindex */ - + NFA_SPACE(sizeof(u_int32_t)) /* ifindex */ + size = 
NLMSG_ALIGN(sizeof(struct nfgenmsg)) + + nla_total_size(sizeof(struct nfqnl_msg_packet_hdr)) + + nla_total_size(sizeof(u_int32_t)) /* ifindex */ + + nla_total_size(sizeof(u_int32_t)) /* ifindex */ #ifdef CONFIG_BRIDGE_NETFILTER - + NFA_SPACE(sizeof(u_int32_t)) /* ifindex */ - + NFA_SPACE(sizeof(u_int32_t)) /* ifindex */ + + nla_total_size(sizeof(u_int32_t)) /* ifindex */ + + nla_total_size(sizeof(u_int32_t)) /* ifindex */ #endif - + NFA_SPACE(sizeof(u_int32_t)) /* mark */ - + NFA_SPACE(sizeof(struct nfqnl_msg_packet_hw)) - + NFA_SPACE(sizeof(struct nfqnl_msg_packet_timestamp)); + + nla_total_size(sizeof(u_int32_t)) /* mark */ + + nla_total_size(sizeof(struct nfqnl_msg_packet_hw)) + + nla_total_size(sizeof(struct nfqnl_msg_packet_timestamp)); outdev = entinf->outdev; @@ -389,7 +388,7 @@ nfqnl_build_packet_message(struct nfqnl_instance *queue, else data_len = queue->copy_range; - size += NFA_SPACE(data_len); + size += nla_total_size(data_len); break; default: @@ -417,33 +416,33 @@ nfqnl_build_packet_message(struct nfqnl_instance *queue, pmsg.hw_protocol = entskb->protocol; pmsg.hook = entinf->hook; - NFA_PUT(skb, NFQA_PACKET_HDR, sizeof(pmsg), &pmsg); + NLA_PUT(skb, NFQA_PACKET_HDR, sizeof(pmsg), &pmsg); indev = entinf->indev; if (indev) { tmp_uint = htonl(indev->ifindex); #ifndef CONFIG_BRIDGE_NETFILTER - NFA_PUT(skb, NFQA_IFINDEX_INDEV, sizeof(tmp_uint), &tmp_uint); + NLA_PUT(skb, NFQA_IFINDEX_INDEV, sizeof(tmp_uint), &tmp_uint); #else if (entinf->pf == PF_BRIDGE) { /* Case 1: indev is physical input device, we need to * look for bridge group (when called from * netfilter_bridge) */ - NFA_PUT(skb, NFQA_IFINDEX_PHYSINDEV, sizeof(tmp_uint), + NLA_PUT(skb, NFQA_IFINDEX_PHYSINDEV, sizeof(tmp_uint), &tmp_uint); /* this is the bridge group "brX" */ tmp_uint = htonl(indev->br_port->br->dev->ifindex); - NFA_PUT(skb, NFQA_IFINDEX_INDEV, sizeof(tmp_uint), + NLA_PUT(skb, NFQA_IFINDEX_INDEV, sizeof(tmp_uint), &tmp_uint); } else { /* Case 2: indev is bridge group, we need to look for * physical device (when called from ipv4) */ - NFA_PUT(skb, NFQA_IFINDEX_INDEV, sizeof(tmp_uint), + NLA_PUT(skb, NFQA_IFINDEX_INDEV, sizeof(tmp_uint), &tmp_uint); if (entskb->nf_bridge && entskb->nf_bridge->physindev) { tmp_uint = htonl(entskb->nf_bridge->physindev->ifindex); - NFA_PUT(skb, NFQA_IFINDEX_PHYSINDEV, + NLA_PUT(skb, NFQA_IFINDEX_PHYSINDEV, sizeof(tmp_uint), &tmp_uint); } } @@ -453,27 +452,27 @@ nfqnl_build_packet_message(struct nfqnl_instance *queue, if (outdev) { tmp_uint = htonl(outdev->ifindex); #ifndef CONFIG_BRIDGE_NETFILTER - NFA_PUT(skb, NFQA_IFINDEX_OUTDEV, sizeof(tmp_uint), &tmp_uint); + NLA_PUT(skb, NFQA_IFINDEX_OUTDEV, sizeof(tmp_uint), &tmp_uint); #else if (entinf->pf == PF_BRIDGE) { /* Case 1: outdev is physical output device, we need to * look for bridge group (when called from * netfilter_bridge) */ - NFA_PUT(skb, NFQA_IFINDEX_PHYSOUTDEV, sizeof(tmp_uint), + NLA_PUT(skb, NFQA_IFINDEX_PHYSOUTDEV, sizeof(tmp_uint), &tmp_uint); /* this is the bridge group "brX" */ tmp_uint = htonl(outdev->br_port->br->dev->ifindex); - NFA_PUT(skb, NFQA_IFINDEX_OUTDEV, sizeof(tmp_uint), + NLA_PUT(skb, NFQA_IFINDEX_OUTDEV, sizeof(tmp_uint), &tmp_uint); } else { /* Case 2: outdev is bridge group, we need to look for * physical output device (when called from ipv4) */ - NFA_PUT(skb, NFQA_IFINDEX_OUTDEV, sizeof(tmp_uint), + NLA_PUT(skb, NFQA_IFINDEX_OUTDEV, sizeof(tmp_uint), &tmp_uint); if (entskb->nf_bridge && entskb->nf_bridge->physoutdev) { tmp_uint = htonl(entskb->nf_bridge->physoutdev->ifindex); - NFA_PUT(skb, 
NFQA_IFINDEX_PHYSOUTDEV, + NLA_PUT(skb, NFQA_IFINDEX_PHYSOUTDEV, sizeof(tmp_uint), &tmp_uint); } } @@ -482,17 +481,16 @@ nfqnl_build_packet_message(struct nfqnl_instance *queue, if (entskb->mark) { tmp_uint = htonl(entskb->mark); - NFA_PUT(skb, NFQA_MARK, sizeof(u_int32_t), &tmp_uint); + NLA_PUT(skb, NFQA_MARK, sizeof(u_int32_t), &tmp_uint); } - if (indev && entskb->dev - && entskb->dev->hard_header_parse) { + if (indev && entskb->dev) { struct nfqnl_msg_packet_hw phw; - - int len = entskb->dev->hard_header_parse(entskb, - phw.hw_addr); - phw.hw_addrlen = htons(len); - NFA_PUT(skb, NFQA_HWADDR, sizeof(phw), &phw); + int len = dev_parse_header(entskb, phw.hw_addr); + if (len) { + phw.hw_addrlen = htons(len); + NLA_PUT(skb, NFQA_HWADDR, sizeof(phw), &phw); + } } if (entskb->tstamp.tv64) { @@ -501,23 +499,23 @@ nfqnl_build_packet_message(struct nfqnl_instance *queue, ts.sec = cpu_to_be64(tv.tv_sec); ts.usec = cpu_to_be64(tv.tv_usec); - NFA_PUT(skb, NFQA_TIMESTAMP, sizeof(ts), &ts); + NLA_PUT(skb, NFQA_TIMESTAMP, sizeof(ts), &ts); } if (data_len) { - struct nfattr *nfa; - int size = NFA_LENGTH(data_len); + struct nlattr *nla; + int size = nla_attr_size(data_len); - if (skb_tailroom(skb) < (int)NFA_SPACE(data_len)) { + if (skb_tailroom(skb) < nla_total_size(data_len)) { printk(KERN_WARNING "nf_queue: no tailroom!\n"); goto nlmsg_failure; } - nfa = (struct nfattr *)skb_put(skb, NFA_ALIGN(size)); - nfa->nfa_type = NFQA_PAYLOAD; - nfa->nfa_len = size; + nla = (struct nlattr *)skb_put(skb, nla_total_size(data_len)); + nla->nla_type = NFQA_PAYLOAD; + nla->nla_len = size; - if (skb_copy_bits(entskb, 0, NFA_DATA(nfa), data_len)) + if (skb_copy_bits(entskb, 0, nla_data(nla), data_len)) BUG(); } @@ -525,7 +523,7 @@ nfqnl_build_packet_message(struct nfqnl_instance *queue, return skb; nlmsg_failure: -nfattr_failure: +nla_put_failure: if (skb) kfree_skb(skb); *errp = -EINVAL; @@ -734,6 +732,9 @@ nfqnl_rcv_dev_event(struct notifier_block *this, { struct net_device *dev = ptr; + if (dev->nd_net != &init_net) + return NOTIFY_DONE; + /* Drop any packets associated with the downed device */ if (event == NETDEV_DOWN) nfqnl_dev_drop(dev->ifindex); @@ -762,7 +763,8 @@ nfqnl_rcv_nl_event(struct notifier_block *this, struct hlist_head *head = &instance_table[i]; hlist_for_each_entry_safe(inst, tmp, t2, head, hlist) { - if (n->pid == inst->peer_pid) + if ((n->net == &init_net) && + (n->pid == inst->peer_pid)) __instance_destroy(inst); } } @@ -775,15 +777,15 @@ static struct notifier_block nfqnl_rtnl_notifier = { .notifier_call = nfqnl_rcv_nl_event, }; -static const int nfqa_verdict_min[NFQA_MAX] = { - [NFQA_VERDICT_HDR-1] = sizeof(struct nfqnl_msg_verdict_hdr), - [NFQA_MARK-1] = sizeof(u_int32_t), - [NFQA_PAYLOAD-1] = 0, +static const struct nla_policy nfqa_verdict_policy[NFQA_MAX+1] = { + [NFQA_VERDICT_HDR] = { .len = sizeof(struct nfqnl_msg_verdict_hdr) }, + [NFQA_MARK] = { .type = NLA_U32 }, + [NFQA_PAYLOAD] = { .type = NLA_UNSPEC }, }; static int nfqnl_recv_verdict(struct sock *ctnl, struct sk_buff *skb, - struct nlmsghdr *nlh, struct nfattr *nfqa[]) + struct nlmsghdr *nlh, struct nlattr *nfqa[]) { struct nfgenmsg *nfmsg = NLMSG_DATA(nlh); u_int16_t queue_num = ntohs(nfmsg->res_id); @@ -794,11 +796,6 @@ nfqnl_recv_verdict(struct sock *ctnl, struct sk_buff *skb, struct nfqnl_queue_entry *entry; int err; - if (nfattr_bad_size(nfqa, NFQA_MAX, nfqa_verdict_min)) { - QDEBUG("bad attribute size\n"); - return -EINVAL; - } - queue = instance_lookup_get(queue_num); if (!queue) return -ENODEV; @@ -808,12 +805,12 @@ 
nfqnl_recv_verdict(struct sock *ctnl, struct sk_buff *skb, goto err_out_put; } - if (!nfqa[NFQA_VERDICT_HDR-1]) { + if (!nfqa[NFQA_VERDICT_HDR]) { err = -EINVAL; goto err_out_put; } - vhdr = NFA_DATA(nfqa[NFQA_VERDICT_HDR-1]); + vhdr = nla_data(nfqa[NFQA_VERDICT_HDR]); verdict = ntohl(vhdr->verdict); if ((verdict & NF_VERDICT_MASK) > NF_MAX_VERDICT) { @@ -827,15 +824,15 @@ nfqnl_recv_verdict(struct sock *ctnl, struct sk_buff *skb, goto err_out_put; } - if (nfqa[NFQA_PAYLOAD-1]) { - if (nfqnl_mangle(NFA_DATA(nfqa[NFQA_PAYLOAD-1]), - NFA_PAYLOAD(nfqa[NFQA_PAYLOAD-1]), entry) < 0) + if (nfqa[NFQA_PAYLOAD]) { + if (nfqnl_mangle(nla_data(nfqa[NFQA_PAYLOAD]), + nla_len(nfqa[NFQA_PAYLOAD]), entry) < 0) verdict = NF_DROP; } - if (nfqa[NFQA_MARK-1]) + if (nfqa[NFQA_MARK]) entry->skb->mark = ntohl(*(__be32 *) - NFA_DATA(nfqa[NFQA_MARK-1])); + nla_data(nfqa[NFQA_MARK])); issue_verdict(entry, verdict); instance_put(queue); @@ -848,14 +845,14 @@ nfqnl_recv_verdict(struct sock *ctnl, struct sk_buff *skb, static int nfqnl_recv_unsupp(struct sock *ctnl, struct sk_buff *skb, - struct nlmsghdr *nlh, struct nfattr *nfqa[]) + struct nlmsghdr *nlh, struct nlattr *nfqa[]) { return -ENOTSUPP; } -static const int nfqa_cfg_min[NFQA_CFG_MAX] = { - [NFQA_CFG_CMD-1] = sizeof(struct nfqnl_msg_config_cmd), - [NFQA_CFG_PARAMS-1] = sizeof(struct nfqnl_msg_config_params), +static const struct nla_policy nfqa_cfg_policy[NFQA_CFG_MAX+1] = { + [NFQA_CFG_CMD] = { .len = sizeof(struct nfqnl_msg_config_cmd) }, + [NFQA_CFG_PARAMS] = { .len = sizeof(struct nfqnl_msg_config_params) }, }; static struct nf_queue_handler nfqh = { @@ -865,7 +862,7 @@ static struct nf_queue_handler nfqh = { static int nfqnl_recv_config(struct sock *ctnl, struct sk_buff *skb, - struct nlmsghdr *nlh, struct nfattr *nfqa[]) + struct nlmsghdr *nlh, struct nlattr *nfqa[]) { struct nfgenmsg *nfmsg = NLMSG_DATA(nlh); u_int16_t queue_num = ntohs(nfmsg->res_id); @@ -874,15 +871,10 @@ nfqnl_recv_config(struct sock *ctnl, struct sk_buff *skb, QDEBUG("entering for msg %u\n", NFNL_MSG_TYPE(nlh->nlmsg_type)); - if (nfattr_bad_size(nfqa, NFQA_CFG_MAX, nfqa_cfg_min)) { - QDEBUG("bad attribute size\n"); - return -EINVAL; - } - queue = instance_lookup_get(queue_num); - if (nfqa[NFQA_CFG_CMD-1]) { + if (nfqa[NFQA_CFG_CMD]) { struct nfqnl_msg_config_cmd *cmd; - cmd = NFA_DATA(nfqa[NFQA_CFG_CMD-1]); + cmd = nla_data(nfqa[NFQA_CFG_CMD]); QDEBUG("found CFG_CMD\n"); switch (cmd->command) { @@ -933,21 +925,21 @@ nfqnl_recv_config(struct sock *ctnl, struct sk_buff *skb, } } - if (nfqa[NFQA_CFG_PARAMS-1]) { + if (nfqa[NFQA_CFG_PARAMS]) { struct nfqnl_msg_config_params *params; if (!queue) { ret = -ENOENT; goto out_put; } - params = NFA_DATA(nfqa[NFQA_CFG_PARAMS-1]); + params = nla_data(nfqa[NFQA_CFG_PARAMS]); nfqnl_set_mode(queue, params->copy_mode, ntohl(params->copy_range)); } - if (nfqa[NFQA_CFG_QUEUE_MAXLEN-1]) { + if (nfqa[NFQA_CFG_QUEUE_MAXLEN]) { __be32 *queue_maxlen; - queue_maxlen = NFA_DATA(nfqa[NFQA_CFG_QUEUE_MAXLEN-1]); + queue_maxlen = nla_data(nfqa[NFQA_CFG_QUEUE_MAXLEN]); spin_lock_bh(&queue->lock); queue->queue_maxlen = ntohl(*queue_maxlen); spin_unlock_bh(&queue->lock); @@ -958,16 +950,18 @@ nfqnl_recv_config(struct sock *ctnl, struct sk_buff *skb, return ret; } -static struct nfnl_callback nfqnl_cb[NFQNL_MSG_MAX] = { +static const struct nfnl_callback nfqnl_cb[NFQNL_MSG_MAX] = { [NFQNL_MSG_PACKET] = { .call = nfqnl_recv_unsupp, .attr_count = NFQA_MAX, }, [NFQNL_MSG_VERDICT] = { .call = nfqnl_recv_verdict, - .attr_count = NFQA_MAX, }, + .attr_count = NFQA_MAX, + 
.policy = nfqa_verdict_policy }, [NFQNL_MSG_CONFIG] = { .call = nfqnl_recv_config, - .attr_count = NFQA_CFG_MAX, }, + .attr_count = NFQA_CFG_MAX, + .policy = nfqa_cfg_policy }, }; -static struct nfnetlink_subsystem nfqnl_subsys = { +static const struct nfnetlink_subsystem nfqnl_subsys = { .name = "nf_queue", .subsys_id = NFNL_SUBSYS_QUEUE, .cb_count = NFQNL_MSG_MAX, @@ -1057,22 +1051,8 @@ static const struct seq_operations nfqnl_seq_ops = { static int nfqnl_open(struct inode *inode, struct file *file) { - struct seq_file *seq; - struct iter_state *is; - int ret; - - is = kzalloc(sizeof(*is), GFP_KERNEL); - if (!is) - return -ENOMEM; - ret = seq_open(file, &nfqnl_seq_ops); - if (ret < 0) - goto out_free; - seq = file->private_data; - seq->private = is; - return ret; -out_free: - kfree(is); - return ret; + return seq_open_private(file, &nfqnl_seq_ops, + sizeof(struct iter_state)); } static const struct file_operations nfqnl_file_ops = { diff --git a/net/netfilter/x_tables.c b/net/netfilter/x_tables.c index cc2baa6d5a7ac5d75d6a9ec3686e755787dc64ff..d9a3bded0d00ac6f48001842c6f55e18429eb8d4 100644 --- a/net/netfilter/x_tables.c +++ b/net/netfilter/x_tables.c @@ -22,6 +22,7 @@ #include #include #include +#include #include #include @@ -795,7 +796,7 @@ int xt_proto_init(int af) #ifdef CONFIG_PROC_FS strlcpy(buf, xt_prefix[af], sizeof(buf)); strlcat(buf, FORMAT_TABLES, sizeof(buf)); - proc = proc_net_fops_create(buf, 0440, &xt_file_ops); + proc = proc_net_fops_create(&init_net, buf, 0440, &xt_file_ops); if (!proc) goto out; proc->data = (void *) ((unsigned long) af | (TABLE << 16)); @@ -803,14 +804,14 @@ int xt_proto_init(int af) strlcpy(buf, xt_prefix[af], sizeof(buf)); strlcat(buf, FORMAT_MATCHES, sizeof(buf)); - proc = proc_net_fops_create(buf, 0440, &xt_file_ops); + proc = proc_net_fops_create(&init_net, buf, 0440, &xt_file_ops); if (!proc) goto out_remove_tables; proc->data = (void *) ((unsigned long) af | (MATCH << 16)); strlcpy(buf, xt_prefix[af], sizeof(buf)); strlcat(buf, FORMAT_TARGETS, sizeof(buf)); - proc = proc_net_fops_create(buf, 0440, &xt_file_ops); + proc = proc_net_fops_create(&init_net, buf, 0440, &xt_file_ops); if (!proc) goto out_remove_matches; proc->data = (void *) ((unsigned long) af | (TARGET << 16)); @@ -822,12 +823,12 @@ int xt_proto_init(int af) out_remove_matches: strlcpy(buf, xt_prefix[af], sizeof(buf)); strlcat(buf, FORMAT_MATCHES, sizeof(buf)); - proc_net_remove(buf); + proc_net_remove(&init_net, buf); out_remove_tables: strlcpy(buf, xt_prefix[af], sizeof(buf)); strlcat(buf, FORMAT_TABLES, sizeof(buf)); - proc_net_remove(buf); + proc_net_remove(&init_net, buf); out: return -1; #endif @@ -841,15 +842,15 @@ void xt_proto_fini(int af) strlcpy(buf, xt_prefix[af], sizeof(buf)); strlcat(buf, FORMAT_TABLES, sizeof(buf)); - proc_net_remove(buf); + proc_net_remove(&init_net, buf); strlcpy(buf, xt_prefix[af], sizeof(buf)); strlcat(buf, FORMAT_TARGETS, sizeof(buf)); - proc_net_remove(buf); + proc_net_remove(&init_net, buf); strlcpy(buf, xt_prefix[af], sizeof(buf)); strlcat(buf, FORMAT_MATCHES, sizeof(buf)); - proc_net_remove(buf); + proc_net_remove(&init_net, buf); #endif /*CONFIG_PROC_FS*/ } EXPORT_SYMBOL_GPL(xt_proto_fini); diff --git a/net/netfilter/xt_CLASSIFY.c b/net/netfilter/xt_CLASSIFY.c index 5194285668299d077070d67c0db05091f1e4a0f9..07a1b96650056cb2c50137d3e52eaa15f3505200 100644 --- a/net/netfilter/xt_CLASSIFY.c +++ b/net/netfilter/xt_CLASSIFY.c @@ -24,6 +24,7 @@ MODULE_AUTHOR("Patrick McHardy "); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("iptables qdisc 
classification target module"); MODULE_ALIAS("ipt_CLASSIFY"); +MODULE_ALIAS("ip6t_CLASSIFY"); static unsigned int target(struct sk_buff **pskb, diff --git a/net/netfilter/xt_CONNMARK.c b/net/netfilter/xt_CONNMARK.c index 5a00c5444334fe926bd2a8bc995493c1bea574a0..7043c2757e098e3214ff6109b4863a0158573548 100644 --- a/net/netfilter/xt_CONNMARK.c +++ b/net/netfilter/xt_CONNMARK.c @@ -27,6 +27,7 @@ MODULE_AUTHOR("Henrik Nordstrom "); MODULE_DESCRIPTION("IP tables CONNMARK matching module"); MODULE_LICENSE("GPL"); MODULE_ALIAS("ipt_CONNMARK"); +MODULE_ALIAS("ip6t_CONNMARK"); #include #include diff --git a/net/netfilter/xt_NOTRACK.c b/net/netfilter/xt_NOTRACK.c index b7d6312fccc72b13606d6642103bf84c97326a52..fec1aefb1c326b5bdf2fd2446a3b95931c0a4f65 100644 --- a/net/netfilter/xt_NOTRACK.c +++ b/net/netfilter/xt_NOTRACK.c @@ -9,6 +9,7 @@ MODULE_LICENSE("GPL"); MODULE_ALIAS("ipt_NOTRACK"); +MODULE_ALIAS("ip6t_NOTRACK"); static unsigned int target(struct sk_buff **pskb, diff --git a/net/netfilter/xt_connbytes.c b/net/netfilter/xt_connbytes.c index dd4d79b8fc9d20ced45ef0cfc8204569fe9492da..af79423bc8e81a2d41884e2564a4651b65563bfc 100644 --- a/net/netfilter/xt_connbytes.c +++ b/net/netfilter/xt_connbytes.c @@ -14,6 +14,7 @@ MODULE_LICENSE("GPL"); MODULE_AUTHOR("Harald Welte "); MODULE_DESCRIPTION("iptables match for matching number of pkts/bytes per connection"); MODULE_ALIAS("ipt_connbytes"); +MODULE_ALIAS("ip6t_connbytes"); static bool match(const struct sk_buff *skb, diff --git a/net/netfilter/xt_connmark.c b/net/netfilter/xt_connmark.c index e73fa9b46cf7d1a747077c333e92d7ad0809140f..1071fc54d6d3c8811f4404773636ec15083d9cc3 100644 --- a/net/netfilter/xt_connmark.c +++ b/net/netfilter/xt_connmark.c @@ -29,6 +29,7 @@ MODULE_AUTHOR("Henrik Nordstrom "); MODULE_DESCRIPTION("IP tables connmark match module"); MODULE_LICENSE("GPL"); MODULE_ALIAS("ipt_connmark"); +MODULE_ALIAS("ip6t_connmark"); static bool match(const struct sk_buff *skb, diff --git a/net/netfilter/xt_dccp.c b/net/netfilter/xt_dccp.c index 83224ec89cc08348205daaf58a7104ea79773e53..c2b1b24ee335a256ece4cccc1bb6f057b759a0ae 100644 --- a/net/netfilter/xt_dccp.c +++ b/net/netfilter/xt_dccp.c @@ -24,6 +24,7 @@ MODULE_LICENSE("GPL"); MODULE_AUTHOR("Harald Welte "); MODULE_DESCRIPTION("Match for DCCP protocol packets"); MODULE_ALIAS("ipt_dccp"); +MODULE_ALIAS("ip6t_dccp"); #define DCCHECK(cond, option, flag, invflag) (!((flag) & (option)) \ || (!!((invflag) & (option)) ^ (cond))) diff --git a/net/netfilter/xt_hashlimit.c b/net/netfilter/xt_hashlimit.c index bd45f9d3f7d0b4313b3681f1f68fd584aabe3ff1..19103678bf2079bb3b4c3c937a6992744711bca1 100644 --- a/net/netfilter/xt_hashlimit.c +++ b/net/netfilter/xt_hashlimit.c @@ -21,6 +21,7 @@ #include #include #include +#include #include #include @@ -743,13 +744,13 @@ static int __init xt_hashlimit_init(void) printk(KERN_ERR "xt_hashlimit: unable to create slab cache\n"); goto err2; } - hashlimit_procdir4 = proc_mkdir("ipt_hashlimit", proc_net); + hashlimit_procdir4 = proc_mkdir("ipt_hashlimit", init_net.proc_net); if (!hashlimit_procdir4) { printk(KERN_ERR "xt_hashlimit: unable to create proc dir " "entry\n"); goto err3; } - hashlimit_procdir6 = proc_mkdir("ip6t_hashlimit", proc_net); + hashlimit_procdir6 = proc_mkdir("ip6t_hashlimit", init_net.proc_net); if (!hashlimit_procdir6) { printk(KERN_ERR "xt_hashlimit: unable to create proc dir " "entry\n"); @@ -757,7 +758,7 @@ static int __init xt_hashlimit_init(void) } return 0; err4: - remove_proc_entry("ipt_hashlimit", proc_net); + 
remove_proc_entry("ipt_hashlimit", init_net.proc_net); err3: kmem_cache_destroy(hashlimit_cachep); err2: @@ -769,8 +770,8 @@ static int __init xt_hashlimit_init(void) static void __exit xt_hashlimit_fini(void) { - remove_proc_entry("ipt_hashlimit", proc_net); - remove_proc_entry("ip6t_hashlimit", proc_net); + remove_proc_entry("ipt_hashlimit", init_net.proc_net); + remove_proc_entry("ip6t_hashlimit", init_net.proc_net); kmem_cache_destroy(hashlimit_cachep); xt_unregister_matches(xt_hashlimit, ARRAY_SIZE(xt_hashlimit)); } diff --git a/net/netfilter/xt_sctp.c b/net/netfilter/xt_sctp.c index c002153b80ab76ef6e3725dfc9dc6d4fce242d69..f907770fd4e9097302ef8343f25401eec5eb6ee9 100644 --- a/net/netfilter/xt_sctp.c +++ b/net/netfilter/xt_sctp.c @@ -13,6 +13,7 @@ MODULE_LICENSE("GPL"); MODULE_AUTHOR("Kiran Kumar Immidi"); MODULE_DESCRIPTION("Match for SCTP protocol packets"); MODULE_ALIAS("ipt_sctp"); +MODULE_ALIAS("ip6t_sctp"); #ifdef DEBUG_SCTP #define duprintf(format, args...) printk(format , ## args) diff --git a/net/netfilter/xt_tcpmss.c b/net/netfilter/xt_tcpmss.c index cd5f6d758c6806b46032e5c264e417ec66931acf..84d401bfafad2be0fe528f9d3cd1996c15aa71df 100644 --- a/net/netfilter/xt_tcpmss.c +++ b/net/netfilter/xt_tcpmss.c @@ -22,6 +22,7 @@ MODULE_LICENSE("GPL"); MODULE_AUTHOR("Marc Boucher "); MODULE_DESCRIPTION("iptables TCP MSS match module"); MODULE_ALIAS("ipt_tcpmss"); +MODULE_ALIAS("ip6t_tcpmss"); static bool match(const struct sk_buff *skb, diff --git a/net/netfilter/xt_time.c b/net/netfilter/xt_time.c new file mode 100644 index 0000000000000000000000000000000000000000..ef48bbd93573080b3489f378a4cb3755d8c20e36 --- /dev/null +++ b/net/netfilter/xt_time.c @@ -0,0 +1,269 @@ +/* + * xt_time + * Copyright © Jan Engelhardt , 2007 + * + * based on ipt_time by Fabrice MARIE + * This is a module which is used for time matching + * It is using some modified code from dietlibc (localtime() function) + * that you can find at http://www.fefe.de/dietlibc/ + * This file is distributed under the terms of the GNU General Public + * License (GPL). Copies of the GPL can be obtained from gnu.org/gpl. + */ +#include +#include +#include +#include +#include +#include + +struct xtm { + u_int8_t month; /* (1-12) */ + u_int8_t monthday; /* (1-31) */ + u_int8_t weekday; /* (1-7) */ + u_int8_t hour; /* (0-23) */ + u_int8_t minute; /* (0-59) */ + u_int8_t second; /* (0-59) */ + unsigned int dse; +}; + +extern struct timezone sys_tz; /* ouch */ + +static const u_int16_t days_since_year[] = { + 0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334, +}; + +static const u_int16_t days_since_leapyear[] = { + 0, 31, 60, 91, 121, 152, 182, 213, 244, 274, 305, 335, +}; + +/* + * Since time progresses forward, it is best to organize this array in reverse, + * to minimize lookup time. 
+ */ +enum { + DSE_FIRST = 2039, +}; +static const u_int16_t days_since_epoch[] = { + /* 2039 - 2030 */ + 25202, 24837, 24472, 24106, 23741, 23376, 23011, 22645, 22280, 21915, + /* 2029 - 2020 */ + 21550, 21184, 20819, 20454, 20089, 19723, 19358, 18993, 18628, 18262, + /* 2019 - 2010 */ + 17897, 17532, 17167, 16801, 16436, 16071, 15706, 15340, 14975, 14610, + /* 2009 - 2000 */ + 14245, 13879, 13514, 13149, 12784, 12418, 12053, 11688, 11323, 10957, + /* 1999 - 1990 */ + 10592, 10227, 9862, 9496, 9131, 8766, 8401, 8035, 7670, 7305, + /* 1989 - 1980 */ + 6940, 6574, 6209, 5844, 5479, 5113, 4748, 4383, 4018, 3652, + /* 1979 - 1970 */ + 3287, 2922, 2557, 2191, 1826, 1461, 1096, 730, 365, 0, +}; + +static inline bool is_leap(unsigned int y) +{ + return y % 4 == 0 && (y % 100 != 0 || y % 400 == 0); +} + +/* + * Each network packet has a (nano)seconds-since-the-epoch (SSTE) timestamp. + * Since we match against days and daytime, the SSTE value needs to be + * computed back into human-readable dates. + * + * This is done in three separate functions so that the most expensive + * calculations are done last, in case a "simple match" can be found earlier. + */ +static inline unsigned int localtime_1(struct xtm *r, time_t time) +{ + unsigned int v, w; + + /* Each day has 86400s, so finding the hour/minute is actually easy. */ + v = time % 86400; + r->second = v % 60; + w = v / 60; + r->minute = w % 60; + r->hour = w / 60; + return v; +} + +static inline void localtime_2(struct xtm *r, time_t time) +{ + /* + * Here comes the rest (weekday, monthday). First, divide the SSTE + * by seconds-per-day to get the number of _days_ since the epoch. + */ + r->dse = time / 86400; + + /* 1970-01-01 (w=0) was a Thursday (4). */ + r->weekday = (4 + r->dse) % 7; +} + +static void localtime_3(struct xtm *r, time_t time) +{ + unsigned int year, i, w = r->dse; + + /* + * In each year, a certain number of days-since-the-epoch have passed. + * Find the year that is closest to said days. + * + * Consider, for example, w=21612 (2029-03-04). Loop will abort on + * dse[i] <= w, which happens when dse[i] == 21550. This implies + * year == 2009. w will then be 62. + */ + for (i = 0, year = DSE_FIRST; days_since_epoch[i] > w; + ++i, --year) + /* just loop */; + + w -= days_since_epoch[i]; + + /* + * By now we have the current year, and the day of the year. + * r->yearday = w; + * + * On to finding the month (like above). In each month, a certain + * number of days-since-New Year have passed, and find the closest + * one. + * + * Consider w=62 (in a non-leap year). Loop will abort on + * dsy[i] < w, which happens when dsy[i] == 31+28 (i == 2). + * Concludes i == 2, i.e. 3rd month => March. + * + * (A different approach to use would be to subtract a monthlength + * from w repeatedly while counting.) 
+ */ + if (is_leap(year)) { + for (i = ARRAY_SIZE(days_since_leapyear) - 1; + i > 0 && days_since_year[i] > w; --i) + /* just loop */; + } else { + for (i = ARRAY_SIZE(days_since_year) - 1; + i > 0 && days_since_year[i] > w; --i) + /* just loop */; + } + + r->month = i + 1; + r->monthday = w - days_since_year[i] + 1; + return; +} + +static bool xt_time_match(const struct sk_buff *skb, + const struct net_device *in, + const struct net_device *out, + const struct xt_match *match, const void *matchinfo, + int offset, unsigned int protoff, bool *hotdrop) +{ + const struct xt_time_info *info = matchinfo; + unsigned int packet_time; + struct xtm current_time; + s64 stamp; + + /* + * We cannot use get_seconds() instead of __net_timestamp() here. + * Suppose you have two rules: + * 1. match before 13:00 + * 2. match after 13:00 + * If you match against processing time (get_seconds) it + * may happen that the same packet matches both rules if + * it arrived at the right moment before 13:00. + */ + if (skb->tstamp.tv64 == 0) + __net_timestamp((struct sk_buff *)skb); + + stamp = skb->tstamp.tv64; + do_div(stamp, NSEC_PER_SEC); + + if (info->flags & XT_TIME_LOCAL_TZ) + /* Adjust for local timezone */ + stamp -= 60 * sys_tz.tz_minuteswest; + + /* + * xt_time will match when _all_ of the following hold: + * - 'now' is in the global time range date_start..date_end + * - 'now' is in the monthday mask + * - 'now' is in the weekday mask + * - 'now' is in the daytime range time_start..time_end + * (and by default, libxt_time will set these so as to match) + */ + + if (stamp < info->date_start || stamp > info->date_stop) + return false; + + packet_time = localtime_1(¤t_time, stamp); + + if (info->daytime_start < info->daytime_stop) { + if (packet_time < info->daytime_start || + packet_time > info->daytime_stop) + return false; + } else { + if (packet_time < info->daytime_start && + packet_time > info->daytime_stop) + return false; + } + + localtime_2(¤t_time, stamp); + + if (!(info->weekdays_match & (1 << current_time.weekday))) + return false; + + /* Do not spend time computing monthday if all days match anyway */ + if (info->monthdays_match != XT_TIME_ALL_MONTHDAYS) { + localtime_3(¤t_time, stamp); + if (!(info->monthdays_match & (1 << current_time.monthday))) + return false; + } + + return true; +} + +static bool xt_time_check(const char *tablename, const void *ip, + const struct xt_match *match, void *matchinfo, + unsigned int hook_mask) +{ + struct xt_time_info *info = matchinfo; + + if (info->daytime_start > XT_TIME_MAX_DAYTIME || + info->daytime_stop > XT_TIME_MAX_DAYTIME) { + printk(KERN_WARNING "xt_time: invalid argument - start or " + "stop time greater than 23:59:59\n"); + return false; + } + + return true; +} + +static struct xt_match xt_time_reg[] __read_mostly = { + { + .name = "time", + .family = AF_INET, + .match = xt_time_match, + .matchsize = sizeof(struct xt_time_info), + .checkentry = xt_time_check, + .me = THIS_MODULE, + }, + { + .name = "time", + .family = AF_INET6, + .match = xt_time_match, + .matchsize = sizeof(struct xt_time_info), + .checkentry = xt_time_check, + .me = THIS_MODULE, + }, +}; + +static int __init xt_time_init(void) +{ + return xt_register_matches(xt_time_reg, ARRAY_SIZE(xt_time_reg)); +} + +static void __exit xt_time_exit(void) +{ + xt_unregister_matches(xt_time_reg, ARRAY_SIZE(xt_time_reg)); +} + +module_init(xt_time_init); +module_exit(xt_time_exit); +MODULE_AUTHOR("Jan Engelhardt "); +MODULE_DESCRIPTION("netfilter time match"); +MODULE_LICENSE("GPL"); 
+MODULE_ALIAS("ipt_time"); +MODULE_ALIAS("ip6t_time"); diff --git a/net/netlabel/netlabel_cipso_v4.c b/net/netlabel/netlabel_cipso_v4.c index c060e3f991f10ab9d13bb0c5aa4c043d17c2a66e..ba0ca8d3f77d49720358da39e1988797b487e9e1 100644 --- a/net/netlabel/netlabel_cipso_v4.c +++ b/net/netlabel/netlabel_cipso_v4.c @@ -130,7 +130,7 @@ static int netlbl_cipsov4_add_common(struct genl_info *info, return -EINVAL; nla_for_each_nested(nla, info->attrs[NLBL_CIPSOV4_A_TAGLST], nla_rem) - if (nla->nla_type == NLBL_CIPSOV4_A_TAG) { + if (nla_type(nla) == NLBL_CIPSOV4_A_TAG) { if (iter >= CIPSO_V4_TAG_MAXCNT) return -EINVAL; doi_def->tags[iter++] = nla_get_u8(nla); @@ -192,13 +192,13 @@ static int netlbl_cipsov4_add_std(struct genl_info *info) nla_for_each_nested(nla_a, info->attrs[NLBL_CIPSOV4_A_MLSLVLLST], nla_a_rem) - if (nla_a->nla_type == NLBL_CIPSOV4_A_MLSLVL) { + if (nla_type(nla_a) == NLBL_CIPSOV4_A_MLSLVL) { if (nla_validate_nested(nla_a, NLBL_CIPSOV4_A_MAX, netlbl_cipsov4_genl_policy) != 0) goto add_std_failure; nla_for_each_nested(nla_b, nla_a, nla_b_rem) - switch (nla_b->nla_type) { + switch (nla_type(nla_b)) { case NLBL_CIPSOV4_A_MLSLVLLOC: if (nla_get_u32(nla_b) > CIPSO_V4_MAX_LOC_LVLS) @@ -240,7 +240,7 @@ static int netlbl_cipsov4_add_std(struct genl_info *info) nla_for_each_nested(nla_a, info->attrs[NLBL_CIPSOV4_A_MLSLVLLST], nla_a_rem) - if (nla_a->nla_type == NLBL_CIPSOV4_A_MLSLVL) { + if (nla_type(nla_a) == NLBL_CIPSOV4_A_MLSLVL) { struct nlattr *lvl_loc; struct nlattr *lvl_rem; @@ -265,13 +265,13 @@ static int netlbl_cipsov4_add_std(struct genl_info *info) nla_for_each_nested(nla_a, info->attrs[NLBL_CIPSOV4_A_MLSCATLST], nla_a_rem) - if (nla_a->nla_type == NLBL_CIPSOV4_A_MLSCAT) { + if (nla_type(nla_a) == NLBL_CIPSOV4_A_MLSCAT) { if (nla_validate_nested(nla_a, NLBL_CIPSOV4_A_MAX, netlbl_cipsov4_genl_policy) != 0) goto add_std_failure; nla_for_each_nested(nla_b, nla_a, nla_b_rem) - switch (nla_b->nla_type) { + switch (nla_type(nla_b)) { case NLBL_CIPSOV4_A_MLSCATLOC: if (nla_get_u32(nla_b) > CIPSO_V4_MAX_LOC_CATS) @@ -315,7 +315,7 @@ static int netlbl_cipsov4_add_std(struct genl_info *info) nla_for_each_nested(nla_a, info->attrs[NLBL_CIPSOV4_A_MLSCATLST], nla_a_rem) - if (nla_a->nla_type == NLBL_CIPSOV4_A_MLSCAT) { + if (nla_type(nla_a) == NLBL_CIPSOV4_A_MLSCAT) { struct nlattr *cat_loc; struct nlattr *cat_rem; diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c index 5681ce3aebca05f5fbfd4e5ba194f8cdd14fc3ba..c776bcd9f8257b8008b968aec37c93d389d6854f 100644 --- a/net/netlink/af_netlink.c +++ b/net/netlink/af_netlink.c @@ -57,6 +57,7 @@ #include #include +#include #include #include #include @@ -79,7 +80,7 @@ struct netlink_sock { struct netlink_callback *cb; struct mutex *cb_mutex; struct mutex cb_def_mutex; - void (*data_ready)(struct sock *sk, int bytes); + void (*netlink_rcv)(struct sk_buff *skb); struct module *module; }; @@ -88,7 +89,12 @@ struct netlink_sock { static inline struct netlink_sock *nlk_sk(struct sock *sk) { - return (struct netlink_sock *)sk; + return container_of(sk, struct netlink_sock, sk); +} + +static inline int netlink_is_kernel(struct sock *sk) +{ + return nlk_sk(sk)->flags & NETLINK_KERNEL_SOCKET; } struct nl_pid_hash { @@ -121,7 +127,6 @@ static DECLARE_WAIT_QUEUE_HEAD(nl_table_wait); static int netlink_dump(struct sock *sk); static void netlink_destroy_callback(struct netlink_callback *cb); -static void netlink_queue_skip(struct nlmsghdr *nlh, struct sk_buff *skb); static DEFINE_RWLOCK(nl_table_lock); static atomic_t nl_table_users = 
ATOMIC_INIT(0); @@ -210,7 +215,7 @@ netlink_unlock_table(void) wake_up(&nl_table_wait); } -static __inline__ struct sock *netlink_lookup(int protocol, u32 pid) +static __inline__ struct sock *netlink_lookup(struct net *net, int protocol, u32 pid) { struct nl_pid_hash *hash = &nl_table[protocol].hash; struct hlist_head *head; @@ -220,7 +225,7 @@ static __inline__ struct sock *netlink_lookup(int protocol, u32 pid) read_lock(&nl_table_lock); head = nl_pid_hashfn(hash, pid); sk_for_each(sk, node, head) { - if (nlk_sk(sk)->pid == pid) { + if ((sk->sk_net == net) && (nlk_sk(sk)->pid == pid)) { sock_hold(sk); goto found; } @@ -327,7 +332,7 @@ netlink_update_listeners(struct sock *sk) * makes sure updates are visible before bind or setsockopt return. */ } -static int netlink_insert(struct sock *sk, u32 pid) +static int netlink_insert(struct sock *sk, struct net *net, u32 pid) { struct nl_pid_hash *hash = &nl_table[sk->sk_protocol].hash; struct hlist_head *head; @@ -340,7 +345,7 @@ static int netlink_insert(struct sock *sk, u32 pid) head = nl_pid_hashfn(hash, pid); len = 0; sk_for_each(osk, node, head) { - if (nlk_sk(osk)->pid == pid) + if ((osk->sk_net == net) && (nlk_sk(osk)->pid == pid)) break; len++; } @@ -383,15 +388,15 @@ static struct proto netlink_proto = { .obj_size = sizeof(struct netlink_sock), }; -static int __netlink_create(struct socket *sock, struct mutex *cb_mutex, - int protocol) +static int __netlink_create(struct net *net, struct socket *sock, + struct mutex *cb_mutex, int protocol) { struct sock *sk; struct netlink_sock *nlk; sock->ops = &netlink_ops; - sk = sk_alloc(PF_NETLINK, GFP_KERNEL, &netlink_proto, 1); + sk = sk_alloc(net, PF_NETLINK, GFP_KERNEL, &netlink_proto, 1); if (!sk) return -ENOMEM; @@ -411,7 +416,7 @@ static int __netlink_create(struct socket *sock, struct mutex *cb_mutex, return 0; } -static int netlink_create(struct socket *sock, int protocol) +static int netlink_create(struct net *net, struct socket *sock, int protocol) { struct module *module = NULL; struct mutex *cb_mutex; @@ -440,7 +445,7 @@ static int netlink_create(struct socket *sock, int protocol) cb_mutex = nl_table[protocol].cb_mutex; netlink_unlock_table(); - if ((err = __netlink_create(sock, cb_mutex, protocol)) < 0) + if ((err = __netlink_create(net, sock, cb_mutex, protocol)) < 0) goto out_module; nlk = nlk_sk(sock->sk); @@ -477,6 +482,7 @@ static int netlink_release(struct socket *sock) if (nlk->pid && !nlk->subscriptions) { struct netlink_notify n = { + .net = sk->sk_net, .protocol = sk->sk_protocol, .pid = nlk->pid, }; @@ -487,7 +493,7 @@ static int netlink_release(struct socket *sock) module_put(nlk->module); netlink_table_grab(); - if (nlk->flags & NETLINK_KERNEL_SOCKET) { + if (netlink_is_kernel(sk)) { kfree(nl_table[sk->sk_protocol].listeners); nl_table[sk->sk_protocol].module = NULL; nl_table[sk->sk_protocol].registered = 0; @@ -505,6 +511,7 @@ static int netlink_release(struct socket *sock) static int netlink_autobind(struct socket *sock) { struct sock *sk = sock->sk; + struct net *net = sk->sk_net; struct nl_pid_hash *hash = &nl_table[sk->sk_protocol].hash; struct hlist_head *head; struct sock *osk; @@ -518,6 +525,8 @@ static int netlink_autobind(struct socket *sock) netlink_table_grab(); head = nl_pid_hashfn(hash, pid); sk_for_each(osk, node, head) { + if ((osk->sk_net != net)) + continue; if (nlk_sk(osk)->pid == pid) { /* Bind collision, search negative pid values. 
*/ pid = rover--; @@ -529,7 +538,7 @@ static int netlink_autobind(struct socket *sock) } netlink_table_ungrab(); - err = netlink_insert(sk, pid); + err = netlink_insert(sk, net, pid); if (err == -EADDRINUSE) goto retry; @@ -594,6 +603,7 @@ static int netlink_realloc_groups(struct sock *sk) static int netlink_bind(struct socket *sock, struct sockaddr *addr, int addr_len) { struct sock *sk = sock->sk; + struct net *net = sk->sk_net; struct netlink_sock *nlk = nlk_sk(sk); struct sockaddr_nl *nladdr = (struct sockaddr_nl *)addr; int err; @@ -615,7 +625,7 @@ static int netlink_bind(struct socket *sock, struct sockaddr *addr, int addr_len return -EINVAL; } else { err = nladdr->nl_pid ? - netlink_insert(sk, nladdr->nl_pid) : + netlink_insert(sk, net, nladdr->nl_pid) : netlink_autobind(sock); if (err) return err; @@ -698,19 +708,17 @@ static void netlink_overrun(struct sock *sk) static struct sock *netlink_getsockbypid(struct sock *ssk, u32 pid) { - int protocol = ssk->sk_protocol; struct sock *sock; struct netlink_sock *nlk; - sock = netlink_lookup(protocol, pid); + sock = netlink_lookup(ssk->sk_net, ssk->sk_protocol, pid); if (!sock) return ERR_PTR(-ECONNREFUSED); /* Don't bother queuing skb if kernel socket has no input function */ nlk = nlk_sk(sock); - if ((nlk->pid == 0 && !nlk->data_ready) || - (sock->sk_state == NETLINK_CONNECTED && - nlk->dst_pid != nlk_sk(ssk)->pid)) { + if (sock->sk_state == NETLINK_CONNECTED && + nlk->dst_pid != nlk_sk(ssk)->pid) { sock_put(sock); return ERR_PTR(-ECONNREFUSED); } @@ -754,7 +762,7 @@ int netlink_attachskb(struct sock *sk, struct sk_buff *skb, int nonblock, test_bit(0, &nlk->state)) { DECLARE_WAITQUEUE(wait, current); if (!timeo) { - if (!ssk || nlk_sk(ssk)->pid == 0) + if (!ssk || netlink_is_kernel(ssk)) netlink_overrun(sk); sock_put(sk); kfree_skb(skb); @@ -783,7 +791,7 @@ int netlink_attachskb(struct sock *sk, struct sk_buff *skb, int nonblock, return 0; } -int netlink_sendskb(struct sock *sk, struct sk_buff *skb, int protocol) +int netlink_sendskb(struct sock *sk, struct sk_buff *skb) { int len = skb->len; @@ -824,7 +832,34 @@ static inline struct sk_buff *netlink_trim(struct sk_buff *skb, return skb; } -int netlink_unicast(struct sock *ssk, struct sk_buff *skb, u32 pid, int nonblock) +static inline void netlink_rcv_wake(struct sock *sk) +{ + struct netlink_sock *nlk = nlk_sk(sk); + + if (skb_queue_empty(&sk->sk_receive_queue)) + clear_bit(0, &nlk->state); + if (!test_bit(0, &nlk->state)) + wake_up_interruptible(&nlk->wait); +} + +static inline int netlink_unicast_kernel(struct sock *sk, struct sk_buff *skb) +{ + int ret; + struct netlink_sock *nlk = nlk_sk(sk); + + ret = -ECONNREFUSED; + if (nlk->netlink_rcv != NULL) { + ret = skb->len; + skb_set_owner_r(skb, sk); + nlk->netlink_rcv(skb); + } + kfree_skb(skb); + sock_put(sk); + return ret; +} + +int netlink_unicast(struct sock *ssk, struct sk_buff *skb, + u32 pid, int nonblock) { struct sock *sk; int err; @@ -839,13 +874,16 @@ int netlink_unicast(struct sock *ssk, struct sk_buff *skb, u32 pid, int nonblock kfree_skb(skb); return PTR_ERR(sk); } + if (netlink_is_kernel(sk)) + return netlink_unicast_kernel(sk, skb); + err = netlink_attachskb(sk, skb, nonblock, timeo, ssk); if (err == 1) goto retry; if (err) return err; - return netlink_sendskb(sk, skb, ssk->sk_protocol); + return netlink_sendskb(sk, skb); } int netlink_has_listeners(struct sock *sk, unsigned int group) @@ -853,7 +891,7 @@ int netlink_has_listeners(struct sock *sk, unsigned int group) int res = 0; unsigned long *listeners; - 
BUG_ON(!(nlk_sk(sk)->flags & NETLINK_KERNEL_SOCKET)); + BUG_ON(!netlink_is_kernel(sk)); rcu_read_lock(); listeners = rcu_dereference(nl_table[sk->sk_protocol].listeners); @@ -883,6 +921,7 @@ static __inline__ int netlink_broadcast_deliver(struct sock *sk, struct sk_buff struct netlink_broadcast_data { struct sock *exclude_sk; + struct net *net; u32 pid; u32 group; int failure; @@ -905,6 +944,9 @@ static inline int do_one_broadcast(struct sock *sk, !test_bit(p->group - 1, nlk->groups)) goto out; + if ((sk->sk_net != p->net)) + goto out; + if (p->failure) { netlink_overrun(sk); goto out; @@ -943,6 +985,7 @@ static inline int do_one_broadcast(struct sock *sk, int netlink_broadcast(struct sock *ssk, struct sk_buff *skb, u32 pid, u32 group, gfp_t allocation) { + struct net *net = ssk->sk_net; struct netlink_broadcast_data info; struct hlist_node *node; struct sock *sk; @@ -950,6 +993,7 @@ int netlink_broadcast(struct sock *ssk, struct sk_buff *skb, u32 pid, skb = netlink_trim(skb, allocation); info.exclude_sk = ssk; + info.net = net; info.pid = pid; info.group = group; info.failure = 0; @@ -998,6 +1042,9 @@ static inline int do_one_set_err(struct sock *sk, if (sk == p->exclude_sk) goto out; + if (sk->sk_net != p->exclude_sk->sk_net) + goto out; + if (nlk->pid == p->pid || p->group - 1 >= nlk->ngroups || !test_bit(p->group - 1, nlk->groups)) goto out; @@ -1129,16 +1176,6 @@ static void netlink_cmsg_recv_pktinfo(struct msghdr *msg, struct sk_buff *skb) put_cmsg(msg, SOL_NETLINK, NETLINK_PKTINFO, sizeof(info), &info); } -static inline void netlink_rcv_wake(struct sock *sk) -{ - struct netlink_sock *nlk = nlk_sk(sk); - - if (skb_queue_empty(&sk->sk_receive_queue)) - clear_bit(0, &nlk->state); - if (!test_bit(0, &nlk->state)) - wake_up_interruptible(&nlk->wait); -} - static int netlink_sendmsg(struct kiocb *kiocb, struct socket *sock, struct msghdr *msg, size_t len) { @@ -1286,11 +1323,7 @@ static int netlink_recvmsg(struct kiocb *kiocb, struct socket *sock, static void netlink_data_ready(struct sock *sk, int len) { - struct netlink_sock *nlk = nlk_sk(sk); - - if (nlk->data_ready) - nlk->data_ready(sk, len); - netlink_rcv_wake(sk); + BUG(); } /* @@ -1300,8 +1333,8 @@ static void netlink_data_ready(struct sock *sk, int len) */ struct sock * -netlink_kernel_create(int unit, unsigned int groups, - void (*input)(struct sock *sk, int len), +netlink_kernel_create(struct net *net, int unit, unsigned int groups, + void (*input)(struct sk_buff *skb), struct mutex *cb_mutex, struct module *module) { struct socket *sock; @@ -1317,7 +1350,7 @@ netlink_kernel_create(int unit, unsigned int groups, if (sock_create_lite(PF_NETLINK, SOCK_DGRAM, unit, &sock)) return NULL; - if (__netlink_create(sock, cb_mutex, unit) < 0) + if (__netlink_create(net, sock, cb_mutex, unit) < 0) goto out_sock_release; if (groups < 32) @@ -1330,20 +1363,22 @@ netlink_kernel_create(int unit, unsigned int groups, sk = sock->sk; sk->sk_data_ready = netlink_data_ready; if (input) - nlk_sk(sk)->data_ready = input; + nlk_sk(sk)->netlink_rcv = input; - if (netlink_insert(sk, 0)) + if (netlink_insert(sk, net, 0)) goto out_sock_release; nlk = nlk_sk(sk); nlk->flags |= NETLINK_KERNEL_SOCKET; netlink_table_grab(); - nl_table[unit].groups = groups; - nl_table[unit].listeners = listeners; - nl_table[unit].cb_mutex = cb_mutex; - nl_table[unit].module = module; - nl_table[unit].registered = 1; + if (!nl_table[unit].registered) { + nl_table[unit].groups = groups; + nl_table[unit].listeners = listeners; + nl_table[unit].cb_mutex = cb_mutex; + 
nl_table[unit].module = module; + nl_table[unit].registered = 1; + } netlink_table_ungrab(); return sk; @@ -1509,7 +1544,7 @@ int netlink_dump_start(struct sock *ssk, struct sk_buff *skb, atomic_inc(&skb->users); cb->skb = skb; - sk = netlink_lookup(ssk->sk_protocol, NETLINK_CB(skb).pid); + sk = netlink_lookup(ssk->sk_net, ssk->sk_protocol, NETLINK_CB(skb).pid); if (sk == NULL) { netlink_destroy_callback(cb); return -ECONNREFUSED; @@ -1528,12 +1563,7 @@ int netlink_dump_start(struct sock *ssk, struct sk_buff *skb, netlink_dump(sk); sock_put(sk); - - /* We successfully started a dump, by returning -EINTR we - * signal the queue mangement to interrupt processing of - * any netlink messages so userspace gets a chance to read - * the results. */ - return -EINTR; + return 0; } void netlink_ack(struct sk_buff *in_skb, struct nlmsghdr *nlh, int err) @@ -1551,7 +1581,8 @@ void netlink_ack(struct sk_buff *in_skb, struct nlmsghdr *nlh, int err) if (!skb) { struct sock *sk; - sk = netlink_lookup(in_skb->sk->sk_protocol, + sk = netlink_lookup(in_skb->sk->sk_net, + in_skb->sk->sk_protocol, NETLINK_CB(in_skb).pid); if (sk) { sk->sk_err = ENOBUFS; @@ -1569,13 +1600,15 @@ void netlink_ack(struct sk_buff *in_skb, struct nlmsghdr *nlh, int err) netlink_unicast(in_skb->sk, skb, NETLINK_CB(in_skb).pid, MSG_DONTWAIT); } -static int netlink_rcv_skb(struct sk_buff *skb, int (*cb)(struct sk_buff *, +int netlink_rcv_skb(struct sk_buff *skb, int (*cb)(struct sk_buff *, struct nlmsghdr *)) { struct nlmsghdr *nlh; int err; while (skb->len >= nlmsg_total_size(0)) { + int msglen; + nlh = nlmsg_hdr(skb); err = 0; @@ -1591,83 +1624,19 @@ static int netlink_rcv_skb(struct sk_buff *skb, int (*cb)(struct sk_buff *, goto skip; err = cb(skb, nlh); - if (err == -EINTR) { - /* Not an error, but we interrupt processing */ - netlink_queue_skip(nlh, skb); - return err; - } skip: if (nlh->nlmsg_flags & NLM_F_ACK || err) netlink_ack(skb, nlh, err); - netlink_queue_skip(nlh, skb); + msglen = NLMSG_ALIGN(nlh->nlmsg_len); + if (msglen > skb->len) + msglen = skb->len; + skb_pull(skb, msglen); } return 0; } -/** - * nelink_run_queue - Process netlink receive queue. - * @sk: Netlink socket containing the queue - * @qlen: Place to store queue length upon entry - * @cb: Callback function invoked for each netlink message found - * - * Processes as much as there was in the queue upon entry and invokes - * a callback function for each netlink message found. The callback - * function may refuse a message by returning a negative error code - * but setting the error pointer to 0 in which case this function - * returns with a qlen != 0. - * - * qlen must be initialized to 0 before the initial entry, afterwards - * the function may be called repeatedly until qlen reaches 0. - * - * The callback function may return -EINTR to signal that processing - * of netlink messages shall be interrupted. In this case the message - * currently being processed will NOT be requeued onto the receive - * queue. 
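With netlink_run_queue() and netlink_queue_skip() on their way out here, a kernel-side netlink user no longer drains its receive queue by hand: the socket's input callback is handed each skb directly and passes it to netlink_rcv_skb(), which walks the messages, sends acks where NLM_F_ACK or an error requires one, and advances by NLMSG_ALIGN(nlmsg_len) itself. A minimal sketch of the resulting pattern, mirroring the genetlink conversion further down; the subsystem name, the mutex and the use of NETLINK_USERSOCK are illustrative only:

#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/netlink.h>
#include <net/net_namespace.h>
#include <net/netlink.h>
#include <net/sock.h>

static DEFINE_MUTEX(my_mutex);
static struct sock *my_sock;

/* called once per message by netlink_rcv_skb() */
static int my_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
{
	/* dispatch on nlh->nlmsg_type; a negative return becomes an error ack */
	return 0;
}

/* input callback: now takes the skb itself, not (sk, len) */
static void my_rcv(struct sk_buff *skb)
{
	mutex_lock(&my_mutex);
	netlink_rcv_skb(skb, &my_rcv_msg);
	mutex_unlock(&my_mutex);
}

static int __init my_init(void)
{
	my_sock = netlink_kernel_create(&init_net, NETLINK_USERSOCK, 0,
					my_rcv, NULL, THIS_MODULE);
	return my_sock ? 0 : -ENOMEM;
}
module_init(my_init);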
- */ -void netlink_run_queue(struct sock *sk, unsigned int *qlen, - int (*cb)(struct sk_buff *, struct nlmsghdr *)) -{ - struct sk_buff *skb; - - if (!*qlen || *qlen > skb_queue_len(&sk->sk_receive_queue)) - *qlen = skb_queue_len(&sk->sk_receive_queue); - - for (; *qlen; (*qlen)--) { - skb = skb_dequeue(&sk->sk_receive_queue); - if (netlink_rcv_skb(skb, cb)) { - if (skb->len) - skb_queue_head(&sk->sk_receive_queue, skb); - else { - kfree_skb(skb); - (*qlen)--; - } - break; - } - - kfree_skb(skb); - } -} - -/** - * netlink_queue_skip - Skip netlink message while processing queue. - * @nlh: Netlink message to be skipped - * @skb: Socket buffer containing the netlink messages. - * - * Pulls the given netlink message off the socket buffer so the next - * call to netlink_queue_run() will not reconsider the message. - */ -static void netlink_queue_skip(struct nlmsghdr *nlh, struct sk_buff *skb) -{ - int msglen = NLMSG_ALIGN(nlh->nlmsg_len); - - if (msglen > skb->len) - msglen = skb->len; - - skb_pull(skb, msglen); -} - /** * nlmsg_notify - send a notification netlink message * @sk: netlink socket to use @@ -1702,6 +1671,7 @@ int nlmsg_notify(struct sock *sk, struct sk_buff *skb, u32 pid, #ifdef CONFIG_PROC_FS struct nl_seq_iter { + struct net *net; int link; int hash_idx; }; @@ -1719,6 +1689,8 @@ static struct sock *netlink_seq_socket_idx(struct seq_file *seq, loff_t pos) for (j = 0; j <= hash->mask; j++) { sk_for_each(s, node, &hash->table[j]) { + if (iter->net != s->sk_net) + continue; if (off == pos) { iter->link = i; iter->hash_idx = j; @@ -1748,11 +1720,14 @@ static void *netlink_seq_next(struct seq_file *seq, void *v, loff_t *pos) if (v == SEQ_START_TOKEN) return netlink_seq_socket_idx(seq, 0); - s = sk_next(v); + iter = seq->private; + s = v; + do { + s = sk_next(s); + } while (s && (iter->net != s->sk_net)); if (s) return s; - iter = seq->private; i = iter->link; j = iter->hash_idx + 1; @@ -1761,6 +1736,8 @@ static void *netlink_seq_next(struct seq_file *seq, void *v, loff_t *pos) for (; j <= hash->mask; j++) { s = sk_head(&hash->table[j]); + while (s && (iter->net != s->sk_net)) + s = sk_next(s); if (s) { iter->link = i; iter->hash_idx = j; @@ -1815,31 +1792,35 @@ static const struct seq_operations netlink_seq_ops = { static int netlink_seq_open(struct inode *inode, struct file *file) { - struct seq_file *seq; struct nl_seq_iter *iter; - int err; - iter = kzalloc(sizeof(*iter), GFP_KERNEL); + iter = __seq_open_private(file, &netlink_seq_ops, sizeof(*iter)); if (!iter) return -ENOMEM; - err = seq_open(file, &netlink_seq_ops); - if (err) { - kfree(iter); - return err; + iter->net = get_proc_net(inode); + if (!iter->net) { + seq_release_private(inode, file); + return -ENXIO; } - seq = file->private_data; - seq->private = iter; return 0; } +static int netlink_seq_release(struct inode *inode, struct file *file) +{ + struct seq_file *seq = file->private_data; + struct nl_seq_iter *iter = seq->private; + put_net(iter->net); + return seq_release_private(inode, file); +} + static const struct file_operations netlink_seq_fops = { .owner = THIS_MODULE, .open = netlink_seq_open, .read = seq_read, .llseek = seq_lseek, - .release = seq_release_private, + .release = netlink_seq_release, }; #endif @@ -1881,11 +1862,32 @@ static struct net_proto_family netlink_family_ops = { .owner = THIS_MODULE, /* for consistency 8) */ }; +static int __net_init netlink_net_init(struct net *net) +{ +#ifdef CONFIG_PROC_FS + if (!proc_net_fops_create(net, "netlink", 0, &netlink_seq_fops)) + return -ENOMEM; +#endif + 
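The init/exit pair taking shape here is the generic pernet_operations pattern: every namespace gets its own copy of the /proc/net entry when it is created and loses it when it goes away. The same shape works for any subsystem ready to expose per-namespace state; in this sketch the "foo" entry and its seq_file ops are placeholders:

#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/proc_fs.h>
#include <net/net_namespace.h>

static const struct file_operations foo_seq_fops;	/* seq_file ops, elided */

static int __net_init foo_net_init(struct net *net)
{
	if (!proc_net_fops_create(net, "foo", S_IRUGO, &foo_seq_fops))
		return -ENOMEM;
	return 0;
}

static void __net_exit foo_net_exit(struct net *net)
{
	proc_net_remove(net, "foo");
}

static struct pernet_operations foo_net_ops = {
	.init = foo_net_init,
	.exit = foo_net_exit,
};

static int __init foo_init(void)
{
	return register_pernet_subsys(&foo_net_ops);
}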
return 0; +} + +static void __net_exit netlink_net_exit(struct net *net) +{ +#ifdef CONFIG_PROC_FS + proc_net_remove(net, "netlink"); +#endif +} + +static struct pernet_operations __net_initdata netlink_net_ops = { + .init = netlink_net_init, + .exit = netlink_net_exit, +}; + static int __init netlink_proto_init(void) { struct sk_buff *dummy_skb; int i; - unsigned long max; + unsigned long limit; unsigned int order; int err = proto_register(&netlink_proto, 0); @@ -1899,13 +1901,13 @@ static int __init netlink_proto_init(void) goto panic; if (num_physpages >= (128 * 1024)) - max = num_physpages >> (21 - PAGE_SHIFT); + limit = num_physpages >> (21 - PAGE_SHIFT); else - max = num_physpages >> (23 - PAGE_SHIFT); + limit = num_physpages >> (23 - PAGE_SHIFT); - order = get_bitmask_order(max) - 1 + PAGE_SHIFT; - max = (1UL << order) / sizeof(struct hlist_head); - order = get_bitmask_order(max > UINT_MAX ? UINT_MAX : max) - 1; + order = get_bitmask_order(limit) - 1 + PAGE_SHIFT; + limit = (1UL << order) / sizeof(struct hlist_head); + order = get_bitmask_order(min(limit, (unsigned long)UINT_MAX)) - 1; for (i = 0; i < MAX_LINKS; i++) { struct nl_pid_hash *hash = &nl_table[i].hash; @@ -1926,9 +1928,7 @@ static int __init netlink_proto_init(void) } sock_register(&netlink_family_ops); -#ifdef CONFIG_PROC_FS - proc_net_fops_create("netlink", 0, &netlink_seq_fops); -#endif + register_pernet_subsys(&netlink_net_ops); /* The netlink device handler may be needed early. */ rtnetlink_init(); out: @@ -1940,7 +1940,7 @@ static int __init netlink_proto_init(void) core_initcall(netlink_proto_init); EXPORT_SYMBOL(netlink_ack); -EXPORT_SYMBOL(netlink_run_queue); +EXPORT_SYMBOL(netlink_rcv_skb); EXPORT_SYMBOL(netlink_broadcast); EXPORT_SYMBOL(netlink_dump_start); EXPORT_SYMBOL(netlink_kernel_create); diff --git a/net/netlink/attr.c b/net/netlink/attr.c index e4d7bed99c2e8bbc763cef5fbb24a01a2ad91694..ec39d12c2423347463e766fc83bda5ef9861901a 100644 --- a/net/netlink/attr.c +++ b/net/netlink/attr.c @@ -27,12 +27,12 @@ static int validate_nla(struct nlattr *nla, int maxtype, const struct nla_policy *policy) { const struct nla_policy *pt; - int minlen = 0, attrlen = nla_len(nla); + int minlen = 0, attrlen = nla_len(nla), type = nla_type(nla); - if (nla->nla_type <= 0 || nla->nla_type > maxtype) + if (type <= 0 || type > maxtype) return 0; - pt = &policy[nla->nla_type]; + pt = &policy[type]; BUG_ON(pt->type > NLA_TYPE_MAX); @@ -149,7 +149,7 @@ int nla_parse(struct nlattr *tb[], int maxtype, struct nlattr *head, int len, memset(tb, 0, sizeof(struct nlattr *) * (maxtype + 1)); nla_for_each_attr(nla, head, len, rem) { - u16 type = nla->nla_type; + u16 type = nla_type(nla); if (type > 0 && type <= maxtype) { if (policy) { @@ -185,7 +185,7 @@ struct nlattr *nla_find(struct nlattr *head, int len, int attrtype) int rem; nla_for_each_attr(nla, head, len, rem) - if (nla->nla_type == attrtype) + if (nla_type(nla) == attrtype) return nla; return NULL; diff --git a/net/netlink/genetlink.c b/net/netlink/genetlink.c index 8c11ca4a2121692614ac43064c2718695d11bf6b..150579a21469d664563dfcee8bdef5d3f0e355e1 100644 --- a/net/netlink/genetlink.c +++ b/net/netlink/genetlink.c @@ -22,22 +22,14 @@ struct sock *genl_sock = NULL; static DEFINE_MUTEX(genl_mutex); /* serialization of message processing */ -static void genl_lock(void) +static inline void genl_lock(void) { mutex_lock(&genl_mutex); } -static int genl_trylock(void) -{ - return !mutex_trylock(&genl_mutex); -} - -static void genl_unlock(void) +static inline void genl_unlock(void) { 
mutex_unlock(&genl_mutex); - - if (genl_sock && genl_sock->sk_receive_queue.qlen) - genl_sock->sk_data_ready(genl_sock, 0); } #define GENL_FAM_TAB_SIZE 16 @@ -478,16 +470,11 @@ static int genl_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh) return ops->doit(skb, &info); } -static void genl_rcv(struct sock *sk, int len) +static void genl_rcv(struct sk_buff *skb) { - unsigned int qlen = 0; - - do { - if (genl_trylock()) - return; - netlink_run_queue(sk, &qlen, genl_rcv_msg); - genl_unlock(); - } while (qlen && genl_sock && genl_sock->sk_receive_queue.qlen); + genl_lock(); + netlink_rcv_skb(skb, &genl_rcv_msg); + genl_unlock(); } /************************************************************************** @@ -782,8 +769,8 @@ static int __init genl_init(void) netlink_set_nonroot(NETLINK_GENERIC, NL_NONROOT_RECV); /* we'll bump the group number right afterwards */ - genl_sock = netlink_kernel_create(NETLINK_GENERIC, 0, genl_rcv, - NULL, THIS_MODULE); + genl_sock = netlink_kernel_create(&init_net, NETLINK_GENERIC, 0, + genl_rcv, NULL, THIS_MODULE); if (genl_sock == NULL) panic("GENL: Cannot initialize generic netlink\n"); diff --git a/net/netrom/af_netrom.c b/net/netrom/af_netrom.c index dc9273295a382a98b2564718fa95d6a6ab74c0b8..3a4d479ea64e493c342f9fde022f28adbbeb29c1 100644 --- a/net/netrom/af_netrom.c +++ b/net/netrom/af_netrom.c @@ -27,6 +27,7 @@ #include #include #include +#include #include #include #include @@ -105,6 +106,9 @@ static int nr_device_event(struct notifier_block *this, unsigned long event, voi { struct net_device *dev = (struct net_device *)ptr; + if (dev->nd_net != &init_net) + return NOTIFY_DONE; + if (event != NETDEV_DOWN) return NOTIFY_DONE; @@ -408,15 +412,18 @@ static struct proto nr_proto = { .obj_size = sizeof(struct nr_sock), }; -static int nr_create(struct socket *sock, int protocol) +static int nr_create(struct net *net, struct socket *sock, int protocol) { struct sock *sk; struct nr_sock *nr; + if (net != &init_net) + return -EAFNOSUPPORT; + if (sock->type != SOCK_SEQPACKET || protocol != 0) return -ESOCKTNOSUPPORT; - if ((sk = sk_alloc(PF_NETROM, GFP_ATOMIC, &nr_proto, 1)) == NULL) + if ((sk = sk_alloc(net, PF_NETROM, GFP_ATOMIC, &nr_proto, 1)) == NULL) return -ENOMEM; nr = nr_sk(sk); @@ -458,7 +465,7 @@ static struct sock *nr_make_new(struct sock *osk) if (osk->sk_type != SOCK_SEQPACKET) return NULL; - if ((sk = sk_alloc(PF_NETROM, GFP_ATOMIC, osk->sk_prot, 1)) == NULL) + if ((sk = sk_alloc(osk->sk_net, PF_NETROM, GFP_ATOMIC, osk->sk_prot, 1)) == NULL) return NULL; nr = nr_sk(sk); @@ -1447,9 +1454,9 @@ static int __init nr_proto_init(void) nr_loopback_init(); - proc_net_fops_create("nr", S_IRUGO, &nr_info_fops); - proc_net_fops_create("nr_neigh", S_IRUGO, &nr_neigh_fops); - proc_net_fops_create("nr_nodes", S_IRUGO, &nr_nodes_fops); + proc_net_fops_create(&init_net, "nr", S_IRUGO, &nr_info_fops); + proc_net_fops_create(&init_net, "nr_neigh", S_IRUGO, &nr_neigh_fops); + proc_net_fops_create(&init_net, "nr_nodes", S_IRUGO, &nr_nodes_fops); out: return rc; fail: @@ -1477,9 +1484,9 @@ static void __exit nr_exit(void) { int i; - proc_net_remove("nr"); - proc_net_remove("nr_neigh"); - proc_net_remove("nr_nodes"); + proc_net_remove(&init_net, "nr"); + proc_net_remove(&init_net, "nr_neigh"); + proc_net_remove(&init_net, "nr_nodes"); nr_loopback_clear(); nr_rt_free(); diff --git a/net/netrom/nr_dev.c b/net/netrom/nr_dev.c index c7b5d930e7323b84313acf551939229a40663233..8c68da5ef0a17840a017dcf92993e8842d744952 100644 --- a/net/netrom/nr_dev.c +++ b/net/netrom/nr_dev.c @@ 
-95,8 +95,9 @@ static int nr_rebuild_header(struct sk_buff *skb) #endif -static int nr_header(struct sk_buff *skb, struct net_device *dev, unsigned short type, - void *daddr, void *saddr, unsigned len) +static int nr_header(struct sk_buff *skb, struct net_device *dev, + unsigned short type, + const void *daddr, const void *saddr, unsigned len) { unsigned char *buff = skb_push(skb, NR_NETWORK_LEN + NR_TRANSPORT_LEN); @@ -193,6 +194,12 @@ static struct net_device_stats *nr_get_stats(struct net_device *dev) return &nr->stats; } +static const struct header_ops nr_header_ops = { + .create = nr_header, + .rebuild= nr_rebuild_header, +}; + + void nr_setup(struct net_device *dev) { dev->mtu = NR_MAX_PACKET_SIZE; @@ -200,11 +207,10 @@ void nr_setup(struct net_device *dev) dev->open = nr_open; dev->stop = nr_close; - dev->hard_header = nr_header; + dev->header_ops = &nr_header_ops; dev->hard_header_len = NR_NETWORK_LEN + NR_TRANSPORT_LEN; dev->addr_len = AX25_ADDR_LEN; dev->type = ARPHRD_NETROM; - dev->rebuild_header = nr_rebuild_header; dev->set_mac_address = nr_set_mac_address; /* New-style flags. */ diff --git a/net/netrom/nr_route.c b/net/netrom/nr_route.c index 24fe4a66d297d37ebc9600c3e15f263122a53d30..e943c16552a204badf0737b3550d11f6d85df663 100644 --- a/net/netrom/nr_route.c +++ b/net/netrom/nr_route.c @@ -580,7 +580,7 @@ static struct net_device *nr_ax25_dev_get(char *devname) { struct net_device *dev; - if ((dev = dev_get_by_name(devname)) == NULL) + if ((dev = dev_get_by_name(&init_net, devname)) == NULL) return NULL; if ((dev->flags & IFF_UP) && dev->type == ARPHRD_AX25) @@ -598,7 +598,7 @@ struct net_device *nr_dev_first(void) struct net_device *dev, *first = NULL; read_lock(&dev_base_lock); - for_each_netdev(dev) { + for_each_netdev(&init_net, dev) { if ((dev->flags & IFF_UP) && dev->type == ARPHRD_NETROM) if (first == NULL || strncmp(dev->name, first->name, 3) < 0) first = dev; @@ -618,7 +618,7 @@ struct net_device *nr_dev_get(ax25_address *addr) struct net_device *dev; read_lock(&dev_base_lock); - for_each_netdev(dev) { + for_each_netdev(&init_net, dev) { if ((dev->flags & IFF_UP) && dev->type == ARPHRD_NETROM && ax25cmp(addr, (ax25_address *)dev->dev_addr) == 0) { dev_hold(dev); goto out; diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c index 1322d62b5d97224d74586d5eb92df887a383e7cb..e11000a8e950950cd5a52ece4597c72761dc8d38 100644 --- a/net/packet/af_packet.c +++ b/net/packet/af_packet.c @@ -61,6 +61,7 @@ #include #include #include +#include #include #include #include @@ -251,6 +252,9 @@ static int packet_rcv_spkt(struct sk_buff *skb, struct net_device *dev, struct struct sock *sk; struct sockaddr_pkt *spkt; + if (dev->nd_net != &init_net) + goto out; + /* * When we registered the protocol we saved the socket in the data * field for just this event. 
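The guards added above follow one rule applied throughout this series: protocols that have not been made namespace-aware only act on devices living in init_net and silently ignore everything else. A condensed sketch of the two receive-side places the check lands, with hypothetical handler names; the dev->nd_net test and the NOTIFY_DONE/drop behaviour are exactly what the netrom and af_packet hunks do:

#include <linux/netdevice.h>
#include <linux/notifier.h>
#include <linux/skbuff.h>
#include <net/net_namespace.h>

/* packet_type receive handler */
static int my_rcv(struct sk_buff *skb, struct net_device *dev,
		  struct packet_type *pt, struct net_device *orig_dev)
{
	if (dev->nd_net != &init_net)	/* not namespace-aware yet */
		goto drop;

	/* ... normal receive processing ... */
	return 0;

drop:
	kfree_skb(skb);
	return 0;
}

/* netdevice notifier */
static int my_device_event(struct notifier_block *this,
			   unsigned long event, void *ptr)
{
	struct net_device *dev = ptr;

	if (dev->nd_net != &init_net)	/* same rule for notifiers */
		return NOTIFY_DONE;

	/* ... handle NETDEV_DOWN and friends ... */
	return NOTIFY_DONE;
}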
@@ -343,7 +347,7 @@ static int packet_sendmsg_spkt(struct kiocb *iocb, struct socket *sock, */ saddr->spkt_device[13] = 0; - dev = dev_get_by_name(saddr->spkt_device); + dev = dev_get_by_name(&init_net, saddr->spkt_device); err = -ENODEV; if (dev == NULL) goto out_unlock; @@ -385,7 +389,7 @@ static int packet_sendmsg_spkt(struct kiocb *iocb, struct socket *sock, skb_reset_network_header(skb); /* Try to align data part correctly */ - if (dev->hard_header) { + if (dev->header_ops) { skb->data -= dev->hard_header_len; skb->tail -= dev->hard_header_len; if (len < dev->hard_header_len) @@ -451,6 +455,9 @@ static int packet_rcv(struct sk_buff *skb, struct net_device *dev, struct packet int skb_len = skb->len; unsigned int snaplen, res; + if (dev->nd_net != &init_net) + goto drop; + if (skb->pkt_type == PACKET_LOOPBACK) goto drop; @@ -459,7 +466,7 @@ static int packet_rcv(struct sk_buff *skb, struct net_device *dev, struct packet skb->dev = dev; - if (dev->hard_header) { + if (dev->header_ops) { /* The device has an explicit notion of ll header, exported to higher levels. @@ -512,10 +519,8 @@ static int packet_rcv(struct sk_buff *skb, struct net_device *dev, struct packet sll->sll_ifindex = orig_dev->ifindex; else sll->sll_ifindex = dev->ifindex; - sll->sll_halen = 0; - if (dev->hard_header_parse) - sll->sll_halen = dev->hard_header_parse(skb, sll->sll_addr); + sll->sll_halen = dev_parse_header(skb, sll->sll_addr); PACKET_SKB_CB(skb)->origlen = skb->len; @@ -567,13 +572,16 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev, struct packe struct sk_buff *copy_skb = NULL; struct timeval tv; + if (dev->nd_net != &init_net) + goto drop; + if (skb->pkt_type == PACKET_LOOPBACK) goto drop; sk = pt->af_packet_priv; po = pkt_sk(sk); - if (dev->hard_header) { + if (dev->header_ops) { if (sk->sk_type != SOCK_DGRAM) skb_push(skb, skb->data - skb_mac_header(skb)); else if (skb->pkt_type == PACKET_OUTGOING) { @@ -640,18 +648,15 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev, struct packe h->tp_snaplen = snaplen; h->tp_mac = macoff; h->tp_net = netoff; - if (skb->tstamp.tv64 == 0) { - __net_timestamp(skb); - sock_enable_timestamp(sk); - } - tv = ktime_to_timeval(skb->tstamp); + if (skb->tstamp.tv64) + tv = ktime_to_timeval(skb->tstamp); + else + do_gettimeofday(&tv); h->tp_sec = tv.tv_sec; h->tp_usec = tv.tv_usec; sll = (struct sockaddr_ll*)((u8*)h + TPACKET_ALIGN(sizeof(*h))); - sll->sll_halen = 0; - if (dev->hard_header_parse) - sll->sll_halen = dev->hard_header_parse(skb, sll->sll_addr); + sll->sll_halen = dev_parse_header(skb, sll->sll_addr); sll->sll_family = AF_PACKET; sll->sll_hatype = dev->type; sll->sll_protocol = skb->protocol; @@ -733,7 +738,7 @@ static int packet_sendmsg(struct kiocb *iocb, struct socket *sock, } - dev = dev_get_by_index(ifindex); + dev = dev_get_by_index(&init_net, ifindex); err = -ENXIO; if (dev == NULL) goto out_unlock; @@ -756,16 +761,10 @@ static int packet_sendmsg(struct kiocb *iocb, struct socket *sock, skb_reserve(skb, LL_RESERVED_SPACE(dev)); skb_reset_network_header(skb); - if (dev->hard_header) { - int res; - err = -EINVAL; - res = dev->hard_header(skb, dev, ntohs(proto), addr, NULL, len); - if (sock->type != SOCK_DGRAM) { - skb_reset_tail_pointer(skb); - skb->len = 0; - } else if (res < 0) - goto out_free; - } + err = -EINVAL; + if (sock->type == SOCK_DGRAM && + dev_hard_header(skb, dev, ntohs(proto), addr, NULL, len) < 0) + goto out_free; /* Returns -EFAULT on error */ err = memcpy_fromiovec(skb_put(skb,len), msg->msg_iov, len); 
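The af_packet hunks above are the consumer side of the header_ops conversion: instead of poking dev->hard_header, dev->rebuild_header and dev->hard_header_parse individually, a device now points at one const struct header_ops, and callers go through the dev_hard_header() and dev_parse_header() wrappers. The driver side, shown for nr_dev.c earlier and rose_dev.c further down, reduces to a sketch like this (names hypothetical):

#include <linux/netdevice.h>
#include <linux/skbuff.h>

static int my_header(struct sk_buff *skb, struct net_device *dev,
		     unsigned short type,
		     const void *daddr, const void *saddr, unsigned len)
{
	/* build the link-layer header in front of skb->data */
	return 0;
}

static int my_rebuild_header(struct sk_buff *skb)
{
	return 0;
}

static const struct header_ops my_header_ops = {
	.create	 = my_header,
	.rebuild = my_rebuild_header,
};

static void my_setup(struct net_device *dev)
{
	/* replaces the old dev->hard_header / dev->rebuild_header assignments */
	dev->header_ops = &my_header_ops;
}

Callers then test dev->header_ops rather than dev->hard_header and use dev_hard_header() and dev_parse_header(), which is precisely what the af_packet and sch_teql hunks switch to.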
@@ -928,7 +927,7 @@ static int packet_bind_spkt(struct socket *sock, struct sockaddr *uaddr, int add return -EINVAL; strlcpy(name,uaddr->sa_data,sizeof(name)); - dev = dev_get_by_name(name); + dev = dev_get_by_name(&init_net, name); if (dev) { err = packet_do_bind(sk, dev, pkt_sk(sk)->num); dev_put(dev); @@ -955,7 +954,7 @@ static int packet_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len if (sll->sll_ifindex) { err = -ENODEV; - dev = dev_get_by_index(sll->sll_ifindex); + dev = dev_get_by_index(&init_net, sll->sll_ifindex); if (dev == NULL) goto out; } @@ -977,13 +976,16 @@ static struct proto packet_proto = { * Create a packet of type SOCK_PACKET. */ -static int packet_create(struct socket *sock, int protocol) +static int packet_create(struct net *net, struct socket *sock, int protocol) { struct sock *sk; struct packet_sock *po; __be16 proto = (__force __be16)protocol; /* weird, but documented */ int err; + if (net != &init_net) + return -EAFNOSUPPORT; + if (!capable(CAP_NET_RAW)) return -EPERM; if (sock->type != SOCK_DGRAM && sock->type != SOCK_RAW && @@ -993,7 +995,7 @@ static int packet_create(struct socket *sock, int protocol) sock->state = SS_UNCONNECTED; err = -ENOBUFS; - sk = sk_alloc(PF_PACKET, GFP_KERNEL, &packet_proto, 1); + sk = sk_alloc(net, PF_PACKET, GFP_KERNEL, &packet_proto, 1); if (sk == NULL) goto out; @@ -1149,7 +1151,7 @@ static int packet_getname_spkt(struct socket *sock, struct sockaddr *uaddr, return -EOPNOTSUPP; uaddr->sa_family = AF_PACKET; - dev = dev_get_by_index(pkt_sk(sk)->ifindex); + dev = dev_get_by_index(&init_net, pkt_sk(sk)->ifindex); if (dev) { strlcpy(uaddr->sa_data, dev->name, 15); dev_put(dev); @@ -1174,7 +1176,7 @@ static int packet_getname(struct socket *sock, struct sockaddr *uaddr, sll->sll_family = AF_PACKET; sll->sll_ifindex = po->ifindex; sll->sll_protocol = po->num; - dev = dev_get_by_index(po->ifindex); + dev = dev_get_by_index(&init_net, po->ifindex); if (dev) { sll->sll_hatype = dev->type; sll->sll_halen = dev->addr_len; @@ -1226,7 +1228,7 @@ static int packet_mc_add(struct sock *sk, struct packet_mreq_max *mreq) rtnl_lock(); err = -ENODEV; - dev = __dev_get_by_index(mreq->mr_ifindex); + dev = __dev_get_by_index(&init_net, mreq->mr_ifindex); if (!dev) goto done; @@ -1280,7 +1282,7 @@ static int packet_mc_drop(struct sock *sk, struct packet_mreq_max *mreq) if (--ml->count == 0) { struct net_device *dev; *mlp = ml->next; - dev = dev_get_by_index(ml->ifindex); + dev = dev_get_by_index(&init_net, ml->ifindex); if (dev) { packet_dev_mc(dev, ml, -1); dev_put(dev); @@ -1308,7 +1310,7 @@ static void packet_flush_mclist(struct sock *sk) struct net_device *dev; po->mclist = ml->next; - if ((dev = dev_get_by_index(ml->ifindex)) != NULL) { + if ((dev = dev_get_by_index(&init_net, ml->ifindex)) != NULL) { packet_dev_mc(dev, ml, -1); dev_put(dev); } @@ -1465,6 +1467,9 @@ static int packet_notifier(struct notifier_block *this, unsigned long msg, void struct hlist_node *node; struct net_device *dev = data; + if (dev->nd_net != &init_net) + return NOTIFY_DONE; + read_lock(&packet_sklist_lock); sk_for_each(sk, node, &packet_sklist) { struct packet_sock *po = pkt_sk(sk); @@ -1952,7 +1957,7 @@ static const struct file_operations packet_seq_fops = { static void __exit packet_exit(void) { - proc_net_remove("packet"); + proc_net_remove(&init_net, "packet"); unregister_netdevice_notifier(&packet_netdev_notifier); sock_unregister(PF_PACKET); proto_unregister(&packet_proto); @@ -1967,7 +1972,7 @@ static int __init packet_init(void) 
sock_register(&packet_family_ops); register_netdevice_notifier(&packet_netdev_notifier); - proc_net_fops_create("packet", 0, &packet_seq_fops); + proc_net_fops_create(&init_net, "packet", 0, &packet_seq_fops); out: return rc; } diff --git a/net/rfkill/Kconfig b/net/rfkill/Kconfig index 8b31759ee8b08311164ca0910fecf240aa45a69f..7f807b30cfbbc2b50a57d3d6531106a24db7d03d 100644 --- a/net/rfkill/Kconfig +++ b/net/rfkill/Kconfig @@ -5,7 +5,7 @@ menuconfig RFKILL tristate "RF switch subsystem support" help Say Y here if you want to have control over RF switches - found on many WiFi, Bluetooth and IRDA cards. + found on many WiFi and Bluetooth cards. To compile this driver as a module, choose M here: the module will be called rfkill. @@ -22,3 +22,10 @@ config RFKILL_INPUT To compile this driver as a module, choose M here: the module will be called rfkill-input. + +# LED trigger support +config RFKILL_LEDS + bool + depends on RFKILL && LEDS_TRIGGERS + default y + diff --git a/net/rfkill/rfkill-input.c b/net/rfkill/rfkill-input.c index 9f746be588543813b8e9e686ffef3979b6ddb046..eaabf087c59b53e1dcf05af11647fdd2288d136a 100644 --- a/net/rfkill/rfkill-input.c +++ b/net/rfkill/rfkill-input.c @@ -17,6 +17,8 @@ #include #include +#include "rfkill-input.h" + MODULE_AUTHOR("Dmitry Torokhov "); MODULE_DESCRIPTION("Input layer to RF switch connector"); MODULE_LICENSE("GPL"); @@ -81,6 +83,7 @@ static void rfkill_schedule_toggle(struct rfkill_task *task) static DEFINE_RFKILL_TASK(rfkill_wlan, RFKILL_TYPE_WLAN); static DEFINE_RFKILL_TASK(rfkill_bt, RFKILL_TYPE_BLUETOOTH); +static DEFINE_RFKILL_TASK(rfkill_uwb, RFKILL_TYPE_UWB); static void rfkill_event(struct input_handle *handle, unsigned int type, unsigned int code, int down) @@ -93,6 +96,9 @@ static void rfkill_event(struct input_handle *handle, unsigned int type, case KEY_BLUETOOTH: rfkill_schedule_toggle(&rfkill_bt); break; + case KEY_UWB: + rfkill_schedule_toggle(&rfkill_uwb); + break; default: break; } @@ -148,6 +154,11 @@ static const struct input_device_id rfkill_ids[] = { .evbit = { BIT(EV_KEY) }, .keybit = { [LONG(KEY_BLUETOOTH)] = BIT(KEY_BLUETOOTH) }, }, + { + .flags = INPUT_DEVICE_ID_MATCH_EVBIT | INPUT_DEVICE_ID_MATCH_KEYBIT, + .evbit = { BIT(EV_KEY) }, + .keybit = { [LONG(KEY_UWB)] = BIT(KEY_UWB) }, + }, { } }; diff --git a/net/rfkill/rfkill-input.h b/net/rfkill/rfkill-input.h new file mode 100644 index 0000000000000000000000000000000000000000..4dae5006fc776639764158b2a11e1f3c9b080885 --- /dev/null +++ b/net/rfkill/rfkill-input.h @@ -0,0 +1,16 @@ +/* + * Copyright (C) 2007 Ivo van Doorn + */ + +/* + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation. 
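With the RFKILL_LEDS option added above, rfkill_register() (in the rfkill.c hunks further down) also registers an LED trigger named after the rfkill device, so an LED can follow the radio state with no driver changes. The driver side stays the plain rfkill API (rfkill_allocate(), rfkill_register() and the toggle_radio hook); a minimal sketch, where the my_wlan structure and its hardware call are purely illustrative:

#include <linux/device.h>
#include <linux/errno.h>
#include <linux/rfkill.h>

struct my_wlan {			/* illustrative driver private data */
	int radio_on;
};

static int my_wlan_set_radio(struct my_wlan *priv, int on)
{
	priv->radio_on = on;		/* stand-in for real hardware programming */
	return 0;
}

static int my_toggle_radio(void *data, enum rfkill_state state)
{
	/* the rfkill core updates the LED trigger after this succeeds */
	return my_wlan_set_radio(data, state == RFKILL_STATE_ON);
}

static int my_register_rfkill(struct device *parent, struct my_wlan *priv)
{
	struct rfkill *rfkill;

	rfkill = rfkill_allocate(parent, RFKILL_TYPE_WLAN);
	if (!rfkill)
		return -ENOMEM;

	rfkill->name = "my-wlan";
	rfkill->data = priv;
	rfkill->state = RFKILL_STATE_ON;
	rfkill->toggle_radio = my_toggle_radio;

	return rfkill_register(rfkill);
}

Binding an LED to the switch is then a sysfs operation: write the trigger name (the rfkill device's bus id, typically rfkill0, rfkill1, ...) into /sys/class/leds/<led>/trigger.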
+ */ + +#ifndef __RFKILL_INPUT_H +#define __RFKILL_INPUT_H + +void rfkill_switch_all(enum rfkill_type type, enum rfkill_state state); + +#endif /* __RFKILL_INPUT_H */ diff --git a/net/rfkill/rfkill.c b/net/rfkill/rfkill.c index db3395bfbcfa304e5702dcd91c9d503fba01da84..51d151c0e9621169730881d3867fa70e50e20042 100644 --- a/net/rfkill/rfkill.c +++ b/net/rfkill/rfkill.c @@ -1,5 +1,5 @@ /* - * Copyright (C) 2006 Ivo van Doorn + * Copyright (C) 2006 - 2007 Ivo van Doorn * Copyright (C) 2007 Dmitry Torokhov * * This program is free software; you can redistribute it and/or modify @@ -37,6 +37,22 @@ static DEFINE_MUTEX(rfkill_mutex); static enum rfkill_state rfkill_states[RFKILL_TYPE_MAX]; + +static void rfkill_led_trigger(struct rfkill *rfkill, + enum rfkill_state state) +{ +#ifdef CONFIG_RFKILL_LEDS + struct led_trigger *led = &rfkill->led_trigger; + + if (!led->name) + return; + if (state == RFKILL_STATE_OFF) + led_trigger_event(led, LED_OFF); + else + led_trigger_event(led, LED_FULL); +#endif /* CONFIG_RFKILL_LEDS */ +} + static int rfkill_toggle_radio(struct rfkill *rfkill, enum rfkill_state state) { @@ -48,8 +64,10 @@ static int rfkill_toggle_radio(struct rfkill *rfkill, if (state != rfkill->state) { retval = rfkill->toggle_radio(rfkill->data, state); - if (!retval) + if (!retval) { rfkill->state = state; + rfkill_led_trigger(rfkill, state); + } } mutex_unlock(&rfkill->mutex); @@ -106,8 +124,8 @@ static ssize_t rfkill_type_show(struct device *dev, case RFKILL_TYPE_BLUETOOTH: type = "bluetooth"; break; - case RFKILL_TYPE_IRDA: - type = "irda"; + case RFKILL_TYPE_UWB: + type = "ultrawideband"; break; default: BUG(); @@ -172,6 +190,10 @@ static ssize_t rfkill_claim_store(struct device *dev, if (error) return error; + if (rfkill->user_claim_unsupported) { + error = -EOPNOTSUPP; + goto out_unlock; + } if (rfkill->user_claim != claim) { if (!claim) rfkill_toggle_radio(rfkill, @@ -179,9 +201,10 @@ static ssize_t rfkill_claim_store(struct device *dev, rfkill->user_claim = claim; } +out_unlock: mutex_unlock(&rfkill_mutex); - return count; + return error ? error : count; } static struct device_attribute rfkill_dev_attrs[] = { @@ -281,7 +304,7 @@ static void rfkill_remove_switch(struct rfkill *rfkill) /** * rfkill_allocate - allocate memory for rfkill structure. * @parent: device that has rf switch on it - * @type: type of the switch (wlan, bluetooth, irda) + * @type: type of the switch (RFKILL_TYPE_*) * * This function should be called by the network driver when it needs * rfkill structure. Once the structure is allocated the driver shoud @@ -328,6 +351,26 @@ void rfkill_free(struct rfkill *rfkill) } EXPORT_SYMBOL(rfkill_free); +static void rfkill_led_trigger_register(struct rfkill *rfkill) +{ +#ifdef CONFIG_RFKILL_LEDS + int error; + + rfkill->led_trigger.name = rfkill->dev.bus_id; + error = led_trigger_register(&rfkill->led_trigger); + if (error) + rfkill->led_trigger.name = NULL; +#endif /* CONFIG_RFKILL_LEDS */ +} + +static void rfkill_led_trigger_unregister(struct rfkill *rfkill) +{ +#ifdef CONFIG_RFKILL_LEDS + if (rfkill->led_trigger.name) + led_trigger_unregister(&rfkill->led_trigger); +#endif +} + /** * rfkill_register - Register a rfkill structure. 
* @rfkill: rfkill structure to be registered @@ -357,6 +400,7 @@ int rfkill_register(struct rfkill *rfkill) rfkill_remove_switch(rfkill); return error; } + rfkill_led_trigger_register(rfkill); return 0; } @@ -372,6 +416,7 @@ EXPORT_SYMBOL(rfkill_register); */ void rfkill_unregister(struct rfkill *rfkill) { + rfkill_led_trigger_unregister(rfkill); device_del(&rfkill->dev); rfkill_remove_switch(rfkill); put_device(&rfkill->dev); diff --git a/net/rose/af_rose.c b/net/rose/af_rose.c index 976c3cc86a297041d7709f0b4cac7eef72a2767e..509defe53ee53fb5b49f37c0b2372edfa83cc68b 100644 --- a/net/rose/af_rose.c +++ b/net/rose/af_rose.c @@ -26,6 +26,7 @@ #include #include #include +#include #include #include #include @@ -196,6 +197,9 @@ static int rose_device_event(struct notifier_block *this, unsigned long event, { struct net_device *dev = (struct net_device *)ptr; + if (dev->nd_net != &init_net) + return NOTIFY_DONE; + if (event != NETDEV_DOWN) return NOTIFY_DONE; @@ -498,15 +502,18 @@ static struct proto rose_proto = { .obj_size = sizeof(struct rose_sock), }; -static int rose_create(struct socket *sock, int protocol) +static int rose_create(struct net *net, struct socket *sock, int protocol) { struct sock *sk; struct rose_sock *rose; + if (net != &init_net) + return -EAFNOSUPPORT; + if (sock->type != SOCK_SEQPACKET || protocol != 0) return -ESOCKTNOSUPPORT; - if ((sk = sk_alloc(PF_ROSE, GFP_ATOMIC, &rose_proto, 1)) == NULL) + if ((sk = sk_alloc(net, PF_ROSE, GFP_ATOMIC, &rose_proto, 1)) == NULL) return -ENOMEM; rose = rose_sk(sk); @@ -544,7 +551,7 @@ static struct sock *rose_make_new(struct sock *osk) if (osk->sk_type != SOCK_SEQPACKET) return NULL; - if ((sk = sk_alloc(PF_ROSE, GFP_ATOMIC, &rose_proto, 1)) == NULL) + if ((sk = sk_alloc(osk->sk_net, PF_ROSE, GFP_ATOMIC, &rose_proto, 1)) == NULL) return NULL; rose = rose_sk(sk); @@ -1576,10 +1583,10 @@ static int __init rose_proto_init(void) rose_add_loopback_neigh(); - proc_net_fops_create("rose", S_IRUGO, &rose_info_fops); - proc_net_fops_create("rose_neigh", S_IRUGO, &rose_neigh_fops); - proc_net_fops_create("rose_nodes", S_IRUGO, &rose_nodes_fops); - proc_net_fops_create("rose_routes", S_IRUGO, &rose_routes_fops); + proc_net_fops_create(&init_net, "rose", S_IRUGO, &rose_info_fops); + proc_net_fops_create(&init_net, "rose_neigh", S_IRUGO, &rose_neigh_fops); + proc_net_fops_create(&init_net, "rose_nodes", S_IRUGO, &rose_nodes_fops); + proc_net_fops_create(&init_net, "rose_routes", S_IRUGO, &rose_routes_fops); out: return rc; fail: @@ -1606,10 +1613,10 @@ static void __exit rose_exit(void) { int i; - proc_net_remove("rose"); - proc_net_remove("rose_neigh"); - proc_net_remove("rose_nodes"); - proc_net_remove("rose_routes"); + proc_net_remove(&init_net, "rose"); + proc_net_remove(&init_net, "rose_neigh"); + proc_net_remove(&init_net, "rose_nodes"); + proc_net_remove(&init_net, "rose_routes"); rose_loopback_clear(); rose_rt_free(); diff --git a/net/rose/rose_dev.c b/net/rose/rose_dev.c index 8d88795dc663497654aeb0eb292fa91680e260c1..1b6741f1d746c547fce948d799ec9298a9bf7b65 100644 --- a/net/rose/rose_dev.c +++ b/net/rose/rose_dev.c @@ -35,8 +35,9 @@ #include #include -static int rose_header(struct sk_buff *skb, struct net_device *dev, unsigned short type, - void *daddr, void *saddr, unsigned len) +static int rose_header(struct sk_buff *skb, struct net_device *dev, + unsigned short type, + const void *daddr, const void *saddr, unsigned len) { unsigned char *buff = skb_push(skb, ROSE_MIN_LEN + 2); @@ -148,6 +149,11 @@ static struct net_device_stats 
*rose_get_stats(struct net_device *dev) return netdev_priv(dev); } +static const struct header_ops rose_header_ops = { + .create = rose_header, + .rebuild= rose_rebuild_header, +}; + void rose_setup(struct net_device *dev) { dev->mtu = ROSE_MAX_PACKET_SIZE - 2; @@ -155,11 +161,10 @@ void rose_setup(struct net_device *dev) dev->open = rose_open; dev->stop = rose_close; - dev->hard_header = rose_header; + dev->header_ops = &rose_header_ops; dev->hard_header_len = AX25_BPQ_HEADER_LEN + AX25_MAX_HEADER_LEN + ROSE_MIN_LEN; dev->addr_len = ROSE_ADDR_LEN; dev->type = ARPHRD_ROSE; - dev->rebuild_header = rose_rebuild_header; dev->set_mac_address = rose_set_mac_address; /* New-style flags. */ diff --git a/net/rose/rose_route.c b/net/rose/rose_route.c index 96f61a71b2527da33c900acf34c92e9fa014b055..540c0f26ffeeae0d5afb0caab156815deabbebaa 100644 --- a/net/rose/rose_route.c +++ b/net/rose/rose_route.c @@ -583,7 +583,7 @@ static struct net_device *rose_ax25_dev_get(char *devname) { struct net_device *dev; - if ((dev = dev_get_by_name(devname)) == NULL) + if ((dev = dev_get_by_name(&init_net, devname)) == NULL) return NULL; if ((dev->flags & IFF_UP) && dev->type == ARPHRD_AX25) @@ -601,7 +601,7 @@ struct net_device *rose_dev_first(void) struct net_device *dev, *first = NULL; read_lock(&dev_base_lock); - for_each_netdev(dev) { + for_each_netdev(&init_net, dev) { if ((dev->flags & IFF_UP) && dev->type == ARPHRD_ROSE) if (first == NULL || strncmp(dev->name, first->name, 3) < 0) first = dev; @@ -619,7 +619,7 @@ struct net_device *rose_dev_get(rose_address *addr) struct net_device *dev; read_lock(&dev_base_lock); - for_each_netdev(dev) { + for_each_netdev(&init_net, dev) { if ((dev->flags & IFF_UP) && dev->type == ARPHRD_ROSE && rosecmp(addr, (rose_address *)dev->dev_addr) == 0) { dev_hold(dev); goto out; @@ -636,7 +636,7 @@ static int rose_dev_exists(rose_address *addr) struct net_device *dev; read_lock(&dev_base_lock); - for_each_netdev(dev) { + for_each_netdev(&init_net, dev) { if ((dev->flags & IFF_UP) && dev->type == ARPHRD_ROSE && rosecmp(addr, (rose_address *)dev->dev_addr) == 0) goto out; } diff --git a/net/rxrpc/af_rxrpc.c b/net/rxrpc/af_rxrpc.c index c58fa0d1be26b7cea959ae4e00d8a4bbfe2bde0e..0803f305ed081c09d29d5c568ada9df7aeb27ee4 100644 --- a/net/rxrpc/af_rxrpc.c +++ b/net/rxrpc/af_rxrpc.c @@ -14,6 +14,7 @@ #include #include #include +#include #include #include #include "ar-internal.h" @@ -605,13 +606,16 @@ static unsigned int rxrpc_poll(struct file *file, struct socket *sock, /* * create an RxRPC socket */ -static int rxrpc_create(struct socket *sock, int protocol) +static int rxrpc_create(struct net *net, struct socket *sock, int protocol) { struct rxrpc_sock *rx; struct sock *sk; _enter("%p,%d", sock, protocol); + if (net != &init_net) + return -EAFNOSUPPORT; + /* we support transport protocol UDP only */ if (protocol != PF_INET) return -EPROTONOSUPPORT; @@ -622,7 +626,7 @@ static int rxrpc_create(struct socket *sock, int protocol) sock->ops = &rxrpc_rpc_ops; sock->state = SS_UNCONNECTED; - sk = sk_alloc(PF_RXRPC, GFP_KERNEL, &rxrpc_proto, 1); + sk = sk_alloc(net, PF_RXRPC, GFP_KERNEL, &rxrpc_proto, 1); if (!sk) return -ENOMEM; @@ -829,8 +833,8 @@ static int __init af_rxrpc_init(void) } #ifdef CONFIG_PROC_FS - proc_net_fops_create("rxrpc_calls", 0, &rxrpc_call_seq_fops); - proc_net_fops_create("rxrpc_conns", 0, &rxrpc_connection_seq_fops); + proc_net_fops_create(&init_net, "rxrpc_calls", 0, &rxrpc_call_seq_fops); + proc_net_fops_create(&init_net, "rxrpc_conns", 0, &rxrpc_connection_seq_fops); 
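rxrpc_create() above shows the third spot the namespace work touches every protocol family: socket creation. The ->create handler now receives the struct net it is being asked to create a socket in, refuses anything but init_net while the family is unconverted, and passes the namespace on to sk_alloc(). A condensed sketch with a made-up family; the -EAFNOSUPPORT return and the five-argument sk_alloc() call mirror the netrom, rose, packet and rxrpc hunks:

#include <linux/module.h>
#include <linux/net.h>
#include <net/net_namespace.h>
#include <net/sock.h>

#define PF_MYPROTO 27			/* placeholder family number */

static struct proto my_proto = {
	.name	  = "MYPROTO",
	.owner	  = THIS_MODULE,
	.obj_size = sizeof(struct sock),
};

static int my_create(struct net *net, struct socket *sock, int protocol)
{
	struct sock *sk;

	if (net != &init_net)		/* not namespace-aware yet */
		return -EAFNOSUPPORT;

	if (sock->type != SOCK_DGRAM || protocol != 0)
		return -ESOCKTNOSUPPORT;

	sk = sk_alloc(net, PF_MYPROTO, GFP_KERNEL, &my_proto, 1);
	if (!sk)
		return -ENOMEM;

	sock_init_data(sock, sk);
	return 0;
}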
#endif return 0; @@ -868,8 +872,8 @@ static void __exit af_rxrpc_exit(void) _debug("flush scheduled work"); flush_workqueue(rxrpc_workqueue); - proc_net_remove("rxrpc_conns"); - proc_net_remove("rxrpc_calls"); + proc_net_remove(&init_net, "rxrpc_conns"); + proc_net_remove(&init_net, "rxrpc_calls"); destroy_workqueue(rxrpc_workqueue); kmem_cache_destroy(rxrpc_call_jar); _leave(""); diff --git a/net/sched/Kconfig b/net/sched/Kconfig index 8a74cac0be8c01382979f8fd93144ea4065753e5..92435a882fac6c61232a4a4064a33906932f0b9b 100644 --- a/net/sched/Kconfig +++ b/net/sched/Kconfig @@ -447,6 +447,17 @@ config NET_ACT_IPT To compile this code as a module, choose M here: the module will be called ipt. +config NET_ACT_NAT + tristate "Stateless NAT" + depends on NET_CLS_ACT + select NETFILTER + ---help--- + Say Y here to do stateless NAT on IPv4 packets. You should use + netfilter for NAT unless you know what you are doing. + + To compile this code as a module, choose M here: the + module will be called nat. + config NET_ACT_PEDIT tristate "Packet Editing" depends on NET_CLS_ACT diff --git a/net/sched/Makefile b/net/sched/Makefile index b67c36f65cf2ddc44ecbb4ef952351717de57154..81ecbe8e7dce66d85d239857c9ee981cf886d3af 100644 --- a/net/sched/Makefile +++ b/net/sched/Makefile @@ -11,6 +11,7 @@ obj-$(CONFIG_NET_ACT_POLICE) += act_police.o obj-$(CONFIG_NET_ACT_GACT) += act_gact.o obj-$(CONFIG_NET_ACT_MIRRED) += act_mirred.o obj-$(CONFIG_NET_ACT_IPT) += act_ipt.o +obj-$(CONFIG_NET_ACT_NAT) += act_nat.o obj-$(CONFIG_NET_ACT_PEDIT) += act_pedit.o obj-$(CONFIG_NET_ACT_SIMP) += act_simple.o obj-$(CONFIG_NET_SCH_FIFO) += sch_fifo.o diff --git a/net/sched/act_mirred.c b/net/sched/act_mirred.c index 579578944ae778b34c771ed73632cc50dd6b96b2..fd7bca4d5c208f173cc673b8753ff2dac4e14c51 100644 --- a/net/sched/act_mirred.c +++ b/net/sched/act_mirred.c @@ -20,6 +20,7 @@ #include #include #include +#include #include #include #include @@ -73,7 +74,7 @@ static int tcf_mirred_init(struct rtattr *rta, struct rtattr *est, parm = RTA_DATA(tb[TCA_MIRRED_PARMS-1]); if (parm->ifindex) { - dev = __dev_get_by_index(parm->ifindex); + dev = __dev_get_by_index(&init_net, parm->ifindex); if (dev == NULL) return -ENODEV; switch (dev->type) { diff --git a/net/sched/act_nat.c b/net/sched/act_nat.c new file mode 100644 index 0000000000000000000000000000000000000000..c96273bcaf9cbce08531b01f5ccdf785ef320c4c --- /dev/null +++ b/net/sched/act_nat.c @@ -0,0 +1,322 @@ +/* + * Stateless NAT actions + * + * Copyright (c) 2007 Herbert Xu + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. 
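For orientation while reading the parser below: the action is configured over rtnetlink with a TCA_NAT_PARMS attribute carrying a struct tc_nat. The header itself (include/linux/tc_act/tc_nat.h) is not part of this hunk, so the layout here is inferred from the fields tcf_nat_init() and tcf_nat_dump() touch:

#include <linux/types.h>
#include <linux/pkt_cls.h>		/* for the tc_gen member block */

struct tc_nat {				/* inferred layout, see the code below */
	tc_gen;				/* index, capab, action, refcnt, bindcnt */
	__be32	old_addr;
	__be32	new_addr;
	__be32	mask;			/* bits of old_addr that must match */
	__u32	flags;			/* TCA_NAT_FLAG_EGRESS: rewrite the source
					 * address, otherwise the destination */
};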
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +#define NAT_TAB_MASK 15 +static struct tcf_common *tcf_nat_ht[NAT_TAB_MASK + 1]; +static u32 nat_idx_gen; +static DEFINE_RWLOCK(nat_lock); + +static struct tcf_hashinfo nat_hash_info = { + .htab = tcf_nat_ht, + .hmask = NAT_TAB_MASK, + .lock = &nat_lock, +}; + +static int tcf_nat_init(struct rtattr *rta, struct rtattr *est, + struct tc_action *a, int ovr, int bind) +{ + struct rtattr *tb[TCA_NAT_MAX]; + struct tc_nat *parm; + int ret = 0; + struct tcf_nat *p; + struct tcf_common *pc; + + if (rta == NULL || rtattr_parse_nested(tb, TCA_NAT_MAX, rta) < 0) + return -EINVAL; + + if (tb[TCA_NAT_PARMS - 1] == NULL || + RTA_PAYLOAD(tb[TCA_NAT_PARMS - 1]) < sizeof(*parm)) + return -EINVAL; + parm = RTA_DATA(tb[TCA_NAT_PARMS - 1]); + + pc = tcf_hash_check(parm->index, a, bind, &nat_hash_info); + if (!pc) { + pc = tcf_hash_create(parm->index, est, a, sizeof(*p), bind, + &nat_idx_gen, &nat_hash_info); + if (unlikely(!pc)) + return -ENOMEM; + p = to_tcf_nat(pc); + ret = ACT_P_CREATED; + } else { + p = to_tcf_nat(pc); + if (!ovr) { + tcf_hash_release(pc, bind, &nat_hash_info); + return -EEXIST; + } + } + + spin_lock_bh(&p->tcf_lock); + p->old_addr = parm->old_addr; + p->new_addr = parm->new_addr; + p->mask = parm->mask; + p->flags = parm->flags; + + p->tcf_action = parm->action; + spin_unlock_bh(&p->tcf_lock); + + if (ret == ACT_P_CREATED) + tcf_hash_insert(pc, &nat_hash_info); + + return ret; +} + +static int tcf_nat_cleanup(struct tc_action *a, int bind) +{ + struct tcf_nat *p = a->priv; + + return tcf_hash_release(&p->common, bind, &nat_hash_info); +} + +static int tcf_nat(struct sk_buff *skb, struct tc_action *a, + struct tcf_result *res) +{ + struct tcf_nat *p = a->priv; + struct iphdr *iph; + __be32 old_addr; + __be32 new_addr; + __be32 mask; + __be32 addr; + int egress; + int action; + int ihl; + + spin_lock(&p->tcf_lock); + + p->tcf_tm.lastuse = jiffies; + old_addr = p->old_addr; + new_addr = p->new_addr; + mask = p->mask; + egress = p->flags & TCA_NAT_FLAG_EGRESS; + action = p->tcf_action; + + p->tcf_bstats.bytes += skb->len; + p->tcf_bstats.packets++; + + spin_unlock(&p->tcf_lock); + + if (unlikely(action == TC_ACT_SHOT)) + goto drop; + + if (!pskb_may_pull(skb, sizeof(*iph))) + goto drop; + + iph = ip_hdr(skb); + + if (egress) + addr = iph->saddr; + else + addr = iph->daddr; + + if (!((old_addr ^ addr) & mask)) { + if (skb_cloned(skb) && + !skb_clone_writable(skb, sizeof(*iph)) && + pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) + goto drop; + + new_addr &= mask; + new_addr |= addr & ~mask; + + /* Rewrite IP header */ + iph = ip_hdr(skb); + if (egress) + iph->saddr = new_addr; + else + iph->daddr = new_addr; + + nf_csum_replace4(&iph->check, addr, new_addr); + } + + ihl = iph->ihl * 4; + + /* It would be nice to share code with stateful NAT. */ + switch (iph->frag_off & htons(IP_OFFSET) ? 
0 : iph->protocol) { + case IPPROTO_TCP: + { + struct tcphdr *tcph; + + if (!pskb_may_pull(skb, ihl + sizeof(*tcph)) || + (skb_cloned(skb) && + !skb_clone_writable(skb, ihl + sizeof(*tcph)) && + pskb_expand_head(skb, 0, 0, GFP_ATOMIC))) + goto drop; + + tcph = (void *)(skb_network_header(skb) + ihl); + nf_proto_csum_replace4(&tcph->check, skb, addr, new_addr, 1); + break; + } + case IPPROTO_UDP: + { + struct udphdr *udph; + + if (!pskb_may_pull(skb, ihl + sizeof(*udph)) || + (skb_cloned(skb) && + !skb_clone_writable(skb, ihl + sizeof(*udph)) && + pskb_expand_head(skb, 0, 0, GFP_ATOMIC))) + goto drop; + + udph = (void *)(skb_network_header(skb) + ihl); + if (udph->check || skb->ip_summed == CHECKSUM_PARTIAL) { + nf_proto_csum_replace4(&udph->check, skb, addr, + new_addr, 1); + if (!udph->check) + udph->check = CSUM_MANGLED_0; + } + break; + } + case IPPROTO_ICMP: + { + struct icmphdr *icmph; + + if (!pskb_may_pull(skb, ihl + sizeof(*icmph) + sizeof(*iph))) + goto drop; + + icmph = (void *)(skb_network_header(skb) + ihl); + + if ((icmph->type != ICMP_DEST_UNREACH) && + (icmph->type != ICMP_TIME_EXCEEDED) && + (icmph->type != ICMP_PARAMETERPROB)) + break; + + iph = (void *)(icmph + 1); + if (egress) + addr = iph->daddr; + else + addr = iph->saddr; + + if ((old_addr ^ addr) & mask) + break; + + if (skb_cloned(skb) && + !skb_clone_writable(skb, + ihl + sizeof(*icmph) + sizeof(*iph)) && + pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) + goto drop; + + icmph = (void *)(skb_network_header(skb) + ihl); + iph = (void *)(icmph + 1); + + new_addr &= mask; + new_addr |= addr & ~mask; + + /* XXX Fix up the inner checksums. */ + if (egress) + iph->daddr = new_addr; + else + iph->saddr = new_addr; + + nf_proto_csum_replace4(&icmph->checksum, skb, addr, new_addr, + 1); + break; + } + default: + break; + } + + return action; + +drop: + spin_lock(&p->tcf_lock); + p->tcf_qstats.drops++; + spin_unlock(&p->tcf_lock); + return TC_ACT_SHOT; +} + +static int tcf_nat_dump(struct sk_buff *skb, struct tc_action *a, + int bind, int ref) +{ + unsigned char *b = skb_tail_pointer(skb); + struct tcf_nat *p = a->priv; + struct tc_nat *opt; + struct tcf_t t; + int s; + + s = sizeof(*opt); + + /* netlink spinlocks held above us - must use ATOMIC */ + opt = kzalloc(s, GFP_ATOMIC); + if (unlikely(!opt)) + return -ENOBUFS; + + opt->old_addr = p->old_addr; + opt->new_addr = p->new_addr; + opt->mask = p->mask; + opt->flags = p->flags; + + opt->index = p->tcf_index; + opt->action = p->tcf_action; + opt->refcnt = p->tcf_refcnt - ref; + opt->bindcnt = p->tcf_bindcnt - bind; + + RTA_PUT(skb, TCA_NAT_PARMS, s, opt); + t.install = jiffies_to_clock_t(jiffies - p->tcf_tm.install); + t.lastuse = jiffies_to_clock_t(jiffies - p->tcf_tm.lastuse); + t.expires = jiffies_to_clock_t(p->tcf_tm.expires); + RTA_PUT(skb, TCA_NAT_TM, sizeof(t), &t); + + kfree(opt); + + return skb->len; + +rtattr_failure: + nlmsg_trim(skb, b); + kfree(opt); + return -1; +} + +static struct tc_action_ops act_nat_ops = { + .kind = "nat", + .hinfo = &nat_hash_info, + .type = TCA_ACT_NAT, + .capab = TCA_CAP_NONE, + .owner = THIS_MODULE, + .act = tcf_nat, + .dump = tcf_nat_dump, + .cleanup = tcf_nat_cleanup, + .lookup = tcf_hash_search, + .init = tcf_nat_init, + .walk = tcf_generic_walker +}; + +MODULE_DESCRIPTION("Stateless NAT actions"); +MODULE_LICENSE("GPL"); + +static int __init nat_init_module(void) +{ + return tcf_register_action(&act_nat_ops); +} + +static void __exit nat_cleanup_module(void) +{ + tcf_unregister_action(&act_nat_ops); +} + +module_init(nat_init_module); 
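A concrete reading of the rewrite implemented above: a rule matches when the masked bits of the address equal old_addr, the replacement keeps the host bits and takes the network bits from new_addr, and the IP and transport checksums are patched incrementally with nf_csum_replace4()/nf_proto_csum_replace4() rather than recomputed:

/*
 *	new = (new_addr & mask) | (addr & ~mask)
 *
 *	old_addr = 10.0.0.0   new_addr = 192.168.5.0   mask = 255.255.255.0
 *	addr     = 10.0.0.7   ->  (addr ^ old_addr) & mask == 0, so it matches
 *	result   = 192.168.5.0 | 0.0.0.7 = 192.168.5.7
 *
 *	An egress rule (TCA_NAT_FLAG_EGRESS) rewrites the source address this
 *	way; without the flag the destination address is rewritten instead.
 */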
+module_exit(nat_cleanup_module); diff --git a/net/sched/act_police.c b/net/sched/act_police.c index 17f6f27e28a2bda0ef6a16d043b0dba66313d7f5..a73e3e6d87ea378f3f72b36b69d3ca3266061949 100644 --- a/net/sched/act_police.c +++ b/net/sched/act_police.c @@ -21,8 +21,8 @@ #include #include -#define L2T(p,L) ((p)->tcfp_R_tab->data[(L)>>(p)->tcfp_R_tab->rate.cell_log]) -#define L2T_P(p,L) ((p)->tcfp_P_tab->data[(L)>>(p)->tcfp_P_tab->rate.cell_log]) +#define L2T(p,L) qdisc_l2t((p)->tcfp_R_tab, L) +#define L2T_P(p,L) qdisc_l2t((p)->tcfp_P_tab, L) #define POL_TAB_MASK 15 static struct tcf_common *tcf_police_ht[POL_TAB_MASK + 1]; diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c index 5f0fbca7393ff73648129e8bad3b36081a525f44..03657976fd501f9715840750e51bf9029f1c631d 100644 --- a/net/sched/cls_api.c +++ b/net/sched/cls_api.c @@ -154,7 +154,7 @@ static int tc_ctl_tfilter(struct sk_buff *skb, struct nlmsghdr *n, void *arg) /* Find head of filter chain. */ /* Find link */ - if ((dev = __dev_get_by_index(t->tcm_ifindex)) == NULL) + if ((dev = __dev_get_by_index(&init_net, t->tcm_ifindex)) == NULL) return -ENODEV; /* Find qdisc */ @@ -387,7 +387,7 @@ static int tc_dump_tfilter(struct sk_buff *skb, struct netlink_callback *cb) if (cb->nlh->nlmsg_len < NLMSG_LENGTH(sizeof(*tcm))) return skb->len; - if ((dev = dev_get_by_index(tcm->tcm_ifindex)) == NULL) + if ((dev = dev_get_by_index(&init_net, tcm->tcm_ifindex)) == NULL) return skb->len; if (!tcm->tcm_parent) diff --git a/net/sched/cls_u32.c b/net/sched/cls_u32.c index d4d5d2f271d2f77f17fc7dbfd605cbbe3ab14f5a..9e98c6e567dde518ba28b29ed7211cb6baac19de 100644 --- a/net/sched/cls_u32.c +++ b/net/sched/cls_u32.c @@ -592,7 +592,7 @@ static int u32_change(struct tcf_proto *tp, unsigned long base, u32 handle, } else handle = gen_new_kid(ht, htid); - if (tb[TCA_U32_SEL-1] == 0 || + if (tb[TCA_U32_SEL-1] == NULL || RTA_PAYLOAD(tb[TCA_U32_SEL-1]) < sizeof(struct tc_u32_sel)) return -EINVAL; diff --git a/net/sched/em_meta.c b/net/sched/em_meta.c index 650f09c8bd6a77b41f376f9213781547adb032c2..e9989610712ca2b8e68bc845623c28a5e78e8e5f 100644 --- a/net/sched/em_meta.c +++ b/net/sched/em_meta.c @@ -291,7 +291,7 @@ META_COLLECTOR(var_sk_bound_if) } else { struct net_device *dev; - dev = dev_get_by_index(skb->sk->sk_bound_dev_if); + dev = dev_get_by_index(&init_net, skb->sk->sk_bound_dev_if); *err = var_dev(dev, dst); if (dev) dev_put(dev); diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c index dee0d5fb39c555a5f1f60eaaf4f4dd3df9ae666d..8ae137e3522b44e0cb8a5e6ccb191f10b47984e8 100644 --- a/net/sched/sch_api.c +++ b/net/sched/sch_api.c @@ -28,6 +28,7 @@ #include #include +#include #include #include @@ -606,7 +607,7 @@ static int tc_get_qdisc(struct sk_buff *skb, struct nlmsghdr *n, void *arg) struct Qdisc *p = NULL; int err; - if ((dev = __dev_get_by_index(tcm->tcm_ifindex)) == NULL) + if ((dev = __dev_get_by_index(&init_net, tcm->tcm_ifindex)) == NULL) return -ENODEV; if (clid) { @@ -673,7 +674,7 @@ static int tc_modify_qdisc(struct sk_buff *skb, struct nlmsghdr *n, void *arg) clid = tcm->tcm_parent; q = p = NULL; - if ((dev = __dev_get_by_index(tcm->tcm_ifindex)) == NULL) + if ((dev = __dev_get_by_index(&init_net, tcm->tcm_ifindex)) == NULL) return -ENODEV; if (clid) { @@ -880,7 +881,7 @@ static int tc_dump_qdisc(struct sk_buff *skb, struct netlink_callback *cb) s_q_idx = q_idx = cb->args[1]; read_lock(&dev_base_lock); idx = 0; - for_each_netdev(dev) { + for_each_netdev(&init_net, dev) { if (idx < s_idx) goto cont; if (idx > s_idx) @@ -931,7 +932,7 @@ static int 
tc_ctl_tclass(struct sk_buff *skb, struct nlmsghdr *n, void *arg) u32 qid = TC_H_MAJ(clid); int err; - if ((dev = __dev_get_by_index(tcm->tcm_ifindex)) == NULL) + if ((dev = __dev_get_by_index(&init_net, tcm->tcm_ifindex)) == NULL) return -ENODEV; /* @@ -1114,7 +1115,7 @@ static int tc_dump_tclass(struct sk_buff *skb, struct netlink_callback *cb) if (cb->nlh->nlmsg_len < NLMSG_LENGTH(sizeof(*tcm))) return 0; - if ((dev = dev_get_by_index(tcm->tcm_ifindex)) == NULL) + if ((dev = dev_get_by_index(&init_net, tcm->tcm_ifindex)) == NULL) return 0; s_t = cb->args[0]; @@ -1225,10 +1226,13 @@ EXPORT_SYMBOL(tcf_destroy_chain); #ifdef CONFIG_PROC_FS static int psched_show(struct seq_file *seq, void *v) { + struct timespec ts; + + hrtimer_get_res(CLOCK_MONOTONIC, &ts); seq_printf(seq, "%08x %08x %08x %08x\n", (u32)NSEC_PER_USEC, (u32)PSCHED_US2NS(1), 1000000, - (u32)NSEC_PER_SEC/(u32)ktime_to_ns(KTIME_MONOTONIC_RES)); + (u32)NSEC_PER_SEC/(u32)ktime_to_ns(timespec_to_ktime(ts))); return 0; } @@ -1251,7 +1255,7 @@ static int __init pktsched_init(void) { register_qdisc(&pfifo_qdisc_ops); register_qdisc(&bfifo_qdisc_ops); - proc_net_fops_create("psched", 0, &psched_fops); + proc_net_fops_create(&init_net, "psched", 0, &psched_fops); rtnl_register(PF_UNSPEC, RTM_NEWQDISC, tc_modify_qdisc, NULL); rtnl_register(PF_UNSPEC, RTM_DELQDISC, tc_get_qdisc, NULL); diff --git a/net/sched/sch_cbq.c b/net/sched/sch_cbq.c index cbef3bbfc20f15e84c36cb6dafd9f2e08319e20d..4de3744e65c35cdf688b1c134749535f42ca6e07 100644 --- a/net/sched/sch_cbq.c +++ b/net/sched/sch_cbq.c @@ -175,7 +175,7 @@ struct cbq_sched_data }; -#define L2T(cl,len) ((cl)->R_tab->data[(len)>>(cl)->R_tab->rate.cell_log]) +#define L2T(cl,len) qdisc_l2t((cl)->R_tab,len) static __inline__ unsigned cbq_hash(u32 h) diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c index c81649cf0b9ecc62316205286eb6149e869b910b..95ae11956f35ebb931fc4bf20039df2c8ba1caf8 100644 --- a/net/sched/sch_generic.c +++ b/net/sched/sch_generic.c @@ -134,34 +134,19 @@ static inline int qdisc_restart(struct net_device *dev) { struct Qdisc *q = dev->qdisc; struct sk_buff *skb; - unsigned lockless; int ret; /* Dequeue packet */ if (unlikely((skb = dev_dequeue_skb(dev, q)) == NULL)) return 0; - /* - * When the driver has LLTX set, it does its own locking in - * start_xmit. These checks are worth it because even uncongested - * locks can be quite expensive. The driver can do a trylock, as - * is being done here; in case of lock contention it should return - * NETDEV_TX_LOCKED and the packet will be requeued. - */ - lockless = (dev->features & NETIF_F_LLTX); - - if (!lockless && !netif_tx_trylock(dev)) { - /* Another CPU grabbed the driver tx lock */ - return handle_dev_cpu_collision(skb, dev, q); - } /* And release queue */ spin_unlock(&dev->queue_lock); + HARD_TX_LOCK(dev, smp_processor_id()); ret = dev_hard_start_xmit(skb, dev); - - if (!lockless) - netif_tx_unlock(dev); + HARD_TX_UNLOCK(dev); spin_lock(&dev->queue_lock); q = dev->qdisc; @@ -256,6 +241,12 @@ static void dev_watchdog_down(struct net_device *dev) netif_tx_unlock_bh(dev); } +/** + * netif_carrier_on - set carrier + * @dev: network device + * + * Device has detected that carrier. + */ void netif_carrier_on(struct net_device *dev) { if (test_and_clear_bit(__LINK_STATE_NOCARRIER, &dev->state)) @@ -264,6 +255,12 @@ void netif_carrier_on(struct net_device *dev) __netdev_watchdog_up(dev); } +/** + * netif_carrier_off - clear carrier + * @dev: network device + * + * Device has detected loss of carrier. 
+ */ void netif_carrier_off(struct net_device *dev) { if (!test_and_set_bit(__LINK_STATE_NOCARRIER, &dev->state)) diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c index 246a2f9765f1312c6f83092c31944db1bf9aaa1f..5e608a64935af6b644c6ecf3596f7dd84c32d35a 100644 --- a/net/sched/sch_htb.c +++ b/net/sched/sch_htb.c @@ -132,10 +132,8 @@ struct htb_class { static inline long L2T(struct htb_class *cl, struct qdisc_rate_table *rate, int size) { - int slot = size >> rate->rate.cell_log; - if (slot > 255) - return (rate->data[255]*(slot >> 8) + rate->data[slot & 0xFF]); - return rate->data[slot]; + long result = qdisc_l2t(rate, size); + return result; } struct htb_sched { diff --git a/net/sched/sch_tbf.c b/net/sched/sch_tbf.c index 8c2639af4c6a42a45c7c6ee5fecda621c6c14f82..b0d81098b0eee58acff75961491975ad45d2bb65 100644 --- a/net/sched/sch_tbf.c +++ b/net/sched/sch_tbf.c @@ -115,8 +115,8 @@ struct tbf_sched_data struct qdisc_watchdog watchdog; /* Watchdog timer */ }; -#define L2T(q,L) ((q)->R_tab->data[(L)>>(q)->R_tab->rate.cell_log]) -#define L2T_P(q,L) ((q)->P_tab->data[(L)>>(q)->P_tab->rate.cell_log]) +#define L2T(q,L) qdisc_l2t((q)->R_tab,L) +#define L2T_P(q,L) qdisc_l2t((q)->P_tab,L) static int tbf_enqueue(struct sk_buff *skb, struct Qdisc* sch) { diff --git a/net/sched/sch_teql.c b/net/sched/sch_teql.c index 0968184ea6becc6f58ba848fc76c629aa8e26126..be57cf317a7f0dfdaae0b09f2504346e6ba107d8 100644 --- a/net/sched/sch_teql.c +++ b/net/sched/sch_teql.c @@ -232,9 +232,12 @@ __teql_resolve(struct sk_buff *skb, struct sk_buff *skb_res, struct net_device * } if (neigh_event_send(n, skb_res) == 0) { int err; + read_lock(&n->lock); - err = dev->hard_header(skb, dev, ntohs(skb->protocol), n->ha, NULL, skb->len); + err = dev_hard_header(skb, dev, ntohs(skb->protocol), + n->ha, NULL, skb->len); read_unlock(&n->lock); + if (err < 0) { neigh_release(n); return -EINVAL; @@ -246,10 +249,10 @@ __teql_resolve(struct sk_buff *skb, struct sk_buff *skb_res, struct net_device * return (skb_res == NULL) ? 
-EAGAIN : 1; } -static __inline__ int -teql_resolve(struct sk_buff *skb, struct sk_buff *skb_res, struct net_device *dev) +static inline int teql_resolve(struct sk_buff *skb, + struct sk_buff *skb_res, struct net_device *dev) { - if (dev->hard_header == NULL || + if (dev->header_ops == NULL || skb->dst == NULL || skb->dst->neighbour == NULL) return 0; @@ -432,7 +435,6 @@ static __init void teql_master_setup(struct net_device *dev) dev->tx_queue_len = 100; dev->flags = IFF_NOARP; dev->hard_header_len = LL_MAX_HEADER; - SET_MODULE_OWNER(dev); } static LIST_HEAD(master_dev_list); diff --git a/net/sctp/Makefile b/net/sctp/Makefile index 70c828bbe444119630051c4b951855389083d288..1da7204d9b42c5bd9eec93304642a8c85221b416 100644 --- a/net/sctp/Makefile +++ b/net/sctp/Makefile @@ -9,7 +9,8 @@ sctp-y := sm_statetable.o sm_statefuns.o sm_sideeffect.o \ transport.o chunk.o sm_make_chunk.o ulpevent.o \ inqueue.o outqueue.o ulpqueue.o command.o \ tsnmap.o bind_addr.o socket.o primitive.o \ - output.o input.o debug.o ssnmap.o proc.o crc32c.o + output.o input.o debug.o ssnmap.o proc.o crc32c.o \ + auth.o sctp-$(CONFIG_SCTP_DBG_OBJCNT) += objcnt.o sctp-$(CONFIG_SYSCTL) += sysctl.o diff --git a/net/sctp/associola.c b/net/sctp/associola.c index 9bad8ba0feda9610d55c088153d3921ff8b69156..03158e3665da0b34786843104d4ab5f4b5fa4736 100644 --- a/net/sctp/associola.c +++ b/net/sctp/associola.c @@ -74,6 +74,8 @@ static struct sctp_association *sctp_association_init(struct sctp_association *a { struct sctp_sock *sp; int i; + sctp_paramhdr_t *p; + int err; /* Retrieve the SCTP per socket area. */ sp = sctp_sk((struct sock *)sk); @@ -298,6 +300,30 @@ static struct sctp_association *sctp_association_init(struct sctp_association *a asoc->default_timetolive = sp->default_timetolive; asoc->default_rcv_context = sp->default_rcv_context; + /* AUTH related initializations */ + INIT_LIST_HEAD(&asoc->endpoint_shared_keys); + err = sctp_auth_asoc_copy_shkeys(ep, asoc, gfp); + if (err) + goto fail_init; + + asoc->active_key_id = ep->active_key_id; + asoc->asoc_shared_key = NULL; + + asoc->default_hmac_id = 0; + /* Save the hmacs and chunks list into this association */ + if (ep->auth_hmacs_list) + memcpy(asoc->c.auth_hmacs, ep->auth_hmacs_list, + ntohs(ep->auth_hmacs_list->param_hdr.length)); + if (ep->auth_chunk_list) + memcpy(asoc->c.auth_chunks, ep->auth_chunk_list, + ntohs(ep->auth_chunk_list->param_hdr.length)); + + /* Get the AUTH random number for this association */ + p = (sctp_paramhdr_t *)asoc->c.auth_random; + p->type = SCTP_PARAM_RANDOM; + p->length = htons(sizeof(sctp_paramhdr_t) + SCTP_AUTH_RANDOM_LENGTH); + get_random_bytes(p+1, SCTP_AUTH_RANDOM_LENGTH); + return asoc; fail_init: @@ -389,6 +415,9 @@ void sctp_association_free(struct sctp_association *asoc) /* Free peer's cached cookie. */ kfree(asoc->peer.cookie); + kfree(asoc->peer.peer_random); + kfree(asoc->peer.peer_chunks); + kfree(asoc->peer.peer_hmacs); /* Release the transport structures. 
*/ list_for_each_safe(pos, temp, &asoc->peer.transport_addr_list) { @@ -407,6 +436,12 @@ void sctp_association_free(struct sctp_association *asoc) if (asoc->addip_last_asconf) sctp_chunk_free(asoc->addip_last_asconf); + /* AUTH - Free the endpoint shared keys */ + sctp_auth_destroy_keys(&asoc->endpoint_shared_keys); + + /* AUTH - Free the association shared key */ + sctp_auth_key_put(asoc->asoc_shared_key); + sctp_association_put(asoc); } @@ -976,6 +1011,16 @@ static void sctp_assoc_bh_rcv(struct work_struct *work) state = asoc->state; subtype = SCTP_ST_CHUNK(chunk->chunk_hdr->type); + /* SCTP-AUTH, Section 6.3: + * The receiver has a list of chunk types which it expects + * to be received only after an AUTH-chunk. This list has + * been sent to the peer during the association setup. It + * MUST silently discard these chunks if they are not placed + * after an AUTH chunk in the packet. + */ + if (sctp_auth_recv_cid(subtype.chunk, asoc) && !chunk->auth) + continue; + /* Remember where the last DATA chunk came from so we * know where to send the SACK. */ @@ -1112,6 +1157,24 @@ void sctp_assoc_update(struct sctp_association *asoc, sctp_assoc_set_id(asoc, GFP_ATOMIC); } } + + /* SCTP-AUTH: Save the peer parameters from the new assocaitions + * and also move the association shared keys over + */ + kfree(asoc->peer.peer_random); + asoc->peer.peer_random = new->peer.peer_random; + new->peer.peer_random = NULL; + + kfree(asoc->peer.peer_chunks); + asoc->peer.peer_chunks = new->peer.peer_chunks; + new->peer.peer_chunks = NULL; + + kfree(asoc->peer.peer_hmacs); + asoc->peer.peer_hmacs = new->peer.peer_hmacs; + new->peer.peer_hmacs = NULL; + + sctp_auth_key_put(asoc->asoc_shared_key); + sctp_auth_asoc_init_active_key(asoc, GFP_ATOMIC); } /* Update the retran path for sending a retransmitted packet. diff --git a/net/sctp/auth.c b/net/sctp/auth.c new file mode 100644 index 0000000000000000000000000000000000000000..781810724714aead5a11dce08853a35a82c59946 --- /dev/null +++ b/net/sctp/auth.c @@ -0,0 +1,938 @@ +/* SCTP kernel reference Implementation + * (C) Copyright 2007 Hewlett-Packard Development Company, L.P. + * + * This file is part of the SCTP kernel reference Implementation + * + * The SCTP reference implementation is free software; + * you can redistribute it and/or modify it under the terms of + * the GNU General Public License as published by + * the Free Software Foundation; either version 2, or (at your option) + * any later version. + * + * The SCTP reference implementation is distributed in the hope that it + * will be useful, but WITHOUT ANY WARRANTY; without even the implied + * ************************ + * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * See the GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with GNU CC; see the file COPYING. If not, write to + * the Free Software Foundation, 59 Temple Place - Suite 330, + * Boston, MA 02111-1307, USA. + * + * Please send any bug reports or fixes you make to the + * email address(es): + * lksctp developers + * + * Or submit a bug report through the following website: + * http://www.sf.net/projects/lksctp + * + * Written or modified by: + * Vlad Yasevich + * + * Any bugs reported given to us we will try to fix... any fixes shared will + * be incorporated into the next SCTP release. + */ + +#include +#include +#include +#include +#include + +static struct sctp_hmac sctp_hmac_list[SCTP_AUTH_NUM_HMACS] = { + { + /* id 0 is reserved. 
as all 0 */ + .hmac_id = SCTP_AUTH_HMAC_ID_RESERVED_0, + }, + { + .hmac_id = SCTP_AUTH_HMAC_ID_SHA1, + .hmac_name="hmac(sha1)", + .hmac_len = SCTP_SHA1_SIG_SIZE, + }, + { + /* id 2 is reserved as well */ + .hmac_id = SCTP_AUTH_HMAC_ID_RESERVED_2, + }, + { + .hmac_id = SCTP_AUTH_HMAC_ID_SHA256, + .hmac_name="hmac(sha256)", + .hmac_len = SCTP_SHA256_SIG_SIZE, + } +}; + + +void sctp_auth_key_put(struct sctp_auth_bytes *key) +{ + if (!key) + return; + + if (atomic_dec_and_test(&key->refcnt)) { + kfree(key); + SCTP_DBG_OBJCNT_DEC(keys); + } +} + +/* Create a new key structure of a given length */ +static struct sctp_auth_bytes *sctp_auth_create_key(__u32 key_len, gfp_t gfp) +{ + struct sctp_auth_bytes *key; + + /* Allocate the shared key */ + key = kmalloc(sizeof(struct sctp_auth_bytes) + key_len, gfp); + if (!key) + return NULL; + + key->len = key_len; + atomic_set(&key->refcnt, 1); + SCTP_DBG_OBJCNT_INC(keys); + + return key; +} + +/* Create a new shared key container with a given key id */ +struct sctp_shared_key *sctp_auth_shkey_create(__u16 key_id, gfp_t gfp) +{ + struct sctp_shared_key *new; + + /* Allocate the shared key container */ + new = kzalloc(sizeof(struct sctp_shared_key), gfp); + if (!new) + return NULL; + + INIT_LIST_HEAD(&new->key_list); + new->key_id = key_id; + + return new; +} + +/* Free the shared key structure */ +void sctp_auth_shkey_free(struct sctp_shared_key *sh_key) +{ + BUG_ON(!list_empty(&sh_key->key_list)); + sctp_auth_key_put(sh_key->key); + sh_key->key = NULL; + kfree(sh_key); +} + +/* Destroy the entire key list. This is done during the + * association and endpoint free process. + */ +void sctp_auth_destroy_keys(struct list_head *keys) +{ + struct sctp_shared_key *ep_key; + struct sctp_shared_key *tmp; + + if (list_empty(keys)) + return; + + key_for_each_safe(ep_key, tmp, keys) { + list_del_init(&ep_key->key_list); + sctp_auth_shkey_free(ep_key); + } +} + +/* Compare two byte vectors as numbers. Return values + * are: + * 0 - vectors are equal + * < 0 - vector 1 is smaller than vector2 + * > 0 - vector 1 is greater than vector2 + * + * Algorithm is: + * This is performed by selecting the numerically smaller key vector... + * If the key vectors are equal as numbers but differ in length ... + * the shorter vector is considered smaller + * + * Examples (with small values): + * 000123456789 > 123456789 (first number is longer) + * 000123456789 < 234567891 (second number is larger numerically) + * 123456789 > 2345678 (first number is both larger & longer) + */ +static int sctp_auth_compare_vectors(struct sctp_auth_bytes *vector1, + struct sctp_auth_bytes *vector2) +{ + int diff; + int i; + const __u8 *longer; + + diff = vector1->len - vector2->len; + if (diff) { + longer = (diff > 0) ? vector1->data : vector2->data; + + /* Check to see if the longer number is + * lead-zero padded. If it is not, it + * is automatically larger numerically. + */ + for (i = 0; i < abs(diff); i++ ) { + if (longer[i] != 0) + return diff; + } + } + + /* lengths are the same, compare numbers */ + return memcmp(vector1->data, vector2->data, vector1->len); +} + +/* + * Create a key vector as described in SCTP-AUTH, Section 6.1 + * The RANDOM parameter, the CHUNKS parameter and the HMAC-ALGO + * parameter sent by each endpoint are concatenated as byte vectors. + * These parameters include the parameter type, parameter length, and + * the parameter value, but padding is omitted; all padding MUST be + * removed from this concatenation before proceeding with further + * computation of keys.
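The comparison rule above is easier to see outside the kernel. The following is a standalone userspace sketch of the documented rule (leading zero bytes are ignored; a vector that is equal as a number but shorter counts as smaller); the struct and function names are invented for the example and it is not a copy of sctp_auth_compare_vectors():

  #include <stdio.h>
  #include <string.h>

  struct byte_vec {
          size_t len;
          const unsigned char *data;
  };

  static int vec_cmp(struct byte_vec a, struct byte_vec b)
  {
          size_t alen = a.len, blen = b.len;
          const unsigned char *ap = a.data, *bp = b.data;
          int r;

          /* leading zero bytes do not change the numeric value */
          while (alen && *ap == 0) { ap++; alen--; }
          while (blen && *bp == 0) { bp++; blen--; }

          if (alen != blen)
                  return alen > blen ? 1 : -1;

          r = memcmp(ap, bp, alen);
          if (r)
                  return r;

          /* equal as numbers: the shorter original vector is "smaller" */
          if (a.len != b.len)
                  return a.len > b.len ? 1 : -1;
          return 0;
  }

  int main(void)
  {
          unsigned char x[] = { 0x00, 0x00, 0x12 };       /* 000012 */
          unsigned char y[] = { 0x12 };                   /* 12     */
          struct byte_vec a = { sizeof(x), x };
          struct byte_vec b = { sizeof(y), y };

          /* same value, but x is longer, so it compares greater */
          printf("%d\n", vec_cmp(a, b) > 0);
          return 0;
  }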
Parameters which were not sent are simply + * omitted from the concatenation process. The resulting two vectors + * are called the two key vectors. + */ +static struct sctp_auth_bytes *sctp_auth_make_key_vector( + sctp_random_param_t *random, + sctp_chunks_param_t *chunks, + sctp_hmac_algo_param_t *hmacs, + gfp_t gfp) +{ + struct sctp_auth_bytes *new; + __u32 len; + __u32 offset = 0; + + len = ntohs(random->param_hdr.length) + ntohs(hmacs->param_hdr.length); + if (chunks) + len += ntohs(chunks->param_hdr.length); + + new = kmalloc(sizeof(struct sctp_auth_bytes) + len, gfp); + if (!new) + return NULL; + + new->len = len; + + memcpy(new->data, random, ntohs(random->param_hdr.length)); + offset += ntohs(random->param_hdr.length); + + if (chunks) { + memcpy(new->data + offset, chunks, + ntohs(chunks->param_hdr.length)); + offset += ntohs(chunks->param_hdr.length); + } + + memcpy(new->data + offset, hmacs, ntohs(hmacs->param_hdr.length)); + + return new; +} + + +/* Make a key vector based on our local parameters */ +struct sctp_auth_bytes *sctp_auth_make_local_vector( + const struct sctp_association *asoc, + gfp_t gfp) +{ + return sctp_auth_make_key_vector( + (sctp_random_param_t*)asoc->c.auth_random, + (sctp_chunks_param_t*)asoc->c.auth_chunks, + (sctp_hmac_algo_param_t*)asoc->c.auth_hmacs, + gfp); +} + +/* Make a key vector based on peer's parameters */ +struct sctp_auth_bytes *sctp_auth_make_peer_vector( + const struct sctp_association *asoc, + gfp_t gfp) +{ + return sctp_auth_make_key_vector(asoc->peer.peer_random, + asoc->peer.peer_chunks, + asoc->peer.peer_hmacs, + gfp); +} + + +/* Set the value of the association shared key base on the parameters + * given. The algorithm is: + * From the endpoint pair shared keys and the key vectors the + * association shared keys are computed. This is performed by selecting + * the numerically smaller key vector and concatenating it to the + * endpoint pair shared key, and then concatenating the numerically + * larger key vector to that. The result of the concatenation is the + * association shared key. + */ +static struct sctp_auth_bytes *sctp_auth_asoc_set_secret( + struct sctp_shared_key *ep_key, + struct sctp_auth_bytes *first_vector, + struct sctp_auth_bytes *last_vector, + gfp_t gfp) +{ + struct sctp_auth_bytes *secret; + __u32 offset = 0; + __u32 auth_len; + + auth_len = first_vector->len + last_vector->len; + if (ep_key->key) + auth_len += ep_key->key->len; + + secret = sctp_auth_create_key(auth_len, gfp); + if (!secret) + return NULL; + + if (ep_key->key) { + memcpy(secret->data, ep_key->key->data, ep_key->key->len); + offset += ep_key->key->len; + } + + memcpy(secret->data + offset, first_vector->data, first_vector->len); + offset += first_vector->len; + + memcpy(secret->data + offset, last_vector->data, last_vector->len); + + return secret; +} + +/* Create an association shared key. Follow the algorithm + * described in SCTP-AUTH, Section 6.1 + */ +static struct sctp_auth_bytes *sctp_auth_asoc_create_secret( + const struct sctp_association *asoc, + struct sctp_shared_key *ep_key, + gfp_t gfp) +{ + struct sctp_auth_bytes *local_key_vector; + struct sctp_auth_bytes *peer_key_vector; + struct sctp_auth_bytes *first_vector, + *last_vector; + struct sctp_auth_bytes *secret = NULL; + int cmp; + + + /* Now we need to build the key vectors + * SCTP-AUTH , Section 6.1 + * The RANDOM parameter, the CHUNKS parameter and the HMAC-ALGO + * parameter sent by each endpoint are concatenated as byte vectors. 
+ * These parameters include the parameter type, parameter length, and + * the parameter value, but padding is omitted; all padding MUST be + * removed from this concatenation before proceeding with further + * computation of keys. Parameters which were not sent are simply + * omitted from the concatenation process. The resulting two vectors + * are called the two key vectors. + */ + + local_key_vector = sctp_auth_make_local_vector(asoc, gfp); + peer_key_vector = sctp_auth_make_peer_vector(asoc, gfp); + + if (!peer_key_vector || !local_key_vector) + goto out; + + /* Figure out the order in wich the key_vectors will be + * added to the endpoint shared key. + * SCTP-AUTH, Section 6.1: + * This is performed by selecting the numerically smaller key + * vector and concatenating it to the endpoint pair shared + * key, and then concatenating the numerically larger key + * vector to that. If the key vectors are equal as numbers + * but differ in length, then the concatenation order is the + * endpoint shared key, followed by the shorter key vector, + * followed by the longer key vector. Otherwise, the key + * vectors are identical, and may be concatenated to the + * endpoint pair key in any order. + */ + cmp = sctp_auth_compare_vectors(local_key_vector, + peer_key_vector); + if (cmp < 0) { + first_vector = local_key_vector; + last_vector = peer_key_vector; + } else { + first_vector = peer_key_vector; + last_vector = local_key_vector; + } + + secret = sctp_auth_asoc_set_secret(ep_key, first_vector, last_vector, + gfp); +out: + kfree(local_key_vector); + kfree(peer_key_vector); + + return secret; +} + +/* + * Populate the association overlay list with the list + * from the endpoint. + */ +int sctp_auth_asoc_copy_shkeys(const struct sctp_endpoint *ep, + struct sctp_association *asoc, + gfp_t gfp) +{ + struct sctp_shared_key *sh_key; + struct sctp_shared_key *new; + + BUG_ON(!list_empty(&asoc->endpoint_shared_keys)); + + key_for_each(sh_key, &ep->endpoint_shared_keys) { + new = sctp_auth_shkey_create(sh_key->key_id, gfp); + if (!new) + goto nomem; + + new->key = sh_key->key; + sctp_auth_key_hold(new->key); + list_add(&new->key_list, &asoc->endpoint_shared_keys); + } + + return 0; + +nomem: + sctp_auth_destroy_keys(&asoc->endpoint_shared_keys); + return -ENOMEM; +} + + +/* Public interface to creat the association shared key. + * See code above for the algorithm. + */ +int sctp_auth_asoc_init_active_key(struct sctp_association *asoc, gfp_t gfp) +{ + struct sctp_auth_bytes *secret; + struct sctp_shared_key *ep_key; + + /* If we don't support AUTH, or peer is not capable + * we don't need to do anything. + */ + if (!sctp_auth_enable || !asoc->peer.auth_capable) + return 0; + + /* If the key_id is non-zero and we couldn't find an + * endpoint pair shared key, we can't compute the + * secret. + * For key_id 0, endpoint pair shared key is a NULL key. 
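Once the smaller key vector has been chosen, the association shared key is just a concatenation. Below is a minimal userspace sketch of that step, assuming the comparison has already been done; the function name and layout are invented for the example:

  #include <stdlib.h>
  #include <string.h>

  /* secret = endpoint-pair key || smaller key vector || larger key vector */
  unsigned char *make_secret(const unsigned char *ep_key, size_t ep_len,
                             const unsigned char *first, size_t first_len,
                             const unsigned char *last, size_t last_len,
                             size_t *out_len)
  {
          unsigned char *secret;
          size_t off = 0;

          *out_len = ep_len + first_len + last_len;
          secret = malloc(*out_len);
          if (!secret)
                  return NULL;

          memcpy(secret + off, ep_key, ep_len);
          off += ep_len;
          memcpy(secret + off, first, first_len);
          off += first_len;
          memcpy(secret + off, last, last_len);
          return secret;
  }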
+ */ + ep_key = sctp_auth_get_shkey(asoc, asoc->active_key_id); + BUG_ON(!ep_key); + + secret = sctp_auth_asoc_create_secret(asoc, ep_key, gfp); + if (!secret) + return -ENOMEM; + + sctp_auth_key_put(asoc->asoc_shared_key); + asoc->asoc_shared_key = secret; + + return 0; +} + + +/* Find the endpoint pair shared key based on the key_id */ +struct sctp_shared_key *sctp_auth_get_shkey( + const struct sctp_association *asoc, + __u16 key_id) +{ + struct sctp_shared_key *key = NULL; + + /* First search the association's set of endpoint pair shared keys */ + key_for_each(key, &asoc->endpoint_shared_keys) { + if (key->key_id == key_id) + break; + } + + return key; +} + +/* + * Initialize all the possible digest transforms that we can use. Right now, + * the supported digests are SHA1 and SHA256. We do this here once + * because of the restriction that transforms may only be allocated in + * user context. This forces us to pre-allocate all possible transforms + * at the endpoint init time. + */ +int sctp_auth_init_hmacs(struct sctp_endpoint *ep, gfp_t gfp) +{ + struct crypto_hash *tfm = NULL; + __u16 id; + + /* if the transforms are already allocated, we are done */ + if (!sctp_auth_enable) { + ep->auth_hmacs = NULL; + return 0; + } + + if (ep->auth_hmacs) + return 0; + + /* Allocate the array of pointers to transforms */ + ep->auth_hmacs = kzalloc( + sizeof(struct crypto_hash *) * SCTP_AUTH_NUM_HMACS, + gfp); + if (!ep->auth_hmacs) + return -ENOMEM; + + for (id = 0; id < SCTP_AUTH_NUM_HMACS; id++) { + + /* See if we support the id. Supported IDs have name and + * length fields set, so that we can allocate and use + * them. We can safely just check for name, for without the + * name, we can't allocate the TFM. + */ + if (!sctp_hmac_list[id].hmac_name) + continue; + + /* If this TFM has been allocated, we are all set */ + if (ep->auth_hmacs[id]) + continue; + + /* Allocate the ID */ + tfm = crypto_alloc_hash(sctp_hmac_list[id].hmac_name, 0, + CRYPTO_ALG_ASYNC); + if (IS_ERR(tfm)) + goto out_err; + + ep->auth_hmacs[id] = tfm; + } + + return 0; + +out_err: + /* Clean up any successful allocations */ + sctp_auth_destroy_hmacs(ep->auth_hmacs); + return -ENOMEM; +} + +/* Destroy the hmac tfm array */ +void sctp_auth_destroy_hmacs(struct crypto_hash *auth_hmacs[]) +{ + int i; + + if (!auth_hmacs) + return; + + for (i = 0; i < SCTP_AUTH_NUM_HMACS; i++) + { + if (auth_hmacs[i]) + crypto_free_hash(auth_hmacs[i]); + } + kfree(auth_hmacs); +} + + +struct sctp_hmac *sctp_auth_get_hmac(__u16 hmac_id) +{ + return &sctp_hmac_list[hmac_id]; +} + +/* Get the hmac description information that we can use to build + * the AUTH chunk + */ +struct sctp_hmac *sctp_auth_asoc_get_hmac(const struct sctp_association *asoc) +{ + struct sctp_hmac_algo_param *hmacs; + __u16 n_elt; + __u16 id = 0; + int i; + + /* If we have a default entry, use it */ + if (asoc->default_hmac_id) + return &sctp_hmac_list[asoc->default_hmac_id]; + + /* Since we do not have a default entry, find the first entry + * we support and return that. Do not cache that id. + */ + hmacs = asoc->peer.peer_hmacs; + if (!hmacs) + return NULL; + + n_elt = (ntohs(hmacs->param_hdr.length) - sizeof(sctp_paramhdr_t)) >> 1; + for (i = 0; i < n_elt; i++) { + id = ntohs(hmacs->hmac_ids[i]); + + /* Check the id is in the supported range */ + if (id > SCTP_AUTH_HMAC_ID_MAX) + continue; + + /* See if we support the id. Supported IDs have name and + * length fields set, so that we can allocate and use + * them.
We can safely just check for name, for without the + * name, we can't allocate the TFM. + */ + if (!sctp_hmac_list[id].hmac_name) + continue; + + break; + } + + if (id == 0) + return NULL; + + return &sctp_hmac_list[id]; +} + +static int __sctp_auth_find_hmacid(__u16 *hmacs, int n_elts, __u16 hmac_id) +{ + int found = 0; + int i; + + for (i = 0; i < n_elts; i++) { + if (hmac_id == hmacs[i]) { + found = 1; + break; + } + } + + return found; +} + +/* See if the HMAC_ID is one that we claim as supported */ +int sctp_auth_asoc_verify_hmac_id(const struct sctp_association *asoc, + __u16 hmac_id) +{ + struct sctp_hmac_algo_param *hmacs; + __u16 n_elt; + + if (!asoc) + return 0; + + hmacs = (struct sctp_hmac_algo_param *)asoc->c.auth_hmacs; + n_elt = (ntohs(hmacs->param_hdr.length) - sizeof(sctp_paramhdr_t)) >> 1; + + return __sctp_auth_find_hmacid(hmacs->hmac_ids, n_elt, hmac_id); +} + + +/* Cache the default HMAC id. This to follow this text from SCTP-AUTH: + * Section 6.1: + * The receiver of a HMAC-ALGO parameter SHOULD use the first listed + * algorithm it supports. + */ +void sctp_auth_asoc_set_default_hmac(struct sctp_association *asoc, + struct sctp_hmac_algo_param *hmacs) +{ + struct sctp_endpoint *ep; + __u16 id; + int i; + int n_params; + + /* if the default id is already set, use it */ + if (asoc->default_hmac_id) + return; + + n_params = (ntohs(hmacs->param_hdr.length) + - sizeof(sctp_paramhdr_t)) >> 1; + ep = asoc->ep; + for (i = 0; i < n_params; i++) { + id = ntohs(hmacs->hmac_ids[i]); + + /* Check the id is in the supported range */ + if (id > SCTP_AUTH_HMAC_ID_MAX) + continue; + + /* If this TFM has been allocated, use this id */ + if (ep->auth_hmacs[id]) { + asoc->default_hmac_id = id; + break; + } + } +} + + +/* Check to see if the given chunk is supposed to be authenticated */ +static int __sctp_auth_cid(sctp_cid_t chunk, struct sctp_chunks_param *param) +{ + unsigned short len; + int found = 0; + int i; + + if (!param) + return 0; + + len = ntohs(param->param_hdr.length) - sizeof(sctp_paramhdr_t); + + /* SCTP-AUTH, Section 3.2 + * The chunk types for INIT, INIT-ACK, SHUTDOWN-COMPLETE and AUTH + * chunks MUST NOT be listed in the CHUNKS parameter. However, if + * a CHUNKS parameter is received then the types for INIT, INIT-ACK, + * SHUTDOWN-COMPLETE and AUTH chunks MUST be ignored. + */ + for (i = 0; !found && i < len; i++) { + switch (param->chunks[i]) { + case SCTP_CID_INIT: + case SCTP_CID_INIT_ACK: + case SCTP_CID_SHUTDOWN_COMPLETE: + case SCTP_CID_AUTH: + break; + + default: + if (param->chunks[i] == chunk) + found = 1; + break; + } + } + + return found; +} + +/* Check if peer requested that this chunk is authenticated */ +int sctp_auth_send_cid(sctp_cid_t chunk, const struct sctp_association *asoc) +{ + if (!sctp_auth_enable || !asoc || !asoc->peer.auth_capable) + return 0; + + return __sctp_auth_cid(chunk, asoc->peer.peer_chunks); +} + +/* Check if we requested that peer authenticate this chunk. */ +int sctp_auth_recv_cid(sctp_cid_t chunk, const struct sctp_association *asoc) +{ + if (!sctp_auth_enable || !asoc) + return 0; + + return __sctp_auth_cid(chunk, + (struct sctp_chunks_param *)asoc->c.auth_chunks); +} + +/* SCTP-AUTH: Section 6.2: + * The sender MUST calculate the MAC as described in RFC2104 [2] using + * the hash function H as described by the MAC Identifier and the shared + * association key K based on the endpoint pair shared key described by + * the shared key identifier. 
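The CHUNKS-parameter check described in Section 3.2 can be mirrored in a few lines of userspace C. The chunk type values below are taken from the SCTP RFCs and are assumptions of this example, not something defined in the hunks above:

  #include <stdint.h>

  /* Chunk type values from RFC 4960/4895, repeated here so the example
   * stands alone. */
  enum {
          CID_INIT              = 1,
          CID_INIT_ACK          = 2,
          CID_SHUTDOWN_COMPLETE = 14,
          CID_AUTH              = 15,
  };

  /* Returns 1 if 'chunk' is listed in the peer's CHUNKS parameter and must
   * therefore arrive only after an AUTH chunk.  INIT, INIT-ACK,
   * SHUTDOWN-COMPLETE and AUTH are ignored even if they were listed. */
  int chunk_needs_auth(uint8_t chunk, const uint8_t *list, int len)
  {
          int i;

          for (i = 0; i < len; i++) {
                  switch (list[i]) {
                  case CID_INIT:
                  case CID_INIT_ACK:
                  case CID_SHUTDOWN_COMPLETE:
                  case CID_AUTH:
                          break;
                  default:
                          if (list[i] == chunk)
                                  return 1;
                  }
          }
          return 0;
  }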
The 'data' used for the computation of + * the AUTH-chunk is given by the AUTH chunk with its HMAC field set to + * zero (as shown in Figure 6) followed by all chunks that are placed + * after the AUTH chunk in the SCTP packet. + */ +void sctp_auth_calculate_hmac(const struct sctp_association *asoc, + struct sk_buff *skb, + struct sctp_auth_chunk *auth, + gfp_t gfp) +{ + struct scatterlist sg; + struct hash_desc desc; + struct sctp_auth_bytes *asoc_key; + __u16 key_id, hmac_id; + __u8 *digest; + unsigned char *end; + int free_key = 0; + + /* Extract the info we need: + * - hmac id + * - key id + */ + key_id = ntohs(auth->auth_hdr.shkey_id); + hmac_id = ntohs(auth->auth_hdr.hmac_id); + + if (key_id == asoc->active_key_id) + asoc_key = asoc->asoc_shared_key; + else { + struct sctp_shared_key *ep_key; + + ep_key = sctp_auth_get_shkey(asoc, key_id); + if (!ep_key) + return; + + asoc_key = sctp_auth_asoc_create_secret(asoc, ep_key, gfp); + if (!asoc_key) + return; + + free_key = 1; + } + + /* set up scatter list */ + end = skb_tail_pointer(skb); + sg.page = virt_to_page(auth); + sg.offset = (unsigned long)(auth) % PAGE_SIZE; + sg.length = end - (unsigned char *)auth; + + desc.tfm = asoc->ep->auth_hmacs[hmac_id]; + desc.flags = 0; + + digest = auth->auth_hdr.hmac; + if (crypto_hash_setkey(desc.tfm, &asoc_key->data[0], asoc_key->len)) + goto free; + + crypto_hash_digest(&desc, &sg, sg.length, digest); + +free: + if (free_key) + sctp_auth_key_put(asoc_key); +} + +/* API Helpers */ + +/* Add a chunk to the endpoint authenticated chunk list */ +int sctp_auth_ep_add_chunkid(struct sctp_endpoint *ep, __u8 chunk_id) +{ + struct sctp_chunks_param *p = ep->auth_chunk_list; + __u16 nchunks; + __u16 param_len; + + /* If this chunk is already specified, we are done */ + if (__sctp_auth_cid(chunk_id, p)) + return 0; + + /* Check if we can add this chunk to the array */ + param_len = ntohs(p->param_hdr.length); + nchunks = param_len - sizeof(sctp_paramhdr_t); + if (nchunks == SCTP_NUM_CHUNK_TYPES) + return -EINVAL; + + p->chunks[nchunks] = chunk_id; + p->param_hdr.length = htons(param_len + 1); + return 0; +} + +/* Add hmac identifires to the endpoint list of supported hmac ids */ +int sctp_auth_ep_set_hmacs(struct sctp_endpoint *ep, + struct sctp_hmacalgo *hmacs) +{ + int has_sha1 = 0; + __u16 id; + int i; + + /* Scan the list looking for unsupported id. Also make sure that + * SHA1 is specified. + */ + for (i = 0; i < hmacs->shmac_num_idents; i++) { + id = hmacs->shmac_idents[i]; + + if (SCTP_AUTH_HMAC_ID_SHA1 == id) + has_sha1 = 1; + + if (!sctp_hmac_list[id].hmac_name) + return -EOPNOTSUPP; + } + + if (!has_sha1) + return -EINVAL; + + memcpy(ep->auth_hmacs_list->hmac_ids, &hmacs->shmac_idents[0], + hmacs->shmac_num_idents * sizeof(__u16)); + ep->auth_hmacs_list->param_hdr.length = htons(sizeof(sctp_paramhdr_t) + + hmacs->shmac_num_idents * sizeof(__u16)); + return 0; +} + +/* Set a new shared key on either endpoint or association. If the + * the key with a same ID already exists, replace the key (remove the + * old key and add a new one). 
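As a rough userspace analogy of the Section 6.2 computation, the sketch below uses OpenSSL's HMAC() rather than the kernel crypto API: the AUTH chunk's HMAC field is zeroed, the digest is taken over the AUTH chunk plus everything that follows it, and the result is written back into the HMAC field. The offsets, key handling and the SHA-1 choice are assumptions of the example:

  #include <string.h>
  #include <openssl/evp.h>
  #include <openssl/hmac.h>

  /* 'auth_off' is the offset of the AUTH chunk inside 'pkt', 'hmac_off' the
   * offset of its HMAC field and 'hmac_len' the digest size (20 for SHA-1).
   * The digest covers the AUTH chunk with a zeroed HMAC field plus every
   * chunk placed after it, as the comment above describes. */
  void compute_auth_mac(unsigned char *pkt, size_t pkt_len,
                        size_t auth_off, size_t hmac_off, size_t hmac_len,
                        const unsigned char *key, int key_len)
  {
          unsigned char digest[EVP_MAX_MD_SIZE];
          unsigned int out_len = 0;

          memset(pkt + hmac_off, 0, hmac_len);    /* zero the HMAC field */
          HMAC(EVP_sha1(), key, key_len,
               pkt + auth_off, pkt_len - auth_off, digest, &out_len);
          memcpy(pkt + hmac_off, digest, hmac_len);
  }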
+ */ +int sctp_auth_set_key(struct sctp_endpoint *ep, + struct sctp_association *asoc, + struct sctp_authkey *auth_key) +{ + struct sctp_shared_key *cur_key = NULL; + struct sctp_auth_bytes *key; + struct list_head *sh_keys; + int replace = 0; + + /* Try to find the given key id to see if + * we are doing a replace, or adding a new key + */ + if (asoc) + sh_keys = &asoc->endpoint_shared_keys; + else + sh_keys = &ep->endpoint_shared_keys; + + key_for_each(cur_key, sh_keys) { + if (cur_key->key_id == auth_key->sca_keynumber) { + replace = 1; + break; + } + } + + /* If we are not replacing a key id, we need to allocate + * a shared key. + */ + if (!replace) { + cur_key = sctp_auth_shkey_create(auth_key->sca_keynumber, + GFP_KERNEL); + if (!cur_key) + return -ENOMEM; + } + + /* Create a new key data based on the info passed in */ + key = sctp_auth_create_key(auth_key->sca_keylen, GFP_KERNEL); + if (!key) + goto nomem; + + memcpy(key->data, &auth_key->sca_key[0], auth_key->sca_keylen); + + /* If we are replacing, remove the old keys data from the + * key id. If we are adding new key id, add it to the + * list. + */ + if (replace) + sctp_auth_key_put(cur_key->key); + else + list_add(&cur_key->key_list, sh_keys); + + cur_key->key = key; + sctp_auth_key_hold(key); + + return 0; +nomem: + if (!replace) + sctp_auth_shkey_free(cur_key); + + return -ENOMEM; +} + +int sctp_auth_set_active_key(struct sctp_endpoint *ep, + struct sctp_association *asoc, + __u16 key_id) +{ + struct sctp_shared_key *key; + struct list_head *sh_keys; + int found = 0; + + /* The key identifier MUST correst to an existing key */ + if (asoc) + sh_keys = &asoc->endpoint_shared_keys; + else + sh_keys = &ep->endpoint_shared_keys; + + key_for_each(key, sh_keys) { + if (key->key_id == key_id) { + found = 1; + break; + } + } + + if (!found) + return -EINVAL; + + if (asoc) { + asoc->active_key_id = key_id; + sctp_auth_asoc_init_active_key(asoc, GFP_KERNEL); + } else + ep->active_key_id = key_id; + + return 0; +} + +int sctp_auth_del_key_id(struct sctp_endpoint *ep, + struct sctp_association *asoc, + __u16 key_id) +{ + struct sctp_shared_key *key; + struct list_head *sh_keys; + int found = 0; + + /* The key identifier MUST NOT be the current active key + * The key identifier MUST correst to an existing key + */ + if (asoc) { + if (asoc->active_key_id == key_id) + return -EINVAL; + + sh_keys = &asoc->endpoint_shared_keys; + } else { + if (ep->active_key_id == key_id) + return -EINVAL; + + sh_keys = &ep->endpoint_shared_keys; + } + + key_for_each(key, sh_keys) { + if (key->key_id == key_id) { + found = 1; + break; + } + } + + if (!found) + return -EINVAL; + + /* Delete the shared key */ + list_del_init(&key->key_list); + sctp_auth_shkey_free(key); + + return 0; +} diff --git a/net/sctp/chunk.c b/net/sctp/chunk.c index 77fb7b06a9c4eb62d2836b8b93fd779f6be4dfc0..619d0f2dee5124ef5c2c31cb3f24aabb78b4e531 100644 --- a/net/sctp/chunk.c +++ b/net/sctp/chunk.c @@ -194,6 +194,18 @@ struct sctp_datamsg *sctp_datamsg_from_user(struct sctp_association *asoc, max = asoc->frag_point; + /* If the the peer requested that we authenticate DATA chunks + * we need to accound for bundling of the AUTH chunks along with + * DATA. 
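The space reserved for a bundled AUTH chunk in the fragmentation point above is simple arithmetic. The sketch below assumes an 8-byte AUTH chunk header (chunk header plus shared-key and HMAC identifiers) and a 20-byte HMAC-SHA1 digest; the numbers are illustrative only:

  #include <stdio.h>

  #define WORD_ROUND(s) (((s) + 3UL) & ~3UL)

  int main(void)
  {
          unsigned long frag_point = 1452; /* e.g. 1500-byte MTU minus IP/SCTP headers */
          unsigned long auth_hdr   = 8;    /* chunk header + shared-key/HMAC ids (assumed) */
          unsigned long hmac_len   = 20;   /* HMAC-SHA1 digest */

          frag_point -= WORD_ROUND(auth_hdr + hmac_len);
          printf("DATA payload left per packet: %lu bytes\n", frag_point);
          return 0;
  }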
+ */ + if (sctp_auth_send_cid(SCTP_CID_DATA, asoc)) { + struct sctp_hmac *hmac_desc = sctp_auth_asoc_get_hmac(asoc); + + if (hmac_desc) + max -= WORD_ROUND(sizeof(sctp_auth_chunk_t) + + hmac_desc->hmac_len); + } + whole = 0; first_len = max; diff --git a/net/sctp/endpointola.c b/net/sctp/endpointola.c index 8f485a0d14bd3f7c0c533d1b07a9056895e501e7..2d2d81ef4a69011d89e3dcc9ecdc15ce35dc806b 100644 --- a/net/sctp/endpointola.c +++ b/net/sctp/endpointola.c @@ -69,12 +69,56 @@ static struct sctp_endpoint *sctp_endpoint_init(struct sctp_endpoint *ep, struct sock *sk, gfp_t gfp) { + struct sctp_hmac_algo_param *auth_hmacs = NULL; + struct sctp_chunks_param *auth_chunks = NULL; + struct sctp_shared_key *null_key; + int err; + memset(ep, 0, sizeof(struct sctp_endpoint)); ep->digest = kzalloc(SCTP_SIGNATURE_SIZE, gfp); if (!ep->digest) return NULL; + if (sctp_auth_enable) { + /* Allocate space for HMACS and CHUNKS authentication + * variables. There are arrays that we encode directly + * into parameters to make the rest of the operations easier. + */ + auth_hmacs = kzalloc(sizeof(sctp_hmac_algo_param_t) + + sizeof(__u16) * SCTP_AUTH_NUM_HMACS, gfp); + if (!auth_hmacs) + goto nomem; + + auth_chunks = kzalloc(sizeof(sctp_chunks_param_t) + + SCTP_NUM_CHUNK_TYPES, gfp); + if (!auth_chunks) + goto nomem; + + /* Initialize the HMACS parameter. + * SCTP-AUTH: Section 3.3 + * Every endpoint supporting SCTP chunk authentication MUST + * support the HMAC based on the SHA-1 algorithm. + */ + auth_hmacs->param_hdr.type = SCTP_PARAM_HMAC_ALGO; + auth_hmacs->param_hdr.length = + htons(sizeof(sctp_paramhdr_t) + 2); + auth_hmacs->hmac_ids[0] = htons(SCTP_AUTH_HMAC_ID_SHA1); + + /* Initialize the CHUNKS parameter */ + auth_chunks->param_hdr.type = SCTP_PARAM_CHUNKS; + + /* If the Add-IP functionality is enabled, we must + * authenticate, ASCONF and ASCONF-ACK chunks + */ + if (sctp_addip_enable) { + auth_chunks->chunks[0] = SCTP_CID_ASCONF; + auth_chunks->chunks[1] = SCTP_CID_ASCONF_ACK; + auth_chunks->param_hdr.length = + htons(sizeof(sctp_paramhdr_t) + 2); + } + } + /* Initialize the base structure. */ /* What type of endpoint are we? */ ep->base.type = SCTP_EP_TYPE_SOCKET; @@ -102,6 +146,7 @@ static struct sctp_endpoint *sctp_endpoint_init(struct sctp_endpoint *ep, /* Use SCTP specific send buffer space queues. */ ep->sndbuf_policy = sctp_sndbuf_policy; + sk->sk_write_space = sctp_write_space; sock_set_flag(sk, SOCK_USE_WRITE_QUEUE); @@ -113,7 +158,36 @@ static struct sctp_endpoint *sctp_endpoint_init(struct sctp_endpoint *ep, ep->last_key = ep->current_key = 0; ep->key_changed_at = jiffies; + /* SCTP-AUTH extensions*/ + INIT_LIST_HEAD(&ep->endpoint_shared_keys); + null_key = sctp_auth_shkey_create(0, GFP_KERNEL); + if (!null_key) + goto nomem; + + list_add(&null_key->key_list, &ep->endpoint_shared_keys); + + /* Allocate and initialize transorms arrays for suported HMACs. */ + err = sctp_auth_init_hmacs(ep, gfp); + if (err) + goto nomem_hmacs; + + /* Add the null key to the endpoint shared keys list and + * set the hmcas and chunks pointers. + */ + ep->auth_hmacs_list = auth_hmacs; + ep->auth_chunk_list = auth_chunks; + return ep; + +nomem_hmacs: + sctp_auth_destroy_keys(&ep->endpoint_shared_keys); +nomem: + /* Free all allocations */ + kfree(auth_hmacs); + kfree(auth_chunks); + kfree(ep->digest); + return NULL; + } /* Create a sctp_endpoint with all that boring stuff initialized. 
@@ -186,6 +260,16 @@ static void sctp_endpoint_destroy(struct sctp_endpoint *ep) /* Free the digest buffer */ kfree(ep->digest); + /* SCTP-AUTH: Free up AUTH releated data such as shared keys + * chunks and hmacs arrays that were allocated + */ + sctp_auth_destroy_keys(&ep->endpoint_shared_keys); + kfree(ep->auth_hmacs_list); + kfree(ep->auth_chunk_list); + + /* AUTH - Free any allocated HMAC transform containers */ + sctp_auth_destroy_hmacs(ep->auth_hmacs); + /* Cleanup. */ sctp_inq_free(&ep->base.inqueue); sctp_bind_addr_free(&ep->base.bind_addr); @@ -316,6 +400,7 @@ static void sctp_endpoint_bh_rcv(struct work_struct *work) sctp_subtype_t subtype; sctp_state_t state; int error = 0; + int first_time = 1; /* is this the first time through the looop */ if (ep->base.dead) return; @@ -327,6 +412,29 @@ static void sctp_endpoint_bh_rcv(struct work_struct *work) while (NULL != (chunk = sctp_inq_pop(inqueue))) { subtype = SCTP_ST_CHUNK(chunk->chunk_hdr->type); + /* If the first chunk in the packet is AUTH, do special + * processing specified in Section 6.3 of SCTP-AUTH spec + */ + if (first_time && (subtype.chunk == SCTP_CID_AUTH)) { + struct sctp_chunkhdr *next_hdr; + + next_hdr = sctp_inq_peek(inqueue); + if (!next_hdr) + goto normal; + + /* If the next chunk is COOKIE-ECHO, skip the AUTH + * chunk while saving a pointer to it so we can do + * Authentication later (during cookie-echo + * processing). + */ + if (next_hdr->type == SCTP_CID_COOKIE_ECHO) { + chunk->auth_chunk = skb_clone(chunk->skb, + GFP_ATOMIC); + chunk->auth = 1; + continue; + } + } +normal: /* We might have grown an association since last we * looked, so try again. * @@ -342,6 +450,8 @@ static void sctp_endpoint_bh_rcv(struct work_struct *work) } state = asoc ? asoc->state : SCTP_STATE_CLOSED; + if (sctp_auth_recv_cid(subtype.chunk, asoc) && !chunk->auth) + continue; /* Remember where the last DATA chunk came from so we * know where to send the SACK. @@ -365,5 +475,8 @@ static void sctp_endpoint_bh_rcv(struct work_struct *work) */ if (!sctp_sk(sk)->ep) break; + + if (first_time) + first_time = 0; } } diff --git a/net/sctp/input.c b/net/sctp/input.c index f9a0c9276e3b55f8c1497beacba30c8426ae0d28..86503e7fa21e7fa55bcb54f1762dc5840c11cdfe 100644 --- a/net/sctp/input.c +++ b/net/sctp/input.c @@ -911,15 +911,6 @@ static struct sctp_association *__sctp_rcv_init_lookup(struct sk_buff *skb, ch = (sctp_chunkhdr_t *) skb->data; - /* If this is INIT/INIT-ACK look inside the chunk too. */ - switch (ch->type) { - case SCTP_CID_INIT: - case SCTP_CID_INIT_ACK: - break; - default: - return NULL; - } - /* The code below will attempt to walk the chunk and extract * parameter information. Before we do that, we need to verify * that the chunk length doesn't cause overflow. Otherwise, we'll @@ -964,6 +955,60 @@ static struct sctp_association *__sctp_rcv_init_lookup(struct sk_buff *skb, return NULL; } +/* SCTP-AUTH, Section 6.3: +* If the receiver does not find a STCB for a packet containing an AUTH +* chunk as the first chunk and not a COOKIE-ECHO chunk as the second +* chunk, it MUST use the chunks after the AUTH chunk to look up an existing +* association. +* +* This means that any chunks that can help us identify the association need +* to be looked at to find this assocation. +* +* TODO: The only chunk currently defined that can do that is ASCONF, but we +* don't support that functionality yet. 
+*/ +static struct sctp_association *__sctp_rcv_auth_lookup(struct sk_buff *skb, + const union sctp_addr *paddr, + const union sctp_addr *laddr, + struct sctp_transport **transportp) +{ + /* XXX - walk through the chunks looking for something that can + * help us find the association. INIT, and INIT-ACK are not permitted. + * That leaves ASCONF, but we don't support that yet. + */ + return NULL; +} + +/* + * There are circumstances when we need to look inside the SCTP packet + * for information to help us find the association. Examples + * include looking inside of INIT/INIT-ACK chunks or after the AUTH + * chunks. + */ +static struct sctp_association *__sctp_rcv_lookup_harder(struct sk_buff *skb, + const union sctp_addr *paddr, + const union sctp_addr *laddr, + struct sctp_transport **transportp) +{ + sctp_chunkhdr_t *ch; + + ch = (sctp_chunkhdr_t *) skb->data; + + /* If this is INIT/INIT-ACK look inside the chunk too. */ + switch (ch->type) { + case SCTP_CID_INIT: + case SCTP_CID_INIT_ACK: + return __sctp_rcv_init_lookup(skb, laddr, transportp); + break; + + case SCTP_CID_AUTH: + return __sctp_rcv_auth_lookup(skb, paddr, laddr, transportp); + break; + } + + return NULL; +} + /* Lookup an association for an inbound skb. */ static struct sctp_association *__sctp_rcv_lookup(struct sk_buff *skb, const union sctp_addr *paddr, @@ -979,7 +1024,7 @@ static struct sctp_association *__sctp_rcv_lookup(struct sk_buff *skb, * parameters within the INIT or INIT-ACK. */ if (!asoc) - asoc = __sctp_rcv_init_lookup(skb, laddr, transportp); + asoc = __sctp_rcv_lookup_harder(skb, paddr, laddr, transportp); return asoc; } diff --git a/net/sctp/inqueue.c b/net/sctp/inqueue.c index e4ea7fdf36ed798c23d3b2dfb6e3f2a0e54e893b..f10fe7fbf24c425ca9609e46ed837d505a39552b 100644 --- a/net/sctp/inqueue.c +++ b/net/sctp/inqueue.c @@ -100,6 +100,25 @@ void sctp_inq_push(struct sctp_inq *q, struct sctp_chunk *chunk) q->immediate.func(&q->immediate); } +/* Peek at the next chunk on the inqeue. */ +struct sctp_chunkhdr *sctp_inq_peek(struct sctp_inq *queue) +{ + struct sctp_chunk *chunk; + sctp_chunkhdr_t *ch = NULL; + + chunk = queue->in_progress; + /* If there is no more chunks in this packet, say so */ + if (chunk->singleton || + chunk->end_of_packet || + chunk->pdiscard) + return NULL; + + ch = (sctp_chunkhdr_t *)chunk->chunk_end; + + return ch; +} + + /* Extract a chunk from an SCTP inqueue. 
* * WARNING: If you need to put the chunk on another queue, you need to diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c index 670fd2740b890bb6f2aeef51ddc7b05c07e4e9d0..9de3ddaa27685daf19d9795727c8d81d67fdc00c 100644 --- a/net/sctp/ipv6.c +++ b/net/sctp/ipv6.c @@ -493,7 +493,7 @@ static int sctp_v6_cmp_addr(const union sctp_addr *addr1, if (addr1->sa.sa_family != addr2->sa.sa_family) { if (addr1->sa.sa_family == AF_INET && addr2->sa.sa_family == AF_INET6 && - IPV6_ADDR_MAPPED == ipv6_addr_type(&addr2->v6.sin6_addr)) { + ipv6_addr_v4mapped(&addr2->v6.sin6_addr)) { if (addr2->v6.sin6_port == addr1->v4.sin_port && addr2->v6.sin6_addr.s6_addr32[3] == addr1->v4.sin_addr.s_addr) @@ -501,7 +501,7 @@ static int sctp_v6_cmp_addr(const union sctp_addr *addr1, } if (addr2->sa.sa_family == AF_INET && addr1->sa.sa_family == AF_INET6 && - IPV6_ADDR_MAPPED == ipv6_addr_type(&addr1->v6.sin6_addr)) { + ipv6_addr_v4mapped(&addr1->v6.sin6_addr)) { if (addr1->v6.sin6_port == addr2->v4.sin_port && addr1->v6.sin6_addr.s6_addr32[3] == addr2->v4.sin_addr.s_addr) @@ -631,7 +631,7 @@ static struct sock *sctp_v6_create_accept_sk(struct sock *sk, struct ipv6_pinfo *newnp, *np = inet6_sk(sk); struct sctp6_sock *newsctp6sk; - newsk = sk_alloc(PF_INET6, GFP_KERNEL, sk->sk_prot, 1); + newsk = sk_alloc(sk->sk_net, PF_INET6, GFP_KERNEL, sk->sk_prot, 1); if (!newsk) goto out; @@ -855,7 +855,7 @@ static int sctp_inet6_bind_verify(struct sctp_sock *opt, union sctp_addr *addr) if (type & IPV6_ADDR_LINKLOCAL) { if (!addr->v6.sin6_scope_id) return 0; - dev = dev_get_by_index(addr->v6.sin6_scope_id); + dev = dev_get_by_index(&init_net, addr->v6.sin6_scope_id); if (!dev) return 0; if (!ipv6_chk_addr(&addr->v6.sin6_addr, dev, 0)) { @@ -886,7 +886,7 @@ static int sctp_inet6_send_verify(struct sctp_sock *opt, union sctp_addr *addr) if (type & IPV6_ADDR_LINKLOCAL) { if (!addr->v6.sin6_scope_id) return 0; - dev = dev_get_by_index(addr->v6.sin6_scope_id); + dev = dev_get_by_index(&init_net, addr->v6.sin6_scope_id); if (!dev) return 0; dev_put(dev); diff --git a/net/sctp/objcnt.c b/net/sctp/objcnt.c index fcfb9d806de1137d3b5fb7e1e3beab4e79a94933..2cf6ad6ff8ce01a51645681792e453e13c063b2f 100644 --- a/net/sctp/objcnt.c +++ b/net/sctp/objcnt.c @@ -58,6 +58,7 @@ SCTP_DBG_OBJCNT(chunk); SCTP_DBG_OBJCNT(addr); SCTP_DBG_OBJCNT(ssnmap); SCTP_DBG_OBJCNT(datamsg); +SCTP_DBG_OBJCNT(keys); /* An array to make it easy to pretty print the debug information * to the proc fs. @@ -73,6 +74,7 @@ static sctp_dbg_objcnt_entry_t sctp_dbg_objcnt[] = { SCTP_DBG_OBJCNT_ENTRY(addr), SCTP_DBG_OBJCNT_ENTRY(ssnmap), SCTP_DBG_OBJCNT_ENTRY(datamsg), + SCTP_DBG_OBJCNT_ENTRY(keys), }; /* Callback from procfs to read out objcount information. 
diff --git a/net/sctp/output.c b/net/sctp/output.c index d85543def75474d1c55750c882e0253652d178e3..847639d542c01777fac96dba2badfa0e49cc47d6 100644 --- a/net/sctp/output.c +++ b/net/sctp/output.c @@ -79,7 +79,10 @@ struct sctp_packet *sctp_packet_config(struct sctp_packet *packet, packet->vtag = vtag; packet->has_cookie_echo = 0; packet->has_sack = 0; + packet->has_auth = 0; + packet->has_data = 0; packet->ipfragok = 0; + packet->auth = NULL; if (ecn_capable && sctp_packet_empty(packet)) { chunk = sctp_get_ecne_prepend(packet->transport->asoc); @@ -121,8 +124,11 @@ struct sctp_packet *sctp_packet_init(struct sctp_packet *packet, packet->vtag = 0; packet->has_cookie_echo = 0; packet->has_sack = 0; + packet->has_auth = 0; + packet->has_data = 0; packet->ipfragok = 0; packet->malloced = 0; + packet->auth = NULL; return packet; } @@ -181,6 +187,39 @@ sctp_xmit_t sctp_packet_transmit_chunk(struct sctp_packet *packet, return retval; } +/* Try to bundle an auth chunk into the packet. */ +static sctp_xmit_t sctp_packet_bundle_auth(struct sctp_packet *pkt, + struct sctp_chunk *chunk) +{ + struct sctp_association *asoc = pkt->transport->asoc; + struct sctp_chunk *auth; + sctp_xmit_t retval = SCTP_XMIT_OK; + + /* if we don't have an association, we can't do authentication */ + if (!asoc) + return retval; + + /* See if this is an auth chunk we are bundling or if + * auth is already bundled. + */ + if (chunk->chunk_hdr->type == SCTP_CID_AUTH || pkt->auth) + return retval; + + /* if the peer did not request this chunk to be authenticated, + * don't do it + */ + if (!chunk->auth) + return retval; + + auth = sctp_make_auth(asoc); + if (!auth) + return retval; + + retval = sctp_packet_append_chunk(pkt, auth); + + return retval; +} + /* Try to bundle a SACK with the packet. */ static sctp_xmit_t sctp_packet_bundle_sack(struct sctp_packet *pkt, struct sctp_chunk *chunk) @@ -227,12 +266,17 @@ sctp_xmit_t sctp_packet_append_chunk(struct sctp_packet *packet, SCTP_DEBUG_PRINTK("%s: packet:%p chunk:%p\n", __FUNCTION__, packet, chunk); - retval = sctp_packet_bundle_sack(packet, chunk); - psize = packet->size; + /* Try to bundle AUTH chunk */ + retval = sctp_packet_bundle_auth(packet, chunk); + if (retval != SCTP_XMIT_OK) + goto finish; + /* Try to bundle SACK chunk */ + retval = sctp_packet_bundle_sack(packet, chunk); if (retval != SCTP_XMIT_OK) goto finish; + psize = packet->size; pmtu = ((packet->transport->asoc) ? (packet->transport->asoc->pathmtu) : (packet->transport->pathmtu)); @@ -241,10 +285,16 @@ sctp_xmit_t sctp_packet_append_chunk(struct sctp_packet *packet, /* Decide if we need to fragment or resubmit later. */ if (too_big) { - /* Both control chunks and data chunks with TSNs are - * non-fragmentable. + /* It's OK to fragmet at IP level if any one of the following + * is true: + * 1. The packet is empty (meaning this chunk is greater + * the MTU) + * 2. The chunk we are adding is a control chunk + * 3. The packet doesn't have any data in it yet and data + * requires authentication. */ - if (sctp_packet_empty(packet) || !sctp_chunk_is_data(chunk)) { + if (sctp_packet_empty(packet) || !sctp_chunk_is_data(chunk) || + (!packet->has_data && chunk->auth)) { /* We no longer do re-fragmentation. * Just fragment at the IP layer, if we * actually hit this condition @@ -266,16 +316,31 @@ sctp_xmit_t sctp_packet_append_chunk(struct sctp_packet *packet, /* DATA is a special case since we must examine both rwnd and cwnd * before we send DATA. 
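The three cases listed in the comment above reduce to one boolean expression; a small standalone mirror of that decision, illustrative only:

  #include <stdbool.h>

  /* IP-level fragmentation is acceptable when the packet is otherwise empty,
   * when the chunk is a control chunk, or when an authenticated chunk would
   * be the first piece of data going into the packet. */
  bool ip_frag_ok(bool packet_empty, bool chunk_is_data,
                  bool packet_has_data, bool chunk_needs_auth)
  {
          return packet_empty || !chunk_is_data ||
                 (!packet_has_data && chunk_needs_auth);
  }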
*/ - if (sctp_chunk_is_data(chunk)) { + switch (chunk->chunk_hdr->type) { + case SCTP_CID_DATA: retval = sctp_packet_append_data(packet, chunk); /* Disallow SACK bundling after DATA. */ packet->has_sack = 1; + /* Disallow AUTH bundling after DATA */ + packet->has_auth = 1; + /* Let it be knows that packet has DATA in it */ + packet->has_data = 1; if (SCTP_XMIT_OK != retval) goto finish; - } else if (SCTP_CID_COOKIE_ECHO == chunk->chunk_hdr->type) + break; + case SCTP_CID_COOKIE_ECHO: packet->has_cookie_echo = 1; - else if (SCTP_CID_SACK == chunk->chunk_hdr->type) + break; + + case SCTP_CID_SACK: packet->has_sack = 1; + break; + + case SCTP_CID_AUTH: + packet->has_auth = 1; + packet->auth = chunk; + break; + } /* It is OK to send this chunk. */ list_add_tail(&chunk->list, &packet->chunk_list); @@ -303,6 +368,8 @@ int sctp_packet_transmit(struct sctp_packet *packet) int padding; /* How much padding do we need? */ __u8 has_data = 0; struct dst_entry *dst = tp->dst; + unsigned char *auth = NULL; /* pointer to auth in skb data */ + __u32 cksum_buf_len = sizeof(struct sctphdr); SCTP_DEBUG_PRINTK("%s: packet:%p\n", __FUNCTION__, packet); @@ -356,16 +423,6 @@ int sctp_packet_transmit(struct sctp_packet *packet) sh->vtag = htonl(packet->vtag); sh->checksum = 0; - /* 2) Calculate the Adler-32 checksum of the whole packet, - * including the SCTP common header and all the - * chunks. - * - * Note: Adler-32 is no longer applicable, as has been replaced - * by CRC32-C as described in . - */ - if (!(dst->dev->features & NETIF_F_NO_CSUM)) - crc32 = sctp_start_cksum((__u8 *)sh, sizeof(struct sctphdr)); - /** * 6.10 Bundling * @@ -416,14 +473,16 @@ int sctp_packet_transmit(struct sctp_packet *packet) if (padding) memset(skb_put(chunk->skb, padding), 0, padding); - if (dst->dev->features & NETIF_F_NO_CSUM) - memcpy(skb_put(nskb, chunk->skb->len), + /* if this is the auth chunk that we are adding, + * store pointer where it will be added and put + * the auth into the packet. + */ + if (chunk == packet->auth) + auth = skb_tail_pointer(nskb); + + cksum_buf_len += chunk->skb->len; + memcpy(skb_put(nskb, chunk->skb->len), chunk->skb->data, chunk->skb->len); - else - crc32 = sctp_update_copy_cksum(skb_put(nskb, - chunk->skb->len), - chunk->skb->data, - chunk->skb->len, crc32); SCTP_DEBUG_PRINTK("%s %p[%s] %s 0x%x, %s %d, %s %d, %s %d\n", "*** Chunk", chunk, @@ -445,9 +504,31 @@ int sctp_packet_transmit(struct sctp_packet *packet) sctp_chunk_free(chunk); } - /* Perform final transformation on checksum. */ - if (!(dst->dev->features & NETIF_F_NO_CSUM)) + /* SCTP-AUTH, Section 6.2 + * The sender MUST calculate the MAC as described in RFC2104 [2] + * using the hash function H as described by the MAC Identifier and + * the shared association key K based on the endpoint pair shared key + * described by the shared key identifier. The 'data' used for the + * computation of the AUTH-chunk is given by the AUTH chunk with its + * HMAC field set to zero (as shown in Figure 6) followed by all + * chunks that are placed after the AUTH chunk in the SCTP packet. + */ + if (auth) + sctp_auth_calculate_hmac(asoc, nskb, + (struct sctp_auth_chunk *)auth, + GFP_ATOMIC); + + /* 2) Calculate the Adler-32 checksum of the whole packet, + * including the SCTP common header and all the + * chunks. + * + * Note: Adler-32 is no longer applicable, as has been replaced + * by CRC32-C as described in . 
+ */ + if (!(dst->dev->features & NETIF_F_NO_CSUM)) { + crc32 = sctp_start_cksum((__u8 *)sh, cksum_buf_len); crc32 = sctp_end_cksum(crc32); + } /* 3) Put the resultant value into the checksum field in the * common header, and leave the rest of the bits unchanged. diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c index 3d036cdfae41574d254d85356cf9400166f93d0e..81b26c5ffd4b443f1780555ba940c82820acce1f 100644 --- a/net/sctp/protocol.c +++ b/net/sctp/protocol.c @@ -51,6 +51,8 @@ #include #include #include +#include +#include #include #include #include @@ -82,6 +84,10 @@ static struct sctp_af *sctp_af_v6_specific; struct kmem_cache *sctp_chunk_cachep __read_mostly; struct kmem_cache *sctp_bucket_cachep __read_mostly; +int sysctl_sctp_mem[3]; +int sysctl_sctp_rmem[3]; +int sysctl_sctp_wmem[3]; + /* Return the address of the control sock. */ struct sock *sctp_get_ctl_sock(void) { @@ -93,7 +99,7 @@ static __init int sctp_proc_init(void) { if (!proc_net_sctp) { struct proc_dir_entry *ent; - ent = proc_mkdir("net/sctp", NULL); + ent = proc_mkdir("sctp", init_net.proc_net); if (ent) { ent->owner = THIS_MODULE; proc_net_sctp = ent; @@ -126,7 +132,7 @@ static void sctp_proc_exit(void) if (proc_net_sctp) { proc_net_sctp = NULL; - remove_proc_entry("net/sctp", NULL); + remove_proc_entry("sctp", init_net.proc_net); } } @@ -173,7 +179,7 @@ static void sctp_get_local_addr_list(void) struct sctp_af *af; read_lock(&dev_base_lock); - for_each_netdev(dev) { + for_each_netdev(&init_net, dev) { __list_for_each(pos, &sctp_address_families) { af = list_entry(pos, struct sctp_af, list); af->copy_addrlist(&sctp_local_addr_list, dev); @@ -546,7 +552,7 @@ static struct sock *sctp_v4_create_accept_sk(struct sock *sk, { struct inet_sock *inet = inet_sk(sk); struct inet_sock *newinet; - struct sock *newsk = sk_alloc(PF_INET, GFP_KERNEL, sk->sk_prot, 1); + struct sock *newsk = sk_alloc(sk->sk_net, PF_INET, GFP_KERNEL, sk->sk_prot, 1); if (!newsk) goto out; @@ -987,6 +993,8 @@ SCTP_STATIC __init int sctp_init(void) int i; int status = -EINVAL; unsigned long goal; + unsigned long limit; + int max_share; int order; /* SCTP_DEBUG sanity check. */ @@ -1077,6 +1085,31 @@ SCTP_STATIC __init int sctp_init(void) /* Initialize handle used for association ids. */ idr_init(&sctp_assocs_id); + /* Set the pressure threshold to be a fraction of global memory that + * is up to 1/2 at 256 MB, decreasing toward zero with the amount of + * memory, with a floor of 128 pages. + * Note this initalizes the data in sctpv6_prot too + * Unabashedly stolen from tcp_init + */ + limit = min(num_physpages, 1UL<<(28-PAGE_SHIFT)) >> (20-PAGE_SHIFT); + limit = (limit * (num_physpages >> (20-PAGE_SHIFT))) >> (PAGE_SHIFT-11); + limit = max(limit, 128UL); + sysctl_sctp_mem[0] = limit / 4 * 3; + sysctl_sctp_mem[1] = limit; + sysctl_sctp_mem[2] = sysctl_sctp_mem[0] * 2; + + /* Set per-socket limits to no more than 1/128 the pressure threshold*/ + limit = (sysctl_sctp_mem[1]) << (PAGE_SHIFT - 7); + max_share = min(4UL*1024*1024, limit); + + sysctl_sctp_rmem[0] = PAGE_SIZE; /* give each asoc 1 page min */ + sysctl_sctp_rmem[1] = (1500 *(sizeof(struct sk_buff) + 1)); + sysctl_sctp_rmem[2] = max(sysctl_sctp_rmem[1], max_share); + + sysctl_sctp_wmem[0] = SK_STREAM_MEM_QUANTUM; + sysctl_sctp_wmem[1] = 16*1024; + sysctl_sctp_wmem[2] = max(64*1024, max_share); + /* Size and allocate the association hash table. * The methodology is similar to that of the tcp hash tables. 
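The sysctl sizing in the sctp_init() hunk above can be checked with the same arithmetic in userspace. The PAGE_SHIFT value, the sk_buff overhead and the SK_STREAM_MEM_QUANTUM stand-in below are assumptions of the example:

  #include <stdio.h>

  int main(void)
  {
          unsigned long num_physpages = 262144;   /* e.g. 1 GB with 4 KB pages */
          unsigned long page_shift = 12, page_size = 1UL << page_shift;
          unsigned long skb_overhead = 256;       /* stand-in for sizeof(struct sk_buff) */
          unsigned long limit, max_share;
          unsigned long mem[3], rmem[3], wmem[3];

          limit = (num_physpages < (1UL << (28 - page_shift)) ?
                   num_physpages : (1UL << (28 - page_shift))) >> (20 - page_shift);
          limit = (limit * (num_physpages >> (20 - page_shift))) >> (page_shift - 11);
          if (limit < 128)
                  limit = 128;

          mem[0] = limit / 4 * 3;
          mem[1] = limit;
          mem[2] = mem[0] * 2;

          limit = mem[1] << (page_shift - 7);
          max_share = limit < 4UL * 1024 * 1024 ? limit : 4UL * 1024 * 1024;

          rmem[0] = page_size;
          rmem[1] = 1500 * (skb_overhead + 1);
          rmem[2] = rmem[1] > max_share ? rmem[1] : max_share;

          wmem[0] = page_size;                    /* stand-in for SK_STREAM_MEM_QUANTUM */
          wmem[1] = 16 * 1024;
          wmem[2] = 64 * 1024 > max_share ? 64 * 1024 : max_share;

          printf("sctp_mem:  %lu %lu %lu (pages)\n", mem[0], mem[1], mem[2]);
          printf("sctp_rmem: %lu %lu %lu (bytes)\n", rmem[0], rmem[1], rmem[2]);
          printf("sctp_wmem: %lu %lu %lu (bytes)\n", wmem[0], wmem[1], wmem[2]);
          return 0;
  }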
*/ @@ -1139,9 +1172,6 @@ SCTP_STATIC __init int sctp_init(void) sctp_port_hashtable[i].chain = NULL; } - spin_lock_init(&sctp_port_alloc_lock); - sctp_port_rover = sysctl_local_port_range[0] - 1; - printk(KERN_INFO "SCTP: Hash tables configured " "(established %d bind %d)\n", sctp_assoc_hashsize, sctp_port_hashsize); @@ -1152,6 +1182,9 @@ SCTP_STATIC __init int sctp_init(void) /* Enable PR-SCTP by default. */ sctp_prsctp_enable = 1; + /* Disable AUTH by default. */ + sctp_auth_enable = 0; + sctp_sysctl_register(); INIT_LIST_HEAD(&sctp_address_families); diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c index 23ae37ec871167d17c37ddbdbb9d4095e26a4575..f983a369d4e2fa1ef95d0644e0a5c43974e43f2e 100644 --- a/net/sctp/sm_make_chunk.c +++ b/net/sctp/sm_make_chunk.c @@ -179,6 +179,11 @@ struct sctp_chunk *sctp_make_init(const struct sctp_association *asoc, sctp_supported_addrs_param_t sat; __be16 types[2]; sctp_adaptation_ind_param_t aiparam; + sctp_supported_ext_param_t ext_param; + int num_ext = 0; + __u8 extensions[3]; + sctp_paramhdr_t *auth_chunks = NULL, + *auth_hmacs = NULL; /* RFC 2960 3.3.2 Initiation (INIT) (1) * @@ -202,11 +207,52 @@ struct sctp_chunk *sctp_make_init(const struct sctp_association *asoc, chunksize = sizeof(init) + addrs_len + SCTP_SAT_LEN(num_types); chunksize += sizeof(ecap_param); - if (sctp_prsctp_enable) + if (sctp_prsctp_enable) { chunksize += sizeof(prsctp_param); + extensions[num_ext] = SCTP_CID_FWD_TSN; + num_ext += 1; + } + /* ADDIP: Section 4.2.7: + * An implementation supporting this extension [ADDIP] MUST list + * the ASCONF,the ASCONF-ACK, and the AUTH chunks in its INIT and + * INIT-ACK parameters. + */ + if (sctp_addip_enable) { + extensions[num_ext] = SCTP_CID_ASCONF; + extensions[num_ext+1] = SCTP_CID_ASCONF_ACK; + num_ext += 2; + } + chunksize += sizeof(aiparam); chunksize += vparam_len; + /* Account for AUTH related parameters */ + if (sctp_auth_enable) { + /* Add random parameter length*/ + chunksize += sizeof(asoc->c.auth_random); + + /* Add HMACS parameter length if any were defined */ + auth_hmacs = (sctp_paramhdr_t *)asoc->c.auth_hmacs; + if (auth_hmacs->length) + chunksize += ntohs(auth_hmacs->length); + else + auth_hmacs = NULL; + + /* Add CHUNKS parameter length */ + auth_chunks = (sctp_paramhdr_t *)asoc->c.auth_chunks; + if (auth_chunks->length) + chunksize += ntohs(auth_chunks->length); + else + auth_hmacs = NULL; + + extensions[num_ext] = SCTP_CID_AUTH; + num_ext += 1; + } + + /* If we have any extensions to report, account for that */ + if (num_ext) + chunksize += sizeof(sctp_supported_ext_param_t) + num_ext; + /* RFC 2960 3.3.2 Initiation (INIT) (1) * * Note 3: An INIT chunk MUST NOT contain more than one Host @@ -241,12 +287,38 @@ struct sctp_chunk *sctp_make_init(const struct sctp_association *asoc, sctp_addto_chunk(retval, num_types * sizeof(__u16), &types); sctp_addto_chunk(retval, sizeof(ecap_param), &ecap_param); + + /* Add the supported extensions paramter. 
Be nice and add this + * fist before addiding the parameters for the extensions themselves + */ + if (num_ext) { + ext_param.param_hdr.type = SCTP_PARAM_SUPPORTED_EXT; + ext_param.param_hdr.length = + htons(sizeof(sctp_supported_ext_param_t) + num_ext); + sctp_addto_chunk(retval, sizeof(sctp_supported_ext_param_t), + &ext_param); + sctp_addto_chunk(retval, num_ext, extensions); + } + if (sctp_prsctp_enable) sctp_addto_chunk(retval, sizeof(prsctp_param), &prsctp_param); + aiparam.param_hdr.type = SCTP_PARAM_ADAPTATION_LAYER_IND; aiparam.param_hdr.length = htons(sizeof(aiparam)); aiparam.adaptation_ind = htonl(sp->adaptation_ind); sctp_addto_chunk(retval, sizeof(aiparam), &aiparam); + + /* Add SCTP-AUTH chunks to the parameter list */ + if (sctp_auth_enable) { + sctp_addto_chunk(retval, sizeof(asoc->c.auth_random), + asoc->c.auth_random); + if (auth_hmacs) + sctp_addto_chunk(retval, ntohs(auth_hmacs->length), + auth_hmacs); + if (auth_chunks) + sctp_addto_chunk(retval, ntohs(auth_chunks->length), + auth_chunks); + } nodata: kfree(addrs.v); return retval; @@ -264,6 +336,12 @@ struct sctp_chunk *sctp_make_init_ack(const struct sctp_association *asoc, int cookie_len; size_t chunksize; sctp_adaptation_ind_param_t aiparam; + sctp_supported_ext_param_t ext_param; + int num_ext = 0; + __u8 extensions[3]; + sctp_paramhdr_t *auth_chunks = NULL, + *auth_hmacs = NULL, + *auth_random = NULL; retval = NULL; @@ -294,11 +372,41 @@ struct sctp_chunk *sctp_make_init_ack(const struct sctp_association *asoc, chunksize += sizeof(ecap_param); /* Tell peer that we'll do PR-SCTP only if peer advertised. */ - if (asoc->peer.prsctp_capable) + if (asoc->peer.prsctp_capable) { chunksize += sizeof(prsctp_param); + extensions[num_ext] = SCTP_CID_FWD_TSN; + num_ext += 1; + } + + if (sctp_addip_enable) { + extensions[num_ext] = SCTP_CID_ASCONF; + extensions[num_ext+1] = SCTP_CID_ASCONF_ACK; + num_ext += 2; + } + chunksize += sizeof(ext_param) + num_ext; chunksize += sizeof(aiparam); + if (asoc->peer.auth_capable) { + auth_random = (sctp_paramhdr_t *)asoc->c.auth_random; + chunksize += ntohs(auth_random->length); + + auth_hmacs = (sctp_paramhdr_t *)asoc->c.auth_hmacs; + if (auth_hmacs->length) + chunksize += ntohs(auth_hmacs->length); + else + auth_hmacs = NULL; + + auth_chunks = (sctp_paramhdr_t *)asoc->c.auth_chunks; + if (auth_chunks->length) + chunksize += ntohs(auth_chunks->length); + else + auth_chunks = NULL; + + extensions[num_ext] = SCTP_CID_AUTH; + num_ext += 1; + } + /* Now allocate and fill out the chunk. 
*/ retval = sctp_make_chunk(asoc, SCTP_CID_INIT_ACK, 0, chunksize); if (!retval) @@ -314,6 +422,14 @@ struct sctp_chunk *sctp_make_init_ack(const struct sctp_association *asoc, sctp_addto_chunk(retval, cookie_len, cookie); if (asoc->peer.ecn_capable) sctp_addto_chunk(retval, sizeof(ecap_param), &ecap_param); + if (num_ext) { + ext_param.param_hdr.type = SCTP_PARAM_SUPPORTED_EXT; + ext_param.param_hdr.length = + htons(sizeof(sctp_supported_ext_param_t) + num_ext); + sctp_addto_chunk(retval, sizeof(sctp_supported_ext_param_t), + &ext_param); + sctp_addto_chunk(retval, num_ext, extensions); + } if (asoc->peer.prsctp_capable) sctp_addto_chunk(retval, sizeof(prsctp_param), &prsctp_param); @@ -322,6 +438,17 @@ struct sctp_chunk *sctp_make_init_ack(const struct sctp_association *asoc, aiparam.adaptation_ind = htonl(sctp_sk(asoc->base.sk)->adaptation_ind); sctp_addto_chunk(retval, sizeof(aiparam), &aiparam); + if (asoc->peer.auth_capable) { + sctp_addto_chunk(retval, ntohs(auth_random->length), + auth_random); + if (auth_hmacs) + sctp_addto_chunk(retval, ntohs(auth_hmacs->length), + auth_hmacs); + if (auth_chunks) + sctp_addto_chunk(retval, ntohs(auth_chunks->length), + auth_chunks); + } + /* We need to remove the const qualifier at this point. */ retval->asoc = (struct sctp_association *) asoc; @@ -839,6 +966,26 @@ struct sctp_chunk *sctp_make_abort_user(const struct sctp_association *asoc, return retval; } +/* Append bytes to the end of a parameter. Will panic if chunk is not big + * enough. + */ +static void *sctp_addto_param(struct sctp_chunk *chunk, int len, + const void *data) +{ + void *target; + int chunklen = ntohs(chunk->chunk_hdr->length); + + target = skb_put(chunk->skb, len); + + memcpy(target, data, len); + + /* Adjust the chunk length field. */ + chunk->chunk_hdr->length = htons(chunklen + len); + chunk->chunk_end = skb_tail_pointer(chunk->skb); + + return target; +} + /* Make an ABORT chunk with a PROTOCOL VIOLATION cause code. 
*/ struct sctp_chunk *sctp_make_abort_violation( const struct sctp_association *asoc, @@ -964,6 +1111,41 @@ struct sctp_chunk *sctp_make_op_error(const struct sctp_association *asoc, return retval; } +struct sctp_chunk *sctp_make_auth(const struct sctp_association *asoc) +{ + struct sctp_chunk *retval; + struct sctp_hmac *hmac_desc; + struct sctp_authhdr auth_hdr; + __u8 *hmac; + + /* Get the first hmac that the peer told us to use */ + hmac_desc = sctp_auth_asoc_get_hmac(asoc); + if (unlikely(!hmac_desc)) + return NULL; + + retval = sctp_make_chunk(asoc, SCTP_CID_AUTH, 0, + hmac_desc->hmac_len + sizeof(sctp_authhdr_t)); + if (!retval) + return NULL; + + auth_hdr.hmac_id = htons(hmac_desc->hmac_id); + auth_hdr.shkey_id = htons(asoc->active_key_id); + + retval->subh.auth_hdr = sctp_addto_chunk(retval, sizeof(sctp_authhdr_t), + &auth_hdr); + + hmac = skb_put(retval->skb, hmac_desc->hmac_len); + memset(hmac, 0, hmac_desc->hmac_len); + + /* Adjust the chunk header to include the empty MAC */ + retval->chunk_hdr->length = + htons(ntohs(retval->chunk_hdr->length) + hmac_desc->hmac_len); + retval->chunk_end = skb_tail_pointer(retval->skb); + + return retval; +} + + /******************************************************************** * 2nd Level Abstractions ********************************************************************/ @@ -1078,6 +1260,10 @@ struct sctp_chunk *sctp_make_chunk(const struct sctp_association *asoc, retval->chunk_hdr = chunk_hdr; retval->chunk_end = ((__u8 *)chunk_hdr) + sizeof(struct sctp_chunkhdr); + /* Determine if the chunk needs to be authenticated */ + if (sctp_auth_send_cid(type, asoc)) + retval->auth = 1; + /* Set the skb to the belonging sock for accounting. */ skb->sk = sk; @@ -1146,25 +1332,6 @@ void *sctp_addto_chunk(struct sctp_chunk *chunk, int len, const void *data) return target; } -/* Append bytes to the end of a parameter. Will panic if chunk is not big - * enough. - */ -void *sctp_addto_param(struct sctp_chunk *chunk, int len, const void *data) -{ - void *target; - int chunklen = ntohs(chunk->chunk_hdr->length); - - target = skb_put(chunk->skb, len); - - memcpy(target, data, len); - - /* Adjust the chunk length field. */ - chunk->chunk_hdr->length = htons(chunklen + len); - chunk->chunk_end = skb_tail_pointer(chunk->skb); - - return target; -} - /* Append bytes from user space to the end of a chunk. Will panic if * chunk is not big enough. * Returns a kernel err value. @@ -1663,6 +1830,35 @@ static int sctp_process_hn_param(const struct sctp_association *asoc, return 0; } +static void sctp_process_ext_param(struct sctp_association *asoc, + union sctp_params param) +{ + __u16 num_ext = ntohs(param.p->length) - sizeof(sctp_paramhdr_t); + int i; + + for (i = 0; i < num_ext; i++) { + switch (param.ext->chunks[i]) { + case SCTP_CID_FWD_TSN: + if (sctp_prsctp_enable && + !asoc->peer.prsctp_capable) + asoc->peer.prsctp_capable = 1; + break; + case SCTP_CID_AUTH: + /* if the peer reports AUTH, assume that he + * supports AUTH. + */ + asoc->peer.auth_capable = 1; + break; + case SCTP_CID_ASCONF: + case SCTP_CID_ASCONF_ACK: + asoc->peer.addip_capable = 1; + break; + default: + break; + } + } +} + /* RFC 3.2.1 & the Implementers Guide 2.2. 
* * The Parameter Types are encoded such that the @@ -1779,15 +1975,52 @@ static int sctp_verify_param(const struct sctp_association *asoc, case SCTP_PARAM_UNRECOGNIZED_PARAMETERS: case SCTP_PARAM_ECN_CAPABLE: case SCTP_PARAM_ADAPTATION_LAYER_IND: + case SCTP_PARAM_SUPPORTED_EXT: break; case SCTP_PARAM_HOST_NAME_ADDRESS: /* Tell the peer, we won't support this param. */ return sctp_process_hn_param(asoc, param, chunk, err_chunk); + case SCTP_PARAM_FWD_TSN_SUPPORT: if (sctp_prsctp_enable) break; + goto fallthrough; + + case SCTP_PARAM_RANDOM: + if (!sctp_auth_enable) + goto fallthrough; + + /* SCTP-AUTH: Secion 6.1 + * If the random number is not 32 byte long the association + * MUST be aborted. The ABORT chunk SHOULD contain the error + * cause 'Protocol Violation'. + */ + if (SCTP_AUTH_RANDOM_LENGTH != + ntohs(param.p->length) - sizeof(sctp_paramhdr_t)) + return sctp_process_inv_paramlength(asoc, param.p, + chunk, err_chunk); + break; + + case SCTP_PARAM_CHUNKS: + if (!sctp_auth_enable) + goto fallthrough; + + /* SCTP-AUTH: Section 3.2 + * The CHUNKS parameter MUST be included once in the INIT or + * INIT-ACK chunk if the sender wants to receive authenticated + * chunks. Its maximum length is 260 bytes. + */ + if (260 < ntohs(param.p->length)) + return sctp_process_inv_paramlength(asoc, param.p, + chunk, err_chunk); + break; + + case SCTP_PARAM_HMAC_ALGO: + if (!sctp_auth_enable) + break; /* Fall Through */ +fallthrough: default: SCTP_DEBUG_PRINTK("Unrecognized param: %d for chunk %d.\n", ntohs(param.p->type), cid); @@ -1892,13 +2125,29 @@ int sctp_process_init(struct sctp_association *asoc, sctp_cid_t cid, } /* Process the initialization parameters. */ - sctp_walk_params(param, peer_init, init_hdr.params) { if (!sctp_process_param(asoc, param, peer_addr, gfp)) goto clean_up; } + /* AUTH: After processing the parameters, make sure that we + * have all the required info to potentially do authentications. + */ + if (asoc->peer.auth_capable && (!asoc->peer.peer_random || + !asoc->peer.peer_hmacs)) + asoc->peer.auth_capable = 0; + + + /* If the peer claims support for ADD-IP without support + * for AUTH, disable support for ADD-IP. + */ + if (asoc->peer.addip_capable && !asoc->peer.auth_capable) { + asoc->peer.addip_disabled_mask |= (SCTP_PARAM_ADD_IP | + SCTP_PARAM_DEL_IP | + SCTP_PARAM_SET_PRIMARY); + } + /* Walk list of transports, removing transports in the UNKNOWN state. 
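[Editor's note: standalone sketch of the validation rules.] The new cases in sctp_verify_param() enforce two SCTP-AUTH limits: the RANDOM parameter must carry exactly 32 bytes of value, and the CHUNKS parameter may not exceed 260 bytes in total. The sketch below expresses those checks over a plain parameter TLV header; struct param_hdr and the helper names are illustrative.

#include <arpa/inet.h>
#include <stdbool.h>
#include <stdint.h>

#define AUTH_RANDOM_VALUE_LEN   32      /* SCTP-AUTH, Section 6.1 */
#define AUTH_CHUNKS_MAX_LEN     260     /* SCTP-AUTH, Section 3.2 */

struct param_hdr {
        uint16_t type;          /* network byte order */
        uint16_t length;        /* network byte order, includes this header */
};

static bool random_param_ok(const struct param_hdr *p)
{
        /* value length is total length minus the TLV header */
        return ntohs(p->length) - sizeof(*p) == AUTH_RANDOM_VALUE_LEN;
}

static bool chunks_param_ok(const struct param_hdr *p)
{
        return ntohs(p->length) <= AUTH_CHUNKS_MAX_LEN;
}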
*/ list_for_each_safe(pos, temp, &asoc->peer.transport_addr_list) { transport = list_entry(pos, struct sctp_transport, transports); @@ -2128,12 +2377,57 @@ static int sctp_process_param(struct sctp_association *asoc, asoc->peer.adaptation_ind = param.aind->adaptation_ind; break; + case SCTP_PARAM_SUPPORTED_EXT: + sctp_process_ext_param(asoc, param); + break; + case SCTP_PARAM_FWD_TSN_SUPPORT: if (sctp_prsctp_enable) { asoc->peer.prsctp_capable = 1; break; } /* Fall Through */ + goto fall_through; + + case SCTP_PARAM_RANDOM: + if (!sctp_auth_enable) + goto fall_through; + + /* Save peer's random parameter */ + asoc->peer.peer_random = kmemdup(param.p, + ntohs(param.p->length), gfp); + if (!asoc->peer.peer_random) { + retval = 0; + break; + } + break; + + case SCTP_PARAM_HMAC_ALGO: + if (!sctp_auth_enable) + goto fall_through; + + /* Save peer's HMAC list */ + asoc->peer.peer_hmacs = kmemdup(param.p, + ntohs(param.p->length), gfp); + if (!asoc->peer.peer_hmacs) { + retval = 0; + break; + } + + /* Set the default HMAC the peer requested*/ + sctp_auth_asoc_set_default_hmac(asoc, param.hmac_algo); + break; + + case SCTP_PARAM_CHUNKS: + if (!sctp_auth_enable) + goto fall_through; + + asoc->peer.peer_chunks = kmemdup(param.p, + ntohs(param.p->length), gfp); + if (!asoc->peer.peer_chunks) + retval = 0; + break; +fall_through: default: /* Any unrecognized parameters should have been caught * and handled by sctp_verify_param() which should be diff --git a/net/sctp/sm_sideeffect.c b/net/sctp/sm_sideeffect.c index 8d7890083493d9619c7899311bf9afb9ab6eb195..bbdc938da86fa37166866b27cd37fe2aa25efd05 100644 --- a/net/sctp/sm_sideeffect.c +++ b/net/sctp/sm_sideeffect.c @@ -1524,6 +1524,11 @@ static int sctp_cmd_interpreter(sctp_event_t event_type, sctp_cmd_adaptation_ind(commands, asoc); break; + case SCTP_CMD_ASSOC_SHKEY: + error = sctp_auth_asoc_init_active_key(asoc, + GFP_ATOMIC); + break; + default: printk(KERN_WARNING "Impossible command: %u, %p\n", cmd->verb, cmd->obj.ptr); diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c index a583d67cab63859439c5a38fb54133afc406a401..f01b408508ff6144b94851f1a67ae09c7a7953d4 100644 --- a/net/sctp/sm_statefuns.c +++ b/net/sctp/sm_statefuns.c @@ -138,6 +138,11 @@ static sctp_disposition_t sctp_sf_violation_chunk( void *arg, sctp_cmd_seq_t *commands); +static sctp_ierror_t sctp_sf_authenticate(const struct sctp_endpoint *ep, + const struct sctp_association *asoc, + const sctp_subtype_t type, + struct sctp_chunk *chunk); + /* Small helper function that checks if the chunk length * is of the appropriate length. The 'required_length' argument * is set to be the size of a specific chunk we are testing. @@ -495,8 +500,6 @@ sctp_disposition_t sctp_sf_do_5_1C_ack(const struct sctp_endpoint *ep, (sctp_init_chunk_t *)chunk->chunk_hdr, chunk, &err_chunk)) { - SCTP_INC_STATS(SCTP_MIB_ABORTEDS); - /* This chunk contains fatal error. It is to be discarded. * Send an ABORT, with causes if there is any. */ @@ -521,6 +524,22 @@ sctp_disposition_t sctp_sf_do_5_1C_ack(const struct sctp_endpoint *ep, sctp_sf_tabort_8_4_8(ep, asoc, type, arg, commands); error = SCTP_ERROR_INV_PARAM; } + + /* SCTP-AUTH, Section 6.3: + * It should be noted that if the receiver wants to tear + * down an association in an authenticated way only, the + * handling of malformed packets should not result in + * tearing down the association. 
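[Editor's note: userspace analogue, not the kernel helper.] sctp_process_param() above stashes the peer's RANDOM, HMAC-ALGO and CHUNKS parameters with kmemdup(), copying the whole TLV (header included) so the stored network-byte-order length can be reused later, and signalling failure by clearing the return value when the allocation fails. The sketch mirrors that pattern with malloc(); dup_param() is a hypothetical name.

#include <arpa/inet.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>

struct param_hdr {
        uint16_t type;          /* network byte order */
        uint16_t length;        /* network byte order, includes header */
};

static struct param_hdr *dup_param(const struct param_hdr *p)
{
        size_t len = ntohs(p->length);
        struct param_hdr *copy = malloc(len);

        if (!copy)
                return NULL;    /* caller treats this like "retval = 0" */
        memcpy(copy, p, len);   /* keep header and value verbatim */
        return copy;
}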
+ * + * This means that if we only want to abort associations + * in an authenticated way (i.e AUTH+ABORT), then we + * can't destory this association just becuase the packet + * was malformed. + */ + if (sctp_auth_recv_cid(SCTP_CID_ABORT, asoc)) + return sctp_sf_pdiscard(ep, asoc, type, arg, commands); + + SCTP_INC_STATS(SCTP_MIB_ABORTEDS); return sctp_stop_t1_and_abort(commands, error, ECONNREFUSED, asoc, chunk->transport); } @@ -549,6 +568,11 @@ sctp_disposition_t sctp_sf_do_5_1C_ack(const struct sctp_endpoint *ep, sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE, SCTP_STATE(SCTP_STATE_COOKIE_ECHOED)); + /* SCTP-AUTH: genereate the assocition shared keys so that + * we can potentially signe the COOKIE-ECHO. + */ + sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_SHKEY, SCTP_NULL()); + /* 5.1 C) "A" shall then send the State Cookie received in the * INIT ACK chunk in a COOKIE ECHO chunk, ... */ @@ -686,6 +710,44 @@ sctp_disposition_t sctp_sf_do_5_1D_ce(const struct sctp_endpoint *ep, peer_init, GFP_ATOMIC)) goto nomem_init; + /* SCTP-AUTH: Now that we've populate required fields in + * sctp_process_init, set up the assocaition shared keys as + * necessary so that we can potentially authenticate the ACK + */ + error = sctp_auth_asoc_init_active_key(new_asoc, GFP_ATOMIC); + if (error) + goto nomem_init; + + /* SCTP-AUTH: auth_chunk pointer is only set when the cookie-echo + * is supposed to be authenticated and we have to do delayed + * authentication. We've just recreated the association using + * the information in the cookie and now it's much easier to + * do the authentication. + */ + if (chunk->auth_chunk) { + struct sctp_chunk auth; + sctp_ierror_t ret; + + /* set-up our fake chunk so that we can process it */ + auth.skb = chunk->auth_chunk; + auth.asoc = chunk->asoc; + auth.sctp_hdr = chunk->sctp_hdr; + auth.chunk_hdr = (sctp_chunkhdr_t *)skb_push(chunk->auth_chunk, + sizeof(sctp_chunkhdr_t)); + skb_pull(chunk->auth_chunk, sizeof(sctp_chunkhdr_t)); + auth.transport = chunk->transport; + + ret = sctp_sf_authenticate(ep, new_asoc, type, &auth); + + /* We can now safely free the auth_chunk clone */ + kfree_skb(chunk->auth_chunk); + + if (ret != SCTP_IERROR_NO_ERROR) { + sctp_association_free(new_asoc); + return sctp_sf_pdiscard(ep, asoc, type, arg, commands); + } + } + repl = sctp_make_cookie_ack(new_asoc, chunk); if (!repl) goto nomem_init; @@ -1247,6 +1309,26 @@ static void sctp_tietags_populate(struct sctp_association *new_asoc, new_asoc->c.initial_tsn = asoc->c.initial_tsn; } +static void sctp_auth_params_populate(struct sctp_association *new_asoc, + const struct sctp_association *asoc) +{ + /* Only perform this if AUTH extension is enabled */ + if (!sctp_auth_enable) + return; + + /* We need to provide the same parameter information as + * was in the original INIT. This means that we need to copy + * the HMACS, CHUNKS, and RANDOM parameter from the original + * assocaition. + */ + memcpy(new_asoc->c.auth_random, asoc->c.auth_random, + sizeof(asoc->c.auth_random)); + memcpy(new_asoc->c.auth_hmacs, asoc->c.auth_hmacs, + sizeof(asoc->c.auth_hmacs)); + memcpy(new_asoc->c.auth_chunks, asoc->c.auth_chunks, + sizeof(asoc->c.auth_chunks)); +} + /* * Compare vtag/tietag values to determine unexpected COOKIE-ECHO * handling action. @@ -1404,6 +1486,8 @@ static sctp_disposition_t sctp_sf_do_unexpected_init( sctp_tietags_populate(new_asoc, asoc); + sctp_auth_params_populate(new_asoc, asoc); + /* B) "Z" shall respond immediately with an INIT ACK chunk. 
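[Editor's note: minimal sketch of the parameter carry-over.] sctp_auth_params_populate() above makes an association rebuilt for an unexpected COOKIE-ECHO advertise exactly the RANDOM, HMACS and CHUNKS parameters that were sent in the original INIT, by copying the fixed-size buffers kept in the cookie. The sketch below shows only that copy; the struct name and buffer sizes are placeholders, not the kernel's.

#include <string.h>

#define RANDOM_BUF      36      /* placeholder sizes, not the kernel's */
#define HMACS_BUF       40
#define CHUNKS_BUF      264

struct auth_params {
        unsigned char auth_random[RANDOM_BUF];
        unsigned char auth_hmacs[HMACS_BUF];
        unsigned char auth_chunks[CHUNKS_BUF];
};

static void auth_params_copy(struct auth_params *dst,
                             const struct auth_params *src)
{
        /* byte-for-byte copy so the retransmitted INIT-ACK matches */
        memcpy(dst->auth_random, src->auth_random, sizeof(dst->auth_random));
        memcpy(dst->auth_hmacs, src->auth_hmacs, sizeof(dst->auth_hmacs));
        memcpy(dst->auth_chunks, src->auth_chunks, sizeof(dst->auth_chunks));
}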
*/ /* If there are errors need to be reported for unknown parameters, @@ -3617,6 +3701,169 @@ sctp_disposition_t sctp_sf_eat_fwd_tsn_fast( return SCTP_DISPOSITION_CONSUME; } +/* + * SCTP-AUTH Section 6.3 Receving authenticated chukns + * + * The receiver MUST use the HMAC algorithm indicated in the HMAC + * Identifier field. If this algorithm was not specified by the + * receiver in the HMAC-ALGO parameter in the INIT or INIT-ACK chunk + * during association setup, the AUTH chunk and all chunks after it MUST + * be discarded and an ERROR chunk SHOULD be sent with the error cause + * defined in Section 4.1. + * + * If an endpoint with no shared key receives a Shared Key Identifier + * other than 0, it MUST silently discard all authenticated chunks. If + * the endpoint has at least one endpoint pair shared key for the peer, + * it MUST use the key specified by the Shared Key Identifier if a + * key has been configured for that Shared Key Identifier. If no + * endpoint pair shared key has been configured for that Shared Key + * Identifier, all authenticated chunks MUST be silently discarded. + * + * Verification Tag: 8.5 Verification Tag [Normal verification] + * + * The return value is the disposition of the chunk. + */ +static sctp_ierror_t sctp_sf_authenticate(const struct sctp_endpoint *ep, + const struct sctp_association *asoc, + const sctp_subtype_t type, + struct sctp_chunk *chunk) +{ + struct sctp_authhdr *auth_hdr; + struct sctp_hmac *hmac; + unsigned int sig_len; + __u16 key_id; + __u8 *save_digest; + __u8 *digest; + + /* Pull in the auth header, so we can do some more verification */ + auth_hdr = (struct sctp_authhdr *)chunk->skb->data; + chunk->subh.auth_hdr = auth_hdr; + skb_pull(chunk->skb, sizeof(struct sctp_authhdr)); + + /* Make sure that we suport the HMAC algorithm from the auth + * chunk. + */ + if (!sctp_auth_asoc_verify_hmac_id(asoc, auth_hdr->hmac_id)) + return SCTP_IERROR_AUTH_BAD_HMAC; + + /* Make sure that the provided shared key identifier has been + * configured + */ + key_id = ntohs(auth_hdr->shkey_id); + if (key_id != asoc->active_key_id && !sctp_auth_get_shkey(asoc, key_id)) + return SCTP_IERROR_AUTH_BAD_KEYID; + + + /* Make sure that the length of the signature matches what + * we expect. + */ + sig_len = ntohs(chunk->chunk_hdr->length) - sizeof(sctp_auth_chunk_t); + hmac = sctp_auth_get_hmac(ntohs(auth_hdr->hmac_id)); + if (sig_len != hmac->hmac_len) + return SCTP_IERROR_PROTO_VIOLATION; + + /* Now that we've done validation checks, we can compute and + * verify the hmac. The steps involved are: + * 1. Save the digest from the chunk. + * 2. Zero out the digest in the chunk. + * 3. Compute the new digest + * 4. Compare saved and new digests. 
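[Editor's note: standalone sketch of the four verification steps.] sctp_sf_authenticate() verifies a received AUTH chunk by saving the carried digest, zeroing the digest field in place, recomputing the HMAC over the packet and comparing the two. The function below follows the same order with plain buffers; compute_hmac_fn is a hypothetical callback standing in for sctp_auth_calculate_hmac(), which writes the recomputed digest back into the packet.

#include <stdbool.h>
#include <stdlib.h>
#include <string.h>

typedef void (*compute_hmac_fn)(const unsigned char *pkt, size_t pkt_len,
                                unsigned char *digest_out, size_t digest_len);

static bool verify_packet_hmac(unsigned char *pkt, size_t pkt_len,
                               unsigned char *digest, size_t digest_len,
                               compute_hmac_fn compute_hmac)
{
        unsigned char *saved;
        bool ok;

        /* 1. Save the digest carried in the packet. */
        saved = malloc(digest_len);
        if (!saved)
                return false;
        memcpy(saved, digest, digest_len);

        /* 2. Zero the digest field, as the sender did before signing. */
        memset(digest, 0, digest_len);

        /* 3. Recompute the HMAC over the packet into the digest field. */
        compute_hmac(pkt, pkt_len, digest, digest_len);

        /* 4. Compare the saved and recomputed digests. */
        ok = memcmp(saved, digest, digest_len) == 0;

        free(saved);
        return ok;
}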
+ */ + digest = auth_hdr->hmac; + skb_pull(chunk->skb, sig_len); + + save_digest = kmemdup(digest, sig_len, GFP_ATOMIC); + if (!save_digest) + goto nomem; + + memset(digest, 0, sig_len); + + sctp_auth_calculate_hmac(asoc, chunk->skb, + (struct sctp_auth_chunk *)chunk->chunk_hdr, + GFP_ATOMIC); + + /* Discard the packet if the digests do not match */ + if (memcmp(save_digest, digest, sig_len)) { + kfree(save_digest); + return SCTP_IERROR_BAD_SIG; + } + + kfree(save_digest); + chunk->auth = 1; + + return SCTP_IERROR_NO_ERROR; +nomem: + return SCTP_IERROR_NOMEM; +} + +sctp_disposition_t sctp_sf_eat_auth(const struct sctp_endpoint *ep, + const struct sctp_association *asoc, + const sctp_subtype_t type, + void *arg, + sctp_cmd_seq_t *commands) +{ + struct sctp_authhdr *auth_hdr; + struct sctp_chunk *chunk = arg; + struct sctp_chunk *err_chunk; + sctp_ierror_t error; + + if (!sctp_vtag_verify(chunk, asoc)) { + sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_BAD_TAG, + SCTP_NULL()); + return sctp_sf_pdiscard(ep, asoc, type, arg, commands); + } + + /* Make sure that the AUTH chunk has valid length. */ + if (!sctp_chunk_length_valid(chunk, sizeof(struct sctp_auth_chunk))) + return sctp_sf_violation_chunklen(ep, asoc, type, arg, + commands); + + auth_hdr = (struct sctp_authhdr *)chunk->skb->data; + error = sctp_sf_authenticate(ep, asoc, type, chunk); + switch (error) { + case SCTP_IERROR_AUTH_BAD_HMAC: + /* Generate the ERROR chunk and discard the rest + * of the packet + */ + err_chunk = sctp_make_op_error(asoc, chunk, + SCTP_ERROR_UNSUP_HMAC, + &auth_hdr->hmac_id, + sizeof(__u16)); + if (err_chunk) { + sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, + SCTP_CHUNK(err_chunk)); + } + /* Fall Through */ + case SCTP_IERROR_AUTH_BAD_KEYID: + case SCTP_IERROR_BAD_SIG: + return sctp_sf_pdiscard(ep, asoc, type, arg, commands); + break; + case SCTP_IERROR_PROTO_VIOLATION: + return sctp_sf_violation_chunklen(ep, asoc, type, arg, + commands); + break; + case SCTP_IERROR_NOMEM: + return SCTP_DISPOSITION_NOMEM; + default: + break; + } + + if (asoc->active_key_id != ntohs(auth_hdr->shkey_id)) { + struct sctp_ulpevent *ev; + + ev = sctp_ulpevent_make_authkey(asoc, ntohs(auth_hdr->shkey_id), + SCTP_AUTH_NEWKEY, GFP_ATOMIC); + + if (!ev) + return -ENOMEM; + + sctp_add_cmd_sf(commands, SCTP_CMD_EVENT_ULP, + SCTP_ULPEVENT(ev)); + } + + return SCTP_DISPOSITION_CONSUME; +} + /* * Process an unknown chunk. * @@ -3822,6 +4069,20 @@ static sctp_disposition_t sctp_sf_abort_violation( if (!abort) goto nomem; + /* SCTP-AUTH, Section 6.3: + * It should be noted that if the receiver wants to tear + * down an association in an authenticated way only, the + * handling of malformed packets should not result in + * tearing down the association. + * + * This means that if we only want to abort associations + * in an authenticated way (i.e AUTH+ABORT), then we + * can't destory this association just becuase the packet + * was malformed. 
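[Editor's note: sketch of the result dispatch only.] sctp_sf_eat_auth() maps the result of sctp_sf_authenticate() onto a disposition: an unsupported HMAC identifier gets an ERROR chunk queued and the packet is then discarded, a bad key id or bad signature is silently discarded, a bad digest length is treated as a protocol violation, and allocation failure propagates. The enums below are illustrative stand-ins for the kernel's SCTP_IERROR_* and SCTP_DISPOSITION_* constants.

enum auth_error {
        AUTH_OK,
        AUTH_BAD_HMAC,
        AUTH_BAD_KEYID,
        AUTH_BAD_SIG,
        AUTH_BAD_LENGTH,
        AUTH_NOMEM,
};

enum disposition {
        DISPOSITION_CONSUME,
        DISPOSITION_DISCARD,
        DISPOSITION_VIOLATION,
        DISPOSITION_NOMEM,
};

static enum disposition handle_auth_result(enum auth_error err,
                                           int *send_unsup_hmac_error)
{
        *send_unsup_hmac_error = 0;

        switch (err) {
        case AUTH_BAD_HMAC:
                *send_unsup_hmac_error = 1;     /* queue an ERROR chunk */
                /* fall through: the packet is still discarded */
        case AUTH_BAD_KEYID:
        case AUTH_BAD_SIG:
                return DISPOSITION_DISCARD;
        case AUTH_BAD_LENGTH:
                return DISPOSITION_VIOLATION;
        case AUTH_NOMEM:
                return DISPOSITION_NOMEM;
        default:
                return DISPOSITION_CONSUME;
        }
}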
+ */ + if (sctp_auth_recv_cid(SCTP_CID_ABORT, asoc)) + goto discard; + if (asoc) { sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(abort)); SCTP_INC_STATS(SCTP_MIB_OUTCTRLCHUNKS); @@ -3859,6 +4120,7 @@ static sctp_disposition_t sctp_sf_abort_violation( SCTP_INC_STATS(SCTP_MIB_OUTCTRLCHUNKS); } +discard: sctp_sf_pdiscard(ep, asoc, SCTP_ST_CHUNK(0), arg, commands); SCTP_INC_STATS(SCTP_MIB_ABORTEDS); @@ -5428,10 +5690,8 @@ static int sctp_eat_data(const struct sctp_association *asoc, sctp_verb_t deliver; int tmp; __u32 tsn; - int account_value; struct sctp_tsnmap *map = (struct sctp_tsnmap *)&asoc->peer.tsn_map; struct sock *sk = asoc->base.sk; - int rcvbuf_over = 0; data_hdr = chunk->subh.data_hdr = (sctp_datahdr_t *)chunk->skb->data; skb_pull(chunk->skb, sizeof(sctp_datahdr_t)); @@ -5441,48 +5701,6 @@ static int sctp_eat_data(const struct sctp_association *asoc, /* ASSERT: Now skb->data is really the user data. */ - /* - * If we are established, and we have used up our receive buffer - * memory, think about droping the frame. - * Note that we have an opportunity to improve performance here. - * If we accept one chunk from an skbuff, we have to keep all the - * memory of that skbuff around until the chunk is read into user - * space. Therefore, once we accept 1 chunk we may as well accept all - * remaining chunks in the skbuff. The data_accepted flag helps us do - * that. - */ - if ((asoc->state == SCTP_STATE_ESTABLISHED) && (!chunk->data_accepted)) { - /* - * If the receive buffer policy is 1, then each - * association can allocate up to sk_rcvbuf bytes - * otherwise, all the associations in aggregate - * may allocate up to sk_rcvbuf bytes - */ - if (asoc->ep->rcvbuf_policy) - account_value = atomic_read(&asoc->rmem_alloc); - else - account_value = atomic_read(&sk->sk_rmem_alloc); - if (account_value > sk->sk_rcvbuf) { - /* - * We need to make forward progress, even when we are - * under memory pressure, so we always allow the - * next tsn after the ctsn ack point to be accepted. - * This lets us avoid deadlocks in which we have to - * drop frames that would otherwise let us drain the - * receive queue. - */ - if ((sctp_tsnmap_get_ctsn(map) + 1) != tsn) - return SCTP_IERROR_IGNORE_TSN; - - /* - * We're going to accept the frame but we should renege - * to make space for it. This will send us down that - * path later in this function. - */ - rcvbuf_over = 1; - } - } - /* Process ECN based congestion. * * Since the chunk structure is reused for all chunks within @@ -5542,18 +5760,9 @@ static int sctp_eat_data(const struct sctp_association *asoc, * seems a bit troublesome in that frag_point varies based on * PMTU. In cases, such as loopback, this might be a rather * large spill over. - * NOTE: If we have a full receive buffer here, we only renege if - * our receiver can still make progress without the tsn being - * received. We do this because in the event that the associations - * receive queue is empty we are filling a leading gap, and since - * reneging moves the gap to the end of the tsn stream, we are likely - * to stall again very shortly. Avoiding the renege when we fill a - * leading gap is a good heuristic for avoiding such steady state - * stalls. - */ - if (!asoc->rwnd || asoc->rwnd_over || - (datalen > asoc->rwnd + asoc->frag_point) || - (rcvbuf_over && (!skb_queue_len(&sk->sk_receive_queue)))) { + */ + if ((!chunk->data_accepted) && (!asoc->rwnd || asoc->rwnd_over || + (datalen > asoc->rwnd + asoc->frag_point))) { /* If this is the next TSN, consider reneging to make * room. 
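[Editor's note: boolean sketch of the simplified test.] With the receive-buffer accounting moved out of sctp_eat_data(), the remaining renege consideration is: only chunks not already accepted are examined, and the trigger is a closed or overrun receive window, or a chunk larger than rwnd plus the fragmentation point. The helper name and parameters below are illustrative.

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

static bool should_consider_renege(bool data_accepted, uint32_t rwnd,
                                   uint32_t rwnd_over, size_t datalen,
                                   size_t frag_point)
{
        if (data_accepted)
                return false;   /* already charged for this skb */
        return !rwnd || rwnd_over || datalen > (size_t)rwnd + frag_point;
}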
Note: Playing nice with a confused sender. A @@ -5573,6 +5782,21 @@ static int sctp_eat_data(const struct sctp_association *asoc, } } + /* + * Also try to renege to limit our memory usage in the event that + * we are under memory pressure + * If we can't renege, don't worry about it, the sk_stream_rmem_schedule + * in sctp_ulpevent_make_rcvmsg will drop the frame if we grow our + * memory usage too much + */ + if (*sk->sk_prot_creator->memory_pressure) { + if (sctp_tsnmap_has_gap(map) && + (sctp_tsnmap_get_ctsn(map) + 1) == tsn) { + SCTP_DEBUG_PRINTK("Under Pressure! Reneging for tsn:%u\n", tsn); + deliver = SCTP_CMD_RENEGE; + } + } + /* * Section 3.3.10.9 No User Data (9) * diff --git a/net/sctp/sm_statetable.c b/net/sctp/sm_statetable.c index ddb0ba3974b05a31807c8652f97ad9a2cfc10fcf..a93a4bc8f68f2bb8cd48619032e45cf7fca9b3d4 100644 --- a/net/sctp/sm_statetable.c +++ b/net/sctp/sm_statetable.c @@ -523,6 +523,34 @@ static const sctp_sm_table_entry_t prsctp_chunk_event_table[SCTP_NUM_PRSCTP_CHUN TYPE_SCTP_FWD_TSN, }; /*state_fn_t prsctp_chunk_event_table[][] */ +#define TYPE_SCTP_AUTH { \ + /* SCTP_STATE_EMPTY */ \ + TYPE_SCTP_FUNC(sctp_sf_ootb), \ + /* SCTP_STATE_CLOSED */ \ + TYPE_SCTP_FUNC(sctp_sf_ootb), \ + /* SCTP_STATE_COOKIE_WAIT */ \ + TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \ + /* SCTP_STATE_COOKIE_ECHOED */ \ + TYPE_SCTP_FUNC(sctp_sf_eat_auth), \ + /* SCTP_STATE_ESTABLISHED */ \ + TYPE_SCTP_FUNC(sctp_sf_eat_auth), \ + /* SCTP_STATE_SHUTDOWN_PENDING */ \ + TYPE_SCTP_FUNC(sctp_sf_eat_auth), \ + /* SCTP_STATE_SHUTDOWN_SENT */ \ + TYPE_SCTP_FUNC(sctp_sf_eat_auth), \ + /* SCTP_STATE_SHUTDOWN_RECEIVED */ \ + TYPE_SCTP_FUNC(sctp_sf_eat_auth), \ + /* SCTP_STATE_SHUTDOWN_ACK_SENT */ \ + TYPE_SCTP_FUNC(sctp_sf_eat_auth), \ +} /* TYPE_SCTP_AUTH */ + +/* The primary index for this table is the chunk type. + * The secondary index for this table is the state. + */ +static const sctp_sm_table_entry_t auth_chunk_event_table[SCTP_NUM_AUTH_CHUNK_TYPES][SCTP_STATE_NUM_STATES] = { + TYPE_SCTP_AUTH, +}; /*state_fn_t auth_chunk_event_table[][] */ + static const sctp_sm_table_entry_t chunk_event_table_unknown[SCTP_STATE_NUM_STATES] = { /* SCTP_STATE_EMPTY */ @@ -976,5 +1004,10 @@ static const sctp_sm_table_entry_t *sctp_chunk_event_lookup(sctp_cid_t cid, return &addip_chunk_event_table[1][state]; } + if (sctp_auth_enable) { + if (cid == SCTP_CID_AUTH) + return &auth_chunk_event_table[0][state]; + } + return &chunk_event_table_unknown[state]; } diff --git a/net/sctp/socket.c b/net/sctp/socket.c index 772fbfb4bfda801e63229c4ac6479278e823459c..9c6a4b5f6264639724c52717d0e84c75129ed747 100644 --- a/net/sctp/socket.c +++ b/net/sctp/socket.c @@ -107,23 +107,42 @@ static void sctp_sock_migrate(struct sock *, struct sock *, struct sctp_association *, sctp_socket_type_t); static char *sctp_hmac_alg = SCTP_COOKIE_HMAC_ALG; +extern struct kmem_cache *sctp_bucket_cachep; +extern int sysctl_sctp_mem[3]; +extern int sysctl_sctp_rmem[3]; +extern int sysctl_sctp_wmem[3]; + +static int sctp_memory_pressure; +static atomic_t sctp_memory_allocated; +static atomic_t sctp_sockets_allocated; + +static void sctp_enter_memory_pressure(void) +{ + sctp_memory_pressure = 1; +} + + /* Get the sndbuf space available at the time on the association. 
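[Editor's note: sketch of the dispatch-table pattern.] auth_chunk_event_table above follows the usual sm_statetable layout: one row per chunk type, one column per association state, each cell a handler, so sctp_chunk_event_lookup() only has to index the table once AUTH is enabled. The runnable miniature below uses placeholder states and handlers; in the kernel the AUTH row routes most states to sctp_sf_eat_auth() and the rest to discard or out-of-the-blue handlers.

#include <stdio.h>

#define NUM_STATES 3    /* placeholder: closed, cookie-echoed, established */

typedef int (*state_fn)(void *chunk);

static int handle_discard(void *chunk)  { (void)chunk; return 0; }
static int handle_eat_auth(void *chunk) { (void)chunk; return 1; }

/* primary index: chunk type; secondary index: association state */
static const state_fn auth_event_table[1][NUM_STATES] = {
        { handle_discard, handle_eat_auth, handle_eat_auth },
};

int main(void)
{
        int state = 2;  /* pretend the association is established */
        printf("%d\n", auth_event_table[0][state](NULL));
        return 0;
}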
*/ static inline int sctp_wspace(struct sctp_association *asoc) { - struct sock *sk = asoc->base.sk; - int amt = 0; + int amt; - if (asoc->ep->sndbuf_policy) { - /* make sure that no association uses more than sk_sndbuf */ - amt = sk->sk_sndbuf - asoc->sndbuf_used; + if (asoc->ep->sndbuf_policy) + amt = asoc->sndbuf_used; + else + amt = atomic_read(&asoc->base.sk->sk_wmem_alloc); + + if (amt >= asoc->base.sk->sk_sndbuf) { + if (asoc->base.sk->sk_userlocks & SOCK_SNDBUF_LOCK) + amt = 0; + else { + amt = sk_stream_wspace(asoc->base.sk); + if (amt < 0) + amt = 0; + } } else { - /* do socket level accounting */ - amt = sk->sk_sndbuf - atomic_read(&sk->sk_wmem_alloc); + amt = asoc->base.sk->sk_sndbuf - amt; } - - if (amt < 0) - amt = 0; - return amt; } @@ -155,6 +174,7 @@ static inline void sctp_set_owner_w(struct sctp_chunk *chunk) sizeof(struct sctp_chunk); atomic_add(sizeof(struct sctp_chunk), &sk->sk_wmem_alloc); + sk_charge_skb(sk, chunk->skb); } /* Verify that this is a valid address. */ @@ -2926,6 +2946,164 @@ static int sctp_setsockopt_maxburst(struct sock *sk, return 0; } +/* + * 7.1.18. Add a chunk that must be authenticated (SCTP_AUTH_CHUNK) + * + * This set option adds a chunk type that the user is requesting to be + * received only in an authenticated way. Changes to the list of chunks + * will only effect future associations on the socket. + */ +static int sctp_setsockopt_auth_chunk(struct sock *sk, + char __user *optval, + int optlen) +{ + struct sctp_authchunk val; + + if (optlen != sizeof(struct sctp_authchunk)) + return -EINVAL; + if (copy_from_user(&val, optval, optlen)) + return -EFAULT; + + switch (val.sauth_chunk) { + case SCTP_CID_INIT: + case SCTP_CID_INIT_ACK: + case SCTP_CID_SHUTDOWN_COMPLETE: + case SCTP_CID_AUTH: + return -EINVAL; + } + + /* add this chunk id to the endpoint */ + return sctp_auth_ep_add_chunkid(sctp_sk(sk)->ep, val.sauth_chunk); +} + +/* + * 7.1.19. Get or set the list of supported HMAC Identifiers (SCTP_HMAC_IDENT) + * + * This option gets or sets the list of HMAC algorithms that the local + * endpoint requires the peer to use. + */ +static int sctp_setsockopt_hmac_ident(struct sock *sk, + char __user *optval, + int optlen) +{ + struct sctp_hmacalgo *hmacs; + int err; + + if (optlen < sizeof(struct sctp_hmacalgo)) + return -EINVAL; + + hmacs = kmalloc(optlen, GFP_KERNEL); + if (!hmacs) + return -ENOMEM; + + if (copy_from_user(hmacs, optval, optlen)) { + err = -EFAULT; + goto out; + } + + if (hmacs->shmac_num_idents == 0 || + hmacs->shmac_num_idents > SCTP_AUTH_NUM_HMACS) { + err = -EINVAL; + goto out; + } + + err = sctp_auth_ep_set_hmacs(sctp_sk(sk)->ep, hmacs); +out: + kfree(hmacs); + return err; +} + +/* + * 7.1.20. Set a shared key (SCTP_AUTH_KEY) + * + * This option will set a shared secret key which is used to build an + * association shared key. + */ +static int sctp_setsockopt_auth_key(struct sock *sk, + char __user *optval, + int optlen) +{ + struct sctp_authkey *authkey; + struct sctp_association *asoc; + int ret; + + if (optlen <= sizeof(struct sctp_authkey)) + return -EINVAL; + + authkey = kmalloc(optlen, GFP_KERNEL); + if (!authkey) + return -ENOMEM; + + if (copy_from_user(authkey, optval, optlen)) { + ret = -EFAULT; + goto out; + } + + asoc = sctp_id2assoc(sk, authkey->sca_assoc_id); + if (!asoc && authkey->sca_assoc_id && sctp_style(sk, UDP)) { + ret = -EINVAL; + goto out; + } + + ret = sctp_auth_set_key(sctp_sk(sk)->ep, asoc, authkey); +out: + kfree(authkey); + return ret; +} + +/* + * 7.1.21. 
Get or set the active shared key (SCTP_AUTH_ACTIVE_KEY) + * + * This option will get or set the active shared key to be used to build + * the association shared key. + */ +static int sctp_setsockopt_active_key(struct sock *sk, + char __user *optval, + int optlen) +{ + struct sctp_authkeyid val; + struct sctp_association *asoc; + + if (optlen != sizeof(struct sctp_authkeyid)) + return -EINVAL; + if (copy_from_user(&val, optval, optlen)) + return -EFAULT; + + asoc = sctp_id2assoc(sk, val.scact_assoc_id); + if (!asoc && val.scact_assoc_id && sctp_style(sk, UDP)) + return -EINVAL; + + return sctp_auth_set_active_key(sctp_sk(sk)->ep, asoc, + val.scact_keynumber); +} + +/* + * 7.1.22. Delete a shared key (SCTP_AUTH_DELETE_KEY) + * + * This set option will delete a shared secret key from use. + */ +static int sctp_setsockopt_del_key(struct sock *sk, + char __user *optval, + int optlen) +{ + struct sctp_authkeyid val; + struct sctp_association *asoc; + + if (optlen != sizeof(struct sctp_authkeyid)) + return -EINVAL; + if (copy_from_user(&val, optval, optlen)) + return -EFAULT; + + asoc = sctp_id2assoc(sk, val.scact_assoc_id); + if (!asoc && val.scact_assoc_id && sctp_style(sk, UDP)) + return -EINVAL; + + return sctp_auth_del_key_id(sctp_sk(sk)->ep, asoc, + val.scact_keynumber); + +} + + /* API 6.2 setsockopt(), getsockopt() * * Applications use setsockopt() and getsockopt() to set or retrieve @@ -3049,6 +3227,21 @@ SCTP_STATIC int sctp_setsockopt(struct sock *sk, int level, int optname, case SCTP_MAX_BURST: retval = sctp_setsockopt_maxburst(sk, optval, optlen); break; + case SCTP_AUTH_CHUNK: + retval = sctp_setsockopt_auth_chunk(sk, optval, optlen); + break; + case SCTP_HMAC_IDENT: + retval = sctp_setsockopt_hmac_ident(sk, optval, optlen); + break; + case SCTP_AUTH_KEY: + retval = sctp_setsockopt_auth_key(sk, optval, optlen); + break; + case SCTP_AUTH_ACTIVE_KEY: + retval = sctp_setsockopt_active_key(sk, optval, optlen); + break; + case SCTP_AUTH_DELETE_KEY: + retval = sctp_setsockopt_del_key(sk, optval, optlen); + break; default: retval = -ENOPROTOOPT; break; @@ -3293,6 +3486,7 @@ SCTP_STATIC int sctp_init_sock(struct sock *sk) sp->hmac = NULL; SCTP_DBG_OBJCNT_INC(sock); + atomic_inc(&sctp_sockets_allocated); return 0; } @@ -3306,7 +3500,7 @@ SCTP_STATIC int sctp_destroy_sock(struct sock *sk) /* Release our hold on the endpoint. 
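[Editor's note: userspace usage sketch.] The new setsockopt handlers above are driven from applications roughly as follows. The example assumes an lksctp-style <netinet/sctp.h> exposing the draft socket-API names used in this patch (SCTP_AUTH_CHUNK, SCTP_AUTH_KEY, SCTP_AUTH_ACTIVE_KEY, struct sctp_authchunk, struct sctp_authkey, struct sctp_authkeyid); the sca_keynumber/sca_keylength/sca_key field names are taken from that draft and may differ in other header versions.

#include <netinet/in.h>
#include <netinet/sctp.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/socket.h>

int main(void)
{
        static const unsigned char secret[] = "example-shared-secret";
        struct sctp_authchunk ac;
        struct sctp_authkeyid keyid;
        struct sctp_authkey *key;
        size_t len;
        int sd;

        sd = socket(AF_INET, SOCK_STREAM, IPPROTO_SCTP);
        if (sd < 0) {
                perror("socket");
                return 1;
        }

        /* Require DATA chunks (type 0) from the peer to be authenticated. */
        ac.sauth_chunk = 0;
        if (setsockopt(sd, IPPROTO_SCTP, SCTP_AUTH_CHUNK, &ac, sizeof(ac)) < 0)
                perror("SCTP_AUTH_CHUNK");

        /* Install endpoint shared key number 1. */
        len = sizeof(*key) + sizeof(secret);
        key = calloc(1, len);
        if (!key)
                return 1;
        key->sca_assoc_id = 0;                  /* endpoint-wide key */
        key->sca_keynumber = 1;
        key->sca_keylength = sizeof(secret);
        memcpy(key->sca_key, secret, sizeof(secret));
        if (setsockopt(sd, IPPROTO_SCTP, SCTP_AUTH_KEY, key, len) < 0)
                perror("SCTP_AUTH_KEY");
        free(key);

        /* Make key number 1 the active one for future associations. */
        memset(&keyid, 0, sizeof(keyid));
        keyid.scact_keynumber = 1;
        if (setsockopt(sd, IPPROTO_SCTP, SCTP_AUTH_ACTIVE_KEY,
                       &keyid, sizeof(keyid)) < 0)
                perror("SCTP_AUTH_ACTIVE_KEY");

        return 0;
}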
*/ ep = sctp_sk(sk)->ep; sctp_endpoint_free(ep); - + atomic_dec(&sctp_sockets_allocated); return 0; } @@ -4819,6 +5013,118 @@ static int sctp_getsockopt_maxburst(struct sock *sk, int len, return -ENOTSUPP; } +static int sctp_getsockopt_hmac_ident(struct sock *sk, int len, + char __user *optval, int __user *optlen) +{ + struct sctp_hmac_algo_param *hmacs; + __u16 param_len; + + hmacs = sctp_sk(sk)->ep->auth_hmacs_list; + param_len = ntohs(hmacs->param_hdr.length); + + if (len < param_len) + return -EINVAL; + if (put_user(len, optlen)) + return -EFAULT; + if (copy_to_user(optval, hmacs->hmac_ids, len)) + return -EFAULT; + + return 0; +} + +static int sctp_getsockopt_active_key(struct sock *sk, int len, + char __user *optval, int __user *optlen) +{ + struct sctp_authkeyid val; + struct sctp_association *asoc; + + if (len < sizeof(struct sctp_authkeyid)) + return -EINVAL; + if (copy_from_user(&val, optval, sizeof(struct sctp_authkeyid))) + return -EFAULT; + + asoc = sctp_id2assoc(sk, val.scact_assoc_id); + if (!asoc && val.scact_assoc_id && sctp_style(sk, UDP)) + return -EINVAL; + + if (asoc) + val.scact_keynumber = asoc->active_key_id; + else + val.scact_keynumber = sctp_sk(sk)->ep->active_key_id; + + return 0; +} + +static int sctp_getsockopt_peer_auth_chunks(struct sock *sk, int len, + char __user *optval, int __user *optlen) +{ + struct sctp_authchunks val; + struct sctp_association *asoc; + struct sctp_chunks_param *ch; + char __user *to; + + if (len <= sizeof(struct sctp_authchunks)) + return -EINVAL; + + if (copy_from_user(&val, optval, sizeof(struct sctp_authchunks))) + return -EFAULT; + + to = val.gauth_chunks; + asoc = sctp_id2assoc(sk, val.gauth_assoc_id); + if (!asoc) + return -EINVAL; + + ch = asoc->peer.peer_chunks; + + /* See if the user provided enough room for all the data */ + if (len < ntohs(ch->param_hdr.length)) + return -EINVAL; + + len = ntohs(ch->param_hdr.length); + if (put_user(len, optlen)) + return -EFAULT; + if (copy_to_user(to, ch->chunks, len)) + return -EFAULT; + + return 0; +} + +static int sctp_getsockopt_local_auth_chunks(struct sock *sk, int len, + char __user *optval, int __user *optlen) +{ + struct sctp_authchunks val; + struct sctp_association *asoc; + struct sctp_chunks_param *ch; + char __user *to; + + if (len <= sizeof(struct sctp_authchunks)) + return -EINVAL; + + if (copy_from_user(&val, optval, sizeof(struct sctp_authchunks))) + return -EFAULT; + + to = val.gauth_chunks; + asoc = sctp_id2assoc(sk, val.gauth_assoc_id); + if (!asoc && val.gauth_assoc_id && sctp_style(sk, UDP)) + return -EINVAL; + + if (asoc) + ch = (struct sctp_chunks_param*)asoc->c.auth_chunks; + else + ch = sctp_sk(sk)->ep->auth_chunk_list; + + if (len < ntohs(ch->param_hdr.length)) + return -EINVAL; + + len = ntohs(ch->param_hdr.length); + if (put_user(len, optlen)) + return -EFAULT; + if (copy_to_user(to, ch->chunks, len)) + return -EFAULT; + + return 0; +} + SCTP_STATIC int sctp_getsockopt(struct sock *sk, int level, int optname, char __user *optval, int __user *optlen) { @@ -4942,6 +5248,25 @@ SCTP_STATIC int sctp_getsockopt(struct sock *sk, int level, int optname, case SCTP_MAX_BURST: retval = sctp_getsockopt_maxburst(sk, len, optval, optlen); break; + case SCTP_AUTH_KEY: + case SCTP_AUTH_CHUNK: + case SCTP_AUTH_DELETE_KEY: + retval = -EOPNOTSUPP; + break; + case SCTP_HMAC_IDENT: + retval = sctp_getsockopt_hmac_ident(sk, len, optval, optlen); + break; + case SCTP_AUTH_ACTIVE_KEY: + retval = sctp_getsockopt_active_key(sk, len, optval, optlen); + break; + case 
SCTP_PEER_AUTH_CHUNKS: + retval = sctp_getsockopt_peer_auth_chunks(sk, len, optval, + optlen); + break; + case SCTP_LOCAL_AUTH_CHUNKS: + retval = sctp_getsockopt_local_auth_chunks(sk, len, optval, + optlen); + break; default: retval = -ENOPROTOOPT; break; @@ -4989,22 +5314,14 @@ static long sctp_get_port_local(struct sock *sk, union sctp_addr *addr) sctp_local_bh_disable(); if (snum == 0) { - /* Search for an available port. - * - * 'sctp_port_rover' was the last port assigned, so - * we start to search from 'sctp_port_rover + - * 1'. What we do is first check if port 'rover' is - * already in the hash table; if not, we use that; if - * it is, we try next. - */ - int low = sysctl_local_port_range[0]; - int high = sysctl_local_port_range[1]; - int remaining = (high - low) + 1; - int rover; - int index; - - sctp_spin_lock(&sctp_port_alloc_lock); - rover = sctp_port_rover; + /* Search for an available port. */ + int low, high, remaining, index; + unsigned int rover; + + inet_get_local_port_range(&low, &high); + remaining = (high - low) + 1; + rover = net_random() % remaining + low; + do { rover++; if ((rover < low) || (rover > high)) @@ -5019,8 +5336,6 @@ static long sctp_get_port_local(struct sock *sk, union sctp_addr *addr) next: sctp_spin_unlock(&head->lock); } while (--remaining > 0); - sctp_port_rover = rover; - sctp_spin_unlock(&sctp_port_alloc_lock); /* Exhausted local port range during search? */ ret = 1; @@ -5720,6 +6035,12 @@ static void sctp_wfree(struct sk_buff *skb) atomic_sub(sizeof(struct sctp_chunk), &sk->sk_wmem_alloc); + /* + * This undoes what is done via sk_charge_skb + */ + sk->sk_wmem_queued -= skb->truesize; + sk->sk_forward_alloc += skb->truesize; + sock_wfree(skb); __sctp_write_space(asoc); @@ -5737,6 +6058,11 @@ void sctp_sock_rfree(struct sk_buff *skb) struct sctp_ulpevent *event = sctp_skb2event(skb); atomic_sub(event->rmem_len, &sk->sk_rmem_alloc); + + /* + * Mimic the behavior of sk_stream_rfree + */ + sk->sk_forward_alloc += event->rmem_len; } @@ -6126,6 +6452,7 @@ static void sctp_sock_migrate(struct sock *oldsk, struct sock *newsk, sctp_release_sock(newsk); } + /* This proto struct describes the ULP interface for SCTP. 
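[Editor's note: userspace query sketch.] SCTP_PEER_AUTH_CHUNKS and SCTP_LOCAL_AUTH_CHUNKS, dispatched above, report which chunk types the peer (or the local endpoint) requires to be authenticated. The example assumes the draft socket-API layout used by this patch, where struct sctp_authchunks is an association id followed by a byte array of chunk types and the kernel rejects buffers no larger than that header; the exact length returned through optlen may differ between kernel versions, so it is only printed here.

#include <netinet/in.h>
#include <netinet/sctp.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/socket.h>

static void print_peer_auth_chunks(int sd, sctp_assoc_t assoc_id)
{
        socklen_t len = sizeof(struct sctp_authchunks) + 256;
        struct sctp_authchunks *ac = calloc(1, len);

        if (!ac)
                return;
        ac->gauth_assoc_id = assoc_id;

        if (getsockopt(sd, IPPROTO_SCTP, SCTP_PEER_AUTH_CHUNKS, ac, &len) < 0) {
                perror("SCTP_PEER_AUTH_CHUNKS");
                free(ac);
                return;
        }

        /* gauth_chunks[] now carries the peer's list of chunk types */
        printf("kernel reported %u bytes of authenticated-chunk data\n",
               (unsigned int)len);
        free(ac);
}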
*/ struct proto sctp_prot = { .name = "SCTP", @@ -6148,6 +6475,12 @@ struct proto sctp_prot = { .unhash = sctp_unhash, .get_port = sctp_get_port, .obj_size = sizeof(struct sctp_sock), + .sysctl_mem = sysctl_sctp_mem, + .sysctl_rmem = sysctl_sctp_rmem, + .sysctl_wmem = sysctl_sctp_wmem, + .memory_pressure = &sctp_memory_pressure, + .enter_memory_pressure = sctp_enter_memory_pressure, + .memory_allocated = &sctp_memory_allocated, }; #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) @@ -6172,5 +6505,11 @@ struct proto sctpv6_prot = { .unhash = sctp_unhash, .get_port = sctp_get_port, .obj_size = sizeof(struct sctp6_sock), + .sysctl_mem = sysctl_sctp_mem, + .sysctl_rmem = sysctl_sctp_rmem, + .sysctl_wmem = sysctl_sctp_wmem, + .memory_pressure = &sctp_memory_pressure, + .enter_memory_pressure = sctp_enter_memory_pressure, + .memory_allocated = &sctp_memory_allocated, }; #endif /* defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) */ diff --git a/net/sctp/sysctl.c b/net/sctp/sysctl.c index e2c679baf912ad35662837d4a6447057053218be..0669778e4335aeb22911315882084b6189612ff5 100644 --- a/net/sctp/sysctl.c +++ b/net/sctp/sysctl.c @@ -52,6 +52,10 @@ static int int_max = INT_MAX; static long sack_timer_min = 1; static long sack_timer_max = 500; +extern int sysctl_sctp_mem[3]; +extern int sysctl_sctp_rmem[3]; +extern int sysctl_sctp_wmem[3]; + static ctl_table sctp_table[] = { { .ctl_name = NET_SCTP_RTO_INITIAL, @@ -226,6 +230,39 @@ static ctl_table sctp_table[] = { .extra1 = &sack_timer_min, .extra2 = &sack_timer_max, }, + { + .ctl_name = CTL_UNNUMBERED, + .procname = "sctp_mem", + .data = &sysctl_sctp_mem, + .maxlen = sizeof(sysctl_sctp_mem), + .mode = 0644, + .proc_handler = &proc_dointvec, + }, + { + .ctl_name = CTL_UNNUMBERED, + .procname = "sctp_rmem", + .data = &sysctl_sctp_rmem, + .maxlen = sizeof(sysctl_sctp_rmem), + .mode = 0644, + .proc_handler = &proc_dointvec, + }, + { + .ctl_name = CTL_UNNUMBERED, + .procname = "sctp_wmem", + .data = &sysctl_sctp_wmem, + .maxlen = sizeof(sysctl_sctp_wmem), + .mode = 0644, + .proc_handler = &proc_dointvec, + }, + { + .ctl_name = CTL_UNNUMBERED, + .procname = "auth_enable", + .data = &sctp_auth_enable, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = &proc_dointvec, + .strategy = &sysctl_intvec + }, { .ctl_name = 0 } }; diff --git a/net/sctp/ulpevent.c b/net/sctp/ulpevent.c index bfecb353ab3da070f0b4784c6861eaef7de04d1c..2c17c7efad461b33f016c83401c4570068843e84 100644 --- a/net/sctp/ulpevent.c +++ b/net/sctp/ulpevent.c @@ -685,6 +685,24 @@ struct sctp_ulpevent *sctp_ulpevent_make_rcvmsg(struct sctp_association *asoc, struct sctp_ulpevent *event = NULL; struct sk_buff *skb; size_t padding, len; + int rx_count; + + /* + * check to see if we need to make space for this + * new skb, expand the rcvbuffer if needed, or drop + * the frame + */ + if (asoc->ep->rcvbuf_policy) + rx_count = atomic_read(&asoc->rmem_alloc); + else + rx_count = atomic_read(&asoc->base.sk->sk_rmem_alloc); + + if (rx_count >= asoc->base.sk->sk_rcvbuf) { + + if ((asoc->base.sk->sk_userlocks & SOCK_RCVBUF_LOCK) || + (!sk_stream_rmem_schedule(asoc->base.sk, chunk->skb))) + goto fail; + } /* Clone the original skb, sharing the data. 
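[Editor's note: sketch of the admission test moved into sctp_ulpevent_make_rcvmsg().] With rcvbuf_policy set, each association is accounted against sk_rcvbuf on its own; otherwise all associations on the socket share the budget. Once over budget, the event is only created if SO_RCVBUF was not explicitly locked by the user and the stream allocator can still reserve room. The struct and names below are illustrative stand-ins for the kernel fields.

#include <stdbool.h>

struct rcv_state {
        bool per_assoc_policy;  /* ep->rcvbuf_policy */
        long assoc_rmem;        /* bytes charged to this association */
        long sock_rmem;         /* bytes charged to the whole socket */
        long rcvbuf_limit;      /* sk->sk_rcvbuf */
        bool rcvbuf_locked;     /* SO_RCVBUF explicitly set by the user */
        bool can_reserve_more;  /* sk_stream_rmem_schedule() succeeded */
};

static bool accept_incoming_event(const struct rcv_state *s)
{
        long rx_count = s->per_assoc_policy ? s->assoc_rmem : s->sock_rmem;

        if (rx_count < s->rcvbuf_limit)
                return true;
        /* over budget: drop unless the buffer may still be grown */
        return !s->rcvbuf_locked && s->can_reserve_more;
}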
*/ skb = skb_clone(chunk->skb, gfp); @@ -795,6 +813,43 @@ struct sctp_ulpevent *sctp_ulpevent_make_pdapi( return NULL; } +struct sctp_ulpevent *sctp_ulpevent_make_authkey( + const struct sctp_association *asoc, __u16 key_id, + __u32 indication, gfp_t gfp) +{ + struct sctp_ulpevent *event; + struct sctp_authkey_event *ak; + struct sk_buff *skb; + + event = sctp_ulpevent_new(sizeof(struct sctp_authkey_event), + MSG_NOTIFICATION, gfp); + if (!event) + goto fail; + + skb = sctp_event2skb(event); + ak = (struct sctp_authkey_event *) + skb_put(skb, sizeof(struct sctp_authkey_event)); + + ak->auth_type = SCTP_AUTHENTICATION_EVENT; + ak->auth_flags = 0; + ak->auth_length = sizeof(struct sctp_authkey_event); + + ak->auth_keynumber = key_id; + ak->auth_altkeynumber = 0; + ak->auth_indication = indication; + + /* + * The association id field, holds the identifier for the association. + */ + sctp_ulpevent_set_owner(event, asoc); + ak->auth_assoc_id = sctp_assoc2id(asoc); + + return event; +fail: + return NULL; +} + + /* Return the notification type, assuming this is a notification * event. */ diff --git a/net/sctp/ulpqueue.c b/net/sctp/ulpqueue.c index fa0ba2a5564e51c8a6f78aa6d40481fc80f52a02..b9370956b18706b6df1bd3bdf646ee07257361a1 100644 --- a/net/sctp/ulpqueue.c +++ b/net/sctp/ulpqueue.c @@ -1027,6 +1027,7 @@ void sctp_ulpq_renege(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk, sctp_ulpq_partial_delivery(ulpq, chunk, gfp); } + sk_stream_mem_reclaim(asoc->base.sk); return; } diff --git a/net/socket.c b/net/socket.c index b09eb9036a17a3563e9ad5ca08a1734b86aa0df5..379b3a3907551f50d42776664d0bc66aafcfa671 100644 --- a/net/socket.c +++ b/net/socket.c @@ -84,6 +84,7 @@ #include #include #include +#include #include #include @@ -790,9 +791,9 @@ static ssize_t sock_aio_write(struct kiocb *iocb, const struct iovec *iov, */ static DEFINE_MUTEX(br_ioctl_mutex); -static int (*br_ioctl_hook) (unsigned int cmd, void __user *arg) = NULL; +static int (*br_ioctl_hook) (struct net *, unsigned int cmd, void __user *arg) = NULL; -void brioctl_set(int (*hook) (unsigned int, void __user *)) +void brioctl_set(int (*hook) (struct net *, unsigned int, void __user *)) { mutex_lock(&br_ioctl_mutex); br_ioctl_hook = hook; @@ -802,9 +803,9 @@ void brioctl_set(int (*hook) (unsigned int, void __user *)) EXPORT_SYMBOL(brioctl_set); static DEFINE_MUTEX(vlan_ioctl_mutex); -static int (*vlan_ioctl_hook) (void __user *arg); +static int (*vlan_ioctl_hook) (struct net *, void __user *arg); -void vlan_ioctl_set(int (*hook) (void __user *)) +void vlan_ioctl_set(int (*hook) (struct net *, void __user *)) { mutex_lock(&vlan_ioctl_mutex); vlan_ioctl_hook = hook; @@ -833,16 +834,20 @@ EXPORT_SYMBOL(dlci_ioctl_set); static long sock_ioctl(struct file *file, unsigned cmd, unsigned long arg) { struct socket *sock; + struct sock *sk; void __user *argp = (void __user *)arg; int pid, err; + struct net *net; sock = file->private_data; + sk = sock->sk; + net = sk->sk_net; if (cmd >= SIOCDEVPRIVATE && cmd <= (SIOCDEVPRIVATE + 15)) { - err = dev_ioctl(cmd, argp); + err = dev_ioctl(net, cmd, argp); } else #ifdef CONFIG_WIRELESS_EXT if (cmd >= SIOCIWFIRST && cmd <= SIOCIWLAST) { - err = dev_ioctl(cmd, argp); + err = dev_ioctl(net, cmd, argp); } else #endif /* CONFIG_WIRELESS_EXT */ switch (cmd) { @@ -868,7 +873,7 @@ static long sock_ioctl(struct file *file, unsigned cmd, unsigned long arg) mutex_lock(&br_ioctl_mutex); if (br_ioctl_hook) - err = br_ioctl_hook(cmd, argp); + err = br_ioctl_hook(net, cmd, argp); mutex_unlock(&br_ioctl_mutex); break; case 
SIOCGIFVLAN: @@ -879,7 +884,7 @@ static long sock_ioctl(struct file *file, unsigned cmd, unsigned long arg) mutex_lock(&vlan_ioctl_mutex); if (vlan_ioctl_hook) - err = vlan_ioctl_hook(argp); + err = vlan_ioctl_hook(net, argp); mutex_unlock(&vlan_ioctl_mutex); break; case SIOCADDDLCI: @@ -902,7 +907,7 @@ static long sock_ioctl(struct file *file, unsigned cmd, unsigned long arg) * to the NIC driver. */ if (err == -ENOIOCTLCMD) - err = dev_ioctl(cmd, argp); + err = dev_ioctl(net, cmd, argp); break; } return err; @@ -1071,7 +1076,7 @@ int sock_wake_async(struct socket *sock, int how, int band) return 0; } -static int __sock_create(int family, int type, int protocol, +static int __sock_create(struct net *net, int family, int type, int protocol, struct socket **res, int kern) { int err; @@ -1147,7 +1152,7 @@ static int __sock_create(int family, int type, int protocol, /* Now protected by module ref count */ rcu_read_unlock(); - err = pf->create(sock, protocol); + err = pf->create(net, sock, protocol); if (err < 0) goto out_module_put; @@ -1186,12 +1191,12 @@ static int __sock_create(int family, int type, int protocol, int sock_create(int family, int type, int protocol, struct socket **res) { - return __sock_create(family, type, protocol, res, 0); + return __sock_create(current->nsproxy->net_ns, family, type, protocol, res, 0); } int sock_create_kern(int family, int type, int protocol, struct socket **res) { - return __sock_create(family, type, protocol, res, 1); + return __sock_create(&init_net, family, type, protocol, res, 1); } asmlinkage long sys_socket(int family, int type, int protocol) @@ -1924,7 +1929,7 @@ asmlinkage long sys_recvmsg(int fd, struct msghdr __user *msg, * kernel msghdr to use the kernel address space) */ - uaddr = (void __user *)msg_sys.msg_name; + uaddr = (__force void __user *)msg_sys.msg_name; uaddr_len = COMPAT_NAMELEN(msg); if (MSG_CMSG_COMPAT & flags) { err = verify_compat_iovec(&msg_sys, iov, addr, VERIFY_WRITE); @@ -2230,6 +2235,7 @@ int kernel_accept(struct socket *sock, struct socket **newsock, int flags) err = sock->ops->accept(sock, *newsock, flags); if (err < 0) { sock_release(*newsock); + *newsock = NULL; goto done; } diff --git a/net/sunrpc/cache.c b/net/sunrpc/cache.c index ebe344f34d1ae741c9696db10785d971f49450a4..8e05557414cebfd522ea5dd0a056d6d95399588d 100644 --- a/net/sunrpc/cache.c +++ b/net/sunrpc/cache.c @@ -1218,23 +1218,15 @@ static const struct seq_operations cache_content_op = { static int content_open(struct inode *inode, struct file *file) { - int res; struct handle *han; struct cache_detail *cd = PDE(inode)->data; - han = kmalloc(sizeof(*han), GFP_KERNEL); + han = __seq_open_private(file, &cache_content_op, sizeof(*han)); if (han == NULL) return -ENOMEM; han->cd = cd; - - res = seq_open(file, &cache_content_op); - if (res) - kfree(han); - else - ((struct seq_file *)file->private_data)->private = han; - - return res; + return 0; } static const struct file_operations content_file_operations = { diff --git a/net/sunrpc/stats.c b/net/sunrpc/stats.c index 74ba7d443dfc06ec7162c2d7b1735bc2bfc67e06..4d4f3738b6887837ec4327c73ed07e50bdc69784 100644 --- a/net/sunrpc/stats.c +++ b/net/sunrpc/stats.c @@ -21,6 +21,7 @@ #include #include #include +#include #define RPCDBG_FACILITY RPCDBG_MISC @@ -265,7 +266,7 @@ rpc_proc_init(void) dprintk("RPC: registering /proc/net/rpc\n"); if (!proc_net_rpc) { struct proc_dir_entry *ent; - ent = proc_mkdir("rpc", proc_net); + ent = proc_mkdir("rpc", init_net.proc_net); if (ent) { ent->owner = THIS_MODULE; proc_net_rpc = 
ent; @@ -279,7 +280,7 @@ rpc_proc_exit(void) dprintk("RPC: unregistering /proc/net/rpc\n"); if (proc_net_rpc) { proc_net_rpc = NULL; - remove_proc_entry("net/rpc", NULL); + remove_proc_entry("rpc", init_net.proc_net); } } diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c index 036ab520df21ec5ecea04342e979fc58762aa059..c75bffeb89eb705831585ba1447c0ba77dd06a9c 100644 --- a/net/sunrpc/svcsock.c +++ b/net/sunrpc/svcsock.c @@ -19,6 +19,7 @@ * Copyright (C) 1995, 1996 Olaf Kirch */ +#include #include #include #include @@ -103,7 +104,7 @@ static struct lock_class_key svc_slock_key[2]; static inline void svc_reclassify_socket(struct socket *sock) { struct sock *sk = sock->sk; - BUG_ON(sk->sk_lock.owner != NULL); + BUG_ON(sock_owned_by_user(sk)); switch (sk->sk_family) { case AF_INET: sock_lock_init_class_and_name(sk, "slock-AF_INET-NFSD", @@ -877,7 +878,7 @@ svc_udp_recvfrom(struct svc_rqst *rqstp) } else { rqstp->rq_arg.page_len = len - rqstp->rq_arg.head[0].iov_len; rqstp->rq_respages = rqstp->rq_pages + 1 + - (rqstp->rq_arg.page_len + PAGE_SIZE - 1)/ PAGE_SIZE; + DIV_ROUND_UP(rqstp->rq_arg.page_len, PAGE_SIZE); } if (serv->sv_stats) diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c index 4ae7eed7f6170b665845344ab95663975c09a38a..282efd447a61d535b23c6322aecc2bc756a73195 100644 --- a/net/sunrpc/xprtsock.c +++ b/net/sunrpc/xprtsock.c @@ -1186,7 +1186,7 @@ static struct lock_class_key xs_slock_key[2]; static inline void xs_reclassify_socket(struct socket *sock) { struct sock *sk = sock->sk; - BUG_ON(sk->sk_lock.owner != NULL); + BUG_ON(sock_owned_by_user(sk)); switch (sk->sk_family) { case AF_INET: sock_lock_init_class_and_name(sk, "slock-AF_INET-NFS", diff --git a/net/tipc/eth_media.c b/net/tipc/eth_media.c index 711ca4b1f051720ca546d6e722e1ab387a20ed21..3bbef2ab22ae8c6ead19093ee0bcebbf0efc04c1 100644 --- a/net/tipc/eth_media.c +++ b/net/tipc/eth_media.c @@ -38,6 +38,7 @@ #include #include #include +#include #define MAX_ETH_BEARERS 2 #define ETH_LINK_PRIORITY TIPC_DEF_LINK_PRI @@ -76,7 +77,7 @@ static int send_msg(struct sk_buff *buf, struct tipc_bearer *tb_ptr, skb_reset_network_header(clone); dev = ((struct eth_bearer *)(tb_ptr->usr_handle))->dev; clone->dev = dev; - dev->hard_header(clone, dev, ETH_P_TIPC, + dev_hard_header(clone, dev, ETH_P_TIPC, &dest->dev_addr.eth_addr, dev->dev_addr, clone->len); dev_queue_xmit(clone); @@ -100,6 +101,11 @@ static int recv_msg(struct sk_buff *buf, struct net_device *dev, struct eth_bearer *eb_ptr = (struct eth_bearer *)pt->af_packet_priv; u32 size; + if (dev->nd_net != &init_net) { + kfree_skb(buf); + return 0; + } + if (likely(eb_ptr->bearer)) { if (likely(buf->pkt_type <= PACKET_BROADCAST)) { size = msg_size((struct tipc_msg *)buf->data); @@ -129,7 +135,7 @@ static int enable_bearer(struct tipc_bearer *tb_ptr) /* Find device with specified name */ - for_each_netdev(pdev){ + for_each_netdev(&init_net, pdev){ if (!strncmp(pdev->name, driver_name, IFNAMSIZ)) { dev = pdev; break; @@ -192,6 +198,9 @@ static int recv_notification(struct notifier_block *nb, unsigned long evt, struct eth_bearer *eb_ptr = ð_bearers[0]; struct eth_bearer *stop = ð_bearers[MAX_ETH_BEARERS]; + if (dev->nd_net != &init_net) + return NOTIFY_DONE; + while ((eb_ptr->dev != dev)) { if (++eb_ptr == stop) return NOTIFY_DONE; /* couldn't find device */ @@ -234,12 +243,12 @@ static int recv_notification(struct notifier_block *nb, unsigned long evt, static char *eth_addr2str(struct tipc_media_addr *a, char *str_buf, int str_size) { unchar *addr = (unchar *)&a->dev_addr; + 
DECLARE_MAC_BUF(mac); if (str_size < 18) *str_buf = '\0'; else - sprintf(str_buf, "%02x:%02x:%02x:%02x:%02x:%02x", - addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]); + sprintf(str_buf, "%s", print_mac(mac, addr)); return str_buf; } diff --git a/net/tipc/socket.c b/net/tipc/socket.c index 84110172031ef6bfa69716a90ca4bbda6b48e3f4..e36b4b5a5222320644fd1d7fe35c06bfe982e7f0 100644 --- a/net/tipc/socket.c +++ b/net/tipc/socket.c @@ -162,13 +162,16 @@ static void advance_queue(struct tipc_sock *tsock) * * Returns 0 on success, errno otherwise */ -static int tipc_create(struct socket *sock, int protocol) +static int tipc_create(struct net *net, struct socket *sock, int protocol) { struct tipc_sock *tsock; struct tipc_port *port; struct sock *sk; u32 ref; + if (net != &init_net) + return -EAFNOSUPPORT; + if (unlikely(protocol != 0)) return -EPROTONOSUPPORT; @@ -198,7 +201,7 @@ static int tipc_create(struct socket *sock, int protocol) return -EPROTOTYPE; } - sk = sk_alloc(AF_TIPC, GFP_KERNEL, &tipc_proto, 1); + sk = sk_alloc(net, AF_TIPC, GFP_KERNEL, &tipc_proto, 1); if (!sk) { tipc_deleteport(ref); return -ENOMEM; @@ -1372,7 +1375,7 @@ static int accept(struct socket *sock, struct socket *newsock, int flags) } buf = skb_peek(&sock->sk->sk_receive_queue); - res = tipc_create(newsock, 0); + res = tipc_create(sock->sk->sk_net, newsock, 0); if (!res) { struct tipc_sock *new_tsock = tipc_sk(newsock->sk); struct tipc_portid id; diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c index a05c34260e7005a346e3a507dc97149c82d2b740..2b57eaf66abc4086f7387704c1993ee399ba65c3 100644 --- a/net/unix/af_unix.c +++ b/net/unix/af_unix.c @@ -103,6 +103,7 @@ #include #include #include +#include #include #include #include @@ -593,7 +594,7 @@ static struct proto unix_proto = { */ static struct lock_class_key af_unix_sk_receive_queue_lock_key; -static struct sock * unix_create1(struct socket *sock) +static struct sock * unix_create1(struct net *net, struct socket *sock) { struct sock *sk = NULL; struct unix_sock *u; @@ -601,7 +602,7 @@ static struct sock * unix_create1(struct socket *sock) if (atomic_read(&unix_nr_socks) >= 2*get_max_files()) goto out; - sk = sk_alloc(PF_UNIX, GFP_KERNEL, &unix_proto, 1); + sk = sk_alloc(net, PF_UNIX, GFP_KERNEL, &unix_proto, 1); if (!sk) goto out; @@ -627,8 +628,11 @@ static struct sock * unix_create1(struct socket *sock) return sk; } -static int unix_create(struct socket *sock, int protocol) +static int unix_create(struct net *net, struct socket *sock, int protocol) { + if (net != &init_net) + return -EAFNOSUPPORT; + if (protocol && protocol != PF_UNIX) return -EPROTONOSUPPORT; @@ -654,7 +658,7 @@ static int unix_create(struct socket *sock, int protocol) return -ESOCKTNOSUPPORT; } - return unix_create1(sock) ? 0 : -ENOMEM; + return unix_create1(net, sock) ? 
0 : -ENOMEM; } static int unix_release(struct socket *sock) @@ -1038,7 +1042,7 @@ static int unix_stream_connect(struct socket *sock, struct sockaddr *uaddr, err = -ENOMEM; /* create new sock for complete connection */ - newsk = unix_create1(NULL); + newsk = unix_create1(sk->sk_net, NULL); if (newsk == NULL) goto out; @@ -2082,25 +2086,7 @@ static const struct seq_operations unix_seq_ops = { static int unix_seq_open(struct inode *inode, struct file *file) { - struct seq_file *seq; - int rc = -ENOMEM; - int *iter = kmalloc(sizeof(int), GFP_KERNEL); - - if (!iter) - goto out; - - rc = seq_open(file, &unix_seq_ops); - if (rc) - goto out_kfree; - - seq = file->private_data; - seq->private = iter; - *iter = 0; -out: - return rc; -out_kfree: - kfree(iter); - goto out; + return seq_open_private(file, &unix_seq_ops, sizeof(int)); } static const struct file_operations unix_seq_fops = { @@ -2135,7 +2121,7 @@ static int __init af_unix_init(void) sock_register(&unix_family_ops); #ifdef CONFIG_PROC_FS - proc_net_fops_create("unix", 0, &unix_seq_fops); + proc_net_fops_create(&init_net, "unix", 0, &unix_seq_fops); #endif unix_sysctl_register(); out: @@ -2146,7 +2132,7 @@ static void __exit af_unix_exit(void) { sock_unregister(PF_UNIX); unix_sysctl_unregister(); - proc_net_remove("unix"); + proc_net_remove(&init_net, "unix"); proto_unregister(&unix_proto); } diff --git a/net/wanrouter/wanproc.c b/net/wanrouter/wanproc.c index 236e7eaf1b7f4d6069efa61b16b4aedcc1ef238e..f2e54c3f064ebb64360339d9b68d5137df570124 100644 --- a/net/wanrouter/wanproc.c +++ b/net/wanrouter/wanproc.c @@ -29,6 +29,7 @@ #include #include +#include #include #define PROC_STATS_FORMAT "%30s: %12lu\n" @@ -287,7 +288,7 @@ static const struct file_operations wandev_fops = { int __init wanrouter_proc_init(void) { struct proc_dir_entry *p; - proc_router = proc_mkdir(ROUTER_NAME, proc_net); + proc_router = proc_mkdir(ROUTER_NAME, init_net.proc_net); if (!proc_router) goto fail; @@ -303,7 +304,7 @@ int __init wanrouter_proc_init(void) fail_stat: remove_proc_entry("config", proc_router); fail_config: - remove_proc_entry(ROUTER_NAME, proc_net); + remove_proc_entry(ROUTER_NAME, init_net.proc_net); fail: return -ENOMEM; } @@ -316,7 +317,7 @@ void wanrouter_proc_cleanup(void) { remove_proc_entry("config", proc_router); remove_proc_entry("status", proc_router); - remove_proc_entry(ROUTER_NAME, proc_net); + remove_proc_entry(ROUTER_NAME, init_net.proc_net); } /* diff --git a/net/wireless/Kconfig b/net/wireless/Kconfig index a228d56a91b848c6569a5f94ddb342f674a0d34c..6426055a8be01c5e7bd44c981827bc845d537aa3 100644 --- a/net/wireless/Kconfig +++ b/net/wireless/Kconfig @@ -1,6 +1,19 @@ config CFG80211 tristate "Improved wireless configuration API" +config NL80211 + bool "nl80211 new netlink interface support" + depends on CFG80211 + default y + ---help--- + This option turns on the new netlink interface + (nl80211) support in cfg80211. + + If =n, drivers using mac80211 will be configured via + wireless extension support provided by that subsystem. + + If unsure, say Y. + config WIRELESS_EXT bool "Wireless extensions" default n @@ -10,7 +23,9 @@ config WIRELESS_EXT Wireless extensions will be replaced by cfg80211 and will be required only by legacy drivers that implement - wireless extension handlers. + wireless extension handlers. This option does not + affect the wireless-extension backward compatibility + code in cfg80211. Say N (if you can) unless you know you need wireless extensions for external modules. 
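[Editor's note: userspace illustration of the namespace guard.] The unix_create() and tipc_create() hunks above add the same pattern: protocol families that are not yet namespace-aware refuse to create sockets in any network namespace other than the initial one and return -EAFNOSUPPORT. The miniature below reproduces only that guard; struct net, init_net and family_create() are stand-ins, not kernel definitions.

#include <errno.h>
#include <stdio.h>

struct net { int id; };

static struct net init_net = { 0 };

static int family_create(struct net *net, int protocol)
{
        if (net != &init_net)
                return -EAFNOSUPPORT;   /* only the initial namespace */
        if (protocol != 0)
                return -EPROTONOSUPPORT;
        return 0;
}

int main(void)
{
        struct net other = { 1 };

        printf("init_net: %d, other: %d\n",
               family_create(&init_net, 0), family_create(&other, 0));
        return 0;
}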
diff --git a/net/wireless/Makefile b/net/wireless/Makefile index 092116e390b67f8c093a8fe2484aabffd3bd33f5..65710a42e5a7890365b4478518653685ebe8e769 100644 --- a/net/wireless/Makefile +++ b/net/wireless/Makefile @@ -2,3 +2,4 @@ obj-$(CONFIG_WIRELESS_EXT) += wext.o obj-$(CONFIG_CFG80211) += cfg80211.o cfg80211-y += core.o sysfs.o radiotap.o +cfg80211-$(CONFIG_NL80211) += nl80211.o diff --git a/net/wireless/core.c b/net/wireless/core.c index 9771451eae217d9624db3efbac4e08f91ec9ab5c..febc33bc9c09f644e21a37c1eebebf0c07b04e5b 100644 --- a/net/wireless/core.c +++ b/net/wireless/core.c @@ -16,6 +16,7 @@ #include #include #include +#include "nl80211.h" #include "core.h" #include "sysfs.h" @@ -36,6 +37,146 @@ static int wiphy_counter; /* for debugfs */ static struct dentry *ieee80211_debugfs_dir; +/* requires cfg80211_drv_mutex to be held! */ +static struct cfg80211_registered_device *cfg80211_drv_by_wiphy(int wiphy) +{ + struct cfg80211_registered_device *result = NULL, *drv; + + list_for_each_entry(drv, &cfg80211_drv_list, list) { + if (drv->idx == wiphy) { + result = drv; + break; + } + } + + return result; +} + +/* requires cfg80211_drv_mutex to be held! */ +static struct cfg80211_registered_device * +__cfg80211_drv_from_info(struct genl_info *info) +{ + int ifindex; + struct cfg80211_registered_device *bywiphy = NULL, *byifidx = NULL; + struct net_device *dev; + int err = -EINVAL; + + if (info->attrs[NL80211_ATTR_WIPHY]) { + bywiphy = cfg80211_drv_by_wiphy( + nla_get_u32(info->attrs[NL80211_ATTR_WIPHY])); + err = -ENODEV; + } + + if (info->attrs[NL80211_ATTR_IFINDEX]) { + ifindex = nla_get_u32(info->attrs[NL80211_ATTR_IFINDEX]); + dev = dev_get_by_index(&init_net, ifindex); + if (dev) { + if (dev->ieee80211_ptr) + byifidx = + wiphy_to_dev(dev->ieee80211_ptr->wiphy); + dev_put(dev); + } + err = -ENODEV; + } + + if (bywiphy && byifidx) { + if (bywiphy != byifidx) + return ERR_PTR(-EINVAL); + else + return bywiphy; /* == byifidx */ + } + if (bywiphy) + return bywiphy; + + if (byifidx) + return byifidx; + + return ERR_PTR(err); +} + +struct cfg80211_registered_device * +cfg80211_get_dev_from_info(struct genl_info *info) +{ + struct cfg80211_registered_device *drv; + + mutex_lock(&cfg80211_drv_mutex); + drv = __cfg80211_drv_from_info(info); + + /* if it is not an error we grab the lock on + * it to assure it won't be going away while + * we operate on it */ + if (!IS_ERR(drv)) + mutex_lock(&drv->mtx); + + mutex_unlock(&cfg80211_drv_mutex); + + return drv; +} + +struct cfg80211_registered_device * +cfg80211_get_dev_from_ifindex(int ifindex) +{ + struct cfg80211_registered_device *drv = ERR_PTR(-ENODEV); + struct net_device *dev; + + mutex_lock(&cfg80211_drv_mutex); + dev = dev_get_by_index(&init_net, ifindex); + if (!dev) + goto out; + if (dev->ieee80211_ptr) { + drv = wiphy_to_dev(dev->ieee80211_ptr->wiphy); + mutex_lock(&drv->mtx); + } else + drv = ERR_PTR(-ENODEV); + dev_put(dev); + out: + mutex_unlock(&cfg80211_drv_mutex); + return drv; +} + +void cfg80211_put_dev(struct cfg80211_registered_device *drv) +{ + BUG_ON(IS_ERR(drv)); + mutex_unlock(&drv->mtx); +} + +int cfg80211_dev_rename(struct cfg80211_registered_device *rdev, + char *newname) +{ + int idx, taken = -1, result, digits; + + /* prohibit calling the thing phy%d when %d is not its number */ + sscanf(newname, PHY_NAME "%d%n", &idx, &taken); + if (taken == strlen(newname) && idx != rdev->idx) { + /* count number of places needed to print idx */ + digits = 1; + while (idx /= 10) + digits++; + /* + * deny the name if it is phy where is 
printed + * without leading zeroes. taken == strlen(newname) here + */ + if (taken == strlen(PHY_NAME) + digits) + return -EINVAL; + } + + /* this will check for collisions */ + result = device_rename(&rdev->wiphy.dev, newname); + if (result) + return result; + + if (!debugfs_rename(rdev->wiphy.debugfsdir->d_parent, + rdev->wiphy.debugfsdir, + rdev->wiphy.debugfsdir->d_parent, + newname)) + printk(KERN_ERR "cfg80211: failed to rename debugfs dir to %s!\n", + newname); + + nl80211_notify_dev_rename(rdev); + + return 0; +} + /* exported functions */ struct wiphy *wiphy_new(struct cfg80211_ops *ops, int sizeof_priv) @@ -204,10 +345,16 @@ static int cfg80211_init(void) if (err) goto out_fail_notifier; + err = nl80211_init(); + if (err) + goto out_fail_nl80211; + ieee80211_debugfs_dir = debugfs_create_dir("ieee80211", NULL); return 0; +out_fail_nl80211: + unregister_netdevice_notifier(&cfg80211_netdev_notifier); out_fail_notifier: wiphy_sysfs_exit(); out_fail_sysfs: @@ -218,6 +365,7 @@ subsys_initcall(cfg80211_init); static void cfg80211_exit(void) { debugfs_remove(ieee80211_debugfs_dir); + nl80211_exit(); unregister_netdevice_notifier(&cfg80211_netdev_notifier); wiphy_sysfs_exit(); } diff --git a/net/wireless/core.h b/net/wireless/core.h index 158db1edb92a98cc41d09d72864eb1a473f08bf8..eb0f846b40dff4ba9ae6a6b741c5758023b3366b 100644 --- a/net/wireless/core.h +++ b/net/wireless/core.h @@ -43,7 +43,39 @@ struct cfg80211_registered_device *wiphy_to_dev(struct wiphy *wiphy) extern struct mutex cfg80211_drv_mutex; extern struct list_head cfg80211_drv_list; +/* + * This function returns a pointer to the driver + * that the genl_info item that is passed refers to. + * If successful, it returns non-NULL and also locks + * the driver's mutex! + * + * This means that you need to call cfg80211_put_dev() + * before being allowed to acquire &cfg80211_drv_mutex! + * + * This is necessary because we need to lock the global + * mutex to get an item off the list safely, and then + * we lock the drv mutex so it doesn't go away under us. + * + * We don't want to keep cfg80211_drv_mutex locked + * for all the time in order to allow requests on + * other interfaces to go through at the same time. + * + * The result of this can be a PTR_ERR and hence must + * be checked with IS_ERR() for errors. + */ +extern struct cfg80211_registered_device * +cfg80211_get_dev_from_info(struct genl_info *info); + +/* identical to cfg80211_get_dev_from_info but only operate on ifindex */ +extern struct cfg80211_registered_device * +cfg80211_get_dev_from_ifindex(int ifindex); + +extern void cfg80211_put_dev(struct cfg80211_registered_device *drv); + /* free object */ extern void cfg80211_dev_free(struct cfg80211_registered_device *drv); +extern int cfg80211_dev_rename(struct cfg80211_registered_device *drv, + char *newname); + #endif /* __NET_WIRELESS_CORE_H */ diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c new file mode 100644 index 0000000000000000000000000000000000000000..48b0d453e4e161cb243130f295cfd0573bd9874d --- /dev/null +++ b/net/wireless/nl80211.c @@ -0,0 +1,431 @@ +/* + * This is the new netlink-based wireless configuration interface. 
+ * + * Copyright 2006, 2007 Johannes Berg + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "core.h" +#include "nl80211.h" + +/* the netlink family */ +static struct genl_family nl80211_fam = { + .id = GENL_ID_GENERATE, /* don't bother with a hardcoded ID */ + .name = "nl80211", /* have users key off the name instead */ + .hdrsize = 0, /* no private header */ + .version = 1, /* no particular meaning now */ + .maxattr = NL80211_ATTR_MAX, +}; + +/* internal helper: get drv and dev */ +static int get_drv_dev_by_info_ifindex(struct genl_info *info, + struct cfg80211_registered_device **drv, + struct net_device **dev) +{ + int ifindex; + + if (!info->attrs[NL80211_ATTR_IFINDEX]) + return -EINVAL; + + ifindex = nla_get_u32(info->attrs[NL80211_ATTR_IFINDEX]); + *dev = dev_get_by_index(&init_net, ifindex); + if (!*dev) + return -ENODEV; + + *drv = cfg80211_get_dev_from_ifindex(ifindex); + if (IS_ERR(*drv)) { + dev_put(*dev); + return PTR_ERR(*drv); + } + + return 0; +} + +/* policy for the attributes */ +static struct nla_policy nl80211_policy[NL80211_ATTR_MAX+1] __read_mostly = { + [NL80211_ATTR_WIPHY] = { .type = NLA_U32 }, + [NL80211_ATTR_WIPHY_NAME] = { .type = NLA_NUL_STRING, + .len = BUS_ID_SIZE-1 }, + + [NL80211_ATTR_IFTYPE] = { .type = NLA_U32 }, + [NL80211_ATTR_IFINDEX] = { .type = NLA_U32 }, + [NL80211_ATTR_IFNAME] = { .type = NLA_NUL_STRING, .len = IFNAMSIZ-1 }, +}; + +/* message building helper */ +static inline void *nl80211hdr_put(struct sk_buff *skb, u32 pid, u32 seq, + int flags, u8 cmd) +{ + /* since there is no private header just add the generic one */ + return genlmsg_put(skb, pid, seq, &nl80211_fam, flags, cmd); +} + +/* netlink command implementations */ + +static int nl80211_send_wiphy(struct sk_buff *msg, u32 pid, u32 seq, int flags, + struct cfg80211_registered_device *dev) +{ + void *hdr; + + hdr = nl80211hdr_put(msg, pid, seq, flags, NL80211_CMD_NEW_WIPHY); + if (!hdr) + return -1; + + NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, dev->idx); + NLA_PUT_STRING(msg, NL80211_ATTR_WIPHY_NAME, wiphy_name(&dev->wiphy)); + return genlmsg_end(msg, hdr); + + nla_put_failure: + return genlmsg_cancel(msg, hdr); +} + +static int nl80211_dump_wiphy(struct sk_buff *skb, struct netlink_callback *cb) +{ + int idx = 0; + int start = cb->args[0]; + struct cfg80211_registered_device *dev; + + mutex_lock(&cfg80211_drv_mutex); + list_for_each_entry(dev, &cfg80211_drv_list, list) { + if (++idx < start) + continue; + if (nl80211_send_wiphy(skb, NETLINK_CB(cb->skb).pid, + cb->nlh->nlmsg_seq, NLM_F_MULTI, + dev) < 0) + break; + } + mutex_unlock(&cfg80211_drv_mutex); + + cb->args[0] = idx; + + return skb->len; +} + +static int nl80211_get_wiphy(struct sk_buff *skb, struct genl_info *info) +{ + struct sk_buff *msg; + struct cfg80211_registered_device *dev; + + dev = cfg80211_get_dev_from_info(info); + if (IS_ERR(dev)) + return PTR_ERR(dev); + + msg = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL); + if (!msg) + goto out_err; + + if (nl80211_send_wiphy(msg, info->snd_pid, info->snd_seq, 0, dev) < 0) + goto out_free; + + cfg80211_put_dev(dev); + + return genlmsg_unicast(msg, info->snd_pid); + + out_free: + nlmsg_free(msg); + out_err: + cfg80211_put_dev(dev); + return -ENOBUFS; +} + +static int nl80211_set_wiphy(struct sk_buff *skb, struct genl_info *info) +{ + struct cfg80211_registered_device *rdev; + int result; + + if (!info->attrs[NL80211_ATTR_WIPHY_NAME]) + return -EINVAL; + + rdev = cfg80211_get_dev_from_info(info); + if 
(IS_ERR(rdev)) + return PTR_ERR(rdev); + + result = cfg80211_dev_rename(rdev, nla_data(info->attrs[NL80211_ATTR_WIPHY_NAME])); + + cfg80211_put_dev(rdev); + return result; +} + + +static int nl80211_send_iface(struct sk_buff *msg, u32 pid, u32 seq, int flags, + struct net_device *dev) +{ + void *hdr; + + hdr = nl80211hdr_put(msg, pid, seq, flags, NL80211_CMD_NEW_INTERFACE); + if (!hdr) + return -1; + + NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, dev->ifindex); + NLA_PUT_STRING(msg, NL80211_ATTR_IFNAME, dev->name); + /* TODO: interface type */ + return genlmsg_end(msg, hdr); + + nla_put_failure: + return genlmsg_cancel(msg, hdr); +} + +static int nl80211_dump_interface(struct sk_buff *skb, struct netlink_callback *cb) +{ + int wp_idx = 0; + int if_idx = 0; + int wp_start = cb->args[0]; + int if_start = cb->args[1]; + struct cfg80211_registered_device *dev; + struct wireless_dev *wdev; + + mutex_lock(&cfg80211_drv_mutex); + list_for_each_entry(dev, &cfg80211_drv_list, list) { + if (++wp_idx < wp_start) + continue; + if_idx = 0; + + mutex_lock(&dev->devlist_mtx); + list_for_each_entry(wdev, &dev->netdev_list, list) { + if (++if_idx < if_start) + continue; + if (nl80211_send_iface(skb, NETLINK_CB(cb->skb).pid, + cb->nlh->nlmsg_seq, NLM_F_MULTI, + wdev->netdev) < 0) + break; + } + mutex_unlock(&dev->devlist_mtx); + } + mutex_unlock(&cfg80211_drv_mutex); + + cb->args[0] = wp_idx; + cb->args[1] = if_idx; + + return skb->len; +} + +static int nl80211_get_interface(struct sk_buff *skb, struct genl_info *info) +{ + struct sk_buff *msg; + struct cfg80211_registered_device *dev; + struct net_device *netdev; + int err; + + err = get_drv_dev_by_info_ifindex(info, &dev, &netdev); + if (err) + return err; + + msg = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL); + if (!msg) + goto out_err; + + if (nl80211_send_iface(msg, info->snd_pid, info->snd_seq, 0, netdev) < 0) + goto out_free; + + dev_put(netdev); + cfg80211_put_dev(dev); + + return genlmsg_unicast(msg, info->snd_pid); + + out_free: + nlmsg_free(msg); + out_err: + dev_put(netdev); + cfg80211_put_dev(dev); + return -ENOBUFS; +} + +static int nl80211_set_interface(struct sk_buff *skb, struct genl_info *info) +{ + struct cfg80211_registered_device *drv; + int err, ifindex; + enum nl80211_iftype type; + struct net_device *dev; + + if (info->attrs[NL80211_ATTR_IFTYPE]) { + type = nla_get_u32(info->attrs[NL80211_ATTR_IFTYPE]); + if (type > NL80211_IFTYPE_MAX) + return -EINVAL; + } else + return -EINVAL; + + err = get_drv_dev_by_info_ifindex(info, &drv, &dev); + if (err) + return err; + ifindex = dev->ifindex; + dev_put(dev); + + if (!drv->ops->change_virtual_intf) { + err = -EOPNOTSUPP; + goto unlock; + } + + rtnl_lock(); + err = drv->ops->change_virtual_intf(&drv->wiphy, ifindex, type); + rtnl_unlock(); + + unlock: + cfg80211_put_dev(drv); + return err; +} + +static int nl80211_new_interface(struct sk_buff *skb, struct genl_info *info) +{ + struct cfg80211_registered_device *drv; + int err; + enum nl80211_iftype type = NL80211_IFTYPE_UNSPECIFIED; + + if (!info->attrs[NL80211_ATTR_IFNAME]) + return -EINVAL; + + if (info->attrs[NL80211_ATTR_IFTYPE]) { + type = nla_get_u32(info->attrs[NL80211_ATTR_IFTYPE]); + if (type > NL80211_IFTYPE_MAX) + return -EINVAL; + } + + drv = cfg80211_get_dev_from_info(info); + if (IS_ERR(drv)) + return PTR_ERR(drv); + + if (!drv->ops->add_virtual_intf) { + err = -EOPNOTSUPP; + goto unlock; + } + + rtnl_lock(); + err = drv->ops->add_virtual_intf(&drv->wiphy, + nla_data(info->attrs[NL80211_ATTR_IFNAME]), type); + rtnl_unlock(); + + unlock: + 
cfg80211_put_dev(drv); + return err; +} + +static int nl80211_del_interface(struct sk_buff *skb, struct genl_info *info) +{ + struct cfg80211_registered_device *drv; + int ifindex, err; + struct net_device *dev; + + err = get_drv_dev_by_info_ifindex(info, &drv, &dev); + if (err) + return err; + ifindex = dev->ifindex; + dev_put(dev); + + if (!drv->ops->del_virtual_intf) { + err = -EOPNOTSUPP; + goto out; + } + + rtnl_lock(); + err = drv->ops->del_virtual_intf(&drv->wiphy, ifindex); + rtnl_unlock(); + + out: + cfg80211_put_dev(drv); + return err; +} + +static struct genl_ops nl80211_ops[] = { + { + .cmd = NL80211_CMD_GET_WIPHY, + .doit = nl80211_get_wiphy, + .dumpit = nl80211_dump_wiphy, + .policy = nl80211_policy, + /* can be retrieved by unprivileged users */ + }, + { + .cmd = NL80211_CMD_SET_WIPHY, + .doit = nl80211_set_wiphy, + .policy = nl80211_policy, + .flags = GENL_ADMIN_PERM, + }, + { + .cmd = NL80211_CMD_GET_INTERFACE, + .doit = nl80211_get_interface, + .dumpit = nl80211_dump_interface, + .policy = nl80211_policy, + /* can be retrieved by unprivileged users */ + }, + { + .cmd = NL80211_CMD_SET_INTERFACE, + .doit = nl80211_set_interface, + .policy = nl80211_policy, + .flags = GENL_ADMIN_PERM, + }, + { + .cmd = NL80211_CMD_NEW_INTERFACE, + .doit = nl80211_new_interface, + .policy = nl80211_policy, + .flags = GENL_ADMIN_PERM, + }, + { + .cmd = NL80211_CMD_DEL_INTERFACE, + .doit = nl80211_del_interface, + .policy = nl80211_policy, + .flags = GENL_ADMIN_PERM, + }, +}; + +/* multicast groups */ +static struct genl_multicast_group nl80211_config_mcgrp = { + .name = "config", +}; + +/* notification functions */ + +void nl80211_notify_dev_rename(struct cfg80211_registered_device *rdev) +{ + struct sk_buff *msg; + + msg = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL); + if (!msg) + return; + + if (nl80211_send_wiphy(msg, 0, 0, 0, rdev) < 0) { + nlmsg_free(msg); + return; + } + + genlmsg_multicast(msg, 0, nl80211_config_mcgrp.id, GFP_KERNEL); +} + +/* initialisation/exit functions */ + +int nl80211_init(void) +{ + int err, i; + + err = genl_register_family(&nl80211_fam); + if (err) + return err; + + for (i = 0; i < ARRAY_SIZE(nl80211_ops); i++) { + err = genl_register_ops(&nl80211_fam, &nl80211_ops[i]); + if (err) + goto err_out; + } + + err = genl_register_mc_group(&nl80211_fam, &nl80211_config_mcgrp); + if (err) + goto err_out; + + return 0; + err_out: + genl_unregister_family(&nl80211_fam); + return err; +} + +void nl80211_exit(void) +{ + genl_unregister_family(&nl80211_fam); +} diff --git a/net/wireless/nl80211.h b/net/wireless/nl80211.h new file mode 100644 index 0000000000000000000000000000000000000000..f3ea5c029aeeaf8e19342471b6933408e9a29cd5 --- /dev/null +++ b/net/wireless/nl80211.h @@ -0,0 +1,24 @@ +#ifndef __NET_WIRELESS_NL80211_H +#define __NET_WIRELESS_NL80211_H + +#include "core.h" + +#ifdef CONFIG_NL80211 +extern int nl80211_init(void); +extern void nl80211_exit(void); +extern void nl80211_notify_dev_rename(struct cfg80211_registered_device *rdev); +#else +static inline int nl80211_init(void) +{ + return 0; +} +static inline void nl80211_exit(void) +{ +} +static inline void nl80211_notify_dev_rename( + struct cfg80211_registered_device *rdev) +{ +} +#endif /* CONFIG_NL80211 */ + +#endif /* __NET_WIRELESS_NL80211_H */ diff --git a/net/wireless/radiotap.c b/net/wireless/radiotap.c index 68c11d0999175f2e7a2414f7346d1ad8c5929872..28fbd0b0b5688c23462641d9eeb5f918ae93be9c 100644 --- a/net/wireless/radiotap.c +++ b/net/wireless/radiotap.c @@ -161,7 +161,11 @@ int 
ieee80211_radiotap_iterator_next( [IEEE80211_RADIOTAP_DBM_TX_POWER] = 0x11, [IEEE80211_RADIOTAP_ANTENNA] = 0x11, [IEEE80211_RADIOTAP_DB_ANTSIGNAL] = 0x11, - [IEEE80211_RADIOTAP_DB_ANTNOISE] = 0x11 + [IEEE80211_RADIOTAP_DB_ANTNOISE] = 0x11, + [IEEE80211_RADIOTAP_RX_FLAGS] = 0x22, + [IEEE80211_RADIOTAP_TX_FLAGS] = 0x22, + [IEEE80211_RADIOTAP_RTS_RETRIES] = 0x11, + [IEEE80211_RADIOTAP_DATA_RETRIES] = 0x11, /* * add more here as they are defined in * include/net/ieee80211_radiotap.h diff --git a/net/wireless/wext.c b/net/wireless/wext.c index d6aaf65192e90f14301c76870f8c8a1ec7238b8d..85e5f9dd0d8e757f03e69ac08966addff50bd163 100644 --- a/net/wireless/wext.c +++ b/net/wireless/wext.c @@ -93,6 +93,7 @@ #include /* ARPHRD_ETHER */ #include /* compare_ether_addr */ #include +#include #include /* Pretty obvious */ #include /* New driver API */ @@ -672,7 +673,26 @@ static const struct seq_operations wireless_seq_ops = { static int wireless_seq_open(struct inode *inode, struct file *file) { - return seq_open(file, &wireless_seq_ops); + struct seq_file *seq; + int res; + res = seq_open(file, &wireless_seq_ops); + if (!res) { + seq = file->private_data; + seq->private = get_proc_net(inode); + if (!seq->private) { + seq_release(inode, file); + res = -ENXIO; + } + } + return res; +} + +static int wireless_seq_release(struct inode *inode, struct file *file) +{ + struct seq_file *seq = file->private_data; + struct net *net = seq->private; + put_net(net); + return seq_release(inode, file); } static const struct file_operations wireless_seq_fops = { @@ -680,17 +700,22 @@ static const struct file_operations wireless_seq_fops = { .open = wireless_seq_open, .read = seq_read, .llseek = seq_lseek, - .release = seq_release, + .release = wireless_seq_release, }; -int __init wext_proc_init(void) +int wext_proc_init(struct net *net) { /* Create /proc/net/wireless entry */ - if (!proc_net_fops_create("wireless", S_IRUGO, &wireless_seq_fops)) + if (!proc_net_fops_create(net, "wireless", S_IRUGO, &wireless_seq_fops)) return -ENOMEM; return 0; } + +void wext_proc_exit(struct net *net) +{ + proc_net_remove(net, "wireless"); +} #endif /* CONFIG_PROC_FS */ /************************** IOCTL SUPPORT **************************/ @@ -1010,7 +1035,7 @@ static int ioctl_private_call(struct net_device *dev, struct ifreq *ifr, * Main IOCTl dispatcher. * Check the type of IOCTL and call the appropriate wrapper... */ -static int wireless_process_ioctl(struct ifreq *ifr, unsigned int cmd) +static int wireless_process_ioctl(struct net *net, struct ifreq *ifr, unsigned int cmd) { struct net_device *dev; iw_handler handler; @@ -1019,7 +1044,7 @@ static int wireless_process_ioctl(struct ifreq *ifr, unsigned int cmd) * The copy_to/from_user() of ifr is also dealt with in there */ /* Make sure the device exist */ - if ((dev = __dev_get_by_name(ifr->ifr_name)) == NULL) + if ((dev = __dev_get_by_name(net, ifr->ifr_name)) == NULL) return -ENODEV; /* A bunch of special cases, then the generic case... 
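Editor's note: alongside the radiotap table additions, the wext.c hunks above make /proc/net/wireless namespace-aware: the seq_file open handler now pins the owning struct net via get_proc_net(), and a new release handler drops that reference with put_net(). A condensed sketch of that pairing, using illustrative example_ns_* names but mirroring the wireless_seq_open()/wireless_seq_release() logic shown above:

#include <linux/fs.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <net/net_namespace.h>

static const struct seq_operations example_ns_seq_ops = {
	/* placeholder iterator callbacks */
};

static int example_ns_seq_open(struct inode *inode, struct file *file)
{
	struct seq_file *seq;
	int res = seq_open(file, &example_ns_seq_ops);

	if (!res) {
		seq = file->private_data;
		seq->private = get_proc_net(inode);	/* hold the netns */
		if (!seq->private) {
			seq_release(inode, file);
			res = -ENXIO;
		}
	}
	return res;
}

static int example_ns_seq_release(struct inode *inode, struct file *file)
{
	struct seq_file *seq = file->private_data;
	struct net *net = seq->private;

	put_net(net);				/* balance get_proc_net() */
	return seq_release(inode, file);
}
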
@@ -1053,7 +1078,7 @@ static int wireless_process_ioctl(struct ifreq *ifr, unsigned int cmd) } /* entry point from dev ioctl */ -int wext_handle_ioctl(struct ifreq *ifr, unsigned int cmd, +int wext_handle_ioctl(struct net *net, struct ifreq *ifr, unsigned int cmd, void __user *arg) { int ret; @@ -1065,9 +1090,9 @@ int wext_handle_ioctl(struct ifreq *ifr, unsigned int cmd, && !capable(CAP_NET_ADMIN)) return -EPERM; - dev_load(ifr->ifr_name); + dev_load(net, ifr->ifr_name); rtnl_lock(); - ret = wireless_process_ioctl(ifr, cmd); + ret = wireless_process_ioctl(net, ifr, cmd); rtnl_unlock(); if (IW_IS_GET(cmd) && copy_to_user(arg, ifr, sizeof(struct ifreq))) return -EFAULT; @@ -1129,10 +1154,12 @@ static int rtnetlink_fill_iwinfo(struct sk_buff *skb, struct net_device *dev, { struct ifinfomsg *r; struct nlmsghdr *nlh; - unsigned char *b = skb_tail_pointer(skb); - nlh = NLMSG_PUT(skb, 0, 0, type, sizeof(*r)); - r = NLMSG_DATA(nlh); + nlh = nlmsg_put(skb, 0, 0, type, sizeof(*r), 0); + if (nlh == NULL) + return -EMSGSIZE; + + r = nlmsg_data(nlh); r->ifi_family = AF_UNSPEC; r->__ifi_pad = 0; r->ifi_type = dev->type; @@ -1141,15 +1168,13 @@ static int rtnetlink_fill_iwinfo(struct sk_buff *skb, struct net_device *dev, r->ifi_change = 0; /* Wireless changes don't affect those flags */ /* Add the wireless events in the netlink packet */ - RTA_PUT(skb, IFLA_WIRELESS, event_len, event); + NLA_PUT(skb, IFLA_WIRELESS, event_len, event); - nlh->nlmsg_len = skb_tail_pointer(skb) - b; - return skb->len; + return nlmsg_end(skb, nlh); -nlmsg_failure: -rtattr_failure: - nlmsg_trim(skb, b); - return -1; +nla_put_failure: + nlmsg_cancel(skb, nlh); + return -EMSGSIZE; } /* ---------------------------------------------------------------- */ @@ -1162,17 +1187,19 @@ static int rtnetlink_fill_iwinfo(struct sk_buff *skb, struct net_device *dev, static void rtmsg_iwinfo(struct net_device *dev, char *event, int event_len) { struct sk_buff *skb; - int size = NLMSG_GOODSIZE; + int err; - skb = alloc_skb(size, GFP_ATOMIC); + skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC); if (!skb) return; - if (rtnetlink_fill_iwinfo(skb, dev, RTM_NEWLINK, - event, event_len) < 0) { + err = rtnetlink_fill_iwinfo(skb, dev, RTM_NEWLINK, event, event_len); + if (err < 0) { + WARN_ON(err == -EMSGSIZE); kfree_skb(skb); return; } + NETLINK_CB(skb).dst_group = RTNLGRP_LINK; skb_queue_tail(&wireless_nlevent_queue, skb); tasklet_schedule(&wireless_nlevent_tasklet); diff --git a/net/x25/af_x25.c b/net/x25/af_x25.c index 479927cb45cacd3c8f3f797a916bb51edf0cbbfc..fc416f9606a9e259e22f2e641c602107e06b9200 100644 --- a/net/x25/af_x25.c +++ b/net/x25/af_x25.c @@ -191,6 +191,9 @@ static int x25_device_event(struct notifier_block *this, unsigned long event, struct net_device *dev = ptr; struct x25_neigh *nb; + if (dev->nd_net != &init_net) + return NOTIFY_DONE; + if (dev->type == ARPHRD_X25 #if defined(CONFIG_LLC) || defined(CONFIG_LLC_MODULE) || dev->type == ARPHRD_ETHER @@ -466,10 +469,10 @@ static struct proto x25_proto = { .obj_size = sizeof(struct x25_sock), }; -static struct sock *x25_alloc_socket(void) +static struct sock *x25_alloc_socket(struct net *net) { struct x25_sock *x25; - struct sock *sk = sk_alloc(AF_X25, GFP_ATOMIC, &x25_proto, 1); + struct sock *sk = sk_alloc(net, AF_X25, GFP_ATOMIC, &x25_proto, 1); if (!sk) goto out; @@ -485,17 +488,20 @@ static struct sock *x25_alloc_socket(void) return sk; } -static int x25_create(struct socket *sock, int protocol) +static int x25_create(struct net *net, struct socket *sock, int protocol) { struct sock 
*sk; struct x25_sock *x25; int rc = -ESOCKTNOSUPPORT; + if (net != &init_net) + return -EAFNOSUPPORT; + if (sock->type != SOCK_SEQPACKET || protocol) goto out; rc = -ENOMEM; - if ((sk = x25_alloc_socket()) == NULL) + if ((sk = x25_alloc_socket(net)) == NULL) goto out; x25 = x25_sk(sk); @@ -543,7 +549,7 @@ static struct sock *x25_make_new(struct sock *osk) if (osk->sk_type != SOCK_SEQPACKET) goto out; - if ((sk = x25_alloc_socket()) == NULL) + if ((sk = x25_alloc_socket(osk->sk_net)) == NULL) goto out; x25 = x25_sk(sk); diff --git a/net/x25/x25_dev.c b/net/x25/x25_dev.c index 848a6b6f90a658d98167916e3d118a1907df28ba..f0679d283110885e4e7f29f7358e07df3c753e49 100644 --- a/net/x25/x25_dev.c +++ b/net/x25/x25_dev.c @@ -95,6 +95,9 @@ int x25_lapb_receive_frame(struct sk_buff *skb, struct net_device *dev, struct sk_buff *nskb; struct x25_neigh *nb; + if (dev->nd_net != &init_net) + goto drop; + nskb = skb_copy(skb, GFP_ATOMIC); if (!nskb) goto drop; diff --git a/net/x25/x25_proc.c b/net/x25/x25_proc.c index 7405b9c5b7f2dfa2734adf7946515f0219f67728..7d55e50c936f9a2fe11daeba85ece6099815b858 100644 --- a/net/x25/x25_proc.c +++ b/net/x25/x25_proc.c @@ -20,6 +20,7 @@ #include #include #include +#include #include #include @@ -301,7 +302,7 @@ int __init x25_proc_init(void) struct proc_dir_entry *p; int rc = -ENOMEM; - x25_proc_dir = proc_mkdir("x25", proc_net); + x25_proc_dir = proc_mkdir("x25", init_net.proc_net); if (!x25_proc_dir) goto out; @@ -328,7 +329,7 @@ int __init x25_proc_init(void) out_socket: remove_proc_entry("route", x25_proc_dir); out_route: - remove_proc_entry("x25", proc_net); + remove_proc_entry("x25", init_net.proc_net); goto out; } @@ -337,7 +338,7 @@ void __exit x25_proc_exit(void) remove_proc_entry("forward", x25_proc_dir); remove_proc_entry("route", x25_proc_dir); remove_proc_entry("socket", x25_proc_dir); - remove_proc_entry("x25", proc_net); + remove_proc_entry("x25", init_net.proc_net); } #else /* CONFIG_PROC_FS */ diff --git a/net/x25/x25_route.c b/net/x25/x25_route.c index 060fcfaa2f47c3ec8465348336dd4a095546b22e..86b5b4da097c5c17065b17c87160555b2fa94ea0 100644 --- a/net/x25/x25_route.c +++ b/net/x25/x25_route.c @@ -129,7 +129,7 @@ void x25_route_device_down(struct net_device *dev) */ struct net_device *x25_dev_get(char *devname) { - struct net_device *dev = dev_get_by_name(devname); + struct net_device *dev = dev_get_by_name(&init_net, devname); if (dev && (!(dev->flags & IFF_UP) || (dev->type != ARPHRD_X25 diff --git a/net/xfrm/Makefile b/net/xfrm/Makefile index de3c1a625a46f8fbc32b148f3eda70e62d3ece40..45744a3d3a51ed240e8eee2c780b8837a2ba395e 100644 --- a/net/xfrm/Makefile +++ b/net/xfrm/Makefile @@ -3,6 +3,6 @@ # obj-$(CONFIG_XFRM) := xfrm_policy.o xfrm_state.o xfrm_hash.o \ - xfrm_input.o xfrm_algo.o + xfrm_input.o xfrm_output.o xfrm_algo.o obj-$(CONFIG_XFRM_USER) += xfrm_user.o diff --git a/net/xfrm/xfrm_output.c b/net/xfrm/xfrm_output.c new file mode 100644 index 0000000000000000000000000000000000000000..0eb3377602e9c3c9b413072f1db6f0b5a915b8e5 --- /dev/null +++ b/net/xfrm/xfrm_output.c @@ -0,0 +1,95 @@ +/* + * xfrm_output.c - Common IPsec encapsulation code. + * + * Copyright (c) 2007 Herbert Xu + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. 
+ */ + +#include +#include +#include +#include +#include +#include +#include + +static int xfrm_state_check_space(struct xfrm_state *x, struct sk_buff *skb) +{ + int nhead = x->props.header_len + LL_RESERVED_SPACE(skb->dst->dev) + - skb_headroom(skb); + + if (nhead > 0) + return pskb_expand_head(skb, nhead, 0, GFP_ATOMIC); + + /* Check tail too... */ + return 0; +} + +static int xfrm_state_check(struct xfrm_state *x, struct sk_buff *skb) +{ + int err = xfrm_state_check_expire(x); + if (err < 0) + goto err; + err = xfrm_state_check_space(x, skb); +err: + return err; +} + +int xfrm_output(struct sk_buff *skb) +{ + struct dst_entry *dst = skb->dst; + struct xfrm_state *x = dst->xfrm; + int err; + + if (skb->ip_summed == CHECKSUM_PARTIAL) { + err = skb_checksum_help(skb); + if (err) + goto error_nolock; + } + + do { + spin_lock_bh(&x->lock); + err = xfrm_state_check(x, skb); + if (err) + goto error; + + if (x->type->flags & XFRM_TYPE_REPLAY_PROT) { + XFRM_SKB_CB(skb)->seq = ++x->replay.oseq; + if (xfrm_aevent_is_on()) + xfrm_replay_notify(x, XFRM_REPLAY_UPDATE); + } + + err = x->mode->output(x, skb); + if (err) + goto error; + + x->curlft.bytes += skb->len; + x->curlft.packets++; + + spin_unlock_bh(&x->lock); + + err = x->type->output(x, skb); + if (err) + goto error_nolock; + + if (!(skb->dst = dst_pop(dst))) { + err = -EHOSTUNREACH; + goto error_nolock; + } + dst = skb->dst; + x = dst->xfrm; + } while (x && (x->props.mode != XFRM_MODE_TUNNEL)); + + err = 0; + +error_nolock: + return err; +error: + spin_unlock_bh(&x->lock); + goto error_nolock; +} +EXPORT_SYMBOL_GPL(xfrm_output); diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c index 7012891d39f2c6757b1c50784299043d353cc07a..af27c193697c5ceede169a36588ed36cbd4e4abb 100644 --- a/net/xfrm/xfrm_policy.c +++ b/net/xfrm/xfrm_policy.c @@ -23,7 +23,6 @@ #include #include #include -#include #include #include @@ -850,10 +849,9 @@ xfrm_policy_flush_secctx_check(u8 type, struct xfrm_audit *audit_info) continue; err = security_xfrm_policy_delete(pol); if (err) { - xfrm_audit_log(audit_info->loginuid, - audit_info->secid, - AUDIT_MAC_IPSEC_DELSPD, 0, - pol, NULL); + xfrm_audit_policy_delete(pol, 0, + audit_info->loginuid, + audit_info->secid); return err; } } @@ -865,10 +863,9 @@ xfrm_policy_flush_secctx_check(u8 type, struct xfrm_audit *audit_info) continue; err = security_xfrm_policy_delete(pol); if (err) { - xfrm_audit_log(audit_info->loginuid, - audit_info->secid, - AUDIT_MAC_IPSEC_DELSPD, - 0, pol, NULL); + xfrm_audit_policy_delete(pol, 0, + audit_info->loginuid, + audit_info->secid); return err; } } @@ -909,8 +906,8 @@ int xfrm_policy_flush(u8 type, struct xfrm_audit *audit_info) hlist_del(&pol->byidx); write_unlock_bh(&xfrm_policy_lock); - xfrm_audit_log(audit_info->loginuid, audit_info->secid, - AUDIT_MAC_IPSEC_DELSPD, 1, pol, NULL); + xfrm_audit_policy_delete(pol, 1, audit_info->loginuid, + audit_info->secid); xfrm_policy_kill(pol); killed++; @@ -930,11 +927,9 @@ int xfrm_policy_flush(u8 type, struct xfrm_audit *audit_info) hlist_del(&pol->byidx); write_unlock_bh(&xfrm_policy_lock); - xfrm_audit_log(audit_info->loginuid, - audit_info->secid, - AUDIT_MAC_IPSEC_DELSPD, 1, - pol, NULL); - + xfrm_audit_policy_delete(pol, 1, + audit_info->loginuid, + audit_info->secid); xfrm_policy_kill(pol); killed++; @@ -1477,7 +1472,7 @@ int __xfrm_lookup(struct dst_entry **dst_p, struct flowi *fl, pol_dead = 0; xfrm_nr = 0; - if (sk && sk->sk_policy[1]) { + if (sk && sk->sk_policy[XFRM_POLICY_OUT]) { policy = xfrm_sk_policy_lookup(sk, XFRM_POLICY_OUT, 
fl); if (IS_ERR(policy)) return PTR_ERR(policy); @@ -1687,17 +1682,13 @@ static inline int xfrm_secpath_reject(int idx, struct sk_buff *skb, struct flowi *fl) { struct xfrm_state *x; - int err; if (!skb->sp || idx < 0 || idx >= skb->sp->len) return 0; x = skb->sp->xvec[idx]; if (!x->type->reject) return 0; - xfrm_state_hold(x); - err = x->type->reject(x, skb, fl); - xfrm_state_put(x); - return err; + return x->type->reject(x, skb, fl); } /* When skb is transformed back to its "native" form, we have to @@ -1954,8 +1945,8 @@ static int stale_bundle(struct dst_entry *dst) void xfrm_dst_ifdown(struct dst_entry *dst, struct net_device *dev) { while ((dst = dst->child) && dst->xfrm && dst->dev == dev) { - dst->dev = &loopback_dev; - dev_hold(&loopback_dev); + dst->dev = init_net.loopback_dev; + dev_hold(dst->dev); dev_put(dev); } } @@ -2150,123 +2141,6 @@ int xfrm_bundle_ok(struct xfrm_policy *pol, struct xfrm_dst *first, EXPORT_SYMBOL(xfrm_bundle_ok); -#ifdef CONFIG_AUDITSYSCALL -/* Audit addition and deletion of SAs and ipsec policy */ - -void xfrm_audit_log(uid_t auid, u32 sid, int type, int result, - struct xfrm_policy *xp, struct xfrm_state *x) -{ - - char *secctx; - u32 secctx_len; - struct xfrm_sec_ctx *sctx = NULL; - struct audit_buffer *audit_buf; - int family; - extern int audit_enabled; - - if (audit_enabled == 0) - return; - - BUG_ON((type == AUDIT_MAC_IPSEC_ADDSA || - type == AUDIT_MAC_IPSEC_DELSA) && !x); - BUG_ON((type == AUDIT_MAC_IPSEC_ADDSPD || - type == AUDIT_MAC_IPSEC_DELSPD) && !xp); - - audit_buf = audit_log_start(current->audit_context, GFP_ATOMIC, type); - if (audit_buf == NULL) - return; - - switch(type) { - case AUDIT_MAC_IPSEC_ADDSA: - audit_log_format(audit_buf, "SAD add: auid=%u", auid); - break; - case AUDIT_MAC_IPSEC_DELSA: - audit_log_format(audit_buf, "SAD delete: auid=%u", auid); - break; - case AUDIT_MAC_IPSEC_ADDSPD: - audit_log_format(audit_buf, "SPD add: auid=%u", auid); - break; - case AUDIT_MAC_IPSEC_DELSPD: - audit_log_format(audit_buf, "SPD delete: auid=%u", auid); - break; - default: - return; - } - - if (sid != 0 && - security_secid_to_secctx(sid, &secctx, &secctx_len) == 0) { - audit_log_format(audit_buf, " subj=%s", secctx); - security_release_secctx(secctx, secctx_len); - } else - audit_log_task_context(audit_buf); - - if (xp) { - family = xp->selector.family; - if (xp->security) - sctx = xp->security; - } else { - family = x->props.family; - if (x->security) - sctx = x->security; - } - - if (sctx) - audit_log_format(audit_buf, - " sec_alg=%u sec_doi=%u sec_obj=%s", - sctx->ctx_alg, sctx->ctx_doi, sctx->ctx_str); - - switch(family) { - case AF_INET: - { - struct in_addr saddr, daddr; - if (xp) { - saddr.s_addr = xp->selector.saddr.a4; - daddr.s_addr = xp->selector.daddr.a4; - } else { - saddr.s_addr = x->props.saddr.a4; - daddr.s_addr = x->id.daddr.a4; - } - audit_log_format(audit_buf, - " src=%u.%u.%u.%u dst=%u.%u.%u.%u", - NIPQUAD(saddr), NIPQUAD(daddr)); - } - break; - case AF_INET6: - { - struct in6_addr saddr6, daddr6; - if (xp) { - memcpy(&saddr6, xp->selector.saddr.a6, - sizeof(struct in6_addr)); - memcpy(&daddr6, xp->selector.daddr.a6, - sizeof(struct in6_addr)); - } else { - memcpy(&saddr6, x->props.saddr.a6, - sizeof(struct in6_addr)); - memcpy(&daddr6, x->id.daddr.a6, - sizeof(struct in6_addr)); - } - audit_log_format(audit_buf, - " src=" NIP6_FMT " dst=" NIP6_FMT, - NIP6(saddr6), NIP6(daddr6)); - } - break; - } - - if (x) - audit_log_format(audit_buf, " spi=%lu(0x%lx) protocol=%s", - (unsigned long)ntohl(x->id.spi), - (unsigned 
long)ntohl(x->id.spi), - x->id.proto == IPPROTO_AH ? "AH" : - (x->id.proto == IPPROTO_ESP ? - "ESP" : "IPCOMP")); - - audit_log_format(audit_buf, " res=%u", result); - audit_log_end(audit_buf); -} - -EXPORT_SYMBOL(xfrm_audit_log); -#endif /* CONFIG_AUDITSYSCALL */ - int xfrm_policy_register_afinfo(struct xfrm_policy_afinfo *afinfo) { int err = 0; @@ -2358,6 +2232,11 @@ static void xfrm_policy_unlock_afinfo(struct xfrm_policy_afinfo *afinfo) static int xfrm_dev_event(struct notifier_block *this, unsigned long event, void *ptr) { + struct net_device *dev = ptr; + + if (dev->nd_net != &init_net) + return NOTIFY_DONE; + switch (event) { case NETDEV_DOWN: xfrm_flush_bundles(); @@ -2412,6 +2291,72 @@ void __init xfrm_init(void) xfrm_input_init(); } +#ifdef CONFIG_AUDITSYSCALL +static inline void xfrm_audit_common_policyinfo(struct xfrm_policy *xp, + struct audit_buffer *audit_buf) +{ + if (xp->security) + audit_log_format(audit_buf, " sec_alg=%u sec_doi=%u sec_obj=%s", + xp->security->ctx_alg, xp->security->ctx_doi, + xp->security->ctx_str); + + switch(xp->selector.family) { + case AF_INET: + audit_log_format(audit_buf, " src=%u.%u.%u.%u dst=%u.%u.%u.%u", + NIPQUAD(xp->selector.saddr.a4), + NIPQUAD(xp->selector.daddr.a4)); + break; + case AF_INET6: + { + struct in6_addr saddr6, daddr6; + + memcpy(&saddr6, xp->selector.saddr.a6, + sizeof(struct in6_addr)); + memcpy(&daddr6, xp->selector.daddr.a6, + sizeof(struct in6_addr)); + audit_log_format(audit_buf, + " src=" NIP6_FMT " dst=" NIP6_FMT, + NIP6(saddr6), NIP6(daddr6)); + } + break; + } +} + +void +xfrm_audit_policy_add(struct xfrm_policy *xp, int result, u32 auid, u32 sid) +{ + struct audit_buffer *audit_buf; + extern int audit_enabled; + + if (audit_enabled == 0) + return; + audit_buf = xfrm_audit_start(sid, auid); + if (audit_buf == NULL) + return; + audit_log_format(audit_buf, " op=SPD-add res=%u", result); + xfrm_audit_common_policyinfo(xp, audit_buf); + audit_log_end(audit_buf); +} +EXPORT_SYMBOL_GPL(xfrm_audit_policy_add); + +void +xfrm_audit_policy_delete(struct xfrm_policy *xp, int result, u32 auid, u32 sid) +{ + struct audit_buffer *audit_buf; + extern int audit_enabled; + + if (audit_enabled == 0) + return; + audit_buf = xfrm_audit_start(sid, auid); + if (audit_buf == NULL) + return; + audit_log_format(audit_buf, " op=SPD-delete res=%u", result); + xfrm_audit_common_policyinfo(xp, audit_buf); + audit_log_end(audit_buf); +} +EXPORT_SYMBOL_GPL(xfrm_audit_policy_delete); +#endif + #ifdef CONFIG_XFRM_MIGRATE static int xfrm_migrate_selector_match(struct xfrm_selector *sel_cmp, struct xfrm_selector *sel_tgt) diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c index d4356e6f7f9bc65fd67eeba6663b1cc9f321fa55..344f0a6abec53aacd0da9dfe9600854f130c5f7b 100644 --- a/net/xfrm/xfrm_state.c +++ b/net/xfrm/xfrm_state.c @@ -19,7 +19,6 @@ #include #include #include -#include #include #include "xfrm_hash.h" @@ -301,8 +300,8 @@ static void xfrm_timer_handler(unsigned long data) if (!err && x->id.spi) km_state_expired(x, 1, 0); - xfrm_audit_log(audit_get_loginuid(current->audit_context), 0, - AUDIT_MAC_IPSEC_DELSA, err ? 0 : 1, NULL, x); + xfrm_audit_state_delete(x, err ? 
0 : 1, + audit_get_loginuid(current->audit_context), 0); out: spin_unlock(&x->lock); @@ -403,11 +402,9 @@ xfrm_state_flush_secctx_check(u8 proto, struct xfrm_audit *audit_info) hlist_for_each_entry(x, entry, xfrm_state_bydst+i, bydst) { if (xfrm_id_proto_match(x->id.proto, proto) && (err = security_xfrm_state_delete(x)) != 0) { - xfrm_audit_log(audit_info->loginuid, - audit_info->secid, - AUDIT_MAC_IPSEC_DELSA, - 0, NULL, x); - + xfrm_audit_state_delete(x, 0, + audit_info->loginuid, + audit_info->secid); return err; } } @@ -443,10 +440,9 @@ int xfrm_state_flush(u8 proto, struct xfrm_audit *audit_info) spin_unlock_bh(&xfrm_state_lock); err = xfrm_state_delete(x); - xfrm_audit_log(audit_info->loginuid, - audit_info->secid, - AUDIT_MAC_IPSEC_DELSA, - err ? 0 : 1, NULL, x); + xfrm_audit_state_delete(x, err ? 0 : 1, + audit_info->loginuid, + audit_info->secid); xfrm_state_put(x); spin_lock_bh(&xfrm_state_lock); @@ -852,7 +848,6 @@ static struct xfrm_state *__find_acq_core(unsigned short family, u8 mode, u32 re hlist_add_head(&x->bydst, xfrm_state_bydst+h); h = xfrm_src_hash(daddr, saddr, family); hlist_add_head(&x->bysrc, xfrm_state_bysrc+h); - wake_up(&km_waitq); xfrm_state_num++; @@ -1156,29 +1151,6 @@ int xfrm_state_check_expire(struct xfrm_state *x) } EXPORT_SYMBOL(xfrm_state_check_expire); -static int xfrm_state_check_space(struct xfrm_state *x, struct sk_buff *skb) -{ - int nhead = x->props.header_len + LL_RESERVED_SPACE(skb->dst->dev) - - skb_headroom(skb); - - if (nhead > 0) - return pskb_expand_head(skb, nhead, 0, GFP_ATOMIC); - - /* Check tail too... */ - return 0; -} - -int xfrm_state_check(struct xfrm_state *x, struct sk_buff *skb) -{ - int err = xfrm_state_check_expire(x); - if (err < 0) - goto err; - err = xfrm_state_check_space(x, skb); -err: - return err; -} -EXPORT_SYMBOL(xfrm_state_check); - struct xfrm_state * xfrm_state_lookup(xfrm_address_t *daddr, __be32 spi, u8 proto, unsigned short family) @@ -1303,26 +1275,33 @@ u32 xfrm_get_acqseq(void) } EXPORT_SYMBOL(xfrm_get_acqseq); -void -xfrm_alloc_spi(struct xfrm_state *x, __be32 minspi, __be32 maxspi) +int xfrm_alloc_spi(struct xfrm_state *x, u32 low, u32 high) { unsigned int h; struct xfrm_state *x0; + int err = -ENOENT; + __be32 minspi = htonl(low); + __be32 maxspi = htonl(high); + + spin_lock_bh(&x->lock); + if (x->km.state == XFRM_STATE_DEAD) + goto unlock; + err = 0; if (x->id.spi) - return; + goto unlock; + + err = -ENOENT; if (minspi == maxspi) { x0 = xfrm_state_lookup(&x->id.daddr, minspi, x->id.proto, x->props.family); if (x0) { xfrm_state_put(x0); - return; + goto unlock; } x->id.spi = minspi; } else { u32 spi = 0; - u32 low = ntohl(minspi); - u32 high = ntohl(maxspi); for (h=0; hid.daddr, htonl(spi), x->id.proto, x->props.family); @@ -1338,8 +1317,14 @@ xfrm_alloc_spi(struct xfrm_state *x, __be32 minspi, __be32 maxspi) h = xfrm_spi_hash(&x->id.daddr, x->id.spi, x->id.proto, x->props.family); hlist_add_head(&x->byspi, xfrm_state_byspi+h); spin_unlock_bh(&xfrm_state_lock); - wake_up(&km_waitq); + + err = 0; } + +unlock: + spin_unlock_bh(&x->lock); + + return err; } EXPORT_SYMBOL(xfrm_alloc_spi); @@ -1424,7 +1409,6 @@ void xfrm_replay_notify(struct xfrm_state *x, int event) !mod_timer(&x->rtimer, jiffies + x->replay_maxage)) x->xflags &= ~XFRM_TIME_DEFER; } -EXPORT_SYMBOL(xfrm_replay_notify); static void xfrm_replay_timer_handler(unsigned long data) { @@ -1821,3 +1805,72 @@ void __init xfrm_state_init(void) INIT_WORK(&xfrm_state_gc_work, xfrm_state_gc_task); } +#ifdef CONFIG_AUDITSYSCALL +static inline void 
xfrm_audit_common_stateinfo(struct xfrm_state *x, + struct audit_buffer *audit_buf) +{ + if (x->security) + audit_log_format(audit_buf, " sec_alg=%u sec_doi=%u sec_obj=%s", + x->security->ctx_alg, x->security->ctx_doi, + x->security->ctx_str); + + switch(x->props.family) { + case AF_INET: + audit_log_format(audit_buf, " src=%u.%u.%u.%u dst=%u.%u.%u.%u", + NIPQUAD(x->props.saddr.a4), + NIPQUAD(x->id.daddr.a4)); + break; + case AF_INET6: + { + struct in6_addr saddr6, daddr6; + + memcpy(&saddr6, x->props.saddr.a6, + sizeof(struct in6_addr)); + memcpy(&daddr6, x->id.daddr.a6, + sizeof(struct in6_addr)); + audit_log_format(audit_buf, + " src=" NIP6_FMT " dst=" NIP6_FMT, + NIP6(saddr6), NIP6(daddr6)); + } + break; + } +} + +void +xfrm_audit_state_add(struct xfrm_state *x, int result, u32 auid, u32 sid) +{ + struct audit_buffer *audit_buf; + extern int audit_enabled; + + if (audit_enabled == 0) + return; + audit_buf = xfrm_audit_start(sid, auid); + if (audit_buf == NULL) + return; + audit_log_format(audit_buf, " op=SAD-add res=%u",result); + xfrm_audit_common_stateinfo(x, audit_buf); + audit_log_format(audit_buf, " spi=%lu(0x%lx)", + (unsigned long)x->id.spi, (unsigned long)x->id.spi); + audit_log_end(audit_buf); +} +EXPORT_SYMBOL_GPL(xfrm_audit_state_add); + +void +xfrm_audit_state_delete(struct xfrm_state *x, int result, u32 auid, u32 sid) +{ + struct audit_buffer *audit_buf; + extern int audit_enabled; + + if (audit_enabled == 0) + return; + audit_buf = xfrm_audit_start(sid, auid); + if (audit_buf == NULL) + return; + audit_log_format(audit_buf, " op=SAD-delete res=%u",result); + xfrm_audit_common_stateinfo(x, audit_buf); + audit_log_format(audit_buf, " spi=%lu(0x%lx)", + (unsigned long)x->id.spi, (unsigned long)x->id.spi); + audit_log_end(audit_buf); +} +EXPORT_SYMBOL_GPL(xfrm_audit_state_delete); +#endif /* CONFIG_AUDITSYSCALL */ diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c index 61339e17a0f5dd52bb3ba69f24b47366833d5dc4..d41588d101d01db756f5db3a1cd40d6e783883a9 100644 --- a/net/xfrm/xfrm_user.c +++ b/net/xfrm/xfrm_user.c @@ -19,7 +19,6 @@ #include #include #include -#include #include #include #include @@ -31,25 +30,22 @@ #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) #include #endif -#include -static int verify_one_alg(struct rtattr **xfrma, enum xfrm_attr_type_t type) +static inline int alg_len(struct xfrm_algo *alg) { - struct rtattr *rt = xfrma[type - 1]; + return sizeof(*alg) + ((alg->alg_key_len + 7) / 8); +} + +static int verify_one_alg(struct nlattr **attrs, enum xfrm_attr_type_t type) +{ + struct nlattr *rt = attrs[type]; struct xfrm_algo *algp; - int len; if (!rt) return 0; - len = (rt->rta_len - sizeof(*rt)) - sizeof(*algp); - if (len < 0) - return -EINVAL; - - algp = RTA_DATA(rt); - - len -= (algp->alg_key_len + 7U) / 8; - if (len < 0) + algp = nla_data(rt); + if (nla_len(rt) < alg_len(algp)) return -EINVAL; switch (type) { @@ -77,55 +73,25 @@ static int verify_one_alg(struct rtattr **xfrma, enum xfrm_attr_type_t type) return 0; } -static int verify_encap_tmpl(struct rtattr **xfrma) -{ - struct rtattr *rt = xfrma[XFRMA_ENCAP - 1]; - struct xfrm_encap_tmpl *encap; - - if (!rt) - return 0; - - if ((rt->rta_len - sizeof(*rt)) < sizeof(*encap)) - return -EINVAL; - - return 0; -} - -static int verify_one_addr(struct rtattr **xfrma, enum xfrm_attr_type_t type, +static void verify_one_addr(struct nlattr **attrs, enum xfrm_attr_type_t type, xfrm_address_t **addrp) { - struct rtattr *rt = xfrma[type - 1]; - - if (!rt) - return 0; - - if ((rt->rta_len - sizeof(*rt)) < 
sizeof(**addrp)) - return -EINVAL; - - if (addrp) - *addrp = RTA_DATA(rt); + struct nlattr *rt = attrs[type]; - return 0; + if (rt && addrp) + *addrp = nla_data(rt); } -static inline int verify_sec_ctx_len(struct rtattr **xfrma) +static inline int verify_sec_ctx_len(struct nlattr **attrs) { - struct rtattr *rt = xfrma[XFRMA_SEC_CTX - 1]; + struct nlattr *rt = attrs[XFRMA_SEC_CTX]; struct xfrm_user_sec_ctx *uctx; - int len = 0; if (!rt) return 0; - if (rt->rta_len < sizeof(*uctx)) - return -EINVAL; - - uctx = RTA_DATA(rt); - - len += sizeof(struct xfrm_user_sec_ctx); - len += uctx->ctx_len; - - if (uctx->len != len) + uctx = nla_data(rt); + if (uctx->len != (sizeof(struct xfrm_user_sec_ctx) + uctx->ctx_len)) return -EINVAL; return 0; @@ -133,7 +99,7 @@ static inline int verify_sec_ctx_len(struct rtattr **xfrma) static int verify_newsa_info(struct xfrm_usersa_info *p, - struct rtattr **xfrma) + struct nlattr **attrs) { int err; @@ -157,35 +123,35 @@ static int verify_newsa_info(struct xfrm_usersa_info *p, err = -EINVAL; switch (p->id.proto) { case IPPROTO_AH: - if (!xfrma[XFRMA_ALG_AUTH-1] || - xfrma[XFRMA_ALG_CRYPT-1] || - xfrma[XFRMA_ALG_COMP-1]) + if (!attrs[XFRMA_ALG_AUTH] || + attrs[XFRMA_ALG_CRYPT] || + attrs[XFRMA_ALG_COMP]) goto out; break; case IPPROTO_ESP: - if ((!xfrma[XFRMA_ALG_AUTH-1] && - !xfrma[XFRMA_ALG_CRYPT-1]) || - xfrma[XFRMA_ALG_COMP-1]) + if ((!attrs[XFRMA_ALG_AUTH] && + !attrs[XFRMA_ALG_CRYPT]) || + attrs[XFRMA_ALG_COMP]) goto out; break; case IPPROTO_COMP: - if (!xfrma[XFRMA_ALG_COMP-1] || - xfrma[XFRMA_ALG_AUTH-1] || - xfrma[XFRMA_ALG_CRYPT-1]) + if (!attrs[XFRMA_ALG_COMP] || + attrs[XFRMA_ALG_AUTH] || + attrs[XFRMA_ALG_CRYPT]) goto out; break; #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) case IPPROTO_DSTOPTS: case IPPROTO_ROUTING: - if (xfrma[XFRMA_ALG_COMP-1] || - xfrma[XFRMA_ALG_AUTH-1] || - xfrma[XFRMA_ALG_CRYPT-1] || - xfrma[XFRMA_ENCAP-1] || - xfrma[XFRMA_SEC_CTX-1] || - !xfrma[XFRMA_COADDR-1]) + if (attrs[XFRMA_ALG_COMP] || + attrs[XFRMA_ALG_AUTH] || + attrs[XFRMA_ALG_CRYPT] || + attrs[XFRMA_ENCAP] || + attrs[XFRMA_SEC_CTX] || + !attrs[XFRMA_COADDR]) goto out; break; #endif @@ -194,17 +160,13 @@ static int verify_newsa_info(struct xfrm_usersa_info *p, goto out; } - if ((err = verify_one_alg(xfrma, XFRMA_ALG_AUTH))) - goto out; - if ((err = verify_one_alg(xfrma, XFRMA_ALG_CRYPT))) - goto out; - if ((err = verify_one_alg(xfrma, XFRMA_ALG_COMP))) + if ((err = verify_one_alg(attrs, XFRMA_ALG_AUTH))) goto out; - if ((err = verify_encap_tmpl(xfrma))) + if ((err = verify_one_alg(attrs, XFRMA_ALG_CRYPT))) goto out; - if ((err = verify_sec_ctx_len(xfrma))) + if ((err = verify_one_alg(attrs, XFRMA_ALG_COMP))) goto out; - if ((err = verify_one_addr(xfrma, XFRMA_COADDR, NULL))) + if ((err = verify_sec_ctx_len(attrs))) goto out; err = -EINVAL; @@ -227,25 +189,22 @@ static int verify_newsa_info(struct xfrm_usersa_info *p, static int attach_one_algo(struct xfrm_algo **algpp, u8 *props, struct xfrm_algo_desc *(*get_byname)(char *, int), - struct rtattr *u_arg) + struct nlattr *rta) { - struct rtattr *rta = u_arg; struct xfrm_algo *p, *ualg; struct xfrm_algo_desc *algo; - int len; if (!rta) return 0; - ualg = RTA_DATA(rta); + ualg = nla_data(rta); algo = get_byname(ualg->alg_name, 1); if (!algo) return -ENOSYS; *props = algo->desc.sadb_alg_id; - len = sizeof(*ualg) + (ualg->alg_key_len + 7U) / 8; - p = kmemdup(ualg, len, GFP_KERNEL); + p = kmemdup(ualg, alg_len(ualg), GFP_KERNEL); if (!p) return -ENOMEM; @@ -254,24 +213,6 @@ static int attach_one_algo(struct 
xfrm_algo **algpp, u8 *props, return 0; } -static int attach_encap_tmpl(struct xfrm_encap_tmpl **encapp, struct rtattr *u_arg) -{ - struct rtattr *rta = u_arg; - struct xfrm_encap_tmpl *p, *uencap; - - if (!rta) - return 0; - - uencap = RTA_DATA(rta); - p = kmemdup(uencap, sizeof(*p), GFP_KERNEL); - if (!p) - return -ENOMEM; - - *encapp = p; - return 0; -} - - static inline int xfrm_user_sec_ctx_size(struct xfrm_sec_ctx *xfrm_ctx) { int len = 0; @@ -283,34 +224,6 @@ static inline int xfrm_user_sec_ctx_size(struct xfrm_sec_ctx *xfrm_ctx) return len; } -static int attach_sec_ctx(struct xfrm_state *x, struct rtattr *u_arg) -{ - struct xfrm_user_sec_ctx *uctx; - - if (!u_arg) - return 0; - - uctx = RTA_DATA(u_arg); - return security_xfrm_state_alloc(x, uctx); -} - -static int attach_one_addr(xfrm_address_t **addrpp, struct rtattr *u_arg) -{ - struct rtattr *rta = u_arg; - xfrm_address_t *p, *uaddrp; - - if (!rta) - return 0; - - uaddrp = RTA_DATA(rta); - p = kmemdup(uaddrp, sizeof(*p), GFP_KERNEL); - if (!p) - return -ENOMEM; - - *addrpp = p; - return 0; -} - static void copy_from_user_state(struct xfrm_state *x, struct xfrm_usersa_info *p) { memcpy(&x->id, &p->id, sizeof(x->id)); @@ -336,53 +249,38 @@ static void copy_from_user_state(struct xfrm_state *x, struct xfrm_usersa_info * * somehow made shareable and move it to xfrm_state.c - JHS * */ -static int xfrm_update_ae_params(struct xfrm_state *x, struct rtattr **xfrma) +static void xfrm_update_ae_params(struct xfrm_state *x, struct nlattr **attrs) { - int err = - EINVAL; - struct rtattr *rp = xfrma[XFRMA_REPLAY_VAL-1]; - struct rtattr *lt = xfrma[XFRMA_LTIME_VAL-1]; - struct rtattr *et = xfrma[XFRMA_ETIMER_THRESH-1]; - struct rtattr *rt = xfrma[XFRMA_REPLAY_THRESH-1]; + struct nlattr *rp = attrs[XFRMA_REPLAY_VAL]; + struct nlattr *lt = attrs[XFRMA_LTIME_VAL]; + struct nlattr *et = attrs[XFRMA_ETIMER_THRESH]; + struct nlattr *rt = attrs[XFRMA_REPLAY_THRESH]; if (rp) { struct xfrm_replay_state *replay; - if (RTA_PAYLOAD(rp) < sizeof(*replay)) - goto error; - replay = RTA_DATA(rp); + replay = nla_data(rp); memcpy(&x->replay, replay, sizeof(*replay)); memcpy(&x->preplay, replay, sizeof(*replay)); } if (lt) { struct xfrm_lifetime_cur *ltime; - if (RTA_PAYLOAD(lt) < sizeof(*ltime)) - goto error; - ltime = RTA_DATA(lt); + ltime = nla_data(lt); x->curlft.bytes = ltime->bytes; x->curlft.packets = ltime->packets; x->curlft.add_time = ltime->add_time; x->curlft.use_time = ltime->use_time; } - if (et) { - if (RTA_PAYLOAD(et) < sizeof(u32)) - goto error; - x->replay_maxage = *(u32*)RTA_DATA(et); - } - - if (rt) { - if (RTA_PAYLOAD(rt) < sizeof(u32)) - goto error; - x->replay_maxdiff = *(u32*)RTA_DATA(rt); - } + if (et) + x->replay_maxage = nla_get_u32(et); - return 0; -error: - return err; + if (rt) + x->replay_maxdiff = nla_get_u32(rt); } static struct xfrm_state *xfrm_state_construct(struct xfrm_usersa_info *p, - struct rtattr **xfrma, + struct nlattr **attrs, int *errp) { struct xfrm_state *x = xfrm_state_alloc(); @@ -395,25 +293,37 @@ static struct xfrm_state *xfrm_state_construct(struct xfrm_usersa_info *p, if ((err = attach_one_algo(&x->aalg, &x->props.aalgo, xfrm_aalg_get_byname, - xfrma[XFRMA_ALG_AUTH-1]))) + attrs[XFRMA_ALG_AUTH]))) goto error; if ((err = attach_one_algo(&x->ealg, &x->props.ealgo, xfrm_ealg_get_byname, - xfrma[XFRMA_ALG_CRYPT-1]))) + attrs[XFRMA_ALG_CRYPT]))) goto error; if ((err = attach_one_algo(&x->calg, &x->props.calgo, xfrm_calg_get_byname, - xfrma[XFRMA_ALG_COMP-1]))) - goto error; - if ((err = 
attach_encap_tmpl(&x->encap, xfrma[XFRMA_ENCAP-1]))) - goto error; - if ((err = attach_one_addr(&x->coaddr, xfrma[XFRMA_COADDR-1]))) + attrs[XFRMA_ALG_COMP]))) goto error; + + if (attrs[XFRMA_ENCAP]) { + x->encap = kmemdup(nla_data(attrs[XFRMA_ENCAP]), + sizeof(*x->encap), GFP_KERNEL); + if (x->encap == NULL) + goto error; + } + + if (attrs[XFRMA_COADDR]) { + x->coaddr = kmemdup(nla_data(attrs[XFRMA_COADDR]), + sizeof(*x->coaddr), GFP_KERNEL); + if (x->coaddr == NULL) + goto error; + } + err = xfrm_init_state(x); if (err) goto error; - if ((err = attach_sec_ctx(x, xfrma[XFRMA_SEC_CTX-1]))) + if (attrs[XFRMA_SEC_CTX] && + security_xfrm_state_alloc(x, nla_data(attrs[XFRMA_SEC_CTX]))) goto error; x->km.seq = p->seq; @@ -426,9 +336,7 @@ static struct xfrm_state *xfrm_state_construct(struct xfrm_usersa_info *p, /* override default values from above */ - err = xfrm_update_ae_params(x, (struct rtattr **)xfrma); - if (err < 0) - goto error; + xfrm_update_ae_params(x, attrs); return x; @@ -441,18 +349,18 @@ static struct xfrm_state *xfrm_state_construct(struct xfrm_usersa_info *p, } static int xfrm_add_sa(struct sk_buff *skb, struct nlmsghdr *nlh, - struct rtattr **xfrma) + struct nlattr **attrs) { - struct xfrm_usersa_info *p = NLMSG_DATA(nlh); + struct xfrm_usersa_info *p = nlmsg_data(nlh); struct xfrm_state *x; int err; struct km_event c; - err = verify_newsa_info(p, xfrma); + err = verify_newsa_info(p, attrs); if (err) return err; - x = xfrm_state_construct(p, xfrma, &err); + x = xfrm_state_construct(p, attrs, &err); if (!x) return err; @@ -462,8 +370,8 @@ static int xfrm_add_sa(struct sk_buff *skb, struct nlmsghdr *nlh, else err = xfrm_state_update(x); - xfrm_audit_log(NETLINK_CB(skb).loginuid, NETLINK_CB(skb).sid, - AUDIT_MAC_IPSEC_ADDSA, err ? 0 : 1, NULL, x); + xfrm_audit_state_add(x, err ? 0 : 1, NETLINK_CB(skb).loginuid, + NETLINK_CB(skb).sid); if (err < 0) { x->km.state = XFRM_STATE_DEAD; @@ -482,7 +390,7 @@ static int xfrm_add_sa(struct sk_buff *skb, struct nlmsghdr *nlh, } static struct xfrm_state *xfrm_user_state_lookup(struct xfrm_usersa_id *p, - struct rtattr **xfrma, + struct nlattr **attrs, int *errp) { struct xfrm_state *x = NULL; @@ -494,10 +402,7 @@ static struct xfrm_state *xfrm_user_state_lookup(struct xfrm_usersa_id *p, } else { xfrm_address_t *saddr = NULL; - err = verify_one_addr(xfrma, XFRMA_SRCADDR, &saddr); - if (err) - goto out; - + verify_one_addr(attrs, XFRMA_SRCADDR, &saddr); if (!saddr) { err = -EINVAL; goto out; @@ -515,14 +420,14 @@ static struct xfrm_state *xfrm_user_state_lookup(struct xfrm_usersa_id *p, } static int xfrm_del_sa(struct sk_buff *skb, struct nlmsghdr *nlh, - struct rtattr **xfrma) + struct nlattr **attrs) { struct xfrm_state *x; int err = -ESRCH; struct km_event c; - struct xfrm_usersa_id *p = NLMSG_DATA(nlh); + struct xfrm_usersa_id *p = nlmsg_data(nlh); - x = xfrm_user_state_lookup(p, xfrma, &err); + x = xfrm_user_state_lookup(p, attrs, &err); if (x == NULL) return err; @@ -545,8 +450,8 @@ static int xfrm_del_sa(struct sk_buff *skb, struct nlmsghdr *nlh, km_state_notify(x, &c); out: - xfrm_audit_log(NETLINK_CB(skb).loginuid, NETLINK_CB(skb).sid, - AUDIT_MAC_IPSEC_DELSA, err ? 0 : 1, NULL, x); + xfrm_audit_state_delete(x, err ? 
0 : 1, NETLINK_CB(skb).loginuid, + NETLINK_CB(skb).sid); xfrm_state_put(x); return err; } @@ -576,67 +481,92 @@ struct xfrm_dump_info { int this_idx; }; -static int dump_one_state(struct xfrm_state *x, int count, void *ptr) +static int copy_sec_ctx(struct xfrm_sec_ctx *s, struct sk_buff *skb) { - struct xfrm_dump_info *sp = ptr; - struct sk_buff *in_skb = sp->in_skb; - struct sk_buff *skb = sp->out_skb; - struct xfrm_usersa_info *p; - struct nlmsghdr *nlh; - unsigned char *b = skb_tail_pointer(skb); + struct xfrm_user_sec_ctx *uctx; + struct nlattr *attr; + int ctx_size = sizeof(*uctx) + s->ctx_len; - if (sp->this_idx < sp->start_idx) - goto out; + attr = nla_reserve(skb, XFRMA_SEC_CTX, ctx_size); + if (attr == NULL) + return -EMSGSIZE; + + uctx = nla_data(attr); + uctx->exttype = XFRMA_SEC_CTX; + uctx->len = ctx_size; + uctx->ctx_doi = s->ctx_doi; + uctx->ctx_alg = s->ctx_alg; + uctx->ctx_len = s->ctx_len; + memcpy(uctx + 1, s->ctx_str, s->ctx_len); - nlh = NLMSG_PUT(skb, NETLINK_CB(in_skb).pid, - sp->nlmsg_seq, - XFRM_MSG_NEWSA, sizeof(*p)); - nlh->nlmsg_flags = sp->nlmsg_flags; + return 0; +} - p = NLMSG_DATA(nlh); +/* Don't change this without updating xfrm_sa_len! */ +static int copy_to_user_state_extra(struct xfrm_state *x, + struct xfrm_usersa_info *p, + struct sk_buff *skb) +{ + spin_lock_bh(&x->lock); copy_to_user_state(x, p); + if (x->coaddr) + NLA_PUT(skb, XFRMA_COADDR, sizeof(*x->coaddr), x->coaddr); + + if (x->lastused) + NLA_PUT_U64(skb, XFRMA_LASTUSED, x->lastused); + spin_unlock_bh(&x->lock); + if (x->aalg) - RTA_PUT(skb, XFRMA_ALG_AUTH, - sizeof(*(x->aalg))+(x->aalg->alg_key_len+7)/8, x->aalg); + NLA_PUT(skb, XFRMA_ALG_AUTH, alg_len(x->aalg), x->aalg); if (x->ealg) - RTA_PUT(skb, XFRMA_ALG_CRYPT, - sizeof(*(x->ealg))+(x->ealg->alg_key_len+7)/8, x->ealg); + NLA_PUT(skb, XFRMA_ALG_CRYPT, alg_len(x->ealg), x->ealg); if (x->calg) - RTA_PUT(skb, XFRMA_ALG_COMP, sizeof(*(x->calg)), x->calg); + NLA_PUT(skb, XFRMA_ALG_COMP, sizeof(*(x->calg)), x->calg); if (x->encap) - RTA_PUT(skb, XFRMA_ENCAP, sizeof(*x->encap), x->encap); + NLA_PUT(skb, XFRMA_ENCAP, sizeof(*x->encap), x->encap); - if (x->security) { - int ctx_size = sizeof(struct xfrm_sec_ctx) + - x->security->ctx_len; - struct rtattr *rt = __RTA_PUT(skb, XFRMA_SEC_CTX, ctx_size); - struct xfrm_user_sec_ctx *uctx = RTA_DATA(rt); - - uctx->exttype = XFRMA_SEC_CTX; - uctx->len = ctx_size; - uctx->ctx_doi = x->security->ctx_doi; - uctx->ctx_alg = x->security->ctx_alg; - uctx->ctx_len = x->security->ctx_len; - memcpy(uctx + 1, x->security->ctx_str, x->security->ctx_len); - } + if (x->security && copy_sec_ctx(x->security, skb) < 0) + goto nla_put_failure; - if (x->coaddr) - RTA_PUT(skb, XFRMA_COADDR, sizeof(*x->coaddr), x->coaddr); + return 0; - if (x->lastused) - RTA_PUT(skb, XFRMA_LASTUSED, sizeof(x->lastused), &x->lastused); +nla_put_failure: + return -EMSGSIZE; +} - nlh->nlmsg_len = skb_tail_pointer(skb) - b; +static int dump_one_state(struct xfrm_state *x, int count, void *ptr) +{ + struct xfrm_dump_info *sp = ptr; + struct sk_buff *in_skb = sp->in_skb; + struct sk_buff *skb = sp->out_skb; + struct xfrm_usersa_info *p; + struct nlmsghdr *nlh; + int err; + + if (sp->this_idx < sp->start_idx) + goto out; + + nlh = nlmsg_put(skb, NETLINK_CB(in_skb).pid, sp->nlmsg_seq, + XFRM_MSG_NEWSA, sizeof(*p), sp->nlmsg_flags); + if (nlh == NULL) + return -EMSGSIZE; + + p = nlmsg_data(nlh); + + err = copy_to_user_state_extra(x, p, skb); + if (err) + goto nla_put_failure; + + nlmsg_end(skb, nlh); out: sp->this_idx++; return 0; -nlmsg_failure: 
-rtattr_failure: - nlmsg_trim(skb, b); - return -1; +nla_put_failure: + nlmsg_cancel(skb, nlh); + return err; } static int xfrm_dump_sa(struct sk_buff *skb, struct netlink_callback *cb) @@ -661,7 +591,7 @@ static struct sk_buff *xfrm_state_netlink(struct sk_buff *in_skb, struct xfrm_dump_info info; struct sk_buff *skb; - skb = alloc_skb(NLMSG_GOODSIZE, GFP_ATOMIC); + skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC); if (!skb) return ERR_PTR(-ENOMEM); @@ -679,6 +609,13 @@ static struct sk_buff *xfrm_state_netlink(struct sk_buff *in_skb, return skb; } +static inline size_t xfrm_spdinfo_msgsize(void) +{ + return NLMSG_ALIGN(4) + + nla_total_size(sizeof(struct xfrmu_spdinfo)) + + nla_total_size(sizeof(struct xfrmu_spdhinfo)); +} + static int build_spdinfo(struct sk_buff *skb, u32 pid, u32 seq, u32 flags) { struct xfrmk_spdinfo si; @@ -714,18 +651,14 @@ static int build_spdinfo(struct sk_buff *skb, u32 pid, u32 seq, u32 flags) } static int xfrm_get_spdinfo(struct sk_buff *skb, struct nlmsghdr *nlh, - struct rtattr **xfrma) + struct nlattr **attrs) { struct sk_buff *r_skb; - u32 *flags = NLMSG_DATA(nlh); + u32 *flags = nlmsg_data(nlh); u32 spid = NETLINK_CB(skb).pid; u32 seq = nlh->nlmsg_seq; - int len = NLMSG_LENGTH(sizeof(u32)); - len += RTA_SPACE(sizeof(struct xfrmu_spdinfo)); - len += RTA_SPACE(sizeof(struct xfrmu_spdhinfo)); - - r_skb = alloc_skb(len, GFP_ATOMIC); + r_skb = nlmsg_new(xfrm_spdinfo_msgsize(), GFP_ATOMIC); if (r_skb == NULL) return -ENOMEM; @@ -735,6 +668,13 @@ static int xfrm_get_spdinfo(struct sk_buff *skb, struct nlmsghdr *nlh, return nlmsg_unicast(xfrm_nl, r_skb, spid); } +static inline size_t xfrm_sadinfo_msgsize(void) +{ + return NLMSG_ALIGN(4) + + nla_total_size(sizeof(struct xfrmu_sadhinfo)) + + nla_total_size(4); /* XFRMA_SAD_CNT */ +} + static int build_sadinfo(struct sk_buff *skb, u32 pid, u32 seq, u32 flags) { struct xfrmk_sadinfo si; @@ -764,19 +704,14 @@ static int build_sadinfo(struct sk_buff *skb, u32 pid, u32 seq, u32 flags) } static int xfrm_get_sadinfo(struct sk_buff *skb, struct nlmsghdr *nlh, - struct rtattr **xfrma) + struct nlattr **attrs) { struct sk_buff *r_skb; - u32 *flags = NLMSG_DATA(nlh); + u32 *flags = nlmsg_data(nlh); u32 spid = NETLINK_CB(skb).pid; u32 seq = nlh->nlmsg_seq; - int len = NLMSG_LENGTH(sizeof(u32)); - - len += RTA_SPACE(sizeof(struct xfrmu_sadhinfo)); - len += RTA_SPACE(sizeof(u32)); - - r_skb = alloc_skb(len, GFP_ATOMIC); + r_skb = nlmsg_new(xfrm_sadinfo_msgsize(), GFP_ATOMIC); if (r_skb == NULL) return -ENOMEM; @@ -787,14 +722,14 @@ static int xfrm_get_sadinfo(struct sk_buff *skb, struct nlmsghdr *nlh, } static int xfrm_get_sa(struct sk_buff *skb, struct nlmsghdr *nlh, - struct rtattr **xfrma) + struct nlattr **attrs) { - struct xfrm_usersa_id *p = NLMSG_DATA(nlh); + struct xfrm_usersa_id *p = nlmsg_data(nlh); struct xfrm_state *x; struct sk_buff *resp_skb; int err = -ESRCH; - x = xfrm_user_state_lookup(p, xfrma, &err); + x = xfrm_user_state_lookup(p, attrs, &err); if (x == NULL) goto out_noput; @@ -802,8 +737,7 @@ static int xfrm_get_sa(struct sk_buff *skb, struct nlmsghdr *nlh, if (IS_ERR(resp_skb)) { err = PTR_ERR(resp_skb); } else { - err = netlink_unicast(xfrm_nl, resp_skb, - NETLINK_CB(skb).pid, MSG_DONTWAIT); + err = nlmsg_unicast(xfrm_nl, resp_skb, NETLINK_CB(skb).pid); } xfrm_state_put(x); out_noput: @@ -834,7 +768,7 @@ static int verify_userspi_info(struct xfrm_userspi_info *p) } static int xfrm_alloc_userspi(struct sk_buff *skb, struct nlmsghdr *nlh, - struct rtattr **xfrma) + struct nlattr **attrs) { struct xfrm_state 
*x; struct xfrm_userspi_info *p; @@ -843,7 +777,7 @@ static int xfrm_alloc_userspi(struct sk_buff *skb, struct nlmsghdr *nlh, int family; int err; - p = NLMSG_DATA(nlh); + p = nlmsg_data(nlh); err = verify_userspi_info(p); if (err) goto out_noput; @@ -869,23 +803,17 @@ static int xfrm_alloc_userspi(struct sk_buff *skb, struct nlmsghdr *nlh, if (x == NULL) goto out_noput; - resp_skb = ERR_PTR(-ENOENT); - - spin_lock_bh(&x->lock); - if (x->km.state != XFRM_STATE_DEAD) { - xfrm_alloc_spi(x, htonl(p->min), htonl(p->max)); - if (x->id.spi) - resp_skb = xfrm_state_netlink(skb, x, nlh->nlmsg_seq); - } - spin_unlock_bh(&x->lock); + err = xfrm_alloc_spi(x, p->min, p->max); + if (err) + goto out; + resp_skb = xfrm_state_netlink(skb, x, nlh->nlmsg_seq); if (IS_ERR(resp_skb)) { err = PTR_ERR(resp_skb); goto out; } - err = netlink_unicast(xfrm_nl, resp_skb, - NETLINK_CB(skb).pid, MSG_DONTWAIT); + err = nlmsg_unicast(xfrm_nl, resp_skb, NETLINK_CB(skb).pid); out: xfrm_state_put(x); @@ -964,15 +892,15 @@ static int verify_newpolicy_info(struct xfrm_userpolicy_info *p) return verify_policy_dir(p->dir); } -static int copy_from_user_sec_ctx(struct xfrm_policy *pol, struct rtattr **xfrma) +static int copy_from_user_sec_ctx(struct xfrm_policy *pol, struct nlattr **attrs) { - struct rtattr *rt = xfrma[XFRMA_SEC_CTX-1]; + struct nlattr *rt = attrs[XFRMA_SEC_CTX]; struct xfrm_user_sec_ctx *uctx; if (!rt) return 0; - uctx = RTA_DATA(rt); + uctx = nla_data(rt); return security_xfrm_policy_alloc(pol, uctx); } @@ -1032,38 +960,35 @@ static int validate_tmpl(int nr, struct xfrm_user_tmpl *ut, u16 family) return 0; } -static int copy_from_user_tmpl(struct xfrm_policy *pol, struct rtattr **xfrma) +static int copy_from_user_tmpl(struct xfrm_policy *pol, struct nlattr **attrs) { - struct rtattr *rt = xfrma[XFRMA_TMPL-1]; + struct nlattr *rt = attrs[XFRMA_TMPL]; if (!rt) { pol->xfrm_nr = 0; } else { - struct xfrm_user_tmpl *utmpl = RTA_DATA(rt); - int nr = (rt->rta_len - sizeof(*rt)) / sizeof(*utmpl); + struct xfrm_user_tmpl *utmpl = nla_data(rt); + int nr = nla_len(rt) / sizeof(*utmpl); int err; err = validate_tmpl(nr, utmpl, pol->family); if (err) return err; - copy_templates(pol, RTA_DATA(rt), nr); + copy_templates(pol, utmpl, nr); } return 0; } -static int copy_from_user_policy_type(u8 *tp, struct rtattr **xfrma) +static int copy_from_user_policy_type(u8 *tp, struct nlattr **attrs) { - struct rtattr *rt = xfrma[XFRMA_POLICY_TYPE-1]; + struct nlattr *rt = attrs[XFRMA_POLICY_TYPE]; struct xfrm_userpolicy_type *upt; u8 type = XFRM_POLICY_TYPE_MAIN; int err; if (rt) { - if (rt->rta_len < sizeof(*upt)) - return -EINVAL; - - upt = RTA_DATA(rt); + upt = nla_data(rt); type = upt->type; } @@ -1101,7 +1026,7 @@ static void copy_to_user_policy(struct xfrm_policy *xp, struct xfrm_userpolicy_i p->share = XFRM_SHARE_ANY; /* XXX xp->share */ } -static struct xfrm_policy *xfrm_policy_construct(struct xfrm_userpolicy_info *p, struct rtattr **xfrma, int *errp) +static struct xfrm_policy *xfrm_policy_construct(struct xfrm_userpolicy_info *p, struct nlattr **attrs, int *errp) { struct xfrm_policy *xp = xfrm_policy_alloc(GFP_KERNEL); int err; @@ -1113,12 +1038,12 @@ static struct xfrm_policy *xfrm_policy_construct(struct xfrm_userpolicy_info *p, copy_from_user_policy(xp, p); - err = copy_from_user_policy_type(&xp->type, xfrma); + err = copy_from_user_policy_type(&xp->type, attrs); if (err) goto error; - if (!(err = copy_from_user_tmpl(xp, xfrma))) - err = copy_from_user_sec_ctx(xp, xfrma); + if (!(err = copy_from_user_tmpl(xp, attrs))) + 
err = copy_from_user_sec_ctx(xp, attrs); if (err) goto error; @@ -1130,9 +1055,9 @@ static struct xfrm_policy *xfrm_policy_construct(struct xfrm_userpolicy_info *p, } static int xfrm_add_policy(struct sk_buff *skb, struct nlmsghdr *nlh, - struct rtattr **xfrma) + struct nlattr **attrs) { - struct xfrm_userpolicy_info *p = NLMSG_DATA(nlh); + struct xfrm_userpolicy_info *p = nlmsg_data(nlh); struct xfrm_policy *xp; struct km_event c; int err; @@ -1141,11 +1066,11 @@ static int xfrm_add_policy(struct sk_buff *skb, struct nlmsghdr *nlh, err = verify_newpolicy_info(p); if (err) return err; - err = verify_sec_ctx_len(xfrma); + err = verify_sec_ctx_len(attrs); if (err) return err; - xp = xfrm_policy_construct(p, xfrma, &err); + xp = xfrm_policy_construct(p, attrs, &err); if (!xp) return err; @@ -1155,8 +1080,8 @@ static int xfrm_add_policy(struct sk_buff *skb, struct nlmsghdr *nlh, * a type XFRM_MSG_UPDPOLICY - JHS */ excl = nlh->nlmsg_type == XFRM_MSG_NEWPOLICY; err = xfrm_policy_insert(p->dir, xp, excl); - xfrm_audit_log(NETLINK_CB(skb).loginuid, NETLINK_CB(skb).sid, - AUDIT_MAC_IPSEC_DELSPD, err ? 0 : 1, xp, NULL); + xfrm_audit_policy_add(xp, err ? 0 : 1, NETLINK_CB(skb).loginuid, + NETLINK_CB(skb).sid); if (err) { security_xfrm_policy_free(xp); @@ -1197,32 +1122,9 @@ static int copy_to_user_tmpl(struct xfrm_policy *xp, struct sk_buff *skb) up->ealgos = kp->ealgos; up->calgos = kp->calgos; } - RTA_PUT(skb, XFRMA_TMPL, - (sizeof(struct xfrm_user_tmpl) * xp->xfrm_nr), - vec); - return 0; - -rtattr_failure: - return -1; -} - -static int copy_sec_ctx(struct xfrm_sec_ctx *s, struct sk_buff *skb) -{ - int ctx_size = sizeof(struct xfrm_sec_ctx) + s->ctx_len; - struct rtattr *rt = __RTA_PUT(skb, XFRMA_SEC_CTX, ctx_size); - struct xfrm_user_sec_ctx *uctx = RTA_DATA(rt); - - uctx->exttype = XFRMA_SEC_CTX; - uctx->len = ctx_size; - uctx->ctx_doi = s->ctx_doi; - uctx->ctx_alg = s->ctx_alg; - uctx->ctx_len = s->ctx_len; - memcpy(uctx + 1, s->ctx_str, s->ctx_len); - return 0; - - rtattr_failure: - return -1; + return nla_put(skb, XFRMA_TMPL, + sizeof(struct xfrm_user_tmpl) * xp->xfrm_nr, vec); } static inline int copy_to_user_state_sec_ctx(struct xfrm_state *x, struct sk_buff *skb) @@ -1240,21 +1142,23 @@ static inline int copy_to_user_sec_ctx(struct xfrm_policy *xp, struct sk_buff *s } return 0; } +static inline size_t userpolicy_type_attrsize(void) +{ +#ifdef CONFIG_XFRM_SUB_POLICY + return nla_total_size(sizeof(struct xfrm_userpolicy_type)); +#else + return 0; +#endif +} #ifdef CONFIG_XFRM_SUB_POLICY static int copy_to_user_policy_type(u8 type, struct sk_buff *skb) { - struct xfrm_userpolicy_type upt; - - memset(&upt, 0, sizeof(upt)); - upt.type = type; - - RTA_PUT(skb, XFRMA_POLICY_TYPE, sizeof(upt), &upt); + struct xfrm_userpolicy_type upt = { + .type = type, + }; - return 0; - -rtattr_failure: - return -1; + return nla_put(skb, XFRMA_POLICY_TYPE, sizeof(upt), &upt); } #else @@ -1271,17 +1175,16 @@ static int dump_one_policy(struct xfrm_policy *xp, int dir, int count, void *ptr struct sk_buff *in_skb = sp->in_skb; struct sk_buff *skb = sp->out_skb; struct nlmsghdr *nlh; - unsigned char *b = skb_tail_pointer(skb); if (sp->this_idx < sp->start_idx) goto out; - nlh = NLMSG_PUT(skb, NETLINK_CB(in_skb).pid, - sp->nlmsg_seq, - XFRM_MSG_NEWPOLICY, sizeof(*p)); - p = NLMSG_DATA(nlh); - nlh->nlmsg_flags = sp->nlmsg_flags; + nlh = nlmsg_put(skb, NETLINK_CB(in_skb).pid, sp->nlmsg_seq, + XFRM_MSG_NEWPOLICY, sizeof(*p), sp->nlmsg_flags); + if (nlh == NULL) + return -EMSGSIZE; + p = nlmsg_data(nlh); 
copy_to_user_policy(xp, p, dir); if (copy_to_user_tmpl(xp, skb) < 0) goto nlmsg_failure; @@ -1290,14 +1193,14 @@ static int dump_one_policy(struct xfrm_policy *xp, int dir, int count, void *ptr if (copy_to_user_policy_type(xp->type, skb) < 0) goto nlmsg_failure; - nlh->nlmsg_len = skb_tail_pointer(skb) - b; + nlmsg_end(skb, nlh); out: sp->this_idx++; return 0; nlmsg_failure: - nlmsg_trim(skb, b); - return -1; + nlmsg_cancel(skb, nlh); + return -EMSGSIZE; } static int xfrm_dump_policy(struct sk_buff *skb, struct netlink_callback *cb) @@ -1326,7 +1229,7 @@ static struct sk_buff *xfrm_policy_netlink(struct sk_buff *in_skb, struct xfrm_dump_info info; struct sk_buff *skb; - skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL); + skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); if (!skb) return ERR_PTR(-ENOMEM); @@ -1345,7 +1248,7 @@ static struct sk_buff *xfrm_policy_netlink(struct sk_buff *in_skb, } static int xfrm_get_policy(struct sk_buff *skb, struct nlmsghdr *nlh, - struct rtattr **xfrma) + struct nlattr **attrs) { struct xfrm_policy *xp; struct xfrm_userpolicy_id *p; @@ -1354,10 +1257,10 @@ static int xfrm_get_policy(struct sk_buff *skb, struct nlmsghdr *nlh, struct km_event c; int delete; - p = NLMSG_DATA(nlh); + p = nlmsg_data(nlh); delete = nlh->nlmsg_type == XFRM_MSG_DELPOLICY; - err = copy_from_user_policy_type(&type, xfrma); + err = copy_from_user_policy_type(&type, attrs); if (err) return err; @@ -1368,16 +1271,16 @@ static int xfrm_get_policy(struct sk_buff *skb, struct nlmsghdr *nlh, if (p->index) xp = xfrm_policy_byid(type, p->dir, p->index, delete, &err); else { - struct rtattr *rt = xfrma[XFRMA_SEC_CTX-1]; + struct nlattr *rt = attrs[XFRMA_SEC_CTX]; struct xfrm_policy tmp; - err = verify_sec_ctx_len(xfrma); + err = verify_sec_ctx_len(attrs); if (err) return err; memset(&tmp, 0, sizeof(struct xfrm_policy)); if (rt) { - struct xfrm_user_sec_ctx *uctx = RTA_DATA(rt); + struct xfrm_user_sec_ctx *uctx = nla_data(rt); if ((err = security_xfrm_policy_alloc(&tmp, uctx))) return err; @@ -1396,13 +1299,13 @@ static int xfrm_get_policy(struct sk_buff *skb, struct nlmsghdr *nlh, if (IS_ERR(resp_skb)) { err = PTR_ERR(resp_skb); } else { - err = netlink_unicast(xfrm_nl, resp_skb, - NETLINK_CB(skb).pid, - MSG_DONTWAIT); + err = nlmsg_unicast(xfrm_nl, resp_skb, + NETLINK_CB(skb).pid); } } else { - xfrm_audit_log(NETLINK_CB(skb).loginuid, NETLINK_CB(skb).sid, - AUDIT_MAC_IPSEC_DELSPD, err ? 0 : 1, xp, NULL); + xfrm_audit_policy_delete(xp, err ? 
0 : 1, + NETLINK_CB(skb).loginuid, + NETLINK_CB(skb).sid); if (err != 0) goto out; @@ -1420,10 +1323,10 @@ static int xfrm_get_policy(struct sk_buff *skb, struct nlmsghdr *nlh, } static int xfrm_flush_sa(struct sk_buff *skb, struct nlmsghdr *nlh, - struct rtattr **xfrma) + struct nlattr **attrs) { struct km_event c; - struct xfrm_usersa_flush *p = NLMSG_DATA(nlh); + struct xfrm_usersa_flush *p = nlmsg_data(nlh); struct xfrm_audit audit_info; int err; @@ -1441,18 +1344,25 @@ static int xfrm_flush_sa(struct sk_buff *skb, struct nlmsghdr *nlh, return 0; } +static inline size_t xfrm_aevent_msgsize(void) +{ + return NLMSG_ALIGN(sizeof(struct xfrm_aevent_id)) + + nla_total_size(sizeof(struct xfrm_replay_state)) + + nla_total_size(sizeof(struct xfrm_lifetime_cur)) + + nla_total_size(4) /* XFRM_AE_RTHR */ + + nla_total_size(4); /* XFRM_AE_ETHR */ +} static int build_aevent(struct sk_buff *skb, struct xfrm_state *x, struct km_event *c) { struct xfrm_aevent_id *id; struct nlmsghdr *nlh; - struct xfrm_lifetime_cur ltime; - unsigned char *b = skb_tail_pointer(skb); - nlh = NLMSG_PUT(skb, c->pid, c->seq, XFRM_MSG_NEWAE, sizeof(*id)); - id = NLMSG_DATA(nlh); - nlh->nlmsg_flags = 0; + nlh = nlmsg_put(skb, c->pid, c->seq, XFRM_MSG_NEWAE, sizeof(*id), 0); + if (nlh == NULL) + return -EMSGSIZE; + id = nlmsg_data(nlh); memcpy(&id->sa_id.daddr, &x->id.daddr,sizeof(x->id.daddr)); id->sa_id.spi = x->id.spi; id->sa_id.family = x->props.family; @@ -1461,54 +1371,34 @@ static int build_aevent(struct sk_buff *skb, struct xfrm_state *x, struct km_eve id->reqid = x->props.reqid; id->flags = c->data.aevent; - RTA_PUT(skb, XFRMA_REPLAY_VAL, sizeof(x->replay), &x->replay); + NLA_PUT(skb, XFRMA_REPLAY_VAL, sizeof(x->replay), &x->replay); + NLA_PUT(skb, XFRMA_LTIME_VAL, sizeof(x->curlft), &x->curlft); - ltime.bytes = x->curlft.bytes; - ltime.packets = x->curlft.packets; - ltime.add_time = x->curlft.add_time; - ltime.use_time = x->curlft.use_time; + if (id->flags & XFRM_AE_RTHR) + NLA_PUT_U32(skb, XFRMA_REPLAY_THRESH, x->replay_maxdiff); - RTA_PUT(skb, XFRMA_LTIME_VAL, sizeof(struct xfrm_lifetime_cur), &ltime); + if (id->flags & XFRM_AE_ETHR) + NLA_PUT_U32(skb, XFRMA_ETIMER_THRESH, + x->replay_maxage * 10 / HZ); - if (id->flags&XFRM_AE_RTHR) { - RTA_PUT(skb,XFRMA_REPLAY_THRESH,sizeof(u32),&x->replay_maxdiff); - } - - if (id->flags&XFRM_AE_ETHR) { - u32 etimer = x->replay_maxage*10/HZ; - RTA_PUT(skb,XFRMA_ETIMER_THRESH,sizeof(u32),&etimer); - } - - nlh->nlmsg_len = skb_tail_pointer(skb) - b; - return skb->len; + return nlmsg_end(skb, nlh); -rtattr_failure: -nlmsg_failure: - nlmsg_trim(skb, b); - return -1; +nla_put_failure: + nlmsg_cancel(skb, nlh); + return -EMSGSIZE; } static int xfrm_get_ae(struct sk_buff *skb, struct nlmsghdr *nlh, - struct rtattr **xfrma) + struct nlattr **attrs) { struct xfrm_state *x; struct sk_buff *r_skb; int err; struct km_event c; - struct xfrm_aevent_id *p = NLMSG_DATA(nlh); - int len = NLMSG_LENGTH(sizeof(struct xfrm_aevent_id)); + struct xfrm_aevent_id *p = nlmsg_data(nlh); struct xfrm_usersa_id *id = &p->sa_id; - len += RTA_SPACE(sizeof(struct xfrm_replay_state)); - len += RTA_SPACE(sizeof(struct xfrm_lifetime_cur)); - - if (p->flags&XFRM_AE_RTHR) - len+=RTA_SPACE(sizeof(u32)); - - if (p->flags&XFRM_AE_ETHR) - len+=RTA_SPACE(sizeof(u32)); - - r_skb = alloc_skb(len, GFP_ATOMIC); + r_skb = nlmsg_new(xfrm_aevent_msgsize(), GFP_ATOMIC); if (r_skb == NULL) return -ENOMEM; @@ -1530,22 +1420,21 @@ static int xfrm_get_ae(struct sk_buff *skb, struct nlmsghdr *nlh, if (build_aevent(r_skb, x, &c) < 0)
BUG(); - err = netlink_unicast(xfrm_nl, r_skb, - NETLINK_CB(skb).pid, MSG_DONTWAIT); + err = nlmsg_unicast(xfrm_nl, r_skb, NETLINK_CB(skb).pid); spin_unlock_bh(&x->lock); xfrm_state_put(x); return err; } static int xfrm_new_ae(struct sk_buff *skb, struct nlmsghdr *nlh, - struct rtattr **xfrma) + struct nlattr **attrs) { struct xfrm_state *x; struct km_event c; int err = - EINVAL; - struct xfrm_aevent_id *p = NLMSG_DATA(nlh); - struct rtattr *rp = xfrma[XFRMA_REPLAY_VAL-1]; - struct rtattr *lt = xfrma[XFRMA_LTIME_VAL-1]; + struct xfrm_aevent_id *p = nlmsg_data(nlh); + struct nlattr *rp = attrs[XFRMA_REPLAY_VAL]; + struct nlattr *lt = attrs[XFRMA_LTIME_VAL]; if (!lt && !rp) return err; @@ -1562,10 +1451,8 @@ static int xfrm_new_ae(struct sk_buff *skb, struct nlmsghdr *nlh, goto out; spin_lock_bh(&x->lock); - err = xfrm_update_ae_params(x, xfrma); + xfrm_update_ae_params(x, attrs); spin_unlock_bh(&x->lock); - if (err < 0) - goto out; c.event = nlh->nlmsg_type; c.seq = nlh->nlmsg_seq; @@ -1579,14 +1466,14 @@ static int xfrm_new_ae(struct sk_buff *skb, struct nlmsghdr *nlh, } static int xfrm_flush_policy(struct sk_buff *skb, struct nlmsghdr *nlh, - struct rtattr **xfrma) + struct nlattr **attrs) { struct km_event c; u8 type = XFRM_POLICY_TYPE_MAIN; int err; struct xfrm_audit audit_info; - err = copy_from_user_policy_type(&type, xfrma); + err = copy_from_user_policy_type(&type, attrs); if (err) return err; @@ -1604,31 +1491,31 @@ static int xfrm_flush_policy(struct sk_buff *skb, struct nlmsghdr *nlh, } static int xfrm_add_pol_expire(struct sk_buff *skb, struct nlmsghdr *nlh, - struct rtattr **xfrma) + struct nlattr **attrs) { struct xfrm_policy *xp; - struct xfrm_user_polexpire *up = NLMSG_DATA(nlh); + struct xfrm_user_polexpire *up = nlmsg_data(nlh); struct xfrm_userpolicy_info *p = &up->pol; u8 type = XFRM_POLICY_TYPE_MAIN; int err = -ENOENT; - err = copy_from_user_policy_type(&type, xfrma); + err = copy_from_user_policy_type(&type, attrs); if (err) return err; if (p->index) xp = xfrm_policy_byid(type, p->dir, p->index, 0, &err); else { - struct rtattr *rt = xfrma[XFRMA_SEC_CTX-1]; + struct nlattr *rt = attrs[XFRMA_SEC_CTX]; struct xfrm_policy tmp; - err = verify_sec_ctx_len(xfrma); + err = verify_sec_ctx_len(attrs); if (err) return err; memset(&tmp, 0, sizeof(struct xfrm_policy)); if (rt) { - struct xfrm_user_sec_ctx *uctx = RTA_DATA(rt); + struct xfrm_user_sec_ctx *uctx = nla_data(rt); if ((err = security_xfrm_policy_alloc(&tmp, uctx))) return err; @@ -1650,8 +1537,8 @@ static int xfrm_add_pol_expire(struct sk_buff *skb, struct nlmsghdr *nlh, err = 0; if (up->hard) { xfrm_policy_delete(xp, p->dir); - xfrm_audit_log(NETLINK_CB(skb).loginuid, NETLINK_CB(skb).sid, - AUDIT_MAC_IPSEC_DELSPD, 1, xp, NULL); + xfrm_audit_policy_delete(xp, 1, NETLINK_CB(skb).loginuid, + NETLINK_CB(skb).sid); } else { // reset the timers here? 
@@ -1665,11 +1552,11 @@ static int xfrm_add_pol_expire(struct sk_buff *skb, struct nlmsghdr *nlh, } static int xfrm_add_sa_expire(struct sk_buff *skb, struct nlmsghdr *nlh, - struct rtattr **xfrma) + struct nlattr **attrs) { struct xfrm_state *x; int err; - struct xfrm_user_expire *ue = NLMSG_DATA(nlh); + struct xfrm_user_expire *ue = nlmsg_data(nlh); struct xfrm_usersa_info *p = &ue->state; x = xfrm_state_lookup(&p->id.daddr, p->id.spi, p->id.proto, p->family); @@ -1686,8 +1573,8 @@ static int xfrm_add_sa_expire(struct sk_buff *skb, struct nlmsghdr *nlh, if (ue->hard) { __xfrm_state_delete(x); - xfrm_audit_log(NETLINK_CB(skb).loginuid, NETLINK_CB(skb).sid, - AUDIT_MAC_IPSEC_DELSA, 1, NULL, x); + xfrm_audit_state_delete(x, 1, NETLINK_CB(skb).loginuid, + NETLINK_CB(skb).sid); } err = 0; out: @@ -1697,14 +1584,14 @@ static int xfrm_add_sa_expire(struct sk_buff *skb, struct nlmsghdr *nlh, } static int xfrm_add_acquire(struct sk_buff *skb, struct nlmsghdr *nlh, - struct rtattr **xfrma) + struct nlattr **attrs) { struct xfrm_policy *xp; struct xfrm_user_tmpl *ut; int i; - struct rtattr *rt = xfrma[XFRMA_TMPL-1]; + struct nlattr *rt = attrs[XFRMA_TMPL]; - struct xfrm_user_acquire *ua = NLMSG_DATA(nlh); + struct xfrm_user_acquire *ua = nlmsg_data(nlh); struct xfrm_state *x = xfrm_state_alloc(); int err = -ENOMEM; @@ -1719,7 +1606,7 @@ static int xfrm_add_acquire(struct sk_buff *skb, struct nlmsghdr *nlh, } /* build an XP */ - xp = xfrm_policy_construct(&ua->policy, (struct rtattr **) xfrma, &err); + xp = xfrm_policy_construct(&ua->policy, attrs, &err); if (!xp) { kfree(x); return err; @@ -1729,7 +1616,7 @@ static int xfrm_add_acquire(struct sk_buff *skb, struct nlmsghdr *nlh, memcpy(&x->props.saddr, &ua->saddr, sizeof(ua->saddr)); memcpy(&x->sel, &ua->sel, sizeof(ua->sel)); - ut = RTA_DATA(rt); + ut = nla_data(rt); /* extract the templates and for each call km_key */ for (i = 0; i < xp->xfrm_nr; i++, ut++) { struct xfrm_tmpl *t = &xp->xfrm_vec[i]; @@ -1751,29 +1638,15 @@ static int xfrm_add_acquire(struct sk_buff *skb, struct nlmsghdr *nlh, } #ifdef CONFIG_XFRM_MIGRATE -static int verify_user_migrate(struct rtattr **xfrma) -{ - struct rtattr *rt = xfrma[XFRMA_MIGRATE-1]; - struct xfrm_user_migrate *um; - - if (!rt) - return -EINVAL; - - if ((rt->rta_len - sizeof(*rt)) < sizeof(*um)) - return -EINVAL; - - return 0; -} - static int copy_from_user_migrate(struct xfrm_migrate *ma, - struct rtattr **xfrma, int *num) + struct nlattr **attrs, int *num) { - struct rtattr *rt = xfrma[XFRMA_MIGRATE-1]; + struct nlattr *rt = attrs[XFRMA_MIGRATE]; struct xfrm_user_migrate *um; int i, num_migrate; - um = RTA_DATA(rt); - num_migrate = (rt->rta_len - sizeof(*rt)) / sizeof(*um); + um = nla_data(rt); + num_migrate = nla_len(rt) / sizeof(*um); if (num_migrate <= 0 || num_migrate > XFRM_MAX_DEPTH) return -EINVAL; @@ -1797,24 +1670,23 @@ static int copy_from_user_migrate(struct xfrm_migrate *ma, } static int xfrm_do_migrate(struct sk_buff *skb, struct nlmsghdr *nlh, - struct rtattr **xfrma) + struct nlattr **attrs) { - struct xfrm_userpolicy_id *pi = NLMSG_DATA(nlh); + struct xfrm_userpolicy_id *pi = nlmsg_data(nlh); struct xfrm_migrate m[XFRM_MAX_DEPTH]; u8 type; int err; int n = 0; - err = verify_user_migrate((struct rtattr **)xfrma); - if (err) - return err; + if (attrs[XFRMA_MIGRATE] == NULL) + return -EINVAL; - err = copy_from_user_policy_type(&type, (struct rtattr **)xfrma); + err = copy_from_user_policy_type(&type, attrs); if (err) return err; err = copy_from_user_migrate((struct xfrm_migrate *)m, - (struct 
rtattr **)xfrma, &n); + attrs, &n); if (err) return err; @@ -1827,7 +1699,7 @@ static int xfrm_do_migrate(struct sk_buff *skb, struct nlmsghdr *nlh, } #else static int xfrm_do_migrate(struct sk_buff *skb, struct nlmsghdr *nlh, - struct rtattr **xfrma) + struct nlattr **attrs) { return -ENOPROTOOPT; } @@ -1849,11 +1721,14 @@ static int copy_to_user_migrate(struct xfrm_migrate *m, struct sk_buff *skb) memcpy(&um.new_daddr, &m->new_daddr, sizeof(um.new_daddr)); memcpy(&um.new_saddr, &m->new_saddr, sizeof(um.new_saddr)); - RTA_PUT(skb, XFRMA_MIGRATE, sizeof(um), &um); - return 0; + return nla_put(skb, XFRMA_MIGRATE, sizeof(um), &um); +} -rtattr_failure: - return -1; +static inline size_t xfrm_migrate_msgsize(int num_migrate) +{ + return NLMSG_ALIGN(sizeof(struct xfrm_userpolicy_id)) + + nla_total_size(sizeof(struct xfrm_user_migrate) * num_migrate) + + userpolicy_type_attrsize(); } static int build_migrate(struct sk_buff *skb, struct xfrm_migrate *m, @@ -1863,13 +1738,13 @@ static int build_migrate(struct sk_buff *skb, struct xfrm_migrate *m, struct xfrm_migrate *mp; struct xfrm_userpolicy_id *pol_id; struct nlmsghdr *nlh; - unsigned char *b = skb_tail_pointer(skb); int i; - nlh = NLMSG_PUT(skb, 0, 0, XFRM_MSG_MIGRATE, sizeof(*pol_id)); - pol_id = NLMSG_DATA(nlh); - nlh->nlmsg_flags = 0; + nlh = nlmsg_put(skb, 0, 0, XFRM_MSG_MIGRATE, sizeof(*pol_id), 0); + if (nlh == NULL) + return -EMSGSIZE; + pol_id = nlmsg_data(nlh); /* copy data from selector, dir, and type to the pol_id */ memset(pol_id, 0, sizeof(*pol_id)); memcpy(&pol_id->sel, sel, sizeof(pol_id->sel)); @@ -1883,25 +1758,18 @@ static int build_migrate(struct sk_buff *skb, struct xfrm_migrate *m, goto nlmsg_failure; } - nlh->nlmsg_len = skb_tail_pointer(skb) - b; - return skb->len; + return nlmsg_end(skb, nlh); nlmsg_failure: - nlmsg_trim(skb, b); - return -1; + nlmsg_cancel(skb, nlh); + return -EMSGSIZE; } static int xfrm_send_migrate(struct xfrm_selector *sel, u8 dir, u8 type, struct xfrm_migrate *m, int num_migrate) { struct sk_buff *skb; - size_t len; - len = RTA_SPACE(sizeof(struct xfrm_user_migrate) * num_migrate); - len += NLMSG_SPACE(sizeof(struct xfrm_userpolicy_id)); -#ifdef CONFIG_XFRM_SUB_POLICY - len += RTA_SPACE(sizeof(struct xfrm_userpolicy_type)); -#endif - skb = alloc_skb(len, GFP_ATOMIC); + skb = nlmsg_new(xfrm_migrate_msgsize(num_migrate), GFP_ATOMIC); if (skb == NULL) return -ENOMEM; @@ -1909,9 +1777,7 @@ static int xfrm_send_migrate(struct xfrm_selector *sel, u8 dir, u8 type, if (build_migrate(skb, m, num_migrate, sel, dir, type) < 0) BUG(); - NETLINK_CB(skb).dst_group = XFRMNLGRP_MIGRATE; - return netlink_broadcast(xfrm_nl, skb, 0, XFRMNLGRP_MIGRATE, - GFP_ATOMIC); + return nlmsg_multicast(xfrm_nl, skb, 0, XFRMNLGRP_MIGRATE, GFP_ATOMIC); } #else static int xfrm_send_migrate(struct xfrm_selector *sel, u8 dir, u8 type, @@ -1921,7 +1787,7 @@ static int xfrm_send_migrate(struct xfrm_selector *sel, u8 dir, u8 type, } #endif -#define XMSGSIZE(type) NLMSG_LENGTH(sizeof(struct type)) +#define XMSGSIZE(type) sizeof(struct type) static const int xfrm_msg_min[XFRM_NR_MSGTYPES] = { [XFRM_MSG_NEWSA - XFRM_MSG_BASE] = XMSGSIZE(xfrm_usersa_info), @@ -1937,19 +1803,36 @@ static const int xfrm_msg_min[XFRM_NR_MSGTYPES] = { [XFRM_MSG_UPDSA - XFRM_MSG_BASE] = XMSGSIZE(xfrm_usersa_info), [XFRM_MSG_POLEXPIRE - XFRM_MSG_BASE] = XMSGSIZE(xfrm_user_polexpire), [XFRM_MSG_FLUSHSA - XFRM_MSG_BASE] = XMSGSIZE(xfrm_usersa_flush), - [XFRM_MSG_FLUSHPOLICY - XFRM_MSG_BASE] = NLMSG_LENGTH(0), + [XFRM_MSG_FLUSHPOLICY - XFRM_MSG_BASE] = 0, 
[XFRM_MSG_NEWAE - XFRM_MSG_BASE] = XMSGSIZE(xfrm_aevent_id), [XFRM_MSG_GETAE - XFRM_MSG_BASE] = XMSGSIZE(xfrm_aevent_id), [XFRM_MSG_REPORT - XFRM_MSG_BASE] = XMSGSIZE(xfrm_user_report), [XFRM_MSG_MIGRATE - XFRM_MSG_BASE] = XMSGSIZE(xfrm_userpolicy_id), - [XFRM_MSG_GETSADINFO - XFRM_MSG_BASE] = NLMSG_LENGTH(sizeof(u32)), - [XFRM_MSG_GETSPDINFO - XFRM_MSG_BASE] = NLMSG_LENGTH(sizeof(u32)), + [XFRM_MSG_GETSADINFO - XFRM_MSG_BASE] = sizeof(u32), + [XFRM_MSG_GETSPDINFO - XFRM_MSG_BASE] = sizeof(u32), }; #undef XMSGSIZE +static const struct nla_policy xfrma_policy[XFRMA_MAX+1] = { + [XFRMA_ALG_AUTH] = { .len = sizeof(struct xfrm_algo) }, + [XFRMA_ALG_CRYPT] = { .len = sizeof(struct xfrm_algo) }, + [XFRMA_ALG_COMP] = { .len = sizeof(struct xfrm_algo) }, + [XFRMA_ENCAP] = { .len = sizeof(struct xfrm_encap_tmpl) }, + [XFRMA_TMPL] = { .len = sizeof(struct xfrm_user_tmpl) }, + [XFRMA_SEC_CTX] = { .len = sizeof(struct xfrm_sec_ctx) }, + [XFRMA_LTIME_VAL] = { .len = sizeof(struct xfrm_lifetime_cur) }, + [XFRMA_REPLAY_VAL] = { .len = sizeof(struct xfrm_replay_state) }, + [XFRMA_REPLAY_THRESH] = { .type = NLA_U32 }, + [XFRMA_ETIMER_THRESH] = { .type = NLA_U32 }, + [XFRMA_SRCADDR] = { .len = sizeof(xfrm_address_t) }, + [XFRMA_COADDR] = { .len = sizeof(xfrm_address_t) }, + [XFRMA_POLICY_TYPE] = { .len = sizeof(struct xfrm_userpolicy_type)}, + [XFRMA_MIGRATE] = { .len = sizeof(struct xfrm_user_migrate) }, +}; + static struct xfrm_link { - int (*doit)(struct sk_buff *, struct nlmsghdr *, struct rtattr **); + int (*doit)(struct sk_buff *, struct nlmsghdr *, struct nlattr **); int (*dump)(struct sk_buff *, struct netlink_callback *); } xfrm_dispatch[XFRM_NR_MSGTYPES] = { [XFRM_MSG_NEWSA - XFRM_MSG_BASE] = { .doit = xfrm_add_sa }, @@ -1977,9 +1860,9 @@ static struct xfrm_link { static int xfrm_user_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh) { - struct rtattr *xfrma[XFRMA_MAX]; + struct nlattr *attrs[XFRMA_MAX+1]; struct xfrm_link *link; - int type, min_len; + int type, err; type = nlh->nlmsg_type; if (type > XFRM_MSG_MAX) @@ -2001,98 +1884,71 @@ static int xfrm_user_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh) return netlink_dump_start(xfrm_nl, skb, nlh, link->dump, NULL); } - memset(xfrma, 0, sizeof(xfrma)); - - if (nlh->nlmsg_len < (min_len = xfrm_msg_min[type])) - return -EINVAL; - - if (nlh->nlmsg_len > min_len) { - int attrlen = nlh->nlmsg_len - NLMSG_ALIGN(min_len); - struct rtattr *attr = (void *) nlh + NLMSG_ALIGN(min_len); - - while (RTA_OK(attr, attrlen)) { - unsigned short flavor = attr->rta_type; - if (flavor) { - if (flavor > XFRMA_MAX) - return -EINVAL; - xfrma[flavor - 1] = attr; - } - attr = RTA_NEXT(attr, attrlen); - } - } + err = nlmsg_parse(nlh, xfrm_msg_min[type], attrs, XFRMA_MAX, + xfrma_policy); + if (err < 0) + return err; if (link->doit == NULL) return -EINVAL; - return link->doit(skb, nlh, xfrma); + return link->doit(skb, nlh, attrs); } -static void xfrm_netlink_rcv(struct sock *sk, int len) +static void xfrm_netlink_rcv(struct sk_buff *skb) { - unsigned int qlen = 0; - - do { - mutex_lock(&xfrm_cfg_mutex); - netlink_run_queue(sk, &qlen, &xfrm_user_rcv_msg); - mutex_unlock(&xfrm_cfg_mutex); + mutex_lock(&xfrm_cfg_mutex); + netlink_rcv_skb(skb, &xfrm_user_rcv_msg); + mutex_unlock(&xfrm_cfg_mutex); +} - } while (qlen); +static inline size_t xfrm_expire_msgsize(void) +{ + return NLMSG_ALIGN(sizeof(struct xfrm_user_expire)); } static int build_expire(struct sk_buff *skb, struct xfrm_state *x, struct km_event *c) { struct xfrm_user_expire *ue; struct nlmsghdr *nlh; - unsigned 
char *b = skb_tail_pointer(skb); - nlh = NLMSG_PUT(skb, c->pid, 0, XFRM_MSG_EXPIRE, - sizeof(*ue)); - ue = NLMSG_DATA(nlh); - nlh->nlmsg_flags = 0; + nlh = nlmsg_put(skb, c->pid, 0, XFRM_MSG_EXPIRE, sizeof(*ue), 0); + if (nlh == NULL) + return -EMSGSIZE; + ue = nlmsg_data(nlh); copy_to_user_state(x, &ue->state); ue->hard = (c->data.hard != 0) ? 1 : 0; - nlh->nlmsg_len = skb_tail_pointer(skb) - b; - return skb->len; - -nlmsg_failure: - nlmsg_trim(skb, b); - return -1; + return nlmsg_end(skb, nlh); } static int xfrm_exp_state_notify(struct xfrm_state *x, struct km_event *c) { struct sk_buff *skb; - int len = NLMSG_LENGTH(sizeof(struct xfrm_user_expire)); - skb = alloc_skb(len, GFP_ATOMIC); + skb = nlmsg_new(xfrm_expire_msgsize(), GFP_ATOMIC); if (skb == NULL) return -ENOMEM; if (build_expire(skb, x, c) < 0) BUG(); - NETLINK_CB(skb).dst_group = XFRMNLGRP_EXPIRE; - return netlink_broadcast(xfrm_nl, skb, 0, XFRMNLGRP_EXPIRE, GFP_ATOMIC); + return nlmsg_multicast(xfrm_nl, skb, 0, XFRMNLGRP_EXPIRE, GFP_ATOMIC); } static int xfrm_aevent_state_notify(struct xfrm_state *x, struct km_event *c) { struct sk_buff *skb; - int len = NLMSG_LENGTH(sizeof(struct xfrm_aevent_id)); - len += RTA_SPACE(sizeof(struct xfrm_replay_state)); - len += RTA_SPACE(sizeof(struct xfrm_lifetime_cur)); - skb = alloc_skb(len, GFP_ATOMIC); + skb = nlmsg_new(xfrm_aevent_msgsize(), GFP_ATOMIC); if (skb == NULL) return -ENOMEM; if (build_aevent(skb, x, c) < 0) BUG(); - NETLINK_CB(skb).dst_group = XFRMNLGRP_AEVENTS; - return netlink_broadcast(xfrm_nl, skb, 0, XFRMNLGRP_AEVENTS, GFP_ATOMIC); + return nlmsg_multicast(xfrm_nl, skb, 0, XFRMNLGRP_AEVENTS, GFP_ATOMIC); } static int xfrm_notify_sa_flush(struct km_event *c) @@ -2100,42 +1956,45 @@ static int xfrm_notify_sa_flush(struct km_event *c) struct xfrm_usersa_flush *p; struct nlmsghdr *nlh; struct sk_buff *skb; - sk_buff_data_t b; - int len = NLMSG_LENGTH(sizeof(struct xfrm_usersa_flush)); + int len = NLMSG_ALIGN(sizeof(struct xfrm_usersa_flush)); - skb = alloc_skb(len, GFP_ATOMIC); + skb = nlmsg_new(len, GFP_ATOMIC); if (skb == NULL) return -ENOMEM; - b = skb->tail; - nlh = NLMSG_PUT(skb, c->pid, c->seq, - XFRM_MSG_FLUSHSA, sizeof(*p)); - nlh->nlmsg_flags = 0; + nlh = nlmsg_put(skb, c->pid, c->seq, XFRM_MSG_FLUSHSA, sizeof(*p), 0); + if (nlh == NULL) { + kfree_skb(skb); + return -EMSGSIZE; + } - p = NLMSG_DATA(nlh); + p = nlmsg_data(nlh); p->proto = c->data.proto; - nlh->nlmsg_len = skb->tail - b; - - NETLINK_CB(skb).dst_group = XFRMNLGRP_SA; - return netlink_broadcast(xfrm_nl, skb, 0, XFRMNLGRP_SA, GFP_ATOMIC); + nlmsg_end(skb, nlh); -nlmsg_failure: - kfree_skb(skb); - return -1; + return nlmsg_multicast(xfrm_nl, skb, 0, XFRMNLGRP_SA, GFP_ATOMIC); } -static inline int xfrm_sa_len(struct xfrm_state *x) +static inline size_t xfrm_sa_len(struct xfrm_state *x) { - int l = 0; + size_t l = 0; if (x->aalg) - l += RTA_SPACE(sizeof(*x->aalg) + (x->aalg->alg_key_len+7)/8); + l += nla_total_size(alg_len(x->aalg)); if (x->ealg) - l += RTA_SPACE(sizeof(*x->ealg) + (x->ealg->alg_key_len+7)/8); + l += nla_total_size(alg_len(x->ealg)); if (x->calg) - l += RTA_SPACE(sizeof(*x->calg)); + l += nla_total_size(sizeof(*x->calg)); if (x->encap) - l += RTA_SPACE(sizeof(*x->encap)); + l += nla_total_size(sizeof(*x->encap)); + if (x->security) + l += nla_total_size(sizeof(struct xfrm_user_sec_ctx) + + x->security->ctx_len); + if (x->coaddr) + l += nla_total_size(sizeof(*x->coaddr)); + + /* Must count this as this may become non-zero behind our back. 
*/ + l += nla_total_size(sizeof(x->lastused)); return l; } @@ -2146,57 +2005,51 @@ static int xfrm_notify_sa(struct xfrm_state *x, struct km_event *c) struct xfrm_usersa_id *id; struct nlmsghdr *nlh; struct sk_buff *skb; - sk_buff_data_t b; int len = xfrm_sa_len(x); int headlen; headlen = sizeof(*p); if (c->event == XFRM_MSG_DELSA) { - len += RTA_SPACE(headlen); + len += nla_total_size(headlen); headlen = sizeof(*id); } - len += NLMSG_SPACE(headlen); + len += NLMSG_ALIGN(headlen); - skb = alloc_skb(len, GFP_ATOMIC); + skb = nlmsg_new(len, GFP_ATOMIC); if (skb == NULL) return -ENOMEM; - b = skb->tail; - nlh = NLMSG_PUT(skb, c->pid, c->seq, c->event, headlen); - nlh->nlmsg_flags = 0; + nlh = nlmsg_put(skb, c->pid, c->seq, c->event, headlen, 0); + if (nlh == NULL) + goto nla_put_failure; - p = NLMSG_DATA(nlh); + p = nlmsg_data(nlh); if (c->event == XFRM_MSG_DELSA) { - id = NLMSG_DATA(nlh); + struct nlattr *attr; + + id = nlmsg_data(nlh); memcpy(&id->daddr, &x->id.daddr, sizeof(id->daddr)); id->spi = x->id.spi; id->family = x->props.family; id->proto = x->id.proto; - p = RTA_DATA(__RTA_PUT(skb, XFRMA_SA, sizeof(*p))); - } - - copy_to_user_state(x, p); + attr = nla_reserve(skb, XFRMA_SA, sizeof(*p)); + if (attr == NULL) + goto nla_put_failure; - if (x->aalg) - RTA_PUT(skb, XFRMA_ALG_AUTH, - sizeof(*(x->aalg))+(x->aalg->alg_key_len+7)/8, x->aalg); - if (x->ealg) - RTA_PUT(skb, XFRMA_ALG_CRYPT, - sizeof(*(x->ealg))+(x->ealg->alg_key_len+7)/8, x->ealg); - if (x->calg) - RTA_PUT(skb, XFRMA_ALG_COMP, sizeof(*(x->calg)), x->calg); + p = nla_data(attr); + } - if (x->encap) - RTA_PUT(skb, XFRMA_ENCAP, sizeof(*x->encap), x->encap); + if (copy_to_user_state_extra(x, p, skb)) + goto nla_put_failure; - nlh->nlmsg_len = skb->tail - b; + nlmsg_end(skb, nlh); - NETLINK_CB(skb).dst_group = XFRMNLGRP_SA; - return netlink_broadcast(xfrm_nl, skb, 0, XFRMNLGRP_SA, GFP_ATOMIC); + return nlmsg_multicast(xfrm_nl, skb, 0, XFRMNLGRP_SA, GFP_ATOMIC); -nlmsg_failure: -rtattr_failure: +nla_put_failure: + /* Somebody screwed up with xfrm_sa_len! 
*/ + WARN_ON(1); kfree_skb(skb); return -1; } @@ -2224,20 +2077,28 @@ static int xfrm_send_state_notify(struct xfrm_state *x, struct km_event *c) } +static inline size_t xfrm_acquire_msgsize(struct xfrm_state *x, + struct xfrm_policy *xp) +{ + return NLMSG_ALIGN(sizeof(struct xfrm_user_acquire)) + + nla_total_size(sizeof(struct xfrm_user_tmpl) * xp->xfrm_nr) + + nla_total_size(xfrm_user_sec_ctx_size(x->security)) + + userpolicy_type_attrsize(); +} + static int build_acquire(struct sk_buff *skb, struct xfrm_state *x, struct xfrm_tmpl *xt, struct xfrm_policy *xp, int dir) { struct xfrm_user_acquire *ua; struct nlmsghdr *nlh; - unsigned char *b = skb_tail_pointer(skb); __u32 seq = xfrm_get_acqseq(); - nlh = NLMSG_PUT(skb, 0, 0, XFRM_MSG_ACQUIRE, - sizeof(*ua)); - ua = NLMSG_DATA(nlh); - nlh->nlmsg_flags = 0; + nlh = nlmsg_put(skb, 0, 0, XFRM_MSG_ACQUIRE, sizeof(*ua), 0); + if (nlh == NULL) + return -EMSGSIZE; + ua = nlmsg_data(nlh); memcpy(&ua->id, &x->id, sizeof(ua->id)); memcpy(&ua->saddr, &x->props.saddr, sizeof(ua->saddr)); memcpy(&ua->sel, &x->sel, sizeof(ua->sel)); @@ -2254,35 +2115,26 @@ static int build_acquire(struct sk_buff *skb, struct xfrm_state *x, if (copy_to_user_policy_type(xp->type, skb) < 0) goto nlmsg_failure; - nlh->nlmsg_len = skb_tail_pointer(skb) - b; - return skb->len; + return nlmsg_end(skb, nlh); nlmsg_failure: - nlmsg_trim(skb, b); - return -1; + nlmsg_cancel(skb, nlh); + return -EMSGSIZE; } static int xfrm_send_acquire(struct xfrm_state *x, struct xfrm_tmpl *xt, struct xfrm_policy *xp, int dir) { struct sk_buff *skb; - size_t len; - len = RTA_SPACE(sizeof(struct xfrm_user_tmpl) * xp->xfrm_nr); - len += NLMSG_SPACE(sizeof(struct xfrm_user_acquire)); - len += RTA_SPACE(xfrm_user_sec_ctx_size(x->security)); -#ifdef CONFIG_XFRM_SUB_POLICY - len += RTA_SPACE(sizeof(struct xfrm_userpolicy_type)); -#endif - skb = alloc_skb(len, GFP_ATOMIC); + skb = nlmsg_new(xfrm_acquire_msgsize(x, xp), GFP_ATOMIC); if (skb == NULL) return -ENOMEM; if (build_acquire(skb, x, xt, xp, dir) < 0) BUG(); - NETLINK_CB(skb).dst_group = XFRMNLGRP_ACQUIRE; - return netlink_broadcast(xfrm_nl, skb, 0, XFRMNLGRP_ACQUIRE, GFP_ATOMIC); + return nlmsg_multicast(xfrm_nl, skb, 0, XFRMNLGRP_ACQUIRE, GFP_ATOMIC); } /* User gives us xfrm_user_policy_info followed by an array of 0 @@ -2344,18 +2196,26 @@ static struct xfrm_policy *xfrm_compile_policy(struct sock *sk, int opt, return xp; } +static inline size_t xfrm_polexpire_msgsize(struct xfrm_policy *xp) +{ + return NLMSG_ALIGN(sizeof(struct xfrm_user_polexpire)) + + nla_total_size(sizeof(struct xfrm_user_tmpl) * xp->xfrm_nr) + + nla_total_size(xfrm_user_sec_ctx_size(xp->security)) + + userpolicy_type_attrsize(); +} + static int build_polexpire(struct sk_buff *skb, struct xfrm_policy *xp, int dir, struct km_event *c) { struct xfrm_user_polexpire *upe; struct nlmsghdr *nlh; int hard = c->data.hard; - unsigned char *b = skb_tail_pointer(skb); - nlh = NLMSG_PUT(skb, c->pid, 0, XFRM_MSG_POLEXPIRE, sizeof(*upe)); - upe = NLMSG_DATA(nlh); - nlh->nlmsg_flags = 0; + nlh = nlmsg_put(skb, c->pid, 0, XFRM_MSG_POLEXPIRE, sizeof(*upe), 0); + if (nlh == NULL) + return -EMSGSIZE; + upe = nlmsg_data(nlh); copy_to_user_policy(xp, &upe->pol, dir); if (copy_to_user_tmpl(xp, skb) < 0) goto nlmsg_failure; @@ -2365,34 +2225,25 @@ static int build_polexpire(struct sk_buff *skb, struct xfrm_policy *xp, goto nlmsg_failure; upe->hard = !!hard; - nlh->nlmsg_len = skb_tail_pointer(skb) - b; - return skb->len; + return nlmsg_end(skb, nlh); nlmsg_failure: - nlmsg_trim(skb, b); - return 
-1; + nlmsg_cancel(skb, nlh); + return -EMSGSIZE; } static int xfrm_exp_policy_notify(struct xfrm_policy *xp, int dir, struct km_event *c) { struct sk_buff *skb; - size_t len; - len = RTA_SPACE(sizeof(struct xfrm_user_tmpl) * xp->xfrm_nr); - len += NLMSG_SPACE(sizeof(struct xfrm_user_polexpire)); - len += RTA_SPACE(xfrm_user_sec_ctx_size(xp->security)); -#ifdef CONFIG_XFRM_SUB_POLICY - len += RTA_SPACE(sizeof(struct xfrm_userpolicy_type)); -#endif - skb = alloc_skb(len, GFP_ATOMIC); + skb = nlmsg_new(xfrm_polexpire_msgsize(xp), GFP_ATOMIC); if (skb == NULL) return -ENOMEM; if (build_polexpire(skb, xp, dir, c) < 0) BUG(); - NETLINK_CB(skb).dst_group = XFRMNLGRP_EXPIRE; - return netlink_broadcast(xfrm_nl, skb, 0, XFRMNLGRP_EXPIRE, GFP_ATOMIC); + return nlmsg_multicast(xfrm_nl, skb, 0, XFRMNLGRP_EXPIRE, GFP_ATOMIC); } static int xfrm_notify_policy(struct xfrm_policy *xp, int dir, struct km_event *c) @@ -2401,30 +2252,30 @@ static int xfrm_notify_policy(struct xfrm_policy *xp, int dir, struct km_event * struct xfrm_userpolicy_id *id; struct nlmsghdr *nlh; struct sk_buff *skb; - sk_buff_data_t b; - int len = RTA_SPACE(sizeof(struct xfrm_user_tmpl) * xp->xfrm_nr); + int len = nla_total_size(sizeof(struct xfrm_user_tmpl) * xp->xfrm_nr); int headlen; headlen = sizeof(*p); if (c->event == XFRM_MSG_DELPOLICY) { - len += RTA_SPACE(headlen); + len += nla_total_size(headlen); headlen = sizeof(*id); } -#ifdef CONFIG_XFRM_SUB_POLICY - len += RTA_SPACE(sizeof(struct xfrm_userpolicy_type)); -#endif - len += NLMSG_SPACE(headlen); + len += userpolicy_type_attrsize(); + len += NLMSG_ALIGN(headlen); - skb = alloc_skb(len, GFP_ATOMIC); + skb = nlmsg_new(len, GFP_ATOMIC); if (skb == NULL) return -ENOMEM; - b = skb->tail; - nlh = NLMSG_PUT(skb, c->pid, c->seq, c->event, headlen); + nlh = nlmsg_put(skb, c->pid, c->seq, c->event, headlen, 0); + if (nlh == NULL) + goto nlmsg_failure; - p = NLMSG_DATA(nlh); + p = nlmsg_data(nlh); if (c->event == XFRM_MSG_DELPOLICY) { - id = NLMSG_DATA(nlh); + struct nlattr *attr; + + id = nlmsg_data(nlh); memset(id, 0, sizeof(*id)); id->dir = dir; if (c->data.byid) @@ -2432,10 +2283,12 @@ static int xfrm_notify_policy(struct xfrm_policy *xp, int dir, struct km_event * else memcpy(&id->sel, &xp->selector, sizeof(id->sel)); - p = RTA_DATA(__RTA_PUT(skb, XFRMA_POLICY, sizeof(*p))); - } + attr = nla_reserve(skb, XFRMA_POLICY, sizeof(*p)); + if (attr == NULL) + goto nlmsg_failure; - nlh->nlmsg_flags = 0; + p = nla_data(attr); + } copy_to_user_policy(xp, p, dir); if (copy_to_user_tmpl(xp, skb) < 0) @@ -2443,13 +2296,11 @@ static int xfrm_notify_policy(struct xfrm_policy *xp, int dir, struct km_event * if (copy_to_user_policy_type(xp->type, skb) < 0) goto nlmsg_failure; - nlh->nlmsg_len = skb->tail - b; + nlmsg_end(skb, nlh); - NETLINK_CB(skb).dst_group = XFRMNLGRP_POLICY; - return netlink_broadcast(xfrm_nl, skb, 0, XFRMNLGRP_POLICY, GFP_ATOMIC); + return nlmsg_multicast(xfrm_nl, skb, 0, XFRMNLGRP_POLICY, GFP_ATOMIC); nlmsg_failure: -rtattr_failure: kfree_skb(skb); return -1; } @@ -2458,28 +2309,20 @@ static int xfrm_notify_policy_flush(struct km_event *c) { struct nlmsghdr *nlh; struct sk_buff *skb; - sk_buff_data_t b; - int len = 0; -#ifdef CONFIG_XFRM_SUB_POLICY - len += RTA_SPACE(sizeof(struct xfrm_userpolicy_type)); -#endif - len += NLMSG_LENGTH(0); - skb = alloc_skb(len, GFP_ATOMIC); + skb = nlmsg_new(userpolicy_type_attrsize(), GFP_ATOMIC); if (skb == NULL) return -ENOMEM; - b = skb->tail; - - nlh = NLMSG_PUT(skb, c->pid, c->seq, XFRM_MSG_FLUSHPOLICY, 0); - nlh->nlmsg_flags = 0; + 
nlh = nlmsg_put(skb, c->pid, c->seq, XFRM_MSG_FLUSHPOLICY, 0, 0); + if (nlh == NULL) + goto nlmsg_failure; if (copy_to_user_policy_type(c->data.type, skb) < 0) goto nlmsg_failure; - nlh->nlmsg_len = skb->tail - b; + nlmsg_end(skb, nlh); - NETLINK_CB(skb).dst_group = XFRMNLGRP_POLICY; - return netlink_broadcast(xfrm_nl, skb, 0, XFRMNLGRP_POLICY, GFP_ATOMIC); + return nlmsg_multicast(xfrm_nl, skb, 0, XFRMNLGRP_POLICY, GFP_ATOMIC); nlmsg_failure: kfree_skb(skb); @@ -2506,48 +2349,48 @@ static int xfrm_send_policy_notify(struct xfrm_policy *xp, int dir, struct km_ev } +static inline size_t xfrm_report_msgsize(void) +{ + return NLMSG_ALIGN(sizeof(struct xfrm_user_report)); +} + static int build_report(struct sk_buff *skb, u8 proto, struct xfrm_selector *sel, xfrm_address_t *addr) { struct xfrm_user_report *ur; struct nlmsghdr *nlh; - unsigned char *b = skb_tail_pointer(skb); - nlh = NLMSG_PUT(skb, 0, 0, XFRM_MSG_REPORT, sizeof(*ur)); - ur = NLMSG_DATA(nlh); - nlh->nlmsg_flags = 0; + nlh = nlmsg_put(skb, 0, 0, XFRM_MSG_REPORT, sizeof(*ur), 0); + if (nlh == NULL) + return -EMSGSIZE; + ur = nlmsg_data(nlh); ur->proto = proto; memcpy(&ur->sel, sel, sizeof(ur->sel)); if (addr) - RTA_PUT(skb, XFRMA_COADDR, sizeof(*addr), addr); + NLA_PUT(skb, XFRMA_COADDR, sizeof(*addr), addr); - nlh->nlmsg_len = skb_tail_pointer(skb) - b; - return skb->len; + return nlmsg_end(skb, nlh); -nlmsg_failure: -rtattr_failure: - nlmsg_trim(skb, b); - return -1; +nla_put_failure: + nlmsg_cancel(skb, nlh); + return -EMSGSIZE; } static int xfrm_send_report(u8 proto, struct xfrm_selector *sel, xfrm_address_t *addr) { struct sk_buff *skb; - size_t len; - len = NLMSG_ALIGN(NLMSG_LENGTH(sizeof(struct xfrm_user_report))); - skb = alloc_skb(len, GFP_ATOMIC); + skb = nlmsg_new(xfrm_report_msgsize(), GFP_ATOMIC); if (skb == NULL) return -ENOMEM; if (build_report(skb, proto, sel, addr) < 0) BUG(); - NETLINK_CB(skb).dst_group = XFRMNLGRP_REPORT; - return netlink_broadcast(xfrm_nl, skb, 0, XFRMNLGRP_REPORT, GFP_ATOMIC); + return nlmsg_multicast(xfrm_nl, skb, 0, XFRMNLGRP_REPORT, GFP_ATOMIC); } static struct xfrm_mgr netlink_mgr = { @@ -2566,7 +2409,7 @@ static int __init xfrm_user_init(void) printk(KERN_INFO "Initializing XFRM netlink socket\n"); - nlsk = netlink_kernel_create(NETLINK_XFRM, XFRMNLGRP_MAX, + nlsk = netlink_kernel_create(&init_net, NETLINK_XFRM, XFRMNLGRP_MAX, xfrm_netlink_rcv, NULL, THIS_MODULE); if (nlsk == NULL) return -ENOMEM; diff --git a/scripts/mod/file2alias.c b/scripts/mod/file2alias.c index 1e5d4d6931955bbf9432b3309c13b0ef0b8192c5..36e3754db53a5b0951fb0a6c180f111ae2f9c54a 100644 --- a/scripts/mod/file2alias.c +++ b/scripts/mod/file2alias.c @@ -496,7 +496,21 @@ static int do_sdio_entry(const char *filename, ADD(alias, "c", id->class != (__u8)SDIO_ANY_ID, id->class); ADD(alias, "v", id->vendor != (__u16)SDIO_ANY_ID, id->vendor); ADD(alias, "d", id->device != (__u16)SDIO_ANY_ID, id->device); + return 1; +} + +/* Looks like: ssb:vNidNrevN. 
*/ +static int do_ssb_entry(const char *filename, + struct ssb_device_id *id, char *alias) +{ + id->vendor = TO_NATIVE(id->vendor); + id->coreid = TO_NATIVE(id->coreid); + id->revision = TO_NATIVE(id->revision); + strcpy(alias, "ssb:"); + ADD(alias, "v", id->vendor != SSB_ANY_VENDOR, id->vendor); + ADD(alias, "id", id->coreid != SSB_ANY_ID, id->coreid); + ADD(alias, "rev", id->revision != SSB_ANY_REV, id->revision); return 1; } @@ -619,6 +633,10 @@ void handle_moddevtable(struct module *mod, struct elf_info *info, do_table(symval, sym->st_size, sizeof(struct sdio_device_id), "sdio", do_sdio_entry, mod); + else if (sym_is(symname, "__mod_ssb_device_table")) + do_table(symval, sym->st_size, + sizeof(struct ssb_device_id), "ssb", + do_ssb_entry, mod); } /* Now add out buffered information to the generated C source */ diff --git a/scripts/mod/modpost.c b/scripts/mod/modpost.c index 6c145d6e89dec73d2b595a9895825a70c9bd6efe..0a4051fbd34e24116cd55c4e758605ea683348a8 100644 --- a/scripts/mod/modpost.c +++ b/scripts/mod/modpost.c @@ -709,6 +709,7 @@ static int secref_whitelist(const char *modname, const char *tosec, /* Check for pattern 0 */ if ((strncmp(fromsec, ".text.init.refok", strlen(".text.init.refok")) == 0) || + (strncmp(fromsec, ".exit.text.refok", strlen(".exit.text.refok")) == 0) || (strncmp(fromsec, ".data.init.refok", strlen(".data.init.refok")) == 0)) return 1; diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c index 0753b20e23fed48c80f4864b67fbe476de8daced..3c3fff33d1ce9644ef2fdac0d00af18ca4742df4 100644 --- a/security/selinux/hooks.c +++ b/security/selinux/hooks.c @@ -47,7 +47,7 @@ #include #include #include -#include <net/ip.h> /* for sysctl_local_port_range[] */ +#include <net/ip.h> /* for local_port_range[] */ #include <net/tcp.h> /* struct or_callable used in sock_rcv_skb */ #include #include @@ -3232,8 +3232,6 @@ static int selinux_socket_post_create(struct socket *sock, int family, /* Range of port numbers used to automatically bind. Need to determine whether we should perform a name_bind permission check between the socket and the port number.
*/ -#define ip_local_port_range_0 sysctl_local_port_range[0] -#define ip_local_port_range_1 sysctl_local_port_range[1] static int selinux_socket_bind(struct socket *sock, struct sockaddr *address, int addrlen) { @@ -3276,20 +3274,27 @@ static int selinux_socket_bind(struct socket *sock, struct sockaddr *address, in addrp = (char *)&addr6->sin6_addr.s6_addr; } - if (snum&&(snum < max(PROT_SOCK,ip_local_port_range_0) || - snum > ip_local_port_range_1)) { - err = security_port_sid(sk->sk_family, sk->sk_type, - sk->sk_protocol, snum, &sid); - if (err) - goto out; - AVC_AUDIT_DATA_INIT(&ad,NET); - ad.u.net.sport = htons(snum); - ad.u.net.family = family; - err = avc_has_perm(isec->sid, sid, - isec->sclass, - SOCKET__NAME_BIND, &ad); - if (err) - goto out; + if (snum) { + int low, high; + + inet_get_local_port_range(&low, &high); + + if (snum < max(PROT_SOCK, low) || snum > high) { + err = security_port_sid(sk->sk_family, + sk->sk_type, + sk->sk_protocol, snum, + &sid); + if (err) + goto out; + AVC_AUDIT_DATA_INIT(&ad,NET); + ad.u.net.sport = htons(snum); + ad.u.net.family = family; + err = avc_has_perm(isec->sid, sid, + isec->sclass, + SOCKET__NAME_BIND, &ad); + if (err) + goto out; + } } switch(isec->sclass) { diff --git a/security/selinux/netif.c b/security/selinux/netif.c index b10c34e8a74344f97afce95012a41ebaf7157242..e87ab948104c05a263163752dbbe173c9403f144 100644 --- a/security/selinux/netif.c +++ b/security/selinux/netif.c @@ -20,6 +20,7 @@ #include #include #include +#include <net/net_namespace.h> #include "security.h" #include "objsec.h" @@ -234,6 +235,9 @@ static int sel_netif_netdev_notifier_handler(struct notifier_block *this, { struct net_device *dev = ptr; + if (dev->nd_net != &init_net) + return NOTIFY_DONE; + if (event == NETDEV_DOWN) sel_netif_kill(dev); diff --git a/security/selinux/netlink.c b/security/selinux/netlink.c index f49046de63a2d126a90815e53612c7034d2b51d3..b59871d74dad870e72d13ee48f0dab4b3e95bcde 100644 --- a/security/selinux/netlink.c +++ b/security/selinux/netlink.c @@ -17,6 +17,7 @@ #include #include #include +#include <net/net_namespace.h> static struct sock *selnl; @@ -104,8 +105,8 @@ void selnl_notify_policyload(u32 seqno) static int __init selnl_init(void) { - selnl = netlink_kernel_create(NETLINK_SELINUX, SELNLGRP_MAX, NULL, NULL, - THIS_MODULE); + selnl = netlink_kernel_create(&init_net, NETLINK_SELINUX, + SELNLGRP_MAX, NULL, NULL, THIS_MODULE); if (selnl == NULL) panic("SELinux: Cannot create netlink socket."); netlink_set_nonroot(NETLINK_SELINUX, NL_NONROOT_RECV);
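/*
 * Editor's note: an illustrative sketch, not part of the patch above. It
 * restates the conversion pattern this series applies throughout
 * net/xfrm/xfrm_user.c: size the skb up front with NLMSG_ALIGN() and
 * nla_total_size() plus nlmsg_new(), build the message with nlmsg_put(),
 * nla_put_u32() and nlmsg_end() instead of the old NLMSG_PUT()/RTA_PUT()
 * macros, and unwind a partially built message with nlmsg_cancel(). The
 * "foo_" names, FOO_MSG_NOTIFY, FOO_ATTR_COUNT and FOO_NLGRP are
 * hypothetical placeholders, not symbols from the kernel tree.
 */
#include <linux/kernel.h>
#include <net/netlink.h>
#include <net/sock.h>

#define FOO_MSG_NOTIFY	0x20	/* hypothetical netlink message type */
#define FOO_ATTR_COUNT	1	/* hypothetical u32 attribute */
#define FOO_NLGRP	1	/* hypothetical multicast group */

struct foo_hdr {		/* hypothetical fixed message header */
	u32	id;
};

static struct sock *foo_nl;	/* assumed to be created elsewhere */

static inline size_t foo_msgsize(void)
{
	/* fixed header plus one u32 attribute, in the style of the
	 * xfrm_*_msgsize() helpers introduced by the patch */
	return NLMSG_ALIGN(sizeof(struct foo_hdr)) + nla_total_size(4);
}

static int foo_build(struct sk_buff *skb, u32 pid, u32 seq, u32 id, u32 count)
{
	struct nlmsghdr *nlh;
	struct foo_hdr *hdr;

	nlh = nlmsg_put(skb, pid, seq, FOO_MSG_NOTIFY, sizeof(*hdr), 0);
	if (nlh == NULL)
		return -EMSGSIZE;

	hdr = nlmsg_data(nlh);
	hdr->id = id;

	if (nla_put_u32(skb, FOO_ATTR_COUNT, count))
		goto nla_put_failure;

	/* nlmsg_end() fixes up nlmsg_len, as in the build_*() functions above */
	return nlmsg_end(skb, nlh);

nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}

static int foo_notify(u32 id, u32 count)
{
	struct sk_buff *skb;

	skb = nlmsg_new(foo_msgsize(), GFP_ATOMIC);
	if (skb == NULL)
		return -ENOMEM;

	/* the size was precomputed by foo_msgsize(), so a build failure is a
	 * programming error; the patched code reacts the same way with BUG() */
	if (foo_build(skb, 0, 0, id, count) < 0)
		BUG();

	return nlmsg_multicast(foo_nl, skb, 0, FOO_NLGRP, GFP_ATOMIC);
}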